ultralytics / yolov3

YOLOv3 in PyTorch > ONNX > CoreML > TFLite
https://docs.ultralytics.com
GNU Affero General Public License v3.0

Convert .pt file to ONNX format #1649

Closed: ardeal closed this issue 3 years ago

ardeal commented 3 years ago

Hi,

The following code is based on the master branch of this repository. I am trying to convert a YOLOv3 .pt file to ONNX format.

The following line executes correctly: p = torch_model(x)

However, the torch.onnx.export() function fails with the following error:

```
Traceback (most recent call last):
  File "C:\ProgramData\Anaconda3\lib\site-packages\torch\onnx\utils.py", line 632, in _export
    _model_to_graph(model, args, verbose, input_names,
  File "C:\ProgramData\Anaconda3\lib\site-packages\torch\onnx\utils.py", line 449, in _model_to_graph
    params_dict = torch._C._jit_pass_onnx_constant_fold(graph, params_dict,
RuntimeError: Input, output and indices must be on the current device
```

Here is the full conversion script:

```python
from __future__ import print_function
import os
import datetime
import numpy as np

from models.yolo import *
import torch
# import torch.nn as nn
# from models.common import Conv, DWConv
from utils.google_utils import attempt_download

class Ensemble(nn.ModuleList):
    # Ensemble of models
    def __init__(self):
        super(Ensemble, self).__init__()

    def forward(self, x, augment=False):
        y = []
        for module in self:
            y.append(module(x, augment)[0])
        # y = torch.stack(y).max(0)[0]  # max ensemble
        # y = torch.cat(y, 1)  # nms ensemble
        y = torch.stack(y).mean(0)  # mean ensemble
        return y, None  # inference, train output

def attempt_load(weights, map_location=None):
    # Loads an ensemble of models weights=[a,b,c] or a single model weights=[a] or weights=a
    model = Ensemble()
    for w in weights if isinstance(weights, list) else [weights]:
        attempt_download(w)
        model.append(torch.load(w, map_location=map_location)['model'].float().fuse().eval())  # load FP32 model

    # Compatibility updates
    for m in model.modules():
        if type(m) in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6]:
            m.inplace = True  # pytorch 1.7.0 compatibility
        elif type(m) is Conv:
            m._non_persistent_buffers_set = set()  # pytorch 1.6.0 compatibility

    if len(model) == 1:
        return model[-1]  # return model
    else:
        print('Ensemble created with %s\n' % weights)
        for k in ['names', 'stride']:
            setattr(model, k, getattr(model[-1], k))
        return model  # return ensemble

def convert_model(input_pth, output_onnx):

    print('cuda is available == {}'.format(torch.cuda.is_available()))
    device = select_device('')
    nc = 4  # number of classes (unused in this script)
    torch_model = attempt_load(input_pth, map_location=device).half()
    # torch_model = torch_model.to(device)
    # torch_model = torch_model.cpu()
    torch_model.eval()

    half = True
    imgsz = 640
    batch_size = 2  # just a random number (unused; the dummy input below uses batch size 1)
    # x = torch.rand(batch_size, 3, 640, 640, device=device).half()
    x = torch.zeros((1, 3, imgsz, imgsz), device=device)  # init img
    x = x.half()
    p = torch_model(x)

    # p = torch_model(x.half() if half else x) if device.type != 'cpu' else None

    torch.onnx.export(torch_model,  # model being run
                      x, #.to(device),  # model input (or a tuple for multiple inputs)
                      output_onnx,  # where to save the model (can be a file or file-like object)
                      opset_version=11,  # the ONNX version to export the model to
                      input_names=['input'],  # the model's input names
                      output_names=['output'],  # the model's output names
                      verbose=True
                  )

    return
```

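For reference, the failure happens inside ONNX constant folding (see `_jit_pass_onnx_constant_fold` in the traceback), most likely because some tensors end up on the GPU in FP16 while others are on the CPU. Below is a minimal sketch of a device-consistent export that keeps everything in FP32 on the CPU; it is an illustrative workaround reusing attempt_load from the script above, not the repo's models/export.py:

```python
# Sketch: keep the model and the dummy input together in FP32 on the CPU so
# that constant folding never mixes devices. Reuses attempt_load() from above.
import torch

def convert_model_cpu(input_pth, output_onnx, imgsz=640):
    device = torch.device('cpu')
    model = attempt_load(input_pth, map_location=device).float().eval()  # FP32, CPU
    x = torch.zeros((1, 3, imgsz, imgsz), device=device)  # dummy input on the same device
    torch.onnx.export(model, x, output_onnx,
                      opset_version=11,
                      input_names=['input'],
                      output_names=['output'])
```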
leeyunhome commented 3 years ago

> (quoted ardeal's original post and conversion script in full)

Hello.

Did you ever get an answer to this question? I am struggling with the same problem: I want to convert a .pt model trained with this repository to ONNX.

Thank you.

ardeal commented 3 years ago

Several months ago I could convert pt/pth files to ONNX correctly; now I can't. Something may have changed in the newer PyTorch version. I am also asking the same question on the PyTorch forum and am waiting for an answer.

leeyunhome commented 3 years ago

> Several months ago I could convert pt/pth files to ONNX correctly; now I can't. Something may have changed in the newer PyTorch version. I am also asking the same question on the PyTorch forum and am waiting for an answer.

Could you share the address of that thread on the PyTorch forum? I would like to follow it.

Thank you.

ardeal commented 3 years ago

https://discuss.pytorch.org/

https://discuss.pytorch.org/t/convet-pt-file-of-pytorch-yolov3-to-onnx/108470

ardeal commented 3 years ago

I eventually figured out this issue. The first solution is simply to use the code in models/export.py in this repo, which works. For the second solution, I compared the code I pasted in this topic with the code in models/export.py and eventually found that the script below works. The most important change is the line torch_model.model[-1].export = True; that is the whole difference. (Presumably this flag makes the Detect head skip its inference-time post-processing, which is the part that was failing during constant folding.)


```python
from __future__ import print_function
import os
import datetime
import numpy as np

from models.yolo import *
import torch
# import torch.nn as nn
# from models.common import Conv, DWConv
from utils.google_utils import attempt_download
import onnx

class Ensemble(nn.ModuleList):
    # Ensemble of models
    def __init__(self):
        super(Ensemble, self).__init__()

    def forward(self, x, augment=False):
        y = []
        for module in self:
            y.append(module(x, augment)[0])
        # y = torch.stack(y).max(0)[0]  # max ensemble
        # y = torch.cat(y, 1)  # nms ensemble
        y = torch.stack(y).mean(0)  # mean ensemble
        return y, None  # inference, train output

def attempt_load(weights, map_location=None):
    # Loads an ensemble of models weights=[a,b,c] or a single model weights=[a] or weights=a
    model = Ensemble()
    for w in weights if isinstance(weights, list) else [weights]:
        attempt_download(w)
        model.append(torch.load(w, map_location=map_location)['model'].float().fuse().eval())  # load FP32 model

    # Compatibility updates
    for m in model.modules():
        if type(m) in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6]:
            m.inplace = True  # pytorch 1.7.0 compatibility
        elif type(m) is Conv:
            m._non_persistent_buffers_set = set()  # pytorch 1.6.0 compatibility

    if len(model) == 1:
        return model[-1]  # return model
    else:
        print('Ensemble created with %s\n' % weights)
        for k in ['names', 'stride']:
            setattr(model, k, getattr(model[-1], k))
        return model  # return ensemble

def convert_model(input_pth, output_onnx):

    print('cuda is available == {}'.format(torch.cuda.is_available()))
    device = select_device('')
    nc = 4  # number of classes (unused in this script)
    torch_model = attempt_load(input_pth, map_location=device).half()
    # torch_model = torch_model.to(device)
    # torch_model = torch_model.cpu()

    torch_model.model[-1].export = True

    torch_model.eval()

    half = True
    imgsz = 640
    batch_size = 2  # just a random number (unused; the dummy input below uses batch size 1)
    # x = torch.rand(batch_size, 3, 640, 640, device=device).half()
    x = torch.zeros((1, 3, imgsz, imgsz), device=device)  # init img
    x = x.half()
    p = torch_model(x)

    torch.onnx.export(torch_model,  # model being run
                      x, #.to(device),  # model input (or a tuple for multiple inputs)
                      output_onnx,  # where to save the model (can be a file or file-like object)
                      verbose=False,
                      opset_version=12,  # the ONNX version to export the model to
                      input_names=['images'],  # the model's input names
                      output_names=['classes', 'boxes'] if p is None else ['output']  # the model's output names

                  )

    # onnx_model = onnx.load(output_onnx)  # load the exported ONNX model
    # onnx.checker.check_model(onnx_model)  # verify the exported model is well formed

    return
```

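For completeness, here is a quick sanity check of the exported file. This is a sketch, not code from the repo: it assumes the onnx and onnxruntime packages are installed, and that the model was exported in FP16 with the input name 'images' as in the script above:

```python
# Sketch: validate the exported model and run one dummy inference with it.
# Assumes `pip install onnx onnxruntime`; dtype must match the FP16 export above.
import numpy as np
import onnx
import onnxruntime as ort

def check_onnx(output_onnx, imgsz=640):
    onnx.checker.check_model(onnx.load(output_onnx))  # structural validity check
    sess = ort.InferenceSession(output_onnx, providers=['CPUExecutionProvider'])
    x = np.zeros((1, 3, imgsz, imgsz), dtype=np.float16)  # matches the FP16 export
    outputs = sess.run(None, {'images': x})  # 'images' is the input name used above
    print([o.shape for o in outputs])
```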
glenn-jocher commented 11 months ago

@ardeal that's great to hear that you were able to resolve the issue! Thank you for sharing the solution with the community. It's very helpful for others who might encounter the same problem. If you have any more questions or need further assistance, feel free to ask!