sharanms88 opened this issue 4 days ago
Describe the bug

YOLACT does not convert to ONNX using mmdeploy. The export appears to fail because the model returns custom data classes (mmengine's InstanceData) from its inference path, which the ONNX tracer cannot flatten (see the traceback and the minimal reproduction sketch below).
Reproduction

mmdetection config · mmdeploy config (paths are given in the snippet below)
@openmmlab-bot
```python
from mmdeploy.apis import torch2onnx

torch2onnx(
    img='/home/ubuntu/mydata/outward/forward/images/1wu1MRg17P-camera-video-segment-1715120945165.mp4.jpeg',
    work_dir='/home/ubuntu/tadirs/onnx_exports/',
    save_file='/home/ubuntu/tadirs/onnx_exports/exported_demo.onnx',
    deploy_cfg='/home/ubuntu/co/mmdeploy/configs/mmdet/detection/detection_onnxruntime_dynamic.py',
    model_cfg='/home/ubuntu/co/mlcv_projects/projects/ray-projects/ddd/code/mmdet_configs/yoloact/yolact_r50_1xb8-55e_coco.py',
    model_checkpoint='/home/ubuntu/tadirs/checkpoints/yolact_r50_1x8_coco_20200908-f38d58df_modified_weights.pth',
    device='cuda'
)
```

(Note: the traceback below was captured from a separate run using the instance-seg_onnxruntime_dynamic.py deploy config with device='cpu'; both configurations were tried.)
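Since YOLACT is an instance-segmentation model, the instance-seg deploy configs (the family used in the traceback below) match its task, rather than the plain detection configs. A minimal sketch for double-checking what a deploy config declares, assuming mmengine (a dependency of mmdeploy 1.x) is installed:

```python
# Sketch: inspect an mmdeploy deploy config before exporting.
# The path is the instance-seg config from the traceback; adjust to your checkout.
from mmengine import Config

deploy_cfg = Config.fromfile(
    '/home/ubuntu/co/mmdeploy/configs/mmdet/instance-seg/'
    'instance-seg_onnxruntime_dynamic.py')

# Deploy configs expose `codebase_config` (codebase + task) and `onnx_config`
# (opset, input/output names, dynamic axes) as top-level keys.
print(deploy_cfg.codebase_config)
print(deploy_cfg.onnx_config)
```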
Environment
```
SHELL=/bin/bash
CONDA_EXE=/home/ubuntu/anaconda3/bin/conda
_CE_M=
PWD=/home/ubuntu
LOGNAME=ubuntu
XDG_SESSION_TYPE=tty
CONDA_PREFIX=/home/ubuntu/anaconda3/envs/ray_py310
Cuda
```
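The dump above contains only shell variables and is cut off; the full report that the issue template asks for can be generated with mmengine's collect_env helper. A minimal sketch, assuming mmengine is importable in the same environment:

```python
# Sketch: print the PyTorch/CUDA/compiler environment summary.
from mmengine.utils.dl_utils import collect_env

for name, value in collect_env().items():
    print(f'{name}: {value}')
```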
Error traceback

```
---------------------------------------------------------------------------
RuntimeError                              Traceback (most recent call last)
Cell In[26], line 2
      1 from mmdeploy.apis import torch2onnx
----> 2 torch2onnx(
      3     img='/home/ubuntu/mydata/outward/forward/images/1wu1MRg17P-camera-video-segment-1715120945165.mp4.jpeg',
      4     work_dir='/home/ubuntu/tadirs/onnx_exports/',
      5     save_file='/home/ubuntu/tadirs/onnx_exports/sharan_demo.onnx',
      6     deploy_cfg='/home/ubuntu/co/mmdeploy/configs/mmdet/instance-seg/instance-seg_onnxruntime_dynamic.py',
      7     model_cfg='/home/ubuntu/co/mlcv_projects/projects/ray-projects/ddd/code/mmdet_configs/yoloact/yolact_r50_1xb8-55e_coco.py',
      8     model_checkpoint='/home/ubuntu/tadirs/checkpoints/yolact_r50_1x8_coco_20200908-f38d58df_modified_weights.pth',
      9     device='cpu'
     10 )

File ~/co/mmdeploy/mmdeploy/apis/core/pipeline_manager.py:356, in PipelineManager.register_pipeline.<locals>._register.<locals>._wrap(*args, **kwargs)
    354 @wraps(func)
    355 def _wrap(*args, **kwargs):
--> 356     return self.call_function(func_name_, *args, **kwargs)

File ~/co/mmdeploy/mmdeploy/apis/core/pipeline_manager.py:326, in PipelineManager.call_function(self, func_name, *args, **kwargs)
    324     return self.get_result_sync(call_id)
    325 else:
--> 326     return self.call_function_local(func_name, *args, **kwargs)

File ~/co/mmdeploy/mmdeploy/apis/core/pipeline_manager.py:275, in PipelineManager.call_function_local(self, func_name, *args, **kwargs)
    273 pipe_caller._call_id = self._call_id
    274 self._call_id += 1
--> 275 return pipe_caller(*args, **kwargs)

File ~/co/mmdeploy/mmdeploy/apis/core/pipeline_manager.py:107, in PipelineCaller.__call__(self, *args, **kwargs)
    104 func = getattr(mod, impl_name, None)
    105 assert func is not None, \
    106     f'Can not find implementation of {self._func_name}'
--> 107 ret = func(*args, **kwargs)
    108 for output_hook in self.output_hooks:
    109     ret = output_hook(ret)

File ~/co/mmdeploy/mmdeploy/apis/pytorch2onnx.py:98, in torch2onnx(img, work_dir, save_file, deploy_cfg, model_cfg, model_checkpoint, device)
     96     optimize = False
     97 with no_mp():
---> 98     export(
     99         torch_model,
    100         model_inputs,
    101         input_metas=input_metas,
    102         output_path_prefix=output_prefix,
    103         backend=backend,
    104         input_names=input_names,
    105         output_names=output_names,
    106         context_info=context_info,
    107         opset_version=opset_version,
    108         dynamic_axes=dynamic_axes,
    109         verbose=verbose,
    110         keep_initializers_as_inputs=keep_initializers_as_inputs,
    111         optimize=optimize)

File ~/co/mmdeploy/mmdeploy/apis/core/pipeline_manager.py:356, in PipelineManager.register_pipeline.<locals>._register.<locals>._wrap(*args, **kwargs)
    354 @wraps(func)
    355 def _wrap(*args, **kwargs):
--> 356     return self.call_function(func_name_, *args, **kwargs)

File ~/co/mmdeploy/mmdeploy/apis/core/pipeline_manager.py:326, in PipelineManager.call_function(self, func_name, *args, **kwargs)
    324     return self.get_result_sync(call_id)
    325 else:
--> 326     return self.call_function_local(func_name, *args, **kwargs)

File ~/co/mmdeploy/mmdeploy/apis/core/pipeline_manager.py:275, in PipelineManager.call_function_local(self, func_name, *args, **kwargs)
    273 pipe_caller._call_id = self._call_id
    274 self._call_id += 1
--> 275 return pipe_caller(*args, **kwargs)

File ~/co/mmdeploy/mmdeploy/apis/core/pipeline_manager.py:107, in PipelineCaller.__call__(self, *args, **kwargs)
    104 func = getattr(mod, impl_name, None)
    105 assert func is not None, \
    106     f'Can not find implementation of {self._func_name}'
--> 107 ret = func(*args, **kwargs)
    108 for output_hook in self.output_hooks:
    109     ret = output_hook(ret)

File ~/co/mmdeploy/mmdeploy/apis/onnx/export.py:138, in export(model, args, output_path_prefix, backend, input_metas, context_info, input_names, output_names, opset_version, dynamic_axes, verbose, keep_initializers_as_inputs, optimize)
    136 else:
    137     raise RuntimeError(f'Not supported args: {args}')
--> 138 torch.onnx.export(
    139     patched_model,
    140     args,
    141     output_path,
    142     export_params=True,
    143     input_names=input_names,
    144     output_names=output_names,
    145     opset_version=opset_version,
    146     dynamic_axes=dynamic_axes,
    147     keep_initializers_as_inputs=keep_initializers_as_inputs,
    148     verbose=verbose)
    150 if input_metas is not None:
    151     patched_model.forward = model_forward

File ~/anaconda3/envs/ray_py310/lib/python3.10/site-packages/torch/onnx/utils.py:506, in export(model, args, f, export_params, verbose, training, input_names, output_names, operator_export_type, opset_version, do_constant_folding, dynamic_axes, keep_initializers_as_inputs, custom_opsets, export_modules_as_functions)
    188 @_beartype.beartype
    189 def export(
    190     model: Union[torch.nn.Module, torch.jit.ScriptModule, torch.jit.ScriptFunction],
   (...)
    206     export_modules_as_functions: Union[bool, Collection[Type[torch.nn.Module]]] = False,
    207 ) -> None:
    208     r"""Exports a model into ONNX format.
    209
    210     If ``model`` is not a :class:`torch.jit.ScriptModule` nor a
   (...)
    503     All errors are subclasses of :class:`errors.OnnxExporterError`.
    504     """
--> 506 _export(
    507     model,
    508     args,
    509     f,
    510     export_params,
    511     verbose,
    512     training,
    513     input_names,
    514     output_names,
    515     operator_export_type=operator_export_type,
    516     opset_version=opset_version,
    517     do_constant_folding=do_constant_folding,
    518     dynamic_axes=dynamic_axes,
    519     keep_initializers_as_inputs=keep_initializers_as_inputs,
    520     custom_opsets=custom_opsets,
    521     export_modules_as_functions=export_modules_as_functions,
    522 )

File ~/anaconda3/envs/ray_py310/lib/python3.10/site-packages/torch/onnx/utils.py:1548, in _export(model, args, f, export_params, verbose, training, input_names, output_names, operator_export_type, export_type, opset_version, do_constant_folding, dynamic_axes, keep_initializers_as_inputs, fixed_batch_size, custom_opsets, add_node_names, onnx_shape_inference, export_modules_as_functions)
   1545     dynamic_axes = {}
   1546 _validate_dynamic_axes(dynamic_axes, model, input_names, output_names)
-> 1548 graph, params_dict, torch_out = _model_to_graph(
   1549     model,
   1550     args,
   1551     verbose,
   1552     input_names,
   1553     output_names,
   1554     operator_export_type,
   1555     val_do_constant_folding,
   1556     fixed_batch_size=fixed_batch_size,
   1557     training=training,
   1558     dynamic_axes=dynamic_axes,
   1559 )
   1561 # TODO: Don't allocate a in-memory string for the protobuf
   1562 defer_weight_export = (
   1563     export_type is not _exporter_states.ExportTypes.PROTOBUF_FILE
   1564 )

File ~/co/mmdeploy/mmdeploy/apis/onnx/optimizer.py:27, in model_to_graph__custom_optimizer(*args, **kwargs)
     25 """Rewriter of _model_to_graph, add custom passes."""
     26 ctx = FUNCTION_REWRITER.get_context()
---> 27 graph, params_dict, torch_out = ctx.origin_func(*args, **kwargs)
     28 if hasattr(ctx, 'opset'):
     29     opset_version = ctx.opset

File ~/anaconda3/envs/ray_py310/lib/python3.10/site-packages/torch/onnx/utils.py:1113, in _model_to_graph(model, args, verbose, input_names, output_names, operator_export_type, do_constant_folding, _disable_torch_constant_prop, fixed_batch_size, training, dynamic_axes)
   1110     args = (args,)
   1112 model = _pre_trace_quant_model(model, args)
-> 1113 graph, params, torch_out, module = _create_jit_graph(model, args)
   1114 params_dict = _get_named_param_dict(graph, params)
   1116 try:

File ~/anaconda3/envs/ray_py310/lib/python3.10/site-packages/torch/onnx/utils.py:989, in _create_jit_graph(model, args)
    984     graph = _C._propagate_and_assign_input_shapes(
    985         graph, flattened_args, param_count_list, False, False
    986     )
    987     return graph, params, torch_out, None
--> 989 graph, torch_out = _trace_and_get_graph_from_model(model, args)
    990 _C._jit_pass_onnx_lint(graph)
    991 state_dict = torch.jit._unique_state_dict(model)

File ~/anaconda3/envs/ray_py310/lib/python3.10/site-packages/torch/onnx/utils.py:893, in _trace_and_get_graph_from_model(model, args)
    891 prev_autocast_cache_enabled = torch.is_autocast_cache_enabled()
    892 torch.set_autocast_cache_enabled(False)
--> 893 trace_graph, torch_out, inputs_states = torch.jit._get_trace_graph(
    894     model,
    895     args,
    896     strict=False,
    897     _force_outplace=False,
    898     _return_inputs_states=True,
    899 )
    900 torch.set_autocast_cache_enabled(prev_autocast_cache_enabled)
    902 warn_on_static_input_change(inputs_states)

File ~/anaconda3/envs/ray_py310/lib/python3.10/site-packages/torch/jit/_trace.py:1268, in _get_trace_graph(f, args, kwargs, strict, _force_outplace, return_inputs, _return_inputs_states)
   1266 if not isinstance(args, tuple):
   1267     args = (args,)
-> 1268 outs = ONNXTracedModule(f, strict, _force_outplace, return_inputs, _return_inputs_states)(*args, **kwargs)
   1269 return outs

File ~/anaconda3/envs/ray_py310/lib/python3.10/site-packages/torch/nn/modules/module.py:1501, in Module._call_impl(self, *args, **kwargs)
   1496 # If we don't have any hooks, we want to skip the rest of the logic in
   1497 # this function, and just call forward.
   1498 if not (self._backward_hooks or self._backward_pre_hooks or self._forward_hooks or self._forward_pre_hooks
   1499         or _global_backward_pre_hooks or _global_backward_hooks
   1500         or _global_forward_hooks or _global_forward_pre_hooks):
-> 1501     return forward_call(*args, **kwargs)
   1502 # Do not call functions when jit is used
   1503 full_backward_hooks, non_full_backward_hooks = [], []

File ~/anaconda3/envs/ray_py310/lib/python3.10/site-packages/torch/jit/_trace.py:127, in ONNXTracedModule.forward(self, *args)
    124 else:
    125     return tuple(out_vars)
--> 127 graph, out = torch._C._create_graph_by_tracing(
    128     wrapper,
    129     in_vars + module_state,
    130     _create_interpreter_name_lookup_fn(),
    131     self.strict,
    132     self._force_outplace,
    133 )
    135 if self._return_inputs:
    136     return graph, outs[0], ret_inputs[0]

File ~/anaconda3/envs/ray_py310/lib/python3.10/site-packages/torch/jit/_trace.py:121, in ONNXTracedModule.forward.<locals>.wrapper(*args)
    119 if self._return_inputs_states:
    120     inputs_states[0] = (inputs_states[0], trace_inputs)
--> 121 out_vars, _ = _flatten(outs)
    122 if len(out_vars) == 1:
    123     return out_vars[0]

RuntimeError: Only tuples, lists and Variables are supported as JIT inputs/outputs. Dictionaries and strings are also accepted, but their usage is not recommended. Here, received an input of unsupported type: InstanceData
```
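The check that fails lives in torch.jit's tracer: traced outputs must flatten to tensors (tuples, lists, and dicts of tensors are tolerated), and here the model returns mmengine InstanceData objects instead. A minimal sketch that reproduces the same RuntimeError with a toy stand-in class (FakeInstanceData and ToyHead are illustrative names, not from mmdet):

```python
# Sketch: any custom output type trips torch.jit's _flatten during ONNX export.
import torch


class FakeInstanceData:
    """Stand-in for mmengine.structures.InstanceData."""

    def __init__(self, bboxes):
        self.bboxes = bboxes


class ToyHead(torch.nn.Module):
    def forward(self, x):
        # Returning a custom object rather than tensors/tuples/lists
        return FakeInstanceData(bboxes=x * 2)


try:
    torch.onnx.export(ToyHead(), torch.rand(1, 3), 'toy.onnx')
except RuntimeError as err:
    # "Only tuples, lists and Variables are supported as JIT inputs/outputs.
    #  ... received an input of unsupported type: FakeInstanceData"
    print(err)
```

mmdeploy normally works around this with function rewriters that make each supported head emit plain tensors during export, so this trace suggests YOLACT's predict path is not being rewritten in this setup.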
I think you can use YOLOv8 instead of the MM stack; the segmentation part of YOLOv8 is based on YOLACT.
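If switching is an option, the ultralytics package exports its segmentation models to ONNX directly; a minimal sketch using the stock pretrained weights (swap in your own checkpoint):

```python
# Sketch: ONNX export of a YOLOv8 segmentation model via ultralytics.
from ultralytics import YOLO

model = YOLO('yolov8n-seg.pt')             # stock pretrained -seg weights
model.export(format='onnx', dynamic=True)  # writes yolov8n-seg.onnx alongside
```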