Closed. leemayi closed this issue 1 year ago.
My deploy config, mmdeploy-1.2.0/configs/mmocr/text-recognition/text-recognition_tensorrt_dynamic-32x32-32x640.py, is:

_base_ = [
    './text-recognition_dynamic.py', '../../_base_/backends/tensorrt.py'
]
backend_config = dict(
    common_config=dict(max_workspace_size=1 << 31),
    model_inputs=[
        dict(
            input_shapes=dict(
                input=dict(
                    min_shape=[20, 3, 32, 32],
                    opt_shape=[20, 3, 32, 64],
                    max_shape=[20, 3, 32, 640])))
    ])
However, when I use the tensorrt-fp16.py config instead, the conversion succeeds.
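For reference, this is roughly what the fp16 variant looks like (a sketch only; I mirrored my own input_shapes, and the exact contents of the stock config may differ). As far as I can tell, the substantive difference is that the tensorrt-fp16 base config sets fp16_mode=True in common_config:

```python
# Assumed layout of the fp16 deploy config, mirroring my config above.
_base_ = [
    './text-recognition_dynamic.py', '../../_base_/backends/tensorrt-fp16.py'
]
backend_config = dict(
    # fp16_mode=True is inherited from the tensorrt-fp16 base config
    common_config=dict(max_workspace_size=1 << 31),
    model_inputs=[
        dict(
            input_shapes=dict(
                input=dict(
                    min_shape=[20, 3, 32, 32],
                    opt_shape=[20, 3, 32, 64],
                    max_shape=[20, 3, 32, 640])))
    ])
```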
Hmm, that sounds like a TensorRT bug. I did not get this error.
This issue is marked as stale because it has been marked as invalid or awaiting response for 7 days without any further response. It will be closed in 5 days if the stale label is not removed or if there is no further response.
This issue is closed because it has been stale for 5 days. Please open a new issue if you have similar issues or you have any new updates now.
same here
Checklist
Describe the bug
When exporting the MMOCR SATRN model to the TensorRT backend, I got this error:
[08/28/2023-15:06:56] [TRT] [W] Tensor DataType is determined at build time for tensors not marked as input or output.
[08/28/2023-15:06:56] [TRT] [W] Tensor DataType is determined at build time for tensors not marked as input or output.
[08/28/2023-15:06:56] [TRT] [W] Tensor DataType is determined at build time for tensors not marked as input or output.
[08/28/2023-15:06:59] [TRT] [I] Graph optimization time: 1.85392 seconds.
[08/28/2023-15:06:59] [TRT] [I] Local timing cache in use. Profiling results in this builder pass will not be stored.
[08/28/2023-15:07:02] [TRT] [E] 10: Could not find any implementation for node {ForeignNode[onnx::Equal_453...Transpose_115 + Reshape_119]}.
[08/28/2023-15:07:03] [TRT] [E] 10: [optimizer.cpp::nvinfer1::builder::cgraph::LeafCNode::computeCosts::3869] Error Code 10: Internal Error (Could not find any implementation for node {ForeignNode[onnx::Equal_453...Transpose_115 + Reshape_119]}.)
Traceback (most recent call last):
  File "C:\ProgramData\Miniconda3\envs\mmdeploy_1.0\lib\runpy.py", line 194, in _run_module_as_main
    return _run_code(code, main_globals, None,
  File "C:\ProgramData\Miniconda3\envs\mmdeploy_1.0\lib\runpy.py", line 87, in _run_code
    exec(code, run_globals)
  File "c:\Users\jushiPC.vscode\extensions\ms-python.python-2023.14.0\pythonFiles\lib\python\debugpy\adapter/../..\debugpy\launcher/../..\debugpy\__main__.py", line 39, in <module>
    cli.main()
  File "c:\Users\jushiPC.vscode\extensions\ms-python.python-2023.14.0\pythonFiles\lib\python\debugpy\adapter/../..\debugpy\launcher/../..\debugpy/..\debugpy\server\cli.py", line 430, in main
    run()
  File "c:\Users\jushiPC.vscode\extensions\ms-python.python-2023.14.0\pythonFiles\lib\python\debugpy\adapter/../..\debugpy\launcher/../..\debugpy/..\debugpy\server\cli.py", line 284, in run_file
    runpy.run_path(target, run_name="__main__")
  File "c:\Users\jushiPC.vscode\extensions\ms-python.python-2023.14.0\pythonFiles\lib\python\debugpy\_vendored\pydevd\_pydevd_bundle\pydevd_runpy.py", line 321, in run_path
    return _run_module_code(code, init_globals, run_name,
  File "c:\Users\jushiPC.vscode\extensions\ms-python.python-2023.14.0\pythonFiles\lib\python\debugpy\_vendored\pydevd\_pydevd_bundle\pydevd_runpy.py", line 135, in _run_module_code
    _run_code(code, mod_globals, init_globals,
  File "c:\Users\jushiPC.vscode\extensions\ms-python.python-2023.14.0\pythonFiles\lib\python\debugpy\_vendored\pydevd\_pydevd_bundle\pydevd_runpy.py", line 124, in _run_code
    exec(code, run_globals)
  File "D:\mmdep\for_deploy_1.0\conver_ocr_rec.py", line 23, in <module>
    onnx2tensorrt(work_dir, save_file, model_id, deploy_cfg, onnx_model, device)
  File "d:\mmdep\for_deploy_1.0\mmdeploy-1.2.0\mmdeploy\apis\core\pipeline_manager.py", line 356, in _wrap
    return self.call_function(func_name, *args, **kwargs)
  File "d:\mmdep\for_deploy_1.0\mmdeploy-1.2.0\mmdeploy\apis\core\pipeline_manager.py", line 326, in call_function
    return self.call_function_local(func_name, *args, **kwargs)
  File "d:\mmdep\for_deploy_1.0\mmdeploy-1.2.0\mmdeploy\apis\core\pipeline_manager.py", line 275, in call_function_local
    return pipe_caller(*args, **kwargs)
  File "d:\mmdep\for_deploy_1.0\mmdeploy-1.2.0\mmdeploy\apis\core\pipeline_manager.py", line 107, in __call__
    ret = func(*args, **kwargs)
  File "d:\mmdep\for_deploy_1.0\mmdeploy-1.2.0\mmdeploy\backend\tensorrt\onnx2tensorrt.py", line 79, in onnx2tensorrt
    from_onnx(
  File "d:\mmdep\for_deploy_1.0\mmdeploy-1.2.0\mmdeploy\backend\tensorrt\utils.py", line 248, in from_onnx
    assert engine is not None, 'Failed to create TensorRT engine'
AssertionError: Failed to create TensorRT engine
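To narrow down whether the failure comes from mmdeploy or from TensorRT itself, here is a minimal sketch that rebuilds the engine directly from the exported ONNX with the same optimization profile. It uses the standard TensorRT Python API; the ONNX path and the input name "input" are assumptions taken from my config above, not anything mmdeploy prescribes:

```python
import tensorrt as trt

logger = trt.Logger(trt.Logger.WARNING)
builder = trt.Builder(logger)
network = builder.create_network(
    1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
parser = trt.OnnxParser(network, logger)

# Parse the ONNX model exported by torch2onnx (path assumed from my script).
with open('work_dir/trt/satrn_b20/end2end.onnx', 'rb') as f:
    if not parser.parse(f.read()):
        for i in range(parser.num_errors):
            print(parser.get_error(i))
        raise RuntimeError('Failed to parse ONNX model')

config = builder.create_builder_config()
# TRT >= 8.4; older versions use config.max_workspace_size = 1 << 31 instead.
config.set_memory_pool_limit(trt.MemoryPoolType.WORKSPACE, 1 << 31)

# Same dynamic-shape profile as in the deploy config.
profile = builder.create_optimization_profile()
profile.set_shape('input',
                  min=(20, 3, 32, 32),
                  opt=(20, 3, 32, 64),
                  max=(20, 3, 32, 640))
config.add_optimization_profile(profile)

# Uncommenting this mirrors the tensorrt-fp16 config that works for me.
# config.set_flag(trt.BuilderFlag.FP16)

engine_bytes = builder.build_serialized_network(network, config)
assert engine_bytes is not None, 'engine build failed'
with open('end2end_debug.engine', 'wb') as f:
    f.write(engine_bytes)
```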
Reproduction
from mmdeploy.apis import torch2onnx
from mmdeploy.apis.tensorrt import onnx2tensorrt
from mmdeploy.backend.sdk.export_info import export2SDK
import os

img = r'.\1.bmp'
work_dir = 'work_dir/trt/satrn_b20'
save_file = 'end2end.onnx'
deploy_cfg = 'mmdeploy-1.2.0/configs/mmocr/text-recognition/text-recognition_tensorrt_dynamic-32x32-32x640.py'
model_cfg = 'satrn.py'
model_checkpoint = 'satrn.pth'
device = 'cuda'

# 1. convert the PyTorch model to ONNX
torch2onnx(img, work_dir, save_file, deploy_cfg, model_cfg, model_checkpoint, device)

# 2. convert the ONNX model to a TensorRT engine
onnx_model = os.path.join(work_dir, save_file)
save_file = 'end2end.engine'
model_id = 0
device = 'cuda'
onnx2tensorrt(work_dir, save_file, model_id, deploy_cfg, onnx_model, device)

# 3. export SDK meta info
export2SDK(deploy_cfg, model_cfg, work_dir, pth=model_checkpoint, device=device)
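To confirm that the exported ONNX really exposes the dynamic width the deploy config expects, a quick check of the graph inputs can be added after step 1 (a sketch using the onnx package; the output path is the one assumed by my script above):

```python
import onnx

model = onnx.load('work_dir/trt/satrn_b20/end2end.onnx')
onnx.checker.check_model(model)  # structural sanity check

# Print each graph input with its dimensions; dynamic axes show up as
# symbolic names (dim_param) instead of fixed integers (dim_value).
for inp in model.graph.input:
    dims = [
        d.dim_param if d.dim_param else d.dim_value
        for d in inp.type.tensor_type.shape.dim
    ]
    print(inp.name, dims)
```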
Environment
Error traceback