Open wavelet2008 opened 9 months ago
python app.py --config ../YOLO-World/configs/pretrain/yolo_world_l_dual_vlpan_l2norm_2e-3_100e_4x8gpus_obj365v1_goldg_train_lvis_val.py --checkpoint ../YOLO-World/yolo_world_l_clip_base_dual_vlpan_2e-3adamw_32xb16_100e_o365_goldg_train_pretrained-0e566235.pth

torch.onnx.export(
    deploy_model,
    fake_input,
    f,
    input_names=["images"],
    output_names=output_names,
    opset_version=12,
)
score_threshold = torch.tensor([score_threshold])
Traceback (most recent call last):
  File "/home/lan/anaconda3/envs/face19/lib/python3.9/site-packages/gradio/routes.py", line 422, in run_predict
    output = await app.get_blocks().process_api(
  File "/home/lan/anaconda3/envs/face19/lib/python3.9/site-packages/gradio/blocks.py", line 1323, in process_api
    result = await self.call_function(
  File "/home/lan/anaconda3/envs/face19/lib/python3.9/site-packages/gradio/blocks.py", line 1051, in call_function
    prediction = await anyio.to_thread.run_sync(
  File "/home/lan/anaconda3/envs/face19/lib/python3.9/site-packages/anyio/to_thread.py", line 31, in run_sync
    return await get_asynclib().run_sync_in_worker_thread(
  File "/home/lan/anaconda3/envs/face19/lib/python3.9/site-packages/anyio/_backends/_asyncio.py", line 937, in run_sync_in_worker_thread
    return await future
  File "/home/lan/anaconda3/envs/face19/lib/python3.9/site-packages/anyio/_backends/_asyncio.py", line 867, in run
    result = context.run(func, *args)
  File "/home/lan/det/cog-yolo-world/tools/demo.py", line 123, in export_model
    torch.onnx.export(
  File "/home/lan/anaconda3/envs/face19/lib/python3.9/site-packages/torch/onnx/__init__.py", line 316, in export
    return utils.export(model, args, f, export_params, verbose, training,
  File "/home/lan/anaconda3/envs/face19/lib/python3.9/site-packages/torch/onnx/utils.py", line 107, in export
    _export(model, args, f, export_params, verbose, training, input_names, output_names,
  File "/home/lan/anaconda3/envs/face19/lib/python3.9/site-packages/torch/onnx/utils.py", line 724, in _export
    _model_to_graph(model, args, verbose, input_names,
  File "/home/lan/anaconda3/envs/face19/lib/python3.9/site-packages/torch/onnx/utils.py", line 497, in _model_to_graph
    graph = _optimize_graph(graph, operator_export_type,
  File "/home/lan/anaconda3/envs/face19/lib/python3.9/site-packages/torch/onnx/utils.py", line 216, in _optimize_graph
    graph = torch._C._jit_pass_onnx(graph, operator_export_type)
  File "/home/lan/anaconda3/envs/face19/lib/python3.9/site-packages/torch/onnx/__init__.py", line 373, in _run_symbolic_function
    return utils._run_symbolic_function(*args, **kwargs)
  File "/home/lan/anaconda3/envs/face19/lib/python3.9/site-packages/torch/onnx/utils.py", line 1028, in _run_symbolic_function
    symbolic_fn = _find_symbolic_in_registry(domain, op_name, opset_version, operator_export_type)
  File "/home/lan/anaconda3/envs/face19/lib/python3.9/site-packages/torch/onnx/utils.py", line 982, in _find_symbolic_in_registry
    return sym_registry.get_registered_op(op_name, domain, opset_version)
  File "/home/lan/anaconda3/envs/face19/lib/python3.9/site-packages/torch/onnx/symbolic_registry.py", line 125, in get_registered_op
    raise RuntimeError(msg)
RuntimeError: Exporting the operator triu to ONNX opset version 12 is not supported. Support for this operator was added in version 14, try exporting with this version.
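(Aside: if the export had to stay on opset 12, e.g. for an older runtime, one possible workaround, offered only as a sketch and not something present in the YOLO-World code, is to patch the torch.triu call with an arange-based equivalent that older opsets can express. The helper name below is hypothetical.)

import torch


def triu_onnx_friendly(x: torch.Tensor, diagonal: int = 0) -> torch.Tensor:
    # Keeps x[..., i, j] where j - i >= diagonal and zeroes the rest,
    # matching torch.triu, but uses only arange / compare / where,
    # which export well below opset 14 (ONNX Trilu is opset 14+).
    rows = torch.arange(x.shape[-2], device=x.device).unsqueeze(-1)
    cols = torch.arange(x.shape[-1], device=x.device).unsqueeze(0)
    keep = (cols - rows) >= diagonal
    return torch.where(keep, x, torch.zeros_like(x))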
Changing opset_version to 14, as the error message suggests:

torch.onnx.export(
    deploy_model,
    fake_input,
    f,
    input_names=["images"],
    output_names=output_names,
    opset_version=14,
)
score_threshold = torch.tensor([score_threshold])
/home/lan/anaconda3/envs/face19/lib/python3.9/site-packages/torch/onnx/symbolic_helper.py:325: UserWarning: Type cannot be inferred, which might cause exported graph to produce incorrect results.
  warnings.warn("Type cannot be inferred, which might cause exported graph to produce incorrect results.")
/home/lan/anaconda3/envs/face19/lib/python3.9/site-packages/torch/onnx/symbolic_helper.py:716: UserWarning: allowzero=0 by default. In order to honor zero value in shape use allowzero=1
  warnings.warn("allowzero=0 by default. In order to honor zero value in shape use allowzero=1")
/home/lan/anaconda3/envs/face19/lib/python3.9/site-packages/torch/onnx/symbolic_opset9.py:2815: UserWarning: Exporting aten::index operator of advanced indexing in opset 14 is achieved by combination of multiple ONNX operators, including Reshape, Transpose, Concat, and Gather. If indices include negative values, the exported graph will produce incorrect results.
  warnings.warn("Exporting aten::index operator of advanced indexing in opset " +
WARNING: The shape inference of prim::Constant type is missing, so it may result in wrong shape inference for the exported graph. Please consider adding it in symbolic function.
WARNING: The shape inference of prim::Constant type is missing, so it may result in wrong shape inference for the exported graph. Please consider adding it in symbolic function.
/home/lan/anaconda3/envs/face19/lib/python3.9/site-packages/torch/onnx/symbolic_helper.py:258: UserWarning: ONNX export failed on adaptive_max_pool2d because input size not accessible not supported
  warnings.warn("ONNX export failed on " + op + " because " + msg + " not supported")
Traceback (most recent call last):
  File "/home/lan/anaconda3/envs/face19/lib/python3.9/site-packages/gradio/routes.py", line 422, in run_predict
    output = await app.get_blocks().process_api(
  File "/home/lan/anaconda3/envs/face19/lib/python3.9/site-packages/gradio/blocks.py", line 1323, in process_api
    result = await self.call_function(
  File "/home/lan/anaconda3/envs/face19/lib/python3.9/site-packages/gradio/blocks.py", line 1051, in call_function
    prediction = await anyio.to_thread.run_sync(
  File "/home/lan/anaconda3/envs/face19/lib/python3.9/site-packages/anyio/to_thread.py", line 31, in run_sync
    return await get_asynclib().run_sync_in_worker_thread(
  File "/home/lan/anaconda3/envs/face19/lib/python3.9/site-packages/anyio/_backends/_asyncio.py", line 937, in run_sync_in_worker_thread
    return await future
  File "/home/lan/anaconda3/envs/face19/lib/python3.9/site-packages/anyio/_backends/_asyncio.py", line 867, in run
    result = context.run(func, *args)
  File "/home/lan/det/cog-yolo-world/tools/demo.py", line 123, in export_model
    torch.onnx.export(
  File "/home/lan/anaconda3/envs/face19/lib/python3.9/site-packages/torch/onnx/__init__.py", line 316, in export
    return utils.export(model, args, f, export_params, verbose, training,
  File "/home/lan/anaconda3/envs/face19/lib/python3.9/site-packages/torch/onnx/utils.py", line 107, in export
    _export(model, args, f, export_params, verbose, training, input_names, output_names,
  File "/home/lan/anaconda3/envs/face19/lib/python3.9/site-packages/torch/onnx/utils.py", line 724, in _export
    _model_to_graph(model, args, verbose, input_names,
  File "/home/lan/anaconda3/envs/face19/lib/python3.9/site-packages/torch/onnx/utils.py", line 497, in _model_to_graph
    graph = _optimize_graph(graph, operator_export_type,
  File "/home/lan/anaconda3/envs/face19/lib/python3.9/site-packages/torch/onnx/utils.py", line 216, in _optimize_graph
    graph = torch._C._jit_pass_onnx(graph, operator_export_type)
  File "/home/lan/anaconda3/envs/face19/lib/python3.9/site-packages/torch/onnx/__init__.py", line 373, in _run_symbolic_function
    return utils._run_symbolic_function(*args, **kwargs)
  File "/home/lan/anaconda3/envs/face19/lib/python3.9/site-packages/torch/onnx/utils.py", line 1032, in _run_symbolic_function
    return symbolic_fn(g, *inputs, **attrs)
  File "/home/lan/anaconda3/envs/face19/lib/python3.9/site-packages/torch/onnx/symbolic_opset9.py", line 990, in symbolic_fn
    return sym_help._onnx_unsupported(name + ", since output size is not factor of input size")
  File "/home/lan/anaconda3/envs/face19/lib/python3.9/site-packages/torch/onnx/symbolic_helper.py", line 262, in _onnx_unsupported
    raise RuntimeError("Unsupported: ONNX export of operator {}. "
RuntimeError: Unsupported: ONNX export of operator adaptive_max_pool2d, since output size is not factor of input size. Please feel free to request support or submit a pull request on PyTorch GitHub

score_threshold = torch.tensor([score_threshold])

Should I modify the fixed value, or is it fine for it to come out like this? Thanks
me too!
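On the question about the fixed value: the exporter can only lower adaptive_max_pool2d to a plain MaxPool when the target output size evenly divides the feature-map size, which is what "output size is not factor of input size" refers to. Two common workarounds are exporting at an input resolution whose downsampled feature maps divide evenly, or replacing the adaptive pool with a fixed MaxPool2d computed from the known export resolution. A minimal sketch of the latter, assuming a fixed input size with evenly dividing dimensions (the class name and the module path in the usage comment are illustrative, not taken from the YOLO-World code):

import torch.nn as nn


class FixedMaxPool2d(nn.Module):
    # Export-friendly stand-in for nn.AdaptiveMaxPool2d(output_hw),
    # valid only when input_hw is known at export time and divisible
    # by output_hw, so the adaptive pool collapses to a plain MaxPool2d.
    def __init__(self, input_hw, output_hw):
        super().__init__()
        ih, iw = input_hw
        oh, ow = output_hw
        assert ih % oh == 0 and iw % ow == 0, "sizes must divide evenly"
        self.pool = nn.MaxPool2d(kernel_size=(ih // oh, iw // ow),
                                 stride=(ih // oh, iw // ow))

    def forward(self, x):
        return self.pool(x)


# Hypothetical usage before export, e.g. a 24x24 feature map pooled to 3x3:
#   deploy_model.some_module.pool = FixedMaxPool2d((24, 24), (3, 3))

Whether hard-coding a fixed size like this is acceptable depends on whether the exported ONNX model will only ever run at that input resolution; if so, the fixed value is fine, otherwise the adaptive behavior is lost.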