Closed schyun9212 closed 4 years ago
To export ONNX with custom operator, we should register custom operator explicitly
# Create custom symbolic function
from torch.onnx.symbolic_helper import parse_args
@parse_args('v', 'v', 'f', 'i')
def symbolic_foo_forward(g, input1, input2, attr1, attr2):
return g.op("Foo", input1, input2, attr1_f=attr1, attr2_i=attr2)
# Register custom symbolic function
from torch.onnx import register_custom_op_symbolic
register_custom_op_symbolic('custom_ops::foo_forward', symbolic_foo_forward, 9)
class FooModel(torch.nn.Module):
def __init__(self, attr1, attr2):
super(FooModule, self).__init__()
self.attr1 = attr1
self.attr2 = attr2
def forward(self, input1, input2):
# Calling custom op
return torch.ops.custom_ops.foo_forward(input1, input2, self.attr1, self.attr2)
model = FooModel(attr1, attr2)
torch.onnx.export(model, (dummy_input1, dummy_input2), 'model.onnx')
I registered custom operators and onnx model seems to be successfully created. But a validation error is occurred when I load the model.
from torch.onnx.symbolic_helper import parse_args
@parse_args('v', 'v', 'f')
def symbolic_nms(g, dets, scores, threshold):
# Constant value must be converted to tensor
threshold = g.op("Constant", value_t=torch.tensor(threshold, dtype=torch.float))
return g.op("nms", dets, scores, threshold)
@parse_args('v', 'v', 'f', 'i', 'i', 'i')
def symbolic_roi_align_forward(g, grad, rois, spatial_scale, pooled_height, pooled_width, sampling_ratio):
# Constant value must be converted to tensor
spatial_scale = g.op("Constant", value_t=torch.tensor(spatial_scale, dtype=torch.float))
pooled_height = g.op("Constant", value_t=torch.tensor(pooled_height, dtype=torch.int64))
pooled_width = g.op("Constant", value_t=torch.tensor(pooled_width, dtype=torch.int64))
sampling_ratio = g.op("Constant", value_t=torch.tensor(sampling_ratio, dtype=torch.int64))
return g.op("roi_align_foward", grad, rois, spatial_scale, pooled_height, pooled_width, sampling_ratio)
from torch.onnx import register_custom_op_symbolic
register_custom_op_symbolic('maskrcnn_benchmark::nms', symbolic_nms, 10)
register_custom_op_symbolic('maskrcnn_benchmark::roi_align_forward', symbolic_roi_align_forward, 10)
model = MaskRCNNModel()
model.eval()
torch.onnx.export(model, (image, ), MODEL_PATH,
do_constant_folding=True,
opset_version=ONNX_OPSET_VERSION)
Error message is
Traceback (most recent call last):
File "export_to_onnx.py", line 89, in <module>
onnx.checker.check_model(loaded_onnx_model)
File "/home/jade/.pyenv/versions/3.7.6/envs/maskrcnn-tracing-latest/lib/python3.7/site-packages/onnx/checker.py", line 91, in check_model
C.check_model(model.SerializeToString())
onnx.onnx_cpp2py_export.checker.ValidationError: No Op registered for nms with domain_version of 10
==> Context: Bad node spec: input: "656" input: "658" input: "659" output: "660" op_type: "nms"
🐛 Bug
ONNX exporting failed. Maybe custom operator nms is not recognized.
To Reproduce
Steps to reproduce the behavior:
Expected behavior
Environment
PyTorch version: 1.3.1 Is debug build: No CUDA used to build PyTorch: 10.1.243
OS: Ubuntu 18.04.3 LTS GCC version: (Ubuntu 7.4.0-1ubuntu1~18.04.1) 7.4.0 CMake version: version 3.10.2
Python version: 3.7 Is CUDA available: Yes CUDA runtime version: 10.1.243 GPU models and configuration: GPU 0: GeForce RTX 2080 Ti Nvidia driver version: 440.44 cuDNN version: Probably one of the following: /usr/local/cuda-10.0/targets/x86_64-linux/lib/libcudnn.so.7 /usr/local/cuda-10.1/targets/x86_64-linux/lib/libcudnn.so.7.6.5
Versions of relevant libraries: [pip3] numpy==1.18.1 [pip3] torch==1.3.1 [pip3] torchvision==0.4.2 [conda] Could not collect