Closed: Kodemannen closed this issue 1 year ago.
Please have a look at tools/deploy/export_model.py. You are using the wrong input and model for export.
@ichitaka Sorry to borrow this thread for another issue.
Please have a look at tools/deploy/export_model.py. You are using the wrong input and model for export.
Yes, I used that script, exported it successfully, and ran the exported ONNX model. But it seems that the ONNX model does not support batching, right? (It only runs with a single image.)
To support batching, I am going to modify the output format (dropping the Instances wrapper) and export directly from the model as below. Is there any simple way to support batching?
torch.onnx.export(model,
                  inputs,
                  "training_5_checkpoint_8414.onnx",
                  export_params=True,
                  opset_version=10,
                  do_constant_folding=True,
                  input_names=['input'],
                  output_names=['output'],
                  dynamic_axes={'input': {0: 'batch_size'},
                                'output': {0: 'batch_size'}})
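For reference, a model exported with dynamic_axes like this should then accept batched input at inference time. A minimal sketch using onnxruntime (the NCHW batch shape is a placeholder; the file and input names match the snippet above):

import numpy as np
import onnxruntime as ort

# Load the exported model and feed it a batch; the "input" key matches
# input_names above, and axis 0 is the dynamic batch_size axis.
session = ort.InferenceSession("training_5_checkpoint_8414.onnx")
batch = np.random.rand(4, 3, 800, 800).astype(np.float32)  # hypothetical batch of 4 images
outputs = session.run(None, {"input": batch})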
@ichitaka Thank you, this seems to do the trick =)
Hi, do you have your code? I'm trying to export to ONNX.
@bouachalazhar This worked to export a model that could be successfully reloaded and used for inference, but I think something is still lacking.
I haven't looked at it in a while, but if I remember correctly, the reloaded (.onnx) model only output the bounding boxes and lacked the mask/segmentation output.
Let me know if it works for you or not! :)
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
import argparse
import os
from typing import Dict, List, Tuple
import torch
from torch import Tensor, nn
import detectron2.data.transforms as T
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import get_cfg
from detectron2.data import build_detection_test_loader, detection_utils
from detectron2.evaluation import COCOEvaluator, inference_on_dataset, print_csv_format
from detectron2.export import (
STABLE_ONNX_OPSET_VERSION,
TracingAdapter,
dump_torchscript_IR,
scripting_with_instances,
)
from detectron2.modeling import GeneralizedRCNN, RetinaNet, build_model
from detectron2.modeling.postprocessing import detector_postprocess
from detectron2.projects.point_rend import add_pointrend_config
from detectron2.structures import Boxes
from detectron2.utils.env import TORCH_VERSION
from detectron2.utils.file_io import PathManager
from detectron2.utils.logger import setup_logger
def add_new_fields_to_config(cfg):
"""
    I added this.
"""
cfg.INPUT.BRIGHTNESS_DELTA = 0.0
cfg.INPUT.CONTRAST_DELTA = 0.0
cfg.INPUT.SATURATION_DELTA = 0.0
cfg.INPUT.HUE_DELTA = 0.0
cfg.INPUT.HE_NORM = False
cfg.DATASETS.NUM_TRAIN_IMAGES = 1000 # Just some number to be overwritten by config
cfg.DATASETS.TEST_JSON = ("",)
cfg.SOLVER.NUM_EPOCHS = 1
    cfg.SOLVER.STEP_SIZE_EPOCHS = 10  # Reduce learning rate by gamma every STEP_SIZE_EPOCHS epochs
return cfg
# def setup_cfg(args):
# cfg = get_cfg()
# # cuda context is initialized before creating dataloader, so we don't fork anymore
# cfg.DATALOADER.NUM_WORKERS = 0
# add_pointrend_config(cfg)
# cfg.merge_from_file(args.config_file)
# cfg.merge_from_list(args.opts)
# cfg.freeze()
# return cfg
def export_caffe2_tracing(cfg, torch_model, inputs):
from detectron2.export import Caffe2Tracer
tracer = Caffe2Tracer(cfg, torch_model, inputs)
if args.format == "caffe2":
caffe2_model = tracer.export_caffe2()
caffe2_model.save_protobuf(args.output)
# draw the caffe2 graph
caffe2_model.save_graph(os.path.join(args.output, "model.svg"), inputs=inputs)
return caffe2_model
elif args.format == "onnx":
import onnx
onnx_model = tracer.export_onnx()
onnx.save(onnx_model, os.path.join(args.output, "model.onnx"))
elif args.format == "torchscript":
ts_model = tracer.export_torchscript()
with PathManager.open(os.path.join(args.output, "model.ts"), "wb") as f:
torch.jit.save(ts_model, f)
dump_torchscript_IR(ts_model, args.output)
# experimental. API not yet final
def export_scripting(torch_model):
assert TORCH_VERSION >= (1, 8)
fields = {
"proposal_boxes": Boxes,
"objectness_logits": Tensor,
"pred_boxes": Boxes,
"scores": Tensor,
"pred_classes": Tensor,
"pred_masks": Tensor,
"pred_keypoints": torch.Tensor,
"pred_keypoint_heatmaps": torch.Tensor,
}
assert args.format == "torchscript", "Scripting only supports torchscript format."
class ScriptableAdapterBase(nn.Module):
# Use this adapter to workaround https://github.com/pytorch/pytorch/issues/46944
        # by not returning instances but dicts. Otherwise the exported model is not deployable
def __init__(self):
super().__init__()
self.model = torch_model
self.eval()
if isinstance(torch_model, GeneralizedRCNN):
class ScriptableAdapter(ScriptableAdapterBase):
def forward(self, inputs: Tuple[Dict[str, torch.Tensor]]) -> List[Dict[str, Tensor]]:
instances = self.model.inference(inputs, do_postprocess=False)
return [i.get_fields() for i in instances]
else:
class ScriptableAdapter(ScriptableAdapterBase):
def forward(self, inputs: Tuple[Dict[str, torch.Tensor]]) -> List[Dict[str, Tensor]]:
instances = self.model(inputs)
return [i.get_fields() for i in instances]
ts_model = scripting_with_instances(ScriptableAdapter(), fields)
with PathManager.open(os.path.join(args.output, "model.ts"), "wb") as f:
torch.jit.save(ts_model, f)
dump_torchscript_IR(ts_model, args.output)
    # TODO: Python inference is now missing the postprocessing glue code
return None
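# NOTE: a reloaded scripted model (torch.jit.load("model.ts")) exposes the
# ScriptableAdapter forward above: it takes a tuple of {"image": Tensor} dicts and
# returns a list of plain field dicts rather than Instances, so a caller has to
# rebuild Instances and apply detector_postprocess itself; that is the glue code
# the TODO above refers to.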
# experimental. API not yet final
def export_tracing(torch_model, inputs):
assert TORCH_VERSION >= (1, 8)
image = inputs[0]["image"]
inputs = [{"image": image}] # remove other unused keys
if isinstance(torch_model, GeneralizedRCNN):
def inference(model, inputs):
# use do_postprocess=False so it returns ROI mask
inst = model.inference(inputs, do_postprocess=False)[0]
return [{"instances": inst}]
else:
inference = None # assume that we just call the model directly
traceable_model = TracingAdapter(torch_model, inputs, inference)
if args.format == "torchscript":
ts_model = torch.jit.trace(traceable_model, (image,))
with PathManager.open(os.path.join(args.output, "model.ts"), "wb") as f:
torch.jit.save(ts_model, f)
dump_torchscript_IR(ts_model, args.output)
elif args.format == "onnx":
with PathManager.open(os.path.join(args.output, "model.onnx"), "wb") as f:
torch.onnx.export(traceable_model, (image,), f, opset_version=STABLE_ONNX_OPSET_VERSION)
logger.info("Inputs schema: " + str(traceable_model.inputs_schema))
logger.info("Outputs schema: " + str(traceable_model.outputs_schema))
if args.format != "torchscript":
return None
if not isinstance(torch_model, (GeneralizedRCNN, RetinaNet)):
return None
def eval_wrapper(inputs):
"""
The exported model does not contain the final resize step, which is typically
unused in deployment but needed for evaluation. We add it manually here.
"""
input = inputs[0]
instances = traceable_model.outputs_schema(ts_model(input["image"]))[0]["instances"]
postprocessed = detector_postprocess(instances, input["height"], input["width"])
return [{"instances": postprocessed}]
return eval_wrapper
def get_sample_inputs(args):
if args.sample_image is None:
# get a first batch from dataset
data_loader = build_detection_test_loader(cfg, cfg.DATASETS.TEST[0])
first_batch = next(iter(data_loader))
return first_batch
else:
# get a sample data
original_image = detection_utils.read_image(args.sample_image, format=cfg.INPUT.FORMAT)
# Do same preprocessing as DefaultPredictor
aug = T.ResizeShortestEdge(
[cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST
)
height, width = original_image.shape[:2]
image = aug.get_transform(original_image).apply_image(original_image)
image = torch.as_tensor(image.astype("float32").transpose(2, 0, 1))
inputs = {"image": image, "height": height, "width": width}
# Sample ready
sample_inputs = [inputs]
return sample_inputs
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Export a model for deployment.")
parser.add_argument(
"--format",
choices=["caffe2", "onnx", "torchscript"],
help="output format",
default="torchscript",
)
parser.add_argument(
"--export-method",
choices=["caffe2_tracing", "tracing", "scripting"],
help="Method to export models",
default="tracing",
)
parser.add_argument("--config-file", default="", metavar="FILE", help="path to config file")
parser.add_argument("--sample-image", default=None, type=str, help="sample image for input")
parser.add_argument("--run-eval", action="store_true")
parser.add_argument("--output", help="output directory for the converted model")
parser.add_argument(
"opts",
help="Modify config options using the command-line",
default=None,
nargs=argparse.REMAINDER,
)
args = parser.parse_args()
logger = setup_logger()
logger.info("Command line arguments: " + str(args))
PathManager.mkdirs(args.output)
# Disable re-specialization on new shapes. Otherwise --run-eval will be slow
torch._C._jit_set_bailout_depth(1)
    # cfg = setup_cfg(args)  # how it's done originally
device = torch.device("cpu")
#------------------------------------------------------------------------
# Load config from a .yaml describing the model:
#------------------------------------------------------------------------
cfg = get_cfg()
cfg = add_new_fields_to_config(cfg)
yaml_config_path = os.path.expanduser("~/data/detectron2-data/model/config.yaml")
cfg.merge_from_file(yaml_config_path)
#------------------------------------------------------------------------
# Create model instance with given config:
#------------------------------------------------------------------------
# This sets up the model with random parameters
torch_model = build_model(cfg).to(device)
#------------------------------------------------------------------------
# Load weights from a .pth file:
#------------------------------------------------------------------------
weights_path = os.path.expanduser("~/data/detectron2-data/model/training_5_checkpoint_8414.pth")
DetectionCheckpointer(torch_model).load(weights_path)
#------------------------------------------------------------------------
# NB! set the model to inference mode before exporting:
#------------------------------------------------------------------------
torch_model.eval()
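    # (Why this matters: eval() switches BatchNorm layers to their running
    # statistics and disables Dropout, so the traced/exported graph matches
    # inference-time behavior rather than training-time behavior.)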
# create a torch model
# torch_model = build_model(cfg)
# DetectionCheckpointer(torch_model).resume_or_load(cfg.MODEL.WEIGHTS)
# torch_model.eval()
# convert and save model
if args.export_method == "caffe2_tracing":
sample_inputs = get_sample_inputs(args)
exported_model = export_caffe2_tracing(cfg, torch_model, sample_inputs)
elif args.export_method == "scripting":
exported_model = export_scripting(torch_model)
elif args.export_method == "tracing":
sample_inputs = get_sample_inputs(args)
exported_model = export_tracing(torch_model, sample_inputs)
# run evaluation with the converted model
if args.run_eval:
assert exported_model is not None, (
"Python inference is not yet implemented for "
f"export_method={args.export_method}, format={args.format}."
)
logger.info("Running evaluation ... this takes a long time if you export to CPU.")
dataset = cfg.DATASETS.TEST[0]
data_loader = build_detection_test_loader(cfg, dataset)
# NOTE: hard-coded evaluator. change to the evaluator for your dataset
evaluator = COCOEvaluator(dataset, output_dir=args.output)
metrics = inference_on_dataset(exported_model, data_loader, evaluator)
print_csv_format(metrics)
logger.info("Success.")
Doesn't work:
[03/03 23:30:26 d2.checkpoint.detection_checkpoint]: [DetectionCheckpointer] Loading from https://dl.fbaipublicfiles.com/detectron2/COCO-InstanceSegmentation/mask_rcnn_X_101_32x8d_FPN_3x/139653917/model_final_2d9806.pkl ...
usage: ipykernel_launcher.py [-h] [--format {caffe2,onnx,torchscript}]
[--export-method {caffe2_tracing,tracing,scripting}]
[--config-file FILE]
[--sample-image SAMPLE_IMAGE] [--run-eval]
[--output OUTPUT]
...
ipykernel_launcher.py: error: argument --format: invalid choice: '"c:\\Users\\Lazhar Bouacha\\AppData\\Roaming\\jupyter\\runtime\\kernel-v2-206204MCo1hPy9nOY.json"' (choose from 'caffe2', 'onnx', 'torchscript')
---------------------------------------------------------------------------
ArgumentError Traceback (most recent call last)
File C:\Program Files\WindowsApps\PythonSoftwareFoundation.Python.3.10_3.10.2800.0_x64__qbz5n2kfra8p0\lib\argparse.py:1859, in ArgumentParser.parse_known_args(self, args, namespace)
1858 try:
-> 1859 namespace, args = self._parse_known_args(args, namespace)
1860 except ArgumentError:
File C:\Program Files\WindowsApps\PythonSoftwareFoundation.Python.3.10_3.10.2800.0_x64__qbz5n2kfra8p0\lib\argparse.py:2072, in ArgumentParser._parse_known_args(self, arg_strings, namespace)
2071 # consume the next optional and any arguments for it
-> 2072 start_index = consume_optional(start_index)
2074 # consume any positionals following the last Optional
File C:\Program Files\WindowsApps\PythonSoftwareFoundation.Python.3.10_3.10.2800.0_x64__qbz5n2kfra8p0\lib\argparse.py:2012, in ArgumentParser._parse_known_args.<locals>.consume_optional(start_index)
2011 for action, args, option_string in action_tuples:
-> 2012 take_action(action, args, option_string)
2013 return stop
File C:\Program Files\WindowsApps\PythonSoftwareFoundation.Python.3.10_3.10.2800.0_x64__qbz5n2kfra8p0\lib\argparse.py:1920, in ArgumentParser._parse_known_args.<locals>.take_action(action, argument_strings, option_string)
1919 seen_actions.add(action)
-> 1920 argument_values = self._get_values(action, argument_strings)
1922 # error if this argument is not allowed with other previously
1923 # seen arguments, assuming that actions that use the default
1924 # value don't really count as "present"
File C:\Program Files\WindowsApps\PythonSoftwareFoundation.Python.3.10_3.10.2800.0_x64__qbz5n2kfra8p0\lib\argparse.py:2456, in ArgumentParser._get_values(self, action, arg_strings)
2455 value = self._get_value(action, arg_string)
-> 2456 self._check_value(action, value)
2458 # REMAINDER arguments convert all values, checking none
File C:\Program Files\WindowsApps\PythonSoftwareFoundation.Python.3.10_3.10.2800.0_x64__qbz5n2kfra8p0\lib\argparse.py:2512, in ArgumentParser._check_value(self, action, value)
2511 msg = _('invalid choice: %(value)r (choose from %(choices)s)')
-> 2512 raise ArgumentError(action, msg % args)
ArgumentError: argument --format: invalid choice: '"c:\\Users\\Lazhar Bouacha\\AppData\\Roaming\\jupyter\\runtime\\kernel-v2-206204MCo1hPy9nOY.json"' (choose from 'caffe2', 'onnx', 'torchscript')
During handling of the above exception, another exception occurred:
SystemExit Traceback (most recent call last)
[... skipping hidden 1 frame]
Cell In[16], line 224
218 parser.add_argument(
219 "opts",
220 help="Modify config options using the command-line",
221 default=None,
222 nargs=argparse.REMAINDER,
223 )
--> 224 args = parser.parse_args()
225 logger = setup_logger()
File C:\Program Files\WindowsApps\PythonSoftwareFoundation.Python.3.10_3.10.2800.0_x64__qbz5n2kfra8p0\lib\argparse.py:1826, in ArgumentParser.parse_args(self, args, namespace)
1825 def parse_args(self, args=None, namespace=None):
-> 1826 args, argv = self.parse_known_args(args, namespace)
1827 if argv:
File C:\Program Files\WindowsApps\PythonSoftwareFoundation.Python.3.10_3.10.2800.0_x64__qbz5n2kfra8p0\lib\argparse.py:1862, in ArgumentParser.parse_known_args(self, args, namespace)
1861 err = _sys.exc_info()[1]
-> 1862 self.error(str(err))
1863 else:
File C:\Program Files\WindowsApps\PythonSoftwareFoundation.Python.3.10_3.10.2800.0_x64__qbz5n2kfra8p0\lib\argparse.py:2587, in ArgumentParser.error(self, message)
2586 args = {'prog': self.prog, 'message': message}
-> 2587 self.exit(2, _('%(prog)s: error: %(message)s\n') % args)
File C:\Program Files\WindowsApps\PythonSoftwareFoundation.Python.3.10_3.10.2800.0_x64__qbz5n2kfra8p0\lib\argparse.py:2574, in ArgumentParser.exit(self, status, message)
2573 self._print_message(message, _sys.stderr)
-> 2574 _sys.exit(status)
SystemExit: 2
During handling of the above exception, another exception occurred:
AssertionError Traceback (most recent call last)
[... skipping hidden 1 frame]
File ~\AppData\Local\Packages\PythonSoftwareFoundation.Python.3.10_qbz5n2kfra8p0\LocalCache\local-packages\Python310\site-packages\IPython\core\interactiveshell.py:2047, in InteractiveShell.showtraceback(self, exc_tuple, filename, tb_offset, exception_only, running_compiled_code)
2044 if exception_only:
2045 stb = ['An exception has occurred, use %tb to see '
2046 'the full traceback.\n']
-> 2047 stb.extend(self.InteractiveTB.get_exception_only(etype,
2048 value))
2049 else:
2050 try:
2051 # Exception classes can customise their traceback - we
2052 # use this in IPython.parallel for exceptions occurring
2053 # in the engines. This should return a list of strings.
File ~\AppData\Local\Packages\PythonSoftwareFoundation.Python.3.10_qbz5n2kfra8p0\LocalCache\local-packages\Python310\site-packages\IPython\core\ultratb.py:585, in ListTB.get_exception_only(self, etype, value)
577 def get_exception_only(self, etype, value):
578 """Only print the exception type and message, without a traceback.
579
580 Parameters
(...)
583 value : exception value
584 """
...
174 if isinstance(error, str):
175 error = AssertionError(error)
--> 176 raise error
AssertionError:
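For what it's worth, the traceback above is an argparse problem rather than an export problem: inside a Jupyter kernel, sys.argv contains the kernel's JSON connection file (passed as -f/--f), which argparse matches to --format. A common workaround, sketched here with placeholder argument values, is to bypass sys.argv and pass the arguments explicitly:

# In a notebook, pass the argument list directly instead of reading sys.argv.
args = parser.parse_args([
    "--format", "onnx",
    "--export-method", "tracing",
    "--sample-image", "sample.jpg",  # placeholder path
    "--output", "./output",
])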
I have a trained Mask R-CNN model that I am trying to export to .onnx format, but I am having some trouble with it.
One loose guess I have is that the error stems from the model expecting dictionaries as input, which torch.onnx.export() may not like, but I don't really know.
Instructions To Reproduce the 🐛 Bug:
The code is as follows:
Expected behavior:
If there are no obvious errors in the "full logs" provided above, please tell us the expected behavior.
Environment:
Provide your environment information using the following command: