nod-ai / SHARK-TestSuite

Temporary home of a test suite we are evaluating
Apache License 2.0
2 stars 29 forks source link

retinanet_resnet50_fpn_vaiq_int8 model support #199

Open AmosLewis opened 5 months ago

AmosLewis commented 5 months ago

Failed op:

Until the expand op is fixed, we cannot obtain the failure signatures of any other ops. The following command is blocked by expand.

torch-mlir-opt -convert-torch-onnx-to-torch retinanet_resnet50_fpn_vaiq_int8.default.torch-onnx.mlir > retinanet_resnet50_fpn_vaiq_int8.default.onnx.torch.mlir 
retinanet_resnet50_fpn_vaiq_int8.default.torch-onnx.mlir:1067:13: error: failed to legalize operation 'torch.operator' that was explicitly marked illegal
    %1063 = torch.operator "onnx.Pad"(%895, %1062, %none) {torch.onnx.mode = "constant"} : (!torch.vtensor<[?,?,?],f32>, !torch.vtensor<[?],si64>, !torch.none) -> !torch.vtensor<[?,?,?],f32> 
            ^
retinanet_resnet50_fpn_vaiq_int8.default.torch-onnx.mlir:1067:13: note: see current operation: %3130 = "torch.operator"(%2516, %3129, %0) <{name = "onnx.Pad"}> {torch.onnx.mode = "constant"} : (!torch.vtensor<[?,?,?],f32>, !torch.vtensor<[?],si64>, !torch.none) -> !torch.vtensor<[?,?,?],f32>
PhaneeshB commented 5 months ago

After this lands https://github.com/llvm/torch-mlir/pull/3214

Next set of missing ops:

vivekkhandelwal1 commented 5 months ago

Hi @renxida, still getting the error for Onnx.If:

retinanet_resnet50_fpn_vaiq_int8.default.torch-onnx.mlir:6276:13: error: failed to legalize operation 'torch.operator' that was explicitly marked illegal
    %6272 = torch.operator "onnx.If"(%6271) : (!torch.vtensor<[],i1>) -> !torch.vtensor<[?],si64> {
AmosLewis commented 4 months ago

After integrating torch-mlir@ec6d7aa (onnx.resize op) via https://github.com/iree-org/iree/pull/17358, onnx.If still fails.

retinanet_resnet50_fpn_vaiq_int8.default.torch-onnx.mlir:6276:13: error: failed to legalize operation 'torch.operator' that was explicitly marked illegal
    %6272 = torch.operator "onnx.If"(%6271) : (!torch.vtensor<[],i1>) -> !torch.vtensor<[?],si64> {
            ^
retinanet_resnet50_fpn_vaiq_int8.default.torch-onnx.mlir:6276:13: note: see current operation: 
%23747 = "torch.operator"(%23746) <{name = "onnx.If"}> ({
  %23822 = "torch.operator"(%23713) <{name = "onnx.ReduceMax"}> {torch.onnx.keepdims = 0 : si64} : (!torch.vtensor<[?,?],f32>) -> !torch.vtensor<[],f32>
  %23823 = "torch.operator"(%23721) <{name = "onnx.Cast"}> {torch.onnx.to = 1 : si64} : (!torch.vtensor<[?],si64>) -> !torch.vtensor<[?],f32>
  %23824 = "torch.operator"() <{name = "onnx.Constant"}> {torch.onnx.value = dense_resource<__3571> : tensor<f32>} : () -> !torch.vtensor<[],f32>
  %23825 = "torch.operator"(%23822, %23824) <{name = "onnx.Add"}> : (!torch.vtensor<[],f32>, !torch.vtensor<[],f32>) -> !torch.vtensor<[],f32>
  %23826 = "torch.operator"(%23823, %23825) <{name = "onnx.Mul"}> : (!torch.vtensor<[?],f32>, !torch.vtensor<[],f32>) -> !torch.vtensor<[?],f32>
  %23827 = "torch.operator"() <{name = "onnx.Constant"}> {torch.onnx.value = dense_resource<__3572> : tensor<1xsi64>} : () -> !torch.vtensor<[1],si64>
  %23828 = "torch.operator"(%23826, %23827) <{name = "onnx.Unsqueeze"}> : (!torch.vtensor<[?],f32>, !torch.vtensor<[1],si64>) -> !torch.vtensor<[?,1],f32>
  %23829 = "torch.operator"(%23713, %23828) <{name = "onnx.Add"}> : (!torch.vtensor<[?,?],f32>, !torch.vtensor<[?,1],f32>) -> !torch.vtensor<[?,?],f32>
  %23830 = "torch.operator"() <{name = "onnx.Constant"}> {torch.onnx.value = dense_resource<__3573> : tensor<1xsi64>} : () -> !torch.vtensor<[1],si64>
  %23831 = "torch.operator"(%23829, %23830) <{name = "onnx.Unsqueeze"}> : (!torch.vtensor<[?,?],f32>, !torch.vtensor<[1],si64>) -> !torch.vtensor<[1,?,?],f32>
  %23832 = "torch.operator"() <{name = "onnx.Constant"}> {torch.onnx.value = dense_resource<__3574> : tensor<1xsi64>} : () -> !torch.vtensor<[1],si64>
  %23833 = "torch.operator"(%23717, %23832) <{name = "onnx.Unsqueeze"}> : (!torch.vtensor<[?],f32>, !torch.vtensor<[1],si64>) -> !torch.vtensor<[1,?],f32>
  %23834 = "torch.operator"() <{name = "onnx.Constant"}> {torch.onnx.value = dense_resource<__3575> : tensor<1xsi64>} : () -> !torch.vtensor<[1],si64>
  %23835 = "torch.operator"(%23833, %23834) <{name = "onnx.Unsqueeze"}> : (!torch.vtensor<[1,?],f32>, !torch.vtensor<[1],si64>) -> !torch.vtensor<[1,1,?],f32>
  %23836 = "torch.operator"() <{name = "onnx.Constant"}> {torch.onnx.value = dense_resource<__3576> : tensor<1xsi64>} : () -> !torch.vtensor<[1],si64>
  %23837 = "torch.operator"() <{name = "onnx.Constant"}> {torch.onnx.value = dense_resource<__3577> : tensor<1xf32>} : () -> !torch.vtensor<[1],f32>
  %23838 = "torch.operator"(%23831, %23835, %23836, %23837) <{name = "onnx.NonMaxSuppression"}> : (!torch.vtensor<[1,?,?],f32>, !torch.vtensor<[1,1,?],f32>, !torch.vtensor<[1],si64>, !torch.vtensor<[1],f32>) -> !torch.vtensor<[?,3],si64>
  %23839 = "torch.operator"() <{name = "onnx.Constant"}> {torch.onnx.value = dense_resource<__3578> : tensor<1xsi64>} : () -> !torch.vtensor<[1],si64>
  %23840 = "torch.operator"(%23838, %23839) <{name = "onnx.Gather"}> {torch.onnx.axis = 1 : si64} : (!torch.vtensor<[?,3],si64>, !torch.vtensor<[1],si64>) -> !torch.vtensor<[?,1],si64>
  %23841 = "torch.operator"() <{name = "onnx.Constant"}> {torch.onnx.value = dense_resource<__3579> : tensor<1xsi64>} : () -> !torch.vtensor<[1],si64>
  %23842 = "torch.operator"(%23840, %23841) <{name = "onnx.Squeeze"}> : (!torch.vtensor<[?,1],si64>, !torch.vtensor<[1],si64>) -> !torch.vtensor<[?],si64>
  "torch.operator_terminator"(%23842) : (!torch.vtensor<[?],si64>) -> ()
}, {
  %23821 = "torch.operator"() <{name = "onnx.Constant"}> {torch.onnx.value = dense_resource<__3580> : tensor<0xsi64>} : () -> !torch.vtensor<[0],si64>
  "torch.operator_terminator"(%23821) : (!torch.vtensor<[0],si64>) -> ()
}) : (!torch.vtensor<[],i1>) -> !torch.vtensor<[?],si64>