Open azuryl opened 2 years ago
tensorrt 8.2.2.1 CUDA 10.2 UBUNTU 18.04 convert Efficientnet in Adabins to TRT https://github.com/shariqfarooq123/AdaBins/blob/2fb686a66a304f0a719bc53d77412460af97fd61/models/layers.py (trt8.2py39) delight-gpu@LIGHT-24B:~/project/torch2trt$ python 2trt.py init.py BB: CustomQKVToContextPluginDynamic init.py BB: CustomQKVToContextPluginDynamic init.py BB: CustomQKVToContextPluginDynamic init.py BB: CustomEmbLayerNormPluginDynamic init.py BB: CustomEmbLayerNormPluginDynamic init.py BB: CustomEmbLayerNormPluginDynamic init.py BB: CustomFCPluginDynamic init.py BB: CustomGeluPluginDynamic init.py BB: GroupNormalizationPlugin init.py BB: RnRes2Br1Br2c_TRT init.py BB: RnRes2Br1Br2c_TRT init.py BB: RnRes2Br2bBr2c_TRT init.py BB: RnRes2Br2bBr2c_TRT init.py BB: SingleStepLSTMPlugin init.py BB: CustomSkipLayerNormPluginDynamic init.py BB: CustomSkipLayerNormPluginDynamic init.py BB: CustomSkipLayerNormPluginDynamic init.py BB: CustomSkipLayerNormPluginDynamic init.py BB: RnRes2FullFusion_TRT init.py BB: InstanceNormalization_TRT init.py load_plugins PLUGINS_LOADED: True Using cache found in /home/delight-gpu/.cache/torch/hub/rwightman_gen-efficientnet-pytorch_master Loading base model ()...Done. Removing last two layers (global_pool & classifier). Building Encoder-Decoder model..########nn.Conv2d Done. 
call torch2trt!!!!!!!!!!!!!!!!!!1 @@@@self.decoder UnetAdaptiveBins %%%%########forward Encoder End YYYYYYYY########forward Encoder &&&&&&########forward decoderBN ########F.interpolate ########F.interpolate ########F.interpolate ########F.interpolate ^^^^^self.adaptive_bins_layer>>> !!!!!!!!!!!!!!!!!!mViT.py forward >>>>>>PatchTransformerEncoder !!!!!!!!!!!!!layer.py self.embedding_convPxP(x).flatten PatchTransformerEncoder !!!!!!!!!!!!!layer.py embeddings1_shape 300 <SliceBackward0 object at 0x7f64a940e640> !!!!!!!!!!!!!layer.py positional_encodings1: tensor([[7.3730e-01, 1.4520e-01, 7.8958e-01, ..., 9.1535e-01, 1.6060e-01, 8.2893e-01], [5.6483e-01, 6.9625e-01, 6.8629e-01, ..., 4.5704e-01, 3.7665e-01, 2.8165e-01], [9.0804e-02, 9.0169e-01, 6.0779e-01, ..., 4.3940e-03, 1.0482e-01, 2.9530e-01], ..., [1.1430e-01, 4.0873e-01, 5.7549e-01, ..., 8.9175e-01, 9.8352e-01, 7.2419e-01], [3.0608e-01, 3.3149e-02, 7.1508e-04, ..., 4.2568e-01, 1.6471e-02, 8.5658e-01], [8.2255e-01, 7.7846e-02, 1.3803e-01, ..., 3.7904e-01, 6.2948e-01, 3.6152e-01]], device='cuda:0', grad_fn=) !!!!!!!!!!!!!layer.py embeddings PatchTransformerEncoder !!!!!!!!!!!!!!!!!!patch_transformer mViT.py forward !!!!!!!!!!!!!!!!!!dot_product_layer mViT.py forward !!!!!!!!!!!!!!!!!!norm == linear mViT.py forward !!!!!!!!!!!!!!!!!!torch.relu(y) mViT.py forward !!!!!!!!!!!!!!!!!! keepdim=True mViT.py forward @@@@self.decoder UnetAdaptiveBins %%%%########forward Encoder Conv2d.py convert_conv2d!!!!!!! Conv2d.py convert_conv2d!!!!!!! Conv2d.py convert_conv2d!!!!!!! Conv2d.py convert_conv2d!!!!!!! Conv2d.py convert_conv2d!!!!!!! 
End YYYYYYYY########forward Encoder &&&&&&########forward decoderBN ########F.interpolate convert_interpolate_trt7############# interprolate.py align_corners: True trt_version() > '8.0 ########F.interpolate convert_interpolate_trt7############# interprolate.py align_corners: True trt_version() > '8.0 ########F.interpolate convert_interpolate_trt7############# interprolate.py align_corners: True trt_version() > '8.0 ########F.interpolate convert_interpolate_trt7############# interprolate.py align_corners: True trt_version() > '8.0 ^^^^^self.adaptive_bins_layer>>> !!!!!!!!!!!!!!!!!!mViT.py forward >>>>>>PatchTransformerEncoder !!!!!!!!!!!!!layer.py self.embedding_convPxP(x).flatten PatchTransformerEncoder !!!!!!!!!!!!!layer.py embeddings1_shape 300 input: Parameter containing: tensor([[0.7373, 0.1452, 0.7896, ..., 0.9154, 0.1606, 0.8289], [0.5648, 0.6962, 0.6863, ..., 0.4570, 0.3767, 0.2816], [0.0908, 0.9017, 0.6078, ..., 0.0044, 0.1048, 0.2953], ..., [0.8716, 0.6719, 0.2597, ..., 0.3142, 0.6792, 0.6670], [0.2257, 0.1760, 0.6620, ..., 0.4528, 0.1546, 0.7144], [0.3901, 0.1327, 0.5800, ..., 0.5972, 0.3272, 0.5098]], device='cuda:0', requires_grad=True) Traceback (most recent call last): File "/home/delight-gpu/project/torch2trt/2trt.py", line 64, in model_trt = torch2trt(model, [x],max_workspace_size=2<<10) File "/home/delight-gpu/project/torch2trt/torch2trt/torch2trt.py", line 553, in torch2trt outputs = module(inputs) File "/home/delight-gpu/anaconda3/envs/trt8.2py39/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1102, in _call_impl return forward_call(input, kwargs) File "/home/delight-gpu/project/torch2trt/models/unet_adaptive_bins.py", line 103, in forward bin_widths_normed, range_attention_maps = self.adaptive_bins_layer(unet_out) File "/home/delight-gpu/anaconda3/envs/trt8.2py39/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1102, in _call_impl return forward_call(*input, *kwargs) File 
"/home/delight-gpu/project/torch2trt/models/miniViT.py", line 27, in forward tgt = self.patch_transformer(x.clone()) # .shape = S, N, E File "/home/delight-gpu/anaconda3/envs/trt8.2py39/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1102, in _call_impl return forward_call(input, kwargs) File "/home/delight-gpu/project/torch2trt/models/layers.py", line 25, in forward print(self.positional_encodings[:embeddings1_shape, :].grad_fn) File "/home/delight-gpu/project/torch2trt/torch2trt/torch2trt.py", line 300, in wrapper converter"converter" File "/home/delight-gpu/project/torch2trt/torch2trt/converters/getitem.py", line 31, in convert_tensor_getitem input_trt = input._trt AttributeError: 'Parameter' object has no attribute '_trt'
I am hitting the same `AttributeError: 'Parameter' object has no attribute '_trt'` issue when converting this model.
tensorrt 8.2.2.1 CUDA 10.2 UBUNTU 18.04 convert Efficientnet in Adabins to TRT https://github.com/shariqfarooq123/AdaBins/blob/2fb686a66a304f0a719bc53d77412460af97fd61/models/layers.py (trt8.2py39) delight-gpu@LIGHT-24B:~/project/torch2trt$ python 2trt.py init.py BB: CustomQKVToContextPluginDynamic init.py BB: CustomQKVToContextPluginDynamic init.py BB: CustomQKVToContextPluginDynamic init.py BB: CustomEmbLayerNormPluginDynamic init.py BB: CustomEmbLayerNormPluginDynamic init.py BB: CustomEmbLayerNormPluginDynamic init.py BB: CustomFCPluginDynamic init.py BB: CustomGeluPluginDynamic init.py BB: GroupNormalizationPlugin init.py BB: RnRes2Br1Br2c_TRT init.py BB: RnRes2Br1Br2c_TRT init.py BB: RnRes2Br2bBr2c_TRT init.py BB: RnRes2Br2bBr2c_TRT init.py BB: SingleStepLSTMPlugin init.py BB: CustomSkipLayerNormPluginDynamic init.py BB: CustomSkipLayerNormPluginDynamic init.py BB: CustomSkipLayerNormPluginDynamic init.py BB: CustomSkipLayerNormPluginDynamic init.py BB: RnRes2FullFusion_TRT init.py BB: InstanceNormalization_TRT init.py load_plugins PLUGINS_LOADED: True Using cache found in /home/delight-gpu/.cache/torch/hub/rwightman_gen-efficientnet-pytorch_master Loading base model ()...Done. Removing last two layers (global_pool & classifier). Building Encoder-Decoder model..########nn.Conv2d Done. 
call torch2trt!!!!!!!!!!!!!!!!!!1 @@@@self.decoder UnetAdaptiveBins %%%%########forward Encoder End YYYYYYYY########forward Encoder &&&&&&########forward decoderBN ########F.interpolate ########F.interpolate ########F.interpolate ########F.interpolate ^^^^^self.adaptive_bins_layer>>> !!!!!!!!!!!!!!!!!!mViT.py forward >>>>>>PatchTransformerEncoder !!!!!!!!!!!!!layer.py self.embedding_convPxP(x).flatten PatchTransformerEncoder !!!!!!!!!!!!!layer.py embeddings1_shape 300 <SliceBackward0 object at 0x7f64a940e640> !!!!!!!!!!!!!layer.py positional_encodings1: tensor([[7.3730e-01, 1.4520e-01, 7.8958e-01, ..., 9.1535e-01, 1.6060e-01, 8.2893e-01], [5.6483e-01, 6.9625e-01, 6.8629e-01, ..., 4.5704e-01, 3.7665e-01, 2.8165e-01], [9.0804e-02, 9.0169e-01, 6.0779e-01, ..., 4.3940e-03, 1.0482e-01, 2.9530e-01], ..., [1.1430e-01, 4.0873e-01, 5.7549e-01, ..., 8.9175e-01, 9.8352e-01, 7.2419e-01], [3.0608e-01, 3.3149e-02, 7.1508e-04, ..., 4.2568e-01, 1.6471e-02, 8.5658e-01], [8.2255e-01, 7.7846e-02, 1.3803e-01, ..., 3.7904e-01, 6.2948e-01, 3.6152e-01]], device='cuda:0', grad_fn=)
!!!!!!!!!!!!!layer.py embeddings PatchTransformerEncoder
!!!!!!!!!!!!!!!!!!patch_transformer mViT.py forward
!!!!!!!!!!!!!!!!!!dot_product_layer mViT.py forward
!!!!!!!!!!!!!!!!!!norm == linear mViT.py forward
!!!!!!!!!!!!!!!!!!torch.relu(y) mViT.py forward
!!!!!!!!!!!!!!!!!! keepdim=True mViT.py forward
@@@@self.decoder UnetAdaptiveBins
%%%%########forward Encoder
Conv2d.py convert_conv2d!!!!!!!
Conv2d.py convert_conv2d!!!!!!!
Conv2d.py convert_conv2d!!!!!!!
Conv2d.py convert_conv2d!!!!!!!
Conv2d.py convert_conv2d!!!!!!!
End YYYYYYYY########forward Encoder
&&&&&&########forward decoderBN
########F.interpolate
convert_interpolate_trt7#############
interprolate.py align_corners: True
trt_version() > '8.0
########F.interpolate
convert_interpolate_trt7#############
interprolate.py align_corners: True
trt_version() > '8.0
########F.interpolate
convert_interpolate_trt7#############
interprolate.py align_corners: True
trt_version() > '8.0
########F.interpolate
convert_interpolate_trt7#############
interprolate.py align_corners: True
trt_version() > '8.0
^^^^^self.adaptive_bins_layer>>>
!!!!!!!!!!!!!!!!!!mViT.py forward >>>>>>PatchTransformerEncoder
!!!!!!!!!!!!!layer.py self.embedding_convPxP(x).flatten PatchTransformerEncoder
!!!!!!!!!!!!!layer.py embeddings1_shape 300
input:
Parameter containing:
tensor([[0.7373, 0.1452, 0.7896, ..., 0.9154, 0.1606, 0.8289],
[0.5648, 0.6962, 0.6863, ..., 0.4570, 0.3767, 0.2816],
[0.0908, 0.9017, 0.6078, ..., 0.0044, 0.1048, 0.2953],
...,
[0.8716, 0.6719, 0.2597, ..., 0.3142, 0.6792, 0.6670],
[0.2257, 0.1760, 0.6620, ..., 0.4528, 0.1546, 0.7144],
[0.3901, 0.1327, 0.5800, ..., 0.5972, 0.3272, 0.5098]],
device='cuda:0', requires_grad=True)
Traceback (most recent call last):
File "/home/delight-gpu/project/torch2trt/2trt.py", line 64, in <module>
model_trt = torch2trt(model, [x],max_workspace_size=2<<10)
File "/home/delight-gpu/project/torch2trt/torch2trt/torch2trt.py", line 553, in torch2trt
outputs = module(*inputs)
File "/home/delight-gpu/anaconda3/envs/trt8.2py39/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1102, in _call_impl
return forward_call(*input, **kwargs)
File "/home/delight-gpu/project/torch2trt/models/unet_adaptive_bins.py", line 103, in forward
bin_widths_normed, range_attention_maps = self.adaptive_bins_layer(unet_out)
File "/home/delight-gpu/anaconda3/envs/trt8.2py39/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1102, in _call_impl
return forward_call(*input, **kwargs)
File "/home/delight-gpu/project/torch2trt/models/miniViT.py", line 27, in forward
tgt = self.patch_transformer(x.clone()) # .shape = S, N, E
File "/home/delight-gpu/anaconda3/envs/trt8.2py39/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1102, in _call_impl
return forward_call(*input, **kwargs)
File "/home/delight-gpu/project/torch2trt/models/layers.py", line 25, in forward
print(self.positional_encodings[:embeddings1_shape, :].grad_fn)
File "/home/delight-gpu/project/torch2trt/torch2trt/torch2trt.py", line 300, in wrapper
converter["converter"](ctx)
File "/home/delight-gpu/project/torch2trt/torch2trt/converters/getitem.py", line 31, in convert_tensor_getitem
input_trt = input._trt
AttributeError: 'Parameter' object has no attribute '_trt'