NVIDIA-AI-IOT / torch2trt

An easy to use PyTorch to TensorRT converter

When using a user-defined converter, why does an int tensor input have no _trt attribute? #943

Open lantudou opened 1 month ago

lantudou commented 1 month ago

Here is a simple piece of code that reproduces the issue:

import os
import time
import torch

import torchvision
from torch2trt.torch2trt import *
import numpy as np

class Sparse(torch.nn.Module):

    def __init__(self, embedding_size):
        super().__init__()
        self._embedding_size = embedding_size
        self._output = torch.zeros((4, self._embedding_size))

    def forward(self, x):
        x = x.float()
        return self._output

class Model(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.sparse = Sparse(100)
        self.linear = torch.nn.Linear(100, 200)

    def forward(self, x):
        y = self.sparse(x)
        return self.linear(y)

@tensorrt_converter(Sparse.forward)
def convert_sparse(ctx):
    module = get_arg(ctx, 'self', pos=0, default=None)
    x = get_arg(ctx, 'x', pos=1, default=None)

    print(x._trt)  # verify that the _trt attribute exists

if __name__ == "__main__":
    model = Sparse(100)
    model.eval()

    x = torch.ones((1, 3, 224, 224), dtype=torch.int32).cuda()
    # x = torch.ones((1, 3, 224, 224), dtype=torch.float32).cuda()  # with a float32 input it works
    model_trt = torch2trt(model, [x])

AttributeError: 'Tensor' object has no attribute '_trt'

lantudou commented 1 month ago

Is this by design, or is it simply a bug?

lantudou commented 1 month ago

So is using add_missing_trt_tensors the correct method?
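
Something like the following minimal sketch is what I have in mind. It assumes the Sparse class from the reproduction above, I have not verified it against every torch2trt version, and the actual TensorRT layer construction is omitted:

from torch2trt.torch2trt import tensorrt_converter, get_arg, add_missing_trt_tensors

@tensorrt_converter(Sparse.forward)
def convert_sparse(ctx):
    module = get_arg(ctx, 'self', pos=0, default=None)
    x = get_arg(ctx, 'x', pos=1, default=None)
    output = ctx.method_return

    # Returns a TensorRT ITensor for every torch tensor in the list; as far
    # as I understand, tensors that do not already carry a _trt attribute
    # are added to the network as constant layers.
    x_trt = add_missing_trt_tensors(ctx.network, [x])[0]

    # ... build the TensorRT layers that implement Sparse.forward here ...
    # output._trt = last_layer.get_output(0)

If that reading of the helper is right, the int input would be baked into the engine as a constant rather than fed at runtime, which may or may not be acceptable here.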