jacobgil / pytorch-grad-cam

Advanced AI Explainability for computer vision. Support for CNNs, Vision Transformers, Classification, Object detection, Segmentation, Image similarity and more.
https://jacobgil.github.io/pytorch-gradcam-book
MIT License

AttributeError: 'NoneType' object has no attribute 'shape' #393

Closed: tleyden closed this issue 1 year ago

tleyden commented 1 year ago

I'm getting an AttributeError: 'NoneType' object has no attribute 'shape' when trying to generate Grad-CAM visualizations. This is with PyTorch 1.13.1+cu117 and torchvision 0.14.1+cu117.

Any idea what I'm doing wrong or how to get past this error?

import torch
from PIL import Image
from torchvision import datasets, models, transforms
from pytorch_grad_cam import GradCAM
from pytorch_grad_cam.utils.image import show_cam_on_image

# Load resnet18 model
model = models.resnet18(pretrained=False)
model.load_state_dict(torch.load("path_to_model.pt"))

# Apply transform to input image
transform = transforms.Compose(
    [
        transforms.Resize(size=(299, 299)),
        transforms.ToTensor(),
        transforms.Normalize([0.2858, 0.2105, 0.1557], [0.3035, 0.2469, 0.2060])
    ]
)
image = Image.open("path_to_image.jpg")
input_tensor = transform(image)

# Create batch
input_tensor = input_tensor.unsqueeze(0)

# Choose target layers - This is what is recommended for resnet18 
target_layers = [model.layer4[-1]]  
cam = GradCAM(model=model, target_layers=target_layers, use_cuda=True)

# Display gradcam visualization
grayscale_cam = cam(input_tensor=input_tensor, targets=None)
grayscale_cam = grayscale_cam[0, :]
visualization = show_cam_on_image(image, grayscale_cam, use_rgb=True)
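
A side note, unrelated to the traceback below: even once the cam() call succeeds, the last line will likely fail as well, because show_cam_on_image expects a float32 RGB array scaled to [0, 1] rather than a PIL Image. A rough sketch of the conversion, continuing from the snippet above and assuming the overlay should match the 299x299 Resize transform:

import numpy as np

# Convert the PIL image to a float RGB array in [0, 1] with the same
# spatial size as the CAM before overlaying the heatmap.
rgb_img = np.float32(image.convert("RGB").resize((299, 299))) / 255.0
visualization = show_cam_on_image(rgb_img, grayscale_cam, use_rgb=True)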

Error message:

---------------------------------------------------------------------------
AttributeError                            Traceback (most recent call last)
Cell In[12], line 41
     23 cam = GradCAM(model=model, target_layers=target_layers, use_cuda=True)
     25 # You can also use it within a with statement, to make sure it is freed,
     26 # In case you need to re-create it inside an outer loop:
     27 # with GradCAM(model=model, target_layers=target_layers, use_cuda=args.use_cuda) as cam:
   (...)
     39 # You can also pass aug_smooth=True and eigen_smooth=True, to apply smoothing.
     40 # grayscale_cam = cam(input_tensor=input_tensor, targets=targets)
---> 41 grayscale_cam = cam(input_tensor=input_tensor, targets=None)
     43 # In this example grayscale_cam has only one image in the batch:
     44 grayscale_cam = grayscale_cam[0, :]

File /opt/miniconda/miniconda3/envs/youth_eye_detection/lib/python3.8/site-packages/pytorch_grad_cam/base_cam.py:188, in BaseCAM.__call__(self, input_tensor, targets, aug_smooth, eigen_smooth)
    184 if aug_smooth is True:
    185     return self.forward_augmentation_smoothing(
    186         input_tensor, targets, eigen_smooth)
--> 188 return self.forward(input_tensor,
    189                     targets, eigen_smooth)

File /opt/miniconda/miniconda3/envs/youth_eye_detection/lib/python3.8/site-packages/pytorch_grad_cam/base_cam.py:84, in BaseCAM.forward(self, input_tensor, targets, eigen_smooth)
     81     self.model.zero_grad()
     82     loss = sum([target(output)
     83                for target, output in zip(targets, outputs)])
---> 84     loss.backward(retain_graph=True)
     86 # In most of the saliency attribution papers, the saliency is
     87 # computed with a single target layer.
     88 # Commonly it is the last convolutional layer.
   (...)
     93 # use all conv layers for example, all Batchnorm layers,
     94 # or something else.
     95 cam_per_layer = self.compute_cam_per_layer(input_tensor,
     96                                            targets,
     97                                            eigen_smooth)

File /opt/miniconda/miniconda3/envs/youth_eye_detection/lib/python3.8/site-packages/torch/_tensor.py:488, in Tensor.backward(self, gradient, retain_graph, create_graph, inputs)
    478 if has_torch_function_unary(self):
    479     return handle_torch_function(
    480         Tensor.backward,
    481         (self,),
   (...)
    486         inputs=inputs,
    487     )
--> 488 torch.autograd.backward(
    489     self, gradient, retain_graph, create_graph, inputs=inputs
    490 )

File /opt/miniconda/miniconda3/envs/youth_eye_detection/lib/python3.8/site-packages/torch/autograd/__init__.py:197, in backward(tensors, grad_tensors, retain_graph, create_graph, grad_variables, inputs)
    192     retain_graph = create_graph
    194 # The reason we repeat same the comment below is that
    195 # some Python versions print out the first line of a multi-line function
    196 # calls in the traceback and some print out the last line
--> 197 Variable._execution_engine.run_backward(  # Calls into the C++ engine to run the backward pass
    198     tensors, grad_tensors_, retain_graph, create_graph, inputs,
    199     allow_unreachable=True, accumulate_grad=True)

File /opt/miniconda/miniconda3/envs/youth_eye_detection/lib/python3.8/site-packages/torch/nn/modules/module.py:62, in _WrappedHook.__call__(self, *args, **kwargs)
     60     if module is None:
     61         raise RuntimeError("You are trying to call the hook of a dead Module!")
---> 62     return self.hook(module, *args, **kwargs)
     63 return self.hook(*args, **kwargs)

File /opt/miniconda/miniconda3/envs/youth_eye_detection/lib/python3.8/site-packages/flashtorch/saliency/backprop.py:217, in Backprop._register_conv_hook.<locals>._record_gradients(module, grad_in, grad_out)
    216 def _record_gradients(module, grad_in, grad_out):
--> 217     if self.gradients.shape == grad_in[0].shape:
    218         self.gradients = grad_in[0]

AttributeError: 'NoneType' object has no attribute 'shape'
tleyden commented 1 year ago

After taking a closer look at the stack trace, it looks like another library ("flashtorch") crept in. I'm removing all references to flashtorch and retrying. Please disregard, and sorry for the noise!
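
For anyone else who lands on this: the last frame of the traceback shows a backward hook registered by flashtorch's Backprop still attached to the model, and its self.gradients is None by the time pytorch-grad-cam runs its backward pass. Re-creating or reloading the model is the simplest fix; the sketch below instead inspects and clears any leftover backward hooks. It pokes at the private _backward_hooks dict on nn.Module, so treat it as a workaround rather than a supported API:

# Drop any backward hooks left on the model by other tools
# (e.g. a flashtorch Backprop created earlier in the same session).
for name, module in model.named_modules():
    if module._backward_hooks:  # non-empty dict of registered backward hooks
        print(f"clearing {len(module._backward_hooks)} backward hook(s) on {name}")
        module._backward_hooks.clear()

# With the stale hooks gone (or with a freshly constructed model),
# the GradCAM call above runs without the NoneType error.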