Project-MONAI / MONAI

AI Toolkit for Healthcare Imaging
https://monai.io/
Apache License 2.0
5.77k stars 1.07k forks source link

RuntimeError: applying transform <monai.transforms.croppad.array.RandSpatialCrop object at 0x7fc49d5160d0> #3604

Closed nikombr closed 2 years ago

nikombr commented 2 years ago

Hi, we are doing a project with the data from https://warwick.ac.uk/fac/cross_fac/tia/data/glascontest/download/

Though we have run into some problems. We have done training and validation with the data converted to black and white, but it does not seem to work for the data in color. When working with the original images in colors, we get an error when creating the dataloader. Can you help us understand what goes wrong?

Here is the code:

import logging import os import sys import tempfile from glob import glob

import torch from PIL import Image from torch.utils.data import DataLoader from torch.utils.tensorboard import SummaryWriter

import monai from monai.data import ArrayDataset, create_test_image_2d, create_test_image_3d, decollate_batch from monai.inferers import sliding_window_inference from monai.metrics import DiceMetric from monai.transforms import ( Activations, AddChannel, AsDiscrete, Compose, LoadImage, RandRotate90, RandSpatialCrop, ScaleIntensity, EnsureType, RandAffine, AsChannelFirst, ) from monai.visualize import plot_2d_or_3d_image

import matplotlib.pyplot as plt import numpy as np import datetime #for labelling text-files and graphs for different runs

monai.config.print_config() logging.basicConfig(stream=sys.stdout, level=logging.INFO)

data_dir = 'warwick' #loading training set into temporary dir for training images = sorted(glob(os.path.join(data_dir, "train?.bmp"))) images2 = sorted(glob(os.path.join(data_dir, "train??.bmp"))) #this could probably be a one-liner, somehow images3 = sorted(glob(os.path.join(data_dir, "testA?.bmp"))) images4 = sorted(glob(os.path.join(data_dir, "testA??.bmp"))) images.extend(images2) images.extend(images3) images.extend(images4) del images2 del images3 del images4 segs = sorted(glob(os.path.join(data_dir, "train?_anno.bmp"))) segs2 = sorted(glob(os.path.join(data_dir, "train??_anno.bmp"))) segs3 = sorted(glob(os.path.join(data_dir, "testA?_anno.bmp"))) segs4 = sorted(glob(os.path.join(data_dir, "testA??_anno.bmp"))) segs.extend(segs2) segs.extend(segs3) segs.extend(segs4) del segs2 del segs3 del segs4 num_total = len(images) print(len(images)) image_width, image_height = Image.open(images[0]).size

training_data_size = len(images)

print(len(images)) print(len(segs))

num_total = len(images) plt.subplots(3, 3, figsize = (8,8)) for i, k in enumerate(np.random.randint(num_total,size=9)): im = Image.open(images[k]) print("chosen image is:") print(images[k]) print(np.array(im).shape) arr = np.array(im) plt.subplot(3, 3, i+1) plt.imshow(arr,cmap=plt.get_cmap('gray')) plt.tight_layout() plt.show()

plt.subplots(3, 3, figsize = (8,8)) for i, k in enumerate(np.random.randint(num_total,size=9)): im = Image.open(segs[k]) print("chosen image is:") print(segs[k]) print(np.array(im).shape) arr = np.array(im) plt.subplot(3, 3, i+1) plt.imshow(arr,cmap=plt.get_cmap('gray')) plt.tight_layout() plt.show()

Model parameter definitions

num_epochs = 100 training_sensitivity = 0.015 #1e-4 roi_size = (96, 96) sw_batch_size = 4 log_val = datetime.datetime.now() log_val = log_val.strftime("%H%M_%S")

Transforms

train_imtrans = Compose( [ LoadImage(image_only=True),

AddChannel(),

    AsChannelFirst(),#tilføjet
    ScaleIntensity(),
    RandSpatialCrop((128, 128), random_size=False),
    RandRotate90(prob=0.5, spatial_axes=(0, 1)),
    EnsureType(),
    RandAffine(prob=0.5,as_tensor_output=False),
]

) train_segtrans = Compose( [ LoadImage(image_only=True),

AddChannel(),

    AsChannelFirst(),#tilføjet
    ScaleIntensity(),
    RandSpatialCrop((128, 128), random_size=False),
    RandRotate90(prob=0.5, spatial_axes=(0, 1)),
    EnsureType(),
    RandAffine(prob=0.5,as_tensor_output=False),
]

) val_imtrans = Compose([LoadImage(image_only=True), AddChannel(), ScaleIntensity(), EnsureType()]) val_segtrans = Compose([LoadImage(image_only=True), AddChannel(), ScaleIntensity(), EnsureType()])

Dataloader

check_ds = ArrayDataset(images, train_imtrans, segs, train_segtrans) check_loader = DataLoader(check_ds, batch_size=10, num_workers=0, pin_memory=torch.cuda.is_available()) im, seg = monai.utils.misc.first(check_loader) print(im.shape, seg.shape)

and when running, we get the following output:

MONAI version: 0.8.0 Numpy version: 1.20.3 Pytorch version: 1.10.1 MONAI flags: HAS_EXT = False, USE_COMPILED = False MONAI rev id: 714d00dffe6653e21260160666c4c201ab66511b

Optional dependencies: Pytorch Ignite version: NOT INSTALLED or UNKNOWN VERSION. Nibabel version: NOT INSTALLED or UNKNOWN VERSION. scikit-image version: 0.18.3 Pillow version: 8.4.0 Tensorboard version: 2.6.0 gdown version: NOT INSTALLED or UNKNOWN VERSION. TorchVision version: 0.11.2 tqdm version: 4.62.3 lmdb version: NOT INSTALLED or UNKNOWN VERSION. psutil version: 5.8.0 pandas version: 1.3.4 einops version: NOT INSTALLED or UNKNOWN VERSION. transformers version: NOT INSTALLED or UNKNOWN VERSION. mlflow version: NOT INSTALLED or UNKNOWN VERSION.

For details about installing the optional dependencies, please visit: https://docs.monai.io/en/latest/installation.html#installing-the-recommended-dependencies

145 145 145 chosen image is: warwick/train_10.bmp (522, 775, 3) chosen image is: warwick/train_66.bmp (522, 775, 3) chosen image is: warwick/testA_16.bmp (442, 581, 3) chosen image is: warwick/testA_2.bmp (453, 589, 3) chosen image is: warwick/train_43.bmp (522, 775, 3) chosen image is: warwick/train_9.bmp (522, 775, 3) chosen image is: warwick/testA_26.bmp (522, 775, 3) chosen image is: warwick/train_1.bmp (522, 775, 3) chosen image is: warwick/train_46.bmp (522, 775, 3) chosen image is: warwick/testA_32_anno.bmp (522, 775) chosen image is: warwick/testA_23_anno.bmp (522, 775) chosen image is: warwick/testA_14_anno.bmp (522, 775) chosen image is: warwick/testA_59_anno.bmp (522, 775) chosen image is: warwick/testA_11_anno.bmp (522, 775) chosen image is: warwick/train_12_anno.bmp (433, 574) chosen image is: warwick/train_52_anno.bmp (522, 775) chosen image is: warwick/testA_48_anno.bmp (522, 775) chosen image is: warwick/train_77_anno.bmp (522, 775)

=== Transform input info -- RandSpatialCrop ===

=== Transform input info -- RandSpatialCrop ===

=== Transform input info -- RandSpatialCrop ===

=== Transform input info -- RandSpatialCrop === INFO:DataStats: === Transform input info -- RandSpatialCrop === Data statistics: Type: <class 'numpy.ndarray'> float32 Shape: (522, 775) Value range: (0.0, 1.0) Data statistics: Type: <class 'numpy.ndarray'> float32 Shape: (522, 775) Value range: (0.0, 1.0) Data statistics: Type: <class 'numpy.ndarray'> float32 Shape: (522, 775) Value range: (0.0, 1.0) Data statistics: Type: <class 'numpy.ndarray'> float32 Shape: (522, 775) Value range: (0.0, 1.0) INFO:DataStats:Data statistics: Type: <class 'numpy.ndarray'> float32 Shape: (522, 775) Value range: (0.0, 1.0) Traceback (most recent call last): File "/Users/name/anaconda3/lib/python3.9/multiprocessing/queues.py", line 251, in _feed send_bytes(obj) File "/Users/name/anaconda3/lib/python3.9/multiprocessing/connection.py", line 205, in send_bytes self._send_bytes(m[offset:offset + size]) File "/Users/name/anaconda3/lib/python3.9/multiprocessing/connection.py", line 416, in _send_bytes self._send(header + buf) File "/Users/name/anaconda3/lib/python3.9/multiprocessing/connection.py", line 373, in _send n = write(self._handle, buf) BrokenPipeError: [Errno 32] Broken pipe Traceback (most recent call last):

File "/Users/name/anaconda3/lib/python3.9/site-packages/monai/transforms/transform.py", line 82, in apply_transform return _apply_transform(transform, data, unpack_items)

File "/Users/name/anaconda3/lib/python3.9/site-packages/monai/transforms/transform.py", line 53, in _apply_transform return transform(parameters)

File "/Users/name/anaconda3/lib/python3.9/site-packages/monai/transforms/croppad/array.py", line 535, in call self.randomize(img.shape[1:])

File "/Users/name/anaconda3/lib/python3.9/site-packages/monai/transforms/croppad/array.py", line 520, in randomize self._size = fall_back_tuple(self.roi_size, img_size)

File "/Users/name/anaconda3/lib/python3.9/site-packages/monai/utils/misc.py", line 180, in fall_back_tuple user = ensure_tuple_rep(user_provided, ndim)

File "/Users/name/anaconda3/lib/python3.9/site-packages/monai/utils/misc.py", line 137, in ensure_tuple_rep raise ValueError(f"Sequence must have length {dim}, got {len(tup)}.")

ValueError: Sequence must have length 1, got 2.

The above exception was the direct cause of the following exception:

Traceback (most recent call last):

File "/Users/name/anaconda3/lib/python3.9/site-packages/monai/transforms/transform.py", line 82, in apply_transform return _apply_transform(transform, data, unpack_items)

File "/Users/name/anaconda3/lib/python3.9/site-packages/monai/transforms/transform.py", line 53, in _apply_transform return transform(parameters)

File "/Users/name/anaconda3/lib/python3.9/site-packages/monai/transforms/compose.py", line 160, in call input_ = apply_transform(transform, input, self.map_items, self.unpack_items)

File "/Users/name/anaconda3/lib/python3.9/site-packages/monai/transforms/transform.py", line 106, in apply_transform raise RuntimeError(f"applying transform {transform}") from e

RuntimeError: applying transform <monai.transforms.croppad.array.RandSpatialCrop object at 0x7fc49d5160d0>

The above exception was the direct cause of the following exception:

Traceback (most recent call last):

File "/Users/name/OneDrive/DTU/5. semester/Projektarbejde/test.py", line 149, in im, seg = monai.utils.misc.first(check_loader)

File "/Users/name/anaconda3/lib/python3.9/site-packages/monai/utils/misc.py", line 73, in first for i in iterable:

File "/Users/name/anaconda3/lib/python3.9/site-packages/torch/utils/data/dataloader.py", line 521, in next data = self._next_data()

File "/Users/name/anaconda3/lib/python3.9/site-packages/torch/utils/data/dataloader.py", line 561, in _next_data data = self._dataset_fetcher.fetch(index) # may raise StopIteration

File "/Users/name/anaconda3/lib/python3.9/site-packages/torch/utils/data/_utils/fetch.py", line 49, in fetch data = [self.dataset[idx] for idx in possibly_batched_index]

File "/Users/name/anaconda3/lib/python3.9/site-packages/torch/utils/data/_utils/fetch.py", line 49, in data = [self.dataset[idx] for idx in possibly_batched_index]

File "/Users/name/anaconda3/lib/python3.9/site-packages/monai/data/dataset.py", line 1105, in getitem return self.dataset[index]

File "/Users/name/anaconda3/lib/python3.9/site-packages/monai/data/dataset.py", line 96, in getitem return self._transform(index)

File "/Users/name/anaconda3/lib/python3.9/site-packages/monai/data/dataset.py", line 1002, in _transform data.extend(to_list(dataset[index]))

File "/Users/name/anaconda3/lib/python3.9/site-packages/monai/data/dataset.py", line 96, in getitem return self._transform(index)

File "/Users/name/anaconda3/lib/python3.9/site-packages/monai/data/dataset.py", line 82, in _transform return apply_transform(self.transform, data_i) if self.transform is not None else data_i

File "/Users/name/anaconda3/lib/python3.9/site-packages/monai/transforms/transform.py", line 106, in apply_transform raise RuntimeError(f"applying transform {transform}") from e

RuntimeError: applying transform <monai.transforms.compose.Compose object at 0x7fc49d16de80>

rijobro commented 2 years ago

You say that you're using colour images (I'm assuming RGB), but when you print the image shape, e.g.:

chosen image is:
warwick/train_77_anno.bmp
(522, 775)

the images only seem to be 2-dimensional, i.e., they don't have a colour component.