Project-MONAI / MONAI

AI Toolkit for Healthcare Imaging
https://monai.io/
Apache License 2.0
5.79k stars 1.07k forks source link

Runtime Error doing Transforms #6039

Closed mk19154953 closed 1 year ago

mk19154953 commented 1 year ago

Hello I'm working from https://colab.research.google.com/github/Project-MONAI/tutorials/blob/main/modules/autoencoder_mednist.ipynb I am trying to get this script to work for my dataset. My data is 3d unlike the example data so I reformatted my data into 2 dimensions using SqueezeDimd:

patch_transform = Compose(
    [
        SqueezeDimd(keys=["orig"], dim=-1),  # squeeze the last dim
        Resized(keys=["orig"], spatial_size=[48, 48]),
         SqueezeDimd(keys=["gaus"], dim=-1),  # squeeze the last dim
        Resized(keys=["gaus"], spatial_size=[48, 48]),
         SqueezeDimd(keys=["s&p"], dim=-1),  # squeeze the last dim
        Resized(keys=["s&p"], spatial_size=[48, 48])
        # to use crop/pad instead of reszie:
        # ResizeWithPadOrCropd(keys=["img", "seg"], spatial_size=[48, 48], mode="replicate"),
    ]
)

When I try to train the model I get a runtime error:

RuntimeError                              Traceback (most recent call last)
[<ipython-input-37-7522a1f6f011>](https://localhost:8080/#) in <module>
      6 #print(training_type)
      7 for training_type in training_types:
----> 8     model, epoch_loss = train(training_type, max_epochs=max_epochs)
      9     models.append(model)
     10     epoch_losses.append(epoch_loss)
RuntimeError: applying transform <monai.transforms.compose.Compose object at 0x7f67150fa460>

For reference here is the code I am running

Import and Configuration

!python -c "import monai" || pip install -q "monai-weekly[pillow, tqdm]"
import logging
import os
import shutil
import sys
import tempfile
import random
import numpy as np
from tqdm import trange
import matplotlib.pyplot as plt
import torch
from skimage.util import random_noise

from monai.apps import download_and_extract
from monai.config import print_config
from monai.data import CacheDataset, ArrayDataset,DataLoader
from monai.networks.nets import AutoEncoder
from monai.transforms import (
    EnsureChannelFirstD,
    Compose,
    LoadImageD,
    RandFlipD,
    RandRotateD,
    RandZoomD,
    ScaleIntensityD,
    EnsureTypeD,
    Lambda,
)
from monai.utils import set_determinism
%matplotlib inline
# install itkwidgets for the tutorial

!pip install -q itkwidgets
# install tensorboard-plugin-3d for the tutorial
!pip install -q tensorboard-plugin-3d
import torch
from torch.utils.tensorboard import SummaryWriter
from monai.utils import first, set_determinism
from monai.transforms import (
    EnsureChannelFirstd,
    Compose,
    CropForegroundd,
    LoadImaged,
    Orientationd,
    ScaleIntensityRanged,
    Spacingd,
)
from monai.data import DataLoader, Dataset
from monai.config import print_config
from monai.apps import download_and_extract
from monai.visualize import blend_images, matshow3d, plot_2d_or_3d_image
import tempfile
import shutil
import os
import glob
#import matplotlib.pyplot as plt
#from itkwidgets import view
print_config()
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
set_determinism(0)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def plot_ims(ims, shape=None, figsize=(10, 10), titles=None):
    length=len(ims)
    print(length)
    shape = (1, len(ims)) if shape is None else shape
    plt.subplots(*shape, figsize=figsize)
    for i, im in enumerate(ims):
        plt.subplot(*shape, i + 1)
        im = plt.imread(im) if isinstance(im, str) else torch.squeeze(im)
        plt.imshow(im, cmap='gray')
        if titles is not None:
            plt.title(titles[i])
        plt.axis('off')
    plt.tight_layout()
    plt.show()

Get the data

from google.colab import drive
drive.mount('/content/gdrive',force_remount=True)
root_dir =  '/content/gdrive/Shareddrives/Whole Body MR/Images'
!ls '/content/gdrive/Shareddrives/Whole Body MR/Images'
#!ls root_dir
#root_dir= !ls /content/gdrive/Shareddrives/Whole  Body MR/Images

print(root_dir)
from google.colab import drive
drive.mount('/content/gdrive',force_remount=True)
root_dir =  '/content/gdrive/Shareddrives/Whole Body MR/Images'
!ls '/content/gdrive/Shareddrives/Whole Body MR/Images'
#!ls root_dir
#root_dir= !ls /content/gdrive/Shareddrives/Whole  Body MR/Images

print(root_dir)
!pip install matplotlib==3.1.3

#! pip install matplotlib --version=3.1.2
data_dir = root_dir
print( data_dir)
train_images = sorted(glob.glob(os.path.join(data_dir, "*.nii")))
#train_labels = sorted(glob.glob(os.path.join(data_dir, "labelsTr", "*.nii")))
data_dicts = [
    {"image": image_name }
    for image_name  in zip(train_images)
]
set_determinism(seed=0)
transform = Compose([
    LoadImaged(keys=["image"]),
    EnsureChannelFirstd(keys=["image"]),
    Orientationd(keys=["image"], axcodes="PLS"),
   # Spacingd(keys=["image"], pixdim=(1.5, 1.5, 2.0), mode=("bilinear", "nearest")),
    ScaleIntensityRanged(keys=["image"], a_min=-57, a_max=164, b_min=0.0, b_max=1.0, clip=True),
    CropForegroundd(keys=["image"], source_key="image"),
])
check_ds = Dataset(data=data_dicts, transform=transform)
check_loader = DataLoader(check_ds, batch_size=1)
data = first(check_loader)
print(f"image shape: {data['image'].shape}")
matshow3d(
    volume=data['image'],
    fig=None, title="input image",
    figsize=(100, 100),
    every_n=10,
    frame_dim=-1,
    show=True,
    cmap="gray",
)
#print(f"image shape: {data['image'].shape}, label shape: {data['label'].shape}")

#if not os.path.exists(data_dir):
  #  download_and_extract(resource, compressed_file, root_dir, md5)
im_dir = data_dir
all_filenames = [os.path.join(im_dir, filename)
                 for filename in os.listdir(im_dir)]
print(all_filenames)
test_frac = 0.2
num_test = int(len(all_filenames) * test_frac)
num_train = len(all_filenames) - num_test
#train_datadict = [{"im": fname} for fname in all_filenames[:num_train]]

#test_datadict = [{"im": fname} for fname in all_filenames[-num_test:]]
test_datadict = [{"im": fname} for fname in all_filenames[:num_train]]
train_datadict = [{"im": fname} for fname in all_filenames[-num_test:]]

print(f"total number of images: {len(all_filenames)}")
print(f"number of images for training: {len(train_datadict)}")
print(f"number of images for testing: {len(test_datadict)}")

Create Image Transform Chain

    "orig": d["im"],
    "gaus": torch.tensor(
        random_noise(d["im"], mode='gaussian'), dtype=torch.float32),
    "s&p": torch.tensor(random_noise(d["im"], mode='s&p', salt_vs_pepper=0.1)),
})
#NoiseLambda.view()
# 
train_transforms = Compose(
    [
        LoadImageD(keys=["im"]),
        EnsureChannelFirstD(keys=["im"]),
        ScaleIntensityD(keys=["im"]),
        RandRotateD(keys=["im"], range_x=np.pi / 12, prob=0.5, keep_size=True),
        RandFlipD(keys=["im"], spatial_axis=0, prob=0.5),
        RandZoomD(keys=["im"], min_zoom=0.9, max_zoom=1.1, prob=0.5),
        EnsureTypeD(keys=["im"]),
        NoiseLambda,

    ]
)
from monai.data import create_test_image_3d, pad_list_data_collate, DataLoader
#batch_size = 300
batch_size=16
num_workers = 0

train_ds = CacheDataset(train_datadict, train_transforms)
#print(type(train_ds))
#train_ds[1]
train_loader = DataLoader(train_ds, batch_size=batch_size,
                         shuffle=True, num_workers=num_workers,collate_fn=pad_list_data_collate)
def get_single_im(ds):
    loader = torch.utils.data.DataLoader(
        ds, batch_size=1, num_workers= 2, shuffle=True)
    itera = iter(loader)
    return next(itera)
data = get_single_im(train_ds)
print(data['orig'].shape)
matshow3d(
    volume=data['gaus'],
    fig=None, title="input image",
    figsize=(100, 100),
    every_n=10,
    frame_dim=-1,
    show=True,
    cmap="gray",
)
matshow3d(
    volume=data['orig'],
    fig=None, title="input image",
    figsize=(100, 100),
    every_n=10,
    frame_dim=-1,
    show=True,
    cmap="gray",
)
matshow3d(
    volume=data['s&p'],
    fig=None, title="input image",
    figsize=(100, 100),
    every_n=10,
    frame_dim=-1,
    show=True,
    cmap="gray",
)from monai.data import create_test_image_3d, pad_list_data_collate, DataLoader
train_loader = DataLoader(
        train_ds,
        batch_size=2,
        shuffle=True,
        num_workers=2,
        drop_last=True,
        persistent_workers=True,
        collate_fn=pad_list_data_collate,
        pin_memory=torch.cuda.is_available(),

    )
import tensorflow as tf
from tensorflow import keras
class Linear(keras.layers.Layer):
    def __init__(self, units=32, input_dim=32):
        super(Linear, self).__init__()
        w_init = tf.random_normal_initializer()
        self.w = tf.Variable(
            initial_value=w_init(shape=(input_dim, units), dtype="float32"),
            trainable=True,
        )
        b_init = tf.zeros_initializer()
        self.b = tf.Variable(
            initial_value=b_init(shape=(units,), dtype="float32"), trainable=True
        )

    def call(self, inputs):
        return tf.matmul(inputs, self.w) + self.b
class Linear(keras.layers.Layer):
    def __init__(self, units=32):
        super(Linear, self).__init__()
        self.units = units

    def build(self, input_shape):
        self.w = self.add_weight(
            shape=(input_shape[-1], self.units),
            initializer="random_normal",
            trainable=True,
        )
        self.b = self.add_weight(
            shape=(self.units,), initializer="random_normal", trainable=True
        )
 def call(self, inputs):
        return tf.matmul(inputs, self.w) + self.b
linear_layer = Linear(32)

# The layer's weights are created dynamically the first time the layer is called
x = tf.ones((2, 2))
y = linear_layer(x)
from keras import activations
from keras import constraints
from keras import initializers
from keras import regularizers
from keras.dtensor import utils
from keras.layers.convolutional.base_conv import Conv
from tensorflow.python.util.tf_export import keras_export
def __init__(

        filters,
        kernel_size,
        strides=(1, 1),
        padding="valid",
        data_format=None,
        dilation_rate=(1, 1),
        groups=1,
        activation=None,
        use_bias=True,
        kernel_initializer="glorot_uniform",
        bias_initializer="zeros",
        kernel_regularizer=None,
        bias_regularizer=None,
        activity_regularizer=None,
        kernel_constraint=None,
        bias_constraint=None,
        **kwargs
    ):
 tf.keras.layers.Conv2D(
    2,1,7200,7200,
    filters,
    kernel_size,
    strides=(1, 1),
    padding='valid',
    data_format=None,
    dilation_rate=(1, 1),
    groups=1,
    activation=None,
    use_bias=True,
    kernel_initializer='glorot_uniform',
    bias_initializer='zeros',
    kernel_regularizer=None,
    bias_regularizer=None,
    activity_regularizer=None,
    kernel_constraint=None,
    bias_constraint=None,
    **kwargs
)from monai.transforms import (
    Compose,
    EnsureChannelFirstd,
    EnsureTyped,
    LoadImaged,
    RandRotate90d,
    Resized,
    ScaleIntensityd,
    SqueezeDimd,
)
patch_func = monai.data.PatchIterd(
    keys=["orig"],
    patch_size=(None, None, 1),  # dynamic first two dimensions
    start_pos=(0, 0, 0)
)
patch_ds = monai.data.GridPatchDataset( data=train_ds, patch_iter=patch_func, transform=patch_transform, with_coordinates=False)
train_loader = DataLoader(
    patch_ds,
    batch_size=5,
    num_workers=2, 
    collate_fn=pad_list_data_collate,
    pin_memory=torch.cuda.is_available(),

) 

 #check_data = monai.utils.misc.first(train_loader)

#T.view(2,1,256,256)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
#y_train = y_train.type(torch.float64).to(device) 
from monai.transforms import EnsureChannelFirst, Compose, RandRotate90, Resize, ScaleIntensity
#train_transforms = Compose([ScaleIntensity(), EnsureChannelFirst(), Resize((96, 96, 96)), RandRotate90()])
#val_transforms = Compose([ScaleIntensity(), EnsureChannelFirst(), Resize((96, 96, 96))])

def train(dict_key_for_training, max_epochs=10, learning_rate=1e-3):

    model = AutoEncoder(
       spatial_dims=2,
        in_channels=1,
        out_channels=1,channels=(4, 8, 16, 32), 
      strides=(2, 2, 2, 2),

    ).to(device)

    # Create loss fn and optimiser

    loss_function = torch.nn.MSELoss()
 # torch.nn.Linear(),
    #model= torch.nn.Sequential( torch.nn.Linear(96,5),
    #torch.nn.ReLU(),
    #torch.nn.Dropout())
    print(model.parameters())
    #example_tensor = torch.Tensor(
   # [ [1,1]
   # ]
#)
   # new_tensor = example_tensor.to(device)
   # new_tensor.dtype=torch.float
    #cuda0 = torch.device('cuda:0')
    #torch.ones([1,1])
    #model = num_classes=5
    #model = num_classes=(5,)

    print(model)
    optimizer = torch.optim.Adam(model.parameters(), learning_rate)
   # optimizer = torch.optim.Adam(train_ds)

    epoch_loss_values = []

    t = trange(
        max_epochs,
        desc=f"{dict_key_for_training} -- epoch 0, avg loss: inf", leave=True)
    for epoch in t:
        model.train()
        epoch_loss = 0
        step = 0
        for batch_data in train_loader:
            step += 1
            inputs = batch_data[dict_key_for_training].to(device)
            #inputs= inputs.reshape(2,1,256,395)
          #  inputs= inputs.reshape(372,372,373)
            print(inputs.shape)
            #torch.Size([2, 1, 256, 256, 395])
            torch.nn.Conv2d(2,1,256,256)
            global train_ds
            global img
           ## for filename in os.listdir(im_dir):
             # img= get_single_im(train_ds)
             # img = img.resize((2296,1724))

            optimizer.zero_grad()
            outputs = model(inputs)
            model.train()
            loss = loss_function(outputs, batch_data['orig'].to(device))
            loss.backward()
            optimizer.step()
            epoch_loss += loss.item()
        epoch_loss /= step
        epoch_loss_values.append(epoch_loss)
        t.set_description(
            f"{dict_key_for_training} -- epoch {epoch + 1}"
            + f", average loss: {epoch_loss:.4f}")
    return model, epoch_loss_values
    if (epoch + 1) % val_interval == 0:
            model.eval()
            with torch.no_grad():
                num_correct = 0.0
                metric_count = 0
                for train_ds in train_loader:
                    train_images = train_ds.to(device)
                    train_outputs = model(train_images)
                    value = torch.eq(val_outputs.argmax(dim=1), val_labels)
                    metric_count += len(value)
                    num_correct += value.sum().item()
                metric = num_correct / metric_count
                metric_values.append(metric)
                if metric > best_metric:
                    best_metric = metric
                    best_metric_epoch = epoch + 1
                    torch.save(model.state_dict(), "best_metric_model_classification3d_array.pth")
                    print("saved new best metric model")
                print(
                    "current epoch: {} current accuracy: {:.4f} best accuracy: {:.4f} at epoch {}".format(
                        epoch + 1, metric, best_metric, best_metric_epoch
                    )
                )
                writer.add_scalar("val_accuracy", metric, epoch + 1)
    print(f"train completed, best_metric: {best_metric:.4f} at epoch: {best_metric_epoch}")
max_epochs = 50
import torch
training_types = ['orig', 'gaus', 's&p']
models = []
epoch_losses = []
#print(training_type)
for training_type in training_types:
    model, epoch_loss = train(training_type, max_epochs=max_epochs)
    models.append(model)
    epoch_losses.append(epoch_loss)
KumoLiu commented 1 year ago

Hi @mk19154953, could you please share the whole error message, so then I could take a deep look at it? Thanks!

mk19154953 commented 1 year ago

Hi This is actually the whole error message. I'm unfortunately not getting anything else that will point me in the right direction