Project-MONAI / MONAI

AI Toolkit for Healthcare Imaging
https://monai.io/
Apache License 2.0

ValueError: w2 should be positive, but is -6.401211e-07 RuntimeError: applying transform <monai.transforms.io.dictionary.LoadImaged object at 0x7f31f810f790> #7812

Closed · A-little-candy closed this issue 1 month ago

A-little-candy commented 1 month ago

I am reproducing a SAM-based method and using MONAI to preprocess my 3D images, following the original code. It seems to run well at first, but after a while it raises the error in the title. I can't see into the loading process, so I don't know what is wrong or how to fix it; any help would be greatly appreciated. Here is the error (screenshots attached), and here is my code:

```python
train_transforms = Compose(
    [
        LoadImaged(keys=["image", "label"], ensure_channel_first=True),
        ScaleIntensityRanged(
            keys=["image"], a_min=0, a_max=5000, b_min=0.0, b_max=1.0, clip=True,
        ),
        CropForegroundd(keys=["image", "label"], source_key="image"),
        Orientationd(keys=["image", "label"], axcodes="RAS"),
        # Spacingd(
        #     keys=["image", "label"],
        #     pixdim=(1.5, 1.5, 2.0),
        #     mode=("bilinear", "nearest"),
        # ),
        Spacingd(
            keys=["image", "label"],
            pixdim=(1.0, 1.0, 1.0),
            mode=("bilinear", "nearest"), 
        ),
        EnsureTyped(keys=["image", "label"], device=device, track_meta=False),
        RandCropByPosNegLabeld(
            keys=["image", "label"],
            label_key="label",
            spatial_size=(args.roi_size, args.roi_size, args.chunk), 
            pos=1,
            neg=1,
            num_samples=args.num_sample,
            image_key="image",
            image_threshold=0,
        ),
        RandFlipd(
            keys=["image", "label"],
            spatial_axis=[0],
            prob=0.10,
        ),
        RandFlipd(
            keys=["image", "label"],
            spatial_axis=[1],
            prob=0.10,
        ),
        RandFlipd(
            keys=["image", "label"],
            spatial_axis=[2],
            prob=0.10,
        ),
        RandRotate90d(
            keys=["image", "label"],
            prob=0.10,
            max_k=3,
        ),
        RandShiftIntensityd(
            keys=["image"],
            offsets=0.10,
            prob=0.50,
        ),
    ]
)
val_transforms = Compose(
    [
        LoadImaged(keys=["image", "label"], ensure_channel_first=True),
        # ScaleIntensityRanged(
        #     keys=["image"], a_min=-175, a_max=250, b_min=0.0, b_max=1.0, clip=True
        # ),
        ScaleIntensityRanged(
            keys=["image"], a_min=0, a_max=5000, b_min=0.0, b_max=1.0, clip=True
        ),
        CropForegroundd(keys=["image", "label"], source_key="image"),
        Orientationd(keys=["image", "label"], axcodes="RAS"),
        Spacingd(
            keys=["image", "label"],
            # pixdim=(1.5, 1.5, 2.0),
            pixdim=(1.0, 1.0, 1.0),
            mode=("bilinear", "nearest"),
        ),
        EnsureTyped(keys=["image", "label"], device=device, track_meta=True),
    ]
)

data_dir = args.data_path
# split_JSON = "dataset_0.json"

# datasets = os.path.join(data_dir, split_JSON)
# datalist = load_decathlon_datalist(datasets, True, "training")
# val_files = load_decathlon_datalist(datasets, True, "validation")

train_images = sorted(os.listdir(os.path.join(data_dir,"TrainData")))
train_labels = sorted(os.listdir(os.path.join(data_dir,"TrainLabels")))
val_images = sorted(os.listdir(os.path.join(data_dir,"ValData")))
val_labels = sorted(os.listdir(os.path.join(data_dir,"ValLabels")))
datalist = [{"image": os.path.join(data_dir, "TrainData", image_name), "label": os.path.join(data_dir, "TrainLabels", label_name)}
            for image_name, label_name in zip(train_images, train_labels)]
val_files = [{"image": os.path.join(data_dir, "ValData", image_name), "label": os.path.join(data_dir, "ValLabels", label_name)}
             for image_name, label_name in zip(val_images, val_labels)]
train_ds = CacheDataset(
    data=datalist,
    transform=train_transforms,
    cache_num=24,
    cache_rate=1.0,
    num_workers=8,
)
train_loader = ThreadDataLoader(train_ds, num_workers=0, batch_size=args.b, shuffle=True)
val_ds = CacheDataset(
    data=val_files, transform=val_transforms, cache_num=2, cache_rate=1.0, num_workers=0
)
val_loader = ThreadDataLoader(val_ds, num_workers=0, batch_size=1)

```
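
Since CacheDataset caches everything up front, the traceback does not point at the file that fails. A minimal debugging sketch (not part of the training script above; it assumes the `datalist` and `val_files` built there) that applies the same loading transform item by item to find the offending case:

```python
from monai.transforms import LoadImaged

# Run only the loading step over every case so the failing file is printed
# instead of being hidden inside CacheDataset's caching workers.
loader = LoadImaged(keys=["image", "label"], ensure_channel_first=True)
for item in datalist + val_files:
    try:
        loader(dict(item))
    except Exception as exc:
        print(f"failed on {item['image']}: {exc}")
```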

A-little-candy commented 1 month ago

I find it always stops at the 15th image. Maybe that is because the 15th image comes from another dataset. Here is its metadata (screenshot attached); I hope someone can spot what is wrong.
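
A quick way to check that header with nibabel is sketched below; `suspect_path` is a placeholder for the 15th image's file, not a path from the original post:

```python
import nibabel as nib

suspect_path = "ValData/case_015.nii.gz"  # placeholder; point this at the 15th image

hdr = nib.load(suspect_path).header
b, c, d = (float(hdr[k]) for k in ("quatern_b", "quatern_c", "quatern_d"))

# nibabel rebuilds the missing quaternion component as w = sqrt(1 - (b^2 + c^2 + d^2)).
# If rounding in the header pushes that term slightly below zero, loading raises
# "ValueError: w2 should be positive".
w2 = 1.0 - (b * b + c * c + d * d)
print(f"w2 = {w2:.3e}")  # a small negative value such as -6.4e-07 reproduces the error
```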

A-little-candy commented 1 month ago

Solved by adding `nib.Nifti1Header.quaternion_threshold = -1e-06`, as suggested in https://github.com/spinalcordtoolbox/spinalcordtoolbox/issues/3703. It is not a MONAI problem but a nibabel one: the qform quaternion stored in that image's header is very slightly over unit norm, so nibabel's default tolerance rejects the file when it is loaded.
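
For anyone hitting the same error, a minimal sketch of applying the workaround (the threshold must be set before any NIfTI file is loaded, i.e. before building the datasets above):

```python
import nibabel as nib

# Relax nibabel's tolerance for the qform quaternion norm so headers with a
# tiny rounding error (like -6.4e-07 here) are accepted instead of raising
# "ValueError: w2 should be positive".
nib.Nifti1Header.quaternion_threshold = -1e-06
```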