Hi, I'm trying to run the following command:

nnUNetv2_plan_and_preprocess -d 777 --verify_dataset_integrity --verbose --clean

I've previously run the code below to convert my data into the nnU-Net raw format and to generate my dataset.json:

import json
import os
import re
import shutil

from nnunetv2.paths import nnUNet_raw

dataset_name = 'Dataset777_MyDataset'
# Create necessary directories
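# (nnUNet_raw is resolved by nnunetv2.paths from the nnUNet_raw environment variable)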
imagesTr_dir = os.path.join(nnUNet_raw, dataset_name, 'imagesTr')
labelsTr_dir = os.path.join(nnUNet_raw, dataset_name, 'labelsTr')
os.makedirs(imagesTr_dir, exist_ok=True)
os.makedirs(labelsTr_dir, exist_ok=True)
# Map modalities to four-digit indices
modality_mapping = {
    'ciss': '0000',
    't2_n4': '0001'
}
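# e.g. the 'ciss' image of case 001 is written as imagesTr/case_001_0000.nii.gz
# and its 't2_n4' counterpart as imagesTr/case_001_0001.nii.gz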
# Initialize case ID counter and mapping
case_counter = 1
case_id_mapping = {}
# Define source directory
source_dir = 'Training Patches 10062024'
# List all files in the source directory
files = [f for f in os.listdir(source_dir) if f.endswith('.nii.gz')]
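# Source filenames are expected to look like, for example,
# subj01_team2_patch003_ciss.nii.gz (illustrative name, not from my data)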
for file in files:
    # Extract subject, team, patch and modality from the filename
    match = re.match(r'subj(\d+)_team(\d+)_patch(\d+)_(\w+)\.nii\.gz$', file)
    if match:
        subject, team, patch, modality = match.groups()
        case_key = f'subj{subject}_team{team}_patch{patch}'
        # Assign a unique case ID per subject/team/patch combination
        if case_key not in case_id_mapping:
            case_id = f'{case_counter:03d}'
            case_id_mapping[case_key] = case_id
            case_counter += 1
        else:
            case_id = case_id_mapping[case_key]
        # Determine the destination filename
        if modality in modality_mapping:
            modality_index = modality_mapping[modality]
            dest_filename = f'case_{case_id}_{modality_index}.nii.gz'
            dest_path = os.path.join(imagesTr_dir, dest_filename)
        elif modality == 'final':
            dest_filename = f'case_{case_id}.nii.gz'
            dest_path = os.path.join(labelsTr_dir, dest_filename)
        else:
            continue  # Skip unrecognized modalities
        # Copy and rename the file
        shutil.copyfile(os.path.join(source_dir, file), dest_path)
    else:
        print(f"File did not match pattern: {file}")
# Prepare the dataset.json
channel_names = {
    "0000": "ciss",
    "0001": "t2_n4"
}
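# nnU-Net v2 reads "channel_names"; "modality" was the key used by nnU-Net v1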
dataset_info = {
    "channel_names": channel_names,
    "file_ending": ".nii.gz",
    "tensor_image_size": "3D",
    "numTraining": len(case_id_mapping),
    "dataset_name": "Dataset777_MyDataset",
    "license": "Not licensed",
    "description": "Segmentation dataset",
    "reference": "Add reference if applicable",
    "modality": channel_names,
    "overwrite_image_reader_writer": None,
    "labels": {
        "background": 0,
        "Cortex Gray Matter": 1,
        "Normal-Appearing White Matter": 2,
        "White Matter Hyperintensities": 3,
        "Subcortical/limbic gray matter": 4,
        "Other/Artifact": 5
    }
}
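# nnU-Net v2 expects label values to be consecutive integers starting at 0
# (background), which the mapping above satisfies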
# Save dataset.json
with open(os.path.join(nnUNet_raw, dataset_name, 'dataset.json'), 'w') as f:
    json.dump(dataset_info, f, indent=4)
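For completeness, here is a small sanity check I can run after the conversion (a minimal sketch using only the standard library; it assumes the nnUNet_raw/dataset_name layout created above):

# Sanity check: the number of label files should match numTraining, and
# imagesTr should contain numTraining * number-of-channels files
import json
import os

base = os.path.join(nnUNet_raw, dataset_name)
with open(os.path.join(base, 'dataset.json')) as f:
    info = json.load(f)

labels = [f for f in os.listdir(os.path.join(base, 'labelsTr')) if f.endswith('.nii.gz')]
images = [f for f in os.listdir(os.path.join(base, 'imagesTr')) if f.endswith('.nii.gz')]

print(f"numTraining in dataset.json: {info['numTraining']}")
print(f"label files found:           {len(labels)}")
print(f"image files found:           {len(images)} "
      f"(expected {info['numTraining'] * len(info['channel_names'])})")

This check passes on my data, so the file layout itself seems consistent.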
This is the error I get when running the command above:
Fingerprint extraction...
Dataset777_MyDataset
Traceback (most recent call last):
File "/Users/georgeaidinis/miniconda3/envs/BIA_1/bin/nnUNetv2_plan_and_preprocess", line 8, in <module>
sys.exit(plan_and_preprocess_entry())
File "/Users/georgeaidinis/miniconda3/envs/BIA_1/lib/python3.10/site-packages/nnunetv2/experiment_planning/plan_and_preprocess_entrypoints.py", line 180, in plan_and_preprocess_entry
extract_fingerprints(args.d, args.fpe, args.npfp, args.verify_dataset_integrity, args.clean, args.verbose)
File "/Users/georgeaidinis/miniconda3/envs/BIA_1/lib/python3.10/site-packages/nnunetv2/experiment_planning/plan_and_preprocess_api.py", line 47, in extract_fingerprints
extract_fingerprint_dataset(d, fingerprint_extractor_class, num_processes, check_dataset_integrity, clean,
File "/Users/georgeaidinis/miniconda3/envs/BIA_1/lib/python3.10/site-packages/nnunetv2/experiment_planning/plan_and_preprocess_api.py", line 30, in extract_fingerprint_dataset
verify_dataset_integrity(join(nnUNet_raw, dataset_name), num_processes)
File "/Users/georgeaidinis/miniconda3/envs/BIA_1/lib/python3.10/site-packages/nnunetv2/experiment_planning/verify_dataset_integrity.py", line 200, in verify_dataset_integrity
reader_writer_class = determine_reader_writer_from_dataset_json(dataset_json, dataset[dataset.keys().__iter__().__next__()]['images'][0])
File "/Users/georgeaidinis/miniconda3/envs/BIA_1/lib/python3.10/site-packages/nnunetv2/imageio/reader_writer_registry.py", line 31, in determine_reader_writer_from_dataset_json
ret = recursive_find_reader_writer_by_name(ioclass_name)
File "/Users/georgeaidinis/miniconda3/envs/BIA_1/lib/python3.10/site-packages/nnunetv2/imageio/reader_writer_registry.py", line 74, in recursive_find_reader_writer_by_name
ret = recursive_find_python_class(join(nnunetv2.__path__[0], "imageio"), rw_class_name, 'nnunetv2.imageio')
File "/Users/georgeaidinis/miniconda3/envs/BIA_1/lib/python3.10/site-packages/nnunetv2/utilities/find_class_by_name.py", line 13, in recursive_find_python_class
if hasattr(m, class_name):
TypeError: hasattr(): attribute name must be string
I'm on a Mac (M1), running in a conda environment:

python version: 3.10.14
nnunetv2: 2.5.1

Any ideas as to why this might happen?