lzhLab / cross-modal-guidance


what I predicted was a liver, not a liver tumor #1

Open 1362303119 opened 3 months ago

1362303119 commented 3 months ago

Hello, I tried to reproduce your code, but what I predicted was the liver, not the liver tumor. Why is that?

1362303119 commented 3 months ago

(screenshot attachment failed to upload: 微信图片_20240408223953.png)

1362303119 commented 3 months ago

Here is the code I use to convert the nii.gz data into slices:

```python
import os

import imageio
import nibabel as nib
import numpy as np


def windowing(img, window_center, window_width):
    # clip to the CT window and rescale to [0, 1]
    img_min = window_center - (window_width / 2.0)
    img_max = window_center + (window_width / 2.0)
    img = np.clip(img, img_min, img_max)
    img = (img - img_min) / (img_max - img_min)
    return img


# histogram equalization
def histogram_equalization(img):
    hist, bins = np.histogram(img.flatten(), 256, [0, 1])
    cdf = hist.cumsum()
    cdf_normalized = cdf * hist.max() / cdf.max()
    img_equalized = np.interp(img.flatten(), bins[:-1], cdf_normalized)
    return img_equalized.reshape(img.shape)


def window_transform(ct_array, windowWidth, windowCenter):
    """Return the image truncated according to window center and window width."""
    minWindow = float(windowCenter) - 0.5 * float(windowWidth)
    maxWindow = float(windowCenter) + 0.5 * float(windowWidth)
    ct_array[ct_array < minWindow] = minWindow
    ct_array[ct_array > maxWindow] = maxWindow
    return ct_array


def standartize(array):
    mean = np.mean(array)
    std = np.std(array)
    array = array - mean
    array = array / std
    return array


def nii_to_image(niifile):
    filenames = os.listdir(filepath)  # list the nii files
    slice_trans = []

    for f in filenames:
        # read the nii file
        img_path = os.path.join(filepath, f)
        img = nib.load(img_path)
        img_fdata = img.get_fdata()
        img_fdata = np.rot90(img_fdata)
        fname = f.replace('.nii.gz', '')  # strip the .nii.gz suffix
        img_f_path = os.path.join(imgfile, fname)
        # create the output folder for this nii file
        if not os.path.exists(img_f_path):
            os.mkdir(img_f_path)

        # convert the volume slice by slice
        (x, y, z) = img.shape
        for i in range(z):  # iterate over the z slices
            slice = img_fdata[:, :, i]  # which slicing direction to use is up to you
            windowed_image = windowing(slice, 55, 250)
            # histogram equalization
            equalized_image = histogram_equalization(windowed_image)
            # normalization
            normalized_image = (equalized_image - np.min(equalized_image)) / (
                    np.max(equalized_image) - np.min(equalized_image))
            # slice = standartize(window_transform(slice, 250, 55))
            slice = (normalized_image * 255).astype(np.uint8)
            imageio.imwrite(os.path.join(img_f_path, '{}.png'.format(i)), slice)


if __name__ == '__main__':
    filepath = './mutildata/ART/batch1/mask'
    imgfile = './datasets/data/MPTH/tumor_mask/art'
    nii_to_image(filepath)
```
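As a quick sanity check on the output (a minimal sketch; `volume-0` below is just a placeholder case folder under the output directory above), the distinct values in each generated mask slice can be printed to confirm that the tumor label survives this preprocessing:

```python
import os

import imageio
import numpy as np

# Placeholder case folder under the output directory used above.
case_dir = './datasets/data/MPTH/tumor_mask/art/volume-0'
for name in sorted(os.listdir(case_dir)):
    png = imageio.imread(os.path.join(case_dir, name))
    values = np.unique(png)
    if len(values) > 1:  # slice contains something besides background
        print(name, values)
```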

nanananamin commented 3 months ago

If you want to apply our experimental code, you need to follow the file structure described in the readme.md file. The liver lesion segmentation experiment also requires both the liver labels and the lesion labels.
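To make that requirement concrete, here is a minimal sketch of checking that every case has both a liver mask and a lesion mask (the directory names are placeholders, not the ones prescribed by readme.md):

```python
import os

# Placeholder directories; substitute the paths required by readme.md.
image_dir = './datasets/data/MPTH/image/art'
liver_dir = './datasets/data/MPTH/liver_mask/art'
tumor_dir = './datasets/data/MPTH/tumor_mask/art'

for case in sorted(os.listdir(image_dir)):
    has_liver = os.path.isdir(os.path.join(liver_dir, case))
    has_tumor = os.path.isdir(os.path.join(tumor_dir, case))
    if not (has_liver and has_tumor):
        print('missing labels for', case, '- liver:', has_liver, 'tumor:', has_tumor)
```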

1362303119 commented 3 months ago

> If you want to apply our experimental code, you need to follow the file structure described in the readme.md file. The liver lesion segmentation experiment also requires both the liver labels and the lesion labels.

I organized my data according to your format, including the tumor labels and the liver masks that I had segmented with 3Dresnet.

nanananamin commented 3 months ago

> If you want to apply our experimental code, you need to follow the file structure described in the readme.md file. The liver lesion segmentation experiment also requires both the liver labels and the lesion labels.

> I organized my data according to your format, including the tumor labels and the liver masks that I had segmented with 3Dresnet.

Please double-check that your experiment labels are correct and that the dataset code is correct.
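Concretely, a minimal way to inspect the label values is something like the following sketch (the path is a placeholder, and the 1 = liver / 2 = tumor convention is an assumption; adjust it to your data):

```python
import numpy as np
import SimpleITK as sitk

# Placeholder path to one of your mask volumes.
mask = sitk.GetArrayFromImage(sitk.ReadImage('./test_data/mask/volume-0.nii.gz'))
print('label values present:', np.unique(mask))
# If only the liver label (e.g. 1) ever appears and the tumor label (e.g. 2) does not,
# the model can only be supervised on, and can only predict, the liver.
```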

1362303119 commented 3 months ago

Hello, I found that the part of your code that saves the prediction results does predict the liver tumor. But with the prediction code I wrote myself, what the saved model predicts is the liver. Could you tell me what is wrong with my prediction code?

```python
import math
import os

import numpy as np
import SimpleITK as sitk
import torch

# windowing() is the CT-windowing helper defined in the slicing script above.


def predict(param_set, model):
    ckpt_dir = param_set['model_loc']
    folder = param_set['folder']
    save_dir = param_set['result_dir'] + folder

    dice_dir = save_dir + '/dice'
    test_result_dir = save_dir + '/testResult'
    test_batch = ['batch5']
    pv_test_dir = param_set['testdir']
    art_test_dir = pv_test_dir.replace('PV', 'ART')

    if not os.path.exists(test_result_dir):
        os.makedirs(test_result_dir)
    if not os.path.exists(dice_dir):
        os.mkdir(dice_dir)

    model_name = ckpt_dir.split('/')[-1]
    model.load_state_dict(torch.load(ckpt_dir))
    model.eval()
    print('Current model: ', model_name)

    model_test_dir = os.path.join(test_result_dir, model_name[:-4])
    # save
    print(test_result_dir)
    print(model_name)
    print(model_test_dir)
    if not os.path.exists(model_test_dir):
        os.mkdir(model_test_dir)

    print('Applying on: ' + test_batch[0])

    files = os.listdir(pv_test_dir)
    files.sort()

    dice_score = []
    dice_intersection, dice_union = 0, 0

    predict_list = open('val_list_fold0.txt', 'r').readlines()
    predict_list = [i.strip() for i in predict_list]

    for file in os.scandir(os.path.join('./test_data/ART')):
        if file.name.split('.')[0] in predict_list:
            image_art = sitk.GetArrayFromImage(sitk.ReadImage(file.path))
            image_pv = sitk.GetArrayFromImage(sitk.ReadImage(file.path.replace('ART', 'PV')))
            mask_IM = sitk.ReadImage(file.path.replace('ART', 'mask'))
            pv_mask_array = sitk.GetArrayFromImage(mask_IM)
            print(file)
            # z-range covered by the mask
            mask_voxel_coords = np.where(pv_mask_array != 0)
            minzidx = int(np.min(mask_voxel_coords[0]))
            maxzidx = int(np.max(mask_voxel_coords[0])) + 1

            windowed_pv = windowing(image_pv, 55, 250)
            windowed_art = windowing(image_art, 55, 250)
            image_pv = (windowed_pv - np.min(windowed_pv)) / (
                    np.max(windowed_pv) - np.min(windowed_pv))
            image_art = (windowed_art - np.min(windowed_art)) / (
                    np.max(windowed_art) - np.min(windowed_art))
            image_pv = np.asarray(image_pv, np.float32)
            image_art = np.asarray(image_art, np.float32)
            outputs = []

            for i in range(image_pv.shape[0]):
                output = np.zeros((image_pv.shape[1], image_pv.shape[2]))

                if i >= minzidx + 1 and i < maxzidx - 2:
                    # selected_pv_slices = image_pv[i][None]
                    # selected_art_slices = image_art[i][None]
                    selected_pv_slices = image_pv[i - 1: i + 2]
                    selected_art_slices = image_art[i - 1: i + 2]
                    selected_mask_slices = pv_mask_array[i]

                    if selected_mask_slices.max() != 0:
                        # bounding box of the mask on this slice
                        mask_voxel_coords = np.where(selected_mask_slices != 0)
                        minxidx = int(np.min(mask_voxel_coords[0]))
                        maxxidx = int(np.max(mask_voxel_coords[0])) + 1
                        minyidx = int(np.min(mask_voxel_coords[1]))
                        maxyidx = int(np.max(mask_voxel_coords[1])) + 1

                        bbox = [[minxidx, maxxidx], [minyidx, maxyidx]]
                        # test on cropped patches and stitch the results
                        output = model_apply(model, selected_pv_slices, selected_art_slices,
                                             param_set['ins'], file.name.split('.')[0], bbox)
                outputs.append(output)

            out_str = model_test_dir + '/' + file.name
            result_img = np.asarray(outputs)
            result_img[result_img >= 0.5] = 1
            result_img[result_img < 0.5] = 0

            # output.save(out_str)
            new_mask_IM = sitk.GetImageFromArray(result_img)
            new_mask_IM.CopyInformation(mask_IM)
            sitk.WriteImage(new_mask_IM, out_str)


def model_apply(model, image_pv, image_art, ins, file, bbox):
    caseNum = file

    avk = 4
    nrotate = 1
    # weight map that favours patch centres when stitching overlapping patches
    wI = np.zeros([ins, ins])
    pmap = np.zeros([image_pv.shape[1], image_pv.shape[2]])
    avI = np.zeros([image_pv.shape[1], image_pv.shape[2]])
    for i in range(ins):
        for j in range(ins):
            dx = min(i, ins - 1 - i)
            dy = min(j, ins - 1 - j)
            d = min(dx, dy) + 1
            wI[i, j] = d
    wI = wI / wI.max()

    cen_pv_x = int((bbox[0][0] + bbox[0][1]) / 2)  # pv center x
    cen_pv_y = int((bbox[1][0] + bbox[1][1]) / 2)  # pv center y
    cen_art_x = int((bbox[0][0] + bbox[0][1]) / 2)  # art center
    cen_art_y = int((bbox[1][0] + bbox[1][1]) / 2)  # art center

    for i1 in range(math.ceil(float(avk) * (float(image_pv.shape[1]) - float(ins)) / float(ins)) + 1):
        for j1 in range(math.ceil(float(avk) * (float(image_pv.shape[2]) - float(ins)) / float(ins)) + 1):
            # pv start and end index
            insti = math.floor(float(i1) * float(ins) / float(avk))
            instj = math.floor(float(j1) * float(ins) / float(avk))
            inedi = insti + ins
            inedj = instj + ins

            # art start and end index
            insti_art = max(insti + cen_art_x - cen_pv_x, 0)
            instj_art = max(instj + cen_art_y - cen_pv_y, 0)
            inedi_art = insti_art + ins
            inedj_art = instj_art + ins

            if inedi > image_pv.shape[1]:
                inedi = image_pv.shape[1]
                insti = inedi - ins
            if inedj > image_pv.shape[2]:
                inedj = image_pv.shape[2]
                instj = inedj - ins
            if inedi_art > image_art.shape[1]:
                inedi_art = image_art.shape[1]
                insti_art = inedi_art - ins
            if inedj_art > image_art.shape[2]:
                inedj_art = image_art.shape[2]
                instj_art = inedj_art - ins

            small_pmap = np.zeros([ins, ins])

            for i in range(nrotate):
                small_in_pv = image_pv[:, insti:inedi, instj:inedj]
                small_in_art = image_art[:, insti_art:inedi_art, instj_art:inedj_art]
                small_in_pv = np.rot90(small_in_pv, i)
                small_in_art = np.rot90(small_in_art, i)

                tI_pv = torch.Tensor(small_in_pv.copy()).cuda()
                tI_art = torch.Tensor(small_in_art.copy()).cuda()
                mask = torch.zeros_like(tI_pv).unsqueeze(0)

                _, _, pred, _ = model(tI_pv.unsqueeze(0), tI_art.unsqueeze(0), mask, mask)
                prob = pred.squeeze(0).data.cpu().numpy()
                small_out = prob[0]
                small_out = np.rot90(small_out, -i)

                small_pmap = small_pmap + np.array(small_out)

            small_pmap = small_pmap / nrotate

            pmap[insti:inedi, instj:inedj] += np.multiply(small_pmap, wI)
            avI[insti:inedi, instj:inedj] += wI
    pmap_img = np.divide(pmap, avI)
    return pmap_img
```
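For reference, these are the only keys `predict()` reads from `param_set`; a hypothetical configuration might look like the sketch below (every value is a placeholder, not taken from the repository):

```python
# Hypothetical configuration for predict(); only the keys match what the function reads.
param_set = {
    'model_loc': './checkpoints/fold0_best.pth',  # checkpoint path; its basename (minus .pth) names the result folder
    'folder': '/fold0',                           # appended to result_dir to form save_dir
    'result_dir': './results',
    'testdir': './test_data/PV',                  # PV test directory (the ART path is derived via replace('PV', 'ART'))
    'ins': 64,                                    # patch size used by model_apply
}

# model = <an instance of the repo's network>.cuda()
# predict(param_set, model)
```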


1362303119 commented 3 months ago

Could the problem be with the mask here?

```python
tI_pv = torch.Tensor(small_in_pv.copy()).cuda()
tI_art = torch.Tensor(small_in_art.copy()).cuda()
mask = torch.zeros_like(tI_pv).unsqueeze(0)

_, _, pred, _ = model(tI_pv.unsqueeze(0), tI_art.unsqueeze(0), mask, mask)
```