hdnminh opened this issue 8 months ago
Hi team, I am trying to understand the method `setup_DA_params` defined in `nnUNetTrainerV2`. Could you give me some comments on this code, especially on `self.basic_generator_patch_size`?
```python
def setup_DA_params(self):
    """
    - we increase roation angle from [-15, 15] to [-30, 30]
    - scale range is now (0.7, 1.4), was (0.85, 1.25)
    - we don't do elastic deformation anymore

    :return:
    """
    self.deep_supervision_scales = [[1, 1, 1]] + list(list(i) for i in 1 / np.cumprod(
        np.vstack(self.net_num_pool_op_kernel_sizes), axis=0))[:-1]

    if self.threeD:
        self.data_aug_params = default_3D_augmentation_params
        self.data_aug_params['rotation_x'] = (-30. / 360 * 2. * np.pi, 30. / 360 * 2. * np.pi)
        self.data_aug_params['rotation_y'] = (-30. / 360 * 2. * np.pi, 30. / 360 * 2. * np.pi)
        self.data_aug_params['rotation_z'] = (-30. / 360 * 2. * np.pi, 30. / 360 * 2. * np.pi)
        if self.do_dummy_2D_aug:
            self.data_aug_params["dummy_2D"] = True
            self.print_to_log_file("Using dummy2d data augmentation")
            self.data_aug_params["elastic_deform_alpha"] = \
                default_2D_augmentation_params["elastic_deform_alpha"]
            self.data_aug_params["elastic_deform_sigma"] = \
                default_2D_augmentation_params["elastic_deform_sigma"]
            self.data_aug_params["rotation_x"] = default_2D_augmentation_params["rotation_x"]
    else:
        self.do_dummy_2D_aug = False
        if max(self.patch_size) / min(self.patch_size) > 1.5:
            default_2D_augmentation_params['rotation_x'] = (-15. / 360 * 2. * np.pi, 15. / 360 * 2. * np.pi)
        self.data_aug_params = default_2D_augmentation_params
    self.data_aug_params["mask_was_used_for_normalization"] = self.use_mask_for_norm

    if self.do_dummy_2D_aug:
        self.basic_generator_patch_size = get_patch_size(self.patch_size[1:],
                                                         self.data_aug_params['rotation_x'],
                                                         self.data_aug_params['rotation_y'],
                                                         self.data_aug_params['rotation_z'],
                                                         self.data_aug_params['scale_range'])
        self.basic_generator_patch_size = np.array([self.patch_size[0]] + list(self.basic_generator_patch_size))
    else:
        self.basic_generator_patch_size = get_patch_size(self.patch_size,
                                                         self.data_aug_params['rotation_x'],
                                                         self.data_aug_params['rotation_y'],
                                                         self.data_aug_params['rotation_z'],
                                                         self.data_aug_params['scale_range'])

    self.data_aug_params["scale_range"] = (0.7, 1.4)
    self.data_aug_params["do_elastic"] = False
    self.data_aug_params['selected_seg_channels'] = [0]
    self.data_aug_params['patch_size_for_spatialtransform'] = self.patch_size

    self.data_aug_params["num_cached_per_thread"] = 2
```
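For context, here is my current reading of `self.basic_generator_patch_size`, please correct me if it is wrong: it seems to be the larger patch size that the data loader crops from the raw data, so that after the rotation and scaling augmentations a center crop of the final `self.patch_size` is still fully covered by real image content rather than padding. A rough back-of-the-envelope sketch of why it has to be bigger (the isotropic 128^3 patch size is just a hypothetical example, not taken from any plan file):

```python
import numpy as np

# Illustration only, with a hypothetical isotropic 128^3 patch (not from any real plan file).
final_patch_size = np.array([128, 128, 128])   # what the network is actually trained on
max_rot = 30. / 360 * 2. * np.pi               # +/- 30 degrees, as set above for the 3D case
min_scale = 0.7                                # lower end of scale_range = (0.7, 1.4)

# A square of side s rotated by angle a has a bounding box of s * (|sin a| + |cos a|) per side.
# Taking the worst case over rotations about each of the three axes, every axis of a cubic
# patch needs that size; dividing by min(scale_range) also accounts for the scaling augmentation.
grow = (abs(np.sin(max_rot)) + abs(np.cos(max_rot))) / min_scale
needed = final_patch_size * grow
print(needed.astype(int))                      # ~[249 249 249], cropped before augmenting
```

In the `do_dummy_2D_aug` branch, only `self.patch_size[1:]` is enlarged and the first axis is kept at `self.patch_size[0]`, which I read as rotations being applied in-plane only in that case.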
And what does this `get_patch_size` function do?
```python
def get_patch_size(final_patch_size, rot_x, rot_y, rot_z, scale_range):
    """
    calculating the size of a patch after applying rotations and scaling.
    """
    if isinstance(rot_x, (tuple, list)):
        rot_x = max(np.abs(rot_x))
    if isinstance(rot_y, (tuple, list)):
        rot_y = max(np.abs(rot_y))
    if isinstance(rot_z, (tuple, list)):
        rot_z = max(np.abs(rot_z))
    rot_x = min(90 / 360 * 2. * np.pi, rot_x)
    rot_y = min(90 / 360 * 2. * np.pi, rot_y)
    rot_z = min(90 / 360 * 2. * np.pi, rot_z)
    from batchgenerators.augmentations.utils import rotate_coords_3d, rotate_coords_2d
    coords = np.array(final_patch_size)
    final_shape = np.copy(coords)
    if len(coords) == 3:
        final_shape = np.max(np.vstack((np.abs(rotate_coords_3d(coords, rot_x, 0, 0)), final_shape)), 0)
        final_shape = np.max(np.vstack((np.abs(rotate_coords_3d(coords, 0, rot_y, 0)), final_shape)), 0)
        final_shape = np.max(np.vstack((np.abs(rotate_coords_3d(coords, 0, 0, rot_z)), final_shape)), 0)
    elif len(coords) == 2:
        final_shape = np.max(np.vstack((np.abs(rotate_coords_2d(coords, rot_x)), final_shape)), 0)
    final_shape /= min(scale_range)
    return final_shape.astype(int)
```
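From reading it, `get_patch_size` appears to compute the axis-aligned bounding box of `final_patch_size` after the largest allowed rotation about each axis (capped at 90 degrees), and then divides by `min(scale_range)` so the crop is also large enough at the extreme of the scaling range; the result is what `setup_DA_params` stores in `self.basic_generator_patch_size`. A quick check of that reading (assuming `numpy` and `batchgenerators` are installed and the function above is in scope; the patch size is again a hypothetical example):

```python
import numpy as np
# Assumes the get_patch_size definition pasted above is in scope and batchgenerators
# is installed; the 128^3 patch size is just a hypothetical example.

patch_size = (128, 128, 128)
rot = (-30. / 360 * 2. * np.pi, 30. / 360 * 2. * np.pi)   # +/- 30 degrees, as in setup_DA_params
scale_range = (0.7, 1.4)

bigger = get_patch_size(patch_size, rot, rot, rot, scale_range)
print(bigger)
# expected: [249 249 249] -- the size the data loader crops before rotation/scaling so that
# the final 128^3 patch can be cut out of the augmented data without padding artifacts
```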
Hey @TaWald
If you take the time to think of some specific questions, I will be happy to give you answers, but please be more specific.