Kainmueller-Lab / PatchPerPix_experiments

experiments script for the PatchPerPix instance segmentation method
8 stars 3 forks source link

BBBC010 setup08 lower performance than expected #10

Closed pietramarrone closed 1 year ago

pietramarrone commented 2 years ago

Hi, I have managed to run setup08 for the worm instance segmentation task, but the performance at test time is lower than the one reported in the publication. Below you can see my results — I have left the parameters in the config file untouched:

PPP_BBBC01_setup08_performance

Do you have any idea why this would happen?

Here below also an example of the prediction for one of the low performance images (C07):

C07_example
pietramarrone commented 2 years ago

Hi, @abred is there anything we can do to diagnose the problem here?

Thank you, Andrea

abred commented 2 years ago

Hi Andrea, sorry, I overlooked this issue. I'm not sure — I think there might be an issue with the config file. I'll ask my colleague, as I didn't work on this dataset that much. In the meantime, could you please give it a try with this config file:

[general]
# Run-wide switches.
logging = "INFO"  # quoting normalized to basic (double-quoted) strings, matching the rest of the file
debug = false
overwrite = false

[data]
# Dataset locations — machine-specific absolute paths; adjust per host.
train_data = "/home/peter/data/datasets/data_wormbodies/train"
# NOTE(review): val_data and test_data point at the same split — confirm
# that validating on the test split is intended for this setup.
val_data = "/home/peter/data/datasets/data_wormbodies/test"
test_data = "/home/peter/data/datasets/data_wormbodies/test"
voxel_size = [ 1, 1,]
input_format = "zarr"
raw_key = "volumes/raw_bf"
gt_key = "volumes/gt_labels"
# NOTE(review): key name reads like a boolean, but the value duplicates
# gt_key — verify the expected type against the consuming code.
one_instance_per_channel_gt = "volumes/gt_labels"
num_channels = 1
validate_on_train = false

[model]
# Main prediction network configuration.
train_net_name = "train_net"
test_net_name = "test_net"
# 2D tile sizes; prediction uses a larger tile than training.
train_input_shape = [ 256, 256,]
test_input_shape = [ 512, 512,]
# Patch geometry — same values as in the [autoencoder] section below.
patchshape = [ 1, 41, 41,]
patchstride = [ 1, 1, 1,]
num_fmaps = 40
max_num_inst = 2
# Four levels of down/up-sampling by factor 2 in each spatial dimension.
fmap_inc_factors = [ 2, 2, 2, 2,]
fmap_dec_factors = [ 1, 1, 1, 1,]
downsample_factors = [ [ 2, 2,], [ 2, 2,], [ 2, 2,], [ 2, 2,],]
activation = "relu"
padding = "valid"
kernel_size = 3
num_repetitions = 2
upsampling = "resize_conv"
overlapping_inst = true
regress_num_inst = false
# NOTE(review): presumably must equal [autoencoder].code_units (both 252
# here) — confirm with the consuming code.
code_units = 252
autoencoder_chkpt = "this"

[autoencoder]
# Patch autoencoder; patchshape and code_units mirror the values in
# [model] above.
train_net_name = "train_net"
test_net_name = "test_net"
# Input equals one patch — the autoencoder operates on single patches.
train_input_shape = [ 1, 41, 41,]
test_input_shape = [ 1, 41, 41,]
patchshape = [ 1, 41, 41,]
patchstride = [ 1, 1, 1,]
network_type = "conv"
activation = "relu"
num_fmaps = [ 32, 64, 128,]
downsample_factors = [ [ 2, 2,], [ 2, 2,], [ 2, 2,],]
upsampling = "resize_conv"
kernel_size = 3
num_repetitions = 2
padding = "same"
code_method = "conv1x1_b"
# Sigmoid keeps code values in (0, 1).
code_activation = "sigmoid"
code_units = 252
regularizer = "l2"
regularizer_weight = 0.0001
overlapping_inst = true

[optimizer]
optimizer = "Adam"
lr = 5e-5  # learning rate

[preprocessing]
# Intentionally empty.

[training]
batch_size = 1
num_gpus = 1
num_workers = 16
cache_size = 40
max_iterations = 700000
# Checkpoint/snapshot/profiling intervals — presumably in iterations;
# confirm units with the training code.
checkpoints = 25000
snapshots = 25000
profiling = 25000
train_code = true

[prediction]
output_format = "zarr"
num_workers = 5
cache_size = 40
# Output dataset keys; quoting normalized to basic (double-quoted)
# strings, matching the rest of the file.
aff_key = "volumes/pred_affs"
code_key = "volumes/pred_code"
fg_key = "volumes/pred_numinst"
fg_thresh = 0.5
decode_batch_size = 1024

[validation]
# Keys listed in `params` have array values below — presumably the
# search space swept during validation; confirm with the runner.
params = ["mws", "skipThinCover", "patch_threshold", "fc_threshold"]
mws = [true]
skipThinCover = [true]
patch_threshold = [0.5, 0.6, 0.7]
fc_threshold = [0.5, 0.6, 0.7]
checkpoints = [500000, 600000, 700000]

[cross_validate]
# NOTE(review): checkpoint list differs from [validation] (550000/650000
# added, 500000 dropped) — confirm this is intentional.
checkpoints = [550000, 600000, 650000, 700000]
patch_threshold = [0.5, 0.6, 0.7]
fc_threshold = [0.5, 0.6, 0.7]

[vote_instances]
# Instance assembly (vote_instances) stage: thresholds and feature flags.
# Fixed thresholds here; [validation] sweeps the same two parameters.
patch_threshold = 0.6
fc_threshold = 0.6
cuda = true
# blockwise processing disabled — chunksize/overlap below presumably
# only apply when blockwise = true; confirm with the implementation.
blockwise = false
num_parallel_samples = 4
num_workers = 1
chunksize = [ 92, 92, 92,]
debug = false
select_patches_for_sparse_data = true
save_no_intermediates = true
output_format = "hdf"
includeSinglePatchCCS = false
sample = 1.0
removeIntersection = true
mws = true
isbiHack = false
mask_fg_border = false
graphToInst = false
skipLookup = false
skipConsensus = false
skipRanking = false
skipThinCover = true
affinity_graph_voting = false
affinity_graph_voting_selected = false
termAfterThinCover = false
do_nms = false
one_instance_per_channel = true
# -1.0 presumably disables this foreground threshold — confirm.
fg_thresh_vi = -1.0
consensus_interleaved_cnt = false
consensus_norm_prob_product = false
consensus_prob_product = true
consensus_norm_aff = true
vi_bg_use_inv_th = true
vi_bg_use_half_th = false
vi_bg_use_less_than_th = false
rank_norm_patch_score = true
rank_int_counter = false
patch_graph_norm_aff = true
blockwise_old_stitch_fn = false
only_bb = false
overlap = [ 0, 0, 5,]
flip_cons_arr_axes = false
return_intermediates = false

[evaluation]
num_workers = 1
res_key = "vote_instances"
metric = "confusion_matrix.avAP"
print_f_factor_perc_gt_0_8 = false
use_linear_sum_assignment = false
foreground_only = false

[postprocessing]

# Moved to follow its parent table directly: the TOML spec discourages
# defining a sub-table after an unrelated table has intervened.
[postprocessing.watershed]
output_format = "hdf"

[visualize]
samples_to_visualize = []
show_patches = false
show_instances = false

[training.augmentation.elastic]
# Elastic deformation augmentation.
control_point_spacing = [ 40, 40,]
jitter_sigma = [ 2, 2,]
# Rotation range — presumably degrees; confirm units.
rotation_min = 0
rotation_max = 90
subsample = 2

[training.augmentation.intensity]
# Multiplicative scale and additive shift ranges for intensity jitter.
scale = [ 0.9, 1.1,]
shift = [ -0.1, 0.1,]

[training.augmentation.simple]
# Intentionally empty — presence of the table presumably enables this
# augmentation with its defaults; confirm with the training code.

[training.sampling]
# Constraints on sampled training crops.
min_masked = 0.002
min_masked_overlap = 0.002
overlap_min_dist = 0
overlap_max_dist = 15
probability_overlap = 0.5
probability_fg = 0.5