thangvubk / SoftGroup

[CVPR 2022 Oral] SoftGroup for Instance Segmentation on 3D Point Clouds

Training scannet v2 from scratch #168

Closed: LinfengUP closed this issue 1 year ago

LinfengUP commented 1 year ago

I am trying to train ScanNet v2 from scratch. Following the settings in the configs, I repeat the train set 4 times and set prepare_epoch to 20. After 128 epochs of training, the mAP is only about 30. Can anyone share a config for training ScanNet v2 from scratch?
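For context, prepare_epoch is a switch I added myself to merge the official two-stage schedule (semantic-only backbone first, then the full model with fixed_modules) into a single run. A minimal sketch of what I mean, with hypothetical names rather than SoftGroup's actual API:

# Single-stage schedule sketch (hypothetical names, not SoftGroup's code):
# train only the point-wise branch until prepare_epoch, then also turn on
# the top-down instance refinement losses.
def total_loss(losses, epoch, prepare_epoch=20):
    loss = losses['semantic'] + losses['offset']  # point-wise branch losses
    if epoch > prepare_epoch:
        loss = loss + losses['cls'] + losses['mask'] + losses['iou_score']
    return loss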

Here is my config:

model:
  prepare_epoch: 20
  channels: 32
  num_blocks: 7
  semantic_classes: 20
  instance_classes: 18
  sem2ins_classes: []
  semantic_only: False
  ignore_label: -100
  grouping_cfg:
    score_thr: 0.2
    radius: 0.04
    mean_active: 300
    class_numpoint_mean: [-1., -1., 3917., 12056., 2303.,
                          8331., 3948., 3166., 5629., 11719.,
                          1003., 3317., 4912., 10221., 3889.,
                          4136., 2120., 945., 3967., 2589.]
    npoint_thr: 0.05  # absolute if class_numpoint == -1, relative if class_numpoint != -1
    ignore_classes: [0, 1]
  instance_voxel_cfg:
    scale: 50
    spatial_shape: 20
  train_cfg:
    max_proposal_num: 200
    pos_iou_thr: 0.5
  test_cfg:
    x4_split: False
    cls_score_thr: 0.001
    mask_score_thr: -0.5
    min_npoint: 100
    eval_tasks: ['semantic', 'instance']
  # fixed_modules: ['input_conv', 'unet', 'output_layer', 'semantic_linear', 'offset_linear']

data:
  train:
    type: 'scannetv2'
    data_root: 'dataset/scannetv2'
    prefix: 'train'
    suffix: '_inst_nostuff.pth'
    training: True
    repeat: 4
    voxel_cfg:
      scale: 50
      spatial_shape: [128, 512]
      max_npoint: 250000
      min_npoint: 5000
  test:
    type: 'scannetv2'
    data_root: 'dataset/scannetv2'
    prefix: 'val'
    suffix: '_inst_nostuff.pth'
    training: False
    with_label: True
    voxel_cfg:
      scale: 50
      spatial_shape: [128, 512]
      max_npoint: 250000
      min_npoint: 5000

dataloader:
  train:
    batch_size: 4
    num_workers: 4
  test:
    batch_size: 1
    num_workers: 1

optimizer:
  type: 'Adam'
  lr: 0.004

fp16: False
epochs: 128
step_epoch: 128
save_freq: 4
pretrain: ''
work_dir: ''
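For completeness, here is how I read the npoint_thr comment under grouping_cfg above. This is only a sketch of the filtering rule as I understand it, not SoftGroup's actual code:

# Proposal size filter as I read the config comment (sketch, not repo code):
# the threshold is an absolute point count when the class mean is -1,
# and a fraction of the class mean size otherwise. In my config the -1
# classes (0 and 1) are also in ignore_classes, so only the relative
# branch should matter.
def proposal_passes(npoint, class_numpoint_mean, npoint_thr=0.05):
    if class_numpoint_mean == -1:
        return npoint > npoint_thr
    return npoint > npoint_thr * class_numpoint_mean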