Hi @phunix9 Could you please provide the specific model, config, and the training and testing commands?
Sorry for the late reply. I trained the Swin Transformer on my own dataset (90,000 images for training and 20,000 images for testing). For testing I used the config that was generated after training finished. Here are the config, the log, and the training and testing commands.
```python
norm_cfg = dict(type='BN', requires_grad=True)
backbone_norm_cfg = dict(type='LN', requires_grad=True)
model = dict(
    type='EncoderDecoder',
    pretrained=None,
    backbone=dict(
        type='SwinTransformer',
        pretrain_img_size=224,
        embed_dims=96,
        patch_size=4,
        window_size=7,
        mlp_ratio=4,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        strides=(4, 2, 2, 2),
        out_indices=(0, 1, 2, 3),
        qkv_bias=True,
        qk_scale=None,
        patch_norm=True,
        drop_rate=0.0,
        attn_drop_rate=0.0,
        drop_path_rate=0.3,
        use_abs_pos_embed=False,
        act_cfg=dict(type='GELU'),
        norm_cfg=dict(type='LN', requires_grad=True),
        pretrain_style='official'),
    decode_head=dict(
        type='UPerHead',
        in_channels=[96, 192, 384, 768],
        in_index=[0, 1, 2, 3],
        pool_scales=(1, 2, 3, 6),
        channels=512,
        dropout_ratio=0.1,
        num_classes=2,
        norm_cfg=dict(type='BN', requires_grad=True),
        align_corners=False,
        loss_decode=dict(type='DiceLoss', loss_weight=1.0)),
    auxiliary_head=dict(
        type='FCNHead',
        in_channels=384,
        in_index=2,
        channels=256,
        num_convs=1,
        concat_input=False,
        dropout_ratio=0.1,
        num_classes=2,
        norm_cfg=dict(type='BN', requires_grad=True),
        align_corners=False,
        loss_decode=dict(type='DiceLoss', loss_weight=0.4)),
    train_cfg=dict(),
    test_cfg=dict(mode='whole'))
dataset_type = 'farmland2clsDataset'
data_root = '/media/netcia/新加卷/mmsegmentation/map'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
crop_size = (256, 256)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations'),
    dict(type='Resize', img_scale=(256, 256), ratio_range=(0.5, 2.0)),
    dict(type='RandomCrop', crop_size=(256, 256), cat_max_ratio=0.75),
    dict(type='RandomFlip', prob=0.5),
    dict(type='PhotoMetricDistortion'),
    dict(type='Normalize',
         mean=[123.675, 116.28, 103.53],
         std=[58.395, 57.12, 57.375],
         to_rgb=True),
    dict(type='Pad', size=(256, 256), pad_val=0, seg_pad_val=255),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_semantic_seg'])
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='MultiScaleFlipAug',
         img_scale=(256, 256),
         flip=False,
         transforms=[
             dict(type='Resize', keep_ratio=True),
             dict(type='RandomFlip'),
             dict(type='Normalize',
                  mean=[123.675, 116.28, 103.53],
                  std=[58.395, 57.12, 57.375],
                  to_rgb=True),
             dict(type='ImageToTensor', keys=['img']),
             dict(type='Collect', keys=['img'])
         ])
]
data = dict(
    samples_per_gpu=2,
    workers_per_gpu=1,
    train=dict(
        type='farmland2clsDataset',
        data_root='/media/netcia/新加卷/mmsegmentation/map',
        img_dir='rgb_dir/train',
        ann_dir='2cls/train',
        pipeline=train_pipeline),
    val=dict(
        type='farmland2clsDataset',
        data_root='/media/netcia/新加卷/mmsegmentation/map',
        img_dir='rgb_dir/val',
        ann_dir='2cls/val',
        pipeline=test_pipeline),
    test=dict(
        type='farmland2clsDataset',
        data_root='/media/netcia/新加卷/mmsegmentation/map',
        img_dir='rgb_dir/val',
        ann_dir='2cls/val',
        pipeline=test_pipeline))
log_config = dict(
    interval=500,
    hooks=[
        dict(type='TextLoggerHook', by_epoch=False),
        dict(type='TensorboardLoggerHook')
    ])
dist_params = dict(backend='nccl')
log_level = 'INFO'
load_from = None
resume_from = None
workflow = [('train', 1)]
cudnn_benchmark = True
optimizer = dict(
    type='AdamW',
    lr=6e-05,
    betas=(0.9, 0.999),
    weight_decay=0.01,
    paramwise_cfg=dict(
        custom_keys=dict(
            absolute_pos_embed=dict(decay_mult=0.0),
            relative_position_bias_table=dict(decay_mult=0.0),
            norm=dict(decay_mult=0.0))))
optimizer_config = dict()
lr_config = dict(
    policy='poly',
    warmup='linear',
    warmup_iters=1500,
    warmup_ratio=1e-06,
    power=1.0,
    min_lr=0.0,
    by_epoch=False)
runner = dict(type='IterBasedRunner', max_iters=300000)
checkpoint_config = dict(by_epoch=False, interval=50000)
evaluation = dict(interval=10000, metric='mIoU')
work_dir = '0721_rgb_2cls'
gpu_ids = [2]
```

Log: 20210721_104604.log

Test command:

```shell
python test.py config .../upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K2.py checkpoint .../latest.pth --aug-test --eval mIoU
```
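For context, the `farmland2clsDataset` type referenced in the config is a custom dataset class that is not shown in this thread. A minimal registration for a 2-class dataset in MMSegmentation 0.x usually looks roughly like the sketch below; the class names, palette, and file suffixes here are assumptions, not the actual code used in this issue.

```python
# Hypothetical sketch of registering a 2-class custom dataset such as
# 'farmland2clsDataset' in MMSegmentation 0.x. CLASSES, PALETTE and the
# file suffixes below are assumptions.
from mmseg.datasets.builder import DATASETS
from mmseg.datasets.custom import CustomDataset


@DATASETS.register_module()
class farmland2clsDataset(CustomDataset):
    CLASSES = ('background', 'farmland')
    PALETTE = [[0, 0, 0], [255, 255, 255]]

    def __init__(self, **kwargs):
        # Assumed image/label suffixes; adjust to the actual data layout.
        super().__init__(img_suffix='.png', seg_map_suffix='.png', **kwargs)
```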
I got 0.53 IoU for the farmland class, but during training the IoU was at least 0.7. I don't know how to resolve this.
Thanks!
Hi @phunix9
```shell
python test.py config .../upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K2.py checkpoint .../latest.pth --aug-test --eval mIoU
```
If you use `--aug-test` in the testing phase, the result you obtain is the MS (multi-scale) result, while the result reported during training is the SS (single-scale) result.
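Concretely, `--aug-test` rewrites the test pipeline before the test dataset is built. In the 0.x `tools/test.py` this is done roughly as follows (paraphrased; the exact ratio list may differ between versions):

```python
# Inside tools/test.py (MMSegmentation 0.x, paraphrased): when --aug-test is
# passed, the single-scale MultiScaleFlipAug step is turned into a
# multi-scale + horizontal-flip evaluation.
if args.aug_test:
    cfg.data.test.pipeline[1].img_ratios = [0.5, 0.75, 1.0, 1.25, 1.5, 1.75]
    cfg.data.test.pipeline[1].flip = True
```

The mIoU reported by the evaluation hook during training, on the other hand, uses the pipeline exactly as written in the config, i.e. a single scale with `flip=False`.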
Try

```shell
python test.py config .../upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K2.py checkpoint .../latest.pth --eval mIoU
```
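If you want to double-check which pipeline a run will use, you can load the config the same way the script does and inspect the `MultiScaleFlipAug` step. A minimal sketch, assuming MMSegmentation 0.x with mmcv and that the config file sits in the working directory:

```python
# Minimal sanity check (assumes mmcv / MMSegmentation 0.x): inspect the
# test-time augmentation step of the config before running evaluation.
from mmcv import Config

cfg = Config.fromfile(
    'upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K2.py')
print(cfg.data.test.pipeline[1])
# Without --aug-test this prints flip=False and no img_ratios, matching the
# single-scale evaluation run during training, so the mIoU values should line up.
```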
Hello,
I have a question about the mIoU in the training process versus the testing process.
I trained on my dataset with validation enabled, and the validation mIoU was 0.7 (for example) after the last epoch.
However, when I used the trained model to test on the same dataset, the mIoU changed to 0.5.
I don't know why the test mIoU changed, and I would like to know how to make the two values match.
Thanks!