open-mmlab / mmsegmentation

OpenMMLab Semantic Segmentation Toolbox and Benchmark.
https://mmsegmentation.readthedocs.io/en/main/
Apache License 2.0

Binary segmentation results are all nan #3721

Open Saillxl opened 3 months ago

Saillxl commented 3 months ago

My config file is as follows:

```python
# optimizer
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005)
optimizer_config = dict()
lr_config = dict(policy='poly', power=0.9, min_lr=1e-4, by_epoch=False)
runner = dict(type='IterBasedRunner', max_iters=40000)
checkpoint_config = dict(by_epoch=False, interval=20000)
evaluation = dict(interval=5000, metric=['mIoU', 'mDice', 'mFscore'])
log_config = dict(
    interval=200,
    hooks=[
        dict(type='TextLoggerHook', by_epoch=False),
        dict(type='TensorboardLoggerHook')
    ])
dist_params = dict(backend='nccl')
log_level = 'INFO'
load_from = None
resume_from = None
workflow = [('train', 1)]
cudnn_benchmark = True
norm_cfg = dict(type='BN', requires_grad=True)
model = dict(
    backbone=dict(
        type='ResNetV1cMoreFeature',
        depth=50,
        num_stages=4,
        out_channels=(3, 64, 256, 512, 1024, 2048),
        out_indices=(0, 1, 2, 3),
        dilations=(1, 1, 1, 1),
        strides=(1, 2, 2, 2),
        norm_cfg=norm_cfg,
        norm_eval=False,
        style='pytorch',
        contract_dilation=True,
        init_cfg=dict(type='Pretrained', checkpoint='open-mmlab://resnet50_v1c')),
    feat_sizes=(32, 16, 8),
    patch_sizes=(4, 2, 1),
    tf_out_channels=(256, 256, 256),
    decoder_channels=(256, 256),
    low_feat_index=-4,
    low_feat_out_channels=48,
    first_up=8,
    second_up=4,
    msa_heads=(4, 4, 4),
    mca_heads=(4, 4, 4),
    position_embed='condition',
    classes=2,
    activation=None,
    auxiliary_head=dict(
        type='FCNHead',
        in_channels=1024,
        in_index=4,
        channels=256,
        num_convs=1,
        concat_input=False,
        dropout_ratio=0.1,
        num_classes=2,
        norm_cfg=norm_cfg,
        align_corners=False,
        loss_decode=dict(
            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
    loss=dict(type='TFLoss', kl_weight=0.1, bce_weight=1.0),
    train_cfg=dict(),
    test_cfg=dict(mode='whole'))
dataset_type = 'BUSIDataset'
data_root = '/data/project/paper/fold_1/'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
img_scale = (256, 256)
crop_size = (256, 256)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations'),
    dict(type='Resize', img_scale=img_scale, ratio_range=(0.5, 2.0)),
    dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
    dict(type='RandomFlip', prob=0.5),
    dict(type='PhotoMetricDistortion'),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_semantic_seg'])
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=img_scale,
        img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75, 2.0],
        flip=False,
        transforms=[
            dict(type='Resize', img_scale=img_scale, keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img'])
        ])
]
data = dict(
    samples_per_gpu=16,
    workers_per_gpu=4,
    train=dict(
        type=dataset_type,
        data_root=data_root,
        img_dir='img_dir/train/',
        ann_dir='ann_dir/train/',
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        data_root=data_root,
        img_dir='img_dir/val/',
        ann_dir='ann_dir/val/',
        pipeline=test_pipeline),
    test=dict(
        type=dataset_type,
        data_root=data_root,
        img_dir='img_dir/test/',
        ann_dir='ann_dir/test/',
        pipeline=test_pipeline))
```

Why do my results look like this?

```
+------------+-----+-----+------+--------+-----------+--------+
|   Class    | IoU | Acc | Dice | Fscore | Precision | Recall |
+------------+-----+-----+------+--------+-----------+--------+
| background | nan | nan | nan  |  nan   |    nan    |  nan   |
|   tumor    | nan | nan | nan  |  nan   |    nan    |  nan   |
+------------+-----+-----+------+--------+-----------+--------+
```
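One way to narrow this down: in mmseg 0.x the per-class metrics typically come out as nan when a class has no valid ground-truth pixels after filtering (value 255 is the default `ignore_index`, also used as `seg_pad_val` above), so it is worth confirming what the masks under `ann_dir` actually contain. Below is a minimal sanity-check sketch, not part of the config; it assumes the annotations are single-channel PNG masks at the val path taken from the config above.

```python
# Sanity check: what label values do the annotation masks actually contain?
# Minimal sketch -- the ann_dir path comes from the config above; adjust as needed.
import glob

import numpy as np
from PIL import Image

mask_paths = glob.glob('/data/project/paper/fold_1/ann_dir/val/*.png')

values = set()
for path in mask_paths:
    mask = np.array(Image.open(path))
    values.update(np.unique(mask).tolist())

# For num_classes=2 the evaluator expects labels {0, 1}; 255 is ignored.
# If the masks reach the evaluator as {0, 255} (or end up all 255 after any
# label remapping), every pixel of a class is ignored and its row becomes nan.
print('unique label values across val masks:', sorted(values))
```

If the printed values are already `{0, 1}`, the next thing I would compare is how the 0.x `BUSIDataset` class is defined, in particular whether `reduce_zero_label` is set, since that remaps label 0 to 255 and shifts label 1 to 0.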

Saillxl commented 3 months ago

My dataset with 0/1 pixel values runs fine with UNet and DeepLab on mmseg 1.x; this problem only appears when I reproduce the setup on this mmseg 0.x codebase. Thanks!
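For reference, a binary dataset in mmseg 0.x is usually registered along these lines. This is only a sketch of what `BUSIDataset` might look like (the suffixes and palette here are assumptions, not the actual code); the `CLASSES`/`PALETTE` attributes and the `reduce_zero_label` flag are the parts worth comparing against the 1.x definition that works.

```python
# Sketch of a typical mmseg 0.x custom dataset registration; the real
# BUSIDataset may differ (suffixes, palette, reduce_zero_label).
from mmseg.datasets.builder import DATASETS
from mmseg.datasets.custom import CustomDataset


@DATASETS.register_module()
class BUSIDataset(CustomDataset):
    """Binary breast-ultrasound dataset: 0 = background, 1 = tumor."""

    CLASSES = ('background', 'tumor')
    PALETTE = [[0, 0, 0], [255, 0, 0]]

    def __init__(self, **kwargs):
        super(BUSIDataset, self).__init__(
            img_suffix='.png',
            seg_map_suffix='.png',
            # With masks already holding 0/1 values this must stay False;
            # reduce_zero_label=True would remap 0 -> 255 (ignored) and 1 -> 0.
            reduce_zero_label=False,
            **kwargs)
```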