open-mmlab / mmdetection

OpenMMLab Detection Toolbox and Benchmark
https://mmdetection.readthedocs.io
Apache License 2.0

use GA to libra_rcnn #970

Closed: lijunhuippl closed this issue 4 years ago

lijunhuippl commented 5 years ago

When I change the RPN head to GARPNHead, the loss grows very fast within the first epoch, by around the eighth logging step.

lijunhuippl commented 5 years ago

[screenshot of the training log: 2019-07-10 12-04-53]

lijunhuippl commented 5 years ago

Please help.

lijunhuippl commented 5 years ago

    # model settings
    model = dict(
        type='FasterRCNN',
        pretrained='open-mmlab://resnet50_caffe',
        backbone=dict(
            type='ResNet',
            depth=50,
            num_stages=4,
            out_indices=(0, 1, 2, 3),
            frozen_stages=1,
            norm_cfg=dict(type='BN', requires_grad=False),
            norm_eval=True,
            style='caffe'),
        neck=[
            dict(
                type='FPN',
                in_channels=[256, 512, 1024, 2048],
                out_channels=256,
                num_outs=5),
            dict(
                type='BFP',
                in_channels=256,
                num_levels=5,
                refine_level=2,
                refine_type='conv')
        ],
        rpn_head=dict(
            type='GARPNHead',
            in_channels=256,
            feat_channels=256,
            octave_base_scale=8,
            scales_per_octave=3,
            octave_ratios=[0.5, 1.0, 2.0],
            anchor_strides=[4, 8, 16, 32, 64],
            anchor_base_sizes=None,
            anchoring_means=[.0, .0, .0, .0],
            anchoring_stds=[0.07, 0.07, 0.14, 0.14],
            target_means=(.0, .0, .0, .0),
            target_stds=[0.07, 0.07, 0.11, 0.11],
            loc_filter_thr=0.01,
            loss_loc=dict(
                type='FocalLoss',
                use_sigmoid=True,
                gamma=2.0,
                alpha=0.25,
                loss_weight=1.0),
            loss_shape=dict(type='BoundedIoULoss', beta=0.2, loss_weight=1.0),
            loss_cls=dict(
                type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
            loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
        bbox_roi_extractor=dict(
            type='SingleRoIExtractor',
            roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2),
            out_channels=256,
            featmap_strides=[4, 8, 16, 32]),
        bbox_head=dict(
            type='SharedFCBBoxHead',
            num_fcs=2,
            in_channels=256,
            fc_out_channels=1024,
            roi_feat_size=7,
            num_classes=21,
            target_means=[0., 0., 0., 0.],
            target_stds=[0.1, 0.1, 0.2, 0.2],
            reg_class_agnostic=False,
            loss_cls=dict(
                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
            # loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)))
            loss_bbox=dict(
                type='BalancedL1Loss',
                alpha=0.5,
                gamma=1.5,
                beta=1.0,
                loss_weight=1.0)))

    # model training and testing settings
    train_cfg = dict(
        rpn=dict(
            ga_assigner=dict(
                type='ApproxMaxIoUAssigner',
                pos_iou_thr=0.7,
                neg_iou_thr=0.3,
                min_pos_iou=0.3,
                ignore_iof_thr=-1),
            ga_sampler=dict(
                type='RandomSampler',
                num=256,
                pos_fraction=0.5,
                neg_pos_ub=-1,
                add_gt_as_proposals=False),
            assigner=dict(
                type='MaxIoUAssigner',
                pos_iou_thr=0.7,
                neg_iou_thr=0.3,
                min_pos_iou=0.3,
                ignore_iof_thr=-1),
            sampler=dict(
                type='RandomSampler',
                num=256,
                pos_fraction=0.5,
                neg_pos_ub=-1,
                add_gt_as_proposals=False),
            allowed_border=-1,
            pos_weight=-1,
            center_ratio=0.2,
            ignore_ratio=0.5,
            debug=False),
        rpn_proposal=dict(
            nms_across_levels=False,
            nms_pre=2000,
            nms_post=2000,
            max_num=300,  # 2000,
            nms_thr=0.7,
            min_bbox_size=0),
        rcnn=dict(
            assigner=dict(
                type='MaxIoUAssigner',
                pos_iou_thr=0.6,
                neg_iou_thr=0.6,
                min_pos_iou=0.6,
                ignore_iof_thr=-1),

            # sampler=dict(
            #     type='RandomSampler',
            #     num=256,
            #     pos_fraction=0.25,
            #     neg_pos_ub=-1,
            #     add_gt_as_proposals=True),
            # pos_weight=-1,
            # debug=False))
            sampler=dict(
                type='CombinedSampler',
                num=512,
                pos_fraction=0.25,
                add_gt_as_proposals=True,
                pos_sampler=dict(type='InstanceBalancedPosSampler'),
                neg_sampler=dict(
                    type='IoUBalancedNegSampler',
                    floor_thr=-1,
                    floor_fraction=0,
                    num_bins=3)),
            pos_weight=-1,
            debug=False))

    test_cfg = dict(
        rpn=dict(
            nms_across_levels=False,
            nms_pre=1000,
            nms_post=1000,
            max_num=300,  # 1000,
            nms_thr=0.7,
            min_bbox_size=0),
        # rcnn=dict(
        #     score_thr=1e-3, nms=dict(type='nms', iou_thr=0.5), max_per_img=100)
        rcnn=dict(
            score_thr=0.05, nms=dict(type='nms', iou_thr=0.5), max_per_img=100)
        # soft-nms is also supported for rcnn testing
        # e.g., nms=dict(type='soft_nms', iou_thr=0.5, min_score=0.05)
    )

    # dataset settings
    dataset_type = 'VOCDataset'  # 'CocoDataset'
    data_root = 'data/VOCdevkit/'  # 'data/coco/'
    img_norm_cfg = dict(
        mean=[102.9801, 115.9465, 122.7717], std=[1.0, 1.0, 1.0], to_rgb=False)

    # mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)

    data = dict(
        imgs_per_gpu=1,
        workers_per_gpu=2,
        train=dict(
            type=dataset_type,
            ann_file=data_root + 'trainVOC2007/ImageSets/Main/trainval.txt',  # 'annotations/instances_train2017.json',
            img_prefix=data_root + 'trainVOC2007',  # 'train2017/',
            img_scale=(1333, 800),
            img_norm_cfg=img_norm_cfg,
            size_divisor=32,
            flip_ratio=0.5,
            with_mask=False,
            with_crowd=True,
            with_label=True),
        val=dict(
            type=dataset_type,
            ann_file=data_root + 'trainVOC2007/ImageSets/Main/val.txt',  # 'annotations/instances_val2017.json',
            img_prefix=data_root + 'trainVOC2007',  # 'val2017/',
            img_scale=(1333, 800),
            img_norm_cfg=img_norm_cfg,
            size_divisor=32,
            flip_ratio=0,
            with_mask=False,
            with_crowd=True,
            with_label=True),
        test=dict(
            type=dataset_type,
            ann_file=data_root + 'testVOC2007/ImageSets/Main/test.txt',  # 'annotations/instances_val2017.json',
            img_prefix=data_root + 'testVOC2007',  # 'val2017/',
            img_scale=(1333, 800),
            img_norm_cfg=img_norm_cfg,
            size_divisor=32,
            flip_ratio=0,
            with_mask=False,
            with_label=False,
            test_mode=True))

    # optimizer
    optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
    optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))

    # learning policy
    lr_config = dict(
        policy='step',
        warmup='linear',
        warmup_iters=500,
        warmup_ratio=1.0 / 3,
        step=[8, 11])
    checkpoint_config = dict(interval=1)

    # yapf:disable
    log_config = dict(
        interval=50,
        hooks=[
            dict(type='TextLoggerHook'),
            dict(type='TensorboardLoggerHook')
        ])
    # yapf:enable

    # runtime settings
    total_epochs = 12
    dist_params = dict(backend='nccl')
    log_level = 'INFO'
    work_dir = './work_dirs/ga_libra'  # libra_faster_rcnn_r50_fpn_1x'
    load_from = None
    resume_from = None
    workflow = [('train', 1)]
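A note on the divergence itself: a loss that blows up within the first few hundred iterations of epoch 1 is very often a warmup/learning-rate problem rather than anything specific to combining GA with Libra R-CNN. As a sketch only (the warmup values below are suggestions, not settings taken from any official GA or Libra config), a longer and gentler warmup combined with the gradient clipping already in this config may be worth trying:

    # learning policy: longer, gentler warmup (suggested values, not from an official config)
    lr_config = dict(
        policy='step',
        warmup='linear',
        warmup_iters=1000,   # warm up over more iterations than the default 500
        warmup_ratio=0.001,  # start from a much smaller fraction of the base lr
        step=[8, 11])
    # keep gradient clipping enabled so early loss spikes are bounded
    optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))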

lijunhuippl commented 5 years ago

I just changed the learning rate from 0.02 to 0.0002. Now it looks like it is working. But is that right?
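For context on the learning rate: the default lr=0.02 in these configs is tuned for a total batch size of 16 (8 GPUs x 2 imgs_per_gpu), and mmdetection's documentation recommends scaling it linearly with the actual batch size. With imgs_per_gpu=1 this suggests a proportional reduction rather than a fixed 100x cut, which can under-train the detector within 12 epochs. A minimal sketch, where num_gpus is a placeholder for the number of GPUs actually used:

    # linear scaling rule: learning rate proportional to the total batch size
    base_lr = 0.02      # default, tuned for 8 GPUs x 2 imgs_per_gpu (batch size 16)
    imgs_per_gpu = 1    # value from the config above
    num_gpus = 4        # placeholder: set to the number of GPUs actually used
    scaled_lr = base_lr * (num_gpus * imgs_per_gpu) / 16
    optimizer = dict(type='SGD', lr=scaled_lr, momentum=0.9, weight_decay=0.0001)
    # e.g. single-GPU training with imgs_per_gpu=1 gives lr = 0.02 * 1 / 16 = 0.00125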

gittigxuy commented 5 years ago

@lijunhuippl, have you checked your result? Does it improve the result?

lijunhuippl commented 5 years ago

> @lijunhuippl, have you checked your result? Does it improve the result?

Only 0.3%. I think maybe something is wrong.

liuliaocheng commented 4 years ago

> @lijunhuippl, have you checked your result? Does it improve the result?

> Only 0.3%. I think maybe something is wrong.

Maybe the improvement really is just 0.3%.