Could you show your s2anet_r50_fpn_ms_rr_dota_le135.py? We did not provide this configuration file.
'/media/yetman/Yassin/Datasets/DotaV1/' is the directory of the multi-scale images.
The image names follow the format produced by DOTA_devkit from the s2anet repository: {image name}__{image scale}__{x}___{y}.png, for example P0031__1.0__2472___824.png.
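To make that convention concrete, here is a minimal sketch of how such a filename can be decoded. The double/triple underscore separators are my assumption, reconstructed from DOTA_devkit's naming scheme:

import os
import re

def parse_split_name(filename):
    # Assumed format: {name}__{rate}__{x}___{y}.png,
    # e.g. P0031__1.0__2472___824.png
    stem = os.path.splitext(filename)[0]
    name, rate, x, y = re.match(r'(.+)__([\d.]+)__(\d+)___(\d+)$', stem).groups()
    return name, float(rate), int(x), int(y)

print(parse_split_name('P0031__1.0__2472___824.png'))
# ('P0031', 1.0, 2472, 824)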
This is the configuration printed when I run the training:
mmrotate - INFO - Config:
dataset_type = 'DOTADataset'
data_root = '/media/yetman/Yassin/Datasets/DotaV1/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='RResize', img_scale=(1024, 1024)),
dict(
type='RRandomFlip',
flip_ratio=[0.25, 0.25, 0.25],
direction=['horizontal', 'vertical', 'diagonal'],
version='le135'),
dict(
type='PolyRandomRotate',
rotate_ratio=0.5,
angles_range=180,
auto_bound=False,
rect_classes=[9, 11],
version='le135'),
dict(
type='Normalize',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
to_rgb=True),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1024, 1024),
flip=False,
transforms=[
dict(type='RResize'),
dict(
type='Normalize',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
to_rgb=True),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img'])
])
]
data = dict(
samples_per_gpu=8,
workers_per_gpu=8,
train=dict(
type='DOTADataset',
ann_file='/media/yetman/Yassin/Datasets/DotaV1/trainval/labelTxt/',
img_prefix='/media/yetman/Yassin/Datasets/DotaV1/trainval/images/',
pipeline=[
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='RResize', img_scale=(1024, 1024)),
dict(
type='RRandomFlip',
flip_ratio=[0.25, 0.25, 0.25],
direction=['horizontal', 'vertical', 'diagonal'],
version='le135'),
dict(
type='PolyRandomRotate',
rotate_ratio=0.5,
angles_range=180,
auto_bound=False,
rect_classes=[9, 11],
version='le135'),
dict(
type='Normalize',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
to_rgb=True),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
],
version='le135'),
val=dict(
type='DOTADataset',
ann_file='/media/yetman/Yassin/Datasets/DotaV1/trainval/labelTxt/',
img_prefix='/media/yetman/Yassin/Datasets/DotaV1/trainval/images/',
pipeline=[
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1024, 1024),
flip=False,
transforms=[
dict(type='RResize'),
dict(
type='Normalize',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
to_rgb=True),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img'])
])
],
version='le135'),
test=dict(
type='DOTADataset',
ann_file='/media/yetman/Yassin/Datasets/DotaV1/test/images/',
img_prefix='/media/yetman/Yassin/Datasets/DotaV1/test/images/',
pipeline=[
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1024, 1024),
flip=False,
transforms=[
dict(type='RResize'),
dict(
type='Normalize',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
to_rgb=True),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img'])
])
],
version='le135'))
evaluation = dict(interval=12, metric='mAP')
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=0.3333333333333333,
step=[8, 11])
runner = dict(type='EpochBasedRunner', max_epochs=12)
checkpoint_config = dict(interval=1)
log_config = dict(interval=50, hooks=[dict(type='TextLoggerHook')])
dist_params = dict(backend='nccl')
log_level = 'INFO'
load_from = None
resume_from = None
workflow = [('train', 1)]
angle_version = 'le135'
model = dict(
type='S2ANet',
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
zero_init_residual=False,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_input',
num_outs=5),
fam_head=dict(
type='RotatedRetinaHead',
num_classes=15,
in_channels=256,
stacked_convs=2,
feat_channels=256,
assign_by_circumhbbox=None,
anchor_generator=dict(
type='RotatedAnchorGenerator',
scales=[4],
ratios=[1.0],
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(
type='DeltaXYWHAOBBoxCoder',
angle_range='le135',
norm_factor=1,
edge_swap=False,
proj_xy=True,
target_means=(0.0, 0.0, 0.0, 0.0, 0.0),
target_stds=(1.0, 1.0, 1.0, 1.0, 1.0)),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=0.11, loss_weight=1.0)),
align_cfgs=dict(
type='AlignConv',
kernel_size=3,
channels=256,
featmap_strides=[8, 16, 32, 64, 128]),
odm_head=dict(
type='ODMRefineHead',
num_classes=15,
in_channels=256,
stacked_convs=2,
feat_channels=256,
assign_by_circumhbbox=None,
anchor_generator=dict(
type='PseudoAnchorGenerator', strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(
type='DeltaXYWHAOBBoxCoder',
angle_range='le135',
norm_factor=1,
edge_swap=False,
proj_xy=True,
target_means=(0.0, 0.0, 0.0, 0.0, 0.0),
target_stds=(1.0, 1.0, 1.0, 1.0, 1.0)),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=0.11, loss_weight=1.0)),
train_cfg=dict(
fam_cfg=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.4,
min_pos_iou=0,
ignore_iof_thr=-1,
iou_calculator=dict(type='RBboxOverlaps2D')),
allowed_border=-1,
pos_weight=-1,
debug=False),
odm_cfg=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.4,
min_pos_iou=0,
ignore_iof_thr=-1,
iou_calculator=dict(type='RBboxOverlaps2D')),
allowed_border=-1,
pos_weight=-1,
debug=False)),
test_cfg=dict(
nms_pre=2000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(iou_thr=0.1),
max_per_img=2000))
work_dir = './work_dirs/s2anet_r50_fpn_ms_rr_dota_le135'
auto_resume = False
gpu_ids = range(0, 1)
You must use test images preprocessed by MMRotate's tool, which is at tools/data/dota/split/img_split.py.
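For reference, a multi-scale test split can then be produced with a command along these lines (assuming the ms_test.json split config shipped with MMRotate, with its img_dirs and save_dir edited to point at your data):

python tools/data/dota/split/img_split.py --base-json tools/data/dota/split/split_configs/ms_test.json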
Thanks, I will test it soon; I don't have access to the machine at the moment. The code worked when training and testing on single-scale images preprocessed with DOTA_devkit from the s2anet repository (and likewise when training on the multi-scale preprocessed images/labels), so I assumed MMRotate's data-processing tool produced the same result.
I tried to replicate s2anet with multi-scale testing on this repository and I got a very low mAP. I traced the issue and found that the results output by the test prediction are as follows:
For the multi-scale experiments, the original test images are resized at three scales (0.5, 1.0 and 1.5) and then cropped into 1024×1024 patches with a stride of 512. I used the following command to produce the results:
python tools/test.py configs/s2anet/s2anet_r50_fpn_ms_rr_dota_le135.py work_dirs/s2anet_r50_fpn_ms_rr_dota_le135/epoch_12.pth --format-only --eval-options submission_dir=results/s2anet_debug/Task1_results
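To make the geometry above concrete, this is a minimal sketch (my own illustration, not the devkit's actual code) of the resize-then-crop windowing just described:

def crop_windows(width, height, rates=(0.5, 1.0, 1.5), subsize=1024, stride=512):
    # Enumerate (rate, x, y) crop windows: resize by each rate, then slide a
    # subsize x subsize window with the given stride, snapping the last window
    # to the right/bottom edge so the whole image is covered.
    windows = []
    for rate in rates:
        w, h = int(width * rate), int(height * rate)
        xs = list(range(0, max(w - subsize, 0) + 1, stride))
        ys = list(range(0, max(h - subsize, 0) + 1, stride))
        if xs[-1] + subsize < w:
            xs.append(w - subsize)
        if ys[-1] + subsize < h:
            ys.append(h - subsize)
        windows += [(rate, x, y) for y in ys for x in xs]
    return windows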
I think that either the bounding-box results from the different scales are merged incorrectly, or I have missed certain parameters needed for multi-scale testing.
Multi-scale testing with the original s2anet repository (https://github.com/csuhan/s2anet) worked for me.
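Regarding the cross-scale merging itself, conceptually it should amount to mapping each patch-level detection back to original-image coordinates using the (rate, x, y) encoded in the patch name, and then running rotated NMS per class over all scales. Below is a minimal sketch of that idea under my assumptions about the box format (cx, cy, w, h, angle); it is not MMRotate's actual implementation:

from collections import defaultdict

def patch_to_original(box, rate, x_off, y_off):
    # Undo the crop offset, then undo the resize; the angle is scale-invariant.
    cx, cy, w, h, angle = box
    return ((cx + x_off) / rate, (cy + y_off) / rate, w / rate, h / rate, angle)

def merge_detections(patch_results):
    # patch_results: {patch_filename: [(box, score, label), ...]}
    per_image = defaultdict(list)
    for fname, dets in patch_results.items():
        name, rate, x, y = parse_split_name(fname)  # parser sketched earlier
        for box, score, label in dets:
            per_image[name].append((patch_to_original(box, rate, x, y), score, label))
    # A per-class rotated NMS (e.g. mmcv.ops.nms_rotated) would then be run on
    # each per_image[name] list to remove duplicates across scales/patches.
    return dict(per_image)

If this step were skipped or mis-parameterized, the overlapping patches and scales would produce duplicated or misplaced boxes, which would explain a very low mAP.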