Closed MiguelAngeloMartinsRibeiro closed 3 years ago
I found that I was changing in the coco_base_config instead of yolact_base_config
Anyway, if you have any tips to reduce the training time I would appreciate it. Also, any tips about the CUDA error would help, since I have 8 GB of RAM on the Nvidia card.
So I want to reduce my training time. I need to get it down to at most 1 day.
I started with a dataset of 25,000 images of 550x550 pixels, and it gave me an ETA of 6 days; then I reduced the dataset to around 9,000 images, but I got the same ETA. This reduction was supposed to reduce the ETA, wasn't it?
So after this I tried to change the settings in config.py — to be more precise, the max_iterations one — and I set it to 1 to see the difference. What can I do to reduce it?
I'm using a batch size of 6 because I get the CUDA error (out of memory) if I use 8. I have an Nvidia RTX 2070, and this is my config.py file:
from backbone import ResNetBackbone, VGGBackbone, ResNetBackboneGN, DarkNetBackbone from math import sqrt import torch
# for making bounding boxes pretty
# Palette of distinct BGR triples used to draw bounding boxes.
COLORS = (
    (244, 67, 54), (233, 30, 99), (156, 39, 176), (103, 58, 183),
    (63, 81, 181), (33, 150, 243), (3, 169, 244), (0, 188, 212),
    (0, 150, 136), (76, 175, 80), (139, 195, 74), (205, 220, 57),
    (255, 235, 59), (255, 193, 7), (255, 152, 0), (255, 87, 34),
    (121, 85, 72), (158, 158, 158), (96, 125, 139),
)
# These are in BGR and are for ImageNet
# Per-channel pixel mean and standard deviation used for input normalization.
# NOTE: the paste had both assignments fused onto one line (a SyntaxError);
# they are split back into two statements here.
MEANS = (103.94, 116.78, 123.68)
STD = (57.38, 57.12, 58.40)
# Class-name tuples for the Blender-generated dataset.
# NOTE: the paste had both assignments fused onto one line (a SyntaxError);
# they are split back into two statements here.
BLENDERCLASSES = ('boat',)
BLENDERCLASSESINSTANCE = ('boat', 'vessel',)
# ----------------------- CONFIG CLASS -----------------------
class Config(object): """ Holds the configuration for anything you want it to. To get the currently active config, call get_cfg().
# ----------------------- DATASETS -----------------------
# Root dataset config; concrete datasets derive from it via copy().
dataset_base = Config({
    'name': 'Base Dataset',
})
# Config for the Blender-generated dataset.
BlenderSet550 = dataset_base.copy({
    'name': 'BlenderSet550',
})
# ----------------------- TRANSFORMS -----------------------
# Input transform used by the ResNet backbones: RGB channel order with
# mean/std normalization enabled.
resnet_transform = Config({
    'channel_order': 'RGB',
    'normalize': True,
    'subtract_means': False,
    'to_float': False,
})
# Input transform for the VGG backbone.
# NOTE(review): the comment line inside this dict lost its '#' marker in the
# paste, which made this a SyntaxError; it is restored below. The remaining
# dict keys appear to have been lost in the paste — restore them from the
# original config.py.
vgg_transform = Config({
    # Note that though vgg is traditionally BGR,
})
# Input transform used by the DarkNet backbone: RGB, converted to float,
# no mean/std normalization.
darknet_transform = Config({
    'channel_order': 'RGB',
    'normalize': False,
    'subtract_means': False,
    'to_float': True,
})
# ----------------------- BACKBONES -----------------------
# Shared defaults that every concrete backbone config copies from.
backbone_base = Config({
    'name': 'Base Backbone',
    'path': 'path/to/pretrained/weights',
    'type': object,
    'args': tuple(),
    'transform': resnet_transform,
})
# ResNet-101 backbone: standard [3, 4, 23, 3] block layout.
resnet101_backbone = backbone_base.copy({
    'name': 'ResNet101',
    'path': 'resnet101_reducedfc.pth',
    'type': ResNetBackbone,
    'args': ([3, 4, 23, 3],),
    'transform': resnet_transform,
})
# ResNet-101 variant using GroupNorm pretrained weights.
resnet101_gn_backbone = backbone_base.copy({
    'name': 'ResNet101_GN',
    'path': 'R-101-GN.pkl',
    'type': ResNetBackboneGN,
    'args': ([3, 4, 23, 3],),
    'transform': resnet_transform,
})
# ResNet-101 with deformable convolutions at interval 3
# (the extra args select which layers get DCN — see ResNetBackbone).
resnet101_dcn_inter3_backbone = resnet101_backbone.copy({
    'name': 'ResNet101_DCN_Interval3',
    'args': ([3, 4, 23, 3], [0, 4, 23, 3], 3),
})
# ResNet-50: shallower [3, 4, 6, 3] layout, otherwise like ResNet-101.
resnet50_backbone = resnet101_backbone.copy({
    'name': 'ResNet50',
    'path': 'resnet50-19c8e357.pth',
    'type': ResNetBackbone,
    'args': ([3, 4, 6, 3],),
    'transform': resnet_transform,
})
# ResNet-50 variant with DCNv2 args.
resnet50_dcnv2_backbone = resnet50_backbone.copy({
    'name': 'ResNet50_DCNv2',
    'args': ([3, 4, 6, 3], [0, 4, 6, 3]),
})
# DarkNet-53 backbone, paired with the darknet input transform.
darknet53_backbone = backbone_base.copy({
    'name': 'DarkNet53',
    'path': 'darknet53.pth',
    'type': DarkNetBackbone,
    'args': ([1, 2, 8, 8, 4],),
    'transform': darknet_transform,
})
# VGG-16 stage layout: plain ints are conv widths, 'M' marks a pool, and
# a tuple attaches explicit kwargs to the layer it wraps.
vgg16_arch = [
    [64, 64],
    ['M', 128, 128],
    ['M', 256, 256, 256],
    [('M', {'kernel_size': 2, 'stride': 2, 'ceil_mode': True}),
     512, 512, 512],
    ['M', 512, 512, 512],
    [('M', {'kernel_size': 3, 'stride': 1, 'padding': 1}),
     (1024, {'kernel_size': 3, 'padding': 6, 'dilation': 6}),
     (1024, {'kernel_size': 1})],
]
# VGG-16 backbone (args: arch spec plus extra layer params — see VGGBackbone).
vgg16_backbone = backbone_base.copy({
    'name': 'VGG16',
    'path': 'vgg16_reducedfc.pth',
    'type': VGGBackbone,
    'args': (vgg16_arch, [(256, 2), (128, 2), (128, 1), (128, 1)], [3]),
    'transform': vgg_transform,
})
# ----------------------- MASK BRANCH TYPES -----------------------
# Enumeration of mask branch types.
# NOTE(review): the comment line inside this dict lost its '#' marker in the
# paste, which made this a SyntaxError; it is restored below. The dict keys
# themselves appear to have been lost — restore them from the original
# config.py.
mask_type = Config({
    # Direct produces masks directly as the output of each pred module.
})
# ----------------------- ACTIVATION FUNCTIONS -----------------------
# Lookup table of activation callables, selectable by name from a config.
activation_func = Config({
    'tanh': torch.tanh,
    'sigmoid': torch.sigmoid,
    'softmax': lambda x: torch.nn.functional.softmax(x, dim=-1),
    'relu': lambda x: torch.nn.functional.relu(x, inplace=True),
    'none': lambda x: x,
})
# ----------------------- FPN DEFAULTS -----------------------
# Default FPN settings.
# NOTE(review): the comment line inside this dict lost its '#' marker in the
# paste, which made this a SyntaxError; it is restored below. The dict keys
# themselves appear to have been lost — restore them from the original
# config.py.
fpn_base = Config({
    # The number of features to have in each FPN layer
})
# ----------------------- CONFIG DEFAULTS -----------------------
# Base training config. Despite the name, the dataset/class count here have
# been customized for the BlenderSet boat data rather than COCO.
coco_base_config = Config({
    'dataset': BlenderSet550,
    'num_classes': 2,  # This should include the background class
})
# ----------------------- YOLACT v1.0 CONFIGS -----------------------
# Base YOLACT v1.0 config; all other YOLACT variants copy from this.
yolact_base_config = coco_base_config.copy({
    'name': 'yolact_base',
})
# im400 variant of the base config.
yolact_im400_config = yolact_base_config.copy({
    'name': 'yolact_im400',
})
# im700 variant of the base config.
yolact_im700_config = yolact_base_config.copy({
    'name': 'yolact_im700',
})
# DarkNet-53 backbone variant of the base config.
yolact_darknet53_config = yolact_base_config.copy({
    'name': 'yolact_darknet53',
})
# ResNet-50 backbone variant of the base config.
yolact_resnet50_config = yolact_base_config.copy({
    'name': 'yolact_resnet50',
})
# ----------------------- YOLACT++ CONFIGS -----------------------
# Base YOLACT++ config, derived from yolact_base.
yolact_plus_base_config = yolact_base_config.copy({
    'name': 'yolact_plus_base',
})
# ResNet-50 variant of YOLACT++.
yolact_plus_resnet50_config = yolact_plus_base_config.copy({
    'name': 'yolact_plus_resnet50',
})
# Default config
cfg = yolact_base_config.copy()  # the currently active config; see set_cfg() below
def set_cfg(config_name:str): """ Sets the active config. Works even if cfg is already imported! """ global cfg
def set_dataset(dataset_name: str):
    """Sets the dataset of the current config.

    Args:
        dataset_name: name of a dataset Config defined at module level
            (e.g. 'BlenderSet550'); resolved via eval().
    """
    # SECURITY NOTE(review): eval() executes the raw string — only pass
    # trusted, module-defined dataset names (typically from the CLI).
    # NOTE: the paste had the docstring and this statement fused onto the
    # `def` line (a SyntaxError); the body is restored to proper form here.
    cfg.dataset = eval(dataset_name)
AND the result
[ 0] 110 || B: 5.428 | C: 2.847 | M: 5.781 | S: 0.034 | T: 14.091 || ETA: 5 days, 21:18:22 || timer: 0.471 [ 0] 120 || B: 5.287 | C: 2.716 | M: 5.668 | S: 0.024 | T: 13.695 || ETA: 5 days, 21:10:55 || timer: 0.471
I also get these warnings when I begin training:
/home/mribeiro/tese/Yolact++/utils/augmentations.py:309: VisibleDeprecationWarning: Creating an ndarray from ragged nested sequences (which is a list-or-tuple of lists-or-tuples-or ndarrays with different lengths or shapes) is deprecated. If you meant to do this, you must specify 'dtype=object' when creating the ndarray. mode = random.choice(self.sample_options) /home/mribeiro/tese/Yolact++/utils/augmentations.py:309: VisibleDeprecationWarning: Creating an ndarray from ragged nested sequences (which is a list-or-tuple of lists-or-tuples-or ndarrays with different lengths or shapes) is deprecated. If you meant to do this, you must specify 'dtype=object' when creating the ndarray. mode = random.choice(self.sample_options) /home/mribeiro/tese/Yolact++/utils/augmentations.py:309: VisibleDeprecationWarning: Creating an ndarray from ragged nested sequences (which is a list-or-tuple of lists-or-tuples-or ndarrays with different lengths or shapes) is deprecated. If you meant to do this, you must specify 'dtype=object' when creating the ndarray. mode = random.choice(self.sample_options) /home/mribeiro/tese/Yolact++/utils/augmentations.py:309: VisibleDeprecationWarning: Creating an ndarray from ragged nested sequences (which is a list-or-tuple of lists-or-tuples-or ndarrays with different lengths or shapes) is deprecated. If you meant to do this, you must specify 'dtype=object' when