aim-uofa / AdelaiDet

AdelaiDet is an open source toolbox for multiple instance-level detection and recognition tasks.
https://git.io/AdelaiDet

solov2 RuntimeError: nvrtc: error: invalid value for --gpu-architecture (-arch) #381

Open xiezhiyu275 opened 3 years ago

xiezhiyu275 commented 3 years ago

/root/anaconda3/bin/python3 /home/workspace/xzy/solov2/AdelaiDet-master/tools/train_net.py --config-file /home/workspace/xzy/solov2/AdelaiDet-master/configs/SOLOv2/R50_3x.yaml --num-gpus 1 --resume OUTPUT_DIR /home/workspace/xzy/solov2/AdelaiDet-master/training_dir/SOLOv2_R50_3x

Command Line Args: Namespace(config_file='/home/workspace/xzy/solov2/AdelaiDet-master/configs/SOLOv2/R50_3x.yaml', dist_url='tcp://127.0.0.1:49152', eval_only=False, machine_rank=0, num_gpus=1, num_machines=1, opts=['OUTPUT_DIR', '/home/workspace/xzy/solov2/AdelaiDet-master/training_dir/SOLOv2_R50_3x'], resume=True)
[05/30 10:46:58 detectron2]: Rank of current process: 0. World size: 1
[05/30 10:46:59 detectron2]: Environment info:


sys.platform              linux
Python                    3.8.5 (default, Sep 4 2020, 07:30:14) [GCC 7.3.0]
numpy                     1.19.2
detectron2                0.3 @/home/workspace/xzy/detectron2.2.1/detectron2/detectron2
Compiler                  GCC 7.3
CUDA compiler             CUDA 11.1
detectron2 arch flags     8.6
DETECTRON2_ENV_MODULE
PyTorch                   1.7.0 @/root/anaconda3/lib/python3.8/site-packages/torch
PyTorch debug build       True
GPU available             True
GPU 0                     GeForce RTX 3080 (arch=8.6)
CUDA_HOME                 /usr/local/cuda-11.1
Pillow                    8.0.1
torchvision               0.8.1 @/root/anaconda3/lib/python3.8/site-packages/torchvision
torchvision arch flags    3.5, 5.0, 6.0, 7.0, 7.5, 8.0
fvcore                    0.1.5
cv2                       4.1.2


PyTorch built with:

[05/30 10:46:59 detectron2]: Command line arguments: Namespace(config_file='/home/workspace/xzy/solov2/AdelaiDet-master/configs/SOLOv2/R50_3x.yaml', dist_url='tcp://127.0.0.1:49152', eval_only=False, machine_rank=0, num_gpus=1, num_machines=1, opts=['OUTPUT_DIR', '/home/workspace/xzy/solov2/AdelaiDet-master/training_dir/SOLOv2_R50_3x'], resume=True)
[05/30 10:46:59 detectron2]: Contents of args.config_file=/home/workspace/xzy/solov2/AdelaiDet-master/configs/SOLOv2/R50_3x.yaml:
_BASE_: "Base-SOLOv2.yaml"
MODEL:
  WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl"
  RESNETS:
    DEPTH: 50
SOLVER:
  STEPS: (210000, 250000)
  MAX_ITER: 100

[05/30 10:46:59 detectron2]: Running with full config: CUDNN_BENCHMARK: False DATALOADER: ASPECT_RATIO_GROUPING: True FILTER_EMPTY_ANNOTATIONS: True NUM_WORKERS: 4 REPEAT_THRESHOLD: 0.0 SAMPLER_TRAIN: TrainingSampler DATASETS: PRECOMPUTED_PROPOSAL_TOPK_TEST: 1000 PRECOMPUTED_PROPOSAL_TOPK_TRAIN: 2000 PROPOSAL_FILES_TEST: () PROPOSAL_FILES_TRAIN: () TEST: () TRAIN: ('custom',) GLOBAL: HACK: 1.0 INPUT: CROP: CROP_INSTANCE: True ENABLED: False SIZE: [0.9, 0.9] TYPE: relative_range FORMAT: BGR HFLIP_TRAIN: True MASK_FORMAT: bitmask MAX_SIZE_TEST: 1333 MAX_SIZE_TRAIN: 1333 MIN_SIZE_TEST: 800 MIN_SIZE_TRAIN: (640, 672, 704, 736, 768, 800) MIN_SIZE_TRAIN_SAMPLING: choice RANDOM_FLIP: horizontal MODEL: ANCHOR_GENERATOR: ANGLES: [[-90, 0, 90]] ASPECT_RATIOS: [[0.5, 1.0, 2.0]] NAME: DefaultAnchorGenerator OFFSET: 0.0 SIZES: [[32, 64, 128, 256, 512]] BACKBONE: ANTI_ALIAS: False FREEZE_AT: 2 NAME: build_resnet_fpn_backbone BASIS_MODULE: ANN_SET: coco COMMON_STRIDE: 8 CONVS_DIM: 128 IN_FEATURES: ['p3', 'p4', 'p5'] LOSS_ON: False LOSS_WEIGHT: 0.3 NAME: ProtoNet NORM: SyncBN NUM_BASES: 4 NUM_CLASSES: 80 NUM_CONVS: 3 BATEXT: CANONICAL_SIZE: 96 CONV_DIM: 256 IN_FEATURES: ['p2', 'p3', 'p4'] NUM_CHARS: 25 NUM_CONV: 2 POOLER_RESOLUTION: (8, 32) POOLER_SCALES: (0.25, 0.125, 0.0625) RECOGNITION_LOSS: ctc RECOGNIZER: attn SAMPLING_RATIO: 1 VOC_SIZE: 96 BLENDMASK: ATTN_SIZE: 14 BOTTOM_RESOLUTION: 56 INSTANCE_LOSS_WEIGHT: 1.0 POOLER_SAMPLING_RATIO: 1 POOLER_SCALES: (0.25,) POOLER_TYPE: ROIAlignV2 TOP_INTERP: bilinear VISUALIZE: False BiFPN: IN_FEATURES: ['res2', 'res3', 'res4', 'res5'] NORM: NUM_REPEATS: 6 OUT_CHANNELS: 160 CONDINST: MASK_BRANCH: CHANNELS: 128 IN_FEATURES: ['p3', 'p4', 'p5'] NORM: BN NUM_CONVS: 4 OUT_CHANNELS: 8 SEMANTIC_LOSS_ON: False MASK_HEAD: CHANNELS: 8 DISABLE_REL_COORDS: False NUM_LAYERS: 3 USE_FP16: False MASK_OUT_STRIDE: 4 MAX_PROPOSALS: -1 DEVICE: cuda DLA: CONV_BODY: DLA34 NORM: FrozenBN OUT_FEATURES: ['stage2', 'stage3', 'stage4', 'stage5'] FCOS: CENTER_SAMPLE: True FPN_STRIDES: [8, 16, 32, 64, 128] INFERENCE_TH_TEST: 0.05 INFERENCE_TH_TRAIN: 0.05 IN_FEATURES: ['p3', 'p4', 'p5', 'p6', 'p7'] LOC_LOSS_TYPE: giou LOSS_ALPHA: 0.25 LOSS_GAMMA: 2.0 NMS_TH: 0.6 NORM: GN NUM_BOX_CONVS: 4 NUM_CLASSES: 80 NUM_CLS_CONVS: 4 NUM_SHARE_CONVS: 0 POST_NMS_TOPK_TEST: 100 POST_NMS_TOPK_TRAIN: 100 POS_RADIUS: 1.5 PRE_NMS_TOPK_TEST: 1000 PRE_NMS_TOPK_TRAIN: 1000 PRIOR_PROB: 0.01 SIZES_OF_INTEREST: [64, 128, 256, 512] THRESH_WITH_CTR: False TOP_LEVELS: 2 USE_DEFORMABLE: False USE_RELU: True USE_SCALE: True YIELD_PROPOSAL: False FPN: FUSE_TYPE: sum IN_FEATURES: ['res2', 'res3', 'res4', 'res5'] NORM: OUT_CHANNELS: 256 KEYPOINT_ON: False LOAD_PROPOSALS: False MASK_ON: True MEInst: AGNOSTIC: True CENTER_SAMPLE: True DIM_MASK: 60 FLAG_PARAMETERS: False FPN_STRIDES: [8, 16, 32, 64, 128] GCN_KERNEL_SIZE: 9 INFERENCE_TH_TEST: 0.05 INFERENCE_TH_TRAIN: 0.05 IN_FEATURES: ['p3', 'p4', 'p5', 'p6', 'p7'] IOU_LABELS: [0, 1] IOU_THRESHOLDS: [0.5] LAST_DEFORMABLE: False LOC_LOSS_TYPE: giou LOSS_ALPHA: 0.25 LOSS_GAMMA: 2.0 LOSS_ON_MASK: False MASK_LOSS_TYPE: mse MASK_ON: True MASK_SIZE: 28 NMS_TH: 0.6 NORM: GN NUM_BOX_CONVS: 4 NUM_CLASSES: 80 NUM_CLS_CONVS: 4 NUM_MASK_CONVS: 4 NUM_SHARE_CONVS: 0 PATH_COMPONENTS: datasets/coco/components/coco_2017_train_class_agnosticTrue_whitenTrue_sigmoidTrue_60.npz POST_NMS_TOPK_TEST: 100 POST_NMS_TOPK_TRAIN: 100 POS_RADIUS: 1.5 PRE_NMS_TOPK_TEST: 1000 PRE_NMS_TOPK_TRAIN: 1000 PRIOR_PROB: 0.01 SIGMOID: True SIZES_OF_INTEREST: [64, 128, 256, 512] THRESH_WITH_CTR: False TOP_LEVELS: 2 
TYPE_DEFORMABLE: DCNv1 USE_DEFORMABLE: False USE_GCN_IN_MASK: False USE_RELU: True USE_SCALE: True WHITEN: True META_ARCHITECTURE: SOLOv2 MOBILENET: False PANOPTIC_FPN: COMBINE: ENABLED: True INSTANCES_CONFIDENCE_THRESH: 0.5 OVERLAP_THRESH: 0.5 STUFF_AREA_LIMIT: 4096 INSTANCE_LOSS_WEIGHT: 1.0 PIXEL_MEAN: [103.53, 116.28, 123.675] PIXEL_STD: [1.0, 1.0, 1.0] PROPOSAL_GENERATOR: MIN_SIZE: 0 NAME: RPN RESNETS: DEFORM_INTERVAL: 1 DEFORM_MODULATED: False DEFORM_NUM_GROUPS: 1 DEFORM_ON_PER_STAGE: [False, False, False, False] DEPTH: 50 NORM: FrozenBN NUM_GROUPS: 1 OUT_FEATURES: ['res2', 'res3', 'res4', 'res5'] RES2_OUT_CHANNELS: 256 RES5_DILATION: 1 STEM_OUT_CHANNELS: 64 STRIDE_IN_1X1: True WIDTH_PER_GROUP: 64 RETINANET: BBOX_REG_LOSS_TYPE: smooth_l1 BBOX_REG_WEIGHTS: (1.0, 1.0, 1.0, 1.0) FOCAL_LOSS_ALPHA: 0.25 FOCAL_LOSS_GAMMA: 2.0 IN_FEATURES: ['p3', 'p4', 'p5', 'p6', 'p7'] IOU_LABELS: [0, -1, 1] IOU_THRESHOLDS: [0.4, 0.5] NMS_THRESH_TEST: 0.5 NORM: NUM_CLASSES: 80 NUM_CONVS: 4 PRIOR_PROB: 0.01 SCORE_THRESH_TEST: 0.05 SMOOTH_L1_LOSS_BETA: 0.1 TOPK_CANDIDATES_TEST: 1000 ROI_BOX_CASCADE_HEAD: BBOX_REG_WEIGHTS: ((10.0, 10.0, 5.0, 5.0), (20.0, 20.0, 10.0, 10.0), (30.0, 30.0, 15.0, 15.0)) IOUS: (0.5, 0.6, 0.7) ROI_BOX_HEAD: BBOX_REG_LOSS_TYPE: smooth_l1 BBOX_REG_LOSS_WEIGHT: 1.0 BBOX_REG_WEIGHTS: (10.0, 10.0, 5.0, 5.0) CLS_AGNOSTIC_BBOX_REG: False CONV_DIM: 256 FC_DIM: 1024 NAME: NORM: NUM_CONV: 0 NUM_FC: 0 POOLER_RESOLUTION: 14 POOLER_SAMPLING_RATIO: 0 POOLER_TYPE: ROIAlignV2 SMOOTH_L1_BETA: 0.0 TRAIN_ON_PRED_BOXES: False ROI_HEADS: BATCH_SIZE_PER_IMAGE: 512 IN_FEATURES: ['res4'] IOU_LABELS: [0, 1] IOU_THRESHOLDS: [0.5] NAME: Res5ROIHeads NMS_THRESH_TEST: 0.5 NUM_CLASSES: 2 POSITIVE_FRACTION: 0.25 PROPOSAL_APPEND_GT: True SCORE_THRESH_TEST: 0.05 ROI_KEYPOINT_HEAD: CONV_DIMS: (512, 512, 512, 512, 512, 512, 512, 512) LOSS_WEIGHT: 1.0 MIN_KEYPOINTS_PER_IMAGE: 1 NAME: KRCNNConvDeconvUpsampleHead NORMALIZE_LOSS_BY_VISIBLE_KEYPOINTS: True NUM_KEYPOINTS: 17 POOLER_RESOLUTION: 14 POOLER_SAMPLING_RATIO: 0 POOLER_TYPE: ROIAlignV2 ROI_MASK_HEAD: CLS_AGNOSTIC_MASK: False CONV_DIM: 256 NAME: MaskRCNNConvUpsampleHead NORM: NUM_CONV: 0 POOLER_RESOLUTION: 14 POOLER_SAMPLING_RATIO: 0 POOLER_TYPE: ROIAlignV2 RPN: BATCH_SIZE_PER_IMAGE: 256 BBOX_REG_LOSS_TYPE: smooth_l1 BBOX_REG_LOSS_WEIGHT: 1.0 BBOX_REG_WEIGHTS: (1.0, 1.0, 1.0, 1.0) BOUNDARY_THRESH: -1 HEAD_NAME: StandardRPNHead IN_FEATURES: ['res4'] IOU_LABELS: [0, -1, 1] IOU_THRESHOLDS: [0.3, 0.7] LOSS_WEIGHT: 1.0 NMS_THRESH: 0.7 POSITIVE_FRACTION: 0.5 POST_NMS_TOPK_TEST: 1000 POST_NMS_TOPK_TRAIN: 2000 PRE_NMS_TOPK_TEST: 6000 PRE_NMS_TOPK_TRAIN: 12000 SMOOTH_L1_BETA: 0.0 SEM_SEG_HEAD: COMMON_STRIDE: 4 CONVS_DIM: 128 IGNORE_VALUE: 255 IN_FEATURES: ['p2', 'p3', 'p4', 'p5'] LOSS_WEIGHT: 1.0 NAME: SemSegFPNHead NORM: GN NUM_CLASSES: 54 SOLOV2: FPN_INSTANCE_STRIDES: [8, 8, 16, 32, 32] FPN_SCALE_RANGES: ((1, 96), (48, 192), (96, 384), (192, 768), (384, 2048)) INSTANCE_CHANNELS: 512 INSTANCE_IN_CHANNELS: 256 INSTANCE_IN_FEATURES: ['p2', 'p3', 'p4', 'p5', 'p6'] LOSS: DICE_WEIGHT: 3.0 FOCAL_ALPHA: 0.25 FOCAL_GAMMA: 2.0 FOCAL_USE_SIGMOID: True FOCAL_WEIGHT: 1.0 MASK_CHANNELS: 128 MASK_IN_CHANNELS: 256 MASK_IN_FEATURES: ['p2', 'p3', 'p4', 'p5'] MASK_THR: 0.5 MAX_PER_IMG: 100 NMS_KERNEL: gaussian NMS_PRE: 500 NMS_SIGMA: 2 NMS_TYPE: matrix NORM: GN NUM_CLASSES: 80 NUM_GRIDS: [40, 36, 24, 16, 12] NUM_INSTANCE_CONVS: 4 NUM_KERNELS: 256 NUM_MASKS: 256 PRIOR_PROB: 0.01 SCORE_THR: 0.1 SIGMA: 0.2 TYPE_DCN: DCN UPDATE_THR: 0.05 USE_COORD_CONV: True USE_DCN_IN_INSTANCE: False 
TOP_MODULE: DIM: 16 NAME: conv VOVNET: BACKBONE_OUT_CHANNELS: 256 CONV_BODY: V-39-eSE NORM: FrozenBN OUT_CHANNELS: 256 OUT_FEATURES: ['stage2', 'stage3', 'stage4', 'stage5'] WEIGHTS: /home/workspace/xzy/solov2/AdelaiDet-master/SOLOv2_R50_3x.pth OUTPUT_DIR: /home/workspace/xzy/solov2/AdelaiDet-master/training_dir/SOLOv2_R50_3x SEED: -1 SOLVER: AMP: ENABLED: False BASE_LR: 0.00125 BIAS_LR_FACTOR: 1.0 CHECKPOINT_PERIOD: 5000 CLIP_GRADIENTS: CLIP_TYPE: value CLIP_VALUE: 1.0 ENABLED: False NORM_TYPE: 2.0 GAMMA: 0.1 IMS_PER_BATCH: 4 LR_SCHEDULER_NAME: WarmupMultiStepLR MAX_ITER: 100 MOMENTUM: 0.9 NESTEROV: False REFERENCE_WORLD_SIZE: 0 STEPS: (210000, 250000) WARMUP_FACTOR: 0.01 WARMUP_ITERS: 1000 WARMUP_METHOD: linear WEIGHT_DECAY: 0.0001 WEIGHT_DECAY_BIAS: 0.0001 WEIGHT_DECAY_NORM: 0.0 TEST: AUG: ENABLED: False FLIP: True MAX_SIZE: 4000 MIN_SIZES: (400, 500, 600, 700, 800, 900, 1000, 1100, 1200) DETECTIONS_PER_IMAGE: 100 EVAL_PERIOD: 0 EXPECTED_RESULTS: [] KEYPOINT_OKS_SIGMAS: [] PRECISE_BN: ENABLED: False NUM_ITER: 200 VERSION: 2 VIS_PERIOD: 0 [05/30 10:46:59 detectron2]: Full config saved to /home/workspace/xzy/solov2/AdelaiDet-master/training_dir/SOLOv2_R50_3x/config.yaml [05/30 10:46:59 d2.utils.env]: Using a generated random seed 59646823

SOLOv2( (backbone): FPN( (fpn_lateral2): Conv2d(256, 256, kernel_size=(1, 1), stride=(1, 1)) (fpn_output2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (fpn_lateral3): Conv2d(512, 256, kernel_size=(1, 1), stride=(1, 1)) (fpn_output3): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (fpn_lateral4): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1)) (fpn_output4): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (fpn_lateral5): Conv2d(2048, 256, kernel_size=(1, 1), stride=(1, 1)) (fpn_output5): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (top_block): LastLevelMaxPool() (bottom_up): ResNet( (stem): BasicStem( (conv1): Conv2d( 3, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False (norm): FrozenBatchNorm2d(num_features=64, eps=1e-05) ) ) (res2): Sequential( (0): BottleneckBlock( (shortcut): Conv2d( 64, 256, kernel_size=(1, 1), stride=(1, 1), bias=False (norm): FrozenBatchNorm2d(num_features=256, eps=1e-05) ) (conv1): Conv2d( 64, 64, kernel_size=(1, 1), stride=(1, 1), bias=False (norm): FrozenBatchNorm2d(num_features=64, eps=1e-05) ) (conv2): Conv2d( 64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False (norm): FrozenBatchNorm2d(num_features=64, eps=1e-05) ) (conv3): Conv2d( 64, 256, kernel_size=(1, 1), stride=(1, 1), bias=False (norm): FrozenBatchNorm2d(num_features=256, eps=1e-05) ) ) (1): BottleneckBlock( (conv1): Conv2d( 256, 64, kernel_size=(1, 1), stride=(1, 1), bias=False (norm): FrozenBatchNorm2d(num_features=64, eps=1e-05) ) (conv2): Conv2d( 64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False (norm): FrozenBatchNorm2d(num_features=64, eps=1e-05) ) (conv3): Conv2d( 64, 256, kernel_size=(1, 1), stride=(1, 1), bias=False (norm): FrozenBatchNorm2d(num_features=256, eps=1e-05) ) ) (2): BottleneckBlock( (conv1): Conv2d( 256, 64, kernel_size=(1, 1), stride=(1, 1), bias=False (norm): FrozenBatchNorm2d(num_features=64, eps=1e-05) ) (conv2): Conv2d( 64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False (norm): FrozenBatchNorm2d(num_features=64, eps=1e-05) ) (conv3): Conv2d( 64, 256, kernel_size=(1, 1), stride=(1, 1), bias=False (norm): FrozenBatchNorm2d(num_features=256, eps=1e-05) ) ) ) (res3): Sequential( (0): BottleneckBlock( (shortcut): Conv2d( 256, 512, kernel_size=(1, 1), stride=(2, 2), bias=False (norm): FrozenBatchNorm2d(num_features=512, eps=1e-05) ) (conv1): Conv2d( 256, 128, kernel_size=(1, 1), stride=(2, 2), bias=False (norm): FrozenBatchNorm2d(num_features=128, eps=1e-05) ) (conv2): Conv2d( 128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False (norm): FrozenBatchNorm2d(num_features=128, eps=1e-05) ) (conv3): Conv2d( 128, 512, kernel_size=(1, 1), stride=(1, 1), bias=False (norm): FrozenBatchNorm2d(num_features=512, eps=1e-05) ) ) (1): BottleneckBlock( (conv1): Conv2d( 512, 128, kernel_size=(1, 1), stride=(1, 1), bias=False (norm): FrozenBatchNorm2d(num_features=128, eps=1e-05) ) (conv2): Conv2d( 128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False (norm): FrozenBatchNorm2d(num_features=128, eps=1e-05) ) (conv3): Conv2d( 128, 512, kernel_size=(1, 1), stride=(1, 1), bias=False (norm): FrozenBatchNorm2d(num_features=512, eps=1e-05) ) ) (2): BottleneckBlock( (conv1): Conv2d( 512, 128, kernel_size=(1, 1), stride=(1, 1), bias=False (norm): FrozenBatchNorm2d(num_features=128, eps=1e-05) ) (conv2): Conv2d( 128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False (norm): 
FrozenBatchNorm2d(num_features=128, eps=1e-05) ) (conv3): Conv2d( 128, 512, kernel_size=(1, 1), stride=(1, 1), bias=False (norm): FrozenBatchNorm2d(num_features=512, eps=1e-05) ) ) (3): BottleneckBlock( (conv1): Conv2d( 512, 128, kernel_size=(1, 1), stride=(1, 1), bias=False (norm): FrozenBatchNorm2d(num_features=128, eps=1e-05) ) (conv2): Conv2d( 128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False (norm): FrozenBatchNorm2d(num_features=128, eps=1e-05) ) (conv3): Conv2d( 128, 512, kernel_size=(1, 1), stride=(1, 1), bias=False (norm): FrozenBatchNorm2d(num_features=512, eps=1e-05) ) ) ) (res4): Sequential( (0): BottleneckBlock( (shortcut): Conv2d( 512, 1024, kernel_size=(1, 1), stride=(2, 2), bias=False (norm): FrozenBatchNorm2d(num_features=1024, eps=1e-05) ) (conv1): Conv2d( 512, 256, kernel_size=(1, 1), stride=(2, 2), bias=False (norm): FrozenBatchNorm2d(num_features=256, eps=1e-05) ) (conv2): Conv2d( 256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False (norm): FrozenBatchNorm2d(num_features=256, eps=1e-05) ) (conv3): Conv2d( 256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False (norm): FrozenBatchNorm2d(num_features=1024, eps=1e-05) ) ) (1): BottleneckBlock( (conv1): Conv2d( 1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False (norm): FrozenBatchNorm2d(num_features=256, eps=1e-05) ) (conv2): Conv2d( 256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False (norm): FrozenBatchNorm2d(num_features=256, eps=1e-05) ) (conv3): Conv2d( 256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False (norm): FrozenBatchNorm2d(num_features=1024, eps=1e-05) ) ) (2): BottleneckBlock( (conv1): Conv2d( 1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False (norm): FrozenBatchNorm2d(num_features=256, eps=1e-05) ) (conv2): Conv2d( 256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False (norm): FrozenBatchNorm2d(num_features=256, eps=1e-05) ) (conv3): Conv2d( 256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False (norm): FrozenBatchNorm2d(num_features=1024, eps=1e-05) ) ) (3): BottleneckBlock( (conv1): Conv2d( 1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False (norm): FrozenBatchNorm2d(num_features=256, eps=1e-05) ) (conv2): Conv2d( 256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False (norm): FrozenBatchNorm2d(num_features=256, eps=1e-05) ) (conv3): Conv2d( 256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False (norm): FrozenBatchNorm2d(num_features=1024, eps=1e-05) ) ) (4): BottleneckBlock( (conv1): Conv2d( 1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False (norm): FrozenBatchNorm2d(num_features=256, eps=1e-05) ) (conv2): Conv2d( 256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False (norm): FrozenBatchNorm2d(num_features=256, eps=1e-05) ) (conv3): Conv2d( 256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False (norm): FrozenBatchNorm2d(num_features=1024, eps=1e-05) ) ) (5): BottleneckBlock( (conv1): Conv2d( 1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False (norm): FrozenBatchNorm2d(num_features=256, eps=1e-05) ) (conv2): Conv2d( 256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False (norm): FrozenBatchNorm2d(num_features=256, eps=1e-05) ) (conv3): Conv2d( 256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False (norm): FrozenBatchNorm2d(num_features=1024, eps=1e-05) ) ) ) (res5): Sequential( (0): BottleneckBlock( (shortcut): Conv2d( 1024, 2048, kernel_size=(1, 1), stride=(2, 2), bias=False (norm): FrozenBatchNorm2d(num_features=2048, eps=1e-05) ) (conv1): 
Conv2d( 1024, 512, kernel_size=(1, 1), stride=(2, 2), bias=False (norm): FrozenBatchNorm2d(num_features=512, eps=1e-05) ) (conv2): Conv2d( 512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False (norm): FrozenBatchNorm2d(num_features=512, eps=1e-05) ) (conv3): Conv2d( 512, 2048, kernel_size=(1, 1), stride=(1, 1), bias=False (norm): FrozenBatchNorm2d(num_features=2048, eps=1e-05) ) ) (1): BottleneckBlock( (conv1): Conv2d( 2048, 512, kernel_size=(1, 1), stride=(1, 1), bias=False (norm): FrozenBatchNorm2d(num_features=512, eps=1e-05) ) (conv2): Conv2d( 512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False (norm): FrozenBatchNorm2d(num_features=512, eps=1e-05) ) (conv3): Conv2d( 512, 2048, kernel_size=(1, 1), stride=(1, 1), bias=False (norm): FrozenBatchNorm2d(num_features=2048, eps=1e-05) ) ) (2): BottleneckBlock( (conv1): Conv2d( 2048, 512, kernel_size=(1, 1), stride=(1, 1), bias=False (norm): FrozenBatchNorm2d(num_features=512, eps=1e-05) ) (conv2): Conv2d( 512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False (norm): FrozenBatchNorm2d(num_features=512, eps=1e-05) ) (conv3): Conv2d( 512, 2048, kernel_size=(1, 1), stride=(1, 1), bias=False (norm): FrozenBatchNorm2d(num_features=2048, eps=1e-05) ) ) ) ) ) (ins_head): SOLOv2InsHead( (cate_tower): Sequential( (0): Conv2d(256, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (1): GroupNorm(32, 512, eps=1e-05, affine=True) (2): ReLU(inplace=True) (3): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (4): GroupNorm(32, 512, eps=1e-05, affine=True) (5): ReLU(inplace=True) (6): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (7): GroupNorm(32, 512, eps=1e-05, affine=True) (8): ReLU(inplace=True) (9): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (10): GroupNorm(32, 512, eps=1e-05, affine=True) (11): ReLU(inplace=True) ) (kernel_tower): Sequential( (0): Conv2d(258, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (1): GroupNorm(32, 512, eps=1e-05, affine=True) (2): ReLU(inplace=True) (3): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (4): GroupNorm(32, 512, eps=1e-05, affine=True) (5): ReLU(inplace=True) (6): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (7): GroupNorm(32, 512, eps=1e-05, affine=True) (8): ReLU(inplace=True) (9): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (10): GroupNorm(32, 512, eps=1e-05, affine=True) (11): ReLU(inplace=True) ) (cate_pred): Conv2d(512, 80, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (kernel_pred): Conv2d(512, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) ) (mask_head): SOLOv2MaskHead( (convs_all_levels): ModuleList( (0): Sequential( (conv0): Sequential( (0): Conv2d(256, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (1): GroupNorm(32, 128, eps=1e-05, affine=True) (2): ReLU() ) ) (1): Sequential( (conv0): Sequential( (0): Conv2d(256, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (1): GroupNorm(32, 128, eps=1e-05, affine=True) (2): ReLU() ) (upsample0): Upsample(scale_factor=2.0, mode=bilinear) ) (2): Sequential( (conv0): Sequential( (0): Conv2d(256, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (1): GroupNorm(32, 128, eps=1e-05, affine=True) (2): ReLU() ) (upsample0): Upsample(scale_factor=2.0, mode=bilinear) (conv1): Sequential( (0): Conv2d(128, 
128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (1): GroupNorm(32, 128, eps=1e-05, affine=True) (2): ReLU() ) (upsample1): Upsample(scale_factor=2.0, mode=bilinear) ) (3): Sequential( (conv0): Sequential( (0): Conv2d(258, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (1): GroupNorm(32, 128, eps=1e-05, affine=True) (2): ReLU() ) (upsample0): Upsample(scale_factor=2.0, mode=bilinear) (conv1): Sequential( (0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (1): GroupNorm(32, 128, eps=1e-05, affine=True) (2): ReLU() ) (upsample1): Upsample(scale_factor=2.0, mode=bilinear) (conv2): Sequential( (0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (1): GroupNorm(32, 128, eps=1e-05, affine=True) (2): ReLU() ) (upsample2): Upsample(scale_factor=2.0, mode=bilinear) ) ) (conv_pred): Sequential( (0): Conv2d(128, 256, kernel_size=(1, 1), stride=(1, 1), bias=False) (1): GroupNorm(32, 256, eps=1e-05, affine=True) (2): ReLU(inplace=True) ) ) ) [05/30 10:47:03 d2.data.dataset_mapper]: [DatasetMapper] Augmentations used in training: [ResizeShortestEdge(short_edge_length=(640, 672, 704, 736, 768, 800), max_size=1333, sample_style='choice'), RandomFlip()] [05/30 10:47:03 adet.data.dataset_mapper]: Rebuilding the augmentations. The previous augmentations will be overridden. [05/30 10:47:03 adet.data.detection_utils]: Augmentations used in training: [ResizeShortestEdge(short_edge_length=(640, 672, 704, 736, 768, 800), max_size=1333, sample_style='choice'), RandomFlip()] [05/30 10:47:03 d2.data.datasets.coco]: Loaded 7 images in COCO format from /home/workspace/xzy/solov2/AdelaiDet-master/datasets/coco/annotations/instances_train2017.json [05/30 10:47:03 d2.data.build]: Removed 0 images with no usable annotations. 7 images left. [05/30 10:47:03 d2.data.build]: Distribution of instances among all 1 categories: category #instances
c 18

[05/30 10:47:03 d2.data.build]: Using training sampler TrainingSampler
[05/30 10:47:03 d2.data.common]: Serializing 7 elements to byte tensors and concatenating them all ...
[05/30 10:47:03 d2.data.common]: Serialized dataset takes 0.01 MiB
[05/30 10:47:04 fvcore.common.checkpoint]: [Checkpointer] Loading from /home/workspace/xzy/solov2/AdelaiDet-master/SOLOv2_R50_3x.pth ...
[05/30 10:47:05 adet.trainer]: Starting training from iteration 0
/root/anaconda3/lib/python3.8/site-packages/torch/nn/functional.py:3060: UserWarning: Default upsampling behavior when mode=bilinear is changed to align_corners=False since 0.4.0. Please specify align_corners=True if the old behavior is desired. See the documentation of nn.Upsample for details.
  warnings.warn("Default upsampling behavior when mode={} is changed "
/root/anaconda3/lib/python3.8/site-packages/torch/nn/functional.py:3103: UserWarning: The default behavior for interpolate/upsample with float scale_factor changed in 1.6.0 to align with other frameworks/libraries, and now uses scale_factor directly, instead of relying on the computed output size. If you wish to restore the old behavior, please set recompute_scale_factor=True. See the documentation of nn.Upsample for details.
  warnings.warn("The default behavior for interpolate/upsample with float scale_factor changed "
/home/workspace/xzy/solov2/AdelaiDet-master/adet/modeling/solov2/solov2.py:188: UserWarning: This overload of nonzero is deprecated:
    nonzero()
Consider using one of the following signatures instead:
    nonzero(*, bool as_tuple) (Triggered internally at /opt/conda/conda-bld/pytorch_1603729096996/work/torch/csrc/utils/python_arg_parser.cpp:882.)
  hit_indices = ((gt_areas >= lower_bound) & (gt_areas <= upper_bound)).nonzero().flatten()
Traceback (most recent call last):
  File "/home/workspace/xzy/solov2/AdelaiDet-master/tools/train_net.py", line 233, in <module>
    launch(
  File "/home/workspace/xzy/detectron2.2.1/detectron2/detectron2/engine/launch.py", line 62, in launch
    main_func(*args)
  File "/home/workspace/xzy/solov2/AdelaiDet-master/tools/train_net.py", line 227, in main
    return trainer.train()
  File "/home/workspace/xzy/solov2/AdelaiDet-master/tools/train_net.py", line 87, in train
    self.train_loop(self.start_iter, self.max_iter)
  File "/home/workspace/xzy/solov2/AdelaiDet-master/tools/train_net.py", line 76, in train_loop
    self.run_step()
  File "/home/workspace/xzy/detectron2.2.1/detectron2/detectron2/engine/defaults.py", line 429, in run_step
    self._trainer.run_step()
  File "/home/workspace/xzy/detectron2.2.1/detectron2/detectron2/engine/train_loop.py", line 228, in run_step
    loss_dict = self.model(data)
  File "/root/anaconda3/lib/python3.8/site-packages/torch/nn/modules/module.py", line 727, in _call_impl
    result = self.forward(*input, **kwargs)
  File "/home/workspace/xzy/solov2/AdelaiDet-master/adet/modeling/solov2/solov2.py", line 137, in forward
    losses = self.loss(cate_pred, kernel_pred, mask_pred, targets)
  File "/home/workspace/xzy/solov2/AdelaiDet-master/adet/modeling/solov2/solov2.py", line 333, in loss
    loss_cate = self.focal_loss_weight * sigmoid_focal_loss_jit(flatten_cate_preds, flatten_cate_labels_oh,
RuntimeError: nvrtc: error: invalid value for --gpu-architecture (-arch)

nvrtc compilation failed:

#define NAN __int_as_float(0x7fffffff)
#define POS_INFINITY __int_as_float(0x7f800000)
#define NEG_INFINITY __int_as_float(0xff800000)

template<typename T>
__device__ T maximum(T a, T b) {
  return isnan(a) ? a : (a > b ? a : b);
}

template<typename T>
__device__ T minimum(T a, T b) {
  return isnan(a) ? a : (a < b ? a : b);
}

extern "C" global void func_1(float t0, float v1, float t2, float t3, float aten_mul_flat, float aten_pow_flat, float aten_add_flat, float aten_add_flat_1, float aten_add_flat_2, float aten_sigmoid_flat) { { float v = __ldg(t3 + 512 blockIdx.x + threadIdx.x); aten_sigmoidflat[512 * blockIdx.x + threadIdx.x] = 1.f / (1.f + (expf(0.f - v))); float t2 = ldg(t2 + 512 blockIdx.x + threadIdx.x); float aten_addflat = aten_add_flat_1[512 blockIdx.x + threadIdx.x]; aten_addflat = (0.f - t2_) + 1.f; aten_add_flat_1[512 blockIdx.x + threadIdx.x] = aten_addflat; float v_1 = __ldg(t3 + 512 blockIdx.x + threadIdx.x); aten_add_flat_2[512 * blockIdx.x + threadIdx.x] = (0.f - 1.f / (1.f + (expf(0.f - v_1)))) + 1.f; float v_2 = ldg(t3 + 512 blockIdx.x + threadIdx.x); float v_3 = __ldg(t3 + 512 blockIdx.x + threadIdx.x); aten_add_flat[512 blockIdx.x + threadIdx.x] = (0.f - ((1.f / (1.f + (expf(0.f - v_2)))) t2_ + ((0.f - 1.f / (1.f + (expf(0.f - v3)))) + 1.f) * ((0.f - t2) + 1.f))) + 1.f; float v_4 = ldg(t3 + 512 blockIdx.x + threadIdx.x); float v_5 = __ldg(t3 + 512 blockIdx.x + threadIdx.x); aten_pow_flat[512 blockIdx.x + threadIdx.x] = powf((0.f - ((1.f / (1.f + (expf(0.f - v_4)))) t2_ + ((0.f - 1.f / (1.f + (expf(0.f - v5)))) + 1.f) * ((0.f - t2) + 1.f))) + 1.f, v1); float v_6 = ldg(t0 + 512 blockIdx.x + threadIdx.x); float v_7 = __ldg(t3 + 512 blockIdx.x + threadIdx.x); float v_8 = __ldg(t3 + 512 blockIdx.x + threadIdx.x); aten_mul_flat[512 blockIdx.x + threadIdx.x] = v_6 (powf((0.f - ((1.f / (1.f + (expf(0.f - v_7)))) t2_ + ((0.f - 1.f / (1.f + (expf(0.f - v8)))) + 1.f) * ((0.f - t2) + 1.f))) + 1.f, v1)); } }

Process finished with exit code 1
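
Note on what the error means: the GPU here is a GeForce RTX 3080 (compute capability 8.6), while the torchvision arch flags in the environment dump stop at 8.0, which suggests the installed PyTorch 1.7.0 / torchvision 0.8.1 wheels were built against a CUDA toolkit that does not know sm_86. When the TorchScript fuser tries to JIT-compile the scripted `sigmoid_focal_loss_jit` kernel for this GPU, the nvrtc bundled with that wheel rejects `--gpu-architecture=compute_86`, which produces exactly this RuntimeError. A small diagnostic sketch (standalone, not part of AdelaiDet) to confirm the mismatch:

```python
# Diagnostic sketch: compare the GPU's compute capability with what the
# installed PyTorch wheel was built for. On an RTX 3080 this prints (8, 6);
# if the wheel's CUDA/arch support does not include 8.6, nvrtc JIT
# compilation can fail as in the traceback above.
import torch

print("torch:", torch.__version__, "| built with CUDA:", torch.version.cuda)
print("device capability:", torch.cuda.get_device_capability(0))
# get_arch_list() is not present in every release, hence the guard.
if hasattr(torch.cuda, "get_arch_list"):
    print("compiled arch list:", torch.cuda.get_arch_list())
```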

xiezhiyu275 commented 3 years ago

Is this related to CUDA?
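
If the arch mismatch described above is the cause, then yes, it is CUDA-related: the PyTorch wheel's bundled nvrtc does not support sm_86, even though the system CUDA 11.1 toolkit does. Two commonly used ways out are upgrading to a PyTorch build compiled with CUDA 11.1+ (which supports sm_86), or keeping the current install and avoiding the nvrtc JIT path. Below is a rough sketch of the second option; which fuser switch applies depends on the PyTorch build, so treat it as an assumption to verify rather than the project's official fix:

```python
# Workaround sketch (assumes the failure comes from the GPU fuser
# JIT-compiling the scripted focal loss for sm_86).

# Option 1: disable GPU kernel fusion before training starts, e.g. near the
# top of tools/train_net.py, so the scripted loss falls back to eager ops.
import torch
torch._C._jit_set_texpr_fuser_enabled(False)   # TensorExpr fuser
torch._C._jit_override_can_fuse_on_gpu(False)  # legacy GPU fuser

# Option 2: in adet/modeling/solov2/solov2.py, replace the scripted loss
# with the plain eager version from fvcore, which never calls nvrtc.
from fvcore.nn import sigmoid_focal_loss  # use instead of sigmoid_focal_loss_jit
```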