eric612 / Caffe-YOLOv3-Windows

A Windows Caffe implementation of the YOLO detection network

Training is OK and the deploy test runs, but the results are not good. #19

Open LEXUSAPI opened 5 years ago

LEXUSAPI commented 5 years ago

Hello! I am using mobilenet_yolov3_lite_solver.prototxt, mobilenet_yolov3_lite_train.prototxt, and mobilenet_yolov3_lite_test.prototxt to produce the caffemodel file. The solver is:

train_net: "models/yolov3/mobilenet_yolov3_lite_train.prototxt"
test_net: "models/yolov3/mobilenet_yolov3_lite_test.prototxt"
test_iter: 4952
test_interval: 1000
base_lr: 0.0005
display: 10
max_iter: 120000
lr_policy: "multistep"
gamma: 0.5
weight_decay: 0.00005
snapshot: 1000
snapshot_prefix: "models/yolov3/mobilenet_yolov3_lite_deploy"
solver_mode: GPU
debug_info: false
snapshot_after_train: true
test_initialization: false
average_loss: 10
stepvalue: 20000
stepvalue: 40000
stepvalue: 60000
iter_size: 4
type: "RMSProp"
eval_type: "detection"
ap_version: "11point"
show_per_class_result: true

The train prototxt (mobilenet_yolov3_lite_train.prototxt):

name: "MobileNet-YOLO"
layer {
  name: "data"
  type: "AnnotatedData"
  top: "data"
  top: "label"
  include { phase: TRAIN }
  transform_param {
    scale: 0.007843
    mirror: true
    mean_value: 127.5
    mean_value: 127.5
    mean_value: 127.5

resize_param {
  prob: 0.1
  resize_mode: WARP
  height: 608
  width: 608
  interp_mode: LINEAR
  interp_mode: AREA
  interp_mode: LANCZOS4
}
resize_param {
  prob: 0.1
  resize_mode: WARP
  height: 416
  width: 416
  interp_mode: LINEAR
  interp_mode: AREA
  interp_mode: LANCZOS4
}
resize_param {
  prob: 0.1
  resize_mode: WARP
  height: 320
  width: 320
  interp_mode: LINEAR
  interp_mode: AREA
  interp_mode: LANCZOS4
}
resize_param {
  prob: 0.1
  resize_mode: WARP
  height: 352
  width: 352
  interp_mode: LINEAR
  interp_mode: AREA
  interp_mode: LANCZOS4
}
resize_param {
  prob: 0.1
  resize_mode: WARP
  height: 384
  width: 384
  interp_mode: LINEAR
  interp_mode: AREA
  interp_mode: LANCZOS4
}
resize_param {
  prob: 0.1
  resize_mode: WARP
  height: 448
  width: 448
  interp_mode: LINEAR
  interp_mode: AREA
  interp_mode: LANCZOS4
}
resize_param {
  prob: 0.1
  resize_mode: WARP
  height: 480
  width: 480
  interp_mode: LINEAR
  interp_mode: AREA
  interp_mode: LANCZOS4
}
resize_param {
  prob: 0.1
  resize_mode: WARP
  height: 512
  width: 512
  interp_mode: LINEAR
  interp_mode: AREA
  interp_mode: LANCZOS4
}
resize_param {
  prob: 0.1
  resize_mode: WARP
  height: 544
  width: 544
  interp_mode: LINEAR
  interp_mode: AREA
  interp_mode: LANCZOS4
}
resize_param {
  prob: 0.1
  resize_mode: WARP
  height: 576
  width: 576
  interp_mode: LINEAR
  interp_mode: AREA
  interp_mode: LANCZOS4
}

emit_constraint {
  emit_type: CENTER
}
distort_param {
  brightness_prob: 0.5
  brightness_delta: 32.0
  contrast_prob: 0.5
  contrast_lower: 0.5
  contrast_upper: 1.5
  hue_prob: 0.5
  hue_delta: 18.0
  saturation_prob: 0.5
  saturation_lower: 0.5
  saturation_upper: 1.5
  random_order_prob: 0.0
}
expand_param {
  prob: 0.5
  max_expand_ratio: 3.0
}

  }
  data_param {
    source: "examples/VOC0712/VOC0712_trainval_lmdb"
    batch_size: 6
    backend: LMDB
  }
  annotated_data_param {
    yolo_data_type: 1
    yolo_data_jitter: 0.3
    label_map_file: "data/VOC0712/labelmap_voc.prototxt"
  }
}

layer { name: "conv0" type: "Convolution" bottom: "data" top: "conv0" param { lr_mult: 0.1 decay_mult: 0.1 } convolution_param { num_output: 32 bias_term: false pad: 1 kernel_size: 3 stride: 2 weight_filler { type: "msra" } } } layer { name: "conv0/bn" type: "BatchNorm" bottom: "conv0" top: "conv0" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } } layer { name: "conv0/scale" type: "Scale" bottom: "conv0" top: "conv0" param { lr_mult: 0.1 decay_mult: 0.0 } param { lr_mult: 0.2 decay_mult: 0.0 } scale_param { filler { value: 1 } bias_term: true bias_filler { value: 0 } } } layer { name: "conv0/relu" type: "ReLU" bottom: "conv0" top: "conv0" } layer { name: "conv1/dw" type: "DepthwiseConvolution" bottom: "conv0" top: "conv1/dw" param { lr_mult: 0.1 decay_mult: 0.1 } convolution_param { num_output: 32 bias_term: false pad: 1 kernel_size: 3 group: 32 engine: CAFFE weight_filler { type: "msra" } } } layer { name: "conv1/dw/bn" type: "BatchNorm" bottom: "conv1/dw" top: "conv1/dw" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } } layer { name: "conv1/dw/scale" type: "Scale" bottom: "conv1/dw" top: "conv1/dw" param { lr_mult: 0.1 decay_mult: 0.0 } param { lr_mult: 0.2 decay_mult: 0.0 } scale_param { filler { value: 1 } bias_term: true bias_filler { value: 0 } } } layer { name: "conv1/dw/relu" type: "ReLU" bottom: "conv1/dw" top: "conv1/dw" } layer { name: "conv1" type: "Convolution" bottom: "conv1/dw" top: "conv1" param { lr_mult: 0.1 decay_mult: 0.1 } convolution_param { num_output: 64 bias_term: false kernel_size: 1 weight_filler { type: "msra" } } } layer { name: "conv1/bn" type: "BatchNorm" bottom: "conv1" top: "conv1" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } } layer { name: "conv1/scale" type: "Scale" bottom: "conv1" top: "conv1" param { lr_mult: 0.1 decay_mult: 0.0 } param { lr_mult: 0.2 decay_mult: 0.0 } scale_param { filler { value: 1 } bias_term: true bias_filler { value: 0 } } } layer { name: "conv1/relu" type: "ReLU" bottom: "conv1" top: "conv1" } layer { name: "conv2/dw" type: "DepthwiseConvolution" bottom: "conv1" top: "conv2/dw" param { lr_mult: 0.1 decay_mult: 0.1 } convolution_param { num_output: 64 bias_term: false pad: 1 kernel_size: 3 stride: 2 group: 64 engine: CAFFE weight_filler { type: "msra" } } } layer { name: "conv2/dw/bn" type: "BatchNorm" bottom: "conv2/dw" top: "conv2/dw" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } } layer { name: "conv2/dw/scale" type: "Scale" bottom: "conv2/dw" top: "conv2/dw" param { lr_mult: 0.1 decay_mult: 0.0 } param { lr_mult: 0.2 decay_mult: 0.0 } scale_param { filler { value: 1 } bias_term: true bias_filler { value: 0 } } } layer { name: "conv2/dw/relu" type: "ReLU" bottom: "conv2/dw" top: "conv2/dw" } layer { name: "conv2" type: "Convolution" bottom: "conv2/dw" top: "conv2" param { lr_mult: 0.1 decay_mult: 0.1 } convolution_param { num_output: 128 bias_term: false kernel_size: 1 weight_filler { type: "msra" } } } layer { name: "conv2/bn" type: "BatchNorm" bottom: "conv2" top: "conv2" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } } layer { name: "conv2/scale" type: "Scale" bottom: "conv2" top: "conv2" param { lr_mult: 0.1 decay_mult: 0.0 } param { lr_mult: 0.2 decay_mult: 0.0 } scale_param { filler { value: 1 } bias_term: true bias_filler { value: 0 } 
} } layer { name: "conv2/relu" type: "ReLU" bottom: "conv2" top: "conv2" } layer { name: "conv3/dw" type: "DepthwiseConvolution" bottom: "conv2" top: "conv3/dw" param { lr_mult: 0.1 decay_mult: 0.1 } convolution_param { num_output: 128 bias_term: false pad: 1 kernel_size: 3 group: 128 engine: CAFFE weight_filler { type: "msra" } } } layer { name: "conv3/dw/bn" type: "BatchNorm" bottom: "conv3/dw" top: "conv3/dw" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } } layer { name: "conv3/dw/scale" type: "Scale" bottom: "conv3/dw" top: "conv3/dw" param { lr_mult: 0.1 decay_mult: 0.0 } param { lr_mult: 0.2 decay_mult: 0.0 } scale_param { filler { value: 1 } bias_term: true bias_filler { value: 0 } } } layer { name: "conv3/dw/relu" type: "ReLU" bottom: "conv3/dw" top: "conv3/dw" } layer { name: "conv3" type: "Convolution" bottom: "conv3/dw" top: "conv3" param { lr_mult: 0.1 decay_mult: 0.1 } convolution_param { num_output: 128 bias_term: false kernel_size: 1 weight_filler { type: "msra" } } } layer { name: "conv3/bn" type: "BatchNorm" bottom: "conv3" top: "conv3" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } } layer { name: "conv3/scale" type: "Scale" bottom: "conv3" top: "conv3" param { lr_mult: 0.1 decay_mult: 0.0 } param { lr_mult: 0.2 decay_mult: 0.0 } scale_param { filler { value: 1 } bias_term: true bias_filler { value: 0 } } } layer { name: "conv3/relu" type: "ReLU" bottom: "conv3" top: "conv3" } layer { name: "conv4/dw" type: "DepthwiseConvolution" bottom: "conv3" top: "conv4/dw" param { lr_mult: 0.1 decay_mult: 0.1 } convolution_param { num_output: 128 bias_term: false pad: 1 kernel_size: 3 stride: 2 group: 128 engine: CAFFE weight_filler { type: "msra" } } } layer { name: "conv4/dw/bn" type: "BatchNorm" bottom: "conv4/dw" top: "conv4/dw" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } } layer { name: "conv4/dw/scale" type: "Scale" bottom: "conv4/dw" top: "conv4/dw" param { lr_mult: 0.1 decay_mult: 0.0 } param { lr_mult: 0.2 decay_mult: 0.0 } scale_param { filler { value: 1 } bias_term: true bias_filler { value: 0 } } } layer { name: "conv4/dw/relu" type: "ReLU" bottom: "conv4/dw" top: "conv4/dw" } layer { name: "conv4" type: "Convolution" bottom: "conv4/dw" top: "conv4" param { lr_mult: 0.1 decay_mult: 0.1 } convolution_param { num_output: 256 bias_term: false kernel_size: 1 weight_filler { type: "msra" } } } layer { name: "conv4/bn" type: "BatchNorm" bottom: "conv4" top: "conv4" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } } layer { name: "conv4/scale" type: "Scale" bottom: "conv4" top: "conv4" param { lr_mult: 0.1 decay_mult: 0.0 } param { lr_mult: 0.2 decay_mult: 0.0 } scale_param { filler { value: 1 } bias_term: true bias_filler { value: 0 } } } layer { name: "conv4/relu" type: "ReLU" bottom: "conv4" top: "conv4" } layer { name: "conv5/dw" type: "DepthwiseConvolution" bottom: "conv4" top: "conv5/dw" param { lr_mult: 0.1 decay_mult: 0.1 } convolution_param { num_output: 256 bias_term: false pad: 1 kernel_size: 3 group: 256 engine: CAFFE weight_filler { type: "msra" } } } layer { name: "conv5/dw/bn" type: "BatchNorm" bottom: "conv5/dw" top: "conv5/dw" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } } layer { name: "conv5/dw/scale" type: "Scale" bottom: "conv5/dw" top: "conv5/dw" param { lr_mult: 0.1 
decay_mult: 0.0 } param { lr_mult: 0.2 decay_mult: 0.0 } scale_param { filler { value: 1 } bias_term: true bias_filler { value: 0 } } } layer { name: "conv5/dw/relu" type: "ReLU" bottom: "conv5/dw" top: "conv5/dw" } layer { name: "conv5" type: "Convolution" bottom: "conv5/dw" top: "conv5" param { lr_mult: 0.1 decay_mult: 0.1 } convolution_param { num_output: 256 bias_term: false kernel_size: 1 weight_filler { type: "msra" } } } layer { name: "conv5/bn" type: "BatchNorm" bottom: "conv5" top: "conv5" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } } layer { name: "conv5/scale" type: "Scale" bottom: "conv5" top: "conv5" param { lr_mult: 0.1 decay_mult: 0.0 } param { lr_mult: 0.2 decay_mult: 0.0 } scale_param { filler { value: 1 } bias_term: true bias_filler { value: 0 } } } layer { name: "conv5/relu" type: "ReLU" bottom: "conv5" top: "conv5" } layer { name: "conv6/dw" type: "DepthwiseConvolution" bottom: "conv5" top: "conv6/dw" param { lr_mult: 0.1 decay_mult: 0.1 } convolution_param { num_output: 256 bias_term: false pad: 1 kernel_size: 3 stride: 2 group: 256 engine: CAFFE weight_filler { type: "msra" } } } layer { name: "conv6/dw/bn" type: "BatchNorm" bottom: "conv6/dw" top: "conv6/dw" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } } layer { name: "conv6/dw/scale" type: "Scale" bottom: "conv6/dw" top: "conv6/dw" param { lr_mult: 0.1 decay_mult: 0.0 } param { lr_mult: 0.2 decay_mult: 0.0 } scale_param { filler { value: 1 } bias_term: true bias_filler { value: 0 } } } layer { name: "conv6/dw/relu" type: "ReLU" bottom: "conv6/dw" top: "conv6/dw" } layer { name: "conv6" type: "Convolution" bottom: "conv6/dw" top: "conv6" param { lr_mult: 0.1 decay_mult: 0.1 } convolution_param { num_output: 512 bias_term: false kernel_size: 1 weight_filler { type: "msra" } } } layer { name: "conv6/bn" type: "BatchNorm" bottom: "conv6" top: "conv6" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } } layer { name: "conv6/scale" type: "Scale" bottom: "conv6" top: "conv6" param { lr_mult: 0.1 decay_mult: 0.0 } param { lr_mult: 0.2 decay_mult: 0.0 } scale_param { filler { value: 1 } bias_term: true bias_filler { value: 0 } } } layer { name: "conv6/relu" type: "ReLU" bottom: "conv6" top: "conv6" } layer { name: "conv7/dw" type: "DepthwiseConvolution" bottom: "conv6" top: "conv7/dw" param { lr_mult: 0.1 decay_mult: 0.1 } convolution_param { num_output: 512 bias_term: false pad: 1 kernel_size: 3 group: 512 engine: CAFFE weight_filler { type: "msra" } } } layer { name: "conv7/dw/bn" type: "BatchNorm" bottom: "conv7/dw" top: "conv7/dw" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } } layer { name: "conv7/dw/scale" type: "Scale" bottom: "conv7/dw" top: "conv7/dw" param { lr_mult: 0.1 decay_mult: 0.0 } param { lr_mult: 0.2 decay_mult: 0.0 } scale_param { filler { value: 1 } bias_term: true bias_filler { value: 0 } } } layer { name: "conv7/dw/relu" type: "ReLU" bottom: "conv7/dw" top: "conv7/dw" } layer { name: "conv7" type: "Convolution" bottom: "conv7/dw" top: "conv7" param { lr_mult: 0.1 decay_mult: 0.1 } convolution_param { num_output: 512 bias_term: false kernel_size: 1 weight_filler { type: "msra" } } } layer { name: "conv7/bn" type: "BatchNorm" bottom: "conv7" top: "conv7" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } } layer { name: 
"conv7/scale" type: "Scale" bottom: "conv7" top: "conv7" param { lr_mult: 0.1 decay_mult: 0.0 } param { lr_mult: 0.2 decay_mult: 0.0 } scale_param { filler { value: 1 } bias_term: true bias_filler { value: 0 } } } layer { name: "conv7/relu" type: "ReLU" bottom: "conv7" top: "conv7" } layer { name: "conv8/dw" type: "DepthwiseConvolution" bottom: "conv7" top: "conv8/dw" param { lr_mult: 0.1 decay_mult: 0.1 } convolution_param { num_output: 512 bias_term: false pad: 1 kernel_size: 3 group: 512 engine: CAFFE weight_filler { type: "msra" } } } layer { name: "conv8/dw/bn" type: "BatchNorm" bottom: "conv8/dw" top: "conv8/dw" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } } layer { name: "conv8/dw/scale" type: "Scale" bottom: "conv8/dw" top: "conv8/dw" param { lr_mult: 0.1 decay_mult: 0.0 } param { lr_mult: 0.2 decay_mult: 0.0 } scale_param { filler { value: 1 } bias_term: true bias_filler { value: 0 } } } layer { name: "conv8/dw/relu" type: "ReLU" bottom: "conv8/dw" top: "conv8/dw" } layer { name: "conv8" type: "Convolution" bottom: "conv8/dw" top: "conv8" param { lr_mult: 0.1 decay_mult: 0.1 } convolution_param { num_output: 512 bias_term: false kernel_size: 1 weight_filler { type: "msra" } } } layer { name: "conv8/bn" type: "BatchNorm" bottom: "conv8" top: "conv8" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } } layer { name: "conv8/scale" type: "Scale" bottom: "conv8" top: "conv8" param { lr_mult: 0.1 decay_mult: 0.0 } param { lr_mult: 0.2 decay_mult: 0.0 } scale_param { filler { value: 1 } bias_term: true bias_filler { value: 0 } } } layer { name: "conv8/relu" type: "ReLU" bottom: "conv8" top: "conv8" } layer { name: "conv9/dw" type: "DepthwiseConvolution" bottom: "conv8" top: "conv9/dw" param { lr_mult: 0.1 decay_mult: 0.1 } convolution_param { num_output: 512 bias_term: false pad: 1 kernel_size: 3 group: 512 engine: CAFFE weight_filler { type: "msra" } } } layer { name: "conv9/dw/bn" type: "BatchNorm" bottom: "conv9/dw" top: "conv9/dw" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } } layer { name: "conv9/dw/scale" type: "Scale" bottom: "conv9/dw" top: "conv9/dw" param { lr_mult: 0.1 decay_mult: 0.0 } param { lr_mult: 0.2 decay_mult: 0.0 } scale_param { filler { value: 1 } bias_term: true bias_filler { value: 0 } } } layer { name: "conv9/dw/relu" type: "ReLU" bottom: "conv9/dw" top: "conv9/dw" } layer { name: "conv9" type: "Convolution" bottom: "conv9/dw" top: "conv9" param { lr_mult: 0.1 decay_mult: 0.1 } convolution_param { num_output: 512 bias_term: false kernel_size: 1 weight_filler { type: "msra" } } } layer { name: "conv9/bn" type: "BatchNorm" bottom: "conv9" top: "conv9" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } } layer { name: "conv9/scale" type: "Scale" bottom: "conv9" top: "conv9" param { lr_mult: 0.1 decay_mult: 0.0 } param { lr_mult: 0.2 decay_mult: 0.0 } scale_param { filler { value: 1 } bias_term: true bias_filler { value: 0 } } } layer { name: "conv9/relu" type: "ReLU" bottom: "conv9" top: "conv9" } layer { name: "conv10/dw" type: "DepthwiseConvolution" bottom: "conv9" top: "conv10/dw" param { lr_mult: 0.1 decay_mult: 0.1 } convolution_param { num_output: 512 bias_term: false pad: 1 kernel_size: 3 group: 512 engine: CAFFE weight_filler { type: "msra" } } } layer { name: "conv10/dw/bn" type: "BatchNorm" bottom: "conv10/dw" top: "conv10/dw" param 
{ lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } } layer { name: "conv10/dw/scale" type: "Scale" bottom: "conv10/dw" top: "conv10/dw" param { lr_mult: 0.1 decay_mult: 0.0 } param { lr_mult: 0.2 decay_mult: 0.0 } scale_param { filler { value: 1 } bias_term: true bias_filler { value: 0 } } } layer { name: "conv10/dw/relu" type: "ReLU" bottom: "conv10/dw" top: "conv10/dw" } layer { name: "conv10" type: "Convolution" bottom: "conv10/dw" top: "conv10" param { lr_mult: 0.1 decay_mult: 0.1 } convolution_param { num_output: 512 bias_term: false kernel_size: 1 weight_filler { type: "msra" } } } layer { name: "conv10/bn" type: "BatchNorm" bottom: "conv10" top: "conv10" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } } layer { name: "conv10/scale" type: "Scale" bottom: "conv10" top: "conv10" param { lr_mult: 0.1 decay_mult: 0.0 } param { lr_mult: 0.2 decay_mult: 0.0 } scale_param { filler { value: 1 } bias_term: true bias_filler { value: 0 } } } layer { name: "conv10/relu" type: "ReLU" bottom: "conv10" top: "conv10" } layer { name: "conv11/dw" type: "DepthwiseConvolution" bottom: "conv10" top: "conv11/dw" param { lr_mult: 0.1 decay_mult: 0.1 } convolution_param { num_output: 512 bias_term: false pad: 1 kernel_size: 3 group: 512 engine: CAFFE weight_filler { type: "msra" } } } layer { name: "conv11/dw/bn" type: "BatchNorm" bottom: "conv11/dw" top: "conv11/dw" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } } layer { name: "conv11/dw/scale" type: "Scale" bottom: "conv11/dw" top: "conv11/dw" param { lr_mult: 0.1 decay_mult: 0.0 } param { lr_mult: 0.2 decay_mult: 0.0 } scale_param { filler { value: 1 } bias_term: true bias_filler { value: 0 } } } layer { name: "conv11/dw/relu" type: "ReLU" bottom: "conv11/dw" top: "conv11/dw" } layer { name: "conv11" type: "Convolution" bottom: "conv11/dw" top: "conv11" param { lr_mult: 0.1 decay_mult: 0.1 } convolution_param { num_output: 512 bias_term: false kernel_size: 1 weight_filler { type: "msra" } } } layer { name: "conv11/bn" type: "BatchNorm" bottom: "conv11" top: "conv11" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } } layer { name: "conv11/scale" type: "Scale" bottom: "conv11" top: "conv11" param { lr_mult: 0.1 decay_mult: 0.0 } param { lr_mult: 0.2 decay_mult: 0.0 } scale_param { filler { value: 1 } bias_term: true bias_filler { value: 0 } } } layer { name: "conv11/relu" type: "ReLU" bottom: "conv11" top: "conv11" } layer { name: "conv12/dw" type: "DepthwiseConvolution" bottom: "conv11" top: "conv12/dw" param { lr_mult: 0.1 decay_mult: 0.1 } convolution_param { num_output: 512 bias_term: false pad: 1 kernel_size: 3 stride: 2 group: 512 engine: CAFFE weight_filler { type: "msra" } } } layer { name: "conv12/dw/bn" type: "BatchNorm" bottom: "conv12/dw" top: "conv12/dw" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } } layer { name: "conv12/dw/scale" type: "Scale" bottom: "conv12/dw" top: "conv12/dw" param { lr_mult: 0.1 decay_mult: 0.0 } param { lr_mult: 0.2 decay_mult: 0.0 } scale_param { filler { value: 1 } bias_term: true bias_filler { value: 0 } } } layer { name: "conv12/dw/relu" type: "ReLU" bottom: "conv12/dw" top: "conv12/dw" } layer { name: "conv12" type: "Convolution" bottom: "conv12/dw" top: "conv12" param { lr_mult: 0.1 decay_mult: 0.1 } convolution_param { num_output: 1024 
bias_term: false kernel_size: 1 weight_filler { type: "msra" } } } layer { name: "conv12/bn" type: "BatchNorm" bottom: "conv12" top: "conv12" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } } layer { name: "conv12/scale" type: "Scale" bottom: "conv12" top: "conv12" param { lr_mult: 0.1 decay_mult: 0.0 } param { lr_mult: 0.2 decay_mult: 0.0 } scale_param { filler { value: 1 } bias_term: true bias_filler { value: 0 } } } layer { name: "conv12/relu" type: "ReLU" bottom: "conv12" top: "conv12" } layer { name: "conv13/dw" type: "DepthwiseConvolution" bottom: "conv12" top: "conv13/dw" param { lr_mult: 0.1 decay_mult: 0.1 } convolution_param { num_output: 1024 bias_term: false pad: 1 kernel_size: 3 group: 1024 engine: CAFFE weight_filler { type: "msra" } } } layer { name: "conv13/dw/bn" type: "BatchNorm" bottom: "conv13/dw" top: "conv13/dw" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } } layer { name: "conv13/dw/scale" type: "Scale" bottom: "conv13/dw" top: "conv13/dw" param { lr_mult: 0.1 decay_mult: 0.0 } param { lr_mult: 0.2 decay_mult: 0.0 } scale_param { filler { value: 1 } bias_term: true bias_filler { value: 0 } } } layer { name: "conv13/dw/relu" type: "ReLU" bottom: "conv13/dw" top: "conv13/dw" } layer { name: "conv13" type: "Convolution" bottom: "conv13/dw" top: "conv13" param { lr_mult: 0.1 decay_mult: 0.1 } convolution_param { num_output: 1024 bias_term: false kernel_size: 1 weight_filler { type: "msra" } } } layer { name: "conv13/bn" type: "BatchNorm" bottom: "conv13" top: "conv13" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } } layer { name: "conv13/scale" type: "Scale" bottom: "conv13" top: "conv13" param { lr_mult: 0.1 decay_mult: 0.0 } param { lr_mult: 0.2 decay_mult: 0.0 } scale_param { filler { value: 1 } bias_term: true bias_filler { value: 0 } } } layer { name: "conv13/relu" type: "ReLU" bottom: "conv13" top: "conv13" }

layer { name: "conv16/dw" type: "DepthwiseConvolution" bottom: "conv13" top: "conv16/dw" param { lr_mult: 1 decay_mult: 1 } convolution_param { num_output: 1024 bias_term: false pad: 1 kernel_size: 3 group: 1024 engine: CAFFE weight_filler { type: "msra" } } } layer { name: "conv16/dw/bn" type: "BatchNorm" bottom: "conv16/dw" top: "conv16/dw" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } } layer { name: "conv16/dw/scale" type: "Scale" bottom: "conv16/dw" top: "conv16/dw" param { lr_mult: 1 decay_mult: 0.0 } param { lr_mult: 2 decay_mult: 0.0 } scale_param { filler { value: 1 } bias_term: true bias_filler { value: 0 } } } layer { name: "conv16/dw/relu" type: "ReLU" bottom: "conv16/dw" top: "conv16/dw" } layer { name: "conv16" type: "Convolution" bottom: "conv16/dw" top: "conv16" param { lr_mult: 1 decay_mult: 1 } convolution_param { num_output: 1024 bias_term: false kernel_size: 1 weight_filler { type: "msra" } } } layer { name: "conv16/bn" type: "BatchNorm" bottom: "conv16" top: "conv16" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } } layer { name: "conv16/scale" type: "Scale" bottom: "conv16" top: "conv16" param { lr_mult: 1 decay_mult: 0.0 } param { lr_mult: 2 decay_mult: 0.0 } scale_param { filler { value: 1 } bias_term: true bias_filler { value: 0 } } } layer { name: "conv16/relu" type: "ReLU" bottom: "conv16" top: "conv16" } layer { name: "upsample" type: "Deconvolution" bottom: "conv16" top: "upsample" param { lr_mult: 0 decay_mult: 0 } convolution_param { num_output: 512 kernel_size: 4 stride: 2 pad: 1 group: 512 weight_filler: { type: "bilinear" } bias_term: false } } layer { name: "conv17/dw" type: "DepthwiseConvolution" bottom: "conv11" top: "conv17/dw" param { lr_mult: 1 decay_mult: 1 } convolution_param { num_output: 512 bias_term: false pad: 1 kernel_size: 3 group: 512 engine: CAFFE weight_filler { type: "msra" } } } layer { name: "conv17/dw/bn" type: "BatchNorm" bottom: "conv17/dw" top: "conv17/dw" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } } layer { name: "conv17/dw/scale" type: "Scale" bottom: "conv17/dw" top: "conv17/dw" param { lr_mult: 1 decay_mult: 0.0 } param { lr_mult: 2 decay_mult: 0.0 } scale_param { filler { value: 1 } bias_term: true bias_filler { value: 0 } } } layer { name: "conv17/dw/relu" type: "ReLU" bottom: "conv17/dw" top: "conv17/dw" } layer { name: "conv17" type: "Convolution" bottom: "conv17/dw" top: "conv17" param { lr_mult: 1 decay_mult: 1 } convolution_param { num_output: 512 bias_term: false kernel_size: 1 weight_filler { type: "msra" } } } layer { name: "conv17/bn" type: "BatchNorm" bottom: "conv17" top: "conv17" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } } layer { name: "conv17/scale" type: "Scale" bottom: "conv17" top: "conv17" param { lr_mult: 1 decay_mult: 0.0 } param { lr_mult: 2 decay_mult: 0.0 } scale_param { filler { value: 1 } bias_term: true bias_filler { value: 0 } } } layer { name: "conv17/relu" type: "ReLU" bottom: "conv17" top: "conv17" } layer { name: "conv17/sum" type: "Eltwise" bottom: "conv17" bottom: "upsample" top: "conv17/sum" } layer { name: "conv18/dw" type: "DepthwiseConvolution" bottom: "conv17/sum" top: "conv18/dw" param { lr_mult: 1 decay_mult: 1 } convolution_param { num_output: 512 bias_term: false pad: 1 kernel_size: 3 group: 512 engine: CAFFE weight_filler { type: "msra" } } } 
layer { name: "conv18/dw/bn" type: "BatchNorm" bottom: "conv18/dw" top: "conv18/dw" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } } layer { name: "conv18/dw/scale" type: "Scale" bottom: "conv18/dw" top: "conv18/dw" param { lr_mult: 1 decay_mult: 0.0 } param { lr_mult: 2 decay_mult: 0.0 } scale_param { filler { value: 1 } bias_term: true bias_filler { value: 0 } } } layer { name: "conv18/dw/relu" type: "ReLU" bottom: "conv18/dw" top: "conv18/dw" } layer { name: "conv18" type: "Convolution" bottom: "conv18/dw" top: "conv18" param { lr_mult: 1 decay_mult: 1 } convolution_param { num_output: 1024 bias_term: false kernel_size: 1 weight_filler { type: "msra" } } } layer { name: "conv18/bn" type: "BatchNorm" bottom: "conv18" top: "conv18" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } } layer { name: "conv18/scale" type: "Scale" bottom: "conv18" top: "conv18" param { lr_mult: 1 decay_mult: 0.0 } param { lr_mult: 2 decay_mult: 0.0 } scale_param { filler { value: 1 } bias_term: true bias_filler { value: 0 } } } layer { name: "conv18/relu" type: "ReLU" bottom: "conv18" top: "conv18" }

layer {
  name: "conv20"
  type: "Convolution"
  bottom: "conv16"
  top: "conv20"
  param { lr_mult: 1 decay_mult: 1 }
  param { lr_mult: 2 decay_mult: 0 }
  convolution_param {
    num_output: 75
    kernel_size: 1
    pad: 0
    stride: 1
    weight_filler { type: "msra" }
    bias_filler { value: 0 }
  }
}

layer {
  name: "conv21"
  type: "Convolution"
  bottom: "conv18"
  top: "conv21"
  param { lr_mult: 1 decay_mult: 1 }
  param { lr_mult: 2 decay_mult: 0 }
  convolution_param {
    num_output: 75
    kernel_size: 1
    pad: 0
    stride: 1
    weight_filler { type: "msra" }
    bias_filler { value: 0 }
  }
}

layer {
  name: "Yolov3Loss1"
  type: "Yolov3"
  bottom: "conv20"
  bottom: "label"
  top: "det_loss1"
  loss_weight: 1
  yolov3_param {
    side: 13
    num_class: 20
    num: 3
    object_scale: 5.0
    noobject_scale: 1.0
    class_scale: 1.0
    coord_scale: 1.0
    thresh: 0.6
    anchors_scale: 32
    use_logic_gradient: false

    # 10,14, 23,27, 37,58, 81,82, 135,169, 344,319

biases: 10
biases: 14
biases: 23
biases: 27
biases: 37
biases: 58
biases: 81
biases: 82
biases: 135
biases: 169
biases: 344
biases: 319

mask:3
mask:4
mask:5

  }
}
layer {
  name: "Yolov3Loss2"
  type: "Yolov3"
  bottom: "conv21"
  bottom: "label"
  top: "det_loss2"
  loss_weight: 1
  yolov3_param {
    side: 26
    num_class: 20
    num: 3
    object_scale: 5.0
    noobject_scale: 1.0
    class_scale: 1.0
    coord_scale: 1.0
    thresh: 0.6
    anchors_scale: 16
    use_logic_gradient: false

    # 10,14, 23,27, 37,58, 81,82, 135,169, 344,319

biases: 10
biases: 14
biases: 23
biases: 27
biases: 37
biases: 58
biases: 81
biases: 82
biases: 135
biases: 169
biases: 344
biases: 319

mask:0
mask:1
mask:2

  }
}

The test prototxt (mobilenet_yolov3_lite_test.prototxt):

name: "MobileNet-YOLO"
layer {
  name: "data"
  type: "AnnotatedData"
  top: "data"
  top: "label"
  include { phase: TEST }
  transform_param {
    scale: 0.007843
    mean_value: 127.5
    mean_value: 127.5
    mean_value: 127.5
    resize_param {
      prob: 1.0
      resize_mode: WARP
      height: 416
      width: 416
      interp_mode: LINEAR
    }
  }
  data_param {
    source: "examples/VOC0712/VOC0712_test_lmdb"
    batch_size: 1
    backend: LMDB
  }
  annotated_data_param {
    batch_sampler {
    }
    label_map_file: "data/VOC0712/labelmap_voc.prototxt"
  }
}

layer { name: "conv0" type: "Convolution" bottom: "data" top: "conv0" param { lr_mult: 0.1 decay_mult: 0.1 } convolution_param { num_output: 32 bias_term: false pad: 1 kernel_size: 3 stride: 2 weight_filler { type: "msra" } } } layer { name: "conv0/bn" type: "BatchNorm" bottom: "conv0" top: "conv0" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } } layer { name: "conv0/scale" type: "Scale" bottom: "conv0" top: "conv0" param { lr_mult: 0.1 decay_mult: 0.0 } param { lr_mult: 0.2 decay_mult: 0.0 } scale_param { filler { value: 1 } bias_term: true bias_filler { value: 0 } } } layer { name: "conv0/relu" type: "ReLU" bottom: "conv0" top: "conv0" } layer { name: "conv1/dw" type: "DepthwiseConvolution" bottom: "conv0" top: "conv1/dw" param { lr_mult: 0.1 decay_mult: 0.1 } convolution_param { num_output: 32 bias_term: false pad: 1 kernel_size: 3 group: 32 engine: CAFFE weight_filler { type: "msra" } } } layer { name: "conv1/dw/bn" type: "BatchNorm" bottom: "conv1/dw" top: "conv1/dw" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } } layer { name: "conv1/dw/scale" type: "Scale" bottom: "conv1/dw" top: "conv1/dw" param { lr_mult: 0.1 decay_mult: 0.0 } param { lr_mult: 0.2 decay_mult: 0.0 } scale_param { filler { value: 1 } bias_term: true bias_filler { value: 0 } } } layer { name: "conv1/dw/relu" type: "ReLU" bottom: "conv1/dw" top: "conv1/dw" } layer { name: "conv1" type: "Convolution" bottom: "conv1/dw" top: "conv1" param { lr_mult: 0.1 decay_mult: 0.1 } convolution_param { num_output: 64 bias_term: false kernel_size: 1 weight_filler { type: "msra" } } } layer { name: "conv1/bn" type: "BatchNorm" bottom: "conv1" top: "conv1" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } } layer { name: "conv1/scale" type: "Scale" bottom: "conv1" top: "conv1" param { lr_mult: 0.1 decay_mult: 0.0 } param { lr_mult: 0.2 decay_mult: 0.0 } scale_param { filler { value: 1 } bias_term: true bias_filler { value: 0 } } } layer { name: "conv1/relu" type: "ReLU" bottom: "conv1" top: "conv1" } layer { name: "conv2/dw" type: "DepthwiseConvolution" bottom: "conv1" top: "conv2/dw" param { lr_mult: 0.1 decay_mult: 0.1 } convolution_param { num_output: 64 bias_term: false pad: 1 kernel_size: 3 stride: 2 group: 64 engine: CAFFE weight_filler { type: "msra" } } } layer { name: "conv2/dw/bn" type: "BatchNorm" bottom: "conv2/dw" top: "conv2/dw" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } } layer { name: "conv2/dw/scale" type: "Scale" bottom: "conv2/dw" top: "conv2/dw" param { lr_mult: 0.1 decay_mult: 0.0 } param { lr_mult: 0.2 decay_mult: 0.0 } scale_param { filler { value: 1 } bias_term: true bias_filler { value: 0 } } } layer { name: "conv2/dw/relu" type: "ReLU" bottom: "conv2/dw" top: "conv2/dw" } layer { name: "conv2" type: "Convolution" bottom: "conv2/dw" top: "conv2" param { lr_mult: 0.1 decay_mult: 0.1 } convolution_param { num_output: 128 bias_term: false kernel_size: 1 weight_filler { type: "msra" } } } layer { name: "conv2/bn" type: "BatchNorm" bottom: "conv2" top: "conv2" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } } layer { name: "conv2/scale" type: "Scale" bottom: "conv2" top: "conv2" param { lr_mult: 0.1 decay_mult: 0.0 } param { lr_mult: 0.2 decay_mult: 0.0 } scale_param { filler { value: 1 } bias_term: true bias_filler { value: 0 } 
} } layer { name: "conv2/relu" type: "ReLU" bottom: "conv2" top: "conv2" } layer { name: "conv3/dw" type: "DepthwiseConvolution" bottom: "conv2" top: "conv3/dw" param { lr_mult: 0.1 decay_mult: 0.1 } convolution_param { num_output: 128 bias_term: false pad: 1 kernel_size: 3 group: 128 engine: CAFFE weight_filler { type: "msra" } } } layer { name: "conv3/dw/bn" type: "BatchNorm" bottom: "conv3/dw" top: "conv3/dw" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } } layer { name: "conv3/dw/scale" type: "Scale" bottom: "conv3/dw" top: "conv3/dw" param { lr_mult: 0.1 decay_mult: 0.0 } param { lr_mult: 0.2 decay_mult: 0.0 } scale_param { filler { value: 1 } bias_term: true bias_filler { value: 0 } } } layer { name: "conv3/dw/relu" type: "ReLU" bottom: "conv3/dw" top: "conv3/dw" } layer { name: "conv3" type: "Convolution" bottom: "conv3/dw" top: "conv3" param { lr_mult: 0.1 decay_mult: 0.1 } convolution_param { num_output: 128 bias_term: false kernel_size: 1 weight_filler { type: "msra" } } } layer { name: "conv3/bn" type: "BatchNorm" bottom: "conv3" top: "conv3" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } } layer { name: "conv3/scale" type: "Scale" bottom: "conv3" top: "conv3" param { lr_mult: 0.1 decay_mult: 0.0 } param { lr_mult: 0.2 decay_mult: 0.0 } scale_param { filler { value: 1 } bias_term: true bias_filler { value: 0 } } } layer { name: "conv3/relu" type: "ReLU" bottom: "conv3" top: "conv3" } layer { name: "conv4/dw" type: "DepthwiseConvolution" bottom: "conv3" top: "conv4/dw" param { lr_mult: 0.1 decay_mult: 0.1 } convolution_param { num_output: 128 bias_term: false pad: 1 kernel_size: 3 stride: 2 group: 128 engine: CAFFE weight_filler { type: "msra" } } } layer { name: "conv4/dw/bn" type: "BatchNorm" bottom: "conv4/dw" top: "conv4/dw" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } } layer { name: "conv4/dw/scale" type: "Scale" bottom: "conv4/dw" top: "conv4/dw" param { lr_mult: 0.1 decay_mult: 0.0 } param { lr_mult: 0.2 decay_mult: 0.0 } scale_param { filler { value: 1 } bias_term: true bias_filler { value: 0 } } } layer { name: "conv4/dw/relu" type: "ReLU" bottom: "conv4/dw" top: "conv4/dw" } layer { name: "conv4" type: "Convolution" bottom: "conv4/dw" top: "conv4" param { lr_mult: 0.1 decay_mult: 0.1 } convolution_param { num_output: 256 bias_term: false kernel_size: 1 weight_filler { type: "msra" } } } layer { name: "conv4/bn" type: "BatchNorm" bottom: "conv4" top: "conv4" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } } layer { name: "conv4/scale" type: "Scale" bottom: "conv4" top: "conv4" param { lr_mult: 0.1 decay_mult: 0.0 } param { lr_mult: 0.2 decay_mult: 0.0 } scale_param { filler { value: 1 } bias_term: true bias_filler { value: 0 } } } layer { name: "conv4/relu" type: "ReLU" bottom: "conv4" top: "conv4" } layer { name: "conv5/dw" type: "DepthwiseConvolution" bottom: "conv4" top: "conv5/dw" param { lr_mult: 0.1 decay_mult: 0.1 } convolution_param { num_output: 256 bias_term: false pad: 1 kernel_size: 3 group: 256 engine: CAFFE weight_filler { type: "msra" } } } layer { name: "conv5/dw/bn" type: "BatchNorm" bottom: "conv5/dw" top: "conv5/dw" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } } layer { name: "conv5/dw/scale" type: "Scale" bottom: "conv5/dw" top: "conv5/dw" param { lr_mult: 0.1 
decay_mult: 0.0 } param { lr_mult: 0.2 decay_mult: 0.0 } scale_param { filler { value: 1 } bias_term: true bias_filler { value: 0 } } } layer { name: "conv5/dw/relu" type: "ReLU" bottom: "conv5/dw" top: "conv5/dw" } layer { name: "conv5" type: "Convolution" bottom: "conv5/dw" top: "conv5" param { lr_mult: 0.1 decay_mult: 0.1 } convolution_param { num_output: 256 bias_term: false kernel_size: 1 weight_filler { type: "msra" } } } layer { name: "conv5/bn" type: "BatchNorm" bottom: "conv5" top: "conv5" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } } layer { name: "conv5/scale" type: "Scale" bottom: "conv5" top: "conv5" param { lr_mult: 0.1 decay_mult: 0.0 } param { lr_mult: 0.2 decay_mult: 0.0 } scale_param { filler { value: 1 } bias_term: true bias_filler { value: 0 } } } layer { name: "conv5/relu" type: "ReLU" bottom: "conv5" top: "conv5" } layer { name: "conv6/dw" type: "DepthwiseConvolution" bottom: "conv5" top: "conv6/dw" param { lr_mult: 0.1 decay_mult: 0.1 } convolution_param { num_output: 256 bias_term: false pad: 1 kernel_size: 3 stride: 2 group: 256 engine: CAFFE weight_filler { type: "msra" } } } layer { name: "conv6/dw/bn" type: "BatchNorm" bottom: "conv6/dw" top: "conv6/dw" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } } layer { name: "conv6/dw/scale" type: "Scale" bottom: "conv6/dw" top: "conv6/dw" param { lr_mult: 0.1 decay_mult: 0.0 } param { lr_mult: 0.2 decay_mult: 0.0 } scale_param { filler { value: 1 } bias_term: true bias_filler { value: 0 } } } layer { name: "conv6/dw/relu" type: "ReLU" bottom: "conv6/dw" top: "conv6/dw" } layer { name: "conv6" type: "Convolution" bottom: "conv6/dw" top: "conv6" param { lr_mult: 0.1 decay_mult: 0.1 } convolution_param { num_output: 512 bias_term: false kernel_size: 1 weight_filler { type: "msra" } } } layer { name: "conv6/bn" type: "BatchNorm" bottom: "conv6" top: "conv6" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } } layer { name: "conv6/scale" type: "Scale" bottom: "conv6" top: "conv6" param { lr_mult: 0.1 decay_mult: 0.0 } param { lr_mult: 0.2 decay_mult: 0.0 } scale_param { filler { value: 1 } bias_term: true bias_filler { value: 0 } } } layer { name: "conv6/relu" type: "ReLU" bottom: "conv6" top: "conv6" } layer { name: "conv7/dw" type: "DepthwiseConvolution" bottom: "conv6" top: "conv7/dw" param { lr_mult: 0.1 decay_mult: 0.1 } convolution_param { num_output: 512 bias_term: false pad: 1 kernel_size: 3 group: 512 engine: CAFFE weight_filler { type: "msra" } } } layer { name: "conv7/dw/bn" type: "BatchNorm" bottom: "conv7/dw" top: "conv7/dw" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } } layer { name: "conv7/dw/scale" type: "Scale" bottom: "conv7/dw" top: "conv7/dw" param { lr_mult: 0.1 decay_mult: 0.0 } param { lr_mult: 0.2 decay_mult: 0.0 } scale_param { filler { value: 1 } bias_term: true bias_filler { value: 0 } } } layer { name: "conv7/dw/relu" type: "ReLU" bottom: "conv7/dw" top: "conv7/dw" } layer { name: "conv7" type: "Convolution" bottom: "conv7/dw" top: "conv7" param { lr_mult: 0.1 decay_mult: 0.1 } convolution_param { num_output: 512 bias_term: false kernel_size: 1 weight_filler { type: "msra" } } } layer { name: "conv7/bn" type: "BatchNorm" bottom: "conv7" top: "conv7" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } } layer { name: 
"conv7/scale" type: "Scale" bottom: "conv7" top: "conv7" param { lr_mult: 0.1 decay_mult: 0.0 } param { lr_mult: 0.2 decay_mult: 0.0 } scale_param { filler { value: 1 } bias_term: true bias_filler { value: 0 } } } layer { name: "conv7/relu" type: "ReLU" bottom: "conv7" top: "conv7" } layer { name: "conv8/dw" type: "DepthwiseConvolution" bottom: "conv7" top: "conv8/dw" param { lr_mult: 0.1 decay_mult: 0.1 } convolution_param { num_output: 512 bias_term: false pad: 1 kernel_size: 3 group: 512 engine: CAFFE weight_filler { type: "msra" } } } layer { name: "conv8/dw/bn" type: "BatchNorm" bottom: "conv8/dw" top: "conv8/dw" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } } layer { name: "conv8/dw/scale" type: "Scale" bottom: "conv8/dw" top: "conv8/dw" param { lr_mult: 0.1 decay_mult: 0.0 } param { lr_mult: 0.2 decay_mult: 0.0 } scale_param { filler { value: 1 } bias_term: true bias_filler { value: 0 } } } layer { name: "conv8/dw/relu" type: "ReLU" bottom: "conv8/dw" top: "conv8/dw" } layer { name: "conv8" type: "Convolution" bottom: "conv8/dw" top: "conv8" param { lr_mult: 0.1 decay_mult: 0.1 } convolution_param { num_output: 512 bias_term: false kernel_size: 1 weight_filler { type: "msra" } } } layer { name: "conv8/bn" type: "BatchNorm" bottom: "conv8" top: "conv8" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } } layer { name: "conv8/scale" type: "Scale" bottom: "conv8" top: "conv8" param { lr_mult: 0.1 decay_mult: 0.0 } param { lr_mult: 0.2 decay_mult: 0.0 } scale_param { filler { value: 1 } bias_term: true bias_filler { value: 0 } } } layer { name: "conv8/relu" type: "ReLU" bottom: "conv8" top: "conv8" } layer { name: "conv9/dw" type: "DepthwiseConvolution" bottom: "conv8" top: "conv9/dw" param { lr_mult: 0.1 decay_mult: 0.1 } convolution_param { num_output: 512 bias_term: false pad: 1 kernel_size: 3 group: 512 engine: CAFFE weight_filler { type: "msra" } } } layer { name: "conv9/dw/bn" type: "BatchNorm" bottom: "conv9/dw" top: "conv9/dw" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } } layer { name: "conv9/dw/scale" type: "Scale" bottom: "conv9/dw" top: "conv9/dw" param { lr_mult: 0.1 decay_mult: 0.0 } param { lr_mult: 0.2 decay_mult: 0.0 } scale_param { filler { value: 1 } bias_term: true bias_filler { value: 0 } } } layer { name: "conv9/dw/relu" type: "ReLU" bottom: "conv9/dw" top: "conv9/dw" } layer { name: "conv9" type: "Convolution" bottom: "conv9/dw" top: "conv9" param { lr_mult: 0.1 decay_mult: 0.1 } convolution_param { num_output: 512 bias_term: false kernel_size: 1 weight_filler { type: "msra" } } } layer { name: "conv9/bn" type: "BatchNorm" bottom: "conv9" top: "conv9" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } } layer { name: "conv9/scale" type: "Scale" bottom: "conv9" top: "conv9" param { lr_mult: 0.1 decay_mult: 0.0 } param { lr_mult: 0.2 decay_mult: 0.0 } scale_param { filler { value: 1 } bias_term: true bias_filler { value: 0 } } } layer { name: "conv9/relu" type: "ReLU" bottom: "conv9" top: "conv9" } layer { name: "conv10/dw" type: "DepthwiseConvolution" bottom: "conv9" top: "conv10/dw" param { lr_mult: 0.1 decay_mult: 0.1 } convolution_param { num_output: 512 bias_term: false pad: 1 kernel_size: 3 group: 512 engine: CAFFE weight_filler { type: "msra" } } } layer { name: "conv10/dw/bn" type: "BatchNorm" bottom: "conv10/dw" top: "conv10/dw" param 
{ lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } } layer { name: "conv10/dw/scale" type: "Scale" bottom: "conv10/dw" top: "conv10/dw" param { lr_mult: 0.1 decay_mult: 0.0 } param { lr_mult: 0.2 decay_mult: 0.0 } scale_param { filler { value: 1 } bias_term: true bias_filler { value: 0 } } } layer { name: "conv10/dw/relu" type: "ReLU" bottom: "conv10/dw" top: "conv10/dw" } layer { name: "conv10" type: "Convolution" bottom: "conv10/dw" top: "conv10" param { lr_mult: 0.1 decay_mult: 0.1 } convolution_param { num_output: 512 bias_term: false kernel_size: 1 weight_filler { type: "msra" } } } layer { name: "conv10/bn" type: "BatchNorm" bottom: "conv10" top: "conv10" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } } layer { name: "conv10/scale" type: "Scale" bottom: "conv10" top: "conv10" param { lr_mult: 0.1 decay_mult: 0.0 } param { lr_mult: 0.2 decay_mult: 0.0 } scale_param { filler { value: 1 } bias_term: true bias_filler { value: 0 } } } layer { name: "conv10/relu" type: "ReLU" bottom: "conv10" top: "conv10" } layer { name: "conv11/dw" type: "DepthwiseConvolution" bottom: "conv10" top: "conv11/dw" param { lr_mult: 0.1 decay_mult: 0.1 } convolution_param { num_output: 512 bias_term: false pad: 1 kernel_size: 3 group: 512 engine: CAFFE weight_filler { type: "msra" } } } layer { name: "conv11/dw/bn" type: "BatchNorm" bottom: "conv11/dw" top: "conv11/dw" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } } layer { name: "conv11/dw/scale" type: "Scale" bottom: "conv11/dw" top: "conv11/dw" param { lr_mult: 0.1 decay_mult: 0.0 } param { lr_mult: 0.2 decay_mult: 0.0 } scale_param { filler { value: 1 } bias_term: true bias_filler { value: 0 } } } layer { name: "conv11/dw/relu" type: "ReLU" bottom: "conv11/dw" top: "conv11/dw" } layer { name: "conv11" type: "Convolution" bottom: "conv11/dw" top: "conv11" param { lr_mult: 0.1 decay_mult: 0.1 } convolution_param { num_output: 512 bias_term: false kernel_size: 1 weight_filler { type: "msra" } } } layer { name: "conv11/bn" type: "BatchNorm" bottom: "conv11" top: "conv11" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } } layer { name: "conv11/scale" type: "Scale" bottom: "conv11" top: "conv11" param { lr_mult: 0.1 decay_mult: 0.0 } param { lr_mult: 0.2 decay_mult: 0.0 } scale_param { filler { value: 1 } bias_term: true bias_filler { value: 0 } } } layer { name: "conv11/relu" type: "ReLU" bottom: "conv11" top: "conv11" } layer { name: "conv12/dw" type: "DepthwiseConvolution" bottom: "conv11" top: "conv12/dw" param { lr_mult: 0.1 decay_mult: 0.1 } convolution_param { num_output: 512 bias_term: false pad: 1 kernel_size: 3 stride: 2 group: 512 engine: CAFFE weight_filler { type: "msra" } } } layer { name: "conv12/dw/bn" type: "BatchNorm" bottom: "conv12/dw" top: "conv12/dw" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } } layer { name: "conv12/dw/scale" type: "Scale" bottom: "conv12/dw" top: "conv12/dw" param { lr_mult: 0.1 decay_mult: 0.0 } param { lr_mult: 0.2 decay_mult: 0.0 } scale_param { filler { value: 1 } bias_term: true bias_filler { value: 0 } } } layer { name: "conv12/dw/relu" type: "ReLU" bottom: "conv12/dw" top: "conv12/dw" } layer { name: "conv12" type: "Convolution" bottom: "conv12/dw" top: "conv12" param { lr_mult: 0.1 decay_mult: 0.1 } convolution_param { num_output: 1024 
bias_term: false kernel_size: 1 weight_filler { type: "msra" } } } layer { name: "conv12/bn" type: "BatchNorm" bottom: "conv12" top: "conv12" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } } layer { name: "conv12/scale" type: "Scale" bottom: "conv12" top: "conv12" param { lr_mult: 0.1 decay_mult: 0.0 } param { lr_mult: 0.2 decay_mult: 0.0 } scale_param { filler { value: 1 } bias_term: true bias_filler { value: 0 } } } layer { name: "conv12/relu" type: "ReLU" bottom: "conv12" top: "conv12" } layer { name: "conv13/dw" type: "DepthwiseConvolution" bottom: "conv12" top: "conv13/dw" param { lr_mult: 0.1 decay_mult: 0.1 } convolution_param { num_output: 1024 bias_term: false pad: 1 kernel_size: 3 group: 1024 engine: CAFFE weight_filler { type: "msra" } } } layer { name: "conv13/dw/bn" type: "BatchNorm" bottom: "conv13/dw" top: "conv13/dw" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } } layer { name: "conv13/dw/scale" type: "Scale" bottom: "conv13/dw" top: "conv13/dw" param { lr_mult: 0.1 decay_mult: 0.0 } param { lr_mult: 0.2 decay_mult: 0.0 } scale_param { filler { value: 1 } bias_term: true bias_filler { value: 0 } } } layer { name: "conv13/dw/relu" type: "ReLU" bottom: "conv13/dw" top: "conv13/dw" } layer { name: "conv13" type: "Convolution" bottom: "conv13/dw" top: "conv13" param { lr_mult: 0.1 decay_mult: 0.1 } convolution_param { num_output: 1024 bias_term: false kernel_size: 1 weight_filler { type: "msra" } } } layer { name: "conv13/bn" type: "BatchNorm" bottom: "conv13" top: "conv13" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } } layer { name: "conv13/scale" type: "Scale" bottom: "conv13" top: "conv13" param { lr_mult: 0.1 decay_mult: 0.0 } param { lr_mult: 0.2 decay_mult: 0.0 } scale_param { filler { value: 1 } bias_term: true bias_filler { value: 0 } } } layer { name: "conv13/relu" type: "ReLU" bottom: "conv13" top: "conv13" }

layer { name: "conv16/dw" type: "DepthwiseConvolution" bottom: "conv13" top: "conv16/dw" param { lr_mult: 1 decay_mult: 1 } convolution_param { num_output: 1024 bias_term: false pad: 1 kernel_size: 3 group: 1024 engine: CAFFE weight_filler { type: "msra" } } } layer { name: "conv16/dw/bn" type: "BatchNorm" bottom: "conv16/dw" top: "conv16/dw" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } } layer { name: "conv16/dw/scale" type: "Scale" bottom: "conv16/dw" top: "conv16/dw" param { lr_mult: 1 decay_mult: 0.0 } param { lr_mult: 2 decay_mult: 0.0 } scale_param { filler { value: 1 } bias_term: true bias_filler { value: 0 } } } layer { name: "conv16/dw/relu" type: "ReLU" bottom: "conv16/dw" top: "conv16/dw" } layer { name: "conv16" type: "Convolution" bottom: "conv16/dw" top: "conv16" param { lr_mult: 1 decay_mult: 1 } convolution_param { num_output: 1024 bias_term: false kernel_size: 1 weight_filler { type: "msra" } } } layer { name: "conv16/bn" type: "BatchNorm" bottom: "conv16" top: "conv16" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } } layer { name: "conv16/scale" type: "Scale" bottom: "conv16" top: "conv16" param { lr_mult: 1 decay_mult: 0.0 } param { lr_mult: 2 decay_mult: 0.0 } scale_param { filler { value: 1 } bias_term: true bias_filler { value: 0 } } } layer { name: "conv16/relu" type: "ReLU" bottom: "conv16" top: "conv16" } layer { name: "upsample" type: "Deconvolution" bottom: "conv16" top: "upsample" param { lr_mult: 0 decay_mult: 0 } convolution_param { num_output: 512 kernel_size: 4 stride: 2 pad: 1 group: 512 weight_filler: { type: "bilinear" } bias_term: false } } layer { name: "conv17/dw" type: "DepthwiseConvolution" bottom: "conv11" top: "conv17/dw" param { lr_mult: 1 decay_mult: 1 } convolution_param { num_output: 512 bias_term: false pad: 1 kernel_size: 3 group: 512 engine: CAFFE weight_filler { type: "msra" } } } layer { name: "conv17/dw/bn" type: "BatchNorm" bottom: "conv17/dw" top: "conv17/dw" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } } layer { name: "conv17/dw/scale" type: "Scale" bottom: "conv17/dw" top: "conv17/dw" param { lr_mult: 1 decay_mult: 0.0 } param { lr_mult: 2 decay_mult: 0.0 } scale_param { filler { value: 1 } bias_term: true bias_filler { value: 0 } } } layer { name: "conv17/dw/relu" type: "ReLU" bottom: "conv17/dw" top: "conv17/dw" } layer { name: "conv17" type: "Convolution" bottom: "conv17/dw" top: "conv17" param { lr_mult: 1 decay_mult: 1 } convolution_param { num_output: 512 bias_term: false kernel_size: 1 weight_filler { type: "msra" } } } layer { name: "conv17/bn" type: "BatchNorm" bottom: "conv17" top: "conv17" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } } layer { name: "conv17/scale" type: "Scale" bottom: "conv17" top: "conv17" param { lr_mult: 1 decay_mult: 0.0 } param { lr_mult: 2 decay_mult: 0.0 } scale_param { filler { value: 1 } bias_term: true bias_filler { value: 0 } } } layer { name: "conv17/relu" type: "ReLU" bottom: "conv17" top: "conv17" } layer { name: "conv17/sum" type: "Eltwise" bottom: "conv17" bottom: "upsample" top: "conv17/sum" } layer { name: "conv18/dw" type: "DepthwiseConvolution" bottom: "conv17/sum" top: "conv18/dw" param { lr_mult: 1 decay_mult: 1 } convolution_param { num_output: 512 bias_term: false pad: 1 kernel_size: 3 group: 512 engine: CAFFE weight_filler { type: "msra" } } } 
layer { name: "conv18/dw/bn" type: "BatchNorm" bottom: "conv18/dw" top: "conv18/dw" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } } layer { name: "conv18/dw/scale" type: "Scale" bottom: "conv18/dw" top: "conv18/dw" param { lr_mult: 1 decay_mult: 0.0 } param { lr_mult: 2 decay_mult: 0.0 } scale_param { filler { value: 1 } bias_term: true bias_filler { value: 0 } } } layer { name: "conv18/dw/relu" type: "ReLU" bottom: "conv18/dw" top: "conv18/dw" } layer { name: "conv18" type: "Convolution" bottom: "conv18/dw" top: "conv18" param { lr_mult: 1 decay_mult: 1 } convolution_param { num_output: 1024 bias_term: false kernel_size: 1 weight_filler { type: "msra" } } } layer { name: "conv18/bn" type: "BatchNorm" bottom: "conv18" top: "conv18" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } } layer { name: "conv18/scale" type: "Scale" bottom: "conv18" top: "conv18" param { lr_mult: 1 decay_mult: 0.0 } param { lr_mult: 2 decay_mult: 0.0 } scale_param { filler { value: 1 } bias_term: true bias_filler { value: 0 } } } layer { name: "conv18/relu" type: "ReLU" bottom: "conv18" top: "conv18" }

layer {
  name: "conv20"
  type: "Convolution"
  bottom: "conv16"
  top: "conv20"
  param { lr_mult: 1 decay_mult: 1 }
  param { lr_mult: 2 decay_mult: 0 }
  convolution_param {
    num_output: 75
    kernel_size: 1
    pad: 0
    stride: 1
    weight_filler { type: "msra" }
    bias_filler { value: 0 }
  }
}

layer {
  name: "conv21"
  type: "Convolution"
  bottom: "conv18"
  top: "conv21"
  param { lr_mult: 1 decay_mult: 1 }
  param { lr_mult: 2 decay_mult: 0 }
  convolution_param {
    num_output: 75
    kernel_size: 1
    pad: 0
    stride: 1
    weight_filler { type: "msra" }
    bias_filler { value: 0 }
  }
}

layer {
  name: "detection_out"
  type: "Yolov3DetectionOutput"
  bottom: "conv20"
  bottom: "conv21"
  top: "detection_out"
  include { phase: TEST }
  yolov3_detection_output_param {
    confidence_threshold: 0.01
    nms_threshold: 0.55
    num_classes: 20

#10,14,  23,27,  37,58,  81,82,  135,169,  344,319
biases: 10
biases: 14
biases: 23
biases: 27
biases: 37
biases: 58
biases: 81
biases: 82
biases: 135
biases: 169
biases: 344
biases: 319

mask:3
mask:4
mask:5  
mask:0
mask:1
mask:2
anchors_scale:32
anchors_scale:16
mask_group_num:2

  }
}
layer {
  name: "detection_eval"
  type: "DetectionEvaluate"
  bottom: "detection_out"
  bottom: "label"
  top: "detection_eval"
  include { phase: TEST }
  detection_evaluate_param {
    num_classes: 21
    background_label_id: 0
    overlap_threshold: 0.5
    evaluate_difficult_gt: false
  }
}

Here I am not training from a pretrained caffemodel; I just call the caffe tool with argc = 3, argv[1] = "train", argv[2] = "--solver=***.prototxt".
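For reference, the same run can be expressed through pycaffe. This is only a sketch, assuming the Python bindings from this build are importable; the solver path is guessed from the file names quoted above, and the logic simply mirrors the command-line invocation described here.

# Sketch: launch the same training run via pycaffe instead of the caffe tool.
# Assumption: pycaffe from this build is on PYTHONPATH, and the solver lives
# next to the train/test prototxt files (path inferred, not confirmed).
import caffe

caffe.set_mode_gpu()
caffe.set_device(0)

# get_solver reads the solver type (RMSProp) from the prototxt itself.
solver = caffe.get_solver('models/yolov3/mobilenet_yolov3_lite_solver.prototxt')

# Training from scratch, as in this issue; to resume instead, restore a snapshot:
# solver.restore('models/yolov3/mobilenet_yolov3_lite_deploy_iter_1000.solverstate')

solver.solve()  # runs to max_iter (120000), snapshotting every 1000 iterations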

When I test the trained model it runs OK, but the results are not good. Here I did not merge the BatchNorm layers into the deploy model and did not change the model weights. Is there some error in doing it this way?
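On the BN point: "merge bn" normally means folding each BatchNorm + Scale pair into the preceding convolution, so the deploy weights already contain the BN statistics. Below is a minimal pycaffe sketch of that folding, only to illustrate the math. The BN-free deploy prototxt name, the snapshot caffemodel name, and the eps value are illustrative assumptions on my part, not taken from this issue; if the repository provides its own merge script, that should be used instead.

# Sketch: fold BatchNorm + Scale into the preceding convolution.
# Assumptions (hypothetical, not from this issue):
#  - mobilenet_yolov3_lite_deploy_nobn.prototxt is a copy of the deploy file
#    with every "X/bn" and "X/scale" layer removed and bias_term: true on
#    every convolution that used to be followed by BN.
#  - The trained caffemodel name is snapshot_prefix plus "_iter_120000".
#  - eps matches Caffe's BatchNorm default of 1e-5.
import numpy as np
import caffe

EPS = 1e-5

src = caffe.Net('mobilenet_yolov3_lite_deploy.prototxt',
                'mobilenet_yolov3_lite_deploy_iter_120000.caffemodel', caffe.TEST)
dst = caffe.Net('mobilenet_yolov3_lite_deploy_nobn.prototxt', caffe.TEST)

for name in dst.params:
    if name + '/bn' not in src.params:
        # Layers without a BN pair (e.g. conv20, conv21, upsample): copy as-is.
        for i, blob in enumerate(src.params[name]):
            dst.params[name][i].data[...] = blob.data
        continue

    w = src.params[name][0].data                  # conv weights (bias_term was false)
    bn = src.params[name + '/bn']                 # blobs: mean, variance, scale factor
    sc = src.params[name + '/scale']              # blobs: gamma, beta

    factor = bn[2].data[0] if bn[2].data[0] != 0 else 1.0
    mean = bn[0].data / factor
    var = bn[1].data / factor
    gamma, beta = sc[0].data, sc[1].data

    std = np.sqrt(var + EPS)
    dst.params[name][0].data[...] = w * (gamma / std).reshape(-1, 1, 1, 1)
    dst.params[name][1].data[...] = beta - gamma * mean / std

dst.save('mobilenet_yolov3_lite_deploy_nobn.caffemodel')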

The following is the deploy file:

name: "MobileNet-YOLO"
layer {
  name: "data"
  type: "Input"
  top: "data"
  input_param {
    shape { dim: 1 dim: 3 dim: 320 dim: 320 }
  }
}

layer { name: "conv0" type: "Convolution" bottom: "data" top: "conv0" param { lr_mult: 0.1 decay_mult: 0.1 } convolution_param { num_output: 32 bias_term: false pad: 1 kernel_size: 3 stride: 2 weight_filler { type: "msra" } } } layer { name: "conv0/bn" type: "BatchNorm" bottom: "conv0" top: "conv0" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } } layer { name: "conv0/scale" type: "Scale" bottom: "conv0" top: "conv0" param { lr_mult: 0.1 decay_mult: 0.0 } param { lr_mult: 0.2 decay_mult: 0.0 } scale_param { filler { value: 1 } bias_term: true bias_filler { value: 0 } } } layer { name: "conv0/relu" type: "ReLU" bottom: "conv0" top: "conv0" } layer { name: "conv1/dw" type: "DepthwiseConvolution" bottom: "conv0" top: "conv1/dw" param { lr_mult: 0.1 decay_mult: 0.1 } convolution_param { num_output: 32 bias_term: false pad: 1 kernel_size: 3 group: 32 engine: CAFFE weight_filler { type: "msra" } } } layer { name: "conv1/dw/bn" type: "BatchNorm" bottom: "conv1/dw" top: "conv1/dw" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } } layer { name: "conv1/dw/scale" type: "Scale" bottom: "conv1/dw" top: "conv1/dw" param { lr_mult: 0.1 decay_mult: 0.0 } param { lr_mult: 0.2 decay_mult: 0.0 } scale_param { filler { value: 1 } bias_term: true bias_filler { value: 0 } } } layer { name: "conv1/dw/relu" type: "ReLU" bottom: "conv1/dw" top: "conv1/dw" } layer { name: "conv1" type: "Convolution" bottom: "conv1/dw" top: "conv1" param { lr_mult: 0.1 decay_mult: 0.1 } convolution_param { num_output: 64 bias_term: false kernel_size: 1 weight_filler { type: "msra" } } } layer { name: "conv1/bn" type: "BatchNorm" bottom: "conv1" top: "conv1" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } } layer { name: "conv1/scale" type: "Scale" bottom: "conv1" top: "conv1" param { lr_mult: 0.1 decay_mult: 0.0 } param { lr_mult: 0.2 decay_mult: 0.0 } scale_param { filler { value: 1 } bias_term: true bias_filler { value: 0 } } } layer { name: "conv1/relu" type: "ReLU" bottom: "conv1" top: "conv1" } layer { name: "conv2/dw" type: "DepthwiseConvolution" bottom: "conv1" top: "conv2/dw" param { lr_mult: 0.1 decay_mult: 0.1 } convolution_param { num_output: 64 bias_term: false pad: 1 kernel_size: 3 stride: 2 group: 64 engine: CAFFE weight_filler { type: "msra" } } } layer { name: "conv2/dw/bn" type: "BatchNorm" bottom: "conv2/dw" top: "conv2/dw" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } } layer { name: "conv2/dw/scale" type: "Scale" bottom: "conv2/dw" top: "conv2/dw" param { lr_mult: 0.1 decay_mult: 0.0 } param { lr_mult: 0.2 decay_mult: 0.0 } scale_param { filler { value: 1 } bias_term: true bias_filler { value: 0 } } } layer { name: "conv2/dw/relu" type: "ReLU" bottom: "conv2/dw" top: "conv2/dw" } layer { name: "conv2" type: "Convolution" bottom: "conv2/dw" top: "conv2" param { lr_mult: 0.1 decay_mult: 0.1 } convolution_param { num_output: 128 bias_term: false kernel_size: 1 weight_filler { type: "msra" } } } layer { name: "conv2/bn" type: "BatchNorm" bottom: "conv2" top: "conv2" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } } layer { name: "conv2/scale" type: "Scale" bottom: "conv2" top: "conv2" param { lr_mult: 0.1 decay_mult: 0.0 } param { lr_mult: 0.2 decay_mult: 0.0 } scale_param { filler { value: 1 } bias_term: true bias_filler { value: 0 } 
} } layer { name: "conv2/relu" type: "ReLU" bottom: "conv2" top: "conv2" } layer { name: "conv3/dw" type: "DepthwiseConvolution" bottom: "conv2" top: "conv3/dw" param { lr_mult: 0.1 decay_mult: 0.1 } convolution_param { num_output: 128 bias_term: false pad: 1 kernel_size: 3 group: 128 engine: CAFFE weight_filler { type: "msra" } } } layer { name: "conv3/dw/bn" type: "BatchNorm" bottom: "conv3/dw" top: "conv3/dw" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } } layer { name: "conv3/dw/scale" type: "Scale" bottom: "conv3/dw" top: "conv3/dw" param { lr_mult: 0.1 decay_mult: 0.0 } param { lr_mult: 0.2 decay_mult: 0.0 } scale_param { filler { value: 1 } bias_term: true bias_filler { value: 0 } } } layer { name: "conv3/dw/relu" type: "ReLU" bottom: "conv3/dw" top: "conv3/dw" } layer { name: "conv3" type: "Convolution" bottom: "conv3/dw" top: "conv3" param { lr_mult: 0.1 decay_mult: 0.1 } convolution_param { num_output: 128 bias_term: false kernel_size: 1 weight_filler { type: "msra" } } } layer { name: "conv3/bn" type: "BatchNorm" bottom: "conv3" top: "conv3" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } } layer { name: "conv3/scale" type: "Scale" bottom: "conv3" top: "conv3" param { lr_mult: 0.1 decay_mult: 0.0 } param { lr_mult: 0.2 decay_mult: 0.0 } scale_param { filler { value: 1 } bias_term: true bias_filler { value: 0 } } } layer { name: "conv3/relu" type: "ReLU" bottom: "conv3" top: "conv3" } layer { name: "conv4/dw" type: "DepthwiseConvolution" bottom: "conv3" top: "conv4/dw" param { lr_mult: 0.1 decay_mult: 0.1 } convolution_param { num_output: 128 bias_term: false pad: 1 kernel_size: 3 stride: 2 group: 128 engine: CAFFE weight_filler { type: "msra" } } } layer { name: "conv4/dw/bn" type: "BatchNorm" bottom: "conv4/dw" top: "conv4/dw" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } } layer { name: "conv4/dw/scale" type: "Scale" bottom: "conv4/dw" top: "conv4/dw" param { lr_mult: 0.1 decay_mult: 0.0 } param { lr_mult: 0.2 decay_mult: 0.0 } scale_param { filler { value: 1 } bias_term: true bias_filler { value: 0 } } } layer { name: "conv4/dw/relu" type: "ReLU" bottom: "conv4/dw" top: "conv4/dw" } layer { name: "conv4" type: "Convolution" bottom: "conv4/dw" top: "conv4" param { lr_mult: 0.1 decay_mult: 0.1 } convolution_param { num_output: 256 bias_term: false kernel_size: 1 weight_filler { type: "msra" } } } layer { name: "conv4/bn" type: "BatchNorm" bottom: "conv4" top: "conv4" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } } layer { name: "conv4/scale" type: "Scale" bottom: "conv4" top: "conv4" param { lr_mult: 0.1 decay_mult: 0.0 } param { lr_mult: 0.2 decay_mult: 0.0 } scale_param { filler { value: 1 } bias_term: true bias_filler { value: 0 } } } layer { name: "conv4/relu" type: "ReLU" bottom: "conv4" top: "conv4" } layer { name: "conv5/dw" type: "DepthwiseConvolution" bottom: "conv4" top: "conv5/dw" param { lr_mult: 0.1 decay_mult: 0.1 } convolution_param { num_output: 256 bias_term: false pad: 1 kernel_size: 3 group: 256 engine: CAFFE weight_filler { type: "msra" } } } layer { name: "conv5/dw/bn" type: "BatchNorm" bottom: "conv5/dw" top: "conv5/dw" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } } layer { name: "conv5/dw/scale" type: "Scale" bottom: "conv5/dw" top: "conv5/dw" param { lr_mult: 0.1 
decay_mult: 0.0 } param { lr_mult: 0.2 decay_mult: 0.0 } scale_param { filler { value: 1 } bias_term: true bias_filler { value: 0 } } } layer { name: "conv5/dw/relu" type: "ReLU" bottom: "conv5/dw" top: "conv5/dw" } layer { name: "conv5" type: "Convolution" bottom: "conv5/dw" top: "conv5" param { lr_mult: 0.1 decay_mult: 0.1 } convolution_param { num_output: 256 bias_term: false kernel_size: 1 weight_filler { type: "msra" } } } layer { name: "conv5/bn" type: "BatchNorm" bottom: "conv5" top: "conv5" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } } layer { name: "conv5/scale" type: "Scale" bottom: "conv5" top: "conv5" param { lr_mult: 0.1 decay_mult: 0.0 } param { lr_mult: 0.2 decay_mult: 0.0 } scale_param { filler { value: 1 } bias_term: true bias_filler { value: 0 } } } layer { name: "conv5/relu" type: "ReLU" bottom: "conv5" top: "conv5" } layer { name: "conv6/dw" type: "DepthwiseConvolution" bottom: "conv5" top: "conv6/dw" param { lr_mult: 0.1 decay_mult: 0.1 } convolution_param { num_output: 256 bias_term: false pad: 1 kernel_size: 3 stride: 2 group: 256 engine: CAFFE weight_filler { type: "msra" } } } layer { name: "conv6/dw/bn" type: "BatchNorm" bottom: "conv6/dw" top: "conv6/dw" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } } layer { name: "conv6/dw/scale" type: "Scale" bottom: "conv6/dw" top: "conv6/dw" param { lr_mult: 0.1 decay_mult: 0.0 } param { lr_mult: 0.2 decay_mult: 0.0 } scale_param { filler { value: 1 } bias_term: true bias_filler { value: 0 } } } layer { name: "conv6/dw/relu" type: "ReLU" bottom: "conv6/dw" top: "conv6/dw" } layer { name: "conv6" type: "Convolution" bottom: "conv6/dw" top: "conv6" param { lr_mult: 0.1 decay_mult: 0.1 } convolution_param { num_output: 512 bias_term: false kernel_size: 1 weight_filler { type: "msra" } } } layer { name: "conv6/bn" type: "BatchNorm" bottom: "conv6" top: "conv6" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } } layer { name: "conv6/scale" type: "Scale" bottom: "conv6" top: "conv6" param { lr_mult: 0.1 decay_mult: 0.0 } param { lr_mult: 0.2 decay_mult: 0.0 } scale_param { filler { value: 1 } bias_term: true bias_filler { value: 0 } } } layer { name: "conv6/relu" type: "ReLU" bottom: "conv6" top: "conv6" } layer { name: "conv7/dw" type: "DepthwiseConvolution" bottom: "conv6" top: "conv7/dw" param { lr_mult: 0.1 decay_mult: 0.1 } convolution_param { num_output: 512 bias_term: false pad: 1 kernel_size: 3 group: 512 engine: CAFFE weight_filler { type: "msra" } } } layer { name: "conv7/dw/bn" type: "BatchNorm" bottom: "conv7/dw" top: "conv7/dw" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } } layer { name: "conv7/dw/scale" type: "Scale" bottom: "conv7/dw" top: "conv7/dw" param { lr_mult: 0.1 decay_mult: 0.0 } param { lr_mult: 0.2 decay_mult: 0.0 } scale_param { filler { value: 1 } bias_term: true bias_filler { value: 0 } } } layer { name: "conv7/dw/relu" type: "ReLU" bottom: "conv7/dw" top: "conv7/dw" } layer { name: "conv7" type: "Convolution" bottom: "conv7/dw" top: "conv7" param { lr_mult: 0.1 decay_mult: 0.1 } convolution_param { num_output: 512 bias_term: false kernel_size: 1 weight_filler { type: "msra" } } } layer { name: "conv7/bn" type: "BatchNorm" bottom: "conv7" top: "conv7" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } } layer { name: 
"conv7/scale" type: "Scale" bottom: "conv7" top: "conv7" param { lr_mult: 0.1 decay_mult: 0.0 } param { lr_mult: 0.2 decay_mult: 0.0 } scale_param { filler { value: 1 } bias_term: true bias_filler { value: 0 } } } layer { name: "conv7/relu" type: "ReLU" bottom: "conv7" top: "conv7" } layer { name: "conv8/dw" type: "DepthwiseConvolution" bottom: "conv7" top: "conv8/dw" param { lr_mult: 0.1 decay_mult: 0.1 } convolution_param { num_output: 512 bias_term: false pad: 1 kernel_size: 3 group: 512 engine: CAFFE weight_filler { type: "msra" } } } layer { name: "conv8/dw/bn" type: "BatchNorm" bottom: "conv8/dw" top: "conv8/dw" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } } layer { name: "conv8/dw/scale" type: "Scale" bottom: "conv8/dw" top: "conv8/dw" param { lr_mult: 0.1 decay_mult: 0.0 } param { lr_mult: 0.2 decay_mult: 0.0 } scale_param { filler { value: 1 } bias_term: true bias_filler { value: 0 } } } layer { name: "conv8/dw/relu" type: "ReLU" bottom: "conv8/dw" top: "conv8/dw" } layer { name: "conv8" type: "Convolution" bottom: "conv8/dw" top: "conv8" param { lr_mult: 0.1 decay_mult: 0.1 } convolution_param { num_output: 512 bias_term: false kernel_size: 1 weight_filler { type: "msra" } } } layer { name: "conv8/bn" type: "BatchNorm" bottom: "conv8" top: "conv8" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } } layer { name: "conv8/scale" type: "Scale" bottom: "conv8" top: "conv8" param { lr_mult: 0.1 decay_mult: 0.0 } param { lr_mult: 0.2 decay_mult: 0.0 } scale_param { filler { value: 1 } bias_term: true bias_filler { value: 0 } } } layer { name: "conv8/relu" type: "ReLU" bottom: "conv8" top: "conv8" } layer { name: "conv9/dw" type: "DepthwiseConvolution" bottom: "conv8" top: "conv9/dw" param { lr_mult: 0.1 decay_mult: 0.1 } convolution_param { num_output: 512 bias_term: false pad: 1 kernel_size: 3 group: 512 engine: CAFFE weight_filler { type: "msra" } } } layer { name: "conv9/dw/bn" type: "BatchNorm" bottom: "conv9/dw" top: "conv9/dw" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } } layer { name: "conv9/dw/scale" type: "Scale" bottom: "conv9/dw" top: "conv9/dw" param { lr_mult: 0.1 decay_mult: 0.0 } param { lr_mult: 0.2 decay_mult: 0.0 } scale_param { filler { value: 1 } bias_term: true bias_filler { value: 0 } } } layer { name: "conv9/dw/relu" type: "ReLU" bottom: "conv9/dw" top: "conv9/dw" } layer { name: "conv9" type: "Convolution" bottom: "conv9/dw" top: "conv9" param { lr_mult: 0.1 decay_mult: 0.1 } convolution_param { num_output: 512 bias_term: false kernel_size: 1 weight_filler { type: "msra" } } } layer { name: "conv9/bn" type: "BatchNorm" bottom: "conv9" top: "conv9" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } } layer { name: "conv9/scale" type: "Scale" bottom: "conv9" top: "conv9" param { lr_mult: 0.1 decay_mult: 0.0 } param { lr_mult: 0.2 decay_mult: 0.0 } scale_param { filler { value: 1 } bias_term: true bias_filler { value: 0 } } } layer { name: "conv9/relu" type: "ReLU" bottom: "conv9" top: "conv9" } layer { name: "conv10/dw" type: "DepthwiseConvolution" bottom: "conv9" top: "conv10/dw" param { lr_mult: 0.1 decay_mult: 0.1 } convolution_param { num_output: 512 bias_term: false pad: 1 kernel_size: 3 group: 512 engine: CAFFE weight_filler { type: "msra" } } } layer { name: "conv10/dw/bn" type: "BatchNorm" bottom: "conv10/dw" top: "conv10/dw" param 
{ lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } } layer { name: "conv10/dw/scale" type: "Scale" bottom: "conv10/dw" top: "conv10/dw" param { lr_mult: 0.1 decay_mult: 0.0 } param { lr_mult: 0.2 decay_mult: 0.0 } scale_param { filler { value: 1 } bias_term: true bias_filler { value: 0 } } } layer { name: "conv10/dw/relu" type: "ReLU" bottom: "conv10/dw" top: "conv10/dw" } layer { name: "conv10" type: "Convolution" bottom: "conv10/dw" top: "conv10" param { lr_mult: 0.1 decay_mult: 0.1 } convolution_param { num_output: 512 bias_term: false kernel_size: 1 weight_filler { type: "msra" } } } layer { name: "conv10/bn" type: "BatchNorm" bottom: "conv10" top: "conv10" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } } layer { name: "conv10/scale" type: "Scale" bottom: "conv10" top: "conv10" param { lr_mult: 0.1 decay_mult: 0.0 } param { lr_mult: 0.2 decay_mult: 0.0 } scale_param { filler { value: 1 } bias_term: true bias_filler { value: 0 } } } layer { name: "conv10/relu" type: "ReLU" bottom: "conv10" top: "conv10" } layer { name: "conv11/dw" type: "DepthwiseConvolution" bottom: "conv10" top: "conv11/dw" param { lr_mult: 0.1 decay_mult: 0.1 } convolution_param { num_output: 512 bias_term: false pad: 1 kernel_size: 3 group: 512 engine: CAFFE weight_filler { type: "msra" } } } layer { name: "conv11/dw/bn" type: "BatchNorm" bottom: "conv11/dw" top: "conv11/dw" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } } layer { name: "conv11/dw/scale" type: "Scale" bottom: "conv11/dw" top: "conv11/dw" param { lr_mult: 0.1 decay_mult: 0.0 } param { lr_mult: 0.2 decay_mult: 0.0 } scale_param { filler { value: 1 } bias_term: true bias_filler { value: 0 } } } layer { name: "conv11/dw/relu" type: "ReLU" bottom: "conv11/dw" top: "conv11/dw" } layer { name: "conv11" type: "Convolution" bottom: "conv11/dw" top: "conv11" param { lr_mult: 0.1 decay_mult: 0.1 } convolution_param { num_output: 512 bias_term: false kernel_size: 1 weight_filler { type: "msra" } } } layer { name: "conv11/bn" type: "BatchNorm" bottom: "conv11" top: "conv11" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } } layer { name: "conv11/scale" type: "Scale" bottom: "conv11" top: "conv11" param { lr_mult: 0.1 decay_mult: 0.0 } param { lr_mult: 0.2 decay_mult: 0.0 } scale_param { filler { value: 1 } bias_term: true bias_filler { value: 0 } } } layer { name: "conv11/relu" type: "ReLU" bottom: "conv11" top: "conv11" } layer { name: "conv12/dw" type: "DepthwiseConvolution" bottom: "conv11" top: "conv12/dw" param { lr_mult: 0.1 decay_mult: 0.1 } convolution_param { num_output: 512 bias_term: false pad: 1 kernel_size: 3 stride: 2 group: 512 engine: CAFFE weight_filler { type: "msra" } } } layer { name: "conv12/dw/bn" type: "BatchNorm" bottom: "conv12/dw" top: "conv12/dw" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } } layer { name: "conv12/dw/scale" type: "Scale" bottom: "conv12/dw" top: "conv12/dw" param { lr_mult: 0.1 decay_mult: 0.0 } param { lr_mult: 0.2 decay_mult: 0.0 } scale_param { filler { value: 1 } bias_term: true bias_filler { value: 0 } } } layer { name: "conv12/dw/relu" type: "ReLU" bottom: "conv12/dw" top: "conv12/dw" } layer { name: "conv12" type: "Convolution" bottom: "conv12/dw" top: "conv12" param { lr_mult: 0.1 decay_mult: 0.1 } convolution_param { num_output: 1024 
bias_term: false kernel_size: 1 weight_filler { type: "msra" } } } layer { name: "conv12/bn" type: "BatchNorm" bottom: "conv12" top: "conv12" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } } layer { name: "conv12/scale" type: "Scale" bottom: "conv12" top: "conv12" param { lr_mult: 0.1 decay_mult: 0.0 } param { lr_mult: 0.2 decay_mult: 0.0 } scale_param { filler { value: 1 } bias_term: true bias_filler { value: 0 } } } layer { name: "conv12/relu" type: "ReLU" bottom: "conv12" top: "conv12" } layer { name: "conv13/dw" type: "DepthwiseConvolution" bottom: "conv12" top: "conv13/dw" param { lr_mult: 0.1 decay_mult: 0.1 } convolution_param { num_output: 1024 bias_term: false pad: 1 kernel_size: 3 group: 1024 engine: CAFFE weight_filler { type: "msra" } } } layer { name: "conv13/dw/bn" type: "BatchNorm" bottom: "conv13/dw" top: "conv13/dw" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } } layer { name: "conv13/dw/scale" type: "Scale" bottom: "conv13/dw" top: "conv13/dw" param { lr_mult: 0.1 decay_mult: 0.0 } param { lr_mult: 0.2 decay_mult: 0.0 } scale_param { filler { value: 1 } bias_term: true bias_filler { value: 0 } } } layer { name: "conv13/dw/relu" type: "ReLU" bottom: "conv13/dw" top: "conv13/dw" } layer { name: "conv13" type: "Convolution" bottom: "conv13/dw" top: "conv13" param { lr_mult: 0.1 decay_mult: 0.1 } convolution_param { num_output: 1024 bias_term: false kernel_size: 1 weight_filler { type: "msra" } } } layer { name: "conv13/bn" type: "BatchNorm" bottom: "conv13" top: "conv13" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } } layer { name: "conv13/scale" type: "Scale" bottom: "conv13" top: "conv13" param { lr_mult: 0.1 decay_mult: 0.0 } param { lr_mult: 0.2 decay_mult: 0.0 } scale_param { filler { value: 1 } bias_term: true bias_filler { value: 0 } } } layer { name: "conv13/relu" type: "ReLU" bottom: "conv13" top: "conv13" }

layer { name: "conv16/dw" type: "DepthwiseConvolution" bottom: "conv13" top: "conv16/dw" param { lr_mult: 1 decay_mult: 1 } convolution_param { num_output: 1024 bias_term: false pad: 1 kernel_size: 3 group: 1024 engine: CAFFE weight_filler { type: "msra" } } } layer { name: "conv16/dw/bn" type: "BatchNorm" bottom: "conv16/dw" top: "conv16/dw" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } } layer { name: "conv16/dw/scale" type: "Scale" bottom: "conv16/dw" top: "conv16/dw" param { lr_mult: 1 decay_mult: 0.0 } param { lr_mult: 2 decay_mult: 0.0 } scale_param { filler { value: 1 } bias_term: true bias_filler { value: 0 } } } layer { name: "conv16/dw/relu" type: "ReLU" bottom: "conv16/dw" top: "conv16/dw" } layer { name: "conv16" type: "Convolution" bottom: "conv16/dw" top: "conv16" param { lr_mult: 1 decay_mult: 1 } convolution_param { num_output: 1024 bias_term: false kernel_size: 1 weight_filler { type: "msra" } } } layer { name: "conv16/bn" type: "BatchNorm" bottom: "conv16" top: "conv16" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } } layer { name: "conv16/scale" type: "Scale" bottom: "conv16" top: "conv16" param { lr_mult: 1 decay_mult: 0.0 } param { lr_mult: 2 decay_mult: 0.0 } scale_param { filler { value: 1 } bias_term: true bias_filler { value: 0 } } } layer { name: "conv16/relu" type: "ReLU" bottom: "conv16" top: "conv16" } layer { name: "upsample" type: "Deconvolution" bottom: "conv16" top: "upsample" param { lr_mult: 0 decay_mult: 0 } convolution_param { num_output: 512 kernel_size: 4 stride: 2 pad: 1 group: 512 weight_filler: { type: "bilinear" } bias_term: false } } layer { name: "conv17/dw" type: "DepthwiseConvolution" bottom: "conv11" top: "conv17/dw" param { lr_mult: 1 decay_mult: 1 } convolution_param { num_output: 512 bias_term: false pad: 1 kernel_size: 3 group: 512 engine: CAFFE weight_filler { type: "msra" } } } layer { name: "conv17/dw/bn" type: "BatchNorm" bottom: "conv17/dw" top: "conv17/dw" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } } layer { name: "conv17/dw/scale" type: "Scale" bottom: "conv17/dw" top: "conv17/dw" param { lr_mult: 1 decay_mult: 0.0 } param { lr_mult: 2 decay_mult: 0.0 } scale_param { filler { value: 1 } bias_term: true bias_filler { value: 0 } } } layer { name: "conv17/dw/relu" type: "ReLU" bottom: "conv17/dw" top: "conv17/dw" } layer { name: "conv17" type: "Convolution" bottom: "conv17/dw" top: "conv17" param { lr_mult: 1 decay_mult: 1 } convolution_param { num_output: 512 bias_term: false kernel_size: 1 weight_filler { type: "msra" } } } layer { name: "conv17/bn" type: "BatchNorm" bottom: "conv17" top: "conv17" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } } layer { name: "conv17/scale" type: "Scale" bottom: "conv17" top: "conv17" param { lr_mult: 1 decay_mult: 0.0 } param { lr_mult: 2 decay_mult: 0.0 } scale_param { filler { value: 1 } bias_term: true bias_filler { value: 0 } } } layer { name: "conv17/relu" type: "ReLU" bottom: "conv17" top: "conv17" } layer { name: "conv17/sum" type: "Eltwise" bottom: "conv17" bottom: "upsample" top: "conv17/sum" } layer { name: "conv18/dw" type: "DepthwiseConvolution" bottom: "conv17/sum" top: "conv18/dw" param { lr_mult: 1 decay_mult: 1 } convolution_param { num_output: 512 bias_term: false pad: 1 kernel_size: 3 group: 512 engine: CAFFE weight_filler { type: "msra" } } } 
layer { name: "conv18/dw/bn" type: "BatchNorm" bottom: "conv18/dw" top: "conv18/dw" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } } layer { name: "conv18/dw/scale" type: "Scale" bottom: "conv18/dw" top: "conv18/dw" param { lr_mult: 1 decay_mult: 0.0 } param { lr_mult: 2 decay_mult: 0.0 } scale_param { filler { value: 1 } bias_term: true bias_filler { value: 0 } } } layer { name: "conv18/dw/relu" type: "ReLU" bottom: "conv18/dw" top: "conv18/dw" } layer { name: "conv18" type: "Convolution" bottom: "conv18/dw" top: "conv18" param { lr_mult: 1 decay_mult: 1 } convolution_param { num_output: 1024 bias_term: false kernel_size: 1 weight_filler { type: "msra" } } } layer { name: "conv18/bn" type: "BatchNorm" bottom: "conv18" top: "conv18" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } } layer { name: "conv18/scale" type: "Scale" bottom: "conv18" top: "conv18" param { lr_mult: 1 decay_mult: 0.0 } param { lr_mult: 2 decay_mult: 0.0 } scale_param { filler { value: 1 } bias_term: true bias_filler { value: 0 } } } layer { name: "conv18/relu" type: "ReLU" bottom: "conv18" top: "conv18" }

layer { name: "conv20" type: "Convolution" bottom: "conv16" top: "conv20" param { lr_mult: 1 decay_mult: 1 } param { lr_mult: 2 decay_mult: 0 } convolution_param { num_output: 75 kernel_size: 1 pad: 0 stride: 1 weight_filler { type: "msra" } bias_filler { value: 0 } } }

layer { name: "conv21" type: "Convolution" bottom: "conv18" top: "conv21" param { lr_mult: 1 decay_mult: 1 } param { lr_mult: 2 decay_mult: 0 } convolution_param { num_output: 75 kernel_size: 1 pad: 0 stride: 1 weight_filler { type: "msra" } bias_filler { value: 0 } } }

layer { name: "detection_out" type: "Yolov3DetectionOutput" bottom: "conv20" bottom: "conv21" top: "detection_out" include { phase: TEST } yolov3_detection_output_param { confidence_threshold: 0.01 nms_threshold: 0.45 num_classes: 20

#10,14,  23,27,  37,58,  81,82,  135,169,  344,319
biases: 10
biases: 14
biases: 23
biases: 27
biases: 37
biases: 58
biases: 81
biases: 82
biases: 135
biases: 169
biases: 344
biases: 319

mask:3
mask:4
mask:5  
mask:0
mask:1
mask:2
anchors_scale:32
anchors_scale:16
mask_group_num:2

} }
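As a quick sanity check on the head configuration pasted above, the channel count of conv20/conv21 and the anchor-to-head assignment can be recomputed from the values shown. This is only an illustrative sketch (not repository code); all numbers are copied from the prototxt above.

```python
# Sanity-check the YOLOv3 head configuration pasted above.
num_classes = 20
biases = [10, 14, 23, 27, 37, 58, 81, 82, 135, 169, 344, 319]
masks = [[3, 4, 5], [0, 1, 2]]   # one mask group per output blob (conv20, conv21)
anchors_scale = [32, 16]         # downsample factor of each head

# Each grid cell predicts len(mask) boxes, each with (x, y, w, h, obj) + class scores,
# so the 1x1 output convolutions conv20/conv21 need this many channels:
expected_num_output = len(masks[0]) * (5 + num_classes)
print("expected num_output:", expected_num_output)   # 75, matching conv20/conv21

# Which (w, h) anchor pairs each head actually uses:
anchors = list(zip(biases[0::2], biases[1::2]))
for head, (mask, scale) in enumerate(zip(masks, anchors_scale)):
    print("head %d (stride %d):" % (head, scale), [anchors[m] for m in mask])
```

If the class count is ever changed for a custom dataset, num_output in conv20 and conv21 has to be changed to 3 * (5 + num_classes) as well; otherwise training runs but detections come out wrong.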

Thanks!!

LEXUSAPI commented 5 years ago

I0605 19:18:20.943547 3300 solver.cpp:273] Train net output #1: det_loss2 = 0.000832803 ( 1 = 0.000832803 loss) I0605 19:18:20.944080 3300 sgd_solver.cpp:121] Iteration 12460, lr = 0.0005 I0605 19:18:22.859643 3300 yolov3_layer.cpp:350] avg_noobj: 0.00265248 avg_obj: 0.0310271 avg_iou: 0.645616 avg_cat: 0.182219 recall: 0.808538 recall75: 0.308882 count: 12 I0605 19:18:22.899511 3300 yolov3_layer.cpp:350] avg_noobj: 0.000263958 avg_obj: 0.00243713 avg_iou: 0.572827 avg_cat: 0.24511 recall: 0.678571 recall75: 0.160714 count: 3 I0605 19:18:26.603961 3300 yolov3_layer.cpp:350] avg_noobj: 0.00316648 avg_obj: 0.038487 avg_iou: 0.654488 avg_cat: 0.156267 recall: 0.882007 recall75: 0.235718 count: 11 I0605 19:18:26.643882 3300 yolov3_layer.cpp:350] avg_noobj: 0.000323673 avg_obj: 0.00709611 avg_iou: 0.604563 avg_cat: 0.248297 recall: 0.801073 recall75: 0.0942551 count: 7 I0605 19:18:31.597422 3300 yolov3_layer.cpp:350] avg_noobj: 0.0026287 avg_obj: 0.0299306 avg_iou: 0.61082 avg_cat: 0.192662 recall: 0.706834 recall75: 0.222176 count: 12 I0605 19:18:31.653295 3300 yolov3_layer.cpp:350] avg_noobj: 0.000298749 avg_obj: 0.00397734 avg_iou: 0.515808 avg_cat: 0.273514 recall: 0.523367 recall75: 0.0416667 count: 7 I0605 19:18:36.183598 3300 yolov3_layer.cpp:350] avg_noobj: 0.0028444 avg_obj: 0.0347357 avg_iou: 0.623817 avg_cat: 0.186791 recall: 0.835623 recall75: 0.197344 count: 13 I0605 19:18:36.206562 3300 yolov3_layer.cpp:350] avg_noobj: 0.000331346 avg_obj: 0.0167292 avg_iou: 0.509811 avg_cat: 0.24838 recall: 0.432576 recall75: 0.025 count: 7 Could not create logging file: Invalid argument COULD NOT CREATE A LOGGINGFILE 20190605-191838.11712!I0605 19:18:38.221812 3300 solver.cpp:254] Iteration 12470 (0.578796 iter/s, 17.2773s/10 iters), loss = 8.12812 I0605 19:18:38.223780 3300 solver.cpp:273] Train net output #0: det_loss1 = 4.47542 ( 1 = 4.47542 loss) I0605 19:18:38.223780 3300 solver.cpp:273] Train net output #1: det_loss2 = 1.50113 ( 1 = 1.50113 loss) I0605 19:18:38.223780 3300 sgd_solver.cpp:121] Iteration 12470, lr = 0.0005 I0605 19:18:40.587811 3300 yolov3_layer.cpp:350] avg_noobj: 0.00254414 avg_obj: 0.0309991 avg_iou: 0.619606 avg_cat: 0.161684 recall: 0.846208 recall75: 0.170459 count: 9 I0605 19:18:40.604766 3300 yolov3_layer.cpp:350] avg_noobj: 0.000289791 avg_obj: 0.00290392 avg_iou: 0.480005 avg_cat: 0.24822 recall: 0.506173 recall75: 0.095679 count: 9 I0605 19:18:44.863792 3300 yolov3_layer.cpp:350] avg_noobj: 0.00232901 avg_obj: 0.0248103 avg_iou: 0.649525 avg_cat: 0.189379 recall: 0.900835 recall75: 0.252914 count: 9 I0605 19:18:44.876760 3300 yolov3_layer.cpp:350] avg_noobj: 0.000206504 avg_obj: 0.0149982 avg_iou: 0.511707 avg_cat: 0.282013 recall: 0.583333 recall75: 0 count: 2 I0605 19:18:48.945741 3300 yolov3_layer.cpp:350] avg_noobj: 0.00270699 avg_obj: 0.0483267 avg_iou: 0.667386 avg_cat: 0.225893 recall: 0.859401 recall75: 0.285593 count: 10 I0605 19:18:48.969678 3300 yolov3_layer.cpp:350] avg_noobj: 0.000200723 avg_obj: 0.00455505 avg_iou: 0.560183 avg_cat: 0.206003 recall: 0.579861 recall75: 0.0416667 count: 4 I0605 19:18:53.853471 3300 yolov3_layer.cpp:350] avg_noobj: 0.00223947 avg_obj: 0.023926 avg_iou: 0.595783 avg_cat: 0.208393 recall: 0.714516 recall75: 0.148311 count: 14 I0605 19:18:53.891368 3300 yolov3_layer.cpp:350] avg_noobj: 0.000156055 avg_obj: 0.00600492 avg_iou: 0.554579 avg_cat: 0.149696 recall: 0.731481 recall75: 0.111111 count: 7 I0605 19:18:55.819883 3300 solver.cpp:254] Iteration 12480 (0.568315 iter/s, 17.5959s/10 iters), loss = 6.52835 I0605 
19:18:55.820880 3300 solver.cpp:273] Train net output #0: det_loss1 = 5.67403 ( 1 = 5.67403 loss) I0605 19:18:55.820880 3300 solver.cpp:273] Train net output #1: det_loss2 = 0.45078 ( 1 = 0.45078 loss) I0605 19:18:55.821877 3300 sgd_solver.cpp:121] Iteration 12480, lr = 0.0005 I0605 19:18:58.130084 3300 yolov3_layer.cpp:350] avg_noobj: 0.00329695 avg_obj: 0.051156 avg_iou: 0.62381 avg_cat: 0.21351 recall: 0.798148 recall75: 0.205259 count: 12 I0605 19:18:58.194357 3300 yolov3_layer.cpp:350] avg_noobj: 0.000303462 avg_obj: 0.00781951 avg_iou: 0.516708 avg_cat: 0.266599 recall: 0.578571 recall75: 0.0178571 count: 3 I0605 19:19:02.389202 3300 yolov3_layer.cpp:350] avg_noobj: 0.00302735 avg_obj: 0.0298257 avg_iou: 0.617936 avg_cat: 0.236452 recall: 0.821002 recall75: 0.152967 count: 13 I0605 19:19:02.462007 3300 yolov3_layer.cpp:350] avg_noobj: 0.000219126 avg_obj: 0.003597 avg_iou: 0.511626 avg_cat: 0.217333 recall: 0.611111 recall75: 0 count: 4 I0605 19:19:06.461671 3300 yolov3_layer.cpp:350] avg_noobj: 0.00392475 avg_obj: 0.0540566 avg_iou: 0.626273 avg_cat: 0.23 recall: 0.789026 recall75: 0.197008 count: 12 I0605 19:19:06.475636 3300 yolov3_layer.cpp:350] avg_noobj: 0.000282378 avg_obj: 0.00604608 avg_iou: 0.529508 avg_cat: 0.287539 recall: 0.714225 recall75: 0.0277778 count: 7 I0605 19:19:10.846474 3300 yolov3_layer.cpp:350] avg_noobj: 0.00313368 avg_obj: 0.039936 avg_iou: 0.658179 avg_cat: 0.293206 recall: 0.887963 recall75: 0.308808 count: 11 I0605 19:19:10.884347 3300 yolov3_layer.cpp:350] avg_noobj: 0.000382866 avg_obj: 0.00453615 avg_iou: 0.506867 avg_cat: 0.248793 recall: 0.496074 recall75: 0.0148237 count: 9 I0605 19:19:12.377353 3300 solver.cpp:254] Iteration 12490 (0.604047 iter/s, 16.555s/10 iters), loss = 8.02961 I0605 19:19:12.377353 3300 solver.cpp:273] Train net output #0: det_loss1 = 4.84919 ( 1 = 4.84919 loss) I0605 19:19:12.377353 3300 solver.cpp:273] Train net output #1: det_loss2 = 1.36246 ( 1 = 1.36246 loss) I0605 19:19:12.378350 3300 sgd_solver.cpp:121] Iteration 12490, lr = 0.0005 I0605 19:19:14.582921 3300 yolov3_layer.cpp:350] avg_noobj: 0.00333838 avg_obj: 0.036755 avg_iou: 0.651256 avg_cat: 0.256216 recall: 0.89077 recall75: 0.236208 count: 11 I0605 19:19:14.595885 3300 yolov3_layer.cpp:350] avg_noobj: 0.000532896 avg_obj: 0.0108854 avg_iou: 0.519539 avg_cat: 0.2004 recall: 0.595767 recall75: 0 count: 4 I0605 19:19:18.971384 3300 yolov3_layer.cpp:350] avg_noobj: 0.00282901 avg_obj: 0.0556201 avg_iou: 0.636931 avg_cat: 0.200537 recall: 0.837536 recall75: 0.20425 count: 13 I0605 19:19:19.057150 3300 yolov3_layer.cpp:350] avg_noobj: 0.000246464 avg_obj: 0.00441543 avg_iou: 0.581093 avg_cat: 0.242356 recall: 0.811905 recall75: 0.0416667 count: 5 Could not create logging file: Invalid argument COULD NOT CREATE A LOGGINGFILE 20190605-191922.11712!I0605 19:19:22.776623 3300 yolov3_layer.cpp:350] avg_noobj: 0.0033964 avg_obj: 0.044132 avg_iou: 0.59832 avg_cat: 0.259916 recall: 0.776648 recall75: 0.120015 count: 13 I0605 19:19:22.828485 3300 yolov3_layer.cpp:350] avg_noobj: 0.000378613 avg_obj: 0.00417216 avg_iou: 0.501294 avg_cat: 0.152246 recall: 0.552047 recall75: 0.157407 count: 6 I0605 19:19:27.216162 3300 yolov3_layer.cpp:350] avg_noobj: 0.00311634 avg_obj: 0.0462743 avg_iou: 0.629828 avg_cat: 0.180549 recall: 0.83356 recall75: 0.210415 count: 14 I0605 19:19:27.259021 3300 yolov3_layer.cpp:350] avg_noobj: 0.000310787 avg_obj: 0.00269606 avg_iou: 0.486832 avg_cat: 0.239308 recall: 0.35 recall75: 0.00625 count: 7 I0605 19:19:29.451915 3300 solver.cpp:254] Iteration 
12500 (0.585719 iter/s, 17.073s/10 iters), loss = 8.03561 I0605 19:19:29.451915 3300 solver.cpp:273] Train net output #0: det_loss1 = 6.2078 ( 1 = 6.2078 loss) I0605 19:19:29.452885 3300 solver.cpp:273] Train net output #1: det_loss2 = 1.89563 ( 1 = 1.89563 loss) I0605 19:19:29.452885 3300 sgd_solver.cpp:121] Iteration 12500, lr = 0.0005 I0605 19:19:31.924057 3300 yolov3_layer.cpp:350] avg_noobj: 0.00276208 avg_obj: 0.0346788 avg_iou: 0.634167 avg_cat: 0.179232 recall: 0.859802 recall75: 0.155379 count: 11 I0605 19:19:31.947994 3300 yolov3_layer.cpp:350] avg_noobj: 0.000226774 avg_obj: 0.00218301 avg_iou: 0.452355 avg_cat: 0.116414 recall: 0.316667 recall75: 0 count: 3 I0605 19:19:36.020443 3300 yolov3_layer.cpp:350] avg_noobj: 0.0028489 avg_obj: 0.0421461 avg_iou: 0.624828 avg_cat: 0.200173 recall: 0.829125 recall75: 0.195354 count: 10 I0605 19:19:36.046401 3300 yolov3_layer.cpp:350] avg_noobj: 0.000331418 avg_obj: 0.0111531 avg_iou: 0.497412 avg_cat: 0.202312 recall: 0.48125 recall75: 0.00625 count: 4 I0605 19:19:40.397859 3300 yolov3_layer.cpp:350] avg_noobj: 0.00282483 avg_obj: 0.0304993 avg_iou: 0.633767 avg_cat: 0.175424 recall: 0.866694 recall75: 0.189815 count: 12 I0605 19:19:40.432792 3300 yolov3_layer.cpp:350] avg_noobj: 0.000247293 avg_obj: 0.0034656 avg_iou: 0.516862 avg_cat: 0.261066 recall: 0.591702 recall75: 0.0542208 count: 7 I0605 19:19:44.954481 3300 yolov3_layer.cpp:350] avg_noobj: 0.00242155 avg_obj: 0.0231582 avg_iou: 0.620893 avg_cat: 0.170172 recall: 0.822558 recall75: 0.155372 count: 10 I0605 19:19:44.979414 3300 yolov3_layer.cpp:350] avg_noobj: 0.000300307 avg_obj: 0.00297815 avg_iou: 0.487224 avg_cat: 0.179455 recall: 0.534318 recall75: 0 count: 6 I0605 19:19:47.230954 3300 solver.cpp:254] Iteration 12510 (0.562492 iter/s, 17.778s/10 iters), loss = 7.38494 I0605 19:19:47.231928 3300 solver.cpp:273] Train net output #0: det_loss1 = 4.81007 ( 1 = 4.81007 loss) I0605 19:19:47.231928 3300 solver.cpp:273] Train net output #1: det_loss2 = 1.16328 ( 1 = 1.16328 loss) I0605 19:19:47.232925 3300 sgd_solver.cpp:121] Iteration 12510, lr = 0.0005 I0605 19:19:49.022966 3300 yolov3_layer.cpp:350] avg_noobj: 0.00285788 avg_obj: 0.0443964 avg_iou: 0.634798 avg_cat: 0.199004 recall: 0.85553 recall75: 0.156816 count: 11 I0605 19:19:49.043910 3300 yolov3_layer.cpp:350] avg_noobj: 0.000372392 avg_obj: 0.00322275 avg_iou: 0.452622 avg_cat: 0.305551 recall: 0.404762 recall75: 0 count: 5 I0605 19:19:53.121110 3300 yolov3_layer.cpp:350] avg_noobj: 0.00291626 avg_obj: 0.0314833 avg_iou: 0.640478 avg_cat: 0.184493 recall: 0.863073 recall75: 0.209155 count: 10 I0605 19:19:53.155017 3300 yolov3_layer.cpp:350] avg_noobj: 0.000369423 avg_obj: 0.00877558 avg_iou: 0.493772 avg_cat: 0.183857 recall: 0.556878 recall75: 0 count: 3 I0605 19:19:57.379637 3300 yolov3_layer.cpp:350] avg_noobj: 0.00238114 avg_obj: 0.049195 avg_iou: 0.63194 avg_cat: 0.198527 recall: 0.843922 recall75: 0.152469 count: 11 I0605 19:19:57.453439 3300 yolov3_layer.cpp:350] avg_noobj: 0.000255439 avg_obj: 0.00362778 avg_iou: 0.458997 avg_cat: 0.19757 recall: 0.434259 recall75: 0 count: 9 I0605 19:20:01.836761 3300 yolov3_layer.cpp:350] avg_noobj: 0.00266701 avg_obj: 0.0311816 avg_iou: 0.638568 avg_cat: 0.227812 recall: 0.77353 recall75: 0.290579 count: 12 I0605 19:20:01.871668 3300 yolov3_layer.cpp:350] avg_noobj: 0.000326695 avg_obj: 0.00722598 avg_iou: 0.521178 avg_cat: 0.185146 recall: 0.623016 recall75: 0.0208333 count: 7 I0605 19:20:03.418504 3300 solver.cpp:254] Iteration 12520 (0.617859 iter/s, 16.1849s/10 iters), loss 
= 7.53383 I0605 19:20:03.418504 3300 solver.cpp:273] Train net output #0: det_loss1 = 2.87461 ( 1 = 2.87461 loss) I0605 19:20:03.419502 3300 solver.cpp:273] Train net output #1: det_loss2 = 4.34306 ( 1 = 4.34306 loss) I0605 19:20:03.419502 3300 sgd_solver.cpp:121] Iteration 12520, lr = 0.0005 Could not create logging file: Invalid argument COULD NOT CREATE A LOGGINGFILE 20190605-192005.11712!I0605 19:20:05.818264 3300 yolov3_layer.cpp:350] avg_noobj: 0.00309157 avg_obj: 0.0532336 avg_iou: 0.649751 avg_cat: 0.162685 recall: 0.858502 recall75: 0.214735 count: 10 I0605 19:20:05.851202 3300 yolov3_layer.cpp:350] avg_noobj: 0.000403206 avg_obj: 0.00716492 avg_iou: 0.445854 avg_cat: 0.304836 recall: 0.412829 recall75: 0 count: 13 I0605 19:20:09.390074 3300 yolov3_layer.cpp:350] avg_noobj: 0.00312518 avg_obj: 0.0370927 avg_iou: 0.633618 avg_cat: 0.198188 recall: 0.850838 recall75: 0.160494 count: 11 I0605 19:20:09.431963 3300 yolov3_layer.cpp:350] avg_noobj: 0.000694415 avg_obj: 0.0104734 avg_iou: 0.474882 avg_cat: 0.264217 recall: 0.436111 recall75: 0.015873 count: 9 I0605 19:20:13.822546 3300 yolov3_layer.cpp:350] avg_noobj: 0.00259194 avg_obj: 0.0403508 avg_iou: 0.665718 avg_cat: 0.194452 recall: 0.860831 recall75: 0.292669 count: 9 I0605 19:20:13.842519 3300 yolov3_layer.cpp:350] avg_noobj: 0.000312286 avg_obj: 0.0053947 avg_iou: 0.459958 avg_cat: 0.193672 recall: 0.417659 recall75: 0.0138889 count: 4 I0605 19:20:17.691043 3300 yolov3_layer.cpp:350] avg_noobj: 0.00287202 avg_obj: 0.0291737 avg_iou: 0.647074 avg_cat: 0.153396 recall: 0.850703 recall75: 0.247723 count: 11 I0605 19:20:17.726924 3300 yolov3_layer.cpp:350] avg_noobj: 0.000309667 avg_obj: 0.0040057 avg_iou: 0.519263 avg_cat: 0.174034 recall: 0.545855 recall75: 0.0343915 count: 5 I0605 19:20:19.872403 3300 solver.cpp:254] Iteration 12530 (0.607809 iter/s, 16.4525s/10 iters), loss = 7.6842 I0605 19:20:19.873380 3300 solver.cpp:273] Train net output #0: det_loss1 = 7.65109 ( 1 = 7.65109 loss) I0605 19:20:19.874377 3300 solver.cpp:273] Train net output #1: det_loss2 = 0.877699 ( 1 = 0.877699 loss) I0605 19:20:19.874377 3300 sgd_solver.cpp:121] Iteration 12530, lr = 0.0005 I0605 19:20:22.845577 3300 yolov3_layer.cpp:350] avg_noobj: 0.00232477 avg_obj: 0.0431203 avg_iou: 0.651754 avg_cat: 0.203131 recall: 0.83947 recall75: 0.234785 count: 11 I0605 19:20:22.881479 3300 yolov3_layer.cpp:350] avg_noobj: 0.000257757 avg_obj: 0.00343921 avg_iou: 0.496588 avg_cat: 0.237333 recall: 0.510417 recall75: 0.0125 count: 5 I0605 19:20:27.404374 3300 yolov3_layer.cpp:350] avg_noobj: 0.00279358 avg_obj: 0.0319883 avg_iou: 0.609273 avg_cat: 0.179511 recall: 0.757692 recall75: 0.194499 count: 15 I0605 19:20:27.417367 3300 yolov3_layer.cpp:350] avg_noobj: 0.000316511 avg_obj: 0.00295685 avg_iou: 0.465988 avg_cat: 0.301883 recall: 0.453483 recall75: 0.0679012 count: 4 I0605 19:20:31.498841 3300 yolov3_layer.cpp:350] avg_noobj: 0.00359538 avg_obj: 0.0485006 avg_iou: 0.617241 avg_cat: 0.209228 recall: 0.7878 recall75: 0.150163 count: 10 I0605 19:20:31.525769 3300 yolov3_layer.cpp:350] avg_noobj: 0.000437292 avg_obj: 0.0134618 avg_iou: 0.551258 avg_cat: 0.153278 recall: 0.588624 recall75: 0.101852 count: 5 I0605 19:20:35.994704 3300 yolov3_layer.cpp:350] avg_noobj: 0.00283231 avg_obj: 0.0303469 avg_iou: 0.64724 avg_cat: 0.219677 recall: 0.892877 recall75: 0.175024 count: 12 I0605 19:20:36.017643 3300 yolov3_layer.cpp:350] avg_noobj: 0.000295402 avg_obj: 0.00658006 avg_iou: 0.458386 avg_cat: 0.210321 recall: 0.445218 recall75: 0.0592593 count: 7 I0605 
19:20:37.947180 3300 solver.cpp:254] Iteration 12540 (0.553318 iter/s, 18.0728s/10 iters), loss = 8.33392 I0605 19:20:37.948176 3300 solver.cpp:273] Train net output #0: det_loss1 = 3.58086 ( 1 = 3.58086 loss) I0605 19:20:37.948176 3300 solver.cpp:273] Train net output #1: det_loss2 = 0.347006 ( 1 = 0.347006 loss) I0605 19:20:37.949173 3300 sgd_solver.cpp:121] Iteration 12540, lr = 0.0005 I0605 19:20:40.172420 3300 yolov3_layer.cpp:350] avg_noobj: 0.00293626 avg_obj: 0.0321759 avg_iou: 0.643307 avg_cat: 0.19418 recall: 0.881558 recall75: 0.198515 count: 12 I0605 19:20:40.194362 3300 yolov3_layer.cpp:350] avg_noobj: 0.00034704 avg_obj: 0.0100363 avg_iou: 0.500065 avg_cat: 0.257968 recall: 0.515476 recall75: 0.0178571 count: 6 I0605 19:20:44.057361 3300 yolov3_layer.cpp:350] avg_noobj: 0.00313916 avg_obj: 0.0527417 avg_iou: 0.659828 avg_cat: 0.233123 recall: 0.86524 recall75: 0.239137 count: 10 I0605 19:20:44.103235 3300 yolov3_layer.cpp:350] avg_noobj: 0.000418076 avg_obj: 0.00766261 avg_iou: 0.495713 avg_cat: 0.224374 recall: 0.544444 recall75: 0.104938 count: 7 I0605 19:20:47.635077 3300 yolov3_layer.cpp:350] avg_noobj: 0.00355725 avg_obj: 0.0325799 avg_iou: 0.655993 avg_cat: 0.251627 recall: 0.915658 recall75: 0.217177 count: 12 I0605 19:20:47.650038 3300 yolov3_layer.cpp:350] avg_noobj: 0.00040842 avg_obj: 0.00592278 avg_iou: 0.491019 avg_cat: 0.202457 recall: 0.476871 recall75: 0 count: 5 I0605 19:20:52.118727 3300 yolov3_layer.cpp:350] avg_noobj: 0.00275538 avg_obj: 0.0323469 avg_iou: 0.646056 avg_cat: 0.215758 recall: 0.898798 recall75: 0.194672 count: 13 I0605 19:20:52.139672 3300 yolov3_layer.cpp:350] avg_noobj: 0.000349215 avg_obj: 0.00229965 avg_iou: 0.44785 avg_cat: 0.206496 recall: 0.315882 recall75: 0.0138889 count: 6 Could not create logging file: Invalid argument COULD NOT CREATE A LOGGINGFILE 20190605-192053.11712!I0605 19:20:53.571318 3300 solver.cpp:254] Iteration 12550 (0.640119 iter/s, 15.6221s/10 iters), loss = 7.89049 I0605 19:20:53.573312 3300 solver.cpp:273] Train net output #0: det_loss1 = 3.84329 ( 1 = 3.84329 loss) I0605 19:20:53.573312 3300 solver.cpp:273] Train net output #1: det_loss2 = 2.17349 ( 1 = 2.17349 loss) I0605 19:20:53.574306 3300 sgd_solver.cpp:121] Iteration 12550, lr = 0.0005 I0605 19:20:56.050925 3300 yolov3_layer.cpp:350] avg_noobj: 0.00385313 avg_obj: 0.0378496 avg_iou: 0.614312 avg_cat: 0.225314 recall: 0.819727 recall75: 0.129331 count: 16 I0605 19:20:56.112759 3300 yolov3_layer.cpp:350] avg_noobj: 0.000424033 avg_obj: 0.00678592 avg_iou: 0.507608 avg_cat: 0.338597 recall: 0.507937 recall75: 0.031746 count: 5 I0605 19:21:00.417657 3300 yolov3_layer.cpp:350] avg_noobj: 0.00359072 avg_obj: 0.0333965 avg_iou: 0.651351 avg_cat: 0.197982 recall: 0.850168 recall75: 0.252022 count: 11 I0605 19:21:00.457552 3300 yolov3_layer.cpp:350] avg_noobj: 0.000331282 avg_obj: 0.00566017 avg_iou: 0.457309 avg_cat: 0.316339 recall: 0.295238 recall75: 0.0238095 count: 6 I0605 19:21:04.788831 3300 yolov3_layer.cpp:350] avg_noobj: 0.00282905 avg_obj: 0.029601 avg_iou: 0.647409 avg_cat: 0.212677 recall: 0.869678 recall75: 0.164323 count: 11 I0605 19:21:04.808776 3300 yolov3_layer.cpp:350] avg_noobj: 0.000245929 avg_obj: 0.00465359 avg_iou: 0.535713 avg_cat: 0.342484 recall: 0.717262 recall75: 0 count: 3 I0605 19:21:08.607992 3300 yolov3_layer.cpp:350] avg_noobj: 0.00301587 avg_obj: 0.0453556 avg_iou: 0.649629 avg_cat: 0.184469 recall: 0.868559 recall75: 0.306434 count: 11 I0605 19:21:08.650851 3300 yolov3_layer.cpp:350] avg_noobj: 0.000336037 avg_obj: 0.00359891 
avg_iou: 0.468336 avg_cat: 0.107412 recall: 0.5 recall75: 0 count: 4 I0605 19:21:10.414767 3300 solver.cpp:254] Iteration 12560 (0.593801 iter/s, 16.8407s/10 iters), loss = 7.04955 I0605 19:21:10.415766 3300 solver.cpp:273] Train net output #0: det_loss1 = 4.34492 ( 1 = 4.34492 loss) I0605 19:21:10.415766 3300 solver.cpp:273] Train net output #1: det_loss2 = 2.30185 (* 1 = 2.30185 loss) I0605 19:21:10.415766 3300 sgd_solver.cpp:121] Iteration 12560, lr = 0.0005 I0605 19:21:12.959175 3300 yolov3_layer.cpp:350] avg_noobj: 0.00287661 avg_obj: 0.037193 avg_iou: 0.637186 avg_cat: 0.242921 recall: 0.84307 recall75: 0.218659 count: 10 I0605 19:21:12.986104 3300 yolov3_layer.cpp:350] avg_noobj: 0.000288361 avg_obj: 0.00429907 avg_iou: 0.566808 avg_cat: 0.341548 recall: 0.709524 recall75: 0 count: 3 I0605 19:21:18.145488 3300 yolov3_layer.cpp:350] avg_noobj: 0.00221051 avg_obj: 0.0170261 avg_iou: 0.609285 avg_cat: 0.229931 recall: 0.75377 recall75: 0.21076 count: 11 I0605 19:21:18.201313 3300 yolov3_layer.cpp:350] avg_noobj: 0.00013712 avg_obj: 0.00131666 avg_iou: 0.553459 avg_cat: 0.31128 recall: 0.690774 recall75: 0.0741071 count: 6 I0605 19:21:22.322890 3300 yolov3_layer.cpp:350] avg_noobj: 0.00284479 avg_obj: 0.0270393 avg_iou: 0.667795 avg_cat: 0.176689 recall: 0.923801 recall75: 0.305505 count: 10 I0605 19:21:22.372757 3300 yolov3_layer.cpp:350] avg_noobj: 0.000349923 avg_obj: 0.00619466 avg_iou: 0.504442 avg_cat: 0.260977 recall: 0.612037 recall75: 0 count: 6 I0605 19:21:26.330718 3300 yolov3_layer.cpp:350] avg_noobj: 0.00285011 avg_obj: 0.044199 avg_iou: 0.661605 avg_cat: 0.23663 recall: 0.868804 recall75: 0.290537 count: 11 I0605 19:21:26.359613 3300 yolov3_layer.cpp:350] avg_noobj: 0.00036655 avg_obj: 0.00700356 avg_iou: 0.538065 avg_cat: 0.170565 recall: 0.711806 recall75: 0 count: 2
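The yolov3_layer.cpp lines above are the most useful part of this log for diagnosis (avg_obj, avg_iou and recall per head). A small, self-contained sketch for extracting them is below; it assumes the console output was redirected to a file, and "train.log" is just a placeholder path. The regular expression simply mirrors the line format shown in the log above.

```python
import re

# Pull the per-head training statistics out of yolov3_layer.cpp log lines.
# "train.log" is a placeholder for wherever the console output was saved.
pattern = re.compile(
    r"avg_noobj: (\S+) avg_obj: (\S+) avg_iou: (\S+) avg_cat: (\S+) "
    r"recall: (\S+) recall75: (\S+) count: (\d+)")

fields = ["avg_noobj", "avg_obj", "avg_iou", "avg_cat", "recall", "recall75", "count"]
records = []
with open("train.log") as f:
    for line in f:
        for m in pattern.finditer(line):
            records.append(dict(zip(fields, map(float, m.groups()))))

if records:
    last = records[-1]
    print("parsed %d records, last avg_iou=%.3f recall=%.3f avg_obj=%.4f"
          % (len(records), last["avg_iou"], last["recall"], last["avg_obj"]))
```

In the excerpt above, avg_iou and recall look reasonable, but avg_obj stays around 0.03 at iteration ~12500, i.e. the objectness score on matched anchors is still very low, which fits the symptom of getting few or weak detections at deploy time.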

guods commented 5 years ago

I met the same problem. Did you solve it? Could you tell me how you did it?

LEXUSAPI commented 4 years ago

> I met the same problem. Did you solve it? Could you tell me how you did it?

It came down to whether the pre-trained model was being loaded during training. If you are Chinese, you can add me on WeChat; the number is 13083501623.
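For anyone else hitting this: the fix described above is to make sure the pre-trained MobileNet backbone weights are actually loaded before fine-tuning, instead of training the backbone from scratch. A minimal pycaffe sketch is below; the weight file name is a placeholder for whatever pre-trained MobileNet .caffemodel you use (on the command line, the equivalent is passing that file to `caffe train` via `--weights`).

```python
import caffe

caffe.set_mode_gpu()

# Build the solver from the same solver prototxt posted above.
solver = caffe.get_solver("models/yolov3/mobilenet_yolov3_lite_solver.prototxt")

# Copy weights into every layer whose name matches the pre-trained backbone
# (conv0 ... conv13); the new head layers (conv16 and up) keep their random init.
# "mobilenet.caffemodel" is a placeholder for the actual pre-trained file.
solver.net.copy_from("mobilenet.caffemodel")

solver.solve()
```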