sanghoon / pva-faster-rcnn

Demo code for PVANet
https://arxiv.org/abs/1611.08588

About base_size #36

Closed songjmcn closed 7 years ago

songjmcn commented 7 years ago

I added an upsampling layer to upsample the hyper feature, so base_size should be 8 and feat_stride should be 8. But it does not work as I expected. This is my prototxt; what's wrong with it? (A stride check is sketched after the prototxt.)

name: "PVANET-lite"

################################################################################

# Input

################################################################################

layer { name: 'input-data' type: 'Python' top: 'data' top: 'im_info' top: 'gt_boxes' include { phase: TRAIN } python_param { module: 'roi_data_layer.layer' layer: 'RoIDataLayer' param_str: "'num_classes': 21" } }
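# 'num_classes': 21 = 20 object classes (presumably the PASCAL VOC set) + 1 background class.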

layer {

name: "input-data"

type: "DummyData"

top: "data"

top: "im_info"

include { phase: TEST }

dummy_data_param {

shape { dim: 1 dim: 3 dim: 640 dim: 1056 }

shape { dim: 1 dim: 4 }

}

}

################################################################################

# Conv 1

################################################################################ layer { name: "conv1" type: "Convolution" bottom: "data" top: "conv1" param { lr_mult: 0.1 decay_mult: 0.1 } param { lr_mult: 0.2 decay_mult: 0 } convolution_param { num_output: 32 kernel_size: 4 stride: 2 pad: 1 weight_filler { type: "xavier" std: 0.1 } bias_filler { type: "constant" value: 0.1 } } } layer { name: "conv1/bn" type: "BatchNorm" bottom: "conv1" top: "conv1" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } batch_norm_param { use_global_stats: true } } layer { name: "conv1/scale" type: "Scale" bottom: "conv1" top: "conv1" param { lr_mult: 0.1 decay_mult: 0 } param { lr_mult: 0.1 decay_mult: 0 } scale_param { bias_term: true } } layer { name: "relu1" type: "ReLU" bottom: "conv1" top: "conv1" }

################################################################################

# Conv 2

################################################################################ layer { name: "conv2" type: "Convolution" bottom: "conv1" top: "conv2" param { lr_mult: 0.1 decay_mult: 0.1 } param { lr_mult: 0.2 decay_mult: 0 } convolution_param { num_output: 48 kernel_size: 3 stride: 2 pad: 1 weight_filler { type: "xavier" std: 0.1 } bias_filler { type: "constant" value: 0.1 } } } layer { name: "conv2/bn" type: "BatchNorm" bottom: "conv2" top: "conv2" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } batch_norm_param { use_global_stats: true } } layer { name: "conv2/scale" type: "Scale" bottom: "conv2" top: "conv2" param { lr_mult: 0.1 decay_mult: 0 } param { lr_mult: 0.1 decay_mult: 0 } scale_param { bias_term: true } } layer { name: "relu2" type: "ReLU" bottom: "conv2" top: "conv2" }

################################################################################

# Conv 3

################################################################################ layer { name: "conv3" type: "Convolution" bottom: "conv2" top: "conv3" param { lr_mult: 0.1 decay_mult: 0.1 } param { lr_mult: 0.2 decay_mult: 0 } convolution_param { num_output: 96 kernel_size: 3 stride: 2 pad: 1 weight_filler { type: "xavier" std: 0.1 } bias_filler { type: "constant" value: 0.1 } } } layer { name: "conv3/bn" type: "BatchNorm" bottom: "conv3" top: "conv3" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } batch_norm_param { use_global_stats: true } } layer { name: "conv3/scale" type: "Scale" bottom: "conv3" top: "conv3" param { lr_mult: 0.1 decay_mult: 0 } param { lr_mult: 0.1 decay_mult: 0 } scale_param { bias_term: true } } layer { name: "relu3" type: "ReLU" bottom: "conv3" top: "conv3" }

################################################################################

# Inception 3a

################################################################################ layer { name: "inc3a/pool1" type: "Pooling" bottom: "conv3" top: "inc3a/pool1" pooling_param { kernel_size: 3 stride: 2 pad: 0 pool: MAX } } layer { name: "inc3a/conv1" type: "Convolution" bottom: "inc3a/pool1" top: "inc3a/conv1" param { lr_mult: 0.1 decay_mult: 0.1 } param { lr_mult: 0.2 decay_mult: 0 } convolution_param { num_output: 96 kernel_size: 1 weight_filler { type: "xavier" std: 0.1 } bias_filler { type: "constant" value: 0.1 } } } layer { name: "inc3a/conv1/bn" type: "BatchNorm" bottom: "inc3a/conv1" top: "inc3a/conv1" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } batch_norm_param { use_global_stats: true } } layer { name: "inc3a/conv1/scale" type: "Scale" bottom: "inc3a/conv1" top: "inc3a/conv1" param { lr_mult: 0.1 decay_mult: 0 } param { lr_mult: 0.1 decay_mult: 0 } scale_param { bias_term: true } } layer { name: "inc3a/relu1" type: "ReLU" bottom: "inc3a/conv1" top: "inc3a/conv1" } layer { name: "inc3a/conv3_1" type: "Convolution" bottom: "conv3" top: "inc3a/conv3_1" param { lr_mult: 0.1 decay_mult: 0.1 } param { lr_mult: 0.2 decay_mult: 0 } convolution_param { num_output: 16 kernel_size: 1 weight_filler { type: "xavier" std: 0.1 } bias_filler { type: "constant" value: 0.1 } } } layer { name: "inc3a/conv3_1/bn" type: "BatchNorm" bottom: "inc3a/conv3_1" top: "inc3a/conv3_1" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } batch_norm_param { use_global_stats: true } } layer { name: "inc3a/conv3_1/scale" type: "Scale" bottom: "inc3a/conv3_1" top: "inc3a/conv3_1" param { lr_mult: 0.1 decay_mult: 0 } param { lr_mult: 0.1 decay_mult: 0 } scale_param { bias_term: true } } layer { name: "inc3a/relu3_1" type: "ReLU" bottom: "inc3a/conv3_1" top: "inc3a/conv3_1" } layer { name: "inc3a/conv3_2" type: "Convolution" bottom: "inc3a/conv3_1" top: "inc3a/conv3_2" param { lr_mult: 0.1 decay_mult: 0.1 } param { lr_mult: 0.2 decay_mult: 0 } convolution_param { num_output: 64 kernel_size: 3 stride: 2 pad: 1 weight_filler { type: "xavier" std: 0.1 } bias_filler { type: "constant" value: 0.1 } } } layer { name: "inc3a/conv3_2/bn" type: "BatchNorm" bottom: "inc3a/conv3_2" top: "inc3a/conv3_2" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } batch_norm_param { use_global_stats: true } } layer { name: "inc3a/conv3_2/scale" type: "Scale" bottom: "inc3a/conv3_2" top: "inc3a/conv3_2" param { lr_mult: 0.1 decay_mult: 0 } param { lr_mult: 0.1 decay_mult: 0 } scale_param { bias_term: true } } layer { name: "inc3a/relu3_2" type: "ReLU" bottom: "inc3a/conv3_2" top: "inc3a/conv3_2" } layer { name: "inc3a/conv5_1" type: "Convolution" bottom: "conv3" top: "inc3a/conv5_1" param { lr_mult: 0.1 decay_mult: 0.1 } param { lr_mult: 0.2 decay_mult: 0 } convolution_param { num_output: 16 kernel_size: 1 weight_filler { type: "xavier" std: 0.1 } bias_filler { type: "constant" value: 0.1 } } } layer { name: "inc3a/conv5_1/bn" type: "BatchNorm" bottom: "inc3a/conv5_1" top: "inc3a/conv5_1" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } batch_norm_param { use_global_stats: true } } layer { name: "inc3a/conv5_1/scale" type: "Scale" bottom: "inc3a/conv5_1" top: "inc3a/conv5_1" param { lr_mult: 0.1 decay_mult: 0 } param { lr_mult: 0.1 decay_mult: 0 } scale_param { bias_term: true } } layer { name: "inc3a/relu5_1" type: 
"ReLU" bottom: "inc3a/conv5_1" top: "inc3a/conv5_1" } layer { name: "inc3a/conv5_2" type: "Convolution" bottom: "inc3a/conv5_1" top: "inc3a/conv5_2" param { lr_mult: 0.1 decay_mult: 0.1 } param { lr_mult: 0.2 decay_mult: 0 } convolution_param { num_output: 32 kernel_size: 3 pad: 1 weight_filler { type: "xavier" std: 0.1 } bias_filler { type: "constant" value: 0.1 } } } layer { name: "inc3a/conv5_2/bn" type: "BatchNorm" bottom: "inc3a/conv5_2" top: "inc3a/conv5_2" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } batch_norm_param { use_global_stats: true } } layer { name: "inc3a/conv5_2/scale" type: "Scale" bottom: "inc3a/conv5_2" top: "inc3a/conv5_2" param { lr_mult: 0.1 decay_mult: 0 } param { lr_mult: 0.1 decay_mult: 0 } scale_param { bias_term: true } } layer { name: "inc3a/relu5_2" type: "ReLU" bottom: "inc3a/conv5_2" top: "inc3a/conv5_2" } layer { name: "inc3a/conv5_3" type: "Convolution" bottom: "inc3a/conv5_2" top: "inc3a/conv5_3" param { lr_mult: 0.1 decay_mult: 0.1 } param { lr_mult: 0.2 decay_mult: 0 } convolution_param { num_output: 32 kernel_size: 3 stride: 2 pad: 1 weight_filler { type: "xavier" std: 0.1 } bias_filler { type: "constant" value: 0.1 } } } layer { name: "inc3a/conv5_3/bn" type: "BatchNorm" bottom: "inc3a/conv5_3" top: "inc3a/conv5_3" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } batch_norm_param { use_global_stats: true } } layer { name: "inc3a/conv5_3/scale" type: "Scale" bottom: "inc3a/conv5_3" top: "inc3a/conv5_3" param { lr_mult: 0.1 decay_mult: 0 } param { lr_mult: 0.1 decay_mult: 0 } scale_param { bias_term: true } } layer { name: "inc3a/relu5_3" type: "ReLU" bottom: "inc3a/conv5_3" top: "inc3a/conv5_3" } layer { name: "inc3a" type: "Concat" bottom: "inc3a/conv1" bottom: "inc3a/conv3_2" bottom: "inc3a/conv5_3" top: "inc3a" }

################################################################################

# Inception 3b

################################################################################ layer { name: "inc3b/conv1" type: "Convolution" bottom: "inc3a" top: "inc3b/conv1" param { lr_mult: 0.1 decay_mult: 0.1 } param { lr_mult: 0.2 decay_mult: 0 } convolution_param { num_output: 96 kernel_size: 1 weight_filler { type: "xavier" std: 0.1 } bias_filler { type: "constant" value: 0.1 } } } layer { name: "inc3b/conv1/bn" type: "BatchNorm" bottom: "inc3b/conv1" top: "inc3b/conv1" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } batch_norm_param { use_global_stats: true } } layer { name: "inc3b/conv1/scale" type: "Scale" bottom: "inc3b/conv1" top: "inc3b/conv1" param { lr_mult: 0.1 decay_mult: 0 } param { lr_mult: 0.1 decay_mult: 0 } scale_param { bias_term: true } } layer { name: "inc3b/relu1" type: "ReLU" bottom: "inc3b/conv1" top: "inc3b/conv1" } layer { name: "inc3b/conv3_1" type: "Convolution" bottom: "inc3a" top: "inc3b/conv3_1" param { lr_mult: 0.1 decay_mult: 0.1 } param { lr_mult: 0.2 decay_mult: 0 } convolution_param { num_output: 16 kernel_size: 1 weight_filler { type: "xavier" std: 0.1 } bias_filler { type: "constant" value: 0.1 } } } layer { name: "inc3b/conv3_1/bn" type: "BatchNorm" bottom: "inc3b/conv3_1" top: "inc3b/conv3_1" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } batch_norm_param { use_global_stats: true } } layer { name: "inc3b/conv3_1/scale" type: "Scale" bottom: "inc3b/conv3_1" top: "inc3b/conv3_1" param { lr_mult: 0.1 decay_mult: 0 } param { lr_mult: 0.1 decay_mult: 0 } scale_param { bias_term: true } } layer { name: "inc3b/relu3_1" type: "ReLU" bottom: "inc3b/conv3_1" top: "inc3b/conv3_1" } layer { name: "inc3b/conv3_2" type: "Convolution" bottom: "inc3b/conv3_1" top: "inc3b/conv3_2" param { lr_mult: 0.1 decay_mult: 0.1 } param { lr_mult: 0.2 decay_mult: 0 } convolution_param { num_output: 64 kernel_size: 3 pad: 1 weight_filler { type: "xavier" std: 0.1 } bias_filler { type: "constant" value: 0.1 } } } layer { name: "inc3b/conv3_2/bn" type: "BatchNorm" bottom: "inc3b/conv3_2" top: "inc3b/conv3_2" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } batch_norm_param { use_global_stats: true } } layer { name: "inc3b/conv3_2/scale" type: "Scale" bottom: "inc3b/conv3_2" top: "inc3b/conv3_2" param { lr_mult: 0.1 decay_mult: 0 } param { lr_mult: 0.1 decay_mult: 0 } scale_param { bias_term: true } } layer { name: "inc3b/relu3_2" type: "ReLU" bottom: "inc3b/conv3_2" top: "inc3b/conv3_2" } layer { name: "inc3b/conv5_1" type: "Convolution" bottom: "inc3a" top: "inc3b/conv5_1" param { lr_mult: 0.1 decay_mult: 0.1 } param { lr_mult: 0.2 decay_mult: 0 } convolution_param { num_output: 16 kernel_size: 1 weight_filler { type: "xavier" std: 0.1 } bias_filler { type: "constant" value: 0.1 } } } layer { name: "inc3b/conv5_1/bn" type: "BatchNorm" bottom: "inc3b/conv5_1" top: "inc3b/conv5_1" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } batch_norm_param { use_global_stats: true } } layer { name: "inc3b/conv5_1/scale" type: "Scale" bottom: "inc3b/conv5_1" top: "inc3b/conv5_1" param { lr_mult: 0.1 decay_mult: 0 } param { lr_mult: 0.1 decay_mult: 0 } scale_param { bias_term: true } } layer { name: "inc3b/relu5_1" type: "ReLU" bottom: "inc3b/conv5_1" top: "inc3b/conv5_1" } layer { name: "inc3b/conv5_2" type: "Convolution" bottom: "inc3b/conv5_1" top: "inc3b/conv5_2" param { 
lr_mult: 0.1 decay_mult: 0.1 } param { lr_mult: 0.2 decay_mult: 0 } convolution_param { num_output: 32 kernel_size: 3 pad: 1 weight_filler { type: "xavier" std: 0.1 } bias_filler { type: "constant" value: 0.1 } } } layer { name: "inc3b/conv5_2/bn" type: "BatchNorm" bottom: "inc3b/conv5_2" top: "inc3b/conv5_2" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } batch_norm_param { use_global_stats: true } } layer { name: "inc3b/conv5_2/scale" type: "Scale" bottom: "inc3b/conv5_2" top: "inc3b/conv5_2" param { lr_mult: 0.1 decay_mult: 0 } param { lr_mult: 0.1 decay_mult: 0 } scale_param { bias_term: true } } layer { name: "inc3b/relu5_2" type: "ReLU" bottom: "inc3b/conv5_2" top: "inc3b/conv5_2" } layer { name: "inc3b/conv5_3" type: "Convolution" bottom: "inc3b/conv5_2" top: "inc3b/conv5_3" param { lr_mult: 0.1 decay_mult: 0.1 } param { lr_mult: 0.2 decay_mult: 0 } convolution_param { num_output: 32 kernel_size: 3 pad: 1 weight_filler { type: "xavier" std: 0.1 } bias_filler { type: "constant" value: 0.1 } } } layer { name: "inc3b/conv5_3/bn" type: "BatchNorm" bottom: "inc3b/conv5_3" top: "inc3b/conv5_3" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } batch_norm_param { use_global_stats: true } } layer { name: "inc3b/conv5_3/scale" type: "Scale" bottom: "inc3b/conv5_3" top: "inc3b/conv5_3" param { lr_mult: 0.1 decay_mult: 0 } param { lr_mult: 0.1 decay_mult: 0 } scale_param { bias_term: true } } layer { name: "inc3b/relu5_3" type: "ReLU" bottom: "inc3b/conv5_3" top: "inc3b/conv5_3" } layer { name: "inc3b" type: "Concat" bottom: "inc3b/conv1" bottom: "inc3b/conv3_2" bottom: "inc3b/conv5_3" top: "inc3b" }

################################################################################

# Inception 3c

################################################################################ layer { name: "inc3c/conv1" type: "Convolution" bottom: "inc3b" top: "inc3c/conv1" param { lr_mult: 0.1 decay_mult: 0.1 } param { lr_mult: 0.2 decay_mult: 0 } convolution_param { num_output: 96 kernel_size: 1 weight_filler { type: "xavier" std: 0.1 } bias_filler { type: "constant" value: 0.1 } } } layer { name: "inc3c/conv1/bn" type: "BatchNorm" bottom: "inc3c/conv1" top: "inc3c/conv1" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } batch_norm_param { use_global_stats: true } } layer { name: "inc3c/conv1/scale" type: "Scale" bottom: "inc3c/conv1" top: "inc3c/conv1" param { lr_mult: 0.1 decay_mult: 0 } param { lr_mult: 0.1 decay_mult: 0 } scale_param { bias_term: true } } layer { name: "inc3c/relu1" type: "ReLU" bottom: "inc3c/conv1" top: "inc3c/conv1" } layer { name: "inc3c/conv3_1" type: "Convolution" bottom: "inc3b" top: "inc3c/conv3_1" param { lr_mult: 0.1 decay_mult: 0.1 } param { lr_mult: 0.2 decay_mult: 0 } convolution_param { num_output: 16 kernel_size: 1 weight_filler { type: "xavier" std: 0.1 } bias_filler { type: "constant" value: 0.1 } } } layer { name: "inc3c/conv3_1/bn" type: "BatchNorm" bottom: "inc3c/conv3_1" top: "inc3c/conv3_1" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } batch_norm_param { use_global_stats: true } } layer { name: "inc3c/conv3_1/scale" type: "Scale" bottom: "inc3c/conv3_1" top: "inc3c/conv3_1" param { lr_mult: 0.1 decay_mult: 0 } param { lr_mult: 0.1 decay_mult: 0 } scale_param { bias_term: true } } layer { name: "inc3c/relu3_1" type: "ReLU" bottom: "inc3c/conv3_1" top: "inc3c/conv3_1" } layer { name: "inc3c/conv3_2" type: "Convolution" bottom: "inc3c/conv3_1" top: "inc3c/conv3_2" param { lr_mult: 0.1 decay_mult: 0.1 } param { lr_mult: 0.2 decay_mult: 0 } convolution_param { num_output: 64 kernel_size: 3 pad: 1 weight_filler { type: "xavier" std: 0.1 } bias_filler { type: "constant" value: 0.1 } } } layer { name: "inc3c/conv3_2/bn" type: "BatchNorm" bottom: "inc3c/conv3_2" top: "inc3c/conv3_2" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } batch_norm_param { use_global_stats: true } } layer { name: "inc3c/conv3_2/scale" type: "Scale" bottom: "inc3c/conv3_2" top: "inc3c/conv3_2" param { lr_mult: 0.1 decay_mult: 0 } param { lr_mult: 0.1 decay_mult: 0 } scale_param { bias_term: true } } layer { name: "inc3c/relu3_2" type: "ReLU" bottom: "inc3c/conv3_2" top: "inc3c/conv3_2" } layer { name: "inc3c/conv5_1" type: "Convolution" bottom: "inc3b" top: "inc3c/conv5_1" param { lr_mult: 0.1 decay_mult: 0.1 } param { lr_mult: 0.2 decay_mult: 0 } convolution_param { num_output: 16 kernel_size: 1 weight_filler { type: "xavier" std: 0.1 } bias_filler { type: "constant" value: 0.1 } } } layer { name: "inc3c/conv5_1/bn" type: "BatchNorm" bottom: "inc3c/conv5_1" top: "inc3c/conv5_1" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } batch_norm_param { use_global_stats: true } } layer { name: "inc3c/conv5_1/scale" type: "Scale" bottom: "inc3c/conv5_1" top: "inc3c/conv5_1" param { lr_mult: 0.1 decay_mult: 0 } param { lr_mult: 0.1 decay_mult: 0 } scale_param { bias_term: true } } layer { name: "inc3c/relu5_1" type: "ReLU" bottom: "inc3c/conv5_1" top: "inc3c/conv5_1" } layer { name: "inc3c/conv5_2" type: "Convolution" bottom: "inc3c/conv5_1" top: "inc3c/conv5_2" param { 
lr_mult: 0.1 decay_mult: 0.1 } param { lr_mult: 0.2 decay_mult: 0 } convolution_param { num_output: 32 kernel_size: 3 pad: 1 weight_filler { type: "xavier" std: 0.1 } bias_filler { type: "constant" value: 0.1 } } } layer { name: "inc3c/conv5_2/bn" type: "BatchNorm" bottom: "inc3c/conv5_2" top: "inc3c/conv5_2" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } batch_norm_param { use_global_stats: true } } layer { name: "inc3c/conv5_2/scale" type: "Scale" bottom: "inc3c/conv5_2" top: "inc3c/conv5_2" param { lr_mult: 0.1 decay_mult: 0 } param { lr_mult: 0.1 decay_mult: 0 } scale_param { bias_term: true } } layer { name: "inc3c/relu5_2" type: "ReLU" bottom: "inc3c/conv5_2" top: "inc3c/conv5_2" } layer { name: "inc3c/conv5_3" type: "Convolution" bottom: "inc3c/conv5_2" top: "inc3c/conv5_3" param { lr_mult: 0.1 decay_mult: 0.1 } param { lr_mult: 0.2 decay_mult: 0 } convolution_param { num_output: 32 kernel_size: 3 pad: 1 weight_filler { type: "xavier" std: 0.1 } bias_filler { type: "constant" value: 0.1 } } } layer { name: "inc3c/conv5_3/bn" type: "BatchNorm" bottom: "inc3c/conv5_3" top: "inc3c/conv5_3" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } batch_norm_param { use_global_stats: true } } layer { name: "inc3c/conv5_3/scale" type: "Scale" bottom: "inc3c/conv5_3" top: "inc3c/conv5_3" param { lr_mult: 0.1 decay_mult: 0 } param { lr_mult: 0.1 decay_mult: 0 } scale_param { bias_term: true } } layer { name: "inc3c/relu5_3" type: "ReLU" bottom: "inc3c/conv5_3" top: "inc3c/conv5_3" } layer { name: "inc3c" type: "Concat" bottom: "inc3c/conv1" bottom: "inc3c/conv3_2" bottom: "inc3c/conv5_3" top: "inc3c" }

################################################################################

# Inception 3d

################################################################################ layer { name: "inc3d/conv1" type: "Convolution" bottom: "inc3c" top: "inc3d/conv1" param { lr_mult: 0.1 decay_mult: 0.1 } param { lr_mult: 0.2 decay_mult: 0 } convolution_param { num_output: 96 kernel_size: 1 weight_filler { type: "xavier" std: 0.1 } bias_filler { type: "constant" value: 0.1 } } } layer { name: "inc3d/conv1/bn" type: "BatchNorm" bottom: "inc3d/conv1" top: "inc3d/conv1" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } batch_norm_param { use_global_stats: true } } layer { name: "inc3d/conv1/scale" type: "Scale" bottom: "inc3d/conv1" top: "inc3d/conv1" param { lr_mult: 0.1 decay_mult: 0 } param { lr_mult: 0.1 decay_mult: 0 } scale_param { bias_term: true } } layer { name: "inc3d/relu1" type: "ReLU" bottom: "inc3d/conv1" top: "inc3d/conv1" } layer { name: "inc3d/conv3_1" type: "Convolution" bottom: "inc3c" top: "inc3d/conv3_1" param { lr_mult: 0.1 decay_mult: 0.1 } param { lr_mult: 0.2 decay_mult: 0 } convolution_param { num_output: 16 kernel_size: 1 weight_filler { type: "xavier" std: 0.1 } bias_filler { type: "constant" value: 0.1 } } } layer { name: "inc3d/conv3_1/bn" type: "BatchNorm" bottom: "inc3d/conv3_1" top: "inc3d/conv3_1" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } batch_norm_param { use_global_stats: true } } layer { name: "inc3d/conv3_1/scale" type: "Scale" bottom: "inc3d/conv3_1" top: "inc3d/conv3_1" param { lr_mult: 0.1 decay_mult: 0 } param { lr_mult: 0.1 decay_mult: 0 } scale_param { bias_term: true } } layer { name: "inc3d/relu3_1" type: "ReLU" bottom: "inc3d/conv3_1" top: "inc3d/conv3_1" } layer { name: "inc3d/conv3_2" type: "Convolution" bottom: "inc3d/conv3_1" top: "inc3d/conv3_2" param { lr_mult: 0.1 decay_mult: 0.1 } param { lr_mult: 0.2 decay_mult: 0 } convolution_param { num_output: 64 kernel_size: 3 pad: 1 weight_filler { type: "xavier" std: 0.1 } bias_filler { type: "constant" value: 0.1 } } } layer { name: "inc3d/conv3_2/bn" type: "BatchNorm" bottom: "inc3d/conv3_2" top: "inc3d/conv3_2" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } batch_norm_param { use_global_stats: true } } layer { name: "inc3d/conv3_2/scale" type: "Scale" bottom: "inc3d/conv3_2" top: "inc3d/conv3_2" param { lr_mult: 0.1 decay_mult: 0 } param { lr_mult: 0.1 decay_mult: 0 } scale_param { bias_term: true } } layer { name: "inc3d/relu3_2" type: "ReLU" bottom: "inc3d/conv3_2" top: "inc3d/conv3_2" } layer { name: "inc3d/conv5_1" type: "Convolution" bottom: "inc3c" top: "inc3d/conv5_1" param { lr_mult: 0.1 decay_mult: 0.1 } param { lr_mult: 0.2 decay_mult: 0 } convolution_param { num_output: 16 kernel_size: 1 weight_filler { type: "xavier" std: 0.1 } bias_filler { type: "constant" value: 0.1 } } } layer { name: "inc3d/conv5_1/bn" type: "BatchNorm" bottom: "inc3d/conv5_1" top: "inc3d/conv5_1" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } batch_norm_param { use_global_stats: true } } layer { name: "inc3d/conv5_1/scale" type: "Scale" bottom: "inc3d/conv5_1" top: "inc3d/conv5_1" param { lr_mult: 0.1 decay_mult: 0 } param { lr_mult: 0.1 decay_mult: 0 } scale_param { bias_term: true } } layer { name: "inc3d/relu5_1" type: "ReLU" bottom: "inc3d/conv5_1" top: "inc3d/conv5_1" } layer { name: "inc3d/conv5_2" type: "Convolution" bottom: "inc3d/conv5_1" top: "inc3d/conv5_2" param { 
lr_mult: 0.1 decay_mult: 0.1 } param { lr_mult: 0.2 decay_mult: 0 } convolution_param { num_output: 32 kernel_size: 3 pad: 1 weight_filler { type: "xavier" std: 0.1 } bias_filler { type: "constant" value: 0.1 } } } layer { name: "inc3d/conv5_2/bn" type: "BatchNorm" bottom: "inc3d/conv5_2" top: "inc3d/conv5_2" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } batch_norm_param { use_global_stats: true } } layer { name: "inc3d/conv5_2/scale" type: "Scale" bottom: "inc3d/conv5_2" top: "inc3d/conv5_2" param { lr_mult: 0.1 decay_mult: 0 } param { lr_mult: 0.1 decay_mult: 0 } scale_param { bias_term: true } } layer { name: "inc3d/relu5_2" type: "ReLU" bottom: "inc3d/conv5_2" top: "inc3d/conv5_2" } layer { name: "inc3d/conv5_3" type: "Convolution" bottom: "inc3d/conv5_2" top: "inc3d/conv5_3" param { lr_mult: 0.1 decay_mult: 0.1 } param { lr_mult: 0.2 decay_mult: 0 } convolution_param { num_output: 32 kernel_size: 3 pad: 1 weight_filler { type: "xavier" std: 0.1 } bias_filler { type: "constant" value: 0.1 } } } layer { name: "inc3d/conv5_3/bn" type: "BatchNorm" bottom: "inc3d/conv5_3" top: "inc3d/conv5_3" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } batch_norm_param { use_global_stats: true } } layer { name: "inc3d/conv5_3/scale" type: "Scale" bottom: "inc3d/conv5_3" top: "inc3d/conv5_3" param { lr_mult: 0.1 decay_mult: 0 } param { lr_mult: 0.1 decay_mult: 0 } scale_param { bias_term: true } } layer { name: "inc3d/relu5_3" type: "ReLU" bottom: "inc3d/conv5_3" top: "inc3d/conv5_3" } layer { name: "inc3d" type: "Concat" bottom: "inc3d/conv1" bottom: "inc3d/conv3_2" bottom: "inc3d/conv5_3" top: "inc3d" }

################################################################################

# Inception 3e

################################################################################ layer { name: "inc3e/conv1" type: "Convolution" bottom: "inc3d" top: "inc3e/conv1" param { lr_mult: 0.1 decay_mult: 0.1 } param { lr_mult: 0.2 decay_mult: 0 } convolution_param { num_output: 96 kernel_size: 1 weight_filler { type: "xavier" std: 0.1 } bias_filler { type: "constant" value: 0.1 } } } layer { name: "inc3e/conv1/bn" type: "BatchNorm" bottom: "inc3e/conv1" top: "inc3e/conv1" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } batch_norm_param { use_global_stats: true } } layer { name: "inc3e/conv1/scale" type: "Scale" bottom: "inc3e/conv1" top: "inc3e/conv1" param { lr_mult: 0.1 decay_mult: 0 } param { lr_mult: 0.1 decay_mult: 0 } scale_param { bias_term: true } } layer { name: "inc3e/relu1" type: "ReLU" bottom: "inc3e/conv1" top: "inc3e/conv1" } layer { name: "inc3e/conv3_1" type: "Convolution" bottom: "inc3d" top: "inc3e/conv3_1" param { lr_mult: 0.1 decay_mult: 0.1 } param { lr_mult: 0.2 decay_mult: 0 } convolution_param { num_output: 16 kernel_size: 1 weight_filler { type: "xavier" std: 0.1 } bias_filler { type: "constant" value: 0.1 } } } layer { name: "inc3e/conv3_1/bn" type: "BatchNorm" bottom: "inc3e/conv3_1" top: "inc3e/conv3_1" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } batch_norm_param { use_global_stats: true } } layer { name: "inc3e/conv3_1/scale" type: "Scale" bottom: "inc3e/conv3_1" top: "inc3e/conv3_1" param { lr_mult: 0.1 decay_mult: 0 } param { lr_mult: 0.1 decay_mult: 0 } scale_param { bias_term: true } } layer { name: "inc3e/relu3_1" type: "ReLU" bottom: "inc3e/conv3_1" top: "inc3e/conv3_1" } layer { name: "inc3e/conv3_2" type: "Convolution" bottom: "inc3e/conv3_1" top: "inc3e/conv3_2" param { lr_mult: 0.1 decay_mult: 0.1 } param { lr_mult: 0.2 decay_mult: 0 } convolution_param { num_output: 64 kernel_size: 3 pad: 1 weight_filler { type: "xavier" std: 0.1 } bias_filler { type: "constant" value: 0.1 } } } layer { name: "inc3e/conv3_2/bn" type: "BatchNorm" bottom: "inc3e/conv3_2" top: "inc3e/conv3_2" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } batch_norm_param { use_global_stats: true } } layer { name: "inc3e/conv3_2/scale" type: "Scale" bottom: "inc3e/conv3_2" top: "inc3e/conv3_2" param { lr_mult: 0.1 decay_mult: 0 } param { lr_mult: 0.1 decay_mult: 0 } scale_param { bias_term: true } } layer { name: "inc3e/relu3_2" type: "ReLU" bottom: "inc3e/conv3_2" top: "inc3e/conv3_2" } layer { name: "inc3e/conv5_1" type: "Convolution" bottom: "inc3d" top: "inc3e/conv5_1" param { lr_mult: 0.1 decay_mult: 0.1 } param { lr_mult: 0.2 decay_mult: 0 } convolution_param { num_output: 16 kernel_size: 1 weight_filler { type: "xavier" std: 0.1 } bias_filler { type: "constant" value: 0.1 } } } layer { name: "inc3e/conv5_1/bn" type: "BatchNorm" bottom: "inc3e/conv5_1" top: "inc3e/conv5_1" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } batch_norm_param { use_global_stats: true } } layer { name: "inc3e/conv5_1/scale" type: "Scale" bottom: "inc3e/conv5_1" top: "inc3e/conv5_1" param { lr_mult: 0.1 decay_mult: 0 } param { lr_mult: 0.1 decay_mult: 0 } scale_param { bias_term: true } } layer { name: "inc3e/relu5_1" type: "ReLU" bottom: "inc3e/conv5_1" top: "inc3e/conv5_1" } layer { name: "inc3e/conv5_2" type: "Convolution" bottom: "inc3e/conv5_1" top: "inc3e/conv5_2" param { 
lr_mult: 0.1 decay_mult: 0.1 } param { lr_mult: 0.2 decay_mult: 0 } convolution_param { num_output: 32 kernel_size: 3 pad: 1 weight_filler { type: "xavier" std: 0.1 } bias_filler { type: "constant" value: 0.1 } } } layer { name: "inc3e/conv5_2/bn" type: "BatchNorm" bottom: "inc3e/conv5_2" top: "inc3e/conv5_2" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } batch_norm_param { use_global_stats: true } } layer { name: "inc3e/conv5_2/scale" type: "Scale" bottom: "inc3e/conv5_2" top: "inc3e/conv5_2" param { lr_mult: 0.1 decay_mult: 0 } param { lr_mult: 0.1 decay_mult: 0 } scale_param { bias_term: true } } layer { name: "inc3e/relu5_2" type: "ReLU" bottom: "inc3e/conv5_2" top: "inc3e/conv5_2" } layer { name: "inc3e/conv5_3" type: "Convolution" bottom: "inc3e/conv5_2" top: "inc3e/conv5_3" param { lr_mult: 0.1 decay_mult: 0.1 } param { lr_mult: 0.2 decay_mult: 0 } convolution_param { num_output: 32 kernel_size: 3 pad: 1 weight_filler { type: "xavier" std: 0.1 } bias_filler { type: "constant" value: 0.1 } } } layer { name: "inc3e/conv5_3/bn" type: "BatchNorm" bottom: "inc3e/conv5_3" top: "inc3e/conv5_3" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } batch_norm_param { use_global_stats: true } } layer { name: "inc3e/conv5_3/scale" type: "Scale" bottom: "inc3e/conv5_3" top: "inc3e/conv5_3" param { lr_mult: 0.1 decay_mult: 0 } param { lr_mult: 0.1 decay_mult: 0 } scale_param { bias_term: true } } layer { name: "inc3e/relu5_3" type: "ReLU" bottom: "inc3e/conv5_3" top: "inc3e/conv5_3" } layer { name: "inc3e" type: "Concat" bottom: "inc3e/conv1" bottom: "inc3e/conv3_2" bottom: "inc3e/conv5_3" top: "inc3e" }

################################################################################

# Inception 4a

################################################################################ layer { name: "inc4a/pool1" type: "Pooling" bottom: "inc3e" top: "inc4a/pool1" pooling_param { kernel_size: 3 stride: 2 pad: 0 pool: MAX } } layer { name: "inc4a/conv1" type: "Convolution" bottom: "inc4a/pool1" top: "inc4a/conv1" param { lr_mult: 0.1 decay_mult: 0.1 } param { lr_mult: 0.2 decay_mult: 0 } convolution_param { num_output: 128 kernel_size: 1 weight_filler { type: "xavier" std: 0.1 } bias_filler { type: "constant" value: 0.1 } } } layer { name: "inc4a/conv1/bn" type: "BatchNorm" bottom: "inc4a/conv1" top: "inc4a/conv1" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } batch_norm_param { use_global_stats: true } } layer { name: "inc4a/conv1/scale" type: "Scale" bottom: "inc4a/conv1" top: "inc4a/conv1" param { lr_mult: 0.1 decay_mult: 0 } param { lr_mult: 0.1 decay_mult: 0 } scale_param { bias_term: true } } layer { name: "inc4a/relu1" type: "ReLU" bottom: "inc4a/conv1" top: "inc4a/conv1" } layer { name: "inc4a/conv3_1" type: "Convolution" bottom: "inc3e" top: "inc4a/conv3_1" param { lr_mult: 0.1 decay_mult: 0.1 } param { lr_mult: 0.2 decay_mult: 0 } convolution_param { num_output: 32 kernel_size: 1 weight_filler { type: "xavier" std: 0.1 } bias_filler { type: "constant" value: 0.1 } } } layer { name: "inc4a/conv3_1/bn" type: "BatchNorm" bottom: "inc4a/conv3_1" top: "inc4a/conv3_1" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } batch_norm_param { use_global_stats: true } } layer { name: "inc4a/conv3_1/scale" type: "Scale" bottom: "inc4a/conv3_1" top: "inc4a/conv3_1" param { lr_mult: 0.1 decay_mult: 0 } param { lr_mult: 0.1 decay_mult: 0 } scale_param { bias_term: true } } layer { name: "inc4a/relu3_1" type: "ReLU" bottom: "inc4a/conv3_1" top: "inc4a/conv3_1" } layer { name: "inc4a/conv3_2" type: "Convolution" bottom: "inc4a/conv3_1" top: "inc4a/conv3_2" param { lr_mult: 0.1 decay_mult: 0.1 } param { lr_mult: 0.2 decay_mult: 0 } convolution_param { num_output: 96 kernel_size: 3 stride: 2 pad: 1 weight_filler { type: "xavier" std: 0.1 } bias_filler { type: "constant" value: 0.1 } } } layer { name: "inc4a/conv3_2/bn" type: "BatchNorm" bottom: "inc4a/conv3_2" top: "inc4a/conv3_2" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } batch_norm_param { use_global_stats: true } } layer { name: "inc4a/conv3_2/scale" type: "Scale" bottom: "inc4a/conv3_2" top: "inc4a/conv3_2" param { lr_mult: 0.1 decay_mult: 0 } param { lr_mult: 0.1 decay_mult: 0 } scale_param { bias_term: true } } layer { name: "inc4a/relu3_2" type: "ReLU" bottom: "inc4a/conv3_2" top: "inc4a/conv3_2" } layer { name: "inc4a/conv5_1" type: "Convolution" bottom: "inc3e" top: "inc4a/conv5_1" param { lr_mult: 0.1 decay_mult: 0.1 } param { lr_mult: 0.2 decay_mult: 0 } convolution_param { num_output: 16 kernel_size: 1 weight_filler { type: "xavier" std: 0.1 } bias_filler { type: "constant" value: 0.1 } } } layer { name: "inc4a/conv5_1/bn" type: "BatchNorm" bottom: "inc4a/conv5_1" top: "inc4a/conv5_1" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } batch_norm_param { use_global_stats: true } } layer { name: "inc4a/conv5_1/scale" type: "Scale" bottom: "inc4a/conv5_1" top: "inc4a/conv5_1" param { lr_mult: 0.1 decay_mult: 0 } param { lr_mult: 0.1 decay_mult: 0 } scale_param { bias_term: true } } layer { name: "inc4a/relu5_1" type: 
"ReLU" bottom: "inc4a/conv5_1" top: "inc4a/conv5_1" } layer { name: "inc4a/conv5_2" type: "Convolution" bottom: "inc4a/conv5_1" top: "inc4a/conv5_2" param { lr_mult: 0.1 decay_mult: 0.1 } param { lr_mult: 0.2 decay_mult: 0 } convolution_param { num_output: 32 kernel_size: 3 pad: 1 weight_filler { type: "xavier" std: 0.1 } bias_filler { type: "constant" value: 0.1 } } } layer { name: "inc4a/conv5_2/bn" type: "BatchNorm" bottom: "inc4a/conv5_2" top: "inc4a/conv5_2" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } batch_norm_param { use_global_stats: true } } layer { name: "inc4a/conv5_2/scale" type: "Scale" bottom: "inc4a/conv5_2" top: "inc4a/conv5_2" param { lr_mult: 0.1 decay_mult: 0 } param { lr_mult: 0.1 decay_mult: 0 } scale_param { bias_term: true } } layer { name: "inc4a/relu5_2" type: "ReLU" bottom: "inc4a/conv5_2" top: "inc4a/conv5_2" } layer { name: "inc4a/conv5_3" type: "Convolution" bottom: "inc4a/conv5_2" top: "inc4a/conv5_3" param { lr_mult: 0.1 decay_mult: 0.1 } param { lr_mult: 0.2 decay_mult: 0 } convolution_param { num_output: 32 kernel_size: 3 stride: 2 pad: 1 weight_filler { type: "xavier" std: 0.1 } bias_filler { type: "constant" value: 0.1 } } } layer { name: "inc4a/conv5_3/bn" type: "BatchNorm" bottom: "inc4a/conv5_3" top: "inc4a/conv5_3" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } batch_norm_param { use_global_stats: true } } layer { name: "inc4a/conv5_3/scale" type: "Scale" bottom: "inc4a/conv5_3" top: "inc4a/conv5_3" param { lr_mult: 0.1 decay_mult: 0 } param { lr_mult: 0.1 decay_mult: 0 } scale_param { bias_term: true } } layer { name: "inc4a/relu5_3" type: "ReLU" bottom: "inc4a/conv5_3" top: "inc4a/conv5_3" } layer { name: "inc4a" type: "Concat" bottom: "inc4a/conv1" bottom: "inc4a/conv3_2" bottom: "inc4a/conv5_3" top: "inc4a" }

################################################################################

# Inception 4b

################################################################################ layer { name: "inc4b/conv1" type: "Convolution" bottom: "inc4a" top: "inc4b/conv1" param { lr_mult: 0.1 decay_mult: 0.1 } param { lr_mult: 0.2 decay_mult: 0 } convolution_param { num_output: 128 kernel_size: 1 weight_filler { type: "xavier" std: 0.1 } bias_filler { type: "constant" value: 0.1 } } } layer { name: "inc4b/conv1/bn" type: "BatchNorm" bottom: "inc4b/conv1" top: "inc4b/conv1" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } batch_norm_param { use_global_stats: true } } layer { name: "inc4b/conv1/scale" type: "Scale" bottom: "inc4b/conv1" top: "inc4b/conv1" param { lr_mult: 0.1 decay_mult: 0 } param { lr_mult: 0.1 decay_mult: 0 } scale_param { bias_term: true } } layer { name: "inc4b/relu1" type: "ReLU" bottom: "inc4b/conv1" top: "inc4b/conv1" } layer { name: "inc4b/conv3_1" type: "Convolution" bottom: "inc4a" top: "inc4b/conv3_1" param { lr_mult: 0.1 decay_mult: 0.1 } param { lr_mult: 0.2 decay_mult: 0 } convolution_param { num_output: 32 kernel_size: 1 weight_filler { type: "xavier" std: 0.1 } bias_filler { type: "constant" value: 0.1 } } } layer { name: "inc4b/conv3_1/bn" type: "BatchNorm" bottom: "inc4b/conv3_1" top: "inc4b/conv3_1" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } batch_norm_param { use_global_stats: true } } layer { name: "inc4b/conv3_1/scale" type: "Scale" bottom: "inc4b/conv3_1" top: "inc4b/conv3_1" param { lr_mult: 0.1 decay_mult: 0 } param { lr_mult: 0.1 decay_mult: 0 } scale_param { bias_term: true } } layer { name: "inc4b/relu3_1" type: "ReLU" bottom: "inc4b/conv3_1" top: "inc4b/conv3_1" } layer { name: "inc4b/conv3_2" type: "Convolution" bottom: "inc4b/conv3_1" top: "inc4b/conv3_2" param { lr_mult: 0.1 decay_mult: 0.1 } param { lr_mult: 0.2 decay_mult: 0 } convolution_param { num_output: 96 kernel_size: 3 pad: 1 weight_filler { type: "xavier" std: 0.1 } bias_filler { type: "constant" value: 0.1 } } } layer { name: "inc4b/conv3_2/bn" type: "BatchNorm" bottom: "inc4b/conv3_2" top: "inc4b/conv3_2" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } batch_norm_param { use_global_stats: true } } layer { name: "inc4b/conv3_2/scale" type: "Scale" bottom: "inc4b/conv3_2" top: "inc4b/conv3_2" param { lr_mult: 0.1 decay_mult: 0 } param { lr_mult: 0.1 decay_mult: 0 } scale_param { bias_term: true } } layer { name: "inc4b/relu3_2" type: "ReLU" bottom: "inc4b/conv3_2" top: "inc4b/conv3_2" } layer { name: "inc4b/conv5_1" type: "Convolution" bottom: "inc4a" top: "inc4b/conv5_1" param { lr_mult: 0.1 decay_mult: 0.1 } param { lr_mult: 0.2 decay_mult: 0 } convolution_param { num_output: 16 kernel_size: 1 weight_filler { type: "xavier" std: 0.1 } bias_filler { type: "constant" value: 0.1 } } } layer { name: "inc4b/conv5_1/bn" type: "BatchNorm" bottom: "inc4b/conv5_1" top: "inc4b/conv5_1" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } batch_norm_param { use_global_stats: true } } layer { name: "inc4b/conv5_1/scale" type: "Scale" bottom: "inc4b/conv5_1" top: "inc4b/conv5_1" param { lr_mult: 0.1 decay_mult: 0 } param { lr_mult: 0.1 decay_mult: 0 } scale_param { bias_term: true } } layer { name: "inc4b/relu5_1" type: "ReLU" bottom: "inc4b/conv5_1" top: "inc4b/conv5_1" } layer { name: "inc4b/conv5_2" type: "Convolution" bottom: "inc4b/conv5_1" top: "inc4b/conv5_2" param { 
lr_mult: 0.1 decay_mult: 0.1 } param { lr_mult: 0.2 decay_mult: 0 } convolution_param { num_output: 32 kernel_size: 3 pad: 1 weight_filler { type: "xavier" std: 0.1 } bias_filler { type: "constant" value: 0.1 } } } layer { name: "inc4b/conv5_2/bn" type: "BatchNorm" bottom: "inc4b/conv5_2" top: "inc4b/conv5_2" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } batch_norm_param { use_global_stats: true } } layer { name: "inc4b/conv5_2/scale" type: "Scale" bottom: "inc4b/conv5_2" top: "inc4b/conv5_2" param { lr_mult: 0.1 decay_mult: 0 } param { lr_mult: 0.1 decay_mult: 0 } scale_param { bias_term: true } } layer { name: "inc4b/relu5_2" type: "ReLU" bottom: "inc4b/conv5_2" top: "inc4b/conv5_2" } layer { name: "inc4b/conv5_3" type: "Convolution" bottom: "inc4b/conv5_2" top: "inc4b/conv5_3" param { lr_mult: 0.1 decay_mult: 0.1 } param { lr_mult: 0.2 decay_mult: 0 } convolution_param { num_output: 32 kernel_size: 3 pad: 1 weight_filler { type: "xavier" std: 0.1 } bias_filler { type: "constant" value: 0.1 } } } layer { name: "inc4b/conv5_3/bn" type: "BatchNorm" bottom: "inc4b/conv5_3" top: "inc4b/conv5_3" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } batch_norm_param { use_global_stats: true } } layer { name: "inc4b/conv5_3/scale" type: "Scale" bottom: "inc4b/conv5_3" top: "inc4b/conv5_3" param { lr_mult: 0.1 decay_mult: 0 } param { lr_mult: 0.1 decay_mult: 0 } scale_param { bias_term: true } } layer { name: "inc4b/relu5_3" type: "ReLU" bottom: "inc4b/conv5_3" top: "inc4b/conv5_3" } layer { name: "inc4b" type: "Concat" bottom: "inc4b/conv1" bottom: "inc4b/conv3_2" bottom: "inc4b/conv5_3" top: "inc4b" }

################################################################################

# Inception 4c

################################################################################ layer { name: "inc4c/conv1" type: "Convolution" bottom: "inc4b" top: "inc4c/conv1" param { lr_mult: 0.1 decay_mult: 0.1 } param { lr_mult: 0.2 decay_mult: 0 } convolution_param { num_output: 128 kernel_size: 1 weight_filler { type: "xavier" std: 0.1 } bias_filler { type: "constant" value: 0.1 } } } layer { name: "inc4c/conv1/bn" type: "BatchNorm" bottom: "inc4c/conv1" top: "inc4c/conv1" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } batch_norm_param { use_global_stats: true } } layer { name: "inc4c/conv1/scale" type: "Scale" bottom: "inc4c/conv1" top: "inc4c/conv1" param { lr_mult: 0.1 decay_mult: 0 } param { lr_mult: 0.1 decay_mult: 0 } scale_param { bias_term: true } } layer { name: "inc4c/relu1" type: "ReLU" bottom: "inc4c/conv1" top: "inc4c/conv1" } layer { name: "inc4c/conv3_1" type: "Convolution" bottom: "inc4b" top: "inc4c/conv3_1" param { lr_mult: 0.1 decay_mult: 0.1 } param { lr_mult: 0.2 decay_mult: 0 } convolution_param { num_output: 32 kernel_size: 1 weight_filler { type: "xavier" std: 0.1 } bias_filler { type: "constant" value: 0.1 } } } layer { name: "inc4c/conv3_1/bn" type: "BatchNorm" bottom: "inc4c/conv3_1" top: "inc4c/conv3_1" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } batch_norm_param { use_global_stats: true } } layer { name: "inc4c/conv3_1/scale" type: "Scale" bottom: "inc4c/conv3_1" top: "inc4c/conv3_1" param { lr_mult: 0.1 decay_mult: 0 } param { lr_mult: 0.1 decay_mult: 0 } scale_param { bias_term: true } } layer { name: "inc4c/relu3_1" type: "ReLU" bottom: "inc4c/conv3_1" top: "inc4c/conv3_1" } layer { name: "inc4c/conv3_2" type: "Convolution" bottom: "inc4c/conv3_1" top: "inc4c/conv3_2" param { lr_mult: 0.1 decay_mult: 0.1 } param { lr_mult: 0.2 decay_mult: 0 } convolution_param { num_output: 96 kernel_size: 3 pad: 1 weight_filler { type: "xavier" std: 0.1 } bias_filler { type: "constant" value: 0.1 } } } layer { name: "inc4c/conv3_2/bn" type: "BatchNorm" bottom: "inc4c/conv3_2" top: "inc4c/conv3_2" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } batch_norm_param { use_global_stats: true } } layer { name: "inc4c/conv3_2/scale" type: "Scale" bottom: "inc4c/conv3_2" top: "inc4c/conv3_2" param { lr_mult: 0.1 decay_mult: 0 } param { lr_mult: 0.1 decay_mult: 0 } scale_param { bias_term: true } } layer { name: "inc4c/relu3_2" type: "ReLU" bottom: "inc4c/conv3_2" top: "inc4c/conv3_2" } layer { name: "inc4c/conv5_1" type: "Convolution" bottom: "inc4b" top: "inc4c/conv5_1" param { lr_mult: 0.1 decay_mult: 0.1 } param { lr_mult: 0.2 decay_mult: 0 } convolution_param { num_output: 16 kernel_size: 1 weight_filler { type: "xavier" std: 0.1 } bias_filler { type: "constant" value: 0.1 } } } layer { name: "inc4c/conv5_1/bn" type: "BatchNorm" bottom: "inc4c/conv5_1" top: "inc4c/conv5_1" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } batch_norm_param { use_global_stats: true } } layer { name: "inc4c/conv5_1/scale" type: "Scale" bottom: "inc4c/conv5_1" top: "inc4c/conv5_1" param { lr_mult: 0.1 decay_mult: 0 } param { lr_mult: 0.1 decay_mult: 0 } scale_param { bias_term: true } } layer { name: "inc4c/relu5_1" type: "ReLU" bottom: "inc4c/conv5_1" top: "inc4c/conv5_1" } layer { name: "inc4c/conv5_2" type: "Convolution" bottom: "inc4c/conv5_1" top: "inc4c/conv5_2" param { 
lr_mult: 0.1 decay_mult: 0.1 } param { lr_mult: 0.2 decay_mult: 0 } convolution_param { num_output: 32 kernel_size: 3 pad: 1 weight_filler { type: "xavier" std: 0.1 } bias_filler { type: "constant" value: 0.1 } } } layer { name: "inc4c/conv5_2/bn" type: "BatchNorm" bottom: "inc4c/conv5_2" top: "inc4c/conv5_2" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } batch_norm_param { use_global_stats: true } } layer { name: "inc4c/conv5_2/scale" type: "Scale" bottom: "inc4c/conv5_2" top: "inc4c/conv5_2" param { lr_mult: 0.1 decay_mult: 0 } param { lr_mult: 0.1 decay_mult: 0 } scale_param { bias_term: true } } layer { name: "inc4c/relu5_2" type: "ReLU" bottom: "inc4c/conv5_2" top: "inc4c/conv5_2" } layer { name: "inc4c/conv5_3" type: "Convolution" bottom: "inc4c/conv5_2" top: "inc4c/conv5_3" param { lr_mult: 0.1 decay_mult: 0.1 } param { lr_mult: 0.2 decay_mult: 0 } convolution_param { num_output: 32 kernel_size: 3 pad: 1 weight_filler { type: "xavier" std: 0.1 } bias_filler { type: "constant" value: 0.1 } } } layer { name: "inc4c/conv5_3/bn" type: "BatchNorm" bottom: "inc4c/conv5_3" top: "inc4c/conv5_3" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } batch_norm_param { use_global_stats: true } } layer { name: "inc4c/conv5_3/scale" type: "Scale" bottom: "inc4c/conv5_3" top: "inc4c/conv5_3" param { lr_mult: 0.1 decay_mult: 0 } param { lr_mult: 0.1 decay_mult: 0 } scale_param { bias_term: true } } layer { name: "inc4c/relu5_3" type: "ReLU" bottom: "inc4c/conv5_3" top: "inc4c/conv5_3" } layer { name: "inc4c" type: "Concat" bottom: "inc4c/conv1" bottom: "inc4c/conv3_2" bottom: "inc4c/conv5_3" top: "inc4c" }

################################################################################

# Inception 4d

################################################################################ layer { name: "inc4d/conv1" type: "Convolution" bottom: "inc4c" top: "inc4d/conv1" param { lr_mult: 0.1 decay_mult: 0.1 } param { lr_mult: 0.2 decay_mult: 0 } convolution_param { num_output: 128 kernel_size: 1 weight_filler { type: "xavier" std: 0.1 } bias_filler { type: "constant" value: 0.1 } } } layer { name: "inc4d/conv1/bn" type: "BatchNorm" bottom: "inc4d/conv1" top: "inc4d/conv1" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } batch_norm_param { use_global_stats: true } } layer { name: "inc4d/conv1/scale" type: "Scale" bottom: "inc4d/conv1" top: "inc4d/conv1" param { lr_mult: 0.1 decay_mult: 0 } param { lr_mult: 0.1 decay_mult: 0 } scale_param { bias_term: true } } layer { name: "inc4d/relu1" type: "ReLU" bottom: "inc4d/conv1" top: "inc4d/conv1" } layer { name: "inc4d/conv3_1" type: "Convolution" bottom: "inc4c" top: "inc4d/conv3_1" param { lr_mult: 0.1 decay_mult: 0.1 } param { lr_mult: 0.2 decay_mult: 0 } convolution_param { num_output: 32 kernel_size: 1 weight_filler { type: "xavier" std: 0.1 } bias_filler { type: "constant" value: 0.1 } } } layer { name: "inc4d/conv3_1/bn" type: "BatchNorm" bottom: "inc4d/conv3_1" top: "inc4d/conv3_1" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } batch_norm_param { use_global_stats: true } } layer { name: "inc4d/conv3_1/scale" type: "Scale" bottom: "inc4d/conv3_1" top: "inc4d/conv3_1" param { lr_mult: 0.1 decay_mult: 0 } param { lr_mult: 0.1 decay_mult: 0 } scale_param { bias_term: true } } layer { name: "inc4d/relu3_1" type: "ReLU" bottom: "inc4d/conv3_1" top: "inc4d/conv3_1" } layer { name: "inc4d/conv3_2" type: "Convolution" bottom: "inc4d/conv3_1" top: "inc4d/conv3_2" param { lr_mult: 0.1 decay_mult: 0.1 } param { lr_mult: 0.2 decay_mult: 0 } convolution_param { num_output: 96 kernel_size: 3 pad: 1 weight_filler { type: "xavier" std: 0.1 } bias_filler { type: "constant" value: 0.1 } } } layer { name: "inc4d/conv3_2/bn" type: "BatchNorm" bottom: "inc4d/conv3_2" top: "inc4d/conv3_2" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } batch_norm_param { use_global_stats: true } } layer { name: "inc4d/conv3_2/scale" type: "Scale" bottom: "inc4d/conv3_2" top: "inc4d/conv3_2" param { lr_mult: 0.1 decay_mult: 0 } param { lr_mult: 0.1 decay_mult: 0 } scale_param { bias_term: true } } layer { name: "inc4d/relu3_2" type: "ReLU" bottom: "inc4d/conv3_2" top: "inc4d/conv3_2" } layer { name: "inc4d/conv5_1" type: "Convolution" bottom: "inc4c" top: "inc4d/conv5_1" param { lr_mult: 0.1 decay_mult: 0.1 } param { lr_mult: 0.2 decay_mult: 0 } convolution_param { num_output: 16 kernel_size: 1 weight_filler { type: "xavier" std: 0.1 } bias_filler { type: "constant" value: 0.1 } } } layer { name: "inc4d/conv5_1/bn" type: "BatchNorm" bottom: "inc4d/conv5_1" top: "inc4d/conv5_1" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } batch_norm_param { use_global_stats: true } } layer { name: "inc4d/conv5_1/scale" type: "Scale" bottom: "inc4d/conv5_1" top: "inc4d/conv5_1" param { lr_mult: 0.1 decay_mult: 0 } param { lr_mult: 0.1 decay_mult: 0 } scale_param { bias_term: true } } layer { name: "inc4d/relu5_1" type: "ReLU" bottom: "inc4d/conv5_1" top: "inc4d/conv5_1" } layer { name: "inc4d/conv5_2" type: "Convolution" bottom: "inc4d/conv5_1" top: "inc4d/conv5_2" param { 
lr_mult: 0.1 decay_mult: 0.1 } param { lr_mult: 0.2 decay_mult: 0 } convolution_param { num_output: 32 kernel_size: 3 pad: 1 weight_filler { type: "xavier" std: 0.1 } bias_filler { type: "constant" value: 0.1 } } } layer { name: "inc4d/conv5_2/bn" type: "BatchNorm" bottom: "inc4d/conv5_2" top: "inc4d/conv5_2" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } batch_norm_param { use_global_stats: true } } layer { name: "inc4d/conv5_2/scale" type: "Scale" bottom: "inc4d/conv5_2" top: "inc4d/conv5_2" param { lr_mult: 0.1 decay_mult: 0 } param { lr_mult: 0.1 decay_mult: 0 } scale_param { bias_term: true } } layer { name: "inc4d/relu5_2" type: "ReLU" bottom: "inc4d/conv5_2" top: "inc4d/conv5_2" } layer { name: "inc4d/conv5_3" type: "Convolution" bottom: "inc4d/conv5_2" top: "inc4d/conv5_3" param { lr_mult: 0.1 decay_mult: 0.1 } param { lr_mult: 0.2 decay_mult: 0 } convolution_param { num_output: 32 kernel_size: 3 pad: 1 weight_filler { type: "xavier" std: 0.1 } bias_filler { type: "constant" value: 0.1 } } } layer { name: "inc4d/conv5_3/bn" type: "BatchNorm" bottom: "inc4d/conv5_3" top: "inc4d/conv5_3" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } batch_norm_param { use_global_stats: true } } layer { name: "inc4d/conv5_3/scale" type: "Scale" bottom: "inc4d/conv5_3" top: "inc4d/conv5_3" param { lr_mult: 0.1 decay_mult: 0 } param { lr_mult: 0.1 decay_mult: 0 } scale_param { bias_term: true } } layer { name: "inc4d/relu5_3" type: "ReLU" bottom: "inc4d/conv5_3" top: "inc4d/conv5_3" } layer { name: "inc4d" type: "Concat" bottom: "inc4d/conv1" bottom: "inc4d/conv3_2" bottom: "inc4d/conv5_3" top: "inc4d" }

################################################################################

# Inception 4e

################################################################################ layer { name: "inc4e/conv1" type: "Convolution" bottom: "inc4d" top: "inc4e/conv1" param { lr_mult: 0.1 decay_mult: 0.1 } param { lr_mult: 0.2 decay_mult: 0 } convolution_param { num_output: 128 kernel_size: 1 weight_filler { type: "xavier" std: 0.1 } bias_filler { type: "constant" value: 0.1 } } } layer { name: "inc4e/conv1/bn" type: "BatchNorm" bottom: "inc4e/conv1" top: "inc4e/conv1" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } batch_norm_param { use_global_stats: true } } layer { name: "inc4e/conv1/scale" type: "Scale" bottom: "inc4e/conv1" top: "inc4e/conv1" param { lr_mult: 0.1 decay_mult: 0 } param { lr_mult: 0.1 decay_mult: 0 } scale_param { bias_term: true } } layer { name: "inc4e/relu1" type: "ReLU" bottom: "inc4e/conv1" top: "inc4e/conv1" } layer { name: "inc4e/conv3_1" type: "Convolution" bottom: "inc4d" top: "inc4e/conv3_1" param { lr_mult: 0.1 decay_mult: 0.1 } param { lr_mult: 0.2 decay_mult: 0 } convolution_param { num_output: 32 kernel_size: 1 weight_filler { type: "xavier" std: 0.1 } bias_filler { type: "constant" value: 0.1 } } } layer { name: "inc4e/conv3_1/bn" type: "BatchNorm" bottom: "inc4e/conv3_1" top: "inc4e/conv3_1" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } batch_norm_param { use_global_stats: true } } layer { name: "inc4e/conv3_1/scale" type: "Scale" bottom: "inc4e/conv3_1" top: "inc4e/conv3_1" param { lr_mult: 0.1 decay_mult: 0 } param { lr_mult: 0.1 decay_mult: 0 } scale_param { bias_term: true } } layer { name: "inc4e/relu3_1" type: "ReLU" bottom: "inc4e/conv3_1" top: "inc4e/conv3_1" } layer { name: "inc4e/conv3_2" type: "Convolution" bottom: "inc4e/conv3_1" top: "inc4e/conv3_2" param { lr_mult: 0.1 decay_mult: 0.1 } param { lr_mult: 0.2 decay_mult: 0 } convolution_param { num_output: 96 kernel_size: 3 pad: 1 weight_filler { type: "xavier" std: 0.1 } bias_filler { type: "constant" value: 0.1 } } } layer { name: "inc4e/conv3_2/bn" type: "BatchNorm" bottom: "inc4e/conv3_2" top: "inc4e/conv3_2" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } batch_norm_param { use_global_stats: true } } layer { name: "inc4e/conv3_2/scale" type: "Scale" bottom: "inc4e/conv3_2" top: "inc4e/conv3_2" param { lr_mult: 0.1 decay_mult: 0 } param { lr_mult: 0.1 decay_mult: 0 } scale_param { bias_term: true } } layer { name: "inc4e/relu3_2" type: "ReLU" bottom: "inc4e/conv3_2" top: "inc4e/conv3_2" } layer { name: "inc4e/conv5_1" type: "Convolution" bottom: "inc4d" top: "inc4e/conv5_1" param { lr_mult: 0.1 decay_mult: 0.1 } param { lr_mult: 0.2 decay_mult: 0 } convolution_param { num_output: 16 kernel_size: 1 weight_filler { type: "xavier" std: 0.1 } bias_filler { type: "constant" value: 0.1 } } } layer { name: "inc4e/conv5_1/bn" type: "BatchNorm" bottom: "inc4e/conv5_1" top: "inc4e/conv5_1" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } batch_norm_param { use_global_stats: true } } layer { name: "inc4e/conv5_1/scale" type: "Scale" bottom: "inc4e/conv5_1" top: "inc4e/conv5_1" param { lr_mult: 0.1 decay_mult: 0 } param { lr_mult: 0.1 decay_mult: 0 } scale_param { bias_term: true } } layer { name: "inc4e/relu5_1" type: "ReLU" bottom: "inc4e/conv5_1" top: "inc4e/conv5_1" } layer { name: "inc4e/conv5_2" type: "Convolution" bottom: "inc4e/conv5_1" top: "inc4e/conv5_2" param { 
lr_mult: 0.1 decay_mult: 0.1 } param { lr_mult: 0.2 decay_mult: 0 } convolution_param { num_output: 32 kernel_size: 3 pad: 1 weight_filler { type: "xavier" std: 0.1 } bias_filler { type: "constant" value: 0.1 } } } layer { name: "inc4e/conv5_2/bn" type: "BatchNorm" bottom: "inc4e/conv5_2" top: "inc4e/conv5_2" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } batch_norm_param { use_global_stats: true } } layer { name: "inc4e/conv5_2/scale" type: "Scale" bottom: "inc4e/conv5_2" top: "inc4e/conv5_2" param { lr_mult: 0.1 decay_mult: 0 } param { lr_mult: 0.1 decay_mult: 0 } scale_param { bias_term: true } } layer { name: "inc4e/relu5_2" type: "ReLU" bottom: "inc4e/conv5_2" top: "inc4e/conv5_2" } layer { name: "inc4e/conv5_3" type: "Convolution" bottom: "inc4e/conv5_2" top: "inc4e/conv5_3" param { lr_mult: 0.1 decay_mult: 0.1 } param { lr_mult: 0.2 decay_mult: 0 } convolution_param { num_output: 32 kernel_size: 3 pad: 1 weight_filler { type: "xavier" std: 0.1 } bias_filler { type: "constant" value: 0.1 } } } layer { name: "inc4e/conv5_3/bn" type: "BatchNorm" bottom: "inc4e/conv5_3" top: "inc4e/conv5_3" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } batch_norm_param { use_global_stats: true } } layer { name: "inc4e/conv5_3/scale" type: "Scale" bottom: "inc4e/conv5_3" top: "inc4e/conv5_3" param { lr_mult: 0.1 decay_mult: 0 } param { lr_mult: 0.1 decay_mult: 0 } scale_param { bias_term: true } } layer { name: "inc4e/relu5_3" type: "ReLU" bottom: "inc4e/conv5_3" top: "inc4e/conv5_3" } layer { name: "inc4e" type: "Concat" bottom: "inc4e/conv1" bottom: "inc4e/conv3_2" bottom: "inc4e/conv5_3" top: "inc4e" }

################################################################################

hyper feature

################################################################################ layer { name: "downsample" type: "Pooling" bottom: "conv3" top: "downsample" pooling_param { kernel_size: 3 stride: 2 pad: 0 pool: MAX } } layer { name: "upsample" type: "Deconvolution" bottom: "inc4e" top: "upsample" param { lr_mult: 0 decay_mult: 0 } convolution_param { num_output: 256 kernel_size: 4 stride: 2 pad: 1 group: 256 weight_filler: { type: "bilinear" } bias_term: false } } layer { name: "concat" type: "Concat" bottom: "downsample" bottom: "inc3e" bottom: "upsample" top: "concat" concat_param { axis: 1 } } layer { name: "convf" type: "Convolution" bottom: "concat" top: "convf" param { lr_mult: 1 decay_mult: 1 } param { lr_mult: 2 decay_mult: 0 } convolution_param { num_output: 256 kernel_size: 1 stride: 1 pad: 0 weight_filler { type: "xavier" std: 0.1 } bias_filler { type: "constant" value: 0.1 } } } layer { name: "reluf" type: "ReLU" bottom: "convf" top: "convf" } layer { name: "upsample-2x" type: "Deconvolution" bottom: "convf" top: "upsample-2x" param { lr_mult: 0 decay_mult: 0 } convolution_param { num_output: 256 kernel_size: 4 stride: 2 pad: 1 group: 256 weight_filler: { type: "bilinear" } bias_term: false } }
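(For what it's worth, here is how I read the strides in this hyper-feature block; this is my own bookkeeping, not verified against the full prototxt, and it assumes inc3e sits at stride 16. conv1-conv3 bring the input to stride 8, the downsample pool and the inc4e deconvolution meet inc3e at stride 16, and the final "upsample-2x" deconvolution brings the fused map back to stride 8, which is why feat_stride and base_size are set to 8 below.)

import math

def deconv_out(size, kernel=4, stride=2, pad=1):
    # Caffe Deconvolution output size: (in - 1) * stride - 2 * pad + kernel
    return (size - 1) * stride - 2 * pad + kernel

def pool_out(size, kernel=3, stride=2, pad=0):
    # Caffe Pooling rounds up (ceil) when computing the output size
    return int(math.ceil((size + 2 * pad - kernel) / float(stride))) + 1

img = 640                          # example input width
conv3 = img // 8                   # stride 8 after conv1-conv3 (stride 2 each)
inc3e = conv3 // 2                 # assumed stride 16
inc4e = inc3e // 2                 # stride 32
upsample = deconv_out(inc4e)       # 4x4/2 deconv of inc4e -> stride 16
downsample = pool_out(conv3)       # 3x3/2 max pool of conv3 -> stride 16
assert upsample == downsample == inc3e   # the three concat inputs line up at this size
upsample_2x = deconv_out(inc3e)    # final 2x deconv -> stride 8
print(img / upsample_2x)           # 8.0, i.e. feat_stride = 8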

################################################################################

RPN

################################################################################

layer { name: "rpn_conv1" type: "Convolution" bottom: "upsample-2x" top: "rpn_conv1" param { lr_mult: 1 decay_mult: 1 } param { lr_mult: 2 decay_mult: 0 } convolution_param { num_output: 256 kernel_size: 1 stride: 1 pad: 0 weight_filler { type: "gaussian" std: 0.01 } bias_filler { type: "constant" value: 0 } } } layer { name: "rpn_relu1" type: "ReLU" bottom: "rpn_conv1" top: "rpn_conv1" }

layer { name: "rpn_cls_score-v2" type: "Convolution" bottom: "rpn_conv1" top: "rpn_cls_score" param { lr_mult: 1 decay_mult: 1 } param { lr_mult: 2 decay_mult: 0 } convolution_param { num_output: 50

num_output: 72

kernel_size: 1  stride: 1  pad: 0
weight_filler { type: "gaussian"  std: 0.01 }
bias_filler { type: "constant"  value: 0 }

} } layer { name: "rpn_bbox_pred-v2" type: "Convolution" bottom: "rpn_conv1" top: "rpn_bbox_pred" param { lr_mult: 1 decay_mult: 1 } param { lr_mult: 2 decay_mult: 0 } convolution_param { num_output: 100

num_output: 144

kernel_size: 1  stride: 1  pad: 0
weight_filler { type: "gaussian"  std: 0.01 }
bias_filler { type: "constant"  value: 0 }

} }
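(Assuming the RPN heads follow the usual Faster R-CNN layout, the 50/100 outputs are consistent with the 25 anchors implied by the ratios and scales passed to the anchor target and proposal layers below: 2 objectness scores and 4 box deltas per anchor. The commented-out 72/144 pair would correspond to a 36-anchor configuration.)

# Quick check (mine, not from the repo): anchors per location = ratios x scales.
ratios = [0.5, 0.667, 1, 1.5, 2]
scales = [3, 6, 9, 16, 32]
num_anchors = len(ratios) * len(scales)    # 25
print(2 * num_anchors, 4 * num_anchors)    # 50 100 -> rpn_cls_score / rpn_bbox_pred
# The rpn_cls_prob_reshape layer below likewise uses dim: 2 * 25 = 50.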

layer { bottom: "rpn_cls_score" top: "rpn_cls_score_reshape" name: "rpn_cls_score_reshape" type: "Reshape" reshape_param { shape { dim: 0 dim: 2 dim: -1 dim: 0 } } } layer { name: 'rpn-data' type: 'Python' bottom: 'rpn_cls_score' bottom: 'gt_boxes' bottom: 'im_info' bottom: 'data' top: 'rpn_labels' top: 'rpn_bbox_targets' top: 'rpn_bbox_inside_weights' top: 'rpn_bbox_outside_weights' include { phase: TRAIN } python_param { module: 'rpn.anchor_target_layer' layer: 'AnchorTargetLayer' param_str: "{'feat_stride': 8, 'ratios': [0.5, 0.667, 1, 1.5, 2], 'scales': [3, 6, 9, 16, 32]}" } } layer { name: "rpn_loss_cls" type: "SoftmaxWithLoss" bottom: "rpn_cls_score_reshape" bottom: "rpn_labels" propagate_down: 1 propagate_down: 0 top: "rpn_loss_cls" include { phase: TRAIN } loss_weight: 1 loss_param { ignore_label: -1 normalize: true } } layer { name: "rpn_loss_bbox" type: "SmoothL1Loss" bottom: "rpn_bbox_pred" bottom: "rpn_bbox_targets" bottom: "rpn_bbox_inside_weights" bottom: "rpn_bbox_outside_weights" top: "rpn_loss_bbox" include { phase: TRAIN } loss_weight: 1 smooth_l1_loss_param { sigma: 3.0 } }

################################################################################

Proposal

################################################################################ layer { name: "rpn_cls_prob" type: "Softmax" bottom: "rpn_cls_score_reshape" top: "rpn_cls_prob" } layer { name: 'rpn_cls_prob_reshape' type: 'Reshape' bottom: 'rpn_cls_prob' top: 'rpn_cls_prob_reshape' reshape_param { shape { dim: 0 dim: 50 dim: -1 dim: 0 } } }

layer { name: 'proposal' type: 'Proposal' bottom: 'rpn_cls_prob_reshape' bottom: 'rpn_bbox_pred' bottom: 'im_info' top: 'rois' top: 'scores'
# include { phase: TEST }
proposal_param { ratio: 0.5 ratio: 0.667 ratio: 1.0 ratio: 1.5 ratio: 2.0 scale: 3 scale: 6 scale: 9 scale: 16 scale: 32 base_size: 8 feat_stride: 8 pre_nms_topn: 6000 post_nms_topn: 4000 nms_thresh: 0.7 min_size: 8 } } layer { name: 'mute_rpn_scores' bottom: 'scores' type: 'Silence' }
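(If the C++ Proposal layer builds anchors the same way py-faster-rcnn's generate_anchors does, which I am assuming here, base_size: 8 with these scales gives square anchors of roughly 24 to 256 pixels on the input image, and feat_stride: 8 has to match the stride of the upsample-2x map computed above.)

# Hypothetical illustration of the anchor sizes implied by base_size and scales.
base_size = 8
scales = [3, 6, 9, 16, 32]
print([base_size * s for s in scales])   # [24, 48, 72, 128, 256] pixels (square anchors)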

layer { name: 'roi-data' type: 'Python' bottom: 'rois' bottom: 'gt_boxes' top: 'rois' top: 'labels' top: 'bbox_targets' top: 'bbox_inside_weights' top: 'bbox_outside_weights' python_param { module: 'rpn.proposal_target_layer' layer: 'ProposalTargetLayer' param_str: "'num_classes': 21" } }

################################################################################

RCNN

################################################################################ layer { name: "roi_pool_conv5" type: "ROIPooling" bottom: "upsample-2x" bottom: "rois" top: "roi_pool_conv5" roi_pooling_param { pooled_w: 6 pooled_h: 6 spatial_scale: 0.125  # 1/8 } } layer { name: "fc6_L" type: "InnerProduct" bottom: "roi_pool_conv5" top: "fc6_L" param { lr_mult: 1 decay_mult: 1 } param { lr_mult: 2 decay_mult: 0 } inner_product_param { num_output: 512 weight_filler { type: "xavier" std: 0.005 } bias_filler { type: "constant" value: 0.1 } } } layer { name: "fc6_U" type: "InnerProduct" bottom: "fc6_L" top: "fc6_U" param { lr_mult: 1 decay_mult: 1 } param { lr_mult: 2 decay_mult: 0 } inner_product_param { num_output: 4096 weight_filler { type: "xavier" std: 0.005 } bias_filler { type: "constant" value: 0.1 } } } layer { name: "relu6" type: "ReLU" bottom: "fc6_U" top: "fc6_U" }
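(One thing worth double-checking, though this is just my reading and not a confirmed diagnosis: ROIPooling's spatial_scale has to be 1 / feat_stride of the blob it pools from, so 0.125 is consistent with pooling from the stride-8 upsample-2x map. A minimal illustration with a hypothetical ROI:)

# spatial_scale maps image-space ROI coordinates onto the pooled feature map.
feat_stride = 8
spatial_scale = 1.0 / feat_stride        # 0.125, as in roi_pooling_param
roi = (160, 96, 480, 352)                # hypothetical ROI in input-image pixels
print([int(round(c * spatial_scale)) for c in roi])   # [20, 12, 60, 44] on the feature map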

################################################################################

fc 7

################################################################################ layer { name: "fc7_L" type: "InnerProduct" bottom: "fc6_U" top: "fc7_L" param { lr_mult: 1 decay_mult: 1 } param { lr_mult: 2 decay_mult: 0 } inner_product_param { num_output: 128 weight_filler { type: "xavier" std: 0.005 } bias_filler { type: "constant" value: 0.1 } } } layer { name: "fc7_U" type: "InnerProduct" bottom: "fc7_L" top: "fc7_U" param { lr_mult: 1 decay_mult: 1 } param { lr_mult: 2 decay_mult: 0 } inner_product_param { num_output: 4096 weight_filler { type: "xavier" std: 0.005 } bias_filler { type: "constant" value: 0.1 } } } layer { name: "relu7" type: "ReLU" bottom: "fc7_U" top: "fc7_U" }
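(The L/U pairs are the factorized fully-connected layers from PVANET; assuming roi_pool_conv5 is 256 channels at 6x6, the factorization cuts the fc6 weight count from roughly 37.7M to about 6.8M. A rough count:)

# Rough parameter-count comparison for the factorized ("L/U") fc layers,
# assuming roi_pool_conv5 is 256 channels x 6 x 6 = 9216 inputs.
roi_feat = 256 * 6 * 6
direct = roi_feat * 4096                  # single fc6: ~37.7M weights
factored = roi_feat * 512 + 512 * 4096    # fc6_L + fc6_U: ~6.8M weights
print(direct, factored)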

################################################################################

output

################################################################################ layer { name: "cls_score" type: "InnerProduct" bottom: "fc7_U" top: "cls_score" param { lr_mult: 1 decay_mult: 1 } param { lr_mult: 2 decay_mult: 0 } inner_product_param { num_output: 21 weight_filler { type: "gaussian" std: 0.01 } bias_filler { type: "constant" value: 0 } } } layer { name: "bbox_pred" type: "InnerProduct" bottom: "fc7_U" top: "bbox_pred" param { lr_mult: 1 decay_mult: 1 } param { lr_mult: 2 decay_mult: 0 } inner_product_param { num_output: 84 weight_filler { type: "gaussian" std: 0.001 } bias_filler { type: "constant" value: 0 } } } layer { name: "loss_cls" type: "SoftmaxWithLoss" bottom: "cls_score" bottom: "labels" propagate_down: 1 propagate_down: 0 top: "loss_cls" include { phase: TRAIN } loss_weight: 1 loss_param { ignore_label: -1 normalize: true } } layer { name: "loss_bbox" type: "SmoothL1Loss" bottom: "bbox_pred" bottom: "bbox_targets" bottom: "bbox_inside_weights" bottom: "bbox_outside_weights" top: "loss_bbox" include { phase: TRAIN } loss_weight: 1 }
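(The output sizes follow the standard Faster R-CNN convention for PASCAL VOC: 21 classes, i.e. 20 object classes plus background, with class-specific box regression, hence 4 x 21 = 84 outputs for bbox_pred.)

# Standard VOC head sizes: 20 object classes + background.
num_classes = 21
print(num_classes, 4 * num_classes)   # 21 (cls_score), 84 (bbox_pred)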

# layer {
#   name: "cls_prob"
#   type: "Softmax"
#   bottom: "cls_score"
#   top: "cls_prob"
#   include { phase: TEST }
#   loss_param {
#     ignore_label: -1
#     normalize: true
#   }
# }

sanghoon commented 7 years ago

I'm sorry, but your prototxt is a bit difficult to read as pasted. I'd recommend attaching just the relevant snippets from it.