# pad =1 means? (#3) [Open] RubinaRashid opened this issue 8 years ago
Can you explain what you mean by a kernel restarting error? Many others have used the prototxt files provided without any error. Can you please post the error you get when using the prototxt files provided on GitHub? Then we can move on to the residual model you are asking about. -chris
When I run the code, the Python kernel restarts again and again... please look at the picture. You have used "#Pad" in the prototxt; what does it mean?
Hi, when I run your code I get a kernel restarting error, so I made my own prototxt with a ResNet model.
At the end, the Convolution18 layer does not get any values; it shows just zeros. Please guide me...
layer { name: "FineCustomData" type: "Python" top: "Python1" python_param { module: "ResCustomData" layer: "ResCustomData" param_str: "1,3,300,600" } } layer { name: "Convolution1" type: "Convolution" bottom: "Python1" top: "Convolution1" convolution_param { num_output: 64 bias_term: false pad: 0 kernel_size: 3 stride: 1 weight_filler { type: "msra" } } } layer { name: "BatchNorm1" type: "BatchNorm" bottom: "Convolution1" top: "Convolution1" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } } layer { name: "Scale1" type: "Scale" bottom: "Convolution1" top: "Convolution1" scale_param { bias_term: true } } layer { name: "ReLU1" type: "ReLU" bottom: "Convolution1" top: "Convolution1" } layer { name: "Convolution2" type: "Convolution" bottom: "Convolution1" top: "Convolution2" convolution_param { num_output: 64 bias_term: false pad: 0 kernel_size: 3 stride: 1 weight_filler { type: "msra" } } } layer { name: "BatchNorm2" type: "BatchNorm" bottom: "Convolution2" top: "Convolution2" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } } layer { name: "Scale2" type: "Scale" bottom: "Convolution2" top: "Convolution2" scale_param { bias_term: true } } layer { name: "ReLU2" type: "ReLU" bottom: "Convolution2" top: "Convolution2" } layer { name: "Pooling1" type: "Pooling" bottom: "Convolution2" top: "Pooling1" pooling_param { pool: MAX kernel_size: 3 stride: 1 } } layer { name: "BatchNorm3" type: "BatchNorm" bottom: "Pooling1" top: "Pooling1" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } } layer { name: "Scale3" type: "Scale" bottom: "Pooling1" top: "Pooling1" scale_param { bias_term: true } } layer { name: "Convolution3" type: "Convolution" bottom: "Pooling1" top: "Convolution3" convolution_param { num_output: 128 bias_term: false pad: 1 kernel_size: 3 stride: 2 weight_filler { type: "msra" } } } layer { name: "BatchNorm4" type: "BatchNorm" bottom: "Convolution3" top: "Convolution3" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } } layer { name: "Scale4" type: "Scale" bottom: "Convolution3" top: "Convolution3" scale_param { bias_term: true } } layer { name: "ReLU3" type: "ReLU" bottom: "Convolution3" top: "Convolution3" } layer { name: "Convolution4" type: "Convolution" bottom: "Convolution3" top: "Convolution4" convolution_param { num_output: 128 pad: 1 kernel_size: 3 stride: 1 weight_filler { type: "msra" } } } layer { name: "Convolution5" type: "Convolution" bottom: "Pooling1" top: "Convolution5" convolution_param { num_output: 128 pad: 0 kernel_size: 1 stride: 2 weight_filler { type: "msra" } } } layer { name: "Eltwise1" type: "Eltwise" bottom: "Convolution4" bottom: "Convolution5" top: "Eltwise1" eltwise_param { operation: SUM } } layer { name: "BatchNorm5" type: "BatchNorm" bottom: "Eltwise1" top: "BatchNorm5" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } } layer { name: "Scale5" type: "Scale" bottom: "BatchNorm5" top: "BatchNorm5" scale_param { bias_term: true } } layer { name: "ReLU4" type: "ReLU" bottom: "BatchNorm5" top: "BatchNorm5" } layer { name: "Convolution6" type: "Convolution" bottom: "BatchNorm5" top: "Convolution6" convolution_param { num_output: 128 pad: 1 kernel_size: 3 stride: 1 weight_filler { type: "msra" } } } layer { name: "BatchNorm6" type: "BatchNorm" bottom: "Convolution6" top: "Convolution6" 
param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } } layer { name: "Scale6" type: "Scale" bottom: "Convolution6" top: "Convolution6" scale_param { bias_term: true } } layer { name: "ReLU5" type: "ReLU" bottom: "Convolution6" top: "Convolution6" } layer { name: "Convolution7" type: "Convolution" bottom: "Convolution6" top: "Convolution7" convolution_param { num_output: 128 pad: 1 kernel_size: 3 stride: 1 weight_filler { type: "msra" } } } layer { name: "Eltwise2" type: "Eltwise" bottom: "Eltwise1" bottom: "Convolution7" top: "Eltwise2" eltwise_param { operation: SUM } } layer { name: "BatchNorm7" type: "BatchNorm" bottom: "Eltwise2" top: "Eltwise2" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } } layer { name: "Scale7" type: "Scale" bottom: "Eltwise2" top: "Eltwise2" scale_param { bias_term: true } } layer { name: "Convolution8" type: "Convolution" bottom: "Eltwise2" top: "Convolution8" convolution_param { num_output: 256 bias_term: false pad: 1 kernel_size: 3 stride: 2 weight_filler { type: "msra" } } } layer { name: "BatchNorm8" type: "BatchNorm" bottom: "Convolution8" top: "Convolution8" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } } layer { name: "Scale8" type: "Scale" bottom: "Convolution8" top: "Convolution8" scale_param { bias_term: true } } layer { name: "ReLU6" type: "ReLU" bottom: "Convolution8" top: "Convolution8" } layer { name: "Convolution9" type: "Convolution" bottom: "Convolution8" top: "Convolution9" convolution_param { num_output: 256 pad: 1 kernel_size: 3 stride: 1 weight_filler { type: "msra" } } } layer { name: "Convolution10" type: "Convolution" bottom: "Eltwise2" top: "Convolution10" convolution_param { num_output: 256 pad: 0 kernel_size: 1 stride: 2 weight_filler { type: "msra" } } } layer { name: "Eltwise3" type: "Eltwise" bottom: "Convolution9" bottom: "Convolution10" top: "Eltwise3" eltwise_param { operation: SUM } } layer { name: "BatchNorm9" type: "BatchNorm" bottom: "Eltwise3" top: "BatchNorm9" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } } layer { name: "Scale9" type: "Scale" bottom: "BatchNorm9" top: "BatchNorm9" scale_param { bias_term: true } } layer { name: "ReLU7" type: "ReLU" bottom: "BatchNorm9" top: "BatchNorm9" } layer { name: "Convolution11" type: "Convolution" bottom: "BatchNorm9" top: "Convolution11" convolution_param { num_output: 256 pad: 1 kernel_size: 3 stride: 1 weight_filler { type: "msra" } } } layer { name: "BatchNorm10" type: "BatchNorm" bottom: "Convolution11" top: "Convolution11" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } } layer { name: "Scale10" type: "Scale" bottom: "Convolution11" top: "Convolution11" scale_param { bias_term: true } } layer { name: "ReLU8" type: "ReLU" bottom: "Convolution11" top: "Convolution11" } layer { name: "Convolution12" type: "Convolution" bottom: "Convolution11" top: "Convolution12" convolution_param { num_output: 256 pad: 1 kernel_size: 3 stride: 1 weight_filler { type: "msra" } } } layer { name: "Eltwise4" type: "Eltwise" bottom: "Eltwise3" bottom: "Convolution12" top: "Eltwise4" eltwise_param { operation: SUM } } layer { name: "BatchNorm11" type: "BatchNorm" bottom: "Eltwise4" top: "Eltwise4" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } } layer { name: 
"Scale11" type: "Scale" bottom: "Eltwise4" top: "Eltwise4" scale_param { bias_term: true } } layer { name: "Convolution13" type: "Convolution" bottom: "Eltwise4" top: "Convolution13" convolution_param { num_output: 512 bias_term: false pad: 1 kernel_size: 3 stride: 2 weight_filler { type: "msra" } } } layer { name: "BatchNorm12" type: "BatchNorm" bottom: "Convolution13" top: "Convolution13" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } } layer { name: "Scale12" type: "Scale" bottom: "Convolution13" top: "Convolution13" scale_param { bias_term: true } } layer { name: "ReLU9" type: "ReLU" bottom: "Convolution13" top: "Convolution13" } layer { name: "Convolution14" type: "Convolution" bottom: "Convolution13" top: "Convolution14" convolution_param { num_output: 512 pad: 1 kernel_size: 3 stride: 1 weight_filler { type: "msra" } } } layer { name: "Convolution15" type: "Convolution" bottom: "Eltwise4" top: "Convolution15" convolution_param { num_output: 512 pad: 0 kernel_size: 1 stride: 2 weight_filler { type: "msra" } } } layer { name: "Eltwise5" type: "Eltwise" bottom: "Convolution14" bottom: "Convolution15" top: "Eltwise5" eltwise_param { operation: SUM } } layer { name: "BatchNorm13" type: "BatchNorm" bottom: "Eltwise5" top: "BatchNorm13" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } } layer { name: "Scale13" type: "Scale" bottom: "BatchNorm13" top: "BatchNorm13" scale_param { bias_term: true } } layer { name: "ReLU10" type: "ReLU" bottom: "BatchNorm13" top: "BatchNorm13" } layer { name: "Convolution16" type: "Convolution" bottom: "BatchNorm13" top: "Convolution16" convolution_param { num_output: 512 pad: 1 kernel_size: 3 stride: 1 weight_filler { type: "msra" } } } layer { name: "BatchNorm14" type: "BatchNorm" bottom: "Convolution16" top: "Convolution16" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } } layer { name: "Scale14" type: "Scale" bottom: "Convolution16" top: "Convolution16" scale_param { bias_term: true } } layer { name: "ReLU11" type: "ReLU" bottom: "Convolution16" top: "Convolution16" } layer { name: "Convolution17" type: "Convolution" bottom: "Convolution16" top: "Convolution17" convolution_param { num_output: 512 pad: 1 kernel_size: 3 stride: 1 weight_filler { type: "msra" } } } layer { name: "Eltwise6" type: "Eltwise" bottom: "Eltwise5" bottom: "Convolution17" top: "Eltwise6" eltwise_param { operation: SUM } } layer { name: "Pooling2" type: "Pooling" bottom: "Eltwise6" top: "Pooling2" pooling_param { pool: MAX kernel_size: 3 stride: 1 } } layer { name: "Convolution18" type: "Convolution" bottom: "Pooling2" top: "Convolution18" convolution_param { num_output: 1 pad: 1 kernel_size: 3 } }
Hello, I do not see any attached image. Any line beginning with # is commented out and has no effect, so #pad is just a comment and does nothing. Normally, padding means the convolution input is padded with zeros along the height and width.
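(To illustrate, here is a minimal sketch of the arithmetic Caffe uses for a convolution's output spatial size; the 300-pixel input below is just an example.)

def conv_output_size(input_size, kernel_size, pad=0, stride=1):
    # Caffe computes each spatial output dimension of a convolution as:
    #   output = floor((input + 2*pad - kernel_size) / stride) + 1
    return (input_size + 2 * pad - kernel_size) // stride + 1

# A 3x3 convolution with pad: 1 and stride: 1 preserves the input size:
print(conv_output_size(300, 3, pad=1, stride=1))  # 300
# The same convolution with pad: 0 shrinks each dimension by 2:
print(conv_output_size(300, 3, pad=0, stride=1))  # 298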
Here is the attached file showing the kernel restarting, and showing data present in one layer but not in the other layer.
Hello, I still don't see anything. Maybe try posting it directly to GitHub instead of e-mailing, or try uploading it to a site like Imgur or Photobucket and send me the URL. Right now, I can't see any images attached to your post.
OK. Can you please look at my deploy file? It gets data through Convolution17, but Convolution18 does not show any data. I am working on your idea with a ResNet model...
Now please look at these...
Hello...
Please look at these files, sir, and guide me on how to solve this problem.
Hello, I will look at this this afternoon. One thought: how are you initializing the weights for the ResNet? You should initialize from a pretrained model. If you aren't initializing, your weights at convolution layer 18 may be all zeros, causing any input to produce an all-zero output. Can you try changing the weights of the Convolution18 layer and telling me what happens?
-chris
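(A rough pycaffe sketch of that check; the prototxt path is a placeholder. Note also that in the prototxt posted above, Convolution18 is the only convolution without a weight_filler, and Caffe's default filler is constant zero, which by itself would make that layer output all zeros regardless of its input.)

import numpy as np
import caffe

caffe.set_mode_cpu()
# 'res_deploy.prototxt' is a placeholder for your deploy file; the
# ResCustomData python layer must be importable for the net to load.
net = caffe.Net('res_deploy.prototxt', caffe.TEST)

# If the weights were never initialized (no weight_filler and no
# pretrained model), everything printed here will be 0.0:
w = net.params['Convolution18'][0].data
print(w.min(), w.max(), np.abs(w).mean())

# Quick experiment: set small random weights and forward again.
net.params['Convolution18'][0].data[...] = 0.01 * np.random.randn(*w.shape)
net.forward()
print(net.blobs['Convolution18'].data.sum())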
Hello, for the "kernel died" error, I am not sure what your problem is. I assume you have a GPU on your machine? Right now, Salicon.py is trying to use a GPU, so if you don't have one, you will need to take that out. You will also need to take out the solver_mode: GPU line in the prototxt file if you don't have a GPU. The other thing I can think of is that you don't have enough memory on your GPU. You will probably need 10+ GB of free GPU memory to train the VGG net, and probably a lot more for the ResNet, especially at the image sizes we are using in the SALICON model. You may want to try decreasing the image size going into the network (instead of 600, maybe make it 200). Try those things. Thanks, Chris
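(Concretely, a minimal sketch of those changes; the smaller 100x200 shape is just an example of shrinking the input.)

import caffe

# Run on the CPU instead of the GPU:
caffe.set_mode_cpu()   # replaces caffe.set_mode_gpu() / caffe.set_device(0)

# In the solver prototxt, change:
#   solver_mode: GPU   ->   solver_mode: CPU
# To reduce memory, shrink the input in the Python data layer, e.g.:
#   param_str: "1,3,300,600"   ->   param_str: "1,3,100,200"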
Hello, I even tried a 32x32 image size. I am deploying this model on only one image; once that works, I will move to the image dataset. The main problem is that Convolution18, after the pooling layer, does not get any data, while the pooling layer itself does show data; you can see this in the picture I sent. After Convolution18 I apply a sigmoid layer, as you did in your model. I run this code on the CPU version with 2 GB of memory. I also ran your code on the CPU version, just to get the saliency map for one image using your trained model, after replacing the GPU code with the CPU version. Please try to figure out this problem; I have been stuck on this issue for many days. Thanks
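(One way to confirm exactly where the activations die, sketched with pycaffe; the prototxt path is a placeholder.)

import caffe

caffe.set_mode_cpu()
net = caffe.Net('res_deploy.prototxt', caffe.TEST)  # placeholder path
net.forward()

# Print simple statistics for every blob, top to bottom, to find the
# first layer whose output collapses to zero:
for name, blob in net.blobs.items():
    data = blob.data
    print(name, data.shape, float(data.min()), float(data.max()))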
Hello, where do I get the ground-truth images for the OSIE dataset?
At this link I downloaded 700 images, but I cannot understand the files like "1001.mat". How can these files be opened?
On Thu, Dec 1, 2016 at 7:56 PM, Chris Thomas notifications@github.com wrote:
https://www.ece.nus.edu.sg/stfpage/eleqiz/predicting.html
Are these files the ground-truth images?
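(For reference, .mat files like these can be opened in Python with scipy; a minimal sketch. The variable names stored inside depend on the dataset release, so the loop below simply lists whatever is there.)

from scipy.io import loadmat

mat = loadmat('1001.mat')
# Skip MATLAB's metadata entries and list the stored variables:
for key, value in mat.items():
    if not key.startswith('__'):
        print(key, getattr(value, 'shape', type(value)))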