shurans / sscnet

Semantic Scene Completion from a Single Depth Image
http://sscnet.cs.princeton.edu/
339 stars 91 forks source link

When running demotest_model.py, an error occurred #41

Closed LongruiDong closed 5 years ago

LongruiDong commented 6 years ago

ubuntu 16.04 CUDA 8.0 cudnn 5.1 NVIDIA Drivers:384.130 python 3.6

make all & make pycaffe both succeeded. However, when I run demotest_model.py, the following problem happens:

WARNING: Logging before InitGoogleLogging() is written to STDERR
E1031 20:10:32.822459 22163 common.cpp:113] Cannot create Cublas handle. Cublas won't be available.
W1031 20:10:32.830101 22163 _caffe.cpp:122] DEPRECATION WARNING - deprecated use of Python interface
W1031 20:10:32.830122 22163 _caffe.cpp:123] Use this instead (with the named "weights" parameter):
W1031 20:10:32.830127 22163 _caffe.cpp:125] Net('demo.txt', 1, weights='../models/suncg_ft_nyu.caffemodel')
I1031 20:10:32.832070 22163 net.cpp:58] Initializing net from parameters: 
state {
  phase: TEST
  level: 0
}
layer {
  name: "data"
  type: "SuncgData"
  top: "data"
  top: "seg_label"
  top: "seg_weight"
  suncg_data_param {
    file_data: "."
    file_list: ""
    vox_unit: 0.02
    vox_margin: 0.24
    vox_size: 240
    vox_size: 144
    vox_size: 240
    crop_size: 240
    crop_size: 144
    crop_size: 240
    label_size: 60
    label_size: 36
    label_size: 60
    seg_classes: 11
    shuffle: false
    occ_empty_only: true
    neg_obj_sample_ratio: 2
    seg_class_map: 0
    seg_class_map: 1
    seg_class_map: 2
    seg_class_map: 3
    seg_class_map: 4
    seg_class_map: 11
    seg_class_map: 5
    seg_class_map: 6
    seg_class_map: 7
    seg_class_map: 8
    seg_class_map: 8
    seg_class_map: 10
    seg_class_map: 10
    seg_class_map: 10
    seg_class_map: 11
    seg_class_map: 11
    seg_class_map: 9
    seg_class_map: 8
    seg_class_map: 11
    seg_class_map: 11
    seg_class_map: 11
    seg_class_map: 11
    seg_class_map: 11
    seg_class_map: 11
    seg_class_map: 11
    seg_class_map: 11
    seg_class_map: 11
    seg_class_map: 10
    seg_class_map: 10
    seg_class_map: 11
    seg_class_map: 8
    seg_class_map: 10
    seg_class_map: 11
    seg_class_map: 9
    seg_class_map: 11
    seg_class_map: 11
    seg_class_map: 11
    seg_class_weight: 1
    seg_class_weight: 1
    seg_class_weight: 1
    seg_class_weight: 1
    seg_class_weight: 1
    seg_class_weight: 1
    seg_class_weight: 1
    seg_class_weight: 1
    seg_class_weight: 1
    seg_class_weight: 1
    seg_class_weight: 1
    seg_class_weight: 1
    seg_class_weight: 1
    seg_class_weight: 1
    seg_class_weight: 1
    seg_class_weight: 1
    seg_class_weight: 1
    occ_class_weight: 10
    occ_class_weight: 10
    with_projection_tsdf: false
    batch_size: 1
    tsdf_type: 1
    data_type: TSDF
  }
}
layer {
  name: "conv1_1"
  type: "Convolution"
  bottom: "data"
  top: "conv1_1"
  convolution_param {
    num_output: 16
    pad: 3
    pad: 3
    pad: 3
    kernel_size: 7
    kernel_size: 7
    kernel_size: 7
    stride: 2
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
    }
    engine: CUDNN
    axis: 1
  }
}
layer {
  name: "relu1_1"
  type: "ReLU"
  bottom: "conv1_1"
  top: "conv1_1"
}
layer {
  name: "reduction2_1"
  type: "Convolution"
  bottom: "conv1_1"
  top: "reduction2_1"
  convolution_param {
    num_output: 32
    bias_term: false
    pad: 0
    kernel_size: 1
    stride: 1
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
    }
  }
}
layer {
  name: "conv2_1"
  type: "Convolution"
  bottom: "conv1_1"
  top: "conv2_1"
  convolution_param {
    num_output: 32
    pad: 1
    pad: 1
    pad: 1
    kernel_size: 3
    kernel_size: 3
    kernel_size: 3
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
    }
    engine: CUDNN
    axis: 1
  }
}
layer {
  name: "relu2_1"
  type: "ReLU"
  bottom: "conv2_1"
  top: "conv2_1"
}
layer {
  name: "conv2_2"
  type: "Convolution"
  bottom: "conv2_1"
  top: "conv2_2"
  convolution_param {
    num_output: 32
    pad: 1
    pad: 1
    pad: 1
    kernel_size: 3
    kernel_size: 3
    kernel_size: 3
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
    }
    axis: 1
  }
}
layer {
  name: "res2_2"
  type: "Eltwise"
  bottom: "reduction2_1"
  bottom: "conv2_2"
  top: "res2_2"
}
layer {
  name: "add2_2"
  type: "ReLU"
  bottom: "res2_2"
  top: "res2_2"
}
layer {
  name: "pool2"
  type: "Pooling"
  bottom: "res2_2"
  top: "pool2"
  pooling_param {
    pool: MAX
    kernel_size: 2
    stride: 2
    engine: CUDNN
  }
}
layer {
  name: "reduction3_1"
  type: "Convolution"
  bottom: "pool2"
  top: "reduction3_1"
  convolution_param {
    num_output: 64
    bias_term: false
    pad: 0
    kernel_size: 1
    stride: 1
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
    }
  }
}
layer {
  name: "conv3_1"
  type: "Convolution"
  bottom: "pool2"
  top: "conv3_1"
  convolution_param {
    num_output: 64
    pad: 1
    pad: 1
    pad: 1
    kernel_size: 3
    kernel_size: 3
    kernel_size: 3
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
    }
    engine: CUDNN
    axis: 1
  }
}
layer {
  name: "relu3_1"
  type: "ReLU"
  bottom: "conv3_1"
  top: "conv3_1"
}
layer {
  name: "conv3_2"
  type: "Convolution"
  bottom: "conv3_1"
  top: "conv3_2"
  convolution_param {
    num_output: 64
    pad: 1
    pad: 1
    pad: 1
    kernel_size: 3
    kernel_size: 3
    kernel_size: 3
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
    }
    engine: CUDNN
    axis: 1
  }
}
layer {
  name: "res3_2"
  type: "Eltwise"
  bottom: "reduction3_1"
  bottom: "conv3_2"
  top: "res3_2"
}
layer {
  name: "add3_2"
  type: "ReLU"
  bottom: "res3_2"
  top: "res3_2"
}
layer {
  name: "relu3_2"
  type: "ReLU"
  bottom: "res3_2"
  top: "res3_2"
}
layer {
  name: "conv3_3"
  type: "Convolution"
  bottom: "res3_2"
  top: "conv3_3"
  convolution_param {
    num_output: 64
    pad: 1
    pad: 1
    pad: 1
    kernel_size: 3
    kernel_size: 3
    kernel_size: 3
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
    }
    engine: CUDNN
    axis: 1
  }
}
layer {
  name: "relu3_3"
  type: "ReLU"
  bottom: "conv3_3"
  top: "conv3_3"
}
layer {
  name: "conv3_4"
  type: "Convolution"
  bottom: "conv3_3"
  top: "conv3_4"
  convolution_param {
    num_output: 64
    pad: 1
    pad: 1
    pad: 1
    kernel_size: 3
    kernel_size: 3
    kernel_size: 3
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
    }
    engine: CUDNN
    axis: 1
  }
}
layer {
  name: "res3_4"
  type: "Eltwise"
  bottom: "res3_2"
  bottom: "conv3_4"
  top: "res3_4"
}
layer {
  name: "add3_4"
  type: "ReLU"
  bottom: "res3_4"
  top: "res3_4"
}
layer {
  name: "relu3_4"
  type: "ReLU"
  bottom: "res3_4"
  top: "res3_4"
}
layer {
  name: "conv3_5"
  type: "Convolution"
  bottom: "res3_4"
  top: "conv3_5"
  convolution_param {
    num_output: 64
    pad: 2
    pad: 2
    pad: 2
    kernel_size: 3
    kernel_size: 3
    kernel_size: 3
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
    }
    engine: CAFFE
    axis: 1
    dilation: 2
    dilation: 2
    dilation: 2
  }
}
layer {
  name: "relu3_5"
  type: "ReLU"
  bottom: "conv3_5"
  top: "conv3_5"
}
layer {
  name: "conv3_6"
  type: "Convolution"
  bottom: "conv3_5"
  top: "conv3_6"
  convolution_param {
    num_output: 64
    pad: 2
    pad: 2
    pad: 2
    kernel_size: 3
    kernel_size: 3
    kernel_size: 3
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
    }
    engine: CAFFE
    axis: 1
    dilation: 2
    dilation: 2
    dilation: 2
  }
}
layer {
  name: "res3_6"
  type: "Eltwise"
  bottom: "res3_4"
  bottom: "conv3_6"
  top: "res3_6"
}
layer {
  name: "add3_6"
  type: "ReLU"
  bottom: "res3_6"
  top: "res3_6"
}
layer {
  name: "relu3_6"
  type: "ReLU"
  bottom: "res3_6"
  top: "res3_6"
}
layer {
  name: "conv3_7"
  type: "Convolution"
  bottom: "res3_6"
  top: "conv3_7"
  convolution_param {
    num_output: 64
    pad: 2
    pad: 2
    pad: 2
    kernel_size: 3
    kernel_size: 3
    kernel_size: 3
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
    }
    engine: CAFFE
    axis: 1
    dilation: 2
    dilation: 2
    dilation: 2
  }
}
layer {
  name: "relu3_7"
  type: "ReLU"
  bottom: "conv3_7"
  top: "conv3_7"
}
layer {
  name: "conv3_8"
  type: "Convolution"
  bottom: "conv3_7"
  top: "conv3_8"
  convolution_param {
    num_output: 64
    pad: 2
    pad: 2
    pad: 2
    kernel_size: 3
    kernel_size: 3
    kernel_size: 3
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
    }
    engine: CAFFE
    axis: 1
    dilation: 2
    dilation: 2
    dilation: 2
  }
}
layer {
  name: "res3_8"
  type: "Eltwise"
  bottom: "res3_6"
  bottom: "conv3_8"
  top: "res3_8"
}
layer {
  name: "add3_8"
  type: "ReLU"
  bottom: "res3_8"
  top: "res3_8"
}
layer {
  name: "Concat"
  type: "Concat"
  bottom: "res3_2"
  bottom: "res3_4"
  bottom: "res3_6"
  bottom: "res3_8"
  top: "sum"
  concat_param {
    axis: 1
  }
}
layer {
  name: "sum_relu"
  type: "ReLU"
  bottom: "sum"
  top: "sum"
}
layer {
  name: "conv4_1_new"
  type: "Convolution"
  bottom: "sum"
  top: "conv4_1"
  convolution_param {
    num_output: 128
    kernel_size: 1
    kernel_size: 1
    kernel_size: 1
    weight_filler {
      type: "gaussian"
      std: 0.1
    }
    bias_filler {
      type: "constant"
    }
    axis: 1
  }
}
layer {
  name: "relu4_1"
  type: "ReLU"
  bottom: "conv4_1"
  top: "conv4_1"
}
layer {
  name: "conv4_2"
  type: "Convolution"
  bottom: "conv4_1"
  top: "conv4_2"
  convolution_param {
    num_output: 128
    kernel_size: 1
    kernel_size: 1
    kernel_size: 1
    weight_filler {
      type: "gaussian"
      std: 0.01
    }
    bias_filler {
      type: "constant"
    }
  }
}
layer {
  name: "relu4_2"
  type: "ReLU"
  bottom: "conv4_2"
  top: "conv4_2"
}
layer {
  name: "fc12"
  type: "Convolution"
  bottom: "conv4_2"
  top: "fc12"
  convolution_param {
    num_output: 12
    kernel_size: 1
    kernel_size: 1
    kernel_size: 1
    weight_filler {
      type: "gaussian"
      std: 0.01
    }
    bias_filler {
      type: "constant"
    }
  }
}
layer {
  name: "prob"
  type: "Softmax"
  bottom: "fc12"
  top: "prob"
}
I1031 20:10:32.832505 22163 layer_factory.hpp:77] Creating layer data
I1031 20:10:32.832531 22163 net.cpp:100] Creating Layer data
I1031 20:10:32.832540 22163 net.cpp:408] data -> data
I1031 20:10:32.832554 22163 net.cpp:408] data -> seg_label
I1031 20:10:32.832564 22163 net.cpp:408] data -> seg_weight
I1031 20:10:32.832585 22163 suncg_data_layer.cu:28] Read SUNCG parameters
I1031 20:10:32.832594 22163 suncg_data_layer.cu:45] num_segmentation_class12
List file not exist : ./camera_list_train.list
I1031 20:10:32.832634 22163 suncg_util.hpp:200] total number of files: 1
I1031 20:10:32.832641 22163 suncg_data_layer.cu:90] Read camera information
I1031 20:10:33.879174 22163 suncg_data_layer.cu:100] Set voxel volume parameters and copy them to GPU
I1031 20:10:33.879215 22163 suncg_data_layer.cu:107] Allocating data
I1031 20:10:33.879221 22163 suncg_data_layer.cu:108] data_num_channel: 1
I1031 20:10:33.881167 22163 suncg_data_layer.cu:165] Resize tops
I1031 20:10:33.955788 22163 net.cpp:150] Setting up data
I1031 20:10:33.955819 22163 net.cpp:157] Top shape: 1 1 240 144 240 (8294400)
I1031 20:10:33.955828 22163 net.cpp:157] Top shape: 1 1 60 36 60 (129600)
I1031 20:10:33.955834 22163 net.cpp:157] Top shape: 1 1 60 36 60 (129600)
I1031 20:10:33.955839 22163 net.cpp:165] Memory required for data: 34214400
I1031 20:10:33.955848 22163 layer_factory.hpp:77] Creating layer conv1_1
I1031 20:10:33.955869 22163 net.cpp:100] Creating Layer conv1_1
I1031 20:10:33.955876 22163 net.cpp:434] conv1_1 <- data
I1031 20:10:33.955889 22163 net.cpp:408] conv1_1 -> conv1_1
**E1031 20:10:34.877794 22173 common.cpp:113] Cannot create Cublas handle. Cublas won't be available.
F1031 20:10:35.117483 22163 cudnn_conv_layer.cpp:53] Check failed: status == CUDNN_STATUS_SUCCESS (4 vs. 0)  CUDNN_STATUS_INTERNAL_ERROR
*** Check failure stack trace: ***
Aborted (core dumped)**

Because I am a beginner with Caffe, does anyone know how to fix this issue? Does demotest_model.py also require a GPU with memory >= 12 GB? I sincerely hope to get everyone's help :D 555~

LongruiDong commented 6 years ago

Um... Later, I just commented out USE_CUDNN := 1, and the above issue disappears. However, a new error occurred in the demo:

Check failed: num_axes()<=4(5 vs. 4) Cannot use legacy accessors on Blobs with >4 axes

I do not know why...