PaddlePaddle / models

Officially maintained, supported by PaddlePaddle, including CV, NLP, Speech, Rec, TS, big models and so on.
Apache License 2.0
6.91k stars 2.91k forks source link

rcnn 通过load_inference_model 加载模型 报错 Tensor holds no memory #3523

Open linghaolu opened 5 years ago

linghaolu commented 5 years ago

rcnn 模型,通过 save_inference_model 保存后,通过fluid.io.load_inference_model 加载模型 预测发生错误。 Traceback (most recent call last): File "infer.py", line 185, in infer2() File "infer.py", line 71, in infer2 return_numpy=False) File "/home/work/anaconda3/lib/python3.6/site-packages/paddle/fluid/executor.py", line 650, in run use_program_cache=use_program_cache) File "/home/work/anaconda3/lib/python3.6/site-packages/paddle/fluid/executor.py", line 748, in _run exe.run(program.desc, scope, 0, True, True, fetch_var_name) paddle.fluid.core_avx.EnforceNotMet: Invoke operator generate_proposals error. Python Callstacks: File "/home/work/anaconda3/lib/python3.6/site-packages/paddle/fluid/framework.py", line 1748, in append_op attrs=kwargs.get("attrs", None)) File "/home/work/anaconda3/lib/python3.6/site-packages/paddle/fluid/layer_helper.py", line 43, in append_op return self.main_program.current_block().append_op(*args, *kwargs) File "/home/work/anaconda3/lib/python3.6/site-packages/paddle/fluid/layers/detection.py", line 2497, in generate_proposals 'RpnRoiProbs': rpn_roi_probs}) File "/home/ssd1/caohaitao/rcnn/models/model_builder.py", line 234, in rpn_heads eta=eta) File "/home/ssd1/caohaitao/rcnn/models/model_builder.py", line 41, in build_model self.rpn_heads(body_conv) File "export.py", line 24, in export model.build_model(imageshape) File "export.py", line 46, in export() C++ Callstacks: holder should not be null Tensor holds no memory. Call Tensor::mutable_data first. at [/paddle/paddle/fluid/framework/tensor.cc:23] PaddlePaddle Call Stacks: 0 0x7fc3bac1ff88p void paddle::platform::EnforceNotMet::Init(std::string, char const, int) + 360 1 0x7fc3bac202d7p paddle::platform::EnforceNotMet::EnforceNotMet(std::string const&, char const, int) + 87 2 0x7fc3bcd23c29p paddle::framework::Tensor::check_memory_size() const + 185 3 0x7fc3bcd24e2cp paddle::framework::Tensor::Slice(long, long) const + 44 4 0x7fc3bc64cd7ap paddle::operators::GenerateProposalsKernel::Compute(paddle::framework::ExecutionContext const&) const + 2682 5 0x7fc3bc64d423p std::_Function_handler<void (paddle::framework::ExecutionContext const&), paddle::framework::OpKernelRegistrarFunctor<paddle::platform::CPUPlace, false, 0ul, paddle::operators::GenerateProposalsKernel, pa$dle::operators::GenerateProposalsKernel >::operator()(char const, char const, int) const::{lambda(paddle::framework::ExecutionContext const&)#1}>::_M_invoke(std::_Anydata const&, paddle::framework::ExecutionContext const&) + 35 6 0x7fc3bcc93f87p paddle::framework::OperatorWithKernel::RunImpl(paddle::framework::Scope const&, boost::variant<paddle::platform::CUDAPlace, paddle::platform::CPUPlace, paddle::platform::CUDAPinnedPlace, boost::detail::variant::$oid, boost::detail::variant::void, boost::detail::variant::void, boost::detail::variant::void, boost::detail::variant::void, boost::detail::variant::void, boost::detail::variant::void, boost::detail::variant::void, boost::detai$::variant::void, boost::detail::variant::void, boost::detail::variant::void, boost::detail::variant::void, boost::detail::variant::void, boost::detail::variant::void, boost::detail::variant::void, boost::detail::variant::void, $oost::detail::variant::void> const&, paddle::framework::RuntimeContext) const + 375 7 0x7fc3bcc94361p paddle::framework::OperatorWithKernel::RunImpl(paddle::framework::Scope const&, boost::variant<paddle::platform::CUDAPlace, paddle::platform::CPUPlace, paddle::platform::CUDAPinnedPlace, boost::detail::variant::$oid, boost::detail::variant::void, boost::detail::variant::void, boost::detail::variant::void, boost::detail::variant::void, boost::detail::variant::void, boost::detail::variant::void, boost::detail::variant::void, boost::detai$::variant::void, boost::detail::variant::void, boost::detail::variant::void, boost::detail::variant::void, boost::detail::variant::void, boost::detail::variant::void, boost::detail::variant::void, boost::detail::variant::void, $oost::detail::variant::void> const&) const + 529 8 0x7fc3bcc9195cp paddle::framework::OperatorBase::Run(paddle::framework::Scope const&, boost::variant<paddle::platform::CUDAPlace, paddle::platform::CPUPlace, paddle::platform::CUDAPinnedPlace, boost::detail::variant::void, boo$t::detail::variant::void, boost::detail::variant::void, boost::detail::variant::void, boost::detail::variant::void, boost::detail::variant::void, boost::detail::variant::void, boost::detail::variant::void, boost::detail::variant$:void, boost::detail::variant::void, boost::detail::variant::void, boost::detail::variant::void, boost::detail::variant::void, boost::detail::variant::void, boost::detail::variant::void, boost::detail::variant::void, boost::det$il::variant::void> const&) + 332 9 0x7fc3bada9bfep paddle::framework::Executor::RunPreparedContext(paddle::framework::ExecutorPrepareContext, paddle::framework::Scope, bool, bool, bool) + 382 10 0x7fc3badacc9fp paddle::framework::Executor::Run(paddle::framework::ProgramDesc const&, paddle::framework::Scope*, int, bool, bool, std::vector<std::string, std::allocator > const&, bool) + 143 11 0x7fc3bac110edp 12 0x7fc3bac52426p 13 0x7fc3f279a364p _PyCFunction_FastCallDict + 340 14 0x7fc3f282d00ep 15 0x7fc3f284e62ap _PyEval_EvalFrameDefault + 778 16 0x7fc3f2825f24p 17 0x7fc3f2826dc1p 18 0x7fc3f282cf95p 19 0x7fc3f284f3e7p _PyEval_EvalFrameDefault + 4295 20 0x7fc3f2825f24p 21 0x7fc3f2826dc1p 22 0x7fc3f282cf95p 23 0x7fc3f284f3e7p _PyEval_EvalFrameDefault + 4295 24 0x7fc3f282628ep 25 0x7fc3f2826dc1p 26 0x7fc3f282cf95p 27 0x7fc3f284e62ap _PyEval_EvalFrameDefault + 778 28 0x7fc3f28278d9p PyEval_EvalCodeEx + 809 29 0x7fc3f282867cp PyEval_EvalCode + 28 30 0x7fc3f28a2ce4p 31 0x7fc3f28a30e1p PyRun_FileExFlags + 161 32 0x7fc3f28a32e4p PyRun_SimpleFileExFlags + 452 33 0x7fc3f28a6dafp Py_Main + 1535 34 0x7fc3f276d8bep main + 238 35 0x7fc3f1ebe7c5p __libc_start_main + 245 36 0x7fc3f28550dap

linghaolu commented 5 years ago

save 函数

` def export(): image_shape = [3, cfg.TEST.max_size, cfg.TEST.max_size] class_nums = cfg.class_num

model = model_builder.RCNN(
    add_conv_body_func=resnet.add_ResNet50_conv4_body,
    add_roi_box_head_func=resnet.add_ResNet_roi_conv5_head,
    use_pyreader=False,
    mode='infer')
model.build_model(image_shape)
pred_boxes = model.eval_bbox_out()
place = fluid.CUDAPlace(0) if cfg.use_gpu else fluid.CPUPlace()
exe = fluid.Executor(place)
# yapf: disable
if not os.path.exists(cfg.pretrained_model):
    raise ValueError("Model path [%s] does not exist." % (cfg.pretrained_model))

def if_exist(var):
    return os.path.exists(os.path.join(cfg.pretrained_model, var.name))

fluid.io.load_vars(exe, cfg.pretrained_model, predicate=if_exist)

#fluid.io.save_inference_model('model_output', ['image'], target_vars=pred_boxes, executor=exe, main_program=fluid.default_main_program())
#fluid.io.save_inference_model('./RCNN', ['image'], target_vars=pred_boxes, executor=exe, main_program=test_program,  model_filename='model', params_filename='params')
fluid.io.save_inference_model('model_output', ['image'], target_vars=pred_boxes, executor=exe)

`

linghaolu commented 5 years ago

infer 报错代码

try:
    from pycocotools.coco import COCO
    from pycocotools.cocoeval import COCOeval, Params

    data_path = DatasetPath('val')
    test_list = data_path.get_file_list()
    coco_api = COCO(test_list)
    cid = coco_api.getCatIds()
    cat_id_to_num_id_map = {
        v: i + 1
        for i, v in enumerate(coco_api.getCatIds())
    }
    category_ids = coco_api.getCatIds()
    labels_map = {
        cat_id_to_num_id_map[item['id']]: item['name']
        for item in coco_api.loadCats(category_ids)
    }
    labels_map[0] = 'background'
except:
    print("The COCO dataset or COCO API is not exist, use the default "
          "mapping of class index and real category name on COCO17.")
    assert cfg.dataset == 'coco2017'
    labels_map = coco17_labels()

image_shape = [3, cfg.TEST.max_size, cfg.TEST.max_size]
class_nums = cfg.class_num

place = fluid.CUDAPlace(0) if cfg.use_gpu else fluid.CPUPlace()
exe = fluid.Executor(place)

# load model
[inference_program, image, pred_boxes] = fluid.io.load_inference_model(
    dirname="model_output", executor=exe)

# yapf: disable
#fluid.io.save_inference_model('model_output', ['image'], target_vars=pred_boxes, executor=exe, main_program=fluid.default_main_program())

# yapf: enable
infer_reader = reader.infer(cfg.image_path)
feeder = fluid.DataFeeder(place=place, feed_list=image, program=inference_program)

dts_res = []
segms_res = []
if cfg.MASK_ON:
    fetch_list = [pred_boxes, masks]
else:
    fetch_list = [pred_boxes]
data = next(infer_reader())
print(data[0][0].shape)
im_info = [data[0][1]]
start = time.time()
result = exe.run(inference_program, fetch_list=pred_boxes,
                 feed=feeder.feed([[data[0][0]]]),
                 return_numpy=False)