PaddlePaddle / Paddle

PArallel Distributed Deep LEarning: Machine Learning Framework from Industrial Practice (『飞桨』核心框架,深度学习&机器学习高性能单机、分布式训练和跨平台部署)
http://www.paddlepaddle.org/
Apache License 2.0
22.13k stars 5.56k forks source link

服务器端部署时Python预测api create_paddle_predictor报错 #19810

Closed xiaolvtaomi closed 4 years ago

xiaolvtaomi commented 5 years ago

目的是在服务器部署一套能够预测ocr的接口,现在本地的mac设备上跑下试试

import argparse
import numpy as np

from paddle.fluid.core import PaddleBuf
from paddle.fluid.core import PaddleDType
from paddle.fluid.core import PaddleTensor
from paddle.fluid.core import AnalysisConfig
from paddle.fluid.core import create_paddle_predictor

from PIL import Image

def main():
    """Run one OCR inference pass: build a CPU-only predictor from the model
    directory, feed a single batch, and print the argmax class per sample."""
    cli_args = parse_args()

    # Configure the analysis predictor; GPU is explicitly disabled so this
    # runs on CPU-only machines (the reporter is on a Mac).
    cpu_config = AnalysisConfig(cli_args.model_dir)
    cpu_config.disable_gpu()

    # Build the predictor from the config.
    predictor = create_paddle_predictor(cpu_config)

    # Assemble the feed tensors and run the forward pass.
    feed = fake_input(cli_args.batch_size, cli_args.img_file)
    fetch = predictor.run(feed)

    # The first output tensor holds batch_size rows of 512 scores each;
    # print the winning index for every sample in the batch.
    first = fetch[0]
    print(first.name)
    flat_scores = first.data.float_data()
    assert len(flat_scores) == 512 * cli_args.batch_size
    for sample in range(cli_args.batch_size):
        print(np.argmax(flat_scores[sample * 512:(sample + 1) * 512]))

def fake_input(batch_size, img_path):
    """Build the three feed tensors for the OCR model.

    Args:
        batch_size: number of images in the batch (the single image loaded
            from ``img_path`` is declared with this leading dimension).
        img_path: path to the input image; it is converted to grayscale.

    Returns:
        A list of three PaddleTensor objects in feed order:
        ``[init_ids, pixel image, init_scores]``.
    """
    image = PaddleTensor()
    image.name = "pixel"
    # NOTE(review): declared shape is [batch, 1, 100, 380] but the image is
    # never resized here — confirm the input file is already 100x380.
    image.shape = [batch_size, 1, 100, 380]
    image.dtype = PaddleDType.FLOAT32
    img = Image.open(img_path).convert('L')
    # BUG FIX: subtracting 127.5 from a uint8 array promotes it to float64,
    # contradicting the FLOAT32 dtype declared above; cast explicitly and
    # pass PaddleBuf a flattened list, matching the reference construction
    # (np.random.randn(...).flatten().astype("float32").tolist()).
    img = np.array(img).astype("float32") - 127.5
    image.data = PaddleBuf(img.flatten().tolist())

    # NOTE(review): ids/scores carry shape+dtype but no data buffer — this
    # looks incomplete for a beam-search decoder; verify against the model's
    # expected feeds.
    ids = PaddleTensor()
    ids.name = "init_ids"
    ids.shape = [1, 1]
    ids.dtype = PaddleDType.INT64

    scores = PaddleTensor()
    scores.name = "init_scores"
    scores.shape = [1, 1]
    scores.dtype = PaddleDType.FLOAT32

    return [ids, image, scores]

def parse_args():
    """Define and parse this script's command-line options.

    Returns an argparse.Namespace with model_dir, prog_file, params_file,
    batch_size (default 1), and img_file.
    """
    cli = argparse.ArgumentParser()
    cli.add_argument("--model_dir", type=str, help="model dir")
    cli.add_argument("--prog_file", type=str, help="program filename")
    cli.add_argument("--params_file", type=str, help="parameter filename")
    cli.add_argument("--batch_size", type=int, default=1, help="batch size")
    cli.add_argument("--img_file", type=str)
    return cli.parse_args()

if __name__ == "__main__":

    # Example invocation (macOS paths from the reporter's environment):
    # python3 serverapi.py --model_dir=/Users/lml/Documents/ocr/ocr  --img_file=/Users/lml/Downloads/009.jpg --batch_size=1
    main()

报错内容如下:

➜  ocr_recognition git:(master) ✗ python3 serverapi.py --model_dir=/Users/lml/Documents/ocr/ocr  --img_file=/Users/lml/Downloads/009.jpg
--- Running analysis [ir_graph_build_pass]
--- Running analysis [ir_analysis_pass]
--- Running IR pass [infer_clean_graph_pass]
--- Running IR pass [attention_lstm_fuse_pass]
--- Running IR pass [seqconv_eltadd_relu_fuse_pass]
--- Running IR pass [fc_lstm_fuse_pass]
--- Running IR pass [mul_lstm_fuse_pass]
--- Running IR pass [fc_gru_fuse_pass]
--- Running IR pass [mul_gru_fuse_pass]
---  detected 2 subgraphs
Traceback (most recent call last):
  File "serverapi.py", line 79, in <module>
    main()
  File "serverapi.py", line 20, in main
    predictor = create_paddle_predictor(config)
paddle.fluid.core_avx.EnforceNotMet: Node not found for PDNode fc_nobias_gru_fuse/fc/3/Out at [/home/teamcity/work/ef54dc8a5b211854/paddle/fluid/framework/ir/fc_gru_fuse_pass.cc:119]
PaddlePaddle Call Stacks: 
0          0x12dfcca7cp void paddle::platform::EnforceNotMet::Init<char const*>(char const*, char const*, int) + 636
1          0x12dfcc7abp paddle::platform::EnforceNotMet::EnforceNotMet(std::exception_ptr, char const*, int) + 139
2          0x12f274439p paddle::framework::ir::BuildFusion(paddle::framework::ir::Graph*, std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> > const&, paddle::framework::Scope*, bool)::$_0::operator()(std::__1::unordered_map<paddle::framework::ir::PDNode*, paddle::framework::ir::Node*, std::__1::hash<paddle::framework::ir::PDNode*>, std::__1::equal_to<paddle::framework::ir::PDNode*>, std::__1::allocator<std::__1::pair<paddle::framework::ir::PDNode* const, paddle::framework::ir::Node*> > > const&, paddle::framework::ir::Graph*) const + 10329
3          0x12f33a78ap paddle::framework::ir::GraphPatternDetector::operator()(paddle::framework::ir::Graph*, std::__1::function<void (std::__1::unordered_map<paddle::framework::ir::PDNode*, paddle::framework::ir::Node*, std::__1::hash<paddle::framework::ir::PDNode*>, std::__1::equal_to<paddle::framework::ir::PDNode*>, std::__1::allocator<std::__1::pair<paddle::framework::ir::PDNode* const, paddle::framework::ir::Node*> > > const&, paddle::framework::ir::Graph*)>) + 746
4          0x12f271445p paddle::framework::ir::BuildFusion(paddle::framework::ir::Graph*, std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> > const&, paddle::framework::Scope*, bool) + 725
5          0x12f271157p paddle::framework::ir::MulGRUFusePass::ApplyImpl(paddle::framework::ir::Graph*) const + 55
6          0x12f36b85cp paddle::framework::ir::Pass::Apply(paddle::framework::ir::Graph*) const + 156
7          0x12f225c76p paddle::inference::analysis::IRPassManager::Apply(std::__1::unique_ptr<paddle::framework::ir::Graph, std::__1::default_delete<paddle::framework::ir::Graph> >) + 726
8          0x12f21e8c9p paddle::inference::analysis::IrAnalysisPass::RunImpl(paddle::inference::analysis::Argument*) + 313
9          0x12f21a788p paddle::inference::analysis::Analyzer::RunAnalysis(paddle::inference::analysis::Argument*) + 568
10         0x12e195e3fp paddle::AnalysisPredictor::OptimizeInferenceProgram() + 63
11         0x12e194b93p paddle::AnalysisPredictor::PrepareProgram(std::__1::shared_ptr<paddle::framework::ProgramDesc> const&) + 179
12         0x12e194762p paddle::AnalysisPredictor::Init(std::__1::shared_ptr<paddle::framework::Scope> const&, std::__1::shared_ptr<paddle::framework::ProgramDesc> const&) + 338
13         0x12e1a46f3p std::__1::unique_ptr<paddle::PaddlePredictor, std::__1::default_delete<paddle::PaddlePredictor> > paddle::CreatePaddlePredictor<paddle::AnalysisConfig, (paddle::PaddleEngineKind)2>(paddle::AnalysisConfig const&) + 1315
14         0x12e1a75eep std::__1::unique_ptr<paddle::PaddlePredictor, std::__1::default_delete<paddle::PaddlePredictor> > paddle::CreatePaddlePredictor<paddle::AnalysisConfig>(paddle::AnalysisConfig const&) + 14
15         0x12e103cfap void pybind11::cpp_function::initialize<std::__1::unique_ptr<paddle::PaddlePredictor, std::__1::default_delete<paddle::PaddlePredictor> > (*&)(paddle::AnalysisConfig const&), std::__1::unique_ptr<paddle::PaddlePredictor, std::__1::default_delete<paddle::PaddlePredictor> >, paddle::AnalysisConfig const&, pybind11::name, pybind11::scope, pybind11::sibling>(std::__1::unique_ptr<paddle::PaddlePredictor, std::__1::default_delete<paddle::PaddlePredictor> > (*&&&)(paddle::AnalysisConfig const&), std::__1::unique_ptr<paddle::PaddlePredictor, std::__1::default_delete<paddle::PaddlePredictor> > (*)(paddle::AnalysisConfig const&), pybind11::name const&, pybind11::scope const&, pybind11::sibling const&)::'lambda'(pybind11::detail::function_call&)::operator()(pybind11::detail::function_call&) const + 106
16         0x12dfb1a08p pybind11::cpp_function::dispatcher(_object*, _object*, _object*) + 3400
17         0x100749385p _PyCFunction_FastCallDict + 229
18         0x1007d2e94p call_function + 612
19         0x1007d48a4p _PyEval_EvalFrameDefault + 5604
20         0x1007d2bdep fast_function + 606
21         0x1007d2e7bp call_function + 587
22         0x1007d48a4p _PyEval_EvalFrameDefault + 5604
23         0x1007d2230p _PyEval_EvalCodeWithName + 2720
24         0x1007d23d4p PyEval_EvalCode + 100
25         0x10080ef7ep PyRun_FileExFlags + 206
26         0x10080f21fp PyRun_SimpleFileExFlags + 447
27         0x10082886ap Py_Main + 3914
28         0x100000dfep
29         0x100000c34p
fc500110 commented 5 years ago

出错的模型有么

paddle-bot-old[bot] commented 4 years ago

Since you haven't replied for more than a year, we have closed this issue/pr. If the problem is not solved or there is a follow-up one, please reopen it at any time and we will continue to follow up. 由于您超过一年未回复,我们将关闭这个issue/pr。 若问题未解决或有后续问题,请随时重新打开,我们会继续跟进。