PaddlePaddle / models

Officially maintained and supported by PaddlePaddle, covering CV, NLP, Speech, Rec, TS, large models, and more.
Apache License 2.0

OCR attention model: load fails after saving the model with fluid.io.save_inference_model #3103

Closed · banbishan closed this issue 5 years ago

banbishan commented 5 years ago

In infer.py I added:

    path = "./infer_model"
    fluid.io.save_inference_model(dirname=path, feeded_var_names=['pixel'], target_vars=[ids])

Running after load fails with the error below (error screenshot omitted). Load code:

import paddle.fluid as fluid
import numpy as np

# Raw input dump; the filename suggests a (1, 3, 48, 384) NCHW float32 tensor
file_name = 'demo_1_3_48_384_nchw_float'
file = np.fromfile(file_name, 'f')

exe = fluid.Executor(fluid.CPUPlace())
path = "/mnt/e/docker/infer_model"
[inference_program, feed_target_names, fetch_targets] = fluid.io.load_inference_model(dirname=path, executor=exe)
results = exe.run(inference_program,
                  feed={feed_target_names[0]: file},
                  fetch_list=fetch_targets)
wanghaoshuang commented 5 years ago

The feed you pass to exe.run is probably wrong. Could you print the shape of file?
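
For reference, a quick way to run that check (the NCHW layout and dimensions are guessed from the filename, so treat them as assumptions):

    import numpy as np

    file = np.fromfile('demo_1_3_48_384_nchw_float', dtype='float32')
    print(file.shape)  # np.fromfile returns a flat 1-D buffer, e.g. (55296,)
    # 1 * 3 * 48 * 384 == 55296, so the feed likely needs an explicit NCHW reshape:
    file = file.reshape(1, 3, 48, 384)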

OliverLPH commented 5 years ago

> The feed you pass to exe.run is probably wrong. Could you print the shape of file?

I tried it and can confirm that the attention model indeed fails to run, while ctc works.

Traceback (most recent call last):
  File "NEW_TEST_OCR.py", line 253, in <module>
    result = get_inference_result(args)
  File "NEW_TEST_OCR.py", line 177, in get_inference_result
    feed=feed_dict)#,
  File "/usr/local/lib/python2.7/dist-packages/paddle/fluid/executor.py", line 651, in run
    use_program_cache=use_program_cache)
  File "/usr/local/lib/python2.7/dist-packages/paddle/fluid/executor.py", line 745, in _run
    fetch_var_name=fetch_var_name)
  File "/usr/local/lib/python2.7/dist-packages/paddle/fluid/executor.py", line 419, in _add_feed_fetch_ops
    if not has_feed_operators(global_block, feed, feed_var_name):
  File "/usr/local/lib/python2.7/dist-packages/paddle/fluid/executor.py", line 163, in has_feed_operators
    "Feed operators in program desc do not match 'feed_targets'")
Exception: Feed operators in program desc do not match 'feed_targets'

The error says the feed ops in the program desc do not match feed_targets.
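
The check lives in executor.py's has_feed_operators: the feed ops recorded in the saved program desc must match the keys of the feed dict one-to-one. A quick diagnostic is to print both sides just before the exe.run call in get_inference_result below (a sketch reusing that function's variables):

    # Feed ops baked into the program at save time, e.g. ['pixel']
    print(feed_var_names)
    # Keys built by get_attention_feeder_for_infer, e.g.
    # ['init_ids', 'pixel', 'init_scores']
    print(sorted(feed_dict.keys()))
    # exe.run raises exactly this exception whenever the two sets differ.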

Here is the code.

save_inference_model

def get_norm_result(args):
    """OCR inference"""
    if args.model == "crnn_ctc":
        infer = ctc_infer
        get_feeder_data = get_ctc_feeder_for_infer
    else:
        infer = attention_infer
        get_feeder_data = get_attention_feeder_for_infer

    num_classes = data_reader.num_classes()
    data_shape = data_reader.data_shape()
    # define network
    images = fluid.layers.data(name='pixel', shape=data_shape, dtype='float32')
    ids = infer(images, num_classes, use_cudnn=True if args.use_gpu else False)
    # data reader
    infer_reader = data_reader.inference(
        batch_size=args.batch_size,
        infer_images_dir=args.input_images_dir,
        infer_list_file=args.input_images_list,
        cycle=True if args.iterations > 0 else False,
        model=args.model)
    # prepare environment
    place = fluid.CPUPlace()
    if args.use_gpu:
        place = fluid.CUDAPlace(0)

    exe = fluid.Executor(place)
    exe.run(fluid.default_startup_program())
    test_program = fluid.default_main_program().clone(for_test=True)
    # load dictionary
    dict_map = None
    if args.dict is not None and os.path.isfile(args.dict):
        dict_map = {}
        with open(args.dict) as dict_file:
            for i, word in enumerate(dict_file):
                dict_map[i] = word.strip()
        print("Loaded dict from %s" % args.dict)

    # load init model
    model_dir = args.model_path
    model_file_name = None
    if not os.path.isdir(args.model_path):
        model_dir = os.path.dirname(args.model_path)
        model_file_name = os.path.basename(args.model_path)
    fluid.io.load_params(exe, dirname=model_dir, filename=model_file_name)
    print("Init model from: %s." % args.model_path)

    # save inference model
    if args.save_inference:
        feed_var_names = ['pixel']
        target_vars = ids
        fluid.io.save_inference_model(os.path.join(args.inference_model_root, args.model),
                                      feeded_var_names=feed_var_names,
                                      target_vars=target_vars,
                                      main_program=test_program,
                                      executor=exe,
                                      model_filename='model',
                                      params_filename='params')
    batch_times = []
    iters = 0
    res = []
    for data in infer_reader():
        feed_dict = get_feeder_data(data, place)
        if args.iterations > 0 and iters == args.iterations + args.skip_batch_num:
            break
        if iters < args.skip_batch_num:
            print("Warm-up itaration")
        if iters == args.skip_batch_num:
            profiler.reset_profiler()

        start = time.time()
        #print(fluid.default_main_program())
        result = exe.run(test_program,
                         feed=feed_dict,
                         fetch_list=[ids],
                         return_numpy=False)
        indexes = prune(np.array(result[0]).flatten(), 0, 1)
        batch_time = time.time() - start
        fps = args.batch_size / batch_time
        batch_times.append(batch_time)
        if dict_map is not None:
            print("Iteration %d, latency: %.5f s, fps: %f, result: %s" % (
                iters,
                batch_time,
                fps,
                [dict_map[index] for index in indexes], ))
        else:
            print("Iteration %d, latency: %.5f s, fps: %f, result: %s" % (
                iters,
                batch_time,
                fps,
                indexes, ))

        iters += 1
        res += indexes.tolist()
    return res

load_inference_model

def get_inference_result(args):
    """OCR get inference result"""
    if args.model == "crnn_ctc":
        get_feeder_data = get_ctc_feeder_for_infer
    else:
        get_feeder_data = get_attention_feeder_for_infer

    place = fluid.CUDAPlace(0) if args.use_gpu else fluid.CPUPlace()

    exe = fluid.Executor(place)
    [inference_program, feed_var_names, target_vars] = \
        fluid.io.load_inference_model(dirname=os.path.join(args.inference_model_root, args.model),
                                      executor=exe,
                                      model_filename='model',
                                      params_filename='params')

    # data reader
    infer_reader = data_reader.inference(
        batch_size=args.batch_size,
        infer_images_dir=args.input_images_dir,
        infer_list_file=args.input_images_list,
        cycle=True if args.iterations > 0 else False,
        model=args.model)

    # load dictionary
    dict_map = None
    if args.dict is not None and os.path.isfile(args.dict):
        dict_map = {}
        with open(args.dict) as dict_file:
            for i, word in enumerate(dict_file):
                dict_map[i] = word.strip()
        print("Loaded dict from %s" % args.dict)

    batch_times = []
    iters = 0
    res = []
    for data in infer_reader():
        feed_dict = get_feeder_data(data, place)
        #feed_dict.pop('init_scores')
        print(feed_dict)
        if args.iterations > 0 and iters == args.iterations + args.skip_batch_num:
            break
        if iters < args.skip_batch_num:
            print("Warm-up itaration")
        if iters == args.skip_batch_num:
            profiler.reset_profiler()

        start = time.time()
        #print(inference_program)
        result = exe.run(inference_program,
                         feed=feed_dict,
                         fetch_list=target_vars,
                         return_numpy=False)
        indexes = prune(np.array(result[0]).flatten(), 0, 1)
        batch_time = time.time() - start
        fps = args.batch_size / batch_time
        batch_times.append(batch_time)
        if dict_map is not None:
            print("Iteration %d, latency: %.5f s, fps: %f, result: %s" % (
                iters,
                batch_time,
                fps,
                [dict_map[index] for index in indexes], ))
        else:
            print("Iteration %d, latency: %.5f s, fps: %f, result: %s" % (
                iters,
                batch_time,
                fps,
                indexes, ))

        iters += 1
        res += indexes.tolist()
    return res
OliverLPH commented 5 years ago

Found the problem: the ctc model's feed_var_names = ['pixel'], while the attention model's feed_var_names = ['init_ids', 'pixel', 'init_scores'].

    # save inference model
    if args.save_inference:
        feed_var_names = ['init_ids', 'pixel', 'init_scores']
        #feed_var_names = ['pixel']
        target_vars = ids
        fluid.io.save_inference_model(os.path.join(args.inference_model_root, args.model),
                                      feeded_var_names=feed_var_names,
                                      target_vars=target_vars,
                                      main_program=test_program,
                                      executor=exe,
                                      model_filename='model',
                                      params_filename='params')

A model saved this way can be loaded and run.
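
For context, init_ids and init_scores are the LoD tensors that seed the attention decoder's beam search, and get_attention_feeder_for_infer builds them per batch. A minimal sketch of that construction (the zero start token, unit scores, and two-level LoD follow the usual fluid beam-search setup; treat the exact values as assumptions):

    import numpy as np
    import paddle.fluid as fluid

    def make_init_tensors(batch_size, place):
        # Two-level LoD: one sequence per image, one initial candidate per sequence.
        lod = [[1] * batch_size, [1] * batch_size]
        init_ids = fluid.create_lod_tensor(
            np.zeros((batch_size, 1), dtype='int64'), lod, place)
        init_scores = fluid.create_lod_tensor(
            np.ones((batch_size, 1), dtype='float32'), lod, place)
        return init_ids, init_scores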

banbishan commented 5 years ago


Hello @OliverLPH, thank you very much for the reply.
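
Putting the fix together, a minimal load-and-run sketch for the re-saved attention model, feeding all three inputs (the path, batch size, and 3x48x384 input shape are illustrative and must match your model's data_shape):

    import numpy as np
    import paddle.fluid as fluid

    place = fluid.CPUPlace()
    exe = fluid.Executor(place)
    [program, feed_names, fetch_targets] = fluid.io.load_inference_model(
        dirname='./inference_model/attention',  # illustrative path
        executor=exe, model_filename='model', params_filename='params')

    batch_size = 1
    img = np.fromfile('demo_1_3_48_384_nchw_float', dtype='float32')
    img = img.reshape(batch_size, 3, 48, 384)  # must match the model's data_shape

    lod = [[1] * batch_size, [1] * batch_size]
    init_ids = fluid.create_lod_tensor(np.zeros((batch_size, 1), dtype='int64'), lod, place)
    init_scores = fluid.create_lod_tensor(np.ones((batch_size, 1), dtype='float32'), lod, place)

    results = exe.run(program,
                      feed={'pixel': img,
                            'init_ids': init_ids,
                            'init_scores': init_scores},
                      fetch_list=fetch_targets,
                      return_numpy=False)  # beam-search output is a LoDTensor
    print(np.array(results[0]).flatten())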