PaddlePaddle / models

Officially maintained, supported by PaddlePaddle, including CV, NLP, Speech, Rec, TS, big models and so on.
Apache License 2.0
6.9k stars 2.91k forks source link

将两个用同一预训练网络的模型放在不同线程预测报错 #4906

Open Derek-Kun opened 3 years ago

Derek-Kun commented 3 years ago

我训了两个图像分类网络(6分类网络和1000分类网络),骨架网络都是用的ResNet200_vd,我将这两个网络分别放在不同线程运行, 单独运行的时候都没问题,两个线程同时运行就会报错: 图片 图片 两个模型用的infer.py代码都一样,只是class_dim和pretrained_model参数不一样,报错的位置是这行代码: 图片

应该是两个线程之间的指令相互干扰造成的,请问如何处理?

chenwhql commented 3 years ago

infer源码方便看下吗

Derek-Kun commented 3 years ago

infer的源码基本就是改了一下Paddle下image_classification里的infer.py,

from future import absolute_import from future import division from future import print_function

import os import time import sys

parent_path = os.path.abspath(os.path.join(file, (['..'] 2))) if parent_path not in sys.path: sys.path.append(parent_path) import math import numpy as np import functools import re import logging import time import paddle import paddle.fluid as fluid import reader import models import json from utils import * import shutil from xlrd import open_workbook import argparse import re import json

# Module-level CLI definition (mirrored inside main()).
# NOTE(review): markdown collapse damage restored: `description=doc` was
# `description=__doc__`, `getLogger(name)` was `getLogger(__name__)`, and the
# `yapf:` formatter markers had lost their leading `#`.
parser = argparse.ArgumentParser(description=__doc__)

# yapf: disable
add_arg = functools.partial(add_arguments, argparser=parser)
add_arg('data_dir', str, "./data/ILSVRC2012/val/", "The ImageNet data")
add_arg('use_gpu', bool, False, "Whether to use GPU or not.")
add_arg('class_dim', int, 1000, "Class number.")
parser.add_argument("--pretrained_model",
                    default="/home/ResNet200_vd_pretrained",
                    required=False, type=str,
                    help="The path to load pretrained model")
add_arg('model', str, "ResNet200_vd", "Set the network to use.")
add_arg('save_inference', bool, False, "Whether to save inference model or not")
add_arg('resize_short_size', int, 256, "Set resize short size")
add_arg('reader_thread', int, 1, "The number of multi thread reader")
add_arg('reader_buf_size', int, 2048, "The buf size of multi thread reader")
parser.add_argument('--image_mean', nargs='+', type=float, default=[0.485, 0.456, 0.406],
                    help="The mean of input image data")
parser.add_argument('--image_std', nargs='+', type=float, default=[0.229, 0.224, 0.225],
                    help="The std of input image data")
parser.add_argument('--image_shape', nargs='+', type=int, default=[3, 224, 224], help="the shape of image")
add_arg('topk', int, 1, "topk")
add_arg('class_map_path', str, "/Net.xls", "readable label filepath")
add_arg('interpolation', int, None, "The interpolation mode")
add_arg('padding_type', str, "SAME", "Padding type of convolution")
add_arg('use_se', bool, True, "Whether to use Squeeze-and-Excitation module for EfficientNet.")
add_arg('image_path', str, None, "single image path")
add_arg('batch_size', int, 1, "batch_size on all the devices")
add_arg('save_json_path', str, "/home/result.json", "save output to a json file")
# yapf: enable

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Module-level flag: infer() keeps looping while this is True; set_gate()
# flips it from another thread.
infer_gate = True

def set_gate(value):
    """Set the module-level ``infer_gate`` flag (used to stop the infer loop).

    NOTE(review): the original line had its three statements collapsed onto
    one physical line by the issue renderer; restored to valid Python.
    """
    global infer_gate
    infer_gate = value

def get_gate_value():
    """Return the current value of the module-level ``infer_gate`` flag."""
    return infer_gate

# NOTE(review): this block was pasted into the issue and the first physical
# line was collapsed by the markdown renderer -- the `def` header,
# `global infer_gate`, `infer_counter = 0`, `print(infer_gate)`, the
# `while infer_gate:` header, the listdir/image_path statements and the first
# `if infer_counter == 0:` all sit on one line, so the block does NOT parse
# as-is and the exact while/if nesting cannot be recovered with certainty.
# Code kept byte-identical; comments describe the apparent intent.
#
# Apparent purpose: while the module-level `infer_gate` flag is set, pick the
# first file in /home/seed as the image to classify, build the network /
# executor / label map once (guarded by `infer_counter == 0`), run batched
# prediction, and append one JSON line per image to infer_args.save_json_path.
def infer(infer_args): global infer_gate infer_counter = 0 print(infer_gate) while infer_gate: infer_files = os.listdir(r"/home/seed") infer_args.image_path = r"/home/seed/" + str(infer_files[0]) if infer_counter == 0: infer_model_list = [m for m in dir(models) if "__" not in m]

        # First-iteration-only sanity checks on the CLI arguments.
        assert infer_args.model in infer_model_list, "{} is not in lists: {}".format(infer_args.model,
                                                                         infer_model_list)
        assert os.path.isdir(infer_args.pretrained_model
                             ), "please load right pretrained model path for infer"

        # Cropped size (image_shape[1]) must fit inside the resized short edge.
        assert infer_args.image_shape[
                   1] <= infer_args.resize_short_size, "Please check the args:image_shape and args:resize_short_size, The croped size(image_shape[1]) must smaller than or equal to the resized length(resize_short_size) "

        if infer_args.image_path:
            assert os.path.isfile(
                infer_args.image_path
            ), "Please check the args:image_path, it should be a path to single image."
            # Single-image mode requires exactly one visible device.
            if infer_args.use_gpu:
                assert fluid.core.get_cuda_device_count(
                ) == 1, "please set \"export CUDA_VISIBLE_DEVICES=\" available single card"
            else:
                assert int(os.environ.get('CPU_NUM',
                                          1)) == 1, "please set CPU_NUM as 1"

    # Input placeholder; batch dimension left open (None).
    infer_image = fluid.data(
        name='image', shape=[None] + infer_args.image_shape, dtype='float32')
    if infer_counter == 0:
        # Build the network once; EfficientNet needs extra construction args.
        if infer_args.model.startswith('EfficientNet'):
            infer_model = models.__dict__[infer_args.model](is_test=True,
                                                padding_type=infer_args.padding_type, use_se=infer_args.use_se)
        else:
            infer_model = models.__dict__[infer_args.model]()
        if infer_args.model == "GoogLeNet":
            # GoogLeNet's net() returns (main_out, aux1, aux2); keep main only.
            infer_out, infer__, infer__ = infer_model.net(input=infer_image, class_dim=infer_args.class_dim)
        else:
            infer_out = infer_model.net(input=infer_image, class_dim=infer_args.class_dim)
            infer_out = fluid.layers.softmax(infer_out)

        # Snapshot the current global main program as the test program.
        # NOTE(review): both threads share fluid's process-global default
        # programs -- the likely root cause of the cross-thread interference
        # described in this issue.
        infer_test_program = fluid.default_main_program().clone(for_test=True)

        infer_gpu_id = int(os.environ.get('FLAGS_selected_gpus', 0))

        infer_place = fluid.CUDAPlace(infer_gpu_id) if infer_args.use_gpu else fluid.CPUPlace()

        infer_exe = fluid.Executor(infer_place)
        infer_exe.run(fluid.default_startup_program())
        if infer_args.use_gpu:
            infer_places = fluid.framework.cuda_places()
        else:
            infer_places = fluid.framework.cpu_places()

        # Load the saved inference model matching the requested class count.
        # NOTE(review): paths are hard-coded here; `pretrained_model` is not
        # actually used for loading. If class_dim is neither 1000 nor 6,
        # infer_compiled_program is never bound and the run below fails.
        if infer_args.class_dim == 1000:
            [infer_compiled_program, infer_feeder, infer_fetch_list] = \
                fluid.io.load_inference_model(r"/home/ResNet200_vd_1000", infer_exe,
                                              model_filename="model", params_filename="params")
        if infer_args.class_dim == 6:
            [infer_compiled_program, infer_feeder, infer_fetch_list] = \
                fluid.io.load_inference_model(r"/home/ResNet200_vd_6", infer_exe,
                                              model_filename="model", params_filename="params")

        # Optionally export the freshly built program and exit the process.
        if infer_args.save_inference:
            fluid.io.save_inference_model(
                dirname=infer_args.model,
                feeded_var_names=['image'],
                main_program=infer_test_program,
                target_vars=infer_out,
                executor=infer_exe,
                model_filename='model',
                params_filename='params')
            logger.info("model: {0} is already saved".format(infer_args.model))
            exit(0)

        infer_imagenet_reader = reader.ImageNetReader()

    # Fresh reader each pass (picks up the updated image_path).
    infer_test_reader = infer_imagenet_reader.test(settings=infer_args)

    # NOTE(review): this rebinds `infer_feeder`, discarding the feeder
    # returned by load_inference_model above.
    infer_feeder = fluid.DataFeeder(place=infer_places, feed_list=[infer_image])
    if infer_counter == 0:
        TOPK = infer_args.topk
        if os.path.exists(infer_args.class_map_path):
            logger.info(
                "The map of readable label and numerical label has been found!")

            # Build numeric-label -> readable-label map from the spreadsheet.
            # NOTE(review): columns 3..6 presumably hold image id plus three
            # granularities of tag (coarse/mid/fine) -- confirm against the
            # actual xls layout.
            infer_workbook = open_workbook(infer_args.class_map_path)
            infer_sheet = infer_workbook.sheet_by_index(0)
            infer_image_id = infer_sheet.col_values(3)
            infer_first_tag = infer_sheet.col_values(4)
            infer_second_tag = infer_sheet.col_values(5)
            infer_third_tag = infer_sheet.col_values(6)
            infer_label_dict = {}
            for i in range(1, len(infer_image_id)):
                infer_label_dict[infer_image_id[i]] = [infer_first_tag[i], infer_second_tag[i], infer_third_tag[i]]

    infer_info = {}
    infer_parallel_data = []
    infer_parallel_id = []
    # Number of devices to batch data across before each exe.run call.
    infer_place_num = paddle.fluid.core.get_cuda_device_count(
    ) if infer_args.use_gpu else int(os.environ.get('CPU_NUM', 1))
    if os.path.exists(infer_args.save_json_path):
        logger.warning("path: {} Already exists! will recover it\n".format(
            infer_args.save_json_path))
    with open(infer_args.save_json_path, "w") as infer_fout:
        for batch_id, infer_data in enumerate(infer_test_reader()):
            infer_image_data = [[items[0]] for items in infer_data]
            infer_image_id = [items[1] for items in infer_data]

            # Accumulate one sub-batch per device, then run once.
            infer_parallel_id.append(infer_image_id)
            infer_parallel_data.append(infer_image_data)

            if infer_place_num == len(infer_parallel_data):

                infer_result = infer_exe.run(
                    infer_compiled_program,
                    fetch_list=infer_fetch_list,
                    feed=list(infer_feeder.feed_parallel(infer_parallel_data, infer_place_num)))
                for infer_i, infer_res in enumerate(infer_result[0]):
                    # Top-K class indices, highest score first.
                    infer_pred_label = np.argsort(infer_res)[::-1][:TOPK]
                    infer_real_id = str(np.array(infer_parallel_id).flatten()[infer_i])
                    _, infer_real_id = os.path.split(infer_real_id)

                    if os.path.exists(infer_args.class_map_path):
                        # Map each predicted label to a readable tag, choosing
                        # tag granularity by confidence threshold.
                        # NOTE(review): `infer_res[infer_pred_label][0]` indexes
                        # with the whole top-K array and takes element 0, i.e.
                        # always the top-1 score -- likely intended to be
                        # `infer_res[infer_label]`. Confirm before relying on
                        # per-label thresholds.
                        infer_readable_pred_label = []
                        for infer_label in infer_pred_label:
                            if infer_res[infer_pred_label][0] > 0.9:
                                if infer_label_dict[infer_label][2]:
                                    infer_readable_pred_label.append(infer_label_dict[infer_label][2])
                                else:
                                    infer_readable_pred_label.append(infer_label_dict[infer_label][1])
                            elif infer_res[infer_pred_label][0] > 0.8:
                                infer_readable_pred_label.append(infer_label_dict[infer_label][1])
                            elif infer_res[infer_pred_label][0] > 0.2:
                                if infer_label_dict[infer_label][0]:
                                    infer_readable_pred_label.append(infer_label_dict[infer_label][0])
                                else:
                                    infer_readable_pred_label.append(infer_label_dict[infer_label][1])
                            else:
                                # Low confidence: record a placeholder tag.
                                infer_readable_pred_label.append("记录")

                        infer_info[infer_real_id] = {}
                        infer_info[infer_real_id]['id'], infer_info[infer_real_id]['score'], infer_info[infer_real_id]['class'], infer_info[
                            infer_real_id]['class_name'] = str(infer_real_id), str(infer_res[infer_pred_label]), str(
                            infer_pred_label), infer_readable_pred_label
                    else:
                        infer_info[infer_real_id] = {}
                        infer_info[infer_real_id]['id'], infer_info[infer_real_id]['score'], infer_info[infer_real_id]['class'] = \
                            str(infer_real_id), str(infer_res[infer_pred_label]), str(infer_pred_label)

                    logger.info("{}, {}".format(infer_real_id, infer_info[infer_real_id]))

                    # fout.write(real_id + "\t" + json.dumps(info[real_id]) +
                    #            "\n")
                    infer_fout.write(json.dumps(infer_info[infer_real_id]) + "\n")

                infer_parallel_data = []
                infer_parallel_id = []

    # One pass done; clearing the gate makes the while loop exit after a
    # single iteration (unless another thread re-enables it).
    infer_counter = infer_counter + 1
    sys.stdout.flush()
    infer_gate = False

def main():
    """Parse CLI arguments, verify the runtime environment, and run inference.

    NOTE(review): the pasted body had lost all indentation (nothing was
    actually inside the ``def``); restored here. This duplicates the
    module-level parser setup above -- both define the same options.
    """
    infer_parser = argparse.ArgumentParser(description=__doc__)
    # yapf: disable
    infer_add_arg = functools.partial(add_arguments, argparser=infer_parser)
    infer_add_arg('data_dir', str, "./data/ILSVRC2012/val/", "The ImageNet data")
    infer_add_arg('use_gpu', bool, False, "Whether to use GPU or not.")
    infer_add_arg('class_dim', int, 1000, "Class number.")
    infer_parser.add_argument("--pretrained_model",
                              default="/home/ResNet200_vd_pretrained",
                              required=False, type=str,
                              help="The path to load pretrained model")
    infer_add_arg('model', str, "ResNet200_vd", "Set the network to use.")
    infer_add_arg('save_inference', bool, False, "Whether to save inference model or not")
    infer_add_arg('resize_short_size', int, 256, "Set resize short size")
    infer_add_arg('reader_thread', int, 1, "The number of multi thread reader")
    infer_add_arg('reader_buf_size', int, 2048, "The buf size of multi thread reader")
    infer_parser.add_argument('--image_mean', nargs='+', type=float, default=[0.485, 0.456, 0.406],
                              help="The mean of input image data")
    infer_parser.add_argument('--image_std', nargs='+', type=float, default=[0.229, 0.224, 0.225],
                              help="The std of input image data")
    infer_parser.add_argument('--image_shape', nargs='+', type=int, default=[3, 224, 224], help="the shape of image")
    infer_add_arg('topk', int, 1, "topk")
    infer_add_arg('class_map_path', str, "/home/Net.xls",
                  "readable label filepath")
    infer_add_arg('interpolation', int, None, "The interpolation mode")
    infer_add_arg('padding_type', str, "SAME", "Padding type of convolution")
    infer_add_arg('use_se', bool, True, "Whether to use Squeeze-and-Excitation module for EfficientNet.")
    infer_add_arg('image_path', str, None, "single image path")
    infer_add_arg('batch_size', int, 1, "batch_size on all the devices")
    infer_add_arg('save_json_path', str, "/home/result.json", "save output to a json file")
    # yapf: enable

    logging.basicConfig(level=logging.INFO)

    infer_args = infer_parser.parse_args()

    # Abort early if the GPU setup or Paddle version is unsuitable.
    check_gpu()
    check_version()
    infer(infer_args)

# Script entry point. NOTE(review): the renderer turned `__name__` into `name`
# and `'__main__'` into `'main'`, and collapsed the guard and timer onto one
# line; restored. Times the full run and prints the elapsed seconds.
if __name__ == '__main__':
    a = time.time()
    main()
    b = time.time()
    print(str(b - a))
Derek-Kun commented 3 years ago

我尝试过把所有变量名都替换然后做成两个infer1.py和infer2.py,因为多线程会共享命名空间我怕会相互干扰,但是还是会报错,我猜应该是我调用的方法里面有些变量命名冲突。我现在的解决办法是把多线程改成多进程就不冲突了,但是这样的话每个进程没法共享全局变量了,只能通过别的方法来控制每个进程

chenwhql commented 3 years ago

好像没看到多线程控制的代码?要不您把您管理线程的关键代码截一下?

Derek-Kun commented 3 years ago

就是定义了两个继承了Threading的类,分别把两个infer.py作为Module import进来,满足一定条件就同时开启两个Threading,我测试的时候把两个Infer的预测时间间隔了3秒(time.sleep(3))就能一切正常运行,同时开启就会报错,所以我猜测可能是因为同时开了两个线程,Paddle自带的一些方法在调用变量的时候,因为变量名在同一片命名空间,会相互错误引用。比方说infer 1 里面的有一个Paddle的方法,a = 30, b = a + 10, infer 2 里是a = 20, b = a + 10, 如果infer 2 里的a = 20命令插入了infer 1 的a = 30, b = a + 10 之间, 那么infer 1 里的b就变成了30,本来b应该等于40被下面的方法调用,现在变成了30,后面的维度匹配不上就报错了。 当然这个也只是我个人的猜测,我现在改成多进程然后用共享硬盘文件的方法模拟“全局变量”了

图片 图片