PaddlePaddle / Paddle

PArallel Distributed Deep LEarning: Machine Learning Framework from Industrial Practice (PaddlePaddle ('飞桨') core framework: high-performance single-machine and distributed training for deep learning & machine learning, plus cross-platform deployment)
http://www.paddlepaddle.org/
Apache License 2.0

Hello, how can custom data loading in PaddlePaddle be accelerated? #18300

Closed yangninghua closed 5 years ago

yangninghua commented 5 years ago


# -*-coding:utf-8-*-
from __future__ import print_function

import os
import argparse
from PIL import Image
import numpy
import paddle
import paddle.fluid as fluid

from visualdl import LogWriter

# Create a LogWriter; the first argument is the directory where the log data is stored,
# the second argument is how many write operations happen before data is flushed from memory to disk
logw = LogWriter("./paddle_log", sync_cycle=10000)
with logw.mode('train') as logger:
    trainTag = logger.scalar("损失指标")  # tag for the training loss, shown as the "train" curve
with logw.mode('test') as logger:
    testTag = logger.scalar("损失指标")  # tag for the test loss, shown as the "test" curve

import sys

class Logger(object):
    def __init__(self, filename="Default.log"):
        self.terminal = sys.stdout
        self.log = open(filename, "a")

    def write(self, message):
        self.terminal.write(message)
        self.log.write(message)

    def flush(self):
        pass

def parse_args():
    parser = argparse.ArgumentParser("mnist")
    parser.add_argument(
        '--enable_ce',
        action='store_true',
        help="If set, run the task with continuous evaluation logs.")
    parser.add_argument(
        '--use_gpu',
        # argparse does not parse "False" correctly with type=bool; parse the string explicitly
        type=lambda v: str(v).lower() in ('true', '1'),
        default=True,
        help="Whether to use GPU or not.")
    parser.add_argument(
        '--num_epochs', type=int, default=100, help="number of epochs.")
    args = parser.parse_args()
    return args

def loss_net(prediction, label):
    # cross-entropy loss, mean loss, and accuracy
    loss = fluid.layers.cross_entropy(input=prediction, label=label)
    avg_loss = fluid.layers.mean(loss)
    acc = fluid.layers.accuracy(input=prediction, label=label)
    return prediction, avg_loss, acc

def multilayer_perceptron(img, label):
    img = fluid.layers.fc(input=img, size=200, act='tanh')
    hidden = fluid.layers.fc(input=img, size=200, act='tanh')
    prediction = fluid.layers.fc(input=hidden, size=10, act='softmax')
    return loss_net(prediction, label)

def softmax_regression(img, label):
    prediction = fluid.layers.fc(input=img, size=10, act='softmax')
    return loss_net(prediction, label)

def resnet_pre_train_10class(img, label):
    import resnet
    prediction = resnet.ResNet18().net(input=img, class_dim=10)
    # The final fc layer of the ResNet model has no softmax activation,
    # so apply softmax here and then use the cross-entropy loss
    prediction_softmax = fluid.layers.softmax(input=prediction, use_cudnn=True)
    return loss_net(prediction_softmax, label)

# ------- Monitor training progress by invoking handler functions during training -------
from paddle.utils.plot import Ploter

train_prompt = "Train cost"
test_prompt = "Test cost"
cost_ploter = Ploter(train_prompt, test_prompt)

# Plot the training curve
def event_handler_plot(ploter_title, step, cost):
    cost_ploter.append(ploter_title, step, cost)
    cost_ploter.plot("./plot/paddle_plot")

# Print intermediate training results: epoch id, batch id, and loss
def event_handler(pass_id, batch_id, cost):
    print("Pass %d, Batch %d, Cost %f" % (pass_id, batch_id, cost))

def train(nn_type,
          use_cuda,
          save_dirname=None,
          model_filename=None,
          params_filename=None):
    # If GPU is requested but this Paddle build was not compiled with CUDA support, return early
    if use_cuda and not fluid.core.is_compiled_with_cuda():
        return

    # startup_program defines parameter creation, inputs/outputs, and the initialization of the learnable parameters
    startup_program = fluid.default_startup_program()
    # main_program defines the network, the forward/backward computation, and the optimizer's updates to the learnable parameters
    main_program = fluid.default_main_program()

    def reader_creator(ynhimgs):
        def reader():
            for i in range(len(ynhimgs)):
                fn, label = ynhimgs[i]
                img = Image.open(fn).convert('RGB')
                img = img.resize((224, 224), Image.ANTIALIAS)
                img = numpy.array(img).astype(numpy.float32)
                # HWC -> CHW
                img = img.transpose((2, 0, 1))
                # RGB -> BGR, then scale to [0, 1]
                data = img[(2, 1, 0), :, :] / 255.0
                yield data, label

        return reader

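    # Each line of ./Data/train.txt is expected to be "<image path> <label>" (a sample listing is shown later in this issue)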
    fh = open('./Data/train.txt', 'r')
    ynhimgs = []
    for line in fh:
        line = line.rstrip()
        words = line.split()
        img_path_result = os.path.join("", words[0])
        ynhimgs.append((img_path_result, int(words[1])))

    # When --enable_ce is set, fix the random seed so the run is reproducible (verified via the printed acc/avg_loss)
    if args.enable_ce:
        train_reader = paddle.batch(
            reader_creator(ynhimgs), batch_size=BATCH_SIZE)
        test_reader = paddle.batch(
            reader_creator(ynhimgs), batch_size=BATCH_SIZE)
        startup_program.random_seed = 90
        main_program.random_seed = 90
    # Normal path: read samples, shuffle them, and pack them into batches
    else:
        train_reader = paddle.batch(
            paddle.reader.shuffle(reader_creator(ynhimgs), buf_size=500),
            batch_size=BATCH_SIZE)
        test_reader = paddle.batch(
            reader_creator(ynhimgs), batch_size=BATCH_SIZE)

    # Define the network inputs: image and label
    img = fluid.layers.data(name='img', shape=[3, 224, 224], dtype='float32')
    label = fluid.layers.data(name='label', shape=[1], dtype='int64')

    # Select the backbone network
    if nn_type == 'softmax_regression':
        net_conf = softmax_regression
    elif nn_type == 'multilayer_perceptron':
        net_conf = multilayer_perceptron
    else:
        net_conf = resnet_pre_train_10class

    # Assemble the network: inputs (image + label) + backbone + loss; returns the prediction, the mean loss, and the accuracy
    prediction, avg_loss, acc = net_conf(img, label)

    # Clone main_program for evaluation; the clone contains only the forward pass
    test_program = main_program.clone(for_test=True)
    # Choose the optimizer
    optimizer = fluid.optimizer.Adam(learning_rate=0.001)
    # Minimize avg_loss
    optimizer.minimize(avg_loss)

    # Define the evaluation helper up front; it is called during training
    def train_test(train_test_program, train_test_feed, train_test_reader):

        # test_program is cloned from main_program so evaluation is not mixed with gradients and other training-only ops
        acc_set = []
        avg_loss_set = []
        for test_data in train_test_reader():
            acc_np, avg_loss_np = exe.run(
                program=train_test_program,
                feed=train_test_feed.feed(test_data),
                fetch_list=[acc, avg_loss])
            acc_set.append(float(acc_np))
            avg_loss_set.append(float(avg_loss_np))
        # get test acc and loss
        acc_val_mean = numpy.array(acc_set).mean()
        avg_loss_val_mean = numpy.array(avg_loss_set).mean()
        # Return the mean loss and mean accuracy over the whole test set
        return avg_loss_val_mean, acc_val_mean

    # place is chosen by the user and determines where the program runs (GPU or CPU)
    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
    # Create an Executor
    exe = fluid.Executor(place)

    # DataFeeder converts reader output into the network's feed variables (the graph needs its inputs specified)
    feeder = fluid.DataFeeder(feed_list=[img, label], place=place)

    # Note: the loss, optimizer, and executor must all be defined before initialization.
    # Before training, run the parameter initialization defined in default_startup_program
    # (parameter creation, inputs/outputs, and initialization of the learnable parameters).
    exe.run(startup_program)
    # Number of training epochs
    epochs = [epoch_id for epoch_id in range(PASS_NUM)]

    # lists stores (epoch_id, avg_loss_val, acc_val) for each epoch
    lists = []
    step = 0
    # Loop over epochs
    for epoch_id in epochs:
        # Iterate over training batches from train_reader
        for step_id, data in enumerate(train_reader()):
            # Both the inputs and outputs have multiple columns, so fluid passes the input
            # data through the feed mapping and retrieves the desired results via fetch_list:
            metrics = exe.run(
                main_program,
                feed=feeder.feed(data),
                fetch_list=[avg_loss, acc])
            # Every 100 batches, update the loss curve and print avg_loss
            if step % 100 == 0:
                event_handler_plot(train_prompt, step, metrics[0])
                print("Pass %d, Epoch %d, Cost %f" % (step, epoch_id, metrics[0]))
            step += 1

        # Log the training loss to VisualDL
        trainTag.add_record(step, metrics[0])

        # Evaluate on the test set once per epoch
        # test for epoch
        avg_loss_val, acc_val = train_test(
            train_test_program=test_program,
            train_test_reader=test_reader,
            train_test_feed=feeder)
        # Print the test result for this epoch
        print("Test with Epoch %d, avg_cost: %s, acc: %s" % (epoch_id, avg_loss_val, acc_val))
        # Add the test avg_loss to the loss plot
        event_handler_plot(test_prompt, step, avg_loss_val)

        # Log the test loss to VisualDL
        testTag.add_record(step, avg_loss_val)

        # Record each epoch's test result so the best one can be found later
        lists.append((epoch_id, avg_loss_val, acc_val))
        # If a path is given, save the model every epoch, overwriting the previous save.
        # When saving the inference model, specify:
        #   ["img"]      -- the data that must be fed at inference time
        #   [prediction] -- the Variables that hold the inference results
        #   exe          -- the executor used to save the inference model
        if save_dirname is not None:
            fluid.io.save_inference_model(
                save_dirname, ["img"], [prediction],
                exe,
                model_filename=model_filename,
                params_filename=params_filename)

    # All epochs finished.
    # In CE mode, print the final training avg_loss and the test avg_loss_val / acc_val
    if args.enable_ce:
        print("kpis\ttrain_cost\t%f" % metrics[0])
        print("kpis\ttest_cost\t%s" % avg_loss_val)
        print("kpis\ttest_acc\t%s" % acc_val)

    # Find and print the best test result (epoch_id, avg_loss_val)
    # find the best pass
    best = sorted(lists, key=lambda item: float(item[1]))[0]
    print('Best pass is %s, testing Avgcost is %s' % (best[0], best[1]))
    # Print the best classification accuracy
    print('The classification accuracy is %.2f%%' % (float(best[2]) * 100))

def infer(use_cuda,
          save_dirname=None,
          model_filename=None,
          params_filename=None):
    # Nothing to infer if no model was saved
    if save_dirname is None:
        return

    # place determines where the program runs: CPU or GPU
    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
    # Create the executor
    exe = fluid.Executor(place)

    # Image loading helper: open with PIL, resize to the network input size, and normalize.
    # The output must be N x C x H x W, float32.
    # Preprocessing at inference must match the preprocessing used during training.
    def load_image(file):
        # The training reader uses 3-channel RGB input, so convert to RGB here as well
        # (converting to 'L' yields a single channel and the reshape below would fail)
        im = Image.open(file).convert('RGB')
        im = im.resize((224, 224), Image.ANTIALIAS)
        im = numpy.array(im).astype(numpy.float32)
        # HWC -> CHW, RGB -> BGR, scale to [0, 1], matching the training reader
        im = im.transpose((2, 0, 1))[(2, 1, 0), :, :] / 255.0
        return im.reshape(1, 3, 224, 224)

    # Directory containing the current script
    cur_dir = os.path.dirname(os.path.realpath(__file__))
    # Load the test image
    tensor_img = load_image(cur_dir + '/image/mnist_train_20.png')

    inference_scope = fluid.core.Scope()
    with fluid.scope_guard(inference_scope):
        # Use fluid.io.load_inference_model to obtain the inference program desc,
        # the feed_target_names (the names of variables that will be feeded
        # data using feed operators), and the fetch_targets (variables that
        # we want to obtain data from using fetch operators).

        # Load the model.
        # Returns (Program, feed_target_names, fetch_targets):
        #   Program           -- the inference Program
        #   feed_target_names -- a list of str: names of the variables that must be fed in the inference Program
        #   fetch_targets     -- a list of Variables from which the inference results are fetched
        [inference_program, feed_target_names, fetch_targets] = fluid.io.load_inference_model(
            save_dirname, exe, model_filename, params_filename)

        # Construct feed as a dictionary of {feed_target_name: feed_target_data}
        # and results will contain a list of data corresponding to fetch_targets.

        # The call below corresponds to the training-stage call:
        '''
        metrics = exe.run(
        main_program,
        feed=feeder.feed(data),
        fetch_list=[avg_loss, acc])
        '''
        results = exe.run(
            inference_program,
            feed={feed_target_names[0]: tensor_img},
            fetch_list=fetch_targets)
        # results is a list containing one array of the 10 class probabilities.
        # numpy.argsort sorts in ascending order,
        # so the predicted class index is lab[0][0][-1]
        lab = numpy.argsort(results)
        print("Inference result of image/mnist_train_20.png is: %d" % lab[0][0][-1])

def main(use_cuda, nn_type):
    # Model file name; if None, the default name __model__ is used
    # model_filename = None
    model_filename = "paddle_model"
    # Parameter file name; if None, parameters are saved as separate files (batch_norm_0.b_0, conv2d_0.b_0, etc.)
    # params_filename = None
    params_filename = "paddle_params"
    # Directory where the model and parameters are saved
    save_dirname = "recognize_digits_" + nn_type + ".inference.model"

    # Also write all print output to a log file
    sys.stdout = Logger('paddle_log.txt')

    # call train() with is_local argument to run distributed train
    # Train (with in-training evaluation)
    train(
        nn_type=nn_type,
        use_cuda=use_cuda,
        save_dirname=save_dirname,
        model_filename=model_filename,
        params_filename=params_filename)
    # Run inference with the saved model
    infer(
        use_cuda=use_cuda,
        save_dirname=save_dirname,
        model_filename=model_filename,
        params_filename=params_filename)

if __name__ == '__main__':
    # Parse command-line arguments
    args = parse_args()
    BATCH_SIZE = 64
    PASS_NUM = args.num_epochs
    use_cuda = args.use_gpu
    # predict = 'softmax_regression' # uncomment for Softmax
    # predict = 'multilayer_perceptron' # uncomment for MLP
    # Select the backbone network; any name other than the two above falls through to resnet_pre_train_10class in train()
    predict = 'convolutional_neural_network'  # falls through to the ResNet backbone
    main(use_cuda=use_cuda, nn_type=predict)
yangninghua commented 5 years ago

Hello, this is my adaptation of the official example. The txt file is defined as shown below. I want to read my own data through a txt file that I define myself. With the steps above it already trains, but it is extremely slow. Could someone from the Paddle team give me some advice? I have only just started with Paddle; I tried searching blogs and the documentation, but the blog posts are very old and not written for the current fluid API.

./Data/valid/3/3_4864.png 3
./Data/valid/3/3_4107.png 3
./Data/valid/3/3_5200.png 3
./Data/valid/3/3_7825.png 3
./Data/valid/3/3_8930.png 3
./Data/valid/3/3_8739.png 3
./Data/valid/3/3_7352.png 3
./Data/valid/3/3_4814.png 3
./Data/valid/3/3_6990.png 3
./Data/valid/3/3_7865.png 3
./Data/valid/3/3_7961.png 3
./Data/valid/3/3_4687.png 3
./Data/valid/3/3_5458.png 3
./Data/valid/3/3_4808.png 3
./Data/valid/3/3_819.png 3
./Data/valid/3/3_6406.png 3
./Data/valid/3/3_4682.png 3
./Data/valid/3/3_9576.png 3
./Data/valid/3/3_4963.png 3
./Data/valid/3/3_3570.png 3
./Data/valid/3/3_302.png 3
./Data/valid/3/3_6893.png 3
./Data/valid/3/3_2822.png 3
./Data/valid/3/3_5773.png 3
./Data/valid/3/3_5232.png 3
./Data/valid/3/3_6859.png 3
./Data/valid/3/3_4435.png 3
./Data/valid/3/3_2871.png 3
./Data/valid/3/3_6709.png 3
./Data/valid/3/3_1861.png 3
./Data/valid/3/3_3387.png 3
./Data/valid/3/3_63.png 3
./Data/valid/3/3_573.png 3
./Data/valid/3/3_7228.png 3
./Data/valid/3/3_1129.png 3
./Data/valid/3/3_6525.png 3
./Data/valid/3/3_68.png 3
./Data/valid/3/3_1346.png 3
./Data/valid/3/3_3353.png 3
./Data/valid/3/3_7440.png 3
./Data/valid/3/3_1268.png 3
./Data/valid/3/3_9093.png 3
./Data/valid/3/3_6111.png 3
./Data/valid/3/3_1715.png 3
./Data/valid/3/3_3023.png 3
./Data/valid/3/3_7401.png 3
./Data/valid/3/3_1595.png 3
./Data/valid/3/3_8033.png 3
./Data/valid/3/3_5689.png 3
./Data/valid/3/3_4176.png 3
./Data/valid/3/3_4785.png 3
./Data/valid/3/3_6833.png 3
./Data/valid/3/3_597.png 3
./Data/valid/3/3_3654.png 3
./Data/valid/3/3_3674.png 3
./Data/valid/3/3_4574.png 3
./Data/valid/3/3_669.png 3
./Data/valid/3/3_7807.png 3
./Data/valid/3/3_4618.png 3
./Data/valid/3/3_7680.png 3
./Data/valid/3/3_4561.png 3
./Data/valid/3/3_7979.png 3
./Data/valid/3/3_4545.png 3
./Data/valid/3/3_6694.png 3
./Data/valid/3/3_8012.png 3
./Data/valid/3/3_2410.png 3
./Data/valid/3/3_8337.png 3
./Data/valid/3/3_3013.png 3
./Data/valid/3/3_4190.png 3
./Data/valid/3/3_8114.png 3
./Data/valid/3/3_2068.png 3
./Data/valid/3/3_3391.png 3
./Data/valid/3/3_1981.png 3
./Data/valid/3/3_1386.png 3
./Data/valid/3/3_2831.png 3
./Data/valid/3/3_2840.png 3
./Data/valid/3/3_4637.png 3
./Data/valid/3/3_6054.png 3
./Data/valid/3/3_2286.png 3
./Data/valid/3/3_5597.png 3
./Data/valid/3/3_4876.png 3
./Data/valid/3/3_6293.png 3
./Data/valid/3/3_599.png 3
./Data/valid/3/3_8077.png 3
./Data/valid/3/3_5575.png 3
./Data/valid/3/3_3779.png 3
./Data/valid/3/3_6898.png 3
./Data/valid/3/3_7174.png 3
./Data/valid/3/3_397.png 3
./Data/valid/3/3_6938.png 3
./Data/valid/3/3_7970.png 3
./Data/valid/3/3_1524.png 3
./Data/valid/3/3_2489.png 3
./Data/valid/3/3_1030.png 3
./Data/valid/3/3_4407.png 3
./Data/valid/3/3_9505.png 3
./Data/valid/3/3_4996.png 3
./Data/valid/3/3_5011.png 3
./Data/valid/3/3_8746.png 3
./Data/valid/3/3_4886.png 3
JiabinYang commented 5 years ago

You can refer to this document on the official website and use py_reader to speed up data loading: http://paddlepaddle.org/documentation/docs/zh/1.4/user_guides/howto/prepare_data/use_py_reader.html
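
Roughly, the idea looks like the sketch below. This is a minimal, untested adaptation of the script posted above, following the py_reader interface described in the linked document; it assumes the reader_creator, ynhimgs, BATCH_SIZE, PASS_NUM, net_conf, and exe names defined earlier, and it replaces the fluid.layers.data declarations and the feeder.feed(data) call:

import paddle
import paddle.fluid as fluid

# Declare a py_reader that prefetches batches into a queue in a background thread,
# so Python-side image decoding overlaps with GPU computation
py_reader = fluid.layers.py_reader(
    capacity=64,
    shapes=[(-1, 3, 224, 224), (-1, 1)],
    dtypes=['float32', 'int64'],
    name='train_reader',
    use_double_buffer=True)

# Read img/label from the py_reader instead of declaring fluid.layers.data
img, label = fluid.layers.read_file(py_reader)
prediction, avg_loss, acc = net_conf(img, label)

# Attach the existing batched Python reader as the data source
py_reader.decorate_paddle_reader(
    paddle.batch(
        paddle.reader.shuffle(reader_creator(ynhimgs), buf_size=500),
        batch_size=BATCH_SIZE))

# Training loop: no feed dict is needed; the background thread keeps the queue filled
exe.run(fluid.default_startup_program())
for epoch_id in range(PASS_NUM):
    py_reader.start()
    try:
        while True:
            avg_loss_val, acc_val = exe.run(
                fluid.default_main_program(),
                fetch_list=[avg_loss, acc])
    except fluid.core.EOFException:
        # One pass over the data is finished; reset the reader for the next epoch
        py_reader.reset()

Note that the test program would need its own py_reader (or keep using a DataFeeder), and the per-sample PIL decoding could additionally be parallelized with paddle.reader.xmap_readers before batching; both points are assumptions to verify against the linked document.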