PaddlePaddle / PaddleSlim

PaddleSlim is an open-source library for deep model compression and architecture search.
https://paddleslim.readthedocs.io/zh_CN/latest/
Apache License 2.0

Happy New Year! Offline (post-training) quantization of the static wav2lip model finishes without producing the so-called scale file #1844

Closed drakitLiu closed 6 months ago

drakitLiu commented 7 months ago

My quantization code is based on the officially provided quant_post.py:


import os
import sys
import logging
import functools
import random
import argparse

import numpy as np
import paddle
from paddle.io import Dataset

# Make the PaddleSlim demo utilities two directories up importable.
sys.path[0] = os.path.join(
    os.path.dirname(__file__), os.path.pardir, os.path.pardir)
from paddleslim.common import get_logger
from paddleslim.quant import quant_post_static
from utility import add_arguments, print_arguments

_logger = get_logger(__name__, level=logging.INFO)

parser = argparse.ArgumentParser(description=__doc__)
add_arg = functools.partial(add_arguments, argparser=parser)
# yapf: disable
# ############ Quantization arguments
add_arg('batch_size', int, 32, "Minibatch size.")
add_arg('batch_num', int, 1, "Batch number")
add_arg('use_gpu', bool, True, "Whether to use GPU or not.")
add_arg('cpu', bool, False, "Whether to use CPU or not.")
add_arg('model_path', str, "../../inference_model/", "model dir")
add_arg('save_path', str, "../../quant_model/Wav2lip/", "model dir to save quanted model")
add_arg('model_filename', str, 'wav2lipmodelhq_netG.pdmodel', "model file name")
add_arg('params_filename', str, 'wav2lipmodelhq_netG.pdiparams', "params file name")
add_arg('algo', str, 'avg', "calibration algorithm")
add_arg('round_type', str, 'round', "The method of converting the quantized weights.")
add_arg('hist_percent', float, 0.9999, "The percentile of algo:hist")
add_arg('is_full_quantize', bool, False, "Whether is full quantization or not.")
add_arg('bias_correction', bool, False, "Whether to use bias correction")
add_arg('ce_test', bool, False, "Whether to run the CE test.")
add_arg('onnx_format', bool, True, "Whether to export the quantized model with format of ONNX.")
add_arg('input_name', list, ['audio_sequences', 'face_sequences'], "The name of model input.")
add_arg('config_file', str, '../../configs/wav2lip_hq.yaml', "config file path.")
add_arg('evaluate_only', bool, True, "Whether to only run evaluation.")

class LRS2PreprocessedDataset(Dataset):
    """Loads preprocessed (mel, face) pairs saved as audio{N}.npy / face{N}.npy."""

    def __init__(self, preprocessed_root):
        super().__init__()
        self.preprocessed_root = preprocessed_root
        self.vidname = self.get_count_samples(preprocessed_root)

    def get_count_samples(self, preprocessed_root):
        # data/my_lrs2_preprocessed/real_data
        count = 0
        data_all = []
        while True:
            mel_data_path = os.path.join(preprocessed_root, f'audio{count}.npy')
            img_data_path = os.path.join(preprocessed_root, f'face{count}.npy')
            if not os.path.exists(mel_data_path) or not os.path.exists(img_data_path):
                _logger.info('in root: {}, {} or {} is not found'.format(
                    preprocessed_root, mel_data_path, img_data_path))
                if count == 0:
                    print(mel_data_path, 'no data found! ..... error')
                    sys.exit(1)
                else:
                    return data_all
            data_all.append([mel_data_path, img_data_path])
            count += 1

    def _load_one_sample(self, idx):
        one_mel = np.load(os.path.join(self.preprocessed_root, f'audio{idx}.npy'))
        one_img = np.load(os.path.join(self.preprocessed_root, f'face{idx}.npy'))
        return one_mel, one_img

    def __getitem__(self, idx):
        return self._load_one_sample(idx)

    def __len__(self):
        return len(self.vidname)

def run_quantize(args, data_set):
    if args.ce_test:
        # Fix random seeds so CE (continuous evaluation) runs are reproducible.
        seed = 111
        np.random.seed(seed)
        paddle.seed(seed)
        random.seed(seed)

    place = paddle.CUDAPlace(0) if args.use_gpu else paddle.CPUPlace()

    # Static-graph input placeholders; shapes must match the wav2lip inference model.
    image_shape = [6, 96, 96]
    image = paddle.static.data(
        name=args.input_name[1], shape=[None] + image_shape, dtype='float32')
    voice_shape = [1, 80, 16]
    voice = paddle.static.data(
        name=args.input_name[0], shape=[None] + voice_shape, dtype='float32')

    data_loader = paddle.io.DataLoader(
        dataset=data_set,
        places=place,
        feed_list=[voice, image],
        drop_last=False,
        return_list=False,
        batch_size=args.batch_size,
        shuffle=False)

    assert os.path.exists(args.model_path), "args.model_path doesn't exist"
    assert os.path.isdir(args.model_path), "args.model_path must be a dir"
    exe = paddle.static.Executor(place)
    quant_post_static(
        executor=exe,
        model_dir=args.model_path,
        quantize_model_path=args.save_path,
        data_loader=data_loader,
        model_filename=args.model_filename,
        params_filename=args.params_filename,
        batch_size=args.batch_size,
        batch_nums=args.batch_num,
        algo=args.algo,
        round_type=args.round_type,
        hist_percent=args.hist_percent,
        is_full_quantize=args.is_full_quantize,
        bias_correction=args.bias_correction,
        onnx_format=args.onnx_format)

if __name__ == "__main__":
    args = parser.parse_args()
    if args.cpu:
        paddle.set_device('cpu')
    data_set = LRS2PreprocessedDataset(preprocessed_root='../../data/my_lrs2_preprocessed/real_data')
    paddle.enable_static()
    run_quantize(args, data_set)

Run result: (screenshot)

I did get the model file and the weights file, but not the scale file the official docs mention.

Additional note: the data from my forward-pass dataloader has already been tested with a wav2lip forward pass and works fine. PaddlePaddle is the latest stable release, python=3.8, paddleslim=2.6.
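
For reference, a minimal sanity check (assuming the quantized model was saved as model.pdmodel / model.pdiparams under the save_path above) is to load the quantized program back and inspect its feed names:

import paddle

paddle.enable_static()
exe = paddle.static.Executor(paddle.CPUPlace())
# load_inference_model appends .pdmodel / .pdiparams to this path prefix
program, feed_names, fetch_targets = paddle.static.load_inference_model(
    '../../quant_model/Wav2lip/model', exe)
print(feed_names)  # should list 'audio_sequences' and 'face_sequences'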

Thanks~

drakitLiu commented 7 months ago

I found that even without the scale file, the quantized static graph can still be merged into an ONNX model file, using the command:

paddle2onnx --model_dir ./ --model_filename model.pdmodel --params_filename model.pdiparams --save_file quant_model.onnx --opset_version 13 --enable_dev_version True --deploy_backend onnxruntime --enable_onnx_checker True

But the distortion is far too large! So I tried switching to this quantization method instead:

# quant_recon_static comes from the same module as quant_post_static
from paddleslim.quant import quant_recon_static

quant_recon_static(
    executor=exe,
    model_dir=args.model_path,
    quantize_model_path=args.save_path + 'quant_recon_static/',
    data_loader=data_loader,
    model_filename=args.model_filename,
    params_filename=args.params_filename,
    batch_size=32,
    batch_nums=256,
    region_weights_names=None,
    onnx_format=args.onnx_format,
    recon_level='region-wise',
    is_full_quantize=args.is_full_quantize,
    bias_correction=args.bias_correction,
)

The docs say this method is very time-consuming, but it turned out to be not just slow: it never succeeds at all. It has been running for 4 days now. epochs was left at the default of 20, and after the program finishes 20 epochs it starts over from 0 again, endlessly. On the fifth day it crashed with an error: (screenshot 2024-02-15 140658). At this point I could cry!

xiaoluomi commented 7 months ago

Hi! First, to answer your question about the scale file: after compressing a static-graph model with PaddleSlim, you only get the two files model.pdmodel and model.pdiparams. The scale file you are describing is the calibration.cache file, which is generated when deploying with TensorRT on GPU, i.e. by the command:

paddle2onnx --model_dir ./ --model_filename model.pdmodel --params_filename model.pdiparams --save_file float_model.onnx --opset_version 13 --enable_dev_version True --deploy_backend tensorrt --enable_onnx_checker True

As for the accuracy loss: quant_post_static and quant_recon_static are fairly old interfaces and may have problems with some models. We recommend switching to PaddleSlim's new auto-compression interface instead; see https://github.com/PaddlePaddle/PaddleSlim/tree/develop/example/auto_compression. It covers both post-training quantization and quantization-aware training, and static-graph models are a good fit for it. A simple example of the interface:

ac = AutoCompression(
    model_dir="./MobileNetV1_infer",
    model_filename="inference.pdmodel",
    params_filename="inference.pdiparams",
    save_dir="MobileNetV1_quant",
    config={"QuantPost": {}, "HyperParameterOptimization": {'ptq_algo': ['avg'], 'max_quant_count': 3}},
    train_dataloader=train_loader,
    eval_dataloader=train_loader)
ac.compress()
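
Adapted to the wav2lip model from this thread, that would look roughly like the sketch below (untested on wav2lip; the save_dir path is made up, and data_loader is the one built in run_quantize() above — AutoCompression may require the dataloader to yield feed dicts keyed by input name):

import paddle
from paddleslim.auto_compression import AutoCompression

paddle.enable_static()
# data_loader: the paddle.io.DataLoader from the script above,
# feeding 'audio_sequences' and 'face_sequences'
ac = AutoCompression(
    model_dir="../../inference_model/",
    model_filename="wav2lipmodelhq_netG.pdmodel",
    params_filename="wav2lipmodelhq_netG.pdiparams",
    save_dir="../../quant_model/Wav2lip_ac/",  # hypothetical output dir
    config={"QuantPost": {},
            "HyperParameterOptimization": {"ptq_algo": ["avg"],
                                           "max_quant_count": 3}},
    train_dataloader=data_loader,
    eval_dataloader=data_loader)
ac.compress()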

xiaoluomi commented 7 months ago

One more reminder: set the onnx_format parameter to True, so that the exported pdmodel uses Paddle's new format, which also makes later conversion to an ONNX file easier.
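
In the script above this is already the case (onnx_format defaults to True there); concretely, the relevant call reduces to something like:

quant_post_static(
    executor=exe,
    model_dir=args.model_path,
    quantize_model_path=args.save_path,
    data_loader=data_loader,
    model_filename=args.model_filename,
    params_filename=args.params_filename,
    onnx_format=True)  # new-format quantized ops; simplifies paddle2onnx conversion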

xiaoluomi commented 6 months ago

If the problem has been solved, please close this issue. Thanks!