leoxiaobin / deep-high-resolution-net.pytorch

The project is an official implementation of our CVPR2019 paper "Deep High-Resolution Representation Learning for Human Pose Estimation"
https://jingdongwang2017.github.io/Projects/HRNet/PoseEstimation.html
MIT License

Can you provide an ONNX model for me? #302

Open lianghuikang opened 1 year ago

lianghuikang commented 1 year ago

I am searching for an ONNX file of your model, but it seems there is no ready-made one. Could you provide it?

ToniButland1998 commented 1 year ago

I think you can try exporting it to ONNX yourself. Have you ever tried exporting to ONNX? How is the ONNX inference performance?
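For a rough performance check, once a model has been exported (for example with the script in the next comment), it can be timed with onnxruntime. This is only a minimal sketch, assuming onnxruntime is installed, that the file is named pose_hrnet_w32_256x192.onnx as in the script below, and that the 256x192 input of the w32 config is used:

import time

import numpy as np
import onnxruntime as ort

# Load the exported model on CPU (swap in CUDAExecutionProvider to test GPU speed).
session = ort.InferenceSession('pose_hrnet_w32_256x192.onnx',
                               providers=['CPUExecutionProvider'])
input_name = session.get_inputs()[0].name

# Random NCHW tensor with the 256x192 (height x width) input size of the w32 config.
dummy = np.random.randn(1, 3, 256, 192).astype(np.float32)

# Warm up, then average a number of runs for a rough latency figure.
for _ in range(5):
    session.run(None, {input_name: dummy})

n_runs = 50
start = time.time()
for _ in range(n_runs):
    heatmaps = session.run(None, {input_name: dummy})[0]

print('output shape:', heatmaps.shape)  # (1, 17, 64, 48) for the COCO model
print('avg latency: %.1f ms' % ((time.time() - start) / n_runs * 1000))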

ToniButland1998 commented 1 year ago

I wrote an export_onnx.py script that exports the PyTorch model to ONNX. You may try it:


from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import argparse
import os
import pprint

import torch
import torch.backends.cudnn as cudnn

import _init_paths
from config import cfg
from config import update_config
from utils.utils import create_logger

import models

#python tools/export_onnx.py --cfg experiments/coco/hrnet/w32_256x192_adam_lr1e-3.yaml  TEST.MODEL_FILE models/pytorch/pose_coco/pose_hrnet_w32_256x192.pth

def parse_args():
    parser = argparse.ArgumentParser(description='Export pose network to ONNX')
    # general
    parser.add_argument('--cfg',
                        help='experiment configure file name',
                        required=True,
                        type=str)

    parser.add_argument('opts',
                        help="Modify config options using the command-line",
                        default=None,
                        nargs=argparse.REMAINDER)

    parser.add_argument('--modelDir',
                        help='model directory',
                        type=str,
                        default='')
    parser.add_argument('--logDir',
                        help='log directory',
                        type=str,
                        default='')
    parser.add_argument('--dataDir',
                        help='data directory',
                        type=str,
                        default='')
    parser.add_argument('--prevModelDir',
                        help='prev Model directory',
                        type=str,
                        default='')

    args = parser.parse_args()
    return args

def main():
    args = parse_args()
    update_config(cfg, args)

    logger, final_output_dir, tb_log_dir = create_logger(
        cfg, args.cfg, 'valid')

    logger.info(pprint.pformat(args))
    logger.info(cfg)

    # cudnn related setting
    cudnn.benchmark = cfg.CUDNN.BENCHMARK
    torch.backends.cudnn.deterministic = cfg.CUDNN.DETERMINISTIC
    torch.backends.cudnn.enabled = cfg.CUDNN.ENABLED

    model = eval('models.'+cfg.MODEL.NAME+'.get_pose_net')(
        cfg, is_train=False
    )

    if cfg.TEST.MODEL_FILE:
        logger.info('=> loading model from {}'.format(cfg.TEST.MODEL_FILE))
        model.load_state_dict(torch.load(cfg.TEST.MODEL_FILE), strict=False)
    else:
        model_state_file = os.path.join(
            final_output_dir, 'final_state.pth'
        )
        logger.info('=> loading model from {}'.format(model_state_file))
        model.load_state_dict(torch.load(model_state_file))

    # Switch to eval mode so BatchNorm/Dropout behave deterministically during export
    model.eval()

    # Dummy NCHW input matching the 256x192 (height x width) config
    dummy_input = torch.randn(1, 3, 256, 192)

    # Export the model to an ONNX file
    print('exporting model to ONNX...')
    torch.onnx.export(model, dummy_input, 'pose_hrnet_w32_256x192.onnx')

if __name__ == '__main__':
    main()

The command to launch it is:

python tools/export_onnx.py --cfg experiments/coco/hrnet/w32_256x192_adam_lr1e-3.yaml TEST.MODEL_FILE models/pytorch/pose_coco/pose_hrnet_w32_256x192.pth
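
As a sanity check on the exported file, you can run it through onnxruntime and decode the heatmap output into keypoint locations. The sketch below is a simplified stand-in for the repo's get_max_preds post-processing (plain argmax, no quarter-offset refinement or affine back-transform); it assumes onnxruntime is installed and that the input is already a normalized 256x192 person crop:

import numpy as np
import onnxruntime as ort

sess = ort.InferenceSession('pose_hrnet_w32_256x192.onnx',
                            providers=['CPUExecutionProvider'])
input_name = sess.get_inputs()[0].name

# Replace with a real, normalized 256x192 (height x width) person crop in NCHW order.
img = np.random.randn(1, 3, 256, 192).astype(np.float32)

heatmaps = sess.run(None, {input_name: img})[0]   # (1, 17, 64, 48) for COCO
num_joints, hm_h, hm_w = heatmaps.shape[1:]

# Argmax of each joint heatmap, scaled back to the 256x192 network input.
flat = heatmaps[0].reshape(num_joints, -1)
idx = flat.argmax(axis=1)
xs = (idx % hm_w) * (192.0 / hm_w)
ys = (idx // hm_w) * (256.0 / hm_h)
scores = flat.max(axis=1)

for j, (x, y, s) in enumerate(zip(xs, ys, scores)):
    print('joint %2d: (%6.1f, %6.1f)  score %.3f' % (j, x, y, s))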