linghu8812 / tensorrt_inference


yolov5 to TRT conversion error #52

Closed DaChaoXc closed 3 years ago

DaChaoXc commented 3 years ago

(base) xc@xc:~/xc/code/obj/YOLO/tensorrt_inference/yolov5/build$ ./yolov5_trt ../config.yaml ../samples

Input filename: ../models/yolov5m.onnx
ONNX IR version: 0.0.6
Opset version: 12
Producer name: pytorch
Producer version: 1.7
Domain:
Model version: 0
Doc string:

WARNING: ONNX model has a newer ir_version (0.0.6) than this parser was built against (0.0.3).
While parsing node number 0 [Slice -> "179"]:
--- Begin node ---
input: "images"
input: "176"
input: "177"
input: "175"
input: "175"
output: "179"
name: "Slice_4"
op_type: "Slice"

--- End node ---
ERROR: /home/xc/xc/code/obj/TensorRT-CenterNet-master/onnx-tensorrt/ModelImporter.cpp:537 In function importModel:
[5] Assertion failed: tensors.count(input_name)
[02/20/2021-18:08:53] [E] Failure while parsing ONNX file
start building engine
[02/20/2021-18:08:53] [E] [TRT] Network must have at least one output
[02/20/2021-18:08:53] [E] [TRT] Network validation failed.
build engine done
yolov5_trt: /home/xc/xc/code/obj/YOLO/tensorrt_inference/yolov5/../includes/common/common.hpp:138: void onnxToTRTModel(const string&, const string&, nvinfer1::ICudaEngine*&, const int&): Assertion `engine' failed.
Aborted (core dumped)

I downloaded the v3.0 and v4.0 models and converted them to ONNX; converting the ONNX to TRT then failed with the error above.
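
For reference, a minimal sketch (not from the original report; it assumes the onnx Python package and the exported yolov5m.onnx) that walks the graph and flags any Slice node whose inputs are not yet defined, which appears to be what the tensors.count(input_name) assertion above is checking:

import onnx

# Load the exported model and walk its graph.
model = onnx.load('yolov5m.onnx')
graph = model.graph

# Tensors the parser can know about at each point: graph inputs, initializers,
# and the outputs of nodes that have already been visited.
known = {i.name for i in graph.input} | {init.name for init in graph.initializer}
for node in graph.node:
    if node.op_type == 'Slice':
        missing = [name for name in node.input if name and name not in known]
        if missing:
            print(node.name, 'has undefined inputs:', missing)
    known.update(node.output)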

DaChaoXc commented 3 years ago

/home/xc/xc/softwares/anaconda3/bin/python3.6 /home/xc/xc/code/obj/YOLO/tensorrt_inference/yolov5/models/yolov5/models/export_onnx.py
Namespace(batch_size=1, img_size=[640, 640], weights='yolov5m.pt')
Fusing layers...
Model Summary: 308 layers, 21356877 parameters, 0 gradients, 51.3 GFLOPS

Starting ONNX export with onnx 1.7.0...
(op_type:Slice, name:Slice_4): Inferred shape and existing shape differ in dimension 2: (640) vs (320)
ONNX export success, saved as yolov5m.onnx

Export complete. Visualize with https://github.com/lutzroeder/netron.

This is the output from the ONNX export.
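
As a side note, the Slice shape warning from the export can be reproduced offline; the sketch below (assuming onnx is installed and yolov5m.onnx is in the working directory) re-runs shape inference and prints the inferred shapes of all Slice outputs, so the (640) vs (320) mismatch can be inspected directly:

import onnx
from onnx import shape_inference

model = onnx.load('yolov5m.onnx')
onnx.checker.check_model(model)                 # raises if the model is structurally invalid
inferred = shape_inference.infer_shapes(model)  # annotates value_info with inferred shapes

# Print the inferred shape of every tensor produced by a Slice node.
slice_outputs = {o for n in inferred.graph.node if n.op_type == 'Slice' for o in n.output}
for vi in inferred.graph.value_info:
    if vi.name in slice_outputs:
        dims = [d.dim_value for d in vi.type.tensor_type.shape.dim]
        print(vi.name, dims)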

linghu8812 commented 3 years ago

https://github.com/linghu8812/tensorrt_inference/issues/12#issuecomment-745724887

Could you test it following this?

DaChaoXc commented 3 years ago

#12 (comment)

Could you test it following this?

import onnxruntime
import numpy as np

sess_options = onnxruntime.SessionOptions()
sess = onnxruntime.InferenceSession('./yolov5s.onnx', sess_options)
data = [np.random.rand(1, 3, 640, 640).astype(np.float32)]
input_names = sess.get_inputs()
feed = zip(sorted(i_.name for i_ in input_names), data)
result = sess.run(None, dict(feed))
print(result[0].shape)

/home/xc/xc/softwares/anaconda3/bin/python3.6 /home/xc/xc/code/obj/YOLO/tensorrt_inference/yolov5/models/test.py
(1, 25200, 85)
The result is correct: 25200 candidate boxes for a 640x640 input, each with 4 box coordinates, 1 objectness score and 80 class scores.

DaChaoXc commented 3 years ago

#12 (comment)

Could you test it following this?

Converting the ONNX to TRT still doesn't work.
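
The earlier log also shows the bundled parser was built against IR version 0.0.3 (the onnx-tensorrt copy under TensorRT-CenterNet-master), so the failure may come from that older parser rather than the model itself. A quick cross-check (a sketch, assuming the TensorRT 7.x Python bindings are installed) is to parse the same ONNX file with the installed TensorRT's own parser and print its errors:

import tensorrt as trt

logger = trt.Logger(trt.Logger.WARNING)
builder = trt.Builder(logger)
# Explicit-batch network, as required for ONNX models in TensorRT 7.
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
parser = trt.OnnxParser(network, logger)

with open('yolov5s.onnx', 'rb') as f:
    ok = parser.parse(f.read())

if not ok:
    for i in range(parser.num_errors):
        print(parser.get_error(i))
else:
    config = builder.create_builder_config()
    config.max_workspace_size = 1 << 30   # 1 GB workspace
    engine = builder.build_engine(network, config)
    print('engine built:', engine is not None)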

DaChaoXc commented 3 years ago

python=3.6, onnx=1.8.1, model=yolov5s_v3.pt, tensorrt=7.1.3.4

The CMake file needs to be changed as follows:

cmake_minimum_required(VERSION 3.5)

project(yolov5_trt)

set(CMAKE_SKIP_BUILD_RPATH FALSE)                 # add RPATH at build time
set(CMAKE_BUILD_WITH_INSTALL_RPATH FALSE)         # do not use the install RPATH at build time
set(CMAKE_INSTALL_RPATH "")                       # empty install RPATH
set(CMAKE_INSTALL_RPATH_USE_LINK_PATH FALSE)      # do not add link paths to the installed executable's RPATH

# CUDA

find_package(CUDA REQUIRED)
message(STATUS "Find CUDA include at ${CUDA_INCLUDE_DIRS}")
message(STATUS "Find CUDA libraries: ${CUDA_LIBRARIES}")

# TensorRT

set(TENSORRT_ROOT /usr)
find_path(TENSORRT_INCLUDE_DIR NvInfer.h
        HINTS ${TENSORRT_ROOT} PATH_SUFFIXES include/)
message(STATUS "Found TensorRT headers at ${TENSORRT_INCLUDE_DIR}")
find_library(TENSORRT_LIBRARY_INFER nvinfer
        HINTS ${TENSORRT_ROOT} ${TENSORRT_BUILD} ${CUDA_TOOLKIT_ROOT_DIR}
        PATH_SUFFIXES lib lib64 lib/x64)
find_library(TENSORRT_LIBRARY_ONNXPARSER nvonnxparser
        HINTS ${TENSORRT_ROOT} ${TENSORRT_BUILD} ${CUDA_TOOLKIT_ROOT_DIR}
        PATH_SUFFIXES lib lib64 lib/x64 lib/x86_64-linux-gnu)
set(TENSORRT_LIBRARY ${TENSORRT_LIBRARY_INFER} ${TENSORRT_LIBRARY_ONNXPARSER})
message(STATUS "Find TensorRT libs: ${TENSORRT_LIBRARY}")

# OpenCV

find_package(OpenCV REQUIRED)
message(STATUS "Find OpenCV include at ${OpenCV_INCLUDE_DIRS}")
message(STATUS "Find OpenCV libraries: ${OpenCV_LIBRARIES}")

set(COMMON_INCLUDE ../includes/common)
set(YAML_INCLUDE ../includes/yaml-cpp/include)
set(YAML_LIB_DIR ../includes/yaml-cpp/libs)

set(CMAKE_BUILD_TYPE Release)
set(CMAKE_CXX_STANDARD 11)
set(GPU_ARCHS 61)

include_directories(${CUDA_INCLUDE_DIRS} ${TENSORRT_INCLUDE_DIR} ${OpenCV_INCLUDE_DIRS} ${COMMON_INCLUDE} ${YAML_INCLUDE})
link_directories(${YAML_LIB_DIR})

add_executable(yolov5_trt main.cpp yolov5.cpp)
target_link_libraries(yolov5_trt ${OpenCV_LIBRARIES} ${CUDA_LIBRARIES} ${TENSORRT_LIBRARY} yaml-cpp)

The result is correct: [bus detection result image]

DaChaoXc commented 3 years ago

@linghu8812 @DataXujing

DaChaoXc commented 3 years ago

@linghu8812

fanchunpeng commented 2 years ago

@DaChaoXc The detection boxes in this image look correct, but the box confidence scores differ from the Python version. What could be the cause?
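
One way to narrow this down (a sketch outside the thread; trt_output.npy and ort_output.npy are hypothetical dumps of the raw (1, 25200, 85) outputs from the TensorRT demo and from the onnxruntime test above, run on the same preprocessed image) is to compare the two runtimes numerically:

import numpy as np

trt_out = np.load('trt_output.npy')   # hypothetical dump from the TensorRT demo
ort_out = np.load('ort_output.npy')   # hypothetical dump from the onnxruntime test

diff = np.abs(trt_out - ort_out)
print('max abs diff: ', diff.max())
print('mean abs diff:', diff.mean())
# Larger gaps usually point at preprocessing differences (letterbox/resize, BGR vs RGB,
# normalization) or an FP16 engine, rather than a broken engine.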