open-mmlab / mmdeploy

OpenMMLab Model Deployment Framework
https://mmdeploy.readthedocs.io/en/latest/
Apache License 2.0

C++ inference encounters this error: failed to create detector, code: 6 #481

Closed 1wang11lijian1 closed 2 years ago

1wang11lijian1 commented 2 years ago

[2022-05-17 11:21:22.741] [mmdeploy] [error] [device_impl.cpp:147] 0, -1
[2022-05-17 11:21:22.742] [mmdeploy] [error] [detector.cpp:58] exception caught: invalid argument (1) @ :0
failed to create detector, code: 6

I can't create a detector when using the C++ inference interface. I need your help.

lvhan028 commented 2 years ago

Can you share your build command and execution command? It looks like the device module is not loaded successfully.

lzhangzz commented 2 years ago

[2022-05-17 11:21:22.741] [mmdeploy] [error] [device_impl.cpp:147] 0, -1

The log indicates device support is not loaded.
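
A common cause is that the module libraries which register the cuda device are never pulled into the executable at link time. Below is a minimal CMake sketch of how the shipped example forces them in; the target and source names are placeholders, while the mmdeploy_load_static / mmdeploy_load_dynamic helpers and the MMDeployStaticModules / MMDeployDynamicModules / MMDeployLibs targets come from find_package(MMDeploy):

# Sketch only: force the MMDeploy module libraries (device / net / codebase plugins)
# into the executable so the "cuda" device registers itself at startup.
# "my_detector" and "main.cpp" are placeholder names.
find_package(MMDeploy REQUIRED)
add_executable(my_detector main.cpp)
mmdeploy_load_static(my_detector MMDeployStaticModules)      # link the static module libraries
mmdeploy_load_dynamic(my_detector MMDeployDynamicModules)    # load the dynamic module libraries at runtime
target_link_libraries(my_detector PRIVATE MMDeployLibs ${OpenCV_LIBS})

If a program links only MMDeployLibs without those two helper calls, no device or backend module is loaded, and creating a detector on "cuda" would likely fail exactly like the log above.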

1wang11lijian1 commented 2 years ago

Can you share your build command and execution command? It looks like the device module is not loaded successfully.

#include <cstdio>     // fprintf
#include <algorithm>  // std::max
#include <cmath>      // std::floor
#include <opencv2/imgcodecs/imgcodecs.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include "c/detector.h"

using namespace std;

int main() {

const char* model_path  = "D:\\WIN\\work_dir_yolox_3";
const char* image_path  = "D:\\WIN\\work_dir_yolox_3\\20210722_162700_235.jpg";
const char* device_name = "cuda";   //"cpu", "cuda"

cv::Mat img = cv::imread(image_path);
if (!img.data) {
    fprintf(stderr, "failed to load image: %s\n", image_path);
    return 1;
}

mm_handle_t detector{};
int status{};
status = mmdeploy_detector_create_by_path(model_path, device_name, 0, &detector);
if (status != MM_SUCCESS) {
    fprintf(stderr, "failed to create detector, code: %d\n", (int)status);
    return 1;
}

mm_mat_t mat{ img.data, img.rows, img.cols, 3, MM_BGR, MM_INT8 };

mm_detect_t* bboxes{};
int* res_count{};
status = mmdeploy_detector_apply(detector, &mat, 1, &bboxes, &res_count);
if (status != MM_SUCCESS) {
    fprintf(stderr, "failed to apply detector, code: %d\n", (int)status);
    return 1;
}

fprintf(stdout, "bbox_count=%d\n", *res_count);

for (int i = 0; i < *res_count; ++i) {
    const auto& box = bboxes[i].bbox;
    const auto& mask = bboxes[i].mask;

    fprintf(stdout, "box %d, left=%.2f, top=%.2f, right=%.2f, bottom=%.2f, label=%d, score=%.4f\n",
            i, box.left, box.top, box.right, box.bottom, bboxes[i].label_id, bboxes[i].score);

    // skip detections with invalid bbox size (bbox height or width < 1)
    if ((box.right - box.left) < 1 || (box.bottom - box.top) < 1) {
        continue;
    }

    // skip detections less than specified score threshold
    if (bboxes[i].score < 0.1) {
        continue;
    }

    // generate mask overlay if model exports masks
    if (mask != nullptr) {
        fprintf(stdout, "mask %d, height=%d, width=%d\n", i, mask->height, mask->width);

        cv::Mat imgMask(mask->height, mask->width, CV_8UC1, &mask->data[0]);
        auto x0 = std::max(std::floor(box.left) - 1, 0.f);
        auto y0 = std::max(std::floor(box.top) - 1, 0.f);
        cv::Rect roi((int)x0, (int)y0, mask->width, mask->height);

        // split the RGB channels, overlay mask to a specific color channel
        cv::Mat ch[3];
        split(img, ch);
        int col = 0;  // int col = i % 3;
        cv::bitwise_or(imgMask, ch[col](roi), ch[col](roi));
        merge(ch, 3, img);
    }

    cv::rectangle(img, cv::Point{ (int)box.left, (int)box.top },
                  cv::Point{ (int)box.right, (int)box.bottom }, cv::Scalar{ 0, 255, 0 });
}

cv::imwrite("output_detection.png", img);

mmdeploy_detector_release_result(bboxes, res_count, 1);

mmdeploy_detector_destroy(detector);

return 0;

}

This is the code I execute. Before this problem occurred, I had reconfigured the dependencies for C++ inference. https://github.com/open-mmlab/mmdeploy/issues/280 describes exactly the same problem I encountered. How can I force the library to be loaded here?

lvhan028 commented 2 years ago

All questions mentioned in #280 have been resolved. What does your CMake file look like? How did you build your program?

1wang11lijian1 commented 2 years ago

Here are my CMakeLists file, build commands, env check, and C++ code (the C++ code is copied from example/object_detection.cpp).

cmakelist

cmake_minimum_required(VERSION 3.14)
project(yizeming_dp)

find_package(MMDeploy REQUIRED)
find_package(OpenCV REQUIRED)  # needed for ${OpenCV_LIBS} used below

function(add_example name)
  file(GLOB _SRCS ${name}.c*)
  add_executable(${name} ${_SRCS})
  if (NOT MSVC)
    # disable new dtags so that executables can run even without LD_LIBRARY_PATH set
    target_link_libraries(${name} PRIVATE -Wl,--disable-new-dtags)
  endif ()
  mmdeploy_load_static(${name} MMDeployStaticModules)
  mmdeploy_load_dynamic(${name} MMDeployDynamicModules)
  target_link_libraries(${name} PRIVATE MMDeployLibs ${OpenCV_LIBS})
endfunction()

add_example(image_classification)
add_example(object_detection)
add_example(image_restorer)
add_example(image_segmentation)
add_example(pose_detection)
add_example(ocr)

build code

cd $env:MMDEPLOY_DIR
mkdir build
cd build
cmake .. -G "Visual Studio 16 2019" -A x64 -T v142 `
  -DMMDEPLOY_BUILD_SDK=ON `
  -DMMDEPLOY_TARGET_DEVICES="cuda" `
  -DMMDEPLOY_TARGET_BACKENDS="trt" `
  -DMMDEPLOY_CODEBASES="all" `
  -Dpplcv_DIR="$env:PPLCV_DIR/pplcv-build/install/lib/cmake/ppl" `
  -DTENSORRT_DIR="$env:TENSORRT_DIR" `
  -DCUDNN_DIR="$env:CUDNN_DIR"

cmake --build . --config Release -- /m
cmake --install . --config Release

env_check

PS C:\Users\12481\MMDeploy> python ./tools/check_env.py
2022-05-18 16:13:16,567 - mmdeploy - INFO - 

2022-05-18 16:13:16,567 - mmdeploy - INFO - **********Environmental information**********
'gcc' is not recognized as an internal or external command, operable program or batch file.
2022-05-18 16:13:18,048 - mmdeploy - INFO - sys.platform: win32
2022-05-18 16:13:18,048 - mmdeploy - INFO - Python: 3.7.11 (default, Jul 27 2021, 09:42:29) [MSC v.1916 64 bit (AMD64)]
2022-05-18 16:13:18,048 - mmdeploy - INFO - CUDA available: True
2022-05-18 16:13:18,048 - mmdeploy - INFO - GPU 0: NVIDIA TITAN Xp
2022-05-18 16:13:18,049 - mmdeploy - INFO - CUDA_HOME: C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v10.2        
2022-05-18 16:13:18,049 - mmdeploy - INFO - NVCC: Cuda compilation tools, release 10.2, V10.2.89
2022-05-18 16:13:18,049 - mmdeploy - INFO - GCC: n/a
2022-05-18 16:13:18,049 - mmdeploy - INFO - PyTorch: 1.8.1
2022-05-18 16:13:18,049 - mmdeploy - INFO - PyTorch compiling details: PyTorch built with:
  - C++ Version: 199711
  - MSVC 192829913
  - Intel(R) Math Kernel Library Version 2020.0.2 Product Build 20200624 for Intel(R) 64 architecture applications
  - Intel(R) MKL-DNN v1.7.0 (Git Hash 7aed236906b1f7a05c0917e5257a1af05e9ff683)
  - OpenMP 2019
  - CPU capability usage: AVX2
  - CUDA Runtime 10.2
  - NVCC architecture flags: -gencode;arch=compute_37,code=sm_37;-gencode;arch=compute_50,code=sm_50;-gencode;arch=compute_60,code=sm_60;-gencode;arch=compute_61,code=sm_61;-gencode;arch=compute_70,code=sm_70;-gencode;arch=compute_75,code=sm_75;-gencode;arch=compute_37,code=compute_37
  - CuDNN 7.6.5
  - Magma 2.5.4
  - Build settings: BLAS_INFO=mkl, BUILD_TYPE=Release, CUDA_VERSION=10.2, CUDNN_VERSION=7.6.5, CXX_COMPILER=C:/cb/pytorch_1000000000000/work/tmp_bin/sccache-cl.exe, CXX_FLAGS=/DWIN32 /D_WINDOWS /GR /EHsc /w /bigobj -DUSE_PTHREADPOOL -openmp:experimental -DNDEBUG -DUSE_FBGEMM -DUSE_XNNPACK, LAPACK_INFO=mkl, PERF_WITH_AVX=1, PERF_WITH_AVX2=1, PERF_WITH_AVX512=1, TORCH_VERSION=1.8.1, USE_CUDA=ON, USE_CUDNN=ON, USE_EXCEPTION_PTR=1, USE_GFLAGS=OFF, USE_GLOG=OFF, USE_MKL=ON, USE_MKLDNN=ON, USE_MPI=OFF, USE_NCCL=OFF, USE_NNPACK=OFF, USE_OPENMP=ON,

2022-05-18 16:13:18,050 - mmdeploy - INFO - TorchVision: 0.9.1
2022-05-18 16:13:18,050 - mmdeploy - INFO - OpenCV: 4.5.5
2022-05-18 16:13:18,050 - mmdeploy - INFO - MMCV: 1.4.0
2022-05-18 16:13:18,050 - mmdeploy - INFO - MMCV Compiler: MSVC 192930141
2022-05-18 16:13:18,050 - mmdeploy - INFO - MMCV CUDA Compiler: 10.2
2022-05-18 16:13:18,051 - mmdeploy - INFO - MMDeploy: 0.4.0+6e7e219
2022-05-18 16:13:18,051 - mmdeploy - INFO -

2022-05-18 16:13:18,051 - mmdeploy - INFO - **********Backend information**********
2022-05-18 16:13:18,907 - mmdeploy - INFO - onnxruntime: 1.8.1  ops_is_avaliable : True
2022-05-18 16:13:18,908 - mmdeploy - INFO - tensorrt: 8.2.4.2   ops_is_avaliable : True
2022-05-18 16:13:18,909 - mmdeploy - INFO - ncnn: None  ops_is_avaliable : False
2022-05-18 16:13:18,911 - mmdeploy - INFO - pplnn_is_avaliable: False
2022-05-18 16:13:18,912 - mmdeploy - INFO - openvino_is_avaliable: False
2022-05-18 16:13:18,912 - mmdeploy - INFO -

2022-05-18 16:13:18,912 - mmdeploy - INFO - **********Codebase information**********
2022-05-18 16:13:18,915 - mmdeploy - INFO - mmdet:      2.23.0
2022-05-18 16:13:18,915 - mmdeploy - INFO - mmseg:      None
2022-05-18 16:13:18,915 - mmdeploy - INFO - mmcls:      None
2022-05-18 16:13:18,915 - mmdeploy - INFO - mmocr:      None
2022-05-18 16:13:18,915 - mmdeploy - INFO - mmedit:     None
2022-05-18 16:13:18,915 - mmdeploy - INFO - mmdet3d:    None
2022-05-18 16:13:18,916 - mmdeploy - INFO - mmpose:     None

cpp code

#include <cstdio>     // fprintf
#include <algorithm>  // std::max
#include <cmath>      // std::floor
#include <opencv2/imgcodecs/imgcodecs.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include "c/detector.h"

using namespace std;

int main() {

const char* model_path  = "D:\\WIN\\work_dir_yolox_3";
const char* image_path  = "D:\\WIN\\work_dir_yolox_3\\20210722_162700_235.jpg";
const char* device_name = "cuda";   //"cpu", "cuda"

cv::Mat img = cv::imread(image_path);
if (!img.data) {
    fprintf(stderr, "failed to load image: %s\n", image_path);
    return 1;
}

mm_handle_t detector{};
int status{};
status = mmdeploy_detector_create_by_path(model_path, device_name, 0, &detector);
if (status != MM_SUCCESS) {
    fprintf(stderr, "failed to create detector, code: %d\n", (int)status);
    return 1;
}

mm_mat_t mat{ img.data, img.rows, img.cols, 3, MM_BGR, MM_INT8 };

mm_detect_t* bboxes{};
int* res_count{};
status = mmdeploy_detector_apply(detector, &mat, 1, &bboxes, &res_count);
if (status != MM_SUCCESS) {
    fprintf(stderr, "failed to apply detector, code: %d\n", (int)status);
    return 1;
}

fprintf(stdout, "bbox_count=%d\n", *res_count);

for (int i = 0; i < *res_count; ++i) {
    const auto& box = bboxes[i].bbox;
    const auto& mask = bboxes[i].mask;

    fprintf(stdout, "box %d, left=%.2f, top=%.2f, right=%.2f, bottom=%.2f, label=%d, score=%.4f\n",
            i, box.left, box.top, box.right, box.bottom, bboxes[i].label_id, bboxes[i].score);

    // skip detections with invalid bbox size (bbox height or width < 1)
    if ((box.right - box.left) < 1 || (box.bottom - box.top) < 1) {
        continue;
    }

    // skip detections less than specified score threshold
    if (bboxes[i].score < 0.1) {
        continue;
    }

    // generate mask overlay if model exports masks
    if (mask != nullptr) {
        fprintf(stdout, "mask %d, height=%d, width=%d\n", i, mask->height, mask->width);

        cv::Mat imgMask(mask->height, mask->width, CV_8UC1, &mask->data[0]);
        auto x0 = std::max(std::floor(box.left) - 1, 0.f);
        auto y0 = std::max(std::floor(box.top) - 1, 0.f);
        cv::Rect roi((int)x0, (int)y0, mask->width, mask->height);

        // split the RGB channels, overlay mask to a specific color channel
        cv::Mat ch[3];
        split(img, ch);
        int col = 0;  // int col = i % 3;
        cv::bitwise_or(imgMask, ch[col](roi), ch[col](roi));
        merge(ch, 3, img);
    }

    cv::rectangle(img, cv::Point{ (int)box.left, (int)box.top },
                  cv::Point{ (int)box.right, (int)box.bottom }, cv::Scalar{ 0, 255, 0 });
}

cv::imwrite("output_detection.png", img);

mmdeploy_detector_release_result(bboxes, res_count, 1);

mmdeploy_detector_destroy(detector);

return 0;
}

lvhan028 commented 2 years ago

cd $env:MMDEPLOY_DIR
mkdir build
cd build
cmake .. -G "Visual Studio 16 2019" -A x64 -T v142 `
  -DMMDEPLOY_BUILD_SDK=ON `
  -DMMDEPLOY_TARGET_DEVICES="cuda" `
  -DMMDEPLOY_TARGET_BACKENDS="trt" `
  -DMMDEPLOY_CODEBASES="all" `
  -Dpplcv_DIR="$env:PPLCV_DIR/pplcv-build/install/lib/cmake/ppl" `
  -DTENSORRT_DIR="$env:TENSORRT_DIR" `
  -DCUDNN_DIR="$env:CUDNN_DIR"

cmake --build . --config Release -- /m
cmake --install . --config Release

This is used to build MMDeploy SDK.

What's the command for building your own program?

1wang11lijian1 commented 2 years ago

cd $env:MMDEPLOY_DIR
mkdir build
cd build
cmake .. -G "Visual Studio 16 2019" -A x64 -T v142 `
  -DMMDEPLOY_BUILD_SDK=ON `
  -DMMDEPLOY_TARGET_DEVICES="cuda" `
  -DMMDEPLOY_TARGET_BACKENDS="trt" `
  -DMMDEPLOY_CODEBASES="all" `
  -Dpplcv_DIR="$env:PPLCV_DIR/pplcv-build/install/lib/cmake/ppl" `
  -DTENSORRT_DIR="$env:TENSORRT_DIR" `
  -DCUDNN_DIR="$env:CUDNN_DIR"

cmake --build . --config Release -- /m
cmake --install . --config Release

This is used to build MMDeploy SDK.

What's the command for building your own program?

Sorry, I don't know the commands for building my own project. Could you teach me?
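
For reference, a program against the installed SDK is usually configured and built much like the SDK itself, only pointing find_package at the installed MMDeploy package and at OpenCV. This is just a sketch: the example directory, the exact location of MMDeployConfig.cmake inside the install tree, and the OpenCV path are placeholders that depend on the local setup.

# Sketch only: all paths below are placeholders.
# MMDeploy_DIR must point at the folder containing MMDeployConfig.cmake in the SDK install tree,
# OpenCV_DIR at the folder containing OpenCVConfig.cmake.
cd $env:MMDEPLOY_DIR\build\install\example
mkdir build
cd build
cmake .. -G "Visual Studio 16 2019" -A x64 -T v142 `
  -DMMDeploy_DIR="$env:MMDEPLOY_DIR/build/install/lib/cmake/MMDeploy" `
  -DOpenCV_DIR="C:/path/to/opencv/build"
cmake --build . --config Release -- /m

The resulting executable then needs the SDK, TensorRT, cuDNN, CUDA and OpenCV DLLs on PATH (or copied next to it) before it can run.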

1wang11lijian1 commented 2 years ago

Hello, the problem I am encountering now is that the C++ inference engine DLLs work normally on my local computer, because everything was configured and compiled on this machine, but they cannot be used on other computers. What should I do to make this work smoothly on other computers?

lvhan028 commented 2 years ago

Hello, the problem I am encountering now is that the C++ inference engine DLLs work normally on my local computer, because everything was configured and compiled on this machine, but they cannot be used on other computers. What should I do to make this work smoothly on other computers?

What does the error log say when you port it to the other environment?
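
In general, this kind of failure on a second machine means runtime dependencies are missing there. A rough checklist as a PowerShell sketch; all paths and DLL names below are typical examples, not exact requirements, and depend on the installed versions:

# Sketch only: gather the runtime dependencies next to the executable on the target machine.
$dst = "D:\deploy\bin"                                                   # placeholder target folder
New-Item -ItemType Directory -Force -Path $dst | Out-Null                # create it if it does not exist
Copy-Item "$env:MMDEPLOY_DIR\build\install\bin\*.dll" $dst               # MMDeploy SDK and module DLLs
Copy-Item "$env:TENSORRT_DIR\lib\nvinfer*.dll" $dst                      # TensorRT runtime
Copy-Item "$env:CUDNN_DIR\bin\cudnn*.dll" $dst                           # cuDNN
Copy-Item "$env:CUDA_PATH\bin\cudart64_*.dll" $dst                       # CUDA runtime
Copy-Item "C:\path\to\opencv\build\x64\vc15\bin\opencv_world*.dll" $dst  # OpenCV (placeholder path)

The converted model directory (the work_dir with its deploy.json and engine files) has to be copied as well, the target machine needs a suitable NVIDIA driver and the Visual C++ runtime, and a TensorRT engine is tied to the GPU architecture and TensorRT version it was built with, so the model may have to be re-converted on the target machine.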

lvhan028 commented 2 years ago

Please refer to #685. Closing this since there has been no activity for a long time.