zherlock030 / YOLOv5_Torchscript

C++ code for running a yolov5s model.
27 stars 5 forks source link

unable to find "torchvision/nms.h" #1

Open sudapure opened 4 years ago

sudapure commented 4 years ago

The torchvision package doesn't look like it is included in the libtorch headers. From the CMake file, it looks like torchvision-abi/include/torchvision/nms.cpp is an external module; it would be helpful if you could give some references regarding this.

lulersoft commented 4 years ago

删除 #include "torchvision/nms.h" 在src_v4.cpp 加入以下:


// Serial CPU kernel for non-maximum suppression (NMS).
//
// dets          : box coordinates; columns 0..3 are read as x1, y1, x2, y2
//                 (assumes a [N, 4] corner-format layout — confirm with caller).
// scores        : one confidence score per box.
// iou_threshold : boxes overlapping a kept box by more than this IoU are
//                 suppressed.
// Returns a 1-D int64 tensor holding the indices of the kept boxes, in
// descending-score order.
template <typename scalar_t>
at::Tensor nms_cpu_kernel(
    const at::Tensor& dets,
    const at::Tensor& scores,
    const float iou_threshold) {
    // The kernel walks raw data pointers, so it only supports CPU tensors
    // and requires dets/scores to share a dtype.
    AT_ASSERTM(!dets.options().device().is_cuda(), "dets must be a CPU tensor");
    AT_ASSERTM(
        !scores.options().device().is_cuda(), "scores must be a CPU tensor");
    AT_ASSERTM(
        dets.scalar_type() == scores.scalar_type(),
        "dets should have the same type as scores");

    // Nothing to do for an empty detection set.
    if (dets.numel() == 0)
        return at::empty({ 0 }, dets.options().dtype(at::kLong));

    // Split out the coordinate columns; contiguous() guarantees the raw
    // pointer walks below see densely packed values.
    auto x1_t = dets.select(1, 0).contiguous();
    auto y1_t = dets.select(1, 1).contiguous();
    auto x2_t = dets.select(1, 2).contiguous();
    auto y2_t = dets.select(1, 3).contiguous();

    // Per-box area, used in the IoU denominator.
    at::Tensor areas_t = (x2_t - x1_t) * (y2_t - y1_t);

    // Visit boxes from highest to lowest score.
    auto order_t = std::get<1>(scores.sort(0, /* descending=*/true));

    auto ndets = dets.size(0);
    // suppressed_t[i] == 1 marks box i as removed; keep_t collects survivors.
    at::Tensor suppressed_t = at::zeros({ ndets }, dets.options().dtype(at::kByte));
    at::Tensor keep_t = at::zeros({ ndets }, dets.options().dtype(at::kLong));

    auto suppressed = suppressed_t.data_ptr<uint8_t>();
    auto keep = keep_t.data_ptr<int64_t>();
    auto order = order_t.data_ptr<int64_t>();
    auto x1 = x1_t.data_ptr<scalar_t>();
    auto y1 = y1_t.data_ptr<scalar_t>();
    auto x2 = x2_t.data_ptr<scalar_t>();
    auto y2 = y2_t.data_ptr<scalar_t>();
    auto areas = areas_t.data_ptr<scalar_t>();

    int64_t num_to_keep = 0;

    // Greedy O(N^2) NMS: keep the best-scoring box still alive, then
    // suppress every lower-scoring box that overlaps it too much.
    for (int64_t _i = 0; _i < ndets; _i++) {
        auto i = order[_i];
        if (suppressed[i] == 1)
            continue;
        keep[num_to_keep++] = i;
        auto ix1 = x1[i];
        auto iy1 = y1[i];
        auto ix2 = x2[i];
        auto iy2 = y2[i];
        auto iarea = areas[i];

        for (int64_t _j = _i + 1; _j < ndets; _j++) {
            auto j = order[_j];
            if (suppressed[j] == 1)
                continue;
            // Intersection rectangle of boxes i and j.
            auto xx1 = std::max(ix1, x1[j]);
            auto yy1 = std::max(iy1, y1[j]);
            auto xx2 = std::min(ix2, x2[j]);
            auto yy2 = std::min(iy2, y2[j]);

            // Clamp to zero so disjoint boxes yield zero intersection.
            auto w = std::max(static_cast<scalar_t>(0), xx2 - xx1);
            auto h = std::max(static_cast<scalar_t>(0), yy2 - yy1);
            auto inter = w * h;
            // IoU = intersection / union.
            auto ovr = inter / (iarea + areas[j] - inter);
            if (ovr > iou_threshold)
                suppressed[j] = 1;
        }
    }
    // Only the first num_to_keep slots of keep_t were filled.
    return keep_t.narrow(/*dim=*/0, /*start=*/0, /*length=*/num_to_keep);
}

// Type-dispatching entry point for the CPU NMS kernel: instantiates
// nms_cpu_kernel for the floating dtype of `dets` and returns the kept
// box indices as an int64 tensor.
at::Tensor nms_cpu(
    const at::Tensor& dets,
    const at::Tensor& scores,
    const float iou_threshold) {
    // Placeholder result; the dispatch lambda below overwrites it.
    auto kept = at::empty({ 0 }, dets.options());

    AT_DISPATCH_FLOATING_TYPES(dets.scalar_type(), "nms", [&] {
        kept = nms_cpu_kernel<scalar_t>(dets, scores, iou_threshold);
    });

    return kept;
}

// Public NMS entry point mirroring torchvision's op: routes GPU tensors to
// the CUDA/HIP kernel when the build has GPU support, otherwise runs the
// CPU path. Returns the int64 indices of the kept boxes.
at::Tensor nms(
    const at::Tensor& dets,
    const at::Tensor& scores,
    const double iou_threshold) {
    if (dets.device().is_cuda()) {
#if defined(WITH_CUDA)
        // Empty input: skip the kernel launch and return an empty int64
        // index tensor; the guard keeps the allocation on dets' device.
        if (dets.numel() == 0) {
            at::cuda::CUDAGuard device_guard(dets.device());
            return at::empty({ 0 }, dets.options().dtype(at::kLong));
        }
        return nms_cuda(dets, scores, iou_threshold);
#elif defined(WITH_HIP)
        // Same empty-input short-circuit for ROCm/HIP builds.
        if (dets.numel() == 0) {
            at::cuda::HIPGuard device_guard(dets.device());
            return at::empty({ 0 }, dets.options().dtype(at::kLong));
        }
        return nms_cuda(dets, scores, iou_threshold);
#else
        // CPU-only build was handed a GPU tensor.
        AT_ERROR("Not compiled with GPU support");
#endif
    }

    at::Tensor result = nms_cpu(dets, scores, iou_threshold);
    return result;
}
lulersoft commented 4 years ago

这是俺项目的 CMakeLists.txt,仅供参考,win10,vs2019 编译通过

# CMakeLists.txt for the torch_exe project: builds one executable linked
# against libtorch and OpenCV.
#
cmake_minimum_required (VERSION 3.8)

project(torch_exe)

# Point CMAKE_PREFIX_PATH at the libtorch *root* directory (the one that
# contains share/cmake/Torch), not the lib/ subdirectory, so that
# find_package(Torch) can locate TorchConfig.cmake.
set(CMAKE_PREFIX_PATH "D:\\sdk\\libtorch") # adjust to where you unzipped libtorch

set(OpenCV_DIR "D:\\sdk\\opencv\\build")

find_package(Torch REQUIRED)
find_package(OpenCV QUIET)

if(NOT Torch_FOUND)
    message(FATAL_ERROR "Pytorch Not Found!")
endif(NOT Torch_FOUND)

# Fall back to an older OpenCV release if the default lookup failed.
if(NOT OpenCV_FOUND)
    find_package(OpenCV 2.4.3 QUIET)
    if(NOT OpenCV_FOUND)
        message(FATAL_ERROR "OpenCV > 2.4.3 not found.")
    endif()
endif()

# Apply the compile flags exported by the Torch package (ABI settings etc.).
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${TORCH_CXX_FLAGS}")

# Add this project's sources to the executable.
add_executable (torch_exe "torch_exe.cpp" "torch_exe.h")

# TODO: add tests and install targets if needed.
target_link_libraries(${PROJECT_NAME} ${TORCH_LIBRARIES} ${OpenCV_LIBS})
# libtorch (>= 1.5) requires at least C++14; C++11 fails to compile ATen headers.
set_property(TARGET ${PROJECT_NAME} PROPERTY CXX_STANDARD 14)
hmmlencat commented 4 years ago

In my environment, libtorch-cxx11-abi-shared-with-deps-1.4.0+cu100, when I run cmake .. and make, there is an error: no matching function for call to ‘at::Tensor::index()’ (see image). Can you give me some help?

sudapure commented 4 years ago

@lulersoft , it looks like the code for nms.cpp; however, does it generate the libyolov5s.so file? If not, how should I build this shared lib?

zherlock030 commented 4 years ago

@sudapure hey, I added nms.h and nms.cpp to the repo.

zherlock030 commented 4 years ago

@hmmlencat not sure about this; maybe you can try the latest libtorch — mine is 1.5. Or you can check your CMakeLists.