PaddlePaddle / FastDeploy

⚡️An easy-to-use and fast deep learning model deployment toolkit for ☁️Cloud, 📱Mobile and 📹Edge. Covers 20+ mainstream scenarios across image, video, text and audio, with 150+ SOTA models, end-to-end optimization, and multi-platform, multi-framework support.
https://www.paddlepaddle.org.cn/fastdeploy
Apache License 2.0

Build fails on CUDA 12 (Windows): nvcc fatal : Unsupported gpu architecture 'compute_35' #2024

Open · justghostof opened this issue 1 year ago

justghostof commented 1 year ago

Environment

Problem log and steps that reproduce the issue

The build failed.

   "C:\Users\MWX\FastDeploy\build\fastdeploy.sln" (default target) (1) ->
   "C:\Users\MWX\FastDeploy\build\ALL_BUILD.vcxproj.metaproj" (default target) (2) ->
   "C:\Users\MWX\FastDeploy\build\copy_yaml_include.vcxproj.metaproj" (default target) (3) ->
   "C:\Users\MWX\FastDeploy\build\fastdeploy.vcxproj.metaproj" (default target) (4) ->
   "C:\Users\MWX\FastDeploy\build\fastdeploy.vcxproj" (default target) (23) ->
   "C:\Users\MWX\FastDeploy\build\fastdeploy.vcxproj" (CudaBuildCore target) (23:4) ->
   (CudaBuildCore target) ->
     C:\Program Files\Microsoft Visual Studio\2022\Community\MSBuild\Microsoft\VC\v170\BuildCustomizations\CUDA 12.1.targets(799,9):
     error MSB3721: The command
       "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.1\bin\nvcc.exe" --use-local-env
       -ccbin "C:\Program Files\Microsoft Visual Studio\2022\Community\VC\Tools\MSVC\14.36.32532\bin\HostX64\x64" -x cu
       -IC:\Users\MWX\FastDeploy\. -IC:\Users\MWX\FastDeploy\build
       -IC:\Users\MWX\FastDeploy\third_party\eigen
       -IC:\Users\MWX\FastDeploy\build\third_libs\install\onnxruntime\include
       -IC:\Users\MWX\FastDeploy\build\third_libs\install\paddle_inference
       -IC:\Users\MWX\FastDeploy\build\third_libs\install\openvino\runtime\include
       -IC:\Users\MWX\FastDeploy\build\third_libs\install\openvino\runtime\include\ie
       -I"C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.1\include"
       -I"C:\Program Files\NVIDIA GPU Computing Toolkit\TensorRT\8.6.1\include"
       -IC:\Users\MWX\FastDeploy\.\fastdeploy\runtime\backends\tensorrt\common
       -I"C:\Users\MWX\FastDeploy\third_party\yaml-cpp\include"
       -IC:\Users\MWX\FastDeploy\build\third_libs\install\fast_tokenizer\include
       -IC:\Users\MWX\FastDeploy\build\third_libs\install\fast_tokenizer\third_party\include
       -IC:\Users\MWX\FastDeploy\build\third_libs\install\paddle2onnx\include
       -IC:\Users\MWX\FastDeploy\build\third_libs\install\opencv\build\include
       -IC:\Users\MWX\FastDeploy\build\third_libs\install\opencv\build\include\opencv
       -IC:\Users\MWX\FastDeploy\build\third_libs\install\openvino\runtime\3rdparty\tbb\include
       -I"C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.1\include"
       --keep-dir x64\Release -maxrregcount=0 --machine 64 --compile -cudart static
       -gencode arch=compute_35,code=sm_35 -gencode arch=compute_50,code=sm_50 -gencode arch=compute_52,code=sm_52
       -gencode arch=compute_60,code=sm_60 -gencode arch=compute_61,code=sm_61 -gencode arch=compute_70,code=sm_70
       -gencode arch=compute_75,code=sm_75 -gencode arch=compute_80,code=sm_80 -gencode arch=compute_86,code=sm_86
       -w --expt-relaxed-constexpr --expt-extended-lambda
       -Xcompiler="/EHsc /wd4244 /wd4267 /wd4819 /bigobj -Ob2 /wd4251"
       -D_WINDOWS -DNDEBUG -DYAML_CPP_DLL -DFASTDEPLOY_LIB -DCMAKE_BUILD_TYPE=Release -DEIGEN_STRONG_INLINE=inline
       -DENABLE_ORT_BACKEND -DENABLE_PADDLE_BACKEND -DENABLE_OPENVINO_BACKEND -DWITH_GPU -DENABLE_NVJPEG
       -DENABLE_TRT_BACKEND -DENABLE_VISION -DENABLE_TEXT -DENABLE_PADDLE2ONNX -D__TBB_NO_IMPLICIT_LINKAGE=1
       -D"CMAKE_INTDIR=\"Release\"" -Dfastdeploy_EXPORTS
       -D"__REL_FILE__=\"fastdeploy/vision/common/processors/normalize_and_permute.cu\""
       -D_WINDLL -D_MBCS -DWIN32 -D_WINDOWS -DNDEBUG -DYAML_CPP_DLL -DFASTDEPLOY_LIB -DCMAKE_BUILD_TYPE=Release
       -DEIGEN_STRONG_INLINE=inline -DENABLE_ORT_BACKEND -DENABLE_PADDLE_BACKEND -DENABLE_OPENVINO_BACKEND -DWITH_GPU
       -DENABLE_NVJPEG -DENABLE_TRT_BACKEND -DENABLE_VISION -DENABLE_TEXT -DENABLE_PADDLE2ONNX
       -D__TBB_NO_IMPLICIT_LINKAGE=1 -D"CMAKE_INTDIR=\"Release\"" -Dfastdeploy_EXPORTS
       -Xcompiler "/EHsc /W0 /nologo /O2 /FS /MD /GR" -Xcompiler "/Fdfastdeploy.dir\Release\vc143.pdb"
       -o fastdeploy.dir\Release\/fastdeploy/vision/common/processors/normalize_and_permute.cu.obj
       "C:\Users\MWX\FastDeploy\fastdeploy\vision\common\processors\normalize_and_permute.cu"
     exited with code 1. [C:\Users\MWX\FastDeploy\build\fastdeploy.vcxproj]

   The same MSB3721 error, with the same nvcc command line (including -gencode arch=compute_35,code=sm_35), is
   reported by four more CudaBuildCore targets while compiling:

     fastdeploy/runtime/backends/common/cuda/adaptive_pool2d_kernel.cu
     fastdeploy/vision/common/processors/normalize.cu
     fastdeploy/vision/utils/yolo_preprocess.cu
     fastdeploy/function/cuda_cast.cu

   0 Warning(s)
   5 Error(s)

   Time Elapsed 00:00:02.01
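
The failing flag is the first -gencode entry: CUDA 12 dropped support for Kepler targets, so nvcc rejects arch=compute_35 outright. If you want to confirm which architectures your installed toolkit still accepts before touching the build scripts, the following is a minimal, self-contained CMake sketch (not part of FastDeploy); it assumes nvcc's --list-gpu-arch option, which is available in recent CUDA toolkits.

# Standalone CMakeLists.txt sketch: print the compute capabilities the configured
# nvcc can still target. Assumes CUDA is installed and nvcc supports --list-gpu-arch.
cmake_minimum_required(VERSION 3.18)
project(check_nvcc_archs LANGUAGES CUDA)

execute_process(
  COMMAND "${CMAKE_CUDA_COMPILER}" --list-gpu-arch
  RESULT_VARIABLE list_result
  OUTPUT_VARIABLE supported_archs
  OUTPUT_STRIP_TRAILING_WHITESPACE)

if(list_result EQUAL 0)
  # With CUDA 12.x the list starts at compute_50; compute_35 is no longer offered.
  message(STATUS "Architectures supported by ${CMAKE_CUDA_COMPILER}:\n${supported_archs}")
else()
  message(WARNING "Could not query supported architectures (exit code ${list_result}).")
endif()

If compute_35 does not appear in that list, any -gencode arch=compute_35 entry in the generated project will fail exactly as in the log above.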

weiweijeff commented 1 year ago

I hit this while compiling too. Go to the cmake folder and edit the cuda.cmake file, removing compute capability 35 (the entry for old GPUs). Mine now looks like this:

if(NOT WITH_GPU)
  return()
endif()

# This is to eliminate the CMP0104 warnings from cmake 3.18+.
# Instead of setting CUDA_ARCHITECTURES, we will set CMAKE_CUDA_FLAGS.
set(CMAKE_CUDA_ARCHITECTURES OFF)

if(BUILD_ON_JETSON)
  set(fd_known_gpu_archs "53 62 72")
  set(fd_known_gpu_archs10 "53 62 72")
else()
  message("Using New Release Strategy - All Arches Packge")
  set(fd_known_gpu_archs "70 75 80 86")
  set(fd_known_gpu_archs10 "70 75")
  set(fd_known_gpu_archs11 "70 75 80")
endif()

######################################################################################
# A function for automatic detection of GPUs installed  (if autodetection is enabled)
# Usage:
#   detect_installed_gpus(out_variable)
function(detect_installed_gpus out_variable)
  if(NOT CUDA_gpu_detect_output)
    set(cufile ${PROJECT_BINARY_DIR}/detect_cuda_archs.cu)

    file(
      WRITE ${cufile}
      ""
      "#include \"stdio.h\"\n"
      "#include \"cuda.h\"\n"
      "#include \"cuda_runtime.h\"\n"
      "int main() {\n"
      "  int count = 0;\n"
      "  if (cudaSuccess != cudaGetDeviceCount(&count)) return -1;\n"
      "  if (count == 0) return -1;\n"
      "  for (int device = 0; device < count; ++device) {\n"
      "    cudaDeviceProp prop;\n"
      "    if (cudaSuccess == cudaGetDeviceProperties(&prop, device))\n"
      "      printf(\"%d.%d \", prop.major, prop.minor);\n"
      "  }\n"
      "  return 0;\n"
      "}\n")

    execute_process(
      COMMAND "${CMAKE_CUDA_COMPILER}" "--run" "${cufile}"
      WORKING_DIRECTORY "${PROJECT_BINARY_DIR}/CMakeFiles/"
      RESULT_VARIABLE nvcc_res
      OUTPUT_VARIABLE nvcc_out
      ERROR_QUIET OUTPUT_STRIP_TRAILING_WHITESPACE)

    if(nvcc_res EQUAL 0)
      # only keep the last line of nvcc_out
      string(REGEX REPLACE ";" "\\\\;" nvcc_out "${nvcc_out}")
      string(REGEX REPLACE "\n" ";" nvcc_out "${nvcc_out}")
      list(GET nvcc_out -1 nvcc_out)
      string(REPLACE "2.1" "2.1(2.0)" nvcc_out "${nvcc_out}")
      set(CUDA_gpu_detect_output
          ${nvcc_out}
          CACHE INTERNAL
                "Returned GPU architetures from detect_installed_gpus tool"
                FORCE)
    endif()
  endif()

  if(NOT CUDA_gpu_detect_output)
    message(
      STATUS
        "Automatic GPU detection failed. Building for all known architectures.")
    set(${out_variable}
        ${fd_known_gpu_archs}
        PARENT_SCOPE)
  else()
    set(${out_variable}
        ${CUDA_gpu_detect_output}
        PARENT_SCOPE)
  endif()
endfunction()

########################################################################
# Function for selecting GPU arch flags for nvcc based on CUDA_ARCH_NAME
# Usage:
#   select_nvcc_arch_flags(out_variable)
function(select_nvcc_arch_flags out_variable)
  # List of arch names
  set(archs_names
      "Kepler"
      "Maxwell"
      "Pascal"
      "Volta"
      "Turing"
      "Ampere"
      "All"
      "Manual")
  set(archs_name_default "All")
  list(APPEND archs_names "Auto")

  # set CUDA_ARCH_NAME strings (so it shows up as a dropdown in the CMake GUI)
  set(CUDA_ARCH_NAME
      ${archs_name_default}
      CACHE STRING "Select target NVIDIA GPU achitecture.")
  set_property(CACHE CUDA_ARCH_NAME PROPERTY STRINGS "" ${archs_names})
  mark_as_advanced(CUDA_ARCH_NAME)

  # verify CUDA_ARCH_NAME value
  if(NOT ";${archs_names};" MATCHES ";${CUDA_ARCH_NAME};")
    string(REPLACE ";" ", " archs_names "${archs_names}")
    message(
      FATAL_ERROR "Only ${archs_names} architectures names are supported.")
  endif()

  if(${CUDA_ARCH_NAME} STREQUAL "Manual")
    set(CUDA_ARCH_BIN
        ${fd_known_gpu_archs}
        CACHE
          STRING
          "Specify 'real' GPU architectures to build binaries for, BIN(PTX) format is supported"
    )
    set(CUDA_ARCH_PTX
        ""
        CACHE
          STRING
          "Specify 'virtual' PTX architectures to build PTX intermediate code for"
    )
    mark_as_advanced(CUDA_ARCH_BIN CUDA_ARCH_PTX)
  else()
    unset(CUDA_ARCH_BIN CACHE)
    unset(CUDA_ARCH_PTX CACHE)
  endif()

  if(${CUDA_ARCH_NAME} STREQUAL "Maxwell")
    if(BUILD_ON_JETSON)
      set(cuda_arch_bin "53")
    else()
      set(cuda_arch_bin "50")
    endif()
  elseif(${CUDA_ARCH_NAME} STREQUAL "Pascal")
    if(BUILD_ON_JETSON)
      set(cuda_arch_bin "62")
    else()
      set(cuda_arch_bin "60 61")
    endif()
  elseif(${CUDA_ARCH_NAME} STREQUAL "Volta")
    if(BUILD_ON_JETSON)
      set(cuda_arch_bin "72")
    else()
      set(cuda_arch_bin "70")
    endif()
  elseif(${CUDA_ARCH_NAME} STREQUAL "Turing")
    set(cuda_arch_bin "75")
  elseif(${CUDA_ARCH_NAME} STREQUAL "Ampere")
    if(${CMAKE_CUDA_COMPILER_VERSION} LESS 11.1) # CUDA 11.0
      set(cuda_arch_bin "80")
    elseif(${CMAKE_CUDA_COMPILER_VERSION} LESS 12.0) # CUDA 11.1+
      set(cuda_arch_bin "80 86")
    endif()
  elseif(${CUDA_ARCH_NAME} STREQUAL "All")
    set(cuda_arch_bin ${fd_known_gpu_archs})
  elseif(${CUDA_ARCH_NAME} STREQUAL "Auto")
    message(
      STATUS
        "WARNING: This is just a warning for publishing release.
      You are building GPU version without supporting different architectures.
      So the wheel package may fail on other GPU architectures.
      You can add -DCUDA_ARCH_NAME=All in cmake command
      to get a full wheel package to resolve this warning.
      While, this version will still work on local GPU architecture.")
    detect_installed_gpus(cuda_arch_bin)
  else() # (${CUDA_ARCH_NAME} STREQUAL "Manual")
    set(cuda_arch_bin ${CUDA_ARCH_BIN})
  endif()

  if(NEW_RELEASE_JIT)
    set(cuda_arch_ptx "${cuda_arch_ptx}${cuda_arch_bin}")
    set(cuda_arch_bin "")
  endif()

  # remove dots and convert to lists
  string(REGEX REPLACE "\\." "" cuda_arch_bin "${cuda_arch_bin}")
  string(REGEX REPLACE "\\." "" cuda_arch_ptx "${cuda_arch_ptx}")
  string(REGEX MATCHALL "[0-9()]+" cuda_arch_bin "${cuda_arch_bin}")
  string(REGEX MATCHALL "[0-9]+" cuda_arch_ptx "${cuda_arch_ptx}")

  list(REMOVE_DUPLICATES cuda_arch_bin)
  list(REMOVE_DUPLICATES cuda_arch_ptx)

  set(nvcc_flags "")
  set(nvcc_archs_readable "")

  # Tell NVCC to add binaries for the specified GPUs
  foreach(arch ${cuda_arch_bin})
    if(arch MATCHES "([0-9]+)\\(([0-9]+)\\)")
      # User explicitly specified PTX for the concrete BIN
      string(APPEND nvcc_flags
             " -gencode arch=compute_${CMAKE_MATCH_2},code=sm_${CMAKE_MATCH_1}")
      string(APPEND nvcc_archs_readable " sm_${CMAKE_MATCH_1}")
    else()
      # User didn't explicitly specify PTX for the concrete BIN, we assume PTX=BIN
      string(APPEND nvcc_flags " -gencode arch=compute_${arch},code=sm_${arch}")
      string(APPEND nvcc_archs_readable " sm_${arch}")
    endif()
  endforeach()

  # Tell NVCC to add PTX intermediate code for the specified architectures
  foreach(arch ${cuda_arch_ptx})
    string(APPEND nvcc_flags
           " -gencode arch=compute_${arch},code=compute_${arch}")
    string(APPEND nvcc_archs_readable " compute_${arch}")
  endforeach()

  string(REPLACE ";" " " nvcc_archs_readable "${nvcc_archs_readable}")
  set(${out_variable}
      ${nvcc_flags}
      PARENT_SCOPE)
  set(${out_variable}_readable
      ${nvcc_archs_readable}
      PARENT_SCOPE)
endfunction()

message(STATUS "CUDA detected: " ${CMAKE_CUDA_COMPILER_VERSION})
if(${CMAKE_CUDA_COMPILER_VERSION} LESS 11.0) # CUDA 10.x
  set(fd_known_gpu_archs ${fd_known_gpu_archs10})
  set(CMAKE_CUDA_FLAGS "${CMAKE_CUDA_FLAGS} -D_MWAITXINTRIN_H_INCLUDED")
  set(CMAKE_CUDA_FLAGS "${CMAKE_CUDA_FLAGS} -D__STRICT_ANSI__")
  set(CMAKE_CUDA_FLAGS "${CMAKE_CUDA_FLAGS} -Wno-deprecated-gpu-targets")
elseif(${CMAKE_CUDA_COMPILER_VERSION} LESS 11.2) # CUDA 11.0/11.1
  set(fd_known_gpu_archs ${fd_known_gpu_archs11})
  set(CMAKE_CUDA_FLAGS "${CMAKE_CUDA_FLAGS} -D_MWAITXINTRIN_H_INCLUDED")
  set(CMAKE_CUDA_FLAGS "${CMAKE_CUDA_FLAGS} -D__STRICT_ANSI__")
  set(CMAKE_CUDA_FLAGS "${CMAKE_CUDA_FLAGS} -Wno-deprecated-gpu-targets")
elseif(${CMAKE_CUDA_COMPILER_VERSION} LESS 12.0) # CUDA 11.2+
  set(fd_known_gpu_archs "${fd_known_gpu_archs11} 86")
  set(CMAKE_CUDA_FLAGS "${CMAKE_CUDA_FLAGS} -D_MWAITXINTRIN_H_INCLUDED")
  set(CMAKE_CUDA_FLAGS "${CMAKE_CUDA_FLAGS} -D__STRICT_ANSI__")
  set(CMAKE_CUDA_FLAGS "${CMAKE_CUDA_FLAGS} -Wno-deprecated-gpu-targets")
endif()

# setting nvcc arch flags
select_nvcc_arch_flags(NVCC_FLAGS_EXTRA)
set(CMAKE_CUDA_FLAGS "${CMAKE_CUDA_FLAGS} ${NVCC_FLAGS_EXTRA}")
message(STATUS "NVCC_FLAGS_EXTRA: ${NVCC_FLAGS_EXTRA}")

# Configure CUDA host-flag propagation and the CUDA C++ standard
set(CUDA_PROPAGATE_HOST_FLAGS OFF)
# Release/Debug flags set by cmake. Such as -O3 -g -DNDEBUG etc.
# So, don't set these flags here.
set(CMAKE_CUDA_STANDARD 11)

# (Note) On Windows, if /W[1-4] is deleted, /W1 is added by default and conflicts with -w,
# so replace /W[1-4] with /W0
if(WIN32)
  string(REGEX REPLACE "/W[1-4]" " /W0 " CMAKE_CUDA_FLAGS "${CMAKE_CUDA_FLAGS}")
endif()
# in cuda9, suppress cuda warning on eigen
set(CMAKE_CUDA_FLAGS "${CMAKE_CUDA_FLAGS} -w")
# Set :expt-relaxed-constexpr to suppress Eigen warnings
set(CMAKE_CUDA_FLAGS "${CMAKE_CUDA_FLAGS} --expt-relaxed-constexpr")
# Set :expt-extended-lambda to enable HOSTDEVICE annotation on lambdas
set(CMAKE_CUDA_FLAGS "${CMAKE_CUDA_FLAGS} --expt-extended-lambda")

if(WIN32)
  set(CMAKE_CUDA_FLAGS
      "${CMAKE_CUDA_FLAGS} -Xcompiler \"/wd4244 /wd4267 /wd4819 \"")
  set(CMAKE_CUDA_FLAGS "${CMAKE_CUDA_FLAGS} -Xcompiler /bigobj")
  if(MSVC_STATIC_CRT)
    foreach(flag_var
            CMAKE_CUDA_FLAGS CMAKE_CUDA_FLAGS_DEBUG CMAKE_CUDA_FLAGS_RELEASE
            CMAKE_CUDA_FLAGS_MINSIZEREL CMAKE_CUDA_FLAGS_RELWITHDEBINFO)
      if(${flag_var} MATCHES "-MD")
        string(REGEX REPLACE "-MD" "-MT" ${flag_var} "${${flag_var}}")
      endif()
    endforeach()
  endif()
endif()

mark_as_advanced(CUDA_BUILD_CUBIN CUDA_BUILD_EMULATION CUDA_VERBOSE_BUILD)
mark_as_advanced(CUDA_SDK_ROOT_DIR CUDA_SEPARABLE_COMPILATION)
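
A note for anyone who prefers not to edit the file: this cuda.cmake already exposes the CUDA_ARCH_NAME and CUDA_ARCH_BIN cache options shown above, so the Kepler entry can also be skipped at configure time. The sketch below is illustrative only; the "before" architecture list is inferred from the -gencode flags in the failing command rather than copied from the repository, and compute capability 86 is just an example value for an RTX 30-series GPU.

# Route 1: the minimal in-file edit this comment describes. The "before" line is
# inferred from the failing nvcc command, not taken from the stock file.
#   set(fd_known_gpu_archs "35 50 52 60 61 70 75 80 86")   # before: compute_35 is rejected by CUDA 12
set(fd_known_gpu_archs "70 75 80 86")                       # after: only architectures CUDA 12 still accepts

# Route 2: leave cuda.cmake untouched and choose the architectures when configuring,
# via the cache options the file already defines (values here are illustrative):
#   cmake .. -DWITH_GPU=ON -DCUDA_ARCH_NAME=Manual -DCUDA_ARCH_BIN="86"
# or let the script probe only the local GPU instead of building for every arch:
#   cmake .. -DWITH_GPU=ON -DCUDA_ARCH_NAME=Auto

Either route keeps compute_35 out of the generated nvcc command, which is all the MSB3721 errors above are complaining about.
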
justghostof commented 1 year ago

(Quotes weiweijeff's comment above in full, including the modified cuda.cmake.)

Thanks, I'll try it later.

lym169 commented 1 year ago

I ran into this too; following the solution above, it is now resolved.

LateLinux commented 5 months ago

Helpful! Problem solved! Thanks a lot!

smalie2222 commented 5 months ago

Why can't I open the cuda.cmake file?

LateLinux commented 4 months ago

Use Notepad on Windows, nano on Ubuntu, or touch on other Linux systems.

On Linux, mind the file permissions.
