Closed nobody-cheng closed 2 years ago
#include <cuda_runtime_api.h>

// Load one replica of the TorchScript model onto each CUDA device and store
// them in g_moduleVec (replica j lives on GPU j + INDEX_START, half precision,
// eval mode). Exits the process on any load failure.
for (int j = 0; j < MAX_CUDA_NUM; j++) {
    // Target device for this replica; INDEX_START offsets past reserved GPUs.
    const int deviceId = j + INDEX_START;
    const torch::Device device(torch::kCUDA, deviceId);

    torch::jit::script::Module module_;
    try {
        // Make the TARGET device current before loading, and load directly
        // onto it. The original code called cudaSetDevice(j) and loaded onto
        // device j, then moved the module to j + INDEX_START — with a nonzero
        // INDEX_START every load first touched the wrong GPU (typically GPU 0),
        // which matches the "always runs on card 0" symptom reported below.
        const cudaError_t err = cudaSetDevice(deviceId);
        if (err != cudaSuccess) {
            std::cerr << "cudaSetDevice(" << deviceId << ") failed: "
                      << cudaGetErrorString(err) << '\n';
            std::exit(EXIT_FAILURE);
        }
        // Deserialize the ScriptModule from a file using torch::jit::load().
        module_ = torch::jit::load(weights, device);
    } catch (const c10::Error& e) {
        // Include the exception detail instead of swallowing it.
        std::cerr << "Error loading the model: " << e.what() << '\n';
        std::exit(EXIT_FAILURE);
    }

    module_.to(device);        // defensive no-op if load already placed it here
    module_.to(torch::kHalf);  // FP16 for inference
    module_.eval();            // freeze dropout / batch-norm behavior
    g_moduleVec.push_back(module_);
}
您好,请问您解决了这个问题吗?可以用多gpu推理吗?
你好,请问如何进行多GPU推理?devices 指定任意一张卡时,均默认在 0 号卡上运行。