MirrorYuChen opened this issue 5 years ago
Yes, TensorRT can load the model from a file.
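For reference, a minimal sketch of the serialize/deserialize pattern (the helper names `saveEngine`/`loadEngine`, the file path, and the pre-built `engine`/`runtime` objects are only assumptions for illustration):

```cpp
#include <fstream>
#include <iterator>
#include <vector>
#include "NvInfer.h"

// One-time: dump a built engine to disk.
void saveEngine(nvinfer1::ICudaEngine *engine, const char *path) {
    nvinfer1::IHostMemory *serialized = engine->serialize();
    std::ofstream out(path, std::ios::binary);
    out.write(static_cast<const char *>(serialized->data()), serialized->size());
    serialized->destroy();
}

// Later runs: skip the caffe parser and deserialize the cached engine.
// If the network contains plugin layers, pass the plugin factory instead of nullptr.
nvinfer1::ICudaEngine *loadEngine(nvinfer1::IRuntime *runtime, const char *path) {
    std::ifstream in(path, std::ios::binary);
    std::vector<char> buf((std::istreambuf_iterator<char>(in)), std::istreambuf_iterator<char>());
    return runtime->deserializeCudaEngine(buf.data(), buf.size(), nullptr);
}
```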
Following the code in tensorRT-inference, I modified baseEngine.cpp in your project into the following form:
```cpp
void baseEngine::caffeToGIEModel(const std::string &deployFile,           // name for caffe prototxt
                                 const std::string &modelFile,            // name for model
                                 const std::vector<std::string> &outputs, // network outputs
                                 unsigned int maxBatchSize)               // batch size
{
    stringstream gieStream;
    gieStream.seekg(0, gieStream.beg);
    char cache_file[512];
    sprintf(cache_file, "%s.engine", modelFile.c_str());
    std::ifstream cache(cache_file, std::ios::binary);
    if (!cache) {
        // create the builder
        IBuilder *builder = createInferBuilder(gLogger);
        // parse the caffe model to populate the network, then set the outputs
        INetworkDefinition *network = builder->createNetwork();
        ICaffeParser *parser = createCaffeParser();
        const IBlobNameToTensor *blobNameToTensor = parser->parse(deployFile.c_str(),
                                                                  modelFile.c_str(),
                                                                  *network,
                                                                  nvinfer1::DataType::kHALF);
        // specify which tensors are outputs
        for (auto &s : outputs)
            network->markOutput(*blobNameToTensor->find(s.c_str()));
        // Build the engine
        builder->setMaxBatchSize(maxBatchSize);
        builder->setMaxWorkspaceSize(1 << 25);
        builder->setHalf2Mode(true);
        // build the cuda engine
        ICudaEngine *engine = builder->buildCudaEngine(*network);
        assert(engine);
        // serialize the engine into the in-memory stream
        nvinfer1::IHostMemory *serMem = engine->serialize();
        gieStream.write((const char *)serMem->data(), serMem->size());
        serMem->destroy();
        // we don't need the network any more, and we can destroy the parser
        network->destroy();
        parser->destroy();
        builder->destroy();
        // dump the serialized engine to the cache file
        std::ofstream outFile;
        outFile.open(cache_file, std::ios::binary);
        outFile << gieStream.rdbuf();
        outFile.close();
        gieStream.seekg(0, gieStream.beg);
    } else {
        printf("loading network profile from engine cache... %s\n", cache_file);
        gieStream << cache.rdbuf();
        cache.close();
    }
    // deserialize the (possibly cached) engine and create the execution context
    nvinfer1::IRuntime *infer = nvinfer1::createInferRuntime(gLogger);
    gieStream.seekg(0, std::ios::end);
    const int modelSize = gieStream.tellg();
    gieStream.seekg(0, std::ios::beg);
    void *modelMem = malloc(modelSize);
    gieStream.read((char *)modelMem, modelSize);
    nvinfer1::ICudaEngine *engine = infer->deserializeCudaEngine(modelMem, modelSize, NULL);
    free(modelMem);
    assert(engine);
    context = engine->createExecutionContext();
}
```
The code compiles, and running it does generate the intermediate engine file, but there is no output result. Could you give me some guidance on how to fix this when you have time?
Every time the program starts, TensorRT converts the caffe model again. Is it possible to do the conversion only once, save the converted model locally, and then just load the converted model on every subsequent run?