Closed ChaocunChen closed 5 months ago
这个应该不会是 mnn 问题,把代码贴在这里看看使用过程有什么问题吧. 也可以用android上一些内存泄露的检查工具测试下
这个应该不会是 mnn 问题,把代码贴在这里看看使用过程有什么问题吧. 也可以用android上一些内存泄露的检查工具测试下 .h头文件 `#ifndef YOLOX_MNN_YOLOXMNN_H
#define YOLOX_MNN_YOLOXMNN_H
//#include
//#include <opencv2/highgui/highgui.hpp>
// One cell of the YOLOX output grid: its (gridX, gridY) cell coordinate and
// the stride (in pixels) of the FPN level the cell belongs to.
typedef struct GridInfo { float gridX; float gridY; float stride; }GridInfo;
// A decoded detection box: top-left corner (x, y), size (w, h), class score,
// objectness (iouScore), cached area, combined score (scoreObj = score *
// iouScore) and the winning class index.
typedef struct DetBoxes { float x; float y; float w; float h; float score; float iouScore; float area; float scoreObj; int clsIndex; }DetBoxes;
// YOLOX object detector wrapped around an MNN inference session.
// Typical flow: LoadWeight() once, then Inference() per frame; Release()
// frees the session (also called from the destructor).
class YOLOXMNN { public: YOLOXMNN(); ~YOLOXMNN();
// Restrict detections to one class index (-1 = accept all classes).
void setClassIndex(int clsidx);
// Pre-compute grid cells for the three FPN strides (8/16/32).
void GenGridBox(const int netWidth, const int netHeight);
// Greedy NMS plus single-target selection; indices of kept boxes go to `picked`.
void NMS(std::vector<DetBoxes>& detBoxes, std::vector<int>& picked);
bool LoadWeight(const char* weightFile);
bool Inference(const cv::Mat& inputImage, std::vector<DetBoxes>& detBoxes);
// Letterbox `inputImage` into a netWidth x netHeight canvas; the applied
// scale is returned through `fRatio`.
cv::Mat PreprocImage(const cv::Mat& inputImage, const int netWidth, const int netHeight, float& fRatio);
// Decode the raw output tensor into boxes in original-image coordinates.
void Postprocess(const MNN::Tensor* outTensor,const float ratio, std::vector<DetBoxes>& outBoxes);
void Release();
private:
// NOTE(review): this line is truncated in the pasted snippet — presumably
// the shared_ptr members for the interpreter / pretreat (mNet, mPretreat)
// and the session pointer live here; confirm against the real header.
std::shared_ptr
int mNetWidth = 0, mNetHeight = 0, mNetChannel = 0;
MNN::Tensor* mInputTensor = nullptr; // owned by the MNN session, not by us
const float mStrides[3] = { 8, 16, 32 }; // YOLOX FPN strides
const float mClsThre = 0.3f; // combined-score threshold
const float mNMSThre = 0.3f; // IoU threshold used by NMS
int mClsIdx = -1; // class filter; -1 = all classes
std::vector<GridInfo> mGridInfos;
cv::Rect2f mLastbox; // box picked last time; used to track the currently selected target
std::vector<DetBoxes> mCacheBoxes; // cache of the currently picked boxes
int mInferNum = 0; // total number of Inference() calls
const int mCacheNum = 10; // how many calls one cached result is reused for
const bool mCacheUsed = false; // whether result caching is enabled
public: static YOLOXMNN* pstYoloxMnn; };
cpp文件
#include "vsmnn_yoloxs.h"
// Global singleton pointer; nullptr until assigned by the code that owns
// the detector instance.
YOLOXMNN* YOLOXMNN::pstYoloxMnn = nullptr;
// Reset tracking/caching state. No MNN resources are created here; the
// interpreter and session are set up later in LoadWeight().
YOLOXMNN::YOLOXMNN()
{
    mInferNum = 0;
    mLastbox = cv::Rect2f(0.0f, 0.0f, 0.0f, 0.0f);
    mCacheBoxes.clear();
}
// Releases the MNN session via Release(); the interpreter itself is freed
// by the shared_ptr deleter installed in LoadWeight().
YOLOXMNN::~YOLOXMNN() { Release(); }
void YOLOXMNN::Release() { if(mSession != nullptr) { mNet->releaseSession(mSession); mSession = nullptr; } }
// Restrict detection output to a single class index; -1 (default) keeps all classes.
void YOLOXMNN::setClassIndex(int clsidx) { mClsIdx = clsidx; }
/**
 * Pre-compute the (gridX, gridY, stride) triple for every output cell of the
 * three YOLOX FPN levels (strides 8/16/32). The generation order matches the
 * layout of the flattened output tensor, which Postprocess() indexes 1:1.
 *
 * Fix: clear mGridInfos first so that calling this twice (e.g. when weights
 * are reloaded) does not append a duplicate grid and desynchronize the
 * grid/tensor correspondence in Postprocess().
 */
void YOLOXMNN::GenGridBox(const int netWidth, const int netHeight)
{
    mGridInfos.clear();
    for (int i = 0; i < 3; i++) {
        int gridRow = int((float)netHeight / mStrides[i]);
        int gridCol = int((float)netWidth / mStrides[i]);
        for (int row = 0; row < gridRow; row++) {
            for (int col = 0; col < gridCol; col++) {
                GridInfo gridInfo;
                gridInfo.gridX = (float)col;
                gridInfo.gridY = (float)row;
                gridInfo.stride = mStrides[i];
                mGridInfos.push_back(gridInfo);
            }
        }
    }
}
void YOLOXMNN::NMS(std::vector
std::sort(detBoxes.begin(), detBoxes.end(),
[](const DetBoxes& a, const DetBoxes& b)
{
return a.scoreObj > b.scoreObj;
});
float maxwidth = 0.0f;
int maxidx = 0;
DetBoxes a;
DetBoxes b;
//DBG_INFO("YOLOXMNN::NMS before num: %d \n", n);
for (int i = 0; i < n; i++) {
a = detBoxes[i];
int keep = 1;
for (int j = 0; j < (int)picked.size(); j++) {
b = detBoxes[picked[j]];
// intersection over union
float x0 = std::max(a.x, b.x);
float y0 = std::max(a.y, b.y);
float x1 = std::min(a.x + a.w, b.x + b.w);
float y1 = std::min(a.y + a.h, b.y + b.h);
float inter_area = std::max(0.0f, (x1 - x0)) * std::max(0.0f, (y1 - y0));
float union_area = a.area + b.area - inter_area;
// float IoU = inter_area / union_area
if (inter_area / union_area > mNMSThre) {
keep = 0;
}
}
if (keep) {
picked.push_back(i);
//寻找最大的宽度的框
if(a.w > maxwidth) {
maxwidth = a.w;
maxidx = i;
}
}
}
/*DBG_INFO("YOLOXMNN::NMS mLastbox.width : %f \n", mLastbox.width);
DBG_INFO("YOLOXMNN::NMS maxwidth : %f \n", maxwidth);
DBG_INFO("YOLOXMNN::NMS maxidx : %d \n", maxidx);*/
/*第一次赋予宽度最大的框,并且最大的人体框,大于当前选定框2倍直接更换*/
if((mLastbox.width < 1.0f && mLastbox.height < 1.0f) ||
maxwidth > mLastbox.width*2.0) {
picked.clear();
picked.push_back(maxidx);
mLastbox.x = detBoxes[maxidx].x;
mLastbox.y = detBoxes[maxidx].y;
mLastbox.width = detBoxes[maxidx].w;
mLastbox.height = detBoxes[maxidx].h;
return;
}
//选定重合度最大的选定框
float maxiou = -1.0f;
n = (int)picked.size();
//DBG_INFO("YOLOXMNN::NMS after num: %d \n", n);
for (int i = 0; i < n; i++) {
//DBG_INFO("YOLOXMNN::NMS idx: %d \n", picked[i]);
a = detBoxes[picked[i]];
// intersection over union
float x0 = std::max(a.x, mLastbox.x);
float y0 = std::max(a.y, mLastbox.y);
float x1 = std::min(a.x + a.w, mLastbox.x + mLastbox.width);
float y1 = std::min(a.y + a.h, mLastbox.y + mLastbox.height);
float inter_area = std::max(0.0f, (x1 - x0)) * std::max(0.0f, (y1 - y0));
float union_area = a.area + mLastbox.width*mLastbox.height - inter_area;
float iou = inter_area / union_area;
if (iou > maxiou) {
maxiou = iou;
maxidx = picked[i];
}
/*DBG_INFO("YOLOXMNN::NMS a.x: %f \n", a.x);
DBG_INFO("YOLOXMNN::NMS a.y: %d \n", a.y);
DBG_INFO("YOLOXMNN::NMS a.w: %f \n", a.w);
DBG_INFO("YOLOXMNN::NMS a.h: %f \n", a.h);*/
}
/*DBG_INFO("YOLOXMNN::NMS mLastbox.x: %f \n", mLastbox.x);
DBG_INFO("YOLOXMNN::NMS mLastbox.y: %d \n", mLastbox.y);
DBG_INFO("YOLOXMNN::NMS mLastbox.w: %f \n", mLastbox.width);
DBG_INFO("YOLOXMNN::NMS mLastbox.h: %f \n", mLastbox.height);
DBG_INFO("YOLOXMNN::NMS 111 maxiou: %f \n", maxiou);
DBG_INFO("YOLOXMNN::NMS 111 maxidx: %d \n", maxidx);*/
if(maxiou > 0.1f) {
mLastbox.x = detBoxes[maxidx].x;
mLastbox.y = detBoxes[maxidx].y;
mLastbox.width = detBoxes[maxidx].w;
mLastbox.height = detBoxes[maxidx].h;
}
picked.clear();
picked.push_back(maxidx);
return;
}
bool YOLOXMNN::LoadWeight(const char* weightFile) { if(weightFile == nullptr) { return false; }
mNet.reset(MNN::Interpreter::createFromFile(weightFile), MNN::Interpreter::destroy);
MNN::ScheduleConfig config; // session config, default
config.numThread = 1;
mSession = mNet->createSession(config);
// input tensor config
mInputTensor = mNet->getSessionInput(mSession, nullptr);
std::vector<int> inputShape = mInputTensor->shape();
mNetChannel = inputShape[1];
mNetHeight = inputShape[2];
mNetWidth = inputShape[3];
DBG_INFO("yoloxs input: shape[0]:%d\n", inputShape[0]);
DBG_INFO("yoloxs input: w:%d , h:%d, c: %d\n", mNetWidth, mNetHeight, mNetChannel);
DBG_INFO("yoloxs input: size:%d\n", mInputTensor->size());
this->GenGridBox(mNetWidth, mNetHeight);
DBG_INFO("yoloxs GRID SIZE: %d \n", (int)mGridInfos.size());
//mNet->resizeTensor(mInputTensor, { 1, mNetChannel, mNetHeight, mNetWidth });
//mNet->resizeSession(mSession);
// image config
MNN::CV::ImageProcess::Config imageConfig; // image config
imageConfig.filterType = MNN::CV::BILINEAR;
imageConfig.sourceFormat = MNN::CV::RGB;
imageConfig.destFormat = MNN::CV::BGR;
//MNN::CV::Matrix trans;
//trans.setScale(1.0f, 1.0f);
mPretreat.reset(MNN::CV::ImageProcess::create(imageConfig),MNN::CV::ImageProcess::destroy);
//mPretreat->setMatrix(trans);
return true;
}
/**
 * Run one detection pass on `inputImage` and fill `detBoxes` with the
 * selected boxes in original-image coordinates.
 *
 * When mCacheUsed is enabled, one real inference result is served from
 * mCacheBoxes for up to mCacheNum consecutive calls (the target's relative
 * position is assumed stable between frames).
 *
 * @return always true (MNN call results are not checked here).
 *
 * Fixes: opening line reconstructed from the header declaration (the paste
 * truncated it), and detBoxes is now cleared before Postprocess() — the
 * cache path replaces the vector while Postprocess() appends, so reusing a
 * caller vector previously accumulated stale boxes.
 */
bool YOLOXMNN::Inference(const cv::Mat& inputImage, std::vector<DetBoxes>& detBoxes) {
    // Serve cached boxes while the cache window is still open.
    if (mCacheUsed) {
        if (mCacheBoxes.size() > 0 && mCacheNum > 0 && (mInferNum % mCacheNum != 0)) {
            mInferNum++;
            detBoxes.assign(mCacheBoxes.begin(), mCacheBoxes.end());
            return true;
        }
        mInferNum = 1;
    }
    detBoxes.clear();
    float ratio = 1.0f;
    auto start_time0 = std::chrono::high_resolution_clock::now();
    // Letterbox to the network size, then convert/copy into the input tensor.
    cv::Mat netImage = this->PreprocImage(inputImage, mNetWidth, mNetHeight, ratio);
    mPretreat->convert((uint8_t*)netImage.data, netImage.cols, netImage.rows, 0, mInputTensor);
    netImage.release();
    DBG_INFO("yoloxs covert input: size:%d\n", mInputTensor->size());
    auto start_time = std::chrono::high_resolution_clock::now();
    auto duration_ms0 = std::chrono::duration_cast<std::chrono::milliseconds>(start_time - start_time0).count();
    DBG_INFO("yoloxs prepocImage and convert image cost %lld ms", duration_ms0);
    mNet->runSession(mSession);
    MNN::Tensor* outputTensor = mNet->getSessionOutput(mSession, nullptr);
    auto end_time = std::chrono::high_resolution_clock::now();
    auto duration_ms = std::chrono::duration_cast<std::chrono::milliseconds>(end_time - start_time).count();
    DBG_INFO("yoloxs runSession cost %lld ms", duration_ms);
    auto start_time1 = std::chrono::high_resolution_clock::now();
    // Decode, NMS and rescale back to the original image (undo `ratio`).
    this->Postprocess(outputTensor, ratio, detBoxes);
    auto end_time1 = std::chrono::high_resolution_clock::now();
    auto duration_ms1 = std::chrono::duration_cast<std::chrono::milliseconds>(end_time1 - start_time1).count();
    DBG_INFO("yoloxs Postprocess cost %lld ms", duration_ms1);
    return true;
}
/**
 * Letterbox `inputImage` into a netWidth x netHeight canvas padded with the
 * YOLOX gray value 114: scale by min(netW/w, netH/h) and place the resized
 * image at the top-left corner.
 *
 * @param fRatio out: the scale that was applied (1.0 when no resize needed).
 * @return the padded network-sized image (or the input itself when it
 *         already matches the network size — shallow cv::Mat copy).
 *
 * Fixes: the early-return path left fRatio unset (callers that did not
 * pre-initialize it read garbage) and copied the input into a temporary
 * that was immediately discarded; the canvas is now also allocated only
 * when actually needed.
 */
cv::Mat YOLOXMNN::PreprocImage(const cv::Mat& inputImage, const int netWidth, const int netHeight, float& fRatio) {
    int width = inputImage.cols, height = inputImage.rows;
    if (width == netWidth && height == netHeight) {
        fRatio = 1.0f;
        return inputImage;
    }
    cv::Mat imageOut(netHeight, netWidth, CV_8UC3);
    // Freshly allocated Mat is continuous, so a raw memset fill is safe.
    memset(imageOut.data, 114, netWidth * netHeight * 3);
    fRatio = std::min((float)netWidth / (float)width, (float)netHeight / (float)height);
    int newWidth = (int)(fRatio * (float)width), newHeight = (int)(fRatio * (float)height);
    // Resize directly into the top-left ROI of the padded canvas.
    cv::Mat rectImage = imageOut(cv::Rect(0, 0, newWidth, newHeight));
    cv::resize(inputImage, rectImage, cv::Size(newWidth, newHeight));
    DBG_INFO("yoloxs rectImage: w: %d, h: %d \n", rectImage.cols, rectImage.rows);
    DBG_INFO("yoloxs imageOut: w: %d, h: %d \n", imageOut.cols, imageOut.rows);
    return imageOut;
}
// Decode the raw network output into boxes, run NMS, and scale the survivors
// back to original-image coordinates by dividing by `ratio`.
// NOTE(review): the pasted snippet is truncated here — the header declares
// `const MNN::Tensor*` (the `*` was lost below), and the lines that derive
// `outData` / `outHW` / `outChannel` from the tensor are missing entirely;
// confirm against the real source file before judging this function.
void YOLOXMNN::Postprocess(const MNN::Tensor outTensor,
const float ratio, std::vector
std::vector<DetBoxes> detBoxes;
DetBoxes detBox;
// One iteration per grid cell; each cell owns `outChannel` floats laid out
// as [dx, dy, log(w), log(h), objectness, per-class scores...].
for (int i = 0; i < outHW; ++i, outData += outChannel) {
// decoder: cell-relative offsets -> absolute pixel box via the cell stride
float centerX = (mGridInfos[i].gridX + outData[0]) * mGridInfos[i].stride;
float centerY = (mGridInfos[i].gridY + outData[1]) * mGridInfos[i].stride;
detBox.w = std::exp(outData[2]) * mGridInfos[i].stride;
detBox.h = std::exp(outData[3]) * mGridInfos[i].stride;
detBox.x = centerX - detBox.w * 0.5f;
detBox.y = centerY - detBox.h * 0.5f;
detBox.iouScore = outData[4];
float score = 0.0f;
int clsIndex = 0;
float* clsScoreData = outData + 5;
// Pick the best-scoring class for this cell.
for (int j = 0; j < (outChannel - 5); ++j) {
if (score < clsScoreData[j]) {
score = clsScoreData[j];
clsIndex = j;
}
}
detBox.score = score;
detBox.clsIndex = clsIndex;
detBox.area = detBox.w * detBox.h;
detBox.scoreObj = detBox.score * detBox.iouScore;
// Keep the box only if it clears the threshold and matches the class
// filter (mClsIdx == -1 means "any class").
if (detBox.scoreObj >= mClsThre && (mClsIdx == clsIndex || mClsIdx == -1)) {
detBoxes.push_back(detBox);
}
}
std::vector<int> picked;
this->NMS(detBoxes, picked);
DetBoxes detPickedBox;
// Undo the letterbox scaling applied in PreprocImage().
for (int i = 0; i < (int)picked.size(); ++i) {
detPickedBox = detBoxes[picked[i]];
detPickedBox.x /= ratio;
detPickedBox.y /= ratio;
detPickedBox.w /= ratio;
detPickedBox.h /= ratio;
outBoxes.push_back(detPickedBox);
}
detBoxes.clear();
if(mCacheUsed) {
mCacheBoxes.assign(outBoxes.begin(), outBoxes.end()); // back up the currently picked boxes for the cache
}
DBG_INFO("yoloxs det box num: %d \n", (int)picked.size());
}` @jxt1234 代码如上 帮忙看下。另外昨晚让做平板的朋友装了我们的yolox apk没有内存泄露问题,但在我们Amlogic S905X4 S905y4 以及三星的一款手机上都出现了内存泄露 跑两三小时内存200m涨到500m
@jxt1234 大佬这个帮看下 感谢感谢
代码看不出问题。 C++ 代码在 PC 上运行并用编译器检查一下内存泄露看看。或者 Android 上用 https://zhuanlan.zhihu.com/p/191641711 类似的分析工具测试看看吧。
Marking as stale. No activity in 60 days.
平台(如果交叉编译请再附上交叉编译目标平台):
Platform(Include target platform as well if cross-compiling):
yolox 用MNN部署在Amlogic S905x4上内存泄露,开始以为个别情况,今天换一个三星的安卓手机也出现内存泄露 内存不断在涨
部署代码用的https://github.com/jmu201521121021/YOLOX.MNN
Github版本:
Github Version:
模型转化用的MNN1.2.0 安卓MNN包1.2.0 和2.6都试过 都会出现内存泄漏 实在不知道哪里有问题,大佬们帮看下