Closed aniketgadwe closed 8 years ago
Pose::Pose() { objectPoints.push_back(cv::Point3f(-50.0f,-50.0f,0.0f)); objectPoints.push_back(cv::Point3f(-50.0f,50.0f,0.0f)); objectPoints.push_back(cv::Point3f(50.0f,50.0f,0.0f)); objectPoints.push_back(cv::Point3f(50.0f,-50.0f,0.0f)); cameraIntrinsicParams=Mat(Size(3,3),CV_32FC1); cameraIntrinsicParams.at<float>(0,0)= 694.507824f; cameraIntrinsicParams.at<float>(0,1)= 0 ; cameraIntrinsicParams.at<float>(0,2)= 329.707540f;//304.729528f; cameraIntrinsicParams.at<float>(1,0)= 0 ; cameraIntrinsicParams.at<float>(1,1)= 694.579263f; cameraIntrinsicParams.at<float>(1,2)= 260.350615f;//235.217420f; cameraIntrinsicParams.at<float>(2,0)= 0 ; cameraIntrinsicParams.at<float>(2,1)= 0 ; cameraIntrinsicParams.at<float>(2,2)= 1 ; distCoeffs=Mat(Size(5,1),CV_32FC1); distCoeffs.at<float>(0,0)=0.015950f; distCoeffs.at<float>(1,0)=-0.224494f; distCoeffs.at<float>(2,0)=0.002544f; distCoeffs.at<float>(3,0)=0.000194f; distCoeffs.at<float>(3,0)=0.00f; } vector<float> Pose::contour_detection(Mat threshold, Mat src) { int flag =0; double largest_area=0.0; int largest_contour_index=0; int lowThreshold; int ratio=3; int kernel_size=3; int const max_lowThreshold = 100; double *location; //static double check[3]={0.0,0.0,0.0}; vector<float>check(3); //push_back.check(0)=0.0; //push_back.check(1)=0.0; //push_back.check(2)=0.0; int j=0; //int index[100]; //double area_array[100]; //double *condition; Mat dst(src.rows,src.cols,CV_8UC1,Scalar::all(0)); Rect boundRect; RotatedRect box; vector<Point>Points ; // Vector for storing contour vector<Vec4i> hierarchy; Mat detected_edges; blur(threshold, detected_edges, Size(3,3) ); /// Canny detector Canny( detected_edges, detected_edges, lowThreshold, lowThreshold*ratio, kernel_size ); vector< vector<Point> > contours0; findContours( detected_edges, contours0, hierarchy,CV_RETR_LIST,CV_CHAIN_APPROX_SIMPLE ); // Find the contours in the image vector< vector<Point> > contours; contours.resize(contours0.size()); std::vector<Point2f> imagePoints; std::vector<Point2f> preciseCorners(4); int k; int x=contours0.size(); cout<<"size"<<contours0.size()<<endl; for( k=0; k <x; k++ ) { /* cout<<"k"<<k<<endl; cout<<"size"<<contours0.size()<<endl; //double eps = contours0[k].size() * 0.05; approxPolyDP(contours0[k], contours[k],9, true); //if (contours[k].size() != 4) //continue; //if (!cv::isContourConvex(contours[k])) //continue; if(contours[k].size()==4) { index[j]=k; j=j+1; cout<<"j :"<<j <<endl; } */ } cout<<"out"; /*if (contours0.size()==0) { return check; } /* if(j>1) { cout<<"5"<<endl; for( int i = 0; i<j; i++ ) { cout<<"6"<<endl; area_array[i]=contourArea( contours[index[i]],false); if(area_array[i]>largest_area) { largest_area=area_array[i]; largest_contour_index=i; //Store the index of largest contour } } } if(j>1) { for( int i = 0; i<j; i++ ) { double ratio =largest_area/area_array[i]; if(ratio>=3.0 || ratio<=5.0) { for (int c=0;c<4;c++) { preciseCorners[c] = contours[index[largest_contour_index]][c]; } cv::cornerSubPix(threshold, preciseCorners, cvSize(5,5),cvSize(-1,-1), TermCriteria( CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 40, 0.001 )); // cv::cornerSubPix(threshold, preciseCorners, cvSize(5,5),cvSize(-1,-1), cvTermCriteria(CV_TERMCRIT_ITER,30,0.1)); for (int c=0;c<4;c++) { contours[index[largest_contour_index]][c] = preciseCorners[c]; } imagePoints.push_back(Point2f(contours[index[largest_contour_index]][0].x,contours[index[largest_contour_index]][0].y)); imagePoints.push_back(Point2f(contours[index[largest_contour_index]][1].x,contours[index[largest_contour_index]][1].y)); imagePoints.push_back(Point2f(contours[index[largest_contour_index]][2].x,contours[index[largest_contour_index]][2].y)); imagePoints.push_back(Point2f(contours[index[largest_contour_index]][3].x,contours[index[largest_contour_index]][3].y)); Point P1=contours[index[largest_contour_index]][0]; Point P2=contours[index[largest_contour_index]][1]; Point P3=contours[index[largest_contour_index]][2]; Point P4=contours[index[largest_contour_index]][3]; line(src,P1,P2, Scalar(0,255,0),1,CV_AA,0); line(src,P2,P3, Scalar(0,255,0),1,CV_AA,0); line(src,P3,P4, Scalar(0,255,0),1,CV_AA,0); line(src,P4,P1, Scalar(0,255,0),1,CV_AA,0); //free(index); //free(area_array); location=pose_estimation(objectPoints,imagePoints); return location; } } } */ cout<<"get"; return check; }
vector Pose::pose_estimation(const std::vectorcv::Point3f &objectPoints,const std::vectorcv::Point2f &imagePoints) {
vector<float> position; Mat rvec,tvec,inliers; //rvec=Mat::zeros(3,1,CV_64FC1); //tvec=Mat::zeros(3,1,CV_64FC1); //int flags=0; bool useExtrinsicGuess =false; int method = CV_ITERATIVE ; cv::solvePnP(objectPoints,imagePoints, cameraIntrinsicParams, distCoeffs,rvec, tvec,useExtrinsicGuess,method); Mat distant=Mat(Size(3,3),CV_64FC1); Mat jacobian=Mat(Size(3,1),CV_32FC1); Rodrigues(rvec,distant,jacobian); Mat J; vector<Point2f> p(4); projectPoints(objectPoints,rvec,tvec, cameraIntrinsicParams, distCoeffs, p, J); float sum = 0.; for (size_t i = 0; i <p.size(); i++) { sum += sqrt((p[i].x - imagePoints[i].x)* (p[i].x - imagePoints[i].x)+(p[i].y - imagePoints[i].y)* (p[i].y - imagePoints[i].y)); } Mat R= distant.t(); cout<<""<<endl; //if (sum < 3000) //{ cout << "sum=" << sum << endl; Mat T = -R * tvec ; position[0] =T.at<double>(0,0); position[1]=T.at<double>(1,0); position[2]=T.at<double>(2,0); //} return position; }
i tried out to find the source of error..it might be due to
please help me to find out the source of error.
error is solved !! returning of array was the problem.
include "Pose.h"
vector Pose::pose_estimation(const std::vectorcv::Point3f &objectPoints,const std::vectorcv::Point2f &imagePoints)
{