RainerKuemmerle / g2o

g2o: A General Framework for Graph Optimization

I got an error about Eigen3("data is not aligned") when using my own edge and vertex to do optimization #250

Open zouxianghong opened 6 years ago

zouxianghong commented 6 years ago

1. Problem Description: when using my own edge and vertex classes to do optimization, I get an error:

```
((size_t(m_data) % (((int)1 >= (int)internal::traits<Derived>::Alignment) ? (int)1 : (int)internal::traits<Derived>::Alignment)) == 0) && "data is not aligned"
```
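For context: this assertion comes from `Eigen::MapBase::checkSanity()` and fires whenever an aligned `Eigen::Map` is constructed over memory that is not aligned to the boundary Eigen was configured for. The following is a generic, self-contained sketch that triggers the same message on a typical x86-64 debug build; it is only an illustration of the assertion mechanism, not the poster's code or g2o's internals.

```cpp
// Generic illustration only: an aligned Eigen::Map over a pointer that is
// not suitably aligned trips the same eigen_assert in debug builds.
#include <Eigen/Core>
#include <cstdlib>

int main()
{
  // malloc gives at least 16-byte alignment on x86-64 Linux; offsetting by
  // one double (8 bytes) deliberately breaks that alignment.
  double* raw = static_cast<double*>(std::malloc(8 * sizeof(double)));
  double* misaligned = raw + 1;

  // Asserts "data is not aligned" (MapBase::checkSanity) unless NDEBUG is set.
  Eigen::Map<Eigen::Vector4d, Eigen::Aligned> bad(misaligned);
  bad.setZero();

  std::free(raw);
  return 0;
}
```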

2. Code

2.1. Definition of Vertex and Edge

```cpp
enum BlendType
{
  POSE_ESTIMATION_2D2D   = 0,
  POSE_ESTIMATION_2D3D_1 = 1,
  POSE_ESTIMATION_2D3D_2 = 2,
  POSE_ESTIMATION_3D2D_1 = 3,
  POSE_ESTIMATION_3D2D_2 = 4,
  POSE_ESTIMATION_3D3D_1 = 5,
  POSE_ESTIMATION_3D3D_2 = 6,
  POSE_ESTIMATION_3D3D_3 = 7
};

/** \brief Vertex for blend: 2d-2d/2d-3d/3d-2d/3d-3d bundle adjustment (pose only) */
class VertexBlendPoseOnly : public g2o::BaseVertex<6, Vector6d>
{
// ... (rest of the vertex definition was not included in the issue)

public:
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
};

/** \brief Edge for blend: 2d-2d/2d-3d/3d-2d/3d-3d bundle adjustment (pose only) */
class EdgeBlendPoseOnly : public g2o::BaseUnaryEdge<1, Eigen::Matrix<double,1,1>, VertexBlendPoseOnly>
{
public:
EdgeBlendPoseOnly(const BlendType& blend_type, const Eigen::Vector3d& Pc_1, const Eigen::Vector3d& Pc_2)
  : blend_type_(blend_type), Pc_1_(Pc_1), Pc_2_(Pc_2)
{
  information().setIdentity();
}

void computeError()
{
const VertexBlendPoseOnly* v = static_cast<const VertexBlendPoseOnly*>(_vertices[0]);
const Vector6d pose = v->estimate();
double theta_x = pose[0], theta_y = pose[1], theta_z = pose[2], T_1 = pose[3], T_2 = pose[4], T_3 = pose[5];
double theta_x_2 = theta_x*theta_x;
double theta_y_2 = theta_y*theta_y;
double theta_z_2 = theta_z*theta_z;
double theta_x_theta_y = theta_x*theta_y;
double theta_x_theta_z = theta_x*theta_z;
double theta_y_theta_z = theta_y*theta_z;
double sum_square_theta = theta_x_2 + theta_y_2 + theta_z_2;
double sqrt_sum_square_theta = sqrt(sum_square_theta);
double part_1 = (1 - cos(sqrt_sum_square_theta)) / sum_square_theta;
double part_2 = sin(sqrt_sum_square_theta) / sqrt_sum_square_theta;

double r11, r12, r13, r21, r22, r23, r31, r32, r33;  //< 9 elements of Rotation matrix
r11 = 1 - (theta_y_2 + theta_z_2)*part_1;
r12 = theta_x_theta_y*part_1 - theta_z*part_2;
r13 = theta_x_theta_z*part_1 + theta_y*part_2;
r21 = theta_x_theta_y*part_1 + theta_z*part_2;
r22 = 1 - (theta_x_2 + theta_z_2)*part_1;
r23 = theta_y_theta_z*part_1 - theta_x*part_2;
r31 = theta_x_theta_z*part_1 - theta_y*part_2;
r32 = theta_y_theta_z*part_1 + theta_x*part_2;
r33 = 1 - (theta_x_2 + theta_y_2)*part_1;

if(blend_type_ == BlendType::POSE_ESTIMATION_2D2D)
{
  Eigen::Matrix3d R;
  R << r11 , r12 , r13
     , r21 , r22 , r23
     , r31 , r32 , r33;
  Eigen::Vector3d vec3d(T_2-Pc_2_[1]*T_3, Pc_2_[0]*T_3-T_1, Pc_2_[1]*T_1-Pc_2_[0]*T_3);
  _error(0,0) = _measurement(0,0) - vec3d.transpose()*R*Pc_1_;
}
else if(blend_type_ == BlendType::POSE_ESTIMATION_2D3D_1)
{
  Eigen::Matrix<double, 1, 3> R1, R3;
  R1 << r11 , r12 , r13;
  R3 << r31 , r32 , r33;
  _error(0,0) = _measurement(0,0) - ((Pc_2_[0]-T_1)*R3 - (Pc_2_[2]-T_3)*R1)*Pc_1_;
}
else if(blend_type_ == BlendType::POSE_ESTIMATION_2D3D_2)
{
  Eigen::Matrix<double, 1, 3> R2, R3;
  R2 << r21 , r22 , r23;
  R3 << r31 , r32 , r33;
  _error(0,0) = _measurement(0,0) - ((Pc_2_[1]-T_2)*R3 - (Pc_2_[2]-T_3)*R2)*Pc_1_;
}
else if(blend_type_ == BlendType::POSE_ESTIMATION_3D2D_1)
{
  Eigen::Matrix<double, 1, 3> R1, R3;
  R1 << r11 , r12 , r13;
  R3 << r31 , r32 , r33;
  _error(0,0) = _measurement(0,0) - ((R1 - Pc_2_[0]*R3)*Pc_1_ + T_1 - Pc_2_[0]*T_3);
}
else if(blend_type_ == BlendType::POSE_ESTIMATION_3D2D_2)
{
  Eigen::Matrix<double, 1, 3> R2, R3;
  R2 << r21 , r22 , r23;
  R3 << r31 , r32 , r33;
  _error(0,0) = _measurement(0,0) - ((R2 - Pc_2_[1]*R3)*Pc_1_ + T_2 - Pc_2_[1]*T_3);
}
else if(blend_type_ == BlendType::POSE_ESTIMATION_3D3D_1)
{
  Eigen::Matrix<double, 1, 3> R1;
  R1 << r11 , r12 , r13;
  _error(0,0) = _measurement(0,0) - (Pc_2_[0] - R1*Pc_1_ - T_1);
}
else if(blend_type_ == BlendType::POSE_ESTIMATION_3D3D_2)
{
  Eigen::Matrix<double, 1, 3> R2;
  R2 << r21 , r22 , r23;
  _error(0,0) = _measurement(0,0) - (Pc_2_[1] - R2*Pc_1_ - T_2);
}
else if(blend_type_ == BlendType::POSE_ESTIMATION_3D3D_3)
{
  Eigen::Matrix<double, 1, 3> R3;
  R3 << r31 , r32 , r33;
  _error(0,0) = _measurement(0,0) - (Pc_2_[2] - R3*Pc_1_ - T_3);
}

}

// virtual void linearizeOplus();

virtual bool read(std::istream& in) { return false; }
virtual bool write(std::ostream& out) const { return false; }

BlendType getBlendType() const { return blend_type_; }

private:

BlendType blend_type_;  ///< Blend type: 2D2D/2D3D_1/2D3D_2/3D2D_1/3D2D_2/3D3D_1/3D3D_2/3D3D_3

// Note: Pc(Xc,Yc,Zc) is the (normalized) camera coordinate of P.
// for 2D-2D: Pc_1_ is (Xc_1/Zc_1, Yc_1/Zc_1, 1), Pc_2_ is (Xc_2/Zc_2, Yc_2/Zc_2, 1)
// for 2D-3D: Pc_1_ is (Xc_1/Zc_1, Yc_1/Zc_1, 1), Pc_2_ is (Xc_2, Yc_2, Zc_2)
// for 3D-2D: Pc_1_ is (Xc_1, Yc_1, Zc_1),        Pc_2_ is (Xc_2/Zc_2, Yc_2/Zc_2, 1)
// for 3D-3D: Pc_1_ is (Xc_1, Yc_1, Zc_1),        Pc_2_ is (Xc_2, Yc_2, Zc_2)
Eigen::Vector3d Pc_1_;
Eigen::Vector3d Pc_2_;

public:
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
};
```

2.2. Do optimization

```cpp
#include <...>  // three header names were lost in the original post
#include <...>
#include <...>
#include <opencv2/imgcodecs/imgcodecs.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include "RGBD_IndoorMapping/common_include.h"
#include "RGBD_IndoorMapping/config.h"
#include "RGBD_IndoorMapping/util.h"
#include "RGBD_IndoorMapping/feature.h"
#include "RGBD_IndoorMapping/feature_adjuster.h"
#include "RGBD_IndoorMapping/pose_estimation.h"
#include "RGBD_IndoorMapping/frame.h"

using namespace std;
using namespace cv;
using namespace RGBD_IndoorMapping;

int main(int argc, char** argv)
{
if(argc != 6)
{
  std::cout << "Usage: test_pose_estimation image_1 image_2 depth_1 depth_2 estimation_type(init/3d2d/2d3d/3d3d/blend)." << std::endl;
  return -1;
}

string image_name_1 = argv[1];
string depth_name_1 = argv[3];
string image_name_2 = argv[2];
string depth_name_2 = argv[4];
string estimation_type = argv[5];
Config::setParameterFile("config/default.yaml");
cv::Mat image_1 = imread(image_name_1, CV_LOAD_IMAGE_COLOR);
cv::Mat depth_1 = imread(depth_name_1, CV_LOAD_IMAGE_UNCHANGED);
cv::Mat image_2 = imread(image_name_2, CV_LOAD_IMAGE_COLOR);
cv::Mat depth_2 = imread(depth_name_2, CV_LOAD_IMAGE_UNCHANGED);

// search keypoints
std::vector<cv::KeyPoint> keypoints_1, keypoints_2;
cv::Mat descriptors_1, descriptors_2;
cv::Feature2D* detector = myslam::createDetector("ORB");
detector->detect(image_1, keypoints_1);
detector->detect(image_2, keypoints_2);

cv::Ptr<DescriptorExtractor> descriptor_extractor = myslam::createDescriptorExtractor("ORB");
descriptor_extractor->compute(image_1, keypoints_1, descriptors_1);
descriptor_extractor->compute(image_2, keypoints_2, descriptors_2);

// match keypoints
double match_ratio = Config::get<float>("match_ratio");
vector<DMatch> matches_, matches;
BFMatcher matcher(cv::NORM_HAMMING);
matcher.match(descriptors_1, descriptors_2, matches_);
std::cout << "size of rough matches is " << matches_.size() << std::endl;

// reject outliers(bad matches)
float min_dist = std::min_element(
matches_.begin(), matches_.end(),
[](const cv::DMatch& m1, const cv::DMatch& m2){
  return m1.distance < m2.distance;
}
)->distance;
for(auto m:matches_)
{
  if(m.distance < std::max<float>(min_dist*match_ratio, 30.0))
  {
    matches.push_back(m);
  }
}
std::cout << "size of matches is " << matches.size() << std::endl;

// draw matched keypoints pair
cv::Mat image_show_1, image_show_2, image_match;
drawMatches(image_1, keypoints_1, image_2, keypoints_2, matches, image_match);
cv::imshow("ORB image pair", image_match);
cv::waitKey(0);

// Camera           
float fx = Config::get<float>("camera.fx");
float fy = Config::get<float>("camera.fy");
float cx = Config::get<float>("camera.cx");
float cy = Config::get<float>("camera.cy");
float depth_scale = Config::get<float>("camera.depth_scale");
Camera::Ptr camera(new Camera(fx, fy, cx, cy, depth_scale));
cv::Mat K = camera->getCameraIntrinsicsCV();

// Frames
Frame::Ptr frame1 = Frame::createFrame();
frame1->setCamera(camera);
frame1->setColor(image_1);
frame1->setDepth(depth_1);
frame1->setId(1);
frame1->setTime_stamp(0.0);

Frame::Ptr frame2 = Frame::createFrame();
frame2->setCamera(camera);
frame2->setColor(image_2);
frame2->setDepth(depth_2);
frame2->setId(2);
frame2->setTime_stamp(1.0);

// compute initial pose T21
std::vector<cv::Point2f> pts_2d_2_init;
std::vector<cv::Point3f> pts_3d_1_init;
std::vector<int> inliers;
for(auto m:matches)
{
  double dd_1 = frame1->findDepth(keypoints_1[m.queryIdx]);
  if(dd_1 <= 0 )
    continue;
  Eigen::Vector3d p1 = camera->pixel2camera(Eigen::Vector2d(keypoints_1[m.queryIdx].pt.x, keypoints_1[m.queryIdx].pt.y), dd_1);
  pts_3d_1_init.push_back(cv::Point3f(p1(0,0), p1(1,0), p1(2,0)));
  pts_2d_2_init.push_back(keypoints_2[m.trainIdx].pt);
}
std::cout << pts_3d_1_init.size() << " 3d-2d pairs to compute initial pose." << std::endl;
bool state = false;
Vector6d init_T21_Vec6d = getInitiPoseEstimation3D2D(state, pts_3d_1_init, pts_2d_2_init, inliers, camera, cv::Mat());
SE3 init_T21 = Vector6dToSE3(init_T21_Vec6d);
std::cout << "inliers size: " << inliers.size() << std::endl;
std::cout << "init_T21 = \n" << init_T21.matrix() << std::endl;

// do optimization
  std::vector<cv::Point2f> pts_2d2d_1, pts_2d2d_2;
  std::vector<cv::Point3f> pts_3d3d_1, pts_3d3d_2;
  std::vector<cv::Point2f> pts_2d3d_1, pts_3d2d_2;
  std::vector<cv::Point3f> pts_3d2d_1, pts_2d3d_2;
  for(auto m:matches)
  {
    double dd_1 = frame1->findDepth(keypoints_1[m.queryIdx]);
    double dd_2 = frame2->findDepth(keypoints_2[m.trainIdx]);
    if(dd_1 <= 0)
    {
      if(dd_2 <= 0)
      {// 2d2d
        pts_2d2d_1.push_back(keypoints_1[m.queryIdx].pt);
        pts_2d2d_2.push_back(keypoints_2[m.trainIdx].pt);
      }
      else
      {// 2d3d
        Eigen::Vector3d p2 = camera->pixel2camera(Eigen::Vector2d(keypoints_2[m.trainIdx].pt.x, keypoints_2[m.trainIdx].pt.y), dd_2);
        pts_2d3d_1.push_back(keypoints_1[m.queryIdx].pt);
        pts_2d3d_2.push_back(cv::Point3f(p2(0,0), p2(1,0), p2(2,0)));
      }
    }
    else
    {
      if(dd_2 <= 0)
      {// 3d2d
        Eigen::Vector3d p1 = camera->pixel2camera(Eigen::Vector2d(keypoints_1[m.queryIdx].pt.x, keypoints_1[m.queryIdx].pt.y), dd_1);
        pts_3d2d_1.push_back(cv::Point3f(p1(0,0), p1(1,0), p1(2,0)));
        pts_3d2d_2.push_back(keypoints_2[m.trainIdx].pt);
      }
      else
      {// 3d3d
        Eigen::Vector3d p1 = camera->pixel2camera(Eigen::Vector2d(keypoints_1[m.queryIdx].pt.x, keypoints_1[m.queryIdx].pt.y), dd_1);
        Eigen::Vector3d p2 = camera->pixel2camera(Eigen::Vector2d(keypoints_2[m.trainIdx].pt.x, keypoints_2[m.trainIdx].pt.y), dd_2);
        pts_3d3d_1.push_back(cv::Point3f(p1(0,0), p1(1,0), p1(2,0)));
        pts_3d3d_2.push_back(cv::Point3f(p2(0,0), p2(1,0), p2(2,0)));
      }
    }
  }
  std::vector<int> inliers_2d2d, inliers_2d3d, inliers_3d2d, inliers_3d3d;
  Vector6d T21 = poseEstimationBlendG2O(state, pts_2d2d_1, pts_2d2d_2, pts_2d3d_1, pts_2d3d_2, pts_3d2d_1, pts_3d2d_2, pts_3d3d_1, pts_3d3d_2, inliers_2d2d, inliers_2d3d, inliers_3d2d, inliers_3d3d, camera, cv::Mat(), init_T21_Vec6d);
  cv::Mat r, R, t;
  r = (cv::Mat_<double>(3,1) <<
    T21(0,0), T21(1,0), T21(2,0)
  );
  t = (cv::Mat_<double>(3,1) <<
    T21(3,0), T21(4,0), T21(5,0)
  );
  cv::Rodrigues(r, R);
  Eigen::Matrix3d rot;
  Eigen::Vector3d trans;
  rot << R.at<double>(0,0) , R.at<double>(0,1) , R.at<double>(0,2)
       , R.at<double>(1,0) , R.at<double>(1,1) , R.at<double>(1,2)
       , R.at<double>(2,0) , R.at<double>(2,1) , R.at<double>(2,2);
  trans << t.at<double>(0,0) , t.at<double>(1,0) , t.at<double>(2,0);
  SE3 T21_blend(rot, trans);
  std::cout << "T21_blend = \n" <<T21_blend.matrix() << std::endl;
  frame2->setT_c_w(T21_blend);

return 0;

}
```

3. Run and Debug: I used GDB to debug the code and got the errors below:

```
#0  0x00007ffff291c428 in __GI_raise (sig=sig@entry=6) at ../sysdeps/unix/sysv/linux/raise.c:54
#1  0x00007ffff291e02a in __GI_abort () at abort.c:89
#2  0x00007ffff2914bd7 in __assert_fail_base (fmt=,
    assertion=assertion@entry=0x7ffff7b93ce0 "((size_t(m_data) % (((int)1 >= (int)internal::traits<Derived>::Alignment) ? (int)1 : (int)internal::traits<Derived>::Alignment)) == 0) && \"data is not aligned\"",
    file=file@entry=0x7ffff7b84a38 "/usr/include/eigen3/Eigen/src/Core/MapBase.h", line=line@entry=168,
    function=function@entry=0x7ffff7b97420 <Eigen::MapBase<Eigen::Map<Eigen::Matrix<double, 1, 6, 1, 1, 6>, 32, Eigen::Stride<0, 0> >, 0>::checkSanity() const::__PRETTY_FUNCTION__> "void Eigen::MapBase<Derived, 0>::checkSanity() const [with Derived = Eigen::Map<Eigen::Matrix<double, 1, 6, 1, 1, 6>, 32, Eigen::Stride<0, 0> >]") at assert.c:92
#3  0x00007ffff2914c82 in __GI___assert_fail (
    assertion=assertion@entry=0x7ffff7b93ce0 "((size_t(m_data) % (((int)1 >= (int)internal::traits<Derived>::Alignment) ? (int)1 : (int)internal::traits<Derived>::Alignment)) == 0) && \"data is not aligned\"",
    file=file@entry=0x7ffff7b84a38 "/usr/include/eigen3/Eigen/src/Core/MapBase.h", line=line@entry=168,
    function=function@entry=0x7ffff7b97420 <Eigen::MapBase<Eigen::Map<Eigen::Matrix<double, 1, 6, 1, 1, 6>, 32, Eigen::Stride<0, 0> >, 0>::checkSanity() const::__PRETTY_FUNCTION__> "void Eigen::MapBase<Derived, 0>::checkSanity() const [with Derived = Eigen::Map<Eigen::Matrix<double, 1, 6, 1, 1, 6>, 32, Eigen::Stride<0, 0> >]") at assert.c:101
#4  0x00007ffff7b343ec in Eigen::MapBase<Eigen::Map<Eigen::Matrix<double, 1, 6, 1, 1, 6>, 32, Eigen::Stride<0, 0> >, 0>::checkSanity (this=)
    at /usr/include/eigen3/Eigen/src/Core/MapBase.h:168
#5  Eigen::MapBase<Eigen::Map<Eigen::Matrix<double, 1, 6, 1, 1, 6>, 32, Eigen::Stride<0, 0> >, 0>::MapBase (cols=6, rows=1, dataPtr=, this=)
    at /usr/include/eigen3/Eigen/src/Core/MapBase.h:155
#6  Eigen::MapBase<Eigen::Map<Eigen::Matrix<double, 1, 6, 1, 1, 6>, 32, Eigen::Stride<0, 0> >, 1>::MapBase (cols=6, rows=1, dataPtr=, this=)
    at /usr/include/eigen3/Eigen/src/Core/MapBase.h:243
#7  Eigen::Map<Eigen::Matrix<double, 1, 6, 1, 1, 6>, 32, Eigen::Stride<0, 0> >::Map (stride=..., cols=6, rows=1, dataPtr=, this=)
    at /usr/include/eigen3/Eigen/src/Core/Map.h:151
#8  g2o::BaseUnaryEdge<1, double, RGBD_IndoorMapping::VertexBlendPoseOnly>::linearizeOplus (this=, jacobianWorkspace=...)
    at /usr/local/include/g2o/core/base_unary_edge.hpp:86
#9  0x00007ffff7b187ed in g2o::BlockSolver<g2o::BlockSolver<g2o::BlockSolverTraits<-1, -1> > >::buildSystem (this=0x13321ed0) at /usr/local/include/g2o/core/block_solver.hpp:512
#10 0x00007ffff1f96be9 in g2o::OptimizationAlgorithmLevenberg::solve(int, bool) () from /usr/local/lib/libg2o_core.so
#11 0x00007ffff1f8e5d7 in g2o::SparseOptimizer::optimize(int, bool) () from /usr/local/lib/libg2o_core.so
#12 0x00007ffff7b31740 in RGBD_IndoorMapping::poseEstimationBlendG2O (state=@0x7fffffffbeac: RGBD_IndoorMapping::OK, pts_2d2d_1=std::vector of length 0, capacity 0,
    pts_2d2d_2=std::vector of length 0, capacity 0, pts_2d3d_1=std::vector of length 4, capacity 4 = {...}, pts_2d3d_2=std::vector of length 4, capacity 4 = {...},
    pts_3d2d_1=std::vector of length 1, capacity 1 = {...}, pts_3d2d_2=std::vector of length 1, capacity 1 = {...}, pts_3d3d_1=std::vector of length 267, capacity 512 = {...},
    pts_3d3d_2=std::vector of length 267, capacity 512 = {...}, inliers_2d2d_index=std::vector of length 0, capacity 0,
    inliers_2d3d_index=std::vector of length 4, capacity 4 = {...}, inliers_3d2d_index=std::vector of length 1, capacity 1 = {...},
    inliers_3d3d_index=std::vector of length 267, capacity 512 = {...}, camera=0x9f2590, distCoeffs=..., initi_T21=...)
    at RGBD_IndoorMapping/src/PoseEstimation.cpp:395
#13 0x00007ffff7af5d44 in RGBD_IndoorMapping::Mapping::trackWithFrame (this=this@entry=0x632c00, F=0x15986f40)
    at RGBD_IndoorMapping/src/Mapping.cpp:262
```

4. Asking for help: I don't know why this bug happens, even though I have tried everything I can. Can anyone help me? Thanks!

YangSiri commented 4 years ago

Hi, have you tried Release mode? It works for me.

balarayen-blippar commented 4 years ago

@zouxianghong Did you find the answer for this?

wangtsiao commented 2 years ago

Check your compiler options; I think we may need to remove -march=native.

VRichardJP commented 2 years ago

Could it be related to https://github.com/RainerKuemmerle/g2o/issues/557? I get the "data is not aligned" assertion failure when the -mf16c compilation flag is used (which is included by -march=native on my machine). Running in Release mode merely silences the assert; it does not solve the underlying problem.
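One way to make such a flag mismatch visible is to compile a tiny diagnostic translation unit once with the application's flags and once with the flags used to build g2o, and compare the output. This is only a sketch: it assumes Eigen >= 3.3 (which defines `EIGEN_MAX_ALIGN_BYTES`); `__SSE2__`, `__AVX__`, and `__F16C__` are the standard GCC/Clang ISA predefines. Incidentally, the `Eigen::Map<..., 32, ...>` in the backtrace above is consistent with a 32-byte (AVX-level) alignment assumption on the g2o side.

```cpp
// Diagnostic sketch: print the alignment Eigen assumes under the current
// compile flags. Differing values between your app and a prebuilt g2o are
// exactly the kind of mismatch that ends in "data is not aligned".
#include <Eigen/Core>
#include <iostream>

int main()
{
  std::cout << "EIGEN_MAX_ALIGN_BYTES        = " << EIGEN_MAX_ALIGN_BYTES << "\n";
  std::cout << "EIGEN_MAX_STATIC_ALIGN_BYTES = " << EIGEN_MAX_STATIC_ALIGN_BYTES << "\n";
#ifdef __SSE2__
  std::cout << "__SSE2__ is defined\n";
#endif
#ifdef __AVX__
  std::cout << "__AVX__ is defined\n";
#endif
#ifdef __F16C__
  std::cout << "__F16C__ is defined\n";
#endif
  return 0;
}
```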

zouxianghong commented 2 years ago

Thanks, received.

Crydsch commented 1 year ago

I encountered the same problem with g2o and Eigen (and PCL).

It turns out it really is caused by the SSE and AVX flags. If any of those are in use, Eigen changes its behavior to align data; this includes special allocators and overriding new/delete/malloc/free. That in turn causes this assert error, or even segfaults (double free), when the data is not handled consistently. It happens when different parts of your application exchange data (for example, the app itself and/or two libraries) and one part was compiled with SSE/AVX (and thus with Eigen's aligned allocation) and the other was not.

This happens when the libraries and your application are built with different flags.

tl;dr: Make sure all your libraries and your application are built with the same SSE/AVX flags.
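One cheap way to enforce that rule is to pin the expected alignment in a header that every translation unit of the application includes, so that a flag change in only part of the project fails at compile time instead of asserting deep inside g2o at runtime. This is a sketch only: `EXPECTED_EIGEN_MAX_ALIGN_BYTES` is a made-up name, the value must be set to whatever your g2o build actually uses, and it assumes Eigen >= 3.3 and C++11.

```cpp
// alignment_guard.h -- sketch of a compile-time guard against flag drift.
#pragma once
#include <Eigen/Core>

// Hypothetical constant: set it to the alignment your g2o build was compiled
// with (e.g. 32 for an AVX build, 16 for plain SSE2).
#define EXPECTED_EIGEN_MAX_ALIGN_BYTES 32

static_assert(EIGEN_MAX_ALIGN_BYTES == EXPECTED_EIGEN_MAX_ALIGN_BYTES,
              "Eigen alignment differs from the g2o build; "
              "check the -march/-msse/-mavx flags for this target.");
```

The actual fix remains what the comment above says: rebuild g2o (and PCL, etc.) and the application with one consistent set of -march/-m* flags.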

zouxianghong commented 1 year ago

Thanks, received.