JudgeLJX opened this issue 10 months ago
Hello, have you solved this problem? I ran into a similar one.
Hello, I have resolved this issue. Here are the steps I took:
Add #include "pybind11/numpy.h" to graph_filter.h.
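For context, the include section of graph_filter.h could then look roughly like the sketch below; everything except the pybind11/numpy.h line is an assumption about the existing file, not copied from the repo.

// Top of graph_filter.h -- sketch only; the pre-existing includes are assumed.
#include <pybind11/pybind11.h>
#include "pybind11/numpy.h"   // added: provides py::array_t used in the new signature
#include <Eigen/Dense>
#include <Eigen/Sparse>

namespace py = pybind11;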
Then modify compute_scores as follows:
py::array_t<double> compute_scores(py::array_t<double> points, const std::string filter_type, const int scale_min_dist, const int scale_max_dist)
// VectorXd compute_scores(const MatrixXd &points, const std::string filter_type)
{
    py::buffer_info buf = points.request();
    double *ptr = static_cast<double *>(buf.ptr);
    int rows = buf.shape[0];
    int cols = buf.shape[1];
    Eigen::MatrixXd points_mat = Eigen::Map<Eigen::MatrixXd>(ptr, rows, cols);

    const int n_points = points_mat.rows();
    kdree_t kdtree(points_mat.transpose()); // requires rows = dimension

    double min_dist{0.0};
    double max_dist{0.0};
    compute_resolution(points_mat, kdtree, min_dist, max_dist);
    // const double radius = std::min(min_dist * 10, max_dist * 2);
    const double radius = std::min(min_dist * scale_min_dist, max_dist * scale_max_dist);
    std::cout << "\nResolution: " << min_dist << ", " << max_dist << ", " << radius << "\n";
    // const double radius = std::min(min_dist, max_dist);
    const int max_neighbors = 100;

    // adjacency matrix W
    SparseMatrix<double> W = compute_adjacency_matrix(points_mat, kdtree, radius, max_neighbors);

    // // compute D
    // SparseMatrix<double> D(n_points, n_points);
    // D.reserve(n_points); // diagonal
    // for (int i = 0; i < W.outerSize(); ++i)
    // {
    //     double row_sum{0.0};
    //     for (SparseMatrix<double>::InnerIterator it(W, i); it; ++it)
    //     {
    //         row_sum += it.value();
    //     }
    //     D.coeffRef(i, i) = row_sum;
    // }
    // // compute L
    // SparseMatrix<double> L(n_points, n_points);
    // L = D - W;

    SparseMatrix<double> F(n_points, n_points);
    if (filter_type == "all")
    {
        F.setIdentity();
        std::cout << "Matrix F:\n" << F << std::endl;
    }
    else if (filter_type == "high")
    {
        // F = D - W
        SparseMatrix<double> D = compute_D(W);
        F = D - W;
        std::cout << "Matrix F:\n" << F << std::endl;
    }
    else if (filter_type == "low")
    {
        // F = D^-1 * W
        F = W;
        SparseMatrix<double> D = compute_D(W);
        for (int i = 0; i < F.outerSize(); ++i)
        {
            double row_sum{0.0};
            for (SparseMatrix<double>::InnerIterator it(F, i); it; ++it)
            {
                it.valueRef() *= D.coeff(i, i) + 1;
            }
        }
        std::cout << "Matrix F:\n" << F << std::endl;
    }
    else
    {
        std::cout << "ERROR! filter type has to be among {high, low, all}\n";
        std::exit(-1);
    }

    // apply filter
    VectorXd scores = apply_filter(points_mat, F);
    std::cout << "Scores: " << scores.transpose() << std::endl;

    const double *data_ptr = scores.data();
    // create a new NumPy array and copy the scores into it
    py::array_t<double> np_array(scores.size());
    auto buffer_info = np_array.request();
    double *numpy_data_ptr = static_cast<double *>(buffer_info.ptr);
    std::memcpy(numpy_data_ptr, data_ptr, scores.size() * sizeof(double));
    py::print(np_array);
    return np_array;
    // return scores;
}
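For completeness, the pybind11 module definition also has to expose this four-argument signature. Below is a minimal sketch of what that binding could look like; the module name graph_filter matches the Python import in the thread, but the binding file layout and the "graph_filter.h" include path are assumptions, not the repo's actual code.

// Sketch of the pybind11 module definition (file name and layout are assumptions).
#include <pybind11/pybind11.h>
#include <pybind11/numpy.h>
#include "graph_filter.h"   // declares compute_scores (path is an assumption)

namespace py = pybind11;

PYBIND11_MODULE(graph_filter, m)
{
    // Expose the updated four-argument compute_scores to Python.
    m.def("compute_scores", &compute_scores,
          py::arg("points"), py::arg("filter_type"),
          py::arg("scale_min_dist"), py::arg("scale_max_dist"));
}

Note that the extension module has to be rebuilt after these changes so that Python picks up the new signature.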
Finally, comment out the sample_points and sample_data code (sorry, my English is not very good; by "comment" I mean effectively deleting that code).
Hi, thanks for your reply.
Were you able to get the same result as shown in the cube figure?
Hi, I ran into the issue below. Could you please help?
Read 300 points
Traceback (most recent call last):
  File "sample_point_cloud.py", line 18, in <module>
    pcd_sampled = sample_pcd(pcd_orig, args.filter_type, args.n_samples, 10, 10)
  File "/home/judgel/Desktop/fast_point_cloud_sampling-master/utils.py", line 49, in sample_pcd
    scores = compute_scores_from_points(points_orig, filter_type, scale_min_dist, scale_max_dist)
  File "/home/judgel/Desktop/fast_point_cloud_sampling-master/utils.py", line 32, in compute_scores_from_points
    scores = graph_filter.compute_scores(points, filter_type, scale_min_dist, scale_max_dist)
TypeError: compute_scores(): incompatible function arguments. The following argument types are supported:
Invoked with: array([[-1.93484843e-01, -4.95167285e-01, -2.63983041e-01],
       [-2.17938349e-01, -4.91037607e-01, -2.82596081e-01],
       [ 2.00165346e-01, -4.95085597e-01, -2.63629794e-01],
       [ 2.04199612e-01, -4.80470300e-01, -2.85623878e-01],