Open ghost opened 7 years ago
Can anyone explain the following function? What is it based on?
```cpp
float computeCompleteDataLogLikelihood(const ResidualIterator& first_residual,
                                       const ResidualIterator& last_residual,
                                       const WeightIterator& first_weight,
                                       const Eigen::Vector2f& mean,
                                       const Eigen::Matrix2f& precision)
{
  size_t n = (last_residual - first_residual);
  size_t c = 1;
  double error_sum = 0.0;
  double error_acc = 1.0;

  for(ResidualIterator err_it = first_residual; err_it != last_residual; ++err_it, ++c)
  {
    error_acc *= (1.0 + 0.2 * ((*err_it).transpose() * precision * (*err_it))(0, 0));

    if((c % 50) == 0)
    {
      error_sum += std::log(error_acc);
      error_acc = 1.0;
    }
  }

  return 0.5 * n * std::log(precision.determinant()) - 0.5 * (5.0 + 2.0) * error_sum;
}
```
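The only thing I can match it to (please correct me if I'm wrong) is the log-likelihood of the residuals under a zero-mean bivariate Student-t distribution with ν = 5 degrees of freedom, which would explain the `0.2 = 1/ν` factor and the `(5.0 + 2.0) = ν + d` term with `d = 2`. Dropping the terms that are constant in the residual, the log-density of a d-variate t-distribution with precision matrix Λ would be:

```latex
% Log-density of a d-variate Student-t, zero mean, precision \Lambda,
% \nu degrees of freedom (terms constant in r omitted):
\log p(r) = \tfrac{1}{2} \log\det\Lambda
          - \tfrac{\nu + d}{2} \log\!\left( 1 + \tfrac{1}{\nu}\, r^{\top} \Lambda\, r \right) + \text{const}
```

Summed over the n residuals this matches the two terms of the return statement, but I haven't found this documented anywhere, so treat it as a guess.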
Hi, did you ever find an answer? I have the same doubt, and I can't understand why `error_acc` has to be reset every 50 iterations.
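My best guess is that the 50-iteration reset only batches the `std::log` calls: since log(a·b) = log(a) + log(b), multiplying 50 factors into `error_acc` and taking a single log adds the same amount to `error_sum` as logging each residual separately, while calling `std::log` far less often, and the reset keeps the running product from growing too large. Below is a minimal unbatched sketch that I believe computes the same quantity, assuming the residuals are `Eigen::Vector2f` stored in a `std::vector` (the names are mine, not from the repository). One difference I noticed: the original never logs the partial product of the last `n % 50` residuals, so they are effectively dropped from `error_sum`.

```cpp
// Standalone sketch, not the original code: an unbatched version of what I
// think the same log-likelihood is, with one std::log call per residual.
#include <cmath>
#include <cstddef>
#include <vector>
#include <Eigen/Dense>

double completeDataLogLikelihoodUnbatched(const std::vector<Eigen::Vector2f>& residuals,
                                          const Eigen::Matrix2f& precision)
{
  const double nu = 5.0; // assumed t-distribution degrees of freedom
  const double d  = 2.0; // residual dimension

  double error_sum = 0.0;
  for (const Eigen::Vector2f& r : residuals)
  {
    // log(1 + r^T * Lambda * r / nu), accumulated directly instead of
    // multiplying 50 factors and logging the product
    error_sum += std::log(1.0 + (r.transpose() * precision * r)(0, 0) / nu);
  }

  return 0.5 * static_cast<double>(residuals.size()) * std::log(precision.determinant())
         - 0.5 * (nu + d) * error_sum;
}
```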