39 #include <pcl/common/common.h>
44 #include <opengv/absolute_pose/methods.hpp>
45 #include <opengv/absolute_pose/NoncentralAbsoluteAdapter.hpp>
46 #include <opengv/absolute_pose/NoncentralAbsoluteMultiAdapter.hpp>
47 #include <opengv/sac/Ransac.hpp>
48 #include <opengv/sac/MultiRansac.hpp>
49 #include <opengv/sac_problems/absolute_pose/AbsolutePoseSacProblem.hpp>
50 #include <opengv/sac_problems/absolute_pose/MultiNoncentralAbsolutePoseSacProblem.hpp>
// NOTE(review): fragment of a single-camera 3D->2D motion estimation
// routine (OpenCV PnP path).  The function header and large parts of the
// body are missing from this chunk, and the leading integers on each line
// are original-file line numbers left by the extraction, not code.  All
// code below is byte-identical; only comments are added.
60 const std::map<int, cv::Point3f> & words3A,
61 const std::map<int, cv::KeyPoint> & words2B,
68 int varianceMedianRatio,
71 const std::map<int, cv::Point3f> & words3B,
73 std::vector<int> * matchesOut,
74 std::vector<int> * inliersOut,
75 bool splitLinearCovarianceComponents)
// Correspondence/inlier index lists; filled by code in missing lines.
81 std::vector<int>
matches, inliers;
// Start from an identity 6x6 covariance; it is scaled below from errors.
85 *covariance = cv::Mat::eye(6,6,CV_64FC1);
// Build paired 3D object points (from words3A) and 2D image points
// (from words2B) over the word ids present in words2B.
89 std::vector<int> ids =
uKeys(words2B);
90 std::vector<cv::Point3f> objectPoints(ids.size());
91 std::vector<cv::Point2f> imagePoints(ids.size());
94 for(
unsigned int i=0;
i<ids.size(); ++
i)
96 std::map<int, cv::Point3f>::const_iterator
iter=words3A.find(ids[
i]);
// 'oi' is the compaction/output index (declared in a missing line);
// only valid correspondences are kept, then vectors are shrunk to fit.
99 const cv::Point3f & pt =
iter->second;
100 objectPoints[oi].x = pt.x;
101 objectPoints[oi].y = pt.y;
102 objectPoints[oi].z = pt.z;
103 imagePoints[oi] = words2B.find(ids[
i])->second.pt;
108 objectPoints.resize(oi);
109 imagePoints.resize(oi);
112 UDEBUG(
"words3A=%d words2B=%d matches=%d words3B=%d guess=%s reprojError=%f iterations=%d",
113 (
int)words3A.size(), (
int)words2B.size(), (
int)
matches.size(), (
int)words3B.size(),
114 guess.
prettyPrint().c_str(), reprojError, iterations);
116 if((
int)
matches.size() >= minInliers)
// Convert the camera-frame guess Transform into OpenCV rvec/tvec to
// seed PnP (the PnP call itself is in missing lines).
119 cv::Mat
K = cameraModel.
K();
120 cv::Mat
D = cameraModel.
D();
122 cv::Mat
R = (cv::Mat_<double>(3,3) <<
123 (double)guessCameraFrame.
r11(), (double)guessCameraFrame.
r12(), (double)guessCameraFrame.
r13(),
124 (double)guessCameraFrame.
r21(), (double)guessCameraFrame.
r22(), (double)guessCameraFrame.
r23(),
125 (double)guessCameraFrame.
r31(), (double)guessCameraFrame.
r32(), (double)guessCameraFrame.
r33());
127 cv::Mat rvec(3,1, CV_64FC1);
128 cv::Rodrigues(
R, rvec);
129 cv::Mat tvec = (cv::Mat_<double>(3,1) <<
130 (double)guessCameraFrame.
x(), (double)guessCameraFrame.
y(), (double)guessCameraFrame.
z());
147 if((
int)inliers.size() >= minInliers)
// Rebuild a Transform from the refined rvec/tvec.
149 cv::Rodrigues(rvec,
R);
150 Transform pnp(
R.at<
double>(0,0),
R.at<
double>(0,1),
R.at<
double>(0,2), tvec.at<
double>(0),
151 R.at<
double>(1,0),
R.at<
double>(1,1),
R.at<
double>(1,2), tvec.at<
double>(1),
152 R.at<
double>(2,0),
R.at<
double>(2,1),
R.at<
double>(2,2), tvec.at<
double>(2));
// Covariance estimation: compare each inlier's 3D point against its
// frame-B word or a ray-reconstructed point (reconstruction code in
// missing lines), then scale the covariance by robust median errors.
157 if(covariance && (!words3B.empty() || cameraModel.
imageSize() != cv::Size()))
159 std::vector<float> errorSqrdDists(inliers.size());
160 std::vector<float> errorSqrdX;
161 std::vector<float> errorSqrdY;
162 std::vector<float> errorSqrdZ;
163 if(splitLinearCovarianceComponents)
165 errorSqrdX.resize(inliers.size());
166 errorSqrdY.resize(inliers.size());
167 errorSqrdZ.resize(inliers.size());
169 std::vector<float> errorSqrdAngles(inliers.size());
173 for(
unsigned int i=0;
i<inliers.size(); ++
i)
175 cv::Point3f objPt = objectPoints[inliers[
i]];
178 std::map<int, cv::Point3f>::const_iterator
iter = words3B.find(
matches[inliers[
i]]);
193 imagePoints.at(inliers[
i]).x,
194 imagePoints.at(inliers[
i]).y,
// The 1.1 factor pushes the reconstructed point slightly beyond the
// observed depth — presumably to penalize depth error; confirm intent.
// ('ray', 'objPtCamBFrame' and 'newPt' come from missing lines.)
200 newPt = cv::Point3f(ray.x(), ray.y(), ray.z()) * objPtCamBFrame.z*1.1;
206 if(splitLinearCovarianceComponents)
208 double errorX = objPt.x-newPt.x;
209 double errorY = objPt.y-newPt.y;
210 double errorZ = objPt.z-newPt.z;
211 errorSqrdX[
i] = errorX * errorX;
212 errorSqrdY[
i] = errorY * errorY;
213 errorSqrdZ[
i] = errorZ * errorZ;
216 errorSqrdDists[
i] =
uNormSquared(objPt.x-newPt.x, objPt.y-newPt.y, objPt.z-newPt.z);
218 Eigen::Vector4f
v1(objPt.x, objPt.y, objPt.z, 0);
219 Eigen::Vector4f
v2(newPt.x, newPt.y, newPt.z, 0);
220 errorSqrdAngles[
i] = pcl::getAngle3D(
v1,
v2);
// Robust scale: 2.1981 * the (size()/varianceMedianRatio)-th sorted
// squared error.  NOTE(review): assumes varianceMedianRatio > 1 and a
// non-empty inlier set — confirm both are guaranteed by missing code.
223 std::sort(errorSqrdDists.begin(), errorSqrdDists.end());
225 double median_error_sqr_lin = 2.1981 * (double)errorSqrdDists[errorSqrdDists.size () / varianceMedianRatio];
227 (*covariance)(cv::Range(0,3), cv::Range(0,3)) *= median_error_sqr_lin;
228 std::sort(errorSqrdAngles.begin(), errorSqrdAngles.end());
229 double median_error_sqr_ang = 2.1981 * (double)errorSqrdAngles[errorSqrdAngles.size () / varianceMedianRatio];
231 (*covariance)(cv::Range(3,6), cv::Range(3,6)) *= median_error_sqr_ang;
233 if(splitLinearCovarianceComponents)
// Per-axis linear variances replace the isotropic diagonal entries.
235 std::sort(errorSqrdX.begin(), errorSqrdX.end());
236 double median_error_sqr_x = 2.1981 * (double)errorSqrdX[errorSqrdX.size () / varianceMedianRatio];
237 std::sort(errorSqrdY.begin(), errorSqrdY.end());
238 double median_error_sqr_y = 2.1981 * (double)errorSqrdY[errorSqrdY.size () / varianceMedianRatio];
239 std::sort(errorSqrdZ.begin(), errorSqrdZ.end());
240 double median_error_sqr_z = 2.1981 * (double)errorSqrdZ[errorSqrdZ.size () / varianceMedianRatio];
245 covariance->at<
double>(0,0) = median_error_sqr_x;
246 covariance->at<
double>(1,1) = median_error_sqr_y;
247 covariance->at<
double>(2,2) = median_error_sqr_z;
249 median_error_sqr_lin =
uMax3(median_error_sqr_x, median_error_sqr_y, median_error_sqr_z);
// Reject the transform when the linear variance exceeds the cap.
252 if(maxVariance > 0 && median_error_sqr_lin > maxVariance)
254 UWARN(
"Rejected PnP transform, variance is too high! %f > %f!", median_error_sqr_lin, maxVariance);
255 *covariance = cv::Mat::eye(6,6,CV_64FC1);
// Fallback branch: scale covariance by the RMS pixel reprojection
// error of the inliers ('err' is declared in a missing line).
262 std::vector<cv::Point2f> imagePointsReproj;
263 cv::projectPoints(objectPoints, rvec, tvec,
K, cv::Mat(), imagePointsReproj);
265 for(
unsigned int i=0;
i<inliers.size(); ++
i)
267 err +=
uNormSquared(imagePoints.at(inliers[
i]).x - imagePointsReproj.at(inliers[
i]).x, imagePoints.at(inliers[
i]).y - imagePointsReproj.at(inliers[
i]).y);
270 *covariance *=
std::sqrt(err/
float(inliers.size()));
// Export inlier indices to the caller (loop body in missing lines).
281 inliersOut->resize(inliers.size());
282 for(
unsigned int i=0;
i<inliers.size(); ++
i)
// NOTE(review): parameter tail and input-validation section of a
// multi-camera 3D->2D motion estimation function (OpenGV path).  The
// function header and several lines are missing from this chunk; the
// leading integers are original-file line numbers from the extraction.
292 const std::map<int, cv::Point3f> & words3A,
293 const std::map<int, cv::KeyPoint> & words2B,
294 const std::vector<CameraModel> & cameraModels,
295 unsigned int samplingPolicy,
300 int refineIterations,
301 int varianceMedianRatio,
304 const std::map<int, cv::Point3f> & words3B,
305 cv::Mat * covariance,
306 std::vector<int> * matchesOut,
307 std::vector<int> * inliersOut,
308 bool splitLinearCovarianceComponents)
// The OpenGV-backed body is compiled out when support is absent.
311 #ifndef RTABMAP_OPENGV
312 UERROR(
"This function is only available if rtabmap is built with OpenGV dependency.");
// All camera models must be valid and share the same sub-image width:
// keypoints of all cameras are packed side-by-side along x.
314 UASSERT(!cameraModels.empty() && cameraModels[0].imageWidth() > 0);
315 int subImageWidth = cameraModels[0].imageWidth();
316 for(
size_t i=0;
i<cameraModels.size(); ++
i)
318 UASSERT(cameraModels[
i].isValidForProjection());
319 UASSERT(subImageWidth == cameraModels[
i].imageWidth());
// Ratio must be > 1 so the median index stays inside the error vectors.
323 UASSERT(varianceMedianRatio > 1);
325 std::vector<int>
matches, inliers;
329 *covariance = cv::Mat::eye(6,6,CV_64FC1);
// Build object/image point pairs and remember which camera each
// keypoint belongs to (derived from its x offset in the mosaic).
333 std::vector<int> ids =
uKeys(words2B);
334 std::vector<cv::Point3f> objectPoints(ids.size());
335 std::vector<cv::Point2f> imagePoints(ids.size());
338 std::vector<int> cameraIndexes(ids.size());
339 for(
unsigned int i=0;
i<ids.size(); ++
i)
341 std::map<int, cv::Point3f>::const_iterator
iter=words3A.find(ids[
i]);
344 const cv::Point2f & kpt = words2B.find(ids[
i])->second.pt;
345 int cameraIndex =
int(kpt.x / subImageWidth);
346 UASSERT_MSG(cameraIndex >= 0 && cameraIndex < (
int)cameraModels.size(),
347 uFormat(
"cameraIndex=%d, models=%d, kpt.x=%f, subImageWidth=%f (Camera model image width=%d)",
348 cameraIndex, (
int)cameraModels.size(), kpt.x, subImageWidth, cameraModels[cameraIndex].imageWidth()).c_str());
// Keep only valid correspondences; 'oi' is the compaction index
// (declared in a missing line).  The keypoint x is rebased into its own
// camera's sub-image before storing, so each point is expressed in its
// camera's local pixel coordinates.
350 const cv::Point3f & pt =
iter->second;
351 objectPoints[oi] = pt;
352 imagePoints[oi] = kpt;
354 imagePoints[oi].x = imagePoints[oi].x - (cameraIndex*subImageWidth);
355 cameraIndexes[oi] = cameraIndex;
// Shrink all three parallel vectors to the number of kept entries.
360 objectPoints.resize(oi);
361 imagePoints.resize(oi);
362 cameraIndexes.resize(oi);
365 UDEBUG(
"words3A=%d words2B=%d matches=%d words3B=%d guess=%s reprojError=%f iterations=%d samplingPolicy=%ld",
366 (
int)words3A.size(), (
int)words2B.size(), (
int)
matches.size(), (
int)words3B.size(),
367 guess.
prettyPrint().c_str(), reprojError, iterations, samplingPolicy);
// With enough matches, for sampling policies 0 or 2 (presumably AUTO
// and HOMOGENEOUS — confirm against the enum, which is not visible
// here) count matches per camera into 'cc' (declared in a missing
// line) so each camera can be checked for sufficient samples.
369 if((
int)
matches.size() >= minInliers)
371 if(samplingPolicy == 0 || samplingPolicy == 2)
374 cc.resize(cameraModels.size());
375 std::fill(cc.begin(), cc.end(),0);
376 for(
size_t i=0;
i<cameraIndexes.size(); ++
i)
378 cc[cameraIndexes[
i]] = cc[cameraIndexes[
i]] + 1;
381 for (
size_t i=0;
i<cameraModels.size(); ++
i)
383 UDEBUG(
"Matches in Camera %d: %d",
i, cc[
i]);
// Not enough matches in some camera: HOMOGENEOUS (2) aborts with an
// error; otherwise presumably AUTO falls back to ANY sampling.
// NOTE(review): "homogenoeus" is a typo in these runtime messages;
// left untouched here since this edit only adds comments.
387 if(samplingPolicy==2) {
388 UERROR(
"Not enough matches in camera %ld to do "
389 "homogenoeus random sampling, returning null "
390 "transform. Consider using AUTO sampling "
391 "policy to fallback to ANY policy.",
i);
396 UWARN(
"Not enough matches in camera %ld to do "
397 "homogenoeus random sampling, falling back to ANY policy.",
i);
404 if(samplingPolicy == 0)
// Per-camera extrinsics (offset + rotation) for the OpenGV
// non-central adapters below.
410 opengv::translations_t camOffsets;
411 opengv::rotations_t camRotations;
412 for(
size_t i=0;
i<cameraModels.size(); ++
i)
414 camOffsets.push_back(opengv::translation_t(
415 cameraModels[
i].localTransform().
x(),
416 cameraModels[
i].localTransform().
y(),
417 cameraModels[
i].localTransform().
z()));
418 camRotations.push_back(cameraModels[
i].localTransform().toEigen4d().block<3,3>(0, 0));
// HOMOGENEOUS sampling: per-camera point/bearing lists feed the
// multi-adapter so RANSAC samples evenly across cameras.
422 if(samplingPolicy == 2)
425 std::vector<std::shared_ptr<opengv::points_t>> multiPoints;
426 multiPoints.resize(cameraModels.size());
428 std::vector<std::shared_ptr<opengv::bearingVectors_t>> multiBearingVectors;
429 multiBearingVectors.resize(cameraModels.size());
430 for(
size_t i=0;
i<cameraModels.size();++
i)
432 multiPoints[
i] = std::make_shared<opengv::points_t>();
433 multiBearingVectors[
i] = std::make_shared<opengv::bearingVectors_t>();
436 for(
size_t i=0;
i<objectPoints.size(); ++
i)
438 int cameraIndex = cameraIndexes[
i];
439 multiPoints[cameraIndex]->push_back(opengv::point_t(objectPoints[
i].
x,objectPoints[
i].
y,objectPoints[
i].
z));
// Back-project the pixel at depth 1 to obtain a bearing vector
// ('pt' here is declared in a missing line).
441 cameraModels[cameraIndex].project(imagePoints[
i].
x, imagePoints[
i].
y, 1, pt[0], pt[1], pt[2]);
443 multiBearingVectors[cameraIndex]->push_back(opengv::bearingVector_t(pt[0], pt[1], pt[2]));
447 opengv::absolute_pose::NoncentralAbsoluteMultiAdapter adapter(
// Seed the adapter with the caller's guess pose.
453 adapter.setR(guess.
toEigen4d().block<3,3>(0, 0));
454 adapter.sett(opengv::translation_t(guess.
x(), guess.
y(), guess.
z()));
458 opengv::sac::MultiRansac<opengv::sac_problems::absolute_pose::MultiNoncentralAbsolutePoseSacProblem> ransac;
459 std::shared_ptr<opengv::sac_problems::absolute_pose::MultiNoncentralAbsolutePoseSacProblem> absposeproblem_ptr(
460 new opengv::sac_problems::absolute_pose::MultiNoncentralAbsolutePoseSacProblem(adapter));
462 ransac.sac_model_ = absposeproblem_ptr;
// Angular threshold equivalent to 'reprojError' pixels at focal fx.
463 ransac.threshold_ = 1.0 -
cos(
atan(reprojError/cameraModels[0].
fx()));
464 ransac.max_iterations_ = iterations;
465 UDEBUG(
"Ransac params: threshold = %f (reprojError=%f fx=%f), max iterations=%d", ransac.threshold_, reprojError, cameraModels[0].fx(), ransac.max_iterations_);
468 ransac.computeModel();
473 UDEBUG(
"Ransac iterations done: %d", ransac.iterations_);
// Flatten the per-camera inlier lists into one index vector.
474 for (
size_t i=0;
i < cameraModels.size(); ++
i)
476 inliers.insert(inliers.end(), ransac.inliers_[
i].begin(), ransac.inliers_[
i].end());
// ANY sampling: single flat lists plus camera-correspondence indices
// for the (single) non-central adapter with GP3P.
482 opengv::points_t points;
485 opengv::bearingVectors_t bearingVectors;
486 opengv::absolute_pose::NoncentralAbsoluteAdapter::camCorrespondences_t camCorrespondences;
488 for(
size_t i=0;
i<objectPoints.size(); ++
i)
490 int cameraIndex = cameraIndexes[
i];
491 points.push_back(opengv::point_t(objectPoints[
i].
x,objectPoints[
i].
y,objectPoints[
i].
z));
493 cameraModels[cameraIndex].project(imagePoints[
i].
x, imagePoints[
i].
y, 1, pt[0], pt[1], pt[2]);
495 bearingVectors.push_back(opengv::bearingVector_t(pt[0], pt[1], pt[2]));
496 camCorrespondences.push_back(cameraIndex);
500 opengv::absolute_pose::NoncentralAbsoluteAdapter adapter(
507 adapter.setR(guess.
toEigen4d().block<3,3>(0, 0));
508 adapter.sett(opengv::translation_t(guess.
x(), guess.
y(), guess.
z()));
512 opengv::sac::Ransac<opengv::sac_problems::absolute_pose::AbsolutePoseSacProblem> ransac;
513 std::shared_ptr<opengv::sac_problems::absolute_pose::AbsolutePoseSacProblem> absposeproblem_ptr(
514 new opengv::sac_problems::absolute_pose::AbsolutePoseSacProblem(adapter, opengv::sac_problems::absolute_pose::AbsolutePoseSacProblem::GP3P));
516 ransac.sac_model_ = absposeproblem_ptr;
517 ransac.threshold_ = 1.0 -
cos(
atan(reprojError/cameraModels[0].
fx()));
518 ransac.max_iterations_ = iterations;
519 UDEBUG(
"Ransac params: threshold = %f (reprojError=%f fx=%f), max iterations=%d", ransac.threshold_, reprojError, cameraModels[0].fx(), ransac.max_iterations_);
522 ransac.computeModel();
527 UDEBUG(
"Ransac iterations done: %d", ransac.iterations_);
528 inliers = ransac.inliers_;
// NOTE(review): "%ld" with a size_t argument is not strictly portable
// (wrong on LLP64 targets); "%zu" or an explicit cast would be safer.
531 UDEBUG(
"Ransac inliers: %ld", inliers.size());
// Covariance estimation, mirroring the single-camera variant:
// compare inlier 3D points against frame-B words or ray-reconstructed
// points ('pnp', 'transform', 'ray', 'objPtCamBFrame' and 'newPt'
// come from missing lines), then scale by robust median errors.
533 if((
int)inliers.size() >= minInliers && !pnp.
isNull())
540 std::vector<float> errorSqrdX;
541 std::vector<float> errorSqrdY;
542 std::vector<float> errorSqrdZ;
543 if(splitLinearCovarianceComponents)
545 errorSqrdX.resize(inliers.size());
546 errorSqrdY.resize(inliers.size());
547 errorSqrdZ.resize(inliers.size());
549 std::vector<float> errorSqrdDists(inliers.size());
550 std::vector<float> errorSqrdAngles(inliers.size());
// Precompute per-camera poses (and inverses) in the estimated frame.
552 std::vector<Transform> transformsCameraFrame(cameraModels.size());
553 std::vector<Transform> transformsCameraFrameInv(cameraModels.size());
554 for(
size_t i=0;
i<cameraModels.size(); ++
i)
556 transformsCameraFrame[
i] =
transform * cameraModels[
i].localTransform();
557 transformsCameraFrameInv[
i] = transformsCameraFrame[
i].inverse();
560 for(
unsigned int i=0;
i<inliers.size(); ++
i)
562 cv::Point3f objPt = objectPoints[inliers[
i]];
565 std::map<int, cv::Point3f>::const_iterator
iter = words3B.find(
matches[inliers[
i]]);
573 int cameraIndex = cameraIndexes[inliers[
i]];
580 cameraModels[cameraIndex].imageSize(),
581 imagePoints.at(inliers[
i]).x,
582 imagePoints.at(inliers[
i]).y,
583 cameraModels[cameraIndex].cx(),
584 cameraModels[cameraIndex].cy(),
585 cameraModels[cameraIndex].fx(),
586 cameraModels[cameraIndex].fy());
// The 1.1 factor pushes the reconstructed point slightly beyond the
// observed depth — presumably to penalize depth error; confirm intent.
588 newPt = cv::Point3f(ray.x(), ray.y(), ray.z()) * objPtCamBFrame.z*1.1;
594 if(splitLinearCovarianceComponents)
596 double errorX = objPt.x-newPt.x;
597 double errorY = objPt.y-newPt.y;
598 double errorZ = objPt.z-newPt.z;
599 errorSqrdX[
i] = errorX * errorX;
600 errorSqrdY[
i] = errorY * errorY;
601 errorSqrdZ[
i] = errorZ * errorZ;
604 errorSqrdDists[
i] =
uNormSquared(objPt.x-newPt.x, objPt.y-newPt.y, objPt.z-newPt.z);
606 Eigen::Vector4f
v1(objPt.x, objPt.y, objPt.z, 0);
607 Eigen::Vector4f
v2(newPt.x, newPt.y, newPt.z, 0);
608 errorSqrdAngles[
i] = pcl::getAngle3D(
v1,
v2);
// Robust scale: 2.1981 * the (size()/varianceMedianRatio)-th sorted
// squared error, as in the single-camera variant.
611 std::sort(errorSqrdDists.begin(), errorSqrdDists.end());
613 double median_error_sqr_lin = 2.1981 * (double)errorSqrdDists[errorSqrdDists.size () / varianceMedianRatio];
615 (*covariance)(cv::Range(0,3), cv::Range(0,3)) *= median_error_sqr_lin;
616 std::sort(errorSqrdAngles.begin(), errorSqrdAngles.end());
617 double median_error_sqr_ang = 2.1981 * (double)errorSqrdAngles[errorSqrdAngles.size () / varianceMedianRatio];
619 (*covariance)(cv::Range(3,6), cv::Range(3,6)) *= median_error_sqr_ang;
621 if(splitLinearCovarianceComponents)
// Per-axis linear variances replace the isotropic diagonal entries.
623 std::sort(errorSqrdX.begin(), errorSqrdX.end());
624 double median_error_sqr_x = 2.1981 * (double)errorSqrdX[errorSqrdX.size () / varianceMedianRatio];
625 std::sort(errorSqrdY.begin(), errorSqrdY.end());
626 double median_error_sqr_y = 2.1981 * (double)errorSqrdY[errorSqrdY.size () / varianceMedianRatio];
627 std::sort(errorSqrdZ.begin(), errorSqrdZ.end());
628 double median_error_sqr_z = 2.1981 * (double)errorSqrdZ[errorSqrdZ.size () / varianceMedianRatio];
633 covariance->at<
double>(0,0) = median_error_sqr_x;
634 covariance->at<
double>(1,1) = median_error_sqr_y;
635 covariance->at<
double>(2,2) = median_error_sqr_z;
637 median_error_sqr_lin =
uMax3(median_error_sqr_x, median_error_sqr_y, median_error_sqr_z);
// Reject the transform when the linear variance exceeds the cap.
640 if(maxVariance > 0 && median_error_sqr_lin > maxVariance)
642 UWARN(
"Rejected PnP transform, variance is too high! %f > %f!", median_error_sqr_lin, maxVariance);
643 *covariance = cv::Mat::eye(6,6,CV_64FC1);
// Export inlier indices to the caller (loop body in missing lines).
656 inliersOut->resize(inliers.size());
657 for(
unsigned int i=0;
i<inliers.size(); ++
i)
// NOTE(review): fragment of a 3D->3D motion estimation function
// (correspondence-based, PCL-style registration).  The function header
// and several body lines are missing from this chunk; the leading
// integers are original-file line numbers from the extraction.
667 const std::map<int, cv::Point3f> & words3A,
668 const std::map<int, cv::Point3f> & words3B,
670 double inliersDistance,
672 int refineIterations,
673 cv::Mat * covariance,
674 std::vector<int> * matchesOut,
675 std::vector<int> * inliersOut)
// Paired 3D correspondences; filled by code in missing lines.
678 std::vector<cv::Point3f> inliers1;
679 std::vector<cv::Point3f> inliers2;
689 UASSERT(inliers1.size() == inliers2.size());
690 UDEBUG(
"Unique correspondences = %d", (
int)inliers1.size());
694 *covariance = cv::Mat::eye(6,6,CV_64FC1);
697 std::vector<int> inliers;
698 if((
int)inliers1.size() >= minInliers)
// Copy the correspondence pairs into PCL clouds for registration.
700 pcl::PointCloud<pcl::PointXYZ>::Ptr inliers1cloud(
new pcl::PointCloud<pcl::PointXYZ>);
701 pcl::PointCloud<pcl::PointXYZ>::Ptr inliers2cloud(
new pcl::PointCloud<pcl::PointXYZ>);
702 inliers1cloud->resize(inliers1.size());
703 inliers2cloud->resize(inliers1.size());
704 for(
unsigned int i=0;
i<inliers1.size(); ++
i)
706 (*inliers1cloud)[
i].x = inliers1[
i].x;
707 (*inliers1cloud)[
i].y = inliers1[
i].y;
708 (*inliers1cloud)[
i].z = inliers1[
i].z;
709 (*inliers2cloud)[
i].x = inliers2[
i].x;
710 (*inliers2cloud)[
i].y = inliers2[
i].y;
711 (*inliers2cloud)[
i].z = inliers2[
i].z;
// 't' is the transform computed by a registration call in missing
// lines; accept only with enough inliers.
723 if(!
t.isNull() && (
int)inliers.size() >= minInliers)
// Export inlier indices to the caller (loop body in missing lines).
735 inliersOut->resize(inliers.size());
736 for(
unsigned int i=0;
i<inliers.size(); ++
i)
// NOTE(review): fragment of a reprojection-error helper: projects
// 'opoints' with (rvec, tvec), then collects indices whose pixel error
// is within the threshold.  The function name line, the 'count'/'oi'
// declarations and the loop header are in missing lines.
// NOTE(review): opoints/ipoints are taken by value (copied on every
// call); const references would avoid the copies — confirm no caller
// relies on the copies before changing.
747 std::vector<cv::Point3f> opoints,
748 std::vector<cv::Point2f> ipoints,
749 const cv::Mat & cameraMatrix,
750 const cv::Mat & distCoeffs,
751 const cv::Mat & rvec,
752 const cv::Mat & tvec,
753 float reprojErrorThreshold,
754 std::vector<int> & inliers)
756 UASSERT(opoints.size() == ipoints.size());
759 std::vector<cv::Point2f> projpoints;
760 projectPoints(opoints, rvec, tvec, cameraMatrix, distCoeffs, projpoints);
// 'count' comes from a missing line (presumably opoints.size()).
762 inliers.resize(
count,0);
763 std::vector<float> err(
count);
// Euclidean pixel distance between observed and reprojected points.
767 float e = (
float)cv::norm( ipoints[
i] - projpoints[
i]);
768 if(
e <= reprojErrorThreshold)
// NOTE(review): fragment of a PnP-RANSAC model-refinement routine
// (mirrors PCL's SampleConsensusModel::refineModel): iteratively
// re-solves PnP on the current inlier set, recomputes inliers with a
// sigma-clipped error threshold, and stops on convergence, oscillation,
// or the iteration cap.  Header and several body lines are missing; the
// leading integers are original-file line numbers from the extraction.
780 const std::vector<cv::Point3f> & objectPoints,
781 const std::vector<cv::Point2f> & imagePoints,
782 const cv::Mat & cameraMatrix,
783 const cv::Mat & distCoeffs,
786 bool useExtrinsicGuess,
788 float reprojectionError,
790 std::vector<int> & inliers,
792 int refineIterations,
// PnP needs at least 4 points; what happens on this branch is in a
// missing line (presumably minInliersCount is raised to 4 — confirm).
795 if(minInliersCount < 4)
816 float inlierThreshold = reprojectionError;
817 if((
int)inliers.size() >= minInliersCount && refineIterations>0)
819 float error_threshold = inlierThreshold;
820 int refine_iterations = 0;
821 bool inlier_changed =
false, oscillating =
false;
822 std::vector<int> new_inliers, prev_inliers = inliers;
823 std::vector<size_t> inliers_sizes;
825 cv::Mat new_model_rvec = rvec;
826 cv::Mat new_model_tvec = tvec;
// Refinement loop (the do { is in a missing line): gather the current
// inlier subset and re-solve PnP using the previous pose as the guess.
831 std::vector<cv::Point3f> opoints_inliers(prev_inliers.size());
832 std::vector<cv::Point2f> ipoints_inliers(prev_inliers.size());
833 for(
unsigned int i=0;
i<prev_inliers.size(); ++
i)
835 opoints_inliers[
i] = objectPoints[prev_inliers[
i]];
836 ipoints_inliers[
i] = imagePoints[prev_inliers[
i]];
839 UDEBUG(
"inliers=%d refine_iterations=%d, rvec=%f,%f,%f tvec=%f,%f,%f", (
int)prev_inliers.size(), refine_iterations,
840 *new_model_rvec.ptr<
double>(0), *new_model_rvec.ptr<
double>(1), *new_model_rvec.ptr<
double>(2),
841 *new_model_tvec.ptr<
double>(0), *new_model_tvec.ptr<
double>(1), *new_model_tvec.ptr<
double>(2));
// useExtrinsicGuess is forced true here so the previous pose seeds
// the solver; 'flags' comes from a missing parameter line.
844 cv::solvePnP(opoints_inliers, ipoints_inliers, cameraMatrix, distCoeffs, new_model_rvec, new_model_tvec,
true, flags);
845 inliers_sizes.push_back(prev_inliers.size());
847 UDEBUG(
"rvec=%f,%f,%f tvec=%f,%f,%f",
848 *new_model_rvec.ptr<
double>(0), *new_model_rvec.ptr<
double>(1), *new_model_rvec.ptr<
double>(2),
849 *new_model_tvec.ptr<
double>(0), *new_model_tvec.ptr<
double>(1), *new_model_tvec.ptr<
double>(2));
// Re-evaluate the inlier set for the refined pose.
852 std::vector<float> err =
computeReprojErrors(objectPoints, imagePoints, cameraMatrix, distCoeffs, new_model_rvec, new_model_tvec, error_threshold, new_inliers);
853 UDEBUG(
"RANSAC refineModel: Number of inliers found (before/after): %d/%d, with an error threshold of %f.",
854 (
int)prev_inliers.size (), (
int)new_inliers.size (), error_threshold);
856 if ((
int)new_inliers.size() < minInliersCount)
859 if (refine_iterations >= refineIterations)
// Tighten the threshold from the error statistics (refineSigma *
// stddev), never exceeding the initial threshold.
867 float m =
uMean(err.data(), err.size());
868 float variance =
uVariance(err.data(), err.size());
869 error_threshold =
std::min(inlierThreshold, refineSigma *
float(
sqrt(variance)));
871 UDEBUG (
"RANSAC refineModel: New estimated error threshold: %f (variance=%f mean=%f) on iteration %d out of %d.",
872 error_threshold, variance,
m, refine_iterations, refineIterations);
873 inlier_changed =
false;
// Oscillation detection: the same inlier counts repeating with
// period 2 across the last four iterations.
// NOTE(review): the guard compares inliers_sizes.size() against
// minInliersCount before indexing 4 entries back; it is safe only
// because minInliersCount >= 4 is enforced above — a literal 4 (as
// in PCL) would state the intent more directly.  Confirm before
// changing.
877 if (new_inliers.size () != prev_inliers.size ())
880 if ((
int)inliers_sizes.size () >= minInliersCount)
882 if (inliers_sizes[inliers_sizes.size () - 1] == inliers_sizes[inliers_sizes.size () - 3] &&
883 inliers_sizes[inliers_sizes.size () - 2] == inliers_sizes[inliers_sizes.size () - 4])
889 inlier_changed =
true;
// Same size but possibly different membership: compare element-wise.
894 for (
size_t i = 0;
i < prev_inliers.size (); ++
i)
897 if (prev_inliers[
i] != new_inliers[
i])
899 inlier_changed =
true;
904 while (inlier_changed && ++refine_iterations < refineIterations);
// Final sanity: reject the refinement when too few inliers remain or
// when oscillation was detected; otherwise commit the refined pose.
907 if ((
int)prev_inliers.size() < minInliersCount)
909 UWARN (
"RANSAC refineModel: Refinement failed: got very low inliers (%d)!", (
int)prev_inliers.size());
914 UDEBUG(
"RANSAC refineModel: Detected oscillations in the model refinement.");
918 rvec = new_model_rvec;
919 tvec = new_model_tvec;