39 #include <pcl/common/common.h>
44 #include <opengv/absolute_pose/methods.hpp>
45 #include <opengv/absolute_pose/NoncentralAbsoluteAdapter.hpp>
46 #include <opengv/absolute_pose/NoncentralAbsoluteMultiAdapter.hpp>
47 #include <opengv/sac/Ransac.hpp>
48 #include <opengv/sac/MultiRansac.hpp>
49 #include <opengv/sac_problems/absolute_pose/AbsolutePoseSacProblem.hpp>
50 #include <opengv/sac_problems/absolute_pose/MultiNoncentralAbsolutePoseSacProblem.hpp>
// ---------------------------------------------------------------------------
// NOTE(review): mangled fragment — statements are wrapped mid-expression and
// the embedded original line numbers are non-contiguous, so the function
// signature/name and many interior lines (including the actual PnP solver
// call between embedded lines ~131-146, and the declaration/increment of
// the compaction counter `oi`) are NOT visible in this excerpt.  Code is
// kept byte-identical; only comments are added.
//
// Appears to be a single-camera 3D->2D motion estimator (PnP + RANSAC):
// 3D words from frame A (words3A) are matched by id to 2D keypoints in
// frame B (words2B); a pose is estimated from the guess, and a 6x6
// covariance is derived from inlier error statistics — TODO confirm
// against the complete file.
// ---------------------------------------------------------------------------
60 const std::map<int, cv::Point3f> & words3A,
61 const std::map<int, cv::KeyPoint> & words2B,
68 int varianceMedianRatio,
71 const std::map<int, cv::Point3f> & words3B,
73 std::vector<int> * matchesOut,
74 std::vector<int> * inliersOut,
75 bool splitLinearCovarianceComponents)
81 std::vector<int>
matches, inliers;
// Covariance starts as identity; scaled below if a pose is accepted.
85 *covariance = cv::Mat::eye(6,6,CV_64FC1);
// Build correspondence arrays: ids present in words2B, looked up in words3A.
89 std::vector<int> ids =
uKeys(words2B);
90 std::vector<cv::Point3f> objectPoints(ids.size());
91 std::vector<cv::Point2f> imagePoints(ids.size());
94 for(
unsigned int i=0;
i<ids.size(); ++
i)
96 std::map<int, cv::Point3f>::const_iterator
iter=words3A.find(ids[
i]);
// Copy the matched 3D point / 2D keypoint pair into the compacted arrays.
// `oi` is written but its declaration/increment is elided from this view.
99 const cv::Point3f & pt =
iter->second;
100 objectPoints[oi].x = pt.x;
101 objectPoints[oi].y = pt.y;
102 objectPoints[oi].z = pt.z;
103 imagePoints[oi] = words2B.find(ids[
i])->second.pt;
// Shrink to the number of valid correspondences actually kept.
108 objectPoints.resize(oi);
109 imagePoints.resize(oi);
112 UDEBUG(
"words3A=%d words2B=%d matches=%d words3B=%d guess=%s reprojError=%f iterations=%d",
113 (
int)words3A.size(), (
int)words2B.size(), (
int)
matches.size(), (
int)words3B.size(),
114 guess.
prettyPrint().c_str(), reprojError, iterations);
// Only attempt pose estimation with enough 3D<->2D matches.
116 if((
int)
matches.size() >= minInliers)
// Camera intrinsics and distortion for the PnP solver.
119 cv::Mat
K = cameraModel.
K();
120 cv::Mat
D = cameraModel.
D();
// Convert the initial guess (camera frame) to OpenCV rvec/tvec form.
122 cv::Mat
R = (cv::Mat_<double>(3,3) <<
123 (double)guessCameraFrame.
r11(), (double)guessCameraFrame.
r12(), (double)guessCameraFrame.
r13(),
124 (double)guessCameraFrame.
r21(), (double)guessCameraFrame.
r22(), (double)guessCameraFrame.
r23(),
125 (double)guessCameraFrame.
r31(), (double)guessCameraFrame.
r32(), (double)guessCameraFrame.
r33());
127 cv::Mat rvec(3,1, CV_64FC1);
128 cv::Rodrigues(
R, rvec);
129 cv::Mat tvec = (cv::Mat_<double>(3,1) <<
130 (double)guessCameraFrame.
x(), (double)guessCameraFrame.
y(), (double)guessCameraFrame.
z());
// NOTE(review): the RANSAC PnP solve that fills `inliers` (embedded lines
// ~131-146) is elided from this excerpt.
// If the solve produced enough inliers, rebuild the pose from rvec/tvec.
147 if((
int)inliers.size() >= minInliers)
149 cv::Rodrigues(rvec,
R);
150 Transform pnp(
R.at<
double>(0,0),
R.at<
double>(0,1),
R.at<
double>(0,2), tvec.at<
double>(0),
151 R.at<
double>(1,0),
R.at<
double>(1,1),
R.at<
double>(1,2), tvec.at<
double>(1),
152 R.at<
double>(2,0),
R.at<
double>(2,1),
R.at<
double>(2,2), tvec.at<
double>(2));
// Covariance estimation: requires either 3D words in B or a valid image
// size on the camera model (for back-projecting rays).
157 if(covariance && (!words3B.empty() || cameraModel.
imageSize() != cv::Size()))
159 std::vector<float> errorSqrdDists(inliers.size());
160 std::vector<float> errorSqrdX;
161 std::vector<float> errorSqrdY;
162 std::vector<float> errorSqrdZ;
163 if(splitLinearCovarianceComponents)
165 errorSqrdX.resize(inliers.size());
166 errorSqrdY.resize(inliers.size());
167 errorSqrdZ.resize(inliers.size());
169 std::vector<float> errorSqrdAngles(inliers.size());
// Per-inlier 3D error between the word in A and its counterpart in B
// (or, on lines elided here, a point reconstructed along the viewing ray).
173 for(
unsigned int i=0;
i<inliers.size(); ++
i)
175 cv::Point3f objPt = objectPoints[inliers[
i]];
178 std::map<int, cv::Point3f>::const_iterator
iter = words3B.find(
matches[inliers[
i]]);
193 imagePoints.at(inliers[
i]).x,
194 imagePoints.at(inliers[
i]).y,
// Fallback point placed slightly behind (1.1x depth) along the ray when no
// 3D word exists in B — surrounding lines elided; verify in full file.
200 newPt = cv::Point3f(ray.x(), ray.y(), ray.z()) * objPtCamBFrame.z*1.1;
206 if(splitLinearCovarianceComponents)
208 double errorX = objPt.x-newPt.x;
209 double errorY = objPt.y-newPt.y;
210 double errorZ = objPt.z-newPt.z;
211 errorSqrdX[
i] = errorX * errorX;
212 errorSqrdY[
i] = errorY * errorY;
213 errorSqrdZ[
i] = errorZ * errorZ;
216 errorSqrdDists[
i] =
uNormSquared(objPt.x-newPt.x, objPt.y-newPt.y, objPt.z-newPt.z);
// Angular error between the two 3D directions (w=0 vectors).
218 Eigen::Vector4f
v1(objPt.x, objPt.y, objPt.z, 0);
219 Eigen::Vector4f
v2(newPt.x, newPt.y, newPt.z, 0);
220 errorSqrdAngles[
i] = pcl::getAngle3D(
v1,
v2);
// Robust scale: take the (1/varianceMedianRatio)-quantile of the sorted
// squared errors and scale by 2.1981 (constant's derivation not visible
// here — presumably a median-to-variance factor; TODO confirm).
223 std::sort(errorSqrdDists.begin(), errorSqrdDists.end());
225 double median_error_sqr_lin = 2.1981 * (double)errorSqrdDists[errorSqrdDists.size () / varianceMedianRatio];
227 (*covariance)(cv::Range(0,3), cv::Range(0,3)) *= median_error_sqr_lin;
228 std::sort(errorSqrdAngles.begin(), errorSqrdAngles.end());
229 double median_error_sqr_ang = 2.1981 * (double)errorSqrdAngles[errorSqrdAngles.size () / varianceMedianRatio];
231 (*covariance)(cv::Range(3,6), cv::Range(3,6)) *= median_error_sqr_ang;
// Optionally use independent per-axis variances on the diagonal instead of
// one isotropic linear variance.
233 if(splitLinearCovarianceComponents)
235 std::sort(errorSqrdX.begin(), errorSqrdX.end());
236 double median_error_sqr_x = 2.1981 * (double)errorSqrdX[errorSqrdX.size () / varianceMedianRatio];
237 std::sort(errorSqrdY.begin(), errorSqrdY.end());
238 double median_error_sqr_y = 2.1981 * (double)errorSqrdY[errorSqrdY.size () / varianceMedianRatio];
239 std::sort(errorSqrdZ.begin(), errorSqrdZ.end());
240 double median_error_sqr_z = 2.1981 * (double)errorSqrdZ[errorSqrdZ.size () / varianceMedianRatio];
245 covariance->at<
double>(0,0) = median_error_sqr_x;
246 covariance->at<
double>(1,1) = median_error_sqr_y;
247 covariance->at<
double>(2,2) = median_error_sqr_z;
249 median_error_sqr_lin =
uMax3(median_error_sqr_x, median_error_sqr_y, median_error_sqr_z);
// Reject the whole estimate when the linear variance exceeds the cap.
252 if(maxVariance > 0 && median_error_sqr_lin > maxVariance)
254 UWARN(
"Rejected PnP transform, variance is too high! %f > %f!", median_error_sqr_lin, maxVariance);
255 *covariance = cv::Mat::eye(6,6,CV_64FC1);
// Alternate covariance path (branch context elided): scale by the RMS 2D
// reprojection error of the inliers.
262 std::vector<cv::Point2f> imagePointsReproj;
263 cv::projectPoints(objectPoints, rvec, tvec,
K, cv::Mat(), imagePointsReproj);
265 for(
unsigned int i=0;
i<inliers.size(); ++
i)
267 err +=
uNormSquared(imagePoints.at(inliers[
i]).x - imagePointsReproj.at(inliers[
i]).x, imagePoints.at(inliers[
i]).y - imagePointsReproj.at(inliers[
i]).y);
270 *covariance *=
std::sqrt(err/
float(inliers.size()));
// Export inlier indices to the caller (loop body elided in this excerpt).
281 inliersOut->resize(inliers.size());
282 for(
unsigned int i=0;
i<inliers.size(); ++
i)
// ---------------------------------------------------------------------------
// NOTE(review): mangled fragment — the function name line and several
// interior lines (including the delegated call whose trailing arguments are
// visible at embedded lines 327-329) are elided from this excerpt.
//
// Appears to be a multi-camera overload that delegates to the per-camera
// implementation (which reports matches/inliers split per camera) and then
// flattens the per-camera index lists into the caller's single vectors —
// TODO confirm against the complete file.
// ---------------------------------------------------------------------------
292 const std::map<int, cv::Point3f> & words3A,
293 const std::map<int, cv::KeyPoint> & words2B,
294 const std::vector<CameraModel> & cameraModels,
295 unsigned int samplingPolicy,
300 int refineIterations,
301 int varianceMedianRatio,
304 const std::map<int, cv::Point3f> & words3B,
305 cv::Mat * covariance,
306 std::vector<int> * matchesOut,
307 std::vector<int> * inliersOut,
308 bool splitLinearCovarianceComponents)
310 std::vector<std::vector<int> > matchesPerCamera;
311 std::vector<std::vector<int> > inliersPerCamera;
// Per-camera outputs are requested only when the caller asked for them.
327 matchesOut?&matchesPerCamera:0,
328 inliersOut?&inliersPerCamera:0,
329 splitLinearCovarianceComponents);
// Flatten matches across all cameras into one list.
332 for(
size_t i=0;
i<matchesPerCamera.size(); ++
i)
334 matchesOut->insert(matchesOut->end(), matchesPerCamera[
i].begin(), matchesPerCamera[
i].end());
// Flatten inliers across all cameras into one list.
339 for(
size_t i=0;
i<inliersPerCamera.size(); ++
i)
341 inliersOut->insert(inliersOut->end(), inliersPerCamera[
i].begin(), inliersPerCamera[
i].end());
// ---------------------------------------------------------------------------
// NOTE(review): mangled fragment — function name line, many braces and
// statements (e.g. the guarded #else path of the RTABMAP_OPENGV check, the
// RANSAC success checks, the construction of `pnp`/`transform`) are elided;
// the embedded line numbers are non-contiguous.  Code kept byte-identical.
//
// Appears to be the OpenGV-based multi-camera (non-central) 3D->2D motion
// estimator: keypoints from a horizontally-stitched multi-camera image are
// routed to their camera by x-coordinate, converted to bearing vectors, and
// solved with (Multi)Ransac over a noncentral absolute-pose problem —
// TODO confirm against the complete file.
// ---------------------------------------------------------------------------
348 const std::map<int, cv::Point3f> & words3A,
349 const std::map<int, cv::KeyPoint> & words2B,
350 const std::vector<CameraModel> & cameraModels,
351 unsigned int samplingPolicy,
356 int refineIterations,
357 int varianceMedianRatio,
360 const std::map<int, cv::Point3f> & words3B,
361 cv::Mat * covariance,
362 std::vector<std::vector<int> > * matchesOut,
363 std::vector<std::vector<int> > * inliersOut,
364 bool splitLinearCovarianceComponents)
// Hard requirement: this path only works when built with OpenGV.
367 #ifndef RTABMAP_OPENGV
368 UERROR(
"This function is only available if rtabmap is built with OpenGV dependency.");
// All camera models must be valid and share the same sub-image width,
// since the camera index is derived from kpt.x / subImageWidth below.
370 UASSERT(!cameraModels.empty() && cameraModels[0].imageWidth() > 0);
371 int subImageWidth = cameraModels[0].imageWidth();
372 for(
size_t i=0;
i<cameraModels.size(); ++
i)
374 UASSERT(cameraModels[
i].isValidForProjection());
375 UASSERT(subImageWidth == cameraModels[
i].imageWidth());
379 UASSERT(varianceMedianRatio > 1);
381 std::vector<int>
matches, inliers;
385 *covariance = cv::Mat::eye(6,6,CV_64FC1);
// Build correspondences, recording which camera each keypoint belongs to.
389 std::vector<int> ids =
uKeys(words2B);
390 std::vector<cv::Point3f> objectPoints(ids.size());
391 std::vector<cv::Point2f> imagePoints(ids.size());
394 std::vector<int> cameraIndexes(ids.size());
395 for(
unsigned int i=0;
i<ids.size(); ++
i)
397 std::map<int, cv::Point3f>::const_iterator
iter=words3A.find(ids[
i]);
400 const cv::Point2f & kpt = words2B.find(ids[
i])->second.pt;
// Camera index from horizontal position in the stitched image.
401 int cameraIndex =
int(kpt.x / subImageWidth);
402 UASSERT_MSG(cameraIndex >= 0 && cameraIndex < (
int)cameraModels.size(),
403 uFormat(
"cameraIndex=%d, models=%d, kpt.x=%f, subImageWidth=%f (Camera model image width=%d)",
404 cameraIndex, (
int)cameraModels.size(), kpt.x, subImageWidth, cameraModels[cameraIndex].imageWidth()).c_str());
406 const cv::Point3f & pt =
iter->second;
407 objectPoints[oi] = pt;
408 imagePoints[oi] = kpt;
// Shift keypoint x back into its own camera's local image coordinates.
410 imagePoints[oi].x = imagePoints[oi].x - (cameraIndex*subImageWidth);
411 cameraIndexes[oi] = cameraIndex;
// Compact arrays (`oi` bookkeeping lines elided in this excerpt).
416 objectPoints.resize(oi);
417 imagePoints.resize(oi);
418 cameraIndexes.resize(oi);
421 UDEBUG(
"words3A=%d words2B=%d matches=%d words3B=%d guess=%s reprojError=%f iterations=%d samplingPolicy=%ld",
422 (
int)words3A.size(), (
int)words2B.size(), (
int)
matches.size(), (
int)words3B.size(),
423 guess.
prettyPrint().c_str(), reprojError, iterations, samplingPolicy);
425 if((
int)
matches.size() >= minInliers)
// Sampling policy 0/2 (presumably AUTO/HOMOGENEOUS): count matches per
// camera to decide if homogeneous sampling is feasible.
427 if(samplingPolicy == 0 || samplingPolicy == 2)
430 cc.resize(cameraModels.size());
431 std::fill(cc.begin(), cc.end(),0);
432 for(
size_t i=0;
i<cameraIndexes.size(); ++
i)
434 cc[cameraIndexes[
i]] = cc[cameraIndexes[
i]] + 1;
437 for (
size_t i=0;
i<cameraModels.size(); ++
i)
439 UDEBUG(
"Matches in Camera %d: %d",
i, cc[
i]);
// A camera with too few matches: HOMOGENEOUS (2) aborts, AUTO falls back.
443 if(samplingPolicy==2) {
444 UERROR(
"Not enough matches in camera %ld to do "
445 "homogenoeus random sampling, returning null "
446 "transform. Consider using AUTO sampling "
447 "policy to fallback to ANY policy.",
i);
452 UWARN(
"Not enough matches in camera %ld to do "
453 "homogenoeus random sampling, falling back to ANY policy.",
i);
460 if(samplingPolicy == 0)
// Per-camera extrinsics (offset + rotation) for the noncentral adapters.
466 opengv::translations_t camOffsets;
467 opengv::rotations_t camRotations;
468 for(
size_t i=0;
i<cameraModels.size(); ++
i)
470 camOffsets.push_back(opengv::translation_t(
471 cameraModels[
i].localTransform().
x(),
472 cameraModels[
i].localTransform().
y(),
473 cameraModels[
i].localTransform().
z()));
474 camRotations.push_back(cameraModels[
i].localTransform().toEigen4d().block<3,3>(0, 0));
// Homogeneous sampling branch: per-camera point/bearing lists + MultiRansac.
478 if(samplingPolicy == 2)
481 std::vector<std::shared_ptr<opengv::points_t>> multiPoints;
482 multiPoints.resize(cameraModels.size());
484 std::vector<std::shared_ptr<opengv::bearingVectors_t>> multiBearingVectors;
485 multiBearingVectors.resize(cameraModels.size());
486 for(
size_t i=0;
i<cameraModels.size();++
i)
488 multiPoints[
i] = std::make_shared<opengv::points_t>();
489 multiBearingVectors[
i] = std::make_shared<opengv::bearingVectors_t>();
492 for(
size_t i=0;
i<objectPoints.size(); ++
i)
494 int cameraIndex = cameraIndexes[
i];
495 multiPoints[cameraIndex]->push_back(opengv::point_t(objectPoints[
i].
x,objectPoints[
i].
y,objectPoints[
i].
z));
// Back-project the pixel at depth 1 to get the bearing direction.
497 cameraModels[cameraIndex].project(imagePoints[
i].
x, imagePoints[
i].
y, 1, pt[0], pt[1], pt[2]);
499 multiBearingVectors[cameraIndex]->push_back(opengv::bearingVector_t(pt[0], pt[1], pt[2]));
503 opengv::absolute_pose::NoncentralAbsoluteMultiAdapter adapter(
// Seed the adapter with the pose guess.
509 adapter.setR(guess.
toEigen4d().block<3,3>(0, 0));
510 adapter.sett(opengv::translation_t(guess.
x(), guess.
y(), guess.
z()));
514 opengv::sac::MultiRansac<opengv::sac_problems::absolute_pose::MultiNoncentralAbsolutePoseSacProblem> ransac;
515 std::shared_ptr<opengv::sac_problems::absolute_pose::MultiNoncentralAbsolutePoseSacProblem> absposeproblem_ptr(
516 new opengv::sac_problems::absolute_pose::MultiNoncentralAbsolutePoseSacProblem(adapter));
518 ransac.sac_model_ = absposeproblem_ptr;
// Angular RANSAC threshold: 1 - cos(atan(reprojError/fx)) converts a pixel
// reprojection error into a bearing-vector angle criterion.
519 ransac.threshold_ = 1.0 -
cos(
atan(reprojError/cameraModels[0].
fx()));
520 ransac.max_iterations_ = iterations;
521 UDEBUG(
"Ransac params: threshold = %f (reprojError=%f fx=%f), max iterations=%d", ransac.threshold_, reprojError, cameraModels[0].fx(), ransac.max_iterations_);
524 ransac.computeModel();
529 UDEBUG(
"Ransac iterations done: %d", ransac.iterations_);
// MultiRansac reports inliers per camera; merge them into one list.
530 for (
size_t i=0;
i < cameraModels.size(); ++
i)
532 inliers.insert(inliers.end(), ransac.inliers_[
i].begin(), ransac.inliers_[
i].end());
// ANY-sampling branch: flat lists with explicit camera correspondences.
538 opengv::points_t points;
541 opengv::bearingVectors_t bearingVectors;
542 opengv::absolute_pose::NoncentralAbsoluteAdapter::camCorrespondences_t camCorrespondences;
544 for(
size_t i=0;
i<objectPoints.size(); ++
i)
546 int cameraIndex = cameraIndexes[
i];
547 points.push_back(opengv::point_t(objectPoints[
i].
x,objectPoints[
i].
y,objectPoints[
i].
z));
549 cameraModels[cameraIndex].project(imagePoints[
i].
x, imagePoints[
i].
y, 1, pt[0], pt[1], pt[2]);
551 bearingVectors.push_back(opengv::bearingVector_t(pt[0], pt[1], pt[2]));
552 camCorrespondences.push_back(cameraIndex);
556 opengv::absolute_pose::NoncentralAbsoluteAdapter adapter(
563 adapter.setR(guess.
toEigen4d().block<3,3>(0, 0));
564 adapter.sett(opengv::translation_t(guess.
x(), guess.
y(), guess.
z()));
// Single Ransac over the GP3P (generalized P3P) minimal solver.
568 opengv::sac::Ransac<opengv::sac_problems::absolute_pose::AbsolutePoseSacProblem> ransac;
569 std::shared_ptr<opengv::sac_problems::absolute_pose::AbsolutePoseSacProblem> absposeproblem_ptr(
570 new opengv::sac_problems::absolute_pose::AbsolutePoseSacProblem(adapter, opengv::sac_problems::absolute_pose::AbsolutePoseSacProblem::GP3P));
572 ransac.sac_model_ = absposeproblem_ptr;
573 ransac.threshold_ = 1.0 -
cos(
atan(reprojError/cameraModels[0].
fx()));
574 ransac.max_iterations_ = iterations;
575 UDEBUG(
"Ransac params: threshold = %f (reprojError=%f fx=%f), max iterations=%d", ransac.threshold_, reprojError, cameraModels[0].fx(), ransac.max_iterations_);
578 ransac.computeModel();
583 UDEBUG(
"Ransac iterations done: %d", ransac.iterations_);
584 inliers = ransac.inliers_;
587 UDEBUG(
"Ransac inliers: %ld", inliers.size());
// Accept only with enough inliers and a valid pose (`pnp` construction is
// elided from this excerpt).
589 if((
int)inliers.size() >= minInliers && !pnp.
isNull())
// Covariance from inlier error statistics — mirrors the single-camera
// implementation but per-camera (see notes there for the 2.1981 scaling).
596 std::vector<float> errorSqrdX;
597 std::vector<float> errorSqrdY;
598 std::vector<float> errorSqrdZ;
599 if(splitLinearCovarianceComponents)
601 errorSqrdX.resize(inliers.size());
602 errorSqrdY.resize(inliers.size());
603 errorSqrdZ.resize(inliers.size());
605 std::vector<float> errorSqrdDists(inliers.size());
606 std::vector<float> errorSqrdAngles(inliers.size());
// Precompute each camera's frame (and inverse) under the estimated pose.
608 std::vector<Transform> transformsCameraFrame(cameraModels.size());
609 std::vector<Transform> transformsCameraFrameInv(cameraModels.size());
610 for(
size_t i=0;
i<cameraModels.size(); ++
i)
612 transformsCameraFrame[
i] =
transform * cameraModels[
i].localTransform();
613 transformsCameraFrameInv[
i] = transformsCameraFrame[
i].inverse();
616 for(
unsigned int i=0;
i<inliers.size(); ++
i)
618 cv::Point3f objPt = objectPoints[inliers[
i]];
621 std::map<int, cv::Point3f>::const_iterator
iter = words3B.find(
matches[inliers[
i]]);
629 int cameraIndex = cameraIndexes[inliers[
i]];
// Ray through the inlier pixel for the fallback depth-based point.
636 cameraModels[cameraIndex].imageSize(),
637 imagePoints.at(inliers[
i]).x,
638 imagePoints.at(inliers[
i]).y,
639 cameraModels[cameraIndex].cx(),
640 cameraModels[cameraIndex].cy(),
641 cameraModels[cameraIndex].fx(),
642 cameraModels[cameraIndex].fy());
644 newPt = cv::Point3f(ray.x(), ray.y(), ray.z()) * objPtCamBFrame.z*1.1;
650 if(splitLinearCovarianceComponents)
652 double errorX = objPt.x-newPt.x;
653 double errorY = objPt.y-newPt.y;
654 double errorZ = objPt.z-newPt.z;
655 errorSqrdX[
i] = errorX * errorX;
656 errorSqrdY[
i] = errorY * errorY;
657 errorSqrdZ[
i] = errorZ * errorZ;
660 errorSqrdDists[
i] =
uNormSquared(objPt.x-newPt.x, objPt.y-newPt.y, objPt.z-newPt.z);
662 Eigen::Vector4f
v1(objPt.x, objPt.y, objPt.z, 0);
663 Eigen::Vector4f
v2(newPt.x, newPt.y, newPt.z, 0);
664 errorSqrdAngles[
i] = pcl::getAngle3D(
v1,
v2);
// Robust quantile-based scaling of the linear/angular covariance blocks.
667 std::sort(errorSqrdDists.begin(), errorSqrdDists.end());
669 double median_error_sqr_lin = 2.1981 * (double)errorSqrdDists[errorSqrdDists.size () / varianceMedianRatio];
671 (*covariance)(cv::Range(0,3), cv::Range(0,3)) *= median_error_sqr_lin;
672 std::sort(errorSqrdAngles.begin(), errorSqrdAngles.end());
673 double median_error_sqr_ang = 2.1981 * (double)errorSqrdAngles[errorSqrdAngles.size () / varianceMedianRatio];
675 (*covariance)(cv::Range(3,6), cv::Range(3,6)) *= median_error_sqr_ang;
677 if(splitLinearCovarianceComponents)
679 std::sort(errorSqrdX.begin(), errorSqrdX.end());
680 double median_error_sqr_x = 2.1981 * (double)errorSqrdX[errorSqrdX.size () / varianceMedianRatio];
681 std::sort(errorSqrdY.begin(), errorSqrdY.end());
682 double median_error_sqr_y = 2.1981 * (double)errorSqrdY[errorSqrdY.size () / varianceMedianRatio];
683 std::sort(errorSqrdZ.begin(), errorSqrdZ.end());
684 double median_error_sqr_z = 2.1981 * (double)errorSqrdZ[errorSqrdZ.size () / varianceMedianRatio];
689 covariance->at<
double>(0,0) = median_error_sqr_x;
690 covariance->at<
double>(1,1) = median_error_sqr_y;
691 covariance->at<
double>(2,2) = median_error_sqr_z;
693 median_error_sqr_lin =
uMax3(median_error_sqr_x, median_error_sqr_y, median_error_sqr_z);
696 if(maxVariance > 0 && median_error_sqr_lin > maxVariance)
698 UWARN(
"Rejected PnP transform, variance is too high! %f > %f!", median_error_sqr_lin, maxVariance);
699 *covariance = cv::Mat::eye(6,6,CV_64FC1);
// Output: matches grouped per camera (enclosing loop elided).
708 matchesOut->resize(cameraModels.size());
712 UASSERT(cameraIndexes[
i]>=0 && cameraIndexes[
i] < (
int)cameraModels.size());
713 matchesOut->at(cameraIndexes[
i]).push_back(
matches[
i]);
// Output: inlier match ids grouped per camera.
718 inliersOut->resize(cameraModels.size());
719 for(
unsigned int i=0;
i<inliers.size(); ++
i)
721 UASSERT(inliers[
i]>=0 && inliers[
i] < (
int)cameraIndexes.size());
722 UASSERT(cameraIndexes[inliers[
i]]>=0 && cameraIndexes[inliers[
i]] < (
int)cameraModels.size());
723 inliersOut->at(cameraIndexes[inliers[
i]]).push_back(
matches[inliers[
i]]);
// ---------------------------------------------------------------------------
// NOTE(review): mangled fragment — function name line, the correspondence
// extraction filling inliers1/inliers2, the actual registration call
// producing `t` and `inliers`, and the closing code are elided from this
// excerpt; only comments are added to the visible lines.
//
// Appears to be a 3D->3D motion estimator: unique 3D-3D correspondences
// between frames A and B are copied into PCL clouds and registered (the
// solver itself is on elided lines) — TODO confirm against the full file.
// ---------------------------------------------------------------------------
731 const std::map<int, cv::Point3f> & words3A,
732 const std::map<int, cv::Point3f> & words3B,
734 double inliersDistance,
736 int refineIterations,
737 cv::Mat * covariance,
738 std::vector<int> * matchesOut,
739 std::vector<int> * inliersOut)
742 std::vector<cv::Point3f> inliers1;
743 std::vector<cv::Point3f> inliers2;
// Parallel arrays: correspondence i in A pairs with i in B.
753 UASSERT(inliers1.size() == inliers2.size());
754 UDEBUG(
"Unique correspondences = %d", (
int)inliers1.size());
758 *covariance = cv::Mat::eye(6,6,CV_64FC1);
761 std::vector<int> inliers;
762 if((
int)inliers1.size() >= minInliers)
// Copy the correspondence arrays into PCL clouds for registration.
764 pcl::PointCloud<pcl::PointXYZ>::Ptr inliers1cloud(
new pcl::PointCloud<pcl::PointXYZ>);
765 pcl::PointCloud<pcl::PointXYZ>::Ptr inliers2cloud(
new pcl::PointCloud<pcl::PointXYZ>);
766 inliers1cloud->resize(inliers1.size());
767 inliers2cloud->resize(inliers1.size());
768 for(
unsigned int i=0;
i<inliers1.size(); ++
i)
770 (*inliers1cloud)[
i].x = inliers1[
i].x;
771 (*inliers1cloud)[
i].y = inliers1[
i].y;
772 (*inliers1cloud)[
i].z = inliers1[
i].z;
773 (*inliers2cloud)[
i].x = inliers2[
i].x;
774 (*inliers2cloud)[
i].y = inliers2[
i].y;
775 (*inliers2cloud)[
i].z = inliers2[
i].z;
// `t` is the transform computed on elided lines; accept only when valid
// and supported by enough inliers.
787 if(!
t.isNull() && (
int)inliers.size() >= minInliers)
// Export inlier indices (loop body elided in this excerpt).
799 inliersOut->resize(inliers.size());
800 for(
unsigned int i=0;
i<inliers.size(); ++
i)
// ---------------------------------------------------------------------------
// NOTE(review): mangled fragment — function name line, the computation of
// `count`, the loop header over points, and the inlier-marking body are
// elided from this excerpt.
//
// Appears to be a reprojection-error helper: projects 3D points with the
// given pose/intrinsics, measures the pixel distance to the observed 2D
// points, and flags those within the threshold as inliers — TODO confirm.
// ---------------------------------------------------------------------------
811 std::vector<cv::Point3f> opoints,
812 std::vector<cv::Point2f> ipoints,
813 const cv::Mat & cameraMatrix,
814 const cv::Mat & distCoeffs,
815 const cv::Mat & rvec,
816 const cv::Mat & tvec,
817 float reprojErrorThreshold,
818 std::vector<int> & inliers)
820 UASSERT(opoints.size() == ipoints.size());
// Project all 3D points into the image with the current pose estimate.
823 std::vector<cv::Point2f> projpoints;
824 projectPoints(opoints, rvec, tvec, cameraMatrix, distCoeffs, projpoints);
// `count` is set on elided lines (presumably opoints.size()).
826 inliers.resize(
count,0);
827 std::vector<float> err(
count);
// Pixel distance between observed and reprojected point (loop header elided).
831 float e = (
float)cv::norm( ipoints[
i] - projpoints[
i]);
832 if(
e <= reprojErrorThreshold)
// ---------------------------------------------------------------------------
// NOTE(review): mangled fragment — function name line, the initial RANSAC
// solve, loop/brace structure (including the do-{...}-while whose condition
// is visible at embedded line 968), and trailing code are elided; the
// definition also runs past the end of this excerpt.
//
// Appears to be a PnP solve with iterative model refinement (PCL-style
// "refineModel" loop): repeatedly re-solve PnP on the current inlier set,
// re-score all points, adapt the error threshold from the error variance,
// and stop when the inlier set stabilizes, oscillates, or the iteration
// budget is exhausted — TODO confirm against the complete file.
// ---------------------------------------------------------------------------
844 const std::vector<cv::Point3f> & objectPoints,
845 const std::vector<cv::Point2f> & imagePoints,
846 const cv::Mat & cameraMatrix,
847 const cv::Mat & distCoeffs,
850 bool useExtrinsicGuess,
852 float reprojectionError,
854 std::vector<int> & inliers,
856 int refineIterations,
// PnP needs at least 4 points (handling on elided lines).
859 if(minInliersCount < 4)
// Refinement phase: only when the initial solve yielded enough inliers.
880 float inlierThreshold = reprojectionError;
881 if((
int)inliers.size() >= minInliersCount && refineIterations>0)
883 float error_threshold = inlierThreshold;
884 int refine_iterations = 0;
885 bool inlier_changed =
false, oscillating =
false;
886 std::vector<int> new_inliers, prev_inliers = inliers;
// History of inlier-set sizes, used below to detect oscillation.
887 std::vector<size_t> inliers_sizes;
889 cv::Mat new_model_rvec = rvec;
890 cv::Mat new_model_tvec = tvec;
// Gather the current inlier correspondences for the next re-solve.
895 std::vector<cv::Point3f> opoints_inliers(prev_inliers.size());
896 std::vector<cv::Point2f> ipoints_inliers(prev_inliers.size());
897 for(
unsigned int i=0;
i<prev_inliers.size(); ++
i)
899 opoints_inliers[
i] = objectPoints[prev_inliers[
i]];
900 ipoints_inliers[
i] = imagePoints[prev_inliers[
i]];
903 UDEBUG(
"inliers=%d refine_iterations=%d, rvec=%f,%f,%f tvec=%f,%f,%f", (
int)prev_inliers.size(), refine_iterations,
904 *new_model_rvec.ptr<
double>(0), *new_model_rvec.ptr<
double>(1), *new_model_rvec.ptr<
double>(2),
905 *new_model_tvec.ptr<
double>(0), *new_model_tvec.ptr<
double>(1), *new_model_tvec.ptr<
double>(2));
// Re-solve PnP on inliers only, seeded with the previous model (guess=true).
908 cv::solvePnP(opoints_inliers, ipoints_inliers, cameraMatrix, distCoeffs, new_model_rvec, new_model_tvec,
true, flags);
909 inliers_sizes.push_back(prev_inliers.size());
911 UDEBUG(
"rvec=%f,%f,%f tvec=%f,%f,%f",
912 *new_model_rvec.ptr<
double>(0), *new_model_rvec.ptr<
double>(1), *new_model_rvec.ptr<
double>(2),
913 *new_model_tvec.ptr<
double>(0), *new_model_tvec.ptr<
double>(1), *new_model_tvec.ptr<
double>(2));
// Re-score ALL points against the refined model to get the new inlier set.
916 std::vector<float> err =
computeReprojErrors(objectPoints, imagePoints, cameraMatrix, distCoeffs, new_model_rvec, new_model_tvec, error_threshold, new_inliers);
917 UDEBUG(
"RANSAC refineModel: Number of inliers found (before/after): %d/%d, with an error threshold of %f.",
918 (
int)prev_inliers.size (), (
int)new_inliers.size (), error_threshold);
// Bail-out checks (bodies elided): too few inliers / budget exhausted.
920 if ((
int)new_inliers.size() < minInliersCount)
923 if (refine_iterations >= refineIterations)
// Adapt the threshold from the error distribution, capped at the original.
931 float m =
uMean(err.data(), err.size());
932 float variance =
uVariance(err.data(), err.size());
933 error_threshold =
std::min(inlierThreshold, refineSigma *
float(
sqrt(variance)));
935 UDEBUG (
"RANSAC refineModel: New estimated error threshold: %f (variance=%f mean=%f) on iteration %d out of %d.",
936 error_threshold, variance,
m, refine_iterations, refineIterations);
937 inlier_changed =
false;
// Inlier set changed size: check the size history for A-B-A-B oscillation.
941 if (new_inliers.size () != prev_inliers.size ())
944 if ((
int)inliers_sizes.size () >= minInliersCount)
946 if (inliers_sizes[inliers_sizes.size () - 1] == inliers_sizes[inliers_sizes.size () - 3] &&
947 inliers_sizes[inliers_sizes.size () - 2] == inliers_sizes[inliers_sizes.size () - 4])
953 inlier_changed =
true;
// Same size: compare element-wise to detect membership changes.
958 for (
size_t i = 0;
i < prev_inliers.size (); ++
i)
961 if (prev_inliers[
i] != new_inliers[
i])
963 inlier_changed =
true;
// Iterate until the inlier set stabilizes or the budget runs out.
968 while (inlier_changed && ++refine_iterations < refineIterations);
// Post-loop validation (branch bodies elided).
971 if ((
int)prev_inliers.size() < minInliersCount)
973 UWARN (
"RANSAC refineModel: Refinement failed: got very low inliers (%d)!", (
int)prev_inliers.size());
978 UDEBUG(
"RANSAC refineModel: Detected oscillations in the model refinement.");
// Commit the refined model back to the caller's rvec/tvec.
982 rvec = new_model_rvec;
983 tvec = new_model_tvec;