#include <opencv2/core/core_c.h>

#if defined(HAVE_OPENCV_XFEATURES2D) && (CV_MAJOR_VERSION > 3 || (CV_MAJOR_VERSION==3 && CV_MINOR_VERSION >=4 && CV_SUBMINOR_VERSION >= 1))
#include <opencv2/xfeatures2d.hpp>

#ifdef HAVE_OPENCV_CUDAOPTFLOW
#include <opencv2/cudaoptflow.hpp>
#include <opencv2/cudaimgproc.hpp>
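// Note: the surrounding #if/#endif structure is only partially shown in this excerpt.
// Below, the registration parameters are initialized to their compiled-in defaults
// (Parameters::defaultVis*()); they are presumably overridden later when the
// parameter map is parsed.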
_minInliers(Parameters::defaultVisMinInliers()),
_inlierDistance(Parameters::defaultVisInlierDistance()),
_iterations(Parameters::defaultVisIterations()),
_refineIterations(Parameters::defaultVisRefineIterations()),
_epipolarGeometryVar(Parameters::defaultVisEpipolarGeometryVar()),
_estimationType(Parameters::defaultVisEstimationType()),
_forwardEstimateOnly(Parameters::defaultVisForwardEstOnly()),
_PnPReprojError(Parameters::defaultVisPnPReprojError()),
_PnPRefineIterations(Parameters::defaultVisPnPRefineIterations()),
_PnPVarMedianRatio(Parameters::defaultVisPnPVarianceMedianRatio()),
_PnPMaxVar(Parameters::defaultVisPnPMaxVariance()),
_PnPSplitLinearCovarianceComponents(Parameters::defaultVisPnPSplitLinearCovComponents()),
_multiSamplingPolicy(Parameters::defaultVisPnPSamplingPolicy()),
_correspondencesApproach(Parameters::defaultVisCorType()),
_flowWinSize(Parameters::defaultVisCorFlowWinSize()),
_flowIterations(Parameters::defaultVisCorFlowIterations()),
_flowMaxLevel(Parameters::defaultVisCorFlowMaxLevel()),
_gmsWithRotation(Parameters::defaultGMSWithRotation()),
_gmsWithScale(Parameters::defaultGMSWithScale()),
_gmsThresholdFactor(Parameters::defaultGMSThresholdFactor()),
_guessWinSize(Parameters::defaultVisCorGuessWinSize()),
_guessMatchToProjection(Parameters::defaultVisCorGuessMatchToProjection()),
_bundleAdjustment(Parameters::defaultVisBundleAdjustment()),
_depthAsMask(Parameters::defaultVisDepthAsMask()),
_minInliersDistributionThr(Parameters::defaultVisMinInliersDistribution()),
_maxInliersMeanDistance(Parameters::defaultVisMeanInliersDistance()),
#ifdef RTABMAP_PYTHON
UWARN("%s should be >= 6 but it is set to %d, setting to 6.", Parameters::kVisMinInliers().c_str(), _minInliers);
#ifndef HAVE_OPENCV_CUDAOPTFLOW
UERROR("%s is enabled but RTAB-Map is not built with OpenCV CUDA, disabling it.", Parameters::kVisCorFlowGpu().c_str());
#ifndef RTABMAP_PYTHON
UWARN("%s is set to 6 but RTAB-Map is not built with Python3 support, using default %d.",
    Parameters::kVisCorNNType().c_str(), Parameters::defaultVisCorNNType());
_nnType = Parameters::defaultVisCorNNType();
int iterations = _pyMatcher?_pyMatcher->iterations():Parameters::defaultPyMatcherIterations();
float matchThr = _pyMatcher?_pyMatcher->matchThreshold():Parameters::defaultPyMatcherThreshold();
std::string path = _pyMatcher?_pyMatcher->path():Parameters::defaultPyMatcherPath();
bool cuda = _pyMatcher?_pyMatcher->cuda():Parameters::defaultPyMatcherCuda();
std::string model = _pyMatcher?_pyMatcher->model():Parameters::defaultPyMatcherModel();
UERROR("%s parameter should be set to use Python3 matching (%s=6), using default %d.",
    Parameters::kPyMatcherPath().c_str(),
    Parameters::kVisCorNNType().c_str(),
    Parameters::defaultVisCorNNType());
_nnType = Parameters::defaultVisCorNNType();
#if !defined(HAVE_OPENCV_XFEATURES2D) || (CV_MAJOR_VERSION == 3 && (CV_MINOR_VERSION<4 || CV_MINOR_VERSION==4 && CV_SUBMINOR_VERSION<1))
UWARN("%s is set to 7 but RTAB-Map is not built with OpenCV's xfeatures2d support (OpenCV >= 3.4.1 also required), using default %d.",
    Parameters::kVisCorNNType().c_str(), Parameters::defaultVisCorNNType());
_nnType = Parameters::defaultVisCorNNType();
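// Parse the incoming parameter map: each key is split on '/' to get its group,
// then the individual Vis*/Kp* values checked below are read when present.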
for(ParametersMap::const_iterator iter=parameters.begin(); iter!=parameters.end(); ++iter)
std::string group = uSplit(iter->first, '/').front();
if(uContains(parameters, Parameters::kVisCorNNType()))
if(uContains(parameters, Parameters::kVisCorNNDR()))
if(uContains(parameters, Parameters::kKpByteToFloat()))
if(uContains(parameters, Parameters::kVisFeatureType()))
if(uContains(parameters, Parameters::kVisMaxFeatures()))
if(uContains(parameters, Parameters::kVisSSC()))
if(uContains(parameters, Parameters::kVisMaxDepth()))
if(uContains(parameters, Parameters::kVisMinDepth()))
if(uContains(parameters, Parameters::kVisRoiRatios()))
if(uContains(parameters, Parameters::kVisSubPixEps()))
if(uContains(parameters, Parameters::kVisSubPixIterations()))
if(uContains(parameters, Parameters::kVisSubPixWinSize()))
if(uContains(parameters, Parameters::kVisGridRows()))
if(uContains(parameters, Parameters::kVisGridCols()))
#ifdef RTABMAP_PYTHON
UDEBUG("Input(%d): from=%d words, %d 3D words, %d words descriptors, %d kpts, %d kpts3D, %d descriptors, image=%dx%d models=%d stereo=%d",
    (int)fromSignature.getWords().size(),
UDEBUG("Input(%d): to=%d words, %d 3D words, %d words descriptors, %d kpts, %d kpts3D, %d descriptors, image=%dx%d models=%d stereo=%d",
info.projectedIDs.clear();
UDEBUG("Bypassing feature matching as descriptors and images are empty. We assume features are already matched.");
std::vector<cv::KeyPoint> kptsFrom;
std::vector<int> orignalWordsFromIds;
int kptsFromSource = 0;
if(fromSignature.getWords().empty())
if(!imageFrom.empty())
if(imageFrom.channels() > 1)
cv::cvtColor(imageFrom, tmp, cv::COLOR_BGR2GRAY);
UWARN("%s is true, but RGB size (%dx%d) modulo depth size (%dx%d) is not 0. Ignoring depth mask for feature detection.",
    Parameters::kVisDepthAsMask().c_str(),
orignalWordsFromIds.resize(fromSignature.getWords().size());
bool allUniques = true;
int previousIdAdded = 0;
UASSERT(iter->second>=0 && iter->second<(int)orignalWordsFromIds.size());
orignalWordsFromIds[iter->second] = iter->first;
if(i>0 && iter->first==previousIdAdded)
previousIdAdded = iter->first;
UDEBUG("IDs are not unique, IDs will be regenerated!");
orignalWordsFromIds.clear();
std::multimap<int, int> wordsFrom;
std::multimap<int, int> wordsTo;
std::vector<cv::KeyPoint> wordsKptsFrom;
std::vector<cv::KeyPoint> wordsKptsTo;
std::vector<cv::Point3f> words3From;
std::vector<cv::Point3f> words3To;
cv::Mat wordsDescFrom;
#ifdef HAVE_OPENCV_CUDAOPTFLOW
cv::cuda::GpuMat d_imageFrom;
cv::cuda::GpuMat d_imageTo;
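// Optical-flow correspondence path: the 'from' keypoints are tracked into the
// 'to' image with pyramidal Lucas-Kanade, on GPU when built with
// HAVE_OPENCV_CUDAOPTFLOW, otherwise on CPU.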
UDEBUG("GPU optical flow: preparing GPU image data...");
d_imageFrom = fromSignature.sensorData().imageRawGpu();
if(d_imageFrom.empty() && !imageFrom.empty()) {
d_imageFrom = cv::cuda::GpuMat(imageFrom);
if(d_imageFrom.channels() > 1) {
cv::cuda::GpuMat tmp;
cv::cuda::cvtColor(d_imageFrom, tmp, cv::COLOR_BGR2GRAY);
if(fromSignature.sensorData().imageRawGpu().empty())
fromSignature.sensorData().setImageRawGpu(d_imageFrom);
d_imageTo = toSignature.sensorData().imageRawGpu();
if(d_imageTo.empty() && !imageTo.empty()) {
d_imageTo = cv::cuda::GpuMat(imageTo);
if(d_imageTo.channels() > 1) {
cv::cuda::GpuMat tmp;
cv::cuda::cvtColor(d_imageTo, tmp, cv::COLOR_BGR2GRAY);
if(toSignature.sensorData().imageRawGpu().empty())
toSignature.sensorData().setImageRawGpu(d_imageTo);
UDEBUG("GPU optical flow: preparing GPU image data... done!");
if(imageFrom.channels() > 1)
cv::cvtColor(imageFrom, tmp, cv::COLOR_BGR2GRAY);
if(imageTo.channels() > 1)
cv::cvtColor(imageTo, tmp, cv::COLOR_BGR2GRAY);
std::vector<cv::Point3f> kptsFrom3D;
if(kptsFrom.size() == fromSignature.getWords3().size())
if(!imageFrom.empty() && !imageTo.empty())
std::vector<cv::Point2f> cornersFrom;
cv::KeyPoint::convert(kptsFrom, cornersFrom);
std::vector<cv::Point2f> cornersTo;
cv::Mat R = (cv::Mat_<double>(3,3) <<
    (double)guessCameraRef.r11(), (double)guessCameraRef.r12(), (double)guessCameraRef.r13(),
    (double)guessCameraRef.r21(), (double)guessCameraRef.r22(), (double)guessCameraRef.r23(),
    (double)guessCameraRef.r31(), (double)guessCameraRef.r32(), (double)guessCameraRef.r33());
cv::Mat rvec(1,3, CV_64FC1);
cv::Rodrigues(R, rvec);
cv::Mat tvec = (cv::Mat_<double>(1,3) << (double)guessCameraRef.x(), (double)guessCameraRef.y(), (double)guessCameraRef.z());
cv::projectPoints(kptsFrom3D, rvec, tvec, K, cv::Mat(), cornersTo);
cornersTo = cornersFrom;
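// Multi-camera case: compute the inverse of (guess * localTransform) for each
// camera, then re-project each 3D point into the sub-image of the camera that
// sees it (sub-images are stacked horizontally, hence the model.imageWidth()*c
// offset applied below).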
std::vector<Transform> inverseTransforms(nCameras);
for(int c=0; c<nCameras; ++c)
inverseTransforms[c] = (guess * localTransform).inverse();
UDEBUG("inverse transforms: cam %d -> %s", c, inverseTransforms[c].prettyPrint().c_str());
UASSERT(kptsFrom3D.size() == cornersTo.size());
for(size_t i=0; i<kptsFrom3D.size(); ++i)
int startIndex = cornersFrom[i].x/subImageWidth;
UASSERT(startIndex < nCameras);
for(int ci=0; ci < nCameras; ++ci)
int c = (ci+startIndex) % nCameras;
if(ptsInCamFrame.z > 0)
model.reproject(ptsInCamFrame.x, ptsInCamFrame.y, ptsInCamFrame.z, u, v);
cornersTo[i].x = u+model.imageWidth()*c;
UDEBUG("Projected %d/%ld points inside %d cameras (time=%fs)",
    inFrame, cornersTo.size(), nCameras, t.ticks());
UDEBUG("guessSet = %d", guessSet?1:0);
std::vector<unsigned char> status;
#ifdef HAVE_OPENCV_CUDAOPTFLOW
UDEBUG("cv::cuda::SparsePyrLKOpticalFlow transfer host to device begin");
cv::cuda::GpuMat d_cornersFrom(cornersFrom);
cv::cuda::GpuMat d_cornersTo(cornersTo);
UDEBUG("cv::cuda::SparsePyrLKOpticalFlow transfer host to device end");
cv::cuda::GpuMat d_status;
cv::Ptr<cv::cuda::SparsePyrLKOpticalFlow> d_pyrLK_sparse = cv::cuda::SparsePyrLKOpticalFlow::create(
UDEBUG("cv::cuda::SparsePyrLKOpticalFlow calc begin");
d_pyrLK_sparse->calc(d_imageFrom, d_imageTo, d_cornersFrom, d_cornersTo, d_status);
UDEBUG("cv::cuda::SparsePyrLKOpticalFlow calc end");
UDEBUG("cv::cuda::SparsePyrLKOpticalFlow transfer device to host begin");
cornersTo = std::vector<cv::Point2f>(d_cornersTo.cols);
cv::Mat matCornersTo(1, d_cornersTo.cols, CV_32FC2, (void*)&cornersTo[0]);
d_cornersTo.download(matCornersTo);
status = std::vector<unsigned char>(d_status.cols);
cv::Mat matStatus(1, d_status.cols, CV_8UC1, (void*)&status[0]);
d_status.download(matStatus);
UDEBUG("cv::cuda::SparsePyrLKOpticalFlow transfer device to host end");
std::vector<float> err;
UDEBUG("cv::calcOpticalFlowPyrLK() begin");
cv::calcOpticalFlowPyrLK(
    cv::OPTFLOW_LK_GET_MIN_EIGENVALS | (guessSet ? cv::OPTFLOW_USE_INITIAL_FLOW : 0), 1e-4);
UDEBUG("cv::calcOpticalFlowPyrLK() end");
UASSERT(kptsFrom.size() == kptsFrom3D.size());
std::vector<cv::KeyPoint> kptsTo(kptsFrom.size());
std::vector<cv::Point3f> kptsFrom3DKept(kptsFrom3D.size());
std::vector<int> orignalWordsFromIdsCpy = orignalWordsFromIds;
for(unsigned int i=0; i<status.size(); ++i)
if(orignalWordsFromIdsCpy.size())
orignalWordsFromIds[ki] = orignalWordsFromIdsCpy[i];
kptsFrom[ki] = cv::KeyPoint(cornersFrom[i], 1);
kptsFrom3DKept[ki] = kptsFrom3D[i];
kptsTo[ki++] = cv::KeyPoint(cornersTo[i], 1);
if(orignalWordsFromIds.size())
orignalWordsFromIds.resize(ki);
kptsFrom3DKept.resize(ki);
kptsFrom3D = kptsFrom3DKept;
std::vector<cv::Point3f> kptsTo3D;
UASSERT(kptsFrom.size() == kptsFrom3DKept.size());
UASSERT(kptsFrom.size() == kptsTo.size());
UASSERT(kptsTo3D.size() == 0 || kptsTo.size() == kptsTo3D.size());
for(unsigned int i=0; i<kptsFrom3DKept.size(); ++i)
int id = !orignalWordsFromIds.empty()?orignalWordsFromIds[i]:i;
wordsFrom.insert(wordsFrom.end(), std::make_pair(id, wordsFrom.size()));
wordsKptsFrom.push_back(kptsFrom[i]);
words3From.push_back(kptsFrom3DKept[i]);
wordsTo.insert(wordsTo.end(), std::make_pair(id, wordsTo.size()));
wordsKptsTo.push_back(kptsTo[i]);
if(!kptsTo3D.empty())
words3To.push_back(kptsTo3D[i]);
if(imageFrom.empty())
UERROR("Optical flow correspondences requires images in data!");
UASSERT(kptsFrom.size() == kptsFrom3D.size());
for(unsigned int i=0; i<kptsFrom3D.size(); ++i)
int id = !orignalWordsFromIds.empty()?orignalWordsFromIds[i]:i;
wordsFrom.insert(wordsFrom.end(), std::make_pair(id, wordsFrom.size()));
wordsKptsFrom.push_back(kptsFrom[i]);
words3From.push_back(kptsFrom3D[i]);
toSignature.sensorData().setFeatures(std::vector<cv::KeyPoint>(), std::vector<cv::Point3f>(), cv::Mat());
std::vector<cv::KeyPoint> kptsTo;
int kptsToSource = 0;
if(imageTo.channels() > 1)
cv::cvtColor(imageTo, tmp, cv::COLOR_BGR2GRAY);
UWARN("%s is true, but RGB size (%dx%d) modulo depth size (%dx%d) is not 0. Ignoring depth mask for feature detection.",
    Parameters::kVisDepthAsMask().c_str(),
UDEBUG("kptsFrom=%d kptsFromSource=%d", (int)kptsFrom.size(), kptsFromSource);
UDEBUG("kptsTo=%d kptsToSource=%d", (int)kptsTo.size(), kptsToSource);
cv::Mat descriptorsFrom;
if(kptsFromSource == 2 &&
else if(kptsFromSource == 1 &&
else if(!imageFrom.empty())
if(imageFrom.channels() > 1)
cv::cvtColor(imageFrom, tmp, cv::COLOR_BGR2GRAY);
UDEBUG("cleared orignalWordsFromIds");
orignalWordsFromIds.clear();
cv::Mat descriptorsTo;
if(kptsToSource == 2 &&
else if(kptsToSource == 1 &&
else if(!imageTo.empty())
if(imageTo.channels() > 1)
cv::cvtColor(imageTo, tmp, cv::COLOR_BGR2GRAY);
std::vector<cv::Point3f> kptsFrom3D;
std::vector<cv::Point3f> kptsTo3D;
if(kptsFromSource == 2 &&
    kptsFrom.size() == fromSignature.getWords3().size())
else if(kptsFromSource == 1 &&
if(fromSignature.getWords3().size() && kptsFrom.size() != fromSignature.getWords3().size())
UWARN("kptsFrom (%d) is not the same size as fromSignature.getWords3() (%d), there "
    "is maybe a problem with the logic above (getWords3() should be null or equal to kptsfrom). Regenerating kptsFrom3D...",
UWARN("kptsFrom (%d) is not the same size as fromSignature.sensorData().keypoints3D() (%d), there "
    "is maybe a problem with the logic above (keypoints3D should be null or equal to kptsfrom). Regenerating kptsFrom3D...",
UDEBUG("generated kptsFrom3D=%d", (int)kptsFrom3D.size());
if(!kptsFrom3D.empty() &&
if(kptsToSource == 2 && kptsTo.size() == toSignature.getWords3().size())
else if(kptsToSource == 1 &&
UWARN("kptsTo (%d) is not the same size as toSignature.getWords3() (%d), there "
    "is maybe a problem with the logic above (getWords3() should be null or equal to kptsTo). Regenerating kptsTo3D...",
UWARN("kptsTo (%d) is not the same size as toSignature.sensorData().keypoints3D() (%d), there "
    "is maybe a problem with the logic above (keypoints3D() should be null or equal to kptsTo). Regenerating kptsTo3D...",
if(kptsTo3D.size() &&
UASSERT(kptsFrom.empty() || descriptorsFrom.rows == 0 || int(kptsFrom.size()) == descriptorsFrom.rows);
UDEBUG("descriptorsFrom=%d", descriptorsFrom.rows);
UDEBUG("descriptorsTo=%d", descriptorsTo.rows);
UDEBUG("orignalWordsFromIds=%d", (int)orignalWordsFromIds.size());
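// Descriptor-based matching. When a motion guess and a valid calibration are
// available, the 'from' 3D words are projected into the 'to' image and matched
// locally within _guessWinSize pixels; otherwise a global matching strategy is
// used further below (brute force, visual word dictionary or the optional
// Python matcher).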
if(descriptorsFrom.rows > 0 && descriptorsTo.rows > 0)
std::vector<CameraModel> models;
bool isCalibrated = !models.empty();
for(size_t i=0; i<models.size() && isCalibrated; ++i)
isCalibrated = models[i].isValidForProjection();
if(isCalibrated && (models[i].imageWidth()==0 || models[i].imageHeight()==0))
isCalibrated = false;
UASSERT((int)kptsTo.size() == descriptorsTo.rows);
UASSERT((int)kptsFrom3D.size() == descriptorsFrom.rows);
std::vector<cv::Point2f> cornersProjected;
std::vector<int> projectedIndexToDescIndex;
float subImageWidth = models[0].imageWidth();
for(size_t m=0; m<models.size(); ++m)
cv::Mat R = (cv::Mat_<double>(3,3) <<
    (double)guessCameraRef.r11(), (double)guessCameraRef.r12(), (double)guessCameraRef.r13(),
    (double)guessCameraRef.r21(), (double)guessCameraRef.r22(), (double)guessCameraRef.r23(),
    (double)guessCameraRef.r31(), (double)guessCameraRef.r32(), (double)guessCameraRef.r33());
cv::Mat rvec(1,3, CV_64FC1);
cv::Rodrigues(R, rvec);
cv::Mat tvec = (cv::Mat_<double>(1,3) << (double)guessCameraRef.x(), (double)guessCameraRef.y(), (double)guessCameraRef.z());
cv::Mat K = models[m].K();
std::vector<cv::Point2f> projected;
cv::projectPoints(kptsFrom3D, rvec, tvec, K, cv::Mat(), projected);
UDEBUG("Projected points=%d", (int)projected.size());
UASSERT((int)projected.size() == descriptorsFrom.rows);
int cornersInFrame = 0;
for(unsigned int i=0; i<projected.size(); ++i)
if(uIsInBounds(projected[i].x, 0.0f, float(models[m].imageWidth()-1)) &&
    uIsInBounds(projected[i].y, 0.0f, float(models[m].imageHeight()-1)) &&
if(added.find(i) != added.end())
projectedIndexToDescIndex.push_back(i);
projected[i].x += subImageWidth*float(m);
cornersProjected.push_back(projected[i]);
UDEBUG("corners in frame=%d (camera index=%ld)", cornersInFrame, m);
UDEBUG("guessMatchToProjection=%d, cornersProjected=%d orignalWordsFromIds=%d (added=%ld, duplicates=%d)",
    added.size(), duplicates);
if(cornersProjected.size())
UDEBUG("match frame to projected");
std::vector< std::vector<size_t> > indices;
std::vector<std::vector<float> > dists;
std::vector<cv::Point2f> pointsTo;
cv::KeyPoint::convert(kptsTo, pointsTo);
UASSERT(descriptorsFrom.cols == descriptorsTo.cols);
UASSERT(descriptorsFrom.rows == (int)kptsFrom.size());
UASSERT((int)pointsToMat.rows == descriptorsTo.rows);
UDEBUG("radius search done for guess");
int newToId = !orignalWordsFromIds.empty()?fromSignature.getWords().rbegin()->first+1:descriptorsFrom.rows;
std::map<int,int> addedWordsFrom;
std::map<int, int> duplicates;
cv::Mat descriptors(10, descriptorsTo.cols, descriptorsTo.type());
for(unsigned int i = 0; i < pointsToMat.rows; ++i)
int matchedIndex = -1;
std::vector<int> descriptorsIndices(indices[i].size());
descriptorsFrom.row(projectedIndexToDescIndex[indices[i].at(j)]).copyTo(descriptors.row(oi));
descriptorsIndices[oi++] = indices[i].at(j);
descriptorsIndices.resize(oi);
cv::BFMatcher matcher(descriptors.type()==CV_8U?cv::NORM_HAMMING:cv::NORM_L2SQR, _nnType == 5);
std::vector<cv::DMatch> matches;
matcher.match(descriptorsTo.row(i), cv::Mat(descriptors, cv::Range(0, oi)), matches);
matchedIndex = descriptorsIndices.at(matches.at(0).trainIdx);
std::vector<std::vector<cv::DMatch> > matches;
matcher.knnMatch(descriptorsTo.row(i), cv::Mat(descriptors, cv::Range(0, oi)), matches, 2);
matchedIndex = descriptorsIndices.at(matches[0].at(0).trainIdx);
if(matchedIndex >= 0)
matchedIndex = projectedIndexToDescIndex[matchedIndex];
int id = !orignalWordsFromIds.empty()?orignalWordsFromIds[matchedIndex]:matchedIndex;
if(addedWordsFrom.find(matchedIndex) != addedWordsFrom.end())
id = addedWordsFrom.at(matchedIndex);
duplicates.insert(std::make_pair(matchedIndex, id));
addedWordsFrom.insert(std::make_pair(matchedIndex, id));
wordsFrom.insert(wordsFrom.end(), std::make_pair(id, wordsFrom.size()));
if(!kptsFrom.empty())
wordsKptsFrom.push_back(kptsFrom[matchedIndex]);
words3From.push_back(kptsFrom3D[matchedIndex]);
wordsDescFrom.push_back(descriptorsFrom.row(matchedIndex));
wordsTo.insert(wordsTo.end(), std::make_pair(id, wordsTo.size()));
wordsKptsTo.push_back(kptsTo[i]);
wordsDescTo.push_back(descriptorsTo.row(i));
if(!kptsTo3D.empty())
words3To.push_back(kptsTo3D[i]);
wordsTo.insert(wordsTo.end(), std::make_pair(newToId, wordsTo.size()));
wordsKptsTo.push_back(kptsTo[i]);
wordsDescTo.push_back(descriptorsTo.row(i));
if(!kptsTo3D.empty())
words3To.push_back(kptsTo3D[i]);
UDEBUG("addedWordsFrom=%d/%d (duplicates=%d, newWords=%d), kptsTo=%d, wordsTo=%d, words3From=%d",
    (int)addedWordsFrom.size(), (int)cornersProjected.size(), (int)duplicates.size(), newWords,
    (int)kptsTo.size(), (int)wordsTo.size(), (int)words3From.size());
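// 'From' words with finite 3D positions that were not matched above are still
// inserted so they remain available for the estimation step.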
int addWordsFromNotMatched = 0;
for(unsigned int i=0; i<kptsFrom3D.size(); ++i)
if(util3d::isFinite(kptsFrom3D[i]) && addedWordsFrom.find(i) == addedWordsFrom.end())
int id = !orignalWordsFromIds.empty()?orignalWordsFromIds[i]:i;
wordsFrom.insert(wordsFrom.end(), std::make_pair(id, wordsFrom.size()));
wordsKptsFrom.push_back(kptsFrom[i]);
wordsDescFrom.push_back(descriptorsFrom.row(i));
words3From.push_back(kptsFrom3D[i]);
++addWordsFromNotMatched;
UDEBUG("addWordsFromNotMatched=%d -> words3From=%d", addWordsFromNotMatched, (int)words3From.size());
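// Alternative direction (_guessMatchToProjection=true): each projected 'from'
// word searches for its match among the neighbouring 'to' keypoints instead of
// the other way around.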
UDEBUG("match projected to frame");
std::vector<cv::Point2f> pointsTo;
cv::KeyPoint::convert(kptsTo, pointsTo);
std::vector< std::vector<size_t> > indices;
std::vector<std::vector<float> > dists;
UASSERT(descriptorsFrom.cols == descriptorsTo.cols);
UASSERT(descriptorsFrom.rows == (int)kptsFrom.size());
UASSERT((int)pointsToMat.rows == descriptorsTo.rows);
UDEBUG("radius search done for guess");
std::set<int> addedWordsTo;
std::set<int> addedWordsFrom;
double bruteForceTotalTime = 0.0;
double bruteForceDescCopy = 0.0;
cv::Mat descriptors(10, descriptorsTo.cols, descriptorsTo.type());
for(unsigned int i = 0; i < cornersProjectedMat.rows; ++i)
int matchedIndexFrom = projectedIndexToDescIndex[i];
info.projectedIDs.push_back(!orignalWordsFromIds.empty()?orignalWordsFromIds[matchedIndexFrom]:matchedIndexFrom);
int matchedIndexTo = -1;
std::vector<int> descriptorsIndices(indices[i].size());
descriptorsTo.row(indices[i].at(j)).copyTo(descriptors.row(oi));
descriptorsIndices[oi++] = indices[i].at(j);
bruteForceDescCopy += bruteForceTimer.ticks();
cv::BFMatcher matcher(descriptors.type()==CV_8U?cv::NORM_HAMMING:cv::NORM_L2SQR, _nnType==5);
std::vector<cv::DMatch> matches;
matcher.match(descriptorsFrom.row(matchedIndexFrom), cv::Mat(descriptors, cv::Range(0, oi)), matches);
matchedIndexTo = descriptorsIndices.at(matches.at(0).trainIdx);
std::vector<std::vector<cv::DMatch> > matches;
matcher.knnMatch(descriptorsFrom.row(matchedIndexFrom), cv::Mat(descriptors, cv::Range(0, oi)), matches, 2);
bruteForceTotalTime+=bruteForceTimer.elapsed();
matchedIndexTo = descriptorsIndices.at(matches[0].at(0).trainIdx);
int id = !orignalWordsFromIds.empty()?orignalWordsFromIds[matchedIndexFrom]:matchedIndexFrom;
addedWordsFrom.insert(addedWordsFrom.end(), matchedIndexFrom);
wordsFrom.insert(wordsFrom.end(), std::make_pair(id, wordsFrom.size()));
if(!kptsFrom.empty())
wordsKptsFrom.push_back(kptsFrom[matchedIndexFrom]);
words3From.push_back(kptsFrom3D[matchedIndexFrom]);
wordsDescFrom.push_back(descriptorsFrom.row(matchedIndexFrom));
if( matchedIndexTo >= 0 &&
    addedWordsTo.find(matchedIndexTo) == addedWordsTo.end())
addedWordsTo.insert(matchedIndexTo);
wordsTo.insert(wordsTo.end(), std::make_pair(id, wordsTo.size()));
wordsKptsTo.push_back(kptsTo[matchedIndexTo]);
wordsDescTo.push_back(descriptorsTo.row(matchedIndexTo));
if(!kptsTo3D.empty())
words3To.push_back(kptsTo3D[matchedIndexTo]);
UDEBUG("bruteForceDescCopy=%fs, bruteForceTotalTime=%fs", bruteForceDescCopy, bruteForceTotalTime);
for(unsigned int i=0; i<kptsFrom3D.size(); ++i)
if(util3d::isFinite(kptsFrom3D[i]) && addedWordsFrom.find(i) == addedWordsFrom.end())
int id = !orignalWordsFromIds.empty()?orignalWordsFromIds[i]:i;
wordsFrom.insert(wordsFrom.end(), std::make_pair(id, wordsFrom.size()));
wordsKptsFrom.push_back(kptsFrom[i]);
wordsDescFrom.push_back(descriptorsFrom.row(i));
words3From.push_back(kptsFrom3D[i]);
int newToId = !orignalWordsFromIds.empty()?fromSignature.getWords().rbegin()->first+1:descriptorsFrom.rows;
for(unsigned int i = 0; i < kptsTo.size(); ++i)
if(addedWordsTo.find(i) == addedWordsTo.end())
wordsTo.insert(wordsTo.end(), std::make_pair(newToId, wordsTo.size()));
wordsKptsTo.push_back(kptsTo[i]);
wordsDescTo.push_back(descriptorsTo.row(i));
if(!kptsTo3D.empty())
words3To.push_back(kptsTo3D[i]);
UWARN("All projected points are outside the camera. Guess (%s) is wrong or images are not overlapping.", guess.prettyPrint().c_str());
if(guessSet && _guessWinSize > 0 && kptsFrom3D.size() && !isCalibrated)
UWARN("Calibration not found! Finding correspondences "
    "with the guess cannot be done, global matching is "
std::list<int> fromWordIds;
std::list<int> toWordIds;
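// Global matching (no usable guess): word IDs are generated either through the
// optional Python matcher / brute-force matching (with cross-check or GMS), or
// through a temporary visual word dictionary (kNN + NNDR).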
#ifdef RTABMAP_PYTHON
std::vector<int> fromWordIdsV(descriptorsFrom.rows);
for (int i = 0; i < descriptorsFrom.rows; ++i)
if(!orignalWordsFromIds.empty())
id = orignalWordsFromIds[i];
fromWordIds.push_back(id);
fromWordIdsV[i] = id;
if(descriptorsTo.rows)
std::vector<int> toWordIdsV(descriptorsTo.rows, 0);
std::vector<cv::DMatch> matches;
#ifdef RTABMAP_PYTHON
if(_nnType == 6 && _pyMatcher &&
    descriptorsTo.cols == descriptorsFrom.cols &&
    descriptorsTo.rows == (int)kptsTo.size() &&
    descriptorsTo.type() == CV_32F &&
    descriptorsFrom.type() == CV_32F &&
    descriptorsFrom.rows == (int)kptsFrom.size() &&
UDEBUG("Python matching");
matches = _pyMatcher->match(descriptorsTo, descriptorsFrom, kptsTo, kptsFrom, models[0].imageSize());
if(_nnType == 6 && _pyMatcher)
UDEBUG("Invalid inputs for Python matching (desc type=%d, only float descriptors supported, multicam not supported), doing bruteforce matching instead.", descriptorsFrom.type());
bool doCrossCheck = true;
#ifdef HAVE_OPENCV_XFEATURES2D
#if CV_MAJOR_VERSION > 3 || (CV_MAJOR_VERSION==3 && CV_MINOR_VERSION >=4 && CV_SUBMINOR_VERSION >= 1)
cv::Size imageSizeFrom;
imageSizeFrom = imageFrom.size();
if(!models.empty() && models[0].imageSize().height > 0 && models[0].imageSize().width > 0 &&
    imageSizeFrom.height > 0 && imageSizeFrom.width > 0)
doCrossCheck = false;
UDEBUG("Invalid inputs for GMS matching, image size should be set for both inputs, doing bruteforce matching instead.");
UDEBUG("BruteForce matching%s", _nnType!=7?" with crosscheck":" with GMS");
cv::BFMatcher matcher(descriptorsFrom.type()==CV_8U?cv::NORM_HAMMING:cv::NORM_L2SQR, doCrossCheck);
#if defined(HAVE_OPENCV_XFEATURES2D) && (CV_MAJOR_VERSION > 3 || (CV_MAJOR_VERSION==3 && CV_MINOR_VERSION >=4 && CV_SUBMINOR_VERSION >= 1))
std::vector<cv::DMatch> matchesGMS;
for(size_t i=0; i<toWordIdsV.size(); ++i)
int toId = toWordIdsV[i];
toId = fromWordIds.back()+i+1;
toWordIds.push_back(toId);
UDEBUG("VWDictionary knn matching");
if(orignalWordsFromIds.empty())
fromWordIds = dictionary.addNewWords(descriptorsFrom, 1);
for (int i = 0; i < descriptorsFrom.rows; ++i)
int id = orignalWordsFromIds[i];
fromWordIds.push_back(id);
if(descriptorsTo.rows)
toWordIds = dictionary.addNewWords(descriptorsTo, 2);
dictionary.clear(false);
std::multiset<int> fromWordIdsSet(fromWordIds.begin(), fromWordIds.end());
std::multiset<int> toWordIdsSet(toWordIds.begin(), toWordIds.end());
UASSERT(kptsFrom3D.empty() || fromWordIds.size() == kptsFrom3D.size());
UASSERT(int(fromWordIds.size()) == descriptorsFrom.rows);
for(std::list<int>::iterator iter=fromWordIds.begin(); iter!=fromWordIds.end(); ++iter)
if(fromWordIdsSet.count(*iter) == 1)
wordsFrom.insert(wordsFrom.end(), std::make_pair(*iter, wordsFrom.size()));
if (!kptsFrom.empty())
wordsKptsFrom.push_back(kptsFrom[i]);
if(!kptsFrom3D.empty())
words3From.push_back(kptsFrom3D[i]);
wordsDescFrom.push_back(descriptorsFrom.row(i));
UASSERT(kptsTo3D.size() == 0 || kptsTo3D.size() == kptsTo.size());
UASSERT(toWordIds.size() == kptsTo.size());
UASSERT(int(toWordIds.size()) == descriptorsTo.rows);
for(std::list<int>::iterator iter=toWordIds.begin(); iter!=toWordIds.end(); ++iter)
if(toWordIdsSet.count(*iter) == 1)
wordsTo.insert(wordsTo.end(), std::make_pair(*iter, wordsTo.size()));
wordsKptsTo.push_back(kptsTo[i]);
wordsDescTo.push_back(descriptorsTo.row(i));
if(!kptsTo3D.empty())
words3To.push_back(kptsTo3D[i]);
else if(descriptorsFrom.rows)
UASSERT(kptsFrom3D.empty() || int(kptsFrom3D.size()) == descriptorsFrom.rows);
for(int i=0; i<descriptorsFrom.rows; ++i)
wordsFrom.insert(wordsFrom.end(), std::make_pair(i, wordsFrom.size()));
wordsKptsFrom.push_back(kptsFrom[i]);
wordsDescFrom.push_back(descriptorsFrom.row(i));
if(!kptsFrom3D.empty())
words3From.push_back(kptsFrom3D[i]);
fromSignature.setWords(wordsFrom, wordsKptsFrom, words3From, wordsDescFrom);
toSignature.setWords(wordsTo, wordsKptsTo, words3To, wordsDescTo);
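// Words are now set on both signatures; the transformation is estimated next
// according to _estimationType (2D->2D epipolar geometry, 3D->2D PnP, or
// 3D->3D), optionally in both directions when _forwardEstimateOnly is false.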
cv::Mat covariance = cv::Mat::eye(6,6,CV_64FC1);
int inliersCount = 0;
int matchesCount = 0;
info.inliersIDs.clear();
info.matchesIDs.clear();
std::vector<int> inliers[2];
cv::Mat covariances[2];
covariances[0] = cv::Mat::eye(6,6,CV_64FC1);
covariances[1] = cv::Mat::eye(6,6,CV_64FC1);
signatureA = &fromSignature;
signatureB = &toSignature;
signatureA = &toSignature;
signatureB = &fromSignature;
UERROR("Calibrated camera required (multi-cameras not supported).");
double variance = 1.0f;
std::vector<int> matchesV;
std::map<int, cv::KeyPoint> wordsA;
std::map<int, cv::Point3f> words3A;
std::map<int, cv::KeyPoint> wordsB;
for(std::map<int, int>::iterator iter=uniqueWordsA.begin(); iter!=uniqueWordsA.end(); ++iter)
words3A.insert(std::make_pair(iter->first, signatureA->getWords3()[iter->second]));
for(std::map<int, int>::iterator iter=uniqueWordsB.begin(); iter!=uniqueWordsB.end(); ++iter)
covariances[dir] *= variance;
inliers[dir] = uKeys(inliers3D);
if(!cameraTransform.isNull())
transforms[dir] = cameraTransform.to3DoF();
transforms[dir] = cameraTransform;
else if(signatureA->getWords().size() == 0)
UERROR("Calibrated camera required. Id=%d Models=%d StereoModels=%d weight=%d",
#ifndef RTABMAP_OPENGV
UERROR("Multi-camera 2D-3D PnP registration is only available if rtabmap is built "
    "with OpenGV dependency. Use 3D-3D registration approach instead for multi-camera.");
std::vector<int> inliersV;
std::vector<int> matchesV;
std::map<int, cv::Point3f> words3A;
std::map<int, cv::Point3f> words3B;
std::map<int, cv::KeyPoint> wordsB;
for(std::map<int, int>::iterator iter=uniqueWordsA.begin(); iter!=uniqueWordsA.end(); ++iter)
words3A.insert(std::make_pair(iter->first, signatureA->getWords3()[iter->second]));
for(std::map<int, int>::iterator iter=uniqueWordsB.begin(); iter!=uniqueWordsB.end(); ++iter)
words3B.insert(std::make_pair(iter->first, signatureB->getWords3()[iter->second]));
std::vector<CameraModel> models;
UASSERT(models[0].isValidForProjection());
inliers[dir] = inliersV;
UASSERT(models.size() == 1 && models[0].isValidForProjection());
inliers[dir] = inliersV;
UDEBUG("inliers: %d/%d", (int)inliersV.size(), (int)matchesV.size());
if(transforms[dir].isNull())
msg = uFormat("Not enough inliers %d/%d (matches=%d) between %d and %d",
transforms[dir] = transforms[dir].to3DoF();
msg = uFormat("Not enough features in images (old=%d, new=%d, min=%d)",
std::vector<int> inliersV;
std::vector<int> matchesV;
std::map<int, cv::Point3f> words3A;
std::map<int, cv::Point3f> words3B;
for(std::map<int, int>::iterator iter=uniqueWordsA.begin(); iter!=uniqueWordsA.end(); ++iter)
words3A.insert(std::make_pair(iter->first, signatureA->getWords3()[iter->second]));
for(std::map<int, int>::iterator iter=uniqueWordsB.begin(); iter!=uniqueWordsB.end(); ++iter)
words3B.insert(std::make_pair(iter->first, signatureB->getWords3()[iter->second]));
inliers[dir] = inliersV;
UDEBUG("inliers: %d/%d", (int)inliersV.size(), (int)matchesV.size());
if(transforms[dir].isNull())
msg = uFormat("Not enough inliers %d/%d (matches=%d) between %d and %d",
transforms[dir] = transforms[dir].to3DoF();
msg = uFormat("Not enough 3D features in images (old=%d, new=%d, min=%d)",
UDEBUG("from->to=%s", transforms[0].prettyPrint().c_str());
UDEBUG("to->from=%s", transforms[1].prettyPrint().c_str());
std::vector<int> allInliers = inliers[0];
if(inliers[1].size())
std::set<int> allInliersSet(allInliers.begin(), allInliers.end());
unsigned int oi = allInliers.size();
allInliers.resize(allInliers.size() + inliers[1].size());
for(unsigned int i=0; i<inliers[1].size(); ++i)
if(allInliersSet.find(inliers[1][i]) == allInliersSet.end())
allInliers[oi++] = inliers[1][i];
allInliers.resize(oi);
std::vector<int> allMatches = matches[0];
std::set<int> allMatchesSet(allMatches.begin(), allMatches.end());
unsigned int oi = allMatches.size();
allMatches.resize(allMatches.size() + matches[1].size());
for(unsigned int i=0; i<matches[1].size(); ++i)
if(allMatchesSet.find(matches[1][i]) == allMatchesSet.end())
allMatches.resize(oi);
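// Inliers and matches from both estimation directions are merged above; when
// enabled (_bundleAdjustment), the forward transform is further refined with a
// local bundle adjustment over the merged inlier set.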
!transforms[0].isNull() &&
allInliers.size() &&
UDEBUG("Refine with bundle adjustment");
std::map<int, Transform> poses;
std::multimap<int, Link> links;
std::map<int, cv::Point3f> points3DMap;
poses.insert(std::make_pair(2, transforms[0]));
for(int i=0; i<2;++i)
cv::Mat cov = covariances[0].clone();
if(!transforms[1].isNull() && inliers[1].size())
cov = covariances[1].clone();
std::map<int, Transform> optimizedPoses;
std::map<int, std::vector<CameraModel> > models;
std::vector<CameraModel> cameraModelsFrom;
cameraModelsFrom.push_back(cameraModel);
std::vector<CameraModel> cameraModelsTo;
cameraModelsTo.push_back(cameraModel);
models.insert(std::make_pair(1, cameraModelsFrom));
models.insert(std::make_pair(2, cameraModelsTo));
std::map<int, std::map<int, FeatureBA> > wordReferences;
std::set<int> sbaOutliers;
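// Build the bundle-adjustment input: for each inlier word, its 3D position
// taken from the 'from' signature and its observations in both frames
// (FeatureBA with optional depth and a camera index for multi-camera setups).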
for(unsigned int i=0; i<allInliers.size(); ++i)
int wordId = allInliers[i];
int indexFrom = fromSignature.getWords().find(wordId)->second;
const cv::Point3f & pt3D = fromSignature.getWords3()[indexFrom];
sbaOutliers.insert(wordId);
points3DMap.insert(std::make_pair(wordId, pt3D));
std::map<int, FeatureBA> ptMap;
cv::KeyPoint kpt = fromSignature.getWordsKpts()[indexFrom];
int cameraIndex = 0;
const std::vector<CameraModel> & cam = models.at(1);
float subImageWidth = cam[0].imageWidth();
cameraIndex = int(kpt.pt.x / subImageWidth);
kpt.pt.x = kpt.pt.x - (subImageWidth*float(cameraIndex));
UASSERT(cam[cameraIndex].isValidForProjection());
ptMap.insert(std::make_pair(1, FeatureBA(kpt, depthFrom, cv::Mat(), cameraIndex)));
int indexTo = toSignature.getWords().find(wordId)->second;
cv::KeyPoint kpt = toSignature.getWordsKpts()[indexTo];
int cameraIndex = 0;
const std::vector<CameraModel> & cam = models.at(2);
float subImageWidth = cam[0].imageWidth();
cameraIndex = int(kpt.pt.x / subImageWidth);
kpt.pt.x = kpt.pt.x - (subImageWidth*float(cameraIndex));
UASSERT(cam[cameraIndex].isValidForProjection());
float depthTo = 0.0f;
ptMap.insert(std::make_pair(2, FeatureBA(kpt, depthTo, cv::Mat(), cameraIndex)));
wordReferences.insert(std::make_pair(wordId, ptMap));
optimizedPoses = sba->optimizeBA(1, poses, links, models, points3DMap, wordReferences, &sbaOutliers);
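// The optimized poses are accepted only if both are valid; words flagged as
// outliers by the optimizer are removed from the inlier set below.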
if(optimizedPoses.size() == 2 &&
    !optimizedPoses.begin()->second.isNull() &&
    !optimizedPoses.rbegin()->second.isNull())
UDEBUG("Pose optimization: %s -> %s", transforms[0].prettyPrint().c_str(), optimizedPoses.rbegin()->second.prettyPrint().c_str());
if(sbaOutliers.size())
std::vector<int> newInliers(allInliers.size());
for(unsigned int i=0; i<allInliers.size(); ++i)
if(sbaOutliers.find(allInliers[i]) == sbaOutliers.end())
newInliers[oi++] = allInliers[i];
newInliers.resize(oi);
UDEBUG("BA outliers ratio %f", float(sbaOutliers.size())/float(allInliers.size()));
allInliers = newInliers;
msg = uFormat("Not enough inliers after bundle adjustment %d/%d (matches=%d) between %d and %d",
    (int)allInliers.size(), _minInliers, (int)allInliers.size()+sbaOutliers.size(), fromSignature.id(), toSignature.id());
transforms[0] = optimizedPoses.rbegin()->second;
info.inliersIDs = allInliers;
info.matchesIDs = allMatches;
inliersCount = (int)allInliers.size();
matchesCount = (int)allMatches.size();
if(!transforms[1].isNull())
transforms[1] = transforms[1].inverse();
if(transforms[0].isNull())
covariance = covariances[1];
covariance = (covariances[0]+covariances[1])/2.0f;
covariance = covariances[0];
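// Optional quality checks on the final inlier set: mean inlier distance
// (compared against _maxInliersMeanDistance) and image-plane distribution via
// PCA (compared against _minInliersDistributionThr).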
std::vector<CameraModel> cameraModelsTo;
if(cameraModelsTo.size() >= 1 && cameraModelsTo[0].isValidForReprojection())
if(cameraModelsTo[0].imageWidth()>0 && cameraModelsTo[0].imageHeight()>0)
pcaData = cv::Mat(allInliers.size(), 2, CV_32FC1);
UERROR("Invalid calibration image size (%dx%d), cannot compute inliers distribution! (see %s=%f)", cameraModelsTo[0].imageWidth(), cameraModelsTo[0].imageHeight(), Parameters::kVisMinInliersDistribution().c_str(), _minInliersDistributionThr);
std::vector<float> distances;
distances.reserve(allInliers.size());
for(unsigned int i=0; i<allInliers.size(); ++i)
std::multimap<int, int>::const_iterator wordsIter = toSignature.getWords().find(allInliers[i]);
const cv::KeyPoint & kpt = toSignature.getWordsKpts()[wordsIter->second];
int cameraIndex = (int)(kpt.pt.x / cameraModelsTo[0].imageWidth());
UASSERT_MSG(cameraIndex < (int)cameraModelsTo.size(), uFormat("cameraIndex=%d (x=%f models=%d camera width = %d)", cameraIndex, kpt.pt.x, (int)cameraModelsTo.size(), cameraModelsTo[0].imageWidth()).c_str());
const cv::Point3f & pt = toSignature.getWords3()[wordsIter->second];
UASSERT(cameraModelsTo[cameraIndex].isValidForProjection());
distances.push_back(depth);
if(!pcaData.empty())
float * ptr = pcaData.ptr<float>(i, 0);
ptr[0] = (kpt.pt.x-cameraIndex*cameraModelsTo[cameraIndex].imageWidth()-cameraModelsTo[cameraIndex].cx()) / cameraModelsTo[cameraIndex].imageWidth();
ptr[1] = (kpt.pt.y-cameraModelsTo[cameraIndex].cy()) / cameraModelsTo[cameraIndex].imageHeight();
if(!distances.empty())
info.inliersMeanDistance = uMean(distances);
msg = uFormat("The mean distance of the inliers is over %s threshold (%f)",
if(!transform.isNull() && !pcaData.empty())
cv::Mat pcaEigenVectors, pcaEigenValues;
cv::PCA pca_analysis(pcaData, cv::Mat(), CV_PCA_DATA_AS_ROW);
info.inliersDistribution = pca_analysis.eigenvalues.at<float>(0, 1);
msg = uFormat("The distribution (%f) of inliers is under %s threshold (%f)",
msg = uFormat("Missing correspondences for registration (%d->%d). fromWords = %d fromImageEmpty=%d toWords = %d toImageEmpty=%d",
    fromSignature.id(), toSignature.id(),
info.inliers = inliersCount;
info.matches = matchesCount;
info.covariance = covariance;