#include <opencv2/core/core_c.h>

#if defined(HAVE_OPENCV_XFEATURES2D) && (CV_MAJOR_VERSION > 3 || (CV_MAJOR_VERSION==3 && CV_MINOR_VERSION >=4 && CV_SUBMINOR_VERSION >= 1))
#include <opencv2/xfeatures2d.hpp>
#endif

#ifdef HAVE_OPENCV_CUDAOPTFLOW
#include <opencv2/cudaoptflow.hpp>
#include <opencv2/cudaimgproc.hpp>
#endif
	_minInliers(Parameters::defaultVisMinInliers()),
	_inlierDistance(Parameters::defaultVisInlierDistance()),
	_iterations(Parameters::defaultVisIterations()),
	_refineIterations(Parameters::defaultVisRefineIterations()),
	_epipolarGeometryVar(Parameters::defaultVisEpipolarGeometryVar()),
	_estimationType(Parameters::defaultVisEstimationType()),
	_PnPReprojError(Parameters::defaultVisPnPReprojError()),
	_PnPRefineIterations(Parameters::defaultVisPnPRefineIterations()),
	_PnPVarMedianRatio(Parameters::defaultVisPnPVarianceMedianRatio()),
	_PnPMaxVar(Parameters::defaultVisPnPMaxVariance()),
	_PnPSplitLinearCovarianceComponents(Parameters::defaultVisPnPSplitLinearCovComponents()),
	_multiSamplingPolicy(Parameters::defaultVisPnPSamplingPolicy()),
	_correspondencesApproach(Parameters::defaultVisCorType()),
	_flowWinSize(Parameters::defaultVisCorFlowWinSize()),
	_flowIterations(Parameters::defaultVisCorFlowIterations()),
	_flowMaxLevel(Parameters::defaultVisCorFlowMaxLevel()),
	_gmsWithRotation(Parameters::defaultGMSWithRotation()),
	_gmsWithScale(Parameters::defaultGMSWithScale()),
	_gmsThresholdFactor(Parameters::defaultGMSThresholdFactor()),
	_guessWinSize(Parameters::defaultVisCorGuessWinSize()),
	_guessMatchToProjection(Parameters::defaultVisCorGuessMatchToProjection()),
	_bundleAdjustment(Parameters::defaultVisBundleAdjustment()),
	_depthAsMask(Parameters::defaultVisDepthAsMask()),
	_maskFloorThreshold(Parameters::defaultVisDepthMaskFloorThr()),
	_minInliersDistributionThr(Parameters::defaultVisMinInliersDistribution()),
	_maxInliersMeanDistance(Parameters::defaultVisMeanInliersDistance()),
#ifdef RTABMAP_PYTHON
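	// Sanity checks on the parsed parameters: values that are inconsistent with the
	// current build configuration (OpenCV CUDA optical flow, Python3 matcher,
	// xfeatures2d/GMS) or with other parameters are reset to their defaults below.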
		UWARN("%s should be >= 6 but it is set to %d, setting to 6.", Parameters::kVisMinInliers().c_str(), _minInliers);

#ifndef HAVE_OPENCV_CUDAOPTFLOW
		UERROR("%s is enabled but RTAB-Map is not built with OpenCV CUDA, disabling it.", Parameters::kVisCorFlowGpu().c_str());

#ifndef RTABMAP_PYTHON
		UWARN("%s is set to 6 but RTAB-Map is not built with Python3 support, using default %d.",
				Parameters::kVisCorNNType().c_str(), Parameters::defaultVisCorNNType());
		_nnType = Parameters::defaultVisCorNNType();

		int iterations = _pyMatcher?_pyMatcher->iterations():Parameters::defaultPyMatcherIterations();
		float matchThr = _pyMatcher?_pyMatcher->matchThreshold():Parameters::defaultPyMatcherThreshold();
		std::string path = _pyMatcher?_pyMatcher->path():Parameters::defaultPyMatcherPath();
		bool cuda = _pyMatcher?_pyMatcher->cuda():Parameters::defaultPyMatcherCuda();
		std::string model = _pyMatcher?_pyMatcher->model():Parameters::defaultPyMatcherModel();

			UERROR("%s parameter should be set to use Python3 matching (%s=6), using default %d.",
					Parameters::kPyMatcherPath().c_str(),
					Parameters::kVisCorNNType().c_str(),
					Parameters::defaultVisCorNNType());
			_nnType = Parameters::defaultVisCorNNType();

#if !defined(HAVE_OPENCV_XFEATURES2D) || (CV_MAJOR_VERSION == 3 && (CV_MINOR_VERSION<4 || CV_MINOR_VERSION==4 && CV_SUBMINOR_VERSION<1))
		UWARN("%s is set to 7 but RTAB-Map is not built with OpenCV's xfeatures2d support (OpenCV >= 3.4.1 also required), using default %d.",
				Parameters::kVisCorNNType().c_str(), Parameters::defaultVisCorNNType());
		_nnType = Parameters::defaultVisCorNNType();
	for(ParametersMap::const_iterator iter=parameters.begin(); iter!=parameters.end(); ++iter)
		std::string group = uSplit(iter->first, '/').front();

	if(uContains(parameters, Parameters::kVisCorNNType()))
	if(uContains(parameters, Parameters::kVisCorNNDR()))
	if(uContains(parameters, Parameters::kKpByteToFloat()))
	if(uContains(parameters, Parameters::kVisFeatureType()))
	if(uContains(parameters, Parameters::kVisMaxFeatures()))
	if(uContains(parameters, Parameters::kVisSSC()))
	if(uContains(parameters, Parameters::kVisMaxDepth()))
	if(uContains(parameters, Parameters::kVisMinDepth()))
	if(uContains(parameters, Parameters::kVisRoiRatios()))
	if(uContains(parameters, Parameters::kVisSubPixEps()))
	if(uContains(parameters, Parameters::kVisSubPixIterations()))
	if(uContains(parameters, Parameters::kVisSubPixWinSize()))
	if(uContains(parameters, Parameters::kVisGridRows()))
	if(uContains(parameters, Parameters::kVisGridCols()))

#ifdef RTABMAP_PYTHON
	UDEBUG("Input(%d): from=%d words, %d 3D words, %d words descriptors, %d kpts, %d kpts3D, %d descriptors, image=%dx%d models=%d stereo=%d",
			(int)fromSignature.getWords().size(),

	UDEBUG("Input(%d): to=%d words, %d 3D words, %d words descriptors, %d kpts, %d kpts3D, %d descriptors, image=%dx%d models=%d stereo=%d",

	info.projectedIDs.clear();

		UDEBUG("Bypassing feature matching as descriptors are empty. We assume features are already matched.");
	std::vector<cv::KeyPoint> kptsFrom;
	std::vector<int> orignalWordsFromIds;
	int kptsFromSource = 0;
	if(fromSignature.getWords().empty())
		if(!imageFrom.empty())
			if(imageFrom.channels() > 1)
				cv::cvtColor(imageFrom, tmp, cv::COLOR_BGR2GRAY);

				depthMask = depthBelow;
				UDEBUG("Masking floor done.");

				UWARN("%s is true, but RGB size (%dx%d) modulo depth size (%dx%d) is not 0. Ignoring depth mask for feature detection.",
						Parameters::kVisDepthAsMask().c_str(),
						imageFrom.rows, imageFrom.cols,

		orignalWordsFromIds.resize(fromSignature.getWords().size());
		bool allUniques = true;
		int previousIdAdded = 0;
			UASSERT(iter->second>=0 && iter->second<(int)orignalWordsFromIds.size());
			orignalWordsFromIds[iter->second] = iter->first;
			if(i>0 && iter->first==previousIdAdded)
			previousIdAdded = iter->first;
			UDEBUG("IDs are not unique, IDs will be regenerated!");
			orignalWordsFromIds.clear();
	std::multimap<int, int> wordsFrom;
	std::multimap<int, int> wordsTo;
	std::vector<cv::KeyPoint> wordsKptsFrom;
	std::vector<cv::KeyPoint> wordsKptsTo;
	std::vector<cv::Point3f> words3From;
	std::vector<cv::Point3f> words3To;
	cv::Mat wordsDescFrom;
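	// Optical-flow correspondence mode: keypoints of the 'from' frame are tracked into
	// the 'to' frame with pyramidal Lucas-Kanade flow, so no descriptor matching is
	// needed. GPU images are prepared first when OpenCV CUDA optical flow is available.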
#ifdef HAVE_OPENCV_CUDAOPTFLOW
		cv::cuda::GpuMat d_imageFrom;
		cv::cuda::GpuMat d_imageTo;
			UDEBUG("GPU optical flow: preparing GPU image data...");
			d_imageFrom = fromSignature.sensorData().imageRawGpu();
			if(d_imageFrom.empty() && !imageFrom.empty()) {
				d_imageFrom = cv::cuda::GpuMat(imageFrom);
			if(d_imageFrom.channels() > 1) {
				cv::cuda::GpuMat tmp;
				cv::cuda::cvtColor(d_imageFrom, tmp, cv::COLOR_BGR2GRAY);
			if(fromSignature.sensorData().imageRawGpu().empty())
				fromSignature.sensorData().setImageRawGpu(d_imageFrom);

			d_imageTo = toSignature.sensorData().imageRawGpu();
			if(d_imageTo.empty() && !imageTo.empty()) {
				d_imageTo = cv::cuda::GpuMat(imageTo);
			if(d_imageTo.channels() > 1) {
				cv::cuda::GpuMat tmp;
				cv::cuda::cvtColor(d_imageTo, tmp, cv::COLOR_BGR2GRAY);
			if(toSignature.sensorData().imageRawGpu().empty())
				toSignature.sensorData().setImageRawGpu(d_imageTo);
			UDEBUG("GPU optical flow: preparing GPU image data... done!");

		if(imageFrom.channels() > 1)
			cv::cvtColor(imageFrom, tmp, cv::COLOR_BGR2GRAY);
		if(imageTo.channels() > 1)
			cv::cvtColor(imageTo, tmp, cv::COLOR_BGR2GRAY);
		std::vector<cv::Point3f> kptsFrom3D;
		if(kptsFrom.size() == fromSignature.getWords3().size())

		if(!imageFrom.empty() && !imageTo.empty())
			std::vector<cv::Point2f> cornersFrom;
			cv::KeyPoint::convert(kptsFrom, cornersFrom);
			std::vector<cv::Point2f> cornersTo;

				cv::Mat R = (cv::Mat_<double>(3,3) <<
						(double)guessCameraRef.r11(), (double)guessCameraRef.r12(), (double)guessCameraRef.r13(),
						(double)guessCameraRef.r21(), (double)guessCameraRef.r22(), (double)guessCameraRef.r23(),
						(double)guessCameraRef.r31(), (double)guessCameraRef.r32(), (double)guessCameraRef.r33());
				cv::Mat rvec(1,3, CV_64FC1);
				cv::Rodrigues(R, rvec);
				cv::Mat tvec = (cv::Mat_<double>(1,3) << (double)guessCameraRef.x(), (double)guessCameraRef.y(), (double)guessCameraRef.z());
				cv::projectPoints(kptsFrom3D, rvec, tvec, K, cv::Mat(), cornersTo);

				cornersTo = cornersFrom;
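			// Multi-camera rig: each 3D point is projected into the sub-camera it is most
			// likely seen by; the projected pixel is then offset by (camera index *
			// sub-image width) so that all cameras share one horizontally stitched image.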
				std::vector<Transform> inverseTransforms(nCameras);
				for(int c=0; c<nCameras; ++c)
					inverseTransforms[c] = (guess * localTransform).inverse();
					UDEBUG("inverse transforms: cam %d -> %s", c, inverseTransforms[c].prettyPrint().c_str());

				UASSERT(kptsFrom3D.size() == cornersTo.size());
				for(size_t i=0; i<kptsFrom3D.size(); ++i)
					int startIndex = cornersFrom[i].x/subImageWidth;
					UASSERT(startIndex < nCameras);
					for(int ci=0; ci < nCameras; ++ci)
						int c = (ci+startIndex) % nCameras;
						if(ptsInCamFrame.z > 0)
							model.reproject(ptsInCamFrame.x, ptsInCamFrame.y, ptsInCamFrame.z, u, v);
							cornersTo[i].x = u+model.imageWidth()*c;

				UDEBUG("Projected %d/%ld points inside %d cameras (time=%fs)",
						inFrame, cornersTo.size(), nCameras, t.ticks());
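			// Run sparse pyramidal Lucas-Kanade optical flow, using the CUDA
			// implementation when built with HAVE_OPENCV_CUDAOPTFLOW and the CPU
			// cv::calcOpticalFlowPyrLK() otherwise. When a guess was set, the projected
			// corners serve as the initial flow estimate.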
			UDEBUG("guessSet = %d", guessSet?1:0);
			std::vector<unsigned char> status;
#ifdef HAVE_OPENCV_CUDAOPTFLOW
				UDEBUG("cv::cuda::SparsePyrLKOpticalFlow transfer host to device begin");
				cv::cuda::GpuMat d_cornersFrom(cornersFrom);
				cv::cuda::GpuMat d_cornersTo(cornersTo);
				UDEBUG("cv::cuda::SparsePyrLKOpticalFlow transfer host to device end");
				cv::cuda::GpuMat d_status;
				cv::Ptr<cv::cuda::SparsePyrLKOpticalFlow> d_pyrLK_sparse = cv::cuda::SparsePyrLKOpticalFlow::create(
				UDEBUG("cv::cuda::SparsePyrLKOpticalFlow calc begin");
				d_pyrLK_sparse->calc(d_imageFrom, d_imageTo, d_cornersFrom, d_cornersTo, d_status);
				UDEBUG("cv::cuda::SparsePyrLKOpticalFlow calc end");

				UDEBUG("cv::cuda::SparsePyrLKOpticalFlow transfer device to host begin");
				cornersTo = std::vector<cv::Point2f>(d_cornersTo.cols);
				cv::Mat matCornersTo(1, d_cornersTo.cols, CV_32FC2, (void*)&cornersTo[0]);
				d_cornersTo.download(matCornersTo);

				status = std::vector<unsigned char>(d_status.cols);
				cv::Mat matStatus(1, d_status.cols, CV_8UC1, (void*)&status[0]);
				d_status.download(matStatus);
				UDEBUG("cv::cuda::SparsePyrLKOpticalFlow transfer device to host end");

				std::vector<float> err;
				UDEBUG("cv::calcOpticalFlowPyrLK() begin");
				cv::calcOpticalFlowPyrLK(
						cv::OPTFLOW_LK_GET_MIN_EIGENVALS | (guessSet ? cv::OPTFLOW_USE_INITIAL_FLOW : 0), 1e-4);
				UDEBUG("cv::calcOpticalFlowPyrLK() end");
			UASSERT(kptsFrom.size() == kptsFrom3D.size());
			std::vector<cv::KeyPoint> kptsTo(kptsFrom.size());
			std::vector<cv::Point3f> kptsFrom3DKept(kptsFrom3D.size());
			std::vector<int> orignalWordsFromIdsCpy = orignalWordsFromIds;
			for(unsigned int i=0; i<status.size(); ++i)
					if(orignalWordsFromIdsCpy.size())
						orignalWordsFromIds[ki] = orignalWordsFromIdsCpy[i];
					kptsFrom[ki] = cv::KeyPoint(cornersFrom[i], 1);
					kptsFrom3DKept[ki] = kptsFrom3D[i];
					kptsTo[ki++] = cv::KeyPoint(cornersTo[i], 1);
			if(orignalWordsFromIds.size())
				orignalWordsFromIds.resize(ki);
			kptsFrom3DKept.resize(ki);
			kptsFrom3D = kptsFrom3DKept;

			std::vector<cv::Point3f> kptsTo3D;

			UASSERT(kptsFrom.size() == kptsFrom3DKept.size());
			UASSERT(kptsFrom.size() == kptsTo.size());
			UASSERT(kptsTo3D.size() == 0 || kptsTo.size() == kptsTo3D.size());
			for(unsigned int i=0; i< kptsFrom3DKept.size(); ++i)
				int id = !orignalWordsFromIds.empty()?orignalWordsFromIds[i]:i;
				wordsFrom.insert(wordsFrom.end(), std::make_pair(id, wordsFrom.size()));
				wordsKptsFrom.push_back(kptsFrom[i]);
				words3From.push_back(kptsFrom3DKept[i]);
				wordsTo.insert(wordsTo.end(), std::make_pair(id, wordsTo.size()));
				wordsKptsTo.push_back(kptsTo[i]);
				if(!kptsTo3D.empty())
					words3To.push_back(kptsTo3D[i]);
			if(imageFrom.empty())
				UERROR("Optical flow correspondences require images in data!");

			UASSERT(kptsFrom.size() == kptsFrom3D.size());
			for(unsigned int i=0; i< kptsFrom3D.size(); ++i)
				int id = !orignalWordsFromIds.empty()?orignalWordsFromIds[i]:i;
				wordsFrom.insert(wordsFrom.end(), std::make_pair(id, wordsFrom.size()));
				wordsKptsFrom.push_back(kptsFrom[i]);
				words3From.push_back(kptsFrom3D[i]);

		toSignature.sensorData().setFeatures(std::vector<cv::KeyPoint>(), std::vector<cv::Point3f>(), cv::Mat());
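	// Feature matching mode: detect keypoints in the 'to' frame (optionally masked by
	// depth, with the floor region removed), compute descriptors for both frames, and
	// match them below.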
	std::vector<cv::KeyPoint> kptsTo;
	int kptsToSource = 0;
			if(imageTo.channels() > 1)
				cv::cvtColor(imageTo, tmp, cv::COLOR_BGR2GRAY);

				depthMask = depthBelow;
				UDEBUG("Masking floor done.");

				UWARN("%s is true, but RGB size (%dx%d) modulo depth size (%dx%d) is not 0. Ignoring depth mask for feature detection.",
						Parameters::kVisDepthAsMask().c_str(),
						imageTo.rows, imageTo.cols,

	UDEBUG("kptsFrom=%d kptsFromSource=%d", (int)kptsFrom.size(), kptsFromSource);
	UDEBUG("kptsTo=%d kptsToSource=%d", (int)kptsTo.size(), kptsToSource);
	cv::Mat descriptorsFrom;
	if(kptsFromSource == 2 &&
	else if(kptsFromSource == 1 &&
	else if(!imageFrom.empty())
		if(imageFrom.channels() > 1)
			cv::cvtColor(imageFrom, tmp, cv::COLOR_BGR2GRAY);
		UDEBUG("cleared orignalWordsFromIds");
		orignalWordsFromIds.clear();

	cv::Mat descriptorsTo;
	if(kptsToSource == 2 &&
	else if(kptsToSource == 1 &&
	else if(!imageTo.empty())
		if(imageTo.channels() > 1)
			cv::cvtColor(imageTo, tmp, cv::COLOR_BGR2GRAY);
	std::vector<cv::Point3f> kptsFrom3D;
	std::vector<cv::Point3f> kptsTo3D;
	if(kptsFromSource == 2 &&
			kptsFrom.size() == fromSignature.getWords3().size())
	else if(kptsFromSource == 1 &&
		if(fromSignature.getWords3().size() && kptsFrom.size() != fromSignature.getWords3().size())
			UWARN("kptsFrom (%d) is not the same size as fromSignature.getWords3() (%d), there "
					"may be a problem with the logic above (getWords3() should be null or equal to kptsFrom). Regenerating kptsFrom3D...",
			UWARN("kptsFrom (%d) is not the same size as fromSignature.sensorData().keypoints3D() (%d), there "
					"may be a problem with the logic above (keypoints3D should be null or equal to kptsFrom). Regenerating kptsFrom3D...",
		UDEBUG("generated kptsFrom3D=%d", (int)kptsFrom3D.size());

	if(!kptsFrom3D.empty() &&

	if(kptsToSource == 2 && kptsTo.size() == toSignature.getWords3().size())
	else if(kptsToSource == 1 &&
			UWARN("kptsTo (%d) is not the same size as toSignature.getWords3() (%d), there "
					"may be a problem with the logic above (getWords3() should be null or equal to kptsTo). Regenerating kptsTo3D...",
			UWARN("kptsTo (%d) is not the same size as toSignature.sensorData().keypoints3D() (%d), there "
					"may be a problem with the logic above (keypoints3D() should be null or equal to kptsTo). Regenerating kptsTo3D...",
		if(kptsTo3D.size() &&

	UASSERT(kptsFrom.empty() || descriptorsFrom.rows == 0 || int(kptsFrom.size()) == descriptorsFrom.rows);

	UDEBUG("descriptorsFrom=%d", descriptorsFrom.rows);
	UDEBUG("descriptorsTo=%d", descriptorsTo.rows);
	UDEBUG("orignalWordsFromIds=%d", (int)orignalWordsFromIds.size());
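	// When a motion guess and a valid calibration are available, matching is restricted
	// to a local window: the 3D 'from' points are projected into the 'to' camera(s) and
	// only keypoints within _guessWinSize pixels of a projection are considered, either
	// matching the frame to the projections or the projections to the frame depending
	// on _guessMatchToProjection.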
	if(descriptorsFrom.rows > 0 && descriptorsTo.rows > 0)
		std::vector<CameraModel> models;

		bool isCalibrated = !models.empty();
		for(size_t i=0; i<models.size() && isCalibrated; ++i)
			isCalibrated = models[i].isValidForProjection();

			if(isCalibrated && (models[i].imageWidth()==0 || models[i].imageHeight()==0))
				isCalibrated = false;

			UASSERT((int)kptsTo.size() == descriptorsTo.rows);
			UASSERT((int)kptsFrom3D.size() == descriptorsFrom.rows);
			std::vector<cv::Point2f> cornersProjected;
			std::vector<int> projectedIndexToDescIndex;
			float subImageWidth = models[0].imageWidth();
			std::set<int> added;
			for(size_t m=0; m<models.size(); ++m)
				cv::Mat R = (cv::Mat_<double>(3,3) <<
						(double)guessCameraRef.r11(), (double)guessCameraRef.r12(), (double)guessCameraRef.r13(),
						(double)guessCameraRef.r21(), (double)guessCameraRef.r22(), (double)guessCameraRef.r23(),
						(double)guessCameraRef.r31(), (double)guessCameraRef.r32(), (double)guessCameraRef.r33());
				cv::Mat rvec(1,3, CV_64FC1);
				cv::Rodrigues(R, rvec);
				cv::Mat tvec = (cv::Mat_<double>(1,3) << (double)guessCameraRef.x(), (double)guessCameraRef.y(), (double)guessCameraRef.z());
				cv::Mat K = models[m].K();
				std::vector<cv::Point2f> projected;
				cv::projectPoints(kptsFrom3D, rvec, tvec, K, cv::Mat(), projected);
				UDEBUG("Projected points=%d", (int)projected.size());

				UASSERT((int)projected.size() == descriptorsFrom.rows);
				int cornersInFrame = 0;
				for(unsigned int i=0; i<projected.size(); ++i)
					if(uIsInBounds(projected[i].x, 0.0f, float(models[m].imageWidth()-1)) &&
					   uIsInBounds(projected[i].y, 0.0f, float(models[m].imageHeight()-1)) &&
						if(added.find(i) != added.end())
						projectedIndexToDescIndex.push_back(i);
						projected[i].x += subImageWidth*float(m);
						cornersProjected.push_back(projected[i]);
				UDEBUG("corners in frame=%d (camera index=%ld)", cornersInFrame, m);

			UDEBUG("guessMatchToProjection=%d, cornersProjected=%d orignalWordsFromIds=%d (added=%ld, duplicates=%d)",
					added.size(), duplicates);
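			// Frame-to-projection matching: for each 'to' keypoint, the candidate 'from'
			// descriptors are those whose projections fell inside the radius search
			// above; a brute-force match (Hamming for binary descriptors, L2 otherwise)
			// picks the best candidate, with cross-check when _nnType==5 and a 2-NN
			// distance-ratio check otherwise.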
			if(cornersProjected.size())
					UDEBUG("match frame to projected");

					std::vector< std::vector<size_t> > indices;
					std::vector<std::vector<float> > dists;
					std::vector<cv::Point2f> pointsTo;
					cv::KeyPoint::convert(kptsTo, pointsTo);

					UASSERT(descriptorsFrom.cols == descriptorsTo.cols);
					UASSERT(descriptorsFrom.rows == (int)kptsFrom.size());
					UASSERT((int)pointsToMat.rows == descriptorsTo.rows);
					UDEBUG("radius search done for guess");

					int newToId = !orignalWordsFromIds.empty()?fromSignature.getWords().rbegin()->first+1:descriptorsFrom.rows;
					std::map<int,int> addedWordsFrom;
					std::map<int, int> duplicates;
					cv::Mat descriptors(10, descriptorsTo.cols, descriptorsTo.type());
					for(unsigned int i = 0; i < pointsToMat.rows; ++i)
						int matchedIndex = -1;
							std::vector<int> descriptorsIndices(indices[i].size());
								descriptorsFrom.row(projectedIndexToDescIndex[indices[i].at(j)]).copyTo(descriptors.row(oi));
								descriptorsIndices[oi++] = indices[i].at(j);
							descriptorsIndices.resize(oi);

								cv::BFMatcher matcher(descriptors.type()==CV_8U?cv::NORM_HAMMING:cv::NORM_L2SQR, _nnType == 5);
									std::vector<cv::DMatch> matches;
									matcher.match(descriptorsTo.row(i), cv::Mat(descriptors, cv::Range(0, oi)), matches);
										matchedIndex = descriptorsIndices.at(matches.at(0).trainIdx);
									std::vector<std::vector<cv::DMatch> > matches;
									matcher.knnMatch(descriptorsTo.row(i), cv::Mat(descriptors, cv::Range(0, oi)), matches, 2);
										matchedIndex = descriptorsIndices.at(matches[0].at(0).trainIdx);

						if(matchedIndex >= 0)
							matchedIndex = projectedIndexToDescIndex[matchedIndex];
							int id = !orignalWordsFromIds.empty()?orignalWordsFromIds[matchedIndex]:matchedIndex;

							if(addedWordsFrom.find(matchedIndex) != addedWordsFrom.end())
								id = addedWordsFrom.at(matchedIndex);
								duplicates.insert(std::make_pair(matchedIndex, id));
								addedWordsFrom.insert(std::make_pair(matchedIndex, id));

								wordsFrom.insert(wordsFrom.end(), std::make_pair(id, wordsFrom.size()));
								if(!kptsFrom.empty())
									wordsKptsFrom.push_back(kptsFrom[matchedIndex]);
								words3From.push_back(kptsFrom3D[matchedIndex]);
								wordsDescFrom.push_back(descriptorsFrom.row(matchedIndex));

							wordsTo.insert(wordsTo.end(), std::make_pair(id, wordsTo.size()));
							wordsKptsTo.push_back(kptsTo[i]);
							wordsDescTo.push_back(descriptorsTo.row(i));
							if(!kptsTo3D.empty())
								words3To.push_back(kptsTo3D[i]);

							wordsTo.insert(wordsTo.end(), std::make_pair(newToId, wordsTo.size()));
							wordsKptsTo.push_back(kptsTo[i]);
							wordsDescTo.push_back(descriptorsTo.row(i));
							if(!kptsTo3D.empty())
								words3To.push_back(kptsTo3D[i]);

					UDEBUG("addedWordsFrom=%d/%d (duplicates=%d, newWords=%d), kptsTo=%d, wordsTo=%d, words3From=%d",
							(int)addedWordsFrom.size(), (int)cornersProjected.size(), (int)duplicates.size(), newWords,
							(int)kptsTo.size(), (int)wordsTo.size(), (int)words3From.size());
				int addWordsFromNotMatched = 0;
				for(unsigned int i=0; i<kptsFrom3D.size(); ++i)
					if(util3d::isFinite(kptsFrom3D[i]) && addedWordsFrom.find(i) == addedWordsFrom.end())
						int id = !orignalWordsFromIds.empty()?orignalWordsFromIds[i]:i;
						wordsFrom.insert(wordsFrom.end(), std::make_pair(id, wordsFrom.size()));
						wordsKptsFrom.push_back(kptsFrom[i]);
						wordsDescFrom.push_back(descriptorsFrom.row(i));
						words3From.push_back(kptsFrom3D[i]);

						++addWordsFromNotMatched;
				UDEBUG("addWordsFromNotMatched=%d -> words3From=%d", addWordsFromNotMatched, (int)words3From.size());
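				// Projection-to-frame matching (the opposite direction): for each projected
				// 'from' point, the 'to' keypoints falling within the guess radius are the
				// candidates, and the 'from' descriptor is brute-force matched against them.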
					UDEBUG("match projected to frame");
					std::vector<cv::Point2f> pointsTo;
					cv::KeyPoint::convert(kptsTo, pointsTo);

					std::vector< std::vector<size_t> > indices;
					std::vector<std::vector<float> > dists;

					UASSERT(descriptorsFrom.cols == descriptorsTo.cols);
					UASSERT(descriptorsFrom.rows == (int)kptsFrom.size());
					UASSERT((int)pointsToMat.rows == descriptorsTo.rows);
					UDEBUG("radius search done for guess");

					std::set<int> addedWordsTo;
					std::set<int> addedWordsFrom;
					double bruteForceTotalTime = 0.0;
					double bruteForceDescCopy = 0.0;
					cv::Mat descriptors(10, descriptorsTo.cols, descriptorsTo.type());
					for(unsigned int i = 0; i < cornersProjectedMat.rows; ++i)
						int matchedIndexFrom = projectedIndexToDescIndex[i];

							info.projectedIDs.push_back(!orignalWordsFromIds.empty()?orignalWordsFromIds[matchedIndexFrom]:matchedIndexFrom);

							int matchedIndexTo = -1;
								std::vector<int> descriptorsIndices(indices[i].size());
									descriptorsTo.row(indices[i].at(j)).copyTo(descriptors.row(oi));
									descriptorsIndices[oi++] = indices[i].at(j);
								bruteForceDescCopy += bruteForceTimer.ticks();

									cv::BFMatcher matcher(descriptors.type()==CV_8U?cv::NORM_HAMMING:cv::NORM_L2SQR, _nnType==5);
										std::vector<cv::DMatch> matches;
										matcher.match(descriptorsFrom.row(matchedIndexFrom), cv::Mat(descriptors, cv::Range(0, oi)), matches);
											matchedIndexTo = descriptorsIndices.at(matches.at(0).trainIdx);
										std::vector<std::vector<cv::DMatch> > matches;
										matcher.knnMatch(descriptorsFrom.row(matchedIndexFrom), cv::Mat(descriptors, cv::Range(0, oi)), matches, 2);
										bruteForceTotalTime+=bruteForceTimer.elapsed();
											matchedIndexTo = descriptorsIndices.at(matches[0].at(0).trainIdx);

							int id = !orignalWordsFromIds.empty()?orignalWordsFromIds[matchedIndexFrom]:matchedIndexFrom;
							addedWordsFrom.insert(addedWordsFrom.end(), matchedIndexFrom);

							wordsFrom.insert(wordsFrom.end(), std::make_pair(id, wordsFrom.size()));
							if(!kptsFrom.empty())
								wordsKptsFrom.push_back(kptsFrom[matchedIndexFrom]);
							words3From.push_back(kptsFrom3D[matchedIndexFrom]);
							wordsDescFrom.push_back(descriptorsFrom.row(matchedIndexFrom));

							if( matchedIndexTo >= 0 &&
								addedWordsTo.find(matchedIndexTo) == addedWordsTo.end())
								addedWordsTo.insert(matchedIndexTo);

								wordsTo.insert(wordsTo.end(), std::make_pair(id, wordsTo.size()));
								wordsKptsTo.push_back(kptsTo[matchedIndexTo]);
								wordsDescTo.push_back(descriptorsTo.row(matchedIndexTo));
								if(!kptsTo3D.empty())
									words3To.push_back(kptsTo3D[matchedIndexTo]);
					UDEBUG("bruteForceDescCopy=%fs, bruteForceTotalTime=%fs", bruteForceDescCopy, bruteForceTotalTime);
					for(unsigned int i=0; i<kptsFrom3D.size(); ++i)
						if(util3d::isFinite(kptsFrom3D[i]) && addedWordsFrom.find(i) == addedWordsFrom.end())
							int id = !orignalWordsFromIds.empty()?orignalWordsFromIds[i]:i;
							wordsFrom.insert(wordsFrom.end(), std::make_pair(id, wordsFrom.size()));
							wordsKptsFrom.push_back(kptsFrom[i]);
							wordsDescFrom.push_back(descriptorsFrom.row(i));
							words3From.push_back(kptsFrom3D[i]);

					int newToId = !orignalWordsFromIds.empty()?fromSignature.getWords().rbegin()->first+1:descriptorsFrom.rows;
					for(unsigned int i = 0; i < kptsTo.size(); ++i)
						if(addedWordsTo.find(i) == addedWordsTo.end())
							wordsTo.insert(wordsTo.end(), std::make_pair(newToId, wordsTo.size()));
							wordsKptsTo.push_back(kptsTo[i]);
							wordsDescTo.push_back(descriptorsTo.row(i));
							if(!kptsTo3D.empty())
								words3To.push_back(kptsTo3D[i]);

				UWARN("All projected points are outside the camera. Guess (%s) is wrong or images are not overlapping.", guess.prettyPrint().c_str());

		if(guessSet && _guessWinSize > 0 && kptsFrom3D.size() && !isCalibrated)
			UWARN("Calibration not found! Finding correspondences "
					"with the guess cannot be done, global matching is "
		std::list<int> fromWordIds;
		std::list<int> toWordIds;
#ifdef RTABMAP_PYTHON
			std::vector<int> fromWordIdsV(descriptorsFrom.rows);
			for (int i = 0; i < descriptorsFrom.rows; ++i)
				if(!orignalWordsFromIds.empty())
					id = orignalWordsFromIds[i];
				fromWordIds.push_back(id);
				fromWordIdsV[i] = id;

			if(descriptorsTo.rows)
				std::vector<int> toWordIdsV(descriptorsTo.rows, 0);
				std::vector<cv::DMatch> matches;
#ifdef RTABMAP_PYTHON
				if(_nnType == 6 && _pyMatcher &&
					descriptorsTo.cols == descriptorsFrom.cols &&
					descriptorsTo.rows == (int)kptsTo.size() &&
					descriptorsTo.type() == CV_32F &&
					descriptorsFrom.type() == CV_32F &&
					descriptorsFrom.rows == (int)kptsFrom.size() &&
					UDEBUG("Python matching");
					matches = _pyMatcher->match(descriptorsTo, descriptorsFrom, kptsTo, kptsFrom, models[0].imageSize());

					if(_nnType == 6 && _pyMatcher)
						UDEBUG("Invalid inputs for Python matching (desc type=%d, only float descriptors supported, multicam not supported), doing bruteforce matching instead.", descriptorsFrom.type());

					bool doCrossCheck = true;
#ifdef HAVE_OPENCV_XFEATURES2D
#if CV_MAJOR_VERSION > 3 || (CV_MAJOR_VERSION==3 && CV_MINOR_VERSION >=4 && CV_SUBMINOR_VERSION >= 1)
					cv::Size imageSizeFrom;
						imageSizeFrom = imageFrom.size();

						if(!models.empty() && models[0].imageSize().height > 0 && models[0].imageSize().width > 0 &&
								imageSizeFrom.height > 0 && imageSizeFrom.width > 0)
							doCrossCheck = false;
							UDEBUG("Invalid inputs for GMS matching, image size should be set for both inputs, doing bruteforce matching instead.");

					UDEBUG("BruteForce matching%s", _nnType!=7?" with crosscheck":" with GMS");
					cv::BFMatcher matcher(descriptorsFrom.type()==CV_8U?cv::NORM_HAMMING:cv::NORM_L2SQR, doCrossCheck);

#if defined(HAVE_OPENCV_XFEATURES2D) && (CV_MAJOR_VERSION > 3 || (CV_MAJOR_VERSION==3 && CV_MINOR_VERSION >=4 && CV_SUBMINOR_VERSION >= 1))
						std::vector<cv::DMatch> matchesGMS;

				for(size_t i=0; i<toWordIdsV.size(); ++i)
					int toId = toWordIdsV[i];
						toId = fromWordIds.back()+i+1;
					toWordIds.push_back(toId);
			UDEBUG("VWDictionary knn matching");
			if(orignalWordsFromIds.empty())
				fromWordIds = dictionary.addNewWords(descriptorsFrom, 1);
				for (int i = 0; i < descriptorsFrom.rows; ++i)
					int id = orignalWordsFromIds[i];
					fromWordIds.push_back(id);

			if(descriptorsTo.rows)
				toWordIds = dictionary.addNewWords(descriptorsTo, 2);
			dictionary.clear(false);

		std::multiset<int> fromWordIdsSet(fromWordIds.begin(), fromWordIds.end());
		std::multiset<int> toWordIdsSet(toWordIds.begin(), toWordIds.end());

		UASSERT(kptsFrom3D.empty() || fromWordIds.size() == kptsFrom3D.size());
		UASSERT(int(fromWordIds.size()) == descriptorsFrom.rows);
		for(std::list<int>::iterator iter=fromWordIds.begin(); iter!=fromWordIds.end(); ++iter)
			if(fromWordIdsSet.count(*iter) == 1)
				wordsFrom.insert(wordsFrom.end(), std::make_pair(*iter, wordsFrom.size()));
				if (!kptsFrom.empty())
					wordsKptsFrom.push_back(kptsFrom[i]);
				if(!kptsFrom3D.empty())
					words3From.push_back(kptsFrom3D[i]);
				wordsDescFrom.push_back(descriptorsFrom.row(i));

		UASSERT(kptsTo3D.size() == 0 || kptsTo3D.size() == kptsTo.size());
		UASSERT(toWordIds.size() == kptsTo.size());
		UASSERT(int(toWordIds.size()) == descriptorsTo.rows);
		for(std::list<int>::iterator iter=toWordIds.begin(); iter!=toWordIds.end(); ++iter)
			if(toWordIdsSet.count(*iter) == 1)
				wordsTo.insert(wordsTo.end(), std::make_pair(*iter, wordsTo.size()));
				wordsKptsTo.push_back(kptsTo[i]);
				wordsDescTo.push_back(descriptorsTo.row(i));
				if(!kptsTo3D.empty())
					words3To.push_back(kptsTo3D[i]);
	else if(descriptorsFrom.rows)
		UASSERT(kptsFrom3D.empty() || int(kptsFrom3D.size()) == descriptorsFrom.rows);
		for(int i=0; i<descriptorsFrom.rows; ++i)
			wordsFrom.insert(wordsFrom.end(), std::make_pair(i, wordsFrom.size()));
			wordsKptsFrom.push_back(kptsFrom[i]);
			wordsDescFrom.push_back(descriptorsFrom.row(i));
			if(!kptsFrom3D.empty())
				words3From.push_back(kptsFrom3D[i]);

	fromSignature.setWords(wordsFrom, wordsKptsFrom, words3From, wordsDescFrom);
	toSignature.setWords(wordsTo, wordsKptsTo, words3To, wordsDescTo);
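	// Correspondences are now stored as words in both signatures. The relative
	// transform is estimated next: 2D->2D (epipolar geometry), 3D->2D (PnP) or 3D->3D
	// depending on _estimationType, together with a covariance for the result.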
	cv::Mat covariance = cv::Mat::eye(6,6,CV_64FC1);
	int inliersCount = 0;
	int matchesCount = 0;
	info.inliersIDs.clear();
	info.matchesIDs.clear();

		std::vector<int> inliers;

			UERROR("Calibrated camera required (multi-cameras not supported).");

			double variance = 1.0f;
			std::vector<int> matchesV;

			std::map<int, cv::KeyPoint> wordsA;
			std::map<int, cv::Point3f> words3A;
			std::map<int, cv::KeyPoint> wordsB;
			for(std::map<int, int>::iterator iter=uniqueWordsA.begin(); iter!=uniqueWordsA.end(); ++iter)
				words3A.insert(std::make_pair(iter->first, fromSignature.getWords3()[iter->second]));
			for(std::map<int, int>::iterator iter=uniqueWordsB.begin(); iter!=uniqueWordsB.end(); ++iter)

			covariance *= variance;
			inliers = uKeys(inliers3D);

			if(!cameraTransform.isNull())
				transform = cameraTransform.to3DoF();

		else if(fromSignature.getWords().size() == 0)

				UERROR("Calibrated camera required. Id=%d Models=%d StereoModels=%d weight=%d",

#ifndef RTABMAP_OPENGV
				UERROR("Multi-camera 2D-3D PnP registration is only available if rtabmap is built "
						"with OpenGV dependency. Use 3D-3D registration approach instead for multi-camera.");

				std::vector<int> inliersV;
				std::vector<int> matchesV;

				std::map<int, cv::Point3f> words3A;
				std::map<int, cv::Point3f> words3B;
				std::map<int, cv::KeyPoint> wordsB;
				for(std::map<int, int>::iterator iter=uniqueWordsA.begin(); iter!=uniqueWordsA.end(); ++iter)
					words3A.insert(std::make_pair(iter->first, fromSignature.getWords3()[iter->second]));
				for(std::map<int, int>::iterator iter=uniqueWordsB.begin(); iter!=uniqueWordsB.end(); ++iter)
					words3B.insert(std::make_pair(iter->first, toSignature.getWords3()[iter->second]));

				std::vector<CameraModel> models;

				UASSERT(models[0].isValidForProjection());
					std::vector<std::vector<int> > matchesPerCam;
					std::vector<std::vector<int> > inliersPerCam;

					info.matchesPerCam.resize(matchesPerCam.size());
					for(size_t i=0; i<matchesPerCam.size(); ++i)
						matches.insert(matches.end(), matchesPerCam[i].begin(), matchesPerCam[i].end());
						info.matchesPerCam[i] = matchesPerCam[i].size();
					info.inliersPerCam.resize(inliersPerCam.size());
					for(size_t i=0; i<inliersPerCam.size(); ++i)
						inliers.insert(inliers.end(), inliersPerCam[i].begin(), inliersPerCam[i].end());
						info.inliersPerCam[i] = inliersPerCam[i].size();

					UASSERT(models.size() == 1 && models[0].isValidForProjection());

				UDEBUG("inliers: %d/%d", (int)inliersV.size(), (int)matchesV.size());

					msg = uFormat("Not enough inliers %d/%d (matches=%d) between %d and %d",

				msg = uFormat("Not enough features in images (old=%d, new=%d, min=%d)",
			std::vector<int> inliersV;
			std::vector<int> matchesV;

			std::map<int, cv::Point3f> words3A;
			std::map<int, cv::Point3f> words3B;
			for(std::map<int, int>::iterator iter=uniqueWordsA.begin(); iter!=uniqueWordsA.end(); ++iter)
				words3A.insert(std::make_pair(iter->first, fromSignature.getWords3()[iter->second]));
			for(std::map<int, int>::iterator iter=uniqueWordsB.begin(); iter!=uniqueWordsB.end(); ++iter)
				words3B.insert(std::make_pair(iter->first, toSignature.getWords3()[iter->second]));

			UDEBUG("inliers: %d/%d", (int)inliersV.size(), (int)matchesV.size());

				msg = uFormat("Not enough inliers %d/%d (matches=%d) between %d and %d",

			msg = uFormat("Not enough 3D features in images (old=%d, new=%d, min=%d)",
		UDEBUG("Refine with bundle adjustment");

		std::map<int, Transform> poses;
		std::multimap<int, Link> links;
		std::map<int, cv::Point3f> points3DMap;

		poses.insert(std::make_pair(2, transform));

		UASSERT(covariance.cols==6 && covariance.rows == 6 && covariance.type() == CV_64FC1);

		std::map<int, Transform> optimizedPoses;

		std::map<int, std::vector<CameraModel> > models;

		std::vector<CameraModel> cameraModelsFrom;
			cameraModelsFrom.push_back(cameraModel);

		std::vector<CameraModel> cameraModelsTo;
			cameraModelsTo.push_back(cameraModel);

		models.insert(std::make_pair(1, cameraModelsFrom));
		models.insert(std::make_pair(2, cameraModelsTo));

		std::map<int, std::map<int, FeatureBA> > wordReferences;
		std::set<int> sbaOutliers;
		for(unsigned int i=0; i<inliers.size(); ++i)
			int wordId = inliers[i];
			int indexFrom = fromSignature.getWords().find(wordId)->second;
			const cv::Point3f & pt3D = fromSignature.getWords3()[indexFrom];

			points3DMap.insert(std::make_pair(wordId, pt3D));

			std::map<int, FeatureBA> ptMap;
				cv::KeyPoint kpt = fromSignature.getWordsKpts()[indexFrom];

				int cameraIndex = 0;
				const std::vector<CameraModel> & cam = models.at(1);
					float subImageWidth = cam[0].imageWidth();
					cameraIndex = int(kpt.pt.x / subImageWidth);
					kpt.pt.x = kpt.pt.x - (subImageWidth*float(cameraIndex));

				UASSERT(cam[cameraIndex].isValidForProjection());

				ptMap.insert(std::make_pair(1, FeatureBA(kpt, depthFrom, cv::Mat(), cameraIndex)));

				int indexTo = toSignature.getWords().find(wordId)->second;
				cv::KeyPoint kpt = toSignature.getWordsKpts()[indexTo];

				int cameraIndex = 0;
				const std::vector<CameraModel> & cam = models.at(2);
					float subImageWidth = cam[0].imageWidth();
					cameraIndex = int(kpt.pt.x / subImageWidth);
					kpt.pt.x = kpt.pt.x - (subImageWidth*float(cameraIndex));

				UASSERT(cam[cameraIndex].isValidForProjection());

				float depthTo = 0.0f;
				ptMap.insert(std::make_pair(2, FeatureBA(kpt, depthTo, cv::Mat(), cameraIndex)));

			wordReferences.insert(std::make_pair(wordId, ptMap));

		optimizedPoses = sba->optimizeBA(1, poses, links, models, points3DMap, wordReferences, &sbaOutliers);

		if(optimizedPoses.size() == 2 &&
			!optimizedPoses.begin()->second.isNull() &&
			!optimizedPoses.rbegin()->second.isNull())
			UDEBUG("Pose optimization: %s -> %s", transform.prettyPrint().c_str(), optimizedPoses.rbegin()->second.prettyPrint().c_str());

			if(sbaOutliers.size())
				std::vector<int> newInliers(inliers.size());
				for(unsigned int i=0; i<inliers.size(); ++i)
					if(sbaOutliers.find(inliers[i]) == sbaOutliers.end())
						newInliers[oi++] = inliers[i];
				newInliers.resize(oi);
				UDEBUG("BA outliers ratio %f", float(sbaOutliers.size())/float(inliers.size()));
				inliers = newInliers;

				msg = uFormat("Not enough inliers after bundle adjustment %d/%d (matches=%d) between %d and %d",
						(int)inliers.size(), _minInliers, (int)inliers.size()+sbaOutliers.size(), fromSignature.id(), toSignature.id());

			transform = optimizedPoses.rbegin()->second;
	info.inliersIDs = inliers;
	inliersCount = (int)inliers.size();
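	// Post-acceptance quality checks: the mean distance of the inlier 3D points
	// (_maxInliersMeanDistance) and the 2D distribution of the inliers in the 'to'
	// image, measured by a PCA on normalized pixel coordinates
	// (_minInliersDistributionThr), can still invalidate the transform.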
		std::vector<CameraModel> cameraModelsTo;

		if(cameraModelsTo.size() >= 1 && cameraModelsTo[0].isValidForReprojection())
			if(cameraModelsTo[0].imageWidth()>0 && cameraModelsTo[0].imageHeight()>0)
				pcaData = cv::Mat(inliers.size(), 2, CV_32FC1);
				UERROR("Invalid calibration image size (%dx%d), cannot compute inliers distribution! (see %s=%f)", cameraModelsTo[0].imageWidth(), cameraModelsTo[0].imageHeight(), Parameters::kVisMinInliersDistribution().c_str(), _minInliersDistributionThr);

		std::vector<float> distances;
		distances.reserve(inliers.size());
		for(unsigned int i=0; i<inliers.size(); ++i)
			std::multimap<int, int>::const_iterator wordsIter = toSignature.getWords().find(inliers[i]);
				const cv::KeyPoint & kpt = toSignature.getWordsKpts()[wordsIter->second];
				int cameraIndex = (int)(kpt.pt.x / cameraModelsTo[0].imageWidth());
				UASSERT_MSG(cameraIndex < (int)cameraModelsTo.size(),
						uFormat("cameraIndex=%d (x=%f models=%d camera width = %d)", cameraIndex, kpt.pt.x, (int)cameraModelsTo.size(), cameraModelsTo[0].imageWidth()).c_str());

				const cv::Point3f & pt = toSignature.getWords3()[wordsIter->second];
					UASSERT(cameraModelsTo[cameraIndex].isValidForProjection());
					distances.push_back(depth);

				if(!pcaData.empty())
					float * ptr = pcaData.ptr<float>(i, 0);
					ptr[0] = (kpt.pt.x-cameraIndex*cameraModelsTo[cameraIndex].imageWidth()-cameraModelsTo[cameraIndex].cx()) / cameraModelsTo[cameraIndex].imageWidth();
					ptr[1] = (kpt.pt.y-cameraModelsTo[cameraIndex].cy()) / cameraModelsTo[cameraIndex].imageHeight();

		if(!distances.empty())
			info.inliersMeanDistance = uMean(distances);
				msg = uFormat("The mean distance of the inliers is over %s threshold (%f)",

		if(!transform.isNull() && !pcaData.empty())
			cv::Mat pcaEigenVectors, pcaEigenValues;
			cv::PCA pca_analysis(pcaData, cv::Mat(), CV_PCA_DATA_AS_ROW);
			info.inliersDistribution = pca_analysis.eigenvalues.at<float>(0, 1);
				msg = uFormat("The distribution (%f) of inliers is under %s threshold (%f)",

		msg = uFormat("Missing correspondences for registration (%d->%d). fromWords = %d fromImageEmpty=%d toWords = %d toImageEmpty=%d",
				fromSignature.id(), toSignature.id(),

	info.inliers = inliersCount;
	info.matches = matchesCount;
	info.covariance = covariance;