#include <opencv2/core/core_c.h>
#if defined(HAVE_OPENCV_XFEATURES2D) && (CV_MAJOR_VERSION > 3 || (CV_MAJOR_VERSION==3 && CV_MINOR_VERSION >=4 && CV_SUBMINOR_VERSION >= 1))
#include <opencv2/xfeatures2d.hpp>
#ifdef HAVE_OPENCV_CUDAOPTFLOW
#include <opencv2/cudaoptflow.hpp>
#include <opencv2/cudaimgproc.hpp>
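// Constructor initializer list below: the visual registration parameters start at their library defaults.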
_minInliers(Parameters::defaultVisMinInliers()),
_inlierDistance(Parameters::defaultVisInlierDistance()),
_iterations(Parameters::defaultVisIterations()),
_refineIterations(Parameters::defaultVisRefineIterations()),
_epipolarGeometryVar(Parameters::defaultVisEpipolarGeometryVar()),
_estimationType(Parameters::defaultVisEstimationType()),
_forwardEstimateOnly(Parameters::defaultVisForwardEstOnly()),
_PnPReprojError(Parameters::defaultVisPnPReprojError()),
_PnPRefineIterations(Parameters::defaultVisPnPRefineIterations()),
_PnPVarMedianRatio(Parameters::defaultVisPnPVarianceMedianRatio()),
_PnPMaxVar(Parameters::defaultVisPnPMaxVariance()),
_PnPSplitLinearCovarianceComponents(Parameters::defaultVisPnPSplitLinearCovComponents()),
_multiSamplingPolicy(Parameters::defaultVisPnPSamplingPolicy()),
_correspondencesApproach(Parameters::defaultVisCorType()),
_flowWinSize(Parameters::defaultVisCorFlowWinSize()),
_flowIterations(Parameters::defaultVisCorFlowIterations()),
_flowMaxLevel(Parameters::defaultVisCorFlowMaxLevel()),
_gmsWithRotation(Parameters::defaultGMSWithRotation()),
_gmsWithScale(Parameters::defaultGMSWithScale()),
_gmsThresholdFactor(Parameters::defaultGMSThresholdFactor()),
_guessWinSize(Parameters::defaultVisCorGuessWinSize()),
_guessMatchToProjection(Parameters::defaultVisCorGuessMatchToProjection()),
_bundleAdjustment(Parameters::defaultVisBundleAdjustment()),
_depthAsMask(Parameters::defaultVisDepthAsMask()),
_maskFloorThreshold(Parameters::defaultVisDepthMaskFloorThr()),
_minInliersDistributionThr(Parameters::defaultVisMinInliersDistribution()),
_maxInliersMeanDistance(Parameters::defaultVisMeanInliersDistance()),
#ifdef RTABMAP_PYTHON
UWARN("%s should be >= 6 but it is set to %d, setting to 6.", Parameters::kVisMinInliers().c_str(), _minInliers);
#ifndef HAVE_OPENCV_CUDAOPTFLOW
UERROR("%s is enabled but RTAB-Map is not built with OpenCV CUDA, disabling it.", Parameters::kVisCorFlowGpu().c_str());
#ifndef RTABMAP_PYTHON
UWARN("%s is set to 6 but RTAB-Map is not built with Python3 support, using default %d.",
	Parameters::kVisCorNNType().c_str(), Parameters::defaultVisCorNNType());
_nnType = Parameters::defaultVisCorNNType();
int iterations = _pyMatcher?_pyMatcher->iterations():Parameters::defaultPyMatcherIterations();
float matchThr = _pyMatcher?_pyMatcher->matchThreshold():Parameters::defaultPyMatcherThreshold();
std::string path = _pyMatcher?_pyMatcher->path():Parameters::defaultPyMatcherPath();
bool cuda = _pyMatcher?_pyMatcher->cuda():Parameters::defaultPyMatcherCuda();
std::string model = _pyMatcher?_pyMatcher->model():Parameters::defaultPyMatcherModel();
UERROR("%s parameter should be set to use Python3 matching (%s=6), using default %d.",
	Parameters::kPyMatcherPath().c_str(),
	Parameters::kVisCorNNType().c_str(),
	Parameters::defaultVisCorNNType());
_nnType = Parameters::defaultVisCorNNType();
#if !defined(HAVE_OPENCV_XFEATURES2D) || (CV_MAJOR_VERSION == 3 && (CV_MINOR_VERSION<4 || CV_MINOR_VERSION==4 && CV_SUBMINOR_VERSION<1))
UWARN("%s is set to 7 but RTAB-Map is not built with OpenCV's xfeatures2d support (OpenCV >= 3.4.1 also required), using default %d.",
	Parameters::kVisCorNNType().c_str(), Parameters::defaultVisCorNNType());
_nnType = Parameters::defaultVisCorNNType();
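// Parameter parsing: pick up overrides for the parameters checked below when they appear in the incoming ParametersMap.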
for(ParametersMap::const_iterator iter=parameters.begin(); iter!=parameters.end(); ++iter)
std::string group = uSplit(iter->first, '/').front();
if(uContains(parameters, Parameters::kVisCorNNType()))
if(uContains(parameters, Parameters::kVisCorNNDR()))
if(uContains(parameters, Parameters::kKpByteToFloat()))
if(uContains(parameters, Parameters::kVisFeatureType()))
if(uContains(parameters, Parameters::kVisMaxFeatures()))
if(uContains(parameters, Parameters::kVisSSC()))
if(uContains(parameters, Parameters::kVisMaxDepth()))
if(uContains(parameters, Parameters::kVisMinDepth()))
if(uContains(parameters, Parameters::kVisRoiRatios()))
if(uContains(parameters, Parameters::kVisSubPixEps()))
if(uContains(parameters, Parameters::kVisSubPixIterations()))
if(uContains(parameters, Parameters::kVisSubPixWinSize()))
if(uContains(parameters, Parameters::kVisGridRows()))
if(uContains(parameters, Parameters::kVisGridCols()))
#ifdef RTABMAP_PYTHON
UDEBUG("Input(%d): from=%d words, %d 3D words, %d words descriptors, %d kpts, %d kpts3D, %d descriptors, image=%dx%d models=%d stereo=%d",
	(int)fromSignature.getWords().size(),
UDEBUG("Input(%d): to=%d words, %d 3D words, %d words descriptors, %d kpts, %d kpts3D, %d descriptors, image=%dx%d models=%d stereo=%d",
info.projectedIDs.clear();
UDEBUG("Bypassing feature matching as descriptors and images are empty. We assume features are already matched.");
std::vector<cv::KeyPoint> kptsFrom;
std::vector<int> orignalWordsFromIds;
int kptsFromSource = 0;
if(fromSignature.getWords().empty())
if(!imageFrom.empty())
if(imageFrom.channels() > 1)
cv::cvtColor(imageFrom, tmp, cv::COLOR_BGR2GRAY);
depthMask = depthBelow;
UDEBUG("Masking floor done.");
UWARN("%s is true, but RGB size (%dx%d) modulo depth size (%dx%d) is not 0. Ignoring depth mask for feature detection.",
	Parameters::kVisDepthAsMask().c_str(),
	imageFrom.rows, imageFrom.cols,
orignalWordsFromIds.resize(fromSignature.getWords().size());
bool allUniques = true;
int previousIdAdded = 0;
UASSERT(iter->second>=0 && iter->second<(int)orignalWordsFromIds.size());
orignalWordsFromIds[iter->second] = iter->first;
if(i>0 && iter->first==previousIdAdded)
previousIdAdded = iter->first;
UDEBUG("IDs are not unique, IDs will be regenerated!");
orignalWordsFromIds.clear();
std::multimap<int, int> wordsFrom;
std::multimap<int, int> wordsTo;
std::vector<cv::KeyPoint> wordsKptsFrom;
std::vector<cv::KeyPoint> wordsKptsTo;
std::vector<cv::Point3f> words3From;
std::vector<cv::Point3f> words3To;
cv::Mat wordsDescFrom;
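// Optical-flow correspondence path: reuse cached GPU images when available, otherwise upload and convert them to grayscale (CUDA optical-flow build only).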
#ifdef HAVE_OPENCV_CUDAOPTFLOW
cv::cuda::GpuMat d_imageFrom;
cv::cuda::GpuMat d_imageTo;
UDEBUG("GPU optical flow: preparing GPU image data...");
d_imageFrom = fromSignature.sensorData().imageRawGpu();
if(d_imageFrom.empty() && !imageFrom.empty()) {
d_imageFrom = cv::cuda::GpuMat(imageFrom);
if(d_imageFrom.channels() > 1) {
cv::cuda::GpuMat tmp;
cv::cuda::cvtColor(d_imageFrom, tmp, cv::COLOR_BGR2GRAY);
if(fromSignature.sensorData().imageRawGpu().empty())
fromSignature.sensorData().setImageRawGpu(d_imageFrom);
d_imageTo = toSignature.sensorData().imageRawGpu();
if(d_imageTo.empty() && !imageTo.empty()) {
d_imageTo = cv::cuda::GpuMat(imageTo);
if(d_imageTo.channels() > 1) {
cv::cuda::GpuMat tmp;
cv::cuda::cvtColor(d_imageTo, tmp, cv::COLOR_BGR2GRAY);
if(toSignature.sensorData().imageRawGpu().empty())
toSignature.sensorData().setImageRawGpu(d_imageTo);
UDEBUG("GPU optical flow: preparing GPU image data... done!");
if(imageFrom.channels() > 1)
cv::cvtColor(imageFrom, tmp, cv::COLOR_BGR2GRAY);
if(imageTo.channels() > 1)
cv::cvtColor(imageTo, tmp, cv::COLOR_BGR2GRAY);
std::vector<cv::Point3f> kptsFrom3D;
if(kptsFrom.size() == fromSignature.getWords3().size())
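// Seed the flow with the motion guess: project the 'from' 3D keypoints into the 'to' camera (Rodrigues rotation + translation of the guess); without a guess, the flow starts at the original corners.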
if(!imageFrom.empty() && !imageTo.empty())
std::vector<cv::Point2f> cornersFrom;
cv::KeyPoint::convert(kptsFrom, cornersFrom);
std::vector<cv::Point2f> cornersTo;
cv::Mat R = (cv::Mat_<double>(3,3) <<
	(double)guessCameraRef.r11(), (double)guessCameraRef.r12(), (double)guessCameraRef.r13(),
	(double)guessCameraRef.r21(), (double)guessCameraRef.r22(), (double)guessCameraRef.r23(),
	(double)guessCameraRef.r31(), (double)guessCameraRef.r32(), (double)guessCameraRef.r33());
cv::Mat rvec(1,3, CV_64FC1);
cv::Rodrigues(R, rvec);
cv::Mat tvec = (cv::Mat_<double>(1,3) << (double)guessCameraRef.x(), (double)guessCameraRef.y(), (double)guessCameraRef.z());
cv::projectPoints(kptsFrom3D, rvec, tvec, K, cv::Mat(), cornersTo);
cornersTo = cornersFrom;
std::vector<Transform> inverseTransforms(nCameras);
for(int c=0; c<nCameras; ++c)
inverseTransforms[c] = (guess * localTransform).inverse();
UDEBUG("inverse transforms: cam %d -> %s", c, inverseTransforms[c].prettyPrint().c_str());
UASSERT(kptsFrom3D.size() == cornersTo.size());
for(size_t i=0; i<kptsFrom3D.size(); ++i)
int startIndex = cornersFrom[i].x/subImageWidth;
UASSERT(startIndex < nCameras);
for(int ci=0; ci < nCameras; ++ci)
int c = (ci+startIndex) % nCameras;
if(ptsInCamFrame.z > 0)
model.reproject(ptsInCamFrame.x, ptsInCamFrame.y, ptsInCamFrame.z, u, v);
cornersTo[i].x = u+model.imageWidth()*c;
UDEBUG("Projected %d/%ld points inside %d cameras (time=%fs)",
	inFrame, cornersTo.size(), nCameras, t.ticks());
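// Track the seeded corners from the 'from' image to the 'to' image with pyramidal Lucas-Kanade (cv::cuda::SparsePyrLKOpticalFlow on GPU, cv::calcOpticalFlowPyrLK on CPU); only corners with a valid status become matched words.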
UDEBUG("guessSet = %d", guessSet?1:0);
std::vector<unsigned char> status;
#ifdef HAVE_OPENCV_CUDAOPTFLOW
UDEBUG("cv::cuda::SparsePyrLKOpticalFlow transfer host to device begin");
cv::cuda::GpuMat d_cornersFrom(cornersFrom);
cv::cuda::GpuMat d_cornersTo(cornersTo);
UDEBUG("cv::cuda::SparsePyrLKOpticalFlow transfer host to device end");
cv::cuda::GpuMat d_status;
cv::Ptr<cv::cuda::SparsePyrLKOpticalFlow> d_pyrLK_sparse = cv::cuda::SparsePyrLKOpticalFlow::create(
UDEBUG("cv::cuda::SparsePyrLKOpticalFlow calc begin");
d_pyrLK_sparse->calc(d_imageFrom, d_imageTo, d_cornersFrom, d_cornersTo, d_status);
UDEBUG("cv::cuda::SparsePyrLKOpticalFlow calc end");
UDEBUG("cv::cuda::SparsePyrLKOpticalFlow transfer device to host begin");
cornersTo = std::vector<cv::Point2f>(d_cornersTo.cols);
cv::Mat matCornersTo(1, d_cornersTo.cols, CV_32FC2, (void*)&cornersTo[0]);
d_cornersTo.download(matCornersTo);
status = std::vector<unsigned char>(d_status.cols);
cv::Mat matStatus(1, d_status.cols, CV_8UC1, (void*)&status[0]);
d_status.download(matStatus);
UDEBUG("cv::cuda::SparsePyrLKOpticalFlow transfer device to host end");
std::vector<float> err;
UDEBUG("cv::calcOpticalFlowPyrLK() begin");
cv::calcOpticalFlowPyrLK(
	cv::OPTFLOW_LK_GET_MIN_EIGENVALS | (guessSet ? cv::OPTFLOW_USE_INITIAL_FLOW : 0), 1e-4);
UDEBUG("cv::calcOpticalFlowPyrLK() end");
UASSERT(kptsFrom.size() == kptsFrom3D.size());
std::vector<cv::KeyPoint> kptsTo(kptsFrom.size());
std::vector<cv::Point3f> kptsFrom3DKept(kptsFrom3D.size());
std::vector<int> orignalWordsFromIdsCpy = orignalWordsFromIds;
for(unsigned int i=0; i<status.size(); ++i)
if(orignalWordsFromIdsCpy.size())
orignalWordsFromIds[ki] = orignalWordsFromIdsCpy[i];
kptsFrom[ki] = cv::KeyPoint(cornersFrom[i], 1);
kptsFrom3DKept[ki] = kptsFrom3D[i];
kptsTo[ki++] = cv::KeyPoint(cornersTo[i], 1);
if(orignalWordsFromIds.size())
orignalWordsFromIds.resize(ki);
kptsFrom3DKept.resize(ki);
kptsFrom3D = kptsFrom3DKept;
std::vector<cv::Point3f> kptsTo3D;
UASSERT(kptsFrom.size() == kptsFrom3DKept.size());
UASSERT(kptsFrom.size() == kptsTo.size());
UASSERT(kptsTo3D.size() == 0 || kptsTo.size() == kptsTo3D.size());
for(unsigned int i=0; i< kptsFrom3DKept.size(); ++i)
int id = !orignalWordsFromIds.empty()?orignalWordsFromIds[i]:i;
wordsFrom.insert(wordsFrom.end(), std::make_pair(id, wordsFrom.size()));
wordsKptsFrom.push_back(kptsFrom[i]);
words3From.push_back(kptsFrom3DKept[i]);
wordsTo.insert(wordsTo.end(), std::make_pair(id, wordsTo.size()));
wordsKptsTo.push_back(kptsTo[i]);
if(!kptsTo3D.empty())
words3To.push_back(kptsTo3D[i]);
if(imageFrom.empty())
UERROR("Optical flow correspondences require images in data!");
UASSERT(kptsFrom.size() == kptsFrom3D.size());
for(unsigned int i=0; i< kptsFrom3D.size(); ++i)
int id = !orignalWordsFromIds.empty()?orignalWordsFromIds[i]:i;
wordsFrom.insert(wordsFrom.end(), std::make_pair(id, wordsFrom.size()));
wordsKptsFrom.push_back(kptsFrom[i]);
words3From.push_back(kptsFrom3D[i]);
toSignature.sensorData().setFeatures(std::vector<cv::KeyPoint>(), std::vector<cv::Point3f>(), cv::Mat());
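// Feature-matching correspondence path: detect keypoints in the 'to' image (optionally masked by depth and floor removal) before computing descriptors on both signatures.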
std::vector<cv::KeyPoint> kptsTo;
int kptsToSource = 0;
if(imageTo.channels() > 1)
cv::cvtColor(imageTo, tmp, cv::COLOR_BGR2GRAY);
depthMask = depthBelow;
UDEBUG("Masking floor done.");
UWARN("%s is true, but RGB size (%dx%d) modulo depth size (%dx%d) is not 0. Ignoring depth mask for feature detection.",
	Parameters::kVisDepthAsMask().c_str(),
	imageTo.rows, imageTo.cols,
UDEBUG("kptsFrom=%d kptsFromSource=%d", (int)kptsFrom.size(), kptsFromSource);
UDEBUG("kptsTo=%d kptsToSource=%d", (int)kptsTo.size(), kptsToSource);
cv::Mat descriptorsFrom;
if(kptsFromSource == 2 &&
else if(kptsFromSource == 1 &&
else if(!imageFrom.empty())
if(imageFrom.channels() > 1)
cv::cvtColor(imageFrom, tmp, cv::COLOR_BGR2GRAY);
UDEBUG("cleared orignalWordsFromIds");
orignalWordsFromIds.clear();
cv::Mat descriptorsTo;
if(kptsToSource == 2 &&
else if(kptsToSource == 1 &&
else if(!imageTo.empty())
if(imageTo.channels() > 1)
cv::cvtColor(imageTo, tmp, cv::COLOR_BGR2GRAY);
std::vector<cv::Point3f> kptsFrom3D;
std::vector<cv::Point3f> kptsTo3D;
if(kptsFromSource == 2 &&
	kptsFrom.size() == fromSignature.getWords3().size())
else if(kptsFromSource == 1 &&
if(fromSignature.getWords3().size() && kptsFrom.size() != fromSignature.getWords3().size())
UWARN("kptsFrom (%d) is not the same size as fromSignature.getWords3() (%d), there "
	"may be a problem with the logic above (getWords3() should be null or equal to kptsFrom). Regenerating kptsFrom3D...",
UWARN("kptsFrom (%d) is not the same size as fromSignature.sensorData().keypoints3D() (%d), there "
	"may be a problem with the logic above (keypoints3D should be null or equal to kptsFrom). Regenerating kptsFrom3D...",
UDEBUG("generated kptsFrom3D=%d", (int)kptsFrom3D.size());
if(!kptsFrom3D.empty() &&
if(kptsToSource == 2 && kptsTo.size() == toSignature.getWords3().size())
else if(kptsToSource == 1 &&
UWARN("kptsTo (%d) is not the same size as toSignature.getWords3() (%d), there "
	"may be a problem with the logic above (getWords3() should be null or equal to kptsTo). Regenerating kptsTo3D...",
UWARN("kptsTo (%d) is not the same size as toSignature.sensorData().keypoints3D() (%d), there "
	"may be a problem with the logic above (keypoints3D() should be null or equal to kptsTo). Regenerating kptsTo3D...",
if(kptsTo3D.size() &&
UASSERT(kptsFrom.empty() || descriptorsFrom.rows == 0 || int(kptsFrom.size()) == descriptorsFrom.rows);
UDEBUG("descriptorsFrom=%d", descriptorsFrom.rows);
UDEBUG("descriptorsTo=%d", descriptorsTo.rows);
UDEBUG("orignalWordsFromIds=%d", (int)orignalWordsFromIds.size());
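// With descriptors on both sides: if the cameras are calibrated and a guess is set, matching is restricted to a projection window (direction chosen by _guessMatchToProjection); otherwise global matching is used.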
if(descriptorsFrom.rows > 0 && descriptorsTo.rows > 0)
std::vector<CameraModel> models;
bool isCalibrated = !models.empty();
for(size_t i=0; i<models.size() && isCalibrated; ++i)
isCalibrated = models[i].isValidForProjection();
if(isCalibrated && (models[i].imageWidth()==0 || models[i].imageHeight()==0))
isCalibrated = false;
UASSERT((int)kptsTo.size() == descriptorsTo.rows);
UASSERT((int)kptsFrom3D.size() == descriptorsFrom.rows);
std::vector<cv::Point2f> cornersProjected;
std::vector<int> projectedIndexToDescIndex;
float subImageWidth = models[0].imageWidth();
std::set<int> added;
for(size_t m=0; m<models.size(); ++m)
cv::Mat R = (cv::Mat_<double>(3,3) <<
	(double)guessCameraRef.r11(), (double)guessCameraRef.r12(), (double)guessCameraRef.r13(),
	(double)guessCameraRef.r21(), (double)guessCameraRef.r22(), (double)guessCameraRef.r23(),
	(double)guessCameraRef.r31(), (double)guessCameraRef.r32(), (double)guessCameraRef.r33());
cv::Mat rvec(1,3, CV_64FC1);
cv::Rodrigues(R, rvec);
cv::Mat tvec = (cv::Mat_<double>(1,3) << (double)guessCameraRef.x(), (double)guessCameraRef.y(), (double)guessCameraRef.z());
cv::Mat K = models[m].K();
std::vector<cv::Point2f> projected;
cv::projectPoints(kptsFrom3D, rvec, tvec, K, cv::Mat(), projected);
UDEBUG("Projected points=%d", (int)projected.size());
UASSERT((int)projected.size() == descriptorsFrom.rows);
int cornersInFrame = 0;
for(unsigned int i=0; i<projected.size(); ++i)
if(uIsInBounds(projected[i].x, 0.0f, float(models[m].imageWidth()-1)) &&
	uIsInBounds(projected[i].y, 0.0f, float(models[m].imageHeight()-1)) &&
if(added.find(i) != added.end())
projectedIndexToDescIndex.push_back(i);
projected[i].x += subImageWidth*float(m);
cornersProjected.push_back(projected[i]);
UDEBUG("corners in frame=%d (camera index=%ld)", cornersInFrame, m);
UDEBUG("guessMatchToProjection=%d, cornersProjected=%d orignalWordsFromIds=%d (added=%ld, duplicates=%d)",
	added.size(), duplicates);
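// "Match frame to projected": each 'to' keypoint is brute-force matched only against the 'from' descriptors whose projections fall inside its radius-search window.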
if(cornersProjected.size())
UDEBUG("match frame to projected");
std::vector< std::vector<size_t> > indices;
std::vector<std::vector<float> > dists;
std::vector<cv::Point2f> pointsTo;
cv::KeyPoint::convert(kptsTo, pointsTo);
UASSERT(descriptorsFrom.cols == descriptorsTo.cols);
UASSERT(descriptorsFrom.rows == (int)kptsFrom.size());
UASSERT((int)pointsToMat.rows == descriptorsTo.rows);
UDEBUG("radius search done for guess");
int newToId = !orignalWordsFromIds.empty()?fromSignature.getWords().rbegin()->first+1:descriptorsFrom.rows;
std::map<int,int> addedWordsFrom;
std::map<int, int> duplicates;
cv::Mat descriptors(10, descriptorsTo.cols, descriptorsTo.type());
for(unsigned int i = 0; i < pointsToMat.rows; ++i)
int matchedIndex = -1;
std::vector<int> descriptorsIndices(indices[i].size());
descriptorsFrom.row(projectedIndexToDescIndex[indices[i].at(j)]).copyTo(descriptors.row(oi));
descriptorsIndices[oi++] = indices[i].at(j);
descriptorsIndices.resize(oi);
cv::BFMatcher matcher(descriptors.type()==CV_8U?cv::NORM_HAMMING:cv::NORM_L2SQR, _nnType == 5);
std::vector<cv::DMatch> matches;
matcher.match(descriptorsTo.row(i), cv::Mat(descriptors, cv::Range(0, oi)), matches);
matchedIndex = descriptorsIndices.at(matches.at(0).trainIdx);
std::vector<std::vector<cv::DMatch> > matches;
matcher.knnMatch(descriptorsTo.row(i), cv::Mat(descriptors, cv::Range(0, oi)), matches, 2);
matchedIndex = descriptorsIndices.at(matches[0].at(0).trainIdx);
if(matchedIndex >= 0)
matchedIndex = projectedIndexToDescIndex[matchedIndex];
int id = !orignalWordsFromIds.empty()?orignalWordsFromIds[matchedIndex]:matchedIndex;
if(addedWordsFrom.find(matchedIndex) != addedWordsFrom.end())
id = addedWordsFrom.at(matchedIndex);
duplicates.insert(std::make_pair(matchedIndex, id));
addedWordsFrom.insert(std::make_pair(matchedIndex, id));
wordsFrom.insert(wordsFrom.end(), std::make_pair(id, wordsFrom.size()));
if(!kptsFrom.empty())
wordsKptsFrom.push_back(kptsFrom[matchedIndex]);
words3From.push_back(kptsFrom3D[matchedIndex]);
wordsDescFrom.push_back(descriptorsFrom.row(matchedIndex));
wordsTo.insert(wordsTo.end(), std::make_pair(id, wordsTo.size()));
wordsKptsTo.push_back(kptsTo[i]);
wordsDescTo.push_back(descriptorsTo.row(i));
if(!kptsTo3D.empty())
words3To.push_back(kptsTo3D[i]);
wordsTo.insert(wordsTo.end(), std::make_pair(newToId, wordsTo.size()));
wordsKptsTo.push_back(kptsTo[i]);
wordsDescTo.push_back(descriptorsTo.row(i));
if(!kptsTo3D.empty())
words3To.push_back(kptsTo3D[i]);
UDEBUG("addedWordsFrom=%d/%d (duplicates=%d, newWords=%d), kptsTo=%d, wordsTo=%d, words3From=%d",
	(int)addedWordsFrom.size(), (int)cornersProjected.size(), (int)duplicates.size(), newWords,
	(int)kptsTo.size(), (int)wordsTo.size(), (int)words3From.size());
int addWordsFromNotMatched = 0;
for(unsigned int i=0; i<kptsFrom3D.size(); ++i)
if(util3d::isFinite(kptsFrom3D[i]) && addedWordsFrom.find(i) == addedWordsFrom.end())
int id = !orignalWordsFromIds.empty()?orignalWordsFromIds[i]:i;
wordsFrom.insert(wordsFrom.end(), std::make_pair(id, wordsFrom.size()));
wordsKptsFrom.push_back(kptsFrom[i]);
wordsDescFrom.push_back(descriptorsFrom.row(i));
words3From.push_back(kptsFrom3D[i]);
++addWordsFromNotMatched;
UDEBUG("addWordsFromNotMatched=%d -> words3From=%d", addWordsFromNotMatched, (int)words3From.size());
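// "Match projected to frame": each projected 'from' point is matched against the 'to' keypoints found inside its window; unmatched 'from' words and remaining 'to' keypoints are still added as words.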
UDEBUG("match projected to frame");
std::vector<cv::Point2f> pointsTo;
cv::KeyPoint::convert(kptsTo, pointsTo);
std::vector< std::vector<size_t> > indices;
std::vector<std::vector<float> > dists;
UASSERT(descriptorsFrom.cols == descriptorsTo.cols);
UASSERT(descriptorsFrom.rows == (int)kptsFrom.size());
UASSERT((int)pointsToMat.rows == descriptorsTo.rows);
UDEBUG("radius search done for guess");
std::set<int> addedWordsTo;
std::set<int> addedWordsFrom;
double bruteForceTotalTime = 0.0;
double bruteForceDescCopy = 0.0;
cv::Mat descriptors(10, descriptorsTo.cols, descriptorsTo.type());
for(unsigned int i = 0; i < cornersProjectedMat.rows; ++i)
int matchedIndexFrom = projectedIndexToDescIndex[i];
info.projectedIDs.push_back(!orignalWordsFromIds.empty()?orignalWordsFromIds[matchedIndexFrom]:matchedIndexFrom);
int matchedIndexTo = -1;
std::vector<int> descriptorsIndices(indices[i].size());
descriptorsTo.row(indices[i].at(j)).copyTo(descriptors.row(oi));
descriptorsIndices[oi++] = indices[i].at(j);
bruteForceDescCopy += bruteForceTimer.ticks();
cv::BFMatcher matcher(descriptors.type()==CV_8U?cv::NORM_HAMMING:cv::NORM_L2SQR, _nnType==5);
std::vector<cv::DMatch> matches;
matcher.match(descriptorsFrom.row(matchedIndexFrom), cv::Mat(descriptors, cv::Range(0, oi)), matches);
matchedIndexTo = descriptorsIndices.at(matches.at(0).trainIdx);
std::vector<std::vector<cv::DMatch> > matches;
matcher.knnMatch(descriptorsFrom.row(matchedIndexFrom), cv::Mat(descriptors, cv::Range(0, oi)), matches, 2);
bruteForceTotalTime+=bruteForceTimer.elapsed();
matchedIndexTo = descriptorsIndices.at(matches[0].at(0).trainIdx);
int id = !orignalWordsFromIds.empty()?orignalWordsFromIds[matchedIndexFrom]:matchedIndexFrom;
addedWordsFrom.insert(addedWordsFrom.end(), matchedIndexFrom);
wordsFrom.insert(wordsFrom.end(), std::make_pair(id, wordsFrom.size()));
if(!kptsFrom.empty())
wordsKptsFrom.push_back(kptsFrom[matchedIndexFrom]);
words3From.push_back(kptsFrom3D[matchedIndexFrom]);
wordsDescFrom.push_back(descriptorsFrom.row(matchedIndexFrom));
if( matchedIndexTo >= 0 &&
	addedWordsTo.find(matchedIndexTo) == addedWordsTo.end())
addedWordsTo.insert(matchedIndexTo);
wordsTo.insert(wordsTo.end(), std::make_pair(id, wordsTo.size()));
wordsKptsTo.push_back(kptsTo[matchedIndexTo]);
wordsDescTo.push_back(descriptorsTo.row(matchedIndexTo));
if(!kptsTo3D.empty())
words3To.push_back(kptsTo3D[matchedIndexTo]);
UDEBUG("bruteForceDescCopy=%fs, bruteForceTotalTime=%fs", bruteForceDescCopy, bruteForceTotalTime);
for(unsigned int i=0; i<kptsFrom3D.size(); ++i)
if(util3d::isFinite(kptsFrom3D[i]) && addedWordsFrom.find(i) == addedWordsFrom.end())
int id = !orignalWordsFromIds.empty()?orignalWordsFromIds[i]:i;
wordsFrom.insert(wordsFrom.end(), std::make_pair(id, wordsFrom.size()));
wordsKptsFrom.push_back(kptsFrom[i]);
wordsDescFrom.push_back(descriptorsFrom.row(i));
words3From.push_back(kptsFrom3D[i]);
int newToId = !orignalWordsFromIds.empty()?fromSignature.getWords().rbegin()->first+1:descriptorsFrom.rows;
for(unsigned int i = 0; i < kptsTo.size(); ++i)
if(addedWordsTo.find(i) == addedWordsTo.end())
wordsTo.insert(wordsTo.end(), std::make_pair(newToId, wordsTo.size()));
wordsKptsTo.push_back(kptsTo[i]);
wordsDescTo.push_back(descriptorsTo.row(i));
if(!kptsTo3D.empty())
words3To.push_back(kptsTo3D[i]);
UWARN("All projected points are outside the camera. Guess (%s) is wrong or images are not overlapping.", guess.prettyPrint().c_str());
if(guessSet && _guessWinSize > 0 && kptsFrom3D.size() && !isCalibrated)
UWARN("Calibration not found! Finding correspondences "
	"with the guess cannot be done, global matching is "
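// Global matching (no usable guess): assign word IDs with the optional Python matcher (_nnType==6), brute force with cross-check or GMS, or the visual word dictionary below.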
std::list<int> fromWordIds;
std::list<int> toWordIds;
#ifdef RTABMAP_PYTHON
std::vector<int> fromWordIdsV(descriptorsFrom.rows);
for (int i = 0; i < descriptorsFrom.rows; ++i)
if(!orignalWordsFromIds.empty())
id = orignalWordsFromIds[i];
fromWordIds.push_back(id);
fromWordIdsV[i] = id;
if(descriptorsTo.rows)
std::vector<int> toWordIdsV(descriptorsTo.rows, 0);
std::vector<cv::DMatch> matches;
#ifdef RTABMAP_PYTHON
if(_nnType == 6 && _pyMatcher &&
	descriptorsTo.cols == descriptorsFrom.cols &&
	descriptorsTo.rows == (int)kptsTo.size() &&
	descriptorsTo.type() == CV_32F &&
	descriptorsFrom.type() == CV_32F &&
	descriptorsFrom.rows == (int)kptsFrom.size() &&
UDEBUG("Python matching");
matches = _pyMatcher->match(descriptorsTo, descriptorsFrom, kptsTo, kptsFrom, models[0].imageSize());
if(_nnType == 6 && _pyMatcher)
UDEBUG("Invalid inputs for Python matching (desc type=%d, only float descriptors supported, multicam not supported), doing bruteforce matching instead.", descriptorsFrom.type());
bool doCrossCheck = true;
#ifdef HAVE_OPENCV_XFEATURES2D
#if CV_MAJOR_VERSION > 3 || (CV_MAJOR_VERSION==3 && CV_MINOR_VERSION >=4 && CV_SUBMINOR_VERSION >= 1)
cv::Size imageSizeFrom;
imageSizeFrom = imageFrom.size();
if(!models.empty() && models[0].imageSize().height > 0 && models[0].imageSize().width > 0 &&
	imageSizeFrom.height > 0 && imageSizeFrom.width > 0)
doCrossCheck = false;
UDEBUG("Invalid inputs for GMS matching, image size should be set for both inputs, doing bruteforce matching instead.");
UDEBUG("BruteForce matching%s", _nnType!=7?" with crosscheck":" with GMS");
cv::BFMatcher matcher(descriptorsFrom.type()==CV_8U?cv::NORM_HAMMING:cv::NORM_L2SQR, doCrossCheck);
#if defined(HAVE_OPENCV_XFEATURES2D) && (CV_MAJOR_VERSION > 3 || (CV_MAJOR_VERSION==3 && CV_MINOR_VERSION >=4 && CV_SUBMINOR_VERSION >= 1))
std::vector<cv::DMatch> matchesGMS;
for(size_t i=0; i<toWordIdsV.size(); ++i)
int toId = toWordIdsV[i];
toId = fromWordIds.back()+i+1;
toWordIds.push_back(toId);
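// Dictionary-based matching: quantize both descriptor sets into a temporary VWDictionary so that descriptors matched to the same word share an ID.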
UDEBUG("VWDictionary knn matching");
if(orignalWordsFromIds.empty())
fromWordIds = dictionary.addNewWords(descriptorsFrom, 1);
for (int i = 0; i < descriptorsFrom.rows; ++i)
int id = orignalWordsFromIds[i];
fromWordIds.push_back(id);
if(descriptorsTo.rows)
toWordIds = dictionary.addNewWords(descriptorsTo, 2);
dictionary.clear(false);
std::multiset<int> fromWordIdsSet(fromWordIds.begin(), fromWordIds.end());
std::multiset<int> toWordIdsSet(toWordIds.begin(), toWordIds.end());
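// Keep only word IDs that occur exactly once in each signature; duplicated IDs are ambiguous and are dropped.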
UASSERT(kptsFrom3D.empty() || fromWordIds.size() == kptsFrom3D.size());
UASSERT(int(fromWordIds.size()) == descriptorsFrom.rows);
for(std::list<int>::iterator iter=fromWordIds.begin(); iter!=fromWordIds.end(); ++iter)
if(fromWordIdsSet.count(*iter) == 1)
wordsFrom.insert(wordsFrom.end(), std::make_pair(*iter, wordsFrom.size()));
if (!kptsFrom.empty())
wordsKptsFrom.push_back(kptsFrom[i]);
if(!kptsFrom3D.empty())
words3From.push_back(kptsFrom3D[i]);
wordsDescFrom.push_back(descriptorsFrom.row(i));
UASSERT(kptsTo3D.size() == 0 || kptsTo3D.size() == kptsTo.size());
UASSERT(toWordIds.size() == kptsTo.size());
UASSERT(int(toWordIds.size()) == descriptorsTo.rows);
for(std::list<int>::iterator iter=toWordIds.begin(); iter!=toWordIds.end(); ++iter)
if(toWordIdsSet.count(*iter) == 1)
wordsTo.insert(wordsTo.end(), std::make_pair(*iter, wordsTo.size()));
wordsKptsTo.push_back(kptsTo[i]);
wordsDescTo.push_back(descriptorsTo.row(i));
if(!kptsTo3D.empty())
words3To.push_back(kptsTo3D[i]);
else if(descriptorsFrom.rows)
UASSERT(kptsFrom3D.empty() || int(kptsFrom3D.size()) == descriptorsFrom.rows);
for(int i=0; i<descriptorsFrom.rows; ++i)
wordsFrom.insert(wordsFrom.end(), std::make_pair(i, wordsFrom.size()));
wordsKptsFrom.push_back(kptsFrom[i]);
wordsDescFrom.push_back(descriptorsFrom.row(i));
if(!kptsFrom3D.empty())
words3From.push_back(kptsFrom3D[i]);
fromSignature.setWords(wordsFrom, wordsKptsFrom, words3From, wordsDescFrom);
toSignature.setWords(wordsTo, wordsKptsTo, words3To, wordsDescTo);
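// Motion estimation on the words set above, in one or both directions (from->to and to->from) depending on _forwardEstimateOnly.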
cv::Mat covariance = cv::Mat::eye(6,6,CV_64FC1);
int inliersCount = 0;
int matchesCount = 0;
info.inliersIDs.clear();
info.matchesIDs.clear();
std::vector<int> inliers[2];
cv::Mat covariances[2];
covariances[0] = cv::Mat::eye(6,6,CV_64FC1);
covariances[1] = cv::Mat::eye(6,6,CV_64FC1);
signatureA = &fromSignature;
signatureB = &toSignature;
signatureA = &toSignature;
signatureB = &fromSignature;
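// 2D->2D estimation: epipolar geometry between the matched keypoints, with the available 3D words used to recover scale and the 3D inlier set.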
UERROR("Calibrated camera required (multi-cameras not supported).");
double variance = 1.0f;
std::vector<int> matchesV;
std::map<int, cv::KeyPoint> wordsA;
std::map<int, cv::Point3f> words3A;
std::map<int, cv::KeyPoint> wordsB;
for(std::map<int, int>::iterator iter=uniqueWordsA.begin(); iter!=uniqueWordsA.end(); ++iter)
words3A.insert(std::make_pair(iter->first, signatureA->getWords3()[iter->second]));
for(std::map<int, int>::iterator iter=uniqueWordsB.begin(); iter!=uniqueWordsB.end(); ++iter)
covariances[dir] *= variance;
inliers[dir] = uKeys(inliers3D);
if(!cameraTransform.isNull())
transforms[dir] = cameraTransform.to3DoF();
transforms[dir] = cameraTransform;
else if(signatureA->getWords().size() == 0)
UERROR("Calibrated camera required. Id=%d Models=%d StereoModels=%d weight=%d",
#ifndef RTABMAP_OPENGV
UERROR("Multi-camera 2D-3D PnP registration is only available if rtabmap is built "
	"with OpenGV dependency. Use 3D-3D registration approach instead for multi-camera.");
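// 2D->3D estimation: PnP RANSAC between the 3D words of one signature and the keypoints of the other (OpenGV required for multi-camera rigs).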
std::vector<int> inliersV;
std::vector<int> matchesV;
std::map<int, cv::Point3f> words3A;
std::map<int, cv::Point3f> words3B;
std::map<int, cv::KeyPoint> wordsB;
for(std::map<int, int>::iterator iter=uniqueWordsA.begin(); iter!=uniqueWordsA.end(); ++iter)
words3A.insert(std::make_pair(iter->first, signatureA->getWords3()[iter->second]));
for(std::map<int, int>::iterator iter=uniqueWordsB.begin(); iter!=uniqueWordsB.end(); ++iter)
words3B.insert(std::make_pair(iter->first, signatureB->getWords3()[iter->second]));
std::vector<CameraModel> models;
UASSERT(models[0].isValidForProjection());
inliers[dir] = inliersV;
UASSERT(models.size() == 1 && models[0].isValidForProjection());
inliers[dir] = inliersV;
UDEBUG("inliers: %d/%d", (int)inliersV.size(), (int)matchesV.size());
if(transforms[dir].isNull())
msg = uFormat("Not enough inliers %d/%d (matches=%d) between %d and %d",
transforms[dir] = transforms[dir].to3DoF();
msg = uFormat("Not enough features in images (old=%d, new=%d, min=%d)",
std::vector<int> inliersV;
std::vector<int> matchesV;
std::map<int, cv::Point3f> words3A;
std::map<int, cv::Point3f> words3B;
for(std::map<int, int>::iterator iter=uniqueWordsA.begin(); iter!=uniqueWordsA.end(); ++iter)
words3A.insert(std::make_pair(iter->first, signatureA->getWords3()[iter->second]));
for(std::map<int, int>::iterator iter=uniqueWordsB.begin(); iter!=uniqueWordsB.end(); ++iter)
words3B.insert(std::make_pair(iter->first, signatureB->getWords3()[iter->second]));
inliers[dir] = inliersV;
UDEBUG("inliers: %d/%d", (int)inliersV.size(), (int)matchesV.size());
if(transforms[dir].isNull())
msg = uFormat("Not enough inliers %d/%d (matches=%d) between %d and %d",
transforms[dir] = transforms[dir].to3DoF();
msg = uFormat("Not enough 3D features in images (old=%d, new=%d, min=%d)",
UDEBUG("from->to=%s", transforms[0].prettyPrint().c_str());
UDEBUG("to->from=%s", transforms[1].prettyPrint().c_str());
std::vector<int> allInliers = inliers[0];
if(inliers[1].size())
std::set<int> allInliersSet(allInliers.begin(), allInliers.end());
unsigned int oi = allInliers.size();
allInliers.resize(allInliers.size() + inliers[1].size());
for(unsigned int i=0; i<inliers[1].size(); ++i)
if(allInliersSet.find(inliers[1][i]) == allInliersSet.end())
allInliers[oi++] = inliers[1][i];
allInliers.resize(oi);
std::vector<int> allMatches = matches[0];
std::set<int> allMatchesSet(allMatches.begin(), allMatches.end());
unsigned int oi = allMatches.size();
allMatches.resize(allMatches.size() + matches[1].size());
for(unsigned int i=0; i<matches[1].size(); ++i)
if(allMatchesSet.find(matches[1][i]) == allMatchesSet.end())
allMatches.resize(oi);
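// Optional refinement with local bundle adjustment on a two-pose graph (node 1 = 'from', node 2 = 'to' at transforms[0]); BA outliers are removed from the inlier set afterwards.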
!transforms[0].isNull() &&
allInliers.size() &&
UDEBUG("Refine with bundle adjustment");
std::map<int, Transform> poses;
std::multimap<int, Link> links;
std::map<int, cv::Point3f> points3DMap;
poses.insert(std::make_pair(2, transforms[0]));
for(int i=0; i<2;++i)
cv::Mat cov = covariances[0].clone();
if(!transforms[1].isNull() && inliers[1].size())
cov = covariances[1].clone();
std::map<int, Transform> optimizedPoses;
std::map<int, std::vector<CameraModel> > models;
std::vector<CameraModel> cameraModelsFrom;
cameraModelsFrom.push_back(cameraModel);
std::vector<CameraModel> cameraModelsTo;
cameraModelsTo.push_back(cameraModel);
models.insert(std::make_pair(1, cameraModelsFrom));
models.insert(std::make_pair(2, cameraModelsTo));
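// Build BA observations: for each inlier word, store its 3D point and its keypoint (with depth) in each signature, recovering the camera index from the keypoint x offset for multi-camera rigs.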
std::map<int, std::map<int, FeatureBA> > wordReferences;
std::set<int> sbaOutliers;
for(unsigned int i=0; i<allInliers.size(); ++i)
int wordId = allInliers[i];
int indexFrom = fromSignature.getWords().find(wordId)->second;
const cv::Point3f & pt3D = fromSignature.getWords3()[indexFrom];
sbaOutliers.insert(wordId);
points3DMap.insert(std::make_pair(wordId, pt3D));
std::map<int, FeatureBA> ptMap;
cv::KeyPoint kpt = fromSignature.getWordsKpts()[indexFrom];
int cameraIndex = 0;
const std::vector<CameraModel> & cam = models.at(1);
float subImageWidth = cam[0].imageWidth();
cameraIndex = int(kpt.pt.x / subImageWidth);
kpt.pt.x = kpt.pt.x - (subImageWidth*float(cameraIndex));
UASSERT(cam[cameraIndex].isValidForProjection());
ptMap.insert(std::make_pair(1, FeatureBA(kpt, depthFrom, cv::Mat(), cameraIndex)));
int indexTo = toSignature.getWords().find(wordId)->second;
cv::KeyPoint kpt = toSignature.getWordsKpts()[indexTo];
int cameraIndex = 0;
const std::vector<CameraModel> & cam = models.at(2);
float subImageWidth = cam[0].imageWidth();
cameraIndex = int(kpt.pt.x / subImageWidth);
kpt.pt.x = kpt.pt.x - (subImageWidth*float(cameraIndex));
UASSERT(cam[cameraIndex].isValidForProjection());
float depthTo = 0.0f;
ptMap.insert(std::make_pair(2, FeatureBA(kpt, depthTo, cv::Mat(), cameraIndex)));
wordReferences.insert(std::make_pair(wordId, ptMap));
optimizedPoses = sba->optimizeBA(1, poses, links, models, points3DMap, wordReferences, &sbaOutliers);
if(optimizedPoses.size() == 2 &&
	!optimizedPoses.begin()->second.isNull() &&
	!optimizedPoses.rbegin()->second.isNull())
UDEBUG("Pose optimization: %s -> %s", transforms[0].prettyPrint().c_str(), optimizedPoses.rbegin()->second.prettyPrint().c_str());
if(sbaOutliers.size())
std::vector<int> newInliers(allInliers.size());
for(unsigned int i=0; i<allInliers.size(); ++i)
if(sbaOutliers.find(allInliers[i]) == sbaOutliers.end())
newInliers[oi++] = allInliers[i];
newInliers.resize(oi);
UDEBUG("BA outliers ratio %f", float(sbaOutliers.size())/float(allInliers.size()));
allInliers = newInliers;
msg = uFormat("Not enough inliers after bundle adjustment %d/%d (matches=%d) between %d and %d",
	(int)allInliers.size(), _minInliers, (int)allInliers.size()+sbaOutliers.size(), fromSignature.id(), toSignature.id());
transforms[0] = optimizedPoses.rbegin()->second;
info.inliersIDs = allInliers;
info.matchesIDs = allMatches;
inliersCount = (int)allInliers.size();
matchesCount = (int)allMatches.size();
if(!transforms[1].isNull())
transforms[1] = transforms[1].inverse();
if(transforms[0].isNull())
covariance = covariances[1];
covariance = (covariances[0]+covariances[1])/2.0f;
covariance = covariances[0];
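// Quality checks on the accepted inliers: mean distance in the 'to' frame (VisMeanInliersDistance) and spatial spread of the inlier pixels via PCA (VisMinInliersDistribution).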
std::vector<CameraModel> cameraModelsTo;
if(cameraModelsTo.size() >= 1 && cameraModelsTo[0].isValidForReprojection())
if(cameraModelsTo[0].imageWidth()>0 && cameraModelsTo[0].imageHeight()>0)
pcaData = cv::Mat(allInliers.size(), 2, CV_32FC1);
UERROR("Invalid calibration image size (%dx%d), cannot compute inliers distribution! (see %s=%f)", cameraModelsTo[0].imageWidth(), cameraModelsTo[0].imageHeight(), Parameters::kVisMinInliersDistribution().c_str(), _minInliersDistributionThr);
std::vector<float> distances;
distances.reserve(allInliers.size());
for(unsigned int i=0; i<allInliers.size(); ++i)
std::multimap<int, int>::const_iterator wordsIter = toSignature.getWords().find(allInliers[i]);
const cv::KeyPoint & kpt = toSignature.getWordsKpts()[wordsIter->second];
int cameraIndex = (int)(kpt.pt.x / cameraModelsTo[0].imageWidth());
UASSERT_MSG(cameraIndex < (int)cameraModelsTo.size(), uFormat("cameraIndex=%d (x=%f models=%d camera width = %d)", cameraIndex, kpt.pt.x, (int)cameraModelsTo.size(), cameraModelsTo[0].imageWidth()).c_str());
const cv::Point3f & pt = toSignature.getWords3()[wordsIter->second];
UASSERT(cameraModelsTo[cameraIndex].isValidForProjection());
distances.push_back(depth);
if(!pcaData.empty())
float * ptr = pcaData.ptr<float>(i, 0);
ptr[0] = (kpt.pt.x-cameraIndex*cameraModelsTo[cameraIndex].imageWidth()-cameraModelsTo[cameraIndex].cx()) / cameraModelsTo[cameraIndex].imageWidth();
ptr[1] = (kpt.pt.y-cameraModelsTo[cameraIndex].cy()) / cameraModelsTo[cameraIndex].imageHeight();
if(!distances.empty())
info.inliersMeanDistance = uMean(distances);
msg = uFormat("The mean distance of the inliers is over %s threshold (%f)",
if(!transform.isNull() && !pcaData.empty())
cv::Mat pcaEigenVectors, pcaEigenValues;
cv::PCA pca_analysis(pcaData, cv::Mat(), CV_PCA_DATA_AS_ROW);
info.inliersDistribution = pca_analysis.eigenvalues.at<float>(0, 1);
msg = uFormat("The distribution (%f) of inliers is under %s threshold (%f)",
msg = uFormat("Missing correspondences for registration (%d->%d). fromWords = %d fromImageEmpty=%d toWords = %d toImageEmpty=%d",
	fromSignature.id(), toSignature.id(),
info.inliers = inliersCount;
info.matches = matchesCount;
info.covariance = covariance;