#include <QApplication>
#include <QHBoxLayout>
#include <opencv2/core/core.hpp>
62 " rtabmap-matcher [Options] from.png to.png\n"
64 " rtabmap-matcher --Vis/CorNNType 5 --Vis/PnPReprojError 3 from.png to.png\n"
65 " rtabmap-matcher --Vis/CorNNDR 0.8 from.png to.png\n"
66 " rtabmap-matcher --Vis/FeatureType 11 --SuperPoint/ModelPath \"superpoint.pt\" --Vis/CorNNType 6 --PyMatcher/Path \"~/SuperGluePretrainedNetwork/rtabmap_superglue.py\" from.png to.png\n"
67 " rtabmap-matcher --Vis/FeatureType 1 --Vis/CorNNType 6 --PyMatcher/Path \"~/OANet/demo/rtabmap_oanet.py\" --PyMatcher/Model \"~/OANet/model/gl3d/sift-4000/model_best.pth\" from.png to.png\n"
68 " rtabmap-matcher --calibration calib.yaml --from_depth from_depth.png --to_depth to_depth.png from.png to.png\n"
69 " rtabmap-matcher --calibration calibFrom.yaml --calibration_to calibTo.yaml --from_depth from_depth.png --to_depth to_depth.png from.png to.png\n"
70 " rtabmap-matcher --calibration calib.yaml --Vis/FeatureType 2 --Vis/MaxFeatures 10000 --Vis/CorNNType 7 from.png to.png\n"
72 "Note: Use \"Vis/\" parameters for feature stuff.\n"
74 " --calibration \"calibration.yaml\" Calibration file. If not set, a\n"
75 " fake one is created from image's\n"
76 " size (which may not be optimal).\n"
77 " Required if from_depth option is set.\n"
78 " Assuming same calibration for both images\n"
79 " if --calibration_to is not set.\n"
80 " --calibration_to \"calibration.yaml\" Calibration file for \"to\" image. If not set,\n"
81 " the same calibration of --calibration option is\n"
82 " used for \"to\" image.\n"
83 " --from_depth \"from_depth.png\" Depth or right image file of the first image.\n"
84 " If not set, 2D->2D estimation is done by \n"
85 " default. For 3D->2D estimation, from_depth\n"
87 " --to_depth \"to_depth.png\" Depth or right image file of the second image.\n"
88 " For 3D->3D estimation, from_depth and to_depth\n"
89 " should be both set.\n"
    std::string fromDepthPath;
    std::string toDepthPath;
    std::string calibrationPath;
    std::string calibrationToPath;
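    // Command line layout: optional flags (each followed by its value) come first,
    // and the last two arguments are always the "from" and "to" images to match, e.g.:
    //   rtabmap-matcher --calibration calib.yaml --from_depth from_depth.png --to_depth to_depth.png from.png to.png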
    for(int i=1; i<argc-2; ++i)
    {
        if(strcmp(argv[i], "--from_depth") == 0)
        {
            ++i; // the value follows the flag
            if(i < argc-2)
            {
                fromDepthPath = argv[i];
            }
            else
            {
                showUsage();
            }
        }
        else if(strcmp(argv[i], "--to_depth") == 0)
        {
            ++i;
            if(i < argc-2)
            {
                toDepthPath = argv[i];
            }
            else
            {
                showUsage();
            }
        }
        else if(strcmp(argv[i], "--calibration") == 0)
        {
            ++i;
            if(i < argc-2)
            {
                calibrationPath = argv[i];
            }
            else
            {
                showUsage();
            }
        }
        else if(strcmp(argv[i], "--calibration_to") == 0)
        {
            ++i;
            if(i < argc-2)
            {
                calibrationToPath = argv[i];
            }
            else
            {
                showUsage();
            }
        }
        else if(strcmp(argv[i], "--help") == 0)
        {
            showUsage();
        }
    }
    printf("   --calibration = \"%s\"\n", calibrationPath.c_str());
    if(!calibrationToPath.empty())
    {
        printf("   --calibration_to = \"%s\"\n", calibrationToPath.c_str());
    }
    printf("   --from_depth = \"%s\"\n", fromDepthPath.c_str());
    printf("   --to_depth = \"%s\"\n", toDepthPath.c_str());
#ifdef RTABMAP_PYTHON
    parameters.insert(ParametersPair(Parameters::kRegRepeatOnce(), "false"));
    cv::Mat imageFrom = cv::imread(argv[argc-2], cv::IMREAD_COLOR);
    cv::Mat imageTo = cv::imread(argv[argc-1], cv::IMREAD_COLOR);
    if(!imageFrom.empty() && !imageTo.empty())
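        // Depth images drive the estimation mode: none -> 2D->2D, from_depth only -> 3D->2D,
        // both from_depth and to_depth -> 3D->3D (see the usage text above). A color (CV_8UC3)
        // "depth" file is treated as the right image of a stereo pair and converted to grayscale.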
        if(!calibrationPath.empty())
        {
            if(!fromDepthPath.empty())
            {
                fromDepth = cv::imread(fromDepthPath, cv::IMREAD_UNCHANGED);
                if(fromDepth.type() == CV_8UC3)
                {
                    cv::cvtColor(fromDepth, fromDepth, cv::COLOR_BGR2GRAY);
                }
                else if(fromDepth.empty())
                {
                    printf("Failed loading from_depth image: \"%s\"!\n", fromDepthPath.c_str());
                }
            }
            if(!toDepthPath.empty())
            {
                toDepth = cv::imread(toDepthPath, cv::IMREAD_UNCHANGED);
                if(toDepth.type() == CV_8UC3)
                {
                    cv::cvtColor(toDepth, toDepth, cv::COLOR_BGR2GRAY);
                }
                else if(toDepth.empty())
                {
                    printf("Failed loading to_depth image: \"%s\"!\n", toDepthPath.c_str());
                }
            }
            UASSERT(toDepth.empty() || (!fromDepth.empty() && fromDepth.type() == toDepth.type()));
        }
        else if(!fromDepthPath.empty() || !toDepthPath.empty())
        {
            printf("A calibration file should be provided if depth images are used!\n");
        }
        if(!fromDepth.empty())
        if(fromDepth.type() != CV_8UC1)
        printf("Failed to load calibration file \"%s\"!\n", calibrationPath.c_str());
        if(calibrationToPath.empty())
        printf("Failed to load calibration file \"%s\"!\n", calibrationPath.c_str());
        if(calibrationToPath.empty())
        stereoModelTo = stereoModel;
        if(!calibrationToPath.empty())
        if(toDepth.empty() || toDepth.type() != CV_8UC1)
        printf("Failed to load calibration file \"%s\"!\n", calibrationToPath.c_str());
        printf("Failed to load calibration file \"%s\"!\n", calibrationToPath.c_str());
        else if(!calibrationPath.empty())
        printf("Failed to load calibration file \"%s\"!\n", calibrationPath.c_str());
        if(!calibrationToPath.empty())
        printf("Failed to load calibration file \"%s\"!\n", calibrationToPath.c_str());
        printf("Using fake calibration model \"from\" (image size=%dx%d): fx=%d fy=%d cx=%d cy=%d\n",
                imageFrom.cols, imageFrom.rows, imageFrom.cols/2, imageFrom.cols/2, imageFrom.cols/2, imageFrom.rows/2);
        model = CameraModel(imageFrom.cols/2, imageFrom.cols/2, imageFrom.cols/2, imageFrom.rows/2);
        model.setImageSize(imageFrom.size());
        printf("Using fake calibration model \"to\" (image size=%dx%d): fx=%d fy=%d cx=%d cy=%d\n",
                imageTo.cols, imageTo.rows, imageTo.cols/2, imageTo.cols/2, imageTo.cols/2, imageTo.rows/2);
        modelTo = CameraModel(imageTo.cols/2, imageTo.cols/2, imageTo.cols/2, imageTo.rows/2);
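        // Pack images, depth/right images and calibration into SensorData objects
        // (ids 1 and 2) used as input for the registration/matching step below.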
        if(model.isValidForProjection())
        {
            printf("Mono calibration model detected.\n");
            dataFrom = SensorData(imageFrom, fromDepth, model, 1);
            dataTo = SensorData(imageTo, toDepth, modelTo, 2);
        }
        else
        {
            printf("Stereo calibration model detected.\n");
            dataFrom = SensorData(imageFrom, fromDepth, stereoModel, 1);
            dataTo = SensorData(imageTo, toDepth, stereoModelTo, 2);
        }
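        // No depth available for the "from" frame: force 2D->2D motion estimation by
        // default (see the printf below for the parameters that are overridden).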
        if(fromDepth.empty())
        {
            parameters.insert(ParametersPair(Parameters::kVisEstimationType(), "2"));
            parameters.insert(ParametersPair(Parameters::kVisEpipolarGeometryVar(), "1"));
            printf("Calibration not set, setting %s=1 and %s=2 by default (2D->2D estimation)\n",
                    Parameters::kVisEpipolarGeometryVar().c_str(), Parameters::kVisEstimationType().c_str());
        }
        double matchingTime = timer.ticks();
        printf("Time matching and motion estimation (excluding feature detection): %fs\n", matchingTime);
            UWARN("PyMatcher is selected for matching but binary features "
                    "are not compatible. BruteForce with CrossCheck (%s=5) "
                    "has been used instead.", Parameters::kVisCorNNType().c_str());
        float reprojError = Parameters::defaultVisPnPReprojError();
        std::string pyMatcherPath;
        dialog.setWindowTitle(QString("Matches (%1/%2) %3 sec [%4=%5 (%6) %7=%8 (%9)%10 %11=%12 (%13) %14=%15]")
                .arg(Parameters::kVisFeatureType().c_str())
                .arg(Parameters::kVisCorNNType().c_str())
                .arg(Parameters::kVisEstimationType().c_str())
                .arg(Parameters::kVisPnPReprojError().c_str())
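        // In rtabmap, getWords() maps a visual word id to an index into getWordsKpts()
        // (2D keypoints) and getWords3() (3D points); the maps below collect the matched
        // keypoints by word id for display.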
        std::map<int, cv::KeyPoint> kptsFrom;
        std::map<int, cv::KeyPoint> kptsTo;
        for(std::map<int, int>::iterator iter=wordsFrom.begin(); iter!=wordsFrom.end(); ++iter)
        {
            kptsFrom.insert(std::make_pair(iter->first, dataFrom.getWordsKpts()[iter->second]));
        }
        for(std::map<int, int>::iterator iter=wordsTo.begin(); iter!=wordsTo.end(); ++iter)
        {
            kptsTo.insert(std::make_pair(iter->first, dataTo.getWordsKpts()[iter->second]));
        }
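            // Build point clouds of the 3D visual words so they can be shown in the
            // 3D viewer (CloudViewer) beside the image views, when it is available.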
            pcl::PointCloud<pcl::PointXYZ>::Ptr cloudWordsFrom(new pcl::PointCloud<pcl::PointXYZ>);
            cloudWordsFrom->resize(points3d.size());
            int i=0;
            for(std::multimap<int, cv::Point3f>::const_iterator iter=points3d.begin();
                iter!=points3d.end();
                ++iter)
            {
                cloudWordsFrom->at(i++) = pcl::PointXYZ(iter->second.x, iter->second.y, iter->second.z);
            }
            if(cloudWordsFrom->size())
            if(cloudWordsFrom->size())
            pcl::PointCloud<pcl::PointXYZ>::Ptr cloudWordsFrom(new pcl::PointCloud<pcl::PointXYZ>);
            cloudWordsFrom->resize(dataFrom.getWords3().size());
            int i=0;
            for(std::multimap<int, int>::const_iterator iter=dataFrom.getWords().begin();
                iter!=dataFrom.getWords().end();
                ++iter)
            {
                const cv::Point3f & pt = dataFrom.getWords3()[iter->second];
                cloudWordsFrom->at(i++) = pcl::PointXYZ(pt.x, pt.y, pt.z);
            }
            if(cloudWordsFrom->size())
            if(cloudWordsFrom->size())
            pcl::PointCloud<pcl::PointXYZ>::Ptr cloudWordsTo(new pcl::PointCloud<pcl::PointXYZ>);
            cloudWordsTo->resize(dataTo.getWords3().size());
            int i=0;
            for(std::multimap<int, int>::const_iterator iter=dataTo.getWords().begin();
                iter!=dataTo.getWords().end();
                ++iter)
            {
                const cv::Point3f & pt = dataTo.getWords3()[iter->second];
                cloudWordsTo->at(i++) = pcl::PointXYZ(pt.x, pt.y, pt.z);
            }
            if(cloudWordsTo->size())
            if(cloudWordsTo->size())
            {
                viewer->addCloud("wordsTo", cloudWordsTo, t, Qt::cyan);
            }
        QBoxLayout * mainLayout = new QHBoxLayout();
        mainLayout->setContentsMargins(0, 0, 0, 0);
        mainLayout->setSpacing(0);
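        // Stack the two image views vertically for landscape images and side by side for
        // portrait images, doubling the minimum dialog width when the 3D viewer is shown.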
        if(imageFrom.cols > imageFrom.rows)
        {
            dialog.setMinimumWidth(640*(viewer?2:1));
            dialog.setMinimumHeight(640*imageFrom.rows/imageFrom.cols*2);
            layout = new QVBoxLayout();
        }
        else
        {
            dialog.setMinimumWidth((640*imageFrom.cols/imageFrom.rows*2)*(viewer?2:1));
            dialog.setMinimumHeight(640);
            layout = new QHBoxLayout();
        }
        layout->setSpacing(0);
        layout->addWidget(viewA, 1);
        layout->addWidget(viewB, 1);
        mainLayout->addLayout(layout, 1);
        if(viewer)
        {
            mainLayout->addWidget(viewer, 1);
        }
        dialog.setLayout(mainLayout);
            if(!fromDepth.empty())
                std::multimap<int, cv::KeyPoint> keypointsFrom;
                std::multimap<int, cv::KeyPoint> keypointsTo;
                keypointsFrom.insert(keypointsFrom.end(), std::make_pair(iter->first, dataFrom.getWordsKpts()[iter->second]));
                keypointsTo.insert(keypointsTo.end(), std::make_pair(iter->first, dataTo.getWordsKpts()[iter->second]));
            std::set<int> inliersSet(info.inliersIDs.begin(), info.inliersIDs.end());
            const QMultiMap<int, KeypointItem*> & wordsA = viewA->getFeatures();
            const QMultiMap<int, KeypointItem*> & wordsB = viewB->getFeatures();
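            // Draw a line between every pair of keypoints sharing the same visual word id
            // (only ids present exactly once in each view); inliers of the estimated
            // transform are handled separately from the other matches.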
            if(wordsA.size() && wordsB.size())
            {
                QList<int> ids = wordsA.uniqueKeys();
                for(int i=0; i<ids.size(); ++i)
                {
                    if(ids[i] > 0 && wordsA.count(ids[i]) == 1 && wordsB.count(ids[i]) == 1)
                    {
                        deltaAY = viewA->height()/scaleAX;
                        deltaAX = viewA->width()/scaleAX;
                        deltaBY = viewB->height()/scaleBX;
                        deltaBX = viewA->width()/scaleBX;
                        if(inliersSet.find(ids[i]) != inliersSet.end())
                                kptA->rect().x()+kptA->rect().width()/2,
                                kptA->rect().y()+kptA->rect().height()/2,
                                kptB->rect().x()/scaleDiff+kptB->rect().width()/scaleDiff/2+deltaAX,
                                kptB->rect().y()/scaleDiff+kptB->rect().height()/scaleDiff/2+deltaAY,

                                kptA->rect().x()*scaleDiff+kptA->rect().width()*scaleDiff/2-deltaBX,
                                kptA->rect().y()*scaleDiff+kptA->rect().height()*scaleDiff/2-deltaBY,
                                kptB->rect().x()+kptB->rect().width()/2,
                                kptB->rect().y()+kptB->rect().height()/2,
        printf("Transform: %s\n", t.prettyPrint().c_str());
        printf("Features: from=%d to=%d\n", (int)dataFrom.getWords().size(), (int)dataTo.getWords().size());
        printf("Matches: %d\n", info.matches);
        printf("Inliers: %d (%s=%d)\n", info.inliers, Parameters::kVisMinInliers().c_str(), reg.getMinInliers());
        printf("Failed loading images %s and %s!\n", argv[argc-2], argv[argc-1]);