tools/Matcher/main.cpp
/*
Copyright (c) 2010-2016, Mathieu Labbe - IntRoLab - Universite de Sherbrooke
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
    * Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.
    * Neither the name of the Universite de Sherbrooke nor the
      names of its contributors may be used to endorse or promote products
      derived from this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

#include <rtabmap/core/Features2d.h>
#include <rtabmap/core/Parameters.h>
#include <rtabmap/core/RegistrationVis.h>
#include <rtabmap/core/Signature.h>
#include <rtabmap/core/VWDictionary.h>
#include <rtabmap/core/util3d.h>
#include <rtabmap/core/util3d_features.h>
#include <rtabmap/gui/CloudViewer.h>
#include <rtabmap/gui/ImageView.h>
#include <rtabmap/gui/KeypointItem.h>
#include <rtabmap/utilite/UConversion.h>
#include <rtabmap/utilite/UCv2Qt.h>
#include <rtabmap/utilite/UDirectory.h>
#include <rtabmap/utilite/UFile.h>
#include <rtabmap/utilite/ULogger.h>
#include <rtabmap/utilite/UStl.h>
#include <rtabmap/utilite/UTimer.h>
#include <cstring>
#include <fstream>
#include <set>
#include <string>
#include <QApplication>
#include <QDialog>
#include <QHBoxLayout>
#include <QMultiMap>
#include <QString>
#include <opencv2/core/core.hpp>

using namespace rtabmap;

void showUsage()
{
    printf("\n\nUsage:\n"
            "   rtabmap-matcher [Options] from.png to.png\n"
            "Examples:\n"
            "   rtabmap-matcher --Vis/CorNNType 5 --Vis/PnPReprojError 3 from.png to.png\n"
            "   rtabmap-matcher --Vis/CorNNDR 0.8 from.png to.png\n"
            "   rtabmap-matcher --Vis/FeatureType 11 --SuperPoint/ModelPath \"superpoint.pt\" --Vis/CorNNType 6 --PyMatcher/Path \"~/SuperGluePretrainedNetwork/rtabmap_superglue.py\" from.png to.png\n"
            "   rtabmap-matcher --Vis/FeatureType 1 --Vis/CorNNType 6 --PyMatcher/Path \"~/OANet/demo/rtabmap_oanet.py\" --PyMatcher/Model \"~/OANet/model/gl3d/sift-4000/model_best.pth\" from.png to.png\n"
            "   rtabmap-matcher --calibration calib.yaml --from_depth from_depth.png --to_depth to_depth.png from.png to.png\n"
            "   rtabmap-matcher --calibration calibFrom.yaml --calibration_to calibTo.yaml --from_depth from_depth.png --to_depth to_depth.png from.png to.png\n"
            "   rtabmap-matcher --calibration calib.yaml --Vis/FeatureType 2 --Vis/MaxFeatures 10000 --Vis/CorNNType 7 from.png to.png\n"
            "\n"
            "Note: Use \"Vis/\" parameters for feature detection/matching options.\n"
            "Options:\n"
            "   --calibration \"calibration.yaml\"     Calibration file. If not set, a\n"
            "                                          fake one is created from the image\n"
            "                                          size (which may not be optimal).\n"
            "                                          Required if the from_depth option is set.\n"
            "                                          The same calibration is used for both images\n"
            "                                          if --calibration_to is not set.\n"
            "   --calibration_to \"calibration.yaml\"  Calibration file for the \"to\" image. If not set,\n"
            "                                          the calibration of the --calibration option is\n"
            "                                          used for the \"to\" image.\n"
            "   --from_depth \"from_depth.png\"        Depth or right image file of the first image.\n"
            "                                          If not set, 2D->2D estimation is done by\n"
            "                                          default. For 3D->2D estimation, from_depth\n"
            "                                          should be set.\n"
            "   --to_depth \"to_depth.png\"            Depth or right image file of the second image.\n"
            "                                          For 3D->3D estimation, from_depth and to_depth\n"
            "                                          should both be set.\n"
            "\n\n"
            "%s\n",
            Parameters::showUsage());
    exit(1);
}

int main(int argc, char * argv[])
{
    if(argc < 3)
    {
        showUsage();
    }

    // Log only warnings and errors on the console
    ULogger::setType(ULogger::kTypeConsole);
    ULogger::setLevel(ULogger::kWarning);

    std::string fromDepthPath;
    std::string toDepthPath;
    std::string calibrationPath;
    std::string calibrationToPath;
    for(int i=1; i<argc-2; ++i)
    {
        if(strcmp(argv[i], "--from_depth") == 0)
        {
            ++i;
            if(i<argc-2)
            {
                fromDepthPath = argv[i];
            }
            else
            {
                showUsage();
            }
        }
        else if(strcmp(argv[i], "--to_depth") == 0)
        {
            ++i;
            if(i<argc-2)
            {
                toDepthPath = argv[i];
            }
            else
            {
                showUsage();
            }
        }
        else if(strcmp(argv[i], "--calibration") == 0)
        {
            ++i;
            if(i<argc-2)
            {
                calibrationPath = argv[i];
            }
            else
            {
                showUsage();
            }
        }
        else if(strcmp(argv[i], "--calibration_to") == 0)
        {
            ++i;
            if(i<argc-2)
            {
                calibrationToPath = argv[i];
            }
            else
            {
                showUsage();
            }
        }
        else if(strcmp(argv[i], "--help") == 0)
        {
            showUsage();
        }
    }

    printf("Options\n");
    printf("   --calibration    = \"%s\"\n", calibrationPath.c_str());
    if(!calibrationToPath.empty())
    {
        printf("   --calibration_to = \"%s\"\n", calibrationToPath.c_str());
    }
    printf("   --from_depth     = \"%s\"\n", fromDepthPath.c_str());
    printf("   --to_depth       = \"%s\"\n", toDepthPath.c_str());

    ParametersMap parameters = Parameters::parseArguments(argc, argv);
    parameters.insert(ParametersPair(Parameters::kRegRepeatOnce(), "false"));

    cv::Mat imageFrom = cv::imread(argv[argc-2], cv::IMREAD_COLOR);
    cv::Mat imageTo = cv::imread(argv[argc-1], cv::IMREAD_COLOR);

    if(!imageFrom.empty() && !imageTo.empty())
    {
        //////////////////
        // Load data
        //////////////////

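        // Depth files are read unchanged: 16-bit/float images are treated as
        // registered depth (mono camera model), while 8-bit grayscale images
        // are treated as the right image of a stereo pair (stereo model).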
        cv::Mat fromDepth;
        cv::Mat toDepth;
        if(!calibrationPath.empty())
        {
            if(!fromDepthPath.empty())
            {
                fromDepth = cv::imread(fromDepthPath, cv::IMREAD_UNCHANGED);
                if(fromDepth.type() == CV_8UC3)
                {
                    cv::cvtColor(fromDepth, fromDepth, cv::COLOR_BGR2GRAY);
                }
                else if(fromDepth.empty())
                {
                    printf("Failed loading from_depth image: \"%s\"!\n", fromDepthPath.c_str());
                }
            }
            if(!toDepthPath.empty())
            {
                toDepth = cv::imread(toDepthPath, cv::IMREAD_UNCHANGED);
                if(toDepth.type() == CV_8UC3)
                {
                    cv::cvtColor(toDepth, toDepth, cv::COLOR_BGR2GRAY);
                }
                else if(toDepth.empty())
                {
                    printf("Failed loading to_depth image: \"%s\"!\n", toDepthPath.c_str());
                }
            }
            UASSERT(toDepth.empty() || (!fromDepth.empty() && fromDepth.type() == toDepth.type()));
        }
        else if(!fromDepthPath.empty() || !toDepthPath.empty())
        {
            printf("A calibration file should be provided if depth images are used!\n");
            showUsage();
        }

        CameraModel model;
        StereoCameraModel stereoModel;
        CameraModel modelTo;
        StereoCameraModel stereoModelTo;
        if(!fromDepth.empty())
        {
            if(fromDepth.type() != CV_8UC1)
            {
                if(!model.load(UDirectory::getDir(calibrationPath), uSplit(UFile::getName(calibrationPath), '.').front()))
                {
                    printf("Failed to load calibration file \"%s\"!\n", calibrationPath.c_str());
                    exit(-1);
                }
                if(calibrationToPath.empty())
                {
                    modelTo = model;
                }
            }
            else // fromDepth.type() == CV_8UC1
            {
                if(!stereoModel.load(UDirectory::getDir(calibrationPath), uSplit(UFile::getName(calibrationPath), '.').front()))
                {
                    printf("Failed to load calibration file \"%s\"!\n", calibrationPath.c_str());
                    exit(-1);
                }
                if(calibrationToPath.empty())
                {
                    stereoModelTo = stereoModel;
                }
            }
            if(!calibrationToPath.empty())
            {
                if(toDepth.empty() || toDepth.type() != CV_8UC1)
                {
                    if(!modelTo.load(UDirectory::getDir(calibrationToPath), uSplit(UFile::getName(calibrationToPath), '.').front()))
                    {
                        printf("Failed to load calibration file \"%s\"!\n", calibrationToPath.c_str());
                        exit(-1);
                    }
                }
                else // toDepth.type() == CV_8UC1
                {
                    if(!stereoModelTo.load(UDirectory::getDir(calibrationToPath), uSplit(UFile::getName(calibrationToPath), '.').front()))
                    {
                        printf("Failed to load calibration file \"%s\"!\n", calibrationToPath.c_str());
                        exit(-1);
                    }
                }
            }
        }
        else if(!calibrationPath.empty())
        {
            if(!model.load(UDirectory::getDir(calibrationPath), uSplit(UFile::getName(calibrationPath), '.').front()))
            {
                printf("Failed to load calibration file \"%s\"!\n", calibrationPath.c_str());
                exit(-1);
            }
            if(!calibrationToPath.empty())
            {
                if(!modelTo.load(UDirectory::getDir(calibrationToPath), uSplit(UFile::getName(calibrationToPath), '.').front()))
                {
                    printf("Failed to load calibration file \"%s\"!\n", calibrationToPath.c_str());
                    exit(-1);
                }
            }
            else
            {
                modelTo = model;
            }
        }
        else
        {
            printf("Using fake calibration model \"from\" (image size=%dx%d): fx=%d fy=%d cx=%d cy=%d\n",
                    imageFrom.cols, imageFrom.rows, imageFrom.cols/2, imageFrom.cols/2, imageFrom.cols/2, imageFrom.rows/2);
            model = CameraModel(imageFrom.cols/2, imageFrom.cols/2, imageFrom.cols/2, imageFrom.rows/2); // Fake model
            model.setImageSize(imageFrom.size());
            printf("Using fake calibration model \"to\" (image size=%dx%d): fx=%d fy=%d cx=%d cy=%d\n",
                    imageTo.cols, imageTo.rows, imageTo.cols/2, imageTo.cols/2, imageTo.cols/2, imageTo.rows/2);
            modelTo = CameraModel(imageTo.cols/2, imageTo.cols/2, imageTo.cols/2, imageTo.rows/2); // Fake model
            modelTo.setImageSize(imageTo.size());
        }

        Signature dataFrom;
        Signature dataTo;
        if(model.isValidForProjection())
        {
            printf("Mono calibration model detected.\n");
            dataFrom = SensorData(imageFrom, fromDepth, model, 1);
            dataTo = SensorData(imageTo, toDepth, modelTo, 2);
        }
        else //stereo
        {
            printf("Stereo calibration model detected.\n");
            dataFrom = SensorData(imageFrom, fromDepth, stereoModel, 1);
            dataTo = SensorData(imageTo, toDepth, stereoModelTo, 2);
        }

        //////////////////
        // Registration
        //////////////////

        if(fromDepth.empty())
        {
            parameters.insert(ParametersPair(Parameters::kVisEstimationType(), "2")); // Set 2D->2D estimation for mono images
            parameters.insert(ParametersPair(Parameters::kVisEpipolarGeometryVar(), "1")); // Unknown scale
            printf("Depth images not set, setting %s=1 and %s=2 by default (2D->2D estimation)\n", Parameters::kVisEpipolarGeometryVar().c_str(), Parameters::kVisEstimationType().c_str());
        }
        RegistrationVis reg(parameters);
        RegistrationInfo info;

        // Run the registration once beforehand so that everything is loaded and
        // the timing below is realistic.
        reg.computeTransformationMod(dataFrom, dataTo, Transform(), &info);

        UTimer timer;
        Transform t = reg.computeTransformationMod(dataFrom, dataTo, Transform(), &info);
        double matchingTime = timer.ticks();
        printf("Time matching and motion estimation: %fs\n", matchingTime);

        //////////////////
        // Visualization
        //////////////////

        if(reg.getNNType()==6 &&
           !dataFrom.getWordsDescriptors().empty() &&
           dataFrom.getWordsDescriptors().type()!=CV_32F)
        {
            UWARN("PyMatcher is selected for matching but binary features "
                  "are not compatible. BruteForce with CrossCheck (%s=5) "
                  "has been used instead.", Parameters::kVisCorNNType().c_str());
        }

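        // Build the Qt dialog: two ImageView widgets show the "from" and "to"
        // images with their features and matching lines; when a transform was
        // found, a CloudViewer additionally shows both clouds aligned with it.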
        QApplication app(argc, argv);
        QDialog dialog;
        float reprojError = Parameters::defaultVisPnPReprojError();
        std::string pyMatcherPath;
        Parameters::parse(parameters, Parameters::kVisPnPReprojError(), reprojError);
        Parameters::parse(parameters, Parameters::kPyMatcherPath(), pyMatcherPath);
        dialog.setWindowTitle(QString("Matches (%1/%2) %3 sec [%4=%5 (%6) %7=%8 (%9)%10 %11=%12 (%13) %14=%15]")
                .arg(info.inliers)
                .arg(info.matches)
                .arg(matchingTime)
                .arg(Parameters::kVisFeatureType().c_str())
                .arg(reg.getDetector()?reg.getDetector()->getType():-1)
                .arg(reg.getDetector()?Feature2D::typeName(reg.getDetector()->getType()).c_str():"?")
                .arg(Parameters::kVisCorNNType().c_str())
                .arg(reg.getNNType())
                .arg(reg.getNNType()<5?VWDictionary::nnStrategyName((VWDictionary::NNStrategy)reg.getNNType()).c_str():
                     reg.getNNType()==5||(reg.getNNType()==6&&!dataFrom.getWordsDescriptors().empty()&& dataFrom.getWordsDescriptors().type()!=CV_32F)?"BFCrossCheck":
                     reg.getNNType()==6?QString(uSplit(UFile::getName(pyMatcherPath), '.').front().c_str()).replace("rtabmap_", ""):
                     reg.getNNType()==7?"GMS":"?")
                .arg(reg.getNNType()<5?QString(" %1=%2").arg(Parameters::kVisCorNNDR().c_str()).arg(reg.getNNDR()):"")
                .arg(Parameters::kVisEstimationType().c_str())
                .arg(reg.getEstimationType())
                .arg(reg.getEstimationType()==0?"3D->3D":reg.getEstimationType()==1?"3D->2D":reg.getEstimationType()==2?"2D->2D":"?")
                .arg(Parameters::kVisPnPReprojError().c_str())
                .arg(reprojError));

        CloudViewer * viewer = 0;
        if(!t.isNull())
        {
            viewer = new CloudViewer(&dialog);
            pcl::PointCloud<pcl::PointXYZRGB>::Ptr cloudFrom = util3d::cloudRGBFromSensorData(dataFrom.sensorData());
            pcl::PointCloud<pcl::PointXYZRGB>::Ptr cloudTo = util3d::cloudRGBFromSensorData(dataTo.sensorData());
            viewer->addCloud(uFormat("cloud_%d", dataFrom.id()), cloudFrom, Transform::getIdentity(), Qt::magenta);
            viewer->addCloud(uFormat("cloud_%d", dataTo.id()), cloudTo, t, Qt::cyan);
            viewer->addOrUpdateCoordinate(uFormat("frame_%d", dataTo.id()), t, 0.2);
            viewer->setGridShown(true);

            if(reg.getEstimationType() == 2)
            {
                // triangulate 3D words based on the transform computed
                std::map<int, int> wordsFrom = uMultimapToMapUnique(dataFrom.getWords());
                std::map<int, int> wordsTo = uMultimapToMapUnique(dataTo.getWords());
                std::map<int, cv::KeyPoint> kptsFrom;
                std::map<int, cv::KeyPoint> kptsTo;
                for(std::map<int, int>::iterator iter=wordsFrom.begin(); iter!=wordsFrom.end(); ++iter)
                {
                    kptsFrom.insert(std::make_pair(iter->first, dataFrom.getWordsKpts()[iter->second]));
                }
                for(std::map<int, int>::iterator iter=wordsTo.begin(); iter!=wordsTo.end(); ++iter)
                {
                    kptsTo.insert(std::make_pair(iter->first, dataTo.getWordsKpts()[iter->second]));
                }
                std::map<int, cv::Point3f> points3d = util3d::generateWords3DMono(
                        kptsFrom,
                        kptsTo,
                        model.isValidForProjection()?model:stereoModel.left(),
                        t);
                pcl::PointCloud<pcl::PointXYZ>::Ptr cloudWordsFrom(new pcl::PointCloud<pcl::PointXYZ>);
                cloudWordsFrom->resize(points3d.size());
                int i=0;
                for(std::map<int, cv::Point3f>::const_iterator iter=points3d.begin();
                    iter!=points3d.end();
                    ++iter)
                {
                    cloudWordsFrom->at(i++) = pcl::PointXYZ(iter->second.x, iter->second.y, iter->second.z);
                }
                if(cloudWordsFrom->size())
                {
                    cloudWordsFrom = rtabmap::util3d::removeNaNFromPointCloud(cloudWordsFrom);
                }
                if(cloudWordsFrom->size())
                {
                    viewer->addCloud("wordsFrom", cloudWordsFrom, Transform::getIdentity(), Qt::yellow);
                    viewer->setCloudPointSize("wordsFrom", 5);
                }
            }
            else
            {
                if(!dataFrom.getWords3().empty())
                {
                    pcl::PointCloud<pcl::PointXYZ>::Ptr cloudWordsFrom(new pcl::PointCloud<pcl::PointXYZ>);
                    cloudWordsFrom->resize(dataFrom.getWords3().size());
                    int i=0;
                    for(std::multimap<int, int>::const_iterator iter=dataFrom.getWords().begin();
                        iter!=dataFrom.getWords().end();
                        ++iter)
                    {
                        const cv::Point3f & pt = dataFrom.getWords3()[iter->second];
                        cloudWordsFrom->at(i++) = pcl::PointXYZ(pt.x, pt.y, pt.z);
                    }
                    if(cloudWordsFrom->size())
                    {
                        cloudWordsFrom = rtabmap::util3d::removeNaNFromPointCloud(cloudWordsFrom);
                    }
                    if(cloudWordsFrom->size())
                    {
                        viewer->addCloud("wordsFrom", cloudWordsFrom, Transform::getIdentity(), Qt::magenta);
                        viewer->setCloudPointSize("wordsFrom", 5);
                    }
                }
                if(!dataTo.getWords3().empty())
                {
                    pcl::PointCloud<pcl::PointXYZ>::Ptr cloudWordsTo(new pcl::PointCloud<pcl::PointXYZ>);
                    cloudWordsTo->resize(dataTo.getWords3().size());
                    int i=0;
                    for(std::multimap<int, int>::const_iterator iter=dataTo.getWords().begin();
                        iter!=dataTo.getWords().end();
                        ++iter)
                    {
                        const cv::Point3f & pt = dataTo.getWords3()[iter->second];
                        cloudWordsTo->at(i++) = pcl::PointXYZ(pt.x, pt.y, pt.z);
                    }
                    if(cloudWordsTo->size())
                    {
                        cloudWordsTo = rtabmap::util3d::removeNaNFromPointCloud(cloudWordsTo);
                    }
                    if(cloudWordsTo->size())
                    {
                        viewer->addCloud("wordsTo", cloudWordsTo, t, Qt::cyan);
                        viewer->setCloudPointSize("wordsTo", 5);
                    }
                }
            }
        }

        QBoxLayout * mainLayout = new QHBoxLayout();
        mainLayout->setContentsMargins(0, 0, 0, 0);
        mainLayout->setSpacing(0);
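        // Stack the two image views vertically when the images are landscape,
        // side by side when they are portrait; the 3D viewer (if any) is placed
        // next to them in the horizontal main layout.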
        QBoxLayout * layout;
        bool vertical = true;
        if(imageFrom.cols > imageFrom.rows)
        {
            dialog.setMinimumWidth(640*(viewer?2:1));
            dialog.setMinimumHeight(640*imageFrom.rows/imageFrom.cols*2);
            layout = new QVBoxLayout();
        }
        else
        {
            dialog.setMinimumWidth((640*imageFrom.cols/imageFrom.rows*2)*(viewer?2:1));
            dialog.setMinimumHeight(640);
            layout = new QHBoxLayout();
            vertical = false;
        }

        ImageView * viewA = new ImageView(&dialog);
        ImageView * viewB = new ImageView(&dialog);

        layout->setSpacing(0);
        layout->addWidget(viewA, 1);
        layout->addWidget(viewB, 1);

        mainLayout->addLayout(layout, 1);
        if(viewer)
        {
            mainLayout->addWidget(viewer, 1);
        }

        dialog.setLayout(mainLayout);

        dialog.show();

        viewA->setImage(uCvMat2QImage(imageFrom));
        viewA->setAlpha(200);
        if(!fromDepth.empty())
        {
            viewA->setImageDepth(uCvMat2QImage(fromDepth, false, uCvQtDepthRedToBlue));
            viewA->setImageDepthShown(true);
        }
        viewB->setImage(uCvMat2QImage(imageTo));
        viewB->setAlpha(200);
        if(!toDepth.empty())
        {
            viewB->setImageDepth(uCvMat2QImage(toDepth, false, uCvQtDepthRedToBlue));
            viewB->setImageDepthShown(true);
        }
        std::multimap<int, cv::KeyPoint> keypointsFrom;
        std::multimap<int, cv::KeyPoint> keypointsTo;
        if(!dataFrom.getWordsKpts().empty())
        {
            for(std::multimap<int, int>::const_iterator iter=dataFrom.getWords().begin(); iter!=dataFrom.getWords().end(); ++iter)
            {
                keypointsFrom.insert(keypointsFrom.end(), std::make_pair(iter->first, dataFrom.getWordsKpts()[iter->second]));
            }
        }
        if(!dataTo.getWordsKpts().empty())
        {
            for(std::multimap<int, int>::const_iterator iter=dataTo.getWords().begin(); iter!=dataTo.getWords().end(); ++iter)
            {
                keypointsTo.insert(keypointsTo.end(), std::make_pair(iter->first, dataTo.getWordsKpts()[iter->second]));
            }
        }
        viewA->setFeatures(keypointsFrom);
        viewB->setFeatures(keypointsTo);
        std::set<int> inliersSet(info.inliersIDs.begin(), info.inliersIDs.end());
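
        // For every word ID that appears exactly once in both images, draw a line
        // between the corresponding keypoints in each view; inliers of the motion
        // estimation use the matching-feature color, the rest the matching-line color.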
        const QMultiMap<int, KeypointItem*> & wordsA = viewA->getFeatures();
        const QMultiMap<int, KeypointItem*> & wordsB = viewB->getFeatures();
        if(wordsA.size() && wordsB.size())
        {
            QList<int> ids = wordsA.uniqueKeys();
            for(int i=0; i<ids.size(); ++i)
            {
                if(ids[i] > 0 && wordsA.count(ids[i]) == 1 && wordsB.count(ids[i]) == 1)
                {
                    // Draw lines between corresponding features...
                    float scaleAX = viewA->viewScale();
                    float scaleBX = viewB->viewScale();

                    float scaleDiff = viewA->viewScale() / viewB->viewScale();
                    float deltaAX = 0;
                    float deltaAY = 0;

                    if(vertical)
                    {
                        deltaAY = viewA->height()/scaleAX;
                    }
                    else
                    {
                        deltaAX = viewA->width()/scaleAX;
                    }

                    float deltaBX = 0;
                    float deltaBY = 0;

                    if(vertical)
                    {
                        deltaBY = viewB->height()/scaleBX;
                    }
                    else
                    {
                        deltaBX = viewA->width()/scaleBX;
                    }

                    const KeypointItem * kptA = wordsA.value(ids[i]);
                    const KeypointItem * kptB = wordsB.value(ids[i]);

                    QColor cA = viewA->getDefaultMatchingLineColor();
                    QColor cB = viewB->getDefaultMatchingLineColor();
                    if(inliersSet.find(ids[i])!=inliersSet.end())
                    {
                        cA = viewA->getDefaultMatchingFeatureColor();
                        cB = viewB->getDefaultMatchingFeatureColor();
                        viewA->setFeatureColor(ids[i], viewA->getDefaultMatchingFeatureColor());
                        viewB->setFeatureColor(ids[i], viewB->getDefaultMatchingFeatureColor());
                    }
                    else
                    {
                        viewA->setFeatureColor(ids[i], viewA->getDefaultMatchingLineColor());
                        viewB->setFeatureColor(ids[i], viewB->getDefaultMatchingLineColor());
                    }

                    viewA->addLine(
                            kptA->rect().x()+kptA->rect().width()/2,
                            kptA->rect().y()+kptA->rect().height()/2,
                            kptB->rect().x()/scaleDiff+kptB->rect().width()/scaleDiff/2+deltaAX,
                            kptB->rect().y()/scaleDiff+kptB->rect().height()/scaleDiff/2+deltaAY,
                            cA);

                    viewB->addLine(
                            kptA->rect().x()*scaleDiff+kptA->rect().width()*scaleDiff/2-deltaBX,
                            kptA->rect().y()*scaleDiff+kptA->rect().height()*scaleDiff/2-deltaBY,
                            kptB->rect().x()+kptB->rect().width()/2,
                            kptB->rect().y()+kptB->rect().height()/2,
                            cB);
                }
            }
            viewA->update();
            viewB->update();
        }

        printf("Transform: %s\n", t.prettyPrint().c_str());
        printf("Features: from=%d to=%d\n", (int)dataFrom.getWords().size(), (int)dataTo.getWords().size());
        printf("Matches: %d\n", info.matches);
        printf("Inliers: %d (%s=%d)\n", info.inliers, Parameters::kVisMinInliers().c_str(), reg.getMinInliers());
        app.exec();
        delete viewer;
    }
    else
    {
        printf("Failed loading images %s and %s!\n", argv[argc-2], argv[argc-1]);
    }

    return 0;
}