OdometryMono.cpp
1 /*
2 Copyright (c) 2010-2016, Mathieu Labbe - IntRoLab - Universite de Sherbrooke
3 All rights reserved.
4 
5 Redistribution and use in source and binary forms, with or without
6 modification, are permitted provided that the following conditions are met:
7  * Redistributions of source code must retain the above copyright
8  notice, this list of conditions and the following disclaimer.
9  * Redistributions in binary form must reproduce the above copyright
10  notice, this list of conditions and the following disclaimer in the
11  documentation and/or other materials provided with the distribution.
12  * Neither the name of the Universite de Sherbrooke nor the
13  names of its contributors may be used to endorse or promote products
14  derived from this software without specific prior written permission.
15 
16 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
17 ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
18 WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
19 DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
20 DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
21 (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
22 LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
23 ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
25 SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 */
27 
30 #include "rtabmap/core/Memory.h"
31 #include "rtabmap/core/Signature.h"
34 #include "rtabmap/core/util3d.h"
35 #include "rtabmap/core/util2d.h"
38 #include "rtabmap/core/Stereo.h"
40 #include "rtabmap/utilite/UTimer.h"
42 #include "rtabmap/utilite/UStl.h"
43 #include "rtabmap/utilite/UMath.h"
44 #include <opencv2/imgproc/imgproc.hpp>
45 #include <opencv2/calib3d/calib3d.hpp>
46 #include <opencv2/video/tracking.hpp>
47 #include <pcl/common/centroid.h>
48 
49 namespace rtabmap {
50 
51 OdometryMono::OdometryMono(const rtabmap::ParametersMap & parameters) :
52  Odometry(parameters),
53  flowWinSize_(Parameters::defaultVisCorFlowWinSize()),
54  flowIterations_(Parameters::defaultVisCorFlowIterations()),
55  flowEps_(Parameters::defaultVisCorFlowEps()),
56  flowMaxLevel_(Parameters::defaultVisCorFlowMaxLevel()),
57  minInliers_(Parameters::defaultVisMinInliers()),
58  iterations_(Parameters::defaultVisIterations()),
59  pnpReprojError_(Parameters::defaultVisPnPReprojError()),
60  pnpFlags_(Parameters::defaultVisPnPFlags()),
61  pnpRefineIterations_(Parameters::defaultVisPnPRefineIterations()),
62  localHistoryMaxSize_(Parameters::defaultOdomF2MMaxSize()),
63  initMinFlow_(Parameters::defaultOdomMonoInitMinFlow()),
64  initMinTranslation_(Parameters::defaultOdomMonoInitMinTranslation()),
65  minTranslation_(Parameters::defaultOdomMonoMinTranslation()),
66  fundMatrixReprojError_(Parameters::defaultVhEpRansacParam1()),
67  fundMatrixConfidence_(Parameters::defaultVhEpRansacParam2()),
68  maxVariance_(Parameters::defaultOdomMonoMaxVariance())
69 {
70  Parameters::parse(parameters, Parameters::kVisCorFlowWinSize(), flowWinSize_);
71  Parameters::parse(parameters, Parameters::kVisCorFlowIterations(), flowIterations_);
72  Parameters::parse(parameters, Parameters::kVisCorFlowEps(), flowEps_);
73  Parameters::parse(parameters, Parameters::kVisCorFlowMaxLevel(), flowMaxLevel_);
74  Parameters::parse(parameters, Parameters::kVisMinInliers(), minInliers_);
75  UASSERT(minInliers_ >= 1);
76  Parameters::parse(parameters, Parameters::kVisIterations(), iterations_);
77  Parameters::parse(parameters, Parameters::kVisPnPReprojError(), pnpReprojError_);
78  Parameters::parse(parameters, Parameters::kVisPnPFlags(), pnpFlags_);
79  Parameters::parse(parameters, Parameters::kVisPnPRefineIterations(), pnpRefineIterations_);
80  Parameters::parse(parameters, Parameters::kOdomF2MMaxSize(), localHistoryMaxSize_);
81 
82  Parameters::parse(parameters, Parameters::kOdomMonoInitMinFlow(), initMinFlow_);
83  Parameters::parse(parameters, Parameters::kOdomMonoInitMinTranslation(), initMinTranslation_);
84  Parameters::parse(parameters, Parameters::kOdomMonoMinTranslation(), minTranslation_);
85  Parameters::parse(parameters, Parameters::kOdomMonoMaxVariance(), maxVariance_);
86 
87  Parameters::parse(parameters, Parameters::kVhEpRansacParam1(), fundMatrixReprojError_);
88  Parameters::parse(parameters, Parameters::kVhEpRansacParam2(), fundMatrixConfidence_);
89 
90  // Setup memory
91  ParametersMap customParameters;
92  float minDepth = Parameters::defaultVisMinDepth();
93  float maxDepth = Parameters::defaultVisMaxDepth();
94  std::string roi = Parameters::defaultVisRoiRatios();
95  Parameters::parse(parameters, Parameters::kVisMinDepth(), minDepth);
96  Parameters::parse(parameters, Parameters::kVisMaxDepth(), maxDepth);
97  Parameters::parse(parameters, Parameters::kVisRoiRatios(), roi);
98  customParameters.insert(ParametersPair(Parameters::kKpMinDepth(), uNumber2Str(minDepth)));
99  customParameters.insert(ParametersPair(Parameters::kKpMaxDepth(), uNumber2Str(maxDepth)));
100  customParameters.insert(ParametersPair(Parameters::kKpRoiRatios(), roi));
101  customParameters.insert(ParametersPair(Parameters::kMemRehearsalSimilarity(), "1.0")); // deactivate rehearsal
102  customParameters.insert(ParametersPair(Parameters::kMemBinDataKept(), "false"));
103  customParameters.insert(ParametersPair(Parameters::kMemSTMSize(), "0"));
104  customParameters.insert(ParametersPair(Parameters::kMemNotLinkedNodesKept(), "false"));
105  customParameters.insert(ParametersPair(Parameters::kKpTfIdfLikelihoodUsed(), "false"));
106  int nn = Parameters::defaultVisCorNNType();
107  float nndr = Parameters::defaultVisCorNNDR();
108  int featureType = Parameters::defaultVisFeatureType();
109  int maxFeatures = Parameters::defaultVisMaxFeatures();
110  Parameters::parse(parameters, Parameters::kVisCorNNType(), nn);
111  Parameters::parse(parameters, Parameters::kVisCorNNDR(), nndr);
112  Parameters::parse(parameters, Parameters::kVisFeatureType(), featureType);
113  Parameters::parse(parameters, Parameters::kVisMaxFeatures(), maxFeatures);
114  customParameters.insert(ParametersPair(Parameters::kKpNNStrategy(), uNumber2Str(nn)));
115  customParameters.insert(ParametersPair(Parameters::kKpNndrRatio(), uNumber2Str(nndr)));
116  customParameters.insert(ParametersPair(Parameters::kKpDetectorStrategy(), uNumber2Str(featureType)));
117  customParameters.insert(ParametersPair(Parameters::kKpMaxFeatures(), uNumber2Str(maxFeatures)));
118 
119  int subPixWinSize = Parameters::defaultVisSubPixWinSize();
120  int subPixIterations = Parameters::defaultVisSubPixIterations();
121  double subPixEps = Parameters::defaultVisSubPixEps();
122  Parameters::parse(parameters, Parameters::kVisSubPixWinSize(), subPixWinSize);
123  Parameters::parse(parameters, Parameters::kVisSubPixIterations(), subPixIterations);
124  Parameters::parse(parameters, Parameters::kVisSubPixEps(), subPixEps);
125  customParameters.insert(ParametersPair(Parameters::kKpSubPixWinSize(), uNumber2Str(subPixWinSize)));
126  customParameters.insert(ParametersPair(Parameters::kKpSubPixIterations(), uNumber2Str(subPixIterations)));
127  customParameters.insert(ParametersPair(Parameters::kKpSubPixEps(), uNumber2Str(subPixEps)));
128 
129  // add only feature stuff
130  for(ParametersMap::const_iterator iter=parameters.begin(); iter!=parameters.end(); ++iter)
131  {
132  if(Parameters::isFeatureParameter(iter->first))
133  {
134  customParameters.insert(*iter);
135  }
136  }
137 
138  memory_ = new Memory(customParameters);
139  if(!memory_->init("", false, ParametersMap()))
140  {
141  UERROR("Error initializing the memory for Mono Odometry.");
142  }
143 
144  bool stereoOpticalFlow = Parameters::defaultStereoOpticalFlow();
145  Parameters::parse(parameters, Parameters::kStereoOpticalFlow(), stereoOpticalFlow);
146  if(stereoOpticalFlow)
147  {
148  stereo_ = new StereoOpticalFlow(parameters);
149  }
150  else
151  {
152  stereo_ = new Stereo(parameters);
153  }
154 }
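// Note on the constructor above: the generic Vis*/Odom* parameters are parsed here and the
// visual ones are remapped onto the Kp*/Mem* parameters of the private Memory instance that
// serves as the feature dictionary (e.g. kVisMaxFeatures -> kKpMaxFeatures), with rehearsal
// and binary-data storage disabled. A minimal construction sketch (example values only,
// not defaults taken from this file):
//
//   ParametersMap params;
//   params.insert(ParametersPair(Parameters::kVisMaxFeatures(), "1000"));            // example value
//   params.insert(ParametersPair(Parameters::kOdomMonoInitMinTranslation(), "0.1")); // example value
//   OdometryMono odom(params);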
155 
156 OdometryMono::~OdometryMono()
157 {
158  delete memory_;
159  delete stereo_;
160 }
161 
162 void OdometryMono::reset(const Transform & initialPose)
163 {
164  Odometry::reset(initialPose);
165  memory_->init("", false, ParametersMap());
166  localMap_.clear();
167  refDepthOrRight_ = cv::Mat();
168  cornersMap_.clear();
169  keyFrameWords3D_.clear();
170  keyFramePoses_.clear();
171 }
172 
173 Transform OdometryMono::computeTransform(SensorData & data, const Transform & guess, OdometryInfo * info)
174 {
175  Transform output;
176 
177  if(data.imageRaw().empty())
178  {
179  UERROR("Image empty! Cannot compute odometry...");
180  return output;
181  }
182 
183  if(!(((data.cameraModels().size() == 1 && data.cameraModels()[0].isValidForProjection()) || data.stereoCameraModel().isValidForProjection())))
184  {
185  UERROR("Odometry cannot be done without calibration or on multi-camera!");
186  return output;
187  }
188 
189 
190  const CameraModel & cameraModel = data.stereoCameraModel().isValidForProjection()?data.stereoCameraModel().left():data.cameraModels()[0];
191 
192  UTimer timer;
193 
194  int inliers = 0;
195  int correspondences = 0;
196  int nFeatures = 0;
197 
198  cv::Mat newFrame;
199  // convert to grayscale
200  if(data.imageRaw().channels() > 1)
201  {
202  cv::cvtColor(data.imageRaw(), newFrame, cv::COLOR_BGR2GRAY);
203  }
204  else
205  {
206  newFrame = data.imageRaw().clone();
207  }
208 
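 // The update proceeds along three mutually exclusive branches:
 //  1) localMap_ already holds triangulated 3D words -> track the new frame by
 //     optical flow + PnP against that local map;
 //  2) only 2D corners from the first key frame exist -> try to initialize the map
 //     from two views (fundamental/essential matrix, triangulation, scale estimation);
 //  3) nothing yet -> extract features from this first frame, keep its corners and
 //     return identity.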
209  if(memory_->getStMem().size() >= 1)
210  {
211  if(localMap_.size())
212  {
213  //PnP
214  UDEBUG("PnP");
215 
216  if(this->isInfoDataFilled() && info)
217  {
218  info->type = 0;
219  }
220 
221  // generate kpts
222  if(memory_->update(SensorData(newFrame)))
223  {
224  UDEBUG("");
225  bool newPtsAdded = false;
226  const Signature * newS = memory_->getLastWorkingSignature();
227  UDEBUG("newWords=%d", (int)newS->getWords().size());
228  nFeatures = (int)newS->getWords().size();
229  if((int)newS->getWords().size() > minInliers_)
230  {
231  cv::Mat K = cameraModel.K();
232  Transform pnpGuess = ((this->getPose() * (guess.isNull()?Transform::getIdentity():guess)) * cameraModel.localTransform()).inverse();
233  cv::Mat R = (cv::Mat_<double>(3,3) <<
234  (double)pnpGuess.r11(), (double)pnpGuess.r12(), (double)pnpGuess.r13(),
235  (double)pnpGuess.r21(), (double)pnpGuess.r22(), (double)pnpGuess.r23(),
236  (double)pnpGuess.r31(), (double)pnpGuess.r32(), (double)pnpGuess.r33());
237  cv::Mat rvec(1,3, CV_64FC1);
238  cv::Rodrigues(R, rvec);
239  cv::Mat tvec = (cv::Mat_<double>(1,3) << (double)pnpGuess.x(), (double)pnpGuess.y(), (double)pnpGuess.z());
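 // pnpGuess is the predicted world-to-camera transform (base pose * motion guess, then the
 // camera's local transform, all inverted), i.e. the convention expected by cv::solvePnP*:
 // it maps the 3D map points into the camera frame. It is converted here to the rvec/tvec
 // form used by the OpenCV PnP and projection functions.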
240 
241  std::vector<cv::Point3f> objectPoints;
242  std::vector<cv::Point2f> imagePoints;
243  std::vector<int> matches;
244 
245  UDEBUG("compute PnP from optical flow");
246 
247  std::vector<int> ids = uKeys(localMap_);
248  objectPoints = uValues(localMap_);
249 
250  // compute last projection
251  UDEBUG("project points to previous image");
252  std::vector<cv::Point2f> prevImagePoints;
253  const Signature * prevS = memory_->getSignature(*(++memory_->getStMem().rbegin()));
254  Transform prevGuess = (keyFramePoses_.at(prevS->id()) * cameraModel.localTransform()).inverse();
255  cv::Mat prevR = (cv::Mat_<double>(3,3) <<
256  (double)prevGuess.r11(), (double)prevGuess.r12(), (double)prevGuess.r13(),
257  (double)prevGuess.r21(), (double)prevGuess.r22(), (double)prevGuess.r23(),
258  (double)prevGuess.r31(), (double)prevGuess.r32(), (double)prevGuess.r33());
259  cv::Mat prevRvec(1,3, CV_64FC1);
260  cv::Rodrigues(prevR, prevRvec);
261  cv::Mat prevTvec = (cv::Mat_<double>(1,3) << (double)prevGuess.x(), (double)prevGuess.y(), (double)prevGuess.z());
262  cv::projectPoints(objectPoints, prevRvec, prevTvec, K, cv::Mat(), prevImagePoints);
263 
264  // compute current projection
265  UDEBUG("project points to current image");
266  cv::projectPoints(objectPoints, rvec, tvec, K, cv::Mat(), imagePoints);
267 
268  //filter points not in the image and set guess from unique correspondences
269  std::vector<cv::Point3f> objectPointsTmp(objectPoints.size());
270  std::vector<cv::Point2f> refCorners(objectPoints.size());
271  std::vector<cv::Point2f> newCorners(objectPoints.size());
272  matches.resize(objectPoints.size());
273  int oi=0;
274  for(unsigned int i=0; i<objectPoints.size(); ++i)
275  {
276  if(uIsInBounds(int(imagePoints[i].x), 0, newFrame.cols) &&
277  uIsInBounds(int(imagePoints[i].y), 0, newFrame.rows) &&
278  uIsInBounds(int(prevImagePoints[i].x), 0, prevS->sensorData().imageRaw().cols) &&
279  uIsInBounds(int(prevImagePoints[i].y), 0, prevS->sensorData().imageRaw().rows))
280  {
281  refCorners[oi] = prevImagePoints[i];
282  newCorners[oi] = imagePoints[i];
283  if(localMap_.count(ids[i]) == 1)
284  {
285  if(prevS->getWords().count(ids[i]) == 1)
286  {
287  // set guess if unique
288  refCorners[oi] = prevS->getWords().find(ids[i])->second.pt;
289  }
290  if(newS->getWords().count(ids[i]) == 1)
291  {
292  // set guess if unique
293  newCorners[oi] = newS->getWords().find(ids[i])->second.pt;
294  }
295  }
296  objectPointsTmp[oi] = objectPoints[i];
297  matches[oi] = ids[i];
298  ++oi;
299  }
300  }
301  objectPointsTmp.resize(oi);
302  refCorners.resize(oi);
303  newCorners.resize(oi);
304  matches.resize(oi);
305 
306  // Refine imagePoints using optical flow
307  std::vector<unsigned char> statusFlowInliers;
308  std::vector<float> err;
309  UDEBUG("cv::calcOpticalFlowPyrLK() begin");
310  cv::calcOpticalFlowPyrLK(
311  prevS->sensorData().imageRaw(),
312  newFrame,
313  refCorners,
314  newCorners,
315  statusFlowInliers,
316  err,
317  cv::Size(flowWinSize_, flowWinSize_), flowMaxLevel_,
318  cv::TermCriteria(cv::TermCriteria::COUNT+cv::TermCriteria::EPS, flowIterations_, flowEps_),
319  cv::OPTFLOW_LK_GET_MIN_EIGENVALS | cv::OPTFLOW_USE_INITIAL_FLOW, 1e-4);
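 // OPTFLOW_USE_INITIAL_FLOW: newCorners already contains the predicted positions
 // (projections of the local 3D map, or the matched word positions when unique), so the
 // pyramidal LK tracker only refines them instead of starting from the reference corners;
 // tracks whose status flag is zero are discarded in the loop below.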
320  UDEBUG("cv::calcOpticalFlowPyrLK() end");
321 
322  objectPoints.resize(statusFlowInliers.size());
323  imagePoints.resize(statusFlowInliers.size());
324  std::vector<int> matchesTmp(statusFlowInliers.size());
325  oi = 0;
326  for(unsigned int i=0; i<statusFlowInliers.size(); ++i)
327  {
328  if(statusFlowInliers[i])
329  {
330  objectPoints[oi] = objectPointsTmp[i];
331  imagePoints[oi] = newCorners[i];
332  matchesTmp[oi] = matches[i];
333  ++oi;
334 
335  if(this->isInfoDataFilled() && info)
336  {
337  cv::KeyPoint kpt;
338  if(newS->getWords().count(matches[i]) == 1)
339  {
340  kpt = newS->getWords().find(matches[i])->second;
341  }
342  kpt.pt = newCorners[i];
343  info->words.insert(std::make_pair(matches[i], kpt));
344  }
345  }
346  }
347  UDEBUG("Flow inliers= %d/%d", oi, (int)statusFlowInliers.size());
348  objectPoints.resize(oi);
349  imagePoints.resize(oi);
350  matchesTmp.resize(oi);
351  matches = matchesTmp;
352 
353  if(this->isInfoDataFilled() && info)
354  {
355  info->reg.matchesIDs.insert(info->reg.matchesIDs.end(), matches.begin(), matches.end());
356  }
357  correspondences = (int)matches.size();
358 
359  if((int)matches.size() < minInliers_)
360  {
361  UWARN("not enough matches (%d < %d)...", (int)matches.size(), minInliers_);
362  }
363  else
364  {
365  //PnPRansac
366  std::vector<int> inliersV;
367  util3d::solvePnPRansac(
368  objectPoints,
369  imagePoints,
370  K,
371  cv::Mat(),
372  rvec,
373  tvec,
374  true,
375  iterations_,
376  pnpReprojError_,
377  0, // min inliers
378  inliersV,
379  pnpFlags_,
380  pnpRefineIterations_);
381 
382  UDEBUG("inliers=%d/%d", (int)inliersV.size(), (int)objectPoints.size());
383 
384  inliers = (int)inliersV.size();
385  if((int)inliersV.size() < minInliers_)
386  {
387  UWARN("PnP not enough inliers (%d < %d), rejecting the transform...", (int)inliersV.size(), minInliers_);
388  }
389  else
390  {
391  cv::Mat R(3,3,CV_64FC1);
392  cv::Rodrigues(rvec, R);
393  Transform pnp = Transform(R.at<double>(0,0), R.at<double>(0,1), R.at<double>(0,2), tvec.at<double>(0),
394  R.at<double>(1,0), R.at<double>(1,1), R.at<double>(1,2), tvec.at<double>(1),
395  R.at<double>(2,0), R.at<double>(2,1), R.at<double>(2,2), tvec.at<double>(2));
396  output = this->getPose().inverse() * pnp.inverse() * cameraModel.localTransform().inverse();
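 // pnp is the refined world-to-camera transform, so pnp.inverse() is the camera pose in the
 // map frame and pnp.inverse() * localTransform().inverse() the corresponding base pose;
 // left-multiplying by getPose().inverse() turns it into the incremental motion since the
 // last update, which is what this method returns.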
397 
398  if(this->isInfoDataFilled() && info && inliersV.size())
399  {
400  info->reg.inliersIDs.resize(inliersV.size());
401  for(unsigned int i=0; i<inliersV.size(); ++i)
402  {
403  info->reg.inliersIDs[i] = matches[inliersV[i]]; // index and ID should match (index starts at 0, ID starts at 1)
404  }
405  }
406 
407  //Find the frame with the most similar features
408  std::set<int> stMem = memory_->getStMem();
409  stMem.erase(newS->id());
410  std::map<int, float> likelihood = memory_->computeLikelihood(newS, std::list<int>(stMem.begin(), stMem.end()));
411  int maxLikelihoodId = -1;
412  float maxLikelihood = 0;
413  for(std::map<int, float>::iterator iter=likelihood.begin(); iter!=likelihood.end(); ++iter)
414  {
415  if(iter->second > maxLikelihood)
416  {
417  maxLikelihood = iter->second;
418  maxLikelihoodId = iter->first;
419  }
420  }
421  UASSERT(maxLikelihoodId != -1);
422 
423  // Add new points to local map
424  const Signature* previousS = memory_->getSignature(maxLikelihoodId);
425  UASSERT(previousS!=0);
426  Transform cameraTransform = keyFramePoses_.at(previousS->id()).inverse()*this->getPose()*output;
427  UDEBUG("cameraTransform guess= %s (norm^2=%f)", cameraTransform.prettyPrint().c_str(), cameraTransform.getNormSquared());
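 // New 3D words are only triangulated when the baseline to the nearest key frame is large
 // enough: with almost no translation the two views give nearly parallel rays and the
 // triangulated depths are ill-conditioned.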
428  if(cameraTransform.getNorm() < minTranslation_)
429  {
430  UINFO("Translation with the nearest frame is too small (%f<%f) to add new points to local map",
431  cameraTransform.getNorm(), minTranslation_);
432  }
433  else
434  {
435 
436  double variance = 0;
437  const std::map<int, cv::Point3f> & previousGuess = keyFrameWords3D_.find(previousS->id())->second;
438  std::map<int, cv::Point3f> inliers3D = util3d::generateWords3DMono(
439  uMultimapToMapUnique(previousS->getWords()),
440  uMultimapToMapUnique(newS->getWords()),
441  cameraModel,
442  cameraTransform,
443  iterations_,
444  pnpReprojError_,
445  pnpFlags_,
446  pnpRefineIterations_,
447  fundMatrixReprojError_,
448  fundMatrixConfidence_,
449  previousGuess,
450  &variance);
451 
452  if((int)inliers3D.size() < minInliers_)
453  {
454  UWARN("Epipolar geometry not enough inliers (%d < %d), rejecting the transform (%s)...",
455  (int)inliers3D.size(), minInliers_, cameraTransform.prettyPrint().c_str());
456  }
457  else if(variance == 0 || variance > maxVariance_)
458  {
459  UWARN("Variance too high %f (max = %f)", variance, maxVariance_);
460  }
461  else
462  {
463  UDEBUG("inliers3D=%d/%d variance= %f", inliers3D.size(), newS->getWords().size(), variance);
464  Transform newPose = keyFramePoses_.at(previousS->id())*cameraTransform;
465  UDEBUG("cameraTransform= %s", cameraTransform.prettyPrint().c_str());
466 
467  std::multimap<int, cv::Point3f> wordsToAdd;
468  for(std::map<int, cv::Point3f>::iterator iter=inliers3D.begin();
469  iter != inliers3D.end();
470  ++iter)
471  {
472  // transform inliers3D in new signature referential
473  iter->second = util3d::transformPoint(iter->second, cameraTransform.inverse());
474 
475  if(!uContains(localMap_, iter->first))
476  {
477  //UDEBUG("Add new point %d to local map", iter->first);
478  cv::Point3f newPt = util3d::transformPoint(iter->second, newPose);
479  wordsToAdd.insert(std::make_pair(iter->first, newPt));
480  }
481  }
482 
483  if((int)wordsToAdd.size())
484  {
485  localMap_.insert(wordsToAdd.begin(), wordsToAdd.end());
486  newPtsAdded = true;
487  UDEBUG("Added %d words", (int)wordsToAdd.size());
488  }
489 
490  if(newPtsAdded)
491  {
492  keyFrameWords3D_.insert(std::make_pair(newS->id(), inliers3D));
493  keyFramePoses_.insert(std::make_pair(newS->id(), newPose));
494 
495  // prune the oldest key frames when the local map exceeds OdomF2MMaxSize (keep at least two signatures)
496  while(localHistoryMaxSize_ && (int)localMap_.size() > localHistoryMaxSize_ && memory_->getStMem().size()>2)
497  {
498  int nodeId = *memory_->getStMem().begin();
499  std::list<int> removedPts;
500  memory_->deleteLocation(nodeId, &removedPts);
501  keyFrameWords3D_.erase(nodeId);
502  keyFramePoses_.erase(nodeId);
503  for(std::list<int>::iterator iter = removedPts.begin(); iter!=removedPts.end(); ++iter)
504  {
505  localMap_.erase(*iter);
506  }
507  }
508  }
509  }
510  }
511  }
512  }
513  }
514 
515  if(!newPtsAdded)
516  {
517  // remove new words from dictionary
518  memory_->deleteLocation(newS->id());
519  }
520  }
521  }
522  else if(cornersMap_.size())
523  {
524  //flow
525 
526  if(this->isInfoDataFilled() && info)
527  {
528  info->type = 1;
529  }
530 
531  const Signature * refS = memory_->getLastWorkingSignature();
532 
533  std::vector<cv::Point2f> refCorners(cornersMap_.size());
534  std::vector<cv::Point2f> refCornersGuess(cornersMap_.size());
535  std::vector<int> cornerIds(cornersMap_.size());
536  int ii=0;
537  for(std::map<int, cv::Point2f>::iterator iter=cornersMap_.begin(); iter!=cornersMap_.end(); ++iter)
538  {
539  std::multimap<int, cv::KeyPoint>::const_iterator jter=refS->getWords().find(iter->first);
540  UASSERT(jter != refS->getWords().end());
541  refCorners[ii] = jter->second.pt;
542  refCornersGuess[ii] = iter->second;
543  cornerIds[ii] = iter->first;
544  ++ii;
545  }
546 
547  UDEBUG("flow");
548  // Find features in the new left image
549  std::vector<unsigned char> statusFlowInliers;
550  std::vector<float> err;
551  UDEBUG("cv::calcOpticalFlowPyrLK() begin");
552  cv::calcOpticalFlowPyrLK(
553  refS->sensorData().imageRaw(),
554  newFrame,
555  refCorners,
556  refCornersGuess,
557  statusFlowInliers,
558  err,
559  cv::Size(flowWinSize_, flowWinSize_), flowMaxLevel_,
560  cv::TermCriteria(cv::TermCriteria::COUNT+cv::TermCriteria::EPS, flowIterations_, flowEps_),
561  cv::OPTFLOW_LK_GET_MIN_EIGENVALS | cv::OPTFLOW_USE_INITIAL_FLOW, 1e-4);
562  UDEBUG("cv::calcOpticalFlowPyrLK() end");
563 
564  UDEBUG("Filtering optical flow outliers...");
565  float flow = 0;
566 
567  if(this->isInfoDataFilled() && info)
568  {
569  info->refCorners = refCorners;
570  info->newCorners = refCornersGuess;
571  }
572 
573  int oi = 0;
574  std::vector<cv::Point2f> tmpRefCorners(statusFlowInliers.size());
575  std::vector<cv::Point2f> newCorners(statusFlowInliers.size());
576  std::vector<int> inliersV(statusFlowInliers.size());
577  std::vector<int> tmpCornersId(statusFlowInliers.size());
578  UASSERT(refCornersGuess.size() == statusFlowInliers.size());
579  UASSERT(refCorners.size() == statusFlowInliers.size());
580  UASSERT(cornerIds.size() == statusFlowInliers.size());
581  for(unsigned int i=0; i<statusFlowInliers.size(); ++i)
582  {
583  if(statusFlowInliers[i])
584  {
585  float dx = refCorners[i].x - refCornersGuess[i].x;
586  float dy = refCorners[i].y - refCornersGuess[i].y;
587  float tmp = std::sqrt(dx*dx + dy*dy);
588  flow+=tmp;
589 
590  tmpRefCorners[oi] = refCorners[i];
591  newCorners[oi] = refCornersGuess[i];
592 
593  inliersV[oi] = i;
594  cornersMap_.at(cornerIds[i]) = refCornersGuess[i];
595  tmpCornersId[oi] = cornerIds[i];
596 
597  ++oi;
598  }
599  else
600  {
601  cornersMap_.erase(cornerIds[i]);
602  }
603  }
604  if(oi)
605  {
606  flow /=float(oi);
607  }
608  tmpRefCorners.resize(oi);
609  newCorners.resize(oi);
610  inliersV.resize((oi));
611  tmpCornersId.resize(oi);
612  refCorners= tmpRefCorners;
613  cornerIds = tmpCornersId;
614 
615  if(this->isInfoDataFilled() && info)
616  {
617  // fill flow matches info
618  info->cornerInliers = inliersV;
619  inliers = (int)inliersV.size();
620  }
621 
622  UDEBUG("Filtering optical flow outliers...done! (inliers=%d/%d)", oi, (int)statusFlowInliers.size());
623 
624  if(flow > initMinFlow_ && oi > minInliers_)
625  {
626  UDEBUG("flow=%f", flow);
627  // compute fundamental matrix
628  UDEBUG("Find fundamental matrix");
629  std::vector<unsigned char> statusFInliers;
630  cv::Mat F = cv::findFundamentalMat(
631  refCorners,
632  newCorners,
633  statusFInliers,
634  cv::RANSAC,
635  fundMatrixReprojError_,
636  fundMatrixConfidence_);
637  //std::cout << "F=" << F << std::endl;
638 
639  if(!F.empty())
640  {
641  UDEBUG("Filtering fundamental matrix outliers...");
642  std::vector<cv::Point2f> tmpNewCorners(statusFInliers.size());
643  std::vector<cv::Point2f> tmpRefCorners(statusFInliers.size());
644  tmpCornersId.resize(statusFInliers.size());
645  oi = 0;
646  UASSERT(newCorners.size() == statusFInliers.size());
647  UASSERT(refCorners.size() == statusFInliers.size());
648  UASSERT(cornerIds.size() == statusFInliers.size());
649  std::vector<int> tmpInliers(statusFInliers.size());
650  for(unsigned int i=0; i<statusFInliers.size(); ++i)
651  {
652  if(statusFInliers[i])
653  {
654  tmpNewCorners[oi] = newCorners[i];
655  tmpRefCorners[oi] = refCorners[i];
656  tmpInliers[oi] = inliersV[i];
657  tmpCornersId[oi] = cornerIds[i];
658  ++oi;
659  }
660  }
661  tmpInliers.resize(oi);
662  tmpNewCorners.resize(oi);
663  tmpRefCorners.resize(oi);
664  tmpCornersId.resize(oi);
665  newCorners = tmpNewCorners;
666  refCorners = tmpRefCorners;
667  inliersV = tmpInliers;
668  cornerIds = tmpCornersId;
669  if(this->isInfoDataFilled() && info)
670  {
671  // update inliers
672  info->cornerInliers = inliersV;
673  inliers = (int)inliersV.size();
674  }
675  UDEBUG("Filtering fundamental matrix outliers...done! (inliers=%d/%d)", oi, (int)statusFInliers.size());
676 
677  if((int)refCorners.size() > minInliers_)
678  {
679  std::vector<cv::Point2f> refCornersRefined;
680  std::vector<cv::Point2f> newCornersRefined;
681  //UDEBUG("Correcting matches...");
682  cv::correctMatches(F, refCorners, newCorners, refCornersRefined, newCornersRefined);
683  UASSERT(refCorners.size() == refCornersRefined.size());
684  UASSERT(newCorners.size() == newCornersRefined.size());
685  refCorners = refCornersRefined;
686  newCorners = newCornersRefined;
687  //UDEBUG("Correcting matches...done!");
688 
689  UDEBUG("Computing P...");
690  cv::Mat K = cameraModel.K();
691 
692  cv::Mat Kinv = K.inv();
693  cv::Mat E = K.t()*F*K;
694 
695  //normalize coordinates
696  cv::Mat x(3, (int)refCorners.size(), CV_64FC1);
697  cv::Mat xp(3, (int)refCorners.size(), CV_64FC1);
698  for(unsigned int i=0; i<refCorners.size(); ++i)
699  {
700  x.at<double>(0, i) = refCorners[i].x;
701  x.at<double>(1, i) = refCorners[i].y;
702  x.at<double>(2, i) = 1;
703 
704  xp.at<double>(0, i) = newCorners[i].x;
705  xp.at<double>(1, i) = newCorners[i].y;
706  xp.at<double>(2, i) = 1;
707  }
708 
709  cv::Mat x_norm = Kinv * x;
710  cv::Mat xp_norm = Kinv * xp;
711  x_norm = x_norm.rowRange(0,2);
712  xp_norm = xp_norm.rowRange(0,2);
713 
714  cv::Mat P = EpipolarGeometry::findPFromE(E, x_norm, xp_norm);
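 // E = K^t * F * K is the essential matrix of the calibrated pair; findPFromE() decomposes
 // it into the candidate [R|t] second-camera matrices and (presumably by a cheirality check
 // on the normalized correspondences) returns the one that puts the points in front of both
 // cameras, with translation known only up to scale.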
715  if(!P.empty())
716  {
717  cv::Mat P0 = cv::Mat::zeros(3, 4, CV_64FC1);
718  P0.at<double>(0,0) = 1;
719  P0.at<double>(1,1) = 1;
720  P0.at<double>(2,2) = 1;
721 
722  UDEBUG("Computing P...done!");
723  //std::cout << "P=" << P << std::endl;
724 
725  cv::Mat R, T;
726  EpipolarGeometry::findRTFromP(P, R, T);
727 
728  UDEBUG("");
729  std::vector<double> reprojErrors;
730  pcl::PointCloud<pcl::PointXYZ>::Ptr cloud;
731  EpipolarGeometry::triangulatePoints(x_norm, xp_norm, P0, P, cloud, reprojErrors);
732 
733  std::vector<cv::Point3f> inliersRef;
734  std::vector<cv::Point3f> inliersRefGuess;
735  std::vector<cv::Point2f> imagePoints(cloud->size());
736  inliersRef.resize(cloud->size());
737  inliersRefGuess.resize(cloud->size());
738  tmpCornersId.resize(cloud->size());
739 
740  oi = 0;
741  UASSERT(newCorners.size() == cloud->size());
742 
743  std::vector<cv::Point3f> newCorners3D;
744 
745  if(!refDepthOrRight_.empty())
746  {
747  if(refDepthOrRight_.type() == CV_8UC1)
748  {
751  std::vector<unsigned char> stereoStatus;
752  std::vector<cv::Point2f> rightCorners;
753  rightCorners = stereo_->computeCorrespondences(
754  refS->sensorData().imageRaw(),
755  refDepthOrRight_,
756  refCorners,
757  stereoStatus);
758 
759  newCorners3D = util3d::generateKeypoints3DStereo(
760  refCorners,
761  rightCorners,
762  m,
763  stereoStatus);
764  }
765  else if(refDepthOrRight_.type() == CV_32FC1 || refDepthOrRight_.type() == CV_16UC1)
766  {
767  std::vector<cv::KeyPoint> tmpKpts;
768  cv::KeyPoint::convert(refCorners, tmpKpts);
769  CameraModel m(cameraModel.fx(), cameraModel.fy(), cameraModel.cx(), cameraModel.cy());
770  newCorners3D = util3d::generateKeypoints3DDepth(
771  tmpKpts,
772  refDepthOrRight_,
773  m);
774  }
775  else
776  {
777  UWARN("Depth or right image type not supported: %d", refDepthOrRight_.type());
778  }
779  }
780 
781  for(unsigned int i=0; i<cloud->size(); ++i)
782  {
783  if(cloud->at(i).z>0)
784  {
785  imagePoints[oi] = newCorners[i];
786  tmpCornersId[oi] = cornerIds[i];
787  inliersRef[oi].x = cloud->at(i).x;
788  inliersRef[oi].y = cloud->at(i).y;
789  inliersRef[oi].z = cloud->at(i).z;
790  if(!newCorners3D.empty())
791  {
792  inliersRefGuess[oi] = newCorners3D.at(i);
793  }
794  ++oi;
795  }
796  }
797  imagePoints.resize(oi);
798  inliersRef.resize(oi);
799  inliersRefGuess.resize(oi);
800  tmpCornersId.resize(oi);
801  cornerIds = tmpCornersId;
802 
803  bool reject = false;
804 
805  //estimate scale
806  float scale = 1;
807  std::multimap<float, float> scales; // <variance, scale>
808  if(!newCorners3D.empty()) // scale known
809  {
810  UASSERT(inliersRefGuess.size() == inliersRef.size());
811  for(unsigned int i=0; i<inliersRef.size(); ++i)
812  {
813  if(util3d::isFinite(inliersRefGuess.at(i)))
814  {
815  float s = inliersRefGuess.at(i).z/inliersRef.at(i).z;
816  std::vector<float> errorSqrdDists(inliersRef.size());
817  oi = 0;
818  for(unsigned int j=0; j<inliersRef.size(); ++j)
819  {
820  if(cloud->at(j).z>0)
821  {
822  cv::Point3f refPt = inliersRef.at(j);
823  refPt.x *= s;
824  refPt.y *= s;
825  refPt.z *= s;
826  const cv::Point3f & guess = inliersRefGuess.at(j);
827  errorSqrdDists[oi++] = uNormSquared(refPt.x-guess.x, refPt.y-guess.y, refPt.z-guess.z);
828  }
829  }
830  errorSqrdDists.resize(oi);
831  if(errorSqrdDists.size() > 2)
832  {
833  std::sort(errorSqrdDists.begin(), errorSqrdDists.end());
834  double median_error_sqr = (double)errorSqrdDists[errorSqrdDists.size () >> 1];
835  float variance = 2.1981 * median_error_sqr;
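 // 2.1981 ~= 1.4826^2, the usual consistency factor that turns a median (squared) error
 // into a Gaussian-consistent variance estimate; the candidate scale with the smallest such
 // variance is kept below (scales is ordered by variance).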
836  //UDEBUG("scale %d = %f variance = %f", i, s, variance);
837  if(variance > 0)
838  {
839  scales.insert(std::make_pair(variance, s));
840  }
841  }
842  }
843  }
844  if(scales.size() == 0)
845  {
846  UWARN("No scales found!?");
847  reject = true;
848  }
849  else
850  {
851  scale = scales.begin()->second;
852  UWARN("scale used = %f (variance=%f scales=%d)", scale, scales.begin()->first, (int)scales.size());
853 
854  UDEBUG("Max noise variance = %f current variance=%f", maxVariance_, scales.begin()->first);
855  if(scales.begin()->first > maxVariance_)
856  {
857  UWARN("Too high variance %f (should be < %f)", scales.begin()->first, maxVariance_);
858  reject = true; // 20 cm for good initialization
859  }
860  }
861 
862  }
863  else if(inliersRef.size())
864  {
865  // find centroid of the cloud and set it to 1 meter
866  Eigen::Vector4f centroid(0,0,0,0);
867  pcl::PointCloud<pcl::PointXYZ> inliersRefCloud;
868  inliersRefCloud.resize(inliersRef.size());
869  for(unsigned int i=0; i<inliersRef.size(); ++i)
870  {
871  inliersRefCloud[i].x = inliersRef[i].x;
872  inliersRefCloud[i].y = inliersRef[i].y;
873  inliersRefCloud[i].z = inliersRef[i].z;
874  }
875  pcl::compute3DCentroid(inliersRefCloud, centroid);
876  scale = 1.0f / centroid[2];
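 // Pure monocular case: the reconstruction scale is arbitrary, so it is fixed by
 // normalizing the centroid of the triangulated cloud to a depth of 1 meter.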
877  }
878  else
879  {
880  reject = true;
881  }
882 
883  if(!reject)
884  {
885  //PnPRansac
886  std::vector<cv::Point3f> objectPoints(inliersRef.size());
887  for(unsigned int i=0; i<inliersRef.size(); ++i)
888  {
889  objectPoints[i].x = inliersRef.at(i).x * scale;
890  objectPoints[i].y = inliersRef.at(i).y * scale;
891  objectPoints[i].z = inliersRef.at(i).z * scale;
892  }
893  cv::Mat rvec;
894  cv::Mat tvec;
895  std::vector<int> inliersPnP;
896  util3d::solvePnPRansac(
897  objectPoints, // 3D points in ref referential
898  imagePoints, // 2D points in new referential
899  K,
900  cv::Mat(),
901  rvec,
902  tvec,
903  false,
904  iterations_,
905  pnpReprojError_,
906  0, // min inliers
907  inliersPnP,
908  pnpFlags_,
909  pnpRefineIterations_);
910 
911  UDEBUG("PnP inliers = %d / %d", (int)inliersPnP.size(), (int)objectPoints.size());
912 
913  cv::Rodrigues(rvec, R);
914  Transform pnp(R.at<double>(0,0), R.at<double>(0,1), R.at<double>(0,2), tvec.at<double>(0),
915  R.at<double>(1,0), R.at<double>(1,1), R.at<double>(1,2), tvec.at<double>(1),
916  R.at<double>(2,0), R.at<double>(2,1), R.at<double>(2,2), tvec.at<double>(2));
917 
918  output = cameraModel.localTransform() * pnp.inverse() * cameraModel.localTransform().inverse();
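 // Here the triangulated points are expressed in the reference camera frame, so
 // pnp.inverse() is directly the camera motion between the two key frames; conjugating by
 // the camera's local transform converts that optical-frame motion into the base frame
 // before it is returned as the initialization transform.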
919  if(output.getNorm() < minTranslation_*5)
920  {
921  reject = true;
922  UWARN("Camera must be moved at least %f m for initialization (current=%f)",
923  minTranslation_*5, output.getNorm());
924  }
925 
926  if(!reject)
927  {
929  std::vector<int> wordsId = uKeys(memory_->getLastWorkingSignature()->getWords());
930  UASSERT(wordsId.size());
931  UASSERT(cornerIds.size() == objectPoints.size());
932  std::map<int, cv::Point3f> keyFrameWords3D;
933  Transform t = this->getPose()*cameraModel.localTransform();
934  for(unsigned int i=0; i<inliersPnP.size(); ++i)
935  {
936  int index =inliersPnP.at(i);
937  int id = cornerIds[index];
938  UASSERT(id > 0 && id <= *wordsId.rbegin());
939  cv::Point3f pt = util3d::transformPoint(
940  objectPoints.at(index),
941  t);
942  localMap_.insert(std::make_pair(id, cv::Point3f(pt.x, pt.y, pt.z)));
943  keyFrameWords3D.insert(std::make_pair(id, pt));
944  }
945 
946  keyFrameWords3D_.insert(std::make_pair(memory_->getLastWorkingSignature()->id(), keyFrameWords3D));
947  keyFramePoses_.insert(std::make_pair(memory_->getLastWorkingSignature()->id(), this->getPose()));
948  }
949  }
950  }
951  else
952  {
953  UERROR("No valid camera matrix found!");
954  }
955  }
956  else
957  {
958  UWARN("Not enough inliers %d/%d", (int)refCorners.size(), minInliers_);
959  }
960  }
961  else
962  {
963  UWARN("Fundamental matrix not found!");
964  }
965  }
966  else
967  {
968  UWARN("Flow not high enough! flow=%f inliers=%d", flow, oi);
969  }
970  }
971  }
972  else
973  {
974  //return Identity
975  output = Transform::getIdentity();
976  if(info)
977  {
978  // a very high variance tells that the new pose is not linked with the previous one
979  info->reg.covariance = cv::Mat::eye(6,6,CV_64FC1)*9999.0;
980  }
981 
982  // generate kpts
983  if(memory_->update(SensorData(newFrame)))
984  {
985  const std::multimap<int, cv::KeyPoint> & words = memory_->getLastWorkingSignature()->getWords();
986  if((int)words.size() > minInliers_)
987  {
988  for(std::multimap<int, cv::KeyPoint>::const_iterator iter=words.begin(); iter!=words.end(); ++iter)
989  {
990  if(words.count(iter->first) == 1)
991  {
992  cornersMap_.insert(std::make_pair(iter->first, iter->second.pt));
993  }
994  }
995  refDepthOrRight_ = data.depthOrRightRaw().clone();
996  keyFramePoses_.insert(std::make_pair(memory_->getLastSignatureId(), Transform::getIdentity()));
997  }
998  else
999  {
1000  UWARN("Too few 2D corners (%d), ignoring new frame...",
1001  (int)words.size());
1003  }
1004  }
1005  else
1006  {
1007  UERROR("Failed creating signature");
1008  }
1009  }
1010 
1011  memory_->emptyTrash();
1012 
1013  if(this->isInfoDataFilled() && info)
1014  {
1015  //info->variance = variance;
1016  info->reg.inliers = inliers;
1017  info->reg.matches = correspondences;
1018  info->features = nFeatures;
1019  info->localMapSize = (int)localMap_.size();
1020  info->localMap = localMap_;
1021  }
1022 
1023  UINFO("Odom update=%fs tf=[%s] inliers=%d/%d, local_map[%d]=%d, accepted=%s",
1024  timer.elapsed(),
1025  output.prettyPrint().c_str(),
1026  inliers,
1027  correspondences,
1028  (int)memory_->getStMem().size(),
1029  (int)localMap_.size(),
1030  !output.isNull()?"true":"false");
1031 
1032  return output;
1033 }
1034 
1035 } // namespace rtabmap
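Usage sketch (not part of the generated listing): OdometryMono is normally driven through the base-class Odometry::process() entry point, which calls computeTransform() above and accumulates the returned incremental motion into the current pose. The header paths, the SensorData constructor and the process() overload below are assumptions about the public rtabmap API of this era and may differ between versions.

    #include <rtabmap/core/odometry/OdometryMono.h>
    #include <rtabmap/core/OdometryInfo.h>
    #include <rtabmap/core/SensorData.h>
    #include <rtabmap/core/CameraModel.h>
    #include <rtabmap/core/Parameters.h>
    #include <rtabmap/core/Transform.h>
    #include <opencv2/core/core.hpp>
    #include <cstdio>
    #include <vector>

    // Minimal monocular odometry loop (sketch): feed calibrated grayscale/BGR frames one by
    // one and read back the integrated pose and registration statistics.
    void runMonoOdometry(const rtabmap::CameraModel & model, const std::vector<cv::Mat> & frames)
    {
        rtabmap::ParametersMap params;
        params.insert(rtabmap::ParametersPair(rtabmap::Parameters::kVisMaxFeatures(), "1000")); // example value
        rtabmap::OdometryMono odom(params);

        for(size_t i = 0; i < frames.size(); ++i)
        {
            rtabmap::SensorData data(frames[i], model, (int)i + 1);
            rtabmap::OdometryInfo info;
            rtabmap::Transform pose = odom.process(data, &info); // null until initialized / on tracking failure
            if(!pose.isNull())
            {
                printf("frame %d: %s (inliers=%d/%d)\n",
                    (int)i + 1, pose.prettyPrint().c_str(), info.reg.inliers, info.reg.matches);
            }
        }
    }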


rtabmap
Author(s): Mathieu Labbe
autogenerated on Wed Jun 5 2019 22:41:32