util3d_features.cpp
/*
Copyright (c) 2010-2016, Mathieu Labbe - IntRoLab - Universite de Sherbrooke
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
    * Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.
    * Neither the name of the Universite de Sherbrooke nor the
      names of its contributors may be used to endorse or promote products
      derived from this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

#include "rtabmap/core/util3d_features.h"

#include "rtabmap/core/util2d.h"
#include "rtabmap/core/util3d.h"
#include "rtabmap/core/util3d_transforms.h"
#include "rtabmap/core/util3d_correspondences.h"
#include "rtabmap/core/EpipolarGeometry.h"

#include "rtabmap/core/util3d_motion_estimation.h"

#include <rtabmap/utilite/ULogger.h>
#include <rtabmap/utilite/UConversion.h>
#include <rtabmap/utilite/UMath.h>
#include <rtabmap/utilite/UStl.h>

#include <opencv2/video/tracking.hpp>
namespace rtabmap
{

namespace util3d
{

std::vector<cv::Point3f> generateKeypoints3DDepth(
		const std::vector<cv::KeyPoint> & keypoints,
		const cv::Mat & depth,
		const CameraModel & cameraModel,
		float minDepth,
		float maxDepth)
{
	UASSERT(cameraModel.isValidForProjection());
	std::vector<CameraModel> models;
	models.push_back(cameraModel);
	return generateKeypoints3DDepth(keypoints, depth, models, minDepth, maxDepth);
}
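
// Usage sketch (not part of the original file): projecting detected keypoints
// to 3D with a single RGB-D camera. "rgb", "depth" and "cameraModel" are
// assumed to exist, with the depth image registered to the color camera; the
// 0.5-4.0 m depth range is an arbitrary choice for illustration:
//
//   std::vector<cv::KeyPoint> kpts;
//   cv::Mat descriptors;
//   cv::ORB::create()->detectAndCompute(rgb, cv::noArray(), kpts, descriptors);
//   std::vector<cv::Point3f> kpts3d = util3d::generateKeypoints3DDepth(
//       kpts, depth, cameraModel, 0.5f, 4.0f);
//   // kpts3d[i] is a NaN point when depth is missing or out of range.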

std::vector<cv::Point3f> generateKeypoints3DDepth(
		const std::vector<cv::KeyPoint> & keypoints,
		const cv::Mat & depth,
		const std::vector<CameraModel> & cameraModels,
		float minDepth,
		float maxDepth)
{
	UASSERT(!depth.empty() && (depth.type() == CV_32FC1 || depth.type() == CV_16UC1));
	UASSERT(cameraModels.size());
	std::vector<cv::Point3f> keypoints3d;
	if(!depth.empty())
	{
		// For a multi-camera setup, the depth image is assumed to be a
		// horizontal concatenation of equal-width sub-images, one per camera model.
		UASSERT(int((depth.cols/cameraModels.size())*cameraModels.size()) == depth.cols);
		float subImageWidth = depth.cols/cameraModels.size();
		keypoints3d.resize(keypoints.size());
		// Scale factors to convert keypoint coordinates from the RGB image
		// resolution to the (possibly smaller) depth image resolution.
		float rgbToDepthFactorX = 1.0f/(cameraModels[0].imageWidth()>0?cameraModels[0].imageWidth()/subImageWidth:1);
		float rgbToDepthFactorY = 1.0f/(cameraModels[0].imageHeight()>0?cameraModels[0].imageHeight()/depth.rows:1);
		float bad_point = std::numeric_limits<float>::quiet_NaN();
		for(unsigned int i=0; i<keypoints.size(); ++i)
		{
			float x = keypoints[i].pt.x*rgbToDepthFactorX;
			float y = keypoints[i].pt.y*rgbToDepthFactorY;
			int cameraIndex = int(x / subImageWidth);
			UASSERT_MSG(cameraIndex >= 0 && cameraIndex < (int)cameraModels.size(),
					uFormat("cameraIndex=%d, models=%d, kpt.x=%f, subImageWidth=%f (Camera model image width=%d)",
							cameraIndex, (int)cameraModels.size(), keypoints[i].pt.x, subImageWidth, cameraModels[0].imageWidth()).c_str());

			pcl::PointXYZ ptXYZ = util3d::projectDepthTo3D(
					cameraModels.size()==1?depth:cv::Mat(depth, cv::Range::all(), cv::Range(subImageWidth*cameraIndex,subImageWidth*(cameraIndex+1))),
					x-subImageWidth*cameraIndex,
					y,
					cameraModels.at(cameraIndex).cx()*rgbToDepthFactorX,
					cameraModels.at(cameraIndex).cy()*rgbToDepthFactorY,
					cameraModels.at(cameraIndex).fx()*rgbToDepthFactorX,
					cameraModels.at(cameraIndex).fy()*rgbToDepthFactorY,
					true);

			cv::Point3f pt(bad_point, bad_point, bad_point);
			if(pcl::isFinite(ptXYZ) &&
				(minDepth < 0.0f || ptXYZ.z > minDepth) &&
				(maxDepth <= 0.0f || ptXYZ.z <= maxDepth))
			{
				pt = cv::Point3f(ptXYZ.x, ptXYZ.y, ptXYZ.z);
				// Transform from the camera optical frame to the robot/base frame.
				if(!cameraModels.at(cameraIndex).localTransform().isNull() &&
				   !cameraModels.at(cameraIndex).localTransform().isIdentity())
				{
					pt = util3d::transformPoint(pt, cameraModels.at(cameraIndex).localTransform());
				}
			}
			keypoints3d.at(i) = pt;
		}
	}
	return keypoints3d;
}
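
// Usage sketch for the multi-camera overload (illustrative; "leftModel",
// "rightModel", "leftDepth", "rightDepth" and "kpts" are assumed): the depth
// images are concatenated horizontally, one equal-width sub-image per model:
//
//   std::vector<CameraModel> models;
//   models.push_back(leftModel);
//   models.push_back(rightModel);
//   cv::Mat combinedDepth;
//   cv::hconcat(leftDepth, rightDepth, combinedDepth);
//   std::vector<cv::Point3f> kpts3d = util3d::generateKeypoints3DDepth(
//       kpts, combinedDepth, models, 0.0f, 0.0f); // 0 = no min/max depth limit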

std::vector<cv::Point3f> generateKeypoints3DDisparity(
		const std::vector<cv::KeyPoint> & keypoints,
		const cv::Mat & disparity,
		const StereoCameraModel & stereoCameraModel,
		float minDepth,
		float maxDepth)
{
	UASSERT(!disparity.empty() && (disparity.type() == CV_16SC1 || disparity.type() == CV_32F));
	UASSERT(stereoCameraModel.isValidForProjection());
	std::vector<cv::Point3f> keypoints3d;
	keypoints3d.resize(keypoints.size());
	float bad_point = std::numeric_limits<float>::quiet_NaN();
	for(unsigned int i=0; i!=keypoints.size(); ++i)
	{
		cv::Point3f tmpPt = util3d::projectDisparityTo3D(
				keypoints[i].pt,
				disparity,
				stereoCameraModel);

		cv::Point3f pt(bad_point, bad_point, bad_point);
		if(util3d::isFinite(tmpPt) &&
			(minDepth < 0.0f || tmpPt.z > minDepth) &&
			(maxDepth <= 0.0f || tmpPt.z <= maxDepth))
		{
			pt = tmpPt;
			// Transform from the left camera optical frame to the robot/base frame.
			if(!stereoCameraModel.left().localTransform().isNull() &&
			   !stereoCameraModel.left().localTransform().isIdentity())
			{
				pt = util3d::transformPoint(pt, stereoCameraModel.left().localTransform());
			}
		}
		keypoints3d.at(i) = pt;
	}
	return keypoints3d;
}
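
// Usage sketch (illustrative; "leftRectified", "rightRectified", "kpts" and
// "stereoModel" are assumed, and util2d::disparityFromStereoImages is assumed
// to return a CV_16SC1 or CV_32F disparity map for the rectified pair):
//
//   cv::Mat disp = util2d::disparityFromStereoImages(leftRectified, rightRectified);
//   std::vector<cv::Point3f> kpts3d = util3d::generateKeypoints3DDisparity(
//       kpts, disp, stereoModel, 0.5f, 10.0f);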

std::vector<cv::Point3f> generateKeypoints3DStereo(
		const std::vector<cv::Point2f> & leftCorners,
		const std::vector<cv::Point2f> & rightCorners,
		const StereoCameraModel & model,
		const std::vector<unsigned char> & mask,
		float minDepth,
		float maxDepth)
{
	UASSERT(leftCorners.size() == rightCorners.size());
	UASSERT(mask.size() == 0 || leftCorners.size() == mask.size());
	UASSERT(model.left().fx() > 0.0f && model.baseline() > 0.0f);

	std::vector<cv::Point3f> keypoints3d;
	keypoints3d.resize(leftCorners.size());
	float bad_point = std::numeric_limits<float>::quiet_NaN();
	for(unsigned int i=0; i<leftCorners.size(); ++i)
	{
		cv::Point3f pt(bad_point, bad_point, bad_point);
		if(mask.empty() || mask[i])
		{
			// Disparity of a rectified stereo pair: horizontal shift between
			// the left and right observations of the same feature.
			float disparity = leftCorners[i].x - rightCorners[i].x;
			if(disparity != 0.0f)
			{
				cv::Point3f tmpPt = util3d::projectDisparityTo3D(
						leftCorners[i],
						disparity,
						model);

				if(util3d::isFinite(tmpPt) &&
					(minDepth < 0.0f || tmpPt.z > minDepth) &&
					(maxDepth <= 0.0f || tmpPt.z <= maxDepth))
				{
					pt = tmpPt;
					if(!model.localTransform().isNull() &&
					   !model.localTransform().isIdentity())
					{
						pt = util3d::transformPoint(pt, model.localTransform());
					}
				}
			}
		}

		keypoints3d.at(i) = pt;
	}
	return keypoints3d;
}
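
// Usage sketch (illustrative; "leftImage", "rightImage", "leftCorners" and
// "stereoModel" are assumed): right-image correspondences tracked with
// pyramidal Lucas-Kanade, using the tracking status as the mask so that lost
// features come back as NaN points:
//
//   std::vector<cv::Point2f> rightCorners;
//   std::vector<unsigned char> status;
//   std::vector<float> err;
//   cv::calcOpticalFlowPyrLK(leftImage, rightImage, leftCorners, rightCorners, status, err);
//   std::vector<cv::Point3f> kpts3d = util3d::generateKeypoints3DStereo(
//       leftCorners, rightCorners, stereoModel, status);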

// cameraTransform: transform from ref to next.
// Returns 3D points expressed in the ref frame.
// If cameraTransform is not null, it is used for triangulation instead of the
// camera transform computed by epipolar geometry.
// When refGuess3D is passed and cameraTransform is null, the scale is
// estimated, and the scaled cloud and camera transform are returned.
std::map<int, cv::Point3f> generateWords3DMono(
		const std::map<int, cv::KeyPoint> & refWords,
		const std::map<int, cv::KeyPoint> & nextWords,
		const CameraModel & cameraModel,
		Transform & cameraTransform,
		int pnpIterations,
		float pnpReprojError,
		int pnpFlags,
		int pnpRefineIterations,
		float ransacParam1,
		float ransacParam2,
		const std::map<int, cv::Point3f> & refGuess3D,
		double * varianceOut)
{
	UASSERT(cameraModel.isValidForProjection());
	std::map<int, cv::Point3f> words3D;
	std::list<std::pair<int, std::pair<cv::KeyPoint, cv::KeyPoint> > > pairs;
	int pairsFound = EpipolarGeometry::findPairs(refWords, nextWords, pairs);
	UDEBUG("pairsFound=%d/%d", pairsFound, int(refWords.size()>nextWords.size()?refWords.size():nextWords.size()));
	if(pairsFound > 8)
	{
		// Estimate the fundamental matrix between the two frames with RANSAC.
		std::vector<unsigned char> status;
		cv::Mat F = EpipolarGeometry::findFFromWords(pairs, status, ransacParam1, ransacParam2);
		if(!F.empty())
		{
			// Get the inliers of the fundamental matrix estimation.
			int oi = 0;
			UASSERT(status.size() == pairs.size());
			std::list<std::pair<int, std::pair<cv::KeyPoint, cv::KeyPoint> > >::iterator iter=pairs.begin();
			std::vector<cv::Point2f> refCorners(status.size());
			std::vector<cv::Point2f> newCorners(status.size());
			std::vector<int> indexes(status.size());
			for(unsigned int i=0; i<status.size(); ++i)
			{
				if(status[i])
				{
					refCorners[oi] = iter->second.first.pt;
					newCorners[oi] = iter->second.second.pt;
					indexes[oi] = iter->first;
					++oi;
				}
				++iter;
			}
			refCorners.resize(oi);
			newCorners.resize(oi);
			indexes.resize(oi);

			UDEBUG("inliers=%d/%d", oi, (int)pairs.size());
			if(oi > 3)
			{
				// Refine the matches so that they satisfy the epipolar
				// constraint exactly.
				std::vector<cv::Point2f> refCornersRefined;
				std::vector<cv::Point2f> newCornersRefined;
				cv::correctMatches(F, refCorners, newCorners, refCornersRefined, newCornersRefined);
				refCorners = refCornersRefined;
				newCorners = newCornersRefined;

				// Homogeneous image coordinates.
				cv::Mat x(3, (int)refCorners.size(), CV_64FC1);
				cv::Mat xp(3, (int)refCorners.size(), CV_64FC1);
				for(unsigned int i=0; i<refCorners.size(); ++i)
				{
					x.at<double>(0, i) = refCorners[i].x;
					x.at<double>(1, i) = refCorners[i].y;
					x.at<double>(2, i) = 1;

					xp.at<double>(0, i) = newCorners[i].x;
					xp.at<double>(1, i) = newCorners[i].y;
					xp.at<double>(2, i) = 1;
				}

				// Essential matrix E = K^T * F * K and normalized image coordinates.
				cv::Mat K = cameraModel.K();
				cv::Mat Kinv = K.inv();
				cv::Mat E = K.t()*F*K;
				cv::Mat x_norm = Kinv * x;
				cv::Mat xp_norm = Kinv * xp;
				x_norm = x_norm.rowRange(0,2);
				xp_norm = xp_norm.rowRange(0,2);

				// Recover the camera matrix P = [R|t] from the essential matrix.
				cv::Mat P = EpipolarGeometry::findPFromE(E, x_norm, xp_norm);
				if(!P.empty())
				{
					cv::Mat P0 = cv::Mat::zeros(3, 4, CV_64FC1);
					P0.at<double>(0,0) = 1;
					P0.at<double>(1,1) = 1;
					P0.at<double>(2,2) = 1;

					bool useCameraTransformGuess = !cameraTransform.isNull();
					// If the camera transform is already set, use it instead of
					// the one computed from epipolar geometry.
					if(useCameraTransformGuess)
					{
						Transform t = (cameraModel.localTransform().inverse()*cameraTransform*cameraModel.localTransform()).inverse();

						if(ULogger::level() == ULogger::kDebug)
						{
							UDEBUG("Guess = %s", t.prettyPrint().c_str());
							UDEBUG("Epipolar = %s", Transform(P).prettyPrint().c_str());
							Transform PT = Transform(P);
							float scale = t.getNorm()/PT.getNorm();
							UDEBUG("Scale= %f", scale);
							PT.x()*=scale;
							PT.y()*=scale;
							PT.z()*=scale;
							UDEBUG("Epipolar scaled= %s", PT.prettyPrint().c_str());
						}

						P = (cv::Mat_<double>(3,4) <<
								(double)t.r11(), (double)t.r12(), (double)t.r13(), (double)t.x(),
								(double)t.r21(), (double)t.r22(), (double)t.r23(), (double)t.y(),
								(double)t.r31(), (double)t.r32(), (double)t.r33(), (double)t.z());
					}

					// Triangulate the points.
					cv::Mat pts4D;
					cv::triangulatePoints(P0, P, x_norm, xp_norm, pts4D);

					UASSERT((int)indexes.size() == pts4D.cols && pts4D.rows == 4);
					for(unsigned int i=0; i<indexes.size(); ++i)
					{
						// Normalize the homogeneous coordinates and keep only
						// points triangulated in front of the camera (z > 0).
						pts4D.col(i) /= pts4D.at<double>(3,i);
						if(pts4D.at<double>(2,i) > 0)
						{
							words3D.insert(std::make_pair(indexes[i], util3d::transformPoint(cv::Point3f(pts4D.at<double>(0,i), pts4D.at<double>(1,i), pts4D.at<double>(2,i)), cameraModel.localTransform())));
						}
					}

					UDEBUG("ref guess=%d", (int)refGuess3D.size());
					if(refGuess3D.size())
					{
						// Scale estimation: find correspondences between the
						// triangulated words and the 3D guess.
						std::vector<cv::Point3f> inliersRef;
						std::vector<cv::Point3f> inliersRefGuess;
						util3d::findCorrespondences(
								words3D,
								refGuess3D,
								inliersRef,
								inliersRefGuess,
								0);

						if(inliersRef.size())
						{
							// Estimate the scale.
							float scale = 1.0f;
							float variance = 1.0f;
							if(!useCameraTransformGuess)
							{
								std::multimap<float, float> scales; // <variance, scale>
								for(unsigned int i=0; i<inliersRef.size(); ++i)
								{
									// Using x as depth, assuming we are in the global frame.
									float s = inliersRefGuess.at(i).x/inliersRef.at(i).x;
									std::vector<float> errorSqrdDists(inliersRef.size());
									for(unsigned int j=0; j<inliersRef.size(); ++j)
									{
										cv::Point3f refPt = inliersRef.at(j);
										refPt.x *= s;
										refPt.y *= s;
										refPt.z *= s;
										const cv::Point3f & newPt = inliersRefGuess.at(j);
										errorSqrdDists[j] = uNormSquared(refPt.x-newPt.x, refPt.y-newPt.y, refPt.z-newPt.z);
									}
									std::sort(errorSqrdDists.begin(), errorSqrdDists.end());
									double median_error_sqr = (double)errorSqrdDists[errorSqrdDists.size() >> 1];
									// Robust variance from the median squared error;
									// 2.1981 = 1.4826^2 (the MAD-to-stddev factor, squared).
									float var = 2.1981 * median_error_sqr;

									scales.insert(std::make_pair(var, s));
								}
								// Keep the scale candidate with the smallest variance.
								scale = scales.begin()->second;
								variance = scales.begin()->first;
							}
							else
							{
								// Compute the variance at scale=1.
								std::vector<float> errorSqrdDists(inliersRef.size());
								for(unsigned int j=0; j<inliersRef.size(); ++j)
								{
									const cv::Point3f & refPt = inliersRef.at(j);
									const cv::Point3f & newPt = inliersRefGuess.at(j);
									errorSqrdDists[j] = uNormSquared(refPt.x-newPt.x, refPt.y-newPt.y, refPt.z-newPt.z);
								}
								std::sort(errorSqrdDists.begin(), errorSqrdDists.end());
								double median_error_sqr = (double)errorSqrdDists[errorSqrdDists.size() >> 1];
								variance = 2.1981 * median_error_sqr;
							}

							UDEBUG("scale used = %f (variance=%f)", scale, variance);
							if(varianceOut)
							{
								*varianceOut = variance;
							}

							if(!useCameraTransformGuess)
							{
								// Apply the estimated scale to the triangulated
								// words and gather 2D-3D correspondences for PnP.
								std::vector<cv::Point3f> objectPoints(indexes.size());
								std::vector<cv::Point2f> imagePoints(indexes.size());
								int oi2=0;
								UASSERT(indexes.size() == newCorners.size());
								for(unsigned int i=0; i<indexes.size(); ++i)
								{
									std::map<int, cv::Point3f>::iterator iter = words3D.find(indexes[i]);
									if(iter!=words3D.end() && util3d::isFinite(iter->second))
									{
										iter->second.x *= scale;
										iter->second.y *= scale;
										iter->second.z *= scale;
										objectPoints[oi2].x = iter->second.x;
										objectPoints[oi2].y = iter->second.y;
										objectPoints[oi2].z = iter->second.z;
										imagePoints[oi2] = newCorners[i];
										++oi2;
									}
								}
								objectPoints.resize(oi2);
								imagePoints.resize(oi2);

								// PnP RANSAC, initialized with the inverse of the
								// camera's local transform as the pose guess.
								Transform guess = cameraModel.localTransform().inverse();
								cv::Mat R = (cv::Mat_<double>(3,3) <<
										(double)guess.r11(), (double)guess.r12(), (double)guess.r13(),
										(double)guess.r21(), (double)guess.r22(), (double)guess.r23(),
										(double)guess.r31(), (double)guess.r32(), (double)guess.r33());
								cv::Mat rvec(1,3, CV_64FC1);
								cv::Rodrigues(R, rvec);
								cv::Mat tvec = (cv::Mat_<double>(1,3) << (double)guess.x(), (double)guess.y(), (double)guess.z());
								std::vector<int> inliersV;
								util3d::solvePnPRansac(
										objectPoints,
										imagePoints,
										K,
										cv::Mat(),
										rvec,
										tvec,
										true,
										pnpIterations,
										pnpReprojError,
										0, // min inliers
										inliersV,
										pnpFlags,
										pnpRefineIterations);

								UDEBUG("PnP inliers = %d / %d", (int)inliersV.size(), (int)objectPoints.size());

								if(inliersV.size())
								{
									cv::Rodrigues(rvec, R);
									Transform pnp(R.at<double>(0,0), R.at<double>(0,1), R.at<double>(0,2), tvec.at<double>(0),
											R.at<double>(1,0), R.at<double>(1,1), R.at<double>(1,2), tvec.at<double>(1),
											R.at<double>(2,0), R.at<double>(2,1), R.at<double>(2,2), tvec.at<double>(2));

									cameraTransform = (cameraModel.localTransform() * pnp).inverse();
								}
								else
								{
									UWARN("No inliers after PnP!");
								}
							}
						}
						else
						{
							UWARN("Cannot compute the scale, no corresponding points between the generated ref words and the guess");
						}
					}
					else if(!useCameraTransformGuess)
					{
						// No 3D guess: recover R and t directly from the camera
						// matrix P computed by epipolar geometry (translation is
						// up to scale).
						cv::Mat R, T;
						EpipolarGeometry::findRTFromP(P, R, T);

						Transform t(R.at<double>(0,0), R.at<double>(0,1), R.at<double>(0,2), T.at<double>(0),
								R.at<double>(1,0), R.at<double>(1,1), R.at<double>(1,2), T.at<double>(1),
								R.at<double>(2,0), R.at<double>(2,1), R.at<double>(2,2), T.at<double>(2));

						cameraTransform = (cameraModel.localTransform() * t).inverse() * cameraModel.localTransform();
					}
				}
			}
		}
	}
	UDEBUG("wordsSet=%d / %d", (int)words3D.size(), (int)refWords.size());

	return words3D;
}
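
// Usage sketch (illustrative; "refWords", "nextWords" and "cameraModel" are
// assumed): triangulating visual words between two monocular frames. The
// parameter values match the defaults declared in the header; without a
// cameraTransform guess or a refGuess3D map, the recovered transform is up to
// scale:
//
//   Transform cameraTransform; // null -> computed by epipolar geometry
//   double variance = 1.0;
//   std::map<int, cv::Point3f> words3D = util3d::generateWords3DMono(
//       refWords, nextWords, cameraModel, cameraTransform,
//       100, 8.0f, 0, 1, 3.0f, 0.99f,
//       std::map<int, cv::Point3f>(), &variance);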

std::multimap<int, cv::KeyPoint> aggregate(
		const std::list<int> & wordIds,
		const std::vector<cv::KeyPoint> & keypoints)
{
	// Pair each word ID with its keypoint; wordIds and keypoints are assumed
	// to have the same size and the same order.
	std::multimap<int, cv::KeyPoint> words;
	std::vector<cv::KeyPoint>::const_iterator kpIter = keypoints.begin();
	for(std::list<int>::const_iterator iter=wordIds.begin(); iter!=wordIds.end(); ++iter)
	{
		words.insert(std::pair<int, cv::KeyPoint>(*iter, *kpIter));
		++kpIter;
	}
	return words;
}
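
// Usage sketch (illustrative; "wordIds" and "kpts" are assumed, one quantized
// word ID per keypoint, e.g. from a visual dictionary): duplicated IDs are
// kept, which is why the result is a multimap:
//
//   std::multimap<int, cv::KeyPoint> words = util3d::aggregate(wordIds, kpts);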

} // namespace util3d

} // namespace rtabmap