util3d_motion_estimation.cpp
/*
Copyright (c) 2010-2016, Mathieu Labbe - IntRoLab - Universite de Sherbrooke
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
    * Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.
    * Neither the name of the Universite de Sherbrooke nor the
      names of its contributors may be used to endorse or promote products
      derived from this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

#include "rtabmap/core/util3d_motion_estimation.h"

#include "rtabmap/utilite/UStl.h"
#include "rtabmap/utilite/UMath.h"
#include "rtabmap/utilite/ULogger.h"
#include "rtabmap/core/util3d_transforms.h"
#include "rtabmap/core/util3d_registration.h"
#include "rtabmap/core/util3d_correspondences.h"
#include "rtabmap/core/util3d.h"

#include <pcl/common/common.h>

#include "opencv/solvepnp.h"

namespace rtabmap
{

namespace util3d
{

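// Estimate the 3D->2D motion: match the 3D words of frame A to the 2D keypoints
// of frame B by id, then run PnP RANSAC seeded with the given guess. If words3B
// is provided, the 6x6 covariance is scaled from the 3D residuals of the inliers;
// otherwise it is scaled from the RMS reprojection error of the inliers.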
Transform estimateMotion3DTo2D(
        const std::map<int, cv::Point3f> & words3A,
        const std::map<int, cv::KeyPoint> & words2B,
        const CameraModel & cameraModel,
        int minInliers,
        int iterations,
        double reprojError,
        int flagsPnP,
        int refineIterations,
        const Transform & guess,
        const std::map<int, cv::Point3f> & words3B,
        cv::Mat * covariance,
        std::vector<int> * matchesOut,
        std::vector<int> * inliersOut)
{
    UASSERT(cameraModel.isValidForProjection());
    UASSERT(!guess.isNull());
    Transform transform;
    std::vector<int> matches, inliers;

    if(covariance)
    {
        *covariance = cv::Mat::eye(6,6,CV_64FC1);
    }

    // find correspondences
    std::vector<int> ids = uKeys(words2B);
    std::vector<cv::Point3f> objectPoints(ids.size());
    std::vector<cv::Point2f> imagePoints(ids.size());
    int oi=0;
    matches.resize(ids.size());
    for(unsigned int i=0; i<ids.size(); ++i)
    {
        std::map<int, cv::Point3f>::const_iterator iter=words3A.find(ids[i]);
        if(iter != words3A.end() && util3d::isFinite(iter->second))
        {
            const cv::Point3f & pt = iter->second;
            objectPoints[oi].x = pt.x;
            objectPoints[oi].y = pt.y;
            objectPoints[oi].z = pt.z;
            imagePoints[oi] = words2B.find(ids[i])->second.pt;
            matches[oi++] = ids[i];
        }
    }

    objectPoints.resize(oi);
    imagePoints.resize(oi);
    matches.resize(oi);

    UDEBUG("words3A=%d words2B=%d matches=%d words3B=%d guess=%s",
            (int)words3A.size(), (int)words2B.size(), (int)matches.size(), (int)words3B.size(), guess.prettyPrint().c_str());

    if((int)matches.size() >= minInliers)
    {
        //PnPRansac
        cv::Mat K = cameraModel.K();
        cv::Mat D = cameraModel.D();
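        // Express the guess in the camera frame and invert it so that rvec/tvec
        // map the 3D points into the camera, as expected by solvePnP.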
        Transform guessCameraFrame = (guess * cameraModel.localTransform()).inverse();
        cv::Mat R = (cv::Mat_<double>(3,3) <<
                (double)guessCameraFrame.r11(), (double)guessCameraFrame.r12(), (double)guessCameraFrame.r13(),
                (double)guessCameraFrame.r21(), (double)guessCameraFrame.r22(), (double)guessCameraFrame.r23(),
                (double)guessCameraFrame.r31(), (double)guessCameraFrame.r32(), (double)guessCameraFrame.r33());

        cv::Mat rvec(1,3, CV_64FC1);
        cv::Rodrigues(R, rvec);
        cv::Mat tvec = (cv::Mat_<double>(1,3) <<
                (double)guessCameraFrame.x(), (double)guessCameraFrame.y(), (double)guessCameraFrame.z());

        solvePnPRansac(
                objectPoints,
                imagePoints,
                K,
                D,
                rvec,
                tvec,
                true,
                iterations,
                reprojError,
                minInliers, // min inliers
                inliers,
                flagsPnP,
                refineIterations);

        if((int)inliers.size() >= minInliers)
        {
            cv::Rodrigues(rvec, R);
            Transform pnp(R.at<double>(0,0), R.at<double>(0,1), R.at<double>(0,2), tvec.at<double>(0),
                          R.at<double>(1,0), R.at<double>(1,1), R.at<double>(1,2), tvec.at<double>(1),
                          R.at<double>(2,0), R.at<double>(2,1), R.at<double>(2,2), tvec.at<double>(2));

            transform = (cameraModel.localTransform() * pnp).inverse();

            // compute variance (like in PCL computeVariance() method of sac_model.h)
            if(covariance && words3B.size())
            {
                std::vector<float> errorSqrdDists(inliers.size());
                std::vector<float> errorSqrdAngles(inliers.size());
                oi = 0;
                for(unsigned int i=0; i<inliers.size(); ++i)
                {
                    std::map<int, cv::Point3f>::const_iterator iter = words3B.find(matches[inliers[i]]);
                    if(iter != words3B.end() && util3d::isFinite(iter->second))
                    {
                        const cv::Point3f & objPt = objectPoints[inliers[i]];
                        cv::Point3f newPt = util3d::transformPoint(iter->second, transform);
                        errorSqrdDists[oi] = uNormSquared(objPt.x-newPt.x, objPt.y-newPt.y, objPt.z-newPt.z);

                        Eigen::Vector4f v1(objPt.x - transform.x(), objPt.y - transform.y(), objPt.z - transform.z(), 0);
                        Eigen::Vector4f v2(newPt.x - transform.x(), newPt.y - transform.y(), newPt.z - transform.z(), 0);
                        errorSqrdAngles[oi++] = pcl::getAngle3D(v1, v2);
                    }
                }

                errorSqrdDists.resize(oi);
                errorSqrdAngles.resize(oi);
                if(errorSqrdDists.size())
                {
                    std::sort(errorSqrdDists.begin(), errorSqrdDists.end());
                    //divide by 4 instead of 2 to ignore very very far features (stereo)
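                    // 2.1981 = 1.4826^2, the squared factor that converts a median of
                    // squared residuals into a variance estimate, as in PCL's computeVariance()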
                    double median_error_sqr = 2.1981 * (double)errorSqrdDists[errorSqrdDists.size () >> 2];
                    UASSERT(uIsFinite(median_error_sqr));
                    (*covariance)(cv::Range(0,3), cv::Range(0,3)) *= median_error_sqr;
                    std::sort(errorSqrdAngles.begin(), errorSqrdAngles.end());
                    median_error_sqr = 2.1981 * (double)errorSqrdAngles[errorSqrdAngles.size () >> 2];
                    UASSERT(uIsFinite(median_error_sqr));
                    (*covariance)(cv::Range(3,6), cv::Range(3,6)) *= median_error_sqr;
                }
                else
                {
                    UWARN("Not enough close points to compute covariance!");
                }
            }
            else if(covariance)
            {
                // compute variance, which is the rms of reprojection errors
                std::vector<cv::Point2f> imagePointsReproj;
                cv::projectPoints(objectPoints, rvec, tvec, K, cv::Mat(), imagePointsReproj);
                float err = 0.0f;
                for(unsigned int i=0; i<inliers.size(); ++i)
                {
                    err += uNormSquared(imagePoints.at(inliers[i]).x - imagePointsReproj.at(inliers[i]).x, imagePoints.at(inliers[i]).y - imagePointsReproj.at(inliers[i]).y);
                }
                UASSERT(uIsFinite(err));
                *covariance *= std::sqrt(err/float(inliers.size()));
            }
        }
    }

    if(matchesOut)
    {
        *matchesOut = matches;
    }
    if(inliersOut)
    {
        inliersOut->resize(inliers.size());
        for(unsigned int i=0; i<inliers.size(); ++i)
        {
            inliersOut->at(i) = matches[inliers[i]];
        }
    }

    return transform;
}

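// Estimate the 3D->3D motion: find word correspondences between the two maps of
// 3D points, then compute the rigid transform with RANSAC registration
// (transformFromXYZCorrespondences), which also fills the covariance output.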
Transform estimateMotion3DTo3D(
        const std::map<int, cv::Point3f> & words3A,
        const std::map<int, cv::Point3f> & words3B,
        int minInliers,
        double inliersDistance,
        int iterations,
        int refineIterations,
        cv::Mat * covariance,
        std::vector<int> * matchesOut,
        std::vector<int> * inliersOut)
{
    Transform transform;
    std::vector<cv::Point3f> inliers1; // previous
    std::vector<cv::Point3f> inliers2; // new

    std::vector<int> matches;
    util3d::findCorrespondences(
            words3A,
            words3B,
            inliers1,
            inliers2,
            0,
            &matches);
    UASSERT(inliers1.size() == inliers2.size());
    UDEBUG("Unique correspondences = %d", (int)inliers1.size());

    if(covariance)
    {
        *covariance = cv::Mat::eye(6,6,CV_64FC1);
    }

    std::vector<int> inliers;
    if((int)inliers1.size() >= minInliers)
    {
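        // Copy the matched 3D points into PCL clouds and estimate the rigid
        // transform between them with RANSAC.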
        pcl::PointCloud<pcl::PointXYZ>::Ptr inliers1cloud(new pcl::PointCloud<pcl::PointXYZ>);
        pcl::PointCloud<pcl::PointXYZ>::Ptr inliers2cloud(new pcl::PointCloud<pcl::PointXYZ>);
        inliers1cloud->resize(inliers1.size());
        inliers2cloud->resize(inliers1.size());
        for(unsigned int i=0; i<inliers1.size(); ++i)
        {
            (*inliers1cloud)[i].x = inliers1[i].x;
            (*inliers1cloud)[i].y = inliers1[i].y;
            (*inliers1cloud)[i].z = inliers1[i].z;
            (*inliers2cloud)[i].x = inliers2[i].x;
            (*inliers2cloud)[i].y = inliers2[i].y;
            (*inliers2cloud)[i].z = inliers2[i].z;
        }
        Transform t = util3d::transformFromXYZCorrespondences(
                inliers2cloud,
                inliers1cloud,
                inliersDistance,
                iterations,
                refineIterations,
                3.0,
                &inliers,
                covariance);

        if(!t.isNull() && (int)inliers.size() >= minInliers)
        {
            transform = t;
        }
    }

    if(matchesOut)
    {
        *matchesOut = matches;
    }
    if(inliersOut)
    {
        inliersOut->resize(inliers.size());
        for(unsigned int i=0; i<inliers.size(); ++i)
        {
            inliersOut->at(i) = matches[inliers[i]];
        }
    }

    return transform;
}


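// Project opoints with the given rvec/tvec and keep the indices whose reprojection
// error is below reprojErrorThreshold; returns the error of each kept point,
// in the same order as the returned inliers.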
std::vector<float> computeReprojErrors(
        std::vector<cv::Point3f> opoints,
        std::vector<cv::Point2f> ipoints,
        const cv::Mat & cameraMatrix,
        const cv::Mat & distCoeffs,
        const cv::Mat & rvec,
        const cv::Mat & tvec,
        float reprojErrorThreshold,
        std::vector<int> & inliers)
{
    UASSERT(opoints.size() == ipoints.size());
    int count = (int)opoints.size();

    std::vector<cv::Point2f> projpoints;
    projectPoints(opoints, rvec, tvec, cameraMatrix, distCoeffs, projpoints);

    inliers.resize(count,0);
    std::vector<float> err(count);
    int oi=0;
    for (int i = 0; i < count; ++i)
    {
        float e = (float)cv::norm( ipoints[i] - projpoints[i]);
        if(e <= reprojErrorThreshold)
        {
            inliers[oi] = i;
            err[oi++] = e;
        }
    }
    inliers.resize(oi);
    err.resize(oi);
    return err;
}

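// PnP RANSAC followed by an optional iterative refinement of the inlier set,
// using the bundled OpenCV3 solvePnPRansac implementation (opencv/solvepnp.h).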
void solvePnPRansac(
        const std::vector<cv::Point3f> & objectPoints,
        const std::vector<cv::Point2f> & imagePoints,
        const cv::Mat & cameraMatrix,
        const cv::Mat & distCoeffs,
        cv::Mat & rvec,
        cv::Mat & tvec,
        bool useExtrinsicGuess,
        int iterationsCount,
        float reprojectionError,
        int minInliersCount,
        std::vector<int> & inliers,
        int flags,
        int refineIterations,
        float refineSigma)
{
    if(minInliersCount < 4)
    {
        minInliersCount = 4;
    }

    // Use OpenCV3 version of solvePnPRansac in OpenCV2.
    // FIXME: we should use this version of solvePnPRansac with newer OpenCV (3.3.1) too; the built-in one seems a lot less stable!?!? Why!?
    cv3::solvePnPRansac(
            objectPoints,
            imagePoints,
            cameraMatrix,
            distCoeffs,
            rvec,
            tvec,
            useExtrinsicGuess,
            iterationsCount,
            reprojectionError,
            0.99, // confidence
            inliers,
            flags);

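    // Refinement stage (similar to pcl::SampleConsensus::refineModel()): re-optimize
    // the pose on the current inliers with solvePnP, recompute the inlier set with a
    // threshold adapted to the residual variance, and stop when the set is stable,
    // oscillating, or the iteration budget is exhausted.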
    float inlierThreshold = reprojectionError;
    if((int)inliers.size() >= minInliersCount && refineIterations>0)
    {
        float error_threshold = inlierThreshold;
        int refine_iterations = 0;
        bool inlier_changed = false, oscillating = false;
        std::vector<int> new_inliers, prev_inliers = inliers;
        std::vector<size_t> inliers_sizes;
        //Eigen::VectorXf new_model_coefficients = model_coefficients;
        cv::Mat new_model_rvec = rvec;
        cv::Mat new_model_tvec = tvec;

        do
        {
            // Get inliers from the current model
            std::vector<cv::Point3f> opoints_inliers(prev_inliers.size());
            std::vector<cv::Point2f> ipoints_inliers(prev_inliers.size());
            for(unsigned int i=0; i<prev_inliers.size(); ++i)
            {
                opoints_inliers[i] = objectPoints[prev_inliers[i]];
                ipoints_inliers[i] = imagePoints[prev_inliers[i]];
            }

            UDEBUG("inliers=%d refine_iterations=%d, rvec=%f,%f,%f tvec=%f,%f,%f", (int)prev_inliers.size(), refine_iterations,
                    *new_model_rvec.ptr<double>(0), *new_model_rvec.ptr<double>(1), *new_model_rvec.ptr<double>(2),
                    *new_model_tvec.ptr<double>(0), *new_model_tvec.ptr<double>(1), *new_model_tvec.ptr<double>(2));

            // Optimize the model coefficients
            cv::solvePnP(opoints_inliers, ipoints_inliers, cameraMatrix, distCoeffs, new_model_rvec, new_model_tvec, true, flags);
            inliers_sizes.push_back(prev_inliers.size());

            UDEBUG("rvec=%f,%f,%f tvec=%f,%f,%f",
                    *new_model_rvec.ptr<double>(0), *new_model_rvec.ptr<double>(1), *new_model_rvec.ptr<double>(2),
                    *new_model_tvec.ptr<double>(0), *new_model_tvec.ptr<double>(1), *new_model_tvec.ptr<double>(2));

            // Select the new inliers based on the optimized coefficients and new threshold
            std::vector<float> err = computeReprojErrors(objectPoints, imagePoints, cameraMatrix, distCoeffs, new_model_rvec, new_model_tvec, error_threshold, new_inliers);
            UDEBUG("RANSAC refineModel: Number of inliers found (before/after): %d/%d, with an error threshold of %f.",
                    (int)prev_inliers.size (), (int)new_inliers.size (), error_threshold);

            if ((int)new_inliers.size() < minInliersCount)
            {
                ++refine_iterations;
                if (refine_iterations >= refineIterations)
                {
                    break;
                }
                continue;
            }

            // Estimate the variance and the new threshold
            float m = uMean(err.data(), err.size());
            float variance = uVariance(err.data(), err.size());
            error_threshold = std::min(inlierThreshold, refineSigma * float(sqrt(variance)));

            UDEBUG ("RANSAC refineModel: New estimated error threshold: %f (variance=%f mean=%f) on iteration %d out of %d.",
                    error_threshold, variance, m, refine_iterations, refineIterations);
            inlier_changed = false;
            std::swap (prev_inliers, new_inliers);

            // If the number of inliers changed, then we are still optimizing
            if (new_inliers.size () != prev_inliers.size ())
            {
                // Check if the number of inliers is oscillating in between two values
                if ((int)inliers_sizes.size () >= minInliersCount)
                {
                    if (inliers_sizes[inliers_sizes.size () - 1] == inliers_sizes[inliers_sizes.size () - 3] &&
                        inliers_sizes[inliers_sizes.size () - 2] == inliers_sizes[inliers_sizes.size () - 4])
                    {
                        oscillating = true;
                        break;
                    }
                }
                inlier_changed = true;
                continue;
            }

            // Check the values of the inlier set
            for (size_t i = 0; i < prev_inliers.size (); ++i)
            {
                // If the value of the inliers changed, then we are still optimizing
                if (prev_inliers[i] != new_inliers[i])
                {
                    inlier_changed = true;
                    break;
                }
            }
        }
        while (inlier_changed && ++refine_iterations < refineIterations);

        // If the new set of inliers is empty, we didn't do a good job refining
        if ((int)prev_inliers.size() < minInliersCount)
        {
            UWARN ("RANSAC refineModel: Refinement failed: got very low inliers (%d)!", (int)prev_inliers.size());
        }

        if (oscillating)
        {
            UDEBUG("RANSAC refineModel: Detected oscillations in the model refinement.");
        }

        std::swap (inliers, new_inliers);
        rvec = new_model_rvec;
        tvec = new_model_tvec;
    }

}

}

}