util3d_motion_estimation.cpp
/*
Copyright (c) 2010-2016, Mathieu Labbe - IntRoLab - Universite de Sherbrooke
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
    * Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.
    * Neither the name of the Universite de Sherbrooke nor the
      names of its contributors may be used to endorse or promote products
      derived from this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

#include "rtabmap/core/util3d_motion_estimation.h"

#include "rtabmap/utilite/UStl.h"
#include "rtabmap/utilite/UMath.h"
#include "rtabmap/utilite/ULogger.h"
#include "rtabmap/core/util3d_transforms.h"
#include "rtabmap/core/util3d_registration.h"
#include "rtabmap/core/util3d_correspondences.h"
#include "rtabmap/core/util3d.h"

#include <pcl/common/common.h>

#include "opencv/solvepnp.h"

#ifdef RTABMAP_OPENGV
#include <opengv/absolute_pose/methods.hpp>
#include <opengv/absolute_pose/NoncentralAbsoluteAdapter.hpp>
#include <opengv/sac/Ransac.hpp>
#include <opengv/sac_problems/absolute_pose/AbsolutePoseSacProblem.hpp>
#endif

namespace rtabmap
{

namespace util3d
{

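// Estimate the 6DoF transform between two frames from 3D features of frame A
// matched to 2D keypoints of frame B, using PnP RANSAC. Returns a null
// transform on failure (not enough matches/inliers, or variance too high).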
Transform estimateMotion3DTo2D(
		const std::map<int, cv::Point3f> & words3A,
		const std::map<int, cv::KeyPoint> & words2B,
		const CameraModel & cameraModel,
		int minInliers,
		int iterations,
		double reprojError,
		int flagsPnP,
		int refineIterations,
		float maxVariance,
		const Transform & guess,
		const std::map<int, cv::Point3f> & words3B,
		cv::Mat * covariance,
		std::vector<int> * matchesOut,
		std::vector<int> * inliersOut)
{
	UASSERT(cameraModel.isValidForProjection());
	UASSERT(!guess.isNull());
	Transform transform;
	std::vector<int> matches, inliers;

	if(covariance)
	{
		*covariance = cv::Mat::eye(6,6,CV_64FC1);
	}

	// find correspondences
	std::vector<int> ids = uKeys(words2B);
	std::vector<cv::Point3f> objectPoints(ids.size());
	std::vector<cv::Point2f> imagePoints(ids.size());
	int oi=0;
	matches.resize(ids.size());
	for(unsigned int i=0; i<ids.size(); ++i)
	{
		std::map<int, cv::Point3f>::const_iterator iter=words3A.find(ids[i]);
		if(iter != words3A.end() && util3d::isFinite(iter->second))
		{
			const cv::Point3f & pt = iter->second;
			objectPoints[oi].x = pt.x;
			objectPoints[oi].y = pt.y;
			objectPoints[oi].z = pt.z;
			imagePoints[oi] = words2B.find(ids[i])->second.pt;
			matches[oi++] = ids[i];
		}
	}

	objectPoints.resize(oi);
	imagePoints.resize(oi);
	matches.resize(oi);

	UDEBUG("words3A=%d words2B=%d matches=%d words3B=%d guess=%s reprojError=%f iterations=%d",
			(int)words3A.size(), (int)words2B.size(), (int)matches.size(), (int)words3B.size(),
			guess.prettyPrint().c_str(), reprojError, iterations);

	if((int)matches.size() >= minInliers)
	{
		//PnPRansac
		cv::Mat K = cameraModel.K();
		cv::Mat D = cameraModel.D();
		Transform guessCameraFrame = (guess * cameraModel.localTransform()).inverse();
		cv::Mat R = (cv::Mat_<double>(3,3) <<
				(double)guessCameraFrame.r11(), (double)guessCameraFrame.r12(), (double)guessCameraFrame.r13(),
				(double)guessCameraFrame.r21(), (double)guessCameraFrame.r22(), (double)guessCameraFrame.r23(),
				(double)guessCameraFrame.r31(), (double)guessCameraFrame.r32(), (double)guessCameraFrame.r33());

		cv::Mat rvec(3,1, CV_64FC1);
		cv::Rodrigues(R, rvec);
		cv::Mat tvec = (cv::Mat_<double>(3,1) <<
				(double)guessCameraFrame.x(), (double)guessCameraFrame.y(), (double)guessCameraFrame.z());

		util3d::solvePnPRansac(
				objectPoints,
				imagePoints,
				K,
				D,
				rvec,
				tvec,
				true,
				iterations,
				reprojError,
				minInliers, // min inliers
				inliers,
				flagsPnP,
				refineIterations);

		if((int)inliers.size() >= minInliers)
		{
			cv::Rodrigues(rvec, R);
			Transform pnp(R.at<double>(0,0), R.at<double>(0,1), R.at<double>(0,2), tvec.at<double>(0),
						  R.at<double>(1,0), R.at<double>(1,1), R.at<double>(1,2), tvec.at<double>(1),
						  R.at<double>(2,0), R.at<double>(2,1), R.at<double>(2,2), tvec.at<double>(2));

			transform = (cameraModel.localTransform() * pnp).inverse();

			// compute variance (like in PCL computeVariance() method of sac_model.h)
			if(covariance && (!words3B.empty() || cameraModel.imageSize() != cv::Size()))
			{
				std::vector<float> errorSqrdDists(inliers.size());
				std::vector<float> errorSqrdAngles(inliers.size());
				Transform localTransformInv = cameraModel.localTransform().inverse();
				Transform transformCameraFrameInv = (transform * cameraModel.localTransform()).inverse();
				for(unsigned int i=0; i<inliers.size(); ++i)
				{
					cv::Point3f objPt = objectPoints[inliers[i]];

					// Project obj point from base frame of cameraA in cameraB frame (z+ in front of the cameraB)
					objPt = util3d::transformPoint(objPt, transformCameraFrameInv);

					// Get 3D point from target in cameraB frame
					std::map<int, cv::Point3f>::const_iterator iter = words3B.find(matches[inliers[i]]);
					cv::Point3f newPt;
					if(iter!=words3B.end() && util3d::isFinite(iter->second))
					{
						newPt = util3d::transformPoint(iter->second, localTransformInv);
					}
					else
					{
						// compute from projection
						Eigen::Vector3f ray = projectDepthTo3DRay(
								cameraModel.imageSize(),
								imagePoints.at(inliers[i]).x,
								imagePoints.at(inliers[i]).y,
								cameraModel.cx(),
								cameraModel.cy(),
								cameraModel.fx(),
								cameraModel.fy());
						// transform in camera B frame
						newPt = cv::Point3f(ray.x(), ray.y(), ray.z()) * objPt.z*1.1; // Add 10 % error
					}

					errorSqrdDists[i] = uNormSquared(objPt.x-newPt.x, objPt.y-newPt.y, objPt.z-newPt.z);

					Eigen::Vector4f v1(objPt.x, objPt.y, objPt.z, 0);
					Eigen::Vector4f v2(newPt.x, newPt.y, newPt.z, 0);
					errorSqrdAngles[i] = pcl::getAngle3D(v1, v2);
				}

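				// 2.1981 = 1.4826^2, the squared MAD-to-standard-deviation factor
				// used by PCL's computeVariance(); taking the element at size>>2
				// (first quartile) instead of the median discounts very far features.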
				std::sort(errorSqrdDists.begin(), errorSqrdDists.end());
				// divide by 4 instead of 2 to ignore very very far features (stereo)
				double median_error_sqr_lin = 2.1981 * (double)errorSqrdDists[errorSqrdDists.size () >> 2];
				UASSERT(uIsFinite(median_error_sqr_lin));
				(*covariance)(cv::Range(0,3), cv::Range(0,3)) *= median_error_sqr_lin;
				std::sort(errorSqrdAngles.begin(), errorSqrdAngles.end());
				double median_error_sqr_ang = 2.1981 * (double)errorSqrdAngles[errorSqrdAngles.size () >> 2];
				UASSERT(uIsFinite(median_error_sqr_ang));
				(*covariance)(cv::Range(3,6), cv::Range(3,6)) *= median_error_sqr_ang;

				if(maxVariance > 0 && median_error_sqr_lin > maxVariance)
				{
					UWARN("Rejected PnP transform, variance is too high! %f > %f!", median_error_sqr_lin, maxVariance);
					*covariance = cv::Mat::eye(6,6,CV_64FC1);
					transform.setNull();
				}
			}
			else if(covariance)
			{
				// compute variance, which is the rms of reprojection errors
				std::vector<cv::Point2f> imagePointsReproj;
				cv::projectPoints(objectPoints, rvec, tvec, K, cv::Mat(), imagePointsReproj);
				float err = 0.0f;
				for(unsigned int i=0; i<inliers.size(); ++i)
				{
					err += uNormSquared(imagePoints.at(inliers[i]).x - imagePointsReproj.at(inliers[i]).x, imagePoints.at(inliers[i]).y - imagePointsReproj.at(inliers[i]).y);
				}
				UASSERT(uIsFinite(err));
				*covariance *= std::sqrt(err/float(inliers.size()));
			}
		}
	}

	if(matchesOut)
	{
		*matchesOut = matches;
	}
	if(inliersOut)
	{
		inliersOut->resize(inliers.size());
		for(unsigned int i=0; i<inliers.size(); ++i)
		{
			inliersOut->at(i) = matches[inliers[i]];
		}
	}

	return transform;
}
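
// Example usage (an illustrative sketch, not part of the original file;
// "model", "words3A" and "words2B" stand for caller-provided data):
//
//   cv::Mat covariance;
//   std::vector<int> inliers;
//   Transform t = util3d::estimateMotion3DTo2D(
//       words3A,                      // 3D words in frame A (base frame)
//       words2B,                      // 2D keypoints in frame B
//       model,                        // valid CameraModel
//       10, 100, 5.0,                 // minInliers, iterations, reprojError (px)
//       0, 1, 0.0f,                   // flagsPnP (cv::SOLVEPNP_ITERATIVE), refineIterations, maxVariance (disabled)
//       Transform::getIdentity(),     // guess (must not be null)
//       std::map<int, cv::Point3f>(), // words3B (optional)
//       &covariance, 0, &inliers);
//   if(!t.isNull()) { /* use t and covariance */ }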
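
// Multi-camera variant of estimateMotion3DTo2D(): the 2D keypoints of frame B
// are assumed to lie in sub-images stitched side-by-side (one per camera), and
// the pose is estimated with OpenGV's non-central absolute pose solver (GP3P)
// inside RANSAC. Requires rtabmap to be built with OpenGV.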
Transform estimateMotion3DTo2D(
		const std::map<int, cv::Point3f> & words3A,
		const std::map<int, cv::KeyPoint> & words2B,
		const std::vector<CameraModel> & cameraModels,
		int minInliers,
		int iterations,
		double reprojError,
		int flagsPnP,
		int refineIterations,
		float maxVariance,
		const Transform & guess,
		const std::map<int, cv::Point3f> & words3B,
		cv::Mat * covariance,
		std::vector<int> * matchesOut,
		std::vector<int> * inliersOut)
{
	Transform transform;
#ifndef RTABMAP_OPENGV
	UERROR("This function is only available if rtabmap is built with OpenGV dependency.");
#else
	UASSERT(!cameraModels.empty() && cameraModels[0].imageWidth() > 0);
	int subImageWidth = cameraModels[0].imageWidth();
	for(size_t i=0; i<cameraModels.size(); ++i)
	{
		UASSERT(cameraModels[i].isValidForProjection());
		UASSERT(subImageWidth == cameraModels[i].imageWidth());
	}

	UASSERT(!guess.isNull());

	std::vector<int> matches, inliers;

	if(covariance)
	{
		*covariance = cv::Mat::eye(6,6,CV_64FC1);
	}

	// find correspondences
	std::vector<int> ids = uKeys(words2B);
	std::vector<cv::Point3f> objectPoints(ids.size());
	std::vector<cv::Point2f> imagePoints(ids.size());
	int oi=0;
	matches.resize(ids.size());
	std::vector<int> cameraIndexes(ids.size());
	for(unsigned int i=0; i<ids.size(); ++i)
	{
		std::map<int, cv::Point3f>::const_iterator iter=words3A.find(ids[i]);
		if(iter != words3A.end() && util3d::isFinite(iter->second))
		{
			const cv::Point2f & kpt = words2B.find(ids[i])->second.pt;
			int cameraIndex = int(kpt.x / subImageWidth);
			UASSERT_MSG(cameraIndex >= 0 && cameraIndex < (int)cameraModels.size(),
					uFormat("cameraIndex=%d, models=%d, kpt.x=%f, subImageWidth=%d (Camera model image width=%d)",
							cameraIndex, (int)cameraModels.size(), kpt.x, subImageWidth, cameraModels[cameraIndex].imageWidth()).c_str());

			const cv::Point3f & pt = iter->second;
			objectPoints[oi] = pt;
			imagePoints[oi] = kpt;
			// convert in image space
			imagePoints[oi].x = imagePoints[oi].x - (cameraIndex*subImageWidth);
			cameraIndexes[oi] = cameraIndex;
			matches[oi++] = ids[i];
		}
	}

	objectPoints.resize(oi);
	imagePoints.resize(oi);
	cameraIndexes.resize(oi);
	matches.resize(oi);

	UDEBUG("words3A=%d words2B=%d matches=%d words3B=%d guess=%s reprojError=%f iterations=%d",
			(int)words3A.size(), (int)words2B.size(), (int)matches.size(), (int)words3B.size(),
			guess.prettyPrint().c_str(), reprojError, iterations);

	if((int)matches.size() >= minInliers)
	{
		// convert cameras
		opengv::translations_t camOffsets;
		opengv::rotations_t camRotations;
		for(size_t i=0; i<cameraModels.size(); ++i)
		{
			camOffsets.push_back(opengv::translation_t(
					cameraModels[i].localTransform().x(),
					cameraModels[i].localTransform().y(),
					cameraModels[i].localTransform().z()));
			camRotations.push_back(cameraModels[i].localTransform().toEigen4d().block<3,3>(0, 0));
		}

		// convert 3d points
		opengv::points_t points;
		// convert 2d-3d correspondences into bearing vectors
		opengv::bearingVectors_t bearingVectors;
		opengv::absolute_pose::NoncentralAbsoluteAdapter::camCorrespondences_t camCorrespondences;
		for(size_t i=0; i<objectPoints.size(); ++i)
		{
			int cameraIndex = cameraIndexes[i];
			points.push_back(opengv::point_t(objectPoints[i].x,objectPoints[i].y,objectPoints[i].z));
			cv::Vec3f pt;
			cameraModels[cameraIndex].project(imagePoints[i].x, imagePoints[i].y, 1, pt[0], pt[1], pt[2]);
			pt = cv::normalize(pt);
			bearingVectors.push_back(opengv::bearingVector_t(pt[0], pt[1], pt[2]));
			camCorrespondences.push_back(cameraIndex);
		}

		// create a non-central absolute adapter
		opengv::absolute_pose::NoncentralAbsoluteAdapter adapter(
				bearingVectors,
				camCorrespondences,
				points,
				camOffsets,
				camRotations);

		adapter.setR(guess.toEigen4d().block<3,3>(0, 0));
		adapter.sett(opengv::translation_t(guess.x(), guess.y(), guess.z()));

		// Create an AbsolutePoseSacProblem and RANSAC; the method is set to GP3P
		opengv::sac::Ransac<opengv::sac_problems::absolute_pose::AbsolutePoseSacProblem> ransac;
		std::shared_ptr<opengv::sac_problems::absolute_pose::AbsolutePoseSacProblem> absposeproblem_ptr(
				new opengv::sac_problems::absolute_pose::AbsolutePoseSacProblem(adapter, opengv::sac_problems::absolute_pose::AbsolutePoseSacProblem::GP3P));

		ransac.sac_model_ = absposeproblem_ptr;
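		// Express the pixel reprojection error as an angular error on the unit
		// bearing vectors: threshold = 1 - cos(atan(reprojError / fx)).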
		ransac.threshold_ = 1.0 - cos(atan(reprojError/cameraModels[0].fx()));
		ransac.max_iterations_ = iterations;
		UDEBUG("Ransac params: threshold = %f (reprojError=%f fx=%f), max iterations=%d", ransac.threshold_, reprojError, cameraModels[0].fx(), ransac.max_iterations_);

		// Run the experiment
		ransac.computeModel();

		Transform pnp = Transform::fromEigen3d(ransac.model_coefficients_);

		UDEBUG("Ransac result: %s", pnp.prettyPrint().c_str());
		UDEBUG("Ransac iterations done: %d", ransac.iterations_);
		inliers = ransac.inliers_;
		UDEBUG("Ransac inliers: %d", (int)inliers.size());

		if((int)inliers.size() >= minInliers)
		{
			transform = pnp;

			// compute variance (like in PCL computeVariance() method of sac_model.h)
			if(covariance)
			{
				std::vector<float> errorSqrdDists(inliers.size());
				std::vector<float> errorSqrdAngles(inliers.size());
				for(unsigned int i=0; i<inliers.size(); ++i)
				{
					cv::Point3f objPt = objectPoints[inliers[i]];

					int cameraIndex = cameraIndexes[inliers[i]];
					Transform transformCameraFrameInv = (transform * cameraModels[cameraIndex].localTransform()).inverse();

					// Project obj point from base frame of cameraA in cameraB frame (z+ in front of the cameraB)
					objPt = util3d::transformPoint(objPt, transformCameraFrameInv);

					// Get 3D point from target in cameraB frame
					std::map<int, cv::Point3f>::const_iterator iter = words3B.find(matches[inliers[i]]);
					cv::Point3f newPt;
					if(iter!=words3B.end() && util3d::isFinite(iter->second))
					{
						newPt = util3d::transformPoint(iter->second, cameraModels[cameraIndex].localTransform().inverse());
					}
					else
					{
						// compute from projection
						Eigen::Vector3f ray = projectDepthTo3DRay(
								cameraModels[cameraIndex].imageSize(),
								imagePoints.at(inliers[i]).x,
								imagePoints.at(inliers[i]).y,
								cameraModels[cameraIndex].cx(),
								cameraModels[cameraIndex].cy(),
								cameraModels[cameraIndex].fx(),
								cameraModels[cameraIndex].fy());
						// transform in camera B frame
						newPt = cv::Point3f(ray.x(), ray.y(), ray.z()) * objPt.z*1.1; // Add 10 % error
					}

					errorSqrdDists[i] = uNormSquared(objPt.x-newPt.x, objPt.y-newPt.y, objPt.z-newPt.z);

					Eigen::Vector4f v1(objPt.x, objPt.y, objPt.z, 0);
					Eigen::Vector4f v2(newPt.x, newPt.y, newPt.z, 0);
					errorSqrdAngles[i] = pcl::getAngle3D(v1, v2);
				}

				std::sort(errorSqrdDists.begin(), errorSqrdDists.end());
				// divide by 4 instead of 2 to ignore very very far features (stereo)
				double median_error_sqr_lin = 2.1981 * (double)errorSqrdDists[errorSqrdDists.size () >> 2];
				UASSERT(uIsFinite(median_error_sqr_lin));
				(*covariance)(cv::Range(0,3), cv::Range(0,3)) *= median_error_sqr_lin;
				std::sort(errorSqrdAngles.begin(), errorSqrdAngles.end());
				double median_error_sqr_ang = 2.1981 * (double)errorSqrdAngles[errorSqrdAngles.size () >> 2];
				UASSERT(uIsFinite(median_error_sqr_ang));
				(*covariance)(cv::Range(3,6), cv::Range(3,6)) *= median_error_sqr_ang;

				if(maxVariance > 0 && median_error_sqr_lin > maxVariance)
				{
					UWARN("Rejected PnP transform, variance is too high! %f > %f!", median_error_sqr_lin, maxVariance);
					*covariance = cv::Mat::eye(6,6,CV_64FC1);
					transform.setNull();
				}
			}
		}
	}

	if(matchesOut)
	{
		*matchesOut = matches;
	}
	if(inliersOut)
	{
		inliersOut->resize(inliers.size());
		for(unsigned int i=0; i<inliers.size(); ++i)
		{
			inliersOut->at(i) = matches[inliers[i]];
		}
	}
#endif
	return transform;
}
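
// Estimate the 6DoF transform between two frames from 3D-3D word
// correspondences, using RANSAC-based rigid alignment
// (util3d::transformFromXYZCorrespondences()).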
Transform estimateMotion3DTo3D(
		const std::map<int, cv::Point3f> & words3A,
		const std::map<int, cv::Point3f> & words3B,
		int minInliers,
		double inliersDistance,
		int iterations,
		int refineIterations,
		cv::Mat * covariance,
		std::vector<int> * matchesOut,
		std::vector<int> * inliersOut)
{
	Transform transform;
	std::vector<cv::Point3f> inliers1; // previous
	std::vector<cv::Point3f> inliers2; // new

	std::vector<int> matches;
	util3d::findCorrespondences(
			words3A,
			words3B,
			inliers1,
			inliers2,
			0,
			&matches);
	UASSERT(inliers1.size() == inliers2.size());
	UDEBUG("Unique correspondences = %d", (int)inliers1.size());

	if(covariance)
	{
		*covariance = cv::Mat::eye(6,6,CV_64FC1);
	}

	std::vector<int> inliers;
	if((int)inliers1.size() >= minInliers)
	{
		pcl::PointCloud<pcl::PointXYZ>::Ptr inliers1cloud(new pcl::PointCloud<pcl::PointXYZ>);
		pcl::PointCloud<pcl::PointXYZ>::Ptr inliers2cloud(new pcl::PointCloud<pcl::PointXYZ>);
		inliers1cloud->resize(inliers1.size());
		inliers2cloud->resize(inliers1.size());
		for(unsigned int i=0; i<inliers1.size(); ++i)
		{
			(*inliers1cloud)[i].x = inliers1[i].x;
			(*inliers1cloud)[i].y = inliers1[i].y;
			(*inliers1cloud)[i].z = inliers1[i].z;
			(*inliers2cloud)[i].x = inliers2[i].x;
			(*inliers2cloud)[i].y = inliers2[i].y;
			(*inliers2cloud)[i].z = inliers2[i].z;
		}
		Transform t = util3d::transformFromXYZCorrespondences(
				inliers2cloud,
				inliers1cloud,
				inliersDistance,
				iterations,
				refineIterations,
				3.0,
				&inliers,
				covariance);

		if(!t.isNull() && (int)inliers.size() >= minInliers)
		{
			transform = t;
		}
	}

	if(matchesOut)
	{
		*matchesOut = matches;
	}
	if(inliersOut)
	{
		inliersOut->resize(inliers.size());
		for(unsigned int i=0; i<inliers.size(); ++i)
		{
			inliersOut->at(i) = matches[inliers[i]];
		}
	}

	return transform;
}
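
// Example usage (an illustrative sketch, not part of the original file;
// "words3A" and "words3B" stand for caller-provided 3D words):
//
//   cv::Mat covariance;
//   Transform t = util3d::estimateMotion3DTo3D(
//       words3A, words3B,
//       10,     // minInliers
//       0.1,    // inliersDistance (m)
//       100,    // iterations
//       5,      // refineIterations
//       &covariance);
//   if(!t.isNull()) { /* use t and covariance */ }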
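
// Return the reprojection error of each point given the model (rvec, tvec),
// keeping only points under reprojErrorThreshold and their indices in "inliers".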
std::vector<float> computeReprojErrors(
		std::vector<cv::Point3f> opoints,
		std::vector<cv::Point2f> ipoints,
		const cv::Mat & cameraMatrix,
		const cv::Mat & distCoeffs,
		const cv::Mat & rvec,
		const cv::Mat & tvec,
		float reprojErrorThreshold,
		std::vector<int> & inliers)
{
	UASSERT(opoints.size() == ipoints.size());
	int count = (int)opoints.size();

	std::vector<cv::Point2f> projpoints;
	projectPoints(opoints, rvec, tvec, cameraMatrix, distCoeffs, projpoints);

	inliers.resize(count,0);
	std::vector<float> err(count);
	int oi=0;
	for (int i = 0; i < count; ++i)
	{
		float e = (float)cv::norm( ipoints[i] - projpoints[i]);
		if(e <= reprojErrorThreshold)
		{
			inliers[oi] = i;
			err[oi++] = e;
		}
	}
	inliers.resize(oi);
	err.resize(oi);
	return err;
}
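
// PnP RANSAC with an additional PCL-style iterative model refinement on the
// inlier set (see refineIterations/refineSigma), on top of the OpenCV solver.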
void solvePnPRansac(
		const std::vector<cv::Point3f> & objectPoints,
		const std::vector<cv::Point2f> & imagePoints,
		const cv::Mat & cameraMatrix,
		const cv::Mat & distCoeffs,
		cv::Mat & rvec,
		cv::Mat & tvec,
		bool useExtrinsicGuess,
		int iterationsCount,
		float reprojectionError,
		int minInliersCount,
		std::vector<int> & inliers,
		int flags,
		int refineIterations,
		float refineSigma)
{
	if(minInliersCount < 4)
	{
		minInliersCount = 4;
	}

	// Use the OpenCV3 version of solvePnPRansac with OpenCV2.
	// FIXME: we should use this version in newer OpenCV (3.3.1+) too, where the
	// stock solvePnPRansac seems a lot less stable. Why?
	cv3::solvePnPRansac(
			objectPoints,
			imagePoints,
			cameraMatrix,
			distCoeffs,
			rvec,
			tvec,
			useExtrinsicGuess,
			iterationsCount,
			reprojectionError,
			0.99, // confidence
			inliers,
			flags);

	float inlierThreshold = reprojectionError;
	if((int)inliers.size() >= minInliersCount && refineIterations>0)
	{
		float error_threshold = inlierThreshold;
		int refine_iterations = 0;
		bool inlier_changed = false, oscillating = false;
		std::vector<int> new_inliers, prev_inliers = inliers;
		std::vector<size_t> inliers_sizes;
		//Eigen::VectorXf new_model_coefficients = model_coefficients;
		cv::Mat new_model_rvec = rvec;
		cv::Mat new_model_tvec = tvec;

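		// Iteratively re-fit the model on its inlier set and re-select inliers
		// with a shrinking threshold (refineSigma * stddev of the residuals),
		// following PCL's SampleConsensus refineModel() scheme.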
		do
		{
			// Get inliers from the current model
			std::vector<cv::Point3f> opoints_inliers(prev_inliers.size());
			std::vector<cv::Point2f> ipoints_inliers(prev_inliers.size());
			for(unsigned int i=0; i<prev_inliers.size(); ++i)
			{
				opoints_inliers[i] = objectPoints[prev_inliers[i]];
				ipoints_inliers[i] = imagePoints[prev_inliers[i]];
			}

			UDEBUG("inliers=%d refine_iterations=%d, rvec=%f,%f,%f tvec=%f,%f,%f", (int)prev_inliers.size(), refine_iterations,
					*new_model_rvec.ptr<double>(0), *new_model_rvec.ptr<double>(1), *new_model_rvec.ptr<double>(2),
					*new_model_tvec.ptr<double>(0), *new_model_tvec.ptr<double>(1), *new_model_tvec.ptr<double>(2));

			// Optimize the model coefficients
			cv::solvePnP(opoints_inliers, ipoints_inliers, cameraMatrix, distCoeffs, new_model_rvec, new_model_tvec, true, flags);
			inliers_sizes.push_back(prev_inliers.size());

			UDEBUG("rvec=%f,%f,%f tvec=%f,%f,%f",
					*new_model_rvec.ptr<double>(0), *new_model_rvec.ptr<double>(1), *new_model_rvec.ptr<double>(2),
					*new_model_tvec.ptr<double>(0), *new_model_tvec.ptr<double>(1), *new_model_tvec.ptr<double>(2));

			// Select the new inliers based on the optimized coefficients and new threshold
			std::vector<float> err = computeReprojErrors(objectPoints, imagePoints, cameraMatrix, distCoeffs, new_model_rvec, new_model_tvec, error_threshold, new_inliers);
			UDEBUG("RANSAC refineModel: Number of inliers found (before/after): %d/%d, with an error threshold of %f.",
					(int)prev_inliers.size (), (int)new_inliers.size (), error_threshold);

			if ((int)new_inliers.size() < minInliersCount)
			{
				++refine_iterations;
				if (refine_iterations >= refineIterations)
				{
					break;
				}
				continue;
			}

			// Estimate the variance and the new threshold
			float m = uMean(err.data(), err.size());
			float variance = uVariance(err.data(), err.size());
			error_threshold = std::min(inlierThreshold, refineSigma * float(sqrt(variance)));

			UDEBUG("RANSAC refineModel: New estimated error threshold: %f (variance=%f mean=%f) on iteration %d out of %d.",
					error_threshold, variance, m, refine_iterations, refineIterations);
			inlier_changed = false;
			std::swap (prev_inliers, new_inliers);

			// If the number of inliers changed, then we are still optimizing
			if (new_inliers.size () != prev_inliers.size ())
			{
				// Check if the number of inliers is oscillating in between two values
				if ((int)inliers_sizes.size () >= minInliersCount)
				{
					if (inliers_sizes[inliers_sizes.size () - 1] == inliers_sizes[inliers_sizes.size () - 3] &&
						inliers_sizes[inliers_sizes.size () - 2] == inliers_sizes[inliers_sizes.size () - 4])
					{
						oscillating = true;
						break;
					}
				}
				inlier_changed = true;
				continue;
			}

			// Check the values of the inlier set
			for (size_t i = 0; i < prev_inliers.size (); ++i)
			{
				// If the value of the inliers changed, then we are still optimizing
				if (prev_inliers[i] != new_inliers[i])
				{
					inlier_changed = true;
					break;
				}
			}
		}
		while (inlier_changed && ++refine_iterations < refineIterations);

		// If the final set of inliers is too small, we didn't do a good job refining
		if ((int)prev_inliers.size() < minInliersCount)
		{
			UWARN("RANSAC refineModel: Refinement failed: got very low inliers (%d)!", (int)prev_inliers.size());
		}

		if (oscillating)
		{
			UDEBUG("RANSAC refineModel: Detected oscillations in the model refinement.");
		}

		std::swap (inliers, new_inliers);
		rvec = new_model_rvec;
		tvec = new_model_tvec;
	}

}

}

}