util3d_motion_estimation.cpp
/*
Copyright (c) 2010-2016, Mathieu Labbe - IntRoLab - Universite de Sherbrooke
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
    * Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.
    * Neither the name of the Universite de Sherbrooke nor the
      names of its contributors may be used to endorse or promote products
      derived from this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

#include "rtabmap/core/util3d_motion_estimation.h"

#include "rtabmap/utilite/UStl.h"
#include "rtabmap/utilite/UMath.h"
#include "rtabmap/utilite/ULogger.h"
#include "rtabmap/utilite/UTimer.h"
#include "rtabmap/core/util3d_transforms.h"
#include "rtabmap/core/util3d_registration.h"
#include "rtabmap/core/util3d_correspondences.h"
#include "rtabmap/core/util3d.h"

#include <pcl/common/common.h>

#include "opencv/solvepnp.h"

#ifdef RTABMAP_OPENGV
#include <opengv/absolute_pose/methods.hpp>
#include <opengv/absolute_pose/NoncentralAbsoluteAdapter.hpp>
#include <opengv/absolute_pose/NoncentralAbsoluteMultiAdapter.hpp>
#include <opengv/sac/Ransac.hpp>
#include <opengv/sac/MultiRansac.hpp>
#include <opengv/sac_problems/absolute_pose/AbsolutePoseSacProblem.hpp>
#include <opengv/sac_problems/absolute_pose/MultiNoncentralAbsolutePoseSacProblem.hpp>
#endif

namespace rtabmap
{

namespace util3d
{

Transform estimateMotion3DTo2D(
		const std::map<int, cv::Point3f> & words3A,
		const std::map<int, cv::KeyPoint> & words2B,
		const CameraModel & cameraModel,
		int minInliers,
		int iterations,
		double reprojError,
		int flagsPnP,
		int refineIterations,
		int varianceMedianRatio,
		float maxVariance,
		const Transform & guess,
		const std::map<int, cv::Point3f> & words3B,
		cv::Mat * covariance,
		std::vector<int> * matchesOut,
		std::vector<int> * inliersOut,
		bool splitLinearCovarianceComponents)
{
	UASSERT(cameraModel.isValidForProjection());
	UASSERT(!guess.isNull());
	UASSERT(varianceMedianRatio > 1);
	Transform transform;
	std::vector<int> matches, inliers;

	if(covariance)
	{
		*covariance = cv::Mat::eye(6,6,CV_64FC1);
	}

	// find correspondences
	std::vector<int> ids = uKeys(words2B);
	std::vector<cv::Point3f> objectPoints(ids.size());
	std::vector<cv::Point2f> imagePoints(ids.size());
	int oi=0;
	matches.resize(ids.size());
	for(unsigned int i=0; i<ids.size(); ++i)
	{
		std::map<int, cv::Point3f>::const_iterator iter = words3A.find(ids[i]);
		if(iter != words3A.end() && util3d::isFinite(iter->second))
		{
			const cv::Point3f & pt = iter->second;
			objectPoints[oi].x = pt.x;
			objectPoints[oi].y = pt.y;
			objectPoints[oi].z = pt.z;
			imagePoints[oi] = words2B.find(ids[i])->second.pt;
			matches[oi++] = ids[i];
		}
	}

	objectPoints.resize(oi);
	imagePoints.resize(oi);
	matches.resize(oi);

	UDEBUG("words3A=%d words2B=%d matches=%d words3B=%d guess=%s reprojError=%f iterations=%d",
			(int)words3A.size(), (int)words2B.size(), (int)matches.size(), (int)words3B.size(),
			guess.prettyPrint().c_str(), reprojError, iterations);

	if((int)matches.size() >= minInliers)
	{
		//PnPRansac
		cv::Mat K = cameraModel.K();
		cv::Mat D = cameraModel.D();
		Transform guessCameraFrame = (guess * cameraModel.localTransform()).inverse();
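		// PnP estimates the pose of the object points w.r.t. the camera, so the
		// base-frame guess (pose of frame B expressed in frame A) is converted into
		// the optical frame of camera B: compose with the camera's local transform,
		// then invert to map points from A's base frame into the camera frame.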
		cv::Mat R = (cv::Mat_<double>(3,3) <<
				(double)guessCameraFrame.r11(), (double)guessCameraFrame.r12(), (double)guessCameraFrame.r13(),
				(double)guessCameraFrame.r21(), (double)guessCameraFrame.r22(), (double)guessCameraFrame.r23(),
				(double)guessCameraFrame.r31(), (double)guessCameraFrame.r32(), (double)guessCameraFrame.r33());

		cv::Mat rvec(3,1, CV_64FC1);
		cv::Rodrigues(R, rvec);
		cv::Mat tvec = (cv::Mat_<double>(3,1) <<
				(double)guessCameraFrame.x(), (double)guessCameraFrame.y(), (double)guessCameraFrame.z());

		util3d::solvePnPRansac(
				objectPoints,
				imagePoints,
				K,
				D,
				rvec,
				tvec,
				true,
				iterations,
				reprojError,
				minInliers, // min inliers
				inliers,
				flagsPnP,
				refineIterations);

		if((int)inliers.size() >= minInliers)
		{
			cv::Rodrigues(rvec, R);
			Transform pnp(R.at<double>(0,0), R.at<double>(0,1), R.at<double>(0,2), tvec.at<double>(0),
						  R.at<double>(1,0), R.at<double>(1,1), R.at<double>(1,2), tvec.at<double>(1),
						  R.at<double>(2,0), R.at<double>(2,1), R.at<double>(2,2), tvec.at<double>(2));

			transform = (cameraModel.localTransform() * pnp).inverse();

			// compute variance (like in PCL computeVariance() method of sac_model.h)
			if(covariance && (!words3B.empty() || cameraModel.imageSize() != cv::Size()))
			{
				std::vector<float> errorSqrdDists(inliers.size());
				std::vector<float> errorSqrdX;
				std::vector<float> errorSqrdY;
				std::vector<float> errorSqrdZ;
				if(splitLinearCovarianceComponents)
				{
					errorSqrdX.resize(inliers.size());
					errorSqrdY.resize(inliers.size());
					errorSqrdZ.resize(inliers.size());
				}
				std::vector<float> errorSqrdAngles(inliers.size());
				Transform localTransformInv = cameraModel.localTransform().inverse();
				Transform transformCameraFrame = transform * cameraModel.localTransform();
				Transform transformCameraFrameInv = transformCameraFrame.inverse();
				for(unsigned int i=0; i<inliers.size(); ++i)
				{
					cv::Point3f objPt = objectPoints[inliers[i]];

					// Get 3D point from cameraB base frame in cameraA base frame
					std::map<int, cv::Point3f>::const_iterator iter = words3B.find(matches[inliers[i]]);
					cv::Point3f newPt;
					if(iter!=words3B.end() && util3d::isFinite(iter->second))
					{
						newPt = util3d::transformPoint(iter->second, transform);
					}
					else
					{
						// Project obj point from base frame of cameraA in cameraB frame (z+ in front of the cameraB)
						cv::Point3f objPtCamBFrame = util3d::transformPoint(objPt, transformCameraFrameInv);

						// compute from projection
						Eigen::Vector3f ray = projectDepthTo3DRay(
								cameraModel.imageSize(),
								imagePoints.at(inliers[i]).x,
								imagePoints.at(inliers[i]).y,
								cameraModel.cx(),
								cameraModel.cy(),
								cameraModel.fx(),
								cameraModel.fy());
						// transform in camera B frame
						newPt = cv::Point3f(ray.x(), ray.y(), ray.z()) * objPtCamBFrame.z*1.1; // add 10% error

						// transform back into cameraA base frame
						newPt = util3d::transformPoint(newPt, transformCameraFrame);
					}

					if(splitLinearCovarianceComponents)
					{
						double errorX = objPt.x-newPt.x;
						double errorY = objPt.y-newPt.y;
						double errorZ = objPt.z-newPt.z;
						errorSqrdX[i] = errorX * errorX;
						errorSqrdY[i] = errorY * errorY;
						errorSqrdZ[i] = errorZ * errorZ;
					}

					errorSqrdDists[i] = uNormSquared(objPt.x-newPt.x, objPt.y-newPt.y, objPt.z-newPt.z);

					Eigen::Vector4f v1(objPt.x, objPt.y, objPt.z, 0);
					Eigen::Vector4f v2(newPt.x, newPt.y, newPt.z, 0);
					errorSqrdAngles[i] = pcl::getAngle3D(v1, v2);
				}

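				// A robust variance estimate, as in PCL's SampleConsensusModel::computeVariance():
				// for Gaussian noise, sigma ~= 1.4826 * median(|error|), so
				// sigma^2 ~= 1.4826^2 * median(error^2) = 2.1981 * median(error^2).
				// Indexing the sorted squared errors at size()/varianceMedianRatio
				// (default 4, i.e. the lower quartile) instead of size()/2 discounts
				// the largest residuals, e.g. very far stereo features.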
				std::sort(errorSqrdDists.begin(), errorSqrdDists.end());
				// divide by 4 instead of 2 to ignore very far features (stereo)
				double median_error_sqr_lin = 2.1981 * (double)errorSqrdDists[errorSqrdDists.size () / varianceMedianRatio];
				UASSERT(uIsFinite(median_error_sqr_lin));
				(*covariance)(cv::Range(0,3), cv::Range(0,3)) *= median_error_sqr_lin;
				std::sort(errorSqrdAngles.begin(), errorSqrdAngles.end());
				double median_error_sqr_ang = 2.1981 * (double)errorSqrdAngles[errorSqrdAngles.size () / varianceMedianRatio];
				UASSERT(uIsFinite(median_error_sqr_ang));
				(*covariance)(cv::Range(3,6), cv::Range(3,6)) *= median_error_sqr_ang;

				if(splitLinearCovarianceComponents)
				{
					std::sort(errorSqrdX.begin(), errorSqrdX.end());
					double median_error_sqr_x = 2.1981 * (double)errorSqrdX[errorSqrdX.size () / varianceMedianRatio];
					std::sort(errorSqrdY.begin(), errorSqrdY.end());
					double median_error_sqr_y = 2.1981 * (double)errorSqrdY[errorSqrdY.size () / varianceMedianRatio];
					std::sort(errorSqrdZ.begin(), errorSqrdZ.end());
					double median_error_sqr_z = 2.1981 * (double)errorSqrdZ[errorSqrdZ.size () / varianceMedianRatio];

					UASSERT(uIsFinite(median_error_sqr_x));
					UASSERT(uIsFinite(median_error_sqr_y));
					UASSERT(uIsFinite(median_error_sqr_z));
					covariance->at<double>(0,0) = median_error_sqr_x;
					covariance->at<double>(1,1) = median_error_sqr_y;
					covariance->at<double>(2,2) = median_error_sqr_z;

					median_error_sqr_lin = uMax3(median_error_sqr_x, median_error_sqr_y, median_error_sqr_z);
				}

				if(maxVariance > 0 && median_error_sqr_lin > maxVariance)
				{
					UWARN("Rejected PnP transform, variance is too high! %f > %f!", median_error_sqr_lin, maxVariance);
					*covariance = cv::Mat::eye(6,6,CV_64FC1);
					transform.setNull();
				}
			}
			else if(covariance)
			{
				// compute variance, which is the RMS of reprojection errors
				std::vector<cv::Point2f> imagePointsReproj;
				cv::projectPoints(objectPoints, rvec, tvec, K, cv::Mat(), imagePointsReproj);
				float err = 0.0f;
				for(unsigned int i=0; i<inliers.size(); ++i)
				{
					err += uNormSquared(imagePoints.at(inliers[i]).x - imagePointsReproj.at(inliers[i]).x, imagePoints.at(inliers[i]).y - imagePointsReproj.at(inliers[i]).y);
				}
				UASSERT(uIsFinite(err));
				*covariance *= std::sqrt(err/float(inliers.size()));
			}
		}
	}

	if(matchesOut)
	{
		*matchesOut = matches;
	}
	if(inliersOut)
	{
		inliersOut->resize(inliers.size());
		for(unsigned int i=0; i<inliers.size(); ++i)
		{
			inliersOut->at(i) = matches[inliers[i]];
		}
	}

	return transform;
}
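
/*
 * A minimal usage sketch for the single-camera overload above; names and
 * values are hypothetical placeholders (defaults taken from the declaration
 * in util3d_motion_estimation.h):
 *
 *   std::map<int, cv::Point3f> words3A = ...;  // 3D words of frame A, in A's base frame
 *   std::map<int, cv::KeyPoint> words2B = ...; // 2D words of frame B, same ids
 *   CameraModel model = ...;                   // calibrated camera of frame B
 *   cv::Mat covariance;
 *   std::vector<int> inliers;
 *   Transform t = util3d::estimateMotion3DTo2D(
 *           words3A, words2B, model,
 *           10,                       // minInliers
 *           100,                      // iterations
 *           5.0,                      // reprojError (pixels)
 *           0, 1, 4,                  // flagsPnP, refineIterations, varianceMedianRatio
 *           0.0f,                     // maxVariance (0 = disabled)
 *           Transform::getIdentity(), // guess (must not be null)
 *           std::map<int, cv::Point3f>(), // words3B (optional, improves variance)
 *           &covariance, 0, &inliers);
 *   if(!t.isNull())
 *   {
 *       // t is the estimated motion of frame B relative to frame A
 *   }
 */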
290 
Transform estimateMotion3DTo2D(
		const std::map<int, cv::Point3f> & words3A,
		const std::map<int, cv::KeyPoint> & words2B,
		const std::vector<CameraModel> & cameraModels,
		unsigned int samplingPolicy,
		int minInliers,
		int iterations,
		double reprojError,
		int flagsPnP,
		int refineIterations,
		int varianceMedianRatio,
		float maxVariance,
		const Transform & guess,
		const std::map<int, cv::Point3f> & words3B,
		cv::Mat * covariance,
		std::vector<int> * matchesOut,
		std::vector<int> * inliersOut,
		bool splitLinearCovarianceComponents)
{
	Transform transform;
#ifndef RTABMAP_OPENGV
	UERROR("This function is only available if rtabmap is built with OpenGV dependency.");
#else
	UASSERT(!cameraModels.empty() && cameraModels[0].imageWidth() > 0);
	int subImageWidth = cameraModels[0].imageWidth();
	for(size_t i=0; i<cameraModels.size(); ++i)
	{
		UASSERT(cameraModels[i].isValidForProjection());
		UASSERT(subImageWidth == cameraModels[i].imageWidth());
	}

	UASSERT(!guess.isNull());
	UASSERT(varianceMedianRatio > 1);

	std::vector<int> matches, inliers;

	if(covariance)
	{
		*covariance = cv::Mat::eye(6,6,CV_64FC1);
	}

	// find correspondences
	std::vector<int> ids = uKeys(words2B);
	std::vector<cv::Point3f> objectPoints(ids.size());
	std::vector<cv::Point2f> imagePoints(ids.size());
	int oi=0;
	matches.resize(ids.size());
	std::vector<int> cameraIndexes(ids.size());
	for(unsigned int i=0; i<ids.size(); ++i)
	{
		std::map<int, cv::Point3f>::const_iterator iter = words3A.find(ids[i]);
		if(iter != words3A.end() && util3d::isFinite(iter->second))
		{
			const cv::Point2f & kpt = words2B.find(ids[i])->second.pt;
			int cameraIndex = int(kpt.x / subImageWidth);
			UASSERT_MSG(cameraIndex >= 0 && cameraIndex < (int)cameraModels.size(),
					uFormat("cameraIndex=%d, models=%d, kpt.x=%f, subImageWidth=%d (Camera model image width=%d)",
							cameraIndex, (int)cameraModels.size(), kpt.x, subImageWidth, cameraModels[cameraIndex].imageWidth()).c_str());

			const cv::Point3f & pt = iter->second;
			objectPoints[oi] = pt;
			imagePoints[oi] = kpt;
			// convert to sub-image coordinates
			imagePoints[oi].x = imagePoints[oi].x - (cameraIndex*subImageWidth);
			cameraIndexes[oi] = cameraIndex;
			matches[oi++] = ids[i];
		}
	}

	objectPoints.resize(oi);
	imagePoints.resize(oi);
	cameraIndexes.resize(oi);
	matches.resize(oi);

	UDEBUG("words3A=%d words2B=%d matches=%d words3B=%d guess=%s reprojError=%f iterations=%d samplingPolicy=%u",
			(int)words3A.size(), (int)words2B.size(), (int)matches.size(), (int)words3B.size(),
			guess.prettyPrint().c_str(), reprojError, iterations, samplingPolicy);

	if((int)matches.size() >= minInliers)
	{
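		// samplingPolicy (inferred from the handling below): 0 = AUTO (use homogeneous
		// sampling when every camera has at least 2 matches, otherwise fall back to ANY),
		// 1 = ANY (sample correspondences across all cameras), 2 = HOMOGENEOUS (sample
		// the same number of correspondences per camera).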
		if(samplingPolicy == 0 || samplingPolicy == 2)
		{
			std::vector<int> cc;
			cc.resize(cameraModels.size());
			std::fill(cc.begin(), cc.end(), 0);
			for(size_t i=0; i<cameraIndexes.size(); ++i)
			{
				cc[cameraIndexes[i]] = cc[cameraIndexes[i]] + 1;
			}

			for(size_t i=0; i<cameraModels.size(); ++i)
			{
				UDEBUG("Matches in Camera %d: %d", (int)i, cc[i]);
				// opengv multi ransac needs at least 2 matches/camera
				if(cc[i] < 2)
				{
					if(samplingPolicy==2)
					{
						UERROR("Not enough matches in camera %ld to do "
							   "homogeneous random sampling, returning null "
							   "transform. Consider using AUTO sampling "
							   "policy to fall back to ANY policy.", i);
						return Transform();
					}
					else // samplingPolicy==0
					{
						samplingPolicy = 1;
						UWARN("Not enough matches in camera %ld to do "
							  "homogeneous random sampling, falling back to ANY policy.", i);
						break;
					}
				}
			}
		}

		if(samplingPolicy == 0)
		{
			samplingPolicy = 2;
		}

		// convert cameras
		opengv::translations_t camOffsets;
		opengv::rotations_t camRotations;
		for(size_t i=0; i<cameraModels.size(); ++i)
		{
			camOffsets.push_back(opengv::translation_t(
					cameraModels[i].localTransform().x(),
					cameraModels[i].localTransform().y(),
					cameraModels[i].localTransform().z()));
			camRotations.push_back(cameraModels[i].localTransform().toEigen4d().block<3,3>(0, 0));
		}

		Transform pnp;
		if(samplingPolicy == 2) // Homogeneous random sampling
		{
			// convert 3d points
			std::vector<std::shared_ptr<opengv::points_t>> multiPoints;
			multiPoints.resize(cameraModels.size());
			// convert 2d-3d correspondences into bearing vectors
			std::vector<std::shared_ptr<opengv::bearingVectors_t>> multiBearingVectors;
			multiBearingVectors.resize(cameraModels.size());
			for(size_t i=0; i<cameraModels.size(); ++i)
			{
				multiPoints[i] = std::make_shared<opengv::points_t>();
				multiBearingVectors[i] = std::make_shared<opengv::bearingVectors_t>();
			}

			for(size_t i=0; i<objectPoints.size(); ++i)
			{
				int cameraIndex = cameraIndexes[i];
				multiPoints[cameraIndex]->push_back(opengv::point_t(objectPoints[i].x, objectPoints[i].y, objectPoints[i].z));
				cv::Vec3f pt;
				cameraModels[cameraIndex].project(imagePoints[i].x, imagePoints[i].y, 1, pt[0], pt[1], pt[2]);
				pt = cv::normalize(pt);
				multiBearingVectors[cameraIndex]->push_back(opengv::bearingVector_t(pt[0], pt[1], pt[2]));
			}

			// create a non-central absolute multi adapter
			opengv::absolute_pose::NoncentralAbsoluteMultiAdapter adapter(
					multiBearingVectors,
					multiPoints,
					camOffsets,
					camRotations);

			adapter.setR(guess.toEigen4d().block<3,3>(0, 0));
			adapter.sett(opengv::translation_t(guess.x(), guess.y(), guess.z()));

			// Create a MultiNoncentralAbsolutePoseSacProblem and MultiRansac
			// The method is set to GP3P
			opengv::sac::MultiRansac<opengv::sac_problems::absolute_pose::MultiNoncentralAbsolutePoseSacProblem> ransac;
			std::shared_ptr<opengv::sac_problems::absolute_pose::MultiNoncentralAbsolutePoseSacProblem> absposeproblem_ptr(
					new opengv::sac_problems::absolute_pose::MultiNoncentralAbsolutePoseSacProblem(adapter));

			ransac.sac_model_ = absposeproblem_ptr;
			ransac.threshold_ = 1.0 - cos(atan(reprojError/cameraModels[0].fx()));
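			// The OpenGV inlier threshold is angular: its residual is 1 - cos(angle)
			// between the measured and reprojected bearing vectors, so the pixel
			// reprojection error is converted to an angle at the image center with
			// atan(reprojError/fx), matching the convention used in OpenGV's examples.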
			ransac.max_iterations_ = iterations;
			UDEBUG("Ransac params: threshold = %f (reprojError=%f fx=%f), max iterations=%d", ransac.threshold_, reprojError, cameraModels[0].fx(), ransac.max_iterations_);

			// Run the experiment
			ransac.computeModel();

			pnp = Transform::fromEigen3d(ransac.model_coefficients_);

			UDEBUG("Ransac result: %s", pnp.prettyPrint().c_str());
			UDEBUG("Ransac iterations done: %d", ransac.iterations_);
			for(size_t i=0; i<cameraModels.size(); ++i)
			{
				inliers.insert(inliers.end(), ransac.inliers_[i].begin(), ransac.inliers_[i].end());
			}
		}
		else
		{
			// convert 3d points
			opengv::points_t points;

			// convert 2d-3d correspondences into bearing vectors
			opengv::bearingVectors_t bearingVectors;
			opengv::absolute_pose::NoncentralAbsoluteAdapter::camCorrespondences_t camCorrespondences;

			for(size_t i=0; i<objectPoints.size(); ++i)
			{
				int cameraIndex = cameraIndexes[i];
				points.push_back(opengv::point_t(objectPoints[i].x, objectPoints[i].y, objectPoints[i].z));
				cv::Vec3f pt;
				cameraModels[cameraIndex].project(imagePoints[i].x, imagePoints[i].y, 1, pt[0], pt[1], pt[2]);
				pt = cv::normalize(pt);
				bearingVectors.push_back(opengv::bearingVector_t(pt[0], pt[1], pt[2]));
				camCorrespondences.push_back(cameraIndex);
			}

			// create a non-central absolute adapter
			opengv::absolute_pose::NoncentralAbsoluteAdapter adapter(
					bearingVectors,
					camCorrespondences,
					points,
					camOffsets,
					camRotations);

			adapter.setR(guess.toEigen4d().block<3,3>(0, 0));
			adapter.sett(opengv::translation_t(guess.x(), guess.y(), guess.z()));

			// Create an AbsolutePoseSacProblem and Ransac
			// The method is set to GP3P
			opengv::sac::Ransac<opengv::sac_problems::absolute_pose::AbsolutePoseSacProblem> ransac;
			std::shared_ptr<opengv::sac_problems::absolute_pose::AbsolutePoseSacProblem> absposeproblem_ptr(
					new opengv::sac_problems::absolute_pose::AbsolutePoseSacProblem(adapter, opengv::sac_problems::absolute_pose::AbsolutePoseSacProblem::GP3P));

			ransac.sac_model_ = absposeproblem_ptr;
			ransac.threshold_ = 1.0 - cos(atan(reprojError/cameraModels[0].fx()));
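			// Same pixel-to-bearing-angle threshold conversion as in the
			// homogeneous-sampling branch above.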
			ransac.max_iterations_ = iterations;
			UDEBUG("Ransac params: threshold = %f (reprojError=%f fx=%f), max iterations=%d", ransac.threshold_, reprojError, cameraModels[0].fx(), ransac.max_iterations_);

			// Run the experiment
			ransac.computeModel();

			pnp = Transform::fromEigen3d(ransac.model_coefficients_);

			UDEBUG("Ransac result: %s", pnp.prettyPrint().c_str());
			UDEBUG("Ransac iterations done: %d", ransac.iterations_);
			inliers = ransac.inliers_;
		}

		UDEBUG("Ransac inliers: %ld", inliers.size());

		if((int)inliers.size() >= minInliers && !pnp.isNull())
		{
			transform = pnp;

			// compute variance (like in PCL computeVariance() method of sac_model.h)
			if(covariance)
			{
				std::vector<float> errorSqrdX;
				std::vector<float> errorSqrdY;
				std::vector<float> errorSqrdZ;
				if(splitLinearCovarianceComponents)
				{
					errorSqrdX.resize(inliers.size());
					errorSqrdY.resize(inliers.size());
					errorSqrdZ.resize(inliers.size());
				}
				std::vector<float> errorSqrdDists(inliers.size());
				std::vector<float> errorSqrdAngles(inliers.size());

				std::vector<Transform> transformsCameraFrame(cameraModels.size());
				std::vector<Transform> transformsCameraFrameInv(cameraModels.size());
				for(size_t i=0; i<cameraModels.size(); ++i)
				{
					transformsCameraFrame[i] = transform * cameraModels[i].localTransform();
					transformsCameraFrameInv[i] = transformsCameraFrame[i].inverse();
				}

				for(unsigned int i=0; i<inliers.size(); ++i)
				{
					cv::Point3f objPt = objectPoints[inliers[i]];

					// Get 3D point from cameraB base frame in cameraA base frame
					std::map<int, cv::Point3f>::const_iterator iter = words3B.find(matches[inliers[i]]);
					cv::Point3f newPt;
					if(iter!=words3B.end() && util3d::isFinite(iter->second))
					{
						newPt = util3d::transformPoint(iter->second, transform);
					}
					else
					{
						int cameraIndex = cameraIndexes[inliers[i]];

						// Project obj point from base frame of cameraA in cameraB frame (z+ in front of the cameraB)
						cv::Point3f objPtCamBFrame = util3d::transformPoint(objPt, transformsCameraFrameInv[cameraIndex]);

						// compute from projection
						Eigen::Vector3f ray = projectDepthTo3DRay(
								cameraModels[cameraIndex].imageSize(),
								imagePoints.at(inliers[i]).x,
								imagePoints.at(inliers[i]).y,
								cameraModels[cameraIndex].cx(),
								cameraModels[cameraIndex].cy(),
								cameraModels[cameraIndex].fx(),
								cameraModels[cameraIndex].fy());
						// transform in camera B frame
						newPt = cv::Point3f(ray.x(), ray.y(), ray.z()) * objPtCamBFrame.z*1.1; // add 10% error

						// transform back into cameraA base frame
						newPt = util3d::transformPoint(newPt, transformsCameraFrame[cameraIndex]);
					}

					if(splitLinearCovarianceComponents)
					{
						double errorX = objPt.x-newPt.x;
						double errorY = objPt.y-newPt.y;
						double errorZ = objPt.z-newPt.z;
						errorSqrdX[i] = errorX * errorX;
						errorSqrdY[i] = errorY * errorY;
						errorSqrdZ[i] = errorZ * errorZ;
					}

					errorSqrdDists[i] = uNormSquared(objPt.x-newPt.x, objPt.y-newPt.y, objPt.z-newPt.z);

					Eigen::Vector4f v1(objPt.x, objPt.y, objPt.z, 0);
					Eigen::Vector4f v2(newPt.x, newPt.y, newPt.z, 0);
					errorSqrdAngles[i] = pcl::getAngle3D(v1, v2);
				}

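				// Same robust median-based variance estimate as in the single-camera
				// overload above (2.1981 ~= 1.4826^2, indexed at the lower quartile).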
				std::sort(errorSqrdDists.begin(), errorSqrdDists.end());
				// divide by 4 instead of 2 to ignore very far features (stereo)
				double median_error_sqr_lin = 2.1981 * (double)errorSqrdDists[errorSqrdDists.size () / varianceMedianRatio];
				UASSERT(uIsFinite(median_error_sqr_lin));
				(*covariance)(cv::Range(0,3), cv::Range(0,3)) *= median_error_sqr_lin;
				std::sort(errorSqrdAngles.begin(), errorSqrdAngles.end());
				double median_error_sqr_ang = 2.1981 * (double)errorSqrdAngles[errorSqrdAngles.size () / varianceMedianRatio];
				UASSERT(uIsFinite(median_error_sqr_ang));
				(*covariance)(cv::Range(3,6), cv::Range(3,6)) *= median_error_sqr_ang;

				if(splitLinearCovarianceComponents)
				{
					std::sort(errorSqrdX.begin(), errorSqrdX.end());
					double median_error_sqr_x = 2.1981 * (double)errorSqrdX[errorSqrdX.size () / varianceMedianRatio];
					std::sort(errorSqrdY.begin(), errorSqrdY.end());
					double median_error_sqr_y = 2.1981 * (double)errorSqrdY[errorSqrdY.size () / varianceMedianRatio];
					std::sort(errorSqrdZ.begin(), errorSqrdZ.end());
					double median_error_sqr_z = 2.1981 * (double)errorSqrdZ[errorSqrdZ.size () / varianceMedianRatio];

					UASSERT(uIsFinite(median_error_sqr_x));
					UASSERT(uIsFinite(median_error_sqr_y));
					UASSERT(uIsFinite(median_error_sqr_z));
					covariance->at<double>(0,0) = median_error_sqr_x;
					covariance->at<double>(1,1) = median_error_sqr_y;
					covariance->at<double>(2,2) = median_error_sqr_z;

					median_error_sqr_lin = uMax3(median_error_sqr_x, median_error_sqr_y, median_error_sqr_z);
				}

				if(maxVariance > 0 && median_error_sqr_lin > maxVariance)
				{
					UWARN("Rejected PnP transform, variance is too high! %f > %f!", median_error_sqr_lin, maxVariance);
					*covariance = cv::Mat::eye(6,6,CV_64FC1);
					transform.setNull();
				}
			}
		}
	}

	if(matchesOut)
	{
		*matchesOut = matches;
	}
	if(inliersOut)
	{
		inliersOut->resize(inliers.size());
		for(unsigned int i=0; i<inliers.size(); ++i)
		{
			inliersOut->at(i) = matches[inliers[i]];
		}
	}
#endif
	return transform;
}
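
/*
 * Note on input layout for the multi-camera overload: keypoints in words2B are
 * expected in a single mosaic image where the sub-images of all cameras (each
 * of identical width) are laid out side by side. The camera index is recovered
 * as int(kpt.x / subImageWidth), and kpt.x is shifted back into sub-image
 * coordinates before the bearing vector is computed.
 */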

Transform estimateMotion3DTo3D(
		const std::map<int, cv::Point3f> & words3A,
		const std::map<int, cv::Point3f> & words3B,
		int minInliers,
		double inliersDistance,
		int iterations,
		int refineIterations,
		cv::Mat * covariance,
		std::vector<int> * matchesOut,
		std::vector<int> * inliersOut)
{
	Transform transform;
	std::vector<cv::Point3f> inliers1; // previous
	std::vector<cv::Point3f> inliers2; // new

	std::vector<int> matches;
	util3d::findCorrespondences(
			words3A,
			words3B,
			inliers1,
			inliers2,
			0,
			&matches);
	UASSERT(inliers1.size() == inliers2.size());
	UDEBUG("Unique correspondences = %d", (int)inliers1.size());

	if(covariance)
	{
		*covariance = cv::Mat::eye(6,6,CV_64FC1);
	}

	std::vector<int> inliers;
	if((int)inliers1.size() >= minInliers)
	{
		pcl::PointCloud<pcl::PointXYZ>::Ptr inliers1cloud(new pcl::PointCloud<pcl::PointXYZ>);
		pcl::PointCloud<pcl::PointXYZ>::Ptr inliers2cloud(new pcl::PointCloud<pcl::PointXYZ>);
		inliers1cloud->resize(inliers1.size());
		inliers2cloud->resize(inliers1.size());
		for(unsigned int i=0; i<inliers1.size(); ++i)
		{
			(*inliers1cloud)[i].x = inliers1[i].x;
			(*inliers1cloud)[i].y = inliers1[i].y;
			(*inliers1cloud)[i].z = inliers1[i].z;
			(*inliers2cloud)[i].x = inliers2[i].x;
			(*inliers2cloud)[i].y = inliers2[i].y;
			(*inliers2cloud)[i].z = inliers2[i].z;
		}
		Transform t = util3d::transformFromXYZCorrespondences(
				inliers2cloud,
				inliers1cloud,
				inliersDistance,
				iterations,
				refineIterations,
				3.0,
				&inliers,
				covariance);

		if(!t.isNull() && (int)inliers.size() >= minInliers)
		{
			transform = t;
		}
	}

	if(matchesOut)
	{
		*matchesOut = matches;
	}
	if(inliersOut)
	{
		inliersOut->resize(inliers.size());
		for(unsigned int i=0; i<inliers.size(); ++i)
		{
			inliersOut->at(i) = matches[inliers[i]];
		}
	}

	return transform;
}
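
/*
 * A minimal usage sketch for estimateMotion3DTo3D above; the word maps are
 * hypothetical placeholders and the values are the declaration defaults. The
 * 3D-3D case delegates the RANSAC + rigid registration to
 * util3d::transformFromXYZCorrespondences():
 *
 *   std::map<int, cv::Point3f> words3A = ...; // 3D words of frame A
 *   std::map<int, cv::Point3f> words3B = ...; // 3D words of frame B, same ids
 *   std::vector<int> inliers;
 *   Transform t = util3d::estimateMotion3DTo3D(
 *           words3A, words3B,
 *           10,   // minInliers
 *           0.1,  // inliersDistance (m)
 *           100,  // iterations
 *           5,    // refineIterations
 *           0, 0, &inliers);
 *   // t is null if not enough inliers were found
 */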


std::vector<float> computeReprojErrors(
		std::vector<cv::Point3f> opoints,
		std::vector<cv::Point2f> ipoints,
		const cv::Mat & cameraMatrix,
		const cv::Mat & distCoeffs,
		const cv::Mat & rvec,
		const cv::Mat & tvec,
		float reprojErrorThreshold,
		std::vector<int> & inliers)
{
	UASSERT(opoints.size() == ipoints.size());
	int count = (int)opoints.size();

	std::vector<cv::Point2f> projpoints;
	cv::projectPoints(opoints, rvec, tvec, cameraMatrix, distCoeffs, projpoints);

	inliers.resize(count, 0);
	std::vector<float> err(count);
	int oi=0;
	for(int i = 0; i < count; ++i)
	{
		float e = (float)cv::norm(ipoints[i] - projpoints[i]);
		if(e <= reprojErrorThreshold)
		{
			inliers[oi] = i;
			err[oi++] = e;
		}
	}
	inliers.resize(oi);
	err.resize(oi);
	return err;
}
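
// computeReprojErrors() keeps only the points whose reprojection error is within
// the threshold: it returns their per-point pixel errors and fills `inliers`
// with their indices, which drives the model refinement loop in solvePnPRansac()
// below.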

void solvePnPRansac(
		const std::vector<cv::Point3f> & objectPoints,
		const std::vector<cv::Point2f> & imagePoints,
		const cv::Mat & cameraMatrix,
		const cv::Mat & distCoeffs,
		cv::Mat & rvec,
		cv::Mat & tvec,
		bool useExtrinsicGuess,
		int iterationsCount,
		float reprojectionError,
		int minInliersCount,
		std::vector<int> & inliers,
		int flags,
		int refineIterations,
		float refineSigma)
{
	if(minInliersCount < 4)
	{
		minInliersCount = 4;
	}

	// Use the OpenCV3 version of solvePnPRansac, even when building against OpenCV2.
	// FIXME: we should use this bundled version with newer OpenCV (e.g. 3.3.1) too;
	// the built-in one seems a lot less stable. Why?
	cv3::solvePnPRansac(
			objectPoints,
			imagePoints,
			cameraMatrix,
			distCoeffs,
			rvec,
			tvec,
			useExtrinsicGuess,
			iterationsCount,
			reprojectionError,
			0.99, // confidence
			inliers,
			flags);

	float inlierThreshold = reprojectionError;
	if((int)inliers.size() >= minInliersCount && refineIterations > 0)
	{
		float error_threshold = inlierThreshold;
		int refine_iterations = 0;
		bool inlier_changed = false, oscillating = false;
		std::vector<int> new_inliers, prev_inliers = inliers;
		std::vector<size_t> inliers_sizes;
		//Eigen::VectorXf new_model_coefficients = model_coefficients;
		cv::Mat new_model_rvec = rvec;
		cv::Mat new_model_tvec = tvec;

		do
		{
			// Get inliers from the current model
			std::vector<cv::Point3f> opoints_inliers(prev_inliers.size());
			std::vector<cv::Point2f> ipoints_inliers(prev_inliers.size());
			for(unsigned int i=0; i<prev_inliers.size(); ++i)
			{
				opoints_inliers[i] = objectPoints[prev_inliers[i]];
				ipoints_inliers[i] = imagePoints[prev_inliers[i]];
			}

			UDEBUG("inliers=%d refine_iterations=%d, rvec=%f,%f,%f tvec=%f,%f,%f", (int)prev_inliers.size(), refine_iterations,
					*new_model_rvec.ptr<double>(0), *new_model_rvec.ptr<double>(1), *new_model_rvec.ptr<double>(2),
					*new_model_tvec.ptr<double>(0), *new_model_tvec.ptr<double>(1), *new_model_tvec.ptr<double>(2));

			// Optimize the model coefficients
			cv::solvePnP(opoints_inliers, ipoints_inliers, cameraMatrix, distCoeffs, new_model_rvec, new_model_tvec, true, flags);
			inliers_sizes.push_back(prev_inliers.size());

			UDEBUG("rvec=%f,%f,%f tvec=%f,%f,%f",
					*new_model_rvec.ptr<double>(0), *new_model_rvec.ptr<double>(1), *new_model_rvec.ptr<double>(2),
					*new_model_tvec.ptr<double>(0), *new_model_tvec.ptr<double>(1), *new_model_tvec.ptr<double>(2));

			// Select the new inliers based on the optimized coefficients and new threshold
			std::vector<float> err = computeReprojErrors(objectPoints, imagePoints, cameraMatrix, distCoeffs, new_model_rvec, new_model_tvec, error_threshold, new_inliers);
			UDEBUG("RANSAC refineModel: Number of inliers found (before/after): %d/%d, with an error threshold of %f.",
					(int)prev_inliers.size(), (int)new_inliers.size(), error_threshold);

			if((int)new_inliers.size() < minInliersCount)
			{
				++refine_iterations;
				if(refine_iterations >= refineIterations)
				{
					break;
				}
				continue;
			}

			// Estimate the variance and the new threshold
			float m = uMean(err.data(), err.size());
			float variance = uVariance(err.data(), err.size());
			error_threshold = std::min(inlierThreshold, refineSigma * float(sqrt(variance)));

			UDEBUG("RANSAC refineModel: New estimated error threshold: %f (variance=%f mean=%f) on iteration %d out of %d.",
					error_threshold, variance, m, refine_iterations, refineIterations);
			inlier_changed = false;
			std::swap(prev_inliers, new_inliers);

			// If the number of inliers changed, then we are still optimizing
			if(new_inliers.size() != prev_inliers.size())
			{
				// Check if the number of inliers is oscillating in between two values
				if((int)inliers_sizes.size() >= minInliersCount)
				{
					if(inliers_sizes[inliers_sizes.size() - 1] == inliers_sizes[inliers_sizes.size() - 3] &&
					   inliers_sizes[inliers_sizes.size() - 2] == inliers_sizes[inliers_sizes.size() - 4])
					{
						oscillating = true;
						break;
					}
				}
				inlier_changed = true;
				continue;
			}

			// Check the values of the inlier set
			for(size_t i = 0; i < prev_inliers.size(); ++i)
			{
				// If the value of the inliers changed, then we are still optimizing
				if(prev_inliers[i] != new_inliers[i])
				{
					inlier_changed = true;
					break;
				}
			}
		}
		while(inlier_changed && ++refine_iterations < refineIterations);

		// If the refined set of inliers is too small, the refinement failed
		if((int)prev_inliers.size() < minInliersCount)
		{
			UWARN("RANSAC refineModel: Refinement failed: got very low inliers (%d)!", (int)prev_inliers.size());
		}

		if(oscillating)
		{
			UDEBUG("RANSAC refineModel: Detected oscillations in the model refinement.");
		}

		std::swap(inliers, new_inliers);
		rvec = new_model_rvec;
		tvec = new_model_tvec;
	}
}
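
/*
 * The refinement loop above mirrors PCL's SampleConsensusModel-style model
 * refinement: re-fit the pose on the current inlier set with cv::solvePnP(),
 * recompute the inlier set with a threshold shrunk to
 * min(reprojectionError, refineSigma * stddev(errors)), and repeat until the
 * inlier set stops changing, oscillates between two sizes, or the iteration
 * budget is exhausted.
 */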

}

}