util3d_features.cpp
/*
Copyright (c) 2010-2016, Mathieu Labbe - IntRoLab - Universite de Sherbrooke
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
    * Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.
    * Neither the name of the Universite de Sherbrooke nor the
      names of its contributors may be used to endorse or promote products
      derived from this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

#include "rtabmap/core/util3d_features.h"

#include "rtabmap/core/util2d.h"
#include "rtabmap/core/util3d.h"
#include "rtabmap/core/util3d_transforms.h"
#include "rtabmap/core/util3d_correspondences.h"

#include "rtabmap/core/EpipolarGeometry.h"

#include "opencv/five-point.h"

#include <rtabmap/utilite/ULogger.h>
#include <rtabmap/utilite/UConversion.h>
#include <rtabmap/utilite/UMath.h>
#include <rtabmap/utilite/UStl.h>

#include <pcl/common/point_tests.h>

#include <opencv2/video/tracking.hpp>

namespace rtabmap
{

namespace util3d
{

std::vector<cv::Point3f> generateKeypoints3DDepth(
        const std::vector<cv::KeyPoint> & keypoints,
        const cv::Mat & depth,
        const CameraModel & cameraModel,
        float minDepth,
        float maxDepth)
{
    UASSERT(cameraModel.isValidForProjection());
    std::vector<CameraModel> models;
    models.push_back(cameraModel);
    return generateKeypoints3DDepth(keypoints, depth, models, minDepth, maxDepth);
}

std::vector<cv::Point3f> generateKeypoints3DDepth(
        const std::vector<cv::KeyPoint> & keypoints,
        const cv::Mat & depth,
        const std::vector<CameraModel> & cameraModels,
        float minDepth,
        float maxDepth)
{
    UASSERT(!depth.empty() && (depth.type() == CV_32FC1 || depth.type() == CV_16UC1));
    UASSERT(cameraModels.size());
    std::vector<cv::Point3f> keypoints3d;
    if(!depth.empty())
    {
        UASSERT(int((depth.cols/cameraModels.size())*cameraModels.size()) == depth.cols);
        float subImageWidth = depth.cols/cameraModels.size();
        keypoints3d.resize(keypoints.size());
        float rgbToDepthFactorX = 1.0f/(cameraModels[0].imageWidth()>0?float(cameraModels[0].imageWidth())/subImageWidth:1.0f);
        float rgbToDepthFactorY = 1.0f/(cameraModels[0].imageHeight()>0?float(cameraModels[0].imageHeight())/float(depth.rows):1.0f);
        float bad_point = std::numeric_limits<float>::quiet_NaN();
        for(unsigned int i=0; i<keypoints.size(); ++i)
        {
            float x = keypoints[i].pt.x*rgbToDepthFactorX;
            float y = keypoints[i].pt.y*rgbToDepthFactorY;
            int cameraIndex = int(x / subImageWidth);
            UASSERT_MSG(cameraIndex >= 0 && cameraIndex < (int)cameraModels.size(),
                    uFormat("cameraIndex=%d, models=%d, kpt.x=%f, subImageWidth=%f (Camera model image width=%d)",
                            cameraIndex, (int)cameraModels.size(), keypoints[i].pt.x, subImageWidth, cameraModels[0].imageWidth()).c_str());

            pcl::PointXYZ ptXYZ = util3d::projectDepthTo3D(
                    cameraModels.size()==1?depth:cv::Mat(depth, cv::Range::all(), cv::Range(subImageWidth*cameraIndex, subImageWidth*(cameraIndex+1))),
                    x-subImageWidth*cameraIndex,
                    y,
                    cameraModels.at(cameraIndex).cx()*rgbToDepthFactorX,
                    cameraModels.at(cameraIndex).cy()*rgbToDepthFactorY,
                    cameraModels.at(cameraIndex).fx()*rgbToDepthFactorX,
                    cameraModels.at(cameraIndex).fy()*rgbToDepthFactorY,
                    true);

            cv::Point3f pt(bad_point, bad_point, bad_point);
            if(pcl::isFinite(ptXYZ) &&
                (minDepth < 0.0f || ptXYZ.z > minDepth) &&
                (maxDepth <= 0.0f || ptXYZ.z <= maxDepth))
            {
                pt = cv::Point3f(ptXYZ.x, ptXYZ.y, ptXYZ.z);
                if(!cameraModels.at(cameraIndex).localTransform().isNull() &&
                    !cameraModels.at(cameraIndex).localTransform().isIdentity())
                {
                    pt = util3d::transformPoint(pt, cameraModels.at(cameraIndex).localTransform());
                }
            }
            keypoints3d.at(i) = pt;
        }
    }
    return keypoints3d;
}
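
// Usage sketch (not part of the original file): projecting two hand-made
// keypoints of a 640x480 RGB-D frame to 3D. The intrinsics, image size and the
// 0.5-4.0 m depth range are assumptions chosen for illustration, as is the
// CameraModel constructor argument order (fx, fy, cx, cy, local transform, Tx,
// image size).
static std::vector<cv::Point3f> exampleKeypoints3DDepth(const cv::Mat & depth)
{
    CameraModel model(525.0, 525.0, 320.0, 240.0,
            Transform::getIdentity(), 0.0, cv::Size(640, 480));
    std::vector<cv::KeyPoint> kpts;
    kpts.push_back(cv::KeyPoint(320.0f, 240.0f, 3.0f)); // image center, size=3 px
    kpts.push_back(cv::KeyPoint(100.0f, 200.0f, 3.0f));
    // Keypoints with invalid depth or outside [0.5, 4.0] m come back as NaN.
    return generateKeypoints3DDepth(kpts, depth, model, 0.5f, 4.0f);
}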

std::vector<cv::Point3f> generateKeypoints3DDisparity(
        const std::vector<cv::KeyPoint> & keypoints,
        const cv::Mat & disparity,
        const StereoCameraModel & stereoCameraModel,
        float minDepth,
        float maxDepth)
{
    UASSERT(!disparity.empty() && (disparity.type() == CV_16SC1 || disparity.type() == CV_32F));
    UASSERT(stereoCameraModel.isValidForProjection());
    std::vector<cv::Point3f> keypoints3d;
    keypoints3d.resize(keypoints.size());
    float bad_point = std::numeric_limits<float>::quiet_NaN();
    for(unsigned int i=0; i!=keypoints.size(); ++i)
    {
        cv::Point3f tmpPt = util3d::projectDisparityTo3D(
                keypoints[i].pt,
                disparity,
                stereoCameraModel);

        cv::Point3f pt(bad_point, bad_point, bad_point);
        if(util3d::isFinite(tmpPt) &&
            (minDepth < 0.0f || tmpPt.z > minDepth) &&
            (maxDepth <= 0.0f || tmpPt.z <= maxDepth))
        {
            pt = tmpPt;
            if(!stereoCameraModel.left().localTransform().isNull() &&
                !stereoCameraModel.left().localTransform().isIdentity())
            {
                pt = util3d::transformPoint(pt, stereoCameraModel.left().localTransform());
            }
        }
        keypoints3d.at(i) = pt;
    }
    return keypoints3d;
}
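
// Usage sketch (not part of the original file): typical arguments for the
// disparity variant. The disparity image is assumed to be precomputed (for
// example by a stereo block matcher) as CV_16SC1 or CV_32F, registered to the
// left camera of the calibrated stereo model.
static std::vector<cv::Point3f> exampleKeypoints3DDisparity(
        const std::vector<cv::KeyPoint> & kpts,
        const cv::Mat & disparity,
        const StereoCameraModel & stereoModel)
{
    // minDepth=0 keeps any point in front of the camera; maxDepth=10 rejects
    // far (thus uncertain) triangulations.
    return generateKeypoints3DDisparity(kpts, disparity, stereoModel, 0.0f, 10.0f);
}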

std::vector<cv::Point3f> generateKeypoints3DStereo(
        const std::vector<cv::Point2f> & leftCorners,
        const std::vector<cv::Point2f> & rightCorners,
        const StereoCameraModel & model,
        const std::vector<unsigned char> & mask,
        float minDepth,
        float maxDepth)
{
    UASSERT(leftCorners.size() == rightCorners.size());
    UASSERT(mask.size() == 0 || leftCorners.size() == mask.size());
    UASSERT(model.left().fx() > 0.0f && model.baseline() > 0.0f);

    std::vector<cv::Point3f> keypoints3d;
    keypoints3d.resize(leftCorners.size());
    float bad_point = std::numeric_limits<float>::quiet_NaN();
    for(unsigned int i=0; i<leftCorners.size(); ++i)
    {
        cv::Point3f pt(bad_point, bad_point, bad_point);
        if(mask.empty() || mask[i])
        {
            float disparity = leftCorners[i].x - rightCorners[i].x;
            if(disparity != 0.0f)
            {
                cv::Point3f tmpPt = util3d::projectDisparityTo3D(
                        leftCorners[i],
                        disparity,
                        model);

                if(util3d::isFinite(tmpPt) &&
                    (minDepth < 0.0f || tmpPt.z > minDepth) &&
                    (maxDepth <= 0.0f || tmpPt.z <= maxDepth))
                {
                    pt = tmpPt;
                    if(!model.localTransform().isNull() &&
                        !model.localTransform().isIdentity())
                    {
                        pt = util3d::transformPoint(pt, model.localTransform());
                    }
                }
            }
        }

        keypoints3d.at(i) = pt;
    }
    return keypoints3d;
}
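
// Usage sketch (not part of the original file): extract corners on the left
// image, track them into the right image with pyramidal Lucas-Kanade, then
// triangulate. The detector settings and LK window size are assumptions chosen
// for the example; the tracker's status vector doubles as the mask.
static std::vector<cv::Point3f> exampleKeypoints3DStereo(
        const cv::Mat & left,   // CV_8UC1 rectified left image
        const cv::Mat & right,  // CV_8UC1 rectified right image
        const StereoCameraModel & model)
{
    std::vector<cv::Point2f> leftCorners;
    cv::goodFeaturesToTrack(left, leftCorners, 500, 0.01, 5.0);
    std::vector<cv::Point2f> rightCorners;
    std::vector<unsigned char> status;
    std::vector<float> err;
    cv::calcOpticalFlowPyrLK(left, right, leftCorners, rightCorners, status, err,
            cv::Size(21, 21), 3);
    // Untracked corners (status=0) and zero disparities come back as NaN.
    return generateKeypoints3DStereo(leftCorners, rightCorners, model, status, 0.5f, 20.0f);
}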

// cameraTransform: transform from the ref frame to the next frame (output).
// If it is not null on input, its norm is used as a scale guess for the
// recovered translation.
// Returns the triangulated 3D points, expressed in the ref frame.
// When refGuess3D is provided, the scale is estimated from the corresponding
// 3D points and applied to both the returned cloud and cameraTransform.
std::map<int, cv::Point3f> generateWords3DMono(
        const std::map<int, cv::KeyPoint> & refWords,
        const std::map<int, cv::KeyPoint> & nextWords,
        const CameraModel & cameraModel,
        Transform & cameraTransform,
        float ransacReprojThreshold,
        float ransacConfidence,
        const std::map<int, cv::Point3f> & refGuess3D,
        double * varianceOut,
        std::vector<int> * matchesOut)
{
    UASSERT(cameraModel.isValidForProjection());
    std::map<int, cv::Point3f> words3D;
    std::list<std::pair<int, std::pair<cv::KeyPoint, cv::KeyPoint> > > pairs;
    int pairsFound = EpipolarGeometry::findPairs(refWords, nextWords, pairs);
    UDEBUG("pairsFound=%d/%d", pairsFound, int(refWords.size()>nextWords.size()?refWords.size():nextWords.size()));
    if(pairsFound > 8)
    {
        std::list<std::pair<int, std::pair<cv::KeyPoint, cv::KeyPoint> > >::iterator iter = pairs.begin();
        std::vector<cv::Point2f> refCorners(pairs.size());
        std::vector<cv::Point2f> newCorners(pairs.size());
        std::vector<int> indexes(pairs.size());
        for(unsigned int i=0; i<pairs.size(); ++i)
        {
            if(matchesOut)
            {
                matchesOut->push_back(iter->first);
            }

            refCorners[i] = iter->second.first.pt;
            newCorners[i] = iter->second.second.pt;
            indexes[i] = iter->first;
            ++iter;
        }

        std::vector<unsigned char> status;
        cv::Mat pts4D;

        UDEBUG("Five-point algorithm");
        cv::Mat E = cv3::findEssentialMat(refCorners, newCorners, cameraModel.K(), cv::RANSAC, ransacConfidence, ransacReprojThreshold, status);

        int essentialInliers = 0;
        for(size_t i=0; i<status.size(); ++i)
        {
            if(status[i])
            {
                ++essentialInliers;
            }
        }
        Transform cameraTransformGuess = cameraTransform;
        if(!E.empty())
        {
            UDEBUG("essential inliers=%d/%d", essentialInliers, (int)status.size());
            cv::Mat R, t;
            cv3::recoverPose(E, refCorners, newCorners, cameraModel.K(), R, t, 50, status, pts4D);
            if(!R.empty() && !t.empty())
            {
                cv::Mat P = cv::Mat::zeros(3, 4, CV_64FC1);
                R.copyTo(cv::Mat(P, cv::Range(0,3), cv::Range(0,3)));
                P.at<double>(0,3) = t.at<double>(0);
                P.at<double>(1,3) = t.at<double>(1);
                P.at<double>(2,3) = t.at<double>(2);

                cameraTransform = Transform(R.at<double>(0,0), R.at<double>(0,1), R.at<double>(0,2), t.at<double>(0),
                        R.at<double>(1,0), R.at<double>(1,1), R.at<double>(1,2), t.at<double>(1),
                        R.at<double>(2,0), R.at<double>(2,1), R.at<double>(2,2), t.at<double>(2));
                UDEBUG("t (cam frame)=%s", cameraTransform.prettyPrint().c_str());
                UDEBUG("base->cam=%s", cameraModel.localTransform().prettyPrint().c_str());
                cameraTransform = cameraModel.localTransform() * cameraTransform.inverse() * cameraModel.localTransform().inverse();
                UDEBUG("t (base frame)=%s", cameraTransform.prettyPrint().c_str());

                UASSERT((int)indexes.size() == pts4D.cols && pts4D.rows == 4 && status.size() == indexes.size());
                for(unsigned int i=0; i<indexes.size(); ++i)
                {
                    if(status[i])
                    {
                        pts4D.col(i) /= pts4D.at<double>(3,i);
                        if(pts4D.at<double>(2,i) > 0)
                        {
                            words3D.insert(std::make_pair(indexes[i], util3d::transformPoint(cv::Point3f(pts4D.at<double>(0,i), pts4D.at<double>(1,i), pts4D.at<double>(2,i)), cameraModel.localTransform())));
                        }
                    }
                }
            }
        }
        else
        {
            UDEBUG("Failed to find essential matrix");
        }

        if(!cameraTransform.isNull())
        {
            UDEBUG("words3D=%d refGuess3D=%d cameraGuess=%s", (int)words3D.size(), (int)refGuess3D.size(), cameraTransformGuess.prettyPrint().c_str());

            // estimate the scale and variance
            float scale = 1.0f;
            if(!cameraTransformGuess.isNull())
            {
                scale = cameraTransformGuess.getNorm()/cameraTransform.getNorm();
            }
            float variance = 1.0f;

            std::vector<cv::Point3f> inliersRef;
            std::vector<cv::Point3f> inliersRefGuess;
            if(!refGuess3D.empty())
            {
                util3d::findCorrespondences(
                        words3D,
                        refGuess3D,
                        inliersRef,
                        inliersRefGuess,
                        0);
            }

            if(!inliersRef.empty())
            {
                UDEBUG("inliersRef=%d", (int)inliersRef.size());
                if(cameraTransformGuess.isNull())
                {
                    std::multimap<float, float> scales; // <variance, scale>
                    for(unsigned int i=0; i<inliersRef.size(); ++i)
                    {
                        // using x as depth, assuming we are in the global frame
                        float s = inliersRefGuess.at(i).x/inliersRef.at(i).x;
                        std::vector<float> errorSqrdDists(inliersRef.size());
                        for(unsigned int j=0; j<inliersRef.size(); ++j)
                        {
                            cv::Point3f refPt = inliersRef.at(j);
                            refPt.x *= s;
                            refPt.y *= s;
                            refPt.z *= s;
                            const cv::Point3f & newPt = inliersRefGuess.at(j);
                            errorSqrdDists[j] = uNormSquared(refPt.x-newPt.x, refPt.y-newPt.y, refPt.z-newPt.z);
                        }
                        std::sort(errorSqrdDists.begin(), errorSqrdDists.end());
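                        // Robust spread of the residuals: size>>2 indexes the first
                        // quartile of the sorted squared errors (despite the "median"
                        // name below), and 2.1981 ~= 1.4826^2 appears to be the squared
                        // MAD-to-standard-deviation consistency factor for Gaussian noise.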
                        double median_error_sqr = (double)errorSqrdDists[errorSqrdDists.size() >> 2];
                        float var = 2.1981 * median_error_sqr;
                        //UDEBUG("scale %d = %f variance = %f", (int)i, s, variance);

                        scales.insert(std::make_pair(var, s));
                    }
                    scale = scales.begin()->second;
                    variance = scales.begin()->first;
                }
                else if(!cameraTransformGuess.isNull())
                {
                    // use scale from guess
                    // compute variance
                    std::vector<float> errorSqrdDists(inliersRef.size());
                    for(unsigned int j=0; j<inliersRef.size(); ++j)
                    {
                        cv::Point3f refPt = inliersRef.at(j);
                        refPt.x *= scale;
                        refPt.y *= scale;
                        refPt.z *= scale;
                        const cv::Point3f & newPt = inliersRefGuess.at(j);
                        errorSqrdDists[j] = uNormSquared(refPt.x-newPt.x, refPt.y-newPt.y, refPt.z-newPt.z);
                    }
                    std::sort(errorSqrdDists.begin(), errorSqrdDists.end());
                    double median_error_sqr = (double)errorSqrdDists[errorSqrdDists.size() >> 2];
                    variance = 2.1981 * median_error_sqr;
                }
            }
            else if(!refGuess3D.empty())
            {
                UWARN("Cannot compute variance, no points corresponding between "
                        "the generated ref words (%d) and words guess (%d)",
                        (int)words3D.size(), (int)refGuess3D.size());
            }

            if(scale != 1.0f)
            {
                // Adjust output transform and points based on scale found
                cameraTransform.x() *= scale;
                cameraTransform.y() *= scale;
                cameraTransform.z() *= scale;

                UASSERT(indexes.size() == newCorners.size());
                for(unsigned int i=0; i<indexes.size(); ++i)
                {
                    std::map<int, cv::Point3f>::iterator iter = words3D.find(indexes[i]);
                    if(iter!=words3D.end() && util3d::isFinite(iter->second))
                    {
                        iter->second.x *= scale;
                        iter->second.y *= scale;
                        iter->second.z *= scale;
                    }
                }
            }
            UDEBUG("scale used = %f (variance=%f)", scale, variance);
            if(varianceOut)
            {
                *varianceOut = variance;
            }
        }
    }
    UDEBUG("wordsSet=%d / %d", (int)words3D.size(), pairsFound);

    return words3D;
}
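
// Usage sketch (not part of the original file): triangulating visual words
// between two monocular frames. The word maps are assumed to be filled by the
// caller; IDs present in both maps are paired internally with
// EpipolarGeometry::findPairs(), and at least 9 pairs are required. Without a
// 3D guess, the translation is recovered only up to an arbitrary scale.
static void exampleWords3DMono(
        const std::map<int, cv::KeyPoint> & refWords,
        const std::map<int, cv::KeyPoint> & nextWords,
        const CameraModel & model)
{
    Transform cameraTransform; // null on input: no scale guess
    double variance = 1.0;
    std::map<int, cv::Point3f> words3D = generateWords3DMono(
            refWords, nextWords, model, cameraTransform,
            3.0f,  // RANSAC reprojection threshold, in pixels
            0.99f, // RANSAC confidence
            std::map<int, cv::Point3f>(), // no reference 3D guess
            &variance);
    UDEBUG("%d words triangulated (variance=%f), t=%s", (int)words3D.size(),
            variance, cameraTransform.prettyPrint().c_str());
}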

std::multimap<int, cv::KeyPoint> aggregate(
        const std::list<int> & wordIds,
        const std::vector<cv::KeyPoint> & keypoints)
{
    UASSERT(wordIds.size() == keypoints.size()); // parallel containers
    std::multimap<int, cv::KeyPoint> words;
    std::vector<cv::KeyPoint>::const_iterator kpIter = keypoints.begin();
    for(std::list<int>::const_iterator iter=wordIds.begin(); iter!=wordIds.end(); ++iter)
    {
        words.insert(std::pair<int, cv::KeyPoint>(*iter, *kpIter));
        ++kpIter;
    }
    return words;
}
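
// Usage sketch (not part of the original file): pairing keypoints with
// sequential word IDs. In RTAB-Map the IDs normally come from quantizing
// descriptors against the visual word dictionary; sequential IDs are used
// here only to keep the example self-contained.
static std::multimap<int, cv::KeyPoint> exampleAggregate(
        const std::vector<cv::KeyPoint> & keypoints)
{
    std::list<int> wordIds;
    for(unsigned int i=0; i<keypoints.size(); ++i)
    {
        wordIds.push_back(int(i) + 1); // one word ID per keypoint
    }
    return aggregate(wordIds, keypoints);
}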

} // namespace util3d

} // namespace rtabmap