#include <pcl/common/point_tests.h>
#include <opencv2/video/tracking.hpp>
		const std::vector<cv::KeyPoint> & keypoints,
		const cv::Mat & depth,

	std::vector<CameraModel> models;
	models.push_back(cameraModel);
		const std::vector<cv::KeyPoint> & keypoints,
		const cv::Mat & depth,
		const std::vector<CameraModel> & cameraModels,

	UASSERT(!depth.empty() && (depth.type() == CV_32FC1 || depth.type() == CV_16UC1));
	std::vector<cv::Point3f> keypoints3d;

	UASSERT(int((depth.cols/cameraModels.size())*cameraModels.size()) == depth.cols);
	float subImageWidth = depth.cols/cameraModels.size();
	keypoints3d.resize(keypoints.size());
	float rgbToDepthFactorX = 1.0f/(cameraModels[0].imageWidth()>0?float(cameraModels[0].imageWidth())/subImageWidth:1.0f);
	float rgbToDepthFactorY = 1.0f/(cameraModels[0].imageHeight()>0?float(cameraModels[0].imageHeight())/float(depth.rows):1.0f);
	float bad_point = std::numeric_limits<float>::quiet_NaN();
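	// Note on the scaling factors above: keypoints are detected in the RGB image, while
	// the registered depth image may have a lower resolution. rgbToDepthFactorX/Y convert
	// RGB pixel coordinates into depth pixel coordinates. A small worked example with
	// hypothetical numbers (not from this file): for a single camera whose model reports
	// a 640x480 RGB image and a 320x240 depth image, subImageWidth = 320, so
	// rgbToDepthFactorX = 1/(640/320) = 0.5 and a keypoint at x = 400 is looked up at
	// depth column 400*0.5 = 200.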
	for(unsigned int i=0; i<keypoints.size(); ++i)
	{
		float x = keypoints[i].pt.x*rgbToDepthFactorX;
		float y = keypoints[i].pt.y*rgbToDepthFactorY;
		int cameraIndex = int(x / subImageWidth);
		UASSERT_MSG(cameraIndex >= 0 && cameraIndex < (int)cameraModels.size(),
				uFormat("cameraIndex=%d, models=%d, kpt.x=%f, subImageWidth=%f (Camera model image width=%d)",
						cameraIndex, (int)cameraModels.size(), keypoints[i].pt.x, subImageWidth, cameraModels[0].imageWidth()).c_str());
		pcl::PointXYZ ptXYZ = util3d::projectDepthTo3D(
				cameraModels.size()==1?depth:cv::Mat(depth, cv::Range::all(), cv::Range(subImageWidth*cameraIndex, subImageWidth*(cameraIndex+1))),
				x-subImageWidth*cameraIndex,
				y,
				cameraModels.at(cameraIndex).cx()*rgbToDepthFactorX,
				cameraModels.at(cameraIndex).cy()*rgbToDepthFactorY,
				cameraModels.at(cameraIndex).fx()*rgbToDepthFactorX,
				cameraModels.at(cameraIndex).fy()*rgbToDepthFactorY,
				true);
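		// When several camera models share one horizontally concatenated depth image,
		// the keypoint's camera is selected by integer division above, the depth ROI is
		// restricted to that camera's sub-image, x is shifted into the sub-image, and the
		// intrinsics (cx, cy, fx, fy) are rescaled with the same RGB-to-depth factors so
		// that the back-projection is done at the depth image's resolution.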
		cv::Point3f pt(bad_point, bad_point, bad_point);
		if(pcl::isFinite(ptXYZ) &&
			(minDepth < 0.0f || ptXYZ.z > minDepth) &&
			(maxDepth <= 0.0f || ptXYZ.z <= maxDepth))
		{
			pt = cv::Point3f(ptXYZ.x, ptXYZ.y, ptXYZ.z);
			if(!cameraModels.at(cameraIndex).localTransform().isNull() &&
			   !cameraModels.at(cameraIndex).localTransform().isIdentity())
			{
				pt = util3d::transformPoint(pt, cameraModels.at(cameraIndex).localTransform());
			}
		}
		keypoints3d.at(i) = pt;
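		// Keypoints rejected by the finite/min-depth/max-depth checks keep the NaN
		// "bad_point" value, so the output vector stays aligned index-for-index with the
		// input keypoints; callers are expected to test each entry (e.g. with a
		// finiteness check) before using it.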
		const std::vector<cv::KeyPoint> & keypoints,
		const cv::Mat & disparity,

	UASSERT(!disparity.empty() && (disparity.type() == CV_16SC1 || disparity.type() == CV_32F));

	std::vector<cv::Point3f> keypoints3d;
	keypoints3d.resize(keypoints.size());
	float bad_point = std::numeric_limits<float>::quiet_NaN();
	for(unsigned int i=0; i!=keypoints.size(); ++i)
	{

		cv::Point3f pt(bad_point, bad_point, bad_point);
		if(util3d::isFinite(tmpPt) &&
			(minDepth < 0.0f || tmpPt.z > minDepth) &&
			(maxDepth <= 0.0f || tmpPt.z <= maxDepth))

		keypoints3d.at(i) = pt;
		const std::vector<cv::Point2f> & leftCorners,
		const std::vector<cv::Point2f> & rightCorners,
		const std::vector<unsigned char> & mask,

	UASSERT(leftCorners.size() == rightCorners.size());

	std::vector<cv::Point3f> keypoints3d;
	keypoints3d.resize(leftCorners.size());
	float bad_point = std::numeric_limits<float>::quiet_NaN();
	for(unsigned int i=0; i<leftCorners.size(); ++i)
	{
		cv::Point3f pt(bad_point, bad_point, bad_point);

		float disparity = leftCorners[i].x - rightCorners[i].x;
		if(disparity != 0.0f)
		{

			if(util3d::isFinite(tmpPt) &&
				(minDepth < 0.0f || tmpPt.z > minDepth) &&
				(maxDepth <= 0.0f || tmpPt.z <= maxDepth))
			{

				if(!model.localTransform().isNull() &&
				   !model.localTransform().isIdentity())
				{
					pt = util3d::transformPoint(pt, model.localTransform());
				}
			}
		}

		keypoints3d.at(i) = pt;
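	// The stereo variant triangulates from the horizontal disparity
	// leftCorners[i].x - rightCorners[i].x. For rectified stereo, depth is roughly
	// baseline*fx/disparity (e.g. with a hypothetical 0.1 m baseline, fx = 500 px and a
	// 5 px disparity, z = 0.1*500/5 = 10 m), which is why a zero disparity is skipped:
	// it would correspond to a point at infinity.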
		const std::map<int, cv::KeyPoint> & refWords,
		const std::map<int, cv::KeyPoint> & nextWords,

		float ransacReprojThreshold,
		float ransacConfidence,
		const std::map<int, cv::Point3f> & refGuess3D,
		double * varianceOut,
		std::vector<int> * matchesOut)

	std::map<int, cv::Point3f> words3D;
	std::list<std::pair<int, std::pair<cv::KeyPoint, cv::KeyPoint> > > pairs;

	UDEBUG("pairsFound=%d/%d", pairsFound, int(refWords.size()>nextWords.size()?refWords.size():nextWords.size()));
	std::list<std::pair<int, std::pair<cv::KeyPoint, cv::KeyPoint> > >::iterator iter=pairs.begin();
	std::vector<cv::Point2f> refCorners(pairs.size());
	std::vector<cv::Point2f> newCorners(pairs.size());
	std::vector<int> indexes(pairs.size());
	for(unsigned int i=0; i<pairs.size(); ++i)
	{
		if(matchesOut)
		{
			matchesOut->push_back(iter->first);
		}
		refCorners[i] = iter->second.first.pt;
		newCorners[i] = iter->second.second.pt;
		indexes[i] = iter->first;
		++iter;
	}
	std::vector<unsigned char> status;

	UDEBUG("Five-point algorithm");

	cv::Mat E = cv3::findEssentialMat(refCorners, newCorners, cameraModel.K(), cv::RANSAC, ransacConfidence, ransacReprojThreshold, status);
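	// findEssentialMat runs the five-point algorithm inside a RANSAC loop: it estimates
	// the essential matrix E from the 2D-2D correspondences using the calibrated
	// intrinsics K, with ransacConfidence/ransacReprojThreshold controlling the RANSAC
	// termination and inlier threshold, and fills "status" with one flag per
	// correspondence marking the inliers. The cv3 namespace presumably wraps the
	// OpenCV 3 API of this function for builds against older OpenCV versions.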
	int essentialInliers = 0;
	for(size_t i=0; i<status.size(); ++i)
	{
		if(status[i])
		{
			++essentialInliers;
		}
	}
	Transform cameraTransformGuess = cameraTransform;

	UDEBUG("essential inliers=%d/%d", essentialInliers, (int)status.size());
	if(!R.empty() && !t.empty())
	{
		cv::Mat P = cv::Mat::zeros(3, 4, CV_64FC1);
		R.copyTo(cv::Mat(P, cv::Range(0,3), cv::Range(0,3)));
		P.at<double>(0,3) = t.at<double>(0);
		P.at<double>(1,3) = t.at<double>(1);
		P.at<double>(2,3) = t.at<double>(2);
		cameraTransform = Transform(
				R.at<double>(0,0), R.at<double>(0,1), R.at<double>(0,2), t.at<double>(0),
				R.at<double>(1,0), R.at<double>(1,1), R.at<double>(1,2), t.at<double>(1),
				R.at<double>(2,0), R.at<double>(2,1), R.at<double>(2,2), t.at<double>(2));
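		// R and t (recovered from E in code not shown in this fragment) give the relative
		// camera pose, but the translation from an essential matrix is only defined up to
		// an unknown scale; this is why a metric scale is estimated further below from the
		// 3D guess (refGuess3D / cameraTransformGuess) and applied to the transform.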
		UASSERT((int)indexes.size() == pts4D.cols && pts4D.rows == 4 && status.size() == indexes.size());
		for(unsigned int i=0; i<indexes.size(); ++i)
		{

			pts4D.col(i) /= pts4D.at<double>(3,i);
			if(pts4D.at<double>(2,i) > 0)
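			// pts4D holds the triangulated points in homogeneous coordinates (4 x N):
			// each column is normalized by its fourth (w) component, and only points
			// with a positive z, i.e. in front of the camera, pass this check.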
		UDEBUG("Failed to find essential matrix");

	if(!cameraTransform.isNull())
	{
		UDEBUG("words3D=%d refGuess3D=%d cameraGuess=%s", (int)words3D.size(), (int)refGuess3D.size(), cameraTransformGuess.prettyPrint().c_str());

		if(!cameraTransformGuess.isNull())
		float variance = 1.0f;
		float scale = 1.0f;

		std::vector<cv::Point3f> inliersRef;
		std::vector<cv::Point3f> inliersRefGuess;
		if(!refGuess3D.empty())
		if(!inliersRef.empty())
		{
			UDEBUG("inliersRef=%d", (int)inliersRef.size());
			if(cameraTransformGuess.isNull())
			{
				std::multimap<float, float> scales;
				for(unsigned int i=0; i<inliersRef.size(); ++i)
				{
					float s = inliersRefGuess.at(i).x/inliersRef.at(i).x;
					std::vector<float> errorSqrdDists(inliersRef.size());
					for(unsigned int j=0; j<inliersRef.size(); ++j)
					{
						cv::Point3f refPt = inliersRef.at(j);
						refPt.x *= s;
						refPt.y *= s;
						refPt.z *= s;
						const cv::Point3f & newPt = inliersRefGuess.at(j);
						errorSqrdDists[j] = uNormSquared(refPt.x-newPt.x, refPt.y-newPt.y, refPt.z-newPt.z);
					}
					std::sort(errorSqrdDists.begin(), errorSqrdDists.end());
					double median_error_sqr = (double)errorSqrdDists[errorSqrdDists.size() >> 2];
					float var = 2.1981 * median_error_sqr;

					scales.insert(std::make_pair(var, s));
				}
				scale = scales.begin()->second;
				variance = scales.begin()->first;
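				// The factor 2.1981 is (1.4826)^2, the usual conversion from a median-style
				// absolute deviation to a standard-deviation estimate, so "var" approximates
				// the residual variance for a given scale candidate s. The candidates are
				// stored in a multimap keyed by this variance, so scales.begin() is the
				// candidate with the smallest residual spread. Note that indexing the
				// sorted errors with size() >> 2 picks the element a quarter of the way in
				// (first quartile) rather than the true median, despite the variable name.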
			else if(!cameraTransformGuess.isNull())
			{

				std::vector<float> errorSqrdDists(inliersRef.size());
				for(unsigned int j=0; j<inliersRef.size(); ++j)
				{
					cv::Point3f refPt = inliersRef.at(j);

					const cv::Point3f & newPt = inliersRefGuess.at(j);
					errorSqrdDists[j] = uNormSquared(refPt.x-newPt.x, refPt.y-newPt.y, refPt.z-newPt.z);
				}
				std::sort(errorSqrdDists.begin(), errorSqrdDists.end());
				double median_error_sqr = (double)errorSqrdDists[errorSqrdDists.size() >> 2];
				variance = 2.1981 * median_error_sqr;
		else if(!refGuess3D.empty())
		{
			UWARN("Cannot compute variance, no points corresponding between "
					"the generated ref words (%d) and words guess (%d)",
					(int)words3D.size(), (int)refGuess3D.size());
		}
		cameraTransform.x()*=scale;
		cameraTransform.y()*=scale;
		cameraTransform.z()*=scale;
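		// Because the translation recovered from the essential matrix has no metric unit,
		// the estimated scale is applied to the x/y/z components of cameraTransform here;
		// the loop below then revisits the triangulated words3D entries, presumably to put
		// them into the same metric frame.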
		UASSERT(indexes.size() == newCorners.size());
		for(unsigned int i=0; i<indexes.size(); ++i)
		{
			std::map<int, cv::Point3f>::iterator iter = words3D.find(indexes[i]);

		UDEBUG("scale used = %f (variance=%f)", scale, variance);
			*varianceOut = variance;

	UDEBUG("wordsSet=%d / %d", (int)words3D.size(), pairsFound);
		const std::list<int> & wordIds,
		const std::vector<cv::KeyPoint> & keypoints)

	std::multimap<int, cv::KeyPoint> words;
	std::vector<cv::KeyPoint>::const_iterator kpIter = keypoints.begin();
	for(std::list<int>::const_iterator iter=wordIds.begin(); iter!=wordIds.end(); ++iter)
	{
		words.insert(std::pair<int, cv::KeyPoint >(*iter, *kpIter));
		++kpIter;
	}
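	// wordIds and keypoints are expected to have the same length: the loop pairs the i-th
	// word id with the i-th keypoint, advancing kpIter alongside iter. A std::multimap is
	// used (rather than std::map) because the same word id can be assigned to several
	// keypoints in one image.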