ippe.cpp
#include "ippe.h"
#include <opencv2/imgproc.hpp>

#include <iostream>

using namespace cv;
using namespace std;


namespace aruco
{

cv::Mat getRTMatrix(const cv::Mat& R_, const cv::Mat& T_, int forceType)
{
    cv::Mat M;
    cv::Mat R, T;
    R_.copyTo(R);
    T_.copyTo(T);
    if (R.type() == CV_64F)
    {
        assert(T.type() == CV_64F);
        cv::Mat Matrix = cv::Mat::eye(4, 4, CV_64FC1);

        cv::Mat R33 = cv::Mat(Matrix, cv::Rect(0, 0, 3, 3));
        if (R.total() == 3)
        {
            cv::Rodrigues(R, R33);
        }
        else if (R.total() == 9)
        {
            cv::Mat R64;
            R.convertTo(R64, CV_64F);
            R64.copyTo(R33); // copy the converted matrix, not the original
        }
        for (int i = 0; i < 3; i++)
            Matrix.at<double>(i, 3) = T.ptr<double>(0)[i];
        M = Matrix;
    }
    else if (R.depth() == CV_32F)
    {
        cv::Mat Matrix = cv::Mat::eye(4, 4, CV_32FC1);
        cv::Mat R33 = cv::Mat(Matrix, cv::Rect(0, 0, 3, 3));
        if (R.total() == 3)
        {
            cv::Rodrigues(R, R33);
        }
        else if (R.total() == 9)
        {
            cv::Mat R32;
            R.convertTo(R32, CV_32F);
            R32.copyTo(R33); // copy the converted matrix, not the original
        }

        for (int i = 0; i < 3; i++)
            Matrix.at<float>(i, 3) = T.ptr<float>(0)[i];
        M = Matrix;
    }

    if (forceType == -1)
        return M;
    else
    {
        cv::Mat MTyped;
        M.convertTo(MTyped, forceType);
        return MTyped;
    }
}
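
// ---------------------------------------------------------------------------
// Illustrative usage sketch (not part of the original file): builds the 4x4
// rigid transform from a rotation vector and a translation vector. The input
// values below are assumptions chosen for the example.
// ---------------------------------------------------------------------------
static void exampleGetRTMatrix()
{
    cv::Mat rvec = (cv::Mat_<double>(3, 1) << 0.1, -0.2, 0.05); // assumed pose
    cv::Mat tvec = (cv::Mat_<double>(3, 1) << 0.0, 0.0, 1.5);
    cv::Mat M = getRTMatrix(rvec, tvec, CV_32F); // 4x4 matrix, forced to float
    std::cout << "M = " << M << std::endl;
}
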
vector<cv::Mat> solvePnP(const vector<cv::Point3f>& objPoints,
                         const std::vector<cv::Point2f>& imgPoints,
                         cv::InputArray cameraMatrix, cv::InputArray distCoeffs)
{
    cv::Mat Rvec, Tvec;
    float reprojErr1, reprojErr2;
    cv::Mat Rvec2, Tvec2;

    IPPE::PoseSolver Solver;
    Solver.solveGeneric(objPoints, imgPoints, cameraMatrix, distCoeffs, Rvec, Tvec,
                        reprojErr1, Rvec2, Tvec2, reprojErr2);

    return { getRTMatrix(Rvec, Tvec, CV_32F), getRTMatrix(Rvec2, Tvec2, CV_32F) };
}

void solvePnP(const vector<cv::Point3f>& objPoints,
              const std::vector<cv::Point2f>& imgPoints, cv::InputArray cameraMatrix,
              cv::InputArray distCoeffs, cv::Mat& rvec, cv::Mat& tvec)
{
    float reprojErr1, reprojErr2;
    cv::Mat Rvec2, Tvec2;

    IPPE::PoseSolver Solver;
    Solver.solveGeneric(objPoints, imgPoints, cameraMatrix, distCoeffs, rvec, tvec,
                        reprojErr1, Rvec2, Tvec2, reprojErr2);
}

std::vector<std::pair<cv::Mat, double>> solvePnP_(float size,
                                                  const std::vector<cv::Point2f>& imgPoints,
                                                  cv::InputArray cameraMatrix,
                                                  cv::InputArray distCoeffs)
{
    cv::Mat Rvec, Tvec, Rvec2, Tvec2;
    float reprojErr1, reprojErr2;

    IPPE::PoseSolver Solver;
    Solver.solveSquare(size, imgPoints, cameraMatrix, distCoeffs, Rvec, Tvec, reprojErr1,
                       Rvec2, Tvec2, reprojErr2);

    return { make_pair(getRTMatrix(Rvec, Tvec, CV_32F), reprojErr1),
             make_pair(getRTMatrix(Rvec2, Tvec2, CV_32F), reprojErr2) };
}
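
// Illustrative sketch (not part of the original file): estimating the two
// candidate poses of a square marker of side 0.05 (hypothetical units/values)
// from its four detected corners; the intrinsics below are assumptions.
static void exampleSolveSquareMarker()
{
    std::vector<cv::Point2f> corners = {
        { 300.f, 200.f }, { 400.f, 205.f }, { 395.f, 305.f }, { 295.f, 300.f } };
    cv::Mat K = (cv::Mat_<double>(3, 3) << 800, 0, 320, 0, 800, 240, 0, 0, 1);
    cv::Mat dist; // empty: assume no lens distortion
    auto poses = solvePnP_(0.05f, corners, K, dist);
    // poses[0] holds the pose with the lower reprojection error:
    std::cout << "best pose:\n" << poses[0].first
              << "\nreprojection error: " << poses[0].second << std::endl;
}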

std::vector<std::pair<cv::Mat, double>> solvePnP_(const std::vector<cv::Point3f>& objPoints,
                                                  const std::vector<cv::Point2f>& imgPoints,
                                                  cv::InputArray cameraMatrix,
                                                  cv::InputArray distCoeffs)
{
    cv::Mat Rvec, Tvec;
    float reprojErr1, reprojErr2;
    cv::Mat Rvec2, Tvec2;

    IPPE::PoseSolver Solver;
    Solver.solveGeneric(objPoints, imgPoints, cameraMatrix, distCoeffs, Rvec, Tvec,
                        reprojErr1, Rvec2, Tvec2, reprojErr2);

    return { make_pair(getRTMatrix(Rvec, Tvec, CV_32F), reprojErr1),
             make_pair(getRTMatrix(Rvec2, Tvec2, CV_32F), reprojErr2) };
}
} // namespace aruco

IPPE::PoseSolver::PoseSolver()
{
}

IPPE::PoseSolver::~PoseSolver()
{
}

void IPPE::PoseSolver::solveGeneric(cv::InputArray _objectPoints, cv::InputArray _imagePoints,
                                    cv::InputArray _cameraMatrix, cv::InputArray _distCoeffs,
                                    cv::OutputArray _rvec1, cv::OutputArray _tvec1,
                                    float& err1, cv::OutputArray _rvec2,
                                    cv::OutputArray _tvec2, float& err2)
{
    cv::Mat normalizedImagePoints; // undistorted version of imagePoints

    if (_cameraMatrix.empty())
    {
        // there is no camera matrix, so the image points are already given in
        // normalized pixel coordinates:
        _imagePoints.copyTo(normalizedImagePoints);
    }
    else
    {
        // undistort the image points (i.e. put them in normalized pixel coordinates):
        cv::undistortPoints(_imagePoints, normalizedImagePoints, _cameraMatrix, _distCoeffs);
    }

    // solve:
    cv::Mat Ma, Mb;
    solveGeneric(_objectPoints, normalizedImagePoints, Ma, Mb);

    // the two poses computed by IPPE (sorted):
    cv::Mat M1, M2;

    // sort poses by reprojection error:
    sortPosesByReprojError(_objectPoints, _imagePoints, _cameraMatrix, _distCoeffs, Ma, Mb,
                           M1, M2, err1, err2);

    // fill outputs
    rot2vec(M1.colRange(0, 3).rowRange(0, 3), _rvec1);
    rot2vec(M2.colRange(0, 3).rowRange(0, 3), _rvec2);

    M1.colRange(3, 4).rowRange(0, 3).copyTo(_tvec1);
    M2.colRange(3, 4).rowRange(0, 3).copyTo(_tvec2);
}

void IPPE::PoseSolver::solveGeneric(cv::InputArray _objectPoints,
                                    cv::InputArray _normalizedInputPoints,
                                    cv::OutputArray _Ma, cv::OutputArray _Mb)
{
    // argument checking:
    size_t n = _objectPoints.rows() * _objectPoints.cols(); // number of points
    int objType = _objectPoints.type();
    int type_input = _normalizedInputPoints.type();
    assert((objType == CV_32FC3) || (objType == CV_64FC3));
    assert((type_input == CV_32FC2) || (type_input == CV_64FC2));
    assert((_objectPoints.rows() == 1) || (_objectPoints.cols() == 1));
    assert((_objectPoints.rows() >= 4) || (_objectPoints.cols() >= 4));
    assert((_normalizedInputPoints.rows() == 1) || (_normalizedInputPoints.cols() == 1));
    assert(static_cast<size_t>(_normalizedInputPoints.rows() * _normalizedInputPoints.cols()) == n);

    cv::Mat normalizedInputPoints;
    if (type_input == CV_32FC2)
    {
        _normalizedInputPoints.getMat().convertTo(normalizedInputPoints, CV_64FC2);
    }
    else
    {
        normalizedInputPoints = _normalizedInputPoints.getMat();
    }

    cv::Mat objectInputPoints;
    if (objType == CV_32FC3) // test the object points' type, not the image points'
    {
        _objectPoints.getMat().convertTo(objectInputPoints, CV_64FC3);
    }
    else
    {
        objectInputPoints = _objectPoints.getMat();
    }

    cv::Mat canonicalObjPoints;
    cv::Mat MmodelPoints2Canonical;

    // transform object points to the canonical position (zero centred and on the plane z=0):
    makeCanonicalObjectPoints(objectInputPoints, canonicalObjPoints, MmodelPoints2Canonical);

    // compute the homography mapping the model's points to normalizedInputPoints
    cv::Mat H;
    HomographyHO::homographyHO(canonicalObjPoints, normalizedInputPoints, H);

    // now solve
    cv::Mat MaCanon, MbCanon;
    solveCanonicalForm(canonicalObjPoints, normalizedInputPoints, H, MaCanon, MbCanon);

    // transform computed poses to account for canonical transform:
    cv::Mat Ma = MaCanon * MmodelPoints2Canonical;
    cv::Mat Mb = MbCanon * MmodelPoints2Canonical;

    // output poses:
    Ma.copyTo(_Ma);
    Mb.copyTo(_Mb);
}

void IPPE::PoseSolver::solveCanonicalForm(cv::InputArray _canonicalObjPoints,
                                          cv::InputArray _normalizedInputPoints,
                                          cv::InputArray _H, cv::OutputArray _Ma,
                                          cv::OutputArray _Mb)
{
    _Ma.create(4, 4, CV_64FC1);
    _Mb.create(4, 4, CV_64FC1);

    cv::Mat Ma = _Ma.getMat();
    cv::Mat Mb = _Mb.getMat();
    cv::Mat H = _H.getMat();

    // initialise poses:
    Ma.setTo(0);
    Ma.at<double>(3, 3) = 1;
    Mb.setTo(0);
    Mb.at<double>(3, 3) = 1;

    // compute the Jacobian J of the homography at (0,0):
    double j00, j01, j10, j11, v0, v1;

    j00 = H.at<double>(0, 0) - H.at<double>(2, 0) * H.at<double>(0, 2);
    j01 = H.at<double>(0, 1) - H.at<double>(2, 1) * H.at<double>(0, 2);
    j10 = H.at<double>(1, 0) - H.at<double>(2, 0) * H.at<double>(1, 2);
    j11 = H.at<double>(1, 1) - H.at<double>(2, 1) * H.at<double>(1, 2);

    // compute the transformation of (0,0) into the image:
    v0 = H.at<double>(0, 2);
    v1 = H.at<double>(1, 2);

    // compute the two rotation solutions:
    cv::Mat Ra = Ma.colRange(0, 3).rowRange(0, 3);
    cv::Mat Rb = Mb.colRange(0, 3).rowRange(0, 3);
    computeRotations(j00, j01, j10, j11, v0, v1, Ra, Rb);

    // for each rotation solution, compute the corresponding translation solution:
    cv::Mat ta = Ma.colRange(3, 4).rowRange(0, 3);
    cv::Mat tb = Mb.colRange(3, 4).rowRange(0, 3);
    computeTranslation(_canonicalObjPoints, _normalizedInputPoints, Ra, ta);
    computeTranslation(_canonicalObjPoints, _normalizedInputPoints, Rb, tb);
}

void IPPE::PoseSolver::solveSquare(double squareLength, InputArray _imagePoints,
                                   InputArray _cameraMatrix, InputArray _distCoeffs,
                                   OutputArray _rvec1, OutputArray _tvec1, float& err1,
                                   OutputArray _rvec2, OutputArray _tvec2, float& err2)
{
    // allocate outputs:
    _rvec1.create(3, 1, CV_64FC1);
    _tvec1.create(3, 1, CV_64FC1);
    _rvec2.create(3, 1, CV_64FC1);
    _tvec2.create(3, 1, CV_64FC1);

    cv::Mat normalizedInputPoints; // undistorted version of imagePoints
    cv::Mat objectPoints2D;

    // generate the object points:
    generateSquareObjectCorners2D(squareLength, objectPoints2D);

    cv::Mat H; // homography from canonical object points to normalized pixels

    if (_cameraMatrix.empty())
    {
        // imagePoints are defined in normalized pixel coordinates, so just copy them:
        _imagePoints.copyTo(normalizedInputPoints);
    }
    else
    {
        // undistort the image points (i.e. put them in normalized pixel coordinates):
        cv::undistortPoints(_imagePoints, normalizedInputPoints, _cameraMatrix, _distCoeffs);
    }

    if (normalizedInputPoints.type() == CV_32FC2)
    {
        normalizedInputPoints.convertTo(normalizedInputPoints, CV_64F);
    }

    // compute H
    homographyFromSquarePoints(normalizedInputPoints, squareLength / 2.0, H);

    // now solve
    cv::Mat Ma, Mb;
    solveCanonicalForm(objectPoints2D, normalizedInputPoints, H, Ma, Mb);

    // sort poses according to reprojection error:
    cv::Mat M1, M2;
    cv::Mat objectPoints3D;
    generateSquareObjectCorners3D(squareLength, objectPoints3D);

    // compare reprojections against the original image points (as in solveGeneric),
    // so the errors are consistent with the camera model used for projection:
    sortPosesByReprojError(objectPoints3D, _imagePoints, _cameraMatrix,
                           _distCoeffs, Ma, Mb, M1, M2, err1, err2);

    // fill outputs
    rot2vec(M1.colRange(0, 3).rowRange(0, 3), _rvec1);
    rot2vec(M2.colRange(0, 3).rowRange(0, 3), _rvec2);

    M1.colRange(3, 4).rowRange(0, 3).copyTo(_tvec1);
    M2.colRange(3, 4).rowRange(0, 3).copyTo(_tvec2);
}

void IPPE::PoseSolver::generateSquareObjectCorners3D(double squareLength, OutputArray _objectPoints)
{
    _objectPoints.create(1, 4, CV_64FC3);
    cv::Mat objectPoints = _objectPoints.getMat();
    objectPoints.ptr<Vec3d>(0)[0] = Vec3d(-squareLength / 2.0, squareLength / 2.0, 0.0);
    objectPoints.ptr<Vec3d>(0)[1] = Vec3d(squareLength / 2.0, squareLength / 2.0, 0.0);
    objectPoints.ptr<Vec3d>(0)[2] = Vec3d(squareLength / 2.0, -squareLength / 2.0, 0.0);
    objectPoints.ptr<Vec3d>(0)[3] = Vec3d(-squareLength / 2.0, -squareLength / 2.0, 0.0);
}

void IPPE::PoseSolver::generateSquareObjectCorners2D(double squareLength, OutputArray _objectPoints)
{
    _objectPoints.create(1, 4, CV_64FC2);
    cv::Mat objectPoints = _objectPoints.getMat();
    objectPoints.ptr<Vec2d>(0)[0] = Vec2d(-squareLength / 2.0, squareLength / 2.0);
    objectPoints.ptr<Vec2d>(0)[1] = Vec2d(squareLength / 2.0, squareLength / 2.0);
    objectPoints.ptr<Vec2d>(0)[2] = Vec2d(squareLength / 2.0, -squareLength / 2.0);
    objectPoints.ptr<Vec2d>(0)[3] = Vec2d(-squareLength / 2.0, -squareLength / 2.0);
}

double IPPE::PoseSolver::meanSceneDepth(InputArray _objectPoints, InputArray _rvec, InputArray _tvec)
{
    assert(_objectPoints.type() == CV_64FC3);

    size_t n = static_cast<size_t>(_objectPoints.rows() * _objectPoints.cols());
    Mat R;
    Mat q;
    Rodrigues(_rvec, R);
    double zBar = 0;

    for (size_t i = 0; i < n; i++)
    {
        cv::Mat p(_objectPoints.getMat().at<Point3d>(i));
        q = R * p + _tvec.getMat();
        double z;
        if (q.depth() == CV_64F)
        {
            z = q.at<double>(2);
        }
        else
        {
            z = static_cast<double>(q.at<float>(2));
        }
        zBar += z;
        // Note: a point with z <= 0 projects behind the camera, which should
        // not happen for a valid pose.
    }
    return zBar / static_cast<double>(n);
}

void IPPE::PoseSolver::rot2vec(InputArray _R, OutputArray _r)
{
    assert(_R.type() == CV_64FC1);
    assert(_R.rows() == 3);
    assert(_R.cols() == 3);

    _r.create(3, 1, CV_64FC1);

    cv::Mat R = _R.getMat();
    cv::Mat rvec = _r.getMat();

    double trace = R.at<double>(0, 0) + R.at<double>(1, 1) + R.at<double>(2, 2);
    double w_norm = acos((trace - 1.0) / 2.0);
    double eps = std::numeric_limits<float>::epsilon();
    if (w_norm < eps) // rotation is the identity
    {
        rvec.setTo(0);
    }
    else
    {
        // computed only here, where sin(w_norm) is guaranteed non-zero:
        double d = w_norm / (2 * sin(w_norm));
        double c0 = R.at<double>(2, 1) - R.at<double>(1, 2);
        double c1 = R.at<double>(0, 2) - R.at<double>(2, 0);
        double c2 = R.at<double>(1, 0) - R.at<double>(0, 1);
        rvec.at<double>(0) = d * c0;
        rvec.at<double>(1) = d * c1;
        rvec.at<double>(2) = d * c2;
    }
}
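
// Illustrative cross-check (not part of the original file): rot2vec should
// agree with cv::Rodrigues for a generic rotation; the test vector is an
// assumption, and member accessibility as declared in ippe.h is assumed.
static void exampleRot2Vec()
{
    cv::Mat rvecIn = (cv::Mat_<double>(3, 1) << 0.3, -0.4, 0.2);
    cv::Mat R;
    cv::Rodrigues(rvecIn, R); // rotation matrix from the test vector

    IPPE::PoseSolver solver;
    cv::Mat rvecOut;
    solver.rot2vec(R, rvecOut);
    std::cout << "difference: " << cv::norm(rvecIn - rvecOut) << std::endl;
}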

void IPPE::PoseSolver::computeTranslation(InputArray _objectPoints, InputArray _normalizedImgPoints,
                                          InputArray _R, OutputArray _t)
{
    // This is solved by building the linear system At = b, where t corresponds to the
    // (unknown) translation. It is then inverted with the associated normal equations to
    // give t = inv(transpose(A)*A)*transpose(A)*b. For efficiency we only store the
    // coefficients of (transpose(A)*A) and (transpose(A)*b).

    assert(_objectPoints.type() == CV_64FC2);
    assert(_normalizedImgPoints.type() == CV_64FC2);
    assert(_R.type() == CV_64FC1);

    assert((_R.rows() == 3) && (_R.cols() == 3));
    assert((_objectPoints.rows() == 1) || (_objectPoints.cols() == 1));
    assert((_normalizedImgPoints.rows() == 1) || (_normalizedImgPoints.cols() == 1));

    size_t n = static_cast<size_t>(_normalizedImgPoints.rows() * _normalizedImgPoints.cols());
    assert(n == static_cast<size_t>(_objectPoints.rows() * _objectPoints.cols()));

    cv::Mat objectPoints = _objectPoints.getMat();
    cv::Mat imgPoints = _normalizedImgPoints.getMat();

    _t.create(3, 1, CV_64FC1);

    cv::Mat R = _R.getMat();

    // coefficients of (transpose(A)*A)
    double ATA00 = n;
    double ATA02 = 0;
    double ATA11 = n;
    double ATA12 = 0;
    double ATA20 = 0;
    double ATA21 = 0;
    double ATA22 = 0;

    // coefficients of (transpose(A)*b)
    double ATb0 = 0;
    double ATb1 = 0;
    double ATb2 = 0;

    // S gives inv(transpose(A)*A)/det(A)^2
    double S00, S01, S02;
    double S10, S11, S12;
    double S20, S21, S22;

    double rx, ry, rz;
    double a2;
    double b2;
    double bx, by;

    // now loop through each point and increment the coefficients:
    for (size_t i = 0; i < n; i++)
    {
        rx = R.at<double>(0, 0) * objectPoints.at<Vec2d>(i)(0) +
             R.at<double>(0, 1) * objectPoints.at<Vec2d>(i)(1);
        ry = R.at<double>(1, 0) * objectPoints.at<Vec2d>(i)(0) +
             R.at<double>(1, 1) * objectPoints.at<Vec2d>(i)(1);
        rz = R.at<double>(2, 0) * objectPoints.at<Vec2d>(i)(0) +
             R.at<double>(2, 1) * objectPoints.at<Vec2d>(i)(1);

        a2 = -imgPoints.at<Vec2d>(i)(0);
        b2 = -imgPoints.at<Vec2d>(i)(1);

        ATA02 = ATA02 + a2;
        ATA12 = ATA12 + b2;
        ATA20 = ATA20 + a2;
        ATA21 = ATA21 + b2;
        ATA22 = ATA22 + a2 * a2 + b2 * b2;

        bx = -a2 * rz - rx;
        by = -b2 * rz - ry;

        ATb0 = ATb0 + bx;
        ATb1 = ATb1 + by;
        ATb2 = ATb2 + a2 * bx + b2 * by;
    }

    double detAInv = 1.0 / (ATA00 * ATA11 * ATA22 - ATA00 * ATA12 * ATA21 - ATA02 * ATA11 * ATA20);

    // construct S:
    S00 = ATA11 * ATA22 - ATA12 * ATA21;
    S01 = ATA02 * ATA21;
    S02 = -ATA02 * ATA11;
    S10 = ATA12 * ATA20;
    S11 = ATA00 * ATA22 - ATA02 * ATA20;
    S12 = -ATA00 * ATA12;
    S20 = -ATA11 * ATA20;
    S21 = -ATA00 * ATA21;
    S22 = ATA00 * ATA11;

    // solve t:
    Mat t = _t.getMat();
    t.at<double>(0) = detAInv * (S00 * ATb0 + S01 * ATb1 + S02 * ATb2);
    t.at<double>(1) = detAInv * (S10 * ATb0 + S11 * ATb1 + S12 * ATb2);
    t.at<double>(2) = detAInv * (S20 * ATb0 + S21 * ATb1 + S22 * ATb2);
}
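
// Illustrative sketch (not part of the original file) of the design choice
// above: the same translation can be recovered by stacking the 2n x 3 system
// At = b explicitly and calling cv::solve, which the closed form avoids. This
// helper and its inputs (1xN CV_64FC2 object/image points, 3x3 CV_64FC1 R)
// are hypothetical.
static void exampleTranslationViaSolve(const cv::Mat& objectPoints2D,
                                       const cv::Mat& normalizedImgPoints,
                                       const cv::Mat& R)
{
    int n = objectPoints2D.rows * objectPoints2D.cols;
    cv::Mat A(2 * n, 3, CV_64FC1), b(2 * n, 1, CV_64FC1);
    for (int i = 0; i < n; i++)
    {
        cv::Vec2d u = objectPoints2D.at<cv::Vec2d>(i);
        cv::Vec2d x = normalizedImgPoints.at<cv::Vec2d>(i);
        double rx = R.at<double>(0, 0) * u(0) + R.at<double>(0, 1) * u(1);
        double ry = R.at<double>(1, 0) * u(0) + R.at<double>(1, 1) * u(1);
        double rz = R.at<double>(2, 0) * u(0) + R.at<double>(2, 1) * u(1);

        // x-coordinate row: tx - x*tz = x*rz - rx
        A.at<double>(2 * i, 0) = 1;
        A.at<double>(2 * i, 1) = 0;
        A.at<double>(2 * i, 2) = -x(0);
        b.at<double>(2 * i) = x(0) * rz - rx;

        // y-coordinate row: ty - y*tz = y*rz - ry
        A.at<double>(2 * i + 1, 0) = 0;
        A.at<double>(2 * i + 1, 1) = 1;
        A.at<double>(2 * i + 1, 2) = -x(1);
        b.at<double>(2 * i + 1) = x(1) * rz - ry;
    }
    cv::Mat t;
    cv::solve(A, b, t, cv::DECOMP_SVD); // least squares; should match computeTranslation
    std::cout << "t = " << cv::Mat(t.t()) << std::endl;
}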

void IPPE::PoseSolver::computeRotations(double j00, double j01, double j10, double j11,
                                        double p, double q, OutputArray _R1, OutputArray _R2)
{
    // Note: this is fairly optimized code, which makes it hard to read; the
    // original Matlab implementation is easier to follow.
    _R1.create(3, 3, CV_64FC1);
    _R2.create(3, 3, CV_64FC1);

    double a00, a01, a10, a11, ata00, ata01, ata11, b00, b01, b10, b11, binv00, binv01,
        binv10, binv11;
    double rtilde00, rtilde01, rtilde10, rtilde11;
    double rtilde00_2, rtilde01_2, rtilde10_2, rtilde11_2;
    double b0, b1, gamma, dtinv;
    double sp;

    Mat Rv;
    cv::Mat v(3, 1, CV_64FC1);
    v.at<double>(0) = p;
    v.at<double>(1) = q;
    v.at<double>(2) = 1;
    rotateVec2ZAxis(v, Rv);
    Rv = Rv.t();

    // set up the 2x2 SVD decomposition:
    double rv00, rv01, rv02;
    double rv10, rv11, rv12;
    double rv20, rv21, rv22;
    rv00 = Rv.at<double>(0, 0);
    rv01 = Rv.at<double>(0, 1);
    rv02 = Rv.at<double>(0, 2);

    rv10 = Rv.at<double>(1, 0);
    rv11 = Rv.at<double>(1, 1);
    rv12 = Rv.at<double>(1, 2);

    rv20 = Rv.at<double>(2, 0);
    rv21 = Rv.at<double>(2, 1);
    rv22 = Rv.at<double>(2, 2);

    b00 = rv00 - p * rv20;
    b01 = rv01 - p * rv21;
    b10 = rv10 - q * rv20;
    b11 = rv11 - q * rv21;

    dtinv = 1.0 / (b00 * b11 - b01 * b10);

    binv00 = dtinv * b11;
    binv01 = -dtinv * b01;
    binv10 = -dtinv * b10;
    binv11 = dtinv * b00;

    a00 = binv00 * j00 + binv01 * j10;
    a01 = binv00 * j01 + binv01 * j11;
    a10 = binv10 * j00 + binv11 * j10;
    a11 = binv10 * j01 + binv11 * j11;

    // compute the largest singular value of A:
    ata00 = a00 * a00 + a01 * a01;
    ata01 = a00 * a10 + a01 * a11;
    ata11 = a10 * a10 + a11 * a11;

    gamma = sqrt(
        0.5 * (ata00 + ata11 + sqrt((ata00 - ata11) * (ata00 - ata11) + 4.0 * ata01 * ata01)));

    // reconstruct the full rotation matrices:
    rtilde00 = a00 / gamma;
    rtilde01 = a01 / gamma;
    rtilde10 = a10 / gamma;
    rtilde11 = a11 / gamma;

    rtilde00_2 = rtilde00 * rtilde00;
    rtilde01_2 = rtilde01 * rtilde01;
    rtilde10_2 = rtilde10 * rtilde10;
    rtilde11_2 = rtilde11 * rtilde11;

    b0 = sqrt(-rtilde00_2 - rtilde10_2 + 1);
    b1 = sqrt(-rtilde01_2 - rtilde11_2 + 1);
    sp = -rtilde00 * rtilde01 - rtilde10 * rtilde11;

    if (sp < 0)
    {
        b1 = -b1;
    }

    // store results:
    Mat R1 = _R1.getMat();
    Mat R2 = _R2.getMat();

    R1.at<double>(0, 0) = rtilde00 * rv00 + rtilde10 * rv01 + b0 * rv02;
    R1.at<double>(0, 1) = rtilde01 * rv00 + rtilde11 * rv01 + b1 * rv02;
    R1.at<double>(0, 2) = (b1 * rtilde10 - b0 * rtilde11) * rv00 +
                          (b0 * rtilde01 - b1 * rtilde00) * rv01 +
                          (rtilde00 * rtilde11 - rtilde01 * rtilde10) * rv02;
    R1.at<double>(1, 0) = rtilde00 * rv10 + rtilde10 * rv11 + b0 * rv12;
    R1.at<double>(1, 1) = rtilde01 * rv10 + rtilde11 * rv11 + b1 * rv12;
    R1.at<double>(1, 2) = (b1 * rtilde10 - b0 * rtilde11) * rv10 +
                          (b0 * rtilde01 - b1 * rtilde00) * rv11 +
                          (rtilde00 * rtilde11 - rtilde01 * rtilde10) * rv12;
    R1.at<double>(2, 0) = rtilde00 * rv20 + rtilde10 * rv21 + b0 * rv22;
    R1.at<double>(2, 1) = rtilde01 * rv20 + rtilde11 * rv21 + b1 * rv22;
    R1.at<double>(2, 2) = (b1 * rtilde10 - b0 * rtilde11) * rv20 +
                          (b0 * rtilde01 - b1 * rtilde00) * rv21 +
                          (rtilde00 * rtilde11 - rtilde01 * rtilde10) * rv22;

    R2.at<double>(0, 0) = rtilde00 * rv00 + rtilde10 * rv01 - b0 * rv02;
    R2.at<double>(0, 1) = rtilde01 * rv00 + rtilde11 * rv01 - b1 * rv02;
    R2.at<double>(0, 2) = (b0 * rtilde11 - b1 * rtilde10) * rv00 +
                          (b1 * rtilde00 - b0 * rtilde01) * rv01 +
                          (rtilde00 * rtilde11 - rtilde01 * rtilde10) * rv02;
    R2.at<double>(1, 0) = rtilde00 * rv10 + rtilde10 * rv11 - b0 * rv12;
    R2.at<double>(1, 1) = rtilde01 * rv10 + rtilde11 * rv11 - b1 * rv12;
    R2.at<double>(1, 2) = (b0 * rtilde11 - b1 * rtilde10) * rv10 +
                          (b1 * rtilde00 - b0 * rtilde01) * rv11 +
                          (rtilde00 * rtilde11 - rtilde01 * rtilde10) * rv12;
    R2.at<double>(2, 0) = rtilde00 * rv20 + rtilde10 * rv21 - b0 * rv22;
    R2.at<double>(2, 1) = rtilde01 * rv20 + rtilde11 * rv21 - b1 * rv22;
    R2.at<double>(2, 2) = (b0 * rtilde11 - b1 * rtilde10) * rv20 +
                          (b1 * rtilde00 - b0 * rtilde01) * rv21 +
                          (rtilde00 * rtilde11 - rtilde01 * rtilde10) * rv22;
}
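
// Illustrative check (not part of the original file): both outputs should be
// proper rotations (orthonormal, determinant +1). The Jacobian and point
// values are assumptions; member accessibility as in ippe.h is assumed.
static void exampleCheckRotations()
{
    IPPE::PoseSolver solver;
    cv::Mat R1, R2;
    solver.computeRotations(1.1, 0.02, -0.03, 0.95, 0.1, -0.2, R1, R2);
    cv::Mat I = cv::Mat::eye(3, 3, CV_64FC1);
    std::cout << "||R1*R1^T - I|| = " << cv::norm(R1 * R1.t() - I)
              << ", det(R1) = " << cv::determinant(R1) << std::endl;
}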

void IPPE::PoseSolver::homographyFromSquarePoints(InputArray _targetPoints,
                                                  double halfLength, OutputArray H_)
{
    assert((_targetPoints.type() == CV_32FC2) || (_targetPoints.type() == CV_64FC2));

    cv::Mat pts = _targetPoints.getMat();
    H_.create(3, 3, CV_64FC1);
    Mat H = H_.getMat();

    double p1x, p1y;
    double p2x, p2y;
    double p3x, p3y;
    double p4x, p4y;

    if (_targetPoints.type() == CV_32FC2)
    {
        p1x = -pts.at<Vec2f>(0)(0);
        p1y = -pts.at<Vec2f>(0)(1);

        p2x = -pts.at<Vec2f>(1)(0);
        p2y = -pts.at<Vec2f>(1)(1);

        p3x = -pts.at<Vec2f>(2)(0);
        p3y = -pts.at<Vec2f>(2)(1);

        p4x = -pts.at<Vec2f>(3)(0);
        p4y = -pts.at<Vec2f>(3)(1);
    }
    else
    {
        p1x = -pts.at<Vec2d>(0)(0);
        p1y = -pts.at<Vec2d>(0)(1);

        p2x = -pts.at<Vec2d>(1)(0);
        p2y = -pts.at<Vec2d>(1)(1);

        p3x = -pts.at<Vec2d>(2)(0);
        p3y = -pts.at<Vec2d>(2)(1);

        p4x = -pts.at<Vec2d>(3)(0);
        p4y = -pts.at<Vec2d>(3)(1);
    }

    // analytic solution:
    double detsInv = -1 / (halfLength * (p1x * p2y - p2x * p1y - p1x * p4y + p2x * p3y -
                                         p3x * p2y + p4x * p1y + p3x * p4y - p4x * p3y));

    H.at<double>(0, 0) =
        detsInv * (p1x * p3x * p2y - p2x * p3x * p1y - p1x * p4x * p2y + p2x * p4x * p1y -
                   p1x * p3x * p4y + p1x * p4x * p3y + p2x * p3x * p4y - p2x * p4x * p3y);
    H.at<double>(0, 1) =
        detsInv * (p1x * p2x * p3y - p1x * p3x * p2y - p1x * p2x * p4y + p2x * p4x * p1y +
                   p1x * p3x * p4y - p3x * p4x * p1y - p2x * p4x * p3y + p3x * p4x * p2y);
    H.at<double>(0, 2) =
        detsInv * halfLength *
        (p1x * p2x * p3y - p2x * p3x * p1y - p1x * p2x * p4y + p1x * p4x * p2y -
         p1x * p4x * p3y + p3x * p4x * p1y + p2x * p3x * p4y - p3x * p4x * p2y);
    H.at<double>(1, 0) =
        detsInv * (p1x * p2y * p3y - p2x * p1y * p3y - p1x * p2y * p4y + p2x * p1y * p4y -
                   p3x * p1y * p4y + p4x * p1y * p3y + p3x * p2y * p4y - p4x * p2y * p3y);
    H.at<double>(1, 1) =
        detsInv * (p2x * p1y * p3y - p3x * p1y * p2y - p1x * p2y * p4y + p4x * p1y * p2y +
                   p1x * p3y * p4y - p4x * p1y * p3y - p2x * p3y * p4y + p3x * p2y * p4y);
    H.at<double>(1, 2) =
        detsInv * halfLength *
        (p1x * p2y * p3y - p3x * p1y * p2y - p2x * p1y * p4y + p4x * p1y * p2y -
         p1x * p3y * p4y + p3x * p1y * p4y + p2x * p3y * p4y - p4x * p2y * p3y);
    H.at<double>(2, 0) = -detsInv * (p1x * p3y - p3x * p1y - p1x * p4y - p2x * p3y +
                                     p3x * p2y + p4x * p1y + p2x * p4y - p4x * p2y);
    H.at<double>(2, 1) = detsInv * (p1x * p2y - p2x * p1y - p1x * p3y + p3x * p1y +
                                    p2x * p4y - p4x * p2y - p3x * p4y + p4x * p3y);
    H.at<double>(2, 2) = 1.0;
}
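
// Illustrative cross-check (not part of the original file): the closed form
// above should match OpenCV's generic 4-point solver, which also normalizes
// H(2,2) to 1. The target corners are assumptions, and the canonical corner
// order of generateSquareObjectCorners2D is assumed.
static void exampleSquareHomography()
{
    const float h = 0.5f; // half-length of the canonical square
    std::vector<cv::Point2f> src = { { -h, h }, { h, h }, { h, -h }, { -h, -h } };
    std::vector<cv::Point2f> dst = { { 0.1f, 0.2f }, { 0.9f, 0.25f },
                                     { 0.85f, 0.8f }, { 0.15f, 0.75f } };
    cv::Mat Href = cv::getPerspectiveTransform(src, dst);

    IPPE::PoseSolver solver;
    cv::Mat H;
    solver.homographyFromSquarePoints(dst, h, H);
    std::cout << "difference: " << cv::norm(Href - H) << std::endl;
}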

void IPPE::PoseSolver::makeCanonicalObjectPoints(InputArray _objectPoints,
                                                 OutputArray _canonicalObjPoints,
                                                 OutputArray _MmodelPoints2Canonical)
{
    int objType = _objectPoints.type();
    assert((objType == CV_64FC3) || (objType == CV_32FC3));

    size_t n = _objectPoints.rows() * _objectPoints.cols();

    _canonicalObjPoints.create(1, static_cast<int>(n), CV_64FC2);
    _MmodelPoints2Canonical.create(4, 4, CV_64FC1);

    cv::Mat objectPoints = _objectPoints.getMat();
    cv::Mat canonicalObjPoints = _canonicalObjPoints.getMat();

    cv::Mat UZero(3, static_cast<int>(n), CV_64FC1);

    double xBar = 0;
    double yBar = 0;
    double zBar = 0;
    bool isOnZPlane = true;
    for (size_t i = 0; i < n; i++)
    {
        double x, y, z;
        if (objType == CV_32FC3)
        {
            x = static_cast<double>(objectPoints.at<Vec3f>(i)[0]);
            y = static_cast<double>(objectPoints.at<Vec3f>(i)[1]);
            z = static_cast<double>(objectPoints.at<Vec3f>(i)[2]);
        }
        else
        {
            x = objectPoints.at<Vec3d>(i)[0];
            y = objectPoints.at<Vec3d>(i)[1];
            z = objectPoints.at<Vec3d>(i)[2];
        }

        // check planarity for both input types (the original code only tested
        // the double-precision branch):
        if (abs(z) > IPPE_SMALL)
        {
            isOnZPlane = false;
        }

        xBar += x;
        yBar += y;
        zBar += z;

        UZero.at<double>(0, i) = x;
        UZero.at<double>(1, i) = y;
        UZero.at<double>(2, i) = z;
    }
    xBar = xBar / (double)n;
    yBar = yBar / (double)n;
    zBar = zBar / (double)n;

    for (size_t i = 0; i < n; i++)
    {
        UZero.at<double>(0, i) -= xBar;
        UZero.at<double>(1, i) -= yBar;
        UZero.at<double>(2, i) -= zBar;
    }

    cv::Mat MCenter(4, 4, CV_64FC1);
    MCenter.setTo(0);
    MCenter.at<double>(0, 0) = 1;
    MCenter.at<double>(1, 1) = 1;
    MCenter.at<double>(2, 2) = 1;
    MCenter.at<double>(3, 3) = 1;

    MCenter.at<double>(0, 3) = -xBar;
    MCenter.at<double>(1, 3) = -yBar;
    MCenter.at<double>(2, 3) = -zBar;

    if (isOnZPlane)
    {
        // MmodelPoints2Canonical is given by MCenter
        MCenter.copyTo(_MmodelPoints2Canonical);
        for (size_t i = 0; i < n; i++)
        {
            canonicalObjPoints.at<Vec2d>(i)[0] = UZero.at<double>(0, i);
            canonicalObjPoints.at<Vec2d>(i)[1] = UZero.at<double>(1, i);
        }
    }
    else
    {
        cv::Mat R; // rotation that rotates objectPoints to the plane z=0

        if (!computeObjextSpaceR3Pts(objectPoints, R))
        {
            // we could not compute R, probably because there is a duplicate point in
            // {objectPoints(0), objectPoints(1), objectPoints(2)}, so compute it
            // with the SVD (which is slower):
            computeObjextSpaceRSvD(UZero, R);
        }

        cv::Mat UZeroAligned = R * UZero; // rotate the centred points onto z=0

        for (size_t i = 0; i < n; i++)
        {
            canonicalObjPoints.at<Vec2d>(i)[0] = UZeroAligned.at<double>(0, i);
            canonicalObjPoints.at<Vec2d>(i)[1] = UZeroAligned.at<double>(1, i);
            assert(abs(UZeroAligned.at<double>(2, i)) <= IPPE_SMALL);
        }

        cv::Mat MRot(4, 4, CV_64FC1);
        MRot.setTo(0);
        MRot.at<double>(3, 3) = 1;

        R.copyTo(MRot.colRange(0, 3).rowRange(0, 3));
        cv::Mat Mb = MRot * MCenter;
        Mb.copyTo(_MmodelPoints2Canonical);
    }
}
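
// Illustrative check (not part of the original file): canonicalizing a tilted
// planar object should produce zero-mean points with z ~ 0. The plane z = 0.5*x
// below is an assumption; member accessibility as in ippe.h is assumed.
static void exampleCanonicalPoints()
{
    cv::Mat obj(1, 4, CV_64FC3); // four coplanar points on the plane z = 0.5*x
    obj.at<cv::Vec3d>(0) = cv::Vec3d(-1, -1, -0.5);
    obj.at<cv::Vec3d>(1) = cv::Vec3d(1, -1, 0.5);
    obj.at<cv::Vec3d>(2) = cv::Vec3d(1, 1, 0.5);
    obj.at<cv::Vec3d>(3) = cv::Vec3d(-1, 1, -0.5);

    IPPE::PoseSolver solver;
    cv::Mat canonical, M;
    solver.makeCanonicalObjectPoints(obj, canonical, M);
    std::cout << "canonical points: " << canonical << std::endl;
}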

void IPPE::PoseSolver::evalReprojError(cv::InputArray _objectPoints, cv::InputArray _imagePoints,
                                       cv::InputArray _cameraMatrix, cv::InputArray _distCoeffs,
                                       cv::InputArray _M, float& err)
{
    cv::Mat projectedPoints;
    cv::Mat imagePoints = _imagePoints.getMat();
    cv::Mat r;
    rot2vec(_M.getMat().colRange(0, 3).rowRange(0, 3), r);

    if (_cameraMatrix.empty())
    {
        // there is no camera matrix and the image points are in normalized pixel
        // coordinates, so project with the identity:
        cv::Mat K = cv::Mat::eye(3, 3, CV_64FC1);
        cv::Mat kc;
        cv::projectPoints(_objectPoints, r, _M.getMat().colRange(3, 4).rowRange(0, 3), K, kc,
                          projectedPoints);
    }
    else
    {
        cv::projectPoints(_objectPoints, r, _M.getMat().colRange(3, 4).rowRange(0, 3),
                          _cameraMatrix, _distCoeffs, projectedPoints);
    }

    err = 0;
    size_t n = _objectPoints.rows() * _objectPoints.cols();

    for (size_t i = 0; i < n; i++)
    {
        // read each matrix according to its own depth (they may differ):
        cv::Vec2d pp, ip;
        if (projectedPoints.depth() == CV_32F)
            pp = projectedPoints.at<Vec2f>(i);
        else
            pp = projectedPoints.at<Vec2d>(i);
        if (imagePoints.depth() == CV_32F)
            ip = imagePoints.at<Vec2f>(i);
        else
            ip = imagePoints.at<Vec2d>(i);

        double dx = pp[0] - ip[0];
        double dy = pp[1] - ip[1];
        err += static_cast<float>(dx * dx + dy * dy);
    }
    err = sqrt(err / (2.0f * n));
}

void IPPE::PoseSolver::sortPosesByReprojError(cv::InputArray _objectPoints,
                                              cv::InputArray _imagePoints,
                                              cv::InputArray _cameraMatrix,
                                              cv::InputArray _distCoeffs, cv::InputArray _Ma,
                                              cv::InputArray _Mb, cv::OutputArray _M1,
                                              cv::OutputArray _M2, float& err1, float& err2)
{
    float erra, errb;
    evalReprojError(_objectPoints, _imagePoints, _cameraMatrix, _distCoeffs, _Ma, erra);
    evalReprojError(_objectPoints, _imagePoints, _cameraMatrix, _distCoeffs, _Mb, errb);
    if (erra < errb)
    {
        err1 = erra;
        _Ma.copyTo(_M1);

        err2 = errb;
        _Mb.copyTo(_M2);
    }
    else
    {
        err1 = errb;
        _Mb.copyTo(_M1);

        err2 = erra;
        _Ma.copyTo(_M2);
    }
}

void HomographyHO::normalizeDataIsotropic(cv::InputArray _Data, cv::OutputArray _DataN,
                                          cv::OutputArray _T, cv::OutputArray _Ti)
{
    cv::Mat Data = _Data.getMat();
    int numPoints = Data.rows * Data.cols;
    assert((Data.rows == 1) || (Data.cols == 1));
    assert((Data.channels() == 2) || (Data.channels() == 3));
    assert(numPoints >= 4);

    int dataType = _Data.type();
    assert((dataType == CV_64FC2) || (dataType == CV_64FC3) || (dataType == CV_32FC2) ||
           (dataType == CV_32FC3));

    _DataN.create(2, numPoints, CV_64FC1);

    _T.create(3, 3, CV_64FC1);
    _Ti.create(3, 3, CV_64FC1);

    cv::Mat DataN = _DataN.getMat();
    cv::Mat T = _T.getMat();
    cv::Mat Ti = _Ti.getMat();

    _T.setTo(0);
    _Ti.setTo(0);

    double xm, ym;
    int numChannels = Data.channels();

    xm = 0;
    ym = 0;
    for (int i = 0; i < numPoints; i++)
    {
        if (numChannels == 2)
        {
            if (dataType == CV_32FC2)
            {
                xm = xm + Data.at<Vec2f>(i)[0];
                ym = ym + Data.at<Vec2f>(i)[1];
            }
            else
            {
                xm = xm + Data.at<Vec2d>(i)[0];
                ym = ym + Data.at<Vec2d>(i)[1];
            }
        }
        else
        {
            if (dataType == CV_32FC3)
            {
                xm = xm + Data.at<Vec3f>(i)[0];
                ym = ym + Data.at<Vec3f>(i)[1];
            }
            else
            {
                xm = xm + Data.at<Vec3d>(i)[0];
                ym = ym + Data.at<Vec3d>(i)[1];
            }
        }
    }
    xm = xm / (double)numPoints;
    ym = ym / (double)numPoints;

    double kappa = 0;
    double xh, yh;

    for (int i = 0; i < numPoints; i++)
    {
        if (numChannels == 2)
        {
            if (dataType == CV_32FC2)
            {
                xh = Data.at<Vec2f>(i)[0] - xm;
                yh = Data.at<Vec2f>(i)[1] - ym;
            }
            else
            {
                xh = Data.at<Vec2d>(i)[0] - xm;
                yh = Data.at<Vec2d>(i)[1] - ym;
            }
        }
        else
        {
            if (dataType == CV_32FC3)
            {
                xh = Data.at<Vec3f>(i)[0] - xm;
                yh = Data.at<Vec3f>(i)[1] - ym;
            }
            else
            {
                xh = Data.at<Vec3d>(i)[0] - xm;
                yh = Data.at<Vec3d>(i)[1] - ym;
            }
        }

        DataN.at<double>(0, i) = xh;
        DataN.at<double>(1, i) = yh;
        kappa = kappa + xh * xh + yh * yh;
    }
    double beta = sqrt(2 * numPoints / kappa);
    DataN = DataN * beta;

    T.at<double>(0, 0) = 1.0 / beta;
    T.at<double>(1, 1) = 1.0 / beta;

    T.at<double>(0, 2) = xm;
    T.at<double>(1, 2) = ym;

    T.at<double>(2, 2) = 1;

    Ti.at<double>(0, 0) = beta;
    Ti.at<double>(1, 1) = beta;

    Ti.at<double>(0, 2) = -beta * xm;
    Ti.at<double>(1, 2) = -beta * ym;

    Ti.at<double>(2, 2) = 1;
}
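
// Illustrative property check (not part of the original file): after isotropic
// (Hartley) normalization the points have zero mean and RMS distance sqrt(2)
// from the origin. The input values are assumptions.
static void exampleNormalization()
{
    cv::Mat pts(1, 4, CV_64FC2);
    pts.at<cv::Vec2d>(0) = cv::Vec2d(100, 120);
    pts.at<cv::Vec2d>(1) = cv::Vec2d(340, 110);
    pts.at<cv::Vec2d>(2) = cv::Vec2d(330, 300);
    pts.at<cv::Vec2d>(3) = cv::Vec2d(90, 310);

    cv::Mat N, T, Ti;
    HomographyHO::normalizeDataIsotropic(pts, N, T, Ti);

    double rms = 0;
    for (int i = 0; i < N.cols; i++)
        rms += N.at<double>(0, i) * N.at<double>(0, i) +
               N.at<double>(1, i) * N.at<double>(1, i);
    std::cout << "RMS distance: " << sqrt(rms / N.cols)
              << " (expected sqrt(2) ~ 1.4142)" << std::endl;
}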

void HomographyHO::homographyHO(cv::InputArray _srcPoints, cv::InputArray _targPoints,
                                cv::OutputArray _H)
{
    _H.create(3, 3, CV_64FC1);
    cv::Mat H = _H.getMat();

    cv::Mat DataA, DataB, TA, TAi, TB, TBi;

    HomographyHO::normalizeDataIsotropic(_srcPoints, DataA, TA, TAi);
    HomographyHO::normalizeDataIsotropic(_targPoints, DataB, TB, TBi);

    int n = DataA.cols;
    assert(n == DataB.cols);

    cv::Mat C1(1, n, CV_64FC1);
    cv::Mat C2(1, n, CV_64FC1);
    cv::Mat C3(1, n, CV_64FC1);
    cv::Mat C4(1, n, CV_64FC1);

    cv::Mat Mx(n, 3, CV_64FC1);
    cv::Mat My(n, 3, CV_64FC1);

    double mC1, mC2, mC3, mC4;
    mC1 = 0;
    mC2 = 0;
    mC3 = 0;
    mC4 = 0;

    for (int i = 0; i < n; i++)
    {
        C1.at<double>(0, i) = -DataB.at<double>(0, i) * DataA.at<double>(0, i);
        C2.at<double>(0, i) = -DataB.at<double>(0, i) * DataA.at<double>(1, i);
        C3.at<double>(0, i) = -DataB.at<double>(1, i) * DataA.at<double>(0, i);
        C4.at<double>(0, i) = -DataB.at<double>(1, i) * DataA.at<double>(1, i);

        mC1 = mC1 + C1.at<double>(0, i);
        mC2 = mC2 + C2.at<double>(0, i);
        mC3 = mC3 + C3.at<double>(0, i);
        mC4 = mC4 + C4.at<double>(0, i);
    }

    mC1 = mC1 / n;
    mC2 = mC2 / n;
    mC3 = mC3 / n;
    mC4 = mC4 / n;

    for (int i = 0; i < n; i++)
    {
        Mx.at<double>(i, 0) = C1.at<double>(0, i) - mC1;
        Mx.at<double>(i, 1) = C2.at<double>(0, i) - mC2;
        Mx.at<double>(i, 2) = -DataB.at<double>(0, i);

        My.at<double>(i, 0) = C3.at<double>(0, i) - mC3;
        My.at<double>(i, 1) = C4.at<double>(0, i) - mC4;
        My.at<double>(i, 2) = -DataB.at<double>(1, i);
    }

    cv::Mat DataAT, DataADataAT, DataADataATi, Pp, Bx, By, Ex, Ey, D;

    cv::transpose(DataA, DataAT);
    DataADataAT = DataA * DataAT;
    double dt = DataADataAT.at<double>(0, 0) * DataADataAT.at<double>(1, 1) -
                DataADataAT.at<double>(0, 1) * DataADataAT.at<double>(1, 0);

    DataADataATi = cv::Mat(2, 2, CV_64FC1);
    DataADataATi.at<double>(0, 0) = DataADataAT.at<double>(1, 1) / dt;
    DataADataATi.at<double>(0, 1) = -DataADataAT.at<double>(0, 1) / dt;
    DataADataATi.at<double>(1, 0) = -DataADataAT.at<double>(1, 0) / dt;
    DataADataATi.at<double>(1, 1) = DataADataAT.at<double>(0, 0) / dt;

    Pp = DataADataATi * DataA;

    Bx = Pp * Mx;
    By = Pp * My;

    Ex = DataAT * Bx;
    Ey = DataAT * By;

    D = cv::Mat(2 * n, 3, CV_64FC1);
    cv::Mat DT, DDT;

    for (int i = 0; i < n; i++)
    {
        D.at<double>(i, 0) = Mx.at<double>(i, 0) - Ex.at<double>(i, 0);
        D.at<double>(i, 1) = Mx.at<double>(i, 1) - Ex.at<double>(i, 1);
        D.at<double>(i, 2) = Mx.at<double>(i, 2) - Ex.at<double>(i, 2);

        D.at<double>(i + n, 0) = My.at<double>(i, 0) - Ey.at<double>(i, 0);
        D.at<double>(i + n, 1) = My.at<double>(i, 1) - Ey.at<double>(i, 1);
        D.at<double>(i + n, 2) = My.at<double>(i, 2) - Ey.at<double>(i, 2);
    }

    cv::transpose(D, DT);
    DDT = DT * D;

    cv::Mat S, U, h12, h45;
    double h3, h6;

    cv::eigen(DDT, S, U);

    cv::Mat h789(3, 1, CV_64FC1);
    h789.at<double>(0, 0) = U.at<double>(2, 0);
    h789.at<double>(1, 0) = U.at<double>(2, 1);
    h789.at<double>(2, 0) = U.at<double>(2, 2);

    h12 = -Bx * h789;
    h45 = -By * h789;

    h3 = -(mC1 * h789.at<double>(0, 0) + mC2 * h789.at<double>(1, 0));
    h6 = -(mC3 * h789.at<double>(0, 0) + mC4 * h789.at<double>(1, 0));

    H.at<double>(0, 0) = h12.at<double>(0, 0);
    H.at<double>(0, 1) = h12.at<double>(1, 0);
    H.at<double>(0, 2) = h3;

    H.at<double>(1, 0) = h45.at<double>(0, 0);
    H.at<double>(1, 1) = h45.at<double>(1, 0);
    H.at<double>(1, 2) = h6;

    H.at<double>(2, 0) = h789.at<double>(0, 0);
    H.at<double>(2, 1) = h789.at<double>(1, 0);
    H.at<double>(2, 2) = h789.at<double>(2, 0);

    H = TB * H * TAi;
    H = H / H.at<double>(2, 2);
}
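
// Illustrative cross-check (not part of the original file): recovering a known
// homography from noise-free correspondences. Href and the source points are
// assumptions; homographyHO returns H normalized so that H(2,2) = 1.
static void exampleHomographyHO()
{
    std::vector<cv::Point2d> src = { { -1, 1 }, { 1, 1 }, { 1, -1 },
                                     { -1, -1 }, { 0, 0.5 } };
    cv::Mat Href = (cv::Mat_<double>(3, 3) << 0.9, 0.1, 0.2,
                                             -0.05, 1.1, -0.1,
                                              0.01, -0.02, 1.0);
    std::vector<cv::Point2d> dst;
    for (const auto& p : src)
    {
        cv::Mat q = Href * (cv::Mat_<double>(3, 1) << p.x, p.y, 1.0);
        dst.push_back({ q.at<double>(0) / q.at<double>(2),
                        q.at<double>(1) / q.at<double>(2) });
    }

    cv::Mat H;
    HomographyHO::homographyHO(src, dst, H);
    std::cout << "recovered H =\n" << H << std::endl; // should match Href
}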

void IPPE::PoseSolver::rotateVec2ZAxis(InputArray _a, OutputArray _Ra)
{
    _Ra.create(3, 3, CV_64FC1);
    Mat Ra = _Ra.getMat();

    double ax = _a.getMat().at<double>(0);
    double ay = _a.getMat().at<double>(1);
    double az = _a.getMat().at<double>(2);

    double nrm = sqrt(ax * ax + ay * ay + az * az);
    ax = ax / nrm;
    ay = ay / nrm;
    az = az / nrm;

    double c = az;

    if (abs(1.0 + c) < std::numeric_limits<float>::epsilon())
    {
        Ra.setTo(0.0);
        Ra.at<double>(0, 0) = 1.0;
        Ra.at<double>(1, 1) = 1.0;
        Ra.at<double>(2, 2) = -1.0;
    }
    else
    {
        double d = 1.0 / (1.0 + c);
        double ax2 = ax * ax;
        double ay2 = ay * ay;
        double axay = ax * ay;

        Ra.at<double>(0, 0) = -ax2 * d + 1.0;
        Ra.at<double>(0, 1) = -axay * d;
        Ra.at<double>(0, 2) = -ax;

        Ra.at<double>(1, 0) = -axay * d;
        Ra.at<double>(1, 1) = -ay2 * d + 1.0;
        Ra.at<double>(1, 2) = -ay;

        Ra.at<double>(2, 0) = ax;
        Ra.at<double>(2, 1) = ay;
        Ra.at<double>(2, 2) = 1.0 - (ax2 + ay2) * d;
    }
}
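
// Illustrative check (not part of the original file): Ra should rotate the
// normalized input vector onto (0, 0, 1). The input vector is an assumption.
static void exampleRotateVec2ZAxis()
{
    cv::Mat a = (cv::Mat_<double>(3, 1) << 0.2, -0.5, 0.84);
    IPPE::PoseSolver solver;
    cv::Mat Ra;
    solver.rotateVec2ZAxis(a, Ra);
    cv::Mat z = Ra * (a / cv::norm(a));
    std::cout << "Ra * a/|a| = " << cv::Mat(z.t())
              << " (expected [0, 0, 1])" << std::endl;
}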

bool IPPE::PoseSolver::computeObjextSpaceR3Pts(InputArray _objectPoints, OutputArray R)
{
    bool ret; // return argument
    double p1x, p1y, p1z;
    double p2x, p2y, p2z;
    double p3x, p3y, p3z;

    cv::Mat objectPoints = _objectPoints.getMat();
    if (objectPoints.type() == CV_32FC3)
    {
        p1x = objectPoints.at<Vec3f>(0)[0];
        p1y = objectPoints.at<Vec3f>(0)[1];
        p1z = objectPoints.at<Vec3f>(0)[2];

        p2x = objectPoints.at<Vec3f>(1)[0];
        p2y = objectPoints.at<Vec3f>(1)[1];
        p2z = objectPoints.at<Vec3f>(1)[2];

        p3x = objectPoints.at<Vec3f>(2)[0];
        p3y = objectPoints.at<Vec3f>(2)[1];
        p3z = objectPoints.at<Vec3f>(2)[2];
    }
    else
    {
        p1x = objectPoints.at<Vec3d>(0)[0];
        p1y = objectPoints.at<Vec3d>(0)[1];
        p1z = objectPoints.at<Vec3d>(0)[2];

        p2x = objectPoints.at<Vec3d>(1)[0];
        p2y = objectPoints.at<Vec3d>(1)[1];
        p2z = objectPoints.at<Vec3d>(1)[2];

        p3x = objectPoints.at<Vec3d>(2)[0];
        p3y = objectPoints.at<Vec3d>(2)[1];
        p3z = objectPoints.at<Vec3d>(2)[2];
    }

    // normal of the plane through the first three points (cross product):
    double nx = (p1y - p2y) * (p1z - p3z) - (p1y - p3y) * (p1z - p2z);
    double ny = (p1x - p3x) * (p1z - p2z) - (p1x - p2x) * (p1z - p3z);
    double nz = (p1x - p2x) * (p1y - p3y) - (p1x - p3x) * (p1y - p2y);

    double nrm = sqrt(nx * nx + ny * ny + nz * nz);
    if (nrm > IPPE_SMALL)
    {
        nx = nx / nrm;
        ny = ny / nrm;
        nz = nz / nrm;
        cv::Mat v(3, 1, CV_64FC1);
        v.at<double>(0) = nx;
        v.at<double>(1) = ny;
        v.at<double>(2) = nz;
        rotateVec2ZAxis(v, R);
        ret = true;
    }
    else
    {
        ret = false;
    }
    return ret;
}

bool IPPE::PoseSolver::computeObjextSpaceRSvD(InputArray _objectPointsZeroMean, OutputArray _R)
{
    _R.create(3, 3, CV_64FC1);
    cv::Mat R = _R.getMat();

    // we could not compute R with the first three points, so let's use the SVD:
    cv::SVD s;
    cv::Mat W, U, VT;
    s.compute(_objectPointsZeroMean.getMat() * _objectPointsZeroMean.getMat().t(), W, U, VT);
    double s3 = W.at<double>(2);
    double s2 = W.at<double>(1);

    // check that the points are coplanar:
    assert(s3 / s2 < IPPE_SMALL);

    // copy explicitly into the output header (avoids relying on MatExpr
    // assignment semantics):
    cv::Mat(U.t()).copyTo(R);
    if (cv::determinant(R) < 0)
    {
        // flip the third row so R is a rotation matrix and not a general unitary matrix:
        R.at<double>(2, 0) = -R.at<double>(2, 0);
        R.at<double>(2, 1) = -R.at<double>(2, 1);
        R.at<double>(2, 2) = -R.at<double>(2, 2);
    }
    return true;
}