markerdetector_impl.cpp
1 
17 #include "markerdetector_impl.h"
18 #include "cameraparameters.h"
19 #include "markerlabeler.h"
20 #include "timers.h"
21 
22 #include <opencv2/core/core.hpp>
23 #include <opencv2/imgproc/imgproc.hpp>
24 #include <opencv2/features2d/features2d.hpp>
25 #include <opencv2/calib3d/calib3d.hpp>
26 #include <fstream>
27 #include <iostream>
28 #include <valarray>
29 #include <chrono>
30 #include <thread>
31 #include "debug.h"
32 #include "aruco_cvversioning.h"
33 //#include "picoflann.h"
34 
35 //#ifdef _DEBUG
36 //#include <opencv2/highgui/highgui.hpp>
37 //#endif
38 using namespace std;
39 using namespace cv;
40 
41 namespace aruco
42 {
43 /************************************
44  *
45  *
46  *
47  *
48  ************************************/
49 MarkerDetector_Impl::MarkerDetector_Impl()
50 {
51  markerIdDetector = aruco::MarkerLabeler::create(Dictionary::ALL_DICTS);
52  setDetectionMode(DM_NORMAL);
53 }
54 /************************************
55  *
56  *
57  *
58  *
59  ************************************/
60 MarkerDetector_Impl::MarkerDetector_Impl(int dict_type, float error_correction_rate)
61 {
62  setDictionary(dict_type, error_correction_rate);
63  setDetectionMode(DM_NORMAL);
64 }
65 /************************************
66  *
67  *
68  *
69  *
70  ************************************/
71 MarkerDetector_Impl::MarkerDetector_Impl(std::string dict_type, float error_correction_rate)
72 {
73  setDictionary(dict_type, error_correction_rate);
74  setDetectionMode(DM_NORMAL);
75 }
76 /************************************
77  *
78  *
79  *
80  *
81  ************************************/
82 
83 MarkerDetector_Impl::~MarkerDetector_Impl()
84 {
85 }
86 
87 void MarkerDetector_Impl::setParameters(const MarkerDetector::Params &params)
88 {
89  _params = params;
90  setDictionary(_params.dictionary, _params.error_correction_rate);
91 }
92 
93 /************************************
94  *
95  *
96  *
97  *
98  ************************************/
99 void MarkerDetector_Impl::setDetectionMode(DetectionMode dm, float minMarkerSize)
100 {
101  _params.setDetectionMode(dm, minMarkerSize);
102 }
103 
104 DetectionMode MarkerDetector_Impl::getDetectionMode()
105 {
106  return _params.detectMode;
107 }
108 
109 
110 
111 /************************************
112  *
113  *
114  *
115  *
116  ************************************/
117 
118 std::vector<aruco::Marker> MarkerDetector_Impl::detect(const cv::Mat &input)
119 {
120  std::vector<Marker> detectedMarkers;
121  detect(input, detectedMarkers);
122  return detectedMarkers;
123 }
124 
125 std::vector<aruco::Marker> MarkerDetector_Impl::detect(const cv::Mat &input,
126  const CameraParameters &camParams,
127  float markerSizeMeters,
128  bool setYPerperdicular, bool correctFisheye)
129 {
130  std::vector<Marker> detectedMarkers;
131  detect(input, detectedMarkers, camParams, markerSizeMeters, setYPerperdicular, correctFisheye);
132  return detectedMarkers;
133 }
134 
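// Minimal usage sketch (illustrative only; the dictionary name, the image path and the
// 0.05 m marker size are example values, not taken from this file):
//   aruco::MarkerDetector_Impl detector("ARUCO_MIP_36h12");
//   cv::Mat frame = cv::imread("frame.jpg");
//   std::vector<aruco::Marker> markers = detector.detect(frame);
//   // with valid camera parameters, poses can be computed as well:
//   // std::vector<aruco::Marker> withPose =
//   //     detector.detect(frame, camParams, 0.05f, false, false);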
135 /************************************
136  *
137  *
138  *
139  *
140  ************************************/
141 void MarkerDetector_Impl::detect(const cv::Mat &input, std::vector<Marker> &detectedMarkers,
142  CameraParameters camParams, float markerSizeMeters,
143  bool setYPerpendicular, bool correctFisheye)
144 {
145  if (camParams.CamSize != input.size() && camParams.isValid() && markerSizeMeters > 0)
146  {
147  // resize the camera parameters so that marker poses can be computed properly
148  CameraParameters cp_aux = camParams;
149  cp_aux.resize(input.size());
150  detect(input, detectedMarkers, cp_aux.CameraMatrix, cp_aux.Distorsion,
151  cp_aux.ExtrinsicMatrix, markerSizeMeters, setYPerpendicular, correctFisheye);
152  }
153  else
154  {
155  detect(input, detectedMarkers, camParams.CameraMatrix, camParams.Distorsion,
156  camParams.ExtrinsicMatrix, markerSizeMeters, setYPerpendicular, correctFisheye);
157  }
158 }
159 int MarkerDetector_Impl::getMarkerWarpSize()
160 {
161  auto bis = markerIdDetector->getBestInputSize();
162  if (bis != -1)
163  return bis;
164 
165  int ndiv = markerIdDetector->getNSubdivisions();
166  if (ndiv == -1)
167  ndiv = 7; // set any possible value (it is used for non-dictionary-based labelers)
168  return _params.markerWarpPixSize *
169  ndiv; // this is the minimum size that the smallest marker will have
170 }
171 
172 
173 
174 void MarkerDetector_Impl::buildPyramid(vector<cv::Mat> &ImagePyramid, const cv::Mat &grey,
175  int minSize)
176 {
177  // determine the number of pyramid images
178  int npyrimg = 1;
179  cv::Size imgpsize = grey.size();
180  while (imgpsize.width > minSize)
181  {
182  imgpsize = cv::Size(imgpsize.width / _params.pyrfactor, imgpsize.height / _params.pyrfactor);
183  npyrimg++;
184  }
185 
186  ImagePyramid.resize(npyrimg);
187  ImagePyramid[0] = grey;
188  // now, create pyramid images
189  imgpsize = grey.size();
190  for (int i = 1; i < npyrimg; i++)
191  {
192  cv::Size nsize(ImagePyramid[i - 1].cols / _params.pyrfactor,
193  ImagePyramid[i - 1].rows / _params.pyrfactor);
194  cv::resize(ImagePyramid[i - 1], ImagePyramid[i], nsize);
195  }
196 }
197 
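// Classifies each keypoint by the local structure of a (2*wsize+1)x(2*wsize+1) window:
// the window is binarized at the mid-point between its min and max intensity and the
// resulting regions are counted with a simple two-pass connected-component labeling.
// Low-contrast windows get class_id 0; with exactly two regions, class_id is 0 when the
// bright region dominates and 1 otherwise; with more than two regions (corner-like
// structure) class_id is 2.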
198 void impl_assignClass_fast(const cv::Mat &im, std::vector<cv::KeyPoint> &kpoints,
199  bool norm, int wsize)
200 {
201  if (im.type() != CV_8UC1)
202  throw std::runtime_error("assignClass_fast Input image must be 8UC1");
203  int wsizeFull = wsize * 2 + 1;
204 
205  cv::Mat labels = cv::Mat::zeros(wsize * 2 + 1, wsize * 2 + 1, CV_8UC1);
206  cv::Mat thresIm = cv::Mat(wsize * 2 + 1, wsize * 2 + 1, CV_8UC1);
207 
208  for (auto &kp : kpoints)
209  {
210  int x = kp.pt.x + 0.5f;
211  int y = kp.pt.y + 0.5f;
212 
213  cv::Rect r = cv::Rect(x - wsize, y - wsize, wsize * 2 + 1, wsize * 2 + 1);
214  // Check boundaries
215  if (r.x < 0 || r.x + r.width > im.cols || r.y < 0 || r.y + r.height > im.rows)
216  continue;
217 
218 
219  int endX = r.x + r.width;
220  int endY = r.y + r.height;
221  uchar minV = 255, maxV = 0;
222  for (int y = r.y; y < endY; y++)
223  {
224  const uchar *ptr = im.ptr<uchar>(y);
225  for (int x = r.x; x < endX; x++)
226  {
227  if (minV > ptr[x])
228  minV = ptr[x];
229  if (maxV < ptr[x])
230  maxV = ptr[x];
231  }
232  }
233 
234  if ((maxV - minV) < 25)
235  {
236  kp.class_id = 0;
237  continue;
238  }
239 
240  double thres = (maxV + minV) / 2.0;
241 
242  unsigned int nZ = 0;
243  // count non zero considering the threshold
244  for (int y = 0; y < wsizeFull; y++)
245  {
246  const uchar *ptr = im.ptr<uchar>(r.y + y) + r.x;
247  uchar *thresPtr = thresIm.ptr<uchar>(y);
248  for (int x = 0; x < wsizeFull; x++)
249  {
250  if (ptr[x] > thres)
251  {
252  nZ++;
253  thresPtr[x] = 255;
254  }
255  else
256  thresPtr[x] = 0;
257  }
258  }
259  // set all labels to zero (equivalent to labels.setTo(cv::Scalar::all(0)))
260  for (int y = 0; y < thresIm.rows; y++)
261  {
262  uchar *labelsPtr = labels.ptr<uchar>(y);
263  for (int x = 0; x < thresIm.cols; x++)
264  labelsPtr[x] = 0;
265  }
266 
267  uchar newLab = 1;
268  std::map<uchar, uchar> unions;
269  for (int y = 0; y < thresIm.rows; y++)
270  {
271  uchar *thresPtr = thresIm.ptr<uchar>(y);
272  uchar *labelsPtr = labels.ptr<uchar>(y);
273  for (int x = 0; x < thresIm.cols; x++)
274  {
275  uchar reg = thresPtr[x];
276  uchar lleft_px = 0;
277  uchar ltop_px = 0;
278 
279  if (x - 1 > -1)
280  {
281  if (reg == thresPtr[x - 1])
282  lleft_px = labelsPtr[x - 1];
283  }
284 
285  if (y - 1 > -1)
286  {
287  if (reg == thresIm.ptr<uchar>(y - 1)[x]) // thresIm.at<uchar>(y-1, x)
288  ltop_px = labels.at<uchar>(y - 1, x);
289  }
290 
291  if (lleft_px == 0 && ltop_px == 0)
292  labelsPtr[x] = newLab++;
293 
294  else if (lleft_px != 0 && ltop_px != 0)
295  {
296  if (lleft_px < ltop_px)
297  {
298  labelsPtr[x] = lleft_px;
299  unions[ltop_px] = lleft_px;
300  }
301  else if (lleft_px > ltop_px)
302  {
303  labelsPtr[x] = ltop_px;
304  unions[lleft_px] = ltop_px;
305  }
306  else
307  { // equal labels
308  labelsPtr[x] = ltop_px;
309  }
310  }
311  else
312  {
313  if (lleft_px != 0)
314  labelsPtr[x] = lleft_px;
315  else
316  labelsPtr[x] = ltop_px;
317  }
318  }
319  }
320 
321  int nc = newLab - 1 - unions.size();
322  if (nc == 2)
323  {
324  if (nZ > thresIm.total() - nZ)
325  kp.class_id = 0;
326  else
327  kp.class_id = 1;
328  }
329  else if (nc > 2)
330  {
331  kp.class_id = 2;
332  }
333  }
334 }
335 
336 // void fastCorners{
337 // vector<cv::KeyPoint> kpts;
338 // cv::FAST(input,kpts,7);
339 
340 // // Adapter.
341 // // Given an Point2f element, it returns the element of the dimension specified such
342 // that dim=0 is x and dim=1 is y struct PicoFlann_Point2fAdapter{
343 // inline float operator( )(const cv::KeyPoint &kp, int dim)const { return
344 // dim==0?kp.pt.x:kp.pt.y; }
345 // };
346 
347 // picoflann::KdTreeIndex<2,PicoFlann_Point2fAdapter> kdtree;//2 is the number of
348 // dimensions kdtree.build(kpts);
349 // //search 10 nearest neibors to point (0,0)
350 // vector<bool> maxima(kpts.size(),true);
351 // for(auto &kpt:kpts){
352 // kpt.size=1;//used
353 // kpt.class_id=-1;//not yet
354 // }
355 
356 // int classIdx=0;
357 // vector<pair< cv::Point2f,double> > center_weight;
358 // center_weight.reserve(kpts.size());
359 
360 // for(size_t i=0;i<kpts.size();i++){
361 // if( kpts[i].size){
362 // size_t maxResponseIdx =i;
363 // std::vector<std::pair<uint32_t,double> > res=kdtree.radiusSearch(kpts,kpts[i],7);
364 // //compute the one with max response
365 // for(auto p:res){
366 // if( kpts[p.first].response>= kpts[maxResponseIdx].response && kpts[p.first].size)
367 // maxResponseIdx= p.first ;
368 // }
369 // auto &maxKp=kpts[ maxResponseIdx];
370 
371 // if(maxKp.class_id==-1){
372 // maxKp.class_id=int(center_weight.size());
373 // center_weight.push_back( { maxKp.pt*maxKp.response,maxKp.response});
374 // }
375 
376 // //the others are non maxima
377 // for(auto p:res){
378 // if(p.first==maxResponseIdx) continue;
379 // auto &kp=kpts[p.first];
380 // if( kp.size!=0 && kp.class_id==-1){
381 // kp.size=0;//sets as non maxiima
382 // center_weight[maxKp.class_id].first+=kp.pt*kp.response;
383 // center_weight[maxKp.class_id].second+=kp.response;
384 // }
385 // }
386 // }
387 // }
388 
389 // //take only the selected ones
390 // kpts.clear();
391 // for(auto cw:center_weight){
392 // cv::KeyPoint kp;
393 // kp.pt= cw.first*(1./cw.second);
394 // kpts.push_back(kp);
395 // }
396 
399 
400 // assignClass_fast(input,kpts,false,5);
401 
402 
403 // cv::Mat im2;
404 // cv::cvtColor(input,im2,CV_GRAY2BGR);
405 // for(int i=0;i< kpts.size();i++){
406 // auto & kp=kpts[i];
407 
408 // if(kp.class_id==2){
409 
410 // cv::rectangle(auxThresImage,kp.pt-cv::Point2f(2,2),kp.pt+cv::Point2f(2,2),cv::Scalar(0,0,0),-1);
411 // cv::rectangle(im2,kp.pt-cv::Point2f(2,2),kp.pt+cv::Point2f(2,2),cv::Scalar(255,0,123));
412 // }
413 // else
414 // cv::rectangle(im2,kp.pt-cv::Point2f(2,2),kp.pt+cv::Point2f(2,2),cv::Scalar(125,125,125));
415 // }
416 
417 //}
418 /**************************************************
419  *
420  */
421 
422 vector<MarkerDetector_Impl::MarkerCandidate> MarkerDetector_Impl::thresholdAndDetectRectangles(
423  const cv::Mat &input, int thres_param1, int thres_param2, bool enclosed, cv::Mat &auxThresImage)
424 {
425  // ensure that _thresParam1%2==1
427  if (thres_param1 < 3)
428  thres_param1 = 3;
429  else if (((int)thres_param1) % 2 != 1)
430  thres_param1 = (int)(thres_param1 + 1);
431 
432  int enclosedOffset = -1;
433  cv::Mat auxImage;
434  // if ( !erode)
435  auxImage = auxThresImage;
436  if (_params.thresMethod == MarkerDetector::THRES_AUTO_FIXED)
437  {
438  cv::threshold(input, auxImage, static_cast<int>(thres_param2), 255, THRESH_BINARY_INV);
439  if (enclosed)
440  {
441  cv::Mat aux1;
442  enclosedOffset = int(std::max(3.0, 3. / 1920. * float(auxImage.cols)));
443  if (enclosedOffset % 2 == 0)
444  enclosedOffset++;
445  cv::erode(auxImage, aux1,
446  getStructuringElement(MORPH_CROSS, cv::Size(enclosedOffset, enclosedOffset),
447  cv::Point(enclosedOffset / 2, enclosedOffset / 2)));
448  cv::bitwise_xor(aux1, auxImage, auxImage);
449 
450  __ARUCO_TIMER_EVENT__("erode");
451  }
452  }
453  else
454  {
455  cv::adaptiveThreshold(input, auxImage, 255., ADAPTIVE_THRESH_MEAN_C, THRESH_BINARY_INV,
456  static_cast<int>(thres_param1), static_cast<int>(thres_param2));
457  if (_params.closingSize > 0)
458  {
459  int p = _params.closingSize * 2 + 1;
460  cv::Mat im2;
461  cv::Mat ker = cv::getStructuringElement(cv::MORPH_RECT, cv::Size(p, p));
462  cv::morphologyEx(auxImage, im2, cv::MORPH_OPEN, ker);
463  auxThresImage = im2;
464  }
465  enclosedOffset = thres_param1;
466  }
467  __ARUCO_TIMER_EVENT__("thres");
468 
469 
470  // fastCorners ()
471 
472 
473  vector<MarkerCandidate> MarkerCanditates;
474  // calculate the min/max contour sizes
475  int thisImageMinSize = int(3.5 * float(_params.lowResMarkerSize));
476  // if image is eroded, minSize must be adapted
477  std::vector<cv::Vec4i> hierarchy;
478  std::vector<std::vector<cv::Point>> contours;
479  cv::findContours(auxThresImage, contours, cv::noArray(), cv::RETR_LIST, cv::CHAIN_APPROX_NONE);
480  __ARUCO_TIMER_EVENT__("find-cont");
481  vector<Point> approxCurve;
482 //#define _aruco_debug_detectrectangles
483 #ifdef _aruco_debug_detectrectangles
484  cv::Mat simage;
485  cv::cvtColor(input, simage, CV_GRAY2BGR);
486 #endif
487 
489  for (unsigned int i = 0; i < contours.size(); i++)
490  {
491 #ifdef _aruco_debug_detectrectangles
492  drawContour(simage, contours[i], Scalar(125, 125, 255));
493 #endif
494  // check that it is a possible candidate by first checking that it is large enough
495  if (thisImageMinSize < int(contours[i].size()))
496  {
497  // can approximate to a convex rect?
498  cv::approxPolyDP(contours[i], approxCurve, double(contours[i].size()) * 0.05, true);
499 
500  if (approxCurve.size() == 4 && cv::isContourConvex(approxCurve))
501  {
502 #ifdef _aruco_debug_detectrectangles
503  drawApproxCurve(simage, approxCurve, Scalar(255, 0, 255), 1);
504 #endif
505  // ensure that the distance between consecutive points is large enough
506  float minDist = std::numeric_limits<float>::max();
507  for (int j = 0; j < 4; j++)
508  {
509  float d = cv::norm(approxCurve[j] - approxCurve[(j + 1) % 4]);
510  if (d < minDist)
511  minDist = d;
512  }
513 
514  // add the points
515  MarkerCanditates.push_back(MarkerCandidate());
516  for (int j = 0; j < 4; j++)
517  MarkerCanditates.back().push_back(Point2f(static_cast<float>(approxCurve[j].x),
518  static_cast<float>(approxCurve[j].y)));
519  // now, if the image was eroded, the corners must be enlarged a bit to reach their real locations
520  // for each opposite pair, take the line joining them and move the corners one pixel apart
521  // ideally, Bresenham's algorithm should be used
522  if (enclosed)
523  {
524  enlargeMarkerCandidate(MarkerCanditates.back(), float(enclosedOffset) / 2.);
525  }
526 #ifdef _aruco_debug_detectrectangles
527  MarkerCanditates.back().draw(simage, Scalar(255, 255, 0), 1, false);
528 #endif
529  MarkerCanditates.back().contourPoints = contours[i];
530  }
531  }
532  }
533 #ifdef _aruco_debug_detectrectangles
534  cv::imshow("contours", simage);
535 #endif
536  return MarkerCanditates;
537 }
538 
539 
540 void MarkerDetector_Impl::thresholdAndDetectRectangles_thread()
541 {
542  while (true)
543  {
544  bool enclosed = false;
545  auto tad = _tasks.pop();
546  if (tad.task == EXIT_TASK)
547  return;
548  else if (tad.task == ENCLOSE_TASK)
549  enclosed = true;
550  _vcandidates[tad.outIdx] =
551  thresholdAndDetectRectangles(_thres_Images[tad.inIdx], tad.param1, tad.param2,
552  enclosed, _thres_Images[tad.outIdx]);
553  };
554 }
555 
556 vector<aruco::MarkerDetector_Impl::MarkerCandidate> MarkerDetector_Impl::thresholdAndDetectRectangles(
557  const cv::Mat &image)
558 {
559  // compute the different values of param1
560 
561  int adaptiveWindowSize = _params.AdaptiveThresWindowSize;
562  if (_params.AdaptiveThresWindowSize == -1)
563  adaptiveWindowSize = max(int(3), int(15 * float(image.cols) / 1920.));
564  if (adaptiveWindowSize % 2 == 0)
565  adaptiveWindowSize++;
566 
567  // adaptiveWindowSize=15;
568 
569  vector<int> p1_values;
570  for (int i = static_cast<int>(
571  std::max(3., adaptiveWindowSize - 2. * _params.AdaptiveThresWindowSize_range));
572  i <= adaptiveWindowSize + 2 * _params.AdaptiveThresWindowSize_range; i += 2)
573  p1_values.push_back(i);
574 
575  size_t nimages = p1_values.size();
576  _tooNearDistance = p1_values.back();
577  _vcandidates.resize(nimages);
578  _thres_Images.resize(nimages + 1);
579  _thres_Images.back() = image; // add the original image at the end
580  // first, the thresholded images
581  ThresAndDetectRectTASK tad;
582  vector<ThresAndDetectRectTASK> vtad;
583 
584 
585  ThreadTasks task = THRESHOLD_TASK;
586  if (_params.enclosedMarker)
587  task = ENCLOSE_TASK;
588  for (size_t i = 0; i < p1_values.size(); i++)
589  {
590  tad.inIdx = int(_thres_Images.size() - 1);
591  tad.param1 = p1_values[i];
592  tad.param2 = _params.ThresHold;
593  tad.outIdx = i;
594  tad.task = task;
595  _tasks.push(tad);
596  vtad.push_back(tad);
597  }
598 
599 
600  // reserve images
601  for (size_t i = 0; i < nimages; i++)
602  _thres_Images[i].create(image.size(), CV_8UC1);
603 
604 
605  // how many threads will be used?
606  int nthreads = 0;
607  if (_params.maxThreads <= 0) // if allowed to use all, take hardware_concurrency()-1, since
608  // buildPyramid may still be running at this moment
609  nthreads = std::thread::hardware_concurrency() - 1;
610  else
611  nthreads = max(1, _params.maxThreads - 1);
612 
613  tad.task = EXIT_TASK;
614  for (int i = 0; i < nthreads; i++)
615  _tasks.push(tad);
616 
617  if (nthreads == 1)
618  { // no threads
619  ScopeTimer Timer("non-parallel");
620  thresholdAndDetectRectangles_thread();
621  }
622  else
623  { // parallel mode
624  // add the final task END
625  ScopeTimer Timer("parallel");
626 
627  // run the tasks (in parallel)
628  vector<std::thread> threads;
629  for (int i = 0; i < nthreads; i++)
630  threads.push_back(
631  std::thread(&MarkerDetector_Impl::thresholdAndDetectRectangles_thread, this));
632  // wait for them to finish
633  for (auto &th : threads)
634  th.join();
635  }
636  vector<MarkerCandidate> joined;
637  joinVectors(_vcandidates, joined);
638  return joined;
639 }
640 
641 vector<MarkerDetector_Impl::MarkerCandidate> MarkerDetector_Impl::prefilterCandidates(
642  vector<aruco::MarkerDetector_Impl::MarkerCandidate> &MarkerCanditates, cv::Size imgSize)
643 {
648  valarray<bool> swapped(false, MarkerCanditates.size()); // used later
649  for (unsigned int i = 0; i < MarkerCanditates.size(); i++)
650  {
651  // trace a line between the first and second points.
652  // if the third point is on the right side, the points are in anti-clockwise order
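 // o below is the z-component of the 2D cross product (p1-p0) x (p2-p0);
 // a negative value means the corners are not in anti-clockwise order, so
 // corners 1 and 3 are swapped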
653  double dx1 = MarkerCanditates[i][1].x - MarkerCanditates[i][0].x;
654  double dy1 = MarkerCanditates[i][1].y - MarkerCanditates[i][0].y;
655  double dx2 = MarkerCanditates[i][2].x - MarkerCanditates[i][0].x;
656  double dy2 = MarkerCanditates[i][2].y - MarkerCanditates[i][0].y;
657  double o = (dx1 * dy2) - (dy1 * dx2);
658 
659  if (o < 0.0)
660  { // if the third point is on the left side, reorder into anti-clockwise order
661  swap(MarkerCanditates[i][1], MarkerCanditates[i][3]);
662  swapped[i] = true;
663  }
664  }
665 
666  if (0)
667  {
669  // first detect candidates to be removed
670  vector<pair<int, int>> TooNearCandidates;
671  for (unsigned int i = 0; i < MarkerCanditates.size(); i++)
672  {
673  // compute the distance between corresponding corners of the two candidates
674  for (unsigned int j = i + 1; j < MarkerCanditates.size(); j++)
675  {
676  valarray<float> vdist(4);
677  for (int c = 0; c < 4; c++)
678  vdist[c] = cv::norm(MarkerCanditates[i][c] - MarkerCanditates[j][c]);
679  // if distance is too small
680  if (vdist[0] < _tooNearDistance && vdist[1] < _tooNearDistance &&
681  vdist[2] < _tooNearDistance && vdist[3] < _tooNearDistance)
682  TooNearCandidates.push_back(pair<int, int>(i, j));
683  }
684  }
685 
687  vector<bool> toRemove(MarkerCanditates.size(), false);
688  for (unsigned int i = 0; i < TooNearCandidates.size(); i++)
689  {
690  if (perimeter(MarkerCanditates[TooNearCandidates[i].first]) >
691  perimeter(MarkerCanditates[TooNearCandidates[i].second]))
692  toRemove[TooNearCandidates[i].second] = true;
693  else
694  toRemove[TooNearCandidates[i].first] = true;
695  }
696 
698  // remove markers with corners too near the image limits
699  int borderDistThresX = static_cast<int>(_params.borderDistThres * float(imgSize.width));
700  int borderDistThresY = static_cast<int>(_params.borderDistThres * float(imgSize.height));
701  for (size_t i = 0; i < MarkerCanditates.size(); i++)
702  {
703  // delete if any of the corners is too near image border
704  for (size_t c = 0; c < MarkerCanditates[i].size(); c++)
705  {
706  if (MarkerCanditates[i][c].x < borderDistThresX ||
707  MarkerCanditates[i][c].y < borderDistThresY ||
708  MarkerCanditates[i][c].x > imgSize.width - borderDistThresX ||
709  MarkerCanditates[i][c].y > imgSize.height - borderDistThresY)
710  {
711  toRemove[i] = true;
712  }
713  }
714  }
715 
716  // move to output only valid ones
717  vector<MarkerCandidate> finalCandidates;
718  finalCandidates.reserve(MarkerCanditates.size());
719  for (size_t i = 0; i < MarkerCanditates.size(); i++)
720  if (!toRemove[i])
721  finalCandidates.push_back(MarkerCanditates[i]);
722  return finalCandidates;
723  }
724  else
725  return MarkerCanditates;
726 }
727 
728 
729 void MarkerDetector_Impl::addToImageHist(cv::Mat &im, std::vector<float> &hist)
730 {
731  for (int y = 0; y < im.rows; y++)
732  {
733  uchar *ptr = im.ptr<uchar>(y);
734  for (int x = 0; x < im.cols; x++)
735  hist[ptr[x]]++;
736  }
737 }
738 
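// Otsu's method: the histogram is normalized and, for each candidate threshold t, the
// between-class variance w0*w1*(mean0-mean1)^2 of the two resulting classes is
// evaluated; the t that maximizes it is returned (-1 if no valid split is found).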
739 int MarkerDetector_Impl::Otsu(std::vector<float> &hist)
740 {
741  float sum = 0, invsum;
742  for (auto c : hist)
743  sum += c;
744  invsum = 1. / sum;
745  for (auto &c : hist)
746  c *= invsum;
747 
748  float maxVar = 0;
749  int bestT = -1;
750  for (int t = 1; t < 256; t++)
751  {
752  float w0 = 0, w1 = 0, mean0 = 0, mean1 = 0;
753  for (int v = 0; v < t; v++)
754  {
755  w0 += hist[v];
756  mean0 += float(v) * hist[v];
757  }
758  for (int v = t; v < 256; v++)
759  {
760  w1 += hist[v];
761  mean1 += hist[v] * float(v);
762  }
763  if (w0 > 1e-4 && w1 > 1e-4)
764  {
765  mean0 /= w0;
766  mean1 /= w1;
767  float var = w0 * w1 * (mean0 - mean1) * (mean0 - mean1);
768  // cout<<t<<" : "<<var<<"|"<<w0<<" "<<w1<<" "<<mean0<<" "<<mean1<<endl;
769  if (var > maxVar)
770  {
771  maxVar = var;
772  bestT = t;
773  }
774  }
775  }
776  return bestT;
777 }
778 /************************************
779  * Main detection function. Performs all steps
780  ************************************/
781 void MarkerDetector_Impl::detect(const cv::Mat &input, std::vector<Marker> &detectedMarkers,
782  cv::Mat camMatrix, cv::Mat distCoeff, cv::Mat extrinsics,
783  float markerSizeMeters, bool setYPerpendicular,
784  bool correctFisheye)
785 {
786  // clear input data
787  detectedMarkers.clear();
788  _vcandidates.clear();
789  _candidates_wcontour.clear();
791 
792 
793  // convert to greyscale if the input is a 3-channel image
794  if (input.type() == CV_8UC3)
795  cv::cvtColor(input, grey, CV_BGR2GRAY);
796  // convertToGray(input, grey);
797  else
798  grey = input;
799  __ARUCO_TIMER_EVENT__("ConvertGrey");
800 
804  float ResizeFactor = 1;
805  // use the minimum marker size and markerWarpSize to determine the optimal image size for rectangle detection
806  cv::Mat imgToBeThresHolded;
807  cv::Size maxImageSize = grey.size();
808  auto minpixsize =
809  getMinMarkerSizePix(input.size()); // min pixel size of the marker in the original image
810  if (_params.lowResMarkerSize < minpixsize)
811  {
812  ResizeFactor = float(_params.lowResMarkerSize) / float(minpixsize);
813  if (ResizeFactor < 0.9)
814  { // do not waste time if smaller than this
815  _debug_msg("Scale factor=" << ResizeFactor, 1);
816  maxImageSize.width = float(grey.cols) * ResizeFactor + 0.5;
817  maxImageSize.height = float(grey.rows) * ResizeFactor + 0.5;
818  if (maxImageSize.width % 2 != 0)
819  maxImageSize.width++;
820  if (maxImageSize.height % 2 != 0)
821  maxImageSize.height++;
822  cv::resize(grey, imgToBeThresHolded, maxImageSize, 0, 0, cv::INTER_NEAREST);
823  // cv::resize(grey,imgToBeThresHolded,maxImageSize,0,0,cv::INTER_LINEAR);
824  }
825  }
826 
827  if (imgToBeThresHolded.empty()) // if not set in previous step, add original now
828  imgToBeThresHolded = grey;
829 
830  __ARUCO_TIMER_EVENT__("CreateImageToTheshold");
831  bool needPyramid =
832  true; // ResizeFactor< 1/_params.pyrfactor;//only use pyramid if working on a big image.
833  std::thread buildPyramidThread;
834  if (needPyramid)
835  {
836  if (_params.maxThreads > 1)
837  buildPyramidThread =
838  std::thread([&] { buildPyramid(imagePyramid, grey, 2 * getMarkerWarpSize()); });
839  else
840  buildPyramid(imagePyramid, grey, 2 * getMarkerWarpSize());
841  __ARUCO_TIMER_EVENT__("BuildPyramid");
842  }
843  else
844  {
845  imagePyramid.resize(1);
846  imagePyramid[0] = grey;
847  }
848 
849 
850  int nAttemptsAutoFix = 0;
851  bool keepLookingFor = false;
852  std::vector<float> hist(256, 0);
853  do
854  {
858  vector<MarkerCandidate> MarkerCanditates;
859  MarkerCanditates = thresholdAndDetectRectangles(imgToBeThresHolded);
860  thres = _thres_Images[0];
861 
862  // _debug_exec(10,
863  // {//only executes when compiled in DEBUG mode if debug level is at least 10
864  // //show the thresholded images
865  // for (size_t i = 0; i < _thres_Images.size(); i++) {
866  // stringstream sstr; sstr << "thres-" << i;
867  // cv::namedWindow(sstr.str(),cv::WINDOW_NORMAL);
868  // cv::imshow(sstr.str(),_thres_Images[i]);
869  // }});
870 
871 
872  __ARUCO_TIMER_EVENT__("Threshold and Detect rectangles");
873  // prefilter candidates
874  // _debug_exec(10,//only executes when compiled in DEBUG mode if debug level is at least 10
875  // //show the thresholded images
876  // cv::Mat imrect;
877  // cv::cvtColor(imgToBeThresHolded,imrect,CV_GRAY2BGR);
878  // for(auto m: MarkerCanditates )
879  // m.draw(imrect,cv::Scalar(0,245,0));
880  // cv::imshow("rect-nofiltered",imrect);
881  // );
882 
883  MarkerCanditates = prefilterCandidates(MarkerCanditates, imgToBeThresHolded.size());
884 
885  __ARUCO_TIMER_EVENT__("prefilterCandidates");
886 
887  // _debug_exec(10,//only executes when compiled in DEBUG mode if debug level is at least 10
888  // //show the thresholded images
889  // cv::Mat imrect;
890  // cv::cvtColor(imgToBeThresHolded,imrect,CV_GRAY2BGR);
891  // for(auto m: MarkerCanditates)
892  // m.draw(imrect,cv::Scalar(0,245,0));
893  // cv::imshow("rect-filtered",imrect);
894  // );
895  // before going on, make sure the pyramid is built
896  if (buildPyramidThread.joinable())
897  buildPyramidThread.join();
898 
899 
903 
904  // Debug::setLevel(10);
905  auto markerWarpSize = getMarkerWarpSize();
906 
907  detectedMarkers.clear();
908  _candidates_wcontour.clear();
909  for (auto &b : hist)
910  b = 0;
911  float desiredarea = std::pow(static_cast<float>(markerWarpSize), 2.f);
912  for (size_t i = 0; i < MarkerCanditates.size(); i++)
913  {
914  // Find projective homography
915  cv::Mat canonicalMarker, canonicalMarkerAux;
916 
917  cv::Mat inToWarp = imgToBeThresHolded;
918  MarkerCandidate points2d_pyr = MarkerCanditates[i];
919  if (needPyramid)
920  {
921  // warping is one of the most time-consuming operations, especially when the
922  // region is large. To reduce computing time, find the pyramid level whose
923  // scale best matches the desired patch, i.e. account for how much bigger the
924  // observed region is w.r.t. the desired patch
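 // the area test divides by 4^p, i.e. it assumes each pyramid level halves
 // both dimensions (pyrfactor == 2); level p is used while the scaled
 // candidate area is still at least the desired warped area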
925  size_t imgPyrIdx = 0;
926  for (size_t p = 1; p < imagePyramid.size(); p++)
927  {
928  if (MarkerCanditates[i].getArea() / pow(4, p) >= desiredarea)
929  imgPyrIdx = p;
930  else
931  break;
932  }
933  inToWarp = imagePyramid[imgPyrIdx];
934  // move points to the image level p
935  float ratio = float(inToWarp.cols) / float(imgToBeThresHolded.cols);
936  for (auto &p : points2d_pyr)
937  p *= ratio; // 1. / pow(2, imgPyrIdx);
938  }
939  warp(inToWarp, canonicalMarker, Size(markerWarpSize, markerWarpSize), points2d_pyr);
940  int id, nRotations;
941  double min, Max;
942  cv::minMaxIdx(canonicalMarker, &min, &Max);
943  canonicalMarker.copyTo(canonicalMarkerAux);
944  string additionalInfo;
945  // _debug_exec(10,//only executes when compiled in DEBUG mode if debug
946  // level is at least 10
947  // //show the thresholded images
948  // stringstream sstr;sstr<<"test-"<<i;
949  // cout <<"test"<<i<<endl;
950  // cv::namedWindow(sstr.str(),cv::WINDOW_NORMAL);
951  // cv::imshow(sstr.str(),canonicalMarkerAux);
952  // cv::waitKey(0);
953  // );
954  if (markerIdDetector->detect(canonicalMarkerAux, id, nRotations, additionalInfo))
955  {
956  detectedMarkers.push_back(MarkerCanditates[i]);
957  detectedMarkers.back().id = id;
958  detectedMarkers.back().dict_info = additionalInfo;
959  detectedMarkers.back().contourPoints = MarkerCanditates[i].contourPoints;
960  // sort the points so that they are always in the same order no matter the camera orientation
961  std::rotate(detectedMarkers.back().begin(), detectedMarkers.back().begin() + 4 - nRotations,
962  detectedMarkers.back().end());
963  // _debug_exec(10,//only executes when compiled in DEBUG mode if
964  // debug level is at least 10
965  // //show the thresholded images
966  // stringstream
967  // sstr;sstr<<"can-"<<detectedMarkers.back().id;
968  // cv::namedWindow(sstr.str(),cv::WINDOW_NORMAL);
969  // cv::imshow(sstr.str(),canonicalMarker);
970  // cout<<"ID="<<id<<" "<< detectedMarkers.back()<<endl;
971  // );
972  if (_params.thresMethod == MarkerDetector::THRES_AUTO_FIXED)
973  addToImageHist(canonicalMarker, hist);
974  }
975  else
976  {
977  _candidates_wcontour.push_back(MarkerCanditates[i]);
978  }
979  }
980  __ARUCO_TIMER_EVENT__("Marker classification. ");
981  if (detectedMarkers.size() == 0 && _params.thresMethod == MarkerDetector::THRES_AUTO_FIXED &&
982  ++nAttemptsAutoFix < _params.NAttemptsAutoThresFix)
983  {
984  _params.ThresHold = 10 + rand() % 230;
985  keepLookingFor = true;
986  }
987  else
988  keepLookingFor = false;
989  } while (keepLookingFor);
990 
991  // Debug::setLevel(5);
992 
993 
994  if (_params.thresMethod == MarkerDetector::THRES_AUTO_FIXED)
995  {
996  int newThres = Otsu(hist);
998  if (newThres > 0)
999  _params.ThresHold = float(newThres);
1000  }
1001 
1002 #ifdef debug_lines
1003  cv::imshow("image-lines", image);
1004  cv::waitKey(10);
1005 #endif
1006  // now, move the points to the original image (upsample corners)
1007  if (input.cols != imgToBeThresHolded.cols)
1008  {
1009  cornerUpsample(detectedMarkers, imgToBeThresHolded.size());
1010  __ARUCO_TIMER_EVENT__("Corner Upsample");
1011  }
1012 
1013 
1014 
1018 
1019 
1020  if (_params.trackingMinDetections > 0)
1021  {
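 // markers seen consistently in previous frames (at least trackingMinDetections
 // times) but missed by the labeler in this frame are recovered from the
 // unidentified candidates: a candidate whose center falls inside the previous
 // marker and has a similar area is accepted, and its corners are re-ordered to
 // match the previous orientation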
1022  // update the marker_ndetections
1023  // decrement unseen ones
1024  for (auto &md : marker_ndetections)
1025  {
1026  if (std::find(detectedMarkers.begin(), detectedMarkers.end(), Marker(md.first)) ==
1027  detectedMarkers.end())
1028  md.second = std::max(md.second - 1, 0);
1029  }
1030 
1031  // Identify the markers in the prev frame not yet tracked
1032  std::vector<Marker> needsTrack;
1033  for (auto m : _prevMarkers)
1034  {
1035  if (std::find(detectedMarkers.begin(), detectedMarkers.end(), Marker(m.id)) ==
1036  detectedMarkers.end())
1037  {
1038  if (marker_ndetections.count(m.id) != 0)
1039  {
1040  if (marker_ndetections.at(m.id) >= _params.trackingMinDetections)
1041  needsTrack.push_back(m);
1042  }
1043  }
1044  }
1045 
1046  // look for them in the candidates
1047  if (needsTrack.size() > 0)
1048  {
1049  struct trackInfo
1050  {
1051  trackInfo(const Marker &m)
1052  {
1053  ma.setParams(m);
1054  }
1055  marker_analyzer ma;
1056  int candidate = -1;
1057  double dist = std::numeric_limits<double>::max();
1058  };
1059  std::map<int, trackInfo> marker_trackMatches;
1060  for (auto &mnt : needsTrack)
1061  marker_trackMatches.insert({ mnt.id, trackInfo(mnt) });
1062 
1063  for (size_t cand = 0; cand < _candidates_wcontour.size(); cand++)
1064  {
1065  auto &mcnd = _candidates_wcontour[cand];
1066  marker_analyzer can_ma;
1067  can_ma.setParams(mcnd);
1068  for (auto &mtm : marker_trackMatches)
1069  {
1070  if (mtm.second.ma.isInto(can_ma.getCenter()))
1071  {
1072  auto dist = cv::norm(mtm.second.ma.getCenter() - can_ma.getCenter());
1073  auto sizedif =
1074  fabs(mtm.second.ma.getArea() - can_ma.getArea()) / mtm.second.ma.getArea();
1075  if (sizedif < 0.3f && dist < mtm.second.dist)
1076  {
1077  mtm.second.candidate = static_cast<int>(cand);
1078  mtm.second.dist = dist;
1079  }
1080  }
1081  }
1082  }
1083 
1084  for (auto &mtm : marker_trackMatches)
1085  {
1086  if (mtm.second.candidate == -1)
1087  continue;
1088  auto it = find(_prevMarkers.begin(), _prevMarkers.end(), Marker(mtm.first));
1089  assert(it != _prevMarkers.end());
1090  detectedMarkers.push_back(_candidates_wcontour[mtm.second.candidate]);
1091  detectedMarkers.back().id = mtm.first;
1092  detectedMarkers.back().dict_info = it->dict_info;
1093  detectedMarkers.back().contourPoints =
1094  _candidates_wcontour[mtm.second.candidate].contourPoints;
1095  // find the best rotation
1096 
1097  auto rotError = [](const vector<cv::Point2f> &c1, const vector<cv::Point2f> &c2)
1098  {
1099  cv::Point2f direction1 = c1[1] - c1[0];
1100  direction1 *= 1. / cv::norm(direction1);
1101  cv::Point2f direction2 = c2[1] - c2[0];
1102  direction2 *= 1. / cv::norm(direction2);
1103  return direction1.dot(direction2);
1104  };
1105  auto prev = find(_prevMarkers.begin(), _prevMarkers.end(), Marker(mtm.first));
1106  pair<int, double> rot_error(-1, -1);
1107  for (int r = 0; r < 4; r++)
1108  {
1109  vector<cv::Point2f> aux = detectedMarkers.back();
1110  std::rotate(aux.begin(), aux.begin() + r, aux.end());
1111  auto err = rotError(*prev, aux);
1112  if (err > rot_error.second)
1113  {
1114  rot_error = { r, err };
1115  }
1116  }
1117  std::rotate(detectedMarkers.back().begin(),
1118  detectedMarkers.back().begin() + rot_error.first,
1119  detectedMarkers.back().end());
1120  // mark for removal
1121  _candidates_wcontour[mtm.second.candidate].clear();
1122  }
1123  // remove the used candidates
1124  _candidates_wcontour.erase(
1125  std::remove_if(_candidates_wcontour.begin(), _candidates_wcontour.end(),
1126  [](const vector<cv::Point2f> &m) { return m.size() == 0; }),
1127  _candidates_wcontour.end());
1128  }
1129  // update for next
1130  for (auto m : detectedMarkers)
1131  {
1132  if (marker_ndetections.count(m.id) == 0)
1133  marker_ndetections[m.id] = 1;
1134  else
1135  marker_ndetections[m.id]++;
1136  }
1137  }
1138 
1139 
1143 
1144  if (1)
1145  {
1146  // sort by id
1147  std::sort(detectedMarkers.begin(), detectedMarkers.end());
1148 
1149  // there might still be cases in which a marker is detected twice because of the double
1150  // border indicated earlier; detect and remove them
1151  vector<bool> toRemove(detectedMarkers.size(), false);
1152 
1153  for (int i = 0; i < int(detectedMarkers.size()) - 1; i++)
1154  {
1155  for (int j = i + 1; j < int(detectedMarkers.size()) && !toRemove[i]; j++)
1156  {
1157  if (detectedMarkers[i].id == detectedMarkers[j].id &&
1158  detectedMarkers[i].dict_info == detectedMarkers[j].dict_info)
1159  {
1160  // delete the one with the smaller perimeter
1161  if (perimeter(detectedMarkers[i]) < perimeter(detectedMarkers[j]))
1162  toRemove[i] = true;
1163  else
1164  toRemove[j] = true;
1165  }
1166  }
1167  }
1168 
1169  removeElements(detectedMarkers, toRemove);
1170  }
1171 
1172 
1177  if (detectedMarkers.size() > 0 && input.size() == imgToBeThresHolded.size())
1178  {
1179  if (_params.cornerRefinementM == CORNER_SUBPIX)
1180  {
1181  int halfwsize = 4 * float(input.cols) / float(imgToBeThresHolded.cols) + 0.5;
1182 
1183  vector<Point2f> Corners;
1184  for (unsigned int i = 0; i < detectedMarkers.size(); i++)
1185  for (int c = 0; c < 4; c++)
1186  Corners.push_back(detectedMarkers[i][c]);
1187  cornerSubPix(grey, Corners, cv::Size(halfwsize, halfwsize), cv::Size(-1, -1),
1188  cv::TermCriteria(cv::TermCriteria::MAX_ITER | cv::TermCriteria::EPS, 12, 0.005));
1189  // copy back
1190  for (unsigned int i = 0; i < detectedMarkers.size(); i++)
1191  for (int c = 0; c < 4; c++)
1192  detectedMarkers[i][c] = Corners[i * 4 + c];
1193  }
1194  else if (_params.cornerRefinementM == CORNER_LINES)
1195  {
1196  // use the lines method for estimation of the corners
1197  for (unsigned int i = 0; i < detectedMarkers.size(); i++)
1198  refineCornerWithContourLines(detectedMarkers[i]);
1199  }
1200  }
1201  __ARUCO_TIMER_EVENT__("Corner Refinement");
1203 
1204 
1205 
1209 
1211  if (camMatrix.rows != 0 && markerSizeMeters > 0)
1212  {
1213  for (unsigned int i = 0; i < detectedMarkers.size(); i++)
1214  detectedMarkers[i].calculateExtrinsics(markerSizeMeters, camMatrix, distCoeff,
1215  extrinsics, setYPerpendicular, correctFisheye);
1216  __ARUCO_TIMER_EVENT__("Pose Estimation");
1217  }
1218 
1219  // compute _markerMinSize
1220  float mlength = std::numeric_limits<float>::max();
1221  for (const auto &marker : detectedMarkers)
1222  {
1223  float l = 0;
1224  for (int c = 0; c < 4; c++)
1225  l += cv::norm(marker[c] - marker[(c + 1) % 4]);
1226  if (mlength > l)
1227  mlength = l;
1228  }
1229  float markerMinSize;
1230  if (mlength != std::numeric_limits<float>::max())
1231  markerMinSize = mlength / (4 * std::max(input.cols, input.rows));
1232  else
1233  markerMinSize = 0;
1234  if (_params.autoSize)
1235  {
1236  _params.minSize = markerMinSize * (1 - _params.ts);
1237  }
1238 
1239 
1241  _prevMarkers = detectedMarkers;
1242 }
1243 void MarkerDetector_Impl::refineCornerWithContourLines(aruco::Marker &marker,
1244  cv::Mat camMatrix, cv::Mat distCoeff)
1245 {
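 // The corners are refined by (1) finding the contour point closest to each current
 // corner, (2) splitting the contour into the four edge segments between consecutive
 // corners, (3) fitting a 2D line to each segment (optionally in undistorted
 // coordinates), and (4) intersecting consecutive lines to obtain the new corners.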
1246  // search corners on the contour vector
1247 
1248  std::vector<cv::Point> &contour = marker.contourPoints;
1249  vector<int> cornerIndex(4, -1);
1250  vector<float> dist(4, std::numeric_limits<float>::max());
1251  for (unsigned int j = 0; j < contour.size(); j++)
1252  {
1253  for (unsigned int k = 0; k < 4; k++)
1254  {
1255  float d = (contour[j].x - marker[k].x) * (contour[j].x - marker[k].x) +
1256  (contour[j].y - marker[k].y) * (contour[j].y - marker[k].y);
1257  if (d < dist[k])
1258  {
1259  cornerIndex[k] = j;
1260  dist[k] = d;
1261  }
1262  }
1263  }
1264 
1265  // if any cornerIndex
1266 
1267 
1268  // are the contour pixels in inverse order or not?
1269  bool inverse;
1270  if ((cornerIndex[1] > cornerIndex[0]) &&
1271  (cornerIndex[2] > cornerIndex[1] || cornerIndex[2] < cornerIndex[0]))
1272  inverse = false;
1273  else if (cornerIndex[2] > cornerIndex[1] && cornerIndex[2] < cornerIndex[0])
1274  inverse = false;
1275  else
1276  inverse = true;
1277 
1278 
1279  // get pixel vector for each line of the marker
1280  int inc = 1;
1281  if (inverse)
1282  inc = -1;
1283 
1284  // undistort contour
1285  vector<Point2f> contour2f;
1286  if (!camMatrix.empty() && !distCoeff.empty())
1287  {
1288  for (unsigned int i = 0; i < contour.size(); i++)
1289  contour2f.push_back(cv::Point2f(contour[i].x, contour[i].y));
1290  if (!camMatrix.empty() && !distCoeff.empty())
1291  cv::undistortPoints(contour2f, contour2f, camMatrix, distCoeff, cv::Mat(), camMatrix);
1292  }
1293  else
1294  {
1295  contour2f.reserve(contour.size());
1296  for (auto p : contour)
1297  contour2f.push_back(cv::Point2f(p.x, p.y));
1298  }
1299 
1300  vector<std::vector<cv::Point2f>> contourLines;
1301  contourLines.resize(4);
1302  for (unsigned int l = 0; l < 4; l++)
1303  {
1304  for (int j = (int)cornerIndex[l]; j != (int)cornerIndex[(l + 1) % 4]; j += inc)
1305  {
1306  if (j == (int)contour.size() && !inverse)
1307  j = 0;
1308  else if (j == 0 && inverse)
1309  j = contour.size() - 1;
1310  contourLines[l].push_back(contour2f[j]);
1311  if (j == (int)cornerIndex[(l + 1) % 4])
1312  break; // this has to be added because of the previous ifs
1313  }
1314  }
1315 
1316  // interpolate marker lines
1317  vector<Point3f> lines;
1318  lines.resize(4);
1319  for (unsigned int j = 0; j < lines.size(); j++)
1320  interpolate2Dline(contourLines[j], lines[j]);
1321 
1322  // get cross points of lines
1323  vector<Point2f> crossPoints;
1324  crossPoints.resize(4);
1325  for (unsigned int i = 0; i < 4; i++)
1326  crossPoints[i] = getCrossPoint(lines[(i - 1) % 4], lines[i]);
1327 
1328  // distort corners again if undistortion was performed
1329  if (!camMatrix.empty() && !distCoeff.empty())
1330  distortPoints(crossPoints, crossPoints, camMatrix, distCoeff);
1331 
1332  // reassign points
1333  for (unsigned int j = 0; j < 4; j++)
1334  {
1335  // cout<<marker[j]<<" "<<crossPoints[j]<<endl;
1336  marker[j] = crossPoints[j];
1337  }
1338 }
1339 
1340 
1341 
1342 // expands the corners of the candidate to reach the real locations
1343 // used in eroded images
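// Each pair of opposite corners (j and j+2) is pushed apart by 'fact' pixels along
// the direction of their diagonal, quantized to one of eight 45-degree sectors.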
1344 void MarkerDetector_Impl::enlargeMarkerCandidate(MarkerCandidate &cand, int fact)
1345 {
1346  for (int j = 0; j < 2; j++)
1347  {
1348  auto startp = j;
1349  auto endp = (j + 2) % 4;
1350  // sort so that the corner with the smaller x coordinate comes first
1351  if (cand[startp].x > cand[endp].x)
1352  {
1353  swap(startp, endp);
1354  }
1355  const float _180 = 3.14159f;
1356 
1357  const float _22 = 3.14159 / 8.f;
1358  const float _3_22 = 3. * 3.14159f / 8.f;
1359  const float _5_22 = 5.f * 3.14159f / 8.f;
1360  const float _7_22 = 7.f * 3.14159f / 8.f;
1361 
1362  int incx = 0, incy = 0;
1363  // compute the angle
1364  auto v1 = cand[endp] - cand[startp];
1365  float angle = atan2(v1.y, v1.x);
1366  if (_22 < angle && angle < 3 * _22)
1367  { // a
1368  incx = incy = fact;
1369  }
1370  else if (-_22 < angle && angle < _22)
1371  { // b
1372  incx = fact;
1373  incy = 0;
1374  }
1375  else if (-_3_22 < angle && angle < -_22)
1376  { // c
1377  incx = fact;
1378  incy = -fact;
1379  }
1380  else if (-_5_22 < angle && angle < -_3_22)
1381  { // D
1382  incx = 0;
1383  incy = -fact;
1384  }
1385  else if (-_7_22 < angle && angle < -_5_22)
1386  { // E
1387  incx = -fact;
1388  incy = -fact;
1389  }
1390  else if ((-_180 < angle && angle < -_7_22) || (_7_22 < angle && angle < _180))
1391  { // f
1392  incx = -fact;
1393  incy = 0;
1394  }
1395  else if ((_5_22 < angle && angle < _7_22))
1396  { // g
1397  incx = -fact;
1398  incy = fact;
1399  }
1400  else if ((_3_22 < angle && angle < _5_22))
1401  { // h
1402  incx = fact;
1403  incy = fact;
1404  }
1405  cand[endp].x += incx;
1406  cand[endp].y += incy;
1407  cand[startp].x -= incx;
1408  cand[startp].y -= incy;
1409  }
1410 }
1411 
1412 
1413 
1414 int MarkerDetector_Impl::getMinMarkerSizePix(cv::Size orginput_imageSize) const
1415 {
1416  if (_params.minSize == -1 && _params.minSize_pix == -1)
1417  return 0;
1418  // calculate the min/max contour sizes
1419  int maxDim = std::max(orginput_imageSize.width, orginput_imageSize.height);
1420  int minSize = 0;
1421  if (_params.minSize != -1)
1422  minSize = static_cast<float>(_params.minSize) * static_cast<float>(maxDim);
1423  if (_params.minSize_pix != -1)
1424  minSize = std::min(_params.minSize_pix, minSize);
1425  return minSize;
1426 }
1427 /************************************
1428  *
1429  *
1430  *
1431  *
1432  ************************************/
1433 bool MarkerDetector_Impl::warp(Mat &in, Mat &out, Size size, vector<Point2f> points)
1434 {
1435  if (points.size() != 4)
1436  throw cv::Exception(9001, "point.size()!=4", "MarkerDetector_Impl::warp", __FILE__, __LINE__);
1437  // obtain the perspective transform
1438  Point2f pointsRes[4], pointsIn[4];
1439  for (int i = 0; i < 4; i++)
1440  pointsIn[i] = points[i];
1441  pointsRes[0] = (Point2f(0, 0));
1442  pointsRes[1] = Point2f(static_cast<float>(size.width - 1), 0.f);
1443  pointsRes[2] =
1444  Point2f(static_cast<float>(size.width - 1), static_cast<float>(size.height - 1));
1445  pointsRes[3] = Point2f(0.f, static_cast<float>(size.height - 1));
1446  Mat M = getPerspectiveTransform(pointsIn, pointsRes);
1447  cv::warpPerspective(in, out, M, size, cv::INTER_LINEAR);
1448  // cv::warpPerspective(in, out, M, size, cv::INTER_NEAREST);
1449  return true;
1450 }
1451 
1452 
1453 /************************************
1454  *
1455  *
1456  *
1457  *
1458  ************************************/
1459 int MarkerDetector_Impl::perimeter(const vector<Point2f> &a)
1460 {
1461  int sum = 0;
1462  for (unsigned int i = 0; i < a.size(); i++)
1463  {
1464  int i2 = (i + 1) % a.size();
1465  sum += static_cast<int>(sqrt((a[i].x - a[i2].x) * (a[i].x - a[i2].x) +
1466  (a[i].y - a[i2].y) * (a[i].y - a[i2].y)));
1467  }
1468  return sum;
1469 }
1470 
1471 
1474 void MarkerDetector_Impl::interpolate2Dline(const std::vector<cv::Point2f> &inPoints,
1475  cv::Point3f &outLine)
1476 {
1477  float minX, maxX, minY, maxY;
1478  minX = maxX = inPoints[0].x;
1479  minY = maxY = inPoints[0].y;
1480  for (unsigned int i = 1; i < inPoints.size(); i++)
1481  {
1482  if (inPoints[i].x < minX)
1483  minX = inPoints[i].x;
1484  if (inPoints[i].x > maxX)
1485  maxX = inPoints[i].x;
1486  if (inPoints[i].y < minY)
1487  minY = inPoints[i].y;
1488  if (inPoints[i].y > maxY)
1489  maxY = inPoints[i].y;
1490  }
1491 
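 // A line ax + by + c = 0 is fitted by least squares. To keep the system well
 // conditioned, y = Ax + C is solved when the points spread more along x
 // (outLine = (A, -1, C)) and x = By + C otherwise (outLine = (-1, B, C)).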
1492  // create matrices of equation system
1493  const int pointsCount = static_cast<int>(inPoints.size());
1494  Mat A(pointsCount, 2, CV_32FC1, Scalar(0));
1495  Mat B(pointsCount, 1, CV_32FC1, Scalar(0));
1496  Mat X;
1497 
1498  if (maxX - minX > maxY - minY)
1499  {
1500  // Ax + C = y
1501  for (int i = 0; i < pointsCount; i++)
1502  {
1503  A.at<float>(i, 0) = inPoints[i].x;
1504  A.at<float>(i, 1) = 1.;
1505  B.at<float>(i, 0) = inPoints[i].y;
1506  }
1507 
1508  // solve system
1509  solve(A, B, X, DECOMP_SVD);
1510  // store the line as coefficients (A, B, C) of Ax + By + C = 0
1511  outLine = Point3f(X.at<float>(0, 0), -1., X.at<float>(1, 0));
1512  }
1513  else
1514  {
1515  // By + C = x
1516  for (int i = 0; i < pointsCount; i++)
1517  {
1518  A.at<float>(i, 0) = inPoints[i].y;
1519  A.at<float>(i, 1) = 1.;
1520  B.at<float>(i, 0) = inPoints[i].x;
1521  }
1522 
1523  // solve system
1524  solve(A, B, X, DECOMP_SVD);
1525  // store the line as coefficients (A, B, C) of Ax + By + C = 0
1526  outLine = Point3f(-1., X.at<float>(0, 0), X.at<float>(1, 0));
1527  }
1528 }
1529 
1532 Point2f MarkerDetector_Impl::getCrossPoint(const cv::Point3f &line1, const cv::Point3f &line2)
1533 {
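 // The intersection of a1*x + b1*y + c1 = 0 and a2*x + b2*y + c2 = 0 is found by
 // solving the 2x2 system [a1 b1; a2 b2] * [x y]^T = [-c1 -c2]^T.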
1534  // create matrices of equation system
1535  Mat A(2, 2, CV_32FC1, Scalar(0));
1536  Mat B(2, 1, CV_32FC1, Scalar(0));
1537  Mat X;
1538 
1539  A.at<float>(0, 0) = line1.x;
1540  A.at<float>(0, 1) = line1.y;
1541  B.at<float>(0, 0) = -line1.z;
1542 
1543  A.at<float>(1, 0) = line2.x;
1544  A.at<float>(1, 1) = line2.y;
1545  B.at<float>(1, 0) = -line2.z;
1546 
1547  // solve system
1548  solve(A, B, X, DECOMP_SVD);
1549  return Point2f(X.at<float>(0, 0), X.at<float>(1, 0));
1550 }
1551 
1552 // template <typename T>
1553 // void MarkerDetector_Impl::cornerUpsample(vector<T>& MarkerCanditates, cv::Size
1554 // lowResImageSize ){
1555 // cornerUpsample_SUBP(MarkerCanditates,lowResImageSize);
1556 // }
1557 
1558 // template <typename T>
1559 // void MarkerDetector_Impl::cornerUpsample_SUBP(vector<T>& MarkerCanditates, cv::Size
1560 // lowResImageSize ){
1561 // if (MarkerCanditates.size()==0)return;
1562 // //first, determine the image in the pyramid nearest to this one
1563 // int startPyrImg=0;
1564 
1565 // for(size_t i=0;i<imagePyramid.size();i++){
1566 // if ( lowResImageSize.width < imagePyramid[i].cols) startPyrImg=i;
1567 // else break;
1568 // }
1570 
1571 // cv::Size prevLowResSize=lowResImageSize;
1572 // for(int curpyr=startPyrImg;curpyr>=0;curpyr--){
1573 // float factor= float(imagePyramid[curpyr].cols)/float(prevLowResSize.width) ;
1574 // //upsample corner locations
1575 // for(auto &m:MarkerCanditates)
1576 // for(auto &point:m) {point*=factor;}
1577 // int halfwsize= 0.5+2.5*factor;
1578 // vector<cv::Point2f> p2d;//p2d.reserve(MarkerCanditates.size()*4);
1579 // for(auto &m:MarkerCanditates)
1580 // for(auto &point:m) { p2d.push_back(point);}
1581 // cv::cornerSubPix(
1582 // imagePyramid[curpyr],p2d,cv::Size(halfwsize,halfwsize),cv::Size(-1,-1),cv::TermCriteria(cv::TermCriteria::MAX_ITER
1583 // , 4,0.5));
1584 // int cidx=0;
1585 // for(auto &m:MarkerCanditates)
1586 // for(auto &point:m) {point =p2d[cidx++];}
1587 
1588 // prevLowResSize=imagePyramid[curpyr].size();
1589 // }
1590 //}
1591 
1592 
1593 /************************************
1594  *
1595  *
1596  *
1597  *
1598  ************************************/
1599 void MarkerDetector_Impl::drawAllContours(Mat input, std::vector<std::vector<cv::Point>> &contours)
1600 {
1601  drawContours(input, contours, -1, Scalar(255, 0, 255));
1602 }
1603 
1604 /************************************
1605  *
1606  *
1607  *
1608  *
1609  ************************************/
1610 void MarkerDetector_Impl::drawContour(Mat &in, vector<Point> &contour, Scalar color)
1611 {
1612  for (unsigned int i = 0; i < contour.size(); i++)
1613  {
1614  cv::rectangle(in, contour[i], contour[i], color);
1615  }
1616 }
1617 
1618 void MarkerDetector_Impl::drawApproxCurve(Mat &in, vector<Point> &contour, Scalar color, int thickness)
1619 {
1620  for (unsigned int i = 0; i < contour.size(); i++)
1621  {
1622  cv::line(in, contour[i], contour[(i + 1) % contour.size()], color, thickness);
1623  }
1624 }
1625 /************************************
1626  *
1627  *
1628  *
1629  *
1630  ************************************/
1631 
1632 void MarkerDetector_Impl::draw(Mat out, const vector<Marker> &markers)
1633 {
1634  for (unsigned int i = 0; i < markers.size(); i++)
1635  {
1636  cv::line(out, markers[i][0], markers[i][1], cv::Scalar(255, 0, 0), 2);
1637  cv::line(out, markers[i][1], markers[i][2], cv::Scalar(255, 0, 0), 2);
1638  cv::line(out, markers[i][2], markers[i][3], cv::Scalar(255, 0, 0), 2);
1639  cv::line(out, markers[i][3], markers[i][0], cv::Scalar(255, 0, 0), 2);
1640  }
1641 }
1642 
1643 
1644 void MarkerDetector_Impl::setMarkerLabeler(cv::Ptr<MarkerLabeler> detector)
1645 {
1646  markerIdDetector = detector;
1647 }
1648 
1649 void MarkerDetector_Impl::setDictionary(int dict_type, float error_correction_rate)
1650 {
1651  markerIdDetector =
1652  MarkerLabeler::create((Dictionary::DICT_TYPES)dict_type, error_correction_rate);
1653  _params.error_correction_rate = error_correction_rate;
1654  _params.dictionary = aruco::Dictionary::getTypeString((Dictionary::DICT_TYPES)dict_type);
1655 }
1656 
1657 
1658 
1659 void MarkerDetector_Impl::setDictionary(string dict_type, float error_correction_rate)
1660 {
1661  auto _to_string = [](float i)
1662  {
1663  std::stringstream str;
1664  str << i;
1665  return str.str();
1666  };
1667  _params.dictionary = dict_type;
1668  markerIdDetector = MarkerLabeler::create(dict_type, _to_string(error_correction_rate));
1669  _params.error_correction_rate = error_correction_rate;
1670 }
1671 
1672 
1673 cv::Mat MarkerDetector_Impl::getThresholdedImage(uint32_t idx)
1674 {
1675  if (_thres_Images.size() == 0)
1676  return cv::Mat();
1677  if (idx >= _thres_Images.size())
1678  idx = _thres_Images.size() - 1; // last one is the original image
1679  return _thres_Images[idx];
1680 }
1681 
1682 
1683 
1686 void MarkerDetector_Impl::distortPoints(vector<cv::Point2f> in, vector<cv::Point2f> &out,
1687  const Mat &camMatrix, const Mat &distCoeff)
1688 {
1689  // trivial extrinsics
1690  cv::Mat Rvec = cv::Mat(3, 1, CV_32FC1, cv::Scalar::all(0));
1691  cv::Mat Tvec = Rvec.clone();
1692  // build 3D points on the normalized plane and reproject them, so that OpenCV applies the distortion internally
1693  vector<cv::Point3f> cornersPoints3d;
1694  for (unsigned int i = 0; i < in.size(); i++)
1695  cornersPoints3d.push_back(
1696  cv::Point3f((in[i].x - camMatrix.at<float>(0, 2)) / camMatrix.at<float>(0, 0), // x
1697  (in[i].y - camMatrix.at<float>(1, 2)) / camMatrix.at<float>(1, 1), // y
1698  1)); // z
1699  cv::projectPoints(cornersPoints3d, Rvec, Tvec, camMatrix, distCoeff, out);
1700 }
1701 
1702 
1705 void MarkerDetector_Impl::saveParamsToFile(const std::string &path) const
1706 {
1707  cv::FileStorage fs(path, cv::FileStorage::WRITE);
1708  if (!fs.isOpened())
1709  throw std::runtime_error("Could not open " + path);
1710  _params.save(fs);
1711 }
1712 
1715 void MarkerDetector_Impl::loadParamsFromFile(const std::string &path)
1716 {
1717  cv::FileStorage fs(path, cv::FileStorage::READ);
1718  if (!fs.isOpened())
1719  throw std::runtime_error("Could not open " + path);
1720  _params.load(fs);
1721  setDictionary(_params.dictionary, _params.error_correction_rate);
1722 }
1723 
1724 void MarkerDetector_Impl::toStream(std::ostream &str) const
1725 {
1726  uint64_t sig = 13213;
1727  str.write((char *)&sig, sizeof(sig));
1728  _params.toStream(str);
1729 }
1730 
1731 void MarkerDetector_Impl::fromStream(std::istream &str)
1732 {
1733  uint64_t sig = 13213;
1734  str.read((char *)&sig, sizeof(sig));
1735  if (sig != 13213)
1736  throw std::runtime_error("MarkerDetector_Impl::fromStream invalid signature");
1737  _params.fromStream(str);
1738  setDictionary(_params.dictionary, _params.error_correction_rate);
1739 }
1740 
1741 
1742 
1743 void MarkerDetector_Impl::filter_ambiguous_query(std::vector<cv::DMatch> &matches)
1744 {
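 // keep only the best (smallest-distance) match per queryIdx; losing duplicates are
 // marked with queryIdx = -1 and erased at the end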
1745  if (matches.size() == 0)
1746  return;
1747  // determine the maximum value of queryIdx
1748  int maxT = -1;
1749  for (auto m : matches)
1750  maxT = std::max(maxT, m.queryIdx);
1751 
1752  // now, create the vector with the elements
1753  vector<int> used(maxT + 1, -1);
1754  vector<cv::DMatch> best_matches(maxT);
1755  int idx = 0;
1756  bool needRemove = false;
1757 
1758  for (auto &match : matches)
1759  {
1760  if (used[match.queryIdx] == -1)
1761  {
1762  used[match.queryIdx] = idx;
1763  }
1764  else
1765  {
1766  if (matches[used[match.queryIdx]].distance > match.distance)
1767  {
1768  matches[used[match.queryIdx]].queryIdx = -1; // invalidate the other match
1769  used[match.queryIdx] = idx;
1770  needRemove = true;
1771  }
1772  else
1773  {
1774  match.queryIdx = -1; // invalidate this match
1775  needRemove = true;
1776  }
1777  }
1778  idx++;
1779  }
1780 
1781 
1782  if (needRemove)
1783  matches.erase(std::remove_if(matches.begin(), matches.end(),
1784  [](const cv::DMatch &m)
1785  { return m.trainIdx == -1 || m.queryIdx == -1; }),
1786  matches.end());
1787 }
1788 
1789 void MarkerDetector_Impl::cornerUpsample(std::vector<Marker> &MarkerCanditates,
1790  cv::Size lowResImageSize)
1791 {
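 // Corners estimated on the low-resolution image are propagated up through the
 // pyramid: at each level they are scaled by the width ratio and refined with
 // cornerSubPix until the full-resolution level 0 is reached.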
1792  if (MarkerCanditates.size() == 0)
1793  return;
1794  // first, determine the image in the pyramid nearest to this one
1795  int startPyrImg = 0;
1796 
1797  for (size_t i = 0; i < imagePyramid.size(); i++)
1798  {
1799  if (lowResImageSize.width < imagePyramid[i].cols)
1800  startPyrImg = i;
1801  else
1802  break;
1803  }
1804  //#define _aruco_marker_detector_fast
1805 
1806  cv::Size prevLowResSize = lowResImageSize;
1807  for (int curpyr = startPyrImg; curpyr >= 0; curpyr--)
1808  {
1809  float factor = float(imagePyramid[curpyr].cols) / float(prevLowResSize.width);
1810  // upsample corner locations
1811  for (auto &m : MarkerCanditates)
1812  for (auto &point : m)
1813  {
1814  point *= factor;
1815  }
1816  int halfwsize = 0.5 + 2.5 * factor;
1817  std::vector<cv::Point2f> p2d; // p2d.reserve(MarkerCanditates.size()*4);
1818  for (auto &m : MarkerCanditates)
1819  for (auto &point : m)
1820  {
1821  p2d.push_back(point);
1822  }
1823  cv::cornerSubPix(imagePyramid[curpyr], p2d, cv::Size(halfwsize, halfwsize),
1824  cv::Size(-1, -1), cv::TermCriteria(cv::TermCriteria::MAX_ITER, 4, 0.5));
1825  int cidx = 0;
1826  for (auto &m : MarkerCanditates)
1827  for (auto &point : m)
1828  {
1829  point = p2d[cidx++];
1830  }
1831 
1832  prevLowResSize = imagePyramid[curpyr].size();
1833  }
1834 }
1835 
1836 
1837 void MarkerDetector_Impl::cornerUpsample(std::vector<std::vector<cv::Point2f>> &MarkerCanditates,
1838  cv::Size lowResImageSize)
1839 {
1840  std::vector<Marker> markers;
1841  markers.reserve(MarkerCanditates.size());
1842  std::copy(MarkerCanditates.begin(), MarkerCanditates.end(), std::back_inserter(markers));
1843 
1844  cornerUpsample(markers, lowResImageSize);
1845 
1846  for (std::size_t i = 0; i < markers.size(); i++)
1847  {
1848  MarkerCanditates[i] = markers[i];
1849  }
1850 }
1851 }; // namespace aruco