fractaldetector.cpp
#include "fractaldetector.h"
#include "opencv2/calib3d/calib3d.hpp"
#include "cvdrawingutils.h"
#include <algorithm>
#include "aruco_cvversioning.h"

namespace aruco
{
FractalDetector::FractalDetector()
{
    _markerDetector = new MarkerDetector();

    // Use a finer warp resolution when analysing marker candidates.
    auto p = _markerDetector->getParameters();
    p.markerWarpPixSize = 10;
    _markerDetector->setParameters(p);
};

void FractalDetector::setConfiguration(int params)
{
    _params.configuration_type = FractalMarkerSet::getTypeString((FractalMarkerSet::CONF_TYPES)params);
    _fractalLabeler = FractalMarkerLabeler::create(_params.configuration_type);
    _markerDetector->setMarkerLabeler(_fractalLabeler);
}

void FractalDetector::setConfiguration(std::string params)
{
    _params.configuration_type = params;
    _fractalLabeler = FractalMarkerLabeler::create(params);
    _markerDetector->setMarkerLabeler(_fractalLabeler);
}
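A minimal usage sketch of the configuration step above; the configuration name is an assumed example, not something taken from this file:

#include "fractaldetector.h"

int main()
{
    aruco::FractalDetector detector;

    // "FRACTAL_2L_6" is an assumed example of a configuration name accepted
    // by FractalMarkerLabeler::create; the integer overload selects the same
    // configurations through FractalMarkerSet::CONF_TYPES.
    detector.setConfiguration("FRACTAL_2L_6");
    return 0;
}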

void FractalDetector::drawMarkers(cv::Mat &img)
{
    float size = std::max(1., float(img.cols) / 1280.);
    for (auto m : Markers)
        m.draw(img, cv::Scalar(0, 0, 255), size, false);
}

void FractalDetector::draw2d(cv::Mat &img)
{
    if (Markers.size() > 0)
    {
        std::map<int, FractalMarker> id_fmarker =
            _fractalLabeler->_fractalMarkerSet.fractalMarkerCollection;

        // Inner corners of the whole fractal set, expressed on the marker
        // plane (z = 0), so only x and y are kept.
        std::vector<cv::Point2f> inners;
        std::map<int, std::vector<cv::Point3f>> id_innerCorners =
            _fractalLabeler->_fractalMarkerSet.getInnerCorners();
        for (auto id_innerC : id_innerCorners)
        {
            for (auto pt : id_innerC.second)
                inners.push_back(cv::Point2f(pt.x, pt.y));
        }

        // Correspondences between marker-plane coordinates and the detected
        // image corners of the observed markers.
        std::vector<cv::Point2f> points3d;
        std::vector<cv::Point2f> points2d;
        for (auto m : Markers)
        {
            for (auto p : id_fmarker[m.id].points)
                points3d.push_back(cv::Point2f(p.x, p.y));

            for (auto p : m)
                points2d.push_back(p);
        }

        // Homography from the marker plane to the image, used to reproject
        // and draw the inner corners.
        cv::Mat H = cv::findHomography(points3d, points2d);
        std::vector<cv::Point2f> dstPnt;
        cv::perspectiveTransform(inners, dstPnt, H);

        float size = std::max(1., float(img.cols) / 1280.);
        for (auto p : dstPnt)
            cv::circle(img, p, size, cv::Scalar(0, 0, 255), CV_FILLED);
    }
}
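draw2d estimates a single homography from the markers' canonical plane coordinates to their detected image corners, then maps every inner corner through it. Below is a self-contained sketch of that same findHomography / perspectiveTransform pattern, using made-up corner values rather than detector output:

#include <opencv2/calib3d.hpp>
#include <opencv2/core.hpp>
#include <vector>

int main()
{
    // Canonical marker-plane corners (z dropped, as draw2d does) ...
    std::vector<cv::Point2f> planePts = { {-1, -1}, {1, -1}, {1, 1}, {-1, 1} };
    // ... and hypothetical pixel locations where they were detected.
    std::vector<cv::Point2f> imagePts = { {100, 120}, {300, 110}, {310, 330}, {90, 340} };

    // Plane-to-image homography, then reproject any other plane point with it.
    cv::Mat H = cv::findHomography(planePts, imagePts);

    std::vector<cv::Point2f> inner = { {0.f, 0.f} }, innerInImage;
    cv::perspectiveTransform(inner, innerInImage, H);
    return 0;
}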

void FractalDetector::draw3d(cv::Mat &img, bool cube, bool axis)
{
    if (Tracker.isPoseValid())
    {
        cv::Mat rot;
        cv::Rodrigues(Tracker.getRvec(), rot);

        // Keep only the inner points whose unit direction in the camera
        // frame points mostly along the optical axis.
        std::vector<cv::Point3f> innerPoints3d;
        for (auto pt : Tracker.getInner3d())
        {
            cv::Mat_<double> src(3, 1, rot.type());
            src(0, 0) = pt.x;
            src(1, 0) = pt.y;
            src(2, 0) = pt.z;

            cv::Mat cam_image_point = rot * src + Tracker.getTvec();
            cam_image_point = cam_image_point / cv::norm(cam_image_point);

            if (cam_image_point.at<double>(2, 0) > 0.85)
                innerPoints3d.push_back(pt);
        }

        // Draw inner points
        if (innerPoints3d.size() > 0)
        {
            std::vector<cv::Point2f> innerPoints;
            projectPoints(innerPoints3d, Tracker.getRvec(), Tracker.getTvec(),
                          _cam_params.CameraMatrix, _cam_params.Distorsion, innerPoints);
            for (auto p : innerPoints)
                circle(img, p, 3, cv::Scalar(0, 0, 255), CV_FILLED);
        }

        // Draw cube
        if (cube)
        {
            std::map<int, FractalMarker> id_fmarker = Tracker.getFractal().fractalMarkerCollection;
            for (auto m : Markers)
                draw3dCube(img, id_fmarker[m.id], _cam_params, 2);
        }

        // Draw axes (axis length below is an assumed value: half the first
        // fractal marker's size)
        if (axis)
            CvDrawingUtils::draw3dAxis(img, _cam_params, Tracker.getRvec(), Tracker.getTvec(),
                                       Tracker.getFractal().fractalMarkerCollection.begin()->second.getMarkerSize() / 2);
    }
}
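The filtering loop at the top of draw3d keeps an inner point only when its unit direction in the camera frame points mostly along the optical axis (z component above 0.85). A stand-alone sketch of that test, with the pose passed in explicitly (a paraphrase of the logic, not library code):

#include <opencv2/calib3d.hpp>
#include <opencv2/core.hpp>

// Returns true when pt (in marker coordinates) lies close to the camera's
// optical axis after applying the pose (rvec, tvec), using the same 0.85
// threshold as draw3d. rvec and tvec are assumed to be 3x1 CV_64F.
bool roughlyInFront(const cv::Point3f &pt, const cv::Mat &rvec, const cv::Mat &tvec)
{
    cv::Mat rot;
    cv::Rodrigues(rvec, rot);  // axis-angle -> 3x3 rotation matrix

    cv::Mat_<double> src(3, 1);
    src(0, 0) = pt.x;
    src(1, 0) = pt.y;
    src(2, 0) = pt.z;

    cv::Mat camPt = rot * src + tvec;        // point expressed in the camera frame
    camPt = camPt / cv::norm(camPt);         // unit direction towards the point
    return camPt.at<double>(2, 0) > 0.85;
}

int main()
{
    cv::Mat rvec = cv::Mat::zeros(3, 1, CV_64F);
    cv::Mat tvec = (cv::Mat_<double>(3, 1) << 0, 0, 1);
    return roughlyInFront(cv::Point3f(0.f, 0.f, 0.f), rvec, tvec) ? 0 : 1;
}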

void FractalDetector::draw3dCube(cv::Mat &Image, FractalMarker m,
                                 const CameraParameters &CP, int lineSize)
{
    cv::Mat objectPoints(8, 3, CV_32FC1);

    float msize = m.getMarkerSize();
    float halfSize = msize / 2;

    // Bottom face of the cube, on the marker plane (z = 0).
    objectPoints.at<float>(0, 0) = -halfSize;
    objectPoints.at<float>(0, 1) = -halfSize;
    objectPoints.at<float>(0, 2) = 0;
    objectPoints.at<float>(1, 0) = halfSize;
    objectPoints.at<float>(1, 1) = -halfSize;
    objectPoints.at<float>(1, 2) = 0;
    objectPoints.at<float>(2, 0) = halfSize;
    objectPoints.at<float>(2, 1) = halfSize;
    objectPoints.at<float>(2, 2) = 0;
    objectPoints.at<float>(3, 0) = -halfSize;
    objectPoints.at<float>(3, 1) = halfSize;
    objectPoints.at<float>(3, 2) = 0;

    // Top face, one marker size above the plane.
    objectPoints.at<float>(4, 0) = -halfSize;
    objectPoints.at<float>(4, 1) = -halfSize;
    objectPoints.at<float>(4, 2) = msize;
    objectPoints.at<float>(5, 0) = halfSize;
    objectPoints.at<float>(5, 1) = -halfSize;
    objectPoints.at<float>(5, 2) = msize;
    objectPoints.at<float>(6, 0) = halfSize;
    objectPoints.at<float>(6, 1) = halfSize;
    objectPoints.at<float>(6, 2) = msize;
    objectPoints.at<float>(7, 0) = -halfSize;
    objectPoints.at<float>(7, 1) = halfSize;
    objectPoints.at<float>(7, 2) = msize;

    std::vector<cv::Point2f> imagePoints;
    projectPoints(objectPoints, getRvec(), getTvec(), CP.CameraMatrix, CP.Distorsion, imagePoints);

    // Bottom face, top face, and the vertical edges joining them.
    for (int i = 0; i < 4; i++)
        cv::line(Image, imagePoints[i], imagePoints[(i + 1) % 4], cv::Scalar(0, 0, 255, 255), lineSize);

    for (int i = 0; i < 4; i++)
        cv::line(Image, imagePoints[i + 4], imagePoints[4 + (i + 1) % 4],
                 cv::Scalar(0, 0, 255, 255), lineSize);

    for (int i = 0; i < 4; i++)
        cv::line(Image, imagePoints[i], imagePoints[i + 4], cv::Scalar(0, 0, 255, 255), lineSize);
}
}; // namespace aruco
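The projectPoints call in draw3dCube is the standard OpenCV projection of 3-D points through a pose and camera intrinsics. The sketch below repeats that pattern with explicit, assumed intrinsics and pose standing in for CP.CameraMatrix, CP.Distorsion, getRvec() and getTvec():

#include <opencv2/calib3d.hpp>
#include <opencv2/core.hpp>
#include <vector>

int main()
{
    // Hypothetical cube: a marker of side s lying at z = 0, extruded by s.
    float s = 0.05f;
    std::vector<cv::Point3f> corners = {
        {-s / 2, -s / 2, 0}, {s / 2, -s / 2, 0}, {s / 2, s / 2, 0}, {-s / 2, s / 2, 0},
        {-s / 2, -s / 2, s}, {s / 2, -s / 2, s}, {s / 2, s / 2, s}, {-s / 2, s / 2, s}};

    // Assumed pinhole intrinsics and pose (example values only).
    cv::Mat K = (cv::Mat_<double>(3, 3) << 800, 0, 320, 0, 800, 240, 0, 0, 1);
    cv::Mat dist = cv::Mat::zeros(5, 1, CV_64F);   // no lens distortion assumed
    cv::Mat rvec = cv::Mat::zeros(3, 1, CV_64F);   // marker facing the camera
    cv::Mat tvec = (cv::Mat_<double>(3, 1) << 0, 0, 0.5);

    std::vector<cv::Point2f> pixels;
    cv::projectPoints(corners, rvec, tvec, K, dist, pixels);
    return 0;
}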