SfM.cpp
Go to the documentation of this file.
1 /*
2  * This file is part of ALVAR, A Library for Virtual and Augmented Reality.
3  *
4  * Copyright 2007-2012 VTT Technical Research Centre of Finland
5  *
6  * Contact: VTT Augmented Reality Team <alvar.info@vtt.fi>
7  * <http://www.vtt.fi/multimedia/alvar.html>
8  *
9  * ALVAR is free software; you can redistribute it and/or modify it under the
10  * terms of the GNU Lesser General Public License as published by the Free
11  * Software Foundation; either version 2.1 of the License, or (at your option)
12  * any later version.
13  *
14  * This library is distributed in the hope that it will be useful, but WITHOUT
15  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
16  * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
17  * for more details.
18  *
19  * You should have received a copy of the GNU Lesser General Public License
20  * along with ALVAR; if not, see
21  * <http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html>.
22  */
23 
24 #include "SfM.h"
25 
26 namespace alvar {
27 
29 {
31  double prev[3];
32 
33 public:
34 
36  {
37  is_initialized = false;
38  memset(prev, 0, 3*sizeof(double));
39  }
40 
41  bool UpdateDistance(Pose* pose, double limit=10)
42  {
43  double trad[3]; CvMat tra = cvMat(3, 1, CV_64F, trad);
44  Pose p = *pose;
45  p.Invert();
46  p.GetTranslation(&tra);
47  if(is_initialized == false)
48  {
49  memcpy(prev, trad, 3*sizeof(double));
50  is_initialized = true;
51  return false;
52  }
53  double dist = (prev[0]-trad[0])*(prev[0]-trad[0]) +
54  (prev[1]-trad[1])*(prev[1]-trad[1]) +
55  (prev[2]-trad[2])*(prev[2]-trad[2]);
56  if(dist > limit)
57  {
58  memcpy(prev, trad, 3*sizeof(double));
59  return true;
60  }
61  return false;
62  }
63 };
65 
66 void SimpleSfM::Reset(bool reset_also_triangulated) {
67  pose_ok = false;
68  container = container_reset_point;
69  if (reset_also_triangulated) {
70  container_triangulated = container_triangulated_reset_point;
71  }
72  pose_difference.Reset();
73  pose_difference.Mirror(false, true, true);
74 }
75 
77  container_reset_point = container;
78  container_triangulated_reset_point = container_triangulated;
79 }
80 
82  container.clear();
83 }
84 
86 
87 Pose *SimpleSfM::GetPose() { return &pose; }
88 
89 bool SimpleSfM::AddMultiMarker(const char *fname, FILE_FORMAT format/* = FILE_FORMAT_XML*/) {
90  MultiMarkerEC mm;
91  return mm.Load(container, fname, format, 0, 0,1023);
92 }
93 
95  return mm->MarkersToEC(container, 0, 0, 1023);
96 }
97 
98 void SimpleSfM::AddMarker(int marker_id, double edge_length, Pose &pose) {
99  marker_detector.MarkerToEC(container, marker_id, edge_length, pose, 0, 0, 1023);
100 }
101 
102 float PointVectorFromCamera(CvPoint3D32f p3d, CvPoint3D32f &p3d_vec, Pose *camera_pose) {
103  double pd[16], v[4] = {0,0,0,1};
104  CvMat Pi = cvMat(4, 4, CV_64F, pd);
105  CvMat V = cvMat(4, 1, CV_64F, v);
106 
107  // Camera location in marker coordinates
108  camera_pose->GetMatrix(&Pi);
109  cvInv(&Pi, &Pi);
110  cvMatMul(&Pi, &V, &V);
111  v[0] /= v[3];
112  v[1] /= v[3];
113  v[2] /= v[3];
114 
115  // Vector from camera-point to the 3D-point in marker coordinates
116  p3d_vec.x = float(p3d.x - v[0]);
117  p3d_vec.y = float(p3d.y - v[1]);
118  p3d_vec.z = float(p3d.z - v[2]);
119 
120  return std::sqrt(p3d_vec.x*p3d_vec.x + p3d_vec.y*p3d_vec.y + p3d_vec.z*p3d_vec.z);
121 }
122 
123 void CreateShadowPoint(CvPoint3D32f &p3d_sh, CvPoint3D32f p3d, CameraEC *cam, Pose *camera_pose, float parallax_length, float triangulate_angle) {
124  // Vector from camera-point to the 3D-point in marker coordinates
125  float l = PointVectorFromCamera(p3d, p3d_sh, camera_pose);
126 
127  // Figure out a suitable matching shadow point distance
128  CvPoint2D32f p2d;
129  CvPoint3D32f p3d2;
130  cam->ProjectPoint(p3d, camera_pose, p2d);
131  p2d.x += parallax_length;
132  cam->Get3dOnDepth(camera_pose, p2d, l, p3d2);
133  p3d2.x -= p3d.x;
134  p3d2.y -= p3d.y;
135  p3d2.z -= p3d.z;
136  float pl = std::sqrt(p3d2.x*p3d2.x + p3d2.y*p3d2.y + p3d2.z*p3d2.z);
137  float shadow_point_dist = float(pl / std::abs(tan(triangulate_angle*3.1415926535/180.))); // How far we need the shadow point to have the parallax limit match
138 
139  // Create the shadow point
140  p3d_sh.x /= l;
141  p3d_sh.y /= l;
142  p3d_sh.z /= l;
143  p3d_sh.x *= shadow_point_dist;
144  p3d_sh.y *= shadow_point_dist;
145  p3d_sh.z *= shadow_point_dist;
146  p3d_sh.x += p3d.x;
147  p3d_sh.y += p3d.y;
148  p3d_sh.z += p3d.z;
149 
150  //std::cout<<"p : "<<p3d.x<<","<<p3d.y<<","<<p3d.z<<std::endl;
151  //std::cout<<"psh: "<<p3d_sh.x<<","<<p3d_sh.y<<","<<p3d_sh.z<<std::endl;
152 }
153 
154 bool SimpleSfM::Update(IplImage *image, bool assume_plane, bool triangulate, float reproj_err_limit, float triangulate_angle) {
155  const int min_triangulate=50, max_triangulate=150;
156  const float plane_dist_limit = 100.f;
157  const bool remember_3d_points = true;
158  const float parallax_length = 10.f;
159  const float parallax_length_sq = parallax_length*parallax_length;
160  float reproj_err_limit_sq = reproj_err_limit*reproj_err_limit;
161  std::map<int, Feature>::iterator iter;
162  std::map<int, Feature>::iterator iter_end = container.end();
163 
164  // #1. Detect marker corners and track features
165  marker_detector.Detect(image, &cam, container, 0, 0, 1023, true, false);
166  tf.Purge();
167  tf.Track(image, 0, container, 1); // Track without adding features
168  //tf.Track(image, 0, container, 1, 1024, 65535); // Track with adding features
169  tf.EraseNonTracked(container, 1);
170 
171  // #2. Update pose
172  if (!pose_ok) pose_ok = cam.CalcExteriorOrientation(container, &pose);
173  pose_ok = cam.UpdatePose(container, &pose);
174  if (!pose_ok) pose_ok = cam.UpdatePose(container, &pose, 0); // Only markers
175  if (!pose_ok) return false;
176 
177  // #3. Reproject features and their shadows
178  double err_markers = cam.Reproject(container, &pose, 0, true, false, 100);
179  if (err_markers > reproj_err_limit*2) { Reset(false); return false;}
180  double err_features = cam.Reproject(container, &pose, 1, false, false, 100);
181  if ((err_markers == 0) && (err_features > reproj_err_limit*2)) {
182  // Error is so large, that we just are satisfied we have some pose
183  // Don't triangulate and remove points based on this result
184  return true;
185  }
186 
187  // #4. Add previously triangulated features to container if they are visible...
188  if (remember_3d_points) {
189  IplImage *mask = tf.NewFeatureMask();
190  iter = container_triangulated.begin();
191  while(iter != container_triangulated.end()) {
192  if (container.find(iter->first) == container.end()) {
193  Camera *c = &cam;
194  c->ProjectPoint(iter->second.p3d, &pose, iter->second.projected_p2d);
195  if ((iter->second.projected_p2d.x >= 0) &&
196  (iter->second.projected_p2d.y >= 0) &&
197  (iter->second.projected_p2d.x < image->width) &&
198  (iter->second.projected_p2d.y < image->height))
199  {
200  CvScalar s = cvGet2D(mask, int(iter->second.projected_p2d.y), int(iter->second.projected_p2d.x));
201  if (s.val[0] == 0) {
202  container[iter->first] = iter->second;
203  container[iter->first].estimation_type = 3;
204  container[iter->first].p2d = container[iter->first].projected_p2d;
205  }
206  }
207  }
208  iter++;
209  }
210  }
211 
212  // #5. Add other features to container
213  // Assume them initially on marker plane if (assume_plane == true)
214  tf.AddFeatures(container, 1, 1024, 0xffffff); //65535);
215  if (assume_plane) {
216  for (iter = container.begin(); iter != iter_end; iter++) {
217  Feature &f = iter->second;
218  //if (f.type_id == 0) continue;
219  if (!f.has_p3d && f.estimation_type < 1) {
220  //cam.Get3dOnPlane(&pose, f.p2d, f.p3d);
221 
222  //CvPoint3D32f p3d_vec;
223  //float l = PointVectorFromCamera(f.p3d, p3d_vec, &pose);
224  //if (l > plane_dist_limit) cam.Get3dOnDepth(&pose, f.p2d, plane_dist_limit, f.p3d);
225  cam.Get3dOnDepth(&pose, f.p2d, plane_dist_limit, f.p3d);
226 
227  // TODO: Now we don't create a shadow point for plane points. This is because
228  // we don't want to remember them as 3d points in container_triangulated.
229  // We probably get the same benefit just by assuming that everything is on plane?
230  //CreateShadowPoint(f.p3d_sh, f.p3d, &pose, shadow_point_dist);
231  f.p3d_sh = f.p3d;
232 
233  f.has_p3d = true;
234  f.estimation_type = 1;
235  }
236  }
237  }
238 
239  // #6. Triangulate features with good parallax
240  if (triangulate) {
241  for (iter = container.begin(); iter != iter_end; iter++) {
242  Feature &f = iter->second;
243  //if (f.type_id == 0) continue;
244  if (!f.has_p3d && !f.has_stored_pose) {
245  f.pose1 = pose;
246  f.p2d1 = f.p2d;
247  f.has_stored_pose = true;
248  cam.Get3dOnDepth(&pose, f.p2d, 8, f.p3d);
249  CreateShadowPoint(f.p3d_sh, f.p3d, &cam, &pose, parallax_length, triangulate_angle);
250  }
251  if (!f.has_p3d && f.has_stored_pose && f.estimation_type < 2) {
252  double dist_sh_sq = (f.projected_p2d_sh.x-f.projected_p2d.x)*(f.projected_p2d_sh.x-f.projected_p2d.x);
253  dist_sh_sq += (f.projected_p2d_sh.y-f.projected_p2d.y)*(f.projected_p2d_sh.y-f.projected_p2d.y);
254  if (dist_sh_sq > parallax_length_sq) {
255  CvPoint2D32f u1 = f.p2d1;
256  CvPoint2D32f u2 = f.p2d;
257  cam.Camera::Undistort(u1);
258  cam.Camera::Undistort(u2);
259  if(cam.ReconstructFeature(&f.pose1, &pose, &u1, &u2, &f.p3d, 10e-6)) {
260  CreateShadowPoint(f.p3d_sh, f.p3d, &cam, &pose, parallax_length, triangulate_angle);
261  f.has_p3d = true;
262  } else
263  f.has_stored_pose = false;
264  f.estimation_type = 2;
265  }
266  }
267  }
268  }
269 
270  // #7. Erase poor features
271  cam.Reproject(container, &pose, 1, false, false, 100); // TODO: Why again...
272  iter = container.begin();
273  while(iter != container.end()) {
274  bool iter_inc = true;
275  if (iter->second.type_id > 0) {
276  Feature &f = iter->second;
277  if ((f.estimation_type == 0) || !f.has_p2d || !f.has_p3d);
278  else {
279  // Note that the projected_p2d needs to have valid content at this point
280  double dist_sq = (f.p2d.x-f.projected_p2d.x)*(f.p2d.x-f.projected_p2d.x);
281  dist_sq += (f.p2d.y-f.projected_p2d.y)*(f.p2d.y-f.projected_p2d.y);
282 
283  if (f.estimation_type == 1) {
284  if (dist_sq > (reproj_err_limit_sq)) f.has_p3d = false;
285  } else if (f.estimation_type == 2) {
286  if (dist_sq > (reproj_err_limit_sq)) {
287  container.erase(iter++);
288  iter_inc = false;
289  }
290  } else if (f.estimation_type == 3) {
291  if (container_triangulated.size() < min_triangulate) {
292  if (dist_sq > (reproj_err_limit_sq)) {
293  container.erase(iter++);
294  iter_inc = false;
295  }
296  } else {
297  if (dist_sq > (reproj_err_limit_sq)) f.has_p3d = false;
298  if (dist_sq > (reproj_err_limit_sq*2)) {
299  std::map<int, Feature>::iterator iter_tmp;
300  iter_tmp = container_triangulated.find(iter->first);
301  if (iter_tmp != container_triangulated.end()) {
302  container_triangulated.erase(iter_tmp);
303  }
304  container.erase(iter++);
305  iter_inc = false;
306  }
307  }
308  }
309  }
310  }
311  if (iter_inc) iter++;
312  }
313 
314  // #8. Remember good features that have little reprojection error while the parallax is noticeable
315  if (remember_3d_points && (container_triangulated.size() < max_triangulate)) {
316  iter = container.begin();
317  while(iter != container.end()) {
318  if ((iter->second.type_id > 0) &&
319  (container_triangulated.find(iter->first) == container_triangulated.end()))
320  {
321  Feature &f = iter->second;
322  double dist_sq = (f.p2d.x-f.projected_p2d.x)*(f.p2d.x-f.projected_p2d.x);
323  dist_sq += (f.p2d.y-f.projected_p2d.y)*(f.p2d.y-f.projected_p2d.y);
324  double dist_sh_sq = (f.projected_p2d_sh.x-f.projected_p2d.x)*(f.projected_p2d_sh.x-f.projected_p2d.x);
325  dist_sh_sq += (f.projected_p2d_sh.y-f.projected_p2d.y)*(f.projected_p2d_sh.y-f.projected_p2d.y);
326  if ((dist_sq < reproj_err_limit_sq) && (dist_sh_sq > parallax_length_sq)) {
327  container_triangulated[iter->first] = iter->second;
328  f.estimation_type = 3;
329  }
330  }
331  iter++;
332  if (container_triangulated.size() >= max_triangulate) break;
333  }
334  }
335 
336  return true;
337 }
338 
340  marker_detector.Detect(image, &cam, container, 0, 0, 1023, true, false);
341  tf.Purge();
342  tf.Track(image, 0, container, 1, 1024, 65535);
343  tf.EraseNonTracked(container, 1);
344  if (!pose_ok) pose_ok = cam.CalcExteriorOrientation(container, &pose);
345  else pose_ok = cam.UpdatePose(container, &pose);
346  if(pose_ok) update_tri = moving.UpdateDistance(&pose, scale);
347  std::map<int,Feature>::iterator iter = container.begin();
348  std::map<int,Feature>::iterator iter_end = container.end();
349  for(;iter != iter_end; iter++) {
350  if (pose_ok && (iter->second.type_id == 1) && (!iter->second.has_stored_pose) && (!iter->second.has_p3d)) {
351  iter->second.pose1 = pose;
352  iter->second.p2d1 = iter->second.p2d;
353  iter->second.has_stored_pose = true;
354  }
355  }
356  iter = container.begin();
357  for(;iter != iter_end; iter++)
358  {
359  Feature &f = iter->second;
360  if(update_tri && f.has_stored_pose && !f.has_p3d) {
361  f.tri_cntr++;
362  }
363  if(f.tri_cntr==3) {
364  CvPoint2D32f u1 = f.p2d1;
365  CvPoint2D32f u2 = f.p2d;
366  cam.Camera::Undistort(u1);
367  cam.Camera::Undistort(u2);
368  if(cam.ReconstructFeature(&f.pose1, &pose, &u1, &u2, &f.p3d, 10e-6))
369  f.has_p3d = true;
370  else
371  f.has_stored_pose = false;
372  f.tri_cntr = 0;
373  }
374  }
375  if (pose_ok) {
376  double err_markers = cam.Reproject(container, &pose, 0, true, false, 100);
377  if(err_markers > 30) { Reset(false); return false;}
378  double err_features = cam.Reproject(container, &pose, 1, false, false, 100);
379  cam.EraseUsingReprojectionError(container, 30, 1);
380  }
381  return pose_ok;
382 }
383 
385  int n_markers = marker_detector.Detect(image, &cam, container, 0, 0, 1023, true, false);
386  static int n_markers_prev = 0;
387  tf.Purge();
388  tf.Track(image, 0, container, 1, 1024, 65535);
389  tf.EraseNonTracked(container, 1);
390  int type_id = -1;
391  if(n_markers>=1) type_id = 0;
392  if (n_markers==1)
393  {
394  pose_ok = cam.CalcExteriorOrientation(container, &pose, 0);
395  if(pose_ok) pose_original = pose;
396  }
397  else if(n_markers>1)
398  {
399  if(pose_ok)
400  pose_ok = cam.UpdatePose(container, &pose, 0);
401  else
402  pose_ok = cam.CalcExteriorOrientation(container, &pose, 0);
403  if(pose_ok) pose_original = pose;
404  }
405  else //if(pose_ok) // Tracking going on...
406  {
407  if (n_markers_prev > 0) {
408  pose_difference.Reset();
409  pose_difference.Mirror(false, true, true);
410  std::map<int,Feature>::iterator iter = container.begin();
411  std::map<int,Feature>::iterator iter_end = container.end();
412  for(;iter != iter_end; iter++) {
413  if ((iter->second.type_id == 1) && (iter->second.has_p2d)) {
414  CvPoint2D32f _p2d = iter->second.p2d;
415  cam.Get3dOnDepth(&pose_difference, _p2d, 1, iter->second.p3d);
416  iter->second.has_p3d = true;
417  }
418  }
419  }
420  cam.UpdateRotation(container, &pose_difference, 1);
421  if(pose_ok)
422  {
423  double rot_mat[16];
424  double gl_mat[16];
425  pose_difference.GetMatrixGL(rot_mat);
426  pose_original.GetMatrixGL(gl_mat);
427  CvMat rot = cvMat(4, 4, CV_64F, rot_mat);// Rotation matrix (difference) from the tracker
428  CvMat mod = cvMat(4, 4, CV_64F, gl_mat); // Original modelview matrix (camera location)
429  cvMatMul(&mod, &rot, &rot);
430  /*if(pose_ok)*/ pose.SetMatrixGL(rot_mat);
431  }
432  //pose_ok = true;
433  }
434  n_markers_prev = n_markers;
435  if (pose_ok) {
436  if (n_markers < 1) {
437  std::map<int,Feature>::iterator iter = container.begin();
438  std::map<int,Feature>::iterator iter_end = container.end();
439  for(;iter != iter_end; iter++) {
440  if ((iter->second.type_id == 1) && (!iter->second.has_p3d) && (iter->second.has_p2d)) {
441  CvPoint2D32f _p2d = iter->second.p2d;
442  cam.Get3dOnDepth(&pose_difference, _p2d, 1, iter->second.p3d);
443  iter->second.has_p3d = true;
444  }
445  }
446  }
447 
448  double err_markers = cam.Reproject(container, &pose, 0, true, false, 100);
449  if(err_markers > 10) { Reset(false); return false;}
450  double err_features = cam.Reproject(container, &pose_difference, 1, false, false, 100);
451  cam.EraseUsingReprojectionError(container, 10, 1);
452  }
453  return pose_ok;
454 }
455 
456 void SimpleSfM::Draw(IplImage *rgba) {
457  if(markers_found) return;
458  std::map<int,Feature>::iterator iter = container.begin();
459  std::map<int,Feature>::iterator iter_end = container.end();
460  for(;iter != iter_end;iter++) {
461  Feature &f = iter->second;
462  if (f.has_p2d) {
463  int r=0, g=0, b=0, rad=1;
464  if (f.type_id == 0) {r=255; g=0; b=0;}
465  else if (f.estimation_type == 0) {
466  if (f.has_stored_pose) {r=255; g=0; b=255;}
467  else {r=0; g=255; b=255;}
468  }
469  else if (f.estimation_type == 1) {r=0; g=255; b=0;}
470  else if (f.estimation_type == 2) {r=255; g=0; b=255;}
471  else if (f.estimation_type == 3) {r=0; g=0; b=255;}
472  if (f.has_p3d) rad=5;
473  else rad = f.tri_cntr+1;
474 
475  cvCircle(rgba, cvPointFrom32f(f.p2d), rad, CV_RGB(r,g,b));
476  if (pose_ok) {
477  // The shadow point line
478  if (f.type_id > 0 && f.estimation_type < 3 && f.p3d_sh.x != 0.f) {
479  cvLine(rgba, cvPointFrom32f(f.projected_p2d), cvPointFrom32f(f.projected_p2d_sh), CV_RGB(0,255,0));
480  }
481  // Reprojection error
482  if (f.has_p3d) {
483  cvLine(rgba, cvPointFrom32f(f.p2d), cvPointFrom32f(f.projected_p2d), CV_RGB(255,0,255));
484  }
485  }
486  //if (pose_ok && f.has_p3d) {
487  // cvLine(rgba, cvPointFrom32f(f.p2d), cvPointFrom32f(f.projected_p2d), CV_RGB(255,0,255));
488  //}
489  //if (f.type_id == 0) {
490  // int id = iter->first;
491  // cvCircle(rgba, cvPointFrom32f(f.p2d), 7, CV_RGB(255,0,0));
492  //} else {
493  // int id = iter->first-1024+1;
494  // if (f.has_p3d) {
495  // cvCircle(rgba, cvPointFrom32f(f.p2d), 5, CV_RGB(0,255,0));
496  // }
497  // else if (f.has_stored_pose)
498  // cvCircle(rgba, cvPointFrom32f(f.p2d), f.tri_cntr+1, CV_RGB(255,0,255));
499  // else
500  // cvCircle(rgba, cvPointFrom32f(f.p2d), 5, CV_RGB(0,255,255));
501  //}
502  }
503  }
504 }
505 
506 } // namespace alvar
507 
Main ALVAR namespace.
Definition: Alvar.h:174
float PointVectorFromCamera(CvPoint3D32f p3d, CvPoint3D32f &p3d_vec, Pose *camera_pose)
Definition: SfM.cpp:102
void AddMarker(int marker_id, double edge_length, Pose &pose)
Add a marker to be a basis for tracking. It is a good idea to call SetResetPoint after these...
Definition: SfM.cpp:98
void CreateShadowPoint(CvPoint3D32f &p3d_sh, CvPoint3D32f p3d, CameraEC *cam, Pose *camera_pose, float parallax_length, float triangulate_angle)
Definition: SfM.cpp:123
f
void Clear()
Clear all tracked features.
Definition: SfM.cpp:81
void GetTranslation(CvMat *tra) const
Definition: Pose.cpp:168
void SetResetPoint()
Remember the current state and return here when the Reset is called.
Definition: SfM.cpp:76
bool UpdateDistance(Pose *pose, double limit=10)
Definition: SfM.cpp:41
XmlRpcServer s
This file implements structure from motion.
Version of Camera using external container.
Definition: EC.h:392
unsigned char * image
Definition: GlutViewer.cpp:155
bool is_initialized
Definition: SfM.cpp:30
bool AddMultiMarker(const char *fname, FILE_FORMAT format=FILE_FORMAT_XML)
Add a MultiMarker from file as a basis for tracking. It is a good idea to call SetResetPoint after these...
Definition: SfM.cpp:89
CameraEC * GetCamera()
Get the camera used internally. You need to use this to set correct camera calibration (see SamplePoi...
Definition: SfM.cpp:85
Simple Camera class for calculating distortions, orientation or projections with pre-calibrated camer...
Definition: Camera.h:82
Version of MultiMarker using external container.
Definition: EC.h:759
CvPoint2D32f p2d
Definition: EC.h:157
CvPoint2D32f projected_p2d_sh
Definition: SfM.h:50
bool UpdateTriangulateOnly(IplImage *image)
Update camera 6-DOF position using triangulated points only (This is the old version of Update) ...
Definition: SfM.cpp:339
float rad
Definition: GlutViewer.cpp:146
FILE_FORMAT
Definition: FileFormat.h:39
void ProjectPoint(const CvPoint3D64f pw, const Pose *pose, CvPoint2D64f &pi) const
Project one point.
Definition: Camera.cpp:814
bool Update(IplImage *image, bool assume_plane=true, bool triangulate=true, float reproj_err_limit=5.f, float triangulate_angle=15.f)
Update position assuming that camera is moving with 6-DOF.
Definition: SfM.cpp:154
Pose representation derived from the Rotation class
Definition: Pose.h:50
void Get3dOnDepth(const Pose *pose, CvPoint2D32f p2d, float depth, CvPoint3D32f &p3d)
Get 3D-coordinate for 2D feature assuming certain depth.
Definition: EC.cpp:252
void Draw(IplImage *rgba)
Draw debug information about the tracked features and detected markers.
Definition: SfM.cpp:456
MarkerDetector< MarkerData > marker_detector
CvPoint3D32f p3d_sh
Definition: SfM.h:49
Pose * GetPose()
Get the estimated pose.
Definition: SfM.cpp:87
bool UpdateRotationsOnly(IplImage *image)
Update position assuming that user is more standing still and viewing the environment.
Definition: SfM.cpp:384
void Invert()
Definition: Pose.cpp:132
void GetMatrix(CvMat *mat) const
Definition: Pose.cpp:91
Extended version of ExternalContainer structure used internally in SimpleSfM.
Definition: SfM.h:44
bool Load(std::map< int, T > &container, const char *fname, FILE_FORMAT format=FILE_FORMAT_DEFAULT, int type_id=0, int first_id=0, int last_id=65535)
Fill in 3D-coordinates for marker corners to the given container using save MultiMarker file...
Definition: EC.h:800
bool MarkersToEC(std::map< int, T > &container, int type_id=0, int first_id=0, int last_id=65535)
Fill in 3D-coordinates for marker corners to the given container.
Definition: EC.h:763
Camera * cam
CvPoint3D32f p3d
Definition: EC.h:158
CvPoint2D32f projected_p2d
Definition: EC.h:159
void Reset(bool reset_also_triangulated=true)
Reset the situation back to the point it was when SetResetPoint was called.
Definition: SfM.cpp:66
r
double prev[3]
Definition: SfM.cpp:31
CameraMoves moving
Definition: SfM.cpp:64
CvPoint2D32f p2d1
Definition: SfM.h:48


ar_track_alvar
Author(s): Scott Niekum
autogenerated on Thu Jun 6 2019 19:27:24