texture_mapping.hpp
/*
 * Software License Agreement (BSD License)
 *
 * Copyright (c) 2010, Willow Garage, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 * * Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following
 * disclaimer in the documentation and/or other materials provided
 * with the distribution.
 * * Neither the name of Willow Garage, Inc. nor the names of its
 * contributors may be used to endorse or promote products derived
 * from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * $Id$
 *
 */

#ifndef PCL_SURFACE_IMPL_TEXTURE_MAPPING_HPP_
#define PCL_SURFACE_IMPL_TEXTURE_MAPPING_HPP_

#include <pcl/common/distances.h>
#include <pcl/surface/texture_mapping.h> // declares pcl::TextureMapping (restored include; the exact path may differ in this fork)
#include <pcl/search/octree.h>
#include <pcl/common/common.h> // for getAngle3D
#include <algorithm> // for find, sort, max
#include <cstring> // for memcpy

///////////////////////////////////////////////////////////////////////////////////////////////
template<typename PointInT> std::vector<Eigen::Vector2f, Eigen::aligned_allocator<Eigen::Vector2f> >
pcl::TextureMapping<PointInT>::mapTexture2Face (
    const Eigen::Vector3f &p1,
    const Eigen::Vector3f &p2,
    const Eigen::Vector3f &p3)
{
  std::vector<Eigen::Vector2f, Eigen::aligned_allocator<Eigen::Vector2f> > tex_coordinates;
  // process for each face
  Eigen::Vector3f p1p2 (p2[0] - p1[0], p2[1] - p1[1], p2[2] - p1[2]);
  Eigen::Vector3f p1p3 (p3[0] - p1[0], p3[1] - p1[1], p3[2] - p1[2]);
  Eigen::Vector3f p2p3 (p3[0] - p2[0], p3[1] - p2[1], p3[2] - p2[2]);

  // normalize
  p1p2 = p1p2 / std::sqrt (p1p2.dot (p1p2));
  p1p3 = p1p3 / std::sqrt (p1p3.dot (p1p3));
  p2p3 = p2p3 / std::sqrt (p2p3.dot (p2p3));

  // compute the face normal
  Eigen::Vector3f f_normal = p1p2.cross (p1p3);
  f_normal = f_normal / std::sqrt (f_normal.dot (f_normal));

  // project the vector field onto the face: v_projected = v - dot (v, n) * n
  Eigen::Vector3f f_vector_field = vector_field_ - vector_field_.dot (f_normal) * f_normal;

  // normalize
  f_vector_field = f_vector_field / std::sqrt (f_vector_field.dot (f_vector_field));

  // texture coordinates
  Eigen::Vector2f tp1, tp2, tp3;

  double alpha = std::acos (f_vector_field.dot (p1p2));

  // edge lengths of the triangle, scaled by the focal length
  double e1 = (p2 - p3).norm () / f_;
  double e2 = (p1 - p3).norm () / f_;
  double e3 = (p1 - p2).norm () / f_;

  // initialize: tp1 at the origin, tp2 along the u axis
  tp1[0] = 0.0;
  tp1[1] = 0.0;

  tp2[0] = static_cast<float> (e3);
  tp2[1] = 0.0;

  // determine texture coordinate tp3 from the law of cosines
  double cos_p1 = (e2 * e2 + e3 * e3 - e1 * e1) / (2 * e2 * e3);
  double sin_p1 = std::sqrt (1 - (cos_p1 * cos_p1));

  tp3[0] = static_cast<float> (cos_p1 * e2);
  tp3[1] = static_cast<float> (sin_p1 * e2);

  // rotate by alpha (angle between the projected vector field and p1p2)
  Eigen::Vector2f r_tp2, r_tp3;
  r_tp2[0] = static_cast<float> (tp2[0] * std::cos (alpha) - tp2[1] * std::sin (alpha));
  r_tp2[1] = static_cast<float> (tp2[0] * std::sin (alpha) + tp2[1] * std::cos (alpha));

  r_tp3[0] = static_cast<float> (tp3[0] * std::cos (alpha) - tp3[1] * std::sin (alpha));
  r_tp3[1] = static_cast<float> (tp3[0] * std::sin (alpha) + tp3[1] * std::cos (alpha));

  // copy the rotated coordinates back (tp1 stays at the origin)
  tp2 = r_tp2;
  tp3 = r_tp3;

  // shift all coordinates into the positive quadrant
  float min_x = tp1[0];
  float min_y = tp1[1];
  if (min_x > tp2[0])
    min_x = tp2[0];
  if (min_x > tp3[0])
    min_x = tp3[0];
  if (min_y > tp2[1])
    min_y = tp2[1];
  if (min_y > tp3[1])
    min_y = tp3[1];

  if (min_x < 0)
  {
    tp1[0] -= min_x;
    tp2[0] -= min_x;
    tp3[0] -= min_x;
  }
  if (min_y < 0)
  {
    tp1[1] -= min_y;
    tp2[1] -= min_y;
    tp3[1] -= min_y;
  }

  tex_coordinates.push_back (tp1);
  tex_coordinates.push_back (tp2);
  tex_coordinates.push_back (tp3);
  return (tex_coordinates);
}
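// Usage sketch (an illustration, not part of the original file): mapping one
// triangle to UV space. The vertex values below are hypothetical; the focal
// length f_ and vector_field_ members are assumed to be set beforehand via
// the class' setters.
//   pcl::TextureMapping<pcl::PointXYZ> tm;
//   tm.setF (0.01f);                        // focal length
//   tm.setVectorField (1.0f, 0.0f, 0.0f);   // texture orientation
//   Eigen::Vector3f a (0.0f, 0.0f, 1.0f), b (0.1f, 0.0f, 1.0f), c (0.0f, 0.1f, 1.0f);
//   std::vector<Eigen::Vector2f, Eigen::aligned_allocator<Eigen::Vector2f> > uv =
//       tm.mapTexture2Face (a, b, c);       // returns the 3 UV coordinates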

///////////////////////////////////////////////////////////////////////////////////////////////
template<typename PointInT> void
pcl::TextureMapping<PointInT>::mapTexture2Mesh (pcl::TextureMesh &tex_mesh)
{
  // mesh information
  int nr_points = tex_mesh.cloud.width * tex_mesh.cloud.height;
  int point_size = static_cast<int> (tex_mesh.cloud.data.size ()) / nr_points;

  // temporary PointXYZ
  float x, y, z;
  // temporary face
  Eigen::Vector3f facet[3];

  // texture coordinates for each mesh
  std::vector<std::vector<Eigen::Vector2f, Eigen::aligned_allocator<Eigen::Vector2f> > > texture_map;

  for (size_t m = 0; m < tex_mesh.tex_polygons.size (); ++m)
  {
    // texture coordinates for each mesh
#if PCL_VERSION_COMPARE(>=, 1, 8, 0)
    std::vector<Eigen::Vector2f, Eigen::aligned_allocator<Eigen::Vector2f> > texture_map_tmp;
#else
    std::vector<Eigen::Vector2f> texture_map_tmp;
#endif

    // process each face
    for (size_t i = 0; i < tex_mesh.tex_polygons[m].size (); ++i)
    {
      size_t idx;

      // get facet information
      for (size_t j = 0; j < tex_mesh.tex_polygons[m][i].vertices.size (); ++j)
      {
        idx = tex_mesh.tex_polygons[m][i].vertices[j];
        memcpy (&x, &tex_mesh.cloud.data[idx * point_size + tex_mesh.cloud.fields[0].offset], sizeof(float));
        memcpy (&y, &tex_mesh.cloud.data[idx * point_size + tex_mesh.cloud.fields[1].offset], sizeof(float));
        memcpy (&z, &tex_mesh.cloud.data[idx * point_size + tex_mesh.cloud.fields[2].offset], sizeof(float));
        facet[j][0] = x;
        facet[j][1] = y;
        facet[j][2] = z;
      }

      // get texture coordinates of each face
      std::vector<Eigen::Vector2f, Eigen::aligned_allocator<Eigen::Vector2f> > tex_coordinates = mapTexture2Face (facet[0], facet[1], facet[2]);
      for (size_t n = 0; n < tex_coordinates.size (); ++n)
        texture_map_tmp.push_back (tex_coordinates[n]);
    } // end faces

    // texture materials
    std::stringstream tex_name;
    tex_name << "material_" << m;
    tex_name >> tex_material_.tex_name;
    tex_material_.tex_file = tex_files_[m];
    tex_mesh.tex_materials.push_back (tex_material_);

    // texture coordinates
    tex_mesh.tex_coordinates.push_back (texture_map_tmp);
  } // end meshes
}

///////////////////////////////////////////////////////////////////////////////////////////////
template<typename PointInT> void
pcl::TextureMapping<PointInT>::mapTexture2MeshUV (pcl::TextureMesh &tex_mesh)
{
  // mesh information
  int nr_points = tex_mesh.cloud.width * tex_mesh.cloud.height;
  int point_size = static_cast<int> (tex_mesh.cloud.data.size ()) / nr_points;

  float x_lowest = 100000;
  float x_highest = 0;
  float y_lowest = 100000;
  //float y_highest = 0;
  float z_lowest = 100000;
  float z_highest = 0;
  float x_, y_, z_;

  for (int i = 0; i < nr_points; ++i)
  {
    memcpy (&x_, &tex_mesh.cloud.data[i * point_size + tex_mesh.cloud.fields[0].offset], sizeof(float));
    memcpy (&y_, &tex_mesh.cloud.data[i * point_size + tex_mesh.cloud.fields[1].offset], sizeof(float));
    memcpy (&z_, &tex_mesh.cloud.data[i * point_size + tex_mesh.cloud.fields[2].offset], sizeof(float));
    // x
    if (x_ <= x_lowest)
      x_lowest = x_;
    if (x_ > x_highest) // was `x_ > x_lowest`, which never tracked the true maximum
      x_highest = x_;

    // y
    if (y_ <= y_lowest)
      y_lowest = y_;
    //if (y_ > y_highest) y_highest = y_;

    // z
    if (z_ <= z_lowest)
      z_lowest = z_;
    if (z_ > z_highest) // was `z_ > z_lowest`, same bug as above
      z_highest = z_;
  }
  // x
  float x_range = (x_lowest - x_highest) * -1;
  float x_offset = 0 - x_lowest;
  // y
  // float y_range = (y_lowest - y_highest) * -1;
  // float y_offset = 0 - y_lowest;
  // z
  float z_range = (z_lowest - z_highest) * -1;
  float z_offset = 0 - z_lowest;

  // texture coordinates for each mesh
  std::vector<std::vector<Eigen::Vector2f, Eigen::aligned_allocator<Eigen::Vector2f> > > texture_map;

  for (size_t m = 0; m < tex_mesh.tex_polygons.size (); ++m)
  {
    // texture coordinates for each mesh
#if PCL_VERSION_COMPARE(>=, 1, 8, 0)
    std::vector<Eigen::Vector2f, Eigen::aligned_allocator<Eigen::Vector2f> > texture_map_tmp;
#else
    std::vector<Eigen::Vector2f> texture_map_tmp;
#endif

    // process each face
    for (size_t i = 0; i < tex_mesh.tex_polygons[m].size (); ++i)
    {
      size_t idx;
      Eigen::Vector2f tmp_VT;
      for (size_t j = 0; j < tex_mesh.tex_polygons[m][i].vertices.size (); ++j)
      {
        idx = tex_mesh.tex_polygons[m][i].vertices[j];
        memcpy (&x_, &tex_mesh.cloud.data[idx * point_size + tex_mesh.cloud.fields[0].offset], sizeof(float));
        memcpy (&y_, &tex_mesh.cloud.data[idx * point_size + tex_mesh.cloud.fields[1].offset], sizeof(float));
        memcpy (&z_, &tex_mesh.cloud.data[idx * point_size + tex_mesh.cloud.fields[2].offset], sizeof(float));

        // calculate UV coordinates (planar projection onto the XZ plane)
        tmp_VT[0] = (x_ + x_offset) / x_range;
        tmp_VT[1] = (z_ + z_offset) / z_range;
        texture_map_tmp.push_back (tmp_VT);
      }
    } // end faces

    // texture materials
    std::stringstream tex_name;
    tex_name << "material_" << m;
    tex_name >> tex_material_.tex_name;
    tex_material_.tex_file = tex_files_[m];
    tex_mesh.tex_materials.push_back (tex_material_);

    // texture coordinates
    tex_mesh.tex_coordinates.push_back (texture_map_tmp);
  } // end meshes
}
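// Note on the UV mapping above: each vertex is projected onto the XZ plane and
// normalized by the cloud's bounding box, so u = (x - x_min) / (x_max - x_min)
// and v = (z - z_min) / (z_max - z_min). For example (hypothetical values), a
// vertex at x = 1.5 in a cloud spanning x in [1.0, 3.0] gets
// u = (1.5 - 1.0) / (3.0 - 1.0) = 0.25.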

///////////////////////////////////////////////////////////////////////////////////////////////
template<typename PointInT> void
pcl::TextureMapping<PointInT>::mapMultipleTexturesToMeshUV (pcl::TextureMesh &tex_mesh, pcl::texture_mapping::CameraVector &cams)
{
  if (tex_mesh.tex_polygons.size () != cams.size () + 1)
  {
    PCL_ERROR ("The mesh should be divided into nbCamera+1 sub-meshes.\n");
    PCL_ERROR ("You provided %d cameras and a mesh containing %d sub-meshes.\n", cams.size (), tex_mesh.tex_polygons.size ());
    return;
  }

  PCL_INFO ("You provided %d cameras and a mesh containing %d sub-meshes.\n", cams.size (), tex_mesh.tex_polygons.size ());

  typename pcl::PointCloud<PointInT>::Ptr originalCloud (new pcl::PointCloud<PointInT>);
  typename pcl::PointCloud<PointInT>::Ptr camera_transformed_cloud (new pcl::PointCloud<PointInT>);

  // convert the mesh's cloud to PCL format for ease of use
  pcl::fromPCLPointCloud2 (tex_mesh.cloud, *originalCloud);

  // texture coordinates for each mesh
  std::vector<std::vector<Eigen::Vector2f, Eigen::aligned_allocator<Eigen::Vector2f> > > texture_map;

  for (size_t m = 0; m < cams.size (); ++m)
  {
    // get current camera parameters
    Camera current_cam = cams[m];

    // get camera transform
    Eigen::Affine3f cam_trans = current_cam.pose;

    // transform cloud into current camera frame
    pcl::transformPointCloud (*originalCloud, *camera_transformed_cloud, cam_trans.inverse ());

    // vector of texture coordinates for each face
#if PCL_VERSION_COMPARE(>=, 1, 8, 0)
    std::vector<Eigen::Vector2f, Eigen::aligned_allocator<Eigen::Vector2f> > texture_map_tmp;
#else
    std::vector<Eigen::Vector2f> texture_map_tmp;
#endif

    // process each face visible by this camera
    PointInT pt;
    size_t idx;
    for (size_t i = 0; i < tex_mesh.tex_polygons[m].size (); ++i)
    {
      Eigen::Vector2f tmp_VT;
      // for each point of this face
      for (size_t j = 0; j < tex_mesh.tex_polygons[m][i].vertices.size (); ++j)
      {
        // get point
        idx = tex_mesh.tex_polygons[m][i].vertices[j];
        pt = camera_transformed_cloud->points[idx];

        // compute UV coordinates for this point
        getPointUVCoordinates (pt, current_cam, tmp_VT);
        texture_map_tmp.push_back (tmp_VT);
      } // end points
    } // end faces

    // texture materials
    std::stringstream tex_name;
    tex_name << "material_" << m;
    tex_name >> tex_material_.tex_name;
    tex_material_.tex_file = current_cam.texture_file;
    tex_mesh.tex_materials.push_back (tex_material_);

    // texture coordinates
    tex_mesh.tex_coordinates.push_back (texture_map_tmp);
  } // end cameras

  // push an extra empty UV map (for unseen faces) so that the OBJ writer does not crash
#if PCL_VERSION_COMPARE(>=, 1, 8, 0)
  std::vector<Eigen::Vector2f, Eigen::aligned_allocator<Eigen::Vector2f> > texture_map_tmp;
#else
  std::vector<Eigen::Vector2f> texture_map_tmp;
#endif
  for (size_t i = 0; i < tex_mesh.tex_polygons[cams.size ()].size (); ++i)
    for (size_t j = 0; j < tex_mesh.tex_polygons[cams.size ()][i].vertices.size (); ++j)
    {
      Eigen::Vector2f tmp_VT;
      tmp_VT[0] = -1;
      tmp_VT[1] = -1;
      texture_map_tmp.push_back (tmp_VT);
    }

  tex_mesh.tex_coordinates.push_back (texture_map_tmp);

  // push an extra dummy material for the same reason
  std::stringstream tex_name;
  tex_name << "material_" << cams.size ();
  tex_name >> tex_material_.tex_name;
  tex_material_.tex_file = "occluded.jpg";
  tex_mesh.tex_materials.push_back (tex_material_);
}
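// Usage sketch (illustration only): mapMultipleTexturesToMeshUV expects the
// mesh to already be split into one sub-mesh per camera plus one sub-mesh of
// occluded faces, which is what sortFacesByCamera (below) produces. Local
// variable names are hypothetical.
//   pcl::TextureMapping<pcl::PointXYZ> tm;
//   pcl::TextureMesh sorted_mesh;
//   pcl::PointCloud<pcl::PointXYZ> visible;
//   tm.sortFacesByCamera (mesh, sorted_mesh, cameras, 0.05, visible);
//   tm.mapMultipleTexturesToMeshUV (sorted_mesh, cameras);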

///////////////////////////////////////////////////////////////////////////////////////////////
template<typename PointInT> bool
pcl::TextureMapping<PointInT>::isPointOccluded (const PointInT &pt, const OctreePtr octree)
{
  Eigen::Vector3f direction;
  direction (0) = pt.x;
  direction (1) = pt.y;
  direction (2) = pt.z;

  std::vector<int> indices;

  PointCloudConstPtr cloud (new PointCloud ());
  cloud = octree->getInputCloud ();

  double distance_threshold = octree->getResolution ();

  // raytrace
  octree->getIntersectedVoxelIndices (direction, -direction, indices);

  int nbocc = static_cast<int> (indices.size ());
  for (size_t j = 0; j < indices.size (); j++)
  {
    // if the intersected point is on the other side of the camera
    if (pt.z * cloud->points[indices[j]].z < 0)
    {
      nbocc--;
      continue;
    }

    if (fabs (cloud->points[indices[j]].z - pt.z) <= distance_threshold)
    {
      // points are very close to each other, we do not consider the occlusion
      nbocc--;
    }
  }

  if (nbocc == 0)
    return (false);
  else
    return (true);
}
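// Usage sketch (illustration only): testing one point for occlusion against a
// cloud already expressed in the camera frame (the camera sits at the origin).
// Variable names are hypothetical.
//   pcl::TextureMapping<pcl::PointXYZ>::OctreePtr octree (
//       new pcl::octree::OctreePointCloudSearch<pcl::PointXYZ> (0.05));
//   octree->setInputCloud (camera_frame_cloud);
//   octree->defineBoundingBox ();
//   octree->addPointsFromInputCloud ();
//   bool occluded = tm.isPointOccluded (camera_frame_cloud->points[0], octree);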

///////////////////////////////////////////////////////////////////////////////////////////////
template<typename PointInT> void
pcl::TextureMapping<PointInT>::removeOccludedPoints (const PointCloudPtr &input_cloud,
                                                     PointCloudPtr &filtered_cloud,
                                                     const double octree_voxel_size, std::vector<int> &visible_indices,
                                                     std::vector<int> &occluded_indices)
{
  // variable used to filter occluded points by depth
  double maxDeltaZ = octree_voxel_size;

  // create an octree to perform raytracing
  OctreePtr octree (new Octree (octree_voxel_size));
  // create octree structure
  octree->setInputCloud (input_cloud);
  // update bounding box automatically
  octree->defineBoundingBox ();
  // add points in the tree
  octree->addPointsFromInputCloud ();

  visible_indices.clear ();

  // for each point of the cloud, raycast toward the camera and check intersected voxels
  Eigen::Vector3f direction;
  std::vector<int> indices;
  for (size_t i = 0; i < input_cloud->points.size (); ++i)
  {
    direction (0) = input_cloud->points[i].x;
    direction (1) = input_cloud->points[i].y;
    direction (2) = input_cloud->points[i].z;

    // check whether the point is occluded
    octree->getIntersectedVoxelIndices (direction, -direction, indices);

    int nbocc = static_cast<int> (indices.size ());
    for (size_t j = 0; j < indices.size (); j++)
    {
      // if the intersected point is on the other side of the camera
      if (input_cloud->points[i].z * input_cloud->points[indices[j]].z < 0)
      {
        nbocc--;
        continue;
      }

      if (fabs (input_cloud->points[indices[j]].z - input_cloud->points[i].z) <= maxDeltaZ)
      {
        // points are very close to each other, we do not consider the occlusion
        nbocc--;
      }
    }

    if (nbocc == 0)
    {
      // point is added to the filtered cloud
      filtered_cloud->points.push_back (input_cloud->points[i]);
      visible_indices.push_back (static_cast<int> (i));
    }
    else
    {
      occluded_indices.push_back (static_cast<int> (i));
    }
  }
}
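// How the test above works: a ray is cast from each point toward the camera at
// the origin. Every voxel hit along the ray is a potential occluder; hits are
// discounted when they lie behind the camera (opposite sign of z) or within
// maxDeltaZ of the query point (the point itself and its immediate surface).
// A point is kept as visible only when no occluder remains (nbocc == 0).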

///////////////////////////////////////////////////////////////////////////////////////////////
template<typename PointInT> void
pcl::TextureMapping<PointInT>::removeOccludedPoints (const pcl::TextureMesh &tex_mesh, pcl::TextureMesh &cleaned_mesh, const double octree_voxel_size)
{
  // copy mesh
  cleaned_mesh = tex_mesh;

  typename pcl::PointCloud<PointInT>::Ptr cloud (new pcl::PointCloud<PointInT>);
  typename pcl::PointCloud<PointInT>::Ptr filtered_cloud (new pcl::PointCloud<PointInT>);

  // load points into a PCL format
  pcl::fromPCLPointCloud2 (tex_mesh.cloud, *cloud);

  std::vector<int> visible, occluded;
  removeOccludedPoints (cloud, filtered_cloud, octree_voxel_size, visible, occluded);

  // Now that we know which points are visible, iterate over each face:
  // if the face has at least one occluded point, it is dropped.
  for (size_t polygons = 0; polygons < cleaned_mesh.tex_polygons.size (); ++polygons)
  {
    // remove all faces from the cleaned mesh
    cleaned_mesh.tex_polygons[polygons].clear ();
    // iterate over faces
    for (size_t faces = 0; faces < tex_mesh.tex_polygons[polygons].size (); ++faces)
    {
      // check if all the face's points are visible
      bool faceIsVisible = true;
      std::vector<int>::iterator it;

      // iterate over the face's vertices
      for (size_t points = 0; points < tex_mesh.tex_polygons[polygons][faces].vertices.size (); ++points)
      {
        it = find (occluded.begin (), occluded.end (), tex_mesh.tex_polygons[polygons][faces].vertices[points]);

        if (it != occluded.end ())
        {
          // point was occluded => the face is not visible
          faceIsVisible = false;
        }
      }

      if (faceIsVisible)
      {
        cleaned_mesh.tex_polygons[polygons].push_back (tex_mesh.tex_polygons[polygons][faces]);
      }
    }
  }
}

///////////////////////////////////////////////////////////////////////////////////////////////
template<typename PointInT> void
pcl::TextureMapping<PointInT>::removeOccludedPoints (const pcl::TextureMesh &tex_mesh, PointCloudPtr &filtered_cloud,
                                                     const double octree_voxel_size)
{
  PointCloudPtr cloud (new PointCloud);

  // load points into a PCL format
  pcl::fromPCLPointCloud2 (tex_mesh.cloud, *cloud);

  std::vector<int> visible, occluded;
  removeOccludedPoints (cloud, filtered_cloud, octree_voxel_size, visible, occluded);
}

///////////////////////////////////////////////////////////////////////////////////////////////
template<typename PointInT> int
pcl::TextureMapping<PointInT>::sortFacesByCamera (pcl::TextureMesh &tex_mesh, pcl::TextureMesh &sorted_mesh,
                                                  const pcl::texture_mapping::CameraVector &cameras, const double octree_voxel_size,
                                                  PointCloud &visible_pts)
{
  if (tex_mesh.tex_polygons.size () != 1)
  {
    PCL_ERROR ("The mesh must contain only 1 sub-mesh!\n");
    return (-1);
  }

  if (cameras.size () == 0)
  {
    PCL_ERROR ("Must provide at least one camera info!\n");
    return (-1);
  }

  // copy mesh
  sorted_mesh = tex_mesh;
  // clear polygons from sorted_mesh
  sorted_mesh.tex_polygons.clear ();

  typename pcl::PointCloud<PointInT>::Ptr original_cloud (new pcl::PointCloud<PointInT>);
  typename pcl::PointCloud<PointInT>::Ptr transformed_cloud (new pcl::PointCloud<PointInT>);
  typename pcl::PointCloud<PointInT>::Ptr filtered_cloud (new pcl::PointCloud<PointInT>);

  // load points into a PCL format
  pcl::fromPCLPointCloud2 (tex_mesh.cloud, *original_cloud);

  // for each camera
  for (size_t cam = 0; cam < cameras.size (); ++cam)
  {
    // get camera pose as transform
    Eigen::Affine3f cam_trans = cameras[cam].pose;

    // transform original cloud into camera coordinates
    pcl::transformPointCloud (*original_cloud, *transformed_cloud, cam_trans.inverse ());

    // find occlusions on transformed cloud
    std::vector<int> visible, occluded;
    removeOccludedPoints (transformed_cloud, filtered_cloud, octree_voxel_size, visible, occluded);
    visible_pts = *filtered_cloud;

    // find visible faces => add them to polygon N for camera N
    // add a polygon group for the current camera in the sorted mesh
    std::vector<pcl::Vertices> visibleFaces_currentCam;
    // iterate over the faces of the current mesh
    for (size_t faces = 0; faces < tex_mesh.tex_polygons[0].size (); ++faces)
    {
      // check if all the face's points are visible
      bool faceIsVisible = true;
      std::vector<int>::iterator it;

      // iterate over the face's vertices
      for (size_t current_pt_indice = 0; faceIsVisible && current_pt_indice < tex_mesh.tex_polygons[0][faces].vertices.size (); ++current_pt_indice)
      {
        // TODO this is far too slow! Better to create a helper function that raycasts here.
        it = find (occluded.begin (), occluded.end (), tex_mesh.tex_polygons[0][faces].vertices[current_pt_indice]);

        if (it == occluded.end ())
        {
          // point is not occluded
          // does it land on the camera's image plane?
          PointInT pt = transformed_cloud->points[tex_mesh.tex_polygons[0][faces].vertices[current_pt_indice]];
          Eigen::Vector2f dummy_UV;
          if (!getPointUVCoordinates (pt, cameras[cam], dummy_UV))
          {
            // point is not visible by the camera
            faceIsVisible = false;
          }
        }
        else
        {
          faceIsVisible = false;
        }
      }

      if (faceIsVisible)
      {
        // push the current visible face into the sorted mesh
        visibleFaces_currentCam.push_back (tex_mesh.tex_polygons[0][faces]);
        // remove it from the unsorted mesh
        tex_mesh.tex_polygons[0].erase (tex_mesh.tex_polygons[0].begin () + faces);
        faces--;
      }
    }
    sorted_mesh.tex_polygons.push_back (visibleFaces_currentCam);
  }

  // only occluded and non-visible faces are left in tex_mesh.tex_polygons[0];
  // add them as an extra polygon group in the sorted mesh
  sorted_mesh.tex_polygons.push_back (tex_mesh.tex_polygons[0]);
  return (0);
}
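// Note on the loop above: faces claimed by an earlier camera are erased from
// tex_mesh.tex_polygons[0] before the next camera is processed, so each face
// is assigned to the first camera (in the order of `cameras`) that fully sees
// it, not to the best one. The multi-camera pipelines further below
// (textureMeshwithMultipleCameras*) instead score every candidate camera per face.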

///////////////////////////////////////////////////////////////////////////////////////////////
template<typename PointInT> void
pcl::TextureMapping<PointInT>::showOcclusions (const PointCloudPtr &input_cloud,
                                               pcl::PointCloud<pcl::PointXYZI>::Ptr &colored_cloud,
                                               const double octree_voxel_size, const bool show_nb_occlusions,
                                               const int max_occlusions)
{
  // variable used to filter occluded points by depth
  double maxDeltaZ = octree_voxel_size * 2.0;

  // create an octree to perform raytracing; a stack object avoids the memory
  // leak of the original raw `new` that was never deleted
  pcl::octree::OctreePointCloudSearch<PointInT> octree (octree_voxel_size);
  // create octree structure
  octree.setInputCloud (input_cloud);
  // update bounding box automatically
  octree.defineBoundingBox ();
  // add points in the tree
  octree.addPointsFromInputCloud ();

  // ray direction
  Eigen::Vector3f direction;

  std::vector<int> indices;
  // point from where we raytrace
  pcl::PointXYZI pt;

  std::vector<double> zDist;
  std::vector<double> ptDist;
  // for each point of the cloud, raytrace toward the camera and check intersected voxels
  for (size_t i = 0; i < input_cloud->points.size (); ++i)
  {
    direction (0) = input_cloud->points[i].x;
    pt.x = input_cloud->points[i].x;
    direction (1) = input_cloud->points[i].y;
    pt.y = input_cloud->points[i].y;
    direction (2) = input_cloud->points[i].z;
    pt.z = input_cloud->points[i].z;

    // get the number of occlusions for that point
    indices.clear ();
    octree.getIntersectedVoxelIndices (direction, -direction, indices);
    int nbocc = static_cast<int> (indices.size ());

    // TODO need to clean this up and find tricks to remove the aliasing effect on planes
    for (size_t j = 0; j < indices.size (); j++)
    {
      // if the intersected point is on the other side of the camera
      if (pt.z * input_cloud->points[indices[j]].z < 0)
      {
        nbocc--;
      }
      else if (fabs (input_cloud->points[indices[j]].z - pt.z) <= maxDeltaZ)
      {
        // points are very close to each other, we do not consider the occlusion
        nbocc--;
      }
      else
      {
        zDist.push_back (fabs (input_cloud->points[indices[j]].z - pt.z));
        ptDist.push_back (pcl::euclideanDistance (input_cloud->points[indices[j]], pt));
      }
    }

    if (show_nb_occlusions)
      (nbocc <= max_occlusions) ? (pt.intensity = static_cast<float> (nbocc)) : (pt.intensity = static_cast<float> (max_occlusions));
    else
      (nbocc == 0) ? (pt.intensity = 0) : (pt.intensity = 1);

    colored_cloud->points.push_back (pt);
  }

  if (zDist.size () >= 2)
  {
    std::sort (zDist.begin (), zDist.end ());
    std::sort (ptDist.begin (), ptDist.end ());
  }
}

///////////////////////////////////////////////////////////////////////////////////////////////
template<typename PointInT> void
pcl::TextureMapping<PointInT>::showOcclusions (pcl::TextureMesh &tex_mesh, pcl::PointCloud<pcl::PointXYZI>::Ptr &colored_cloud,
                                               double octree_voxel_size, bool show_nb_occlusions, int max_occlusions)
{
  // load points into a PCL format
  typename pcl::PointCloud<PointInT>::Ptr cloud (new pcl::PointCloud<PointInT>);
  pcl::fromPCLPointCloud2 (tex_mesh.cloud, *cloud);

  showOcclusions (cloud, colored_cloud, octree_voxel_size, show_nb_occlusions, max_occlusions);
}

///////////////////////////////////////////////////////////////////////////////////////////////
template<typename PointInT> void
pcl::TextureMapping<PointInT>::textureMeshwithMultipleCameras (pcl::TextureMesh &mesh, const pcl::texture_mapping::CameraVector &cameras)
{
  if (mesh.tex_polygons.size () != 1)
    return;

  typename pcl::PointCloud<PointInT>::Ptr mesh_cloud (new pcl::PointCloud<PointInT>);

  pcl::fromPCLPointCloud2 (mesh.cloud, *mesh_cloud);

  std::vector<pcl::Vertices> faces;

  for (int current_cam = 0; current_cam < static_cast<int> (cameras.size ()); ++current_cam)
  {
    PCL_INFO ("Processing camera %d of %d.\n", current_cam + 1, cameras.size ());

    // transform mesh into camera's frame
    typename pcl::PointCloud<PointInT>::Ptr camera_cloud (new pcl::PointCloud<PointInT>);
    pcl::transformPointCloud (*mesh_cloud, *camera_cloud, cameras[current_cam].pose.inverse ());

    // CREATE UV MAP FOR CURRENT FACES
    pcl::PointCloud<pcl::PointXY>::Ptr projections (new pcl::PointCloud<pcl::PointXY>);
    std::vector<bool> visibility;
    visibility.resize (mesh.tex_polygons[current_cam].size ());
    std::vector<UvIndex> indexes_uv_to_points;
    // for each current face

    // TODO change this
    pcl::PointXY nan_point;
    nan_point.x = std::numeric_limits<float>::quiet_NaN ();
    nan_point.y = std::numeric_limits<float>::quiet_NaN ();
    UvIndex u_null;
    u_null.idx_cloud = -1;
    u_null.idx_face = -1;

    int cpt_invisible = 0;
    for (int idx_face = 0; idx_face < static_cast<int> (mesh.tex_polygons[current_cam].size ()); ++idx_face)
    {
      // project each vertex; if one is out of view, stop
      pcl::PointXY uv_coord1;
      pcl::PointXY uv_coord2;
      pcl::PointXY uv_coord3;

      if (isFaceProjected (cameras[current_cam],
                           camera_cloud->points[mesh.tex_polygons[current_cam][idx_face].vertices[0]],
                           camera_cloud->points[mesh.tex_polygons[current_cam][idx_face].vertices[1]],
                           camera_cloud->points[mesh.tex_polygons[current_cam][idx_face].vertices[2]],
                           uv_coord1,
                           uv_coord2,
                           uv_coord3))
      {
        // face is in the camera's FOV

        // add UV coordinates
        projections->points.push_back (uv_coord1);
        projections->points.push_back (uv_coord2);
        projections->points.push_back (uv_coord3);

        // remember the corresponding face
        UvIndex u1, u2, u3;
        u1.idx_cloud = mesh.tex_polygons[current_cam][idx_face].vertices[0];
        u2.idx_cloud = mesh.tex_polygons[current_cam][idx_face].vertices[1];
        u3.idx_cloud = mesh.tex_polygons[current_cam][idx_face].vertices[2];
        u1.idx_face = idx_face; u2.idx_face = idx_face; u3.idx_face = idx_face;
        indexes_uv_to_points.push_back (u1);
        indexes_uv_to_points.push_back (u2);
        indexes_uv_to_points.push_back (u3);

        // keep track of visibility
        visibility[idx_face] = true;
      }
      else
      {
        projections->points.push_back (nan_point);
        projections->points.push_back (nan_point);
        projections->points.push_back (nan_point);
        indexes_uv_to_points.push_back (u_null);
        indexes_uv_to_points.push_back (u_null);
        indexes_uv_to_points.push_back (u_null);
        // keep track of visibility
        visibility[idx_face] = false;
        cpt_invisible++;
      }
    }

    // `projections` contains all UV points of the current faces;
    // `indexes_uv_to_points` links a UV point to its point in the camera cloud;
    // `visibility` tells whether each face was in the camera's FOV (false = skip)

    // TODO handle the case where no face could be projected
    if (visibility.size () - cpt_invisible != 0)
    {
      // create kdtree
      pcl::KdTreeFLANN<pcl::PointXY> kdtree;
      kdtree.setInputCloud (projections);

      std::vector<int> idxNeighbors;
      std::vector<float> neighborsSquaredDistance;
      // at first (idx_pcam < current_cam), check if some of the faces attached to previous cameras occlude the current faces;
      // then (idx_pcam == current_cam), check for self occlusions. At this stage, we skip faces that were already marked as occluded.
      cpt_invisible = 0;
      for (int idx_pcam = 0; idx_pcam <= current_cam; ++idx_pcam)
      {
        // project all faces
        for (int idx_face = 0; idx_face < static_cast<int> (mesh.tex_polygons[idx_pcam].size ()); ++idx_face)
        {
          if (idx_pcam == current_cam && !visibility[idx_face])
          {
            // we are now checking for self occlusions within the current faces;
            // the current face was already declared occluded, so it cannot
            // occlude another face anymore => skip it
            continue;
          }

          // project each vertex; if one is out of view, stop
          pcl::PointXY uv_coord1;
          pcl::PointXY uv_coord2;
          pcl::PointXY uv_coord3;

          if (isFaceProjected (cameras[current_cam],
                               camera_cloud->points[mesh.tex_polygons[idx_pcam][idx_face].vertices[0]],
                               camera_cloud->points[mesh.tex_polygons[idx_pcam][idx_face].vertices[1]],
                               camera_cloud->points[mesh.tex_polygons[idx_pcam][idx_face].vertices[2]],
                               uv_coord1,
                               uv_coord2,
                               uv_coord3))
          {
            // face is in the camera's FOV
            // get its circumscribed circle
            double radius;
            pcl::PointXY center;
            // getTriangleCircumcenterAndSize (uv_coord1, uv_coord2, uv_coord3, center, radius);
            getTriangleCircumcscribedCircleCentroid (uv_coord1, uv_coord2, uv_coord3, center, radius); // this function yields faster results than getTriangleCircumcenterAndSize

            // get points inside the circumscribed circle
            if (kdtree.radiusSearch (center, radius, idxNeighbors, neighborsSquaredDistance) > 0)
            {
              // for each neighbor
              for (size_t i = 0; i < idxNeighbors.size (); ++i)
              {
                if (std::max (camera_cloud->points[mesh.tex_polygons[idx_pcam][idx_face].vertices[0]].z,
                              std::max (camera_cloud->points[mesh.tex_polygons[idx_pcam][idx_face].vertices[1]].z,
                                        camera_cloud->points[mesh.tex_polygons[idx_pcam][idx_face].vertices[2]].z))
                    < camera_cloud->points[indexes_uv_to_points[idxNeighbors[i]].idx_cloud].z)
                {
                  // neighbor is farther than all the face's points; check if it falls into the triangle
                  if (checkPointInsideTriangle (uv_coord1, uv_coord2, uv_coord3, projections->points[idxNeighbors[i]]))
                  {
                    // neighbor is inside the triangle and farther away => the face it belongs to is occluded
                    visibility[indexes_uv_to_points[idxNeighbors[i]].idx_face] = false;
                    cpt_invisible++;
                    // TODO we could remove the projections of this face from the kd-tree cloud, but I found it
                    // slower, and I need the points to stay ordered to query UV coordinates later
                  }
                }
              }
            }
          }
        }
      }
    }

    // now, visibility is true for each face that belongs to the current camera;
    // if a face is not visible, we push it into the next one

    if (static_cast<int> (mesh.tex_coordinates.size ()) <= current_cam)
    {
#if PCL_VERSION_COMPARE(>=, 1, 8, 0)
      std::vector<Eigen::Vector2f, Eigen::aligned_allocator<Eigen::Vector2f> > dummy_container;
#else
      std::vector<Eigen::Vector2f> dummy_container;
#endif
      mesh.tex_coordinates.push_back (dummy_container);
    }
    mesh.tex_coordinates[current_cam].resize (3 * visibility.size ());

    std::vector<pcl::Vertices> occluded_faces;
    occluded_faces.resize (visibility.size ());
    std::vector<pcl::Vertices> visible_faces;
    visible_faces.resize (visibility.size ());

    int cpt_occluded_faces = 0;
    int cpt_visible_faces = 0;

    for (size_t idx_face = 0; idx_face < visibility.size (); ++idx_face)
    {
      if (visibility[idx_face])
      {
        // face is visible by the current camera: copy its UV coordinates
        mesh.tex_coordinates[current_cam][cpt_visible_faces * 3] (0) = projections->points[idx_face * 3].x;
        mesh.tex_coordinates[current_cam][cpt_visible_faces * 3] (1) = projections->points[idx_face * 3].y;

        mesh.tex_coordinates[current_cam][cpt_visible_faces * 3 + 1] (0) = projections->points[idx_face * 3 + 1].x;
        mesh.tex_coordinates[current_cam][cpt_visible_faces * 3 + 1] (1) = projections->points[idx_face * 3 + 1].y;

        mesh.tex_coordinates[current_cam][cpt_visible_faces * 3 + 2] (0) = projections->points[idx_face * 3 + 2].x;
        mesh.tex_coordinates[current_cam][cpt_visible_faces * 3 + 2] (1) = projections->points[idx_face * 3 + 2].y;

        visible_faces[cpt_visible_faces] = mesh.tex_polygons[current_cam][idx_face];

        cpt_visible_faces++;
      }
      else
      {
        // face is occluded: copy the face into a temporary vector
        occluded_faces[cpt_occluded_faces] = mesh.tex_polygons[current_cam][idx_face];
        cpt_occluded_faces++;
      }
    }
    mesh.tex_coordinates[current_cam].resize (cpt_visible_faces * 3);

    occluded_faces.resize (cpt_occluded_faces);
    mesh.tex_polygons.push_back (occluded_faces);

    visible_faces.resize (cpt_visible_faces);
    mesh.tex_polygons[current_cam].clear ();
    mesh.tex_polygons[current_cam] = visible_faces;
  }

  // we have been through all the cameras;
  // if any faces are left, they were not visible by any camera
  // and we still need to produce UV coordinates for them

  if (mesh.tex_coordinates.size () <= cameras.size ())
  {
#if PCL_VERSION_COMPARE(>=, 1, 8, 0)
    std::vector<Eigen::Vector2f, Eigen::aligned_allocator<Eigen::Vector2f> > dummy_container;
#else
    std::vector<Eigen::Vector2f> dummy_container;
#endif
    mesh.tex_coordinates.push_back (dummy_container);
  }

  for (size_t idx_face = 0; idx_face < mesh.tex_polygons[cameras.size ()].size (); ++idx_face)
  {
    Eigen::Vector2f UV1, UV2, UV3;
    UV1 (0) = -1.0; UV1 (1) = -1.0;
    UV2 (0) = -1.0; UV2 (1) = -1.0;
    UV3 (0) = -1.0; UV3 (1) = -1.0;
    mesh.tex_coordinates[cameras.size ()].push_back (UV1);
    mesh.tex_coordinates[cameras.size ()].push_back (UV2);
    mesh.tex_coordinates[cameras.size ()].push_back (UV3);
  }
}

///////////////////////////////////////////////////////////////////////////////////////////////
class FaceInfo
{
public:
  FaceInfo (float d,
            float a,
            float edge,
            bool facingCam,
            const pcl::PointXY & uv1,
            const pcl::PointXY & uv2,
            const pcl::PointXY & uv3,
            const pcl::PointXY & center) :
      distance (d),
      angle (a),
      longestEdgeSqrd (edge),
      facingTheCam (facingCam),
      uv_coord1 (uv1),
      uv_coord2 (uv2),
      uv_coord3 (uv3),
      uv_center (center)
  {}
  float distance;
  float angle;
  float longestEdgeSqrd; // restored member (it is set by the constructor's initializer list)
  bool facingTheCam;     // restored member (likewise)
  pcl::PointXY uv_coord1;
  pcl::PointXY uv_coord2;
  pcl::PointXY uv_coord3;
  pcl::PointXY uv_center;
};

bool ptInTriangle (const pcl::PointXY & p0, const pcl::PointXY & p1, const pcl::PointXY & p2, const pcl::PointXY & p) {
  // signed area of the triangle; 0.5f fixes the original integer expression
  // 1/2, which truncates to 0 and made this function always return false
  float A = 0.5f * (-p1.y * p2.x + p0.y * (-p1.x + p2.x) + p0.x * (p1.y - p2.y) + p1.x * p2.y);
  float sign = A < 0 ? -1 : 1;
  float s = (p0.y * p2.x - p0.x * p2.y + (p2.y - p0.y) * p.x + (p0.x - p2.x) * p.y) * sign;
  float t = (p0.x * p1.y - p0.y * p1.x + (p0.y - p1.y) * p.x + (p1.x - p0.x) * p.y) * sign;

  return s > 0 && t > 0 && (s + t) < 2 * A * sign;
}
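// Quick sanity check of ptInTriangle (hypothetical values): for the triangle
// p0 = (0,0), p1 = (1,0), p2 = (0,1), A = 0.5, and the query p = (0.25, 0.25)
// yields s = 0.25, t = 0.25, s + t = 0.5 < 2*A = 1.0, so the point is inside;
// p = (1,1) gives s + t = 2.0 > 1.0, so it is outside.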

///////////////////////////////////////////////////////////////////////////////////////////////
// (signature restored from context; this overload is RTAB-Map's extension of the PCL function above)
template<typename PointInT> bool
pcl::TextureMapping<PointInT>::textureMeshwithMultipleCameras2 (
    pcl::TextureMesh &mesh,
    const pcl::texture_mapping::CameraVector &cameras,
    const rtabmap::ProgressState * state,
    std::vector<std::map<int, pcl::PointXY> > * vertexToPixels,
    bool distanceToCamPolicy)
{
  if (mesh.tex_polygons.size () != 1)
    return false;

  typename pcl::PointCloud<PointInT>::Ptr mesh_cloud (new pcl::PointCloud<PointInT>);

  pcl::fromPCLPointCloud2 (mesh.cloud, *mesh_cloud);

  std::vector<pcl::Vertices> faces;
  faces.swap (mesh.tex_polygons[0]);

  mesh.tex_polygons.clear ();
  mesh.tex_polygons.resize (cameras.size () + 1);
  mesh.tex_coordinates.clear ();
  mesh.tex_coordinates.resize (cameras.size () + 1);

  // precompute all inverse camera transforms and visibility
  std::vector<std::map<int, FaceInfo> > visibleFaces (cameras.size ());
  std::vector<Eigen::Affine3f> invCamTransform (cameras.size ());
  std::vector<std::list<int> > faceCameras (faces.size ());
  std::string msg = uFormat ("Computing visible faces per cam (%d faces, %d cams)", (int)faces.size (), (int)cameras.size ());
  UINFO (msg.c_str ());
  if (state && !state->callback (msg))
  {
    // cancelled!
    UWARN ("Texturing cancelled!");
    return false;
  }
  for (unsigned int current_cam = 0; current_cam < cameras.size (); ++current_cam)
  {
    UDEBUG ("Texture camera %d...", current_cam);

    typename pcl::PointCloud<PointInT>::Ptr camera_cloud (new pcl::PointCloud<PointInT>);
    pcl::transformPointCloud (*mesh_cloud, *camera_cloud, cameras[current_cam].pose.inverse ());

    std::vector<int> visibilityIndices;
    visibilityIndices.resize (faces.size ());
    pcl::PointCloud<pcl::PointXY>::Ptr projections (new pcl::PointCloud<pcl::PointXY>);
    projections->resize (faces.size () * 3);
    std::map<float, int> sortedVisibleFaces;
    int oi = 0;
    for (unsigned int idx_face = 0; idx_face < faces.size (); ++idx_face)
    {
      pcl::Vertices & face = faces[idx_face];

      int j = oi * 3;
      pcl::PointXY & uv_coords1 = projections->at (j);
      pcl::PointXY & uv_coords2 = projections->at (j + 1);
      pcl::PointXY & uv_coords3 = projections->at (j + 2);
      PointInT & pt0 = camera_cloud->points[face.vertices[0]];
      PointInT & pt1 = camera_cloud->points[face.vertices[1]];
      PointInT & pt2 = camera_cloud->points[face.vertices[2]];
      if (isFaceProjected (cameras[current_cam],
                           pt0,
                           pt1,
                           pt2,
                           uv_coords1,
                           uv_coords2,
                           uv_coords3))
      {
        // check if the polygon is facing the camera, assuming a counterclockwise normal
        Eigen::Vector3f v0 (
            uv_coords2.x - uv_coords1.x,
            uv_coords2.y - uv_coords1.y,
            0);
        Eigen::Vector3f v1 (
            uv_coords3.x - uv_coords1.x,
            uv_coords3.y - uv_coords1.y,
            0);
        Eigen::Vector3f normal = v0.cross (v1);
        float angle = normal.dot (Eigen::Vector3f (0.0f, 0.0f, 1.0f));
        bool facingTheCam = angle > 0.0f;
        float distanceToCam = std::min (std::min (pt0.z, pt1.z), pt2.z);
        float angleToCam = 0.0f;
        Eigen::Vector3f e0 = Eigen::Vector3f (
            pt1.x - pt0.x,
            pt1.y - pt0.y,
            pt1.z - pt0.z);
        Eigen::Vector3f e1 = Eigen::Vector3f (
            pt2.x - pt0.x,
            pt2.y - pt0.y,
            pt2.z - pt0.z);
        Eigen::Vector3f e2 = Eigen::Vector3f (
            pt2.x - pt1.x,
            pt2.y - pt1.y,
            pt2.z - pt1.z);
        if (facingTheCam && this->max_angle_)
        {
          Eigen::Vector3f normal3D;
          normal3D = e0.cross (e1);
          angleToCam = pcl::getAngle3D (Eigen::Vector4f (normal3D[0], normal3D[1], normal3D[2], 0.0f), Eigen::Vector4f (0.0f, 0.0f, -1.0f, 0.0f));
        }

        // longest edge (squared)
        float e0norm2 = e0[0] * e0[0] + e0[1] * e0[1] + e0[2] * e0[2];
        float e1norm2 = e1[0] * e1[0] + e1[1] * e1[1] + e1[2] * e1[2];
        float e2norm2 = e2[0] * e2[0] + e2[1] * e2[1] + e2[2] * e2[2];
        float longestEdgeSqrd = std::max (std::max (e0norm2, e1norm2), e2norm2);

        pcl::PointXY center;
        center.x = (uv_coords1.x + uv_coords2.x + uv_coords3.x) / 3.0f;
        center.y = (uv_coords1.y + uv_coords2.y + uv_coords3.y) / 3.0f;
        visibleFaces[current_cam].insert (visibleFaces[current_cam].end (), std::make_pair (idx_face, FaceInfo (distanceToCam, angleToCam, longestEdgeSqrd, facingTheCam, uv_coords1, uv_coords2, uv_coords3, center)));
        sortedVisibleFaces.insert (std::make_pair (distanceToCam, idx_face));
        visibilityIndices[oi] = idx_face;
        ++oi;
      }
    }
    visibilityIndices.resize (oi);
    projections->resize (oi * 3);
    UASSERT (projections->size () == visibilityIndices.size () * 3);

    // filter occluded polygons
    // create kdtree
    pcl::KdTreeFLANN<pcl::PointXY> kdtree;
    kdtree.setInputCloud (projections);

    std::vector<int> idxNeighbors;
    std::vector<float> neighborsSquaredDistance;
    // at first (idx_pcam < current_cam), check if some of the faces attached to previous cameras occlude the current faces;
    // then (idx_pcam == current_cam), check for self occlusions. At this stage, we skip faces that were already marked as occluded.
    // project all faces
    std::set<int> occludedFaces;
    for (std::map<float, int>::iterator jter = sortedVisibleFaces.begin (); jter != sortedVisibleFaces.end (); ++jter)
    //for (unsigned int idx = 0; idx < visibilityIndices.size (); ++idx)
    {
      int idx_face = jter->second;
      //int idx_face = visibilityIndices[idx];
      std::map<int, FaceInfo>::iterator iter = visibleFaces[current_cam].find (idx_face);
      UASSERT (iter != visibleFaces[current_cam].end ());

      FaceInfo & info = iter->second;

      // face is in the camera's FOV
      // get its circumscribed circle
      double radius;
      pcl::PointXY center;
      // getTriangleCircumcenterAndSize (info.uv_coord1, info.uv_coord2, info.uv_coord3, center, radius);
      getTriangleCircumcscribedCircleCentroid (info.uv_coord1, info.uv_coord2, info.uv_coord3, center, radius); // this function yields faster results than getTriangleCircumcenterAndSize

      // get points inside the circumscribed circle
      if (kdtree.radiusSearch (center, radius, idxNeighbors, neighborsSquaredDistance) > 0)
      {
        // for each neighbor
        for (size_t i = 0; i < idxNeighbors.size (); ++i)
        {
          int neighborFaceIndex = idxNeighbors[i] / 3;
          //std::map<int, FaceInfo>::iterator jter = visibleFaces[current_cam].find (visibilityIndices[neighborFaceIndex]);
          //if (jter != visibleFaces[current_cam].end ())
          {
            if (std::max (camera_cloud->points[faces[idx_face].vertices[0]].z,
                          std::max (camera_cloud->points[faces[idx_face].vertices[1]].z,
                                    camera_cloud->points[faces[idx_face].vertices[2]].z))
                < camera_cloud->points[faces[visibilityIndices[neighborFaceIndex]].vertices[idxNeighbors[i] % 3]].z)
            //if (info.distance < jter->second.distance)
            {
              // neighbor is farther than all the face's points; check if it falls into the triangle
              if (checkPointInsideTriangle (info.uv_coord1, info.uv_coord2, info.uv_coord3, projections->at (idxNeighbors[i])))
              {
                // neighbor is inside the triangle and farther away => the face it belongs to is occluded
                occludedFaces.insert (visibilityIndices[neighborFaceIndex]);
                // TODO we could remove the projections of this face from the kd-tree cloud, but I found it
                // slower, and I need the points to stay ordered to query UV coordinates later
              }
            }
          }
        }
      }
    }

    // remove occluded faces
    for (std::set<int>::iterator iter = occludedFaces.begin (); iter != occludedFaces.end (); ++iter)
    {
      visibleFaces[current_cam].erase (*iter);
    }

    // filter clusters
    int clusterFaces = 0;

    std::vector<pcl::Vertices> polygons (visibleFaces[current_cam].size ());
    std::vector<int> polygon_to_face_index (visibleFaces[current_cam].size ());
    oi = 0;
    for (std::map<int, FaceInfo>::iterator iter = visibleFaces[current_cam].begin (); iter != visibleFaces[current_cam].end (); ++iter)
    {
      polygons[oi].vertices.resize (3);
      polygons[oi].vertices[0] = faces[iter->first].vertices[0];
      polygons[oi].vertices[1] = faces[iter->first].vertices[1];
      polygons[oi].vertices[2] = faces[iter->first].vertices[2];
      polygon_to_face_index[oi] = iter->first;
      ++oi;
    }

    std::vector<std::set<int> > neighbors;
    std::vector<std::set<int> > vertexToPolygons;
    rtabmap::util3d::createPolygonIndexes (polygons,
                                           (int)camera_cloud->size (),
                                           neighbors,
                                           vertexToPolygons);
    std::list<std::list<int> > clusters = rtabmap::util3d::clusterPolygons (
        neighbors,
        min_cluster_size_);
    std::set<int> polygonsKept;
    for (std::list<std::list<int> >::iterator iter = clusters.begin (); iter != clusters.end (); ++iter)
    {
      for (std::list<int>::iterator jter = iter->begin (); jter != iter->end (); ++jter)
      {
        polygonsKept.insert (polygon_to_face_index[*jter]);
        faceCameras[polygon_to_face_index[*jter]].push_back (current_cam);
      }
    }

    for (std::map<int, FaceInfo>::iterator iter = visibleFaces[current_cam].begin (); iter != visibleFaces[current_cam].end ();)
    {
      if (polygonsKept.find (iter->first) == polygonsKept.end ())
      {
        visibleFaces[current_cam].erase (iter++);
        ++clusterFaces;
      }
      else
      {
        ++iter;
      }
    }

    msg = uFormat ("Processed camera %d/%d: %d occluded and %d spurious polygons out of %d", (int)current_cam + 1, (int)cameras.size (), (int)occludedFaces.size (), clusterFaces, (int)visibilityIndices.size ());
    UINFO (msg.c_str ());
    if (state && !state->callback (msg))
    {
      // cancelled!
      UWARN ("Texturing cancelled!");
      return false;
    }
  }

  msg = uFormat ("Texturing %d polygons...", (int)faces.size ());
  UINFO (msg.c_str ());
  if (state && !state->callback (msg))
  {
    // cancelled!
    UWARN ("Texturing cancelled!");
    return false;
  }
  int textured = 0;
  if (vertexToPixels)
  {
    *vertexToPixels = std::vector<std::map<int, pcl::PointXY> > (mesh_cloud->size ());
  }
  for (unsigned int idx_face = 0; idx_face < faces.size (); ++idx_face)
  {
    if ((idx_face + 1) % 10000 == 0)
    {
      UDEBUG ("face %d/%d", idx_face + 1, (int)faces.size ());
      if (state && !state->callback (uFormat ("Textured %d/%d of %d polygons", textured, idx_face + 1, (int)faces.size ())))
      {
        // cancelled!
        UWARN ("Texturing cancelled!");
        return false;
      }
    }
    pcl::Vertices & face = faces[idx_face];

    int cameraIndex = -1;
    float smallestWeight = std::numeric_limits<float>::max ();
    bool depthSet = false;
    pcl::PointXY uv_coords[3];
    for (std::list<int>::iterator camIter = faceCameras[idx_face].begin (); camIter != faceCameras[idx_face].end (); ++camIter)
    {
      int current_cam = *camIter;
      std::map<int, FaceInfo>::iterator iter = visibleFaces[current_cam].find (idx_face);
      UASSERT (iter != visibleFaces[current_cam].end ());
      if (iter->second.facingTheCam && (max_angle_ <= 0.0f || iter->second.angle <= max_angle_))
      {
        float distanceToCam = iter->second.distance;
        float vx = (iter->second.uv_coord1.x + iter->second.uv_coord2.x + iter->second.uv_coord3.x) / 3.0f - 0.5f;
        float vy = (iter->second.uv_coord1.y + iter->second.uv_coord2.y + iter->second.uv_coord3.y) / 3.0f - 0.5f;
        float distanceToCenter = vx * vx + vy * vy;

        cv::Mat depth = cameras[current_cam].depth;
        bool currentDepthSet = false;
        float maxDepthError = max_depth_error_ == 0.0f ? std::sqrt (iter->second.longestEdgeSqrd) * 2.0f : max_depth_error_;
        if (!cameras[current_cam].depth.empty () && maxDepthError > 0.0f)
        {
          float d1 = depth.type () == CV_32FC1 ?
              depth.at<float> ((1.0f - iter->second.uv_coord1.y) * depth.rows, iter->second.uv_coord1.x * depth.cols) :
              float (depth.at<unsigned short> ((1.0f - iter->second.uv_coord1.y) * depth.rows, iter->second.uv_coord1.x * depth.cols)) / 1000.0f;
          float d2 = depth.type () == CV_32FC1 ?
              depth.at<float> ((1.0f - iter->second.uv_coord2.y) * depth.rows, iter->second.uv_coord2.x * depth.cols) :
              float (depth.at<unsigned short> ((1.0f - iter->second.uv_coord2.y) * depth.rows, iter->second.uv_coord2.x * depth.cols)) / 1000.0f;
          float d3 = depth.type () == CV_32FC1 ?
              depth.at<float> ((1.0f - iter->second.uv_coord3.y) * depth.rows, iter->second.uv_coord3.x * depth.cols) :
              float (depth.at<unsigned short> ((1.0f - iter->second.uv_coord3.y) * depth.rows, iter->second.uv_coord3.x * depth.cols)) / 1000.0f;
          if (d1 <= 0.0f || !std::isfinite (d1) || d2 <= 0.0f || !std::isfinite (d2) || d3 <= 0.0f || !std::isfinite (d3))
          {
            if (depthSet)
            {
              // ignore pixels with no depth
              continue;
            }
            else if (d1 > 0.0f && std::isfinite (d1) && fabs (d1 - distanceToCam) > maxDepthError)
            {
              // ignore pixels with too much depth error
              continue;
            }
            else if (d2 > 0.0f && std::isfinite (d2) && fabs (d2 - distanceToCam) > maxDepthError)
            {
              // ignore pixels with too much depth error
              continue;
            }
            else if (d3 > 0.0f && std::isfinite (d3) && fabs (d3 - distanceToCam) > maxDepthError)
            {
              // ignore pixels with too much depth error
              continue;
            }
            // else it could be a window for which no depth is available on any camera
          }
          else
          {
            if (fabs (d1 - distanceToCam) > maxDepthError ||
                fabs (d2 - distanceToCam) > maxDepthError ||
                fabs (d3 - distanceToCam) > maxDepthError)
            {
              // ignore pixels with too much depth error
              continue;
            }
            currentDepthSet = true;
          }
        }

        if (vertexToPixels)
        {
          vertexToPixels->at (face.vertices[0]).insert (std::make_pair (current_cam, iter->second.uv_coord1));
          vertexToPixels->at (face.vertices[1]).insert (std::make_pair (current_cam, iter->second.uv_coord2));
          vertexToPixels->at (face.vertices[2]).insert (std::make_pair (current_cam, iter->second.uv_coord3));
        }

        //UDEBUG ("Process polygon %d cam=%d distanceToCam=%f", idx_face, current_cam, distanceToCam);

        float distance = distanceToCenter;
        if (distanceToCamPolicy)
        {
          distance = distanceToCam;
        }
        if (distance <= smallestWeight || (!depthSet && currentDepthSet))
        {
          cameraIndex = current_cam;
          smallestWeight = distance;
          uv_coords[0] = iter->second.uv_coord1;
          uv_coords[1] = iter->second.uv_coord2;
          uv_coords[2] = iter->second.uv_coord3;
          if (!depthSet && currentDepthSet)
          {
            depthSet = true;
          }
        }
      }
    }

    if (cameraIndex >= 0)
    {
      ++textured;
      mesh.tex_polygons[cameraIndex].push_back (face);
      mesh.tex_coordinates[cameraIndex].push_back (Eigen::Vector2f (uv_coords[0].x, uv_coords[0].y));
      mesh.tex_coordinates[cameraIndex].push_back (Eigen::Vector2f (uv_coords[1].x, uv_coords[1].y));
      mesh.tex_coordinates[cameraIndex].push_back (Eigen::Vector2f (uv_coords[2].x, uv_coords[2].y));
    }
    else
    {
      mesh.tex_polygons[cameras.size ()].push_back (face);
      mesh.tex_coordinates[cameras.size ()].push_back (Eigen::Vector2f (-1.0, -1.0));
      mesh.tex_coordinates[cameras.size ()].push_back (Eigen::Vector2f (-1.0, -1.0));
      mesh.tex_coordinates[cameras.size ()].push_back (Eigen::Vector2f (-1.0, -1.0));
    }
  }
  UINFO ("Process %d polygons...done! (%d textured)", (int)faces.size (), textured);

  return true;
}
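// Usage sketch (illustration only; variable names are hypothetical): texturing
// a single-sub-mesh TextureMesh with a set of RTAB-Map cameras, keeping the
// per-vertex pixel lookups for later color blending.
//   pcl::TextureMapping<pcl::PointXYZ> tm;
//   std::vector<std::map<int, pcl::PointXY> > vertexToPixels;
//   bool ok = tm.textureMeshwithMultipleCameras2 (mesh, cameras,
//                                                 /*state=*/0, &vertexToPixels,
//                                                 /*distanceToCamPolicy=*/false);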

///////////////////////////////////////////////////////////////////////////////////////////////
template<typename PointInT> inline void
pcl::TextureMapping<PointInT>::getTriangleCircumcenterAndSize (const pcl::PointXY &p1, const pcl::PointXY &p2, const pcl::PointXY &p3, pcl::PointXY &circomcenter, double &radius)
{
  // we simplify the problem by translating the triangle's origin to its first point
  pcl::PointXY ptB, ptC;
  ptB.x = p2.x - p1.x; ptB.y = p2.y - p1.y; // B' = B - A
  ptC.x = p3.x - p1.x; ptC.y = p3.y - p1.y; // C' = C - A

  double D = 2.0 * (ptB.x * ptC.y - ptB.y * ptC.x); // D' = 2 (B'x C'y - B'y C'x)

  // safety check to avoid division by zero (degenerate, collinear triangle)
  if (D == 0)
  {
    circomcenter.x = p1.x;
    circomcenter.y = p1.y;
  }
  else
  {
    // compute squares once
    double bx2 = ptB.x * ptB.x; // B'x^2
    double by2 = ptB.y * ptB.y; // B'y^2
    double cx2 = ptC.x * ptC.x; // C'x^2
    double cy2 = ptC.y * ptC.y; // C'y^2

    // compute the circumcenter's coordinates (translate back to the original coordinates)
    circomcenter.x = static_cast<float> (p1.x + (ptC.y * (bx2 + by2) - ptB.y * (cx2 + cy2)) / D);
    circomcenter.y = static_cast<float> (p1.y + (ptB.x * (cx2 + cy2) - ptC.x * (bx2 + by2)) / D);
  }

  radius = std::sqrt ((circomcenter.x - p1.x) * (circomcenter.x - p1.x) + (circomcenter.y - p1.y) * (circomcenter.y - p1.y));
}
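// The formula above is the standard 2D circumcenter: with A translated to the
// origin, the circumcenter U satisfies |U - B'| = |U - C'| = |U|, which gives
//   Ux = (C'y (B'x^2 + B'y^2) - B'y (C'x^2 + C'y^2)) / D
//   Uy = (B'x (C'x^2 + C'y^2) - C'x (B'x^2 + B'y^2)) / D
// with D = 2 (B'x C'y - B'y C'x); the radius is then |U|, i.e. the distance
// from the circumcenter back to p1.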

///////////////////////////////////////////////////////////////////////////////////////////////
template<typename PointInT> inline void
pcl::TextureMapping<PointInT>::getTriangleCircumcscribedCircleCentroid (const pcl::PointXY &p1, const pcl::PointXY &p2, const pcl::PointXY &p3, pcl::PointXY &circumcenter, double &radius)
{
  // compute the centroid's coordinates (used here as an approximate circle center)
  circumcenter.x = static_cast<float> (p1.x + p2.x + p3.x) / 3;
  circumcenter.y = static_cast<float> (p1.y + p2.y + p3.y) / 3;
  double r1 = (circumcenter.x - p1.x) * (circumcenter.x - p1.x) + (circumcenter.y - p1.y) * (circumcenter.y - p1.y);
  double r2 = (circumcenter.x - p2.x) * (circumcenter.x - p2.x) + (circumcenter.y - p2.y) * (circumcenter.y - p2.y);
  double r3 = (circumcenter.x - p3.x) * (circumcenter.x - p3.x) + (circumcenter.y - p3.y) * (circumcenter.y - p3.y);

  // radius: distance from the centroid to the farthest vertex, so the circle
  // still encloses the whole triangle
  radius = std::sqrt (std::max (r1, std::max (r2, r3)));
}

1481 
1483 template<typename PointInT> inline bool
1484 pcl::TextureMapping<PointInT>::getPointUVCoordinates(const PointInT &pt, const Camera &cam, pcl::PointXY &UV_coordinates)
1485 {
1486  if (pt.z > 0 && (max_distance_<=0.0f || pt.z<max_distance_))
1487  {
1488  // compute image center and dimension
1489  double sizeX = cam.width;
1490  double sizeY = cam.height;
1491  double cx, cy;
1492  if (cam.center_w > 0)
1493  cx = cam.center_w;
1494  else
1495  cx = sizeX / 2.0;
1496  if (cam.center_h > 0)
1497  cy = cam.center_h;
1498  else
1499  cy = sizeY / 2.0;
1500 
1501  double focal_x, focal_y;
1502  if (cam.focal_length_w > 0)
1503  focal_x = cam.focal_length_w;
1504  else
1505  focal_x = cam.focal_length;
1506  if (cam.focal_length_h > 0)
1507  focal_y = cam.focal_length_h;
1508  else
1509  focal_y = cam.focal_length;
1510 
1511  // project point on camera's image plane
1512  UV_coordinates.x = static_cast<float> ((focal_x * (pt.x / pt.z) + cx) / sizeX); //horizontal
1513  UV_coordinates.y = static_cast<float> ((focal_y * (pt.y / pt.z) + cy) / sizeY); //vertical
1514 
1515  if(cam.roi.size() == 4)
1516  {
1517  if( UV_coordinates.x < cam.roi[0]/sizeX ||
1518  UV_coordinates.y < cam.roi[1]/sizeY ||
1519  UV_coordinates.x > (cam.roi[0]+cam.roi[2])/sizeX ||
1520  UV_coordinates.y > (cam.roi[1]+cam.roi[3])/sizeY)
1521  {
1522  // point is NOT in region of interest of the camera
1523  UV_coordinates.x = -1.0f;
1524  UV_coordinates.y = -1.0f;
1525  return false;
1526  }
1527  }
1528 
1529  // point is visible!
1530  if (UV_coordinates.x >= 0.0 && UV_coordinates.x <= 1.0 && UV_coordinates.y >= 0.0 && UV_coordinates.y <= 1.0)
1531  {
1532  // point is visible by the camera
1533  // original code of PCL inverted y
1534  UV_coordinates.y = 1.0f - UV_coordinates.y;
1535  return (true);
1536  }
1537  }
1538 
1539  // point is NOT visible by the camera
1540  UV_coordinates.x = -1.0f;
1541  UV_coordinates.y = -1.0f;
1542  return (false);
1543 }
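
In short, this is the standard pinhole projection normalized to [0, 1]: u = (fx · x/z + cx) / width and, after the vertical flip, v = 1 − (fy · y/z + cy) / height. A self-contained worked example with made-up intrinsics (640×480 image, fx = fy = 525, principal point at the image center); it redoes the arithmetic directly rather than going through the Camera struct:

#include <cstdio>

int main ()
{
  // assumed intrinsics: 640x480 image, fx = fy = 525, principal point at the center
  const double fx = 525.0, fy = 525.0, cx = 320.0, cy = 240.0;
  const double w = 640.0, h = 480.0;

  // point expressed in the camera frame
  const double x = 0.5, y = 0.0, z = 2.0;

  const double u = (fx * (x / z) + cx) / w;        // = (131.25 + 320) / 640 ≈ 0.705
  const double v = 1.0 - (fy * (y / z) + cy) / h;  // = 1 - 0.5 = 0.5 (vertical flip, as above)
  std::printf ("u = %f, v = %f\n", u, v);
  return 0;
}
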
1544 
1546 template<typename PointInT> inline bool
1547 pcl::TextureMapping<PointInT>::checkPointInsideTriangle(const pcl::PointXY &p1, const pcl::PointXY &p2, const pcl::PointXY &p3, const pcl::PointXY &pt)
1548 {
1549  // Compute vectors
1550  Eigen::Vector2d v0, v1, v2;
1551  v0(0) = p3.x - p1.x; v0(1) = p3.y - p1.y; // v0 = C - A
1552  v1(0) = p2.x - p1.x; v1(1) = p2.y - p1.y; // v1 = B - A
1553  v2(0) = pt.x - p1.x; v2(1) = pt.y - p1.y; // v2 = P - A
1554 
1555  // Compute dot products
1556  double dot00 = v0.dot(v0); // dot00 = dot(v0, v0)
1557  double dot01 = v0.dot(v1); // dot01 = dot(v0, v1)
1558  double dot02 = v0.dot(v2); // dot02 = dot(v0, v2)
1559  double dot11 = v1.dot(v1); // dot11 = dot(v1, v1)
1560  double dot12 = v1.dot(v2); // dot12 = dot(v1, v2)
1561 
1562  // Compute barycentric coordinates (the denominator vanishes only for degenerate triangles)
1563  double invDenom = 1.0 / (dot00*dot11 - dot01*dot01);
1564  double u = (dot11*dot02 - dot01*dot12) * invDenom;
1565  double v = (dot00*dot12 - dot01*dot02) * invDenom;
1566 
1567  // Check if point is in triangle
1568  return ((u >= 0) && (v >= 0) && (u + v < 1));
1569 }
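
This is the classic barycentric point-in-triangle test: writing P = A + u·(C − A) + v·(B − A), the point is inside iff u ≥ 0, v ≥ 0 and u + v < 1. Note the asymmetry: the non-strict comparisons keep the two edges through A, while the strict u + v < 1 excludes the edge BC; for texel lookups this is harmless. A self-contained restatement as a sketch (the free function and its sample values are illustrative):

#include <cstdio>

// Illustrative restatement of the barycentric test above.
static bool insideTriangle (double ax, double ay, double bx, double by,
                            double cx, double cy, double px, double py)
{
  const double v0x = cx - ax, v0y = cy - ay;   // C - A
  const double v1x = bx - ax, v1y = by - ay;   // B - A
  const double v2x = px - ax, v2y = py - ay;   // P - A
  const double dot00 = v0x*v0x + v0y*v0y, dot01 = v0x*v1x + v0y*v1y;
  const double dot02 = v0x*v2x + v0y*v2y, dot11 = v1x*v1x + v1y*v1y;
  const double dot12 = v1x*v2x + v1y*v2y;
  const double invDenom = 1.0 / (dot00*dot11 - dot01*dot01);
  const double u = (dot11*dot02 - dot01*dot12) * invDenom;
  const double v = (dot00*dot12 - dot01*dot02) * invDenom;
  return u >= 0 && v >= 0 && u + v < 1;
}

int main ()
{
  // unit right triangle A(0,0), B(1,0), C(0,1)
  std::printf ("%d\n", insideTriangle (0,0, 1,0, 0,1, 0.25, 0.25)); // 1: u = v = 0.25
  std::printf ("%d\n", insideTriangle (0,0, 1,0, 0,1, 0.80, 0.80)); // 0: u + v = 1.6
  return 0;
}
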
1570 
1572 template<typename PointInT> inline bool
1573 pcl::TextureMapping<PointInT>::isFaceProjected (const Camera &camera, const PointInT &p1, const PointInT &p2, const PointInT &p3, pcl::PointXY &proj1, pcl::PointXY &proj2, pcl::PointXY &proj3)
1574 {
1575  return getPointUVCoordinates(p1, camera, proj1)
1576  &&
1577  getPointUVCoordinates(p2, camera, proj2)
1578  &&
1579  getPointUVCoordinates(p3, camera, proj3);
1580 }
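
Since the three projections are chained with short-circuiting &&, proj2 and proj3 are left untouched whenever an earlier vertex already falls outside the image or ROI, so the outputs are only meaningful when the function returns true. A minimal usage sketch; in stock PCL these helpers are protected, so the call is assumed to run inside TextureMapping or a derived class:

// Sketch only: assumes a scope with access to isFaceProjected, and that
// camera (Camera) and v1, v2, v3 (PointInT) are already in scope.
pcl::PointXY uv1, uv2, uv3;
if (isFaceProjected (camera, v1, v2, v3, uv1, uv2, uv3))
{
  // all three vertices project into the image: uv1, uv2 and uv3 hold valid
  // normalized coordinates and the face is a texturing candidate for camera
}
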
1581 
1582 #define PCL_INSTANTIATE_TextureMapping(T) \
1583  template class PCL_EXPORTS pcl::TextureMapping<T>;
1584 
1585 #endif /* PCL_SURFACE_IMPL_TEXTURE_MAPPING_HPP_ */
1586 