our_cvfh.hpp
00001 /*
00002  * Software License Agreement (BSD License)
00003  *
00004  *  Point Cloud Library (PCL) - www.pointclouds.org
00005  *  Copyright (c) 2010-2011, Willow Garage, Inc.
00006  *  Copyright (c) 2012-, Open Perception, Inc.
00007  *
00008  *  All rights reserved.
00009  *
00010  *  Redistribution and use in source and binary forms, with or without
00011  *  modification, are permitted provided that the following conditions
00012  *  are met:
00013  *
00014  *   * Redistributions of source code must retain the above copyright
00015  *     notice, this list of conditions and the following disclaimer.
00016  *   * Redistributions in binary form must reproduce the above
00017  *     copyright notice, this list of conditions and the following
00018  *     disclaimer in the documentation and/or other materials provided
00019  *     with the distribution.
00020  *   * Neither the name of the copyright holder(s) nor the names of its
00021  *     contributors may be used to endorse or promote products derived
00022  *     from this software without specific prior written permission.
00023  *
00024  *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
00025  *  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
00026  *  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
00027  *  FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
00028  *  COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
00029  *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
00030  *  BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
00031  *  LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
00032  *  CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
00033  *  LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
00034  *  ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
00035  *  POSSIBILITY OF SUCH DAMAGE.
00036  *
00037  * $Id: cvfh.hpp 5311 2012-03-26 22:02:04Z aaldoma $
00038  *
00039  */
00040 
00041 #ifndef PCL_FEATURES_IMPL_OURCVFH_H_
00042 #define PCL_FEATURES_IMPL_OURCVFH_H_
00043 
00044 #include <pcl/features/our_cvfh.h>
00045 #include <pcl/features/vfh.h>
00046 #include <pcl/features/normal_3d.h>
00047 #include <pcl/features/pfh_tools.h>
00048 #include <pcl/common/transforms.h>
00049 
00051 template<typename PointInT, typename PointNT, typename PointOutT> void
00052 pcl::OURCVFHEstimation<PointInT, PointNT, PointOutT>::compute (PointCloudOut &output)
00053 {
00054   if (!Feature<PointInT, PointOutT>::initCompute ())
00055   {
00056     output.width = output.height = 0;
00057     output.points.clear ();
00058     return;
00059   }
00060   // Resize the output dataset
00061   // Important! We should allocate precisely as many elements as we will need; otherwise
00062   // we risk pre-allocating too much memory, which could lead to bad_alloc
00063   // (see http://dev.pointclouds.org/issues/657)
00064   output.width = output.height = 1;
00065   output.points.resize (1);
00066 
00067   // Perform the actual feature computation
00068   computeFeature (output);
00069 
00070   Feature<PointInT, PointOutT>::deinitCompute ();
00071 }
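
// ---------------------------------------------------------------------------
// Illustrative usage sketch (added for clarity, not part of the original file):
// a minimal, hedged example of how OURCVFHEstimation is typically driven through
// the standard PCL Feature interface. The point types, variable names and
// threshold values are assumptions chosen for illustration only; the setters
// correspond to the parameters used below (eps_angle_threshold_, curv_threshold_,
// normalize_bins_).
//
//   pcl::PointCloud<pcl::PointXYZ>::Ptr cloud (new pcl::PointCloud<pcl::PointXYZ>);
//   pcl::PointCloud<pcl::Normal>::Ptr normals (new pcl::PointCloud<pcl::Normal>);
//   // ... load `cloud` and estimate `normals` (e.g. with pcl::NormalEstimation) ...
//
//   pcl::search::KdTree<pcl::PointXYZ>::Ptr tree (new pcl::search::KdTree<pcl::PointXYZ>);
//   pcl::OURCVFHEstimation<pcl::PointXYZ, pcl::Normal, pcl::VFHSignature308> ourcvfh;
//   ourcvfh.setInputCloud (cloud);
//   ourcvfh.setInputNormals (normals);
//   ourcvfh.setSearchMethod (tree);
//   ourcvfh.setEPSAngleThreshold (0.13f);    // ~7.5 deg smooth-region threshold (illustrative value)
//   ourcvfh.setCurvatureThreshold (0.025f);  // illustrative value
//   ourcvfh.setNormalizeBins (false);
//
//   pcl::PointCloud<pcl::VFHSignature308> descriptors;
//   ourcvfh.compute (descriptors);           // one signature per valid SGURF transformation
// ---------------------------------------------------------------------------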
00072 
00074 template<typename PointInT, typename PointNT, typename PointOutT> void
00075 pcl::OURCVFHEstimation<PointInT, PointNT, PointOutT>::extractEuclideanClustersSmooth (const pcl::PointCloud<pcl::PointNormal> &cloud,
00076                                                                                         const pcl::PointCloud<pcl::PointNormal> &normals,
00077                                                                                         float tolerance,
00078                                                                                         const pcl::search::Search<pcl::PointNormal>::Ptr &tree,
00079                                                                                         std::vector<pcl::PointIndices> &clusters, double eps_angle,
00080                                                                                         unsigned int min_pts_per_cluster,
00081                                                                                         unsigned int max_pts_per_cluster)
00082 {
00083   if (tree->getInputCloud ()->points.size () != cloud.points.size ())
00084   {
00085     PCL_ERROR ("[pcl::extractEuclideanClustersSmooth] Tree built for a different point cloud dataset (%zu) than the input cloud (%zu)!\n", tree->getInputCloud ()->points.size (), cloud.points.size ());
00086     return;
00087   }
00088   if (cloud.points.size () != normals.points.size ())
00089   {
00090     PCL_ERROR ("[pcl::extractEuclideanClustersSmooth] The number of points in the input cloud (%zu) differs from the number of normals (%zu)!\n", cloud.points.size (), normals.points.size ());
00091     return;
00092   }
00093 
00094   // Create a bool vector of processed point indices, and initialize it to false
00095   std::vector<bool> processed (cloud.points.size (), false);
00096 
00097   std::vector<int> nn_indices;
00098   std::vector<float> nn_distances;
00099   // Process all points in the indices vector
00100   for (int i = 0; i < static_cast<int> (cloud.points.size ()); ++i)
00101   {
00102     if (processed[i])
00103       continue;
00104 
00105     std::vector<unsigned int> seed_queue;
00106     int sq_idx = 0;
00107     seed_queue.push_back (i);
00108 
00109     processed[i] = true;
00110 
00111     while (sq_idx < static_cast<int> (seed_queue.size ()))
00112     {
00113       // Search for sq_idx
00114       if (!tree->radiusSearch (seed_queue[sq_idx], tolerance, nn_indices, nn_distances))
00115       {
00116         sq_idx++;
00117         continue;
00118       }
00119 
00120       for (size_t j = 1; j < nn_indices.size (); ++j) // skip nn_indices[0]: it is the query point seed_queue[sq_idx] itself
00121       {
00122         if (processed[nn_indices[j]]) // Has this point been processed before ?
00123           continue;
00124 
00125         // Dot product of the two (unit) normals, in [-1, 1]; clamp it so that rounding
00126         // noise cannot push it outside the domain of acos () below
00127         double dot_p = normals.points[seed_queue[sq_idx]].normal[0] * normals.points[nn_indices[j]].normal[0]
00128             + normals.points[seed_queue[sq_idx]].normal[1] * normals.points[nn_indices[j]].normal[1]
00129             + normals.points[seed_queue[sq_idx]].normal[2] * normals.points[nn_indices[j]].normal[2];
00130         dot_p = std::max (-1.0, std::min (1.0, dot_p));
00131 
00132         if (fabs (acos (dot_p)) < eps_angle)
00133         {
00134           processed[nn_indices[j]] = true;
00135           seed_queue.push_back (nn_indices[j]);
00136         }
00137       }
00138 
00139       sq_idx++;
00140     }
00141 
00142     // If this queue is satisfactory, add to the clusters
00143     if (seed_queue.size () >= min_pts_per_cluster && seed_queue.size () <= max_pts_per_cluster)
00144     {
00145       pcl::PointIndices r;
00146       r.indices.resize (seed_queue.size ());
00147       for (size_t j = 0; j < seed_queue.size (); ++j)
00148         r.indices[j] = seed_queue[j];
00149 
00150       std::sort (r.indices.begin (), r.indices.end ());
00151       r.indices.erase (std::unique (r.indices.begin (), r.indices.end ()), r.indices.end ());
00152 
00153       r.header = cloud.header;
00154       clusters.push_back (r); // We could avoid a copy by working directly in the vector
00155     }
00156   }
00157 }
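
// Note (added for clarity): a neighbour joins the growing cluster when it lies within
// `tolerance` of a point already in the cluster (radiusSearch) and the angle between the
// two normals satisfies acos (n_seed . n_neighbour) < eps_angle, i.e. the surface stays
// smooth across the region; clusters with fewer than min_pts_per_cluster or more than
// max_pts_per_cluster points are discarded.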
00158 
00160 template<typename PointInT, typename PointNT, typename PointOutT> void
00161 pcl::OURCVFHEstimation<PointInT, PointNT, PointOutT>::filterNormalsWithHighCurvature (const pcl::PointCloud<PointNT> & cloud,
00162                                                                                         std::vector<int> &indices_to_use,
00163                                                                                         std::vector<int> &indices_out, std::vector<int> &indices_in,
00164                                                                                         float threshold)
00165 {
00166   indices_out.resize (cloud.points.size ());
00167   indices_in.resize (cloud.points.size ());
00168 
00169   size_t in, out;
00170   in = out = 0;
00171 
00172   for (int i = 0; i < static_cast<int> (indices_to_use.size ()); i++)
00173   {
00174     if (cloud.points[indices_to_use[i]].curvature > threshold)
00175     {
00176       indices_out[out] = indices_to_use[i];
00177       out++;
00178     }
00179     else
00180     {
00181       indices_in[in] = indices_to_use[i];
00182       in++;
00183     }
00184   }
00185 
00186   indices_out.resize (out);
00187   indices_in.resize (in);
00188 }
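
// Note (added for clarity): indices_in receives the points whose curvature is at or below
// `threshold` (smooth regions, later re-used for normal re-estimation and smooth clustering),
// while indices_out receives the high-curvature points (edges and noisy areas) that are
// excluded from clustering.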
00189 
00190 template<typename PointInT, typename PointNT, typename PointOutT> bool
00191 pcl::OURCVFHEstimation<PointInT, PointNT, PointOutT>::sgurf (Eigen::Vector3f & centroid, Eigen::Vector3f & normal_centroid,
00192                                                                PointInTPtr & processed, std::vector<Eigen::Matrix4f, Eigen::aligned_allocator<Eigen::Matrix4f> > & transformations,
00193                                                                PointInTPtr & grid, pcl::PointIndices & indices)
00194 {
00195 
00196   Eigen::Vector3f plane_normal;
00197   plane_normal[0] = -centroid[0];
00198   plane_normal[1] = -centroid[1];
00199   plane_normal[2] = -centroid[2];
00200   Eigen::Vector3f z_vector = Eigen::Vector3f::UnitZ ();
00201   plane_normal.normalize ();
00202   Eigen::Vector3f axis = plane_normal.cross (z_vector);
00203   double rotation = -asin (axis.norm ());
00204   axis.normalize ();
00205 
00206   Eigen::Affine3f transformPC (Eigen::AngleAxisf (static_cast<float> (rotation), axis));
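
  // Note (added for clarity): transformPC is a pure rotation built from the angle between
  // plane_normal (-centroid normalized, i.e. pointing from the cluster centroid towards the
  // sensor origin, assuming the cloud is given in the sensor frame) and the z axis. The copy
  // `grid` is expressed in this rotated frame, the SGURF axes are estimated there, and
  // createTransFromAxes () later composes them with transformPC and center_mat into the
  // final reference-frame transformation(s).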
00207 
00208   grid->points.resize (processed->points.size ());
00209   for (size_t k = 0; k < processed->points.size (); k++)
00210     grid->points[k].getVector4fMap () = processed->points[k].getVector4fMap ();
00211 
00212   pcl::transformPointCloud (*grid, *grid, transformPC);
00213 
00214   Eigen::Vector4f centroid4f (centroid[0], centroid[1], centroid[2], 0);
00215   Eigen::Vector4f normal_centroid4f (normal_centroid[0], normal_centroid[1], normal_centroid[2], 0);
00216 
00217   centroid4f = transformPC * centroid4f;
00218   normal_centroid4f = transformPC * normal_centroid4f;
00219 
00220   Eigen::Vector3f centroid3f (centroid4f[0], centroid4f[1], centroid4f[2]);
00221 
00222   Eigen::Vector4f farthest_away;
00223   pcl::getMaxDistance (*grid, indices.indices, centroid4f, farthest_away);
00224   farthest_away[3] = 0;
00225 
00226   float max_dist = (farthest_away - centroid4f).norm ();
00227 
00228   pcl::demeanPointCloud (*grid, centroid4f, *grid);
00229 
00230   Eigen::Matrix4f center_mat;
00231   center_mat.setIdentity (4, 4);
00232   center_mat (0, 3) = -centroid4f[0];
00233   center_mat (1, 3) = -centroid4f[1];
00234   center_mat (2, 3) = -centroid4f[2];
00235 
00236   Eigen::Matrix3f scatter;
00237   scatter.setZero ();
00238   float sum_w = 0.f;
00239 
00240   // Weighted scatter matrix over the cluster points, with weight w_k = max_dist - ||p_k||
00241   for (int k = 0; k < static_cast<int> (indices.indices.size ()); k++)
00242   {
00243     Eigen::Vector3f pvector = grid->points[indices.indices[k]].getVector3fMap ();
00244     float d_k = (pvector).norm ();
00245     float w = (max_dist - d_k);
00246     Eigen::Vector3f diff = (pvector);
00247     Eigen::Matrix3f mat = diff * diff.transpose ();
00248     scatter = scatter + mat * w;
00249     sum_w += w;
00250   }
00251 
00252   scatter /= sum_w;
00253 
00254   Eigen::JacobiSVD <Eigen::MatrixXf> svd (scatter, Eigen::ComputeFullV);
00255   Eigen::Vector3f evx = svd.matrixV ().col (0);
00256   Eigen::Vector3f evy = svd.matrixV ().col (1);
00257   Eigen::Vector3f evz = svd.matrixV ().col (2);
00258   Eigen::Vector3f evxminus = evx * -1;
00259   Eigen::Vector3f evyminus = evy * -1;
00260   Eigen::Vector3f evzminus = evz * -1;
00261 
00262   float s_xplus, s_xminus, s_yplus, s_yminus;
00263   s_xplus = s_xminus = s_yplus = s_yminus = 0.f;
00264 
00265   // Disambiguate the signs of the x and y axes of the reference frame using all points
00266   for (int k = 0; k < static_cast<int> (grid->points.size ()); k++)
00267   {
00268     Eigen::Vector3f pvector = grid->points[k].getVector3fMap ();
00269     float dist_x, dist_y;
00270     dist_x = std::abs (evx.dot (pvector));
00271     dist_y = std::abs (evy.dot (pvector));
00272 
00273     if ((pvector).dot (evx) >= 0)
00274       s_xplus += dist_x;
00275     else
00276       s_xminus += dist_x;
00277 
00278     if ((pvector).dot (evy) >= 0)
00279       s_yplus += dist_y;
00280     else
00281       s_yminus += dist_y;
00282 
00283   }
00284 
00285   if (s_xplus < s_xminus)
00286     evx = evxminus;
00287 
00288   if (s_yplus < s_yminus)
00289     evy = evyminus;
00290 
00291   //select the axis that could be disambiguated more easily
00292   float fx, fy;
00293   float max_x = static_cast<float> (std::max (s_xplus, s_xminus));
00294   float min_x = static_cast<float> (std::min (s_xplus, s_xminus));
00295   float max_y = static_cast<float> (std::max (s_yplus, s_yminus));
00296   float min_y = static_cast<float> (std::min (s_yplus, s_yminus));
00297 
00298   fx = (min_x / max_x);
00299   fy = (min_y / max_y);
00300 
00301   Eigen::Vector3f normal3f = Eigen::Vector3f (normal_centroid4f[0], normal_centroid4f[1], normal_centroid4f[2]);
00302   if (normal3f.dot (evz) < 0)
00303     evz = evzminus;
00304 
00305   // If fx or fy is close to 1, the corresponding axis was hard to disambiguate.
00306   // Handle the case where both axes are (almost) equally easy or difficult to disambiguate, i.e. fx and fy are very close.
00307 
00308   float max_axis = std::max (fx, fy);
00309   float min_axis = std::min (fx, fy);
00310 
00311   if ((min_axis / max_axis) > axis_ratio_)
00312   {
00313     PCL_WARN("Both axes are equally easy/difficult to disambiguate\n");
00314 
00315     Eigen::Vector3f evy_copy = evy;
00316     Eigen::Vector3f evxminus = evx * -1;
00317     Eigen::Vector3f evyminus = evy * -1;
00318 
00319     if (min_axis > min_axis_value_)
00320     {
00321       //combination of all possibilities
00322       evy = evx.cross (evz);
00323       Eigen::Matrix4f trans = createTransFromAxes (evx, evy, evz, transformPC, center_mat);
00324       transformations.push_back (trans);
00325 
00326       evx = evxminus;
00327       evy = evx.cross (evz);
00328       trans = createTransFromAxes (evx, evy, evz, transformPC, center_mat);
00329       transformations.push_back (trans);
00330 
00331       evx = evy_copy;
00332       evy = evx.cross (evz);
00333       trans = createTransFromAxes (evx, evy, evz, transformPC, center_mat);
00334       transformations.push_back (trans);
00335 
00336       evx = evyminus;
00337       evy = evx.cross (evz);
00338       trans = createTransFromAxes (evx, evy, evz, transformPC, center_mat);
00339       transformations.push_back (trans);
00340 
00341     }
00342     else
00343     {
00344       // 1st case: evx selected as the x axis
00345       evy = evx.cross (evz);
00346       Eigen::Matrix4f trans = createTransFromAxes (evx, evy, evz, transformPC, center_mat);
00347       transformations.push_back (trans);
00348 
00349       // 2nd case: evy selected as the x axis
00350       evx = evy_copy;
00351       evy = evx.cross (evz);
00352       trans = createTransFromAxes (evx, evy, evz, transformPC, center_mat);
00353       transformations.push_back (trans);
00354     }
00355   }
00356   else
00357   {
00358     if (fy < fx)
00359     {
00360       evx = evy;
00361       fx = fy;
00362     }
00363 
00364     evy = evx.cross (evz);
00365     Eigen::Matrix4f trans = createTransFromAxes (evx, evy, evz, transformPC, center_mat);
00366     transformations.push_back (trans);
00367 
00368   }
00369 
00370   return true;
00371 }
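
// Note (added for clarity): depending on how well the x/y axes could be disambiguated,
// sgurf () emits one transformation (unambiguous case), two (only the choice between the
// two candidate axes is ambiguous) or four (axis choice and sign both ambiguous);
// computeRFAndShapeDistribution () then produces one OUR-CVFH signature per transformation.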
00372 
00374 template<typename PointInT, typename PointNT, typename PointOutT> void
00375 pcl::OURCVFHEstimation<PointInT, PointNT, PointOutT>::computeRFAndShapeDistribution (PointInTPtr & processed, PointCloudOut & output,
00376                                                                                      std::vector<pcl::PointIndices> & cluster_indices)
00377 {
00378   PointCloudOut ourcvfh_output;
00379   for (size_t i = 0; i < centroids_dominant_orientations_.size (); i++)
00380   {
00381 
00382     std::vector < Eigen::Matrix4f, Eigen::aligned_allocator<Eigen::Matrix4f> > transformations;
00383     PointInTPtr grid (new pcl::PointCloud<PointInT>);
00384     sgurf (centroids_dominant_orientations_[i], dominant_normals_[i], processed, transformations, grid, cluster_indices[i]);
00385 
00386     for (size_t t = 0; t < transformations.size (); t++)
00387     {
00388 
00389       pcl::transformPointCloud (*processed, *grid, transformations[t]);
00390       transforms_.push_back (transformations[t]);
00391       valid_transforms_.push_back (true);
00392 
00393       std::vector < Eigen::VectorXf > quadrants (8);
00394       int size_hists = 13;
00395       int num_hists = 8;
00396       for (int k = 0; k < num_hists; k++)
00397         quadrants[k].setZero (size_hists);
00398 
00399       Eigen::Vector4f centroid_p;
00400       centroid_p.setZero ();
00401       Eigen::Vector4f max_pt;
00402       pcl::getMaxDistance (*grid, centroid_p, max_pt);
00403       max_pt[3] = 0;
00404       double distance_normalization_factor = (centroid_p - max_pt).norm ();
00405 
00406       float hist_incr;
00407       if (normalize_bins_)
00408         hist_incr = 100.0f / static_cast<float> (grid->points.size () - 1);
00409       else
00410         hist_incr = 1.0f;
00411 
00412       float * weights = new float[num_hists];
00413       float sigma = 0.01f; //1cm
00414       float sigma_sq = sigma * sigma;
00415 
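      // Note (added for clarity): every point contributes to all 8 octant histograms with
      // soft weights. Per axis the split between the two half-spaces is
      // 0.5 +/- 0.5 * (1 - exp (-coord^2 / (2 * sigma^2))), so points near an octant
      // boundary spread their contribution and the bins stay stable under small
      // perturbations of the reference frame.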
00416       for (int k = 0; k < static_cast<int> (grid->points.size ()); k++)
00417       {
00418         Eigen::Vector4f p = grid->points[k].getVector4fMap ();
00419         p[3] = 0.f;
00420         float d = p.norm ();
00421 
00422         //compute weight for all octants
00423         float wx = 1.f - std::exp (-((p[0] * p[0]) / (2.f * sigma_sq))); //how is the weight distributed among two semi-cubes
00424         float wy = 1.f - std::exp (-((p[1] * p[1]) / (2.f * sigma_sq)));
00425         float wz = 1.f - std::exp (-((p[2] * p[2]) / (2.f * sigma_sq)));
00426 
00427         //distribute the weights using the x-coordinate
00428         if (p[0] >= 0)
00429         {
00430           for (size_t ii = 0; ii <= 3; ii++)
00431             weights[ii] = 0.5f - wx * 0.5f;
00432 
00433           for (size_t ii = 4; ii <= 7; ii++)
00434             weights[ii] = 0.5f + wx * 0.5f;
00435         }
00436         else
00437         {
00438           for (size_t ii = 0; ii <= 3; ii++)
00439             weights[ii] = 0.5f + wx * 0.5f;
00440 
00441           for (size_t ii = 4; ii <= 7; ii++)
00442             weights[ii] = 0.5f - wx * 0.5f;
00443         }
00444 
00445         //distribute the weights using the y-coordinate
00446         if (p[1] >= 0)
00447         {
00448           for (size_t ii = 0; ii <= 1; ii++)
00449             weights[ii] *= 0.5f - wy * 0.5f;
00450           for (size_t ii = 4; ii <= 5; ii++)
00451             weights[ii] *= 0.5f - wy * 0.5f;
00452 
00453           for (size_t ii = 2; ii <= 3; ii++)
00454             weights[ii] *= 0.5f + wy * 0.5f;
00455 
00456           for (size_t ii = 6; ii <= 7; ii++)
00457             weights[ii] *= 0.5f + wy * 0.5f;
00458         }
00459         else
00460         {
00461           for (size_t ii = 0; ii <= 1; ii++)
00462             weights[ii] *= 0.5f + wy * 0.5f;
00463           for (size_t ii = 4; ii <= 5; ii++)
00464             weights[ii] *= 0.5f + wy * 0.5f;
00465 
00466           for (size_t ii = 2; ii <= 3; ii++)
00467             weights[ii] *= 0.5f - wy * 0.5f;
00468 
00469           for (size_t ii = 6; ii <= 7; ii++)
00470             weights[ii] *= 0.5f - wy * 0.5f;
00471         }
00472 
00473         //distribute the weights using the z-coordinate
00474         if (p[2] >= 0)
00475         {
00476           for (size_t ii = 0; ii <= 7; ii += 2)
00477             weights[ii] *= 0.5f - wz * 0.5f;
00478 
00479           for (size_t ii = 1; ii <= 7; ii += 2)
00480             weights[ii] *= 0.5f + wz * 0.5f;
00481 
00482         }
00483         else
00484         {
00485           for (size_t ii = 0; ii <= 7; ii += 2)
00486             weights[ii] *= 0.5f + wz * 0.5f;
00487 
00488           for (size_t ii = 1; ii <= 7; ii += 2)
00489             weights[ii] *= 0.5f - wz * 0.5f;
00490         }
00491 
00492         int h_index = std::min (size_hists - 1, static_cast<int> (std::floor (size_hists * (d / distance_normalization_factor)))); // clamp: the farthest point would otherwise index one bin past the end
00493         for (int j = 0; j < num_hists; j++)
00494           quadrants[j][h_index] += hist_incr * weights[j];
00495 
00496       }
00497 
00498       //copy to the cvfh signature
00499       PointCloudOut vfh_signature;
00500       vfh_signature.points.resize (1);
00501       vfh_signature.width = vfh_signature.height = 1;
00502       for (int d = 0; d < 308; ++d)
00503         vfh_signature.points[0].histogram[d] = output.points[i].histogram[d];
00504 
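      // Note (added for clarity): the first 135 bins (45 x 3) of the VFH signature computed
      // earlier are kept; the 8 octants x 13 distance bins (104 values) of the shape
      // distribution are written starting at position 135.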
00505       int pos = 45 * 3;
00506       for (int k = 0; k < num_hists; k++)
00507       {
00508         for (int ii = 0; ii < size_hists; ii++, pos++)
00509         {
00510           vfh_signature.points[0].histogram[pos] = quadrants[k][ii];
00511         }
00512       }
00513 
00514       ourcvfh_output.points.push_back (vfh_signature.points[0]);
00515 
00516       delete[] weights;
00517     }
00518   }
00519 
00520   output = ourcvfh_output;
00521 }
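
// Note (added for clarity): `output` is replaced by ourcvfh_output above, so after this call
// the descriptor cloud holds one VFHSignature308 per (cluster, reference-frame transformation)
// pair rather than one per cluster.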
00522 
00524 template<typename PointInT, typename PointNT, typename PointOutT> void
00525 pcl::OURCVFHEstimation<PointInT, PointNT, PointOutT>::computeFeature (PointCloudOut &output)
00526 {
00527   if (refine_clusters_ <= 0.f)
00528     refine_clusters_ = 1.f;
00529 
00530   // Check if input was set
00531   if (!normals_)
00532   {
00533     PCL_ERROR ("[pcl::%s::computeFeature] No input dataset containing normals was given!\n", getClassName ().c_str ());
00534     output.width = output.height = 0;
00535     output.points.clear ();
00536     return;
00537   }
00538   if (normals_->points.size () != surface_->points.size ())
00539   {
00540     PCL_ERROR ("[pcl::%s::computeFeature] The number of points in the input dataset differs from the number of points in the dataset containing the normals!\n", getClassName ().c_str ());
00541     output.width = output.height = 0;
00542     output.points.clear ();
00543     return;
00544   }
00545 
00546   centroids_dominant_orientations_.clear ();
00547   clusters_.clear ();
00548   transforms_.clear ();
00549   dominant_normals_.clear ();
00550 
00551   // ---[ Step 0: remove normals with high curvature
00552   std::vector<int> indices_out;
00553   std::vector<int> indices_in;
00554   filterNormalsWithHighCurvature (*normals_, *indices_, indices_out, indices_in, curv_threshold_);
00555 
00556   pcl::PointCloud<pcl::PointNormal>::Ptr normals_filtered_cloud (new pcl::PointCloud<pcl::PointNormal> ());
00557   normals_filtered_cloud->width = static_cast<uint32_t> (indices_in.size ());
00558   normals_filtered_cloud->height = 1;
00559   normals_filtered_cloud->points.resize (normals_filtered_cloud->width);
00560 
00561   std::vector<int> indices_from_nfc_to_indices;
00562   indices_from_nfc_to_indices.resize (indices_in.size ());
00563 
00564   for (size_t i = 0; i < indices_in.size (); ++i)
00565   {
00566     normals_filtered_cloud->points[i].x = surface_->points[indices_in[i]].x;
00567     normals_filtered_cloud->points[i].y = surface_->points[indices_in[i]].y;
00568     normals_filtered_cloud->points[i].z = surface_->points[indices_in[i]].z;
00569     //normals_filtered_cloud->points[i].getNormalVector4fMap() = normals_->points[indices_in[i]].getNormalVector4fMap();
00570     indices_from_nfc_to_indices[i] = indices_in[i];
00571   }
00572 
00573   std::vector<pcl::PointIndices> clusters;
00574 
00575   if (normals_filtered_cloud->points.size () >= min_points_)
00576   {
00577     //recompute normals and use them for clustering
00578     {
00579       KdTreePtr normals_tree_filtered (new pcl::search::KdTree<pcl::PointNormal> (false));
00580       normals_tree_filtered->setInputCloud (normals_filtered_cloud);
00581       pcl::NormalEstimation<PointNormal, PointNormal> n3d;
00582       n3d.setRadiusSearch (radius_normals_);
00583       n3d.setSearchMethod (normals_tree_filtered);
00584       n3d.setInputCloud (normals_filtered_cloud);
00585       n3d.compute (*normals_filtered_cloud);
00586     }
00587 
00588     KdTreePtr normals_tree (new pcl::search::KdTree<pcl::PointNormal> (false));
00589     normals_tree->setInputCloud (normals_filtered_cloud);
00590 
00591     extractEuclideanClustersSmooth (*normals_filtered_cloud, *normals_filtered_cloud, cluster_tolerance_, normals_tree, clusters,
00592                                     eps_angle_threshold_, static_cast<unsigned int> (min_points_));
00593 
00594     std::vector<pcl::PointIndices> clusters_filtered;
00595     int cluster_filtered_idx = 0;
00596     for (size_t i = 0; i < clusters.size (); i++)
00597     {
00598 
00599       pcl::PointIndices pi;
00600       pcl::PointIndices pi_cvfh;
00601       pcl::PointIndices pi_filtered;
00602 
00603       clusters_.push_back (pi);
00604       clusters_filtered.push_back (pi_filtered);
00605 
00606       Eigen::Vector4f avg_normal = Eigen::Vector4f::Zero ();
00607       Eigen::Vector4f avg_centroid = Eigen::Vector4f::Zero ();
00608 
00609       for (size_t j = 0; j < clusters[i].indices.size (); j++)
00610       {
00611         avg_normal += normals_filtered_cloud->points[clusters[i].indices[j]].getNormalVector4fMap ();
00612         avg_centroid += normals_filtered_cloud->points[clusters[i].indices[j]].getVector4fMap ();
00613       }
00614 
00615       avg_normal /= static_cast<float> (clusters[i].indices.size ());
00616       avg_centroid /= static_cast<float> (clusters[i].indices.size ());
00617       avg_normal.normalize ();
00618 
00619       Eigen::Vector3f avg_norm (avg_normal[0], avg_normal[1], avg_normal[2]);
00620       Eigen::Vector3f avg_dominant_centroid (avg_centroid[0], avg_centroid[1], avg_centroid[2]);
00621 
00622       for (size_t j = 0; j < clusters[i].indices.size (); j++)
00623       {
00624         //decide if normal should be added
00625         double dot_p = avg_normal.dot (normals_filtered_cloud->points[clusters[i].indices[j]].getNormalVector4fMap ());
00626         if (fabs (acos (dot_p)) < (eps_angle_threshold_ * refine_clusters_))
00627         {
00628           clusters_[cluster_filtered_idx].indices.push_back (indices_from_nfc_to_indices[clusters[i].indices[j]]);
00629           clusters_filtered[cluster_filtered_idx].indices.push_back (clusters[i].indices[j]);
00630         }
00631       }
00632 
00633       // Remove the cluster we just added if no points survived the refinement step
00634       if (clusters_[cluster_filtered_idx].indices.size () == 0)
00635       {
00636         clusters_.pop_back ();          // erase (end ()) would be undefined behaviour
00637         clusters_filtered.pop_back ();
00638       }
00639       else
00640         cluster_filtered_idx++;
00641     }
00642 
00643     clusters = clusters_filtered;
00644 
00645   }
00646 
00647   pcl::VFHEstimation<PointInT, PointNT, pcl::VFHSignature308> vfh;
00648   vfh.setInputCloud (surface_);
00649   vfh.setInputNormals (normals_);
00650   vfh.setIndices (indices_);
00651   vfh.setSearchMethod (this->tree_);
00652   vfh.setUseGivenNormal (true);
00653   vfh.setUseGivenCentroid (true);
00654   vfh.setNormalizeBins (normalize_bins_);
00655   output.height = 1;
00656 
00657   // ---[ Step 1b : check if any dominant cluster was found
00658   if (clusters.size () > 0)
00659   { // ---[ Step 1b.1 : If yes, compute CVFH using the cluster information
00660 
00661     for (size_t i = 0; i < clusters.size (); ++i) // for each cluster
00663     {
00664       Eigen::Vector4f avg_normal = Eigen::Vector4f::Zero ();
00665       Eigen::Vector4f avg_centroid = Eigen::Vector4f::Zero ();
00666 
00667       for (size_t j = 0; j < clusters[i].indices.size (); j++)
00668       {
00669         avg_normal += normals_filtered_cloud->points[clusters[i].indices[j]].getNormalVector4fMap ();
00670         avg_centroid += normals_filtered_cloud->points[clusters[i].indices[j]].getVector4fMap ();
00671       }
00672 
00673       avg_normal /= static_cast<float> (clusters[i].indices.size ());
00674       avg_centroid /= static_cast<float> (clusters[i].indices.size ());
00675       avg_normal.normalize ();
00676 
00677       Eigen::Vector3f avg_norm (avg_normal[0], avg_normal[1], avg_normal[2]);
00678       Eigen::Vector3f avg_dominant_centroid (avg_centroid[0], avg_centroid[1], avg_centroid[2]);
00679 
00680       //append normal and centroid for the clusters
00681       dominant_normals_.push_back (avg_norm);
00682       centroids_dominant_orientations_.push_back (avg_dominant_centroid);
00683     }
00684 
00685     //compute modified VFH for all dominant clusters and add them to the list!
00686     output.points.resize (dominant_normals_.size ());
00687     output.width = static_cast<uint32_t> (dominant_normals_.size ());
00688 
00689     for (size_t i = 0; i < dominant_normals_.size (); ++i)
00690     {
00691       //configure VFH computation for CVFH
00692       vfh.setNormalToUse (dominant_normals_[i]);
00693       vfh.setCentroidToUse (centroids_dominant_orientations_[i]);
00694       pcl::PointCloud<pcl::VFHSignature308> vfh_signature;
00695       vfh.compute (vfh_signature);
00696       output.points[i] = vfh_signature.points[0];
00697     }
00698 
00699     //finish filling the descriptor with the shape distribution
00700     PointInTPtr cloud_input (new pcl::PointCloud<PointInT>);
00701     pcl::copyPointCloud (*surface_, *indices_, *cloud_input);
00702     computeRFAndShapeDistribution (cloud_input, output, clusters_); //this will set transforms_
00703   }
00704   else
00705   { // ---[ Step 1b.2 : If not, compute a single VFH using all the object points
00706 
00707     PCL_WARN("No clusters were found in the surface... using VFH...\n");
00708     Eigen::Vector4f avg_centroid;
00709     pcl::compute3DCentroid (*surface_, avg_centroid);
00710     Eigen::Vector3f cloud_centroid (avg_centroid[0], avg_centroid[1], avg_centroid[2]);
00711     centroids_dominant_orientations_.push_back (cloud_centroid);
00712 
00713     //configure VFH computation using all object points
00714     vfh.setCentroidToUse (cloud_centroid);
00715     vfh.setUseGivenNormal (false);
00716 
00717     pcl::PointCloud<pcl::VFHSignature308> vfh_signature;
00718     vfh.compute (vfh_signature);
00719 
00720     output.points.resize (1);
00721     output.width = 1;
00722 
00723     output.points[0] = vfh_signature.points[0];
00724     Eigen::Matrix4f id = Eigen::Matrix4f::Identity ();
00725     transforms_.push_back (id);
00726     valid_transforms_.push_back (false);
00727   }
00728 }
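
// Note (added for clarity): after compute (), the per-signature reference-frame
// transformations accumulated in transforms_ (with matching flags in valid_transforms_)
// can be retrieved through the accessors declared in our_cvfh.h (e.g. getTransforms ());
// when no cluster is found, a single identity transform flagged as invalid is stored.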
00729 
00730 #define PCL_INSTANTIATE_OURCVFHEstimation(T,NT,OutT) template class PCL_EXPORTS pcl::OURCVFHEstimation<T,NT,OutT>;
00731 
00732 #endif    // PCL_FEATURES_IMPL_OURCVFH_H_

