object_recognition.cpp
Go to the documentation of this file.
00001 /*
00002  * Software License Agreement (BSD License)
00003  *
00004  *  Copyright (c) 2010, Willow Garage, Inc.
00005  *  All rights reserved.
00006  *
00007  *  Redistribution and use in source and binary forms, with or without
00008  *  modification, are permitted provided that the following conditions
00009  *  are met:
00010  *
00011  *   * Redistributions of source code must retain the above copyright
00012  *     notice, this list of conditions and the following disclaimer.
00013  *   * Redistributions in binary form must reproduce the above
00014  *     copyright notice, this list of conditions and the following
00015  *     disclaimer in the documentation and/or other materials provided
00016  *     with the distribution.
00017  *   * Neither the name of Willow Garage, Inc. nor the names of its
00018  *     contributors may be used to endorse or promote products derived
00019  *     from this software without specific prior written permission.
00020  *
00021  *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
00022  *  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
00023  *  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
00024  *  FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
00025  *  COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
00026  *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
00027  *  BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
00028  *  LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
00029  *  CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
00030  *  LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
00031  *  ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
00032  *  POSSIBILITY OF SUCH DAMAGE.
00033  *
00034  */
00035 
00036 /* \author Bastian Steder */
00037 
00038 /* ---[ */
00039 
00040 
00041 #include <iostream>
00042 using namespace std;
00043 
00044 #include <narf_recognition/object_recognition_helper.h>
00045 #include <boost/thread/thread.hpp>
00046 #include "pcl/console/parse.h"
00047 
00048 #include "pcl/visualization/range_image_visualizer.h"
00049 #include "pcl/visualization/pcl_visualizer.h"
00050 
00051 #include "Eigen/Geometry"
00052 #include "pcl/point_cloud.h"
00053 #include "pcl/point_types.h"
00054 
00055 #include "pcl/io/pcd_io.h"
00056 #include "pcl/range_image/range_image.h"
00057 #include "pcl/features/narf.h"
00058 //#include "object_recognition/object_model_list.h"
00059 #include "pcl/common/point_correspondence.h"
00060 #include "pcl/common/poses_from_matches.h"
00061 #include "pcl/features/range_image_border_extractor.h"
00062 #include "pcl/keypoints/narf_keypoint.h"
00063 #include "pcl/range_image/range_image_planar.h"
00064 #include "sensor_msgs/Image.h"
00065 #include "pcl/common/file_io.h"
00066 //#include "stereo_msgs/DisparityImage.h"
00067 //#include "sensor_msgs/CameraInfo.h"
00068 
00069 #include "pcl/common/common_headers.h"
00070 
00071 using namespace pcl;
00072 using namespace pcl::visualization;
00073 
00074 ObjectRecognitionHelper object_recognition_helper;
00075 ObjectRecognitionHelper::Parameters& orh_params = object_recognition_helper.getParameters ();
00076 FalsePositivesFilter& false_positives_filter = object_recognition_helper.getFalsePositivesFilter ();
00077 FalsePositivesFilter::Parameters& fpf_params = false_positives_filter.getParameters ();
00078 
00079 int& max_no_of_threads = orh_params.max_no_of_threads;
00080 bool& use_rotation_invariance = orh_params.use_rotation_invariance;
00081 float& angular_resolution = orh_params.angular_resolution;
00082 RangeImage::CoordinateFrame& coordinate_frame = orh_params.coordinate_frame;
00083 float& max_descriptor_distance = orh_params.max_descriptor_distance;
00084 bool& set_unseen_to_max_range = orh_params.set_unseen_to_max_range;
00085 bool single_view_model = orh_params.single_view_model;
00086 
00087 float noise_level_factor = 0.1;    // Factor regarding average model radius
00088 float support_size_factor = 0.5f;  // Factor regarding average model radius
00089 
00090 bool show_model_views = 1;
00091 bool show_view_simulation_results = 0;
00092 bool show_validation_points = 0;
00093 bool show_found_objects = 1;
00094 bool show_feature_match_positions = 0;
00095 bool show_interest_points = 0;
00096 bool print_timings = true;
00097 
00098 void printUsage (const char* progName)
00099 {
00100   cout << "\n\nUsage: "<<progName<<" [options] [scene.pcd] <model.pcl> [model_2.pcl] ... [model_n.pcl]\n\n"
00101        << "Options:\n"
00102        << "-------------------------------------------\n"
00103        //<< "-s           source used to create the scene range image:\n"
00104        //<<              "  -1 from disk (default), 0 PointCloud2 msg, 1 depth image msg, 2 disparity image msg.\n"
00105        //<<              "  don't forget to set input:=... for options [0,1,2], input2:=<camera_info> for option 1\n"
00106        //<<              "  or to set the scene point cloud for option -1.\n"
00107        << "-i <bool>    do ICP (default "<< (int)fpf_params.do_icp<<").\n"
00108        << "-r <float>   angular resolution in degrees (default "<<rad2deg (angular_resolution)<<")\n"
00109        << "-v <float>   min validation point score (default "<<fpf_params.min_validation_point_score<<")\n"
00110        << "-c <int>     coordinate frame (default "<<coordinate_frame<<")\n"
00111        << "-b           activate single view model, which tells the system that the given model point cloud\n"
00112        <<             " is not of the complete object, but only one view of it - no view sampling will be performed.\n"
00113        << "-o           use orientation invariant version\n"
00114        << "-f <float>   do view simulation filtering with this minimum score.\n"
00115        << "-x <float>   use maximum range for found objects.\n"
00116        << "-m           set unseen pixels to max range\n"
00117        << "-d <float>   set the maximum descriptor distance (default: "<<max_descriptor_distance<<")\n"
00118        << "-t <int>     set the maximum number of threads for parallelization (default: "<<max_no_of_threads<<")\n"
00119        << "-h           this help\n"
00120        << "\n\n";
00121 }
00122 
// Leftover plumbing from the live (ROS message driven) version of this tool.
// The active code path reads the scene from a pcd file on disk; the commented
// callbacks below show how the message-based input used to be wired up.
//sensor_msgs::PointCloud2ConstPtr cloud_msg;
//PointCloud<PointXYZ> point_cloud;
sensor_msgs::ImageConstPtr depth_image_msg;  // last received depth image (unused in file-based mode)
//stereo_msgs::DisparityImageConstPtr disparity_image_msg;
//sensor_msgs::CameraInfoConstPtr camera_info_msg;
boost::mutex m;           // guarded the message pointers above in live mode
bool got_new_data=false;  // set when a new scene is ready for processing by the main loop
//void
  //disparity_image_msg_cb (const stereo_msgs::DisparityImageConstPtr& msg)
//{
  //m.lock ();
  //disparity_image_msg = msg;
  //m.unlock ();
  //got_new_data = true;
//}

//void
  //depth_image_msg_cb (const sensor_msgs::ImageConstPtr& msg)
//{
  //m.lock ();
  //std::cout << "Received depth image of size "<<msg->width<<"x"<<msg->height<<".\n";
  //depth_image_msg = msg;
  //m.unlock ();
  //got_new_data = camera_info_msg;  // Only accept message if we already have the camera info
//}

//void
  //cloud_msg_cb (const sensor_msgs::PointCloud2ConstPtr& msg)
//{
  //m.lock ();
  //cloud_msg = msg;
  //far_ranges.points.clear ();
  //m.unlock ();
  //got_new_data = true;
//}

//void
  //camera_info_msg_cb (const sensor_msgs::CameraInfoConstPtr& msg)
//{
  //m.lock ();
  //std::cout << "Received camera info: " << "width="<<msg->width<<", height="<<msg->height<<", "
                                        //<< "center_x="<<msg->P[2]<<", center_y="<<msg->P[6]<<", "
                                       //<<"fl_x="<<msg->P[0]<<", fl_y="<<msg->P[5]<<".\n";
  //camera_info_msg = msg;
  //m.unlock ();
//}
00169 
00170 int main (int argc, char** argv)
00171 {
00172   // --------------------------------------
00173   // -----Parse Command Line Arguments-----
00174   // --------------------------------------
00175   int range_image_source = -1;
00176   orh_params.view_sampling_first_layer = 0;
00177   orh_params.view_sampling_last_layer = 0;
00178   
00179   // Read command line arguments.
00180   for (char c; (c = getopt (argc, argv, "bmx:i:f:hr:c:v:od:t:")) != -1; ) {
00181     switch (c) {
00182       case 't':
00183         max_no_of_threads = strtol (optarg, NULL, 0);
00184         cout << PVARN (max_no_of_threads);
00185         break;
00186       case 'i':
00187         fpf_params.do_icp = (bool)strtol (optarg, NULL, 0);
00188         cout << "Will "<< (fpf_params.do_icp?"":"not ")<<"use ICP.\n";
00189         break;
00190       //case 's':
00191         //range_image_source = (bool)strtol (optarg, NULL, 0);
00192         //cout << PVARN (range_image_source);
00193         //break;
00194       case 'b':
00195       {
00196         single_view_model = true;
00197         cout << "Setting single_view_model to true.\n";
00198         break;
00199       }
00200       case 'o':
00201       {
00202         use_rotation_invariance = true;
00203         cout << "Using rotational invariant version of the features.\n";
00204         break;
00205       }
00206       case 'r':
00207       {
00208         angular_resolution = deg2rad (strtod (optarg, NULL));
00209         if (angular_resolution <= 0.0)
00210           cerr << "Angular resolution "<<optarg<<" invalid.\n";
00211         else
00212           cout << "Setting angular resolution to "<<rad2deg (angular_resolution)<<".\n";
00213         break;
00214       }
00215       case 'v':
00216       {
00217         fpf_params.do_validation_point_filtering = true;
00218         fpf_params.min_validation_point_score = strtod (optarg, NULL);
00219         cout << PVARN (fpf_params.min_validation_point_score);
00220         break;
00221       }
00222       case 'x':
00223       {
00224         fpf_params.maximum_range_for_found_objects = strtod (optarg, NULL);
00225         cout << PVARN (fpf_params.maximum_range_for_found_objects);
00226         break;
00227       }
00228       case 'd':
00229       {
00230         max_descriptor_distance = strtod (optarg, NULL);
00231         cout << PVARN (max_descriptor_distance);
00232         break;
00233       }
00234       case 'f':
00235       {
00236         fpf_params.do_view_simulation_filtering = true;
00237         fpf_params.min_view_simulation_score = strtod (optarg, NULL);
00238         cout << PVARN (fpf_params.min_view_simulation_score);
00239         break;
00240       }
00241 
00242       case 'c':
00243       {
00244         coordinate_frame = (RangeImage::CoordinateFrame)strtol (optarg, NULL, 0);
00245         cout << "Using coordinate frame "<<coordinate_frame<<".\n";
00246         break;
00247       }
00248       case 'm':
00249       {
00250         set_unseen_to_max_range = true;
00251         cout << "Unseen pixels will be considered max ranges.\n";
00252         break;
00253       }
00254       case 'h':
00255       default:
00256         printUsage (argv[0]);
00257         exit (0);
00258     }
00259   }
00260   
00261   vector<RangeImage> view_simulation_results;
00262   if (show_view_simulation_results)
00263     false_positives_filter.setViewSimulationImagesPtr (&view_simulation_results);
00264   
00265   // -----------------------
00266   // -----Read pcd file-----
00267   // -----------------------
00268   //int model_filename_idx = (live_scenes ? optind : optind+1);
00269   int model_filename_idx = optind+1;
00270   if (model_filename_idx >= argc)
00271   {
00272     cout << "\nNo *.pcd file for model given.\n\n";
00273     exit (0);
00274   }
00275   
00276   vector<string> model_filenames;
00277   for (; model_filename_idx<argc; ++model_filename_idx)
00278     model_filenames.push_back (argv[model_filename_idx]);
00279   ObjectModelList& object_models = object_recognition_helper.getIntermediateElements ().object_models;
00280   if (!object_recognition_helper.addObjectModelsFromPcdFiles (model_filenames))
00281   {
00282     printUsage (argv[0]);
00283     exit (1);
00284   }
00285   
00286   float average_model_radius = object_models.getAverageModelRadius ();
00287   float support_size = support_size_factor * average_model_radius;
00288   float noise_level = noise_level_factor * average_model_radius;
00289   cout << "\n-----\n"
00290        << "The average object model radius is "<<average_model_radius<<"m.\n"
00291        << "  => using support size "<<support_size<<"m.\n"
00292        << "  => using noise level "<<noise_level<<"m.\n"
00293        << "-----\n\n";
00294   orh_params.support_size = support_size;
00295   orh_params.noise_level = noise_level;
00296   
00297   object_recognition_helper.extractModelFeatures ();
00298   std::vector<Narf*>& database_features = object_recognition_helper.getIntermediateElements ().database_features;
00299   std::vector<ObjectModelList::FeatureSource>& database_feature_sources = 
00300     object_recognition_helper.getIntermediateElements ().database_feature_sources;
00301   
00302   // ------------------------------------------------------------------
00303   
00304   if (optind >= argc)
00305   {
00306     cout << "\nNo *.pcd file for scene given.\n\n";
00307     printUsage (argv[0]);
00308     exit (0);
00309   }
00310   std::string scene_filename = argv[optind];
00311   
00312   if (!object_recognition_helper.createRangeImageFromPcdFile (scene_filename))
00313   {
00314     cerr << "\n-----\nRange image creation failed.\n\n";
00315     exit (1);
00316   }
00317   got_new_data = true;
00318   
00319   vector<string> point_clouds_in_viewer;
00320   RangeImageVisualizer scene_range_image_widget ("scene range image");
00321   vector<RangeImageVisualizer*> view_simulation_result_widgets;
00322   vector<RangeImageVisualizer*> interest_points_widgets;
00323   vector<RangeImageVisualizer*> range_image_widgets;
00324   
00325   PCLVisualizer viewer ("3D Viewer");
00326   viewer.setBackgroundColor (1, 1, 1);
00327 
00328   while (!viewer.wasStopped ()) // && (node_handle==NULL || node_handle->ok ()))
00329   {
00330     viewer.spinOnce (100);
00331     scene_range_image_widget.spinOnce ();
00332     for (size_t i=0; i<view_simulation_result_widgets.size (); ++i)
00333       view_simulation_result_widgets[i]->spinOnce ();
00334     for (size_t i=0; i<interest_points_widgets.size (); ++i)
00335       interest_points_widgets[i]->spinOnce ();
00336     for (size_t i=0; i<range_image_widgets.size (); ++i)
00337       range_image_widgets[i]->spinOnce ();
00338 
00339 
00340     pcl_sleep (0.01);
00341 
00342     if (!got_new_data)
00343       continue;
00344     got_new_data = false;
00345     cout << "Got new data.\n";
00346     
00347     object_recognition_helper.extractSceneFeatures ();
00348     object_recognition_helper.extractPoseEstimates ();
00349     std::vector<PosesFromMatches::PoseEstimatesVector>& pose_estimates_per_model = 
00350       object_recognition_helper.filterFalsePositives ();
00351     
00352     object_recognition_helper.printTimings ();
00353     
00354     
00356     
00357     NarfKeypoint& interest_point_detector_scene =
00358       object_recognition_helper.getIntermediateElements ().interest_point_detector_scene;
00359     
00360     boost::shared_ptr<RangeImage>& scene_range_image_ptr =
00361       object_recognition_helper.getIntermediateElements ().scene_range_image_ptr;
00362     RangeImage& scene_range_image = *scene_range_image_ptr;   
00363    
00364     
00365     // Visualize view simulation result
00366     while (!view_simulation_result_widgets.empty ())
00367     {
00368       delete view_simulation_result_widgets.back ();
00369       view_simulation_result_widgets.pop_back ();
00370     }
00371     for (unsigned int i=0; i<view_simulation_results.size (); i+=4)
00372     {
00373       float min_range, max_range;
00374       view_simulation_results[i].getMinMaxRanges (min_range, max_range);
00375       //cout << PVARC (min_range)<<PVARN (max_range);
00376       stringstream ss;  ss << setfill ('0') << setw (3) << (i/4)+1;
00377       view_simulation_result_widgets.push_back (RangeImageVisualizer::getRangeImageWidget (view_simulation_results[i+1],
00378                                                min_range, max_range, true, ss.str ()+".1: real view"));
00379       view_simulation_result_widgets.push_back (RangeImageVisualizer::getRangeImageWidget (view_simulation_results[i],
00380                                                min_range, max_range, true, ss.str ()+".2: simulated view"));
00381       view_simulation_result_widgets.push_back (RangeImageVisualizer::getRangeImageWidget (view_simulation_results[i+2],
00382                                                0.0f, 1.0f, true, ss.str ()+".3: scores"));
00383       if (!view_simulation_results[i+3].points.empty ())
00384         view_simulation_result_widgets.push_back (RangeImageVisualizer::getRangeImageWidget (view_simulation_results[i+3],
00385                                                  0.0f, 1.0f, true, ss.str ()+".4: scores_normals"));
00386     }
00387     
00388     vector<boost::shared_ptr<ObjectModel::PointCloudType> > objects_in_scene;  // For visualization
00389     for (unsigned int model_idx=0; model_idx<object_models.size (); ++model_idx)
00390     {
00391       PosesFromMatches::PoseEstimatesVector& pose_estimates = pose_estimates_per_model[model_idx];
00392       ObjectModel& object_model = *object_models[model_idx];
00393       for (unsigned int pose_estimate_idx=0; pose_estimate_idx<pose_estimates.size () && show_found_objects;
00394                                                                                         ++pose_estimate_idx)
00395       {
00396         const PosesFromMatches::PoseEstimate& pose_estimate = pose_estimates[pose_estimate_idx];
00397         objects_in_scene.push_back (boost::shared_ptr<ObjectModel::PointCloudType> (new ObjectModel::PointCloudType));
00398         object_model.getTransformedPointCloud (pose_estimate.transformation, *objects_in_scene.back ());
00399       }
00400     }
00401     
00402     std::vector<std::vector<PointCorrespondences6DVector > >& feature_matches =
00403       object_recognition_helper.getIntermediateElements ().feature_matches;
00404     boost::shared_ptr<PointCloud<PointXYZ> > feature_match_positions_ptr (new PointCloud<PointXYZ>);
00405     PointCloud<PointXYZ>& feature_match_positions = *feature_match_positions_ptr;
00406     if (show_feature_match_positions)
00407     {
00408       feature_match_positions.points.clear ();
00409       for (unsigned int model_idx=0; model_idx<feature_matches.size (); ++model_idx)
00410       {
00411         vector<PointCorrespondences6DVector>& model_feature_matches = feature_matches[model_idx];
00412         for (unsigned int view_idx=0; view_idx<model_feature_matches.size (); ++view_idx)
00413         {
00414           const PointCorrespondences6DVector& view_feature_matches = model_feature_matches[view_idx];
00415           for (unsigned int match_idx=0; match_idx<view_feature_matches.size (); ++match_idx)
00416           {
00417             const PointCorrespondence6D& match = view_feature_matches[match_idx];
00418             PointXYZ match_position;
00419             match_position.x=match.point1[0];
00420             match_position.y=match.point1[1];
00421             match_position.z=match.point1[2];
00422             feature_match_positions.points.push_back (match_position);
00423           }
00424         }
00425       }
00426     }
00427     
00428     ObjectModel& first_object_model = *object_models[0];
00429     while (!interest_points_widgets.empty ())
00430     {
00431       delete interest_points_widgets.back ();
00432       interest_points_widgets.pop_back ();
00433     }
00434     if (show_interest_points)
00435     {
00436       interest_points_widgets.push_back (RangeImageVisualizer::getInterestPointsWidget (scene_range_image,
00437                                         interest_point_detector_scene.getInterestImage (), 0.0f, 1.0f,
00438                                         interest_point_detector_scene.getInterestPoints (), "Scene interest points"));
00439       const std::vector<RangeImage, Eigen::aligned_allocator<RangeImage> >& model_views = first_object_model.getViews ();
00440       NarfKeypoint& interest_point_detector_model =
00441         object_recognition_helper.getIntermediateElements ().interest_point_detector_model;
00442       interest_point_detector_model.getParameters ().support_size = support_size;
00443       for (unsigned int model_view_idx=0; model_view_idx<model_views.size (); ++model_view_idx)
00444       {
00445         const RangeImage& model_view = model_views[model_view_idx];
00446         interest_point_detector_model.setRangeImage (&model_view);
00447         //RangeImageVisualizer* surface_change_scores_widget = new RangeImageVisualizer;
00448         //surface_change_scores_widget->setFloatImage (border_extractor_model.getSurfaceChangeScores (),
00449         //                                            model_view.width, model_view.height,
00450         //                                            "model view surface_change_scores", 0.0f, 1.0f, true);
00451         //interest_points_widgets.push_back (surface_change_scores_widget);
00452         
00453         interest_points_widgets.push_back (RangeImageVisualizer::getInterestPointsWidget (model_view,
00454                                           interest_point_detector_model.getInterestImage (), 0.0f, 1.0f,
00455                                           interest_point_detector_model.getInterestPoints (), "Model interest points"));
00456 
00457         interest_point_detector_model.setRangeImage (NULL);
00458       }
00459     }
00460     
00461     //----------------------
00462     // -----Show images-----
00463     // ---------------------
00464     // Scene:
00465     scene_range_image_widget.showRangeImage (scene_range_image);
00466     // Object views:
00467     const std::vector<RangeImage, Eigen::aligned_allocator<RangeImage> >& model_views = first_object_model.getViews ();
00468     for (unsigned int i=0; i<model_views.size () && show_model_views; ++i)
00469     {
00470       const RangeImage& model_view = model_views[i];
00471       range_image_widgets.push_back (RangeImageVisualizer::getRangeImageWidget (model_view, -INFINITY, INFINITY, true, "Model view"));
00472       if (show_validation_points)
00473       {
00474         const ObjectModel::ValidationPoints& validation_points = first_object_model.getValidationPoints ()[i];
00475         for (unsigned int j=0; j<validation_points.surface.size (); ++j)
00476         {
00477           float image_x, image_y;
00478           model_view.getImagePoint (validation_points.surface[j], image_x, image_y);
00479           range_image_widgets.back ()->markPoint (image_x, image_y, pcl::visualization::Vector3ub (0,255,0));
00480         }
00481         for (unsigned int j=0; j<validation_points.border.size (); ++j)
00482         {
00483           float image_x, image_y;
00484           model_view.getImagePoint (validation_points.border[j], image_x, image_y);
00485           range_image_widgets.back ()->markPoint (image_x, image_y, pcl::visualization::Vector3ub (255,0,0));
00486         }
00487       }
00488     }
00489     //---------------------------
00490     
00491     for (unsigned int i=0; i<point_clouds_in_viewer.size (); ++i)
00492       viewer.removePointCloud (point_clouds_in_viewer[i]);
00493     point_clouds_in_viewer.clear ();
00494     
00495     //viewer.addCoordinateSystem (1.0f);
00496     PointCloudColorHandlerCustom<pcl::PointWithRange> color_handler_cloud (scene_range_image_ptr, 0, 0, 0);
00497     point_clouds_in_viewer.push_back ("scene_range_image");
00498     viewer.addPointCloud<PointWithRange> (scene_range_image_ptr, color_handler_cloud, point_clouds_in_viewer.back ());
00499     point_clouds_in_viewer.push_back ("feature_match_positions");
00500     viewer.addPointCloud (feature_match_positions_ptr, point_clouds_in_viewer.back ());
00501     viewer.setPointCloudRenderingProperties (PCL_VISUALIZER_POINT_SIZE, 7, point_clouds_in_viewer.back ());
00502     for (unsigned int i=0; i<objects_in_scene.size (); ++i)
00503     {
00504       stringstream ss;
00505       ss<<"Found object "<<i;
00506       point_clouds_in_viewer.push_back (ss.str ());
00507       PointCloudColorHandlerRandom<ObjectModel::PointCloudType::PointType> color_handler (objects_in_scene[i]);
00508       viewer.addPointCloud<ObjectModel::PointCloudType::PointType> (objects_in_scene[i], color_handler, point_clouds_in_viewer.back ());
00509     }
00510   }
00511 }
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Properties Friends Defines


narf_recognition
Author(s): Juergen Hess
autogenerated on Wed Dec 26 2012 16:37:09