object_recognition_helper.cpp
Go to the documentation of this file.
00001 /* \author Bastian Steder */
00002 
00003 #include <narf_recognition/object_recognition_helper.h>
00004 
// Default constructor: all intermediate elements, parameters and timings
// rely on their own default initialization.
pcl::ObjectRecognitionHelper::ObjectRecognitionHelper()
{
}
00008 
// Destructor: frees all heap-allocated intermediate data — the loaded object
// models and the NARF features extracted from both the models and the scene.
pcl::ObjectRecognitionHelper::~ObjectRecognitionHelper()
{
  clearObjectModels();
  clearModelFeatures();
  clearSceneFeatures();
}
00015 
00016 void
00017 pcl::ObjectRecognitionHelper::clearSceneFeatures()
00018 {
00019   while (!intermediate_elements_.scene_features.empty())
00020   {
00021     delete intermediate_elements_.scene_features.back();
00022     intermediate_elements_.scene_features.pop_back();
00023   }
00024 }
00025 
00026 void
00027 pcl::ObjectRecognitionHelper::clearObjectModels()
00028 {
00029   while (!intermediate_elements_.object_models.empty())
00030   {
00031     delete intermediate_elements_.object_models.back();
00032     intermediate_elements_.object_models.pop_back();
00033   }
00034 }
00035 
00036 void
00037 pcl::ObjectRecognitionHelper::clearModelFeatures()
00038 {
00039   while (!intermediate_elements_.database_features.empty())
00040   {
00041     delete intermediate_elements_.database_features.back();
00042     intermediate_elements_.database_features.pop_back();
00043   }
00044   intermediate_elements_.database_feature_sources.clear();
00045 }
00046 
00047 bool
00048 pcl::ObjectRecognitionHelper::addObjectModelsFromPcdFiles(std::vector<std::string>& file_names)
00049 {
00050   ObjectModelList& object_models = intermediate_elements_.object_models;
00051   for (int model_filename_idx=0; model_filename_idx<file_names.size(); ++model_filename_idx)
00052   {
00053     const std::string& model_filename = file_names[model_filename_idx];
00054     if (parameters_.verbose)
00055       std::cout << "\nLoading model point cloud \""<<model_filename<<"\"...\n";
00056     object_models.push_back(new ObjectModel);
00057     ObjectModel& object_model = *object_models.back();
00058     object_model.setName(getFilenameWithoutExtension(getFilenameWithoutPath(model_filename)));
00059     sensor_msgs::PointCloud2 point_cloud_msg;
00060     Eigen::Vector4f model_sensor_origin;  Eigen::Quaternionf model_sensor_orientation;
00061     if (pcl::io::loadPCDFile(model_filename, point_cloud_msg, model_sensor_origin, model_sensor_orientation) == -1)
00062     {
00063       std::cerr << "Was not able to open file \""<<model_filename<<"\".\n";
00064       return false;
00065     }
00066     fromROSMsg(point_cloud_msg, object_model.getPointCloud());
00067     object_model.getPointCloud().sensor_origin_      = model_sensor_origin,
00068     object_model.getPointCloud().sensor_orientation_ = model_sensor_orientation;
00069     Eigen::Affine3f model_sensor_pose(Eigen::Affine3f::Identity());
00070     model_sensor_pose = Eigen::Affine3f(Eigen::Translation3f(object_model.getPointCloud().sensor_origin_[0],
00071                                                              object_model.getPointCloud().sensor_origin_[1],
00072                                                              object_model.getPointCloud().sensor_origin_[2])) *
00073                         Eigen::Affine3f(object_model.getPointCloud().sensor_orientation_);
00074     //if (pcl::getFieldIndex(point_cloud_msg, "vp_x")>=0)
00075     //{
00077       //PointCloud<PointWithViewpoint> tmp_pc;  fromROSMsg(point_cloud_msg, tmp_pc);
00078       //Eigen::Vector3f average_viewpoint = RangeImage::getAverageViewPoint(tmp_pc);
00079       //model_sensor_pose = Eigen::Translation3f(average_viewpoint) * model_sensor_pose;
00080     //}
00081     if (parameters_.verbose)
00082       std::cout << "Done.\n";
00083     if (parameters_.verbose)
00084       std::cout << "Creating model...\n";
00085     object_model.updateCenterAndRadius();
00086     
00087     if (parameters_.single_view_model)
00088       object_model.addView(model_sensor_pose, deg2rad(0.5f));
00089     else if (!parameters_.sample_views_3d)
00090       object_model.sampleViews2D(parameters_.no_of_model_views,
00091                                  parameters_.view_sampling_first_layer, parameters_.view_sampling_last_layer);
00092     else
00093       object_model.sampleViews3D(parameters_.view_sampling_3D_resolution);
00094     
00095     object_model.updateValidationPoints(parameters_.no_of_surface_validation_points,
00096                                         parameters_.no_of_border_validation_points);
00097     if (parameters_.verbose)
00098       std::cout << "Done.\n\n";
00099   }
00100   return true;
00101 }
00102 
00103 bool
00104 pcl::ObjectRecognitionHelper::createRangeImageFromPcdFile(const std::string& file_name)
00105 {
00106   intermediate_elements_.scene_range_image_ptr = intermediate_elements_.spherical_scene_range_image_ptr;
00107   if (parameters_.verbose)
00108     std::cout << "\nLoading scene point cloud \""<<file_name<<"\"...\n";
00109   sensor_msgs::PointCloud2 cloud_msg;
00110   Eigen::Vector4f cloud_msg_sensor_origin;  Eigen::Quaternionf cloud_msg_sensor_orientation;
00111   PointCloud<PointXYZ> point_cloud;
00112   PointCloud<PointWithViewpoint> far_ranges;
00113   if (pcl::io::loadPCDFile(file_name.c_str(), cloud_msg,
00114                            cloud_msg_sensor_origin, cloud_msg_sensor_orientation) == -1)
00115   {
00116     std::cerr << "Was not able to open file \""<<file_name<<"\".\n";
00117     return false;
00118   }
00119   std::string far_ranges_file_name = getFilenameWithoutExtension (file_name)+"_far_ranges.pcd";
00120   if (pcl::io::loadPCDFile(far_ranges_file_name.c_str(), far_ranges) == -1)
00121   {
00122     //if (parameters_.verbose)
00123       //std::cout << "Far ranges file \""<<far_ranges_file_name<<"\" does not exists.\n";
00124   }
00125   fromROSMsg(cloud_msg, point_cloud);
00126   point_cloud.sensor_origin_ = cloud_msg_sensor_origin;  point_cloud.sensor_orientation_ = cloud_msg_sensor_orientation;
00127   if (far_ranges.points.empty())
00128   {
00129     if (parameters_.verbose)
00130       std::cout << "\n----------\nNo extra far ranges provided. Trying to extract them from the msg...\n";
00131   }
00132   RangeImage::extractFarRanges(cloud_msg, far_ranges);
00133   if (far_ranges.points.empty()) {
00134     std::cout << "No far ranges found. You might want to use -m as a command line parameter to consider "
00135          << "all unseen areas as far ranges.\n----------\n\n";
00136   }
00137   return createRangeImageFromPointCloud(point_cloud, &far_ranges);
00138 }
00139 
00140 
00141 bool
00142 pcl::ObjectRecognitionHelper::createRangeImageFromPointCloud(const PointCloud<PointXYZ>& point_cloud,
00143                                                              const PointCloud<PointWithViewpoint>* far_ranges)
00144 {
00145         //std::cout << "creating range image from PointCloud\n";
00146   //std::cout << PVARN(far_ranges.points.size());
00147   Eigen::Affine3f sensor_pose(Eigen::Affine3f::Identity());
00148   //if (pcl::getFieldIndex(*cloud_msg, "vp_x")>=0)
00149   //{
00151     //PointCloud<PointWithViewpoint> tmp_pc;  fromROSMsg(*cloud_msg, tmp_pc);
00152     //Eigen::Vector3f average_viewpoint = RangeImage::getAverageViewPoint(tmp_pc);
00153     //sensor_pose = Eigen::Translation3f(average_viewpoint) * sensor_pose;
00154   //}
00155   sensor_pose = Eigen::Affine3f(Eigen::Translation3f(
00156                       point_cloud.sensor_origin_[0], point_cloud.sensor_origin_[1], point_cloud.sensor_origin_[2])) *
00157                       point_cloud.sensor_orientation_;
00158   timings_.scene_range_image_creation = -getTime();
00159   RangeImage& range_image = *intermediate_elements_.scene_range_image_ptr;
00160   range_image.createFromPointCloud(point_cloud, parameters_.angular_resolution, deg2rad(360.0f), deg2rad(180.0f),
00161                                    sensor_pose, parameters_.coordinate_frame, parameters_.noise_level, 0.0f, 1);
00162   if (far_ranges != NULL)
00163     range_image.integrateFarRanges(*far_ranges);
00164   if (parameters_.set_unseen_to_max_range)
00165     range_image.setUnseenToMaxRange();
00166   timings_.scene_range_image_creation += getTime();
00167   
00168   intermediate_elements_.transformation_into_original_coordinate_frame = range_image.getTransformationToWorldSystem();
00169   range_image.change3dPointsToLocalCoordinateFrame();  // HACK to make the non-rotational-invariant version work if
00170                                                        // the scene coordinate frame != model coordinate frame
00171  // std::cout << "done. \n";
00172   return true;
00173 }
00174 
00175 //bool
00176 //pcl::ObjectRecognitionHelper::createRangeImageFromOpenniDepthImage(const openni_wrapper::DepthImage& depth_image)
00177 //{
00178 //  //std::cout << PVARN(depth_image.getShadowValue())<<" ";
00179 //  //std::cout << PVARN(depth_image.getNoSampleValue())<<" ";
00180 //  //for (int i=0; i<int(depth_image.getWidth()*depth_image.getHeight()); ++i)
00181 //    //std::cout << depth_image.getDepthMetaData().Data()[i]<<" ";
00182 //
00183 //  intermediate_elements_.scene_range_image_ptr = intermediate_elements_.planar_scene_range_image_ptr;
00184 //  if (parameters_.verbose)
00185 //    std::cout << "\nCreating RangeImagePlanar from openni depth map...\n";
00186 //  timings_.scene_range_image_creation = -getTime();
00187 //  RangeImagePlanar& range_image = *intermediate_elements_.planar_scene_range_image_ptr;
00188 //  range_image.setDepthImage (depth_image.getDepthMetaData().Data(), depth_image.getWidth(), depth_image.getHeight(),
00189 //                             depth_image.getWidth()/2, depth_image.getHeight()/2, depth_image.getFocalLength(),
00190 //                             depth_image.getFocalLength(), parameters_.angular_resolution);
00191 //  if (parameters_.set_unseen_to_max_range)
00192 //    range_image.setUnseenToMaxRange();
00193 //  timings_.scene_range_image_creation += getTime();
00194 //  return !intermediate_elements_.scene_range_image_ptr->points.empty();
00195 //}
00196 
00197 bool
00198 pcl::ObjectRecognitionHelper::extractSceneFeatures()
00199 {
00200   NarfKeypoint& interest_point_detector = intermediate_elements_.interest_point_detector_scene;
00201   interest_point_detector.setRangeImage(&*intermediate_elements_.scene_range_image_ptr);
00202   interest_point_detector.getParameters().support_size = parameters_.support_size;
00203   interest_point_detector.getParameters().max_no_of_threads = parameters_.max_no_of_threads;
00204   
00205   timings_.scene_interest_point_extraction = -getTime();
00206   const PointCloud<InterestPoint>& scene_interest_points = interest_point_detector.getInterestPoints();
00207   timings_.scene_interest_point_extraction += getTime();
00208   if (parameters_.verbose)
00209     std::cout << "Extracted "<<scene_interest_points.points.size()<<" interest points from scene.\n";
00210   
00211   clearSceneFeatures();
00212   
00213   Narf::max_no_of_threads = parameters_.max_no_of_threads;
00214   std::vector<Narf*>& scene_features = intermediate_elements_.scene_features;
00215   timings_.scene_feature_extraction = -getTime();
00216   Narf::extractForInterestPoints(*intermediate_elements_.scene_range_image_ptr, scene_interest_points,
00217                                  parameters_.feature_descriptor_size, parameters_.support_size,
00218                                  parameters_.use_rotation_invariance, scene_features);
00219   timings_.scene_feature_extraction += getTime();
00220   Narf::max_no_of_threads = 1;
00221   
00222   if (parameters_.verbose)
00223     std::cout << "Extracted "<<scene_features.size()<<" scene features from "
00224               << scene_interest_points.points.size()<<" interest points.\n";
00225   
00226   return true;
00227 }
00228 
00229 bool
00230 pcl::ObjectRecognitionHelper::extractModelFeatures()
00231 {
00232   clearModelFeatures();
00233   NarfKeypoint& interest_point_detector = intermediate_elements_.interest_point_detector_model;
00234   interest_point_detector.getParameters().support_size = parameters_.support_size;
00235   // We extract additional interest points on the model to ensure matches
00236   interest_point_detector.getParameters().distance_for_additional_points = 0.1f;
00237   interest_point_detector.getParameters().max_no_of_threads = parameters_.max_no_of_threads;
00238   
00239   Narf::max_no_of_threads = parameters_.max_no_of_threads;
00240   std::vector<std::vector<std::vector<Narf*>*>*> feature_database;
00241   //object_models.extractNARFsForCompleteSurface(feature_descriptor_size, support_size,
00242   //                                             use_rotation_invariance, feature_database);
00243   
00244   intermediate_elements_.object_models.extractNARFsForInterestPoints(
00245       parameters_.feature_descriptor_size, parameters_.support_size, parameters_.use_rotation_invariance,
00246       intermediate_elements_.interest_point_detector_model, feature_database);
00247   std::vector<Narf*>& database_features = intermediate_elements_.database_features;
00248   std::vector<ObjectModelList::FeatureSource>& database_feature_sources =
00249     intermediate_elements_.database_feature_sources;
00250   
00251   ObjectModelList::getMergedFeatureList(feature_database, database_features, database_feature_sources);
00253   if (parameters_.use_kdtree)
00254     ObjectModelList::buildKdTree(database_features, intermediate_elements_.database_features_kdtree);
00255   Narf::max_no_of_threads = 1;
00256   
00257   return true;
00258 }
00259 
00260 
00261 void
00262 pcl::ObjectRecognitionHelper::extractPoseEstimates()
00263 {
00264   float min_distance_to_same_scene_feature = 0.25f*parameters_.support_size;
00265   timings_.feature_matching = -getTime();
00266   std::vector<std::vector<PointCorrespondences6DVector > >& feature_matches = intermediate_elements_.feature_matches;
00267   feature_matches.clear();
00268   ObjectModelList& object_models = intermediate_elements_.object_models;
00269   object_models.getFeatureMatches(
00270     intermediate_elements_.scene_features,
00271     intermediate_elements_.database_features, intermediate_elements_.database_feature_sources,
00272     parameters_.max_descriptor_distance, min_distance_to_same_scene_feature, feature_matches,
00273     (parameters_.use_kdtree ? &intermediate_elements_.database_features_kdtree : NULL),
00274     parameters_.max_descriptor_distance*parameters_.feature_descriptor_size);
00275   timings_.feature_matching += getTime();
00276   
00277   PosesFromMatches pose_estimation;
00278   std::vector<PosesFromMatches::PoseEstimatesVector>& pose_estimates_per_model =
00279     intermediate_elements_.pose_estimates_per_model;
00280   pose_estimates_per_model.clear();
00281   pose_estimates_per_model.resize(object_models.size());
00282   
00283   timings_.initial_pose_estimation = -getTime();
00284   for (unsigned int model_idx=0; model_idx<feature_matches.size(); ++model_idx)
00285   {
00286     PosesFromMatches::PoseEstimatesVector& pose_estimates = pose_estimates_per_model[model_idx];
00287     std::vector<PointCorrespondences6DVector >& model_feature_matches = feature_matches[model_idx];
00288     for (unsigned int view_idx=0; view_idx<model_feature_matches.size(); ++view_idx)
00289     {
00290       PointCorrespondences6DVector& view_feature_matches = model_feature_matches[view_idx];
00291       std::sort(view_feature_matches.begin(), view_feature_matches.end(), isBetterCorrespondence);
00292       pose_estimation.estimatePosesUsing1Correspondence(view_feature_matches, -1, pose_estimates);
00293       //pose_estimation.estimatePosesUsing2Correspondences(view_feature_matches, 100000, 500, pose_estimates);
00294       //pose_estimation.estimatePosesUsing3Correspondences(view_feature_matches, 100000, 500, pose_estimates);
00295     }
00296   }
00297   timings_.initial_pose_estimation += getTime();
00298 }
00299 
00300 std::vector<pcl::PosesFromMatches::PoseEstimatesVector>&
00301 pcl::ObjectRecognitionHelper::filterFalsePositives()
00302 {
00303   std::vector<PosesFromMatches::PoseEstimatesVector>& pose_estimates_per_model =
00304     intermediate_elements_.pose_estimates_per_model;
00305   ObjectModelList& object_models = intermediate_elements_.object_models;
00306   RangeImage& range_image = *intermediate_elements_.scene_range_image_ptr;
00307   FalsePositivesFilter::Parameters& fpf_params = false_positives_filter_.getParameters();
00308   fpf_params.max_no_of_threads = parameters_.max_no_of_threads;
00309   
00310   timings_.false_positives_filtering = -getTime();
00311   for (unsigned int model_idx=0; model_idx<object_models.size(); ++model_idx)
00312   {
00313     ObjectModel& object_model = *object_models[model_idx];
00314     fpf_params.max_validation_point_error = parameters_.max_validation_point_error_factor * object_model.getRadius();
00315     fpf_params.icp_max_distance_start = parameters_.icp_max_distance_start_factor * object_model.getRadius();
00316     fpf_params.icp_max_distance_end = parameters_.icp_max_distance_end_factor * object_model.getRadius();
00317     PosesFromMatches::PoseEstimatesVector& pose_estimates = pose_estimates_per_model[model_idx];
00318     false_positives_filter_.filterResults(range_image, object_model, pose_estimates);
00319   }
00320   timings_.false_positives_filtering += getTime();
00321   
00322   return pose_estimates_per_model;
00323 }
00324 
// Print all accumulated processing times to stdout.
// NOTE(review): PVARC is presumably PCL's "print variable name and value,
// followed by a comma" helper macro — confirm against pcl/common/common_headers.
void pcl::ObjectRecognitionHelper::printTimings() const
{
  std::cout << PVARC(timings_.scene_range_image_creation)
            << PVARC(timings_.scene_interest_point_extraction)
            << PVARC(timings_.scene_feature_extraction)
            << PVARC(timings_.feature_matching)
            << PVARC(timings_.initial_pose_estimation)
            << PVARC(timings_.false_positives_filtering);
}
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Properties Friends Defines


narf_recognition
Author(s): Juergen Hess
autogenerated on Wed Dec 26 2012 16:37:09