#include <narf_recognition/object_recognition_helper.h>

pcl::ObjectRecognitionHelper::ObjectRecognitionHelper()
{
}

pcl::ObjectRecognitionHelper::~ObjectRecognitionHelper()
{
  clearObjectModels();
  clearModelFeatures();
  clearSceneFeatures();
}

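// The feature and model containers below store raw owning pointers, so the class
// frees their elements explicitly in these helpers and in the destructor.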
void
pcl::ObjectRecognitionHelper::clearSceneFeatures()
{
  while (!intermediate_elements_.scene_features.empty())
  {
    delete intermediate_elements_.scene_features.back();
    intermediate_elements_.scene_features.pop_back();
  }
}

void
pcl::ObjectRecognitionHelper::clearObjectModels()
{
  while (!intermediate_elements_.object_models.empty())
  {
    delete intermediate_elements_.object_models.back();
    intermediate_elements_.object_models.pop_back();
  }
}

void
pcl::ObjectRecognitionHelper::clearModelFeatures()
{
  while (!intermediate_elements_.database_features.empty())
  {
    delete intermediate_elements_.database_features.back();
    intermediate_elements_.database_features.pop_back();
  }
  intermediate_elements_.database_feature_sources.clear();
}

bool
pcl::ObjectRecognitionHelper::addObjectModelsFromPcdFiles(std::vector<std::string>& file_names)
{
  ObjectModelList& object_models = intermediate_elements_.object_models;
  for (size_t model_filename_idx=0; model_filename_idx<file_names.size(); ++model_filename_idx)
  {
    const std::string& model_filename = file_names[model_filename_idx];
    if (parameters_.verbose)
      std::cout << "\nLoading model point cloud \""<<model_filename<<"\"...\n";
    object_models.push_back(new ObjectModel);
    ObjectModel& object_model = *object_models.back();
    object_model.setName(getFilenameWithoutExtension(getFilenameWithoutPath(model_filename)));
    sensor_msgs::PointCloud2 point_cloud_msg;
    Eigen::Vector4f model_sensor_origin;
    Eigen::Quaternionf model_sensor_orientation;
    if (pcl::io::loadPCDFile(model_filename, point_cloud_msg, model_sensor_origin, model_sensor_orientation) == -1)
    {
      std::cerr << "Was not able to open file \""<<model_filename<<"\".\n";
      return false;
    }
    fromROSMsg(point_cloud_msg, object_model.getPointCloud());
    object_model.getPointCloud().sensor_origin_ = model_sensor_origin;
    object_model.getPointCloud().sensor_orientation_ = model_sensor_orientation;
    // Reconstruct the sensor pose stored in the PCD file as an affine transformation.
    Eigen::Affine3f model_sensor_pose =
        Eigen::Affine3f(Eigen::Translation3f(object_model.getPointCloud().sensor_origin_[0],
                                             object_model.getPointCloud().sensor_origin_[1],
                                             object_model.getPointCloud().sensor_origin_[2])) *
        Eigen::Affine3f(object_model.getPointCloud().sensor_orientation_);

    if (parameters_.verbose)
      std::cout << "Done.\nCreating model...\n";
    object_model.updateCenterAndRadius();

    // Sample the views from which the model will be matched against the scene.
    if (parameters_.single_view_model)
      object_model.addView(model_sensor_pose, deg2rad(0.5f));
    else if (!parameters_.sample_views_3d)
      object_model.sampleViews2D(parameters_.no_of_model_views,
                                 parameters_.view_sampling_first_layer, parameters_.view_sampling_last_layer);
    else
      object_model.sampleViews3D(parameters_.view_sampling_3D_resolution);

    object_model.updateValidationPoints(parameters_.no_of_surface_validation_points,
                                        parameters_.no_of_border_validation_points);
    if (parameters_.verbose)
      std::cout << "Done.\n\n";
  }
  return true;
}
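
// Usage sketch (hypothetical caller; `helper` and the file name are placeholders):
//
//   pcl::ObjectRecognitionHelper helper;
//   std::vector<std::string> model_files(1, "mug.pcd");
//   if (!helper.addObjectModelsFromPcdFiles(model_files))
//     std::cerr << "Loading the object models failed.\n";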

bool
pcl::ObjectRecognitionHelper::createRangeImageFromPcdFile(const std::string& file_name)
{
  // Use the spherical range image representation for the scene.
  intermediate_elements_.scene_range_image_ptr = intermediate_elements_.spherical_scene_range_image_ptr;
  if (parameters_.verbose)
    std::cout << "\nLoading scene point cloud \""<<file_name<<"\"...\n";
  sensor_msgs::PointCloud2 cloud_msg;
  Eigen::Vector4f cloud_msg_sensor_origin;
  Eigen::Quaternionf cloud_msg_sensor_orientation;
  PointCloud<PointXYZ> point_cloud;
  PointCloud<PointWithViewpoint> far_ranges;
  if (pcl::io::loadPCDFile(file_name.c_str(), cloud_msg,
                           cloud_msg_sensor_origin, cloud_msg_sensor_orientation) == -1)
  {
    std::cerr << "Was not able to open file \""<<file_name<<"\".\n";
    return false;
  }
  // A far-ranges file is optional; if it cannot be loaded, far_ranges simply stays empty.
  std::string far_ranges_file_name = getFilenameWithoutExtension(file_name)+"_far_ranges.pcd";
  pcl::io::loadPCDFile(far_ranges_file_name.c_str(), far_ranges);
  fromROSMsg(cloud_msg, point_cloud);
  point_cloud.sensor_origin_ = cloud_msg_sensor_origin;
  point_cloud.sensor_orientation_ = cloud_msg_sensor_orientation;
  if (far_ranges.points.empty())
  {
    if (parameters_.verbose)
      std::cout << "\n----------\nNo extra far ranges provided. Trying to extract them from the msg...\n";
    RangeImage::extractFarRanges(cloud_msg, far_ranges);
    if (far_ranges.points.empty())
      std::cout << "No far ranges found. You might want to use -m as a command line parameter to consider "
                << "all unseen areas as far ranges.\n----------\n\n";
  }
  return createRangeImageFromPointCloud(point_cloud, &far_ranges);
}
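
// Note: for a scene file "some_scene.pcd", the optional far ranges are expected in
// "some_scene_far_ranges.pcd" next to it (see far_ranges_file_name above).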

bool
pcl::ObjectRecognitionHelper::createRangeImageFromPointCloud(const PointCloud<PointXYZ>& point_cloud,
                                                             const PointCloud<PointWithViewpoint>* far_ranges)
{
  // The sensor pose stored with the cloud defines where the virtual range image camera sits.
  Eigen::Affine3f sensor_pose =
      Eigen::Affine3f(Eigen::Translation3f(
          point_cloud.sensor_origin_[0], point_cloud.sensor_origin_[1], point_cloud.sensor_origin_[2])) *
      point_cloud.sensor_orientation_;

  // The -getTime()/+= getTime() bracketing accumulates the wall-clock time spent in this step.
  timings_.scene_range_image_creation = -getTime();
  RangeImage& range_image = *intermediate_elements_.scene_range_image_ptr;
  range_image.createFromPointCloud(point_cloud, parameters_.angular_resolution, deg2rad(360.0f), deg2rad(180.0f),
                                   sensor_pose, parameters_.coordinate_frame, parameters_.noise_level, 0.0f, 1);
  if (far_ranges != NULL)
    range_image.integrateFarRanges(*far_ranges);
  if (parameters_.set_unseen_to_max_range)
    range_image.setUnseenToMaxRange();
  timings_.scene_range_image_creation += getTime();

  intermediate_elements_.transformation_into_original_coordinate_frame = range_image.getTransformationToWorldSystem();
  range_image.change3dPointsToLocalCoordinateFrame();

  return true;
}
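
// Sketch for the in-memory variant (assumes a cloud with sensor_origin_/sensor_orientation_
// already set; pass NULL when no far ranges are available):
//
//   pcl::PointCloud<pcl::PointXYZ> scene_cloud;  // filled elsewhere
//   helper.createRangeImageFromPointCloud(scene_cloud, NULL);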

bool
pcl::ObjectRecognitionHelper::extractSceneFeatures()
{
  NarfKeypoint& interest_point_detector = intermediate_elements_.interest_point_detector_scene;
  interest_point_detector.setRangeImage(&*intermediate_elements_.scene_range_image_ptr);
  interest_point_detector.getParameters().support_size = parameters_.support_size;
  interest_point_detector.getParameters().max_no_of_threads = parameters_.max_no_of_threads;

  timings_.scene_interest_point_extraction = -getTime();
  const PointCloud<InterestPoint>& scene_interest_points = interest_point_detector.getInterestPoints();
  timings_.scene_interest_point_extraction += getTime();
  if (parameters_.verbose)
    std::cout << "Extracted "<<scene_interest_points.points.size()<<" interest points from scene.\n";

  // Free features from a previous scene before extracting new ones.
  clearSceneFeatures();

  Narf::max_no_of_threads = parameters_.max_no_of_threads;
  std::vector<Narf*>& scene_features = intermediate_elements_.scene_features;
  timings_.scene_feature_extraction = -getTime();
  Narf::extractForInterestPoints(*intermediate_elements_.scene_range_image_ptr, scene_interest_points,
                                 parameters_.feature_descriptor_size, parameters_.support_size,
                                 parameters_.use_rotation_invariance, scene_features);
  timings_.scene_feature_extraction += getTime();
  Narf::max_no_of_threads = 1;  // reset the static setting to single-threaded

  if (parameters_.verbose)
    std::cout << "Extracted "<<scene_features.size()<<" scene features from "
              << scene_interest_points.points.size()<<" interest points.\n";

  return true;
}

bool
pcl::ObjectRecognitionHelper::extractModelFeatures()
{
  clearModelFeatures();
  NarfKeypoint& interest_point_detector = intermediate_elements_.interest_point_detector_model;
  interest_point_detector.getParameters().support_size = parameters_.support_size;
  interest_point_detector.getParameters().distance_for_additional_points = 0.1f;
  interest_point_detector.getParameters().max_no_of_threads = parameters_.max_no_of_threads;

  Narf::max_no_of_threads = parameters_.max_no_of_threads;
  // Nested structure: one feature list per view, one view list per model.
  std::vector<std::vector<std::vector<Narf*>*>*> feature_database;
  intermediate_elements_.object_models.extractNARFsForInterestPoints(
      parameters_.feature_descriptor_size, parameters_.support_size, parameters_.use_rotation_invariance,
      intermediate_elements_.interest_point_detector_model, feature_database);
  std::vector<Narf*>& database_features = intermediate_elements_.database_features;
  std::vector<ObjectModelList::FeatureSource>& database_feature_sources =
      intermediate_elements_.database_feature_sources;

  // Flatten the per-model/per-view lists into one database, remembering where each feature came from.
  ObjectModelList::getMergedFeatureList(feature_database, database_features, database_feature_sources);
  if (parameters_.use_kdtree)
    ObjectModelList::buildKdTree(database_features, intermediate_elements_.database_features_kdtree);
  Narf::max_no_of_threads = 1;  // reset the static setting to single-threaded

  return true;
}
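
// Design note: when use_kdtree is set, descriptor matching in extractPoseEstimates()
// queries the kd-tree built here; otherwise getFeatureMatches() is handed a NULL
// tree and presumably falls back to comparing against every database feature.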

void
pcl::ObjectRecognitionHelper::extractPoseEstimates()
{
  float min_distance_to_same_scene_feature = 0.25f*parameters_.support_size;
  timings_.feature_matching = -getTime();
  std::vector<std::vector<PointCorrespondences6DVector> >& feature_matches = intermediate_elements_.feature_matches;
  feature_matches.clear();
  ObjectModelList& object_models = intermediate_elements_.object_models;
  object_models.getFeatureMatches(
      intermediate_elements_.scene_features,
      intermediate_elements_.database_features, intermediate_elements_.database_feature_sources,
      parameters_.max_descriptor_distance, min_distance_to_same_scene_feature, feature_matches,
      (parameters_.use_kdtree ? &intermediate_elements_.database_features_kdtree : NULL),
      parameters_.max_descriptor_distance*parameters_.feature_descriptor_size);
  timings_.feature_matching += getTime();

  PosesFromMatches pose_estimation;
  std::vector<PosesFromMatches::PoseEstimatesVector>& pose_estimates_per_model =
      intermediate_elements_.pose_estimates_per_model;
  pose_estimates_per_model.clear();
  pose_estimates_per_model.resize(object_models.size());

  timings_.initial_pose_estimation = -getTime();
  for (unsigned int model_idx=0; model_idx<feature_matches.size(); ++model_idx)
  {
    PosesFromMatches::PoseEstimatesVector& pose_estimates = pose_estimates_per_model[model_idx];
    std::vector<PointCorrespondences6DVector>& model_feature_matches = feature_matches[model_idx];
    for (unsigned int view_idx=0; view_idx<model_feature_matches.size(); ++view_idx)
    {
      PointCorrespondences6DVector& view_feature_matches = model_feature_matches[view_idx];
      // Best correspondences first, so the strongest matches generate pose hypotheses first.
      std::sort(view_feature_matches.begin(), view_feature_matches.end(), isBetterCorrespondence);
      // Each correspondence is a full 6DoF match, so a single one already yields a pose estimate.
      pose_estimation.estimatePosesUsing1Correspondence(view_feature_matches, -1, pose_estimates);
    }
  }
  timings_.initial_pose_estimation += getTime();
}
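
// The pose hypotheses gathered above are cheap, single-correspondence guesses;
// the expensive verification happens in filterFalsePositives() below.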

std::vector<pcl::PosesFromMatches::PoseEstimatesVector>&
pcl::ObjectRecognitionHelper::filterFalsePositives()
{
  std::vector<PosesFromMatches::PoseEstimatesVector>& pose_estimates_per_model =
      intermediate_elements_.pose_estimates_per_model;
  ObjectModelList& object_models = intermediate_elements_.object_models;
  RangeImage& range_image = *intermediate_elements_.scene_range_image_ptr;
  FalsePositivesFilter::Parameters& fpf_params = false_positives_filter_.getParameters();
  fpf_params.max_no_of_threads = parameters_.max_no_of_threads;

  timings_.false_positives_filtering = -getTime();
  for (unsigned int model_idx=0; model_idx<object_models.size(); ++model_idx)
  {
    ObjectModel& object_model = *object_models[model_idx];
    // Scale the validation and ICP distance thresholds with the model's size.
    fpf_params.max_validation_point_error = parameters_.max_validation_point_error_factor * object_model.getRadius();
    fpf_params.icp_max_distance_start = parameters_.icp_max_distance_start_factor * object_model.getRadius();
    fpf_params.icp_max_distance_end = parameters_.icp_max_distance_end_factor * object_model.getRadius();
    PosesFromMatches::PoseEstimatesVector& pose_estimates = pose_estimates_per_model[model_idx];
    false_positives_filter_.filterResults(range_image, object_model, pose_estimates);
  }
  timings_.false_positives_filtering += getTime();

  return pose_estimates_per_model;
}

void
pcl::ObjectRecognitionHelper::printTimings() const
{
  std::cout << PVARC(timings_.scene_range_image_creation)
            << PVARC(timings_.scene_interest_point_extraction)
            << PVARC(timings_.scene_feature_extraction)
            << PVARC(timings_.feature_matching)
            << PVARC(timings_.initial_pose_estimation)
            << PVARC(timings_.false_positives_filtering);
}
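
// End-to-end usage sketch (a hypothetical caller; file names are placeholders and
// parameter setup through the members declared in the header is omitted):
//
//   pcl::ObjectRecognitionHelper helper;
//   std::vector<std::string> model_files(1, "model.pcd");
//   if (!helper.addObjectModelsFromPcdFiles(model_files)) return;
//   if (!helper.createRangeImageFromPcdFile("scene.pcd")) return;
//   helper.extractModelFeatures();
//   helper.extractSceneFeatures();
//   helper.extractPoseEstimates();
//   helper.filterFalsePositives();  // returns the surviving pose estimates per model
//   helper.printTimings();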