#include "rgbd_registration/rgb_feature_detection.h"

#include "rgbd_registration/parameter_server.h"

#include "opencv2/core/core.hpp"
#include "opencv2/nonfree/features2d.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"

#include <ros/console.h>

#include <cmath>
#include <sstream>
RGBFeatureDetection::RGBFeatureDetection () :
    image_counter_ (0)
{
}

RGBFeatureDetection::~RGBFeatureDetection ()
{
}

void RGBFeatureDetection::projectFeaturesTo3D (
    std::vector<cv::KeyPoint>& feature_locations_2d,
    std::vector<Eigen::Vector4f, Eigen::aligned_allocator<Eigen::Vector4f> >& feature_locations_3d,
    const PointCloudConstPtr point_cloud)
{
  for (unsigned int i = 0; i < feature_locations_2d.size ();)
  {
    // Look up the 3D point at the keypoint's pixel location in the organized cloud.
    cv::Point2f p2d = feature_locations_2d[i].pt;
    PointType p3d = point_cloud->at ((int) p2d.x, (int) p2d.y);

    // Drop keypoints without valid depth so the 2D and 3D lists stay aligned.
    if (std::isnan (p3d.x) || std::isnan (p3d.y) || std::isnan (p3d.z))
    {
      ROS_DEBUG ("Feature %u has been extracted at NaN depth. Omitting", i);
      feature_locations_2d.erase (feature_locations_2d.begin () + i);
      continue;
    }

    feature_locations_3d.push_back (Eigen::Vector4f (p3d.x, p3d.y, p3d.z, 1.0));
    ++i;
  }
}
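
// Illustrative note, not part of the original source: projectFeaturesTo3D assumes an
// organized point cloud whose (column, row) grid lines up with the RGB image, since
// point_cloud->at (x, y) indexes that grid. A caller could guard against unorganized
// input before projecting, roughly like this:
//
//   if (!point_cloud->isOrganized ())
//   {
//     ROS_ERROR ("projectFeaturesTo3D needs an organized point cloud, got %u x %u",
//                point_cloud->width, point_cloud->height);
//     return;
//   }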

void RGBFeatureDetection::detectFeatures (const cv::Mat& input_image,
    std::vector<cv::KeyPoint>& keypoints)
{
  // Convert to greyscale; the keypoint detectors work on single-channel images.
  cv::Mat image_greyscale;
  cvtColor (input_image, image_greyscale, CV_RGB2GRAY);

  // Pick the detector from the "feature_extractor" parameter. cv::Ptr releases the
  // detector when it goes out of scope, avoiding the leak of a raw new.
  cv::Ptr<cv::FeatureDetector> detector;
  if (ParameterServer::instance ()->get<std::string> ("feature_extractor") == "SIFT")
    detector = new cv::SiftFeatureDetector;
  else
    detector = new cv::SurfFeatureDetector (400);

  detector->detect (image_greyscale, keypoints);
}

void RGBFeatureDetection::extractFeatures (const cv::Mat& input_image,
    std::vector<cv::KeyPoint>& keypoints, cv::Mat& descriptors)
{
  // Convert to greyscale; the descriptor extractors work on single-channel images.
  cv::Mat image_greyscale;
  cvtColor (input_image, image_greyscale, CV_RGB2GRAY);

  // Pick the extractor from the "feature_descriptor" parameter. cv::Ptr releases it
  // when it goes out of scope, avoiding the leak of a raw new.
  cv::Ptr<cv::DescriptorExtractor> extractor;
  if (ParameterServer::instance ()->get<std::string> ("feature_descriptor") == "SIFT")
    extractor = new cv::SiftDescriptorExtractor;
  else
    extractor = new cv::SurfDescriptorExtractor;

  extractor->compute (image_greyscale, keypoints, descriptors);

  // Optionally write out the greyscale image with the keypoints drawn in, for debugging.
  if (ParameterServer::instance ()->get<bool> ("save_features_image"))
  {
    cv::Mat output;
    cv::drawKeypoints (image_greyscale, keypoints, output);
    std::stringstream result;
    result << "sift_result" << image_counter_++ << ".jpg";
    cv::imwrite (result.str (), output);
  }
}
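
// Usage sketch (an illustrative addition, not part of the original file): how the
// detect -> extract -> project steps above fit together for one RGB-D frame. The
// helper name and the assumption that point_cloud is organized and registered to
// rgb_image are hypothetical.
static void exampleFeaturePipeline (const cv::Mat& rgb_image,
    const PointCloudConstPtr point_cloud)
{
  RGBFeatureDetection feature_detection;

  std::vector<cv::KeyPoint> keypoints;
  cv::Mat descriptors;
  std::vector<Eigen::Vector4f, Eigen::aligned_allocator<Eigen::Vector4f> > keypoints_3d;

  feature_detection.detectFeatures (rgb_image, keypoints);
  feature_detection.extractFeatures (rgb_image, keypoints, descriptors);
  feature_detection.projectFeaturesTo3D (keypoints, keypoints_3d, point_cloud);

  ROS_INFO ("%d keypoints with valid depth", (int) keypoints_3d.size ());
}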