rgbdslam.cpp
//Documentation see header file
#include "pcl/ros/conversions.h"
#include <pcl/io/io.h>
#include "pcl/common/transform.h"
#include "pcl_ros/transforms.h"
#include <registration/registration_rgbdslam.h>
#include <cv_bridge/CvBridge.h>
#include <opencv2/imgproc/imgproc.hpp>
#include <iostream>
#include <sstream>
#include <string>
#include <cv.h>
#include <ctime>
#include "node.h"
#include <sensor_msgs/PointCloud2.h>

Registration_RGBDSLAM::Registration_RGBDSLAM()
{

}

OpenNIListener::OpenNIListener(ros::NodeHandle nh, const char* visual_topic,
                               const char* depth_topic, const char* info_topic,
                               const char* cloud_topic, const char* detector_type,
                               const char* extractor_type)
: graph_mgr(nh),
  visual_sub_(nh, visual_topic, 3),
  depth_sub_(nh, depth_topic, 3),
  info_sub_(nh, info_topic, 3),
  cloud_sub_(nh, cloud_topic, 3),
  sync_(MySyncPolicy(10), visual_sub_, depth_sub_, info_sub_, cloud_sub_),
  depth_mono8_img_(cv::Mat()),
  callback_counter_(0),
  pause_(true),
  first_frame_(true)
  //pc_pub(nh.advertise<sensor_msgs::PointCloud2>("transformed_cloud", 2))
{
  // ApproximateTime takes a queue size as its constructor argument, hence MySyncPolicy(10)
  sync_.registerCallback(boost::bind(&OpenNIListener::cameraCallback, this, _1, _2, _3, _4));
  ROS_INFO_STREAM("OpenNIListener listening to " << visual_topic << ", " << depth_topic
                   << ", " << info_topic << " and " << cloud_topic << "\n");
  detector_ = this->createDetector(detector_type);
  ROS_FATAL_COND(detector_.empty(), "No valid detector, directly after creation!");
  extractor_ = this->createDescriptorExtractor(extractor_type);
  matcher_ = new cv::BruteForceMatcher<cv::L2<float> >();
  pub_cloud_ = nh.advertise<sensor_msgs::PointCloud2>("/rgbdslam/cloud", 5);
  pub_transf_cloud_ = nh.advertise<sensor_msgs::PointCloud2>("/rgbdslam/transformed_slowdown_cloud", 5);
  pub_ref_cloud_ = nh.advertise<sensor_msgs::PointCloud2>("/rgbdslam/first_frame", 1);
}
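
// For reference: MySyncPolicy is presumably a typedef along these lines (a
// minimal sketch, assuming message_filters' ApproximateTime policy over the
// four message types of cameraCallback below; the real definition lives in the
// header, and the name ExampleSyncPolicy is hypothetical to avoid clashing with it):
#if 0
#include <message_filters/sync_policies/approximate_time.h>
typedef message_filters::sync_policies::ApproximateTime<
    sensor_msgs::Image, sensor_msgs::Image,
    sensor_msgs::CameraInfo, sensor_msgs::PointCloud2> ExampleSyncPolicy;
#endif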

void OpenNIListener::cameraCallback (const sensor_msgs::ImageConstPtr& visual_img_msg,
                                     const sensor_msgs::ImageConstPtr& depth_img_msg,
                                     const sensor_msgs::CameraInfoConstPtr& cam_info,
                                     const sensor_msgs::PointCloud2ConstPtr& point_cloud) {
  // ROS_INFO("Received data from kinect");
  if(++callback_counter_ % 3 != 0 || pause_) return; //process only every third frame, and only when not paused
  //Get images into correct format
  sensor_msgs::CvBridge bridge;
  cv::Mat depth_float_img = bridge.imgMsgToCv(depth_img_msg);
  cv::Mat visual_img = bridge.imgMsgToCv(visual_img_msg, "mono8");
  if(visual_img.rows != depth_float_img.rows || visual_img.cols != depth_float_img.cols){
    ROS_ERROR("Depth and visual image differ in size!");
    return;
  }

  //get pinhole camera model to backproject image locations later
  image_geometry::PinholeCameraModel cam_model;
  cam_model.fromCameraInfo(cam_info);
  depthToCV8UC1(depth_float_img, depth_mono8_img_);

  // create a new node and add it to the graph
  bool has_been_added = graph_mgr.addNode(Node(&graph_mgr.nh_, visual_img, depth_float_img, cam_model, detector_, extractor_, matcher_, point_cloud, point_cloud->header.seq, 0, depth_mono8_img_));
  graph_mgr.drawFeatureFlow(visual_img);

  emit newVisualImage(cvMat2QImage(visual_img));
  ROS_DEBUG("Sending PointClouds");
  //if the node position was optimized: publish the received pointcloud
  if (has_been_added && graph_mgr.freshlyOptimized_ && (pub_cloud_.getNumSubscribers() > 0)){
      ROS_INFO("Sending Original PointCloud in new Frame");
      sensor_msgs::PointCloud2 msg = *point_cloud;
      msg.header.stamp = graph_mgr.time_of_last_transform_;
      msg.header.frame_id = "/slam_transform";
      pub_cloud_.publish(msg);
  }
  //Very slow, but only done if subscribed to: transform the pointcloud to the fixed coordinate system and resend it
  if (has_been_added && graph_mgr.freshlyOptimized_ && (pub_transf_cloud_.getNumSubscribers() > 0)){
      ROS_INFO("Sending Transformed PointCloud in fixed Frame");
      pcl::PointCloud<pcl::PointXYZRGB> pcl_cloud;
      pcl::fromROSMsg(*point_cloud, pcl_cloud);
      pcl_ros::transformPointCloud(pcl_cloud, pcl_cloud, graph_mgr.world2cam_);
      sensor_msgs::PointCloud2 msg;
      pcl::toROSMsg(pcl_cloud, msg);
      msg.header.frame_id = "/openni_camera";
      msg.header.stamp = ros::Time::now();
      pub_transf_cloud_.publish(msg);
  }
  //once the first node has been added, send it out unchanged as a reference
  if (first_frame_ && has_been_added && (pub_ref_cloud_.getNumSubscribers() > 0)){
      ROS_INFO("Sending Reference PointCloud");
      pub_ref_cloud_.publish(*point_cloud);
      first_frame_ = false;
  }
  //emit newDepthImage(cvMat2QImage(depth_mono8_img_));//overwrites last cvMat2QImage
}
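
// Usage sketch (hypothetical node setup; the topic names below are invented for
// illustration and the actual launch configuration may differ): construct the
// listener once and let ros::spin() drive the synchronized callback above.
#if 0
int main(int argc, char** argv) {
  ros::init(argc, argv, "rgbdslam");
  ros::NodeHandle nh;
  OpenNIListener listener(nh, "/camera/rgb/image_mono", "/camera/depth/image",
                          "/camera/rgb/camera_info", "/camera/rgb/points",
                          "SURF", "SURF");
  ros::spin();
  return 0;
}
#endif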

using namespace cv;
FeatureDetector* OpenNIListener::createDetector( const string& detectorType ) {
    FeatureDetector* fd = 0;
    if( !detectorType.compare( "FAST" ) ) {
        //fd = new FastFeatureDetector( 20/*threshold*/, true/*nonmax_suppression*/ );
        fd = new DynamicAdaptedFeatureDetector (new FastAdjuster(20,true), 300, 600, 10);
    }
    else if( !detectorType.compare( "STAR" ) ) {
        fd = new StarFeatureDetector( 16/*max_size*/, 5/*response_threshold*/, 10/*line_threshold_projected*/,
                                      8/*line_threshold_binarized*/, 5/*suppress_nonmax_size*/ );
    }
    else if( !detectorType.compare( "SIFT" ) ) {
        fd = new SiftFeatureDetector(SIFT::DetectorParams::GET_DEFAULT_THRESHOLD(),
                                     SIFT::DetectorParams::GET_DEFAULT_EDGE_THRESHOLD());
    }
    else if( !detectorType.compare( "SURF" ) ) {
        fd = new DynamicAdaptedFeatureDetector(new SurfAdjuster(), 300, 600, 5);
        //fd = new SurfFeatureDetector( 300./*hessian_threshold*/, 3 /*octaves*/, 4/*octave_layers*/ );
    }
    else if( !detectorType.compare( "MSER" ) ) {
        fd = new MserFeatureDetector( 5/*delta*/, 60/*min_area*/, 14400/*max_area*/, 0.25f/*max_variation*/,
                0.2/*min_diversity*/, 200/*max_evolution*/, 1.01/*area_threshold*/, 0.003/*min_margin*/,
                5/*edge_blur_size*/ );
    }
    else if( !detectorType.compare( "GFTT" ) ) {
        fd = new GoodFeaturesToTrackDetector( 200/*maxCorners*/, 0.001/*qualityLevel*/, 1./*minDistance*/,
                                              5/*blockSize*/, true/*useHarrisDetector*/, 0.04/*k*/ );
    }
    else {
      ROS_ERROR("No valid detector-type given: %s. Using SURF.", detectorType.c_str());
      fd = createDetector("SURF"); //recursive call with correct parameter
    }
    ROS_ERROR_COND(fd == 0, "No detector could be created");
    return fd;
}

DescriptorExtractor* OpenNIListener::createDescriptorExtractor( const string& descriptorType ) {
    DescriptorExtractor* extractor = 0;
    if( !descriptorType.compare( "SIFT" ) ) {
        extractor = new SiftDescriptorExtractor();/*( double magnification=SIFT::DescriptorParams::GET_DEFAULT_MAGNIFICATION(), bool isNormalize=true, bool recalculateAngles=true, int nOctaves=SIFT::CommonParams::DEFAULT_NOCTAVES, int nOctaveLayers=SIFT::CommonParams::DEFAULT_NOCTAVE_LAYERS, int firstOctave=SIFT::CommonParams::DEFAULT_FIRST_OCTAVE, int angleMode=SIFT::CommonParams::FIRST_ANGLE )*/
    }
    else if( !descriptorType.compare( "SURF" ) ) {
        extractor = new SurfDescriptorExtractor();/*( int nOctaves=4, int nOctaveLayers=2, bool extended=false )*/
    }
    else {
      ROS_ERROR("No valid descriptor-extractor type given: %s. Using SURF.", descriptorType.c_str());
      extractor = createDescriptorExtractor("SURF"); //recursive call with correct parameter
    }
    return extractor;
}
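
// Example of exercising the two factories together with the brute-force matcher
// created in the constructor (a sketch only; img1 and img2 stand for any pair of
// 8-bit grayscale cv::Mat images and are not defined in this file):
#if 0
std::vector<cv::KeyPoint> kpts1, kpts2;
detector_->detect(img1, kpts1);                 // find interest points
detector_->detect(img2, kpts2);
cv::Mat desc1, desc2;
extractor_->compute(img1, kpts1, desc1);        // one descriptor row per keypoint
extractor_->compute(img2, kpts2, desc2);
std::vector<cv::DMatch> matches;
matcher_->match(desc1, desc2, matches);         // nearest neighbor in L2 distance
#endif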

void OpenNIListener::togglePause(){
  pause_ = !pause_;
}

// Old helper functions from earlier times. Could be useful later on.

QImage OpenNIListener::cvMat2QImage(const cv::Mat& image){
  ROS_DEBUG_STREAM("Converting Matrix of type " << openCVCode2String(image.type()) << " to RGBA");
  ROS_DEBUG_STREAM("Target Matrix is of type " << openCVCode2String(rgba_buffer_.type()));
  /*if(image.channels() >= 3){ //Add alpha channel if it is a color image
    cv::Mat alpha( image.rows, image.cols, CV_8UC1, cv::Scalar(255)); //TODO this could be buffered for performance
    cv::Mat in[] = { image , alpha };
    // rgba[0] -> bgr[2], rgba[1] -> bgr[1],
    // rgba[2] -> bgr[0], rgba[3] -> alpha[0]
    int from_to[] = { 2,0,  1,1,  0,2,  3,3 };
    mixChannels( in , 2, &rgba_buffer, 1, from_to, 4 );
  }else if(image.channels() == 1){   // Convert visual image to rgba */
  if(image.rows != rgba_buffer_.rows || image.cols != rgba_buffer_.cols){
    ROS_INFO("Created new rgba_buffer");
    rgba_buffer_ = cv::Mat( image.rows, image.cols, CV_8UC4);
    printMatrixInfo(rgba_buffer_);
  }
  cv::Mat alpha( image.rows, image.cols, CV_8UC1, cv::Scalar(255)); //TODO this could be buffered for performance
  cv::Mat in[] = { image, alpha };
  // replicate the gray channel into rgba[0..2], copy alpha into rgba[3]
  int from_to[] = { 0,0,  0,1,  0,2,  1,3 };
  mixChannels( in , 2, &rgba_buffer_, 1, from_to, 4 );
  printMatrixInfo(rgba_buffer_);
  //cv::cvtColor(image, rgba_buffer_, CV_GRAY2RGBA);}
  //}
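  // Note: the QImage constructed below wraps rgba_buffer_'s memory without copying,
  // so the returned image is only valid while rgba_buffer_ is not reallocated.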
  return QImage((unsigned char *)(rgba_buffer_.data),
                rgba_buffer_.cols, rgba_buffer_.rows,
                rgba_buffer_.step, QImage::Format_RGB32 );
}

void transformPointCloud (const Eigen::Matrix4f &transform, const sensor_msgs::PointCloud2 &in,
                          sensor_msgs::PointCloud2 &out)
{
  // Get X-Y-Z indices
  int x_idx = pcl::getFieldIndex (in, "x");
  int y_idx = pcl::getFieldIndex (in, "y");
  int z_idx = pcl::getFieldIndex (in, "z");

  if (x_idx == -1 || y_idx == -1 || z_idx == -1) {
    ROS_ERROR ("Input dataset has no X-Y-Z coordinates! Cannot convert to Eigen format.");
    return;
  }

  if (in.fields[x_idx].datatype != sensor_msgs::PointField::FLOAT32 ||
      in.fields[y_idx].datatype != sensor_msgs::PointField::FLOAT32 ||
      in.fields[z_idx].datatype != sensor_msgs::PointField::FLOAT32) {
    ROS_ERROR ("X-Y-Z coordinates not floats. Currently only floats are supported.");
    return;
  }
  // Check if distance is available
  int dist_idx = pcl::getFieldIndex (in, "distance");
  // Copy the other data
  if (&in != &out)
  {
    out.header = in.header;
    out.height = in.height;
    out.width  = in.width;
    out.fields = in.fields;
    out.is_bigendian = in.is_bigendian;
    out.point_step   = in.point_step;
    out.row_step     = in.row_step;
    out.is_dense     = in.is_dense;
    out.data.resize (in.data.size ());
    // Copy everything as it's faster than copying individual elements
    memcpy (&out.data[0], &in.data[0], in.data.size ());
  }

  Eigen::Array4i xyz_offset (in.fields[x_idx].offset, in.fields[y_idx].offset, in.fields[z_idx].offset, 0);

  for (size_t i = 0; i < in.width * in.height; ++i) {
    Eigen::Vector4f pt (*(float*)&in.data[xyz_offset[0]], *(float*)&in.data[xyz_offset[1]], *(float*)&in.data[xyz_offset[2]], 1);
    Eigen::Vector4f pt_out;

    bool max_range_point = false;
    // Guard against dist_idx == -1: only index in.fields with it if the field exists
    int distance_ptr_offset = (dist_idx < 0) ? -1 : (i*in.point_step + in.fields[dist_idx].offset);
    float* distance_ptr = (dist_idx < 0 ? NULL : (float*)(&in.data[distance_ptr_offset]));
    if (!std::isfinite (pt[0]) || !std::isfinite (pt[1]) || !std::isfinite (pt[2])) {
      if (distance_ptr==NULL || !std::isfinite(*distance_ptr)) { // Invalid point
        pt_out = pt;
      } else { // max range point
        pt[0] = *distance_ptr;  // Replace x with the x value saved in distance
        pt_out = transform * pt;
        max_range_point = true;
        //std::cout << pt[0]<<","<<pt[1]<<","<<pt[2]<<" => "<<pt_out[0]<<","<<pt_out[1]<<","<<pt_out[2]<<"\n";
      }
    } else {
      pt_out = transform * pt;
    }

    if (max_range_point) {
      // Save x value in distance again
      *(float*)(&out.data[distance_ptr_offset]) = pt_out[0];
      pt_out[0] = std::numeric_limits<float>::quiet_NaN();
      //std::cout << __PRETTY_FUNCTION__<<": "<<i << "is a max range point.\n";
    }

    memcpy (&out.data[xyz_offset[0]], &pt_out[0], sizeof (float));
    memcpy (&out.data[xyz_offset[1]], &pt_out[1], sizeof (float));
    memcpy (&out.data[xyz_offset[2]], &pt_out[2], sizeof (float));

    xyz_offset += in.point_step;
  }

  // Check if the viewpoint information is present
  int vp_idx = pcl::getFieldIndex (in, "vp_x");
  if (vp_idx != -1) {
    // Transform the viewpoint info too
    for (size_t i = 0; i < out.width * out.height; ++i) {
      float *pstep = (float*)&out.data[i * out.point_step + out.fields[vp_idx].offset];
      // Assume vp_x, vp_y, vp_z are consecutive
      Eigen::Vector4f vp_in (pstep[0], pstep[1], pstep[2], 1);
      Eigen::Vector4f vp_out = transform * vp_in;

      pstep[0] = vp_out[0];
      pstep[1] = vp_out[1];
      pstep[2] = vp_out[2];
    }
  }
}
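
// Usage sketch for the helper above (assembling a rigid transform with Eigen;
// cloud_in stands for any sensor_msgs::PointCloud2 with float32 x/y/z fields
// and is not defined in this file):
#if 0
Eigen::Matrix4f tf = Eigen::Matrix4f::Identity();
tf.block<3,3>(0,0) = Eigen::AngleAxisf(0.5f * M_PI, Eigen::Vector3f::UnitZ()).toRotationMatrix();
tf(0,3) = 1.0f; // translate one meter along x
sensor_msgs::PointCloud2 cloud_out;
transformPointCloud(tf, cloud_in, cloud_out);
#endif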

void depthToCV8UC1(const cv::Mat& float_img, cv::Mat& mono8_img){
  //Process images
  if(mono8_img.rows != float_img.rows || mono8_img.cols != float_img.cols){
    mono8_img = cv::Mat(float_img.size(), CV_8UC1);
  }
  //The following doesn't work due to NaNs
  //double minVal, maxVal;
  //minMaxLoc(float_img, &minVal, &maxVal);
  //ROS_DEBUG("Minimum/Maximum Depth in current image: %f/%f", minVal, maxVal);
  //mono8_img = cv::Scalar(0);
  //cv::line( mono8_img, cv::Point2i(10,10),cv::Point2i(200,100), cv::Scalar(255), 3, 8);
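  //Scale linearly: assuming depth in meters, the gain of 100 yields 1cm per gray
  //level; depths beyond 2.55m saturate at 255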
  cv::convertScaleAbs(float_img, mono8_img, 100, 0.0);
}

//Little debugging helper functions
std::string openCVCode2String(unsigned int code){
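  //OpenCV encodes types as CV_MAKETYPE(depth, channels) = depth + ((channels-1) << 3),
  //which explains the otherwise cryptic case values below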
  switch(code){
    case 0 : return std::string("CV_8UC1" );
    case 8 : return std::string("CV_8UC2" );
    case 16: return std::string("CV_8UC3" );
    case 24: return std::string("CV_8UC4" );
    case 2 : return std::string("CV_16UC1");
    case 10: return std::string("CV_16UC2");
    case 18: return std::string("CV_16UC3");
    case 26: return std::string("CV_16UC4");
    case 5 : return std::string("CV_32FC1");
    case 13: return std::string("CV_32FC2");
    case 21: return std::string("CV_32FC3");
    case 29: return std::string("CV_32FC4");
  }
  return std::string("Unknown");
}

void printMatrixInfo(cv::Mat& image){
  ROS_DEBUG_STREAM("Matrix Type:" << openCVCode2String(image.type()) << " rows: " << image.rows << " cols: " << image.cols);
}


cob_3d_registration
Author(s): Joshua Hampp
autogenerated on Wed Aug 26 2015 11:02:36