openni_listener.cpp
00001 /* This file is part of RGBDSLAM.
00002  * 
00003  * RGBDSLAM is free software: you can redistribute it and/or modify
00004  * it under the terms of the GNU General Public License as published by
00005  * the Free Software Foundation, either version 3 of the License, or
00006  * (at your option) any later version.
00007  * 
00008  * RGBDSLAM is distributed in the hope that it will be useful,
00009  * but WITHOUT ANY WARRANTY; without even the implied warranty of
00010  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
00011  * GNU General Public License for more details.
00012  * 
00013  * You should have received a copy of the GNU General Public License
00014  * along with RGBDSLAM.  If not, see <http://www.gnu.org/licenses/>.
00015  */
00016 
00017 
00018 //For documentation see the header file
00019 #include "pcl/ros/conversions.h"
00020 #include <pcl/io/io.h>
00021 //#include "pcl/common/transform.h"
00022 #include "pcl_ros/transforms.h"
00023 #include "openni_listener.h"
00024 #include <cv_bridge/cv_bridge.h>
00025 #include <opencv2/imgproc/imgproc.hpp>
00026 #include <iostream>
00027 #include <sstream>
00028 #include <string>
00029 #include <cv.h>
00030 #include <ctime>
00031 #include <sensor_msgs/PointCloud2.h>
00032 #include <Eigen/Core>
00033 #include "node.h"
00034 #include "misc.h"
00035 //#include <image_geometry/pinhole_camera_model.h>
00036 //#include "pcl/ros/for_each_type.h"
00037 
00038 //For rosbag reading
00039 #include <rosbag/view.h>
00040 #include <boost/foreach.hpp>
00041 
00042 
00043 #include "parameter_server.h"
00044 //for comparison with ground truth from mocap and movable cameras on robots
00045 #include <tf/transform_listener.h>
00046 
00047 typedef message_filters::Subscriber<sensor_msgs::Image> image_sub_type;      
00048 typedef message_filters::Subscriber<sensor_msgs::CameraInfo> cinfo_sub_type;      
00049 typedef message_filters::Subscriber<sensor_msgs::PointCloud2> pc_sub_type;      
00051 
00052 
00053 
00054 
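// Constructor: sets up the input pipeline. Without a bagfile it subscribes to
// the configured RGB/depth/cloud (Kinect), RGB/depth/camera_info (no cloud) or
// wide-angle stereo topics via message_filters synchronizers. With a bagfile it
// creates fake BagSubscribers that are fed by loadBag() running in a background
// thread, and republishes the bag's tf messages on /tf.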
00055 OpenNIListener::OpenNIListener(ros::NodeHandle nh, GraphManager* graph_mgr)
00056 : graph_mgr_(graph_mgr),
00057   stereo_sync_(NULL), kinect_sync_(NULL), no_cloud_sync_(NULL),
00058   visua_sub_(NULL), depth_sub_(NULL), cloud_sub_(NULL),
00059   depth_mono8_img_(cv::Mat()),
00060   save_bag_file(false),
00061   pause_(ParameterServer::instance()->get<bool>("start_paused")),
00062   getOneFrame_(false),
00063   first_frame_(true),
00064   tflistener_(new tf::TransformListener(nh)),
00065   data_id_(0),
00066   image_encoding_("rgb8")
00067 {
00068   ParameterServer* params = ParameterServer::instance();
00069   int q = params->get<int>("subscriber_queue_size");
00070   std::string bagfile_name = params->get<std::string>("bagfile_name");
00071   std::string visua_tpc = params->get<std::string>("topic_image_mono");
00072   std::string depth_tpc = params->get<std::string>("topic_image_depth");
00073   std::string cinfo_tpc = params->get<std::string>("camera_info_topic");
00074 
00075   if(bagfile_name.empty()){
00076     std::string cloud_tpc = params->get<std::string>("topic_points");
00077     std::string widev_tpc = params->get<std::string>("wide_topic");
00078     std::string widec_tpc = params->get<std::string>("wide_cloud_topic");
00079 
00080     //All information from Kinect
00081     if(!visua_tpc.empty() && !depth_tpc.empty() && !cloud_tpc.empty())
00082     {   
00083         visua_sub_ = new image_sub_type(nh, visua_tpc, q);
00084         depth_sub_ = new image_sub_type (nh, depth_tpc, q);
00085         cloud_sub_ = new pc_sub_type (nh, cloud_tpc, q);  
00086         kinect_sync_ = new message_filters::Synchronizer<KinectSyncPolicy>(KinectSyncPolicy(q),  *visua_sub_, *depth_sub_, *cloud_sub_);
00087         kinect_sync_->registerCallback(boost::bind(&OpenNIListener::kinectCallback, this, _1, _2, _3));
00088         ROS_INFO_STREAM("Listening to " << visua_tpc << ", " << depth_tpc << " and " << cloud_tpc);
00089     } 
00090     //No cloud, but visual image and depth
00091     else if(!visua_tpc.empty() && !depth_tpc.empty() && !cinfo_tpc.empty() && cloud_tpc.empty())
00092     {   
00093         visua_sub_ = new image_sub_type(nh, visua_tpc, q);
00094         depth_sub_ = new image_sub_type(nh, depth_tpc, q);
00095         cinfo_sub_ = new cinfo_sub_type(nh, cinfo_tpc, q);
00096         no_cloud_sync_ = new message_filters::Synchronizer<NoCloudSyncPolicy>(NoCloudSyncPolicy(q),  *visua_sub_, *depth_sub_, *cinfo_sub_);
00097         no_cloud_sync_->registerCallback(boost::bind(&OpenNIListener::noCloudCallback, this, _1, _2, _3));
00098         ROS_INFO_STREAM("Listening to " << visua_tpc << " and " << depth_tpc);
00099     } 
00100 
00101     //All information from stereo                                               
00102     if(!widec_tpc.empty() && !widev_tpc.empty())
00103     {   
00104       visua_sub_ = new image_sub_type(nh, widev_tpc, q);
00105       cloud_sub_ = new pc_sub_type(nh, widec_tpc, q);
00106       stereo_sync_ = new message_filters::Synchronizer<StereoSyncPolicy>(StereoSyncPolicy(q), *visua_sub_, *cloud_sub_);
00107       stereo_sync_->registerCallback(boost::bind(&OpenNIListener::stereoCallback, this, _1, _2));
00108       ROS_INFO_STREAM("Listening to " << widev_tpc << " and " << widec_tpc );
00109     } 
00110 
00111     detector_ = createDetector(params->get<std::string>("feature_detector_type"));
00112     extractor_ = createDescriptorExtractor(params->get<std::string>("feature_extractor_type"));
00113 
00114     if(params->get<bool>("concurrent_node_construction")){
00115       ROS_DEBUG("QThreadPool reports %i ideal threads on this computer. Setting the maximum to twice that plus two, because the QtRos thread is very lightweight", QThread::idealThreadCount());
00116       QThreadPool::globalInstance()->setMaxThreadCount(QThread::idealThreadCount()*2+2);
00117     }
00118 
00119   } 
00120   else //Bagfile given
00121   {
00122 
00123     tf_pub_ = nh.advertise<tf::tfMessage>("/tf", 10);
00124     //All information from Kinect
00125     if(!visua_tpc.empty() && !depth_tpc.empty() && !cinfo_tpc.empty())
00126     {   
00127       // Set up fake subscribers to capture images
00128       depth_img_sub_ = new BagSubscriber<sensor_msgs::Image>();
00129       rgb_img_sub_ = new BagSubscriber<sensor_msgs::Image>();
00130       cam_info_sub_ = new BagSubscriber<sensor_msgs::CameraInfo>();
00131       no_cloud_sync_ = new message_filters::Synchronizer<NoCloudSyncPolicy>(NoCloudSyncPolicy(q),  *rgb_img_sub_, *depth_img_sub_, *cam_info_sub_);
00132       no_cloud_sync_->registerCallback(boost::bind(&OpenNIListener::noCloudCallback, this, _1, _2, _3));
00133       ROS_INFO_STREAM("Listening to " << visua_tpc << " and " << depth_tpc);
00134     } 
00135 
00136 
00137     detector_ = createDetector(params->get<std::string>("feature_detector_type"));
00138     extractor_ = createDescriptorExtractor(params->get<std::string>("feature_extractor_type"));
00139     QtConcurrent::run(this, &OpenNIListener::loadBag, bagfile_name);
00140 
00141     if(params->get<bool>("concurrent_node_construction")){
00142       ROS_DEBUG("QThreadPool reports %i ideal threads on this computer. Increasing the maximum by two, because the QtRos and loadBag threads are lightweight", QThread::idealThreadCount());
00143       QThreadPool::globalInstance()->setMaxThreadCount(QThread::idealThreadCount()+2);
00144     }
00145   }
00146 }
00147 
00148  
00150 
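// loadBag: replays a bagfile as if the data arrived live. tf messages are
// republished immediately; image, depth and camera_info messages are buffered
// and only handed to the fake subscribers once a tf message at least one
// second newer has been published, so transform lookups for each frame can
// succeed. After the bag is exhausted, the function waits for the GraphManager
// and, in batch mode, runs a final optimization and saves the trajectory.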
00152 void OpenNIListener::loadBag(const std::string &filename)
00153 {
00154   struct timespec starttime, finish; double elapsed; clock_gettime(CLOCK_MONOTONIC, &starttime);
00155   ROS_INFO("Loading Bagfile %s", filename.c_str());
00156   { //bag will be destructed after this block (hopefully frees memory for the optimizer)
00157     rosbag::Bag bag;
00158     try{
00159       bag.open(filename, rosbag::bagmode::Read);
00160     } catch(const rosbag::BagIOException& ex) {
00161       ROS_FATAL("Opening Bagfile %s failed: %s Quitting!", filename.c_str(), ex.what());
00162       ros::shutdown();
00163       return;
00164     }
00165     ROS_INFO("Opened Bagfile %s", filename.c_str());
00166 
00167     ParameterServer* params = ParameterServer::instance();
00168     std::string visua_tpc = params->get<std::string>("topic_image_mono");
00169     std::string depth_tpc = params->get<std::string>("topic_image_depth");
00170     std::string cinfo_tpc = params->get<std::string>("camera_info_topic");
00171     std::string tf_tpc = std::string("/tf");
00172 
00173     // Image topics to load for bagfiles
00174     std::vector<std::string> topics;
00175     topics.push_back(visua_tpc);
00176     topics.push_back(depth_tpc);
00177     topics.push_back(cinfo_tpc);
00178     topics.push_back(tf_tpc);
00179 
00180     rosbag::View view(bag, rosbag::TopicQuery(topics));
00181    // int lc=0; 
00182     // Simulate sending of the messages in the bagfile
00183     std::deque<sensor_msgs::Image::ConstPtr> vis_images;
00184     std::deque<sensor_msgs::Image::ConstPtr> dep_images;
00185     std::deque<sensor_msgs::CameraInfo::ConstPtr> cam_infos;
00186     ros::Time last_tf=ros::Time(0);
00187     BOOST_FOREACH(rosbag::MessageInstance const m, view)
00188     {
00189     //  if(lc++ > 1000) break;
00190       do{ 
00191         usleep(150);
00192         if(!ros::ok()) return;
00193       } while(pause_);
00194 
00195       if (m.getTopic() == visua_tpc || ("/" + m.getTopic() == visua_tpc))
00196       {
00197         sensor_msgs::Image::ConstPtr rgb_img = m.instantiate<sensor_msgs::Image>();
00198         if (rgb_img) vis_images.push_back(rgb_img);
00199         ROS_DEBUG("Found Message of %s", visua_tpc.c_str());
00200       }
00201       
00202       if (m.getTopic() == depth_tpc || ("/" + m.getTopic() == depth_tpc))
00203       {
00204         sensor_msgs::Image::ConstPtr depth_img = m.instantiate<sensor_msgs::Image>();
00205         //if (depth_img) depth_img_sub_->newMessage(depth_img);
00206         if (depth_img) dep_images.push_back(depth_img);
00207         ROS_DEBUG("Found Message of %s", depth_tpc.c_str());
00208       }
00209       if (m.getTopic() == cinfo_tpc || ("/" + m.getTopic() == cinfo_tpc))
00210       {
00211         sensor_msgs::CameraInfo::ConstPtr cam_info = m.instantiate<sensor_msgs::CameraInfo>();
00212         //if (cam_info) cam_info_sub_->newMessage(cam_info);
00213         if (cam_info) cam_infos.push_back(cam_info);
00214         ROS_DEBUG("Found Message of %s", cinfo_tpc.c_str());
00215       }
00216       if (m.getTopic() == tf_tpc|| ("/" + m.getTopic() == tf_tpc)){
00217         tf::tfMessage::ConstPtr tf_msg = m.instantiate<tf::tfMessage>();
00218         if (tf_msg) {
00219           //if(tf_msg->transforms[0].header.frame_id == "/kinect") continue;//avoid destroying tf tree if odom is used
00220           //prevents missing callerid warning
00221           boost::shared_ptr<std::map<std::string, std::string> > msg_header_map = tf_msg->__connection_header;
00222           (*msg_header_map)["callerid"] = "rgbdslam";
00223           tf_pub_.publish(tf_msg);
00224           ROS_DEBUG("Found Message of %s", tf_tpc.c_str());
00225           last_tf = tf_msg->transforms[0].header.stamp;
00226           last_tf -= ros::Duration(1.0);
00227         }
00228       }
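      // Deliver buffered messages only up to one second behind the latest
      // republished tf, so the transform listener already holds tf data
      // around each image timestamp when the synchronizer callbacks fire.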
00229       while(!vis_images.empty() && vis_images.front()->header.stamp < last_tf){
00230           rgb_img_sub_->newMessage(vis_images.front());
00231           vis_images.pop_front();
00232       }
00233       while(!dep_images.empty() && dep_images.front()->header.stamp < last_tf){
00234           depth_img_sub_->newMessage(dep_images.front());
00235           dep_images.pop_front();
00236       }
00237       while(!cam_infos.empty() && cam_infos.front()->header.stamp < last_tf){
00238           cam_info_sub_->newMessage(cam_infos.front());
00239           cam_infos.pop_front();
00240       }
00241 
00242     }
00243     ROS_WARN_NAMED("eval", "Finished processing of Bagfile");
00244     bag.close();
00245   }
00246   do{ 
00247     if(!future_.isFinished()){
00248       graph_mgr_->finishUp();
00249       future_.waitForFinished(); //Wait while the GraphManager is still computing.
00250     }
00251     usleep(1000000); //give it a chance to receive further messages
00252     if(!ros::ok()) return;
00253     ROS_WARN("Waiting for processing to finish.");
00254   } while(graph_mgr_->isBusy());
00255 
00256   if(ParameterServer::instance()->get<bool>("batch_processing")){
00257     graph_mgr_->saveTrajectory(QString(filename.c_str()) + "iteration_" + QString::number(0));
00258     ROS_WARN_NAMED("eval", "Finished with optimization iteration %i.", 0);
00259     graph_mgr_->optimizeGraph(20, true);//Non threaded call
00260     graph_mgr_->saveTrajectory(QString(filename.c_str()) + "iteration_" + QString::number(1));
00261     ROS_WARN_NAMED("eval", "Finished with optimization iteration %i.", 1);
00262     /*
00263     graph_mgr_->sanityCheck(100.0); //no trajectory is larger than a 100m
00264     graph_mgr_->pruneEdgesWithErrorAbove(1.5);
00265     graph_mgr_->optimizeGraph(20, true);
00266     ROS_INFO("Finished with 2nd optimization");
00267     graph_mgr_->saveTrajectory(QString(filename.c_str()) + "after2_optimization");
00268     */
00269     if(ParameterServer::instance()->get<bool>("store_pointclouds")){
00270       graph_mgr_->saveIndividualClouds(QString(filename.c_str()), false);//not threaded
00271     }
00272     clock_gettime(CLOCK_MONOTONIC, &finish); elapsed = (finish.tv_sec - starttime.tv_sec); elapsed += (finish.tv_nsec - starttime.tv_nsec) / 1000000000.0; ROS_INFO_STREAM_COND_NAMED(elapsed > ParameterServer::instance()->get<double>("min_time_reported"), "timings", __FUNCTION__ << " runtime: "<< elapsed <<" s");
00273     if(!ParameterServer::instance()->get<bool>("use_gui")){
00274       Q_EMIT bagFinished();
00275       ros::shutdown();
00276       usleep(10000000);//10sec to allow all threads to finish (don't know how much is required)
00277       if(ros::ok()){
00278         ROS_ERROR("Should have shutdown meanwhile");
00279         exit(1);
00280       }
00281     }
00282   }
00283   clock_gettime(CLOCK_MONOTONIC, &finish); elapsed = (finish.tv_sec - starttime.tv_sec); elapsed += (finish.tv_nsec - starttime.tv_nsec) / 1000000000.0; ROS_INFO_STREAM_COND_NAMED(elapsed > ParameterServer::instance()->get<double>("min_time_reported"), "timings", __FUNCTION__ << " runtime: "<< elapsed <<" s");
00284 }
00285 
00286 void OpenNIListener::stereoCallback(const sensor_msgs::ImageConstPtr& visual_img_msg, const sensor_msgs::PointCloud2ConstPtr& point_cloud)
00287 {
00288     struct timespec starttime, finish; double elapsed; clock_gettime(CLOCK_MONOTONIC, &starttime);
00289     ROS_INFO("Received data from stereo cam");
00290     ROS_WARN_ONCE_NAMED("eval", "First RGBD-Data Received");
00291     if(++data_id_ % ParameterServer::instance()->get<int>("data_skip_step") != 0){ 
00292       ROS_INFO_THROTTLE(1, "Skipping Frame %i because of data_skip_step setting (this msg is only shown once a sec)", data_id_);
00293       return;
00294     }
00295 
00296     //calculate depthMask
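    // Build an 8-bit validity/depth mask from the cloud: NaN measurements map
    // to 0, valid values are scaled by 100 so that 2.55m saturates at 255.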
00297     cv::Mat_<uchar> depth_img(visual_img_msg->height, visual_img_msg->width);
00298     float value;
00299     unsigned int pt_ctr = 0;
00300     for(unsigned int y = 0; y < visual_img_msg->height; y++){
00301         for(unsigned int x = 0; x < visual_img_msg->width; x++){
00302             const uchar* value_ch_ptr = &(point_cloud->data[pt_ctr]);
00303             value = *((const float*)value_ch_ptr);
00304             pt_ctr += point_cloud->point_step;
00305             if(value != value){//isnan
00306                 depth_img(y,x) = 0;
00307             }else{
00308                 depth_img(y,x) = (uchar)(value*100.0); //Full white at 2.55 meter
00309             }
00310         }
00311     }
00312 
00313     //Get images into OpenCV format
00314     //sensor_msgs::CvBridge bridge;
00315     cv::Mat visual_img =  cv_bridge::toCvCopy(visual_img_msg, "mono8")->image;
00316     if(visual_img.rows != depth_img.rows ||
00317        visual_img.cols != depth_img.cols ||
00318        point_cloud->width != (uint32_t) visual_img.cols ||
00319        point_cloud->height != (uint32_t) visual_img.rows){
00320       ROS_ERROR("PointCloud, depth and visual image differ in size! Ignoring Data");
00321       return;
00322     }
00323 
00324     if (save_bag_file && bagfile_mutex.tryLock()){
00325        // todo: make the names dynamic
00326        bag.write("/wide_stereo/points2", ros::Time::now(), point_cloud);
00327        bag.write("/wide_stereo/left/image_mono", ros::Time::now(), visual_img_msg);
00328        ROS_INFO_STREAM("Wrote to bagfile " << bag.getFileName());
00329        bagfile_mutex.unlock();
00330        if(pause_) return;
00331     }
00332     pointcloud_type::Ptr pc_col(new pointcloud_type());//will belong to node
00333     if(ParameterServer::instance()->get<bool>("use_gui")){
00334       Q_EMIT newVisualImage(cvMat2QImage(visual_img, 0)); //visual_idx=0
00335       Q_EMIT newDepthImage (cvMat2QImage(depth_img,1));//overwrites last cvMat2QImage
00336     }
00337     pcl::fromROSMsg(*point_cloud,*pc_col);
00338     clock_gettime(CLOCK_MONOTONIC, &finish); elapsed = (finish.tv_sec - starttime.tv_sec); elapsed += (finish.tv_nsec - starttime.tv_nsec) / 1000000000.0; ROS_INFO_STREAM_COND_NAMED(elapsed > ParameterServer::instance()->get<double>("min_time_reported"), "timings", __FUNCTION__ << " runtime: "<< elapsed <<" s");
00339     cameraCallback(visual_img, pc_col, depth_img);
00340 }
00341 
00342 OpenNIListener::~OpenNIListener(){
00343   delete tflistener_;
00344 }
00345 
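// Callback for synchronized RGB image, depth image and camera info when no
// point cloud topic is used. Converts the messages to OpenCV, optionally
// records them to a bag and visualizes them, then either reconstructs a point
// cloud (if clouds are needed for storage, the GL widget or ICP) or passes the
// raw images to noCloudCameraCallback.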
00346 void OpenNIListener::noCloudCallback (const sensor_msgs::ImageConstPtr& visual_img_msg,
00347                                       const sensor_msgs::ImageConstPtr& depth_img_msg,
00348                                       const sensor_msgs::CameraInfoConstPtr& cam_info_msg) 
00349 {
00350   struct timespec starttime, finish; double elapsed; clock_gettime(CLOCK_MONOTONIC, &starttime);
00351   ROS_WARN_ONCE_NAMED("eval", "First RGBD-Data Received");
00352   ROS_DEBUG("Received data from kinect");
00353   ParameterServer* ps = ParameterServer::instance();
00354 
00355   // If only a subset of frames are used, skip computations but visualize if gui is running
00356   if(++data_id_ % ps->get<int>("data_skip_step") != 0){ 
00357     ROS_INFO_THROTTLE(1, "Skipping Frame %i because of data_skip_step setting (this msg is only shown once a sec)", data_id_);
00358     if(ps->get<bool>("use_gui")){//Show the image, even if not using it
00359       //sensor_msgs::CvBridge bridge;
00360       cv::Mat depth_float_img = cv_bridge::toCvCopy(depth_img_msg)->image;
00361       //const cv::Mat& depth_float_img_big = cv_bridge::toCvShare(depth_img_msg)->image;
00362       cv::Mat visual_img =  cv_bridge::toCvCopy(visual_img_msg)->image;
00363       //const cv::Mat& visual_img_big =  cv_bridge::toCvShare(visual_img_msg)->image;
00364       //cv::Mat visual_img, depth_float_img;
00365       //cv::resize(visual_img_big, visual_img, cv::Size(), 0.25, 0.25);
00366       //cv::resize(depth_float_img_big, depth_float_img, cv::Size(), 0.25, 0.25);
00367       if(visual_img.rows != depth_float_img.rows || 
00368          visual_img.cols != depth_float_img.cols){
00369         ROS_ERROR("depth and visual image differ in size! Ignoring Data");
00370         return;
00371       }
00372       depthToCV8UC1(depth_float_img, depth_mono8_img_); //float images can't be visualized or used as a mask. TODO: reprogram keypoint detector to use float values with NaN as mask
00373       image_encoding_ = visual_img_msg->encoding;
00374       Q_EMIT newVisualImage(cvMat2QImage(visual_img, 0)); //visual_idx=0
00375       Q_EMIT newDepthImage (cvMat2QImage(depth_mono8_img_,1));//overwrites last cvMat2QImage
00376     }
00377     return;
00378   }
00379 
00380 
00381   //Convert images to OpenCV format
00382   //sensor_msgs::CvBridge bridge;
00383   //cv::Mat depth_float_img = bridge.imgMsgToCv(depth_img_msg);
00384   //cv::Mat visual_img =  bridge.imgMsgToCv(visual_img_msg);
00385   cv::Mat depth_float_img = cv_bridge::toCvCopy(depth_img_msg)->image;
00386   //const cv::Mat& depth_float_img_big = cv_bridge::toCvShare(depth_img_msg)->image;
00387   cv::Mat visual_img =  cv_bridge::toCvCopy(visual_img_msg)->image;
00388   //const cv::Mat& visual_img_big =  cv_bridge::toCvShare(visual_img_msg)->image;
00389   //cv::Size newsize(320, 240);
00390   //cv::Mat visual_img(newsize, visual_img_big.type()), depth_float_img(newsize, depth_float_img_big.type());
00391   //cv::resize(visual_img_big, visual_img, newsize);
00392   //cv::resize(depth_float_img_big, depth_float_img, newsize);
00393   if(visual_img.rows != depth_float_img.rows || 
00394      visual_img.cols != depth_float_img.cols){
00395     ROS_ERROR("depth and visual image differ in size! Ignoring Data");
00396     return;
00397   }
00398   image_encoding_ = visual_img_msg->encoding;
00399 
00400   depthToCV8UC1(depth_float_img, depth_mono8_img_); //float images can't be visualized or used as a mask. TODO: reprogram keypoint detector to use float values with NaN as mask
00401 
00402   if(asyncFrameDrop(depth_img_msg->header.stamp, visual_img_msg->header.stamp)) 
00403     return;
00404 
00405   if (save_bag_file && bagfile_mutex.tryLock()){
00406      // todo: make the names dynamic
00407      bag.write("/camera/rgb/image_mono", ros::Time::now(), visual_img_msg);
00408      bag.write("/camera/depth/image", ros::Time::now(), depth_img_msg);
00409      ROS_INFO_STREAM("Wrote to bagfile " << bag.getFileName());
00410      bagfile_mutex.unlock();
00411   }
00412 
00413   if(ps->get<bool>("use_gui")){
00414     Q_EMIT newVisualImage(cvMat2QImage(visual_img, 0)); //visual_idx=0
00415     Q_EMIT newDepthImage (cvMat2QImage(depth_mono8_img_,1));//overwrites last cvMat2QImage
00416   }
00417   if(pause_ && !getOneFrame_) return;
00418 
00419   if(ps->get<bool>("store_pointclouds") || 
00420      (ps->get<bool>("use_glwidget") && ps->get<bool>("use_gui")) ||
00421      ps->get<bool>("use_icp")){
00422     pointcloud_type::Ptr pc_col(createXYZRGBPointCloud(depth_img_msg, visual_img_msg, cam_info_msg));
00423     clock_gettime(CLOCK_MONOTONIC, &finish); elapsed = (finish.tv_sec - starttime.tv_sec); elapsed += (finish.tv_nsec - starttime.tv_nsec) / 1000000000.0; ROS_INFO_STREAM_COND_NAMED(elapsed > ParameterServer::instance()->get<double>("min_time_reported"), "timings", __FUNCTION__ << " runtime: "<< elapsed <<" s");
00424     cameraCallback(visual_img, pc_col, depth_mono8_img_);
00425 
00426   }
00427   else {
00428     //Headless run without pointcloud storage
00429     clock_gettime(CLOCK_MONOTONIC, &finish); elapsed = (finish.tv_sec - starttime.tv_sec); elapsed += (finish.tv_nsec - starttime.tv_nsec) / 1000000000.0; ROS_INFO_STREAM_COND_NAMED(elapsed > ParameterServer::instance()->get<double>("min_time_reported"), "timings", __FUNCTION__ << " runtime: "<< elapsed <<" s");
00430     noCloudCameraCallback(visual_img, depth_float_img, depth_mono8_img_, depth_img_msg->header, cam_info_msg);
00431   }
00432 }
00433 
00434 
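// Callback for synchronized RGB image, depth image and point cloud from the
// Kinect driver. Checks that all three agree in size, converts the float depth
// image to an 8-bit mask, optionally records to a bag, visualizes, and hands
// the data on to cameraCallback.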
00435 void OpenNIListener::kinectCallback (const sensor_msgs::ImageConstPtr& visual_img_msg,//got to be mono?
00436                                      const sensor_msgs::ImageConstPtr& depth_img_msg,   
00437                                      const sensor_msgs::PointCloud2ConstPtr& point_cloud) 
00438 {
00440   struct timespec starttime, finish; double elapsed; clock_gettime(CLOCK_MONOTONIC, &starttime);
00441   ROS_DEBUG("Received data from kinect");
00442   ROS_WARN_ONCE_NAMED("eval", "First RGBD-Data Received");
00443 
00444   //Get images into OpenCV format
00445   //sensor_msgs::CvBridge bridge;
00446   //cv::Mat depth_float_img = bridge.imgMsgToCv(depth_img_msg);
00447   //cv::Mat visual_img =  bridge.imgMsgToCv(visual_img_msg);
00448   cv::Mat depth_float_img = cv_bridge::toCvCopy(depth_img_msg)->image;
00449   cv::Mat visual_img =  cv_bridge::toCvCopy(visual_img_msg)->image;
00450   if(visual_img.rows != depth_float_img.rows || 
00451      visual_img.cols != depth_float_img.cols ||
00452 
00453      point_cloud->width != (uint32_t) visual_img.cols ||
00454      point_cloud->height != (uint32_t) visual_img.rows){
00455     ROS_ERROR("PointCloud, depth and visual image differ in size! Ignoring Data");
00456     return;
00457   }
00458   image_encoding_ = visual_img_msg->encoding;
00459   depthToCV8UC1(depth_float_img, depth_mono8_img_); //float images can't be visualized or used as a mask. TODO: reprogram keypoint detector to use float values with NaN as mask
00460 
00461   if(asyncFrameDrop(depth_img_msg->header.stamp, visual_img_msg->header.stamp)) 
00462     return;
00463 
00464   if (save_bag_file && bagfile_mutex.tryLock()){
00465      // todo: make the names dynamic
00466      bag.write("/camera/rgb/points", ros::Time::now(), point_cloud);
00467      bag.write("/camera/rgb/image_mono", ros::Time::now(), visual_img_msg);
00468      bag.write("/camera/depth/image", ros::Time::now(), depth_img_msg);
00469      ROS_INFO_STREAM("Wrote to bagfile " << bag.getFileName());
00470      bagfile_mutex.unlock();
00471   }
00472 
00473   if(ParameterServer::instance()->get<bool>("use_gui")){
00474     Q_EMIT newVisualImage(cvMat2QImage(visual_img, 0)); //visual_idx=0
00475     Q_EMIT newDepthImage (cvMat2QImage(depth_mono8_img_,1));//overwrites last cvMat2QImage
00476   }
00477 
00478   if(pause_ && !getOneFrame_) { return; }//Visualization and nothing else
00479 
00480   pointcloud_type::Ptr pc_col(new pointcloud_type());//will belong to node
00481   pcl::fromROSMsg(*point_cloud,*pc_col);
00482   clock_gettime(CLOCK_MONOTONIC, &finish); elapsed = (finish.tv_sec - starttime.tv_sec); elapsed += (finish.tv_nsec - starttime.tv_nsec) / 1000000000.0; ROS_INFO_STREAM_COND_NAMED(elapsed > ParameterServer::instance()->get<double>("min_time_reported"), "timings", __FUNCTION__ << " runtime: "<< elapsed <<" s");
00483   cameraCallback(visual_img, pc_col, depth_mono8_img_);
00484 }
00485 
00486 
00487 
00488 
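// Common entry point for callbacks that provide a point cloud: converts the
// visual image to grayscale if necessary, builds a new Node (keypoints,
// descriptors and cloud), attaches the relevant tf transforms and schedules
// graph insertion via callProcessing.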
00489 void OpenNIListener::cameraCallback(cv::Mat visual_img, 
00490                                     pointcloud_type::Ptr point_cloud, 
00491                                     cv::Mat depth_mono8_img)
00492 {
00493   struct timespec starttime, finish; double elapsed; clock_gettime(CLOCK_MONOTONIC, &starttime);
00494   ROS_WARN_COND(!point_cloud, "Nullpointer for pointcloud");
00495   if(getOneFrame_) { getOneFrame_ = false; }//if getOneFrame_ is set, unset it and skip check for  pause
00496   else if(pause_) { return; }//Visualization and nothing else
00497 
00498   cv::Mat gray_img; 
00499   if(visual_img.type() == CV_8UC3){ cvtColor(visual_img, gray_img, CV_RGB2GRAY); } 
00500   else { gray_img = visual_img; }
00501 
00502   //######### Main Work: create new node ##############################################################
00503   Q_EMIT setGUIStatus("Computing Keypoints and Features");
00504   Node* node_ptr = new Node(gray_img, detector_, extractor_, point_cloud, depth_mono8_img);
00505 
00506   retrieveTransformations(point_cloud->header, node_ptr);
00507   callProcessing(visual_img, node_ptr);
00508   clock_gettime(CLOCK_MONOTONIC, &finish); elapsed = (finish.tv_sec - starttime.tv_sec); elapsed += (finish.tv_nsec - starttime.tv_nsec) / 1000000000.0; ROS_INFO_STREAM_COND_NAMED(elapsed > ParameterServer::instance()->get<double>("min_time_reported"), "timings", __FUNCTION__ << " runtime: "<< elapsed <<" s");
00509 }
00510 
00511 
00512 
00513 
00514 
00515 
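// Like cameraCallback, but constructs the Node from the depth image and the
// camera intrinsics instead of a point cloud.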
00516 void OpenNIListener::noCloudCameraCallback(cv::Mat visual_img, 
00517                                            cv::Mat depth, 
00518                                            cv::Mat depth_mono8_img,
00519                                            std_msgs::Header depth_header,
00520                                            const sensor_msgs::CameraInfoConstPtr& cam_info)
00521 {
00522   struct timespec starttime, finish; double elapsed; clock_gettime(CLOCK_MONOTONIC, &starttime);
00523   if(getOneFrame_) { //if getOneFrame_ is set, unset it and skip check for  pause
00524       getOneFrame_ = false;
00525   } else if(pause_ && !save_bag_file) { //Visualization and nothing else
00526     return; 
00527   }
00528   cv::Mat gray_img; 
00529   if(visual_img.type() == CV_8UC3){
00530     cvtColor(visual_img, gray_img, CV_RGB2GRAY);
00531   } else {
00532     gray_img = visual_img;
00533   }
00534 
00535   //######### Main Work: create new node ##############################################################
00536   Q_EMIT setGUIStatus("Computing Keypoints and Features");
00537   Node* node_ptr = new Node(gray_img, depth, depth_mono8_img, cam_info, depth_header, detector_, extractor_);
00538 
00539   retrieveTransformations(depth_header, node_ptr);//Retrieve the transform between the lens and the base-link at capturing time;
00540 
00541   callProcessing(visual_img, node_ptr);
00542   clock_gettime(CLOCK_MONOTONIC, &finish); elapsed = (finish.tv_sec - starttime.tv_sec); elapsed += (finish.tv_nsec - starttime.tv_nsec) / 1000000000.0; ROS_INFO_STREAM_COND_NAMED(elapsed > ParameterServer::instance()->get<double>("min_time_reported"), "timings", __FUNCTION__ << " runtime: "<< elapsed <<" s");
00543 }
00544 
00545 //Call function either regularly or as background thread
00546 void OpenNIListener::callProcessing(cv::Mat visual_img, Node* node_ptr)
00547 {
00548   std::clock_t parallel_wait_time=std::clock();
00549   if(!future_.isFinished()){
00550     graph_mgr_->finishUp();
00551     future_.waitForFinished(); //Wait while the GraphManager is still computing.
00552     ROS_INFO_STREAM_NAMED("timings", "waiting time: "<< ( std::clock() - parallel_wait_time ) / (double)CLOCKS_PER_SEC  <<"sec"); 
00553   }
00554 
00555   //update for visualization of the feature flow
00556   visualization_img_ = visual_img; //No copy
00557   visualization_depth_mono8_img_ = depth_mono8_img_;//No copy
00558   //With this define, processNode runs in the background and after finishing visualizes the results
00559   //Thus another Callback can be started in the meantime, to create a new node in parallel
00560   if(ParameterServer::instance()->get<bool>("concurrent_node_construction")) {
00561     ROS_DEBUG("Processing Node in parallel to the construction of the next node");
00562     if(ParameterServer::instance()->get<bool>("use_gui")){
00563       //visual_img points to the data received in the callback - which might be deallocated after the callback returns. 
00564       //This will happen before visualization if processNode is running in background, therefore the data needs to be cloned
00565       visualization_img_ = visual_img.clone();
00566       visualization_depth_mono8_img_ = depth_mono8_img_.clone();
00567     }
00568     future_ = QtConcurrent::run(this, &OpenNIListener::processNode, node_ptr); 
00569   }
00570   else { //Non-concurrent
00571     processNode(node_ptr);//regular function call
00572   }
00573 }
00574 
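// processNode: tries to add the new node to the pose graph. With the GUI
// active, the feature flow of a successful registration is visualized;
// otherwise only the node's keypoints are drawn. Nodes that could not be
// added to the graph are deleted.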
00575 void OpenNIListener::processNode(Node* new_node)
00576 {
00577   struct timespec starttime, finish; double elapsed; clock_gettime(CLOCK_MONOTONIC, &starttime);
00578   Q_EMIT setGUIStatus("Adding Node to Graph");
00579   bool has_been_added = graph_mgr_->addNode(new_node);
00580 
00581   //######### Visualization code  #############################################
00582   if(ParameterServer::instance()->get<bool>("use_gui")){
00583     if(has_been_added){
00584       if(ParameterServer::instance()->get<bool>("visualize_mono_depth_overlay")){
00585         cv::Mat feature_img = cv::Mat( visualization_img_.rows, visualization_img_.cols, CV_8UC1); 
00586         graph_mgr_->drawFeatureFlow(feature_img);
00587         Q_EMIT newFeatureFlowImage(cvMat2QImage(visualization_img_,visualization_depth_mono8_img_, feature_img, 2)); //show registration
00588       } else {
00589         graph_mgr_->drawFeatureFlow(visualization_img_, cv::Scalar(255,0,0), cv::Scalar(0,128,0) );
00590         Q_EMIT newFeatureFlowImage(cvMat2QImage(visualization_img_, 2)); //show registration
00591       }
00592     } else {
00593       if(ParameterServer::instance()->get<bool>("visualize_mono_depth_overlay")){
00594         cv::Mat feature_img = cv::Mat( visualization_img_.rows, visualization_img_.cols, CV_8UC1); 
00595         cv::drawKeypoints(feature_img, new_node->feature_locations_2d_, feature_img, cv::Scalar(155), 5);
00596         Q_EMIT newFeatureFlowImage(cvMat2QImage(visualization_img_,visualization_depth_mono8_img_, feature_img, 2)); //show registration
00597       } else {
00598         cv::drawKeypoints(visualization_img_, new_node->feature_locations_2d_, visualization_img_, cv::Scalar(155, 0,0), 5);
00599         Q_EMIT newFeatureFlowImage(cvMat2QImage(visualization_img_, 2)); //show registration
00600       }
00601     }
00602   }
00603   if(!has_been_added) delete new_node;
00604   clock_gettime(CLOCK_MONOTONIC, &finish); elapsed = (finish.tv_sec - starttime.tv_sec); elapsed += (finish.tv_nsec - starttime.tv_nsec) / 1000000000.0; ROS_INFO_STREAM_COND_NAMED(elapsed > ParameterServer::instance()->get<double>("min_time_reported"), "timings", __FUNCTION__ << " runtime: "<< elapsed <<" s");
00605 }
00606 
00607 
00608 void OpenNIListener::toggleBagRecording(){
00609   bagfile_mutex.lock();
00610   save_bag_file = !save_bag_file;
00611   // save bag
00612   if (save_bag_file)
00613   {
00614     time_t rawtime; 
00615     struct tm * timeinfo;
00616     char buffer [80];
00617 
00618     time ( &rawtime );
00619     timeinfo = localtime ( &rawtime );
00620     // create a nice name
00621     strftime (buffer,80,"kinect_%Y-%m-%d-%H-%M-%S.bag",timeinfo);
00622 
00623     bag.open(buffer, rosbag::bagmode::Write);
00624     ROS_INFO_STREAM("Opened bagfile " << bag.getFileName());
00625   } else {
00626     ROS_INFO_STREAM("Closing bagfile " << bag.getFileName());
00627     bag.close();
00628   }
00629   bagfile_mutex.unlock();
00630 }
00631 
00632 void OpenNIListener::togglePause(){
00633   pause_ = !pause_;
00634   ROS_INFO("Pause toggled to: %s", pause_? "true":"false");
00635   if(pause_) Q_EMIT setGUIStatus("Processing Thread Stopped");
00636   else Q_EMIT setGUIStatus("Processing Thread Running");
00637 }
00638 void OpenNIListener::getOneFrame(){
00639   getOneFrame_=true;
00640 }
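// Combine three single-channel images (plus a constant alpha channel) into one
// RGBA QImage, reusing the per-index buffer to avoid reallocations.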
00642 QImage OpenNIListener::cvMat2QImage(const cv::Mat& channel1, const cv::Mat& channel2, const cv::Mat& channel3, unsigned int idx){
00643   if(rgba_buffers_.size() <= idx){
00644       rgba_buffers_.resize(idx+1);
00645   }
00646   if(channel2.rows != channel1.rows || channel2.cols != channel1.cols ||
00647      channel3.rows != channel1.rows || channel3.cols != channel1.cols){
00648      ROS_ERROR("Image channels to be combined differ in size");
00649   }
00650   if(channel1.rows != rgba_buffers_[idx].rows || channel1.cols != rgba_buffers_[idx].cols){
00651     ROS_DEBUG("Created new rgba_buffer with index %i", idx);
00652     rgba_buffers_[idx] = cv::Mat( channel1.rows, channel1.cols, CV_8UC4); 
00653     //printMatrixInfo(rgba_buffers_[idx]);
00654   }
00655   cv::Mat mono_channel1 = channel1; 
00656   if(channel1.type() != CV_8UC1) {
00657     mono_channel1 = cv::Mat( channel1.rows, channel1.cols, CV_8UC1); 
00658     cv::cvtColor(channel1, mono_channel1, CV_RGB2GRAY);
00659   }
00660   std::vector<cv::Mat> input;
00661   cv::Mat alpha( channel1.rows, channel1.cols, CV_8UC1, cv::Scalar(255)); //TODO this could be buffered for performance
00662   input.push_back(mono_channel1);
00663   input.push_back(channel2);
00664   input.push_back(channel3);
00665   input.push_back(alpha);
00666   cv::merge(input, rgba_buffers_[idx]);
00667   //printMatrixInfo(rgba_buffers_[idx]);
00668   return QImage((unsigned char *)(rgba_buffers_[idx].data), 
00669                 rgba_buffers_[idx].cols, rgba_buffers_[idx].rows, 
00670                 rgba_buffers_[idx].step, QImage::Format_RGB32 );
00671 }
00672 
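// Convert a mono or color cv::Mat to an RGBA QImage, honoring the stored image
// encoding (rgb8 vs. bgr8) when reordering the channels.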
00674 QImage OpenNIListener::cvMat2QImage(const cv::Mat& image, unsigned int idx){
00675   ROS_DEBUG_STREAM("Converting Matrix of type " << openCVCode2String(image.type()) << " to RGBA");
00676   if(rgba_buffers_.size() <= idx){
00677       rgba_buffers_.resize(idx+1);
00678   }
00679   ROS_DEBUG_STREAM("Target Matrix is of type " << openCVCode2String(rgba_buffers_[idx].type()));
00680   if(image.rows != rgba_buffers_[idx].rows || image.cols != rgba_buffers_[idx].cols){
00681     ROS_DEBUG("Created new rgba_buffer with index %i", idx);
00682     rgba_buffers_[idx] = cv::Mat( image.rows, image.cols, CV_8UC4); 
00683     //printMatrixInfo(rgba_buffers_[idx], "for QImage Buffering");
00684   }
00685   char red_idx = 0, green_idx = 1, blue_idx = 2;
00686   if(image_encoding_.compare("rgb8") == 0) { red_idx = 2; blue_idx = 0; }
00687   cv::Mat alpha( image.rows, image.cols, CV_8UC1, cv::Scalar(255)); //TODO this could be buffered for performance
00688   cv::Mat in[] = { image, alpha };
00689   // rgba[0] -> bgr[2], rgba[1] -> bgr[1],
00690   // rgba[2] -> bgr[0], rgba[3] -> alpha[0]
00691 
00692   if(image.type() == CV_8UC1){
00693     int from_to[] = { 0,0,  0,1,  0,2,  1,3 };
00694     mixChannels( in , 2, &rgba_buffers_[idx], 1, from_to, 4 );
00695   } else {
00696     int from_to[] = { red_idx,0,  green_idx,1,  blue_idx,2,  3,3 }; //BGR+A -> RGBA
00697     mixChannels( in , 2, &rgba_buffers_[idx], 1, from_to, 4 );
00698   }
00699   //printMatrixInfo(rgba_buffers_[idx], "for QImage Buffering");
00700   return QImage((unsigned char *)(rgba_buffers_[idx].data), 
00701                 rgba_buffers_[idx].cols, rgba_buffers_[idx].rows, 
00702                 rgba_buffers_[idx].step, QImage::Format_RGB32 );
00703 }
00704 
00705 
00706 //Retrieve the transform between the lens and the base-link at capturing time
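// Besides the base_frame -> depth-frame transform, this also looks up the
// ground truth pose (if ground_truth_frame_name is set, e.g. for the RGB-D
// benchmark) and the odometry pose (if odom_frame_name is set). Missing
// transforms fall back to a fixed default or the identity so processing can
// continue.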
00707 void OpenNIListener::retrieveTransformations(std_msgs::Header depth_header, Node* node_ptr)
00708 {
00709   struct timespec starttime, finish; double elapsed; clock_gettime(CLOCK_MONOTONIC, &starttime);
00710   ParameterServer* ps = ParameterServer::instance();
00711   std::string base_frame  = ps->get<std::string>("base_frame_name");
00712   std::string odom_frame  = ps->get<std::string>("odom_frame_name");
00713   std::string gt_frame    = ps->get<std::string>("ground_truth_frame_name");
00714   std::string depth_frame_id = depth_header.frame_id;
00715   ros::Time depth_time = depth_header.stamp;
00716   tf::StampedTransform base2points;
00717 
00718   try{
00719     tflistener_->waitForTransform(base_frame, depth_frame_id, depth_time, ros::Duration(0.1));
00720     tflistener_->lookupTransform(base_frame, depth_frame_id, depth_time, base2points);
00721     base2points.stamp_ = depth_time;
00722   }
00723   catch (tf::TransformException ex){
00724     ROS_WARN("%s",ex.what());
00725     ROS_WARN("Using Standard kinect /camera_link -> /openni_rgb_optical_frame as transformation");
00726     //emulate the transformation from kinect camera_link frame to camera_rgb_optical_frame
00727     base2points.setRotation(tf::createQuaternionFromRPY(-1.57,0,-1.57));
00728     base2points.setOrigin(tf::Point(0,-0.04,0));
00729     base2points.stamp_ = depth_time;
00730   }
00731   node_ptr->setBase2PointsTransform(base2points);
00732 
00733   if(!gt_frame.empty()){ 
00734     //Retrieve the ground truth pose. The first frame's pose is used as
00735     //the origin; all later frames are compared against it.
00736     tf::StampedTransform ground_truth_transform;
00737     try{
00738       tflistener_->waitForTransform(gt_frame, "/openni_camera", depth_time, ros::Duration(0.1));//Tailored to the ground truth of the RGB-D Benchmark
00739       tflistener_->lookupTransform(gt_frame, "/openni_camera", depth_time, ground_truth_transform);//Tailored to the ground truth of the RGB-D Benchmark
00740       ground_truth_transform.stamp_ = depth_time;
00741       tf::StampedTransform b2p;
00742       //HACK to comply with Jürgen Sturm's Ground truth, though I can't manage here to get the full transform from tf
00743       b2p.setRotation(tf::createQuaternionFromRPY(-1.57,0,-1.57));
00744       b2p.setOrigin(tf::Point(0,-0.04,0));
00745       ground_truth_transform *= b2p;
00746     }
00747     catch (tf::TransformException ex){
00748       ROS_WARN("%s - Using Identity for Ground Truth",ex.what());
00749       ground_truth_transform = tf::StampedTransform(tf::Transform::getIdentity(), depth_time, "missing_ground_truth", "/openni_camera");//Tailored to the ground truth of the RGB-D Benchmark
00750     }
00751     printTransform("Ground Truth", ground_truth_transform);
00752     node_ptr->setGroundTruthTransform(ground_truth_transform);
00753   }
00754   if(!odom_frame.empty()){ 
00755     //Retrieve the odometry estimate for the capture time of the depth image.
00757     tf::StampedTransform current_odom_transform;
00758     try{
00759       tflistener_->waitForTransform(depth_frame_id, odom_frame, depth_time, ros::Duration(0.3));
00760       tflistener_->lookupTransform( depth_frame_id, odom_frame, depth_time, current_odom_transform);
00761     }
00762     catch (tf::TransformException ex){
00763       ROS_WARN("%s - No odometry available",ex.what());
00764       current_odom_transform = tf::StampedTransform(tf::Transform::getIdentity(), depth_time, "missing_odometry", depth_frame_id);
00765       current_odom_transform.stamp_ = depth_time;
00766     }
00767     printTransform("Odometry", current_odom_transform);
00768     node_ptr->setOdomTransform(current_odom_transform);
00769   }
00770   // End: Fill in Transformation -----------------------------------------------------------------------
00771   clock_gettime(CLOCK_MONOTONIC, &finish); elapsed = (finish.tv_sec - starttime.tv_sec); elapsed += (finish.tv_nsec - starttime.tv_nsec) / 1000000000.0; ROS_INFO_STREAM_COND_NAMED(elapsed > ParameterServer::instance()->get<double>("min_time_reported"), "timings", __FUNCTION__ << " runtime: "<< elapsed <<" s");
00772 }