#ifdef HEMACLOUDS
#define PCL_NO_PRECOMPILE
#endif
#include "parameter_server.h"

#include "pcl/ros/conversions.h"
#include <pcl/common/distances.h>
#include <pcl/io/io.h>
#include <pcl/io/impl/pcd_io.hpp>

#include "pcl_ros/transforms.h"
#include "openni_listener.h"
#include <cv_bridge/cv_bridge.h>
#include <opencv2/imgproc/imgproc.hpp>
#include <iostream>
#include <sstream>
#include <algorithm>
#include <string>
#include <cv.h>

#include <sensor_msgs/PointCloud2.h>
#include <Eigen/Core>
#include "node.h"
#include "misc.h"
#include "features.h"

#include <rosbag/view.h>
#include <boost/foreach.hpp>

#include "scoped_timer.h"

#include <tf/transform_listener.h>

typedef message_filters::Subscriber<sensor_msgs::Image> image_sub_type;
typedef message_filters::Subscriber<sensor_msgs::CameraInfo> cinfo_sub_type;
typedef message_filters::Subscriber<sensor_msgs::PointCloud2> pc_sub_type;
typedef message_filters::Subscriber<nav_msgs::Odometry> odom_sub_type;

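/// Feed the transforms of a tf message directly into a tf::Transformer,
/// bypassing the ROS network. Transforms with parent frame "/world" or
/// "/kinect" are skipped (presumably because these frames carry the
/// benchmark's ground truth and must not enter the live tf tree).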
void addTFMessageDirectlyToTransformer(tf::tfMessageConstPtr msg, tf::Transformer* transformer)
{
  for (size_t i = 0; i < msg->transforms.size(); i++)
  {
    if (msg->transforms[i].header.frame_id == "/world"
        || msg->transforms[i].header.frame_id == "/kinect")
    {
      continue;
    }

    try
    {
      tf::StampedTransform stTrans;
      tf::transformStampedMsgToTF(msg->transforms[i], stTrans);

      transformer->setTransform(stTrans);
      ROS_DEBUG("Set transform from %s to %s with timestamp %20.10f",
                msg->transforms[i].header.frame_id.c_str(),
                msg->transforms[i].child_frame_id.c_str(),
                stTrans.stamp_.toSec());
    }
    catch (tf::TransformException& ex)
    {
      ROS_ERROR("Failure to set received transform from %s to %s with error: %s\n",
                msg->transforms[i].header.frame_id.c_str(),
                msg->transforms[i].child_frame_id.c_str(),
                ex.what());
    }
  }
}

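/// Emit the current visual and depth images to the GUI. If the parameter
/// "visualize_mono_depth_overlay" is set, edges extracted from each image
/// are overlaid onto the other, making the RGB/depth alignment visible.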
void OpenNIListener::visualize_images(cv::Mat visual_image, cv::Mat depth_image){
  if(ParameterServer::instance()->get<bool>("visualize_mono_depth_overlay")){
    cv::Mat visual_edges = cv::Mat(visual_image.rows, visual_image.cols, CV_8UC1);
    cv::Mat depth_edges = cv::Mat(depth_image.rows, depth_image.cols, CV_8UC1);
    overlay_edges(visual_image, depth_image, visual_edges, depth_edges);
    Q_EMIT newDepthImage(cvMat2QImage(depth_image, depth_image, depth_image + visual_edges, 1));
    cv::Mat monochrome;
    if(visual_image.type() != CV_8UC1) {
      monochrome = cv::Mat(visual_image.rows, visual_image.cols, CV_8UC1);
      cv::cvtColor(visual_image, monochrome, CV_RGB2GRAY);
    } else {
      monochrome = visual_image;
    }
    Q_EMIT newVisualImage(cvMat2QImage(monochrome, monochrome + depth_edges, monochrome, 0));
  } else {
    Q_EMIT newVisualImage(cvMat2QImage(visual_image, 0));
    Q_EMIT newDepthImage(cvMat2QImage(depth_image, 1));
  }
}

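/// Construct the listener: read the image encoding and the feature
/// detector/extractor types from the ParameterServer, then set up the
/// topic subscribers.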
OpenNIListener::OpenNIListener(GraphManager* graph_mgr)
: graph_mgr_(graph_mgr),
  stereo_sync_(NULL), kinect_sync_(NULL), no_cloud_sync_(NULL),
  visua_sub_(NULL), depth_sub_(NULL), cloud_sub_(NULL),
  depth_mono8_img_(cv::Mat()),
  pause_(ParameterServer::instance()->get<bool>("start_paused")),
  getOneFrame_(false),
  first_frame_(true),
  data_id_(0),
  num_processed_(0),
  image_encoding_("rgb8")
{
  ParameterServer* ps = ParameterServer::instance();
  if(ps->get<bool>("encoding_bgr")){
    image_encoding_ = "bgr8";
  }
  detector_ = createDetector(ps->get<std::string>("feature_detector_type"));
  extractor_ = createDescriptorExtractor(ps->get<std::string>("feature_extractor_type"));
  setupSubscribers();
}

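/// Subscribe to the input topics according to the configured parameters.
/// Depending on which topic names are non-empty, one of three synchronized
/// setups is chosen: RGB+depth+cloud (kinectCallback), RGB+depth+camera info
/// (noCloudCallback), or wide-angle image+cloud (stereoCallback). If a
/// bagfile name is configured, no subscriptions are made at all.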
void OpenNIListener::setupSubscribers(){
  ParameterServer* ps = ParameterServer::instance();
  int q = ps->get<int>("subscriber_queue_size");
  ros::NodeHandle nh;
  tflistener_ = new tf::TransformListener(nh);
  std::string bagfile_name = ps->get<std::string>("bagfile_name");
  if(bagfile_name.empty()){
    std::string visua_tpc = ps->get<std::string>("topic_image_mono");
    std::string depth_tpc = ps->get<std::string>("topic_image_depth");
    std::string cinfo_tpc = ps->get<std::string>("camera_info_topic");
    std::string cloud_tpc = ps->get<std::string>("topic_points");
    std::string widev_tpc = ps->get<std::string>("wide_topic");
    std::string widec_tpc = ps->get<std::string>("wide_cloud_topic");

    // Full Kinect-style input: RGB image, depth image and point cloud
    if(!visua_tpc.empty() && !depth_tpc.empty() && !cloud_tpc.empty())
    {
      visua_sub_ = new image_sub_type(nh, visua_tpc, q);
      depth_sub_ = new image_sub_type(nh, depth_tpc, q);
      cloud_sub_ = new pc_sub_type(nh, cloud_tpc, q);
      kinect_sync_ = new message_filters::Synchronizer<KinectSyncPolicy>(KinectSyncPolicy(q), *visua_sub_, *depth_sub_, *cloud_sub_);
      kinect_sync_->registerCallback(boost::bind(&OpenNIListener::kinectCallback, this, _1, _2, _3));
      ROS_INFO_STREAM_NAMED("OpenNIListener", "Listening to " << visua_tpc << ", " << depth_tpc << " and " << cloud_tpc);
    }
    // No point cloud topic: use camera info to reconstruct depth geometry
    else if(!visua_tpc.empty() && !depth_tpc.empty() && !cinfo_tpc.empty() && cloud_tpc.empty())
    {
      visua_sub_ = new image_sub_type(nh, visua_tpc, q);
      depth_sub_ = new image_sub_type(nh, depth_tpc, q);
      cinfo_sub_ = new cinfo_sub_type(nh, cinfo_tpc, q);
      no_cloud_sync_ = new message_filters::Synchronizer<NoCloudSyncPolicy>(NoCloudSyncPolicy(q), *visua_sub_, *depth_sub_, *cinfo_sub_);
      no_cloud_sync_->registerCallback(boost::bind(&OpenNIListener::noCloudCallback, this, _1, _2, _3));
      ROS_INFO_STREAM_NAMED("OpenNIListener", "Listening to " << visua_tpc << " and " << depth_tpc);
    }

    // Wide-angle stereo input: visual image plus precomputed point cloud
    if(!widec_tpc.empty() && !widev_tpc.empty())
    {
      visua_sub_ = new image_sub_type(nh, widev_tpc, q);
      cloud_sub_ = new pc_sub_type(nh, widec_tpc, q);
      stereo_sync_ = new message_filters::Synchronizer<StereoSyncPolicy>(StereoSyncPolicy(q), *visua_sub_, *cloud_sub_);
      stereo_sync_->registerCallback(boost::bind(&OpenNIListener::stereoCallback, this, _1, _2));
      ROS_INFO_STREAM_NAMED("OpenNIListener", "Listening to " << widev_tpc << " and " << widec_tpc);
    }
    if(ps->get<bool>("use_robot_odom")){
      odom_sub_ = new odom_sub_type(nh, ps->get<std::string>("odometry_tpc"), 3);
      ROS_INFO_STREAM_NAMED("OpenNIListener", "Listening to odometry on " << ps->get<std::string>("odometry_tpc"));
      odom_sub_->registerCallback(boost::bind(&OpenNIListener::odomCallback, this, _1));
    }
  }
  else {
    ROS_WARN("RGBDSLAM is loading a bagfile and therefore does not subscribe to topics.");
  }
}

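/// Assemble the list of topics to read from the bagfile. The camera info
/// topic is only needed when no point cloud topic is given; the odometry
/// topic is only appended if "use_robot_odom" is set.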
static std::vector<std::string> createTopicsVector(const std::string& visua_tpc,
                                                   const std::string& depth_tpc,
                                                   const std::string& points_tpc,
                                                   const std::string& cinfo_tpc,
                                                   const std::string& odom_tpc,
                                                   const std::string& tf_tpc)
{
  std::vector<std::string> topics;
  topics.push_back(visua_tpc);
  topics.push_back(depth_tpc);
  if(points_tpc.empty()){
    topics.push_back(cinfo_tpc);
  } else {
    topics.push_back(points_tpc);
  }
  topics.push_back(tf_tpc);
  if(ParameterServer::instance()->get<bool>("use_robot_odom")){
    ROS_INFO_STREAM_NAMED("OpenNIListener", "Using odometry on topic " << odom_tpc);
    topics.push_back(odom_tpc);
  }
  return topics;
}

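/// Read the bagfile sequentially and replay its messages through the fake
/// (BagSubscriber) subscribers. Messages are buffered per topic and only
/// forwarded once tf data beyond their timestamp has been fed to the
/// transformer, so transformations are always available downstream.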
void OpenNIListener::processBagfile(std::string filename,
                                    const std::string& visua_tpc,
                                    const std::string& depth_tpc,
                                    const std::string& points_tpc,
                                    const std::string& cinfo_tpc,
                                    const std::string& odom_tpc,
                                    const std::string& tf_tpc)
{
  ROS_INFO_NAMED("OpenNIListener", "Loading Bagfile %s", filename.c_str());
  Q_EMIT iamBusy(4, "Loading Bagfile", 0);
  { // Scope for the bag object
    rosbag::Bag input_bag;
    try{
      input_bag.open(filename, rosbag::bagmode::Read);
    } catch(const rosbag::BagIOException& ex) {
      ROS_FATAL("Opening Bagfile %s failed: %s Quitting!", filename.c_str(), ex.what());
      ros::shutdown();
      return;
    }
    ROS_INFO_NAMED("OpenNIListener", "Opened Bagfile %s", filename.c_str());

    std::vector<std::string> topics;
    topics = createTopicsVector(visua_tpc, depth_tpc, points_tpc, cinfo_tpc, odom_tpc, tf_tpc);
    rosbag::View view(input_bag, rosbag::TopicQuery(topics));
    Q_EMIT iamBusy(4, "Processing Bagfile", view.size());

    // Buffers per topic: messages are held back until tf data covering
    // their timestamp has been processed
    std::deque<sensor_msgs::Image::ConstPtr> vis_images;
    std::deque<sensor_msgs::Image::ConstPtr> dep_images;
    std::deque<sensor_msgs::CameraInfo::ConstPtr> cam_infos;
    std::deque<sensor_msgs::PointCloud2::ConstPtr> pointclouds;
    std::deque<nav_msgs::OdometryConstPtr> odometries;

    ros::Time last_tf = ros::TIME_MIN;

    int counter = 0;
    BOOST_FOREACH(rosbag::MessageInstance const m, view)
    {
      Q_EMIT progress(4, "Processing Bagfile", counter++);
      do{
        usleep(10);
        if(!ros::ok()) return;
      } while(pause_);
      ROS_INFO_NAMED("OpenNIListener", "Processing %s of type %s with timestamp %f", m.getTopic().c_str(), m.getDataType().c_str(), m.getTime().toSec());

      if (m.getTopic() == odom_tpc || ("/" + m.getTopic() == odom_tpc)) {
        nav_msgs::OdometryConstPtr odommsg = m.instantiate<nav_msgs::Odometry>();
        if (odommsg) odometries.push_back(odommsg);
      }
      else if (m.getTopic() == visua_tpc || ("/" + m.getTopic() == visua_tpc))
      {
        sensor_msgs::Image::ConstPtr rgb_img = m.instantiate<sensor_msgs::Image>();
        if (rgb_img) vis_images.push_back(rgb_img);
        ROS_DEBUG("Found Message of %s", visua_tpc.c_str());
      }
      else if (m.getTopic() == depth_tpc || ("/" + m.getTopic() == depth_tpc))
      {
        sensor_msgs::Image::ConstPtr depth_img = m.instantiate<sensor_msgs::Image>();
        if (depth_img) dep_images.push_back(depth_img);
        ROS_DEBUG("Found Message of %s", depth_tpc.c_str());
      }
      else if (m.getTopic() == points_tpc || ("/" + m.getTopic() == points_tpc))
      {
        sensor_msgs::PointCloud2::ConstPtr pointcloud = m.instantiate<sensor_msgs::PointCloud2>();
        if (pointcloud) pointclouds.push_back(pointcloud);
        ROS_DEBUG("Found Message of %s", points_tpc.c_str());
      }
      else if (m.getTopic() == cinfo_tpc || ("/" + m.getTopic() == cinfo_tpc))
      {
        sensor_msgs::CameraInfo::ConstPtr cam_info = m.instantiate<sensor_msgs::CameraInfo>();
        if (cam_info) cam_infos.push_back(cam_info);
        ROS_DEBUG("Found Message of %s", cinfo_tpc.c_str());
      }
      else if (m.getTopic() == tf_tpc || ("/" + m.getTopic() == tf_tpc)){
        tf::tfMessage::ConstPtr tf_msg = m.instantiate<tf::tfMessage>();
        if (tf_msg) {
          addTFMessageDirectlyToTransformer(tf_msg, tflistener_);
          last_tf = tf_msg->transforms[0].header.stamp;
          last_tf -= ros::Duration(0.1); // Safety margin, so no message overtakes its tf data
        }
      }
      if (last_tf == ros::TIME_MIN){ // If the bag contains no tf messages, fall back to the message time
        last_tf = m.getTime();
        last_tf -= ros::Duration(0.1);
      }

      // Flush all buffered messages older than the available tf data
      while(!odometries.empty() && odometries.front()->header.stamp < last_tf){
        ROS_INFO_NAMED("OpenNIListener", "Sending Odometry message");
        odomCallback(odometries.front());
        odometries.pop_front();
      }
      while(!vis_images.empty() && vis_images.front()->header.stamp < last_tf){
        ROS_INFO_NAMED("OpenNIListener", "Forwarding buffered visual message from time %12f", vis_images.front()->header.stamp.toSec());
        rgb_img_sub_->newMessage(vis_images.front());
        vis_images.pop_front();
      }
      while(!dep_images.empty() && dep_images.front()->header.stamp < last_tf){
        ROS_INFO_NAMED("OpenNIListener", "Forwarding buffered depth message from time %12f", dep_images.front()->header.stamp.toSec());
        depth_img_sub_->newMessage(dep_images.front());
        dep_images.pop_front();
      }
      while(!cam_infos.empty() && cam_infos.front()->header.stamp < last_tf){
        ROS_INFO_NAMED("OpenNIListener", "Forwarding buffered cam info message from time %12f", cam_infos.front()->header.stamp.toSec());
        cam_info_sub_->newMessage(cam_infos.front());
        cam_infos.pop_front();
      }
      while(!pointclouds.empty() && pointclouds.front()->header.stamp < last_tf){
        pc_sub_->newMessage(pointclouds.front());
        pointclouds.pop_front();
      }
    }
    ROS_WARN_NAMED("eval", "Finished processing of Bagfile");
    input_bag.close();
  }
}

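/// Create the fake subscribers and synchronizers used when replaying a
/// bagfile, mirroring the live setup in setupSubscribers().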
void OpenNIListener::loadBagFakeSubscriberSetup(const std::string& visua_tpc,
                                                const std::string& depth_tpc,
                                                const std::string& points_tpc,
                                                const std::string& cinfo_tpc,
                                                const std::string& odom_tpc,
                                                const std::string& tf_tpc)
{
  int q = ParameterServer::instance()->get<int>("subscriber_queue_size");

  ros::NodeHandle nh;
  tf_pub_ = nh.advertise<tf::tfMessage>(tf_tpc, 10);

  if(!visua_tpc.empty() && !depth_tpc.empty() && !cinfo_tpc.empty() && points_tpc.empty())
  {
    depth_img_sub_ = new BagSubscriber<sensor_msgs::Image>();
    rgb_img_sub_ = new BagSubscriber<sensor_msgs::Image>();
    cam_info_sub_ = new BagSubscriber<sensor_msgs::CameraInfo>();
    no_cloud_sync_ = new message_filters::Synchronizer<NoCloudSyncPolicy>(NoCloudSyncPolicy(q), *rgb_img_sub_, *depth_img_sub_, *cam_info_sub_);
    no_cloud_sync_->registerCallback(boost::bind(&OpenNIListener::noCloudCallback, this, _1, _2, _3));
    ROS_INFO_STREAM_NAMED("OpenNIListener", "Listening to " << visua_tpc << ", " << depth_tpc << " and " << cinfo_tpc);
  }
  else if(!visua_tpc.empty() && !depth_tpc.empty() && !points_tpc.empty())
  {
    depth_img_sub_ = new BagSubscriber<sensor_msgs::Image>();
    rgb_img_sub_ = new BagSubscriber<sensor_msgs::Image>();
    pc_sub_ = new BagSubscriber<sensor_msgs::PointCloud2>();
    kinect_sync_ = new message_filters::Synchronizer<KinectSyncPolicy>(KinectSyncPolicy(q), *rgb_img_sub_, *depth_img_sub_, *pc_sub_);
    kinect_sync_->registerCallback(boost::bind(&OpenNIListener::kinectCallback, this, _1, _2, _3));
    ROS_INFO_STREAM_NAMED("OpenNIListener", "Listening to " << visua_tpc << ", " << depth_tpc << " and " << points_tpc);
  }
  else {
    ROS_ERROR("Missing required information: Topic names.");
    ROS_ERROR_STREAM("Visual: " << visua_tpc);
    ROS_ERROR_STREAM("Camera Info (Optional if Points given): " << cinfo_tpc);
    ROS_ERROR_STREAM("Depth: " << depth_tpc);
    ROS_ERROR_STREAM("Points (Optional if Cam Info given): " << points_tpc);
  }
}

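/// Process a bagfile in batch mode: landmark optimization is disabled
/// during processing, the fake subscribers are set up, the bag is replayed
/// and the result is evaluated.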
void OpenNIListener::loadBag(std::string filename)
{
  ScopedTimer s(__FUNCTION__);

  ParameterServer* ps = ParameterServer::instance();
  bool eval_landmarks = ps->get<bool>("optimize_landmarks");
  ps->set<bool>("optimize_landmarks", false); // Disable landmark optimization while the bag is processed

  std::string visua_tpc = ps->get<std::string>("topic_image_mono");
  std::string depth_tpc = ps->get<std::string>("topic_image_depth");
  std::string points_tpc = ps->get<std::string>("topic_points");
  std::string cinfo_tpc = ps->get<std::string>("camera_info_topic");
  std::string odom_tpc = ps->get<std::string>("odometry_tpc");
  std::string tf_tpc = std::string("/tf");

  loadBagFakeSubscriberSetup(visua_tpc, depth_tpc, points_tpc, cinfo_tpc, odom_tpc, tf_tpc);
  processBagfile(filename, visua_tpc, depth_tpc, points_tpc, cinfo_tpc, odom_tpc, tf_tpc);

  waitAndEvaluate(filename);
}

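/// Block until the graph manager has finished processing all queued nodes,
/// then run the evaluation if "batch_processing" is enabled.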
void OpenNIListener::waitAndEvaluate(const std::string& filename)
{
  Q_EMIT progress(4, "Processing Bagfile", 1e6);
  do{
    if(!future_.isFinished()){
      future_.waitForFinished();
    }
    usleep(1000000);
    if(!ros::ok()) return;
    ROS_WARN("Waiting for processing to finish.");
  } while(graph_mgr_->isBusy());

  ParameterServer* ps = ParameterServer::instance();
  if(ps->get<bool>("batch_processing")){
    evaluation(filename);
  }
  if(!ps->get<bool>("use_gui")){
    Q_EMIT bagFinished();
    usleep(10);
  }
}

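/// Batch evaluation: optimize the graph in several rounds, pruning edges
/// with decreasing error thresholds (5, 1, 0.25) in between, and save the
/// trajectory estimate after each round.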
void OpenNIListener::evaluation(std::string filename)
{
  graph_mgr_->optimizeGraph(-1, true);
  graph_mgr_->setOptimizerVerbose(true);
  graph_mgr_->saveTrajectory(QString(filename.c_str()) + "iteration_" + QString::number(0));
  ROS_WARN_NAMED("eval", "Finished with optimization iteration %i.", 0);

  // Optimize again with the poses expressed relative to the first node
  ParameterServer::instance()->set<std::string>("pose_relative_to", std::string("first"));
  graph_mgr_->optimizeGraph(-100, true);
  graph_mgr_->saveTrajectory(QString(filename.c_str()) + "iteration_" + QString::number(1));
  ROS_WARN_NAMED("eval", "Finished with optimization iteration %i.", 1);

  // Prune edges with an error above 5, then re-optimize
  if(graph_mgr_->pruneEdgesWithErrorAbove(5) > 0){
    graph_mgr_->optimizeGraph(-100, true);
  } else {
    graph_mgr_->optimizeGraph(1, true);
  }
  graph_mgr_->saveTrajectory(QString(filename.c_str()) + "iteration_" + QString::number(2));
  ROS_WARN_NAMED("eval", "Finished with optimization iteration %i.", 2);

  // Prune edges with an error above 1, then re-optimize
  if(graph_mgr_->pruneEdgesWithErrorAbove(1) > 0){
    graph_mgr_->optimizeGraph(-100, true);
  } else {
    graph_mgr_->optimizeGraph(1, true);
  }
  graph_mgr_->saveTrajectory(QString(filename.c_str()) + "iteration_" + QString::number(3));
  ROS_WARN_NAMED("eval", "Finished with optimization iteration %i.", 3);

  // Prune edges with an error above 0.25, then re-optimize
  if(graph_mgr_->pruneEdgesWithErrorAbove(0.25) > 0){
    graph_mgr_->optimizeGraph(-100, true);
  } else {
    graph_mgr_->optimizeGraph(1, true);
  }
  graph_mgr_->saveTrajectory(QString(filename.c_str()) + "iteration_" + QString::number(4));
  ROS_WARN_NAMED("eval", "Finished with optimization iteration %i.", 4);

  // Final round with landmark optimization, if enabled
  if(ParameterServer::instance()->get<bool>("optimize_landmarks")){
    ParameterServer::instance()->set<bool>("optimize_landmarks", true);
    graph_mgr_->optimizeGraph(-100, true);
    graph_mgr_->saveTrajectory(QString(filename.c_str()) + "iteration_" + QString::number(3));
    ROS_WARN_NAMED("eval", "Finished with optimization iteration %i.", 3);
  }

  if(!ParameterServer::instance()->get<bool>("use_gui")){
    Q_EMIT bagFinished();
    usleep(10);
  }
  std::cerr << "Evaluation Done\n";
}

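/// Fill a mono8 depth image from the z coordinates of an organized point
/// cloud; NaN values (no depth measurement) become 0, valid depths are
/// scaled by 50, i.e., 50 gray levels per meter.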
static void calculateDepthMask(cv::Mat_<uchar>& depth_img, const pointcloud_type::Ptr point_cloud)
{
  float value;
  for(int y = 0; y < depth_img.rows; y++){
    for(int x = 0; x < depth_img.cols; x++){
      value = point_cloud->at(x,y).z;
      if(value != value){ // NaN: no depth measurement for this pixel
        depth_img.at<uchar>(y,x) = 0;
      } else {
        depth_img.at<uchar>(y,x) = static_cast<uchar>(value*50.0);
      }
    }
  }
}

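/// Process data originating from a PCD file: reconstruct a depth image
/// from the cloud's z values and forward everything to cameraCallback().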
void OpenNIListener::pcdCallback(const sensor_msgs::ImageConstPtr visual_img_msg,
                                 pointcloud_type::Ptr pcl_cloud)
{
  ScopedTimer s(__FUNCTION__);
  ROS_INFO_NAMED("OpenNIListener", "Received data from pcd file reader");
  ROS_WARN_ONCE_NAMED("eval", "First RGBD-Data Received");

  // Create a depth image from the z values of the point cloud
  cv::Mat visual_img = cv_bridge::toCvCopy(visual_img_msg)->image;
  cv::Mat_<uchar> depth_img(visual_img_msg->height, visual_img_msg->width);
  calculateDepthMask(depth_img, pcl_cloud);
  depth_mono8_img_ = depth_img;

  if(ParameterServer::instance()->get<bool>("use_gui")){
    Q_EMIT newVisualImage(cvMat2QImage(visual_img, 0));
    Q_EMIT newDepthImage(cvMat2QImage(depth_img, 1));
  }
  cameraCallback(visual_img, pcl_cloud, depth_img);
}

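/// Process synchronized data from a (wide-angle) stereo camera: an
/// intensity image plus a reconstructed point cloud.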
void OpenNIListener::stereoCallback(const sensor_msgs::ImageConstPtr& visual_img_msg,
                                    const sensor_msgs::PointCloud2ConstPtr& point_cloud)
{
  ScopedTimer s(__FUNCTION__);
  ROS_INFO_NAMED("OpenNIListener", "Received data from stereo cam");
  ROS_WARN_ONCE_NAMED("eval", "First RGBD-Data Received");
  ParameterServer* ps = ParameterServer::instance();
  if(++data_id_ < ps->get<int>("skip_first_n_frames")
     || data_id_ % ps->get<int>("data_skip_step") != 0){
    ROS_INFO_THROTTLE_NAMED(1, "OpenNIListener", "Skipping Frame %i because of data_skip_step setting (this msg is only shown once a sec)", data_id_);
    if(ps->get<bool>("use_gui")){ // Show the image, even if it is not processed
      cv::Mat visual_img = cv_bridge::toCvCopy(visual_img_msg)->image;
      Q_EMIT newVisualImage(cvMat2QImage(visual_img, 0));
    }
    return;
  }

  // Create a depth mask from the z values of the point cloud
  pointcloud_type::Ptr pc_col(new pointcloud_type());
  pcl::fromROSMsg(*point_cloud, *pc_col);
  cv::Mat_<uchar> depth_img(visual_img_msg->height, visual_img_msg->width);
  calculateDepthMask(depth_img, pc_col);

  cv::Mat visual_img = cv_bridge::toCvCopy(visual_img_msg, "mono8")->image;
  if(visual_img.rows != depth_img.rows ||
     visual_img.cols != depth_img.cols ||
     point_cloud->width != (uint32_t) visual_img.cols ||
     point_cloud->height != (uint32_t) visual_img.rows){
    ROS_ERROR("PointCloud, depth and visual image differ in size! Ignoring Data");
    return;
  }

  cameraCallback(visual_img, pc_col, depth_img);
}

OpenNIListener::~OpenNIListener(){
  delete tflistener_;
}

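/// Process synchronized RGB image, depth image and camera info, for setups
/// where no point cloud topic is available.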
void OpenNIListener::noCloudCallback(const sensor_msgs::ImageConstPtr& visual_img_msg,
                                     const sensor_msgs::ImageConstPtr& depth_img_msg,
                                     const sensor_msgs::CameraInfoConstPtr& cam_info_msg)
{
  ScopedTimer s(__FUNCTION__);
  ROS_WARN_ONCE_NAMED("eval", "First RGBD-Data Received");
  ROS_DEBUG("Received data from kinect");
  ParameterServer* ps = ParameterServer::instance();

  if(++data_id_ < ps->get<int>("skip_first_n_frames")
     || data_id_ % ps->get<int>("data_skip_step") != 0)
  {
    ROS_INFO_THROTTLE_NAMED(1, "OpenNIListener", "Skipping Frame %i because of data_skip_step setting (this msg is only shown once a sec)", data_id_);
    if(ps->get<bool>("use_gui")){ // Show the image, even if it is not processed
      cv::Mat visual_img = cv_bridge::toCvCopy(visual_img_msg)->image;
      Q_EMIT newVisualImage(cvMat2QImage(visual_img, 0));
    }
    return;
  }

  // Convert images to OpenCV format
  cv::Mat depth_float_img = cv_bridge::toCvCopy(depth_img_msg)->image;

  cv::Mat visual_img;
  if(image_encoding_ == "bayer_grbg8"){
    ROS_INFO_NAMED("OpenNIListener", "Converting from Bayer to RGB");
    cv::cvtColor(cv_bridge::toCvCopy(visual_img_msg)->image, visual_img, CV_BayerGR2RGB, 3);
  } else {
    ROS_INFO_STREAM("Encoding: " << visual_img_msg->encoding);
    visual_img = cv_bridge::toCvCopy(visual_img_msg)->image;
  }

  if(visual_img.rows != depth_float_img.rows ||
     visual_img.cols != depth_float_img.cols){
    ROS_WARN("depth and visual image differ in size! Rescaling Depth Data");
    cv::resize(depth_float_img, depth_float_img, visual_img.size(), 0, 0, cv::INTER_NEAREST);
  }
  image_encoding_ = visual_img_msg->encoding;

  depthToCV8UC1(depth_float_img, depth_mono8_img_);

  if(asyncFrameDrop(depth_img_msg->header.stamp, visual_img_msg->header.stamp))
    return;

  if(pause_ && !getOneFrame_){
    if(ps->get<bool>("use_gui")){
      Q_EMIT newVisualImage(cvMat2QImage(visual_img, 0));
      Q_EMIT newDepthImage(cvMat2QImage(depth_mono8_img_, 1));
    }
    return;
  }
  noCloudCameraCallback(visual_img, depth_float_img, depth_mono8_img_, visual_img_msg->header, cam_info_msg);
}

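/// Process synchronized RGB image, depth image and point cloud as provided
/// by the OpenNI driver of Kinect-style sensors.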
void OpenNIListener::kinectCallback(const sensor_msgs::ImageConstPtr& visual_img_msg,
                                    const sensor_msgs::ImageConstPtr& depth_img_msg,
                                    const sensor_msgs::PointCloud2ConstPtr& point_cloud)
{
  ScopedTimer s(__FUNCTION__);
  ROS_DEBUG("Received data from kinect");
  ROS_WARN_ONCE_NAMED("eval", "First RGBD-Data Received");
  ParameterServer* ps = ParameterServer::instance();

  if(++data_id_ < ps->get<int>("skip_first_n_frames")
     || data_id_ % ps->get<int>("data_skip_step") != 0)
  {
    ROS_INFO_THROTTLE_NAMED(1, "OpenNIListener", "Skipping Frame %i because of data_skip_step setting (this msg is only shown once a sec)", data_id_);
    if(ps->get<bool>("use_gui")){ // Show the image, even if it is not processed
      cv::Mat visual_img = cv_bridge::toCvCopy(visual_img_msg)->image;
      Q_EMIT newVisualImage(cvMat2QImage(visual_img, 0));
    }
    return;
  }

  // Convert images to OpenCV format
  cv::Mat depth_float_img = cv_bridge::toCvCopy(depth_img_msg)->image;
  cv::Mat visual_img = cv_bridge::toCvCopy(visual_img_msg)->image;
  if(visual_img.rows != depth_float_img.rows ||
     visual_img.cols != depth_float_img.cols ||
     point_cloud->width != (uint32_t) visual_img.cols ||
     point_cloud->height != (uint32_t) visual_img.rows){
    ROS_ERROR("PointCloud, depth and visual image differ in size! Ignoring Data");
    return;
  }
  image_encoding_ = visual_img_msg->encoding;
  depthToCV8UC1(depth_float_img, depth_mono8_img_);

  if(asyncFrameDrop(depth_img_msg->header.stamp, visual_img_msg->header.stamp))
    return;

  if(pause_ && !getOneFrame_){
    if(ps->get<bool>("use_gui")){
      Q_EMIT newVisualImage(cvMat2QImage(visual_img, 0));
      Q_EMIT newDepthImage(cvMat2QImage(depth_mono8_img_, 1));
    }
    return;
  }

  pointcloud_type::Ptr pc_col(new pointcloud_type());
  pcl::fromROSMsg(*point_cloud, *pc_col);
  cameraCallback(visual_img, pc_col, depth_mono8_img_);
}

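/// Common entry point for all callbacks that provide a point cloud: create
/// a Node with features extracted from the visual image, attach the tf
/// transformations and hand it off for processing.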
void OpenNIListener::cameraCallback(cv::Mat visual_img,
                                    pointcloud_type::Ptr point_cloud,
                                    cv::Mat depth_mono8_img)
{
  ScopedTimer s(__FUNCTION__);
  ROS_WARN_COND(point_cloud == NULL, "Nullpointer for pointcloud");
  if(getOneFrame_) { getOneFrame_ = false; }
  else if(pause_) { return; }

  Node* node_ptr = new Node(visual_img, detector_, extractor_, point_cloud, depth_mono8_img);

  retrieveTransformations(pcl_conversions::fromPCL(point_cloud->header), node_ptr);
  callProcessing(visual_img, node_ptr);
}

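/// Counterpart to cameraCallback() for the cloudless pipeline: the Node is
/// constructed from the depth image and camera info instead of a cloud.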
void OpenNIListener::noCloudCameraCallback(cv::Mat visual_img,
                                           cv::Mat depth,
                                           cv::Mat depth_mono8_img,
                                           std_msgs::Header depth_header,
                                           const sensor_msgs::CameraInfoConstPtr& cam_info)
{
  if(getOneFrame_) {
    getOneFrame_ = false;
  } else if(pause_) {
    return;
  }
  ScopedTimer s(__FUNCTION__);

  Node* node_ptr = new Node(visual_img, depth, depth_mono8_img, cam_info, depth_header, detector_, extractor_);

  retrieveTransformations(depth_header, node_ptr);
  callProcessing(visual_img, node_ptr);
}

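/// Hand a newly constructed Node to processNode(), either synchronously or,
/// if "concurrent_node_construction" is set, in a background thread so the
/// next node can be constructed in parallel.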
void OpenNIListener::callProcessing(cv::Mat visual_img, Node* node_ptr)
{
  if(!future_.isFinished()){
    ScopedTimer s("New Node is ready, waiting for graph manager.");
    future_.waitForFinished(); // Wait if the previous node is still being processed
  }

  visualization_img_ = visual_img;
  visualization_depth_mono8_img_ = depth_mono8_img_;

  if(ParameterServer::instance()->get<bool>("concurrent_node_construction")) {
    ROS_DEBUG("Processing Node in parallel to the construction of the next node");
    if(ParameterServer::instance()->get<bool>("use_gui")){
      // Deep copy the images: the node is processed in a background thread
      // while the originals are overwritten by the next frame
      visualization_img_ = visual_img.clone();
      visualization_depth_mono8_img_ = depth_mono8_img_.clone();
      visualize_images(visualization_img_, depth_mono8_img_);
    }
    future_ = QtConcurrent::run(this, &OpenNIListener::processNode, node_ptr);
  }
  else {
    processNode(node_ptr);
  }
}

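/// Add the Node to the graph via the GraphManager, attach odometry
/// information if an odometry frame is configured, update the GUI images,
/// and delete the node if the graph manager rejected it.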
void OpenNIListener::processNode(Node* new_node)
{
  ScopedTimer s(__FUNCTION__);
  ParameterServer* ps = ParameterServer::instance();
  Q_EMIT setGUIStatus("Adding Node to Graph");
  bool has_been_added = graph_mgr_->addNode(new_node);
  ++num_processed_;
  Q_EMIT setGUIInfo2(QString("Frames processed: ")+QString::number(num_processed_));

  // If the node was added to the graph, also add the corresponding odometry measurement
  if(has_been_added && !ps->get<std::string>("odom_frame_name").empty()){
    ROS_INFO_NAMED("OpenNIListener", "Verifying Odometry Information");
    ros::Time latest_odom_time;
    std::string odom_frame = ps->get<std::string>("odom_frame_name");
    std::string base_frame = ps->get<std::string>("base_frame_name");
    std::string error_msg;
    int ev = tflistener_->getLatestCommonTime(odom_frame, base_frame, latest_odom_time, &error_msg);
    if(ev == tf::NO_ERROR){
      graph_mgr_->addOdometry(latest_odom_time, tflistener_);
    } else {
      ROS_WARN_STREAM("Couldn't get time of latest tf transform between " << odom_frame << " and " << base_frame << ": " << error_msg);
    }
  }

  // Visualize feature flow and keypoints in the GUI
  if(ParameterServer::instance()->get<bool>("use_gui")){
    if(has_been_added){
      cv::Mat kp_img = visualization_img_.clone();
      graph_mgr_->drawFeatureFlow(visualization_img_, cv::Scalar(0,0,255), cv::Scalar(0,128,0));
      Q_EMIT newFeatureFlowImage(cvMat2QImage(visualization_img_, 5));
      cv::drawKeypoints(visualization_img_, new_node->feature_locations_2d_, kp_img, cv::Scalar(0,255,0), 5);
      Q_EMIT newFeatureImage(cvMat2QImage(kp_img, 4));
    } else {
      cv::drawKeypoints(visualization_img_, new_node->feature_locations_2d_, visualization_img_, cv::Scalar(0, 100,0), 5);
      Q_EMIT newFeatureFlowImage(cvMat2QImage(visualization_img_, 2));
    }
  }
  if(!has_been_added) {
    delete new_node;
    new_node = NULL;
  }
}

void OpenNIListener::togglePause(){
  pause_ = !pause_;
  ROS_INFO_NAMED("OpenNIListener", "Pause toggled to: %s", pause_ ? "true" : "false");
  if(pause_) Q_EMIT setGUIStatus("Processing Thread Paused");
  else Q_EMIT setGUIStatus("Processing Thread Running");
}

void OpenNIListener::getOneFrame(){
  getOneFrame_ = true;
}

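/// Combine three single-channel images (plus a constant alpha channel)
/// into one RGBA QImage, using a per-index buffer to avoid reallocations.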
QImage OpenNIListener::cvMat2QImage(const cv::Mat& channel1, const cv::Mat& channel2, const cv::Mat& channel3, unsigned int idx)
{
  if(rgba_buffers_.size() <= idx){
    rgba_buffers_.resize(idx+1);
  }
  if(channel2.rows != channel1.rows || channel2.cols != channel1.cols ||
     channel3.rows != channel1.rows || channel3.cols != channel1.cols){
    ROS_ERROR("Image channels to be combined differ in size");
  }
  if(channel1.rows != rgba_buffers_[idx].rows || channel1.cols != rgba_buffers_[idx].cols){
    ROS_DEBUG("Created new rgba_buffer with index %i", idx);
    rgba_buffers_[idx] = cv::Mat(channel1.rows, channel1.cols, CV_8UC4);
  }
  cv::Mat mono_channel1 = channel1;
  if(channel1.type() != CV_8UC1) {
    mono_channel1 = cv::Mat(channel1.rows, channel1.cols, CV_8UC1);
    cv::cvtColor(channel1, mono_channel1, CV_RGB2GRAY);
  }
  std::vector<cv::Mat> input;
  cv::Mat alpha(channel1.rows, channel1.cols, CV_8UC1, cv::Scalar(255));
  input.push_back(mono_channel1);
  input.push_back(channel2);
  input.push_back(channel3);
  input.push_back(alpha);
  cv::merge(input, rgba_buffers_[idx]);

  return QImage((unsigned char *)(rgba_buffers_[idx].data),
                rgba_buffers_[idx].cols, rgba_buffers_[idx].rows,
                rgba_buffers_[idx].step, QImage::Format_RGB32);
}

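/// Convert a mono or color cv::Mat to an RGBA QImage, reordering the color
/// channels to match QImage::Format_RGB32.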
QImage OpenNIListener::cvMat2QImage(const cv::Mat& image, unsigned int idx)
{
  ROS_DEBUG_STREAM("Converting Matrix of type " << openCVCode2String(image.type()) << " to RGBA");
  if(rgba_buffers_.size() <= idx){
    rgba_buffers_.resize(idx+1);
  }
  ROS_DEBUG_STREAM("Target Matrix is of type " << openCVCode2String(rgba_buffers_[idx].type()));
  if(image.rows != rgba_buffers_[idx].rows || image.cols != rgba_buffers_[idx].cols){
    ROS_DEBUG("Created new rgba_buffer with index %i", idx);
    rgba_buffers_[idx] = cv::Mat(image.rows, image.cols, CV_8UC4);
  }
  char red_idx = 0, green_idx = 1, blue_idx = 2;
  if(image_encoding_.compare("rgb8") == 0) { red_idx = 2; blue_idx = 0; } // Swap R and B for QImage's byte order
  cv::Mat alpha(image.rows, image.cols, CV_8UC1, cv::Scalar(255));
  cv::Mat in[] = { image, alpha };

  if(image.type() == CV_8UC1){
    // Grayscale input: replicate the single channel into R, G and B
    int from_to[] = { 0,0,  0,1,  0,2,  1,3 };
    mixChannels(in, 2, &rgba_buffers_[idx], 1, from_to, 4);
  } else {
    int from_to[] = { red_idx,0,  green_idx,1,  blue_idx,2,  3,3 };
    mixChannels(in, 2, &rgba_buffers_[idx], 1, from_to, 4);
  }

  return QImage((unsigned char *)(rgba_buffers_[idx].data),
                rgba_buffers_[idx].cols, rgba_buffers_[idx].rows,
                rgba_buffers_[idx].step, QImage::Format_RGB32);
}

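/// Look up the transformations associated with a frame (base to camera,
/// ground truth and odometry, where configured) at the time of the depth
/// image and store them in the Node, falling back to a fixed Kinect
/// mounting or the identity when a lookup fails.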
void OpenNIListener::retrieveTransformations(std_msgs::Header depth_header, Node* node_ptr)
{
  ScopedTimer s(__FUNCTION__);
  ParameterServer* ps = ParameterServer::instance();
  std::string base_frame = ps->get<std::string>("base_frame_name");
  std::string odom_frame = ps->get<std::string>("odom_frame_name");
  std::string gt_frame = ps->get<std::string>("ground_truth_frame_name");
  std::string depth_frame_id = depth_header.frame_id;
  ros::Time depth_time = depth_header.stamp;
  tf::StampedTransform base2points;

  try{
    tflistener_->waitForTransform(base_frame, depth_frame_id, depth_time, ros::Duration(0.005));
    tflistener_->lookupTransform(base_frame, depth_frame_id, depth_time, base2points);
    base2points.stamp_ = depth_time;
  }
  catch (tf::TransformException& ex){
    ROS_WARN_THROTTLE(5, "%s", ex.what());
    ROS_WARN_THROTTLE(5, "Using Standard kinect /openni_camera -> /openni_rgb_optical_frame as transformation (This message is throttled to 1 per 5 seconds)");
    base2points.setRotation(tf::createQuaternionFromRPY(-1.57, 0, -1.57));
    base2points.setOrigin(tf::Point(0, -0.04, 0));
    base2points.stamp_ = depth_time;
    base2points.frame_id_ = base_frame;
    base2points.child_frame_id_ = depth_frame_id;
  }
  node_ptr->setBase2PointsTransform(base2points);

  if(!gt_frame.empty()){
    // Retrieve the ground truth pose of the camera at the time of the depth image
    tf::StampedTransform ground_truth_transform;
    try{
      tflistener_->waitForTransform(gt_frame, "/openni_camera", depth_time, ros::Duration(0.005));
      tflistener_->lookupTransform(gt_frame, "/openni_camera", depth_time, ground_truth_transform);
      ground_truth_transform.stamp_ = depth_time;
      tf::StampedTransform b2p;
      b2p.setRotation(tf::createQuaternionFromRPY(-1.57, 0, -1.57));
      b2p.setOrigin(tf::Point(0, -0.04, 0));
      ground_truth_transform *= b2p;
    }
    catch (tf::TransformException& ex){
      ROS_WARN_THROTTLE(5, "%s - Using Identity for Ground Truth (This message is throttled to 1 per 5 seconds)", ex.what());
      ground_truth_transform = tf::StampedTransform(tf::Transform::getIdentity(), depth_time, "missing_ground_truth", "/openni_camera");
    }
    printTransform("Ground Truth", ground_truth_transform);
    node_ptr->setGroundTruthTransform(ground_truth_transform);
  }
  if(!odom_frame.empty()){
    // Retrieve the odometry pose of the camera at the time of the depth image
    tf::StampedTransform current_odom_transform;
    try{
      tflistener_->waitForTransform(depth_frame_id, odom_frame, depth_time, ros::Duration(0.005));
      tflistener_->lookupTransform(depth_frame_id, odom_frame, depth_time, current_odom_transform);
    }
    catch (tf::TransformException& ex){
      ROS_WARN_THROTTLE(5, "%s - No odometry available (This message is throttled to 1 per 5 seconds)", ex.what());
      current_odom_transform = tf::StampedTransform(tf::Transform::getIdentity(), depth_time, "missing_odometry", depth_frame_id);
      current_odom_transform.stamp_ = depth_time;
    }
    printTransform("Odometry", current_odom_transform);
    node_ptr->setOdomTransform(current_odom_transform);
  }
}

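/// Load a point cloud from a PCD file. Clouds whose organization was lost
/// on disk ("flat" width or height of 1) are restored to 640x480 if the
/// point count matches.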
bool readOneFile(const QString& qfilename, sensor_msgs::PointCloud2& cloud)
{
  if (pcl::io::loadPCDFile(qPrintable(qfilename), cloud) == -1)
  {
    ROS_ERROR("Couldn't read file %s", qPrintable(qfilename));
    return false;
  }

  if(cloud.width == 1 || cloud.height == 1){
    if(cloud.width * cloud.height == 640*480){ // Assume VGA resolution
      cloud.width = 640;
      cloud.height = 480;
    } else {
      ROS_ERROR("Cloud has \"flat\" dimensions: %d x %d", cloud.width, cloud.height);
      return false;
    }
  }
  return true;
}

bool readOneFile(const QString& qfilename, pointcloud_type::Ptr cloud){
  static int index = 0;
  if (pcl::io::loadPCDFile<point_type>(qPrintable(qfilename), *cloud) == -1)
  {
    ROS_ERROR("Couldn't read file %s", qPrintable(qfilename));
    return false;
  }

  ROS_DEBUG_STREAM("Frame Id: " << cloud->header.frame_id << " Stamp: " << cloud->header.stamp);
  if(cloud->header.frame_id.empty()){
    myHeader header(index++, ros::Time::now(), "/pcd_file_frame");
    cloud->header = header;
  }
  if(cloud->width == 1 || cloud->height == 1){
    if(cloud->height * cloud->width == 640*480) { // Assume VGA resolution
      cloud->width = 640;
      cloud->height = 480;
    } else {
      ROS_ERROR("Cloud has \"flat\" dimensions: %d x %d", cloud->width, cloud->height);
      return false;
    }
  }
  return true;
}

void OpenNIListener::loadPCDFiles(QStringList file_list)
{
  QtConcurrent::run(this, &OpenNIListener::loadPCDFilesAsync, file_list);
}

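/// Worker for loadPCDFiles(): sequentially read each PCD file, convert it
/// to an image/cloud pair and feed it to pcdCallback(). Pausing is
/// respected between files.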
void OpenNIListener::loadPCDFilesAsync(QStringList file_list)
{
  bool prior_state = pause_;
  pause_ = false;

  Q_EMIT iamBusy(3, "Loading PCD files", file_list.size());
  for (int i = 0; i < file_list.size(); i++)
  {
    Q_EMIT progress(3, "Loading PCD files", i);
    try{
      do{
        usleep(150);
        if(!ros::ok()) return;
      } while(pause_);

      ROS_INFO_NAMED("OpenNIListener", "Processing file %s", qPrintable(file_list.at(i)));

      sensor_msgs::Image::Ptr sm_img(new sensor_msgs::Image());
      sensor_msgs::PointCloud2::Ptr currentROSCloud(new sensor_msgs::PointCloud2());
      pointcloud_type::Ptr currentPCLCloud(new pointcloud_type());

      if (!readOneFile(file_list.at(i), currentPCLCloud)) {
        continue;
      } else {
        // Extract the RGB image from the (organized) point cloud
        pcl::toROSMsg<point_type>(*currentPCLCloud, *currentROSCloud);
        pcl::toROSMsg(*currentROSCloud, *sm_img);
        sm_img->encoding = "bgr8";
        this->image_encoding_ = "bgr8";
        pcdCallback(sm_img, currentPCLCloud);
      }
    } catch (...) {
      ROS_ERROR("Caught exception when processing pcd file %s", qPrintable(file_list.at(i)));
    }
  }
  Q_EMIT progress(3, "Loading PCD files", 1e6);

  pause_ = prior_state;
}

void OpenNIListener::loadBagFileFromGUI(QString file)
{
  QtConcurrent::run(this, &OpenNIListener::loadBag, file.toStdString());
}

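/// Feed a received odometry pose into the tf listener's transformer so it
/// can be looked up like any other transform.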
void OpenNIListener::odomCallback(const nav_msgs::OdometryConstPtr& odom_msg){
  tf::Transform tfTransf;
  tf::poseMsgToTF(odom_msg->pose.pose, tfTransf);
  tf::StampedTransform stTransf(tfTransf, odom_msg->header.stamp, odom_msg->header.frame_id, odom_msg->child_frame_id);
  printTransform("Odometry Transformation", stTransf);
  tflistener_->setTransform(stTransf);
}