depth_image_octomap_updater.cpp
00001 /*********************************************************************
00002  * Software License Agreement (BSD License)
00003  *
00004  *  Copyright (c) 2011, Willow Garage, Inc.
00005  *  All rights reserved.
00006  *
00007  *  Redistribution and use in source and binary forms, with or without
00008  *  modification, are permitted provided that the following conditions
00009  *  are met:
00010  *
00011  *   * Redistributions of source code must retain the above copyright
00012  *     notice, this list of conditions and the following disclaimer.
00013  *   * Redistributions in binary form must reproduce the above
00014  *     copyright notice, this list of conditions and the following
00015  *     disclaimer in the documentation and/or other materials provided
00016  *     with the distribution.
00017  *   * Neither the name of Willow Garage nor the names of its
00018  *     contributors may be used to endorse or promote products derived
00019  *     from this software without specific prior written permission.
00020  *
00021  *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
00022  *  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
00023  *  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
00024  *  FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
00025  *  COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
00026  *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
00027  *  BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
00028  *  LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
00029  *  CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
00030  *  LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
00031  *  ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
00032  *  POSSIBILITY OF SUCH DAMAGE.
00033  *********************************************************************/
00034 
00035 /* Author: Ioan Sucan, Suat Gedikli */
00036 
00037 #include <moveit/depth_image_octomap_updater/depth_image_octomap_updater.h>
00038 #include <moveit/occupancy_map_monitor/occupancy_map_monitor.h>
00039 #include <geometric_shapes/shape_operations.h>
00040 #include <sensor_msgs/image_encodings.h>
00041 #include <XmlRpcException.h>
00042 #include <stdint.h>
00043 
00044 namespace occupancy_map_monitor
00045 {
00046 
00047 DepthImageOctomapUpdater::DepthImageOctomapUpdater() :
00048   OccupancyMapUpdater("DepthImageUpdater"),
00049   nh_("~"),
00050   input_depth_transport_(nh_),
00051   model_depth_transport_(nh_),
00052   filtered_depth_transport_(nh_),
00053   filtered_label_transport_(nh_),
00054   image_topic_("depth"),
00055   queue_size_(5),
00056   near_clipping_plane_distance_(0.3),
00057   far_clipping_plane_distance_(5.0),
00058   shadow_threshold_(0.04),
00059   padding_scale_(0.0),
00060   padding_offset_(0.02),
00061   skip_vertical_pixels_(4),
00062   skip_horizontal_pixels_(6),
00063   image_callback_count_(0),
00064   average_callback_dt_(0.0),
00065   good_tf_(5), // start optimistically, so we do not output warnings right from the beginning
00066   failed_tf_(0),
00067   K0_(0.0), K2_(0.0), K4_(0.0), K5_(0.0)
00068 {
00069 }
00070 
00071 DepthImageOctomapUpdater::~DepthImageOctomapUpdater()
00072 {
00073   stopHelper();
00074 }
00075 
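/* setParams() pulls this updater's settings out of one entry of the occupancy map
 * monitor's sensor list (an XmlRpc struct). As an illustrative sketch only -- the
 * plugin name and the concrete values are assumptions, not taken from this file --
 * such an entry, e.g. in a sensors YAML file, could look like:
 *
 *   sensors:
 *     - sensor_type: occupancy_map_monitor/DepthImageOctomapUpdater
 *       image_topic: /camera/depth/image_raw
 *       queue_size: 5
 *       near_clipping_plane_distance: 0.3
 *       far_clipping_plane_distance: 5.0
 *       shadow_threshold: 0.04
 *       padding_scale: 0.0
 *       padding_offset: 0.02
 *       skip_vertical_pixels: 4
 *       skip_horizontal_pixels: 6
 *       filtered_cloud_topic: filtered_depth
 *
 * "sensor_type" is read unconditionally; all other keys are optional and fall back
 * to the defaults set in the constructor above. */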
00076 bool DepthImageOctomapUpdater::setParams(XmlRpc::XmlRpcValue &params)
00077 {
00078   try
00079   {
00080     sensor_type_ = (std::string) params["sensor_type"];
00081     if (params.hasMember("image_topic"))
00082       image_topic_ = (std::string) params["image_topic"];
00083     if (params.hasMember("queue_size"))
00084       queue_size_ = (int)params["queue_size"];
00085 
00086     readXmlParam(params, "near_clipping_plane_distance", &near_clipping_plane_distance_);
00087     readXmlParam(params, "far_clipping_plane_distance", &far_clipping_plane_distance_);
00088     readXmlParam(params, "shadow_threshold", &shadow_threshold_);
00089     readXmlParam(params, "padding_scale", &padding_scale_);
00090     readXmlParam(params, "padding_offset", &padding_offset_);
00091     readXmlParam(params, "skip_vertical_pixels", &skip_vertical_pixels_);
00092     readXmlParam(params, "skip_horizontal_pixels", &skip_horizontal_pixels_);
00093     if (params.hasMember("filtered_cloud_topic"))
00094       filtered_cloud_topic_ = static_cast<const std::string&>(params["filtered_cloud_topic"]);
00095   }
00096   catch (XmlRpc::XmlRpcException &ex)
00097   {
00098     ROS_ERROR("XmlRpc Exception: %s", ex.getMessage().c_str());
00099     return false;
00100   }
00101 
00102   return true;
00103 }
00104 
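/* initialize() wires up the self-filtering pipeline: a LazyFreeSpaceUpdater that
 * defers clearing of free space in the octree (fed via pushLazyUpdate() at the end
 * of the depth callback), and a MeshFilter parameterized with a StereoCameraModel.
 * The mesh filter renders the robot's own geometry against each depth image, using
 * the clipping range, shadow threshold and padding configured below, so that pixels
 * belonging to the robot can be labeled and excluded from the octomap;
 * getShapeTransform() is registered as the callback that supplies the current pose
 * of every filtered mesh. */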
00105 bool DepthImageOctomapUpdater::initialize()
00106 {
00107   tf_ = monitor_->getTFClient();
00108   free_space_updater_.reset(new LazyFreeSpaceUpdater(tree_));
00109 
00110   // create our mesh filter
00111   mesh_filter_.reset(new mesh_filter::MeshFilter<mesh_filter::StereoCameraModel>(mesh_filter::MeshFilterBase::TransformCallback(),
00112                                                                                  mesh_filter::StereoCameraModel::RegisteredPSDKParams));
00113   mesh_filter_->parameters().setDepthRange(near_clipping_plane_distance_, far_clipping_plane_distance_);
00114   mesh_filter_->setShadowThreshold(shadow_threshold_);
00115   mesh_filter_->setPaddingOffset(padding_offset_);
00116   mesh_filter_->setPaddingScale(padding_scale_);
00117   mesh_filter_->setTransformCallback(boost::bind(&DepthImageOctomapUpdater::getShapeTransform, this, _1, _2));
00118 
00119   return true;
00120 }
00121 
00122 void DepthImageOctomapUpdater::start()
00123 {
00124   image_transport::TransportHints hints("raw", ros::TransportHints(), nh_);
00125   pub_model_depth_image_ = model_depth_transport_.advertiseCamera("model_depth", 1);
00126 
00127   if(!filtered_cloud_topic_.empty())
00128     pub_filtered_depth_image_ = filtered_depth_transport_.advertiseCamera(filtered_cloud_topic_, 1);
00129   else
00130     pub_filtered_depth_image_ = filtered_depth_transport_.advertiseCamera("filtered_depth", 1);
00131 
00132   pub_filtered_label_image_ = filtered_label_transport_.advertiseCamera("filtered_label", 1);
00133 
00134   sub_depth_image_ = input_depth_transport_.subscribeCamera(image_topic_, queue_size_, &DepthImageOctomapUpdater::depthImageCallback, this, hints);
00135 }
00136 
00137 void DepthImageOctomapUpdater::stop()
00138 {
00139   stopHelper();
00140 }
00141 
00142 void DepthImageOctomapUpdater::stopHelper()
00143 {
00144   sub_depth_image_.shutdown();
00145 }
00146 
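/* excludeShape() registers a shape with the mesh filter so that its pixels are
 * filtered out of incoming depth images. Meshes are added directly; other shape
 * types are first converted to a mesh via shapes::createMeshFromShape(). The
 * returned handle is what getShapeTransform() later resolves to a pose through the
 * transform cache. */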
00147 mesh_filter::MeshHandle DepthImageOctomapUpdater::excludeShape(const shapes::ShapeConstPtr &shape)
00148 {
00149   mesh_filter::MeshHandle h = 0;
00150   if (mesh_filter_)
00151   {
00152     if (shape->type == shapes::MESH)
00153       h = mesh_filter_->addMesh(static_cast<const shapes::Mesh&>(*shape));
00154     else
00155     {
00156       boost::scoped_ptr<shapes::Mesh> m(shapes::createMeshFromShape(shape.get()));
00157       if (m)
00158         h = mesh_filter_->addMesh(*m);
00159     }
00160   }
00161   else
00162     ROS_ERROR("Mesh filter not yet initialized!");
00163   return h;
00164 }
00165 
00166 void DepthImageOctomapUpdater::forgetShape(mesh_filter::MeshHandle handle)
00167 {
00168   if (mesh_filter_)
00169     mesh_filter_->removeMesh(handle);
00170 }
00171 
00172 bool DepthImageOctomapUpdater::getShapeTransform(mesh_filter::MeshHandle h, Eigen::Affine3d &transform) const
00173 {
00174   ShapeTransformCache::const_iterator it = transform_cache_.find(h);
00175   if (it == transform_cache_.end())
00176   {
00177     ROS_ERROR("Internal error. Mesh filter handle %u not found", h);
00178     return false;
00179   }
00180   transform = it->second;
00181   return true;
00182 }
00183 
00184 namespace
00185 {
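/* Determine the host byte order at runtime: the union stores the 32-bit constant
 * 0x01020304 and inspects its first byte. On a big-endian host the most significant
 * byte comes first, so c[0] == 1; on a little-endian host c[0] == 4. The result is
 * compared against the is_bigendian flag of incoming images in the depth callback. */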
00186 bool host_is_big_endian(void)
00187 {
00188   union {
00189       uint32_t i;
00190       char c[sizeof(uint32_t)];
00191   } bint = {0x01020304};
00192   return bint.c[0] == 1;
00193 }
00194 }
00195 
00196 static const bool HOST_IS_BIG_ENDIAN = host_is_big_endian();
00197 
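/* Per-image processing pipeline:
 *  1. track the callback rate (used to size the TF wait below);
 *  2. resolve the sensor-to-map transform and the poses of all filtered shapes;
 *  3. run the depth image through the mesh filter to label model/background pixels;
 *  4. optionally republish model, filtered and label images for debugging;
 *  5. back-project the remaining pixels and collect occupied and model octree keys;
 *  6. mark occupied cells in the octree and hand both key sets to the lazy
 *     free-space updater. */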
00198 void DepthImageOctomapUpdater::depthImageCallback(const sensor_msgs::ImageConstPtr& depth_msg, const sensor_msgs::CameraInfoConstPtr& info_msg)
00199 {
00200   ROS_DEBUG("Received a new depth image message (frame = '%s', encoding='%s')", depth_msg->header.frame_id.c_str(), depth_msg->encoding.c_str());
00201 
00202   ros::WallTime start = ros::WallTime::now();
00203 
00204   // measure the frequency at which we receive updates
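  // A running mean of the inter-arrival time is maintained incrementally:
  //   avg_n = ((n - 1) * avg_{n-1} + dt_n) / n,   with n = image_callback_count_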
00205   if (image_callback_count_ < 1000)
00206   {
00207     if (image_callback_count_ > 0)
00208     {
00209       const double dt_start = (start - last_depth_callback_start_).toSec();
00210       if (image_callback_count_ < 2)
00211         average_callback_dt_ = dt_start;
00212       else
00213         average_callback_dt_ = ((image_callback_count_ - 1) * average_callback_dt_ + dt_start) / (double)image_callback_count_;
00214     }
00215   }
00216   else
00217     // every 1000 updates, reset the counter almost to the beginning (to 2 rather than 1, so the measured average does not ripple as much)
00218     image_callback_count_ = 2;
00219   last_depth_callback_start_ = start;
00220   ++image_callback_count_;
00221 
00222   if (monitor_->getMapFrame().empty())
00223     monitor_->setMapFrame(depth_msg->header.frame_id);
00224 
00225   /* get transform for cloud into map frame */
00226   tf::StampedTransform map_H_sensor;
00227   if (monitor_->getMapFrame() == depth_msg->header.frame_id)
00228     map_H_sensor.setIdentity();
00229   else
00230   {
00231     if (tf_)
00232     {
00233       // retry the lookup in TEST_DT increments; the total wait is roughly half a queue's worth of average callback periods
00234       static const double TEST_DT = 0.005;
00235       const int nt = (int)(0.5 + average_callback_dt_ / TEST_DT) * std::max(1, ((int)queue_size_ / 2));
00236       bool found = false;
00237       std::string err;
00238       for (int t = 0 ; t < nt ; ++t)
00239         try
00240         {
00241           tf_->lookupTransform(monitor_->getMapFrame(), depth_msg->header.frame_id, depth_msg->header.stamp, map_H_sensor);
00242           found = true;
00243           break;
00244         }
00245         catch (tf::TransformException &ex)
00246         {
00247           static const ros::Duration d(TEST_DT);
00248           err = ex.what();
00249           d.sleep();
00250         }
00251       static const unsigned int MAX_TF_COUNTER = 1000; // so we avoid int overflow
00252       if (found)
00253       {
00254         good_tf_++;
00255         if (good_tf_ > MAX_TF_COUNTER)
00256         {
00257           const unsigned int div = MAX_TF_COUNTER/10;
00258           good_tf_ /= div;
00259           failed_tf_ /= div;
00260         }
00261       }
00262       else
00263       {
00264         failed_tf_++;
00265         if (failed_tf_ > good_tf_)
00266           ROS_WARN_THROTTLE(1, "More than half of the image messages discarded due to TF being unavailable (%u%%). Transform error of sensor data: %s; quitting callback.",
00267                             (100 * failed_tf_) / (good_tf_ + failed_tf_), err.c_str());
00268         else
00269           ROS_DEBUG_THROTTLE(1, "Transform error of sensor data: %s; quitting callback", err.c_str());
00270         if (failed_tf_ > MAX_TF_COUNTER)
00271         {
00272           const unsigned int div = MAX_TF_COUNTER/10;
00273           good_tf_ /= div;
00274           failed_tf_ /= div;
00275         }
00276         return;
00277       }
00278     }
00279     else
00280       return;
00281   }
00282 
00283   if (!updateTransformCache(depth_msg->header.frame_id, depth_msg->header.stamp))
00284   {
00285     ROS_ERROR_THROTTLE(1, "Transform cache was not updated. Self-filtering may fail.");
00286     return;
00287   }
00288 
00289   if (depth_msg->is_bigendian && !HOST_IS_BIG_ENDIAN)
00290     ROS_ERROR_THROTTLE(1, "Endianness mismatch: byte order of received image data does not match the host");
00291 
00292   const int w = depth_msg->width;
00293   const int h = depth_msg->height;
00294 
00295   // call the mesh filter
00296   mesh_filter::StereoCameraModel::Parameters& params = mesh_filter_->parameters();
00297   params.setCameraParameters (info_msg->K[0], info_msg->K[4], info_msg->K[2], info_msg->K[5]);
00298   params.setImageSize(w, h);
00299 
00300   const bool is_u_short = depth_msg->encoding == sensor_msgs::image_encodings::TYPE_16UC1;
00301   if (is_u_short)
00302     mesh_filter_->filter(&depth_msg->data[0], GL_UNSIGNED_SHORT);
00303   else
00304   {
00305     if (depth_msg->encoding != sensor_msgs::image_encodings::TYPE_32FC1)
00306     {
00307       ROS_ERROR_THROTTLE(1, "Unexpected encoding type: '%s'. Ignoring input.", depth_msg->encoding.c_str());
00308       return;
00309     }
00310     mesh_filter_->filter(&depth_msg->data[0], GL_FLOAT);
00311   }
00312 
00313   // the mesh filter runs in background; compute extra things in the meantime
00314 
00315   // Use correct principal point from calibration
00316   const double px = info_msg->K[2];
00317   const double py = info_msg->K[5];
00318 
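  // Back-projecting a pixel (x, y) with depth z uses the pinhole model:
  //   X = (x - cx) / fx * z,   Y = (y - cy) / fy * z,   Z = z
  // where fx = K[0], fy = K[4], cx = K[2], cy = K[5]. The depth-independent factors
  // (x - cx) / fx and (y - cy) / fy are precomputed per column/row into x_cache_ and
  // y_cache_ whenever the image size or intrinsics change.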
00319   // if the image size or camera intrinsics have changed, recompute the cached back-projection terms
00320   if (w >= x_cache_.size() || h >= y_cache_.size() || K2_ != px || K5_ != py || K0_ != info_msg->K[0] || K4_ != info_msg->K[4])
00321   {
00322     K2_ = px;
00323     K5_ = py;
00324     K0_ = info_msg->K[0];
00325     K4_ = info_msg->K[4];
00326 
00327     inv_fx_ = 1.0 / K0_;
00328     inv_fy_ = 1.0 / K4_;
00329 
00330     // if any of these values is NaN, discard the data (a value compares unequal to itself iff it is NaN)
00331     if (!(px == px && py == py && inv_fx_ == inv_fx_ && inv_fy_ == inv_fy_))
00332       return;
00333 
00334     // Pre-compute some constants
00335     if (x_cache_.size() < w)
00336       x_cache_.resize(w);
00337     if (y_cache_.size() < h)
00338       y_cache_.resize(h);
00339 
00340     for (int x = 0; x < w; ++x)
00341       x_cache_[x] = (x - px) * inv_fx_;
00342 
00343     for (int y = 0; y < h; ++y)
00344       y_cache_[y] = (y - py) * inv_fy_;
00345   }
00346 
00347   const octomap::point3d sensor_origin(map_H_sensor.getOrigin().getX(), map_H_sensor.getOrigin().getY(), map_H_sensor.getOrigin().getZ());
00348 
00349   octomap::KeySet *occupied_cells_ptr = new octomap::KeySet();
00350   octomap::KeySet *model_cells_ptr = new octomap::KeySet();
00351   octomap::KeySet &occupied_cells = *occupied_cells_ptr;
00352   octomap::KeySet &model_cells = *model_cells_ptr;
00353 
00354   // allocate memory if needed
00355   std::size_t img_size = h * w;
00356   if (filtered_labels_.size() < img_size)
00357     filtered_labels_.resize(img_size);
00358 
00359   // get the labels of the filtered data
00360   const unsigned int* labels_row = &filtered_labels_ [0];
00361   mesh_filter_->getFilteredLabels(&filtered_labels_ [0]);
00362 
00363   // publish debug information if needed
00364   if (debug_info_)
00365   {
00366     sensor_msgs::Image debug_msg;
00367     debug_msg.header = depth_msg->header;
00368     debug_msg.height = depth_msg->height;
00369     debug_msg.width = depth_msg->width;
00370     debug_msg.encoding = sensor_msgs::image_encodings::TYPE_32FC1;
00371     debug_msg.is_bigendian = depth_msg->is_bigendian;
00372     debug_msg.step = depth_msg->step;
00373     debug_msg.data.resize(img_size * sizeof(float));
00374     mesh_filter_->getModelDepth(reinterpret_cast<float*>(&debug_msg.data[0]));
00375     pub_model_depth_image_.publish(debug_msg, *info_msg);
00376 
00377     sensor_msgs::Image filtered_depth_msg;
00378     filtered_depth_msg.header = depth_msg->header;
00379     filtered_depth_msg.height = depth_msg->height;
00380     filtered_depth_msg.width = depth_msg->width;
00381     filtered_depth_msg.encoding = sensor_msgs::image_encodings::TYPE_32FC1;
00382     filtered_depth_msg.is_bigendian = depth_msg->is_bigendian;
00383     filtered_depth_msg.step = depth_msg->step;
00384     filtered_depth_msg.data.resize(img_size * sizeof(float));
00385 
00386     mesh_filter_->getFilteredDepth(reinterpret_cast<float*>(&filtered_depth_msg.data[0]));
00387     pub_filtered_depth_image_.publish(filtered_depth_msg, *info_msg);
00388 
00389     sensor_msgs::Image label_msg;
00390     label_msg.header = depth_msg->header;
00391     label_msg.height = depth_msg->height;
00392     label_msg.width = depth_msg->width;
00393     label_msg.encoding = sensor_msgs::image_encodings::RGBA8;
00394     label_msg.is_bigendian = depth_msg->is_bigendian;
00395     label_msg.step = w * sizeof(unsigned int);
00396     label_msg.data.resize(img_size * sizeof(unsigned int));
00397     mesh_filter_->getFilteredLabels(reinterpret_cast<unsigned int*>(&label_msg.data[0]));
00398 
00399     pub_filtered_label_image_.publish(label_msg, *info_msg);
00400   }
00401 
00402   if(!filtered_cloud_topic_.empty())
00403   {
00404     static std::vector<float> filtered_data;
00405     sensor_msgs::Image filtered_msg;
00406     filtered_msg.header = depth_msg->header;
00407     filtered_msg.height = depth_msg->height;
00408     filtered_msg.width = depth_msg->width;
00409     filtered_msg.encoding = sensor_msgs::image_encodings::TYPE_16UC1;
00410     filtered_msg.is_bigendian = depth_msg->is_bigendian;
00411     filtered_msg.step = depth_msg->step;
00412     filtered_msg.data.resize(img_size * sizeof(unsigned short));
00413     if(filtered_data.size() < img_size)
00414       filtered_data.resize(img_size);
00415     mesh_filter_->getFilteredDepth(reinterpret_cast<float*>(&filtered_data[0]));
00416     unsigned short* tmp_ptr = (unsigned short*) &filtered_msg.data[0];
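    // convert the filtered depth from meters (float) to 16UC1 millimeters,
    // rounding to the nearest integer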
00417     for(std::size_t i=0; i < img_size; ++i)
00418     {
00419       tmp_ptr[i] = (unsigned short) (filtered_data[i] * 1000 + 0.5);
00420     }
00421     pub_filtered_depth_image_.publish(filtered_msg, *info_msg);
00422   }
00423 
00424   // figure out occupied cells and model cells
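  // Pixels labeled Background by the mesh filter are real sensor returns and become
  // occupied cells. Pixels on the far plane or belonging to a filtered model mesh
  // (labels >= FarClip, per the checks below) are collected separately as model
  // cells; these are later removed from the occupied set and handed to the
  // free-space updater. For 16UC1 images the raw depth is in millimeters and is
  // scaled to meters before back-projection.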
00425   tree_->lockRead();
00426 
00427   try
00428   {
00429     const int h_bound = h - skip_vertical_pixels_;
00430     const int w_bound = w - skip_horizontal_pixels_;
00431 
00432     if (is_u_short)
00433     {
00434       const uint16_t *input_row = reinterpret_cast<const uint16_t*>(&depth_msg->data[0]);
00435 
00436       for (int y = skip_vertical_pixels_ ; y < h_bound ; ++y, labels_row += w, input_row += w)
00437         for (int x = skip_horizontal_pixels_ ; x < w_bound ; ++x)
00438         {
00439           // not filtered
00440           if (labels_row [x] == mesh_filter::MeshFilterBase::Background)
00441           {
00442             float zz = (float)input_row[x] * 1e-3; // scale from mm to m
00443             float yy = y_cache_[y] * zz;
00444             float xx = x_cache_[x] * zz;
00445             /* transform to map frame */
00446             tf::Vector3 point_tf = map_H_sensor * tf::Vector3(xx, yy, zz);
00447             occupied_cells.insert(tree_->coordToKey(point_tf.getX(), point_tf.getY(), point_tf.getZ()));
00448           }
00449           // on far plane or a model point -> remove
00450           else if (labels_row [x] >= mesh_filter::MeshFilterBase::FarClip)
00451           {
00452             float zz = input_row[x] * 1e-3;
00453             float yy = y_cache_[y] * zz;
00454             float xx = x_cache_[x] * zz;
00455             /* transform to map frame */
00456             tf::Vector3 point_tf = map_H_sensor * tf::Vector3(xx, yy, zz);
00457             // add to the list of model cells
00458             model_cells.insert(tree_->coordToKey(point_tf.getX(), point_tf.getY(), point_tf.getZ()));
00459           }
00460         }
00461     }
00462     else
00463     {
00464       const float *input_row = reinterpret_cast<const float*>(&depth_msg->data[0]);
00465 
00466       for (int y = skip_vertical_pixels_ ; y < h_bound ; ++y, labels_row += w, input_row += w)
00467         for (int x = skip_horizontal_pixels_ ; x < w_bound ; ++x)
00468         {
00469           if (labels_row [x] == mesh_filter::MeshFilterBase::Background)
00470           {
00471             float zz = input_row[x];
00472             float yy = y_cache_[y] * zz;
00473             float xx = x_cache_[x] * zz;
00474             /* transform to map frame */
00475             tf::Vector3 point_tf = map_H_sensor * tf::Vector3(xx, yy, zz);
00476             occupied_cells.insert(tree_->coordToKey(point_tf.getX(), point_tf.getY(), point_tf.getZ()));
00477           }
00478           else if (labels_row [x] >= mesh_filter::MeshFilterBase::FarClip)
00479           {
00480             float zz = input_row[x];
00481             float yy = y_cache_[y] * zz;
00482             float xx = x_cache_[x] * zz;
00483             /* transform to map frame */
00484             tf::Vector3 point_tf = map_H_sensor * tf::Vector3(xx, yy, zz);
00485             // add to the list of model cells
00486             model_cells.insert(tree_->coordToKey(point_tf.getX(), point_tf.getY(), point_tf.getZ()));
00487           }
00488         }
00489     }
00490 
00491   }
00492   catch (...)
00493   {
00494     tree_->unlockRead();
00495     return;
00496   }
00497   tree_->unlockRead();
00498 
00499   /* cells that overlap with the model are not occupied */
00500   for (octomap::KeySet::iterator it = model_cells.begin(), end = model_cells.end(); it != end; ++it)
00501     occupied_cells.erase(*it);
00502 
00503   // mark occupied cells
00504   tree_->lockWrite();
00505   try
00506   {
00507     /* now mark all occupied cells */
00508     for (octomap::KeySet::iterator it = occupied_cells.begin(), end = occupied_cells.end(); it != end; ++it)
00509       tree_->updateNode(*it, true);
00510   }
00511   catch (...)
00512   {
00513     ROS_ERROR("Internal error while updating octree");
00514   }
00515   tree_->unlockWrite();
00516   tree_->triggerUpdateCallback();
00517 
00518   // at this point we still have not freed the space
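  // ownership of the two heap-allocated key sets passes to the LazyFreeSpaceUpdater,
  // which is assumed to process and free them asynchronously; hence no delete here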
00519   free_space_updater_->pushLazyUpdate(occupied_cells_ptr, model_cells_ptr, sensor_origin);
00520 
00521   ROS_DEBUG("Processed depth image in %lf ms", (ros::WallTime::now() - start).toSec() * 1000.0);
00522 }
00523 
00524 }  // namespace occupancy_map_monitor

