depth_image_octomap_updater.cpp
/*********************************************************************
 * Software License Agreement (BSD License)
 *
 *  Copyright (c) 2011, Willow Garage, Inc.
 *  All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above
 *     copyright notice, this list of conditions and the following
 *     disclaimer in the documentation and/or other materials provided
 *     with the distribution.
 *   * Neither the name of Willow Garage nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 *  FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 *  COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 *  BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 *  LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 *  CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 *  LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 *  ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 *********************************************************************/

/* Author: Ioan Sucan, Suat Gedikli */

#include <moveit/depth_image_octomap_updater/depth_image_octomap_updater.h>
#include <moveit/occupancy_map_monitor/occupancy_map_monitor.h>
#include <geometric_shapes/shape_operations.h>
#include <sensor_msgs/image_encodings.h>
#include <XmlRpcException.h>
#include <stdint.h>

namespace occupancy_map_monitor
{
DepthImageOctomapUpdater::DepthImageOctomapUpdater()
  : OccupancyMapUpdater("DepthImageUpdater")
  , nh_("~")
  , input_depth_transport_(nh_)
  , model_depth_transport_(nh_)
  , filtered_depth_transport_(nh_)
  , filtered_label_transport_(nh_)
  , image_topic_("depth")
  , queue_size_(5)
  , near_clipping_plane_distance_(0.3)
  , far_clipping_plane_distance_(5.0)
  , shadow_threshold_(0.04)
  , padding_scale_(0.0)
  , padding_offset_(0.02)
  , skip_vertical_pixels_(4)
  , skip_horizontal_pixels_(6)
  , image_callback_count_(0)
  , average_callback_dt_(0.0)
  , good_tf_(5)  // start optimistically, so we do not output warnings right from the beginning
  , failed_tf_(0)
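  // cached camera intrinsics from the most recent CameraInfo: K0_ = fx, K2_ = cx, K4_ = fy, K5_ = cy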
  , K0_(0.0)
  , K2_(0.0)
  , K4_(0.0)
  , K5_(0.0)
{
}

DepthImageOctomapUpdater::~DepthImageOctomapUpdater()
{
  stopHelper();
}

bool DepthImageOctomapUpdater::setParams(XmlRpc::XmlRpcValue& params)
{
  try
  {
    sensor_type_ = (std::string)params["sensor_type"];
    if (params.hasMember("image_topic"))
      image_topic_ = (std::string)params["image_topic"];
    if (params.hasMember("queue_size"))
      queue_size_ = (int)params["queue_size"];

    readXmlParam(params, "near_clipping_plane_distance", &near_clipping_plane_distance_);
    readXmlParam(params, "far_clipping_plane_distance", &far_clipping_plane_distance_);
    readXmlParam(params, "shadow_threshold", &shadow_threshold_);
    readXmlParam(params, "padding_scale", &padding_scale_);
    readXmlParam(params, "padding_offset", &padding_offset_);
    readXmlParam(params, "skip_vertical_pixels", &skip_vertical_pixels_);
    readXmlParam(params, "skip_horizontal_pixels", &skip_horizontal_pixels_);
    if (params.hasMember("filtered_cloud_topic"))
      filtered_cloud_topic_ = static_cast<const std::string&>(params["filtered_cloud_topic"]);
  }
  catch (XmlRpc::XmlRpcException& ex)
  {
    ROS_ERROR("XmlRpc Exception: %s", ex.getMessage().c_str());
    return false;
  }

  return true;
}

bool DepthImageOctomapUpdater::initialize()
{
  tf_ = monitor_->getTFClient();
  free_space_updater_.reset(new LazyFreeSpaceUpdater(tree_));

  // create our mesh filter
  mesh_filter_.reset(new mesh_filter::MeshFilter<mesh_filter::StereoCameraModel>(
      mesh_filter::MeshFilterBase::TransformCallback(), mesh_filter::StereoCameraModel::RegisteredPSDKParams));
  mesh_filter_->parameters().setDepthRange(near_clipping_plane_distance_, far_clipping_plane_distance_);
  mesh_filter_->setShadowThreshold(shadow_threshold_);
  mesh_filter_->setPaddingOffset(padding_offset_);
  mesh_filter_->setPaddingScale(padding_scale_);
  mesh_filter_->setTransformCallback(boost::bind(&DepthImageOctomapUpdater::getShapeTransform, this, _1, _2));
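  // shape poses are supplied on demand: the mesh filter calls getShapeTransform(), which reads transform_cache_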

  return true;
}

void DepthImageOctomapUpdater::start()
{
  image_transport::TransportHints hints("raw", ros::TransportHints(), nh_);
  pub_model_depth_image_ = model_depth_transport_.advertiseCamera("model_depth", 1);

  if (!filtered_cloud_topic_.empty())
    pub_filtered_depth_image_ = filtered_depth_transport_.advertiseCamera(filtered_cloud_topic_, 1);
  else
    pub_filtered_depth_image_ = filtered_depth_transport_.advertiseCamera("filtered_depth", 1);

  pub_filtered_label_image_ = filtered_label_transport_.advertiseCamera("filtered_label", 1);

  sub_depth_image_ = input_depth_transport_.subscribeCamera(image_topic_, queue_size_,
                                                            &DepthImageOctomapUpdater::depthImageCallback, this, hints);
}

void DepthImageOctomapUpdater::stop()
{
  stopHelper();
}

void DepthImageOctomapUpdater::stopHelper()
{
  sub_depth_image_.shutdown();
}

mesh_filter::MeshHandle DepthImageOctomapUpdater::excludeShape(const shapes::ShapeConstPtr& shape)
{
  mesh_filter::MeshHandle h = 0;
  if (mesh_filter_)
  {
    if (shape->type == shapes::MESH)
      h = mesh_filter_->addMesh(static_cast<const shapes::Mesh&>(*shape));
    else
    {
      boost::scoped_ptr<shapes::Mesh> m(shapes::createMeshFromShape(shape.get()));
      if (m)
        h = mesh_filter_->addMesh(*m);
    }
  }
  else
    ROS_ERROR("Mesh filter not yet initialized!");
  return h;
}

void DepthImageOctomapUpdater::forgetShape(mesh_filter::MeshHandle handle)
{
  if (mesh_filter_)
    mesh_filter_->removeMesh(handle);
}

bool DepthImageOctomapUpdater::getShapeTransform(mesh_filter::MeshHandle h, Eigen::Affine3d& transform) const
{
  ShapeTransformCache::const_iterator it = transform_cache_.find(h);
  if (it == transform_cache_.end())
  {
    ROS_ERROR("Internal error. Mesh filter handle %u not found", h);
    return false;
  }
  transform = it->second;
  return true;
}

namespace
{
bool host_is_big_endian()
{
  union
  {
    uint32_t i;
    char c[sizeof(uint32_t)];
  } bint = { 0x01020304 };
  return bint.c[0] == 1;
}
}

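// determine the host byte order once; depthImageCallback() uses it to warn when an incoming image's endianness does not match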
static const bool HOST_IS_BIG_ENDIAN = host_is_big_endian();

void DepthImageOctomapUpdater::depthImageCallback(const sensor_msgs::ImageConstPtr& depth_msg,
                                                  const sensor_msgs::CameraInfoConstPtr& info_msg)
{
  ROS_DEBUG("Received a new depth image message (frame = '%s', encoding='%s')", depth_msg->header.frame_id.c_str(),
            depth_msg->encoding.c_str());

  ros::WallTime start = ros::WallTime::now();

  // measure the frequency at which we receive updates
  if (image_callback_count_ < 1000)
  {
    if (image_callback_count_ > 0)
    {
      const double dt_start = (start - last_depth_callback_start_).toSec();
      if (image_callback_count_ < 2)
        average_callback_dt_ = dt_start;
      else
        average_callback_dt_ =
            ((image_callback_count_ - 1) * average_callback_dt_ + dt_start) / (double)image_callback_count_;
    }
  }
  else
    // every 1000 updates we reset the counter almost to the beginning (use 2 so we don't have so much of a ripple in
    // the measured average)
    image_callback_count_ = 2;
  last_depth_callback_start_ = start;
  ++image_callback_count_;

  if (monitor_->getMapFrame().empty())
    monitor_->setMapFrame(depth_msg->header.frame_id);

  /* get transform for cloud into map frame */
  tf::StampedTransform map_H_sensor;
  if (monitor_->getMapFrame() == depth_msg->header.frame_id)
    map_H_sensor.setIdentity();
  else
  {
    if (tf_)
    {
      // retry the transform lookup for a short while, sleeping TEST_DT between attempts;
      // the number of attempts scales with the average time between images and the queue size
      static const double TEST_DT = 0.005;
      const int nt = (int)(0.5 + average_callback_dt_ / TEST_DT) * std::max(1, ((int)queue_size_ / 2));
      bool found = false;
      std::string err;
      for (int t = 0; t < nt; ++t)
        try
        {
          tf_->lookupTransform(monitor_->getMapFrame(), depth_msg->header.frame_id, depth_msg->header.stamp,
                               map_H_sensor);
          found = true;
          break;
        }
        catch (tf::TransformException& ex)
        {
          static const ros::Duration d(TEST_DT);
          err = ex.what();
          d.sleep();
        }
      static const unsigned int MAX_TF_COUNTER = 1000;  // so we avoid int overflow
      if (found)
      {
        good_tf_++;
        if (good_tf_ > MAX_TF_COUNTER)
        {
          const unsigned int div = MAX_TF_COUNTER / 10;
          good_tf_ /= div;
          failed_tf_ /= div;
        }
      }
      else
      {
        failed_tf_++;
        if (failed_tf_ > good_tf_)
          ROS_WARN_THROTTLE(1, "More than half of the image messages discarded due to TF being unavailable (%u%%). "
                               "Transform error of sensor data: %s; quitting callback.",
                            (100 * failed_tf_) / (good_tf_ + failed_tf_), err.c_str());
        else
          ROS_DEBUG_THROTTLE(1, "Transform error of sensor data: %s; quitting callback", err.c_str());
        if (failed_tf_ > MAX_TF_COUNTER)
        {
          const unsigned int div = MAX_TF_COUNTER / 10;
          good_tf_ /= div;
          failed_tf_ /= div;
        }
        return;
      }
    }
    else
      return;
  }

  if (!updateTransformCache(depth_msg->header.frame_id, depth_msg->header.stamp))
  {
    ROS_ERROR_THROTTLE(1, "Transform cache was not updated. Self-filtering may fail.");
    return;
  }

  if (depth_msg->is_bigendian && !HOST_IS_BIG_ENDIAN)
    ROS_ERROR_THROTTLE(1, "endian problem: received image data does not match host");

  const int w = depth_msg->width;
  const int h = depth_msg->height;

  // call the mesh filter
  mesh_filter::StereoCameraModel::Parameters& params = mesh_filter_->parameters();
  params.setCameraParameters(info_msg->K[0], info_msg->K[4], info_msg->K[2], info_msg->K[5]);
  params.setImageSize(w, h);

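  // the mesh filter renders the meshes registered via excludeShape() with these intrinsics and labels each
  // pixel of the depth image (background, shadow, clip plane, or model) so robot points can be filtered out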
  const bool is_u_short = depth_msg->encoding == sensor_msgs::image_encodings::TYPE_16UC1;
  if (is_u_short)
    mesh_filter_->filter(&depth_msg->data[0], GL_UNSIGNED_SHORT);
  else
  {
    if (depth_msg->encoding != sensor_msgs::image_encodings::TYPE_32FC1)
    {
      ROS_ERROR_THROTTLE(1, "Unexpected encoding type: '%s'. Ignoring input.", depth_msg->encoding.c_str());
      return;
    }
    mesh_filter_->filter(&depth_msg->data[0], GL_FLOAT);
  }

  // the mesh filter runs in the background; compute extra things in the meantime

  // Use correct principal point from calibration
  const double px = info_msg->K[2];
  const double py = info_msg->K[5];

  // if the camera parameters have changed at all, recompute the cache we had
  if (w >= x_cache_.size() || h >= y_cache_.size() || K2_ != px || K5_ != py || K0_ != info_msg->K[0] ||
      K4_ != info_msg->K[4])
  {
    K2_ = px;
    K5_ = py;
    K0_ = info_msg->K[0];
    K4_ = info_msg->K[4];

    inv_fx_ = 1.0 / K0_;
    inv_fy_ = 1.0 / K4_;

    // if there are any NaNs, discard data
    if (!(px == px && py == py && inv_fx_ == inv_fx_ && inv_fy_ == inv_fy_))
      return;

    // Pre-compute some constants
    if (x_cache_.size() < w)
      x_cache_.resize(w);
    if (y_cache_.size() < h)
      y_cache_.resize(h);

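    // back-projection lookup tables: a pixel (x, y) with depth z maps to the point
    // (x_cache_[x] * z, y_cache_[y] * z, z) in the sensor frame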
    for (int x = 0; x < w; ++x)
      x_cache_[x] = (x - px) * inv_fx_;

    for (int y = 0; y < h; ++y)
      y_cache_[y] = (y - py) * inv_fy_;
  }

  const octomap::point3d sensor_origin(map_H_sensor.getOrigin().getX(), map_H_sensor.getOrigin().getY(),
                                       map_H_sensor.getOrigin().getZ());

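  // heap-allocated: these key sets are handed off to the lazy free-space updater via pushLazyUpdate()
  // at the end of this callback and are not freed here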
  octomap::KeySet* occupied_cells_ptr = new octomap::KeySet();
  octomap::KeySet* model_cells_ptr = new octomap::KeySet();
  octomap::KeySet& occupied_cells = *occupied_cells_ptr;
  octomap::KeySet& model_cells = *model_cells_ptr;

  // allocate memory if needed
  std::size_t img_size = h * w;
  if (filtered_labels_.size() < img_size)
    filtered_labels_.resize(img_size);

  // get the labels of the filtered data
  const unsigned int* labels_row = &filtered_labels_[0];
  mesh_filter_->getFilteredLabels(&filtered_labels_[0]);

  // publish debug information if needed
  if (debug_info_)
  {
    sensor_msgs::Image debug_msg;
    debug_msg.header = depth_msg->header;
    debug_msg.height = depth_msg->height;
    debug_msg.width = depth_msg->width;
    debug_msg.encoding = sensor_msgs::image_encodings::TYPE_32FC1;
    debug_msg.is_bigendian = depth_msg->is_bigendian;
    debug_msg.step = depth_msg->step;
    debug_msg.data.resize(img_size * sizeof(float));
    mesh_filter_->getModelDepth(reinterpret_cast<float*>(&debug_msg.data[0]));
    pub_model_depth_image_.publish(debug_msg, *info_msg);

    sensor_msgs::Image filtered_depth_msg;
    filtered_depth_msg.header = depth_msg->header;
    filtered_depth_msg.height = depth_msg->height;
    filtered_depth_msg.width = depth_msg->width;
    filtered_depth_msg.encoding = sensor_msgs::image_encodings::TYPE_32FC1;
    filtered_depth_msg.is_bigendian = depth_msg->is_bigendian;
    filtered_depth_msg.step = depth_msg->step;
    filtered_depth_msg.data.resize(img_size * sizeof(float));

    mesh_filter_->getFilteredDepth(reinterpret_cast<float*>(&filtered_depth_msg.data[0]));
    pub_filtered_depth_image_.publish(filtered_depth_msg, *info_msg);

    sensor_msgs::Image label_msg;
    label_msg.header = depth_msg->header;
    label_msg.height = depth_msg->height;
    label_msg.width = depth_msg->width;
    label_msg.encoding = sensor_msgs::image_encodings::RGBA8;
    label_msg.is_bigendian = depth_msg->is_bigendian;
    label_msg.step = w * sizeof(unsigned int);
    label_msg.data.resize(img_size * sizeof(unsigned int));
    mesh_filter_->getFilteredLabels(reinterpret_cast<unsigned int*>(&label_msg.data[0]));

    pub_filtered_label_image_.publish(label_msg, *info_msg);
  }

  if (!filtered_cloud_topic_.empty())
  {
    static std::vector<float> filtered_data;
    sensor_msgs::Image filtered_msg;
    filtered_msg.header = depth_msg->header;
    filtered_msg.height = depth_msg->height;
    filtered_msg.width = depth_msg->width;
    filtered_msg.encoding = sensor_msgs::image_encodings::TYPE_16UC1;
    filtered_msg.is_bigendian = depth_msg->is_bigendian;
    filtered_msg.step = depth_msg->step;
    filtered_msg.data.resize(img_size * sizeof(unsigned short));
    if (filtered_data.size() < img_size)
      filtered_data.resize(img_size);
    mesh_filter_->getFilteredDepth(reinterpret_cast<float*>(&filtered_data[0]));
    unsigned short* tmp_ptr = (unsigned short*)&filtered_msg.data[0];
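    // convert the filtered depth from meters (float) to millimeters (16UC1), rounding to the nearest integer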
    for (std::size_t i = 0; i < img_size; ++i)
    {
      tmp_ptr[i] = (unsigned short)(filtered_data[i] * 1000 + 0.5);
    }
    pub_filtered_depth_image_.publish(filtered_msg, *info_msg);
  }

  // figure out occupied cells and model cells
  tree_->lockRead();

  try
  {
    const int h_bound = h - skip_vertical_pixels_;
    const int w_bound = w - skip_horizontal_pixels_;

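    // iterate over the interior of the image, skipping the configured border of pixels;
    // background pixels become occupied cells, far-clip and model pixels become model cells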
    if (is_u_short)
    {
      const uint16_t* input_row = reinterpret_cast<const uint16_t*>(&depth_msg->data[0]);

      for (int y = skip_vertical_pixels_; y < h_bound; ++y, labels_row += w, input_row += w)
        for (int x = skip_horizontal_pixels_; x < w_bound; ++x)
        {
          // not filtered
          if (labels_row[x] == mesh_filter::MeshFilterBase::Background)
          {
            float zz = (float)input_row[x] * 1e-3;  // scale from mm to m
            float yy = y_cache_[y] * zz;
            float xx = x_cache_[x] * zz;
            /* transform to map frame */
            tf::Vector3 point_tf = map_H_sensor * tf::Vector3(xx, yy, zz);
            occupied_cells.insert(tree_->coordToKey(point_tf.getX(), point_tf.getY(), point_tf.getZ()));
          }
          // on far plane or a model point -> remove
          else if (labels_row[x] >= mesh_filter::MeshFilterBase::FarClip)
          {
            float zz = input_row[x] * 1e-3;
            float yy = y_cache_[y] * zz;
            float xx = x_cache_[x] * zz;
            /* transform to map frame */
            tf::Vector3 point_tf = map_H_sensor * tf::Vector3(xx, yy, zz);
            // add to the list of model cells
            model_cells.insert(tree_->coordToKey(point_tf.getX(), point_tf.getY(), point_tf.getZ()));
          }
        }
    }
    else
    {
      const float* input_row = reinterpret_cast<const float*>(&depth_msg->data[0]);

      for (int y = skip_vertical_pixels_; y < h_bound; ++y, labels_row += w, input_row += w)
        for (int x = skip_horizontal_pixels_; x < w_bound; ++x)
        {
          if (labels_row[x] == mesh_filter::MeshFilterBase::Background)
          {
            float zz = input_row[x];
            float yy = y_cache_[y] * zz;
            float xx = x_cache_[x] * zz;
            /* transform to map frame */
            tf::Vector3 point_tf = map_H_sensor * tf::Vector3(xx, yy, zz);
            occupied_cells.insert(tree_->coordToKey(point_tf.getX(), point_tf.getY(), point_tf.getZ()));
          }
          else if (labels_row[x] >= mesh_filter::MeshFilterBase::FarClip)
          {
            float zz = input_row[x];
            float yy = y_cache_[y] * zz;
            float xx = x_cache_[x] * zz;
            /* transform to map frame */
            tf::Vector3 point_tf = map_H_sensor * tf::Vector3(xx, yy, zz);
            // add to the list of model cells
            model_cells.insert(tree_->coordToKey(point_tf.getX(), point_tf.getY(), point_tf.getZ()));
          }
        }
    }
  }
  catch (...)
  {
    tree_->unlockRead();
    return;
  }
  tree_->unlockRead();

  /* cells that overlap with the model are not occupied */
  for (octomap::KeySet::iterator it = model_cells.begin(), end = model_cells.end(); it != end; ++it)
    occupied_cells.erase(*it);

  // mark occupied cells
  tree_->lockWrite();
  try
  {
    /* now mark all occupied cells */
    for (octomap::KeySet::iterator it = occupied_cells.begin(), end = occupied_cells.end(); it != end; ++it)
      tree_->updateNode(*it, true);
  }
  catch (...)
  {
    ROS_ERROR("Internal error while updating octree");
  }
  tree_->unlockWrite();
  tree_->triggerUpdateCallback();

  // at this point we still have not freed the space
  free_space_updater_->pushLazyUpdate(occupied_cells_ptr, model_cells_ptr, sensor_origin);

  ROS_DEBUG("Processed depth image in %lf ms", (ros::WallTime::now() - start).toSec() * 1000.0);
}
}

