depth_image_octomap_updater.cpp
1 /*********************************************************************
2  * Software License Agreement (BSD License)
3  *
4  * Copyright (c) 2011, Willow Garage, Inc.
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  * * Redistributions of source code must retain the above copyright
12  * notice, this list of conditions and the following disclaimer.
13  * * Redistributions in binary form must reproduce the above
14  * copyright notice, this list of conditions and the following
15  * disclaimer in the documentation and/or other materials provided
16  * with the distribution.
17  * * Neither the name of Willow Garage nor the names of its
18  * contributors may be used to endorse or promote products derived
19  * from this software without specific prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
25  * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
27  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
29  * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
31  * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32  * POSSIBILITY OF SUCH DAMAGE.
33  *********************************************************************/
34 
35 /* Author: Ioan Sucan, Suat Gedikli */
36 
41 #include <XmlRpcException.h>
42 #include <stdint.h>
43 
44 #include <memory>
45 
46 namespace occupancy_map_monitor
47 {
48 DepthImageOctomapUpdater::DepthImageOctomapUpdater()
49  : OccupancyMapUpdater("DepthImageUpdater")
50  , nh_("~")
51  , input_depth_transport_(nh_)
52  , model_depth_transport_(nh_)
53  , filtered_depth_transport_(nh_)
54  , filtered_label_transport_(nh_)
55  , image_topic_("depth")
56  , queue_size_(5)
57  , near_clipping_plane_distance_(0.3)
58  , far_clipping_plane_distance_(5.0)
59  , shadow_threshold_(0.04)
60  , padding_scale_(0.0)
61  , padding_offset_(0.02)
62  , skip_vertical_pixels_(4)
63  , skip_horizontal_pixels_(6)
64  , image_callback_count_(0)
65  , average_callback_dt_(0.0)
66  , good_tf_(5)
67  , // start optimistically, so we do not output warnings right from the beginning
68  failed_tf_(0)
69  , K0_(0.0)
70  , K2_(0.0)
71  , K4_(0.0)
72  , K5_(0.0)
73 {
74 }
75 
76 DepthImageOctomapUpdater::~DepthImageOctomapUpdater()
77 {
78  stopHelper();
79 }
80 
81 bool DepthImageOctomapUpdater::setParams(XmlRpc::XmlRpcValue& params)
82 {
83  try
84  {
85  sensor_type_ = (std::string)params["sensor_type"];
86  if (params.hasMember("image_topic"))
87  image_topic_ = (std::string)params["image_topic"];
88  if (params.hasMember("queue_size"))
89  queue_size_ = (int)params["queue_size"];
90 
91  readXmlParam(params, "near_clipping_plane_distance", &near_clipping_plane_distance_);
92  readXmlParam(params, "far_clipping_plane_distance", &far_clipping_plane_distance_);
93  readXmlParam(params, "shadow_threshold", &shadow_threshold_);
94  readXmlParam(params, "padding_scale", &padding_scale_);
95  readXmlParam(params, "padding_offset", &padding_offset_);
96  readXmlParam(params, "skip_vertical_pixels", &skip_vertical_pixels_);
97  readXmlParam(params, "skip_horizontal_pixels", &skip_horizontal_pixels_);
98  if (params.hasMember("filtered_cloud_topic"))
99  filtered_cloud_topic_ = static_cast<const std::string&>(params["filtered_cloud_topic"]);
100  }
101  catch (XmlRpc::XmlRpcException& ex)
102  {
103  ROS_ERROR("XmlRpc Exception: %s", ex.getMessage().c_str());
104  return false;
105  }
106 
107  return true;
108 }
109 
110 bool DepthImageOctomapUpdater::initialize()
111 {
112  tf_ = monitor_->getTFClient();
113  free_space_updater_.reset(new LazyFreeSpaceUpdater(tree_));
114 
115  // create our mesh filter
116  mesh_filter_.reset(new mesh_filter::MeshFilter<mesh_filter::StereoCameraModel>(
117  mesh_filter::MeshFilterBase::TransformCallback(), mesh_filter::StereoCameraModel::RegisteredPSDKParams));
118  mesh_filter_->parameters().setDepthRange(near_clipping_plane_distance_, far_clipping_plane_distance_);
119  mesh_filter_->setShadowThreshold(shadow_threshold_);
120  mesh_filter_->setPaddingOffset(padding_offset_);
121  mesh_filter_->setPaddingScale(padding_scale_);
122  mesh_filter_->setTransformCallback(boost::bind(&DepthImageOctomapUpdater::getShapeTransform, this, _1, _2));
123 
124  return true;
125 }
126 
127 void DepthImageOctomapUpdater::start()
128 {
129  image_transport::TransportHints hints("raw", ros::TransportHints(), nh_);
130  pub_model_depth_image_ = model_depth_transport_.advertiseCamera("model_depth", 10);
131 
132  if (!filtered_cloud_topic_.empty())
133  pub_filtered_depth_image_ = filtered_depth_transport_.advertiseCamera(filtered_cloud_topic_, 10);
134  else
135  pub_filtered_depth_image_ = filtered_depth_transport_.advertiseCamera("filtered_depth", 10);
136 
137  pub_filtered_label_image_ = filtered_label_transport_.advertiseCamera("filtered_label", 10);
138 
139  sub_depth_image_ = input_depth_transport_.subscribeCamera(image_topic_, queue_size_,
140  &DepthImageOctomapUpdater::depthImageCallback, this, hints);
141 }
142 
143 void DepthImageOctomapUpdater::stop()
144 {
145  stopHelper();
146 }
147 
148 void DepthImageOctomapUpdater::stopHelper()
149 {
150  sub_depth_image_.shutdown();
151 }
152 
153 mesh_filter::MeshHandle DepthImageOctomapUpdater::excludeShape(const shapes::ShapeConstPtr& shape)
154 {
155  mesh_filter::MeshHandle h = 0;
156  if (mesh_filter_)
157  {
158  if (shape->type == shapes::MESH)
159  h = mesh_filter_->addMesh(static_cast<const shapes::Mesh&>(*shape));
160  else
161  {
162  std::unique_ptr<shapes::Mesh> m(shapes::createMeshFromShape(shape.get()));
163  if (m)
164  h = mesh_filter_->addMesh(*m);
165  }
166  }
167  else
168  ROS_ERROR("Mesh filter not yet initialized!");
169  return h;
170 }
171 
172 void DepthImageOctomapUpdater::forgetShape(mesh_filter::MeshHandle handle)
173 {
174  if (mesh_filter_)
175  mesh_filter_->removeMesh(handle);
176 }
177 
178 bool DepthImageOctomapUpdater::getShapeTransform(mesh_filter::MeshHandle h, Eigen::Affine3d& transform) const
179 {
180  ShapeTransformCache::const_iterator it = transform_cache_.find(h);
181  if (it == transform_cache_.end())
182  {
183  ROS_ERROR("Internal error. Mesh filter handle %u not found", h);
184  return false;
185  }
186  transform = it->second;
187  return true;
188 }
189 
190 namespace
191 {
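  // byte-order probe: store the 32-bit pattern 0x01020304 and inspect the first byte in memory;
  // it reads back as 0x01 only on a big-endian host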
192 bool host_is_big_endian(void)
193 {
194  union
195  {
196  uint32_t i;
197  char c[sizeof(uint32_t)];
198  } bint = { 0x01020304 };
199  return bint.c[0] == 1;
200 }
201 }
202 
203 static const bool HOST_IS_BIG_ENDIAN = host_is_big_endian();
204 
205 void DepthImageOctomapUpdater::depthImageCallback(const sensor_msgs::ImageConstPtr& depth_msg,
206  const sensor_msgs::CameraInfoConstPtr& info_msg)
207 {
208  ROS_DEBUG("Received a new depth image message (frame = '%s', encoding='%s')", depth_msg->header.frame_id.c_str(),
209  depth_msg->encoding.c_str());
210 
211  ros::WallTime start = ros::WallTime::now();
212 
213  // measure the frequency at which we receive updates
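  // the running average of the inter-frame period computed here is used further down to decide
  // how long to keep retrying the TF lookup for each frame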
214  if (image_callback_count_ < 1000)
215  {
216  if (image_callback_count_ > 0)
217  {
218  const double dt_start = (start - last_depth_callback_start_).toSec();
219  if (image_callback_count_ < 2)
220  average_callback_dt_ = dt_start;
221  else
222  average_callback_dt_ =
223  ((image_callback_count_ - 1) * average_callback_dt_ + dt_start) / (double)image_callback_count_;
224  }
225  }
226  else
227  // every 1000 updates we reset the counter almost to the beginning (use 2 so we don't have so much of a ripple in
228  // the measured average)
229  image_callback_count_ = 2;
230  last_depth_callback_start_ = start;
231  ++image_callback_count_;
232 
233  if (monitor_->getMapFrame().empty())
234  monitor_->setMapFrame(depth_msg->header.frame_id);
235 
236  /* get transform for cloud into map frame */
237  tf::StampedTransform map_H_sensor;
238  if (monitor_->getMapFrame() == depth_msg->header.frame_id)
239  map_H_sensor.setIdentity();
240  else
241  {
242  if (tf_)
243  {
244  // wait at most 50ms
245  static const double TEST_DT = 0.005;
246  const int nt = (int)(0.5 + average_callback_dt_ / TEST_DT) * std::max(1, ((int)queue_size_ / 2));
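  // each failed attempt sleeps TEST_DT, so the total retry budget is roughly
  // average_callback_dt_ * max(1, queue_size_ / 2) before the frame is given up on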
247  bool found = false;
248  std::string err;
249  for (int t = 0; t < nt; ++t)
250  try
251  {
252  tf_->lookupTransform(monitor_->getMapFrame(), depth_msg->header.frame_id, depth_msg->header.stamp,
253  map_H_sensor);
254  found = true;
255  break;
256  }
257  catch (tf::TransformException& ex)
258  {
259  static const ros::Duration d(TEST_DT);
260  err = ex.what();
261  d.sleep();
262  }
263  static const unsigned int MAX_TF_COUNTER = 1000; // so we avoid int overflow
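  // when either counter grows too large, both are divided by the same factor, so the failure
  // percentage reported in the warning below stays meaningful while avoiding overflow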
264  if (found)
265  {
266  good_tf_++;
267  if (good_tf_ > MAX_TF_COUNTER)
268  {
269  const unsigned int div = MAX_TF_COUNTER / 10;
270  good_tf_ /= div;
271  failed_tf_ /= div;
272  }
273  }
274  else
275  {
276  failed_tf_++;
277  if (failed_tf_ > good_tf_)
278  ROS_WARN_THROTTLE(1, "More than half of the image messages were discarded due to TF being unavailable (%u%%). "
279  "Transform error of sensor data: %s; quitting callback.",
280  (100 * failed_tf_) / (good_tf_ + failed_tf_), err.c_str());
281  else
282  ROS_DEBUG_THROTTLE(1, "Transform error of sensor data: %s; quitting callback", err.c_str());
283  if (failed_tf_ > MAX_TF_COUNTER)
284  {
285  const unsigned int div = MAX_TF_COUNTER / 10;
286  good_tf_ /= div;
287  failed_tf_ /= div;
288  }
289  return;
290  }
291  }
292  else
293  return;
294  }
295 
296  if (!updateTransformCache(depth_msg->header.frame_id, depth_msg->header.stamp))
297  {
298  ROS_ERROR_THROTTLE(1, "Transform cache was not updated. Self-filtering may fail.");
299  return;
300  }
301 
302  if (depth_msg->is_bigendian && !HOST_IS_BIG_ENDIAN)
303  ROS_ERROR_THROTTLE(1, "endian problem: received image data does not match host");
304 
305  const int w = depth_msg->width;
306  const int h = depth_msg->height;
307 
308  // call the mesh filter
309  mesh_filter::StereoCameraModel::Parameters& params = mesh_filter_->parameters();
310  params.setCameraParameters(info_msg->K[0], info_msg->K[4], info_msg->K[2], info_msg->K[5]);
311  params.setImageSize(w, h);
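  // info_msg->K is the row-major 3x3 intrinsic matrix, so K[0] = fx, K[4] = fy, K[2] = cx and K[5] = cy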
312 
313  const bool is_u_short = depth_msg->encoding == sensor_msgs::image_encodings::TYPE_16UC1;
314  if (is_u_short)
315  mesh_filter_->filter(&depth_msg->data[0], GL_UNSIGNED_SHORT);
316  else
317  {
318  if (depth_msg->encoding != sensor_msgs::image_encodings::TYPE_32FC1)
319  {
320  ROS_ERROR_THROTTLE(1, "Unexpected encoding type: '%s'. Ignoring input.", depth_msg->encoding.c_str());
321  return;
322  }
323  mesh_filter_->filter(&depth_msg->data[0], GL_FLOAT);
324  }
325 
326  // the mesh filter runs in background; compute extra things in the meantime
327 
328  // Use correct principal point from calibration
329  const double px = info_msg->K[2];
330  const double py = info_msg->K[5];
331 
332  // if the camera parameters have changed at all, recompute the cache we had
333  if (w >= x_cache_.size() || h >= y_cache_.size() || K2_ != px || K5_ != py || K0_ != info_msg->K[0] ||
334  K4_ != info_msg->K[4])
335  {
336  K2_ = px;
337  K5_ = py;
338  K0_ = info_msg->K[0];
339  K4_ = info_msg->K[4];
340 
341  inv_fx_ = 1.0 / K0_;
342  inv_fy_ = 1.0 / K4_;
343 
344  // if there are any NaNs, discard data
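  // (NaN is the only value that compares unequal to itself, so any of these tests failing
  // means the calibration contains a NaN)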
345  if (!(px == px && py == py && inv_fx_ == inv_fx_ && inv_fy_ == inv_fy_))
346  return;
347 
348  // Pre-compute some constants
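  // x_cache_[u] = (u - cx) / fx and y_cache_[v] = (v - cy) / fy, so a pixel (u, v) with depth z
  // back-projects to the camera-frame point (x_cache_[u] * z, y_cache_[v] * z, z)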
349  if (x_cache_.size() < w)
350  x_cache_.resize(w);
351  if (y_cache_.size() < h)
352  y_cache_.resize(h);
353 
354  for (int x = 0; x < w; ++x)
355  x_cache_[x] = (x - px) * inv_fx_;
356 
357  for (int y = 0; y < h; ++y)
358  y_cache_[y] = (y - py) * inv_fy_;
359  }
360 
361  const octomap::point3d sensor_origin(map_H_sensor.getOrigin().getX(), map_H_sensor.getOrigin().getY(),
362  map_H_sensor.getOrigin().getZ());
363 
364  octomap::KeySet* occupied_cells_ptr = new octomap::KeySet();
365  octomap::KeySet* model_cells_ptr = new octomap::KeySet();
366  octomap::KeySet& occupied_cells = *occupied_cells_ptr;
367  octomap::KeySet& model_cells = *model_cells_ptr;
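  // both key sets are allocated on the heap because ownership is handed to the free-space
  // updater at the end of the callback (pushLazyUpdate), which is expected to release them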
368 
369  // allocate memory if needed
370  std::size_t img_size = h * w;
371  if (filtered_labels_.size() < img_size)
372  filtered_labels_.resize(img_size);
373 
374  // get the labels of the filtered data
375  const unsigned int* labels_row = &filtered_labels_[0];
376  mesh_filter_->getFilteredLabels(&filtered_labels_[0]);
377 
378  // publish debug information if needed
379  if (debug_info_)
380  {
381  sensor_msgs::Image debug_msg;
382  debug_msg.header = depth_msg->header;
383  debug_msg.height = depth_msg->height;
384  debug_msg.width = depth_msg->width;
385  debug_msg.encoding = sensor_msgs::image_encodings::TYPE_32FC1;
386  debug_msg.is_bigendian = depth_msg->is_bigendian;
387  debug_msg.step = depth_msg->step;
388  debug_msg.data.resize(img_size * sizeof(float));
389  mesh_filter_->getModelDepth(reinterpret_cast<float*>(&debug_msg.data[0]));
390  pub_model_depth_image_.publish(debug_msg, *info_msg);
391 
392  sensor_msgs::Image filtered_depth_msg;
393  filtered_depth_msg.header = depth_msg->header;
394  filtered_depth_msg.height = depth_msg->height;
395  filtered_depth_msg.width = depth_msg->width;
396  filtered_depth_msg.encoding = sensor_msgs::image_encodings::TYPE_32FC1;
397  filtered_depth_msg.is_bigendian = depth_msg->is_bigendian;
398  filtered_depth_msg.step = depth_msg->step;
399  filtered_depth_msg.data.resize(img_size * sizeof(float));
400 
401  mesh_filter_->getFilteredDepth(reinterpret_cast<float*>(&filtered_depth_msg.data[0]));
402  pub_filtered_depth_image_.publish(filtered_depth_msg, *info_msg);
403 
404  sensor_msgs::Image label_msg;
405  label_msg.header = depth_msg->header;
406  label_msg.height = depth_msg->height;
407  label_msg.width = depth_msg->width;
408  label_msg.encoding = sensor_msgs::image_encodings::RGBA8;
409  label_msg.is_bigendian = depth_msg->is_bigendian;
410  label_msg.step = w * sizeof(unsigned int);
411  label_msg.data.resize(img_size * sizeof(unsigned int));
412  mesh_filter_->getFilteredLabels(reinterpret_cast<unsigned int*>(&label_msg.data[0]));
413 
414  pub_filtered_label_image_.publish(label_msg, *info_msg);
415  }
416 
417  if (!filtered_cloud_topic_.empty())
418  {
419  static std::vector<float> filtered_data;
420  sensor_msgs::Image filtered_msg;
421  filtered_msg.header = depth_msg->header;
422  filtered_msg.height = depth_msg->height;
423  filtered_msg.width = depth_msg->width;
424  filtered_msg.encoding = sensor_msgs::image_encodings::TYPE_16UC1;
425  filtered_msg.is_bigendian = depth_msg->is_bigendian;
426  filtered_msg.step = depth_msg->step;
427  filtered_msg.data.resize(img_size * sizeof(unsigned short));
428  if (filtered_data.size() < img_size)
429  filtered_data.resize(img_size);
430  mesh_filter_->getFilteredDepth(reinterpret_cast<float*>(&filtered_data[0]));
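  // re-encode the filtered depth from 32-bit float meters to 16UC1 millimeters;
  // the + 0.5 rounds to the nearest millimeter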
431  unsigned short* tmp_ptr = (unsigned short*)&filtered_msg.data[0];
432  for (std::size_t i = 0; i < img_size; ++i)
433  {
434  tmp_ptr[i] = (unsigned short)(filtered_data[i] * 1000 + 0.5);
435  }
436  pub_filtered_depth_image_.publish(filtered_msg, *info_msg);
437  }
438 
439  // figure out occupied cells and model cells
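  // pixels labelled Background passed the mesh filter (they hit no part of the padded robot model)
  // and are treated as real obstacles; labels at or above FarClip belong to the far plane or the
  // robot model and are only collected so the corresponding cells can be discounted below;
  // all other labels (shadow, near clip) are ignored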
440  tree_->lockRead();
441 
442  try
443  {
444  const int h_bound = h - skip_vertical_pixels_;
445  const int w_bound = w - skip_horizontal_pixels_;
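  // the skip_* parameters leave an unprocessed border around the depth image,
  // which cheaply reduces the number of pixels converted per frame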
446 
447  if (is_u_short)
448  {
449  const uint16_t* input_row = reinterpret_cast<const uint16_t*>(&depth_msg->data[0]);
450 
451  for (int y = skip_vertical_pixels_; y < h_bound; ++y, labels_row += w, input_row += w)
452  for (int x = skip_horizontal_pixels_; x < w_bound; ++x)
453  {
454  // not filtered
455  if (labels_row[x] == mesh_filter::MeshFilterBase::Background)
456  {
457  float zz = (float)input_row[x] * 1e-3; // scale from mm to m
458  float yy = y_cache_[y] * zz;
459  float xx = x_cache_[x] * zz;
460  /* transform to map frame */
461  tf::Vector3 point_tf = map_H_sensor * tf::Vector3(xx, yy, zz);
462  occupied_cells.insert(tree_->coordToKey(point_tf.getX(), point_tf.getY(), point_tf.getZ()));
463  }
464  // on far plane or a model point -> remove
465  else if (labels_row[x] >= mesh_filter::MeshFilterBase::FarClip)
466  {
467  float zz = input_row[x] * 1e-3;
468  float yy = y_cache_[y] * zz;
469  float xx = x_cache_[x] * zz;
470  /* transform to map frame */
471  tf::Vector3 point_tf = map_H_sensor * tf::Vector3(xx, yy, zz);
472  // add to the list of model cells
473  model_cells.insert(tree_->coordToKey(point_tf.getX(), point_tf.getY(), point_tf.getZ()));
474  }
475  }
476  }
477  else
478  {
479  const float* input_row = reinterpret_cast<const float*>(&depth_msg->data[0]);
480 
481  for (int y = skip_vertical_pixels_; y < h_bound; ++y, labels_row += w, input_row += w)
482  for (int x = skip_horizontal_pixels_; x < w_bound; ++x)
483  {
484  if (labels_row[x] == mesh_filter::MeshFilterBase::Background)
485  {
486  float zz = input_row[x];
487  float yy = y_cache_[y] * zz;
488  float xx = x_cache_[x] * zz;
489  /* transform to map frame */
490  tf::Vector3 point_tf = map_H_sensor * tf::Vector3(xx, yy, zz);
491  occupied_cells.insert(tree_->coordToKey(point_tf.getX(), point_tf.getY(), point_tf.getZ()));
492  }
493  else if (labels_row[x] >= mesh_filter::MeshFilterBase::FarClip)
494  {
495  float zz = input_row[x];
496  float yy = y_cache_[y] * zz;
497  float xx = x_cache_[x] * zz;
498  /* transform to map frame */
499  tf::Vector3 point_tf = map_H_sensor * tf::Vector3(xx, yy, zz);
500  // add to the list of model cells
501  model_cells.insert(tree_->coordToKey(point_tf.getX(), point_tf.getY(), point_tf.getZ()));
502  }
503  }
504  }
505  }
506  catch (...)
507  {
508  tree_->unlockRead();
509  ROS_ERROR("Internal error while parsing depth data");
510  return;
511  }
512  tree_->unlockRead();
513 
514  /* cells that overlap with the model are not occupied */
515  for (octomap::KeySet::iterator it = model_cells.begin(), end = model_cells.end(); it != end; ++it)
516  occupied_cells.erase(*it);
517 
518  // mark occupied cells
519  tree_->lockWrite();
520  try
521  {
522  /* now mark all occupied cells */
523  for (octomap::KeySet::iterator it = occupied_cells.begin(), end = occupied_cells.end(); it != end; ++it)
524  tree_->updateNode(*it, true);
525  }
526  catch (...)
527  {
528  ROS_ERROR("Internal error while updating octree");
529  }
530  tree_->unlockWrite();
531  tree_->triggerUpdateCallback();
532 
533  // at this point we still have not freed the space
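  // clearing free space requires casting rays from sensor_origin through the measured cells,
  // which is expensive, so it is handed off to the LazyFreeSpaceUpdater instead of being done
  // in this callback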
534  free_space_updater_->pushLazyUpdate(occupied_cells_ptr, model_cells_ptr, sensor_origin);
535 
536  ROS_DEBUG("Processed depth image in %lf ms", (ros::WallTime::now() - start).toSec() * 1000.0);
537 }
538 }