depth_layer.cpp
/*
 * Copyright (c) 2015-2016, Fetch Robotics Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * * Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * * Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * * Neither the name of the Fetch Robotics Inc. nor the names of its
 *   contributors may be used to endorse or promote products derived from
 *   this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL FETCH ROBOTICS INC. BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

// Author: Anuj Pasricha, Michael Ferguson

#include <fetch_depth_layer/depth_layer.h>
#include <pluginlib/class_list_macros.h>
#include <limits>
#include <geometry_msgs/Vector3Stamped.h>

PLUGINLIB_EXPORT_CLASS(costmap_2d::FetchDepthLayer, costmap_2d::Layer)

namespace costmap_2d
{

FetchDepthLayer::FetchDepthLayer()
{
}

void FetchDepthLayer::onInitialize()
{
  VoxelLayer::onInitialize();

  double observation_keep_time;
  double expected_update_rate;
  double transform_tolerance;
  double obstacle_range;
  double raytrace_range;
  double min_obstacle_height;
  double max_obstacle_height;
  double min_clearing_height;
  double max_clearing_height;
  std::string topic = "";
  std::string sensor_frame = "";

  ros::NodeHandle private_nh("~/" + name_);

  private_nh.param("publish_observations", publish_observations_, false);
  private_nh.param("observations_separation_threshold", observations_threshold_, 0.06);

  // Optionally detect the ground plane
  private_nh.param("find_ground_plane", find_ground_plane_, true);
  private_nh.param("ground_orientation_threshold", ground_threshold_, 0.9);

  // Should NANs be used as clearing observations?
  private_nh.param("clear_nans", clear_nans_, false);

  // Observation range values for both marking and clearing
  private_nh.param("min_obstacle_height", min_obstacle_height, 0.0);
  private_nh.param("max_obstacle_height", max_obstacle_height, 2.0);
  private_nh.param("min_clearing_height", min_clearing_height, -std::numeric_limits<double>::infinity());
  private_nh.param("max_clearing_height", max_clearing_height, std::numeric_limits<double>::infinity());

  // Skipping of potentially noisy rays near the edge of the image
  private_nh.param("skip_rays_bottom", skip_rays_bottom_, 20);
  private_nh.param("skip_rays_top", skip_rays_top_, 20);
  private_nh.param("skip_rays_left", skip_rays_left_, 20);
  private_nh.param("skip_rays_right", skip_rays_right_, 20);

  // Should skipped edge rays be used for clearing?
  private_nh.param("clear_with_skipped_rays", clear_with_skipped_rays_, false);

  // How long should observations persist?
  private_nh.param("observation_persistence", observation_keep_time, 0.0);

  // How often should we expect to get sensor updates?
  private_nh.param("expected_update_rate", expected_update_rate, 0.0);

  // How long to wait for transforms to be available?
  private_nh.param("transform_tolerance", transform_tolerance, 0.5);

  std::string raytrace_range_param_name, obstacle_range_param_name;

  // get the obstacle range for the sensor
  obstacle_range = 2.5;
  if (private_nh.searchParam("obstacle_range", obstacle_range_param_name))
  {
    private_nh.getParam(obstacle_range_param_name, obstacle_range);
  }

  // get the raytrace range for the sensor
  raytrace_range = 3.0;
  if (private_nh.searchParam("raytrace_range", raytrace_range_param_name))
  {
    private_nh.getParam(raytrace_range_param_name, raytrace_range);
  }

  // Create the observation buffer used for marking obstacles
  marking_buf_ = boost::shared_ptr<costmap_2d::ObservationBuffer>(
    new costmap_2d::ObservationBuffer(topic, observation_keep_time,
      expected_update_rate, min_obstacle_height, max_obstacle_height,
      obstacle_range, raytrace_range, *tf_, global_frame_,
      sensor_frame, transform_tolerance));
  marking_buffers_.push_back(marking_buf_);
  observation_buffers_.push_back(marking_buf_);

  min_obstacle_height = 0.0;

  // Create the observation buffer used for clearing with the ground plane
  clearing_buf_ = boost::shared_ptr<costmap_2d::ObservationBuffer>(
    new costmap_2d::ObservationBuffer(topic, observation_keep_time,
      expected_update_rate, min_clearing_height, max_clearing_height,
      obstacle_range, raytrace_range, *tf_, global_frame_,
      sensor_frame, transform_tolerance));
  clearing_buffers_.push_back(clearing_buf_);
  observation_buffers_.push_back(clearing_buf_);

  if (publish_observations_)
  {
    clearing_pub_ = private_nh.advertise<sensor_msgs::PointCloud>("clearing_obs", 1);
    marking_pub_ = private_nh.advertise<sensor_msgs::PointCloud>("marking_obs", 1);
  }

  // subscribe to camera/info topics
  std::string camera_depth_topic, camera_info_topic;
  private_nh.param("depth_topic", camera_depth_topic,
                   std::string("/head_camera/depth_downsample/image_raw"));
  private_nh.param("info_topic", camera_info_topic,
                   std::string("/head_camera/depth_downsample/camera_info"));
  camera_info_sub_ = private_nh.subscribe<sensor_msgs::CameraInfo>(
    camera_info_topic, 10, &FetchDepthLayer::cameraInfoCallback, this);

  depth_image_sub_.reset(new message_filters::Subscriber<sensor_msgs::Image>(private_nh, camera_depth_topic, 10));
  depth_image_filter_ = boost::shared_ptr< tf2_ros::MessageFilter<sensor_msgs::Image> >(
    new tf2_ros::MessageFilter<sensor_msgs::Image>(*depth_image_sub_, *tf_, global_frame_, 10, private_nh));
  depth_image_filter_->registerCallback(
    boost::bind(&FetchDepthLayer::depthImageCallback, this, _1));
  observation_subscribers_.push_back(depth_image_sub_);
  observation_notifiers_.push_back(depth_image_filter_);
  observation_notifiers_.back()->setTolerance(ros::Duration(0.05));
}

FetchDepthLayer::~FetchDepthLayer()
{
}

void FetchDepthLayer::cameraInfoCallback(
  const sensor_msgs::CameraInfo::ConstPtr& msg)
{
  // Lock mutex before updating K
  boost::unique_lock<boost::mutex> lock(mutex_K_);

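  // P is the 3x4 row-major projection matrix from the CameraInfo message,
  // so P[0] = fx, P[2] = cx and P[6] = cy.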
  float focal_pixels_ = msg->P[0];
  float center_x_ = msg->P[2];
  float center_y_ = msg->P[6];

  if (msg->binning_x == msg->binning_y)
  {
    if (msg->binning_x > 0)
    {
      K_ = (cv::Mat_<double>(3, 3) <<
        focal_pixels_/msg->binning_x, 0.0, center_x_/msg->binning_x,
        0.0, focal_pixels_/msg->binning_x, center_y_/msg->binning_x,
        0.0, 0.0, 1.0);
    }
    else
    {
      K_ = (cv::Mat_<double>(3, 3) <<
        focal_pixels_, 0.0, center_x_,
        0.0, focal_pixels_, center_y_,
        0.0, 0.0, 1.0);
    }
  }
  else
  {
    ROS_ERROR("binning_x is not equal to binning_y");
  }
}

void FetchDepthLayer::depthImageCallback(
  const sensor_msgs::Image::ConstPtr& msg)
{
  // Lock mutex before using K
  boost::unique_lock<boost::mutex> lock(mutex_K_);

  if (K_.empty())
  {
    ROS_DEBUG_NAMED("depth_layer", "Camera info not yet received.");
    return;
  }

  cv_bridge::CvImagePtr cv_ptr;
  try
  {
    cv_ptr = cv_bridge::toCvCopy(msg, sensor_msgs::image_encodings::TYPE_32FC1);
  }
  catch (cv_bridge::Exception& e)
  {
    ROS_ERROR("cv_bridge exception: %s", e.what());
    return;
  }

  // Clear with NANs?
  if (clear_nans_)
  {
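    // Replace NaN readings with a large depth (25 m) so the corresponding rays
    // act as clearing observations out to the raytrace range instead of being dropped.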
    for (int i = 0; i < cv_ptr->image.rows * cv_ptr->image.cols; i++)
    {
      if (std::isnan(cv_ptr->image.at<float>(i)))
        cv_ptr->image.at<float>(i) = 25.0;
    }
  }

  // Convert to 3d
  cv::Mat points3d;
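  // depthTo3d back-projects the depth image through the intrinsic matrix K_ into a
  // 3-channel image of (x, y, z) points expressed in the camera's optical frame.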
  depthTo3d(cv_ptr->image, K_, points3d);

  // Determine ground plane, either through camera or TF
  cv::Vec4f ground_plane;
  if (find_ground_plane_)
  {
    // Get normals
    if (normals_estimator_.empty())
    {
      normals_estimator_.reset(new RgbdNormals(cv_ptr->image.rows,
                                               cv_ptr->image.cols,
                                               cv_ptr->image.depth(),
                                               K_));
    }
    cv::Mat normals;
    (*normals_estimator_)(points3d, normals);

    // Find plane(s)
    if (plane_estimator_.empty())
    {
      plane_estimator_.reset(new RgbdPlane());
      // Model parameters are based on notes in opencv_candidate
      plane_estimator_->setSensorErrorA(0.0075);
      plane_estimator_->setSensorErrorB(0.0);
      plane_estimator_->setSensorErrorC(0.0);
      // Image/cloud height/width must be multiple of block size
      plane_estimator_->setBlockSize(40);
      // Distance a point can be from plane and still be part of it
      plane_estimator_->setThreshold(observations_threshold_);
      // Minimum cluster size to be a plane
      plane_estimator_->setMinSize(1000);
    }
    cv::Mat planes_mask;
    std::vector<cv::Vec4f> plane_coefficients;
    (*plane_estimator_)(points3d, normals, planes_mask, plane_coefficients);

    for (size_t i = 0; i < plane_coefficients.size(); i++)
    {
      // check plane orientation
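      // In the camera optical frame (x right, y down, z forward) the floor normal
      // points roughly along -y, so a ground plane has coefficients near (0, -1, 0).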
      if ((fabs(0.0 - plane_coefficients[i][0]) <= ground_threshold_) &&
          (fabs(1.0 + plane_coefficients[i][1]) <= ground_threshold_) &&
          (fabs(0.0 - plane_coefficients[i][2]) <= ground_threshold_))
      {
        ground_plane = plane_coefficients[i];
        break;
      }
    }
  }
  else
  {
    // find ground plane in camera coordinates using tf
    // transform normal axis
    geometry_msgs::Vector3Stamped vector;
    vector.vector.x = 0;
    vector.vector.y = 0;
    vector.vector.z = 1;
    vector.header.frame_id = "base_link";
    vector.header.stamp = ros::Time();
    tf_->transform(vector, vector, msg->header.frame_id);
    ground_plane[0] = vector.vector.x;
    ground_plane[1] = vector.vector.y;
    ground_plane[2] = vector.vector.z;

    // find offset
    geometry_msgs::TransformStamped transform;
    try
    {
      transform = tf_->lookupTransform("base_link", msg->header.frame_id, msg->header.stamp);
      ground_plane[3] = transform.transform.translation.z;
    }
    catch (tf2::TransformException&)
    {
      ROS_WARN("Failed to lookup transform!");
      return;
    }
  }

  // check that ground plane actually exists, so it doesn't count as marking observations
  if (ground_plane[0] == 0.0 && ground_plane[1] == 0.0 &&
      ground_plane[2] == 0.0 && ground_plane[3] == 0.0)
  {
    ROS_DEBUG_NAMED("depth_layer", "Invalid ground plane.");
    return;
  }

  cv::Mat channels[3];
  cv::split(points3d, channels);
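  // After the split, channels[0], channels[1] and channels[2] hold the per-pixel
  // x, y and z coordinates of each point in the camera frame.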

  sensor_msgs::PointCloud clearing_points;
  clearing_points.header.stamp = msg->header.stamp;
  clearing_points.header.frame_id = msg->header.frame_id;

  sensor_msgs::PointCloud marking_points;
  marking_points.header.stamp = msg->header.stamp;
  marking_points.header.frame_id = msg->header.frame_id;

  // Put points in clearing/marking clouds
  for (size_t i = 0; i < points3d.rows; i++)
  {
    for (size_t j = 0; j < points3d.cols; j++)
    {
      // Get next point
      geometry_msgs::Point32 current_point;
      current_point.x = channels[0].at<float>(i, j);
      current_point.y = channels[1].at<float>(i, j);
      current_point.z = channels[2].at<float>(i, j);
      // Check point validity
      if (current_point.x != 0.0 &&
          current_point.y != 0.0 &&
          current_point.z != 0.0 &&
          !std::isnan(current_point.x) &&
          !std::isnan(current_point.y) &&
          !std::isnan(current_point.z))
      {
        if (clear_with_skipped_rays_)
        {
          // If edge rays are to be used for clearing, go ahead and add them now.
          clearing_points.points.push_back(current_point);
        }

        // Do not consider boundary points for obstacle marking since they are very noisy.
        if (i < skip_rays_top_ ||
            i >= points3d.rows - skip_rays_bottom_ ||
            j < skip_rays_left_ ||
            j >= points3d.cols - skip_rays_right_)
        {
          continue;
        }

        if (!clear_with_skipped_rays_)
        {
          // If edge rays are not to be used for clearing, only add them after the edge check.
          clearing_points.points.push_back(current_point);
        }

        // Check if point is part of the ground plane
        if (fabs(ground_plane[0] * current_point.x +
                 ground_plane[1] * current_point.y +
                 ground_plane[2] * current_point.z +
                 ground_plane[3]) <= observations_threshold_)
        {
          continue;  // Do not mark points near the floor.
        }

        // Check for outliers, mark non-outliers as obstacles.
        int num_valid = 0;
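        // Count how many of the 8 neighbouring pixels lie within 10 cm on each axis;
        // an isolated return with few close neighbours is treated as speckle noise.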
        for (int x = -1; x < 2; x++)
        {
          for (int y = -1; y < 2; y++)
          {
            if (x == 0 && y == 0)
            {
              continue;
            }
            float px = channels[0].at<float>(i+x, j+y);
            float py = channels[1].at<float>(i+x, j+y);
            float pz = channels[2].at<float>(i+x, j+y);
            if (px != 0.0 && py != 0.0 && pz != 0.0 &&
                !std::isnan(px) && !std::isnan(py) && !std::isnan(pz))
            {
              if (fabs(px - current_point.x) < 0.1 &&
                  fabs(py - current_point.y) < 0.1 &&
                  fabs(pz - current_point.z) < 0.1)
              {
                num_valid++;
              }
            }
          }  // for y
        }  // for x

        if (num_valid >= 7)
        {
          marking_points.points.push_back(current_point);
        }
      }  // if point is valid
    }  // for j (columns)
  }  // for i (rows)

  // Publish and buffer our clearing point cloud
  if (publish_observations_)
  {
    clearing_pub_.publish(clearing_points);
  }

  sensor_msgs::PointCloud2 clearing_cloud2;
  if (!sensor_msgs::convertPointCloudToPointCloud2(clearing_points, clearing_cloud2))
  {
    ROS_ERROR("Failed to convert a PointCloud to a PointCloud2, dropping message");
    return;
  }

  // buffer the ground plane observation
  clearing_buf_->lock();
  clearing_buf_->bufferCloud(clearing_cloud2);
  clearing_buf_->unlock();

  // Publish and buffer our marking point cloud
  if (publish_observations_)
  {
    marking_pub_.publish(marking_points);
  }

  sensor_msgs::PointCloud2 marking_cloud2;
  if (!sensor_msgs::convertPointCloudToPointCloud2(marking_points, marking_cloud2))
  {
    ROS_ERROR("Failed to convert a PointCloud to a PointCloud2, dropping message");
    return;
  }

  // buffer the marking observation
  marking_buf_->lock();
  marking_buf_->bufferCloud(marking_cloud2);
  marking_buf_->unlock();
}

}  // namespace costmap_2d