gazebo_ros_openni_kinect.cpp
/*
 * Copyright 2013 Open Source Robotics Foundation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
*/

/*
 Desc: GazeboRosOpenniKinect plugin for simulating cameras in Gazebo
 Author: John Hsu
 Date: 24 Sept 2008
 */

#include <algorithm>
#include <assert.h>
#include <boost/thread/thread.hpp>
#include <boost/bind.hpp>

#include <gazebo_plugins/gazebo_ros_openni_kinect.h>

#include <gazebo/sensors/Sensor.hh>
#include <sdf/sdf.hh>
#include <gazebo/sensors/SensorTypes.hh>

#include <sensor_msgs/point_cloud2_iterator.h>

#include <tf/tf.h>

namespace gazebo
{
// Register this plugin with the simulator
GZ_REGISTER_SENSOR_PLUGIN(GazeboRosOpenniKinect)

// Constructor
GazeboRosOpenniKinect::GazeboRosOpenniKinect()
{
  this->point_cloud_connect_count_ = 0;
  this->depth_info_connect_count_ = 0;
  this->depth_image_connect_count_ = 0;
  this->last_depth_image_camera_info_update_time_ = common::Time(0);
}

// Destructor
GazeboRosOpenniKinect::~GazeboRosOpenniKinect()
{
}

// Load the controller
void GazeboRosOpenniKinect::Load(sensors::SensorPtr _parent, sdf::ElementPtr _sdf)
{
  DepthCameraPlugin::Load(_parent, _sdf);

  // copying from DepthCameraPlugin into GazeboRosCameraUtils
  this->parentSensor_ = this->parentSensor;
  this->width_ = this->width;
  this->height_ = this->height;
  this->depth_ = this->depth;
  this->format_ = this->format;
  this->camera_ = this->depthCamera;

  // using a different default
  if (!_sdf->HasElement("imageTopicName"))
    this->image_topic_name_ = "ir/image_raw";
  if (!_sdf->HasElement("cameraInfoTopicName"))
    this->camera_info_topic_name_ = "ir/camera_info";

  // point cloud stuff
  if (!_sdf->HasElement("pointCloudTopicName"))
    this->point_cloud_topic_name_ = "points";
  else
    this->point_cloud_topic_name_ = _sdf->GetElement("pointCloudTopicName")->Get<std::string>();

  // depth image stuff
  if (!_sdf->HasElement("depthImageTopicName"))
    this->depth_image_topic_name_ = "depth/image_raw";
  else
    this->depth_image_topic_name_ = _sdf->GetElement("depthImageTopicName")->Get<std::string>();

  if (!_sdf->HasElement("depthImageCameraInfoTopicName"))
    this->depth_image_camera_info_topic_name_ = "depth/camera_info";
  else
    this->depth_image_camera_info_topic_name_ = _sdf->GetElement("depthImageCameraInfoTopicName")->Get<std::string>();

  if (!_sdf->HasElement("pointCloudCutoff"))
    this->point_cloud_cutoff_ = 0.4;
  else
    this->point_cloud_cutoff_ = _sdf->GetElement("pointCloudCutoff")->Get<double>();
  if (!_sdf->HasElement("pointCloudCutoffMax"))
    this->point_cloud_cutoff_max_ = 5.0;
  else
    this->point_cloud_cutoff_max_ = _sdf->GetElement("pointCloudCutoffMax")->Get<double>();

  load_connection_ = GazeboRosCameraUtils::OnLoad(boost::bind(&GazeboRosOpenniKinect::Advertise, this));
  GazeboRosCameraUtils::Load(_parent, _sdf);
}
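
// Example SDF usage (an illustrative sketch, not part of this file): the
// element names and defaults below are exactly those parsed in Load() above;
// the plugin name "camera_controller" is an arbitrary choice, and common
// camera elements such as <cameraName>, <frameName>, or <updateRate> are
// handled by the shared GazeboRosCameraUtils::Load() call rather than here.
//
//   <plugin name="camera_controller" filename="libgazebo_ros_openni_kinect.so">
//     <imageTopicName>ir/image_raw</imageTopicName>
//     <cameraInfoTopicName>ir/camera_info</cameraInfoTopicName>
//     <pointCloudTopicName>points</pointCloudTopicName>
//     <depthImageTopicName>depth/image_raw</depthImageTopicName>
//     <depthImageCameraInfoTopicName>depth/camera_info</depthImageCameraInfoTopicName>
//     <pointCloudCutoff>0.4</pointCloudCutoff>
//     <pointCloudCutoffMax>5.0</pointCloudCutoffMax>
//   </plugin>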

void GazeboRosOpenniKinect::Advertise()
{
  ros::AdvertiseOptions point_cloud_ao =
    ros::AdvertiseOptions::create<sensor_msgs::PointCloud2>(
      this->point_cloud_topic_name_, 1,
      boost::bind(&GazeboRosOpenniKinect::PointCloudConnect, this),
      boost::bind(&GazeboRosOpenniKinect::PointCloudDisconnect, this),
      ros::VoidPtr(), &this->camera_queue_);
  this->point_cloud_pub_ = this->rosnode_->advertise(point_cloud_ao);

  ros::AdvertiseOptions depth_image_ao =
    ros::AdvertiseOptions::create<sensor_msgs::Image>(
      this->depth_image_topic_name_, 1,
      boost::bind(&GazeboRosOpenniKinect::DepthImageConnect, this),
      boost::bind(&GazeboRosOpenniKinect::DepthImageDisconnect, this),
      ros::VoidPtr(), &this->camera_queue_);
  this->depth_image_pub_ = this->rosnode_->advertise(depth_image_ao);

  ros::AdvertiseOptions depth_image_camera_info_ao =
    ros::AdvertiseOptions::create<sensor_msgs::CameraInfo>(
      this->depth_image_camera_info_topic_name_, 1,
      boost::bind(&GazeboRosOpenniKinect::DepthInfoConnect, this),
      boost::bind(&GazeboRosOpenniKinect::DepthInfoDisconnect, this),
      ros::VoidPtr(), &this->camera_queue_);
  this->depth_image_camera_info_pub_ = this->rosnode_->advertise(depth_image_camera_info_ao);
}
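
// The connect/disconnect callbacks registered above implement lazy sensor
// activation: the handlers below keep per-topic subscriber counts and call
// SetActive() on the underlying depth sensor, so depth rendering work is only
// done while at least one subscriber is connected.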

// Increment count
void GazeboRosOpenniKinect::PointCloudConnect()
{
  this->point_cloud_connect_count_++;
  (*this->image_connect_count_)++;
  this->parentSensor->SetActive(true);
}

// Decrement count
void GazeboRosOpenniKinect::PointCloudDisconnect()
{
  this->point_cloud_connect_count_--;
  (*this->image_connect_count_)--;
  if (this->point_cloud_connect_count_ <= 0)
    this->parentSensor->SetActive(false);
}

// Increment count
void GazeboRosOpenniKinect::DepthImageConnect()
{
  this->depth_image_connect_count_++;
  this->parentSensor->SetActive(true);
}

// Decrement count
void GazeboRosOpenniKinect::DepthImageDisconnect()
{
  this->depth_image_connect_count_--;
}

// Increment count
void GazeboRosOpenniKinect::DepthInfoConnect()
{
  this->depth_info_connect_count_++;
}

// Decrement count
void GazeboRosOpenniKinect::DepthInfoDisconnect()
{
  this->depth_info_connect_count_--;
}

// Update the controller
void GazeboRosOpenniKinect::OnNewDepthFrame(const float *_image,
    unsigned int _width, unsigned int _height, unsigned int _depth,
    const std::string &_format)
{
  if (!this->initialized_ || this->height_ <= 0 || this->width_ <= 0)
    return;

  this->depth_sensor_update_time_ = this->parentSensor->LastMeasurementTime();
  if (this->parentSensor->IsActive())
  {
    if (this->point_cloud_connect_count_ <= 0 &&
        this->depth_image_connect_count_ <= 0 &&
        (*this->image_connect_count_) <= 0)
    {
      this->parentSensor->SetActive(false);
    }
    else
    {
      if (this->point_cloud_connect_count_ > 0)
        this->FillPointdCloud(_image);

      if (this->depth_image_connect_count_ > 0)
        this->FillDepthImage(_image);
    }
  }
  else
  {
    if (this->point_cloud_connect_count_ > 0 ||
        this->depth_image_connect_count_ <= 0)
      // do this first so there's chance for sensor to run 1 frame after activate
      this->parentSensor->SetActive(true);
  }
  PublishCameraInfo();
}
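
// Note on the logic above: when no point cloud, depth image, or raw image
// subscribers remain, the sensor is deactivated to skip further depth
// rendering; otherwise, if the sensor is found inactive, it is re-activated
// first so it has a chance to produce a frame on the next iteration.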

// Update the controller
void GazeboRosOpenniKinect::OnNewImageFrame(const unsigned char *_image,
    unsigned int _width, unsigned int _height, unsigned int _depth,
    const std::string &_format)
{
  if (!this->initialized_ || this->height_ <= 0 || this->width_ <= 0)
    return;

  //ROS_ERROR_NAMED("openni_kinect", "camera_ new frame %s %s",this->parentSensor_->Name().c_str(),this->frame_name_.c_str());
  this->sensor_update_time_ = this->parentSensor_->LastMeasurementTime();

  if (this->parentSensor->IsActive())
  {
    if (this->point_cloud_connect_count_ <= 0 &&
        this->depth_image_connect_count_ <= 0 &&
        (*this->image_connect_count_) <= 0)
    {
      this->parentSensor->SetActive(false);
    }
    else
    {
      if ((*this->image_connect_count_) > 0)
        this->PutCameraData(_image);
    }
  }
  else
  {
    if ((*this->image_connect_count_) > 0)
      // do this first so there's chance for sensor to run 1 frame after activate
      this->parentSensor->SetActive(true);
  }
}

// Put point cloud data to the interface
void GazeboRosOpenniKinect::FillPointdCloud(const float *_src)
{
  this->lock_.lock();

  this->point_cloud_msg_.header.frame_id = this->frame_name_;
  this->point_cloud_msg_.header.stamp.sec = this->depth_sensor_update_time_.sec;
  this->point_cloud_msg_.header.stamp.nsec = this->depth_sensor_update_time_.nsec;
  this->point_cloud_msg_.width = this->width;
  this->point_cloud_msg_.height = this->height;
  this->point_cloud_msg_.row_step = this->point_cloud_msg_.point_step * this->width;

  // copy from depth to point cloud message
  FillPointCloudHelper(this->point_cloud_msg_,
                       this->height,
                       this->width,
                       this->skip_,
                       (void*)_src);

  this->point_cloud_pub_.publish(this->point_cloud_msg_);

  this->lock_.unlock();
}

// Put depth image data to the interface
void GazeboRosOpenniKinect::FillDepthImage(const float *_src)
{
  this->lock_.lock();
  // copy data into image
  this->depth_image_msg_.header.frame_id = this->frame_name_;
  this->depth_image_msg_.header.stamp.sec = this->depth_sensor_update_time_.sec;
  this->depth_image_msg_.header.stamp.nsec = this->depth_sensor_update_time_.nsec;

  // copy from depth to depth image message
  FillDepthImageHelper(this->depth_image_msg_,
                       this->height,
                       this->width,
                       this->skip_,
                       (void*)_src);

  this->depth_image_pub_.publish(this->depth_image_msg_);

  this->lock_.unlock();
}

// Fill depth information
bool GazeboRosOpenniKinect::FillPointCloudHelper(
    sensor_msgs::PointCloud2 &point_cloud_msg,
    uint32_t rows_arg, uint32_t cols_arg,
    uint32_t step_arg, void* data_arg)
{
  sensor_msgs::PointCloud2Modifier pcd_modifier(point_cloud_msg);
  pcd_modifier.setPointCloud2FieldsByString(2, "xyz", "rgb");
  // convert to flat array shape, we need to reconvert later
  pcd_modifier.resize(rows_arg*cols_arg);
  point_cloud_msg.is_dense = true;

  sensor_msgs::PointCloud2Iterator<float> iter_x(point_cloud_msg, "x");
  sensor_msgs::PointCloud2Iterator<float> iter_y(point_cloud_msg, "y");
  sensor_msgs::PointCloud2Iterator<float> iter_z(point_cloud_msg, "z");
  sensor_msgs::PointCloud2Iterator<uint8_t> iter_rgb(point_cloud_msg, "rgb");

  float* toCopyFrom = (float*)data_arg;
  int index = 0;

  double hfov = this->parentSensor->DepthCamera()->HFOV().Radian();
  double fl = ((double)this->width) / (2.0 * tan(hfov/2.0));

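  // The loop below back-projects each depth reading through an ideal pinhole
  // model: fl (computed above) is the focal length in pixels for the camera's
  // horizontal field of view,
  //   fl = width / (2 * tan(hfov / 2)),
  // and a reading d at pixel (i, j) with ray angles yAngle and pAngle becomes
  // the optical-frame point x = d * tan(yAngle), y = d * tan(pAngle), z = d.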
  // convert depth to point cloud
  for (uint32_t j = 0; j < rows_arg; j++)
  {
    double pAngle;
    if (rows_arg > 1) pAngle = atan2((double)j - 0.5*(double)(rows_arg-1), fl);
    else pAngle = 0.0;

    for (uint32_t i = 0; i < cols_arg; i++, ++iter_x, ++iter_y, ++iter_z, ++iter_rgb)
    {
      double yAngle;
      if (cols_arg > 1) yAngle = atan2((double)i - 0.5*(double)(cols_arg-1), fl);
      else yAngle = 0.0;

      double depth = toCopyFrom[index++]; // + 0.0*this->myParent->GetNearClip();

      if (depth > this->point_cloud_cutoff_ &&
          depth < this->point_cloud_cutoff_max_)
      {
        // in optical frame
        // hardcoded rotation rpy(-M_PI/2, 0, -M_PI/2) is built-in
        // to urdf, where the *_optical_frame should have above relative
        // rotation from the physical camera *_frame
        *iter_x = depth * tan(yAngle);
        *iter_y = depth * tan(pAngle);
        *iter_z = depth;
      }
      else // point in the unseeable range
      {
        *iter_x = *iter_y = *iter_z = std::numeric_limits<float>::quiet_NaN();
        point_cloud_msg.is_dense = false;
      }

      // put image color data for each point
      uint8_t* image_src = (uint8_t*)(&(this->image_msg_.data[0]));
      if (this->image_msg_.data.size() == rows_arg*cols_arg*3)
      {
        // color
        iter_rgb[0] = image_src[i*3+j*cols_arg*3+0];
        iter_rgb[1] = image_src[i*3+j*cols_arg*3+1];
        iter_rgb[2] = image_src[i*3+j*cols_arg*3+2];
      }
      else if (this->image_msg_.data.size() == rows_arg*cols_arg)
      {
        // mono (or bayer? @todo; fix for bayer)
        iter_rgb[0] = image_src[i+j*cols_arg];
        iter_rgb[1] = image_src[i+j*cols_arg];
        iter_rgb[2] = image_src[i+j*cols_arg];
      }
      else
      {
        // no image
        iter_rgb[0] = 0;
        iter_rgb[1] = 0;
        iter_rgb[2] = 0;
      }
    }
  }

  // reconvert to original height and width after the flat reshape
  point_cloud_msg.height = rows_arg;
  point_cloud_msg.width = cols_arg;
  point_cloud_msg.row_step = point_cloud_msg.point_step * point_cloud_msg.width;

  return true;
}

// Fill depth information
bool GazeboRosOpenniKinect::FillDepthImageHelper(
    sensor_msgs::Image& image_msg,
    uint32_t rows_arg, uint32_t cols_arg,
    uint32_t step_arg, void* data_arg)
{
  image_msg.encoding = sensor_msgs::image_encodings::TYPE_32FC1;
  image_msg.height = rows_arg;
  image_msg.width = cols_arg;
  image_msg.step = sizeof(float) * cols_arg;
  image_msg.data.resize(rows_arg * cols_arg * sizeof(float));
  image_msg.is_bigendian = 0;

  const float bad_point = std::numeric_limits<float>::quiet_NaN();

  float* dest = (float*)(&(image_msg.data[0]));
  float* toCopyFrom = (float*)data_arg;
  int index = 0;

  // convert depth readings to a depth image
  for (uint32_t j = 0; j < rows_arg; j++)
  {
    for (uint32_t i = 0; i < cols_arg; i++)
    {
      float depth = toCopyFrom[index++];

      if (depth > this->point_cloud_cutoff_ &&
          depth < this->point_cloud_cutoff_max_)
      {
        dest[i + j * cols_arg] = depth;
      }
      else // point in the unseeable range
      {
        dest[i + j * cols_arg] = bad_point;
      }
    }
  }
  return true;
}
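
// Note: the depth image above uses the 32FC1 encoding (one 32-bit float per
// pixel, range in meters); readings outside the configured cutoff range are
// written as NaN, the usual marker for invalid depth in floating-point depth
// images.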

void GazeboRosOpenniKinect::PublishCameraInfo()
{
  ROS_DEBUG_NAMED("openni_kinect", "publishing default camera info, then openni kinect camera info");
  GazeboRosCameraUtils::PublishCameraInfo();

  if (this->depth_info_connect_count_ > 0)
  {
    this->sensor_update_time_ = this->parentSensor_->LastMeasurementTime();
#if GAZEBO_MAJOR_VERSION >= 8
    common::Time cur_time = this->world_->SimTime();
#else
    common::Time cur_time = this->world_->GetSimTime();
#endif
    if (this->sensor_update_time_ - this->last_depth_image_camera_info_update_time_ >= this->update_period_)
    {
      this->PublishCameraInfo(this->depth_image_camera_info_pub_);
      this->last_depth_image_camera_info_update_time_ = this->sensor_update_time_;
    }
  }
}

//@todo: publish disparity similar to openni_camera_deprecated/src/nodelets/openni_nodelet.cpp.
/*
#include <stereo_msgs/DisparityImage.h>
pub_disparity_ = comm_nh.advertise<stereo_msgs::DisparityImage > ("depth/disparity", 5, subscriberChanged2, subscriberChanged2);

void GazeboRosDepthCamera::PublishDisparityImage(const DepthImage& depth, ros::Time time)
{
  stereo_msgs::DisparityImagePtr disp_msg = boost::make_shared<stereo_msgs::DisparityImage > ();
  disp_msg->header.stamp = time;
  disp_msg->header.frame_id = device_->isDepthRegistered () ? rgb_frame_id_ : depth_frame_id_;
  disp_msg->image.header = disp_msg->header;
  disp_msg->image.encoding = sensor_msgs::image_encodings::TYPE_32FC1;
  disp_msg->image.height = depth_height_;
  disp_msg->image.width = depth_width_;
  disp_msg->image.step = disp_msg->image.width * sizeof (float);
  disp_msg->image.data.resize (disp_msg->image.height * disp_msg->image.step);
  disp_msg->T = depth.getBaseline ();
  disp_msg->f = depth.getFocalLength () * depth_width_ / depth.getWidth ();

  disp_msg->min_disparity = 0.0;
  disp_msg->max_disparity = disp_msg->T * disp_msg->f / 0.3;
  disp_msg->delta_d = 0.125;

  depth.fillDisparityImage (depth_width_, depth_height_, reinterpret_cast<float*>(&disp_msg->image.data[0]), disp_msg->image.step);

  pub_disparity_.publish (disp_msg);
}
*/

}

