  , input_depth_transport_(nh_)
  , model_depth_transport_(nh_)
  , filtered_depth_transport_(nh_)
  , filtered_label_transport_(nh_)
  , image_topic_("depth")
  , near_clipping_plane_distance_(0.3)
  , far_clipping_plane_distance_(5.0)
  , shadow_threshold_(0.04)
  , padding_offset_(0.02)
  , skip_vertical_pixels_(4)
  , skip_horizontal_pixels_(6)
  , image_callback_count_(0)
  , average_callback_dt_(0.0)
  if (params.hasMember("filtered_cloud_topic"))
  h = mesh_filter_->addMesh(static_cast<const shapes::Mesh&>(*shape));
    ROS_ERROR("Mesh filter not yet initialized!");
    ROS_ERROR("Internal error. Mesh filter handle %u not found", h);

  transform = it->second;
bool host_is_big_endian(void)
{
  union
  {
    uint32_t i;
    char c[sizeof(uint32_t)];
  } bint = { 0x01020304 };
  return bint.c[0] == 1;
}
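// The union above type-puns a 32-bit constant to detect the host byte order at
// runtime: the first byte of 0x01020304 is 0x01 only on big-endian machines.
// The result is normally computed once and cached, e.g. in the
// HOST_IS_BIG_ENDIAN constant used further down in this listing (the exact
// initialization shown here is an illustrative sketch):
static const bool HOST_IS_BIG_ENDIAN = host_is_big_endian();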
void DepthImageOctomapUpdater::depthImageCallback(const sensor_msgs::ImageConstPtr& depth_msg,
                                                  const sensor_msgs::CameraInfoConstPtr& info_msg)
{
  ROS_DEBUG("Received a new depth image message (frame = '%s', encoding='%s')", depth_msg->header.frame_id.c_str(),
            depth_msg->encoding.c_str());
    static const double TEST_DT = 0.005;

    for (int t = 0; t < nt; ++t)
  static const unsigned int MAX_TF_COUNTER = 1000;

      const unsigned int div = MAX_TF_COUNTER / 10;

      ROS_WARN_THROTTLE(1, "More than half of the image messages discarded due to TF being unavailable (%u%%). "
                           "Transform error of sensor data: %s; quitting callback.",
                        (100 * failed_tf_) / (failed_tf_ + good_tf_), err.c_str());

      ROS_DEBUG_THROTTLE(1, "Transform error of sensor data: %s; quitting callback", err.c_str());

      const unsigned int div = MAX_TF_COUNTER / 10;
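// The two identical `div` statements above come from different branches of the
// callback: one is reached when the sensor transform lookup succeeded, the
// other when it failed. The good/failed transform counters appear to be scaled
// down by this divisor once they reach MAX_TF_COUNTER, so the "discarded"
// percentage in the warning reflects recent messages rather than the whole run.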
  if (depth_msg->is_bigendian && !HOST_IS_BIG_ENDIAN)

  const int w = depth_msg->width;
  const int h = depth_msg->height;
  params.setCameraParameters(info_msg->K[0], info_msg->K[4], info_msg->K[2], info_msg->K[5]);

    mesh_filter_->filter(&depth_msg->data[0], GL_UNSIGNED_SHORT);
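// K is the 3x3 row-major intrinsic matrix of the CameraInfo message, so
// K[0] = fx, K[4] = fy, K[2] = cx, K[5] = cy: the mesh filter is configured
// with the same pinhole model as the incoming depth image before the raw
// 16-bit buffer is handed to it (GL_UNSIGNED_SHORT matches the 16UC1 encoding).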
    ROS_ERROR_THROTTLE(1, "Unexpected encoding type: '%s'. Ignoring input.", depth_msg->encoding.c_str());
  const double px = info_msg->K[2];
  const double py = info_msg->K[5];
  if (w >= static_cast<int>(x_cache_.size()) || h >= static_cast<int>(y_cache_.size()) || K2_ != px || K5_ != py ||
      K0_ != info_msg->K[0] || K4_ != info_msg->K[4])
  {
    K2_ = px;
    K5_ = py;
    K0_ = info_msg->K[0];
    K4_ = info_msg->K[4];
    if (static_cast<int>(x_cache_.size()) < w)
      x_cache_.resize(w);
    if (static_cast<int>(y_cache_.size()) < h)
      y_cache_.resize(h);

    for (int x = 0; x < w; ++x)
      x_cache_[x] = (x - px) * inv_fx_;

    for (int y = 0; y < h; ++y)
      y_cache_[y] = (y - py) * inv_fy_;
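// Back-projection refresher for the cache loops above: with pinhole intrinsics
// fx = K[0], fy = K[4], cx = K[2], cy = K[5], a pixel (u, v) with metric depth
// z maps to X = (u - cx) / fx * z, Y = (v - cy) / fy * z, Z = z. Caching the
// per-column and per-row factors (inv_fx_ / inv_fy_ presumably hold 1/fx and
// 1/fy) leaves only two multiplications per pixel. A self-contained sketch of
// the same idea (names here are illustrative, not the class members):
#include <vector>

struct BackprojectCache
{
  std::vector<float> x_factors;  // (u - cx) / fx for every column u
  std::vector<float> y_factors;  // (v - cy) / fy for every row v

  void rebuild(int width, int height, double fx, double fy, double cx, double cy)
  {
    x_factors.resize(width);
    y_factors.resize(height);
    for (int u = 0; u < width; ++u)
      x_factors[u] = static_cast<float>((u - cx) / fx);
    for (int v = 0; v < height; ++v)
      y_factors[v] = static_cast<float>((v - cy) / fy);
  }
};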
  std::size_t img_size = h * w;
    sensor_msgs::Image debug_msg;
    debug_msg.header = depth_msg->header;
    debug_msg.height = depth_msg->height;
    debug_msg.width = depth_msg->width;
    debug_msg.is_bigendian = depth_msg->is_bigendian;
    debug_msg.step = depth_msg->step;
    debug_msg.data.resize(img_size * sizeof(float));
    mesh_filter_->getModelDepth(reinterpret_cast<float*>(&debug_msg.data[0]));
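// The block above copies the rendered model depth into a debug image. A hedged
// sketch of how such an image would then go out through the image_transport
// camera publishers declared by this updater; the TYPE_32FC1 encoding
// assignment is an assumption (the corresponding line is not part of this
// fragment), and sensor_msgs/image_encodings.h provides the constant:
debug_msg.encoding = sensor_msgs::image_encodings::TYPE_32FC1;
pub_model_depth_image_.publish(debug_msg, *info_msg);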
    sensor_msgs::Image filtered_depth_msg;
    filtered_depth_msg.header = depth_msg->header;
    filtered_depth_msg.height = depth_msg->height;
    filtered_depth_msg.width = depth_msg->width;
    filtered_depth_msg.is_bigendian = depth_msg->is_bigendian;
    filtered_depth_msg.step = depth_msg->step;
    filtered_depth_msg.data.resize(img_size * sizeof(float));
    mesh_filter_->getFilteredDepth(reinterpret_cast<float*>(&filtered_depth_msg.data[0]));
    sensor_msgs::Image label_msg;
    label_msg.header = depth_msg->header;
    label_msg.height = depth_msg->height;
    label_msg.width = depth_msg->width;
    label_msg.is_bigendian = depth_msg->is_bigendian;
    label_msg.step = w * sizeof(unsigned int);
    label_msg.data.resize(img_size * sizeof(unsigned int));
    mesh_filter_->getFilteredLabels(reinterpret_cast<unsigned int*>(&label_msg.data[0]));
    static std::vector<float> filtered_data;
    sensor_msgs::Image filtered_msg;
    filtered_msg.header = depth_msg->header;
    filtered_msg.height = depth_msg->height;
    filtered_msg.width = depth_msg->width;
    filtered_msg.is_bigendian = depth_msg->is_bigendian;
    filtered_msg.step = depth_msg->step;
    filtered_msg.data.resize(img_size * sizeof(unsigned short));
    if (filtered_data.size() < img_size)
      filtered_data.resize(img_size);
    mesh_filter_->getFilteredDepth(reinterpret_cast<float*>(&filtered_data[0]));
    unsigned short* tmp_ptr = (unsigned short*)&filtered_msg.data[0];
    for (std::size_t i = 0; i < img_size; ++i)
      tmp_ptr[i] = (unsigned short)(filtered_data[i] * 1000 + 0.5);
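// The loop above converts the filtered depth from 32-bit float metres back to
// the 16UC1 millimetre encoding of the input image; adding 0.5 before the
// truncating cast rounds to the nearest millimetre. The same conversion as a
// stand-alone helper (illustrative):
#include <cstddef>
#include <cstdint>

inline void metersToMillimeters(const float* in, std::uint16_t* out, std::size_t n)
{
  for (std::size_t i = 0; i < n; ++i)
    out[i] = static_cast<std::uint16_t>(in[i] * 1000.0f + 0.5f);
}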
      const uint16_t* input_row = reinterpret_cast<const uint16_t*>(&depth_msg->data[0]);

      for (int y = skip_vertical_pixels_; y < h_bound; ++y, labels_row += w, input_row += w)
        for (int x = skip_horizontal_pixels_; x < w_bound; ++x)

            float zz = (float)input_row[x] * 1e-3;

            occupied_cells.insert(tree_->coordToKey(point_tf.getX(), point_tf.getY(), point_tf.getZ()));

            float zz = input_row[x] * 1e-3;

            model_cells.insert(tree_->coordToKey(point_tf.getX(), point_tf.getY(), point_tf.getZ()));
      const float* input_row = reinterpret_cast<const float*>(&depth_msg->data[0]);

      for (int y = skip_vertical_pixels_; y < h_bound; ++y, labels_row += w, input_row += w)
        for (int x = skip_horizontal_pixels_; x < w_bound; ++x)

            float zz = input_row[x];

            occupied_cells.insert(tree_->coordToKey(point_tf.getX(), point_tf.getY(), point_tf.getZ()));

            float zz = input_row[x];

            model_cells.insert(tree_->coordToKey(point_tf.getX(), point_tf.getY(), point_tf.getZ()));
    ROS_ERROR("Internal error while parsing depth data");

  for (octomap::KeySet::iterator it = model_cells.begin(), end = model_cells.end(); it != end; ++it)
    occupied_cells.erase(*it);

    for (octomap::KeySet::iterator it = occupied_cells.begin(), end = occupied_cells.end(); it != end; ++it)
      tree_->updateNode(*it, true);

    ROS_ERROR("Internal error while updating octree");

  tree_->unlockWrite();
  tree_->triggerUpdateCallback();
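// Summary of the octree update pattern above, as a minimal sketch assuming an
// octomap::OcTree and two pre-filled key sets: cells that fall on the robot
// model are removed from the occupied set so the robot never marks itself as
// an obstacle, and the remaining keys are then marked occupied.
#include <octomap/octomap.h>

void markOccupied(octomap::OcTree& tree, octomap::KeySet& occupied_cells, const octomap::KeySet& model_cells)
{
  for (octomap::KeySet::const_iterator it = model_cells.begin(), end = model_cells.end(); it != end; ++it)
    occupied_cells.erase(*it);
  for (octomap::KeySet::const_iterator it = occupied_cells.begin(), end = occupied_cells.end(); it != end; ++it)
    tree.updateNode(*it, true);  // true = observation says "occupied"
  tree.updateInnerOccupancy();   // keep inner nodes consistent after batch updates
}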