#include <jsk_topic_tools/rosparam_utils.h>

ConnectionBasedNodelet::onInit();
pnh_->param("fill_value", fill_value, std::numeric_limits<float>::quiet_NaN());
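// fill_value initializes the rendered depth image below (depth_mat.setTo(fill_value)), so
// pixels that no point projects onto report NaN unless the parameter overrides it.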
std::vector<double> trans_pos(3, 0);
std::vector<double> trans_quat(4, 0); trans_quat[3] = 1.0;
if (pnh_->hasParam("translation")) {
  jsk_topic_tools::readVectorParameter(*pnh_, "translation", trans_pos);
}
ROS_INFO("translation : %f %f %f", trans_pos[0], trans_pos[1], trans_pos[2]);
if (pnh_->hasParam("rotation")) {
  jsk_topic_tools::readVectorParameter(*pnh_, "rotation", trans_quat);
}
ROS_INFO("rotation : %f %f %f %f", trans_quat[0], trans_quat[1], trans_quat[2], trans_quat[3]);
tf::Quaternion btq(trans_quat[0], trans_quat[1], trans_quat[2], trans_quat[3]);
tf::Vector3 btp(trans_pos[0], trans_pos[1], trans_pos[2]);
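// btq/btp presumably seed the fixed cloud->camera transform used by publish_points() when
// use_fixed_transform is set; with the defaults above (zero translation, identity rotation)
// that transform is the identity.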
service_ = pnh_->advertiseService("set_point_cloud",
                                  &DepthImageCreator::service_cb, this);  // handler binding assumed; truncated in this excerpt
sync_inputs_e_.reset();
sync_inputs_a_.reset();
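// Any previously created synchronizers are dropped before the subscriptions are (re)built below.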
// The callback bindings below were truncated in this excerpt and are reconstructed from the
// callbacks defined further down (callback_info / callback_cloud / callback_sync).
if (use_asynchronous) {
  sub_as_info_ = pnh_->subscribe<sensor_msgs::CameraInfo>(
    "info", max_sub_queue_size_, &DepthImageCreator::callback_info, this);
  sub_as_cloud_ = pnh_->subscribe<sensor_msgs::PointCloud2>(
    "input", max_sub_queue_size_, &DepthImageCreator::callback_cloud, this);
} else {
  sub_info_.subscribe(*pnh_, "info", max_sub_queue_size_);
  sub_cloud_.subscribe(*pnh_, "input", max_sub_queue_size_);
  if (use_approximate) {
    sync_inputs_a_ = boost::make_shared<message_filters::Synchronizer<message_filters::sync_policies::ApproximateTime<sensor_msgs::CameraInfo, sensor_msgs::PointCloud2> > >(max_queue_size_);
    sync_inputs_a_->connectInput(sub_info_, sub_cloud_);
    sync_inputs_a_->registerCallback(boost::bind(&DepthImageCreator::callback_sync, this, _1, _2));
  } else {
    sync_inputs_e_ = boost::make_shared<message_filters::Synchronizer<message_filters::sync_policies::ExactTime<sensor_msgs::CameraInfo, sensor_msgs::PointCloud2> > >(max_queue_size_);
    sync_inputs_e_->connectInput(sub_info_, sub_cloud_);
    sync_inputs_e_->registerCallback(boost::bind(&DepthImageCreator::callback_sync, this, _1, _2));
  }
}
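// ApproximateTime pairs an info/cloud couple whose stamps are merely close, which tolerates
// sensors with unsynchronized clocks; ExactTime only fires when both messages carry
// identical stamps, as they do when a single driver publishes both topics.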
// Service-driven mode needs only the camera info subscription (branch condition not shown in this excerpt).
sub_as_info_ = pnh_->subscribe<sensor_msgs::CameraInfo>(
  "info", max_sub_queue_size_, &DepthImageCreator::callback_info, this);
if (use_asynchronous) {
  sub_as_info_.shutdown();
  sub_as_cloud_.shutdown();
} else {
  sub_info_.unsubscribe();
  sub_cloud_.unsubscribe();
}
// Service-driven mode tears down only the camera info subscription (surrounding branch not
// shown in this excerpt).
sub_as_info_.shutdown();
// set_point_cloud handler; the name and request parameter are assumed, body not shown here.
bool DepthImageCreator::service_cb(std_srvs::Empty::Request &req,
                                   std_srvs::Empty::Response &res)
{ /* ... */ }
void DepthImageCreator::callback_sync(const sensor_msgs::CameraInfoConstPtr& info,
                                      const sensor_msgs::PointCloud2ConstPtr& pcloud2) {
  ROS_DEBUG("DepthImageCreator::callback_sync");
  publish_points(info, pcloud2);
}
void DepthImageCreator::callback_cloud(const sensor_msgs::PointCloud2ConstPtr& pcloud2) {
  ROS_DEBUG("DepthImageCreator::callback_cloud");
  boost::mutex::scoped_lock lock(this->mutex_points);
  points_ptr_ = pcloud2;
}
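// callback_cloud only caches the latest cloud under mutex_points; rendering is triggered by
// callback_info below, which pairs the cached cloud with the incoming camera info.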
void DepthImageCreator::callback_info(const sensor_msgs::CameraInfoConstPtr& info) {
  ROS_DEBUG("DepthImageCreator::callback_info");
  boost::mutex::scoped_lock lock(this->mutex_points);
  if (info_counter_++ >= info_throttle_) {
    info_counter_ = 0;  // assumed: reset the throttle counter
  } else {
    return;             // assumed: drop this camera info to throttle the output rate
  }
  publish_points(info, points_ptr_);
}
void DepthImageCreator::publish_points(const sensor_msgs::CameraInfoConstPtr& info,
                                       const sensor_msgs::PointCloud2ConstPtr& pcloud2) {
  sensor_msgs::PointCloud2Ptr pcloud2_ptr(new sensor_msgs::PointCloud2(*pcloud2));
  // Head of the field-setup call is truncated in this excerpt; reconstructed as a
  // sensor_msgs::PointCloud2Modifier::setPointCloud2Fields() call (modifier name assumed).
  sensor_msgs::PointCloud2Modifier pc_mod(*pcloud2_ptr);
  pc_mod.setPointCloud2Fields(4,
                              "x", 1, sensor_msgs::PointField::FLOAT32,
                              "y", 1, sensor_msgs::PointField::FLOAT32,
                              "z", 1, sensor_msgs::PointField::FLOAT32,
                              "rgb", 1, sensor_msgs::PointField::FLOAT32);
  ROS_DEBUG("DepthImageCreator::publish_points");
  if (!pcloud2_ptr) return;
  bool proc_cloud = true, proc_depth = true, proc_image = true, proc_disp = true;
  if (pub_cloud_.getNumSubscribers() == 0) {
    proc_cloud = false;
  }
  if (pub_depth_.getNumSubscribers() == 0) {
    proc_depth = false;
  }
  if (pub_image_.getNumSubscribers() == 0) {
    proc_image = false;
  }
  if (pub_disp_image_.getNumSubscribers() == 0) {
    proc_disp = false;
  }
  // Nothing to do if no output topic has a subscriber.
  if (!proc_cloud && !proc_depth && !proc_image && !proc_disp) return;
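  // Camera intrinsics come from the 3x4 projection matrix P (row-major):
  //   [fx  0 cx tx]
  //   [ 0 fy cy ty]
  //   [ 0  0  1  0]
  // hence the indices 0, 2, 3, 5 and 6 below.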
  float fx = info->P[0];
  float cx = info->P[2];
  float tx = info->P[3];
  float fy = info->P[5];
  float cy = info->P[6];
  Eigen::Affine3f sensorPose;
  tf::StampedTransform transform;  // variable name assumed; holds the cloud->camera transform
  if (use_fixed_transform) {
    transform = fixed_transform;   // assumed: the fixed transform built from the rosparams in onInit
  } else {
    try {
      // Timestamp/timeout arguments were truncated in this excerpt and are assumed here.
      tf_listener_->waitForTransform(pcloud2_ptr->header.frame_id,
                                     info->header.frame_id,
                                     info->header.stamp, ros::Duration(0.1));
      tf_listener_->lookupTransform(pcloud2_ptr->header.frame_id,
                                    info->header.frame_id,
                                    info->header.stamp, transform);
    }
    catch (const std::runtime_error &e) {  // catch by const reference rather than by value
      ROS_ERROR("%s", e.what());
      return;  // assumed: give up on this frame if the transform is unavailable
    }
  }
  tf::Vector3 p = transform.getOrigin();       // assumed: extract the translation
  tf::Quaternion q = transform.getRotation();  // assumed: extract the rotation
  sensorPose = (Eigen::Affine3f)Eigen::Translation3f(p.getX(), p.getY(), p.getZ());
  Eigen::Quaternion<float> rot(q.getW(), q.getX(), q.getY(), q.getZ());
  sensorPose = sensorPose * rot;
  Eigen::Affine3f trans = (Eigen::Affine3f)Eigen::Translation3f(-tx / fx, 0, 0);
  sensorPose = sensorPose * trans;
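  // For the right camera of a calibrated stereo pair P[3] = tx = -fx * baseline, so the
  // extra translation of -tx/fx moves the render viewpoint by the baseline along x; for a
  // monocular camera tx is 0 and this is a no-op.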
  ROS_INFO("%f %f %f %f %f %f %f %f %f, %f %f %f",
           sensorPose(0,0), sensorPose(0,1), sensorPose(0,2),
           sensorPose(1,0), sensorPose(1,1), sensorPose(1,2),
           sensorPose(2,0), sensorPose(2,1), sensorPose(2,2),
           sensorPose(0,3), sensorPose(1,3), sensorPose(2,3));
  pcl::RangeImagePlanar rangeImageP;
  // Reconstructed declarations (omitted in this excerpt): Point/PointCloud are the nodelet's
  // point and cloud typedefs; the input message is converted and moved into the camera frame.
  PointCloud tpc;
  pcl::fromROSMsg(*pcloud2_ptr, tpc);
  PointCloud pointCloud;
  Eigen::Affine3f inv;
#if ( PCL_MAJOR_VERSION >= 1 && PCL_MINOR_VERSION >= 5 )
  inv = sensorPose.inverse();
  pcl::transformPointCloud<Point>(tpc, pointCloud, inv);
#else
  pcl::getInverse(sensorPose, inv);
  pcl::getTransformedPointCloud<PointCloud>(tpc, inv, pointCloud);
#endif
  Eigen::Affine3f dummytrans;
  dummytrans.setIdentity();
  // The size and trailing sensor-pose arguments were truncated in this excerpt (reconstructed).
  rangeImageP.createFromPointCloudWithFixedSize(pointCloud,
                                                info->width / scale_depth, info->height / scale_depth,
                                                cx / scale_depth, cy / scale_depth,
                                                fx / scale_depth, fy / scale_depth,
                                                dummytrans);
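  // Rendering the planar range image at 1/scale_depth of the camera resolution keeps the
  // projection loop cheap; the depth and color images are resized back to full resolution
  // below when scale_depth != 1.0.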
  cv::Mat color_mat = cv::Mat::zeros(rangeImageP.height, rangeImageP.width, CV_8UC3);
  cv::Mat depth_mat(rangeImageP.height, rangeImageP.width, CV_32FC1);
  depth_mat.setTo(fill_value);
  for (size_t i = 0; i < pointCloud.size(); i++) {
    Point pt = pointCloud[i];  // assumed: iterate over the transformed cloud
    int image_x, image_y;      // assumed declarations (truncated in this excerpt)
    rangeImageP.getImagePoint(pt.x, pt.y, pt.z, image_x, image_y);
    if (!rangeImageP.isInImage(image_x, image_y)) {
      continue;                // assumed: skip points that project outside the image
    }
    pcl::PointWithRange pt_with_range = rangeImageP.getPoint(image_x, image_y);
    depth_mat.at<float>(image_y, image_x) = pt_with_range.z;
    if (!std::isnan(pt.x) && !std::isnan(pt.y) && !std::isnan(pt.z)) {
      color_mat.at<cv::Vec3b>(image_y, image_x) = cv::Vec3b(pt.r, pt.g, pt.b);
    }
  }
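  // getImagePoint() projects each 3D point through rangeImageP's pinhole model and getPoint()
  // returns the point the range image kept for that pixel, so depth_mat ends up with one depth
  // value per pixel. Note that color_mat is filled in (r, g, b) channel order here rather than
  // OpenCV's usual BGR.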
  if (scale_depth != 1.0) {
    cv::resize(color_mat, color_mat, cv::Size(info->width, info->height));
    cv::resize(depth_mat, depth_mat, cv::Size(info->width, info->height));
  }
  // (The publishing of the raw depth and color images for proc_depth / proc_image presumably
  // happens in lines omitted from this excerpt.)
  if (proc_cloud || proc_disp) {
    pcl::RangeImagePlanar rangeImagePP;
    // Size and intrinsics arguments were truncated in this excerpt; reconstructed from the
    // values computed above.
    rangeImagePP.setDepthImage((float *)depth_mat.ptr(),
                               info->width, info->height,
                               cx, cy, fx, fy);
#if PCL_MAJOR_VERSION == 1 && PCL_MINOR_VERSION >= 7
    rangeImagePP.header = pcl_conversions::toPCL(info->header);  // assumed; not part of this excerpt
#else
    rangeImagePP.header = info->header;
#endif
    PointCloud cloud_out;  // declaration reconstructed from the publish call below
    cloud_out.header = rangeImagePP.header;
    if (organize_cloud_) {
      cloud_out.width = rangeImagePP.width;
      cloud_out.height = rangeImagePP.height;
    } else {
      cloud_out.width = rangeImagePP.width * rangeImagePP.height;
      cloud_out.height = 1;
    }
    cloud_out.resize(cloud_out.width * cloud_out.height);
    cloud_out.is_dense = true;
    for (int y = 0; y < (int)rangeImagePP.height; y++) {
      for (int x = 0; x < (int)rangeImagePP.width; x++) {
        pcl::PointWithRange pt_from = rangeImagePP.points[rangeImagePP.width * y + x];
        cv::Vec3b rgb = color_mat.at<cv::Vec3b>(y, x);
        // The construction of pt_to was truncated in this excerpt; the copy below is an
        // assumed reconstruction (coordinates from the range image, color from color_mat).
        Point pt_to;
        pt_to.x = pt_from.x;
        pt_to.y = pt_from.y;
        pt_to.z = pt_from.z;
        pt_to.r = rgb[0]; pt_to.g = rgb[1]; pt_to.b = rgb[2];  // same channel order as written above
        cloud_out.points[rangeImagePP.width * y + x] = pt_to;
        if (std::isnan(pt_to.x) || std::isnan(pt_to.y) || std::isnan(pt_to.z)) {
          cloud_out.is_dense = false;
        }
      }
    }
    pub_cloud_.publish(boost::make_shared<PointCloud>(cloud_out));
    stereo_msgs::DisparityImage disp;
#if PCL_MAJOR_VERSION == 1 && PCL_MINOR_VERSION >= 7
    disp.header = pcl_conversions::fromPCL(rangeImagePP.header);  // assumed; not part of this excerpt
#else
    disp.header = rangeImagePP.header;
#endif
    disp.image.encoding = sensor_msgs::image_encodings::TYPE_32FC1;  // assumed; consistent with the float step below
    disp.image.height = rangeImagePP.height;
    disp.image.width = rangeImagePP.width;
    disp.image.step = disp.image.width * sizeof(float);
    disp.f = fx; disp.T = 0.075;  // hard-coded nominal baseline [m]
    disp.min_disparity = 0;
    disp.max_disparity = disp.T * disp.f / 0.3;
    disp.delta_d = 0.125;

    disp.image.data.resize(disp.image.height * disp.image.step);
    float *data = reinterpret_cast<float*>(&disp.image.data[0]);
    float normalization_factor = disp.f * disp.T;
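    // Stereo disparity is d = f * T / z, so each pixel below is normalization_factor divided
    // by depth; max_disparity above corresponds to the 0.3 m minimum depth and delta_d to a
    // 1/8-pixel disparity resolution.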
    for (int y = 0; y < (int)rangeImagePP.height; y++) {
      for (int x = 0; x < (int)rangeImagePP.width; x++) {
        pcl::PointWithRange p = rangeImagePP.getPoint(x, y);
        data[y * disp.image.width + x] = normalization_factor / p.z;
      }
    }
    pub_disp_image_.publish(boost::make_shared<stereo_msgs::DisparityImage>(disp));