kinfu_nodelet.cpp
/*********************************************************************
 * Software License Agreement (BSD License)
 *
 *  Copyright (c) 2017, Kentaro Wada and JSK Lab
 *  All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above
 *     copyright notice, this list of conditions and the following
 *     disclaimer in the documentation and/or other materials provided
 *     with the distribution.
 *   * Neither the name of Kentaro Wada and JSK Lab nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 *  FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 *  COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 *  BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 *  LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 *  CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 *  LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 *  ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 *********************************************************************/

#define BOOST_PARAMETER_MAX_ARITY 7

#include <boost/thread/thread.hpp>
#include <boost/filesystem.hpp>

#include <pcl/features/normal_3d.h>
#include <pcl/io/obj_io.h>

#include <cv_bridge/cv_bridge.h>
#include <jsk_recognition_msgs/TrackingStatus.h>
#include <jsk_recognition_msgs/SaveMesh.h>
#include <jsk_recognition_utils/pcl_ros_util.h>
#include <sensor_msgs/fill_image.h>
#include <sensor_msgs/image_encodings.h>

#include "jsk_pcl_ros/kinfu.h"

namespace enc = sensor_msgs::image_encodings;

// TODO(wkentaro): Move to upstream.
namespace pcl
{
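  // Merge an XYZ cloud and an RGB cloud of the same size into one XYZRGB
  // cloud. Points are matched by index, so both inputs must come from the
  // same extraction (here: the points and colors fetched from the TSDF volume).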
  void
  concatenateFields(const PointCloud<PointXYZ>& cloud_xyz, const PointCloud<RGB>& cloud_rgb, PointCloud<PointXYZRGB>& cloud)
  {
    if (cloud_xyz.points.size() != cloud_rgb.points.size())
    {
      std::cerr << "Clouds being concatenated must have the same size." << std::endl;
      return;
    }
    cloud.points.resize(cloud_xyz.points.size());
    for (size_t i = 0; i < cloud_xyz.points.size(); i++)
    {
      cloud.points[i].x = cloud_xyz.points[i].x;
      cloud.points[i].y = cloud_xyz.points[i].y;
      cloud.points[i].z = cloud_xyz.points[i].z;
      cloud.points[i].r = cloud_rgb.points[i].r;
      cloud.points[i].g = cloud_rgb.points[i].g;
      cloud.points[i].b = cloud_rgb.points[i].b;
    }
    cloud.width = cloud_xyz.width;
    cloud.height = cloud_xyz.height;
  }
}

namespace jsk_pcl_ros
{
  void
  Kinfu::onInit()
  {
    ConnectionBasedNodelet::onInit();
    always_subscribe_ = true;  // for mapping

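    // Parameters:
    //   device:          CUDA device id to run kinfu on
    //   auto_reset:      reset the tracker automatically when ICP tracking is lost
    //   integrate_color: also integrate color into the volume and collect
    //                    texture snapshots for mesh export
    //   slam:            additionally publish tf map -> fixed_frame_id_
    //   n_textures:      number of textures used on mesh export
    //                    (< 0: all saved textures, 0: no texture, > 0: the most recent n)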
    pnh_->param("device", device_, 0);
    pnh_->param("auto_reset", auto_reset_, true);
    pnh_->param("integrate_color", integrate_color_, false);
    pnh_->param("slam", slam_, false);
    pnh_->param<std::string>("fixed_frame_id", fixed_frame_id_, "odom_init");
    pnh_->param("n_textures", n_textures_, -1);

    srv_ = boost::make_shared<dynamic_reconfigure::Server<Config> >(*pnh_);
    dynamic_reconfigure::Server<Config>::CallbackType f = boost::bind(&Kinfu::configCallback, this, _1, _2);
    srv_->setCallback(f);

    tf_listener_.reset(new tf::TransformListener());

    pub_camera_pose_ = advertise<geometry_msgs::PoseStamped>(*pnh_, "output", 1);
    pub_cloud_ = advertise<sensor_msgs::PointCloud2>(*pnh_, "output/cloud", 1);
    pub_depth_ = advertise<sensor_msgs::Image>(*pnh_, "output/depth", 1);
    pub_rendered_image_ = advertise<sensor_msgs::Image>(*pnh_, "output/rendered_image", 1);
    pub_status_ = advertise<jsk_recognition_msgs::TrackingStatus>(*pnh_, "output/status", 1);

    srv_reset_ = pnh_->advertiseService("reset", &Kinfu::resetCallback, this);
    srv_save_mesh_ = pnh_->advertiseService("save_mesh", &Kinfu::saveMeshCallback, this);
    srv_save_mesh_with_context_ = pnh_->advertiseService(
      "save_mesh_with_context", &Kinfu::saveMeshWithContextCallback, this);

    onInitPostProcess();
  }

  void
  Kinfu::configCallback(Config& config, uint32_t level)
  {
    boost::mutex::scoped_lock lock(mutex_);
    save_dir_ = config.save_dir;
  }

  void
  Kinfu::initKinfu(const sensor_msgs::CameraInfo::ConstPtr& caminfo_msg)
  {
    pcl::gpu::setDevice(device_);
    pcl::gpu::printShortCudaDeviceInfo(device_);

    /* below are copied from pcl/gpu/kinfu_large_scale/src/kinfuLS_app.cpp */

    float vsz = pcl::device::kinfuLS::VOLUME_SIZE;
    float shift_distance = pcl::device::kinfuLS::DISTANCE_THRESHOLD;
    Eigen::Vector3f volume_size = Eigen::Vector3f::Constant(vsz/*meters*/);
    if (shift_distance > 2.5 * vsz)
    {
      NODELET_WARN("WARNING Shifting distance (%.2f) is very large compared to the volume size (%.2f).",
                   shift_distance, vsz);
    }

    kinfu_.reset(new pcl::gpu::kinfuLS::KinfuTracker(
      volume_size, shift_distance, caminfo_msg->height, caminfo_msg->width));

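    // Place the initial camera at the center of the volume in x/y and pulled
    // back along z (t = (vsz/2, vsz/2, -0.1 * vsz)), so that the whole TSDF
    // volume lies in front of the first view.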
    Eigen::Matrix3f R = Eigen::Matrix3f::Identity();
    Eigen::Vector3f t = volume_size * 0.5f - Eigen::Vector3f(0, 0, volume_size(2) / 2 * 1.2f);

    Eigen::Affine3f pose = Eigen::Translation3f(t) * Eigen::AngleAxisf(R);

    kinfu_->setInitialCameraPose(pose);
    kinfu_->volume().setTsdfTruncDist(0.030f/*meters*/);
    kinfu_->setIcpCorespFilteringParams(0.1f/*meters*/, sin(pcl::deg2rad(20.f)));
    //kinfu_->setDepthTruncationForICP(3.0f/*meters*/);
    kinfu_->setCameraMovementThreshold(0.001f);

    if (integrate_color_)
    {
      const int max_color_integration_weight = 2;
      kinfu_->initColorIntegration(max_color_integration_weight);
    }
  }

  void
  Kinfu::subscribe()
  {
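    // Synchronize camera info and depth (plus color when integrate_color_ is
    // set), and feed each synchronized message set to Kinfu::update().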
    int queue_size;
    pnh_->param("queue_size", queue_size, 10);

    sub_camera_info_.subscribe(*pnh_, "input/camera_info", 1);
    sub_depth_.subscribe(*pnh_, "input/depth", 1);
    if (integrate_color_)
    {
      sub_color_.subscribe(*pnh_, "input/color", 1);
      sync_with_color_.reset(new message_filters::Synchronizer<SyncPolicyWithColor>(queue_size));
      sync_with_color_->connectInput(sub_camera_info_, sub_depth_, sub_color_);
      sync_with_color_->registerCallback(boost::bind(&Kinfu::update, this, _1, _2, _3));
    }
    else
    {
      sync_.reset(new message_filters::Synchronizer<SyncPolicy>(queue_size));
      sync_->connectInput(sub_camera_info_, sub_depth_);
      sync_->registerCallback(boost::bind(&Kinfu::update, this, _1, _2));
    }
  }

  void
  Kinfu::unsubscribe()
  {
  }

  void
  Kinfu::update(const sensor_msgs::CameraInfo::ConstPtr& caminfo_msg,
                const sensor_msgs::Image::ConstPtr& depth_msg)
  {
    update(caminfo_msg, depth_msg, sensor_msgs::ImageConstPtr());
  }

  void
  Kinfu::update(const sensor_msgs::CameraInfo::ConstPtr& caminfo_msg,
                const sensor_msgs::Image::ConstPtr& depth_msg,
                const sensor_msgs::Image::ConstPtr& color_msg)
  {
    boost::mutex::scoped_lock lock(mutex_);

    if ((depth_msg->height != caminfo_msg->height) || (depth_msg->width != caminfo_msg->width))
    {
      ROS_ERROR("Image size of input depth and camera info must be the same. Depth: (%d, %d), Camera Info: (%d, %d)",
                depth_msg->height, depth_msg->width, caminfo_msg->height, caminfo_msg->width);
      return;
    }
    if (integrate_color_ && ((color_msg->height != caminfo_msg->height) || (color_msg->width != caminfo_msg->width)))
    {
      ROS_ERROR("Image size of input color image and camera info must be the same. Color: (%d, %d), Camera Info: (%d, %d)",
                color_msg->height, color_msg->width, caminfo_msg->height, caminfo_msg->width);
      return;
    }

    if (!kinfu_)
    {
      initKinfu(caminfo_msg);
    }

    // run kinfu
    {
      kinfu_->setDepthIntrinsics(/*fx=*/caminfo_msg->K[0], /*fy=*/caminfo_msg->K[4],
                                 /*cx=*/caminfo_msg->K[2], /*cy=*/caminfo_msg->K[5]);

      // depth: 32FC1 (meters) -> 16UC1 (millimeters), which is what KinfuTracker expects
      cv::Mat depth;
      if (depth_msg->encoding == enc::TYPE_32FC1)
      {
        cv::Mat depth_32fc1 = cv_bridge::toCvShare(depth_msg, enc::TYPE_32FC1)->image;
        depth_32fc1 *= 1000.;
        depth_32fc1.convertTo(depth, CV_16UC1);
      }
      else if (depth_msg->encoding == enc::TYPE_16UC1)
      {
        depth = cv_bridge::toCvShare(depth_msg, enc::TYPE_16UC1)->image;
      }
      else
      {
        NODELET_FATAL("Unsupported depth image encoding: %s", depth_msg->encoding.c_str());
        return;
      }

      // depth: cpu -> gpu
      pcl::gpu::kinfuLS::KinfuTracker::DepthMap depth_device;
      depth_device.upload(&(depth.data[0]), depth.cols * 2, depth.rows, depth.cols);

      if (integrate_color_)
      {
        // color: cpu -> gpu
        colors_device_.upload(&(color_msg->data[0]), color_msg->step, color_msg->height, color_msg->width);

        (*kinfu_)(depth_device, colors_device_);
      }
      else
      {
        (*kinfu_)(depth_device);
      }
      frame_idx_++;
    }

    jsk_recognition_msgs::TrackingStatus status;
    status.header = caminfo_msg->header;
    if (kinfu_->icpIsLost())
    {
      NODELET_FATAL_THROTTLE(10, "Tracking by ICP in kinect fusion is lost. auto_reset: %d", auto_reset_);
      if (auto_reset_)
      {
        kinfu_.reset();
        frame_idx_ = 0;
        textures_.clear();  // keep textures_ and cameras_ in sync: they are pushed together
        cameras_.clear();
      }
      status.is_lost = true;
      pub_status_.publish(status);
      return;
    }
    status.is_lost = false;
    pub_status_.publish(status);

    // Save a texture snapshot and its camera pose every SNAPSHOT_RATE frames,
    // for texture mapping on mesh export.
    if (integrate_color_ && (frame_idx_ % pcl::device::kinfuLS::SNAPSHOT_RATE == 1))
    {
      cv::Mat texture = cv_bridge::toCvCopy(color_msg, color_msg->encoding)->image;
      if (color_msg->encoding == enc::RGB8)
      {
        cv::cvtColor(texture, texture, cv::COLOR_RGB2BGR);
      }
      textures_.push_back(texture);

      pcl::TextureMapping<pcl::PointXYZ>::Camera camera;
      camera.pose = kinfu_->getCameraPose();
      camera.focal_length = caminfo_msg->K[0];  // must be equal to caminfo_msg->K[4]
      camera.height = caminfo_msg->height;
      camera.width = caminfo_msg->width;
      cameras_.push_back(camera);
    }

    // publish kinfu origin and slam
    {
      Eigen::Affine3f camera_pose = kinfu_->getCameraPose();

      // publish camera pose
      if (pub_camera_pose_.getNumSubscribers() > 0)
      {
        geometry_msgs::PoseStamped camera_pose_msg;
        tf::poseEigenToMsg(camera_pose, camera_pose_msg.pose);
        camera_pose_msg.header.stamp = caminfo_msg->header.stamp;
        camera_pose_msg.header.frame_id = "kinfu_origin";
        pub_camera_pose_.publish(camera_pose_msg);
      }

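      // Broadcast kinfu_origin as a child of the camera frame so that the map
      // built in kinfu_origin stays reachable through tf.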
      Eigen::Affine3f camera_to_kinfu_origin = camera_pose.inverse();
      tf::Transform tf_camera_to_kinfu_origin;
      tf::transformEigenToTF(camera_to_kinfu_origin, tf_camera_to_kinfu_origin);
      tf_camera_to_kinfu_origin.setRotation(tf_camera_to_kinfu_origin.getRotation().normalized());
      tf_broadcaster_.sendTransform(
        tf::StampedTransform(tf_camera_to_kinfu_origin, caminfo_msg->header.stamp,
                             caminfo_msg->header.frame_id, "kinfu_origin"));

      if (slam_)
      {
        // use kinfu as SLAM, and publish tf: map -> fixed_frame_id_ (usually odom_init)
        try
        {
          tf::StampedTransform tf_odom_to_camera;
          tf_listener_->lookupTransform(
            fixed_frame_id_, caminfo_msg->header.frame_id, ros::Time(0), tf_odom_to_camera);
          Eigen::Affine3f odom_to_camera;
          tf::transformTFToEigen(tf_odom_to_camera, odom_to_camera);

          if (frame_idx_ == 1)
          {
            odom_init_to_kinfu_origin_ = odom_to_camera * camera_to_kinfu_origin;
          }

          Eigen::Affine3f map_to_odom;
          // map_to_odom * odom_to_camera * camera_to_kinfu_origin == odom_init_to_kinfu_origin_
          map_to_odom = odom_init_to_kinfu_origin_ * (odom_to_camera * camera_to_kinfu_origin).inverse();
          tf::StampedTransform tf_map_to_odom;
          tf::transformEigenToTF(map_to_odom, tf_map_to_odom);
          tf_map_to_odom.setRotation(tf_map_to_odom.getRotation().normalized());
          tf_broadcaster_.sendTransform(
            tf::StampedTransform(tf_map_to_odom, caminfo_msg->header.stamp, "map", fixed_frame_id_));
        }
        catch (const tf::TransformException& e)
        {
          NODELET_FATAL("%s", e.what());
        }
      }
    }

    // publish depth image
    if (pub_depth_.getNumSubscribers() > 0)
    {
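      // Raycast the TSDF volume from the current camera pose to synthesize a
      // depth image of the reconstructed scene.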
      pcl::gpu::kinfuLS::KinfuTracker::DepthMap depth_gpu;
      Eigen::Affine3f camera_pose = kinfu_->getCameraPose();
      if (!raycaster_)
      {
        raycaster_ = pcl::gpu::kinfuLS::RayCaster::Ptr(
          new pcl::gpu::kinfuLS::RayCaster(
            /*height=*/kinfu_->rows(), /*width=*/kinfu_->cols(),
            /*fx=*/caminfo_msg->K[0], /*fy=*/caminfo_msg->K[4],
            /*cx=*/caminfo_msg->K[2], /*cy=*/caminfo_msg->K[5]));
      }
      raycaster_->run(kinfu_->volume(), camera_pose, kinfu_->getCyclicalBufferStructure());
      raycaster_->generateDepthImage(depth_gpu);

      int cols;
      std::vector<unsigned short> data;
      depth_gpu.download(data, cols);

      sensor_msgs::Image output_depth_msg;
      sensor_msgs::fillImage(output_depth_msg,
                             enc::TYPE_16UC1,
                             depth_gpu.rows(),
                             depth_gpu.cols(),
                             depth_gpu.cols() * 2,
                             reinterpret_cast<unsigned short*>(&data[0]));
      output_depth_msg.header = caminfo_msg->header;
      pub_depth_.publish(output_depth_msg);
    }

    // publish rendered image
    if (pub_rendered_image_.getNumSubscribers() > 0)
    {
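      // Render a view of the volume from the current camera pose; with color
      // integration enabled, paint3DView overlays the most recent color image
      // onto the rendering.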
      pcl::gpu::kinfuLS::KinfuTracker::View view_device;
      std::vector<pcl::gpu::kinfuLS::PixelRGB> view_host;
      kinfu_->getImage(view_device);

      if (integrate_color_)
      {
        pcl::gpu::kinfuLS::paint3DView(colors_device_, view_device);
      }

      int cols;
      view_device.download(view_host, cols);

      sensor_msgs::Image rendered_image_msg;
      sensor_msgs::fillImage(rendered_image_msg,
                             enc::RGB8,
                             view_device.rows(),
                             view_device.cols(),
                             view_device.cols() * 3,
                             reinterpret_cast<unsigned char*>(&view_host[0]));
      rendered_image_msg.header = caminfo_msg->header;
      pub_rendered_image_.publish(rendered_image_msg);
    }

    // publish cloud
    if (pub_cloud_.getNumSubscribers() > 0)
    {
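      // Extract the surface points from the TSDF volume on the GPU, then
      // download them (and, when color is integrated, per-point colors) to the host.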
      pcl::gpu::DeviceArray<pcl::PointXYZ> cloud_buffer_device;
      pcl::gpu::DeviceArray<pcl::PointXYZ> extracted = kinfu_->volume().fetchCloud(cloud_buffer_device);

      pcl::PointCloud<pcl::PointXYZ>::Ptr cloud_xyz(new pcl::PointCloud<pcl::PointXYZ>());
      extracted.download(cloud_xyz->points);
      cloud_xyz->width = static_cast<int>(cloud_xyz->points.size());
      cloud_xyz->height = 1;

      sensor_msgs::PointCloud2 output_cloud_msg;
      if (integrate_color_)
      {
        pcl::gpu::DeviceArray<pcl::RGB> point_colors_device;
        kinfu_->colorVolume().fetchColors(extracted, point_colors_device);

        pcl::PointCloud<pcl::RGB>::Ptr cloud_rgb(new pcl::PointCloud<pcl::RGB>());
        point_colors_device.download(cloud_rgb->points);
        cloud_rgb->width = static_cast<int>(cloud_rgb->points.size());
        cloud_rgb->height = 1;

        pcl::PointCloud<pcl::PointXYZRGB>::Ptr cloud(new pcl::PointCloud<pcl::PointXYZRGB>());
        cloud->points.resize(cloud_xyz->width);
        pcl::concatenateFields(*cloud_xyz, *cloud_rgb, *cloud);
        pcl::toROSMsg(*cloud, output_cloud_msg);
      }
      else
      {
        pcl::toROSMsg(*cloud_xyz, output_cloud_msg);
      }
      output_cloud_msg.header.frame_id = "kinfu_origin";
      pub_cloud_.publish(output_cloud_msg);
    }
  }

  bool
  Kinfu::resetCallback(std_srvs::Empty::Request& req, std_srvs::Empty::Response& res)
  {
    boost::mutex::scoped_lock lock(mutex_);
    kinfu_.reset();
    textures_.clear();
    cameras_.clear();
    NODELET_INFO("Reset kinect fusion by request.");
    return true;
  }

  bool
  Kinfu::saveMeshWithContextCallback(
    jsk_recognition_msgs::SaveMesh::Request& req, jsk_recognition_msgs::SaveMesh::Response& res)
  {
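    // Build a mesh cropped to the bounding box given in the request; when
    // req.ground_frame_id is set, the occluded bottom face is filled in as well.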
    pcl::PolygonMesh polygon_mesh = createPolygonMesh(req.box, req.ground_frame_id);

    boost::filesystem::path dir(integrate_color_ ? save_dir_ + "/textures" : save_dir_);
    if (boost::filesystem::create_directories(dir))
    {
      NODELET_INFO("Created save_dir: %s", save_dir_.c_str());
    }

    std::string out_file = save_dir_ + "/mesh.obj";
    if (integrate_color_ && n_textures_ != 0)
    {
      pcl::TextureMesh texture_mesh;
      if (n_textures_ > 0)
      {
        std::vector<cv::Mat> textures(textures_.end() - n_textures_, textures_.end());
        pcl::texture_mapping::CameraVector cameras(cameras_.end() - n_textures_, cameras_.end());
        texture_mesh = convertToTextureMesh(polygon_mesh, textures, cameras);
      }
      else
      {
        texture_mesh = convertToTextureMesh(polygon_mesh, textures_, cameras_);
      }
      pcl::io::saveOBJFile(out_file, texture_mesh, 5);
    }
    else
    {
      pcl::io::saveOBJFile(out_file, polygon_mesh);
    }
    NODELET_INFO("Saved mesh file: %s", out_file.c_str());
    return true;
  }

  bool
  Kinfu::saveMeshCallback(std_srvs::Empty::Request& req, std_srvs::Empty::Response& res)
  {
    pcl::PolygonMesh polygon_mesh = createPolygonMesh();

    boost::filesystem::path dir(integrate_color_ ? save_dir_ + "/textures" : save_dir_);
    if (boost::filesystem::create_directories(dir))
    {
      NODELET_INFO("Created save_dir: %s", save_dir_.c_str());
    }

    std::string out_file = save_dir_ + "/mesh.obj";
    if (integrate_color_ && n_textures_ != 0)
    {
      pcl::TextureMesh texture_mesh;
      if (n_textures_ > 0)
      {
        std::vector<cv::Mat> textures(textures_.end() - n_textures_, textures_.end());
        pcl::texture_mapping::CameraVector cameras(cameras_.end() - n_textures_, cameras_.end());
        texture_mesh = convertToTextureMesh(polygon_mesh, textures, cameras);
      }
      else
      {
        texture_mesh = convertToTextureMesh(polygon_mesh, textures_, cameras_);
      }
      pcl::io::saveOBJFile(out_file, texture_mesh, 5);
    }
    else
    {
      pcl::io::saveOBJFile(out_file, polygon_mesh);
    }
    NODELET_INFO("Saved mesh file: %s", out_file.c_str());
    return true;
  }

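  // Convert a polygon mesh into a pcl::TextureMesh with one material per
  // saved camera snapshot plus one extra material for faces seen by no
  // camera. Texture images are written under <save_dir_>/textures/.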
  pcl::TextureMesh
  Kinfu::convertToTextureMesh(const pcl::PolygonMesh& triangles,
                              const std::vector<cv::Mat> textures,
                              pcl::texture_mapping::CameraVector cameras)
  {
    pcl::PointCloud<pcl::PointXYZ>::Ptr cloud(new pcl::PointCloud<pcl::PointXYZ>);
    pcl::fromPCLPointCloud2(triangles.cloud, *cloud);

    // Create the texture mesh object that will contain our UV-mapped mesh
    pcl::TextureMesh mesh;
    mesh.cloud = triangles.cloud;
    std::vector<pcl::Vertices> polygon_1;

    // push faces into the texture mesh object
    polygon_1.resize(triangles.polygons.size());
    for (size_t i = 0; i < triangles.polygons.size(); ++i)
    {
      polygon_1[i] = triangles.polygons[i];
    }
    mesh.tex_polygons.push_back(polygon_1);
    NODELET_INFO("Input mesh contains %lu faces, %lu vertices and %lu textures.",
                 mesh.tex_polygons[0].size(), cloud->points.size(), cameras.size());

    // Create materials for each texture (and one extra for occluded faces)
    mesh.tex_materials.resize(cameras.size() + 1);
    for (size_t i = 0; i <= cameras.size(); ++i)
    {
      pcl::TexMaterial mesh_material;
      mesh_material.tex_Ka.r = 0.2f;
      mesh_material.tex_Ka.g = 0.2f;
      mesh_material.tex_Ka.b = 0.2f;

      mesh_material.tex_Kd.r = 0.8f;
      mesh_material.tex_Kd.g = 0.8f;
      mesh_material.tex_Kd.b = 0.8f;

      mesh_material.tex_Ks.r = 1.0f;
      mesh_material.tex_Ks.g = 1.0f;
      mesh_material.tex_Ks.b = 1.0f;

      mesh_material.tex_d = 1.0f;
      mesh_material.tex_Ns = 75.0f;
      mesh_material.tex_illum = 2;

      std::stringstream tex_name;
      tex_name << "material_" << i;
      tex_name >> mesh_material.tex_name;

      if (i < cameras.size())
      {
        std::stringstream ss;
        ss << "textures/" << i << ".jpg";
        std::string texture_file = ss.str();
        cv::imwrite(save_dir_ + "/" + texture_file, textures[i]);
        cameras[i].texture_file = texture_file;
        mesh_material.tex_file = texture_file;
      }
      else
      {
        std::string texture_file = "textures/occluded.jpg";
        cv::imwrite(save_dir_ + "/" + texture_file,
                    cv::Mat::zeros(textures[0].rows, textures[0].cols, CV_8UC1));
        mesh_material.tex_file = texture_file;
      }

      mesh.tex_materials[i] = mesh_material;
    }

    // sort faces
    pcl::TextureMapping<pcl::PointXYZ> tm;  // TextureMapping object that will perform the sort
    tm.textureMeshwithMultipleCameras(mesh, cameras);

    // compute normals for the mesh
    pcl::NormalEstimation<pcl::PointXYZ, pcl::Normal> n;
    pcl::PointCloud<pcl::Normal>::Ptr normals(new pcl::PointCloud<pcl::Normal>);
    pcl::search::KdTree<pcl::PointXYZ>::Ptr tree(new pcl::search::KdTree<pcl::PointXYZ>);
    tree->setInputCloud(cloud);
    n.setInputCloud(cloud);
    n.setSearchMethod(tree);
    n.setKSearch(20);
    n.compute(*normals);
    pcl::PointCloud<pcl::PointNormal>::Ptr cloud_with_normals(new pcl::PointCloud<pcl::PointNormal>);
    pcl::concatenateFields(*cloud, *normals, *cloud_with_normals);
    pcl::toPCLPointCloud2(*cloud_with_normals, mesh.cloud);

    return mesh;
  }

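  // Run marching cubes over the whole TSDF volume and pack the resulting
  // triangle soup into a pcl::PolygonMesh (every three consecutive points
  // form one face).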
  pcl::PolygonMesh
  Kinfu::createPolygonMesh()
  {
    // create triangles
    if (!marching_cubes_)
    {
      marching_cubes_ = pcl::gpu::kinfuLS::MarchingCubes::Ptr(new pcl::gpu::kinfuLS::MarchingCubes());
    }
    pcl::gpu::DeviceArray<pcl::PointXYZ> triangles_buffer_device;
    pcl::gpu::DeviceArray<pcl::PointXYZ> triangles_device =
      marching_cubes_->run(kinfu_->volume(), triangles_buffer_device);

    if (triangles_device.empty())
    {
      return pcl::PolygonMesh();
    }

    pcl::PointCloud<pcl::PointXYZ>::Ptr cloud(new pcl::PointCloud<pcl::PointXYZ>);
    cloud->width = static_cast<int>(triangles_device.size());
    cloud->height = 1;
    cloud->header.frame_id = "kinfu_origin";
    triangles_device.download(cloud->points);

    pcl::PolygonMesh mesh;
    pcl::toPCLPointCloud2(*cloud, mesh.cloud);
    mesh.polygons.resize(triangles_device.size() / 3);
    for (size_t i = 0; i < mesh.polygons.size(); ++i)
    {
      pcl::Vertices v;
      v.vertices.push_back(i * 3 + 0);
      v.vertices.push_back(i * 3 + 2);
      v.vertices.push_back(i * 3 + 1);
      mesh.polygons[i] = v;
    }
    return mesh;
  }

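  // Same as above, but crops the mesh to box_msg (transformed into the
  // kinfu_origin frame) and, when ground_frame_id is given, closes the bottom
  // of the mesh by projecting the cropped points onto its lowest plane.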
  pcl::PolygonMesh
  Kinfu::createPolygonMesh(const jsk_recognition_msgs::BoundingBox& box_msg, const std::string& ground_frame_id)
  {
    // create triangles
    if (!marching_cubes_)
    {
      marching_cubes_ = pcl::gpu::kinfuLS::MarchingCubes::Ptr(new pcl::gpu::kinfuLS::MarchingCubes());
    }
    pcl::gpu::DeviceArray<pcl::PointXYZ> triangles_buffer_device;
    pcl::gpu::DeviceArray<pcl::PointXYZ> triangles_device =
      marching_cubes_->run(kinfu_->volume(), triangles_buffer_device);

    if (triangles_device.empty())
    {
      return pcl::PolygonMesh();
    }

    // get original polygon mesh
    pcl::PointCloud<pcl::PointXYZ>::Ptr cloud(new pcl::PointCloud<pcl::PointXYZ>());
    cloud->width = static_cast<int>(triangles_device.size());
    cloud->height = 1;
    cloud->header.frame_id = "kinfu_origin";
    triangles_device.download(cloud->points);

    // get tf
    Eigen::Affine3f kinfu_origin_to_box = Eigen::Affine3f::Identity();
    if (box_msg.header.frame_id != "kinfu_origin")
    {
      tf::StampedTransform tf_kinfu_origin_to_box;
      tf_listener_->lookupTransform("kinfu_origin", box_msg.header.frame_id, ros::Time(0), tf_kinfu_origin_to_box);
      tf::transformTFToEigen(tf_kinfu_origin_to_box, kinfu_origin_to_box);
    }

    // transform bounding box to kinfu frame
    jsk_recognition_msgs::BoundingBox transformed_box_msg;
    transformed_box_msg.dimensions = box_msg.dimensions;
    Eigen::Affine3f box_pose;
    tf::poseMsgToEigen(box_msg.pose, box_pose);
    box_pose = kinfu_origin_to_box * box_pose;
    tf::poseEigenToMsg(box_pose, transformed_box_msg.pose);
    transformed_box_msg.header.frame_id = "kinfu_origin";

    // crop cloud
    std::vector<int> indices;
    jsk_recognition_utils::cropPointCloud<pcl::PointXYZ>(cloud, transformed_box_msg, &indices);

    // generate filtered polygon mesh
    pcl::PolygonMesh mesh;
    pcl::PointCloud<pcl::PointXYZ>::Ptr cloud_in_box(new pcl::PointCloud<pcl::PointXYZ>());
    cloud_in_box->width = static_cast<int>(indices.size());
    cloud_in_box->height = 1;
    cloud_in_box->points.resize(indices.size());
    for (size_t i = 0; i < indices.size(); i++)
    {
      cloud_in_box->points[i] = cloud->points[indices[i]];
      // keep a face only when all three of its vertices survived the crop
      if (i + 2 < indices.size() &&
          indices[i] % 3 == 0 && (indices[i] + 1) == indices[i + 1] && (indices[i] + 2) == indices[i + 2])
      {
        pcl::Vertices v;
        v.vertices.push_back(i + 0);
        v.vertices.push_back(i + 2);
        v.vertices.push_back(i + 1);
        mesh.polygons.push_back(v);
      }
    }

    // Fill the bottom face that is occluded when the object sits on a plane
    // (e.g. a tabletop).
    if (!ground_frame_id.empty())
    {
      pcl::PointCloud<pcl::PointXYZ>::Ptr cloud_ground_frame(new pcl::PointCloud<pcl::PointXYZ>());
      tf::StampedTransform tf_transform;
      tf_listener_->lookupTransform(ground_frame_id, "kinfu_origin", ros::Time(0), tf_transform);
      Eigen::Affine3f transform;
      tf::transformTFToEigen(tf_transform, transform);
      pcl::transformPointCloud(*cloud_in_box, *cloud_ground_frame, transform);

      pcl::PointXYZ min_pt, max_pt;
      pcl::getMinMax3D(*cloud_ground_frame, min_pt, max_pt);

      pcl::PointCloud<pcl::PointXYZ>::Ptr cloud_occluded(new pcl::PointCloud<pcl::PointXYZ>());
      for (size_t i = 0; i < cloud_ground_frame->points.size(); i++)
      {
        // project each visible point onto the lowest plane of the cropped cloud
        pcl::PointXYZ pt_visible = cloud_ground_frame->points[i];
        pcl::PointXYZ pt_occluded(pt_visible.x, pt_visible.y, min_pt.z);
        cloud_occluded->points.push_back(pt_occluded);
        if (i >= 3)
        {
          pcl::Vertices v;
          v.vertices.push_back(cloud_in_box->width + i - 2);
          v.vertices.push_back(cloud_in_box->width + i - 1);
          v.vertices.push_back(cloud_in_box->width + i - 0);
          mesh.polygons.push_back(v);
        }
      }
      cloud_occluded->width = static_cast<int>(cloud_occluded->points.size());
      cloud_occluded->height = 1;
      pcl::transformPointCloud(*cloud_occluded, *cloud_occluded, transform.inverse());
      *cloud_in_box = *cloud_in_box + *cloud_occluded;
    }

    pcl::toPCLPointCloud2(*cloud_in_box, mesh.cloud);
    return mesh;
  }
}  // namespace jsk_pcl_ros

#include <pluginlib/class_list_macros.h>
PLUGINLIB_EXPORT_CLASS(jsk_pcl_ros::Kinfu, nodelet::Nodelet);

