softkinetic_start.cpp
#ifdef _MSC_VER
#include <windows.h>
#endif

#include <stdio.h>
#include <signal.h>
#include <algorithm> // std::min, used to clamp UV-mapped pixel coordinates
#include <cfloat>    // FLT_MAX, the marker for invalid UV map entries
#include <cstring>   // std::memcpy
#include <limits>    // std::numeric_limits, NaN for saturated depth pixels
#include <vector>
#include <exception>
#include <iostream>
#include <iomanip>
#include <fstream>

// ROS include files
#include <ros/ros.h>
#include <ros/callback_queue.h>
#include <std_msgs/Int32.h>
#include <sensor_msgs/PointCloud2.h>
#include <sensor_msgs/point_cloud_conversion.h>
#include <image_transport/image_transport.h>

#include <pcl_ros/point_cloud.h>
#include <pcl_ros/io/pcd_io.h>
#include <pcl/io/io.h>
#include <pcl/point_types.h>
#include <pcl/range_image/range_image.h>
#include <pcl/filters/radius_outlier_removal.h>
#include <pcl/filters/voxel_grid.h>
#include <pcl/filters/passthrough.h>
#include <pcl/filters/frustum_culling.h>

#include <message_filters/subscriber.h>
#include <message_filters/time_synchronizer.h>
#include <message_filters/sync_policies/approximate_time.h>

#include <cv_bridge/cv_bridge.h>
#include <opencv2/imgproc/imgproc.hpp>

#include <sensor_msgs/image_encodings.h>
#include <camera_info_manager/camera_info_manager.h>

#include <dynamic_reconfigure/server.h>
#include <softkinetic_camera/SoftkineticConfig.h>

#include <DepthSense.hxx>

using namespace DepthSense;
using namespace message_filters;
using namespace std;

namespace enc = sensor_msgs::image_encodings;

Context g_context;
DepthNode g_dnode;
ColorNode g_cnode;
AudioNode g_anode;

uint32_t g_aFrames = 0;
uint32_t g_cFrames = 0;
uint32_t g_dFrames = 0;

bool g_bDeviceFound = false;

ros::Publisher pub_cloud;
ros::Publisher pub_rgb_info;
ros::Publisher pub_depth_info;
image_transport::Publisher pub_rgb;
image_transport::Publisher pub_mono;
image_transport::Publisher pub_depth;

sensor_msgs::CameraInfo rgb_info;
sensor_msgs::CameraInfo depth_info;

sensor_msgs::Image img_rgb;
sensor_msgs::Image img_mono;
sensor_msgs::Image img_depth;
sensor_msgs::PointCloud2 cloud;

cv::Mat cv_img_rgb; // OpenCV image containers
cv::Mat cv_img_yuy2;
cv::Mat cv_img_mono;
cv::Mat cv_img_depth;

std_msgs::Int32 test_int; // currently unused

/* Confidence threshold for DepthNode configuration */
int confidence_threshold;

/* Parameters for downsampling the cloud */
bool use_voxel_grid_filter;
double voxel_grid_size;

/* Parameters for radius outlier filter */
bool use_radius_outlier_filter;
double search_radius;
int min_neighbours;

/* Parameters for passthrough filter */
bool use_passthrough_filter;
double limit_min;
double limit_max;

/* Parameters for frustum culling filter */
bool use_frustum_culling_filter;
double hfov;
double vfov;
double near_plane;
double far_plane;

/* Shutdown request */
bool ros_node_shutdown = false;

/* Depth sensor parameters */
bool depth_enabled;
DepthSense::DepthNode::CameraMode depth_mode;
DepthSense::FrameFormat depth_frame_format;
int depth_frame_rate;

/* Color sensor parameters */
bool color_enabled;
DepthSense::CompressionType color_compression;
DepthSense::FrameFormat color_frame_format;
int color_frame_rate;

/* Serial number parameters */
bool use_serial;
std::string serial;

DepthSense::DepthNode::CameraMode depthMode(const std::string& depth_mode_str)
{
  if ( depth_mode_str == "long" )
    return DepthNode::CAMERA_MODE_LONG_RANGE;
  else // if ( depth_mode_str == "close" )
    return DepthNode::CAMERA_MODE_CLOSE_MODE;
}

DepthSense::FrameFormat depthFrameFormat(const std::string& depth_frame_format_str)
{
  if ( depth_frame_format_str == "QQVGA" )
    return FRAME_FORMAT_QQVGA;
  else if ( depth_frame_format_str == "QVGA" )
    return FRAME_FORMAT_QVGA;
  else // if ( depth_frame_format_str == "VGA" )
    return FRAME_FORMAT_VGA;
}

DepthSense::CompressionType colorCompression(const std::string& color_compression_str)
{
  if ( color_compression_str == "YUY2" )
    return COMPRESSION_TYPE_YUY2;
  else // if ( color_compression_str == "MJPEG" )
    return COMPRESSION_TYPE_MJPEG;
}

DepthSense::FrameFormat colorFrameFormat(const std::string& color_frame_format_str)
{
  if ( color_frame_format_str == "QQVGA" )
    return FRAME_FORMAT_QQVGA;
  else if ( color_frame_format_str == "QVGA" )
    return FRAME_FORMAT_QVGA;
  else if ( color_frame_format_str == "VGA" )
    return FRAME_FORMAT_VGA;
  else if ( color_frame_format_str == "NHD" )
    return FRAME_FORMAT_NHD;
  else // if ( color_frame_format_str == "WXGA_H" )
    return FRAME_FORMAT_WXGA_H;
}
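
/* Note that each helper above treats its last option as a catch-all: an
 * unrecognized string silently maps to the default (e.g. depthMode("typo")
 * returns CAMERA_MODE_CLOSE_MODE). A stricter variant -- a sketch, not part
 * of this driver -- would reject bad parameter values up front:
 *
 *   DepthSense::DepthNode::CameraMode depthModeStrict(const std::string& s)
 *   {
 *     if (s == "long")  return DepthNode::CAMERA_MODE_LONG_RANGE;
 *     if (s == "close") return DepthNode::CAMERA_MODE_CLOSE_MODE;
 *     throw std::invalid_argument("unknown depth_mode: " + s); // needs <stdexcept>
 *   }
 */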

/*----------------------------------------------------------------------------*/
// New audio sample event handler
void onNewAudioSample(AudioNode node, AudioNode::NewSampleReceivedData data)
{
  //printf("A#%u: %d\n",g_aFrames,data.audioData.size());
  ++g_aFrames;
}

/*----------------------------------------------------------------------------*/
// New color sample event handler
void onNewColorSample(ColorNode node, ColorNode::NewSampleReceivedData data)
{
  // If this is the first sample, fill in all the constant fields of the image
  // and camera info messages once, so they need not be recomputed on every frame
  if (img_rgb.data.size() == 0)
  {
    // Create two sensor_msgs::Image for color and grayscale images on new camera image
    int32_t w, h;
    FrameFormat_toResolution(data.captureConfiguration.frameFormat, &w, &h);

    img_rgb.width  = w;
    img_rgb.height = h;
    img_rgb.encoding = "bgr8";
    img_rgb.data.resize(w * h * 3);
    img_rgb.step = w * 3;

    img_mono.width  = w;
    img_mono.height = h;
    img_mono.encoding = "mono8";
    img_mono.data.resize(w * h);
    img_mono.step = w;

    cv_img_rgb.create(h, w, CV_8UC3);
    if (color_compression == COMPRESSION_TYPE_YUY2)
      cv_img_yuy2.create(h, w, CV_8UC2);
  }

  if (color_compression == COMPRESSION_TYPE_YUY2)
  {
    // Color images come compressed as YUY2, so we must convert them to BGR
    cv_img_yuy2.data = reinterpret_cast<uchar *>(
          const_cast<uint8_t *>(static_cast<const uint8_t *>(data.colorMap)));
    cvtColor(cv_img_yuy2, cv_img_rgb, CV_YUV2BGR_YUY2);
  }
  else
  {
    // Nothing special to do for the MJPEG stream; just cast and reuse the camera data
    cv_img_rgb.data = reinterpret_cast<uchar *>(
          const_cast<uint8_t *>(static_cast<const uint8_t *>(data.colorMap)));
  }
  // Also create a grayscale image from the BGR one
  cvtColor(cv_img_rgb, cv_img_mono, CV_BGR2GRAY);

  // Dump both into the ROS image messages
  std::memcpy(img_rgb.data.data(),  cv_img_rgb.ptr(),  img_rgb.data.size());
  std::memcpy(img_mono.data.data(), cv_img_mono.ptr(), img_mono.data.size());

  // Ensure that all the images and camera info are timestamped and have the proper frame id
  img_rgb.header.stamp = ros::Time::now();
  img_mono.header      = img_rgb.header;
  rgb_info.header      = img_rgb.header;

  // Publish the rgb and mono images and camera info
  pub_rgb.publish(img_rgb);
  pub_mono.publish(img_mono);

  pub_rgb_info.publish(rgb_info);

  ++g_cFrames;
}

/*----------------------------------------------------------------------------*/

void downsampleCloud(pcl::PointCloud<pcl::PointXYZRGB>::Ptr cloud_to_filter)
{
  pcl::VoxelGrid<pcl::PointXYZRGB> sor;
  sor.setInputCloud(cloud_to_filter);
  sor.setLeafSize(voxel_grid_size, voxel_grid_size, voxel_grid_size);
  // apply filter
  ROS_DEBUG_STREAM("Starting downsampling");
  int before = cloud_to_filter->size();
  double t_start = ros::Time::now().toSec();
  sor.filter(*cloud_to_filter);
  double elapsed = ros::Time::now().toSec() - t_start;
  int after = cloud_to_filter->size();
  ROS_DEBUG_STREAM("downsampled in " << elapsed << " seconds; "
                << "points reduced from " << before << " to " << after);
}
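
/* pcl::VoxelGrid partitions space into cubic leaves of side voxel_grid_size
 * (in the cloud's units, meters here) and replaces all points inside each
 * leaf with their centroid, so e.g. voxel_grid_size = 0.01 keeps at most one
 * point per occupied 1 cm cell. */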

void filterCloudRadiusBased(pcl::PointCloud<pcl::PointXYZRGB>::Ptr cloud_to_filter)
{
  // Radius outlier removal filter:
  pcl::RadiusOutlierRemoval<pcl::PointXYZRGB> ror;
  ror.setInputCloud(cloud_to_filter);
  ror.setRadiusSearch(search_radius);
  ror.setMinNeighborsInRadius(min_neighbours);
  ror.setKeepOrganized(true);
  // apply filter
  ROS_DEBUG_STREAM("Starting radius outlier removal filtering");
  int before = cloud_to_filter->size();
  double t_start = ros::Time::now().toSec();
  ror.filter(*cloud_to_filter);
  double elapsed = ros::Time::now().toSec() - t_start;
  int after = cloud_to_filter->size();
  ROS_DEBUG_STREAM("filtered in " << elapsed << " seconds; "
                << "points reduced from " << before << " to " << after);
}
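
/* Since setKeepOrganized(true) is used, filtered-out points are overwritten
 * with NaN coordinates rather than erased, keeping the cloud's width x height
 * layout; as a result the before/after sizes logged above stay equal for
 * this filter. */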

void filterPassThrough(pcl::PointCloud<pcl::PointXYZRGB>::Ptr cloud_to_filter)
{
  // Passthrough filter:
  pcl::PassThrough<pcl::PointXYZRGB> pt;
  pt.setInputCloud(cloud_to_filter);
  pt.setFilterFieldName("x");
  pt.setFilterLimits(limit_min, limit_max);
  pt.setKeepOrganized(true);
  // apply filter
  ROS_DEBUG_STREAM("Starting passthrough filtering");
  int before = cloud_to_filter->size();
  double t_start = ros::Time::now().toSec();
  pt.filter(*cloud_to_filter);
  double elapsed = ros::Time::now().toSec() - t_start;
  int after = cloud_to_filter->size();
  ROS_DEBUG_STREAM("filtered in " << elapsed << " seconds; "
                << "points reduced from " << before << " to " << after);
}
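
/* The "x" field filtered here is the kinect-like forward axis produced in
 * onNewDepthSample() below (x = DepthSense z), so limit_min and limit_max are
 * effectively the minimum and maximum distances from the camera in meters. */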

void filterFrustumCulling(pcl::PointCloud<pcl::PointXYZRGB>::Ptr cloud_to_filter)
{
  // Frustum culling filter:
  pcl::FrustumCulling<pcl::PointXYZRGB> fc;
  fc.setInputCloud(cloud_to_filter);
  // PCL assumes a coordinate system where X is forward, Y is up, and Z is right.
  // Therefore we must convert from the traditional camera coordinate system
  // (X right, Y down, Z forward), which is used by this RGBD camera.
  // See:
  // http://docs.pointclouds.org/trunk/classpcl_1_1_frustum_culling.html#ae22a939225ebbe244fcca8712133fcf3
  // XXX We are not using the same frame for the pointcloud as kinect! see issue #46
  Eigen::Matrix4f pose = Eigen::Matrix4f::Identity();
  Eigen::Matrix4f T;
  T << 1, 0, 0, 0,
       0, 0, 1, 0,
       0,-1, 0, 0,
       0, 0, 0, 1;
  pose *= T;

  fc.setCameraPose(pose);
  fc.setHorizontalFOV(hfov);
  fc.setVerticalFOV(vfov);
  fc.setNearPlaneDistance(near_plane);
  fc.setFarPlaneDistance(far_plane);
  fc.setKeepOrganized(true);
  // apply filter
  ROS_DEBUG_STREAM("Starting frustum culling filtering");
  int before = cloud_to_filter->size();
  double t_start = ros::Time::now().toSec();
  fc.filter(*cloud_to_filter);
  double elapsed = ros::Time::now().toSec() - t_start;
  int after = cloud_to_filter->size();
  ROS_DEBUG_STREAM("filtered in " << elapsed << " seconds; "
                << "points reduced from " << before << " to " << after);
}
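
/* pcl::FrustumCulling expects hfov and vfov in degrees, and the near/far
 * plane distances in the cloud's units (meters here). */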

void setupCameraInfo(const DepthSense::IntrinsicParameters& params, sensor_msgs::CameraInfo& cam_info)
{
  cam_info.distortion_model = "plumb_bob";
  cam_info.height = params.height;
  cam_info.width  = params.width;

  // Distortion parameters D = [k1, k2, t1, t2, k3]
  cam_info.D.resize(5);
  cam_info.D[0] = params.k1;
  cam_info.D[1] = params.k2;
  cam_info.D[2] = params.p1;
  cam_info.D[3] = params.p2;
  cam_info.D[4] = params.k3;

  // Intrinsic camera matrix for the raw (distorted) images:
  //     [fx  0 cx]
  // K = [ 0 fy cy]
  //     [ 0  0  1]
  cam_info.K[0] = params.fx;
  cam_info.K[2] = params.cx;
  cam_info.K[4] = params.fy;
  cam_info.K[5] = params.cy;
  cam_info.K[8] = 1.0;

  // Rectification matrix (stereo cameras only)
  //     [1 0 0]
  // R = [0 1 0]
  //     [0 0 1]
  cam_info.R[0] = 1.0;
  cam_info.R[4] = 1.0;
  cam_info.R[8] = 1.0;

  // Projection/camera matrix; we use the same values as in the raw image, as we are not
  // applying any correction (WARN: is this ok?). For monocular cameras, Tx = Ty = 0.
  //     [fx'  0  cx' Tx]
  // P = [ 0  fy' cy' Ty]
  //     [ 0   0   1   0]
  cam_info.P[0] = params.fx;
  cam_info.P[2] = params.cx;
  cam_info.P[5] = params.fy;
  cam_info.P[6] = params.cy;
  cam_info.P[10] = 1.0;
}
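
/* Given these intrinsics, a 3D point (X, Y, Z) in the optical frame projects
 * to pixel (u, v) with u = fx * X / Z + cx and v = fy * Y / Z + cy, ignoring
 * the plumb_bob distortion terms; this is the standard pinhole model that
 * consumers of these CameraInfo messages (e.g. image_geometry) assume. */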

// New depth sample event handler
void onNewDepthSample(DepthNode node, DepthNode::NewSampleReceivedData data)
{
  // If this is the first sample, fill in all the constant fields of the image
  // and camera info messages once, so they need not be recomputed on every frame
  if (img_depth.data.size() == 0)
  {
    FrameFormat_toResolution(data.captureConfiguration.frameFormat,
                             (int32_t*)&img_depth.width, (int32_t*)&img_depth.height);
    img_depth.encoding = sensor_msgs::image_encodings::TYPE_32FC1;
    img_depth.is_bigendian = 0;
    img_depth.step = sizeof(float) * img_depth.width;
    std::size_t data_size = img_depth.width * img_depth.height;
    img_depth.data.resize(data_size * sizeof(float));

    if (rgb_info.D.size() == 0)
    {
      // User didn't provide a calibration file for the color camera, so
      // fill camera info with the parameters provided by the camera itself
      setupCameraInfo(data.stereoCameraParameters.colorIntrinsics, rgb_info);
    }

    if (depth_info.D.size() == 0)
    {
      // User didn't provide a calibration file for the depth camera, so
      // fill camera info with the parameters provided by the camera itself
      setupCameraInfo(data.stereoCameraParameters.depthIntrinsics, depth_info);
    }
  }

  int32_t w = img_depth.width;
  int32_t h = img_depth.height;

  pcl::PointCloud<pcl::PointXYZRGB>::Ptr current_cloud(new pcl::PointCloud<pcl::PointXYZRGB>());
  current_cloud->header.frame_id = cloud.header.frame_id;
  current_cloud->width  = w;
  current_cloud->height = h;
  current_cloud->is_dense = true;
  current_cloud->points.resize(w * h);

  ++g_dFrames;

  // Dump the depth map into the image message; saturated pixels still need post-processing
  std::memcpy(img_depth.data.data(), data.depthMapFloatingPoint, img_depth.data.size());

  for (int count = 0; count < w * h; count++)
  {
    // Saturated pixels have value -1 on depthMapFloatingPoint, but NaN on openni
    if (data.depthMapFloatingPoint[count] < 0.0)
    {
      *reinterpret_cast<float*>(&img_depth.data[count*sizeof(float)]) =
          std::numeric_limits<float>::quiet_NaN();

      // We don't process these pixels, as they correspond to all-zero 3D points; but
      // we keep them in the pointcloud so the downsampling filter can be applied
      continue;
    }

    // Convert softkinetic vertices into a kinect-like coordinates pointcloud
    current_cloud->points[count].x =   data.verticesFloatingPoint[count].z;
    current_cloud->points[count].y = - data.verticesFloatingPoint[count].x;
    current_cloud->points[count].z =   data.verticesFloatingPoint[count].y;

    // Get the mapping between depth map and color map, assuming we have a RGB image
    if (img_rgb.data.size() == 0)
    {
      ROS_WARN_THROTTLE(2.0, "Color image is empty; pointcloud will be colorless");
      continue;
    }
    UV uv = data.uvMap[count];
    if (uv.u != -FLT_MAX && uv.v != -FLT_MAX)
    {
      // Within bounds: the depth fov is significantly wider than the color one,
      // so there are black points in the borders of the pointcloud
      int x_pos = (int)round(uv.u * img_rgb.width);
      int y_pos = (int)round(uv.v * img_rgb.height);
      // Clamp to the image bounds: rounding can yield width/height for uv
      // values very close to 1.0
      x_pos = std::min(x_pos, (int)img_rgb.width - 1);
      y_pos = std::min(y_pos, (int)img_rgb.height - 1);
      current_cloud->points[count].b = cv_img_rgb.at<cv::Vec3b>(y_pos, x_pos)[0];
      current_cloud->points[count].g = cv_img_rgb.at<cv::Vec3b>(y_pos, x_pos)[1];
      current_cloud->points[count].r = cv_img_rgb.at<cv::Vec3b>(y_pos, x_pos)[2];
    }
  }

  // Check for usage of voxel grid filtering to downsample the point cloud
  // XXX This must be the first filter to be called, as it requires the "squared" point cloud
  // we create (with proper values for width and height) that any other filter would destroy
  if (use_voxel_grid_filter)
  {
    downsampleCloud(current_cloud);
  }

  // Check for usage of passthrough filtering
  if (use_passthrough_filter)
  {
    filterPassThrough(current_cloud);
  }

  // Check for usage of frustum culling filtering
  if (use_frustum_culling_filter)
  {
    filterFrustumCulling(current_cloud);
  }

  // Check for usage of radius outlier filtering
  if (use_radius_outlier_filter)
  {
    // XXX Use any other filter before this one to remove the large amount of all-zero points the
    // camera creates for saturated pixels; if not, the radius outlier filter takes really, really long
    if (use_voxel_grid_filter || use_passthrough_filter || use_frustum_culling_filter)
      filterCloudRadiusBased(current_cloud);
    else
      ROS_WARN_THROTTLE(2.0, "Calling radius outlier removal as the only filter would take too long!");
  }

  // Convert current_cloud to PointCloud2 and publish
  pcl::toROSMsg(*current_cloud, cloud);

  img_depth.header.stamp = ros::Time::now();
  cloud.header.stamp     = img_depth.header.stamp;
  depth_info.header      = img_depth.header;

  pub_cloud.publish(cloud);
  pub_depth.publish(img_depth);
  pub_depth_info.publish(depth_info);
}

/*----------------------------------------------------------------------------*/
void configureAudioNode()
{
  g_anode.newSampleReceivedEvent().connect(&onNewAudioSample);

  AudioNode::Configuration config = g_anode.getConfiguration();
  config.sampleRate = 44100;

  try
  {
    g_context.requestControl(g_anode, 0);

    g_anode.setConfiguration(config);
    g_anode.setInputMixerLevel(0.5f);
  }
  catch (ArgumentException& e)
  {
    printf("Argument Exception: %s\n", e.what());
  }
  catch (UnauthorizedAccessException& e)
  {
    printf("Unauthorized Access Exception: %s\n", e.what());
  }
  catch (ConfigurationException& e)
  {
    printf("Configuration Exception: %s\n", e.what());
  }
  catch (StreamingException& e)
  {
    printf("Streaming Exception: %s\n", e.what());
  }
  catch (TimeoutException&)
  {
    printf("TimeoutException\n");
  }
}

/*----------------------------------------------------------------------------*/
void configureDepthNode()
{
  g_dnode.newSampleReceivedEvent().connect(&onNewDepthSample);

  DepthNode::Configuration config = g_dnode.getConfiguration();

  config.frameFormat = depth_frame_format;
  config.framerate = depth_frame_rate;
  config.mode = depth_mode;
  config.saturation = true;

  try
  {
    // Request control once before changing the node's stream toggles and configuration
    g_context.requestControl(g_dnode, 0);

    g_dnode.setEnableUvMap(true);
    g_dnode.setEnableVerticesFloatingPoint(true);
    g_dnode.setEnableDepthMapFloatingPoint(true);

    g_dnode.setConfidenceThreshold(confidence_threshold);

    g_dnode.setConfiguration(config);
  }
  catch (ArgumentException& e)
  {
    printf("Argument Exception: %s\n", e.what());
  }
  catch (UnauthorizedAccessException& e)
  {
    printf("Unauthorized Access Exception: %s\n", e.what());
  }
  catch (IOException& e)
  {
    printf("IO Exception: %s\n", e.what());
  }
  catch (InvalidOperationException& e)
  {
    printf("Invalid Operation Exception: %s\n", e.what());
  }
  catch (ConfigurationException& e)
  {
    printf("Configuration Exception: %s\n", e.what());
  }
  catch (StreamingException& e)
  {
    printf("Streaming Exception: %s\n", e.what());
  }
  catch (TimeoutException&)
  {
    printf("TimeoutException\n");
  }
}

/*----------------------------------------------------------------------------*/
void configureColorNode()
{
  // Connect the new color sample handler
  g_cnode.newSampleReceivedEvent().connect(&onNewColorSample);

  ColorNode::Configuration config = g_cnode.getConfiguration();

  config.frameFormat = color_frame_format;
  config.compression = color_compression;
  config.powerLineFrequency = POWER_LINE_FREQUENCY_50HZ;
  config.framerate = color_frame_rate;

  g_cnode.setEnableColorMap(true);

  try
  {
    g_context.requestControl(g_cnode, 0);

    g_cnode.setConfiguration(config);
  }
  catch (ArgumentException& e)
  {
    printf("Argument Exception: %s\n", e.what());
  }
  catch (UnauthorizedAccessException& e)
  {
    printf("Unauthorized Access Exception: %s\n", e.what());
  }
  catch (IOException& e)
  {
    printf("IO Exception: %s\n", e.what());
  }
  catch (InvalidOperationException& e)
  {
    printf("Invalid Operation Exception: %s\n", e.what());
  }
  catch (ConfigurationException& e)
  {
    printf("Configuration Exception: %s\n", e.what());
  }
  catch (StreamingException& e)
  {
    printf("Streaming Exception: %s\n", e.what());
  }
  catch (TimeoutException&)
  {
    printf("TimeoutException\n");
  }
}

/*----------------------------------------------------------------------------*/
void configureNode(Node node)
{
  if ((node.is<DepthNode>()) && (!g_dnode.isSet()) && (depth_enabled))
  {
    g_dnode = node.as<DepthNode>();
    configureDepthNode();
    g_context.registerNode(node);
  }

  if ((node.is<ColorNode>()) && (!g_cnode.isSet()) && (color_enabled))
  {
    g_cnode = node.as<ColorNode>();
    configureColorNode();
    g_context.registerNode(node);
  }

  if ((node.is<AudioNode>()) && (!g_anode.isSet()))
  {
    g_anode = node.as<AudioNode>();
    configureAudioNode();
    g_context.registerNode(node);
  }
}

/*----------------------------------------------------------------------------*/
void onNodeConnected(Device device, Device::NodeAddedData data)
{
  configureNode(data.node);
}

/*----------------------------------------------------------------------------*/
void onNodeDisconnected(Device device, Device::NodeRemovedData data)
{
  if (data.node.is<AudioNode>() && (data.node.as<AudioNode>() == g_anode))
    g_anode.unset();
  if (data.node.is<ColorNode>() && (data.node.as<ColorNode>() == g_cnode))
    g_cnode.unset();
  if (data.node.is<DepthNode>() && (data.node.as<DepthNode>() == g_dnode))
    g_dnode.unset();
  printf("Node disconnected\n");
}

/*----------------------------------------------------------------------------*/
void onDeviceConnected(Context context, Context::DeviceAddedData data)
{
  if (!g_bDeviceFound)
  {
    data.device.nodeAddedEvent().connect(&onNodeConnected);
    data.device.nodeRemovedEvent().connect(&onNodeDisconnected);
    g_bDeviceFound = true;
  }
}

/*----------------------------------------------------------------------------*/
void onDeviceDisconnected(Context context, Context::DeviceRemovedData data)
{
  g_bDeviceFound = false;
  printf("Device disconnected\n");
}

void sigintHandler(int sig)
{
  g_context.quit();
  ros::shutdown();
}
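
/* The DepthSense event loop entered via g_context.run() in main() blocks until
 * quit() is called, so the default ROS SIGINT handler alone would not return
 * control to main(); quitting the context first lets the cleanup code at the
 * end of main() run before the process exits. */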

void reconfigure_callback(softkinetic_camera::SoftkineticConfig& config, uint32_t level)
{
  // @todo this one isn't trivial
  // camera_link = config.camera_link;

  confidence_threshold = config.confidence_threshold;
  g_dnode.setConfidenceThreshold(confidence_threshold);

  use_voxel_grid_filter = config.use_voxel_grid_filter;
  voxel_grid_size       = config.voxel_grid_size;

  use_radius_outlier_filter = config.use_radius_outlier_filter;
  search_radius             = config.search_radius;
  min_neighbours            = config.min_neighbours;

  use_passthrough_filter = config.use_passthrough_filter;
  limit_min              = config.limit_min;
  limit_max              = config.limit_max;

  use_frustum_culling_filter = config.use_frustum_culling_filter;
  hfov                       = config.hfov;
  vfov                       = config.vfov;
  near_plane                 = config.near_plane;
  far_plane                  = config.far_plane;

  depth_enabled      = config.enable_depth;
  depth_mode         = depthMode(config.depth_mode);
  depth_frame_format = depthFrameFormat(config.depth_frame_format);
  depth_frame_rate   = config.depth_frame_rate;

  color_enabled      = config.enable_color;
  color_compression  = colorCompression(config.color_compression);
  color_frame_format = colorFrameFormat(config.color_frame_format);
  color_frame_rate   = config.color_frame_rate;

  use_serial = config.use_serial;
  serial = config.serial;

  ROS_DEBUG_STREAM("New configuration:\n" <<
          //"camera_link = " << camera_link << "\n" <<

          "confidence_threshold = " << confidence_threshold << "\n" <<

          "use_voxel_grid_filter = " << (use_voxel_grid_filter ? "ON" : "OFF") << "\n" <<
          "voxel_grid_size = " << voxel_grid_size << "\n" <<

          "use_radius_outlier_filter = " << (use_radius_outlier_filter ? "ON" : "OFF") << "\n" <<
          "search_radius = " << search_radius << "\n" <<
          "min_neighbours = " << min_neighbours << "\n" <<

          "use_passthrough_filter = " << (use_passthrough_filter ? "ON" : "OFF") << "\n" <<
          "limit_min = " << limit_min << "\n" <<
          "limit_max = " << limit_max << "\n" <<

          "use_frustum_culling_filter = " << (use_frustum_culling_filter ? "ON" : "OFF") << "\n" <<
          "hfov = " << hfov << "\n" <<
          "vfov = " << vfov << "\n" <<
          "near_plane = " << near_plane << "\n" <<
          "far_plane = " << far_plane << "\n" <<

          "enable_depth = " << (depth_enabled ? "ON" : "OFF") << "\n" <<
          "depth_mode = " << config.depth_mode << "\n" <<
          "depth_frame_format = " << config.depth_frame_format << "\n" <<
          "depth_frame_rate = " << depth_frame_rate << "\n" <<

          "enable_color = " << (color_enabled ? "ON" : "OFF") << "\n" <<
          "color_compression = " << config.color_compression << "\n" <<
          "color_frame_format = " << config.color_frame_format << "\n" <<
          "color_frame_rate = " << color_frame_rate << "\n" <<

          "use_serial = " << use_serial << "\n" <<
          "serial = " << serial << "\n");
}
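
/* Note that only confidence_threshold is applied to the device immediately
 * (via setConfidenceThreshold above); the remaining values are stored in the
 * globals and only take effect the next time the corresponding node is
 * configured. */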

/*----------------------------------------------------------------------------*/
int main(int argc, char* argv[])
{
  // Initialize ROS
  ros::init(argc, argv, "softkinetic_bringup_node");
  ros::NodeHandle nh("~");

  // Override the default ROS SIGINT handler to call g_context.quit() and avoid escalating to SIGTERM
  signal(SIGINT, sigintHandler);

  // Get frame id from parameter server
  std::string softkinetic_link;
  if (!nh.hasParam("camera_link"))
  {
    ROS_ERROR_STREAM("For " << ros::this_node::getName() << ", parameter 'camera_link' is missing.");
    ros_node_shutdown = true;
  }

  nh.param<std::string>("camera_link", softkinetic_link, "softkinetic_link");
  cloud.header.frame_id = softkinetic_link;

  // Fill in the frame id of the color and depth image message headers
  std::string optical_frame;
  if (nh.getParam("rgb_optical_frame", optical_frame))
  {
    img_rgb.header.frame_id  = optical_frame;
    img_mono.header.frame_id = optical_frame;
  }
  else
  {
    img_rgb.header.frame_id  = "/softkinetic_rgb_optical_frame";
    img_mono.header.frame_id = "/softkinetic_rgb_optical_frame";
  }

  if (nh.getParam("depth_optical_frame", optical_frame))
  {
    img_depth.header.frame_id = optical_frame;
  }
  else
  {
    img_depth.header.frame_id = "/softkinetic_depth_optical_frame";
  }

  // Get confidence threshold from parameter server
  if (!nh.hasParam("confidence_threshold"))
  {
    ROS_ERROR_STREAM("For " << ros::this_node::getName() << ", parameter 'confidence_threshold' is not set on server.");
    ros_node_shutdown = true;
  }
  nh.param<int>("confidence_threshold", confidence_threshold, 150);

  // Check for usage of voxel grid filtering to downsample the point cloud
  nh.param<bool>("use_voxel_grid_filter", use_voxel_grid_filter, false);
  if (use_voxel_grid_filter)
  {
    // Downsampling cloud parameters
    nh.param<double>("voxel_grid_size", voxel_grid_size, 0.01);
  }

  // Check for usage of radius outlier filtering
  nh.param<bool>("use_radius_outlier_filter", use_radius_outlier_filter, false);
  if (use_radius_outlier_filter)
  {
    if (!nh.hasParam("search_radius"))
    {
      ROS_ERROR_STREAM("For " << ros::this_node::getName() << ", parameter 'search_radius' is not set on server.");
      ros_node_shutdown = true;
    }
    nh.param<double>("search_radius", search_radius, 0.5);

    if (!nh.hasParam("min_neighbours"))
    {
      ROS_ERROR_STREAM("For " << ros::this_node::getName() << ", parameter 'min_neighbours' is not set on server.");
      ros_node_shutdown = true;
    }
    nh.param<int>("min_neighbours", min_neighbours, 0);
  }

  // Check for usage of passthrough filtering
  nh.param<bool>("use_passthrough_filter", use_passthrough_filter, false);
  if (use_passthrough_filter)
  {
    if (!nh.hasParam("limit_min"))
    {
      ROS_ERROR_STREAM("For " << ros::this_node::getName() << ", parameter 'limit_min' is not set on server.");
      ros_node_shutdown = true;
    }
    nh.param<double>("limit_min", limit_min, 0.0);

    if (!nh.hasParam("limit_max"))
    {
      ROS_ERROR_STREAM("For " << ros::this_node::getName() << ", parameter 'limit_max' is not set on server.");
      ros_node_shutdown = true;
    }
    nh.param<double>("limit_max", limit_max, 0.0);
  }

  // Check for usage of frustum culling filtering
  nh.param<bool>("use_frustum_culling_filter", use_frustum_culling_filter, false);
  if (use_frustum_culling_filter)
  {
    if (!nh.hasParam("hfov"))
    {
      ROS_ERROR_STREAM("For " << ros::this_node::getName() << ", parameter 'hfov' is not set on server.");
      ros_node_shutdown = true;
    }
    nh.param<double>("hfov", hfov, 180.0);

    if (!nh.hasParam("vfov"))
    {
      ROS_ERROR_STREAM("For " << ros::this_node::getName() << ", parameter 'vfov' is not set on server.");
      ros_node_shutdown = true;
    }
    nh.param<double>("vfov", vfov, 180.0);

    if (!nh.hasParam("near_plane"))
    {
      ROS_ERROR_STREAM("For " << ros::this_node::getName() << ", parameter 'near_plane' is not set on server.");
      ros_node_shutdown = true;
    }
    nh.param<double>("near_plane", near_plane, 0.0);

    if (!nh.hasParam("far_plane"))
    {
      ROS_ERROR_STREAM("For " << ros::this_node::getName() << ", parameter 'far_plane' is not set on server.");
      ros_node_shutdown = true;
    }
    nh.param<double>("far_plane", far_plane, 100.0);
  }

  nh.param<bool>("enable_depth", depth_enabled, true);
  std::string depth_mode_str;
  nh.param<std::string>("depth_mode", depth_mode_str, "close");
  depth_mode = depthMode(depth_mode_str);

  std::string depth_frame_format_str;
  nh.param<std::string>("depth_frame_format", depth_frame_format_str, "QVGA");
  depth_frame_format = depthFrameFormat(depth_frame_format_str);

  nh.param<int>("depth_frame_rate", depth_frame_rate, 25);

  nh.param<bool>("enable_color", color_enabled, true);
  std::string color_compression_str;
  nh.param<std::string>("color_compression", color_compression_str, "MJPEG");
  color_compression = colorCompression(color_compression_str);

  std::string color_frame_format_str;
  nh.param<std::string>("color_frame_format", color_frame_format_str, "WXGA_H");
  color_frame_format = colorFrameFormat(color_frame_format_str);

  nh.param<int>("color_frame_rate", color_frame_rate, 25);

  // Initialize image transport object
  image_transport::ImageTransport it(nh);

  // Initialize publishers
  pub_cloud = nh.advertise<sensor_msgs::PointCloud2>("depth/points", 1);
  pub_rgb = it.advertise("rgb/image_color", 1);
  pub_mono = it.advertise("rgb/image_mono", 1);
  pub_depth = it.advertise("depth/image_raw", 1);
  pub_depth_info = nh.advertise<sensor_msgs::CameraInfo>("depth/camera_info", 1);
  pub_rgb_info = nh.advertise<sensor_msgs::CameraInfo>("rgb/camera_info", 1);

  std::string calibration_file;
  if (nh.getParam("rgb_calibration_file", calibration_file))
  {
    camera_info_manager::CameraInfoManager camera_info_manager(nh, "senz3d", "file://" + calibration_file);
    rgb_info = camera_info_manager.getCameraInfo();
  }

  if (nh.getParam("depth_calibration_file", calibration_file))
  {
    camera_info_manager::CameraInfoManager camera_info_manager(nh, "senz3d", "file://" + calibration_file);
    depth_info = camera_info_manager.getCameraInfo();
  }

  g_context = Context::create("softkinetic");

  g_context.deviceAddedEvent().connect(&onDeviceConnected);
  g_context.deviceRemovedEvent().connect(&onDeviceDisconnected);

  // Get the list of currently connected devices
  vector<Device> da = g_context.getDevices();

  // In case there are several devices, the index of the camera to start ought to
  // come as an argument; by default, index 0 is taken:
  int device_index = 0;
  ROS_INFO_STREAM("Number of devices found: " << da.size());
  if (da.size() == 0)
  {
    ROS_ERROR_STREAM("No devices found!");
  }

  if (da.size() >= 1)
  {
    // If a camera index comes as an argument, device_index will be updated
    if (argc > 1)
    {
      device_index = atoi(argv[1]);
      // Guard against an out-of-range index coming from the command line
      if (device_index < 0 || device_index >= (int)da.size())
      {
        ROS_ERROR_STREAM("Device index " << device_index << " is out of range; using device 0 instead");
        device_index = 0;
      }
    }
    else
    {
      nh.param<bool>("use_serial", use_serial, false);
      if (use_serial)
      {
        nh.getParam("serial", serial);
        bool serialDeviceFound = false;
        for (size_t i = 0; i < da.size(); i++)
        {
          if (da[i].getSerialNumber() == serial)
          {
            device_index = (int)i;
            serialDeviceFound = true;
          }
        }
        if (serialDeviceFound)
        {
          ROS_INFO_STREAM("Device with the requested serial number found");
        }
        else
        {
          ROS_ERROR_STREAM("Device with the requested serial number not found!");
          ROS_ERROR_STREAM("Requested serial number: " << serial);
        }
      }
    }
    ROS_INFO_STREAM("Serial number: " << da[device_index].getSerialNumber());

    g_bDeviceFound = true;

    da[device_index].nodeAddedEvent().connect(&onNodeConnected);
    da[device_index].nodeRemovedEvent().connect(&onNodeDisconnected);

    vector<Node> na = da[device_index].getNodes();

    for (size_t n = 0; n < na.size(); n++)
      configureNode(na[n]);
  }

  // Enable dynamic reconfigure
  ros::NodeHandle nh_cfg("~");
  ros::CallbackQueue callback_queue_cfg;
  nh_cfg.setCallbackQueue(&callback_queue_cfg);

  dynamic_reconfigure::Server<softkinetic_camera::SoftkineticConfig> server(nh_cfg);
  server.setCallback(boost::bind(&reconfigure_callback, _1, _2));

  // Handle the dynamic reconfigure server callback on a
  // separate thread from the g_context.run() called below
  ros::AsyncSpinner spinner(1, &callback_queue_cfg);
  spinner.start();

  // If any required parameter was missing, shut down instead of starting the camera
  if (ros_node_shutdown)
  {
    ros::shutdown();
  }
  // Loop while the ros core is operational or until Ctrl-C is used
  while (ros::ok())
  {
    g_context.startNodes();
    g_context.run();
  }

  // Close out all nodes
  if (g_cnode.isSet())
    g_context.unregisterNode(g_cnode);
  if (g_dnode.isSet())
    g_context.unregisterNode(g_dnode);
  if (g_anode.isSet())
    g_context.unregisterNode(g_anode);

  g_context.stopNodes();

  return 0;
}
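
/* Example invocation (a sketch only; the launch files shipped with the package
 * are not part of this listing, and the executable name is assumed to match
 * the node name used in ros::init above):
 *
 *   rosrun softkinetic_camera softkinetic_bringup_node \
 *     _camera_link:=softkinetic_camera_link \
 *     _confidence_threshold:=150 \
 *     _use_voxel_grid_filter:=true _voxel_grid_size:=0.01
 */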

