softkinetic_start.cpp
#ifdef _MSC_VER
#include <windows.h>
#endif

#include <stdio.h>
#include <vector>
#include <exception>
#include <iostream>
#include <iomanip>
#include <fstream>

//ros include files
#include <ros/ros.h>
#include <std_msgs/Int32.h>
#include <sensor_msgs/PointCloud2.h>
#include <sensor_msgs/point_cloud_conversion.h>
#include <image_transport/image_transport.h>

#include <pcl_ros/point_cloud.h>
#include <pcl_ros/io/pcd_io.h>
#include <pcl/io/io.h>
#include <pcl/point_types.h>
#include <pcl/range_image/range_image.h>
#include <pcl/filters/radius_outlier_removal.h>
#include <pcl/filters/voxel_grid.h>
//#include <pcl/visualization/cloud_viewer.h>

#include <message_filters/subscriber.h>
#include <message_filters/time_synchronizer.h>
#include <message_filters/sync_policies/approximate_time.h>

#include <cv_bridge/cv_bridge.h>
#include <sensor_msgs/image_encodings.h>

#include <DepthSense.hxx>

using namespace DepthSense;
using namespace message_filters;
using namespace std;

namespace enc = sensor_msgs::image_encodings;

Context g_context;
DepthNode g_dnode;
ColorNode g_cnode;
AudioNode g_anode;

uint32_t g_aFrames = 0;
uint32_t g_cFrames = 0;
uint32_t g_dFrames = 0;

bool g_bDeviceFound = false;

ProjectionHelper* g_pProjHelper = NULL;
StereoCameraParameters g_scp;

ros::Publisher pub_cloud;
image_transport::Publisher pub_rgb;
image_transport::Publisher pub_depth;

sensor_msgs::Image image;
std_msgs::Int32 test_int;
sensor_msgs::PointCloud2 cloud;

int offset;
/* confidence threshold for DepthNode configuration */
int confidence_threshold;
/* parameters for downsampling cloud */
bool use_voxel_grid_filter;
double voxel_grid_side;
/* parameters for radius filter */
bool use_radius_filter;
double search_radius;
int minNeighboursInRadius;
/* shutdown request */
bool ros_node_shutdown = false;
/* depth sensor parameters */
bool _depth_enabled;
DepthSense::DepthNode::CameraMode depth_mode;
DepthSense::FrameFormat depth_frame_format;
int depth_frame_rate;
/* color sensor parameters */
bool _color_enabled;
DepthSense::CompressionType color_compression;
DepthSense::FrameFormat color_frame_format;
int color_frame_rate;

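// The globals above are filled from this node's private parameter namespace in main().
// As a rough, illustrative sketch (not taken from this package's launch files), the node
// could be started with ROS private-parameter remappings like the following; the
// executable name is assumed to match the node name used in ros::init() below, and the
// numeric values are examples only:
//
//   rosrun softkinetic_camera softkinetic_bringup_node \
//       _camera_link:=softkinetic_link _confidence_threshold:=150 \
//       _use_voxel_grid_filter:=true _voxel_grid_side:=0.01 \
//       _use_radius_filter:=true _search_radius:=0.5 _minNeighboursInRadius:=10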
/*----------------------------------------------------------------------------*/
// New audio sample event handler
void onNewAudioSample(AudioNode node, AudioNode::NewSampleReceivedData data)
{
    //printf("A#%u: %d\n",g_aFrames,data.audioData.size());
    g_aFrames++;
}

/*----------------------------------------------------------------------------*/
// New color sample event handler
void onNewColorSample(ColorNode node, ColorNode::NewSampleReceivedData data)
{
    ros::NodeHandle n_color("~");
    std::string softkinetic_link;
    if (n_color.getParam("/camera_link", softkinetic_link))
    {
        image.header.frame_id = softkinetic_link.c_str();
    }
    else
    {
        image.header.frame_id = "/softkinetic_link";
    }
    // Create a sensor_msgs::Image for ROS based on the new camera image
    //image.header.stamp.nsec = g_cFrames++*1000;
    image.header.stamp = ros::Time::now();

    int32_t w, h;
    FrameFormat_toResolution(data.captureConfiguration.frameFormat,&w,&h);
    image.width = w;
    image.height = h;
    image.encoding = "bgr8";
    image.data.resize(w*h*3);
    // Copy the color map one 3-byte pixel per iteration, starting at the last
    // complete pixel (w*h*3 - 3) so the count2+1 and count2+2 accesses stay
    // inside the buffer.
    int count2 = w*h*3 - 3;

    for(int i = 0;i < w;i++){
        for(int j = 0;j < h; j++){
            image.data[count2]   = data.colorMap[count2];
            image.data[count2+1] = data.colorMap[count2+1];
            image.data[count2+2] = data.colorMap[count2+2];
            count2-=3;
        }
    }

    // Publish the rgb data
    pub_rgb.publish(image);

    g_cFrames++;
}
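// Note: the loop above copies data.colorMap into the ROS image byte-for-byte, so it
// relies on the ColorNode delivering 8-bit pixels already packed in BGR order to match
// the "bgr8" encoding set on the message; no pixel-format conversion is performed here.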

/*----------------------------------------------------------------------------*/

void downsampleCloud(pcl::PointCloud<pcl::PointXYZRGB>::Ptr cloud_to_filter)
{
    ROS_DEBUG_STREAM("Starting downsampling");
    pcl::VoxelGrid<pcl::PointXYZRGB> sor;
    sor.setInputCloud (cloud_to_filter);
    sor.setLeafSize (voxel_grid_side, voxel_grid_side, voxel_grid_side);
    sor.filter (*cloud_to_filter);
    ROS_DEBUG_STREAM("downsampled!");
}

void filterCloudRadiusBased(pcl::PointCloud<pcl::PointXYZRGB>::Ptr cloud_to_filter)
{
    // radius based filter:
    pcl::RadiusOutlierRemoval<pcl::PointXYZRGB> ror;
    ror.setInputCloud(cloud_to_filter);
    ror.setRadiusSearch(search_radius);
    ror.setMinNeighborsInRadius(minNeighboursInRadius);
    // apply filter
    ROS_DEBUG_STREAM("Starting filtering");
    int before = cloud_to_filter->size();
    double old_ = ros::Time::now().toSec();
    ror.filter(*cloud_to_filter);
    double new_ = ros::Time::now().toSec() - old_;
    int after = cloud_to_filter->size();
    ROS_DEBUG_STREAM("filtered in " << new_ << " seconds; "
                  << "points reduced from " << before << " to " << after);
    cloud.header.stamp = ros::Time::now();
}
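// Both helpers above filter the cloud in place: pcl::VoxelGrid replaces all points that
// fall into the same voxel_grid_side-sized cell with their centroid, and
// pcl::RadiusOutlierRemoval drops every point that has fewer than minNeighboursInRadius
// neighbours within search_radius. Downsampling first keeps the radius search cheap,
// which is why use_voxel_grid_filter is recommended whenever use_radius_filter is on.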

// New depth sample event handler
void onNewDepthSample(DepthNode node, DepthNode::NewSampleReceivedData data)
{
    pcl::PointCloud<pcl::PointXYZRGB>::Ptr current_cloud(new pcl::PointCloud<pcl::PointXYZRGB>());

    sensor_msgs::Image depth_img_msg;
    ros::NodeHandle n_depth("~");

    //fill in the depth image message header
    std::string softkinetic_link;
    if (n_depth.getParam("/camera_link", softkinetic_link))
    {
        depth_img_msg.header.frame_id = softkinetic_link.c_str();
    }
    else
    {
        depth_img_msg.header.frame_id = "/softkinetic_link";
    }
    depth_img_msg.header.stamp = ros::Time::now();

    int count = -1;

    // Project some 3D points in the Color Frame
    if (!g_pProjHelper)
    {
        g_pProjHelper = new ProjectionHelper(data.stereoCameraParameters);
        g_scp = data.stereoCameraParameters;
    }
    else if (g_scp != data.stereoCameraParameters)
    {
        g_pProjHelper->setStereoCameraParameters(data.stereoCameraParameters);
        g_scp = data.stereoCameraParameters;
    }

    int32_t w, h;
    FrameFormat_toResolution(data.captureConfiguration.frameFormat,&w,&h);

    depth_img_msg.width = w;
    depth_img_msg.height = h;
    depth_img_msg.encoding = sensor_msgs::image_encodings::TYPE_32FC1;
    depth_img_msg.is_bigendian = 0;
    depth_img_msg.step = sizeof(float)*w;
    std::size_t data_size = depth_img_msg.width*depth_img_msg.height;
    depth_img_msg.data.resize(data_size*sizeof(float));

    Vertex p3DPoints[1];
    Point2D p2DPoints[1];

    g_dFrames++;

    current_cloud->header.frame_id = cloud.header.frame_id;
    current_cloud->height = h;
    current_cloud->width = w;
    current_cloud->is_dense = false;
    current_cloud->points.resize(w*h);

    uchar b, g, r;

    float* depth_img_ptr = reinterpret_cast<float*>(&depth_img_msg.data[0]);

    // Iterate over every pixel of the depth map; count is the flat index into the
    // vertex arrays and into the organized point cloud.
    for(int i = 0;i < h;i++){
        for(int j = 0;j < w;j++){
            count++;
            current_cloud->points[count].x = -data.verticesFloatingPoint[count].x;
            current_cloud->points[count].y = data.verticesFloatingPoint[count].y;
            if(data.verticesFloatingPoint[count].z == 32001){
                // treat the sensor's saturated-value marker as "no depth"
                current_cloud->points[count].z = 0;
            }else{
                current_cloud->points[count].z = data.verticesFloatingPoint[count].z;
            }
            // also write the depth value into the 32FC1 depth image
            depth_img_ptr[count] = current_cloud->points[count].z;

            //get mapping between depth map and color map, assuming we have a RGB image
            if(image.data.size() == 0){
                ROS_WARN_THROTTLE(2.0, "Color image is empty; pointcloud will be colorless");
                continue;
            }
            // project this single vertex into the color plane
            p3DPoints[0] = data.vertices[count];
            g_pProjHelper->get2DCoordinates(p3DPoints, p2DPoints, 1, CAMERA_PLANE_COLOR);
            int x_pos = (int)p2DPoints[0].x;
            int y_pos = (int)p2DPoints[0].y;

            // reject projections that fall outside the color image
            if(y_pos < 0 || y_pos >= (int)image.height || x_pos < 0 || x_pos >= (int)image.width){
                b = 0;
                g = 0;
                r = 0;
            }else{
                b = image.data[(y_pos*image.width+x_pos)*3+0];
                g = image.data[(y_pos*image.width+x_pos)*3+1];
                r = image.data[(y_pos*image.width+x_pos)*3+2];
            }
            current_cloud->points[count].b = b;
            current_cloud->points[count].g = g;
            current_cloud->points[count].r = r;
        }
    }

    //check for usage of voxel grid filtering to downsample point cloud
    if(use_voxel_grid_filter)
    {
        downsampleCloud(current_cloud);
    }

    //check for usage of radius filtering
    if(use_radius_filter)
    {
        //use_voxel_grid_filter should be enabled so that the radius filter doesn't take too long
        filterCloudRadiusBased(current_cloud);
    }

    //convert current_cloud to PointCloud2 and publish
    pcl::toROSMsg(*current_cloud, cloud);

    pub_cloud.publish (cloud);
    pub_depth.publish (depth_img_msg);

    g_context.quit();
}
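// Note on the event loop: DepthSense::Context::run() dispatches sensor callbacks until
// quit() is called, so quitting at the end of every depth frame (above) returns control
// to main(), whose while(ros::ok()) loop immediately calls startNodes()/run() again.
// This per-frame round trip is what lets the node react to Ctrl-C between frames.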

/*----------------------------------------------------------------------------*/
void configureAudioNode()
{
    g_anode.newSampleReceivedEvent().connect(&onNewAudioSample);

    AudioNode::Configuration config = g_anode.getConfiguration();
    config.sampleRate = 44100;

    try
    {
        g_context.requestControl(g_anode,0);

        g_anode.setConfiguration(config);

        g_anode.setInputMixerLevel(0.5f);
    }
    catch (ArgumentException& e)
    {
        printf("Argument Exception: %s\n",e.what());
    }
    catch (UnauthorizedAccessException& e)
    {
        printf("Unauthorized Access Exception: %s\n",e.what());
    }
    catch (ConfigurationException& e)
    {
        printf("Configuration Exception: %s\n",e.what());
    }
    catch (StreamingException& e)
    {
        printf("Streaming Exception: %s\n",e.what());
    }
    catch (TimeoutException&)
    {
        printf("TimeoutException\n");
    }
}

/*----------------------------------------------------------------------------*/
void configureDepthNode()
{
    g_dnode.newSampleReceivedEvent().connect(&onNewDepthSample);

    DepthNode::Configuration config = g_dnode.getConfiguration();

    config.frameFormat = depth_frame_format;
    config.framerate = depth_frame_rate;
    config.mode = depth_mode;
    config.saturation = true;

    g_context.requestControl(g_dnode,0);

    g_dnode.setEnableVertices(true);
    g_dnode.setEnableConfidenceMap(true);
    g_dnode.setConfidenceThreshold(confidence_threshold);
    g_dnode.setEnableVerticesFloatingPoint(true);
    g_dnode.setEnableDepthMapFloatingPoint(true);

    try
    {
        g_context.requestControl(g_dnode,0);

        g_dnode.setConfiguration(config);
    }
    catch (ArgumentException& e)
    {
        printf("Argument Exception: %s\n",e.what());
    }
    catch (UnauthorizedAccessException& e)
    {
        printf("Unauthorized Access Exception: %s\n",e.what());
    }
    catch (IOException& e)
    {
        printf("IO Exception: %s\n",e.what());
    }
    catch (InvalidOperationException& e)
    {
        printf("Invalid Operation Exception: %s\n",e.what());
    }
    catch (ConfigurationException& e)
    {
        printf("Configuration Exception: %s\n",e.what());
    }
    catch (StreamingException& e)
    {
        printf("Streaming Exception: %s\n",e.what());
    }
    catch (TimeoutException&)
    {
        printf("TimeoutException\n");
    }
}

/*----------------------------------------------------------------------------*/
void configureColorNode()
{
    // connect new color sample handler
    g_cnode.newSampleReceivedEvent().connect(&onNewColorSample);

    ColorNode::Configuration config = g_cnode.getConfiguration();

    config.frameFormat = color_frame_format;
    config.compression = color_compression;
    config.powerLineFrequency = POWER_LINE_FREQUENCY_50HZ;
    config.framerate = color_frame_rate;

    g_cnode.setEnableColorMap(true);

    try
    {
        g_context.requestControl(g_cnode,0);

        g_cnode.setConfiguration(config);
    }
    catch (ArgumentException& e)
    {
        printf("Argument Exception: %s\n",e.what());
    }
    catch (UnauthorizedAccessException& e)
    {
        printf("Unauthorized Access Exception: %s\n",e.what());
    }
    catch (IOException& e)
    {
        printf("IO Exception: %s\n",e.what());
    }
    catch (InvalidOperationException& e)
    {
        printf("Invalid Operation Exception: %s\n",e.what());
    }
    catch (ConfigurationException& e)
    {
        printf("Configuration Exception: %s\n",e.what());
    }
    catch (StreamingException& e)
    {
        printf("Streaming Exception: %s\n",e.what());
    }
    catch (TimeoutException&)
    {
        printf("TimeoutException\n");
    }
}

/*----------------------------------------------------------------------------*/
void configureNode(Node node)
{
    if ((node.is<DepthNode>())&&(!g_dnode.isSet())&&(_depth_enabled))
    {
        g_dnode = node.as<DepthNode>();
        configureDepthNode();
        g_context.registerNode(node);
    }

    if ((node.is<ColorNode>())&&(!g_cnode.isSet())&&(_color_enabled))
    {
        g_cnode = node.as<ColorNode>();
        configureColorNode();
        g_context.registerNode(node);
    }

    if ((node.is<AudioNode>())&&(!g_anode.isSet()))
    {
        g_anode = node.as<AudioNode>();
        configureAudioNode();
        g_context.registerNode(node);
    }
}

/*----------------------------------------------------------------------------*/
void onNodeConnected(Device device, Device::NodeAddedData data)
{
    configureNode(data.node);
}

/*----------------------------------------------------------------------------*/
void onNodeDisconnected(Device device, Device::NodeRemovedData data)
{
    if (data.node.is<AudioNode>() && (data.node.as<AudioNode>() == g_anode))
        g_anode.unset();
    if (data.node.is<ColorNode>() && (data.node.as<ColorNode>() == g_cnode))
        g_cnode.unset();
    if (data.node.is<DepthNode>() && (data.node.as<DepthNode>() == g_dnode))
        g_dnode.unset();
    printf("Node disconnected\n");
}

/*----------------------------------------------------------------------------*/
void onDeviceConnected(Context context, Context::DeviceAddedData data)
{
    if (!g_bDeviceFound)
    {
        data.device.nodeAddedEvent().connect(&onNodeConnected);
        data.device.nodeRemovedEvent().connect(&onNodeDisconnected);
        g_bDeviceFound = true;
    }
}

/*----------------------------------------------------------------------------*/
void onDeviceDisconnected(Context context, Context::DeviceRemovedData data)
{
    g_bDeviceFound = false;
    printf("Device disconnected\n");
}

/*----------------------------------------------------------------------------*/
int main(int argc, char* argv[])
{
    //initialize ros
    ros::init (argc, argv, "softkinetic_bringup_node");
    ros::NodeHandle nh("~");

    // get frame id from parameter server
    std::string softkinetic_link;
    if(!nh.hasParam("camera_link"))
    {
        ROS_ERROR_STREAM("For " << ros::this_node::getName() << ", parameter 'camera_link' is missing.");
        ros_node_shutdown = true;
    }

    nh.param<std::string>("camera_link", softkinetic_link, "softkinetic_link");
    cloud.header.frame_id = softkinetic_link;

    // get confidence threshold from parameter server
    if(!nh.hasParam("confidence_threshold"))
    {
        ROS_ERROR_STREAM("For " << ros::this_node::getName() << ", parameter 'confidence_threshold' is not set on server.");
        ros_node_shutdown = true;
    }
    nh.param<int>("confidence_threshold", confidence_threshold, 150);

    //check for usage of voxel grid filtering to downsample the point cloud
    nh.param<bool>("use_voxel_grid_filter", use_voxel_grid_filter, false);
    if (use_voxel_grid_filter)
    {
      // downsampling cloud parameters
      nh.param<double>("voxel_grid_side", voxel_grid_side, 0.01);
    }

    // check for usage of radius filtering
    nh.param<bool>("use_radius_filter", use_radius_filter, false);
    if(use_radius_filter)
    {
        if(!nh.hasParam("search_radius"))
        {
            ROS_ERROR_STREAM("For " << ros::this_node::getName() << ", parameter 'search_radius' is not set on server.");
            ros_node_shutdown = true;
        }
        nh.param<double>("search_radius", search_radius, 0.5);

        if(!nh.hasParam("minNeighboursInRadius"))
        {
            ROS_ERROR_STREAM("For " << ros::this_node::getName() << ", parameter 'minNeighboursInRadius' is not set on server.");
            ros_node_shutdown = true;
        }
        nh.param<int>("minNeighboursInRadius", minNeighboursInRadius, 0);
    }

    nh.param<bool>("enable_depth", _depth_enabled, true);
    std::string depth_mode_str;
    nh.param<std::string>("depth_mode", depth_mode_str, "close");
    if ( depth_mode_str == "long" )
      depth_mode = DepthNode::CAMERA_MODE_LONG_RANGE;
    else
      depth_mode = DepthNode::CAMERA_MODE_CLOSE_MODE;

    std::string depth_frame_format_str;
    nh.param<std::string>("depth_frame_format", depth_frame_format_str, "QVGA");
    if ( depth_frame_format_str == "QQVGA" )
      depth_frame_format = FRAME_FORMAT_QQVGA;
    else if ( depth_frame_format_str == "QVGA" )
      depth_frame_format = FRAME_FORMAT_QVGA;
    else
      depth_frame_format = FRAME_FORMAT_VGA;

    nh.param<int>("depth_frame_rate", depth_frame_rate, 25);

    nh.param<bool>("enable_color", _color_enabled, true);
    std::string color_compression_str;
    nh.param<std::string>("color_compression", color_compression_str, "MJPEG");
    if ( color_compression_str == "YUY2" )
      color_compression = COMPRESSION_TYPE_YUY2;
    else
      color_compression = COMPRESSION_TYPE_MJPEG;

    std::string color_frame_format_str;
    nh.param<std::string>("color_frame_format", color_frame_format_str, "WXGA");
    if ( color_frame_format_str == "QQVGA" )
      color_frame_format = FRAME_FORMAT_QQVGA;
    else if ( color_frame_format_str == "QVGA" )
      color_frame_format = FRAME_FORMAT_QVGA;
    else if ( color_frame_format_str == "VGA" )
      color_frame_format = FRAME_FORMAT_VGA;
    else if ( color_frame_format_str == "NHD" )
      color_frame_format = FRAME_FORMAT_NHD;
    else
      color_frame_format = FRAME_FORMAT_WXGA_H;

    nh.param<int>("color_frame_rate", color_frame_rate, 25);


    offset = ros::Time::now().toSec();
    //initialize image transport object
    image_transport::ImageTransport it(nh);

    //initialize publishers
    pub_cloud = nh.advertise<sensor_msgs::PointCloud2> ("depth_registered/points", 1);
    pub_rgb = it.advertise ("rgb_data", 1);
    pub_depth = it.advertise ("depth_registered/image", 1);

    g_context = Context::create("softkinetic");

    g_context.deviceAddedEvent().connect(&onDeviceConnected);
    g_context.deviceRemovedEvent().connect(&onDeviceDisconnected);

    // Get the list of currently connected devices
    vector<Device> da = g_context.getDevices();

    // In case there are several devices, the index of the camera to start ought to come as an argument
    // By default, index 0 is taken:
    int device_index = 0;
    ROS_INFO_STREAM("Number of Devices found: " << da.size());
    if(da.size() == 0)
    {
        ROS_ERROR_STREAM("No devices found!!!!!!");
    }
    if (da.size() >= 1)
    {
        // if camera index comes as argument, device_index will be updated
        if (argc > 1)
        {
            device_index = atoi(argv[1]);
        }


        g_bDeviceFound = true;

        da[device_index].nodeAddedEvent().connect(&onNodeConnected);
        da[device_index].nodeRemovedEvent().connect(&onNodeDisconnected);

        vector<Node> na = da[device_index].getNodes();

        for (int n = 0; n < (int)na.size();n++)
            configureNode(na[n]);
    }
    //loop while ros core is operational or Ctrl-C is used
    if(ros_node_shutdown){
        ros::shutdown();
    }
    while(ros::ok()){
        g_context.startNodes();
        g_context.run();
    }
    //Close out all nodes
    if (g_cnode.isSet()) g_context.unregisterNode(g_cnode);
    if (g_dnode.isSet()) g_context.unregisterNode(g_dnode);
    if (g_anode.isSet()) g_context.unregisterNode(g_anode);

    if (g_pProjHelper)
        delete g_pProjHelper;
    g_context.stopNodes();
    return 0;
}
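// Published topics (all resolved under this node's private namespace, since the
// publishers are created on a NodeHandle("~")):
//   depth_registered/points   sensor_msgs/PointCloud2   colored point cloud
//   rgb_data                  sensor_msgs/Image         bgr8 color image
//   depth_registered/image    sensor_msgs/Image         32FC1 depth image
//
// An optional command-line argument selects the camera when several devices are
// attached, e.g. (executable name assumed, see the note after the globals above):
//   rosrun softkinetic_camera softkinetic_bringup_node 1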


softkinetic_camera
Author(s): Felipe Garcia Lopez
autogenerated on Wed Aug 26 2015 16:26:49