pr2_interactive_object_detection_backend.cpp
00001 /*
00002  * Copyright (c) 2009, Willow Garage, Inc.
00003  * All rights reserved.
00004  *
00005  * Redistribution and use in source and binary forms, with or without
00006  * modification, are permitted provided that the following conditions are met:
00007  *
00008  *     * Redistributions of source code must retain the above copyright
00009  *       notice, this list of conditions and the following disclaimer.
00010  *     * Redistributions in binary form must reproduce the above copyright
00011  *       notice, this list of conditions and the following disclaimer in the
00012  *       documentation and/or other materials provided with the distribution.
00013  *     * Neither the name of the Willow Garage, Inc. nor the names of its
00014  *       contributors may be used to endorse or promote products derived from
00015  *       this software without specific prior written permission.
00016  *
00017  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
00018  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
00019  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
00020  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
00021  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
00022  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
00023  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
00024  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
00025  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
00026  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
00027  * POSSIBILITY OF SUCH DAMAGE.
00028  */
00029 
00030 #include <ros/ros.h>
00031 
00032 #include "pr2_interactive_object_detection/pr2_interactive_object_detection_backend.h"
00033 
00034 #include <manipulation_msgs/GraspableObjectList.h>
00035 
00036 #include <household_objects_database_msgs/GetModelDescription.h>
00037 #include <household_objects_database_msgs/GetModelMesh.h>
00038 
00039 #include <sensor_msgs/Image.h>
00040 #include <sensor_msgs/CameraInfo.h>
00041 #include <sensor_msgs/PointCloud2.h>
00042 
00043 #include <stereo_msgs/DisparityImage.h>
00044 
00045 #include <interactive_perception_msgs/ObjectSegmentationGuiAction.h>
00046 
00047 #include <tabletop_object_detector/marker_generator.h>
00048 #include <tabletop_object_detector/TabletopDetection.h>
00049 
00050 using namespace tabletop_object_detector;
00051 using namespace interactive_perception_msgs;
00052 using namespace manipulation_msgs;
00053 using namespace object_manipulation_msgs;
00054 using namespace household_objects_database_msgs;
00055 using namespace pr2_interactive_object_detection;
00056 using namespace rgbd_assembler;
00057 
00058 InteractiveObjDetBackend::InteractiveObjDetBackend() :
00059   root_nh_(""),
00060   priv_nh_("~"),
00061   tf_listener_( ros::Duration(60), true )
00062 {
00063   obj_rec_popup_client_ = new actionlib::SimpleActionClient<ObjectRecognitionGuiAction>
00064     ("object_recognition_popup", true);
00065 
00066   user_command_server_ = new actionlib::SimpleActionServer<UserCommandAction>
00067     (root_nh_, "object_detection_user_command", boost::bind(&InteractiveObjDetBackend::processUserCommand, this, _1), false );
00068   user_command_server_->start();
00069 
00070   ROS_INFO("Connecting services..");
00071 
00072   if ( !connectService<RgbdAssembly>( rgbd_assembler_client_, "rgbd_assembly" ) ) return;
00073 
00074   if ( !connectService<TabletopSegmentation>( auto_seg_client_, "tabletop_segmentation" ) ) return;
00075 
00076   if ( !connectService<TabletopObjectRecognition>( auto_obj_rec_client_, "tabletop_object_recognition" ) ) return;
00077 
00078   if ( !connectService<TabletopDetection>( tabletop_detection_client_, "tabletop_detection" ) ) return;
00079 
00080   if ( !connectService<GetModelDescription>
00081        ( get_model_description_client_, "objects_database_node/get_model_description" ) ) return;
00082 
00083   if ( !connectService<GetModelMesh>
00084        ( get_model_mesh_client_, "objects_database_node/get_model_mesh" ) ) return;
00085 
00086   result_publisher_ = root_nh_.advertise<GraspableObjectList>("interactive_object_recognition_result", 1, true );
00087 
00088   marker_pub_ = priv_nh_.advertise<visualization_msgs::Marker>("object_markers", 1);
00089 
00090   priv_nh_.param<double>("min_marker_quality", min_marker_quality_, 0.003);
00091   priv_nh_.param<std::string>("robot_reference_frame_id", robot_reference_frame_id_, "base_link");
00092   priv_nh_.param<double>("table_x", table_x_, 1);
00093   priv_nh_.param<double>("table_y", table_y_, 0);
00094   priv_nh_.param<double>("table_z", table_z_, 0);
00095 
00096   ROS_INFO("Backend server for Interactive Object Detection GUI started");
00097 }
00098 
00099 
00100 InteractiveObjDetBackend::~InteractiveObjDetBackend()
00101 {
00102   delete obj_rec_popup_client_;
00103   delete user_command_server_;
00104 }
00105 
00106 
00107 template <class ServiceType>
00108 bool InteractiveObjDetBackend::connectService( ros::ServiceClient& service_client, std::string topic )
00109 {
00110   /*
00111   if ( service_client.isValid() )
00112     return;
00113 
00114   while ( !ros::service::waitForService(topic, ros::Duration(2.0)) && priv_nh_.ok() )
00115   {
00116     ROS_INFO_STREAM("Waiting for '" << topic << "' service to come up..");
00117   }
00118   if ( !root_nh_.ok() ) return false;
00119   */
00120 
00121   //we only need non-persistent connections here
00122   service_client = root_nh_.serviceClient<ServiceType>(topic, false);
00123   return true;
00124 }
00125 
00126 
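//Dispatch a single request coming in on the "object_detection_user_command" action
//server to the corresponding segmentation / recognition / detection / reset routine.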
00127 void InteractiveObjDetBackend::processUserCommand(const UserCommandGoal::ConstPtr &goal)
00128 {
00129   switch (goal->request)
00130   {
00131     case UserCommandGoal::SEGMENT:
00132       doSegmentation( goal->interactive );
00133       break;
00134     case UserCommandGoal::RECOGNIZE:
00135       doRecognition( goal->interactive );
00136       break;
00137     case UserCommandGoal::DETECT:
00138       doDetection( goal->interactive );
00139       break;
00140     case UserCommandGoal::RESET:
00141       resetMarkers();
00142       break;
00143     default:
00144       ROS_ERROR("Unknown user command requested.");
00145       break;
00146   }
00147 
00148   if (user_command_server_->isPreemptRequested())
00149   {
00150     preemptAction();
00151   }
00152 }
00153 
00154 
00155 void InteractiveObjDetBackend::statusFeedback( std::string msg )
00156 {
00157   UserCommandFeedback feedback;
00158   feedback.status = msg;
00159   ROS_INFO_STREAM( feedback.status );
00160   user_command_server_->publishFeedback( feedback );
00161 }
00162 
00163 void InteractiveObjDetBackend::abortAction( std::string msg )
00164 {
00165   ROS_ERROR_STREAM( "Action aborted: " << msg );
00166   UserCommandResult result;
00167   user_command_server_->setAborted( result, msg );
00168 }
00169 
00170 void InteractiveObjDetBackend::finishAction( std::string msg )
00171 {
00172   if ( user_command_server_->isPreemptRequested() )
00173   {
00174     ROS_WARN( "Preempt requested - action cannot be finished" );
00175     return;
00176   }
00177   ROS_INFO_STREAM( "Action finished: " << msg );
00178   UserCommandResult result;
00179   user_command_server_->setSucceeded( result, msg );
00180 }
00181 
00182 void InteractiveObjDetBackend::preemptAction( )
00183 {
00184   ROS_WARN_STREAM( "Action canceled." );
00185   UserCommandResult result;
00186   user_command_server_->setPreempted( result, "Action canceled." );
00187 }
00188 
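//Call the rgbd_assembler service and cache the assembled image, disparity image,
//camera info and point cloud for the following segmentation / recognition steps.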
00189 bool InteractiveObjDetBackend::getSensorData( ros::Duration time_out )
00190 {
00191   ros::Time start_time = ros::Time::now();
00192   if (!rgbd_assembler_client_.call(rgbd_assembler_srv_))
00193   {
00194     abortAction("Call to rgbd assembler service failed");
00195     return false;
00196   }
00197   ROS_INFO_STREAM("Detection backend: assembled data received after " << ros::Time::now() - start_time << " seconds");
00198   if (rgbd_assembler_srv_.response.result != rgbd_assembler_srv_.response.SUCCESS)
00199     {
00200       std::ostringstream s;
00201       s << "RGB-D Assembler service returned error " << (int)rgbd_assembler_srv_.response.result;
00202       abortAction( s.str() );
00203       return false;
00204     }
00205 
00206   image_ = rgbd_assembler_srv_.response.image;
00207   disparity_image_ = rgbd_assembler_srv_.response.disparity_image;
00208   camera_info_ = rgbd_assembler_srv_.response.camera_info;
00209   point_cloud_ = rgbd_assembler_srv_.response.point_cloud;
00210   
00211   return true;
00212 }
00213 
00214 bool InteractiveObjDetBackend::doInteractiveSegmentation( )
00215 {
00216   statusFeedback("Waiting for user input .." );
00217   
00218   std::string segm_topic("segmentation_popup");
00219   interactive_perception_msgs::ObjectSegmentationGuiGoal   segm_goal;
00220   actionlib::SimpleActionClient<interactive_perception_msgs::ObjectSegmentationGuiAction> 
00221     os_gui_action_client(segm_topic, true);
00222   
00223   while(!os_gui_action_client.waitForServer(ros::Duration(2.0)) && priv_nh_.ok() 
00224         && !user_command_server_->isPreemptRequested()) {
00225     ROS_INFO("Waiting for action client on topic %s", segm_topic.c_str());
00226   }
00227   
00228   if (!priv_nh_.ok()) return false;
00229   //check for cancel request
00230   if (user_command_server_->isPreemptRequested()) return false;
00231   
00232   segm_goal.disparity_image  = disparity_image_;
00233   segm_goal.camera_info      = camera_info_;
00234   segm_goal.point_cloud      = point_cloud_;
00235 
00236   os_gui_action_client.sendGoal(segm_goal);
00237   ROS_INFO("Sent data as goal");
00238   while (!os_gui_action_client.waitForResult(ros::Duration(0.5)) && priv_nh_.ok() 
00239          && !user_command_server_->isPreemptRequested()) {
00240     ROS_INFO_STREAM("Waiting for result from action client on topic " << segm_topic);
00241   }
00242   
00243   //make sure we cancel the gui request if we're canceled
00244   if ( user_command_server_->isPreemptRequested() ) {
00245     os_gui_action_client.cancelGoal();
00246     return false;
00247   }
00248   
00249   if (os_gui_action_client.getState() != actionlib::SimpleClientGoalState::SUCCEEDED) {
00250     
00251     abortAction( "The interactive segmentation did not succeed." );
00252     return false;
00253   } 
00254   
00255   interactive_perception_msgs::ObjectSegmentationGuiResult segm_result = *(os_gui_action_client.getResult());
00256   if (segm_result.result != interactive_perception_msgs::ObjectSegmentationGuiResult::SUCCESS) {
00257     std::ostringstream s;
00258     s << "Interactive Segmentation returned error " << (int)segm_result.result;
00259     abortAction( s.str() );
00260     return false;
00261   } 
00262    
00263   segmentation_result_.result   = segm_result.result;
00264   segmentation_result_.clusters = segm_result.clusters;
00265   segmentation_result_.table    = segm_result.table;
00266 
00267   return true;
00268 }
00269 
00270 
00271 bool InteractiveObjDetBackend::doAutoSegmentation( )
00272 {
00273   tabletop_object_detector::TabletopSegmentation segmentation_srv;
00274 
00275   if (!auto_seg_client_.call(segmentation_srv))
00276   {
00277     abortAction("Call to segmentation service failed");
00278     return false;
00279   }
00280 
00281   if (segmentation_srv.response.result != TabletopSegmentationResponse::SUCCESS)
00282   {
00283     std::ostringstream s;
00284     s << "Segmentation service returned error " << (int)segmentation_srv.response.result;
00285     abortAction( s.str() );
00286     return false;
00287   }
00288 
00289   segmentation_result_ = segmentation_srv.response;
00290 
00291   return true;
00292 }
00293 
00294 
00295 bool InteractiveObjDetBackend::doSegmentation( bool interactive )
00296 {
00297   if (!getSensorData(ros::Duration(10.0))) return false;
00298 
00299   segmentation_was_interactive_ = interactive;
00300   recognition_result_.cluster_model_indices.clear();
00301   recognition_result_.models.clear();
00302 
00303   if ( interactive )
00304   {
00305     if ( !doInteractiveSegmentation() ) return false;
00306   }
00307   else
00308   {
00309     if ( !doAutoSegmentation() ) return false;
00310   }
00311 
00312   //add the table to the collision map
00313   addTableToCollisionMap(segmentation_result_.table);
00314 
00315   std::ostringstream s;
00316   s << "Found " << (int)segmentation_result_.clusters.size() << " clusters.";
00317   finishAction( s.str() );
00318 
00319   int n;
00320   publishResult( n );
00321 
00322   return true;
00323 }
00324 
00325 
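//Send the current recognition hypotheses (with meshes and camera-frame poses) to the
//ObjectRecognitionGui popup and keep only the hypotheses that the user confirmed.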
00326 bool InteractiveObjDetBackend::doInteractiveRecognition()
00327 {
00328   //wait for popup server
00329   actionlib::SimpleActionClient<ObjectRecognitionGuiAction> or_gui_action_client("object_recognition_popup", true);
00330 
00331   statusFeedback("Waiting for object recognition popup service.." );
00332 
00333   while(!or_gui_action_client.waitForServer(ros::Duration(2.0)) && priv_nh_.ok() && !user_command_server_->isPreemptRequested())
00334   {
00335     ROS_INFO("Waiting for ObjectRecognitionGuiAction server");
00336   }
00337 
00338   if (!priv_nh_.ok() || user_command_server_->isPreemptRequested()) return false;
00339 
00340   ObjectRecognitionGuiGoal rec_goal;
00341   rec_goal.image = image_;
00342   rec_goal.camera_info = camera_info_;
00343 
00344   //get all the meshes from the database & transforms from tf
00345 
00346   statusFeedback("Getting models and coordinate transforms.." );
00347 
00348   int num_models = recognition_result_.models.size();
00349   rec_goal.model_hypotheses.resize( num_models );
00350 
00351   for ( int m=0; m<num_models; ++m )
00352   {
00353     std::vector<household_objects_database_msgs::DatabaseModelPose> &model_pose_list =
00354         recognition_result_.models[m].model_list;
00355 
00356     int num_hypotheses = model_pose_list.size();
00357 
00358     rec_goal.model_hypotheses[m].hypotheses.resize( num_hypotheses );
00359 
00360     if ( num_hypotheses > 0 )
00361     {
00362       rec_goal.model_hypotheses[m].accept = model_pose_list[0].confidence < min_marker_quality_;
00363     }
00364     else
00365     {
00366       rec_goal.model_hypotheses[m].accept = false;
00367     }
00368 
00369     for ( int h=0; h<num_hypotheses; ++h )
00370     {
00371       //fetch the mesh directly into the goal message; keep a reference for logging
00372       shape_msgs::Mesh &mesh = rec_goal.model_hypotheses[m].hypotheses[h].mesh;
00373       if ( !getModelMesh( model_pose_list[h].model_id, mesh ) )
00374       {
00375         abortAction( "Failed to get model mesh." );
00376         return false;
00377       }
00378 
00379       ROS_INFO_STREAM( "Model " << m << ", hypothesis " << h << " (database id "
00380           << model_pose_list[h].model_id << ") has " << mesh.triangles.size() << " triangles and "
00381           << mesh.vertices.size() << " vertices." );
00382 
00383       rec_goal.model_hypotheses[m].hypotheses[h].pose = model_pose_list[h].pose;
00384 
00385       if ( !transformPose( rec_goal.model_hypotheses[m].hypotheses[h].pose, point_cloud_.header.frame_id ) )
00386       {
00387         abortAction( "Failed to get transform." );
00388         return false;
00389       }
00390 
00391       if ( user_command_server_->isPreemptRequested() )
00392       {
00393         return false;
00394       }
00395     }
00396   }
00397 
00398   or_gui_action_client.sendGoal(rec_goal);
00399 
00400   statusFeedback("Waiting for user input.." );
00401 
00402   // wait for gui action server & send request
00403   while (!or_gui_action_client.waitForResult(ros::Duration(2.0)) && priv_nh_.ok() && !user_command_server_->isPreemptRequested())
00404   {
00405     ROS_INFO_STREAM("Waiting for ObjectRecognitionGuiAction result");
00406   }
00407 
00408   //make sure we cancel the gui request if we're being canceled
00409   if ( user_command_server_->isPreemptRequested() )
00410   {
00411     or_gui_action_client.cancelGoal();
00412     return false;
00413   }
00414 
00415   interactive_perception_msgs::ObjectRecognitionGuiResultConstPtr rec_result;
00416   rec_result = or_gui_action_client.getResult();
00417 
00418   if ( num_models != (int)rec_result->selected_hypothesis_indices.size() )
00419   {
00420     abortAction( "GUI returned invalid result (number of models in result doesn't match request)." );
00421     return false;
00422   }
00423 
00424   // only keep the selected model/pose hypotheses in the recognition result
00425 
00426   for ( int m=0; m<num_models; ++m )
00427   {
00428     std::vector<household_objects_database_msgs::DatabaseModelPose> &model_pose_list =
00429         recognition_result_.models[m].model_list;
00430 
00431     int sel_hyp_index = rec_result->selected_hypothesis_indices[m];
00432 
00433     if ( sel_hyp_index >=0 && sel_hyp_index < (int)model_pose_list.size() )
00434     {
00435       household_objects_database_msgs::DatabaseModelPose sel_hyp = model_pose_list[sel_hyp_index];
00436       sel_hyp.confidence = 0;
00437       model_pose_list.clear();
00438       model_pose_list.push_back( sel_hyp );
00439     }
00440     else
00441     {
00442       model_pose_list.clear();
00443     }
00444   }
00445 
00446   if (!priv_nh_.ok()) return false;
00447 
00448   return true;
00449 }
00450 
00451 
00452 bool InteractiveObjDetBackend::doAutoRecognition( )
00453 {
00454   statusFeedback("Doing automatic recognition." );
00455 
00456   tabletop_object_detector::TabletopObjectRecognition recognition_srv;
00457 
00458   //call automatic recognition service
00459   recognition_srv.request.table = segmentation_result_.table;
00460   recognition_srv.request.clusters = segmentation_result_.clusters;
00461   recognition_srv.request.num_models = 5;
00462   recognition_srv.request.perform_fit_merge = !segmentation_was_interactive_;
00463 
00464   if (!auto_obj_rec_client_.call(recognition_srv))
00465   {
00466     abortAction("Call to recognition service failed");
00467     return false;
00468   }
00469 
00470   //no objects found -> abort
00471   if (recognition_srv.response.models.size() == 0)
00472   {
00473     abortAction( "No objects found." );
00474     return false;
00475   }
00476   else
00477   {
00478     std::ostringstream s;
00479     s << recognition_srv.response.models.size() << " objects recognized.";
00480     statusFeedback( s.str() );
00481   }
00482 
00483   recognition_result_ = recognition_srv.response;
00484 
00485   return true;
00486 }
00487 
00488 bool InteractiveObjDetBackend::doRecognition( bool interactive )
00489 {
00490   if ( segmentation_result_.clusters.empty() )
00491   {
00492     abortAction("No clusters found or no segmentation done yet.");
00493     return false;
00494   }
00495 
00496   // call recognition service
00497   if ( !doAutoRecognition() ) return false;
00498 
00499   if (user_command_server_->isPreemptRequested()) return false;
00500 
00501   //call gui popup service if in interactive mode
00502   if ( interactive )
00503   {
00504     if ( !doInteractiveRecognition() ) return false;
00505   }
00506 
00507   if (user_command_server_->isPreemptRequested()) return false;
00508 
00509   // publish result
00510   ROS_INFO( "Object detection procedure finished. Publishing result." );
00511   int num_recognized;
00512 
00513   if ( !publishResult( num_recognized ) )
00514   {
00515     abortAction( "An error occurred while gathering the result data." );
00516     return false;
00517   }
00518 
00519   std::ostringstream s;
00520   s << num_recognized << " of " << segmentation_result_.clusters.size() << " objects recognized.";
00521   finishAction( s.str() );
00522 
00523   return true;
00524 }
00525 
00526 
00527 bool InteractiveObjDetBackend::doAutoDetection( )
00528 {
00529   statusFeedback("Doing automatic detection." );
00530 
00531   tabletop_object_detector::TabletopDetection tabletop_detection_srv;
00532 
00533   //call tabletop detection service
00534   tabletop_detection_srv.request.return_clusters = false;
00535   tabletop_detection_srv.request.return_models = true;
00536   tabletop_detection_srv.request.num_models = 5;
00537 
00538   if (!tabletop_detection_client_.call(tabletop_detection_srv))
00539   {
00540     abortAction("Call to tabletop detection service failed");
00541     return false;
00542   }
00543 
00544   //no objects found -> abort
00545   if (tabletop_detection_srv.response.detection.models.size() == 0)
00546   {
00547     abortAction( "No objects found." );
00548     return false;
00549   }
00550   else
00551   {
00552     std::ostringstream s;
00553     s << tabletop_detection_srv.response.detection.models.size() << " objects recognized.";
00554     statusFeedback( s.str() );
00555 
00556     //transform poses to odom_combined frame, so they're no longer head-relative
00557     for(size_t i=0; i<tabletop_detection_srv.response.detection.models.size(); i++)
00558     {
00559       if ( !transformPose( tabletop_detection_srv.response.detection.models[i].model_list[0].pose, "/odom_combined") )
00560       {
00561         abortAction( "Failed to get transform to odom_combined." );
00562         return false;
00563       }
00564     }
00565 
00566   }
00567 
00568   ROS_INFO( "Tabletop detection returned %d clusters.", (int)tabletop_detection_srv.response.detection.clusters.size() );
00569 
00570   segmentation_result_.clusters = tabletop_detection_srv.response.detection.clusters;
00571   recognition_result_.cluster_model_indices = tabletop_detection_srv.response.detection.cluster_model_indices;
00572   recognition_result_.models = tabletop_detection_srv.response.detection.models;
00573 
00574   //tod hack
00575 //  segmentation_result_.clusters.clear();
00576 //  recognition_result_.cluster_model_indices.clear();
00577 
00578   return true;
00579 }
00580 
00581 
00582 bool InteractiveObjDetBackend::doDetection( bool interactive )
00583 {
00584   // call recognition service
00585   if ( !doAutoDetection() ) return false;
00586 
00587   if (user_command_server_->isPreemptRequested()) return false;
00588 
00589   if (!getSensorData(ros::Duration(10.0))) return false;
00590 
00591   //call gui popup service if in interactive mode
00592   if ( interactive )
00593   {
00594     if ( !doInteractiveRecognition() ) return false;
00595   }
00596 
00597   if (user_command_server_->isPreemptRequested()) return false;
00598 
00599   // publish result
00600   ROS_INFO( "Object detection procedure finished. Publishing result." );
00601   int num_recognized;
00602 
00603   if ( !publishResult( num_recognized ) )
00604   {
00605     abortAction( "An error occurred while gathering the result data." );
00606     return false;
00607   }
00608 
00609   std::ostringstream s;
00610   s << num_recognized << " of " << segmentation_result_.clusters.size() << " objects recognized.";
00611   finishAction( s.str() );
00612 
00613   return true;
00614 }
00615 
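//Assemble the segmentation and recognition results into a GraspableObjectList
//(one entry per point cluster, plus recognized models without an associated cluster),
//publish it on the latched result topic and report how many models were recognized.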
00616 bool InteractiveObjDetBackend::publishResult( int &num_recognized )
00617 {
00618   if ( segmentation_result_.clusters.size() == 0 && recognition_result_.models.size() == 0 )
00619   {
00620     ROS_ERROR( "No results available for publishing!" );
00621     return false;
00622   }
00623 
00624   num_recognized = 0;
00625   GraspableObjectList grasp_obj_list;
00626 
00627   std::string reference_frame_id;
00628 
00629   if ( segmentation_result_.clusters.size() > 0 )
00630   {
00631     reference_frame_id = segmentation_result_.clusters[0].header.frame_id;
00632   }
00633   else
00634   {
00635     if ( recognition_result_.models[0].model_list.size() == 0 )
00636     {
00637       ROS_ERROR( "Model list 0 from recognition result is empty!" );
00638       return false;
00639     }
00640     reference_frame_id = recognition_result_.models[0].model_list[0].pose.header.frame_id;
00641   }
00642 
00643   ROS_INFO( "The reference frame is %s", reference_frame_id.c_str() );
00644 
00645   grasp_obj_list.camera_info = camera_info_;
00646   grasp_obj_list.image = image_;
00647   getPose( grasp_obj_list.reference_to_camera, reference_frame_id, point_cloud_.header.frame_id );
00648 
00649   unsigned num_clusters = segmentation_result_.clusters.size();
00650   unsigned num_models = recognition_result_.models.size();
00651 
00652   std::set<unsigned> used_model_indices;
00653   std::set<unsigned> used_cluster_indices;
00654 
00655   ROS_INFO( "Number of clusters: %u, models: %u", num_clusters, num_models );
00656 
00657   //first, go through all clusters and see if they match any model
00658   //store the info as graspable object
00659   for ( unsigned cluster_index=0; cluster_index<num_clusters; ++cluster_index )
00660   {
00661     manipulation_msgs::GraspableObject graspable_object;
00662     shape_msgs::Mesh mesh;
00663 
00664     //populate cluster
00665     graspable_object.cluster = segmentation_result_.clusters[cluster_index];
00666     graspable_object.reference_frame_id = reference_frame_id; // should be robot_reference_frame_id_;
00667 
00668     //if we have recognition results, populate model data
00669     if ( recognition_result_.cluster_model_indices.size() == num_clusters )
00670     {
00671       unsigned model_index = recognition_result_.cluster_model_indices[cluster_index];
00672 
00673       //skip models that we have already processed
00674       //a better way would be to actually merge all clusters that point to the same model,
00675       //but we'll go with that for now
00676       if ( used_model_indices.find( model_index ) != used_model_indices.end() )
00677       {
00678         continue;
00679       }
00680 
00681       used_model_indices.insert( model_index );
00682 
00683       if ( model_index >= recognition_result_.models.size() )
00684       {
00685         ROS_ERROR( "Invalid model index for cluster %u!", cluster_index );
00686         return false;
00687       }
00688 
00689       graspable_object.cluster = segmentation_result_.clusters[cluster_index];
00690       graspable_object.reference_frame_id = robot_reference_frame_id_;
00691 
00692       //only get the mesh if we have recognition results and
00693       //the first one is good enough
00694       if ( recognition_result_.models[ model_index ].model_list.size() == 0 ||
00695           recognition_result_.models[ model_index ].model_list[0].confidence > min_marker_quality_ )
00696       {
00697         ROS_INFO( "Model %u was not recognized.", model_index );
00698 
00699         //get pose of cluster relative to the camera frame
00700         if ( graspable_object.cluster.header.frame_id != reference_frame_id )
00701         {
00702           ROS_ERROR( "Point cluster has wrong frame id (%s)", graspable_object.cluster.header.frame_id.c_str() );
00703           return false;
00704         }
00705       }
00706       else
00707       {
00708         graspable_object.potential_models = recognition_result_.models[ model_index ].model_list;
00709 
00710         if ( !getModelMesh( graspable_object.potential_models[0].model_id, mesh ) )
00711         {
00712           ROS_ERROR( "Failed to get model mesh." );
00713           return false;
00714         }
00715 
00716         ROS_INFO_STREAM( "Model " << model_index << " (database id "
00717             << graspable_object.potential_models[0].model_id << ") has " << mesh.triangles.size() << " triangles and "
00718             << mesh.vertices.size() << " vertices." );
00719 
00720         if ( graspable_object.potential_models[0].pose.header.frame_id != reference_frame_id )
00721         {
00722           ROS_ERROR( "Model pose has wrong frame id (%s)", 
00723                      graspable_object.potential_models[0].pose.header.frame_id.c_str() );
00724           return false;
00725         }
00726 
00727         num_recognized++;
00728       }
00729     }
00730 
00731     grasp_obj_list.meshes.push_back( mesh );
00732     grasp_obj_list.graspable_objects.push_back( graspable_object );
00733   }
00734 
00735   //there might be recognized object models without associated cluster (e.g. the stuff that TOD returns)
00736   //so we need to collect those
00737   for ( unsigned model_index=0; model_index<num_models; ++model_index )
00738   {
00739     //skip all models that we've already stored
00740     if ( used_model_indices.find( model_index ) != used_model_indices.end() )
00741     {
00742       continue;
00743     }
00744 
00745     //only get the mesh if we have recognition results and
00746     //the first one is good enough
00747     if ( recognition_result_.models[ model_index ].model_list.size() == 0 ||
00748          recognition_result_.models[ model_index ].model_list[0].confidence > min_marker_quality_ )
00749     {
00750       ROS_INFO( "Model %u was not recognized.", model_index );
00751       continue;
00752     }
00753 
00754     used_model_indices.insert( model_index );
00755 
00756     manipulation_msgs::GraspableObject graspable_object;
00757     shape_msgs::Mesh mesh;
00758 
00759     graspable_object.reference_frame_id = reference_frame_id;
00760     graspable_object.potential_models = recognition_result_.models[ model_index ].model_list;
00761 
00762     if ( !getModelMesh( graspable_object.potential_models[0].model_id, mesh ) )
00763     {
00764       ROS_ERROR( "Failed to get mesh for model #%u.", model_index );
00765       return false;
00766     }
00767 
00768     ROS_INFO_STREAM( "Model " << model_index << " (database id "
00769         << graspable_object.potential_models[0].model_id << ") has " << mesh.triangles.size() << " triangles and "
00770         << mesh.vertices.size() << " vertices." );
00771 
00772     if ( graspable_object.potential_models[0].pose.header.frame_id != reference_frame_id )
00773     {
00774       ROS_ERROR( "Model pose has wrong frame id (%s)", graspable_object.potential_models[0].pose.header.frame_id.c_str() );
00775       return false;
00776     }
00777 
00778     num_recognized++;
00779 
00780     //hack to make sure the timestamp doesn't get too old
00781     graspable_object.potential_models[0].pose.header.stamp = ros::Time();
00782 
00783     grasp_obj_list.meshes.push_back( mesh );
00784     grasp_obj_list.graspable_objects.push_back( graspable_object );
00785   }
00786 
00787   result_publisher_.publish( grasp_obj_list );
00788 
00789   printObjects( grasp_obj_list.graspable_objects );
00790 
00791   publishFitMarkers( recognition_result_.models, segmentation_result_.table );
00792 
00793   return true;
00794 }
00795 
00796 void InteractiveObjDetBackend::resetMarkers()
00797 {
00798   //delete all published fit markers
00799   try
00800   {
00801     for ( size_t i=0; i<delete_markers_.size(); i++ )
00802     {
00803       delete_markers_[i].header.stamp = ros::Time::now();
00804       marker_pub_.publish( delete_markers_[i] );
00805     }
00806   }
00807   catch (...)
00808   {
00809     ROS_ERROR("exception when trying to clear old fit markers!");
00810   }
00811   delete_markers_.clear();
00812 
00813   //publish an empty list of objects so interactive_marker_node clears its object_handlers_ list
00814   GraspableObjectList grasp_obj_list;
00815   result_publisher_.publish( grasp_obj_list );
00816 
00817   finishAction( "reset all segmentation and detection markers" );
00818 }
00819 
00820 void InteractiveObjDetBackend::publishFitMarkers(
00821     const std::vector<household_objects_database_msgs::DatabaseModelPoseList> &potential_models,
00822     const tabletop_object_detector::Table &table )
00823 {
00824   try
00825   {
00826     //first, clear the old markers
00827     for ( size_t i=0; i<delete_markers_.size(); i++ )
00828     {
00829       delete_markers_[i].header.stamp = ros::Time::now();
00830       marker_pub_.publish( delete_markers_[i] );
00831     }
00832   }
00833   catch (...)
00834   {
00835     ROS_ERROR("exception when trying to clear old fit markers!");
00836   }
00837   delete_markers_.clear();
00838 
00839   for (size_t i=0; i<potential_models.size(); i++)
00840   {
00841     const std::vector<household_objects_database_msgs::DatabaseModelPose> models = potential_models[i].model_list;
00842     for (size_t j=0; j<models.size(); j++)
00843     {
00844       shape_msgs::Mesh mesh;
00845 
00846       if ( !getModelMesh( models[j].model_id, mesh ) )
00847       {
00848         ROS_ERROR("Failed to call database get mesh service for marker display");
00849       }
00850       else
00851       {
00852         double rank = ((double)j) / std::max( (int)(models.size())-1, 1 );
00853         visualization_msgs::Marker fitMarker =  MarkerGenerator::getFitMarker(mesh, rank);
00854         fitMarker.header = models[j].pose.header;
00855         fitMarker.pose = models[j].pose.pose;
00856         fitMarker.ns = "pr2_interactive_object_detection_model_" + boost::lexical_cast<std::string>(j);
00857         fitMarker.id = current_marker_id_++;
00858         marker_pub_.publish(fitMarker);
00859 
00860         //save a marker that will delete this one later..
00861         visualization_msgs::Marker delete_marker;
00862         delete_marker.header.frame_id = fitMarker.header.frame_id;
00863         delete_marker.id = fitMarker.id;
00864         delete_marker.ns = fitMarker.ns;
00865         delete_marker.action = visualization_msgs::Marker::DELETE;
00866         delete_markers_.push_back( delete_marker );
00867       }
00868     }
00869   }
00870 }
00871 
00872 bool InteractiveObjDetBackend::getPose( geometry_msgs::Pose &pose, std::string from_frame, std::string to_frame )
00873 {
00874   //we expect the robot not to move during the whole procedure,
00875   //so ros::Time::now() should be good enough
00876   ros::Time pose_time = ros::Time::now();
00877 
00878   tf::StampedTransform pose_transform;
00879   try
00880   {
00881     tf::Transformer transformer;
00882     ROS_INFO_STREAM( "Looking up transform from " << from_frame << " to " << to_frame );
00883     tf_listener_.waitForTransform(to_frame, from_frame, pose_time, ros::Duration(3.0));
00884     tf_listener_.lookupTransform(to_frame, from_frame, pose_time, pose_transform);
00885   }
00886   catch (tf::TransformException ex)
00887   {
00888     ROS_ERROR("%s",ex.what());
00889     return false;
00890   }
00891 
00892   //convert back
00893   tf::poseTFToMsg ( pose_transform , pose );
00894   ROS_INFO( "Position: %f, %f, %f / orientation: %f %f %f %f ",
00895       pose.position.x, pose.position.y, pose.position.z,
00896       pose.orientation.w, pose.orientation.x, pose.orientation.y, pose.orientation.z );
00897 
00898   return true;
00899 }
00900 
00901 
00902 
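//Transform a stamped pose into target_frame using the latest available tf data,
//overwriting the pose and its frame_id in place.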
00903 bool InteractiveObjDetBackend::transformPose( geometry_msgs::PoseStamped &pose, std::string target_frame )
00904 {
00905   // convert object pose to tf format
00906   tf::Transform old_pose;
00907   tf::poseMsgToTF ( pose.pose, old_pose );
00908 
00909   //get transformation from object's parent frame to camera frame
00910   std::string old_frame = pose.header.frame_id;
00911 
00912   //we expect the robot not to move during the whole procedure,
00913   //so ros::Time::now() should be good enough
00914   ros::Time pose_time = ros::Time::now();
00915 
00916   tf::StampedTransform old_to_target;
00917   try
00918   {
00919     tf::Transformer transformer;
00920     ROS_INFO_STREAM( "Looking up transform from " << old_frame << " to " << target_frame );
00921     tf_listener_.waitForTransform(target_frame, old_frame, pose_time, ros::Duration(3.0));
00922     tf_listener_.lookupTransform(target_frame, old_frame, pose_time, old_to_target);
00923   }
00924   catch (tf::TransformException ex)
00925   {
00926     ROS_ERROR("%s",ex.what());
00927     return false;
00928   }
00929 
00930   //concatenate transformations
00931   tf::Transform target_pose = old_to_target * old_pose;
00932 
00933   ROS_INFO( "Old position %f, %f, %f / orientation %f %f %f %f ",
00934       pose.pose.position.x, pose.pose.position.y, pose.pose.position.z,
00935       pose.pose.orientation.w, pose.pose.orientation.x, pose.pose.orientation.y, pose.pose.orientation.z );
00936 
00937   //convert back
00938   tf::poseTFToMsg ( target_pose , pose.pose );
00939   pose.header.frame_id = target_frame;
00940 
00941   ROS_INFO( "New position %f, %f, %f / orientation %f %f %f %f ",
00942       pose.pose.position.x, pose.pose.position.y, pose.pose.position.z,
00943       pose.pose.orientation.w, pose.pose.orientation.x, pose.pose.orientation.y, pose.pose.orientation.z );
00944 
00945   return true;
00946 }
00947 
00948 
00949 bool InteractiveObjDetBackend::getModelMesh( int model_id, shape_msgs::Mesh& mesh )
00950 {
00951   GetModelMesh mesh_srv;
00952 
00953   mesh_srv.request.model_id = model_id;
00954   if ( !get_model_mesh_client_.call(mesh_srv) )
00955   {
00956     ROS_ERROR("Failed to call get model mesh service");
00957     return false;
00958   }
00959 
00960   if (mesh_srv.response.return_code.code != DatabaseReturnCode::SUCCESS)
00961   {
00962     ROS_ERROR("Model mesh service reports an error (code %d)", mesh_srv.response.return_code.code);
00963     return false;
00964   }
00965 
00966   mesh = mesh_srv.response.mesh;
00967   return true;
00968 }
00969 
00970 
00971 bool InteractiveObjDetBackend::getModelInfo(const DatabaseModelPose &model_pose,
00972         std::string &name, std::string &all_tags)
00973 {
00974   GetModelDescription desc;
00975   desc.request.model_id = model_pose.model_id;
00976   if ( !get_model_description_client_.call(desc) )
00977   {
00978     ROS_ERROR("Failed to call get model description service");
00979     return false;
00980   }
00981 
00982   if (desc.response.return_code.code != desc.response.return_code.SUCCESS )
00983   {
00984     ROS_ERROR("Model description service reports an error (code %d)", desc.response.return_code.code);
00985     return false;
00986   }
00987   name = desc.response.name;
00988   for (size_t i=0; i<desc.response.tags.size(); i++)
00989   {
00990     if (!all_tags.empty()) all_tags.append(",");
00991     all_tags.append(desc.response.tags.at(i));
00992   }
00993   return true;
00994 }
00995 
00996 
00997 int InteractiveObjDetBackend::printObjects(const std::vector<manipulation_msgs::GraspableObject> &objects)
00998 {
00999   ROS_INFO_STREAM( "Detected " << objects.size() << " graspable object(s):\n" );
01000   for (size_t m=0; m<objects.size(); m++)
01001   {
01002     std::string name, all_tags;
01003     if (!objects[m].potential_models.empty())
01004     {
01005       if (getModelInfo(objects[m].potential_models[0], name, all_tags))
01006       {
01007         ROS_INFO("  (%d): %s (tags: %s) in frame %s", (int)m, name.c_str(), all_tags.c_str(),
01008                  objects[m].potential_models[0].pose.header.frame_id.c_str());
01009       }
01010       else
01011       {
01012         ROS_INFO("  (%d): database object, details not available.", (int)m );
01013       }
01014     }
01015     else
01016     {
01017       ROS_INFO("  (%d): unrecognized cluster with %u points", (int)m,
01018                (unsigned int)objects[m].cluster.points.size());
01019     }
01020   }
01021   return 0;
01022 }
01023 
01024 
01025 bool InteractiveObjDetBackend::addTableToCollisionMap(tabletop_object_detector::Table table)
01026 {
01027   // make sure collision services are available
01028   ROS_INFO("interactive object detection: waiting for collision map services");
01029   ros::Time start_time = ros::Time::now();
01030   while (!collision_map_interface_.connectionsEstablished(ros::Duration(1.0))) 
01031   {
01032     if (ros::Time::now() - start_time >= ros::Duration(5.0))
01033     {
01034       ROS_ERROR("collision map services not found");
01035       return false;
01036     }
01037   }
01038   collision_map_interface_.processCollisionGeometryForTable(table, "table");
01039   return true;
01040 }
01041 
01042 
01043 int main(int argc, char **argv)
01044 {
01045   ros::init(argc, argv, "pr2_interactive_object_detection_backend");
01046   InteractiveObjDetBackend node;
01047   ros::spin();
01048   return 0;
01049 }
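
The backend above is driven entirely through the "object_detection_user_command" action and publishes its output on the latched "interactive_object_recognition_result" topic. Below is a minimal client sketch (not part of the generated file) showing how another node could trigger a non-interactive detection and read back the resulting GraspableObjectList; the node name, timeout, queue size and the include path of the generated UserCommandAction header are illustrative assumptions.

// Hypothetical usage sketch: drive the backend's action server and listen for the result list.
#include <ros/ros.h>
#include <actionlib/client/simple_action_client.h>
#include <pr2_interactive_object_detection/UserCommandAction.h>
#include <manipulation_msgs/GraspableObjectList.h>

void resultCallback( const manipulation_msgs::GraspableObjectList::ConstPtr &list )
{
  ROS_INFO( "Received %d graspable object(s).", (int)list->graspable_objects.size() );
}

int main( int argc, char **argv )
{
  ros::init( argc, argv, "interactive_obj_det_example_client" );
  ros::NodeHandle nh;

  //the backend latches its result, so a late subscriber still receives the last list
  ros::Subscriber sub = nh.subscribe( "interactive_object_recognition_result", 1, resultCallback );

  actionlib::SimpleActionClient<pr2_interactive_object_detection::UserCommandAction>
      client( "object_detection_user_command", true );
  client.waitForServer();

  pr2_interactive_object_detection::UserCommandGoal goal;
  goal.request = pr2_interactive_object_detection::UserCommandGoal::DETECT;
  goal.interactive = false;  //skip the GUI popups

  client.sendGoal( goal );
  client.waitForResult( ros::Duration(60.0) );
  ROS_INFO_STREAM( "Detection finished in state " << client.getState().toString() );

  ros::spin();
  return 0;
}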


pr2_interactive_object_detection
Author(s): David Gossow
autogenerated on Mon Oct 6 2014 12:02:20