00001
00002
00003
00004
00005
00006
00007
00008
00009
00010
00011
00012
00013
00014
00015
00016
00017
00018
00019
00020
00021
00022
00023
00024
00025
00026
00027
00028
00029
00030 #include <ros/ros.h>
00031
00032 #include "pr2_interactive_object_detection/pr2_interactive_object_detection_backend.h"
00033 #include "pr2_interactive_object_detection/GraspableObjectList.h"
00034
00035 #include <household_objects_database_msgs/GetModelDescription.h>
00036 #include <household_objects_database_msgs/GetModelMesh.h>
00037
00038 #include <sensor_msgs/Image.h>
00039 #include <sensor_msgs/CameraInfo.h>
00040 #include <sensor_msgs/PointCloud2.h>
00041
00042 #include <stereo_msgs/DisparityImage.h>
00043
00044 #include <object_segmentation_gui/ObjectSegmentationGuiAction.h>
00045
00046 #include <tabletop_object_detector/marker_generator.h>
00047 #include <tabletop_object_detector/TabletopDetection.h>
00048
00049 using namespace tabletop_object_detector;
00050 using namespace object_recognition_gui;
00051 using namespace household_objects_database_msgs;
00052 using namespace pr2_interactive_object_detection;
00053 using namespace rgbd_assembler;
00054
// Constructor: wires up the action client/server, connects all required
// services and advertises the result/marker publishers, then reads the
// node parameters.
// NOTE(review): if any service connection fails the constructor returns
// early, leaving the backend only partially initialized.
InteractiveObjDetBackend::InteractiveObjDetBackend() :
root_nh_(""),
priv_nh_("~"),
tf_listener_( ros::Duration(60), true )
{
// Pop-up GUI client for interactive recognition (true = own spin thread).
obj_rec_popup_client_ = new actionlib::SimpleActionClient<ObjectRecognitionGuiAction>
("object_recognition_popup", true);

// Action server through which the GUI sends user commands; created
// non-auto-started so no goal arrives before the callback is bound.
user_command_server_ = new actionlib::SimpleActionServer<UserCommandAction>
(root_nh_, "object_detection_user_command", boost::bind(&InteractiveObjDetBackend::processUserCommand, this, _1), false );
user_command_server_->start();

ROS_INFO("Connecting services..");

// Bail out early if any required service cannot be connected.
if ( !connectService<RgbdAssembly>( rgbd_assembler_client_, "rgbd_assembly" ) ) return;

if ( !connectService<TabletopSegmentation>( auto_seg_client_, "tabletop_segmentation" ) ) return;

if ( !connectService<TabletopObjectRecognition>( auto_obj_rec_client_, "tabletop_object_recognition" ) ) return;

if ( !connectService<TabletopDetection>( tabletop_detection_client_, "tabletop_detection" ) ) return;

if ( !connectService<GetModelDescription>
( get_model_description_client_, "objects_database_node/get_model_description" ) ) return;

if ( !connectService<GetModelMesh>
( get_model_mesh_client_, "objects_database_node/get_model_mesh" ) ) return;

// Latched publisher (last arg true): late subscribers still receive the
// most recent detection result.
result_publisher_ = root_nh_.advertise<GraspableObjectList>("interactive_object_recognition_result", 1, true );

marker_pub_ = priv_nh_.advertise<visualization_msgs::Marker>("object_markers", 1);

// Parameters: fit-quality threshold for accepting model markers, the robot
// base frame, and the default table position used by lookAtTable().
priv_nh_.param<double>("min_marker_quality", min_marker_quality_, 0.003);
priv_nh_.param<std::string>("robot_reference_frame_id", robot_reference_frame_id_, "base_link");
priv_nh_.param<double>("table_x", table_x_, 1);
priv_nh_.param<double>("table_y", table_y_, 0);
priv_nh_.param<double>("table_z", table_z_, 0);

ROS_INFO("Backend server for Interactive Object Detection GUI started");
}
00095
00096
// Destructor: releases the heap-allocated action client and server
// created in the constructor.
InteractiveObjDetBackend::~InteractiveObjDetBackend()
{
delete obj_rec_popup_client_;
delete user_command_server_;
}
00102
00103
00104 template <class ServiceType>
00105 bool InteractiveObjDetBackend::connectService( ros::ServiceClient& service_client, std::string topic )
00106 {
00107
00108
00109
00110
00111
00112
00113
00114
00115
00116
00117
00118
00119 service_client = root_nh_.serviceClient<ServiceType>(topic, false);
00120 return true;
00121 }
00122
00123
00124 void InteractiveObjDetBackend::processUserCommand(const UserCommandGoal::ConstPtr &goal)
00125 {
00126 switch (goal->request)
00127 {
00128 case UserCommandGoal::LOOK_AT_TABLE:
00129 lookAtTable();
00130 break;
00131 case UserCommandGoal::SEGMENT:
00132 doSegmentation( goal->interactive );
00133 break;
00134 case UserCommandGoal::RECOGNIZE:
00135 doRecognition( goal->interactive );
00136 break;
00137 case UserCommandGoal::DETECT:
00138 doDetection( goal->interactive );
00139 break;
00140 default:
00141 ROS_ERROR("Unknown user command requested.");
00142 break;
00143 }
00144
00145 if (user_command_server_->isPreemptRequested())
00146 {
00147 preemptAction();
00148 }
00149 }
00150
00151
00152 void InteractiveObjDetBackend::statusFeedback( std::string msg )
00153 {
00154 UserCommandFeedback feedback;
00155 feedback.status = msg;
00156 ROS_INFO_STREAM( feedback.status );
00157 user_command_server_->publishFeedback( feedback );
00158 }
00159
00160 void InteractiveObjDetBackend::abortAction( std::string msg )
00161 {
00162 ROS_ERROR_STREAM( "Action aborted: " << msg );
00163 UserCommandResult result;
00164 user_command_server_->setAborted( result, msg );
00165 }
00166
00167 void InteractiveObjDetBackend::finishAction( std::string msg )
00168 {
00169 if ( user_command_server_->isPreemptRequested() )
00170 {
00171 ROS_WARN( "Preempt requested - action cannot be finished" );
00172 return;
00173 }
00174 ROS_INFO_STREAM( "Action finished: " << msg );
00175 UserCommandResult result;
00176 user_command_server_->setSucceeded( result, msg );
00177 }
00178
00179 void InteractiveObjDetBackend::preemptAction( )
00180 {
00181 ROS_WARN_STREAM( "Action canceled." );
00182 UserCommandResult result;
00183 user_command_server_->setPreempted( result, "Action canceled." );
00184 }
00185
00186 bool InteractiveObjDetBackend::lookAtTable( )
00187 {
00188 geometry_msgs::PointStamped target;
00189 target.point.x = table_x_;
00190 target.point.y = table_y_;
00191 target.point.z = table_z_;
00192 target.header.frame_id = robot_reference_frame_id_;
00193
00194 statusFeedback( "Moving head.." );
00195
00196 if ( !mech_interface_.pointHeadAction( target, "/narrow_stereo_optical_frame" ) )
00197 {
00198 abortAction( "Couldn't move head." );
00199 return false;
00200 }
00201
00202 finishAction( "Robot is looking at table." );
00203 return true;
00204 }
00205
// Fetch a synchronized image / disparity / camera-info / point-cloud
// snapshot from the RGB-D assembler service and cache it in the member
// variables used by the segmentation and recognition steps.
// NOTE(review): the time_out parameter is currently unused -- the service
// call itself blocks with no deadline. Confirm whether a timeout is needed.
bool InteractiveObjDetBackend::getSensorData( ros::Duration time_out )
{
if (!rgbd_assembler_client_.call(rgbd_assembler_srv_))
{
abortAction("Call to rgbd assembler service failed");
return false;
}

// The service reports failures via a result code even when the call itself
// succeeds transport-wise.
if (rgbd_assembler_srv_.response.result != rgbd_assembler_srv_.response.SUCCESS)
{
std::ostringstream s;
s << "RGB-D Assembler service returned error " << (int)rgbd_assembler_srv_.response.result;
abortAction( s.str() );
return false;
}

// Cache the snapshot for the downstream processing steps.
image_ = rgbd_assembler_srv_.response.image;
disparity_image_ = rgbd_assembler_srv_.response.disparity_image;
camera_info_ = rgbd_assembler_srv_.response.camera_info;
point_cloud_ = rgbd_assembler_srv_.response.point_cloud;

return true;
}
00229
// Run the interactive segmentation GUI: send the cached sensor snapshot as
// an action goal, wait for the user to finish, and store the resulting
// clusters and table in segmentation_result_.
// Returns false on node shutdown, preempt, or if the GUI reports an error.
bool InteractiveObjDetBackend::doInteractiveSegmentation( )
{
statusFeedback("Waiting for user input .." );

std::string segm_topic("segmentation_popup");
object_segmentation_gui::ObjectSegmentationGuiGoal segm_goal;
actionlib::SimpleActionClient<object_segmentation_gui::ObjectSegmentationGuiAction>
os_gui_action_client(segm_topic, true);

// Wait for the GUI action server; bail out on shutdown or preempt.
while(!os_gui_action_client.waitForServer(ros::Duration(2.0)) && priv_nh_.ok()
&& !user_command_server_->isPreemptRequested()) {
ROS_INFO("Waiting for action client on topic %s", segm_topic.c_str());
}

if (!priv_nh_.ok()) return false;

if (user_command_server_->isPreemptRequested()) return false;

// Hand the cached sensor snapshot to the GUI.
segm_goal.disparity_image = disparity_image_;
segm_goal.camera_info = camera_info_;
segm_goal.point_cloud = point_cloud_;

os_gui_action_client.sendGoal(segm_goal);
ROS_INFO("Send Data as goal");
// Poll for the result in short intervals so a preempt is noticed while the
// user is still working in the GUI.
while (!os_gui_action_client.waitForResult(ros::Duration(0.5)) && priv_nh_.ok()
&& !user_command_server_->isPreemptRequested()) {
ROS_INFO_STREAM("Waiting for result from action client on topic" << segm_topic);
}

// Propagate a preempt to the GUI so it closes.
if ( user_command_server_->isPreemptRequested() ) {
os_gui_action_client.cancelGoal();
return false;
}

if (os_gui_action_client.getState() != actionlib::SimpleClientGoalState::SUCCEEDED) {

abortAction( "The interactive segmentation has not succeeded;");
return false;
}

// Even a succeeded goal can carry an application-level error code.
object_segmentation_gui::ObjectSegmentationGuiResult segm_result = *(os_gui_action_client.getResult());
if (segm_result.result != object_segmentation_gui::ObjectSegmentationGuiResult::SUCCESS) {
std::ostringstream s;
s << "Interactive Segmentation returned error " << (int)segm_result.result;
abortAction( s.str() );
return false;
}

// Copy the GUI output into the shared segmentation result structure.
segmentation_result_.result = segm_result.result;
segmentation_result_.clusters = segm_result.clusters;
segmentation_result_.table = segm_result.table;

return true;
}
00285
00286
00287 bool InteractiveObjDetBackend::doAutoSegmentation( )
00288 {
00289 tabletop_object_detector::TabletopSegmentation segmentation_srv;
00290
00291 if (!auto_seg_client_.call(segmentation_srv))
00292 {
00293 abortAction("Call to segmentation service failed");
00294 return false;
00295 }
00296
00297 if (segmentation_srv.response.result != TabletopSegmentationResponse::SUCCESS)
00298 {
00299 std::ostringstream s;
00300 s << "Segmentation service returned error " << (int)segmentation_srv.response.result;
00301 abortAction( s.str() );
00302 return false;
00303 }
00304
00305 segmentation_result_ = segmentation_srv.response;
00306
00307 return true;
00308 }
00309
00310
00311 bool InteractiveObjDetBackend::doSegmentation( bool interactive )
00312 {
00313 if (!getSensorData(ros::Duration(10.0))) return false;
00314
00315 segmentation_was_interactive_ = interactive;
00316 recognition_result_.cluster_model_indices.clear();
00317 recognition_result_.models.clear();
00318
00319 if ( interactive )
00320 {
00321 if ( !doInteractiveSegmentation() ) return false;
00322 }
00323 else
00324 {
00325 if ( !doAutoSegmentation() ) return false;
00326 }
00327
00328 std::ostringstream s;
00329 s << "Found " << (int)segmentation_result_.clusters.size() << " clusters.";
00330 finishAction( s.str() );
00331
00332 int n;
00333 publishResult( n );
00334
00335 return true;
00336 }
00337
00338
00339 bool InteractiveObjDetBackend::doInteractiveRecognition()
00340 {
00341
00342 actionlib::SimpleActionClient<ObjectRecognitionGuiAction> or_gui_action_client("object_recognition_popup", true);
00343
00344 statusFeedback("Waiting for object recognition popup service.." );
00345
00346 while(!or_gui_action_client.waitForServer(ros::Duration(2.0)) && priv_nh_.ok() && !user_command_server_->isPreemptRequested())
00347 {
00348 ROS_INFO("Waiting for ObjectRecognitionGuiAction server");
00349 }
00350
00351 if (!priv_nh_.ok() || user_command_server_->isPreemptRequested()) return false;
00352
00353 ObjectRecognitionGuiGoal rec_goal;
00354 rec_goal.image = image_;
00355 rec_goal.camera_info = camera_info_;
00356
00357
00358
00359 statusFeedback("Getting models and coordinate transforms.." );
00360
00361 int num_models = recognition_result_.models.size();
00362 rec_goal.model_hypotheses.resize( num_models );
00363
00364 for ( int m=0; m<num_models; ++m )
00365 {
00366 std::vector<household_objects_database_msgs::DatabaseModelPose> &model_pose_list =
00367 recognition_result_.models[m].model_list;
00368
00369 int num_hypotheses = model_pose_list.size();
00370
00371 rec_goal.model_hypotheses[m].hypotheses.resize( num_hypotheses );
00372
00373 if ( num_hypotheses > 0 )
00374 {
00375 rec_goal.model_hypotheses[m].accept = model_pose_list[0].confidence < min_marker_quality_;
00376 }
00377 else
00378 {
00379 rec_goal.model_hypotheses[m].accept = false;
00380 }
00381
00382 for ( int h=0; h<num_hypotheses; ++h )
00383 {
00384 geometric_shapes_msgs::Shape mesh;
00385
00386 if ( !getModelMesh( model_pose_list[h].model_id, rec_goal.model_hypotheses[m].hypotheses[h].mesh ) )
00387 {
00388 abortAction( "Failed to get model mesh." );
00389 return false;
00390 }
00391
00392 ROS_INFO_STREAM( "Model " << m << ", hypothesis " << h << " (database id "
00393 << model_pose_list[h].model_id << ") has " << mesh.triangles.size()/3 << " triangles and "
00394 << mesh.vertices.size() << " vertices." );
00395
00396 rec_goal.model_hypotheses[m].hypotheses[h].pose = model_pose_list[h].pose;
00397
00398 if ( !transformPose( rec_goal.model_hypotheses[m].hypotheses[h].pose, disparity_image_.header.frame_id ) )
00399 {
00400 abortAction( "Failed to get transform." );
00401 return false;
00402 }
00403
00404 if ( user_command_server_->isPreemptRequested() )
00405 {
00406 return false;
00407 }
00408 }
00409 }
00410
00411 or_gui_action_client.sendGoal(rec_goal);
00412
00413 statusFeedback("Waiting for user input.." );
00414
00415
00416 while (!or_gui_action_client.waitForResult(ros::Duration(2.0)) && priv_nh_.ok() && !user_command_server_->isPreemptRequested())
00417 {
00418 ROS_INFO_STREAM("Waiting for ObjectRecognitionGuiAction result");
00419 }
00420
00421
00422 if ( user_command_server_->isPreemptRequested() )
00423 {
00424 or_gui_action_client.cancelGoal();
00425 return false;
00426 }
00427
00428 object_recognition_gui::ObjectRecognitionGuiResultConstPtr rec_result;
00429 rec_result = or_gui_action_client.getResult();
00430
00431 if ( num_models != (int)rec_result->selected_hypothesis_indices.size() )
00432 {
00433 abortAction( "GUI returned invalid result (# of models in result don't match request.)" );
00434 return false;
00435 }
00436
00437
00438
00439 for ( int m=0; m<num_models; ++m )
00440 {
00441 std::vector<household_objects_database_msgs::DatabaseModelPose> &model_pose_list =
00442 recognition_result_.models[m].model_list;
00443
00444 int sel_hyp_index = rec_result->selected_hypothesis_indices[m];
00445
00446 if ( sel_hyp_index >=0 && sel_hyp_index < (int)model_pose_list.size() )
00447 {
00448 household_objects_database_msgs::DatabaseModelPose sel_hyp = model_pose_list[sel_hyp_index];
00449 sel_hyp.confidence = 0;
00450 model_pose_list.clear();
00451 model_pose_list.push_back( sel_hyp );
00452 }
00453 else
00454 {
00455 model_pose_list.clear();
00456 }
00457 }
00458
00459 if (!priv_nh_.ok()) return false;
00460
00461 return true;
00462 }
00463
00464
00465 bool InteractiveObjDetBackend::doAutoRecognition( )
00466 {
00467 statusFeedback("Doing automatic recognition." );
00468
00469 tabletop_object_detector::TabletopObjectRecognition recognition_srv;
00470
00471
00472 recognition_srv.request.table = segmentation_result_.table;
00473 recognition_srv.request.clusters = segmentation_result_.clusters;
00474 recognition_srv.request.num_models = 5;
00475 recognition_srv.request.perform_fit_merge = !segmentation_was_interactive_;
00476
00477 if (!auto_obj_rec_client_.call(recognition_srv))
00478 {
00479 abortAction("Call to recognition service failed");
00480 return false;
00481 }
00482
00483
00484 if (recognition_srv.response.models.size() == 0)
00485 {
00486 abortAction( "No objects found." );
00487 return false;
00488 }
00489 else
00490 {
00491 std::ostringstream s;
00492 s << recognition_srv.response.models.size() << " objects recognized.";
00493 statusFeedback( s.str() );
00494 }
00495
00496 recognition_result_ = recognition_srv.response;
00497
00498 return true;
00499 }
00500
00501 bool InteractiveObjDetBackend::doRecognition( bool interactive )
00502 {
00503 if ( segmentation_result_.clusters.empty() )
00504 {
00505 abortAction("No clusters found or no segmentation done yet.");
00506 return false;
00507 }
00508
00509
00510 if ( !doAutoRecognition() ) return false;
00511
00512 if (user_command_server_->isPreemptRequested()) return false;
00513
00514
00515 if ( interactive )
00516 {
00517 if ( !doInteractiveRecognition() ) return false;
00518 }
00519
00520 if (user_command_server_->isPreemptRequested()) return false;
00521
00522
00523 ROS_INFO( "Object detection procedure finished. Publishing result." );
00524 int num_recognized;
00525
00526 if ( !publishResult( num_recognized ) )
00527 {
00528 abortAction( "An error occured while gathering the result data." );
00529 return false;
00530 }
00531
00532 std::ostringstream s;
00533 s << num_recognized << " of " << segmentation_result_.clusters.size() << " objects recognized.";
00534 finishAction( s.str() );
00535
00536 return true;
00537 }
00538
00539
00540 bool InteractiveObjDetBackend::doAutoDetection( )
00541 {
00542 statusFeedback("Doing automatic detection." );
00543
00544 tabletop_object_detector::TabletopDetection tabletop_detection_srv;
00545
00546
00547 tabletop_detection_srv.request.return_clusters = true;
00548 tabletop_detection_srv.request.return_models = true;
00549 tabletop_detection_srv.request.num_models = 5;
00550
00551 if (!tabletop_detection_client_.call(tabletop_detection_srv))
00552 {
00553 abortAction("Call to tabletop detection service failed");
00554 return false;
00555 }
00556
00557
00558 if (tabletop_detection_srv.response.detection.models.size() == 0)
00559 {
00560 abortAction( "No objects found." );
00561 return false;
00562 }
00563 else
00564 {
00565 std::ostringstream s;
00566 s << tabletop_detection_srv.response.detection.models.size() << " objects recognized.";
00567 statusFeedback( s.str() );
00568 }
00569
00570 ROS_INFO( "Tabletop detection returned %d clusters.", (int)tabletop_detection_srv.response.detection.clusters.size() );
00571
00572 segmentation_result_.clusters = tabletop_detection_srv.response.detection.clusters;
00573 recognition_result_.cluster_model_indices = tabletop_detection_srv.response.detection.cluster_model_indices;
00574 recognition_result_.models = tabletop_detection_srv.response.detection.models;
00575
00576
00577
00578
00579
00580 return true;
00581 }
00582
00583
00584 bool InteractiveObjDetBackend::doDetection( bool interactive )
00585 {
00586
00587 if ( !doAutoDetection() ) return false;
00588
00589 if (user_command_server_->isPreemptRequested()) return false;
00590
00591 if (!getSensorData(ros::Duration(10.0))) return false;
00592
00593
00594 if ( interactive )
00595 {
00596 if ( !doInteractiveRecognition() ) return false;
00597 }
00598
00599 if (user_command_server_->isPreemptRequested()) return false;
00600
00601
00602 ROS_INFO( "Object detection procedure finished. Publishing result." );
00603 int num_recognized;
00604
00605 if ( !publishResult( num_recognized ) )
00606 {
00607 abortAction( "An error occured while gathering the result data." );
00608 return false;
00609 }
00610
00611 std::ostringstream s;
00612 s << num_recognized << " of " << segmentation_result_.clusters.size() << " objects recognized.";
00613 finishAction( s.str() );
00614
00615 return true;
00616 }
00617
00618 bool InteractiveObjDetBackend::publishResult( int &num_recognized )
00619 {
00620 if ( segmentation_result_.clusters.size() == 0 && recognition_result_.models.size() == 0 )
00621 {
00622 ROS_ERROR( "No results available for publishing!" );
00623 return false;
00624 }
00625
00626 num_recognized = 0;
00627 GraspableObjectList grasp_obj_list;
00628
00629 std::string reference_frame_id;
00630
00631 if ( segmentation_result_.clusters.size() > 0 )
00632 {
00633 reference_frame_id = segmentation_result_.clusters[0].header.frame_id;
00634 }
00635 else
00636 {
00637 if ( recognition_result_.models[0].model_list.size() == 0 )
00638 {
00639 ROS_ERROR( "Model list 0 from recognition result is empty!" );
00640 return false;
00641 }
00642 reference_frame_id = recognition_result_.models[0].model_list[0].pose.header.frame_id;
00643 }
00644
00645 ROS_INFO( "The reference frame is %s", reference_frame_id.c_str() );
00646
00647 grasp_obj_list.camera_info = camera_info_;
00648 grasp_obj_list.image = image_;
00649 getPose( grasp_obj_list.reference_to_camera, reference_frame_id, disparity_image_.header.frame_id );
00650
00651 unsigned num_clusters = segmentation_result_.clusters.size();
00652 unsigned num_models = recognition_result_.models.size();
00653
00654 std::set<unsigned> used_model_indices;
00655 std::set<unsigned> used_cluster_indices;
00656
00657 ROS_INFO( "Number of clusters: %d models: %d", num_clusters, num_models );
00658
00659
00660
00661 for ( unsigned cluster_index=0; cluster_index<num_clusters; ++cluster_index )
00662 {
00663 object_manipulation_msgs::GraspableObject graspable_object;
00664 geometric_shapes_msgs::Shape mesh;
00665
00666
00667 graspable_object.cluster = segmentation_result_.clusters[cluster_index];
00668 graspable_object.reference_frame_id = reference_frame_id;
00669
00670
00671 if ( recognition_result_.cluster_model_indices.size() == num_clusters )
00672 {
00673 unsigned model_index = recognition_result_.cluster_model_indices[cluster_index];
00674
00675
00676
00677
00678 if ( used_model_indices.find( model_index ) != used_model_indices.end() )
00679 {
00680 continue;
00681 }
00682
00683 used_model_indices.insert( model_index );
00684
00685 if ( model_index > recognition_result_.models.size() )
00686 {
00687 ROS_ERROR( "Invalid model index for cluster %d!", cluster_index );
00688 return false;
00689 }
00690
00691 graspable_object.cluster = segmentation_result_.clusters[cluster_index];
00692 graspable_object.reference_frame_id = robot_reference_frame_id_;
00693
00694
00695
00696 if ( recognition_result_.models[ model_index ].model_list.size() == 0 ||
00697 recognition_result_.models[ model_index ].model_list[0].confidence > min_marker_quality_ )
00698 {
00699 ROS_INFO( "Model %d was not recognized.", model_index );
00700
00701
00702 if ( graspable_object.cluster.header.frame_id != reference_frame_id )
00703 {
00704 ROS_ERROR( "Point cluster has wrong frame id (%s)", graspable_object.cluster.header.frame_id.c_str() );
00705 return false;
00706 }
00707 }
00708 else
00709 {
00710 graspable_object.potential_models = recognition_result_.models[ model_index ].model_list;
00711
00712 if ( !getModelMesh( graspable_object.potential_models[0].model_id, mesh ) )
00713 {
00714 ROS_ERROR( "Failed to get model mesh." );
00715 return false;
00716 }
00717
00718 ROS_INFO_STREAM( "Model " << model_index << " (database id "
00719 << graspable_object.potential_models[0].model_id << ") has " << mesh.triangles.size()/3 << " triangles and "
00720 << mesh.vertices.size() << " vertices." );
00721
00722 if ( graspable_object.potential_models[0].pose.header.frame_id != reference_frame_id )
00723 {
00724 ROS_ERROR( "Model pose has wrong frame id (%s)", graspable_object.potential_models[0].pose.header.frame_id.c_str() );
00725 return false;
00726 }
00727
00728 num_recognized++;
00729 }
00730 }
00731
00732 grasp_obj_list.meshes.push_back( mesh );
00733 grasp_obj_list.graspable_objects.push_back( graspable_object );
00734 }
00735
00736
00737
00738 for ( unsigned model_index=0; model_index<num_models; ++model_index )
00739 {
00740
00741 if ( used_model_indices.find( model_index ) != used_model_indices.end() )
00742 {
00743 continue;
00744 }
00745
00746
00747
00748 if ( recognition_result_.models[ model_index ].model_list.size() == 0 ||
00749 recognition_result_.models[ model_index ].model_list[0].confidence > min_marker_quality_ )
00750 {
00751 ROS_INFO( "Model %d was not recognized.", model_index );
00752 continue;
00753 }
00754
00755 used_model_indices.insert( model_index );
00756
00757 object_manipulation_msgs::GraspableObject graspable_object;
00758 geometric_shapes_msgs::Shape mesh;
00759
00760 graspable_object.reference_frame_id = reference_frame_id;
00761 graspable_object.potential_models = recognition_result_.models[ model_index ].model_list;
00762
00763 if ( !getModelMesh( graspable_object.potential_models[0].model_id, mesh ) )
00764 {
00765 ROS_ERROR( "Failed to get mesh for model #%d.", model_index );
00766 return false;
00767 }
00768
00769 ROS_INFO_STREAM( "Model " << model_index << " (database id "
00770 << graspable_object.potential_models[0].model_id << ") has " << mesh.triangles.size()/3 << " triangles and "
00771 << mesh.vertices.size() << " vertices." );
00772
00773 if ( graspable_object.potential_models[0].pose.header.frame_id != reference_frame_id )
00774 {
00775 ROS_ERROR( "Model pose has wrong frame id (%s)", graspable_object.potential_models[0].pose.header.frame_id.c_str() );
00776 return false;
00777 }
00778
00779 num_recognized++;
00780
00781
00782 graspable_object.potential_models[0].pose.header.stamp = ros::Time();
00783
00784 grasp_obj_list.meshes.push_back( mesh );
00785 grasp_obj_list.graspable_objects.push_back( graspable_object );
00786 }
00787
00788 result_publisher_.publish( grasp_obj_list );
00789
00790 printObjects( grasp_obj_list.graspable_objects );
00791
00792 publishFitMarkers( recognition_result_.models, segmentation_result_.table );
00793
00794 return true;
00795 }
00796
00797
00798 void InteractiveObjDetBackend::publishFitMarkers(
00799 const std::vector<household_objects_database_msgs::DatabaseModelPoseList> &potential_models,
00800 const tabletop_object_detector::Table &table )
00801 {
00802
00803 for ( size_t i=0; i<delete_markers_.size(); i++ )
00804 {
00805 delete_markers_[i].header.stamp = ros::Time::now();
00806 marker_pub_.publish( delete_markers_[i] );
00807 }
00808 delete_markers_.clear();
00809
00810 for (size_t i=0; i<potential_models.size(); i++)
00811 {
00812 const std::vector<household_objects_database_msgs::DatabaseModelPose> models = potential_models[i].model_list;
00813 for (size_t j=0; j<models.size(); j++)
00814 {
00815 geometric_shapes_msgs::Shape mesh;
00816
00817 if ( !getModelMesh( models[j].model_id, mesh ) )
00818 {
00819 ROS_ERROR("Failed to call database get mesh service for marker display");
00820 }
00821 else
00822 {
00823 double rank = ((double)j) / std::max( (int)(models.size())-1, 1 );
00824 visualization_msgs::Marker fitMarker = MarkerGenerator::getFitMarker(mesh, rank);
00825 fitMarker.header = models[j].pose.header;
00826 fitMarker.pose = models[j].pose.pose;
00827 fitMarker.ns = "pr2_interactive_object_detection_model_" + boost::lexical_cast<std::string>(j);
00828 fitMarker.id = current_marker_id_++;
00829 marker_pub_.publish(fitMarker);
00830
00831
00832 visualization_msgs::Marker delete_marker;
00833 delete_marker.header.frame_id = fitMarker.header.frame_id;
00834 delete_marker.id = fitMarker.id;
00835 delete_marker.ns = fitMarker.ns;
00836 delete_marker.action = visualization_msgs::Marker::DELETE;
00837 delete_markers_.push_back( delete_marker );
00838 }
00839 }
00840 }
00841 }
00842
00843 bool InteractiveObjDetBackend::getPose( geometry_msgs::Pose &pose, std::string from_frame, std::string to_frame )
00844 {
00845
00846
00847 ros::Time pose_time = ros::Time::now();
00848
00849 tf::StampedTransform pose_transform;
00850 try
00851 {
00852 tf::Transformer transformer;
00853 ROS_INFO_STREAM( "Looking up transform from " << from_frame << " to " << to_frame );
00854 tf_listener_.waitForTransform(to_frame, from_frame, pose_time, ros::Duration(3.0));
00855 tf_listener_.lookupTransform(to_frame, from_frame, pose_time, pose_transform);
00856 }
00857 catch (tf::TransformException ex)
00858 {
00859 ROS_ERROR("%s",ex.what());
00860 return false;
00861 }
00862
00863
00864 tf::poseTFToMsg ( pose_transform , pose );
00865 ROS_INFO( "Position: %f, %f, %f / orientation: %f %f %f %f ",
00866 pose.position.x, pose.position.y, pose.position.z,
00867 pose.orientation.w, pose.orientation.x, pose.orientation.y, pose.orientation.z );
00868
00869 return true;
00870 }
00871
00872
00873
00874 bool InteractiveObjDetBackend::transformPose( geometry_msgs::PoseStamped &pose, std::string target_frame )
00875 {
00876
00877 btTransform old_pose;
00878 tf::poseMsgToTF ( pose.pose, old_pose );
00879
00880
00881 std::string old_frame = pose.header.frame_id;
00882
00883
00884
00885 ros::Time pose_time = ros::Time::now();
00886
00887 tf::StampedTransform old_to_target;
00888 try
00889 {
00890 tf::Transformer transformer;
00891 ROS_INFO_STREAM( "Looking up transform from " << old_frame << " to " << target_frame );
00892 tf_listener_.waitForTransform(target_frame, old_frame, pose_time, ros::Duration(3.0));
00893 tf_listener_.lookupTransform(target_frame, old_frame, pose_time, old_to_target);
00894 }
00895 catch (tf::TransformException ex)
00896 {
00897 ROS_ERROR("%s",ex.what());
00898 return false;
00899 }
00900
00901
00902 btTransform target_pose = old_to_target * old_pose;
00903
00904 ROS_INFO( "Old position %f, %f, %f / orientation %f %f %f %f ",
00905 pose.pose.position.x, pose.pose.position.y, pose.pose.position.z,
00906 pose.pose.orientation.w, pose.pose.orientation.x, pose.pose.orientation.y, pose.pose.orientation.z );
00907
00908
00909 tf::poseTFToMsg ( target_pose , pose.pose );
00910 pose.header.frame_id = target_frame;
00911
00912 ROS_INFO( "New position %f, %f, %f / orientation %f %f %f %f ",
00913 pose.pose.position.x, pose.pose.position.y, pose.pose.position.z,
00914 pose.pose.orientation.w, pose.pose.orientation.x, pose.pose.orientation.y, pose.pose.orientation.z );
00915
00916 return true;
00917 }
00918
00919
00920 bool InteractiveObjDetBackend::getModelMesh( int model_id, geometric_shapes_msgs::Shape& mesh )
00921 {
00922 GetModelMesh mesh_srv;
00923
00924 mesh_srv.request.model_id = model_id;
00925 if ( !get_model_mesh_client_.call(mesh_srv) )
00926 {
00927 ROS_ERROR("Failed to call get model mesh service");
00928 return false;
00929 }
00930
00931 if (mesh_srv.response.return_code.code != DatabaseReturnCode::SUCCESS)
00932 {
00933 ROS_ERROR("Model mesh service reports an error (code %d)", mesh_srv.response.return_code.code);
00934 return false;
00935 }
00936
00937 mesh = mesh_srv.response.mesh;
00938 return true;
00939 }
00940
00941
00942 bool InteractiveObjDetBackend::getModelInfo(const DatabaseModelPose &model_pose,
00943 std::string &name, std::string &all_tags)
00944 {
00945 GetModelDescription desc;
00946 desc.request.model_id = model_pose.model_id;
00947 if ( !get_model_description_client_.call(desc) )
00948 {
00949 ROS_ERROR("Failed to call get model description service");
00950 return false;
00951 }
00952
00953 if (desc.response.return_code.code != desc.response.return_code.SUCCESS )
00954 {
00955 ROS_ERROR("Model description service reports an error (code %d)", desc.response.return_code.code);
00956 return false;
00957 }
00958 name = desc.response.name;
00959 for (size_t i=0; i<desc.response.tags.size(); i++)
00960 {
00961 if (!all_tags.empty()) all_tags.append(",");
00962 all_tags.append(desc.response.tags.at(i));
00963 }
00964 return true;
00965 }
00966
00967
00968 int InteractiveObjDetBackend::printObjects(const std::vector<object_manipulation_msgs::GraspableObject> &objects)
00969 {
00970 ROS_INFO_STREAM( "Detected " << objects.size() << " graspable object(s):\n" );
00971 for (size_t m=0; m<objects.size(); m++)
00972 {
00973 std::string name, all_tags;
00974 if (!objects[m].potential_models.empty())
00975 {
00976 if (getModelInfo(objects[m].potential_models[0], name, all_tags))
00977 {
00978 ROS_INFO(" (%d): %s (tags: %s) in frame %s", (int)m, name.c_str(), all_tags.c_str(),
00979 objects[m].potential_models[0].pose.header.frame_id.c_str());
00980 }
00981 else
00982 {
00983 ROS_INFO(" (%d): database object, details not available.", (int)m );
00984 }
00985 }
00986 else
00987 {
00988 ROS_INFO(" (%d): unrecognized cluster with %d points", (int)m,
00989 (unsigned int)objects[m].cluster.points.size());
00990 }
00991 }
00992 return 0;
00993 }
00994
00995
00996 int main(int argc, char **argv)
00997 {
00998 ros::init(argc, argv, "pr2_interactive_object_detection_backend");
00999 InteractiveObjDetBackend node;
01000 ros::spin();
01001 return 0;
01002 }