ObjectRecognitionListener.cpp

/*!
 * \file ObjectRecognitionListener.cpp
 * \brief Listens for segmented objects and attempts to recognize each one.
 *
 * \author David Kent
 * \author Russell Toris
 * \author bhetherman
 */

// RAIL Recognition
#include "rail_recognition/ObjectRecognitionListener.h"
#include "rail_recognition/PointCloudRecognizer.h"

// ROS
#include <geometry_msgs/PoseArray.h>
#include <pcl/common/centroid.h>
#include <pcl/common/common.h>
#include <pcl/filters/project_inliers.h>
#include <pcl/ModelCoefficients.h>
#include <rail_recognition/PointCloudMetrics.h>
#include <tf/transform_datatypes.h>

using namespace std;
using namespace rail::pick_and_place;

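/*!
 * Creates the listener: reads the ROS parameters, connects to the grasp
 * database, optionally loads the image recognizer, and sets up the
 * subscriber, publishers, and service used by this node.
 */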
ObjectRecognitionListener::ObjectRecognitionListener() : private_node_("~")
{
  // set defaults
  debug_ = DEFAULT_DEBUG;
  string segmented_objects_topic("/segmentation/segmented_objects");
  int port = graspdb::Client::DEFAULT_PORT;
  string host("127.0.0.1");
  string user("ros");
  string password("");
  string db("graspdb");
  use_image_recognition_ = true;

  // grab any parameters we need
  private_node_.getParam("debug", debug_);
  private_node_.getParam("segmented_objects_topic", segmented_objects_topic);
  private_node_.getParam("use_image_recognition", use_image_recognition_);
  node_.getParam("/graspdb/host", host);
  node_.getParam("/graspdb/port", port);
  node_.getParam("/graspdb/user", user);
  node_.getParam("/graspdb/password", password);
  node_.getParam("/graspdb/db", db);

  // connect to the grasp database
  graspdb_ = new graspdb::Client(host, port, user, password, db);
  okay_ = graspdb_->connect();

  // load the image recognition data (if it's being used)
  if (use_image_recognition_)
  {
    image_recognizer_.loadImageRecognizer();
  }

  // setup a debug publisher if we need it
  if (debug_)
  {
    debug_pub_ = private_node_.advertise<geometry_msgs::PoseArray>("debug", 1, true);
  }

  segmented_objects_sub_ = node_.subscribe(segmented_objects_topic, 1,
                                           &ObjectRecognitionListener::segmentedObjectsCallback, this);
  recognized_objects_pub_ = private_node_.advertise<rail_manipulation_msgs::SegmentedObjectList>(
      "recognized_objects", 1);
  remove_object_srv_ = private_node_.advertiseService("remove_object",
                                                      &ObjectRecognitionListener::removeObjectCallback, this);

  if (okay_)
  {
    ROS_INFO("Object Recognition Listener Successfully Initialized");
  }
}

ObjectRecognitionListener::~ObjectRecognitionListener()
{
  // cleanup
  graspdb_->disconnect();
  delete graspdb_;
}

bool ObjectRecognitionListener::okay() const
{
  return okay_;
}

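/*!
 * Callback for newly segmented objects. Objects that were previously
 * recognized and whose point clouds are unchanged are carried over as-is;
 * the rest are run through recognition. Nearby recognized duplicates of the
 * same object are then merged before the list is republished.
 */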
void ObjectRecognitionListener::segmentedObjectsCallback(
    const rail_manipulation_msgs::SegmentedObjectList::ConstPtr &objects)
{
  // lock for the object list
  boost::mutex::scoped_lock lock(mutex_);

  ROS_INFO("Received %lu segmented objects.", objects->objects.size());

  // check against the old list to prevent throwing out data
  vector<rail_manipulation_msgs::SegmentedObject> new_list;
  for (size_t i = 0; i < objects->objects.size(); i++)
  {
    bool matched = false;
    // search for a match on the point cloud
    for (size_t j = 0; j < object_list_.objects.size(); j++)
    {
      // only do a compare if we previously recognized the object
      if (object_list_.objects[j].recognized &&
          this->comparePointClouds(objects->objects[i].point_cloud, object_list_.objects[j].point_cloud))
      {
        ROS_INFO("Found a match from previously recognized objects.");
        matched = true;
        new_list.push_back(object_list_.objects[j]);
        break;
      }
    }

    // check if we didn't match
    if (!matched)
    {
      new_list.push_back(objects->objects[i]);
    }
  }

  // store the list
  object_list_.objects = new_list;
  object_list_.cleared = objects->cleared;

  // run recognition
  ROS_INFO("Running recognition...");

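  // Candidate grasp models for point cloud registration. With image
  // recognition enabled, the candidate set is rebuilt per object below;
  // without it, every grasp model in the database is loaded once up front
  // and reused for each object.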
  vector<PCLGraspModel> pcl_candidates;
  if (!use_image_recognition_) // populate the full list of candidates to use directly with point cloud registration
  {
    // populate candidates
    vector<graspdb::GraspModel> candidates;
    graspdb_->loadGraspModels(candidates);
    // convert to PCL grasp models
    for (size_t i = 0; i < candidates.size(); i++)
    {
      pcl_candidates.push_back(PCLGraspModel(candidates[i]));
    }
  }

  // go through the current list
  PointCloudRecognizer point_cloud_recognizer;
  for (size_t i = 0; i < object_list_.objects.size(); i++)
  {
    // the candidate set is rebuilt per object when image recognition is used;
    // without image recognition, the full list loaded above must persist
    if (use_image_recognition_)
    {
      pcl_candidates.clear();
    }

    // check if it is already recognized
    rail_manipulation_msgs::SegmentedObject &object = object_list_.objects[i];
    if (!object.recognized)
    {
      if (use_image_recognition_)
      {
        // perform recognition based on the image
        vector<pair<float, string> > image_recognizer_results;
        image_recognizer_.recognizeObject(object_list_.objects[i], image_recognizer_results);

        // use results above the recognition threshold as candidates for point cloud registration
        vector<string> candidate_names;
        for (unsigned int j = 0; j < image_recognizer_results.size(); j++)
        {
          if (image_recognizer_results[j].first >= RECOGNITION_THRESHOLD)
          {
            candidate_names.push_back(image_recognizer_results[j].second);
          }
          else
          {
            // results are assumed to be sorted by decreasing confidence, so stop at the first miss
            break;
          }
        }

        ROS_INFO("Image recognition resulted in %lu candidates.", candidate_names.size());

        if (candidate_names.empty())
        {
          ROS_INFO("No suitable recognition results, object %lu is unrecognized.", i);
        }
        else
        {
          // get candidates from graspdb
          vector<graspdb::GraspModel> candidates;
          for (unsigned int j = 0; j < candidate_names.size(); j++)
          {
            ROS_INFO("Attempting to load candidates with name: %s", candidate_names[j].c_str());
            graspdb_->loadGraspModelsByObjectName(candidate_names[j], candidates);
          }
          ROS_INFO("Successfully loaded %lu candidates:", candidates.size());
          // convert to PCL grasp models
          pcl_candidates.clear();
          for (size_t j = 0; j < candidates.size(); j++)
          {
            pcl_candidates.push_back(PCLGraspModel(candidates[j]));
            ROS_INFO("%s", candidates[j].getObjectName().c_str());
          }

          // refine recognition with registration to a point cloud model
          point_cloud_recognizer.recognizeObject(object, pcl_candidates);
        }
      }
      else
      {
        point_cloud_recognizer.recognizeObject(object, pcl_candidates);
      }
    }
  }

  // check if any recognized models should be combined
  if (object_list_.objects.size() > 1)
  {
    bool something_combined = false;
    for (size_t i = 0; i < object_list_.objects.size() - 1; i++)
    {
      for (size_t j = i + 1; j < object_list_.objects.size(); j++)
      {
        // two models can potentially be combined if they are the same object type
        if (object_list_.objects[i].recognized && object_list_.objects[j].recognized
            && object_list_.objects[i].name == object_list_.objects[j].name)
        {
          double distance = sqrt(pow(object_list_.objects[i].center.x - object_list_.objects[j].center.x, 2)
                                 + pow(object_list_.objects[i].center.y - object_list_.objects[j].center.y, 2)
                                 + pow(object_list_.objects[i].center.z - object_list_.objects[j].center.z, 2));
          if (distance <= SAME_OBJECT_DIST_THRESHOLD)
          {
            rail_manipulation_msgs::SegmentedObject combined;
            this->combineModels(object_list_.objects[i], object_list_.objects[j], combined);
            object_list_.objects[i] = combined;
            object_list_.objects.erase(object_list_.objects.begin() + j);
            j--;
            something_combined = true;
          }
        }
      }
    }

    if (something_combined)
    {
      // re-index marker ids since the object_list_.objects indexing has changed
      for (size_t i = 0; i < object_list_.objects.size(); i++)
      {
        object_list_.objects[i].marker.id = i;
      }
    }
  }

  // republish the new list
  recognized_objects_pub_.publish(object_list_);
  // check for debug publishing
  if (debug_)
  {
    geometry_msgs::PoseArray poses;
    for (size_t i = 0; i < object_list_.objects.size(); i++)
    {
      for (size_t j = 0; j < object_list_.objects[i].grasps.size(); j++)
      {
        poses.header = object_list_.objects[i].grasps[j].grasp_pose.header;
        poses.poses.push_back(object_list_.objects[i].grasps[j].grasp_pose.pose);
      }
    }
    debug_pub_.publish(poses);
  }

  ROS_INFO("New recognized objects published.");
}

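/*!
 * Service callback to remove the object at the requested index from the
 * current list. On success the updated list is republished; the call fails
 * if the index is out of range.
 */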
bool ObjectRecognitionListener::removeObjectCallback(rail_pick_and_place_msgs::RemoveObject::Request &req,
    rail_pick_and_place_msgs::RemoveObject::Response &res)
{
  // lock for the object list
  boost::mutex::scoped_lock lock(mutex_);

  if (req.index < object_list_.objects.size())
  {
    // remove
    object_list_.objects.erase(object_list_.objects.begin() + req.index);
    // set header information
    object_list_.header.seq++;
    object_list_.header.stamp = ros::Time::now();
    object_list_.cleared = false;
    // republish
    recognized_objects_pub_.publish(object_list_);
    return true;
  } else
  {
    ROS_ERROR("Attempted to remove index %d from list of size %lu.", req.index, object_list_.objects.size());
    return false;
  }
}
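
// Example service call from the command line (a sketch, not part of this
// file; "object_recognition_listener" is assumed to be this node's name):
//   rosservice call /object_recognition_listener/remove_object "index: 0"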
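/*!
 * Cheap equality check for two point cloud messages: the raw data buffers
 * are compared byte-for-byte (with a size check first as a shortcut).
 * Identical buffers are taken to mean the same, unmoved object.
 */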
bool ObjectRecognitionListener::comparePointClouds(const sensor_msgs::PointCloud2 &pc1,
    const sensor_msgs::PointCloud2 &pc2) const
{
  // check the size first as a shortcut, then compare the full data buffers
  return (pc1.data.size() == pc2.data.size()) && (pc1.data == pc2.data);
}

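/*!
 * Merges two segmented objects that were recognized as the same model: the
 * point clouds are concatenated and the derived attributes (centroid,
 * center, bounding box, orientation) are recomputed, the marker colors are
 * averaged by point count, and the grasp lists are merged by success rate.
 */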
void ObjectRecognitionListener::combineModels(const rail_manipulation_msgs::SegmentedObject &model1,
    const rail_manipulation_msgs::SegmentedObject &model2, rail_manipulation_msgs::SegmentedObject &combined) const
{
  ROS_INFO("Combining two %s models...", model1.name.c_str());

  // set members that won't change
  combined.name = model1.name;
  combined.recognized = model1.recognized;
  // keep the first model ID, as merging them won't make sense
  combined.model_id = model1.model_id;
  // TODO: calculate the merged image, for now it uses just the largest
  combined.image = (model1.image.data.size() > model2.image.data.size()) ? model1.image : model2.image;
  combined.confidence = max(model1.confidence, model2.confidence);

  // combine point clouds
  pcl::PointCloud<pcl::PointXYZRGB>::Ptr cloud1(new pcl::PointCloud<pcl::PointXYZRGB>);
  pcl::PointCloud<pcl::PointXYZRGB>::Ptr cloud2(new pcl::PointCloud<pcl::PointXYZRGB>);
  pcl::PointCloud<pcl::PointXYZRGB>::Ptr combined_cloud(new pcl::PointCloud<pcl::PointXYZRGB>);
  point_cloud_metrics::rosPointCloud2ToPCLPointCloud(model1.point_cloud, cloud1);
  point_cloud_metrics::rosPointCloud2ToPCLPointCloud(model2.point_cloud, cloud2);
  *combined_cloud = *cloud1 + *cloud2;
  point_cloud_metrics::pclPointCloudToROSPointCloud2(combined_cloud, combined.point_cloud);

  // calculate new point cloud attributes (center, centroid, depth, height, width, orientation)
  Eigen::Vector4f centroid;
  pcl::compute3DCentroid(*combined_cloud, centroid);
  combined.centroid.x = centroid[0];
  combined.centroid.y = centroid[1];
  combined.centroid.z = centroid[2];

  // calculate the new bounding box
  Eigen::Vector4f min_pt, max_pt;
  pcl::getMinMax3D(*combined_cloud, min_pt, max_pt);
  combined.width = max_pt[0] - min_pt[0];
  combined.depth = max_pt[1] - min_pt[1];
  combined.height = max_pt[2] - min_pt[2];

  // calculate the new center
  combined.center.x = (max_pt[0] + min_pt[0]) / 2.0;
  combined.center.y = (max_pt[1] + min_pt[1]) / 2.0;
  combined.center.z = (max_pt[2] + min_pt[2]) / 2.0;

  // recalculate orientation of combined point clouds
  pcl::PointCloud<pcl::PointXYZRGB>::Ptr projected_cluster(new pcl::PointCloud<pcl::PointXYZRGB>);
  // project point cloud onto the xy plane
  pcl::ModelCoefficients::Ptr coefficients(new pcl::ModelCoefficients());
  coefficients->values.resize(4);
  coefficients->values[0] = 0;
  coefficients->values[1] = 0;
  coefficients->values[2] = 1.0;
  coefficients->values[3] = 0;
  pcl::ProjectInliers<pcl::PointXYZRGB> proj;
  proj.setModelType(pcl::SACMODEL_PLANE);
  proj.setInputCloud(combined_cloud);
  proj.setModelCoefficients(coefficients);
  proj.filter(*projected_cluster);

  // calculate the eigenvectors of the projected point cloud's covariance matrix, used to determine orientation
  Eigen::Vector4f projected_centroid;
  Eigen::Matrix3f covariance_matrix;
  pcl::compute3DCentroid(*projected_cluster, projected_centroid);
  pcl::computeCovarianceMatrixNormalized(*projected_cluster, projected_centroid, covariance_matrix);
  Eigen::SelfAdjointEigenSolver<Eigen::Matrix3f> eigen_solver(covariance_matrix, Eigen::ComputeEigenvectors);
  Eigen::Matrix3f eigen_vectors = eigen_solver.eigenvectors();
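  // replace the third eigenvector with the cross product of the first two to
  // guarantee a right-handed coordinate frame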
  eigen_vectors.col(2) = eigen_vectors.col(0).cross(eigen_vectors.col(1));
  // calculate rotation from eigenvectors
  const Eigen::Quaternionf qfinal(eigen_vectors);

  // convert orientation to a single angle on the 2D plane defined by the segmentation coordinate frame
  tf::Quaternion tf_quat;
  tf_quat.setValue(qfinal.x(), qfinal.y(), qfinal.z(), qfinal.w());
  double r, p, y;
  tf::Matrix3x3 m(tf_quat);
  m.getRPY(r, p, y);
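  // collapse to a single angle about the vertical axis (for a cloud
  // flattened onto the xy plane, the roll and yaw components are summed to
  // recover the in-plane rotation), then normalize to (-pi, pi]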
  double angle = r + y;
  while (angle < -M_PI)
  {
    angle += 2 * M_PI;
  }
  while (angle > M_PI)
  {
    angle -= 2 * M_PI;
  }
  combined.orientation = tf::createQuaternionMsgFromYaw(angle);

  // combine the two markers
  combined.marker = model1.marker;
  combined.marker.points.insert(combined.marker.points.end(), model2.marker.points.begin(), model2.marker.points.end());

  // set the average RGB, weighted by the number of points in each marker
  combined.marker.color.r = ((model1.marker.color.r * model1.marker.points.size()) +
                             (model2.marker.color.r * model2.marker.points.size())) /
                            (model1.marker.points.size() + model2.marker.points.size());
  combined.marker.color.g = ((model1.marker.color.g * model1.marker.points.size()) +
                             (model2.marker.color.g * model2.marker.points.size())) /
                            (model1.marker.points.size() + model2.marker.points.size());
  combined.marker.color.b = ((model1.marker.color.b * model1.marker.points.size()) +
                             (model2.marker.color.b * model2.marker.points.size())) /
                            (model1.marker.points.size() + model2.marker.points.size());

  // combine the grasp lists while maintaining their order: model1's grasps
  // keep their ordering, model2's un-attempted grasps go to the front, and
  // model2's attempted grasps are inserted by success rate (an un-attempted
  // grasp already in the list counts as a 100% success rate)
  combined.grasps = model1.grasps;
  for (size_t i = 0; i < model2.grasps.size(); i++)
  {
    // add un-attempted grasps to the front of the list
    if (model2.grasps[i].attempts == 0)
    {
      combined.grasps.insert(combined.grasps.begin(), model2.grasps[i]);
    }
    else
    {
      // sort by success rate
      bool inserted = false;
      double success_rate = ((double) model2.grasps[i].successes) / ((double) model2.grasps[i].attempts);
      for (size_t j = 0; j < combined.grasps.size(); j++)
      {
        double compare_rate;
        if (combined.grasps[j].attempts == 0)
        {
          compare_rate = 1.0;
        } else
        {
          compare_rate = ((double) combined.grasps[j].successes) / ((double) combined.grasps[j].attempts);
        }

        if (success_rate >= compare_rate)
        {
          combined.grasps.insert(combined.grasps.begin() + j, model2.grasps[i]);
          inserted = true;
          break;
        }
      }

      // if nothing had a lower success rate, add the grasp to the end
      if (!inserted)
      {
        combined.grasps.push_back(model2.grasps[i]);
      }
    }
  }
}

