face_detection.cpp
/**********************************************************************
 *
 * Software License Agreement (BSD License)
 *
 *  Copyright (c) 2008, Willow Garage, Inc.
 *  All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above
 *     copyright notice, this list of conditions and the following
 *     disclaimer in the documentation and/or other materials provided
 *     with the distribution.
 *   * Neither the name of the Willow Garage nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 *  FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 *  COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 *  BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 *  LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 *  CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 *  LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 *  ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 *********************************************************************/

/* Author: Caroline Pantofaru */

#include <stdio.h>
#include <iostream>
#include <vector>
#include <fstream>
#include <sys/time.h>   // for gettimeofday(), used to time the detection calls

#include <ros/ros.h>
#include <ros/console.h>
#include <boost/filesystem.hpp>
#include <boost/thread/mutex.hpp>

#include <people_msgs/PositionMeasurement.h>
#include <message_filters/subscriber.h>
#include <message_filters/time_synchronizer.h>
#include <message_filters/sync_policies/exact_time.h>
#include <message_filters/sync_policies/approximate_time.h>
#include <image_transport/subscriber_filter.h>
#include "sensor_msgs/CameraInfo.h"
#include "sensor_msgs/Image.h"
#include "stereo_msgs/DisparityImage.h"
#include "sensor_msgs/image_encodings.h"
#include "cv_bridge/cv_bridge.h"
#include "tf/transform_listener.h"
#include "sensor_msgs/PointCloud.h"
#include "geometry_msgs/Point32.h"

#include "opencv/cxcore.hpp"
#include "opencv/cv.hpp"
#include "opencv/highgui.h"

#include "face_detector/faces.h"

#include "image_geometry/stereo_camera_model.h"

#include <actionlib/server/simple_action_server.h>
#include <face_detector/FaceDetectorAction.h>

using namespace std;

namespace people {

class FaceDetector {
public:
  // Constants
  const double BIGDIST_M;  // Initialized to 1000000.0 in the constructor; used as an "infinite" distance.

  // Node handle
  ros::NodeHandle nh_;

  // Images and conversion for both the stereo camera and rgb-d camera cases.
  image_transport::ImageTransport it_;
  image_transport::SubscriberFilter image_sub_;                               // Color/left image.
  image_transport::SubscriberFilter depth_image_sub_;                         // Registered depth image (rgb-d case).
  message_filters::Subscriber<stereo_msgs::DisparityImage> disp_image_sub_;   // Disparity image (stereo case).
  message_filters::Subscriber<sensor_msgs::CameraInfo> c1_info_sub_;          // Color/left camera info.
  message_filters::Subscriber<sensor_msgs::CameraInfo> c2_info_sub_;          // Depth/right camera info.
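
  // Message synchronizers for the two input modes. An ExactTime policy only matches messages whose
  // timestamps are identical; an ApproximateTime policy matches the closest available timestamps.
  // Which one is instantiated is chosen at startup by the approximate_sync parameter (see the constructor).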
  // Disparity:
  typedef message_filters::sync_policies::ExactTime<sensor_msgs::Image, stereo_msgs::DisparityImage, sensor_msgs::CameraInfo, sensor_msgs::CameraInfo> ExactDispPolicy;
  typedef message_filters::sync_policies::ApproximateTime<sensor_msgs::Image, stereo_msgs::DisparityImage, sensor_msgs::CameraInfo, sensor_msgs::CameraInfo> ApproximateDispPolicy;
  typedef message_filters::Synchronizer<ExactDispPolicy> ExactDispSync;
  typedef message_filters::Synchronizer<ApproximateDispPolicy> ApproximateDispSync;
  boost::shared_ptr<ExactDispSync> exact_disp_sync_;
  boost::shared_ptr<ApproximateDispSync> approximate_disp_sync_;

  // Depth:
  typedef message_filters::sync_policies::ExactTime<sensor_msgs::Image, sensor_msgs::Image, sensor_msgs::CameraInfo, sensor_msgs::CameraInfo> ExactDepthPolicy;
  typedef message_filters::sync_policies::ApproximateTime<sensor_msgs::Image, sensor_msgs::Image, sensor_msgs::CameraInfo, sensor_msgs::CameraInfo> ApproximateDepthPolicy;
  typedef message_filters::Synchronizer<ExactDepthPolicy> ExactDepthSync;
  typedef message_filters::Synchronizer<ApproximateDepthPolicy> ApproximateDepthSync;
  boost::shared_ptr<ExactDepthSync> exact_depth_sync_;
  boost::shared_ptr<ApproximateDepthSync> approximate_depth_sync_;

  // Action
  actionlib::SimpleActionServer<face_detector::FaceDetectorAction> as_;
  face_detector::FaceDetectorFeedback feedback_;
  face_detector::FaceDetectorResult result_;

  // If running the face detector as one component of a larger person tracker, this subscribes to the
  // tracker's position measurements; external_init_ records whether initialization comes from another node.
  // Todo: resurrect the person tracker.
  ros::Subscriber pos_sub_;
  bool external_init_;


  // Publishers
  // A point cloud of the face positions, meant for visualization in rviz.
  // This could be replaced by visualization markers, but they can't be modified
  // in rviz at runtime (e.g. the alpha, display time, etc. can't be changed.)
  ros::Publisher cloud_pub_;
  ros::Publisher pos_pub_;

  // Display
  bool do_display_;                              // If true, pop up an OpenCV window showing the detections.
  cv::Mat cv_image_out_;                         // Copy of the incoming image, used for drawing.

  // Depth
  bool use_depth_;                               // If true (default), use depth information.
  image_geometry::StereoCameraModel cam_model_;  // Camera model built from the two CameraInfo messages.

  // Face detector params and output
  Faces *faces_;                                 // The face detector itself.
  string name_;                                  // Detector name, published with each position measurement.
  string haar_filename_;                         // Path to the trained (Haar) classifier file.
  double reliability_;                           // Reliability value published with each position measurement.

  // A tracker position measurement plus the time it was last matched to a face detection.
  struct RestampedPositionMeasurement {
    ros::Time restamp;
    people_msgs::PositionMeasurement pos;
    double dist;
  };
  map<string, RestampedPositionMeasurement> pos_list_;  // Tracked positions, keyed by object_id.

  bool quit_;

  tf::TransformListener tf_;

  boost::mutex cv_mutex_, pos_mutex_, limage_mutex_, dimage_mutex_;

  bool do_continuous_;       // If true, run detection on every frame; if false, only while an action goal is active.
  bool do_publish_unknown_;  // If true, also publish faces whose size could not be verified.
  FaceDetector(std::string name) :
    BIGDIST_M(1000000.0),
    it_(nh_),
    as_(nh_, name, false),
    faces_(0),
    quit_(false)
  {
    ROS_INFO_STREAM_NAMED("face_detector","Constructing FaceDetector.");

    // Action stuff
    as_.registerGoalCallback(boost::bind(&FaceDetector::goalCB, this));
    as_.registerPreemptCallback(boost::bind(&FaceDetector::preemptCB, this));
    as_.start();

    faces_ = new Faces();
    double face_size_min_m, face_size_max_m, max_face_z_m, face_sep_dist_m;
    bool use_rgbd;
    int queue_size;
    bool approx;

    // Parameters
    ros::NodeHandle local_nh("~");
    local_nh.param("classifier_name",name_,std::string(""));
    local_nh.param("classifier_filename",haar_filename_,std::string(""));
    local_nh.param("classifier_reliability",reliability_,0.0);
    local_nh.param("do_display",do_display_,false);
    local_nh.param("do_continuous",do_continuous_,true);
    local_nh.param("do_publish_faces_of_unknown_size",do_publish_unknown_,false);
    local_nh.param("use_depth",use_depth_,true);
    local_nh.param("use_external_init",external_init_,true);
    local_nh.param("face_size_min_m",face_size_min_m,Faces::FACE_SIZE_MIN_M);
    local_nh.param("face_size_max_m",face_size_max_m,Faces::FACE_SIZE_MAX_M);
    local_nh.param("max_face_z_m",max_face_z_m,Faces::MAX_FACE_Z_M);
    local_nh.param("face_separation_dist_m",face_sep_dist_m,Faces::FACE_SEP_DIST_M);
    local_nh.param("use_rgbd",use_rgbd,false);
    local_nh.param("queue_size", queue_size, 5);
    local_nh.param("approximate_sync", approx, false);

    if (do_display_) {
      // OpenCV: pop up an OpenCV highgui window
      cv::namedWindow("Face detector: Face Detection", CV_WINDOW_AUTOSIZE);
    }

    // Init the detector and subscribe to the images and camera parameters. One case for rgbd, one for stereo.
    if (use_rgbd) {
      faces_->initFaceDetectionDepth(1, haar_filename_, face_size_min_m, face_size_max_m, max_face_z_m, face_sep_dist_m);

      string camera, image_topic;
      camera = nh_.resolveName("camera");
      image_topic = nh_.resolveName("image");
      string camera_topic = ros::names::clean(camera + "/rgb/" + image_topic);
      string depth_topic = ros::names::clean(camera + "/depth_registered/" + image_topic);
      string camera_info_topic = ros::names::clean(camera + "/rgb/camera_info");
      string depth_info_topic = ros::names::clean(camera + "/depth_registered/camera_info");
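      // For example (illustrative remappings, not defaults): with camera:=/camera and image:=image_raw,
      // camera_topic resolves to /camera/rgb/image_raw and depth_topic to /camera/depth_registered/image_raw.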
      image_sub_.subscribe(it_,camera_topic,3);
      depth_image_sub_.subscribe(it_,depth_topic,3);
      c1_info_sub_.subscribe(nh_,camera_info_topic,3);
      c2_info_sub_.subscribe(nh_,depth_info_topic,3);

      if (approx)
      {
        approximate_depth_sync_.reset( new ApproximateDepthSync(ApproximateDepthPolicy(queue_size),
                                                                image_sub_, depth_image_sub_, c1_info_sub_, c2_info_sub_));
        approximate_depth_sync_->registerCallback(boost::bind(&FaceDetector::imageCBAllDepth,
                                                              this, _1, _2, _3, _4));
      }
      else
      {
        exact_depth_sync_.reset( new ExactDepthSync(ExactDepthPolicy(queue_size),
                                                    image_sub_, depth_image_sub_, c1_info_sub_, c2_info_sub_));
        exact_depth_sync_->registerCallback(boost::bind(&FaceDetector::imageCBAllDepth,
                                                        this, _1, _2, _3, _4));
      }
    }
    else {
      faces_->initFaceDetectionDisparity(1, haar_filename_, face_size_min_m, face_size_max_m, max_face_z_m, face_sep_dist_m);

      string stereo_namespace, image_topic;
      stereo_namespace = nh_.resolveName("camera");
      image_topic = nh_.resolveName("image");
      string left_topic = ros::names::clean(stereo_namespace + "/left/" + image_topic);
      string disparity_topic = ros::names::clean(stereo_namespace + "/disparity");
      string left_camera_info_topic = ros::names::clean(stereo_namespace + "/left/camera_info");
      string right_camera_info_topic = ros::names::clean(stereo_namespace + "/right/camera_info");
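      // For example (illustrative remappings): with camera:=/wide_stereo and image:=image_rect,
      // left_topic resolves to /wide_stereo/left/image_rect and disparity_topic to /wide_stereo/disparity.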
      image_sub_.subscribe(it_,left_topic,3);
      disp_image_sub_.subscribe(nh_,disparity_topic,3);
      c1_info_sub_.subscribe(nh_,left_camera_info_topic,3);
      c2_info_sub_.subscribe(nh_,right_camera_info_topic,3);

      if (approx)
      {
        approximate_disp_sync_.reset( new ApproximateDispSync(ApproximateDispPolicy(queue_size),
                                                              image_sub_, disp_image_sub_, c1_info_sub_, c2_info_sub_));
        approximate_disp_sync_->registerCallback(boost::bind(&FaceDetector::imageCBAllDisp,
                                                             this, _1, _2, _3, _4));
      }
      else
      {
        exact_disp_sync_.reset( new ExactDispSync(ExactDispPolicy(queue_size),
                                                  image_sub_, disp_image_sub_, c1_info_sub_, c2_info_sub_));
        exact_disp_sync_->registerCallback(boost::bind(&FaceDetector::imageCBAllDisp,
                                                       this, _1, _2, _3, _4));
      }
    }


    // Advertise a position measurement message.
    pos_pub_ = nh_.advertise<people_msgs::PositionMeasurement>("face_detector/people_tracker_measurements",1);

    cloud_pub_ = nh_.advertise<sensor_msgs::PointCloud>("face_detector/faces_cloud",0);

    // Subscribe to filter measurements.
    if (external_init_) {
      pos_sub_ = nh_.subscribe("people_tracker_filter",1,&FaceDetector::posCallback,this);
    }

    // Spin here: the constructor does not return until the node is shut down.
    ros::MultiThreadedSpinner s(2);
    ros::spin(s);

  }

  ~FaceDetector()
  {

    cv_image_out_.release();

    if (do_display_) {
      cv::destroyWindow("Face detector: Face Detection");
    }

    if (faces_) {delete faces_; faces_ = 0;}
  }

  // Action goal callback: accept the new goal; detection then runs in the image callbacks while the goal is active.
  void goalCB() {
    as_.acceptNewGoal();
  }

  // Action preempt callback.
  void preemptCB() {
    as_.setPreempted();
  }


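  // Callback for position measurements from the person-tracking filter (subscribed only when
  // use_external_init is true). Each measurement is stored in pos_list_, keyed by object_id,
  // so it can be associated with face detections in the next image callback.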
  void posCallback(const people_msgs::PositionMeasurementConstPtr& pos_ptr) {

    // Put the incoming position into the position queue. It'll be processed in the next image callback.
    boost::mutex::scoped_lock lock(pos_mutex_);
    map<string, RestampedPositionMeasurement>::iterator it;
    it = pos_list_.find(pos_ptr->object_id);
    RestampedPositionMeasurement rpm;
    rpm.pos = *pos_ptr;
    rpm.restamp = pos_ptr->header.stamp;
    rpm.dist = BIGDIST_M;
    if (it == pos_list_.end()) {
      pos_list_.insert(pair<string, RestampedPositionMeasurement>(pos_ptr->object_id, rpm));
    }
    else if ((pos_ptr->header.stamp - (*it).second.pos.header.stamp) > ros::Duration().fromSec(-1.0) ) {
      (*it).second = rpm;
    }
    lock.unlock();

  }

  // Workaround to convert a DisparityImage->Image into a shared pointer for cv_bridge in imageCBAll.
  struct NullDeleter
  {
    void operator()(void const *) const {}
  };

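  // Synchronized image callback for the rgb-d case: receives the color image, the registered depth
  // image, and both camera infos, converts the images to OpenCV with cv_bridge, runs the depth-based
  // face detector, and hands the detections to matchAndDisplay() for association and publishing.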
  void imageCBAllDepth(const sensor_msgs::Image::ConstPtr &image, const sensor_msgs::Image::ConstPtr& depth_image, const sensor_msgs::CameraInfo::ConstPtr& c1_info, const sensor_msgs::CameraInfo::ConstPtr& c2_info)
  {

    // Only run the detector if in continuous mode or the detector was turned on through an action invocation.
    if (!do_continuous_ && !as_.isActive())
      return;

    // Clear out the result vector.
    result_.face_positions.clear();

    if (do_display_) {
      cv_mutex_.lock();
    }

    // ROS --> OpenCV
    cv_bridge::CvImageConstPtr cv_image_ptr = cv_bridge::toCvShare(image,"bgr8");
    cv_bridge::CvImageConstPtr cv_depth_ptr = cv_bridge::toCvShare(depth_image);
    cam_model_.fromCameraInfo(c1_info,c2_info);

    // For display, keep a copy of the image that we can draw on.
    if (do_display_) {
      cv_image_out_ = (cv_image_ptr->image).clone();
    }

    struct timeval timeofday;
    gettimeofday(&timeofday,NULL);
    ros::Time starttdetect = ros::Time().fromNSec(1e9*timeofday.tv_sec + 1e3*timeofday.tv_usec);

    vector<Box2D3D> faces_vector = faces_->detectAllFacesDepth(cv_image_ptr->image, 1.0, cv_depth_ptr->image, &cam_model_);
    gettimeofday(&timeofday,NULL);
    ros::Time endtdetect = ros::Time().fromNSec(1e9*timeofday.tv_sec + 1e3*timeofday.tv_usec);
    ros::Duration diffdetect = endtdetect - starttdetect;
    ROS_INFO_STREAM_NAMED("face_detector","Detection duration = " << diffdetect.toSec() << "sec");

    matchAndDisplay(faces_vector, image->header);
  }

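  // Synchronized image callback for the stereo case: receives the left color image, the disparity
  // image, and the left and right camera infos, converts the images to OpenCV with cv_bridge, runs
  // the disparity-based face detector, and hands the detections to matchAndDisplay().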
  void imageCBAllDisp(const sensor_msgs::Image::ConstPtr &image, const stereo_msgs::DisparityImage::ConstPtr& disp_image, const sensor_msgs::CameraInfo::ConstPtr& c1_info, const sensor_msgs::CameraInfo::ConstPtr& c2_info)
  {

    // Only run the detector if in continuous mode or the detector was turned on through an action invocation.
    if (!do_continuous_ && !as_.isActive())
      return;

    // Clear out the result vector.
    result_.face_positions.clear();

    if (do_display_) {
      cv_mutex_.lock();
    }


    // ROS --> OpenCV
    cv_bridge::CvImageConstPtr cv_image_ptr = cv_bridge::toCvShare(image,sensor_msgs::image_encodings::BGR8);
    cv_bridge::CvImageConstPtr cv_disp_ptr = cv_bridge::toCvShare(disp_image->image, disp_image);
    cam_model_.fromCameraInfo(c1_info,c2_info);

    // For display, keep a copy of the image that we can draw on.
    if (do_display_) {
      cv_image_out_ = (cv_image_ptr->image).clone();
    }

    struct timeval timeofday;
    gettimeofday(&timeofday,NULL);
    ros::Time starttdetect = ros::Time().fromNSec(1e9*timeofday.tv_sec + 1e3*timeofday.tv_usec);

    vector<Box2D3D> faces_vector = faces_->detectAllFacesDisparity(cv_image_ptr->image, 1.0, cv_disp_ptr->image, &cam_model_);
    gettimeofday(&timeofday,NULL);
    ros::Time endtdetect = ros::Time().fromNSec(1e9*timeofday.tv_sec + 1e3*timeofday.tv_usec);
    ros::Duration diffdetect = endtdetect - starttdetect;
    ROS_INFO_STREAM_NAMED("face_detector","Detection duration = " << diffdetect.toSec() << "sec");

    matchAndDisplay(faces_vector, image->header);
  }


private:

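  // Associate face detections with previously seen people and publish the results: drop stale entries
  // from pos_list_, transform the remaining positions into the image frame, match each good (or,
  // optionally, unknown-size) detection to the nearest stored position within the face separation
  // distance, publish a PositionMeasurement per detection and a point cloud of good face centers,
  // and optionally draw the detections on the display image.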
  void matchAndDisplay( vector<Box2D3D> faces_vector, std_msgs::Header header )
  {
    bool found_faces = false;

    int ngood = 0;
    sensor_msgs::PointCloud cloud;
    cloud.header.stamp = header.stamp;
    cloud.header.frame_id = header.frame_id;

    if (faces_vector.size() > 0 ) {

      // Transform the positions of the known faces and remove anyone who hasn't had an update in a long time.
      boost::mutex::scoped_lock pos_lock(pos_mutex_);
      map<string, RestampedPositionMeasurement>::iterator it;
      for (it = pos_list_.begin(); it != pos_list_.end(); /* increment inside the loop */) {
        if ((header.stamp - (*it).second.restamp) > ros::Duration().fromSec(5.0)) {
          // Position is too old, remove the person. Erase through a post-increment so the iterator
          // stays valid (erasing and then incrementing the erased iterator is undefined behavior).
          pos_list_.erase(it++);
        }
        else {
          // Transform the person to this time. Note that the pos time is updated but not the restamp.
          tf::Point pt;
          tf::pointMsgToTF((*it).second.pos.pos, pt);
          tf::Stamped<tf::Point> loc(pt, (*it).second.pos.header.stamp, (*it).second.pos.header.frame_id);
          try {
            tf_.transformPoint(header.frame_id, header.stamp, loc, "odom_combined", loc);
            (*it).second.pos.header.stamp = header.stamp;
            (*it).second.pos.pos.x = loc[0];
            (*it).second.pos.pos.y = loc[1];
            (*it).second.pos.pos.z = loc[2];
          }
          catch (tf::TransformException& ex) {
            // If the transform is unavailable, keep the last known position unchanged.
          }
          ++it;
        }
      }
      // End filter face position update

      // Associate the found faces with previously seen faces, and publish all good face centers.
      Box2D3D *one_face;
      people_msgs::PositionMeasurement pos;

      for (uint iface = 0; iface < faces_vector.size(); iface++) {
        one_face = &faces_vector[iface];

        if (one_face->status=="good" || (one_face->status=="unknown" && do_publish_unknown_)) {

          std::string id = "";

          // Convert the face format to a PositionMeasurement msg.
          pos.header.stamp = header.stamp;
          pos.name = name_;
          pos.pos.x = one_face->center3d.x;
          pos.pos.y = one_face->center3d.y;
          pos.pos.z = one_face->center3d.z;
          pos.header.frame_id = header.frame_id;//"*_stereo_optical_frame";
          pos.reliability = reliability_;
          pos.initialization = 1;//0;
          // Fixed diagonal 3x3 covariance (row-major), 0.04 variance on each axis.
          pos.covariance[0] = 0.04; pos.covariance[1] = 0.0;  pos.covariance[2] = 0.0;
          pos.covariance[3] = 0.0;  pos.covariance[4] = 0.04; pos.covariance[5] = 0.0;
          pos.covariance[6] = 0.0;  pos.covariance[7] = 0.0;  pos.covariance[8] = 0.04;

          // Check if this person's face is close enough to one of the previously known faces and associate it
          // with the closest one. Otherwise publish it with an empty id.
          // Note that multiple face positions can be published with the same id, but ids in the pos_list_ are
          // unique. The position of a face in the list is updated with the closest found face.
          // (dist below is the squared Euclidean distance between the detection and the stored position.)
          double dist, mindist = BIGDIST_M;
          map<string, RestampedPositionMeasurement>::iterator close_it = pos_list_.end();
          for (it = pos_list_.begin(); it != pos_list_.end(); it++) {
            dist = pow((*it).second.pos.pos.x - pos.pos.x, 2.0) + pow((*it).second.pos.pos.y - pos.pos.y, 2.0) + pow((*it).second.pos.pos.z - pos.pos.z, 2.0);
            if (dist <= faces_->face_sep_dist_m_ && dist < mindist) {
              mindist = dist;
              close_it = it;
            }
          }
          if (close_it != pos_list_.end()) {
            if (mindist < (*close_it).second.dist) {
              (*close_it).second.restamp = header.stamp;
              (*close_it).second.dist = mindist;
              (*close_it).second.pos = pos;
            }
            pos.object_id = (*close_it).second.pos.object_id;
            ROS_INFO_STREAM_NAMED("face_detector","Found face " << pos.object_id);
          }
          else {
            pos.object_id = "";
          }
          result_.face_positions.push_back(pos);
          found_faces = true;
          pos_pub_.publish(pos);

        }

      }
      pos_lock.unlock();

      // Clean out all of the distances in the pos_list_
      for (it = pos_list_.begin(); it != pos_list_.end(); it++) {
        (*it).second.dist = BIGDIST_M;
      }
      // Done associating faces

      /******** Display **************************************************************/

      // Publish a point cloud of face centers.

      cloud.channels.resize(1);
      cloud.channels[0].name = "intensity";

      for (uint iface = 0; iface < faces_vector.size(); iface++) {
        one_face = &faces_vector[iface];

        // Visualization of good faces as a point cloud
        if (one_face->status == "good") {

          geometry_msgs::Point32 p;
          p.x = one_face->center3d.x;
          p.y = one_face->center3d.y;
          p.z = one_face->center3d.z;
          cloud.points.push_back(p);
          cloud.channels[0].values.push_back(1.0f);

          ngood ++;
        }
      }

      cloud_pub_.publish(cloud);
      // Done publishing the point cloud.

    } // Done if faces_vector.size() > 0


    // Draw an appropriately colored rectangle on the display image and in the visualizer.
    if (do_display_) {
      displayFacesOnImage(faces_vector);
      cv_mutex_.unlock();
    }
    // Done drawing.

    /******** Done display **********************************************************/

    ROS_INFO_STREAM_NAMED("face_detector","Number of faces found: " << faces_vector.size() << ", number with good depth and size: " << ngood);

    // If you don't want continuous processing and you've found at least one face, turn off the detector.
    if (!do_continuous_ && found_faces) {
      as_.setSucceeded(result_);
    }
  }

  // Draw bounding boxes around detected faces on the cv_image_out_ and show the image.
  void displayFacesOnImage(vector<Box2D3D>  faces_vector)
  {

    Box2D3D *one_face;

    for (uint iface = 0; iface < faces_vector.size(); iface++) {
      one_face = &faces_vector[iface];
      // Visualization by image display.
      if (do_display_) {
        cv::Scalar color;
        if (one_face->status == "good") {
          color = cv::Scalar(0,255,0);   // Green: face with good depth and size.
        }
        else if (one_face->status == "unknown") {
          color = cv::Scalar(255,0,0);   // Blue: face of unknown size.
        }
        else {
          color = cv::Scalar(0,0,255);   // Red: rejected face.
        }

        cv::rectangle(cv_image_out_,
                      cv::Point(one_face->box2d.x,one_face->box2d.y),
                      cv::Point(one_face->box2d.x+one_face->box2d.width, one_face->box2d.y+one_face->box2d.height), color, 4);
      }
    }

    cv::imshow("Face detector: Face Detection",cv_image_out_);
    cv::waitKey(2);

  }


}; // end class

} // end namespace people

// Main
int main(int argc, char **argv)
{
  ros::init(argc,argv,"face_detector");

  // The constructor does all the work and spins until the node is shut down.
  people::FaceDetector fd(ros::this_node::getName());

  return 0;
}
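
// Example invocation (illustrative only; the executable name, remappings, and classifier path depend
// on your build and camera setup):
//   rosrun face_detector face_detector camera:=/camera image:=image_raw \
//     _classifier_filename:=/path/to/haar_cascade.xml _use_rgbd:=true _do_display:=true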