face_detection.cpp
/**********************************************************************
 *
 * Software License Agreement (BSD License)
 *
 *  Copyright (c) 2008, Willow Garage, Inc.
 *  All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above
 *     copyright notice, this list of conditions and the following
 *     disclaimer in the documentation and/or other materials provided
 *     with the distribution.
 *   * Neither the name of the Willow Garage nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 *  FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 *  COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 *  BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 *  LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 *  CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 *  LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 *  ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 *********************************************************************/

/* Author: Caroline Pantofaru */

#include <stdio.h>
#include <sys/time.h>   // gettimeofday()
#include <iostream>
#include <vector>
#include <string>
#include <map>
#include <fstream>

#include <ros/ros.h>
#include <ros/console.h>
#include <boost/filesystem.hpp>
#include <boost/thread/mutex.hpp>

#include <people_msgs/PositionMeasurement.h>
#include <people_msgs/PositionMeasurementArray.h>
#include <message_filters/subscriber.h>
#include <message_filters/time_synchronizer.h>
#include <message_filters/sync_policies/exact_time.h>
#include <message_filters/sync_policies/approximate_time.h>
#include <image_transport/subscriber_filter.h>
#include "sensor_msgs/CameraInfo.h"
#include "sensor_msgs/Image.h"
#include "stereo_msgs/DisparityImage.h"
#include "sensor_msgs/image_encodings.h"
#include "cv_bridge/cv_bridge.h"
#include "tf/transform_listener.h"
#include "sensor_msgs/PointCloud.h"
#include "geometry_msgs/Point32.h"

#include "opencv/cxcore.hpp"
#include "opencv/cv.hpp"
#include "opencv/highgui.h"

#include "face_detector/faces.h"

#include "image_geometry/stereo_camera_model.h"

#include <actionlib/server/simple_action_server.h>
#include <face_detector/FaceDetectorAction.h>

using namespace std;

namespace people {

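  /**
   * FaceDetector: a ROS node that runs an OpenCV cascade (Haar) face detector on an
   * incoming image stream and uses either stereo disparity or an RGB-D depth image to
   * estimate each face's 3D position and reject detections of implausible physical size.
   * Results are published as a people_msgs::PositionMeasurementArray and as a
   * sensor_msgs::PointCloud of face centers for visualization in rviz. The node can run
   * continuously or be triggered through an actionlib FaceDetectorAction goal.
   */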
  class FaceDetector {
  public:
    // Constants
    const double BIGDIST_M; // = 1000000.0

    // Node handle
    ros::NodeHandle nh_;

    boost::mutex connect_mutex_;
    bool use_rgbd_;

    // Subscription topics
    string image_image_;
    // Stereo
    string stereo_namespace_;
    string left_topic_;
    string disparity_topic_;
    string left_camera_info_topic_;
    string right_camera_info_topic_;
    // RGB-D
    string camera_;
    string camera_topic_;
    string depth_image_;
    string depth_topic_;
    string camera_info_topic_;
    string depth_info_topic_;
    string depth_ns_;

    // Images and conversion for both the stereo camera and RGB-D camera cases.
    image_transport::ImageTransport it_;
    image_transport::SubscriberFilter image_sub_;
    image_transport::SubscriberFilter depth_image_sub_;
    message_filters::Subscriber<stereo_msgs::DisparityImage> disp_image_sub_;
    message_filters::Subscriber<sensor_msgs::CameraInfo> c1_info_sub_;
    message_filters::Subscriber<sensor_msgs::CameraInfo> c2_info_sub_;

    // Disparity:
    typedef message_filters::sync_policies::ExactTime<sensor_msgs::Image, stereo_msgs::DisparityImage, sensor_msgs::CameraInfo, sensor_msgs::CameraInfo> ExactDispPolicy;
    typedef message_filters::sync_policies::ApproximateTime<sensor_msgs::Image, stereo_msgs::DisparityImage, sensor_msgs::CameraInfo, sensor_msgs::CameraInfo> ApproximateDispPolicy;
    typedef message_filters::Synchronizer<ExactDispPolicy> ExactDispSync;
    typedef message_filters::Synchronizer<ApproximateDispPolicy> ApproximateDispSync;
    boost::shared_ptr<ExactDispSync> exact_disp_sync_;
    boost::shared_ptr<ApproximateDispSync> approximate_disp_sync_;

    // Depth:
    typedef message_filters::sync_policies::ExactTime<sensor_msgs::Image, sensor_msgs::Image, sensor_msgs::CameraInfo, sensor_msgs::CameraInfo> ExactDepthPolicy;
    typedef message_filters::sync_policies::ApproximateTime<sensor_msgs::Image, sensor_msgs::Image, sensor_msgs::CameraInfo, sensor_msgs::CameraInfo> ApproximateDepthPolicy;
    typedef message_filters::Synchronizer<ExactDepthPolicy> ExactDepthSync;
    typedef message_filters::Synchronizer<ApproximateDepthPolicy> ApproximateDepthSync;
    boost::shared_ptr<ExactDepthSync> exact_depth_sync_;
    boost::shared_ptr<ApproximateDepthSync> approximate_depth_sync_;
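
    // Exactly one of the four synchronizers above is instantiated in the constructor,
    // selected by the "use_rgbd" (depth vs. disparity) and "approximate_sync"
    // (ApproximateTime vs. ExactTime) parameters.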

    // Action
    actionlib::SimpleActionServer<face_detector::FaceDetectorAction> as_;
    face_detector::FaceDetectorFeedback feedback_;
    face_detector::FaceDetectorResult result_;

    // If running the face detector as a component of a larger person tracker, this
    // subscribes to the tracker's position measurements, which may have been
    // initialized by some other node.
    // TODO: resurrect the person tracker.
    ros::Subscriber pos_sub_;
    bool external_init_;

    // Publishers
    // A point cloud of the face positions, meant for visualization in rviz.
    // This could be replaced by visualization markers, but those can't be modified
    // in rviz at runtime (e.g. the alpha, display time, etc. can't be changed).
    ros::Publisher cloud_pub_;
    ros::Publisher pos_array_pub_;

    // Display
    bool do_display_;
    cv::Mat cv_image_out_;

    // Depth
    bool use_depth_;
    image_geometry::StereoCameraModel cam_model_;

    // Face detector params and output
    Faces *faces_;
    string name_;
    string haar_filename_;
    double reliability_;
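
    // A known face position, restamped with the last time it was matched to a detection.
    // "dist" caches the (squared) distance to the closest current detection, so a stored
    // position is only overwritten by a closer match within the same frame.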
    struct RestampedPositionMeasurement {
      ros::Time restamp;
      people_msgs::PositionMeasurement pos;
      double dist;
    };
    map<string, RestampedPositionMeasurement> pos_list_;

    int max_id_;

    bool quit_;

    tf::TransformListener tf_;

    std::string fixed_frame_;

    boost::mutex cv_mutex_, pos_mutex_, limage_mutex_, dimage_mutex_;

    bool do_continuous_;
    bool do_publish_unknown_;
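
    /**
     * Constructor. Reads the node parameters, initializes the cascade-based face
     * detector for either the RGB-D or the stereo/disparity case, wires up the image
     * and camera-info synchronizers and the output publishers, then spins.
     */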
    FaceDetector(std::string name) :
      BIGDIST_M(1000000.0),
      it_(nh_),
      as_(nh_, name, false),
      faces_(0),
      max_id_(-1),
      quit_(false)
    {
      ROS_INFO_STREAM_NAMED("face_detector","Constructing FaceDetector.");

      // Action stuff
      as_.registerGoalCallback(boost::bind(&FaceDetector::goalCB, this));
      as_.registerPreemptCallback(boost::bind(&FaceDetector::preemptCB, this));
      as_.start();

      faces_ = new Faces();
      double face_size_min_m, face_size_max_m, max_face_z_m, face_sep_dist_m;
      int queue_size;
      bool approx;

      // Parameters
      ros::NodeHandle local_nh("~");
      local_nh.param("classifier_name",name_,std::string(""));
      local_nh.param("classifier_filename",haar_filename_,std::string(""));
      local_nh.param("classifier_reliability",reliability_,0.0);
      local_nh.param("do_display",do_display_,false);
      local_nh.param("do_continuous",do_continuous_,true);
      local_nh.param("do_publish_faces_of_unknown_size",do_publish_unknown_,false);
      local_nh.param("use_depth",use_depth_,true);
      local_nh.param("use_external_init",external_init_,false);
      local_nh.param("face_size_min_m",face_size_min_m,FACE_SIZE_MIN_M);
      local_nh.param("face_size_max_m",face_size_max_m,FACE_SIZE_MAX_M);
      local_nh.param("max_face_z_m",max_face_z_m,MAX_FACE_Z_M);
      local_nh.param("face_separation_dist_m",face_sep_dist_m,FACE_SEP_DIST_M);
      local_nh.param("use_rgbd",use_rgbd_,false);
      local_nh.param("queue_size", queue_size, 5);
      local_nh.param("approximate_sync", approx, false);

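      // Notes on the parameters above: "classifier_filename" should point to an OpenCV
      // cascade XML file (a Haar frontal-face cascade in the usual setup); the face size
      // bounds and separation distance are in meters. The FACE_SIZE_* / MAX_FACE_Z_M /
      // FACE_SEP_DIST_M defaults are expected to come from the included
      // face_detector/faces.h header. Set "approximate_sync" to true for cameras whose
      // image and depth/disparity streams do not share exact timestamps.
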
      if (do_display_) {
        // OpenCV: pop up an OpenCV highgui window.
        cv::namedWindow("Face detector: Face Detection", CV_WINDOW_AUTOSIZE);
      }

      // Init the detector and subscribe to the images and camera parameters. One case for RGB-D, one for stereo.
      if (use_rgbd_) {

        faces_->initFaceDetectionDepth(1, haar_filename_, face_size_min_m, face_size_max_m, max_face_z_m, face_sep_dist_m);

        camera_ = nh_.resolveName("camera");
        image_image_ = nh_.resolveName("image_topic");
        depth_image_ = nh_.resolveName("depth_topic");
        depth_ns_ = nh_.resolveName("depth_ns");
        camera_topic_ = ros::names::clean(camera_ + "/rgb/" + image_image_);
        depth_topic_ = ros::names::clean(camera_ + "/" + depth_ns_ + "/" + depth_image_);
        camera_info_topic_ = ros::names::clean(camera_ + "/rgb/camera_info");
        depth_info_topic_ = ros::names::clean(camera_ + "/" + depth_ns_ + "/camera_info");

        local_nh.param("fixed_frame", fixed_frame_, std::string("camera_rgb_optical_frame"));

        if (approx)
        {
          approximate_depth_sync_.reset( new ApproximateDepthSync(ApproximateDepthPolicy(queue_size),
                                                                  image_sub_, depth_image_sub_, c1_info_sub_, c2_info_sub_));
          approximate_depth_sync_->registerCallback(boost::bind(&FaceDetector::imageCBAllDepth,
                                                                this, _1, _2, _3, _4));
        }
        else
        {
          exact_depth_sync_.reset( new ExactDepthSync(ExactDepthPolicy(queue_size),
                                                      image_sub_, depth_image_sub_, c1_info_sub_, c2_info_sub_));
          exact_depth_sync_->registerCallback(boost::bind(&FaceDetector::imageCBAllDepth,
                                                          this, _1, _2, _3, _4));
        }

      }
      else {
        faces_->initFaceDetectionDisparity(1, haar_filename_, face_size_min_m, face_size_max_m, max_face_z_m, face_sep_dist_m);

        stereo_namespace_ = nh_.resolveName("camera");
        image_image_ = nh_.resolveName("image");
        left_topic_ = ros::names::clean(stereo_namespace_ + "/left/" + image_image_);
        disparity_topic_ = ros::names::clean(stereo_namespace_ + "/disparity");
        left_camera_info_topic_ = ros::names::clean(stereo_namespace_ + "/left/camera_info");
        right_camera_info_topic_ = ros::names::clean(stereo_namespace_ + "/right/camera_info");

        local_nh.param("fixed_frame", fixed_frame_, stereo_namespace_ + "_optical_frame");

        if (approx)
        {
          approximate_disp_sync_.reset( new ApproximateDispSync(ApproximateDispPolicy(queue_size),
                                                                image_sub_, disp_image_sub_, c1_info_sub_, c2_info_sub_));
          approximate_disp_sync_->registerCallback(boost::bind(&FaceDetector::imageCBAllDisp,
                                                               this, _1, _2, _3, _4));
        }
        else
        {
          exact_disp_sync_.reset( new ExactDispSync(ExactDispPolicy(queue_size),
                                                    image_sub_, disp_image_sub_, c1_info_sub_, c2_info_sub_));
          exact_disp_sync_->registerCallback(boost::bind(&FaceDetector::imageCBAllDisp,
                                                         this, _1, _2, _3, _4));
        }
      }

      // Connection callbacks and advertise.
      ros::SubscriberStatusCallback pos_pub_connect_cb = boost::bind(&FaceDetector::connectCb, this);
      ros::SubscriberStatusCallback cloud_pub_connect_cb = boost::bind(&FaceDetector::connectCb, this);
      if (do_continuous_)
        ROS_INFO_STREAM_NAMED("face_detector","You must subscribe to one of FaceDetector's outbound topics or else it will not publish anything.");

      {
        boost::mutex::scoped_lock lock(connect_mutex_);

        // Advertise a position measurement message.
        pos_array_pub_ = nh_.advertise<people_msgs::PositionMeasurementArray>("face_detector/people_tracker_measurements_array",1, pos_pub_connect_cb, pos_pub_connect_cb);

        cloud_pub_ = nh_.advertise<sensor_msgs::PointCloud>("face_detector/faces_cloud",0, cloud_pub_connect_cb, cloud_pub_connect_cb);
      }

      // If running as an action server, just stay connected so that action calls are fast.
      if (!do_continuous_) connectCb();

      ros::MultiThreadedSpinner s(2);
      ros::spin(s);

    }

    ~FaceDetector()
    {
      cv_image_out_.release();

      if (do_display_) {
        cv::destroyWindow("Face detector: Face Detection");
      }

      if (faces_) { delete faces_; faces_ = 0; }
    }
00326 
00327     // Handles (un)subscribing when clients (un)subscribe
00328     void connectCb()
00329     {
00330       boost::mutex::scoped_lock lock(connect_mutex_);
00331       if (use_rgbd_) 
00332       {
00333         if (do_continuous_ && cloud_pub_.getNumSubscribers() == 0 && pos_array_pub_.getNumSubscribers() == 0)
00334         {
00335           ROS_INFO_STREAM_NAMED("face_detector","You have unsubscribed to FaceDetector's outbound topics, so it will no longer publish anything.");
00336           image_sub_.unsubscribe();
00337           depth_image_sub_.unsubscribe();
00338           c1_info_sub_.unsubscribe();
00339           c2_info_sub_.unsubscribe();
00340           pos_sub_.shutdown();
00341         }
00342         else if (!do_continuous_ || !image_sub_.getSubscriber())
00343         {
00344           image_sub_.subscribe(it_, camera_topic_, 3);
00345           depth_image_sub_.subscribe(it_, depth_topic_, 3);
00346           c1_info_sub_.subscribe(nh_, camera_info_topic_, 3);
00347           c2_info_sub_.subscribe(nh_, depth_info_topic_,3);   
00348           // // Subscribe to filter measurements.
00349           // if (external_init_) {
00350           //   //pos_sub_ = nh_.subscribe("people_tracker_filter",1,&FaceDetector::posCallback,this);
00351           //   pos_sub_ = nh_.subscribe("/face_detector/people_tracker_measurements_array",1,&FaceDetector::posCallback,this);
00352           // }
00353         }
00354       } 
00355       else
00356       {
00357         if (do_continuous_ && cloud_pub_.getNumSubscribers() == 0 && pos_array_pub_.getNumSubscribers() == 0)
00358         {
00359           ROS_INFO_STREAM_NAMED("face_detector","You have unsubscribed to FaceDetector's outbound topics, so it will no longer publish anything.");
00360           image_sub_.unsubscribe();
00361           disp_image_sub_.unsubscribe();
00362           c1_info_sub_.unsubscribe();
00363           c2_info_sub_.unsubscribe();
00364           pos_sub_.shutdown();
00365         }
00366         else if (!do_continuous_ || !image_sub_.getSubscriber())
00367         {
00368           image_sub_.subscribe(it_, left_topic_, 3);
00369           disp_image_sub_.subscribe(nh_, disparity_topic_, 3);
00370           c1_info_sub_.subscribe(nh_, left_camera_info_topic_, 3);
00371           c2_info_sub_.subscribe(nh_, right_camera_info_topic_, 3);     
00372           // // Subscribe to filter measurements.
00373           // if (external_init_) {
00374           //   //pos_sub_ = nh_.subscribe("people_tracker_filter",1,&FaceDetector::posCallback,this);
00375           //   pos_sub_ = nh_.subscribe("/face_detector/people_tracker_measurements_array",1,&FaceDetector::posCallback,this);
00376           // }
00377         }
00378       }
00379     }

    void goalCB() {
      as_.acceptNewGoal();
    }

    void preemptCB() {
      as_.setPreempted();
    }

    // Position callback from an external person tracker (currently disabled; see the
    // "resurrect the person tracker" note above).
    // void posCallback(const people_msgs::PositionMeasurementArrayConstPtr& pos_array_ptr) {

    //   // Put the incoming position into the position queue. It'll be processed in the next image callback.
    //   boost::mutex::scoped_lock lock(pos_mutex_);

    //   map<string, RestampedPositionMeasurement>::iterator it;
    //   for (uint ipa = 0; ipa < pos_array_ptr->people.size(); ipa++) {
    //     it = pos_list_.find(pos_array_ptr->people[ipa].object_id);
    //     RestampedPositionMeasurement rpm;
    //     rpm.pos = pos_array_ptr->people[ipa];
    //     rpm.restamp = pos_array_ptr->people[ipa].header.stamp;
    //     rpm.dist = BIGDIST_M;
    //     if (it == pos_list_.end()) {
    //       pos_list_.insert(pair<string, RestampedPositionMeasurement>(pos_array_ptr->people[ipa].object_id, rpm));
    //     }
    //     else if ((pos_array_ptr->people[ipa].header.stamp - (*it).second.pos.header.stamp) > ros::Duration().fromSec(-1.0) ){
    //       (*it).second = rpm;
    //     }
    //   }
    //   lock.unlock();

    // }

    // Workaround to convert a DisparityImage->Image into a shared pointer for cv_bridge in imageCBAll.
    struct NullDeleter
    {
      void operator()(void const *) const {}
    };

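    /**
     * Image callback for the RGB-D case. Receives a time-synchronized RGB image, depth
     * image, and the two camera infos, converts them to OpenCV, runs the depth-based
     * face detection, and hands the detections to matchAndDisplay().
     */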
    void imageCBAllDepth(const sensor_msgs::Image::ConstPtr &image, const sensor_msgs::Image::ConstPtr& depth_image, const sensor_msgs::CameraInfo::ConstPtr& c1_info, const sensor_msgs::CameraInfo::ConstPtr& c2_info)
    {
      // Only run the detector if in continuous mode or the detector was turned on through an action invocation.
      if (!do_continuous_ && !as_.isActive())
        return;

      // Clear out the result vector.
      result_.face_positions.clear();

      if (do_display_) {
        cv_mutex_.lock();
      }

      // ROS --> OpenCV
      cv_bridge::CvImageConstPtr cv_image_ptr = cv_bridge::toCvShare(image,"bgr8");
      cv_bridge::CvImageConstPtr cv_depth_ptr = cv_bridge::toCvShare(depth_image);
      cv::Mat depth_32fc1 = cv_depth_ptr->image;
      if (depth_image->encoding != "32FC1") {
        // Assume integer depth in millimeters (e.g. 16UC1) and convert to meters.
        cv_depth_ptr->image.convertTo(depth_32fc1, CV_32FC1, 0.001);
      }

      cam_model_.fromCameraInfo(c1_info, c2_info);

      // For display, keep a copy of the image that we can draw on.
      if (do_display_) {
        cv_image_out_ = (cv_image_ptr->image).clone();
      }

      // Time the detection.
      struct timeval timeofday;
      gettimeofday(&timeofday, NULL);
      ros::Time starttdetect = ros::Time().fromNSec(1e9*timeofday.tv_sec + 1e3*timeofday.tv_usec);

      vector<Box2D3D> faces_vector = faces_->detectAllFacesDepth(cv_image_ptr->image, 1.0, depth_32fc1, &cam_model_);

      gettimeofday(&timeofday, NULL);
      ros::Time endtdetect = ros::Time().fromNSec(1e9*timeofday.tv_sec + 1e3*timeofday.tv_usec);
      ros::Duration diffdetect = endtdetect - starttdetect;
      ROS_INFO_STREAM_NAMED("face_detector","Detection duration = " << diffdetect.toSec() << " sec");

      matchAndDisplay(faces_vector, image->header);
    }

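    /**
     * Image callback for the stereo case. Receives a time-synchronized left image,
     * disparity image, and the left/right camera infos, converts them to OpenCV, runs
     * the disparity-based face detection, and hands the detections to matchAndDisplay().
     */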
    void imageCBAllDisp(const sensor_msgs::Image::ConstPtr &image, const stereo_msgs::DisparityImage::ConstPtr& disp_image, const sensor_msgs::CameraInfo::ConstPtr& c1_info, const sensor_msgs::CameraInfo::ConstPtr& c2_info)
    {
      // Only run the detector if in continuous mode or the detector was turned on through an action invocation.
      if (!do_continuous_ && !as_.isActive())
        return;

      // Clear out the result vector.
      result_.face_positions.clear();

      if (do_display_) {
        cv_mutex_.lock();
      }

      // ROS --> OpenCV
      cv_bridge::CvImageConstPtr cv_image_ptr = cv_bridge::toCvShare(image,sensor_msgs::image_encodings::BGR8);
      cv_bridge::CvImageConstPtr cv_disp_ptr = cv_bridge::toCvShare(disp_image->image, disp_image);
      cam_model_.fromCameraInfo(c1_info, c2_info);

      // For display, keep a copy of the image that we can draw on.
      if (do_display_) {
        cv_image_out_ = (cv_image_ptr->image).clone();
      }

      // Time the detection.
      struct timeval timeofday;
      gettimeofday(&timeofday, NULL);
      ros::Time starttdetect = ros::Time().fromNSec(1e9*timeofday.tv_sec + 1e3*timeofday.tv_usec);

      vector<Box2D3D> faces_vector = faces_->detectAllFacesDisparity(cv_image_ptr->image, 1.0, cv_disp_ptr->image, &cam_model_);

      gettimeofday(&timeofday, NULL);
      ros::Time endtdetect = ros::Time().fromNSec(1e9*timeofday.tv_sec + 1e3*timeofday.tv_usec);
      ros::Duration diffdetect = endtdetect - starttdetect;
      ROS_INFO_STREAM_NAMED("face_detector","Detection duration = " << diffdetect.toSec() << " sec");

      matchAndDisplay(faces_vector, image->header);
    }

  private:

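    /**
     * Match the current detections against the list of known face positions, publish a
     * PositionMeasurementArray and a face-center point cloud, and (optionally) draw the
     * detections on the display image.
     */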
    void matchAndDisplay( vector<Box2D3D> faces_vector, std_msgs::Header header )
    {
      bool found_faces = false;

      int ngood = 0;
      sensor_msgs::PointCloud cloud;
      cloud.header.stamp = header.stamp;
      cloud.header.frame_id = header.frame_id;

      if (faces_vector.size() > 0) {

        // Transform the positions of the known faces and remove anyone who hasn't had an update in a long time.
        //      boost::mutex::scoped_lock pos_lock(pos_mutex_);
        map<string, RestampedPositionMeasurement>::iterator it;
        it = pos_list_.begin();
        while (it != pos_list_.end()) {
          if ((header.stamp - (*it).second.restamp) > ros::Duration().fromSec(5.0)) {
            // Position is too old, remove the person.
            pos_list_.erase(it++);
          }
          else {
            // Transform the person to this time. Note that the pos time is updated but not the restamp.
            tf::Point pt;
            tf::pointMsgToTF((*it).second.pos.pos, pt);
            tf::Stamped<tf::Point> loc(pt, (*it).second.pos.header.stamp, (*it).second.pos.header.frame_id);
            try {
              tf_.transformPoint(header.frame_id, header.stamp, loc, fixed_frame_, loc);
              (*it).second.pos.header.stamp = header.stamp;
              (*it).second.pos.pos.x = loc[0];
              (*it).second.pos.pos.y = loc[1];
              (*it).second.pos.pos.z = loc[2];
            }
            catch (tf::TransformException& ex) {
              // If the transform isn't available, keep the previous position.
            }
            it++;
          }
        }
        // End filter face position update

        // Associate the found faces with previously seen faces, and publish all good face centers.
        Box2D3D *one_face;
        people_msgs::PositionMeasurement pos;
        people_msgs::PositionMeasurementArray pos_array;

        for (uint iface = 0; iface < faces_vector.size(); iface++) {
          one_face = &faces_vector[iface];

          if (one_face->status == "good" || (one_face->status == "unknown" && do_publish_unknown_)) {

            std::string id = "";

            // Convert the face format to a PositionMeasurement msg.
            pos.header.stamp = header.stamp;
            pos.name = name_;
            pos.pos.x = one_face->center3d.x;
            pos.pos.y = one_face->center3d.y;
            pos.pos.z = one_face->center3d.z;
            pos.header.frame_id = header.frame_id; // "*_optical_frame"
            pos.reliability = reliability_;
            pos.initialization = 1;
            pos.covariance[0] = 0.04; pos.covariance[1] = 0.0;  pos.covariance[2] = 0.0;
            pos.covariance[3] = 0.0;  pos.covariance[4] = 0.04; pos.covariance[5] = 0.0;
            pos.covariance[6] = 0.0;  pos.covariance[7] = 0.0;  pos.covariance[8] = 0.04;

            // Check if this person's face is close enough to one of the previously known faces and associate it with the closest one.
            // Otherwise publish it with an empty id.
            // Note that multiple face positions can be published with the same id, but ids in the pos_list_ are unique.
            // The position of a face in the list is updated with the closest found face.
            double dist, mindist = BIGDIST_M;
            map<string, RestampedPositionMeasurement>::iterator close_it = pos_list_.end();
            for (it = pos_list_.begin(); it != pos_list_.end(); it++) {
              dist = pow((*it).second.pos.pos.x - pos.pos.x, 2.0) + pow((*it).second.pos.pos.y - pos.pos.y, 2.0) + pow((*it).second.pos.pos.z - pos.pos.z, 2.0);
              if (dist <= faces_->face_sep_dist_m_ && dist < mindist) {
                mindist = dist;
                close_it = it;
              }
            }
            if (close_it != pos_list_.end()) {
              pos.object_id = (*close_it).second.pos.object_id;
              if (mindist < (*close_it).second.dist) {
                (*close_it).second.restamp = header.stamp;
                (*close_it).second.dist = mindist;
                (*close_it).second.pos = pos;
              }
              ROS_INFO_STREAM_NAMED("face_detector","Found face to match with id " << pos.object_id);
            }
            else {
              max_id_++;
              pos.object_id = static_cast<ostringstream*>( &(ostringstream() << max_id_) )->str();
              ROS_INFO_STREAM_NAMED("face_detector","Didn't find face to match, starting new ID " << pos.object_id);
            }
            result_.face_positions.push_back(pos);
            found_faces = true;
          }
        }

        pos_array.header.stamp = header.stamp;
        pos_array.header.frame_id = header.frame_id;
        pos_array.people = result_.face_positions;
        if (pos_array.people.size() > 0) {
          pos_array_pub_.publish(pos_array);

          // Update the position list greedily. This should be rewritten.
          map<string, RestampedPositionMeasurement>::iterator it;
          for (uint ipa = 0; ipa < pos_array.people.size(); ipa++) {
            it = pos_list_.find(pos_array.people[ipa].object_id);
            RestampedPositionMeasurement rpm;
            rpm.pos = pos_array.people[ipa];
            rpm.restamp = pos_array.people[ipa].header.stamp;
            rpm.dist = BIGDIST_M;
            if (it == pos_list_.end()) {
              pos_list_.insert(pair<string, RestampedPositionMeasurement>(pos_array.people[ipa].object_id, rpm));
            }
            else if ((pos_array.people[ipa].header.stamp - (*it).second.pos.header.stamp) > ros::Duration().fromSec(-1.0)) {
              (*it).second = rpm;
            }
          }
          //      pos_lock.unlock();

          // Clear out all of the distances in the pos_list_.
          for (it = pos_list_.begin(); it != pos_list_.end(); it++) {
            (*it).second.dist = BIGDIST_M;
          }
        }

        // Done associating faces.

        /******** Display **************************************************************/

        // Publish a point cloud of face centers.
        cloud.channels.resize(1);
        cloud.channels[0].name = "intensity";

        for (uint iface = 0; iface < faces_vector.size(); iface++) {
          one_face = &faces_vector[iface];

          // Visualization of good faces as a point cloud.
          if (one_face->status == "good") {

            geometry_msgs::Point32 p;
            p.x = one_face->center3d.x;
            p.y = one_face->center3d.y;
            p.z = one_face->center3d.z;
            cloud.points.push_back(p);
            cloud.channels[0].values.push_back(1.0f);

            ngood++;
          }
        }

        cloud_pub_.publish(cloud);
        // Done publishing the point cloud.

      } // Done if faces_vector.size() > 0

      // Draw an appropriately colored rectangle on the display image and in the visualizer.
      if (do_display_) {
        displayFacesOnImage(faces_vector);
        cv_mutex_.unlock();
      }
      // Done drawing.

      /******** Done display **********************************************************/

      ROS_INFO_STREAM_NAMED("face_detector","Number of faces found: " << faces_vector.size() << ", number with good depth and size: " << ngood);

      // If you don't want continuous processing and you've found at least one face, turn off the detector.
      if (!do_continuous_ && found_faces) {
        as_.setSucceeded(result_);
      }
    }

    // Draw bounding boxes around detected faces on cv_image_out_ and show the image.
    void displayFacesOnImage(vector<Box2D3D> faces_vector)
    {
      Box2D3D *one_face;

      for (uint iface = 0; iface < faces_vector.size(); iface++) {
        one_face = &faces_vector[iface];
        // Visualization by image display.
        if (do_display_) {
          cv::Scalar color;
          if (one_face->status == "good") {
            color = cv::Scalar(0,255,0);    // green (BGR): good depth and plausible size
          }
          else if (one_face->status == "unknown") {
            color = cv::Scalar(255,0,0);    // blue (BGR): unknown depth/size
          }
          else {
            color = cv::Scalar(0,0,255);    // red (BGR): everything else
          }

          cv::rectangle(cv_image_out_,
                        cv::Point(one_face->box2d.x, one_face->box2d.y),
                        cv::Point(one_face->box2d.x+one_face->box2d.width, one_face->box2d.y+one_face->box2d.height), color, 4);
        }
      }

      cv::imshow("Face detector: Face Detection", cv_image_out_);
      cv::waitKey(2);
    }

  }; // end class FaceDetector

} // end namespace people

// Main
int main(int argc, char **argv)
{
  ros::init(argc, argv, "face_detector");

  // The FaceDetector constructor spins; it returns only on shutdown.
  people::FaceDetector fd(ros::this_node::getName());

  return 0;
}
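
/* A minimal consumer sketch for the detector's output (assumes the default output topic
 * advertised above; the node and callback names here are hypothetical):
 *
 *   #include <ros/ros.h>
 *   #include <people_msgs/PositionMeasurementArray.h>
 *
 *   void facesCB(const people_msgs::PositionMeasurementArray::ConstPtr& msg)
 *   {
 *     for (size_t i = 0; i < msg->people.size(); i++)
 *       ROS_INFO("Face %s at (%.2f, %.2f, %.2f) in %s", msg->people[i].object_id.c_str(),
 *                msg->people[i].pos.x, msg->people[i].pos.y, msg->people[i].pos.z,
 *                msg->people[i].header.frame_id.c_str());
 *   }
 *
 *   int main(int argc, char **argv)
 *   {
 *     ros::init(argc, argv, "face_listener");
 *     ros::NodeHandle nh;
 *     ros::Subscriber sub = nh.subscribe("face_detector/people_tracker_measurements_array", 1, facesCB);
 *     ros::spin();
 *     return 0;
 *   }
 */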