00001 /********************************************************************** 00002 * 00003 * Software License Agreement (BSD License) 00004 * 00005 * Copyright (c) 2008, Willow Garage, Inc. 00006 * All rights reserved. 00007 * 00008 * Redistribution and use in source and binary forms, with or without 00009 * modification, are permitted provided that the following conditions 00010 * are met: 00011 * 00012 * * Redistributions of source code must retain the above copyright 00013 * notice, this list of conditions and the following disclaimer. 00014 * * Redistributions in binary form must reproduce the above 00015 * copyright notice, this list of conditions and the following 00016 * disclaimer in the documentation and/or other materials provided 00017 * with the distribution. 00018 * * Neither the name of the Willow Garage nor the names of its 00019 * contributors may be used to endorse or promote products derived 00020 * from this software without specific prior written permission. 00021 * 00022 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 00023 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 00024 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 00025 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 00026 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 00027 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, 00028 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 00029 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 00030 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 00031 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN 00032 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 00033 * POSSIBILITY OF SUCH DAMAGE. 
00034 *********************************************************************/ 00035 00036 /* Author: Caroline Pantofaru 00037 * Modified for Kinect/Point Cloud support: Richard Bormann */ 00038 00039 00040 00041 #include <stdio.h> 00042 #include <iostream> 00043 #include <vector> 00044 #include <fstream> 00045 00046 #include <ros/ros.h> 00047 #include <boost/filesystem.hpp> 00048 #include <boost/thread/mutex.hpp> 00049 00050 #include <cob_people_detection/PositionMeasurement.h> 00051 #include <message_filters/subscriber.h> 00052 #include <message_filters/time_synchronizer.h> 00053 #include <message_filters/sync_policies/approximate_time.h> 00054 #include <image_transport/subscriber_filter.h> 00055 #include "sensor_msgs/CameraInfo.h" 00056 #include "sensor_msgs/Image.h" 00057 #include "stereo_msgs/DisparityImage.h" 00058 #include "sensor_msgs/PointCloud2.h" 00059 #include "cv_bridge/CvBridge.h" 00060 #include "tf/transform_listener.h" 00061 #include "sensor_msgs/PointCloud.h" 00062 #include "geometry_msgs/Point32.h" 00063 00064 #include <pcl/point_types.h> 00065 #include <pcl_ros/point_cloud.h> 00066 #include <pluginlib/class_list_macros.h> 00067 #include <nodelet/nodelet.h> 00068 00069 #include "opencv/cxcore.hpp" 00070 #include "opencv/cv.hpp" 00071 #include "opencv/highgui.h" 00072 00073 #include "cob_people_detection/faces.h" 00074 00075 #include "image_geometry/stereo_camera_model.h" 00076 00077 #include <actionlib/server/simple_action_server.h> 00078 #include <cob_people_detection/FaceDetectorAction.h> 00079 00080 00081 using namespace std; 00082 00083 namespace people { 00084 00087 class FaceDetectorColor : public nodelet::Nodelet { 00088 public: 00089 // Constants 00090 const double BIGDIST_M;// = 1000000.0; 00091 00092 // Node handle 00093 ros::NodeHandle nh_; 00094 std::string node_name_; 00095 00096 // Images and conversion 00097 image_transport::ImageTransport it_; 00098 image_transport::SubscriberFilter limage_sub_; 00099 
message_filters::Subscriber<stereo_msgs::DisparityImage> dimage_sub_; 00100 message_filters::Subscriber<sensor_msgs::CameraInfo> lcinfo_sub_; 00101 message_filters::Subscriber<sensor_msgs::CameraInfo> rcinfo_sub_; 00102 sensor_msgs::CvBridge lbridge_; 00103 sensor_msgs::CvBridge dbridge_; 00104 message_filters::Subscriber<sensor_msgs::PointCloud2> depth_cloud_sub_; 00107 message_filters::TimeSynchronizer<sensor_msgs::Image, stereo_msgs::DisparityImage, sensor_msgs::CameraInfo, sensor_msgs::CameraInfo> sync_stereo_; 00108 //message_filters::Synchronizer<message_filters::sync_policies::ApproximateTime<sensor_msgs::Image, stereo_msgs::DisparityImage, sensor_msgs::CameraInfo, sensor_msgs::CameraInfo, sensor_msgs::Image, sensor_msgs::PointCloud2> > sync_pointcloud_; /**< Pointcloud synchronizer. */ 00109 message_filters::Synchronizer<message_filters::sync_policies::ApproximateTime<sensor_msgs::Image, stereo_msgs::DisparityImage, sensor_msgs::PointCloud2> > sync_pointcloud_showdisp_; 00110 message_filters::Synchronizer<message_filters::sync_policies::ApproximateTime<sensor_msgs::Image, sensor_msgs::PointCloud2> > sync_pointcloud_nodisp_; 00113 // Action 00114 actionlib::SimpleActionServer<cob_people_detection::FaceDetectorAction>* as_; 00115 cob_people_detection::FaceDetectorFeedback feedback_; 00116 cob_people_detection::FaceDetectorResult result_; 00117 00118 // If running the face detector as a component in part of a larger person tracker, this subscribes to the tracker's position measurements and whether it was initialized by some other node. 00119 // Todo: resurrect the person tracker. 00120 ros::Subscriber pos_sub_; 00121 bool external_init_; 00122 00123 00124 // Publishers 00125 // A point cloud of the face positions, meant for visualization in rviz. 00126 // This could be replaced by visualization markers, but they can't be modified 00127 // in rviz at runtime (eg the alpha, display time, etc. can't be changed.) 
00128 ros::Publisher cloud_pub_; 00129 ros::Publisher pos_pub_; 00130 00131 // Display 00132 string do_display_; 00133 cv::Mat cv_image_out_; 00134 bool do_display_disparity_or_depth_; 00136 // Data Source - from stereo camera or depth sensor which exports a point cloud 2 00137 string data_source_mode_; 00139 // Stereo 00140 bool use_depth_; 00141 image_geometry::StereoCameraModel cam_model_; 00143 // Face detector params and output 00144 Faces *faces_; 00145 string name_; 00146 string haar_filename_; 00147 double reliability_; 00149 struct RestampedPositionMeasurement { 00150 ros::Time restamp; 00151 cob_people_detection::PositionMeasurement pos; 00152 double dist; 00153 }; 00154 map<string, RestampedPositionMeasurement> pos_list_; 00156 bool quit_; 00157 00158 tf::TransformListener tf_; 00159 00160 boost::mutex cv_mutex_, pos_mutex_, limage_mutex_, dimage_mutex_; 00161 00162 bool do_continuous_; 00164 bool do_publish_unknown_; 00166 void onInit() 00167 { 00168 std::cout << "IPA version\n\n"; 00169 00170 nh_ = getNodeHandle(); 00171 node_name_ = ros::this_node::getName(); 00172 00173 it_ = image_transport::ImageTransport(nh_); 00174 as_ = new actionlib::SimpleActionServer<cob_people_detection::FaceDetectorAction>(nh_, node_name_,false); 00175 //actionlib::SimpleActionServer<cob_people_detection::FaceDetectorAction> as(nh_, node_name_,false); 00176 00177 ROS_INFO_STREAM_NAMED("face_detector","Constructing FaceDetector."); 00178 00179 if (do_display_ == "local") { 00180 // OpenCV: pop up an OpenCV highgui window 00181 cv::namedWindow("Face detector: Face Detection", CV_WINDOW_AUTOSIZE); 00182 } 00183 00184 // Action stuff 00185 as_->registerGoalCallback(boost::bind(&FaceDetectorColor::goalCB, this)); 00186 as_->registerPreemptCallback(boost::bind(&FaceDetectorColor::preemptCB, this)); 00187 00188 faces_ = new Faces(); 00189 double face_size_min_m, face_size_max_m, max_face_z_m, face_sep_dist_m; 00190 00191 // Parameters 00192 ros::NodeHandle local_nh = 
getPrivateNodeHandle(); 00193 local_nh.param("classifier_name",name_,std::string("")); 00194 local_nh.param("classifier_filename",haar_filename_,std::string("")); 00195 local_nh.param("classifier_reliability",reliability_,0.0); 00196 local_nh.param("do_display",do_display_,std::string("none")); 00197 local_nh.param("do_continuous",do_continuous_,true); 00198 local_nh.param("do_publish_faces_of_unknown_size",do_publish_unknown_,false); 00199 local_nh.param("use_depth",use_depth_,true); 00200 local_nh.param("use_external_init",external_init_,true); 00201 local_nh.param("face_size_min_m",face_size_min_m,Faces::FACE_SIZE_MIN_M); 00202 local_nh.param("face_size_max_m",face_size_max_m,Faces::FACE_SIZE_MAX_M); 00203 local_nh.param("max_face_z_m",max_face_z_m,Faces::MAX_FACE_Z_M); 00204 local_nh.param("face_separation_dist_m",face_sep_dist_m,Faces::FACE_SEP_DIST_M); 00205 local_nh.param("do_display_disparity_or_depth",do_display_disparity_or_depth_,false); 00206 local_nh.param("data_source_mode",data_source_mode_,std::string("stereo")); 00207 00208 faces_->initFaceDetection(1, haar_filename_, face_size_min_m, face_size_max_m, max_face_z_m, face_sep_dist_m); 00209 00210 // Subscribe to the images and camera parameters 00211 string stereo_namespace, image_topic; 00212 stereo_namespace = nh_.resolveName("stereo"); 00213 image_topic = nh_.resolveName("image"); 00214 string left_topic = ros::names::clean(stereo_namespace + "/left/" + image_topic); 00215 string disparity_topic = ros::names::clean(stereo_namespace + "/disparity"); 00216 string left_camera_info_topic = ros::names::clean(stereo_namespace + "/left/camera_info"); 00217 string right_camera_info_topic = ros::names::clean(stereo_namespace + "/right/camera_info"); 00218 limage_sub_.subscribe(it_,left_topic,3); 00219 dimage_sub_.subscribe(nh_,disparity_topic,3); 00220 00221 sensor_msgs::PointCloud2::ConstPtr nullPointCloud; 00222 sensor_msgs::CameraInfo::ConstPtr nullCameraInfo; 00223 stereo_msgs::DisparityImage::ConstPtr 
nullDisparity; 00224 if (data_source_mode_=="stereo") 00225 { 00226 lcinfo_sub_.subscribe(nh_,left_camera_info_topic,3); 00227 rcinfo_sub_.subscribe(nh_,right_camera_info_topic,3); 00228 sync_stereo_.connectInput(limage_sub_, dimage_sub_, lcinfo_sub_, rcinfo_sub_); 00229 sync_stereo_.registerCallback(boost::bind(&FaceDetectorColor::imageCBAll, this, _1, _2, _3, _4, nullPointCloud)); 00230 } 00231 else if (data_source_mode_=="pointcloud") 00232 { 00233 depth_cloud_sub_.subscribe(nh_,ros::names::clean("/camera/depth/points"),3); 00234 // only connect to the disparity topic if this is available 00235 if (do_display_disparity_or_depth_==true) 00236 { 00237 sync_pointcloud_showdisp_.connectInput(limage_sub_, dimage_sub_, depth_cloud_sub_); 00238 sync_pointcloud_showdisp_.registerCallback(boost::bind(&FaceDetectorColor::imageCBAll, this, _1, _2, nullCameraInfo, nullCameraInfo, _3)); 00239 } 00240 else 00241 { 00242 sync_pointcloud_nodisp_.connectInput(limage_sub_, depth_cloud_sub_); 00243 sync_pointcloud_nodisp_.registerCallback(boost::bind(&FaceDetectorColor::imageCBAll, this, _1, nullDisparity, nullCameraInfo, nullCameraInfo, _2)); 00244 } 00245 } 00246 else 00247 { 00248 std::cout << "Error: FaceDetector::FaceDetector: data_source_mode_ must be either 'stereo' or 'pointcloud'.\n"; 00249 return; 00250 } 00251 //sensor_msgs::Image::ConstPtr nullImage = 0; 00252 //sync_.connectInput(limage_sub_, dimage_sub_, lcinfo_sub_, rcinfo_sub_, mdimage_sub_, depth_cloud_sub_); 00253 //sync_.registerCallback(boost::bind(&FaceDetectorColor::imageCBAll, this, _1, _2, _3, _4, /*_5*/nullImage, _6)); 00254 00255 // Advertise a position measure message. 00256 pos_pub_ = nh_.advertise<cob_people_detection::PositionMeasurement>("face_detector/people_tracker_measurements",1); 00257 00258 cloud_pub_ = nh_.advertise<sensor_msgs::PointCloud>("face_detector/faces_cloud",0); 00259 00260 // Subscribe to filter measurements. 
00261 if (external_init_) { 00262 pos_sub_ = nh_.subscribe("people_tracker_filter",1,&FaceDetectorColor::posCallback,this); 00263 ROS_INFO_STREAM_NAMED("face_detector","Subscribed to the person filter messages."); 00264 } 00265 00266 // start the action server 00267 if (do_continuous_==false) as_->start(); 00268 00269 ros::MultiThreadedSpinner s(2); 00270 ros::spin(s); 00271 } 00272 00273 FaceDetectorColor() : 00274 BIGDIST_M(1000000.0), 00275 it_(nh_), 00276 sync_stereo_(4), 00277 sync_pointcloud_showdisp_(3), 00278 sync_pointcloud_nodisp_(2), 00279 faces_(0), 00280 quit_(false) 00281 { 00282 // all code in onInit() because we are using a nodelet 00283 } 00284 00285 ~FaceDetectorColor() 00286 { 00287 00288 cv_image_out_.release(); 00289 00290 if (do_display_ == "local") { 00291 cv::destroyWindow("Face detector: Face Detection"); 00292 } 00293 00294 if (faces_) {delete faces_; faces_ = 0;} 00295 00296 as_->shutdown(); 00297 delete as_; 00298 } 00299 00300 void goalCB() { 00301 ROS_INFO("Face detector action started."); 00302 as_->acceptNewGoal(); 00303 } 00304 00305 void preemptCB() { 00306 ROS_INFO("Face detector action preempted."); 00307 as_->setPreempted(); 00308 } 00309 00310 00317 void posCallback(const cob_people_detection::PositionMeasurementConstPtr& pos_ptr) { 00318 00319 std::cout << "Tracking callback\n"; 00320 00321 // Put the incoming position into the position queue. It'll be processed in the next image callback. 
00322 boost::mutex::scoped_lock lock(pos_mutex_); 00323 map<string, RestampedPositionMeasurement>::iterator it; 00324 it = pos_list_.find(pos_ptr->object_id); 00325 RestampedPositionMeasurement rpm; 00326 rpm.pos = *pos_ptr; 00327 rpm.restamp = pos_ptr->header.stamp; 00328 rpm.dist = BIGDIST_M; 00329 if (it == pos_list_.end()) { 00330 pos_list_.insert(pair<string, RestampedPositionMeasurement>(pos_ptr->object_id, rpm)); 00331 } 00332 else if ((pos_ptr->header.stamp - (*it).second.pos.header.stamp) > ros::Duration().fromSec(-1.0) ){ 00333 (*it).second = rpm; 00334 } 00335 lock.unlock(); 00336 00337 } 00338 00339 // Workaround to convert a DisparityImage->Image into a shared pointer for cv_bridge in imageCBAll. 00340 struct NullDeleter 00341 { 00342 void operator()(void const *) const {} 00343 }; 00344 00345 00354 void imageCBAll(const sensor_msgs::Image::ConstPtr &limage, const stereo_msgs::DisparityImage::ConstPtr& dimage, const sensor_msgs::CameraInfo::ConstPtr& lcinfo, const sensor_msgs::CameraInfo::ConstPtr& rcinfo, 00355 const sensor_msgs::PointCloud2::ConstPtr& depth_cloud_msg) 00356 { 00357 00358 // Only run the detector if in continuous mode or the detector was turned on through an action invocation. 00359 if (!do_continuous_ && !as_->isActive()) 00360 return; 00361 00362 // Clear out the result vector. 
00363 result_.face_positions.clear(); 00364 00365 if (do_display_ == "local") { 00366 cv_mutex_.lock(); 00367 } 00368 00369 // ROS --> OpenCV 00370 cv::Mat cv_image_left(lbridge_.imgMsgToCv(limage,"bgr8")); // (left) camera image 00371 sensor_msgs::ImageConstPtr boost_dimage; 00372 if (data_source_mode_=="stereo" || do_display_disparity_or_depth_==true) boost_dimage = sensor_msgs::ImageConstPtr(const_cast<sensor_msgs::Image*>(&dimage->image), NullDeleter()); 00373 cv::Mat cv_image_disp; // disparity image for stereo mode 00374 pcl::PointCloud<pcl::PointXYZ> depth_cloud; // point cloud for point cloud mode 00375 if (data_source_mode_=="stereo") 00376 { 00377 cv_image_disp = cv::Mat(dbridge_.imgMsgToCv(boost_dimage)); 00378 cam_model_.fromCameraInfo(lcinfo,rcinfo); 00379 } 00380 else if (data_source_mode_=="pointcloud") pcl::fromROSMsg(*depth_cloud_msg, depth_cloud); 00381 00382 // check disparity image contents 00383 // std::cout << "channels: " << cv_image_disp.channels() << "\n"; 00384 // for (int i=100; i < 110; i++) 00385 // { 00386 // printf("line %d\n", i); 00387 // float* p_disp = cv_image_disp.ptr<float>(i); 00388 // for (int j=100; j < 110; j++) 00389 // printf("%.4f ", p_disp[j]); 00390 // printf("\n"); 00391 // } 00392 // check point cloud contents which are accessible like via depth image coordinates (u,v) 00393 // for (int i=100; i < 110; i++) 00394 // { 00395 // printf("line %d\n", i); 00396 // for (int j=100; j < 110; j++) 00397 // { 00398 // pcl::PointXYZ point = depth_cloud(i, j); 00399 // printf("(%d/%d):(%.4f/%.4f/%.4f) ", i, j, point.x, point.y, point.z); 00400 // } 00401 // printf("\n"); 00402 // } 00403 00404 // display the disparity image if available 00405 if (do_display_disparity_or_depth_ == true) 00406 { 00407 cv::Mat cv_image_disp_copy(dbridge_.imgMsgToCv(boost_dimage, "mono8")); 00408 cv::imshow("disp", cv_image_disp_copy); 00409 cv::waitKey(10); 00410 } 00411 00412 // For display, keep a copy of the image that we can draw on. 
00413 if (do_display_ == "local") { 00414 cv_image_out_ = cv_image_left.clone(); 00415 } 00416 00417 struct timeval timeofday; 00418 gettimeofday(&timeofday,NULL); 00419 ros::Time starttdetect = ros::Time().fromNSec(1e9*timeofday.tv_sec + 1e3*timeofday.tv_usec); 00420 00421 vector<Box2D3D> faces_vector = faces_->detectAllFaces(cv_image_left, 1.0, cv_image_disp, &cam_model_, &depth_cloud); 00422 gettimeofday(&timeofday,NULL); 00423 ros::Time endtdetect = ros::Time().fromNSec(1e9*timeofday.tv_sec + 1e3*timeofday.tv_usec); 00424 ros::Duration diffdetect = endtdetect - starttdetect; 00425 ROS_DEBUG_STREAM_NAMED("face_detector","Detection duration = " << diffdetect.toSec() << "sec"); 00426 //ROS_INFO_STREAM("Detection duration = " << diffdetect.toSec() << "sec"); 00427 00428 bool found_faces = false; 00429 00430 int ngood = 0; 00431 sensor_msgs::PointCloud cloud; 00432 cloud.header.stamp = limage->header.stamp; 00433 cloud.header.frame_id = limage->header.frame_id; 00434 00435 if (faces_vector.size() > 0 ) { 00436 00437 // Transform the positions of the known faces and remove anyone who hasn't had an update in a long time. 00438 boost::mutex::scoped_lock pos_lock(pos_mutex_); 00439 map<string, RestampedPositionMeasurement>::iterator it; 00440 for (it = pos_list_.begin(); it != pos_list_.end(); it++) { 00441 if ((limage->header.stamp - (*it).second.restamp) > ros::Duration().fromSec(5.0)) { 00442 // Position is too old, kill the person. 00443 pos_list_.erase(it); 00444 } 00445 else { 00446 // Transform the person to this time. Note that the pos time is updated but not the restamp. 
00447 tf::Point pt; 00448 tf::pointMsgToTF((*it).second.pos.pos, pt); 00449 tf::Stamped<tf::Point> loc(pt, (*it).second.pos.header.stamp, (*it).second.pos.header.frame_id); 00450 try { 00451 tf_.transformPoint(limage->header.frame_id, limage->header.stamp, loc, "odom_combined", loc); 00452 (*it).second.pos.header.stamp = limage->header.stamp; 00453 (*it).second.pos.pos.x = loc[0]; 00454 (*it).second.pos.pos.y = loc[1]; 00455 (*it).second.pos.pos.z = loc[2]; 00456 } 00457 catch (tf::TransformException& ex) { 00458 } 00459 } 00460 } 00461 // End filter face position update 00462 00463 // Associate the found faces with previously seen faces, and publish all good face centers. 00464 Box2D3D *one_face; 00465 cob_people_detection::PositionMeasurement pos; 00466 00467 for (uint iface = 0; iface < faces_vector.size(); iface++) { 00468 one_face = &faces_vector[iface]; 00469 00470 if (one_face->status=="good" || (one_face->status=="unknown" && do_publish_unknown_)) { 00471 00472 std::string id = ""; 00473 00474 // Convert the face format to a PositionMeasurement msg. 00475 pos.header.stamp = limage->header.stamp; 00476 pos.name = name_; 00477 pos.pos.x = one_face->center3d.x; 00478 pos.pos.y = one_face->center3d.y; 00479 pos.pos.z = one_face->center3d.z; 00480 pos.header.frame_id = limage->header.frame_id;//"*_stereo_optical_frame"; 00481 pos.reliability = reliability_; 00482 pos.initialization = 1;//0; 00483 pos.covariance[0] = 0.04; pos.covariance[1] = 0.0; pos.covariance[2] = 0.0; 00484 pos.covariance[3] = 0.0; pos.covariance[4] = 0.04; pos.covariance[5] = 0.0; 00485 pos.covariance[6] = 0.0; pos.covariance[7] = 0.0; pos.covariance[8] = 0.04; 00486 00487 // Check if this person's face is close enough to one of the previously known faces and associate it with the closest one. 00488 // Otherwise publish it with an empty id. 00489 // Note that multiple face positions can be published with the same id, but ids in the pos_list_ are unique. 
The position of a face in the list is updated with the closest found face. 00490 double dist, mindist = BIGDIST_M; 00491 map<string, RestampedPositionMeasurement>::iterator close_it = pos_list_.end(); 00492 for (it = pos_list_.begin(); it != pos_list_.end(); it++) { 00493 dist = pow((*it).second.pos.pos.x - pos.pos.x, 2.0) + pow((*it).second.pos.pos.y - pos.pos.y, 2.0) + pow((*it).second.pos.pos.z - pos.pos.z, 2.0); 00494 if (dist <= faces_->face_sep_dist_m_ && dist < mindist) { 00495 mindist = dist; 00496 close_it = it; 00497 } 00498 } 00499 std::cout << "poselist size: " << pos_list_.size() << "\n"; 00500 if (close_it != pos_list_.end()) { 00501 if (mindist < (*close_it).second.dist) { 00502 (*close_it).second.restamp = limage->header.stamp; 00503 (*close_it).second.dist = mindist; 00504 (*close_it).second.pos = pos; 00505 } 00506 pos.object_id = (*close_it).second.pos.object_id; 00507 std::cout << "Object ID: " << pos.object_id << "\n"; 00508 ROS_INFO_STREAM_NAMED("face_detector","Found face " << pos.object_id); 00509 } 00510 else { 00511 pos.object_id = ""; 00512 } 00513 result_.face_positions.push_back(pos); 00514 found_faces = true; 00515 pos_pub_.publish(pos); 00516 00517 } 00518 00519 } 00520 pos_lock.unlock(); 00521 00522 // Clean out all of the distances in the pos_list_ 00523 for (it = pos_list_.begin(); it != pos_list_.end(); it++) { 00524 (*it).second.dist = BIGDIST_M; 00525 } 00526 // Done associating faces. 00527 00528 /******** Display **************************************************************/ 00529 00530 // Draw an appropriately colored rectangle on the display image and in the visualizer. 
00531 00532 cloud.channels.resize(1); 00533 cloud.channels[0].name = "intensity"; 00534 00535 for (uint iface = 0; iface < faces_vector.size(); iface++) { 00536 one_face = &faces_vector[iface]; 00537 00538 // Visualization of good faces as a point cloud 00539 if (one_face->status == "good") { 00540 00541 geometry_msgs::Point32 p; 00542 p.x = one_face->center3d.x; 00543 p.y = one_face->center3d.y; 00544 p.z = one_face->center3d.z; 00545 cloud.points.push_back(p); 00546 cloud.channels[0].values.push_back(1.0f); 00547 00548 ngood ++; 00549 } 00550 else { 00551 ROS_DEBUG_STREAM_NAMED("face_detector","The detection didn't have a valid size, so it wasn't visualized."); 00552 } 00553 00554 // Visualization by image display. 00555 if (do_display_ == "local") { 00556 cv::Scalar color; 00557 if (one_face->status == "good") { 00558 color = cv::Scalar(0,255,0); 00559 } 00560 else if (one_face->status == "unknown") { 00561 color = cv::Scalar(255,0,0); 00562 } 00563 else { 00564 color = cv::Scalar(0,0,255); 00565 } 00566 00567 if (do_display_ == "local") { 00568 cv::rectangle(cv_image_out_, 00569 cv::Point(one_face->box2d.x,one_face->box2d.y), 00570 cv::Point(one_face->box2d.x+one_face->box2d.width, one_face->box2d.y+one_face->box2d.height), color, 4); 00571 } 00572 } 00573 } 00574 00575 } 00576 00577 cloud_pub_.publish(cloud); 00578 00579 // Display 00580 if (do_display_ == "local") { 00581 00582 cv::imshow("Face detector: Face Detection",cv_image_out_); 00583 cv::waitKey(2); 00584 00585 cv_mutex_.unlock(); 00586 } 00587 /******** Done display **********************************************************/ 00588 00589 ROS_INFO_STREAM_NAMED("face_detector","Number of faces found: " << faces_vector.size() << ", number with good depth and size: " << ngood); 00590 00591 00592 // If you don't want continuous processing and you've found at least one face, turn off the detector. 
00593 if (!do_continuous_ && found_faces) { 00594 as_->setSucceeded(result_); 00595 } 00596 } 00597 00598 }; // end class 00599 00600 }; // end namespace people 00601 00602 PLUGINLIB_DECLARE_CLASS(people, FaceDetectorColor, people::FaceDetectorColor, nodelet::Nodelet); 00603 00604 // Main 00605 // int main(int argc, char **argv) 00606 // { 00607 // ros::init(argc,argv,"face_detector"); 00608 // 00609 // node_name_ = ros::this_node::getName(); 00610 // people::FaceDetector fd(); 00611 // 00612 // return 0; 00613 // } 00614 00615