face_detector_node.cpp

#ifdef __LINUX__
#include "cob_people_detection/face_detector_node.h"
#include "cob_vision_utils/GlobalDefines.h"
#else
#endif

// OpenCV
#include "opencv/cv.h"
#include <cv_bridge/cv_bridge.h>
#include <sensor_msgs/image_encodings.h>

// Boost
#include <boost/shared_ptr.hpp>

// Timer
#include <cob_people_detection/timer.h>

using namespace ipa_PeopleDetector;

FaceDetectorNode::FaceDetectorNode(ros::NodeHandle nh) :
	node_handle_(nh)
{
	data_directory_ = ros::package::getPath("cob_people_detection") + "/common/files/";

	// Parameters
	double faces_increase_search_scale; // The factor by which the search window is scaled between subsequent scans
	int faces_drop_groups; // Minimum number (minus 1) of neighboring rectangles required for a detection to be accepted
	int faces_min_search_scale_x; // Minimum search scale in x-direction
	int faces_min_search_scale_y; // Minimum search scale in y-direction
	bool reason_about_3dface_size; // If true, the 3D face size is estimated and only faces of plausible size are accepted
	double face_size_max_m; // The maximum feasible face diameter [m] if reason_about_3dface_size is enabled
	double face_size_min_m; // The minimum feasible face diameter [m] if reason_about_3dface_size is enabled
	double max_face_z_m; // Maximum distance [m] of detected faces from the sensor
	bool debug; // Enables debug output
	std::cout << "\n--------------------------\nFace Detector Parameters:\n--------------------------\n";
	node_handle_.param("data_directory", data_directory_, data_directory_);
	std::cout << "data_directory = " << data_directory_ << "\n";
	node_handle_.param("faces_increase_search_scale", faces_increase_search_scale, 1.1);
	std::cout << "faces_increase_search_scale = " << faces_increase_search_scale << "\n";
	node_handle_.param("faces_drop_groups", faces_drop_groups, 68);
	std::cout << "faces_drop_groups = " << faces_drop_groups << "\n";
	node_handle_.param("faces_min_search_scale_x", faces_min_search_scale_x, 20);
	std::cout << "faces_min_search_scale_x = " << faces_min_search_scale_x << "\n";
	node_handle_.param("faces_min_search_scale_y", faces_min_search_scale_y, 20);
	std::cout << "faces_min_search_scale_y = " << faces_min_search_scale_y << "\n";
	node_handle_.param("reason_about_3dface_size", reason_about_3dface_size, true);
	std::cout << "reason_about_3dface_size = " << reason_about_3dface_size << "\n";
	node_handle_.param("face_size_max_m", face_size_max_m, 0.35);
	std::cout << "face_size_max_m = " << face_size_max_m << "\n";
	node_handle_.param("face_size_min_m", face_size_min_m, 0.1);
	std::cout << "face_size_min_m = " << face_size_min_m << "\n";
	node_handle_.param("max_face_z_m", max_face_z_m, 8.0);
	std::cout << "max_face_z_m = " << max_face_z_m << "\n";
	node_handle_.param("debug", debug, false);
	std::cout << "debug = " << debug << "\n";
	node_handle_.param("display_timing", display_timing_, false);
	std::cout << "display_timing = " << display_timing_ << "\n";

	// initialize the face detector
	face_detector_.init(data_directory_, faces_increase_search_scale, faces_drop_groups, faces_min_search_scale_x, faces_min_search_scale_y, reason_about_3dface_size,
			face_size_max_m, face_size_min_m, max_face_z_m, debug);

	// advertise the face positions topic
	face_position_publisher_ = node_handle_.advertise<cob_perception_msgs::ColorDepthImageArray>("face_positions", 1);

	// subscribe to the head detection topic
	head_position_subscriber_ = nh.subscribe("head_positions", 1, &FaceDetectorNode::head_positions_callback, this);

	std::cout << "FaceDetectorNode initialized." << std::endl;
}
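
All of the parameters above are read from the ROS parameter server via node_handle_.param() and can therefore be changed without recompiling, typically through the package's launch files. As a minimal sketch, the hypothetical helper below pre-loads a few of them programmatically; the parameter names are taken from the constructor above, while the helper's node name and the assumption about the namespace in which the parameters are resolved are illustrative and depend on how the face_detector node is actually launched.

// Illustrative sketch, not part of this package: pre-loading some of the parameters
// read by FaceDetectorNode onto the parameter server (names taken from the constructor
// above; the required namespace depends on how the node is launched).
#include <ros/ros.h>

int main(int argc, char** argv)
{
	ros::init(argc, argv, "face_detector_param_loader"); // hypothetical node name
	ros::NodeHandle nh;

	nh.setParam("faces_increase_search_scale", 1.2); // scale step between detector scans
	nh.setParam("faces_drop_groups", 68); // minimum neighbor count for accepting a detection
	nh.setParam("reason_about_3dface_size", true); // enable the 3D size plausibility check
	nh.setParam("max_face_z_m", 6.0); // ignore faces farther than 6 m from the sensor

	return 0;
}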

FaceDetectorNode::~FaceDetectorNode(void)
{
}

// No-op deleter: allows wrapping an image that is owned by the incoming message in a
// smart pointer without the pointer trying to free memory it does not own
void voidDeleter(const sensor_msgs::Image* const )
{
}

void FaceDetectorNode::head_positions_callback(const cob_perception_msgs::ColorDepthImageArray::ConstPtr& head_positions)
{
	//	Timer tim;
	//	tim.start();

	// Receive the head positions, detect faces within each head region, and finally publish the detected faces.

	// Convert the color and depth image patches of the head regions to cv::Mat.
	std::vector<cv::Mat> heads_color_images, heads_depth_images;
	heads_color_images.resize(head_positions->head_detections.size());
	heads_depth_images.resize(head_positions->head_detections.size());
	cv_bridge::CvImageConstPtr cv_cptr;
	cv_bridge::CvImagePtr cv_ptr(new cv_bridge::CvImage);
	for (unsigned int i = 0; i < head_positions->head_detections.size(); i++)
	{
		// color image
		sensor_msgs::ImageConstPtr msgPtr = boost::shared_ptr<sensor_msgs::Image const>(&(head_positions->head_detections[i].color_image), voidDeleter);
		try
		{
			cv_cptr = cv_bridge::toCvShare(msgPtr, sensor_msgs::image_encodings::RGB8);
		} catch (cv_bridge::Exception& e)
		{
			ROS_ERROR("cv_bridge exception: %s", e.what());
			return;
		}
		// clone, because the color image is republished below
		heads_color_images[i] = cv_cptr->image.clone();

		// depth image
		msgPtr = boost::shared_ptr<const sensor_msgs::Image>(&(head_positions->head_detections[i].depth_image), voidDeleter);
		try
		{
			cv_cptr = cv_bridge::toCvShare(msgPtr, sensor_msgs::image_encodings::TYPE_32FC3);
		} catch (cv_bridge::Exception& e)
		{
			ROS_ERROR("cv_bridge exception: %s", e.what());
			return;
		}
		// no clone needed: the depth patch is only read while the message is still alive within this callback
		heads_depth_images[i] = cv_cptr->image;
	}
	std::vector<std::vector<cv::Rect> > face_coordinates;
	face_detector_.detectColorFaces(heads_color_images, heads_depth_images, face_coordinates);
	// face_normalizer_.normalizeFaces(heads_color_images, heads_depth_images, face_coordinates);

	// Prepare the message for publication: copy the incoming head detections and attach the face rectangles.
	cob_perception_msgs::ColorDepthImageArray image_array;
	image_array = *head_positions;
	for (unsigned int i = 0; i < face_coordinates.size(); i++)
	{
		for (unsigned int j = 0; j < face_coordinates[i].size(); j++)
		{
			// face rectangle
			cob_perception_msgs::Rect rect;
			rect.x = face_coordinates[i][j].x;
			rect.y = face_coordinates[i][j].y;
			rect.width = face_coordinates[i][j].width;
			rect.height = face_coordinates[i][j].height;
			image_array.head_detections[i].face_detections.push_back(rect);
		}
		// processed color image
		cv_ptr->encoding = sensor_msgs::image_encodings::RGB8;
		cv_ptr->image = heads_color_images[i];
		cv_ptr->toImageMsg(image_array.head_detections[i].color_image);
		image_array.head_detections[i].color_image.header = head_positions->head_detections[i].color_image.header;
	}

	face_position_publisher_.publish(image_array);

	if (display_timing_ == true)
		ROS_INFO("%u FaceDetection: Time stamp of pointcloud message: %f. Delay: %f.", head_positions->header.seq, head_positions->header.stamp.toSec(),
				ros::Time::now().toSec() - head_positions->header.stamp.toSec());
	//	ROS_INFO("Face detection took %f ms.", tim.getElapsedTimeInMilliSec());
}
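
For reference, the sketch below shows one way a downstream node could consume the face_positions topic published above. It only relies on fields that appear in this file (head_detections, face_detections and the cob_perception_msgs::Rect members); the node name and callback are hypothetical, the message header include is assumed to follow the standard ROS message naming, and the Rect fields are assumed to be 32-bit integers, as suggested by the assignments from cv::Rect above.

// Illustrative sketch, not part of this package: a hypothetical consumer that listens
// to the detections published on "face_positions" and prints the face rectangles.
#include <ros/ros.h>
#include <cob_perception_msgs/ColorDepthImageArray.h>

void facePositionsCallback(const cob_perception_msgs::ColorDepthImageArray::ConstPtr& msg)
{
	for (unsigned int i = 0; i < msg->head_detections.size(); i++)
	{
		for (unsigned int j = 0; j < msg->head_detections[i].face_detections.size(); j++)
		{
			const cob_perception_msgs::Rect& r = msg->head_detections[i].face_detections[j];
			ROS_INFO("head %u, face %u: x=%d y=%d w=%d h=%d", i, j, r.x, r.y, r.width, r.height);
		}
	}
}

int main(int argc, char** argv)
{
	ros::init(argc, argv, "face_positions_listener"); // hypothetical node name
	ros::NodeHandle nh;
	ros::Subscriber sub = nh.subscribe("face_positions", 1, facePositionsCallback);
	ros::spin();
	return 0;
}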

//#######################
//#### main program ####
int main(int argc, char** argv)
{
	// Initialize ROS, specify name of node
	ros::init(argc, argv, "face_detector");

	// Create a handle for this node, initialize node
	ros::NodeHandle nh;

	// Create FaceDetectorNode class instance
	FaceDetectorNode face_detector_node(nh);

	// Create action nodes
	//DetectObjectsAction detect_action_node(object_detection_node, nh);
	//AcquireObjectImageAction acquire_image_node(object_detection_node, nh);
	//TrainObjectAction train_object_node(object_detection_node, nh);

	ros::spin();

	return 0;
}


cob_people_detection
Author(s): Richard Bormann, Thomas Zwölfer