face_recognizer_node.cpp
#ifdef __LINUX__
#include "cob_people_detection/face_recognizer_node.h"
#include "cob_vision_utils/GlobalDefines.h"
#else
#endif

// OpenCV
#include "opencv/cv.h"
#include "opencv/highgui.h"
#include <cv_bridge/cv_bridge.h>
#include <sensor_msgs/image_encodings.h>

// Boost
#include <boost/shared_ptr.hpp>

#include <sys/time.h>

using namespace ipa_PeopleDetector;

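// ROS interface of this node (as wired up in the constructor below):
//  - subscribes "face_positions" (cob_perception_msgs::ColorDepthImageArray): head/face detections with color and depth image patches
//  - publishes "face_recognitions" (cob_perception_msgs::DetectionArray): labeled face/head detections with 3D positions
//  - offers the action server "load_model_server" for loading a recognition model for a given list of person labels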
FaceRecognizerNode::FaceRecognizerNode(ros::NodeHandle nh) :
    node_handle_(nh)
{
    //    data_directory_ = ros::package::getPath("cob_people_detection") + "/common/files/";
    //    classifier_directory_ = ros::package::getPath("cob_people_detection") + "/common/files/";
    // Parameters
    bool norm_illumination;
    bool norm_align;
    bool norm_extreme_illumination;
    int norm_size; // desired width and height of the Eigenfaces (=eigenvectors)
    int feature_dimension; // number of eigenvectors per person to identify -> controls the total number of eigenvectors
    double threshold_facespace; // threshold to facespace
    double threshold_unknown; // threshold to detect unknown faces
    int metric; // metric for nearest neighbor search in face space: 0 = Euclidean, 1 = Mahalanobis, 2 = Mahalanobis Cosine
    bool debug; // enables some debug outputs
    int recognition_method; // choose subspace method
    bool use_unknown_thresh; // use threshold for unknown faces
    bool use_depth; // use depth for recognition
    std::vector<std::string> identification_labels_to_recognize; // a list of labels of persons that shall be recognized
    std::cout << "\n--------------------------\nFace Recognizer Parameters:\n--------------------------\n";
    //if(!node_handle_.getParam("~data_directory", data_directory_)) std::cout<<"PARAM NOT AVAILABLE"<<std::endl;
    if (!node_handle_.getParam("/cob_people_detection/data_storage_directory", data_directory_))
        std::cout << "PARAM NOT AVAILABLE" << std::endl;
    std::cout << "data_directory = " << data_directory_ << "\n";
    node_handle_.param("enable_face_recognition", enable_face_recognition_, true);
    std::cout << "enable_face_recognition = " << enable_face_recognition_ << "\n";
    node_handle_.param("feature_dimension", feature_dimension, 10);
    std::cout << "feature_dimension = " << feature_dimension << "\n";
    node_handle_.param("threshold_facespace", threshold_facespace, 10000.0);
    std::cout << "threshold_facespace = " << threshold_facespace << "\n";
    node_handle_.param("threshold_unknown", threshold_unknown, 1000.0);
    std::cout << "threshold_unknown = " << threshold_unknown << "\n";
    node_handle_.param("metric", metric, 0);
    std::cout << "metric = " << metric << "\n";
    node_handle_.param("debug", debug, false);
    std::cout << "debug = " << debug << "\n";
    node_handle_.param("recognition_method", recognition_method, 3);
    std::cout << "recognition_method = " << recognition_method << "\n";
    node_handle_.param("use_unknown_thresh", use_unknown_thresh, true);
    std::cout << "use_unknown_thresh = " << use_unknown_thresh << "\n";
    node_handle_.param("use_depth", use_depth, false);
    std::cout << "use_depth = " << use_depth << "\n";
    node_handle_.param("display_timing", display_timing_, false);
    std::cout << "display_timing = " << display_timing_ << "\n";
    node_handle_.param("norm_size", norm_size, 100);
    std::cout << "norm_size = " << norm_size << "\n";
    node_handle_.param("norm_illumination", norm_illumination, true);
    std::cout << "norm_illumination = " << norm_illumination << "\n";
    node_handle_.param("norm_align", norm_align, false);
    std::cout << "norm_align = " << norm_align << "\n";
    node_handle_.param("norm_extreme_illumination", norm_extreme_illumination, false);
    std::cout << "norm_extreme_illumination = " << norm_extreme_illumination << "\n";
    // todo: make parameters for illumination and alignment normalization on/off

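    // The parameters above are typically provided via the parameter server, e.g. loaded from a
    // YAML file into this node's namespace with a launch file or "rosparam load" (sketch with
    // illustrative values only; note that data_storage_directory is read from the fixed global
    // name /cob_people_detection/data_storage_directory):
    //   enable_face_recognition: true
    //   feature_dimension: 10
    //   threshold_facespace: 10000.0
    //   threshold_unknown: 1000.0
    //   metric: 0                  # 0 = Euclidean, 1 = Mahalanobis, 2 = Mahalanobis Cosine
    //   recognition_method: 3
    //   use_unknown_thresh: true
    //   use_depth: false
    //   norm_size: 100
    //   norm_illumination: true
    //   norm_align: false
    //   norm_extreme_illumination: false
    //   display_timing: false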
    std::cout << "identification_labels_to_recognize: \n";
    XmlRpc::XmlRpcValue identification_labels_to_recognize_list;
    node_handle_.getParam("identification_labels_to_recognize", identification_labels_to_recognize_list);
    if (identification_labels_to_recognize_list.getType() == XmlRpc::XmlRpcValue::TypeArray)
    {
        identification_labels_to_recognize.resize(identification_labels_to_recognize_list.size());
        for (int i = 0; i < identification_labels_to_recognize_list.size(); i++)
        {
            ROS_ASSERT(identification_labels_to_recognize_list[i].getType() == XmlRpc::XmlRpcValue::TypeString);
            identification_labels_to_recognize[i] = static_cast<std::string>(identification_labels_to_recognize_list[i]);
        }
    }
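    // The label list is expected as a string array on the parameter server, e.g. (illustrative names):
    //   identification_labels_to_recognize: ["Alice", "Bob"]
    // If the parameter is missing or has a different type, the list simply stays empty.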

    // initialize face recognizer
    unsigned long return_value = face_recognizer_.init(data_directory_, norm_size, norm_illumination, norm_align, norm_extreme_illumination, metric, debug,
            identification_labels_to_recognize, recognition_method, feature_dimension, use_unknown_thresh, use_depth);
    if (return_value == ipa_Utils::RET_FAILED)
    {
        ROS_ERROR("Recognition model not trained");
    }
    else if (return_value == ipa_Utils::RET_OK)
    {
        std::cout << "Recognition model trained or loaded for:\n";
        for (unsigned int i = 0; i < identification_labels_to_recognize.size(); i++)
            std::cout << "   - " << identification_labels_to_recognize[i] << std::endl;

        // advertise topics
        face_recognition_publisher_ = node_handle_.advertise<cob_perception_msgs::DetectionArray>("face_recognitions", 1);

        // subscribe to head detection topic
        face_position_subscriber_ = nh.subscribe("face_positions", 1, &FaceRecognizerNode::facePositionsCallback, this);

        // launch LoadModel server
        load_model_server_ = new LoadModelServer(node_handle_, "load_model_server", boost::bind(&FaceRecognizerNode::loadModelServerCallback, this, _1), false);
        load_model_server_->start();

        ROS_INFO("FaceRecognizerNode initialized.");
    }
}

FaceRecognizerNode::~FaceRecognizerNode(void)
{
    if (load_model_server_ != 0)
        delete load_model_server_;
}

// No-op deleter: prevents a smart pointer from deleting memory it does not own
void voidDeleter(const sensor_msgs::Image* const)
{
}
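
// Usage pattern (as in the callback below): an image owned by the surrounding message is wrapped
// in a non-owning shared_ptr so it can be handed to cv_bridge::toCvShare() without being freed twice, e.g.
//   sensor_msgs::ImageConstPtr msgPtr = boost::shared_ptr<sensor_msgs::Image const>(&(head_detection.color_image), voidDeleter);
// where head_detection stands for one entry of the incoming detection array.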

//void FaceRecognizerNode::facePositionsCallback(const cob_perception_msgs::ColorDepthImageCropArray::ConstPtr& face_positions)
//{
//    // receive head and face positions and recognize faces in the face region, finally publish detected and recognized faces
//
//    // --- convert color image patches of head regions and contained face bounding boxes ---
//    cv_bridge::CvImageConstPtr cv_ptr;
//    std::vector<cv::Mat> heads_color_images;
//    heads_color_images.resize(face_positions->cdia.head_detections.size());
//    std::vector<cv::Mat> heads_depth_images;
//    heads_depth_images.resize(face_positions->cdia.head_detections.size());
//
//    std::vector< std::vector<cv::Rect> > face_bounding_boxes;
//    std::vector< std::vector<cv::Rect> > crop_bounding_boxes;
//    face_bounding_boxes.resize(face_positions->cdia.head_detections.size());
//    crop_bounding_boxes.resize(face_positions->cdia.head_detections.size());
//    cv::Rect bb=cv::Rect(0,0,160,160);
//
//    std::vector<cv::Rect> head_bounding_boxes;
//    head_bounding_boxes.resize(face_positions->cdia.head_detections.size());
//
//    for (unsigned int i=0; i<face_positions->cdia.head_detections.size(); i++)
//    {
//        // color image
//        if (enable_face_recognition_ == true)
//        {
//            sensor_msgs::ImageConstPtr msgPtr = boost::shared_ptr<sensor_msgs::Image const>(&(face_positions->cdia.head_detections[i].color_image), voidDeleter);
//            try
//            {
//                cv_ptr = cv_bridge::toCvShare(msgPtr, sensor_msgs::image_encodings::BGR8);
//            }
//            catch (cv_bridge::Exception& e)
//            {
//                ROS_ERROR("cv_bridge exception: %s", e.what());
//                return;
//            }
//            heads_color_images[i] = cv_ptr->image;
//        }
//
//        // depth image
//        sensor_msgs::ImageConstPtr msgPtr = boost::shared_ptr<sensor_msgs::Image const>(&(face_positions->cdia.head_detections[i].depth_image), voidDeleter);
//        try
//        {
//            cv_ptr = cv_bridge::toCvShare(msgPtr, sensor_msgs::image_encodings::TYPE_32FC3);
//        }
//        catch (cv_bridge::Exception& e)
//        {
//            ROS_ERROR("cv_bridge exception: %s", e.what());
//            return;
//        }
//        heads_depth_images[i] = cv_ptr->image;
//
//        // face bounding boxes
//        face_bounding_boxes[i].resize(face_positions->cdia.head_detections[i].face_detections.size());
//        crop_bounding_boxes[i].resize(face_positions->cdia.head_detections[i].face_detections.size());
//        for (uint j=0; j<face_bounding_boxes[i].size(); j++)
//        {
//            const cob_perception_msgs::Rect& source_rect = face_positions->cdia.head_detections[i].face_detections[j];
//            cv::Rect rect(source_rect.x, source_rect.y, source_rect.width, source_rect.height);
//            face_bounding_boxes[i][j] = rect;
//            crop_bounding_boxes[i][j] = bb;
//        }
//
//        // head bounding box
//        const cob_perception_msgs::Rect& source_rect = face_positions->cdia.head_detections[i].head_detection;
//        cv::Rect rect(source_rect.x, source_rect.y, source_rect.width, source_rect.height);
//        head_bounding_boxes[i] = rect;
//    }
//
//    std::vector<cv::Mat> crops;
//    //std::vector<std::vector<cv::Rect> > crop_bounding_boxes;
//    crops.resize(face_positions->crops.size());
//    for(int k=0;k<face_positions->crops.size();k++)
//    {
//        //temporary TODO:
//        sensor_msgs::ImageConstPtr msgPtr = boost::shared_ptr<sensor_msgs::Image const>(&(face_positions->crops[k]), voidDeleter);
//        try
//        {
//            cv_ptr = cv_bridge::toCvShare(msgPtr, sensor_msgs::image_encodings::BGR8);
//        }
//        catch (cv_bridge::Exception& e)
//        {
//            ROS_ERROR("cv_bridge exception: %s", e.what());
//            return;
//        }
//        crops[k]= cv_ptr->image;
//        //temporary TODO
//        //crop_bounding_boxes[k][0]=bb;
//
//    }
//
//
//    // --- face recognition ---
//    std::vector< std::vector<std::string> > identification_labels;
//    bool identification_failed = false;
//    if (enable_face_recognition_ == true)
//    {
//        // recognize faces
//        //unsigned long result_state = face_recognizer_.recognizeFaces(heads_color_images, face_bounding_boxes, identification_labels);
//        unsigned long result_state = face_recognizer_.recognizeFaces(crops,crop_bounding_boxes, identification_labels);
//        if (result_state == ipa_Utils::RET_FAILED)
//        {
//            ROS_ERROR("FaceRecognizerNode::face_positions_callback: Please load a face recognition model at first.");
//            identification_failed = true;
//        }
//    }
//    if (enable_face_recognition_ == false || identification_failed == true)
//    {
//        // label all image unknown if face recognition disabled
//        identification_labels.resize(face_positions->cdia.head_detections.size());
//        for (uint i=0; i<identification_labels.size(); i++)
//        {
//            identification_labels[i].resize(face_positions->cdia.head_detections[i].face_detections.size());
//            for (uint j=0; j<identification_labels[i].size(); j++)
//                identification_labels[i][j] = "Unknown";
//        }
//    }
//
//    // --- publish detection message ---
//    cob_perception_msgs::DetectionArray detection_msg;
//    detection_msg.header = face_positions->header;
//
//    // prepare message
//    for (int head=0; head<(int)head_bounding_boxes.size(); head++)
//    {
//        if (face_bounding_boxes[head].size() == 0)
//        {
//            // no faces detected in head region -> publish head position
//            cob_perception_msgs::Detection det;
//            cv::Rect& head_bb = head_bounding_boxes[head];
//            // set 3d position of head's center
//            bool valid_3d_position = determine3DFaceCoordinates(heads_depth_images[head], 0.5*(float)head_bb.width, 0.5*(float)head_bb.height, det.pose.pose.position, 6);
//            if (valid_3d_position==false)
//                continue;
//            // write bounding box
//            det.mask.roi.x = head_bb.x;           det.mask.roi.y = head_bb.y;
//            det.mask.roi.width = head_bb.width;   det.mask.roi.height = head_bb.height;
//            // set label
//            det.label="UnknownHead";
//            // set origin of detection
//            det.detector = "head";
//            // header
//            det.header = face_positions->header;
//            // add to message
//            detection_msg.detections.push_back(det);
//        }
//        else
//        {
//            // process all faces in head region
//            for (int face=0; face<(int)face_bounding_boxes[head].size(); face++)
//            {
//                cob_perception_msgs::Detection det;
//                cv::Rect& head_bb = head_bounding_boxes[head];
//                cv::Rect& face_bb = face_bounding_boxes[head][face];
//                // set 3d position of head's center
//                bool valid_3d_position = determine3DFaceCoordinates(heads_depth_images[head], face_bb.x+0.5*(float)face_bb.width, face_bb.y+0.5*(float)face_bb.height, det.pose.pose.position, 6);
//                if (valid_3d_position==false)
//                    continue;
//                // write bounding box
//                det.mask.roi.x = head_bb.x+face_bb.x; det.mask.roi.y = head_bb.y+face_bb.y;
//                det.mask.roi.width = face_bb.width;   det.mask.roi.height = face_bb.height;
//                // set label
//                det.label=identification_labels[head][face];
//                // set origin of detection
//                det.detector = "face";
//                // header
//                det.header = face_positions->header;
//                // add to message
//                detection_msg.detections.push_back(det);
//            }
//        }
//    }
//
//    // publish message
//    face_recognition_publisher_.publish(detection_msg);
//}
void FaceRecognizerNode::facePositionsCallback(const cob_perception_msgs::ColorDepthImageArray::ConstPtr& face_positions)
{
    //    Timer tim;
    //    tim.start();

    // receive head and face positions, recognize faces within the face regions, and finally publish the detected and recognized faces

    // --- convert color image patches of head regions and contained face bounding boxes ---
    cv_bridge::CvImageConstPtr cv_ptr;
    std::vector<cv::Mat> heads_color_images;
    heads_color_images.resize(face_positions->head_detections.size());
    std::vector<cv::Mat> heads_depth_images;
    heads_depth_images.resize(face_positions->head_detections.size());
    std::vector<std::vector<cv::Rect> > face_bounding_boxes;
    face_bounding_boxes.resize(face_positions->head_detections.size());
    std::vector<cv::Rect> head_bounding_boxes;
    head_bounding_boxes.resize(face_positions->head_detections.size());
    for (unsigned int i = 0; i < face_positions->head_detections.size(); i++)
    {
        // color image
        if (enable_face_recognition_ == true)
        {
            sensor_msgs::ImageConstPtr msgPtr = boost::shared_ptr<sensor_msgs::Image const>(&(face_positions->head_detections[i].color_image), voidDeleter);
            try
            {
                cv_ptr = cv_bridge::toCvShare(msgPtr, sensor_msgs::image_encodings::BGR8);
            } catch (cv_bridge::Exception& e)
            {
                ROS_ERROR("cv_bridge exception: %s", e.what());
                return;
            }
            heads_color_images[i] = cv_ptr->image;
        }

        // depth image
        sensor_msgs::ImageConstPtr msgPtr = boost::shared_ptr<sensor_msgs::Image const>(&(face_positions->head_detections[i].depth_image), voidDeleter);
        try
        {
            cv_ptr = cv_bridge::toCvShare(msgPtr, sensor_msgs::image_encodings::TYPE_32FC3);
        } catch (cv_bridge::Exception& e)
        {
            ROS_ERROR("cv_bridge exception: %s", e.what());
            return;
        }
        heads_depth_images[i] = cv_ptr->image;

        // face bounding boxes
        face_bounding_boxes[i].resize(face_positions->head_detections[i].face_detections.size());
        for (uint j = 0; j < face_bounding_boxes[i].size(); j++)
        {
            const cob_perception_msgs::Rect& source_rect = face_positions->head_detections[i].face_detections[j];
            cv::Rect rect(source_rect.x, source_rect.y, source_rect.width, source_rect.height);
            face_bounding_boxes[i][j] = rect;
        }

        // head bounding box
        const cob_perception_msgs::Rect& source_rect = face_positions->head_detections[i].head_detection;
        cv::Rect rect(source_rect.x, source_rect.y, source_rect.width, source_rect.height);
        head_bounding_boxes[i] = rect;
    }

    // --- face recognition ---
    std::vector<std::vector<std::string> > identification_labels;
    bool identification_failed = false;
    if (enable_face_recognition_ == true)
    {
        // recognize faces
        //unsigned long result_state = face_recognizer_.recognizeFaces(heads_color_images, face_bounding_boxes, identification_labels);

        //timeval t1,t2;
        //gettimeofday(&t1,NULL);
        unsigned long result_state = face_recognizer_.recognizeFaces(heads_color_images, heads_depth_images, face_bounding_boxes, identification_labels);
        //gettimeofday(&t2,NULL);
        //std::cout<<(t2.tv_sec - t1.tv_sec) * 1000.0<<std::endl;
        if (result_state == ipa_Utils::RET_FAILED)
        {
            ROS_ERROR("FaceRecognizerNode::face_positions_callback: Please load a face recognition model first.");
            identification_failed = true;
        }
    }
    if (enable_face_recognition_ == false || identification_failed == true)
    {
        // label all faces as unknown if face recognition is disabled or failed
        identification_labels.resize(face_positions->head_detections.size());
        for (uint i = 0; i < identification_labels.size(); i++)
        {
            identification_labels[i].resize(face_positions->head_detections[i].face_detections.size());
            for (uint j = 0; j < identification_labels[i].size(); j++)
                identification_labels[i][j] = "Unknown";
        }
    }

    // --- publish detection message ---
    cob_perception_msgs::DetectionArray detection_msg;
    detection_msg.header = face_positions->header;

    // prepare message
    for (int head = 0; head < (int)head_bounding_boxes.size(); head++)
    {
        if (face_bounding_boxes[head].size() == 0)
        {
            // no faces detected in head region -> publish head position
            cob_perception_msgs::Detection det;
            cv::Rect& head_bb = head_bounding_boxes[head];
            // set 3d position of head's center
            bool valid_3d_position = determine3DFaceCoordinates(heads_depth_images[head], 0.5 * (float)head_bb.width, 0.5 * (float)head_bb.height, det.pose.pose.position, 6);
            if (valid_3d_position == false)
                continue;
            det.pose.pose.orientation.x = 0.;
            det.pose.pose.orientation.y = 0.;
            det.pose.pose.orientation.z = 0.;
            det.pose.pose.orientation.w = 1.;
            // write bounding box
            det.mask.roi.x = head_bb.x;
            det.mask.roi.y = head_bb.y;
            det.mask.roi.width = head_bb.width;
            det.mask.roi.height = head_bb.height;
            // set label
            det.label = "UnknownHead";
            // set origin of detection
            det.detector = "head";
            // header
            det.header = face_positions->header;
            // add to message
            detection_msg.detections.push_back(det);
        }
        else
        {
            // process all faces in head region
            for (int face = 0; face < (int)face_bounding_boxes[head].size(); face++)
            {
                cob_perception_msgs::Detection det;
                cv::Rect& head_bb = head_bounding_boxes[head];
                cv::Rect& face_bb = face_bounding_boxes[head][face];
                // set 3d position of the face's center
                bool valid_3d_position = determine3DFaceCoordinates(heads_depth_images[head], face_bb.x + 0.5 * (float)face_bb.width, face_bb.y + 0.5 * (float)face_bb.height,
                        det.pose.pose.position, 6);
                if (valid_3d_position == false)
                    continue;
                det.pose.pose.orientation.x = 0.;
                det.pose.pose.orientation.y = 0.;
                det.pose.pose.orientation.z = 0.;
                det.pose.pose.orientation.w = 1.;
                // write bounding box (in full image coordinates)
                det.mask.roi.x = head_bb.x + face_bb.x;
                det.mask.roi.y = head_bb.y + face_bb.y;
                det.mask.roi.width = face_bb.width;
                det.mask.roi.height = face_bb.height;
                // set label
                det.label = identification_labels[head][face];
                // set origin of detection
                det.detector = "face";
                // header
                det.header = face_positions->header;
                // add to message
                detection_msg.detections.push_back(det);
            }
        }
    }

    // publish message
    face_recognition_publisher_.publish(detection_msg);

    if (display_timing_ == true)
        ROS_INFO("%d FaceRecognition: Time stamp of pointcloud message: %f. Delay: %f.", face_positions->header.seq, face_positions->header.stamp.toSec(),
                ros::Time::now().toSec() - face_positions->header.stamp.toSec());
    //    ROS_INFO("Face recognition took %f ms", tim.getElapsedTimeInMilliSec());
}
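
// A minimal consumer of the published result could look like this (sketch, assuming it runs in
// the same namespace as this node):
//   void detectionsCallback(const cob_perception_msgs::DetectionArray::ConstPtr& msg)
//   {
//     for (size_t i = 0; i < msg->detections.size(); ++i)
//       ROS_INFO("%s at (%.2f, %.2f, %.2f)", msg->detections[i].label.c_str(), msg->detections[i].pose.pose.position.x,
//                msg->detections[i].pose.pose.position.y, msg->detections[i].pose.pose.position.z);
//   }
//   ros::Subscriber sub = nh.subscribe("face_recognitions", 1, detectionsCallback);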

bool FaceRecognizerNode::determine3DFaceCoordinates(cv::Mat& depth_image, int center2Dx, int center2Dy, geometry_msgs::Point& center3D, int search_radius)
{
    // Read the 3D world coordinates at the requested pixel; if that pixel does not contain valid
    // coordinates, search the neighborhood in rings of growing size (up to search_radius) for a valid pixel.
    cv::Point3f p;
    bool valid_coordinates = false;
    for (int d = 0; (d < search_radius && !valid_coordinates); d++)
    {
        for (int v = -d; (v <= d && !valid_coordinates); v++)
        {
            for (int u = -d; (u <= d && !valid_coordinates); u++)
            {
                // only visit the border of the current ring and skip pixels outside the image
                if ((abs(v) != d && abs(u) != d) || center2Dx + u < 0 || center2Dx + u >= depth_image.cols || center2Dy + v < 0 || center2Dy + v >= depth_image.rows)
                    continue;

                // each pixel of the depth image stores a 3D point (x, y, z)
                p = depth_image.at<cv::Point3f>(center2Dy + v, center2Dx + u);
                if (!isnan(p.x) && !isnan(p.y) && p.z != 0.f)
                {
                    valid_coordinates = true;
                    center3D.x = p.x;
                    center3D.y = p.y;
                    center3D.z = p.z;
                }
            }
        }
    }

    return valid_coordinates;
}

void FaceRecognizerNode::loadModelServerCallback(const cob_people_detection::loadModelGoalConstPtr& goal)
{
    // read the list of labels of persons that shall be recognized from the goal message
    std::vector<std::string> identification_labels_to_recognize(goal->labels.size());
    for (int i = 0; i < (int)goal->labels.size(); i++)
        identification_labels_to_recognize[i] = goal->labels[i];

    // load the corresponding recognition model
    unsigned long result_state = face_recognizer_.loadRecognitionModel(identification_labels_to_recognize);

    cob_people_detection::loadModelResult result;
    if (result_state == ipa_Utils::RET_OK)
        load_model_server_->setSucceeded(result, "Model successfully loaded.");
    else
        load_model_server_->setAborted(result, "Loading new model failed.");
}
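
// A client can trigger (re)loading of a recognition model via the action interface, e.g.
// (sketch, assuming the standard actionlib-generated types for the loadModel action):
//   #include <actionlib/client/simple_action_client.h>
//   #include <cob_people_detection/loadModelAction.h>
//
//   actionlib::SimpleActionClient<cob_people_detection::loadModelAction> client("load_model_server", true);
//   client.waitForServer();
//   cob_people_detection::loadModelGoal goal;
//   goal.labels.push_back("Alice");   // illustrative label
//   client.sendGoal(goal);
//   client.waitForResult(ros::Duration(30.0));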

//#######################
//#### main program  ####
int main(int argc, char** argv)
{
    // Initialize ROS, specify name of node
    ros::init(argc, argv, "face_recognizer");

    // Create a handle for this node, initialize node
    ros::NodeHandle nh;

    // Create FaceRecognizerNode class instance
    FaceRecognizerNode face_recognizer_node(nh);

    // Create action nodes
    //DetectObjectsAction detect_action_node(object_detection_node, nh);
    //AcquireObjectImageAction acquire_image_node(object_detection_node, nh);
    //TrainObjectAction train_object_node(object_detection_node, nh);

    ros::spin();

    return 0;
}

