face_recognition_nodelet.cpp
1 /*********************************************************************
2 * Software License Agreement (BSD License)
3 *
4 * Copyright (c) 2017, Yuki Furuta.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 *
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above
14 * copyright notice, this list of conditions and the following
15 * disclaimer in the documentation and/or other materials provided
16 * with the distribution.
17 * * Neither the name of the Kei Okada nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
25 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
29 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
31 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32 * POSSIBILITY OF SUCH DAMAGE.
33 *********************************************************************/
34 
35 #include <algorithm>
36 #include <boost/filesystem.hpp>
37 #include <boost/shared_ptr.hpp>
38 #include <boost/thread.hpp>
39 #include <boost/version.hpp>
40 #include <iomanip>
41 #include <map>
42 #include <stdexcept>
43 #include <string>
44 #include <opencv2/opencv.hpp>
45 #include <pwd.h>
46 #include <vector>
47 
48 #include <cv_bridge/cv_bridge.h>
49 #include <dynamic_reconfigure/server.h>
50 #include <image_transport/image_transport.h>
51 #include <image_transport/subscriber_filter.h>
52 #include <message_filters/subscriber.h>
53 #include <message_filters/synchronizer.h>
54 #include <message_filters/sync_policies/approximate_time.h>
55 #include <message_filters/sync_policies/exact_time.h>
56 #include <sensor_msgs/image_encodings.h>
57 
58 #include <opencv_apps/nodelet.h>
59 #include <opencv_apps/FaceArrayStamped.h>
60 #include <opencv_apps/FaceRecognitionTrain.h>
61 #include <opencv_apps/FaceRecognitionConfig.h>
62 
63 namespace enc = sensor_msgs::image_encodings;
64 #if BOOST_VERSION < 105000
65 namespace fs = boost::filesystem3; // for hydro
66 #else
67 namespace fs = boost::filesystem;
68 #endif
69 
70 #if CV_MAJOR_VERSION >= 3
71 #include <opencv2/face.hpp>
72 namespace face = cv::face;
73 #else
74 namespace face = cv;
75 #endif
76 
77 #if CV_MAJOR_VERSION >= 4
78 #include <opencv2/imgcodecs/legacy/constants_c.h> // include CV_LOAD_IMAGE_COLOR
79 #include <opencv2/imgproc/imgproc_c.h> // include CV_AA
80 #endif
81 
82 // utility for resolving path
83 namespace boost
84 {
85 #if BOOST_VERSION < 105000
86 namespace filesystem3
87 { // for hydro
88 #else
89 namespace filesystem
90 {
91 #endif
92 template <>
93 path& path::append<typename path::iterator>(typename path::iterator lhs, typename path::iterator rhs,
94  const codecvt_type& cvt)
95 {
96  for (; lhs != rhs; ++lhs)
97  *this /= *lhs;
98  return *this;
99 }
100 path user_expanded_path(const path& p)
101 {
102  path::const_iterator it(p.begin());
103  std::string user_dir = (*it).string();
104  if (user_dir.length() == 0 || user_dir[0] != '~')
105  return p;
106  path ret;
107  char* homedir;
108  if (user_dir.length() == 1)
109  {
110  homedir = getenv("HOME");
111  if (homedir == nullptr)
112  {
113  homedir = getpwuid(getuid())->pw_dir;
114  }
115  }
116  else
117  {
118  std::string uname = user_dir.substr(1, user_dir.length());
119  passwd* pw = getpwnam(uname.c_str());
120  if (pw == nullptr)
121  return p;
122  homedir = pw->pw_dir;
123  }
124  ret = path(std::string(homedir));
125  return ret.append(++it, p.end(), path::codecvt());
126 }
127 } // namespace filesystem
128 } // namespace boost
129 
130 namespace opencv_apps
131 {
132 class LabelMapper
133 {
134  std::map<std::string, int> m_;
135 
136 public:
137  void add(std::vector<std::string>& l)
138  {
139  int id = 0;
140  for (std::map<std::string, int>::const_iterator it = m_.begin(); it != m_.end(); ++it)
141  {
142  if (id < it->second)
143  id = it->second + 1;
144  }
145  for (const std::string& i : l)
146  {
147  if (m_.find(i) == m_.end())
148  {
149  m_[i] = id;
150  id++;
151  }
152  }
153  }
154  std::vector<int> get(std::vector<std::string>& v)
155  {
156  std::vector<int> ret(v.size());
157  for (size_t i = 0; i < v.size(); ++i)
158  {
159  ret[i] = m_[v[i]];
160  }
161  return ret;
162  }
163  std::string lookup(int id)
164  {
165  for (std::map<std::string, int>::const_iterator it = m_.begin(); it != m_.end(); ++it)
166  {
167  if (it->second == id)
168  return it->first;
169  }
170  return "nan";
171  }
172  const std::map<std::string, int>& getMap() const
173  {
174  return m_;
175  }
176 
177  void debugPrint()
178  {
179  ROS_WARN_STREAM("label mapper: debug print:");
180  for (std::map<std::string, int>::const_iterator it = m_.begin(); it != m_.end(); ++it)
181  {
182  ROS_WARN_STREAM("\t" << it->first << ": " << it->second);
183  }
184  ROS_WARN_STREAM("label mapper: debug print end");
185  }
186 };
187 
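 // Storage keeps training data on disk as <data_dir>/<label>/<label>_000000.jpg:
 // load() treats every first-level directory as a label and reads all files below
 // it, while save() appends new images using a zero-padded per-label file count.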
188 class Storage
189 {
190  fs::path base_dir_;
191 
192 public:
193  Storage(const fs::path& base_dir)
194  {
195  base_dir_ = fs::user_expanded_path(base_dir);
196  if (!fs::exists(base_dir_))
197  {
198  init();
199  }
200  if (!fs::is_directory(base_dir_))
201  {
202  std::stringstream ss;
203  ss << base_dir_ << " is not a directory";
204  throw std::runtime_error(ss.str());
205  }
206  };
207  void init()
208  {
209  if (!fs::create_directories(base_dir_))
210  {
211  std::stringstream ss;
212  ss << "failed to initialize directory: " << base_dir_;
213  throw std::runtime_error(ss.str());
214  }
215  }
216 
217  void load(std::vector<cv::Mat>& images, std::vector<std::string>& labels, bool append = true)
218  {
219  if (!append)
220  {
221  images.clear();
222  labels.clear();
223  }
224  fs::directory_iterator end;
225  for (fs::directory_iterator it(base_dir_); it != end; ++it)
226  {
227  if (fs::is_directory(*it))
228  {
229  std::string label = it->path().stem().string();
230  for (fs::directory_iterator cit(it->path()); cit != end; ++cit)
231  {
232  if (fs::is_directory(*cit))
233  continue;
234  fs::path file_path = cit->path();
235  try
236  {
237  cv::Mat img = cv::imread(file_path.string(), CV_LOAD_IMAGE_COLOR);
238  labels.push_back(label);
239  images.push_back(img);
240  }
241  catch (cv::Exception& e)
242  {
243  ROS_ERROR_STREAM("Error: load image from " << file_path << ": " << e.what());
244  }
245  }
246  }
247  }
248  }
249 
250  void save(const std::vector<cv::Mat>& images, const std::vector<std::string>& labels)
251  {
252  if (images.size() != labels.size())
253  {
254  throw std::length_error("images.size() != labels.size()");
255  }
256  for (size_t i = 0; i < images.size(); ++i)
257  {
258  save(images[i], labels[i]);
259  }
260  }
261 
262  void save(const cv::Mat& image, const std::string& label)
263  {
264  fs::path img_dir = base_dir_ / fs::path(label);
265  if (!fs::exists(img_dir) && !fs::create_directories(img_dir))
266  {
267  std::stringstream ss;
268  ss << "failed to initialize directory: " << img_dir;
269  throw std::runtime_error(ss.str());
270  }
271  fs::directory_iterator end;
272  int file_count = 0;
273  for (fs::directory_iterator it(img_dir); it != end; ++it)
274  {
275  if (!fs::is_directory(*it))
276  file_count++;
277  }
278 
279  std::stringstream ss;
280  // data_dir/person_label/person_label_123456.jpg
281  ss << label << "_" << std::setw(6) << std::setfill('0') << file_count << ".jpg";
282  fs::path file_path = img_dir / ss.str();
283  ROS_INFO_STREAM("saving image to: " << file_path);
284  try
285  {
286  cv::imwrite(file_path.string(), image);
287  }
288  catch (cv::Exception& e)
289  {
290  ROS_ERROR_STREAM("Error: save image to " << file_path << ": " << e.what());
291  }
292  }
293 };
294 
295 class FaceRecognitionNodelet : public opencv_apps::Nodelet
296 {
297  typedef opencv_apps::FaceRecognitionConfig Config;
298  typedef dynamic_reconfigure::Server<Config> Server;
299  typedef message_filters::sync_policies::ExactTime<sensor_msgs::Image, opencv_apps::FaceArrayStamped> SyncPolicy;
300  typedef message_filters::sync_policies::ApproximateTime<sensor_msgs::Image, opencv_apps::FaceArrayStamped>
301  ApproximateSyncPolicy;
302 
303  Config config_;
304  boost::shared_ptr<Server> cfg_srv_;
305  boost::shared_ptr<image_transport::ImageTransport> it_;
306  image_transport::SubscriberFilter img_sub_;
307  message_filters::Subscriber<opencv_apps::FaceArrayStamped> face_sub_;
308  ros::Publisher debug_img_pub_;
309  ros::Publisher face_pub_;
310  ros::ServiceServer train_srv_;
311  boost::shared_ptr<message_filters::Synchronizer<SyncPolicy> > sync_;
312  boost::shared_ptr<message_filters::Synchronizer<ApproximateSyncPolicy> > async_;
313 
314  bool use_saved_data_;
315  bool save_train_data_;
316  bool use_async_;
317  int queue_size_;
318  double face_padding_;
319  std::string data_dir_;
320  boost::mutex mutex_;
321 
322  boost::shared_ptr<LabelMapper> label_mapper_;
323  boost::shared_ptr<Storage> storage_;
324  cv::Size face_model_size_;
325  cv::Ptr<face::FaceRecognizer> model_;
326 
327  void drawFace(cv::Mat& img, const opencv_apps::Face& face)
328  {
329  cv::Rect r(int(face.face.x - face.face.width / 2.0 - face.face.width * face_padding_ / 2.0),
330  int(face.face.y - face.face.height / 2.0 - face.face.height * face_padding_ / 2.0),
331  int(face.face.width + face.face.width * face_padding_),
332  int(face.face.height + face.face.height * face_padding_));
333  cv::Scalar color(0.0, 0.0, 255.0);
334  int boldness = 2;
335  cv::rectangle(img, r.tl(), r.br(), color, boldness, CV_AA);
336 
337  double font_scale = 1.5;
338  int text_height = 20;
339  cv::Point text_bl;
340  if (r.br().y + text_height > img.rows)
341  text_bl = r.tl() + cv::Point(0, -text_height);
342  else
343  text_bl = r.br() + cv::Point(-r.width, text_height);
344  std::stringstream ss;
345  ss << face.label << " (" << std::fixed << std::setprecision(2) << face.confidence << ")";
346  cv::putText(img, ss.str(), text_bl, cv::FONT_HERSHEY_PLAIN, font_scale, color, boldness, CV_AA);
347  }
348 
349  void extractImage(const cv::Mat& img, const opencv_apps::Rect& rect, cv::Mat& ret, double padding = 0.0)
350  {
351  int x = std::max(0, int(rect.x - rect.width / 2.0 - rect.width * padding / 2.0));
352  int y = std::max(0, int(rect.y - rect.height / 2.0 - rect.height * padding / 2.0));
353  cv::Rect r(x, y, std::min(img.cols - x, int(rect.width + rect.width * padding)),
354  std::min(img.rows - y, int(rect.height + rect.height * padding)));
355  ret = img(r);
356  }
357 
358  void extractImage(const cv::Mat& img, const opencv_apps::Face& face, cv::Mat& ret, double padding = 0.0)
359  {
360  extractImage(img, face.face, ret, padding);
361  }
362 
363  void retrain()
364  {
365  NODELET_DEBUG("retrain");
366  std::vector<cv::Mat> images;
367  std::vector<std::string> labels;
368  train(images, labels);
369  }
370 
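 // train() optionally merges previously saved images from disk, resizes every
 // sample to face_model_size_, converts it to grayscale and retrains model_.
 // Only the images passed in by the caller (the first new_image_size entries)
 // are written back to storage when save_train_data_ is enabled.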
371  void train(std::vector<cv::Mat>& images, std::vector<std::string>& labels)
372  {
373  size_t new_image_size = images.size();
374 
375  if (use_saved_data_)
376  {
377  storage_->load(images, labels);
378  }
379 
380  if (images.empty())
381  return;
382 
383  std::vector<cv::Mat> resized_images(images.size());
384  for (size_t i = 0; i < images.size(); ++i)
385  {
386  cv::resize(images[i], resized_images[i], face_model_size_, 0, 0, cv::INTER_CUBIC);
387  cv::cvtColor(resized_images[i], resized_images[i], CV_BGR2GRAY);
388  }
389 
390  label_mapper_->add(labels);
391  std::vector<int> ids = label_mapper_->get(labels);
392  NODELET_INFO_STREAM("training with " << images.size() << " images");
393  // label_mapper_->debugPrint();
394  model_->train(resized_images, ids);
395 
396  if (save_train_data_ && new_image_size > 0)
397  {
398  std::vector<cv::Mat> new_images(images.begin(), images.begin() + new_image_size);
399  std::vector<std::string> new_labels(labels.begin(), labels.begin() + new_image_size);
400  storage_->save(new_images, new_labels);
401  }
402  }
403 
404  void predict(const cv::Mat& img, int& label, double& confidence)
405  {
406  cv::Mat resized_img;
407  cv::resize(img, resized_img, face_model_size_, 0, 0, cv::INTER_CUBIC);
408  cv::cvtColor(resized_img, resized_img, CV_BGR2GRAY);
409  model_->predict(resized_img, label, confidence);
410  }
411 
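 // Synchronized image/faces callback: each detected face is cropped with
 // face_padding_, classified by model_, and republished with its label and
 // confidence; a debug image with drawn boxes is published only when subscribed.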
412  void faceImageCallback(const sensor_msgs::Image::ConstPtr& image,
413  const opencv_apps::FaceArrayStamped::ConstPtr& faces)
414  {
415  NODELET_DEBUG("faceImageCallback");
416  boost::mutex::scoped_lock lock(mutex_);
417 
418  // check if the face data is being trained
419  if (label_mapper_->getMap().empty())
420  {
421  NODELET_WARN_THROTTLE(1.0, "Face data is not trained. Please train first.");
422  return;
423  }
424 
425  // check if need to draw and publish debug image
426  bool publish_debug_image = debug_img_pub_.getNumSubscribers() > 0;
427 
428  try
429  {
430  cv::Mat cv_img = cv_bridge::toCvShare(image, enc::BGR8)->image;
431  opencv_apps::FaceArrayStamped ret_msg = *faces;
432  for (size_t i = 0; i < faces->faces.size(); ++i)
433  {
434  cv::Mat face_img, resized_image;
435  extractImage(cv_img, faces->faces[i], face_img, face_padding_);
436 
437  int label_id = -1;
438  double confidence = 0.0;
439  predict(face_img, label_id, confidence);
440  if (label_id == -1)
441  continue;
442  ret_msg.faces[i].label = label_mapper_->lookup(label_id);
443  ret_msg.faces[i].confidence = confidence;
444 
445  // draw debug image
446  if (publish_debug_image)
447  drawFace(cv_img, ret_msg.faces[i]);
448  }
449  face_pub_.publish(ret_msg);
450  if (publish_debug_image)
451  {
452  sensor_msgs::Image::Ptr debug_img = cv_bridge::CvImage(image->header, enc::BGR8, cv_img).toImageMsg();
453  debug_img_pub_.publish(debug_img);
454  NODELET_DEBUG("Published debug image");
455  }
456  }
457  catch (cv::Exception& e)
458  {
459  NODELET_ERROR_STREAM("error at image processing: " << e.err << " " << e.func << " " << e.file << " " << e.line);
460  }
461  }
462 
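 // "train" service handler: req.labels is expected to be parallel to req.images;
 // req.rects is optional, but when non-empty it must have the same length and
 // each rect is used to crop the face region before training.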
463  bool trainCallback(opencv_apps::FaceRecognitionTrain::Request& req, opencv_apps::FaceRecognitionTrain::Response& res)
464  {
465  boost::mutex::scoped_lock lock(mutex_);
466  try
467  {
468  std::vector<cv::Mat> images(req.images.size());
469  bool use_roi = !req.rects.empty();
470 
471  if (use_roi && req.images.size() != req.rects.size())
472  {
473  throw std::length_error("req.images.size() != req.rects.size()");
474  }
475 
476  for (size_t i = 0; i < req.images.size(); ++i)
477  {
478  sensor_msgs::Image img = req.images[i];
479  images[i] = cv_bridge::toCvCopy(img, enc::BGR8)->image;
480  if (use_roi)
481  {
482  cv::Mat face_img;
483  extractImage(images[i], req.rects[i], face_img);
484  images[i] = face_img;
485  }
486  }
487  std::vector<std::string> labels(req.labels.begin(), req.labels.end());
488  train(images, labels);
489  res.ok = true;
490  return true;
491  }
492  catch (cv::Exception& e)
493  {
494  std::stringstream ss;
495  ss << "error at training: " << e.err << " " << e.func << " " << e.file << " " << e.line;
496  res.ok = false;
497  res.error = ss.str();
498  }
499  return false;
500  }
501 
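 // Dynamic reconfigure handler: recreates the recognizer when model_method
 // ("eigen", "fisher" or "LBPH") or its parameters change, retrains when the
 // model, face model size or data directory changes, and updates the threshold.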
502  void configCallback(Config& config, uint32_t level)
503  {
504  boost::mutex::scoped_lock lock(mutex_);
505 
506  bool need_recreate_model = false;
507  bool need_retrain = false;
508 
509  use_saved_data_ = config.use_saved_data;
510  save_train_data_ = config.save_train_data;
511  face_padding_ = config.face_padding;
512 
513  if (face_model_size_.width != config.face_model_width)
514  {
515  face_model_size_.width = config.face_model_width;
516  need_retrain = true;
517  }
518  if (face_model_size_.height != config.face_model_height)
519  {
520  face_model_size_.height = config.face_model_height;
521  need_retrain = true;
522  }
523 
524  if (data_dir_ != config.data_dir)
525  {
526  data_dir_ = config.data_dir;
527  need_retrain = true;
528  label_mapper_.reset(new LabelMapper());
529  storage_.reset(new Storage(fs::path(data_dir_)));
530  }
531 
532  if (config_.model_method != config.model_method)
533  {
534  need_recreate_model = true;
535  }
536 
537  if (config_.model_num_components != config.model_num_components)
538  {
539  need_recreate_model = true;
540  }
541 
542  if (config.model_method == "LBPH" &&
543  (config_.lbph_radius != config.lbph_radius || config_.lbph_neighbors != config.lbph_neighbors ||
544  config_.lbph_grid_x != config.lbph_grid_x || config_.lbph_grid_y != config.lbph_grid_y))
545  {
546  need_recreate_model = true;
547  }
548 
549  if (need_recreate_model)
550  {
551  try
552  {
553  if (config.model_method == "eigen")
554  {
555 // https://docs.opencv.org/3.3.1/da/d60/tutorial_face_main.html
556 #if (CV_MAJOR_VERSION >= 4) || (CV_MAJOR_VERSION >= 3 && CV_MINOR_VERSION >= 3)
557  model_ = face::EigenFaceRecognizer::create(config.model_num_components, config.model_threshold);
558 #else
559  model_ = face::createEigenFaceRecognizer(config.model_num_components, config.model_threshold);
560 #endif
561  }
562  else if (config.model_method == "fisher")
563  {
564 #if (CV_MAJOR_VERSION >= 4) || (CV_MAJOR_VERSION >= 3 && CV_MINOR_VERSION >= 3)
565  model_ = face::FisherFaceRecognizer::create(config.model_num_components, config.model_threshold);
566 #else
567  model_ = face::createFisherFaceRecognizer(config.model_num_components, config.model_threshold);
568 #endif
569  }
570  else if (config.model_method == "LBPH")
571  {
572 #if (CV_MAJOR_VERSION >= 4) || (CV_MAJOR_VERSION >= 3 && CV_MINOR_VERSION >= 3)
573  model_ = face::LBPHFaceRecognizer::create(config.lbph_radius, config.lbph_neighbors, config.lbph_grid_x,
574  config.lbph_grid_y);
575 #else
576  model_ = face::createLBPHFaceRecognizer(config.lbph_radius, config.lbph_neighbors, config.lbph_grid_x,
577  config.lbph_grid_y);
578 #endif
579  }
580  need_retrain = true;
581  }
582  catch (cv::Exception& e)
583  {
584  NODELET_ERROR_STREAM("Error: create face recognizer: " << e.what());
585  }
586  }
587 
588  if (need_retrain)
589  {
590  try
591  {
592  retrain();
593  }
594  catch (cv::Exception& e)
595  {
596  NODELET_ERROR_STREAM("Error: train: " << e.what());
597  }
598  }
599 
600  if (config_.model_threshold != config.model_threshold)
601  {
602  try
603  {
604 #if CV_MAJOR_VERSION >= 3
605  if (face::BasicFaceRecognizer* model = dynamic_cast<face::BasicFaceRecognizer*>(model_.get()))
606  {
607  model->setThreshold(config.model_threshold);
608  }
609  else if (face::LBPHFaceRecognizer* model = dynamic_cast<face::LBPHFaceRecognizer*>(model_.get()))
610  {
611  model->setThreshold(config.model_threshold);
612  }
613 #else
614  model_->set("threshold", config.model_threshold);
615 #endif
616  }
617  catch (cv::Exception& e)
618  {
619  NODELET_ERROR_STREAM("Error: set threshold: " << e.what());
620  }
621  }
622  config_ = config;
623  }
624 
625  void subscribe() // NOLINT(modernize-use-override)
626  {
627  NODELET_DEBUG("subscribe");
628  img_sub_.subscribe(*it_, "image", 1);
629  face_sub_.subscribe(*nh_, "faces", 1);
630  if (use_async_)
631  {
632  async_ = boost::make_shared<message_filters::Synchronizer<ApproximateSyncPolicy> >(queue_size_);
633  async_->connectInput(img_sub_, face_sub_);
634  async_->registerCallback(boost::bind(&FaceRecognitionNodelet::faceImageCallback, this, _1, _2));
635  }
636  else
637  {
638  sync_ = boost::make_shared<message_filters::Synchronizer<SyncPolicy> >(queue_size_);
639  sync_->connectInput(img_sub_, face_sub_);
640  sync_->registerCallback(boost::bind(&FaceRecognitionNodelet::faceImageCallback, this, _1, _2));
641  }
642  }
643 
644  void unsubscribe() // NOLINT(modernize-use-override)
645  {
646  NODELET_DEBUG("unsubscribe");
647  img_sub_.unsubscribe();
648  face_sub_.unsubscribe();
649  }
650 
651 public:
652  virtual void onInit() // NOLINT(modernize-use-override)
653  {
654  Nodelet::onInit();
655 
656  // variables
657  face_model_size_ = cv::Size(190, 90);
658 
659  // dynamic reconfigures
660  cfg_srv_ = boost::make_shared<Server>(*pnh_);
661  Server::CallbackType f = boost::bind(&FaceRecognitionNodelet::configCallback, this, _1, _2);
662  cfg_srv_->setCallback(f);
663 
664  // parameters
665  pnh_->param("approximate_sync", use_async_, false);
666  pnh_->param("queue_size", queue_size_, 100);
667 
668  // advertise
669  debug_img_pub_ = advertise<sensor_msgs::Image>(*pnh_, "debug_image", 1);
670  face_pub_ = advertise<opencv_apps::FaceArrayStamped>(*pnh_, "output", 1);
671  train_srv_ = pnh_->advertiseService("train", &FaceRecognitionNodelet::trainCallback, this);
672  it_ = boost::shared_ptr<image_transport::ImageTransport>(new image_transport::ImageTransport(*nh_));
673 
674  onInitPostProcess();
675  }
676 };
677 } // namespace opencv_apps
678 
679 namespace face_recognition
680 {
681 class FaceRecognitionNodelet : public opencv_apps::FaceRecognitionNodelet
682 {
683 public:
684  virtual void onInit() // NOLINT(modernize-use-override)
685  {
686  ROS_WARN("DeprecationWarning: Nodelet face_recognition/face_recognition is deprecated, "
687  "and renamed to opencv_apps/face_recognition.");
688  opencv_apps::FaceRecognitionNodelet::onInit();
689  }
690 };
691 } // namespace face_recognition
692 
693 #include <pluginlib/class_list_macros.h>
694 PLUGINLIB_EXPORT_CLASS(opencv_apps::FaceRecognitionNodelet, nodelet::Nodelet)
695 PLUGINLIB_EXPORT_CLASS(face_recognition::FaceRecognitionNodelet, nodelet::Nodelet)
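Usage sketch (not part of the file above): a minimal client that sends one labeled face image to this nodelet's "train" service, written against the FaceRecognitionTrain request/response fields used in trainCallback(). The service name "face_recognition/train", the image path "face.jpg" and the label "alice" are placeholders that depend on how the nodelet is launched.

#include <ros/ros.h>
#include <std_msgs/Header.h>
#include <cv_bridge/cv_bridge.h>
#include <sensor_msgs/image_encodings.h>
#include <opencv2/opencv.hpp>
#include <opencv_apps/FaceRecognitionTrain.h>

int main(int argc, char** argv)
{
  ros::init(argc, argv, "face_recognition_train_client");
  ros::NodeHandle nh;
  // Placeholder service name; resolve it against the namespace the nodelet runs in.
  ros::ServiceClient client =
      nh.serviceClient<opencv_apps::FaceRecognitionTrain>("face_recognition/train");

  // Load an already cropped face image from disk (placeholder path).
  cv::Mat face = cv::imread("face.jpg", cv::IMREAD_COLOR);

  opencv_apps::FaceRecognitionTrain srv;
  srv.request.images.push_back(
      *cv_bridge::CvImage(std_msgs::Header(), sensor_msgs::image_encodings::BGR8, face).toImageMsg());
  srv.request.labels.push_back("alice");
  // srv.request.rects can stay empty because the image is already cropped;
  // otherwise it must contain one opencv_apps::Rect per image.

  if (client.call(srv) && srv.response.ok)
    ROS_INFO("training succeeded");
  else
    ROS_ERROR("training failed: %s", srv.response.error.c_str());
  return 0;
}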