FindObject.cpp
1 /*
2 Copyright (c) 2011-2014, Mathieu Labbe - IntRoLab - Universite de Sherbrooke
3 All rights reserved.
4 
5 Redistribution and use in source and binary forms, with or without
6 modification, are permitted provided that the following conditions are met:
7  * Redistributions of source code must retain the above copyright
8  notice, this list of conditions and the following disclaimer.
9  * Redistributions in binary form must reproduce the above copyright
10  notice, this list of conditions and the following disclaimer in the
11  documentation and/or other materials provided with the distribution.
12  * Neither the name of the Universite de Sherbrooke nor the
13  names of its contributors may be used to endorse or promote products
14  derived from this software without specific prior written permission.
15 
16 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
17 ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
18 WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
19 DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
20 DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
21 (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
22 LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
23 ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
25 SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 */
27 
28 #include "find_object/FindObject.h"
29 #include "find_object/Settings.h"
30 #include "find_object/utilite/ULogger.h"
31 #include "utilite/UConversion.h"
32 
33 #include "ObjSignature.h"
34 #include "utilite/UDirectory.h"
35 #include "Vocabulary.h"
36 
37 #include <QtCore/QThread>
38 #include <QtCore/QFileInfo>
39 #include <QtCore/QStringList>
40 #include <QtCore/QTime>
41 #include <QtCore/QDir>
42 #include <QGraphicsRectItem>
43 #include <stdio.h>
44 
45 namespace find_object {
46 
47 FindObject::FindObject(bool keepImagesInRAM, QObject * parent) :
48  QObject(parent),
49  vocabulary_(new Vocabulary()),
50  detector_(Settings::createKeypointDetector()),
51  extractor_(Settings::createDescriptorExtractor()),
52  sessionModified_(false),
53  keepImagesInRAM_(keepImagesInRAM)
54 {
55  qRegisterMetaType<find_object::DetectionInfo>("find_object::DetectionInfo");
56  UASSERT(detector_ != 0 && extractor_ != 0);
57 
58  if(Settings::getGeneral_debug())
59  {
60  ULogger::setPrintWhere(true);
61  ULogger::setLevel(ULogger::kDebug);
62  }
63  else
64  {
65  ULogger::setPrintWhere(false);
66  ULogger::setLevel(ULogger::kInfo);
67  }
68 }
69 
70 FindObject::~FindObject() {
71  delete detector_;
72  delete extractor_;
73  delete vocabulary_;
74  objectsDescriptors_.clear();
75 }
76 
77 bool FindObject::loadSession(const QString & path)
78 {
79  if(QFile::exists(path) && !path.isEmpty() && QFileInfo(path).suffix().compare("bin") == 0)
80  {
81  QFile file(path);
82  file.open(QIODevice::ReadOnly);
83  QDataStream in(&file);
84 
85  ParametersMap parameters;
86 
87  // load parameters
88  in >> parameters;
89  for(QMap<QString, QVariant>::iterator iter=parameters.begin(); iter!=parameters.end(); ++iter)
90  {
91  Settings::setParameter(iter.key(), iter.value());
92  }
93 
94  // load vocabulary
95  vocabulary_->load(in);
96 
97  // load objects
98  while(!in.atEnd())
99  {
100  ObjSignature * obj = new ObjSignature();
101  obj->load(in, !keepImagesInRAM_);
102  if(obj->id() >= 0)
103  {
104  objects_.insert(obj->id(), obj);
105  }
106  else
107  {
108  UERROR("Failed to load an object!");
109  delete obj;
110  }
111  }
112  file.close();
113 
114  if(!Settings::getGeneral_invertedSearch())
115  {
116  // this will fill objectsDescriptors_ matrix
117  updateVocabulary();
118  }
119  sessionModified_ = false;
120  return true;
121  }
122  else
123  {
124  UERROR("Invalid session file (should be *.bin): \"%s\"", path.toStdString().c_str());
125  }
126  return false;
127 }
128 
129 bool FindObject::saveSession(const QString & path)
130 {
131  if(!path.isEmpty() && QFileInfo(path).suffix().compare("bin") == 0)
132  {
133  QFile file(path);
134  file.open(QIODevice::WriteOnly);
135  QDataStream out(&file);
136 
137  // save parameters
138  out << Settings::getParameters();
139 
140  // save vocabulary
141  vocabulary_->save(out);
142 
143  // save objects
144  for(QMultiMap<int, ObjSignature*>::const_iterator iter=objects_.constBegin(); iter!=objects_.constEnd(); ++iter)
145  {
146  iter.value()->save(out);
147  }
148 
149  file.close();
150  sessionModified_ = false;
151  return true;
152  }
153  UERROR("Path \"%s\" not valid (should be *.bin)", path.toStdString().c_str());
154  return false;
155 }
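
// Usage sketch (not part of the original file): a session round trip with the
// two methods above. The file name "session.bin" is illustrative; only the
// ".bin" suffix matters.
//
//   find_object::FindObject finder;
//   finder.loadObjects("objects");           // load reference objects first
//   if(finder.saveSession("session.bin"))    // parameters + vocabulary + objects
//   {
//       find_object::FindObject restored;
//       restored.loadSession("session.bin"); // restores the same state
//   }
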
156 
157 bool FindObject::saveVocabulary(const QString & filePath) const
158 {
159  return vocabulary_->save(filePath);
160 }
161 
162 bool FindObject::loadVocabulary(const QString & filePath)
163 {
164  if(!Settings::getGeneral_vocabularyFixed() || !Settings::getGeneral_invertedSearch())
165  {
166  UWARN("Loading a vocabulary only makes sense when \"General/vocabularyFixed\" and \"General/invertedSearch\" are enabled! "
167  "Otherwise it will be cleared the next time the objects are updated.");
168  }
169  if(vocabulary_->load(filePath))
170  {
171  if(objects_.size())
172  {
173  updateVocabulary();
174  }
175  return true;
176  }
177  return false;
178 }
179 
180 int FindObject::loadObjects(const QString & dirPath, bool recursive)
181 {
182  QString formats = Settings::getGeneral_imageFormats().remove('*').remove('.');
183 
184  QStringList paths;
185  paths.append(dirPath);
186 
187  QList<int> idsLoaded;
188  while(paths.size())
189  {
190  QString currentDir = paths.front();
191  UDirectory dir(currentDir.toStdString(), formats.toStdString());
192  if(dir.isValid())
193  {
194  const std::list<std::string> & names = dir.getFileNames(); // sorted in natural order
195  for(std::list<std::string>::const_iterator iter=names.begin(); iter!=names.end(); ++iter)
196  {
197  const ObjSignature * s = this->addObject((currentDir.toStdString()+dir.separator()+*iter).c_str());
198  if(s)
199  {
200  idsLoaded.push_back(s->id());
201  }
202  }
203  }
204 
205  paths.pop_front();
206 
207  if(recursive)
208  {
209  QDir d(currentDir);
210  QStringList subDirs = d.entryList(QDir::AllDirs|QDir::NoDotAndDotDot, QDir::Name);
211  for(int i=subDirs.size()-1; i>=0; --i)
212  {
213  paths.prepend(currentDir+QDir::separator()+subDirs[i]);
214  }
215  }
216  }
217 
218  if(idsLoaded.size())
219  {
220  this->updateObjects(idsLoaded);
221  this->updateVocabulary(idsLoaded);
222  }
223 
224  return idsLoaded.size();
225 }
226 
227 const ObjSignature * FindObject::addObject(const QString & filePath)
228 {
229  if(!filePath.isNull())
230  {
231  cv::Mat img = cv::imread(filePath.toStdString().c_str(), cv::IMREAD_GRAYSCALE);
232  if(!img.empty())
233  {
234  int id = 0;
235  QFileInfo file(filePath);
236  QStringList list = file.fileName().split('.');
237  if(list.size())
238  {
239  bool ok = false;
240  id = list.front().toInt(&ok);
241  if(ok && id>0)
242  {
243  if(objects_.contains(id))
244  {
245  UWARN("Object %d already added, a new ID will be generated (new id=%d).", id, Settings::getGeneral_nextObjID());
246  id = 0;
247  }
248  }
249  else
250  {
251  id = 0;
252  }
253  }
254  else
255  {
256  UERROR("File name doesn't contain \".\" (\"%s\")", filePath.toStdString().c_str());
257  }
258 
259  const ObjSignature * s = this->addObject(img, id, filePath);
260  if(s)
261  {
262  UINFO("Added object %d (%s)", s->id(), filePath.toStdString().c_str());
263  return s;
264  }
265  }
266  else
267  {
268  UERROR("Could not read image \"%s\"", filePath.toStdString().c_str());
269  }
270  }
271  else
272  {
273  UERROR("File path is null!?");
274  }
275  return 0;
276 }
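
// Example of the ID-from-filename convention implemented above (sketch):
// "objects/7.png" is loaded as object 7, while "objects/logo.png" has no
// numeric prefix, so a new ID is taken from Settings::getGeneral_nextObjID().
//
//   const find_object::ObjSignature * s = finder.addObject("objects/7.png");
//   if(s) { /* s->id() == 7, unless 7 was already taken */ }
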
277 
278 const ObjSignature * FindObject::addObject(const cv::Mat & image, int id, const QString & filePath)
279 {
280  UASSERT(id >= 0);
281  ObjSignature * s = new ObjSignature(id, image, filePath);
282  if(!this->addObject(s))
283  {
284  delete s;
285  return 0;
286  }
287  return s;
288 }
289 
290 bool FindObject::addObject(ObjSignature * obj)
291 {
292  UASSERT(obj != 0 && obj->id() >= 0);
293  if(obj->id() && objects_.contains(obj->id()))
294  {
295  UERROR("object with id %d already added!", obj->id());
296  return false;
297  }
298  else if(obj->id() == 0)
299  {
300  obj->setId(Settings::getGeneral_nextObjID());
301  }
302 
303  Settings::setGeneral_nextObjID(obj->id()+1);
304 
305  objects_.insert(obj->id(), obj);
306 
307  return true;
308 }
309 
310 void FindObject::removeObject(int id)
311 {
312  if(objects_.contains(id))
313  {
314  delete objects_.value(id);
315  objects_.remove(id);
316  clearVocabulary();
317  }
318 }
319 
320 void FindObject::removeAllObjects()
321 {
322  qDeleteAll(objects_);
323  objects_.clear();
324  clearVocabulary();
325 }
326 
327 void FindObject::addObjectAndUpdate(const cv::Mat & image, int id, const QString & filePath)
328 {
329  const ObjSignature * s = this->addObject(image, id, filePath);
330  if(s)
331  {
332  QList<int> ids;
333  ids.push_back(s->id());
334  updateObjects(ids);
335  updateVocabulary(ids);
336  }
337 }
338 
339 void FindObject::removeObjectAndUpdate(int id)
340 {
341  if(objects_.contains(id))
342  {
343  delete objects_.value(id);
344  objects_.remove(id);
345  }
346  updateVocabulary();
347  }
348 
349 void FindObject::updateDetectorExtractor()
350 {
351  delete detector_;
352  delete extractor_;
353  detector_ = Settings::createKeypointDetector();
354  extractor_ = Settings::createDescriptorExtractor();
355  UASSERT(detector_ != 0 && extractor_ != 0);
356 }
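
// Sketch: recreate the detector/extractor after changing feature settings at
// runtime. The parameter key below is assumed for illustration only.
//
//   Settings::setParameter("Feature2D/1Detector", QVariant("ORB")); // assumed key name
//   findObject.updateDetectorExtractor();
//   findObject.updateObjects();    // re-extract features with the new detector
//   findObject.updateVocabulary(); // rebuild the vocabulary accordingly
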
357 
358 std::vector<cv::KeyPoint> limitKeypoints(const std::vector<cv::KeyPoint> & keypoints, int maxKeypoints)
359 {
360  std::vector<cv::KeyPoint> kptsKept;
361  if(maxKeypoints > 0 && (int)keypoints.size() > maxKeypoints)
362  {
363  // Sort keypoints by response
364  std::multimap<float, int> responseMap; // <response, index>
365  for(unsigned int i = 0; i < keypoints.size(); ++i)
366  {
367  // Keep track of the index so the data is easier to manage in the next step
368  responseMap.insert(std::pair<float, int>(fabs(keypoints[i].response), i));
369  }
370 
371  // Keep only the keypoints with the highest responses
372  std::multimap<float, int>::reverse_iterator iter = responseMap.rbegin();
373  kptsKept.resize(maxKeypoints);
374  for(unsigned int k=0; k < kptsKept.size() && iter!=reponseMap.rend(); ++k, ++iter)
375  {
376  kptsKept[k] = keypoints[iter->second];
377  }
378  }
379  else
380  {
381  kptsKept = keypoints;
382  }
383  return kptsKept;
384 }
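
// Usage sketch for the helper above: keep only the strongest keypoints.
//
//   std::vector<cv::KeyPoint> kpts;
//   detector->detect(image, kpts);
//   kpts = limitKeypoints(kpts, 500); // at most the 500 highest |response| values
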
385 
386 void limitKeypoints(std::vector<cv::KeyPoint> & keypoints, cv::Mat & descriptors, int maxKeypoints)
387 {
388  UASSERT((int)keypoints.size() == descriptors.rows);
389  std::vector<cv::KeyPoint> kptsKept;
390  cv::Mat descriptorsKept;
391  if(maxKeypoints > 0 && (int)keypoints.size() > maxKeypoints)
392  {
393  descriptorsKept = cv::Mat(0, descriptors.cols, descriptors.type());
394 
395  // Sort keypoints by response
396  std::multimap<float, int> responseMap; // <response, index>
397  for(unsigned int i = 0; i < keypoints.size(); ++i)
398  {
399  // Keep track of the index so the data is easier to manage in the next step
400  responseMap.insert(std::pair<float, int>(fabs(keypoints[i].response), i));
401  }
402 
403  // Keep only the keypoints with the highest responses
404  std::multimap<float, int>::reverse_iterator iter = responseMap.rbegin();
405  kptsKept.resize(maxKeypoints);
406  descriptorsKept.reserve(maxKeypoints);
407  for(unsigned int k=0; k < kptsKept.size() && iter!=reponseMap.rend(); ++k, ++iter)
408  {
409  kptsKept[k] = keypoints[iter->second];
410  descriptorsKept.push_back(descriptors.row(iter->second));
411  }
412  }
413  keypoints = kptsKept;
414  descriptors = descriptorsKept;
415  UASSERT_MSG((int)keypoints.size() == descriptors.rows, uFormat("%d vs %d", (int)keypoints.size(), descriptors.rows).c_str());
416 }
417 
418 void computeFeatures(
419  Feature2D * detector,
420  Feature2D * extractor,
421  const cv::Mat & image,
422  const cv::Mat & mask,
423  std::vector<cv::KeyPoint> & keypoints,
424  cv::Mat & descriptors,
425  int & timeDetection,
426  int & timeExtraction)
427 {
428  QTime timeStep;
429  timeStep.start();
430  keypoints.clear();
431  descriptors = cv::Mat();
432 
433  int maxFeatures = Settings::getFeature2D_3MaxFeatures();
434  if(Settings::currentDetectorType() == Settings::currentDescriptorType())
435  {
436  detector->detectAndCompute(image, keypoints, descriptors, mask);
437  UASSERT_MSG((int)keypoints.size() == descriptors.rows, uFormat("%d vs %d", (int)keypoints.size(), descriptors.rows).c_str());
438  if(maxFeatures > 0 && (int)keypoints.size() > maxFeatures)
439  {
440  limitKeypoints(keypoints, descriptors, maxFeatures);
441  }
442  timeDetection=timeStep.restart();
443  timeExtraction = 0;
444  }
445  else
446  {
447  detector->detect(image, keypoints, mask);
448  if(maxFeatures > 0 && (int)keypoints.size() > maxFeatures)
449  {
450  keypoints = limitKeypoints(keypoints, maxFeatures);
451  }
452  timeDetection=timeStep.restart();
453 
454  //Extract descriptors
455  try
456  {
457  extractor->compute(image, keypoints, descriptors);
458  UASSERT_MSG((int)keypoints.size() == descriptors.rows, uFormat("%d vs %d", (int)keypoints.size(), descriptors.rows).c_str());
459  }
460  catch(cv::Exception & e)
461  {
462  UERROR("Descriptor exception: %s. Maybe some keypoints are invalid "
463  "for the selected descriptor extractor.", e.what());
464  descriptors = cv::Mat();
465  keypoints.clear();
466  }
467  catch ( const std::exception& e )
468  {
469  // standard exceptions
470  UERROR("Descriptor exception: %s. Maybe some keypoints are invalid "
471  "for the selected descriptor extractor.", e.what());
472  descriptors = cv::Mat();
473  keypoints.clear();
474  }
475  timeExtraction+=timeStep.restart();
476  }
477 
478  if( Settings::getFeature2D_SIFT_rootSIFT() &&
479  Settings::currentDescriptorType() == "SIFT" &&
480  !descriptors.empty())
481  {
482  UINFO("Performing RootSIFT...");
483  // see http://www.pyimagesearch.com/2015/04/13/implementing-rootsift-in-python-and-opencv/
484  // apply the Hellinger kernel by first L1-normalizing and taking the
485  // square-root
486  for(int i=0; i<descriptors.rows; ++i)
487  {
488  descriptors.row(i) = descriptors.row(i) / cv::sum(descriptors.row(i))[0];
489  cv::sqrt(descriptors.row(i), descriptors.row(i));
490 
491  // By taking the L1 norm, followed by the square-root, we have
492  // already L2 normalized the feature vector and further normalization
493  // is not needed.
494  //descs /= (np.linalg.norm(descs, axis=1, ord=2) + eps);
495  }
496  }
497 }
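
// Note on RootSIFT as applied above: each descriptor x is replaced by
// x' = sqrt(x / ||x||_1), so that the Euclidean distance between transformed
// descriptors equals the Hellinger kernel on the originals:
// ||x' - y'||^2 = 2 - 2 * sum_i sqrt(x_i * y_i).
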
498 
499 
500 // taken from ASIFT example https://github.com/Itseez/opencv/blob/master/samples/python2/asift.py
501 // Ai is the affine transform matrix from skewImage back to image
502 void FindObject::affineSkew(
503  float tilt,
504  float phi,
505  const cv::Mat & image,
506  cv::Mat & skewImage,
507  cv::Mat & skewMask,
508  cv::Mat & Ai)
509 {
510  float h = image.rows;
511  float w = image.cols;
512  cv::Mat A = cv::Mat::zeros(2,3,CV_32FC1);
513  A.at<float>(0,0) = A.at<float>(1,1) = 1;
514  skewMask = cv::Mat::ones(h, w, CV_8U) * 255;
515  if(phi != 0.0)
516  {
517  phi = phi*CV_PI/180.0f; // deg2rad
518  float s = std::sin(phi);
519  float c = std::cos(phi);
520  cv::Mat A22 = (cv::Mat_<float>(2, 2) <<
521  c, -s,
522  s, c);
523  cv::Mat cornersIn = (cv::Mat_<float>(4, 2) <<
524  0,0,
525  w,0,
526  w,h,
527  0,h);
528  cv::Mat cornersOut = cornersIn * A22.t();
529  cv::Rect rect = cv::boundingRect(cornersOut.reshape(2,4));
530  A = (cv::Mat_<float>(2, 3) <<
531  c, -s, -rect.x,
532  s, c, -rect.y);
533  cv::warpAffine(image, skewImage, A, cv::Size(rect.width, rect.height), cv::INTER_LINEAR, cv::BORDER_REPLICATE);
534  }
535  else
536  {
537  skewImage = image;
538  }
539  if(tilt != 1.0)
540  {
541  float s = 0.8*std::sqrt(tilt*tilt-1);
542  cv::Mat out, out2;
543  cv::GaussianBlur(skewImage, out, cv::Size(0, 0), s, 0.01);
544  cv::resize(out, out2, cv::Size(0, 0), 1.0/tilt, 1.0, cv::INTER_NEAREST);
545  skewImage = out2;
546  A.row(0) /= tilt;
547  }
548  if(phi != 0.0 || tilt != 1.0)
549  {
550  cv::Mat mask = skewMask;
551  cv::warpAffine(mask, skewMask, A, skewImage.size(), cv::INTER_NEAREST);
552  }
553  cv::invertAffineTransform(A, Ai);
554 }
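
// Sketch: keypoints detected in skewImage are mapped back to the input image
// with the returned 2x3 inverse transform Ai, exactly as done in
// AffineExtractionThread::run() below:
//
//   cv::Mat p = (cv::Mat_<float>(3, 1) << kpt.pt.x, kpt.pt.y, 1);
//   cv::Mat pa = Ai * p; // pa.at<float>(0,0) and pa.at<float>(1,0) are the original x, y
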
555 
556 class AffineExtractionThread : public QThread
557 {
558 public:
559  AffineExtractionThread(
560  Feature2D * detector,
561  Feature2D * extractor,
562  const cv::Mat & image,
563  float tilt,
564  float phi) :
565  detector_(detector),
566  extractor_(extractor),
567  image_(image),
568  tilt_(tilt),
569  phi_(phi),
570  timeSkewAffine_(0),
571  timeDetection_(0),
572  timeExtraction_(0),
573  timeSubPix_(0)
574  {
575  UASSERT(detector && extractor);
576  }
577  const cv::Mat & image() const {return image_;}
578  const std::vector<cv::KeyPoint> & keypoints() const {return keypoints_;}
579  const cv::Mat & descriptors() const {return descriptors_;}
580 
581  int timeSkewAffine() const {return timeSkewAffine_;}
582  int timeDetection() const {return timeDetection_;}
583  int timeExtraction() const {return timeExtraction_;}
584  int timeSubPix() const {return timeSubPix_;}
585 
586 protected:
587  virtual void run()
588  {
589  QTime timeStep;
590  timeStep.start();
591  cv::Mat skewImage, skewMask, Ai;
592  FindObject::affineSkew(tilt_, phi_, image_, skewImage, skewMask, Ai);
593  timeSkewAffine_=timeStep.restart();
594 
595  //Detect features
596  computeFeatures(
597  detector_,
598  extractor_,
599  skewImage,
600  skewMask,
601  keypoints_,
602  descriptors_,
603  timeDetection_,
604  timeExtraction_);
605  timeStep.start();
606 
607  // Transform points to original image coordinates
608  for(unsigned int i=0; i<keypoints_.size(); ++i)
609  {
610  cv::Mat p = (cv::Mat_<float>(3, 1) << keypoints_[i].pt.x, keypoints_[i].pt.y, 1);
611  cv::Mat pa = Ai * p;
612  keypoints_[i].pt.x = pa.at<float>(0,0);
613  keypoints_[i].pt.y = pa.at<float>(1,0);
614  }
615 
616  if(keypoints_.size() && Settings::getFeature2D_6SubPix())
617  {
618  // Sub pixel should be done after descriptors extraction
619  std::vector<cv::Point2f> corners;
620  cv::KeyPoint::convert(keypoints_, corners);
621  cv::cornerSubPix(image_,
622  corners,
623  cv::Size(Settings::getFeature2D_7SubPixWinSize(), Settings::getFeature2D_7SubPixWinSize()),
624  cv::Size(-1,-1),
625  cv::TermCriteria( CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, Settings::getFeature2D_8SubPixIterations(), Settings::getFeature2D_9SubPixEps() ));
626  UASSERT(corners.size() == keypoints_.size());
627  for(unsigned int i=0; i<corners.size(); ++i)
628  {
629  keypoints_[i].pt = corners[i];
630  }
631  timeSubPix_ +=timeStep.restart();
632  }
633  }
634 private:
635  Feature2D * detector_;
636  Feature2D * extractor_;
637  cv::Mat image_;
638  float tilt_;
639  float phi_;
640  std::vector<cv::KeyPoint> keypoints_;
641  cv::Mat descriptors_;
642 
643  int timeSkewAffine_;
644  int timeDetection_;
645  int timeExtraction_;
646  int timeSubPix_;
647 };
648 
649 class ExtractFeaturesThread : public QThread
650 {
651 public:
652  ExtractFeaturesThread(
653  Feature2D * detector,
654  Feature2D * extractor,
655  int objectId,
656  const cv::Mat & image) :
657  detector_(detector),
658  extractor_(extractor),
659  objectId_(objectId),
660  image_(image),
661  timeSkewAffine_(0),
662  timeDetection_(0),
663  timeExtraction_(0),
664  timeSubPix_(0)
665  {
666  UASSERT(detector && extractor);
667  UASSERT_MSG(!image.empty() && image.type() == CV_8UC1,
668  uFormat("Image of object %d is null or not type CV_8UC1!?!? (cols=%d, rows=%d, type=%d)",
669  objectId, image.cols, image.rows, image.type()).c_str());
670  }
671  int objectId() const {return objectId_;}
672  const cv::Mat & image() const {return image_;}
673  const std::vector<cv::KeyPoint> & keypoints() const {return keypoints_;}
674  const cv::Mat & descriptors() const {return descriptors_;}
675 
676  int timeSkewAffine() const {return timeSkewAffine_;}
677  int timeDetection() const {return timeDetection_;}
678  int timeExtraction() const {return timeExtraction_;}
679  int timeSubPix() const {return timeSubPix_;}
680 
681 protected:
682  virtual void run()
683  {
684  QTime time;
685  time.start();
686  UINFO("Extracting descriptors from object %d...", objectId_);
687 
688  QTime timeStep;
689  timeStep.start();
690 
691  if(!Settings::getFeature2D_4Affine())
692  {
693  computeFeatures(
694  detector_,
695  extractor_,
696  image_,
697  cv::Mat(),
698  keypoints_,
699  descriptors_,
700  timeDetection_,
701  timeExtraction_);
702  timeStep.start();
703 
704  if(keypoints_.size())
705  {
706  UDEBUG("Detected %d features from object %d...", (int)keypoints_.size(), objectId_);
707  if(Settings::getFeature2D_6SubPix())
708  {
709  // Sub pixel should be done after descriptors extraction
710  std::vector<cv::Point2f> corners;
711  cv::KeyPoint::convert(keypoints_, corners);
712  cv::cornerSubPix(image_,
713  corners,
714  cv::Size(Settings::getFeature2D_7SubPixWinSize(), Settings::getFeature2D_7SubPixWinSize()),
715  cv::Size(-1,-1),
716  cv::TermCriteria( CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, Settings::getFeature2D_8SubPixIterations(), Settings::getFeature2D_9SubPixEps() ));
717  UASSERT(corners.size() == keypoints_.size());
718  for(unsigned int i=0; i<corners.size(); ++i)
719  {
720  keypoints_[i].pt = corners[i];
721  }
722  timeSubPix_ +=timeStep.restart();
723  }
724  }
725  else
726  {
727  UWARN("no features detected in object %d !?!", objectId_);
728  }
729  }
730  else
731  {
732  //ASIFT
733  std::vector<float> tilts;
734  std::vector<float> phis;
735  tilts.push_back(1.0f);
736  phis.push_back(0.0f);
737  int nTilt = Settings::getFeature2D_5AffineCount();
738  for(int t=1; t<nTilt; ++t)
739  {
740  float tilt = std::pow(2.0f, 0.5f*float(t));
741  float inc = 72.0f / float(tilt);
742  for(float phi=0.0f; phi<180.0f; phi+=inc)
743  {
744  tilts.push_back(tilt);
745  phis.push_back(phi);
746  }
747  }
748 
749  //multi-threaded
750  unsigned int threadCounts = Settings::getGeneral_threads();
751  if(threadCounts == 0)
752  {
753  threadCounts = (unsigned int)tilts.size();
754  }
755 
756  for(unsigned int i=0; i<tilts.size(); i+=threadCounts)
757  {
758  QVector<AffineExtractionThread*> threads;
759 
760  for(unsigned int k=i; k<i+threadCounts && k<tilts.size(); ++k)
761  {
762  threads.push_back(new AffineExtractionThread(detector_, extractor_, image_, tilts[k], phis[k]));
763  threads.back()->start();
764  }
765 
766  for(int k=0; k<threads.size(); ++k)
767  {
768  threads[k]->wait();
769 
770  keypoints_.insert(keypoints_.end(), threads[k]->keypoints().begin(), threads[k]->keypoints().end());
771  descriptors_.push_back(threads[k]->descriptors());
772 
773  timeSkewAffine_ += threads[k]->timeSkewAffine();
774  timeDetection_ += threads[k]->timeDetection();
775  timeExtraction_ += threads[k]->timeExtraction();
776  timeSubPix_ += threads[k]->timeSubPix();
777  }
778  }
779  }
780 
781  UINFO("%d descriptors extracted from object %d (in %d ms)", descriptors_.rows, objectId_, time.elapsed());
782  }
783 private:
784  Feature2D * detector_;
785  Feature2D * extractor_;
786  int objectId_;
787  cv::Mat image_;
788  std::vector<cv::KeyPoint> keypoints_;
789  cv::Mat descriptors_;
790 
791  int timeSkewAffine_;
792  int timeDetection_;
793  int timeExtraction_;
794  int timeSubPix_;
795 };
796 
797 void FindObject::updateObjects(const QList<int> & ids)
798 {
799  QList<ObjSignature*> objectsList;
800  if(ids.size())
801  {
802  for(int i=0; i<ids.size(); ++i)
803  {
804  if(objects_.contains(ids[i]))
805  {
806  objectsList.push_back(objects_[ids[i]]);
807  }
808  else
809  {
810  UERROR("Not found object %d!", ids[i]);
811  }
812  }
813  }
814  else
815  {
816  objectsList = objects_.values();
817  }
818 
819  if(objectsList.size())
820  {
821  sessionModified_ = true;
822  int threadCounts = Settings::getGeneral_threads();
823  if(threadCounts == 0)
824  {
825  threadCounts = objectsList.size();
826  }
827 
828  QTime time;
829  time.start();
830 
831  if(objectsList.size())
832  {
833  UINFO("Features extraction from %d objects...", objectsList.size());
834  for(int i=0; i<objectsList.size(); i+=threadCounts)
835  {
836  QVector<ExtractFeaturesThread*> threads;
837  for(int k=i; k<i+threadCounts && k<objectsList.size(); ++k)
838  {
839  if(!objectsList.at(k)->image().empty())
840  {
841  threads.push_back(new ExtractFeaturesThread(detector_, extractor_, objectsList.at(k)->id(), objectsList.at(k)->image()));
842  threads.back()->start();
843  }
844  else
845  {
846  objects_.value(objectsList.at(k)->id())->setData(std::vector<cv::KeyPoint>(), cv::Mat());
847  if(keepImagesInRAM_)
848  {
849  UERROR("Empty image detected for object %d!? No features can be detected.", objectsList.at(k)->id());
850 
851  }
852  else
853  {
854  UWARN("Empty image detected for object %d! No features can be detected. Note that images are not kept in RAM.", objectsList.at(k)->id());
855  }
856  }
857  }
858 
859  for(int j=0; j<threads.size(); ++j)
860  {
861  threads[j]->wait();
862 
863  int id = threads[j]->objectId();
864 
865  objects_.value(id)->setData(threads[j]->keypoints(), threads[j]->descriptors());
866 
867  if(!keepImagesInRAM_)
868  {
869  objects_.value(id)->removeImage();
870  }
871  }
872  }
873  UINFO("Features extraction from %d objects... done! (%d ms)", objectsList.size(), time.elapsed());
874  }
875  }
876  else
877  {
878  UINFO("No objects to update...");
879  }
880 }
881 
882 void FindObject::clearVocabulary()
883 {
884  objectsDescriptors_.clear();
885  dataRange_.clear();
886  vocabulary_->clear();
887 }
888 
889 void FindObject::updateVocabulary(const QList<int> & ids)
890 {
891  int count = 0;
892  int dim = -1;
893  int type = -1;
894  QList<ObjSignature*> objectsList;
895  if(ids.size())
896  {
897  for(int i=0; i<ids.size(); ++i)
898  {
899  if(objects_.contains(ids[i]))
900  {
901  objectsList.push_back(objects_[ids[i]]);
902  }
903  else
904  {
905  UERROR("Not found object %d!", ids[i]);
906  }
907  }
908  if(vocabulary_->size())
909  {
910  dim = vocabulary_->dim();
911  type = vocabulary_->type();
912  }
913  }
914  else
915  {
916  clearVocabulary();
917  objectsList = objects_.values();
918  }
919 
920  // Get the total size and verify descriptors
921  for(int i=0; i<objectsList.size(); ++i)
922  {
923  if(!objectsList.at(i)->descriptors().empty())
924  {
925  if(dim >= 0 && objectsList.at(i)->descriptors().cols != dim)
926  {
927  UERROR("Descriptors of the objects do not all have the same size! Loaded objects "
928  "must all have the same descriptor size (i.e., come from the same descriptor extractor).");
929  return;
930  }
931  dim = objectsList.at(i)->descriptors().cols;
932  if(type >= 0 && objectsList.at(i)->descriptors().type() != type)
933  {
934  UERROR("Descriptors of the objects do not all have the same type! Loaded objects "
935  "must all have been processed by the same descriptor extractor.");
936  return;
937  }
938  type = objectsList.at(i)->descriptors().type();
939  count += objectsList.at(i)->descriptors().rows;
940  }
941  }
942 
943  // Copy data
944  if(count)
945  {
946  UINFO("Updating global descriptors matrix: Objects=%d, total descriptors=%d, dim=%d, type=%d",
947  (int)objects_.size(), count, dim, type);
948  if(!Settings::getGeneral_invertedSearch())
949  {
950  if(Settings::getGeneral_threads() == 1)
951  {
952  // If only one thread, put all descriptors in the same cv::Mat
953  int row = 0;
954  bool vocabularyEmpty = objectsDescriptors_.size() == 0;
955  if(vocabularyEmpty)
956  {
957  UASSERT(objectsDescriptors_.size() == 0);
958  objectsDescriptors_.insert(0, cv::Mat(count, dim, type));
959  }
960  else
961  {
962  row = objectsDescriptors_.begin().value().rows;
963  }
964  for(int i=0; i<objectsList.size(); ++i)
965  {
966  objectsList[i]->setWords(QMultiMap<int,int>());
967  if(objectsList.at(i)->descriptors().rows)
968  {
969  if(vocabularyEmpty)
970  {
971  cv::Mat dest(objectsDescriptors_.begin().value(), cv::Range(row, row+objectsList.at(i)->descriptors().rows));
972  objectsList.at(i)->descriptors().copyTo(dest);
973  }
974  else
975  {
976  UASSERT_MSG(objectsDescriptors_.begin().value().cols == objectsList.at(i)->descriptors().cols,
977  uFormat("%d vs %d", objectsDescriptors_.begin().value().cols, objectsList.at(i)->descriptors().cols).c_str());
978  UASSERT(objectsDescriptors_.begin().value().type() == objectsList.at(i)->descriptors().type());
979  objectsDescriptors_.begin().value().push_back(objectsList.at(i)->descriptors());
980  }
981 
982  row += objectsList.at(i)->descriptors().rows;
983  // dataRange_ contains the upper bound for each
984  // object (the last descriptor's position in the
985  // global object descriptors matrix)
986  if(objectsList.at(i)->descriptors().rows)
987  {
988  dataRange_.insert(row-1, objectsList.at(i)->id());
989  }
990  }
991  }
992  }
993  else
994  {
995  for(int i=0; i<objectsList.size(); ++i)
996  {
997  objectsList[i]->setWords(QMultiMap<int,int>());
998  objectsDescriptors_.insert(objectsList.at(i)->id(), objectsList.at(i)->descriptors());
999  }
1000  }
1001  }
1002  else
1003  {
1004  // Inverted index (vocabulary)
1005  sessionModified_ = true;
1006  QTime time;
1007  time.start();
1008  bool incremental = Settings::getGeneral_vocabularyIncremental() && !Settings::getGeneral_vocabularyFixed();
1009  if(incremental)
1010  {
1011  UINFO("Creating incremental vocabulary...");
1012  }
1013  else if(Settings::getGeneral_vocabularyFixed())
1014  {
1015  UINFO("Updating vocabulary correspondences only (vocabulary is fixed)...");
1016  }
1017  else
1018  {
1019  UINFO("Creating vocabulary...");
1020  }
1021  QTime localTime;
1022  localTime.start();
1023  int updateVocabularyMinWords = Settings::getGeneral_vocabularyUpdateMinWords();
1024  int addedWords = 0;
1025  for(int i=0; i<objectsList.size(); ++i)
1026  {
1027  UASSERT(objectsList[i]->descriptors().rows == (int)objectsList[i]->keypoints().size());
1028  QMultiMap<int, int> words = vocabulary_->addWords(objectsList[i]->descriptors(), objectsList.at(i)->id());
1029  objectsList[i]->setWords(words);
1030  addedWords += words.uniqueKeys().size();
1031  bool updated = false;
1032  if(incremental && addedWords && addedWords >= updateVocabularyMinWords)
1033  {
1034  vocabulary_->update();
1035  addedWords = 0;
1036  updated = true;
1037  }
1038  UINFO("Object %d, %d words from %d descriptors (%d words, %d ms) %s",
1039  objectsList[i]->id(),
1040  words.uniqueKeys().size(),
1041  objectsList[i]->descriptors().rows,
1042  vocabulary_->size(),
1043  localTime.restart(),
1044  updated?"updated":"");
1045  }
1046  if(addedWords && !Settings::getGeneral_vocabularyFixed())
1047  {
1048  vocabulary_->update();
1049  }
1050 
1051  if(incremental)
1052  {
1053  UINFO("Creating incremental vocabulary... done! size=%d (%d ms)", vocabulary_->size(), time.elapsed());
1054  }
1055  else if(Settings::getGeneral_vocabularyFixed())
1056  {
1057  UINFO("Updating vocabulary correspondences only (vocabulary is fixed)... done! size=%d (%d ms)", vocabulary_->size(), time.elapsed());
1058  }
1059  else
1060  {
1061  UINFO("Creating vocabulary... done! size=%d (%d ms)", vocabulary_->size(), time.elapsed());
1062  }
1063  }
1064  }
1065 }
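
// Worked example of the dataRange_ layout built above (single-threaded,
// non-inverted search): if object A contributes 100 descriptors and object B
// contributes 50, then dataRange_ = {99 -> A, 149 -> B}. For global descriptor
// row i=120, dataRange_.lowerBound(120) yields object B, and the object-local
// index is 120 - (99 + 1) = 20, as computed in detect() below.
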
1066 
1067 class SearchThread: public QThread
1068 {
1069 public:
1070  SearchThread(Vocabulary * vocabulary, int objectId, const cv::Mat * descriptors, const QMultiMap<int, int> * sceneWords) :
1071  vocabulary_(vocabulary),
1072  objectId_(objectId),
1073  descriptors_(descriptors),
1074  sceneWords_(sceneWords),
1075  minMatchedDistance_(-1.0f),
1076  maxMatchedDistance_(-1.0f)
1077  {
1078  UASSERT(descriptors);
1079  }
1080  virtual ~SearchThread() {}
1081 
1082  int getObjectId() const {return objectId_;}
1083  float getMinMatchedDistance() const {return minMatchedDistance_;}
1084  float getMaxMatchedDistance() const {return maxMatchedDistance_;}
1085  const QMultiMap<int, int> & getMatches() const {return matches_;}
1086 
1087 protected:
1088  virtual void run()
1089  {
1090  //QTime time;
1091  //time.start();
1092 
1093  cv::Mat results;
1094  cv::Mat dists;
1095 
1096  //match objects to scene
1097  int k = Settings::getNearestNeighbor_3nndrRatioUsed()?2:1;
1098  results = cv::Mat(descriptors_->rows, k, CV_32SC1); // results index
1099  dists = cv::Mat(descriptors_->rows, k, CV_32FC1); // Distance results are CV_32FC1
1100  vocabulary_->search(*descriptors_, results, dists, k);
1101 
1102  // PROCESS RESULTS
1103  // Get all matches for each object
1104  for(int i=0; i<dists.rows; ++i)
1105  {
1106  // Check if this descriptor matches with those of the objects
1107  bool matched = false;
1108 
1109  if(Settings::getNearestNeighbor_3nndrRatioUsed() &&
1110  dists.at<float>(i,0) <= Settings::getNearestNeighbor_4nndrRatio() * dists.at<float>(i,1))
1111  {
1112  matched = true;
1113  }
1114  if((matched || !Settings::getNearestNeighbor_3nndrRatioUsed()) &&
1115  Settings::getNearestNeighbor_5minDistanceUsed())
1116  {
1117  if(dists.at<float>(i,0) <= Settings::getNearestNeighbor_6minDistance())
1118  {
1119  matched = true;
1120  }
1121  else
1122  {
1123  matched = false;
1124  }
1125  }
1126  if(!matched && !Settings::getNearestNeighbor_3nndrRatioUsed() && !Settings::getNearestNeighbor_5minDistanceUsed())
1127  {
1128  matched = true; // no criterion, match to the nearest descriptor
1129  }
1130  if(minMatchedDistance_ == -1 || minMatchedDistance_ > dists.at<float>(i,0))
1131  {
1132  minMatchedDistance_ = dists.at<float>(i,0);
1133  }
1134  if(maxMatchedDistance_ == -1 || maxMatchedDistance_ < dists.at<float>(i,0))
1135  {
1136  maxMatchedDistance_ = dists.at<float>(i,0);
1137  }
1138 
1139  int wordId = results.at<int>(i,0);
1140  if(matched && sceneWords_->count(wordId) == 1)
1141  {
1142  matches_.insert(i, sceneWords_->value(wordId));
1143  matches_.insert(i, results.at<int>(i,0));
1144  }
1145  }
1146 
1147  //UINFO("Search Object %d time=%d ms", objectIndex_, time.elapsed());
1148  }
1149 private:
1150  Vocabulary * vocabulary_; // would be const but flann search() method is not const!?
1151  int objectId_;
1152  const cv::Mat * descriptors_;
1153  const QMultiMap<int, int> * sceneWords_; // <word id, keypoint indexes>
1154 
1155  float minMatchedDistance_;
1156  float maxMatchedDistance_;
1157  QMultiMap<int, int> matches_;
1158 };
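
// Note on the ratio test used in SearchThread::run() above (sketch): with k=2
// nearest neighbors and NearestNeighbor/4nndrRatio = 0.8, a descriptor whose
// two nearest distances are d1=100 and d2=200 is matched because
// 100 <= 0.8 * 200 = 160; with d1=180 it would be rejected.
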
1159 
1160 class HomographyThread: public QThread
1161 {
1162 public:
1163  HomographyThread(
1164  const QMultiMap<int, int> * matches, // <object, scene>
1165  int objectId,
1166  const std::vector<cv::KeyPoint> * kptsA,
1167  const std::vector<cv::KeyPoint> * kptsB,
1168  const cv::Mat & imageA, // image only required if opticalFlow is on
1169  const cv::Mat & imageB) : // image only required if opticalFlow is on
1170  matches_(matches),
1171  objectId_(objectId),
1172  kptsA_(kptsA),
1173  kptsB_(kptsB),
1174  imageA_(imageA),
1175  imageB_(imageB),
1176  code_(DetectionInfo::kRejectedUndef)
1177  {
1178  UASSERT(matches && kptsA && kptsB);
1179  }
1180  virtual ~HomographyThread() {}
1181 
1182  int getObjectId() const {return objectId_;}
1183  const std::vector<int> & getIndexesA() const {return indexesA_;}
1184  const std::vector<int> & getIndexesB() const {return indexesB_;}
1185  const std::vector<uchar> & getOutlierMask() const {return outlierMask_;}
1186  QMultiMap<int, int> getInliers() const {return inliers_;}
1187  QMultiMap<int, int> getOutliers() const {return outliers_;}
1188  const cv::Mat & getHomography() const {return h_;}
1189  DetectionInfo::RejectedCode rejectedCode() const {return code_;}
1190 
1191 protected:
1192  virtual void run()
1193  {
1194  //QTime time;
1195  //time.start();
1196 
1197  std::vector<cv::Point2f> mpts_1(matches_->size());
1198  std::vector<cv::Point2f> mpts_2(matches_->size());
1199  indexesA_.resize(matches_->size());
1200  indexesB_.resize(matches_->size());
1201 
1202  UDEBUG("Fill matches...");
1203  int j=0;
1204  for(QMultiMap<int, int>::const_iterator iter = matches_->begin(); iter!=matches_->end(); ++iter)
1205  {
1206  UASSERT_MSG(iter.key() < (int)kptsA_->size(), uFormat("key=%d size=%d", iter.key(),(int)kptsA_->size()).c_str());
1207  UASSERT_MSG(iter.value() < (int)kptsB_->size(), uFormat("key=%d size=%d", iter.value(),(int)kptsB_->size()).c_str());
1208  mpts_1[j] = kptsA_->at(iter.key()).pt;
1209  indexesA_[j] = iter.key();
1210  mpts_2[j] = kptsB_->at(iter.value()).pt;
1211  indexesB_[j] = iter.value();
1212  ++j;
1213  }
1214 
1215  if((int)mpts_1.size() >= Settings::getHomography_minimumInliers())
1216  {
1217  if(Settings::getHomography_opticalFlow())
1218  {
1219  UASSERT(!imageA_.empty() && !imageB_.empty());
1220 
1221  cv::Mat imageA = imageA_;
1222  cv::Mat imageB = imageB_;
1223  if(imageA_.cols < imageB_.cols && imageA_.rows < imageB_.rows)
1224  {
1225  // padding, optical flow wants images of the same size
1226  imageA = cv::Mat::zeros(imageB_.size(), imageA_.type());
1227  imageA_.copyTo(imageA(cv::Rect(0,0,imageA_.cols, imageA_.rows)));
1228  }
1229  if(imageA.size() == imageB.size())
1230  {
1231  UDEBUG("Optical flow...");
1232  //refine matches
1233  std::vector<unsigned char> status;
1234  std::vector<float> err;
1235  cv::calcOpticalFlowPyrLK(
1236  imageA,
1237  imageB_,
1238  mpts_1,
1239  mpts_2,
1240  status,
1241  err,
1242  cv::Size(Settings::getHomography_opticalFlowWinSize(), Settings::getHomography_opticalFlowWinSize()),
1243  Settings::getHomography_opticalFlowMaxLevel(),
1244  cv::TermCriteria(cv::TermCriteria::COUNT+cv::TermCriteria::EPS, Settings::getHomography_opticalFlowIterations(), Settings::getHomography_opticalFlowEps()),
1245  cv::OPTFLOW_LK_GET_MIN_EIGENVALS | cv::OPTFLOW_USE_INITIAL_FLOW, 1e-4);
1246  }
1247  else
1248  {
1249  UERROR("The object's image must be smaller than or equal to the scene image to use optical flow.");
1250  }
1251  }
1252 
1253  UDEBUG("Find homography... begin");
1254 #if CV_MAJOR_VERSION < 3
1255  h_ = findHomography(mpts_1,
1256  mpts_2,
1257  Settings::getHomographyMethod(),
1258  Settings::getHomography_ransacReprojThr(),
1259  outlierMask_);
1260 #else
1261  h_ = findHomography(mpts_1,
1262  mpts_2,
1263  Settings::getHomographyMethod(),
1264  Settings::getHomography_ransacReprojThr(),
1265  outlierMask_,
1266  Settings::getHomography_maxIterations(),
1267  Settings::getHomography_confidence());
1268 #endif
1269  UDEBUG("Find homography... end");
1270 
1271  UASSERT(outlierMask_.size() == 0 || outlierMask_.size() == mpts_1.size());
1272  for(unsigned int k=0; k<mpts_1.size();++k)
1273  {
1274  if(outlierMask_.size() && outlierMask_.at(k))
1275  {
1276  inliers_.insert(indexesA_[k], indexesB_[k]);
1277  }
1278  else
1279  {
1280  outliers_.insert(indexesA_[k], indexesB_[k]);
1281  }
1282  }
1283 
1284  if(inliers_.size() == (int)outlierMask_.size() && !h_.empty())
1285  {
1286  if(Settings::getHomography_ignoreWhenAllInliers() || cv::countNonZero(h_) < 1)
1287  {
1288  // ignore homography when all features are inliers
1289  h_ = cv::Mat();
1290  code_ = DetectionInfo::kRejectedAllInliers;
1291  }
1292  }
1293  }
1294  else
1295  {
1296  code_ = DetectionInfo::kRejectedLowMatches;
1297  }
1298 
1299  //UINFO("Homography Object %d time=%d ms", objectIndex_, time.elapsed());
1300  }
1301 private:
1302  const QMultiMap<int, int> * matches_;
1303  int objectId_;
1304  const std::vector<cv::KeyPoint> * kptsA_;
1305  const std::vector<cv::KeyPoint> * kptsB_;
1306  cv::Mat imageA_;
1307  cv::Mat imageB_;
1308  DetectionInfo::RejectedCode code_;
1309 
1310  std::vector<int> indexesA_;
1311  std::vector<int> indexesB_;
1312  std::vector<uchar> outlierMask_;
1313  QMultiMap<int, int> inliers_;
1314  QMultiMap<int, int> outliers_;
1315  cv::Mat h_;
1316 };
1317 
1318 void FindObject::detect(const cv::Mat & image)
1319 {
1320  QTime time;
1321  time.start();
1322  DetectionInfo info;
1323  this->detect(image, info);
1324 
1325  if(info.objDetected_.size() > 1)
1326  {
1327  UINFO("(%s) %d objects detected! (%d ms)",
1328  QTime::currentTime().toString("HH:mm:ss.zzz").toStdString().c_str(),
1329  (int)info.objDetected_.size(),
1330  time.elapsed());
1331  }
1332  else if(info.objDetected_.size() == 1)
1333  {
1334  UINFO("(%s) Object %d detected! (%d ms)",
1335  QTime::currentTime().toString("HH:mm:ss.zzz").toStdString().c_str(),
1336  (int)info.objDetected_.begin().key(),
1337  time.elapsed());
1338  }
1339  else if(Settings::getGeneral_sendNoObjDetectedEvents())
1340  {
1341  UINFO("(%s) No objects detected. (%d ms)",
1342  QTime::currentTime().toString("HH:mm:ss.zzz").toStdString().c_str(),
1343  time.elapsed());
1344  }
1345 
1346  if(info.objDetected_.size() > 0 || Settings::getGeneral_sendNoObjDetectedEvents())
1347  {
1348  Q_EMIT objectsFound(info);
1349  }
1350 }
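
// Usage sketch for the asynchronous overload above (receiver names are
// illustrative, not part of this file):
//
//   find_object::FindObject finder;
//   finder.loadObjects("objects");
//   QObject::connect(&finder, SIGNAL(objectsFound(find_object::DetectionInfo)),
//                    &receiver, SLOT(onObjectsFound(find_object::DetectionInfo)));
//   finder.detect(scene); // emits objectsFound() according to the settings
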
1351 
1352 bool FindObject::detect(const cv::Mat & image, find_object::DetectionInfo & info) const
1353 {
1354  QTime totalTime;
1355  totalTime.start();
1356 
1357  // reset statistics
1358  info = DetectionInfo();
1359 
1360  bool success = false;
1361  if(!image.empty())
1362  {
1363  //Convert to grayscale
1364  cv::Mat grayscaleImg;
1365  if(image.channels() != 1 || image.depth() != CV_8U)
1366  {
1367  cv::cvtColor(image, grayscaleImg, cv::COLOR_BGR2GRAY);
1368  }
1369  else
1370  {
1371  grayscaleImg = image;
1372  }
1373 
1374  // DETECT FEATURES AND EXTRACT DESCRIPTORS
1375  UDEBUG("DETECT FEATURES AND EXTRACT DESCRIPTORS FROM THE SCENE");
1376  ExtractFeaturesThread extractThread(detector_, extractor_, -1, grayscaleImg);
1377  extractThread.start();
1378  extractThread.wait();
1379  info.sceneKeypoints_ = extractThread.keypoints();
1380  info.sceneDescriptors_ = extractThread.descriptors();
1381  UASSERT_MSG((int)extractThread.keypoints().size() == extractThread.descriptors().rows, uFormat("%d vs %d", (int)extractThread.keypoints().size(), extractThread.descriptors().rows).c_str());
1382  info.timeStamps_.insert(DetectionInfo::kTimeKeypointDetection, extractThread.timeDetection());
1383  info.timeStamps_.insert(DetectionInfo::kTimeDescriptorExtraction, extractThread.timeExtraction());
1384  info.timeStamps_.insert(DetectionInfo::kTimeSubPixelRefining, extractThread.timeSubPix());
1385  info.timeStamps_.insert(DetectionInfo::kTimeSkewAffine, extractThread.timeSkewAffine());
1386 
1387  bool consistentNNData = (vocabulary_->size()!=0 && vocabulary_->wordToObjects().begin().value()!=-1 && Settings::getGeneral_invertedSearch()) ||
1388  ((vocabulary_->size()==0 || vocabulary_->wordToObjects().begin().value()==-1) && !Settings::getGeneral_invertedSearch());
1389 
1390  bool descriptorsValid = !Settings::getGeneral_invertedSearch() &&
1391  !objectsDescriptors_.empty() &&
1392  objectsDescriptors_.begin().value().cols == info.sceneDescriptors_.cols &&
1393  objectsDescriptors_.begin().value().type() == info.sceneDescriptors_.type();
1394 
1395  bool vocabularyValid = Settings::getGeneral_invertedSearch() &&
1396  vocabulary_->size() &&
1397  !vocabulary_->indexedDescriptors().empty() &&
1398  vocabulary_->indexedDescriptors().cols == info.sceneDescriptors_.cols &&
1399  (vocabulary_->indexedDescriptors().type() == info.sceneDescriptors_.type() ||
1400  (Settings::getNearestNeighbor_7ConvertBinToFloat() && vocabulary_->indexedDescriptors().type() == CV_32FC1));
1401 
1402  // COMPARE
1403  UDEBUG("COMPARE");
1404  if((descriptorsValid || vocabularyValid) &&
1405  info.sceneKeypoints_.size() &&
1406  consistentNNData)
1407  {
1408  success = true;
1409  QTime time;
1410  time.start();
1411 
1412  QMultiMap<int, int> words;
1413 
1414  if(!Settings::getGeneral_invertedSearch())
1415  {
1416  vocabulary_->clear();
1417  // CREATE INDEX for the scene
1418  UDEBUG("CREATE INDEX FOR THE SCENE");
1419  words = vocabulary_->addWords(info.sceneDescriptors_, -1);
1420  vocabulary_->update();
1421  info.timeStamps_.insert(DetectionInfo::kTimeIndexing, time.restart());
1422  info.sceneWords_ = words;
1423  }
1424 
1425  for(QMap<int, ObjSignature*>::const_iterator iter=objects_.begin(); iter!=objects_.end(); ++iter)
1426  {
1427  info.matches_.insert(iter.key(), QMultiMap<int, int>());
1428  }
1429 
1430  if(Settings::getGeneral_invertedSearch() || Settings::getGeneral_threads() == 1)
1431  {
1432  cv::Mat results;
1433  cv::Mat dists;
1434  // DO NEAREST NEIGHBOR
1435  UDEBUG("DO NEAREST NEIGHBOR");
1436  int k = Settings::getNearestNeighbor_3nndrRatioUsed()?2:1;
1437  if(!Settings::getGeneral_invertedSearch())
1438  {
1439  //match objects to scene
1440  results = cv::Mat(objectsDescriptors_.begin().value().rows, k, CV_32SC1); // results index
1441  dists = cv::Mat(objectsDescriptors_.begin().value().rows, k, CV_32FC1); // Distance results are CV_32FC1
1442  vocabulary_->search(objectsDescriptors_.begin().value(), results, dists, k);
1443  }
1444  else
1445  {
1446  //match scene to objects
1447  results = cv::Mat(info.sceneDescriptors_.rows, k, CV_32SC1); // results index
1448  dists = cv::Mat(info.sceneDescriptors_.rows, k, CV_32FC1); // Distance results are CV_32FC1
1449  vocabulary_->search(info.sceneDescriptors_, results, dists, k);
1450  }
1451 
1452  // PROCESS RESULTS
1453  UDEBUG("PROCESS RESULTS");
1454  // Get all matches for each object
1455  for(int i=0; i<dists.rows; ++i)
1456  {
1457  // Check if this descriptor matches with those of the objects
1458  bool matched = false;
1459 
1460  if(Settings::getNearestNeighbor_3nndrRatioUsed() &&
1461  dists.at<float>(i,0) <= Settings::getNearestNeighbor_4nndrRatio() * dists.at<float>(i,1))
1462  {
1463  matched = true;
1464  }
1465  if((matched || !Settings::getNearestNeighbor_3nndrRatioUsed()) &&
1466  Settings::getNearestNeighbor_5minDistanceUsed())
1467  {
1468  if(dists.at<float>(i,0) <= Settings::getNearestNeighbor_6minDistance())
1469  {
1470  matched = true;
1471  }
1472  else
1473  {
1474  matched = false;
1475  }
1476  }
1477  if(!matched &&
1478  !Settings::getNearestNeighbor_3nndrRatioUsed() &&
1479  !Settings::getNearestNeighbor_5minDistanceUsed() &&
1480  dists.at<float>(i,0) >= 0.0f)
1481  {
1482  matched = true; // no criterion, match to the nearest descriptor
1483  }
1484  if(info.minMatchedDistance_ == -1 || info.minMatchedDistance_ > dists.at<float>(i,0))
1485  {
1486  info.minMatchedDistance_ = dists.at<float>(i,0);
1487  }
1488  if(info.maxMatchedDistance_ == -1 || info.maxMatchedDistance_ < dists.at<float>(i,0))
1489  {
1490  info.maxMatchedDistance_ = dists.at<float>(i,0);
1491  }
1492 
1493  if(matched)
1494  {
1495  int wordId = results.at<int>(i,0);
1496  if(Settings::getGeneral_invertedSearch())
1497  {
1498  info.sceneWords_.insertMulti(wordId, i);
1499  QList<int> objIds = vocabulary_->wordToObjects().values(wordId);
1500  for(int j=0; j<objIds.size(); ++j)
1501  {
1502  // just add unique matches
1503  if(vocabulary_->wordToObjects().count(wordId, objIds[j]) == 1)
1504  {
1505  info.matches_.find(objIds[j]).value().insert(objects_.value(objIds[j])->words().value(wordId), i);
1506  }
1507  }
1508  }
1509  else
1510  {
1511  QMap<int, int>::const_iterator iter = dataRange_.lowerBound(i);
1512  int objectId = iter.value();
1513  int firstObjectDescriptorIndex = (iter == dataRange_.begin())?0:(--iter).key()+1;
1514  int objectDescriptorIndex = i - firstObjectDescriptorIndex;
1515 
1516  if(words.count(wordId) == 1)
1517  {
1518  info.matches_.find(objectId).value().insert(objectDescriptorIndex, words.value(wordId));
1519  }
1520  }
1521  }
1522  }
1523  }
1524  else
1525  {
1526  //multi-threaded, match objects to scene
1527  UDEBUG("MULTI-THREADED, MATCH OBJECTS TO SCENE");
1528  int threadCounts = Settings::getGeneral_threads();
1529  if(threadCounts == 0)
1530  {
1531  threadCounts = (int)objectsDescriptors_.size();
1532  }
1533 
1534  QList<int> objectsDescriptorsId = objectsDescriptors_.keys();
1535  QList<cv::Mat> objectsDescriptorsMat = objectsDescriptors_.values();
1536  for(int j=0; j<objectsDescriptorsMat.size(); j+=threadCounts)
1537  {
1538  QVector<SearchThread*> threads;
1539 
1540  for(int k=j; k<j+threadCounts && k<objectsDescriptorsMat.size(); ++k)
1541  {
1542  threads.push_back(new SearchThread(vocabulary_, objectsDescriptorsId[k], &objectsDescriptorsMat[k], &words));
1543  threads.back()->start();
1544  }
1545 
1546  for(int k=0; k<threads.size(); ++k)
1547  {
1548  threads[k]->wait();
1549  info.matches_[threads[k]->getObjectId()] = threads[k]->getMatches();
1550 
1551  if(info.minMatchedDistance_ == -1 || info.minMatchedDistance_ > threads[k]->getMinMatchedDistance())
1552  {
1553  info.minMatchedDistance_ = threads[k]->getMinMatchedDistance();
1554  }
1555  if(info.maxMatchedDistance_ == -1 || info.maxMatchedDistance_ < threads[k]->getMaxMatchedDistance())
1556  {
1557  info.maxMatchedDistance_ = threads[k]->getMaxMatchedDistance();
1558  }
1559  delete threads[k];
1560  }
1561 
1562  }
1563  }
1564 
1565  info.timeStamps_.insert(DetectionInfo::kTimeMatching, time.restart());
1566 
1567  // Homographies
1568  if(Settings::getHomography_homographyComputed())
1569  {
1570  // HOMOGRAPHY
1571  UDEBUG("COMPUTE HOMOGRAPHY");
1572  int threadCounts = Settings::getGeneral_threads();
1573  if(threadCounts == 0)
1574  {
1575  threadCounts = info.matches_.size();
1576  }
1577  QList<int> matchesId = info.matches_.keys();
1578  QList<QMultiMap<int, int> > matchesList = info.matches_.values();
1579  for(int i=0; i<matchesList.size(); i+=threadCounts)
1580  {
1581  UDEBUG("Processing matches %d/%d", i+1, matchesList.size());
1582 
1583  QVector<HomographyThread*> threads;
1584 
1585  UDEBUG("Creating/Starting homography threads (%d)...", threadCounts);
1586  for(int k=i; k<i+threadCounts && k<matchesList.size(); ++k)
1587  {
1588  int objectId = matchesId[k];
1589  UASSERT(objects_.contains(objectId));
1590  threads.push_back(new HomographyThread(
1591  &matchesList[k],
1592  objectId,
1593  &objects_.value(objectId)->keypoints(),
1594  &info.sceneKeypoints_,
1595  objects_.value(objectId)->image(),
1596  grayscaleImg));
1597  threads.back()->start();
1598  }
1599  UDEBUG("Started homography threads");
1600 
1601  for(int j=0; j<threads.size(); ++j)
1602  {
1603  threads[j]->wait();
1604  UDEBUG("Processing results of homography thread %d", j);
1605 
1606  int id = threads[j]->getObjectId();
1607  QTransform hTransform;
1608  DetectionInfo::RejectedCode code = DetectionInfo::kRejectedUndef;
1609  if(threads[j]->getHomography().empty())
1610  {
1611  code = threads[j]->rejectedCode();
1612  }
1613  if(code == DetectionInfo::kRejectedUndef &&
1614  threads[j]->getInliers().size() < Settings::getHomography_minimumInliers() )
1615  {
1616  code = DetectionInfo::kRejectedLowInliers;
1617  }
1618  if(code == DetectionInfo::kRejectedUndef)
1619  {
1620  const cv::Mat & H = threads[j]->getHomography();
1621  UASSERT(H.cols == 3 && H.rows == 3 && H.type()==CV_64FC1);
1622  hTransform = QTransform(
1623  H.at<double>(0,0), H.at<double>(1,0), H.at<double>(2,0),
1624  H.at<double>(0,1), H.at<double>(1,1), H.at<double>(2,1),
1625  H.at<double>(0,2), H.at<double>(1,2), H.at<double>(2,2));
1626 
1627  // is homography valid?
1628  // Here we use mapToScene() from QGraphicsItem instead
1629  // of QTransform::map() because if the homography is not valid,
1630  // huge errors are set by the QGraphicsItem and not by QTransform::map();
1631  UASSERT(objects_.contains(id));
1632  QRectF objectRect = objects_.value(id)->rect();
1633  QGraphicsRectItem item(objectRect);
1634  item.setTransform(hTransform);
1635  QPolygonF rectH = item.mapToScene(item.rect());
1636 
1637  // If a point is outside of 2x times the surface of the scene, homography is invalid.
1638  for(int p=0; p<rectH.size(); ++p)
1639  {
1640  if((rectH.at(p).x() < -image.cols && rectH.at(p).x() < -objectRect.width()) ||
1641  (rectH.at(p).x() > image.cols*2 && rectH.at(p).x() > objectRect.width()*2) ||
1642  (rectH.at(p).y() < -image.rows && rectH.at(p).y() < -objectRect.height()) ||
1643  (rectH.at(p).y() > image.rows*2 && rectH.at(p).y() > objectRect.height()*2))
1644  {
1645  code = DetectionInfo::kRejectedNotValid;
1646  break;
1647  }
1648  }
1649 
1650  // angle
1651  if(code == DetectionInfo::kRejectedUndef &&
1652  Settings::getHomography_minAngle() > 0)
1653  {
1654  for(int a=0; a<rectH.size(); ++a)
1655  {
1656  // Find the smaller angle
1657  QLineF ab(rectH.at(a).x(), rectH.at(a).y(), rectH.at((a+1)%4).x(), rectH.at((a+1)%4).y());
1658  QLineF cb(rectH.at((a+1)%4).x(), rectH.at((a+1)%4).y(), rectH.at((a+2)%4).x(), rectH.at((a+2)%4).y());
1659  float angle = ab.angle(cb);
1660  float minAngle = (float)Settings::getHomography_minAngle();
1661  if(angle < minAngle ||
1662  angle > 180.0-minAngle)
1663  {
1664  code = DetectionInfo::kRejectedByAngle;
1665  break;
1666  }
1667  }
1668  }
1669 
1670  // multi detection
1671  if(code == DetectionInfo::kRejectedUndef &&
1672  Settings::getGeneral_multiDetection())
1673  {
1674  int distance = Settings::getGeneral_multiDetectionRadius(); // in pixels
1675  // Get the outliers and recompute homography with them
1676  matchesList.push_back(threads[j]->getOutliers());
1677  matchesId.push_back(id);
1678 
1679  // compute distance from previous added same objects...
1680  QMultiMap<int, QTransform>::iterator objIter = info.objDetected_.find(id);
1681  for(;objIter!=info.objDetected_.end() && objIter.key() == id; ++objIter)
1682  {
1683  qreal dx = objIter.value().m31() - hTransform.m31();
1684  qreal dy = objIter.value().m32() - hTransform.m32();
1685  int d = (int)sqrt(dx*dx + dy*dy);
1686  if(d < distance)
1687  {
1688  distance = d;
1689  }
1690  }
1691 
1692  if(distance < Settings::getGeneral_multiDetectionRadius())
1693  {
1694  code = DetectionInfo::kRejectedSuperposed;
1695  }
1696  }
1697 
1698  // Corners visible
1699  if(code == DetectionInfo::kRejectedUndef &&
1700  Settings::getHomography_allCornersVisible())
1701  {
1702  // Now verify if all corners are in the scene
1703  QRectF sceneRect(0,0,image.cols, image.rows);
1704  for(int p=0; p<rectH.size(); ++p)
1705  {
1706  if(!sceneRect.contains(QPointF(rectH.at(p).x(), rectH.at(p).y())))
1707  {
1708  code = DetectionInfo::kRejectedCornersOutside;
1709  break;
1710  }
1711  }
1712  }
1713  }
1714 
1715  if(code == DetectionInfo::kRejectedUndef)
1716  {
1717  // Accepted!
1718  info.objDetected_.insert(id, hTransform);
1719  info.objDetectedSizes_.insert(id, objects_.value(id)->rect().size());
1720  info.objDetectedInliers_.insert(id, threads[j]->getInliers());
1721  info.objDetectedOutliers_.insert(id, threads[j]->getOutliers());
1722  info.objDetectedInliersCount_.insert(id, threads[j]->getInliers().size());
1723  info.objDetectedOutliersCount_.insert(id, threads[j]->getOutliers().size());
1724  info.objDetectedFilePaths_.insert(id, objects_.value(id)->filePath());
1725  }
1726  else
1727  {
1728  //Rejected!
1729  info.rejectedInliers_.insert(id, threads[j]->getInliers());
1730  info.rejectedOutliers_.insert(id, threads[j]->getOutliers());
1731  info.rejectedCodes_.insert(id, code);
1732  }
1733  }
1734  UDEBUG("Processed matches %d", i+1);
1735  }
1736  info.timeStamps_.insert(DetectionInfo::kTimeHomography, time.restart());
1737  }
1738  }
1739  else if((descriptorsValid || vocabularyValid) && info.sceneKeypoints_.size())
1740  {
1741  UWARN("Cannot search, objects must be updated");
1742  }
1743  else if(info.sceneKeypoints_.size() == 0)
1744  {
1745  // Accept but warn the user
1746  UWARN("No features detected in the scene!?!");
1747  success = true;
1748  }
1749  }
1750 
1751  info.timeStamps_.insert(DetectionInfo::kTimeTotal, totalTime.elapsed());
1752 
1753  return success;
1754 }
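
// Synchronous usage sketch: inspect the DetectionInfo directly instead of
// listening for the objectsFound() signal.
//
//   find_object::DetectionInfo info;
//   if(finder.detect(scene, info))
//   {
//       for(QMultiMap<int, QTransform>::const_iterator iter = info.objDetected_.constBegin();
//           iter != info.objDetected_.constEnd(); ++iter)
//       {
//           // iter.key() = object id, iter.value() = homography as a QTransform
//       }
//   }
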
1755 
1756 } // namespace find_object