#include <QtCore/QThread>
#include <QtCore/QFileInfo>
#include <QtCore/QStringList>
#include <QtCore/QTime>
#include <QtCore/QDir>
#include <QGraphicsRectItem>

#if CV_MAJOR_VERSION > 3
#include <opencv2/core/types_c.h>
#endif

// ...

FindObject::FindObject(bool keepImagesInRAM, QObject * parent) :
	// ...
	detector_(Settings::createKeypointDetector()),
	extractor_(Settings::createDescriptorExtractor()),
	sessionModified_(false),
	keepImagesInRAM_(keepImagesInRAM)
{
	qRegisterMetaType<find_object::DetectionInfo>("find_object::DetectionInfo");
	qRegisterMetaType<find_object::Header>("find_object::Header");
	// ...
	if(Settings::getGeneral_debug())
	{
		// ...
	}
}
bool FindObject::loadSession(const QString & path, const ParametersMap & customParameters)
{
	if(QFile::exists(path) && !path.isEmpty() && QFileInfo(path).suffix().compare("bin") == 0)
	{
		QFile file(path);
		file.open(QIODevice::ReadOnly);
		QDataStream in(&file);
		// ...
		// Apply the parameters stored in the session, overridden by any custom parameters.
		for(QMap<QString, QVariant>::iterator iter=parameters.begin(); iter!=parameters.end(); ++iter)
		{
			QMap<QString, QVariant>::const_iterator cter = customParameters.find(iter.key());
			if(cter != customParameters.constEnd())
			{
				// ...
			}
			// ...
		}
		// ...
		UERROR("Failed to load an object!");
		// ...
		if(!Settings::getGeneral_invertedSearch())
		{
			// ...
		}
		// ...
		return true;
	}
	UERROR("Invalid session file (should be *.bin): \"%s\"", path.toStdString().c_str());
	return false;
}
bool FindObject::saveSession(const QString & path)
{
	if(!path.isEmpty() && QFileInfo(path).suffix().compare("bin") == 0)
	{
		QFile file(path);
		file.open(QIODevice::WriteOnly);
		QDataStream out(&file);
		// ...
		// Save all object signatures.
		for(QMap<int, ObjSignature*>::const_iterator iter=objects_.constBegin(); iter!=objects_.constEnd(); ++iter)
		{
			iter.value()->save(out);
		}
		// ...
		return true;
	}
	UERROR("Path \"%s\" not valid (should be *.bin)", path.toStdString().c_str());
	return false;
}
bool FindObject::saveVocabulary(const QString & filePath) const
{
	if(!filePath.isEmpty() && QFileInfo(filePath).suffix().compare("bin") == 0)
	{
		QFile file(filePath);
		file.open(QIODevice::WriteOnly);
		QDataStream out(&file);
		// ...
	}
	// ...
}
bool FindObject::loadVocabulary(const QString & filePath)
{
	if(!Settings::getGeneral_vocabularyFixed() || !Settings::getGeneral_invertedSearch())
	{
		UWARN("Doesn't make sense to load a vocabulary if \"General/vocabularyFixed\" and \"General/invertedSearch\" are not enabled! It will "
			  "be cleared at the time the objects are updated.");
	}

	if(QFile::exists(filePath) && !filePath.isEmpty() && QFileInfo(filePath).suffix().compare("bin") == 0)
	{
		QFile file(filePath);
		file.open(QIODevice::ReadOnly);
		QDataStream in(&file);
		// ...
	}
	// ...
}
int FindObject::loadObjects(const QString & dirPath, bool recursive)
{
	QString formats = Settings::getGeneral_imageFormats().remove('*').remove('.');

	QStringList paths;
	paths.append(dirPath);

	QList<int> idsLoaded;
	while(paths.size())
	{
		QString currentDir = paths.front();
		UDirectory dir(currentDir.toStdString(), formats.toStdString());
		// ...
		const std::list<std::string> & names = dir.getFileNames();
		for(std::list<std::string>::const_iterator iter=names.begin(); iter!=names.end(); ++iter)
		{
			// ...
			idsLoaded.push_back(s->id());
			// ...
		}

		if(recursive)
		{
			// Depth-first traversal: prepend subdirectories to the paths queue.
			QDir d(currentDir);
			QStringList subDirs = d.entryList(QDir::AllDirs|QDir::NoDotAndDotDot, QDir::Name);
			for(int i=subDirs.size()-1; i>=0; --i)
			{
				paths.prepend(currentDir+QDir::separator()+subDirs[i]);
			}
		}
		// ...
	}
	// ...
	return idsLoaded.size();
}
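// --- Illustrative usage sketch (added for clarity; not part of the original
// source). Shows how loadObjects() above is typically driven; "objects" is a
// hypothetical directory of images named "<id>.<ext>".
static int exampleLoadObjects(FindObject & finder)
{
	int count = finder.loadObjects("objects", true); // true = recurse into subdirectories
	UINFO("%d object(s) loaded", count);
	return count;
}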
const ObjSignature * FindObject::addObject(const QString & filePath)
{
	if(!filePath.isNull())
	{
		cv::Mat img = cv::imread(filePath.toStdString().c_str(), cv::IMREAD_GRAYSCALE);
		if(!img.empty())
		{
			int id = 0;
			// By convention, an image named "<id>.<ext>" sets the object's ID.
			QFileInfo file(filePath);
			QStringList list = file.fileName().split('.');
			if(list.size())
			{
				bool ok = false;
				id = list.front().toInt(&ok);
				if(ok && id > 0 && objects_.contains(id))
				{
					UWARN("Object %d already added, a new ID will be generated (new id=%d).",
							id, Settings::getGeneral_nextObjID());
					id = 0;
				}
			}
			else
			{
				UERROR("File name doesn't contain \".\" (\"%s\")", filePath.toStdString().c_str());
			}
			// ...
			UINFO("Added object %d (%s)", s->id(), filePath.toStdString().c_str());
			// ...
		}
		else
		{
			UERROR("Could not read image \"%s\"", filePath.toStdString().c_str());
		}
	}
	else
	{
		UERROR("File path is null!?");
	}
	// ...
}
bool FindObject::addObject(ObjSignature * obj)
{
	// ...
	if(obj->id() && objects_.contains(obj->id()))
	{
		UERROR("object with id %d already added!", obj->id());
		return false;
	}
	else if(obj->id() == 0)
	{
		obj->setId(Settings::getGeneral_nextObjID());
	}

	Settings::setGeneral_nextObjID(obj->id()+1);
	// ...
}

// ... (in addObjectAndUpdate())
	// ...
	ids.push_back(s->id());
	// ...
std::vector<cv::KeyPoint> limitKeypoints(const std::vector<cv::KeyPoint> & keypoints, int maxKeypoints)
{
	std::vector<cv::KeyPoint> kptsKept;
	if(maxKeypoints > 0 && (int)keypoints.size() > maxKeypoints)
	{
		// Sort the keypoints by response.
		std::multimap<float, int> responseMap; // <response, index>
		for(unsigned int i = 0; i < keypoints.size(); ++i)
		{
			responseMap.insert(std::pair<float, int>(fabs(keypoints[i].response), i));
		}

		// Keep only the maxKeypoints strongest ones.
		std::multimap<float, int>::reverse_iterator iter = responseMap.rbegin();
		kptsKept.resize(maxKeypoints);
		for(unsigned int k=0; k < kptsKept.size() && iter!=responseMap.rend(); ++k, ++iter)
		{
			kptsKept[k] = keypoints[iter->second];
		}
	}
	else
	{
		kptsKept = keypoints;
	}
	return kptsKept;
}
void limitKeypoints(std::vector<cv::KeyPoint> & keypoints, cv::Mat & descriptors, int maxKeypoints)
{
	UASSERT((int)keypoints.size() == descriptors.rows);
	std::vector<cv::KeyPoint> kptsKept;
	cv::Mat descriptorsKept;
	if(maxKeypoints > 0 && (int)keypoints.size() > maxKeypoints)
	{
		descriptorsKept = cv::Mat(0, descriptors.cols, descriptors.type());

		// Sort the keypoints by response.
		std::multimap<float, int> responseMap; // <response, index>
		for(unsigned int i = 0; i < keypoints.size(); ++i)
		{
			responseMap.insert(std::pair<float, int>(fabs(keypoints[i].response), i));
		}

		// Keep only the strongest keypoints together with their descriptors.
		std::multimap<float, int>::reverse_iterator iter = responseMap.rbegin();
		kptsKept.resize(maxKeypoints);
		descriptorsKept.reserve(maxKeypoints);
		for(unsigned int k=0; k < kptsKept.size() && iter!=responseMap.rend(); ++k, ++iter)
		{
			kptsKept[k] = keypoints[iter->second];
			descriptorsKept.push_back(descriptors.row(iter->second));
		}
	}
	keypoints = kptsKept;
	descriptors = descriptorsKept;
	UASSERT_MSG((int)keypoints.size() == descriptors.rows, uFormat("%d vs %d", (int)keypoints.size(), descriptors.rows).c_str());
}
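// --- Illustrative sketch (added for clarity; not part of the original
// source). Exercises the first limitKeypoints() overload on synthetic data:
// only the N keypoints with the strongest |response| survive.
static void exampleLimitKeypoints()
{
	std::vector<cv::KeyPoint> kpts;
	kpts.push_back(cv::KeyPoint(cv::Point2f(10,10), 3.0f, -1.0f, 0.9f)); // strongest
	kpts.push_back(cv::KeyPoint(cv::Point2f(20,20), 3.0f, -1.0f, 0.1f)); // weakest
	kpts.push_back(cv::KeyPoint(cv::Point2f(30,30), 3.0f, -1.0f, 0.5f));

	std::vector<cv::KeyPoint> kept = limitKeypoints(kpts, 2);
	// kept = keypoints at (10,10) and (30,30), strongest first.
}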
void computeFeatures(
		Feature2D * detector,
		Feature2D * extractor,
		const cv::Mat & image,
		const cv::Mat & mask,
		std::vector<cv::KeyPoint> & keypoints,
		cv::Mat & descriptors,
		int & timeDetection,
		int & timeExtraction)
{
	QTime timeStep;
	timeStep.start();
	keypoints.clear();
	descriptors = cv::Mat();

	int maxFeatures = Settings::getFeature2D_3MaxFeatures();

	// ... (when the detector and extractor are the same algorithm, a single
	// detectAndCompute() pass fills both keypoints and descriptors)
	UASSERT_MSG((int)keypoints.size() == descriptors.rows, uFormat("%d vs %d", (int)keypoints.size(), descriptors.rows).c_str());
	if(maxFeatures > 0 && (int)keypoints.size() > maxFeatures)
	{
		limitKeypoints(keypoints, descriptors, maxFeatures);
	}
	timeDetection = timeStep.restart();

	// ... (otherwise, detect keypoints first, then extract descriptors separately)
	detector->detect(image, keypoints, mask);
	if(maxFeatures > 0 && (int)keypoints.size() > maxFeatures)
	{
		keypoints = limitKeypoints(keypoints, maxFeatures);
	}
	timeDetection = timeStep.restart();

	try
	{
		extractor->compute(image, keypoints, descriptors);
		UASSERT_MSG((int)keypoints.size() == descriptors.rows, uFormat("%d vs %d", (int)keypoints.size(), descriptors.rows).c_str());
	}
	catch(cv::Exception & e)
	{
		UERROR("Descriptor exception: %s. Maybe some keypoints are invalid "
				"for the selected descriptor extractor.", e.what());
		descriptors = cv::Mat();
		// ...
	}
	catch(const std::exception & e)
	{
		UERROR("Descriptor exception: %s. Maybe some keypoints are invalid "
				"for the selected descriptor extractor.", e.what());
		descriptors = cv::Mat();
		// ...
	}
	timeExtraction += timeStep.restart();

	// RootSIFT (Arandjelovic & Zisserman, 2012): L1-normalize each descriptor,
	// then take the element-wise square root (Hellinger kernel).
	if(Settings::getFeature2D_SIFT_rootSIFT() &&
	   /* ... */
	   !descriptors.empty())
	{
		UINFO("Performing RootSIFT...");
		for(int i=0; i<descriptors.rows; ++i)
		{
			descriptors.row(i) = descriptors.row(i) / cv::sum(descriptors.row(i))[0];
			cv::sqrt(descriptors.row(i), descriptors.row(i));
		}
	}
}
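// --- Illustrative sketch (added for clarity; not part of the original
// source). The RootSIFT step above on one synthetic descriptor row: after
// L1-normalization and an element-wise square root, comparing descriptors
// with the L2 distance approximates the Hellinger distance on the originals.
static void exampleRootSift()
{
	cv::Mat desc = (cv::Mat_<float>(1, 4) << 4.0f, 9.0f, 16.0f, 1.0f);
	desc = desc / cv::sum(desc)[0]; // L1-normalize: bins now sum to 1
	cv::sqrt(desc, desc);           // Hellinger mapping
	// cv::norm(desc) == 1: square roots of L1-normalized bins are L2-normalized.
}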
void affineSkew(
		float tilt,
		float phi,
		const cv::Mat & image,
		cv::Mat & skewImage,
		cv::Mat & skewMask,
		cv::Mat & Ai)
{
	float h = image.rows;
	float w = image.cols;
	cv::Mat A = cv::Mat::zeros(2,3,CV_32FC1);
	A.at<float>(0,0) = A.at<float>(1,1) = 1; // identity
	skewMask = cv::Mat::ones(h, w, CV_8U) * 255;
	if(phi != 0.0)
	{
		// In-plane rotation by phi, cropped to the rotated bounding rectangle.
		phi = phi*CV_PI/180.0f; // deg -> rad
		float s = std::sin(phi);
		float c = std::cos(phi);
		cv::Mat A22 = (cv::Mat_<float>(2, 2) <<
				c, -s,
				s,  c);
		cv::Mat cornersIn = (cv::Mat_<float>(4, 2) <<
				0, 0,
				w, 0,
				w, h,
				0, h);
		cv::Mat cornersOut = cornersIn * A22.t();
		cv::Rect rect = cv::boundingRect(cornersOut.reshape(2,4));
		A = (cv::Mat_<float>(2, 3) <<
				c, -s, -rect.x,
				s,  c, -rect.y);
		cv::warpAffine(image, skewImage, A, cv::Size(rect.width, rect.height), cv::INTER_LINEAR, cv::BORDER_REPLICATE);
	}
	// ...
	if(tilt != 1.0)
	{
		// Simulate the tilt: anti-alias blur along x, then subsample by 1/tilt.
		float s = 0.8*std::sqrt(tilt*tilt-1);
		cv::Mat out, out2;
		cv::GaussianBlur(skewImage, out, cv::Size(0, 0), s, 0.01);
		cv::resize(out, out2, cv::Size(0, 0), 1.0/tilt, 1.0, cv::INTER_NEAREST);
		// ... (skewImage becomes out2 and the first row of A is scaled by 1/tilt)
	}
	if(phi != 0.0 || tilt != 1.0)
	{
		cv::Mat mask = skewMask;
		cv::warpAffine(mask, skewMask, A, skewImage.size(), cv::INTER_NEAREST);
	}
	cv::invertAffineTransform(A, Ai);
}
class AffineExtractionThread : public QThread
{
public:
	AffineExtractionThread(
			Feature2D * detector,
			Feature2D * extractor,
			const cv::Mat & image,
			float tilt,
			float phi) :
		// ...
	{
		UASSERT(detector && extractor);
	}
	const cv::Mat & image() const {return image_;}
	const std::vector<cv::KeyPoint> & keypoints() const {return keypoints_;}
	// ...
protected:
	virtual void run()
	{
		QTime timeStep;
		timeStep.start();
		// ...
		cv::Mat skewImage, skewMask, Ai;
		// ... (affineSkew() call)
		timeSkewAffine_ = timeStep.restart();
		// ... (feature detection and descriptor extraction on skewImage)

		// Transform the keypoints back to the original (unskewed) image frame
		// using the inverse affine transform Ai.
		for(unsigned int i=0; i<keypoints_.size(); ++i)
		{
			cv::Mat p = (cv::Mat_<float>(3, 1) << keypoints_[i].pt.x, keypoints_[i].pt.y, 1);
			cv::Mat pa = Ai * p;
			keypoints_[i].pt.x = pa.at<float>(0,0);
			keypoints_[i].pt.y = pa.at<float>(1,0);
		}

		if(keypoints_.size() && Settings::getFeature2D_6SubPix())
		{
			// Refine keypoint positions to sub-pixel accuracy.
			std::vector<cv::Point2f> corners;
			cv::KeyPoint::convert(keypoints_, corners);
			cv::cornerSubPix(image_,
					corners,
					cv::Size(Settings::getFeature2D_7SubPixWinSize(), Settings::getFeature2D_7SubPixWinSize()),
					cv::Size(-1,-1),
					cv::TermCriteria( CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, Settings::getFeature2D_8SubPixIterations(), Settings::getFeature2D_9SubPixEps() ));
			UASSERT(corners.size() == keypoints_.size());
			for(unsigned int i=0; i<corners.size(); ++i)
			{
				keypoints_[i].pt = corners[i];
			}
			timeSubPix_ += timeStep.restart();
		}
	}
	// ...
};
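// --- Illustrative sketch (added for clarity; not part of the original
// source). How the 2x3 inverse affine transform Ai produced by affineSkew()
// maps a point detected in the skewed image back to the original frame,
// exactly as the loop above does.
static cv::Point2f exampleBackProject(const cv::Mat & Ai, const cv::Point2f & ptInSkewedImage)
{
	cv::Mat p = (cv::Mat_<float>(3, 1) << ptInSkewedImage.x, ptInSkewedImage.y, 1);
	cv::Mat pa = Ai * p; // [x'; y'] = Ai * [x; y; 1]
	return cv::Point2f(pa.at<float>(0,0), pa.at<float>(1,0));
}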
class ExtractFeaturesThread : public QThread
{
public:
	ExtractFeaturesThread(
			Feature2D * detector,
			Feature2D * extractor,
			int objectId,
			const cv::Mat & image) :
		// ...
	{
		UASSERT(detector && extractor);
		UASSERT_MSG(!image.empty() && image.type() == CV_8UC1,
				uFormat("Image of object %d is null or not type CV_8UC1!?!? (cols=%d, rows=%d, type=%d)",
						objectId, image.cols, image.rows, image.type()).c_str());
	}
	const cv::Mat & image() const {return image_;}
	const std::vector<cv::KeyPoint> & keypoints() const {return keypoints_;}
	// ...
protected:
	virtual void run()
	{
		UDEBUG("Extracting descriptors from object %d...", objectId_);
		// ...
		if(!Settings::getFeature2D_4Affine())
		{
			// ...
			if(keypoints_.size())
			{
				UDEBUG("Detected %d features from object %d...", (int)keypoints_.size(), objectId_);
				if(Settings::getFeature2D_6SubPix())
				{
					// Refine keypoint positions to sub-pixel accuracy.
					std::vector<cv::Point2f> corners;
					cv::KeyPoint::convert(keypoints_, corners);
					cv::cornerSubPix(image_,
							corners,
							cv::Size(Settings::getFeature2D_7SubPixWinSize(), Settings::getFeature2D_7SubPixWinSize()),
							cv::Size(-1,-1),
							cv::TermCriteria( CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, Settings::getFeature2D_8SubPixIterations(), Settings::getFeature2D_9SubPixEps() ));
					UASSERT(corners.size() == keypoints_.size());
					for(unsigned int i=0; i<corners.size(); ++i)
					{
						keypoints_[i].pt = corners[i];
					}
					timeSubPix_ += timeStep.restart();
				}
			}
			else
			{
				UWARN("no features detected in object %d !?!", objectId_);
			}
		}
		else
		{
			// ASIFT-style: extract keypoints and descriptors over several
			// simulated affine viewpoints (tilt/rotation pairs).
			std::vector<float> tilts;
			std::vector<float> phis;
			tilts.push_back(1.0f);
			phis.push_back(0.0f);
			int nTilt = Settings::getFeature2D_5AffineCount();
			for(int t=1; t<nTilt; ++t)
			{
				float tilt = std::pow(2.0f, 0.5f*float(t));
				float inc = 72.0f / float(tilt);
				for(float phi=0.0f; phi<180.0f; phi+=inc)
				{
					tilts.push_back(tilt);
					phis.push_back(phi);
				}
			}

			// Process the simulated viewpoints in batches of threadCounts threads.
			unsigned int threadCounts = Settings::getGeneral_threads();
			if(threadCounts == 0)
			{
				threadCounts = (unsigned int)tilts.size();
			}
			// ...
			for(unsigned int i=0; i<tilts.size(); i+=threadCounts)
			{
				QVector<AffineExtractionThread*> threads;
				// ...
				for(unsigned int k=i; k<i+threadCounts && k<tilts.size(); ++k)
				{
					// ...
					threads.back()->start();
				}

				for(int k=0; k<threads.size(); ++k)
				{
					// ...
					keypoints_.insert(keypoints_.end(), threads[k]->keypoints().begin(), threads[k]->keypoints().end());
					descriptors_.push_back(threads[k]->descriptors());

					timeSkewAffine_ += threads[k]->timeSkewAffine();
					timeDetection_ += threads[k]->timeDetection();
					timeExtraction_ += threads[k]->timeExtraction();
					timeSubPix_ += threads[k]->timeSubPix();
				}
			}
		}
		// ...
		UINFO("%d descriptors extracted from object %d (in %d ms)", descriptors_.rows, objectId_, time.elapsed());
	}
	// ...
};
void FindObject::updateObjects(const QList<int> & ids)
{
	UINFO("Update %d objects...", ids.size());
	QList<ObjSignature*> objectsList;
	if(ids.size())
	{
		for(int i=0; i<ids.size(); ++i)
		{
			if(objects_.contains(ids[i]))
			{
				objectsList.push_back(objects_[ids[i]]);
			}
			else
			{
				UERROR("Not found object %d!", ids[i]);
			}
		}
	}
	// ...

	if(objectsList.size())
	{
		// ...
		int threadCounts = Settings::getGeneral_threads();
		if(threadCounts == 0)
		{
			threadCounts = objectsList.size();
		}
		// ...
		if(objectsList.size())
		{
			UINFO("Features extraction from %d objects... (threads=%d)", objectsList.size(), threadCounts);
			for(int i=0; i<objectsList.size(); i+=threadCounts)
			{
				QVector<ExtractFeaturesThread*> threads;
				for(int k=i; k<i+threadCounts && k<objectsList.size(); ++k)
				{
					if(!objectsList.at(k)->image().empty())
					{
						// ...
						threads.back()->start();
					}
					else
					{
						objects_.value(objectsList.at(k)->id())->setData(std::vector<cv::KeyPoint>(), cv::Mat());
						if(keepImagesInRAM_)
						{
							UERROR("Empty image detected for object %d!? No features can be detected.", objectsList.at(k)->id());
						}
						else
						{
							UWARN("Empty image detected for object %d! No features can be detected. Note that images are not kept in RAM.", objectsList.at(k)->id());
						}
					}
				}

				for(int j=0; j<threads.size(); ++j)
				{
					// ...
					int id = threads[j]->objectId();
					objects_.value(id)->setData(threads[j]->keypoints(), threads[j]->descriptors());
					// ...
				}
			}
			UINFO("Features extraction from %d objects... done! (%d ms)", objectsList.size(), time.elapsed());
		}
	}
	else
	{
		UINFO("No objects to update...");
	}
}
void FindObject::updateVocabulary(const QList<int> & ids)
{
	QList<ObjSignature*> objectsList;
	if(ids.size())
	{
		for(int i=0; i<ids.size(); ++i)
		{
			if(objects_.contains(ids[i]))
			{
				objectsList.push_back(objects_[ids[i]]);
			}
			else
			{
				UERROR("Not found object %d!", ids[i]);
			}
		}
	}
	// ...

	// All objects must have descriptors of the same dimension and type.
	int count = 0;
	int dim = -1;
	int type = -1;
	for(int i=0; i<objectsList.size(); ++i)
	{
		if(!objectsList.at(i)->descriptors().empty())
		{
			if(dim >= 0 && objectsList.at(i)->descriptors().cols != dim)
			{
				UERROR("Descriptors of the objects are not all the same size! Objects "
						"opened must have all the same size (and from the same descriptor extractor).");
				return;
			}
			dim = objectsList.at(i)->descriptors().cols;
			if(type >= 0 && objectsList.at(i)->descriptors().type() != type)
			{
				UERROR("Descriptors of the objects are not all the same type! Objects opened "
						"must have been processed by the same descriptor extractor.");
				return;
			}
			type = objectsList.at(i)->descriptors().type();
			count += objectsList.at(i)->descriptors().rows;
		}
	}

	UINFO("Updating vocabulary with %d objects and %d descriptors...", ids.size(), count);
	// ...
	UINFO("Updating global descriptors matrix: Objects=%d, total descriptors=%d, dim=%d, type=%d",
			(int)objects_.size(), count, dim, type);
	if(!Settings::getGeneral_invertedSearch())
	{
		if(Settings::getGeneral_threads() == 1)
		{
			// Single thread: concatenate all descriptors into one global matrix;
			// dataRange_ keeps the last row index belonging to each object.
			// ...
			for(int i=0; i<objectsList.size(); ++i)
			{
				objectsList[i]->setWords(QMultiMap<int,int>());
				if(objectsList.at(i)->descriptors().rows)
				{
					// ...
					cv::Mat dest(objectsDescriptors_.begin().value(), cv::Range(row, row+objectsList.at(i)->descriptors().rows));
					objectsList.at(i)->descriptors().copyTo(dest);
					// ...
					row += objectsList.at(i)->descriptors().rows;
					// ...
					if(objectsList.at(i)->descriptors().rows)
					{
						dataRange_.insert(row-1, objectsList.at(i)->id());
					}
				}
			}
		}
		else
		{
			// Multi-threaded matching: keep one descriptors matrix per object.
			for(int i=0; i<objectsList.size(); ++i)
			{
				objectsList[i]->setWords(QMultiMap<int,int>());
				// ...
			}
		}
	}
	else
	{
		// Inverted search: quantize the objects' descriptors to vocabulary words.
		bool incremental = Settings::getGeneral_vocabularyIncremental() && !Settings::getGeneral_vocabularyFixed();
		if(incremental)
		{
			UINFO("Creating incremental vocabulary...");
		}
		else if(Settings::getGeneral_vocabularyFixed())
		{
			UINFO("Updating vocabulary correspondences only (vocabulary is fixed)...");
		}
		else
		{
			UINFO("Creating vocabulary...");
		}
		// ...
		int updateVocabularyMinWords = Settings::getGeneral_vocabularyUpdateMinWords();
		int addedWords = 0;
		for(int i=0; i<objectsList.size(); ++i)
		{
			UASSERT(objectsList[i]->descriptors().rows == (int)objectsList[i]->keypoints().size());
			// "words" maps word id -> index of the object's keypoint/descriptor.
			QMultiMap<int, int> words = vocabulary_->addWords(objectsList[i]->descriptors(), objectsList.at(i)->id());
			objectsList[i]->setWords(words);
			addedWords += words.uniqueKeys().size();
			bool updated = false;
			if(incremental && addedWords && addedWords >= updateVocabularyMinWords)
			{
				// ...
			}
			UINFO("Object %d, %d words from %d descriptors (%d words, %d ms) %s",
					objectsList[i]->id(),
					words.uniqueKeys().size(),
					objectsList[i]->descriptors().rows,
					/* ... */
					localTime.restart(),
					updated?"updated":"");
		}
		if(addedWords && !Settings::getGeneral_vocabularyFixed())
		{
			// ...
			UINFO("Updating vocabulary...");
			// ...
		}

		if(incremental)
		{
			UINFO("Creating incremental vocabulary... done! size=%d (%d ms)",
					vocabulary_->size(), time.elapsed());
		}
		else if(Settings::getGeneral_vocabularyFixed())
		{
			UINFO("Updating vocabulary correspondences only (vocabulary is fixed)... done! size=%d (%d ms)",
					vocabulary_->size(), time.elapsed());
		}
		// ...
	}
}
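// --- Illustrative sketch (added for clarity; not part of the original
// source). The inverted-index idea behind addWords(): the vocabulary keeps a
// word -> object multimap so that a visual word observed in the scene can
// vote directly for every object containing it. "wordToObjects" here stands
// for such a map; the real Vocabulary class exposes a similar accessor.
static QList<int> exampleObjectsForWord(const QMultiMap<int, int> & wordToObjects, int wordId)
{
	// All objects in which this word appears (duplicates are possible when the
	// word occurs several times in the same object).
	return wordToObjects.values(wordId);
}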
class SearchThread : public QThread
{
public:
	SearchThread(Vocabulary * vocabulary, int objectId, const cv::Mat * descriptors, const QMultiMap<int, int> * sceneWords) :
		// ...
		objectId_(objectId),
		descriptors_(descriptors),
		sceneWords_(sceneWords),
		minMatchedDistance_(-1.0f),
		maxMatchedDistance_(-1.0f)
	{
		// ...
	}
	// ...
	const QMultiMap<int, int> & getMatches() const {return matches_;}
	// ...
protected:
	virtual void run()
	{
		// ...
		int k = Settings::getNearestNeighbor_3nndrRatioUsed()?2:1;
		cv::Mat results = cv::Mat(descriptors_->rows, k, CV_32SC1); // word ids
		cv::Mat dists = cv::Mat(descriptors_->rows, k, CV_32FC1);   // distances
		// ... (knn-search descriptors_ in the vocabulary, filling results/dists)

		for(int i=0; i<dists.rows; ++i)
		{
			// Apply the NNDR test: accept the nearest neighbor only if its
			// distance is a fraction of the second-nearest distance.
			bool matched = false;

			if(Settings::getNearestNeighbor_3nndrRatioUsed() &&
			   dists.at<float>(i,0) <= Settings::getNearestNeighbor_4nndrRatio() * dists.at<float>(i,1))
			{
				matched = true;
			}
			if((matched || !Settings::getNearestNeighbor_3nndrRatioUsed()) &&
			   Settings::getNearestNeighbor_5minDistanceUsed())
			{
				if(dists.at<float>(i,0) <= Settings::getNearestNeighbor_6minDistance())
				{
					matched = true;
				}
				else
				{
					matched = false;
				}
			}
			if(!matched && !Settings::getNearestNeighbor_3nndrRatioUsed() && !Settings::getNearestNeighbor_5minDistanceUsed())
			{
				matched = true; // no criterion set, match to the nearest neighbor anyway
			}
			if(minMatchedDistance_ == -1 || minMatchedDistance_ > dists.at<float>(i,0))
			{
				minMatchedDistance_ = dists.at<float>(i,0);
			}
			if(maxMatchedDistance_ == -1 || maxMatchedDistance_ < dists.at<float>(i,0))
			{
				maxMatchedDistance_ = dists.at<float>(i,0);
			}

			int wordId = results.at<int>(i,0);
			if(matched && sceneWords_->count(wordId) == 1)
			{
				matches_.insert(i, sceneWords_->value(wordId));
				matches_.insert(i, results.at<int>(i,0));
			}
		}
		// ...
	}
private:
	// ...
	int objectId_;
	const cv::Mat * descriptors_;
	const QMultiMap<int, int> * sceneWords_;
	QMultiMap<int, int> matches_;
	float minMatchedDistance_;
	float maxMatchedDistance_;
};
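// --- Illustrative sketch (added for clarity; not part of the original
// source). The same NNDR (nearest neighbor distance ratio) test as above,
// expressed with a plain cv::BFMatcher instead of the vocabulary search: a
// match is accepted only when the best distance is at most
// nndrRatio * second-best distance (Lowe's ratio test).
static std::vector<cv::DMatch> exampleNndrMatch(const cv::Mat & objDescriptors, const cv::Mat & sceneDescriptors, float nndrRatio = 0.8f)
{
	cv::BFMatcher matcher(cv::NORM_L2);
	std::vector<std::vector<cv::DMatch> > knn;
	matcher.knnMatch(objDescriptors, sceneDescriptors, knn, 2); // k=2, as when nndrRatioUsed

	std::vector<cv::DMatch> accepted;
	for(size_t i=0; i<knn.size(); ++i)
	{
		if(knn[i].size() == 2 && knn[i][0].distance <= nndrRatio * knn[i][1].distance)
		{
			accepted.push_back(knn[i][0]);
		}
	}
	return accepted;
}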
class HomographyThread : public QThread
{
public:
	HomographyThread(
			const QMultiMap<int, int> * matches,
			int objectId,
			const std::vector<cv::KeyPoint> * kptsA,
			const std::vector<cv::KeyPoint> * kptsB,
			const cv::Mat & imageA,
			const cv::Mat & imageB) :
		// ...
		objectId_(objectId),
		// ...
	{
		UASSERT(matches && kptsA && kptsB);
	}
	// ...
protected:
	virtual void run()
	{
		// ...
		std::vector<cv::Point2f> mpts_1(matches_->size());
		std::vector<cv::Point2f> mpts_2(matches_->size());
		indexesA_.resize(matches_->size());
		indexesB_.resize(matches_->size());

		UDEBUG("Fill matches...");
		int j = 0;
		for(QMultiMap<int, int>::const_iterator iter = matches_->begin(); iter!=matches_->end(); ++iter)
		{
			UASSERT_MSG(iter.key() < (int)kptsA_->size(), uFormat("key=%d size=%d", iter.key(), (int)kptsA_->size()).c_str());
			UASSERT_MSG(iter.value() < (int)kptsB_->size(), uFormat("value=%d size=%d", iter.value(), (int)kptsB_->size()).c_str());
			mpts_1[j] = kptsA_->at(iter.key()).pt;
			indexesA_[j] = iter.key();
			mpts_2[j] = kptsB_->at(iter.value()).pt;
			indexesB_[j] = iter.value();
			++j;
		}

		if((int)mpts_1.size() >= Settings::getHomography_minimumInliers())
		{
			if(Settings::getHomography_opticalFlow())
			{
				UASSERT(!imageA_.empty() && !imageB_.empty());
				// ...
				cv::Mat imageA = imageA_;
				cv::Mat imageB = imageB_;
				if(imageA_.cols < imageB_.cols && imageA_.rows < imageB_.rows)
				{
					// Pad the object image to the scene size.
					imageA = cv::Mat::zeros(imageB_.size(), imageA_.type());
					imageA_.copyTo(imageA(cv::Rect(0,0,imageA_.cols, imageA_.rows)));
				}
				if(imageA.size() == imageB.size())
				{
					UDEBUG("Optical flow...");
					// Refine the matched positions with pyramidal Lucas-Kanade,
					// using the current matches as the initial flow.
					std::vector<unsigned char> status;
					std::vector<float> err;
					cv::calcOpticalFlowPyrLK(
							imageA,
							imageB,
							mpts_1,
							mpts_2,
							status,
							err,
							cv::Size(Settings::getHomography_opticalFlowWinSize(), Settings::getHomography_opticalFlowWinSize()),
							Settings::getHomography_opticalFlowMaxLevel(),
							cv::TermCriteria(cv::TermCriteria::COUNT+cv::TermCriteria::EPS, Settings::getHomography_opticalFlowIterations(), Settings::getHomography_opticalFlowEps()),
							cv::OPTFLOW_LK_GET_MIN_EIGENVALS | cv::OPTFLOW_USE_INITIAL_FLOW, 1e-4);
				}
				else
				{
					UERROR("Object's image should be less/equal size of the scene image to use Optical Flow.");
				}
			}

			UDEBUG("Find homography... begin");
#if CV_MAJOR_VERSION < 3
			h_ = findHomography(mpts_1,
					mpts_2,
					getHomographyMethod(),
					Settings::getHomography_ransacReprojThr(),
					outlierMask_);
#else
			h_ = findHomography(mpts_1,
					mpts_2,
					getHomographyMethod(),
					Settings::getHomography_ransacReprojThr(),
					outlierMask_,
					Settings::getHomography_maxIterations(),
					Settings::getHomography_confidence());
#endif
			UDEBUG("Find homography... end");

			UASSERT(outlierMask_.size() == 0 || outlierMask_.size() == mpts_1.size());
			for(unsigned int k=0; k<mpts_1.size(); ++k)
			{
				if(outlierMask_.size() && outlierMask_.at(k))
				{
					inliers_.insert(indexesA_[k], indexesB_[k]);
				}
				else
				{
					outliers_.insert(indexesA_[k], indexesB_[k]);
				}
			}

			// All-inliers homographies are suspicious and can optionally be ignored.
			if(inliers_.size() == (int)outlierMask_.size() && !h_.empty())
			{
				if(Settings::getHomography_ignoreWhenAllInliers() || cv::countNonZero(h_) < 1)
				{
					// ...
				}
			}
		}
		// ...
	}
	// ...
};
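// --- Illustrative sketch (added for clarity; not part of the original
// source). What the inlier/outlier split above relies on: cv::findHomography
// fills one mask entry per input correspondence, and non-zero entries are the
// RANSAC inliers. At least 4 correspondences are required.
static int exampleCountInliers(const std::vector<cv::Point2f> & objPts, const std::vector<cv::Point2f> & scenePts)
{
	std::vector<uchar> mask;
	cv::Mat H = cv::findHomography(objPts, scenePts, cv::RANSAC, 3.0, mask);
	int inliers = 0;
	for(size_t k=0; k<mask.size(); ++k)
	{
		if(mask[k])
		{
			++inliers;
		}
	}
	return H.empty() ? 0 : inliers;
}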
void FindObject::detect(const cv::Mat & image, const find_object::Header & header, const cv::Mat & depth, float depthConstant)
{
	// ...
	DetectionInfo info;
	this->detect(image, info);

	if(info.objDetected_.size() > 1)
	{
		UINFO("(%s) %d objects detected! (%d ms)",
				QTime::currentTime().toString("HH:mm:ss.zzz").toStdString().c_str(),
				/* ... */);
	}
	else if(info.objDetected_.size() == 1)
	{
		UINFO("(%s) Object %d detected! (%d ms)",
				QTime::currentTime().toString("HH:mm:ss.zzz").toStdString().c_str(),
				/* ... */);
	}
	else if(Settings::getGeneral_sendNoObjDetectedEvents())
	{
		UINFO("(%s) No objects detected. (%d ms)",
				QTime::currentTime().toString("HH:mm:ss.zzz").toStdString().c_str(),
				/* ... */);
	}

	if(info.objDetected_.size() > 0 || Settings::getGeneral_sendNoObjDetectedEvents())
	{
		Q_EMIT objectsFound(info, header, depth, depthConstant);
	}
	// ...
}
bool FindObject::detect(const cv::Mat & image, find_object::DetectionInfo & info) const
{
	// ...
	bool success = false;
	// ...
	// Convert the scene image to grayscale if needed.
	cv::Mat grayscaleImg;
	if(image.channels() != 1 || image.depth() != CV_8U)
	{
		cv::cvtColor(image, grayscaleImg, cv::COLOR_BGR2GRAY);
	}
	else
	{
		grayscaleImg = image;
	}

	UDEBUG("DETECT FEATURES AND EXTRACT DESCRIPTORS FROM THE SCENE");
	// ...
	extractThread.start();
	extractThread.wait();
	// ...
	bool descriptorsValid = !Settings::getGeneral_invertedSearch() &&
			/* ... */;
	bool vocabularyValid = Settings::getGeneral_invertedSearch() &&
			/* ... */;

	if((descriptorsValid || vocabularyValid) &&
	   /* ... */)
	{
		// ...
		QMultiMap<int, int> words;

		if(!Settings::getGeneral_invertedSearch())
		{
			UDEBUG("CREATE INDEX FOR THE SCENE");
			// ...
		}

		for(QMap<int, ObjSignature*>::const_iterator iter=objects_.begin(); iter!=objects_.end(); ++iter)
		{
			info.matches_.insert(iter.key(), QMultiMap<int, int>());
		}

		if(Settings::getGeneral_invertedSearch() || Settings::getGeneral_threads() == 1)
		{
			cv::Mat results;
			cv::Mat dists;
			// ...
			UDEBUG("DO NEAREST NEIGHBOR");
			int k = Settings::getNearestNeighbor_3nndrRatioUsed()?2:1;
			if(!Settings::getGeneral_invertedSearch())
			{
				// ... (match the scene descriptors against the objects' descriptors)
			}
			// ...
			UDEBUG("PROCESS RESULTS");
			for(int i=0; i<dists.rows; ++i)
			{
				// NNDR and minimum-distance tests, as in SearchThread::run().
				bool matched = false;

				if(Settings::getNearestNeighbor_3nndrRatioUsed() &&
				   dists.at<float>(i,0) <= Settings::getNearestNeighbor_4nndrRatio() * dists.at<float>(i,1))
				{
					matched = true;
				}
				if((matched || !Settings::getNearestNeighbor_3nndrRatioUsed()) &&
				   Settings::getNearestNeighbor_5minDistanceUsed())
				{
					if(dists.at<float>(i,0) <= Settings::getNearestNeighbor_6minDistance())
					{
						matched = true;
					}
					else
					{
						matched = false;
					}
				}
				if(!matched &&
				   !Settings::getNearestNeighbor_3nndrRatioUsed() &&
				   !Settings::getNearestNeighbor_5minDistanceUsed() &&
				   dists.at<float>(i,0) >= 0.0f)
				{
					matched = true; // no criterion set, match to the nearest neighbor anyway
				}
				// ...
				int wordId = results.at<int>(i,0);
				if(Settings::getGeneral_invertedSearch())
				{
					// ...
					for(int j=0; j<objIds.size(); ++j)
					{
						// Just add the match; the decision is made in HomographyThread.
						info.matches_.find(objIds[j]).value().insert(objects_.value(objIds[j])->words().value(wordId), i);
					}
				}
				else
				{
					// Map the row of the global descriptors matrix back to its
					// object through dataRange_ (last row index -> object id).
					QMap<int, int>::const_iterator iter = dataRange_.lowerBound(i);
					int objectId = iter.value();
					int firstObjectDescriptorIndex = (iter == dataRange_.begin())?0:(--iter).key()+1;
					int objectDescriptorIndex = i - firstObjectDescriptorIndex;

					if(words.count(wordId) == 1)
					{
						info.matches_.find(objectId).value().insert(objectDescriptorIndex, words.value(wordId));
					}
				}
			}
		}
		else
		{
			UDEBUG("MULTI-THREADED, MATCH OBJECTS TO SCENE");
			int threadCounts = Settings::getGeneral_threads();
			if(threadCounts == 0)
			{
				// ...
			}
			// ...
			for(int j=0; j<objectsDescriptorsMat.size(); j+=threadCounts)
			{
				QVector<SearchThread*> threads;
				// ...
				for(int k=j; k<j+threadCounts && k<objectsDescriptorsMat.size(); ++k)
				{
					// ...
					threads.back()->start();
				}

				for(int k=0; k<threads.size(); ++k)
				{
					threads[k]->wait();
					info.matches_[threads[k]->getObjectId()] = threads[k]->getMatches();
					// ...
				}
			}
		}

		if(Settings::getHomography_homographyComputed())
		{
			UDEBUG("COMPUTE HOMOGRAPHY");
			int threadCounts = Settings::getGeneral_threads();
			if(threadCounts == 0)
			{
				threadCounts = info.matches_.size();
			}
			QList<int> matchesId = info.matches_.keys();
			QList<QMultiMap<int, int> > matchesList = info.matches_.values();
			for(int i=0; i<matchesList.size(); i+=threadCounts)
			{
				UDEBUG("Processing matches %d/%d", i+1, matchesList.size());

				QVector<HomographyThread*> threads;

				UDEBUG("Creating/Starting homography threads (%d)...", threadCounts);
				for(int k=i; k<i+threadCounts && k<matchesList.size(); ++k)
				{
					int objectId = matchesId[k];
					threads.push_back(new HomographyThread(
							&matchesList[k],
							objectId,
							&objects_.value(objectId)->keypoints(),
							/* ... scene keypoints and images ... */));
					threads.back()->start();
				}
				UDEBUG("Started homography threads");

				for(int j=0; j<threads.size(); ++j)
				{
					threads[j]->wait();
					UDEBUG("Processing results of homography thread %d", j);

					int id = threads[j]->getObjectId();
					QTransform hTransform;
					// ...
					if(threads[j]->getHomography().empty())
					{
						code = threads[j]->rejectedCode();
					}
					else if(/* ... */
							threads[j]->getInliers().size() < Settings::getHomography_minimumInliers())
					{
						// ...
					}
					else
					{
						const cv::Mat & H = threads[j]->getHomography();
						UASSERT(H.cols == 3 && H.rows == 3 && H.type()==CV_64FC1);
						// QTransform uses the row-vector convention, so it receives
						// the transpose of H (column by column).
						hTransform = QTransform(
								H.at<double>(0,0), H.at<double>(1,0), H.at<double>(2,0),
								H.at<double>(0,1), H.at<double>(1,1), H.at<double>(2,1),
								H.at<double>(0,2), H.at<double>(1,2), H.at<double>(2,2));

						// ...
						// Project the object rectangle into the scene.
						QRectF objectRect = objects_.value(id)->rect();
						QGraphicsRectItem item(objectRect);
						item.setTransform(hTransform);
						QPolygonF rectH = item.mapToScene(item.rect());

						// Reject detections projected far outside of the scene.
						for(int p=0; p<rectH.size(); ++p)
						{
							if((rectH.at(p).x() < -image.cols && rectH.at(p).x() < -objectRect.width()) ||
							   (rectH.at(p).x() > image.cols*2 && rectH.at(p).x() > objectRect.width()*2) ||
							   (rectH.at(p).y() < -image.rows && rectH.at(p).y() < -objectRect.height()) ||
							   (rectH.at(p).y() > image.rows*2 && rectH.at(p).y() > objectRect.height()*2))
							{
								// ...
							}
						}

						// ...
						if(/* ... */
						   Settings::getHomography_minAngle() > 0)
						{
							// Reject distorted homographies: each corner angle of the
							// projected rectangle must lie in [minAngle, 180-minAngle].
							for(int a=0; a<rectH.size(); ++a)
							{
								QLineF ab(rectH.at(a).x(), rectH.at(a).y(), rectH.at((a+1)%4).x(), rectH.at((a+1)%4).y());
								QLineF cb(rectH.at((a+1)%4).x(), rectH.at((a+1)%4).y(), rectH.at((a+2)%4).x(), rectH.at((a+2)%4).y());
								float angle = ab.angle(cb);
								float minAngle = (float)Settings::getHomography_minAngle();
								if(angle < minAngle ||
								   angle > 180.0-minAngle)
								{
									// ...
								}
							}
						}

						// ...
						if(/* ... */
						   Settings::getGeneral_multiDetection())
						{
							int distance = Settings::getGeneral_multiDetectionRadius(); // in pixels
							// ...
							matchesList.push_back(threads[j]->getOutliers());
							matchesId.push_back(id);
							// ...
							// Minimum distance to previous detections of the same object.
							QMultiMap<int, QTransform>::iterator objIter = info.objDetected_.find(id);
							for(; objIter!=info.objDetected_.end() && objIter.key() == id; ++objIter)
							{
								qreal dx = objIter.value().m31() - hTransform.m31();
								qreal dy = objIter.value().m32() - hTransform.m32();
								int d = (int)sqrt(dx*dx + dy*dy);
								if(d < distance)
								{
									distance = d;
								}
							}
							// ...
							if(distance < Settings::getGeneral_multiDetectionRadius())
							{
								// ... (too close to a previous detection: reject)
							}
						}

						// ...
						if(/* ... */
						   Settings::getHomography_allCornersVisible())
						{
							// Reject the detection if a corner falls outside the scene.
							QRectF sceneRect(0,0,image.cols, image.rows);
							for(int p=0; p<rectH.size(); ++p)
							{
								if(!sceneRect.contains(QPointF(rectH.at(p).x(), rectH.at(p).y())))
								{
									// ...
								}
							}
						}
					}
					// ...
				}
				// ...
				UDEBUG("Processed matches %d", i+1);
			}
		}
	}
	else if((descriptorsValid || vocabularyValid) && info.sceneKeypoints_.size())
	{
		UWARN("Cannot search, objects must be updated");
	}
	else
	{
		UWARN("No features detected in the scene!?!");
	}
	// ...
	return success;
}