node.cpp
/* This file is part of RGBDSLAM.
 * 
 * RGBDSLAM is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 * 
 * RGBDSLAM is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 * 
 * You should have received a copy of the GNU General Public License
 * along with RGBDSLAM.  If not, see <http://www.gnu.org/licenses/>.
 */


#include "node.h"
#include "features.h"
#include "transformation_estimation_euclidean.h"
#include "transformation_estimation.h"
#include <cmath>
#include "scoped_timer.h"
#include <Eigen/Geometry>
//#include "pcl/ros/conversions.h"
#include <pcl/common/transformation_from_correspondences.h>
//#include <opencv2/highgui/highgui.hpp>
//#include <qtconcurrentrun.h>
//#include <QtConcurrentMap> 

#ifdef USE_SIFT_GPU
#include "sift_gpu_wrapper.h"
#endif

//#include <math.h>
#include <fstream>

//#ifdef USE_ICP_CODE
//#include "../external/gicp/transform.h"
//#endif
#ifdef USE_ICP_BIN
#include "gicp-fallback.h"
#endif

#ifdef USE_ICP_CODE
#include "gicp/gicp.h"
#include "gicp/transform.h"
#endif

//#include <iostream>
#include "misc.h"
#include "misc2.h"
#include <pcl/filters/voxel_grid.h>
#include <pcl/filters/impl/voxel_grid.hpp>
#include <opencv/highgui.h>
#ifdef USE_PCL_ICP
#include "icp.h"
#endif
#include <string>
#include <iostream>
QMutex Node::gicp_mutex;
QMutex Node::siftgpu_mutex;

/* Check for each keypoint whether a valid depth measurement is available at its coordinates */
void removeDepthless(std::vector<cv::KeyPoint>& feature_locations_2d, const cv::Mat& depth)
{
  cv::Point2f p2d;
  unsigned int i = 0;
  while(i < feature_locations_2d.size())
  {
    p2d = feature_locations_2d[i].pt;
    if (p2d.x >= depth.cols || p2d.x < 0 ||
        p2d.y >= depth.rows || p2d.y < 0 ||
        std::isnan(p2d.x) || std::isnan(p2d.y)){ //TODO: Unclear why points should be outside the image or be NaN
      ROS_WARN_STREAM("Ignoring invalid keypoint: " << p2d); //Does it happen at all? If not, remove this code block
      feature_locations_2d.erase(feature_locations_2d.begin()+i);
      continue;
    }
    float Z;
    if(ParameterServer::instance()->get<bool>("use_feature_min_depth")){
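      // Assumption behind "use_feature_min_depth": keypoints often lie on depth edges,
      // where the depth at the exact pixel may already belong to the background. Taking
      // the minimum depth in the keypoint's neighborhood (presumably what
      // getMinDepthInNeighborhood does) favors the foreground surface the feature was
      // most likely detected on.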
      Z = getMinDepthInNeighborhood(depth, p2d, feature_locations_2d[i].size);
    } else {
      Z = depth.at<float>(round(p2d.y), round(p2d.x)); //Rounding is unfortunately necessary: occasionally, truncating the subpixel coordinates shifted the point. This happened only for ORB, which produces coordinates like 10.9999959
    }
    // Check for invalid measurements
    if(std::isnan (Z))
    {
      ROS_INFO("%.3u. Feature (%f %f) has been extracted at NaN depth. Omitting", i, p2d.x, p2d.y);
      //FIXME Use parameter here to choose whether to use
      feature_locations_2d.erase(feature_locations_2d.begin()+i);
      continue;
    }
    i++; //Only increment if no element is removed from vector
  }
}

Node::Node(const cv::Mat& visual, 
           const cv::Mat& depth,
           const cv::Mat& detection_mask,
           const sensor_msgs::CameraInfoConstPtr& cam_info, 
           myHeader depth_header,
           cv::Ptr<cv::FeatureDetector> detector,
           cv::Ptr<cv::DescriptorExtractor> extractor) :
  id_(-1), seq_id_(-1), vertex_id_(-1), valid_tf_estimate_(true),
  timestamp_(depth_header.stamp),
  has_odometry_edge_(false),
  odometry_set_(false),
  matchable_(true),
  pc_col(new pointcloud_type()),
  flannIndex(NULL),
  header_(depth_header),
  base2points_(tf::Transform::getIdentity(), depth_header.stamp, ParameterServer::instance()->get<std::string>("base_frame_name"), depth_header.frame_id),
  ground_truth_transform_(tf::Transform::getIdentity(), depth_header.stamp, ParameterServer::instance()->get<std::string>("ground_truth_frame_name"), ParameterServer::instance()->get<std::string>("base_frame_name")),
  odom_transform_(tf::Transform::getIdentity(), depth_header.stamp, "missing_odometry", depth_header.frame_id),
  initial_node_matches_(0),
  cam_info_(*cam_info)
{
  ScopedTimer s("Node Constructor");
  ParameterServer* ps = ParameterServer::instance();

  //Create point cloud if necessary
  if(ps->get<bool>("store_pointclouds") || 
     ps->get<int>("emm__skip_step") > 0 ||
     ps->get<bool>("use_icp") ||
     (ps->get<bool>("use_glwidget") && ps->get<bool>("use_gui") && ! ps->get<bool>("glwidget_without_clouds")))
  {
    pc_col = pointcloud_type::Ptr(createXYZRGBPointCloud(depth, visual, cam_info));
  }
  else //Else use empty one
  {
    pc_col = pointcloud_type::Ptr(new pointcloud_type());
  }
  pc_col->header = header_;

  cv::Mat gray_img; 
  if(visual.type() == CV_8UC3){
    cvtColor(visual, gray_img, CV_RGB2GRAY);
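    // Note: cv_bridge typically delivers BGR images, so CV_RGB2GRAY swaps the red and
    // blue channel weights here. For grayscale-based feature detection this makes
    // little practical difference.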
  } else {
    gray_img = visual;
  }


#ifdef USE_SIFT_GPU
  std::vector<float> descriptors;
  if(ps->get<std::string>("feature_detector_type") == "SIFTGPU"){
    ScopedTimer s("Feature Detection and Descriptor Extraction");
    SiftGPUWrapper* siftgpu = SiftGPUWrapper::getInstance();
    siftgpu->detect(gray_img, feature_locations_2d_, descriptors);
    ROS_WARN_COND(descriptors.size()==0, "No keypoints for current image!");
  } else 
#endif
  {
    ScopedTimer s("Feature Detection");
    ROS_FATAL_COND(detector.empty(), "No valid detector!");
    detector->detect( gray_img, feature_locations_2d_, detection_mask);// fill 2d locations
  }

  // project pixels to 3D positions and create search structures for the gicp
#ifdef USE_SIFT_GPU
  if(ps->get<std::string>("feature_extractor_type") == "SIFTGPU"){

    if(ps->get<std::string>("feature_detector_type") != "SIFTGPU"){
      //descriptors were not already extracted in the detection step
      //remove keypoints without valid 3D position. FIXME: could be made more efficient
      projectTo3D(feature_locations_2d_, feature_locations_3d_, depth, cam_info);
      SiftGPUWrapper* siftgpu = SiftGPUWrapper::getInstance();
      siftgpu->detect(gray_img, feature_locations_2d_, descriptors);
    } 
    if(descriptors.size() > 0){
      projectTo3DSiftGPU(feature_locations_2d_, feature_locations_3d_, depth, cam_info, descriptors, feature_descriptors_); 
      ROS_INFO("Siftgpu Feature Descriptors size: %d x %d", feature_descriptors_.rows, feature_descriptors_.cols);
    } else {
      ROS_WARN("No descriptors for current image!");
    }
    //std::cout << "feature descriptors = "<< std::endl << std::setprecision(3)  << feature_descriptors_<< std::endl;
  }
  else
#endif
  {
    //PREFILTER 
    removeDepthless(feature_locations_2d_, depth);
    size_t max_keyp = ps->get<int>("max_keypoints");
    if(feature_locations_2d_.size() > max_keyp){
      cv::KeyPointsFilter::retainBest(feature_locations_2d_, max_keyp);  
      feature_locations_2d_.resize(max_keyp);//Because retainBest doesn't retain exactly max_keyp?
    }

    /*for(unsigned int i = 0; i < feature_locations_2d_.size(); i++){
      feature_locations_2d_[i].class_id = i;
      feature_locations_2d_[i].pt.x = round(feature_locations_2d_[i].pt.x);
      feature_locations_2d_[i].pt.y = round(feature_locations_2d_[i].pt.y);
      ROS_WARN("%u. Keypoint %.3i: (%f %f)", i, feature_locations_2d_[i].class_id, feature_locations_2d_[i].pt.x, feature_locations_2d_[i].pt.y);
    }*/


    ScopedTimer s("Feature Extraction");
    extractor->compute(gray_img, feature_locations_2d_, feature_descriptors_); //fill feature_descriptors_ with information 
    /*for(unsigned int i = 0; i < feature_locations_2d_.size(); i++){
      ROS_WARN("%.3u. EKeypoint1 %.3i: (%f %f)", i, feature_locations_2d_[i].class_id, feature_locations_2d_[i].pt.x, feature_locations_2d_[i].pt.y);
    }*/
    removeDepthless(feature_locations_2d_, depth);//FIXME: Unnecessary?
    /* for(unsigned int i = 0; i < feature_locations_2d_.size(); i++){
      ROS_WARN("%.3u. EKeypoint %.3i: (%f %f)", i, feature_locations_2d_[i].class_id, feature_locations_2d_[i].pt.x, feature_locations_2d_[i].pt.y);
    }  */
    projectTo3D(feature_locations_2d_, feature_locations_3d_, depth, cam_info);
    /* for(unsigned int i = 0; i < feature_locations_2d_.size(); i++){
      ROS_WARN("%.3u. EKeypoint3 %.3i: (%f %f)", i, feature_locations_2d_[i].class_id, feature_locations_2d_[i].pt.x, feature_locations_2d_[i].pt.y);
    }*/
    ROS_INFO("Keypoints: %zu", feature_locations_2d_.size());
    ROS_DEBUG("Feature Descriptors size: %d x %d", feature_descriptors_.rows, feature_descriptors_.cols);
  }
  assert(feature_locations_2d_.size() == feature_locations_3d_.size());
  assert(feature_locations_3d_.size() == (unsigned int)feature_descriptors_.rows); 
  feature_matching_stats_.resize(feature_locations_2d_.size(), 0);
  ROS_INFO_NAMED("statistics", "Feature Count of Node:\t%d", (int)feature_locations_2d_.size());
  //computeKeypointDepthStats(depth, feature_locations_2d_);

#ifdef USE_ICP_CODE
  gicp_initialized = false;
  gicp_point_set_ = NULL;
  if(ps->get<int>("emm__skip_step") <= 0 && !ps->get<bool>("store_pointclouds") && ps->get<bool>("use_icp")) 
  {//if the point clouds are cleared out afterwards, the icp structure needs to be built beforehand
    gicp_mutex.lock();
    gicp_point_set_ = this->getGICPStructure();
    gicp_mutex.unlock();
  }
#endif
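  // "RootSIFT" (Arandjelovic & Zisserman, CVPR 2012): L1-normalize each descriptor and
  // take the element-wise square root, so that the Euclidean distance between the
  // transformed vectors corresponds to the Hellinger kernel on the original descriptors.
  // This typically improves matching for histogram-based descriptors such as SIFT/SURF.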
  if(ps->get<bool>("use_root_sift") &&
     (ps->get<std::string>("feature_extractor_type") == "SIFTGPU" ||
      ps->get<std::string>("feature_extractor_type") == "SURF" ||
      ps->get<std::string>("feature_extractor_type") == "GFTT" ||
      ps->get<std::string>("feature_extractor_type") == "SIFT")){
    squareroot_descriptor_space(feature_descriptors_);
  }
}


Node::Node(const cv::Mat visual,
           cv::Ptr<cv::FeatureDetector> detector,
           cv::Ptr<cv::DescriptorExtractor> extractor,
           pointcloud_type::Ptr point_cloud,
           const cv::Mat detection_mask) : 
  id_(-1), seq_id_(-1), vertex_id_(-1), valid_tf_estimate_(true), matchable_(true),
  timestamp_(point_cloud->header.stamp),
  has_odometry_edge_(false),
  odometry_set_(false),
  pc_col(point_cloud),
  flannIndex(NULL),
  header_(point_cloud->header),
  base2points_(tf::Transform::getIdentity(), header_.stamp,ParameterServer::instance()->get<std::string>("base_frame_name"), header_.frame_id),
  ground_truth_transform_(tf::Transform::getIdentity(), header_.stamp, ParameterServer::instance()->get<std::string>("ground_truth_frame_name"), ParameterServer::instance()->get<std::string>("base_frame_name")),
  odom_transform_(tf::Transform::getIdentity(), header_.stamp, "missing_odometry", header_.frame_id),
  initial_node_matches_(0)
{
  //cv::namedWindow("matches");
  ParameterServer* ps = ParameterServer::instance();

  ROS_INFO_STREAM("Construction of Node with " << ps->get<std::string>("feature_detector_type") << " Features");
  ScopedTimer s("Node Constructor");

  cv::Mat gray_img; 
  if(visual.type() == CV_8UC3){ cvtColor(visual, gray_img, CV_RGB2GRAY); } 
  else { gray_img = visual; }


#ifdef USE_SIFT_GPU
  std::vector<float> descriptors;
  if(ps->get<std::string>("feature_detector_type") == "SIFTGPU"){
    ScopedTimer s("Feature Detection and Descriptor Extraction");
    SiftGPUWrapper* siftgpu = SiftGPUWrapper::getInstance();
    siftgpu->detect(gray_img, feature_locations_2d_, descriptors);
    ROS_FATAL_COND(descriptors.size() ==0, "Can't run SiftGPU");
  } else 
#endif
  if(ps->get<std::string>("feature_detector_type") != "GICP")
  {
    ScopedTimer s("Feature Detection");
    ROS_FATAL_COND(detector.empty(), "No valid detector!");
    detector->detect( gray_img, feature_locations_2d_, detection_mask);// fill 2d locations
  }

  // project pixels to 3D positions and create search structures for the gicp
#ifdef USE_SIFT_GPU
  if(ps->get<std::string>("feature_detector_type") == "SIFTGPU"
     && descriptors.size() > 0)
  {
    // also removes unused descriptors from the descriptor matrix,
    // builds the descriptor matrix and sets siftgpu_descriptors
    projectTo3DSiftGPU(feature_locations_2d_, feature_locations_3d_, pc_col, descriptors, feature_descriptors_); //takes less than 0.01 sec
  }
  else 
#endif
  if(ps->get<std::string>("feature_detector_type") != "GICP")
  {
    projectTo3D(feature_locations_2d_, feature_locations_3d_, pc_col); //takes less than 0.01 sec
    // projectTo3D needs a dense (organized) cloud to use the points.at(px.x, px.y) call
    ScopedTimer s("Feature Extraction");
    extractor->compute(gray_img, feature_locations_2d_, feature_descriptors_); //fill feature_descriptors_ with information 
  }

  if(ps->get<std::string>("feature_detector_type") != "GICP")
  {
    assert(feature_locations_2d_.size() == feature_locations_3d_.size());
    assert(feature_locations_3d_.size() == (unsigned int)feature_descriptors_.rows); 
    feature_matching_stats_.resize(feature_locations_2d_.size(), 0);
    ROS_INFO_NAMED("statistics", "Feature Count of Node:\t%d", (int)feature_locations_2d_.size());
    /* Now inside the projection method:
    size_t max_keyp = ps->get<int>("max_keypoints");
    if(feature_locations_2d_.size() > max_keyp) {
      feature_locations_2d_.resize(max_keyp);
      feature_locations_3d_.resize(max_keyp);
      feature_descriptors_ = feature_descriptors_.rowRange(0,max_keyp);
    }
    */
    assert(feature_locations_2d_.size() == feature_locations_3d_.size());
    assert(feature_locations_3d_.size() == (unsigned int)feature_descriptors_.rows); 
  }

#ifdef USE_ICP_CODE
  gicp_initialized = false;
  gicp_point_set_ = NULL;
  if(!ps->get<bool>("store_pointclouds") && ps->get<bool>("use_icp")) 
  {//if the point clouds are cleared out afterwards, the icp structure needs to be built beforehand
    gicp_mutex.lock();
    gicp_point_set_ = this->getGICPStructure();
    gicp_mutex.unlock();
  }
#endif

  if((!ps->get<bool>("use_glwidget") ||
      !ps->get<bool>("use_gui")) &&
     !ps->get<bool>("store_pointclouds") &&
     !ps->get<int>("emm__skip_step"))
  {
    ROS_WARN("Clearing out points");
    this->clearPointCloud();
  } else if(ps->get<double>("voxelfilter_size") > 0.0) {
    //Let it be voxelfiltered with the call to clearPointCloud
    /*
    double vfs = ps->get<double>("voxelfilter_size");
    pcl::VoxelGrid<point_type> sor;
    sor.setLeafSize(vfs,vfs,vfs);
    pointcloud_type::ConstPtr const_cloud_ptr = boost::make_shared<pointcloud_type> (*pc_col);
    sor.setInputCloud (const_cloud_ptr);
    sor.filter (*pc_col);
    */
  }
  if(ps->get<bool>("use_root_sift") &&
     (ps->get<std::string>("feature_extractor_type") == "SIFTGPU" ||
      ps->get<std::string>("feature_extractor_type") == "SURF" ||
      ps->get<std::string>("feature_extractor_type") == "GFTT" ||
      ps->get<std::string>("feature_extractor_type") == "SIFT")){
    squareroot_descriptor_space(feature_descriptors_);
  }
}

Node::~Node() {
    delete flannIndex; flannIndex = NULL;
}

void Node::setOdomTransform(tf::StampedTransform gt){
    odom_transform_ = gt;
    odometry_set_=true;
}
void Node::setGroundTruthTransform(tf::StampedTransform gt){
    ground_truth_transform_ = gt;
}
void Node::setBase2PointsTransform(tf::StampedTransform& b2p){
    base2points_ = b2p;
}
tf::StampedTransform Node::getOdomTransform() const {
    return odom_transform_;
}
tf::StampedTransform Node::getGroundTruthTransform() const {
    return ground_truth_transform_;
}
tf::StampedTransform Node::getBase2PointsTransform() const {
    return base2points_;
}

#ifdef USE_ICP_CODE
bool Node::getRelativeTransformationTo_ICP_code(const Node* target_node,
                                                Eigen::Matrix4f& transformation,
                                                const Eigen::Matrix4f& initial_transformation)
{
  ScopedTimer s(__FUNCTION__);
  dgc_transform_t initial;
  Eigen2GICP(initial_transformation,initial);

  dgc_transform_t final_trafo;
  dgc_transform_identity(final_trafo);

  gicp_mutex.lock();
  dgc::gicp::GICPPointSet* gicp_point_set = this->getGICPStructure();
  ROS_INFO("This node's (%d) point set: %d", this->id_, gicp_point_set->Size());
  dgc::gicp::GICPPointSet* target_gicp_point_set = target_node->getGICPStructure();
  ROS_INFO("Other node's (%d) point set: %d", target_node->id_, target_gicp_point_set->Size());
  int iterations = gicp_max_iterations;
  if(gicp_point_set->Size() > Node::gicp_min_point_cnt && 
     target_gicp_point_set->Size() > Node::gicp_min_point_cnt)
  {
   iterations = target_gicp_point_set->AlignScan(gicp_point_set, initial, final_trafo, gicp_d_max_);
   GICP2Eigen(final_trafo,transformation);
  } else {
    ROS_WARN("GICP Point Sets not big enough. Skipping ICP");
  }
  gicp_mutex.unlock();


  return iterations <= gicp_max_iterations;
}

void Node::clearGICPStructure() const
{
    gicp_mutex.lock();
    delete gicp_point_set_; gicp_point_set_ = NULL;
    gicp_mutex.unlock();
}
dgc::gicp::GICPPointSet* Node::getGICPStructure(unsigned int max_count) const
{
  ScopedTimer s(__FUNCTION__);
  if(max_count == 0) max_count = ParameterServer::instance()->get<int>("gicp_max_cloud_size");
  //Use Cache
  if(gicp_point_set_ != NULL){
    return gicp_point_set_;
  }
  
  dgc::gicp::GICPPointSet* gicp_point_set = new dgc::gicp::GICPPointSet();

  dgc::gicp::GICPPoint g_p;
  g_p.range = -1;
  for(int k = 0; k < 3; k++) {
    for(int l = 0; l < 3; l++) {
      g_p.C[k][l] = (k == l)?1:0;
    }
  }

  std::vector<dgc::gicp::GICPPoint> non_NaN;
  non_NaN.reserve((*pc_col).points.size());
  for (unsigned int i=0; i<(*pc_col).points.size(); i++ ){
    point_type&  p = (*pc_col).points.at(i);
    if (!isnan(p.z)) { // add points to candidate pointset for icp
      g_p.x=p.x;
      g_p.y=p.y;
      g_p.z=p.z;
      non_NaN.push_back(g_p);
    }
  }
  float step = non_NaN.size()/static_cast<float>(max_count);
  step =  step < 1.0 ? 1.0 : step; //only skip, don't use points more than once
  for (float i=0; i<non_NaN.size(); i+=step ){
    gicp_point_set->AppendPoint(non_NaN[static_cast<unsigned int>(i)]);
  }
  ROS_INFO("GICP point set size: %i", gicp_point_set->Size() );
  
  if(gicp_point_set->Size() > Node::gicp_min_point_cnt){
    ScopedTimer s("GICP structure creation");
    // build search structure for gicp:
    gicp_point_set->SetDebug(true);
    gicp_point_set->SetGICPEpsilon(gicp_epsilon);
    gicp_point_set->BuildKDTree();
    gicp_point_set->ComputeMatrices();
    gicp_point_set->SetMaxIterationInner(8); // as in test_gicp.cpp
    gicp_point_set->SetMaxIteration(gicp_max_iterations);
  }
  else
  {
    ROS_WARN("GICP point set too small, this node will not be aligned with GICP!");
  }
  //ROS_INFO_STREAM("time for creating the structure: " << ((std::clock()-starttime_gicp*1.0) / (double)CLOCKS_PER_SEC));
  //ROS_INFO_STREAM("current: " << std::clock() << " " << "start_time: " << starttime_gicp);

  gicp_point_set_ = gicp_point_set;
  return gicp_point_set;
}
#endif

//Build flann index on demand
const cv::flann::Index* Node::getFlannIndex() const {
// build search structure for descriptor matching
  if (flannIndex == NULL
      && ParameterServer::instance()->get<std::string> ("matcher_type") == "FLANN" 
      && ParameterServer::instance()->get<std::string> ("feature_detector_type") != "GICP"
      && ParameterServer::instance()->get<std::string> ("feature_extractor_type") != "ORB")
  {
    ScopedTimer s(__FUNCTION__);
    //KDTreeIndexParams: the index constructed will consist of a set of
    //randomized kd-trees, which will be searched in parallel.
    flannIndex = new cv::flann::Index(feature_descriptors_, cv::flann::KDTreeIndexParams(4));
    ROS_DEBUG("Built flannIndex (address %p) for Node %i", flannIndex, this->id_);
  }
  else if (flannIndex == NULL
      && ParameterServer::instance()->get<std::string> ("matcher_type") == "FLANN" 
      && ParameterServer::instance()->get<std::string> ("feature_extractor_type") == "ORB")
  {
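    // For binary descriptors such as ORB, FLANN uses a locality-sensitive-hashing index
    // instead of randomized kd-trees. The LshIndexParams arguments are (number of hash
    // tables, hash key size in bits, multi-probe level).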
    flannIndex = new cv::flann::Index(feature_descriptors_, cv::flann::LshIndexParams(10, 10, 2));
  }

  return flannIndex;
}

bool isNearer(const cv::DMatch& m1, const cv::DMatch& m2) { 
  return m1.distance < m2.distance; 
}
//Throw away the worst matches based on their distance, which holds the nn ratio set in featureMatching(...)
static void keepStrongestMatches(int n, std::vector<cv::DMatch>* matches)
{
  if(matches->size() > static_cast<size_t>(n))
  {
    //m1 better than m2?
    //auto lambda = [](const cv::DMatch& m1, const cv::DMatch& m2) { return m1.distance < m2.distance; };

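    // std::nth_element partially orders the matches in O(size) on average, so the n
    // best (smallest-distance) matches end up before 'nth'; everything behind it can
    // then be erased without paying for a full sort.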
    std::vector<cv::DMatch>::iterator nth = matches->begin() + n;
    std::nth_element(matches->begin(), nth, matches->end(), isNearer);
    matches->erase(nth, matches->end());
  }
}


//TODO: This function seems to be resistant to parallelization, probably due to knnSearch
unsigned int Node::featureMatching(const Node* other, std::vector<cv::DMatch>* matches) const 
{
  ScopedTimer s(__FUNCTION__);
  assert(matches->size()==0);
  // number of neighbours found (two, to compare the best matches for distinctness)
  const int k = 2;
  //unsigned int one_nearest_neighbour = 0, two_nearest_neighbours = 0;

  double sum_distances = 0.0;
  ParameterServer* ps = ParameterServer::instance();
  //const int min_kp = ps->get<int> ("min_keypoints");

  //using siftgpu, if available and wanted
  if(ps->get<std::string>("feature_detector_type") == "GICP"){
    return 0;
  }
#ifdef USE_SIFT_GPU
  if (ps->get<std::string> ("matcher_type") == "SIFTGPU") {
    siftgpu_mutex.lock();
    sum_distances = SiftGPUWrapper::getInstance()->match(siftgpu_descriptors, feature_descriptors_.rows, other->siftgpu_descriptors, other->feature_descriptors_.rows, matches);
    siftgpu_mutex.unlock();
  }
  else
#endif
  //using BruteForceMatcher for ORB features
  if (ps->get<std::string> ("feature_extractor_type") == "ORB")
  {
    ROS_INFO_COND(ps->get<std::string>("matcher_type") == "FLANN", "You specified FLANN with ORB, this is SLOW! Using BRUTEFORCE instead.");
    if(ps->get<std::string> ("matcher_type") != "BRUTEFORCE_OPENCV"){
        ScopedTimer s("My bruteforce Search", false, true);
        uint64_t* query_value =  reinterpret_cast<uint64_t*>(this->feature_descriptors_.data);
        uint64_t* search_array = reinterpret_cast<uint64_t*>(other->feature_descriptors_.data);
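        // An ORB descriptor is 256 bits (32 bytes); interpreting it as four uint64_t
        // words lets bruteForceSearchORB compute Hamming distances (presumably via
        // per-word XOR plus popcount), which is much faster than byte-wise comparison.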
        for(unsigned int i = 0; i < this->feature_locations_2d_.size(); ++i, query_value += 4){//ORB feature = 32*8bit = 4*64bit
          int result_index = -1;
          int hd = bruteForceSearchORB(query_value, search_array, other->feature_locations_2d_.size(), result_index);
          if(hd >= 128) continue;//not more than half of the bits matching: Random
          cv::DMatch match(i, result_index, hd /256.0 + (float)rand()/(1000.0*RAND_MAX));
          matches->push_back(match);
        }
    }
    else //OpenCV brute-force matching, requested via matcher_type == "BRUTEFORCE_OPENCV"
    {
      ScopedTimer s("OpenCV bruteforce Search", false, true);
      std::string brute_force_type("BruteForce-HammingLUT"); //L2 by default, Hamming for the binary ORB descriptors
      cv::Ptr<cv::DescriptorMatcher> matcher = cv::DescriptorMatcher::create(brute_force_type);
      std::vector< std::vector<cv::DMatch> > bruteForceMatches;
      matcher->knnMatch(feature_descriptors_, other->feature_descriptors_, bruteForceMatches, k);
      double max_dist_ratio_fac = ps->get<double>("nn_distance_ratio");
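      // Lowe's nearest-neighbor ratio test: a match is accepted only if its distance is
      // clearly smaller than the distance to the second-best candidate; ambiguous
      // matches (ratio close to 1) are likely wrong correspondences and are discarded.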
      //if ((int)bruteForceMatches.size() < min_kp) max_dist_ratio_fac = 1.0; //if necessary use possibly bad descriptors
      srand((long)std::clock());
      std::set<int> train_indices;
      matches->clear();
      matches->reserve(bruteForceMatches.size());
      for(unsigned int i = 0; i < bruteForceMatches.size(); i++) {
          cv::DMatch m1 = bruteForceMatches[i][0];
          cv::DMatch m2 = bruteForceMatches[i][1];
          float dist_ratio_fac = m1.distance / m2.distance;
          if (dist_ratio_fac < max_dist_ratio_fac) {//this check seems crucial to matching quality
              int train_idx = m1.trainIdx;
              if(train_indices.count(train_idx) > 0)
                continue; //FIXME: Keep better

              train_indices.insert(train_idx);
              sum_distances += m1.distance;
              m1.distance = dist_ratio_fac + (float)rand()/(1000.0*RAND_MAX); //add a small random offset to the distance: the matches are later inserted into a set, which drops duplicates, and duplicates are detected via the less-than operator on the distance. Equal distances, which happen very often for ORB, must therefore be avoided
              matches->push_back(m1);
          } 

      }
    }
    //matcher->match(feature_descriptors_, other->feature_descriptors_, *matches);
  } 
  else if (ps->get<std::string>("matcher_type") == "FLANN") // && ps->get<std::string>("feature_extractor_type") != "ORB")
  {
    ScopedTimer s("OpenCV FLANN Search", false, true);
    int start_feature = 0; //feature_descriptors_.rows - std::min(2*ps->get<int>("max_matches"), feature_descriptors_.rows);//FIXME: search only for best keypoints (in SIFTGPU they are at the back)
    int num_features = feature_descriptors_.rows;                               //compute features per chunk
    { //old block of (removed) search for matches chunkwise
      // compare
      // http://opencv-cocoa.googlecode.com/svn/trunk/samples/c/find_obj.cpp
      cv::Mat indices(num_features-start_feature, k, CV_32S);
      cv::Mat dists(num_features-start_feature, k, CV_32F);
      cv::Mat relevantDescriptors = feature_descriptors_.rowRange(start_feature, num_features);

      // get the best two neighbors
      {
        ScopedTimer s("FLANN KNN-search");
        /* 16: The number of times the tree(s) in the index should be
         * recursively traversed. A higher value for this parameter would give
         * better search precision, but also take more time. If automatic
         * configuration was used when the index was created, the number of
         * checks required to achieve the specified precision was also
         * computed, in which case this parameter is ignored.
         */
        other->knnSearch(relevantDescriptors, indices, dists, k, cv::flann::SearchParams(16));
      }

      cv::DMatch match;
      double avg_ratio = 0.0;
      double max_dist_ratio_fac = ps->get<double>("nn_distance_ratio");
      std::set<int> train_indices;
      matches->clear();
      matches->reserve(indices.rows);
      for(int i = 0; i < indices.rows; ++i) {
        float dist_ratio_fac =  dists.at<float>(2*i) / dists.at<float>(2*i + 1);
        avg_ratio += dist_ratio_fac;
        //if (indices.rows < min_kp) dist_ratio_fac = 1.0; //if necessary use possibly bad descriptors
        if (max_dist_ratio_fac > dist_ratio_fac) {
          int train_idx = indices.at<int>(2 * i);
          if(train_indices.count(train_idx) > 0)
            continue; //FIXME: Keep better

          train_indices.insert(train_idx);
          match.queryIdx = i+start_feature;
          match.trainIdx = train_idx;
          match.distance = dist_ratio_fac; //dists_ptr[2 * i];
          sum_distances += match.distance;

          assert(match.trainIdx < other->feature_descriptors_.rows);
          assert(match.queryIdx < feature_descriptors_.rows);
          matches->push_back(match);
        }
      }
      
      ROS_INFO("Feature Matches between Nodes %3d (%4d features) and %3d (%4d features) (features %d to %d of first node):\t%4d. Percentage: %f%%, Avg NN Ratio: %f",
                this->id_, (int)this->feature_locations_2d_.size(), other->id_, (int)other->feature_locations_2d_.size(), start_feature, num_features, 
                (int)matches->size(), (100.0*matches->size())/((float)num_features-start_feature), avg_ratio / (num_features-start_feature));

    }//end of (formerly chunkwise) search block
  }
  else {
      ROS_FATAL_STREAM("Cannot match features:\nNo valid combination for " <<
                       "matcher_type ("           << ps->get<std::string>("matcher_type") << ") and " <<
                       "feature_extractor_type (" << ps->get<std::string>("feature_extractor_type") << ") chosen.");
  }

  keepStrongestMatches(ps->get<int>("max_matches"), matches);

  ROS_DEBUG_NAMED("statistics", "count_matrix(%3d, %3d) =  %4d;",
                  this->id_+1, other->id_+1, (int)matches->size());
  ROS_DEBUG_NAMED("statistics", "dista_matrix(%3d, %3d) =  %f;",
                 this->id_+1, other->id_+1, sum_distances/ (float)matches->size());
  ROS_DEBUG_NAMED("statistics", "Feature Matches between Nodes %3d (%4d features) and %3d (%4d features):\t%4d",
                  this->id_, (int)this->feature_locations_2d_.size(),
                  other->id_, (int)other->feature_locations_2d_.size(),
                  (int)matches->size());

  //ROS_INFO("matches size: %i, rows: %i", (int) matches->size(), feature_descriptors_.rows);

  //assert(one_nearest_neighbour+two_nearest_neighbours > 0);
  //return static_cast<float>(one_nearest_neighbour) / static_cast<float>(one_nearest_neighbour+two_nearest_neighbours);
  return matches->size();
}


#ifdef USE_SIFT_GPU
void Node::projectTo3DSiftGPU(std::vector<cv::KeyPoint>& feature_locations_2d,
                              std::vector<Eigen::Vector4f, Eigen::aligned_allocator<Eigen::Vector4f> >& feature_locations_3d,
                              const cv::Mat& depth,
                              const sensor_msgs::CameraInfoConstPtr& cam_info,
                              std::vector<float>& descriptors_in, cv::Mat& descriptors_out)
{
  ScopedTimer s(__FUNCTION__);

  ParameterServer* ps = ParameterServer::instance();
  double depth_scaling = ps->get<double>("depth_scaling_factor");
  size_t max_keyp = ps->get<int>("max_keypoints");
  float x,y,z; //temporary point
  //principal point and focal lengths (from parameters if set, else from the camera info):
  float fxinv = 1./ (ps->get<double>("depth_camera_fx") != 0 ? ps->get<double>("depth_camera_fx") : cam_info->K[0]);
  float fyinv = 1./ (ps->get<double>("depth_camera_fy") != 0 ? ps->get<double>("depth_camera_fy") : cam_info->K[4]);
  float cx = ps->get<double>("depth_camera_cx") != 0 ? ps->get<double>("depth_camera_cx") : cam_info->K[2];
  float cy = ps->get<double>("depth_camera_cy") != 0 ? ps->get<double>("depth_camera_cy") : cam_info->K[5];
  cv::Point2f p2d;

  if(feature_locations_3d.size()){
    ROS_INFO("There is already 3D Information in the FrameInfo, clearing it");
    feature_locations_3d.clear();
  }

  std::list<int> featuresUsed;

  int index = -1;

  bool use_feature_min_depth = ParameterServer::instance()->get<bool>("use_feature_min_depth");
  for(unsigned int i = 0; i < feature_locations_2d.size(); /*increment at end of loop*/){
    ++index;

    p2d = feature_locations_2d[i].pt;
    float Z;
    if(use_feature_min_depth ){
      Z = getMinDepthInNeighborhood(depth, p2d, feature_locations_2d[i].size) * depth_scaling;
    } else {
      Z = depth.at<float>(round(p2d.y), round(p2d.x)) * depth_scaling; //Rounding is unfortunately necessary: occasionally, truncating the subpixel coordinates shifted the point. This happened only for ORB, which produces coordinates like 10.9999959
    }
    // Check for invalid measurements
    if (std::isnan (Z))
    {
      feature_locations_2d.erase(feature_locations_2d.begin()+i);
      continue;
    }
    else
    {
      backProject(fxinv, fyinv, cx, cy, p2d.x, p2d.y, Z, x, y, z);
    }

    feature_locations_3d.push_back(Eigen::Vector4f(x, y, z, 1.0));
    featuresUsed.push_back(index);  //save id for constructing the descriptor matrix
    i++; //Only increment if no element is removed from vector
    if(feature_locations_3d.size() >= max_keyp) break;
  }

  //create descriptor matrix
  int size = feature_locations_3d.size();
  descriptors_out = cv::Mat(size, 128, CV_32F);
  siftgpu_descriptors.resize(size * 128);
  assert(featuresUsed.size() == feature_locations_3d.size());
  for (int y = 0; y < size; ++y) {
    int id = featuresUsed.front();
    featuresUsed.pop_front();

    for (int x = 0; x < 128; ++x) {
      float tmp = descriptors_in[id * 128 + x];
      descriptors_out.at<float>(y, x) = tmp;
      siftgpu_descriptors[y * 128 + x] = tmp;
    }
  }

  feature_locations_2d.resize(feature_locations_3d.size());
}

#ifdef HEMACLOUDS
bool searchLabelInNeighborhood( const pointcloud_type::Ptr point_cloud, cv::Point2f center, float diameter, int label)
{
    // Get neighbourhood area of keypoint
    int radius = (diameter - 1)/2;
    int top   = center.y - radius; top   = top   < 0 ? 0 : top;
    int left  = center.x - radius; left  = left  < 0 ? 0 : left;
    int bot   = center.y + radius; bot   = bot   >= point_cloud->height ? point_cloud->height-1 : bot;
    int right = center.x + radius; right = right >= point_cloud->width ? point_cloud->width-1 : right;
    for(int x = left; x <= right; ++x){
      for(int y = top; y <= bot; ++y){
        if(point_cloud->at(x,y).label == label){
          return true;
        }
      } 
    } 
    return false;
}
#endif

void Node::projectTo3DSiftGPU(std::vector<cv::KeyPoint>& feature_locations_2d,
                              std::vector<Eigen::Vector4f, Eigen::aligned_allocator<Eigen::Vector4f> >& feature_locations_3d,
                              const pointcloud_type::Ptr point_cloud, 
                              std::vector<float>& descriptors_in, cv::Mat& descriptors_out)
{
  ScopedTimer s(__FUNCTION__);
  size_t max_keyp = ParameterServer::instance()->get<int>("max_keypoints");
  cv::Point2f p2d;

  if(feature_locations_3d.size()){
    ROS_INFO("There is already 3D Information in the FrameInfo, clearing it");
    feature_locations_3d.clear();
  }

  std::list<int> featuresUsed;

  int index = -1;
  for(unsigned int i = 0; i < feature_locations_2d.size(); /*increment at end of loop*/){
    ++index;

    p2d = feature_locations_2d[i].pt;
    point_type p3d = point_cloud->at((int) p2d.x,(int) p2d.y);

    // Check for invalid measurements
    if (std::isnan(p3d.z))
    {
      ROS_DEBUG("Feature %d has been extracted at NaN depth. Omitting", i);
      feature_locations_2d.erase(feature_locations_2d.begin()+i);
      continue;
    }

#ifdef HEMACLOUDS
    int target_label = ParameterServer::instance()->get<int>("segment_to_optimize");
    //FIXME: Reversed: Label must not be there
    if(target_label >= 0 && searchLabelInNeighborhood(point_cloud, p2d, 1, target_label)){ // Optimize transformation estimation specifically for features in segment
      feature_locations_2d.erase(feature_locations_2d.begin()+i);
      continue;
    }
#endif
    feature_locations_3d.push_back(Eigen::Vector4f(p3d.x, p3d.y, p3d.z, 1));
    i++; //Only increment if no element is removed from vector
    featuresUsed.push_back(index);  //save id for constructing the descriptor matrix
    if(feature_locations_3d.size() >= max_keyp) break;
  }

  //create descriptor matrix
  int size = feature_locations_3d.size();
  descriptors_out = cv::Mat(size, 128, CV_32F);
  siftgpu_descriptors.resize(size * 128);
  for (int y = 0; y < size && featuresUsed.size() > 0; ++y) {
    int id = featuresUsed.front();
    featuresUsed.pop_front();

    for (int x = 0; x < 128; ++x) {
      descriptors_out.at<float>(y, x) = descriptors_in[id * 128 + x];
      siftgpu_descriptors[y * 128 + x] = descriptors_in[id * 128 + x];
    }
  }

  feature_locations_2d.resize(feature_locations_3d.size());

}
#endif

void Node::projectTo3D(std::vector<cv::KeyPoint>& feature_locations_2d,
                       std::vector<Eigen::Vector4f, Eigen::aligned_allocator<Eigen::Vector4f> >& feature_locations_3d,
                       pointcloud_type::ConstPtr point_cloud)
{
  ScopedTimer s(__FUNCTION__);

  size_t max_keyp = ParameterServer::instance()->get<int>("max_keypoints");
  double maximum_depth = ParameterServer::instance()->get<double>("maximum_depth");
  cv::Point2f p2d;

  if(feature_locations_3d.size()){
    ROS_INFO("There is already 3D Information in the FrameInfo, clearing it");
    feature_locations_3d.clear();
  }

  for(unsigned int i = 0; i < feature_locations_2d.size(); /*increment at end of loop*/){
    p2d = feature_locations_2d[i].pt;
    if (p2d.x >= point_cloud->width || p2d.x < 0 ||
        p2d.y >= point_cloud->height || p2d.y < 0 ||
        std::isnan(p2d.x) || std::isnan(p2d.y)){ //TODO: Unclear why points should be outside the image or be NaN
      ROS_WARN_STREAM("Ignoring invalid keypoint: " << p2d); //Does it happen at all? If not, remove this code block
      feature_locations_2d.erase(feature_locations_2d.begin()+i);
      continue;
    }

    point_type p3d = point_cloud->at((int) p2d.x,(int) p2d.y);

    // Check for invalid measurements
    if ( (p3d.z > maximum_depth) || std::isnan(p3d.x) || std::isnan(p3d.y) || std::isnan(p3d.z))
    {
      ROS_DEBUG_NAMED(__FILE__, "Feature %d has been extracted at NaN depth. Omitting", i);
      feature_locations_2d.erase(feature_locations_2d.begin()+i);
      continue;
    }

    feature_locations_3d.push_back(Eigen::Vector4f(p3d.x, p3d.y, p3d.z, 1.0));
    i++; //Only increment if no element is removed from vector
    if(feature_locations_3d.size() >= max_keyp) break;
  }

  ROS_INFO("Feature 2d size: %zu, 3D: %zu", feature_locations_2d.size(), feature_locations_3d.size());
  feature_locations_2d.resize(feature_locations_3d.size());
  ROS_INFO("Feature 2d size: %zu, 3D: %zu", feature_locations_2d.size(), feature_locations_3d.size());
}

void Node::projectTo3D(std::vector<cv::KeyPoint>& feature_locations_2d,
                       std::vector<Eigen::Vector4f, Eigen::aligned_allocator<Eigen::Vector4f> >& feature_locations_3d,
                       const cv::Mat& depth,
                       const sensor_msgs::CameraInfoConstPtr& cam_info)
{
  ScopedTimer s(__FUNCTION__);

  ParameterServer* ps = ParameterServer::instance();
  double depth_scaling = ps->get<double>("depth_scaling_factor");
  size_t max_keyp = ps->get<int>("max_keypoints");
  double maximum_depth = ps->get<double>("maximum_depth");
  float x,y,z; //temporary point
  //principal point and focal lengths (from parameters if set, else from the camera info):
  float fxinv = 1./ (ps->get<double>("depth_camera_fx") != 0 ? ps->get<double>("depth_camera_fx") : cam_info->K[0]);
  float fyinv = 1./ (ps->get<double>("depth_camera_fy") != 0 ? ps->get<double>("depth_camera_fy") : cam_info->K[4]);
  float cx = ps->get<double>("depth_camera_cx") != 0 ? ps->get<double>("depth_camera_cx") : cam_info->K[2];
  float cy = ps->get<double>("depth_camera_cy") != 0 ? ps->get<double>("depth_camera_cy") : cam_info->K[5];
  /*
  float cx = 325.1;//cam_info->K[2]; //(cloud->width >> 1) - 0.5f;
  float cy = 249.7;//cam_info->K[5]; //(cloud->height >> 1) - 0.5f;
  float fxinv = 1.0/521.0;//1.0f / cam_info->K[0]; 
  float fyinv = 1.0/521.0;//1.0f / cam_info->K[4]; 
  */
  cv::Point2f p2d;

  if(feature_locations_3d.size()){
    ROS_INFO("There is already 3D Information in the FrameInfo, clearing it");
    feature_locations_3d.clear();
  }
  bool use_feature_min_depth = ParameterServer::instance()->get<bool>("use_feature_min_depth");
  for(unsigned int i = 0; i < feature_locations_2d.size(); /*increment at end of loop*/){
    p2d = feature_locations_2d[i].pt;
    if (p2d.x >= depth.cols || p2d.x < 0 ||
        p2d.y >= depth.rows || p2d.y < 0 ||
        std::isnan(p2d.x) || std::isnan(p2d.y)){ //TODO: Unclear why points should be outside the image or be NaN
      ROS_WARN_STREAM("Ignoring invalid keypoint: " << p2d); //Does it happen at all? If not, remove this code block
      feature_locations_2d.erase(feature_locations_2d.begin()+i);
      continue;
    }
    float Z;
    if(use_feature_min_depth){
      Z = getMinDepthInNeighborhood(depth, p2d, feature_locations_2d[i].size) * depth_scaling;
    } else {
      Z = depth.at<float>(round(p2d.y), round(p2d.x)) * depth_scaling; //Rounding is unfortunately necessary: occasionally, truncating the subpixel coordinates shifted the point. This happened only for ORB, which produces coordinates like 10.9999959
      //printMatrixInfo(depth, "Depth Image");
      //ROS_INFO("X Y Z: %f %f %f", p2d.x, p2d.y, Z);
    }
    // Check for invalid measurements
    if(std::isnan (Z))
    {
      ROS_DEBUG("Feature %d has been extracted at NaN depth. Omitting", i);
      //FIXME Use parameter here to choose whether to use
      feature_locations_2d.erase(feature_locations_2d.begin()+i);
      continue;
    }
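    // Standard pinhole back-projection (presumably what backProject implements):
    //   x = (u - cx) * Z * fxinv,   y = (v - cy) * Z * fyinv,   z = Z
    // i.e., the inverse of the projection u = fx*x/z + cx, v = fy*y/z + cy.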
    backProject(fxinv, fyinv, cx, cy, p2d.x, p2d.y, Z, x, y, z);

    feature_locations_3d.push_back(Eigen::Vector4f(x, y, z, 1.0));
    i++; //Only increment if no element is removed from vector
    if(feature_locations_3d.size() >= max_keyp) break;
  }

  //ROS_INFO("Feature 2d size: %zu, 3D: %zu", feature_locations_2d.size(), feature_locations_3d.size());
  feature_locations_2d.resize(feature_locations_3d.size());
  ROS_INFO("Feature 2d size: %zu, 3D: %zu", feature_locations_2d.size(), feature_locations_3d.size());
}


void Node::computeInliersAndError(const std::vector<cv::DMatch> & all_matches,
                                  const Eigen::Matrix4f& transformation4f,
                                  const std::vector<Eigen::Vector4f, Eigen::aligned_allocator<Eigen::Vector4f> >& origins,
                                  const std::vector<Eigen::Vector4f, Eigen::aligned_allocator<Eigen::Vector4f> >& earlier,
                                  size_t min_inliers, //break if this can't be reached
                                  std::vector<cv::DMatch>& inliers, //pure output var
                                  double& return_mean_error,//pure output var: rms-mahalanobis-distance
                                  //std::vector<double>& errors,
                                  double squaredMaxInlierDistInM) const
{ 
  inliers.clear();
  assert(all_matches.size() > 0);
  inliers.reserve(all_matches.size());
  //errors.clear();
  const size_t all_matches_size = all_matches.size();
  double mean_error = 0.0;
  Eigen::Matrix4d transformation4d = transformation4f.cast<double>();

//parallelization is detrimental here
//#pragma omp parallel for reduction (+: mean_error)
  for(size_t i=0; i < all_matches_size; ++i)
  //BOOST_FOREACH(const cv::DMatch& m, all_matches)
  {
    const cv::DMatch& m = all_matches[i];
    const Eigen::Vector4f& origin = origins[m.queryIdx];
    const Eigen::Vector4f& target = earlier[m.trainIdx];
    if(origin(2) == 0.0 || target(2) == 0.0){ //does NOT trigger on NaN
       continue;
    }
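    // Per the "Mahalanobis_ML_Error" naming below, errorFunction2 presumably returns a
    // squared Mahalanobis distance: the squared difference between the transformed point
    // and its match, weighted by the (depth-dependent) measurement covariance, so that
    // noisy far-away points are penalized less harshly than precise nearby ones.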
    double mahal_dist = errorFunction2(origin, target, transformation4d);
    if(mahal_dist > squaredMaxInlierDistInM){
      continue; //ignore outliers
    }
    if(!(mahal_dist >= 0.0)){
      ROS_WARN_STREAM("Mahalanobis_ML_Error: "<<mahal_dist);
      ROS_WARN_STREAM("Transformation for error !>= 0:\n" << transformation4d << "Matches: " << all_matches.size());
      continue;
    }
    mean_error += mahal_dist;
//#pragma omp critical
    inliers.push_back(m); //include inlier
  }


  if (inliers.size()<3){ //at least the samples should be inliers
    ROS_DEBUG("No inliers at all in %d matches!", (int)all_matches.size()); // only warn if this checks for all initial matches
    return_mean_error = 1e9;
  } else {
    mean_error /= inliers.size();
    return_mean_error = sqrt(mean_error);
  }

}

std::vector<cv::DMatch> sample_matches_prefer_by_distance(unsigned int sample_size, std::vector<cv::DMatch>& matches_with_depth)
{
    //Sample ids to pick matches later on (ids are unique, whereas the DMatch
    //operator< overload breaks uniqueness of the matches if they have the
    //exact same distance, e.g., 0.0)
    std::set<std::vector<cv::DMatch>::size_type> sampled_ids;
    int safety_net = 0;
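    // Sampling bias: the minimum of two uniform draws follows a (discrete) triangular
    // distribution, so low indices are picked more often. Since the caller sorts the
    // matches by ascending distance, good matches are preferred while every match still
    // has a nonzero chance of being sampled.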
    while(sampled_ids.size() < sample_size && matches_with_depth.size() >= sample_size){
      //generate a set of samples. Using a set solves the problem of drawing a sample more than once
      int id1 = rand() % matches_with_depth.size();
      int id2 = rand() % matches_with_depth.size();
      if(id1 > id2) id1 = id2; //use smaller one => increases chance for lower id
      sampled_ids.insert(id1);
      if(++safety_net > 10000){ ROS_ERROR("Infinite Sampling"); break; } 
    }

    //Given the ids, construct the resulting vector
    std::vector<cv::DMatch> sampled_matches;
    sampled_matches.reserve(sampled_ids.size());
    BOOST_FOREACH(std::vector<cv::DMatch>::size_type id, sampled_ids){
      sampled_matches.push_back(matches_with_depth[id]);
    }
    return sampled_matches;
}

std::vector<cv::DMatch> sample_matches(unsigned int sample_size, std::vector<cv::DMatch>& matches_with_depth)
{
    //Sample ids to pick matches later on (ids are unique, whereas the DMatch
    //operator< overload breaks uniqueness of the matches if they have the
    //exact same distance, e.g., 0.0)
    std::set<std::vector<cv::DMatch>::size_type> sampled_ids;
    int safety_net = 0;
    while(sampled_ids.size() < sample_size && matches_with_depth.size() >= sample_size){
      //generate a set of samples. Using a set solves the problem of drawing a sample more than once
      sampled_ids.insert(rand() % matches_with_depth.size());
      if(++safety_net > 10000){ ROS_ERROR("Infinite Sampling"); break; } 
    }

    //Given the ids, construct the resulting vector
    std::vector<cv::DMatch> sampled_matches;
    sampled_matches.reserve(sampled_ids.size());
    BOOST_FOREACH(std::vector<cv::DMatch>::size_type id, sampled_ids){
      sampled_matches.push_back(matches_with_depth[id]);
    }
    return sampled_matches;
}

bool Node::getRelativeTransformationTo(const Node* earlier_node,
                                       std::vector<cv::DMatch>* initial_matches,
                                       Eigen::Matrix4f& resulting_transformation,
                                       float& rmse, 
                                       std::vector<cv::DMatch>& matches) const
{
  ScopedTimer s(__FUNCTION__);
  static const bool allow_features_without_depth = ParameterServer::instance()->get<bool>("allow_features_without_depth");
  //VALIDATION
  assert(initial_matches != NULL);
  
  std::stringstream nodesstringstream; nodesstringstream << "Nodes " << (this->id_) << "<->" << (earlier_node->id_);
  std::string nodesstring = nodesstringstream.str();
  if(initial_matches->size() <= (unsigned int) ParameterServer::instance()->get<int>("min_matches")){
    ROS_INFO("%s: Only %d feature matches between %d and %d (minimal: %i)",nodesstring.c_str(), (int)initial_matches->size() , this->id_, earlier_node->id_, ParameterServer::instance()->get<int>("min_matches"));
    return false;
  }

  //PREPARATION
  //unsigned int min_inlier_threshold = int(initial_matches->size()*0.2);
  unsigned int min_inlier_threshold = (unsigned int) ParameterServer::instance()->get<int>("min_matches");
  if(min_inlier_threshold > 0.75 * initial_matches->size()){
    //FIXME: Evaluate whether beneficial
    ROS_INFO("Lowering min_inlier_threshold from %d to %d, because there are only %d matches to begin with", min_inlier_threshold, (int) (0.75 * initial_matches->size()), (int)initial_matches->size());
    min_inlier_threshold = 0.75 * initial_matches->size();
  }

  double inlier_error; //root mean squared error of the inliers
  srand((long)std::clock());
  
  // a point is an inlier if it is no more than max_dist_m meters away from its partner
  const float max_dist_m = ParameterServer::instance()->get<double>("max_dist_for_inliers");
  const int ransac_iterations = ParameterServer::instance()->get<int>("ransac_iterations");
  //std::vector<double> dummy;

  // initialize result values of all iterations 
  matches.clear();
  resulting_transformation = Eigen::Matrix4f::Identity();
  rmse = 1e6;
  unsigned int valid_iterations = 0;//, best_inlier_cnt = 0;
  const unsigned int sample_size = 4;// choose this many randomly from the correspondences:
  bool valid_tf = false; // valid_tf is false iff the sampled points clearly aren't inliers themselves
  
  std::vector<cv::DMatch>* matches_with_depth = initial_matches;
  if(allow_features_without_depth){ 
      matches_with_depth = new std::vector<cv::DMatch>(); //matches without depth can validate, but not create, the transformation
      matches_with_depth->reserve(initial_matches->size());
    BOOST_FOREACH(const cv::DMatch& m, *initial_matches){
        if(!std::isnan(this->feature_locations_3d_[m.queryIdx](2)) 
           && !std::isnan(earlier_node->feature_locations_3d_[m.trainIdx](2)))
            matches_with_depth->push_back(m);
    }
  }
  std::sort(matches_with_depth->begin(), matches_with_depth->end()); //sort by distance, which is the nn_ratio
  
  int real_iterations = 0;
  for(int n = 0; (n < ransac_iterations && matches_with_depth->size() >= sample_size); n++) //Without the minimum number of matches, the transformation cannot be computed as usual. TODO: implement monocular motion estimation
  {
    //Initialize Results of refinement
    double refined_error = 1e6;
    std::vector<cv::DMatch> refined_matches; 
    std::vector<cv::DMatch> inlier = sample_matches_prefer_by_distance(sample_size, *matches_with_depth); //initialization with random samples 
    //std::vector<cv::DMatch> inlier = sample_matches(sample_size, *matches_with_depth); //initialization with random samples 
    Eigen::Matrix4f refined_transformation = Eigen::Matrix4f::Identity();

    real_iterations++;
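    // Hypothesize-and-refine: estimate a transformation from the current inlier set,
    // re-evaluate all matches against it, and iterate with the (hopefully larger)
    // inlier set. The loop stops early once the inlier set no longer grows.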
01138     for(int refinements = 1; refinements < 20 /*got stuck?*/; refinements++) 
01139     {
01140         Eigen::Matrix4f transformation = getTransformFromMatches(this, earlier_node, inlier,valid_tf,max_dist_m);
01141         //Eigen::Matrix4f transformation = getTransformFromMatchesUmeyama(this, earlier_node, inlier,valid_tf);
        if (!valid_tf || transformation != transformation)  //a matrix containing NaN compares unequal to itself
          break; //valid_tf is false iff the sampled points clearly aren't inliers themselves
01144 
01145         //test which features are inliers 
01146         computeInliersAndError(*initial_matches, transformation, 
01147                                this->feature_locations_3d_, //this->feature_depth_stats_, 
01148                                earlier_node->feature_locations_3d_, //earlier_node->feature_depth_stats_, 
01149                                std::max(min_inlier_threshold, static_cast<unsigned int>(refined_matches.size())), //break if no chance to reach this amount of inliers
01150                                inlier, inlier_error, max_dist_m*max_dist_m); 
01151         
        if(inlier.size() < min_inlier_threshold || inlier_error > max_dist_m){
          ROS_DEBUG("Skipped iteration: inliers: %i (min %i), inlier_error: %.2f cm (max %.2f cm)", (int)inlier.size(), (int) min_inlier_threshold, inlier_error*100, max_dist_m*100);
          break; //hopeless case
        }
01156 
01157         //superior to before?
01158         if (inlier.size() >= refined_matches.size() && inlier_error <= refined_error) {
01159           size_t prev_num_inliers = refined_matches.size();
01160           assert(inlier_error>=0);
01161           refined_transformation = transformation;
01162           refined_matches = inlier;
01163           refined_error = inlier_error;
01164           if(inlier.size() == prev_num_inliers) break; //only error improved -> no change would happen next iteration
01165         }
01166         else break;
01167     }  //END REFINEMENTS
01168     //Successful Iteration?
01169     if(refined_matches.size() > 0){ //Valid?
01170         valid_iterations++;
01171         ROS_DEBUG("Valid iteration: inliers/matches: %lu/%lu (min %u), refined error: %.2f (max %.2f), global error: %.2f", 
01172                 refined_matches.size(), matches.size(), min_inlier_threshold,  refined_error, max_dist_m, rmse);
01173 
01174         //Acceptable && superior to previous iterations?
01175         if (refined_error <= rmse &&  
01176             refined_matches.size() >= matches.size() && 
01177             refined_matches.size() >= min_inlier_threshold)
01178         {
          ROS_DEBUG("%s: Improvement in iteration %d: inliers: %i (min %i), inlier_error: %.2f (max %.2f)", nodesstring.c_str(), real_iterations, (int)refined_matches.size(), (int) min_inlier_threshold, refined_error, max_dist_m);
01180           rmse = refined_error;
01181           resulting_transformation = refined_transformation;
01182           matches.assign(refined_matches.begin(), refined_matches.end());
          //Performance hacks: skip ahead in the iteration count once the
          //solution already explains most of the matches, and stop entirely
          //above 80% inliers
          if (refined_matches.size() > initial_matches->size()*0.5) n+=10;
          if (refined_matches.size() > initial_matches->size()*0.75) n+=10;
          if (refined_matches.size() > initial_matches->size()*0.8) break; 
01187         }
01188     }
01189   } //iterations
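  // Fallback: if no sampled hypothesis was ever valid (e.g., because too few
  // matches carry depth), test the identity transformation. For consecutive
  // frames with little sensor motion this "no-motion" guess can be adequate.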
01190   if(valid_iterations == 0) // maybe no depth. Try identity?
01191   { 
01192     //IDENTITYTEST
01193     ROS_INFO("Last Resort: Trying identity as hypothesis");
01194     //1 ransac iteration with identity
01195     Eigen::Matrix4f transformation = Eigen::Matrix4f::Identity();//hypothesis
01196     std::vector<cv::DMatch> inlier; //result
01197     //test which samples are inliers 
01198     computeInliersAndError(*initial_matches, transformation, 
01199                            this->feature_locations_3d_, //this->feature_depth_stats_, 
01200                            earlier_node->feature_locations_3d_, //earlier_node->feature_depth_stats_, 
01201                            min_inlier_threshold, //break if no chance to reach this amount of inliers
01202                            inlier, inlier_error, max_dist_m*max_dist_m); 
01203     
01204     //superior to before?
01205     if (inlier.size() > min_inlier_threshold && inlier_error < max_dist_m) {
01206       assert(inlier_error>=0);
01207       resulting_transformation = transformation;
01208       matches.assign(inlier.begin(), inlier.end());
01209       rmse = inlier_error;
01210       valid_iterations++;
01211       ROS_INFO("No-Motion guess for %i<->%i: inliers: %i (min %i), inlier_error: %.2f (max %.2f)", this->id_, earlier_node->id_, (int)matches.size(), (int) min_inlier_threshold,  rmse, max_dist_m);
01212     }
01213   } //END IDENTITY AS GUESS
  ROS_INFO("%i good iterations (out of %i), inlier pct %i, inlier cnt: %i, error (MHD): %.2f", valid_iterations, ransac_iterations, (int) (matches.size()*1.0/initial_matches->size()*100), (int) matches.size(), rmse);
01215   
01216   //ROS_INFO_STREAM("Transformation estimated:\n" << resulting_transformation);
01217   
01218 
01219 
01220 
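  //Note on the acceptance logic below: the g2o-refined transformation only
  //replaces the RANSAC result if it explains at least as many inliers as
  //before (or clears the inlier threshold with a lower error); otherwise
  //the RANSAC estimate is kept.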
01221   //G2O Refinement (minimize mahalanobis distance, include depthless features in optimization)
01222   //Optimize transform based on latest inliers (in "matches") and initial guess (in "resulting_transformation")
01223   const int g2o_iterations = ParameterServer::instance()->get<int>( "g2o_transformation_refinement");
01224   if(g2o_iterations > 0 && matches.size() > min_inlier_threshold)//Do not do this if RANSAC didn't succeed. Can't handle outliers
01225   {
01226     Eigen::Matrix4f transformation = resulting_transformation;//current hypothesis
01227     getTransformFromMatchesG2O(earlier_node, this,matches, transformation, g2o_iterations);
01228     std::vector<cv::DMatch> inlier; //result
01229 
01230     //Evaluate the new transformation
01231     computeInliersAndError(*initial_matches, transformation, 
01232                            this->feature_locations_3d_, //this->feature_depth_stats_, 
01233                            earlier_node->feature_locations_3d_, //earlier_node->feature_depth_stats_, 
01234                            matches.size(), //minimum to reach
01235                            inlier,inlier_error, //Output!
01236                            max_dist_m*max_dist_m); 
01237     ROS_INFO_STREAM(nodesstring << ": g2o transformation estimated to Node " << earlier_node->id_ << ":\n" << transformation);
    //superior in inlier count, or enough inliers with a better rmse?
    if (inlier.size() >= matches.size() || (inlier.size() >= min_inlier_threshold && inlier_error < rmse)) {
      //if there are more inliers now, refine again with them included
      if (inlier.size() > matches.size()) {
01242         //Refine using the new inliers
01243         getTransformFromMatchesG2O(earlier_node, this,inlier, transformation, g2o_iterations);
01244         computeInliersAndError(*initial_matches, transformation, 
01245                                this->feature_locations_3d_, //this->feature_depth_stats_, 
01246                                earlier_node->feature_locations_3d_, //earlier_node->feature_depth_stats_, 
01247                                inlier.size(), //minimum to reach
01248                                inlier,inlier_error, max_dist_m*max_dist_m); 
01249       }
01250       ROS_INFO("G2o optimization result for %i<->%i: inliers: %i (min %i), inlier_error: %.2f (max %.2f)", this->id_, earlier_node->id_, (int)inlier.size(), (int) min_inlier_threshold,  inlier_error, max_dist_m);
      //Again superior? Then use the new result
01252       if (inlier.size() >= matches.size()) 
01253       {
        ROS_INFO_STREAM("Refined transformation estimate to Node " << earlier_node->id_ << ":\n" << transformation);
01255         assert(inlier_error>=0);
01256         resulting_transformation = transformation;
01257         matches.assign(inlier.begin(), inlier.end());
01258         rmse = inlier_error;
01259         valid_iterations++;
01260         ROS_INFO("G2o refinement optimization result for %i<->%i: inliers: %i (min %i), inlier_error: %.2f (max %.2f)", this->id_, earlier_node->id_, (int)matches.size(), (int) min_inlier_threshold,  rmse, max_dist_m);
01261       }
01262     }
01263     else {
      ROS_INFO("G2O optimization of RANSAC for %s rejected: G2O inliers: %i (min %i), inlier_error: %.2f (max %.2f); RANSAC inliers: %i (min %i), inlier_error: %.2f", nodesstring.c_str(), (int)inlier.size(), (int) min_inlier_threshold, inlier_error, max_dist_m, (int)matches.size(), (int) min_inlier_threshold, rmse);
01265     }
01266   }
01267   
01268 
01269 
  ROS_INFO("%s: %i good iterations (out of %i), inlier pct %i, inlier cnt: %i, error (MHD): %.2f", nodesstring.c_str(), valid_iterations, ransac_iterations, (int) (matches.size()*1.0/initial_matches->size()*100), (int) matches.size(), rmse);
01271   // ROS_INFO("best overall: inlier: %i, error: %.2f",best_inlier_invalid, best_error_invalid*100);
01272 
01273   bool enough_absolute = matches.size() >= min_inlier_threshold;
01274   return enough_absolute;
01275 }
01276 
01277 
01278 #ifdef USE_ICP_CODE
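// Conversion helpers between Eigen's 4x4 matrices and the GICP library's
// dgc_transform_t (a plain 4x4 C array), plus an identity initializer.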
01279 void Node::Eigen2GICP(const Eigen::Matrix4f& m, dgc_transform_t g_m){
01280   for(int i=0;i<4; i++)
01281     for(int j=0;j<4; j++)
01282       g_m[i][j] = m(i,j);
01283 
01284 }
01285 void Node::GICP2Eigen(const dgc_transform_t g_m, Eigen::Matrix4f& m){
01286   for(int i=0;i<4; i++)
01287     for(int j=0;j<4; j++)
01288       m(i,j) = g_m[i][j];
01289 }
01290 
01291 void Node::gicpSetIdentity(dgc_transform_t m){
01292   for(int i=0;i<4; i++)
01293     for(int j=0;j<4; j++)
01294       if (i==j)
01295         m[i][j] = 1;
01296       else
01297         m[i][j] = 0;
01298 }
01299 #endif
01300 
01301 
01302 
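// Compute the relative transformation between this node and older_node:
// feature matching, then RANSAC-based estimation, then (depending on the
// parameters and compile flags) an observability test and ICP-based
// fallback/verification. On failure the returned edge carries the ids -1/-1.
//
// Usage sketch (hypothetical caller; graph_mgr and addEdge are assumptions,
// not part of this file):
//   MatchingResult mr = new_node->matchNodePair(prev_node);
//   if(mr.edge.id1 != -1)
//     graph_mgr.addEdge(mr.edge);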
01303 MatchingResult Node::matchNodePair(const Node* older_node)
01304 {
01305   MatchingResult mr;
01306   try{
01307     bool found_transformation = false;
01308     if(ParameterServer::instance()->get<int>("max_connections") > 0 &&
01309        initial_node_matches_ > ParameterServer::instance()->get<int>("max_connections")) 
01310       return mr; //enough is enough
01311     const unsigned int min_matches = (unsigned int) ParameterServer::instance()->get<int>("min_matches");// minimal number of feature correspondences to be a valid candidate for a link
01312 
01313     this->featureMatching(older_node, &mr.all_matches); 
01314 
01315     double ransac_quality = 0;
    ROS_DEBUG_NAMED(__FILE__, "found %i initial matches", (int) mr.all_matches.size());
    if (mr.all_matches.size() < min_matches){
        ROS_INFO("Too few matches between %i and %i for the RANSAC method. Only %i correspondences to begin with.",
                 older_node->id_, this->id_, (int)mr.all_matches.size());
01320     } 
01321     else {//All good for feature based transformation estimation
01322         found_transformation = getRelativeTransformationTo(older_node,&mr.all_matches, mr.ransac_trafo, mr.rmse, mr.inlier_matches); 
01323         //Statistics
01324         float nn_ratio = 0.0;
01325         if(found_transformation){
01326           //double w = 1.0 + (double)mr.inlier_matches.size()-(double)min_matches;///(double)mr.all_matches.size();
01327           for(unsigned int i = 0; i < mr.inlier_matches.size(); i++){
01328             nn_ratio += mr.inlier_matches[i].distance;
01329           }
01330           nn_ratio /= mr.inlier_matches.size();
01331           //ParameterServer::instance()->set("nn_distance_ratio", static_cast<double>(nn_ratio + 0.2));
01332           mr.final_trafo = mr.ransac_trafo;
          mr.edge.informationMatrix = Eigen::Matrix<double,6,6>::Identity()*(mr.inlier_matches.size()/(mr.rmse*mr.rmse)); //TODO: What do we do about the information matrix? (Currently scaled with the inlier count; should the rmse be integrated?)
01334 
01335           mr.edge.id1 = older_node->id_;//and we have a valid transformation
01336           mr.edge.id2 = this->id_; //since there are enough matching features,
01337           mr.edge.transform = mr.final_trafo.cast<double>();//we insert an edge between the frames
01338           if(ParameterServer::instance()->get<double>("observability_threshold") > 0){
01339             pairwiseObservationLikelihood(this, older_node, mr);
01340             found_transformation = observation_criterion_met(mr.inlier_points, mr.outlier_points, mr.occluded_points + mr.inlier_points + mr.outlier_points, ransac_quality);
01341           } else {
01342             found_transformation = true;
01343           }
01344 
          ROS_INFO("RANSAC found a %s transformation with %d inlier matches with an average ratio of %f", found_transformation ? "valid" : "invalid", (int) mr.inlier_matches.size(), nn_ratio);
01346 
01347         } else {
01348           for(unsigned int i = 0; i < mr.all_matches.size(); i++){
01349             nn_ratio += mr.all_matches[i].distance;
01350           }
01351           nn_ratio /= mr.all_matches.size();
01352           ROS_INFO("RANSAC found no valid trafo, but had initially %d feature matches with average ratio %f",(int) mr.all_matches.size(), nn_ratio);
01353 
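          // If the feature-based estimation failed, fall back to dense ICP,
          // but only for adjacent frames: ICP converges to the nearest local
          // minimum, so the identity initial guess must already be close.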
01354 #ifdef USE_PCL_ICP
01355           if(((int)this->id_ - (int)older_node->id_) <= 1){ //Apply icp only for adjacent frames, as the initial guess needs to be in the global minimum
01356             mr.icp_trafo = Eigen::Matrix4f::Identity();
01357             int max_count = ParameterServer::instance()->get<int>("gicp_max_cloud_size");
01358             pointcloud_type::Ptr tmp1(new pointcloud_type());
01359             pointcloud_type::Ptr tmp2(new pointcloud_type());
01360             filterCloud(*pc_col, *tmp1, max_count);
01361             filterCloud(*(older_node->pc_col), *tmp2, max_count);
01362             mr.icp_trafo = icpAlignment(tmp2, tmp1, mr.icp_trafo);
01363             //pairwiseObservationLikelihood(this, older_node, mr);
01364             //double icp_quality;
01365             found_transformation = true; //observation_criterion_met(mr.inlier_points, mr.outlier_points, mr.occluded_points + mr.inlier_points + mr.outlier_points, icp_quality);
01366             if(found_transformation){
01367               ROS_ERROR("Found trafo via icp");
01368               mr.final_trafo = mr.icp_trafo;
01369               mr.edge.id1 = older_node->id_;//and we have a valid transformation
01370               mr.edge.id2 = this->id_; //since there are enough matching features,
01371               mr.edge.transform = mr.final_trafo.cast<double>();//we insert an edge between the frames
01372               std::cout << std::endl << mr.final_trafo << std::endl;
01373             }
01374           }
01375 #endif
01376         }
01377     } 
01378     
01379 #ifdef USE_ICP_CODE
01380     unsigned int ransac_inlier_points = 0, ransac_outlier_points = 0;
01381     if(ParameterServer::instance()->get<bool>("use_icp")){
        if(found_transformation || (((int)this->id_ - (int)older_node->id_) <= 1)) //without a prior estimate, apply GICP only to adjacent frames, as the identity initial guess needs to be close to the global minimum
           //|| (!found_transformation && initial_node_matches_ == 0)) //no matches were found, and frames are not too far apart => identity is a good initial guess.
01385         {
01386             ROS_INFO("Applying GICP for Transformation between Nodes %d and %d",this->id_ , older_node->id_);
01387             MatchingResult mr_icp;
01388             if(getRelativeTransformationTo_ICP_code(older_node,mr_icp.icp_trafo, mr.ransac_trafo) && //converged
01389                !((mr_icp.icp_trafo.array() != mr_icp.icp_trafo.array()).any())) //No NaNs
01390             {
01391                 ROS_INFO("GICP for Nodes %u and %u Successful", this->id_, older_node->id_);
01392                 double icp_quality;
01393 
01394                 bool no_obs_test = (ParameterServer::instance()->get<double>("observability_threshold") < 0);
01395                 pairwiseObservationLikelihood(this, older_node, mr_icp);
01396                 if(no_obs_test || 
01397                     (observation_criterion_met(mr_icp.inlier_points, mr_icp.outlier_points, mr_icp.occluded_points + mr_icp.inlier_points + mr_icp.outlier_points, icp_quality)
01398                      && icp_quality > ransac_quality))
01399                 {
01400                     //This signals a valid result:
01401                     found_transformation = true;
01402                     mr.edge.id1 = older_node->id_;//and we have a valid transformation
01403                     mr.edge.id2 = this->id_; //since there are enough matching features,
01404                     mr.final_trafo = mr_icp.icp_trafo;    
01405                     mr.edge.informationMatrix = Eigen::Matrix<double,6,6>::Identity()*(1e-2); //TODO: What do we do about the information matrix? 
01406                     mr.edge.mean = eigen2G2O(mr.final_trafo.cast<double>());//we insert an edge between the frames
01407                 }
01408             }
01409         }
01410     }
01411 #endif  
01412 
01413     if(found_transformation) {
01414         ROS_INFO("Returning Valid Edge");
01415         ++initial_node_matches_; //trafo is accepted
01416     }
01417     else {
01418         mr.edge.id1 = -1;
01419         mr.edge.id2 = -1;
01420     }
01421   }
  catch (std::exception& e){ //catch by reference to avoid slicing the exception object
01423     ROS_ERROR("Caught Exception in comparison of Nodes %i and %i: %s", this->id_, older_node->id_, e.what());
01424   }
01425 
01426   return mr;
01427 }
01428 
void Node::clearFeatureInformation(){
  //clear the feature information by swapping the data with empty vectors (so the memory really gets freed)
  ROS_INFO("Deleting feature information of Node %i", this->id_);
  std::vector<Eigen::Vector4f, Eigen::aligned_allocator<Eigen::Vector4f> > f_l_3d;
  f_l_3d.swap(feature_locations_3d_);
  std::vector<cv::KeyPoint> f_l_2d;
  f_l_2d.swap(feature_locations_2d_);
  std::vector<float> f_l_siftgpu;
  f_l_siftgpu.swap(siftgpu_descriptors);
  feature_descriptors_.release();
  delete flannIndex; flannIndex = NULL;
  matchable_ = false;
}
01442 void Node::addPointCloud(pointcloud_type::Ptr new_pc){
01443   pc_col = new_pc;
01444   header_=pc_col->header;
01445 }
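// Downsample this node's point cloud with a voxel grid of leaf size vfs
// (in meters); pcl::VoxelGrid replaces all points within a voxel by their centroid.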
01446 void Node::reducePointCloud(double vfs){
01447   if(vfs > 0.0){
01448     ROS_INFO("Reducing points (%d) of Node %d", (int)pc_col->size(), this->id_);
01449     pcl::VoxelGrid<point_type> sor;
01450     sor.setLeafSize(vfs,vfs,vfs);
    pointcloud_type::ConstPtr const_cloud_ptr = boost::make_shared<pointcloud_type>(*pc_col);
01452     sor.setInputCloud (const_cloud_ptr);
01453     sor.filter (*pc_col);
01454     ROS_INFO("Reduced points of Node %d to %d", this->id_, (int)pc_col->size());
01455   } else {
01456     ROS_WARN("Point Clouds can't be reduced because of invalid voxelfilter_size");
01457   }
01458 }
01459 long Node::getMemoryFootprint(bool write_to_log) const
01460 {
01461   size_t size = 0, tmp = 0;
01462   tmp = sizeof(Node);
01463   ROS_INFO_COND(write_to_log, "Base Size of Node Class: %zu bytes", tmp); 
01464   size += tmp;
01465 
01466   tmp = feature_descriptors_.step * feature_descriptors_.rows;  
01467   ROS_INFO_COND(write_to_log, "Descriptor Information: %zu bytes", tmp);
01468   size += tmp;
01469 
01470   tmp = siftgpu_descriptors.size() * sizeof(float);
01471   ROS_INFO_COND(write_to_log, "SIFTGPU Descriptor Information: %zu bytes", tmp);
01472   size += tmp;
01473 
01474   tmp = feature_locations_2d_.size() * sizeof(cv::KeyPoint);
01475   ROS_INFO_COND(write_to_log, "Feature 2D Location Information: %zu bytes", tmp);
01476   size += tmp;
01477 
01478   tmp = feature_locations_3d_.size() * sizeof(Eigen::Vector4f);
01479   ROS_INFO_COND(write_to_log, "Feature 3D Location Information: %zu bytes", tmp);
01480   size += tmp;
01481 
01482   tmp = pc_col->size() * sizeof(point_type);
01483   ROS_INFO_COND(write_to_log, "Point Cloud: %zu bytes", tmp);
01484   size += tmp;
01485   ROS_INFO_COND(write_to_log, "Rough Summary: %zu Kbytes", size/1024);
01486   ROS_WARN("Rough Summary: %zu Kbytes", size/1024);
01487   return size;
01488 }
01489 void Node::clearPointCloud(){
01490     ROS_INFO("Deleting points of Node %i", this->id_);
01491     //clear only points, by swapping data with empty vector (so mem really gets freed)
01492     pc_col->width = 0;
01493     pc_col->height = 0;
01494     pointcloud_type pc_empty;
01495     pc_empty.points.swap(pc_col->points);
01496 }
01497 
/*TODO use this to discount features at depth jumps (or duplicate them -> sensed position + minimum position)
void Node::computeKeypointDepthStats(const cv::Mat& depth_img, const std::vector<cv::KeyPoint> keypoints)
{
    ROS_INFO("Computing Keypoint Depth Statistics");
    BOOST_FOREACH(cv::KeyPoint kp, keypoints)
    { 
      int radius = kp.size/2;
      int left = kp.pt.x-radius;
      int top  = kp.pt.y-radius;
      double nearest=0.0, farthest=0.0;
      cv::Mat keypoint_neighbourhood(depth_img, cv::Rect(left, top, (int)kp.size, (int)kp.size));
      cv::minMaxLoc(keypoint_neighbourhood, &nearest, &farthest); //determine the depth range before applying the NaN guards
      if(isnan(nearest)) nearest = 1.0;
      if(isnan(farthest)) farthest = 10.0;
      ROS_DEBUG("Nearest: %f, Farthest: %f", nearest, farthest);
      feature_depth_stats_.push_back(std::make_pair(nearest, farthest)); 
    }
}
*/
01517 
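// Symmetric observation-likelihood test: project each node's cloud into the
// other node's camera (using the estimated transformation and its inverse,
// one OpenMP section per direction) and accumulate the inlier/outlier/occluded
// point counts into the MatchingResult.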
01518 void pairwiseObservationLikelihood(const Node* newer_node, const Node* older_node, MatchingResult& mr)
01519 { 
01520       double likelihood, confidence;
01521       unsigned int inlier_points = 0, outlier_points = 0, all_points = 0, occluded_points = 0;
01522       #pragma omp parallel sections reduction (+: inlier_points, outlier_points, all_points, occluded_points)
01523       {
01524         #pragma omp section
01525         {
01526           unsigned int inlier_pts = 0, outlier_pts = 0, occluded_pts = 0, all_pts = 0;
01527           observationLikelihood(mr.final_trafo, newer_node->pc_col, older_node->pc_col, older_node->getCamInfo(), likelihood, confidence, inlier_pts, outlier_pts, occluded_pts, all_pts) ;
01528           ROS_INFO("Observation Likelihood: %d projected to %d: good_point_ratio: %d/%d: %g, occluded points: %d", newer_node->id_, older_node->id_, inlier_pts, inlier_pts+outlier_pts, ((float)inlier_pts)/(inlier_pts+outlier_pts), occluded_pts);
01529           //rejectionSignificance(mr.final_trafo, newer_node->pc_col, older_node->pc_col);
01530           inlier_points += inlier_pts;
01531           outlier_points += outlier_pts;
01532           occluded_points += occluded_pts;
01533           all_points += all_pts;
01534         }
01535 
01536         #pragma omp section
01537         {
01538           unsigned int inlier_pts = 0, outlier_pts = 0, occluded_pts = 0, all_pts = 0;
01539           observationLikelihood(mr.final_trafo.inverse(), older_node->pc_col, newer_node->pc_col, newer_node->getCamInfo(), likelihood, confidence, inlier_pts, outlier_pts, occluded_pts, all_pts) ;
01540           ROS_INFO("Observation Likelihood: %d projected to %d: good_point_ratio: %d/%d: %g, occluded points: %d", older_node->id_, newer_node->id_, inlier_pts, inlier_pts+outlier_pts, ((float)inlier_pts)/(inlier_pts+outlier_pts), occluded_pts);
01541           //rejectionSignificance(mr.final_trafo, newer_node->pc_col, older_node->pc_col);
01542           inlier_points += inlier_pts;
01543           outlier_points += outlier_pts;
01544           occluded_points += occluded_pts;
01545           all_points += all_pts;
01546         }
01547       }
01548       mr.inlier_points = inlier_points;
01549       mr.outlier_points = outlier_points;
01550       mr.occluded_points = occluded_points;
01551       mr.all_points = all_points;
01552 }
01553 
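// This implements the "RootSIFT" idea: L1-normalize each descriptor and take
// the element-wise square root, so that the Euclidean distance between the
// results corresponds to the Hellinger kernel on the original descriptors.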
01555 void squareroot_descriptor_space(cv::Mat& descriptors)
01556 {
01557   // Compute sums for L1 Norm
01558   cv::Mat sums_vec;
01559   descriptors = cv::abs(descriptors); //otherwise we draw sqrt of negative vals
01560   cv::reduce(descriptors, sums_vec, 1 /*sum over columns*/, CV_REDUCE_SUM, CV_32FC1);
  for(int row = 0; row < descriptors.rows; row++){
    if(sums_vec.at<float>(row) == 0) continue; //do not normalize zero-norm vectors
    int offset = row*descriptors.cols;
    for(int col = 0; col < descriptors.cols; col++){
      descriptors.at<float>(offset + col) = 
        sqrt(descriptors.at<float>(offset + col) / sums_vec.at<float>(row) /*L1-normalize*/);
    }
  }
01569 }
01570 
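// k-nearest-neighbor search in descriptor space. The FLANN index is built
// lazily, so this const method has to trigger its construction explicitly
// before delegating the search.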
01571 void Node::knnSearch(cv::Mat& query,
01572                      cv::Mat& indices,
01573                      cv::Mat& dists,
01574                      int knn, 
01575                      const cv::flann::SearchParams& params) const
01576 {
01577   this->getFlannIndex();//make sure it is constructed (cannot use it directly b/c of constness
01578   flannIndex->knnSearch(query, indices, dists, knn, params);
01579 }
01580 
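// Copy every point of old_node's cloud that lies within the given radius
// around center into the clone's cloud (compared via squared distances to
// avoid the square root).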
01581 void copy_filter_cloud (const Eigen::Vector3f& center, float radius, const Node* old_node, Node* clone)
01582 {
01583   float sq_radius = radius*radius;
01584   BOOST_FOREACH(const point_type& p, old_node->pc_col->points)
01585   {
01586     float sq_distance = (p.getVector3fMap() - center).squaredNorm();
01587     if(sq_distance <= sq_radius){
01588       clone->pc_col->push_back(p);
01589     }
01590   }
01591   ROS_INFO("Copied %zu/%zu points to Node clone", clone->pc_col->size(), old_node->pc_col->size());
01592 }
01593 
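// Spatial filter for the features, analogous to copy_filter_cloud: first
// collect the indices of all 3D feature locations within the radius, then
// copy their 2D/3D locations, matching statistics and descriptors (both the
// cv::Mat and the flat SiftGPU array of 128 floats per feature) into the clone.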
01594 void copy_filter_features (const Eigen::Vector3f& center, float radius, const Node* old_node, Node* clone)
01595 {
01596   assert(old_node->feature_locations_3d_.size() == old_node->feature_locations_2d_.size());
01597   float sq_radius = radius*radius;
01598 
01599   ROS_DEBUG_STREAM("Center: " << center);
01600   //Determine which features qualify
01601   std::list<size_t> featuresToCopy;
01602   for(size_t i = 0; i < old_node->feature_locations_3d_.size(); i++){
01603     float sq_distance = (old_node->feature_locations_3d_.at(i).head<3>() - center).squaredNorm();
01604     ROS_DEBUG("Sq. distance of feature %zu: %f/%f", i, sq_distance, sq_radius);
01605     if(sq_distance <= sq_radius){
01606       featuresToCopy.push_back(i);
01607     }
01608   }
01609   ROS_INFO("Copying %zu/%zu features to Node clone", featuresToCopy.size(), old_node->feature_locations_3d_.size());
01610 
  //Copy the qualifying features
  size_t size = featuresToCopy.size();
  clone->feature_locations_3d_.resize(size); //resize, not reserve: the elements are assigned via operator[] below
  clone->feature_locations_2d_.resize(size);
  clone->feature_matching_stats_.resize(size);
  clone->feature_descriptors_ = cv::Mat(size, 128, CV_32F);
  clone->siftgpu_descriptors.resize(size * 128); //flat layout: 128 floats per feature
  size_t i = 0; // the index in the clone
  BOOST_FOREACH(size_t usedFeatureIndex, featuresToCopy)
  {
      clone->feature_locations_3d_[i] = old_node->feature_locations_3d_[usedFeatureIndex];
      clone->feature_locations_2d_[i] = old_node->feature_locations_2d_[usedFeatureIndex];
      clone->feature_matching_stats_[i] = old_node->feature_matching_stats_[usedFeatureIndex];
      old_node->feature_descriptors_.row(usedFeatureIndex).copyTo(clone->feature_descriptors_.row(i)); //cv::Mat::row() yields a header, so operator= would not copy the data
      for(int j = 0; j < 128; j++){ //respect the 128-float stride per feature
        clone->siftgpu_descriptors[i*128 + j] = old_node->siftgpu_descriptors[usedFeatureIndex*128 + j];
      }
      i++;
  }
01630 }
01631 
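// Create a new node containing only the points and features within radius of
// center. The memory footprints of the original and the clone are logged for
// comparison.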
01632 Node* Node::copy_filtered(const Eigen::Vector3f& center, float radius) const
01633 {
01634   Node* clone = new Node();
01635   //Copy-filter point cloud
01636   clone->pc_col = pointcloud_type::Ptr(new pointcloud_type());
01637   copy_filter_cloud(center, radius, this, clone);
01638   copy_filter_features(center, radius, this, clone);
01639   this->getMemoryFootprint(true);
01640   clone->getMemoryFootprint(true);
01641   return clone;
01642 }

