misc.cpp
/* This file is part of RGBDSLAM.
 * 
 * RGBDSLAM is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 * 
 * RGBDSLAM is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 * 
 * You should have received a copy of the GNU General Public License
 * along with RGBDSLAM.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <ros/ros.h>
#include <tf/transform_datatypes.h>
#include <Eigen/Core>
#include <QString>
#include <QMatrix4x4>
#include <ctime>
#include <limits>
#include <algorithm>
#include "parameter_server.h"
#include <cv.h>
#include "scoped_timer.h"
#include "header.h"

#include "g2o/types/slam3d/se3quat.h"
#include "g2o/types/slam3d/vertex_se3.h"

#include <pcl_ros/transforms.h>
#include "pcl/common/io.h"
#include "pcl/common/distances.h"

#if CV_MAJOR_VERSION > 2 || CV_MINOR_VERSION >= 4
#include "opencv2/core/core.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/highgui/highgui.hpp"
#endif
#include "aorb.h"

#include <omp.h>
#include "misc2.h"
#include "point_types.h"

//For the observability test
#include <boost/math/distributions/chi_squared.hpp>
#include <numeric>

#include "feature_adjuster.h"
#include <sensor_msgs/Image.h>
#include <sensor_msgs/CameraInfo.h>

static void getCameraIntrinsics(float& fx, float& fy, float& cx, float& cy, const sensor_msgs::CameraInfo& cam_info) 
{
  ParameterServer* ps = ParameterServer::instance();
  fx = ps->get<double>("depth_camera_fx") != 0 ? ps->get<double>("depth_camera_fx") : cam_info.K[0];
  fy = ps->get<double>("depth_camera_fy") != 0 ? ps->get<double>("depth_camera_fy") : cam_info.K[4];
  cx = ps->get<double>("depth_camera_cx") != 0 ? ps->get<double>("depth_camera_cx") : cam_info.K[2];
  cy = ps->get<double>("depth_camera_cy") != 0 ? ps->get<double>("depth_camera_cy") : cam_info.K[5];
}
static void getCameraIntrinsicsInverseFocalLength(float& fxinv, float& fyinv, float& cx, float& cy, const sensor_msgs::CameraInfo& cam_info) 
{
  getCameraIntrinsics(fxinv, fyinv, cx, cy, cam_info);
  fxinv = 1./ fxinv;
  fyinv = 1./ fyinv;
}
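// Note (sketch, not part of the original file): with the inverse focal
// lengths computed above, the pinhole back-projection used throughout this
// file (backProject() from misc2.h) presumably amounts to
//
//   x = (u - cx) * Z * fxinv;
//   y = (v - cy) * Z * fyinv;
//   z = Z;
//
// i.e. pixel (u,v) with depth Z is lifted to a 3D point in the camera frame.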


void printQMatrix4x4(const char* name, const QMatrix4x4& m){
    ROS_DEBUG("QMatrix %s:", name);
    ROS_DEBUG("%f\t%f\t%f\t%f", m(0,0), m(0,1), m(0,2), m(0,3));
    ROS_DEBUG("%f\t%f\t%f\t%f", m(1,0), m(1,1), m(1,2), m(1,3));
    ROS_DEBUG("%f\t%f\t%f\t%f", m(2,0), m(2,1), m(2,2), m(2,3));
    ROS_DEBUG("%f\t%f\t%f\t%f", m(3,0), m(3,1), m(3,2), m(3,3));
}

void printTransform(const char* name, const tf::StampedTransform t) {
    ROS_INFO_STREAM(name << ": Translation " << t.getOrigin().x() << " " << t.getOrigin().y() << " " << t.getOrigin().z());
    ROS_INFO_STREAM(name << ": Rotation " << t.getRotation().getX() << " " << t.getRotation().getY() << " " << t.getRotation().getZ() << " " << t.getRotation().getW());
    ROS_INFO_STREAM(name << ": From " << t.frame_id_ << " to " << t.child_frame_id_ << " at " << t.stamp_.sec << ":" << std::setfill('0') << std::setw(9) << t.stamp_.nsec);
}
void printTransform(const char* name, const tf::Transform t) {
    ROS_INFO_STREAM(name << ": Translation " << t.getOrigin().x() << " " << t.getOrigin().y() << " " << t.getOrigin().z());
    ROS_INFO_STREAM(name << ": Rotation " << t.getRotation().getX() << " " << t.getRotation().getY() << " " << t.getRotation().getZ() << " " << t.getRotation().getW());
}

void logTransform(QTextStream& out, const tf::Transform& t, double timestamp, const char* label) {
    if(label) out << label << ": ";
    out << timestamp << " " << t.getOrigin().x() << " " << t.getOrigin().y() << " " << t.getOrigin().z() << " " << t.getRotation().getX() << " " << t.getRotation().getY() << " " << t.getRotation().getZ() << " " << t.getRotation().getW() << "\n";
}


QMatrix4x4 g2o2QMatrix(const g2o::SE3Quat se3) {
    Eigen::Matrix<double, 4, 4> m = se3.to_homogeneous_matrix();
    ROS_DEBUG_STREAM("Eigen Matrix:\n" << m);
    QMatrix4x4 qmat( static_cast<qreal*>( m.data() ) );
    // Eigen stores matrices column-major by default, while QMatrix4x4's
    // pointer constructor reads row-major values, hence the transposes below.
    printQMatrix4x4("from conversion", qmat.transposed());
    return qmat.transposed();
}
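// Sketch (assumption, not in the original file): the transpose above is
// equivalent to copying element-wise, which sidesteps the storage-order
// mismatch entirely (assuming qreal == double on this platform):
//
//   QMatrix4x4 qmat2;
//   for(int r = 0; r < 4; ++r)
//     for(int c = 0; c < 4; ++c)
//       qmat2(r, c) = m(r, c);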

tf::Transform g2o2TF(const g2o::SE3Quat se3) {
    tf::Transform result;
    tf::Vector3 translation;
    translation.setX(se3.translation().x());
    translation.setY(se3.translation().y());
    translation.setZ(se3.translation().z());

    tf::Quaternion rotation;
    rotation.setX(se3.rotation().x());
    rotation.setY(se3.rotation().y());
    rotation.setZ(se3.rotation().z());
    rotation.setW(se3.rotation().w());

    result.setOrigin(translation);
    result.setRotation(rotation);
    //printTransform("from conversion", result);
    return result;
}

#ifdef HEMACLOUDS
void transformAndAppendPointCloud (const pointcloud_type &cloud_in, 
                                   pcl::PointCloud<hema::PointXYZRGBCamSL> &cloud_to_append_to,
                                   const tf::Transform transformation, float max_Depth, int idx)
{
    Eigen::Matrix4f eigen_transform;
    pcl_ros::transformAsMatrix(transformation, eigen_transform);
    size_t original_size = cloud_to_append_to.size();
    cloud_to_append_to.points.reserve(cloud_in.size() + original_size);

    if(cloud_to_append_to.points.size() == 0){
        cloud_to_append_to.header   = cloud_in.header;
        cloud_to_append_to.width    = 0;
        cloud_to_append_to.height   = 0;
        cloud_to_append_to.is_dense = false;
    }

    Eigen::Matrix3f rot   = eigen_transform.block<3, 3> (0, 0);
    Eigen::Vector3f trans = eigen_transform.block<3, 1> (0, 3);
    size_t i = 0;
    for (; i < cloud_in.points.size (); ++i)
    { 
      const point_type& in_pt = cloud_in[i];
      const Eigen::Map<Eigen::Vector3f> vec_in (const_cast<float*>(&in_pt.x), 3, 1);
      hema::PointXYZRGBCamSL out_pt;
      Eigen::Map<Eigen::Vector3f> vec_out (&out_pt.x, 3, 1);
      float distance = vec_in.norm();
      out_pt.distance = distance;
      out_pt.cameraIndex = idx;
      out_pt.segment = 0;
      out_pt.label = 0;
#ifndef RGB_IS_4TH_DIM
      out_pt.rgb = in_pt.rgb;
#else
      out_pt.data[3] = in_pt.data[3];
#endif
      //Filter out points with a range greater than the given parameter, or do nothing if it is negative
      if(max_Depth >= 0 && distance > max_Depth){ //Erase coordinates of high-noise points if desired
         vec_out[0] = vec_out[1] = vec_out[2] = std::numeric_limits<float>::quiet_NaN();
      } else {
         vec_out = rot * vec_in + trans;
      }
      cloud_to_append_to.push_back(out_pt);
    }
}


#else // !HEMACLOUDS
//From: /opt/ros/unstable/stacks/perception_pcl/pcl/src/pcl/registration/transforms.hpp

//template <typename PointT> void
//transformAndAppendPointCloud (const pcl::PointCloud<PointT> &cloud_in, pcl::PointCloud<PointT> &cloud_to_append_to,
//                              const tf::Transform transformation)
void transformAndAppendPointCloud (const pointcloud_type &cloud_in, 
                                   pointcloud_type &cloud_to_append_to,
                                   const tf::Transform transformation, float max_Depth, int )
{
    bool compact = !ParameterServer::instance()->get<bool>("preserve_raster_on_save");
    Eigen::Matrix4f eigen_transform;
    pcl_ros::transformAsMatrix(transformation, eigen_transform);
    unsigned int cloud_to_append_to_original_size = cloud_to_append_to.size();
    if(cloud_to_append_to.points.size() == 0){
        cloud_to_append_to.header   = cloud_in.header;
        cloud_to_append_to.width    = 0;
        cloud_to_append_to.height   = 0;
        cloud_to_append_to.is_dense = false;
    }

    ROS_DEBUG("max_Depth = %f", max_Depth);
    ROS_DEBUG("cloud_to_append_to_original_size = %i", cloud_to_append_to_original_size);

    //Append all points untransformed. This also copies the other fields, e.g. rgb
    cloud_to_append_to += cloud_in;

    Eigen::Matrix3f rot   = eigen_transform.block<3, 3> (0, 0);
    Eigen::Vector3f trans = eigen_transform.block<3, 1> (0, 3);
    point_type origin = point_type();
    origin.x = 0;
    origin.y = 0;
    origin.z = 0;
    int j = 0; //Output index
    for (size_t i = 0; i < cloud_in.points.size (); ++i) //i: Input index
    { 
      Eigen::Map<Eigen::Vector3f> p_in (const_cast<float*>(&cloud_in.points[i].x), 3, 1);
      Eigen::Map<Eigen::Vector3f> p_out (&cloud_to_append_to.points[j+cloud_to_append_to_original_size].x, 3, 1);
      if(compact){ cloud_to_append_to.points[j+cloud_to_append_to_original_size] = cloud_in.points[i]; }
      //Filter out points with a range greater than the given parameter, or do nothing if it is negative
      if(max_Depth >= 0){
        if(pcl::squaredEuclideanDistance(cloud_in.points[i], origin) > max_Depth*max_Depth){
           p_out[0] = std::numeric_limits<float>::quiet_NaN();
           p_out[1] = std::numeric_limits<float>::quiet_NaN();
           p_out[2] = std::numeric_limits<float>::quiet_NaN();
           if(!compact) j++; 
           continue;
         }
      }
      if (pcl_isnan (cloud_in.points[i].x) || pcl_isnan (cloud_in.points[i].y) || pcl_isnan (cloud_in.points[i].z)){
         if(!compact) j++; //Skip, but leave output as is
         continue;
      }
      p_out = rot * p_in + trans;
      j++;
    }
    if(compact){
      cloud_to_append_to.points.resize(j+cloud_to_append_to_original_size);
      cloud_to_append_to.width    = 1;
      cloud_to_append_to.height   = j+cloud_to_append_to_original_size;
    }
}
#endif //HEMACLOUDS

//Do the needed type conversions to transform the point into the world frame
geometry_msgs::Point pointInWorldFrame(const Eigen::Vector4f& point3d, const g2o::VertexSE3::EstimateType& transf)
{
    Eigen::Vector3d tmp(point3d[0], point3d[1], point3d[2]);
    tmp = transf * tmp; //transform to world frame
    geometry_msgs::Point p;
    p.x = tmp.x(); 
    p.y = tmp.y(); 
    p.z = tmp.z();
    return p;
}
void mat2dist(const Eigen::Matrix4f& t, double &dist){
    dist = sqrt(t(0,3)*t(0,3) + t(1,3)*t(1,3) + t(2,3)*t(2,3));
}

void mat2RPY(const Eigen::Matrix4f& t, double& roll, double& pitch, double& yaw) {
    roll = atan2(t(2,1), t(2,2));
    pitch = atan2(-t(2,0), sqrt(t(2,1)*t(2,1) + t(2,2)*t(2,2)));
    yaw = atan2(t(1,0), t(0,0));
}
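// Note (explanatory, not in the original file): these formulas assume the
// Z-Y-X Euler convention R = Rz(yaw) * Ry(pitch) * Rx(roll), for which
//
//   roll  = atan2(r32, r33)
//   pitch = atan2(-r31, sqrt(r32^2 + r33^2))
//   yaw   = atan2(r21, r11)
//
// with r_ij the elements of the rotation block of t (1-based indices).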
void mat2components(const Eigen::Matrix4f& t, double& roll, double& pitch, double& yaw, double& dist){
  mat2RPY(t, roll, pitch, yaw);
  mat2dist(t, dist);

  roll  = roll /M_PI*180;
  pitch = pitch/M_PI*180;
  yaw   = yaw  /M_PI*180;
}

void trafoSize(const Eigen::Isometry3d& t, double& angle, double& dist){
  angle = acos((t.rotation().trace() - 1) / 2) * 180.0 / M_PI;
  dist = t.translation().norm();
  ROS_INFO("Rotation:% 4.2f, Distance: % 4.3fm", angle, dist);
}
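// Note (explanatory, not in the original file): for a rotation matrix R with
// axis-angle magnitude theta, trace(R) = 1 + 2*cos(theta), so the rotation
// angle is recovered as theta = acos((trace(R) - 1) / 2).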

bool isBigTrafo(const Eigen::Isometry3d& t){
    double angle, dist;
    trafoSize(t, angle, dist);
    return (dist > ParameterServer::instance()->get<double>("min_translation_meter") ||
            angle > ParameterServer::instance()->get<double>("min_rotation_degree"));
}

// true iff the edge qualifies for generating a new vertex
bool isBigTrafo(const Eigen::Matrix4f& t){
    double roll, pitch, yaw, dist;

    mat2RPY(t, roll, pitch, yaw);
    mat2dist(t, dist);

    roll  = roll /M_PI*180;
    pitch = pitch/M_PI*180;
    yaw   = yaw  /M_PI*180;

    double max_angle = std::max(roll, std::max(pitch, yaw));

    return (dist > ParameterServer::instance()->get<double>("min_translation_meter")
            || max_angle > ParameterServer::instance()->get<double>("min_rotation_degree"));
}


bool isSmallTrafo(const Eigen::Isometry3d& t, double seconds){
    if(seconds <= 0.0){
      ROS_WARN("Time delta invalid: %f. Skipping test for small transformation", seconds);
      return true;
    }

    double angle_around_axis, dist;
    trafoSize(t, angle_around_axis, dist);

    ParameterServer* ps = ParameterServer::instance();
    return (dist / seconds < ps->get<double>("max_translation_meter") &&
            angle_around_axis / seconds < ps->get<double>("max_rotation_degree"));
}

bool isSmallTrafo(const g2o::SE3Quat& t, double seconds){
    if(seconds <= 0.0){
      ROS_WARN("Time delta invalid: %f. Skipping test for small transformation", seconds);
      return true;
    }
    float angle_around_axis = 2.0*acos(t.rotation().w()) * 180.0 / M_PI;
    float dist = t.translation().norm();
    QString infostring;
    ROS_DEBUG("Rotation:% 4.2f, Distance: % 4.3fm", angle_around_axis, dist);
    infostring.sprintf("Rotation:% 4.2f, Distance: % 4.3fm", angle_around_axis, dist);
    //Q_EMIT setGUIInfo2(infostring);
    ParameterServer* ps = ParameterServer::instance();
    //Too big a motion per second fails the test
    return (dist / seconds < ps->get<double>("max_translation_meter") &&
            angle_around_axis / seconds < ps->get<double>("max_rotation_degree"));
}

bool isBigTrafo(const g2o::SE3Quat& t){
    float angle_around_axis = 2.0*acos(t.rotation().w()) * 180.0 / M_PI;
    float dist = t.translation().norm();
    QString infostring;
    ROS_DEBUG("Rotation:% 4.2f, Distance: % 4.3fm", angle_around_axis, dist);
    infostring.sprintf("Rotation:% 4.2f, Distance: % 4.3fm", angle_around_axis, dist);
    //Q_EMIT setGUIInfo2(infostring);
    ParameterServer* ps = ParameterServer::instance();
    return (dist > ps->get<double>("min_translation_meter") ||
            angle_around_axis > ps->get<double>("min_rotation_degree"));
}


/*
bool overlappingViews(LoadedEdge3D edge){
    //opening angles
    double alpha = 57.0/180.0*M_PI;
    double beta = 47.0/180.0*M_PI; 
    //assumes robot coordinate system (x is front, y is left, z is up)
    Eigen::Matrix<double, 4,3> cam1;
    cam1 <<  1.0,  std::tan(alpha),  std::tan(beta), //upper left
             1.0, -std::tan(alpha),  std::tan(beta), //upper right
             1.0,  std::tan(alpha), -std::tan(beta), //lower left
             1.0, -std::tan(alpha), -std::tan(beta); //lower right
    return false;
}
bool triangleRayIntersection(Eigen::Vector3d triangle1, Eigen::Vector3d triangle2, 
                             Eigen::Vector3d ray_origin, Eigen::Vector3d ray){
    Eigen::Matrix3d m;
    m.col(2) = -ray;
    m.col(0) = triangle1;
    m.col(1) = triangle2;
    Eigen::Vector3d lengths = m.inverse() * ray_origin;
    return (lengths(0) < 0 && lengths(1) > 0 && lengths(2) > 0 );
}
*/


g2o::SE3Quat tf2G2O(const tf::Transform t) 
{
  Eigen::Quaterniond eigen_quat(t.getRotation().getW(), t.getRotation().getX(), t.getRotation().getY(), t.getRotation().getZ());
  Eigen::Vector3d translation(t.getOrigin().x(), t.getOrigin().y(), t.getOrigin().z());
  g2o::SE3Quat result(eigen_quat, translation);
  return result;
}

g2o::SE3Quat eigen2G2O(const Eigen::Matrix4d& eigen_mat) 
{
  Eigen::Affine3d eigen_transform(eigen_mat);
  Eigen::Quaterniond eigen_quat(eigen_transform.rotation());
  Eigen::Vector3d translation(eigen_mat(0, 3), eigen_mat(1, 3), eigen_mat(2, 3));
  g2o::SE3Quat result(eigen_quat, translation);

  return result;
}
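// Usage sketch (assumption, not in the original file): the converters above
// are mutual inverses, which can serve as a quick sanity check:
//
//   tf::Transform t = some_transform();   // hypothetical input
//   tf::Transform t2 = g2o2TF(tf2G2O(t)); // t2 equals t up to floating-point precision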

//Little debugging helper functions
std::string openCVCode2String(unsigned int code){
  switch(code){
    case 0 : return std::string("CV_8UC1" );
    case 8 : return std::string("CV_8UC2" );
    case 16: return std::string("CV_8UC3" );
    case 24: return std::string("CV_8UC4" );
    case 2 : return std::string("CV_16UC1");
    case 10: return std::string("CV_16UC2");
    case 18: return std::string("CV_16UC3");
    case 26: return std::string("CV_16UC4");
    case 5 : return std::string("CV_32FC1");
    case 13: return std::string("CV_32FC2");
    case 21: return std::string("CV_32FC3");
    case 29: return std::string("CV_32FC4");
  }
  return std::string("Unknown");
}
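// Note (explanatory, not in the original file): these codes follow OpenCV's
// CV_MAKETYPE(depth, channels) = depth + (channels - 1) * 8 scheme, with the
// depth constants CV_8U = 0, CV_16U = 2, CV_32F = 5; e.g. CV_16UC3 = 2 + 2*8 = 18.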

void printMatrixInfo(const cv::Mat& image, std::string name){
  ROS_INFO_STREAM("Matrix " << name << " - Type:" << openCVCode2String(image.type()) << " rows: " << image.rows << " cols: " << image.cols);
}


void depthToCV8UC1(cv::Mat& depth_img, cv::Mat& mono8_img){
  //Process images
  if(depth_img.type() == CV_32FC1){
    depth_img.convertTo(mono8_img, CV_8UC1, 100, 0); //millimeters (the scale of mono8_img does not matter)
  }
  else if(depth_img.type() == CV_16UC1){
    mono8_img = cv::Mat(depth_img.size(), CV_8UC1);
    cv::Mat float_img;
    depth_img.convertTo(mono8_img, CV_8UC1, 0.05, -25); //scale to 2cm
    depth_img.convertTo(float_img, CV_32FC1, 0.001, 0); //from mm to m (the scale of depth_img matters)
    depth_img = float_img;
  }
  else {
    printMatrixInfo(depth_img, "Depth Image");
    ROS_ERROR_STREAM("Don't know how to handle depth image of type " << openCVCode2String(depth_img.type()));
  }
}
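// Usage sketch (assumption, not in the original file): for a CV_16UC1 depth
// image with depth in millimeters (as typical OpenNI drivers deliver), the
// call converts depth_img in place to CV_32FC1 in meters and fills mono8_img
// with an 8-bit visualization:
//
//   cv::Mat mono8;
//   depthToCV8UC1(depth, mono8); // depth is now CV_32FC1, in meters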

bool asyncFrameDrop(ros::Time depth, ros::Time rgb)
{
  long rgb_timediff = std::abs(static_cast<long>((rgb - depth).toNSec())); //full stamp difference in nanoseconds
  if(rgb_timediff > 33333333){
     ROS_DEBUG("Depth image time: %d - %d", depth.sec, depth.nsec);
     ROS_DEBUG("RGB   image time: %d - %d", rgb.sec, rgb.nsec);
     ROS_INFO("Depth and RGB image are off by more than 1/30 sec: %li (nsec)", rgb_timediff);
     if(ParameterServer::instance()->get<bool>("drop_async_frames")){
       ROS_WARN("Asynchronous frames ignored. See parameters if you want to keep async frames.");
       return true;
     }
  } else {
     ROS_DEBUG("Depth image time: %d - %d", depth.sec, depth.nsec);
     ROS_DEBUG("RGB   image time: %d - %d", rgb.sec, rgb.nsec);
  }
  return false;
}


typedef union
{
  struct /*anonymous*/
  {
    unsigned char Blue;
    unsigned char Green;
    unsigned char Red;
    unsigned char Alpha;
  };
  float float_value;
  long long_value;
} RGBValue;
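// Example (sketch, not in the original file): packing a color into the float
// that PCL stores in the point's rgb field; getColor() at the end of this
// file performs the inverse:
//
//   RGBValue color;
//   color.Red = 255; color.Green = 0; color.Blue = 0; color.Alpha = 0;
//   pt.rgb = color.float_value; // a red point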

pointcloud_type* createXYZRGBPointCloud (const cv::Mat& depth_img, 
                                         const cv::Mat& rgb_img,
                                         const sensor_msgs::CameraInfoConstPtr& cam_info) 
{
  ScopedTimer s(__FUNCTION__);
  pointcloud_type* cloud (new pointcloud_type() );
  cloud->is_dense = false; //single point of view, 2d rasterized: NaN where no depth value was found

  float fxinv, fyinv, cx, cy;
  getCameraIntrinsicsInverseFocalLength(fxinv, fyinv, cx, cy, *cam_info);
  ParameterServer* ps = ParameterServer::instance();
  int data_skip_step = ps->get<int>("cloud_creation_skip_step");
  if(depth_img.rows % data_skip_step != 0 || depth_img.cols % data_skip_step != 0){
    ROS_WARN("The parameter cloud_creation_skip_step is not a divisor of the depth image dimensions. This will most likely crash the program!");
  }
  cloud->height = ceil(depth_img.rows / static_cast<float>(data_skip_step));
  cloud->width = ceil(depth_img.cols / static_cast<float>(data_skip_step));
  int pixel_data_size = 3;
  //Assume RGB
  char red_idx = 0, green_idx = 1, blue_idx = 2;
  if(rgb_img.type() == CV_8UC1) pixel_data_size = 1;
  else if(ps->get<bool>("encoding_bgr")) { red_idx = 2; blue_idx = 0; }

  unsigned int color_row_step, color_pix_step, depth_pix_step, depth_row_step;
  color_pix_step = pixel_data_size * (rgb_img.cols / cloud->width);
  color_row_step = pixel_data_size * (rgb_img.rows / cloud->height - 1) * rgb_img.cols;
  depth_pix_step = (depth_img.cols / cloud->width);
  depth_row_step = (depth_img.rows / cloud->height - 1) * depth_img.cols;

  cloud->points.resize (cloud->height * cloud->width);

  // depth_img already has the desired dimensions, but rgb_img may be of higher resolution.
  int color_idx = 0 * color_pix_step - 0 * color_row_step, depth_idx = 0; //FIXME: Hack for hard-coded calibration of color to depth
  double depth_scaling = ps->get<double>("depth_scaling_factor");
  float max_depth = ps->get<double>("maximum_depth");
  float min_depth = ps->get<double>("minimum_depth");
  if(max_depth < 0.0) max_depth = std::numeric_limits<float>::infinity();

  //Main loop
  pointcloud_type::iterator pt_iter = cloud->begin();
  for (int v = 0; v < (int)rgb_img.rows; v += data_skip_step, color_idx += color_row_step, depth_idx += depth_row_step)
  {
    for (int u = 0; u < (int)rgb_img.cols; u += data_skip_step, color_idx += color_pix_step, depth_idx += depth_pix_step, ++pt_iter)
    {
      if(pt_iter == cloud->end()){
        break;
      }
      point_type& pt = *pt_iter;
      if(u < 0 || v < 0 || u >= depth_img.cols || v >= depth_img.rows){
        pt.x = pt.y = pt.z = std::numeric_limits<float>::quiet_NaN();
        continue;
      }

      float Z = depth_img.at<float>(depth_idx) * depth_scaling;

      // Check for invalid measurements (this also triggers on NaN)
      if (!(Z >= min_depth))
      {
        pt.x = (u - cx) * 1.0 * fxinv; //FIXME: is there a better solution than acting as if the point were at 1 meter?
        pt.y = (v - cy) * 1.0 * fyinv;
        pt.z = std::numeric_limits<float>::quiet_NaN();
      }
      else // Fill in XYZ
      {
        backProject(fxinv, fyinv, cx, cy, u, v, Z, pt.x, pt.y, pt.z);
      }
      // Fill in color
      RGBValue color;
      if(color_idx > 0 && color_idx < rgb_img.total()*color_pix_step){ //Only necessary because of the color_idx offset
        if(pixel_data_size == 3){
          color.Red   = rgb_img.at<uint8_t>(color_idx + red_idx);
          color.Green = rgb_img.at<uint8_t>(color_idx + green_idx);
          color.Blue  = rgb_img.at<uint8_t>(color_idx + blue_idx);
        } else {
          color.Red   = color.Green = color.Blue = rgb_img.at<uint8_t>(color_idx);
        }
        color.Alpha = 0;
#ifndef RGB_IS_4TH_DIM
        pt.rgb = color.float_value;
#else
        pt.data[3] = color.float_value;
#endif
      }
    }
  }

  return cloud;
}
pointcloud_type* createXYZRGBPointCloud (const sensor_msgs::ImageConstPtr& depth_msg, 
                                         const sensor_msgs::ImageConstPtr& rgb_msg,
                                         const sensor_msgs::CameraInfoConstPtr& cam_info) 
{
  ScopedTimer s(__FUNCTION__);
  pointcloud_type* cloud (new pointcloud_type() );
  myHeader h(0, depth_msg->header.stamp, rgb_msg->header.frame_id);
  cloud->header = h;
  cloud->is_dense = false; //single point of view, 2d rasterized: NaN where no depth value was found

  float fxinv, fyinv, cx, cy;
  getCameraIntrinsicsInverseFocalLength(fxinv, fyinv, cx, cy, *cam_info); //inverse focal lengths allow multiplication instead of (slower) division in the loop
  ParameterServer* ps = ParameterServer::instance();
  int data_skip_step = ps->get<int>("cloud_creation_skip_step");
  cloud->height = depth_msg->height / data_skip_step;
  cloud->width = depth_msg->width / data_skip_step;
  int pixel_data_size = 3;
  char red_idx = 0, green_idx = 1, blue_idx = 2;
  if(rgb_msg->encoding.compare("mono8") == 0) pixel_data_size = 1;
  if(rgb_msg->encoding.compare("bgr8") == 0) { red_idx = 2; blue_idx = 0; }

  ROS_ERROR_COND(pixel_data_size == 0, "Unknown image encoding: %s!", rgb_msg->encoding.c_str());
  unsigned int color_row_step, color_pix_step, depth_pix_step, depth_row_step;
  color_pix_step = pixel_data_size * (rgb_msg->width / cloud->width);
  color_row_step = pixel_data_size * (rgb_msg->height / cloud->height - 1) * rgb_msg->width;
  depth_pix_step = (depth_msg->width / cloud->width);
  depth_row_step = (depth_msg->height / cloud->height - 1) * depth_msg->width;

  cloud->points.resize (cloud->height * cloud->width);

  const uint8_t* rgb_buffer = &rgb_msg->data[0];

  // depth_msg already has the desired dimensions, but rgb_msg may be of higher resolution.
  int color_idx = 0, depth_idx = 0;
  double depth_scaling = ps->get<double>("depth_scaling_factor");
  float max_depth = ps->get<double>("maximum_depth");
  if(max_depth < 0.0) max_depth = std::numeric_limits<float>::infinity();

  const float* depth_buffer = reinterpret_cast<const float*>(&depth_msg->data[0]);
  pointcloud_type::iterator pt_iter = cloud->begin ();
  for (int v = 0; v < (int)rgb_msg->height; v += data_skip_step, color_idx += color_row_step, depth_idx += depth_row_step)
  {
    for (int u = 0; u < (int)rgb_msg->width; u += data_skip_step, color_idx += color_pix_step, depth_idx += depth_pix_step, ++pt_iter)
    {
      point_type& pt = *pt_iter;
      float Z = depth_buffer[depth_idx] * depth_scaling;

      // Check for invalid measurements (this also triggers on NaN)
      if (!(Z <= max_depth))
      {
        pt.z = std::numeric_limits<float>::quiet_NaN();
      }
      else // Fill in XYZ
      {
        backProject(fxinv, fyinv, cx, cy, u, v, Z, pt.x, pt.y, pt.z);
      }
      // Fill in color
      RGBValue color;
      if(pixel_data_size == 3){
        color.Red   = rgb_buffer[color_idx + red_idx];
        color.Green = rgb_buffer[color_idx + green_idx];
        color.Blue  = rgb_buffer[color_idx + blue_idx];
      } else {
        color.Red   = color.Green = color.Blue = rgb_buffer[color_idx];
      }
      color.Alpha = 0;
#ifndef RGB_IS_4TH_DIM
      pt.rgb = color.float_value;
#else
      pt.data[3] = color.float_value;
#endif
    }
  }

  return cloud;
}
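// Note (sketch, not in the original file): both overloads return a raw,
// heap-allocated cloud, so callers are expected to take ownership, e.g.
//
//   pointcloud_type::Ptr cloud(createXYZRGBPointCloud(depth_msg, rgb_msg, cam_info));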

double errorFunction(const Eigen::Vector4f& x1, const double x1_depth_cov, 
                     const Eigen::Vector4f& x2, const double x2_depth_cov, 
                     const Eigen::Matrix4f& tf_1_to_2)
{
  const double cam_angle_x = 58.0/180.0*M_PI;
  const double cam_angle_y = 45.0/180.0*M_PI;
  const double cam_resol_x = 640;
  const double cam_resol_y = 480;
  const double raster_stddev_x = 2*tan(cam_angle_x/cam_resol_x);  //2px stddev in x
  const double raster_stddev_y = 2*tan(cam_angle_y/cam_resol_y);  //2px stddev in y
  const double raster_cov_x = raster_stddev_x * raster_stddev_x;
  const double raster_cov_y = raster_stddev_y * raster_stddev_y;

  ROS_DEBUG_COND(x1(3) != 1.0, "4th element of x1 should be 1.0, is %f", x1(3));
  ROS_DEBUG_COND(x2(3) != 1.0, "4th element of x2 should be 1.0, is %f", x2(3));

  Eigen::Vector3d mu_1 = x1.head<3>().cast<double>();
  Eigen::Vector3d mu_2 = x2.head<3>().cast<double>();
  Eigen::Matrix3d rotation_mat = tf_1_to_2.block(0,0,3,3).cast<double>();

  //Point 1
  Eigen::Matrix3d cov1 = Eigen::Matrix3d::Identity();
  cov1(0,0) = raster_cov_x * mu_1(2); //how big is 1px std dev in meters, depends on depth
  cov1(1,1) = raster_cov_y * mu_1(2);
  cov1(2,2) = x1_depth_cov;
  //Point 2
  Eigen::Matrix3d cov2 = Eigen::Matrix3d::Identity();
  cov2(0,0) = raster_cov_x * mu_2(2);
  cov2(1,1) = raster_cov_y * mu_2(2);
  cov2(2,2) = x2_depth_cov;

  Eigen::Matrix3d cov2inv = cov2.inverse(); // Σ₂⁻¹

  Eigen::Vector3d mu_1_in_frame_2 = (tf_1_to_2 * x1).head<3>().cast<double>(); // μ₁⁽²⁾ = T₁₂ μ₁⁽¹⁾
  Eigen::Matrix3d cov1_in_frame_2 = rotation_mat.transpose() * cov1 * rotation_mat; //works since the cov is diagonal => eigenvector matrix is the identity
  Eigen::Matrix3d cov1inv_in_frame_2 = cov1_in_frame_2.inverse(); // Σ₁⁻¹

  Eigen::Matrix3d cov_sum = (cov1inv_in_frame_2 + cov2inv);
  Eigen::Matrix3d inv_cov_sum = cov_sum.inverse();
  ROS_ERROR_STREAM_COND(inv_cov_sum != inv_cov_sum, "Sum of covariances not invertible:\n" << cov_sum);

  Eigen::Vector3d x_ml; //maximum-likelihood position of the latent point that caused the sensor measurements
  x_ml = inv_cov_sum * (cov1inv_in_frame_2 * mu_1_in_frame_2 + cov2inv * mu_2); // (Σ₁⁻¹ + Σ₂⁻¹)⁻¹(Σ₁⁻¹μ₁ + Σ₂⁻¹μ₂)
  Eigen::Vector3d delta_mu_1 = mu_1_in_frame_2 - x_ml;
  Eigen::Vector3d delta_mu_2 = mu_2 - x_ml;

  float sqrd_mahalanobis_distance1 = delta_mu_1.transpose() * cov1inv_in_frame_2 * delta_mu_1; // Δμ₁ᵀ Σ₁⁻¹ Δμ₁
  float sqrd_mahalanobis_distance2 = delta_mu_2.transpose() * cov2inv * delta_mu_2; // Δμ₂ᵀ Σ₂⁻¹ Δμ₂
  float bad_mahalanobis_distance = sqrd_mahalanobis_distance1 + sqrd_mahalanobis_distance2; //FIXME

  if(!(bad_mahalanobis_distance >= 0.0))
  {
    ROS_ERROR_STREAM("Non-positive Mahalanobis distance");
    return std::numeric_limits<double>::max();
  }
  ROS_DEBUG_STREAM_NAMED("statistics", "Mahalanobis ML: " << std::setprecision(25) << bad_mahalanobis_distance);
  return bad_mahalanobis_distance;
}

double errorFunction2(const Eigen::Vector4f& x1,
                      const Eigen::Vector4f& x2,
                      const Eigen::Matrix4d& transformation)
{
  //FIXME: Take from parameter_server or cam info
  static const double cam_angle_x = 58.0/180.0*M_PI;
  static const double cam_angle_y = 45.0/180.0*M_PI;
  static const double cam_resol_x = 640;
  static const double cam_resol_y = 480;
  static const double raster_stddev_x = 3*tan(cam_angle_x/cam_resol_x);  //3px stddev in x
  static const double raster_stddev_y = 3*tan(cam_angle_y/cam_resol_y);  //3px stddev in y
  static const double raster_cov_x = raster_stddev_x * raster_stddev_x;
  static const double raster_cov_y = raster_stddev_y * raster_stddev_y;
  static const bool use_error_shortcut = true; //ParameterServer::instance()->get<bool>("use_error_shortcut");

  bool nan1 = isnan(x1(2));
  bool nan2 = isnan(x2(2));
  if(nan1 || nan2){
    //TODO: Handle features with NaN, by reporting the reprojection error
    return std::numeric_limits<double>::max();
  }
  Eigen::Vector4d x_1 = x1.cast<double>();
  Eigen::Vector4d x_2 = x2.cast<double>();

  Eigen::Matrix4d tf_12 = transformation;
  Eigen::Vector3d mu_1 = x_1.head<3>();
  Eigen::Vector3d mu_2 = x_2.head<3>();
  Eigen::Vector3d mu_1_in_frame_2 = (tf_12 * x_1).head<3>(); // μ₁⁽²⁾ = T₁₂ μ₁⁽¹⁾
  //Shortcut to quickly rule out clear outliers
  if(use_error_shortcut)
  {
    double delta_sq_norm = (mu_1_in_frame_2 - mu_2).squaredNorm();
    double sigma_max_1 = std::max(raster_cov_x, depth_covariance(mu_1(2))); //assuming raster_cov_x and _y to be approx. equal
    double sigma_max_2 = std::max(raster_cov_x, depth_covariance(mu_2(2)));
    if(delta_sq_norm > 2.0 * (sigma_max_1 + sigma_max_2)) //FIXME: Factor 3 for mahal dist should be gotten from caller
    {
      return std::numeric_limits<double>::max();
    }
  }

  Eigen::Matrix3d rotation_mat = tf_12.block(0,0,3,3);

  //Point 1
  Eigen::Matrix3d cov1 = Eigen::Matrix3d::Zero();
  cov1(0,0) = raster_cov_x * mu_1(2); //how big is 1px std dev in meters, depends on depth
  cov1(1,1) = raster_cov_y * mu_1(2);
  cov1(2,2) = depth_covariance(mu_1(2));

  //Point 2
  Eigen::Matrix3d cov2 = Eigen::Matrix3d::Zero();
  cov2(0,0) = raster_cov_x * mu_2(2);
  cov2(1,1) = raster_cov_y * mu_2(2);
  cov2(2,2) = depth_covariance(mu_2(2));

  Eigen::Matrix3d cov1_in_frame_2 = rotation_mat.transpose() * cov1 * rotation_mat; //works since the cov is diagonal => eigenvector matrix is the identity

  // Δμ⁽²⁾ = μ₁⁽²⁾ - μ₂⁽²⁾
  Eigen::Vector3d delta_mu_in_frame_2 = mu_1_in_frame_2 - mu_2;
  if(isnan(delta_mu_in_frame_2(2))){
    ROS_ERROR("Unexpected NaN");
    return std::numeric_limits<double>::max();
  }
  // Σc = (Σ₁ + Σ₂)
  Eigen::Matrix3d cov_mat_sum_in_frame_2 = cov1_in_frame_2 + cov2;
  // ΔμᵀΣc⁻¹Δμ, computed via a Cholesky solve instead of an explicit inverse
  double sqrd_mahalanobis_distance = delta_mu_in_frame_2.transpose() * cov_mat_sum_in_frame_2.llt().solve(delta_mu_in_frame_2);

  if(!(sqrd_mahalanobis_distance >= 0.0))
  {
    return std::numeric_limits<double>::max();
  }
  return sqrd_mahalanobis_distance;
}
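// Usage sketch (assumption, not in the original file): under the hypothesis
// that x1 and x2 observe the same 3D point, the returned squared Mahalanobis
// distance is approximately chi²-distributed with 3 degrees of freedom, so a
// caller could gate matches at the 95% quantile:
//
//   bool is_inlier = errorFunction2(x1, x2, tf) < 7.815; // chi²(3), p = 0.95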



float getMinDepthInNeighborhood(const cv::Mat& depth, cv::Point2f center, float diameter){
    // Get neighbourhood area of keypoint
    int radius = (diameter - 1)/2;
    int top   = center.y - radius; top   = top   < 0 ? 0 : top;
    int left  = center.x - radius; left  = left  < 0 ? 0 : left;
    int bot   = center.y + radius; bot   = bot   > depth.rows ? depth.rows : bot;
    int right = center.x + radius; right = right > depth.cols ? depth.cols : right;

    cv::Mat neighborhood(depth, cv::Range(top, bot), cv::Range(left, right));
    double minZ = std::numeric_limits<float>::quiet_NaN();
    cv::minMaxLoc(neighborhood, &minZ);
    if(minZ == 0.0){ //FIXME: Why are there features with depth set to zero?
      ROS_INFO_THROTTLE(1, "Caught feature with zero in depth neighbourhood");
      minZ = std::numeric_limits<float>::quiet_NaN();
    }

    return static_cast<float>(minZ);
}


#include <pcl/ros/conversions.h>

#include <math.h>
#define SQRT_2_PI 2.5066283
#define SQRT_2 1.41421
#define LOG_SQRT_2_PI 0.9189385332

inline int round(float d)
{
  return static_cast<int>(floor(d + 0.5));
}

// Returns the probability mass in [-inf,x] of a Gaussian distribution
double cdf(double x, double mu, double sigma)
{
  return 0.5 * (1 + erf((x - mu) / (sigma * SQRT_2)));
}
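// Example (not in the original file): about 99.73% of a Gaussian's
// probability mass lies within three standard deviations of the mean:
//
//   double p = cdf(mu + 3*sigma, mu, sigma) - cdf(mu - 3*sigma, mu, sigma);
//   // p ≈ 0.9973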

void observationLikelihood(const Eigen::Matrix4f& proposed_transformation, //new to old
                           pointcloud_type::Ptr new_pc,
                           pointcloud_type::Ptr old_pc,
                           const sensor_msgs::CameraInfo& old_cam_info,
                           double& likelihood, 
                           double& confidence,
                           unsigned int& inliers,
                           unsigned int& outliers,
                           unsigned int& occluded,
                           unsigned int& all) 
{
  ScopedTimer s(__FUNCTION__);

  int skip_step = ParameterServer::instance()->get<int>("emm__skip_step");
  const bool mark_outliers = ParameterServer::instance()->get<bool>("emm__mark_outliers");
  double observability_threshold = ParameterServer::instance()->get<double>("observability_threshold");
  inliers = outliers = occluded = all = 0;
  if(skip_step < 0 || observability_threshold <= 0.0){
    inliers = all = 1;
    return;
  }
  if(old_pc->width <= 1 || old_pc->height <= 1){
    //We need structured point clouds
    ROS_ERROR("Point cloud seems to be non-structured: %u/%u (w/h). Observation can not be evaluated!", old_pc->width, old_pc->height);
    if(ParameterServer::instance()->get<double>("voxelfilter_size") > 0){
      ROS_ERROR("The parameter voxelfilter_size is set. This is incompatible with the environment measurement model (parameter 'observability_threshold')");
    }
    inliers = all = 1;
    return;
  }
  if(old_pc->width != new_pc->width){
    ROS_ERROR("Differing cloud dimensions: %d vs %d width. Skipping observationLikelihood.", old_pc->width, new_pc->width);
    return;
  }

  pointcloud_type new_pc_transformed;
  pcl::transformPointCloud(*new_pc, new_pc_transformed, proposed_transformation);

  //Camera calibration. FIXME: Get actual values from cameraInfo (need to store in node?)
  ParameterServer* ps = ParameterServer::instance();
  float fx, fy, cx, cy;
  getCameraIntrinsics(fx, fy, cx, cy, old_cam_info);
  int cloud_creation_skip_step = 1; 
  if(ps->get<std::string>("topic_points").empty()){ //downsampled cloud?
    cloud_creation_skip_step = ps->get<int>("cloud_creation_skip_step");
    fx = fx / cloud_creation_skip_step;
    fy = fy / cloud_creation_skip_step;
    cx = cx / cloud_creation_skip_step;
    cy = cy / cloud_creation_skip_step;
  }

  double sumloglikelihood = 0.0, observation_count = 0.0;
  unsigned int bad_points = 0, good_points = 0, occluded_points = 0;
  uint8_t r1 = rand() % 32, g1 = 128 + rand() % 128, b1 = 128 + rand() % 128; // Mark occluded points in cyan color
  uint32_t rgb1 = ((uint32_t)r1 << 16 | (uint32_t)g1 << 8 | (uint32_t)b1);
  uint8_t r2 = 128 + rand() % 128, g2 = rand() % 32, b2 = 128 + rand() % 128; // Mark bad points in magenta color
  uint32_t rgb2 = ((uint32_t)r2 << 16 | (uint32_t)g2 << 8 | (uint32_t)b2);

//#pragma omp parallel for reduction(+: good_points, bad_points, occluded_points) 
  for(int new_ry = 0; new_ry < (int)new_pc->height; new_ry += skip_step){
    for(int new_rx = 0; new_rx < (int)new_pc->width; new_rx += skip_step, all++){
      //Backproject the transformed new 3D point to the 2D raster of the old image
      //TODO: Cache this?
      point_type& p = new_pc_transformed.at(new_rx, new_ry);
      if(p.z != p.z) continue; //NaN
      if(p.z < 0) continue; //Behind the camera
      int old_rx_center = round((p.x / p.z) * fx + cx);
      int old_ry_center = round((p.y / p.z) * fy + cy);
      if(old_rx_center >= (int)old_pc->width || old_rx_center < 0 ||
         old_ry_center >= (int)old_pc->height || old_ry_center < 0)
      {
        ROS_DEBUG("New point not projected into old image, skipping");
        continue;
      }
      int nbhd = 2; //2 => 5x5 neighbourhood
      bool good_point = false, occluded_point = false, bad_point = false;
      int startx = std::max(0, old_rx_center - nbhd);
      int starty = std::max(0, old_ry_center - nbhd);
      int endx = std::min(static_cast<int>(old_pc->width), old_rx_center + nbhd + 1);
      int endy = std::min(static_cast<int>(old_pc->height), old_ry_center + nbhd + 1);
      int neighbourhood_step = 2; //Search for depth jumps in this area
      for(int old_ry = starty; old_ry < endy; old_ry += neighbourhood_step){
        for(int old_rx = startx; old_rx < endx; old_rx += neighbourhood_step){

          const point_type& old_p = old_pc->at(old_rx, old_ry);
          if(old_p.z != old_p.z) continue; //NaN

          //Likelihood for old measurement = new measurement:
          double old_sigma = cloud_creation_skip_step * depth_covariance(old_p.z);
          //TODO: (Wrong) assumption: the transformation does not change the viewing angle. 
          double new_sigma = cloud_creation_skip_step * depth_covariance(p.z);
          //Assumption: independence of the sensor noise lets us sum the variances
          double joint_sigma = old_sigma + new_sigma;

          //Cumulative of the position: probability of being occluded
          double p_new_in_front = cdf(old_p.z, p.z, sqrt(joint_sigma));
          if(p_new_in_front < 0.001)
          { //it is behind and outside the 99.8% interval
            occluded_point = true; //Outside, but occluded
          }
          else if(p_new_in_front < 0.999)
          { //it is inside the 99.8% interval (and not behind)
            good_point = true;
            //goto end_of_neighbourhood_loop; <-- Don't break, search for a better Mahalanobis distance
          }
          else { //It would have blocked the view from the old camera position
            bad_point = true;
          }
          //sumloglikelihood += observation_p;
          //observation_count += p_new_in_front; //Discount by probability of having been occluded
        }
      } //End neighbourhood loop
      end_of_neighbourhood_loop:
      if(good_point) {
        good_points++;
      } else if(occluded_point){
        occluded_points++;
        if(mark_outliers){
#ifndef RGB_IS_4TH_DIM
          new_pc->at(new_rx, new_ry).rgb = *reinterpret_cast<float*>(&rgb1);
          old_pc->at(old_rx_center, old_ry_center).rgb = *reinterpret_cast<float*>(&rgb1);
#else
          new_pc->at(new_rx, new_ry).data[3] = *reinterpret_cast<float*>(&rgb1);
          old_pc->at(old_rx_center, old_ry_center).data[3] = *reinterpret_cast<float*>(&rgb1);
#endif
        }
      }
      else if(bad_point){
        bad_points++;
        if(mark_outliers){
#ifndef RGB_IS_4TH_DIM
          new_pc->at(new_rx, new_ry).rgb = *reinterpret_cast<float*>(&rgb2);
          old_pc->at(old_rx_center, old_ry_center).rgb = *reinterpret_cast<float*>(&rgb2);
#else
          new_pc->at(new_rx, new_ry).data[3] = *reinterpret_cast<float*>(&rgb2);
          old_pc->at(old_rx_center, old_ry_center).data[3] = *reinterpret_cast<float*>(&rgb2);
#endif
        }
      }
      else {} //only NaN?
    }
  }
  likelihood = sumloglikelihood / observation_count;
  confidence = observation_count;
  inliers = good_points;
  outliers = bad_points;
  occluded = occluded_points;
}

double rejectionSignificance(const Eigen::Matrix4f& proposed_transformation, //new to old
                             pointcloud_type::Ptr new_pc,
                             pointcloud_type::Ptr old_pc) 
{
  ScopedTimer s(__FUNCTION__);

  int skip_step = ParameterServer::instance()->get<int>("emm__skip_step");
  bool mark_outliers = ParameterServer::instance()->get<bool>("emm__mark_outliers");
  double observability_threshold = ParameterServer::instance()->get<double>("observability_threshold");
  if(skip_step < 0 || observability_threshold <= 0.0){
    return 1.0;
  }
  if(old_pc->width <= 1 || old_pc->height <= 1){
    //We need structured point clouds
    ROS_ERROR("Point cloud seems to be non-structured: %u/%u (w/h). Observation can not be evaluated!", old_pc->width, old_pc->height);
    if(ParameterServer::instance()->get<double>("voxelfilter_size") > 0){
      ROS_ERROR("The parameter voxelfilter_size is set. This is incompatible with the environment measurement model (parameter 'observability_threshold')");
    }
    return 1.0;
  }

  pointcloud_type new_pc_transformed;
  pcl::transformPointCloud(*new_pc, new_pc_transformed, proposed_transformation);

  //Camera calibration. FIXME: Get actual values
  float cx = old_pc->width /2 - 0.5;
  float cy = old_pc->height/2 - 0.5;
  float fx = 521.0f / (640.0/old_pc->width); 
  float fy = 521.0f / (480.0/old_pc->height); 

  double sum_mahalanobis_sq = 0.0;
  float observation_count = 0.0;

  unsigned int bad_points = 0, good_points = 0, occluded_points = 0;
//#pragma omp parallel for reduction(+: good_points, bad_points, occluded_points) 
  for(int new_ry = 0; new_ry < (int)new_pc->height; new_ry += skip_step){
    for(int new_rx = 0; new_rx < (int)new_pc->width; new_rx += skip_step){
      //Backproject the transformed new 3D point to the 2D raster of the old image
      //TODO: Cache this?
      point_type& p = new_pc_transformed.at(new_rx, new_ry);
      if(p.z != p.z) continue; //NaN
      int old_rx_center = round((p.x / p.z) * fx + cx);
      int old_ry_center = round((p.y / p.z) * fy + cy);
      if(old_rx_center >= (int)old_pc->width || old_rx_center < 0 ||
         old_ry_center >= (int)old_pc->height || old_ry_center < 0)
      {
        ROS_DEBUG("New point not projected into old image, skipping");
        continue;
      }
      int nbhd = 10; //10 => 21x21 neighbourhood
      bool good_point = false, occluded_point = false, bad_point = false;
      int startx = std::max(0, old_rx_center - nbhd);
      int starty = std::max(0, old_ry_center - nbhd);
      int endx = std::min(static_cast<int>(old_pc->width), old_rx_center + nbhd + 1);
      int endy = std::min(static_cast<int>(old_pc->height), old_ry_center + nbhd + 1);
      int neighbourhood_step = 1; //Search for depth jumps in this area
      double min_mahalanobis_sq = std::numeric_limits<double>::infinity();
      double min_dz = std::numeric_limits<double>::infinity();
      std::vector<double> depth_values_for_neighborhood;
      for(int old_ry = starty; old_ry < endy; old_ry += neighbourhood_step){
        for(int old_rx = startx; old_rx < endx; old_rx += neighbourhood_step){

          const point_type& old_p = old_pc->at(old_rx, old_ry);
          if(old_p.z != old_p.z) continue; //NaN

          //Sensor model: the new point (p) is projected onto the old point (old_p)
          double dz = (old_p.z - p.z); //Positive: p.z smaller than (in front of) old_p.z
          depth_values_for_neighborhood.push_back(old_p.z);
          double dz_sq = dz*dz;
          //Likelihood for old measurement = new measurement:
          double old_cov = depth_covariance(old_p.z);
          //TODO: (Wrong) assumption: the transformation does not change the viewing angle. 
          double new_cov = depth_covariance(p.z);
          //Assumption: independence of the sensor noise lets us sum the variances
          double joint_cov = old_cov + new_cov;
          //Gaussian probability of the new measurement being the same as the old one
          double mahal_distance_sq = dz_sq / joint_cov;

          //Independent statistical tests
          if(dz_sq < 9*joint_cov){ //Within 3 sigma
            good_point = true;
            //if(min_mahalanobis_sq > mahal_distance_sq) min_mahalanobis_sq = mahal_distance_sq;
            if(min_dz > dz){
              min_dz = dz;
            }
          }
          else { 
            if(dz > 0){ //New point was projected in front of the old point
              bad_point = true;
              //if(min_mahalanobis_sq > mahal_distance_sq) min_mahalanobis_sq = mahal_distance_sq;
              if(min_dz > dz){
                min_dz = dz;
              }
            }
            else {
              occluded_point = true;
            }
          }
        }
      } //End neighbourhood loop

      if(good_point || (!occluded_point && bad_point)){
        double sum = std::accumulate(depth_values_for_neighborhood.begin(), depth_values_for_neighborhood.end(), 0.0);
        double depth_mean_nbhd = sum / depth_values_for_neighborhood.size();

        double sq_sum = std::inner_product(depth_values_for_neighborhood.begin(), depth_values_for_neighborhood.end(), depth_values_for_neighborhood.begin(), 0.0);
        double nbhd_cov = sq_sum / depth_values_for_neighborhood.size() - depth_mean_nbhd * depth_mean_nbhd;
        double nbhd_dz = (depth_mean_nbhd - p.z); //Positive: p.z smaller than (in front of) the neighbourhood mean
        double new_cov = depth_covariance(p.z);

        min_mahalanobis_sq = (nbhd_dz*nbhd_dz) / (new_cov + depth_covariance(depth_mean_nbhd) + nbhd_cov);
      }

      if(good_point){
        good_points++;
        //TODO: One tail of the distribution, containing the occluded points, is removed,
        //so samples in the bulk need to be discounted to keep the distribution
        sum_mahalanobis_sq += min_mahalanobis_sq;
        observation_count++;
      }
      else if(occluded_point){
        occluded_points++;
      }
      else if(bad_point){
        bad_points++;
        sum_mahalanobis_sq += min_mahalanobis_sq;
        observation_count++;
        if(mark_outliers){
          uint8_t r1 = 255, g1 = 0, b1 = 0; // Mark bad point in the new point cloud with red color
          uint32_t rgb1 = ((uint32_t)r1 << 16 | (uint32_t)g1 << 8 | (uint32_t)b1);
          uint8_t r2 = 0, g2 = 255, b2 = 255; // Mark bad point in the older point cloud with cyan color
          uint32_t rgb2 = ((uint32_t)r2 << 16 | (uint32_t)g2 << 8 | (uint32_t)b2);
#ifndef RGB_IS_4TH_DIM
          new_pc->at(new_rx, new_ry).rgb = *reinterpret_cast<float*>(&rgb1);
          old_pc->at(old_rx_center, old_ry_center).rgb = *reinterpret_cast<float*>(&rgb2);
#else
          new_pc->at(new_rx, new_ry).data[3] = *reinterpret_cast<float*>(&rgb1);
          old_pc->at(old_rx_center, old_ry_center).data[3] = *reinterpret_cast<float*>(&rgb2);
#endif
        }
      }
      else {} //only NaN?
    }
  }
  boost::math::chi_squared chi_square(observation_count);
  double p_value = boost::math::cdf(chi_square, sum_mahalanobis_sq);
  double quantile = boost::math::quantile(chi_square, 0.75);
  ROS_INFO("Hypothesis checking: Chi² with %f degrees of freedom. 75%% quantile: %g, MD: %g", observation_count, quantile, sum_mahalanobis_sq);
  ROS_INFO("Hypothesis checking: P-Value: %.20g, good_point_ratio: %d/%d: %g, occluded points: %d", p_value, good_points, good_points+bad_points, ((float)good_points)/(good_points+bad_points), occluded_points);
  return p_value;
}

bool observation_criterion_met(unsigned int inliers, unsigned int outliers, unsigned int all, double& quality)
{
  double obs_thresh = ParameterServer::instance()->get<double>("observability_threshold");
  if(obs_thresh < 0) return true;
  quality = inliers/static_cast<double>(inliers+outliers);
  double certainty = inliers/static_cast<double>(all);
  bool criterion1_met = quality > obs_thresh; //TODO: parameterize (and use a meaningful statistic)
  bool criterion2_met = certainty > 0.25; //TODO: parameterize (and use a meaningful statistic)
  bool both_criteria_met = criterion1_met && criterion2_met;
  ROS_WARN_COND(!criterion1_met, "Transformation does not meet observation likelihood criterion, because of ratio good/(good+bad): %f. Threshold: %f", inliers/static_cast<float>(inliers+outliers), obs_thresh);
  ROS_WARN_COND(!criterion2_met, "Transformation does not meet observation likelihood criterion, because of ratio good/all: %f. Threshold: 0.25", certainty);
  return both_criteria_met;
}
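// Usage sketch (assumption, not in the original file): the environment
// measurement model is typically evaluated and gated like this:
//
//   unsigned int inliers, outliers, occluded, all;
//   double likelihood, confidence, quality;
//   observationLikelihood(transform, new_cloud, old_cloud, cam_info,
//                         likelihood, confidence, inliers, outliers, occluded, all);
//   if(!observation_criterion_met(inliers, outliers, all, quality))
//     ROS_WARN("Transformation rejected by the environment measurement model");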

void getColor(const point_type& p, unsigned char& r, unsigned char& g, unsigned char& b){
#ifndef RGB_IS_4TH_DIM
    b = *(  (unsigned char*)&(p.rgb));
    g = *(1+(unsigned char*)&(p.rgb));
    r = *(2+(unsigned char*)&(p.rgb));
#else
    b = *(  (unsigned char*)&(p.data[3]));
    g = *(1+(unsigned char*)&(p.data[3]));
    r = *(2+(unsigned char*)&(p.data[3]));
#endif
}

void overlay_edges(cv::Mat visual, cv::Mat depth, cv::Mat& visual_edges, cv::Mat& depth_edges)
{
  if(visual.type() != CV_8UC1){
    visual_edges = cv::Mat(visual.rows, visual.cols, CV_8UC1); 
    cv::cvtColor(visual, visual_edges, CV_RGB2GRAY);
  }
  else 
  {
    visual_edges = visual;
  }
  cv::blur(visual_edges, visual_edges, cv::Size(3,3));
  cv::Canny(visual_edges, visual_edges, 25, 300);
  cv::Canny(depth, depth_edges, 10, 300);
}