misc.cpp
/* This file is part of RGBDSLAM.
 * 
 * RGBDSLAM is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 * 
 * RGBDSLAM is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 * 
 * You should have received a copy of the GNU General Public License
 * along with RGBDSLAM.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <ros/ros.h>
#include <tf/transform_datatypes.h>
#include <Eigen/Core>
#include <QString>
#include <QMatrix4x4>
#include <ctime>
#include <limits>
#include <cstdlib> //for std::abs(long) in asyncFrameDrop
#include "parameter_server.h"
#include <cv.h>

#include <g2o/math_groups/se3quat.h>

#include <pcl_ros/transforms.h>

#if CV_MAJOR_VERSION > 2 || CV_MINOR_VERSION >= 4
#include "opencv2/core/core.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/nonfree/nonfree.hpp"
#endif
void printQMatrix4x4(const char* name, const QMatrix4x4& m){
    ROS_DEBUG("QMatrix %s:", name);
    ROS_DEBUG("%f\t%f\t%f\t%f", m(0,0), m(0,1), m(0,2), m(0,3));
    ROS_DEBUG("%f\t%f\t%f\t%f", m(1,0), m(1,1), m(1,2), m(1,3));
    ROS_DEBUG("%f\t%f\t%f\t%f", m(2,0), m(2,1), m(2,2), m(2,3));
    ROS_DEBUG("%f\t%f\t%f\t%f", m(3,0), m(3,1), m(3,2), m(3,3));
}

void printTransform(const char* name, const tf::Transform t) {
    ROS_INFO_STREAM(name << ": Translation " << t.getOrigin().x() << " " << t.getOrigin().y() << " " << t.getOrigin().z());
    ROS_INFO_STREAM(name << ": Rotation " << t.getRotation().getX() << " " << t.getRotation().getY() << " " << t.getRotation().getZ() << " " << t.getRotation().getW());
}

void logTransform(QTextStream& out, const tf::Transform& t, double timestamp, const char* label) {
    if(label) out << label << ": ";
    out << timestamp << " " << t.getOrigin().x() << " " << t.getOrigin().y() << " " << t.getOrigin().z() << " " << t.getRotation().getX() << " " << t.getRotation().getY() << " " << t.getRotation().getZ() << " " << t.getRotation().getW() << "\n";
}
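// Each line written above is "timestamp tx ty tz qx qy qz qw" (when no label
// is given), i.e. the trajectory layout used by the TUM RGB-D benchmark
// evaluation tools. A minimal usage sketch (file name assumed):
//   QFile logfile("trajectory.txt");
//   logfile.open(QIODevice::WriteOnly | QIODevice::Text);
//   QTextStream out(&logfile);
//   logTransform(out, transform, transform_stamp.toSec());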

QMatrix4x4 g2o2QMatrix(const g2o::SE3Quat se3) {
    struct timespec starttime, finish;
    double elapsed;
    clock_gettime(CLOCK_MONOTONIC, &starttime);
    Eigen::Matrix<double, 4, 4> m = se3.to_homogenious_matrix(); //sic, g2o's spelling
    ROS_DEBUG_STREAM("Eigen Matrix:\n" << m);
    QMatrix4x4 qmat( static_cast<qreal*>( m.data() ) );
    //Eigen stores column-major by default, the QMatrix4x4(qreal*) constructor
    //reads row-major, hence the transposes
    printQMatrix4x4("from conversion", qmat.transposed());
    clock_gettime(CLOCK_MONOTONIC, &finish);
    elapsed = (finish.tv_sec - starttime.tv_sec) + (finish.tv_nsec - starttime.tv_nsec) / 1000000000.0;
    ROS_INFO_STREAM_COND_NAMED(elapsed > ParameterServer::instance()->get<double>("min_time_reported"),
                               "timings", __FUNCTION__ << " runtime: "<< elapsed <<" s");
    return qmat.transposed();
}

tf::Transform g2o2TF(const g2o::SE3Quat se3) {
    tf::Transform result;
    tf::Vector3 translation;
    translation.setX(se3.translation().x());
    translation.setY(se3.translation().y());
    translation.setZ(se3.translation().z());

    tf::Quaternion rotation;
    rotation.setX(se3.rotation().x());
    rotation.setY(se3.rotation().y());
    rotation.setZ(se3.rotation().z());
    rotation.setW(se3.rotation().w());

    result.setOrigin(translation);
    result.setRotation(rotation);
    //printTransform("from conversion", result);
    return result;
}
//Adapted from: /opt/ros/unstable/stacks/perception_pcl/pcl/src/pcl/registration/transforms.hpp

//template <typename PointT> void
//transformAndAppendPointCloud (const pcl::PointCloud<PointT> &cloud_in, pcl::PointCloud<PointT> &cloud_to_append_to,
//                              const tf::Transform transformation)
void transformAndAppendPointCloud (const pointcloud_type &cloud_in, 
                                   pointcloud_type &cloud_to_append_to,
                                   const tf::Transform transformation, float max_Depth)
{
    bool compact = !ParameterServer::instance()->get<bool>("preserve_raster_on_save");
    Eigen::Matrix4f eigen_transform;
    pcl_ros::transformAsMatrix(transformation, eigen_transform);
    unsigned int cloud_to_append_to_original_size = cloud_to_append_to.size();
    if(cloud_to_append_to.points.size() == 0){
        cloud_to_append_to.header   = cloud_in.header;
        cloud_to_append_to.width    = 0;
        cloud_to_append_to.height   = 0;
        cloud_to_append_to.is_dense = false;
    }

    ROS_DEBUG("max_Depth = %f", max_Depth);
    ROS_DEBUG("cloud_to_append_to_original_size = %i", cloud_to_append_to_original_size);

    //Append all points untransformed, then transform them in place below
    cloud_to_append_to += cloud_in;

    Eigen::Matrix3f rot   = eigen_transform.block<3, 3> (0, 0);
    Eigen::Vector3f trans = eigen_transform.block<3, 1> (0, 3);
    point_type origin = point_type();
    origin.x = 0;
    origin.y = 0;
    origin.z = 0;
    int j = 0;
    for (size_t i = 0; i < cloud_in.points.size (); ++i)
    { 
      Eigen::Map<Eigen::Vector3f> p_in (const_cast<float*>(&cloud_in.points[i].x), 3, 1);
      Eigen::Map<Eigen::Vector3f> p_out (&cloud_to_append_to.points[j+cloud_to_append_to_original_size].x, 3, 1);
      if(compact){ cloud_to_append_to.points[j+cloud_to_append_to_original_size] = cloud_in.points[i]; }
      //Filter out points with a range greater than the given parameter; do nothing if it is negative
      if(max_Depth >= 0){
        if(pcl::squaredEuclideanDistance(cloud_in.points[i], origin) > max_Depth*max_Depth){
           p_out[0]= std::numeric_limits<float>::quiet_NaN();
           p_out[1]= std::numeric_limits<float>::quiet_NaN();
           p_out[2]= std::numeric_limits<float>::quiet_NaN();
           if(!compact) j++; 
           continue;
         }
      }
      if (pcl_isnan (cloud_in.points[i].x) || pcl_isnan (cloud_in.points[i].y) || pcl_isnan (cloud_in.points[i].z)){
         if(!compact) j++;
         continue;
      }
      p_out = rot * p_in + trans;
      j++;
    }
    if(compact){
      cloud_to_append_to.points.resize(j+cloud_to_append_to_original_size);
      cloud_to_append_to.width    = 1;
      cloud_to_append_to.height   = j+cloud_to_append_to_original_size;
    }
}
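// Usage sketch (cloud and transform names hypothetical):
//   pointcloud_type accumulated_map;
//   tf::Transform cam_to_map = some_node_pose; //e.g. from the pose graph
//   //Append new_cloud to accumulated_map, transformed into the map frame;
//   //a negative max_Depth disables the range filter entirely.
//   transformAndAppendPointCloud(new_cloud, accumulated_map, cam_to_map, -1.0);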

//Conversion helper: transform a point into the world frame and convert the types
geometry_msgs::Point pointInWorldFrame(const Eigen::Vector4f& point3d, g2o::SE3Quat transf){
    Eigen::Vector3d tmp(point3d[0], point3d[1], point3d[2]);
    tmp = transf * tmp; //transform to world frame
    geometry_msgs::Point p;
    p.x = tmp.x(); 
    p.y = tmp.y(); 
    p.z = tmp.z();
    return p;
}
void mat2dist(const Eigen::Matrix4f& t, double &dist){
    dist = sqrt(t(0,3)*t(0,3)+t(1,3)*t(1,3)+t(2,3)*t(2,3));
}

void mat2RPY(const Eigen::Matrix4f& t, double& roll, double& pitch, double& yaw) {
    roll = atan2(t(2,1),t(2,2));
    pitch = atan2(-t(2,0),sqrt(t(2,1)*t(2,1)+t(2,2)*t(2,2)));
    yaw = atan2(t(1,0),t(0,0));
}
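// Derivation note: for R = Rz(yaw)*Ry(pitch)*Rx(roll) the third row of the
// rotation block is
//   r31 = -sin(pitch),  r32 = cos(pitch)*sin(roll),  r33 = cos(pitch)*cos(roll)
// and the first column starts with r11 = cos(pitch)*cos(yaw),
// r21 = cos(pitch)*sin(yaw). Hence roll = atan2(r32, r33),
// pitch = atan2(-r31, sqrt(r32^2 + r33^2)), yaw = atan2(r21, r11),
// exactly the formulas above. Results are in radians.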
//Decompose a transformation into roll/pitch/yaw (in degrees) and translation distance
void mat2components(const Eigen::Matrix4f& t, double& roll, double& pitch, double& yaw, double& dist){
  mat2RPY(t, roll,pitch,yaw);
  mat2dist(t, dist);

  roll = roll/M_PI*180;
  pitch = pitch/M_PI*180;
  yaw = yaw/M_PI*180;
}

// true iff the edge's transformation is big enough to qualify for a new vertex
bool isBigTrafo(const Eigen::Matrix4f& t){
    double roll, pitch, yaw, dist;

    mat2RPY(t, roll,pitch,yaw);
    mat2dist(t, dist);

    roll = roll/M_PI*180;
    pitch = pitch/M_PI*180;
    yaw = yaw/M_PI*180;

    //use absolute values: the angles from mat2RPY may be negative
    double max_angle = std::max(fabs(roll),std::max(fabs(pitch),fabs(yaw)));

    // at least, e.g., 10cm or 5deg (thresholds are parameters)
    return (dist > ParameterServer::instance()->get<double>("min_translation_meter")
                || max_angle > ParameterServer::instance()->get<int>("min_rotation_degree"));
}

bool isBigTrafo(const g2o::SE3Quat& t){
    float angle_around_axis = 2.0*acos(t.rotation().w()) *180.0 / M_PI;
    float dist = t.translation().norm();
    QString infostring;
    ROS_INFO("Rotation:% 4.2f, Distance: % 4.3fm", angle_around_axis, dist);
    infostring.sprintf("Rotation:% 4.2f, Distance: % 4.3fm", angle_around_axis, dist);
    //Q_EMIT setGUIInfo2(infostring);
    ParameterServer* ps = ParameterServer::instance();
    if(dist > ps->get<double>("min_translation_meter") ||
       angle_around_axis > ps->get<int>("min_rotation_degree"))
    {
      //Too big fails too
      if(dist < ps->get<double>("max_translation_meter") ||
         angle_around_axis < ps->get<int>("max_rotation_degree"))
      {
        return true;
      }
      else
      {
        ROS_INFO("Rejected Transformation because it is too large");
      }
    }
    return false;
}
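// Note on the rotation magnitude used above: for a unit quaternion
// q = (w, x, y, z) the angle of the equivalent angle-axis rotation is
// 2*acos(w), so angle_around_axis is that angle in degrees. Beware the
// quaternion double cover: q and -q encode the same rotation, so a small
// rotation represented with w < 0 shows up as a large angle here.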


/*
bool overlappingViews(LoadedEdge3D edge){
    //opening angles
   double alpha = 57.0/180.0*M_PI;
   double beta = 47.0/180.0*M_PI; 
   //assumes robot coordinate system (x is front, y is left, z is up)
   Eigen::Matrix<double, 4,3> cam1;
   cam1 << 1.0,  std::tan(alpha),  std::tan(beta), //upper left
           1.0, -std::tan(alpha),  std::tan(beta), //upper right
           1.0,  std::tan(alpha), -std::tan(beta), //lower left
           1.0, -std::tan(alpha), -std::tan(beta); //lower right
   return false;
}
bool triangleRayIntersection(Eigen::Vector3d triangle1,Eigen::Vector3d triangle2, 
                             Eigen::Vector3d ray_origin, Eigen::Vector3d ray){
    Eigen::Matrix3d m;
    m.col(2) = -ray;
    m.col(0) = triangle1;
    m.col(1) = triangle2;
    Eigen::Vector3d lengths = m.inverse() * ray_origin;
    return (lengths(0) < 0 && lengths(1) > 0 && lengths(2) > 0 );
}
*/


g2o::SE3Quat tf2G2O(const tf::Transform t) 
{
  Eigen::Quaterniond eigen_quat(t.getRotation().getW(), t.getRotation().getX(), t.getRotation().getY(), t.getRotation().getZ());
  Eigen::Vector3d translation(t.getOrigin().x(), t.getOrigin().y(), t.getOrigin().z());
  g2o::SE3Quat result(eigen_quat, translation);
  return result;
}

g2o::SE3Quat eigen2G2O(const Eigen::Matrix4d& eigen_mat) 
{
  Eigen::Affine3d eigen_transform(eigen_mat);
  Eigen::Quaterniond eigen_quat(eigen_transform.rotation());
  Eigen::Vector3d translation(eigen_mat(0, 3), eigen_mat(1, 3), eigen_mat(2, 3));
  g2o::SE3Quat result(eigen_quat, translation);
  return result;
}
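// Round-trip sketch for the converters above (pose name hypothetical):
//   tf::Transform pose = node_transform;   //e.g. from a tf lookup
//   g2o::SE3Quat se3 = tf2G2O(pose);       //to g2o for graph optimization
//   tf::Transform back = g2o2TF(se3);      //back to tf for broadcasting
//   QMatrix4x4 qmat = g2o2QMatrix(se3);    //to Qt, e.g. for display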

using namespace cv;

FeatureDetector* createDetector( const string& detectorType ) 
{
    ParameterServer* params = ParameterServer::instance();
    FeatureDetector* fd = 0;
    if( !detectorType.compare( "FAST" ) ) {
        //fd = new FastFeatureDetector( 20/*threshold*/, true/*nonmax_suppression*/ );
        fd = new DynamicAdaptedFeatureDetector (new FastAdjuster(20,true), 
                                                params->get<int>("min_keypoints"),
                                                params->get<int>("max_keypoints"),
                                                params->get<int>("adjuster_max_iterations"));
    }
    else if( !detectorType.compare( "STAR" ) ) {
        fd = new StarFeatureDetector( 16/*max_size*/, 5/*response_threshold*/, 10/*line_threshold_projected*/,
                                      8/*line_threshold_binarized*/, 5/*suppress_nonmax_size*/ );
    }
    else if( !detectorType.compare( "SIFT" ) ) {
#if CV_MAJOR_VERSION > 2 || CV_MINOR_VERSION <= 3
        fd = new SiftFeatureDetector(SIFT::DetectorParams::GET_DEFAULT_THRESHOLD(),
                                     SIFT::DetectorParams::GET_DEFAULT_EDGE_THRESHOLD());
        ROS_INFO("Default SIFT threshold: %f, Default SIFT Edge Threshold: %f", 
                 SIFT::DetectorParams::GET_DEFAULT_THRESHOLD(),
                 SIFT::DetectorParams::GET_DEFAULT_EDGE_THRESHOLD());
#else
        fd = new SiftFeatureDetector();
#endif
    }
    else if( !detectorType.compare( "SURF" ) ) {
        /* fd = new SurfFeatureDetector(200.0, 6, 5); */
        fd = new DynamicAdaptedFeatureDetector(new SurfAdjuster(),
                                               params->get<int>("min_keypoints"),
                                               params->get<int>("max_keypoints")+300,
                                               params->get<int>("adjuster_max_iterations"));
    }
    else if( !detectorType.compare( "MSER" ) ) {
        fd = new MserFeatureDetector( 1/*delta*/, 60/*min_area*/, 114400/*_max_area*/, 0.35f/*max_variation*/,
                0.2/*min_diversity*/, 200/*max_evolution*/, 1.01/*area_threshold*/, 0.003/*min_margin*/,
                5/*edge_blur_size*/ );
    }
    else if( !detectorType.compare( "GFTT" ) ) {
        fd = new GoodFeaturesToTrackDetector( 200/*maxCorners*/, 0.001/*qualityLevel*/, 1./*minDistance*/,
                                              5/*int _blockSize*/, true/*useHarrisDetector*/, 0.04/*k*/ );
    }
    else if( !detectorType.compare( "ORB" ) ) {
#if CV_MAJOR_VERSION > 2 || CV_MINOR_VERSION == 3
        fd = new OrbFeatureDetector(params->get<int>("max_keypoints")+1500,
                ORB::CommonParams(1.2, ORB::CommonParams::DEFAULT_N_LEVELS, 31, ORB::CommonParams::DEFAULT_FIRST_LEVEL));
#elif CV_MAJOR_VERSION > 2 || CV_MINOR_VERSION >= 4
        fd = new OrbFeatureDetector();
#else
        ROS_ERROR("ORB features are not implemented in your version of OpenCV");
#endif
    }
    else if( !detectorType.compare( "SIFTGPU" ) ) {
        ROS_INFO("%s is to be used", detectorType.c_str());
        ROS_DEBUG("Creating SURF detector as fallback.");
        fd = createDetector("SURF"); //recursive call with correct parameter
    }
    else {
        ROS_WARN("No valid detector-type given: %s. Using SURF.", detectorType.c_str());
        fd = createDetector("SURF"); //recursive call with correct parameter
    }
    ROS_ERROR_COND(fd == 0, "No detector could be created");
    return fd;
}
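// Usage sketch (assumes a grayscale cv::Mat named "gray_img"):
//   cv::Ptr<cv::FeatureDetector> detector(createDetector("SURF"));
//   std::vector<cv::KeyPoint> keypoints;
//   detector->detect(gray_img, keypoints);
// Wrapping the raw pointer in cv::Ptr takes ownership and avoids a leak.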

DescriptorExtractor* createDescriptorExtractor( const string& descriptorType ) 
{
    DescriptorExtractor* extractor = 0;
    if( !descriptorType.compare( "SIFT" ) ) {
        extractor = new SiftDescriptorExtractor();/*( double magnification=SIFT::DescriptorParams::GET_DEFAULT_MAGNIFICATION(), bool isNormalize=true, bool recalculateAngles=true, int nOctaves=SIFT::CommonParams::DEFAULT_NOCTAVES, int nOctaveLayers=SIFT::CommonParams::DEFAULT_NOCTAVE_LAYERS, int firstOctave=SIFT::CommonParams::DEFAULT_FIRST_OCTAVE, int angleMode=SIFT::CommonParams::FIRST_ANGLE )*/
    }
    else if( !descriptorType.compare( "SURF" ) ) {
        extractor = new SurfDescriptorExtractor();/*( int nOctaves=4, int nOctaveLayers=2, bool extended=false )*/
    }
#if CV_MAJOR_VERSION > 2 || CV_MINOR_VERSION >= 3
    else if( !descriptorType.compare( "ORB" ) ) {
        extractor = new OrbDescriptorExtractor();
    }
#endif
    else if( !descriptorType.compare( "SIFTGPU" ) ) {
      ROS_INFO("%s is to be used as extractor, creating SURF descriptor extractor as fallback.", descriptorType.c_str());
      extractor = new SurfDescriptorExtractor();/*( int nOctaves=4, int nOctaveLayers=2, bool extended=false )*/
    }
    else {
      ROS_ERROR("No valid descriptor-extractor-type given: %s. Using SURF", descriptorType.c_str());
      extractor = createDescriptorExtractor("SURF");
    }
    ROS_ERROR_COND(extractor == 0, "No extractor could be created");
    return extractor;
}
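// Usage sketch, combining detector and extractor (image name hypothetical):
//   cv::Ptr<cv::DescriptorExtractor> extractor(createDescriptorExtractor("SURF"));
//   cv::Mat descriptors; //one row per remaining keypoint after compute()
//   extractor->compute(gray_img, keypoints, descriptors);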

void depthToCV8UC1(const cv::Mat& float_img, cv::Mat& mono8_img){
  //(Re)allocate the output image only if the size changed
  if(mono8_img.rows != float_img.rows || mono8_img.cols != float_img.cols){
    mono8_img = cv::Mat(float_img.size(), CV_8UC1);
  }
  cv::convertScaleAbs(float_img, mono8_img, 100, 0.0);
  //The following doesn't work due to NaNs
  //double minVal, maxVal; 
  //minMaxLoc(float_img, &minVal, &maxVal);
  //ROS_DEBUG("Minimum/Maximum Depth in current image: %f/%f", minVal, maxVal);
  //mono8_img = cv::Scalar(0);
  //cv::line( mono8_img, cv::Point2i(10,10),cv::Point2i(200,100), cv::Scalar(255), 3, 8);
}
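// Note: convertScaleAbs computes saturate_cast<uchar>(|100 * depth|), so a
// depth of 1cm maps to intensity 1 and anything beyond 2.55m saturates at
// 255; NaN inputs typically come out as 0. This is a visualization-grade
// conversion, not a measurement.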

//Little debugging helper functions
std::string openCVCode2String(unsigned int code){
  switch(code){
    case 0 : return std::string("CV_8UC1" );
    case 8 : return std::string("CV_8UC2" );
    case 16: return std::string("CV_8UC3" );
    case 24: return std::string("CV_8UC4" );
    case 2 : return std::string("CV_16UC1");
    case 10: return std::string("CV_16UC2");
    case 18: return std::string("CV_16UC3");
    case 26: return std::string("CV_16UC4");
    case 5 : return std::string("CV_32FC1");
    case 13: return std::string("CV_32FC2");
    case 21: return std::string("CV_32FC3");
    case 29: return std::string("CV_32FC4");
  }
  return std::string("Unknown");
}
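// These magic numbers follow OpenCV's CV_MAKETYPE(depth, channels) =
// depth + ((channels - 1) << 3) encoding with CV_8U=0, CV_16U=2, CV_32F=5,
// e.g. CV_32FC3 = 5 + (2 << 3) = 21.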

void printMatrixInfo(cv::Mat& image, std::string name){
  ROS_INFO_STREAM("Matrix " << name << " - Type:" << openCVCode2String(image.type()) << " rows: " << image.rows << " cols: " << image.cols);
}

bool asyncFrameDrop(ros::Time depth, ros::Time rgb)
{
  //Compare the full timestamps, not just their nanosecond parts
  long rgb_timediff = std::abs(static_cast<long>((rgb - depth).toNSec()));
  if(rgb_timediff > 33333333){
     ROS_INFO("Depth image time: %d - %d", depth.sec, depth.nsec);
     ROS_INFO("RGB   image time: %d - %d", rgb.sec, rgb.nsec);
     ROS_WARN("Depth and RGB image timestamps differ by more than 1/30 sec: %li (nsec)", rgb_timediff);
     if(ParameterServer::instance()->get<bool>("drop_async_frames")){
       ROS_WARN("Asynchronous frames ignored. See parameters if you want to keep async frames.");
       return true;
     }
  } else {
     ROS_DEBUG("Depth image time: %d - %d", depth.sec, depth.nsec);
     ROS_DEBUG("RGB   image time: %d - %d", rgb.sec, rgb.nsec);
  }
  return false;
}
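// Usage sketch inside a (roughly) synchronized RGB/depth callback
// (callback signature hypothetical):
//   void cloudCallback(const sensor_msgs::ImageConstPtr& depth_msg,
//                      const sensor_msgs::ImageConstPtr& rgb_msg){
//     if(asyncFrameDrop(depth_msg->header.stamp, rgb_msg->header.stamp))
//       return; //frames too far apart in time, skip this pair
//     //...process the frame pair...
//   }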


#include <sensor_msgs/Image.h>
#include <sensor_msgs/CameraInfo.h>

typedef union
{
  struct /*anonymous*/
  {
    unsigned char Blue;
    unsigned char Green;
    unsigned char Red;
    unsigned char Alpha;
  };
  float float_value;
  long long_value;
} RGBValue;
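// Packing sketch: on a little-endian machine (assumed here) the anonymous
// struct members alias the bytes of float_value from low to high, matching
// PCL's packed-RGB float layout (B, G, R, A):
//   RGBValue color;
//   color.Red = 255; color.Green = 0; color.Blue = 0; color.Alpha = 0;
//   pt.rgb = color.float_value; //pt now carries a pure red color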

pointcloud_type* createXYZRGBPointCloud (const sensor_msgs::ImageConstPtr& depth_msg, 
                                         const sensor_msgs::ImageConstPtr& rgb_msg,
                                         const sensor_msgs::CameraInfoConstPtr& cam_info) 
{
  struct timespec starttime, finish;
  double elapsed;
  clock_gettime(CLOCK_MONOTONIC, &starttime);
  pointcloud_type* cloud (new pointcloud_type() );
  cloud->header.stamp     = depth_msg->header.stamp;
  cloud->header.frame_id  = rgb_msg->header.frame_id;
  cloud->is_dense         = true; //single point of view, 2d rasterized

  float cx, cy, fx, fy;//principal point and focal lengths
  unsigned color_step, color_skip;

  cloud->height = depth_msg->height;
  cloud->width = depth_msg->width;
  cx = cam_info->K[2]; //(cloud->width >> 1) - 0.5f;
  cy = cam_info->K[5]; //(cloud->height >> 1) - 0.5f;
  fx = 1.0f / cam_info->K[0]; 
  fy = 1.0f / cam_info->K[4]; 
  int pixel_data_size = 3;
  char red_idx = 0, green_idx = 1, blue_idx = 2;
  if(rgb_msg->encoding.compare("mono8") == 0) pixel_data_size = 1;
  if(rgb_msg->encoding.compare("bgr8") == 0) { red_idx = 2; blue_idx = 0; }

  ROS_ERROR_COND(pixel_data_size == 0, "Unknown image encoding: %s!", rgb_msg->encoding.c_str());
  color_step = pixel_data_size * rgb_msg->width / cloud->width;
  color_skip = pixel_data_size * (rgb_msg->height / cloud->height - 1) * rgb_msg->width;

  cloud->points.resize (cloud->height * cloud->width);

  const float* depth_buffer = reinterpret_cast<const float*>(&depth_msg->data[0]);
  const uint8_t* rgb_buffer = &rgb_msg->data[0];

  // depth_msg already has the desired dimensions, but rgb_msg may be higher res.
  int color_idx = 0, depth_idx = 0;
  double depth_scaling = ParameterServer::instance()->get<double>("depth_scaling_factor");

  pointcloud_type::iterator pt_iter = cloud->begin ();
  for (int v = 0; v < (int)cloud->height; ++v, color_idx += color_skip)
  {
    for (int u = 0; u < (int)cloud->width; ++u, color_idx += color_step, ++depth_idx, ++pt_iter)
    {
      point_type& pt = *pt_iter;
      float Z = depth_buffer[depth_idx] * depth_scaling;

      // Check for invalid measurements
      if (std::isnan (Z))
      {
        pt.x = pt.y = pt.z = Z;
      }
      else // Fill in XYZ
      {
        pt.x = (u - cx) * Z * fx;
        pt.y = (v - cy) * Z * fy;
        pt.z = Z;
      }

      // Fill in color
      RGBValue color;
      if(pixel_data_size == 3){
        color.Red   = rgb_buffer[color_idx + red_idx];
        color.Green = rgb_buffer[color_idx + green_idx];
        color.Blue  = rgb_buffer[color_idx + blue_idx];
      } else {
        color.Red   = color.Green = color.Blue = rgb_buffer[color_idx];
      }
      color.Alpha = 0;
      pt.rgb = color.float_value;
    }
  }

  clock_gettime(CLOCK_MONOTONIC, &finish);
  elapsed = (finish.tv_sec - starttime.tv_sec) + (finish.tv_nsec - starttime.tv_nsec) / 1000000000.0;
  ROS_INFO_STREAM_COND_NAMED(elapsed > ParameterServer::instance()->get<double>("min_time_reported"),
                             "timings", __FUNCTION__ << " runtime: "<< elapsed <<" s");
  return cloud;
}
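// The loop above applies the inverse pinhole camera model
//   X = (u - cx) * Z / f_x,   Y = (v - cy) * Z / f_y,
// with fx and fy stored as reciprocals of the focal lengths in K (see the
// 1.0f / cam_info->K[0] above), so the division becomes a multiplication.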

double errorFunction(const Eigen::Vector4f& x1, const double x1_depth_cov, 
                     const Eigen::Vector4f& x2, const double x2_depth_cov, 
                     const Eigen::Matrix4f& tf_1_to_2)
{
  const double cam_angle_x = 58.0/180.0*M_PI;
  const double cam_angle_y = 45.0/180.0*M_PI;
  const double cam_resol_x = 640;
  const double cam_resol_y = 480;
  const double raster_stddev_x = 2*tan(cam_angle_x/cam_resol_x);  //2pix stddev in x
  const double raster_stddev_y = 2*tan(cam_angle_y/cam_resol_y);  //2pix stddev in y
  const double raster_cov_x = raster_stddev_x * raster_stddev_x;
  const double raster_cov_y = raster_stddev_y * raster_stddev_y;

  ROS_WARN_COND(x1(3) != 1.0, "4th element of x1 should be 1.0, is %f", x1(3));
  ROS_WARN_COND(x2(3) != 1.0, "4th element of x2 should be 1.0, is %f", x2(3));
  
  Eigen::Vector3d mu_1 = x1.head<3>().cast<double>();
  Eigen::Vector3d mu_2 = x2.head<3>().cast<double>();
  Eigen::Matrix3d rotation_mat = tf_1_to_2.block(0,0,3,3).cast<double>();

  //Point 1
  Eigen::Matrix3d cov1 = Eigen::Matrix3d::Identity();
  cov1(0,0) = raster_cov_x * mu_1(2); //how big is 1px std dev in meter, depends on depth
  cov1(1,1) = raster_cov_y * mu_1(2);
  cov1(2,2) = x1_depth_cov;
  //Point 2
  Eigen::Matrix3d cov2 = Eigen::Matrix3d::Identity();
  cov2(0,0) = raster_cov_x * mu_2(2);
  cov2(1,1) = raster_cov_y * mu_2(2);
  cov2(2,2) = x2_depth_cov;

  Eigen::Matrix3d cov2inv = cov2.inverse(); // Σ₂⁻¹

  Eigen::Vector3d mu_1_in_frame_2 = (tf_1_to_2 * x1).head<3>().cast<double>(); // μ₁⁽²⁾ = T₁₂ μ₁⁽¹⁾
  //Caution: the standard propagation would be R Σ Rᵀ; Rᵀ Σ R only matches it
  //for isotropic covariances, not for merely diagonal ones as constructed here
  Eigen::Matrix3d cov1_in_frame_2 = rotation_mat.transpose() * cov1 * rotation_mat;
  Eigen::Matrix3d cov1inv_in_frame_2 = cov1_in_frame_2.inverse(); // Σ₁⁻¹

  Eigen::Matrix3d cov_sum = (cov1inv_in_frame_2 + cov2inv);
  Eigen::Matrix3d inv_cov_sum = cov_sum.inverse();
  ROS_ERROR_STREAM_COND(inv_cov_sum != inv_cov_sum, "Sum of Covariances not invertible: \n" << cov_sum);

  Eigen::Vector3d x_ml;//maximum likelihood position of the latent point that caused the sensor measurements
  x_ml = inv_cov_sum * (cov1inv_in_frame_2 * mu_1_in_frame_2 + cov2inv * mu_2); // (Σ₁⁻¹ + Σ₂⁻¹)⁻¹(Σ₁⁻¹μ₁ + Σ₂⁻¹μ₂)
  Eigen::Vector3d delta_mu_1 = mu_1_in_frame_2 - x_ml;
  Eigen::Vector3d delta_mu_2 = mu_2 - x_ml;
  
  float sqrd_mahalanobis_distance1 = delta_mu_1.transpose() * cov1inv_in_frame_2 * delta_mu_1; // Δμ₁ᵀ Σ₁⁻¹ Δμ₁
  float sqrd_mahalanobis_distance2 = delta_mu_2.transpose() * cov2inv * delta_mu_2; // Δμ₂ᵀ Σ₂⁻¹ Δμ₂
  float bad_mahalanobis_distance = sqrd_mahalanobis_distance1 + sqrd_mahalanobis_distance2; //FIXME

  if(!(bad_mahalanobis_distance >= 0.0))
  {
    ROS_ERROR_STREAM("Non-Positive Mahalanobis Distance");
    return std::numeric_limits<double>::max();
  }
  ROS_DEBUG_STREAM_NAMED("statistics", "Mahalanobis ML: " << std::setprecision(25) << bad_mahalanobis_distance);
  return bad_mahalanobis_distance;
}


double errorFunction2(const Eigen::Vector4f& x1,
                      const Eigen::Vector4f& x2,
                      const Eigen::Matrix4f& tf_1_to_2)
{
  static const double cam_angle_x = 58.0/180.0*M_PI;/*{{{*/
  static const double cam_angle_y = 45.0/180.0*M_PI;
  static const double cam_resol_x = 640;
  static const double cam_resol_y = 480;
  static const double raster_stddev_x = 2*tan(cam_angle_x/cam_resol_x);  //2pix stddev in x
  static const double raster_stddev_y = 2*tan(cam_angle_y/cam_resol_y);  //2pix stddev in y
  static const double raster_cov_x = raster_stddev_x * raster_stddev_x;
  static const double raster_cov_y = raster_stddev_y * raster_stddev_y;/*}}}*/

  ROS_WARN_COND(x1(3) != 1.0, "4th element of x1 should be 1.0, is %f", x1(3));
  ROS_WARN_COND(x2(3) != 1.0, "4th element of x2 should be 1.0, is %f", x2(3));

  Eigen::Vector3d mu_1 = x1.head<3>().cast<double>();
  Eigen::Vector3d mu_1_in_frame_2 = (tf_1_to_2 * x1).head<3>().cast<double>(); // μ₁⁽²⁾ = T₁₂ μ₁⁽¹⁾
  Eigen::Vector3d mu_2 = x2.head<3>().cast<double>();
  Eigen::Matrix3d rotation_mat = tf_1_to_2.block(0,0,3,3).cast<double>();

  //Point 1
  Eigen::Matrix3d cov1 = Eigen::Matrix3d::Zero();
  cov1(0,0) = 1 * raster_cov_x * mu_1(2); //how big is 1px std dev in meter, depends on depth
  cov1(1,1) = 1 * raster_cov_y * mu_1(2);
  cov1(2,2) = mu_1(2)*mu_1(2) * 0.0075; //stddev computed from information on http://www.ros.org/wiki/openni_kinect/kinect_accuracy
  //Point 2
  Eigen::Matrix3d cov2 = Eigen::Matrix3d::Zero();
  cov2(0,0) = 1 * raster_cov_x * mu_2(2);
  cov2(1,1) = 1 * raster_cov_y * mu_2(2);
  cov2(2,2) = mu_2(2)*mu_2(2) * 0.0075;

  //Caution: the standard propagation would be R Σ Rᵀ; Rᵀ Σ R only matches it for isotropic covariances
  Eigen::Matrix3d cov1_in_frame_2 = rotation_mat.transpose() * cov1 * rotation_mat;

  // Δμ⁽²⁾ = μ₁⁽²⁾ - μ₂⁽²⁾
  Eigen::Vector3d delta_mu_in_frame_2 = mu_1_in_frame_2 - mu_2;
  // Σc = (Σ₁ + Σ₂)
  Eigen::Matrix3d cov_mat_sum_in_frame_2 = cov1_in_frame_2 + cov2;
  // Δμᵀ Σc⁻¹ Δμ
  double sqrd_mahalanobis_distance = delta_mu_in_frame_2.transpose() * cov_mat_sum_in_frame_2.inverse() * delta_mu_in_frame_2;
  
  if(!(sqrd_mahalanobis_distance >= 0.0))
  {
    ROS_ERROR_STREAM("Non-Positive Mahalanobis Distance for vectors " << x1 << "\nand\n" << x2 << " with covariances\n" << cov1 << "\nand\n" << cov2);
    return std::numeric_limits<double>::max();
  }
  //ROS_INFO_STREAM_NAMED("statistics", "Probability: " << std::setprecision(10) << probability << " Normalization: " << normalization);
  ROS_DEBUG_STREAM_NAMED("statistics", "Mahalanobis Distance: " << sqrd_mahalanobis_distance);
  ROS_DEBUG_STREAM_NAMED("statistics", "Covariance Matrix:\n" << cov_mat_sum_in_frame_2);
  ROS_DEBUG_STREAM_NAMED("statistics", "Sigma 1:\n" << cov1 << "\nSigma 2:\n" << cov2);
  ROS_DEBUG_STREAM_NAMED("statistics", "Sigma 1 in Frame 2:\n" << cov1_in_frame_2 << "\nDelta mu: " << delta_mu_in_frame_2);
  ROS_DEBUG_STREAM_NAMED("statistics", "Transformation Matrix:\n" << tf_1_to_2 << "\nRotation:\n" << rotation_mat.transpose() * rotation_mat);
  return sqrd_mahalanobis_distance;
}

float getMinDepthInNeighborhood(const cv::Mat& depth, cv::Point2f center, float diameter){
    // Get neighbourhood area of keypoint, clipped to the image borders
    int radius = (diameter - 1)/2;
    int top   = center.y - radius; top   = top   < 0 ? 0 : top;
    int left  = center.x - radius; left  = left  < 0 ? 0 : left;
    int bot   = center.y + radius; bot   = bot   > depth.rows ? depth.rows : bot;
    int right = center.x + radius; right = right > depth.cols ? depth.cols : right;

    cv::Mat neighborhood(depth, cv::Range(top, bot), cv::Range(left, right));
    double minZ = std::numeric_limits<float>::quiet_NaN();
    cv::minMaxLoc(neighborhood, &minZ);
    if(minZ == 0.0){ //FIXME: Why are there features with depth set to zero?
      ROS_WARN_THROTTLE(1,"Caught feature with zero in depth neighbourhood");
      minZ = std::numeric_limits<float>::quiet_NaN();
    }

    return static_cast<float>(minZ);
}
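// Usage sketch for a detected keypoint (names hypothetical):
//   cv::KeyPoint kp = keypoints[i];
//   float z = getMinDepthInNeighborhood(depth_img, kp.pt, kp.size);
//   if(!std::isnan(z)) { /* use z as the keypoint's depth estimate */ }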