VO.cpp
00001 #include <VO/VO.h>
00002 
00003 // define whether to use approximate nearest-neighbor search
00004 #define USE_FLANN
00005 
00006 
00007 CvMemStorage* storage1_pair = cvCreateMemStorage(0);
00008 CvMemStorage* consecutive_Storage = cvCreateMemStorage(0);
00009 
00010 using namespace std;
00011 using namespace cv;
00012 using namespace Eigen;
00013 using namespace image_transport;
00014 
00015 
00016 static CvScalar colors[] =
00017         {
00018                 {{0,0,255}},
00019                 {{0,128,255}},
00020                 {{0,255,255}},
00021                 {{0,255,0}},
00022                 {{255,128,0}},
00023                 {{255,255,0}},
00024                 {{255,0,0}},
00025                 {{255,0,255}},
00026                 {{255,255,255}}
00027         };
00028 
00029 
00030 class Error_Prop{
00031 
00032   ros::NodeHandle nh_;                                          // Node handle used to subscribe to the stereo images
00033   image_transport::ImageTransport it_;                          // Image transport used to read the images
00034   image_transport::Subscriber image_sub_l_, image_sub_r_;       // Subscriptions to the topics that publish the images
00035 
00036   ros::NodeHandle n_pcl1, n_pcl2, n_pos, n_odom,n_tgt,n_src,n_wpts1,n_cpts1,n_src_RT,n_od;
00037   ros::Publisher pub_pcl1,pub_pcl2,pub_pos,pub_odom,pub_tgt,pub_src,pub_wpts1,pub_cpts1,pub_src_RT,pub_od,pub_uvl,pub_uvr;
00038 
00039   tf::TransformBroadcaster od_broadcaster;
00040 
00041   CvSURFParams params;
00042 
00043 public:
00044 
00045   bool START;
00046 
00047   int ncol,nrow,sigma_ul,sigma_vl,sigma_ur,sigma_vr,Npoints,Nclouds,numRandPoints,size;
00048   float om[3],T[3],tx,ty,tz,alfa_l[2],alfa_r[2],alfa_U_l,alfa_U_r,alfa_V_l,alfa_V_r,alfa[4],uol,vol,uor,vor,wpx_,wpy_,wpz_,px,py,pz,xlims[2],ylims[2],zlims[2],meanul,meanvl,meanur,meanvr,mean[3],xl[2],yl[2],zl[2],h_r[16];
00049   Mat R,H_l2r,rdm,wpxyz,randomUL,randomVL,randomUR,randomVR,randomx,randomy,randomz,im1,im2;
00050   CvMat p_xyz,uvs_l,uvs_r,Cov,Jf,HC,d2C_dydx,RTCamFr,invC2R,Hcam2rob,H_r;
00051   double hcam2rob[16],inv_cam2rob[16];
00052   CvMat* Hcaminv;
00053 
00054   pcl::PointCloud<pcl::PointXYZ> cloud_a, cloud_b, cloud_c, cloud_ant, cloud_pos, cloud_odom, cloud_odom2, wpts1, cpts1, cpts_ant, wpts1RANSAC, cpts1RANSAC, cloud_w,cloud_t, cloud_wRT,wpts1RT,UV_l,UV_r;
00055 
00056   Eigen::Matrix<float,4,4> Hcam2rob_;
00057   Eigen::Matrix3f R_;
00058   Eigen::Matrix<float,3,4> Jf_;
00059   Eigen::Matrix<float,4,4> H_l2r_;           // Relation between the cameras (left to right)
00060 
00061   Eigen::Matrix<float,3,2000> Points3D;
00062   Eigen::Matrix<float,3,3> cam2world;
00063 
00064   // -------Visual odometry-------
00065 
00066   double x, y, z,th, vx, vy, vth;
00067   ros::Time current_time, last_time;
00068 
00069   tf::Quaternion quat_ant;
00070   Quaterniond fq;
00071   Vector4d trans_;
00072   geometry_msgs::Pose odom3D;
00073 
00074   double t_stereo;
00075   vector<int> ptpairs1,ptpairs2,ptpairs3,ptpairs1_resp,ptpairs2_resp,ptpairs3_resp,indices_a,indices_b;
00076 
00077   CvSeq *leftKeypoints1, *leftDescriptors1, *rightKeypoints1, *rightDescriptors1, *leftKeypoints2, *leftDescriptors2, *rightKeypoints2, *rightDescriptors2, *leftKeypoints3, *leftDescriptors3, *rightKeypoints3, *rightDescriptors3;
00078 
00079   Matrix<float,3,3> Rot_12,Rot_i;
00080   Matrix<float,3,1> Tras_12,Tras_i;
00081 
00082   Eigen::Matrix4f transformation;
00083 
00084   Eigen::Matrix<float,3,500> nube1;
00085   Eigen::Matrix<float,3,500> nube2;
00086 
00087   Matrix<float,3,1> odom_1,odom_2;
00088 
00089   IplImage* left_;
00090   IplImage* left_1;
00091   IplImage* left_gray;
00092   IplImage* left_ant;
00093   IplImage* left_gray_ant;
00094   IplImage* right_;
00095   IplImage* right_1;
00096   IplImage* right_gray;
00097 
00098  // ---------Functions---------
00099 
00100         void init();
00101         void ypr(const Matrix<float,3,3> Rot,Matrix<float,3,1>& ypr_);
00102         double compareSURFDescriptors ( const float* d1, const float* d2, double best, int length );
00103         int locatePlanarObject( const CvSeq* objectKeypoints, const CvSeq* objectDescriptors,const CvSeq* imageKeypoints, const CvSeq* imageDescriptors,
00104                           const CvPoint src_corners[4], CvPoint dst_corners[4] );
00105         void flannFindPairs( const CvSeq*, const CvSeq* objectDescriptors,const CvSeq*,const CvSeq* imageDescriptors,vector<int>& ptpairs );
00106         void findPairs( const CvSeq* objectKeypoints,const CvSeq* objectDescriptors,const CvSeq* imageKeypoints,const CvSeq* imageDescriptors,vector<int>& ptpairs);
00107         int naiveNearestNeighbor( const float* vec, int laplacian, const CvSeq* model_keypoints, const CvSeq* model_descriptors );
00108 
00109  // -------------------Visual odometry---------------------
00110         void run();
00111 
00112         //--------Image capture-------
00113         void imageCb_l(const sensor_msgs::ImageConstPtr& msg);
00114         void imageCb_r(const sensor_msgs::ImageConstPtr& msg);
00115 
00116         //--------Matching-------
00117         void VO_StereoMatching();                               // Matching between the stereo image pairs
00118         void VO_ConsecutiveMatching();                          // Matching between successive left images
00119 
00120         // ------ View Matches--------
00121         void VO_ViewStereo();
00122         void VO_ViewConsecutive();
00123         void VO_ViewAll();
00124 
00125         //--------3D reconstruction-------
00126         void VO_matches_to_3D();                                // Builds the 3D point cloud from the matches
00127         void VO_3D_to_matches();
00128 
00129         //--------Filtering-------
00130         void VO_matchFilter();
00131         void VO_cloudFilter();
00132 
00133         //--------RT from clouds-------
00134         void VO_get_RT_RANSAC();
00135 
00136         //--------World representation-------
00137         void VO_RT_to_world();
00138 
00139         //----------Clean------------
00140         void VO_clean();
00141 
00142  //-------- Constructor ---------
00143 
00144   Error_Prop():it_(nh_)
00145   {
00146         // Camera nodes
00147         START = false;
00148 
00149         image_sub_l_ = it_.subscribe("/teo/left_camera/camera_image", 1, &Error_Prop::imageCb_l, this);
00150         image_sub_r_ = it_.subscribe("/teo/right_camera/camera_image", 1, &Error_Prop::imageCb_r, this);
00151 
00152         //--------------- Windows to display the camera images --------------
00153         left_1 = cvCreateImage(cvSize(480,320), 8, 3 );        // placeholder size only; the real image is larger, this just gives the buffer an initial size
00154         right_1 = cvCreateImage(cvGetSize(left_1), 8, 3 );
00155 
00156         left_ = cvCreateImage(cvSize(480,320), 8, 3 );
00157         left_ant = cvCreateImage(cvGetSize(left_), 8, 3 );
00158         left_gray_ant = cvCreateImage(cvGetSize(left_ant), IPL_DEPTH_8U, 1);
00159         right_ = cvCreateImage(cvGetSize(left_), 8, 3 );
00160 
00161         //---------------------Keypoints & Descriptors------------------------------
00162 
00163         leftKeypoints1 = 0; leftDescriptors1 = 0;
00164         rightKeypoints1 = 0; rightDescriptors1 = 0;
00165 
00166         leftKeypoints2 = 0; leftDescriptors2 = 0;
00167         rightKeypoints2 = 0; rightDescriptors2 = 0;
00168   }
00169 
00170         //--------------------- Destructor -------------------------
00171 
00172   ~Error_Prop()
00173   {
00174     //cv::destroyWindow("Left");
00175     //cv::destroyWindow("Right");
00176   }
00177 
00178 };
00179 
00180 
00181 /*
00182 -------------------------------------------------------------------------------------------------------------
00183                                                 Functions
00184 -------------------------------------------------------------------------------------------------------------
00185 */
00186 
00187 void Error_Prop::ypr(const Matrix<float,3,3> R,Matrix<float,3,1>& ypr_){
00188 /* Extracts the yaw, pitch and roll angles from a rotation matrix. */
00189 
00190   float yaw,pitch,roll;
00191   roll=atan2(R(1,0),R(0,0));  //roll
00192   pitch=atan2(-R(2,0),R(0,0)*cos(roll)+R(1,0)*sin(roll)); //pitch
00193   yaw=atan2(R(2,1),R(2,2));  //yaw
00194   ypr_(0,0)=yaw; ypr_(1,0)=pitch; ypr_(2,0)=roll;
00195 }
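/* The formulas above follow this file's own angle convention. For comparison, a minimal
   sketch of the common ZYX (yaw-pitch-roll) extraction done directly with Eigen, which
   this file already links against; the helper name yprFromRotation is illustrative and
   not part of the original pipeline. */
static Eigen::Vector3f yprFromRotation(const Eigen::Matrix3f& R)
{
  // eulerAngles(2,1,0) returns (yaw, pitch, roll) such that
  // R = AngleAxis(yaw, Z) * AngleAxis(pitch, Y) * AngleAxis(roll, X)
  return R.eulerAngles(2, 1, 0);
}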
00196 
00197 double Error_Prop::compareSURFDescriptors( const float* d1, const float* d2, double best, int length )
00198 {
00199         double total_cost = 0;
00200         assert( length % 4 == 0 );
00201         for( int i = 0; i < length; i += 4 )
00202         {
00203                 double t0 = d1[i] - d2[i];
00204                 double t1 = d1[i+1] - d2[i+1];
00205                 double t2 = d1[i+2] - d2[i+2];
00206                 double t3 = d1[i+3] - d2[i+3];
00207                 total_cost += t0*t0 + t1*t1 + t2*t2 + t3*t3;
00208                 if( total_cost > best )
00209                         break;
00210         }
00211         return total_cost;
00212 }
00213 
00214 int Error_Prop::naiveNearestNeighbor( const float* vec, int laplacian,const CvSeq* model_keypoints,const CvSeq* model_descriptors )
00215 {
00216         int length = (int)(model_descriptors->elem_size/sizeof(float));
00217         int i, neighbor = -1;
00218         double d, dist1 = 1e6, dist2 = 1e6;
00219         CvSeqReader reader, kreader;
00220         cvStartReadSeq( model_keypoints, &kreader, 0 );
00221         cvStartReadSeq( model_descriptors, &reader, 0 );
00222 
00223         for( i = 0; i < model_descriptors->total; i++ )
00224         {
00225                 const CvSURFPoint* kp = (const CvSURFPoint*)kreader.ptr;
00226                 const float* mvec = (const float*)reader.ptr;
00227                 CV_NEXT_SEQ_ELEM( kreader.seq->elem_size, kreader );
00228                 CV_NEXT_SEQ_ELEM( reader.seq->elem_size, reader );
00229                 if( laplacian != kp->laplacian )
00230                         continue;
00231                 d = compareSURFDescriptors( vec, mvec, dist2, length );
00232                 if( d < dist1 )
00233                 {
00234                         dist2 = dist1;
00235                         dist1 = d;
00236                         neighbor = i;
00237                 }
00238                 else if ( d < dist2 )
00239                         dist2 = d;
00240         }
00241         if ( dist1 < 0.6*dist2 )
00242                 return neighbor;
00243         return -1;
00244 }
00245 
00246 void Error_Prop::findPairs( const CvSeq* objectKeypoints, const CvSeq* objectDescriptors,
00247            const CvSeq* imageKeypoints, const CvSeq* imageDescriptors, vector<int>& ptpairs )
00248 {
00249         int i;
00250         CvSeqReader reader, kreader;
00251         cvStartReadSeq( objectKeypoints, &kreader );
00252         cvStartReadSeq( objectDescriptors, &reader );
00253         ptpairs.clear();
00254 
00255         for( i = 0; i < objectDescriptors->total; i++ )
00256         {
00257                 const CvSURFPoint* kp = (const CvSURFPoint*)kreader.ptr;
00258                 const float* descriptor = (const float*)reader.ptr;
00259                 CV_NEXT_SEQ_ELEM( kreader.seq->elem_size, kreader );
00260                 CV_NEXT_SEQ_ELEM( reader.seq->elem_size, reader );
00261                 int nearest_neighbor = naiveNearestNeighbor( descriptor, kp->laplacian, imageKeypoints, imageDescriptors );
00262                 if( nearest_neighbor >= 0 )
00263                 {
00264                         ptpairs.push_back(i);
00265                         ptpairs.push_back(nearest_neighbor);
00266                 }
00267         }
00268 }
00269 
00270 void Error_Prop::flannFindPairs( const CvSeq*, const CvSeq* objectDescriptors,const CvSeq*, const CvSeq* imageDescriptors, vector<int>& ptpairs )
00271 {
00272         int length = (int)(objectDescriptors->elem_size/sizeof(float));
00273 
00274         cv::Mat m_object(objectDescriptors->total, length, CV_32F);  //Left
00275         cv::Mat m_image(imageDescriptors->total, length, CV_32F);    //Right
00276 
00277 
00278         // copy descriptors
00279         CvSeqReader obj_reader;
00280         float* obj_ptr = m_object.ptr<float>(0);
00281         cvStartReadSeq( objectDescriptors, &obj_reader );
00282         for(int i = 0; i < objectDescriptors->total; i++ )
00283         {
00284                 const float* descriptor = (const float*)obj_reader.ptr;
00285                 CV_NEXT_SEQ_ELEM( obj_reader.seq->elem_size, obj_reader );
00286                 memcpy(obj_ptr, descriptor, length*sizeof(float));
00287                 obj_ptr += length;
00288         }
00289         CvSeqReader img_reader;
00290         float* img_ptr = m_image.ptr<float>(0);
00291         cvStartReadSeq( imageDescriptors, &img_reader );
00292         for(int i = 0; i < imageDescriptors->total; i++ )
00293         {
00294                 const float* descriptor = (const float*)img_reader.ptr;
00295                 CV_NEXT_SEQ_ELEM( img_reader.seq->elem_size, img_reader );
00296                 memcpy(img_ptr, descriptor, length*sizeof(float));
00297                 img_ptr += length;
00298         }
00299 
00300         // find nearest neighbors using FLANN
00301         cv::Mat m_indices(objectDescriptors->total, 2, CV_32S);
00302         cv::Mat m_dists(objectDescriptors->total, 2, CV_32F);
00303         cv::flann::Index flann_index(m_image, cv::flann::KDTreeIndexParams(4));  // using 4 randomized kdtrees
00304         flann_index.knnSearch(m_object, m_indices, m_dists, 2, cv::flann::SearchParams(64) ); // maximum number of leafs checked
00305 
00306         int* indices_ptr = m_indices.ptr<int>(0);
00307         float* dists_ptr = m_dists.ptr<float>(0);
00308         for (int i=0;i<m_indices.rows;++i) {
00309                 if (dists_ptr[2*i]<0.6*dists_ptr[2*i+1]) {
00310                         ptpairs.push_back(i);
00311                         ptpairs.push_back(indices_ptr[2*i]);
00312                 }
00313         }
00314 }
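/* flannFindPairs() above runs a 2-NN search and keeps a match only when the best distance
   is below 0.6 times the second-best one (Lowe's ratio test) on the squared distances
   returned by FLANN. A minimal, illustrative sketch of the same ratio test with OpenCV's
   C++ matcher API, assuming cv::BFMatcher is available and that the descriptors are
   already stored row-wise in CV_32F matrices (like m_object/m_image above). Note that
   cv::DMatch stores non-squared L2 distances, so 0.6 here is slightly stricter than 0.6
   applied to squared distances. */
static void ratioTestMatch(const cv::Mat& objectDesc, const cv::Mat& imageDesc,
                           std::vector<int>& ptpairs)
{
    cv::BFMatcher matcher(cv::NORM_L2);                  // exhaustive L2 matcher
    std::vector<std::vector<cv::DMatch> > knn;
    matcher.knnMatch(objectDesc, imageDesc, knn, 2);     // two nearest neighbours per query
    ptpairs.clear();
    for (size_t i = 0; i < knn.size(); ++i)
    {
        if (knn[i].size() == 2 && knn[i][0].distance < 0.6f * knn[i][1].distance)
        {
            ptpairs.push_back(knn[i][0].queryIdx);       // index in the object descriptors
            ptpairs.push_back(knn[i][0].trainIdx);       // index of its match in the image
        }
    }
}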
00315 
00316 /* a rough implementation for object location */
00317 int Error_Prop::locatePlanarObject( const CvSeq* objectKeypoints, const CvSeq* objectDescriptors, const CvSeq* imageKeypoints, const CvSeq* imageDescriptors, const CvPoint src_corners[4], CvPoint dst_corners[4] )
00318 {
00319         double h[9];
00320         CvMat _h = cvMat(3, 3, CV_64F, h);
00321         vector<int> ptpairs;
00322         vector<CvPoint2D32f> pt1, pt2;
00323         CvMat _pt1, _pt2;
00324         int i, n;
00325 
00326         #ifdef USE_FLANN
00327                 flannFindPairs( objectKeypoints, objectDescriptors, imageKeypoints, imageDescriptors, ptpairs );
00328         #else
00329                 findPairs( objectKeypoints, objectDescriptors, imageKeypoints, imageDescriptors, ptpairs );
00330         #endif
00331 
00332         n = ptpairs.size()/2;
00333         if( n < 4 )
00334                 return 0;
00335 
00336         pt1.resize(n);
00337         pt2.resize(n);
00338         for( i = 0; i < n; i++ )
00339         {
00340                 pt1[i] = ((CvSURFPoint*)cvGetSeqElem(objectKeypoints,ptpairs[i*2]))->pt;
00341                 pt2[i] = ((CvSURFPoint*)cvGetSeqElem(imageKeypoints,ptpairs[i*2+1]))->pt;
00342         }
00343 
00344         _pt1 = cvMat(1, n, CV_32FC2, &pt1[0] );
00345         _pt2 = cvMat(1, n, CV_32FC2, &pt2[0] );
00346         if( !cvFindHomography( &_pt1, &_pt2, &_h, CV_RANSAC, 5 ))
00347                 return 0;
00348 
00349         for( i = 0; i < 4; i++ )
00350         {
00351                 double x = src_corners[i].x, y = src_corners[i].y;
00352                 double Z = 1./(h[6]*x + h[7]*y + h[8]);
00353                 double X = (h[0]*x + h[1]*y + h[2])*Z;
00354                 double Y = (h[3]*x + h[4]*y + h[5])*Z;
00355                 dst_corners[i] = cvPoint(cvRound(X), cvRound(Y));
00356         }
00357 
00358         return 1;
00359 }
00360 
00361 
00362 /*
00363 ----------------------------------------------------------------------------------------------------------------------
00364                                                 Visual odometry
00365 ----------------------------------------------------------------------------------------------------------------------
00366 */
00367 
00368 
00369 void Error_Prop::imageCb_l(const sensor_msgs::ImageConstPtr& msg)
00370   {
00371         ROS_INFO("Received left image");
00372         sensor_msgs::CvBridge bridge_;
00373 
00374         try
00375         {
00376                 left_1 = bridge_.imgMsgToCv(msg, "bgr8");
00377         }
00378         catch (sensor_msgs::CvBridgeException error)
00379         {
00380                 ROS_ERROR("Error converting left image to IplImage");
00381         }
00382 
00383         //----------Image Capture-----------
00384 
00385         cvResize(left_1,left_);                                                 
00386         cvShowImage("Left", left_);
00387         left_gray = cvCreateImage(cvGetSize(left_), IPL_DEPTH_8U, 1);
00388         cvCvtColor(left_, left_gray, CV_BGR2GRAY );
00389 
00390   }
00391 
00392 
00393 void Error_Prop::imageCb_r(const sensor_msgs::ImageConstPtr& msg)
00394   {
00395 
00396         ROS_INFO("Received right image");
00397         sensor_msgs::CvBridge bridge_;
00398         try
00399         {
00400                 right_1 = bridge_.imgMsgToCv(msg, "bgr8");
00401         }
00402         catch (sensor_msgs::CvBridgeException error)
00403         {
00404                 ROS_ERROR("Error converting right image to IplImage");
00405         }
00406 
00407         //----------Image Capture-----------
00408 
00409         cvResize(right_1,right_);                               
00410         cvShowImage("Right", right_);
00411         right_gray = cvCreateImage(cvGetSize(right_), IPL_DEPTH_8U, 1);
00412         cvCvtColor(right_, right_gray, CV_BGR2GRAY );
00413     }
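/* Both callbacks above rely on the legacy sensor_msgs::CvBridge and allocate a new
   grayscale image on every frame without releasing the previous one. A minimal sketch of
   the same conversion with the newer cv_bridge API, assuming the cv_bridge package and
   its header are available in this workspace; imageCbExample is illustrative only and not
   part of the original node. */
#include <cv_bridge/cv_bridge.h>

static void imageCbExample(const sensor_msgs::ImageConstPtr& msg)
{
  cv_bridge::CvImageConstPtr cv_ptr;
  try
  {
    cv_ptr = cv_bridge::toCvShare(msg, "bgr8");      // shares the message buffer when possible
  }
  catch (cv_bridge::Exception& e)
  {
    ROS_ERROR("cv_bridge exception: %s", e.what());
    return;
  }
  cv::Mat gray;
  cv::cvtColor(cv_ptr->image, gray, CV_BGR2GRAY);    // cv::Mat manages its own memory
}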
00414 
00415 
00416 void Error_Prop::run()
00417 {
00418     //---------Stereo Matching----------
00419     if ((START==false)||(left_gray_ant!=left_gray))
00420     {
00421     t_stereo = (double)cvGetTickCount();
00422     VO_StereoMatching();
00423     t_stereo = (double)cvGetTickCount() - t_stereo;
00424     printf( "Extraction time STEREO = %gms\n", t_stereo/(cvGetTickFrequency()*1000.));
00425 
00426     //-------Consecutive Matching-------
00427 
00428     t_stereo = (double)cvGetTickCount();
00429     VO_ConsecutiveMatching();
00430     t_stereo = (double)cvGetTickCount() - t_stereo;
00431     printf( "Extraction time CONSECUTIVE = %gms\n", t_stereo/(cvGetTickFrequency()*1000.));
00432 
00433     //-----------Filtering-----------
00434 
00435     t_stereo = (double)cvGetTickCount();
00436     VO_matchFilter();                                   // Filter the ptpairs
00437     VO_cloudFilter();                                   // Filter the cloud of all the 3D points
00438     t_stereo = (double)cvGetTickCount() - t_stereo;
00439     printf( "Extraction time FILTER = %gms\n", t_stereo/(cvGetTickFrequency()*1000.));
00440 
00441     //-------3D Reconstruction-------
00442 
00443     t_stereo = (double)cvGetTickCount();
00444     VO_matches_to_3D();                                 // cloud_a and cloud_b in 3D with all the matches except those filtered out so far
00445     t_stereo = (double)cvGetTickCount() - t_stereo;
00446     printf( "Extraction time 3D RECONSTRUCTION = %gms\n", t_stereo/(cvGetTickFrequency()*1000.));
00447 
00448     //---------Get RT-----------------
00449 
00450     if (cloud_a.size()==cloud_b.size())
00451     {
00452             if (cloud_a.size()>6)
00453             {
00454                     t_stereo = (double)cvGetTickCount();
00455                     VO_get_RT_RANSAC();
00456                     t_stereo = (double)cvGetTickCount() - t_stereo;
00457                     printf( "Extraction time RT (RANSAC) = %gms\n", t_stereo/(cvGetTickFrequency()*1000.));
00458             }
00459             else
00460             {
00461                     printf( "Not enough points..");
00462                     Rot_12.setIdentity();
00463                     Tras_12.setZero();
00464             }
00465     }
00466     else
00467     {
00468             printf( "Unbalanced clouds..");
00469             Rot_12.setIdentity();
00470             Tras_12.setZero();
00471     }
00472 
00473     //-----------Plot RT--------------
00474     t_stereo = (double)cvGetTickCount();
00475     VO_RT_to_world();
00476     t_stereo = (double)cvGetTickCount() - t_stereo;
00477     printf( "Extraction time RT2world = %gms\n", t_stereo/(cvGetTickFrequency()*1000.));
00478 
00479     //---------Cleanup-----------
00480     t_stereo = (double)cvGetTickCount();
00481     VO_clean();
00482     t_stereo = (double)cvGetTickCount() - t_stereo;
00483     printf( "Extraction time CLEAN = %gms\n", t_stereo/(cvGetTickFrequency()*1000.));
00484 
00485     START = true;
00486 
00487     t_stereo = (double)cvGetTickCount() - t_stereo;
00488     printf( "ITERATION TIME = %gms\n", t_stereo/(cvGetTickFrequency()*1000.));
00489   }
00490 }
00491 
00492   void Error_Prop::init()
00493 {
00494 
00495         params = cvSURFParams(1000, 1); // SURF parameters (Hessian threshold, extended descriptors)
00496 
00497         pub_pcl1  = n_pcl1.advertise<pcl::PointCloud<pcl::PointXYZ> >("puntos1", 1000);
00498         pub_pcl2  = n_pcl2.advertise<pcl::PointCloud<pcl::PointXYZ> >("puntos2", 1000);
00499         pub_pos   = n_pos.advertise<pcl::PointCloud<pcl::PointXYZ> >("posicion", 1000);  //2
00500         pub_odom  = n_odom.advertise<pcl::PointCloud<pcl::PointXYZ> >("miodom", 1);  //
00501 
00502         pub_tgt   = n_tgt.advertise<pcl::PointCloud<pcl::PointXYZ> >("tgt", 6);
00503         pub_src   = n_src.advertise<pcl::PointCloud<pcl::PointXYZ> >("src", 6);  //
00504         pub_wpts1 = n_wpts1.advertise<pcl::PointCloud<pcl::PointXYZ> >("wpts1", 1000);
00505         pub_cpts1 = n_cpts1.advertise<pcl::PointCloud<pcl::PointXYZ> >("cpts1", 1000);
00506 
00507         pub_src_RT = n_src_RT.advertise<pcl::PointCloud<pcl::PointXYZ> >("srcRT", 6);
00508 
00509         pub_od = n_od.advertise<nav_msgs::Odometry>("od", 50);
00510 
00511         pub_uvl = n_pcl1.advertise<pcl::PointCloud<pcl::PointXYZ> >("uvl", 1000);
00512         pub_uvr = n_pcl1.advertise<pcl::PointCloud<pcl::PointXYZ> >("uvr", 1000);
00513 
00514 
00515         cloud_a.header.frame_id="nube1_(i)";
00516         cloud_ant.header.frame_id="nube1_ (i-1)";
00517         cloud_b.header.frame_id="nube2";
00518         cloud_pos.header.frame_id="posicion";
00519         wpts1.header.frame_id="world relativo";
00520         cpts1.header.frame_id="camera relativo";
00521 
00522 
00523 
00524         wpts1RT.header.frame_id="world rel RT";
00525 //------------------------------------------------------------------------
00526         cloud_w.header.frame_id="src";
00527         cloud_t.header.frame_id="tgt";
00528 
00529         cloud_wRT.header.frame_id="tgt";
00530         cloud_wRT.width  = 6;
00531         cloud_wRT.height = 1;
00532         cloud_wRT.points.resize (cloud_wRT.width * cloud_wRT.height);
00533 
00534 
00535         cloud_w.width  = 6;
00536         cloud_w.height = 1;
00537         cloud_w.points.resize (cloud_w.width * cloud_w.height);
00538 
00539         cloud_t.width  = 6;
00540         cloud_t.height = 1;
00541         cloud_t.points.resize (cloud_t.width * cloud_t.height);
00542 //------------------------------------------------------------------------
00543 
00544         cloud_a.width  = 2000;
00545         cloud_a.height = 1;
00546         cloud_a.points.resize (cloud_a.width * cloud_a.height);
00547 
00548         cloud_b.width  = 2000;
00549         cloud_b.height = 1;
00550         cloud_b.points.resize (cloud_b.width * cloud_b.height);
00551 
00552         cloud_pos.width  = 2000;  //2
00553         cloud_pos.height = 1;     //1
00554         cloud_pos.points.resize (cloud_pos.width * cloud_pos.height);
00555 
00556         cloud_odom.header.frame_id="miodom";
00557         cloud_odom.width = 1;
00558         cloud_odom.height = 1;
00559         cloud_odom.points.resize (cloud_odom.width * cloud_odom.height);
00560 
00561         //cloud_odom.points[0].x=0;
00562         //cloud_odom.points[0].y=0;
00563         //cloud_odom.points[0].z=0;
00564 
00565         ROS_INFO("Initializing variables");
00566 
00567         //Image Size (width X height)
00568         ncol=cvGetSize(left_).width;
00569         nrow=cvGetSize(left_).height;
00570 
00571         numRandPoints=100;
00572 
00573         //Extrinsic Parameters
00574 
00575         //om[0]=0.04046;  om[1]=0.01616;  om[2]=0.01419;
00576         om[0]=-0.03770;  om[1]=-0.01934;  om[2]=-0.01324;
00577         //T[0]=-0.38571991;  T[1]=-0.00724661;  T[2]=0.00011057;
00578         T[0]=0.30828733;  T[1]=0.00093104;  T[2]=0.00260338;
00579 
00580 
00581         tx=T[0];ty=T[1];tz=T[2];
00582 
00583         //Intrinsic Parameters
00584 
00585 
00586         //Focal length
00587 
00588         alfa_l[0]=1753.52020;  alfa_l[1]=1753.89203;
00589         alfa_r[0]=1760.90953;  alfa_r[1]=1758.60452;
00590 
00591         alfa_U_l=alfa_l[0];   alfa_V_l=alfa_l[1];
00592         alfa_U_r=alfa_r[0];   alfa_V_r=alfa_r[1];
00593 
00594         alfa[0]=alfa_U_l;alfa[1]=alfa_V_l;alfa[2]=alfa_U_r;alfa[3]=alfa_V_r;
00595 
00596         //Principal Point
00597 
00598         uol=578.92621;
00599         vol=511.56295;
00600         uor=618.17344;
00601         vor=504.21283;
00602 
00603 
00604         //Rotation Matrix
00605 
00606         float sx,cx,sy,cy,sz,cz;
00607         sx=sin(om[0]);
00608         cx=cos(om[0]);
00609         sy=sin(om[1]);
00610         cy=cos(om[1]);
00611         sz=sin(om[2]);
00612         cz=cos(om[2]);
00613 
00614         R_ << cy*cz,sx*sy*cz-cx*sz, cz*sy*cx+sx*sz,cy*sz, sx*sy*sz+cx*cz, cx*sy*sz-sx*cz,-sy,sx*cy,cx*cy;
00615 
00616         H_l2r_ << R_(0,0),R_(0,1),R_(0,2),T[0],R_(1,0),R_(1,1),R_(1,2),T[1],R_(2,0),R_(2,1),R_(2,2),T[2],0,0,0,1;
00617 
00618   /*  World coord. frame  == robot frame at first pose
00619       Robot Frame: x=forwards; y=horizontal(left); z=vertical(upwards)
00620       Transformation between camera and robot coord. frames
00621       Hcam2rob  = Ry(-pi)*Rx(pi/2)*Ry(-pi/2)
00622       Ry -> Rotation around Y (pitch). Homogeneous matrix for a rotation about Y axis.                    */
00623 
00624         Hcam2rob_ << 0,0,1,0,-1,0,0,0,0,-1,0,0,0,0,0,1;
00625 
00626         odom_1.setZero();
00627         odom_2.setZero();
00628 
00629 }
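/* In init() the extrinsic rotation R_ is composed from om as three Euler rotations.
   Stereo calibration tools usually report om as a Rodrigues rotation vector, in which case
   the Euler composition is only a small-angle approximation. A minimal sketch of the exact
   conversion with cv::Rodrigues, under the assumption that om really is a Rodrigues vector;
   the helper name rodriguesToEigen is illustrative only. */
static void rodriguesToEigen(const float om[3], Eigen::Matrix3f& R_out)
{
  cv::Mat rvec = (cv::Mat_<double>(3, 1) << om[0], om[1], om[2]);
  cv::Mat Rcv;
  cv::Rodrigues(rvec, Rcv);                     // exact rotation-vector to rotation-matrix
  for (int r = 0; r < 3; ++r)
    for (int c = 0; c < 3; ++c)
      R_out(r, c) = static_cast<float>(Rcv.at<double>(r, c));
}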
00630 
00631 void Error_Prop::VO_ViewStereo(){
00632     //---------------------------- Build the image used to display the matches -----------------
00633 
00634             IplImage* correspond1 = cvCreateImage( cvSize(right_gray->width+left_gray->width, left_gray->height), 8, 1 );
00635 
00636             cvSetImageROI( correspond1, cvRect( 0, 0, left_gray->width, left_gray->height ) );
00637             cvCopy( left_gray, correspond1 );
00638             cvSetImageROI( correspond1, cvRect( left_gray->width, 0, correspond1->width, correspond1->height ) );
00639             cvCopy( right_gray, correspond1 );
00640             cvResetImageROI( correspond1 );
00641 
00642             for( int i = 0; i < (int)ptpairs1.size(); i += 2 )
00643             {
00644                     CvSURFPoint* r11 = (CvSURFPoint*)cvGetSeqElem( leftKeypoints1, ptpairs1[i] );
00645                     CvSURFPoint* r21 = (CvSURFPoint*)cvGetSeqElem( rightKeypoints1, ptpairs1[i+1] );
00646 
00647                     cvLine( correspond1, cvPointFrom32f(r11->pt), cvPoint(cvRound(r21->pt.x+left_gray->width), cvRound(r21->pt.y)), colors[8] );
00648             }
00649 
00650             cvShowImage( "Stereo matches", correspond1 );
00651 }
00652 
00653 void Error_Prop::VO_StereoMatching(){
00654 
00655     if (START==true)
00656     {
00657         //---------------clear new and vertical ptpairs----------------
00658         ptpairs1.clear();
00659         ptpairs3.clear();
00660 
00661         //----------clear old keypoints and descriptors---------------
00662         cvClearSeq(leftKeypoints2);
00663         cvClearSeq(rightKeypoints2);
00664         cvClearSeq(leftDescriptors2);
00665         cvClearSeq(rightDescriptors2);
00666 
00667         //-----copy current keypoints and descriptors as olds----------
00668         leftKeypoints2 = cvCloneSeq(leftKeypoints1,consecutive_Storage);
00669         leftDescriptors2 = cvCloneSeq(leftDescriptors1,consecutive_Storage);
00670         rightKeypoints2 = cvCloneSeq(rightKeypoints1,consecutive_Storage);              // These could be dropped; we only need to keep the old left data
00671         rightDescriptors2 = cvCloneSeq(rightDescriptors1,consecutive_Storage);
00672 
00673         //----------copy current image as old image--------------------
00674         left_gray_ant=left_gray;
00675 
00676         //---------------------Update old ptpairs----------------------
00677         ptpairs2=ptpairs1_resp; // All the ptpairs, unfiltered
00678         ptpairs2_resp=ptpairs1_resp;
00679 
00680         //--------clear memory for new keypoints and descriptors-------
00681         cvClearSeq(leftKeypoints1);
00682         cvClearSeq(rightKeypoints1);
00683         cvClearSeq(leftDescriptors1);
00684         cvClearSeq(rightDescriptors1);
00685 
00686         ROS_INFO("loaded!");
00687     }
00688 
00689  //---------------------Extraction KeyPoints & Descriptors-----------------------------------
00690 
00691         cvExtractSURF( left_gray, 0, &leftKeypoints1, &leftDescriptors1, storage1_pair, params );
00692         cvExtractSURF( right_gray, 0, &rightKeypoints1, &rightDescriptors1, storage1_pair, params );
00693         printf("Left Keypoints: %d | Right Keypoints: %d\n", leftDescriptors1->total, rightDescriptors1->total);
00694 
00695  //-----------------------------------Matching---------------------------------------------
00696 
00697         #ifdef USE_FLANN
00698                 printf("Using approximate nearest neighbor search\n");
00699                 flannFindPairs( leftKeypoints1, leftDescriptors1, rightKeypoints1, rightDescriptors1, ptpairs1 );
00700         #else
00701                 findPairs( leftKeypoints1, leftDescriptors1, rightKeypoints1, rightDescriptors1, ptpairs1);
00702         #endif
00703 
00704         printf(" STEREO (1): Number of matches BEFORE INITIAL FILTERING: %d\n",(int)ptpairs1.size()/2);
00705 
00706 
00707         for( int i = 0; i < (int)ptpairs1.size(); i += 2 )
00708         {
00709                 CvSURFPoint* r11 = (CvSURFPoint*)cvGetSeqElem( leftKeypoints1, ptpairs1[i] );
00710                 CvSURFPoint* r21 = (CvSURFPoint*)cvGetSeqElem( rightKeypoints1, ptpairs1[i+1] );
00711 
00712                 if ( (cvPointFrom32f(r11->pt).y >= cvPointFrom32f(r21->pt).y) && (cvPointFrom32f(r11->pt).x >= cvPointFrom32f(r21->pt).x) /*&& ((cvPointFrom32f(r11->pt).y - cvPointFrom32f(r21->pt).y) < 40) && ((cvPointFrom32f(r11->pt).y - cvPointFrom32f(r21->pt).y) > 0)*/ )
00713                 {
00714                     //cvLine( correspond1, cvPointFrom32f(r11->pt), cvPoint(cvRound(r21->pt.x+left_gray->width), cvRound(r21->pt.y)), colors[8] );
00715                     //printf("Deleting points..");
00716                 }
00717                 else
00718                 {
00719                         // remove the match between the two points
00720                         ptpairs1.erase(ptpairs1.begin()+i);
00721                         ptpairs1.erase(ptpairs1.begin()+i);
00722                         i-=2;
00723                 }
00724 
00725         }
00726 
00727 
00728         printf(" STEREO (2): Number of matches AFTER FIRST FILTER: %d\n",(int)ptpairs1.size()/2);
00729 
00730         ptpairs1_resp=ptpairs1;  // save the pairs for the next iteration
00731 
00732         Error_Prop::VO_ViewStereo();
00733 
00734 
00735     }
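/* The filtering loop inside VO_StereoMatching() keeps a stereo match only when the left
   keypoint lies at or below and at or to the right of the right keypoint. For a rectified
   horizontal stereo pair, a common alternative check is that the disparity is positive and
   the row difference is small; a minimal, illustrative sketch (the 5-pixel row tolerance is
   an assumption, not a value taken from the original code): */
static bool stereoMatchPlausible(const CvPoint2D32f& pl, const CvPoint2D32f& pr)
{
    float disparity = pl.x - pr.x;            // positive for points in front of the rig
    float rowOffset = fabsf(pl.y - pr.y);     // should be near zero after rectification
    return (disparity > 0.0f) && (rowOffset < 5.0f);
}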
00736 
00737 void Error_Prop::VO_ViewConsecutive(){
00738 
00739     IplImage* correspond4 = cvCreateImage( cvSize(left_gray_ant->width, left_gray->height+left_gray->height), 8, 1 );
00740     cvSetImageROI( correspond4, cvRect( 0, 0, left_gray->width, left_gray->height ) );
00741     cvCopy( left_gray, correspond4 );
00742     cvSetImageROI( correspond4, cvRect( 0,left_gray->height, left_gray->width, left_gray_ant->height+left_gray->height ) );
00743     cvCopy( left_gray_ant, correspond4 );
00744     cvResetImageROI( correspond4 );
00745 
00746     for( int i = 0; i < (int)ptpairs3.size(); i += 2 )
00747     {
00748             CvSURFPoint* r13 = (CvSURFPoint*)cvGetSeqElem( leftKeypoints1, ptpairs3[i] );
00749             CvSURFPoint* r23 = (CvSURFPoint*)cvGetSeqElem( leftKeypoints2, ptpairs3[i+1] );
00750 
00751             cvLine( correspond4, cvPointFrom32f(r13->pt), cvPoint(cvPointFrom32f(r23->pt).x,cvPointFrom32f(r23->pt).y + left_gray->height), colors[0] );
00752     }
00753 
00754     cvShowImage( "Vertical matches", correspond4 );
00755 }
00756 
00757 
00758 void Error_Prop::VO_ViewAll(){
00759 
00760     //---------------------------- Build the image used to display the matches -----------------
00761 
00762     IplImage* correspond5 = cvCreateImage( cvSize(right_gray->width+left_gray->width, left_gray->height+left_gray->height), 8, 1 );
00763 
00764     cvSetImageROI( correspond5, cvRect( 0, 0, left_gray->width, left_gray->height ) );
00765     cvCopy( left_gray, correspond5 );
00766     cvSetImageROI( correspond5, cvRect( left_gray->width, 0, correspond5->width, left_gray->height ) );
00767     cvCopy( right_gray, correspond5 );
00768     cvSetImageROI( correspond5, cvRect( 0,left_gray->height, left_gray->width, correspond5->height ) );
00769     cvCopy( left_gray_ant, correspond5 );
00770     cvSetImageROI( correspond5, cvRect( left_gray->width,left_gray->height, correspond5->width, correspond5->height ) );
00771     cvCopy( left_gray_ant, correspond5 );
00772 
00773     cvResetImageROI( correspond5 );
00774 
00775     for( int i = 0; i < (int)ptpairs1.size(); i += 2 )
00776     {
00777             CvSURFPoint* r11 = (CvSURFPoint*)cvGetSeqElem( leftKeypoints1, ptpairs1[i] );
00778             CvSURFPoint* r21 = (CvSURFPoint*)cvGetSeqElem( rightKeypoints1, ptpairs1[i+1] );
00779 
00780             cvLine( correspond5, cvPointFrom32f(r11->pt), cvPoint(cvRound(r21->pt.x+left_gray->width), cvRound(r21->pt.y)), colors[8] );
00781     }
00782 
00783    /* for( int i = 0; i < (int)ptpairs2.size(); i += 2 )
00784     {
00785             CvSURFPoint* r11 = (CvSURFPoint*)cvGetSeqElem( leftKeypoints2, ptpairs2[i] );
00786             CvSURFPoint* r21 = (CvSURFPoint*)cvGetSeqElem( rightKeypoints2, ptpairs2[i+1] );
00787 
00788             cvLine( correspond5, cvPoint( cvRound(r11->pt.x), cvRound(r11->pt.y+left_gray->height)) ,cvPoint(cvRound(r21->pt.x + left_gray->width), cvRound(r21->pt.y + left_gray->height)) , colors[8] );
00789     }*/
00790 
00791     for( int i = 0; i < (int)ptpairs3.size(); i += 2 )
00792     {
00793             CvSURFPoint* r13 = (CvSURFPoint*)cvGetSeqElem( leftKeypoints1, ptpairs3[i] );
00794             CvSURFPoint* r23 = (CvSURFPoint*)cvGetSeqElem( leftKeypoints2, ptpairs3[i+1] );
00795 
00796             cvLine( correspond5, cvPointFrom32f(r13->pt), cvPoint(cvPointFrom32f(r23->pt).x,cvPointFrom32f(r23->pt).y + left_gray->height), colors[0] );
00797     }
00798 
00799     cvShowImage( "Show all", correspond5 );
00800 }
00801 
00802 
00803 void Error_Prop::VO_ConsecutiveMatching(){
00804 
00805     if (START==false)
00806     {   // Only executed on the first iteration
00807             leftKeypoints2 = cvCloneSeq(leftKeypoints1,consecutive_Storage);
00808             leftDescriptors2 = cvCloneSeq(leftDescriptors1,consecutive_Storage);
00809             rightKeypoints2 = cvCloneSeq(rightKeypoints1,consecutive_Storage);        // We may not need to keep these
00810             rightDescriptors2 = cvCloneSeq(rightDescriptors1,consecutive_Storage);
00811             left_gray_ant=left_gray;
00812             ptpairs2=ptpairs1_resp;
00813             ptpairs2_resp=ptpairs2;
00814             ROS_INFO("cloned!");
00815     }
00816 
00817         printf(" CONSECUTIVE (0): Number of matches BEFORE STARTING: %d\n",(int)ptpairs3.size()/2);
00818 
00819  //-----------------------------------Matching---------------------------------------------
00820         #ifdef USE_FLANN
00821                 flannFindPairs( leftKeypoints1, leftDescriptors1,leftKeypoints2, leftDescriptors2, ptpairs3 );
00822         #else
00823                 findPairs( leftKeypoints1, leftDescriptors1,leftKeypoints2, leftDescriptors2, ptpairs3);
00824         #endif
00825 
00826         Error_Prop::VO_ViewConsecutive();
00827 
00828 }
00829 
00830 
00831 void Error_Prop::VO_matches_to_3D(){
00832 
00833 //      printf(" 3D RECONSTRUCTION (0): starting\n");
00834 
00835         // Compute 3D points with randomly generated noise in both images
00836         float Ul,Vl,Ur,Vr;
00837 
00838         Matrix<float,4,3> An;
00839         Matrix<float,4,1> Bn;
00840         Matrix<float,3,1> xyzn;
00841 
00842         Matrix<float,3,3> Cam2Rob;
00843 
00844         Cam2Rob.setZero();
00845         Cam2Rob(0,2)=1; Cam2Rob(1,0)=-1; Cam2Rob(2,2)=-1;
00846 
00847         xyzn.setZero();
00848         int cuenta = 0;
00849         int j=0;
00850 
00851         cloud_a.width   = (int)ptpairs1.size()/2;
00852         cloud_a.height  = 1;
00853         cloud_a.points.resize (cloud_a.width * cloud_a.height);
00854         cloud_b.width  = cloud_a.width;
00855         cloud_b.height  = 1;
00856         cloud_b.points.resize (cloud_b.width * cloud_b.height);
00857 
00858 
00859         int k=0;
00860         for (uint i=0; i<ptpairs1.size(); i+=2)
00861         {
00862                 /* @note Check whether any ROS package already implements this 3D reconstruction, so this section could be replaced */
00863 
00864                 // Extract the keypoint coordinates in L and R
00865                 CvSURFPoint* r11 = (CvSURFPoint*)cvGetSeqElem( leftKeypoints1, ptpairs1[i] );
00866                 CvSURFPoint* r21 = (CvSURFPoint*)cvGetSeqElem( rightKeypoints1, ptpairs1[i+1] );
00867                 Ul = cvPointFrom32f(r11->pt).x;
00868                 Vl = cvPointFrom32f(r11->pt).y;
00869                 Ur = cvPointFrom32f(r21->pt).x;
00870                 Vr = cvPointFrom32f(r21->pt).y;
00871                 j++;
00872 
00873                 An.setZero();
00874                 An(0,0)= -alfa_l[0]; An(0,2)= Ul-uol;
00875                 An(1,1)= -alfa_l[1]; An(1,2)= Vl-vol;
00876                 An.row(2)= ( (Ur-uor)*R_.row(2) ) - ( alfa_r[0]*R_.row(0) );
00877                 An.row(3)= ( (Vr-vor)*R_.row(2) ) - ( alfa_r[1]*R_.row(1) );
00878                 Bn.setZero();
00879                 Bn(2,0) = ((uor-Ur)*tz + alfa_r[0]*tx);
00880                 Bn(3,0) = ((vor-Vr)*tz + alfa_r[1]*ty);
00881 
00882                 //xyzn.setZero();
00883                 xyzn=(((An.transpose()*An).inverse())*An.transpose())*Bn;
00884 
00885 
00886 
00887                 cloud_a.points[k].x=xyzn(2,0);
00888                 cloud_a.points[k].y=(-1)*xyzn(0,0);
00889                 cloud_a.points[k].z=(-1)*xyzn(1,0);
00890 
00891                 k++;
00892            }
00893 
00894         k=0;
00895         for (uint i=0; i<ptpairs2.size(); i+=2)
00896         {
00897                 /* @note Check whether any ROS package already implements this 3D reconstruction, so this section could be replaced */
00898 
00899                 // Extract the keypoint coordinates in L and R
00900                 CvSURFPoint* r12 = (CvSURFPoint*)cvGetSeqElem( leftKeypoints2, ptpairs2[i] );
00901                 CvSURFPoint* r22 = (CvSURFPoint*)cvGetSeqElem( rightKeypoints2, ptpairs2[i+1] );
00902                 Ul = cvPointFrom32f(r12->pt).x;
00903                 Vl = cvPointFrom32f(r12->pt).y;
00904                 Ur = cvPointFrom32f(r22->pt).x;
00905                 Vr = cvPointFrom32f(r22->pt).y;
00906                 j++;
00907 
00908                 An.setZero();
00909                 An(0,0)= -alfa_l[0]; An(0,2)= Ul-uol;
00910                 An(1,1)= -alfa_l[1]; An(1,2)= Vl-vol;
00911                 An.row(2)= ( (Ur-uor)*R_.row(2) ) - ( alfa_r[0]*R_.row(0) );
00912                 An.row(3)= ( (Vr-vor)*R_.row(2) ) - ( alfa_r[1]*R_.row(1) );
00913                 Bn.setZero();
00914                 Bn(2,0) = ((uor-Ur)*tz + alfa_r[0]*tx);
00915                 Bn(3,0) = ((vor-Vr)*tz + alfa_r[1]*ty);
00916 
00917                 //xyzn.setZero();
00918                 xyzn=(((An.transpose()*An).inverse())*An.transpose())*Bn;
00919 
00920                 cloud_b.points[k].x=xyzn(2,0);
00921                 cloud_b.points[k].y=(-1)*xyzn(0,0);
00922                 cloud_b.points[k].z=(-1)*xyzn(1,0);
00923 
00924                 k++;
00925            }
00926 
00927 
00928         // Publish to view all the unfiltered points
00929         pub_pcl1.publish(cloud_a);
00930         pub_pcl2.publish(cloud_b);
00931 
00932         printf(" 3D RECONSTRUCTION (1): size cloud_a %i cloud_b %i \n",(int)cloud_a.size(), (int)cloud_b.size());
00933  }
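/* VO_matches_to_3D() triangulates each match by stacking the two projection constraints
   into the 4x3 system An * xyz = Bn and solving it with explicit normal equations,
   (An'An)^-1 An' Bn. A minimal sketch of the same least-squares solve done with an SVD,
   which is usually better conditioned; illustrative only, triangulateLstSq is not part of
   the original code. */
static Eigen::Vector3f triangulateLstSq(const Eigen::MatrixXf& An, const Eigen::VectorXf& Bn)
{
  // Least-squares solution of the overdetermined system An * xyz = Bn
  Eigen::JacobiSVD<Eigen::MatrixXf> svd(An, Eigen::ComputeThinU | Eigen::ComputeThinV);
  return svd.solve(Bn);
}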
00934 
00935 void Error_Prop::VO_3D_to_matches(){
00936 
00937 //      printf(" 3D RECONSTRUCTION (0): starting\n");
00938 
00939         Matrix<float,3,4> An;
00940 
00941         Matrix<float,3,1> UVl,UVr;
00942         Matrix<float,4,1> xyz;
00943 
00944 
00945         cloud_c   = cloud_a;
00946 
00947         UV_l.width  = cloud_a.width;
00948         UV_l.height  = 1;
00949         UV_l.points.resize (UV_l.width * UV_l.height);
00950         UV_r.width  = cloud_a.width;
00951         UV_r.height  = 1;
00952         UV_r.points.resize (UV_r.width * UV_r.height);
00953 
00954         for (uint i=0; i<cloud_a.size(); i++)
00955         {
00956 
00957                 xyz(0) = cloud_c.points[i].x;
00958                 xyz(1) = cloud_c.points[i].y;
00959                 xyz(2) = cloud_c.points[i].z;
00960                 xyz(3) = 1;
00961 
00962                 An.setZero();
00963                 An(0,0)= -alfa_l[0]; An(0,2)= uol;
00964                 An(1,1)= -alfa_l[1]; An(1,2)= vol;
00965                 An(2,2)=1;
00966 
00967                 UVl= An*xyz;
00968 
00969                 An.setZero();
00970                 An(0,0)= -alfa_r[0]; An(0,2)= uor;
00971                 An(1,1)= -alfa_r[1]; An(1,2)= vor;
00972                 An(2,2)=1;
00973 
00974                 UVr = An*H_l2r_*xyz;
00975 
00976                 UV_l.points[i].x=UVl(0);
00977                 UV_l.points[i].y=UVl(1);
00978                 UV_l.points[i].z=1;
00979                 UV_r.points[i].x=UVr(0);
00980                 UV_r.points[i].y=UVr(1);
00981                 UV_r.points[i].z=1;
00982            }
00983         pub_uvl.publish(UV_l);
00984         pub_uvr.publish(UV_r);
00985 
00986  }
00987 
00988 
00989 void Error_Prop::VO_matchFilter(){
00990 
00991         //-------- Filter the matches, keeping only the useful ones (from the 4 images) -------
00992 
00993         printf(" MATCH FILTER (0): Pairs in 1 %i Pairs in 2 %i Vertical pairs %i \n",(int)ptpairs1.size()/2,(int)ptpairs2.size()/2,(int)ptpairs3.size()/2);
00994 
00995         int limp=0;
00996 
00997         // 1.- Filter the vertical matches against the new horizontal ones (3 vs 1)
00998 
00999         for (uint i=0;i<ptpairs3.size();i+=2){
01000                 for(uint j=0;j<ptpairs1.size();j+=2)
01001                 {
01002                         if ( (ptpairs3[i]==ptpairs1[j]))
01003                         {
01004                                 limp=1;                                 // match present in both 1 and 3
01005                         }
01006                         else if ( ptpairs3[i]<=ptpairs1[j])
01007                         {
01008                                 j=ptpairs1.size();
01009                         }
01010                 }
01011                 if(limp==0)
01012                 {                                                               // the horizontal match has no vertical match
01013                         ptpairs3.erase(ptpairs3.begin()+i);
01014                         ptpairs3.erase(ptpairs3.begin()+i);
01015                         i-=2;
01016                 }
01017                 limp=0;
01018         }
01019 
01020         //Error_Prop::VO_ViewStereo();
01021         //Error_Prop::VO_ViewConsecutive();
01022 
01023         printf(" MATCH FILTER (1): Pairs in 1 %i Pairs in 2 %i Vertical pairs %i \n",(int)ptpairs1.size()/2,(int)ptpairs2.size()/2,(int)ptpairs3.size()/2);
01024 
01025 
01026         // 2.- Filter the vertical matches against the old horizontal ones (3 vs 2)
01027 
01028         limp=0;
01029         for (uint i=0;i<ptpairs3.size();i+=2){
01030                 for(uint j=0;j<ptpairs2.size();j+=2)
01031                 {                       // the efficiency of this could be improved
01032                         if ( (ptpairs3[i+1]==ptpairs2[j]))
01033                         {
01034                                 limp=1;                                 // match present in both 2 and 3
01035                         }
01036                 }
01037                 if(limp==0)
01038                 {                                                               // the horizontal match has no vertical match
01039                         ptpairs3.erase(ptpairs3.begin()+i);
01040                         ptpairs3.erase(ptpairs3.begin()+i);
01041                         i-=2;
01042                 }
01043                 limp=0;
01044         }
01045 
01046         printf(" MATCH FILTER (2): Pairs in 1 %i Pairs in 2 %i Vertical pairs %i \n",(int)ptpairs1.size()/2,(int)ptpairs2.size()/2,(int)ptpairs3.size()/2);
01047 
01048         // 3.- Filter the horizontal matches present in the vertical set (1 vs 3)
01049 
01050         limp=0;
01051 
01052         for (int i=0;i<(int)ptpairs1.size();i+=2){
01053                 if ( (ptpairs1[i]!=ptpairs3[i])){
01054           //            printf("eliminando..\n");
01055                         ptpairs1.erase(ptpairs1.begin()+i);
01056                         ptpairs1.erase(ptpairs1.begin()+i);
01057                         i-=2;
01058                 }
01059         }
01060 
01061         printf(" MATCH FILTER (3): Pairs in 1 %i Pairs in 2 %i Vertical pairs %i \n",(int)ptpairs1.size()/2,(int)ptpairs2.size()/2,(int)ptpairs3.size()/2);
01062 
01063         // 4.- Filter the old horizontal matches present in the vertical set (2 vs 3)
01064 
01065         vector<int> lista;
01066         lista.clear();
01067 
01068         for (uint i=0;i<ptpairs3.size();i+=2){
01069                 for(uint j=0;j<ptpairs2.size();j+=2)
01070                 {                       // the efficiency of this could be improved
01071                         if ( (ptpairs3[i+1]==ptpairs2[j]))
01072                         {
01073                             lista.push_back(ptpairs2[j]);
01074                             lista.push_back(ptpairs2[j+1]);
01075                         }
01076                 }
01077         }
01078 
01079         ptpairs2.clear();
01080         ptpairs2=lista;
01081 
01082         printf(" MATCH FILTER (4): Pairs in 1 %i Pairs in 2 %i Vertical pairs %i \n",(int)ptpairs1.size()/2,(int)ptpairs2.size()/2,(int)ptpairs3.size()/2);
01083 
01084         //Error_Prop::VO_ViewAll();
01085     }
01086 
01087 
01088 void Error_Prop::VO_cloudFilter(){
01089 
01090 //      printf(" CLOUD FILTER (0): Size cloud_wpts %i cloud_cpts %i \n",(int)wpts1.size(),(int)cpts1.size());
01091         // The two clouds must have the same size
01092 
01093         if (START==true)
01094         {
01095                 cpts_ant.width  = cpts1.size();
01096                 cpts_ant.height = 1;
01097                 cpts_ant.points.resize (cpts_ant.width * cpts_ant.height);
01098                 pcl::copyPointCloud(cpts1,cpts_ant);
01099         }
01100 
01101         wpts1.width  = ptpairs2.size()/2;
01102         wpts1.height = 1;
01103         wpts1.points.resize (wpts1.width * wpts1.height);
01104 
01105         wpts1RT.width  = ptpairs2.size()/2;
01106         wpts1RT.height = 1;
01107         wpts1RT.points.resize (wpts1RT.width * wpts1RT.height);
01108 
01109         cpts1.width  = ptpairs1.size()/2;
01110         cpts1.height = 1;
01111         cpts1.points.resize (cpts1.width * cpts1.height);
01112 
01113         indices_a.clear();
01114         indices_b.clear();
01115 
01116         indices_a.resize(ptpairs1.size()/2);
01117         indices_b.resize(ptpairs2.size()/2);
01118 
01119         int limp=0;
01120 
01121         for (uint i=0;i<ptpairs1.size();i+=2)
01122         {
01123                 for (uint j=0;j<ptpairs1_resp.size();j+=2)
01124                 {
01125                         if (ptpairs1[i]==ptpairs1_resp[j])
01126                         {
01127                                 indices_a[i/2]=j/2;
01128                                 limp=1;
01129                         }
01130                 }
01131                 if (limp==0)
01132                 {
01133                         cout << "MATCH NOT FOUND!!! \n" << endl;
01134                 }
01135                 limp=0;
01136 
01137         }
01138 
01139         for (uint i=0;i<ptpairs2.size();i+=2)
01140         {
01141                 for (uint j=0;j<ptpairs2_resp.size();j+=2)
01142                 {
01143                         if (ptpairs2[i]==ptpairs2_resp[j])
01144                         {
01145                                 indices_b[i/2]=j/2;
01146                                 limp=1;
01147                         }
01148                 }
01149                 if (limp==0)
01150                 {
01151                         cout << "MATCH NOT FOUND!!! \n" << endl;
01152                 }
01153                 limp=0;
01154         }
01155 
01156         // Cloud in the camera frame (points at time i)
01157         pcl::copyPointCloud(cloud_a,indices_a,cpts1);
01158         // Cloud in the world frame (points at time i-1)
01159         pcl::copyPointCloud(cloud_b,indices_b,wpts1);
01160 
01161 
01162 //      printf("size: cloud_a %i cloud_b %i cpts1 %i wpts1 %i ",(int)cloud_a.size(),(int)cloud_b.size(),(int)cpts1.size(),(int)wpts1.size());
01163 
01164         if (START==false)
01165         {
01166                 pcl::copyPointCloud(cpts1,cpts_ant);
01167         }
01168 
01169         pub_cpts1.publish(cpts1); //pub_pcl1.publish(cpts1);            // Publish the current points
01170         pub_wpts1.publish(wpts1); //pub_pcl2.publish(cpts_ant);         // Publish the previous points (the ones published before)
01171 
01172 //      printf(" CLOUD FILTER (0): Size cloud_wpts %i cloud_cpts %i \n",(int)wpts1.size(),(int)cpts1.size());
01173 
01174 }
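/* VO_get_RT_RANSAC() below estimates the rigid transform between the two clouds by
   repeatedly sampling 6 correspondences, computing the closed-form SVD (Kabsch) solution,
   and keeping the transform with the lowest residual. PCL ships the same closed-form step;
   a minimal sketch assuming <pcl/registration/transformation_estimation_svd.h> is available
   in this PCL version (estimateRigidSVD is illustrative, not part of the original code): */
#include <pcl/registration/transformation_estimation_svd.h>

static Eigen::Matrix4f estimateRigidSVD(const pcl::PointCloud<pcl::PointXYZ>& src,
                                        const pcl::PointCloud<pcl::PointXYZ>& tgt)
{
  pcl::registration::TransformationEstimationSVD<pcl::PointXYZ, pcl::PointXYZ> est;
  Eigen::Matrix4f T;
  est.estimateRigidTransformation(src, tgt, T);   // R, t minimizing ||T * src - tgt||
  return T;
}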
01175 
01176 
01177 void Error_Prop::VO_get_RT_RANSAC()
01178 {
01179         int num;
01180         float err,errorant,errRT,errRTo;
01181         Eigen::Vector4f centroid_wpts, centroid_cpts,centroid_new,centroid_nubet,centroid_nubew,centroid_wpts1RT;
01182 
01183         srand (time(NULL));
01184 
01185         err=1000;
01186         errorant=1000;
01187 
01188         Rot_12.setIdentity();
01189         Tras_12.setZero();
01190 
01191         // compute the original centroids
01192 
01193         printf(" RANSAC (0): Size cloud a %i Size cloud b %i \n",(int)cloud_a.size(),(int)cloud_b.size());
01194 
01195         pcl::compute3DCentroid (cloud_b, centroid_wpts);
01196         pcl::compute3DCentroid (cloud_a, centroid_cpts);   //cloud_a -> cpts1
01197 
01198         pub_cpts1.publish(cloud_a);
01199         pub_wpts1.publish(cloud_b);
01200 
01201         vector<int> ind;
01202 
01203         Eigen::Matrix4f transf;
01204 
01205         transformation.setIdentity();
01206 
01207         for (int j=0; j<150 ;j++)               // The number of iterations of this loop controls how hard we search for the best R and T
01208         {
01209                 ind.clear();
01210                 for (int i=0; i<6; i++)
01211                 {       // Random selection of cloud points used to compute R and T
01212                         num=rand() % cloud_b.size();
01213                         ind.push_back(num);
01214                         if(ind.size()>6)
01215                             cout << "INDICES > 6!!!!!!!!!!!!!"<< endl;
01216                 }
01217 
01218                 pcl::copyPointCloud(cloud_a,ind,cloud_t);
01219                 pcl::copyPointCloud(cloud_b,ind,cloud_w);
01220                 pcl::copyPointCloud(cloud_w,cloud_wRT);
01221 
01222                 transf.setIdentity();
01223 
01224                 // Estimate the centroids of source, target
01225                 Eigen::Vector4f centroid_src, centroid_tgt;
01226                 pcl::compute3DCentroid (cloud_w, centroid_src);
01227                 pcl::compute3DCentroid (cloud_t, centroid_tgt);
01228 
01229                 // Subtract the centroids from source, target
01230                 Eigen::MatrixXf cloud_src_demean;
01231                 Eigen::MatrixXf cloud_tgt_demean;
01232                 pcl::demeanPointCloud (cloud_w, centroid_src, cloud_src_demean);  //cloud_w
01233                 pcl::demeanPointCloud (cloud_t, centroid_tgt, cloud_tgt_demean);  //cloud_t
01234 
01235                 // Assemble the correlation matrix H = source * target'
01236                 Eigen::Matrix3f H = (cloud_src_demean * cloud_tgt_demean.transpose ()).topLeftCorner<3, 3>();
01237 
01238                 // Compute the Singular Value Decomposition
01239                 Eigen::JacobiSVD<Eigen::Matrix3f> svd (H, Eigen::ComputeFullU | Eigen::ComputeFullV);  // thin U/V would also work
01240                 Eigen::Matrix3f u = svd.matrixU ();
01241                 Eigen::Matrix3f v = svd.matrixV ();
01242 
01243                 // Compute R = V * U'
01244                 Eigen::Matrix3f R;
01245 
01246                 if (u.determinant () * v.determinant () < 0)
01247                 {
01248                         //printf(" RANSAC (1): inv RANSAC\n");
01249                         for (int x = 0; x < 3; ++x)
01250                         {
01251                                 v (x, 2) *= -1;
01252                         }
01253                 }
01254 
01255 
01256                 R = v * u.transpose ();
01257 
01258                 transf.setIdentity();
01259                 transf.topLeftCorner<3, 3> () = R;
01260 
01261                 // Translation
01262                 Eigen::Vector3f R_src = R * centroid_src.head<3> ();
01263                 Eigen::Vector3f T = centroid_tgt.head<3> () - R_src;
01264                 transf.block<3,1>(0,3) = T;
01265 
01266                 // transform cloud_w with the computed R and T
01267                 pcl::transformPointCloud(cloud_w,cloud_wRT,transf);
01268 
01269 
01270                 // transformation matrix evaluation
01271                 err=0;
01272                 for (int i=0; i< (int)cloud_t.size(); i++)
01273                 {
01274                         Eigen::Vector3f err_pto = cloud_t.points[i].getVector3fMap() - cloud_wRT.points[i].getVector3fMap() ;
01275                         err += (pow(err_pto(0),2)+pow(err_pto(1),2)+pow(err_pto(2),2));
01276                 }
01277 
01278                 if( (err<errorant)/*&&(err<2)*/ )
01279                 {
01280                         Rot_12  = R;
01281                         Tras_12 = T;
01282                         transformation=transf;
01283                         errorant=err;
01284                 }
01285 
01286 
01287         }
01288 
01289         cout << "best transf \n" << transformation << endl;
01290 
01291         // Use the best rotation and translation found
01292         pcl::transformPointCloud(cloud_b,wpts1RT,transformation);               // rotate and translate the wpts1 point cloud (world frame)
01293         pcl::compute3DCentroid (wpts1RT, centroid_wpts1RT);             // compute the mean
01294 
01295         vector<int> copiar;
01296         copiar.clear();
01297 
01298         // cloud filter: points with an error larger than the threshold are discarded
01299         float thres = 1;
01300         err=0;
01301         for (int i=0; i< (int)cloud_b.size(); i++)
01302         {
01303                 Eigen::Vector3f err_pto = cloud_a.points[i].getVector3fMap() - wpts1RT.points[i].getVector3fMap() ;
01304                 err = (pow(err_pto(0),2)+pow(err_pto(1),2)+pow(err_pto(2),2));
01305                 if( err<thres )
01306                 {
01307                         // points with a large error are not copied
01308                         copiar.push_back(i);
01309                 }
01310         }
01311 
01312         pcl::copyPointCloud(cloud_b,copiar,wpts1RANSAC);
01313         pcl::copyPointCloud(cloud_a,copiar,cpts1RANSAC);
01314 
01315         cout << " size without outliers " << wpts1RANSAC.size() << " and " << cpts1RANSAC.size() << endl;
01316 
01317         // At this point we have the two point clouds without outliers
01318         if (wpts1RANSAC.size() >= 6)
01319         {
01320                 //-------------------------- Recompute R and T using only the points that passed RANSAC -------------------------------
01321 
01322                 // Estimate the centroids of source, target
01323                 Eigen::Vector4f centroid_src_RANSAC, centroid_tgt_RANSAC;
01324                 pcl::compute3DCentroid (wpts1RANSAC, centroid_src_RANSAC);
01325                 pcl::compute3DCentroid (cpts1RANSAC, centroid_tgt_RANSAC);
01326 
01327                 // Subtract the centroids from source, target
01328                 Eigen::MatrixXf cloud_src_demean_RANSAC;
01329                 Eigen::MatrixXf cloud_tgt_demean_RANSAC;
01330                 pcl::demeanPointCloud (wpts1RANSAC, centroid_src_RANSAC, cloud_src_demean_RANSAC);
01331                 pcl::demeanPointCloud (cpts1RANSAC, centroid_tgt_RANSAC, cloud_tgt_demean_RANSAC);
01332 
01333                 // Assemble the correlation matrix H = source * target'
01334                 Eigen::Matrix3f H_RANSAC = (cloud_src_demean_RANSAC * cloud_tgt_demean_RANSAC.transpose ()).topLeftCorner<3, 3>();
01335 
01336                 // Compute the Singular Value Decomposition
01337                 Eigen::JacobiSVD<Eigen::Matrix3f> svd_RANSAC (H_RANSAC, Eigen::ComputeFullU | Eigen::ComputeFullV);  // a thin SVD would also suffice
01338                 Eigen::Matrix3f u_RANSAC = svd_RANSAC.matrixU ();
01339                 Eigen::Matrix3f v_RANSAC = svd_RANSAC.matrixV ();
01340 
01341                 // Compute R = V * U'
01342                 Eigen::Matrix3f R_RANSAC;
01343 
01344                 if (u_RANSAC.determinant () * v_RANSAC.determinant () < 0)
01345                 {
01346                         printf(" ============================== RANSAC (1): inv \n");
01347                         for (int x = 0; x < 3; ++x)
01348                         {
01349                                 v_RANSAC (x, 2) *= -1;
01350                         }
01351                 }
01352 
01353                 // Rotation
01354                 R_RANSAC = v_RANSAC * u_RANSAC.transpose ();
01355 
01356                 // Translation
01357                 Eigen::Vector3f Rc_RANSAC = R_RANSAC * centroid_src_RANSAC.head<3> ();
01358                 Eigen::Vector3f T_RANSAC = centroid_tgt_RANSAC.head<3> () - Rc_RANSAC;
01359 
01360                 Rot_12  = R_RANSAC;
01361                 Tras_12 = T_RANSAC;
01362 
01363                 transformation.setIdentity();
01364                 transformation.topLeftCorner<3, 3> () = R_RANSAC;
01365                 transformation.block<3,1>(0,3) = T_RANSAC;
01366 
01367                 pub_src.publish(wpts1RANSAC);                                   // RANSAC inliers before rotating and translating
01368                 pub_src_RT.publish(wpts1RT);                                    // publish the rotated and translated cloud built only from the inliers
01369 
01370                 cout << "def transf \n" << transformation << endl;
01371         }
01372         else
01373         {
01374                 ROS_INFO("NOT ENOUGH POINTS WITH THIS ERROR THRESHOLD FOR RANSAC");
01375                 Rot_12.setIdentity();
01376                 Tras_12.setZero();
01377                 transformation.setIdentity();
01378         }
01379 
01380 
01381 
01382 }
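// ---------------------------------------------------------------------------
// Minimal standalone sketch (not part of the original file) of the SVD-based
// rigid-transform estimation performed twice above. The helper name
// estimateRigidTransformSVD and its signature are illustrative only; it
// assumes src and tgt have the same size, that src[i] corresponds to tgt[i],
// and that the PCL/Eigen headers pulled in by VO/VO.h are available.
// ---------------------------------------------------------------------------
static Eigen::Matrix4f estimateRigidTransformSVD(const pcl::PointCloud<pcl::PointXYZ> &src,
                                                 const pcl::PointCloud<pcl::PointXYZ> &tgt)
{
        // centroids of both clouds
        Eigen::Vector4f c_src, c_tgt;
        pcl::compute3DCentroid (src, c_src);
        pcl::compute3DCentroid (tgt, c_tgt);

        // subtract the centroids (4xN matrices, last row zero)
        Eigen::MatrixXf src_d, tgt_d;
        pcl::demeanPointCloud (src, c_src, src_d);
        pcl::demeanPointCloud (tgt, c_tgt, tgt_d);

        // correlation matrix H = source * target'
        Eigen::Matrix3f H = (src_d * tgt_d.transpose ()).topLeftCorner<3, 3>();

        // SVD of H
        Eigen::JacobiSVD<Eigen::Matrix3f> svd (H, Eigen::ComputeFullU | Eigen::ComputeFullV);
        Eigen::Matrix3f u = svd.matrixU ();
        Eigen::Matrix3f v = svd.matrixV ();

        // guard against a reflection (det(R) = -1)
        if (u.determinant () * v.determinant () < 0)
                v.col (2) *= -1;

        // R maps the demeaned source onto the demeaned target; T aligns the centroids
        Eigen::Matrix3f R = v * u.transpose ();
        Eigen::Vector3f T = c_tgt.head<3> () - R * c_src.head<3> ();

        Eigen::Matrix4f transf = Eigen::Matrix4f::Identity ();
        transf.topLeftCorner<3, 3> () = R;
        transf.block<3, 1> (0, 3) = T;
        return transf;
}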
01383 
01384 
01385 void Error_Prop::VO_RT_to_world(){
01386 
01387             Matrix<float,3,3> R_cam2rob;
01388             R_cam2rob.setZero();
01389             R_cam2rob(0,2) = 1; R_cam2rob(1,0) = -1; R_cam2rob(2,1) = -1;
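            // R_cam2rob appears to encode the usual optical-frame to robot-frame axis
            // remapping (camera: z forward, x right, y down -> robot: x forward, y left, z up):
            //   x_rob = z_cam,  y_rob = -x_cam,  z_rob = -y_cam
            // (it is only referenced in the commented-out conversions below)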
01390             //R_cam2rob(0,2) = -1; R_cam2rob(1,2) = 1; R_cam2rob(2,0) = -1;  // gives the result tilted upwards
01391 
01392             current_time = ros::Time::now();
01393 
01394             //since the odometry is 6DOF we'll need a quaternion created from roll, pitch and yaw
01395             Matrix<float,3,1> ang;
01396 
01397             //Matrix<float,3,3> Rota;
01398             //Rota = transformation.topLeftCorner<3, 3> ();
01399             //Rota = Rota*R_cam2rob;
01400 
01401             geometry_msgs::Quaternion quat_msg;
01402 
01403             if (START==FALSE)
01404             {
01405                 Rot_i.setIdentity();
01406                 Tras_i.setZero();
01407 
01408                 ypr(Rot_i,ang);
01409                 tf::Quaternion quat = tf::createQuaternionFromRPY(ang(2,0),ang(1,0),ang(0,0)); //0 2 1
01410                 //Rot_i = Rot_i*R_cam2rob;
01411 
01412                     //trans_ = Vector4d(0,0,0,1);
01413                     //fq.coeffs() = Vector4d(0,0,0,1);
01414 
01415 
01416             //ypr(transformation.topLeftCorner<3, 3> (),ang);
01417 
01418             //Tras_12 = R_cam2rob * Tras_12;
01419 
01420             //if (START==FALSE)
01421             //{
01422     //ODOMETRY
01423                 odom3D.position.x=Tras_i(0,0);
01424                 odom3D.position.y=Tras_i(1,0);
01425                 odom3D.position.z=Tras_i(2,0);
01426                 tf::quaternionTFToMsg(quat, quat_msg);
01427                 odom3D.orientation= quat_msg;
01428 
01429                 quat_ant = quat;
01430 
01431             }
01432             else
01433             {
01434                 //Rot_12 = R_cam2rob * Rot_12;
01435                 //Tras_12 = R_cam2rob * Tras_12;
01436 
01437 
01438                 Tras_i = Rot_i * Tras_12 + Tras_i;
01439                 cout << Tras_i << endl;
01440                 Rot_i = Rot_i * Rot_12;
01441 
01442                 ypr(Rot_i,ang);
01443                 tf::Quaternion quat = tf::createQuaternionFromRPY(ang(2,0),ang(1,0),ang(0,0)); //0 2 1
01444 
01445                 quat = quat * quat_ant;
01446                 tf::quaternionTFToMsg(quat, quat_msg);
01447 
01448 
01449      /*               if (transformation(0,3)>0.5)
01450                         odom3D.position.x+=0.5;
01451                     else if (transformation(0,3)<-0.5)
01452                         odom3D.position.x+=-0.5;
01453                     else */
01454 
01455                 odom3D.position.x=Tras_i(0);//transformation(0,3);
01456     /*
01457                     if (transformation(1,3)>0.5)
01458                         odom3D.position.y+=0.5;
01459                     else if (transformation(1,3)<-0.5)
01460                         odom3D.position.y+=-0.5;
01461                     else */
01462 
01463                 odom3D.position.y=Tras_i(1);//transformation(1,3);
01464     /*
01465                     if(transformation(2,3)>0.5)
01466                         odom3D.position.z+=0.5;
01467                     else if (transformation(2,3)<-0.5)
01468                         odom3D.position.z+=-0.5;
01469                     else */
01470 
01471                odom3D.position.z=0;//Tras_i(2);//transformation(2,3)
01472                odom3D.orientation=quat_msg;
01473 
01474                pcl::copyPointCloud(cloud_odom,cloud_odom2);
01475 
01476                quat_ant=quat;
01477 
01478             }
01479 
01480             //we'll publish the odometry message over ROS
01481 
01482             nav_msgs::Odometry od;
01483             od.header.stamp = current_time;
01484             od.header.frame_id = "od";
01485 
01486             //set the position
01487             od.pose.pose.position.x = odom3D.position.x;
01488             od.pose.pose.position.y = odom3D.position.y;
01489             od.pose.pose.position.z = odom3D.position.z;
01490             od.pose.pose.orientation = odom3D.orientation;
01491 
01492             //publish the message
01493             pub_od.publish(od);
01494 
01495 
01496             cout << "transf \n" << transformation << endl;
01497             cout << "\n odom msg \n" << odom3D << endl;
01498 
01499             cloud_odom.points[0].x = odom3D.position.x;
01500             cloud_odom.points[0].y = odom3D.position.y;
01501             cloud_odom.points[0].z = odom3D.position.z;
01502 
01503             pub_odom.publish(cloud_odom);
01504 
01505  }
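// ---------------------------------------------------------------------------
// Minimal sketch (not part of the original file) of the pose accumulation done
// in VO_RT_to_world() above: the relative motion (Rot_12, Tras_12) estimated
// between consecutive stereo frames is chained onto the accumulated pose
// (Rot_i, Tras_i). The helper name accumulatePose is illustrative only.
// ---------------------------------------------------------------------------
static void accumulatePose(Eigen::Matrix3f &Rot_i, Eigen::Vector3f &Tras_i,
                           const Eigen::Matrix3f &Rot_12, const Eigen::Vector3f &Tras_12)
{
        // In homogeneous form this is simply  H_i <- H_i * H_12
        Tras_i = Rot_i * Tras_12 + Tras_i;   // translation part of H_i * H_12
        Rot_i  = Rot_i * Rot_12;             // rotation part of H_i * H_12
}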
01506 
01507 void Error_Prop::VO_clean()
01508 {
01509         //----------clear memory for consecutive storage--------------
01510         cvClearMemStorage(consecutive_Storage);
01511 
01512 }
01513 
01514 
01515 void EnviromentConfiguration(){
01516 
01517     cv::namedWindow("Left");
01518     cv::namedWindow("Right");
01519     cv::namedWindow("Stereo matches");
01520     cv::namedWindow("Vertical matches");
01521     cv::namedWindow("Show all");
01522     cvStartWindowThread();
01523     ROS_INFO("running VO environment configuration..");
01524 
01525 }
01526 
01527 /*void viewImage(const Mat& Image,vector<KeyPoint>& Keypoints,Mat& outImage)
01528 {
01529     drawKeypoints(Image,Keypoints,outImage);
01530     namedWindow(Name,1);
01531     imshow(Name, Image);
01532     namedWindow(kp,1);
01533     imshow(kp,outImage);
01534     waitKey();
01535 }*/
01536 
01537 
01538 /*-----------------------------------------------------------------------------------------------------------
01539                                                   Main
01540 -----------------------------------------------------------------------------------------------------------*/
01541 
01542 
01543 int main(int argc, char** argv)
01544 {
01545         ros::init(argc, argv, "error_prop");
01546 
01547         Error_Prop Error;
01548 
01549         Error.init();
01550         EnviromentConfiguration();
01551 
01552 //-------------------------- Real-time computation -----------------------------
01553 
01554         ros::Rate loop_rate(1);   //1 Hz
01555         loop_rate.sleep();
01556         loop_rate.sleep();
01557 
01558         while(TRUE)
01559         {
01560                 ROS_INFO("while");
01561                 ros::spinOnce();
01562 
01563                 loop_rate.sleep();
01564                 Error.run();
01565     
01566                 int wait = cvWaitKey(10) & 255;
01567                 if( wait == 27 ) break;                 //wait for escape
01568       }
01569 
01570 
01571         return 0;
01572 }


iri_visual_odometry
Author(s): Jose Capriles
autogenerated on Fri Dec 6 2013 21:04:25