tracking.cpp
Go to the documentation of this file.
00001 #include "ros/ros.h"
00002 #include "cv.h"
00003 #include "highgui.h"
00004 #include "tracking.h"
00005 #include <visp/vpImageConvert.h>
00006 #include <visp/vpPixelMeterConversion.h>
00007 #include <visp/vpImagePoint.h>
00008 #include <visp/vpDisplayX.h>
00009 #include <visp/vpPose.h>
00010 #include <visp/vpMeterPixelConversion.h>
00011 #include <visp/vpTrackingException.h>
00012 #include <visp/vpImageIo.h>
00013 #include <visp/vpRect.h>
00014 #include <visp/vpMbKltTracker.h>
00015 #include <visp/vpMbEdgeTracker.h>
00016 
00017 #include "logfilewriter.hpp"
00018 
00019 namespace tracking{
00020 
00021 #if VISP_VERSION_INT < VP_VERSION_INT(2,10,0)
00022   Tracker_:: Tracker_(CmdLine& cmd, detectors::DetectorBase* detector,vpMbTracker* tracker,bool flush_display) :
00023 #else
00024   Tracker_:: Tracker_(CmdLine& cmd, vpDetectorBase* detector,vpMbTracker* tracker,bool flush_display) :
00025 #endif
00026   cmd(cmd),
00027       iter_(0),
00028       flashcode_center_(640/2,480/2),
00029       detector_(detector),
00030       tracker_(tracker),
00031       flush_display_(flush_display){
00032     std::cout << "starting tracker" << std::endl;
00033     cvTrackingBox_init_ = false;
00034     cvTrackingBox_.x = 0;
00035     cvTrackingBox_.y = 0;
00036     cvTrackingBox_.width = 0;
00037     cvTrackingBox_.height = 0;
00038 
00039     // Retrieve camera parameters comming from camera_info message in order to update them after loadConfigFile()
00040     tracker_->getCameraParameters(cam_); // init camera parameters
00041 
00042     points3D_inner_ = cmd.get_inner_points_3D();
00043     points3D_outer_ = cmd.get_outer_points_3D();
00044     outer_points_3D_bcp_ = cmd.get_outer_points_3D();
00045     if(cmd.using_adhoc_recovery() || cmd.log_checkpoints()){
00046       for(unsigned int i=0;i<points3D_outer_.size();i++){
00047         vpPoint p;
00048         p.setWorldCoordinates(
00049                   (points3D_outer_[i].get_oX()+points3D_inner_[i].get_oX())*cmd.get_adhoc_recovery_ratio(),
00050                   (points3D_outer_[i].get_oY()+points3D_inner_[i].get_oY())*cmd.get_adhoc_recovery_ratio(),
00051                   (points3D_outer_[i].get_oZ()+points3D_inner_[i].get_oZ())*cmd.get_adhoc_recovery_ratio()
00052                 );
00053         points3D_middle_.push_back(p);
00054       }
00055     }
00056     f_ = cmd.get_flashcode_points_3D();
00057 
00058     if(cmd.using_var_file()){
00059       varfile_.open(cmd.get_var_file().c_str(),std::ios::out);
00060       varfile_ << "#These are variances and other data from the model based tracker in gnuplot format" << std::endl;
00061       if(cmd.using_hinkley())
00062         varfile_ << "iteration\tvar_x\var_y\tvar_z\tvar_wx\tvar_wy\var_wz";
00063       if(cmd.using_mbt_dynamic_range())
00064         varfile_ << "\tmbt_range";
00065       if(cmd.log_pose())
00066         varfile_ << "\tpose_tx\tpose_ty\tpose_tz\tpose_rx\tpose_ry\tpose_rz";
00067       if(cmd.log_checkpoints())
00068         varfile_ << "\tcheckpoint_median";
00069 
00070       varfile_ << std::endl;
00071     }
00072 
00073     if(cmd.using_hinkley()){
00074       if(cmd.get_verbose())
00075         std::cout << "Initialising hinkley with alpha=" << cmd.get_hinkley_alpha() << " and delta=" << cmd.get_hinkley_delta() << std::endl;
00076       for(hinkley_array_t::iterator i = hink_.begin();i!=hink_.end();i++)
00077         i->init(cmd.get_hinkley_alpha(),cmd.get_hinkley_delta());
00078     }
00079 
00080     if(cmd.using_mbt_dynamic_range()){
00081       vpMbEdgeTracker *tracker_me = dynamic_cast<vpMbEdgeTracker*>(tracker_);
00082       if(tracker_me)
00083         tracker_me->getMovingEdge(tracker_me_config_);
00084       else
00085         std::cout << "error: could not init moving edges on tracker that doesn't support them." << std::endl;
00086     }
00087 
00088     tracker_->loadConfigFile(cmd.get_xml_file() ); // Load the configuration of the tracker
00089     tracker_->loadModel(cmd.get_mbt_cad_file()); // load the 3d model, to read .wrl model the 3d party library coin is required, if coin is not installed .cao file can be used.
00090     tracker_->setCameraParameters(cam_); // Set the good camera parameters coming from camera_info message
00091   }
00092 
// Accessor: the flashcode/QR-code detector (type depends on the ViSP version).
#if VISP_VERSION_INT < VP_VERSION_INT(2,10,0)
  detectors::DetectorBase& Tracker_:: get_detector(){
#else
  vpDetectorBase& Tracker_:: get_detector(){
#endif
    return *detector_;
  }
00100 
  // Accessor: the underlying ViSP model-based tracker.
  vpMbTracker& Tracker_:: get_mbt(){
    return *tracker_;
  }
00104 
  // Accessor: 3D inner corners of the model (from the command line configuration).
  std::vector<vpPoint>& Tracker_:: get_points3D_inner(){
    return points3D_inner_;
  }
00108 
  // Accessor: 3D outer corners of the model (from the command line configuration).
  std::vector<vpPoint>& Tracker_:: get_points3D_outer(){
    return points3D_outer_;

  }
00113 
  // Accessor: 3D checkpoints between inner and outer corners, built in the constructor
  // (only populated when ad-hoc recovery or checkpoint logging is enabled).
  std::vector<vpPoint>& Tracker_:: get_points3D_middle(){
    return points3D_middle_;
  }
00117 
  // Accessor: 3D corners of the flashcode itself; their 2D (x,y) projections are
  // refreshed by find_flashcode_pos().
  std::vector<vpPoint>& Tracker_:: get_flashcode(){
    return f_;
  }
00121 
  // Accessor: the last input image seen by the state machine (set by
  // find_flashcode_pos() / track_model()). Dereferences the stored pointer.
  vpImage<vpRGBa>& Tracker_:: get_I(){
    return *I_;
  }
00125 
  // Accessor: current camera parameters (updated from each input_ready event).
  vpCameraParameters& Tracker_:: get_cam(){
    return cam_;
  }
00129 
  // Accessor: the command-line configuration object.
  CmdLine& Tracker_:: get_cmd(){
    return cmd;
  }
00133 
  // Specialization: tracking bounding box as an OpenCV rectangle
  // (valid only after track_model() has set cvTrackingBox_init_).
  template<>
  const cv::Rect& Tracker_:: get_tracking_box<cv::Rect>(){
    return cvTrackingBox_;
  }
00138 
  // Specialization: tracking bounding box as a ViSP rectangle (mirror of cvTrackingBox_).
  template<>
  const vpRect& Tracker_:: get_tracking_box<vpRect>(){
    return vpTrackingBox_;
  }
00143 
  // State-machine guard: true when the user clicked in the display window
  // (non-blocking check on the event's image).
  bool Tracker_:: input_selected(input_ready const& evt){
    return vpDisplay::getClick(evt.I,false);
  }
00147 
00148 
  // State-machine guard: complement of input_selected().
  bool Tracker_:: no_input_selected(input_ready const& evt){
    return !input_selected(evt);
  }
00152 
  // State-machine guard: detect the flashcode (data-matrix / QR code) in the full image.
  // Returns true when a code is found (and, for ViSP >= 2.10.0 with a configured code
  // message, only when the matching message is found).
  bool Tracker_:: flashcode_detected(input_ready const& evt){
#if VISP_VERSION_INT < VP_VERSION_INT(2,10,0)
    //this->cam_ = evt.cam_;

    // Wrap the RGBa ViSP image buffer in a cv::Mat without copying.
    cv::Mat rgba = cv::Mat((int)evt.I.getRows(), (int)evt.I.getCols(), CV_8UC4, (void*)evt.I.bitmap);

    cv::Mat bgr = cv::Mat((int)evt.I.getRows(), (int)evt.I.getCols(), CV_8UC3);
    cv::Mat alpha((int)evt.I.getRows(), (int)evt.I.getCols(), CV_8UC1);

    cv::Mat out[] = {bgr, alpha};
    // Split RGBA into BGR + alpha:
    // rgba[0] -> bgr[2], rgba[1] -> bgr[1],
    // rgba[2] -> bgr[0], rgba[3] -> alpha[0]
    int from_to[] = { 0,2,  1,1,  2,0,  3,3 };
    cv::mixChannels(&rgba, 1, out, 2, from_to, 4);

    //vpImageConvert::convert(evt.I,bgr);

    // Full-frame detection with the configured data-matrix timeout, no offset.
    return detector_->detect(bgr,cmd.get_dmx_timeout(),0,0);
#else
    vpImageConvert::convert(evt.I, Igray_);
    detector_->detect(Igray_);
    if (detector_->getNbObjects()) {
      if (cmd.get_code_message().empty()) {
        cmd.set_code_message_index(0); // we retain the largest code at index 0
        return true;
      }
      else {
        // A specific code message was requested: search all detected codes for it.
        for (size_t i=0; i<detector_->getNbObjects(); i++) {
          if (detector_->getMessage(i) == cmd.get_code_message()) {
            cmd.set_code_message_index(i);
            ROS_WARN_STREAM("Code with message \"" << cmd.get_code_message() << "\" found");
            return true;
          }
        }
        ROS_WARN_STREAM("Code with message \"" << cmd.get_code_message() << "\" not found");
        return false;
      }
    }
    return false;
#endif
  }
00194 
  /*
   * State-machine guard: re-detect the flashcode after tracking was lost.
   * For ViSP < 2.10.0 the search is restricted to the last known tracking box
   * (when one exists) and the timeout is the default timeout scaled by the
   * box-to-image surface ratio. For ViSP >= 2.10.0 the whole image is searched.
   */
  bool Tracker_:: flashcode_redetected(input_ready const& evt){
#if VISP_VERSION_INT < VP_VERSION_INT(2,10,0)
    //this->cam_ = evt.cam_;

    //vpImageConvert::convert(evt.I,cvI);
    // Wrap the RGBa ViSP image buffer in a cv::Mat without copying.
    cv::Mat rgba = cv::Mat((int)evt.I.getRows(), (int)evt.I.getCols(), CV_8UC4, (void*)evt.I.bitmap);

    cv::Mat bgr = cv::Mat((int)evt.I.getRows(), (int)evt.I.getCols(), CV_8UC3);
    cv::Mat alpha((int)evt.I.getRows(), (int)evt.I.getCols(), CV_8UC1);

    cv::Mat out[] = {bgr, alpha};
    // Split RGBA into BGR + alpha:
    // rgba[0] -> bgr[2], rgba[1] -> bgr[1],
    // rgba[2] -> bgr[0], rgba[3] -> alpha[0]
    int from_to[] = { 0,2,  1,1,  2,0,  3,3 };
    cv::mixChannels(&rgba, 1, out, 2, from_to, 4);

    if (cvTrackingBox_init_)
    {
      // Search only the previous tracking box; clone() because the ROI must be contiguous.
      cv::Mat subImage = cv::Mat(bgr,get_tracking_box<cv::Rect>()).clone();

      // Scale the timeout by the ROI / full-image area ratio.
      double timeout = cmd.get_dmx_timeout()*(double)(get_tracking_box<cv::Rect>().width*get_tracking_box<cv::Rect>().height)/(double)(bgr.cols*bgr.rows);
      return detector_->detect(subImage,(unsigned int)timeout,get_tracking_box<cv::Rect>().x,get_tracking_box<cv::Rect>().y);
    }
    else
    {
      return detector_->detect(bgr,cmd.get_dmx_timeout(),0,0);
    }
#else
    // TODO, use boundig box as for ViSP < 2.10.0
    vpImageConvert::convert(evt.I, Igray_);
    detector_->detect(Igray_);
    if (detector_->getNbObjects()) {
      if (cmd.get_code_message().empty()) {
        cmd.set_code_message_index(0); // we retain the largest code at index 0
        return true;
      }
      else {
        // A specific code message was requested: search all detected codes for it.
        for (size_t i=0; i<detector_->getNbObjects(); i++) {
          if (detector_->getMessage(i) == cmd.get_code_message()) {
            cmd.set_code_message_index(i);
            ROS_WARN_STREAM("Code with message \"" << cmd.get_code_message() << "\" found");
            return true;
          }
        }
        ROS_WARN_STREAM("Code with message \"" << cmd.get_code_message() << "\" not found");
        return false;
      }
    }
    return false;
#endif
  }
00250 
  // State-machine action: convert the detected flashcode corners from pixel to
  // normalized meter coordinates and store them in f_ (used later for pose
  // computation in model_detected()). Also records the current image pointer.
  void Tracker_:: find_flashcode_pos(input_ready const& evt){
    this->cam_ = evt.cam_;

#if VISP_VERSION_INT < VP_VERSION_INT(2,10,0)
    std::vector<cv::Point> polygon = detector_->get_polygon();
    double centerX = (double)(polygon[0].x+polygon[1].x+polygon[2].x+polygon[3].x)/4.;
    double centerY = (double)(polygon[0].y+polygon[1].y+polygon[2].y+polygon[3].y)/4.;
    vpPixelMeterConversion::convertPoint(cam_, flashcode_center_, centerX, centerY);

    // NOTE(review): loop bound is f_.size() but polygon comes from the detector —
    // this assumes both contain exactly 4 corners; verify against the detector contract.
    for(unsigned int i=0;i<f_.size();i++){
      double x=0, y=0;
      vpImagePoint poly_pt(polygon[i].y,polygon[i].x);

      vpPixelMeterConversion::convertPoint(cam_, poly_pt, x, y);
      f_[i].set_x(x);
      f_[i].set_y(y);
    }
#else
    // TODO: add a parameter to be able to select the QRcode from it's message
    // For the moment we get the position of the first code that is the largest in the image
    std::vector< std::vector< vpImagePoint > > polygons = detector_->getPolygon();
    std::vector< vpImagePoint > polygon(4);
    if (polygons.size())
      polygon = polygons[0];

    // TODO: remove flashcode_center_, centerX, centerY that are not used
    //double centerX = cog.get_u();
    //double centerY = cog.get_v();
    //vpPixelMeterConversion::convertPoint(cam_, flashcode_center_, centerX, centerY);

    // NOTE(review): if polygons is empty, default-constructed vpImagePoints are
    // converted here — presumably this guard is reached only after a successful
    // detection; confirm against the state machine.
    for(unsigned int i=0;i<f_.size();i++){
      double x=0, y=0;

      vpPixelMeterConversion::convertPoint(cam_, polygon[i], x, y);
      f_[i].set_x(x);
      f_[i].set_y(y);
    }
#endif

    // Remember the image for subsequent states (model_detected(), display).
    I_ = _I = &(evt.I);
  }
00292 
00293 
  // State-machine guard: compute an initial pose from the flashcode corners and
  // (re)initialize the model-based tracker from it. Returns false when either
  // pose computation or the initial tracking iterations fail.
  bool Tracker_:: model_detected(msm::front::none const&){
    vpImageConvert::convert(*I_,Igray_);
    vpPose pose;

    // The 2D/3D correspondences were filled in by find_flashcode_pos().
    for(unsigned int i=0;i<f_.size();i++)
      pose.addPoint(f_[i]);

    try {
      // Run both linear pose estimators and keep the one with the smaller residual,
      // then refine it with virtual visual servoing.
      vpHomogeneousMatrix cMo_dem;
      vpHomogeneousMatrix cMo_lag;
      pose.computePose(vpPose::DEMENTHON, cMo_dem);
      pose.computePose(vpPose::LAGRANGE, cMo_lag);
      double residual_dem = pose.computeResidual(cMo_dem);
      double residual_lag = pose.computeResidual(cMo_lag);
      if (residual_dem < residual_lag)
        cMo_ = cMo_dem;
      else
        cMo_ = cMo_lag;
      pose.computePose(vpPose::VIRTUAL_VS,cMo_);
      //vpDisplay::displayFrame(*I_,cMo_,cam_,0.01,vpColor::none,2);
    }
    catch(vpException& e) {
      std::cout << "Pose computation failed: " << e.getStringMessage() << std::endl;
      return false;
    }

    // Project the model corners with the estimated pose (also refreshes the middle
    // checkpoints used by ad-hoc recovery), mostly for logging/diagnostics here.
    std::vector<vpImagePoint> model_inner_corner(4);
    std::vector<vpImagePoint> model_outer_corner(4);
    for(unsigned int i=0;i<4;i++){
      points3D_outer_[i].project(cMo_);
      points3D_inner_[i].project(cMo_);
      if(cmd.using_adhoc_recovery() || cmd.log_checkpoints())
        points3D_middle_[i].project(cMo_);
      vpMeterPixelConversion::convertPoint(cam_,points3D_outer_[i].get_x(),points3D_outer_[i].get_y(),model_outer_corner[i]);
      vpMeterPixelConversion::convertPoint(cam_,points3D_inner_[i].get_x(),points3D_inner_[i].get_y(),model_inner_corner[i]);

      if(cmd.get_verbose()){
        std::cout << "model inner corner: (" << model_inner_corner[i].get_i() << "," << model_inner_corner[i].get_j() << ")" << std::endl;
      }
    }

    try{
      // Fully reset the tracker and re-init from the estimated pose.
      tracker_->resetTracker();
      tracker_->loadConfigFile(cmd.get_xml_file() );
      tracker_->loadModel(cmd.get_mbt_cad_file()); // load the 3d model, to read .wrl model the 3d party library coin is required, if coin is not installed .cao file can be used.
      tracker_->setCameraParameters(cam_);
      {
          vpCameraParameters cam;
          tracker_->getCameraParameters(cam);
          // NOTE(review): 558 looks like a hard-coded debug check for a specific
          // camera's px value — confirm intent; logging only, no behavior change.
          if (cam.get_px() != 558) ROS_INFO_STREAM("detection Camera parameters: \n" << cam_);
      }

      tracker_->initFromPose(Igray_,cMo_);

      tracker_->track(Igray_); // track the object on this image
      tracker_->getPose(cMo_); // get the pose
      tracker_->setCovarianceComputation(true);
      // Extra iterations to let the pose converge before declaring success.
      for(int i=0;i<cmd.get_mbt_convergence_steps();i++){
        tracker_->track(Igray_); // track the object on this image
        tracker_->getPose(cMo_); // get the pose
      }
    }catch(vpException& e){
      std::cout << "Tracking failed" << std::endl;
      std::cout << e.getStringMessage() << std::endl;
      return false;
    }
    return true;
  }
00362 
  // State-machine guard: run one tracking iteration and decide whether tracking
  // is still healthy. Returns false (-> lost / re-detection) when the covariance
  // exceeds the configured limit, a Hinkley jump is detected, or the ad-hoc
  // checkpoint median exceeds its threshold. Also logs variances/pose/checkpoints
  // to the var file and accumulates statistics.
  bool Tracker_:: mbt_success(input_ready const& evt){
    iter_ = evt.frame;
    this->cam_ = evt.cam_;

    try{
      LogFileWriter writer(varfile_); //the destructor of this class will act as a finally statement
      vpImageConvert::convert(evt.I,Igray_);

      tracker_->track(Igray_); // track the object on this image
      tracker_->getPose(cMo_);
      covariance_ = tracker_->getCovarianceMatrix();
      if(cmd.using_var_file()){
        writer.write(iter_);
        for(unsigned int i=0;i<covariance_.getRows();i++)
          writer.write(covariance_[i][i]);
      }
      // Hard variance limit: any diagonal element above the limit means failure.
      if(cmd.using_var_limit())
        for(unsigned int i=0; i<6; i++)
          if(covariance_[i][i]>cmd.get_var_limit())
            return false;
      // Hinkley test: a detected jump in any variance component means failure.
      if(cmd.using_hinkley())
        for(unsigned int i=0; i<6; i++){
          if(hink_[i].testDownUpwardJump(covariance_[i][i]) != vpHinkley::noJump){
            writer.write(covariance_[i][i]);
            if(cmd.get_verbose())
              std::cout << "Hinkley:detected jump!" << std::endl;
            return false;
          }
        }
      if(cmd.using_var_file() && cmd.using_mbt_dynamic_range())
        writer.write(tracker_me_config_.getRange());

      for(unsigned int i=0;i<covariance_.getRows();i++) {
        statistics.var(covariance_[i][i]);
      }

      if(covariance_.getRows() == 6){ //if the covariance matrix is set
        statistics.var_x(covariance_[0][0]);
        statistics.var_y(covariance_[1][1]);
        statistics.var_z(covariance_[2][2]);
        statistics.var_wx(covariance_[3][3]);
        statistics.var_wy(covariance_[4][4]);
        statistics.var_wz(covariance_[5][5]);
      }

      if(cmd.using_var_file() && cmd.log_pose()){
        vpPoseVector p(cMo_);
        for(unsigned int i=0;i<p.getRows();i++)
          writer.write(p[i]);
      }

      // Ad-hoc recovery: sample pixel intensities around each "middle" checkpoint;
      // a bright median (above threshold) suggests the model drifted off the target.
      if(cmd.using_adhoc_recovery() || cmd.log_checkpoints()){
        for(unsigned int p=0;p<points3D_middle_.size();p++){
          vpPoint& point3D = points3D_middle_[p];

          double _u=0.,_v=0.,_u_inner=0.,_v_inner=0.;
          point3D.project(cMo_);
          vpMeterPixelConversion::convertPoint(cam_,point3D.get_x(),point3D.get_y(),_u,_v);
          vpMeterPixelConversion::convertPoint(cam_,points3D_inner_[p].get_x(),points3D_inner_[p].get_y(),_u_inner,_v_inner);

          // P^2 median accumulator over the sampled gray values.
          boost::accumulators::accumulator_set<
                  unsigned char,
                  boost::accumulators::stats<
                    boost::accumulators::tag::median(boost::accumulators::with_p_square_quantile)
                  >
                > acc;

          // Sampling window scaled from the checkpoint-to-inner-corner distance,
          // clamped to the image bounds; at least 1 pixel in each direction.
          int region_width= std::max((int)(std::abs(_u-_u_inner)*cmd.get_adhoc_recovery_size()),1);
          int region_height=std::max((int)(std::abs(_v-_v_inner)*cmd.get_adhoc_recovery_size()),1);
          int u=(int)_u;
          int v=(int)_v;
          for(int i=std::max(u-region_width,0);
              i<std::min(u+region_width,(int)evt.I.getWidth());
              i++){
            for(int j=std::max(v-region_height,0);
                j<std::min(v+region_height,(int)evt.I.getHeight());
                j++){
              acc(Igray_[j][i]);
              statistics.checkpoints(Igray_[j][i]);
            }
          }
          double checkpoints_median = boost::accumulators::median(acc);
          if(cmd.using_var_file() && cmd.log_checkpoints())
            writer.write(checkpoints_median);
          if( cmd.using_adhoc_recovery() && (unsigned int)checkpoints_median>cmd.get_adhoc_recovery_treshold() )
            return false;
        }
      }
    }catch(vpException& e){
      std::cout << "Tracking lost" << std::endl;
      return false;
    }
    return true;
  }
00457 
00458   void Tracker_:: track_model(input_ready const& evt){
00459     this->cam_ = evt.cam_;
00460 
00461     std::vector<cv::Point> points;
00462     I_ = _I = &(evt.I);
00463     vpImageConvert::convert(evt.I,Igray_);
00464 
00465     boost::accumulators::accumulator_set<
00466                       double,
00467                       boost::accumulators::stats<
00468                         boost::accumulators::tag::mean
00469                       >
00470                     > acc;
00471 
00472     for(unsigned int i=0;i<points3D_outer_.size();i++){
00473       points3D_outer_[i].project(cMo_);
00474       points3D_inner_[i].project(cMo_);
00475 
00476       double u=0.,v=0.,u_inner=0.,v_inner=0;
00477       vpMeterPixelConversion::convertPoint(cam_,points3D_outer_[i].get_x(),points3D_outer_[i].get_y(),u,v);
00478       vpMeterPixelConversion::convertPoint(cam_,points3D_inner_[i].get_x(),points3D_inner_[i].get_y(),u_inner,v_inner);
00479 
00480       acc(std::abs(u-u_inner));
00481       acc(std::abs(v-v_inner));
00482 
00483       // To avoid OpenCV exception that may occur when creating cv::boundingRect() from the points,
00484       // we ensure that the coordinates of the points remain in the image.
00485       u = std::max(u,0.);
00486       u = std::min(u,(double)evt.I.getWidth()-1);
00487       v = std::max(v,0.);
00488       v = std::min(v,(double)evt.I.getHeight()-1);
00489       points.push_back(cv::Point(u,v));
00490     }
00491 
00492     if(cmd.using_mbt_dynamic_range()){
00493       int range = (const unsigned int)(boost::accumulators::mean(acc)*cmd.get_mbt_dynamic_range());
00494 
00495       vpMbEdgeTracker *tracker_me = dynamic_cast<vpMbEdgeTracker*>(tracker_);
00496       if(tracker_me){
00497         tracker_me->getMovingEdge(tracker_me_config_);
00498         tracker_me_config_.setRange(range);
00499         tracker_me->setMovingEdge(tracker_me_config_);
00500       }else
00501         std::cout << "error: could not init moving edges on tracker that doesn't support them." << std::endl;
00502     }
00503     cvTrackingBox_init_ = true;
00504     cvTrackingBox_ = cv::boundingRect(cv::Mat(points));
00505 
00506     vpTrackingBox_.setRect(cvTrackingBox_.x,cvTrackingBox_.y,cvTrackingBox_.width,cvTrackingBox_.height);
00507   }
00508 
  // Accessor: accumulated tracking statistics (variances, checkpoint samples).
  Tracker_::statistics_t& Tracker_:: get_statistics(){
    return statistics;
  }
00512 
  // Setter: enable/disable flushing of the display after drawing.
  void Tracker_:: set_flush_display(bool val){
    flush_display_ = val;
  }
00516 
  // Getter: whether the display is flushed after drawing.
  bool Tracker_:: get_flush_display(){
    return flush_display_;
  }
00520 
00521   void
00522   Tracker_::updateMovingEdgeSites(visp_tracker::MovingEdgeSitesPtr sites)
00523   {
00524     if (!sites)
00525       return;
00526 
00527     std::list<vpMbtDistanceLine*> linesList;
00528 
00529     if(cmd.get_tracker_type() != CmdLine::KLT) // For mbt and hybrid
00530       dynamic_cast<vpMbEdgeTracker*>(tracker_)->getLline(linesList, 0);
00531 
00532     std::list<vpMbtDistanceLine*>::iterator linesIterator = linesList.begin();
00533     if (linesList.empty())
00534       ROS_DEBUG_THROTTLE(10, "no distance lines");
00535     bool noVisibleLine = true;
00536     for (; linesIterator != linesList.end(); ++linesIterator)
00537     {
00538       vpMbtDistanceLine* line = *linesIterator;
00539 
00540       if (line && line->isVisible() && line->meline)
00541       {
00542         if (line->meline->list.empty()) {
00543           ROS_DEBUG_THROTTLE(10, "no moving edge for a line");
00544         }
00545 
00546         std::list<vpMeSite>::const_iterator sitesIterator = line->meline->list.begin();
00547 
00548         for (; sitesIterator != line->meline->list.end(); ++sitesIterator)
00549         {
00550           visp_tracker::MovingEdgeSite movingEdgeSite;
00551           movingEdgeSite.x = sitesIterator->ifloat;
00552           movingEdgeSite.y = sitesIterator->jfloat;
00553           movingEdgeSite.suppress = sitesIterator->suppress;
00554           sites->moving_edge_sites.push_back (movingEdgeSite);
00555         }
00556         noVisibleLine = false;
00557 
00558       }
00559     }
00560     if (noVisibleLine)
00561       ROS_DEBUG_THROTTLE(10, "no distance lines");
00562   }
00563 
00564   void
00565   Tracker_::updateKltPoints(visp_tracker::KltPointsPtr klt)
00566   {
00567     if (!klt)
00568       return;
00569 
00570 #if VISP_VERSION_INT < VP_VERSION_INT(2,10,0)// ViSP < 2.10.0
00571     vpMbHiddenFaces<vpMbtKltPolygon> *poly_lst;
00572     std::map<int, vpImagePoint> *map_klt;
00573 
00574     if(cmd.get_tracker_type() != CmdLine::MBT) // For klt and hybrid
00575       poly_lst = &dynamic_cast<vpMbKltTracker*>(tracker_)->getFaces();
00576 
00577     for(unsigned int i = 0 ; i < poly_lst->size() ; i++)
00578     {
00579       if((*poly_lst)[i])
00580       {
00581         map_klt = &((*poly_lst)[i]->getCurrentPoints());
00582 
00583         if(map_klt->size() > 3)
00584         {
00585           for (std::map<int, vpImagePoint>::iterator it=map_klt->begin(); it!=map_klt->end(); ++it)
00586           {
00587             visp_tracker::KltPoint kltPoint;
00588             kltPoint.id = it->first;
00589             kltPoint.i = it->second.get_i();
00590             kltPoint.j = it->second.get_j();
00591             klt->klt_points_positions.push_back (kltPoint);
00592           }
00593         }
00594       }
00595     }
00596 #else // ViSP >= 2.10.0
00597     std::list<vpMbtDistanceKltPoints*> *poly_lst;
00598     std::map<int, vpImagePoint> *map_klt;
00599 
00600     if(cmd.get_tracker_type() != CmdLine::MBT) { // For klt and hybrid
00601       poly_lst = &dynamic_cast<vpMbKltTracker*>(tracker_)->getFeaturesKlt();
00602 
00603       for(std::list<vpMbtDistanceKltPoints*>::const_iterator it=poly_lst->begin(); it!=poly_lst->end(); ++it){
00604         map_klt = &((*it)->getCurrentPoints());
00605 
00606         if((*it)->polygon->isVisible()){
00607         if(map_klt->size() > 3)
00608           {
00609             for (std::map<int, vpImagePoint>::iterator it=map_klt->begin(); it!=map_klt->end(); ++it)
00610             {
00611               visp_tracker::KltPoint kltPoint;
00612               kltPoint.id = it->first;
00613               kltPoint.i = it->second.get_i();
00614               kltPoint.j = it->second.get_j();
00615               klt->klt_points_positions.push_back (kltPoint);
00616             }
00617           }
00618         }
00619       }
00620     }
00621 #endif
00622   }
00623 }
00624 


visp_auto_tracker
Author(s): Filip Novotny
autogenerated on Fri Aug 28 2015 13:36:44