PointFeatureTracker.cpp
#include "feature_tracker/PointFeatureTracker.h"

#include "feature_tracker/FeatureTracker.h"

#include <pcl_ros/publisher.h>
#include <opencv2/imgproc/imgproc.hpp>
//#include <opencv2/gpu/gpu.hpp>
#include <opencv2/video/tracking.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <sensor_msgs/image_encodings.h>
#include <sensor_msgs/JointState.h>
#include <sensor_msgs/CameraInfo.h>

#include <rosbag/bag.h>
#include <rosbag/view.h>

#include <boost/foreach.hpp>

#include <pcl_conversions/pcl_conversions.h>

#include <cmath>
#include <limits>

#if ROS_VERSION_MINIMUM(1, 11, 1) // if current ros version is >= 1.11.1 (indigo)
#else
#include "feature_tracker/pcl_conversions_indigo.h"
#endif

#define EROSION_SIZE_DETECTING 15
#define EROSION_SIZE_TRACKING 3

#define MAXIMUM_TIME_TO_USE_A_PREDICTION 0.05

using namespace omip;

PointFeatureTracker::PointFeatureTracker(double loop_period_ns, bool using_pc, std::string ft_ns) :
    FeatureTracker(loop_period_ns),
    _using_pc(using_pc),
    _first_frame(true),
    _occlusion_msk_img_rcvd(false),
    _predicted_state_rcvd(false),
    _predicted_state_processed(true),
    _camera_info_msg_rcvd(false),
    _erosion_size_detecting(-1),
    _erosion_size_tracking(-1),
    _canny_kernel_size(-1),
    _canny_ratio(-1),
    _number_of_features(-1),
    _min_number_of_features(-1),
    _frame_counter(1),
    _feature_id_counter(1),
    _max_allowed_depth(-1.f),
    _min_allowed_depth(-1.f),
    _canny_low_threshold(-1),
    _max_interframe_jump(0.f),
    _ft_ns(ft_ns),
    _publishing_predicted_and_past_feats_img(false),
    _publishing_tracked_feats_img(false),
    _publishing_tracked_feats_with_pred_msk_img(false),
    _sensor_fps(0.0),
    _processing_factor(0),
    _attention_to_motion(false),
    _use_motion_mask(false),
    _min_feat_quality(0.001),
    _predicted_state_rcvd_time_ns(0),
    _so_positive(false)
{
    this->_ReadParameters();
    this->_InitializeVariables();

    _features_file.open("features.txt");
}

PointFeatureTracker::~PointFeatureTracker()
{
    _features_file.close();
}

void PointFeatureTracker::setDynamicReconfigureValues(feature_tracker::FeatureTrackerDynReconfConfig &config)
{
    this->_publishing_predicted_and_past_feats_img = config.pub_predicted_and_past_feats_img;
}

void PointFeatureTracker::predictState(double time_interval_ns)
{
    // The predicted state based on the current state is the current state itself (continuity prior: the features in the
    // current step will be very close to where they were in the step before)
    this->_predicted_states[0] = this->_state;
}

void PointFeatureTracker::predictMeasurement()
{
    // We have two predictions for the measurements, one using each predicted state

    // First predicted measurement:
    // Uses the first predicted state, which is the same as the previous state (no feature motion).
    // Therefore, we could project the 3D features back to the image, but for the sake of accuracy and computation time, we just take
    // the previous "refined" measurement
    this->_predicted_measurements[0] = this->_corrected_measurement;

    // Second predicted measurement:
    // If we received from the higher level (RBT) a prediction about the next feature 3D locations, we project them to the image plane

    // By copying the previous vector of 2D feature locations we ensure an initial value for tracking the features
    this->_predicted_measurements[1] = this->_corrected_measurement;

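    // Use the higher-level prediction only if it is fresh: its timestamp must not be more
    // than half a loop period ahead of the current measurement's timestamp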
    if (!this->_first_frame && this->_camera_info_msg_rcvd && this->_predicted_states.size() > 1 && this->_predicted_states[1] &&
            pcl_conversions::fromPCL((double)this->_predicted_states[1]->header.stamp).toNSec() - this->_measurement_timestamp_ns < this->_loop_period_ns/2.0)
    {
        // Initialize variables
        this->_received_prediction_ids.clear();
        this->_predicted_feats_msk_mat = cv::Scalar(0);
        Eigen::Vector4d predicted_feat_3d_eigen = Eigen::Vector4d(0., 0., 0., 1.);
        Eigen::Vector3d predicted_feat_2d_eigen = Eigen::Vector3d(0., 0., 0.);
        cv::Point predicted_feat_2d_cv_int = cv::Point(0, 0);
        cv::Point2f predicted_feat_2d_cv_f = cv::Point2f(0.f, 0.f);
        double predicted_distance_to_sensor = 0.;
        int correction_search_area_radius = 0;

        BOOST_FOREACH(FeaturePCLwc predicted_feat_3d_pcl, this->_predicted_states[1]->points)
        {
            // Find the index of this feature in the vector of current features
            int predicted_feat_idx = std::find(this->_current_feat_ids_v.begin(), this->_current_feat_ids_v.end(), predicted_feat_3d_pcl.label) - this->_current_feat_ids_v.begin();

            // There is a current point with this idx
            if (predicted_feat_idx < this->_number_of_features)
            {
                this->_received_prediction_ids.push_back(predicted_feat_3d_pcl.label);
                // Convert the predicted feature from a PCL point into an Eigen vector in homogeneous coordinates
                predicted_feat_3d_eigen.x() = predicted_feat_3d_pcl.x;
                predicted_feat_3d_eigen.y() = predicted_feat_3d_pcl.y;
                predicted_feat_3d_eigen.z() = predicted_feat_3d_pcl.z;

                // Estimate the distance of the predicted feature to the sensor
                predicted_distance_to_sensor = sqrt(predicted_feat_3d_pcl.x * predicted_feat_3d_pcl.x +
                                                    predicted_feat_3d_pcl.y * predicted_feat_3d_pcl.y +
                                                    predicted_feat_3d_pcl.z * predicted_feat_3d_pcl.z);

                // Project the predicted feature from 3D space into the image plane
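                // With P the 3x4 projection matrix of the camera, the homogeneous image
                // coordinates are [u', v', w']^T = P * [X, Y, Z, 1]^T, and the pixel
                // coordinates follow by dehomogenizing: (u, v) = (u'/w', v'/w')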
                predicted_feat_2d_eigen = this->_image_plane_proj_mat_eigen * predicted_feat_3d_eigen;

                // Convert the predicted feature from image coordinates to pixel coordinates
                predicted_feat_2d_cv_f.x = predicted_feat_2d_eigen.x() / predicted_feat_2d_eigen.z();
                predicted_feat_2d_cv_f.y = predicted_feat_2d_eigen.y() / predicted_feat_2d_eigen.z();

                // Estimate the pixel (round the float pixel coordinates to the closest integer value)
                predicted_feat_2d_cv_int.x = cvRound(predicted_feat_2d_cv_f.x);
                predicted_feat_2d_cv_int.y = cvRound(predicted_feat_2d_cv_f.y);

                // If the predicted feature is inside the image limits:
                // 1: We store this feature as a prediction to help the tracker (it will use these locations as starting points)
                // 2: We clear a circular area around its location in the tracking mask. The tracker (as part of the correction of the predicted state)
                // will search for the feature only in this area of the image
                if (predicted_feat_2d_cv_int.x >= 0 && predicted_feat_2d_cv_int.x < (int)this->_camera_info_ptr->width
                        && predicted_feat_2d_cv_int.y >= 0 && predicted_feat_2d_cv_int.y < (int)this->_camera_info_ptr->height)
                {
                    this->_predicted_measurements[1][predicted_feat_idx] = predicted_feat_2d_cv_int;
                    // The radius of the circle in the mask is inversely proportional to the depth of the predicted location:
                    // Motion of close 3D points (small depth) translates into a large motion in the image plane -> large RGB search area for
                    // the feature
                    // Motion of distant 3D points (large depth) translates into a smaller motion in the image plane -> small RGB search area
                    // for the feature
                    // TODO: There are two other variables that should be taken into account when defining the search area:
                    //        1: Uncertainty in the RBM: If the motion is very uncertain, the search area should be larger. If the motion is
                    //        very certain, the feature location should be very close to the predicted one
                    //        2: Velocity of the RB: Large velocities are more difficult to track, and thus the search area should be larger.
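                    // Example: a feature predicted at 0.5m depth gets a search radius of
                    // max(3, (int)(8/0.5)) = 16 pixels, while one predicted at 4m gets max(3, 2) = 3 pixels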
                    correction_search_area_radius = std::max(3, (int)(8.0 / predicted_distance_to_sensor));
                    cv::circle(this->_predicted_feats_msk_mat, predicted_feat_2d_cv_int, correction_search_area_radius, CV_RGB(255, 255, 255), -1, 8, 0);
                }
            }
        }
    }
}

void PointFeatureTracker::correctState()
{
    this->_ProcessOcclusionMaskImg();

    this->_ProcessDepthImg();

    this->_ProcessRGBImg();

    if (this->_first_frame)
    {
        // Detect new features in the image
        this->_DetectNewFeatures();
        this->_first_frame = false;
    }
    else
    {
        this->_TrackFeatures();
    }

    // Copy the result
    this->_previous_measurement = this->_corrected_measurement;

    this->_previous_feat_ids_v = this->_current_feat_ids_v;
    this->_previous_bw_img_ptr = this->_current_bw_img_ptr;
    this->_previous_belief_state = this->_corrected_belief_state;

    for (size_t num_feats = 0; num_feats < (size_t)this->_number_of_features; num_feats++)
    {
        this->_state->points[num_feats].label = this->_current_feat_ids_v[num_feats];
        this->_state->points[num_feats].x = this->_corrected_belief_state[num_feats].x;
        this->_state->points[num_feats].y = this->_corrected_belief_state[num_feats].y;
        this->_state->points[num_feats].z = this->_corrected_belief_state[num_feats].z;

        // Covariance
        //
    }
    this->_frame_counter++;
}

void PointFeatureTracker::addPredictedState(const ft_state_t &predicted_state, const double& predicted_state_timestamp_ns)
{
    this->_predicted_states[1] = predicted_state;
    this->_predicted_state_rcvd_time_ns = predicted_state_timestamp_ns;
}

void PointFeatureTracker::setFullRGBDPC(PointCloudPCL::ConstPtr full_rgb_pc)
{
    this->_received_point_cloud = full_rgb_pc;
}

void PointFeatureTracker::setOcclusionMaskImg(cv_bridge::CvImagePtr occ_mask_img)
{
    this->_occlusion_msk_img_ptr = occ_mask_img;
    this->_occlusion_msk_img_rcvd = true;
}

void PointFeatureTracker::_ProcessDepthImg()
{
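    // Pipeline: normalize the depth image to 8 bits within [minVal, maxVal], build a binary
    // mask of the valid depth values, detect Canny edges on the blurred depth image, invert
    // them, and AND both masks. The result marks the regions where features can be detected
    // and tracked reliably (away from depth edges and invalid measurements)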
    this->_depth_img_ptr = this->_measurement.second;

    // Convert from sensor_msg to cv image
    // 1) Create matrices
    this->_depth_mat = cv::Mat(this->_depth_img_ptr->image.rows, this->_depth_img_ptr->image.cols, CV_8UC1);
    this->_aux1_mat = cv::Mat(this->_depth_img_ptr->image.rows, this->_depth_img_ptr->image.cols, CV_8UC1);

    // 2) Find the minimum and maximum distances
    // Exclude points that contain infinity values
    double minVal, maxVal;
    cv::Point minpt, maxpt;

#if(CV_MAJOR_VERSION == 2)
    if(this->_depth_img_ptr->encoding == "32FC1")
    {
        float infinity = std::numeric_limits<float>::infinity();
        cv::minMaxLoc(this->_depth_img_ptr->image, &minVal, &maxVal, &minpt, &maxpt, this->_depth_img_ptr->image != infinity);
    }else{
        // Note: integer types have no infinity, so std::numeric_limits<uint16_t>::infinity()
        // returns 0 and the mask excludes the zero-valued (invalid) depth pixels
        uint16_t infinity = std::numeric_limits<uint16_t>::infinity();
        cv::minMaxLoc(this->_depth_img_ptr->image, &minVal, &maxVal, &minpt, &maxpt, this->_depth_img_ptr->image != infinity);
    }
#elif(CV_MAJOR_VERSION == 3)
    // The mask (image == image) is false only for NaN pixels (NaN != NaN)
    cv::minMaxLoc(this->_depth_img_ptr->image, &minVal, &maxVal, &minpt, &maxpt, cv::Mat(this->_depth_img_ptr->image == this->_depth_img_ptr->image) );
#else
#error CV major version is not 2 or 3
#endif

    // If we set a ROI we use it
    if(this->_max_allowed_depth > 0)
    {
        maxVal = std::min(maxVal, this->_max_allowed_depth);
    }

    // TODO: Sometimes there are noisy points very close to the sensor. If we use the minimum of the image we will span
    // the values to this minimum value -> ignore it
    // If we set a ROI we use it
    if(this->_min_allowed_depth > 0)
    {
        minVal = std::max(this->_min_allowed_depth, minVal);
    }

    this->_depth_img_ptr->image.convertTo(this->_aux1_mat, CV_8U, 255.0/(maxVal - minVal), -minVal * 255.0/(maxVal - minVal));

    this->_aux1_mat.setTo(0, this->_depth_img_ptr->image > maxVal);

    this->_aux1_mat.setTo(0, this->_depth_img_ptr->image < minVal);

    // Filter OUT large distances, disappeared points and edges
    // _depth_mat contains 255 for the values that are not 0
    cv::threshold(this->_aux1_mat, this->_depth_mat, 0, 255, cv::THRESH_BINARY);

    // Reduce noise with a 3x3 kernel
    cv::blur(this->_aux1_mat, this->_aux1_mat, cv::Size(3, 3));

    // Detect Canny edges
    cv::Canny(this->_aux1_mat, this->_canny_edges_mat,
              this->_canny_low_threshold,
              this->_canny_low_threshold * this->_canny_ratio,
              this->_canny_kernel_size, false);

    // Invert the resulting edges: edges are 0, non-edges are 255
    cv::threshold(this->_canny_edges_mat, this->_canny_edges_mat, 0, 255, cv::THRESH_BINARY_INV);

    cv::bitwise_and(this->_depth_mat, this->_canny_edges_mat, this->_feats_detection_msk_mat);

    // If the occlusion mask was received, we merge it with the estimated mask
    if (this->_occlusion_msk_img_rcvd)
    {
        this->_feats_detection_msk_mat = this->_feats_detection_msk_mat & _occlusion_msk_mat;
    }

    // Erode EROSION_SIZE_TRACKING pixels for the tracking mask
    cv::erode(this->_feats_detection_msk_mat, this->_feats_tracking_msk_mat, this->_erosion_element_tracking_mat);

    for (size_t n = 0; n < this->_corrected_measurement.size(); n++)
    {
        if (this->_feat_status_v[n])
        {
            cv::circle(this->_feats_detection_msk_mat, this->_corrected_measurement[n], 2, CV_RGB(0, 0, 0), -1, 8, 0);
        }
    }

    // Erode EROSION_SIZE_DETECTING pixels for the detecting mask
    cv::erode(this->_feats_detection_msk_mat, this->_feats_detection_msk_mat, this->_erosion_element_detecting_mat);

    // Focus the detection of new features on the areas of the image where the depth changed
    // TODO: Detect also areas of color changes
    if(this->_attention_to_motion)
    {
        // First time
        if(this->_previous_time_of_motion_detection == ros::Time(0))
        {
            this->_previous_depth_mat = this->_depth_img_ptr->image;
            this->_previous_time_of_motion_detection = ros::Time::now();
        }
        else
        {
            if((ros::Time::now() - this->_previous_time_of_motion_detection).toSec() > this->_min_time_to_detect_motion)
            {
                cv::Mat depth_difference;

                // Absolute difference between the previous and the current depth maps
                cv::absdiff(this->_depth_img_ptr->image, this->_previous_depth_mat, depth_difference);

                // Threshold the absolute difference with the minimum depth change that counts as motion
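                // (32FC1 depth images are in meters; 16UC1 depth images are in millimeters,
                // hence the factor of 1000 applied to the threshold below)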
                if(this->_depth_img_ptr->encoding == "32FC1")
                {
                    this->_depth_difference_mask = (depth_difference > (double)(this->_min_depth_difference));
                }else{
                    this->_depth_difference_mask = (depth_difference > (uint16_t)(this->_min_depth_difference*1000));
                }

                // Set to zero areas close to edges
                cv::bitwise_and(this->_feats_tracking_msk_mat, this->_depth_difference_mask, this->_depth_difference_mask);

                int area_size = cv::countNonZero(this->_depth_difference_mask);

                if(area_size > this->_min_area_size_pixels)
                {
                    cv::bitwise_and(this->_feats_detection_msk_mat, this->_depth_difference_mask, this->_feats_detection_msk_mat);
                    this->_use_motion_mask = true;
                }
                else
                {
                    this->_use_motion_mask = false;
                }

                this->_previous_time_of_motion_detection = ros::Time::now();
                this->_previous_depth_mat = this->_depth_img_ptr->image;
            }
            else if(this->_use_motion_mask)
            {
                cv::bitwise_and(this->_feats_detection_msk_mat, this->_depth_difference_mask, this->_feats_detection_msk_mat);
            }
        }
    }
}

void PointFeatureTracker::_ProcessRGBImg()
{
    this->_rgb_img_ptr = this->_measurement.first;
    this->_current_bw_img_ptr = cv_bridge::cvtColor(this->_rgb_img_ptr, "mono8");
    _measurement_timestamp_ns = (double)this->_rgb_img_ptr->header.stamp.toNSec();
}

void PointFeatureTracker::_ProcessOcclusionMaskImg()
{
    if (this->_occlusion_msk_img_rcvd)
    {
        // Create a mask by thresholding the image
        if(this->_so_positive)
            _occlusion_msk_mat = this->_occlusion_msk_img_ptr->image == 0;
        else
            _occlusion_msk_mat = this->_occlusion_msk_img_ptr->image != 0;
    }
}

void PointFeatureTracker::setCameraInfoMsg(const sensor_msgs::CameraInfo* camera_info)
{
    this->_camera_info_ptr = sensor_msgs::CameraInfo::Ptr(new sensor_msgs::CameraInfo(*camera_info));

    if(this->_predicted_feats_msk_mat.cols != (int)this->_camera_info_ptr->width || this->_predicted_feats_msk_mat.rows != (int)this->_camera_info_ptr->height)
    {
        ROS_ERROR_STREAM_NAMED("PointFeatureTracker.setCameraInfoMsg", "Resizing predicted features mask!");
        this->_predicted_feats_msk_mat = cv::Mat::zeros(this->_camera_info_ptr->height, this->_camera_info_ptr->width, CV_8U);
    }

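    // P is the 3x4 row-major projection matrix of the processed (rectified) image:
    //     [fx'  0  cx' Tx]
    // P = [ 0  fy' cy' Ty]
    //     [ 0   0   1   0]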
    this->_image_plane_proj_mat_eigen(0, 0) = this->_camera_info_ptr->P[0]; // For carmine + 32;
    this->_image_plane_proj_mat_eigen(0, 1) = this->_camera_info_ptr->P[1];
    this->_image_plane_proj_mat_eigen(0, 2) = this->_camera_info_ptr->P[2];
    this->_image_plane_proj_mat_eigen(0, 3) = this->_camera_info_ptr->P[3];
    this->_image_plane_proj_mat_eigen(1, 0) = this->_camera_info_ptr->P[4];
    this->_image_plane_proj_mat_eigen(1, 1) = this->_camera_info_ptr->P[5]; // For carmine + 32;
    this->_image_plane_proj_mat_eigen(1, 2) = this->_camera_info_ptr->P[6];
    this->_image_plane_proj_mat_eigen(1, 3) = this->_camera_info_ptr->P[7];
    this->_image_plane_proj_mat_eigen(2, 0) = this->_camera_info_ptr->P[8];
    this->_image_plane_proj_mat_eigen(2, 1) = this->_camera_info_ptr->P[9];
    this->_image_plane_proj_mat_eigen(2, 2) = this->_camera_info_ptr->P[10];
    this->_image_plane_proj_mat_eigen(2, 3) = this->_camera_info_ptr->P[11];
    this->_camera_info_msg_rcvd = true;
}

ft_state_t PointFeatureTracker::getState() const
{
    return this->_state;
}

cv_bridge::CvImagePtr PointFeatureTracker::getTrackedFeaturesImg()
{
    if(this->_publishing_tracked_feats_img && this->_rgb_img_ptr)
    {
        this->_rgb_img_ptr->image.copyTo(this->_tracked_features_img->image);
        this->_tracked_features_img->encoding = this->_rgb_img_ptr->encoding;
        this->_tracked_features_img->header.stamp = ros::Time::now();

        for (size_t tracked_feats_idx = 0; tracked_feats_idx < this->_corrected_measurement.size(); tracked_feats_idx++)
        {
            cv::Point p = cv::Point(cvRound(this->_corrected_measurement[tracked_feats_idx].x),
                                    cvRound(this->_corrected_measurement[tracked_feats_idx].y));

            // If the feature has been correctly tracked we paint a yellow cross
            if (this->_feat_status_v[tracked_feats_idx])
            {
                //cv::circle(this->_cv_ptr_rgb->image, p,3, CV_RGB(255, 255, 255), -1, 8, 0);
                cv::line(this->_tracked_features_img->image, cv::Point(p.x, p.y + 3),
                         cv::Point(p.x, p.y - 3), CV_RGB(0, 255, 255), 1, 8);
                cv::line(this->_tracked_features_img->image, cv::Point(p.x + 3, p.y),
                         cv::Point(p.x - 3, p.y), CV_RGB(0, 255, 255), 1, 8);
            }
            // If the feature has been lost, we paint a red circle
            else
            {
                cv::circle(this->_tracked_features_img->image, p, 5, CV_RGB(0, 0, 255), -1, 8, 0);
            }
        }
    }
    return this->_tracked_features_img;
}

cv_bridge::CvImagePtr PointFeatureTracker::getRGBImg()
{
    return this->_rgb_img_ptr;
}

cv_bridge::CvImagePtr PointFeatureTracker::getDepthImg()
{
    return this->_depth_img_ptr;
}

cv_bridge::CvImagePtr PointFeatureTracker::getTrackedFeaturesWithPredictionMaskImg()
{
    if(this->_publishing_tracked_feats_with_pred_msk_img)
    {
        this->_tracked_features_with_pred_msk_img->image.setTo(0);
        this->_tracked_features_with_pred_msk_img->header.stamp = ros::Time::now();
        this->_tracked_features_with_pred_msk_img->encoding = this->_tracked_features_img->encoding;
        this->_tracked_features_img->image.copyTo(this->_tracked_features_with_pred_msk_img->image, this->_predicted_feats_msk_mat);
    }
    return this->_tracked_features_with_pred_msk_img;
}

cv_bridge::CvImagePtr PointFeatureTracker::getDepthEdgesImg()
{
    this->_depth_edges_img->header.stamp = ros::Time::now();
    this->_depth_edges_img->encoding = "mono8";
    this->_depth_edges_img->image = this->_canny_edges_mat;
    return this->_depth_edges_img;
}

cv_bridge::CvImagePtr PointFeatureTracker::getPredictingMaskImg()
{
    this->_predicting_mask_img->header.stamp = ros::Time::now();
    this->_predicting_mask_img->encoding = "mono8";
    this->_predicting_mask_img->image = this->_predicted_feats_msk_mat;
    return this->_predicting_mask_img;
}

cv_bridge::CvImagePtr PointFeatureTracker::getTrackingMaskImg()
{
    this->_tracking_mask_img->header.stamp = ros::Time::now();
    this->_tracking_mask_img->encoding = "mono8";
    this->_tracking_mask_img->image = this->_feats_tracking_msk_mat;
    return this->_tracking_mask_img;
}

cv_bridge::CvImagePtr PointFeatureTracker::getDetectingMaskImg()
{
    this->_detecting_mask_img->header.stamp = ros::Time::now();
    this->_detecting_mask_img->encoding = "mono8";
    this->_detecting_mask_img->image = this->_feats_detection_msk_mat;
    return this->_detecting_mask_img;
}

cv_bridge::CvImagePtr PointFeatureTracker::getPredictedAndLastFeaturesImg()
{
    if(this->_publishing_predicted_and_past_feats_img && this->_rgb_img_ptr)
    {
        this->_rgb_img_ptr->image.copyTo(this->_predicted_and_previous_features_img->image);
        this->_predicted_and_previous_features_img->encoding = this->_rgb_img_ptr->encoding;
        this->_predicted_and_previous_features_img->header.stamp = ros::Time::now();

        for (size_t tracked_feats_idx = 0; tracked_feats_idx < this->_predicted_measurements[0].size(); tracked_feats_idx++)
        {
            cv::Point previous = cv::Point(cvRound(this->_predicted_measurements[0][tracked_feats_idx].x),
                    cvRound(this->_predicted_measurements[0][tracked_feats_idx].y));
            cv::Point predicted = cv::Point(cvRound(this->_predicted_measurements[1][tracked_feats_idx].x),
                    cvRound(this->_predicted_measurements[1][tracked_feats_idx].y));

            if (this->_feat_status_v[tracked_feats_idx])
            {
                //cv::circle(this->_cv_ptr_rgb->image, p,3, CV_RGB(255, 255, 255), -1, 8, 0);
                cv::line(this->_predicted_and_previous_features_img->image, cv::Point(previous.x, previous.y + 4),
                         cv::Point(previous.x, previous.y - 4), CV_RGB(0, 255, 255), 1, 8);
                cv::line(this->_predicted_and_previous_features_img->image, cv::Point(previous.x + 4, previous.y),
                         cv::Point(previous.x - 4, previous.y), CV_RGB(0, 255, 255), 1, 8);

                // The next two lines draw a blue cross at the predicted feature location
                cv::line(this->_predicted_and_previous_features_img->image, cv::Point(predicted.x, predicted.y + 4),
                         cv::Point(predicted.x, predicted.y - 4), CV_RGB(255, 0, 0), 1, 8);
                cv::line(this->_predicted_and_previous_features_img->image, cv::Point(predicted.x + 4, predicted.y),
                         cv::Point(predicted.x - 4, predicted.y), CV_RGB(255, 0, 0), 1, 8);
            }
            else
            {
                cv::circle(this->_predicted_and_previous_features_img->image, previous, 5, CV_RGB(0, 0, 255),
                           -1, 8, 0);
            }
        }
    }
    return this->_predicted_and_previous_features_img;
}

void PointFeatureTracker::setSelfOcclusionPositive(bool so_positive)
{
    this->_so_positive = so_positive;
}

void PointFeatureTracker::_ReadParameters()
{
    this->_node_handle.getParam(this->_ft_ns + std::string("/max_distance"), this->_max_allowed_depth);
    this->_node_handle.getParam(this->_ft_ns + std::string("/min_distance"), this->_min_allowed_depth);
    this->_node_handle.getParam(this->_ft_ns + std::string("/number_features"), this->_number_of_features);
    this->_node_handle.getParam(this->_ft_ns + std::string("/min_number_features"), this->_min_number_of_features);
    this->_node_handle.getParam(this->_ft_ns + std::string("/min_feat_quality"), this->_min_feat_quality);
    this->_node_handle.getParam(this->_ft_ns + std::string("/max_interframe_jump"), this->_max_interframe_jump);

    this->_node_handle.getParam(this->_ft_ns + std::string("/pub_tracked_feats_with_pred_mask_img"), this->_publishing_tracked_feats_with_pred_msk_img);
    this->_node_handle.getParam(this->_ft_ns + std::string("/pub_tracked_feats_img"), this->_publishing_tracked_feats_img);
    this->_node_handle.getParam(this->_ft_ns + std::string("/pub_predicted_and_past_feats_img"), this->_publishing_predicted_and_past_feats_img);

    this->_node_handle.getParam(std::string("/omip/sensor_fps"), this->_sensor_fps);
    this->_node_handle.getParam(std::string("/omip/processing_factor"), this->_processing_factor);
    this->_loop_period_ns = 1e9/(this->_sensor_fps/(double)this->_processing_factor);
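    // Example: with sensor_fps = 30 and processing_factor = 1, the loop period is
    // 1e9 / 30 ≈ 3.33e7 ns (33.3 ms)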

    this->_node_handle.getParam(this->_ft_ns + std::string("/erosion_size_detect"), this->_erosion_size_detecting);
    this->_node_handle.getParam(this->_ft_ns + std::string("/erosion_size_track"), this->_erosion_size_tracking);

    this->_node_handle.getParam(this->_ft_ns + std::string("/attention_to_motion"), this->_attention_to_motion);
    this->_node_handle.getParam(this->_ft_ns + std::string("/min_time_to_detect_motion"), this->_min_time_to_detect_motion);
    this->_node_handle.getParam(this->_ft_ns + std::string("/min_depth_difference"), this->_min_depth_difference);
    this->_node_handle.getParam(this->_ft_ns + std::string("/min_area_size_pixels"), this->_min_area_size_pixels);

    ROS_INFO_STREAM_NAMED(
                "PointFeatureTracker::ReadParameters",
                "PointFeatureTracker Parameters: " << std::endl <<
                "\tMaximum distance: " << this->_max_allowed_depth << std::endl <<
                "\tMinimum distance: " << this->_min_allowed_depth << std::endl <<
                "\tMaximum allowed 3D motion of features between consecutive frames: " << this->_max_interframe_jump << std::endl <<
                "\tNumber of tracked features: " << this->_number_of_features << std::endl <<
                "\tMinimum number of tracked features: " << this->_min_number_of_features << std::endl <<
                "\tMinimum feature quality: " << this->_min_feat_quality << std::endl <<
                "\tMax framerate (factor): " << this->_sensor_fps << " (" << this->_processing_factor << ")" << std::endl <<
                "\tSize of the erosion for detecting: " << this->_erosion_size_detecting << std::endl <<
                "\tSize of the erosion for tracking: " << this->_erosion_size_tracking << std::endl <<
                "\tFocusing attention on depth-changing areas: " << this->_attention_to_motion << std::endl <<
                "\tMinimum time between frames to detect depth-changing areas: " << this->_min_time_to_detect_motion << std::endl <<
                "\tMinimum depth change to consider a pixel as changing: " << this->_min_depth_difference << std::endl <<
                "\tMinimum area size to use the attention mechanism: " << this->_min_area_size_pixels << std::endl <<
                "\tPublishing image of the tracked feats: " << this->_publishing_tracked_feats_img << std::endl <<
                "\tPublishing image of the tracked feats with prediction mask: " << this->_publishing_tracked_feats_with_pred_msk_img << std::endl <<
                "\tPublishing image of the predicted and the past features: " << this->_publishing_predicted_and_past_feats_img);
}

void PointFeatureTracker::_InitializeVariables()
{
    this->_filter_name = "FTFilter";
    this->_camera_info_ptr = sensor_msgs::CameraInfo::Ptr(new sensor_msgs::CameraInfo());
    this->_image_plane_proj_mat_eigen = Eigen::Matrix<double, 3, 4>::Zero();
    this->_erosion_element_detecting_mat = cv::getStructuringElement(cv::MORPH_RECT,
                                                                     cv::Size(2 * this->_erosion_size_detecting + 1,
                                                                              2 * this->_erosion_size_detecting + 1),
                                                                     cv::Point(this->_erosion_size_detecting, this->_erosion_size_detecting));

    this->_erosion_element_tracking_mat = cv::getStructuringElement(cv::MORPH_RECT,
                                                                    cv::Size(2 * this->_erosion_size_tracking + 1,
                                                                             2 * this->_erosion_size_tracking + 1),
                                                                    cv::Point(this->_erosion_size_tracking, this->_erosion_size_tracking));

    this->_canny_low_threshold = 20;
    this->_canny_kernel_size = 3;
    this->_canny_ratio = 3;

    // Initialize all vectors
    this->_corrected_measurement.resize(this->_number_of_features, cv::Point2f(-1., -1.));
    this->_corrected_belief_state.resize(this->_number_of_features, cv::Point3d(-1., -1., -1.));
    this->_previous_belief_state.resize(this->_number_of_features, cv::Point3d(-1., -1., -1.));
    this->_current_feat_ids_v.resize(this->_number_of_features, 0);
    this->_feat_status_v.resize(this->_number_of_features, false);
    this->_previous_feat_ids_v = this->_current_feat_ids_v;

    // Initialize pcl point clouds
    FeaturePCLwc init_feature = FeaturePCLwc();
    init_feature.x = -1.f;
    init_feature.y = -1.f;
    init_feature.z = -1.f;
    init_feature.label = 0;
    this->_state = ft_state_t(new FeatureCloudPCLwc());
    this->_state->points.resize(this->_number_of_features, init_feature);

    // There are always 2 predicted states
    // The first element of the predicted states is obtained from the internal state model, which predicts that the locations of the
    // features in the next step will be the locations of the features in the last step
    this->_predicted_states.resize(2);
    this->_predicted_states[0] = this->_state;
    // The second element of the predicted states comes from the higher level (RBT)

    this->_predicted_measurements.resize(2);

    this->_tracked_features_img = cv_bridge::CvImagePtr(new cv_bridge::CvImage());
    this->_tracked_features_with_pred_msk_img = cv_bridge::CvImagePtr(new cv_bridge::CvImage());
    this->_depth_edges_img = cv_bridge::CvImagePtr(new cv_bridge::CvImage());
    this->_predicting_mask_img = cv_bridge::CvImagePtr(new cv_bridge::CvImage());
    this->_tracking_mask_img = cv_bridge::CvImagePtr(new cv_bridge::CvImage());
    this->_detecting_mask_img = cv_bridge::CvImagePtr(new cv_bridge::CvImage());
    this->_predicted_and_previous_features_img = cv_bridge::CvImagePtr(new cv_bridge::CvImage());

    this->_previous_time_of_motion_detection = ros::Time(0.0);
}

void PointFeatureTracker::_EstimateDetectionMask()
{
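    // Blank a disc around every live feature in the detection mask so that newly detected
    // features keep a minimum distance from the features already being tracked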
    for (size_t n = 0; n < this->_corrected_measurement.size(); n++)
    {
        if (this->_feat_status_v[n])
        {
            cv::circle(this->_feats_detection_msk_mat, this->_corrected_measurement[n], 2*_erosion_size_detecting, CV_RGB(0, 0, 0), -1, 8, 0);
        }
    }
}

void PointFeatureTracker::_DetectNewFeatures()
{
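    // Strategy: ask goodFeaturesToTrack for more candidates than needed (at least 40),
    // retrieve their 3D coordinates, assign the valid ones to the free feature slots, and
    // if too few survive, halve the quality threshold and repeat (at most 5 times)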
    int detected_good_features = 0;
    double min_distance = 30;
    double min_feat_quality = _min_feat_quality;
    int num_new_features_to_detect = std::count(this->_feat_status_v.begin(), this->_feat_status_v.end(), false);

    // Repeat the detection until the desired minimum number of features is detected (maximum 5 times)
    int repetition_counter = 0;
    do
    {
        _min_feat_quality = min_feat_quality;
        this->_EstimateDetectionMask();

        // If we try to detect very few features (1 or 2) the probability that we cannot retrieve their 3D location
        // is high, and then even if we reduce the necessary quality level, we do not get valid features
        num_new_features_to_detect = std::max(2*num_new_features_to_detect, 40);
        std::vector<cv::Point2f> new_features;

        // DETECTION /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
        cv::goodFeaturesToTrack(this->_current_bw_img_ptr->image, new_features, num_new_features_to_detect, min_feat_quality, min_distance, this->_feats_detection_msk_mat);

        // cv::imshow("a", this->_current_bw_img_ptr->image);
        // cv::imshow("b", this->_feats_detection_msk_mat);
        // cv::waitKey(-1);

        std::vector<bool> new_feats_status;
        new_feats_status.resize(num_new_features_to_detect, true);
        std::vector<cv::Point3d> new_features_3d;
        new_features_3d.resize(num_new_features_to_detect, cv::Point3d(-1., -1., -1.));

        this->_Retrieve3DCoords(new_features, new_feats_status, new_features_3d);

        for (int old_feats_idx = 0; old_feats_idx < this->_number_of_features; old_feats_idx++)
        {
            if (!this->_feat_status_v[old_feats_idx])
            {
                for (size_t new_feats_idx = 0; new_feats_idx < new_features.size(); new_feats_idx++)
                {
                    if (new_feats_status[new_feats_idx])
                    {
                        this->_current_feat_ids_v[old_feats_idx] = this->_feature_id_counter++;
                        this->_corrected_measurement[old_feats_idx] = new_features[new_feats_idx];
                        this->_corrected_belief_state[old_feats_idx] = new_features_3d[new_feats_idx];
                        this->_feat_status_v[old_feats_idx] = true;
                        new_feats_status[new_feats_idx] = false;
                        detected_good_features++;
                        break;
                    }
                }
            }
        }
        min_feat_quality /= 2.0;
        min_feat_quality = std::max(min_feat_quality, 0.0001);
        num_new_features_to_detect = std::count(this->_feat_status_v.begin(), this->_feat_status_v.end(), false);
        repetition_counter++;
    }
    while (num_new_features_to_detect > (this->_number_of_features - this->_min_number_of_features) && repetition_counter < 5);
}

void PointFeatureTracker::_TrackFeatures()
{
    std::vector<std::vector<uchar> > feat_tracked;
    feat_tracked.resize(2);

    _feat_quality.clear();
    _feat_quality.resize(2);

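    // In both calcOpticalFlowPyrLK calls in this function, OPTFLOW_USE_INITIAL_FLOW makes the
    // nextPts argument (_predicted_measurements[0] or [1]) act as the initial estimate of the
    // new feature locations, and OPTFLOW_LK_GET_MIN_EIGENVALS makes _feat_quality hold the
    // minimum eigenvalue of the spatial gradient matrix per feature instead of the patch difference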
    // Track using the previous locations
    cv::calcOpticalFlowPyrLK(
                this->_previous_bw_img_ptr->image,
                this->_current_bw_img_ptr->image,
                this->_previous_measurement,
                this->_predicted_measurements[0],
                feat_tracked[0],
                _feat_quality[0],
                cv::Size(7, 7),
                1,
                cv::TermCriteria(cv::TermCriteria::COUNT + cv::TermCriteria::EPS,
                                 30,
                                 0.01),
                cv::OPTFLOW_USE_INITIAL_FLOW + cv::OPTFLOW_LK_GET_MIN_EIGENVALS,
                _min_feat_quality/2.0);

    int num_lost_features_tracking = 0;

    // If we had a recent prediction about the next feature locations
    bool received_useful_prediction = false;
    std::vector<bool> using_predicted_location;
    using_predicted_location.resize(this->_number_of_features, false);
    double time_since_last_prediction = this->_measurement_timestamp_ns - _predicted_state_rcvd_time_ns;
    if(time_since_last_prediction < this->_loop_period_ns/2.0)
    {
        received_useful_prediction = true;
        // Track using the predictions
        cv::calcOpticalFlowPyrLK(
                    this->_previous_bw_img_ptr->image,
                    this->_current_bw_img_ptr->image,
                    this->_previous_measurement,
                    this->_predicted_measurements[1],
                    feat_tracked[1],
                    _feat_quality[1],
                    cv::Size(7, 7),
                    1,
                    cv::TermCriteria(cv::TermCriteria::COUNT + cv::TermCriteria::EPS,
                                     30,
                                     0.01),
                    cv::OPTFLOW_USE_INITIAL_FLOW + cv::OPTFLOW_LK_GET_MIN_EIGENVALS,
                    _min_feat_quality/2.0);

        // Select the tracked location with the lower error
        int using_pred = 0;
        int using_prev = 0;
        for (size_t feat_idx = 0; feat_idx < (size_t)this->_number_of_features; feat_idx++)
        {
            // Both trackers found the point -> Select the one with the lower error
            if (feat_tracked[0][feat_idx] && feat_tracked[1][feat_idx])
            {
                this->_corrected_measurement[feat_idx] =
                        (_feat_quality[1][feat_idx] <= _feat_quality[0][feat_idx] ? this->_predicted_measurements[1][feat_idx] : this->_predicted_measurements[0][feat_idx]);

                // Count which tracked point was used
                _feat_quality[1][feat_idx] <= _feat_quality[0][feat_idx] ? using_pred++ : using_prev++;
            }
            // Only the tracker with predictions could track it
            else if (feat_tracked[1][feat_idx])
            {
                this->_corrected_measurement[feat_idx] = this->_predicted_measurements[1][feat_idx];
                using_predicted_location[feat_idx] = true;
            }
            // Only the tracker with previous locations could track it
            else if (feat_tracked[0][feat_idx])
            {
                this->_corrected_measurement[feat_idx] = this->_predicted_measurements[0][feat_idx];
            }
            // Both lost the feature
            else
            {
                this->_current_feat_ids_v[feat_idx] = 0;
                this->_feat_status_v[feat_idx] = false;
                num_lost_features_tracking++;
            }
        }
        ROS_INFO_STREAM_NAMED("PointFeatureTracker._TrackFeatures", "Using " << using_pred << " predicted and " << using_prev << " previous locations.");
    }else{

        ROS_ERROR_STREAM_NAMED("PointFeatureTracker.measurementCallback",
                               "The prediction from the higher level can NOT be used to predict the next measurement before correction. Delay: "
                               << (double)(time_since_last_prediction)/1e9);
        for (size_t feat_idx = 0; feat_idx < (size_t)this->_number_of_features; feat_idx++)
        {
            // If the tracker with previous locations could track it
            if (feat_tracked[0][feat_idx])
            {
                this->_corrected_measurement[feat_idx] = this->_predicted_measurements[0][feat_idx];
            }
            // The feature was lost
            else
            {
                this->_current_feat_ids_v[feat_idx] = 0;
                this->_feat_status_v[feat_idx] = false;
                num_lost_features_tracking++;
            }
        }

    }

    // 1) Check if the tracked features are in a tracking-allowed region and if they could be tracked
    int num_lost_features_mask = 0;
    for (size_t feat_idx = 0; feat_idx < (size_t)this->_number_of_features; feat_idx++)
    {
        if (this->_feat_status_v[feat_idx])
        {
            if (!this->_feats_tracking_msk_mat.at<uchar>(this->_corrected_measurement[feat_idx].y, this->_corrected_measurement[feat_idx].x))
            {
                this->_current_feat_ids_v[feat_idx] = 0;
                this->_feat_status_v[feat_idx] = false;
                num_lost_features_mask++;
            }
        }
    }

    // 2) If we are using predictions: check if the tracked features are in a predicted region
    int num_lost_features_prediction_mask = 0;
    if(received_useful_prediction)
    {
        for (size_t feat_idx = 0; feat_idx < (size_t)this->_number_of_features; feat_idx++)
        {
            if (this->_feat_status_v[feat_idx]
                    && std::find(this->_received_prediction_ids.begin(), this->_received_prediction_ids.end(), this->_current_feat_ids_v[feat_idx]) != this->_received_prediction_ids.end()
                    && using_predicted_location[feat_idx])
            {
                if (!this->_predicted_feats_msk_mat.at<uchar>(this->_corrected_measurement[feat_idx].y, this->_corrected_measurement[feat_idx].x))
                {
                    this->_current_feat_ids_v[feat_idx] = 0;
                    this->_feat_status_v[feat_idx] = false;
                    num_lost_features_prediction_mask++;
                }
            }
        }
    }

    int num_lost_features_retrieve_3d = this->_Retrieve3DCoords(this->_corrected_measurement, this->_feat_status_v, this->_corrected_belief_state);

    int num_lost_features_compare_3d = this->_Compare3DCoords();

    ROS_INFO_NAMED("FeatureTracker._TrackFeatures",
                   "Lost features: %3d (%3d because of tracking, %3d because of masking, %3d because of being out of the prediction, %3d because of not having 3D data, %3d because of jumping in 3D)",
                   num_lost_features_tracking + num_lost_features_mask + num_lost_features_prediction_mask + num_lost_features_retrieve_3d + num_lost_features_compare_3d,
                   num_lost_features_tracking,
                   num_lost_features_mask,
                   num_lost_features_prediction_mask,
                   num_lost_features_retrieve_3d,
                   num_lost_features_compare_3d);

    this->_DetectNewFeatures();
}

int PointFeatureTracker::_Retrieve3DCoords(std::vector<cv::Point2f>& points_in_2d, std::vector<bool>& points_status, std::vector<cv::Point3d>& points_in_3d)
{
    int features_without_3d_ctr = 0;
    double x_coord, y_coord, z_coord;
    for (size_t features_idx = 0; features_idx < points_in_2d.size(); features_idx++)
    {
        if (points_status[features_idx])
        {
            if (this->_using_pc)
            {
                // Get the pixel index
                int idx = (cvRound(points_in_2d[features_idx].y) * this->_received_point_cloud->width + cvRound(points_in_2d[features_idx].x));

                x_coord = this->_received_point_cloud->points[idx].x;
                y_coord = this->_received_point_cloud->points[idx].y;
                z_coord = this->_received_point_cloud->points[idx].z;
            }
            else
            {
                cv::Point p = cv::Point(cvRound(points_in_2d[features_idx].x), cvRound(points_in_2d[features_idx].y));

                // Get the raw z value
                float z_raw_float = 0.0;
                uint16_t z_raw_uint = 0;
                if(this->_depth_img_ptr->encoding == "32FC1")
                {
                    z_raw_float = this->_depth_img_ptr->image.at<float>(p);
                }else{
                    z_raw_uint = this->_depth_img_ptr->image.at<uint16_t>(p);
                }

                // A raw value of 0 marks an invalid depth measurement (integer depth values are always finite)
                if ((z_raw_float != 0 && std::isfinite(z_raw_float)) || z_raw_uint != 0)
                {
                    /* K = Intrinsic camera matrix for the raw (distorted) images.
                        [fx  0 cx]
                    K = [ 0 fy cy]
                        [ 0  0  1]
                    */
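                    // Back-projection of pixel (u, v) with depth z (pinhole model):
                    //   x = z * (u - cx) / fx,   y = z * (v - cy) / fy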

                    if(this->_depth_img_ptr->encoding == "32FC1")
                    {
                        z_coord = (double)z_raw_float;
                    }else{
                        z_coord = (double)z_raw_uint/1000.0;
                    }
                    double umcx = points_in_2d[features_idx].x - this->_camera_info_ptr->K[2];
                    double vmcy = points_in_2d[features_idx].y - this->_camera_info_ptr->K[5];

                    // Calculate x and y
                    x_coord = z_coord * umcx / (double)this->_camera_info_ptr->K[0];
                    y_coord = z_coord * vmcy / (double)this->_camera_info_ptr->K[4];
                }
                else
                {
                    z_coord = std::numeric_limits<double>::quiet_NaN();
                }
            }

            // If the data from the sensor for this feature is valid
            if (!std::isnan(x_coord) && !std::isnan(y_coord) && !std::isnan(z_coord))
            {
                points_in_3d[features_idx].x = x_coord;
                points_in_3d[features_idx].y = y_coord;
                points_in_3d[features_idx].z = z_coord;
            }
            else
            {
                points_in_3d[features_idx].x = -1.;
                points_in_3d[features_idx].y = -1.;
                points_in_3d[features_idx].z = -1.;
                points_status[features_idx] = false;
                features_without_3d_ctr++;
            }
        }
    }
    return features_without_3d_ctr;
}

int PointFeatureTracker::_Compare3DCoords()
{
    int jumping_features_ctr = 0;
    double interframe_distance = 0.;
    double current_x_coord = 0., current_y_coord = 0., current_z_coord = 0.;
    double previous_x_coord = 0., previous_y_coord = 0., previous_z_coord = 0.;
    for (int features_idx = 0; features_idx < this->_number_of_features; features_idx++)
    {
        if (this->_previous_feat_ids_v[features_idx] == this->_current_feat_ids_v[features_idx] && this->_feat_status_v[features_idx])
        {
            current_x_coord = this->_corrected_belief_state[features_idx].x;
            current_y_coord = this->_corrected_belief_state[features_idx].y;
            current_z_coord = this->_corrected_belief_state[features_idx].z;

            previous_x_coord = this->_previous_belief_state[features_idx].x;
            previous_y_coord = this->_previous_belief_state[features_idx].y;
            previous_z_coord = this->_previous_belief_state[features_idx].z;

            //std::cout << "Current: " << current_x_coord << " " << current_y_coord << " " << current_z_coord << std::endl;
            //std::cout << "Previous: "<< previous_x_coord << " " << previous_y_coord << " " << previous_z_coord << std::endl;

            interframe_distance = sqrt( pow(current_x_coord - previous_x_coord, 2) + pow(current_y_coord - previous_y_coord, 2)
                                        + pow(current_z_coord - previous_z_coord, 2));

            // If the feature jumped more than the allowed interframe distance in 3D, mark it as lost
            if (interframe_distance >= this->_max_interframe_jump)
            {
                this->_corrected_belief_state[features_idx].x = -1.;
                this->_corrected_belief_state[features_idx].y = -1.;
                this->_corrected_belief_state[features_idx].z = -1.;
                this->_current_feat_ids_v[features_idx] = 0;
                this->_feat_status_v[features_idx] = false;
                jumping_features_ctr++;
            }
        }
    }
    return jumping_features_ctr;
}

