#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/video/tracking.hpp>
#include <opencv2/highgui/highgui.hpp>

#include <sensor_msgs/JointState.h>
#include <sensor_msgs/CameraInfo.h>

#include <boost/foreach.hpp>

#if ROS_VERSION_MINIMUM(1, 11, 1) // if the current ROS version is >= 1.11.1 (indigo)

#define EROSION_SIZE_DETECTING 15
#define EROSION_SIZE_TRACKING 3

#define MAXIMUM_TIME_TO_USE_A_PREDICTION 0.05

_occlusion_msk_img_rcvd(false),
_predicted_state_rcvd(false),
_predicted_state_processed(true),
_camera_info_msg_rcvd(false),
_erosion_size_detecting(-1),
_erosion_size_tracking(-1),
_canny_kernel_size(-1),
_number_of_features(-1),
_min_number_of_features(-1),
_feature_id_counter(1),
_max_allowed_depth(-1.f),
_min_allowed_depth(-1.f),
_canny_low_threshold(-1),
_max_interframe_jump(0.f),
_publishing_predicted_and_past_feats_img(false),
_publishing_tracked_feats_img(false),
_publishing_tracked_feats_with_pred_msk_img(false),
_processing_factor(0),
_attention_to_motion(false),
_use_motion_mask(false),
_min_feat_quality(0.001),
_predicted_state_rcvd_time_ns(0),
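// --- Fragment, apparently from predictMeasurement: project each predicted
// 3D feature (the predicted next state) into the image plane.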
Eigen::Vector4d predicted_feat_3d_eigen = Eigen::Vector4d(0., 0., 0., 1.);
Eigen::Vector3d predicted_feat_2d_eigen = Eigen::Vector3d(0., 0., 0.);
cv::Point predicted_feat_2d_cv_int = cvPoint(0, 0);
cv::Point2f predicted_feat_2d_cv_f = cvPoint2D32f(0.f, 0.f);
double predicted_distance_to_sensor = 0.;
int correction_search_area_radius = 0;
predicted_feat_3d_eigen.x() = predicted_feat_3d_pcl.x;
predicted_feat_3d_eigen.y() = predicted_feat_3d_pcl.y;
predicted_feat_3d_eigen.z() = predicted_feat_3d_pcl.z;

predicted_distance_to_sensor = sqrt(predicted_feat_3d_pcl.x * predicted_feat_3d_pcl.x +
                                    predicted_feat_3d_pcl.y * predicted_feat_3d_pcl.y +
                                    predicted_feat_3d_pcl.z * predicted_feat_3d_pcl.z);
predicted_feat_2d_cv_f.x = predicted_feat_2d_eigen.x() / predicted_feat_2d_eigen.z();
predicted_feat_2d_cv_f.y = predicted_feat_2d_eigen.y() / predicted_feat_2d_eigen.z();

predicted_feat_2d_cv_int.x = cvRound(predicted_feat_2d_cv_f.x);
predicted_feat_2d_cv_int.y = cvRound(predicted_feat_2d_cv_f.y);
if (predicted_feat_2d_cv_int.x >= 0 && predicted_feat_2d_cv_int.x < this->_camera_info_ptr->width
    && predicted_feat_2d_cv_int.y >= 0 && predicted_feat_2d_cv_int.y < this->_camera_info_ptr->height)
correction_search_area_radius = std::max(3, (int)(8.0 / predicted_distance_to_sensor));
cv::circle(this->_predicted_feats_msk_mat, predicted_feat_2d_cv_int,
           correction_search_area_radius, CV_RGB(255, 255, 255), -1, 8, 0);
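// The correction search area around each predicted feature shrinks with
// distance to the sensor: max(3, 8/d) pixels. A filled white circle per
// feature is drawn into _predicted_feats_msk_mat; this mask later gates
// which corrected feature locations are accepted.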
double minVal, maxVal;
cv::Point minpt, maxpt;
#if (CV_MAJOR_VERSION == 2)

float infinity = std::numeric_limits<float>::infinity();
cv::minMaxLoc(this->_depth_img_ptr->image, &minVal, &maxVal, &minpt, &maxpt,
              this->_depth_img_ptr->image != infinity);

uint16_t infinity = std::numeric_limits<uint16_t>::infinity();
cv::minMaxLoc(this->_depth_img_ptr->image, &minVal, &maxVal, &minpt, &maxpt,
              this->_depth_img_ptr->image != infinity);

#elif (CV_MAJOR_VERSION == 3)
cv::minMaxLoc(this->_depth_img_ptr->image, &minVal, &maxVal, &minpt, &maxpt,
              cv::Mat(this->_depth_img_ptr->image == this->_depth_img_ptr->image));
#else
#error CV major version is not 2 or 3
#endif

this->_depth_img_ptr->image.convertTo(this->_aux1_mat, CV_8U,
                                      255.0 / (maxVal - minVal),
                                      -minVal * 255.0 / (maxVal - minVal));
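// Note on the masks above: std::numeric_limits<uint16_t> has no infinity,
// so infinity() value-initializes to 0 and the uint16 branch effectively
// masks out zero (invalid) depth readings, while the float branch masks out
// +inf. In OpenCV 3 the self-comparison (image == image) rejects NaNs.
// convertTo then linearly rescales the valid range [minVal, maxVal] to
// [0, 255] (CV_8U), presumably as input for the Canny-based depth-edge
// extraction.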
cv::Mat depth_difference;

cv::absdiff(this->_depth_img_ptr->image, this->_previous_depth_mat, depth_difference);
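// Per-pixel absolute depth change against the previous frame. This feeds the
// attention-to-motion mechanism (_attention_to_motion), which appears to
// restrict feature detection to regions whose depth changed by at least
// _min_depth_difference over an area of at least _min_area_size_pixels.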
this->_camera_info_ptr = sensor_msgs::CameraInfo::Ptr(new sensor_msgs::CameraInfo(*camera_info));

if (this->_predicted_feats_msk_mat.cols != this->_camera_info_ptr->width
    || this->_predicted_feats_msk_mat.rows != this->_camera_info_ptr->height)
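// setCameraInfoMsg keeps a deep copy of the camera intrinsics; when the
// prediction mask does not match the image dimensions it presumably gets
// reallocated (the reallocation itself is not shown in this excerpt).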
for (size_t tracked_feats_idx = 0; tracked_feats_idx < this->_corrected_measurement.size(); tracked_feats_idx++)

    cv::Point(p.x, p.y - 3), CV_RGB(0, 255, 255), 1, 8);

    cv::Point(p.x - 3, p.y), CV_RGB(0, 255, 255), 1, 8);
for (size_t tracked_feats_idx = 0; tracked_feats_idx < this->_predicted_measurements[0].size(); tracked_feats_idx++)

    cv::Point(previous.x, previous.y - 4), CV_RGB(0, 255, 255), 1, 8);

    cv::Point(previous.x - 4, previous.y), CV_RGB(0, 255, 255), 1, 8);

    cv::Point(predicted.x, predicted.y - 4), CV_RGB(255, 0, 0), 1, 8);

    cv::Point(predicted.x - 4, predicted.y), CV_RGB(255, 0, 0), 1, 8);
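// The orphaned argument lists above are the tails of cv::line calls that
// draw small crosses on the debug images: cyan (CV_RGB(0,255,255)) for
// tracked/previous feature locations and red (CV_RGB(255,0,0)) for the
// predicted ones. The call heads fall outside this excerpt.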
579 "PointFeatureTracker::ReadParameters",
580 "PointFeatureTracker Parameters: " << std::endl <<
583 "\tMaximum allowed 3D motion of features between consecutive frames: " << this->
_max_interframe_jump << std::endl <<
590 "\tFocussing attention into depth-changing areas: " << this->
_attention_to_motion << std::endl <<
592 "\tMinimum depth change to consider a pixel as changing: " << this->
_min_depth_difference << std::endl <<
593 "\tMinimum area size to use the attention mechanism: " << this->
_min_area_size_pixels << std::endl <<
this->_camera_info_ptr = sensor_msgs::CameraInfo::Ptr(new sensor_msgs::CameraInfo());
init_feature.x = -1.f;
init_feature.y = -1.f;
init_feature.z = -1.f;
init_feature.label = 0;
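// New features start at the sentinel position (-1, -1, -1) with label 0;
// real feature ids are presumably assigned from _feature_id_counter, which
// is initialized to 1 in the constructor.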
int detected_good_features = 0;
double min_distance = 30;

int repetition_counter = 0;

num_new_features_to_detect = std::max(2 * num_new_features_to_detect, 40);
std::vector<cv::Point2f> new_features;
cv::goodFeaturesToTrack(this->_current_bw_img_ptr->image, new_features, num_new_features_to_detect,
                        min_feat_quality, min_distance, this->_feats_detection_msk_mat);
std::vector<bool> new_feats_status;
new_feats_status.resize(num_new_features_to_detect, true);
std::vector<cv::Point3d> new_features_3d;
new_features_3d.resize(num_new_features_to_detect, cv::Point3d(-1., -1., -1.));
for (size_t new_feats_idx = 0; new_feats_idx < new_features.size(); new_feats_idx++)

    if (new_feats_status[new_feats_idx])

        new_feats_status[new_feats_idx] = false;
        detected_good_features++;

min_feat_quality /= 2.0;
min_feat_quality = std::max(min_feat_quality, 0.0001);

repetition_counter++;
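// Detection retry loop of _DetectNewFeatures: request twice as many
// candidates as needed (at least 40), run goodFeaturesToTrack under the
// detection mask, and if too few good features survive, halve
// min_feat_quality (floored at 1e-4) and repeat, counting the repetitions.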
std::vector<std::vector<uchar> > feat_tracked;
feat_tracked.resize(2);

cv::calcOpticalFlowPyrLK(
    this->_current_bw_img_ptr->image,
    this->_previous_measurement,
    this->_predicted_measurements[0],
    cv::TermCriteria(cv::TermCriteria::COUNT + cv::TermCriteria::EPS,
    cv::OPTFLOW_USE_INITIAL_FLOW + cv::OPTFLOW_LK_GET_MIN_EIGENVALS,
int num_lost_features_tracking = 0;

bool received_useful_prediction = false;
std::vector<bool> using_predicted_location;

received_useful_prediction = true;
cv::calcOpticalFlowPyrLK(
    this->_current_bw_img_ptr->image,
    this->_previous_measurement,
    this->_predicted_measurements[1],
    cv::TermCriteria(cv::TermCriteria::COUNT + cv::TermCriteria::EPS,
    cv::OPTFLOW_USE_INITIAL_FLOW + cv::OPTFLOW_LK_GET_MIN_EIGENVALS,
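// correctState runs pyramidal Lucas-Kanade twice, both times seeded via
// OPTFLOW_USE_INITIAL_FLOW: pass 0 starts from the features' previous
// locations (_predicted_measurements[0]); pass 1 (only when a sufficiently
// recent higher-level prediction arrived) starts from the predicted
// locations (_predicted_measurements[1]). OPTFLOW_LK_GET_MIN_EIGENVALS makes
// the error output the minimum eigenvalue of the spatial gradient matrix
// instead of the patch intensity difference.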
if (feat_tracked[0][feat_idx] && feat_tracked[1][feat_idx])

else if (feat_tracked[1][feat_idx])

    using_predicted_location[feat_idx] = true;

else if (feat_tracked[0][feat_idx])

num_lost_features_tracking++;
ROS_INFO_STREAM_NAMED("PointFeatureTracker._TrackFeatures",
                      "Using " << using_pred << " predicted and " << using_prev << " previous locations.");

"The prediction from the higher level can NOT be used to predict the next measurement before correction. Delay: "
    << (double)(time_since_last_prediction) / 1e9);
if (feat_tracked[0][feat_idx])

num_lost_features_tracking++;
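// Merging the two passes: if only the prediction-seeded pass succeeds, the
// predicted location is adopted (using_predicted_location); if neither pass
// succeeds, the feature counts as lost to tracking. When the prediction is
// older than MAXIMUM_TIME_TO_USE_A_PREDICTION (the warning above, with the
// delay converted from nanoseconds to seconds), only pass 0 is consulted.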
int num_lost_features_mask = 0;

if (!this->_feats_tracking_msk_mat.at<uchar>(this->_corrected_measurement[feat_idx].y,
                                             this->_corrected_measurement[feat_idx].x))

    num_lost_features_mask++;
int num_lost_features_prediction_mask = 0;
if (received_useful_prediction)

    && using_predicted_location[feat_idx])

    if (!this->_predicted_feats_msk_mat.at<uchar>(this->_corrected_measurement[feat_idx].y,
                                                  this->_corrected_measurement[feat_idx].x))

        num_lost_features_prediction_mask++;
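// Mask-based rejection: a corrected feature is dropped if it falls outside
// the tracking mask, or, when its location came from the higher-level
// prediction, outside the circular search areas previously drawn into
// _predicted_feats_msk_mat.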
873 "Lost features: %3d (%3d because of tracking,%3d because of masking,%3d because of out of prediction,%3d because of not having 3D data,%3d because of jumping in 3D)",
874 num_lost_features_tracking + num_lost_features_mask + num_lost_features_prediction_mask + num_lost_features_retrieve_3d + num_lost_features_compare_3d,
875 num_lost_features_tracking,
876 num_lost_features_mask,
877 num_lost_features_prediction_mask,
878 num_lost_features_retrieve_3d ,
879 num_lost_features_compare_3d);
int features_without_3d_ctr = 0;
double x_coord, y_coord, z_coord;
for (size_t features_idx = 0; features_idx < points_in_2d.size(); features_idx++)

    if (points_status[features_idx])

        int idx = (cvRound(points_in_2d[features_idx].y) * this->_received_point_cloud->width
                   + cvRound(points_in_2d[features_idx].x));

        cv::Point p = cvPoint(cvRound(points_in_2d[features_idx].x), cvRound(points_in_2d[features_idx].y));
float z_raw_float = 0.0f;
uint16_t z_raw_uint = 0;

if ((z_raw_float != 0 && std::isfinite(z_raw_float))
    || (z_raw_uint != 0 && std::isfinite(z_raw_uint)))

    z_coord = (double)z_raw_float;

    z_coord = (double)z_raw_uint / 1000.0;
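// Two depth encodings are supported: float depth is already in meters,
// uint16 depth is in millimeters (hence the division by 1000.0). Note that
// std::isfinite(z_raw_uint) is trivially true (the integral argument is
// promoted to double), so in that branch only the != 0 test filters invalid
// readings.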
if (!isnan(x_coord) && !isnan(y_coord) && !isnan(z_coord))
{
    points_in_3d[features_idx].x = x_coord;
    points_in_3d[features_idx].y = y_coord;
    points_in_3d[features_idx].z = z_coord;
}
else
{
    points_in_3d[features_idx].x = -1.f;
    points_in_3d[features_idx].y = -1.f;
    points_in_3d[features_idx].z = -1.f;
    points_status[features_idx] = false;
    features_without_3d_ctr++;
}
return features_without_3d_ctr;
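// _Retrieve3DCoords maps each tracked 2D point to 3D, either by indexing the
// organized point cloud (row * width + column) or by reading the depth image
// directly; features without valid 3D data are reset to the sentinel
// (-1, -1, -1), flagged in points_status, and counted in the return value.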
int jumping_features_ctr = 0;
double interframe_distance = 0.0;
double current_x_coord = 0.0, current_y_coord = 0.0, current_z_coord = 0.0;
double previous_x_coord = 0.0, previous_y_coord = 0.0, previous_z_coord = 0.0;
interframe_distance = sqrt(pow(current_x_coord - previous_x_coord, 2)
                           + pow(current_y_coord - previous_y_coord, 2)
                           + pow(current_z_coord - previous_z_coord, 2));
jumping_features_ctr++;

return jumping_features_ctr;
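// Interframe jump check: features whose 3D Euclidean displacement between
// consecutive frames exceeds _max_interframe_jump are rejected and counted;
// this is the "jumping in 3D" term of the lost-features summary above.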