PointFeatureTracker.cpp
2 
4 
5 #include <pcl_ros/publisher.h>
6 #include <opencv2/imgproc/imgproc.hpp>
7 //#include <opencv2/gpu/gpu.hpp>
8 #include <opencv2/video/tracking.hpp>
9 #include <opencv2/highgui/highgui.hpp>
11 #include <sensor_msgs/JointState.h>
12 #include <sensor_msgs/CameraInfo.h>
13 
14 #include <rosbag/bag.h>
15 #include <rosbag/view.h>
16 
17 #include <boost/foreach.hpp>
18 
20 
21 #if ROS_VERSION_MINIMUM(1, 11, 1) // if current ros version is >= 1.11.1 (indigo)
22 #else
24 #endif
25 
26 #define EROSION_SIZE_DETECTING 15
27 #define EROSION_SIZE_TRACKING 3
28 
29 #define MAXIMUM_TIME_TO_USE_A_PREDICTION 0.05
30 
31 using namespace omip;
32 
33 PointFeatureTracker::PointFeatureTracker(double loop_period_ns, bool using_pc, std::string ft_ns) :
34  FeatureTracker(loop_period_ns),
35  _using_pc(using_pc),
36  _first_frame(true),
37  _occlusion_msk_img_rcvd(false),
38  _predicted_state_rcvd(false),
39  _predicted_state_processed(true),
40  _camera_info_msg_rcvd(false),
41  _erosion_size_detecting(-1),
42  _erosion_size_tracking(-1),
43  _canny_kernel_size(-1),
44  _canny_ratio(-1),
45  _number_of_features(-1),
46  _min_number_of_features(-1),
47  _frame_counter(1),
48  _feature_id_counter(1),
49  _max_allowed_depth(-1.f),
50  _min_allowed_depth(-1.f),
51  _canny_low_threshold(-1),
52  _max_interframe_jump(0.f),
53  _ft_ns(ft_ns),
54  _publishing_predicted_and_past_feats_img(false),
55  _publishing_tracked_feats_img(false),
56  _publishing_tracked_feats_with_pred_msk_img(false),
57  _sensor_fps(0.0),
58  _processing_factor(0),
59  _attention_to_motion(false),
60  _use_motion_mask(false),
61  _min_feat_quality(0.001),
62  _predicted_state_rcvd_time_ns(0),
63  _so_positive(false)
64 {
65  this->_ReadParameters();
66  this->_InitializeVariables();
67 
68  _features_file.open("features.txt");
69 }
70 
71 PointFeatureTracker::~PointFeatureTracker()
72 {
73  _features_file.close();
74 }
75 
76 void PointFeatureTracker::setDynamicReconfigureValues(feature_tracker::FeatureTrackerDynReconfConfig &config)
77 {
78  this->_publishing_predicted_and_past_feats_img = config.pub_predicted_and_past_feats_img;
79 }
80 
81 void PointFeatureTracker::predictState(double time_interval_ns)
82 {
83  // The predicted state based on the current state is the current state itself (continuity prior: the features in
84  // the current step will be very close to where they were in the previous step)
85  this->_predicted_states[0] = this->_state;
86 }
87 
88 void PointFeatureTracker::predictMeasurement()
89 {
90  // We have two predictions for the measurements, one using each predicted state
91 
92  // First predicted measurement:
93  // Uses the first predicted state, which is the same as the previous state (no feature motion)
94  // Therefore, we could project the 3D features back to the image, but for the sake of accuracy and computation time, we just take
95  // the previous "refined" measurement
97 
98  // Second predicted measurement:
99  // If we received from the higher level (RBT) a prediction about the next feature 3D locations, we project them to the image plane
100 
101  // By copying the previous vector of 2D feature locations we ensure an initial value for tracking the features
103 
104  if (!this->_first_frame && this->_camera_info_msg_rcvd && this->_predicted_states.size() > 1 && this->_predicted_states[1] &&
105  pcl_conversions::fromPCL((double)this->_predicted_states[1]->header.stamp).toNSec() - this->_measurement_timestamp_ns < this->_loop_period_ns/2.0)
106  {
107  // Initialize variables
108  this->_received_prediction_ids.clear();
109  this->_predicted_feats_msk_mat = cv::Scalar(0);
110  Eigen::Vector4d predicted_feat_3d_eigen = Eigen::Vector4d(0., 0., 0., 1.);
111  Eigen::Vector3d predicted_feat_2d_eigen = Eigen::Vector3d(0., 0., 0.);
112  cv::Point predicted_feat_2d_cv_int = cvPoint(0, 0);
113  cv::Point2f predicted_feat_2d_cv_f = cvPoint2D32f(0.f, 0.f);
114  double predicted_distance_to_sensor = 0.;
115  int correction_search_area_radius = 0;
116 
117  BOOST_FOREACH(FeaturePCLwc predicted_feat_3d_pcl, this->_predicted_states[1]->points)
118  {
119  // Find the index of this feature in the vector of current features
120  int predicted_feat_idx = std::find(this->_current_feat_ids_v.begin(), this->_current_feat_ids_v.end(), predicted_feat_3d_pcl.label) - this->_current_feat_ids_v.begin();
121 
122  // There is a current point with this idx
123  if (predicted_feat_idx < this->_number_of_features)
124  {
125  this->_received_prediction_ids.push_back(predicted_feat_3d_pcl.label);
126  // Convert the predicted feature from PCL point into Eigen vector homogeneous coordinates
127  predicted_feat_3d_eigen.x() = predicted_feat_3d_pcl.x;
128  predicted_feat_3d_eigen.y() = predicted_feat_3d_pcl.y;
129  predicted_feat_3d_eigen.z() = predicted_feat_3d_pcl.z;
130 
131  // Estimate the distance of the predicted feature to the sensor
132  predicted_distance_to_sensor = sqrt(predicted_feat_3d_pcl.x * predicted_feat_3d_pcl.x +
133  predicted_feat_3d_pcl.y * predicted_feat_3d_pcl.y +
134  predicted_feat_3d_pcl.z * predicted_feat_3d_pcl.z);
135 
136  // Project the predicted feature from 3D space into the image plane
137  predicted_feat_2d_eigen = this->_image_plane_proj_mat_eigen * predicted_feat_3d_eigen;
138 
139  // Convert the predicted feature from image coordinates to pixel coordinates
140  predicted_feat_2d_cv_f.x = predicted_feat_2d_eigen.x() / predicted_feat_2d_eigen.z();
141  predicted_feat_2d_cv_f.y = predicted_feat_2d_eigen.y() / predicted_feat_2d_eigen.z();
142 
143  // Estimate the pixel (round the float pixel coordinates to the closest integer value)
144  predicted_feat_2d_cv_int.x = cvRound(predicted_feat_2d_cv_f.x);
145  predicted_feat_2d_cv_int.y = cvRound(predicted_feat_2d_cv_f.y);
146 
147  // If the predicted feature is inside the image limits
148  // 1: We store this feature as prediction to help the tracker (it will use these locations as starting points)
149  // 2: We clear a circular area around its location in the tracking mask. The tracker (as part of the correction of the predicted state)
150  // will search for the feature only in this area of the image
151  if (predicted_feat_2d_cv_int.x >= 0 && predicted_feat_2d_cv_int.x < this->_camera_info_ptr->width
152  && predicted_feat_2d_cv_int.y >= 0 && predicted_feat_2d_cv_int.y < this->_camera_info_ptr->height)
153  {
154  this->_predicted_measurements[1][predicted_feat_idx] = predicted_feat_2d_cv_int;
155  // The radius of the circle in the mask is inversely proportional to the depth of the predicted location
156  // Motion of close 3-D points (small depth) translates into large motion in the image plane -> large searching area in
157  // the RGB image for the feature
158  // Motion of distant 3-D points (large depth) translates into smaller motion in the image plane -> small searching area
159  // in the RGB image for the feature
160  // TODO: There are two other variables that should be taken into account when defining the searching area:
161  // 1: Uncertainty in the RBM: If the motion is very uncertain, the searching area should be larger. If the motion is
162  // very certain, the feature location should be very close to the predicted one
163  // 2: Velocity of the RB: Large velocities are more difficult to track, and thus, the searching area should be larger.
164  correction_search_area_radius = std::max( 3, (int)(8.0 / predicted_distance_to_sensor));
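// e.g. a feature predicted at 0.5 m depth gets a 16-pixel search radius, one at 2 m gets 4 pixels,
// and anything beyond ~2.7 m falls back to the 3-pixel minimum.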
165  cv::circle(this->_predicted_feats_msk_mat, predicted_feat_2d_cv_int, correction_search_area_radius, CV_RGB(255, 255, 255), -1, 8, 0);
166 
167  }
168  }
169  }
170  }
171 }
172 
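For reference, a standalone sketch of the projection step used above (not part of the original file); the intrinsics fx = fy = 525, cx = 320, cy = 240 are illustrative values only, not taken from this code:

#include <cstdio>
#include <Eigen/Dense>

int main()
{
    // Illustrative 3x4 pinhole projection matrix P, laid out as in sensor_msgs::CameraInfo
    Eigen::Matrix<double, 3, 4> P;
    P << 525.0,   0.0, 320.0, 0.0,
           0.0, 525.0, 240.0, 0.0,
           0.0,   0.0,   1.0, 0.0;

    Eigen::Vector4d feat_3d(0.1, 0.0, 1.0, 1.0); // homogeneous 3-D feature location (metres)
    Eigen::Vector3d feat_2d = P * feat_3d;       // homogeneous image coordinates
    double u = feat_2d.x() / feat_2d.z();        // pixel column
    double v = feat_2d.y() / feat_2d.z();        // pixel row
    std::printf("%.1f %.1f\n", u, v);            // prints 372.5 240.0
    return 0;
}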
173 void PointFeatureTracker::correctState()
174 {
175  this->_ProcessOcclusionMaskImg();
176 
177  this->_ProcessDepthImg();
178 
179  this->_ProcessRGBImg();
180 
181  if (this->_first_frame)
182  {
183  // Detect new features in the image
184  this->_DetectNewFeatures();
185  this->_first_frame = false;
186  }
187  else
188  {
189  this->_TrackFeatures();
190  }
191 
192  // Copy the result
194 
198 
199  for (size_t num_feats = 0; num_feats < this->_number_of_features; num_feats++)
200  {
201  this->_state->points[num_feats].label = this->_current_feat_ids_v[num_feats];
202  this->_state->points[num_feats].x = this->_corrected_belief_state[num_feats].x;
203  this->_state->points[num_feats].y = this->_corrected_belief_state[num_feats].y;
204  this->_state->points[num_feats].z = this->_corrected_belief_state[num_feats].z;
205 
206  // Covariance
207  //
208  }
209  this->_frame_counter++;
210 }
211 
212 void PointFeatureTracker::addPredictedState(const ft_state_t &predicted_state, const double& predicted_state_timestamp_ns)
213 {
214  this->_predicted_states[1] = predicted_state;
215  this->_predicted_state_rcvd_time_ns = predicted_state_timestamp_ns;
216 }
217 
218 void PointFeatureTracker::setFullRGBDPC(PointCloudPCL::ConstPtr full_rgb_pc)
219 {
220  this->_received_point_cloud = full_rgb_pc;
221 }
222 
223 void PointFeatureTracker::setOcclusionMaskImg(cv_bridge::CvImagePtr occ_mask_img)
224 {
225  this->_occlusion_msk_img_ptr = occ_mask_img;
226  this->_occlusion_msk_img_rcvd = true;
227 }
228 
229 void PointFeatureTracker::_ProcessDepthImg()
230 {
231  this->_depth_img_ptr = this->_measurement.second;
232 
233  // Convert from sensor_msg to cv image
234  // 1) Create matrices
235  this->_depth_mat = cv::Mat(this->_depth_img_ptr->image.rows, this->_depth_img_ptr->image.cols, CV_8UC1);
236  this->_aux1_mat = cv::Mat(this->_depth_img_ptr->image.rows, this->_depth_img_ptr->image.cols, CV_8UC1);
237 
238  // 2) Find the minimum and maximum distances
239  // Exclude points that contain infinity values
240  double minVal, maxVal;
241  cv::Point minpt,maxpt;
242 
243 #if(CV_MAJOR_VERSION == 2)
244  if(this->_depth_img_ptr->encoding == "32FC1")
245  {
246  float infinity = std::numeric_limits<float>::infinity();
247  cv::minMaxLoc(this->_depth_img_ptr->image, &minVal, &maxVal, &minpt, &maxpt, this->_depth_img_ptr->image != infinity);
248  }else{
249  uint16_t infinity = std::numeric_limits<uint16_t>::infinity();
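// Note: numeric_limits<uint16_t>::infinity() is 0 for integral types, so this mask simply
// excludes pixels with a zero (invalid) depth reading.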
250  cv::minMaxLoc(this->_depth_img_ptr->image, &minVal, &maxVal, &minpt, &maxpt, this->_depth_img_ptr->image != infinity);
251  }
252 #elif(CV_MAJOR_VERSION==3)
253  cv::minMaxLoc(this->_depth_img_ptr->image, &minVal, &maxVal, &minpt, &maxpt, cv::Mat(this->_depth_img_ptr->image == this->_depth_img_ptr->image) );
254 #else
255 #error CV major version is not 2 or 3
256 #endif
257 
258  // If a maximum distance was configured, use it to clip the depth range
259  if(this->_max_allowed_depth > 0)
260  {
261  maxVal = std::min(maxVal, this->_max_allowed_depth);
262  }
263 
264  //TODO: Sometimes there are noisy points very close to the sensor. If we used the minimum value of the image we would
265  // stretch the value range towards this noisy minimum -> ignore it
266  // If a minimum distance was configured, use it instead
267  if(this->_min_allowed_depth > 0)
268  {
269  minVal = std::max(this->_min_allowed_depth, minVal);
270  }
271 
272  this->_depth_img_ptr->image.convertTo(this->_aux1_mat, CV_8U, 255.0/(maxVal - minVal), -minVal * 255.0/(maxVal - minVal));
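// convertTo applies scaled = alpha*depth + beta with alpha = 255/(maxVal - minVal) and
// beta = -minVal*alpha, i.e. depths in [minVal, maxVal] are mapped linearly to [0, 255].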
273 
274  this->_aux1_mat.setTo(0, this->_depth_img_ptr->image > maxVal);
275 
276  this->_aux1_mat.setTo(0, this->_depth_img_ptr->image < minVal);
277 
278 
279  // Filter out large distances, missing points and edges
280  // _depth_mat contains 255 for all values that are not 0
281  cv::threshold(this->_aux1_mat, this->_depth_mat, 0, 255, cv::THRESH_BINARY);
282 
283  // Reduce noise with a 3x3 kernel
284  cv::blur(this->_aux1_mat, this->_aux1_mat, cv::Size(3, 3));
285 
286 
287  // Detect Canny edges
288  cv::Canny(this->_aux1_mat, this->_canny_edges_mat,
289  this->_canny_low_threshold,
290  this->_canny_low_threshold * this->_canny_ratio,
291  this->_canny_kernel_size, false);
292 
293  // Invert the resulting edges: edges become 0, non-edges become 255
294  cv::threshold(this->_canny_edges_mat, this->_canny_edges_mat, 0, 255, cv::THRESH_BINARY_INV);
295 
296  cv::bitwise_and(this->_depth_mat, this->_canny_edges_mat,this->_feats_detection_msk_mat);
297 
298  // If the occlusion mask was received, we merge it with the estimated mask
299  if (this->_occlusion_msk_img_rcvd)
300  {
301  cv::bitwise_and(this->_feats_detection_msk_mat, this->_occlusion_msk_mat, this->_feats_detection_msk_mat);
302  }
303 
304  // Erode EROSION_SIZE_TRACKING pixels for the tracking mask
305  cv::erode(this->_feats_detection_msk_mat, this->_feats_tracking_msk_mat, this->_erosion_element_tracking_mat);
306 
307  for (int n = 0; n < this->_corrected_measurement.size(); n++)
308  {
309  if (this->_feat_status_v[n])
310  {
311  cv::circle(this->_feats_detection_msk_mat, this->_corrected_measurement[n], 2, CV_RGB(0, 0, 0), -1, 8, 0);
312  }
313  }
314 
315  // Erode EROSION_SIZE_DETECTING pixels for the detecting mask
316  cv::erode(this->_feats_detection_msk_mat, this->_feats_detection_msk_mat, this->_erosion_element_detecting_mat);
317 
318  // Focus the detection of new features on the areas of the image where the depth changed
319  // TODO: Detect also areas of color changes
320  if(this->_attention_to_motion)
321  {
322  // First time
324  {
325  this->_previous_depth_mat = this->_depth_img_ptr->image;
327  }
328  else
329  {
331  {
332  cv::Mat depth_difference;
333 
334  // Absolute difference between the previous and the current depth maps
335  cv::absdiff(this->_depth_img_ptr->image, this->_previous_depth_mat, depth_difference);
336 
337  // Threshold the absolute difference against the minimum depth change required to consider a pixel as moving
338  if(this->_depth_img_ptr->encoding == "32FC1")
339  {
340  this->_depth_difference_mask = (depth_difference > (double)(this->_min_depth_difference));
341  }else{
342  this->_depth_difference_mask = (depth_difference > (uint16_t)(this->_min_depth_difference*1000));
343  }
344 
345 
346  // Set to zero areas close to edges
347  cv::bitwise_and(this->_feats_tracking_msk_mat, this->_depth_difference_mask, this->_depth_difference_mask);
348 
349  int area_size = cv::countNonZero(this->_depth_difference_mask);
350 
351  if(area_size > this->_min_area_size_pixels)
352  {
354  this->_use_motion_mask = true;
355  }
356  else
357  {
358  this->_use_motion_mask = false;
359  }
360 
362  this->_previous_depth_mat = this->_depth_img_ptr->image;
363  }
364  else if(this->_use_motion_mask)
365  {
367  }
368  }
369  }
370 }
371 
372 void PointFeatureTracker::_ProcessRGBImg()
373 {
374  this->_rgb_img_ptr = this->_measurement.first;
375  this->_current_bw_img_ptr = cv_bridge::cvtColor(this->_rgb_img_ptr, "mono8");
376  _measurement_timestamp_ns = (double)this->_rgb_img_ptr->header.stamp.toNSec();
377 }
378 
379 void PointFeatureTracker::_ProcessOcclusionMaskImg()
380 {
381  if (this->_occlusion_msk_img_rcvd)
382  {
383  // Create a mask by thresholding the image
384  if(this->_so_positive)
385  _occlusion_msk_mat = this->_occlusion_msk_img_ptr->image == 0;
386  else
387  _occlusion_msk_mat = this->_occlusion_msk_img_ptr->image != 0;
388  }
389 }
390 
391 void PointFeatureTracker::setCameraInfoMsg(const sensor_msgs::CameraInfo* camera_info)
392 {
393  this->_camera_info_ptr = sensor_msgs::CameraInfo::Ptr(new sensor_msgs::CameraInfo(*camera_info));
394 
395  if(this->_predicted_feats_msk_mat.cols != this->_camera_info_ptr->width || this->_predicted_feats_msk_mat.rows != this->_camera_info_ptr->height)
396  {
397  ROS_ERROR_STREAM_NAMED("PointFeatureTracker.setCameraInfoMsg", "Resizing predicted features mask!");
398  this->_predicted_feats_msk_mat = cv::Mat::zeros(this->_camera_info_ptr->height, this->_camera_info_ptr->width, CV_8U);
399  }
400 
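// P is the row-major 3x4 projection matrix from sensor_msgs/CameraInfo:
//   [fx  0 cx Tx]
//   [ 0 fy cy Ty]
//   [ 0  0  1  0]
// It is copied element by element into the Eigen matrix used to project predicted features.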
401  this->_image_plane_proj_mat_eigen(0, 0) = this->_camera_info_ptr->P[0]; // For carmine + 32;
402  this->_image_plane_proj_mat_eigen(0, 1) = this->_camera_info_ptr->P[1];
403  this->_image_plane_proj_mat_eigen(0, 2) = this->_camera_info_ptr->P[2];
404  this->_image_plane_proj_mat_eigen(0, 3) = this->_camera_info_ptr->P[3];
405  this->_image_plane_proj_mat_eigen(1, 0) = this->_camera_info_ptr->P[4];
406  this->_image_plane_proj_mat_eigen(1, 1) = this->_camera_info_ptr->P[5]; // For carmine + 32;
407  this->_image_plane_proj_mat_eigen(1, 2) = this->_camera_info_ptr->P[6];
408  this->_image_plane_proj_mat_eigen(1, 3) = this->_camera_info_ptr->P[7];
409  this->_image_plane_proj_mat_eigen(2, 0) = this->_camera_info_ptr->P[8];
410  this->_image_plane_proj_mat_eigen(2, 1) = this->_camera_info_ptr->P[9];
411  this->_image_plane_proj_mat_eigen(2, 2) = this->_camera_info_ptr->P[10];
412  this->_image_plane_proj_mat_eigen(2, 3) = this->_camera_info_ptr->P[11];
413  this->_camera_info_msg_rcvd = true;
414 }
415 
416 ft_state_t PointFeatureTracker::getState() const
417 {
418  return this->_state;
419 }
420 
421 cv_bridge::CvImagePtr PointFeatureTracker::getTrackedFeaturesImg()
422 {
423  if (this->_publishing_tracked_feats_img)
424  {
425  this->_rgb_img_ptr->image.copyTo(this->_tracked_features_img->image);
426  this->_tracked_features_img->encoding = this->_rgb_img_ptr->encoding;
427  this->_tracked_features_img->header.stamp = ros::Time::now();
428 
429  for (size_t tracked_feats_idx = 0; tracked_feats_idx < this->_corrected_measurement.size(); tracked_feats_idx++)
430  {
431  cv::Point p = cvPoint(cvRound(this->_corrected_measurement[tracked_feats_idx].x),
432  cvRound(this->_corrected_measurement[tracked_feats_idx].y));
433 
434  // If the feature has been correctly tracked we paint a yellow cross
435  if (this->_feat_status_v[tracked_feats_idx])
436  {
437  //cv::circle(this->_cv_ptr_rgb->image, p,3, CV_RGB(255, 255, 255), -1, 8, 0);
438  cv::line(this->_tracked_features_img->image, cv::Point(p.x, p.y + 3),
439  cv::Point(p.x, p.y - 3), CV_RGB(0, 255, 255), 1, 8);
440  cv::line(this->_tracked_features_img->image, cv::Point(p.x + 3, p.y),
441  cv::Point(p.x - 3, p.y), CV_RGB(0, 255, 255), 1, 8);
442  }
443  // If the feature has been lost, we paint a red circle
444  else
445  {
446  cv::circle(this->_tracked_features_img->image, p, 5, CV_RGB(0, 0, 255),-1, 8, 0);
447  }
448  }
449  }
450  return this->_tracked_features_img;
451 }
452 
453 cv_bridge::CvImagePtr PointFeatureTracker::getRGBImg()
454 {
455  return this->_rgb_img_ptr;
456 }
457 
458 cv_bridge::CvImagePtr PointFeatureTracker::getDepthImg()
459 {
460  return this->_depth_img_ptr;
461 }
462 
463 cv_bridge::CvImagePtr PointFeatureTracker::getTrackedFeaturesWithPredictionMaskImg()
464 {
465  if (this->_publishing_tracked_feats_with_pred_msk_img)
466  {
467  this->_tracked_features_with_pred_msk_img->image.setTo(0);
468  this->_tracked_features_with_pred_msk_img->header.stamp = ros::Time::now();
469  this->_tracked_features_with_pred_msk_img->encoding = this->_tracked_features_img ->encoding;
470  this->_tracked_features_img->image.copyTo(this->_tracked_features_with_pred_msk_img->image, this->_predicted_feats_msk_mat);
471  }
472  return this->_tracked_features_with_pred_msk_img;
473 }
474 
475 cv_bridge::CvImagePtr PointFeatureTracker::getDepthEdgesImg()
476 {
477  this->_depth_edges_img->header.stamp = ros::Time::now();
478  this->_depth_edges_img->encoding = "mono8";
479  this->_depth_edges_img->image = this->_canny_edges_mat;
480  return this->_depth_edges_img;
481 }
482 
483 cv_bridge::CvImagePtr PointFeatureTracker::getPredictingMaskImg()
484 {
485  this->_predicting_mask_img->header.stamp = ros::Time::now();
486  this->_predicting_mask_img->encoding = "mono8";
487  this->_predicting_mask_img->image = this->_predicted_feats_msk_mat;
488  return this->_predicting_mask_img;
489 }
490 
491 cv_bridge::CvImagePtr PointFeatureTracker::getTrackingMaskImg()
492 {
493  this->_tracking_mask_img->header.stamp = ros::Time::now();
494  this->_tracking_mask_img->encoding = "mono8";
495  this->_tracking_mask_img->image = this->_feats_tracking_msk_mat;
496  return this->_tracking_mask_img;
497 }
498 
499 cv_bridge::CvImagePtr PointFeatureTracker::getDetectingMaskImg()
500 {
501  this->_detecting_mask_img->header.stamp = ros::Time::now();
502  this->_detecting_mask_img->encoding = "mono8";
503  this->_detecting_mask_img->image = this->_feats_detection_msk_mat;
504  return this->_detecting_mask_img;
505 }
506 
507 cv_bridge::CvImagePtr PointFeatureTracker::getPredictedAndLastFeaturesImg()
508 {
509  if (this->_publishing_predicted_and_past_feats_img)
510  {
511  this->_rgb_img_ptr->image.copyTo(this->_predicted_and_previous_features_img->image);
512  this->_predicted_and_previous_features_img->encoding = this->_rgb_img_ptr->encoding;
514 
515  //
516  for (size_t tracked_feats_idx = 0;tracked_feats_idx < this->_predicted_measurements[0].size(); tracked_feats_idx++)
517  {
518  cv::Point previous = cvPoint(cvRound(this->_predicted_measurements[0][tracked_feats_idx].x),
519  cvRound(this->_predicted_measurements[0][tracked_feats_idx].y));
520  cv::Point predicted = cvPoint(cvRound(this->_predicted_measurements[1][tracked_feats_idx].x),
521  cvRound(this->_predicted_measurements[1][tracked_feats_idx].y));
522 
523  if (this->_feat_status_v[tracked_feats_idx])
524  {
525  //cv::circle(this->_cv_ptr_rgb->image, p,3, CV_RGB(255, 255, 255), -1, 8, 0);
526  cv::line(this->_predicted_and_previous_features_img->image, cv::Point(previous.x, previous.y + 4),
527  cv::Point(previous.x, previous.y - 4), CV_RGB(0, 255, 255), 1, 8);
528  cv::line(this->_predicted_and_previous_features_img->image, cv::Point(previous.x + 4, previous.y),
529  cv::Point(previous.x - 4, previous.y), CV_RGB(0, 255, 255), 1, 8);
530 
531  // The next two lines draw a blue cross at the predicted feature location
532  cv::line(this->_predicted_and_previous_features_img->image,cv::Point(predicted.x, predicted.y + 4),
533  cv::Point(predicted.x, predicted.y - 4),CV_RGB(255, 0, 0), 1, 8);
534  cv::line(this->_predicted_and_previous_features_img->image,cv::Point(predicted.x + 4, predicted.y),
535  cv::Point(predicted.x - 4, predicted.y),CV_RGB(255, 0, 0), 1, 8);
536  }
537  else
538  {
539  cv::circle(this->_predicted_and_previous_features_img->image, previous, 5, CV_RGB(0, 0, 255),
540  -1, 8, 0);
541  }
542  }
543  }
544  return this->_predicted_and_previous_features_img;
545 }
546 
547 void PointFeatureTracker::setSelfOcclusionPositive(bool so_positive)
548 {
549  this->_so_positive = so_positive;
550 }
551 
552 void PointFeatureTracker::_ReadParameters()
553 {
554  this->_node_handle.getParam(this->_ft_ns + std::string("/max_distance"), this->_max_allowed_depth);
555  this->_node_handle.getParam(this->_ft_ns + std::string("/min_distance"), this->_min_allowed_depth);
556  this->_node_handle.getParam(this->_ft_ns + std::string("/number_features"), this->_number_of_features);
557  this->_node_handle.getParam(this->_ft_ns + std::string("/min_number_features"), this->_min_number_of_features);
558  this->_node_handle.getParam(this->_ft_ns + std::string("/min_feat_quality"), this->_min_feat_quality);
559  this->_node_handle.getParam(this->_ft_ns + std::string("/max_interframe_jump"), this->_max_interframe_jump);
560  this->_node_handle.getParam(this->_ft_ns + std::string("/max_distance"), this->_max_allowed_depth);
561 
562  this->_node_handle.getParam(this->_ft_ns + std::string("/pub_tracked_feats_with_pred_mask_img"), this->_publishing_tracked_feats_with_pred_msk_img);
563  this->_node_handle.getParam(this->_ft_ns + std::string("/pub_tracked_feats_img"), this->_publishing_tracked_feats_img);
564  this->_node_handle.getParam(this->_ft_ns + std::string("/pub_predicted_and_past_feats_img"), this->_publishing_predicted_and_past_feats_img);
565 
566  this->_node_handle.getParam(std::string("/omip/sensor_fps"), this->_sensor_fps);
567  this->_node_handle.getParam(std::string("/omip/processing_factor"), this->_processing_factor);
568  this->_loop_period_ns = 1e9/(this->_sensor_fps/(double)this->_processing_factor);
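// e.g. sensor_fps = 30 and processing_factor = 3 give a loop period of 1e8 ns, i.e. a 10 Hz loop.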
569 
570  this->_node_handle.getParam(this->_ft_ns + std::string("/erosion_size_detect"), this->_erosion_size_detecting);
571  this->_node_handle.getParam(this->_ft_ns + std::string("/erosion_size_track"), this->_erosion_size_tracking);
572 
573  this->_node_handle.getParam(this->_ft_ns + std::string("/attention_to_motion"),this->_attention_to_motion);
574  this->_node_handle.getParam(this->_ft_ns + std::string("/min_time_to_detect_motion"),this->_min_time_to_detect_motion);
575  this->_node_handle.getParam(this->_ft_ns + std::string("/min_depth_difference"),this->_min_depth_difference);
576  this->_node_handle.getParam(this->_ft_ns + std::string("/min_area_size_pixels"),this->_min_area_size_pixels);
577 
578 ROS_INFO_STREAM_NAMED(
579  "PointFeatureTracker::ReadParameters",
580  "PointFeatureTracker Parameters: " << std::endl <<
581  "\tMaximum distance: " << this->_max_allowed_depth << std::endl <<
582  "\tMinimum distance: " << this->_min_allowed_depth << std::endl <<
583  "\tMaximum allowed 3D motion of features between consecutive frames: " << this->_max_interframe_jump << std::endl <<
584  "\tNumber of tracked features: " << this->_number_of_features << std::endl <<
585  "\tMinimum number of tracked features: " << this->_min_number_of_features << std::endl <<
586  "\tMinimum feature quality: " << this->_min_feat_quality << std::endl <<
587  "\tMax framerate (factor): " << this->_sensor_fps << "(" << this->_processing_factor << ")" << std::endl <<
588  "\tSize of the erosion for detecting: " << this->_erosion_size_detecting << std::endl <<
589  "\tSize of the erosion for tracking: " << this->_erosion_size_tracking << std::endl <<
590  "\tFocussing attention into depth-changing areas: " << this->_attention_to_motion << std::endl <<
591  "\tMinimum time between frames to detect depth-changing areas: " << this->_min_time_to_detect_motion << std::endl <<
592  "\tMinimum depth change to consider a pixel as changing: " << this->_min_depth_difference << std::endl <<
593  "\tMinimum area size to use the attention mechanism: " << this->_min_area_size_pixels << std::endl <<
594  "\tPublishing image of the tracked feats: " << this->_publishing_tracked_feats_img << std::endl <<
595  "\tPublishing image of the tracked feats with prediction mask: " << this->_publishing_tracked_feats_with_pred_msk_img << std::endl <<
596  "\tPublishing image of the predicted and the past features: " << this->_publishing_predicted_and_past_feats_img);
597 }
598 
599 void PointFeatureTracker::_InitializeVariables()
600 {
601  this->_filter_name = "FTFilter";
602  this->_camera_info_ptr = sensor_msgs::CameraInfo::Ptr(new sensor_msgs::CameraInfo());
603  this->_image_plane_proj_mat_eigen = Eigen::Matrix<double, 3, 4>::Zero();
604  this->_erosion_element_detecting_mat = cv::getStructuringElement(cv::MORPH_RECT,
605  cv::Size(2 * this->_erosion_size_detecting + 1,
606  2 * this->_erosion_size_detecting + 1),
607  cv::Point(this->_erosion_size_detecting, this->_erosion_size_detecting));
608 
609  this->_erosion_element_tracking_mat = cv::getStructuringElement(cv::MORPH_RECT,
610  cv::Size(2 * this->_erosion_size_tracking + 1,
611  2 * this->_erosion_size_tracking + 1),
612  cv::Point(this->_erosion_size_tracking, this->_erosion_size_tracking));
613 
614  this->_canny_low_threshold = 20;
615  this->_canny_kernel_size = 3;
616  this->_canny_ratio = 3;
617 
618  // Initialize all vectors
619  this->_corrected_measurement.resize(this->_number_of_features,cv::Point2f(-1., -1.));
620  this->_corrected_belief_state.resize(this->_number_of_features,cv::Point3d(-1., -1., -1.));
621  this->_previous_belief_state.resize(this->_number_of_features,cv::Point3d(-1., -1., -1.));
622  this->_current_feat_ids_v.resize(this->_number_of_features, 0);
623  this->_feat_status_v.resize(this->_number_of_features, false);
625 
626  // Initialize pcl point clouds
627  FeaturePCLwc init_feature = FeaturePCLwc();
628  init_feature.x = -1.f;
629  init_feature.y = -1.f;
630  init_feature.z = -1.f;
631  init_feature.label = 0;
632  this->_state = ft_state_t(new FeatureCloudPCLwc());
633  this->_state->points.resize(this->_number_of_features, init_feature);
634 
635  // The predicted states are always 2
636  // The first element of the predicted states is obtained from the internal state model that predicts that the locations of the
637  // features in the next step will be the locations of the features in the last step
638  this->_predicted_states.resize(2);
639  this->_predicted_states[0] = this->_state;
640  // The second element of the predicted states comes from the higher level (RBT)
641 
642  this->_predicted_measurements.resize(2);
643 
651 
653 }
654 
655 void PointFeatureTracker::_EstimateDetectionMask()
656 {
657  for (int n = 0; n < this->_corrected_measurement.size(); n++)
658  {
659  if (this->_feat_status_v[n])
660  {
661  cv::circle(this->_feats_detection_msk_mat, this->_corrected_measurement[n], 2*_erosion_size_detecting, CV_RGB(0, 0, 0), -1, 8, 0);
662  }
663  }
664 }
665 
666 void PointFeatureTracker::_DetectNewFeatures()
667 {
668  int detected_good_features = 0;
669  double min_distance = 30;
670  double min_feat_quality = _min_feat_quality;
671  int num_new_features_to_detect = std::count(this->_feat_status_v.begin(), this->_feat_status_v.end(), false);
672 
673  // Repeat the detection until the desired minimum number of features is detected (maximum 5 times)
674  int repetition_counter = 0;
675  do
676  {
677  _min_feat_quality = min_feat_quality;
678  this->_EstimateDetectionMask();
679 
680  // If we try to detect very few features (1 or 2), the probability is high that we cannot retrieve their 3D locations;
681  // then, even if we reduce the necessary quality level, we do not get valid features
682  num_new_features_to_detect = std::max(2*num_new_features_to_detect, 40);
683  std::vector<cv::Point2f> new_features;
684 
685  // DETECTION /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
686  cv::goodFeaturesToTrack(this->_current_bw_img_ptr->image, new_features, num_new_features_to_detect, min_feat_quality, min_distance, this->_feats_detection_msk_mat);
687 
688  // cv::imshow("a", this->_current_bw_img_ptr->image);
689  // cv::imshow("b", this->_feats_detection_msk_mat);
690  // cv::waitKey(-1);
691 
692  std::vector<bool> new_feats_status;
693  new_feats_status.resize(num_new_features_to_detect, true);
694  std::vector<cv::Point3d> new_features_3d;
695  new_features_3d.resize(num_new_features_to_detect, cv::Point3d(-1., -1., -1.));
696 
697  this->_Retrieve3DCoords(new_features, new_feats_status, new_features_3d);
698 
699  for (int old_feats_idx = 0; old_feats_idx < this->_number_of_features; old_feats_idx++)
700  {
701  if (!this->_feat_status_v[old_feats_idx])
702  {
703  for (size_t new_feats_idx = 0; new_feats_idx < new_features.size();new_feats_idx++)
704  {
705  if (new_feats_status[new_feats_idx])
706  {
707  this->_current_feat_ids_v[old_feats_idx] = this->_feature_id_counter++;
708  this->_corrected_measurement[old_feats_idx] = new_features[new_feats_idx];
709  this->_corrected_belief_state[old_feats_idx] = new_features_3d[new_feats_idx];
710  this->_feat_status_v[old_feats_idx] = true;
711  new_feats_status[new_feats_idx] = false;
712  detected_good_features++;
713  break;
714  }
715  }
716  }
717  }
718  min_feat_quality /= 2.0;
719  min_feat_quality = std::max(min_feat_quality, 0.0001);
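// Each retry halves the required corner quality (floored at 1e-4) so that weaker corners are
// accepted when the detector could not find enough valid features.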
720  num_new_features_to_detect = std::count(this->_feat_status_v.begin(), this->_feat_status_v.end(), false);
721  repetition_counter++;
722  }
723  while (num_new_features_to_detect > (this->_number_of_features - this->_min_number_of_features) && repetition_counter < 5);
724 }
725 
726 void PointFeatureTracker::_TrackFeatures()
727 {
728  std::vector<std::vector<uchar> > feat_tracked;
729  feat_tracked.resize(2);
730 
731  _feat_quality.clear();
732  _feat_quality.resize(2);
733 
734  // Track using the previous locations
735  cv::calcOpticalFlowPyrLK(
736  this->_previous_bw_img_ptr->image,
737  this->_current_bw_img_ptr->image,
738  this->_previous_measurement,
739  this->_predicted_measurements[0],
740  feat_tracked[0],
741  _feat_quality[0],
742  cv::Size(7, 7),
743  1,
744  cv::TermCriteria(cv::TermCriteria::COUNT + cv::TermCriteria::EPS,
745  30,
746  0.01),
747  cv::OPTFLOW_USE_INITIAL_FLOW + cv::OPTFLOW_LK_GET_MIN_EIGENVALS,
748  _min_feat_quality/2.0);
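// OPTFLOW_USE_INITIAL_FLOW starts the search at the locations already stored in _predicted_measurements[0];
// OPTFLOW_LK_GET_MIN_EIGENVALS makes _feat_quality hold the minimum eigenvalue of the gradient matrix
// (smaller = worse corner), which is also what the last argument (_min_feat_quality/2.0) thresholds.
// The prediction-based call below uses the same settings.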
749 
750  int num_lost_features_tracking = 0;
751 
752  // If we had a recent prediction about the next feature locations
753  bool received_useful_prediction = false;
754  std::vector<bool> using_predicted_location;
755  using_predicted_location.resize(this->_number_of_features, false);
756  double time_since_last_prediction = this->_measurement_timestamp_ns - _predicted_state_rcvd_time_ns;
757  if(time_since_last_prediction < this->_loop_period_ns/2.0)
758  {
759  received_useful_prediction = true;
760  // Track using the predictions
761  cv::calcOpticalFlowPyrLK(
762  this->_previous_bw_img_ptr->image,
763  this->_current_bw_img_ptr->image,
764  this->_previous_measurement,
765  this->_predicted_measurements[1],
766  feat_tracked[1],
767  _feat_quality[1],
768  cv::Size(7, 7),
769  1,
770  cv::TermCriteria(cv::TermCriteria::COUNT + cv::TermCriteria::EPS,
771  30,
772  0.01),
773  cv::OPTFLOW_USE_INITIAL_FLOW + cv::OPTFLOW_LK_GET_MIN_EIGENVALS,
774  _min_feat_quality/2.0);
775 
776  // Select the tracked location with lower error
777  int using_pred = 0;
778  int using_prev = 0;
779  for (size_t feat_idx = 0; feat_idx < this->_number_of_features; feat_idx++)
780  {
781  // Both trackers found the point -> Select the one with lower error
782  if (feat_tracked[0][feat_idx] && feat_tracked[1][feat_idx])
783  {
784  this->_corrected_measurement[feat_idx] =
785  (_feat_quality[1][feat_idx] <= _feat_quality[0][feat_idx] ? this->_predicted_measurements[1][feat_idx] : this->_predicted_measurements[0][feat_idx]);
786 
787  // Count the tracked point used
788  _feat_quality[1][feat_idx] <= _feat_quality[0][feat_idx] ? using_pred++ : using_prev++;
789  }
790  // Only the tracker with predictions could track it
791  else if (feat_tracked[1][feat_idx])
792  {
793  this->_corrected_measurement[feat_idx] = this->_predicted_measurements[1][feat_idx];
794  using_predicted_location[feat_idx] = true;
795  }
796  // Only the tracker with previous could track it
797  else if (feat_tracked[0][feat_idx])
798  {
799  this->_corrected_measurement[feat_idx] = this->_predicted_measurements[0][feat_idx];
800  }
801  // Both lost the feature
802  else
803  {
804  this->_current_feat_ids_v[feat_idx] = 0;
805  this->_feat_status_v[feat_idx] = false;
806  num_lost_features_tracking++;
807  }
808  }
809  ROS_INFO_STREAM_NAMED("PointFeatureTracker._TrackFeatures","Using " << using_pred << " predicted and " << using_prev << " previous Locations.");
810  }else{
811 
812  ROS_ERROR_STREAM_NAMED("PointFeatureTracker.measurementCallback",
813  "The prediction from higher level can NOT be used to predict next measurement before correction. Delay: "
814  << (double)(time_since_last_prediction)/1e9);
815  for (size_t feat_idx = 0; feat_idx < this->_number_of_features; feat_idx++)
816  {
817  // If the tracker with previous could track it
818  if (feat_tracked[0][feat_idx])
819  {
820  this->_corrected_measurement[feat_idx] = this->_predicted_measurements[0][feat_idx];
821  }
822  // Both lost the feature
823  else
824  {
825  this->_current_feat_ids_v[feat_idx] = 0;
826  this->_feat_status_v[feat_idx] = false;
827  num_lost_features_tracking++;
828  }
829  }
830 
831  }
832 
833  // 1) Check if the tracked features are in a tracking allowed region and if they could be tracked
834  int num_lost_features_mask = 0;
835  for (size_t feat_idx = 0; feat_idx < this->_number_of_features; feat_idx++)
836  {
837  if (this->_feat_status_v[feat_idx])
838  {
839  if (!this->_feats_tracking_msk_mat.at<uchar>(this->_corrected_measurement[feat_idx].y,this->_corrected_measurement[feat_idx].x))
840  {
841  this->_current_feat_ids_v[feat_idx] = 0;
842  this->_feat_status_v[feat_idx] = false;
843  num_lost_features_mask++;
844  }
845  }
846  }
847 
848  // 2) If we are using predictions: check if the tracked features are in a predicted region
849  int num_lost_features_prediction_mask = 0;
850  if(received_useful_prediction)
851  {
852  for (size_t feat_idx = 0; feat_idx < this->_number_of_features; feat_idx++)
853  {
854  if (this->_feat_status_v[feat_idx]
855  && std::find(this->_received_prediction_ids.begin(), this->_received_prediction_ids.end(), this->_current_feat_ids_v[feat_idx])!= this->_received_prediction_ids.end()
856  && using_predicted_location[feat_idx])
857  {
858  if (!this->_predicted_feats_msk_mat.at<uchar>(this->_corrected_measurement[feat_idx].y, this->_corrected_measurement[feat_idx].x))
859  {
860  this->_current_feat_ids_v[feat_idx] = 0;
861  this->_feat_status_v[feat_idx] = false;
862  num_lost_features_prediction_mask++;
863  }
864  }
865  }
866  }
867 
868  int num_lost_features_retrieve_3d = this->_Retrieve3DCoords(this->_corrected_measurement, this->_feat_status_v, this->_corrected_belief_state);
869 
870  int num_lost_features_compare_3d = this->_Compare3DCoords();
871 
872  ROS_INFO_NAMED("FeatureTracker._TrackFeatures",
873  "Lost features: %3d (%3d because of tracking,%3d because of masking,%3d because of out of prediction,%3d because of not having 3D data,%3d because of jumping in 3D)",
874  num_lost_features_tracking + num_lost_features_mask + num_lost_features_prediction_mask + num_lost_features_retrieve_3d + num_lost_features_compare_3d,
875  num_lost_features_tracking,
876  num_lost_features_mask,
877  num_lost_features_prediction_mask,
878  num_lost_features_retrieve_3d ,
879  num_lost_features_compare_3d);
880 
881  this->_DetectNewFeatures();
882 }
883 
884 int PointFeatureTracker::_Retrieve3DCoords(std::vector<cv::Point2f>& points_in_2d, std::vector<bool>& points_status, std::vector<cv::Point3d>& points_in_3d)
885 {
886  int features_without_3d_ctr = 0;
887  double x_coord, y_coord, z_coord;
888  for (size_t features_idx = 0; features_idx < points_in_2d.size();
889  features_idx++)
890  {
891  if (points_status[features_idx])
892  {
893 
894  if (this->_using_pc)
895  {
896  // Get the pixel index
897  int idx = (cvRound(points_in_2d[features_idx].y) * this->_received_point_cloud->width + cvRound(points_in_2d[features_idx].x));
898 
899  x_coord = this->_received_point_cloud->points[idx].x;
900  y_coord = this->_received_point_cloud->points[idx].y;
901  z_coord = this->_received_point_cloud->points[idx].z;
902  }
903  else
904  {
905  cv::Point p = cvPoint(cvRound(points_in_2d[features_idx].x),cvRound(points_in_2d[features_idx].y));
906 
907  // get raw z value
908  float z_raw_float = 0.0;
909  uint16_t z_raw_uint = 0;
910  if(this->_depth_img_ptr->encoding == "32FC1")
911  {
912  z_raw_float = this->_depth_img_ptr->image.at<float>(p);
913  }else{
914  z_raw_uint = this->_depth_img_ptr->image.at<uint16_t>(p);
915  }
916 
917  if ((z_raw_float != 0 && std::isfinite(z_raw_float))
918  || ( z_raw_uint != 0 && std::isfinite(z_raw_uint)))
919  {
920  /* K = Intrinsic camera matrix for the raw (distorted) images.
921  [fx 0 cx]
922  K = [ 0 fy cy]
923  [ 0 0 1]
924  */
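// Back-projection with the pinhole model: x = z * (u - cx) / fx, y = z * (v - cy) / fy,
// with (fx, fy, cx, cy) = (K[0], K[4], K[2], K[5]).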
925 
926  if(this->_depth_img_ptr->encoding == "32FC1")
927  {
928  z_coord = (double)z_raw_float;
929  }else{
930  z_coord = (double)z_raw_uint/1000.0;
931  }
932  double umcx = points_in_2d[features_idx].x - this->_camera_info_ptr->K[2];
933  double vmcy = points_in_2d[features_idx].y - this->_camera_info_ptr->K[5];
934 
935  // calculate x and y
936  x_coord = z_coord * umcx / (double)this->_camera_info_ptr->K[0];
937  y_coord = z_coord * vmcy / (double)this->_camera_info_ptr->K[4];
938  }
939  else
940  {
941  z_coord = std::numeric_limits<double>::quiet_NaN();
942  }
943 
944  }
945 
946  // If the depth data for this feature is valid
947  if (!std::isnan(x_coord) && !std::isnan(y_coord) && !std::isnan(z_coord))
948  {
949  points_in_3d[features_idx].x = x_coord;
950  points_in_3d[features_idx].y = y_coord;
951  points_in_3d[features_idx].z = z_coord;
952  }
953  else
954  {
955  points_in_3d[features_idx].x = -1.f;
956  points_in_3d[features_idx].y = -1.f;
957  points_in_3d[features_idx].z = -1.f;
958  points_status[features_idx] = false;
959  features_without_3d_ctr++;
960  }
961  }
962  }
963  return features_without_3d_ctr;
964 }
965 
966 int PointFeatureTracker::_Compare3DCoords()
967 {
968  int jumping_features_ctr = 0;
969  double interframe_distance = 0.f;
970  double current_x_coord = 0.f, current_y_coord = 0.f, current_z_coord = 0.f;
971  double previous_x_coord = 0.f, previous_y_coord = 0.f, previous_z_coord = 0.f;
972  for (int features_idx = 0; features_idx < this->_number_of_features; features_idx++)
973  {
974  if (this->_previous_feat_ids_v[features_idx] == this->_current_feat_ids_v[features_idx] && this->_feat_status_v[features_idx])
975  {
976  current_x_coord = this->_corrected_belief_state[features_idx].x;
977  current_y_coord = this->_corrected_belief_state[features_idx].y;
978  current_z_coord = this->_corrected_belief_state[features_idx].z;
979 
980  previous_x_coord = this->_previous_belief_state[features_idx].x;
981  previous_y_coord = this->_previous_belief_state[features_idx].y;
982  previous_z_coord = this->_previous_belief_state[features_idx].z;
983 
984  //std::cout << "Current: " << current_x_coord << " " << current_y_coord << " " << current_z_coord << std::endl;
985  //std::cout << "Previous: "<< previous_x_coord << " " << previous_y_coord << " " << previous_z_coord << std::endl;
986 
987  interframe_distance = sqrt( pow(current_x_coord - previous_x_coord, 2) + pow(current_y_coord - previous_y_coord, 2)
988  + pow(current_z_coord - previous_z_coord, 2));
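// Euclidean distance between the current and previous 3-D locations of the same feature;
// anything at or above _max_interframe_jump is treated as a tracking failure below.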
989 
990  if (interframe_distance >= this->_max_interframe_jump)
991  {
992  this->_corrected_belief_state[features_idx].x = -1.f;
993  this->_corrected_belief_state[features_idx].y = -1.f;
994  this->_corrected_belief_state[features_idx].z = -1.f;
995  this->_current_feat_ids_v[features_idx] = 0;
996  this->_feat_status_v[features_idx] = false;
997  jumping_features_ctr++;
998  }
999  }
1000  }
1001  return jumping_features_ctr;
1002 }