TrackKLT.cpp
Go to the documentation of this file.
1 /*
2  * OpenVINS: An Open Platform for Visual-Inertial Research
3  * Copyright (C) 2018-2023 Patrick Geneva
4  * Copyright (C) 2018-2023 Guoquan Huang
5  * Copyright (C) 2018-2023 OpenVINS Contributors
6  * Copyright (C) 2018-2019 Kevin Eckenhoff
7  *
8  * This program is free software: you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License as published by
10  * the Free Software Foundation, either version 3 of the License, or
11  * (at your option) any later version.
12  *
13  * This program is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16  * GNU General Public License for more details.
17  *
18  * You should have received a copy of the GNU General Public License
19  * along with this program. If not, see <https://www.gnu.org/licenses/>.
20  */
21 
22 #include "TrackKLT.h"
23 
24 #include "Grider_FAST.h"
25 #include "Grider_GRID.h"
26 #include "cam/CamBase.h"
27 #include "feat/Feature.h"
28 #include "feat/FeatureDatabase.h"
30 #include "utils/print.h"
31 
32 using namespace ov_core;
33 
34 void TrackKLT::feed_new_camera(const CameraData &message) {
35 
36  // Error check that we have all the data
37  if (message.sensor_ids.empty() || message.sensor_ids.size() != message.images.size() || message.images.size() != message.masks.size()) {
38  PRINT_ERROR(RED "[ERROR]: MESSAGE DATA SIZES DO NOT MATCH OR EMPTY!!!\n" RESET);
39  PRINT_ERROR(RED "[ERROR]: - message.sensor_ids.size() = %zu\n" RESET, message.sensor_ids.size());
40  PRINT_ERROR(RED "[ERROR]: - message.images.size() = %zu\n" RESET, message.images.size());
41  PRINT_ERROR(RED "[ERROR]: - message.masks.size() = %zu\n" RESET, message.masks.size());
42  std::exit(EXIT_FAILURE);
43  }
44 
45  // Preprocessing steps that we do not parallelize
46  // NOTE: DO NOT PARALLELIZE THESE!
47  // NOTE: These seem to be much slower if you parallelize them...
48  rT1 = boost::posix_time::microsec_clock::local_time();
49  size_t num_images = message.images.size();
50  for (size_t msg_id = 0; msg_id < num_images; msg_id++) {
51 
52  // Lock this data feed for this camera
53  size_t cam_id = message.sensor_ids.at(msg_id);
54  std::lock_guard<std::mutex> lck(mtx_feeds.at(cam_id));
55 
56  // Histogram equalize
57  cv::Mat img;
58  if (histogram_method == HistogramMethod::HISTOGRAM) {
59  cv::equalizeHist(message.images.at(msg_id), img);
60  } else if (histogram_method == HistogramMethod::CLAHE) {
61  double eq_clip_limit = 10.0;
62  cv::Size eq_win_size = cv::Size(8, 8);
63  cv::Ptr<cv::CLAHE> clahe = cv::createCLAHE(eq_clip_limit, eq_win_size);
64  clahe->apply(message.images.at(msg_id), img);
65  } else {
66  img = message.images.at(msg_id);
67  }
68 
69  // Extract image pyramid
70  std::vector<cv::Mat> imgpyr;
71  cv::buildOpticalFlowPyramid(img, imgpyr, win_size, pyr_levels);
72 
73  // Save!
74  img_curr[cam_id] = img;
75  img_pyramid_curr[cam_id] = imgpyr;
76  }
77 
78  // Either call our stereo or monocular version
79  // If we are doing binocular tracking, then we should parallize our tracking
80  if (num_images == 1) {
81  feed_monocular(message, 0);
82  } else if (num_images == 2 && use_stereo) {
83  feed_stereo(message, 0, 1);
84  } else if (!use_stereo) {
85  parallel_for_(cv::Range(0, (int)num_images), LambdaBody([&](const cv::Range &range) {
86  for (int i = range.start; i < range.end; i++) {
87  feed_monocular(message, i);
88  }
89  }));
90  } else {
91  PRINT_ERROR(RED "[ERROR]: invalid number of images passed %zu, we only support mono or stereo tracking", num_images);
92  std::exit(EXIT_FAILURE);
93  }
94 }
95 
96 void TrackKLT::feed_monocular(const CameraData &message, size_t msg_id) {
97 
98  // Lock this data feed for this camera
99  size_t cam_id = message.sensor_ids.at(msg_id);
100  std::lock_guard<std::mutex> lck(mtx_feeds.at(cam_id));
101 
102  // Get our image objects for this image
103  cv::Mat img = img_curr.at(cam_id);
104  std::vector<cv::Mat> imgpyr = img_pyramid_curr.at(cam_id);
105  cv::Mat mask = message.masks.at(msg_id);
106  rT2 = boost::posix_time::microsec_clock::local_time();
107 
108  // If we didn't have any successful tracks last time, just extract this time
109  // This also handles, the tracking initalization on the first call to this extractor
110  if (pts_last[cam_id].empty()) {
111  // Detect new features
112  std::vector<cv::KeyPoint> good_left;
113  std::vector<size_t> good_ids_left;
114  perform_detection_monocular(imgpyr, mask, good_left, good_ids_left);
115  // Save the current image and pyramid
116  std::lock_guard<std::mutex> lckv(mtx_last_vars);
117  img_last[cam_id] = img;
118  img_pyramid_last[cam_id] = imgpyr;
119  img_mask_last[cam_id] = mask;
120  pts_last[cam_id] = good_left;
121  ids_last[cam_id] = good_ids_left;
122  return;
123  }
124 
125  // First we should make that the last images have enough features so we can do KLT
126  // This will "top-off" our number of tracks so always have a constant number
127  int pts_before_detect = (int)pts_last[cam_id].size();
128  auto pts_left_old = pts_last[cam_id];
129  auto ids_left_old = ids_last[cam_id];
130  perform_detection_monocular(img_pyramid_last[cam_id], img_mask_last[cam_id], pts_left_old, ids_left_old);
131  rT3 = boost::posix_time::microsec_clock::local_time();
132 
133  // Our return success masks, and predicted new features
134  std::vector<uchar> mask_ll;
135  std::vector<cv::KeyPoint> pts_left_new = pts_left_old;
136 
137  // Lets track temporally
138  perform_matching(img_pyramid_last[cam_id], imgpyr, pts_left_old, pts_left_new, cam_id, cam_id, mask_ll);
139  assert(pts_left_new.size() == ids_left_old.size());
140  rT4 = boost::posix_time::microsec_clock::local_time();
141 
142  // If any of our mask is empty, that means we didn't have enough to do ransac, so just return
143  if (mask_ll.empty()) {
144  std::lock_guard<std::mutex> lckv(mtx_last_vars);
145  img_last[cam_id] = img;
146  img_pyramid_last[cam_id] = imgpyr;
147  img_mask_last[cam_id] = mask;
148  pts_last[cam_id].clear();
149  ids_last[cam_id].clear();
150  PRINT_ERROR(RED "[KLT-EXTRACTOR]: Failed to get enough points to do RANSAC, resetting.....\n" RESET);
151  return;
152  }
153 
154  // Get our "good tracks"
155  std::vector<cv::KeyPoint> good_left;
156  std::vector<size_t> good_ids_left;
157 
158  // Loop through all left points
159  for (size_t i = 0; i < pts_left_new.size(); i++) {
160  // Ensure we do not have any bad KLT tracks (i.e., points are negative)
161  if (pts_left_new.at(i).pt.x < 0 || pts_left_new.at(i).pt.y < 0 || (int)pts_left_new.at(i).pt.x >= img.cols ||
162  (int)pts_left_new.at(i).pt.y >= img.rows)
163  continue;
164  // Check if it is in the mask
165  // NOTE: mask has max value of 255 (white) if it should be
166  if ((int)message.masks.at(msg_id).at<uint8_t>((int)pts_left_new.at(i).pt.y, (int)pts_left_new.at(i).pt.x) > 127)
167  continue;
168  // If it is a good track, and also tracked from left to right
169  if (mask_ll[i]) {
170  good_left.push_back(pts_left_new[i]);
171  good_ids_left.push_back(ids_left_old[i]);
172  }
173  }
174 
175  // Update our feature database, with theses new observations
176  for (size_t i = 0; i < good_left.size(); i++) {
177  cv::Point2f npt_l = camera_calib.at(cam_id)->undistort_cv(good_left.at(i).pt);
178  database->update_feature(good_ids_left.at(i), message.timestamp, cam_id, good_left.at(i).pt.x, good_left.at(i).pt.y, npt_l.x, npt_l.y);
179  }
180 
181  // Move forward in time
182  {
183  std::lock_guard<std::mutex> lckv(mtx_last_vars);
184  img_last[cam_id] = img;
185  img_pyramid_last[cam_id] = imgpyr;
186  img_mask_last[cam_id] = mask;
187  pts_last[cam_id] = good_left;
188  ids_last[cam_id] = good_ids_left;
189  }
190  rT5 = boost::posix_time::microsec_clock::local_time();
191 
192  // Timing information
193  PRINT_ALL("[TIME-KLT]: %.4f seconds for pyramid\n", (rT2 - rT1).total_microseconds() * 1e-6);
194  PRINT_ALL("[TIME-KLT]: %.4f seconds for detection (%zu detected)\n", (rT3 - rT2).total_microseconds() * 1e-6,
195  (int)pts_last[cam_id].size() - pts_before_detect);
196  PRINT_ALL("[TIME-KLT]: %.4f seconds for temporal klt\n", (rT4 - rT3).total_microseconds() * 1e-6);
197  PRINT_ALL("[TIME-KLT]: %.4f seconds for feature DB update (%d features)\n", (rT5 - rT4).total_microseconds() * 1e-6,
198  (int)good_left.size());
199  PRINT_ALL("[TIME-KLT]: %.4f seconds for total\n", (rT5 - rT1).total_microseconds() * 1e-6);
200 }
201 
202 void TrackKLT::feed_stereo(const CameraData &message, size_t msg_id_left, size_t msg_id_right) {
203 
204  // Lock this data feed for this camera
205  size_t cam_id_left = message.sensor_ids.at(msg_id_left);
206  size_t cam_id_right = message.sensor_ids.at(msg_id_right);
207  std::lock_guard<std::mutex> lck1(mtx_feeds.at(cam_id_left));
208  std::lock_guard<std::mutex> lck2(mtx_feeds.at(cam_id_right));
209 
210  // Get our image objects for this image
211  cv::Mat img_left = img_curr.at(cam_id_left);
212  cv::Mat img_right = img_curr.at(cam_id_right);
213  std::vector<cv::Mat> imgpyr_left = img_pyramid_curr.at(cam_id_left);
214  std::vector<cv::Mat> imgpyr_right = img_pyramid_curr.at(cam_id_right);
215  cv::Mat mask_left = message.masks.at(msg_id_left);
216  cv::Mat mask_right = message.masks.at(msg_id_right);
217  rT2 = boost::posix_time::microsec_clock::local_time();
218 
219  // If we didn't have any successful tracks last time, just extract this time
220  // This also handles, the tracking initalization on the first call to this extractor
221  if (pts_last[cam_id_left].empty() && pts_last[cam_id_right].empty()) {
222  // Track into the new image
223  std::vector<cv::KeyPoint> good_left, good_right;
224  std::vector<size_t> good_ids_left, good_ids_right;
225  perform_detection_stereo(imgpyr_left, imgpyr_right, mask_left, mask_right, cam_id_left, cam_id_right, good_left, good_right,
226  good_ids_left, good_ids_right);
227  // Save the current image and pyramid
228  std::lock_guard<std::mutex> lckv(mtx_last_vars);
229  img_last[cam_id_left] = img_left;
230  img_last[cam_id_right] = img_right;
231  img_pyramid_last[cam_id_left] = imgpyr_left;
232  img_pyramid_last[cam_id_right] = imgpyr_right;
233  img_mask_last[cam_id_left] = mask_left;
234  img_mask_last[cam_id_right] = mask_right;
235  pts_last[cam_id_left] = good_left;
236  pts_last[cam_id_right] = good_right;
237  ids_last[cam_id_left] = good_ids_left;
238  ids_last[cam_id_right] = good_ids_right;
239  return;
240  }
241 
242  // First we should make that the last images have enough features so we can do KLT
243  // This will "top-off" our number of tracks so always have a constant number
244  int pts_before_detect = (int)pts_last[cam_id_left].size();
245  auto pts_left_old = pts_last[cam_id_left];
246  auto pts_right_old = pts_last[cam_id_right];
247  auto ids_left_old = ids_last[cam_id_left];
248  auto ids_right_old = ids_last[cam_id_right];
249  perform_detection_stereo(img_pyramid_last[cam_id_left], img_pyramid_last[cam_id_right], img_mask_last[cam_id_left],
250  img_mask_last[cam_id_right], cam_id_left, cam_id_right, pts_left_old, pts_right_old, ids_left_old,
251  ids_right_old);
252  rT3 = boost::posix_time::microsec_clock::local_time();
253 
254  // Our return success masks, and predicted new features
255  std::vector<uchar> mask_ll, mask_rr;
256  std::vector<cv::KeyPoint> pts_left_new = pts_left_old;
257  std::vector<cv::KeyPoint> pts_right_new = pts_right_old;
258 
259  // Lets track temporally
260  parallel_for_(cv::Range(0, 2), LambdaBody([&](const cv::Range &range) {
261  for (int i = range.start; i < range.end; i++) {
262  bool is_left = (i == 0);
263  perform_matching(img_pyramid_last[is_left ? cam_id_left : cam_id_right], is_left ? imgpyr_left : imgpyr_right,
264  is_left ? pts_left_old : pts_right_old, is_left ? pts_left_new : pts_right_new,
265  is_left ? cam_id_left : cam_id_right, is_left ? cam_id_left : cam_id_right,
266  is_left ? mask_ll : mask_rr);
267  }
268  }));
269  rT4 = boost::posix_time::microsec_clock::local_time();
270 
271  //===================================================================================
272  //===================================================================================
273 
274  // left to right matching
275  // TODO: we should probably still do this to reject outliers
276  // TODO: maybe we should collect all tracks that are in both frames and make they pass this?
277  // std::vector<uchar> mask_lr;
278  // perform_matching(imgpyr_left, imgpyr_right, pts_left_new, pts_right_new, cam_id_left, cam_id_right, mask_lr);
279  rT5 = boost::posix_time::microsec_clock::local_time();
280 
281  //===================================================================================
282  //===================================================================================
283 
284  // If any of our masks are empty, that means we didn't have enough to do ransac, so just return
285  if (mask_ll.empty() && mask_rr.empty()) {
286  std::lock_guard<std::mutex> lckv(mtx_last_vars);
287  img_last[cam_id_left] = img_left;
288  img_last[cam_id_right] = img_right;
289  img_pyramid_last[cam_id_left] = imgpyr_left;
290  img_pyramid_last[cam_id_right] = imgpyr_right;
291  img_mask_last[cam_id_left] = mask_left;
292  img_mask_last[cam_id_right] = mask_right;
293  pts_last[cam_id_left].clear();
294  pts_last[cam_id_right].clear();
295  ids_last[cam_id_left].clear();
296  ids_last[cam_id_right].clear();
297  PRINT_ERROR(RED "[KLT-EXTRACTOR]: Failed to get enough points to do RANSAC, resetting.....\n" RESET);
298  return;
299  }
300 
301  // Get our "good tracks"
302  std::vector<cv::KeyPoint> good_left, good_right;
303  std::vector<size_t> good_ids_left, good_ids_right;
304 
305  // Loop through all left points
306  for (size_t i = 0; i < pts_left_new.size(); i++) {
307  // Ensure we do not have any bad KLT tracks (i.e., points are negative)
308  if (pts_left_new.at(i).pt.x < 0 || pts_left_new.at(i).pt.y < 0 || (int)pts_left_new.at(i).pt.x > img_left.cols ||
309  (int)pts_left_new.at(i).pt.y > img_left.rows)
310  continue;
311  // See if we have the same feature in the right
312  bool found_right = false;
313  size_t index_right = 0;
314  for (size_t n = 0; n < ids_right_old.size(); n++) {
315  if (ids_left_old.at(i) == ids_right_old.at(n)) {
316  found_right = true;
317  index_right = n;
318  break;
319  }
320  }
321  // If it is a good track, and also tracked from left to right
322  // Else track it as a mono feature in just the left image
323  if (mask_ll[i] && found_right && mask_rr[index_right]) {
324  // Ensure we do not have any bad KLT tracks (i.e., points are negative)
325  if (pts_right_new.at(index_right).pt.x < 0 || pts_right_new.at(index_right).pt.y < 0 ||
326  (int)pts_right_new.at(index_right).pt.x >= img_right.cols || (int)pts_right_new.at(index_right).pt.y >= img_right.rows)
327  continue;
328  good_left.push_back(pts_left_new.at(i));
329  good_right.push_back(pts_right_new.at(index_right));
330  good_ids_left.push_back(ids_left_old.at(i));
331  good_ids_right.push_back(ids_right_old.at(index_right));
332  // PRINT_DEBUG("adding to stereo - %u , %u\n", ids_left_old.at(i), ids_right_old.at(index_right));
333  } else if (mask_ll[i]) {
334  good_left.push_back(pts_left_new.at(i));
335  good_ids_left.push_back(ids_left_old.at(i));
336  // PRINT_DEBUG("adding to left - %u \n",ids_left_old.at(i));
337  }
338  }
339 
340  // Loop through all right points
341  for (size_t i = 0; i < pts_right_new.size(); i++) {
342  // Ensure we do not have any bad KLT tracks (i.e., points are negative)
343  if (pts_right_new.at(i).pt.x < 0 || pts_right_new.at(i).pt.y < 0 || (int)pts_right_new.at(i).pt.x >= img_right.cols ||
344  (int)pts_right_new.at(i).pt.y >= img_right.rows)
345  continue;
346  // See if we have the same feature in the right
347  bool added_already = (std::find(good_ids_right.begin(), good_ids_right.end(), ids_right_old.at(i)) != good_ids_right.end());
348  // If it has not already been added as a good feature, add it as a mono track
349  if (mask_rr[i] && !added_already) {
350  good_right.push_back(pts_right_new.at(i));
351  good_ids_right.push_back(ids_right_old.at(i));
352  // PRINT_DEBUG("adding to right - %u \n", ids_right_old.at(i));
353  }
354  }
355 
356  // Update our feature database, with theses new observations
357  for (size_t i = 0; i < good_left.size(); i++) {
358  cv::Point2f npt_l = camera_calib.at(cam_id_left)->undistort_cv(good_left.at(i).pt);
359  database->update_feature(good_ids_left.at(i), message.timestamp, cam_id_left, good_left.at(i).pt.x, good_left.at(i).pt.y, npt_l.x,
360  npt_l.y);
361  }
362  for (size_t i = 0; i < good_right.size(); i++) {
363  cv::Point2f npt_r = camera_calib.at(cam_id_right)->undistort_cv(good_right.at(i).pt);
364  database->update_feature(good_ids_right.at(i), message.timestamp, cam_id_right, good_right.at(i).pt.x, good_right.at(i).pt.y, npt_r.x,
365  npt_r.y);
366  }
367 
368  // Move forward in time
369  {
370  std::lock_guard<std::mutex> lckv(mtx_last_vars);
371  img_last[cam_id_left] = img_left;
372  img_last[cam_id_right] = img_right;
373  img_pyramid_last[cam_id_left] = imgpyr_left;
374  img_pyramid_last[cam_id_right] = imgpyr_right;
375  img_mask_last[cam_id_left] = mask_left;
376  img_mask_last[cam_id_right] = mask_right;
377  pts_last[cam_id_left] = good_left;
378  pts_last[cam_id_right] = good_right;
379  ids_last[cam_id_left] = good_ids_left;
380  ids_last[cam_id_right] = good_ids_right;
381  }
382  rT6 = boost::posix_time::microsec_clock::local_time();
383 
384  // // Timing information
385  PRINT_ALL("[TIME-KLT]: %.4f seconds for pyramid\n", (rT2 - rT1).total_microseconds() * 1e-6);
386  PRINT_ALL("[TIME-KLT]: %.4f seconds for detection (%d detected)\n", (rT3 - rT2).total_microseconds() * 1e-6,
387  (int)pts_last[cam_id_left].size() - pts_before_detect);
388  PRINT_ALL("[TIME-KLT]: %.4f seconds for temporal klt\n", (rT4 - rT3).total_microseconds() * 1e-6);
389  PRINT_ALL("[TIME-KLT]: %.4f seconds for stereo klt\n", (rT5 - rT4).total_microseconds() * 1e-6);
390  PRINT_ALL("[TIME-KLT]: %.4f seconds for feature DB update (%d features)\n", (rT6 - rT5).total_microseconds() * 1e-6,
391  (int)good_left.size());
392  PRINT_ALL("[TIME-KLT]: %.4f seconds for total\n", (rT6 - rT1).total_microseconds() * 1e-6);
393 }
394 
void TrackKLT::perform_detection_monocular(const std::vector<cv::Mat> &img0pyr, const cv::Mat &mask0, std::vector<cv::KeyPoint> &pts0,
                                           std::vector<size_t> &ids0) {

  // This function prunes the passed-in tracks (pts0/ids0 are modified in place) and then
  // "tops-off" the feature set with new FAST extractions until we approach num_features.

  // Create a 2D occupancy grid for this current image
  // Note that we scale this down, so that each grid cell covers a min_px_dist x min_px_dist set of pixels
  // This means that we will reject points that are less than min_px_dist pixels away from existing features
  cv::Size size_close((int)((float)img0pyr.at(0).cols / (float)min_px_dist),
                      (int)((float)img0pyr.at(0).rows / (float)min_px_dist)); // width x height
  cv::Mat grid_2d_close = cv::Mat::zeros(size_close, CV_8UC1);
  // Coarser grid_x x grid_y grid used to count how many features fall in each extraction cell
  float size_x = (float)img0pyr.at(0).cols / (float)grid_x;
  float size_y = (float)img0pyr.at(0).rows / (float)grid_y;
  cv::Size size_grid(grid_x, grid_y); // width x height
  cv::Mat grid_2d_grid = cv::Mat::zeros(size_grid, CV_8UC1);
  // Working copy of the mask; we paint rectangles around kept features so the
  // extractor below will not re-detect on top of them
  cv::Mat mask0_updated = mask0.clone();
  // Walk pts0 and ids0 in lockstep, erasing invalid/redundant features from both
  auto it0 = pts0.begin();
  auto it1 = ids0.begin();
  while (it0 != pts0.end()) {
    // Get current left keypoint, check that it is in bounds (with a 10px border margin)
    cv::KeyPoint kpt = *it0;
    int x = (int)kpt.pt.x;
    int y = (int)kpt.pt.y;
    int edge = 10;
    if (x < edge || x >= img0pyr.at(0).cols - edge || y < edge || y >= img0pyr.at(0).rows - edge) {
      it0 = pts0.erase(it0);
      it1 = ids0.erase(it1);
      continue;
    }
    // Calculate occupancy-grid coordinates for the close-point check
    int x_close = (int)(kpt.pt.x / (float)min_px_dist);
    int y_close = (int)(kpt.pt.y / (float)min_px_dist);
    if (x_close < 0 || x_close >= size_close.width || y_close < 0 || y_close >= size_close.height) {
      it0 = pts0.erase(it0);
      it1 = ids0.erase(it1);
      continue;
    }
    // Calculate what extraction grid cell this feature is in
    int x_grid = std::floor(kpt.pt.x / size_x);
    int y_grid = std::floor(kpt.pt.y / size_y);
    if (x_grid < 0 || x_grid >= size_grid.width || y_grid < 0 || y_grid >= size_grid.height) {
      it0 = pts0.erase(it0);
      it1 = ids0.erase(it1);
      continue;
    }
    // Check if this keypoint is near another (already kept) point; if so drop it
    if (grid_2d_close.at<uint8_t>(y_close, x_close) > 127) {
      it0 = pts0.erase(it0);
      it1 = ids0.erase(it1);
      continue;
    }
    // Now check if it is in a masked-out area
    // NOTE: mask has max value of 255 (white) if the pixel should be rejected
    if (mask0.at<uint8_t>(y, x) > 127) {
      it0 = pts0.erase(it0);
      it1 = ids0.erase(it1);
      continue;
    }
    // Else we are good: mark its occupancy cell and bump its extraction-grid count
    grid_2d_close.at<uint8_t>(y_close, x_close) = 255;
    if (grid_2d_grid.at<uint8_t>(y_grid, x_grid) < 255) {
      grid_2d_grid.at<uint8_t>(y_grid, x_grid) += 1;
    }
    // Paint a filled rectangle into the local mask so extraction avoids this feature's neighborhood
    if (x - min_px_dist >= 0 && x + min_px_dist < img0pyr.at(0).cols && y - min_px_dist >= 0 && y + min_px_dist < img0pyr.at(0).rows) {
      cv::Point pt1(x - min_px_dist, y - min_px_dist);
      cv::Point pt2(x + min_px_dist, y + min_px_dist);
      cv::rectangle(mask0_updated, pt1, pt2, cv::Scalar(255), -1);
    }
    it0++;
    it1++;
  }

  // First compute how many more features we need to extract from this image
  // Skip extraction unless we are missing at least 20 features (or half the budget, whichever is smaller)
  double min_feat_percent = 0.50;
  int num_featsneeded = num_features - (int)pts0.size();
  if (num_featsneeded < std::min(20, (int)(min_feat_percent * num_features)))
    return;

  // This is old extraction code that would extract from the whole image
  // This can be slow as this will recompute extractions for grid areas that we have max features already
  // std::vector<cv::KeyPoint> pts0_ext;
  // Grider_FAST::perform_griding(img0pyr.at(0), mask0_updated, pts0_ext, num_features, grid_x, grid_y, threshold, true);

  // We also check a downsampled mask such that we don't extract in areas where it is all masked!
  // (nearest-neighbor resize: each grid cell gets the mask value of one representative pixel)
  cv::Mat mask0_grid;
  cv::resize(mask0, mask0_grid, size_grid, 0.0, 0.0, cv::INTER_NEAREST);

  // Create grids we need to extract from and then extract our features (use FAST with griding)
  // A cell is a candidate if it is under its per-cell feature quota and not fully masked
  int num_features_grid = (int)((double)num_features / (double)(grid_x * grid_y)) + 1;
  int num_features_grid_req = std::max(1, (int)(min_feat_percent * num_features_grid));
  std::vector<std::pair<int, int>> valid_locs;
  for (int x = 0; x < grid_2d_grid.cols; x++) {
    for (int y = 0; y < grid_2d_grid.rows; y++) {
      if ((int)grid_2d_grid.at<uint8_t>(y, x) < num_features_grid_req && (int)mask0_grid.at<uint8_t>(y, x) != 255) {
        valid_locs.emplace_back(x, y);
      }
    }
  }
  std::vector<cv::KeyPoint> pts0_ext;
  Grider_GRID::perform_griding(img0pyr.at(0), mask0_updated, valid_locs, pts0_ext, num_features, grid_x, grid_y, threshold, true);

  // Now, reject newly extracted features that are close to a current feature
  std::vector<cv::KeyPoint> kpts0_new;
  std::vector<cv::Point2f> pts0_new;
  for (auto &kpt : pts0_ext) {
    // Check that it is in bounds of the close-point occupancy grid
    int x_grid = (int)(kpt.pt.x / (float)min_px_dist);
    int y_grid = (int)(kpt.pt.y / (float)min_px_dist);
    if (x_grid < 0 || x_grid >= size_close.width || y_grid < 0 || y_grid >= size_close.height)
      continue;
    // See if there is already a point at this location
    if (grid_2d_close.at<uint8_t>(y_grid, x_grid) > 127)
      continue;
    // Else lets add it (and occupy its cell so later extractions are also rejected)!
    kpts0_new.push_back(kpt);
    pts0_new.push_back(kpt.pt);
    grid_2d_close.at<uint8_t>(y_grid, x_grid) = 255;
  }

  // Loop through and record only ones that are valid, assigning each a fresh global id
  // NOTE: if we multi-thread this atomic can cause some randomness due to multiple thread detecting features
  // NOTE: this is due to the fact that we select update features based on feat id
  // NOTE: thus the order will matter since we try to select oldest (smallest id) to update with
  // NOTE: not sure how to remove... maybe a better way?
  for (size_t i = 0; i < pts0_new.size(); i++) {
    // update the uv coordinates
    kpts0_new.at(i).pt = pts0_new.at(i);
    // append the new uv coordinate
    pts0.push_back(kpts0_new.at(i));
    // move id forward and append this new point
    size_t temp = ++currid;
    ids0.push_back(temp);
  }
}
529 
530 void TrackKLT::perform_detection_stereo(const std::vector<cv::Mat> &img0pyr, const std::vector<cv::Mat> &img1pyr, const cv::Mat &mask0,
531  const cv::Mat &mask1, size_t cam_id_left, size_t cam_id_right, std::vector<cv::KeyPoint> &pts0,
532  std::vector<cv::KeyPoint> &pts1, std::vector<size_t> &ids0, std::vector<size_t> &ids1) {
533 
534  // Create a 2D occupancy grid for this current image
535  // Note that we scale this down, so that each grid point is equal to a set of pixels
536  // This means that we will reject points that less then grid_px_size points away then existing features
537  cv::Size size_close0((int)((float)img0pyr.at(0).cols / (float)min_px_dist),
538  (int)((float)img0pyr.at(0).rows / (float)min_px_dist)); // width x height
539  cv::Mat grid_2d_close0 = cv::Mat::zeros(size_close0, CV_8UC1);
540  float size_x0 = (float)img0pyr.at(0).cols / (float)grid_x;
541  float size_y0 = (float)img0pyr.at(0).rows / (float)grid_y;
542  cv::Size size_grid0(grid_x, grid_y); // width x height
543  cv::Mat grid_2d_grid0 = cv::Mat::zeros(size_grid0, CV_8UC1);
544  cv::Mat mask0_updated = mask0.clone();
545  auto it0 = pts0.begin();
546  auto it1 = ids0.begin();
547  while (it0 != pts0.end()) {
548  // Get current left keypoint, check that it is in bounds
549  cv::KeyPoint kpt = *it0;
550  int x = (int)kpt.pt.x;
551  int y = (int)kpt.pt.y;
552  int edge = 10;
553  if (x < edge || x >= img0pyr.at(0).cols - edge || y < edge || y >= img0pyr.at(0).rows - edge) {
554  it0 = pts0.erase(it0);
555  it1 = ids0.erase(it1);
556  continue;
557  }
558  // Calculate mask coordinates for close points
559  int x_close = (int)(kpt.pt.x / (float)min_px_dist);
560  int y_close = (int)(kpt.pt.y / (float)min_px_dist);
561  if (x_close < 0 || x_close >= size_close0.width || y_close < 0 || y_close >= size_close0.height) {
562  it0 = pts0.erase(it0);
563  it1 = ids0.erase(it1);
564  continue;
565  }
566  // Calculate what grid cell this feature is in
567  int x_grid = std::floor(kpt.pt.x / size_x0);
568  int y_grid = std::floor(kpt.pt.y / size_y0);
569  if (x_grid < 0 || x_grid >= size_grid0.width || y_grid < 0 || y_grid >= size_grid0.height) {
570  it0 = pts0.erase(it0);
571  it1 = ids0.erase(it1);
572  continue;
573  }
574  // Check if this keypoint is near another point
575  if (grid_2d_close0.at<uint8_t>(y_close, x_close) > 127) {
576  it0 = pts0.erase(it0);
577  it1 = ids0.erase(it1);
578  continue;
579  }
580  // Now check if it is in a mask area or not
581  // NOTE: mask has max value of 255 (white) if it should be
582  if (mask0.at<uint8_t>(y, x) > 127) {
583  it0 = pts0.erase(it0);
584  it1 = ids0.erase(it1);
585  continue;
586  }
587  // Else we are good, move forward to the next point
588  grid_2d_close0.at<uint8_t>(y_close, x_close) = 255;
589  if (grid_2d_grid0.at<uint8_t>(y_grid, x_grid) < 255) {
590  grid_2d_grid0.at<uint8_t>(y_grid, x_grid) += 1;
591  }
592  // Append this to the local mask of the image
593  if (x - min_px_dist >= 0 && x + min_px_dist < img0pyr.at(0).cols && y - min_px_dist >= 0 && y + min_px_dist < img0pyr.at(0).rows) {
594  cv::Point pt1(x - min_px_dist, y - min_px_dist);
595  cv::Point pt2(x + min_px_dist, y + min_px_dist);
596  cv::rectangle(mask0_updated, pt1, pt2, cv::Scalar(255), -1);
597  }
598  it0++;
599  it1++;
600  }
601 
602  // First compute how many more features we need to extract from this image
603  double min_feat_percent = 0.50;
604  int num_featsneeded_0 = num_features - (int)pts0.size();
605 
606  // LEFT: if we need features we should extract them in the current frame
607  // LEFT: we will also try to track them from this frame over to the right frame
608  // LEFT: in the case that we have two features that are the same, then we should merge them
609  if (num_featsneeded_0 > std::min(20, (int)(min_feat_percent * num_features))) {
610 
611  // This is old extraction code that would extract from the whole image
612  // This can be slow as this will recompute extractions for grid areas that we have max features already
613  // std::vector<cv::KeyPoint> pts0_ext;
614  // Grider_FAST::perform_griding(img0pyr.at(0), mask0_updated, pts0_ext, num_features, grid_x, grid_y, threshold, true);
615 
616  // We also check a downsampled mask such that we don't extract in areas where it is all masked!
617  cv::Mat mask0_grid;
618  cv::resize(mask0, mask0_grid, size_grid0, 0.0, 0.0, cv::INTER_NEAREST);
619 
620  // Create grids we need to extract from and then extract our features (use fast with griding)
621  int num_features_grid = (int)((double)num_features / (double)(grid_x * grid_y)) + 1;
622  int num_features_grid_req = std::max(1, (int)(min_feat_percent * num_features_grid));
623  std::vector<std::pair<int, int>> valid_locs;
624  for (int x = 0; x < grid_2d_grid0.cols; x++) {
625  for (int y = 0; y < grid_2d_grid0.rows; y++) {
626  if ((int)grid_2d_grid0.at<uint8_t>(y, x) < num_features_grid_req && (int)mask0_grid.at<uint8_t>(y, x) != 255) {
627  valid_locs.emplace_back(x, y);
628  }
629  }
630  }
631  std::vector<cv::KeyPoint> pts0_ext;
632  Grider_GRID::perform_griding(img0pyr.at(0), mask0_updated, valid_locs, pts0_ext, num_features, grid_x, grid_y, threshold, true);
633 
634  // Now, reject features that are close a current feature
635  std::vector<cv::KeyPoint> kpts0_new;
636  std::vector<cv::Point2f> pts0_new;
637  for (auto &kpt : pts0_ext) {
638  // Check that it is in bounds
639  int x_grid = (int)(kpt.pt.x / (float)min_px_dist);
640  int y_grid = (int)(kpt.pt.y / (float)min_px_dist);
641  if (x_grid < 0 || x_grid >= size_close0.width || y_grid < 0 || y_grid >= size_close0.height)
642  continue;
643  // See if there is a point at this location
644  if (grid_2d_close0.at<uint8_t>(y_grid, x_grid) > 127)
645  continue;
646  // Else lets add it!
647  grid_2d_close0.at<uint8_t>(y_grid, x_grid) = 255;
648  kpts0_new.push_back(kpt);
649  pts0_new.push_back(kpt.pt);
650  }
651 
652  // TODO: Project points from the left frame into the right frame
653  // TODO: This will not work for large baseline systems.....
654  // TODO: If we had some depth estimates we could do a better projection
655  // TODO: Or project and search along the epipolar line??
656  std::vector<cv::KeyPoint> kpts1_new;
657  std::vector<cv::Point2f> pts1_new;
658  kpts1_new = kpts0_new;
659  pts1_new = pts0_new;
660 
661  // If we have points, do KLT tracking to get the valid projections into the right image
662  if (!pts0_new.empty()) {
663 
664  // Do our KLT tracking from the left to the right frame of reference
665  // NOTE: we have a pretty big window size here since our projection might be bad
666  // NOTE: but this might cause failure in cases of repeated textures (eg. checkerboard)
667  std::vector<uchar> mask;
668  // perform_matching(img0pyr, img1pyr, kpts0_new, kpts1_new, cam_id_left, cam_id_right, mask);
669  std::vector<float> error;
670  cv::TermCriteria term_crit = cv::TermCriteria(cv::TermCriteria::COUNT | cv::TermCriteria::EPS, 30, 0.01);
671  cv::calcOpticalFlowPyrLK(img0pyr, img1pyr, pts0_new, pts1_new, mask, error, win_size, pyr_levels, term_crit,
672  cv::OPTFLOW_USE_INITIAL_FLOW);
673 
674  // Loop through and record only ones that are valid
675  for (size_t i = 0; i < pts0_new.size(); i++) {
676 
677  // Check to see if the feature is out of bounds (oob) in either image
678  bool oob_left = ((int)pts0_new.at(i).x < 0 || (int)pts0_new.at(i).x >= img0pyr.at(0).cols || (int)pts0_new.at(i).y < 0 ||
679  (int)pts0_new.at(i).y >= img0pyr.at(0).rows);
680  bool oob_right = ((int)pts1_new.at(i).x < 0 || (int)pts1_new.at(i).x >= img1pyr.at(0).cols || (int)pts1_new.at(i).y < 0 ||
681  (int)pts1_new.at(i).y >= img1pyr.at(0).rows);
682 
683  // Check to see if it there is already a feature in the right image at this location
684  // 1) If this is not already in the right image, then we should treat it as a stereo
685  // 2) Otherwise we will treat this as just a monocular track of the feature
686  // TODO: we should check to see if we can combine this new feature and the one in the right
687  // TODO: seems if reject features which overlay with right features already we have very poor tracking perf
688  if (!oob_left && !oob_right && mask[i] == 1) {
689  // update the uv coordinates
690  kpts0_new.at(i).pt = pts0_new.at(i);
691  kpts1_new.at(i).pt = pts1_new.at(i);
692  // append the new uv coordinate
693  pts0.push_back(kpts0_new.at(i));
694  pts1.push_back(kpts1_new.at(i));
695  // move id forward and append this new point
696  size_t temp = ++currid;
697  ids0.push_back(temp);
698  ids1.push_back(temp);
699  } else if (!oob_left) {
700  // update the uv coordinates
701  kpts0_new.at(i).pt = pts0_new.at(i);
702  // append the new uv coordinate
703  pts0.push_back(kpts0_new.at(i));
704  // move id forward and append this new point
705  size_t temp = ++currid;
706  ids0.push_back(temp);
707  }
708  }
709  }
710  }
711 
712  // RIGHT: Now summarise the number of tracks in the right image
713  // RIGHT: We will try to extract some monocular features if we have the room
714  // RIGHT: This will also remove features if there are multiple in the same location
715  cv::Size size_close1((int)((float)img1pyr.at(0).cols / (float)min_px_dist), (int)((float)img1pyr.at(0).rows / (float)min_px_dist));
716  cv::Mat grid_2d_close1 = cv::Mat::zeros(size_close1, CV_8UC1);
717  float size_x1 = (float)img1pyr.at(0).cols / (float)grid_x;
718  float size_y1 = (float)img1pyr.at(0).rows / (float)grid_y;
719  cv::Size size_grid1(grid_x, grid_y); // width x height
720  cv::Mat grid_2d_grid1 = cv::Mat::zeros(size_grid1, CV_8UC1);
721  cv::Mat mask1_updated = mask0.clone();
722  it0 = pts1.begin();
723  it1 = ids1.begin();
724  while (it0 != pts1.end()) {
725  // Get current left keypoint, check that it is in bounds
726  cv::KeyPoint kpt = *it0;
727  int x = (int)kpt.pt.x;
728  int y = (int)kpt.pt.y;
729  int edge = 10;
730  if (x < edge || x >= img1pyr.at(0).cols - edge || y < edge || y >= img1pyr.at(0).rows - edge) {
731  it0 = pts1.erase(it0);
732  it1 = ids1.erase(it1);
733  continue;
734  }
735  // Calculate mask coordinates for close points
736  int x_close = (int)(kpt.pt.x / (float)min_px_dist);
737  int y_close = (int)(kpt.pt.y / (float)min_px_dist);
738  if (x_close < 0 || x_close >= size_close1.width || y_close < 0 || y_close >= size_close1.height) {
739  it0 = pts1.erase(it0);
740  it1 = ids1.erase(it1);
741  continue;
742  }
743  // Calculate what grid cell this feature is in
744  int x_grid = std::floor(kpt.pt.x / size_x1);
745  int y_grid = std::floor(kpt.pt.y / size_y1);
746  if (x_grid < 0 || x_grid >= size_grid1.width || y_grid < 0 || y_grid >= size_grid1.height) {
747  it0 = pts1.erase(it0);
748  it1 = ids1.erase(it1);
749  continue;
750  }
751  // Check if this keypoint is near another point
752  // NOTE: if it is *not* a stereo point, then we will not delete the feature
753  // NOTE: this means we might have a mono and stereo feature near each other, but that is ok
754  bool is_stereo = (std::find(ids0.begin(), ids0.end(), *it1) != ids0.end());
755  if (grid_2d_close1.at<uint8_t>(y_close, x_close) > 127 && !is_stereo) {
756  it0 = pts1.erase(it0);
757  it1 = ids1.erase(it1);
758  continue;
759  }
760  // Now check if it is in a mask area or not
761  // NOTE: mask has max value of 255 (white) if it should be
762  if (mask1.at<uint8_t>(y, x) > 127) {
763  it0 = pts1.erase(it0);
764  it1 = ids1.erase(it1);
765  continue;
766  }
767  // Else we are good, move forward to the next point
768  grid_2d_close1.at<uint8_t>(y_close, x_close) = 255;
769  if (grid_2d_grid1.at<uint8_t>(y_grid, x_grid) < 255) {
770  grid_2d_grid1.at<uint8_t>(y_grid, x_grid) += 1;
771  }
772  // Append this to the local mask of the image
773  if (x - min_px_dist >= 0 && x + min_px_dist < img1pyr.at(0).cols && y - min_px_dist >= 0 && y + min_px_dist < img1pyr.at(0).rows) {
774  cv::Point pt1(x - min_px_dist, y - min_px_dist);
775  cv::Point pt2(x + min_px_dist, y + min_px_dist);
776  cv::rectangle(mask1_updated, pt1, pt2, cv::Scalar(255), -1);
777  }
778  it0++;
779  it1++;
780  }
781 
782  // RIGHT: if we need features we should extract them in the current frame
783  // RIGHT: note that we don't track them to the left as we already did left->right tracking above
784  int num_featsneeded_1 = num_features - (int)pts1.size();
785  if (num_featsneeded_1 > std::min(20, (int)(min_feat_percent * num_features))) {
786 
787  // This is old extraction code that would extract from the whole image
788  // This can be slow as this will recompute extractions for grid areas that we have max features already
789  // std::vector<cv::KeyPoint> pts1_ext;
790  // Grider_FAST::perform_griding(img1pyr.at(0), mask1_updated, pts1_ext, num_features, grid_x, grid_y, threshold, true);
791 
792  // We also check a downsampled mask such that we don't extract in areas where it is all masked!
793  cv::Mat mask1_grid;
794  cv::resize(mask1, mask1_grid, size_grid1, 0.0, 0.0, cv::INTER_NEAREST);
795 
796  // Create grids we need to extract from and then extract our features (use fast with griding)
797  int num_features_grid = (int)((double)num_features / (double)(grid_x * grid_y)) + 1;
798  int num_features_grid_req = std::max(1, (int)(min_feat_percent * num_features_grid));
799  std::vector<std::pair<int, int>> valid_locs;
800  for (int x = 0; x < grid_2d_grid1.cols; x++) {
801  for (int y = 0; y < grid_2d_grid1.rows; y++) {
802  if ((int)grid_2d_grid1.at<uint8_t>(y, x) < num_features_grid_req && (int)mask1_grid.at<uint8_t>(y, x) != 255) {
803  valid_locs.emplace_back(x, y);
804  }
805  }
806  }
807  std::vector<cv::KeyPoint> pts1_ext;
808  Grider_GRID::perform_griding(img1pyr.at(0), mask1_updated, valid_locs, pts1_ext, num_features, grid_x, grid_y, threshold, true);
809 
810  // Now, reject features that are close a current feature
811  for (auto &kpt : pts1_ext) {
812  // Check that it is in bounds
813  int x_grid = (int)(kpt.pt.x / (float)min_px_dist);
814  int y_grid = (int)(kpt.pt.y / (float)min_px_dist);
815  if (x_grid < 0 || x_grid >= size_close1.width || y_grid < 0 || y_grid >= size_close1.height)
816  continue;
817  // See if there is a point at this location
818  if (grid_2d_close1.at<uint8_t>(y_grid, x_grid) > 127)
819  continue;
820  // Else lets add it!
821  pts1.push_back(kpt);
822  size_t temp = ++currid;
823  ids1.push_back(temp);
824  grid_2d_close1.at<uint8_t>(y_grid, x_grid) = 255;
825  }
826  }
827 }
828 
829 void TrackKLT::perform_matching(const std::vector<cv::Mat> &img0pyr, const std::vector<cv::Mat> &img1pyr, std::vector<cv::KeyPoint> &kpts0,
830  std::vector<cv::KeyPoint> &kpts1, size_t id0, size_t id1, std::vector<uchar> &mask_out) {
831 
832  // We must have equal vectors
833  assert(kpts0.size() == kpts1.size());
834 
835  // Return if we don't have any points
836  if (kpts0.empty() || kpts1.empty())
837  return;
838 
839  // Convert keypoints into points (stupid opencv stuff)
840  std::vector<cv::Point2f> pts0, pts1;
841  for (size_t i = 0; i < kpts0.size(); i++) {
842  pts0.push_back(kpts0.at(i).pt);
843  pts1.push_back(kpts1.at(i).pt);
844  }
845 
846  // If we don't have enough points for ransac just return empty
847  // We set the mask to be all zeros since all points failed RANSAC
848  if (pts0.size() < 10) {
849  for (size_t i = 0; i < pts0.size(); i++)
850  mask_out.push_back((uchar)0);
851  return;
852  }
853 
854  // Now do KLT tracking to get the valid new points
855  std::vector<uchar> mask_klt;
856  std::vector<float> error;
857  cv::TermCriteria term_crit = cv::TermCriteria(cv::TermCriteria::COUNT | cv::TermCriteria::EPS, 30, 0.01);
858  cv::calcOpticalFlowPyrLK(img0pyr, img1pyr, pts0, pts1, mask_klt, error, win_size, pyr_levels, term_crit, cv::OPTFLOW_USE_INITIAL_FLOW);
859 
860  // Normalize these points, so we can then do ransac
861  // We don't want to do ransac on distorted image uvs since the mapping is nonlinear
862  std::vector<cv::Point2f> pts0_n, pts1_n;
863  for (size_t i = 0; i < pts0.size(); i++) {
864  pts0_n.push_back(camera_calib.at(id0)->undistort_cv(pts0.at(i)));
865  pts1_n.push_back(camera_calib.at(id1)->undistort_cv(pts1.at(i)));
866  }
867 
868  // Do RANSAC outlier rejection (note since we normalized the max pixel error is now in the normalized cords)
869  std::vector<uchar> mask_rsc;
870  double max_focallength_img0 = std::max(camera_calib.at(id0)->get_K()(0, 0), camera_calib.at(id0)->get_K()(1, 1));
871  double max_focallength_img1 = std::max(camera_calib.at(id1)->get_K()(0, 0), camera_calib.at(id1)->get_K()(1, 1));
872  double max_focallength = std::max(max_focallength_img0, max_focallength_img1);
873  cv::findFundamentalMat(pts0_n, pts1_n, cv::FM_RANSAC, 2.0 / max_focallength, 0.999, mask_rsc);
874 
875  // Loop through and record only ones that are valid
876  for (size_t i = 0; i < mask_klt.size(); i++) {
877  auto mask = (uchar)((i < mask_klt.size() && mask_klt[i] && i < mask_rsc.size() && mask_rsc[i]) ? 1 : 0);
878  mask_out.push_back(mask);
879  }
880 
881  // Copy back the updated positions
882  for (size_t i = 0; i < pts0.size(); i++) {
883  kpts0.at(i).pt = pts0.at(i);
884  kpts1.at(i).pt = pts1.at(i);
885  }
886 }
ov_core::Grider_GRID::perform_griding
static void perform_griding(const cv::Mat &img, const cv::Mat &mask, const std::vector< std::pair< int, int >> &valid_locs, std::vector< cv::KeyPoint > &pts, int num_features, int grid_x, int grid_y, int threshold, bool nonmaxSuppression)
This function will perform grid extraction using FAST.
Definition: Grider_GRID.h:74
FeatureDatabase.h
ov_core::TrackBase::mtx_feeds
std::vector< std::mutex > mtx_feeds
Mutexs for our last set of image storage (img_last, pts_last, and ids_last)
Definition: TrackBase.h:174
ov_core::TrackKLT::perform_matching
void perform_matching(const std::vector< cv::Mat > &img0pyr, const std::vector< cv::Mat > &img1pyr, std::vector< cv::KeyPoint > &pts0, std::vector< cv::KeyPoint > &pts1, size_t id0, size_t id1, std::vector< uchar > &mask_out)
KLT track between two images, and do RANSAC afterwards.
Definition: TrackKLT.cpp:829
ov_core::TrackBase::currid
std::atomic< size_t > currid
Master ID for this tracker (atomic to allow for multi-threading)
Definition: TrackBase.h:192
ov_core::TrackBase::database
std::shared_ptr< FeatureDatabase > database
Database with all our current features.
Definition: TrackBase.h:159
ov_core::TrackBase::img_mask_last
std::map< size_t, cv::Mat > img_mask_last
Last set of images (use map so all trackers render in the same order)
Definition: TrackBase.h:183
ov_core::TrackBase::mtx_last_vars
std::mutex mtx_last_vars
Mutex for editing the *_last variables.
Definition: TrackBase.h:177
ov_core::TrackKLT::grid_y
int grid_y
Definition: TrackKLT.h:137
ov_core::TrackBase::rT3
boost::posix_time::ptime rT3
Definition: TrackBase.h:195
ov_core::TrackKLT::grid_x
int grid_x
Definition: TrackKLT.h:136
Grider_FAST.h
ov_core::CameraData::images
std::vector< cv::Mat > images
Raw image we have collected for each camera.
Definition: sensor_data.h:64
Feature.h
ov_core::TrackKLT::perform_detection_monocular
void perform_detection_monocular(const std::vector< cv::Mat > &img0pyr, const cv::Mat &mask0, std::vector< cv::KeyPoint > &pts0, std::vector< size_t > &ids0)
Detects new features in the current image.
Definition: TrackKLT.cpp:395
ov_core::TrackBase::use_stereo
bool use_stereo
If we should use binocular tracking or stereo tracking for multi-camera.
Definition: TrackBase.h:168
TrackKLT.h
ov_core::CameraData
Struct for a collection of camera measurements.
Definition: sensor_data.h:55
ov_core::TrackBase::num_features
int num_features
Number of features we should try to track frame to frame.
Definition: TrackBase.h:165
PRINT_ERROR
#define PRINT_ERROR(x...)
Definition: print.h:100
PRINT_ALL
#define PRINT_ALL(x...)
Definition: print.h:96
ov_core::CameraData::sensor_ids
std::vector< int > sensor_ids
Camera ids for each of the images collected.
Definition: sensor_data.h:61
ov_core::TrackKLT::feed_monocular
void feed_monocular(const CameraData &message, size_t msg_id)
Process a new monocular image.
Definition: TrackKLT.cpp:96
ov_core::TrackBase::histogram_method
HistogramMethod histogram_method
What histogram equalization method we should pre-process images with?
Definition: TrackBase.h:171
ov_core::CameraData::timestamp
double timestamp
Timestamp of the reading.
Definition: sensor_data.h:58
ov_core::TrackKLT::min_px_dist
int min_px_dist
Definition: TrackKLT.h:140
ov_core::CameraData::masks
std::vector< cv::Mat > masks
Tracking masks for each camera we have.
Definition: sensor_data.h:67
ov_core::TrackKLT::img_curr
std::map< size_t, cv::Mat > img_curr
Definition: TrackKLT.h:148
ov_core::TrackBase::ids_last
std::unordered_map< size_t, std::vector< size_t > > ids_last
Set of IDs of each current feature in the database.
Definition: TrackBase.h:189
ov_core::TrackKLT::feed_stereo
void feed_stereo(const CameraData &message, size_t msg_id_left, size_t msg_id_right)
Process new stereo pair of images.
Definition: TrackKLT.cpp:202
ov_core::TrackKLT::threshold
int threshold
Definition: TrackKLT.h:135
ov_core::LambdaBody
Definition: opencv_lambda_body.h:37
print.h
CamBase.h
ov_core::TrackKLT::img_pyramid_curr
std::map< size_t, std::vector< cv::Mat > > img_pyramid_curr
Definition: TrackKLT.h:149
ov_core::TrackKLT::feed_new_camera
void feed_new_camera(const CameraData &message) override
Process a new image.
Definition: TrackKLT.cpp:34
ov_core::TrackBase::rT2
boost::posix_time::ptime rT2
Definition: TrackBase.h:195
RED
#define RED
Definition: colors.h:27
ov_core::TrackBase::rT5
boost::posix_time::ptime rT5
Definition: TrackBase.h:195
ov_core::TrackBase::rT1
boost::posix_time::ptime rT1
Definition: TrackBase.h:195
ov_core::TrackKLT::perform_detection_stereo
void perform_detection_stereo(const std::vector< cv::Mat > &img0pyr, const std::vector< cv::Mat > &img1pyr, const cv::Mat &mask0, const cv::Mat &mask1, size_t cam_id_left, size_t cam_id_right, std::vector< cv::KeyPoint > &pts0, std::vector< cv::KeyPoint > &pts1, std::vector< size_t > &ids0, std::vector< size_t > &ids1)
Detects new features in the current stereo pair.
Definition: TrackKLT.cpp:530
ov_core::TrackBase::rT6
boost::posix_time::ptime rT6
Definition: TrackBase.h:195
ov_core::TrackKLT::pyr_levels
int pyr_levels
Definition: TrackKLT.h:143
RESET
#define RESET
Definition: colors.h:25
Grider_GRID.h
ov_core::TrackBase::pts_last
std::unordered_map< size_t, std::vector< cv::KeyPoint > > pts_last
Last set of tracked points.
Definition: TrackBase.h:186
ov_core::TrackKLT::img_pyramid_last
std::map< size_t, std::vector< cv::Mat > > img_pyramid_last
Definition: TrackKLT.h:147
ov_core::TrackKLT::win_size
cv::Size win_size
Definition: TrackKLT.h:144
ov_core::TrackBase::img_last
std::map< size_t, cv::Mat > img_last
Last set of images (use map so all trackers render in the same order)
Definition: TrackBase.h:180
ov_core::TrackBase::camera_calib
std::unordered_map< size_t, std::shared_ptr< CamBase > > camera_calib
Camera object which has all calibration in it.
Definition: TrackBase.h:156
ov_core
Core algorithms for OpenVINS.
Definition: CamBase.h:30
ov_core::TrackBase::rT4
boost::posix_time::ptime rT4
Definition: TrackBase.h:195
opencv_lambda_body.h


ov_core
Author(s): Patrick Geneva , Kevin Eckenhoff , Guoquan Huang
autogenerated on Mon Dec 16 2024 03:06:46