SuperPoint.cc
// Includes reconstructed from the symbols used below; exact paths may differ in the rtabmap source tree.
#include "SuperPoint.h"
#include <rtabmap/utilite/ULogger.h>
#include <rtabmap/utilite/UConversion.h>
#include <rtabmap/utilite/UDirectory.h>
#include <rtabmap/utilite/UFile.h>
namespace rtabmap
{

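// Layer channel widths: c1..c4 for the shared encoder, c5 for the detector/descriptor head hidden layers,
// d1 for the descriptor dimension.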
const int c1 = 64;
const int c2 = 64;
const int c3 = 128;
const int c4 = 128;
const int c5 = 256;
const int d1 = 256;


SuperPoint::SuperPoint()
	: conv1a(torch::nn::Conv2dOptions( 1, c1, 3).stride(1).padding(1)),
	  conv1b(torch::nn::Conv2dOptions(c1, c1, 3).stride(1).padding(1)),

	  conv2a(torch::nn::Conv2dOptions(c1, c2, 3).stride(1).padding(1)),
	  conv2b(torch::nn::Conv2dOptions(c2, c2, 3).stride(1).padding(1)),

	  conv3a(torch::nn::Conv2dOptions(c2, c3, 3).stride(1).padding(1)),
	  conv3b(torch::nn::Conv2dOptions(c3, c3, 3).stride(1).padding(1)),

	  conv4a(torch::nn::Conv2dOptions(c3, c4, 3).stride(1).padding(1)),
	  conv4b(torch::nn::Conv2dOptions(c4, c4, 3).stride(1).padding(1)),

	  convPa(torch::nn::Conv2dOptions(c4, c5, 3).stride(1).padding(1)),
	  convPb(torch::nn::Conv2dOptions(c5, 65, 1).stride(1).padding(0)),

	  convDa(torch::nn::Conv2dOptions(c4, c5, 3).stride(1).padding(1)),
	  convDb(torch::nn::Conv2dOptions(c5, d1, 1).stride(1).padding(0))
{
	register_module("conv1a", conv1a);
	register_module("conv1b", conv1b);

	register_module("conv2a", conv2a);
	register_module("conv2b", conv2b);

	register_module("conv3a", conv3a);
	register_module("conv3b", conv3b);

	register_module("conv4a", conv4a);
	register_module("conv4b", conv4b);

	register_module("convPa", convPa);
	register_module("convPb", convPb);

	register_module("convDa", convDa);
	register_module("convDb", convDb);
}

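// Runs the network on a [B, 1, H, W] grayscale tensor and returns {keypoint heatmap [B, H, W],
// coarse descriptors [B, d1, H/8, W/8]}.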
std::vector<torch::Tensor> SuperPoint::forward(torch::Tensor x) {

	x = torch::relu(conv1a->forward(x));
	x = torch::relu(conv1b->forward(x));
	x = torch::max_pool2d(x, 2, 2);

	x = torch::relu(conv2a->forward(x));
	x = torch::relu(conv2b->forward(x));
	x = torch::max_pool2d(x, 2, 2);

	x = torch::relu(conv3a->forward(x));
	x = torch::relu(conv3b->forward(x));
	x = torch::max_pool2d(x, 2, 2);

	x = torch::relu(conv4a->forward(x));
	x = torch::relu(conv4b->forward(x));

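	// Detector head: 65 channels per 8x8 cell (64 pixel positions + 1 "dustbin" meaning no keypoint).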
	auto cPa = torch::relu(convPa->forward(x));
	auto semi = convPb->forward(cPa); // [B, 65, H/8, W/8]

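	// Descriptor head: one d1-dimensional descriptor per 8x8 cell.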
	auto cDa = torch::relu(convDa->forward(x));
	auto desc = convDb->forward(cDa); // [B, d1, H/8, W/8]

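	// L2-normalize the descriptors along the channel dimension.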
	auto dn = torch::norm(desc, 2, 1);
	desc = desc.div(torch::unsqueeze(dn, 1));

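	// Softmax over the 65 bins, then drop the dustbin and keep the 64 per-cell scores.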
	semi = torch::softmax(semi, 1);
	semi = semi.slice(1, 0, 64);
	semi = semi.permute({0, 2, 3, 1}); // [B, H/8, W/8, 64]

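	// Rearrange the 64 per-cell scores into a full-resolution [B, H, W] heatmap (depth-to-space over 8x8 cells).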
	int Hc = semi.size(1);
	int Wc = semi.size(2);
	semi = semi.contiguous().view({-1, Hc, Wc, 8, 8});
	semi = semi.permute({0, 1, 3, 2, 4});
	semi = semi.contiguous().view({-1, Hc * 8, Wc * 8}); // [B, H, W]

	std::vector<torch::Tensor> ret;
	ret.push_back(semi);
	ret.push_back(desc);

	return ret;
}

void NMS(const std::vector<cv::KeyPoint> & ptsIn,
		const cv::Mat & conf,
		const cv::Mat & descriptorsIn,
		std::vector<cv::KeyPoint> & ptsOut,
		cv::Mat & descriptorsOut,
		int border, int dist_thresh, int img_width, int img_height);

SPDetector::SPDetector(const std::string & modelPath, float threshold, bool nms, int minDistance, bool cuda) :
		threshold_(threshold),
		nms_(nms),
		minDistance_(minDistance),
		detected_(false)
{
	UDEBUG("modelPath=%s thr=%f nms=%d cuda=%d", modelPath.c_str(), threshold, nms?1:0, cuda?1:0);
	if(modelPath.empty())
	{
		UERROR("Model's path is empty!");
		return;
	}
	std::string path = uReplaceChar(modelPath, '~', UDirectory::homeDir());
	if(!UFile::exists(path))
	{
		UERROR("Model's path \"%s\" doesn't exist!", path.c_str());
		return;
	}
	model_ = std::make_shared<SuperPoint>();
	torch::load(model_, uReplaceChar(path, '~', UDirectory::homeDir()));

	if(cuda && !torch::cuda::is_available())
	{
		UWARN("Cuda option is enabled but torch doesn't have cuda support on this platform, using CPU instead.");
	}
	cuda_ = cuda && torch::cuda::is_available();
	torch::Device device(cuda_?torch::kCUDA:torch::kCPU);
	model_->to(device);
}

SPDetector::~SPDetector()
{
}

std::vector<cv::KeyPoint> SPDetector::detect(const cv::Mat &img, const cv::Mat & mask)
{
	UASSERT(img.type() == CV_8UC1);
	UASSERT(mask.empty() || (mask.type() == CV_8UC1 && img.cols == mask.cols && img.rows == mask.rows));
	detected_ = false;
	if(model_)
	{
		torch::NoGradGuard no_grad_guard;
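		// Wrap the 8-bit grayscale image as a [1, 1, H, W] float tensor scaled to [0, 1].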
		auto x = torch::from_blob(img.data, {1, 1, img.rows, img.cols}, torch::kByte);
		x = x.to(torch::kFloat) / 255;

		torch::Device device(cuda_?torch::kCUDA:torch::kCPU);
		x = x.set_requires_grad(false);
		auto out = model_->forward(x.to(device));

		prob_ = out[0].squeeze(0); // [H, W]
		desc_ = out[1];            // [1, 256, H/8, W/8]

		auto kpts = (prob_ > threshold_);
		kpts = torch::nonzero(kpts); // [n_keypoints, 2] (y, x)

		// convert back to cpu if in gpu
		auto kpts_cpu = kpts.to(torch::kCPU);
		auto prob_cpu = prob_.to(torch::kCPU);

		std::vector<cv::KeyPoint> keypoints_no_nms;
		for (int i = 0; i < kpts_cpu.size(0); i++) {
			if(mask.empty() || mask.at<unsigned char>(kpts_cpu[i][0].item<int>(), kpts_cpu[i][1].item<int>()) != 0)
			{
				float response = prob_cpu[kpts_cpu[i][0]][kpts_cpu[i][1]].item<float>();
				keypoints_no_nms.push_back(cv::KeyPoint(kpts_cpu[i][1].item<float>(), kpts_cpu[i][0].item<float>(), 8, -1, response));
			}
		}

		detected_ = true;
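		// Optionally apply non-maximum suppression so that keypoints are at least minDistance_ pixels apart.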
		if (nms_ && !keypoints_no_nms.empty()) {
			cv::Mat conf(keypoints_no_nms.size(), 1, CV_32F);
			for (size_t i = 0; i < keypoints_no_nms.size(); i++) {
				int x = keypoints_no_nms[i].pt.x;
				int y = keypoints_no_nms[i].pt.y;
				conf.at<float>(i, 0) = prob_cpu[y][x].item<float>();
			}

			int border = 0;
			int dist_thresh = minDistance_;
			int height = img.rows;
			int width = img.cols;

			std::vector<cv::KeyPoint> keypoints;
			cv::Mat descEmpty;
			NMS(keypoints_no_nms, conf, descEmpty, keypoints, descEmpty, border, dist_thresh, width, height);
			return keypoints;
		}
		else {
			return keypoints_no_nms;
		}
	}
	else
	{
		UERROR("No model is loaded!");
		return std::vector<cv::KeyPoint>();
	}
}

cv::Mat SPDetector::compute(const std::vector<cv::KeyPoint> &keypoints)
{
	if(!detected_)
	{
		UERROR("SPDetector has been reset before extracting the descriptors! detect() should be called before compute().");
		return cv::Mat();
	}
	if(model_.get())
	{
		cv::Mat kpt_mat(keypoints.size(), 2, CV_32F); // [n_keypoints, 2] (y, x)

		// Based on sample_descriptors() of SuperPoint implementation in SuperGlue:
		// https://github.com/magicleap/SuperGluePretrainedNetwork/blob/45a750e5707696da49472f1cad35b0b203325417/models/superpoint.py#L80-L92
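		// s is the cell size of the coarse descriptor map (the encoder downsamples by a factor of 8);
		// the -s/2 + 0.5 shift aligns keypoint pixel coordinates with the centers of the 8x8 cells.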
		float s = 8;
		for (size_t i = 0; i < keypoints.size(); i++) {
			kpt_mat.at<float>(i, 0) = (float)keypoints[i].pt.y - s/2 + 0.5;
			kpt_mat.at<float>(i, 1) = (float)keypoints[i].pt.x - s/2 + 0.5;
		}

		auto fkpts = torch::from_blob(kpt_mat.data, {(long int)keypoints.size(), 2}, torch::kFloat);

		float w = desc_.size(3); // W/8
		float h = desc_.size(2); // H/8

		torch::Device device(cuda_?torch::kCUDA:torch::kCPU);
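		// Build a [1, 1, n_keypoints, 2] sampling grid with coordinates normalized to [-1, 1]
		// (x first, then y), the layout expected by torch::grid_sampler.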
		auto grid = torch::zeros({1, 1, fkpts.size(0), 2}).to(device); // [1, 1, n_keypoints, 2]
		grid[0][0].slice(1, 0, 1) = 2.0 * fkpts.slice(1, 1, 2) / (w*s - s/2 - 0.5) - 1; // x
		grid[0][0].slice(1, 1, 2) = 2.0 * fkpts.slice(1, 0, 1) / (h*s - s/2 - 0.5) - 1; // y

		auto desc = torch::grid_sampler(desc_, grid, 0, 0, true); // [1, 256, 1, n_keypoints]

		// L2-normalize each descriptor to unit length
		desc = torch::nn::functional::normalize(desc.reshape({1, desc_.size(1), -1})); // [1, 256, n_keypoints]
		desc = desc.squeeze(0);                   // [256, n_keypoints]
		desc = desc.transpose(0, 1).contiguous(); // [n_keypoints, 256]

		if(cuda_)
			desc = desc.to(torch::kCPU);

		cv::Mat desc_mat(cv::Size(desc.size(1), desc.size(0)), CV_32FC1, desc.data_ptr<float>());

		return desc_mat.clone();
	}
	else
	{
		UERROR("No model is loaded!");
		return cv::Mat();
	}
}

void NMS(const std::vector<cv::KeyPoint> & ptsIn,
		const cv::Mat & conf,
		const cv::Mat & descriptorsIn,
		std::vector<cv::KeyPoint> & ptsOut,
		cv::Mat & descriptorsOut,
		int border, int dist_thresh, int img_width, int img_height)
{
	std::vector<cv::Point2f> pts_raw;

	for (size_t i = 0; i < ptsIn.size(); i++)
	{
		int u = (int) ptsIn[i].pt.x;
		int v = (int) ptsIn[i].pt.y;

		pts_raw.push_back(cv::Point2f(u, v));
	}

	// Grid Value Legend:
	//   255 : Kept.
	//     0 : Empty or suppressed.
	//   100 : To be processed (converted to either kept or suppressed).
	cv::Mat grid = cv::Mat(cv::Size(img_width, img_height), CV_8UC1);
	cv::Mat inds = cv::Mat(cv::Size(img_width, img_height), CV_16UC1);

	cv::Mat confidence = cv::Mat(cv::Size(img_width, img_height), CV_32FC1);

	grid.setTo(0);
	inds.setTo(0);
	confidence.setTo(0);

	for (size_t i = 0; i < pts_raw.size(); i++)
	{
		int uu = (int) pts_raw[i].x;
		int vv = (int) pts_raw[i].y;

		grid.at<unsigned char>(vv, uu) = 100;
		inds.at<unsigned short>(vv, uu) = i;

		confidence.at<float>(vv, uu) = conf.at<float>(i, 0);
	}

	// debug
	//cv::Mat confidenceVis = confidence.clone() * 255;
	//confidenceVis.convertTo(confidenceVis, CV_8UC1);
	//cv::imwrite("confidence.bmp", confidenceVis);
	//cv::imwrite("grid_in.bmp", grid);

	cv::copyMakeBorder(grid, grid, dist_thresh, dist_thresh, dist_thresh, dist_thresh, cv::BORDER_CONSTANT, 0);

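	// For each unprocessed point, suppress every neighbor within dist_thresh whose confidence is
	// lower or equal, then mark the point itself as kept.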
	for (size_t i = 0; i < pts_raw.size(); i++)
	{
		// account for top left padding
		int uu = (int) pts_raw[i].x + dist_thresh;
		int vv = (int) pts_raw[i].y + dist_thresh;
		float c = confidence.at<float>(vv-dist_thresh, uu-dist_thresh);

		if (grid.at<unsigned char>(vv, uu) == 100) // Not yet processed.
		{
			for(int k = -dist_thresh; k < (dist_thresh+1); k++)
			{
				for(int j = -dist_thresh; j < (dist_thresh+1); j++)
				{
					if(j==0 && k==0)
						continue;

					// Skip neighbors that fall outside the (unpadded) confidence map.
					int y = vv + k - dist_thresh;
					int x = uu + j - dist_thresh;
					if(y < 0 || y >= img_height || x < 0 || x >= img_width)
						continue;

					if ( confidence.at<float>(y, x) <= c )
					{
						grid.at<unsigned char>(vv + k, uu + j) = 0;
					}
				}
			}
			grid.at<unsigned char>(vv, uu) = 255;
		}
	}

	size_t valid_cnt = 0;
	std::vector<int> select_indice;

	grid = cv::Mat(grid, cv::Rect(dist_thresh, dist_thresh, img_width, img_height));

	// debug
	//cv::imwrite("grid_nms.bmp", grid);

	for (int v = 0; v < img_height; v++)
	{
		for (int u = 0; u < img_width; u++)
		{
			if (grid.at<unsigned char>(v,u) == 255)
			{
				int select_ind = (int) inds.at<unsigned short>(v, u);
				float response = conf.at<float>(select_ind, 0);
				ptsOut.push_back(cv::KeyPoint(pts_raw[select_ind], 8.0f, -1, response));

				select_indice.push_back(select_ind);
				valid_cnt++;
			}
		}
	}

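	// If input descriptors were provided, copy the rows corresponding to the kept keypoints.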
	if(!descriptorsIn.empty())
	{
		UASSERT(descriptorsIn.rows == (int)ptsIn.size());
		descriptorsOut.create(select_indice.size(), 256, CV_32F);

		for (size_t i=0; i<select_indice.size(); i++)
		{
			for (int j=0; j < 256; j++)
			{
				descriptorsOut.at<float>(i, j) = descriptorsIn.at<float>(select_indice[i], j);
			}
		}
	}
}

} // namespace rtabmap
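
// Minimal usage sketch (the model path, image file and parameter values below are placeholders,
// not part of the original file):
//
//   rtabmap::SPDetector detector("superpoint.pt", 0.010f, true, 4, false);
//   cv::Mat image = cv::imread("image.png", cv::IMREAD_GRAYSCALE);
//   std::vector<cv::KeyPoint> keypoints = detector.detect(image);
//   cv::Mat descriptors = detector.compute(keypoints);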