fisheye_stitcher.cpp
1 /* Copied from https://github.com/drNoob13/fisheyeStitcher/tree/45d65907b63c300523c6143794124411920cdbb9, 2020/02/05 */
2 /*
3 MIT License
4 
5 Copyright (c) 2018-2020 Tuan Phan Minh Ho
6 
7 Permission is hereby granted, free of charge, to any person obtaining a copy
8 of this software and associated documentation files (the "Software"), to deal
9 in the Software without restriction, including without limitation the rights
10 to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
11 copies of the Software, and to permit persons to whom the Software is
12 furnished to do so, subject to the following conditions:
13 
14 The above copyright notice and this permission notice shall be included in all
15 copies or substantial portions of the Software.
16 
17 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
20 AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22 OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
23 SOFTWARE.
24 */
25 
26 //----------------------------------------------------------------------------//
27 // //
28 // This file is part of the fisheye stitcher project. //
29 // Copyright (c) 2018-2020 Tuan Phan Minh Ho <drnoob2013@gmail.com> //
30 // https://github.com/drNoob13/fisheyeStitcher //
31 // //
32 //----------------------------------------------------------------------------//
33 #include "fisheye_stitcher.hpp"
34 
35 namespace stitcher
36 {
37 
38 FisheyeStitcher::FisheyeStitcher(int width, int height, float in_fovd,
39  bool enb_light_compen, bool enb_refine_align,
40  bool save_unwarped, std::string map_path )
41 : m_ws_org(width), m_hs_org(height), m_in_fovd(195.0f), m_inner_fovd(183.0f),
42  m_enb_light_compen(enb_light_compen), m_enb_refine_align(enb_refine_align),
43  m_save_unwarped(save_unwarped), m_map_path(map_path)
44 {
45  CV_Assert( (width % 2 == 0) && (height % 2 == 0) );
46 
47  // Source images
48  m_ws = static_cast<int>(width / 2); // e.g. 1920
49  m_hs = height; // e.g. 1920
50  CV_Assert( (m_ws % 2 == 0) && (m_hs % 2 == 0) );
51  m_ws2 = static_cast<int>(m_ws / 2);
52  m_hs2 = static_cast<int>(m_hs / 2);
53 
54  // Destination pano
55  m_wd = static_cast<int>(m_ws * 360.0 / MAX_FOVD);
56  m_hd = static_cast<int>(std::floor(m_wd / 2));
57  m_wd2 = static_cast<int>(std::floor(m_wd / 2));
58  m_hd2 = static_cast<int>(std::floor(m_hd / 2));
59 
60  // Initializing parameters
61  std::cout << "Initializing necessary parameters..\n";
62  init();
63 }
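// Sizing note: each fisheye occupies half of the side-by-side input (m_ws x m_hs), and
// the unwarped pano spans m_wd = m_ws * 360 / MAX_FOVD columns with m_hd = m_wd / 2 rows.
// Illustrative numbers, assuming MAX_FOVD = 195.0f as defined in fisheye_stitcher.hpp:
// a 3840x1920 frame gives m_ws = m_hs = 1920, m_wd = 3544, m_hd = 1772.
// Also note that the in_fovd argument is currently unused: m_in_fovd is fixed to 195.0f
// in the initializer list above.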
64 
65 FisheyeStitcher::~FisheyeStitcher()
66 {
67 }
68 
74 cv::Mat
75 FisheyeStitcher::unwarp( const cv::Mat &src )
76 {
77  cv::Mat dst(src.size(), src.type());
78  remap(src, dst, m_map_x, m_map_y, cv::INTER_LINEAR, cv::BORDER_CONSTANT,
79  cv::Scalar(0, 0, 0));
80  return dst;
81 
82 } // unwarp()
83 
87 std::tuple<double, double>
88 FisheyeStitcher::fish2Eqt( const double x_dest, const double y_dest,
89  const double W_rad )
90 {
91  double phi, theta, r, s;
92  double v[3];
93  phi = x_dest / W_rad;
94  theta = -y_dest / W_rad + CV_PI / 2;
95 
96  if (theta < 0)
97  {
98  theta = -theta;
99  phi += CV_PI;
100  }
101  if (theta > CV_PI)
102  {
103  theta = CV_PI - (theta - CV_PI);
104  phi += CV_PI;
105  }
106 
107  s = sin(theta);
108  v[0] = s * sin(phi);
109  v[1] = cos(theta);
110  r = sqrt(v[1] * v[1] + v[0] * v[0]);
111  theta = W_rad * atan2(r, s * cos(phi));
112  //
113  double x_src = theta * v[0] / r;
114  double y_src = theta * v[1] / r;
115 
116  return std::make_tuple(x_src, y_src);
117 
118 } // fish2Eqt()
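// Mapping note: (x_dest, y_dest) are pano coordinates relative to the pano centre.
// phi is the longitude and theta the polar angle of the viewing ray; v[0] and v[1] are
// the ray components transverse to the optical axis and s*cos(phi) the axial component.
// The radius W_rad * atan2(r, s*cos(phi)) is effectively an equidistant fisheye
// projection (image radius proportional to the angle off the optical axis), scaled by
// W_rad = m_wd / (2*pi) as passed in from fish2Map() below.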
119 
127 void
128 FisheyeStitcher::fish2Map()
129 {
130  cv::Mat map_x(m_hd, m_wd, CV_32FC1);
131  cv::Mat map_y(m_hd, m_wd, CV_32FC1);
132  double w_rad = m_wd / (2.0 * CV_PI);
133  double x_d, y_d; // dest
134  double x_s, y_s; // source
135  double w2 = static_cast<double>(m_wd2) - 0.5;
136  double h2 = static_cast<double>(m_hd2) - 0.5;
137  double ws2 = static_cast<double>(m_ws2) - 0.5;
138  double hs2 = static_cast<double>(m_hs2) - 0.5;
139 
140  for (int y = 0; y < m_hd; ++y)
141  {
142  // y-coordinate in dest image relative to center
143  y_d = static_cast<double>(y) - h2;
144  for (int x = 0; x < m_wd; ++x)
145  {
146  x_d = static_cast<double>(x) - w2;
147 
148  // Convert fisheye coordinate to cartesian coordinate (equirectangular)
149  std::tie(x_s, y_s) = fish2Eqt(x_d, y_d, w_rad);
150 
151  // Convert source cartesian coordinate to screen coordinate
152  x_s += ws2;
153  y_s += hs2;
154 
155  // Create map
156  map_x.at<float>(y, x) = static_cast<float>(x_s);
157  map_y.at<float>(y, x) = static_cast<float>(y_s);
158  }
159  }
160  map_x.copyTo(m_map_x);
161  map_y.copyTo(m_map_y);
162 
163 } // fish2Map()
164 
171 void
172 FisheyeStitcher::createMask()
173 {
174  cv::Mat cir_mask_ = cv::Mat::zeros(m_hs, m_ws, CV_8UC3); // zero-initialised (was left uninitialised)
175  cv::Mat inner_cir_mask_ = cv::Mat::zeros(m_hs, m_ws, CV_8UC3);
176 
177  int wShift = static_cast<int>(std::floor(
178  ((m_ws * (MAX_FOVD - m_inner_fovd) / MAX_FOVD) / 2.0f)));
179 
180  // Create Circular mask to crop the input W.R.T. FOVD
181  int r1 = m_ws2;
182  int r2 = m_ws2 - wShift * 2;
183  cv::circle(cir_mask_, cv::Point(m_ws2, m_ws2), r1,
184  cv::Scalar(255, 255, 255), -1, 8, 0); // fill circle with 0xFF
185  cv::circle(inner_cir_mask_, cv::Point(m_ws2, m_ws2), r2,
186  cv::Scalar(255, 255, 255), -1, 8, 0); // fill circle with 0xFF
187 
188  cv::Mat cir_mask;
189  cv::Mat inner_cir_mask;
190  cir_mask_.convertTo(cir_mask, CV_8UC3);
191  inner_cir_mask_.convertTo(inner_cir_mask, CV_8UC3);
192  cir_mask.copyTo(m_cir_mask);
193  inner_cir_mask.copyTo(m_inner_cir_mask);
194 
195 } // createMask()
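// Mask note: r1 = m_ws2 is the full fisheye circle (MAX_FOVD), while
// r2 = m_ws2 - 2*wShift shrinks it to the inner FOV (m_inner_fovd = 183 deg).
// The ring between the two circles is the overlap band that createBlendMask()
// unwarps to locate the blending seams. Illustrative numbers for a 1920x1920
// fisheye, assuming MAX_FOVD = 195.0f: wShift = 59, so r1 = 960 and r2 = 842.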
196 
202 cv::Mat
203 FisheyeStitcher::deform( const cv::Mat &src )
204 {
205  cv::Mat dst(src.size(), src.type());
206  cv::remap(src, dst, m_mls_map_x, m_mls_map_y, cv::INTER_LINEAR,
207  cv::BORDER_CONSTANT, cv::Scalar(0, 0, 0));
208  return dst;
209 
210 } // deform()
211 
218 void
219 FisheyeStitcher::genScaleMap()
220 {
221  // TODO: remove duplicate params
222 
223  //------------------------------------------------------------------------//
224  // Generate R_pf (reverse light fall-off profile) //
225  //------------------------------------------------------------------------//
226  int H = m_hs;
227  int W = m_ws;
228  int W_ = m_ws2;
229  int H_ = m_hs2;
230  cv::Mat x_coor = cv::Mat::zeros(1, W_, CV_32F);
231  cv::Mat temp(x_coor.size(), x_coor.type());
232 
233  for (int i = 0; i < W_; ++i)
234  {
235  x_coor.at<float>(0, i) = i;
236  }
237 
238  //-----------------------------------------------------------------------//
239  // R_pf = P1_ * (x_coor.^5.0) + P2_ * (x_coor.^4.0) + //
240  // P3_ * (x_coor.^3.0) + P4_ * (x_coor.^2.0) + //
241  // P5_ * x_coor + P6_; //
242  //-----------------------------------------------------------------------//
243  cv::Mat R_pf = cv::Mat::zeros(x_coor.size(), x_coor.type());
244  cv::pow(x_coor, 5.0, temp);
245  R_pf = R_pf + P1_ * temp;
246  cv::pow(x_coor, 4.0, temp);
247  R_pf = R_pf + P2_ * temp;
248  cv::pow(x_coor, 3.0, temp);
249  R_pf = R_pf + P3_ * temp;
250  cv::pow(x_coor, 2.0, temp);
251  R_pf = R_pf + P4_ * temp;
252  R_pf = R_pf + P5_ * x_coor + P6_;
253 
254  // PF_LUT
255  cv::divide(1, R_pf, R_pf); //element-wise inverse
256 
257  //------------------------------------------------------------------------//
258  // Generate scale map //
259  //------------------------------------------------------------------------//
260  // Create IV quadrant map
261  cv::Mat scale_map_quad_4 = cv::Mat::zeros(H_, W_, R_pf.type());
262  float da = R_pf.at<float>(0, W_ - 1);
263  int x, y;
264  float r, a, b;
265 
266  for (x = 0; x < W_; ++x)
267  {
268  for (y = 0; y < H_; ++y)
269  {
270  r = std::floor(sqrt(std::pow(x, 2) + std::pow(y, 2)));
271  if (r >= (W_ - 1))
272  {
273  scale_map_quad_4.at<float>(y, x) = da;
274  }
275  else
276  {
277  a = R_pf.at<float>(0, r);
278  if ((x < W_) && (y < H_)) // within boundaries
279  b = R_pf.at<float>(0, r + 1);
280  else // on boundaries
281  b = R_pf.at<float>(0, r);
282  scale_map_quad_4.at<float>(y, x) = (a + b) / 2.0f;
283  }
284  } // y()
285  } // x()
286 
287  // Assume Optical Symmetry & Flip
288  cv::Mat scale_map_quad_1(scale_map_quad_4.size(), scale_map_quad_4.type());
289  cv::Mat scale_map_quad_2(scale_map_quad_4.size(), scale_map_quad_4.type());
290  cv::Mat scale_map_quad_3(scale_map_quad_4.size(), scale_map_quad_4.type());
291  //
292  cv::flip(scale_map_quad_4, scale_map_quad_1, 0); // quad I, up-down or around x-axis
293  cv::flip(scale_map_quad_4, scale_map_quad_3, 1); // quad III, left-right or around y-axis
294  cv::flip(scale_map_quad_1, scale_map_quad_2, 1); // quad II, left-right or around y-axis
295  //
296  cv::Mat quad_21, quad_34;
297  cv::hconcat(scale_map_quad_2, scale_map_quad_1, quad_21);
298  cv::hconcat(scale_map_quad_3, scale_map_quad_4, quad_34);
299  //
300  cv::Mat scale_map;
301  cv::vconcat(quad_21, quad_34, scale_map);
302 
303  scale_map.copyTo(m_scale_map);
304 
305 } // genScaleMap()
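// Scale-map note: R_pf is a 5th-order polynomial fit (coefficients P1_..P6_ from
// fisheye_stitcher.hpp) of the radial light fall-off profile; the element-wise
// reciprocal turns it into a per-radius compensation gain. The quadrant computed
// above is the bottom-right one of the final map; the other three are obtained by
// mirroring, assuming the fall-off is symmetric about the optical centre.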
306 
312 cv::Mat
313 FisheyeStitcher::compenLightFO( const cv::Mat &in_img )
314 {
315  cv::Mat rgb_ch[3];
316  cv::Mat rgb_ch_double[3];
317  cv::Mat out_img_double(in_img.size(), in_img.type());
318  cv::split(in_img, rgb_ch);
319  rgb_ch[0].convertTo(rgb_ch_double[0], m_scale_map.type());
320  rgb_ch[1].convertTo(rgb_ch_double[1], m_scale_map.type());
321  rgb_ch[2].convertTo(rgb_ch_double[2], m_scale_map.type());
322  //
323  rgb_ch_double[0] = rgb_ch_double[0].mul(m_scale_map); // element-wise multiplication
324  rgb_ch_double[1] = rgb_ch_double[1].mul(m_scale_map);
325  rgb_ch_double[2] = rgb_ch_double[2].mul(m_scale_map);
326  cv::merge(rgb_ch_double, 3, out_img_double);
327 
328  cv::Mat out_img;
329  out_img_double.convertTo(out_img, CV_8U);
330  return out_img;
331 
332 } // compenLightFO()
333 
339 void
340 FisheyeStitcher::createBlendMask()
341 {
342  cv::Mat inner_cir_mask_n;
343  cv::Mat ring_mask, ring_mask_unwarped;
344 
345  int Ws2 = m_ws2;
346  int Hs2 = m_hs2;
347  int Wd2 = m_wd2;
348  cv::bitwise_not(m_inner_cir_mask, inner_cir_mask_n);
349 
350  m_cir_mask.copyTo(ring_mask, inner_cir_mask_n); // masking
351 
352 #if MY_DEBUG
353  std::cout << "Ws = " << m_ws << ", Hs = " << m_hs << "\n";
354  std::cout << "Wd = " << m_wd << ", Hd = " << m_hd << "\n";
355  cv::imwrite("m_cir_mask.jpg", m_cir_mask);
356  cv::imwrite("ring_mask.jpg", ring_mask);
357 #endif
358 
359  cv::remap(ring_mask, ring_mask_unwarped, m_map_x, m_map_y, cv::INTER_LINEAR,
360  cv::BORDER_CONSTANT, cv::Scalar(0, 0, 0));
361 
362  cv::Mat mask_ = ring_mask_unwarped(cv::Rect(Wd2-Ws2, 0, m_ws, m_hd));
363  mask_.convertTo(mask_, CV_8UC3);
364 
365 #if MY_DEBUG
366  cv::imwrite("mask_.jpg", mask_);
367 #endif
368 
369  int H_ = mask_.size().height;
370  int W_ = mask_.size().width;
371 
372  int ridx, cidx;
373 
374  // Hard-coded for dual-fisheye image of size 3840x1920
375  const int first_zero_col = 120; // first cidx that mask value is zero
376  const int first_zero_row = 45; // first ridx that mask value is zero
377 
378  // Clean up
379  for( ridx=0; ridx < H_; ++ridx)
380  {
381  for( cidx=0; cidx < W_; ++cidx ) // column loop added: cidx was read uninitialised here
382  {
383  if( cidx < first_zero_col || cidx > W_-first_zero_col )
384  mask_.at<cv::Vec3b>(cv::Point(cidx,ridx)) = cv::Vec3b(255,255,255);
385  } }
386  for( ridx=0; ridx < H_; ++ridx )
387  {
388  for( cidx=0; cidx < W_; ++cidx )
389  {
390  if( (ridx < (static_cast<int>(H_/2)) ) &&
391  (cidx > first_zero_col - 1) &&
392  (cidx < W_ - first_zero_col + 1) )
393  {
394  mask_.at<cv::Vec3b>(cv::Point(cidx,ridx)) = cv::Vec3b(0, 0, 0);
395  }
396  }
397  }
398 
399  // Create m_blend_post
400  int offset = 15;
401  for( ridx=0; ridx < H_; ++ridx )
402  {
403  if( ridx > H_ - first_zero_row )
404  {
405  m_blend_post.push_back( 0 );
406  continue;
407  }
408  for( cidx=first_zero_col-10; cidx < W_/2+10; ++cidx )
409  {
410  cv::Vec3b color = mask_.at<cv::Vec3b>(cv::Point(cidx,ridx));
411  if( color == cv::Vec3b(0,0,0))
412  {
413  m_blend_post.push_back( cidx - offset );
414  break;
415  }
416  }
417  }
418 
419  // generate binary mask
420  cv::Mat binary_mask;
421  mask_.convertTo( binary_mask, CV_8UC3 );
422  binary_mask.copyTo( m_binary_mask );
423 
424 #if MY_DEBUG
425  std::cout << "size mask_ = " << mask_.size() << ", type = " << mask_.type()
426  << ", ch = " << mask_.channels() << "\n";
427  cv::imwrite("binary_mask.jpg", binary_mask);
428 #endif
429 
430 } // createBlendMask()
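// Blend-mask note: m_binary_mask marks, inside the unwarped overlap band, which pixels
// should come from the left image (white) and which from the aligned right image (black).
// m_blend_post stores one column index per row: the seam position on the left boundary
// minus a 15-pixel margin, or 0 for rows skipped during blending
// (ridx > H_ - first_zero_row). The scan assumes every non-skipped row contains a black
// pixel in the searched range; the magic numbers 120 and 45 are tuned for 3840x1920
// dual-fisheye input, as noted above.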
431 
432 //
433 // @brief Initialize common parameters for stitching
434 //
435 void
436 FisheyeStitcher::init()
437 {
438  //------------------------------------------------------------------------//
439  // Create deformation maps //
440  //------------------------------------------------------------------------//
441  fish2Map(); // update m_map_x and m_map_y
442 
443  //------------------------------------------------------------------------//
444  // Create Circular mask to Crop the input W.R.T FOVD //
445  //------------------------------------------------------------------------//
446  // (mask all data outside the FOVD circle)
447  createMask(); // update m_cir_mask, m_inner_cir_mask
448 
449  //------------------------------------------------------------------------//
450  //  Create masks used in blending the deformed images                    //
451  //------------------------------------------------------------------------//
452  createBlendMask(); // update m_blend_post, m_binary_mask
453 
454  //------------------------------------------------------------------------//
455  // Create scale_map for fisheye light fall-off compensation //
456  //------------------------------------------------------------------------//
457  genScaleMap(); // update m_scale_map
458 
459  //------------------------------------------------------------------------//
460  // Read rigid MLS interp grids from file //
461  //------------------------------------------------------------------------//
462  cv::Mat mls_map_x, mls_map_y;
463  // 3840x1920 resolution (C200 video)
464  cv::FileStorage fs(m_map_path, cv::FileStorage::READ);
465  if( fs.isOpened())
466  {
467  fs["Xd"] >> mls_map_x;
468  fs["Yd"] >> mls_map_y;
469  fs.release();
470  }
471  else
472  {
473  CV_Error_(cv::Error::StsBadArg,
474  ("Cannot open map file: %s", m_map_path.c_str()));
475  }
476  mls_map_x.copyTo(m_mls_map_x);
477  mls_map_y.copyTo(m_mls_map_y);
478 
479 } // init()
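// Map-file note: the rigid-MLS grids are plain OpenCV FileStorage matrices keyed
// "Xd" and "Yd", precomputed offline for the 3840x1920 (C200) layout and passed in
// via map_path. A minimal sketch of writing a compatible file follows (the file name
// and the CV_32FC1 type are assumptions, not taken from this project):
//
//   cv::FileStorage out("mls_grids.yml.gz", cv::FileStorage::WRITE);
//   out << "Xd" << mls_map_x;   // CV_32FC1, same size as the cropped right image
//   out << "Yd" << mls_map_y;
//   out.release();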
480 
481 
488 cv::Point2f
489 FisheyeStitcher::findMatchLoc( const cv::Mat &Ref,
490  const cv::Mat &Tmpl,
491  const std::string &img_window,
492  const bool disable_display )
493 {
494  cv::Point2f matchLoc;
495  double tickStart, tickEnd, runTime;
496  cv::Mat img = Ref;
497  cv::Mat templ = Tmpl;
498  cv::Mat img_display, result;
499  img.copyTo(img_display);
500  int result_cols = img.cols - templ.cols + 1;
501  int result_rows = img.rows - templ.rows + 1;
502  result.create(result_rows, result_cols, CV_32FC1);
503 
504  // Select Normalized Cross-Correlation as Template Matching Method
505  int match_method = cv::TM_CCORR_NORMED;
506 
507  // Match template
508  cv::matchTemplate(img, templ, result, match_method);
509  cv::normalize(result, result, 0, 1, cv::NORM_MINMAX, -1, cv::Mat());
510 
511  // Check for peak cross-correlation
512  double minVal, maxVal;
513  cv::Point minLoc, maxLoc;
514 
515  // Point matchLoc
516  cv::minMaxLoc(result, &minVal, &maxVal, &minLoc, &maxLoc, cv::Mat());
517 
518  if (match_method == cv::TM_SQDIFF || match_method == cv::TM_SQDIFF_NORMED)
519  {
520  matchLoc = minLoc;
521  }
522  else // cv::TM_CCORR_NORMED
523  {
524  matchLoc = maxLoc;
525  }
526 
527  if (!disable_display)
528  {
529  cv::rectangle(img_display, matchLoc,
530  cv::Point(matchLoc.x + templ.cols, matchLoc.y + templ.rows),
531  cv::Scalar(0, 255, 0), 2, 8, 0);
532  cv::Mat RefTemplCat;
533  cv::hconcat(img_display, Tmpl, RefTemplCat);
534  cv::imshow(img_window, RefTemplCat);
535  }
536  return matchLoc;
537 
538 } // findMatchLoc()
539 
545 std::tuple<std::vector<cv::Point2f>, std::vector<cv::Point2f> >
546 FisheyeStitcher::createControlPoints( const cv::Point2f &matchLocLeft,
547  const cv::Point2f &matchLocRight, const int row_start,
548  const int row_end, const int p_wid, const int p_x1,
549  const int p_x2, const int p_x2_ref )
550 {
551  std::vector<cv::Point2f> movingPoints;
552  std::vector<cv::Point2f> fixedPoints;
553 
554  float x1 = matchLocLeft.x;
555  float y1 = matchLocLeft.y;
556  float x2 = matchLocRight.x;
557  float y2 = matchLocRight.y;
558 
559  //------------------------------------------------------------------------//
560  // Construct MovingPoints pRef (matched points of template on reference) //
561  //------------------------------------------------------------------------//
562  // Left Boundary
563  movingPoints.push_back(cv::Point2f(x1, y1 + row_start)); // pRef_11
564  movingPoints.push_back(cv::Point2f(x1 + p_wid, y1 + row_start)); // PRef_12
565  movingPoints.push_back(cv::Point2f(x1, y1 + row_end)); // pRef_13
566  movingPoints.push_back(cv::Point2f(x1 + p_wid, y1 + row_end)); // pRef_14
567  // Right Boundary
568  movingPoints.push_back(cv::Point2f(x2 + p_x2_ref, y2 + row_start)); // pRef_21
569  movingPoints.push_back(cv::Point2f(x2 + p_x2_ref + p_wid, y2 + row_start)); // pRef_22
570  movingPoints.push_back(cv::Point2f(x2 + p_x2_ref, y2 + row_end)); // pRef_23
571  movingPoints.push_back(cv::Point2f(x2 + p_x2_ref + p_wid, y2 + row_end)); // pRef_24
572 
573  //------------------------------------------------------------------------//
574  // Construct fixedPoint pTmpl (matched points of template on template) //
575  //------------------------------------------------------------------------//
576  // Left Boundary
577  fixedPoints.push_back(cv::Point2f(p_x1, row_start)); // pTmpl_11
578  fixedPoints.push_back(cv::Point2f(p_x1 + p_wid, row_start)); // pTmpl_12
579  fixedPoints.push_back(cv::Point2f(p_x1, row_end)); // pTmpl_13
580  fixedPoints.push_back(cv::Point2f(p_x1 + p_wid, row_end)); // pTmpl_14
581  // Right boundary
582  fixedPoints.push_back(cv::Point2f(p_x2, row_start)); // pTmpl_21
583  fixedPoints.push_back(cv::Point2f(p_x2 + p_wid, row_start)); // pTmpl_22
584  fixedPoints.push_back(cv::Point2f(p_x2, row_end)); // pTmpl_23
585  fixedPoints.push_back(cv::Point2f(p_x2 + p_wid, row_end)); // pTmpl_24
586 
587  return std::make_tuple(movingPoints, fixedPoints);
588 
589 } // createControlPoints()
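// Control-point note: the eight pairs above (four per overlap boundary) relate the
// template patches cut from the deformed right image (fixedPoints) to their matched
// locations in the left reference (movingPoints). In stitch() they are fed to
// cv::findHomography(fixedPoints, movingPoints, 0), i.e. a plain least-squares fit,
// and the resulting transform refines the right image via cv::warpPerspective.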
590 
597 cv::Mat
598 FisheyeStitcher::blendRight( const cv::Mat &bg1, const cv::Mat &bg2 )
599 {
600  int h = bg1.size().height;
601  int w = bg1.size().width;
602  double wdb = static_cast<double>(w);
603  cv::Mat bg_ = cv::Mat::zeros(bg1.size(), CV_32F);
604  double alpha1, alpha2;
605  cv::Mat bg1_, bg2_;
606  bg1.convertTo(bg1_, CV_32F);
607  bg2.convertTo(bg2_, CV_32F);
608  //
609  cv::Mat bgr_bg[3], bgr_bg1[3], bgr_bg2[3]; //destination array
610  split(bg1_, bgr_bg1); //split source
611  split(bg2_, bgr_bg2); //split source
612  //
613  bgr_bg[0] = cv::Mat::zeros(bgr_bg1[1].size(), CV_32F);
614  bgr_bg[1] = cv::Mat::zeros(bgr_bg1[1].size(), CV_32F);
615  bgr_bg[2] = cv::Mat::zeros(bgr_bg1[1].size(), CV_32F);
616 
617  for (int r = 0; r < h; ++r)
618  {
619  for (int c = 0; c < w; ++c)
620  {
621  alpha1 = static_cast<double>(c) / wdb;
622  alpha2 = 1.0 - alpha1;
623  bgr_bg[0].at<float>(r, c) = alpha1*bgr_bg1[0].at<float>(r, c) +
624  alpha2*bgr_bg2[0].at<float>(r, c);
625  bgr_bg[1].at<float>(r, c) = alpha1*bgr_bg1[1].at<float>(r, c) +
626  alpha2*bgr_bg2[1].at<float>(r, c);
627  bgr_bg[2].at<float>(r, c) = alpha1*bgr_bg1[2].at<float>(r, c) +
628  alpha2*bgr_bg2[2].at<float>(r, c);
629  }
630  }
631  cv::Mat bg;
632  cv::merge(bgr_bg, 3, bg);
633  bg.convertTo(bg, CV_8U);
634  return bg;
635 
636 } // blendRight()
637 
644 cv::Mat
645 FisheyeStitcher::blendLeft( const cv::Mat &bg1, const cv::Mat &bg2 )
646 {
647  int h = bg1.size().height;
648  int w = bg1.size().width;
649  double wdb = static_cast<double>(w);
650  cv::Mat bg_ = cv::Mat::zeros(bg1.size(), CV_32F);
651  double alpha1, alpha2;
652  cv::Mat bg1_, bg2_;
653  bg1.convertTo(bg1_, CV_32F);
654  bg2.convertTo(bg2_, CV_32F);
655  //
656  cv::Mat bgr_bg[3], bgr_bg1[3], bgr_bg2[3]; //destination array
657  split(bg1_, bgr_bg1); //split source
658  split(bg2_, bgr_bg2); //split source
659  //
660  bgr_bg[0] = cv::Mat::zeros(bgr_bg1[1].size(), CV_32F);
661  bgr_bg[1] = cv::Mat::zeros(bgr_bg1[1].size(), CV_32F);
662  bgr_bg[2] = cv::Mat::zeros(bgr_bg1[1].size(), CV_32F);
663 
664  for (int r = 0; r < h; ++r)
665  {
666  for (int c = 0; c < w; ++c)
667  {
668  alpha1 = (wdb - c + 1) / wdb;
669  alpha2 = 1.0 - alpha1;
670  bgr_bg[0].at<float>(r, c) = alpha1*bgr_bg1[0].at<float>(r, c) +
671  alpha2*bgr_bg2[0].at<float>(r, c);
672  bgr_bg[1].at<float>(r, c) = alpha1*bgr_bg1[1].at<float>(r, c) +
673  alpha2*bgr_bg2[1].at<float>(r, c);
674  bgr_bg[2].at<float>(r, c) = alpha1*bgr_bg1[2].at<float>(r, c) +
675  alpha2*bgr_bg2[2].at<float>(r, c);
676  }
677  }
678  cv::Mat bg;
679  cv::merge(bgr_bg, 3, bg);
680  bg.convertTo(bg, CV_8U);
681  return bg;
682 
683 } // blendLeft()
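// Ramp note: both helpers cross-fade a one-pixel-high window column by column.
// blendRight() weights bg1 by c/w (bg2 dominates at the left edge, bg1 at the right),
// while blendLeft() weights bg1 by (w - c + 1)/w (bg1 dominates at the left edge,
// bg2 at the right). Note that (w - c + 1)/w slightly exceeds 1.0 at c = 0, so the
// first column is marginally over-weighted toward bg1.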
684 
685 
692 cv::Mat
693 FisheyeStitcher::blend( const cv::Mat &left_img,
694  const cv::Mat &right_img_aligned )
695 {
696 #if GEAR360_C200
697  cv::Mat post;
698  // Read YML
699  cv::FileStorage fs("./utils/post_find.yml", cv::FileStorage::READ);
700  fs["post_ret"] >> post; // 1772 x 1
701  fs.release();
702  // Mask
703  cv::Mat mask = imread("./utils/mask_1920x1920_fovd_187.jpg", cv::IMREAD_COLOR);
704 #else
705  // use `m_blend_post` instead of `post`
706  // use `m_binary_mask` instead of `mask` from file
707  cv::Mat mask = m_binary_mask;
708 #endif
709  int H = mask.size().height;
710  int W = mask.size().width;
711 
712  //-----------------------------------------------------------------------//
713  // Prepare 2 blending patches //
714  //-----------------------------------------------------------------------//
715  // int Worg = 1920;
716  int Worg = m_ws;
717  int imH = left_img.size().height;
718  int imW = left_img.size().width;
719  cv::Mat left_img_cr = left_img(cv::Rect(imW / 2 + 1 - Worg / 2, 0, Worg, imH));
720 
721  int sideW = 45; // width in pixels
722  cv::Mat left_blend, right_blend;
723 
724  for (int r = 0; r < H; ++r)
725  {
726 #if GEAR360_C200
727  int p = post.at<float>(r, 0);
728 #else
729  int p = m_blend_post[r];
730 #endif
731  if (p == 0)
732  {
733  continue;
734  }
735  // Left boundary
736  cv::Mat lf_win_1 = left_img_cr(cv::Rect(p - sideW, r, 2 * sideW, 1));
737  cv::Mat rt_win_1 = right_img_aligned(cv::Rect(p - sideW, r, 2 * sideW, 1));
738  // Right boundary
739  cv::Mat lf_win_2 = left_img_cr(cv::Rect((W - p - sideW), r, 2 * sideW, 1));
740  cv::Mat rt_win_2 = right_img_aligned(cv::Rect((W - p - sideW), r, 2 * sideW, 1));
741  // Blend(ramp)
742  cv::Mat bleft, bright;
743  bleft = blendLeft(lf_win_1, rt_win_1);
744  bright = blendRight(lf_win_2, rt_win_2);
745  // Update left boundary
746  bleft.copyTo(lf_win_1);
747  bleft.copyTo(rt_win_1);
748  // Update right boundary
749  bright.copyTo(lf_win_2);
750  bright.copyTo(rt_win_2);
751  }
752 
753  if (m_save_unwarped) {
754  cv::imwrite("left_crop_blend.jpg", left_img_cr);
755  cv::imwrite("right_blend.jpg", right_img_aligned);
756  }
757 
758  //-----------------------------------------------------------------------//
759  // Blending //
760  //-----------------------------------------------------------------------//
761  cv::Mat mask_ = mask(cv::Rect(0, 0, mask.size().width,
762  mask.size().height - 2));
763  cv::Mat mask_n;
764  bitwise_not(mask_, mask_n);
765  bitwise_and(left_img_cr, mask_, left_img_cr); // Left image
766  //
767  cv::Mat temp1 = left_img(cv::Rect(0, 0, (imW / 2 - Worg / 2), imH));
768  cv::Mat temp2 = left_img(cv::Rect((imW / 2 + Worg / 2), 0,
769  (imW / 2 - Worg / 2), imH));
770  cv::Mat t;
771  cv::hconcat(temp1, left_img_cr, t);
772  cv::hconcat(t, temp2, left_img);
773  //
774  bitwise_and(right_img_aligned, mask_n, right_img_aligned); // Right image
775  //
776  cv::Mat pano;
777  pano = left_img;
778  cv::Mat temp = pano(cv::Rect((imW / 2 - Worg / 2), 0, Worg, imH));
779  cv::Mat t2;
780  cv::bitwise_or(temp, right_img_aligned, t2);
781  t2.copyTo(temp); // updated pano
782 
783  return pano;
784 
785 } // blend()
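// Blending note: the left pano keeps its outer portions untouched; only the central
// Worg-wide strip overlaps the aligned right image. For every row with a non-zero
// m_blend_post entry, two 2*sideW = 90 px windows (one per seam) are ramp-blended,
// then m_binary_mask and its complement select the left and right contributions in
// the strip and cv::bitwise_or merges them back into the pano.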
786 
787 
794 cv::Mat
795 FisheyeStitcher::stitch(const cv::Mat& in_img_L, const cv::Mat& in_img_R)
796 {
797  // int W_in = 1920;
798  int W_in = m_ws; // default: video 3840 x 1920
799  cv::Mat left_unwarped, right_unwarped;
800  double tickStart, tickEnd, runTime;
801 
802 #if PROFILING
803  tickStart = static_cast<double>(cv::getTickCount());
804 #endif
805 
806  //------------------------------------------------------------------------//
807  // Circular Crop //
808  //------------------------------------------------------------------------//
809  cv::bitwise_and(in_img_L, m_cir_mask, in_img_L); // Left image
810  cv::bitwise_and(in_img_R, m_cir_mask, in_img_R); // Right image
811 
812 #if PROFILING
813  tickEnd = static_cast<double>(cv::getTickCount());
814  runTime = (tickEnd - tickStart) / cv::getTickFrequency();
815  tickStart = tickEnd;
816  std::cout << "run-time (Crop) = " << runTime << " (sec)" << "\n";
817 #endif
818 
819  //------------------------------------------------------------------------//
820  // Light Fall-off Compensation //
821  //------------------------------------------------------------------------//
822  cv::Mat left_img_compensated(in_img_L.size(), in_img_L.type());
823  cv::Mat right_img_compensated(in_img_R.size(), in_img_R.type());
824  if (!m_enb_light_compen) // skip LFOC
825  {
826  left_img_compensated = in_img_L;
827  right_img_compensated = in_img_R;
828  }
829  else
830  {
831  left_img_compensated = compenLightFO(in_img_L);
832  right_img_compensated = compenLightFO(in_img_R);
833  }
834 
835 #if PROFILING
836  tickEnd = static_cast<double>(cv::getTickCount());
837  runTime = (tickEnd - tickStart) / cv::getTickFrequency();
838  tickStart = tickEnd;
839  std::cout << "run-time (LightCompen) = " << runTime << " (sec)" << "\n";
840 #endif
841 
842  //------------------------------------------------------------------------//
843  // Fisheye Unwarping //
844  //------------------------------------------------------------------------//
845  left_unwarped = unwarp(left_img_compensated);
846  right_unwarped = unwarp(right_img_compensated);
847 
848 #if PROFILING
849  tickEnd = static_cast<double>(cv::getTickCount());
850  runTime = (tickEnd - tickStart) / cv::getTickFrequency();
851  tickStart = tickEnd;
852  std::cout << "run-time (Unwarp) = " << runTime << " (sec)" << "\n";
853 #endif
854 
855  if (m_save_unwarped) {
856  cv::imwrite("l.jpg", left_unwarped);
857  cv::imwrite("r.jpg", right_unwarped);
858  }
859 
860 #if PROFILING
861  tickStart = static_cast<double>(cv::getTickCount());
862 #endif
863 
864  //------------------------------------------------------------------------//
865  // Rigid Moving Least Squares Deformation //
866  //------------------------------------------------------------------------//
867  cv::Mat rightImg_crop, rightImg_mls_deformed;
868  rightImg_crop = right_unwarped(cv::Rect(int(m_wd / 2) - (W_in / 2), 0,
869  W_in, m_hd - 2)); // notice on (Hd-2) --> become: (Hd)
870  rightImg_mls_deformed = deform(rightImg_crop);
871 
872  if (m_save_unwarped) {
873  cv::imwrite("r_img_crop.jpg", rightImg_crop);
874  cv::imwrite("r_mls_deformed.jpg",rightImg_mls_deformed);
875  }
876 
877 #if PROFILING
878  tickEnd = static_cast<double>(cv::getTickCount());
879  runTime = (tickEnd - tickStart) / cv::getTickFrequency();
880  tickStart = tickEnd;
881  std::cout << "run-time (MLS Deform) = " << runTime << " (sec)" << "\n";
882 #endif
883 
884  //------------------------------------------------------------------------//
885  // Rearrange Image for Adaptive Alignment //
886  //------------------------------------------------------------------------//
887  cv::Mat temp1 = left_unwarped(cv::Rect(0, 0, m_wd2, m_hd - 2));
888  cv::Mat temp2 = left_unwarped(cv::Rect(m_wd2, 0, m_wd2,
889  m_hd - 2));
890  cv::Mat left_unwarped_arr; // re-arranged left unwarped
891  cv::hconcat(temp2, temp1, left_unwarped_arr);
892  cv::Mat leftImg_crop;
893  leftImg_crop = left_unwarped_arr(cv::Rect(m_wd2 - (W_in / 2), 0,
894  W_in, m_hd - 2));
895  uint16_t crop = static_cast<uint16_t>(0.5f * m_ws * (MAX_FOVD - 180.0) / MAX_FOVD); // half overlap region
896 
897  //------------------------------------------------------------------------//
898  // PARAMETERS (hard-coded) for C200 videos //
899  //------------------------------------------------------------------------//
900  // (empirical experiment for dual-fisheye image of size 3840 x 1920)
901  uint16_t p_wid = 55;
902  uint16_t p_x1 = 90 - 15;
903  uint16_t p_x2 = 1780 - 5;
904  uint16_t p_x1_ref = 2 * crop;
905  uint16_t row_start = 590;
906  uint16_t row_end = 1320;
907  uint16_t p_x2_ref = m_ws - 2 * crop + 1;
908  //
909  cv::Mat Ref_1, Ref_2, Tmpl_1, Tmpl_2;
910  Ref_1 = leftImg_crop(cv::Rect(0, row_start, p_x1_ref, row_end - row_start));
911  Ref_2 = leftImg_crop(cv::Rect(p_x2_ref, row_start, m_ws - p_x2_ref, row_end - row_start));
912  Tmpl_1 = rightImg_mls_deformed(cv::Rect(p_x1, row_start, p_wid, row_end - row_start));
913  Tmpl_2 = rightImg_mls_deformed(cv::Rect(p_x2, row_start, p_wid, row_end - row_start));
914 
915  if (m_save_unwarped) {
916  cv::imwrite("l_img_crop.jpg", leftImg_crop);
917  cv::imwrite("Ref_1.jpg", Ref_1);
918  cv::imwrite("Ref_2.jpg", Ref_2);
919  cv::imwrite("Tmpl_1.jpg", Tmpl_1);
920  cv::imwrite("Tmpl_2.jpg", Tmpl_2);
921  }
922 
923  //------------------------------------------------------------------------//
924  // Adaptive Alignment (Norm XCorr) //
925  //------------------------------------------------------------------------//
926  bool disable_display = 1; // 1: display off, 0: display on
927  std::string wname1 = "Matching On Left Boundary";
928  std::string wname2 = "Matching On Right Boundary";
929 
930 #if PROFILING
931  tickStart = static_cast<double>(cv::getTickCount());
932 #endif
933 
934  cv::Mat warpedRightImg;
935  if (!m_enb_refine_align) // skip
936  {
937  warpedRightImg = rightImg_mls_deformed;
938  }
939  else
940  {
941  //--------------------------------------------------------------------//
942  // Find matching location (normalized XCorr) //
943  //--------------------------------------------------------------------//
944  cv::Point2f matchLocLeft, matchLocRight;
945  matchLocLeft = findMatchLoc(Ref_1, Tmpl_1, wname1, disable_display); // Left boundary
946  matchLocRight = findMatchLoc(Ref_2, Tmpl_2, wname2, disable_display); // Right boundary
947 
948 #if MY_DEBUG
949  std::cout << "matchLocLeft(x=" << matchLocLeft.x
950  << ", y=" << matchLocLeft.y
951  << "), matchLocRight(x=" << matchLocRight.x
952  << ", y=" << matchLocRight.y << ")\n";
953 #endif
954 
955  //--------------------------------------------------------------------//
956  // Construct control points //
957  //--------------------------------------------------------------------//
958  std::vector<cv::Point2f> movingPoints; // matched points in Refs
959  std::vector<cv::Point2f> fixedPoints; // matched points in Templates
960 
961  std::tie(movingPoints, fixedPoints) =
962  createControlPoints(matchLocLeft, matchLocRight, row_start, row_end,
963  p_wid, p_x1, p_x2, p_x2_ref);
964 
965 #if PROFILING
966  tickEnd = static_cast<double>(cv::getTickCount());
967  runTime = (tickEnd - tickStart) / cv::getTickFrequency();
968  tickStart = tickEnd;
969  std::cout << "run-time (Xcorr & fitGeoTrans) = " << runTime << " (sec)" << "\n";
970 #endif
971 
972  //--------------------------------------------------------------------//
973  // Estimate affine matrix //
974  //--------------------------------------------------------------------//
975  cv::Mat tform_refine_mat;
976  tform_refine_mat = cv::findHomography(fixedPoints, movingPoints, 0);
977 
978  //--------------------------------------------------------------------//
979  // Warp Image //
980  //--------------------------------------------------------------------//
981  cv::warpPerspective(rightImg_mls_deformed, warpedRightImg,
982  tform_refine_mat, rightImg_mls_deformed.size(),
983  cv::INTER_LINEAR);
984 
985 #if PROFILING
986  tickEnd = static_cast<double>(cv::getTickCount());
987  runTime = (tickEnd - tickStart) / cv::getTickFrequency();
988  tickStart = tickEnd;
989  std::cout << "run-time (estimate tform_mat & warping) = " << runTime << " (sec)" << "\n";
990 #endif
991 
992  } // Normalized xcorr
993 
994  //-----------------------------------------------------------------------//
995  // Blend Images //
996  //-----------------------------------------------------------------------//
997  cv::Mat pano;
998  pano = blend(left_unwarped_arr, warpedRightImg);
999 
1000 #if PROFILING
1001  tickEnd = static_cast<double>(cv::getTickCount());
1002  runTime = (tickEnd - tickStart) / cv::getTickFrequency();
1003  tickStart = tickEnd;
1004  std::cout << "run-time (Blending) = " << runTime << " (sec)" << "\n";
1005 #endif
1006 
1007  return pano;
1008 
1009 } // stitch()
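// Pipeline note: stitch() runs, in order, circular cropping with m_cir_mask, optional
// light fall-off compensation, fisheye-to-equirectangular unwarping, rigid-MLS
// deformation of the right half, optional refinement via normalized cross-correlation
// template matching plus a least-squares homography, and finally ramp blending of the
// two seams. The template and search windows (p_wid, p_x1, p_x2, row_start, row_end)
// are hard-coded for 3840x1920 dual-fisheye video, as noted above.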
1010 
1011 } // namespace
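// Example usage (illustrative sketch only; the input file name and the MLS grid file
// name below are assumptions, not taken from this file):
//
//   #include "fisheye_stitcher.hpp"
//
//   cv::Mat frame = cv::imread("dual_fisheye_3840x1920.jpg");  // side-by-side fisheyes
//   cv::Mat img_l = frame(cv::Rect(0, 0, frame.cols / 2, frame.rows)).clone();
//   cv::Mat img_r = frame(cv::Rect(frame.cols / 2, 0, frame.cols / 2, frame.rows)).clone();
//
//   stitcher::FisheyeStitcher fisheye_stitcher(frame.cols, frame.rows, 195.0f,
//       true,   // enable light fall-off compensation
//       true,   // enable refined alignment
//       false,  // do not dump unwarped intermediates
//       "mls_grids_3840x1920.yml.gz");  // rigid-MLS grid file containing Xd/Yd
//   cv::Mat pano = fisheye_stitcher.stitch(img_l, img_r);
//   cv::imwrite("pano.jpg", pano);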