rsutil.h
Go to the documentation of this file.
3
4 #ifndef LIBREALSENSE_RSUTIL2_H
5 #define LIBREALSENSE_RSUTIL2_H
6
7 #include "h/rs_types.h"
8 #include "h/rs_sensor.h"
9 #include "h/rs_frame.h"
10 #include "rs.h"
11 #include "assert.h"
12 #include <stdlib.h>
13 #include <stdbool.h>
14 #include <stdint.h>
15 #include <math.h>
16 #include <float.h>
17
18 /* Given a point in 3D space, compute the corresponding pixel coordinates in an image with no distortion or forward distortion coefficients produced by the same camera */
19 static void rs2_project_point_to_pixel(float pixel[2], const struct rs2_intrinsics * intrin, const float point[3])
20 {
21  float x = point[0] / point[2], y = point[1] / point[2];
22
25  {
26
27  float r2 = x*x + y*y;
28  float f = 1 + intrin->coeffs[0]*r2 + intrin->coeffs[1]*r2*r2 + intrin->coeffs[4]*r2*r2*r2;
29  x *= f;
30  y *= f;
31  float dx = x + 2*intrin->coeffs[2]*x*y + intrin->coeffs[3]*(r2 + 2*x*x);
32  float dy = y + 2*intrin->coeffs[3]*x*y + intrin->coeffs[2]*(r2 + 2*y*y);
33  x = dx;
34  y = dy;
35  }
36
38  {
39  float r2 = x * x + y * y;
40  float f = 1 + intrin->coeffs[0] * r2 + intrin->coeffs[1] * r2*r2 + intrin->coeffs[4] * r2*r2*r2;
41
42  float xf = x * f;
43  float yf = y * f;
44
45  float dx = xf + 2 * intrin->coeffs[2] * x*y + intrin->coeffs[3] * (r2 + 2 * x*x);
46  float dy = yf + 2 * intrin->coeffs[3] * x*y + intrin->coeffs[2] * (r2 + 2 * y*y);
47
48  x = dx;
49  y = dy;
50  }
51
52  if (intrin->model == RS2_DISTORTION_FTHETA)
53  {
54  float r = sqrtf(x*x + y*y);
55  if (r < FLT_EPSILON)
56  {
57  r = FLT_EPSILON;
58  }
59  float rd = (float)(1.0f / intrin->coeffs[0] * atan(2 * r* tan(intrin->coeffs[0] / 2.0f)));
60  x *= rd / r;
61  y *= rd / r;
62  }
64  {
65  float r = sqrtf(x*x + y*y);
66  if (r < FLT_EPSILON)
67  {
68  r = FLT_EPSILON;
69  }
70  float theta = atan(r);
71  float theta2 = theta*theta;
72  float series = 1 + theta2*(intrin->coeffs[0] + theta2*(intrin->coeffs[1] + theta2*(intrin->coeffs[2] + theta2*intrin->coeffs[3])));
73  float rd = theta*series;
74  x *= rd / r;
75  y *= rd / r;
76  }
77
78  pixel[0] = x * intrin->fx + intrin->ppx;
79  pixel[1] = y * intrin->fy + intrin->ppy;
80 }
81
82 /* Given pixel coordinates and depth in an image with no distortion or inverse distortion coefficients, compute the corresponding point in 3D space relative to the same camera */
83 static void rs2_deproject_pixel_to_point(float point[3], const struct rs2_intrinsics * intrin, const float pixel[2], float depth)
84 {
85  assert(intrin->model != RS2_DISTORTION_MODIFIED_BROWN_CONRADY); // Cannot deproject from a forward-distorted image
86  //assert(intrin->model != RS2_DISTORTION_BROWN_CONRADY); // Cannot deproject to an brown conrady model
87
88  float x = (pixel[0] - intrin->ppx) / intrin->fx;
89  float y = (pixel[1] - intrin->ppy) / intrin->fy;
90
91  float xo = x;
92  float yo = y;
93
95  {
96  // need to loop until convergence
97  // 10 iterations determined empirically
98  for (int i = 0; i < 10; i++)
99  {
100  float r2 = x * x + y * y;
101  float icdist = (float)1 / (float)(1 + ((intrin->coeffs[4] * r2 + intrin->coeffs[1])*r2 + intrin->coeffs[0])*r2);
102  float xq = x / icdist;
103  float yq = y / icdist;
104  float delta_x = 2 * intrin->coeffs[2] * xq*yq + intrin->coeffs[3] * (r2 + 2 * xq*xq);
105  float delta_y = 2 * intrin->coeffs[3] * xq*yq + intrin->coeffs[2] * (r2 + 2 * yq*yq);
106  x = (xo - delta_x)*icdist;
107  y = (yo - delta_y)*icdist;
108  }
109  }
111  {
112  // need to loop until convergence
113  // 10 iterations determined empirically
114  for (int i = 0; i < 10; i++)
115  {
116  float r2 = x * x + y * y;
117  float icdist = (float)1 / (float)(1 + ((intrin->coeffs[4] * r2 + intrin->coeffs[1])*r2 + intrin->coeffs[0])*r2);
118  float delta_x = 2 * intrin->coeffs[2] * x*y + intrin->coeffs[3] * (r2 + 2 * x*x);
119  float delta_y = 2 * intrin->coeffs[3] * x*y + intrin->coeffs[2] * (r2 + 2 * y*y);
120  x = (xo - delta_x)*icdist;
121  y = (yo - delta_y)*icdist;
122  }
123
124  }
125  if (intrin->model == RS2_DISTORTION_KANNALA_BRANDT4)
126  {
127  float rd = sqrtf(x*x + y*y);
128  if (rd < FLT_EPSILON)
129  {
130  rd = FLT_EPSILON;
131  }
132
133  float theta = rd;
134  float theta2 = rd*rd;
135  for (int i = 0; i < 4; i++)
136  {
137  float f = theta*(1 + theta2*(intrin->coeffs[0] + theta2*(intrin->coeffs[1] + theta2*(intrin->coeffs[2] + theta2*intrin->coeffs[3])))) - rd;
138  if (fabs(f) < FLT_EPSILON)
139  {
140  break;
141  }
142  float df = 1 + theta2*(3 * intrin->coeffs[0] + theta2*(5 * intrin->coeffs[1] + theta2*(7 * intrin->coeffs[2] + 9 * theta2*intrin->coeffs[3])));
143  theta -= f / df;
144  theta2 = theta*theta;
145  }
146  float r = tan(theta);
147  x *= r / rd;
148  y *= r / rd;
149  }
150  if (intrin->model == RS2_DISTORTION_FTHETA)
151  {
152  float rd = sqrtf(x*x + y*y);
153  if (rd < FLT_EPSILON)
154  {
155  rd = FLT_EPSILON;
156  }
157  float r = (float)(tan(intrin->coeffs[0] * rd) / atan(2 * tan(intrin->coeffs[0] / 2.0f)));
158  x *= r / rd;
159  y *= r / rd;
160  }
161
162  point[0] = depth * x;
163  point[1] = depth * y;
164  point[2] = depth;
165 }
166
167 /* Transform 3D coordinates relative to one sensor to 3D coordinates relative to another viewpoint */
168 static void rs2_transform_point_to_point(float to_point[3], const struct rs2_extrinsics * extrin, const float from_point[3])
169 {
170  to_point[0] = extrin->rotation[0] * from_point[0] + extrin->rotation[3] * from_point[1] + extrin->rotation[6] * from_point[2] + extrin->translation[0];
171  to_point[1] = extrin->rotation[1] * from_point[0] + extrin->rotation[4] * from_point[1] + extrin->rotation[7] * from_point[2] + extrin->translation[1];
172  to_point[2] = extrin->rotation[2] * from_point[0] + extrin->rotation[5] * from_point[1] + extrin->rotation[8] * from_point[2] + extrin->translation[2];
173 }
174
175 /* Calculate horizontal and vertical feild of view, based on video intrinsics */
176 static void rs2_fov(const struct rs2_intrinsics * intrin, float to_fov[2])
177 {
178  to_fov[0] = (atan2f(intrin->ppx + 0.5f, intrin->fx) + atan2f(intrin->width - (intrin->ppx + 0.5f), intrin->fx)) * 57.2957795f;
179  to_fov[1] = (atan2f(intrin->ppy + 0.5f, intrin->fy) + atan2f(intrin->height - (intrin->ppy + 0.5f), intrin->fy)) * 57.2957795f;
180 }
181
/* Step `curr` one pixel along the line from `start` to `end`, advancing along
   the dominant axis and solving the line equation for the other coordinate. */
static void next_pixel_in_line(float curr[2], const float start[2], const float end[2])
{
    float line_slope = (end[1] - start[1]) / (end[0] - start[0]);
    if (fabs(end[0] - curr[0]) > fabs(end[1] - curr[1]))
    {
        /* x is the dominant remaining axis: step x by one, derive y. */
        curr[0] = end[0] > curr[0] ? curr[0] + 1 : curr[0] - 1;
        curr[1] = end[1] - line_slope * (end[0] - curr[0]);
    }
    else
    {
        /* y is the dominant remaining axis: step y by one, derive x.
           FIX(review): was (end[1] + curr[1]) — the remaining y-distance is
           end[1] - curr[1], mirroring the x-branch above. */
        curr[1] = end[1] > curr[1] ? curr[1] + 1 : curr[1] - 1;
        curr[0] = end[0] - ((end[1] - curr[1]) / line_slope);
    }
}
196
/* True iff `curr` lies within the axis-aligned bounding box of the segment
   from `start` to `end` (inclusive), checked independently per coordinate. */
static bool is_pixel_in_line(const float curr[2], const float start[2], const float end[2])
{
    for (int axis = 0; axis < 2; axis++)
    {
        float lo = start[axis] < end[axis] ? start[axis] : end[axis];
        float hi = start[axis] < end[axis] ? end[axis] : start[axis];
        if (curr[axis] < lo || curr[axis] > hi)
        {
            return false;
        }
    }
    return true;
}
202
/* Clamp a 2D pixel coordinate into the rectangle [0, width] x [0, height]. */
static void adjust_2D_point_to_boundary(float p[2], int width, int height)
{
    float max_x = (float)width;
    float max_y = (float)height;
    p[0] = p[0] < 0 ? 0 : (p[0] > max_x ? max_x : p[0]);
    p[1] = p[1] < 0 ? 0 : (p[1] > max_y ? max_y : p[1]);
}
210
211 /* Find projected pixel with unknown depth search along line. */
212 static void rs2_project_color_pixel_to_depth_pixel(float to_pixel[2],
213  const uint16_t* data, float depth_scale,
214  float depth_min, float depth_max,
215  const struct rs2_intrinsics* depth_intrin,
216  const struct rs2_intrinsics* color_intrin,
217  const struct rs2_extrinsics* color_to_depth,
218  const struct rs2_extrinsics* depth_to_color,
219  const float from_pixel[2])
220 {
221  //Find line start pixel
222  float start_pixel[2] = { 0 }, min_point[3] = { 0 }, min_transformed_point[3] = { 0 };
223  rs2_deproject_pixel_to_point(min_point, color_intrin, from_pixel, depth_min);
224  rs2_transform_point_to_point(min_transformed_point, color_to_depth, min_point);
225  rs2_project_point_to_pixel(start_pixel, depth_intrin, min_transformed_point);
227
228  //Find line end depth pixel
229  float end_pixel[2] = { 0 }, max_point[3] = { 0 }, max_transformed_point[3] = { 0 };
230  rs2_deproject_pixel_to_point(max_point, color_intrin, from_pixel, depth_max);
231  rs2_transform_point_to_point(max_transformed_point, color_to_depth, max_point);
232  rs2_project_point_to_pixel(end_pixel, depth_intrin, max_transformed_point);
234
235  //search along line for the depth pixel that it's projected pixel is the closest to the input pixel
236  float min_dist = -1;
237  for (float p[2] = { start_pixel[0], start_pixel[1] }; is_pixel_in_line(p, start_pixel, end_pixel); next_pixel_in_line(p, start_pixel, end_pixel))
238  {
239  float depth = depth_scale * data[(int)p[1] * depth_intrin->width + (int)p[0]];
240  if (depth == 0)
241  continue;
242
243  float projected_pixel[2] = { 0 }, point[3] = { 0 }, transformed_point[3] = { 0 };
244  rs2_deproject_pixel_to_point(point, depth_intrin, p, depth);
245  rs2_transform_point_to_point(transformed_point, depth_to_color, point);
246  rs2_project_point_to_pixel(projected_pixel, color_intrin, transformed_point);
247
248  float new_dist = (float)(pow((projected_pixel[1] - from_pixel[1]), 2) + pow((projected_pixel[0] - from_pixel[0]), 2));
249  if (new_dist < min_dist || min_dist < 0)
250  {
251  min_dist = new_dist;
252  to_pixel[0] = p[0];
253  to_pixel[1] = p[1];
254  }
255  }
256 }
257 #endif
static void rs2_transform_point_to_point(float to_point[3], const struct rs2_extrinsics *extrin, const float from_point[3])
Definition: rsutil.h:168
GLuint GLuint end
GLint y
GLfloat GLfloat p
Definition: glext.h:12687
float translation[3]
Definition: rs_sensor.h:99
static void rs2_fov(const struct rs2_intrinsics *intrin, float to_fov[2])
Definition: rsutil.h:176
rs2_intrinsics intrin
GLint GLint GLsizei GLsizei GLsizei depth
unsigned short uint16_t
Definition: stdint.h:79
float coeffs[5]
Definition: rs_types.h:67
point
Definition: rmse.py:166
Exposes librealsense functionality for C compilers.
float rotation[9]
Definition: rs_sensor.h:98
GLdouble f
static float min_dist
Definition: rs-kinfu.cpp:17
Exposes RealSense frame functionality for C compilers.
static void rs2_deproject_pixel_to_point(float point[3], const struct rs2_intrinsics *intrin, const float pixel[2], float depth)
Definition: rsutil.h:83
Exposes RealSense structs.
GLdouble GLdouble r
static void adjust_2D_point_to_boundary(float p[2], int width, int height)
Definition: rsutil.h:203
GLdouble x
GLint GLsizei GLsizei height
GLuint start
Exposes RealSense sensor functionality for C compilers.
static void next_pixel_in_line(float curr[2], const float start[2], const float end[2])
Definition: rsutil.h:182
Cross-stream extrinsics: encodes the topology describing how the different devices are oriented...
Definition: rs_sensor.h:96
rs2_distortion model
Definition: rs_types.h:66
static void rs2_project_point_to_pixel(float pixel[2], const struct rs2_intrinsics *intrin, const float point[3])
Definition: rsutil.h:19
static bool is_pixel_in_line(const float curr[2], const float start[2], const float end[2])
Definition: rsutil.h:197
Video stream intrinsics.
Definition: rs_types.h:58
int i
static void rs2_project_color_pixel_to_depth_pixel(float to_pixel[2], const uint16_t *data, float depth_scale, float depth_min, float depth_max, const struct rs2_intrinsics *depth_intrin, const struct rs2_intrinsics *color_intrin, const struct rs2_extrinsics *color_to_depth, const struct rs2_extrinsics *depth_to_color, const float from_pixel[2])
Definition: rsutil.h:212
Definition: parser.hpp:150
GLint GLsizei width

librealsense2
Author(s): Sergey Dorodnicov , Doron Hirshberg , Mark Horn , Reagan Lopez , Itay Carpis
autogenerated on Mon May 3 2021 02:47:41