#include "../include/librealsense2/rsutil.h"

void init_util(py::module &m)
{
    // rsutil.h wrappers (usage sketches for each helper follow the reference list below)
    m.def("rs2_project_point_to_pixel", [](const rs2_intrinsics& intrin, const std::array<float, 3>& point)->std::array<float, 2>
    {
        std::array<float, 2> pixel{};
        rs2_project_point_to_pixel(pixel.data(), &intrin, point.data());
        return pixel;
    },
    "Given a point in 3D space, compute the corresponding pixel coordinates in an image with no distortion or forward distortion coefficients produced by the same camera",
    "intrin"_a, "point"_a);

    m.def("rs2_deproject_pixel_to_point", [](const rs2_intrinsics& intrin, const std::array<float, 2>& pixel, float depth)->std::array<float, 3>
    {
        std::array<float, 3> point{};
        rs2_deproject_pixel_to_point(point.data(), &intrin, pixel.data(), depth);
        return point;
    },
    "Given pixel coordinates and depth in an image with no distortion or inverse distortion coefficients, compute the corresponding point in 3D space relative to the same camera",
    "intrin"_a, "pixel"_a, "depth"_a);

    m.def("rs2_transform_point_to_point", [](const rs2_extrinsics& extrin, const std::array<float, 3>& from_point)->std::array<float, 3>
    {
        std::array<float, 3> to_point{};
        rs2_transform_point_to_point(to_point.data(), &extrin, from_point.data());
        return to_point;
    },
    "Transform 3D coordinates relative to one sensor to 3D coordinates relative to another viewpoint",
    "extrin"_a, "from_point"_a);

    m.def("rs2_fov", [](const rs2_intrinsics& intrin)->std::array<float, 2>
    {
        std::array<float, 2> to_fov{};
        rs2_fov(&intrin, to_fov.data());
        return to_fov;
    },
    "Calculate horizontal and vertical field of view, based on video intrinsics",
    "intrin"_a);

    m.def("next_pixel_in_line", [](std::array<float, 2> curr, const std::array<float, 2> start, const std::array<float, 2> end)->std::array<float, 2>
    {
        next_pixel_in_line(curr.data(), start.data(), end.data());
        return curr;
    },
    "curr"_a, "start"_a, "end"_a);

    m.def("is_pixel_in_line", [](std::array<float, 2> curr, const std::array<float, 2> start, const std::array<float, 2> end)->bool
    {
        return is_pixel_in_line(curr.data(), start.data(), end.data());
    },
    "curr"_a, "start"_a, "end"_a);

    m.def("adjust_2D_point_to_boundary", [](std::array<float, 2> p, int width, int height)->std::array<float, 2>
    {
        adjust_2D_point_to_boundary(p.data(), width, height);
        return p;
    },
    "p"_a, "width"_a, "height"_a);

    // NOTE: the exact pybind11 buffer type of 'data' is an assumption; any contiguous uint16 depth buffer works here.
    auto cp_to_dp = [](py::array_t<uint16_t> data, float depth_scale, float depth_min, float depth_max,
        const rs2_intrinsics& depth_intrin, const rs2_intrinsics& color_intrin,
        const rs2_extrinsics& depth_to_color, const rs2_extrinsics& color_to_depth,
        std::array<float, 2> from_pixel)->std::array<float, 2>
    {
        std::array<float, 2> to_pixel;
        rs2_project_color_pixel_to_depth_pixel(to_pixel.data(), data.data(),
            depth_scale, depth_min, depth_max, &depth_intrin, &color_intrin, &depth_to_color,
            &color_to_depth, from_pixel.data());
        return to_pixel;
    };

    m.def("rs2_project_color_pixel_to_depth_pixel", cp_to_dp, "data"_a, "depth_scale"_a,
        "depth_min"_a, "depth_max"_a, "depth_intrin"_a, "color_intrin"_a, "depth_to_color"_a,
        "color_to_depth"_a, "from_pixel"_a);
}

References (rsutil.h declarations called by the bindings above):

static void rs2_transform_point_to_point(float to_point[3], const struct rs2_extrinsics *extrin, const float from_point[3])
static void rs2_fov(const struct rs2_intrinsics *intrin, float to_fov[2])
static void rs2_deproject_pixel_to_point(float point[3], const struct rs2_intrinsics *intrin, const float pixel[2], float depth)
static void adjust_2D_point_to_boundary(float p[2], int width, int height)
static void next_pixel_in_line(float curr[2], const float start[2], const float end[2])
struct rs2_extrinsics: cross-stream extrinsics, encoding the topology describing how the different devices are oriented...
static void rs2_project_point_to_pixel(float pixel[2], const struct rs2_intrinsics *intrin, const float point[3])
static bool is_pixel_in_line(const float curr[2], const float start[2], const float end[2])
static void rs2_project_color_pixel_to_depth_pixel(float to_pixel[2], const uint16_t *data, float depth_scale, float depth_min, float depth_max, const struct rs2_intrinsics *depth_intrin, const struct rs2_intrinsics *color_intrin, const struct rs2_extrinsics *color_to_depth, const struct rs2_extrinsics *depth_to_color, const float from_pixel[2])
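
A minimal sketch of what the rs2_project_point_to_pixel wrapper does underneath, calling the C helper directly. The intrinsics and the 3D point are made-up illustration values, not data read from a device:

#include <cstdio>
#include <librealsense2/rs.h>
#include <librealsense2/rsutil.h>

int main()
{
    // Hypothetical pinhole intrinsics for a 640x480 stream.
    rs2_intrinsics intrin{};
    intrin.width = 640;  intrin.height = 480;
    intrin.ppx = 320.0f; intrin.ppy = 240.0f;   // principal point
    intrin.fx = 600.0f;  intrin.fy = 600.0f;    // focal lengths in pixels
    intrin.model = RS2_DISTORTION_NONE;         // no distortion coefficients

    float point[3] = { 0.1f, -0.05f, 1.0f };    // metres, camera coordinates
    float pixel[2];
    rs2_project_point_to_pixel(pixel, &intrin, point);
    std::printf("pixel: (%.1f, %.1f)\n", pixel[0], pixel[1]);   // -> (380.0, 210.0)
    return 0;
}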
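
The inverse operation, rs2_deproject_pixel_to_point, recovers the 3D point from a pixel and a known depth. A sketch with the same assumed intrinsics; deprojecting the pixel produced above at a depth of one metre gives back the original point:

#include <cstdio>
#include <librealsense2/rs.h>
#include <librealsense2/rsutil.h>

int main()
{
    // Same hypothetical intrinsics as in the projection sketch.
    rs2_intrinsics intrin{};
    intrin.width = 640;  intrin.height = 480;
    intrin.ppx = 320.0f; intrin.ppy = 240.0f;
    intrin.fx = 600.0f;  intrin.fy = 600.0f;
    intrin.model = RS2_DISTORTION_NONE;

    float pixel[2] = { 380.0f, 210.0f };
    float depth = 1.0f;                          // metres (raw depth units * depth scale)
    float point[3];
    rs2_deproject_pixel_to_point(point, &intrin, pixel, depth);
    std::printf("point: (%.2f, %.2f, %.2f)\n", point[0], point[1], point[2]);   // -> (0.10, -0.05, 1.00)
    return 0;
}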
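
A sketch of rs2_transform_point_to_point with assumed extrinsics: an identity rotation and a 25 mm translation along x stand in for a real sensor-to-sensor calibration:

#include <cstdio>
#include <librealsense2/rs.h>
#include <librealsense2/rsutil.h>

int main()
{
    // Assumed extrinsics: identity rotation (column-major 3x3), 25 mm baseline along +x.
    rs2_extrinsics extrin{};
    extrin.rotation[0] = extrin.rotation[4] = extrin.rotation[8] = 1.0f;
    extrin.translation[0] = 0.025f;              // metres

    float from_point[3] = { 0.1f, -0.05f, 1.0f };
    float to_point[3];
    rs2_transform_point_to_point(to_point, &extrin, from_point);
    std::printf("(%.3f, %.3f, %.3f)\n", to_point[0], to_point[1], to_point[2]);  // -> (0.125, -0.050, 1.000)
    return 0;
}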
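
A sketch of rs2_fov: the horizontal and vertical field of view come back in degrees and depend only on the resolution, principal point, and focal lengths (again assumed values):

#include <cstdio>
#include <librealsense2/rs.h>
#include <librealsense2/rsutil.h>

int main()
{
    rs2_intrinsics intrin{};
    intrin.width = 640;  intrin.height = 480;
    intrin.ppx = 320.0f; intrin.ppy = 240.0f;
    intrin.fx = 600.0f;  intrin.fy = 600.0f;
    intrin.model = RS2_DISTORTION_NONE;

    float fov[2];                                // { horizontal, vertical } in degrees
    rs2_fov(&intrin, fov);
    std::printf("HFOV %.1f deg, VFOV %.1f deg\n", fov[0], fov[1]);
    return 0;
}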
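
A sketch of how the three 2D helpers fit together: adjust_2D_point_to_boundary clamps the endpoints into the image, then next_pixel_in_line and is_pixel_in_line walk the segment one pixel at a time, essentially the same walk rs2_project_color_pixel_to_depth_pixel performs when it searches the depth image. The endpoints and image size below are assumed:

#include <cstdio>
#include <librealsense2/rs.h>
#include <librealsense2/rsutil.h>

int main()
{
    // Assumed segment inside a 640x480 image.
    float start[2] = { 10.0f, 10.0f };
    float end[2]   = { 15.0f, 13.0f };

    // Clamp both endpoints to the image boundary before walking.
    adjust_2D_point_to_boundary(start, 640, 480);
    adjust_2D_point_to_boundary(end, 640, 480);

    // Step from start towards end until the current pixel leaves the segment.
    float curr[2] = { start[0], start[1] };
    while (is_pixel_in_line(curr, start, end))
    {
        std::printf("(%.1f, %.1f)\n", curr[0], curr[1]);
        next_pixel_in_line(curr, start, end);
    }
    return 0;
}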
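
Finally, a sketch of calling rs2_project_color_pixel_to_depth_pixel directly with a synthetic depth frame. The intrinsics, extrinsics, depth scale, and search range are placeholder values, the small make_intrinsics helper is defined only for this sketch, and the argument order follows the reference signature listed above:

#include <cstdio>
#include <cstdint>
#include <vector>
#include <librealsense2/rs.h>
#include <librealsense2/rsutil.h>

// Build hypothetical pinhole intrinsics (illustration only, not a real calibration).
static rs2_intrinsics make_intrinsics(int w, int h, float f)
{
    rs2_intrinsics i{};
    i.width = w;  i.height = h;
    i.ppx = w / 2.0f;  i.ppy = h / 2.0f;
    i.fx = f;  i.fy = f;
    i.model = RS2_DISTORTION_NONE;
    return i;
}

int main()
{
    rs2_intrinsics depth_intrin = make_intrinsics(640, 480, 600.0f);
    rs2_intrinsics color_intrin = make_intrinsics(640, 480, 615.0f);

    // Assumed extrinsics: identity rotation, 15 mm baseline in either direction.
    rs2_extrinsics depth_to_color{};
    depth_to_color.rotation[0] = depth_to_color.rotation[4] = depth_to_color.rotation[8] = 1.0f;
    depth_to_color.translation[0] = 0.015f;
    rs2_extrinsics color_to_depth = depth_to_color;
    color_to_depth.translation[0] = -0.015f;

    // Synthetic depth frame: every pixel one metre away at a 0.001 depth scale.
    const float depth_scale = 0.001f;
    std::vector<uint16_t> depth_frame(640 * 480, 1000);

    float from_pixel[2] = { 320.0f, 240.0f };    // colour pixel to map into the depth image
    float to_pixel[2] = { 0.0f, 0.0f };
    rs2_project_color_pixel_to_depth_pixel(to_pixel, depth_frame.data(), depth_scale,
        0.1f, 10.0f,                             // depth_min, depth_max in metres
        &depth_intrin, &color_intrin,
        &color_to_depth, &depth_to_color,        // order as in the C signature above
        from_pixel);
    std::printf("depth pixel: (%.1f, %.1f)\n", to_pixel[0], to_pixel[1]);
    return 0;
}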