rs-ar-basic.cpp
Go to the documentation of this file.
1 // License: Apache 2.0. See LICENSE file in root directory.
2 // Copyright(c) 2019 Intel Corporation. All Rights Reserved.
3 #include <librealsense2/rs.hpp>
4 #include <librealsense2/rsutil.h>
5 #include <array>
6 #include <cmath>
7 #include <iostream>
8 #include <vector>
9 #include "example.hpp"
10 
// A 3D point in sensor/device coordinates. Stored as a raw float[3] so it can
// be passed directly to the librealsense C utilities
// (rs2_transform_point_to_point, rs2_project_point_to_pixel).
struct point3d {
    float f[3] = {}; // zero-initialized: a default-constructed point is (0,0,0)

    point3d() = default;
    point3d(float x, float y, float z) : f{x, y, z} {}
    float x() const { return f[0]; }
    float y() const { return f[1]; }
    float z() const { return f[2]; }
};
20 
// A 2D image pixel. Stored as a raw float[2] so it can be passed directly to
// rs2_project_point_to_pixel, which writes into the array.
struct pixel {
    float f[2] = {}; // zero-initialized: a default-constructed pixel is (0,0)

    pixel() = default;
    pixel(float x, float y) : f{x, y} {}
    float x() const { return f[0]; }
    float y() const { return f[1]; }
};
29 
30 // We define a virtual object as a collection of vertices that will be connected by lines
31 typedef std::array<point3d, 4> object;
32 
33 static rs2_pose identity_pose();
34 static rs2_pose reset_object_pose(const rs2_pose& device_pose_in_world = identity_pose());
35 static rs2_pose pose_inverse(const rs2_pose& p);
36 static rs2_pose pose_multiply(const rs2_pose& ref2_in_ref1, const rs2_pose& ref3_in_ref2);
41 static rs2_vector vector_addition(const rs2_vector& a, const rs2_vector& b);
42 static rs2_vector vector_negate(const rs2_vector& v);
43 
44 static object convert_object_coordinates(const object& obj, const rs2_pose& object_pose);
45 
46 static std::vector<point3d> raster_line(const point3d& a, const point3d& b, float step);
47 static void render_line(const std::vector<pixel>& line, int color_code);
48 static void render_text(int win_height, const std::string& text);
49 
50 int main(int argc, char * argv[]) try
51 {
52  std::cout << "Waiting for device..." << std::endl;
53 
54  // Declare RealSense pipeline, encapsulating the actual device and sensors
56  // Create a configuration for configuring the pipeline with a non default profile
58  // Enable fisheye and pose streams
62  // Start pipeline with chosen configuration
63  rs2::pipeline_profile pipe_profile = pipe.start(cfg);
64 
65  // T265 has two fisheye sensors, we can choose any of them (index 1 or 2)
66  const int fisheye_sensor_idx = 1;
67 
68  // Get fisheye sensor intrinsics parameters
69  rs2::stream_profile fisheye_stream = pipe_profile.get_stream(RS2_STREAM_FISHEYE, fisheye_sensor_idx);
70  rs2_intrinsics intrinsics = fisheye_stream.as<rs2::video_stream_profile>().get_intrinsics();
71 
72  rs2_extrinsics pose_to_fisheye_extrinsics = pipe_profile.get_stream(RS2_STREAM_POSE).get_extrinsics_to(fisheye_stream);
73 
74  std::cout << "Device got. Streaming data" << std::endl;
75 
76  // Create an OpenGL display window and a texture to draw the fisheye image
77  window app(intrinsics.width, intrinsics.height, "Intel RealSense T265 Augmented Reality Example");
78  window_key_listener key_watcher(app);
79  texture fisheye_image;
80 
81  // Create the vertices of a simple virtual object.
82  // This virtual object is 4 points in 3D space that describe 3 XYZ 20cm long axes.
83  // These vertices are relative to the object's own coordinate system.
84  const float length = 0.20f;
85  const object virtual_object = {{
86  { 0, 0, 0 }, // origin
87  { length, 0, 0 }, // X
88  { 0, length, 0 }, // Y
89  { 0, 0, length } // Z
90  }};
91 
92  // This variable will hold the pose of the virtual object in world coordinates.
93  // We we initialize it once we get the first pose frame.
94  rs2_pose object_pose_in_world;
95  bool object_pose_in_world_initialized = false;
96 
97  // Main loop
98  while (app)
99  {
100  rs2_pose device_pose_in_world; // This will contain the current device pose
101  {
102  // Wait for the next set of frames from the camera
103  auto frames = pipe.wait_for_frames();
104  // Get a frame from the fisheye stream
105  rs2::video_frame fisheye_frame = frames.get_fisheye_frame(fisheye_sensor_idx);
106  // Get a frame from the pose stream
107  rs2::pose_frame pose_frame = frames.get_pose_frame();
108 
109  // Copy current camera pose
110  device_pose_in_world = pose_frame.get_pose_data();
111 
112  // Render the fisheye image
113  fisheye_image.render(fisheye_frame, { 0, 0, app.width(), app.height() });
114 
115  // By closing the current scope we let frames be deallocated, so we do not fill up librealsense queues while we do other computation.
116  }
117 
118  // If we have not set the virtual object in the world yet, set it in front of the camera now.
119  if (!object_pose_in_world_initialized)
120  {
121  object_pose_in_world = reset_object_pose(device_pose_in_world);
122  object_pose_in_world_initialized = true;
123  }
124 
125  // Compute the pose of the object relative to the current pose of the device
126  rs2_pose world_pose_in_device = pose_inverse(device_pose_in_world);
127  rs2_pose object_pose_in_device = pose_multiply(world_pose_in_device, object_pose_in_world);
128 
129  // Get the object vertices in device coordinates
130  object object_in_device = convert_object_coordinates(virtual_object, object_pose_in_device);
131 
132  // Convert object vertices from device coordinates into fisheye sensor coordinates using extrinsics
133  object object_in_sensor;
134  for (size_t i = 0; i < object_in_device.size(); ++i)
135  {
136  rs2_transform_point_to_point(object_in_sensor[i].f, &pose_to_fisheye_extrinsics, object_in_device[i].f);
137  }
138 
139  for (size_t i = 1; i < object_in_sensor.size(); ++i)
140  {
141  // Discretize the virtual object line into smaller 1cm long segments
142  std::vector<point3d> points_in_sensor = raster_line(object_in_sensor[0], object_in_sensor[i], 0.01f);
143  std::vector<pixel> projected_line;
144  projected_line.reserve(points_in_sensor.size());
145  for (auto& point : points_in_sensor)
146  {
147  // A 3D point is visible in the image if its Z coordinate relative to the fisheye sensor is positive.
148  if (point.z() > 0)
149  {
150  // Project 3D sensor coordinates to 2D fisheye image coordinates using intrinsics
151  projected_line.emplace_back();
152  rs2_project_point_to_pixel(projected_line.back().f, &intrinsics, point.f);
153  }
154  }
155  // Display the line in the image
156  render_line(projected_line, static_cast<int>(i));
157  }
158 
159  // Display text in the image
160  render_text(static_cast<int>(app.height()), "Press spacebar to reset the pose of the virtual object. Press ESC to exit");
161 
162  // Check if some key is pressed
163  switch (key_watcher.get_key())
164  {
165  case GLFW_KEY_SPACE:
166  // Reset virtual object pose if user presses spacebar
167  object_pose_in_world = reset_object_pose(device_pose_in_world);
168  std::cout << "Setting new pose for virtual object: " << object_pose_in_world.translation << std::endl;
169  break;
170  case GLFW_KEY_ESCAPE:
171  // Exit if user presses escape
172  app.close();
173  break;
174  }
175  }
176 
177  return EXIT_SUCCESS;
178 }
179 catch (const rs2::error & e)
180 {
181  std::cerr << "RealSense error calling " << e.get_failed_function() << "(" << e.get_failed_args() << "):\n " << e.what() << std::endl;
182  return EXIT_FAILURE;
183 }
184 catch (const std::exception& e)
185 {
186  std::cerr << e.what() << std::endl;
187  return EXIT_FAILURE;
188 }
189 
191 {
192  // Return an identity pose (no translation, no rotation)
193  rs2_pose pose;
194  pose.translation.x = 0;
195  pose.translation.y = 0;
196  pose.translation.z = 0;
197  pose.rotation.x = 0;
198  pose.rotation.y = 0;
199  pose.rotation.z = 0;
200  pose.rotation.w = 1;
201  return pose;
202 }
203 
204 rs2_pose reset_object_pose(const rs2_pose& device_pose_in_world)
205 {
206  // Set the object 50 centimeter away in front of the camera.
207  // T265 coordinate system is defined here: https://github.com/IntelRealSense/librealsense/blob/master/doc/t265.md#sensor-origin-and-coordinate-system
208  rs2_pose object_pose_in_device;
209  object_pose_in_device.translation.x = 0;
210  object_pose_in_device.translation.y = 0;
211  object_pose_in_device.translation.z = -0.50;
212  object_pose_in_device.rotation.x = 0;
213  object_pose_in_device.rotation.y = 0;
214  object_pose_in_device.rotation.z = 0;
215  object_pose_in_device.rotation.w = 1;
216 
217  // Convert the pose of the virtual object from camera coordinates into world coordinates
218  rs2_pose object_pose_in_world = pose_multiply(device_pose_in_world, object_pose_in_device);
219  return object_pose_in_world;
220 }
221 
223 {
224  rs2_pose i;
227  return i;
228 }
229 
230 rs2_pose pose_multiply(const rs2_pose& ref2_in_ref1, const rs2_pose& ref3_in_ref2)
231 {
232  rs2_pose ref3_in_ref1;
233  ref3_in_ref1.rotation = quaternion_multiply(ref2_in_ref1.rotation, ref3_in_ref2.rotation);
234  ref3_in_ref1.translation = vector_addition(quaternion_rotate_vector(ref2_in_ref1.rotation, ref3_in_ref2.translation), ref2_in_ref1.translation);
235  return ref3_in_ref1;
236 }
237 
239 {
241 }
242 
244 {
245  return rs2_quaternion {
246  a.x * b.w + a.w * b.x - a.z * b.y + a.y * b.z,
247  a.y * b.w + a.z * b.x + a.w * b.y - a.x * b.z,
248  a.z * b.w - a.y * b.x + a.x * b.y + a.w * b.z,
249  a.w * b.w - a.x * b.x - a.y * b.y - a.z * b.z,
250  };
251 }
252 
254 {
255  rs2_quaternion v_as_quaternion = { v.x, v.y, v.z, 0 };
256  rs2_quaternion rotated_v = quaternion_multiply(quaternion_multiply(q, v_as_quaternion), quaternion_conjugate(q));
257  return rs2_vector { rotated_v.x, rotated_v.y, rotated_v.z };
258 }
259 
261 {
262  return rs2_quaternion { -q.x, -q.y, -q.z, q.w };
263 }
264 
266 {
267  return rs2_vector { a.x + b.x, a.y + b.y, a.z + b.z };
268 }
269 
271 {
272  return rs2_vector { -v.x, -v.y, -v.z };
273 }
274 
275 object convert_object_coordinates(const object& obj, const rs2_pose& object_pose)
276 {
277  object transformed_obj;
278  for (size_t i = 0; i < obj.size(); ++i) {
279  rs2_vector v { obj[i].x(), obj[i].y(), obj[i].z() };
280  v = pose_transform_point(object_pose, v);
281  transformed_obj[i].f[0] = v.x;
282  transformed_obj[i].f[1] = v.y;
283  transformed_obj[i].f[2] = v.z;
284  }
285  return transformed_obj;
286 }
287 
288 std::vector<point3d> raster_line(const point3d& a, const point3d& b, float step)
289 {
290  rs2_vector direction = { b.x() - a.x(), b.y() - a.y(), b.z() - a.z() };
291  float distance = std::sqrt(direction.x*direction.x + direction.y*direction.y + direction.z*direction.z);
292  int npoints = static_cast<int>(distance / step + 1);
293 
294  std::vector<point3d> points;
295  if (npoints > 0)
296  {
297  direction.x = direction.x * step / distance;
298  direction.y = direction.y * step / distance;
299  direction.z = direction.z * step / distance;
300 
301  points.reserve(npoints);
302  points.emplace_back(a);
303  for (int i = 1; i < npoints; ++i)
304  {
305  points.emplace_back(a.x() + direction.x * i,
306  a.y() + direction.y * i,
307  a.z() + direction.z * i);
308  }
309  }
310  return points;
311 }
312 
313 void render_line(const std::vector<pixel>& line, int color_code)
314 {
315  if (!line.empty())
316  {
317  GLfloat current_color[4];
318  glGetFloatv(GL_CURRENT_COLOR, current_color);
319 
320  glLineWidth(5);
321  glColor3f(color_code == 1 ? 1.f : 0.f,
322  color_code == 2 ? 1.f : 0.f,
323  color_code == 3 ? 1.f : 0.f);
324 
326  for (auto& pixel : line)
327  {
328  glVertex3f(pixel.x(), pixel.y(), 0.f);
329  }
330  glEnd();
331 
332  glColor4fv(current_color);
333  }
334 }
335 
336 void render_text(int win_height, const std::string& text)
337 {
338  GLfloat current_color[4];
339  glGetFloatv(GL_CURRENT_COLOR, current_color);
340  glColor3f(0, 0.5, 1);
341  glScalef(2, 2, 2);
342  draw_text(15, (win_height - 10) / 2, text.c_str());
343  glScalef(1, 1, 1);
344  glColor4fv(current_color);
345 }
frameset wait_for_frames(unsigned int timeout_ms=RS2_DEFAULT_TIMEOUT) const
static void render_text(int win_height, const std::string &text)
static rs2_pose identity_pose()
static void rs2_transform_point_to_point(float to_point[3], const struct rs2_extrinsics *extrin, const float from_point[3])
Definition: rsutil.h:168
float y() const
GLboolean GLboolean GLboolean b
GLint y
void render(const rs2::frame &frame, const rect &rect, float alpha=1.f)
Definition: example.hpp:471
khronos_float_t GLfloat
float z
Definition: rs_types.h:131
point3d(float x, float y, float z)
Definition: rs-ar-basic.cpp:15
static rs2_vector quaternion_rotate_vector(const rs2_quaternion &q, const rs2_vector &v)
int main(int argc, char *argv[])
Definition: rs-ar-basic.cpp:50
#define glBegin
GLfloat GLfloat p
Definition: glext.h:12687
static object convert_object_coordinates(const object &obj, const rs2_pose &object_pose)
static rs2_pose pose_multiply(const rs2_pose &ref2_in_ref1, const rs2_pose &ref3_in_ref2)
rs2_extrinsics get_extrinsics_to(const stream_profile &to) const
Definition: rs_frame.hpp:148
float z() const
Definition: rs-ar-basic.cpp:18
#define GLFW_KEY_SPACE
Definition: glfw3.h:360
rs2_vector translation
Definition: rs_types.h:142
GLsizei const GLchar *const * string
float x() const
Definition: rs-ar-basic.cpp:26
GLdouble GLdouble z
GLhandleARB obj
Definition: glext.h:4157
#define glVertex3f
e
Definition: rmse.py:177
GLsizei GLsizei GLfloat distance
Definition: glext.h:10563
point
Definition: rmse.py:166
rs2_pose get_pose_data() const
Definition: rs_frame.hpp:933
direction
Definition: rs-align.cpp:25
The texture class.
Definition: example.hpp:402
#define glGetFloatv
::realsense_legacy_msgs::pose_< std::allocator< void > > pose
Definition: pose.h:88
GLboolean GLboolean GLboolean GLboolean a
Quaternion used to represent rotation.
Definition: rs_types.h:135
const std::string & get_failed_args() const
Definition: rs_types.hpp:117
float y
Definition: rs_types.h:131
GLdouble f
float y() const
Definition: rs-ar-basic.cpp:27
float x() const
#define GL_CURRENT_COLOR
std::ostream & cout()
GLdouble x
pixel(float x, float y)
Definition: rs-ar-basic.cpp:25
float width() const
Definition: example.hpp:627
#define glLineWidth
#define GLFW_KEY_ESCAPE
Definition: glfw3.h:412
static rs2_vector vector_addition(const rs2_vector &a, const rs2_vector &b)
rs2_quaternion rotation
Definition: rs_types.h:145
stream_profile get_stream(rs2_stream stream_type, int stream_index=-1) const
Definition: rs_pipeline.hpp:60
#define glEnd
#define glColor4fv
static void render_line(const std::vector< pixel > &line, int color_code)
static std::vector< point3d > raster_line(const point3d &a, const point3d &b, float step)
float x
Definition: rs_types.h:131
void enable_stream(rs2_stream stream_type, int stream_index, int width, int height, rs2_format format=RS2_FORMAT_ANY, int framerate=0)
dictionary intrinsics
Definition: t265_stereo.py:142
std::array< point3d, 4 > object
Definition: rs-ar-basic.cpp:31
float height() const
Definition: example.hpp:628
static rs2_pose reset_object_pose(const rs2_pose &device_pose_in_world=identity_pose())
Cross-stream extrinsics: encodes the topology describing how the different devices are oriented...
Definition: rs_sensor.h:96
3D vector in Euclidean coordinate space
Definition: rs_types.h:129
static void rs2_project_point_to_pixel(float pixel[2], const struct rs2_intrinsics *intrin, const float point[3])
Definition: rsutil.h:19
#define glColor3f
GLdouble GLdouble GLdouble q
static rs2_vector vector_negate(const rs2_vector &v)
void close()
Definition: example.hpp:622
#define glScalef
void draw_text(int x, int y, const char *text)
Definition: example.hpp:109
static rs2_quaternion quaternion_conjugate(const rs2_quaternion &q)
Video stream intrinsics.
Definition: rs_types.h:58
std::ostream & cerr()
#define GL_LINE_STRIP
int i
GLenum GLuint GLenum GLsizei length
static rs2_pose pose_inverse(const rs2_pose &p)
pipeline_profile start()
static rs2_quaternion quaternion_multiply(const rs2_quaternion &a, const rs2_quaternion &b)
GLdouble v
const std::string & get_failed_function() const
Definition: rs_types.hpp:112
GLdouble GLdouble GLint GLint const GLdouble * points
static rs2_vector pose_transform_point(const rs2_pose &pose, const rs2_vector &p)


librealsense2
Author(s): Sergey Dorodnicov , Doron Hirshberg , Mark Horn , Reagan Lopez , Itay Carpis
autogenerated on Mon May 3 2021 02:47:40