rs-ar-advanced.cpp
// License: Apache 2.0. See LICENSE file in root directory.
// Copyright(c) 2019 Intel Corporation. All Rights Reserved.
#include <librealsense2/rs.hpp>
#include <librealsense2/rsutil.h>
#include <array>
#include <cmath>
#include <cstring>
#include <iostream>
#include <fstream>
#include <vector>
#include "example.hpp"

struct point3d {
    float f[3];

    point3d() {}
    point3d(float x, float y, float z) : f{x, y, z} {}
    float x() const { return f[0]; }
    float y() const { return f[1]; }
    float z() const { return f[2]; }
};

struct pixel {
    float f[2];

    pixel() {}
    pixel(float x, float y) : f{x, y} {}
    float x() const { return f[0]; }
    float y() const { return f[1]; }
};

// We define a virtual object as a collection of vertices that will be connected by lines
typedef std::array<point3d, 4> object;

static rs2_pose identity_pose();
static rs2_pose reset_object_pose(const rs2_pose& device_pose_in_world = identity_pose());
static rs2_pose pose_inverse(const rs2_pose& p);
static rs2_pose pose_multiply(const rs2_pose& ref2_in_ref1, const rs2_pose& ref3_in_ref2);
static rs2_quaternion quaternion_conjugate(const rs2_quaternion& q);
static rs2_quaternion quaternion_multiply(const rs2_quaternion& a, const rs2_quaternion& b);
static rs2_vector quaternion_rotate_vector(const rs2_quaternion& q, const rs2_vector& v);
static rs2_vector pose_transform_point(const rs2_pose& pose, const rs2_vector& p);
static rs2_vector vector_addition(const rs2_vector& a, const rs2_vector& b);
static rs2_vector vector_negate(const rs2_vector& v);

static object convert_object_coordinates(const object& obj, const rs2_pose& object_pose);

static std::vector<point3d> raster_line(const point3d& a, const point3d& b, float step);
static void render_line(const std::vector<pixel>& line, int color_code);
static void render_text(int win_height, const std::string& text);

void raw_file_from_bytes(const std::string& filename, const std::vector<uint8_t> bytes);
std::vector<uint8_t> bytes_from_raw_file(const std::string& filename);

int main(int argc, char * argv[]) try
{
    std::string out_map_filepath, in_map_filepath, default_filepath = "map.raw";
    for (int c = 1; c < argc; ++c) {
        if (!std::strcmp(argv[c], "-m") || !std::strcmp(argv[c], "--load_map")) {
            in_map_filepath = (++c < argc) ? std::string(argv[c]) : default_filepath;
        }
        else if (!std::strcmp(argv[c], "-s") || !std::strcmp(argv[c], "--save_map")) {
            out_map_filepath = (++c < argc) ? std::string(argv[c]) : default_filepath;
        }
        else {
            std::cout <<
                " usage: [-m|--load_map IN_FILEPATH] [-s|--save_map OUT_FILEPATH] \n" <<
                " -m load raw map from IN_FILEPATH at start. \n" <<
                " -s save raw map to OUT_FILEPATH at the end. \n";
        }
    }
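    // Example invocations (the binary name here is an assumption based on this file's name):
    //   rs-ar-advanced --save_map map.raw    // build a map and save it when exiting with ESC
    //   rs-ar-advanced --load_map map.raw    // load a previously saved map and relocalize against it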

    std::cout << "Waiting for device..." << std::endl;

    // Declare RealSense pipeline, encapsulating the actual device and sensors
    rs2::pipeline pipe;
    // Create a configuration for configuring the pipeline with a non default profile
    rs2::config cfg;
    // Enable fisheye and pose streams
    cfg.enable_stream(RS2_STREAM_POSE, RS2_FORMAT_6DOF);
    cfg.enable_stream(RS2_STREAM_FISHEYE, 1);
    cfg.enable_stream(RS2_STREAM_FISHEYE, 2);

    // Create the vertices of a simple virtual object.
    // This virtual object is 4 points in 3D space that describe 3 XYZ 20cm long axes.
    // These vertices are relative to the object's own coordinate system.
    const float length = 0.20f;
    const object virtual_object = { {
        { 0, 0, 0 },      // origin
        { length, 0, 0 }, // X
        { 0, length, 0 }, // Y
        { 0, 0, length }  // Z
    } };
    // Set Guid of virtual object
    const std::string virtual_object_guid = "node0";

    // This variable will hold the pose of the virtual object in world coordinates.
    // We initialize it once we get the first pose frame.
    rs2_pose object_pose_in_world;
    bool object_pose_in_world_initialized = false;

    // Get pose sensor
    auto tm_sensor = cfg.resolve(pipe).get_device().first<rs2::pose_sensor>();

    // Load raw map on request
    if (!in_map_filepath.empty()) {
        try {
            tm_sensor.import_localization_map(bytes_from_raw_file(in_map_filepath));
            std::cout << "Map loaded from " << in_map_filepath << std::endl;
        }
        catch (const std::runtime_error& e) { std::cout << e.what() << std::endl; }
    }

    // Add relocalization callback
    tm_sensor.set_notifications_callback([&](const rs2::notification& n) {
        if (n.get_category() == RS2_NOTIFICATION_CATEGORY_POSE_RELOCALIZATION) {
            std::cout << "Relocalization Event Detected." << std::endl;
            // Get static node if available
            if (tm_sensor.get_static_node(virtual_object_guid, object_pose_in_world.translation, object_pose_in_world.rotation)) {
                std::cout << "Virtual object loaded: " << object_pose_in_world.translation << std::endl;
                object_pose_in_world_initialized = true;
            }
        }
    });
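    // Note: a static node is a named pose stored inside the localization map, so the virtual
    // object's location can be recovered once the device relocalizes in a previously saved map.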

    // Start pipeline with chosen configuration
    rs2::pipeline_profile pipe_profile = pipe.start(cfg);

    // T265 has two fisheye sensors, we can choose any of them (index 1 or 2)
    const int fisheye_sensor_idx = 1;

    // Get fisheye sensor intrinsics parameters
    rs2::stream_profile fisheye_stream = pipe_profile.get_stream(RS2_STREAM_FISHEYE, fisheye_sensor_idx);
    rs2_intrinsics intrinsics = fisheye_stream.as<rs2::video_stream_profile>().get_intrinsics();

    rs2_extrinsics pose_to_fisheye_extrinsics = pipe_profile.get_stream(RS2_STREAM_POSE).get_extrinsics_to(fisheye_stream);
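    // These extrinsics map 3D points from the pose (device body) frame into the fisheye sensor
    // frame; they are applied below to every vertex of the virtual object before projection.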

    std::cout << "Device got. Streaming data" << std::endl;

    // Create an OpenGL display window and a texture to draw the fisheye image
    window app(intrinsics.width, intrinsics.height, "Intel RealSense T265 Augmented Reality Example");
    window_key_listener key_watcher(app);
    texture fisheye_image;

    // Main loop
    while (app)
    {
        rs2_pose device_pose_in_world; // This will contain the current device pose
        {
            // Wait for the next set of frames from the camera
            auto frames = pipe.wait_for_frames();
            // Get a frame from the fisheye stream
            rs2::video_frame fisheye_frame = frames.get_fisheye_frame(fisheye_sensor_idx);
            // Get a frame from the pose stream
            rs2::pose_frame pose_frame = frames.get_pose_frame();

            // Copy current camera pose
            device_pose_in_world = pose_frame.get_pose_data();

            // Render the fisheye image
            fisheye_image.render(fisheye_frame, { 0, 0, app.width(), app.height() });

            // By closing the current scope we let frames be deallocated,
            // so we do not fill up librealsense queues while we do other computation.
        }

        // If we have not set the virtual object in the world yet, set it in front of the camera now.
        if (!object_pose_in_world_initialized && in_map_filepath.empty())
        {
            object_pose_in_world = reset_object_pose(device_pose_in_world);
            object_pose_in_world_initialized = true;
        }

        // Compute the pose of the object relative to the current pose of the device
        rs2_pose world_pose_in_device = pose_inverse(device_pose_in_world);
        rs2_pose object_pose_in_device = pose_multiply(world_pose_in_device, object_pose_in_world);
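        // i.e. object-to-device = (world-to-device) composed with (object-to-world)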

        // Get the object vertices in device coordinates
        object object_in_device = convert_object_coordinates(virtual_object, object_pose_in_device);

        // Convert object vertices from device coordinates into fisheye sensor coordinates using extrinsics
        object object_in_sensor;
        for (size_t i = 0; i < object_in_device.size(); ++i)
        {
            rs2_transform_point_to_point(object_in_sensor[i].f, &pose_to_fisheye_extrinsics, object_in_device[i].f);
        }

        for (size_t i = 1; i < object_in_sensor.size(); ++i)
        {
            // Discretize the virtual object line into smaller 1cm long segments
            std::vector<point3d> points_in_sensor = raster_line(object_in_sensor[0], object_in_sensor[i], 0.01f);
            std::vector<pixel> projected_line;
            projected_line.reserve(points_in_sensor.size());
            for (auto& point : points_in_sensor)
            {
                // A 3D point is visible in the image if its Z coordinate relative to the fisheye sensor is positive.
                if (point.z() > 0)
                {
                    // Project 3D sensor coordinates to 2D fisheye image coordinates using intrinsics
                    projected_line.emplace_back();
                    rs2_project_point_to_pixel(projected_line.back().f, &intrinsics, point.f);
                }
            }
            // Display the line in the image
            render_line(projected_line, int(i));
        }
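        // render_line() picks the color from the index: i == 1 draws the X axis in red,
        // i == 2 the Y axis in green and i == 3 the Z axis in blue.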

        // Display text in the image
        render_text((int)app.height(), device_pose_in_world.tracker_confidence > 2 ?
            "Press spacebar to reset the pose of the virtual object. Press ESC to exit" :
            "Move the camera around for saving the virtual object. Press ESC to exit");

        // Check if some key is pressed
        switch (key_watcher.get_key())
        {
        case GLFW_KEY_SPACE:
            // Reset virtual object pose if user presses spacebar
            object_pose_in_world = reset_object_pose(device_pose_in_world);
            std::cout << "Setting new pose for virtual object: " << object_pose_in_world.translation << std::endl;
            break;
        case GLFW_KEY_ESCAPE:
            // Exit if user presses escape
            if (tm_sensor.set_static_node(virtual_object_guid, object_pose_in_world.translation, object_pose_in_world.rotation)) {
                std::cout << "Saved virtual object as static node. " << std::endl;
            }

            // Export map to a raw file
            if (!out_map_filepath.empty()) {
                pipe.stop();
                raw_file_from_bytes(out_map_filepath, tm_sensor.export_localization_map());
                std::cout << "Saved map to " << out_map_filepath << std::endl;
            }
            // Intentional fall-through: ESC also closes the application
        case GLFW_KEY_Q:
            app.close();
            break;
        }
    }

    return EXIT_SUCCESS;
}
catch (const rs2::error& e)
{
    std::cerr << "RealSense error calling " << e.get_failed_function() << "(" << e.get_failed_args() << "):\n " << e.what() << std::endl;
    return EXIT_FAILURE;
}
catch (const std::exception& e)
{
    std::cerr << e.what() << std::endl;
    return EXIT_FAILURE;
}

rs2_pose identity_pose()
{
    // Return an identity pose (no translation, no rotation)
    rs2_pose pose;
    pose.translation.x = 0;
    pose.translation.y = 0;
    pose.translation.z = 0;
    pose.rotation.x = 0;
    pose.rotation.y = 0;
    pose.rotation.z = 0;
    pose.rotation.w = 1;
    return pose;
}

rs2_pose reset_object_pose(const rs2_pose& device_pose_in_world)
{
    // Set the object 50 centimeters away in front of the camera.
    // T265 coordinate system is defined here: https://github.com/IntelRealSense/librealsense/blob/master/doc/t265.md#sensor-origin-and-coordinate-system
    rs2_pose object_pose_in_device;
    object_pose_in_device.translation.x = 0;
    object_pose_in_device.translation.y = 0;
    object_pose_in_device.translation.z = -0.50;
    object_pose_in_device.rotation.x = 0;
    object_pose_in_device.rotation.y = 0;
    object_pose_in_device.rotation.z = 0;
    object_pose_in_device.rotation.w = 1;

    // Convert the pose of the virtual object from camera coordinates into world coordinates
    rs2_pose object_pose_in_world = pose_multiply(device_pose_in_world, object_pose_in_device);
    return object_pose_in_world;
}

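// pose_inverse(): for a unit quaternion the inverse rotation is the conjugate, and the
// inverse translation is the original translation negated and expressed in the inverted frame:
//   q_inv = conj(q),  t_inv = -(q_inv * t)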
rs2_pose pose_inverse(const rs2_pose& p)
{
    rs2_pose i;
    i.rotation = quaternion_conjugate(p.rotation);
    i.translation = vector_negate(quaternion_rotate_vector(i.rotation, p.translation));
    return i;
}

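// pose_multiply(): composes two rigid transforms. Rotations compose by quaternion product, and
// the child translation is rotated into the parent frame before adding the parent translation:
//   q = q1 * q2,  t = q1 * t2 + t1   (where "q1 * t2" means rotating t2 by q1)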
rs2_pose pose_multiply(const rs2_pose& ref2_in_ref1, const rs2_pose& ref3_in_ref2)
{
    rs2_pose ref3_in_ref1;
    ref3_in_ref1.rotation = quaternion_multiply(ref2_in_ref1.rotation, ref3_in_ref2.rotation);
    ref3_in_ref1.translation = vector_addition(quaternion_rotate_vector(ref2_in_ref1.rotation, ref3_in_ref2.translation), ref2_in_ref1.translation);
    return ref3_in_ref1;
}

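// pose_transform_point(): applies a pose to a point, p' = R(q) * p + t.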
rs2_vector pose_transform_point(const rs2_pose& pose, const rs2_vector& p)
{
    return vector_addition(quaternion_rotate_vector(pose.rotation, p), pose.translation);
}

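// quaternion_multiply(): Hamilton product written per component for quaternions stored as
// (x, y, z, w); in scalar/vector form this is (w1*w2 - v1.v2, w1*v2 + w2*v1 + v1 x v2).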
rs2_quaternion quaternion_multiply(const rs2_quaternion& a, const rs2_quaternion& b)
{
    return rs2_quaternion {
        a.x * b.w + a.w * b.x - a.z * b.y + a.y * b.z,
        a.y * b.w + a.z * b.x + a.w * b.y - a.x * b.z,
        a.z * b.w - a.y * b.x + a.x * b.y + a.w * b.z,
        a.w * b.w - a.x * b.x - a.y * b.y - a.z * b.z,
    };
}

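// quaternion_rotate_vector(): rotates v by q using the sandwich product q * (v, 0) * conj(q)
// and takes the vector part of the result.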
rs2_vector quaternion_rotate_vector(const rs2_quaternion& q, const rs2_vector& v)
{
    rs2_quaternion v_as_quaternion = { v.x, v.y, v.z, 0 };
    rs2_quaternion rotated_v = quaternion_multiply(quaternion_multiply(q, v_as_quaternion), quaternion_conjugate(q));
    return rs2_vector { rotated_v.x, rotated_v.y, rotated_v.z };
}

rs2_quaternion quaternion_conjugate(const rs2_quaternion& q)
{
    return rs2_quaternion { -q.x, -q.y, -q.z, q.w };
}

rs2_vector vector_addition(const rs2_vector& a, const rs2_vector& b)
{
    return rs2_vector { a.x + b.x, a.y + b.y, a.z + b.z };
}

rs2_vector vector_negate(const rs2_vector& v)
{
    return rs2_vector { -v.x, -v.y, -v.z };
}

object convert_object_coordinates(const object& obj, const rs2_pose& object_pose)
{
    object transformed_obj;
    for (size_t i = 0; i < obj.size(); ++i) {
        rs2_vector v { obj[i].x(), obj[i].y(), obj[i].z() };
        v = pose_transform_point(object_pose, v);
        transformed_obj[i].f[0] = v.x;
        transformed_obj[i].f[1] = v.y;
        transformed_obj[i].f[2] = v.z;
    }
    return transformed_obj;
}

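// raster_line() samples the 3D segment from a to b every `step` meters. The fisheye projection
// is strongly non-linear, so a straight 3D edge maps to a curve in the image; projecting many
// small samples (1 cm apart in the caller) approximates that curve with short 2D segments.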
std::vector<point3d> raster_line(const point3d& a, const point3d& b, float step)
{
    rs2_vector direction = { b.x() - a.x(), b.y() - a.y(), b.z() - a.z() };
    float distance = std::sqrt(direction.x*direction.x + direction.y*direction.y + direction.z*direction.z);
    int npoints = (int)(distance / step + 1);

    std::vector<point3d> points;
    if (npoints > 0)
    {
        direction.x = direction.x * step / distance;
        direction.y = direction.y * step / distance;
        direction.z = direction.z * step / distance;

        points.reserve(npoints);
        points.emplace_back(a);
        for (int i = 1; i < npoints; ++i)
        {
            points.emplace_back(a.x() + direction.x * i,
                                a.y() + direction.y * i,
                                a.z() + direction.z * i);
        }
    }
    return points;
}

void render_line(const std::vector<pixel>& line, int color_code)
{
    if (!line.empty())
    {
        GLfloat current_color[4];
        glGetFloatv(GL_CURRENT_COLOR, current_color);

        glLineWidth(5);
        glColor3f(color_code == 1 ? 1.f : 0.f,
                  color_code == 2 ? 1.f : 0.f,
                  color_code == 3 ? 1.f : 0.f);

        glBegin(GL_LINE_STRIP);
        for (auto& pixel : line)
        {
            glVertex3f(pixel.x(), pixel.y(), 0.f);
        }
        glEnd();

        glColor4fv(current_color);
    }
}

void render_text(int win_height, const std::string& text)
{
    GLfloat current_color[4];
    glGetFloatv(GL_CURRENT_COLOR, current_color);
    glColor3f(0, 0.5, 1);
    glScalef(2, 2, 2);
    draw_text(15, (win_height - 10) / 2, text.c_str());
    glScalef(1, 1, 1);
    glColor4fv(current_color);
}

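// The localization map is treated as an opaque byte buffer: export_localization_map() returns
// the bytes that raw_file_from_bytes() writes to disk, and bytes_from_raw_file() reads them back
// for import_localization_map().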
void raw_file_from_bytes(const std::string& filename, const std::vector<uint8_t> bytes)
{
    std::ofstream file(filename, std::ios::binary | std::ios::trunc);
    if (!file.good())
        throw std::runtime_error("Invalid binary file specified. Verify the target path and location permissions");
    file.write((char*)bytes.data(), bytes.size());
}

std::vector<uint8_t> bytes_from_raw_file(const std::string& filename)
{
    std::ifstream file(filename.c_str(), std::ios::binary);
    if (!file.good())
        throw std::runtime_error("Invalid binary file specified. Verify the source path and location permissions");

    // Determine the file length
    file.seekg(0, std::ios_base::end);
    std::size_t size = file.tellg();
    if (!size)
        throw std::runtime_error("Invalid binary file - zero size");
    file.seekg(0, std::ios_base::beg);

    // Create a vector to store the data
    std::vector<uint8_t> v(size);

    // Load the data
    file.read((char*)&v[0], size);

    return v;
}
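
// Build note (assumption, not part of the original sources): like the other librealsense examples,
// this file depends on example.hpp from the examples folder and on OpenGL/GLFW, so a build needs
// roughly something like:
//   g++ -std=c++11 rs-ar-advanced.cpp -lrealsense2 -lglfw -lGL -o rs-ar-advanced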