convert_to_bag.py — records saved PNG frames (left infrared, result depth, ground-truth depth, denoised depth) into a RealSense .bag file through a librealsense software device.
import glob
import os
import sys
import time

import cv2
import numpy as np
import pyrealsense2 as rs
5 
focal = 0.0021
baseline = 0.08

# Build a virtual (software) device with a single depth sensor that will
# carry four streams: the left infrared image plus three z16 depth variants.
sd = rs.software_device()
depth_sensor = sd.add_sensor("Depth")

# Intrinsics shared by every stream (848x480; fx == fy, principal point
# near the image center).
intr = rs.intrinsics()
intr.width = 848
intr.height = 480
intr.ppx = 637.951293945312
intr.ppy = 360.783233642578
intr.fx = 638.864135742188
intr.fy = 638.864135742188


def _video_stream(stream_type, pixel_format, stream_index, uid, bytes_per_pixel):
    """Describe one 30 fps video stream using the shared intrinsics."""
    s = rs.video_stream()
    s.type = stream_type
    s.fmt = pixel_format
    s.index = stream_index
    s.uid = uid
    s.width = intr.width
    s.height = intr.height
    s.fps = 30
    s.bpp = bytes_per_pixel
    s.intrinsics = intr
    return s


# (type, format, index, uid, bpp) for the four streams, registered in the
# order the recording loop later addresses them via get_stream_profiles().
_STREAM_SPECS = (
    (rs.stream.infrared, rs.format.y8, 1, 1, 1),  # left IR image
    (rs.stream.depth, rs.format.z16, 1, 3, 2),    # presumably fed res-*.png — see loop below
    (rs.stream.depth, rs.format.z16, 2, 4, 2),    # presumably fed gt-*.png
    (rs.stream.depth, rs.format.z16, 3, 5, 2),    # presumably fed res_denoised-*.png
)
for spec in _STREAM_SPECS:
    depth_sensor.add_video_stream(_video_stream(*spec))

# Depth is stored in millimeters (1 unit == 0.001 m).
depth_sensor.add_read_only_option(rs.option.depth_units, 0.001)
name = "virtual camera"
sd.register_info(rs.camera_info.name, name)
56 
ctx = rs.context()
sd.add_to(ctx)

# Locate our software device in the context by its registered name;
# fall back to the first device if (unexpectedly) no name matches.
dev = ctx.query_devices()[0]
for candidate in ctx.query_devices():
    if candidate.get_info(rs.camera_info.name) == name:
        dev = candidate
64 
# Directory holding the input PNGs; optional first command-line argument.
images_path = str(sys.argv[1]) if len(sys.argv) > 1 else "."

# Record everything the software sensor emits into <images_path>/1.bag.
rec = rs.recorder(images_path + "/1.bag", dev)
sensor = rec.query_sensors()[0]

# Route frames through a queue so we can block on delivery at shutdown.
q = rs.frame_queue()
sensor.open(sensor.get_stream_profiles())
sensor.start(q)
75 
# Enumerate frame indices from the ground-truth files named "gt-<i>.png".
# FIX: glob.glob1 is an undocumented internal helper (deprecated since
# Python 3.11 and since removed) — use the public glob.glob instead and
# take the basename so an images_path containing '-' cannot break parsing.
index = []
for path in glob.glob(os.path.join(images_path, "gt*")):
    stem = os.path.basename(path)
    index.append(int(stem.split('-')[1].split('.')[0]))
# Record frames in chronological order so bag timestamps (i * 0.01 below)
# increase monotonically; glob returns files in arbitrary order.
index.sort()
81 
def _push_depth_frame(path, profile_index, frame_number):
    """Read a 16-bit PNG at *path* and push it as a z16 frame on the
    sensor's stream profile *profile_index*.

    Timestamps are frame_number * 0.01, matching the IR frame pushed for
    the same index so the streams stay aligned.
    """
    img = cv2.imread(path, cv2.IMREAD_ANYDEPTH)
    frame = rs.software_video_frame()
    frame.stride = 2 * intr.width  # z16: two bytes per pixel
    frame.bpp = 2
    frame.pixels = np.asarray(img, dtype="ushort")
    frame.timestamp = frame_number * 0.01
    frame.frame_number = frame_number
    frame.profile = sensor.get_stream_profiles()[profile_index].as_video_stream_profile()
    depth_sensor.on_video_frame(frame)


for i in index:
    left_name = images_path + "/left-" + str(i) + ".png"
    depth_name = images_path + "/gt-" + str(i) + ".png"
    result_name = images_path + "/res-" + str(i) + ".png"
    denoised_name = images_path + "/res_denoised-" + str(i) + ".png"

    # Left infrared frame: load as grayscale and push 8-bit pixels on
    # stream profile 0.
    img = cv2.imread(left_name)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    ir = rs.software_video_frame()
    ir.stride = intr.width
    ir.bpp = 1
    ir.pixels = np.asarray(img, dtype="byte")
    ir.timestamp = i * 0.01
    ir.frame_number = i
    ir.profile = sensor.get_stream_profiles()[0].as_video_stream_profile()
    depth_sensor.on_video_frame(ir)

    time.sleep(0.01)

    # Result, ground-truth and denoised depth maps go to profiles 1..3.
    # (Previously three copy-pasted blocks; factored into one helper.)
    for profile_index, path in ((1, result_name), (2, depth_name), (3, denoised_name)):
        _push_depth_frame(path, profile_index, i)
        time.sleep(0.01)

    # Pause between frame sets so the recorder can keep up.
    time.sleep(1)
140 
141 print("a")
142 f = q.wait_for_frame()
143 print("b")
144 
145 time.sleep(1)
146 
147 sensor.stop()
148 sensor.close()
std::vector< uint32_t > split(const std::string &s, char delim)
static std::string print(const transformation &tf)


librealsense2
Author(s): Sergey Dorodnicov , Doron Hirshberg , Mark Horn , Reagan Lopez , Itay Carpis
autogenerated on Sat Apr 3 2021 02:47:53