# First import the RealSense library
import pyrealsense2 as rs
# NumPy for array handling, OpenCV for rendering
import numpy as np
import cv2
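# Requires the pyrealsense2, numpy and opencv-python packages and a
# connected RealSense camera.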
# Create a pipeline and the config used to request streams
pipeline = rs.pipeline()
config = rs.config()

# Query the device product line so a supported color resolution can be chosen
pipeline_wrapper = rs.pipeline_wrapper(pipeline)
pipeline_profile = config.resolve(pipeline_wrapper)
device = pipeline_profile.get_device()
device_product_line = str(device.get_info(rs.camera_info.product_line))
# Depth stream: 16-bit raw depth units (z16)
config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
# L500-series devices stream color at 960x540; other devices use 640x480
if device_product_line == 'L500':
    config.enable_stream(rs.stream.color, 960, 540, rs.format.bgr8, 30)
else:
    config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)
# Start streaming
profile = pipeline.start(config)
# The depth scale converts raw z16 depth units to meters
depth_sensor = profile.get_device().first_depth_sensor()
depth_scale = depth_sensor.get_depth_scale()
print("Depth Scale is: ", depth_scale)
# Remove the background of objects more than clipping_distance_in_meters away;
# dividing by the depth scale expresses that threshold in raw depth units,
# directly comparable to the values in the depth image
clipping_distance_in_meters = 1  # 1 meter
clipping_distance = clipping_distance_in_meters / depth_scale
# Create an align object: rs.align aligns depth frames to the color stream
align_to = rs.stream.color
align = rs.align(align_to)
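# Note: alignment re-projects each depth pixel into the color camera's
# viewpoint using the device's calibrated extrinsics, so pixel (u, v) in the
# color image and in the aligned depth image refer to the same scene point.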
# Streaming loop
try:
    while True:
        # Get a frameset of color and depth
        frames = pipeline.wait_for_frames()
        # Align the depth frame to the color frame
        aligned_frames = align.process(frames)

        # Get the aligned frames
        aligned_depth_frame = aligned_frames.get_depth_frame()
        color_frame = aligned_frames.get_color_frame()

        # Skip this iteration if either frame is missing
        if not aligned_depth_frame or not color_frame:
            continue

        depth_image = np.asanyarray(aligned_depth_frame.get_data())
        color_image = np.asanyarray(color_frame.get_data())
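        # (Sketch, not part of the original example) the aligned depth frame
        # can also report a metric distance directly; get_distance() applies
        # the depth scale internally, e.g. at the image center:
        # center_dist_m = aligned_depth_frame.get_distance(320, 240)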
        # Remove background: pixels beyond clipping_distance (or with no
        # depth reading) are replaced with grey
        grey_color = 153
        depth_image_3d = np.dstack((depth_image, depth_image, depth_image))  # depth is 1 channel, color is 3
        bg_removed = np.where((depth_image_3d > clipping_distance) | (depth_image_3d <= 0), grey_color, color_image)
        # Render: background-removed color on the left, colorized depth on the right
        depth_colormap = cv2.applyColorMap(cv2.convertScaleAbs(depth_image, alpha=0.03), cv2.COLORMAP_JET)
        images = np.hstack((bg_removed, depth_colormap))

        cv2.namedWindow('Align Example', cv2.WINDOW_NORMAL)
        cv2.imshow('Align Example', images)
        key = cv2.waitKey(1)
        # Press 'q' or Esc to close the window
        if key & 0xFF == ord('q') or key == 27:
            cv2.destroyAllWindows()
            break
finally:
    pipeline.stop()