import pyrealsense2 as rs
import numpy as np
import cv2
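
# Create a pipeline to own the streaming session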
pipeline = rs.pipeline()

# Create a config and configure the pipeline to stream color and depth
config = rs.config()
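
# Get the device product line for setting a supporting resolution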
pipeline_wrapper = rs.pipeline_wrapper(pipeline)
pipeline_profile = config.resolve(pipeline_wrapper)
device = pipeline_profile.get_device()
device_product_line = str(device.get_info(rs.camera_info.product_line))
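
# The demo needs a color sensor; verify the device has one before starting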
found_rgb = False
for s in device.sensors:
    if s.get_info(rs.camera_info.name) == 'RGB Camera':
        found_rgb = True
        break
if not found_rgb:
    print("The demo requires Depth camera with Color sensor")
    exit(0)
config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)
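
# Start streaming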
profile = pipeline.start(config)
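
# Get the depth sensor's depth scale (meters per depth unit)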
depth_sensor = profile.get_device().first_depth_sensor()
depth_scale = depth_sensor.get_depth_scale()
print("Depth Scale is:", depth_scale)
clipping_distance_in_meters = 1
clipping_distance = clipping_distance_in_meters / depth_scale
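
# Create an align object: rs.align maps depth frames onto the color stream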
align_to = rs.stream.color
align = rs.align(align_to)

# Streaming loop
try:
    while True:
        # Wait for a coherent frameset of color and depth
        frames = pipeline.wait_for_frames()
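
        # Align the depth frame to the color frame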
        aligned_frames = align.process(frames)
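
        # Get the aligned depth and color frames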
        aligned_depth_frame = aligned_frames.get_depth_frame()
        color_frame = aligned_frames.get_color_frame()
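
        # Skip the iteration if either frame is missing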
        if not aligned_depth_frame or not color_frame:
            continue

        depth_image = np.asanyarray(aligned_depth_frame.get_data())
        color_image = np.asanyarray(color_frame.get_data())
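
        # Remove background: set pixels beyond clipping_distance to grey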
        grey_color = 153
        depth_image_3d = np.dstack((depth_image, depth_image, depth_image))  # depth is 1 channel, color is 3
        bg_removed = np.where((depth_image_3d > clipping_distance) | (depth_image_3d <= 0), grey_color, color_image)
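
        # Render background-removed color next to a colorized depth map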
        depth_colormap = cv2.applyColorMap(cv2.convertScaleAbs(depth_image, alpha=0.03), cv2.COLORMAP_JET)
        images = np.hstack((bg_removed, depth_colormap))

        cv2.namedWindow('Align Example', cv2.WINDOW_NORMAL)
        cv2.imshow('Align Example', images)
        key = cv2.waitKey(1)
        # Press 'q' or Esc to close the image window
        if key & 0xFF == ord('q') or key == 27:
            cv2.destroyAllWindows()
            break
finally:
    pipeline.stop()