box_dimensioner_multicam_demo.py

# Import RealSense, OpenCV and NumPy
import pyrealsense2 as rs
import cv2
import numpy as np

# Import helper functions and classes written to wrap the RealSense, OpenCV and Kabsch Calibration usage
from collections import defaultdict
from realsense_device_manager import DeviceManager
from calibration_kabsch import PoseEstimation
from helper_functions import get_boundary_corners_2D
from measurement_task import calculate_boundingbox_points, calculate_cumulative_pointcloud, visualise_measurements

def run_demo():

    # Define some constants
    L515_resolution_width = 1024  # pixels
    L515_resolution_height = 768  # pixels
    L515_frame_rate = 30  # fps

    resolution_width = 1280  # pixels
    resolution_height = 720  # pixels
    frame_rate = 15  # fps

    dispose_frames_for_stablisation = 30  # frames

    chessboard_width = 6  # squares
    chessboard_height = 9  # squares
    square_size = 0.0253  # meters

    try:
        # Enable the streams from all the Intel RealSense devices
        L515_rs_config = rs.config()
        L515_rs_config.enable_stream(rs.stream.depth, L515_resolution_width, L515_resolution_height, rs.format.z16, L515_frame_rate)
        L515_rs_config.enable_stream(rs.stream.infrared, 0, L515_resolution_width, L515_resolution_height, rs.format.y8, L515_frame_rate)
        L515_rs_config.enable_stream(rs.stream.color, resolution_width, resolution_height, rs.format.bgr8, frame_rate)

        rs_config = rs.config()
        rs_config.enable_stream(rs.stream.depth, resolution_width, resolution_height, rs.format.z16, frame_rate)
        rs_config.enable_stream(rs.stream.infrared, 1, resolution_width, resolution_height, rs.format.y8, frame_rate)
        rs_config.enable_stream(rs.stream.color, resolution_width, resolution_height, rs.format.bgr8, frame_rate)

        # Use the device manager class to enable the devices and get the frames
        device_manager = DeviceManager(rs.context(), rs_config, L515_rs_config)
        device_manager.enable_all_devices()

        # Allow some frames for the auto-exposure controller to stabilise
        for frame in range(dispose_frames_for_stablisation):
            frames = device_manager.poll_frames()

        assert len(device_manager._available_devices) > 0
        """
        1: Calibration
        Calibrate all the available devices to the world coordinates.
        For this purpose, a chessboard printout is needed for the OpenCV-based calibration process.
        (Sketches of the corner detection and the Kabsch fit follow this listing.)
        """
        # Get the intrinsics of each RealSense device
        intrinsics_devices = device_manager.get_device_intrinsics(frames)

        # Set the chessboard parameters for calibration
        chessboard_params = [chessboard_height, chessboard_width, square_size]

        # Estimate the pose of the chessboard in the world coordinate frame using the Kabsch method
        calibrated_device_count = 0
        while calibrated_device_count < len(device_manager._available_devices):
            frames = device_manager.poll_frames()
            pose_estimator = PoseEstimation(frames, intrinsics_devices, chessboard_params)
            transformation_result_kabsch = pose_estimator.perform_pose_estimation()
            object_point = pose_estimator.get_chessboard_corners_in3d()
            calibrated_device_count = 0
            for device_info in device_manager._available_devices:
                device = device_info[0]
                if not transformation_result_kabsch[device][0]:
                    print("Place the chessboard on the plane where the object needs to be detected..")
                else:
                    calibrated_device_count += 1

        # Save the transformation object for all devices in a dictionary to use for measurements
        transformation_devices = {}
        # Seed the corner accumulator with a dummy column; it is removed once all corners are stacked
        chessboard_points_cumulative_3d = np.array([-1, -1, -1]).transpose()
        for device_info in device_manager._available_devices:
            device = device_info[0]
            transformation_devices[device] = transformation_result_kabsch[device][1].inverse()
            points3D = object_point[device][2][:, object_point[device][3]]
            points3D = transformation_devices[device].apply_transformation(points3D)
            chessboard_points_cumulative_3d = np.column_stack((chessboard_points_cumulative_3d, points3D))

        # Extract the bounds between which the object's dimensions are needed
        # It is necessary for this demo that the object's length and breadth are smaller than those of the chessboard
        chessboard_points_cumulative_3d = np.delete(chessboard_points_cumulative_3d, 0, 1)
        roi_2D = get_boundary_corners_2D(chessboard_points_cumulative_3d)

        print("Calibration completed... \nPlace the box in the field of view of the devices...")


        """
        2: Measurement and display
        Measure the dimensions of the object using depth maps from multiple RealSense devices.
        The calibration information from Phase 1 is used here.
        (Sketches of the measurement and color-image projection steps follow this listing.)
        """

        # Enable the emitter of the devices
        device_manager.enable_emitter(True)

        # Load the JSON settings file in order to enable the High Accuracy preset for the RealSense
        device_manager.load_settings_json("./HighResHighAccuracyPreset.json")

        # Get the extrinsics of the devices to be used later
        extrinsics_devices = device_manager.get_depth_to_color_extrinsics(frames)

        # Get the calibration info as a dictionary to help with displaying the measurements on the color image instead of the infrared image
        calibration_info_devices = defaultdict(list)
        for calibration_info in (transformation_devices, intrinsics_devices, extrinsics_devices):
            for key, value in calibration_info.items():
                calibration_info_devices[key].append(value)

        # Continue acquisition until terminated with Ctrl+C by the user
        while True:
            # Get the frames from all the devices
            frames_devices = device_manager.poll_frames()

            # Calculate the pointcloud using the depth frames from all the devices
            point_cloud = calculate_cumulative_pointcloud(frames_devices, calibration_info_devices, roi_2D)

            # Get the bounding box for the pointcloud in image coordinates of the color imager
            bounding_box_points_color_image, length, width, height = calculate_boundingbox_points(point_cloud, calibration_info_devices)

            # Draw the bounding box points on the color image and visualise the results
            visualise_measurements(frames_devices, bounding_box_points_color_image, length, width, height)

    except KeyboardInterrupt:
        print("The program was interrupted by the user. Closing the program...")

    finally:
        device_manager.disable_streams()
        cv2.destroyAllWindows()


if __name__ == "__main__":
    run_demo()
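
The calibration in Phase 1 needs the chessboard corners as 3D points in each camera's frame before a pose can be fitted. One way to obtain them, given an infrared image, the matching depth image, the depth stream intrinsics and the depth scale (this is a hedged sketch with illustrative names and a default pattern size, not the internals of calibration_kabsch.PoseEstimation):

    import cv2
    import numpy as np
    import pyrealsense2 as rs

    def chessboard_corners_to_3d(ir_image, depth_image, depth_intrinsics, depth_scale, pattern=(9, 6)):
        # Detect the inner corners of the chessboard in the 8-bit infrared image.
        found, corners = cv2.findChessboardCorners(ir_image, pattern)
        if not found:
            return None
        points_3d = []
        for u, v in corners.reshape(-1, 2):
            depth = depth_image[int(round(v)), int(round(u))] * depth_scale  # meters
            if depth > 0:
                # Deproject the pixel into the depth camera's 3D coordinate frame.
                points_3d.append(rs.rs2_deproject_pixel_to_point(depth_intrinsics, [float(u), float(v)], depth))
        return np.asarray(points_3d).T  # 3xN, one column per corner with valid depth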
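
calibration_kabsch.PoseEstimation then matches those camera-frame corners against their known positions on the board (derived from chessboard_width, chessboard_height and square_size) and solves for the rigid transform with the Kabsch algorithm. A minimal sketch of that fit for two already-matched 3xN point sets (the function name and argument layout are illustrative, not the helper's API):

    import numpy as np

    def fit_rigid_transform(src, dst):
        # Kabsch: find rotation R and translation t minimising ||R @ src + t - dst||
        # for matched 3xN point sets src (camera frame) and dst (board/world frame).
        src_centroid = src.mean(axis=1, keepdims=True)
        dst_centroid = dst.mean(axis=1, keepdims=True)
        cross_cov = (src - src_centroid) @ (dst - dst_centroid).T
        U, _, Vt = np.linalg.svd(cross_cov)
        d = np.sign(np.linalg.det(Vt.T @ U.T))  # guard against a reflection solution
        R = Vt.T @ np.diag([1.0, 1.0, d]) @ U.T
        t = dst_centroid - R @ src_centroid
        return R, t

The demo keeps the inverse of the estimated transform per device (transformation_result_kabsch[device][1].inverse() above), so that apply_transformation maps camera-frame points into the shared world frame anchored to the chessboard, as done with points3D in the listing.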
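
get_boundary_corners_2D turns the accumulated world-frame chessboard corners into the 2D region of interest (roi_2D) that later restricts the point cloud to the plane area the board covered, which is why the box's footprint must be smaller than the board. A minimal way to derive such a region (a sketch, not the helper_functions implementation):

    import numpy as np

    def boundary_corners_2d(points_3d):
        # points_3d: 3xN chessboard corners in world coordinates; only the XY extent matters
        # because the region of interest lies in the calibration plane.
        min_x, max_x = points_3d[0, :].min(), points_3d[0, :].max()
        min_y, max_y = points_3d[1, :].min(), points_3d[1, :].max()
        return min_x, max_x, min_y, max_y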
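
In Phase 2, calculate_cumulative_pointcloud merges the depth frames from every device into one cloud expressed in the world frame, and calculate_boundingbox_points reduces that cloud to a length, width and height plus the bounding-box corners in each color image. One common way to perform such a reduction, shown here as a sketch rather than the measurement_task implementation, is to fit a rotated rectangle to the XY footprint of the points above the calibration plane and take the tallest point as the height:

    import cv2
    import numpy as np

    def box_dimensions(point_cloud, depth_threshold=0.01):
        # point_cloud: 3xN array in world coordinates, with Z measured up from the chessboard plane.
        above_plane = point_cloud[:, point_cloud[2, :] > depth_threshold]
        if above_plane.shape[1] < 3:
            return 0.0, 0.0, 0.0
        footprint = above_plane[:2, :].T.astype(np.float32)  # Nx2 projection onto the plane
        (_, _), (w, h), _ = cv2.minAreaRect(footprint)
        length, width = max(w, h), min(w, h)  # meters, since the cloud is metric
        height = float(above_plane[2, :].max())
        return length, width, height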
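
calibration_info_devices bundles, per device, the world transformation, the stream intrinsics and the depth-to-color extrinsics, which is exactly what is needed to draw world-space bounding-box corners on each color image. A sketch of that projection path using the pyrealsense2 conversion helpers (argument names are illustrative; the transformation object is assumed to be the one stored in transformation_devices above):

    import pyrealsense2 as rs

    def world_point_to_color_pixel(point_world, world_transformation, depth_to_color_extrinsics, color_intrinsics):
        # point_world: length-3 NumPy array in world coordinates.
        # World frame -> depth-sensor frame (undo the calibration transform).
        point_depth = world_transformation.inverse().apply_transformation(point_world.reshape(3, 1))
        # Depth-sensor frame -> color-sensor frame using the stored extrinsics.
        point_color = rs.rs2_transform_point_to_point(depth_to_color_extrinsics, [float(c) for c in point_depth[:, 0]])
        # Color-sensor frame -> pixel coordinates of the color imager.
        return rs.rs2_project_point_to_pixel(color_intrinsics, point_color)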


librealsense2
Author(s): LibRealSense ROS Team