measurement_task.py
##################################################################################################
## License: Apache 2.0. See LICENSE file in root directory.                                   ####
##################################################################################################
## Box Dimensioner with multiple cameras: Helper files                                        ####
##################################################################################################

import pyrealsense2 as rs
import numpy as np
import cv2
from realsense_device_manager import post_process_depth_frame
from helper_functions import convert_depth_frame_to_pointcloud, get_clipped_pointcloud

def calculate_cumulative_pointcloud(frames_devices, calibration_info_devices, roi_2d, depth_threshold=0.01):
    """
    Calculate the cumulative pointcloud from the multiple devices.

    Parameters:
    -----------
    frames_devices : dict
        The frames from the different devices
        keys: str
            Serial number of the device
        values: [frame]
            frame: rs.frame()
                The frameset obtained over the active pipeline from the realsense device

    calibration_info_devices : dict
        keys: str
            Serial number of the device
        values: [transformation_devices, intrinsics_devices]
            transformation_devices: Transformation object
                The transformation between the device and the world coordinate systems
            intrinsics_devices: rs.intrinsics
                The intrinsics of the depth_frame of the realsense device

    roi_2d : array
        The region of interest given in the following order [minX, maxX, minY, maxY]

    depth_threshold : double
        The depth threshold (meters, world coordinates): points closer to the chessboard plane
        than this value are discarded. Following the right-hand coordinate system, an object
        placed on the chessboard plane extends along the negative Z-axis.

    Return:
    ----------
    point_cloud_cumulative : array
        The cumulative pointcloud from the multiple devices
    """
    # Seed with a dummy column; it is removed after all devices have been processed
    point_cloud_cumulative = np.array([-1, -1, -1]).transpose()
    for (device, frame) in frames_devices.items():
        # Filter the depth frame using the temporal filter and compute the corresponding pointcloud
        filtered_depth_frame = post_process_depth_frame(frame[rs.stream.depth], temporal_smooth_alpha=0.1, temporal_smooth_delta=80)
        point_cloud = convert_depth_frame_to_pointcloud(np.asarray(filtered_depth_frame.get_data()), calibration_info_devices[device][1][rs.stream.depth])
        point_cloud = np.asanyarray(point_cloud)

        # Transform the point cloud into world coordinates
        point_cloud = calibration_info_devices[device][0].apply_transformation(point_cloud)

        # Clip the point cloud to the region of interest and keep only points that lie more than
        # depth_threshold above the chessboard plane (negative Z direction, right-hand coordinate system)
        point_cloud = get_clipped_pointcloud(point_cloud, roi_2d)
        point_cloud = point_cloud[:, point_cloud[2, :] < -depth_threshold]
        point_cloud_cumulative = np.column_stack((point_cloud_cumulative, point_cloud))
    # Remove the dummy seed column
    point_cloud_cumulative = np.delete(point_cloud_cumulative, 0, 1)
    return point_cloud_cumulative

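
# Usage sketch (not part of the original file): a minimal illustration of how the helper above
# is typically called once per captured frameset. `frames_devices`, `calibration_info_devices`
# and `roi_2d` are assumed to come from the calibration stage of the box_dimensioner_multicam
# demo; the function name below is hypothetical.
def _example_accumulate_pointcloud(frames_devices, calibration_info_devices, roi_2d):
    # Fuse the clipped, world-coordinate points seen by all connected devices.
    point_cloud = calculate_cumulative_pointcloud(frames_devices, calibration_info_devices,
                                                  roi_2d, depth_threshold=0.01)
    # The result is a (3 x N) array; N depends on how much of the object is visible.
    return point_cloud
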
def calculate_boundingbox_points(point_cloud, calibration_info_devices, depth_threshold=0.01):
    """
    Calculate the top and bottom bounding box corner points for the point cloud in the image
    coordinates of the color imager of the realsense device.

    Parameters:
    -----------
    point_cloud : ndarray
        The (3 x N) array containing the pointcloud information

    calibration_info_devices : dict
        keys: str
            Serial number of the device
        values: [transformation_devices, intrinsics_devices, extrinsics_devices]
            transformation_devices: Transformation object
                The transformation between the device and the world coordinate systems
            intrinsics_devices: rs.intrinsics
                The intrinsics of the depth_frame of the realsense device
            extrinsics_devices: rs.extrinsics
                The extrinsics between the depth imager and the color imager of the realsense device

    depth_threshold : double
        The depth threshold (meters, world coordinates) that was used when clipping the point
        cloud; it is added back to the measured height to compensate for the points discarded
        near the chessboard plane. Following the right-hand coordinate system, an object placed
        on the chessboard plane extends along the negative Z-axis.

    Return:
    ----------
    bounding_box_points_color_image : dict
        The bounding box corner points in the image coordinate system of the color imager
        keys: str
            Serial number of the device
        values: points
            The (8 x 2) array of the upper corner points stacked above the lower corner points

    length : double
        The length of the bounding box calculated in the world coordinates of the pointcloud

    width : double
        The width of the bounding box calculated in the world coordinates of the pointcloud

    height : double
        The height of the bounding box calculated in the world coordinates of the pointcloud
    """
    # Calculate the dimensions of the filtered and fused point cloud; some array reshaping follows
    if point_cloud.shape[1] > 500:
        # Get the bounding box in 2D using the X and Y coordinates
        coord = np.c_[point_cloud[0, :], point_cloud[1, :]].astype('float32')
        min_area_rectangle = cv2.minAreaRect(coord)
        bounding_box_world_2d = cv2.boxPoints(min_area_rectangle)
        # Calculate the height of the pointcloud, adding back the clipped band near the plane
        height = max(point_cloud[2, :]) - min(point_cloud[2, :]) + depth_threshold

        # Get the upper and lower bounding box corner points in 3D
        height_array = np.array([[-height], [-height], [-height], [-height], [0], [0], [0], [0]])
        bounding_box_world_3d = np.column_stack((np.row_stack((bounding_box_world_2d, bounding_box_world_2d)), height_array))

        # Get the bounding box points in the image coordinates
        bounding_box_points_color_image = {}
        for (device, calibration_info) in calibration_info_devices.items():
            # Transform the bounding box corner points to the device coordinates
            bounding_box_device_3d = calibration_info[0].inverse().apply_transformation(bounding_box_world_3d.transpose())

            # Obtain the image coordinates in the color imager using the bounding box 3D corner points in the device coordinates
            color_pixel = []
            bounding_box_device_3d = bounding_box_device_3d.transpose().tolist()
            for bounding_box_point in bounding_box_device_3d:
                bounding_box_color_image_point = rs.rs2_transform_point_to_point(calibration_info[2], bounding_box_point)
                color_pixel.append(rs.rs2_project_point_to_pixel(calibration_info[1][rs.stream.color], bounding_box_color_image_point))

            bounding_box_points_color_image[device] = np.row_stack(color_pixel)
        return bounding_box_points_color_image, min_area_rectangle[1][0], min_area_rectangle[1][1], height
    else:
        return {}, 0, 0, 0

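
# Usage sketch (not part of the original file): turns a fused point cloud into box dimensions
# and per-device image-space corners. `point_cloud` is assumed to be the (3 x N) array returned
# by calculate_cumulative_pointcloud; the function name below is hypothetical.
def _example_box_from_pointcloud(point_cloud, calibration_info_devices):
    # A cloud with 500 points or fewer yields ({}, 0, 0, 0); otherwise dimensions are in meters.
    bounding_box_points, length, width, height = calculate_boundingbox_points(
        point_cloud, calibration_info_devices)
    return bounding_box_points, length, width, height
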
def visualise_measurements(frames_devices, bounding_box_points_devices, length, width, height):
    """
    Draw the bounding box and the measured dimensions as an overlay on the color image of each device.

    Parameters:
    -----------
    frames_devices : dict
        The frames from the different devices
        keys: str
            Serial number of the device
        values: [frame]
            frame: rs.frame()
                The frameset obtained over the active pipeline from the realsense device

    bounding_box_points_devices : dict
        The bounding box corner points in the image coordinate system of the color imager
        keys: str
            Serial number of the device
        values: points
            The (8 x 2) array of the upper corner points stacked above the lower corner points

    length : double
        The length of the bounding box calculated in the world coordinates of the pointcloud

    width : double
        The width of the bounding box calculated in the world coordinates of the pointcloud

    height : double
        The height of the bounding box calculated in the world coordinates of the pointcloud
    """
    for (device, frame) in frames_devices.items():
        color_image = np.asarray(frame[rs.stream.color].get_data())
        if length != 0 and width != 0 and height != 0:
            bounding_box_points_device_upper = bounding_box_points_devices[device][0:4, :]
            bounding_box_points_device_lower = bounding_box_points_devices[device][4:8, :]
            box_info = "Length, Width, Height (mm): " + str(int(length * 1000)) + ", " + str(int(width * 1000)) + ", " + str(int(height * 1000))

            # Draw the upper rectangle of the box as an overlay on the color image
            bounding_box_points_device_upper = tuple(map(tuple, bounding_box_points_device_upper.astype(int)))
            for i in range(len(bounding_box_points_device_upper)):
                cv2.line(color_image, bounding_box_points_device_upper[i], bounding_box_points_device_upper[(i + 1) % 4], (0, 255, 0), 4)

            # Draw the lower rectangle of the box
            bounding_box_points_device_lower = tuple(map(tuple, bounding_box_points_device_lower.astype(int)))
            for i in range(len(bounding_box_points_device_lower)):
                cv2.line(color_image, bounding_box_points_device_lower[i], bounding_box_points_device_lower[(i + 1) % 4], (0, 255, 0), 1)

            # Connect the upper and lower rectangles to complete the box and print the dimensions
            cv2.line(color_image, bounding_box_points_device_upper[0], bounding_box_points_device_lower[0], (0, 255, 0), 1)
            cv2.line(color_image, bounding_box_points_device_upper[1], bounding_box_points_device_lower[1], (0, 255, 0), 1)
            cv2.line(color_image, bounding_box_points_device_upper[2], bounding_box_points_device_lower[2], (0, 255, 0), 1)
            cv2.line(color_image, bounding_box_points_device_upper[3], bounding_box_points_device_lower[3], (0, 255, 0), 1)
            cv2.putText(color_image, box_info, (50, 50), cv2.FONT_HERSHEY_PLAIN, 2, (0, 255, 0))

        # Visualise the results
        cv2.imshow('Color image from RealSense Device Nr: ' + device, color_image)
        cv2.waitKey(1)
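

# Usage sketch (not part of the original file): one measure-and-display iteration as it might
# appear in the demo's main loop. `device_manager` stands in for the demo's device-manager
# object and its poll_frames() call is an assumption about that API; `calibration_info_devices`
# and `roi_2d` are assumed to come from the calibration stage; the function name is hypothetical.
def _example_measurement_iteration(device_manager, calibration_info_devices, roi_2d):
    # Grab one frameset per connected device.
    frames_devices = device_manager.poll_frames()
    # Fuse the clipped point clouds from all devices in world coordinates.
    point_cloud = calculate_cumulative_pointcloud(frames_devices, calibration_info_devices, roi_2d)
    # Fit the minimum-area box and project its corners into each color image.
    bounding_box_points, length, width, height = calculate_boundingbox_points(
        point_cloud, calibration_info_devices)
    # Draw the projected box and its dimensions (in mm) on each device's color image.
    visualise_measurements(frames_devices, bounding_box_points, length, width, height)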