calibration_kabsch.py
##################################################################################################
##          License: Apache 2.0. See LICENSE file in root directory.                         ####
##################################################################################################
##          Box Dimensioner with multiple cameras: Helper files                              ####
##################################################################################################

import pyrealsense2 as rs
import calculate_rmsd_kabsch as rmsd
import numpy as np
from helper_functions import cv_find_chessboard, get_chessboard_points_3D, get_depth_at_pixel, convert_depth_pixel_to_metric_coordinate
from realsense_device_manager import post_process_depth_frame

"""
Helper Functions
"""


def calculate_transformation_kabsch(src_points, dst_points):
    """
    Calculates the optimal rigid transformation from src_points to dst_points
    (in the least squares sense)

    Parameters:
    -----------
    src_points: array
        (3,N) matrix
    dst_points: array
        (3,N) matrix

    Returns:
    -----------
    rotation_matrix: array
        (3,3) matrix
    translation_vector: array
        (3,1) matrix
    rmsd_value: float
    """
    assert src_points.shape == dst_points.shape
    if src_points.shape[0] != 3:
        raise Exception("The input data matrix has to be transposed in order to compute the transformation.")

    src_points = src_points.transpose()
    dst_points = dst_points.transpose()

    src_points_centered = src_points - rmsd.centroid(src_points)
    dst_points_centered = dst_points - rmsd.centroid(dst_points)

    rotation_matrix = rmsd.kabsch(src_points_centered, dst_points_centered)
    rmsd_value = rmsd.kabsch_rmsd(src_points_centered, dst_points_centered)

    translation_vector = rmsd.centroid(dst_points) - np.matmul(rmsd.centroid(src_points), rotation_matrix)

    return rotation_matrix.transpose(), translation_vector.transpose(), rmsd_value
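
# Usage sketch (not part of the original example; the synthetic points below are
# illustrative assumptions): fit a known rigid transform and check that the recovered
# rotation and translation reproduce it.
#
#   src = np.random.rand(3, 20)
#   angle = np.pi / 6
#   R_true = np.array([[np.cos(angle), -np.sin(angle), 0.0],
#                      [np.sin(angle),  np.cos(angle), 0.0],
#                      [0.0,            0.0,           1.0]])
#   t_true = np.array([0.1, -0.2, 0.5])
#   dst = np.matmul(R_true, src) + t_true.reshape(3, 1)
#   R, t, err = calculate_transformation_kabsch(src, dst)
#   # err should be close to 0, and np.matmul(R, src) + t.reshape(3, 1) should match dst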


"""
Main Content
"""

class Transformation:
    def __init__(self, rotation_matrix, translation_vector):
        self.pose_mat = np.zeros((4,4))
        self.pose_mat[:3,:3] = rotation_matrix
        self.pose_mat[:3,3] = translation_vector.flatten()
        self.pose_mat[3,3] = 1

    def apply_transformation(self, points):
        """
        Applies the transformation to the pointcloud

        Parameters:
        -----------
        points : array
            (3, N) matrix where N is the number of points

        Returns:
        ----------
        points_transformed : array
            (3, N) transformed matrix
        """
        assert(points.shape[0] == 3)
        n = points.shape[1]
        points_ = np.vstack((points, np.ones((1,n))))
        points_trans_ = np.matmul(self.pose_mat, points_)
        points_transformed = np.true_divide(points_trans_[:3,:], points_trans_[[-1], :])
        return points_transformed

    def inverse(self):
        """
        Computes the inverse transformation and returns a new Transformation object

        Returns:
        -----------
        inverse: Transformation
        """
        rotation_matrix = self.pose_mat[:3,:3]
        translation_vector = self.pose_mat[:3,3]

        rot = np.transpose(rotation_matrix)
        trans = - np.matmul(np.transpose(rotation_matrix), translation_vector)
        return Transformation(rot, trans)
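
# Usage sketch (illustrative, not from the original file): composing a transformation
# with its inverse should give back the original points, up to floating point error.
# R, t and src refer to the sketch after calculate_transformation_kabsch above.
#
#   transform = Transformation(R, t)
#   points_in_dst = transform.apply_transformation(src)                    # approximately dst
#   points_back = transform.inverse().apply_transformation(points_in_dst)
#   # np.allclose(points_back, src) should be True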


class PoseEstimation:

    def __init__(self, frames, intrinsic, chessboard_params):
        assert(len(chessboard_params) == 3)
        self.frames = frames
        self.intrinsic = intrinsic
        self.chessboard_params = chessboard_params

    def get_chessboard_corners_in3d(self):
        """
        Searches the chessboard corners in the infrared images of
        every connected device and uses the information in the
        corresponding depth image to calculate the 3d
        coordinates of the chessboard corners in the coordinate system of
        the camera

        Returns:
        -----------
        corners3D : dict
            keys: str
                Serial number of the device
            values: [success, points2D, points3D, validDepths]
                success: bool
                    Indicates whether the operation was successful
                points2D: array
                    (2,N) matrix with the image coordinates of the detected chessboard corners
                points3D: array
                    (3,N) matrix with the coordinates of the chessboard corners
                    in the coordinate system of the camera. N is the number of corners
                    in the chessboard. May contain points with invalid depth values
                validDepths: [bool]*
                    Sequence with length N indicating which point in points3D has a valid depth value
        """
        corners3D = {}
        for (serial, frameset) in self.frames.items():
            depth_frame = post_process_depth_frame(frameset[rs.stream.depth])
            infrared_frame = frameset[(rs.stream.infrared, 1)]
            depth_intrinsics = self.intrinsic[serial][rs.stream.depth]
            found_corners, points2D = cv_find_chessboard(depth_frame, infrared_frame, self.chessboard_params)
            corners3D[serial] = [found_corners, None, None, None]
            if found_corners:
                points3D = np.zeros((3, len(points2D[0])))
                validPoints = [False] * len(points2D[0])
                for index in range(len(points2D[0])):
                    corner = points2D[:, index].flatten()
                    depth = get_depth_at_pixel(depth_frame, corner[0], corner[1])
                    if depth != 0 and depth is not None:
                        validPoints[index] = True
                        [X, Y, Z] = convert_depth_pixel_to_metric_coordinate(depth, corner[0], corner[1], depth_intrinsics)
                        points3D[0, index] = X
                        points3D[1, index] = Y
                        points3D[2, index] = Z
                corners3D[serial] = found_corners, points2D, points3D, validPoints
        return corners3D
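
    # Usage sketch (illustrative; the instance and serial number names below are
    # placeholders): each entry of the returned dict can be unpacked as follows.
    #
    #   corners = pose_estimator.get_chessboard_corners_in3d()
    #   found, points2D, points3D, valid = corners["device-serial"]
    #   if found:
    #       usable_points3D = points3D[:, valid]    # keep only corners with a valid depth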


    def perform_pose_estimation(self):
        """
        Calculates the extrinsic calibration from the coordinate space of the camera to the
        coordinate space spanned by a chessboard by retrieving the 3d coordinates of the
        chessboard with the depth information and subsequently using the Kabsch algorithm
        for finding the optimal rigid transformation between the two coordinate spaces

        Returns:
        -----------
        retval : dict
            keys: str
                Serial number of the device
            values: [success, transformation, points2D, rmsd]
                success: bool
                transformation: Transformation
                    Rigid transformation from the coordinate system of the chessboard to
                    the coordinate system of the camera
                points2D: array
                    (2,N) array of the chessboard corners used for pose estimation
                rmsd: float
                    Root mean square deviation between the observed chessboard corners and
                    the corners in the local coordinate system after transformation
        """
        corners3D = self.get_chessboard_corners_in3d()
        retval = {}
        for (serial, [found_corners, points2D, points3D, validPoints]) in corners3D.items():
            objectpoints = get_chessboard_points_3D(self.chessboard_params)
            retval[serial] = [False, None, None, None]
            if found_corners == True:
                # initial vectors are just for correct dimension
                valid_object_points = objectpoints[:, validPoints]
                valid_observed_object_points = points3D[:, validPoints]

                # check for sufficient points
                if valid_object_points.shape[1] < 5:
                    print("Not enough points have a valid depth for calculating the transformation")
                else:
                    [rotation_matrix, translation_vector, rmsd_value] = calculate_transformation_kabsch(valid_object_points, valid_observed_object_points)
                    retval[serial] = [True, Transformation(rotation_matrix, translation_vector), points2D, rmsd_value]
                    print("RMS error for calibration with device number", serial, "is :", rmsd_value, "m")
        return retval


    def find_chessboard_boundary_for_depth_image(self):
        """
        Returns a dict keyed by device serial number with the pixel bounding box
        [minX, maxX, minY, maxY] of the detected chessboard corners in the infrared image.
        """
        boundary = {}

        for (serial, frameset) in self.frames.items():

            depth_frame = post_process_depth_frame(frameset[rs.stream.depth])
            infrared_frame = frameset[(rs.stream.infrared, 1)]
            found_corners, points2D = cv_find_chessboard(depth_frame, infrared_frame, self.chessboard_params)
            boundary[serial] = [np.floor(np.amin(points2D[0, :])).astype(int), np.floor(np.amax(points2D[0, :])).astype(int), np.floor(np.amin(points2D[1, :])).astype(int), np.floor(np.amax(points2D[1, :])).astype(int)]

        return boundary
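
# Usage sketch (illustrative; assumes the DeviceManager helper from
# realsense_device_manager.py used by the box_dimensioner_multicam demo, and a
# chessboard_params sequence such as (rows, columns, square size in metres)):
#
#   frames = device_manager.poll_frames()
#   intrinsics = device_manager.get_device_intrinsics(frames)
#   pose_estimator = PoseEstimation(frames, intrinsics, chessboard_params)
#   calibration = pose_estimator.perform_pose_estimation()
#   for serial, (ok, transformation, points2D, rmsd_value) in calibration.items():
#       if ok:
#           # invert to get the camera-to-chessboard transform used to register point clouds
#           camera_to_board = transformation.inverse()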