calibration_kabsch.py
import pyrealsense2 as rs
import calculate_rmsd_kabsch as rmsd
import numpy as np
from helper_functions import cv_find_chessboard, get_chessboard_points_3D, get_depth_at_pixel, convert_depth_pixel_to_metric_coordinate
from realsense_device_manager import post_process_depth_frame

"""
Helper Functions
"""


def calculate_transformation_kabsch(src_points, dst_points):
    """
    Calculates the optimal rigid transformation from src_points to dst_points
    (in the least-squares sense)

    Parameters:
    -----------
    src_points: array
        (3,N) matrix
    dst_points: array
        (3,N) matrix

    Returns:
    -----------
    rotation_matrix: array
        (3,3) matrix

    translation_vector: array
        (3,1) matrix

    rmsd_value: float

    """
    assert src_points.shape == dst_points.shape
    if src_points.shape[0] != 3:
        raise Exception("The input data matrices must have shape (3, N); transpose them before computing the transformation.")
    src_points = src_points.transpose()
    dst_points = dst_points.transpose()

    src_points_centered = src_points - rmsd.centroid(src_points)
    dst_points_centered = dst_points - rmsd.centroid(dst_points)

    rotation_matrix = rmsd.kabsch(src_points_centered, dst_points_centered)
    rmsd_value = rmsd.kabsch_rmsd(src_points_centered, dst_points_centered)

    translation_vector = rmsd.centroid(dst_points) - np.matmul(rmsd.centroid(src_points), rotation_matrix)

    return rotation_matrix.transpose(), translation_vector.transpose(), rmsd_value
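# Illustrative sketch (not part of the original example): exercising the Kabsch
# helper above with synthetic data. All values below are made up purely for
# illustration.
#
#   src = np.random.rand(3, 10)
#   angle = np.pi / 6
#   R_true = np.array([[np.cos(angle), -np.sin(angle), 0.0],
#                      [np.sin(angle),  np.cos(angle), 0.0],
#                      [0.0,            0.0,           1.0]])
#   t_true = np.array([[0.1], [-0.2], [0.3]])
#   dst = np.matmul(R_true, src) + t_true
#   R, t, err = calculate_transformation_kabsch(src, dst)
#   # err should be close to zero and np.matmul(R, src) + t.reshape(3, 1) ~ dst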
"""
Main Content
"""


class Transformation:
    def __init__(self, rotation_matrix, translation_vector):
        # 4x4 homogeneous transformation matrix [R | t; 0 0 0 1]
        self.pose_mat = np.zeros((4,4))
        self.pose_mat[:3,:3] = rotation_matrix
        self.pose_mat[:3,3] = translation_vector.flatten()
        self.pose_mat[3,3] = 1
    def apply_transformation(self, points):
        """
        Applies the transformation to the pointcloud

        Parameters:
        -----------
        points : array
            (3, N) matrix where N is the number of points

        Returns:
        ----------
        points_transformed : array
            (3, N) transformed matrix
        """
        assert(points.shape[0] == 3)
        n = points.shape[1]
        points_ = np.vstack((points, np.ones((1, n))))
        points_trans_ = np.matmul(self.pose_mat, points_)
        points_transformed = np.true_divide(points_trans_[:3, :], points_trans_[[-1], :])
        return points_transformed
    def inverse(self):
        """
        Computes the inverse transformation and returns a new Transformation object

        Returns:
        -----------
        inverse: Transformation

        """
        rotation_matrix = self.pose_mat[:3,:3]
        translation_vector = self.pose_mat[:3,3]

        rot = np.transpose(rotation_matrix)
        trans = - np.matmul(np.transpose(rotation_matrix), translation_vector)
        return Transformation(rot, trans)
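# Illustrative sketch (not part of the original example): round-tripping points
# through a Transformation and its inverse. All values are made up.
#
#   R = np.eye(3)
#   t = np.array([0.0, 0.0, 1.0])
#   T = Transformation(R, t)
#   pts = np.zeros((3, 4))                       # four points at the origin
#   moved = T.apply_transformation(pts)          # shifted 1 m along Z
#   restored = T.inverse().apply_transformation(moved)
#   # restored should match pts up to floating-point error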
class PoseEstimation:

    def __init__(self, frames, intrinsic, chessboard_params):
        assert(len(chessboard_params) == 3)
        self.frames = frames
        self.intrinsic = intrinsic
        self.chessboard_params = chessboard_params

    def get_chessboard_corners_in3d(self):
        """
        Searches for the chessboard corners in the infrared image of
        every connected device and uses the information in the
        corresponding depth image to calculate the 3D
        coordinates of the chessboard corners in the coordinate system of
        the camera

        Returns:
        -----------
        corners3D : dict
            keys: str
                Serial number of the device
            values: [success, points2D, points3D, validDepths]
                success: bool
                    Indicates whether the operation was successful
                points2D: array
                    (2,N) matrix with the pixel coordinates of the detected chessboard corners
                points3D: array
                    (3,N) matrix with the coordinates of the chessboard corners
                    in the coordinate system of the camera. N is the number of corners
                    in the chessboard. May contain points with invalid depth values
                validDepths: [bool]*
                    Sequence of length N indicating which points in points3D have a valid depth value
        """
        corners3D = {}
        for (info, frameset) in self.frames.items():
            serial = info[0]
            product_line = info[1]
            depth_frame = post_process_depth_frame(frameset[rs.stream.depth])
            infrared_frame = frameset[(rs.stream.infrared, 1)]
            depth_intrinsics = self.intrinsic[serial][rs.stream.depth]
            found_corners, points2D = cv_find_chessboard(depth_frame, infrared_frame, self.chessboard_params)
            corners3D[serial] = [found_corners, None, None, None]
            if found_corners:
                points3D = np.zeros((3, len(points2D[0])))
                validPoints = [False] * len(points2D[0])
                for index in range(len(points2D[0])):
                    corner = points2D[:, index].flatten()
                    depth = get_depth_at_pixel(depth_frame, corner[0], corner[1])
                    if depth != 0 and depth is not None:
                        validPoints[index] = True
                        [X, Y, Z] = convert_depth_pixel_to_metric_coordinate(depth, corner[0], corner[1], depth_intrinsics)
                        points3D[0, index] = X
                        points3D[1, index] = Y
                        points3D[2, index] = Z
                corners3D[serial] = [found_corners, points2D, points3D, validPoints]
        return corners3D
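# Illustrative sketch (not part of the original example): consuming the result of
# get_chessboard_corners_in3d, assuming `pose_estimator` is a PoseEstimation
# instance built from already captured frames.
#
#   corners3D = pose_estimator.get_chessboard_corners_in3d()
#   for serial, (found, points2D, points3D, validDepths) in corners3D.items():
#       if found:
#           valid = points3D[:, validDepths]     # keep only corners with valid depth
#           print(serial, "valid 3D corners:", valid.shape[1])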
    def perform_pose_estimation(self):
        """
        Calculates the extrinsic calibration from the coordinate space of the camera to the
        coordinate space spanned by a chessboard by retrieving the 3D coordinates of the
        chessboard with the depth information and subsequently using the Kabsch algorithm
        to find the optimal rigid transformation between the two coordinate spaces

        Returns:
        -----------
        retval : dict
            keys: str
                Serial number of the device
            values: [success, transformation, points2D, rmsd]
                success: bool
                transformation: Transformation
                    Rigid transformation from the coordinate system of the camera to
                    the coordinate system of the chessboard
                points2D: array
                    (2,N) array of the chessboard corners used for pose estimation
                rmsd: float
                    Root mean square deviation between the observed chessboard corners and
                    the corners in the local coordinate system after transformation
        """
        corners3D = self.get_chessboard_corners_in3d()
        retval = {}
        for (serial, [found_corners, points2D, points3D, validPoints]) in corners3D.items():
            objectpoints = get_chessboard_points_3D(self.chessboard_params)
            retval[serial] = [False, None, None, None]
            if found_corners:
                # initial vectors are just for correct dimension
                valid_object_points = objectpoints[:, validPoints]
                valid_observed_object_points = points3D[:, validPoints]

                # check for sufficient points
                if valid_object_points.shape[1] < 5:
                    print("Not enough points have a valid depth for calculating the transformation")
                else:
                    [rotation_matrix, translation_vector, rmsd_value] = calculate_transformation_kabsch(valid_object_points, valid_observed_object_points)
                    retval[serial] = [True, Transformation(rotation_matrix, translation_vector), points2D, rmsd_value]
                    print("RMS error for calibration with device number", serial, "is :", rmsd_value, "m")
        return retval
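# Illustrative sketch (not part of the original example): a typical calibration
# flow, assuming `frames` and `intrinsics` dictionaries were obtained from the
# realsense_device_manager. The variable names here are hypothetical placeholders.
#
#   chessboard_params = [chessboard_height, chessboard_width, square_size_meters]
#   pose_estimator = PoseEstimation(frames, intrinsics, chessboard_params)
#   transformation_result = pose_estimator.perform_pose_estimation()
#   for serial, (ok, transformation, points2D, rmsd_value) in transformation_result.items():
#       if ok:
#           print(serial, "calibrated, RMS error:", rmsd_value, "m")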
    def find_chessboard_boundary_for_depth_image(self):
        boundary = {}

        for (info, frameset) in self.frames.items():
            serial = info[0]
            product_line = info[1]
            depth_frame = post_process_depth_frame(frameset[rs.stream.depth])
            infrared_frame = frameset[(rs.stream.infrared, 1)]
            found_corners, points2D = cv_find_chessboard(depth_frame, infrared_frame, self.chessboard_params)
            boundary[serial] = [np.floor(np.amin(points2D[0, :])).astype(int),
                                np.floor(np.amax(points2D[0, :])).astype(int),
                                np.floor(np.amin(points2D[1, :])).astype(int),
                                np.floor(np.amax(points2D[1, :])).astype(int)]

        return boundary
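# Illustrative sketch (not part of the original example): the boundary values are
# [min_x, max_x, min_y, max_y] pixel coordinates of the detected chessboard, which
# can be used, for instance, to crop a depth image to the chessboard region.
# `depth_image` below is a hypothetical (H, W) numpy array.
#
#   boundaries = pose_estimator.find_chessboard_boundary_for_depth_image()
#   min_x, max_x, min_y, max_y = boundaries[serial]
#   chessboard_region = depth_image[min_y:max_y, min_x:max_x]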