import numpy
from numpy import matrix, reshape, array, zeros, diag, real
import scipy.linalg

import roslib; roslib.load_manifest('calibration_estimation')
import rospy

from sensor_msgs.msg import JointState
class CameraChainBundler:
    Tool used to generate a list of CameraChain sensors from a single calibration sample message
        Input:
        - M_robot: A calibration sample of type calibration_msgs/RobotMeasurement
        Returns:
        - sensors: A list of CameraChainSensor objects that correspond to all camera chain sensors found
                   in the calibration message that was passed in.
        sensors = []
        for cur_config in self._valid_configs:
            if cur_config["sensor_id"] in [ x.camera_id for x in M_robot.M_cam ]:
                if "chain_id" in cur_config.keys() and cur_config["chain_id"] != None and cur_config["chain_id"] != "NONE":
                    if cur_config["chain_id"] in [ x.chain_id for x in M_robot.M_chain ]:
                        M_cam   = M_robot.M_cam  [ [ x.camera_id for x in M_robot.M_cam   ].index(cur_config["sensor_id"]) ]
                        M_chain = M_robot.M_chain[ [ x.chain_id  for x in M_robot.M_chain ].index(cur_config["chain_id"])  ]
                        if cur_config["frame_id"] != M_cam.cam_info.header.frame_id:
                            rospy.logwarn('WARNING frame_id of cam_info.header.frame_id({}) from bag file'.format(M_cam.cam_info.header.frame_id))
                            rospy.logwarn('        and sensors.rectified_cams.{}.frame_id({}) of system.yaml differ'.format(cur_config["chain_id"], cur_config["frame_id"]))
                    else:
                        continue
                else:
                    M_cam = M_robot.M_cam[ [ x.camera_id for x in M_robot.M_cam ].index(cur_config["sensor_id"]) ]
                    M_chain = None
                    cur_config["chain_id"] = None
                cur_sensor = CameraChainSensor(cur_config, M_cam, M_chain)
                sensors.append(cur_sensor)
            else:
                rospy.logdebug("  Didn't find block")
        return sensors
class CameraChainSensor:
    def __init__(self, config_dict, M_cam, M_chain):
        Generates a single sensor block for a single configuration
        - config_dict: The configuration for this specific sensor. This looks exactly like
                       a single element from the valid_configs list passed into CameraChainBundler.__init__
        - M_cam: The camera measurement of type calibration_msgs/CameraMeasurement
        - M_chain: The chain measurement of type calibration_msgs/ChainMeasurement
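        # Added note (illustrative only, not from the original source; all key values are
        # hypothetical): a config_dict entry is expected to carry at least the keys this
        # class reads, for example
        #   {'sensor_id': 'head_camera', 'chain_id': 'head_chain',
        #    'frame_id': 'head_camera_rgb_optical_frame', 'baseline_rgbd': 0.075}
        # where 'baseline_rgbd' is only present for RGB-D style cameras and switches the
        # sensor to three residual terms (u, v, disparity) per target point.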
109 if "baseline_rgbd" in config_dict.keys():
        On each optimization cycle the set of system parameters that we're optimizing over will change. Thus,
        after each change in parameters we must call update_config to ensure that our calculated residual reflects
        the newest set of system parameters.
        Computes the measurement residual for the current set of system parameters and target points
        Input:
        - target_pts: 4xN matrix, storing feature point locations in world Cartesian homogeneous coordinates.
        Output:
        - r: terms_per_sample*N long vector, storing pixel residuals for the target points in the form of
             [u1, v1, u2, v2, ..., uN, vN] or [u1, v1, u'1, u2, ...]

        z_mat = self.get_measurement()
        h_mat = self.compute_expected(target_pts)
        r = array(reshape(h_mat - z_mat, [-1,1]))[:,0]
        return r
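        # Added clarification (sketch, not in the original file): h_mat and z_mat are
        # N x terms_per_sample matrices of expected and measured pixel coordinates, so
        # subtracting and reshaping row-major yields the interleaved residual vector
        # described above. For terms_per_sample == 2:
        #   >>> h = matrix([[10., 20.], [30., 40.]])   # expected (u, v) per point
        #   >>> z = matrix([[ 9., 21.], [30., 38.]])   # measured (u, v) per point
        #   >>> array(reshape(h - z, [-1, 1]))[:, 0]
        #   # -> [ 1., -1.,  0.,  2.]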
        Computes the residual, and then scales it by sqrt(Gamma), where Gamma
        is the information matrix for this measurement (Cov^-1).

        r = self.compute_residual(target_pts)
        gamma_sqrt = self.compute_marginal_gamma_sqrt(target_pts)
        r_scaled = gamma_sqrt * matrix(r).T
        return array(r_scaled.T)[0]
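        # Added note (sketch, not in the original file): multiplying by sqrt(Gamma)
        # "whitens" the residual, so a least-squares solver effectively minimizes the
        # Mahalanobis distance r' * Cov^-1 * r instead of the raw pixel error. Illustrative:
        #   >>> cov = matrix([[4.0, 0.0], [0.0, 1.0]])              # per-point covariance
        #   >>> gamma_sqrt = matrix(real(scipy.linalg.sqrtm(cov.I)))
        #   >>> (gamma_sqrt * matrix([2.0, 2.0]).T).T
        #   # -> [[ 1.,  2.]]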
        Calculates the square root of the information matrix for the measurement of the
        current set of system parameters at the passed in set of target points.

        cov = self.compute_cov(target_pts)
        gamma = matrix(zeros(cov.shape))
        num_pts = cov.shape[0] / self.terms_per_sample
        for k in range(num_pts):
            first = k * self.terms_per_sample
            last = first + self.terms_per_sample
            sub_cov = matrix(cov[first:last, first:last])
            sub_gamma_sqrt_full = matrix(scipy.linalg.sqrtm(sub_cov.I))
            sub_gamma_sqrt = real(sub_gamma_sqrt_full)
            assert(scipy.linalg.norm(sub_gamma_sqrt_full - sub_gamma_sqrt) < 1e-6)
            gamma[first:last, first:last] = sub_gamma_sqrt
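        # Added note: the covariance between different target points is treated as block
        # diagonal, so the information-matrix square root is taken one
        # terms_per_sample x terms_per_sample block at a time. real() strips the tiny
        # imaginary parts that scipy.linalg.sqrtm can introduce through floating point
        # error, and the assert verifies that those parts really were negligible.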
        Get the target's pixel coordinates as measured by the actual sensor

        if self.terms_per_sample == 2:
            return numpy.matrix([[pt.x, pt.y] for pt in self._M_cam.image_points])
        elif self.terms_per_sample == 3:
            return numpy.matrix([[pt.x, pt.y, self._camera.get_disparity(self._M_cam.cam_info.P, pt.z)] for pt in self._M_cam.image_points])
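        # Added note: for a plain monocular camera each detected feature contributes an
        # (x, y) pixel pair; for an RGB-D style camera (terms_per_sample == 3) a disparity
        # term derived from the measured depth pt.z is appended, matching the [u, v, u']
        # residual layout documented in compute_residual.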
        Compute the expected pixel coordinates for a set of target points.
        target_pts: 4xN matrix, storing feature points of the target, in homogeneous coords
        Returns: target points projected into pixel coordinates, in an Nx2 matrix
        Compute what measurement we would expect to see, based on the current system parameters
        and the specified target point locations.
        Input:
        - chain_state: The joint angles of this sensor's chain, of type sensor_msgs/JointState.
        - target_pts: 4xN matrix, storing feature point locations in world Cartesian homogeneous coordinates.
        Returns: target points projected into pixel coordinates, in an Nx2 matrix
        camera_pose_root = self._full_chain.calc_block.fk(chain_state)
        cam_frame_pts = camera_pose_root.I * target_pts
        pixel_pts = self._camera.project(self._M_cam.cam_info.P, cam_frame_pts)
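        # Added sketch (illustrative only, not the project() implementation used here):
        # conceptually the camera model applies the 3x4 projection matrix P to points
        # expressed in the camera frame and then dehomogenizes, as in an ideal pinhole:
        #   >>> P = numpy.matrix([[525., 0., 320., 0.],
        #   ...                   [0., 525., 240., 0.],
        #   ...                   [0.,   0.,   1., 0.]])
        #   >>> pt = numpy.matrix([[0.1], [0.0], [1.0], [1.0]])   # homogeneous camera-frame point
        #   >>> uvw = P * pt
        #   >>> (uvw[:2] / uvw[2]).T
        #   # -> [[ 372.5  240. ]]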
        The output Jacobian J shows how moving target_pts in cartesian space affects
        the expected measurement in (u,v) camera coordinates.
        For n points in target_pts, J is a 2nx3n matrix
        Note: This doesn't seem to be used anywhere, except maybe in some drawing code
        N = len(self._M_cam.image_points)
            x = target_pts[:,k].copy()
                sub_Jt[i,:] = array((fTest - f0) / epsilon)
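                # Added note: the lines above sit inside a pair of (elided) loops over the
                # N target points and their three cartesian coordinates; each coordinate of
                # point k is perturbed by a small epsilon, re-projected, and the forward
                # difference (fTest - f0) / epsilon fills one row of the per-point 3x2
                # sub-Jacobian that is assembled into J.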
        Computes the measurement covariance in pixel coordinates for the given
        set of target points (target_pts)
        - target_pts: 4xN matrix, storing N feature points of the target, in homogeneous coords
        epsilon = 1e-8
        num_joints = len(self._M_chain.chain_state.position)

        x = JointState()
        x.position = self._M_chain.chain_state.position[:]

        f0 = reshape(array(self._compute_expected(x, target_pts)), [-1])
        Jt = zeros([num_joints, len(f0)])
        for i in range(num_joints):
            x.position = [cur_pos for cur_pos in self._M_chain.chain_state.position]
            x.position[i] += epsilon
            fTest = reshape(array(self._compute_expected(x, target_pts)), [-1])
            Jt[i] = (fTest - f0)/epsilon
        cov_angles = [x*x for x in self._full_chain.calc_block._chain._cov_dict['joint_angles']]

        return matrix(Jt).T * matrix(diag(cov_angles)) * matrix(Jt)
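        # Added note (sketch, not in the original file): this propagates the per-joint
        # angle variances through the numerically estimated Jacobian, i.e. a first-order
        # approximation Cov_pixels ~= J * diag(sigma^2) * J'. With made-up numbers:
        #   >>> J = matrix([[2.0, 0.0], [0.0, 3.0]])   # d(pixel)/d(joint angle)
        #   >>> sigma_sq = [0.01, 0.04]                # per-joint angle variance
        #   >>> J * matrix(diag(sigma_sq)) * J.T
        #   # -> [[ 0.04  0.  ]
        #   #     [ 0.    0.36]]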
        Build a dictionary that defines which parameters will in fact affect this measurement.

        Returned dictionary should be of the following form:
            my_transformA: [1, 1, 1, 1, 1, 1]
            my_transformB: [1, 1, 1, 1, 1, 1]
            gearing: [1, 1, ---, 1]

        sparsity['rectified_cams'] = {}
        sparsity['rectified_cams'][self.sensor_id] = dict( [(x,1) for x in self._camera.get_param_names()] )
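        # Added illustration (hypothetical names): for a camera whose model exposes
        # parameters such as baseline_shift, f_shift, cx_shift and cy_shift, the entry
        # built above would look roughly like
        #   {'head_camera': {'baseline_shift': 1, 'f_shift': 1, 'cx_shift': 1, 'cy_shift': 1}}
        # under sparsity['rectified_cams'], i.e. every parameter of this sensor's camera
        # model is flagged as affecting the residual.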