#! /usr/bin/env python
# Builds a synthetic two-camera, two-target calibration scene, renders noisy
# checkerboard observations of it, perturbs one camera pose, and runs the
# optimizer on the result.

import sys, time, optparse
import itertools
import collections
from numpy import *
import random

import roslib
roslib.load_manifest('camera_pose_calibration')
import PyKDL
from tf_conversions import posemath

from geometry_msgs.msg import Point32
from calibration_msgs.msg import ImagePoint, CalibrationPattern
from camera_pose_calibration.msg import CalibrationEstimate, CameraPose, RobotMeasurement, CameraMeasurement

from camera_pose_calibration import init_optimization_prior
from camera_pose_calibration import estimate


# checkerboard points in checkerboard frame: a 2x2 grid of corners at z = 1
check_points = []
for x in range(0, 2):
    for y in range(-1, 1):
        check_points.append(PyKDL.Vector(x, y, 1))


# generate cameras and targets
cal_estimate = CalibrationEstimate()

camera_a = CameraPose()
camera_a.camera_id = 'cam_a'
#camera_a.pose = posemath.toMsg(PyKDL.Frame(PyKDL.Rotation.RPY(0, pi/2.0, 0), PyKDL.Vector(0, 0, 0)))
camera_a.pose = posemath.toMsg(PyKDL.Frame(PyKDL.Vector(0, 0, 0)))

camera_b = CameraPose()
camera_b.camera_id = 'cam_b'
#camera_b.pose = posemath.toMsg(PyKDL.Frame(PyKDL.Rotation.RPY(0, pi/2.0, 0), PyKDL.Vector(0, -1, 0)))
camera_b.pose = posemath.toMsg(PyKDL.Frame(PyKDL.Vector(0, -1, 0)))

#target_1 = posemath.toMsg(PyKDL.Frame(PyKDL.Rotation.RPY(0, pi/7.0, 0), PyKDL.Vector(1, 0, 0)))
target_1 = posemath.toMsg(PyKDL.Frame(PyKDL.Vector(0, -2, 1)))
#target_2 = posemath.toMsg(PyKDL.Frame(PyKDL.Rotation.RPY(0, pi/3.0, 0), PyKDL.Vector(2, 0, 0)))
target_2 = posemath.toMsg(PyKDL.Frame(PyKDL.Vector(0, 2, 1)))

cal_estimate.cameras = [camera_a, camera_b]
cal_estimate.targets = [target_1, target_2]
print cal_estimate


# generate samples
noise = 5.0   # uniform pixel noise added to each projected point
offset = PyKDL.Frame(PyKDL.Rotation.RPY(0.1, 0.1, 0), PyKDL.Vector(0, 0.1, 0))  # perturbation applied to cam_a below

# 3x4 pinhole projection matrix (fx = fy = 525, cx = 319.5, cy = 239.5)
#P = [1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0]
P = [525, 0, 319.5, 0, 0, 525, 239.5, 0, 0, 0, 1, 0]
P_mat = reshape(matrix(P, float), (3, 4))

cal_pattern = CalibrationPattern()
for c in check_points:
    pnt = Point32()
    pnt.x = c[0]
    pnt.y = c[1]
    pnt.z = c[2]
    cal_pattern.object_points.append(pnt)

cal_samples = []
for target in cal_estimate.targets:
    cal_sample = RobotMeasurement()
    for camera in cal_estimate.cameras:
        meas = CameraMeasurement()
        meas.camera_id = camera.camera_id
        for pnt_c in check_points:
            # transform the corner into the camera frame, project it, and add pixel noise
            pnt_msg = posemath.fromMsg(camera.pose).Inverse() * posemath.fromMsg(target) * pnt_c
            pnt_mat = P_mat * matrix([pnt_msg[0], pnt_msg[1], pnt_msg[2], 1]).T
            pnt = ImagePoint()
            pnt.x = (pnt_mat[0]/pnt_mat[2]) + random.random()*noise
            pnt.y = (pnt_mat[1]/pnt_mat[2]) + random.random()*noise
            pnt.d = 1
            meas.image_points.append(pnt)
        meas.cam_info.P = P
        meas.features = cal_pattern
        cal_sample.M_cam.append(meas)
    cal_samples.append(cal_sample)
print cal_samples


# add offset to cam_a's pose estimate so the optimizer has an error to correct
cal_estimate.cameras[0].pose = posemath.toMsg(posemath.fromMsg(cal_estimate.cameras[0].pose) * offset)
#cal_estimate.cameras[1].pose = posemath.toMsg(posemath.fromMsg(cal_estimate.cameras[1].pose) * PyKDL.Frame(offset))

# Run optimization
estimate.enhance(cal_samples, cal_estimate)

#residual, J = estimate.calculate_residual_and_jacobian(cal_samples, cal_estimate)

#set_printoptions(linewidth=300, precision=5, suppress=True)
#print "Jacobian:\n", J
#print "Residual:\n", reshape(residual, [-1,2])

#step = linalg.pinv(J)*residual
#print "Unscaled Step:\n", reshape(step, [-1, 6]).T
#print "RMS: ", estimate.rms(residual)
#next_estimate = estimate.oplus(cal_estimate, step)
#print "Next Estimate:\n", next_estimate
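
For reference, the image points synthesized above come from a standard pinhole projection: each checkerboard corner is transformed into the camera frame, multiplied by the 3x4 projection matrix P, and divided by its third (depth) component before pixel noise is added. The snippet below is a minimal, self-contained sketch of that step using only numpy; the project helper is illustrative and not part of the camera_pose_calibration package.

import numpy

# Same intrinsics as the P list above: fx = fy = 525, cx = 319.5, cy = 239.5.
P = numpy.array([[525.0,   0.0, 319.5, 0.0],
                 [  0.0, 525.0, 239.5, 0.0],
                 [  0.0,   0.0,   1.0, 0.0]])

def project(point_in_camera_frame):
    # Homogeneous projection followed by division by depth.
    x, y, z = point_in_camera_frame
    u, v, w = P.dot(numpy.array([x, y, z, 1.0]))
    return (u / w, v / w)

# A point on the optical axis, one unit in front of the camera, lands on the
# principal point.
print(project((0.0, 0.0, 1.0)))   # -> (319.5, 239.5)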