from __future__ import with_statement
__author__ = 'Rosen Diankov'
__copyright__ = 'Copyright (C) 2009-2010 Rosen Diankov (rosen.diankov@gmail.com)'
__license__ = 'Apache License, Version 2.0'

import roslib; roslib.load_manifest('openrave_calibration')
import time, threading, pickle
import math
import numpy
from numpy import *
from optparse import OptionParser
from openravepy import *
from openravepy import examples
from openravepy.interfaces import BaseManipulation

import rospy
from roslib import rostime
import message_filters
import std_msgs.msg
import sensor_msgs.msg
import posedetection_msgs.msg # used by waitForObjectDetectionMessage; assumes the package is a declared manifest dependency

import cv
from cv_bridge import CvBridge, CvBridgeError

from scipy.optimize import leastsq

from itertools import izip

class CalibrateRobotCamera(metaclass.AutoReloader):
    class CalibrationError(Exception):
        def __init__(self,value,action=None):
            self.value=value
            self.action=action
        def __str__(self):
            return repr(self.value)+'; action='+str(self.action)

    def __init__(self,image_raw='image_raw',pattern=None,KK=None,kc=None,timeout=4.0):
        # pattern is a dict with keys corners_x, corners_y (number of inner corners)
        # and spacing_x, spacing_y (distance between adjacent corners in meters)
        self.image_raw=image_raw
        self.bridge = CvBridge()
        self.pattern=pattern
        self.objectdetection = 'ObjectDetection' # topic used by waitForObjectDetectionMessage (assumed default name)
        px=self.pattern['corners_x']*self.pattern['spacing_x']
        py=self.pattern['corners_y']*self.pattern['spacing_y']
        # OpenRAVE box extents are half-widths; the outer (dark green) box adds a margin
        # around the inner (bright green) box that covers the corner grid itself
        self.pattern['type'] = """<KinBody name="calibration">
  <Body name="calibration">
    <Geom type="box">
      <extents>%f %f 0.001</extents>
      <translation>%f %f 0</translation>
      <diffusecolor>0 0.5 0</diffusecolor>
    </Geom>
    <Geom type="box">
      <extents>%f %f 0.002</extents>
      <translation>%f %f 0</translation>
      <diffusecolor>0 1 0</diffusecolor>
    </Geom>
  </Body>
</KinBody>
"""%(px*0.5+2.0*self.pattern['spacing_x'],py*0.5+2.0*self.pattern['spacing_y'],px*0.5,py*0.5,px*0.5,py*0.5,px*0.5,py*0.5)
        self.obstaclexml = """<KinBody name="obstacle">
  <Body name="obstacle">
    <Geom type="box">
      <extents>%f %f 0.001</extents>
      <translation>0 0 0.001</translation>
      <diffusecolor>1 0 0</diffusecolor>
    </Geom>
  </Body>
</KinBody>"""%(px+0.1,py+0.1)
        self.timeout=timeout
        self.cvKK = None if KK is None else cv.fromarray(KK)
        self.cvkc = None if kc is None else cv.fromarray(kc)

    def detect(self, cvimage):
        corners_x = self.pattern['corners_x']
        corners_y = self.pattern['corners_y']
        found, corners = cv.FindChessboardCorners(cvimage, (corners_x, corners_y), cv.CV_CALIB_CB_ADAPTIVE_THRESH)
        if found:
            # use the four extreme corners to estimate the average square size in pixels
            board_corners = (corners[0], corners[corners_x - 1], corners[(corners_y - 1) * corners_x], corners[len(corners) - 1])
            perimeter = 0.0
            for i in range(len(board_corners)):
                next = (i + 1) % 4
                xdiff = board_corners[i][0] - board_corners[next][0]
                ydiff = board_corners[i][1] - board_corners[next][1]
                perimeter += math.sqrt(xdiff * xdiff + ydiff * ydiff)
            # subpixel search window: half the estimated square size
            square_size = perimeter / ((corners_x - 1 + corners_y - 1) * 2)
            radius = int(square_size * 0.5 + 0.5)
            corners = array(cv.FindCornerSubPix(cvimage, corners, (radius, radius), (-1, -1), (cv.CV_TERMCRIT_EPS | cv.CV_TERMCRIT_ITER, 30, 0.01)),float64)
            return corners
        else:
            return None

    def getObjectPoints(self):
        """Returns the Nx3 pattern corner positions on the Z=0 plane, ordered row by row to match the detected image corners."""
        object_points=zeros((self.pattern['corners_x']*self.pattern['corners_y'],3),float64)
        X,Y = meshgrid(arange(self.pattern['corners_x'])*self.pattern['spacing_x'],arange(self.pattern['corners_y'])*self.pattern['spacing_y'])
        object_points[:,0] = X.flatten()
        object_points[:,1] = Y.flatten()
        return object_points

    def calibrateIntrinsicCamera(self,all_object_points,all_corners,imagesize,usenonlinoptim=True,fixprincipalpoint=False,computegradients=False):
        pointCounts = vstack([len(object_points) for object_points in all_object_points])
        cvKK = cv.CreateMat(3,3,cv.CV_64F)
        cvkc = cv.CreateMat(5,1,cv.CV_64F)
        cvrvecs = cv.CreateMat(len(pointCounts),3,cv.CV_64F)
        cvtvecs = cv.CreateMat(len(pointCounts),3,cv.CV_64F)
        flags = cv.CV_CALIB_FIX_PRINCIPAL_POINT if fixprincipalpoint else 0
        cv.CalibrateCamera2(cv.fromarray(vstack(all_object_points)),cv.fromarray(vstack(all_corners)),cv.fromarray(pointCounts),imagesize,cvKK,cvkc,cvrvecs,cvtvecs,flags)
        rvecs = array(cvrvecs)
        tvecs = array(cvtvecs)
        KK = array(cvKK)
        kc = array(cvkc)
        Ts = []
        for i in range(len(pointCounts)):
            T = matrixFromAxisAngle(rvecs[i])
            T[0:3,3] = tvecs[i]
            Ts.append(T)
        error = None
        if usenonlinoptim:
            # refine the OpenCV solution with a full nonlinear optimization over all parameters
            x0 = r_[KK[0,0],KK[1,1]]
            if not fixprincipalpoint:
                x0 = r_[x0,KK[0,2],KK[1,2]]
            x0 = r_[x0,kc[:,0]]
            for i in range(len(pointCounts)):
                x0 = r_[x0,rvecs[i],tvecs[i]]
            N = len(all_object_points[0]) # every view must have the same number of corners
            cv_image_points = cv.CreateMat(N,2,cv.CV_64F)
            cv_object_points = [cv.fromarray(x) for x in all_object_points]
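            # Layout of the optimization vector x, which errorfn below unpacks:
            #   x[0:2]          -> focal lengths fx, fy
            #   x[2:4]          -> principal point cx, cy (only when not fixprincipalpoint)
            #   next 5 entries  -> distortion coefficients kc
            #   then 6 per view -> axis-angle rotation rvec and translation tvec
            # The residual vector stacks, per view, all x-errors followed by all y-errors.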
            def errorfn(x):
                xoff = 2
                cvKK[0,0] = x[0]; cvKK[1,1] = x[1]
                if not fixprincipalpoint:
                    cvKK[0,2] = x[2]; cvKK[1,2] = x[3]
                    xoff += 2
                for i in range(5):
                    cvkc[i,0] = x[xoff+i]
                xoff += 5
                e = zeros(len(all_object_points)*N*2)
                off = 0
                for i in range(len(all_object_points)):
                    for j in range(3):
                        cvrvecs[0,j] = x[xoff+6*i+j]
                        cvtvecs[0,j] = x[xoff+6*i+3+j]
                    cv.ProjectPoints2(cv_object_points[i],cvrvecs[0],cvtvecs[0],cvKK,cvkc,cv_image_points)
                    image_points = array(cv_image_points)
                    e[off:(off+len(image_points))] = all_corners[i][:,0]-image_points[:,0]
                    off += len(image_points)
                    e[off:(off+len(image_points))] = all_corners[i][:,1]-image_points[:,1]
                    off += len(image_points)
                return e

            x, success = leastsq(errorfn,x0,maxfev=100000,epsfcn=1e-7)
            if success not in range(1,5): # leastsq returns an integer flag; 1-4 indicate convergence
                raise self.CalibrationError('failed to converge to answer')
            e = errorfn(x)
            abse = sqrt(sum(e**2))
            e2=reshape(e,[len(all_object_points),2*N])**2
            error=mean(sqrt(e2[:,0:N]+e2[:,N:]),1) # per-view mean reprojection error in pixels
            KK[0,0] = x[0]; KK[1,1] = x[1]
            xoff = 2
            if not fixprincipalpoint:
                KK[0,2] = x[2]; KK[1,2] = x[3]
                xoff += 2
            for i in range(5):
                kc[i,0] = x[xoff+i]
            if computegradients:
                deltas = r_[0.01*ones(2 if fixprincipalpoint else 4),0.0001*ones(5)]
                grad = []
                normalization = 1.0/(len(all_object_points)*N*2)
                for i,delta in enumerate(deltas):
                    x[i] += delta
                    e_p = errorfn(x)
                    abse_p = sqrt(sum(e_p**2))
                    x[i] -= 2*delta
                    e_n = errorfn(x)
                    abse_n = sqrt(sum(e_n**2))
                    x[i] += delta
                    # central second difference: curvature of the error norm along each intrinsic parameter
                    grad.append(normalization*(abse_p+abse_n-2*abse)/(delta**2))
                return KK,kc,Ts,error,array(grad)
        return KK,kc,Ts,error

    def validateCalibrationData(self,all_object_points,all_corners):
        """Builds up linear equations using the method of Zhang 1999, "Flexible Camera Calibration By Viewing a Plane From Unknown Orientations". The condition number of the linear equations indicates how well the data constrains the intrinsic parameters. All object points need to lie on the Z=0 plane.
        """
        cvH = cv.fromarray(eye(3))
        B = zeros((6,1))
        B[1,0] = 1 # extra column that biases the solution toward zero skew (b[1])
        for corners,object_points in izip(all_corners,all_object_points):
            if not all(object_points[:,2]==0):
                raise ValueError('all object points need to lie on the Z=0 plane')
            cv.FindHomography(cv.fromarray(object_points[:,0:2]),cv.fromarray(corners),cvH,0)
            H = array(cvH)
            v = []
            for i,j in [(0,1),(0,0),(1,1)]:
                v.append([H[0,i]*H[0,j],H[0,i]*H[1,j]+H[1,i]*H[0,j],H[1,i]*H[1,j],H[2,i]*H[0,j]+H[0,i]*H[2,j],H[2,i]*H[1,j]+H[1,i]*H[2,j],H[2,i]*H[2,j]])
            B = c_[B,array(v[0]),array(v[1])-array(v[2])]
        U,s,Vh = linalg.svd(dot(B,transpose(B)))
        b = U[:,-1]
        A = array([[b[0],b[1],b[3]],[b[1],b[2],b[4]],[b[3],b[4],b[5]]])
        # the recovered conic must be definite (all-positive or all-negative eigenvalues), otherwise the data is degenerate
        Aeigs = linalg.eigvals(A)
        if not all(sign(Aeigs)>0) and not all(sign(Aeigs)<0):
            return None
        print 'condition is: ',sqrt(s[-2]/s[-1])
        if False:
            # closed-form intrinsics from b, kept for debugging
            b = U[:,-1]
            v0 = (b[1]*b[3]-b[0]*b[4])/(b[0]*b[2]-b[1]*b[1])
            lm = b[5]-(b[3]*b[3]+v0*(b[1]*b[3]-b[0]*b[4]))/b[0]
            alpha = sqrt(lm/b[0])
            beta = sqrt(lm*b[0]/(b[0]*b[2]-b[1]*b[1]))
            c = -b[1]*alpha*alpha*beta/lm
            u0 = c*v0/alpha - b[3]*alpha*alpha/lm
            print alpha,beta,u0,v0
        return sqrt(s)/len(all_object_points)

    def waitForRobot(self,robot):
        # wait for the controller to settle by polling until the joint values stop changing
        lastvalues=robot.GetJointValues()
        print 'waiting for robot to stop...'
        while True:
            time.sleep(0.5)
            newvalues=robot.GetJointValues()
            if sqrt(sum((lastvalues-newvalues)**2)) < 0.001:
                break
            lastvalues=newvalues
        time.sleep(0.5) # extra time for vibrations to damp out

    def waitForPattern(self,timeout=None,waitforkey=False,robot=None):
        """Subscribes to the image topic and returns the first detected pattern along with its estimated pose."""
        if robot:
            self.waitForRobot(robot)
        if timeout is None:
            timeout = self.timeout
        timestart=rospy.get_rostime()
        donecond = threading.Condition()
        data = [None]
        def detcb(imagemsg):
            if imagemsg.header.stamp > timestart:
                cvimage = self.bridge.imgmsg_to_cv(imagemsg,'mono8')
                corners = self.detect(cvimage)
                if corners is not None:
                    with donecond:
                        # without an intrinsic calibration, estimate the pose by running a single-view calibration
                        if self.cvKK is None or self.cvkc is None:
                            KK,kc,Ts,error = self.calibrateIntrinsicCamera([self.getObjectPoints()],[corners],(cvimage.width,cvimage.height),usenonlinoptim=False)
                            T = Ts[0]
                        else:
                            cvrvec = cv.CreateMat(1,3,cv.CV_64F)
                            cvtvec = cv.CreateMat(1,3,cv.CV_64F)
                            cv.FindExtrinsicCameraParams2(cv.fromarray(self.getObjectPoints()),cv.fromarray(corners),self.cvKK,self.cvkc,cvrvec,cvtvec)
                            T = matrixFromAxisAngle(array(cvrvec)[0])
                            T[0:3,3] = array(cvtvec)[0]
                        data[0] = {'T':T, 'corners_raw':corners, 'image_raw':array(cvimage)}
                        if 'type' in self.pattern:
                            data[0]['type'] = self.pattern['type']
                        donecond.notifyAll()
        image_sub = rospy.Subscriber(self.image_raw, sensor_msgs.msg.Image,detcb,queue_size=4)
        try:
            with donecond:
                donecond.wait(timeout)
                if data[0] is not None:
                    print 'found pattern'
        finally:
            image_sub.unregister()
        if waitforkey:
            cmd=raw_input('press any key to continue (q-quit)... ')
            if cmd == 'q':
                raise KeyboardInterrupt()
        return data[0]

    def waitForObjectDetectionMessage(self,timeout=None,waitforkey=False,robot=None):
        """Waits for a time-synchronized image/ObjectDetection pair and returns the detected object pose."""
        if robot:
            self.waitForRobot(robot)
        if timeout is None:
            timeout = self.timeout
        timestart=rospy.get_rostime()
        donecond = threading.Condition()
        data = [None]
        def detcb(imagemsg, detmsg):
            if len(detmsg.objects) > 0 and detmsg.header.stamp > timestart:
                with donecond:
                    q = detmsg.objects[0].pose.orientation
                    t = detmsg.objects[0].pose.position
                    cvimage = self.bridge.imgmsg_to_cv(imagemsg)
                    data[0] = {'T':matrixFromPose([q.w,q.x,q.y,q.z,t.x,t.y,t.z]), 'type':detmsg.objects[0].type, 'image':array(cvimage)}
                    donecond.notifyAll()
        image_sub = message_filters.Subscriber(self.image_raw, sensor_msgs.msg.Image)
        det_sub = message_filters.Subscriber(self.objectdetection, posedetection_msgs.msg.ObjectDetection)
        ts = message_filters.TimeSynchronizer([image_sub, det_sub], 10)
        ts.registerCallback(detcb)
        try:
            with donecond:
                donecond.wait(timeout)
        finally:
            del ts
        if waitforkey:
            cmd=raw_input('press any key to continue (q-quit)... ')
            if cmd == 'q':
                raise KeyboardInterrupt()
        return data[0]

    @staticmethod
    def moveRobot(robot,values):
        basemanip = BaseManipulation(robot)
        with robot:
            robot.SetActiveDOFs(arange(robot.GetDOF()))
            basemanip.MoveActiveJoints(values)
        while not robot.GetController().IsDone():
            time.sleep(0.01)

    def gatherCalibrationData(self,robot,sensorname,waitforkey=False,forcecalibintrinsic=False,calibextrinsic=True,maxconedist=0.1,maxconeangle=0.6,sensorrobot=None,**kwargs):
        waitcond=lambda: self.waitForPattern(robot=robot,waitforkey=waitforkey)
        origvalues=None
        env=robot.GetEnv()
        obstacle = None
        observations = []
        try:
            if forcecalibintrinsic or (self.cvKK is None or self.cvkc is None):
                # gather views until the intrinsic calibration is stable
                origvalues=robot.GetJointValues()
                averagedist=0.03
                angledelta=0.3
                while True:
                    observations += examples.calibrationviews.CalibrationViews(robot=robot,sensorrobot=sensorrobot,sensorname=sensorname).computeAndMoveToObservations(waitcond=waitcond,usevisibility=False,angledelta=angledelta,maxconedist=maxconedist,maxconeangle=maxconeangle,averagedist=averagedist,**kwargs)
                    pickle.dump(observations,open('observations_initial.pp','w'))
                    try:
                        KKorig,kcorig,Tcamera,info = self.calibrateFromObservations(observations,False,fixprincipalpoint=True)
                        break
                    except self.CalibrationError,e:
                        print 'cannot compute stable KK, attempting more views',e
                        self.moveRobot(robot,origvalues)
                        # sample more densely on the next pass
                        averagedist=0.02
                        angledelta=0.2
                print 'num observations is %d: '%len(observations),KKorig,kcorig
                self.cvKK = cv.fromarray(KKorig)
                self.cvkc = cv.fromarray(kcorig)
                if not calibextrinsic:
                    return KKorig,kcorig,None,{'observations':observations}
            if origvalues is not None:
                print 'moving robot back to original values'
                self.moveRobot(robot,origvalues)
            # place a red obstacle over the detected pattern so planned motions avoid hitting it
            if self.obstaclexml is not None:
                data=waitcond()
                obstacle = env.ReadKinBodyXMLData(self.obstaclexml)
                env.AddKinBody(obstacle,True)
                obstacle.SetTransform(dot(robot.GetSensor(sensorname).GetTransform(),data['T']))
            newobservations,target = examples.calibrationviews.CalibrationViews.gatherCalibrationData(robot=robot,sensorrobot=sensorrobot,sensorname=sensorname,waitcond=waitcond,**kwargs)
            observations += newobservations
            KKorig,kcorig,Tcamera,info=self.calibrateFromObservations(observations,Tcameraestimate=array(robot.GetSensor(sensorname).GetRelativeTransform(),float64))
            info['observations'] = observations
            return KKorig,kcorig,Tcamera,info
        finally:
            if obstacle is not None:
                env.RemoveKinBody(obstacle)

    def calibrateFromObservations(self,observations,calibextrinsic=True,pruneoutliers=True,full_output=True,fixprincipalpoint=False,Tcameraestimate=None):
        while True:
            if len(observations) < 3:
                raise self.CalibrationError('too few observations: %d!'%len(observations),action=1)
            object_points=self.getObjectPoints()
            all_object_points = []
            all_corners = []
            imagesize = None
            for o in observations:
                all_object_points.append(object_points)
                all_corners.append(o['corners_raw'])
                if imagesize:
                    assert imagesize[0]==o['image_raw'].shape[1] and imagesize[1]==o['image_raw'].shape[0]
                else:
                    imagesize = (o['image_raw'].shape[1],o['image_raw'].shape[0])
            intrinsiccondition = self.validateCalibrationData(all_object_points,all_corners)
            if intrinsiccondition is None:
                raise self.CalibrationError('bad condition number, %d observations'%(len(observations)),action=1)
            KKorig,kcorig,Traws,error_intrinsic,error_grad = self.calibrateIntrinsicCamera(all_object_points,all_corners,imagesize,fixprincipalpoint=fixprincipalpoint,computegradients=True)
            cvKKorig = cv.fromarray(KKorig)
            cvkcorig = cv.fromarray(kcorig)
            thresh=median(error_intrinsic)+0.5
            if any(error_intrinsic>thresh):
                # discard views whose reprojection error is far above the median and recalibrate
                print 'pruning observations (thresh=%f) because intrinsic error is: '%thresh,error_intrinsic
                observations = [o for i,o in enumerate(observations) if error_intrinsic[i] <= thresh]
            else:
                if mean(error_intrinsic) > 0.6:
                    raise self.CalibrationError('intrinsic error is huge (%s)! giving up, KK=(%s)'%(str(error_intrinsic),str(KKorig)))
                break
        if not calibextrinsic:
            return KKorig,kcorig,None,{'error_intrinsic':error_intrinsic,'intrinsiccondition':intrinsiccondition,'error_grad':error_grad}
        if Tcameraestimate is None:
            raise TypeError('Tcameraestimate needs to be initialized to a 4x4 matrix')
        # undistort the images and recompute the per-view extrinsics with zero distortion
        cvKK = cv.CreateMat(3,3,cv.CV_64F)
        cv.GetOptimalNewCameraMatrix(cvKKorig,cvkcorig,imagesize,0,cvKK)
        cvUndistortionMapX = cv.CreateMat(imagesize[1],imagesize[0],cv.CV_32F)
        cvUndistortionMapY = cv.CreateMat(imagesize[1],imagesize[0],cv.CV_32F)
        cv.InitUndistortRectifyMap(cvKKorig,cvkcorig,cv.fromarray(eye(3)),cvKK,cvUndistortionMapX,cvUndistortionMapY)
        KK = array(cvKK)
        KKinv = linalg.inv(KK)
        if full_output:
            print 'KKorig: ',KKorig,' KK:',KK
        cvimage = None
        cvkczero = cv.fromarray(zeros(kcorig.shape))
        cvrvec = cv.CreateMat(1,3,cv.CV_64F)
        cvtvec = cv.CreateMat(1,3,cv.CV_64F)
        all_optimization_data = []
        Tworldpatternestimates = []
        for i,o in enumerate(observations):
            cvimage_dist = cv.fromarray(o['image_raw'])
            if cvimage is None:
                cvimage = cv.CloneMat(cvimage_dist)
            cv.Remap(cvimage_dist,cvimage,cvUndistortionMapX,cvUndistortionMapY,cv.CV_INTER_LINEAR+cv.CV_WARP_FILL_OUTLIERS)
            corners = self.detect(cvimage)
            if corners is not None:
                cv.FindExtrinsicCameraParams2(cv.fromarray(object_points),cv.fromarray(corners),cvKK,cvkczero,cvrvec,cvtvec)
                T = matrixFromAxisAngle(array(cvrvec)[0])
                T[0:3,3] = array(cvtvec)[0]
                # pattern pose in the world frame under the estimated camera transform
                Tworldpatternestimates.append(dot(dot(o['Tlink'],Tcameraestimate),T))
                # store normalized image coordinates (KK^-1 applied) with the inverse link transform
                all_optimization_data.append((transformPoints(KKinv,corners),linalg.inv(o['Tlink'])))
            else:
                print 'could not detect original pattern ',i

        # initialize the world pattern pose with the average of the per-view estimates:
        # average the quaternions by nonlinear least squares and the translations directly
        corners3d = self.getObjectPoints()
        quatWorldPatternEstimates = array([quatFromRotationMatrix(T[0:3,0:3]) for T in Tworldpatternestimates])
        quatWorldPatternInitial,success = leastsq(lambda q: quatArrayTDist(q/sqrt(sum(q**2)),quatWorldPatternEstimates),quatWorldPatternEstimates[0],maxfev=100000,epsfcn=1e-6)
        rWorldPatternInitial = zeros(6,float64)
        rWorldPatternInitial[0:3] = axisAngleFromQuat(quatWorldPatternInitial)
        rWorldPatternInitial[3:6] = mean(array([T[0:3,3] for T in Tworldpatternestimates]),0)
        Tcameraestimateinv = linalg.inv(Tcameraestimate)
        rCameraInitial = zeros(6,float64)
        rCameraInitial[0:3] = axisAngleFromRotationMatrix(Tcameraestimateinv[0:3,0:3])
        rCameraInitial[3:6] = Tcameraestimateinv[0:3,3]
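        # The 12-dim optimization vector below packs two transforms as axis-angle + translation:
        #   x[0:6]  -> Tworldpattern, the pattern pose in the world frame
        #   x[6:12] -> Tcamerainv, the inverse of the camera-in-link transform
        # Residuals compare the measured corners (already in normalized camera coordinates)
        # against the pattern corners mapped through camera <- link <- world <- pattern
        # and projected by dividing by z.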
        def errorfn(x,optimization_data):
            Tworldpattern = matrixFromAxisAngle(x[0:3])
            Tworldpattern[0:3,3] = x[3:6]
            Tcamerainv = matrixFromAxisAngle(x[6:9])
            Tcamerainv[0:3,3] = x[9:12]
            err = zeros(len(optimization_data)*len(corners3d)*2)
            off = 0
            for measuredcameracorners,Tlinkinv in optimization_data:
                cameracorners3d = transformPoints(dot(dot(Tcamerainv,Tlinkinv),Tworldpattern),corners3d)
                iz=1.0/cameracorners3d[:,2]
                err[off:(off+len(corners3d))] = measuredcameracorners[:,0]-cameracorners3d[:,0]*iz
                off += len(corners3d)
                err[off:(off+len(corners3d))] = measuredcameracorners[:,1]-cameracorners3d[:,1]*iz
                off += len(corners3d)
            if full_output:
                print 'rms: ',sqrt(sum(err**2))
            return err

        optimization_data=all_optimization_data
        xorig, cov_x, infodict, mesg, ier = leastsq(lambda x: errorfn(x,all_optimization_data),r_[rWorldPatternInitial,rCameraInitial],maxfev=100000,epsfcn=1e-6,full_output=1)
        if pruneoutliers:
            # prune views whose reprojection error (converted back to pixels) is far above the median, then re-optimize
            errors=reshape(errorfn(xorig,all_optimization_data)**2,(len(all_optimization_data),len(corners3d)*2))
            errorreprojection=mean(sqrt(KK[0,0]**2*errors[:,0:len(corners3d)]+KK[1,1]**2*errors[:,len(corners3d):]),1)
            thresh=median(errorreprojection)+20
            print 'thresh:',thresh,'errors:',errorreprojection
            optimization_data = [all_optimization_data[i] for i in flatnonzero(errorreprojection<=thresh)]
            x, cov_x, infodict, mesg, ier = leastsq(lambda x: errorfn(x,optimization_data),xorig,maxfev=100000,epsfcn=1e-6,full_output=1)
        else:
            x=xorig
        Tcamerainv = matrixFromAxisAngle(x[6:9])
        Tcamerainv[0:3,3] = x[9:12]
        Tcamera = linalg.inv(Tcamerainv)
        Tworldpattern = matrixFromAxisAngle(x[0:3])
        Tworldpattern[0:3,3] = x[3:6]
        points3d = self.Compute3DObservationPoints(Tcamerainv,optimization_data)
        if full_output:
            errors=reshape(errorfn(x,optimization_data)**2,(len(optimization_data),len(corners3d)*2))
            error_reprojection=sqrt(KK[0,0]**2*errors[:,0:len(corners3d)]+KK[1,1]**2*errors[:,len(corners3d):]).flatten()
            print 'final reprojection error (pixels): ',mean(error_reprojection),std(error_reprojection)
            error_3d = sqrt(sum((transformPoints(Tworldpattern,corners3d)-points3d)**2,1))
            print '3d error: ',mean(error_3d),std(error_3d)
            return KKorig,kcorig,Tcamera,{'error_reprojection':error_reprojection,'error_3d':error_3d,'error_intrinsic':error_intrinsic,'intrinsiccondition':intrinsiccondition,'error_grad':error_grad,'KK':array(cvKK)}
        return KKorig,kcorig,Tcamera,None

    @staticmethod
    def Compute3DObservationPoints(Tcamerainv,optimization_data):
        Ps = [dot(Tcamerainv,Tlinkinv) for measuredcameracorners,Tlinkinv in optimization_data]
        points = zeros((len(optimization_data[0][0]),3))
        for i in range(len(points)):
            points[i] = CalibrateRobotCamera.Compute3DPoint(Ps,[measuredcameracorners[i] for measuredcameracorners,Tlinkinv in optimization_data])
        return points

    @staticmethod
    def Compute3DPoint(Ps,imagepoints):
        """Triangulates a single 3D point from its normalized image coordinates in multiple views."""
        A = zeros((2*len(imagepoints),3))
        b = zeros(2*len(imagepoints))
        for i in range(len(Ps)):
            A[2*i] = Ps[i][0,0:3]-imagepoints[i][0]*Ps[i][2,0:3]
            A[2*i+1] = Ps[i][1,0:3]-imagepoints[i][1]*Ps[i][2,0:3]
            b[2*i] = imagepoints[i][0]*Ps[i][2,3]-Ps[i][0,3]
            b[2*i+1] = imagepoints[i][1]*Ps[i][2,3]-Ps[i][1,3]
        x,residues,rank,s = linalg.lstsq(A,b)
        return x

    @staticmethod
    def symbolicderivatives():
        """Symbolic derivatives using sympy; for now too complicated to use. A quaternion representation would be simpler."""
        from sympy import Symbol, Matrix, eye, diff # experimental code; assumes sympy and openravepy's ikfast module are available
        from openravepy import ikfast
        axis0 = [Symbol('rx0'),Symbol('ry0'),Symbol('rz0'),Symbol('a0')]
        axis1 = [Symbol('rx1'),Symbol('ry1'),Symbol('rz1'),Symbol('a1')]
        Ts = [eye(4),eye(4),eye(4)]
        Ts[0][0:3,0:3]=ikfast.IKFastSolver.rodrigues(Matrix(3,1,axis0[0:3]),axis0[3])
        for i in range(3):
            for j in range(3):
                Ts[1][i,j] = Symbol('r%d%d'%(i,j))
        Ts[2][0:3,0:3]=ikfast.IKFastSolver.rodrigues(Matrix(3,1,axis1[0:3]),axis1[3])
        quats = [[Symbol('q%dx'%i),Symbol('q%dy'%i),Symbol('q%dz'%i),Symbol('q%dw'%i)] for i in range(3)]
        translations = [Matrix(3,1,[Symbol('t%dx'%i),Symbol('t%dy'%i),Symbol('t%dz'%i)]) for i in range(3)]
        X = Matrix(3,1,[Symbol('x'),Symbol('y'),Symbol('z')])
        Xq = X
        for i in range(3):
            # rotate Xq by quats[i] and translate
            q = quats[i]
            xx = q[1] * q[1]
            xy = q[1] * q[2]
            xz = q[1] * q[3]
            xw = q[1] * q[0]
            yy = q[2] * q[2]
            yz = q[2] * q[3]
            yw = q[2] * q[0]
            zz = q[3] * q[3]
            zw = q[3] * q[0]
            Xq = translations[i]+Matrix(3,1,[(0.5-yy-zz)*Xq[0]+(xy-zw)*Xq[1]+(xz+yw)*Xq[2],(xy+zw)*Xq[0]+(0.5-xx-zz)*Xq[1]+(yz-xw)*Xq[2],(xz-yw)*Xq[0]+(yz+xw)*Xq[1]+(0.5-xx-yy)*Xq[2]])
        T = Ts[0]*Ts[1]*Ts[2]
        Xfinal=T[0:3,0:3]*X+T[0:3,3]
        cornerx=Xfinal[0]/Xfinal[2]
        cornery=Xfinal[1]/Xfinal[2]
        return [diff(cornerx,axis0[i]) for i in range(4)] + [diff(cornerx,axis1[i]) for i in range(4)]

if __name__ == "__main__":
    parser = OptionParser(description='Views a calibration pattern from multiple locations.')
    parser.add_option('--scene',action="store",type='string',dest='scene',default='data/pa10calib.env.xml',
                      help='Scene file to load (default=%default)')
    parser.add_option('--sensorname',action="store",type='string',dest='sensorname',default='wristcam',
                      help='Name of the sensor whose views to generate (default=%default)')
    (options, args) = parser.parse_args()
    rospy.init_node('calibraterobotcamera')
    env = Environment()
    try:
        env.SetViewer('qtcoin')
        env.Load(options.scene)
        env.UpdatePublishedBodies()
        time.sleep(0.1)
        # NOTE: a pattern dict (corners_x/corners_y/spacing_x/spacing_y) must be supplied for detection to work
        self = CalibrateRobotCamera()
        self.gatherCalibrationData(env.GetRobots()[0],sensorname=options.sensorname)
    finally:
        env.Destroy()

def test():
    import calibraterobotcamera
    rospy.init_node('calibraterobotcamera',disable_signals=False)
    env = Environment()
    env.SetViewer('qtcoin')
    env.Load('scenes/pa10lab.env.xml')
    robot = env.GetRobots()[0]
    sensorname='wristcam'
    self = calibraterobotcamera.CalibrateRobotCamera()
    self.gatherCalibrationData(robot,sensorname=sensorname)
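
def example_pattern():
    """A minimal sketch, not part of the original script: shows the pattern dict that
    CalibrateRobotCamera expects; the checkerboard dimensions below are hypothetical."""
    pattern = {'corners_x':8, 'corners_y':6, 'spacing_x':0.03, 'spacing_y':0.03} # 8x6 inner corners, 3cm squares (example values)
    return CalibrateRobotCamera(image_raw='image_raw', pattern=pattern)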