PointPoseExtraction.py
#!/usr/bin/env python
#
# Copyright (C) 2009-2010 Rosen Diankov
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import with_statement # for python 2.5
__author__ = 'Rosen Diankov'
__copyright__ = 'Copyright (C) 2009-2010 Rosen Diankov (rosen.diankov@gmail.com)'
__license__ = 'Apache License, Version 2.0'

PKG = 'posedetectiondb' # this package name
import roslib; roslib.load_manifest(PKG)
import os, sys, time, threading, struct, pickle
from optparse import OptionParser
import numpy, scipy # nice to be able to explicitly call some functions
from numpy import *

from openravepy import *
from openravepy import pyANN
from openravepy.misc import *

import cv
from cv_bridge import CvBridge, CvBridgeError

import rospy
from roslib import rostime
import std_msgs.msg
import posedetection_msgs.msg
import posedetection_msgs.srv
from posedetectiondb.srv import *

class detection_error(Exception):
    def __init__(self,parameter=''):
        self.parameter = parameter
    def __str__(self):
        return 'detection error: '+repr(self.parameter)

class PointPoseExtractor(metaclass.AutoReloader):
    """Core 2D/3D correspondence algorithm"""
    def __init__(self,type,points3d,descriptors):
        if len(descriptors) < 1:
            raise detection_error('no descriptor found')
        self.type = type
        self.points3d = points3d
        self.desckdtree = pyANN.KDTree(descriptors)
        self.besterr_thresh = 0.001
        self.cvKK = cv.fromarray(eye(3))
        self.cvDist = cv.fromarray(zeros((4,1)))
        self.rvec = cv.CreateMat(3,1,cv.CV_32FC1)
        self.tvec = cv.CreateMat(3,1,cv.CV_32FC1)
        self.ninitial = 4
        self.object_points = cv.CreateMatHeader(3,self.ninitial,cv.CV_32FC1)
        self.image_points = cv.CreateMatHeader(2,self.ninitial,cv.CV_32FC1)

    def extractpose(self,KK,positions,descriptors,conf,imagewidth,verboselevel=1,neighthresh=None,thresh=None,dminexpected=None,ransaciters=None):
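        """Matches the input features against the template and runs RANSAC pose fitting.

        KK is the 3x3 camera intrinsic matrix, positions is an Nx2 array of pixel
        coordinates, descriptors is an NxD array matching the template descriptor
        dimension, and conf holds per-feature confidences (currently unused).
        Returns (T, projerror): the 4x4 template pose in the camera frame and the
        mean reprojection error of the inliers in normalized image coordinates.
        Raises detection_error when too few matches survive or RANSAC fails.
        """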
        starttime = time.time()
        neighs,dists = self.desckdtree.kSearchArray(descriptors,2,0.0001)
        if neighthresh is None:
            neighthresh = 0.8-imagewidth*0.3/1024.0
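        # Lowe-style ratio test on the two nearest template descriptors: keep a
        # match only if its nearest neighbor is significantly closer than the
        # second nearest (the test uses squared distances, hence neighthresh**2)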
        goodinds = flatnonzero(dists[:,0] < neighthresh**2 * dists[:,1])
        if len(goodinds) < 20:
            raise detection_error('not enough good indices')
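        # work in normalized image coordinates: apply the inverse intrinsics to the
        # pixel positions so the RANSAC threshold is independent of the focal length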
        iKK = linalg.inv(KK)
        points2d = dot(positions[goodinds],transpose(iKK[0:2,0:2])) + tile(iKK[0:2,2],(len(goodinds),1))
        data = c_[points2d,self.points3d[neighs[goodinds,0],:]]#,conf[goodinds])
        if dminexpected is None:
            dminexpected = min(10+int(imagewidth*40.0/1024.0),len(goodinds)/2)
        if thresh is None:
            thresh = ((imagewidth/1024.0)*15.0/KK[0,0])
        if ransaciters is None:
            ransaciters = 1000
        if verboselevel > 0:
            print 'thresh=%f,neighthresh=%f,dmin=%d,ransaciters=%d,goodinds=%d'%(thresh,neighthresh,dminexpected,ransaciters,len(goodinds))
        T,infodict = self.ransac(data=data,model=self,n=self.ninitial,k=ransaciters,t=thresh,d=dminexpected,return_all=True,besterr_thresh=self.besterr_thresh)
        inliers = infodict['inliers']
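        # score the fit: reproject the inlier 3d points with the estimated pose and
        # measure the mean euclidean error against the normalized 2d measurements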
        pts = transformPoints(T,data[inliers,2:5])
        iz = 1/pts[:,2]
        projerror = mean(sqrt((data[inliers,0]-pts[:,0]*iz)**2 + (data[inliers,1]-pts[:,1]*iz)**2))
        if verboselevel>0:
            print 'extract time: %fs, err: %f, inliers: %d'%(time.time()-starttime,projerror,len(inliers))
        return T, projerror

    def fit(self,data):
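        """RANSAC model hook: fits a pose to rows of [x,y,X,Y,Z] correspondences.

        The 2d points are assumed to already be in normalized image coordinates,
        so cv.FindExtrinsicCameraParams2 is called with identity intrinsics and
        zero distortion. Returns a 4x4 transform, or None for degenerate data.
        """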
        m = mean(data,0)
        diff = data-tile(m,(data.shape[0],1))
        area0 = abs(linalg.det(dot(transpose(diff[:,0:2]),diff[:,0:2])))
        if area0 < 0.00001: # check that the 2d points span enough area
            #print 'degenerate 2d data %f'%area0
            return None
        # have to compute if the 3d points are collinear or not
        eigvalues = linalg.eigvalsh(dot(transpose(diff[:,2:5]),diff[:,2:5]))
        if sum(abs(eigvalues)<=1e-9) >= 2: # two near-zero eigenvalues mean the 3d points are collinear
            #print 'degenerate 3d points',eigvalues
            return None
        if data.shape[0] == self.ninitial:
            object_points = self.object_points
            image_points = self.image_points
        else:
            object_points = cv.CreateMatHeader(3,data.shape[0],cv.CV_32FC1)
            image_points = cv.CreateMatHeader(2,data.shape[0],cv.CV_32FC1)
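        # pack the correspondences into row-major 3xN/2xN float32 headers; the last
        # argument is the row stride in bytes (4 bytes per float32, N values per row)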
        cv.SetData(object_points,struct.pack('f'*(data.shape[0]*3),*transpose(data[:,2:5]).flat),4*data.shape[0])
        cv.SetData(image_points,struct.pack('f'*(data.shape[0]*2),*transpose(data[:,0:2]).flat),4*data.shape[0])
        cv.FindExtrinsicCameraParams2(object_points,image_points,self.cvKK,self.cvDist,self.rvec,self.tvec)
        #cv.FindExtrinsicCameraParams2(cv.fromarray(data[:,2:5]),cv.fromarray(data[:,0:2]),self.cvKK,self.cvDist,self.rvec,self.tvec)
        T = matrixFromAxisAngle((self.rvec[0,0],self.rvec[1,0],self.rvec[2,0]))
        T[0:3,3] = [self.tvec[0,0],self.tvec[1,0],self.tvec[2,0]]
        # reject poses whose texture plane faces away from the image (T[2,2] < 0)
        if T[2,2] < 0:
            return None
        return T
    def get_error(self,data,T):
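        """RANSAC model hook: per-point reprojection error of pose T in normalized image coordinates."""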
        if T is None:
            return inf
        pts = transformPoints(T,data[:,2:5])
        iz = 1/pts[:,2]
        return sqrt((data[:,0]-pts[:,0]*iz)**2 + (data[:,1]-pts[:,1]*iz)**2)
    def ransac(self,data,model,n,k,t,d,debug=False,return_all=False,besterr_thresh=None):
        """fit model parameters to data using the RANSAC algorithm

        This implementation was written from pseudocode found at
        http://en.wikipedia.org/w/index.php?title=RANSAC&oldid=116358182

        {{{
        Given:
            data - a set of observed data points
            model - a model that can be fitted to data points
            n - the minimum number of data values required to fit the model
            k - the maximum number of iterations allowed in the algorithm
            t - a threshold value for determining when a data point fits a model
            d - the number of close data values required to assert that a model fits well to data
        Return:
            bestfit - model parameters which best fit the data (or nil if no good model is found)
        iterations = 0
        bestfit = nil
        besterr = something really large
        while iterations < k {
            maybeinliers = n randomly selected values from data
            maybemodel = model parameters fitted to maybeinliers
            alsoinliers = empty set
            for every point in data not in maybeinliers {
                if point fits maybemodel with an error smaller than t
                     add point to alsoinliers
            }
            if the number of elements in alsoinliers is > d {
                % this implies that we may have found a good model
                % now test how good it is
                bettermodel = model parameters fitted to all points in maybeinliers and alsoinliers
                thiserr = a measure of how well model fits these points
                if thiserr < besterr {
                    bestfit = bettermodel
                    besterr = thiserr
                }
            }
            increment iterations
        }
        return bestfit
        }}}

        Copyright (c) 2004-2007, Andrew D. Straw. All rights reserved.

        Redistribution and use in source and binary forms, with or without
        modification, are permitted provided that the following conditions are
        met:
            * Redistributions of source code must retain the above copyright
              notice, this list of conditions and the following disclaimer.

            * Redistributions in binary form must reproduce the above
              copyright notice, this list of conditions and the following
              disclaimer in the documentation and/or other materials provided
              with the distribution.

            * Neither the name of Andrew D. Straw nor the names of its
              contributors may be used to endorse or promote products derived
              from this software without specific prior written permission.
        """
        iterations = 0
        bestfit = None
        besterr = numpy.inf
        best_inlier_idxs = None
        while iterations < k:
            maybe_idxs, test_idxs = self.random_partition(n,data.shape[0])
            maybeinliers = data[maybe_idxs,:]
            test_points = data[test_idxs]
            maybemodel = model.fit(maybeinliers)
            if maybemodel is None:
                iterations += 1
                continue
            test_err = model.get_error(test_points, maybemodel)
            also_idxs = test_idxs[test_err < t] # select indices of rows with accepted points
            if debug:
                print 'test_err.min()',test_err.min()
                print 'test_err.max()',test_err.max()
                print 'numpy.mean(test_err)',numpy.mean(test_err)
                print 'iteration %d:len(alsoinliers) = %d'%(
                    iterations,len(also_idxs))
            if len(also_idxs) > d:
                alsoinliers = data[also_idxs,:]
                betterdata = numpy.concatenate((maybeinliers, alsoinliers))
                bettermodel = model.fit(betterdata)
                better_errs = model.get_error(betterdata, bettermodel)
                thiserr = numpy.mean(better_errs)
                if thiserr < besterr:
                    bestfit = bettermodel
                    besterr = thiserr
                    best_inlier_idxs = numpy.concatenate((maybe_idxs, also_idxs))
                    if besterr_thresh is not None and besterr < besterr_thresh:
                        break
            iterations += 1
        if bestfit is None:
            raise detection_error("did not meet fit acceptance criteria")
        if return_all:
            return bestfit, {'inliers':best_inlier_idxs}
        else:
            return bestfit
    def random_partition(self,n,n_data):
        """return n random rows of data (and also the other len(data)-n rows)"""
        all_idxs = numpy.arange(n_data)
        numpy.random.shuffle(all_idxs)
        idxs1 = all_idxs[:n]
        idxs2 = all_idxs[n:]
        return idxs1, idxs2

class ROSPlanarPoseProcessor(metaclass.AutoReloader):
    """Connects to a ROS Feature0D message stream, extracts the object pose, and publishes it"""
    def __init__(self,type=None,templateppfilename=None,errorthresh=None,neighthresh=None,thresh=None,dminexpected=None,Tright=eye(4),showgui=False,verboselevel=1,ransaciters=None):
        self.verboselevel=verboselevel
        self.Tright = Tright
        self.type=type
        self.pe = None
        self.boundingbox=None
        self.cv_image=None
        self.templateppfilename = templateppfilename
        self.featuretype = None
        if self.templateppfilename is not None:
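            # a template .pp file is a pickled dict with keys 'type', 'points3d' and
            # 'descriptors', plus optional 'Itemplate', 'boundingbox', 'Tright' and 'featuretype'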
            try:
                template = pickle.load(open(templateppfilename,'r'))
                self.type=template['type']
                self.Itemplate = template.get('Itemplate',None)
                self.boundingbox = template.get('boundingbox',None)
                self.Tright = template.get('Tright',eye(4))
                self.featuretype = template.get('featuretype',None)
                self.pe = PointPoseExtractor(type=self.type,points3d=template['points3d'],descriptors=template['descriptors'])
            except IOError,e:
                print 'failed to create template: ',e
        if self.templateppfilename is None:
            self.templateppfilename = 'template.pp'
        self.errorthresh = errorthresh
        self.neighthresh=neighthresh
        self.thresh=thresh
        self.dminexpected=dminexpected
        self.ransaciters=ransaciters
        self.imagepoints = []
        self.extractionlck = threading.Lock()
        self.bridge = CvBridge()
        self.cvwindow = None
        self.cvwindowstarted=False
        self.trimesh = None
        if showgui:
            # get the trimesh from openrave
            env = Environment()
            try:
                if self.type is not None:
                    if self.type.startswith('package://'):
                        packagesplit = self.type[10:].find('/')
                        filename = roslib.packages.get_pkg_dir(self.type[10:(10+packagesplit)])+'/'+self.type[(10+packagesplit+1):]
                    else:
                        filename = self.type
                    body = env.ReadKinBodyXMLFile(filename)
                    env.AddKinBody(body)
                    self.trimesh = env.Triangulate(body)
            except openrave_exception,e:
                print 'cannot create trimesh for %s: '%self.type,e
            finally:
                env.Destroy()
            self.cvwindow = 'ImageDisplay (L-add point,R-reset)'
        self.pub_objdet = rospy.Publisher('ObjectDetection', posedetection_msgs.msg.ObjectDetection)
        self.sub_feature = rospy.Subscriber('ImageFeature0D', posedetection_msgs.msg.ImageFeature0D,self.featureimagecb, queue_size=1)
        #self.srv_detection = rospy.Service('Detect',posedetection_msgs.srv.Detect,self.detect)
        self.srv_settemplate = rospy.Service('SetTemplate',SetTemplate,self.SetTemplateFn)

    def __del__(self):
        print 'deleting gui'
        try:
            self.sub_feature.unregister()
        except:
            pass
        try:
            self.pub_objdet.unregister()
        except:
            pass
    def cvmousecb(self,event,x,y,flags,param):
        self.pe=None
        if event == cv.CV_EVENT_LBUTTONUP:
            #with self.extractionlck:
            print 'add correspondence',x,y
            self.imagepoints.append(array((x,y),float))
        elif event == cv.CV_EVENT_RBUTTONUP:
            #with self.extractionlck:
            print 'reset'
            self.imagepoints = []
            self.pe=None
    def drawpart(self,cv_image,T,KK):
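        """Projects the object's triangle mesh (or, failing that, its bounding box) through KK and draws the wireframe into cv_image."""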
        if self.trimesh is not None:
            vertices=self.trimesh.vertices
            indices=self.trimesh.indices
        elif self.boundingbox is not None:
            vertices,indices=ComputeBoxMesh(0.5*(self.boundingbox[1,:]-self.boundingbox[0,:]))
            vertices += tile(0.5*(self.boundingbox[1,:]+self.boundingbox[0,:]),(len(vertices),1))
        else:
            return
        N = vertices.shape[0]
        pts = dot(transformPoints(T,vertices),transpose(KK))
        imagepts = pts[0:N,0:2]/reshape(repeat(pts[0:N,2],2),[N,2])
        cvimagepts = [tuple(p) for p in array(imagepts,int)]
        for tri in indices:
            cv.Line(cv_image,cvimagepts[tri[0]],cvimagepts[tri[1]],(255,255,255),thickness=1)
            cv.Line(cv_image,cvimagepts[tri[1]],cvimagepts[tri[2]],(255,255,255),thickness=1)
            cv.Line(cv_image,cvimagepts[tri[2]],cvimagepts[tri[0]],(255,255,255),thickness=1)
    def drawcoordsys(self,cv_image,T,KK):
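        # draw 5cm coordinate axes at the detected pose; the colors are BGR, so
        # the x axis is red, y is green and z is blue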
        points3d = array(((0,0,0),(0.05,0,0),(0,0.05,0),(0,0,0.05)))
        projpts = dot(transformPoints(T,points3d),transpose(KK))
        x = array(projpts[:,0]/projpts[:,2],int)
        y = array(projpts[:,1]/projpts[:,2],int)
        cv.Line(cv_image,(x[0],y[0]),(x[1],y[1]),(0,0,255),thickness=3)
        cv.Line(cv_image,(x[0],y[0]),(x[2],y[2]),(0,255,0),thickness=3)
        cv.Line(cv_image,(x[0],y[0]),(x[3],y[3]),(255,0,0),thickness=3)

    def SetTemplateFn(self,req):
        with self.extractionlck:
            try:
                cv_image = self.bridge.imgmsg_to_cv(req.image, "bgr8")
                res=rospy.ServiceProxy('Feature0DDetect',posedetection_msgs.srv.Feature0DDetect)(image=req.image)
                N = len(res.features.positions)/2
                positions = reshape(res.features.positions,(N,2))
                descriptors = reshape(res.features.descriptors,(N,res.features.descriptor_dim))
                points3d=c_[positions[:,0]*req.dimx/req.image.width,positions[:,1]*req.dimy/req.image.height,zeros(len(positions))]
                self.Itemplate = array(cv_image)
                self.Itemplate[:,:,(0,2)] = self.Itemplate[:,:,(2,0)]
                self.Tright = matrixFromPose([req.relativepose.orientation.w,req.relativepose.orientation.x,req.relativepose.orientation.y,req.relativepose.orientation.z,req.relativepose.position.x,req.relativepose.position.y,req.relativepose.position.z])
                self.boundingbox=transformPoints(linalg.inv(self.Tright),array(((0,0,0),(req.dimx,req.dimy,0))))
                self.type=req.type
                template = {'type':self.type,'points3d':points3d,'descriptors':descriptors,'Itemplate':self.Itemplate,'boundingbox':self.boundingbox,'Tright':self.Tright,'featuretype':res.features.type}
                try:
                    self.pe = PointPoseExtractor(type=self.type,points3d=points3d,descriptors=descriptors)
                except detection_error,e:
                    print e
                    self.pe=None
                if len(req.savefilename) > 0:
                    pickle.dump(template,open(req.savefilename,'w'))
            except (rospy.service.ServiceException,CvBridgeError),e:
                print e
                return None
        return SetTemplateResponse()

    def featureimagecb(self,featuremsg):
        # if not in GUI mode and no one is subscribing, don't do anything
        if self.cvwindow is None and self.pub_objdet.get_num_connections()==0:
            rospy.logdebug('PointPoseExtraction.py no connections, so ignoring image')
            return
        self.featuremsg=featuremsg
        with self.extractionlck:
            try:
                cv_image = self.bridge.imgmsg_to_cv(featuremsg.image, "bgr8")
            except CvBridgeError, e:
                print e
                return
            # decompose P=KK*Tcamera
            P = reshape(array(featuremsg.info.P),(3,4))
            cvKK = cv.fromarray(eye(3))
            cvRcamera = cv.fromarray(eye(3))
            cvTcamera = cv.fromarray(zeros((4,1)))
            cv.DecomposeProjectionMatrix(cv.fromarray(P),cvKK,cvRcamera,cvTcamera)
            KK = array(cvKK)
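            # DecomposeProjectionMatrix returns the camera center C in homogeneous
            # coordinates; dehomogenize and convert to the extrinsic translation -R*C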
            Tcamera = eye(4)
            Tcamera[0:3,:] = c_[array(cvRcamera),-dot(array(cvRcamera),array(cvTcamera)[0:3]/cvTcamera[3,0])]
            if self.verboselevel > 0:
                print "Tcamera: ",Tcamera
            if self.pe is None:
                if len(self.imagepoints) >= 4:
                    xaxis = self.imagepoints[1]-self.imagepoints[0]
                    yaxis = self.imagepoints[2]-self.imagepoints[1]
                    if xaxis[0]*yaxis[1]-xaxis[1]*yaxis[0] < 0:
                        print 'point order is not correct! need to specify points in clockwise order'
                        self.imagepoints=[]
                        return
                    # find the homography, warp the image, and get new features
                    width = int(sqrt(max(sum((self.imagepoints[1]-self.imagepoints[0])**2),sum((self.imagepoints[3]-self.imagepoints[2])**2))))
                    height = int(sqrt(max(sum((self.imagepoints[2]-self.imagepoints[1])**2),sum((self.imagepoints[3]-self.imagepoints[0])**2))))
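                    # rectify the clicked quadrilateral into a fronto-parallel texture;
                    # each output dimension is the longer of the two opposite sides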
                    cvimagepoints = cv.fromarray(array(self.imagepoints))
                    cvtexturepoints = cv.fromarray(array(((0,0),(width,0),(width,height),(0,height)),float))
                    cv_texture = cv.CreateMat(height,width,cv_image.type)
                    cv_texture2 = cv.CreateMat(height/2,width/2,cv_image.type)
                    cvH = cv.fromarray(eye(3))
                    cv.FindHomography(cvimagepoints,cvtexturepoints,cvH,0)
                    cv.WarpPerspective(cv_image,cv_texture,cvH)
                    try:
                        res=rospy.ServiceProxy('Feature0DDetect',posedetection_msgs.srv.Feature0DDetect)(image=self.bridge.cv_to_imgmsg(cv_texture))
                        N = len(res.features.positions)/2
                        positions = reshape(res.features.positions,(N,2))
                        descriptors = reshape(res.features.descriptors,(N,res.features.descriptor_dim))
                        texturedims_s = raw_input('creating template '+self.templateppfilename + ' enter the texture dimensions (2 values): ')
                        texturedims=[float(f) for f in texturedims_s.split()]
                        points3d=c_[positions[:,0]*texturedims[0]/width,positions[:,1]*texturedims[1]/height,zeros(len(positions))]
                        self.Itemplate = array(cv_texture)
                        self.Itemplate[:,:,(0,2)] = self.Itemplate[:,:,(2,0)]
                        self.boundingbox=transformPoints(linalg.inv(self.Tright),array(((0,0,0),(texturedims[0],texturedims[1],0))))
                        template = {'type':self.type,'points3d':points3d,'descriptors':descriptors,'Itemplate':self.Itemplate,'boundingbox':self.boundingbox,'Tright':self.Tright,'featuretype':featuremsg.features.type}
                        pickle.dump(template,open(self.templateppfilename,'w'))
                        scipy.misc.pilutil.imshow(self.Itemplate)
                        self.pe = PointPoseExtractor(type=self.type,points3d=points3d,descriptors=descriptors)
                        self.imagepoints = []
                    except rospy.service.ServiceException,e:
                        print e
                    return
            else:
                success = False
                projerror=-1.0
                starttime = time.time() # initialized here so the log below is valid even when detection_error is raised early
                try:
                    if self.featuretype is not None and self.featuretype != featuremsg.features.type:
                        rospy.logwarn('feature types do not match: %s!=%s'%(self.featuretype,featuremsg.features.type))
                        raise detection_error()
                    N = len(featuremsg.features.positions)/2
                    positions = reshape(featuremsg.features.positions,(N,2))
                    descriptors = reshape(featuremsg.features.descriptors,(N,featuremsg.features.descriptor_dim))
                    Tlocal,projerror = self.pe.extractpose(KK,positions,descriptors,featuremsg.features.confidences,cv_image.width,verboselevel=self.verboselevel,neighthresh=self.neighthresh,thresh=self.thresh,dminexpected=self.dminexpected,ransaciters=self.ransaciters)
                    success = projerror < self.errorthresh
                    if success: # publish if error is low enough
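                        # Tlocal is the pose of the template texture in the camera frame;
                        # composing with Tright gives the object pose (Tlocalobject), and
                        # premultiplying by inv(Tcamera) expresses it in the global frame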
                        Tlocalobject = dot(Tlocal,self.Tright)
                        Tglobal = dot(linalg.inv(Tcamera),Tlocalobject)
                        poseglobal = poseFromMatrix(Tglobal)
                        pose = posedetection_msgs.msg.Object6DPose()
                        pose.type = self.type
                        pose.pose.orientation.w = poseglobal[0]
                        pose.pose.orientation.x = poseglobal[1]
                        pose.pose.orientation.y = poseglobal[2]
                        pose.pose.orientation.z = poseglobal[3]
                        pose.pose.position.x = poseglobal[4]
                        pose.pose.position.y = poseglobal[5]
                        pose.pose.position.z = poseglobal[6]
                        objdetmsg = posedetection_msgs.msg.ObjectDetection()
                        objdetmsg.objects=[pose]
                        objdetmsg.header = featuremsg.image.header
                        print 'local texture: ',repr(Tlocal)
                        self.pub_objdet.publish(objdetmsg)
                        self.drawpart(cv_image,Tlocalobject,KK)
                        self.drawcoordsys(cv_image,Tlocalobject,KK)
                except detection_error,e:
                    pass
                if self.verboselevel > 0:
                    rospy.loginfo('%s: %s, detection time: %fs, projerror %f < %f'%(self.type,'success' if success else 'failure',time.time()-starttime,projerror,self.errorthresh))
            if self.cvwindow is not None:
                if not self.cvwindowstarted:
                    cv.NamedWindow(self.cvwindow, cv.CV_WINDOW_AUTOSIZE)
                    cv.SetMouseCallback(self.cvwindow,self.cvmousecb)
                    self.cvwindowstarted=True
                for x,y in self.imagepoints:
                    cv.Circle(cv_image,(int(x),int(y)),2,(0,0,255),2)
                cv.ShowImage(self.cvwindow,cv_image)
                char=cv.WaitKey(20)

def CreateTemplateFn(type='',imagefilename='',Tright=eye(4),object_width=100,object_height=100):
    cv_texture = cv.LoadImageM(imagefilename)
    [width,height] = cv.GetSize(cv_texture)

    print 'image:name=%s, width=%d height=%d, object:width=%f height=%f'%(imagefilename,width,height,object_width,object_height)

    res=rospy.ServiceProxy('Feature0DDetect',posedetection_msgs.srv.Feature0DDetect)(image=CvBridge().cv_to_imgmsg(cv_texture))
    N = len(res.features.positions)/2
    positions = reshape(res.features.positions,(N,2))
    descriptors = reshape(res.features.descriptors,(N,res.features.descriptor_dim))

    templateppfilename = os.path.dirname(imagefilename)+'/template_'+os.path.basename(imagefilename)+'.pp'
    texturedims=[object_width,object_height]
    points3d=c_[positions[:,0]*texturedims[0]/width,positions[:,1]*texturedims[1]/height,zeros(len(positions))]
    Itemplate = array(cv_texture)
    Itemplate[:,:,(0,2)] = Itemplate[:,:,(2,0)]
    boundingbox=transformPoints(linalg.inv(Tright),array(((0,0,0),(texturedims[0],texturedims[1],0))))
    template = {'type':type,'points3d':points3d,'descriptors':descriptors,'Itemplate':Itemplate,'boundingbox':boundingbox,'Tright':Tright,'featuretype':res.features.type}
    pickle.dump(template,open(templateppfilename,'w'))
    #scipy.misc.pilutil.imshow(Itemplate)

if __name__=='__main__':
    rospy.init_node('PointPoseExtract')
    parser = OptionParser(description='Connects to ROS and publishes 6D poses whenever the template is discovered in the incoming images. Extracts poses from a set of point features. To use with 0D features, first create a template model and pickle it.')
    parser.add_option('--hidegui', action="store_false",dest='showgui',default=True,
                      help='If set, does not show the GUI window with the detected object')
    parser.add_option('-q','--quiet',action='store_true', dest='quiet',default=False,
                      help="If set, will not print extraction output and times.")
    parser.add_option('--template', action='store', type='string', dest='template',default=None,
                      help='Python pickle file describing the template to detect. Requires a template dict with field names type, points3d, Itemplate, descriptors')
    parser.add_option('--errorthresh',action='store', type='float', dest='errorthresh',default=0.004,
                      help='Threshold on the pose reprojection error below which a pose is published')
    parser.add_option('--neighthresh',action='store', type='float', dest='neighthresh',default=0.8,
                      help='Threshold for the descriptor nearest-neighbor ratio test')
    parser.add_option('--thresh',action='store', type='float', dest='thresh',default=None,
                      help='The reprojection threshold when choosing inliers in ransac')
    parser.add_option('--dminexpected',action='store', type='int', dest='dminexpected',default=10,
                      help='Minimum number of features to match before outputting a pose')
    parser.add_option('--ransaciters',action='store', type='int', dest='ransaciters',default=200,
                      help='The number of iterations of ransac to perform')
    parser.add_option('--transform',action='store', type='string', dest='transform',default=None,
                      help="3x4 transformation (12 values in row-major order) of the template with respect to the object, used only for template creation")
    parser.add_option('--type', action='store', type='string', dest='type',default='',
                      help='The object type (filename for geometry), used only for template creation')
    parser.add_option('--imagefilename',
                      action="store",type='string',dest='imagefilename',default='',
                      help='Image file from which to create the template')
    parser.add_option('--object-width',
                      action="store",type='float',dest='object_width',default=0.100,
                      help='Width of the real object, used for creating the template')
    parser.add_option('--object-height',
                      action="store",type='float',dest='object_height',default=0.100,
                      help='Height of the real object, used for creating the template')
    (options, args) = parser.parse_args()
    T = eye(4)
    if options.transform:
        T[0:3,0:4] = reshape([float(s) for s in options.transform.split()],(3,4))
    if options.imagefilename:
        CreateTemplateFn(type=options.type,imagefilename=options.imagefilename,Tright=T,object_width=options.object_width,object_height=options.object_height)
        sys.exit(0)
    try:
        processor = ROSPlanarPoseProcessor(templateppfilename=options.template,errorthresh=options.errorthresh,neighthresh=options.neighthresh,thresh=options.thresh,Tright=T,dminexpected=options.dminexpected,ransaciters=options.ransaciters,showgui=options.showgui,verboselevel=0 if options.quiet else 1,type=options.type)
        rospy.spin()
    except KeyboardInterrupt, e:
        pass

def test():
    import PointPoseExtraction
    #self = PointPoseExtraction.ROSPlanarPoseProcessor(errorthresh=0.01,showgui=True)
    self = PointPoseExtraction.ROSPlanarPoseProcessor(showgui=True,dminexpected=20,neighthresh=0.85,errorthresh=0.005,templateppfilename='template_peachjuice0.pp',type='test')#scenes/cereal_frootloops.kinbody.xml')

    featuremsg=self.featuremsg
    cv_image = self.bridge.imgmsg_to_cv(featuremsg.image, "bgr8")
    newimage=cv.CloneMat(cv_image)
    ret=cv.PyrMeanShiftFiltering(cv_image,newimage,5,30)
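
# Example invocations (a sketch, not from the original source; the exact feature
# detector node and any topic remappings depend on your setup, and both commands
# assume a Feature0DDetect service / ImageFeature0D publisher is available):
#
#   # create a template from an image of a 10cm x 10cm planar object
#   # (writes template_texture.png.pp next to the image, per CreateTemplateFn):
#   rosrun posedetectiondb PointPoseExtraction.py --imagefilename=texture.png \
#       --object-width=0.1 --object-height=0.1 --type=myobject
#
#   # detect the template in incoming images and publish ObjectDetection messages:
#   rosrun posedetectiondb PointPoseExtraction.py --template=template_texture.png.pp \
#       --errorthresh=0.004 --ransaciters=200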