nao_vision.py
#!/usr/bin/env python

#
# ROS node to serve NAOqi vision functionalities
# This code is currently compatible with NAOqi version 1.6
#
# Copyright 2014 Manos Tsardoulias, CERTH/ITI
# http://www.ros.org/wiki/nao
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
#    # Redistributions of source code must retain the above copyright
#       notice, this list of conditions and the following disclaimer.
#    # Redistributions in binary form must reproduce the above copyright
#       notice, this list of conditions and the following disclaimer in the
#       documentation and/or other materials provided with the distribution.
#    # Neither the name of the CERTH/ITI nor the names of its
#       contributors may be used to endorse or promote products derived from
#       this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#

import rospy
import naoqi

from naoqi import (
    ALModule,
    ALBroker,
    ALProxy)

from naoqi_driver.naoqi_node import NaoqiNode

#~ ROS msgs
from std_msgs.msg import (
    String,
    Float32,
    Int32)

from std_srvs.srv import (
    EmptyResponse,
    Empty)

from geometry_msgs.msg import (
    Point)

#~ nao-ros msgs
from nao_interaction_msgs.msg import (
    FaceDetected,
    MovementDetected,
    LandmarkDetected)

from nao_interaction_msgs.srv import (
    LearnFace,
    LearnFaceResponse,
    VisionMotionSensitivity,
    VisionMotionSensitivityResponse)

class Constants:
    NODE_NAME = "nao_vision_interface"

class NaoVisionInterface(ALModule, NaoqiNode):
    "ROS node exposing NAOqi vision functionalities (face, movement and landmark detection) as topics and services"
    def __init__(self, moduleName):
        # ROS initialization
        NaoqiNode.__init__(self, Constants.NODE_NAME)

        # NAOqi initialization
        self.ip = ""
        self.port = 10601
        self.moduleName = moduleName
        self.init_almodule()

        #~ Variable initialization
        self.faces = FaceDetected()
        self.face_detection_enabled = False
        self.motion_detection_enabled = False
        self.landmark_detection_enabled = False

        #~ ROS initializations
        self.subscribeFaceSrv = rospy.Service("nao_vision/face_detection/enable", Empty, self.serveSubscribeFaceSrv)
        self.unsubscribeFaceSrv = rospy.Service("nao_vision/face_detection/disable", Empty, self.serveUnsubscribeFaceSrv)
        self.subscribeMotionSrv = rospy.Service("nao_vision/motion_detection/enable", Empty, self.serveSubscribeMotionSrv)
        self.unsubscribeMotionSrv = rospy.Service("nao_vision/motion_detection/disable", Empty, self.serveUnsubscribeMotionSrv)
        self.subscribeLandmarkSrv = rospy.Service("nao_vision/landmark_detection/enable", Empty, self.serveSubscribeLandmarkSrv)
        self.unsubscribeLandmarkSrv = rospy.Service("nao_vision/landmark_detection/disable", Empty, self.serveUnsubscribeLandmarkSrv)
        # Note: the next two assignments replace the callback methods of the same
        # name with the Service handles; rospy captures the callbacks before the
        # assignment, so the services still work.
        self.learnFaceSrv = rospy.Service('nao_vision/face_detection/learn_face', LearnFace, self.learnFaceSrv)
        self.forgetPersonSrv = rospy.Service('nao_vision/face_detection/forget_person', LearnFace, self.forgetPersonSrv)

        self.subscribe()

        rospy.loginfo("nao_vision_interface initialized")

    def init_almodule(self):
        # before we can instantiate an ALModule, an ALBroker has to be created
        rospy.loginfo("Connecting to NaoQi at %s:%d", self.pip, self.pport)
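        # ALBroker(name, listenIP, listenPort, parentIP, parentPort): the empty
        # listen IP and port 10601 chosen in __init__ make this broker reachable
        # for callbacks from NAOqi, while pip/pport (set up by NaoqiNode) point
        # at the robot's main broker.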
        try:
            self.broker = ALBroker("%sBroker" % self.moduleName, self.ip, self.port, self.pip, self.pport)
        except RuntimeError as e:
            rospy.logerr("Could not connect to NaoQi's main broker: %s", e)
            exit(1)
        ALModule.__init__(self, self.moduleName)

        self.memProxy = ALProxy("ALMemory", self.pip, self.pport)
        if self.memProxy is None:
            rospy.logerr("Could not get a proxy to ALMemory on %s:%d", self.pip, self.pport)
            exit(1)

        self.landmarkDetectionProxy = ALProxy("ALLandMarkDetection", self.pip, self.pport)
        if self.landmarkDetectionProxy is None:
            rospy.logerr("Could not get a proxy to ALLandMarkDetection on %s:%d", self.pip, self.pport)
            exit(1)

        self.movementDetectionProxy = ALProxy("ALMovementDetection", self.pip, self.pport)
        if self.movementDetectionProxy is None:
            rospy.logerr("Could not get a proxy to ALMovementDetection on %s:%d", self.pip, self.pport)
            exit(1)

        self.faceDetectionProxy = ALProxy("ALFaceDetection", self.pip, self.pport)
        if self.faceDetectionProxy is None:
            rospy.logerr("Could not get a proxy to ALFaceDetection on %s:%d", self.pip, self.pport)
            exit(1)

    def shutdown(self):
        self.unsubscribe()

    def subscribe(self):
        #~ Subscriptions to the NAOqi extractors are performed on demand, via the
        #~ enable/disable services, to save CPU power on the robot
        pass

    def unsubscribe(self):
        if self.landmark_detection_enabled:
            self.memProxy.unsubscribeToEvent("LandmarkDetected", self.moduleName)
        if self.face_detection_enabled:
            self.memProxy.unsubscribeToEvent("FaceDetected", self.moduleName)
        if self.motion_detection_enabled:
            self.memProxy.unsubscribeToEvent("MovementDetection/MovementDetected", self.moduleName)

    def onLandmarkDetected(self, strVarName, value, strMessage):
        "Called when a landmark is detected"
        if len(value) == 0:
            return

        # For the specific fields in the value variable check here:
        # https://community.aldebaran-robotics.com/doc/1-14/naoqi/vision/allandmarkdetection-api.html#landmarkdetected-value-structure
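        #
        # Layout of 'value' as used below:
        #   value[1][i]  one entry per detected mark:
        #                [0] = shape info [0, alpha, beta, sizeX, sizeY, ...]
        #                [1] = extra info [markID, ...]
        #   value[2]     camera pose in the local frame (x, y, z, wx, wy, wz)
        #   value[3]     camera pose in the world frame (x, y, z, wx, wy, wz)
        #   value[4]     name of the camera that produced the detection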

        msg = LandmarkDetected()

        for i in range(0, len(value[1])):
            msg.shape_alpha.append(Float32(value[1][i][0][1]))
            msg.shape_beta.append(Float32(value[1][i][0][2]))
            msg.shape_sizex.append(Float32(value[1][i][0][3]))
            msg.shape_sizey.append(Float32(value[1][i][0][4]))
            msg.mark_ids.append(Int32(value[1][i][1][0]))

        msg.camera_local_pose.position.x = value[2][0]
        msg.camera_local_pose.position.y = value[2][1]
        msg.camera_local_pose.position.z = value[2][2]
        msg.camera_local_pose.orientation.x = value[2][3]
        msg.camera_local_pose.orientation.y = value[2][4]
        msg.camera_local_pose.orientation.z = value[2][5]

        msg.camera_world_pose.position.x = value[3][0]
        msg.camera_world_pose.position.y = value[3][1]
        msg.camera_world_pose.position.z = value[3][2]
        msg.camera_world_pose.orientation.x = value[3][3]
        msg.camera_world_pose.orientation.y = value[3][4]
        msg.camera_world_pose.orientation.z = value[3][5]

        msg.camera_name = String(value[4])

        self.landmarkPub.publish(msg)

    def onFaceDetected(self, strVarName, value, strMessage):
        "Called when a face is detected"

        if len(value) == 0:
            return

        # For the specific fields in the value variable check here:
        # https://community.aldebaran-robotics.com/doc/1-14/naoqi/vision/alfacedetection.html
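        #
        # Layout of 'value' as used below; only the first detected face
        # (value[1][0]) is published:
        #   value[1][0][0]  shape info of the face: [0, alpha, beta, sizeX, sizeY]
        #   value[1][0][1]  extra info: [faceID, scoreReco, faceLabel,
        #                   leftEyePoints, rightEyePoints, leftEyebrowPoints,
        #                   rightEyebrowPoints, nosePoints, mouthPoints]
        #   value[2], value[3]  the two camera poses reported by NAOqi
        #                   (mapped to camera_0_pose / camera_1_pose)
        #   value[4]        id of the camera used for the detection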

        self.faces.camera_id.data = int(value[4])

        self.faces.camera_0_pose.position.x = float(value[2][0])
        self.faces.camera_0_pose.position.y = float(value[2][1])
        self.faces.camera_0_pose.position.z = float(value[2][2])
        self.faces.camera_0_pose.orientation.x = float(value[2][3])
        self.faces.camera_0_pose.orientation.y = float(value[2][4])
        self.faces.camera_0_pose.orientation.z = float(value[2][5])

        self.faces.camera_1_pose.position.x = float(value[3][0])
        self.faces.camera_1_pose.position.y = float(value[3][1])
        self.faces.camera_1_pose.position.z = float(value[3][2])
        self.faces.camera_1_pose.orientation.x = float(value[3][3])
        self.faces.camera_1_pose.orientation.y = float(value[3][4])
        self.faces.camera_1_pose.orientation.z = float(value[3][5])

        self.faces.shape_alpha.data = float(value[1][0][0][1])
        self.faces.shape_beta.data = float(value[1][0][0][2])
        self.faces.shape_sizeX.data = float(value[1][0][0][3])
        self.faces.shape_sizeY.data = float(value[1][0][0][4])

        self.faces.face_id.data = float(value[1][0][1][0])
        self.faces.score_reco.data = float(value[1][0][1][1])
        self.faces.face_label.data = str(value[1][0][1][2])

        self.faces.left_eye_eyeCenter_x.data = float(value[1][0][1][3][0])
        self.faces.left_eye_eyeCenter_y.data = float(value[1][0][1][3][1])
        self.faces.left_eye_noseSideLimit_x.data = float(value[1][0][1][3][2])
        self.faces.left_eye_noseSideLimit_y.data = float(value[1][0][1][3][3])
        self.faces.left_eye_earSideLimit_x.data = float(value[1][0][1][3][4])
        self.faces.left_eye_earSideLimit_y.data = float(value[1][0][1][3][5])
        self.faces.left_eye_topLimit_x.data = float(value[1][0][1][3][6])
        self.faces.left_eye_topLimit_y.data = float(value[1][0][1][3][7])
        self.faces.left_eye_bottomLimit_x.data = float(value[1][0][1][3][8])
        self.faces.left_eye_bottomLimit_y.data = float(value[1][0][1][3][9])
        self.faces.left_eye_midTopEarLimit_x.data = float(value[1][0][1][3][10])
        self.faces.left_eye_midTopEarLimit_y.data = float(value[1][0][1][3][11])
        self.faces.left_eye_midTopNoseLimit_x.data = float(value[1][0][1][3][12])
        self.faces.left_eye_midTopNoseLimit_y.data = float(value[1][0][1][3][13])

        self.faces.right_eye_eyeCenter_x.data = float(value[1][0][1][4][0])
        self.faces.right_eye_eyeCenter_y.data = float(value[1][0][1][4][1])
        self.faces.right_eye_noseSideLimit_x.data = float(value[1][0][1][4][2])
        self.faces.right_eye_noseSideLimit_y.data = float(value[1][0][1][4][3])
        self.faces.right_eye_earSideLimit_x.data = float(value[1][0][1][4][4])
        self.faces.right_eye_earSideLimit_y.data = float(value[1][0][1][4][5])
        self.faces.right_eye_topLimit_x.data = float(value[1][0][1][4][6])
        self.faces.right_eye_topLimit_y.data = float(value[1][0][1][4][7])
        self.faces.right_eye_bottomLimit_x.data = float(value[1][0][1][4][8])
        self.faces.right_eye_bottomLimit_y.data = float(value[1][0][1][4][9])
        self.faces.right_eye_midTopEarLimit_x.data = float(value[1][0][1][4][10])
        self.faces.right_eye_midTopEarLimit_y.data = float(value[1][0][1][4][11])
        self.faces.right_eye_midTopNoseLimit_x.data = float(value[1][0][1][4][12])
        self.faces.right_eye_midTopNoseLimit_y.data = float(value[1][0][1][4][13])

        self.faces.left_eyebrow_noseSideLimit_x.data = float(value[1][0][1][5][0])
        self.faces.left_eyebrow_noseSideLimit_y.data = float(value[1][0][1][5][1])
        self.faces.left_eyebrow_center_x.data = float(value[1][0][1][5][2])
        self.faces.left_eyebrow_center_y.data = float(value[1][0][1][5][3])
        self.faces.left_eyebrow_earSideLimit_x.data = float(value[1][0][1][5][4])
        self.faces.left_eyebrow_earSideLimit_y.data = float(value[1][0][1][5][5])

        self.faces.right_eyebrow_noseSideLimit_x.data = float(value[1][0][1][6][0])
        self.faces.right_eyebrow_noseSideLimit_y.data = float(value[1][0][1][6][1])
        self.faces.right_eyebrow_center_x.data = float(value[1][0][1][6][2])
        self.faces.right_eyebrow_center_y.data = float(value[1][0][1][6][3])
        self.faces.right_eyebrow_earSideLimit_x.data = float(value[1][0][1][6][4])
        self.faces.right_eyebrow_earSideLimit_y.data = float(value[1][0][1][6][5])

        self.faces.nose_bottomCenterLimit_x.data = float(value[1][0][1][7][0])
        self.faces.nose_bottomCenterLimit_y.data = float(value[1][0][1][7][1])
        self.faces.nose_bottomLeftLimit_x.data = float(value[1][0][1][7][2])
        self.faces.nose_bottomLeftLimit_y.data = float(value[1][0][1][7][3])
        self.faces.nose_bottomRightLimit_x.data = float(value[1][0][1][7][4])
        self.faces.nose_bottomRightLimit_y.data = float(value[1][0][1][7][5])

        self.faces.mouth_leftLimit_x.data = float(value[1][0][1][8][0])
        self.faces.mouth_leftLimit_y.data = float(value[1][0][1][8][1])
        self.faces.mouth_rightLimit_x.data = float(value[1][0][1][8][2])
        self.faces.mouth_rightLimit_y.data = float(value[1][0][1][8][3])
        self.faces.mouth_topLimit_x.data = float(value[1][0][1][8][4])
        self.faces.mouth_topLimit_y.data = float(value[1][0][1][8][5])
        self.faces.mouth_bottomLimit_x.data = float(value[1][0][1][8][6])
        self.faces.mouth_bottomLimit_y.data = float(value[1][0][1][8][7])
        self.faces.mouth_midTopLeftLimit_x.data = float(value[1][0][1][8][8])
        self.faces.mouth_midTopLeftLimit_y.data = float(value[1][0][1][8][9])
        self.faces.mouth_midTopRightLimit_x.data = float(value[1][0][1][8][10])
        self.faces.mouth_midTopRightLimit_y.data = float(value[1][0][1][8][11])
        self.faces.mouth_midBottomRightLimit_x.data = float(value[1][0][1][8][12])
        self.faces.mouth_midBottomRightLimit_y.data = float(value[1][0][1][8][13])
        self.faces.mouth_midBottomLeftLimit_x.data = float(value[1][0][1][8][14])
        self.faces.mouth_midBottomLeftLimit_y.data = float(value[1][0][1][8][15])

        self.facesPub.publish(self.faces)

    def handleSensitivityChangeSrv(self, req):
        resp = VisionMotionSensitivityResponse()
        if (req.sensitivity.data < 0.0) or (req.sensitivity.data > 1.0):
            rospy.logwarn("Requested sensitivity %f is outside [0.0, 1.0]; ignoring", req.sensitivity.data)
            return resp

        self.movementDetectionProxy.setSensitivity(req.sensitivity.data)
        rospy.loginfo("Sensitivity of nao_movement_detection changed to %f", req.sensitivity.data)
        return resp

    def onMovementDetected(self, strVarName, value, strMessage):
        "Called when movement is detected"
        datavar = self.memProxy.getData("MovementDetection/MovementInfo")
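        # Layout of 'datavar' as used below (first detected movement cluster only):
        #   datavar[0][0]  gravity center of the movement (x, y)
        #   datavar[0][1]  mean velocity of the movement (x, y)
        #   datavar[0][2]  positions of the moving points
        #   datavar[0][3]  speeds of the moving points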

        movement = MovementDetected()

        movement.gravity_center.x = datavar[0][0][0]
        movement.gravity_center.y = datavar[0][0][1]

        movement.mean_velocity.x = datavar[0][1][0]
        movement.mean_velocity.y = datavar[0][1][1]

        for i in range(0, len(datavar[0][2])):
            movement.points_poses.append(Point())
            movement.points_poses[i].x = datavar[0][2][i][0]
            movement.points_poses[i].y = datavar[0][2][i][1]

            movement.points_speeds.append(Point())
            movement.points_speeds[i].x = datavar[0][3][i][0]
            movement.points_speeds[i].y = datavar[0][3][i][1]

        self.movementPub.publish(movement)

    def serveSubscribeFaceSrv(self, req):
        self.facesPub = rospy.Publisher("nao_vision/faces_detected", FaceDetected)
        self.memProxy.subscribeToEvent("FaceDetected", self.moduleName, "onFaceDetected")
        self.face_detection_enabled = True
        return EmptyResponse()

    def serveUnsubscribeFaceSrv(self, req):
        if self.face_detection_enabled:
            self.memProxy.unsubscribeToEvent("FaceDetected", self.moduleName)
            self.facesPub.unregister()
            self.face_detection_enabled = False
        return EmptyResponse()

    def serveSubscribeMotionSrv(self, req):
        self.movementPub = rospy.Publisher("nao_vision/movement_detected", MovementDetected)
        self.sensitivitySrv = rospy.Service("nao_vision/movement_detection_sensitivity", VisionMotionSensitivity, self.handleSensitivityChangeSrv)
        self.memProxy.subscribeToEvent("MovementDetection/MovementDetected", self.moduleName, "onMovementDetected")
        self.motion_detection_enabled = True
        return EmptyResponse()

    def serveUnsubscribeMotionSrv(self, req):
        if self.motion_detection_enabled:
            self.memProxy.unsubscribeToEvent("MovementDetection/MovementDetected", self.moduleName)
            self.movementPub.unregister()
            self.sensitivitySrv.shutdown()
            self.motion_detection_enabled = False
        return EmptyResponse()

    def serveSubscribeLandmarkSrv(self, req):
        self.landmarkPub = rospy.Publisher("nao_vision/landmark_detected", LandmarkDetected)
        self.memProxy.subscribeToEvent("LandmarkDetected", self.moduleName, "onLandmarkDetected")
        self.landmarkDetectionProxy.updatePeriod("ROSNaoVisionModuleLandmarkDetected", 200)
        self.landmark_detection_enabled = True
        return EmptyResponse()

    def serveUnsubscribeLandmarkSrv(self, req):
        if self.landmark_detection_enabled:
            self.memProxy.unsubscribeToEvent("LandmarkDetected", self.moduleName)
            self.landmarkPub.unregister()
            self.landmark_detection_enabled = False
        return EmptyResponse()

    def learnFaceSrv(self, req):
        res = LearnFaceResponse()
        res.result.data = self.faceDetectionProxy.learnFace(req.name.data)
        return res

    def forgetPersonSrv(self, req):
        res = LearnFaceResponse()
        res.result.data = self.faceDetectionProxy.forgetPerson(req.name.data)
        return res

if __name__ == '__main__':

    ROSNaoVisionModule = NaoVisionInterface("ROSNaoVisionModule")
    rospy.spin()

    rospy.loginfo("Stopping ROSNaoVisionModule ...")
    ROSNaoVisionModule.shutdown()
    rospy.loginfo("ROSNaoVisionModule stopped.")
    exit(0)
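
# Illustrative usage sketch (an assumption about a typical client, not part of
# this node): with the node running under the default namespace, another rospy
# script could enable face detection and listen for results like this:
#
#   import rospy
#   from std_srvs.srv import Empty
#   from nao_interaction_msgs.msg import FaceDetected
#
#   rospy.init_node("nao_vision_client")
#   rospy.wait_for_service("nao_vision/face_detection/enable")
#   rospy.ServiceProxy("nao_vision/face_detection/enable", Empty)()
#   rospy.Subscriber("nao_vision/faces_detected", FaceDetected,
#                    lambda msg: rospy.loginfo("face label: %s", msg.face_label.data))
#   rospy.spin()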

