#!/usr/bin/env python

00003 """ face_tracker.py - Version 0.22 2012-01-20
00004
00005 Track a face using the OpenCV Haar detector to initially locate the face, then OpenCV's
00006 Good-Features-to-Track and Lucas-Kanade Optical Flow to track the face features over
00007 subsequent frames.
00008
00009 Can also be used to track arbitrarily selected patches by setting the parameter
00010 auto_face_tracking to False and using the mouse to select the desired region.
00011
00012 Created for the Pi Robot Project: http://www.pirobot.org
00013 Copyright (c) 2011 Patrick Goebel. All rights reserved.
00014
00015 This program is free software; you can redistribute it and/or modify
00016 it under the terms of the GNU General Public License as published by
00017 the Free Software Foundation; either version 2 of the License, or
00018 (at your option) any later version.5
00019
00020 This program is distributed in the hope that it will be useful,
00021 but WITHOUT ANY WARRANTY; without even the implied warranty of
00022 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
00023 GNU General Public License for more details at:
00024
00025 http://www.gnu.org/licenses/gpl.html
00026
00027 """

import roslib
roslib.load_manifest('pi_face_tracker')
import rospy
import cv
import sys
from sensor_msgs.msg import RegionOfInterest, Image
from math import sqrt, isnan
from ros2opencv import ROS2OpenCV
from pi_face_tracker.srv import *
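
""" Note: this script uses the legacy OpenCV 'cv' Python bindings (pre-cv2), so it
    requires an OpenCV build that still ships that interface. """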

class PatchTracker(ROS2OpenCV):
    def __init__(self, node_name):
        ROS2OpenCV.__init__(self, node_name)

        self.node_name = node_name

        self.auto_face_tracking = rospy.get_param("~auto_face_tracking", True)
        self.use_haar_only = rospy.get_param("~use_haar_only", False)
        self.use_depth_for_detection = rospy.get_param("~use_depth_for_detection", False)
        self.fov_width = rospy.get_param("~fov_width", 1.094)
        self.fov_height = rospy.get_param("~fov_height", 1.094)
        self.max_face_size = rospy.get_param("~max_face_size", 0.28)
        self.use_depth_for_tracking = rospy.get_param("~use_depth_for_tracking", False)
        self.auto_min_features = rospy.get_param("~auto_min_features", True)
        self.min_features = rospy.get_param("~min_features", 50)
        self.abs_min_features = rospy.get_param("~abs_min_features", 6)
        self.std_err_xy = rospy.get_param("~std_err_xy", 2.5)
        self.pct_err_z = rospy.get_param("~pct_err_z", 0.42)
        self.max_mse = rospy.get_param("~max_mse", 10000)
        self.good_feature_distance = rospy.get_param("~good_feature_distance", 5)
        self.add_feature_distance = rospy.get_param("~add_feature_distance", 10)
        self.flip_image = rospy.get_param("~flip_image", False)
        self.feature_type = rospy.get_param("~feature_type", 0)
        self.expand_roi_init = rospy.get_param("~expand_roi", 1.02)
        self.expand_roi = self.expand_roi_init
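        """ feature_type 0 selects Good Features to Track; 1 selects SURF (see track_lk
            and add_features below).  expand_roi grows geometrically on each frame that the
            feature count stays below min_features (see process_image) and is reset to
            expand_roi_init once enough features have been found. """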

        self.camera_frame_id = "kinect_depth_optical_frame"

        self.cog_x = self.cog_y = 0
        self.cog_z = -1

        self.detect_box = None
        self.track_box = None
        self.features = []

        self.grey = None
        self.pyramid = None
        self.small_image = None

        """ Set up the face detection parameters """
        self.cascade_frontal_alt = rospy.get_param("~cascade_frontal_alt", "")
        self.cascade_frontal_alt2 = rospy.get_param("~cascade_frontal_alt2", "")
        self.cascade_profile = rospy.get_param("~cascade_profile", "")

        self.cascade_frontal_alt = cv.Load(self.cascade_frontal_alt)
        self.cascade_frontal_alt2 = cv.Load(self.cascade_frontal_alt2)
        self.cascade_profile = cv.Load(self.cascade_profile)

        self.min_size = (20, 20)
        self.image_scale = 2
        self.haar_scale = 1.5
        self.min_neighbors = 1
        self.haar_flags = cv.CV_HAAR_DO_CANNY_PRUNING
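        """ min_size is the smallest face (in pixels) the detector will report at the reduced
            resolution; image_scale = 2 halves each image dimension before detection for speed;
            haar_scale is the step between successive scales in the detection pyramid; and
            CV_HAAR_DO_CANNY_PRUNING skips image regions with too few edges to contain a face. """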
00096 """ Set the Good Features to Track and Lucas-Kanade parameters """
00097 self.night_mode = False
00098 self.quality = 0.01
00099 self.win_size = 10
00100 self.max_count = 200
00101 self.block_size = 3
00102 self.use_harris = False
00103 self.flags = 0
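        """ win_size is the per-side size of the search window passed to CalcOpticalFlowPyrLK
            as (win_size, win_size); max_count caps the number of corners returned by
            GoodFeaturesToTrack; quality is the minimal accepted corner quality relative to
            the best corner found. """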

        self.frame_count = 0

        """ Set the SURF parameters """
        self.surf_hessian_quality = rospy.get_param("~surf_hessian_quality", 100)

        """ A service to handle 'keystroke' commands sent from other nodes """
        self.key_command = None
        rospy.Service('~key_command', KeyCommand, self.key_command_callback)

        """ A service to allow setting the ROI to track """
        rospy.Service('~set_roi', SetROI, self.set_roi_callback)

        """ Wait until the image topics are ready before starting """
        rospy.wait_for_message(self.input_rgb_image, Image)

        if self.use_depth_for_detection or self.use_depth_for_tracking:
            rospy.wait_for_message(self.input_depth_image, Image)

    def process_image(self, cv_image):
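        """ Pipeline: (1) if there is no detect box yet (or use_haar_only is set), run the
            Haar detector; (2) otherwise track the existing features with LK optical flow;
            (3) prune features that stray too far from the cluster; and (4) if the count
            drops below min_features, expand the ROI and look for new features. """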
00125 """ If parameter use_haar_only is True, use only the OpenCV Haar detector to track the face """
00126 if (self.use_haar_only or not self.detect_box) and self.auto_face_tracking:
00127 self.detect_box = self.detect_face(cv_image)
00128
00129 """ Otherwise, track the face using Good Features to Track and Lucas-Kanade Optical Flow """
00130 if not self.use_haar_only:
00131 if self.detect_box:
00132 if not self.track_box or not self.is_rect_nonzero(self.track_box):
00133 self.features = []
00134 self.track_box = self.detect_box
00135 self.track_box = self.track_lk(cv_image)
00136
00137 """ Prune features that are too far from the main cluster """
00138 if len(self.features) > 0:
00139 ((mean_x, mean_y, mean_z), mse_xy, mse_z, score) = self.prune_features(min_features = self.abs_min_features, outlier_threshold = self.std_err_xy, mse_threshold=self.max_mse)
00140
00141 if score == -1:
00142 self.detect_box = None
00143 self.track_box = None
00144 return cv_image
00145
00146 """ Add features if the number is getting too low """
00147 if len(self.features) < self.min_features:
00148 self.expand_roi = self.expand_roi_init * self.expand_roi
00149 self.add_features(cv_image)
00150 else:
00151 self.expand_roi = self.expand_roi_init
00152 else:
00153 self.features = []
00154 self.track_box = None
00155
00156
00157 if len(self.features) > 0 and (self.use_depth_for_detection or self.use_depth_for_tracking):
00158 (self.cog_x, self.cog_y, self.cog_z) = self.get_cluster_centroid()
00159
00160 return cv_image

    def detect_face(self, cv_image):
        if self.grey is None:
            """ Allocate temporary images """
            self.grey = cv.CreateImage(self.image_size, 8, 1)

        if self.small_image is None:
            self.small_image = cv.CreateImage((cv.Round(self.image_size[0] / self.image_scale),
                                               cv.Round(self.image_size[1] / self.image_scale)), 8, 1)

        """ Convert color input image to grayscale """
        cv.CvtColor(cv_image, self.grey, cv.CV_BGR2GRAY)

        """ Equalize the histogram to reduce lighting effects. """
        cv.EqualizeHist(self.grey, self.grey)

        """ Scale input image for faster processing """
        cv.Resize(self.grey, self.small_image, cv.CV_INTER_LINEAR)

        """ First check one of the frontal templates """
        faces = []
        if self.cascade_frontal_alt:
            faces = cv.HaarDetectObjects(self.small_image, self.cascade_frontal_alt, cv.CreateMemStorage(0),
                                         self.haar_scale, self.min_neighbors, self.haar_flags, self.min_size)

        """ If that fails, check the profile template """
        if not faces:
            if self.cascade_profile:
                faces = cv.HaarDetectObjects(self.small_image, self.cascade_profile, cv.CreateMemStorage(0),
                                             self.haar_scale, self.min_neighbors, self.haar_flags, self.min_size)

            if not faces:
                """ If that also fails, check a second frontal template """
                if self.cascade_frontal_alt2:
                    faces = cv.HaarDetectObjects(self.small_image, self.cascade_frontal_alt2, cv.CreateMemStorage(0),
                                                 self.haar_scale, self.min_neighbors, self.haar_flags, self.min_size)

        if not faces:
            if self.show_text:
                hscale = 0.4 * self.image_size[0] / 160. + 0.1
                vscale = 0.4 * self.image_size[1] / 120. + 0.1
                text_font = cv.InitFont(cv.CV_FONT_VECTOR0, hscale, vscale, 0, 1, 8)
                cv.PutText(self.marker_image, "LOST FACE!", (50, int(self.image_size[1] * 0.9)), text_font, cv.RGB(255, 255, 0))
            return None

        for ((x, y, w, h), n) in faces:
            """ The input to cv.HaarDetectObjects was resized, so scale the
                bounding box of each face and convert it to two CvPoints """
            pt1 = (int(x * self.image_scale), int(y * self.image_scale))
            pt2 = (int((x + w) * self.image_scale), int((y + h) * self.image_scale))
            face_width = pt2[0] - pt1[0]
            face_height = pt2[1] - pt1[1]

            if self.use_depth_for_detection:
                """ Get the average distance over the face box """
                ave_face_distance = 0
                i = 0
                for col in range(pt1[0], pt2[0]):
                    for row in range(pt1[1], pt2[1]):
                        try:
                            face_distance = cv.Get2D(self.depth_image, row, col)
                            z = face_distance[0]
                        except:
                            continue
                        if isnan(z):
                            continue
                        else:
                            ave_face_distance += z
                            i = i + 1

                """ If we are too close to the Kinect, we will get NaN for distances, so just accept the detection. """
                if i == 0:
                    face_size = 0
                else:
                    """ Compute the size of the face in meters (average of width and height).
                        The Kinect's FOV is about 57 degrees wide, which is, coincidentally, about 1 radian. """
                    ave_face_distance = ave_face_distance / float(i)
                    arc = (self.fov_width * float(face_width) / float(self.image_size[0]) + self.fov_height * float(face_height) / float(self.image_size[1])) / 2.0
                    face_size = ave_face_distance * arc
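
                    """ For example, a face spanning 100 x 100 pixels of a 640 x 480 image at
                        1.0 meter subtends roughly (1.094 * 100/640 + 1.094 * 100/480) / 2
                        ~= 0.20 radians, giving face_size ~= 0.20 m -- under the default
                        max_face_size of 0.28, so the detection is accepted. """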

                if face_size > self.max_face_size:
                    continue

            face_box = (pt1[0], pt1[1], face_width, face_height)

            """ Break out of the loop after the first face """
            return face_box

    def track_lk(self, cv_image):
        feature_box = None

        """ Initialize intermediate images if necessary """
        if not self.pyramid:
            self.grey = cv.CreateImage(cv.GetSize(cv_image), 8, 1)
            self.prev_grey = cv.CreateImage(cv.GetSize(cv_image), 8, 1)
            self.pyramid = cv.CreateImage(cv.GetSize(cv_image), 8, 1)
            self.prev_pyramid = cv.CreateImage(cv.GetSize(cv_image), 8, 1)
            self.features = []

        """ Create a grey version of the image """
        cv.CvtColor(cv_image, self.grey, cv.CV_BGR2GRAY)

        """ Equalize the histogram to reduce lighting effects """
        cv.EqualizeHist(self.grey, self.grey)

        if self.track_box and self.features != []:
            """ We have feature points, so track and display them """

            """ Calculate the optical flow """
            self.features, status, track_error = cv.CalcOpticalFlowPyrLK(
                self.prev_grey, self.grey, self.prev_pyramid, self.pyramid,
                self.features,
                (self.win_size, self.win_size), 3,
                (cv.CV_TERMCRIT_ITER | cv.CV_TERMCRIT_EPS, 20, 0.01),
                self.flags)

            """ Keep only high status points """
            self.features = [p for (st, p) in zip(status, self.features) if st]
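
            """ (status[i] is 1 when the optical flow for feature i was found, 0 otherwise,
                so this drops any feature the LK tracker lost this frame.) """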

        elif self.track_box and self.is_rect_nonzero(self.track_box):
            """ Get the initial features to track """

            """ Create a mask image to be used to select the tracked points """
            mask = cv.CreateImage(cv.GetSize(cv_image), 8, 1)

            """ Begin with all black pixels """
            cv.Zero(mask)

            """ Get the coordinates and dimensions of the track box """
            try:
                x, y, w, h = self.track_box
            except:
                return None

            if self.auto_face_tracking:
                """ Get the center of the track box (type CvRect) so we can create the
                    equivalent CvBox2D (rotated rectangle) required by EllipseBox below. """
                center_x = int(x + w / 2)
                center_y = int(y + h / 2)
                roi_box = ((center_x, center_y), (w, h), 0)

                """ Create a filled white ellipse within the track_box to define the ROI. """
                cv.EllipseBox(mask, roi_box, cv.CV_RGB(255, 255, 255), cv.CV_FILLED)
            else:
                """ For manually selected regions, just use a rectangle """
                pt1 = (x, y)
                pt2 = (x + w, y + h)
                cv.Rectangle(mask, pt1, pt2, cv.CV_RGB(255, 255, 255), cv.CV_FILLED)

            """ Create the temporary scratchpad images """
            eig = cv.CreateImage(cv.GetSize(self.grey), 32, 1)
            temp = cv.CreateImage(cv.GetSize(self.grey), 32, 1)

            if self.feature_type == 0:
                """ Find keypoints to track using Good Features to Track """
                self.features = cv.GoodFeaturesToTrack(self.grey, eig, temp, self.max_count,
                                                       self.quality, self.good_feature_distance,
                                                       mask=mask, blockSize=self.block_size,
                                                       useHarris=self.use_harris, k=0.04)
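
                """ (GoodFeaturesToTrack returns a list of (x, y) corner tuples, found only
                    where the mask is non-zero.) """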

            elif self.feature_type == 1:
                """ Get the new features using SURF """
                (surf_features, descriptors) = cv.ExtractSURF(self.grey, mask, cv.CreateMemStorage(0),
                                                              (0, self.surf_hessian_quality, 3, 1))
                for feature in surf_features:
                    self.features.append(feature[0])

            if self.auto_min_features:
                """ Since the detect box is larger than the actual face or desired patch,
                    set min_features to 90% of the initial feature count and abs_min_features
                    to half of that. """
                self.min_features = int(len(self.features) * 0.9)
                self.abs_min_features = int(0.5 * self.min_features)

        """ Swap the current and previous images and pyramids """
        self.prev_grey, self.grey = self.grey, self.prev_grey
        self.prev_pyramid, self.pyramid = self.pyramid, self.prev_pyramid
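
        """ (The swap lets the next frame's CalcOpticalFlowPyrLK call reuse this frame's
            image and pyramid as its 'previous' pair without reallocating them.) """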

        """ If we have some features... """
        if len(self.features) > 0:
            """ The FitEllipse2 function below requires us to convert the feature array
                into a CvMat matrix """
            try:
                self.feature_matrix = cv.CreateMat(1, len(self.features), cv.CV_32SC2)
            except:
                pass

            """ Draw the points as green circles and add them to the features matrix """
            i = 0
            for the_point in self.features:
                if self.show_features:
                    cv.Circle(self.marker_image, (int(the_point[0]), int(the_point[1])), 2, (0, 255, 0, 0), cv.CV_FILLED, 8, 0)
                try:
                    cv.Set2D(self.feature_matrix, 0, i, (int(the_point[0]), int(the_point[1])))
                except:
                    pass
                i = i + 1

            """ Draw the best fit ellipse around the feature points """
            if len(self.features) > 6:
                feature_box = cv.FitEllipse2(self.feature_matrix)
            else:
                feature_box = None

            """ Publish the ROI for the tracked object """
            try:
                (roi_center, roi_size, roi_angle) = feature_box
            except:
                rospy.loginfo("Patch box has shrunk to zero...")
                feature_box = None

            if feature_box and not self.drag_start and self.is_rect_nonzero(self.track_box):
                self.ROI = RegionOfInterest()
                self.ROI.x_offset = min(self.image_size[0], max(0, int(roi_center[0] - roi_size[0] / 2)))
                self.ROI.y_offset = min(self.image_size[1], max(0, int(roi_center[1] - roi_size[1] / 2)))
                self.ROI.width = min(self.image_size[0], int(roi_size[0]))
                self.ROI.height = min(self.image_size[1], int(roi_size[1]))

            self.pubROI.publish(self.ROI)

            """ If using depth info, publish the centroid of the tracked cluster as a PointStamped message """
            if self.use_depth_for_detection or self.use_depth_for_tracking:
                if feature_box is not None and not self.drag_start and self.is_rect_nonzero(self.track_box):
                    self.cluster3d.header.frame_id = self.camera_frame_id
                    self.cluster3d.header.stamp = rospy.Time()
                    self.cluster3d.point.x = self.cog_x
                    self.cluster3d.point.y = self.cog_y
                    self.cluster3d.point.z = self.cog_z
                    self.pub_cluster3d.publish(self.cluster3d)

        if feature_box is not None and len(self.features) > 0:
            return feature_box
        else:
            return None

    def add_features(self, cv_image):
        """ Look for any new features around the current feature cloud """

        """ Create the ROI mask """
        roi = cv.CreateImage(cv.GetSize(cv_image), 8, 1)

        """ Begin with all black pixels """
        cv.Zero(roi)

        """ Get the coordinates and dimensions of the current track box """
        try:
            ((x, y), (w, h), a) = self.track_box
        except:
            rospy.loginfo("Track box has shrunk to zero...")
            return

        """ Expand the track box to look for new features """
        w = int(self.expand_roi * w)
        h = int(self.expand_roi * h)

        roi_box = ((x, y), (w, h), a)

        """ Create a filled white ellipse within the track_box to define the ROI. """
        cv.EllipseBox(roi, roi_box, cv.CV_RGB(255, 255, 255), cv.CV_FILLED)

        """ Create the temporary scratchpad images """
        eig = cv.CreateImage(cv.GetSize(self.grey), 32, 1)
        temp = cv.CreateImage(cv.GetSize(self.grey), 32, 1)

        if self.feature_type == 0:
            """ Get the new features using Good Features to Track """
            features = cv.GoodFeaturesToTrack(self.grey, eig, temp, self.max_count,
                                              self.quality, self.good_feature_distance,
                                              mask=roi, blockSize=self.block_size,
                                              useHarris=self.use_harris, k=0.04)

        elif self.feature_type == 1:
            """ Get the new features using SURF """
            features = []
            (surf_features, descriptors) = cv.ExtractSURF(self.grey, roi, cv.CreateMemStorage(0),
                                                          (0, self.surf_hessian_quality, 3, 1))
            for feature in surf_features:
                features.append(feature[0])

        """ Append new features to the current list only if they are not too close to an existing feature """
        for new_feature in features:
            try:
                distance = self.distance_to_cluster(new_feature, self.features)
                if distance > self.add_feature_distance:
                    self.features.append(new_feature)
            except:
                pass

        """ Remove duplicate features """
        self.features = list(set(self.features))
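
        """ (set() works here because the features are plain (x, y) tuples, which are
            hashable; note that it does not preserve the list order.) """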

    def distance_to_cluster(self, test_point, cluster):
        min_distance = 10000
        for point in cluster:
            if point == test_point:
                continue
            """ Use L1 distance since it is faster than L2 """
            distance = abs(test_point[0] - point[0]) + abs(test_point[1] - point[1])
            if distance < min_distance:
                min_distance = distance
        return min_distance

    def get_cluster_centroid(self):
        """ Compute the 3D centroid (COG) of the current cluster """
        n_xy = n_z = 0
        sum_x = sum_y = sum_z = 0

        (cols, rows) = cv.GetSize(self.depth_image)

        for point in self.features:
            sum_x = sum_x + point[0]
            sum_y = sum_y + point[1]
            n_xy += 1

            try:
                z = cv.Get2D(self.depth_image, min(rows - 1, int(point[1])), min(cols - 1, int(point[0])))
            except cv.error:
                rospy.loginfo("Get2D Index Error: " + str(int(point[1])) + " x " + str(int(point[0])))
                continue

            """ Depth values can be NaN which should be ignored """
            if isnan(z[0]):
                continue
            else:
                sum_z = sum_z + z[0]
                n_z += 1

        """ Guard against an empty feature list so cog_x and cog_y are always defined """
        if n_xy == 0:
            return (0, 0, 0)

        cog_x = sum_x / n_xy
        cog_y = sum_y / n_xy

        """ The Kinect returns NaN depth values when closer than about 0.5 meters.  If the
            target is closer than 0.5 meters, then use 0.5 meters as a fudge. """
        if n_z > 0:
            cog_z = sum_z / n_z
        else:
            cog_z = 0.5
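
        """ Convert the pixel coordinates of the centroid to meters using a small-angle
            pinhole approximation: (pixel offset from the image center / image width) * FOV
            gives the bearing angle in radians, and angle * depth gives the lateral offset
            in meters. """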
        cog_x = cog_z * self.fov_width * (cog_x - self.image_size[0] / 2.0) / float(self.image_size[0])
        cog_y = cog_z * self.fov_height * (cog_y - self.image_size[1] / 2.0) / float(self.image_size[1])

        return (cog_x, cog_y, cog_z)

    def prune_features(self, min_features, outlier_threshold, mse_threshold):
        sum_x = 0
        sum_y = 0
        sum_z = 0
        sse = 0
        """ Work on copies so that removing outliers below does not mutate the list
            we are iterating over. """
        features_xy = list(self.features)
        features_z = list(self.features)
        n_xy = len(self.features)
        n_z = 0
        mean_z = mse_z = -1

        if self.use_depth_for_tracking:
            if self.depth_image is None:
                return ((0, 0, 0), 0, 0, -1)
            else:
                (cols, rows) = cv.GetSize(self.depth_image)

        """ If there are no features left to track, start over """
        if n_xy == 0:
            return ((0, 0, 0), 0, 0, -1)

        """ Compute the COG (center of gravity) of the cluster """
        for point in self.features:
            sum_x = sum_x + point[0]
            sum_y = sum_y + point[1]

        mean_x = sum_x / n_xy
        mean_y = sum_y / n_xy

        """ Compute the x-y MSE (mean squared error) of the cluster in the camera plane """
        for point in self.features:
            sse = sse + (point[0] - mean_x) * (point[0] - mean_x) + (point[1] - mean_y) * (point[1] - mean_y)

        """ Get the average over the number of feature points """
        mse_xy = sse / n_xy

        """ The MSE must be > 0 for any sensible feature cluster """
        if mse_xy == 0 or mse_xy > mse_threshold:
            return ((0, 0, 0), 0, 0, -1)

        """ Throw away the outliers based on the x-y variance """
        max_err = 0
        for point in self.features:
            std_err = ((point[0] - mean_x) * (point[0] - mean_x) + (point[1] - mean_y) * (point[1] - mean_y)) / mse_xy
            if std_err > max_err:
                max_err = std_err
            if std_err > outlier_threshold:
                features_xy.remove(point)
                try:
                    features_z.remove(point)
                except:
                    pass
                n_xy = n_xy - 1

        """ Now do the same for depth """
        if self.use_depth_for_tracking:
            n_z = 0
            for point in features_z:
                try:
                    z = cv.Get2D(self.depth_image, min(rows - 1, int(point[1])), min(cols - 1, int(point[0])))
                except:
                    continue

                """ Depth values can be NaN which should be ignored """
                if isnan(z[0]):
                    continue
                else:
                    sum_z = sum_z + z[0]
                    n_z += 1

            if n_z != 0:
                mean_z = sum_z / n_z

                sse = 0
                n_z = 0
                for point in features_z:
                    try:
                        z = cv.Get2D(self.depth_image, min(rows - 1, int(point[1])), min(cols - 1, int(point[0])))
                        sse = sse + (z[0] - mean_z) * (z[0] - mean_z)
                        n_z += 1
                    except:
                        continue

                if n_z != 0:
                    mse_z = sse / n_z

                """ Throw away the outliers based on depth using percent error rather than
                    standard error since depth values can jump dramatically at object
                    boundaries """
                for point in features_z:
                    try:
                        z = cv.Get2D(self.depth_image, min(rows - 1, int(point[1])), min(cols - 1, int(point[0])))
                    except:
                        continue
                    try:
                        pct_err = abs(z[0] - mean_z) / mean_z
                        if pct_err > self.pct_err_z:
                            features_xy.remove(point)
                    except:
                        pass

        self.features = features_xy

        """ Consider a cluster bad if we have fewer than abs_min_features left """
        if len(self.features) < self.abs_min_features:
            score = -1
        else:
            score = 1

        return ((mean_x, mean_y, mean_z), mse_xy, mse_z, score)

    def key_command_callback(self, req):
        self.key_command = req.command
        return KeyCommandResponse()

    def set_roi_callback(self, req):
        self.features = []
        self.track_box = None
        self.detect_box = (req.roi.x_offset, req.roi.y_offset, req.roi.width, req.roi.height)
        return SetROIResponse()
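
    """ Example (hypothetical client code): another node can hand this tracker a region
        to follow by calling the set_roi service, e.g.:

            from sensor_msgs.msg import RegionOfInterest
            from pi_face_tracker.srv import SetROI
            set_roi = rospy.ServiceProxy('/pi_face_tracker/set_roi', SetROI)
            roi = RegionOfInterest(x_offset=100, y_offset=80, width=50, height=50)
            set_roi(roi)
    """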

def main(args):
    """ Display a help message if appropriate """
    help_message = "Hot keys: \n" \
                   "\tq - quit the program\n" \
                   "\tc - delete current features\n" \
                   "\tt - toggle text captions on/off\n" \
                   "\tf - toggle display of features on/off\n" \
                   "\tn - toggle \"night\" mode on/off\n" \
                   "\ta - toggle auto face tracking on/off\n"

    print help_message

    """ Fire up the Face Tracker node """
    PT = PatchTracker("pi_face_tracker")

    try:
        rospy.spin()
    except KeyboardInterrupt:
        print "Shutting down face tracker node."
        cv.DestroyAllWindows()

if __name__ == '__main__':
    main(sys.argv)