head_camera_tracking.py
#!/usr/bin/env python
# coding: utf-8

import rospy
import math
import sys

# for ObjectTracker
import cv2
import numpy as np
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
from geometry_msgs.msg import Point

# for NeckYawPitch
import actionlib
from control_msgs.msg import (
    FollowJointTrajectoryAction,
    FollowJointTrajectoryGoal,
    JointTrajectoryControllerState
)
from trajectory_msgs.msg import JointTrajectoryPoint

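# Detects an object (a specific color or a face) in the head camera image
# and publishes the annotated image on ~output_image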
class ObjectTracker(object):
    def __init__(self):
        self._bridge = CvBridge()
        self._image_sub = rospy.Subscriber("/sciurus17/camera/color/image_raw", Image, self._image_callback, queue_size=1)
        self._image_pub = rospy.Publisher("~output_image", Image, queue_size=1)
        self._object_rect = [0,0,0,0]
        self._image_shape = Point()
        self._object_detected = False

        self._CV_MAJOR_VERSION, _, _ = cv2.__version__.split('.')
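        # The major version is kept because findContours() returns
        # (contours, hierarchy) in OpenCV 4 but (image, contours, hierarchy) in OpenCV 3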

        # Load the cascade files
        # Example:
        # self._face_cascade = cv2.CascadeClassifier("/home/USER_NAME/.local/lib/python2.7/site-packages/cv2/data/haarcascade_frontalface_alt2.xml")
        # self._eyes_cascade = cv2.CascadeClassifier("/home/USER_NAME/.local/lib/python2.7/site-packages/cv2/data/haarcascade_eye.xml")
        self._face_cascade = ""
        self._eyes_cascade = ""


    def _image_callback(self, ros_image):
        try:
            input_image = self._bridge.imgmsg_to_cv2(ros_image, "bgr8")
        except CvBridgeError as e:
            rospy.logerr(e)
            # input_image is unbound if the conversion failed, so skip this frame
            return

        # Get the width and height of the image
        self._image_shape.x = input_image.shape[1]
        self._image_shape.y = input_image.shape[0]

        # Detect an object (a specific color or a face)
        output_image = self._detect_orange_object(input_image)
        # output_image = self._detect_blue_object(input_image)
        # output_image = self._detect_face(input_image)

        self._image_pub.publish(self._bridge.cv2_to_imgmsg(output_image, "bgr8"))


    def get_object_position(self):
        # Returns the object position in a coordinate system whose origin is the image center
        # The position is normalized to -1.0 ~ 1.0
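        # e.g. an object at the image center maps to (0.0, 0.0),
        # one at the top-right corner of the image to (1.0, 1.0)
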
        object_center = Point(
                self._object_rect[0] + self._object_rect[2] * 0.5,
                self._object_rect[1] + self._object_rect[3] * 0.5,
                0)

        # Translate to a coordinate system whose origin is the image center
        translated_point = Point()
        translated_point.x = object_center.x - self._image_shape.x * 0.5
        translated_point.y = -(object_center.y - self._image_shape.y * 0.5)

        # Normalize
        normalized_point = Point()
        if self._image_shape.x != 0 and self._image_shape.y != 0:
            normalized_point.x = translated_point.x / (self._image_shape.x * 0.5)
            normalized_point.y = translated_point.y / (self._image_shape.y * 0.5)

        return normalized_point


    def object_detected(self):
        return self._object_detected


    def _detect_color_object(self, bgr_image, lower_color, upper_color):
        # Detect an object of the specified color in the image

        MIN_OBJECT_SIZE = 1000 # px * px

        # Convert the BGR image to the HSV color space
        hsv = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2HSV)

        # Create a mask that extracts the specified color
        mask = cv2.inRange(hsv, lower_color, upper_color)

        # Extract contours from the mask
        if self._CV_MAJOR_VERSION == '4':
            contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        else:
            _, contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

        # Convert the contours to bounding rectangles and store them in a list
        rects = []
        for contour in contours:
            approx = cv2.convexHull(contour)
            rect = cv2.boundingRect(approx)
            rects.append(rect)

        self._object_detected = False
        if len(rects) > 0:
            # Pick the largest rectangle
            rect = max(rects, key=(lambda x: x[2] * x[3]))

            # Ignore the rectangle if it is too small
            if rect[2] * rect[3] > MIN_OBJECT_SIZE:
                # Draw the extracted rectangle on the image
                cv2.rectangle(bgr_image,
                        (rect[0], rect[1]),
                        (rect[0] + rect[2], rect[1] + rect[3]),
                        (0, 0, 255), thickness=2)

                self._object_rect = rect
                self._object_detected = True

        return bgr_image


    def _detect_orange_object(self, bgr_image):
        # H: 0 ~ 179 (0 ~ 360°)
        # S: 0 ~ 255 (0 ~ 100%)
        # V: 0 ~ 255 (0 ~ 100%)
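        # OpenCV stores hue in half degrees, so hue 5 ~ 20 is 10° ~ 40°, i.e. orange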
        lower_orange = np.array([5,127,127])
        upper_orange = np.array([20,255,255])

        return self._detect_color_object(bgr_image, lower_orange, upper_orange)


    def _detect_blue_object(self, bgr_image):
        # H: 0 ~ 179 (0 ~ 360°)
        # S: 0 ~ 255 (0 ~ 100%)
        # V: 0 ~ 255 (0 ~ 100%)
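        # hue 100 ~ 110 is 200° ~ 220°, i.e. blue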
        lower_blue = np.array([100,127,127])
        upper_blue = np.array([110,255,255])

        return self._detect_color_object(bgr_image, lower_blue, upper_blue)


    def _detect_face(self, bgr_image):
        # Detect a (frontal) face in the image

        SCALE = 4

        # Check that the cascade files have been set
        if self._face_cascade == "" or self._eyes_cascade == "":
            rospy.logerr("cascade files are not set")
            return bgr_image

        # Convert the BGR image to grayscale
        gray = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2GRAY)

        # Shrink the image to reduce processing time
        # (integer division keeps the size integral under Python 3 as well)
        height, width = gray.shape[:2]
        small_gray = cv2.resize(gray, (width // SCALE, height // SCALE))

        # Detect faces with the cascade file
        small_faces = self._face_cascade.detectMultiScale(small_gray)

        self._object_detected = False
        for small_face in small_faces:
            # Scale the face region back to the original image size
            face = small_face*SCALE

            # Extract the face region from the grayscale image
            roi_gray = gray[
                    face[1]:face[1]+face[3],
                    face[0]:face[0]+face[2]]

            # Detect eyes inside the face region
            eyes = self._eyes_cascade.detectMultiScale(roi_gray)

            # If eyes are detected, record the face rect (position and size)
            if len(eyes) > 0:
                cv2.rectangle(bgr_image,
                        (face[0],face[1]),
                        (face[0]+face[2], face[1]+face[3]),
                        (0,0,255),2)

                self._object_rect = face
                self._object_detected = True
                break

        return bgr_image


class NeckYawPitch(object):
    def __init__(self):
        self.__client = actionlib.SimpleActionClient("/sciurus17/controller3/neck_controller/follow_joint_trajectory",
                                                     FollowJointTrajectoryAction)
        # Wait up to 5 seconds for the action server (the original waited twice in a row)
        if not self.__client.wait_for_server(rospy.Duration(5.0)):
            rospy.logerr("Action Server Not Found")
            rospy.signal_shutdown("Action Server not found")
            sys.exit(1)

        self._state_sub = rospy.Subscriber("/sciurus17/controller3/neck_controller/state",
                JointTrajectoryControllerState, self._state_callback, queue_size=1)

        self._state_received = False
        self._current_yaw = 0.0 # Degree
        self._current_pitch = 0.0 # Degree


    def _state_callback(self, state):
        # Store the current neck angles
        self._state_received = True
        yaw_radian = state.actual.positions[0]
        pitch_radian = state.actual.positions[1]

        self._current_yaw = math.degrees(yaw_radian)
        self._current_pitch = math.degrees(pitch_radian)


    def state_received(self):
        return self._state_received


    def get_current_yaw(self):
        return self._current_yaw


    def get_current_pitch(self):
        return self._current_pitch


    def set_angle(self, yaw_angle, pitch_angle, goal_secs=1.0e-9):
        # Move the neck to the given angles
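        # yaw_angle and pitch_angle are in radians (callers convert with math.radians);
        # the near-zero default goal_secs makes the controller head for the new target immediately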
        goal = FollowJointTrajectoryGoal()
        goal.trajectory.joint_names = ["neck_yaw_joint", "neck_pitch_joint"]

        yawpoint = JointTrajectoryPoint()
        yawpoint.positions.append(yaw_angle)
        yawpoint.positions.append(pitch_angle)
        yawpoint.time_from_start = rospy.Duration(goal_secs)
        goal.trajectory.points.append(yawpoint)

        self.__client.send_goal(goal)
        self.__client.wait_for_result(rospy.Duration(0.1))
        return self.__client.get_result()


def hook_shutdown():
    # Return the neck to 0 degrees on shutdown
    neck.set_angle(math.radians(0), math.radians(0), 3.0)


def main():
    r = rospy.Rate(60)

    rospy.on_shutdown(hook_shutdown)

    # Thresholds for object tracking,
    # in the normalized coordinate system (-1.0 ~ 1.0, not pixels)
    THRESH_X = 0.05
    THRESH_Y = 0.05

    # Initial neck angles (degrees)
    INITIAL_YAW_ANGLE = 0
    INITIAL_PITCH_ANGLE = 0

    # Neck angle limits (degrees)
    MAX_YAW_ANGLE = 120
    MIN_YAW_ANGLE = -120
    MAX_PITCH_ANGLE = 50
    MIN_PITCH_ANGLE = -70

    # Neck control gains
    # Larger values move the neck further per step
    OPERATION_GAIN_X = 5.0
    OPERATION_GAIN_Y = 5.0

    # Angle step used when returning to the initial angles (degrees)
    RESET_OPERATION_ANGLE = 3

    # Get the current neck angles
    # Starting from the current angles lets the neck return to the initial angles slowly
    while not neck.state_received():
        pass
    yaw_angle = neck.get_current_yaw()
    pitch_angle = neck.get_current_pitch()

    look_object = False
    detection_timestamp = rospy.Time.now()

    while not rospy.is_shutdown():
        # Get the normalized object position
        object_position = object_tracker.get_object_position()

        if object_tracker.object_detected():
            detection_timestamp = rospy.Time.now()
            look_object = True
        else:
            lost_time = rospy.Time.now() - detection_timestamp
            # Return to the initial angles if the object has been lost for a while
            if lost_time.to_sec() > 1.0:
                look_object = False

        if look_object:
            # Move the neck so that the object comes to the image center
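            # Simple proportional control: the normalized position is the error
            # from the image center, and the gain converts it into an angle step in degrees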
            if math.fabs(object_position.x) > THRESH_X:
                yaw_angle += -object_position.x * OPERATION_GAIN_X

            if math.fabs(object_position.y) > THRESH_Y:
                pitch_angle += object_position.y * OPERATION_GAIN_Y

            # Clamp the neck angles to the limits
            if yaw_angle > MAX_YAW_ANGLE:
                yaw_angle = MAX_YAW_ANGLE
            if yaw_angle < MIN_YAW_ANGLE:
                yaw_angle = MIN_YAW_ANGLE

            if pitch_angle > MAX_PITCH_ANGLE:
                pitch_angle = MAX_PITCH_ANGLE
            if pitch_angle < MIN_PITCH_ANGLE:
                pitch_angle = MIN_PITCH_ANGLE

        else:
            # Return slowly to the initial angles
            diff_yaw_angle = yaw_angle - INITIAL_YAW_ANGLE
            if math.fabs(diff_yaw_angle) > RESET_OPERATION_ANGLE:
                yaw_angle -= math.copysign(RESET_OPERATION_ANGLE, diff_yaw_angle)
            else:
                yaw_angle = INITIAL_YAW_ANGLE

            diff_pitch_angle = pitch_angle - INITIAL_PITCH_ANGLE
            if math.fabs(diff_pitch_angle) > RESET_OPERATION_ANGLE:
                pitch_angle -= math.copysign(RESET_OPERATION_ANGLE, diff_pitch_angle)
            else:
                pitch_angle = INITIAL_PITCH_ANGLE

        neck.set_angle(math.radians(yaw_angle), math.radians(pitch_angle))

        r.sleep()


if __name__ == '__main__':
    rospy.init_node("head_camera_tracking")

    neck = NeckYawPitch()
    object_tracker = ObjectTracker()

    main()



sciurus17_examples
Author(s): Daisuke Sato, Hiroyuki Nomura
autogenerated on Sun Oct 2 2022 02:21:39