head_camera_tracking.py
#!/usr/bin/env python
# coding: utf-8

# Copyright 2019 RT Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import rospy
import math
import sys

# for ObjectTracker
import cv2
import numpy as np
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
from geometry_msgs.msg import Point

# for NeckYawPitch
import actionlib
from control_msgs.msg import (
    FollowJointTrajectoryAction,
    FollowJointTrajectoryGoal,
    JointTrajectoryControllerState
)
from trajectory_msgs.msg import JointTrajectoryPoint


class ObjectTracker(object):
    def __init__(self):
        self._bridge = CvBridge()
        self._image_sub = rospy.Subscriber("/sciurus17/camera/color/image_raw", Image, self._image_callback, queue_size=1)
        self._image_pub = rospy.Publisher("~output_image", Image, queue_size=1)
        self._object_rect = [0, 0, 0, 0]
        self._image_shape = Point()
        self._object_detected = False

        self._CV_MAJOR_VERSION, _, _ = cv2.__version__.split('.')

        # Reads the cascade files
        # Examples:
        # self._face_cascade = cv2.CascadeClassifier("/home/USER_NAME/.local/lib/python2.7/site-packages/cv2/data/haarcascade_frontalface_alt2.xml")
        # self._eyes_cascade = cv2.CascadeClassifier("/home/USER_NAME/.local/lib/python2.7/site-packages/cv2/data/haarcascade_eye.xml")
        self._face_cascade = ""
        self._eyes_cascade = ""
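
        # A hedged alternative for locating the cascade files, assuming a
        # pip-installed OpenCV that ships the cv2.data module (not present
        # in every build):
        # self._face_cascade = cv2.CascadeClassifier(
        #     cv2.data.haarcascades + "haarcascade_frontalface_alt2.xml")
        # self._eyes_cascade = cv2.CascadeClassifier(
        #     cv2.data.haarcascades + "haarcascade_eye.xml")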


    def _image_callback(self, ros_image):
        try:
            input_image = self._bridge.imgmsg_to_cv2(ros_image, "bgr8")
        except CvBridgeError as e:
            rospy.logerr(e)
            return  # input_image is undefined if the conversion failed

        # Obtains the width and height of the image
        self._image_shape.x = input_image.shape[1]
        self._image_shape.y = input_image.shape[0]

        # Detects the object (a specific color or a face)
        output_image = self._detect_orange_object(input_image)
        # output_image = self._detect_blue_object(input_image)
        # output_image = self._detect_face(input_image)

        self._image_pub.publish(self._bridge.cv2_to_imgmsg(output_image, "bgr8"))


    def get_object_position(self):
        # Returns the object coordinates in a frame whose origin is the image center
        # The coordinates are normalized into -1.0 to 1.0

        object_center = Point(
            self._object_rect[0] + self._object_rect[2] * 0.5,
            self._object_rect[1] + self._object_rect[3] * 0.5,
            0)

        # Converts to the coordinate system whose origin is the image center
        translated_point = Point()
        translated_point.x = object_center.x - self._image_shape.x * 0.5
        translated_point.y = -(object_center.y - self._image_shape.y * 0.5)

        # Normalization
        normalized_point = Point()
        if self._image_shape.x != 0 and self._image_shape.y != 0:
            normalized_point.x = translated_point.x / (self._image_shape.x * 0.5)
            normalized_point.y = translated_point.y / (self._image_shape.y * 0.5)

        return normalized_point
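
        # Worked example (assuming a 640x480 image): an object centered at
        # pixel (480, 120) normalizes to (0.5, 0.5), since
        # x = (480 - 320) / 320 and y = -(120 - 240) / 240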


    def object_detected(self):
        return self._object_detected


    def _detect_color_object(self, bgr_image, lower_color, upper_color):
        # Detects an object with the specified color from the image

        MIN_OBJECT_SIZE = 1000  # px * px

        # Converts the BGR image to the HSV color space
        hsv = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2HSV)

        # Creates a mask to extract the color
        mask = cv2.inRange(hsv, lower_color, upper_color)

        # Extracts the contours from the mask
        # (the return signature of findContours changed in OpenCV 4)
        if self._CV_MAJOR_VERSION == '4':
            contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        else:
            _, contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

        # Converts each contour to a bounding rectangle and stores it in a list
        rects = []
        for contour in contours:
            approx = cv2.convexHull(contour)
            rect = cv2.boundingRect(approx)
            rects.append(rect)

        self._object_detected = False
        if len(rects) > 0:
            # Extracts the largest rectangle
            rect = max(rects, key=(lambda x: x[2] * x[3]))

            # Treats the object as not detected if the rectangle is too small
            if rect[2] * rect[3] > MIN_OBJECT_SIZE:
                # Draws the rectangle on the image
                cv2.rectangle(bgr_image,
                              (rect[0], rect[1]),
                              (rect[0] + rect[2], rect[1] + rect[3]),
                              (0, 0, 255), thickness=2)

                self._object_rect = rect
                self._object_detected = True

        return bgr_image


    def _detect_orange_object(self, bgr_image):
        # OpenCV HSV value ranges:
        # H: 0 ~ 179 (0 ~ 360 deg)
        # S: 0 ~ 255 (0 ~ 100%)
        # V: 0 ~ 255 (0 ~ 100%)
        lower_orange = np.array([5, 127, 127])
        upper_orange = np.array([20, 255, 255])

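        # Note: OpenCV stores hue as degrees / 2, so a pure orange hue of
        # about 30 deg maps to H = 30 / 2 = 15, inside the [5, 20] band above
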
        return self._detect_color_object(bgr_image, lower_orange, upper_orange)


    def _detect_blue_object(self, bgr_image):
        # OpenCV HSV value ranges:
        # H: 0 ~ 179 (0 ~ 360 deg)
        # S: 0 ~ 255 (0 ~ 100%)
        # V: 0 ~ 255 (0 ~ 100%)
        lower_blue = np.array([100, 127, 127])
        upper_blue = np.array([110, 255, 255])

        return self._detect_color_object(bgr_image, lower_blue, upper_blue)


    def _detect_face(self, bgr_image):
        # Detects a frontal face in the image

        SCALE = 4

        # Checks that the cascade files have been set
        if self._face_cascade == "" or self._eyes_cascade == "":
            rospy.logerr("cascade file is not set")
            return bgr_image

        # Converts the BGR image to grayscale
        gray = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2GRAY)

        # Shrinks the image to shorten the processing time
        # (integer division keeps cv2.resize happy on Python 3 as well)
        height, width = gray.shape[:2]
        small_gray = cv2.resize(gray, (width // SCALE, height // SCALE))

        # Runs face detection with the cascade file
        small_faces = self._face_cascade.detectMultiScale(small_gray)

        self._object_detected = False
        for small_face in small_faces:
            # Scales the face area back to the original size
            face = small_face * SCALE

            # Extracts the face area from the grayscale image
            roi_gray = gray[
                face[1]:face[1] + face[3],
                face[0]:face[0] + face[2]]

            # Detects the eyes inside the face area
            eyes = self._eyes_cascade.detectMultiScale(roi_gray)

            # If eyes are detected, records the rect (coordinates and size)
            # of the face and stops searching
            if len(eyes) > 0:
                cv2.rectangle(bgr_image,
                              (face[0], face[1]),
                              (face[0] + face[2], face[1] + face[3]),
                              (0, 0, 255), 2)

                self._object_rect = face
                self._object_detected = True
                break

        return bgr_image
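
    # To track faces instead of a colored object (a sketch following the
    # commented-out examples in __init__ and _image_callback): set
    # _face_cascade and _eyes_cascade to CascadeClassifier instances, then
    # call self._detect_face(input_image) in _image_callback instead of
    # self._detect_orange_object(input_image).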


class NeckYawPitch(object):
    def __init__(self):
        self.__client = actionlib.SimpleActionClient("/sciurus17/controller3/neck_controller/follow_joint_trajectory",
                                                     FollowJointTrajectoryAction)
        if not self.__client.wait_for_server(rospy.Duration(5.0)):
            rospy.logerr("Action Server Not Found")
            rospy.signal_shutdown("Action Server not found")
            sys.exit(1)

        self._state_sub = rospy.Subscriber("/sciurus17/controller3/neck_controller/state",
                                           JointTrajectoryControllerState, self._state_callback, queue_size=1)

        self._state_received = False
        self._current_yaw = 0.0    # Degrees
        self._current_pitch = 0.0  # Degrees


    def _state_callback(self, state):
        # Stores the current neck angles reported by the controller
        self._state_received = True
        yaw_radian = state.actual.positions[0]
        pitch_radian = state.actual.positions[1]

        self._current_yaw = math.degrees(yaw_radian)
        self._current_pitch = math.degrees(pitch_radian)


    def state_received(self):
        return self._state_received


    def get_current_yaw(self):
        return self._current_yaw


    def get_current_pitch(self):
        return self._current_pitch


    def set_angle(self, yaw_angle, pitch_angle, goal_secs=1.0e-9):
        # Moves the neck to the specified angles (given in radians)
        goal = FollowJointTrajectoryGoal()
        goal.trajectory.joint_names = ["neck_yaw_joint", "neck_pitch_joint"]

        yawpoint = JointTrajectoryPoint()
        yawpoint.positions.append(yaw_angle)
        yawpoint.positions.append(pitch_angle)
        yawpoint.time_from_start = rospy.Duration(goal_secs)
        goal.trajectory.points.append(yawpoint)

        self.__client.send_goal(goal)
        self.__client.wait_for_result(rospy.Duration(0.1))
        return self.__client.get_result()
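
        # Usage sketch (assumed values): command yaw = 30 deg and
        # pitch = -10 deg, reaching the goal in about 2 seconds:
        #   neck.set_angle(math.radians(30), math.radians(-10), goal_secs=2.0)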


def hook_shutdown():
    # Sets the neck angle to 0 deg when shutting down
    neck.set_angle(math.radians(0), math.radians(0), 3.0)


def main():
    r = rospy.Rate(60)

    rospy.on_shutdown(hook_shutdown)

    # Thresholds for the object tracking,
    # in the normalized image coordinates (-1.0 to 1.0)
    THRESH_X = 0.05
    THRESH_Y = 0.05

    # The initial neck angles in degrees
    INITIAL_YAW_ANGLE = 0
    INITIAL_PITCH_ANGLE = 0

    # The neck angle limits (max/min) in degrees
    MAX_YAW_ANGLE = 120
    MIN_YAW_ANGLE = -120
    MAX_PITCH_ANGLE = 50
    MIN_PITCH_ANGLE = -70

    # The control gains of the neck
    # The neck moves more per cycle if the gain is bigger
    OPERATION_GAIN_X = 5.0
    OPERATION_GAIN_Y = 5.0

    # The step angle in degrees when returning to the initial pose
    RESET_OPERATION_ANGLE = 3

    # Receives the current neck angles
    # Starting the tracking from the current angles lets the neck
    # return to the initial pose slowly
    while not neck.state_received():
        pass
    yaw_angle = neck.get_current_yaw()
    pitch_angle = neck.get_current_pitch()

    look_object = False
    detection_timestamp = rospy.Time.now()

    while not rospy.is_shutdown():
        # Receives the normalized object coordinates
        object_position = object_tracker.get_object_position()

        if object_tracker.object_detected():
            detection_timestamp = rospy.Time.now()
            look_object = True
        else:
            lost_time = rospy.Time.now() - detection_timestamp
            # If no object has been detected for a certain amount of time,
            # the neck returns to the initial angles
            if lost_time.to_sec() > 1.0:
                look_object = False

        if look_object:
            # Moves the neck so that the object comes to the center of the image
            if math.fabs(object_position.x) > THRESH_X:
                yaw_angle += -object_position.x * OPERATION_GAIN_X

            if math.fabs(object_position.y) > THRESH_Y:
                pitch_angle += object_position.y * OPERATION_GAIN_Y
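
            # e.g. an object at normalized x = 0.2 shifts the yaw target by
            # -0.2 * 5.0 = -1.0 deg on this pass: a simple proportional
            # controller evaluated at 60 Hz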

            # Limits the neck angles
            if yaw_angle > MAX_YAW_ANGLE:
                yaw_angle = MAX_YAW_ANGLE
            if yaw_angle < MIN_YAW_ANGLE:
                yaw_angle = MIN_YAW_ANGLE

            if pitch_angle > MAX_PITCH_ANGLE:
                pitch_angle = MAX_PITCH_ANGLE
            if pitch_angle < MIN_PITCH_ANGLE:
                pitch_angle = MIN_PITCH_ANGLE

        else:
            # Returns to the initial angles slowly
            diff_yaw_angle = yaw_angle - INITIAL_YAW_ANGLE
            if math.fabs(diff_yaw_angle) > RESET_OPERATION_ANGLE:
                yaw_angle -= math.copysign(RESET_OPERATION_ANGLE, diff_yaw_angle)
            else:
                yaw_angle = INITIAL_YAW_ANGLE

            diff_pitch_angle = pitch_angle - INITIAL_PITCH_ANGLE
            if math.fabs(diff_pitch_angle) > RESET_OPERATION_ANGLE:
                pitch_angle -= math.copysign(RESET_OPERATION_ANGLE, diff_pitch_angle)
            else:
                pitch_angle = INITIAL_PITCH_ANGLE

        neck.set_angle(math.radians(yaw_angle), math.radians(pitch_angle))

        r.sleep()


if __name__ == '__main__':
    rospy.init_node("head_camera_tracking")

    neck = NeckYawPitch()
    object_tracker = ObjectTracker()

    main()

