depth_camera_tracking.py
#!/usr/bin/env python
# coding: utf-8

# Copyright 2019 RT Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import rospy
import math
import sys

# for ObjectTracker
import cv2
import numpy as np
from std_msgs.msg import Int32
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
from geometry_msgs.msg import Point

# for NeckYawPitch
import actionlib
from control_msgs.msg import (
    FollowJointTrajectoryAction,
    FollowJointTrajectoryGoal,
    JointTrajectoryControllerState
)
from trajectory_msgs.msg import JointTrajectoryPoint


class ObjectTracker(object):
    def __init__(self):
        self._bridge = CvBridge()

        # 頭カメラのカラー画像
        # The color image from the head camera
        self._image_sub = rospy.Subscriber("/sciurus17/camera/color/image_raw", Image, self._image_callback, queue_size=1)
        # 頭カメラの深度画像
        # カラー画像と視界が一致するように補正されている
        # The depth image from the head camera
        # It is aligned to the color image to match the view
        self._depth_sub = rospy.Subscriber("/sciurus17/camera/aligned_depth_to_color/image_raw", Image, self._depth_callback, queue_size=1)

        self._image_pub = rospy.Publisher("~output_image", Image, queue_size=1)
        self._median_depth_pub = rospy.Publisher("~output_median_depth", Int32, queue_size=1)

        self._color_image = None
        self._median_depth = 0

        self._object_rect = [0,0,0,0]
        self._image_shape = Point()
        self._object_detected = False

        self._CV_MAJOR_VERSION, _, _ = cv2.__version__.split('.')


    def _image_callback(self, ros_image):
        try:
            input_image = self._bridge.imgmsg_to_cv2(ros_image, "bgr8")
        except CvBridgeError as e:
            rospy.logerr(e)
            return

        self._color_image = input_image


    def _depth_callback(self, ros_image):
        try:
            input_image = self._bridge.imgmsg_to_cv2(ros_image, "passthrough")
        except CvBridgeError as e:
            rospy.logerr(e)
            return

        # 画像のwidth, heightを取得
        # Obtains the width and height of the image
        self._image_shape.x = input_image.shape[1]
        self._image_shape.y = input_image.shape[0]

        output_image = self._detect_object(input_image)
        if output_image is not False:
            self._image_pub.publish(self._bridge.cv2_to_imgmsg(output_image, "bgr8"))

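        # The median depth is published in millimeters (the unit of the
        # aligned depth image); it is reset to 0 when no object is detected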
        self._median_depth_pub.publish(self._median_depth)


    def get_object_position(self):
        # 画像中心を0, 0とした座標系におけるオブジェクトの座標を出力
        # オブジェクトの座標は-1.0 ~ 1.0に正規化される
        # Returns the object coordinates where the image center is 0, 0
        # The coordinates are normalized into -1.0 to 1.0

        object_center = Point(
            self._object_rect[0] + self._object_rect[2] * 0.5,
            self._object_rect[1] + self._object_rect[3] * 0.5,
            0)

        # 画像の中心を0, 0とした座標系に変換
        # Converts the coordinate where the image center is 0, 0
        translated_point = Point()
        translated_point.x = object_center.x - self._image_shape.x * 0.5
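        # The y sign is flipped because image coordinates grow downward;
        # after this, a positive y means the object is above the image center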
        translated_point.y = -(object_center.y - self._image_shape.y * 0.5)

        # 正規化
        # Normalize
        normalized_point = Point()
        if self._image_shape.x != 0 and self._image_shape.y != 0:
            normalized_point.x = translated_point.x / (self._image_shape.x * 0.5)
            normalized_point.y = translated_point.y / (self._image_shape.y * 0.5)

        return normalized_point


    def object_detected(self):
        return self._object_detected


    def _detect_object(self, input_depth_image):
        # 検出するオブジェクトの大きさを制限する
        # Limits the size of the object to be detected
        MIN_OBJECT_SIZE = 10000 # px * px
        MAX_OBJECT_SIZE = 80000 # px * px

        # 検出範囲を4段階設ける
        # 単位はmm
        # Sets 4 layers of detection range
        # Unit is in mm
        DETECTION_DEPTH = [
            (500, 700),
            (600, 800),
            (700, 900),
            (800, 1000)]
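        # The four ranges overlap and are ordered nearest first; the loop
        # below breaks at the first layer that yields a valid object, so
        # closer objects take priority over farther ones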
        # 検出したオブジェクトを囲う長方形の色
        # The color of the rectangle around the detected object
        RECT_COLOR = [
            (0, 0, 255),
            (0, 255, 0),
            (255, 0, 0),
            (255, 255, 255)]

        # カメラのカラー画像を加工して出力する
        # カラー画像を受け取ってない場合はFalseを返す
        # Outputs an edited color image
        # Returns False if no color image has been received
        output_image = self._color_image
        if output_image is None:
            return False

        self._object_detected = False
        self._median_depth = 0
        for i, depth in enumerate(DETECTION_DEPTH):
            # depth[0] ~ depth[1]の範囲でオブジェクトを抽出する
            # Extracts an object between depth[0] and depth[1]
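            # cv2.inRange sets a mask pixel to 255 when its depth value (mm)
            # falls inside [depth[0], depth[1]], and to 0 otherwise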
            mask = cv2.inRange(input_depth_image, depth[0], depth[1])

            # マスクから輪郭を抽出
            # Extracts the contours with the mask
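            # cv2.findContours returns (contours, hierarchy) in OpenCV 4,
            # but (image, contours, hierarchy) in OpenCV 3, hence the
            # version check against the value stored in __init__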
            if self._CV_MAJOR_VERSION == '4':
                contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
            else:
                _, contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

            # 輪郭を長方形に変換し、配列に格納
            # Converts the contours into bounding rectangles and stores them in a list
            rects = []
            for contour in contours:
                approx = cv2.convexHull(contour)
                rect = cv2.boundingRect(approx)
                rects.append(rect)

            if len(rects) > 0:
                # 最も大きい長方形を抽出
                # Extracts the largest rectangle
                rect = max(rects, key=(lambda x: x[2] * x[3]))

                rect_size = rect[2] * rect[3]

                # 長方形の大きさが指定範囲内であるかチェック
                # Checks if the size of the rectangle is within the limits
                if rect_size > MIN_OBJECT_SIZE and rect_size < MAX_OBJECT_SIZE:
                    # 抽出した長方形を画像に描画する
                    # Draws the rectangle on the image
                    cv2.rectangle(output_image,
                        (rect[0], rect[1]),
                        (rect[0] + rect[2], rect[1] + rect[3]),
                        RECT_COLOR[i], thickness=2)

                    self._object_rect = rect
                    self._object_detected = True

                    # 深度画像から長方形の領域を切り取る
                    # Clips the rectangle region from the depth image
                    object_depth = input_depth_image[
                        rect[1]:rect[1]+rect[3],
                        rect[0]:rect[0]+rect[2]]
                    # 深度の中央値を求める
                    # Calculates the median of the depth
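                    # The median (rather than the mean) keeps the value
                    # robust against background pixels that fall inside the
                    # corners of the bounding rectangle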
                    self._median_depth = int(np.median(object_depth))

                    # 中央値のテキストを出力画像に描画する
                    # Writes the median on the output image
                    cv2.putText(output_image, str(self._median_depth),
                        (rect[0], rect[1]+30),
                        cv2.FONT_HERSHEY_SIMPLEX,
                        1, RECT_COLOR[i], 2)
                    break

        return output_image


class NeckYawPitch(object):
    def __init__(self):
        self.__client = actionlib.SimpleActionClient("/sciurus17/controller3/neck_controller/follow_joint_trajectory",
                                                     FollowJointTrajectoryAction)
        if not self.__client.wait_for_server(rospy.Duration(5.0)):
            rospy.logerr("Action Server Not Found")
            rospy.signal_shutdown("Action Server not found")
            sys.exit(1)

        self._state_sub = rospy.Subscriber("/sciurus17/controller3/neck_controller/state",
                                           JointTrajectoryControllerState, self._state_callback, queue_size=1)

        self._state_received = False
        self._current_yaw = 0.0 # Degree
        self._current_pitch = 0.0 # Degree


    def _state_callback(self, state):
        # 首の現在角度を取得
        # Obtains the current neck angle

        self._state_received = True
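        # positions[0] and positions[1] are assumed to follow the order of
        # the neck controller's joints: neck_yaw_joint, then neck_pitch_joint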
        yaw_radian = state.actual.positions[0]
        pitch_radian = state.actual.positions[1]

        self._current_yaw = math.degrees(yaw_radian)
        self._current_pitch = math.degrees(pitch_radian)


    def state_received(self):
        return self._state_received


    def get_current_yaw(self):
        return self._current_yaw


    def get_current_pitch(self):
        return self._current_pitch


    def set_angle(self, yaw_angle, pitch_angle, goal_secs=1.0e-9):
        # 首を指定角度に動かす
        # Moves the neck to the specified angle
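        # The default goal_secs of 1.0e-9 asks the controller to reach the
        # target almost immediately, which suits the 60 Hz tracking loop;
        # hook_shutdown passes 3.0 s instead for a slow return to zero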
        goal = FollowJointTrajectoryGoal()
        goal.trajectory.joint_names = ["neck_yaw_joint", "neck_pitch_joint"]

        yawpoint = JointTrajectoryPoint()
        yawpoint.positions.append(yaw_angle)
        yawpoint.positions.append(pitch_angle)
        yawpoint.time_from_start = rospy.Duration(goal_secs)
        goal.trajectory.points.append(yawpoint)

        self.__client.send_goal(goal)
        self.__client.wait_for_result(rospy.Duration(0.1))
        return self.__client.get_result()


def hook_shutdown():
    # shutdown時に0度へ戻る
    # Sets the neck angle to 0 deg when shutting down
    neck.set_angle(math.radians(0), math.radians(0), 3.0)


def main():
    r = rospy.Rate(60)

    rospy.on_shutdown(hook_shutdown)

    # オブジェクト追跡のしきい値
    # 正規化された座標系(px, px)
    # The object tracking thresholds,
    # in the normalized coordinate system
    THRESH_X = 0.05
    THRESH_Y = 0.05

    # 首の初期角度 Degree
    # The initial angle of the neck in degrees
    INITIAL_YAW_ANGLE = 0
    INITIAL_PITCH_ANGLE = 0

    # 首の制御角度リミット値 Degree
    # The neck angle limits (max/min) in degrees
    MAX_YAW_ANGLE = 120
    MIN_YAW_ANGLE = -120
    MAX_PITCH_ANGLE = 50
    MIN_PITCH_ANGLE = -70

    # 首の制御量
    # 値が大きいほど首を大きく動かす
    # The control gain of the neck
    # The neck moves more if the gain is bigger
    OPERATION_GAIN_X = 5.0
    OPERATION_GAIN_Y = 5.0

    # 初期角度に戻る時の制御角度 Degree
    # The angle step in degrees when returning to the initial pose
    RESET_OPERATION_ANGLE = 3

    # 現在の首角度を取得する
    # ここで現在の首角度を取得することで、ゆっくり初期角度へ戻る
    # Receives the current neck angle
    # By receiving the neck angle here, it moves to the initial pose slowly
    while not neck.state_received():
        pass
    yaw_angle = neck.get_current_yaw()
    pitch_angle = neck.get_current_pitch()

    look_object = False
    detection_timestamp = rospy.Time.now()

    while not rospy.is_shutdown():
        # 正規化されたオブジェクトの座標を取得
        # Receives the normalized object coordinates
        object_position = object_tracker.get_object_position()

        if object_tracker.object_detected():
            detection_timestamp = rospy.Time.now()
            look_object = True
        else:
            lost_time = rospy.Time.now() - detection_timestamp
            # 一定時間オブジェクトが見つからない場合は初期角度に戻る
            # If it doesn't detect any object for a certain amount of time,
            # it will return to the initial angle
            if lost_time.to_sec() > 1.0:
                look_object = False

        if look_object:
            # オブジェクトが画像中心にくるように首を動かす
            # Moves the neck so that the object comes to the middle of the image
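            # Simple proportional control: the normalized error times the
            # gain gives the correction step in degrees for this cycle.
            # The yaw step is negated because an object right of center
            # (positive x) needs the head to turn right, which is assumed
            # to be negative yaw (REP-103 style joint convention)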
            if math.fabs(object_position.x) > THRESH_X:
                yaw_angle += -object_position.x * OPERATION_GAIN_X

            if math.fabs(object_position.y) > THRESH_Y:
                pitch_angle += object_position.y * OPERATION_GAIN_Y

            # 首の制御角度を制限する
            # Limits the neck angles
            if yaw_angle > MAX_YAW_ANGLE:
                yaw_angle = MAX_YAW_ANGLE
            if yaw_angle < MIN_YAW_ANGLE:
                yaw_angle = MIN_YAW_ANGLE

            if pitch_angle > MAX_PITCH_ANGLE:
                pitch_angle = MAX_PITCH_ANGLE
            if pitch_angle < MIN_PITCH_ANGLE:
                pitch_angle = MIN_PITCH_ANGLE

        else:
            # ゆっくり初期角度へ戻る
            # Returns to the initial angle slowly
            diff_yaw_angle = yaw_angle - INITIAL_YAW_ANGLE
            if math.fabs(diff_yaw_angle) > RESET_OPERATION_ANGLE:
                yaw_angle -= math.copysign(RESET_OPERATION_ANGLE, diff_yaw_angle)
            else:
                yaw_angle = INITIAL_YAW_ANGLE

            diff_pitch_angle = pitch_angle - INITIAL_PITCH_ANGLE
            if math.fabs(diff_pitch_angle) > RESET_OPERATION_ANGLE:
                pitch_angle -= math.copysign(RESET_OPERATION_ANGLE, diff_pitch_angle)
            else:
                pitch_angle = INITIAL_PITCH_ANGLE

        neck.set_angle(math.radians(yaw_angle), math.radians(pitch_angle))

        r.sleep()


if __name__ == '__main__':
    rospy.init_node("depth_camera_tracking")

    neck = NeckYawPitch()
    object_tracker = ObjectTracker()

    main()