depth_camera_tracking.py
#!/usr/bin/env python
# coding: utf-8

import rospy
import math
import sys

# for ObjectTracker
import cv2
import numpy as np
from std_msgs.msg import Int32
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
from geometry_msgs.msg import Point

# for NeckYawPitch
import actionlib
from control_msgs.msg import (
    FollowJointTrajectoryAction,
    FollowJointTrajectoryGoal,
    JointTrajectoryControllerState
)
from trajectory_msgs.msg import JointTrajectoryPoint

class ObjectTracker:
    def __init__(self):
        self._bridge = CvBridge()

        # Color image from the head camera
        self._image_sub = rospy.Subscriber("/sciurus17/camera/color/image_raw", Image, self._image_callback, queue_size=1)
        # Depth image from the head camera,
        # aligned so that its field of view matches the color image
        self._depth_sub = rospy.Subscriber("/sciurus17/camera/aligned_depth_to_color/image_raw", Image, self._depth_callback, queue_size=1)

        self._image_pub = rospy.Publisher("~output_image", Image, queue_size=1)
        self._median_depth_pub = rospy.Publisher("~output_median_depth", Int32, queue_size=1)

        self._color_image = None
        self._median_depth = 0

        self._object_rect = [0, 0, 0, 0]
        self._image_shape = Point()
        self._object_detected = False

        self._CV_MAJOR_VERSION, _, _ = cv2.__version__.split('.')
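        # The OpenCV major version is stored because cv2.findContours()
        # changed its return value: OpenCV 3 returns (image, contours,
        # hierarchy) while OpenCV 4 returns only (contours, hierarchy).
        # _detect_object() below branches on this.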


    def _image_callback(self, ros_image):
        try:
            input_image = self._bridge.imgmsg_to_cv2(ros_image, "bgr8")
        except CvBridgeError as e:
            rospy.logerr(e)
            return

        self._color_image = input_image


    def _depth_callback(self, ros_image):
        try:
            input_image = self._bridge.imgmsg_to_cv2(ros_image, "passthrough")
        except CvBridgeError as e:
            rospy.logerr(e)
            return

        # Store the image width and height
        self._image_shape.x = input_image.shape[1]
        self._image_shape.y = input_image.shape[0]

        output_image = self._detect_object(input_image)
        if output_image is not False:
            self._image_pub.publish(self._bridge.cv2_to_imgmsg(output_image, "bgr8"))

        self._median_depth_pub.publish(self._median_depth)


    def get_object_position(self):
        # Returns the object coordinates in a coordinate system
        # whose origin (0, 0) is the image center;
        # the coordinates are normalized to the range -1.0 to 1.0

        object_center = Point(
            self._object_rect[0] + self._object_rect[2] * 0.5,
            self._object_rect[1] + self._object_rect[3] * 0.5,
            0)

        # Translate to a coordinate system centered on the image
        translated_point = Point()
        translated_point.x = object_center.x - self._image_shape.x * 0.5
        translated_point.y = -(object_center.y - self._image_shape.y * 0.5)

        # Normalize
        normalized_point = Point()
        if self._image_shape.x != 0 and self._image_shape.y != 0:
            normalized_point.x = translated_point.x / (self._image_shape.x * 0.5)
            normalized_point.y = translated_point.y / (self._image_shape.y * 0.5)

        return normalized_point
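        # Worked example (hypothetical numbers): in a 640x360 image with
        # self._object_rect = (400, 60, 160, 60), the object center is
        # (480, 90).  Translated: (480 - 320, -(90 - 180)) = (160, 90).
        # Normalized: (160 / 320, 90 / 180) = (0.5, 0.5), i.e. the object
        # sits in the upper-right quadrant, halfway to each edge.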


    def object_detected(self):
        return self._object_detected


    def _detect_object(self, input_depth_image):
        # Limit the size of objects to be detected
        MIN_OBJECT_SIZE = 10000 # px * px
        MAX_OBJECT_SIZE = 80000 # px * px

        # Set up four detection depth ranges
        # Units are mm
        DETECTION_DEPTH = [
            (500, 700),
            (600, 800),
            (700, 900),
            (800, 1000)]
        # Colors of the rectangles drawn around detected objects
        RECT_COLOR = [
            (0, 0, 255),
            (0, 255, 0),
            (255, 0, 0),
            (255, 255, 255)]
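        # The colors are in OpenCV's BGR order: red, green, blue and white
        # mark the four depth bands from nearest to farthest.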

        # Process the camera's color image for output
        # Return False if no color image has been received yet
        output_image = self._color_image
        if output_image is None:
            return False

        self._object_detected = False
        self._median_depth = 0
        for i, depth in enumerate(DETECTION_DEPTH):
            # Extract objects within the range depth[0] to depth[1]
            mask = cv2.inRange(input_depth_image, depth[0], depth[1])
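            # cv2.inRange() yields a binary mask: pixels whose depth lies
            # between depth[0] and depth[1] (inclusive) become 255, all
            # others 0.  The depth image passed in is assumed to be a
            # 16-bit single-channel image holding millimeter values, the
            # usual encoding for an aligned RealSense depth stream.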

            # Extract contours from the mask
            if self._CV_MAJOR_VERSION == '4':
                contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
            else:
                _, contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

            # Convert the contours to rectangles and store them in a list
            rects = []
            for contour in contours:
                approx = cv2.convexHull(contour)
                rect = cv2.boundingRect(approx)
                rects.append(rect)

            if len(rects) > 0:
                # Pick the largest rectangle
                rect = max(rects, key=(lambda x: x[2] * x[3]))

                rect_size = rect[2] * rect[3]

                # Check that the rectangle size is within the given bounds
                if rect_size > MIN_OBJECT_SIZE and rect_size < MAX_OBJECT_SIZE:
                    # Draw the extracted rectangle on the image
                    cv2.rectangle(output_image,
                        (rect[0], rect[1]),
                        (rect[0] + rect[2], rect[1] + rect[3]),
                        RECT_COLOR[i], thickness=2)

                    self._object_rect = rect
                    self._object_detected = True

                    # Crop the rectangular region out of the depth image
                    object_depth = input_depth_image[
                        rect[1]:rect[1]+rect[3],
                        rect[0]:rect[0]+rect[2]]
                    # Compute the median depth
                    self._median_depth = int(np.median(object_depth))

                    # Draw the median value as text on the output image
                    cv2.putText(output_image, str(self._median_depth),
                        (rect[0], rect[1]+30),
                        cv2.FONT_HERSHEY_SIMPLEX,
                        1, RECT_COLOR[i], 2)
                    break

        return output_image
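
# Note: _detect_object() scans the depth bands in order and stops at the
# first band that contains an object of valid size, so nearer objects take
# precedence over farther ones.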


class NeckYawPitch(object):
    def __init__(self):
        self.__client = actionlib.SimpleActionClient("/sciurus17/controller3/neck_controller/follow_joint_trajectory",
                                                     FollowJointTrajectoryAction)
        if not self.__client.wait_for_server(rospy.Duration(5.0)):
            rospy.logerr("Action Server Not Found")
            rospy.signal_shutdown("Action Server not found")
            sys.exit(1)

        self._state_sub = rospy.Subscriber("/sciurus17/controller3/neck_controller/state",
                                           JointTrajectoryControllerState, self._state_callback, queue_size=1)

        self._state_received = False
        self._current_yaw = 0.0 # Degree
        self._current_pitch = 0.0 # Degree


    def _state_callback(self, state):
        # Get the current neck angles

        self._state_received = True
        yaw_radian = state.actual.positions[0]
        pitch_radian = state.actual.positions[1]

        self._current_yaw = math.degrees(yaw_radian)
        self._current_pitch = math.degrees(pitch_radian)


    def state_received(self):
        return self._state_received


    def get_current_yaw(self):
        return self._current_yaw


    def get_current_pitch(self):
        return self._current_pitch


    def set_angle(self, yaw_angle, pitch_angle, goal_secs=1.0e-9):
        # Move the neck to the given angles
        goal = FollowJointTrajectoryGoal()
        goal.trajectory.joint_names = ["neck_yaw_joint", "neck_pitch_joint"]

        yawpoint = JointTrajectoryPoint()
        yawpoint.positions.append(yaw_angle)
        yawpoint.positions.append(pitch_angle)
        yawpoint.time_from_start = rospy.Duration(goal_secs)
        goal.trajectory.points.append(yawpoint)

        self.__client.send_goal(goal)
        self.__client.wait_for_result(rospy.Duration(0.1))
        return self.__client.get_result()
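
# Note: set_angle() waits at most 0.1 s for the result, so it returns before
# the motion finishes; get_result() yields None while the goal is still
# active.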


def hook_shutdown():
    # Return to 0 degrees on shutdown
    neck.set_angle(math.radians(0), math.radians(0), 3.0)


def main():
    r = rospy.Rate(60)

    rospy.on_shutdown(hook_shutdown)

    # Thresholds for object tracking,
    # in the normalized image coordinate system
    THRESH_X = 0.05
    THRESH_Y = 0.05

    # Initial neck angles in degrees
    INITIAL_YAW_ANGLE = 0
    INITIAL_PITCH_ANGLE = 0

    # Neck angle limits in degrees
    MAX_YAW_ANGLE = 120
    MIN_YAW_ANGLE = -120
    MAX_PITCH_ANGLE = 50
    MIN_PITCH_ANGLE = -70

    # Neck control gains
    # Larger values move the neck more per step
    OPERATION_GAIN_X = 5.0
    OPERATION_GAIN_Y = 5.0

    # Angle step in degrees used when returning to the initial angles
    RESET_OPERATION_ANGLE = 3

    # Get the current neck angles
    # Starting from the current angles lets the neck return slowly to the initial pose
    while not neck.state_received():
        pass
    yaw_angle = neck.get_current_yaw()
    pitch_angle = neck.get_current_pitch()

    look_object = False
    detection_timestamp = rospy.Time.now()

    while not rospy.is_shutdown():
        # Get the normalized object coordinates
        object_position = object_tracker.get_object_position()

        if object_tracker.object_detected():
            detection_timestamp = rospy.Time.now()
            look_object = True
        else:
            lost_time = rospy.Time.now() - detection_timestamp
            # Return to the initial angles if the object has been lost for a while
            if lost_time.to_sec() > 1.0:
                look_object = False

        if look_object:
            # Move the neck so that the object comes to the image center
            if math.fabs(object_position.x) > THRESH_X:
                yaw_angle += -object_position.x * OPERATION_GAIN_X

            if math.fabs(object_position.y) > THRESH_Y:
                pitch_angle += object_position.y * OPERATION_GAIN_Y
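
            # This is simple proportional control: the normalized error from
            # the image center, scaled by the gain, becomes an angle
            # correction in degrees.  The x term is negated because image x
            # grows to the right while a positive yaw command (presumably)
            # turns the head to the left.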

            # Limit the neck control angles
            if yaw_angle > MAX_YAW_ANGLE:
                yaw_angle = MAX_YAW_ANGLE
            if yaw_angle < MIN_YAW_ANGLE:
                yaw_angle = MIN_YAW_ANGLE

            if pitch_angle > MAX_PITCH_ANGLE:
                pitch_angle = MAX_PITCH_ANGLE
            if pitch_angle < MIN_PITCH_ANGLE:
                pitch_angle = MIN_PITCH_ANGLE

        else:
            # Slowly return to the initial angles
            diff_yaw_angle = yaw_angle - INITIAL_YAW_ANGLE
            if math.fabs(diff_yaw_angle) > RESET_OPERATION_ANGLE:
                yaw_angle -= math.copysign(RESET_OPERATION_ANGLE, diff_yaw_angle)
            else:
                yaw_angle = INITIAL_YAW_ANGLE

            diff_pitch_angle = pitch_angle - INITIAL_PITCH_ANGLE
            if math.fabs(diff_pitch_angle) > RESET_OPERATION_ANGLE:
                pitch_angle -= math.copysign(RESET_OPERATION_ANGLE, diff_pitch_angle)
            else:
                pitch_angle = INITIAL_PITCH_ANGLE

        neck.set_angle(math.radians(yaw_angle), math.radians(pitch_angle))

        r.sleep()
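
# Note: main()'s loop runs at 60 Hz and sends a fresh trajectory goal on
# every iteration with a near-zero time_from_start (set_angle's default),
# so the neck continuously tracks the latest command.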


if __name__ == '__main__':
    rospy.init_node("depth_camera_tracking")

    neck = NeckYawPitch()
    object_tracker = ObjectTracker()

    main()
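
# Example usage, assuming this script is installed as an executable in the
# sciurus17_examples package and the Sciurus17 bringup (head camera and neck
# controller) is already running:
#   $ rosrun sciurus17_examples depth_camera_tracking.py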