chest_camera_tracking.py
#!/usr/bin/env python
# coding: utf-8

import rospy
import math
import sys

# for ObjectTracker
import cv2
import numpy as np
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
from geometry_msgs.msg import Point

# for WaistYaw
import actionlib
from control_msgs.msg import (
    FollowJointTrajectoryAction,
    FollowJointTrajectoryGoal,
    JointTrajectoryControllerState
)
from trajectory_msgs.msg import JointTrajectoryPoint

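# ObjectTracker subscribes to the chest camera image, detects the largest
# region of a target color (orange by default), and republishes the frame
# with the detection rectangle drawn on the private topic ~output_image.
# The result is exposed through object_detected() and get_object_position().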
class ObjectTracker(object):
    def __init__(self):
        self._bridge = CvBridge()
        self._image_sub = rospy.Subscriber("/sciurus17/chest_camera_node/image", Image, self._image_callback, queue_size=1)
        self._image_pub = rospy.Publisher("~output_image", Image, queue_size=1)
        self._object_rect = [0, 0, 0, 0]
        self._image_shape = Point()
        self._object_detected = False

        # Major OpenCV version, used to handle the findContours() API difference
        self._CV_MAJOR_VERSION, _, _ = cv2.__version__.split('.')


    def _image_callback(self, ros_image):
        try:
            input_image = self._bridge.imgmsg_to_cv2(ros_image, "bgr8")
        except CvBridgeError as e:
            rospy.logerr(e)
            return

        # Get the image width and height
        self._image_shape.x = input_image.shape[1]
        self._image_shape.y = input_image.shape[0]

        # Detect an object of the target color
        output_image = self._detect_orange_object(input_image)
        # output_image = self._detect_blue_object(input_image)

        self._image_pub.publish(self._bridge.cv2_to_imgmsg(output_image, "bgr8"))


    def get_object_position(self):
        # Return the object position in a coordinate system whose origin is the image center
        # The coordinates are normalized to the range -1.0 ~ 1.0

        object_center = Point(
            self._object_rect[0] + self._object_rect[2] * 0.5,
            self._object_rect[1] + self._object_rect[3] * 0.5,
            0)

        # Translate into the image-centered coordinate system
        translated_point = Point()
        translated_point.x = object_center.x - self._image_shape.x * 0.5
        translated_point.y = -(object_center.y - self._image_shape.y * 0.5)

        # Normalize
        normalized_point = Point()
        if self._image_shape.x != 0 and self._image_shape.y != 0:
            normalized_point.x = translated_point.x / (self._image_shape.x * 0.5)
            normalized_point.y = translated_point.y / (self._image_shape.y * 0.5)

        return normalized_point
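
    # A worked example of the normalization above, assuming a 640x480 image:
    # an object whose bounding-box center is at pixel (480, 120) gives
    #   translated_point = (480 - 320, -(120 - 240)) = (160, 120)
    #   normalized_point = (160 / 320, 120 / 240) = (0.5, 0.5)
    # so positive x means right of the image center and positive y means above it.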

    def object_detected(self):
        return self._object_detected


    def _detect_color_object(self, bgr_image, lower_color, upper_color):
        # Detect an object of the specified color in the image

        MIN_OBJECT_SIZE = 7000  # px * px

        # Convert the BGR image to the HSV color space
        hsv = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2HSV)

        # Create a mask that extracts the specified color range
        mask = cv2.inRange(hsv, lower_color, upper_color)

        # Extract contours from the mask
        # (OpenCV 4 findContours() returns (contours, hierarchy),
        #  OpenCV 3 returns (image, contours, hierarchy))
        if self._CV_MAJOR_VERSION == '4':
            contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        else:
            _, contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

        # Convert each contour to a bounding rectangle and collect them
        rects = []
        for contour in contours:
            approx = cv2.convexHull(contour)
            rect = cv2.boundingRect(approx)
            rects.append(rect)

        self._object_detected = False
        if len(rects) > 0:
            # Pick the largest rectangle
            rect = max(rects, key=(lambda x: x[2] * x[3]))

            # Count it as a detection only if the rectangle is large enough
            if rect[2] * rect[3] > MIN_OBJECT_SIZE:
                # Draw the extracted rectangle on the image
                cv2.rectangle(bgr_image,
                    (rect[0], rect[1]),
                    (rect[0] + rect[2], rect[1] + rect[3]),
                    (0, 0, 255), thickness=2)

                self._object_rect = rect
                self._object_detected = True

        return bgr_image


    def _detect_orange_object(self, bgr_image):
        # OpenCV HSV value ranges:
        # H: 0 ~ 179 (0 ~ 360°)
        # S: 0 ~ 255 (0 ~ 100%)
        # V: 0 ~ 255 (0 ~ 100%)
        lower_orange = np.array([5, 127, 127])
        upper_orange = np.array([20, 255, 255])

        return self._detect_color_object(bgr_image, lower_orange, upper_orange)


    def _detect_blue_object(self, bgr_image):
        # H: 0 ~ 179 (0 ~ 360°)
        # S: 0 ~ 255 (0 ~ 100%)
        # V: 0 ~ 255 (0 ~ 100%)
        lower_blue = np.array([100, 127, 127])
        upper_blue = np.array([110, 255, 255])

        return self._detect_color_object(bgr_image, lower_blue, upper_blue)
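
    # To track a different color, add a method with the corresponding HSV range
    # and call it from _image_callback(). A minimal sketch; the green bounds
    # below are illustrative, not tuned:
    #
    #     def _detect_green_object(self, bgr_image):
    #         lower_green = np.array([50, 127, 127])
    #         upper_green = np.array([70, 255, 255])
    #         return self._detect_color_object(bgr_image, lower_green, upper_green)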


class WaistYaw(object):
    def __init__(self):
        self.__client = actionlib.SimpleActionClient("/sciurus17/controller3/waist_yaw_controller/follow_joint_trajectory",
            FollowJointTrajectoryAction)
        # wait_for_server() returns False if the action server does not appear in time
        if not self.__client.wait_for_server(rospy.Duration(5.0)):
            rospy.logerr("Action Server Not Found")
            rospy.signal_shutdown("Action Server not found")
            sys.exit(1)

        self._state_sub = rospy.Subscriber("/sciurus17/controller3/waist_yaw_controller/state",
            JointTrajectoryControllerState, self._state_callback, queue_size=1)

        self._state_received = False
        self._current_yaw = 0.0  # degrees

    def _state_callback(self, state):
        # Store the current waist yaw angle

        self._state_received = True
        yaw_radian = state.actual.positions[0]
        self._current_yaw = math.degrees(yaw_radian)


    def state_received(self):
        return self._state_received


    def get_current_yaw(self):
        return self._current_yaw


    def set_angle(self, yaw_angle, goal_secs=1.0e-9):
        # Move the waist to the given angle (in radians)
        goal = FollowJointTrajectoryGoal()
        goal.trajectory.joint_names = ["waist_yaw_joint"]

        yawpoint = JointTrajectoryPoint()
        yawpoint.positions.append(yaw_angle)
        yawpoint.time_from_start = rospy.Duration(goal_secs)
        goal.trajectory.points.append(yawpoint)

        self.__client.send_goal(goal)
        self.__client.wait_for_result(rospy.Duration(0.1))
        return self.__client.get_result()
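
    # Note: this sends a single-point trajectory, so the controller interpolates
    # from the current position to the target over goal_secs. The near-zero
    # default asks the controller to move as fast as it can, which the 60 Hz
    # tracking loop in main() relies on for responsive updates.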


def hook_shutdown():
    # Return the waist to 0 degrees over 3 seconds on shutdown
    waist_yaw.set_angle(math.radians(0), 3.0)


def main():
    r = rospy.Rate(60)

    rospy.on_shutdown(hook_shutdown)

    # Threshold for object tracking,
    # in the normalized image coordinate system (-1.0 ~ 1.0)
    THRESH_X = 0.05

    # Initial waist angle in degrees
    INITIAL_YAW_ANGLE = 0

    # Waist angle limits in degrees
    MAX_YAW_ANGLE = 120
    MIN_YAW_ANGLE = -120

    # Waist control gain
    # Larger values turn the waist farther per update
    OPERATION_GAIN_X = 5.0

    # Step angle in degrees used while returning to the initial angle
    RESET_OPERATION_ANGLE = 1

    # Wait for the current waist angle;
    # starting from it lets the waist return slowly to the initial angle
    while not waist_yaw.state_received():
        pass
    yaw_angle = waist_yaw.get_current_yaw()

    look_object = False
    detection_timestamp = rospy.Time.now()

    while not rospy.is_shutdown():
        # Get the normalized object position
        object_position = object_tracker.get_object_position()

        if object_tracker.object_detected():
            detection_timestamp = rospy.Time.now()
            look_object = True
        else:
            lost_time = rospy.Time.now() - detection_timestamp
            # Return to the initial angle if the object stays lost for a while
            if lost_time.to_sec() > 1.0:
                look_object = False

        if look_object:
            # Turn the waist so the object moves toward the image center
            if math.fabs(object_position.x) > THRESH_X:
                yaw_angle += -object_position.x * OPERATION_GAIN_X

            # Clamp the waist angle to its limits
            if yaw_angle > MAX_YAW_ANGLE:
                yaw_angle = MAX_YAW_ANGLE
            if yaw_angle < MIN_YAW_ANGLE:
                yaw_angle = MIN_YAW_ANGLE

        else:
            # Return slowly to the initial angle
            diff_yaw_angle = yaw_angle - INITIAL_YAW_ANGLE
            if math.fabs(diff_yaw_angle) > RESET_OPERATION_ANGLE:
                yaw_angle -= math.copysign(RESET_OPERATION_ANGLE, diff_yaw_angle)
            else:
                yaw_angle = INITIAL_YAW_ANGLE

        waist_yaw.set_angle(math.radians(yaw_angle))

        r.sleep()
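
# The loop above implements a simple proportional controller: the farther the
# object is from the image center (normalized x), the larger the waist step
# per 60 Hz iteration, scaled by OPERATION_GAIN_X and clamped to the joint limits.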


if __name__ == '__main__':
    rospy.init_node("chest_camera_tracking")

    waist_yaw = WaistYaw()
    object_tracker = ObjectTracker()

    main()
sciurus17_examples
Author(s): Daisuke Sato, Hiroyuki Nomura