hand_pose_estimation_2d.py
#!/usr/bin/env python
# -*- coding:utf-8 -*-

# Reference
# SRHandNet: Real-time 2D Hand Pose Estimation
# with Simultaneous Region Localization
# SRHandNet model downloading
# https://www.yangangwang.com/papers/WANG-SRH-2019-07.html
#
# Code Reference
# https://github.com/agrajak/SRHandNet-demo
from __future__ import division
from __future__ import print_function

from distutils.version import LooseVersion
import pkg_resources
import sys

if LooseVersion(pkg_resources.get_distribution("torch").version) \
        < LooseVersion('1.1.0'):
    print('''torch >= 1.1.0 is required:
Download wheel file from https://download.pytorch.org/whl/cu90/torch_stable.html (melodic)
and install with pip as follows:
pip install --user torch-1.1.0-cp27-cp27mu-linux_x86_64.whl
pip install --user torchvision-0.3.0-cp27-cp27mu-manylinux1_x86_64.whl

''', file=sys.stderr)
    sys.exit(1)

import cv2
import cv_bridge
import message_filters
import jsk_data
import numpy as np
import os.path as osp
import rospkg
import rospy
from skimage import feature
import torch

from image_geometry import PinholeCameraModel
from geometry_msgs.msg import Point
from geometry_msgs.msg import Pose
from geometry_msgs.msg import Quaternion
from jsk_recognition_msgs.msg import HandPose
from jsk_recognition_msgs.msg import HandPoseArray
from jsk_recognition_msgs.msg import HumanSkeleton
from jsk_recognition_msgs.msg import HumanSkeletonArray
from jsk_recognition_msgs.msg import Segment
from jsk_topic_tools import ConnectionBasedTransport
from sensor_msgs.msg import Image
from sensor_msgs.msg import CameraInfo


class HandPoseEstimation2D(ConnectionBasedTransport):

    HAND_COLORS = [
        (100, 100, 100),
        (100, 0, 0),
        (150, 0, 0),
        (200, 0, 0),
        (255, 0, 0),
        (100, 100, 0),
        (150, 150, 0),
        (200, 200, 0),
        (255, 255, 0),
        (0, 100, 50),
        (0, 150, 75),
        (0, 200, 100),
        (0, 255, 125),
        (0, 50, 100),
        (0, 75, 150),
        (0, 100, 200),
        (0, 125, 255),
        (100, 0, 100),
        (150, 0, 150),
        (200, 0, 200),
        (255, 0, 255),
    ]
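    # The 21 colors above are per-keypoint drawing colors, one per SRHandNet
    # keypoint, used as OpenCV BGR tuples by _draw_joints below.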
    # The order of keypoints is predefined by the SRHandNet model,
    # following the order of the corresponding 21-channel feature maps.
    INDEX2FINGERNAME = [
        "wrist",
        "thumb_mcp",
        "thumb_pip",
        "thumb_dip",
        "thumb_tip",
        "index_mcp",
        "index_pip",
        "index_dip",
        "index_tip",
        "middle_mcp",
        "middle_pip",
        "middle_dip",
        "middle_tip",
        "ring_mcp",
        "ring_pip",
        "ring_dip",
        "ring_tip",
        "little_mcp",
        "little_pip",
        "little_dip",
        "little_tip",
    ]

    FINGERNAME2INDEX = {name: i for i, name in enumerate(INDEX2FINGERNAME)}
    NUMBER_OF_FINGERS = 5
    NUMBER_OF_FINGER_JOINT = 3
    CONNECTION_PAIR = [(0, 1), (1, 2), (2, 3), (3, 4),
                       (0, 5), (5, 6), (6, 7), (7, 8),
                       (0, 9), (9, 10), (10, 11), (11, 12),
                       (0, 13), (13, 14), (14, 15), (15, 16),
                       (0, 17), (17, 18), (18, 19), (19, 20)]
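    # Layout sketch (illustrative, not executed): keypoint 0 is the wrist
    # and each finger occupies four consecutive indices, so
    #   INDEX2FINGERNAME[4]           -> 'thumb_tip'
    #   FINGERNAME2INDEX['index_tip'] -> 8
    # Each (a, b) pair in CONNECTION_PAIR is one bone of the hand skeleton,
    # e.g. (0, 1) is wrist -> thumb_mcp and (3, 4) is thumb_dip -> thumb_tip.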

    def __init__(self):
        super(self.__class__, self).__init__()
        self.backend = rospy.get_param('~backend', 'torch')
        self.gpu = rospy.get_param('~gpu', -1)  # -1 is cpu mode
        # default tensor size
        self.TRAIN_IMAGE_HEIGHT = 256
        self.TRAIN_IMAGE_WIDTH = 256
        self.label_bbox_min = rospy.get_param('~thre1', 0.3)
        self.label_hand_min = rospy.get_param('~thre2', 0.2)
        # detection fails if more than ~thre3 keypoints are missed
        self.missing_point = rospy.get_param('~thre3', 5)
        # model loading
        self._load_model()
        # image subscribe
        # self.subscribe()
        # topic advertise
        self.image_pub = self.advertise(
            '~output/vis', Image, queue_size=1)
        self.hand_pose_pub = self.advertise(
            '~output/pose', HandPoseArray, queue_size=1)
        self.with_depth = rospy.get_param("~with_depth", False)
        if self.with_depth is True:
            self.hand_pose_2d_pub = self.advertise(
                '~output/pose_2d', HandPoseArray, queue_size=1)
            self.skeleton_pub = self.advertise(
                '~skeleton', HumanSkeletonArray, queue_size=1)
        self.bridge = cv_bridge.CvBridge()

    @property
    def visualize(self):
        return self.image_pub.get_num_connections() > 0

    def _load_model(self):
        if self.backend == 'torch':
            self._load_torch_model()
        else:
            raise RuntimeError(
                'Unsupported backend: %s' % self.backend)

    def _load_torch_model(self):
        # model can be downloaded manually from:
        # https://www.yangangwang.com/papers/WANG-SRH-2019-07.html
        # or using _get_srhand_pretrained_model() from:
        # https://drive.google.com/uc?id=16Jg8HhaFaThzFSbWbEixMLE3SAnOzvzL
        rospy.loginfo('Loading model')
        model_file = rospy.get_param('~model_file', None)
        if not model_file:
            model_file = self._get_srhand_pretrained_model()

        if self.gpu >= 0 and torch.cuda.is_available():
            self.model = torch.jit.load(
                model_file, map_location=torch.device('cuda'))
            rospy.loginfo('Finished loading SRHandNet model to gpu')
        else:
            self.model = torch.jit.load(
                model_file, map_location=torch.device('cpu'))
            rospy.loginfo('Finished loading SRHandNet model to cpu')
            if self.gpu >= 0:
                rospy.loginfo(
                    'You need to check the version match between PyTorch '
                    'and CUDA. See '
                    'https://jsk-docs.readthedocs.io/projects/jsk_recognition/en/latest/install_chainer_gpu.html '
                    'and '
                    'https://github.com/jsk-ros-pkg/jsk_recognition/pull/2601#issuecomment-876405219 '
                    'for more info')
        self.model.eval()
174 
176  pkg_name = 'jsk_perception'
177  filepath = 'trained_data/SRHandNet.pts'
178  jsk_data.download_data(
179  pkg_name=pkg_name,
180  path=filepath,
181  url='https://drive.google.com/'
182  'uc?id=10oGfGALjIIwdIWO9MQftN07A3TRdvp3b',
183  md5='9f39d3baa43cf1c962c8f752c009eb14',
184  )
185  rp = rospkg.RosPack()
186  pkgpath = rp.get_path(pkg_name)
187  return osp.join(pkgpath, filepath)

    def subscribe(self):
        if self.with_depth:
            queue_size = rospy.get_param('~queue_size', 10)
            sub_img = message_filters.Subscriber(
                '~input', Image, queue_size=1, buff_size=2**24)
            sub_depth = message_filters.Subscriber(
                '~input/depth', Image, queue_size=1, buff_size=2**24)
            self.subs = [sub_img, sub_depth]

            # NOTE: Camera info is not synchronized by default.
            # See https://github.com/jsk-ros-pkg/jsk_recognition/issues/2165
            sync_cam_info = rospy.get_param("~sync_camera_info", False)
            if sync_cam_info:
                sub_info = message_filters.Subscriber(
                    '~input/info', CameraInfo, queue_size=1, buff_size=2**24)
                self.subs.append(sub_info)
            else:
                self.sub_info = rospy.Subscriber(
                    '~input/info', CameraInfo, self._cb_cam_info)

            if rospy.get_param('~approximate_sync', True):
                slop = rospy.get_param('~slop', 0.1)
                sync = message_filters.ApproximateTimeSynchronizer(
                    fs=self.subs, queue_size=queue_size, slop=slop)
            else:
                sync = message_filters.TimeSynchronizer(
                    fs=self.subs, queue_size=queue_size)
            if sync_cam_info:
                sync.registerCallback(self._cb_with_depth_info)
            else:
                self.camera_info_msg = None
                sync.registerCallback(self._cb_with_depth)
        else:
            sub_img = rospy.Subscriber(
                '~input', Image, self._cb, queue_size=1, buff_size=2**24)
            self.subs = [sub_img]

    def unsubscribe(self):
        for sub in self.subs:
            sub.unregister()
        if self.sub_info is not None:
            self.sub_info.unregister()
            self.sub_info = None

    def _cb_cam_info(self, msg):
        self.camera_info_msg = msg
        self.sub_info.unregister()
        self.sub_info = None
        rospy.loginfo("Received camera info")

    def _cb_with_depth(self, img_msg, depth_msg):
        if self.camera_info_msg is None:
            return
        self._cb_with_depth_info(img_msg, depth_msg, self.camera_info_msg)

    def _cb_with_depth_info(self, img_msg, depth_msg, camera_info_msg):
        camera_model = PinholeCameraModel()
        camera_model.fromCameraInfo(camera_info_msg)
        br = cv_bridge.CvBridge()
        img = br.imgmsg_to_cv2(img_msg, desired_encoding='bgr8')
        depth_img = br.imgmsg_to_cv2(depth_msg, 'passthrough')
        if depth_msg.encoding == '16UC1':
            depth_img = np.asarray(depth_img, dtype=np.float32)
            depth_img /= 1000  # convert metric: mm -> m
        elif depth_msg.encoding != '32FC1':
            rospy.logerr('Unsupported depth encoding: %s' % depth_msg.encoding)

        hands_points, hands_point_scores, hands_score = \
            self.hand_pose_estimate(img, img_msg.header)

        hand_pose_2d_msg = self._create_2d_hand_pose_array_msgs(
            hands_points,
            hands_point_scores,
            hands_score,
            img_msg.header)

        # calculate xyz-position
        hand_pose_array_msg = HandPoseArray(header=img_msg.header)

        for hand in hand_pose_2d_msg.poses:
            hand_pose_msg = HandPose(hand_score=hand.hand_score)
            for pose, finger_name, score in zip(
                    hand.poses, hand.finger_names,
                    hand.point_scores):
                u = pose.position.x
                v = pose.position.y
                if 0 <= u < depth_img.shape[1] and \
                        0 <= v < depth_img.shape[0]:
                    z = float(depth_img[int(v)][int(u)])
                else:
                    continue
                if np.isnan(z) or z <= 0:
                    continue
                x = (u - camera_model.cx()) * z / camera_model.fx()
                y = (v - camera_model.cy()) * z / camera_model.fy()
                hand_pose_msg.finger_names.append(finger_name)
                hand_pose_msg.poses.append(
                    Pose(position=Point(x=x, y=y, z=z),
                         orientation=Quaternion(w=1)))
                hand_pose_msg.point_scores.append(score)
            hand_pose_array_msg.poses.append(hand_pose_msg)

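        # Back-projection sketch (assumed VGA intrinsics, for illustration
        # only): with fx = fy = 525.0, cx = 319.5, cy = 239.5, a keypoint at
        # pixel (u, v) = (400, 300) with depth z = 0.8 m maps to
        #   x = (400 - 319.5) * 0.8 / 525.0 ~= 0.123 m
        #   y = (300 - 239.5) * 0.8 / 525.0 ~= 0.092 m
        # following the pinhole model used above.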
        # prepare HumanSkeletonArray for visualization.
        skeleton_array_msg = HumanSkeletonArray(header=img_msg.header)
        for hand_pose_msg in hand_pose_array_msg.poses:
            hand_joint_index_to_list_index = {
                self.FINGERNAME2INDEX[name]: i
                for i, name in enumerate(hand_pose_msg.finger_names)}
            skeleton_msg = HumanSkeleton(header=img_msg.header)
            for index_a, index_b in self.CONNECTION_PAIR:
                if index_a not in hand_joint_index_to_list_index \
                        or index_b not in hand_joint_index_to_list_index:
                    continue
                joint_a_index = hand_joint_index_to_list_index[index_a]
                joint_b_index = hand_joint_index_to_list_index[index_b]
                bone = Segment(
                    start_point=hand_pose_msg.poses[joint_a_index].position,
                    end_point=hand_pose_msg.poses[joint_b_index].position)
                bone_name = '{}->{}'.format(
                    self.INDEX2FINGERNAME[index_a],
                    self.INDEX2FINGERNAME[index_b])
                skeleton_msg.bones.append(bone)
                skeleton_msg.bone_names.append(bone_name)
            skeleton_array_msg.skeletons.append(skeleton_msg)

        self.hand_pose_2d_pub.publish(hand_pose_2d_msg)
        self.hand_pose_pub.publish(hand_pose_array_msg)
        self.skeleton_pub.publish(skeleton_array_msg)

    def _cb(self, img_msg):
        img = self.bridge.imgmsg_to_cv2(
            img_msg, desired_encoding='bgr8')
        hands_points, hands_point_scores, hands_score = \
            self.hand_pose_estimate(img, img_msg.header)

        hand_pose_msg = self._create_2d_hand_pose_array_msgs(
            hands_points,
            hands_point_scores,
            hands_score,
            img_msg.header)

        self.hand_pose_pub.publish(hand_pose_msg)

    # Create a HandPoseArray message that contains, for each detected hand:
    # - the hand_score taken from the bounding-box feature map
    # - 21 keypoint positions (set to (0, 0, 0) if not detected)
    # - the corresponding joint names
    # - the corresponding scores taken from the feature maps
    def _create_2d_hand_pose_array_msgs(
            self, hands_points,
            hands_point_scores,
            hands_score,
            header
    ):
        hand_pose_msg = HandPoseArray(header=header)
        for hand_points, point_scores, hand_score in \
                zip(hands_points, hands_point_scores, hands_score):
            pose_msg = HandPose()
            pose_msg.hand_score = hand_score
            for index, joint_pos in enumerate(hand_points):
                pose_msg.finger_names.append(self.INDEX2FINGERNAME[index])
                pose_msg.point_scores.append(point_scores[index])
                if len(joint_pos) == 2:
                    pose_msg.poses.append(
                        Pose(position=Point(
                            x=joint_pos[0], y=joint_pos[1], z=0.)))
                else:
                    pose_msg.poses.append(
                        Pose(position=Point(x=0., y=0., z=0.)))
            hand_pose_msg.poses.append(pose_msg)
        return hand_pose_msg

    def hand_pose_estimate(self, bgr, header):
        if self.backend == 'torch':
            return self._hand_pose_estimate_torch_backend(bgr, header)
        raise ValueError('Unsupported backend: {0}'.format(self.backend))

    def _hand_pose_estimate_torch_backend(self, frame, header):
        hands_points, hands_rect, hands_point_scores, hands_score = \
            self.pyramid_inference(frame)

        if self.visualize:
            vis_img = self._draw_joints(frame, hands_points, hands_rect)
            vis_msg = self.bridge.cv2_to_imgmsg(vis_img, encoding='bgr8')
            vis_msg.header = header
            self.image_pub.publish(vis_msg)

        return hands_points, hands_point_scores, hands_score

    def _draw_joints(self, frame, hands_points, hands_rect):
        for rect_idx, points in enumerate(hands_points):
            rect = hands_rect[rect_idx]
            if rect is None:
                continue
            # bounding boxes
            cv2.rectangle(frame, rect[0:2], rect[2:4], (0, 0, 255), 6)
            missing = 0

            for i, point in enumerate(points):
                if point is None or len(point) == 0:
                    missing += 1
                    continue
                # joint keypoints
                cv2.circle(frame, point, 6, self.HAND_COLORS[i], 6)

            for i in range(5):
                for j in range(3):
                    cnt = j + i * 4 + 1
                    if (len(points[cnt]) != 0
                            and len(points[cnt + 1]) != 0):
                        # fingers
                        cv2.line(
                            frame, points[cnt], points[cnt + 1],
                            (0, 255, 0), 2)

            per = '{}%'.format((2100 - missing * 100) // 21)
            text_pos = hands_rect[rect_idx][0:2]
            text_pos = (text_pos[0], text_pos[1] + 5)
            cv2.putText(frame, per, text_pos, 1, 3, (0, 0, 255), 3)
        return frame
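        # The overlay text is the fraction of the 21 keypoints detected,
        # e.g. missing = 3 -> (2100 - 300) // 21 = 85, drawn as '85%'.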

    # find local maxima above the given threshold
    def nmslocation(self, src, threshold):
        peak_loc = feature.peak_local_max(
            src, min_distance=2, threshold_abs=threshold,
            exclude_border=True)
        peak_pair = [(src[x][y], (x, y)) for x, y in peak_loc]
        return peak_pair
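        # A minimal sketch of the return value, assuming a heatmap with a
        # single clear peak (illustration only, not executed):
        #   src = np.zeros((32, 32), dtype=np.float32)
        #   src[10, 20] = 0.9
        #   self.nmslocation(src, 0.5)  ->  [(0.9, (10, 20))]
        # i.e. a list of (score, (row, col)) tuples above the threshold.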

    # resize src_img to fit the size of the tensor
    def transform_net_input(
            self, tensor, src_img, hands_rect=None, tensor_idx=0):
        img = src_img.copy()
        if hands_rect is not None:
            l, t, r, b = hands_rect[tensor_idx]
            img = img[t:b, l:r]

        rows, cols = img.shape[:2]
        ratio = min(tensor.shape[2] / rows, tensor.shape[3] / cols)
        mat = np.array([[ratio, 0, 0], [0, ratio, 0]], dtype=np.float32)

        dst = cv2.warpAffine(
            img, mat, (tensor.shape[3], tensor.shape[2]),
            flags=cv2.INTER_CUBIC, borderMode=cv2.BORDER_CONSTANT,
            borderValue=(128, 128, 128))

        dst = dst / 255. - 0.5
        b, g, r = cv2.split(dst)

        if self.gpu >= 0 and torch.cuda.is_available():
            tensor[tensor_idx][0] = torch.tensor(
                b, device=torch.device('cuda')).float()
            tensor[tensor_idx][1] = torch.tensor(
                g, device=torch.device('cuda')).float()
            tensor[tensor_idx][2] = torch.tensor(
                r, device=torch.device('cuda')).float()
        else:
            tensor[tensor_idx][0] = torch.tensor(
                b, device=torch.device('cpu')).float()
            tensor[tensor_idx][1] = torch.tensor(
                g, device=torch.device('cpu')).float()
            tensor[tensor_idx][2] = torch.tensor(
                r, device=torch.device('cpu')).float()
        return ratio
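        # Descriptive note: the crop is scaled so that it fits entirely
        # inside the tensor, padded with gray (128) borders, normalized to
        # [-0.5, 0.5], and written channel-planar in B, G, R order; the
        # returned ratio converts input-image pixels to tensor pixels.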

    def detect_bbox(self, input_image):
        if self.gpu >= 0 and torch.cuda.is_available():
            tensor = torch.zeros(
                [1, 3, self.TRAIN_IMAGE_HEIGHT, self.TRAIN_IMAGE_WIDTH],
                device=torch.device('cuda'))
        else:
            tensor = torch.zeros(
                [1, 3, self.TRAIN_IMAGE_HEIGHT, self.TRAIN_IMAGE_WIDTH],
                device=torch.device('cpu'))
        rows, cols, _ = input_image.shape
        # transform the input data
        ratio_input_to_net = self.transform_net_input(tensor, input_image)

        with torch.no_grad():
            heatmap = self.model.forward(tensor)[3]

        # copy the 3-channel rectmap; the channels respectively represent:
        # - centre position of the bounding box
        # - width of the bounding box
        # - height of the bounding box
        ratio_net_downsample = \
            self.TRAIN_IMAGE_HEIGHT / float(heatmap.shape[2])
        rect_map_idx = heatmap.shape[1] - 3
        rectmap = []
        for i in range(3):
            rectmap.append(
                np.copy(heatmap[0][i + rect_map_idx].cpu().detach().numpy()))

        # centre position of bounding boxes
        locations = self.nmslocation(rectmap[0], self.label_bbox_min)
        hands_rect = []
        hands_score = []
        for loc_val, points in locations:
            pos_x, pos_y = points
            ratio_width = ratio_height = pixelcount = 0
            for m in range(
                    max(pos_x - 2, 0), min(pos_x + 3, int(heatmap.shape[2]))):
                for n in range(
                        max(pos_y - 2, 0),
                        min(pos_y + 3, int(heatmap.shape[3]))):
                    ratio_width += rectmap[1][m][n]
                    ratio_height += rectmap[2][m][n]
                    pixelcount += 1

            if pixelcount > 0:
                ratio_width = min(max(ratio_width / pixelcount, 0), 1)
                ratio_height = min(max(ratio_height / pixelcount, 0), 1)
                ratio = ratio_net_downsample / ratio_input_to_net
                pos_x *= ratio  # row
                pos_y *= ratio  # column

                rect_w = \
                    ratio_width * self.TRAIN_IMAGE_WIDTH / ratio_input_to_net
                rect_h = \
                    ratio_height * self.TRAIN_IMAGE_HEIGHT / ratio_input_to_net
                # left-top corner position
                l_t = (
                    max(int(pos_x - rect_h / 2), 0),
                    max(int(pos_y - rect_w / 2), 0)
                )
                # right-bottom corner position
                r_b = (
                    min(int(pos_x + rect_h / 2), rows - 1),
                    min(int(pos_y + rect_w / 2), cols - 1)
                )

                hands_rect.append((l_t[1], l_t[0], r_b[1], r_b[0]))
                hands_score.append(loc_val)
        return hands_rect, hands_score
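        # hands_rect entries are (left, top, right, bottom) in input-image
        # pixels; pos_x indexes rows and pos_y columns above, which is why
        # the coordinates are swapped when each tuple is built.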

    def detect_hand(self, input_image, hands_rect):
        if len(hands_rect) == 0:
            return [], []

        ratio_input_to_net = [None] * len(hands_rect)

        if self.gpu >= 0 and torch.cuda.is_available():
            tensor = torch.zeros(
                [len(hands_rect), 3,
                 self.TRAIN_IMAGE_HEIGHT, self.TRAIN_IMAGE_WIDTH],
                device=torch.device('cuda'))
        else:
            tensor = torch.zeros(
                [len(hands_rect), 3,
                 self.TRAIN_IMAGE_HEIGHT, self.TRAIN_IMAGE_WIDTH],
                device=torch.device('cpu'))

        # extract RoI of the input_image and resize to fit the tensor size
        for i in range(len(hands_rect)):
            ratio_input_to_net[i] = self.transform_net_input(
                tensor, input_image, hands_rect, i)

        with torch.no_grad():
            heatmaps = self.model.forward(tensor)[3]

        ratio_net_downsample = self.TRAIN_IMAGE_HEIGHT / heatmaps.size()[2]

        # joint position
        hands_points = []
        hands_point_scores = []
        for rect_idx in range(len(hands_rect)):
            hand_points = [[] for i in range(21)]
            point_scores = [0. for i in range(21)]
            x, y, _, _ = hands_rect[rect_idx]
            ratio = ratio_net_downsample / ratio_input_to_net[rect_idx]
            for i in range(21):
                heatmap = heatmaps[rect_idx][i].cpu().detach().numpy()
                points = self.nmslocation(heatmap, self.label_hand_min)
                if len(points):
                    score, point = points[0]
                    hand_points[i] = (
                        int(point[1] * ratio) + x,
                        int(point[0] * ratio) + y
                    )
                    point_scores[i] = score
            hands_points.append(hand_points)
            hands_point_scores.append(point_scores)
        return hands_points, hands_point_scores
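        # hands_points holds, per hand, 21 entries that are either an (x, y)
        # tuple in input-image pixels or [] when the keypoint was not found;
        # hands_point_scores holds the matching heatmap peak values.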

    def pyramid_inference(self, input_image):
        # Full Cycle Detection
        # Region of Interest
        hands_rect, hands_score = self.detect_bbox(input_image)

        if len(hands_rect) == 0:
            return [], [], [], []

        # joints detection
        hands_points, hands_point_scores = self.detect_hand(
            input_image, hands_rect)

        for i in range(len(hands_rect) - 1, -1, -1):
            missing_points = 0
            for j in range(21):
                if len(hands_points[i][j]) != 2:
                    missing_points += 1
            if missing_points > self.missing_point:
                hands_rect.pop(i)
                hands_score.pop(i)
                hands_points.pop(i)
                hands_point_scores.pop(i)

        return hands_points, hands_rect, hands_point_scores, hands_score
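        # Summary: detect_bbox proposes hand regions on the whole image,
        # detect_hand localizes the 21 keypoints inside each region, and
        # hands with more than ~thre3 (missing_point) undetected keypoints
        # are dropped before the results are returned.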


if __name__ == '__main__':
    rospy.init_node('hand_pose_estimation_2d')
    HandPoseEstimation2D()
    rospy.spin()
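
# Example usage (a sketch; the camera topic names below are assumptions and
# depend on your driver):
#   rosrun jsk_perception hand_pose_estimation_2d.py \
#       ~input:=/camera/rgb/image_rect_color _gpu:=0
# With depth (also publishes 3D poses and skeletons):
#   rosrun jsk_perception hand_pose_estimation_2d.py \
#       ~input:=/camera/rgb/image_rect_color \
#       ~input/depth:=/camera/depth_registered/image_rect \
#       ~input/info:=/camera/rgb/camera_info _with_depth:=true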