import math
import pickle

import chainer
import chainer.functions as F
from chainer import cuda
import cv2
import numpy as np

import cv_bridge
import message_filters
import rospy
from jsk_topic_tools import ConnectionBasedTransport
from geometry_msgs.msg import Pose
from geometry_msgs.msg import Point
from geometry_msgs.msg import Quaternion
from jsk_recognition_msgs.msg import PeoplePose
from jsk_recognition_msgs.msg import PeoplePoseArray
from sensor_msgs.msg import CameraInfo
from sensor_msgs.msg import Image

def padRightDownCorner(img, stride, padValue):
    """Pad the bottom and right edges of img with padValue so that both
    image dimensions become multiples of stride.

    Returns the padded image and the per-edge padding
    [up, left, down, right].
    """
    h = img.shape[0]
    w = img.shape[1]

    pad = 4 * [None]
    pad[0] = 0  # up
    pad[1] = 0  # left
    pad[2] = 0 if (h % stride == 0) else stride - (h % stride)  # down
    pad[3] = 0 if (w % stride == 0) else stride - (w % stride)  # right

    img_padded = img
    pad_up = np.tile(img_padded[0:1, :, :] * 0 + padValue, (pad[0], 1, 1))
    img_padded = np.concatenate((pad_up, img_padded), axis=0)
    pad_left = np.tile(img_padded[:, 0:1, :] * 0 + padValue, (1, pad[1], 1))
    img_padded = np.concatenate((pad_left, img_padded), axis=1)
    pad_down = np.tile(img_padded[-2:-1, :, :] * 0 + padValue, (pad[2], 1, 1))
    img_padded = np.concatenate((img_padded, pad_down), axis=0)
    pad_right = np.tile(img_padded[:, -2:-1, :] * 0 + padValue, (1, pad[3], 1))
    img_padded = np.concatenate((img_padded, pad_right), axis=1)

    return img_padded, pad
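# For example, with stride=8 and padValue=128, a 373x517 BGR image gets
# pad == [0, 0, 3, 3] (since 373 % 8 == 5 and 517 % 8 == 5) and comes back
# as a 376x520 image whose new bottom/right border pixels are all 128.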


class PeoplePoseEstimation2D(ConnectionBasedTransport):

    # Limb connections as pairs of 1-based part ids (see index2limbname).
    limb_sequence = [[2, 3], [2, 6], [3, 4], [4, 5], [6, 7], [7, 8], [2, 9],
                     [9, 10], [10, 11], [2, 12], [12, 13], [13, 14], [2, 1],
                     [1, 15], [15, 17], [1, 16], [16, 18], [3, 17], [6, 18]]

    # PAF channel ids for each limb, offset by the 19 heatmap channels;
    # the code subtracts 19 to index into the 38-channel paf_avg array.
    map_idx = [[31, 32], [39, 40], [33, 34], [35, 36], [41, 42], [43, 44],
               [19, 20], [21, 22], [23, 24], [25, 26], [27, 28], [29, 30],
               [47, 48], [49, 50], [53, 54], [51, 52], [55, 56], [37, 38],
               [45, 46]]

    # Part names in heatmap-channel order; the last channel is background.
    index2limbname = ["Nose",
                      "Neck",
                      "RShoulder",
                      "RElbow",
                      "RWrist",
                      "LShoulder",
                      "LElbow",
                      "LWrist",
                      "RHip",
                      "RKnee",
                      "RAnkle",
                      "LHip",
                      "LKnee",
                      "LAnkle",
                      "REye",
                      "LEye",
                      "REar",
                      "LEar",
                      "Bkg"]

    colors = [[255, 0, 0], [255, 85, 0], [255, 170, 0], [255, 255, 0],
              [170, 255, 0], [85, 255, 0], [0, 255, 0], [0, 255, 85],
              [0, 255, 170], [0, 255, 255], [0, 170, 255], [0, 85, 255],
              [0, 0, 255], [85, 0, 255], [170, 0, 255], [255, 0, 255],
              [255, 0, 170], [255, 0, 85]]
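    # The tables above assume an OpenPose-style network whose final stage
    # emits 19 part-confidence maps (18 parts + background) and 38
    # part-affinity field (PAF) channels.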

    def __init__(self):
        super(PeoplePoseEstimation2D, self).__init__()
        self.backend = rospy.get_param('~backend', 'chainer')
        self.scales = rospy.get_param('~scales', [0.38])
        self.stride = rospy.get_param('~stride', 8)
        self.pad_value = rospy.get_param('~pad_value', 128)
        self.thre1 = rospy.get_param('~thre1', 0.1)  # heatmap peak threshold
        self.thre2 = rospy.get_param('~thre2', 0.05)  # PAF integral threshold
        self.gpu = rospy.get_param('~gpu', -1)  # -1 means CPU
        self.with_depth = rospy.get_param('~with_depth', False)
        self._load_model()
        self.image_pub = self.advertise('~output', Image, queue_size=1)
        self.pose_pub = self.advertise('~pose', PeoplePoseArray, queue_size=1)
        if self.with_depth:
            self.pose_2d_pub = self.advertise(
                '~pose_2d', PeoplePoseArray, queue_size=1)
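    # A minimal invocation sketch (package and script names here are
    # placeholders; adjust them to your install), assuming a pickled
    # Chainer model converted from OpenPose weights:
    #   rosrun your_package people_pose_estimation_2d.py \
    #       _model_file:=/path/to/pose_model.pkl _gpu:=0 \
    #       ~input:=/camera/rgb/image_rect_color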

    @property
    def visualize(self):
        return self.image_pub.get_num_connections() > 0

    def _load_model(self):
        if self.backend == 'chainer':
            self._load_chainer_model()
        else:
            raise RuntimeError('Unsupported backend: %s' % self.backend)

    def _load_chainer_model(self):
        model_file = rospy.get_param('~model_file')
        with open(model_file, 'rb') as f:
            self.func = pickle.load(f)
        rospy.loginfo('Finished loading trained model: {0}'.format(model_file))
        if self.gpu != -1:
            self.func.to_gpu(self.gpu)

    def subscribe(self):
        if self.with_depth:
            queue_size = rospy.get_param('~queue_size', 10)
            sub_img = message_filters.Subscriber(
                '~input', Image, queue_size=1, buff_size=2**24)
            sub_depth = message_filters.Subscriber(
                '~input/depth', Image, queue_size=1, buff_size=2**24)
            sub_info = message_filters.Subscriber(
                '~input/info', CameraInfo, queue_size=1, buff_size=2**24)
            self.subs = [sub_img, sub_depth, sub_info]
            if rospy.get_param('~approximate_sync', True):
                slop = rospy.get_param('~slop', 0.1)
                sync = message_filters.ApproximateTimeSynchronizer(
                    fs=self.subs, queue_size=queue_size, slop=slop)
            else:
                sync = message_filters.TimeSynchronizer(
                    fs=self.subs, queue_size=queue_size)
            sync.registerCallback(self._cb_with_depth)
        else:
            sub_img = rospy.Subscriber(
                '~input', Image, self._cb, queue_size=1, buff_size=2**24)
            self.subs = [sub_img]

    def unsubscribe(self):
        for sub in self.subs:
            sub.unregister()

    def _cb_with_depth(self, img_msg, depth_msg, camera_info_msg):
        br = cv_bridge.CvBridge()
        img = br.imgmsg_to_cv2(img_msg, desired_encoding='bgr8')
        depth_img = br.imgmsg_to_cv2(depth_msg, 'passthrough')
        if depth_msg.encoding == '16UC1':
            # convert millimeters (uint16) to meters (float32)
            depth_img = np.asarray(depth_img, dtype=np.float32)
            depth_img /= 1000
        elif depth_msg.encoding != '32FC1':
            rospy.logerr('Unsupported depth encoding: %s' % depth_msg.encoding)

        pose_estimated_img, people_joint_positions = self.pose_estimate(img)
        pose_estimated_msg = br.cv2_to_imgmsg(
            pose_estimated_img.astype(np.uint8), encoding='bgr8')
        pose_estimated_msg.header = img_msg.header

        people_pose_msg = PeoplePoseArray()
        people_pose_msg.header = img_msg.header
        people_pose_2d_msg = self._create_2d_people_pose_array_msgs(
            people_joint_positions,
            img_msg.header)

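        # Back-project each detected 2D joint with the pinhole model:
        # a pixel (u, v) at depth z maps to x = (u - cx) * z / fx,
        # y = (v - cy) * z / fy, using the row-major intrinsic matrix
        # camera_info_msg.K = [fx, 0, cx, 0, fy, cy, 0, 0, 1].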
        fx = camera_info_msg.K[0]
        fy = camera_info_msg.K[4]
        cx = camera_info_msg.K[2]
        cy = camera_info_msg.K[5]
        for person_joint_positions in people_joint_positions:
            pose_msg = PeoplePose()
            for joint_pos in person_joint_positions:
                if joint_pos['score'] < 0:
                    continue
                z = float(depth_img[int(joint_pos['y']), int(joint_pos['x'])])
                if np.isnan(z):
                    continue
                x = (joint_pos['x'] - cx) * z / fx
                y = (joint_pos['y'] - cy) * z / fy
                pose_msg.limb_names.append(joint_pos['limb'])
                pose_msg.scores.append(joint_pos['score'])
                pose_msg.poses.append(Pose(position=Point(x=x, y=y, z=z),
                                           orientation=Quaternion(w=1)))
            people_pose_msg.poses.append(pose_msg)

        self.pose_2d_pub.publish(people_pose_2d_msg)
        self.pose_pub.publish(people_pose_msg)
        if self.visualize:
            self.image_pub.publish(pose_estimated_msg)

    def _cb(self, img_msg):
        br = cv_bridge.CvBridge()
        img = br.imgmsg_to_cv2(img_msg, desired_encoding='bgr8')
        pose_estimated_img, people_joint_positions = self.pose_estimate(img)
        pose_estimated_msg = br.cv2_to_imgmsg(
            pose_estimated_img.astype(np.uint8), encoding='bgr8')
        pose_estimated_msg.header = img_msg.header

        people_pose_msg = self._create_2d_people_pose_array_msgs(
            people_joint_positions,
            img_msg.header)

        self.pose_pub.publish(people_pose_msg)
        if self.visualize:
            self.image_pub.publish(pose_estimated_msg)

    def _create_2d_people_pose_array_msgs(self, people_joint_positions, header):
        people_pose_msg = PeoplePoseArray(header=header)
        for person_joint_positions in people_joint_positions:
            pose_msg = PeoplePose()
            for joint_pos in person_joint_positions:
                if joint_pos['score'] < 0:
                    continue
                pose_msg.limb_names.append(joint_pos['limb'])
                pose_msg.scores.append(joint_pos['score'])
                pose_msg.poses.append(Pose(position=Point(x=joint_pos['x'],
                                                          y=joint_pos['y'],
                                                          z=0)))
            people_pose_msg.poses.append(pose_msg)
        return people_pose_msg

    def pose_estimate(self, bgr):
        if self.backend == 'chainer':
            return self._pose_estimate_chainer_backend(bgr)
        raise ValueError('Unsupported backend: {0}'.format(self.backend))

    def _pose_estimate_chainer_backend(self, bgr_img):
        xp = cuda.cupy if self.gpu != -1 else np

        # Accumulators for multi-scale averaging: 19 part heatmaps and
        # 38 part-affinity field channels at the input resolution.
        heatmap_avg = xp.zeros((bgr_img.shape[0], bgr_img.shape[1], 19),
                               dtype=np.float32)
        paf_avg = xp.zeros((bgr_img.shape[0], bgr_img.shape[1], 38),
                           dtype=np.float32)

        for scale in self.scales:
            img = cv2.resize(bgr_img, (0, 0), fx=scale,
                             fy=scale, interpolation=cv2.INTER_CUBIC)
            padded_img, pad = padRightDownCorner(
                img, self.stride, self.pad_value)
            # HWC uint8 -> NCHW float in [-0.5, 0.5)
            x = np.transpose(np.float32(
                padded_img[:, :, :, np.newaxis]), (3, 2, 0, 1)) / 256 - 0.5
            if self.gpu != -1:
                x = chainer.cuda.to_gpu(x)
            x = chainer.Variable(x)
            y = self.func(inputs={'image': x},
                          outputs=['Mconv7_stage6_L2', 'Mconv7_stage6_L1'])
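            # 'Mconv7_stage6_L2' holds the part-confidence maps and
            # 'Mconv7_stage6_L1' the part-affinity fields; both are
            # upsampled by the network stride and cropped back to the
            # unpadded image size below.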
            y0 = F.resize_images(
                y[0], (y[0].data.shape[2] * self.stride,
                       y[0].data.shape[3] * self.stride))
            heatmap = y0[:, :, :padded_img.shape[0] - pad[2],
                         :padded_img.shape[1] - pad[3]]
            heatmap = F.resize_images(
                heatmap, (bgr_img.shape[0], bgr_img.shape[1]))
            heatmap = xp.transpose(xp.squeeze(heatmap.data), (1, 2, 0))
            y1 = F.resize_images(
                y[1], (y[1].data.shape[2] * self.stride,
                       y[1].data.shape[3] * self.stride))
            paf = y1[:, :, :padded_img.shape[0] - pad[2],
                     :padded_img.shape[1] - pad[3]]
            paf = F.resize_images(paf, (bgr_img.shape[0], bgr_img.shape[1]))
            paf = xp.transpose(xp.squeeze(paf.data), (1, 2, 0))

            coeff = 1.0 / len(self.scales)
            paf_avg += paf * coeff
            heatmap_avg += heatmap * coeff

        # Non-maximum suppression: keep pixels that are local maxima of
        # their 4-neighborhood and exceed thre1.
        heatmap_up = xp.zeros_like(heatmap_avg)
        heatmap_up[1:, :] = heatmap_avg[:-1, :]
        heatmap_down = xp.zeros_like(heatmap_avg)
        heatmap_down[:-1, :] = heatmap_avg[1:, :]
        heatmap_left = xp.zeros_like(heatmap_avg)
        heatmap_left[:, 1:] = heatmap_avg[:, :-1]
        heatmap_right = xp.zeros_like(heatmap_avg)
        heatmap_right[:, :-1] = heatmap_avg[:, 1:]
        peaks_binary = (heatmap_avg >= heatmap_up) & \
                       (heatmap_avg >= heatmap_down) & \
                       (heatmap_avg >= heatmap_left) & \
                       (heatmap_avg >= heatmap_right) & \
                       (heatmap_avg > self.thre1)
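        # Collect peaks as rows of (x, y, score, peak id), then split them
        # into one array per body-part channel.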
        peaks = xp.array(xp.nonzero(
            peaks_binary[..., :len(self.index2limbname) - 1]),
            dtype=np.int32).T
        peak_counter = peaks.shape[0]
        all_peaks = xp.zeros((peak_counter, 4), dtype=np.float32)
        all_peaks[:, 0] = peaks[:, 1]  # x
        all_peaks[:, 1] = peaks[:, 0]  # y
        all_peaks[:, 2] = heatmap_avg[peaks[:, 0], peaks[:, 1], peaks[:, 2]]
        peaks_order = peaks[..., 2]
        try:
            all_peaks = all_peaks[xp.argsort(peaks_order)]
        except AttributeError:
            # some older cupy versions lack argsort; fall back to numpy
            peaks_order = chainer.cuda.to_cpu(peaks_order)
            all_peaks = all_peaks[np.argsort(peaks_order)]
        all_peaks[:, 3] = xp.arange(peak_counter, dtype=np.float32)
        if self.gpu != -1:
            all_peaks = chainer.cuda.to_cpu(all_peaks)
            peaks_order = chainer.cuda.to_cpu(peaks_order)
        all_peaks = np.split(all_peaks, np.cumsum(
            np.bincount(peaks_order, minlength=len(self.index2limbname) - 1)))
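        # Score every candidate limb connection (A, B) by integrating the
        # part-affinity field along mid_num points sampled on the segment
        # from A to B; long limbs additionally receive a distance penalty.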
        mid_num = 10
        eps = 1e-8
        score_mid = paf_avg[:, :, [[x - 19 for x in self.map_idx[k]]
                                   for k in range(len(self.map_idx))]]

        cands = np.array(all_peaks, dtype=object)[
            np.array(self.limb_sequence, dtype=np.int32) - 1]
        candAs = cands[:, 0]
        candBs = cands[:, 1]
        nAs = np.array([len(candA) for candA in candAs])
        nBs = np.array([len(candB) for candB in candBs])
        target_indices = np.nonzero(np.logical_and(nAs != 0, nBs != 0))[0]
        if len(target_indices) == 0:
            return bgr_img, []

        # Enumerate all (A, B) pairs per limb: repeat each A candidate nB
        # times and tile the B candidates nA times.
        all_candidates_A = [np.repeat(np.array(tmp_candA, dtype=np.float32),
                                      nB, axis=0)
                            for tmp_candA, nB in zip(candAs, nBs)]
        all_candidates_B = [np.tile(np.array(tmp_candB, dtype=np.float32),
                                    (nA, 1))
                            for tmp_candB, nA in zip(candBs, nAs)]

        target_candidates_B = [all_candidates_B[index]
                               for index in target_indices]
        target_candidates_A = [all_candidates_A[index]
                               for index in target_indices]

        vec = np.vstack(target_candidates_B)[
            :, :2] - np.vstack(target_candidates_A)[:, :2]
        if self.gpu != -1:
            vec = chainer.cuda.to_gpu(vec)
        norm = xp.sqrt(xp.sum(vec ** 2, axis=1)) + eps
        vec = vec / norm[:, None]
        # mid_num evenly spaced integer sample points on each A->B segment
        start_end = zip(np.round(np.mgrid[np.vstack(target_candidates_A)[:, 1].reshape(-1, 1):np.vstack(target_candidates_B)[:, 1].reshape(-1, 1):(mid_num * 1j)]).astype(np.int32),
                        np.round(np.mgrid[np.vstack(target_candidates_A)[:, 0].reshape(-1, 1):np.vstack(
                            target_candidates_B)[:, 0].reshape(-1, 1):(mid_num * 1j)]).astype(np.int32),
                        np.concatenate([[[index] * mid_num for i in range(len(c))] for index, c in zip(target_indices, target_candidates_B)]),)
        v = score_mid[tuple(np.concatenate(
            list(start_end), axis=1).tolist())].reshape(-1, mid_num, 2)
        score_midpts = xp.sum(v * xp.repeat(vec, (mid_num),
                                            axis=0).reshape(-1, mid_num, 2),
                              axis=2)
        # mean PAF alignment plus a distance prior that penalizes limbs
        # longer than half the image height
        score_with_dist_prior = xp.sum(score_midpts, axis=1) / mid_num + \
            xp.minimum(0.5 * bgr_img.shape[0] / norm - 1,
                       xp.zeros_like(norm, dtype=np.float32))
        c1 = xp.sum(score_midpts > self.thre2, axis=1) > 0.8 * mid_num
        c2 = score_with_dist_prior > 0.0
        criterion = xp.logical_and(c1, c2)

        # cumulative pair counts per limb type, used as bin edges below
        indices_bins = np.cumsum(nAs * nBs)
        indices_bins = np.concatenate(
            [np.zeros(1), indices_bins]).astype(np.int32)
        target_candidate_indices = xp.nonzero(criterion)[0]
        if self.gpu != -1:
            target_candidate_indices = chainer.cuda.to_cpu(
                target_candidate_indices)
            score_with_dist_prior = chainer.cuda.to_cpu(score_with_dist_prior)
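        # Map each accepted flat pair index back to its limb type k and
        # local candidate indices (i, j) via the cumulative bin edges.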
        k_s = np.digitize(target_candidate_indices, indices_bins) - 1
        i_s = (target_candidate_indices - (indices_bins[k_s])) // nBs[k_s]
        j_s = (target_candidate_indices - (indices_bins[k_s])) % nBs[k_s]

        connection_candidate = np.concatenate(
            [k_s.reshape(-1, 1),
             i_s.reshape(-1, 1),
             j_s.reshape(-1, 1),
             score_with_dist_prior[target_candidate_indices].reshape(-1, 1),
             (score_with_dist_prior[target_candidate_indices] +
              np.concatenate(target_candidates_A)[target_candidate_indices, 2] +
              np.concatenate(target_candidates_B)[target_candidate_indices, 2]
              ).reshape(-1, 1)],
            axis=1)

        # sort by limb type first, then by descending connection score
        # (assumes scores stay well below 100)
        sorted_indices = np.argsort(
            connection_candidate[:, 0] * 100 - connection_candidate[:, 3])
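        # Greedy matching per limb type: accept connections in descending
        # score order, skipping any that would reuse an already-matched
        # part candidate.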
        connection_all = []
        for _ in range(0, 19):
            connection = np.zeros((0, 5), dtype=np.float32)
            connection_all.append(connection)

        for c_candidate in connection_candidate[sorted_indices]:
            k, i, j = c_candidate[0:3].astype(np.int32)
            score = c_candidate[3]
            if len(connection_all[k]) >= min(nAs[k], nBs[k]):
                continue
            # flat index of A candidate i in the repeated candidate array
            i *= nBs[k]
            if i not in connection_all[k][:, 3] and j not in connection_all[k][:, 4]:
                connection_all[k] = np.vstack(
                    [connection_all[k],
                     np.array([all_candidates_A[k][i][3],
                               all_candidates_B[k][j][3],
                               score, i, j], dtype=np.float32)])
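        # Assemble people. Each row has 20 columns: 18 part peak ids,
        # the accumulated score (column -2), and the number of parts
        # (column -1); connections sharing a part are merged into one row.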
        joint_cands_indices = -1 * np.ones((0, 20))
        candidate = np.array(
            [item for sublist in all_peaks for item in sublist])
        for k in range(len(self.map_idx)):
            partAs = connection_all[k][:, 0]
            partBs = connection_all[k][:, 1]
            indexA, indexB = np.array(self.limb_sequence[k]) - 1
            for i in range(len(connection_all[k])):
                found = 0
                joint_cands_indices_idx = [-1, -1]

                # find existing people that already contain part A or B
                for j in range(len(joint_cands_indices)):
                    if joint_cands_indices[j][indexA] == float(partAs[i]) or \
                       joint_cands_indices[j][indexB] == float(partBs[i]):
                        joint_cands_indices_idx[found] = j
                        found += 1

                if found == 1:
                    j = joint_cands_indices_idx[0]
                    if joint_cands_indices[j][indexB] != float(partBs[i]):
                        joint_cands_indices[j][indexB] = partBs[i]
                        joint_cands_indices[j][-1] += 1
                        joint_cands_indices[j][-2] += \
                            candidate[partBs[i].astype(int), 2] + \
                            connection_all[k][i][2]
                elif found == 2:
                    # merge two partial people if they share no parts
                    j1, j2 = joint_cands_indices_idx
                    membership = ((joint_cands_indices[j1] >= 0).astype(
                        int) + (joint_cands_indices[j2] >= 0).astype(int))[:-2]
                    if len(np.nonzero(membership == 2)[0]) == 0:
                        joint_cands_indices[j1][
                            :-2] += (joint_cands_indices[j2][:-2] + 1)
                        joint_cands_indices[
                            j1][-2:] += joint_cands_indices[j2][-2:]
                        joint_cands_indices[j1][-2] += connection_all[k][i][2]
                        joint_cands_indices = np.delete(
                            joint_cands_indices, j2, 0)
                    else:
                        joint_cands_indices[j1][indexB] = partBs[i]
                        joint_cands_indices[j1][-1] += 1
                        joint_cands_indices[j1][-2] += \
                            candidate[partBs[i].astype(int), 2] + \
                            connection_all[k][i][2]
                elif not found and k < len(self.index2limbname) - 2:
                    # start a new person from this connection
                    row = -1 * np.ones(20)
                    row[indexA] = partAs[i]
                    row[indexB] = partBs[i]
                    row[-1] = 2
                    row[-2] = sum(candidate[connection_all[k]
                                            [i, :2].astype(int), 2]) + \
                        connection_all[k][i][2]
                    joint_cands_indices = np.vstack(
                        [joint_cands_indices, row])
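        # Discard weak detections: keep a person only if it has at least
        # 4 parts and a mean per-part score of at least 0.4.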
        deleteIdx = []
        for i in range(len(joint_cands_indices)):
            if joint_cands_indices[i][-1] < 4 or \
               joint_cands_indices[i][-2] / joint_cands_indices[i][-1] < 0.4:
                deleteIdx.append(i)
        joint_cands_indices = np.delete(joint_cands_indices, deleteIdx, axis=0)

        if self.visualize:
            result_img = self._visualize(
                bgr_img, joint_cands_indices, all_peaks, candidate)
        else:
            result_img = bgr_img

        return result_img, self._extract_joint_position(
            joint_cands_indices, candidate)

    def _visualize(self, img, joint_cands_indices, all_peaks, candidate):
        # draw a filled circle at every detected part peak
        for i in range(len(self.index2limbname) - 1):
            for j in range(len(all_peaks[i])):
                cv2.circle(img, (int(all_peaks[i][j][0]),
                                 int(all_peaks[i][j][1])),
                           4, self.colors[i], thickness=-1)

        # draw each limb as a semi-transparent ellipse between its parts
        stickwidth = 4
        for i in range(len(self.index2limbname) - 2):
            for joint_cand_indices in joint_cands_indices:
                index = joint_cand_indices[np.array(self.limb_sequence[i],
                                                    dtype=np.int32) - 1]
                if -1 in index:
                    continue
                cur_img = img.copy()
                # note: candidate columns are (x, y, ...), so Y holds the
                # x coordinates and X the y coordinates here
                Y = candidate[index.astype(int), 0]
                X = candidate[index.astype(int), 1]
                mX = np.mean(X)
                mY = np.mean(Y)
                length = ((X[0] - X[1]) ** 2 + (Y[0] - Y[1]) ** 2) ** 0.5
                angle = math.degrees(math.atan2(X[0] - X[1], Y[0] - Y[1]))
                polygon = cv2.ellipse2Poly((int(mY), int(mX)), (int(
                    length / 2), stickwidth), int(angle), 0, 360, 1)
                cv2.fillConvexPoly(cur_img, polygon, self.colors[i])
                img = cv2.addWeighted(img, 0.4, cur_img, 0.6, 0)

        return img

    def _extract_joint_position(self, joint_cands_indices, candidate):
        people_joint_positions = []
        for joint_cand_idx in joint_cands_indices:
            person_joint_positions = []
            # iterate over the 18 real parts only; the background channel
            # has no column in joint_cands_indices
            for i, limb_name in enumerate(self.index2limbname[:-1]):
                cand_idx = int(joint_cand_idx[i])
                if cand_idx == -1 or cand_idx >= candidate.shape[0]:
                    # part was not detected for this person
                    person_joint_positions.append(dict(limb=limb_name,
                                                       x=0,
                                                       y=0,
                                                       score=-1))
                    continue
                X, Y = candidate[cand_idx, :2]
                person_joint_positions.append(dict(limb=limb_name,
                                                   x=X,
                                                   y=Y,
                                                   score=candidate[cand_idx, 2]))
            people_joint_positions.append(person_joint_positions)

        return people_joint_positions


if __name__ == '__main__':
    rospy.init_node('people_pose_estimation_2d')
    PeoplePoseEstimation2D()
    rospy.spin()