from __future__ import print_function
 
import itertools, pkg_resources, sys
 
from distutils.version import LooseVersion
 
if LooseVersion(pkg_resources.get_distribution("chainer").version) >= LooseVersion('7.0.0') and \
        sys.version_info.major == 2:
    print('''Please install chainer < 7.0.0:

    sudo pip install chainer==6.7.0

cf. https://github.com/jsk-ros-pkg/jsk_recognition/pull/2485
''', file=sys.stderr)
    sys.exit(1)
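
# GPU inference needs CuPy; scan every distribution on sys.path so that both
# the plain "cupy" package and versioned wheels ("cupy-cuda91", ...) count.
# This check only warns: CPU mode (~gpu: -1) works without CuPy.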
if [p for p in
        list(itertools.chain(*[pkg_resources.find_distributions(_) for _ in sys.path]))
        if "cupy-" in p.project_name or "cupy" == p.project_name] == []:
    print('''Please install CuPy

    sudo pip install cupy-cuda[your cuda version]
    e.g.
    sudo pip install cupy-cuda91
''', file=sys.stderr)

import chainer
import chainer.functions as F
from chainer import Variable
import cv2
import cv_bridge
import message_filters
import numpy as np
import rospy
import tf

from geometry_msgs.msg import Point
from geometry_msgs.msg import Pose
from geometry_msgs.msg import Quaternion
from jsk_topic_tools import ConnectionBasedTransport
from jsk_recognition_msgs.msg import PeoplePose
from jsk_recognition_msgs.msg import PeoplePoseArray
from sensor_msgs.msg import Image

from hmr.smpl import SMPL
from hmr.net import EncoderFC3Dropout
from hmr.resnet_v2_50 import ResNet_v2_50
 

def format_pose_msg(person_pose):
    key_points = []
    for pose, score in zip(person_pose.poses, person_pose.scores):
        key_points.append(pose.position.x)
        key_points.append(pose.position.y)
        key_points.append(score)
    return np.array(key_points, 'f').reshape(-1, 3)
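
# The result is an (N, 3) float32 array with one (x, y, score) row per
# joint; this is the keypoint layout get_bbox() below expects.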
 

def resize_img(img, scale_factor):
    new_size = (np.floor(np.array(img.shape[0:2]) * scale_factor)).astype(int)
    new_img = cv2.resize(img, (new_size[1], new_size[0]))
    actual_factor = [
        new_size[0] / float(img.shape[0]),
        new_size[1] / float(img.shape[1]),
    ]
    return new_img, actual_factor
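
# The factors are recomputed from the floored integer size, so they can
# differ slightly from the requested scale_factor.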
 

def scale_and_crop(image, scale, center, img_size):
    image_scaled, scale_factors = resize_img(image, scale)
    # resize_img() returns [y, x] factors; swap to [x, y] to match center
    scale_factors = [scale_factors[1], scale_factors[0]]
    center_scaled = np.round(center * scale_factors).astype(int)

    margin = int(img_size / 2)
    image_pad = np.pad(
        image_scaled, ((margin, ), (margin, ), (0, )), mode='edge')
    center_pad = center_scaled + margin
    start_pt = center_pad - margin
    end_pt = center_pad + margin
    crop = image_pad[start_pt[1]:end_pt[1], start_pt[0]:end_pt[0], :]
    proc_param = {
        'scale': scale,
        'start_pt': start_pt,
        'end_pt': end_pt,
        'img_size': img_size,
    }
    return crop, proc_param
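
# With img_size=224 the crop is the 224x224 window centered on `center`
# after scaling; the 'edge' padding keeps that window inside the padded
# image even for people near the border.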
 

def get_bbox(key_points, vis_thr=0.2):
    vis = key_points[:, 2] > vis_thr
    vis_kp = key_points[vis, :2]
    if len(vis_kp) == 0:
        return False, False
    min_pt = np.min(vis_kp, axis=0)
    max_pt = np.max(vis_kp, axis=0)
    person_height = np.linalg.norm(max_pt - min_pt)
    if person_height == 0:
        return False, False
    center = (min_pt + max_pt) / 2.
    scale = 150. / person_height
    return scale, center
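
# scale = 150. / person_height rescales the person so the keypoint
# bounding-box diagonal becomes roughly 150 px, filling most of the
# 224 px crop used below.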
 

def preprocess_image(img, key_points=None, img_size=224):
    if key_points is None:
        scale = 1.
        center = np.round(np.array(img.shape[:2]) / 2).astype(int)
        # image center in (x, y) order
        center = center[::-1]
    else:
        scale, center = get_bbox(key_points, vis_thr=0.1)
        if scale is False:
            scale = 1.
            center = np.round(np.array(img.shape[:2]) / 2).astype(int)
            center = center[::-1]
    crop_img, proc_param = scale_and_crop(img, scale, center, img_size)

    # normalize pixel values from [0, 255] to [-1, 1]
    crop_img = 2 * ((crop_img / 255.) - 0.5)
    return crop_img, proc_param
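
# Offline usage sketch (hypothetical file name; independent of ROS):
#   img = cv2.imread('person.jpg')
#   crop, proc_param = preprocess_image(img)  # no keypoints -> center crop
#   # crop.shape[:2] == (224, 224), values normalized to [-1, 1]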
 

# Initial parameter vector for the regressor; the name `mean` is assumed in
# this reconstruction and is referenced by pose_estimate() below.
mean = np.array([[
    0.90365213, -0.00383353,  0.03301106,  3.14986515, -0.01883755,
    0.16895422, -0.15615709, -0.0058559,  0.07191881, -0.18924442,
    -0.04396844, -0.05114707,  0.24385466,  0.00881136,  0.02384637,
    0.2066803, -0.10190887, -0.03373535,  0.27340922,  0.00637481,
    0.07408072, -0.03409823, -0.00971786,  0.03841642,  0.0191336,
    0.10812955, -0.06782207, -0.08026548, -0.18373352,  0.16556455,
    0.03735897, -0.02497507,  0.02688527, -0.18802814,  0.17772846,
    0.13135587,  0.01438429,  0.15891947, -0.2279436, -0.07497088,
    0.05169746,  0.08784129,  0.02147929,  0.02320284, -0.42375749,
    -0.04963749,  0.08627309,  0.47963148,  0.26528436, -0.1028522,
    -0.02501041,  0.05762934, -0.26270828, -0.8297376,  0.13903582,
    0.30412629,  0.79824799,  0.12842464, -0.64139324,  0.16469972,
    -0.08669609,  0.55955994, -0.16742738, -0.03153928, -0.02245264,
    -0.02357809,  0.02061746,  0.02320515,  0.00869796, -0.1655257,
    -0.07094092, -0.1663706, -0.10953037,  0.11675739,  0.20495811,
    0.10592803,  0.14583197, -0.31755996,  0.13645983,  0.28833047,
    0.06303538,  0.48629287,  0.23359743, -0.02812484,  0.23948504]], 'f')
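
# The 85 entries decompose as 3 weak-perspective camera parameters,
# 72 axis-angle pose parameters (24 joints x 3), and 10 SMPL shape
# coefficients, matching the num_cam/num_theta slicing in pose_estimate().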
 

class HumanMeshRecovery(ConnectionBasedTransport):

    def __init__(self):
        super(HumanMeshRecovery, self).__init__()
        self.gpu = rospy.get_param('~gpu', -1)  # -1 means CPU mode
        self.num_stage = rospy.get_param('~num_stage', 3)
        self.with_people_pose = rospy.get_param('~with_people_pose', False)

        self.smpl = SMPL()
        self.encoder_fc3_model = EncoderFC3Dropout()
        self.resnet_v2 = ResNet_v2_50()
        self._load_model()

        self.br = cv_bridge.CvBridge()
        self.pose_pub = self.advertise(
            '~output/pose', PeoplePoseArray, queue_size=1)
 
    def _load_model(self):
        smpl_model_file = rospy.get_param('~smpl_model_file')
        chainer.serializers.load_npz(smpl_model_file, self.smpl)
        encoder_fc3_model_file = rospy.get_param('~encoder_model_file')
        chainer.serializers.load_npz(
            encoder_fc3_model_file, self.encoder_fc3_model)
        resnet_v2_50_model_file = rospy.get_param('~resnet_v2_50_model_file')
        chainer.serializers.load_npz(resnet_v2_50_model_file, self.resnet_v2)
        rospy.loginfo('Finished loading trained model')

        if self.gpu >= 0:
            chainer.cuda.get_device_from_id(self.gpu).use()
            self.smpl.to_gpu()
            self.encoder_fc3_model.to_gpu()
            self.resnet_v2.to_gpu()
        # inference only: disable train-mode behavior and graph construction
        chainer.global_config.train = False
        chainer.global_config.enable_backprop = False
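
    # ConnectionBasedTransport wires subscribe()/unsubscribe() lazily: the
    # node only listens to its inputs while ~output/pose has subscribers.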
    def subscribe(self):
        if self.with_people_pose:
            queue_size = rospy.get_param('~queue_size', 10)
            sub_img = message_filters.Subscriber(
                '~input', Image, queue_size=queue_size, buff_size=2**24)
            sub_pose = message_filters.Subscriber(
                '~input/pose', PeoplePoseArray,
                queue_size=queue_size, buff_size=2**24)
            self.subs = [sub_img, sub_pose]

            if rospy.get_param('~approximate_sync', False):
                slop = rospy.get_param('~slop', 0.1)
                sync = message_filters.ApproximateTimeSynchronizer(
                    fs=self.subs, queue_size=queue_size, slop=slop)
            else:
                sync = message_filters.TimeSynchronizer(
                    fs=self.subs, queue_size=queue_size)
            sync.registerCallback(self._cb_with_pose)
        else:
            sub_img = rospy.Subscriber(
                '~input', Image, self._cb,
                queue_size=1, buff_size=2**24)
            self.subs = [sub_img]
 
    def unsubscribe(self):
        for sub in self.subs:
            sub.unregister()
 
    def _cb(self, img_msg):
        br = self.br
        img = br.imgmsg_to_cv2(img_msg, desired_encoding='bgr8')
        # center-crop and normalize to the 224x224 network input
        img, _ = preprocess_image(img)
        imgs = img.transpose(2, 0, 1)[None, ]
        verts, Js, Rs, A, cams, poses, shapes = self.pose_estimate(imgs)

        people_pose_msg = self._create_people_pose_array_msgs(
            chainer.cuda.to_cpu(A.data), img_msg.header)
        self.pose_pub.publish(people_pose_msg)
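
    # When ~with_people_pose is set, 2D keypoints guide one crop per person
    # instead of feeding the whole image once.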
 
    def _cb_with_pose(self, img_msg, people_pose_msg):
        br = self.br
        img = br.imgmsg_to_cv2(img_msg, desired_encoding='bgr8')

        imgs = []
        for person_pose in people_pose_msg.poses:
            key_points = format_pose_msg(person_pose)
            crop_img, _ = preprocess_image(img, key_points)
            imgs.append(crop_img)
        if len(imgs) == 0:
            # no detections: fall back to the full image
            imgs = np.array(img[None, ], 'f').transpose(0, 3, 1, 2)
        else:
            imgs = np.array(imgs, 'f').transpose(0, 3, 1, 2)
        verts, Js, Rs, A, cams, poses, shapes = self.pose_estimate(imgs)

        people_pose_msg = self._create_people_pose_array_msgs(
            chainer.cuda.to_cpu(A.data), img_msg.header)
        self.pose_pub.publish(people_pose_msg)
 
    def _create_people_pose_array_msgs(self, people_joint_positions, header):
        people_pose_msg = PeoplePoseArray(header=header)
        for i, person_joint_positions in enumerate(people_joint_positions):
            pose_msg = PeoplePose()
            for j, joint_pose in enumerate(person_joint_positions):
                pose_msg.limb_names.append(str(j))
                pose_msg.scores.append(0.0)
                # joint_pose is a 4x4 homogeneous transform: translation in
                # the last column, rotation in the upper-left 3x3 block
                q_xyzw = tf.transformations.quaternion_from_matrix(joint_pose)
                pose_msg.poses.append(
                    Pose(position=Point(
                        x=joint_pose[0, 3],
                        y=joint_pose[1, 3],
                        z=joint_pose[2, 3]),
                        orientation=Quaternion(
                            x=q_xyzw[0],
                            y=q_xyzw[1],
                            z=q_xyzw[2],
                            w=q_xyzw[3])))
            people_pose_msg.poses.append(pose_msg)
        return people_pose_msg
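
    # Iterative Error Feedback: starting from the mean theta, the encoder
    # regresses a correction delta_theta at each of num_stage iterations.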
 
    def pose_estimate(self, imgs):
        batch_size = imgs.shape[0]
        imgs = Variable(self.resnet_v2.xp.array(imgs, 'f'))
        img_feat = self.resnet_v2(imgs).reshape(batch_size, -1)

        theta_prev = F.tile(
            Variable(self.encoder_fc3_model.xp.array(mean, 'f')),
            (batch_size, 1))
        num_cam = 3
        num_theta = 72
        for _ in range(self.num_stage):
            state = F.concat([img_feat, theta_prev], axis=1)
            delta_theta = self.encoder_fc3_model(state)
            theta_here = theta_prev + delta_theta
            # cams: N x 3, poses: N x 72, shapes: N x 10
            cams = theta_here[:, :num_cam]
            poses = theta_here[:, num_cam:(num_cam + num_theta)]
            shapes = theta_here[:, (num_cam + num_theta):]

            verts, Js, Rs, A = self.smpl(shapes, poses)
            theta_prev = theta_here
        return verts, Js, Rs, A, cams, poses, shapes
 

if __name__ == '__main__':
    rospy.init_node('human_mesh_recovery')
    HumanMeshRecovery()
    rospy.spin()
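
# Example invocation (a sketch: the package name and file paths below are
# assumptions; adapt them to your installation):
#   rosrun jsk_perception human_mesh_recovery.py \
#       _gpu:=0 \
#       _smpl_model_file:=/path/to/smpl.npz \
#       _encoder_model_file:=/path/to/encoder_fc3.npz \
#       _resnet_v2_50_model_file:=/path/to/resnet_v2_50.npz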