from __future__ import print_function

import itertools
import os
import pkg_resources
import sys

from distutils.version import LooseVersion

# chainer >= 7.0.0 dropped Python 2 support; fail early with install advice.
if LooseVersion(pkg_resources.get_distribution(
        'chainer').version) >= LooseVersion('7.0.0') and \
        sys.version_info.major == 2:
    print('''Please install chainer < 7.0.0:

    sudo pip install chainer==6.7.0

c.f https://github.com/jsk-ros-pkg/jsk_recognition/pull/2485
''')
    # NOTE(review): the original file likely called sys.exit(1) here
    # (the extraction lost that line) -- confirm against upstream.

# Warn when no cupy-* distribution is installed anywhere on sys.path;
# without CuPy the model cannot run on GPU.
if [p for p in list(itertools.chain(*[pkg_resources.find_distributions(_)
                                      for _ in sys.path]))
        if "cupy-" in p.project_name] == []:
    print('''Please install CuPy

    sudo pip install cupy-cuda[your cuda version]

    sudo pip install cupy-cuda91
''')

import chainer
import chainer.functions as F
import chainer.links as L
# Base URL of the CMU-hosted pretrained OpenPose caffe models.
base_url = 'http://posefs1.perception.cs.cmu.edu/OpenPose/models/pose/'

# Maps a pretrained-model keyword to the cached chainermodel file name;
# 'auto' is an alias for the COCO-trained model.
models = {
    'auto': 'coco/pose_iter_440000.chainermodel',
    'coco': 'coco/pose_iter_440000.chainermodel',
    'mpi': 'mpi/pose_iter_160000.chainermodel',
}
40 with self.init_scope():
42 in_channels=3, out_channels=64, ksize=3, stride=1, pad=1)
44 in_channels=64, out_channels=64, ksize=3, stride=1, pad=1)
46 in_channels=64, out_channels=128, ksize=3, stride=1, pad=1)
48 in_channels=128, out_channels=128, ksize=3, stride=1, pad=1)
50 in_channels=128, out_channels=256, ksize=3, stride=1, pad=1)
52 in_channels=256, out_channels=256, ksize=3, stride=1, pad=1)
54 in_channels=256, out_channels=256, ksize=3, stride=1, pad=1)
56 in_channels=256, out_channels=256, ksize=3, stride=1, pad=1)
58 in_channels=256, out_channels=512, ksize=3, stride=1, pad=1)
60 in_channels=512, out_channels=512, ksize=3, stride=1, pad=1)
62 in_channels=512, out_channels=256, ksize=3, stride=1, pad=1)
64 in_channels=256, out_channels=128, ksize=3, stride=1, pad=1)
68 in_channels=128, out_channels=128, ksize=3, stride=1, pad=1)
70 in_channels=128, out_channels=128, ksize=3, stride=1, pad=1)
72 in_channels=128, out_channels=128, ksize=3, stride=1, pad=1)
74 in_channels=128, out_channels=512, ksize=1, stride=1, pad=0)
76 in_channels=512, out_channels=38, ksize=1, stride=1, pad=0)
78 in_channels=128, out_channels=128, ksize=3, stride=1, pad=1)
80 in_channels=128, out_channels=128, ksize=3, stride=1, pad=1)
82 in_channels=128, out_channels=128, ksize=3, stride=1, pad=1)
84 in_channels=128, out_channels=512, ksize=1, stride=1, pad=0)
86 in_channels=512, out_channels=19, ksize=1, stride=1, pad=0)
90 in_channels=185, out_channels=128, ksize=7, stride=1, pad=3)
92 in_channels=128, out_channels=128, ksize=7, stride=1, pad=3)
94 in_channels=128, out_channels=128, ksize=7, stride=1, pad=3)
96 in_channels=128, out_channels=128, ksize=7, stride=1, pad=3)
98 in_channels=128, out_channels=128, ksize=7, stride=1, pad=3)
100 in_channels=128, out_channels=128, ksize=1, stride=1, pad=0)
102 in_channels=128, out_channels=38, ksize=1, stride=1, pad=0)
104 in_channels=185, out_channels=128, ksize=7, stride=1, pad=3)
106 in_channels=128, out_channels=128, ksize=7, stride=1, pad=3)
108 in_channels=128, out_channels=128, ksize=7, stride=1, pad=3)
110 in_channels=128, out_channels=128, ksize=7, stride=1, pad=3)
112 in_channels=128, out_channels=128, ksize=7, stride=1, pad=3)
114 in_channels=128, out_channels=128, ksize=1, stride=1, pad=0)
116 in_channels=128, out_channels=19, ksize=1, stride=1, pad=0)
120 in_channels=185, out_channels=128, ksize=7, stride=1, pad=3)
122 in_channels=128, out_channels=128, ksize=7, stride=1, pad=3)
124 in_channels=128, out_channels=128, ksize=7, stride=1, pad=3)
126 in_channels=128, out_channels=128, ksize=7, stride=1, pad=3)
128 in_channels=128, out_channels=128, ksize=7, stride=1, pad=3)
130 in_channels=128, out_channels=128, ksize=1, stride=1, pad=0)
132 in_channels=128, out_channels=38, ksize=1, stride=1, pad=0)
134 in_channels=185, out_channels=128, ksize=7, stride=1, pad=3)
136 in_channels=128, out_channels=128, ksize=7, stride=1, pad=3)
138 in_channels=128, out_channels=128, ksize=7, stride=1, pad=3)
140 in_channels=128, out_channels=128, ksize=7, stride=1, pad=3)
142 in_channels=128, out_channels=128, ksize=7, stride=1, pad=3)
144 in_channels=128, out_channels=128, ksize=1, stride=1, pad=0)
146 in_channels=128, out_channels=19, ksize=1, stride=1, pad=0)
150 in_channels=185, out_channels=128, ksize=7, stride=1, pad=3)
152 in_channels=128, out_channels=128, ksize=7, stride=1, pad=3)
154 in_channels=128, out_channels=128, ksize=7, stride=1, pad=3)
156 in_channels=128, out_channels=128, ksize=7, stride=1, pad=3)
158 in_channels=128, out_channels=128, ksize=7, stride=1, pad=3)
160 in_channels=128, out_channels=128, ksize=1, stride=1, pad=0)
162 in_channels=128, out_channels=38, ksize=1, stride=1, pad=0)
164 in_channels=185, out_channels=128, ksize=7, stride=1, pad=3)
166 in_channels=128, out_channels=128, ksize=7, stride=1, pad=3)
168 in_channels=128, out_channels=128, ksize=7, stride=1, pad=3)
170 in_channels=128, out_channels=128, ksize=7, stride=1, pad=3)
172 in_channels=128, out_channels=128, ksize=7, stride=1, pad=3)
174 in_channels=128, out_channels=128, ksize=1, stride=1, pad=0)
176 in_channels=128, out_channels=19, ksize=1, stride=1, pad=0)
180 in_channels=185, out_channels=128, ksize=7, stride=1, pad=3)
182 in_channels=128, out_channels=128, ksize=7, stride=1, pad=3)
184 in_channels=128, out_channels=128, ksize=7, stride=1, pad=3)
186 in_channels=128, out_channels=128, ksize=7, stride=1, pad=3)
188 in_channels=128, out_channels=128, ksize=7, stride=1, pad=3)
190 in_channels=128, out_channels=128, ksize=1, stride=1, pad=0)
192 in_channels=128, out_channels=38, ksize=1, stride=1, pad=0)
194 in_channels=185, out_channels=128, ksize=7, stride=1, pad=3)
196 in_channels=128, out_channels=128, ksize=7, stride=1, pad=3)
198 in_channels=128, out_channels=128, ksize=7, stride=1, pad=3)
200 in_channels=128, out_channels=128, ksize=7, stride=1, pad=3)
202 in_channels=128, out_channels=128, ksize=7, stride=1, pad=3)
204 in_channels=128, out_channels=128, ksize=1, stride=1, pad=0)
206 in_channels=128, out_channels=19, ksize=1, stride=1, pad=0)
210 in_channels=185, out_channels=128, ksize=7, stride=1, pad=3)
212 in_channels=128, out_channels=128, ksize=7, stride=1, pad=3)
214 in_channels=128, out_channels=128, ksize=7, stride=1, pad=3)
216 in_channels=128, out_channels=128, ksize=7, stride=1, pad=3)
218 in_channels=128, out_channels=128, ksize=7, stride=1, pad=3)
220 in_channels=128, out_channels=128, ksize=1, stride=1, pad=0)
222 in_channels=128, out_channels=38, ksize=1, stride=1, pad=0)
224 in_channels=185, out_channels=128, ksize=7, stride=1, pad=3)
226 in_channels=128, out_channels=128, ksize=7, stride=1, pad=3)
228 in_channels=128, out_channels=128, ksize=7, stride=1, pad=3)
230 in_channels=128, out_channels=128, ksize=7, stride=1, pad=3)
232 in_channels=128, out_channels=128, ksize=7, stride=1, pad=3)
234 in_channels=128, out_channels=128, ksize=1, stride=1, pad=0)
236 in_channels=128, out_channels=19, ksize=1, stride=1, pad=0)
238 if pretrained_model
in models.keys():
239 data_dir = chainer.dataset.get_dataset_directory(
'openpose/pose')
240 model_path = os.path.join(data_dir, models[pretrained_model])
242 os.makedirs(os.path.dirname(model_path))
245 chainer.dataset.cache_or_load_file(
249 chainer.serializers.load_npz(model_path, self)
250 elif pretrained_model
is not None:
251 if not os.path.exists(pretrained_model):
252 raise OSError(
'model does not exists: "%s"' % pretrained_model)
253 chainer.serializers.load_npz(pretrained_model, self)
261 h = F.max_pooling_2d(h, ksize=2, stride=2)
264 h = F.max_pooling_2d(h, ksize=2, stride=2)
269 h = F.max_pooling_2d(h, ksize=2, stride=2)
291 h = F.concat((h1, h2, feature_map), axis=1)
310 h = F.concat((h1, h2, feature_map), axis=1)
329 h = F.concat((h1, h2, feature_map), axis=1)
348 h = F.concat((h1, h2, feature_map), axis=1)
367 h = F.concat((h1, h2, feature_map), axis=1)
385 return pafs, heatmaps
def _download_pretrained_model(model_type, dest_path):
    """Download the CMU caffe model for *model_type* and convert it to npz.

    Args:
        model_type (str): a key of ``models`` ('auto', 'coco', 'mpi').
        dest_path (str): output path for the converted .chainermodel file.

    Raises:
        OSError: if *dest_path* already exists, or the caffe model could
            not be found after download.
    """
    # Imported lazily: chainer.links.caffe is only needed for conversion.
    from chainer.links import caffe
    if os.path.exists(dest_path):
        raise OSError(
            'destination already exists: %s' % dest_path)
    # The cached filename keeps the caffe basename (pose_iter_NNNNNN).
    basename, ext = os.path.splitext(models[model_type])
    url = base_url + basename + '.caffemodel'
    caffe_model_path = chainer.dataset.cached_download(url)
    if not os.path.exists(caffe_model_path):
        raise OSError(
            'caffe model does not exist: %s' % caffe_model_path)
    print(
        'Converting to chainer model')
    caffe_model = caffe.CaffeFunction(caffe_model_path)
    chainer_model = PoseNet(pretrained_model=None)
    for link in chainer_model.links():
        if not isinstance(link, chainer.Link) or not link.name:
            continue
        # The chainer attribute names equal the caffe layer names, so the
        # weights can be copied by name whenever both W and b shapes match.
        # NOTE: eval/exec on our own layer names only -- no untrusted input.
        if eval(
                'chainer_model.{0}.b.shape == caffe_model["{0}"].b.shape'
                .format(link.name)) and \
                eval(
                    'chainer_model.{0}.W.shape == caffe_model["{0}"].W.shape'
                    .format(link.name)):
            exec(
                'chainer_model.{0}.W.data = caffe_model["{0}"].W.data'
                .format(link.name))
            exec(
                'chainer_model.{0}.b.data = caffe_model["{0}"].b.data'
                .format(link.name))
            print(
                'Copied layer {0}'.format(link.name))
        else:
            print(
                'Failed to copy layer {0}'.format(link.name))
    chainer.serializers.save_npz(dest_path, chainer_model)
# NOTE(review): the two lines below are signature fragments displaced by the
# extraction, not real definitions at this position; the actual definitions
# appear above:
#   def __init__(self, pretrained_model='auto')          (PoseNet method)
#   def _download_pretrained_model(model_type, dest_path) (module function)