from __future__ import print_function

import itertools, pkg_resources, sys

from distutils.version import LooseVersion
if LooseVersion(pkg_resources.get_distribution("chainer").version) >= \
        LooseVersion('7.0.0') and sys.version_info.major == 2:
    print('''Please install chainer < 7.0.0:

    sudo pip install chainer==6.7.0

c.f https://github.com/jsk-ros-pkg/jsk_recognition/pull/2485
''')
    sys.exit(1)
if [p for p in list(itertools.chain(
        *[pkg_resources.find_distributions(_) for _ in sys.path]))
        if "cupy-" in p.project_name] == []:
    print('''Please install CuPy

    sudo pip install cupy-cuda[your cuda version]

    sudo pip install cupy-cuda91
''')

import chainer
import cv2
import numpy as np

import deep_sort
# NOTE: the module providing DeepSortFeatureExtractor was lost in the source;
# the import path below is an assumption.
from deep_sort_net import DeepSortFeatureExtractor
from vis_bboxes import vis_bboxes
35 """Extract image patch from bounding box. 37 https://github.com/nwojke/deep_sort/blob/master/tools/generate_detections.py 44 The bounding box in format (x, y, width, height). 45 patch_shape : Optional[array_like] 46 This parameter can be used to enforce a desired patch shape 47 (height, width). First, the `bbox` is adapted to the aspect ratio 48 of the patch shape, then it is clipped at the image boundaries. 49 If None, the shape is computed from :arg:`bbox`. 54 An image patch showing the :arg:`bbox`, optionally reshaped to 56 Returns None if the bounding box is empty or fully outside of the image 61 if patch_shape
is not None:
        # correct aspect ratio to patch shape
        target_aspect = float(patch_shape[1]) / patch_shape[0]
        new_width = target_aspect * bbox[3]
        bbox[0] -= (new_width - bbox[2]) / 2
        bbox[2] = new_width

    # convert to (top-left, bottom-right) and clip at image boundaries
    bbox[2:] += bbox[:2]
    bbox = bbox.astype(np.int)
    bbox[:2] = np.maximum(0, bbox[:2])
    bbox[2:] = np.minimum(np.asarray(image.shape[:2][::-1]) - 1, bbox[2:])
    if np.any(bbox[:2] >= bbox[2:]):
        return None
    sx, sy, ex, ey = bbox
    image = image[sy:ey, sx:ex]
    image = cv2.resize(image, tuple(patch_shape[::-1]))
    return image
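
# Illustrative usage (not from the original file): bbox is given as
# (x, y, width, height) and patch_shape as (height, width), so a 128x64
# re-identification patch would be extracted like this (file name is a
# placeholder):
#
#     frame = cv2.imread('frame.png')
#     patch = extract_image_patch(frame, [30., 40., 50., 100.], (128, 64))
#     # patch is None when the box falls completely outside the image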
def encoder(image_encoder):
    # Wrap a Chainer model into a (frame, boxes) -> features callable.

    def _encoder(image, boxes):
        image_shape = 128, 64, 3
        image_patches = []
        for box in boxes:
            patch = extract_image_patch(
                image, box, image_shape[:2])
            if patch is None:
                # fall back to a random patch for degenerate boxes
                patch = np.random.uniform(
                    0., 255., image_shape).astype(np.uint8)
            image_patches.append(patch)
        image_patches = np.asarray(image_patches, 'f')
        # NHWC -> NCHW, then move to the encoder's device (CPU or GPU)
        image_patches = image_patches.transpose(0, 3, 1, 2)
        image_patches = image_encoder.xp.asarray(image_patches)
        with chainer.using_config('train', False):
            ret = image_encoder(image_patches)
        return chainer.cuda.to_cpu(ret.data)

    return _encoder
class DeepSortTracker(object):  # class name assumed; lost in the source

    def __init__(self, gpu=-1, pretrained_model=None, nms_max_overlap=1.0,
                 max_cosine_distance=0.2, budget=None):
        self.gpu = gpu
        self.nms_max_overlap = nms_max_overlap
        # appearance feature extractor (wiring reconstructed from context)
        self.extractor = DeepSortFeatureExtractor()
        if pretrained_model is not None:
            chainer.serializers.load_npz(pretrained_model, self.extractor)
        if self.gpu >= 0:
            self.extractor.to_gpu(self.gpu)
        self.encoder = encoder(self.extractor)
        # cosine nearest-neighbor metric for appearance-based association
        metric = deep_sort.deep_sort.nn_matching.NearestNeighborDistanceMetric(
            'cosine', max_cosine_distance, budget)
        self.tracker = deep_sort.deep_sort.tracker.Tracker(metric)
        # object_id -> {'bbox': (x, y, w, h), 'out_of_frame': bool}
        self.tracking_objects = {}
    def track(self, frame, bboxes, scores):
        # suppress overlapping detections before running the tracker
        indices = deep_sort.application_util.preprocessing.non_max_suppression(
            bboxes, self.nms_max_overlap, scores)
        bboxes = bboxes[indices]
        scores = scores[indices]
        n_bbox = len(bboxes)

        # extract appearance features and wrap each box as a Detection
        features = self.encoder(frame, np.array(bboxes))
        detections = [
            deep_sort.deep_sort.detection.Detection(
                bboxes[i], scores[i], features[i])
            for i in range(n_bbox)]

        # Kalman prediction followed by measurement update / association
        self.tracker.predict()
        self.tracker.update(detections)
        # mark every known object as out of frame, then clear the flag for
        # objects that are still backed by a confirmed track
        for target_object in self.tracking_objects.values():
            target_object['out_of_frame'] = True
        for track in self.tracker.tracks:
            if not track.is_confirmed() or track.time_since_update > 1:
                continue
            bbox = track.to_tlwh()
            # NOTE: the original track-id -> object-id bookkeeping is lost
            # here; keying the record directly by track_id is an assumption.
            target_object = self.tracking_objects.setdefault(
                track.track_id, {})
            target_object['out_of_frame'] = False
            target_object['bbox'] = bbox
    def visualize(self, frame, bboxes):
        vis_frame = frame.copy()
        # draw the raw detections; color and thickness are assumptions
        for x1, y1, w, h in bboxes:
            x2, y2 = x1 + w, y1 + h
            x1, y1, x2, y2 = map(int, [x1, y1, x2, y2])
            cv2.rectangle(vis_frame, (x1, y1), (x2, y2), (0, 255, 0), 1)

        # collect the objects that are still visible and label them by id
        labels, bboxes = [], []
        for object_id, target_object in self.tracking_objects.items():
            if target_object['out_of_frame']:
                continue
            labels.append(object_id)
            bboxes.append(target_object['bbox'])
        # assumption: vis_bboxes draws the labeled boxes onto vis_frame
        vis_bboxes(vis_frame, bboxes, labels)
        return vis_frame
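
# A minimal end-to-end sketch (not part of the original file). It assumes a
# pretrained weight file and a video readable by OpenCV; the file names below
# are placeholders, and bboxes/scores would normally come from a detector.
if __name__ == '__main__':
    tracker = DeepSortTracker(gpu=-1, pretrained_model='deep_sort.npz')
    video = cv2.VideoCapture('input.mp4')
    while True:
        ret, frame = video.read()
        if not ret:
            break
        # a single dummy detection, just to show the call flow
        bboxes = np.array([[50., 60., 80., 160.]])   # (x, y, w, h)
        scores = np.array([0.9])
        tracker.track(frame, bboxes, scores)
        vis = tracker.visualize(frame, bboxes)
        cv2.imshow('deep_sort', vis)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    video.release()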