example1 - object detection.py
import pyrealsense2 as rs
import numpy as np
import cv2
import tensorflow as tf

# Configure the color stream
pipeline = rs.pipeline()
config = rs.config()
config.enable_stream(rs.stream.color, 1280, 720, rs.format.bgr8, 30)

print("[INFO] Starting streaming...")
pipeline.start(config)
print("[INFO] Camera ready.")

# Download the model from: https://github.com/opencv/opencv/wiki/TensorFlow-Object-Detection-API#run-network-in-opencv
print("[INFO] Loading model...")
PATH_TO_CKPT = "frozen_inference_graph.pb"

# Load the TensorFlow model into memory.
detection_graph = tf.Graph()
with detection_graph.as_default():
    od_graph_def = tf.compat.v1.GraphDef()
    with tf.compat.v1.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
        serialized_graph = fid.read()
        od_graph_def.ParseFromString(serialized_graph)
        tf.compat.v1.import_graph_def(od_graph_def, name='')
    sess = tf.compat.v1.Session(graph=detection_graph)

# Input tensor is the image
image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
# Output tensors are the detection boxes, scores, and classes.
# Each box represents a part of the image where a particular object was detected.
detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
# Each score represents the level of confidence for each of the objects.
# The score is shown on the result image, together with the class label.
detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')
detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')
# Number of objects detected
num_detections = detection_graph.get_tensor_by_name('num_detections:0')
# Code source of the TensorFlow model loading: https://www.geeksforgeeks.org/ml-training-image-classifier-using-tensorflow-object-detection-api/
print("[INFO] Model loaded.")

colors_hash = {}
cv2.namedWindow('RealSense', cv2.WINDOW_AUTOSIZE)
while True:
    frames = pipeline.wait_for_frames()
    color_frame = frames.get_color_frame()
    if not color_frame:
        continue

    # Convert the image to a numpy array
    color_image = np.asanyarray(color_frame.get_data())
    scaled_size = (color_frame.width, color_frame.height)
    # Expand image dimensions to have shape [1, None, None, 3],
    # i.e. a batch of one image, where each item holds the pixel RGB values.
    image_expanded = np.expand_dims(color_image, axis=0)
    # Perform the actual detection by running the model with the image as input
    (boxes, scores, classes, num) = sess.run(
        [detection_boxes, detection_scores, detection_classes, num_detections],
        feed_dict={image_tensor: image_expanded})

    boxes = np.squeeze(boxes)
    classes = np.squeeze(classes).astype(np.int32)
    scores = np.squeeze(scores)

    for idx in range(int(num)):
        class_ = classes[idx]
        score = scores[idx]
        box = boxes[idx]

        if class_ not in colors_hash:
            colors_hash[class_] = tuple(np.random.choice(range(256), size=3))

        if score > 0.6:
            # Boxes are normalized as [ymin, xmin, ymax, xmax]; scale to pixel coordinates.
            left = int(box[1] * color_frame.width)
            top = int(box[0] * color_frame.height)
            right = int(box[3] * color_frame.width)
            bottom = int(box[2] * color_frame.height)

            p1 = (left, top)
            p2 = (right, bottom)
            # Draw the box in the per-class color
            r, g, b = colors_hash[class_]
            cv2.rectangle(color_image, p1, p2, (int(r), int(g), int(b)), 2, 1)

    cv2.imshow('RealSense', color_image)
    # Press 'q' to stop streaming
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

print("[INFO] stop streaming ...")
pipeline.stop()
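
The listing draws only the bounding boxes, even though the comment next to detection_scores mentions showing the class label together with the score. The sketch below shows one way the label could be overlaid with cv2.putText; it is not part of the original example, and COCO_LABELS is a hypothetical, abbreviated class-ID-to-name mapping that would need to be filled in from the label map shipped with the detector.

import cv2

# Hypothetical, abbreviated class-ID-to-name map; populate it from the model's label map.
COCO_LABELS = {1: "person", 2: "bicycle", 3: "car"}

def draw_label(image, class_id, score, top_left, color):
    """Draw '<class name>: <score>' just above the top-left corner of a detection box."""
    name = COCO_LABELS.get(class_id, "id {}".format(class_id))
    text = "{}: {:.2f}".format(name, score)
    x, y = top_left
    cv2.putText(image, text, (x, max(y - 5, 0)),
                cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)

# Usage inside the detection loop, right after cv2.rectangle(...):
#     draw_label(color_image, class_, score, p1, (int(r), int(g), int(b)))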
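
Only the color stream is enabled in the listing above. If depth is also needed (for example, to read the distance to a detected object), the sketch below shows how the depth stream could be enabled and aligned to the color image; the resolutions and frame rates are assumptions and should match what the connected camera supports.

import pyrealsense2 as rs

pipeline = rs.pipeline()
config = rs.config()
# Assumed stream settings; adjust to what the connected camera supports.
config.enable_stream(rs.stream.color, 1280, 720, rs.format.bgr8, 30)
config.enable_stream(rs.stream.depth, 1280, 720, rs.format.z16, 30)
pipeline.start(config)

# Align depth frames to the color frames so pixel coordinates match between the two.
align = rs.align(rs.stream.color)

frames = pipeline.wait_for_frames()
aligned = align.process(frames)
depth_frame = aligned.get_depth_frame()
color_frame = aligned.get_color_frame()

if depth_frame and color_frame:
    # Distance (in meters) at the image center; with a detection, use the box center instead.
    cx, cy = color_frame.width // 2, color_frame.height // 2
    print("Distance at image center: {:.2f} m".format(depth_frame.get_distance(cx, cy)))

pipeline.stop()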