import pyrealsense2 as rs
import numpy as np
import cv2
import tensorflow as tf
# Stream resolution (assumed: 848x480; must match a mode supported by the camera)
W = 848
H = 480

pipeline = rs.pipeline()
config = rs.config()
config.enable_stream(rs.stream.depth, W, H, rs.format.z16, 30)
config.enable_stream(rs.stream.color, W, H, rs.format.bgr8, 30)
print("[INFO] start streaming...")
pipeline.start(config)
aligned_stream = rs.align(rs.stream.color)  # align depth frames to the color stream
point_cloud = rs.pointcloud()
print("[INFO] loading model...")
PATH_TO_CKPT = r"frozen_inference_graph.pb"

detection_graph = tf.Graph()
with detection_graph.as_default():
    od_graph_def = tf.compat.v1.GraphDef()
    with tf.compat.v1.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
        serialized_graph = fid.read()
        od_graph_def.ParseFromString(serialized_graph)
        tf.compat.v1.import_graph_def(od_graph_def, name='')
    sess = tf.compat.v1.Session(graph=detection_graph)
# Input tensor: the image batch.
image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
# Output tensors: bounding boxes, confidence scores, class labels and detection count.
detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')
detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')
num_detections = detection_graph.get_tensor_by_name('num_detections:0')
while True:  # main streaming loop (assumed; the original loop header is not shown)
    frames = pipeline.wait_for_frames()
    frames = aligned_stream.process(frames)
    depth_frame = frames.get_depth_frame()
    color_frame = frames.get_color_frame()
    points = point_cloud.calculate(depth_frame)
    # Vertices as an (H, W, 3) array of x, y, z coordinates in meters.
    verts = np.asanyarray(points.get_vertices()).view(np.float32).reshape(-1, W, 3)
    color_image = np.asanyarray(color_frame.get_data())
    scaled_size = (int(W), int(H))

    # Expand image dimensions to shape [1, H, W, 3], since the graph expects a batch.
    image_expanded = np.expand_dims(color_image, axis=0)
    (boxes, scores, classes, num) = sess.run([detection_boxes, detection_scores, detection_classes, num_detections],
                                             feed_dict={image_tensor: image_expanded})

    boxes = np.squeeze(boxes)
    classes = np.squeeze(classes).astype(np.int32)
    scores = np.squeeze(scores)
    print("[INFO] drawing bounding box on detected objects...")
    print("[INFO] each detected object has a unique color")

    for idx in range(int(num)):
        class_ = classes[idx]
        score = scores[idx]
        box = boxes[idx]
        print(" [DEBUG] class : ", class_, "idx : ", idx, "num : ", num)
        if score > 0.8 and class_ == 1:  # keep confident detections of class 1 (person in the COCO label map)
            # Boxes come back normalized as [ymin, xmin, ymax, xmax]; convert to pixel coordinates.
            left = box[1] * W
            top = box[0] * H
            right = box[3] * W
            bottom = box[2] * H
            width = right - left
            height = bottom - top

            bbox = (int(left), int(top), int(width), int(height))
            p1 = (int(bbox[0]), int(bbox[1]))
            p2 = (int(bbox[0] + bbox[2]), int(bbox[1] + bbox[3]))
            cv2.rectangle(color_image, p1, p2, (255, 0, 0), 2, 1)
            # Point-cloud vertices that fall inside the bounding box.
            obj_points = verts[int(bbox[1]):int(bbox[1] + bbox[3]), int(bbox[0]):int(bbox[0] + bbox[2])].reshape(-1, 3)

            zs = obj_points[:, 2]
            z = np.median(zs)  # representative object depth (median is an assumption; the original line is elided)

            ys = obj_points[:, 1]
            # Keep only points within 1 m of the object depth so background points are excluded.
            ys = np.delete(ys, np.where((zs < z - 1) | (zs > z + 1)))

            my = np.amin(ys, initial=1)
            My = np.amax(ys, initial=-1)
            # Object height is the vertical extent (in meters) of the filtered points.
            height = My - my
            height = float("{:.2f}".format(height))
            print("[INFO] object height is: ", height, "[m]")
            height_txt = str(height) + "[m]"

            # Draw the measured height next to the bounding box.
            font = cv2.FONT_HERSHEY_SIMPLEX
            bottomLeftCornerOfText = (p1[0], p1[1] + 20)
            fontScale = 1  # assumed value; not shown in the original
            fontColor = (255, 255, 255)
            lineType = 2  # assumed value; not shown in the original
            cv2.putText(color_image, height_txt,
                        bottomLeftCornerOfText,
                        font,
                        fontScale,
                        fontColor,
                        lineType)
    cv2.namedWindow('RealSense', cv2.WINDOW_AUTOSIZE)
    cv2.imshow('RealSense', color_image)
    cv2.waitKey(1)  # required for the OpenCV window to refresh