add_pointclouds_to_bagfile.py
#!/usr/bin/python
# -*- coding: utf8 -*-
#
# Original code taken from the RGB-D benchmark: http://vision.in.tum.de/data/datasets/rgbd-dataset/tools
#
# Original author: Juergen Sturm
# modified by: Martin Guenther

import argparse
import sys
import os

if __name__ == '__main__':

    # parse command line
    parser = argparse.ArgumentParser(description='''
    This script reads a bag file containing RGB-D data,
    adds the corresponding PointCloud2 messages, and saves it again into a bag file.
    Optional arguments allow selecting only a portion of the original bag file.
    ''')
    parser.add_argument('--start', help='skip the first N seconds of input bag file (default: 0.0)', default=0.00)
    parser.add_argument('--duration', help='only process N seconds of input bag file (default: off)')
    parser.add_argument('--nth', help='only process every N-th frame of input bag file (default: 15)', default=15)
    parser.add_argument('--skip', help='skip N blocks in the beginning (default: 1)', default=1)
    parser.add_argument('--compress', help='compress output bag file', action='store_true')
    parser.add_argument('inputbag', help='input bag file')
    parser.add_argument('outputbag', nargs='?', help='output bag file')
    args = parser.parse_args()

    import roslib; roslib.load_manifest('rgbd_rosbag_tools')
    import rospy
    import rosbag
    import sensor_msgs.msg
    import cv
    from cv_bridge import CvBridge, CvBridgeError
    import struct

    if not args.outputbag:
        args.outputbag = os.path.splitext(args.inputbag)[0] + "-points.bag"

    print "Processing bag file:"
    print " in:", args.inputbag
    print " out:", args.outputbag
    print " starting from: %s seconds" % (args.start)

    if args.duration:
        print " duration: %s seconds" % (args.duration)

    print " saving every %s-th frame" % (args.nth)
    args.skip = float(args.skip)
    print " skipping %s blocks" % (args.skip)

    inbag = rosbag.Bag(args.inputbag, 'r')
    if args.compress:
        param_compression = rosbag.bag.Compression.BZ2
    else:
        param_compression = rosbag.bag.Compression.NONE

    outbag = rosbag.Bag(args.outputbag, 'w', compression=param_compression)

    depth_camera_info = None
    rgb_camera_info = None
    depth_image = None
    rgb_image_color = None
    cortex = None

    nan = float('nan')
    bridge = CvBridge()
    frame = 0

    time_start = None
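    # Single pass over the input bag: cache the most recent camera_info and RGB
    # image, build the point clouds whenever a matching depth image arrives,
    # and pass every other topic through unchanged.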
    for topic, msg, t in inbag.read_messages():
        if time_start == None:
            time_start = t
        if t - time_start < rospy.Duration.from_sec(float(args.start)):
            continue
        if args.duration and (t - time_start > rospy.Duration.from_sec(float(args.start) + float(args.duration))):
            break
        print "t=%f\r" % (t - time_start).to_sec(),
        if topic == "/camera/depth_registered/camera_info":
            depth_camera_info = msg
            continue
        if topic == "/camera/rgb/camera_info":
            rgb_camera_info = msg
            continue
        if topic == "/camera/rgb/image_color" and rgb_camera_info:
            rgb_image_color = msg
            continue
        if topic == "/camera/depth_registered/image" and depth_camera_info and rgb_image_color and rgb_camera_info:
            depth_image = msg
            # now process frame

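            # skip the pair if the depth image is more than one frame period
            # (1/30 s) newer than the RGB image, i.e. the two are out of sync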
            if depth_image.header.stamp - rgb_image_color.header.stamp > rospy.Duration.from_sec(1/30.0):
                continue

            frame += 1
            if frame % float(args.nth) == 0:
                if args.skip > 0:
                    args.skip -= 1
                else:
                    # store messages
                    outbag.write("/camera/depth_registered/camera_info", depth_camera_info, t)
                    outbag.write("/camera/depth_registered/image", depth_image, t)
                    outbag.write("/camera/rgb/camera_info", rgb_camera_info, t)
                    outbag.write("/camera/rgb/image_color", rgb_image_color, t)

                    # generate monochrome image from color image
                    cv_rgb_image_mono = bridge.imgmsg_to_cv(rgb_image_color, "mono8")
                    rgb_image_mono = bridge.cv_to_imgmsg(cv_rgb_image_mono)
                    rgb_image_mono.header = rgb_image_color.header
                    outbag.write("/camera/rgb/image_mono", rgb_image_mono, t)

                    # generate depth and colored point cloud
                    cv_depth_image = bridge.imgmsg_to_cv(depth_image, "passthrough")
                    cv_rgb_image_color = bridge.imgmsg_to_cv(rgb_image_color, "bgr8")

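                    # back-project every depth pixel through the pinhole model
                    # (cx = K[2], cy = K[5], f = K[0]):
                    #   x = (u - cx) * d / f,  y = (v - cy) * d / f,  z = d
                    # and pack each XYZ point into 16 bytes (3 floats + padding)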
                    centerX = depth_camera_info.K[2]
                    centerY = depth_camera_info.K[5]
                    depthFocalLength = depth_camera_info.K[0]
                    depth_points = sensor_msgs.msg.PointCloud2()
                    depth_points.header = depth_image.header
                    depth_points.width = depth_image.width
                    depth_points.height = depth_image.height
                    depth_points.fields.append(sensor_msgs.msg.PointField(
                        name="x", offset=0, datatype=sensor_msgs.msg.PointField.FLOAT32, count=1))
                    depth_points.fields.append(sensor_msgs.msg.PointField(
                        name="y", offset=4, datatype=sensor_msgs.msg.PointField.FLOAT32, count=1))
                    depth_points.fields.append(sensor_msgs.msg.PointField(
                        name="z", offset=8, datatype=sensor_msgs.msg.PointField.FLOAT32, count=1))
                    depth_points.point_step = 16
                    depth_points.row_step = depth_points.point_step * depth_points.width
                    buffer = []
                    buffer_rgb = []
                    for v in range(depth_image.height):
                        for u in range(depth_image.width):
                            d = cv_depth_image[v, u]
                            ptx = (u - centerX) * d / depthFocalLength
                            pty = (v - centerY) * d / depthFocalLength
                            ptz = d
                            buffer.append(struct.pack('ffff', ptx, pty, ptz, 1.0))
                    depth_points.data = "".join(buffer)
                    outbag.write("/camera/depth/points", depth_points, t)

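                    # same back-projection, now packed as 32-byte XYZRGB points:
                    # x/y/z floats at offsets 0/4/8, the color bytes at offset 16
                    # (the "rgb" field), and the remaining bytes left as padding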
                    centerX = depth_camera_info.K[2]
                    centerY = depth_camera_info.K[5]
                    depthFocalLength = depth_camera_info.K[0]
                    rgb_points = sensor_msgs.msg.PointCloud2()
                    rgb_points.header = rgb_image_color.header
                    rgb_points.width = depth_image.width
                    rgb_points.height = depth_image.height
                    rgb_points.fields.append(sensor_msgs.msg.PointField(
                        name="x", offset=0, datatype=sensor_msgs.msg.PointField.FLOAT32, count=1))
                    rgb_points.fields.append(sensor_msgs.msg.PointField(
                        name="y", offset=4, datatype=sensor_msgs.msg.PointField.FLOAT32, count=1))
                    rgb_points.fields.append(sensor_msgs.msg.PointField(
                        name="z", offset=8, datatype=sensor_msgs.msg.PointField.FLOAT32, count=1))
                    rgb_points.fields.append(sensor_msgs.msg.PointField(
                        name="rgb", offset=16, datatype=sensor_msgs.msg.PointField.FLOAT32, count=1))
                    rgb_points.point_step = 32
                    rgb_points.row_step = rgb_points.point_step * rgb_points.width
                    buffer = []
                    for v in range(depth_image.height):
                        for u in range(depth_image.width):
                            d = cv_depth_image[v, u]
                            rgb = cv_rgb_image_color[v, u]
                            ptx = (u - centerX) * d / depthFocalLength
                            pty = (v - centerY) * d / depthFocalLength
                            ptz = d
                            buffer.append(struct.pack('ffffBBBBIII',
                                                      ptx, pty, ptz, 1.0,
                                                      rgb[0], rgb[1], rgb[2], 0,
                                                      0, 0, 0))
                    rgb_points.data = "".join(buffer)
                    outbag.write("/camera/depth_registered/points", rgb_points, t)
            # consume the images
            depth_image = None
            rgb_image_color = None
            continue
        if topic not in ["/camera/depth_registered/camera_info", "/camera/rgb/camera_info",
                         "/camera/rgb/image_color", "/camera/depth_registered/image"]:
            # anything else: pass thru
            outbag.write(topic, msg, t)

    outbag.close()
    print
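Typical invocations (illustrative only; the rosrun form assumes the script is installed as an executable of the rgbd_rosbag_tools package, otherwise run it directly with python):

  rosrun rgbd_rosbag_tools add_pointclouds_to_bagfile.py --nth 1 --compress input.bag
  rosrun rgbd_rosbag_tools add_pointclouds_to_bagfile.py --start 5 --duration 30 input.bag output.bag

If no output bag is given, the result is written next to the input as <input>-points.bag. Only the /camera/... topics hard-coded above are converted; messages on any other topic are copied through unchanged.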

