add_pointclouds_to_bagfile.py
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Original code taken from the RGB-D benchmark: http://vision.in.tum.de/data/datasets/rgbd-dataset/tools
#
# Original author: Juergen Sturm
# modified by:     Martin Guenther

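# Example invocation (file names are illustrative; assumes the script is run in
# a ROS environment where rgbd_rosbag_tools and its dependencies are available):
#   ./add_pointclouds_to_bagfile.py --start 5 --nth 15 input.bag output-points.bag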
import argparse
import sys
import os

if __name__ == '__main__':

    # parse command line
    parser = argparse.ArgumentParser(description='''
    This script reads a bag file containing RGB-D data,
    adds the corresponding PointCloud2 messages, and saves the result to a new bag file.
    Optional arguments allow selecting only a portion of the original bag file.
    ''')
    parser.add_argument('--start', help='skip the first N seconds of the input bag file (default: 0.0)', default=0.00)
    parser.add_argument('--duration', help='only process N seconds of the input bag file (default: off)')
    parser.add_argument('--nth', help='only process every N-th frame of the input bag file (default: 15)', default=15)
    parser.add_argument('--skip', help='skip N blocks in the beginning (default: 1)', default=1)
    parser.add_argument('--compress', help='compress the output bag file', action='store_true')
    parser.add_argument('inputbag', help='input bag file')
    parser.add_argument('outputbag', nargs='?', help='output bag file')
    args = parser.parse_args()

    import roslib; roslib.load_manifest('rgbd_rosbag_tools')
    import rospy
    import rosbag
    import sensor_msgs.msg
    import cv
    from cv_bridge import CvBridge, CvBridgeError
    import struct

    if not args.outputbag:
        args.outputbag = os.path.splitext(args.inputbag)[0] + "-points.bag"

    print "Processing bag file:"
    print "  in:",args.inputbag
    print "  out:",args.outputbag
    print "  starting from: %s seconds"%(args.start)

    if args.duration:
        print "  duration: %s seconds"%(args.duration)

    print "  saving every %s-th frame"%(args.nth)
    args.skip = float(args.skip)
    print "  skipping %s blocks"%(args.skip)

    inbag = rosbag.Bag(args.inputbag,'r')
    if args.compress:
        param_compression = rosbag.bag.Compression.BZ2
    else:
        param_compression = rosbag.bag.Compression.NONE

    outbag = rosbag.Bag(args.outputbag, 'w', compression=param_compression)

    depth_camera_info = None
    rgb_camera_info = None
    depth_image = None
    rgb_image_color = None
    cortex = None

    nan = float('nan')
    bridge = CvBridge()
    frame = 0

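    # Main pass over the input bag: the camera_info messages are latched, the
    # most recent color image is buffered, and a frame is assembled whenever a
    # depth image arrives with all of its companion messages already seen.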
    time_start = None
    for topic, msg, t in inbag.read_messages():
        if time_start is None:
            time_start = t
        if t - time_start < rospy.Duration.from_sec(float(args.start)):
            continue
        if args.duration and (t - time_start > rospy.Duration.from_sec(float(args.start) + float(args.duration))):
            break
        print "t=%f\r"%(t-time_start).to_sec(),
        if topic == "/camera/depth_registered/camera_info":
            depth_camera_info = msg
            continue
        if topic == "/camera/rgb/camera_info":
            rgb_camera_info = msg
            continue
        if topic == "/camera/rgb/image_color" and rgb_camera_info:
            rgb_image_color = msg
            continue
        if topic == "/camera/depth_registered/image" and depth_camera_info and rgb_image_color and rgb_camera_info:
            depth_image = msg
            # now process frame

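            # Drop the pair if the depth image is more than one frame period
            # (at 30 Hz) newer than the buffered color image.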
            if depth_image.header.stamp - rgb_image_color.header.stamp > rospy.Duration.from_sec(1/30.0):
                continue

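            # Keep only every --nth synchronized frame, and discard the first
            # --skip selected frames before writing anything.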
            frame += 1
            if frame % float(args.nth) == 0:
                if args.skip > 0:
                    args.skip -= 1
                else:
                    # store messages
                    outbag.write("/camera/depth_registered/camera_info",depth_camera_info,t)
                    outbag.write("/camera/depth_registered/image",depth_image,t)
                    outbag.write("/camera/rgb/camera_info",rgb_camera_info,t)
                    outbag.write("/camera/rgb/image_color",rgb_image_color,t)

                    # generate monochrome image from color image
                    cv_rgb_image_mono = bridge.imgmsg_to_cv(rgb_image_color, "mono8")
                    rgb_image_mono = bridge.cv_to_imgmsg(cv_rgb_image_mono)
                    rgb_image_mono.header = rgb_image_color.header
                    outbag.write("/camera/rgb/image_mono",rgb_image_mono,t)

                    # generate depth and colored point cloud
                    cv_depth_image = bridge.imgmsg_to_cv(depth_image, "passthrough")
                    cv_rgb_image_color = bridge.imgmsg_to_cv(rgb_image_color, "bgr8")

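                    # CameraInfo.K is the 3x3 intrinsic matrix in row-major
                    # order: K[0] = fx, K[2] = cx, K[4] = fy, K[5] = cy. The
                    # script uses fx for both axes, assuming square pixels.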
                    centerX = depth_camera_info.K[2]
                    centerY = depth_camera_info.K[5]
                    depthFocalLength = depth_camera_info.K[0]
                    depth_points = sensor_msgs.msg.PointCloud2()
                    depth_points.header = depth_image.header
                    depth_points.width = depth_image.width
                    depth_points.height = depth_image.height
                    depth_points.fields.append(sensor_msgs.msg.PointField(
                        name = "x",offset = 0,datatype = sensor_msgs.msg.PointField.FLOAT32,count = 1 ))
                    depth_points.fields.append(sensor_msgs.msg.PointField(
                        name = "y",offset = 4,datatype = sensor_msgs.msg.PointField.FLOAT32,count = 1 ))
                    depth_points.fields.append(sensor_msgs.msg.PointField(
                        name = "z",offset = 8,datatype = sensor_msgs.msg.PointField.FLOAT32,count = 1 ))
                    depth_points.point_step = 16
                    depth_points.row_step = depth_points.point_step * depth_points.width
                    buffer = []
                    buffer_rgb = []
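                    # Back-project every pixel through the pinhole model:
                    # x = (u - cx) * d / f, y = (v - cy) * d / f, z = d.
                    # Each point is packed as x, y, z plus one float of padding,
                    # matching the 16-byte point_step declared above.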
                    for v in range(depth_image.height):
                        for u in range(depth_image.width):
                            d = cv_depth_image[v,u]
                            ptx = (u - centerX) * d / depthFocalLength
                            pty = (v - centerY) * d / depthFocalLength
                            ptz = d
                            buffer.append(struct.pack('ffff',ptx,pty,ptz,1.0))
                    depth_points.data = "".join(buffer)
                    outbag.write("/camera/depth/points", depth_points, t)

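                    # Build a second, colored point cloud. The layout mirrors
                    # PCL's PointXYZRGB: x, y, z, one float of padding, the
                    # packed color, then more padding up to the 32-byte point_step.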
                    centerX = depth_camera_info.K[2]
                    centerY = depth_camera_info.K[5]
                    depthFocalLength = depth_camera_info.K[0]
                    rgb_points = sensor_msgs.msg.PointCloud2()
                    rgb_points.header = rgb_image_color.header
                    rgb_points.width = depth_image.width
                    rgb_points.height = depth_image.height
                    rgb_points.fields.append(sensor_msgs.msg.PointField(
                        name = "x",offset = 0,datatype = sensor_msgs.msg.PointField.FLOAT32,count = 1 ))
                    rgb_points.fields.append(sensor_msgs.msg.PointField(
                        name = "y",offset = 4,datatype = sensor_msgs.msg.PointField.FLOAT32,count = 1 ))
                    rgb_points.fields.append(sensor_msgs.msg.PointField(
                        name = "z",offset = 8,datatype = sensor_msgs.msg.PointField.FLOAT32,count = 1 ))
                    rgb_points.fields.append(sensor_msgs.msg.PointField(
                        name = "rgb",offset = 16,datatype = sensor_msgs.msg.PointField.FLOAT32,count = 1 ))
                    rgb_points.point_step = 32
                    rgb_points.row_step = rgb_points.point_step * rgb_points.width
                    buffer = []
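                    # The color image is bgr8, so rgb[0..2] are b, g, r; packing
                    # them as bytes b, g, r, 0 yields (on a little-endian
                    # machine) the 0x00RRGGBB value expected in the "rgb" field.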
                    for v in range(depth_image.height):
                        for u in range(depth_image.width):
                            d = cv_depth_image[v,u]
                            rgb = cv_rgb_image_color[v,u]
                            ptx = (u - centerX) * d / depthFocalLength
                            pty = (v - centerY) * d / depthFocalLength
                            ptz = d
                            buffer.append(struct.pack('ffffBBBBIII',
                                ptx,pty,ptz,1.0,
                                rgb[0],rgb[1],rgb[2],0,
                                0,0,0))
                    rgb_points.data = "".join(buffer)
                    outbag.write("/camera/depth_registered/points", rgb_points, t)

            # consume the images
            depth_image = None
            rgb_image_color = None
            continue
        if topic not in ["/camera/depth_registered/camera_info","/camera/rgb/camera_info",
                         "/camera/rgb/image_color","/camera/depth_registered/image"]:
            # anything else: pass through
            outbag.write(topic,msg,t)

    outbag.close()
    print

