'''
In order to run with pre-trained classifiers (default behavior),
please set DATA_LOCATION below to point to the /classifiers folder
inside this package. (TODO: make this happen by default.)

Takes a point cloud or laser scan, an image, the tf between them, and
information from SVM-trained classifiers, and segments the point cloud
into 'clutter' vs. 'surface'.
Results are output as published topics.
Roadmap: we hope to improve the types of inputs and the flexibility of the outputs.
'''
import roslib; roslib.load_manifest('clutter_segmentation')
roslib.load_manifest('pr2_clutter_helper')
import rospy
from sensor_msgs.msg import PointCloud

import acquire_pr2_data

from hrl_lib.util import getTime
print getTime(), 'START'

import processor
import configuration

try: from placement import Placement
except ImportError: print 'Cannot find placement.py for import. Ignoring'
from label_object import label_object
from scan_dataset import scan_dataset

import numpy as np
import opencv as cv
import opencv.highgui as hg
print getTime(), 'IMPORTS DONE'

ROBOT = 'PR2'

DATASET_ID = 'pr2_example_0003'
DATA_LOCATION = roslib.packages.get_pkg_dir('clutter_segmentation') + '/classifiers'

# Approximate height of the scanner above the ground, in meters
# (documented with create_default_scan_dataset below).
z_above_floor = 0

'''Number of points to use when generating features.'''
NUMBER_OF_POINTS = 2000
SCALE = 1
IS_LABELED = True
USE_POLYGONS = True
SAVE_INTENSITY_IMAGE = False
CalibrationVisual = True

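# MODE is passed to load_classifier_and_test_on_dataset() and
# load_Classifier_labels() below; the accepted values are defined by
# processor.py (assumption: 'all_post' = all features, with postprocessing).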
MODE = 'all_post'

displayOn = True


'''
@param z_above_floor Approximate height of the hokuyo above the ground. Assumed
to be fixed for now. On the Cody robot the camera and hokuyo were at the same z-height.
'''
def create_default_scan_dataset(unique_id=DATASET_ID, z_above_floor=1.32):
    dataset = scan_dataset()
    dataset.table_plane_translation = np.matrix([0, 0, 0]).T

    dataset.ground_plane_translation = np.matrix([0, 0, z_above_floor]).T
    dataset.ground_plane_rotation = ''  # filled in later with a 3x3 rotation matrix

    dataset.is_labeled = IS_LABELED
    dataset.id = unique_id
    return dataset

'''
Define a region-of-interest polygon which crops the camera image.
A smaller size will result in quicker processing.
Takes four crop values as fractions of the image size: (left, right, top, bottom),
each ranging between 0 and 1, giving the fraction to crop away on that edge:
(0,0,0,0) keeps the original image, and (.2,.2,.2,.2) crops away 20% on every edge.
'''
def define_ROI_polygons(w, h, crop_regions):
    '''
    Create dummy edges for placement.
    Note this assumes we will be using a SQUARE region of interest -- not true in general.
    Usage:
        w = image_size.width; h = image_size.height
        crop_region = (0.2,0.1,.2,.1)  # fraction to crop: left, right, top, bottom
        surface_ROI, list_of_edges = define_ROI_polygons(w, h, crop_region)
        pc.scan_dataset.polygons.append(surface_ROI)
        for p in list_of_edges: pc.scan_dataset.polygons.append(p)
    '''
    (left, right, top, bottom) = crop_regions
    width_min = w * left
    width_max = w * (1 - right)
    height_min = h * top
    height_max = h * (1 - bottom)
    p = label_object()
    p.set_label('surface')
    p.add_point((width_min, height_min))
    p.add_point((width_min, height_max))
    p.add_point((width_max, height_max))
    p.add_point((width_max, height_min))
    table_surface = p
    print 'surface', p.get_points()
    p = label_object()
    p.set_label('edge_down')
    p.add_point((width_min, height_min))
    p.add_point((width_min, height_max))
    p.add_point((width_max, height_max))
    p.add_point((width_max, height_min))
    p.add_point((width_min, height_min))
    list_of_edges = (p,)

    print 'edge', p.get_points()
    return table_surface, list_of_edges
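
# Worked example (w=640, h=480, crop_region=(0.3,0.3,0.3,0.3), as used below):
#   width_min  = 640*0.3 = 192;  width_max  = 640*(1-0.3) = 448
#   height_min = 480*0.3 = 144;  height_max = 480*(1-0.3) = 336
# so the surface ROI is the centered rectangle from (192,144) to (448,336).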

def show_image(window_name, img, wait=False):
    hg.cvStartWindowThread()
    RESIZABLE = 0
    hg.cvNamedWindow(window_name, RESIZABLE)
    hg.cvShowImage(window_name, img)
    if wait:
        print 'show_image: press any key to continue..'
        cv.highgui.cvWaitKey()


cfg = configuration.configuration(DATA_LOCATION, ROBOT)
pc = processor.processor(cfg)
pc.features_k_nearest_neighbors = None

pc.scan_dataset = create_default_scan_dataset(DATASET_ID, z_above_floor)

'''
PC.LOAD_RAW_DATA()
Browses for content with the following names:
    data/UNIQUE_ID_image.png
    data/UNIQUE_ID_laserscans.pkl
Sets in pc: self.img and self.laserscans,
and sets (or zeros) self.image_angle.
'''
if cfg.device == 'PR2':
    rospy.init_node('rosSegmentCloud')
    print 'STARTING NEW COLLECTION NODE'
    ac = acquire_pr2_data.AcquireCloud()

    print 'FINISHED COLLECTING, SENDING DATA BACK'

    np_img, pc.pts3d, pc.intensities, camPts, colors, idx_list = ac.return_data_for_segmentation()

    pc.pts3d = np.array(pc.pts3d)
    N = len(idx_list)
    Nbound = len(idx_list[idx_list])
    print 'Camera view contains', Nbound, 'of', N, '3D points within its field of view.\n'
    if Nbound == 0:
        print 'WARNING: Cannot start segmentation if no 3D points overlap image. Exiting.'
        print 'This can happen if the pointcloud is empty [], if the transformation is wrong, \
or the camera and scanner are not pointed in the same direction.'
        import sys; sys.exit()
    pc.img = cv.cvCloneImage(np.array(np_img))
    pc.laserscans = []
    pc.scan_indices = np.array(range(N))
    pc.camPts = camPts
    pc.camPts_bound = camPts[:, idx_list]
    pc.pts3d_bound = pc.pts3d[:, idx_list]
    pc.scan_indices_bound = np.array(range(N))[idx_list]
    pc.intensities_bound = pc.intensities[idx_list]
    pc.idx_list = idx_list
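
# Note on the data layout above: pc.pts3d is a 3xN array of points and
# idx_list is a boolean mask over those N points marking the ones that
# project into the camera image, so len(idx_list[idx_list]) counts its True
# entries. The *_bound variants are the same quantities restricted to that
# in-view subset (cf. the Comments block near the end of this file).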

image_size = cv.cvGetSize(pc.img)
w = image_size.width
h = image_size.height
crop_region = (0.3, 0.3, 0.3, 0.3)
surface_ROI, list_of_edges = define_ROI_polygons(w, h, crop_region)
pc.scan_dataset.polygons.append(surface_ROI)
for p in list_of_edges: pc.scan_dataset.polygons.append(p)

''' [Below] Does 'map_laser_into_cam2D' to get camPts, and computes the bound versions of all values.
    Optionally applies translation and rotation relative to a ground plane (if defined).
'''
if not cfg.device == 'PR2':
    pc.do_all_point_cloud()
else:
    pc.do_all_point_cloud(map_already_exists=True)

''' [Below] MAKE SURE THESE MATCH THE ORIGINAL SCAN:
    Truncate the 3D data to a volume of interest (center, and h, w, d). Numbers are in meters.
    Suggested right-arm values are: np.array([0.55,-0.4,1.0]), 0.6, 0.8, 1.3
    Note, this increases speed by throwing out points outside the likely table area.
    It is also meant to remove the 'floor' as a possible candidate for table-surface fits.
'''
pc.truncate_pointcloud_to_voi(np.array([0.55, -0.4, 1.0]), 2, 2, 1.3)
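# NOTE: a generous 2m x 2m x 1.3m volume is used here rather than the tighter
# right-arm values suggested in the comment above.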

''' Visual to quickly check TF '''
if CalibrationVisual:
    overlay_img = pc.draw_mapped_laser_into_image(None, pc.pts3d, pc.img)
    show_image('calibration_visual', overlay_img)

if USE_POLYGONS:
    print 'DO POLYGON MAPPING'
    pc.do_polygon_mapping()
    '''Visual based on accurate polygons.'''

    print 'map laser into image again'
    pc.img_mapped = pc.draw_mapped_laser_into_image(None, pc.pts3d, pc.img)

    pc.create_polygon_images()
    pc.img_mapped_polygons = pc.draw_mapped_laser_polygons_into_image(None, pc.pts3d, pc.img)

    if SAVE_INTENSITY_IMAGE:
        print 'Begin Optional save of Intensity image'
        pc.img_intensities = pc.create_intensity_image(pc.laserscans[0])
else:
    n, m = np.shape(pc.pts3d_bound)
    polygon_labels = list(1 for x in range(m))
    pc.map_polys = polygon_labels
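# When polygon mapping is skipped, every bound point gets a provisional label
# of 1, so that all points count as nonzero / usable when features are
# generated below (see the comment before generate_features).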


pc.scan_dataset.ground_plane_normal = np.matrix([0., 0., 1.]).T

''' Comment: nonzero_indices comes from pc.map_polys, which comes from the other labeling technique.
    I have changed the second param below to: generate_and_save_all_neighborhood_indices=True
'''
print 'lucid - about to generate features'
SAVE_UNUSED_NEIGHBORS_TO = False

feature_data = pc.generate_features(NUMBER_OF_POINTS, False, True)
print 'lucid - done generating features'
labels, testresults = pc.load_classifier_and_test_on_dataset(MODE, feature_data)

if False and displayOn:
    pc.display_segmentation_image('all')
    print "TEST_DISPLAY: showing segmentation image |", NUMBER_OF_POINTS, "points. Please press a key"
    cv.highgui.cvWaitKey(0)
    pc.save_segmentation_image('all')

pc.map_polys = pc.load_Classifier_labels(MODE)

print 'pts3d_bound is size', pc.pts3d_bound.shape
print 'map_polys is size', pc.map_polys.shape
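# From here on, pc.map_polys holds the learning-based labels, overriding the
# polygon-derived ones (see the Comments block near the end of this file).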

SHOW_IN_ROS = True
if SHOW_IN_ROS:
    try: rospy.init_node('table_labeler')
    except: print 'rospy.init_node has already been called: skipping'
    pub = rospy.Publisher('labeled_cloud', PointCloud)
    while not rospy.is_shutdown():
        ac.pub_cloud()
        ac.pub_cam()
        ac.pub_img()
        colors = pc.map_polys
        acquire_pr2_data.publish_cloud(pub, '/base_footprint', pc.pts3d_bound[:, colors != 0], intensities=colors[colors != 0])
        rospy.sleep(2)
        print 'published'
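# Note: this loop runs until the node is shut down (rospy.sleep() raises
# ROSInterruptException on shutdown), so the code below is normally only
# reached when SHOW_IN_ROS is set to False.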


pc.scan_dataset.ground_plane_rotation = np.matrix([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]]).T

data_idx = np.where(np.asarray(pc.map_polys) == processor.LABEL_SURFACE)[0]
data = np.asarray(pc.pts3d_bound)[:, data_idx]
table_height = np.mean(data, axis=1)[2]
pc.scan_dataset.table_plane_translation = np.matrix([0., 0., table_height]).T
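# [Above] table_height is the mean z over the points labeled LABEL_SURFACE:
# np.mean(data, axis=1) averages each of the three coordinate rows and [2]
# picks out z, i.e. equivalently table_height = np.mean(data[2, :]).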


'''
# Save results in a nice format.
'''
if False:
    filename = pc.config.path + 'data/' + DATASET_ID + '_segmentation_results.py'
    roslib.load_manifest('display_stuff'); import save_labeled_cloud
    save_labeled_cloud.save_results(pc.pts3d_bound, pc.intensities, labels,
                                    pc.idx_bound, pc.pts3d)
    save_labeled_cloud.save_feature_data(feature_data['features'],
                                         feature_data['point_indices'])
    save_labeled_cloud.save_map2D(pc.camPts_bound, table_height)
''' Comments
    * pc.pts3d_bound   # 3 by Nbound
    * pc.idx_bound     # 1 by Nbound, indexes to re-project information into the full pointcloud.
    * labels           # Nbound; use if loading a classifier on feature_data.
    * pc.map_polys     # Nbound; labels created from polygons.
                       # Overridden by the learning-based labels, so they are identical
                       # at the end of this document.
    * Nbound = len(intensities_bound)  # --> example: range(19598)
    * feature_data['features']  # Nbound x 35, actually SLIGHTLY less, only 19541.
    * feature_data['point_indices']
    * Nfeature = feature_data['set_size']  # --> slightly less than the maximum.
                                           # I assume this is at most NUMBER_OF_POINTS.
    - Others:
    * pc.image_labels
    * pc.img_mapped
    * pc.img_mapped_polygons
    * testresults   # Not sure what this is?
    * data_idx      # indexes taken from pts3d_bound ONLY where the label is nonzero.
    * data          # 3D values of pts3d_bound where the label is nonzero...
                    # however as a list, so 3x longer.
    * scan_indices  # an artifact from the hokuyo. Original acquired line order?
'''

if False:
    object_height = 0.1

    resolution = [.01 * SCALE, .01 * SCALE]
    print 'NOTE: Resolution is ', 100 * resolution[0], 'cm'
    polygon = label_object()
    polygon.add_point([0, 0])
    polygon.add_point([0, 5 * SCALE])
    polygon.add_point([10 * SCALE, 5 * SCALE])
    polygon.add_point([10 * SCALE, 0])

    print 'creating placement object'
    pl = Placement(pc, resolution)

    if displayOn:
        placement_point = pl.test_placement(polygon, object_height)
    else:
        placement_point = pl.find_placement(polygon, object_height)

    placement_point -= pc.scan_dataset.ground_plane_translation

    print 'placement point in global coordinate frame:', placement_point.T

    if displayOn:
        print 'displaying current placement'
        pl.display_current_placement_in_heightmap()
        cv.highgui.cvWaitKey()

print getTime(), 'DONE'