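'''
Example/test script: acquire an image and point cloud from the PR2, map the 3D
points into the camera image, classify points as table surface vs. clutter with
the clutter_segmentation processor, publish the labeled cloud over ROS, and
estimate the table height (an object-placement demo is included but disabled).
'''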
import roslib; roslib.load_manifest('clutter_segmentation')

import rospy
from sensor_msgs.msg import PointCloud

import acquire_pr2_data

from hrl_lib.util import getTime
print getTime(), 'START'

import processor
import configuration

try: from placement import Placement
except ImportError: print 'Cannot find placement.py for import. Ignoring'
from label_object import label_object
from scan_dataset import scan_dataset

import numpy as np
import opencv as cv
import opencv.highgui as hg
print getTime(), 'IMPORTS DONE'


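# Hard-coded settings for this example run: robot type, dataset name, and the
# on-disk location of the scan data.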
ROBOT = 'PR2'
DATASET_ID = 'pr2_example_0003'
DATA_LOCATION = '/home/jokerman/Desktop/PR2/'
z_above_floor = 0


'''Number of 3D points to sample when generating features for the classifier.'''
NUMBER_OF_POINTS = 3500
SCALE = 1
IS_LABELED = True
USE_POLYGONS = True
SAVE_INTENSITY_IMAGE = False
CalibrationVisual = True

MODE = 'all_post'

displayOn = True


'''
@param z_above_floor Approximate height of the hokuyo above the ground. Assumed
to be fixed for now. On the Cody robot, the camera and hokuyo were at the same z-height.
'''
def create_default_scan_dataset(unique_id=DATASET_ID, z_above_floor=1.32):
    dataset = scan_dataset()
    dataset.table_plane_translation = np.matrix([0, 0, 0]).T

    dataset.ground_plane_translation = np.matrix([0, 0, z_above_floor]).T
    dataset.ground_plane_rotation = ''

    dataset.is_labeled = IS_LABELED
    dataset.id = unique_id
    return dataset

'''
Define a region-of-interest polygon which crops the camera image.
A smaller region results in a quicker processing time.
Takes four crop values as fractions of the image size: (left, right, top, bottom),
each between 0 and 1, giving how much of the image to crop away from that edge.
0 on every value keeps the original image; .2 on everything crops away 20% on all edges.
'''

def define_ROI_polygons(w, h, crop_regions):
    '''
    Create the table-surface ROI polygon plus dummy edge polygons for placement.
    Note this assumes we will be using a SQUARE region of interest -- not true in general.
    Usage:
        w = image_size.width; h = image_size.height
        crop_region = (0.2, 0.1, .2, .1)  # fraction to crop: left, right, top, bottom
        surface_ROI, list_of_edges = define_ROI_polygons(w, h, crop_region)
        pc.scan_dataset.polygons.append(surface_ROI)
        for p in list_of_edges: pc.scan_dataset.polygons.append(p)
    '''
    (left, right, top, bottom) = crop_regions
    width_min = w * left
    width_max = w * (1 - right)
    height_min = h * top
    height_max = h * (1 - bottom)

    p = label_object()
    p.set_label('surface')
    p.add_point((width_min, height_min))
    p.add_point((width_min, height_max))
    p.add_point((width_max, height_max))
    p.add_point((width_max, height_min))
    table_surface = p
    print 'surface', p.get_points()

    p = label_object()
    p.set_label('edge_down')
    p.add_point((width_min, height_min))
    p.add_point((width_min, height_max))
    p.add_point((width_max, height_max))
    p.add_point((width_max, height_min))
    p.add_point((width_min, height_min))
    list_of_edges = (p,)

    print 'edge', p.get_points()
    return table_surface, list_of_edges

def show_image(window_name, img, wait=False):
    hg.cvStartWindowThread()
    RESIZABLE = 0
    hg.cvNamedWindow(window_name, RESIZABLE)
    hg.cvShowImage(window_name, img)
    if wait:
        print 'show_image: press any key to continue..'
        cv.highgui.cvWaitKey()


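# Set up the configuration and processor, then attach a default scan dataset
# describing this capture.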
cfg = configuration.configuration(DATA_LOCATION, ROBOT)
pc = processor.processor(cfg)
pc.features_k_nearest_neighbors = None

pc.scan_dataset = create_default_scan_dataset(DATASET_ID, z_above_floor)

'''
PC.LOAD_RAW_DATA()
Browses for content with the following names:
    data/UNIQUE_ID_image.png
    data/UNIQUE_ID_laserscans.pkl
Sets on pc: self.img,
            self.laserscans,
            and self.image_angle (set or zeroed)
'''
if cfg.device == 'PR2':
    rospy.init_node('rosSegmentCloud')
    print 'STARTING NEW COLLECTION NODE'
    ac = acquire_pr2_data.AcquireCloud()

    print 'FINISHED COLLECTING, SENDING DATA BACK'

    np_img, pc.pts3d, pc.intensities, camPts, colors, idx_list = ac.return_data_for_segmentation()

    pc.pts3d = np.array(pc.pts3d)
    N = len(idx_list)
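    # idx_list is a boolean mask over the N scanned points; indexing it with itself
    # keeps only the True entries, so Nbound counts the points that fall inside
    # the camera's field of view.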
    Nbound = len(idx_list[idx_list])
    print 'Camera view contains', Nbound, 'of', N, '3D points within its field of view.\n'
    if Nbound == 0:
        print 'WARNING: Cannot start segmentation if no 3D points overlap image. Exiting.'
        print 'This can happen if the pointcloud is empty [], if the transformation is wrong,' \
              ' or the camera and scanner are not pointed in the same direction'
        import sys; sys.exit()
    pc.img = cv.cvCloneImage(np.array(np_img))
    pc.laserscans = []
    pc.scan_indices = np.array(range(N))
    pc.camPts = camPts
    pc.camPts_bound = camPts[:, idx_list]
    pc.pts3d_bound = pc.pts3d[:, idx_list]
    pc.scan_indices_bound = np.array(range(N))[idx_list]
    pc.intensities_bound = pc.intensities[idx_list]
    pc.idx_list = idx_list


image_size = cv.cvGetSize(pc.img)
w = image_size.width
h = image_size.height
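# Fractions of the image to crop away from the (left, right, top, bottom) edges
# when defining the region-of-interest polygons.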
crop_region = (0.3, 0.3, 0.3, 0.3)
surface_ROI, list_of_edges = define_ROI_polygons(w, h, crop_region)
pc.scan_dataset.polygons.append(surface_ROI)
for p in list_of_edges: pc.scan_dataset.polygons.append(p)


''' [Below] Does 'map_laser_into_cam2D' to get camPts, and bounds all values.
Optionally applies a translation and rotation relative to a ground plane (if defined).
'''
if not cfg.device == 'PR2':
    pc.do_all_point_cloud()
else:
    pc.do_all_point_cloud(map_already_exists=True)

''' [Below] MAKE SURE THESE MATCH THE ORIGINAL SCAN:
truncate 3D data to a volume of interest (center, and h, w, d). Numbers are in meters.
Suggested right-arm values: np.array([0.55,-0.4,1.0]), 0.6, 0.8, 1.3
Note, this increases speed by throwing out points outside the likely table area.
It is also meant to remove the 'floor' as a possible candidate for table surface fits.
'''
pc.truncate_pointcloud_to_voi(np.array([0.55, -0.4, 1.0]), 2, 2, 1.3)

''' Visual to quickly check TF '''
if CalibrationVisual:
    overlay_img = pc.draw_mapped_laser_into_image(None, pc.pts3d, pc.img)
    show_image('calibration_visual', overlay_img)

if USE_POLYGONS:
    print 'DO POLYGON MAPPING'
    pc.do_polygon_mapping()
    '''Visual based on accurate polygons.'''

    print 'map laser into image again'
    pc.img_mapped = pc.draw_mapped_laser_into_image(None, pc.pts3d, pc.img)

    pc.create_polygon_images()
    pc.img_mapped_polygons = pc.draw_mapped_laser_polygons_into_image(None, pc.pts3d, pc.img)

    if SAVE_INTENSITY_IMAGE:
        print 'Begin Optional save of Intensity image'
        pc.img_intensities = pc.create_intensity_image(pc.laserscans[0])

else:
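    # No polygon mapping: give every bounded point the same dummy label (1)
    # so that later steps still have a per-point label array to work with.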
    n, m = np.shape(pc.pts3d_bound)
    polygon_labels = list(1 for x in range(m))
    pc.map_polys = polygon_labels


pc.scan_dataset.ground_plane_normal = np.matrix([0., 0., 1.]).T

''' Comment: nonzero_indices comes from pc.map_polys, which comes from the other labeling technique.
I have changed the second param below to generate_and_save_all_neighborhood_indices=True.
'''
print 'lucid - about to generate features'
SAVE_UNUSED_NEIGHBORS_TO = False

feature_data = pc.generate_features(NUMBER_OF_POINTS, False, True)
print 'lucid - done generating features'
labels, testresults = pc.load_classifier_and_test_on_dataset(MODE, feature_data)


if False and displayOn:
    pc.display_segmentation_image('all')
    print "TEST_DISPLAY: showing segmentation image |", NUMBER_OF_POINTS, "points. Please press a key"
    cv.highgui.cvWaitKey(0)
    pc.save_segmentation_image('all')


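# Overwrite the polygon-based labels with the classifier's per-point labels
# (see the comment block below: map_polys is overridden by the learning-based labels).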
pc.map_polys = pc.load_Classifier_labels(MODE)

print 'pts3d_bound is size', pc.pts3d_bound.shape
print 'map_polys is size', pc.map_polys.shape

SHOW_IN_ROS = True
if SHOW_IN_ROS:
    try: rospy.init_node('table_labeler')
    except: print 'rospy init node has already been called: skipping'
    pub = rospy.Publisher('labeled_cloud', PointCloud)
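    # Keep re-publishing the acquired cloud/images plus the labeled (nonzero) points
    # so they can be visualized (e.g. in rviz) until the node is shut down.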
    while not rospy.is_shutdown():
        ac.pub_cloud()
        ac.pub_cam()
        ac.pub_img()
        colors = pc.map_polys
        acquire_pr2_data.publish_cloud(pub, '/base_footprint', pc.pts3d_bound[:, colors != 0], intensities=colors[colors != 0])
        rospy.sleep(2)
        print 'published'


pc.scan_dataset.ground_plane_rotation = np.matrix([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]]).T

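# Estimate the table height as the mean z of all points the classifier labeled
# as table surface, and store it as the table plane translation.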
data_idx = np.where(np.asarray(pc.map_polys) == processor.LABEL_SURFACE)[0]
data = np.asarray(pc.pts3d_bound)[:, data_idx]
table_height = np.mean(data, axis=1)[2]
pc.scan_dataset.table_plane_translation = np.matrix([0., 0., table_height]).T


'''
# Save results in a nice format.
'''
if False:
    filename = pc.config.path + 'data/' + DATASET_ID + '_segmentation_results.py'
    roslib.load_manifest('display_stuff'); import save_labeled_cloud
    save_labeled_cloud.save_results(pc.pts3d_bound, pc.intensities, labels,
                                    pc.idx_bound, pc.pts3d)
    save_labeled_cloud.save_feature_data(feature_data['features'],
                                         feature_data['point_indices'])
    save_labeled_cloud.save_map2D(pc.camPts_bound, table_height)
''' Comments
* pc.pts3d_bound      # 3 x Nbound
* pc.idx_bound        # 1 x Nbound, indexes to re-project information into the full pointcloud.
* labels              # Nbound, use if loading the classifier on feature_data
* pc.map_polys        # Nbound, labels created from polygons.
                      # Overridden by the learning-based labels, so they are identical
                      # at the end of this script.
* Nbound = len(intensities_bound)   # --> example: range(19598)
* feature_data['features']          # Nbound x 35, actually SLIGHTLY less, only 19541.
* feature_data['point_indices']
* Nfeature = feature_data['set_size']  # --> slightly less than the maximum.
                                       # I assume this is at most NUMBER_OF_POINTS
-Others:
* pc.image_labels
* pc.img_mapped
* pc.img_mapped_polygons
* testresults         # Not sure what this is?
* data_idx            # indexes taken from pts3d_bound ONLY where label is nonzero
* data                # 3D values of pts3d_bound where label is nonzero...
                      # However as a list, so 3x longer.
* scan_indices        # an artifact from the hokuyo. Original acquired line order?
'''

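# Optional placement demo (disabled): rasterize the table at a fixed resolution,
# then test or search for a placement of a rectangular object footprint of the
# given height on the detected surface.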
if False:
    object_height = 0.1

    resolution = [.01 * SCALE, .01 * SCALE]
    print 'NOTE: Resolution is ', 100 * resolution[0], 'cm'
    polygon = label_object()
    polygon.add_point([0, 0])
    polygon.add_point([0, 5 * SCALE])
    polygon.add_point([10 * SCALE, 5 * SCALE])
    polygon.add_point([10 * SCALE, 0])

    print 'creating placement object'
    pl = Placement(pc, resolution)

    if displayOn:
        placement_point = pl.test_placement(polygon, object_height)
    else:
        placement_point = pl.find_placement(polygon, object_height)

    placement_point -= pc.scan_dataset.ground_plane_translation
    # placement_point is now expressed relative to the ground plane (global frame).
    placement_point_global = placement_point

    print 'placement point in global coordinate frame:', placement_point_global.T

    if displayOn:
        print 'displaying current placement'
        pl.display_current_placement_in_heightmap()
        cv.highgui.cvWaitKey()

print getTime(), 'DONE'