import roslib; roslib.load_manifest('laser_camera_segmentation')
roslib.load_manifest('pr2_clutter_helper')
import acquire_pr2_data
import rospy

from hrl_lib.util import getTime
print getTime(), 'START'

import laser_camera_segmentation.processor as processor
import laser_camera_segmentation.configuration as configuration

try:
    from placement import Placement
except ImportError:
    print 'Cannot find placement.py for import. Ignoring'
from labeling.label_object import label_object
from labeling.scan_dataset import scan_dataset

import numpy as np
import opencv as cv

ROBOT = 'PR2'

if ROBOT == 'desktopScanner':
    DATASET_ID = '2009Nov12_184413'
    DATA_LOCATION = '/home/jokerman/svn/robot1_data/usr/martin/laser_camera_segmentation/labeling/'
    z_above_floor = 1.32
elif ROBOT == 'codyRobot' or ROBOT == 'dummyScanner':
    DATASET_ID = '2010May31_213250'
    DATA_LOCATION = '/home/jokerman/svn/robot1_data/usr/jokerman/Placement/May31_2010_alpha'
    z_above_floor = 1.32
elif ROBOT == 'PR2' or ROBOT == 'PR2example' or ROBOT == 'PR2bag':
    DATASET_ID = 'pr2_example_0003'
    DATA_LOCATION = '/home/jokerman/Desktop/PR2/'
    z_above_floor = 0

'''Number of points to sample when generating features.'''
NUMBER_OF_POINTS = 7000
SCALE = 1
IS_LABELED = True

displayOn = True
print getTime(), 'IMPORTS DONE'

cfg = configuration.configuration(DATA_LOCATION, ROBOT)
pc = processor.processor(cfg)

pc.features_k_nearest_neighbors = None
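# Note (assumption, not stated in the original): None appears to make the
# processor fall back to its default feature-neighborhood scheme rather than
# a fixed k-nearest-neighbors count; see processor.py for the actual behavior.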

def create_default_scan_dataset(unique_id=DATASET_ID, z_above_floor=1.32):
    '''
    @param z_above_floor Approximate height of the hokuyo above the ground.
    Assumed to be fixed for now. On the Cody robot the camera and hokuyo
    were at the same z-height.
    '''
    dataset = scan_dataset()
    dataset.table_plane_translation = np.matrix([0, 0, 0]).T
    dataset.ground_plane_translation = np.matrix([0, 0, z_above_floor]).T
    dataset.ground_plane_rotation = ''
    dataset.is_labeled = IS_LABELED
    dataset.id = unique_id
    return dataset

pc.scan_dataset = create_default_scan_dataset(DATASET_ID, z_above_floor)

'''
pc.load_raw_data()
Browses for content with the following names:
    data/UNIQUE_ID_image.png
    data/UNIQUE_ID_laserscans.pkl
Sets in pc: self.img and self.laserscans;
sets (or zeros) self.image_angle.
'''
if not cfg.device == 'PR2' and not cfg.device == 'PR2example':
    pc.load_raw_data(pc.scan_dataset.id)
    '''If necessary, convert the laser scan to a pointcloud'''
    (pc.pts3d, pc.scan_indices, pc.intensities) = pc.create_pointcloud(pc.laserscans, True, True)

elif cfg.device == 'PR2example':
    pc.load_raw_PR2_data()
    roslib.load_manifest('display_stuff')
    import pr2_small_table_cloud_object as pr2_example
    filename = pr2_example.grab_pr2_example_image_name()
    pc.img = cv.highgui.cvLoadImage(filename)
    pc.laserscans = []
    pc.pts3d = pr2_example.grab_pr2_example_points()
    pc.intensities = pr2_example.grab_pr2_example_intensities()
    pc.scan_indices = np.array(range(pc.intensities.size))
    pc.image_angle = 45

elif cfg.device == 'PR2bag':
    ''' Instructions:
    $ roslaunch pr2_lcs_helper get_server2_client.launch
        <opens up services which will be called>
    $ roslaunch pr2_lcs_helper laser_assembler_optical_frame_launch.launch
        <listens to tilt_scan, provides the 'assemble_scan' service in the
         optical frame (which is actually the left eye)>
    $ rosbag play <bag with tf, camera, camera_info, tilt_scan> --clock
    $ rosrun laser_camera_segmentation test_display_pr2_try1.py
    '''
    roslib.load_manifest('pr2_lcs_helper')
    import test_uv as PR2_access
    cloud, roscv_img, pc.pts3d, pc.intensities, camPts, colors, idx_list = \
        PR2_access.fetch_PR2_data('all')
    N = len(idx_list)
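    # idx_list is a boolean mask over the cloud; indexing it with itself keeps
    # only the True entries, so Nbound counts the points that project inside
    # the camera image bounds.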
    Nbound = len(idx_list[idx_list])
    pc.img = cv.cvCloneImage(np.array(roscv_img))
    pc.laserscans = []
    pc.scan_indices = np.array(range(N))

    camPts_bound = camPts[:, idx_list]
    pts3d_bound = pc.pts3d[:, idx_list]
    scan_indices_bound = np.array(range(N))[idx_list]
    intensities_bound = pc.intensities[idx_list]
    pc.map = (camPts_bound, camPts, idx_list, pts3d_bound,
              scan_indices_bound, intensities_bound)

elif cfg.device == 'PR2':
    rospy.init_node('rosSegmentCloud')
    print 'STARTING NEW COLLECTION NODE'
    ac = acquire_pr2_data.AcquireCloud()
    ac.print_test()
    print 'FINISHED COLLECTING, SENDING DATA BACK'

    np_img, pc.pts3d, pc.intensities, camPts, colors, idx_list = ac.return_data_for_segmentation()

    pc.pts3d = np.array(pc.pts3d)
    N = len(idx_list)
    Nbound = len(idx_list[idx_list])
    pc.img = cv.cvCloneImage(np.array(np_img))
    pc.laserscans = []
    pc.scan_indices = np.array(range(N))
    camPts_bound = camPts[:, idx_list]
    pts3d_bound = pc.pts3d[:, idx_list]
    scan_indices_bound = np.array(range(N))[idx_list]
    intensities_bound = pc.intensities[idx_list]
    pc.map = (camPts_bound, camPts, idx_list, pts3d_bound,
              scan_indices_bound, intensities_bound)
'''
Define a region-of-interest polygon which crops the camera image.
A smaller size will result in a quicker processing time.
Takes four crop values as a percentage of the image size: xmin, xmax, ymin, ymax,
each ranging between 0 and 1. Perhaps this would be more intuitive as
actual crop margins, so that 0 on all values meant the original image and
.2 on everything cropped away 20% on all edges (a sketch of that alternative
follows below).
'''
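# A minimal sketch of the crop-margin alternative suggested above. This is a
# hypothetical helper (crop_margins_to_bounds is not part of the original
# pipeline and is not used below): margins of 0 keep the full image, and 0.2
# on every edge crops away 20% per side.
def crop_margins_to_bounds(image_size, left=0.0, right=0.0, top=0.0, bottom=0.0):
    width_min = image_size.width * left
    width_max = image_size.width * (1.0 - right)
    height_min = image_size.height * top
    height_max = image_size.height * (1.0 - bottom)
    return width_min, width_max, height_min, height_max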

image_size = cv.cvGetSize(pc.img)

width_min = image_size.width * 0.0
width_max = image_size.width * 0.9
height_min = image_size.height * 0.0
height_max = image_size.height * 0.9


def define_ROI_polygons(width_min, width_max, height_min, height_max):
    p = label_object()
    p.set_label('surface')
    p.add_point((width_min, height_min))
    p.add_point((width_min, height_max))
    p.add_point((width_max, height_max))
    p.add_point((width_max, height_min))
    table_surface = p
    print 'surface', p.get_points()

    p = label_object()
    p.set_label('edge_down')
    p.add_point((width_min, height_min))
    p.add_point((width_min, height_max))
    p.add_point((width_max, height_max))
    p.add_point((width_max, height_min))
    p.add_point((width_min, height_min))
    list_of_edges = (p,)
    print 'edge', p.get_points()
    return table_surface, list_of_edges


surface_ROI, list_of_edges = define_ROI_polygons(width_min, width_max,
                                                 height_min, height_max)
pc.scan_dataset.polygons.append(surface_ROI)
for p in list_of_edges:
    pc.scan_dataset.polygons.append(p)

''' [Below] Does 'map_laser_into_cam2D' to get camPts, and computes the
*_bound versions of all values. Optionally applies a translation and rotation
relative to a ground plane (if defined).
'''
if not cfg.device == 'PR2':
    pc.do_all_point_cloud()
else:
    pc.do_all_point_cloud(map_already_exists=True)

''' [Below] MAKE SURE THESE MATCH THE ORIGINAL SCAN:
truncate the 3D data to a volume of interest (center, then h, w, d). Numbers are in meters.
Suggested right-arm values are: np.array([0.55, -0.4, 1.0]), 0.6, 0.8, 1.3.
Note, this increases speed by throwing out points outside the likely table area.
It is also meant to remove the floor as a possible candidate for table-surface fits.
'''
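# Note: the call below uses a looser 1 m height and 1 m width than the
# suggested right-arm values of 0.6 and 0.8 above (same 1.3 m depth); tighten
# these to match the scan if speed or floor removal becomes an issue.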
pc.truncate_pointcloud_to_voi(np.array([0.55, -0.4, 1.0]), 1, 1, 1.3)
print 'finished truncate_pointcloud in top level function'

''' Visual check to quickly verify the TF (laser-to-camera mapping).
'''
if True:
    print 'overlay_img soon to be obtained'
    overlay_img = pc.draw_mapped_laser_into_image(pc.map, pc.pts3d, pc.img)
    print 'overlay_img obtained'
    import opencv.highgui as hg
    hg.cvStartWindowThread()
    hg.cvNamedWindow('ww', 0)
    hg.cvShowImage('ww', overlay_img)
    print 'wait for key'
    cv.highgui.cvWaitKey()
    print 'finished showing mapped_laser image'

if True:
    print 'do polygon mapping'
    pc.do_polygon_mapping()

    print 'map laser into image again'
    pc.img_mapped = pc.draw_mapped_laser_into_image(pc.map, pc.pts3d, pc.img)

    pc.create_polygon_images()
    pc.img_mapped_polygons = pc.draw_mapped_laser_polygons_into_image(pc.map, pc.pts3d, pc.img)

print 'Begin optional save of intensity image'
if not cfg.device == 'PR2' and not cfg.device == 'PR2example':
    pc.img_intensities = pc.create_intensity_image(pc.laserscans[0])
else:
    n, m = np.shape(pc.pts3d_bound)
    polygon_mapping = list(1 for x in range(m))
    pc.map_polys = polygon_mapping

pc.scan_dataset.ground_plane_normal = np.matrix([0., 0., 1.]).T

''' Comment: nonzero_indices comes from pc.map_polys, which comes from the other
labeling technique. I have changed the second param below to:
generate_and_save_all_neighborhood_indices=True
'''
print 'lucid - about to generate features'
SAVE_UNUSED_NEIGHBORS_TO = False

feature_data = pc.generate_features(NUMBER_OF_POINTS, False, True)
print 'lucid - done generating features'
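# feature_data is a dict; per the comments near the end of this file it holds
# 'features' (roughly Nbound x 35), 'point_indices', and 'set_size'.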
labels, testresults = pc.load_classifier_and_test_on_dataset('all', feature_data)

if displayOn:
    pc.display_segmentation_image('all')
    print "TEST_DISPLAY: showing segmentation image |", NUMBER_OF_POINTS, "points. Please press a key"
    cv.highgui.cvWaitKey(0)
    pc.save_segmentation_image('all')

pc.map_polys = pc.load_Classifier_labels('all')
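# From here on, map_polys holds the learned per-point labels rather than the
# polygon-derived ones (they are overridden; see the comments near the end of
# this file).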

print 'pts3d_bound is size', pc.pts3d_bound.shape
print 'map_polys is size', pc.map_polys.shape

SHOW_IN_ROS = True
if SHOW_IN_ROS:
    import roslib
    roslib.load_manifest('pr2_lcs_helper')
    from sensor_msgs.msg import PointCloud
    import acquire_pr2_data, rospy
    try:
        rospy.init_node('whatever')
    except:
        print 'rospy init node has already been called: skipping'
    pub = rospy.Publisher('temp_pub', PointCloud)
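    # Note: this loop publishes the labeled (nonzero) points once per second
    # and only exits on node shutdown (e.g. Ctrl-C); the code below runs after.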
    while not rospy.is_shutdown():
        colors = pc.map_polys
        acquire_pr2_data.publish_cloud(pub, '/base_footprint', pc.pts3d_bound[:, colors != 0], intensities=colors[colors != 0])
        rospy.sleep(1)
        print 'published'

pc.scan_dataset.ground_plane_rotation = np.matrix([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]]).T
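# Estimate the table height as the mean z of all points labeled as 'surface',
# then store it as the table-plane translation.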
data_idx = np.where(np.asarray(pc.map_polys) == processor.LABEL_SURFACE)[0]
data = np.asarray(pc.pts3d_bound)[:, data_idx]
table_height = np.mean(data, axis=1)[2]
pc.scan_dataset.table_plane_translation = np.matrix([0., 0., table_height]).T

'''
Save results in a nice format.
'''
if False:
    # was: unique_name (undefined); the dataset id is the unique name used elsewhere
    filename = pc.config.path + 'data/' + pc.scan_dataset.id + '_segmentation_results.py'
    roslib.load_manifest('display_stuff')
    import save_labeled_cloud
    save_labeled_cloud.save_results(pc.pts3d_bound, pc.intensities, labels,
                                    pc.idx_bound, pc.pts3d)
    save_labeled_cloud.save_feature_data(feature_data['features'],
                                         feature_data['point_indices'])
    save_labeled_cloud.save_map2D(pc.map[0], table_height)
''' Comments
* pc.pts3d_bound   # 3 by Nbound
* pc.idx_bound     # 1 by Nbound, indexes to re-project information into the full pointcloud.
* labels           # Nbound; use when loading the classifier on feature_data.
* pc.map_polys     # --> Nbound, labels created from polygons.
                   # Overridden by the learning-based labels, so they are identical
                   # at the end of this document.
* Nbound = len(intensities_bound)   # --> example: range(19598)
* feature_data['features']   # Nbound x 35; actually SLIGHTLY less, only 19541.
* feature_data['point_indices']
* Nfeature = feature_data['set_size']   # --> slightly less than the maximum.
                                        # I assume this is at most NUMBER_OF_POINTS.
- Others:
* pc.image_labels
* pc.img_mapped
* pc.img_mapped_polygons
* testresults   # Not sure what this is?
* data_idx      # indexes taken from pts3d_bound ONLY where the label is nonzero
* data          # 3D values of pts3d_bound where the label is nonzero...
                # however as a list, so 3x longer.
* scan_indices  # an artifact from the hokuyo. Original acquired line order?
'''

if False:
    object_height = 0.1

    resolution = [.01 * SCALE, .01 * SCALE]
    print 'NOTE: Resolution is ', 100 * resolution[0], 'cm'
    polygon = label_object()
    polygon.add_point([0, 0])
    polygon.add_point([0, 5 * SCALE])
    polygon.add_point([10 * SCALE, 5 * SCALE])
    polygon.add_point([10 * SCALE, 0])

    print 'creating placement object'
    pl = Placement(pc, resolution)

    if displayOn:
        placement_point = pl.test_placement(polygon, object_height)
    else:
        placement_point = pl.find_placement(polygon, object_height)

    placement_point -= pc.scan_dataset.ground_plane_translation

    import mekabot.coord_frames as mcf
    placement_point_global = mcf.thok0Tglobal(placement_point)
    print 'placement point in global coordinate frame:', placement_point_global.T

    if displayOn:
        print 'displaying current placement'
        pl.display_current_placement_in_heightmap()
        cv.highgui.cvWaitKey()

if displayOn:
    print 'importing mayavi'
    print 'showing 3D mayavi'

print getTime(), 'DONE'