run_segmentation_PR2.py
#!/usr/bin/python
#
# Copyright (c) 2010, Georgia Tech Research Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#     * Redistributions of source code must retain the above copyright
#       notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above copyright
#       notice, this list of conditions and the following disclaimer in the
#       documentation and/or other materials provided with the distribution.
#     * Neither the name of the Georgia Tech Research Corporation nor the
#       names of its contributors may be used to endorse or promote products
#       derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY GEORGIA TECH RESEARCH CORPORATION ''AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL GEORGIA TECH BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#

#  \author Jason Okerman, original: Martin Schuster (Healthcare Robotics Lab, Georgia Tech.)

''' 
    In order to run with pre-trained classifiers (default behavior),
    please set DATA_LOCATION below to point to the /classifiers folder
    inside this package.  (TODO: make this happen by default.)

    Takes a point cloud or laser scan, an image, the tf between them, and
        information from trained SVM classifiers, and segments the point
        cloud into 'clutter' vs 'surface'.
    Results are output as published topics.
    Roadmap: we hope to improve the types of inputs and the flexibility of outputs.

'''
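# How to run (a sketch; assumes a sourced ROS environment with this package on
# the ROS_PACKAGE_PATH, and a PR2 or equivalent publishing the expected camera
# and point cloud topics):
#
#     rosrun clutter_segmentation run_segmentation_PR2.py
#
# Segmentation results are published on the 'labeled_cloud' topic (see the
# SHOW_IN_ROS block near the end of this file).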
import roslib; roslib.load_manifest('clutter_segmentation')
roslib.load_manifest('pr2_clutter_helper')
import rospy
from sensor_msgs.msg import PointCloud

### Optional, below:
#       roslib.load_manifest('display_stuff'); import save_labeled_cloud;
#       Dependency that will be integrated later.  Removed for now.

import acquire_pr2_data  #from pr2_clutter_svm_helper

from hrl_lib.util import getTime
print getTime(), 'START'

import processor
import configuration

try: from placement import Placement
except ImportError: print 'Cannot find placement.py for import.  Ignoring.'
from label_object import label_object
from scan_dataset import scan_dataset

import numpy as np
import opencv as cv
import opencv.highgui as hg
print getTime(), 'IMPORTS DONE'

#------------------------
ROBOT = 'PR2'   #'desktopScanner'#'dummyScanner'#'codyRobot'
                #Only PR2 is supported in the current version [November 2010 release].
DATASET_ID = 'pr2_example_0003'
DATA_LOCATION = roslib.packages.get_pkg_dir('clutter_segmentation')+'/classifiers'
                #If necessary, change to: '[your-computer]/folder-containing-XML-files'
                #Note: this is also where results are saved by default (saving is optional).
                #The specified folder should have subfolders 'results' and 'data'.
z_above_floor = 0
#Theory: it is imperative to know the "plane" of the floor to do good classification in the current scheme.
#ground_plane_rotation = '' 
#rot = np.matrix([[1,0,0],[0,1,0],[0,0,1]])
#[above] DATA_LOCATION must hold XML training data to load, or you will get an error.
#[above] DATASET_ID should be a unique name for files in the subfolder data/.
#-------------------------
'''Number of points to sample when generating features.'''
NUMBER_OF_POINTS = 2000 #Recommended between 1000 and 3000.  If too large, it will be
                        #truncated to the maximum number of visible points.
SCALE = 1
IS_LABELED = True   #TODO: remove need for this.  Indicates a dataset which is fully
                    #defined with respect to the ground plane.
USE_POLYGONS = True #TODO: remove.  This can probably be skipped with the same results.
SAVE_INTENSITY_IMAGE = False
CalibrationVisual = True #Whether to pop up a visual to confirm the camera-to-laser mapping.

MODE = 'all_post' # 'all', 'all_post' (RANSAC), 'color', 'laser'
#Note: RANSAC settings are specified in processor.py.  Search for "ransac".
#TODO: bring those variables out to this file.


displayOn = True 
#---------------------------

'''
    @param z_above_floor Approximate height of the hokuyo above the ground.  Assumed
    to be fixed for now.  On the Cody robot the camera and hokuyo were at the same z-height.
'''
def create_default_scan_dataset(unique_id = DATASET_ID, z_above_floor=1.32):
    dataset = scan_dataset()
    dataset.table_plane_translation = np.matrix([0,0,0]).T
    # Approximate height of the hokuyo above the ground; assume it to be fixed for now.
    dataset.ground_plane_translation = np.matrix([0,0,z_above_floor]).T 
    dataset.ground_plane_rotation = ''  #expects a 3x3 numpy matrix
    #pc.scan_dataset.is_test_set = True
    dataset.is_labeled = IS_LABELED
    dataset.id = unique_id
    return dataset

'''
    Define a region-of-interest polygon which crops the camera image.
    A smaller size will result in quicker processing time.
    Takes four crop values as fractions of the image size (left, right, top, bottom),
    each ranging between 0 and 1, so that 0 on all values means the original
    image and 0.2 on everything crops away 20% on all edges.
'''
##MAKE SURE THAT THESE MATCH THE ORIGINAL TAKE: CROP CAMERA IMAGE TO ROI
def define_ROI_polygons(w, h, crop_regions):
    '''
    # Create dummy edges for placement.
    # Note this assumes a RECTANGULAR region of interest -- not true in general.
    Usage: 
        w = image_size.width; h = image_size.height
        crop_region = (0.2,0.1,.2,.1) # fractions to crop left, right, top, bottom
        surface_ROI, list_of_edges = define_ROI_polygons(w, h, crop_region) 
        pc.scan_dataset.polygons.append(surface_ROI)
        for p in list_of_edges: pc.scan_dataset.polygons.append(p)
    '''
    (left, right, top, bottom) = crop_regions
    width_min = w * left
    width_max = w * (1-right)
    height_min = h * top
    height_max = h * (1-bottom)
    p = label_object()
    p.set_label('surface')
    p.add_point((width_min,height_min))
    p.add_point((width_min,height_max))
    p.add_point((width_max,height_max))
    p.add_point((width_max,height_min))
    table_surface = p #A polygon (rectangle) defining the most generous guess of the table surface.
    print 'surface', p.get_points()
    p = label_object()
    p.set_label('edge_down')
    p.add_point((width_min,height_min))
    p.add_point((width_min,height_max))
    p.add_point((width_max,height_max))
    p.add_point((width_max,height_min))
    p.add_point((width_min,height_min))
    list_of_edges = (p,) 
    # [Above] Any number of polygons which define the surface border.  More can be added.
    print 'edge', p.get_points()
    return table_surface, list_of_edges
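# Worked example (illustrative): for a 640x480 image with the crop region
# (0.3, 0.3, 0.3, 0.3) used below, the surface ROI rectangle spans
#     width_min  = 640*0.3 = 192,   width_max  = 640*(1-0.3) = 448,
#     height_min = 480*0.3 = 144,   height_max = 480*(1-0.3) = 336,
# i.e. the central 40% of the image in each dimension.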

def show_image(window_name, img, wait=False):
    hg.cvStartWindowThread()
    RESIZABLE = 0
    hg.cvNamedWindow(window_name, RESIZABLE)
    hg.cvShowImage(window_name, img)
    if wait:
        print 'show_image: press any key to continue...'
        cv.highgui.cvWaitKey()

#------------------------------------

###CHANGE THIS TO THE DIRECTORY WITH RESULTS FROM CODY:
###  Change 'codyRobot' to 'dummyScanner' or the code tries to find camera drivers, etc.
cfg = configuration.configuration(DATA_LOCATION, ROBOT) #'dummyScanner'
pc = processor.processor(cfg)
pc.features_k_nearest_neighbors = None

# The name of the dataset will allow loading of a saved image / scan.
pc.scan_dataset = create_default_scan_dataset( DATASET_ID, z_above_floor) #1.32

'''
    PC.LOAD_RAW_DATA()
    Browses for content with the following names:
        data/UNIQUE_ID_image.png
        data/UNIQUE_ID_laserscans.pkl
    Sets in pc: self.img,
                self.laserscans,
                and sets (or zeros) self.image_angle
'''
if cfg.device=='PR2':
    rospy.init_node('rosSegmentCloud')
    print 'STARTING NEW COLLECTION NODE'
    ac = acquire_pr2_data.AcquireCloud()
    #ac.print_test()
    print 'FINISHED COLLECTING, SENDING DATA BACK'

    np_img, pc.pts3d, pc.intensities, camPts, colors, idx_list = ac.return_data_for_segmentation() 
        # >> return self.img, self.pts3d, self.intensities, 
        #        self.camPts, self.colors, self.camPts_idx
    pc.pts3d = np.array(pc.pts3d)
    N = len(idx_list)
    Nbound = len( idx_list[idx_list] ) #counts the True entries in the boolean mask
    print 'Camera view contains', Nbound, 'of', N, '3D points within its field of view.\n' 
    if Nbound == 0:
        print 'WARNING: Cannot start segmentation if no 3D points overlap the image. Exiting.'
        print 'This can happen if the pointcloud is empty [], if the transformation is wrong,', \
              'or if the camera and scanner are not pointed in the same direction.'
        import sys; sys.exit()
    pc.img = cv.cvCloneImage(np.array(np_img)) #only works in CV 2.0, but not cv 2.1
    pc.laserscans = []
    pc.scan_indices = np.array( range(N) )
    pc.camPts = camPts
    pc.camPts_bound = camPts[:,idx_list]
    pc.pts3d_bound = pc.pts3d[:,idx_list]
    pc.scan_indices_bound = np.array(range(N))[idx_list]
    pc.intensities_bound = pc.intensities[idx_list]
    pc.idx_list = idx_list
    ###pc.map = (pc.camPts_bound, pc.camPts, pc.idx_list, pc.pts3d_bound, pc.scan_indices_bound, pc.intensities_bound)
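    # Sketch of the masking relationship set up above (added comment):
    #     pts3d:        3 x N        all 3D points from the scan
    #     idx_list:     length-N boolean mask, True where a point projects
    #                   inside the camera image
    #     pts3d_bound:  3 x Nbound   i.e. pts3d[:, idx_list]
    # The other '_bound' arrays are filtered by the same mask.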

#Create a dummy surface polygon to define the ROI.
# [Below] These polygons will be used to generate a 'mask' cv image, used to
# limit the area of the image where 3D projected points are considered.
image_size = cv.cvGetSize(pc.img)
w = image_size.width
h = image_size.height
crop_region = (0.3,0.3,0.3,0.3) # fractions to crop left, right, top, bottom
surface_ROI, list_of_edges = define_ROI_polygons(w, h, crop_region) 
pc.scan_dataset.polygons.append(surface_ROI)
for p in list_of_edges: pc.scan_dataset.polygons.append(p)

''' [Below] Does 'map_laser_into_cam2D' to get camPts, and bounds all values.
    Optionally applies a translation and rotation relative to a ground plane (if defined).
'''
if not cfg.device=='PR2':
    pc.do_all_point_cloud()
else:
    pc.do_all_point_cloud(map_already_exists=True)

''' [Below] MAKE SURE THESE MATCH THE ORIGINAL SCAN:
    Truncate the 3D data to a volume (center, and h,w,d).  Numbers are in meters.
    Suggested right-arm values are: np.array([0.55,-0.4,1.0]), 0.6, 0.8, 1.3
    Note: this increases speed by throwing out points outside the likely table area.
    It is also meant to remove the 'floor' as a possible candidate for table surface fits.
'''
pc.truncate_pointcloud_to_voi(np.array([0.55,-0.4,1.0]), 2, 2, 1.3)
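# Note (added): the call above uses a generous 2 m x 2 m x 1.3 m volume around
# the table, rather than the tighter right-arm values suggested in the comment.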

''' Visual to quickly check the TF. '''
if CalibrationVisual: 
    overlay_img = pc.draw_mapped_laser_into_image(None, pc.pts3d, pc.img)
    show_image('calibration_visual', overlay_img)

if USE_POLYGONS:
    print 'DO POLYGON MAPPING'
    pc.do_polygon_mapping()
    '''Visual based on accurate polygons.'''
    ###pc.display_3d('labels')### 

    print 'map laser into image again'
    pc.img_mapped = pc.draw_mapped_laser_into_image(None, pc.pts3d, pc.img)
    #Below: create B&W images for the manually entered 'table edge and center' values we specified.
    pc.create_polygon_images()  #called twice?
    pc.img_mapped_polygons = pc.draw_mapped_laser_polygons_into_image(None, pc.pts3d, pc.img)

if SAVE_INTENSITY_IMAGE:
    print 'Begin optional save of intensity image'
    #This *may* involve assuming that a tilting scan exists.
    pc.img_intensities = pc.create_intensity_image(pc.laserscans[0])
else:
    n, m = np.shape(pc.pts3d_bound)  #hack: only m, the number of points, is needed
    polygon_labels = list(1 for x in range(m)) # 0 == background/floor/unlabeled/other
    pc.map_polys = polygon_labels #Jas: filled with CLUTTER / SURFACE labels later

###pc.display_all_data()###

pc.scan_dataset.ground_plane_normal = np.matrix([0.,0.,1.]).T

''' Comment: nonzero_indices comes from pc.map_polys, which comes from the other labeling technique.
    I have changed the second param below to: generate_and_save_all_neighborhood_indices=True.
'''
print 'lucid - about to generate features'
SAVE_UNUSED_NEIGHBORS_TO = False #Jason had run with True, True.
                                 #Takes a lot longer, obviously!
                                 #Might slightly affect the 'global'
                                 #feature vector on each point.
feature_data = pc.generate_features(NUMBER_OF_POINTS, False, True)  ### previously 1000, False, True
print 'lucid - done generating features'
labels, testresults = pc.load_classifier_and_test_on_dataset(MODE, feature_data) #'all_post'
###pc.display_3d('all')###
#Labels are now generated --> can be saved to file?


if False and displayOn:
    pc.display_segmentation_image('all')
    print "TEST_DISPLAY: showing segmentation image |", NUMBER_OF_POINTS, "points.  Please press a key."
    cv.highgui.cvWaitKey(0)
    pc.save_segmentation_image('all')

#feature_data = {'point_indices':[], 'set_size':1} #dummy dict (-> invalid statistics); the baseline doesn't need real features
#labels, testresults = pc.load_classifier_and_test_on_dataset('baseline', feature_data)
###pc.display_3d('baseline')###

#Test placement, using the classification results:
pc.map_polys = pc.load_Classifier_labels(MODE) #'all_post'
#This function prints: "loading labels: len: ..."

print 'pts3d_bound is size', pc.pts3d_bound.shape
print 'map_polys is size', pc.map_polys.shape

#+-
SHOW_IN_ROS = True
if SHOW_IN_ROS:
    try: rospy.init_node('table_labeler')
    except rospy.exceptions.ROSException: print 'rospy init_node has already been called: skipping'
    pub = rospy.Publisher('labeled_cloud', PointCloud)
    while not rospy.is_shutdown():
        ac.pub_cloud()
        ac.pub_cam()
        ac.pub_img()
        colors = pc.map_polys #actually labels
        acquire_pr2_data.publish_cloud(pub, '/base_footprint', pc.pts3d_bound[:,colors!=0], intensities=colors[colors!=0])
        rospy.sleep(2)
        print 'published'
    # >> publish_cloud(pub, frame, pts3d, intensities=None, labels=None, colors=None):
#+-
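# Minimal listener sketch (hypothetical, not part of this node) for consuming
# the labeled cloud published above; assumes the same 'labeled_cloud' topic:
#
#     import rospy
#     from sensor_msgs.msg import PointCloud
#     def on_cloud(msg):
#         print 'received', len(msg.points), 'labeled points'
#     rospy.init_node('labeled_cloud_listener')
#     rospy.Subscriber('labeled_cloud', PointCloud, on_cloud)
#     rospy.spin()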

pc.scan_dataset.ground_plane_rotation = np.matrix([[1.,0.,0.],[0.,1.,0.],[0.,0.,1.]]).T
#Estimate the table plane translation from the mean of the classified points:
#Surface points are collected, and their average z-value is assumed to be the table height.
data_idx = np.where(np.asarray(pc.map_polys) == processor.LABEL_SURFACE)[0]
data = np.asarray(pc.pts3d_bound)[:,data_idx]
table_height = np.mean(data, axis=1)[2]
pc.scan_dataset.table_plane_translation = np.matrix([0.,0.,table_height]).T

#----------------------------------------------------

'''
# Save results in a nice format.
'''
if False:
    filename = pc.config.path + 'data/' + pc.scan_dataset.id + '_segmentation_results.py' #was: unique_name (undefined)
    roslib.load_manifest('display_stuff'); import save_labeled_cloud
    save_labeled_cloud.save_results(pc.pts3d_bound, pc.intensities, labels,
                                    pc.idx_bound, pc.pts3d)
    save_labeled_cloud.save_feature_data(feature_data['features'],
                                         feature_data['point_indices'])
    save_labeled_cloud.save_map2D(pc.camPts_bound, table_height) #for placement
''' Comments
    * pc.pts3d_bound  #3 x Nbound
    * pc.idx_bound  #1 x Nbound, indexes to re-project information into the full pointcloud.
    * labels  #Nbound; use if loading a classifier on feature_data.
    * pc.map_polys  # --> Nbound, labels created from polygons.
                    #     Overridden by the learning-based labels, so they are identical
                    #     by the end of this document.
    * Nbound = len(intensities_bound)  #--> example: range(19598)
    * feature_data['features']  # Nbound x 35; actually SLIGHTLY less, only 19541.
    * feature_data['point_indices']
    * Nfeature = feature_data['set_size']  #--> slightly less than the maximum.
                                           #I assume this is at most NUMBER_OF_POINTS.
    -Others:
    * pc.image_labels
    * pc.img_mapped
    * pc.img_mapped_polygons
    * testresults  #Not sure what this is?
    * data_idx  #indexes taken from pts3d_bound ONLY where the label is nonzero.
    * data  #3D values of pts3d_bound where the label is nonzero...
            #However, as a list, so 3x longer.
    * scan_indices  #an artifact from the hokuyo.  Original acquired line order?
'''

if False: ####### Placement routine.
    object_height = 0.1
    #SCALE = 1
    resolution = [.01*SCALE, .01*SCALE]  #sets the resolution of the occupancy grid
    print 'NOTE: Resolution is ', 100*resolution[0], 'cm'
    polygon = label_object()
    polygon.add_point([0,0])
    polygon.add_point([0,5*SCALE])
    polygon.add_point([10*SCALE,5*SCALE])
    polygon.add_point([10*SCALE,0])
    ###object_height = 0.1

    print 'creating placement object'
    pl = Placement(pc, resolution)  ###REPLACE WITH MY OWN CLASS DEFINITION WITH FUNCTIONS

    if displayOn:
        placement_point = pl.test_placement(polygon, object_height) 
    else:
        placement_point = pl.find_placement(polygon, object_height) #Add param True to get debug popups

    placement_point -= pc.scan_dataset.ground_plane_translation

    #Assumes 'codyRobot'==ROBOT
    #This should be optional.
    ### Formerly, the robot would reach out and place the object at this point.
    #import mekabot.coord_frames as mcf
    #placement_point_global = mcf.thok0Tglobal(placement_point)
    #print 'placement point in global coordinate frame:', placement_point_global.T
    print 'placement point (ground-plane frame):', placement_point.T

    if displayOn:    
        print 'displaying current placement'
        pl.display_current_placement_in_heightmap()
        cv.highgui.cvWaitKey()

    print getTime(), 'DONE'
    #print pts3d

    #==============


clutter_segmentation
Author(s): Jason Okerman, Martin Schuster, Advisors: Prof. Charlie Kemp and Jim Rehg, Lab: Healthcare Robotics Lab at Georgia Tech
autogenerated on Wed Nov 27 2013 12:07:15