test_on_PR2_try4.py
#!/usr/bin/python
#
# Copyright (c) 2010, Georgia Tech Research Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#     * Redistributions of source code must retain the above copyright
#       notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above copyright
#       notice, this list of conditions and the following disclaimer in the
#       documentation and/or other materials provided with the distribution.
#     * Neither the name of the Georgia Tech Research Corporation nor the
#       names of its contributors may be used to endorse or promote products
#       derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY GEORGIA TECH RESEARCH CORPORATION ''AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL GEORGIA TECH BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#

#  \author Martin Schuster (Healthcare Robotics Lab, Georgia Tech.)

import roslib; roslib.load_manifest('clutter_segmentation')
#roslib.load_manifest('pr2_clutter_helper')
import rospy
from sensor_msgs.msg import PointCloud

### Optional, below:
#       roslib.load_manifest('display_stuff'); import save_labeled_cloud;

import acquire_pr2_data  #from pr2_lcs_helper

from hrl_lib.util import getTime
print getTime(), 'START'

import processor
import configuration

try: from placement import Placement
except ImportError: print 'Cannot find placement.py for import. Ignoring.'
from label_object import label_object
from scan_dataset import scan_dataset

import numpy as np
import opencv as cv            #legacy OpenCV SWIG bindings (pre cv2)
import opencv.highgui as hg
print getTime(), 'IMPORTS DONE'


#------------------------
ROBOT = 'PR2'  # options: 'PR2', 'desktopScanner', 'dummyScanner', 'codyRobot'
DATASET_ID = 'pr2_example_0003'
DATA_LOCATION = '/home/jokerman/Desktop/PR2/'
z_above_floor = 0
#Theory: imperative to know the "plane" of the floor to do good classification in the current scheme.
#ground_plane_rotation = '' 
#rot = np.matrix([[1,0,0],[0,1,0],[0,0,1]])
#[above] DATA_LOCATION must hold xml training data to load, or you get an error.
#[above] DATASET_ID should be a unique name for files in the subfolder data/

#-------------------------
'''Number of points to use when generating features.'''
NUMBER_OF_POINTS = 3500  #some large number. Recommended between 1000 ~ 3000
SCALE = 1
IS_LABELED = True #What happens if False? Assume = no ROI cropping. Might get skipped altogether. TODO: check.
USE_POLYGONS = True #this is required right now!  why?
SAVE_INTENSITY_IMAGE = False
CalibrationVisual = True

MODE = 'all_post' # 'all', 'all_post' (ransac), 'color', 'laser'

displayOn = True
#---------------------------

'''
    @param z_above_floor Approximate height of the hokuyo above the ground.  Assumed
    to be fixed for now.  On the Cody robot the camera and hokuyo were at the same z-height.
'''
def create_default_scan_dataset(unique_id = DATASET_ID, z_above_floor=1.32):
    dataset = scan_dataset()
    dataset.table_plane_translation = np.matrix([0,0,0]).T
    # approximate height of hokuyo above the ground, assume it to be fixed for now.
    dataset.ground_plane_translation = np.matrix([0,0,z_above_floor]).T 
    dataset.ground_plane_rotation = ''  #expects a 3x3 numpy matrix
    #pc.scan_dataset.is_test_set = True
    dataset.is_labeled = IS_LABELED
    dataset.id = unique_id
    return dataset

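# [Added note] For example, create_default_scan_dataset('pr2_example_0003', 1.32)
# returns a dataset whose ground plane sits 1.32 m below the sensor origin and
# whose table_plane_translation is still the zero vector; the table height is
# only estimated and filled in after classification, near the end of this script.
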
'''
    Define a region-of-interest polygon which crops the camera image.
    A smaller size will result in a quicker processing time.
    Takes four crop values as % of the image size: xmin, xmax, ymin, ymax,
    each ranging between 0 and 1.  Perhaps this would be more intuitive as
    actual crop values, so that '0' on all values meant the original image and
    '.2' on everything cropped away 20% on all edges.
'''
##MAKE SURE THAT THESE MATCH THE ORIGINAL TAKE: CROP CAMERA IMAGE TO ROI
def define_ROI_polygons(w, h, crop_regions):
    '''
    # Create dummy edges for placement.
    # Note this assumes we will be using a SQUARE region of interest -- not true in general.
    Usage: 
        w = image_size.width; h = image_size.height
        crop_region = (0.2,0.1,.2,.1) # % to crop left, crop right, crop top, crop bot
        surface_ROI, list_of_edges = define_ROI_polygons(w, h, crop_region) 
        pc.scan_dataset.polygons.append(surface_ROI)
        for p in list_of_edges: pc.scan_dataset.polygons.append(p)
    '''
    (left, right, top, bottom) = crop_regions
    width_min = w * left
    width_max = w * (1-right)
    height_min = h * top
    height_max = h * (1-bottom)
    p = label_object()
    p.set_label('surface')
    p.add_point((width_min,height_min))
    p.add_point((width_min,height_max))
    p.add_point((width_max,height_max))
    p.add_point((width_max,height_min))
    table_surface = p #A polygon (rectangle) defining the most generous guess of the table surface.
    print 'surface', p.get_points()
    p = label_object()
    p.set_label('edge_down')
    p.add_point((width_min,height_min))
    p.add_point((width_min,height_max))
    p.add_point((width_max,height_max))
    p.add_point((width_max,height_min))
    p.add_point((width_min,height_min)) #closed: first point repeated
    list_of_edges = (p,) 
    # [Above] Any number of polygons which define the surface border. More can be added.
    print 'edge', p.get_points()
    return table_surface, list_of_edges

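# [Added example] With a 640 x 480 image and crop_regions = (0.3, 0.3, 0.3, 0.3),
# the surface rectangle spans x in [192, 448] and y in [144, 336], i.e. the
# central 40% of the image in each dimension (assuming get_points returns the
# points in insertion order):
#
#   surface_ROI, edges = define_ROI_polygons(640, 480, (0.3, 0.3, 0.3, 0.3))
#   # surface_ROI.get_points() -> [(192,144), (192,336), (448,336), (448,144)]
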
def show_image(window_name, img, wait=False):
    hg.cvStartWindowThread()
    RESIZABLE = 0
    hg.cvNamedWindow(window_name, RESIZABLE)
    hg.cvShowImage(window_name, img)
    if wait:
        print 'show_image: press any key to continue..'
        cv.highgui.cvWaitKey()

#------------------------------------

###CHANGE THIS TO THE DIRECTORY WITH RESULTS FROM CODY:
###  change 'codyRobot' to 'dummyScanner' or the code tries to find camera drivers, etc.
cfg = configuration.configuration(DATA_LOCATION, ROBOT) #'dummyScanner'
pc = processor.processor(cfg)
pc.features_k_nearest_neighbors = None

# The name of the dataset will allow loading of a saved image / scan.
pc.scan_dataset = create_default_scan_dataset( DATASET_ID, z_above_floor) #1.32

'''
    PC.LOAD_RAW_DATA()
    Browses for content with the following names:
        data/UNIQUE_ID_image.png
        data/UNIQUE_ID_laserscans.pkl
    Sets in pc: self.img,
                self.laserscans,
                and sets (or zeros) self.image_angle
'''
if cfg.device=='PR2':
    rospy.init_node('rosSegmentCloud')
    print 'STARTING NEW COLLECTION NODE'
    ac = acquire_pr2_data.AcquireCloud()
    #ac.print_test()
    print 'FINISHED COLLECTING, SENDING DATA BACK'

    np_img, pc.pts3d, pc.intensities, camPts, colors, idx_list = ac.return_data_for_segmentation() 
        # >> return self.img, self.pts3d, self.intensities, 
        #        self.camPts, self.colors, self.camPts_idx
    pc.pts3d = np.array(pc.pts3d)
    N = len(idx_list)
    Nbound = len( idx_list[idx_list] )  #idx_list is a boolean mask, so this counts its True entries
    print 'Camera view contains', Nbound, 'of', N, '3D points within its field of view.\n' 
    if Nbound == 0:
        print 'WARNING: Cannot start segmentation if no 3D points overlap the image. Exiting.' 
        print 'This can happen if the pointcloud is empty [], if the transformation is wrong, \
               or if the camera and scanner are not pointed in the same direction.'
        import sys; sys.exit()
    pc.img = cv.cvCloneImage(np.array(np_img) ) #only works in CV 2.0, but not cv 2.1
    pc.laserscans = []
    pc.scan_indices = np.array( range(N) )
    pc.camPts = camPts
    pc.camPts_bound = camPts[:,idx_list]
    pc.pts3d_bound = pc.pts3d[:,idx_list]
    pc.scan_indices_bound = np.array(range(N))[idx_list]
    pc.intensities_bound = pc.intensities[idx_list]
    pc.idx_list = idx_list
    ###pc.map = (pc.camPts_bound, pc.camPts, pc.idx_list, pc.pts3d_bound,pc.scan_indices_bound, pc.intensities_bound)

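# [Added illustration] The "_bound" bookkeeping above is plain NumPy boolean
# masking; a tiny self-contained example of the same pattern:
#
#   pts = np.array([[0., 1., 2.], [3., 4., 5.], [6., 7., 8.]])  # 3 x N
#   mask = np.array([True, False, True])   # N flags: does the point project into the image?
#   pts_bound = pts[:, mask]               # 3 x Nbound, keeps columns where mask is True
#   n_bound = len(mask[mask])              # == mask.sum() == 2
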
#create dummy surface polygon to define the ROI
# [Below] These polygons will be used to generate a 'mask' cv image and used to 
# limit the area of the image where 3D projected points are considered.
image_size = cv.cvGetSize(pc.img)
w = image_size.width
h = image_size.height
crop_region = (0.3,0.3,0.3,0.3) # % to crop left, crop right, crop top, crop bot
surface_ROI, list_of_edges = define_ROI_polygons(w, h, crop_region) 
pc.scan_dataset.polygons.append(surface_ROI)
for p in list_of_edges: pc.scan_dataset.polygons.append(p)


''' [Below] Does 'map_laser_into_cam2D' to get camPts, and bounds on all values.
    Optionally applies translate and rotate relative to a ground plane (if defined).
'''
if cfg.device != 'PR2':
    pc.do_all_point_cloud()
else:
    pc.do_all_point_cloud(map_already_exists=True)

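# [Added sketch] map_laser_into_cam2D is defined elsewhere in the package; the
# standard pinhole projection it presumably performs looks like the following,
# where P is an assumed 3x4 camera projection matrix (not this package's
# actual calibration code):
#
#   def project_points_sketch(P, pts3d):
#       pts3d = np.asarray(pts3d)                                 # 3 x N
#       pts_h = np.vstack([pts3d, np.ones((1, pts3d.shape[1]))])  # homogeneous, 4 x N
#       uvw = np.dot(P, pts_h)                                    # 3 x N
#       return uvw[0:2, :] / uvw[2, :]                            # pixel coords, 2 x N
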
''' [Below] MAKE SURE THESE MATCH THE ORIGINAL SCAN: 
    truncate 3D data to a volume of interest (center, and h,w,d).  Numbers are in meters.
    Suggested right-arm values: np.array([0.55,-0.4,1.0]), 0.6, 0.8, 1.3
    Note, this increases speed by throwing out points outside the likely table area.
    It is also meant to remove the 'floor' as a possible candidate for table surface fits. 
'''
pc.truncate_pointcloud_to_voi(np.array([0.55,-0.4,1.0]), 2, 2, 1.3) #extents here are more generous than the suggested values

''' Visual to quickly check TF'''
if CalibrationVisual: 
    overlay_img = pc.draw_mapped_laser_into_image(None, pc.pts3d, pc.img)
    show_image('calibration_visual', overlay_img)

if USE_POLYGONS:
    print 'DO POLYGON MAPPING'
    pc.do_polygon_mapping() #-
    '''Visual based on accurate polygons.'''
    ###pc.display_3d('labels')### 

    print 'map laser into image again'
    pc.img_mapped = pc.draw_mapped_laser_into_image(None, pc.pts3d, pc.img)  #-  
    #Below: create B & W images for the manually entered 'table edge and center' values we specified. 
    pc.create_polygon_images()  #called twice? 
    pc.img_mapped_polygons = pc.draw_mapped_laser_polygons_into_image(None, pc.pts3d, pc.img)    

if SAVE_INTENSITY_IMAGE:
    print 'Begin optional save of intensity image'
    #This *may* involve assuming a tilting scan exists 
    pc.img_intensities = pc.create_intensity_image(pc.laserscans[0])  #-  
else:
    n, m = np.shape(pc.pts3d_bound)  #hack! hack!
    polygon_labels = [1] * m  # 0 == background/floor/unlabeled/other
    pc.map_polys = polygon_labels #Jas: filled with CLUTTER / SURFACE labels later
#+

###pc.display_all_data()###

pc.scan_dataset.ground_plane_normal = np.matrix([0.,0.,1.]).T

''' Comment: nonzero_indices comes from pc.map_polys, which comes from the other labeling technique.
    I have changed the second param below to: generate_and_save_all_neighborhood_indices=True
'''
print 'lucid - about to generate features'
SAVE_UNUSED_NEIGHBORS_TO = False #Jason had run with True, True.  
                                 #Takes a lot longer obviously!
                                 #Might slightly affect the 'global' 
                                 #feature vector on each point.
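# [Added note] From the comments above, the positional arguments below are
# presumably (number_of_points, save_unused_neighbors,
# generate_and_save_all_neighborhood_indices); the names are an assumption --
# the processor module defines the actual signature.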
feature_data = pc.generate_features(NUMBER_OF_POINTS, False, True)  ### previously 1000, False, True
print 'lucid - done generating features'
labels, testresults = pc.load_classifier_and_test_on_dataset(MODE, feature_data) #'all_post'
###pc.display_3d('all')###
#Labels are now generated --> can be saved to file?


if False and displayOn:
    pc.display_segmentation_image('all')
    print "TEST_DISPLAY: showing segmentation image |", NUMBER_OF_POINTS, "points.  Please press a key"
    cv.highgui.cvWaitKey(0)   
    pc.save_segmentation_image('all')

#feature_data = {'point_indices':[], 'set_size':1} #dummy dict (->invalid statistics), baseline doesn't need real features
#labels, testresults = pc.load_classifier_and_test_on_dataset('baseline', feature_data)
###pc.display_3d('baseline')###

#test placement, use classification results:
pc.map_polys = pc.load_Classifier_labels(MODE) #'all_post'
#this function prints: "loading labels: len: ..."

print 'pts3d_bound is size', pc.pts3d_bound.shape
print 'map_polys is size', pc.map_polys.shape

#+-
SHOW_IN_ROS = True
if SHOW_IN_ROS:
    try: rospy.init_node('table_labeler')
    except: print 'rospy init_node has already been called: skipping'
    pub = rospy.Publisher('labeled_cloud', PointCloud)
    while not rospy.is_shutdown():
        ac.pub_cloud()
        ac.pub_cam()
        ac.pub_img()
        colors = pc.map_polys #actually labels
        acquire_pr2_data.publish_cloud(pub, '/base_footprint', pc.pts3d_bound[:,colors!=0], intensities=colors[colors!=0])
        rospy.sleep(2)
        print 'published'
    # >> publish_cloud(pub, frame, pts3d, intensities=None, labels=None, colors=None)
#+-
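# [Added sketch] acquire_pr2_data.publish_cloud is external to this file; per
# the signature echoed above, it likely fills a sensor_msgs/PointCloud from a
# 3xN array plus an optional intensity channel, roughly like this (names and
# details are assumptions, not the package's actual code):
#
#   from sensor_msgs.msg import ChannelFloat32
#   from geometry_msgs.msg import Point32
#
#   def publish_cloud_sketch(pub, frame, pts3d, intensities=None):
#       msg = PointCloud()
#       msg.header.stamp = rospy.Time.now()
#       msg.header.frame_id = frame
#       pts = np.asarray(pts3d)
#       msg.points = [Point32(pts[0,i], pts[1,i], pts[2,i])
#                     for i in range(pts.shape[1])]
#       if intensities is not None:
#           msg.channels = [ChannelFloat32('intensity',
#                              list(np.asarray(intensities).ravel()))]
#       pub.publish(msg)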

pc.scan_dataset.ground_plane_rotation = np.matrix([[1.,0.,0.],[0.,1.,0.],[0.,0.,1.]]).T
#estimate the table plane translation from the mean of the classified points:
#Surface points are collected, and their average z-value is assumed to be the table height
data_idx = np.where(np.asarray(pc.map_polys) == processor.LABEL_SURFACE)[0]
data = np.asarray(pc.pts3d_bound)[:,data_idx]
table_height = np.mean(data, axis=1)[2]
pc.scan_dataset.table_plane_translation = np.matrix([0.,0.,table_height]).T

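# [Added example] e.g. if the surface-labeled points have z-values of roughly
# 0.71, 0.73, and 0.72 m, table_height comes out ~0.72 and the table plane is
# placed at z = 0.72 in the ground frame.
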
#----------------------------------------------------

'''
# Save results in a nice format.
'''
if False:
    filename = pc.config.path + 'data/' + DATASET_ID + '_segmentation_results.py'
    roslib.load_manifest('display_stuff'); import save_labeled_cloud
    save_labeled_cloud.save_results(pc.pts3d_bound, pc.intensities, labels,
                                    pc.idx_bound, pc.pts3d)
    save_labeled_cloud.save_feature_data(feature_data['features'],
                                         feature_data['point_indices'])
    save_labeled_cloud.save_map2D(pc.camPts_bound, table_height) #for placement
''' Comments
    * pc.pts3d_bound #3 by Nbound
    * pc.idx_bound #1 by Nbound, indexes to re-project information into the full pointcloud.
    * labels # Nbound, use if loading the classifier on feature_data
    * pc.map_polys # --> Nbound, labels created from polygons.  
                   #     Overridden by the learning-based labels, so they are identical
                   #     at the end of this document.
    * Nbound = len(intensities_bound) #--> example range(19598)
    * feature_data['features'] # Nbound x 35, actually SLIGHTLY less, only 19541.
    * feature_data['point_indices']
    * Nfeature = feature_data['set_size'] #--> slightly less than the maximum.  
                                          #I assume this is at most NUMBER_OF_POINTS
    -Others:
    * pc.image_labels
    * pc.img_mapped
    * pc.img_mapped_polygons
    * testresults #Not sure what this is?
    * data_idx #indexes taken from pts3d_bound ONLY where the label is nonzero
    * data #3d values of pts3d_bound where the label is nonzero... 
           # However as a list, so 3x longer.
    * scan_indices - an artifact from the hokuyo.  Original acquired line order?
'''

if False: ####### Placement routine.
    object_height = 0.1
    #SCALE = 1
    resolution = [.01*SCALE, .01*SCALE]  #sets the resolution of the occupancy grid
    print 'NOTE: Resolution is ', 100*resolution[0], 'cm' ###
    polygon = label_object()
    polygon.add_point([0,0])
    polygon.add_point([0,5*SCALE])
    polygon.add_point([10*SCALE,5*SCALE])
    polygon.add_point([10*SCALE,0])
    ###object_height = 0.1

    print 'creating placement object'
    pl = Placement(pc, resolution)  ###REPLACE WITH MY OWN CLASS DEFINITION WITH FUNCTIONs

    if displayOn:
        placement_point = pl.test_placement(polygon, object_height) 
    else:
        placement_point = pl.find_placement(polygon, object_height) #Add param True to get debug popups

    placement_point -= pc.scan_dataset.ground_plane_translation

    #Assumes 'codyRobot'==ROBOT
    #This should be optional
    ### Formerly, the robot would reach out and place the object at this point
    #import mekabot.coord_frames as mcf
    #placement_point_global = mcf.thok0Tglobal(placement_point)
    #print 'placement point in global coordinate frame:', placement_point_global.T

    if displayOn:    
        print 'displaying current placement'
        pl.display_current_placement_in_heightmap()
        cv.highgui.cvWaitKey()

    print getTime(), 'DONE'
    #print pts3d

    #==============



clutter_segmentation
Author(s): Jason Okerman, Martin Schuster; Advisors: Prof. Charlie Kemp and Jim Rehg; Lab: Healthcare Robotics Lab at Georgia Tech
autogenerated on Wed Nov 27 2013 12:07:15