test_display_pr2_try3.py
#!/usr/bin/python
#
# Copyright (c) 2010, Georgia Tech Research Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#     * Redistributions of source code must retain the above copyright
#       notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above copyright
#       notice, this list of conditions and the following disclaimer in the
#       documentation and/or other materials provided with the distribution.
#     * Neither the name of the Georgia Tech Research Corporation nor the
#       names of its contributors may be used to endorse or promote products
#       derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY GEORGIA TECH RESEARCH CORPORATION ''AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL GEORGIA TECH BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#

#  \author Martin Schuster (Healthcare Robotics Lab, Georgia Tech.)

import roslib; roslib.load_manifest('clutter_segmentation')
roslib.load_manifest('pr2_clutter_helper')
import acquire_pr2_data
import rospy

from hrl_lib.util import getTime
print getTime(), 'START'

import processor
import configuration

try:
    from placement import Placement
except ImportError:
    print 'Cannot find placement.py for import. Ignoring.'
from label_object import label_object
from scan_dataset import scan_dataset

import numpy as np
import opencv as cv
import opencv.highgui as hg
print getTime(), 'IMPORTS DONE'


#------------------------
ROBOT = 'desktopScanner'  # alternatives: 'PR2', 'dummyScanner' (equivalent to 'codyRobot')
DATASET_ID = 'pr2_example_0003'
DATA_LOCATION = '/home/jokerman/Desktop/PR2/'
z_above_floor = 0
# Theory: it is imperative to know the "plane" of the floor to do good
# classification in the current scheme.
#ground_plane_rotation = ''
#rot = np.matrix([[1,0,0],[0,1,0],[0,0,1]])
# [Above] DATA_LOCATION must hold the xml training data to load, or an error results.
# [Above] DATASET_ID should be a unique name for files in the subfolder data/.

#-------------------------
'''Number of points to use for feature generation.'''
NUMBER_OF_POINTS = 1800  # some large number; previously only 1000
SCALE = 1
IS_LABELED = True  # What happens if False? Assume no ROI cropping; the step might get skipped altogether. TODO: check.
USE_POLYGONS = True  # this is required right now!  why?
SAVE_INTENSITY_IMAGE = False
CalibrationVisual = True

MODE = 'all_post'  # 'all', 'all_post' (ransac), 'color', 'laser'

displayOn = True
#---------------------------

'''
    @param z_above_floor Approximate height of the Hokuyo above the ground.  Assumed
    to be fixed for now.  On the Cody robot the camera and Hokuyo were at the same z-height.
'''
def create_default_scan_dataset(unique_id=DATASET_ID, z_above_floor=1.32):
    dataset = scan_dataset()
    dataset.table_plane_translation = np.matrix([0,0,0]).T
    # approximate height of the Hokuyo above the ground; assume it to be fixed for now.
    dataset.ground_plane_translation = np.matrix([0,0,z_above_floor]).T
    dataset.ground_plane_rotation = ''  # expects a 3x3 numpy matrix
    #pc.scan_dataset.is_test_set = True
    dataset.is_labeled = IS_LABELED
    dataset.id = unique_id
    return dataset

'''
    Define a region of interest polygon which crops the camera image.
    A smaller size will result in a quicker processing time.
    Takes four crop values as fractions of the image size: xmin, xmax, ymin, ymax,
    each ranging between 0 and 1.  Perhaps this would be more intuitive as
    actual crop values, so that '0' on all values meant the original image and
    '.2' on everything cropped away 20% on all edges.
'''
## MAKE SURE THAT THESE MATCH THE ORIGINAL TAKE: CROP CAMERA IMAGE TO ROI
def define_ROI_polygons(w, h, crop_regions):
    '''
    # Create dummy edges for placement.
    # Note this assumes we will be using a SQUARE region of interest -- not true in general.
    Usage:
        w = image_size.width; h = image_size.height
        crop_region = (0.2,0.1,.2,.1) # % to crop left, crop right, crop top, crop bot
        surface_ROI, list_of_edges = define_ROI_polygons(w, h, crop_region)
        pc.scan_dataset.polygons.append(surface_ROI)
        for p in list_of_edges: pc.scan_dataset.polygons.append(p)
    '''
    (left, right, top, bottom) = crop_regions
    width_min = w * left
    width_max = w * (1-right)
    height_min = h * top
    height_max = h * (1-bottom)
    p = label_object()
    p.set_label('surface')
    p.add_point((width_min,height_min))
    p.add_point((width_min,height_max))
    p.add_point((width_max,height_max))
    p.add_point((width_max,height_min))
    table_surface = p  # A polygon (rectangle) defining the most generous guess of the table surface.
    print 'surface', p.get_points()
    p = label_object()
    p.set_label('edge_down')
    p.add_point((width_min,height_min))
    p.add_point((width_min,height_max))
    p.add_point((width_max,height_max))
    p.add_point((width_max,height_min))
    p.add_point((width_min,height_min))
    list_of_edges = (p,)
    # [Above] Any number of polygons which define the surface border. More can be added.
    print 'edge', p.get_points()
    return table_surface, list_of_edges

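# Worked example (illustrative numbers, assuming a 640x480 camera image):
#   define_ROI_polygons(640, 480, (0.3, 0.3, 0.3, 0.3)) yields a 'surface'
#   rectangle with corners (192, 144) and (448, 336) -- the central 40% of the
#   image in each dimension -- plus one closed 'edge_down' polygon tracing the
#   same rectangle.
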
def show_image(window_name, img, wait=False):
    hg.cvStartWindowThread()
    RESIZABLE = 0
    hg.cvNamedWindow(window_name, RESIZABLE)
    hg.cvShowImage(window_name, img)
    if wait:
        print 'show_image: press any key to continue..'
        cv.highgui.cvWaitKey()

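# Example usage (hypothetical window name):
#   show_image('debug_view', overlay_img, wait=True)  # blocks until a key is pressed
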
#------------------------------------

### CHANGE THIS TO THE DIRECTORY WITH RESULTS FROM CODY:
###   change 'codyRobot' to 'dummyScanner' or the code tries to find camera drivers, etc.
cfg = configuration.configuration(DATA_LOCATION, ROBOT)  #'dummyScanner'
pc = processor.processor(cfg)
pc.features_k_nearest_neighbors = None

# The name of the dataset will allow loading of a saved image / scan.
pc.scan_dataset = create_default_scan_dataset(DATASET_ID, z_above_floor)  #1.32

'''
    PC.LOAD_RAW_DATA()
    Browses for content with the following names:
        data/UNIQUE_ID_image.png
        data/UNIQUE_ID_laserscans.pkl
    Sets in pc: self.img,
                self.laserscans,
                and sets (or zeros) self.image_angle
'''
if cfg.device=='PR2':
    rospy.init_node('rosSegmentCloud')
    print 'STARTING NEW COLLECTION NODE'
    ac = acquire_pr2_data.AcquireCloud()
    #ac.print_test()
    print 'FINISHED COLLECTING, SENDING DATA BACK'

    np_img, pc.pts3d, pc.intensities, camPts, colors, idx_list = ac.return_data_for_segmentation()
        # >> return self.img, self.pts3d, self.intensities,
        #           self.camPts, self.colors, self.camPts_idx
    pc.pts3d = np.array(pc.pts3d)
    N = len(idx_list)
    Nbound = len( idx_list[idx_list] )  # counts the True entries of the boolean mask idx_list
    pc.img = cv.cvCloneImage(np.array(np_img))
    pc.laserscans = []
    pc.scan_indices = np.array(range(N))
    camPts_bound = camPts[:,idx_list]
    pts3d_bound = pc.pts3d[:,idx_list]
    scan_indices_bound = np.array(range(N))[idx_list]
    intensities_bound = pc.intensities[idx_list]
    pc.map = (camPts_bound, camPts, idx_list, pts3d_bound,
              scan_indices_bound, intensities_bound)

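# [Above] pc.map packs the camera/laser correspondences: camPts holds 2D image
# coordinates for all N points, idx_list is a length-N boolean mask selecting
# the in-bounds points, and the *_bound variants keep only the masked columns.
# (These shape notes are inferred from the indexing above, not from upstream docs.)
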
# Create a dummy surface polygon to define the ROI.
# [Below] These polygons will be used to generate a 'mask' cv image and used to
# limit the area of the image where 3D projected points are considered.
image_size = cv.cvGetSize(pc.img)
w = image_size.width
h = image_size.height
crop_region = (0.3,0.3,0.3,0.3)  # % to crop left, crop right, crop top, crop bot
surface_ROI, list_of_edges = define_ROI_polygons(w, h, crop_region)
pc.scan_dataset.polygons.append(surface_ROI)
for p in list_of_edges: pc.scan_dataset.polygons.append(p)

''' [Below] Does 'map_laser_into_cam2D' to get camPts, and bounds on all values.
    Optionally applies translate and rotate relative to a ground plane (if defined).
'''
if not cfg.device=='PR2':
    pc.do_all_point_cloud()
else:
    pc.do_all_point_cloud(map_already_exists=True)

''' [Below] MAKE SURE THESE MATCH THE ORIGINAL SCAN:
    truncate 3D data to a volume (center, and h,w,d).  Numbers are in meters.
    Suggested right-arm values are: np.array([0.55,-0.4,1.0]), 0.6, 0.8, 1.3.
    Note, this increases speed by throwing out points outside the likely table area.
    It is also meant to remove the 'floor' as a possible candidate for table surface fits.
'''
pc.truncate_pointcloud_to_voi(np.array([0.55,-0.4,1.0]), 2, 2, 1.3)

''' Visual to quickly check TF '''
if CalibrationVisual:
    overlay_img = pc.draw_mapped_laser_into_image(pc.map, pc.pts3d, pc.img)
    show_image('calibration_visual', overlay_img)

if USE_POLYGONS:
    print 'do polygon mapping'
    pc.do_polygon_mapping()  #-
    '''Visual based on accurate polygons.'''
    ###pc.display_3d('labels')###

    print 'map laser into image again'
    pc.img_mapped = pc.draw_mapped_laser_into_image(pc.map, pc.pts3d, pc.img)  #-
    # Below: create B & W images for the manually entered 'table edge and center' values we specified.
    pc.create_polygon_images()  # called twice?
    pc.img_mapped_polygons = pc.draw_mapped_laser_polygons_into_image(pc.map, pc.pts3d, pc.img)

if SAVE_INTENSITY_IMAGE:
    print 'Begin optional save of intensity image'
    # This *may* involve assuming a tilting scan exists.
    pc.img_intensities = pc.create_intensity_image(pc.laserscans[0])  #-

else:
    n, m = np.shape(pc.pts3d_bound)  # hack! hack!
    polygon_mapping = list(1 for x in range(m))  # 0 == background/floor/unlabeled/other
    pc.map_polys = polygon_mapping  # Jas: filled with CLUTTER / SURFACE labels later
#+

###pc.display_all_data()###

pc.scan_dataset.ground_plane_normal = np.matrix([0.,0.,1.]).T

''' Comment: nonzero_indices comes from pc.map_polys, which comes from the other labeling technique.
    I have changed the second param below to: generate_and_save_all_neighborhood_indices=True
'''
print 'lucid - about to generate features'
SAVE_UNUSED_NEIGHBORS_TO = False  # Jason had run with True, True.
                                  # Takes a lot longer, obviously!
                                  # Might slightly affect the 'global'
                                  # feature vector on each point.
feature_data = pc.generate_features(NUMBER_OF_POINTS, False, True)  ### previously 1000, False, True
print 'lucid - done generating features'
labels, testresults = pc.load_classifier_and_test_on_dataset(MODE, feature_data)  #'all_post'
###pc.display_3d('all')###
# Labels are now generated --> can be saved to file (see sketch below).

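# [Below] A minimal sketch of saving the labels, kept commented out; the path
# and filename are hypothetical, and 'labels' is assumed picklable as returned above.
#import pickle
#label_file = open(DATA_LOCATION + 'data/' + DATASET_ID + '_labels.pkl', 'wb')
#pickle.dump(labels, label_file)
#label_file.close()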

if False and displayOn:
    pc.display_segmentation_image('all')
    print "TEST_DISPLAY: showing segmentation image |", NUMBER_OF_POINTS, "points.  Please press a key"
    cv.highgui.cvWaitKey(0)
    pc.save_segmentation_image('all')

#feature_data = {'point_indices':[], 'set_size':1}  # dummy dict (-> invalid statistics); the baseline doesn't need real features
#labels, testresults = pc.load_classifier_and_test_on_dataset('baseline', feature_data)
###pc.display_3d('baseline')###

# Test placement, use classification results:
pc.map_polys = pc.load_Classifier_labels(MODE)  #'all_post'
# This function prints: "loading labels: len: ..."

print 'pts3d_bound is size', pc.pts3d_bound.shape
print 'map_polys is size', pc.map_polys.shape

#+-
SHOW_IN_ROS = True
if SHOW_IN_ROS:
    import roslib
    roslib.load_manifest('pr2_lcs_helper')
    from sensor_msgs.msg import PointCloud
    import acquire_pr2_data, rospy
    try:
        rospy.init_node('table_labeler')
    except rospy.ROSException:
        print 'rospy init node has already been called: skipping'
    pub = rospy.Publisher('labeled_cloud', PointCloud)
    while not rospy.is_shutdown():
        ac.pub_cloud_bound()
        colors = pc.map_polys  # actually labels
        acquire_pr2_data.publish_cloud(pub, '/base_footprint', pc.pts3d_bound[:,colors!=0], intensities=colors[colors!=0])
        rospy.sleep(2)
        print 'published'
    # >> publish_cloud(pub, frame, pts3d, intensities=None, labels=None, colors=None):
#+-

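# To inspect the published cloud (a suggestion, not part of the original pipeline):
# run rviz and add a PointCloud display subscribed to the 'labeled_cloud' topic
# advertised by the Publisher above.
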
pc.scan_dataset.ground_plane_rotation = np.matrix([[1.,0.,0.],[0.,1.,0.],[0.,0.,1.]]).T
# Estimate the table plane translation from the mean of classified points:
# surface points are collected, and their average z-value is assumed to be the table height.
data_idx = np.where(np.asarray(pc.map_polys) == processor.LABEL_SURFACE)[0]
data = np.asarray(pc.pts3d_bound)[:,data_idx]
table_height = np.mean(data, axis=1)[2]
pc.scan_dataset.table_plane_translation = np.matrix([0.,0.,table_height]).T

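# A more outlier-robust variant (sketch, not what this script does) would use the
# median z-value of the surface-labeled points instead of the mean:
#   table_height = np.median(data[2,:])
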
#----------------------------------------------------

'''
# Save results in a nice format.
'''
if False:
    # Note: the original used an undefined 'unique_name' here; DATASET_ID matches its intent.
    filename = pc.config.path + 'data/' + DATASET_ID + '_segmentation_results.py'
    roslib.load_manifest('display_stuff'); import save_labeled_cloud
    save_labeled_cloud.save_results(pc.pts3d_bound, pc.intensities, labels,
                                    pc.idx_bound, pc.pts3d)
    save_labeled_cloud.save_feature_data(feature_data['features'],
                                         feature_data['point_indices'])
    save_labeled_cloud.save_map2D(pc.map[0], table_height)  # for placement
''' Comments
    * pc.pts3d_bound  # 3 by Nbound
    * pc.idx_bound  # 1 by Nbound, indexes to re-project information into the full pointcloud.
    * labels  # Nbound; use if loading a classifier on feature_data.
    * pc.map_polys  # --> Nbound, labels created from polygons.
                    #     Overridden by learning-based labels, so they are identical
                    #     at the end of this document.
    * Nbound = len(intensities_bound)  # --> example range(19598)
    * feature_data['features']  # Nbound x 35, actually SLIGHTLY less, only 19541.
    * feature_data['point_indices']
    * Nfeature = feature_data['set_size']  # --> slightly less than the maximum.
                                           # I assume this is at most NUMBER_OF_POINTS.
    - Others:
    * pc.image_labels
    * pc.img_mapped
    * pc.img_mapped_polygons
    * testresults  # Not sure what this is?
    * data_idx  # indexes taken from pts3d_bound ONLY where the label is nonzero
    * data  # 3d values of pts3d_bound where the label is nonzero...
            # However, as a list, so 3x longer.
    * scan_indices  # an artifact from the Hokuyo.  Original acquired line order?
'''

if False:  ####### Placement routine.
    object_height = 0.1
    #SCALE = 1
    resolution = [.01*SCALE, .01*SCALE]  # sets the resolution of the occupancy grid
    print 'NOTE: Resolution is ', 100*resolution[0], 'cm'  ###
    polygon = label_object()
    polygon.add_point([0,0])
    polygon.add_point([0,5*SCALE])
    polygon.add_point([10*SCALE,5*SCALE])
    polygon.add_point([10*SCALE,0])
    ###object_height = 0.1

    print 'creating placement object'
    pl = Placement(pc, resolution)  ### REPLACE WITH MY OWN CLASS DEFINITION WITH FUNCTIONS

    if displayOn:
        placement_point = pl.test_placement(polygon, object_height)
    else:
        placement_point = pl.find_placement(polygon, object_height)  # Add param True to get debug popups

    placement_point -= pc.scan_dataset.ground_plane_translation

    # Assumes 'codyRobot' == ROBOT.
    # This should be optional.
    import mekabot.coord_frames as mcf
    placement_point_global = mcf.thok0Tglobal(placement_point)
    print 'placement point in global coordinate frame:', placement_point_global.T

    if displayOn:
        print 'displaying current placement'
        pl.display_current_placement_in_heightmap()
        cv.highgui.cvWaitKey()

    if displayOn:
        print 'importing mayavi'
        ###from enthought.mayavi import mlab
        print 'showing 3D mayavi'
        ###mlab.show()
        # What does this do?

    print getTime(), 'DONE'
    #print pts3d

    #==============

