test_display_pr2_try1.py
#!/usr/bin/python
#
# Copyright (c) 2010, Georgia Tech Research Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#     * Redistributions of source code must retain the above copyright
#       notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above copyright
#       notice, this list of conditions and the following disclaimer in the
#       documentation and/or other materials provided with the distribution.
#     * Neither the name of the Georgia Tech Research Corporation nor the
#       names of its contributors may be used to endorse or promote products
#       derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY GEORGIA TECH RESEARCH CORPORATION ''AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL GEORGIA TECH BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#

#  \author Martin Schuster (Healthcare Robotics Lab, Georgia Tech.)

import roslib; roslib.load_manifest('laser_camera_segmentation')
roslib.load_manifest('pr2_clutter_helper')
import acquire_pr2_data
import rospy

from hrl_lib.util import getTime
print getTime(), 'START'

import laser_camera_segmentation.processor as processor
import laser_camera_segmentation.configuration as configuration

try: from placement import Placement
except ImportError: print 'Cannot find placement.py for import. Ignoring.'
from labeling.label_object import label_object
from labeling.scan_dataset import scan_dataset

import numpy as np
import opencv as cv

#----------------------
ROBOT = 'PR2'  # options: 'PR2', 'PR2example', 'PR2bag', 'desktopScanner',
               # 'codyRobot', 'dummyScanner' (treated like 'codyRobot')

if 'desktopScanner' == ROBOT:
    DATASET_ID = '2009Nov12_184413'
    # alternatives: '2009Oct30_172052', '2010Jan26_165711', '2010Jan26_164415', '2010Mar13_133853'
    DATA_LOCATION = '/home/jokerman/svn/robot1_data/usr/martin/laser_camera_segmentation/labeling/'
    z_above_floor = 1.32
elif 'codyRobot' == ROBOT or 'dummyScanner' == ROBOT:
    DATASET_ID = '2010May31_213250'
    # alternatives: '2010May31_203324', '2010May31_203158', '2010May31_222032', 'testx'
    DATA_LOCATION = '/home/jokerman/svn/robot1_data/usr/jokerman/Placement/May31_2010_alpha'
    z_above_floor = 1.32
elif 'PR2' == ROBOT or 'PR2example' == ROBOT or 'PR2bag' == ROBOT:
    DATASET_ID = 'pr2_example_0003'
    DATA_LOCATION = '/home/jokerman/Desktop/PR2/'
    z_above_floor = 0
    # Theory: it is imperative to know the "plane" of the floor to do good
    # classification in the current scheme.
    #ground_plane_rotation = ''
    #rot = np.matrix([[1,0,0],[0,1,0],[0,0,1]])
    # [above] DATA_LOCATION must hold the xml training data to load, or an error results.
    # [above] DATASET_ID should be a unique name for files in the subfolder data/.

#----------------------
'''Number of points to use when generating features.'''
NUMBER_OF_POINTS = 7000  # some large number; previously only 1000
SCALE = 1
IS_LABELED = True  # What happens if False? Assume no ROI cropping; might get skipped altogether. TODO: check.


displayOn = True
print getTime(), 'IMPORTS DONE'

###CHANGE THIS TO THE DIRECTORY WITH RESULTS FROM CODY:
###  change 'codyRobot' to 'dummyScanner' or the code tries to find camera drivers, etc.
cfg = configuration.configuration(DATA_LOCATION, ROBOT)  #'dummyScanner'
pc = processor.processor(cfg)

pc.features_k_nearest_neighbors = None

'''
    @param z_above_floor Approximate height of the hokuyo above the ground.  Assumed
    to be fixed for now.  On the Cody robot, the camera and hokuyo were at the same z-height.
'''
def create_default_scan_dataset(unique_id = DATASET_ID, z_above_floor=1.32):
    dataset = scan_dataset()
    dataset.table_plane_translation = np.matrix([0,0,0]).T
    # approximate height of the hokuyo above the ground; assumed fixed for now.
    dataset.ground_plane_translation = np.matrix([0,0,z_above_floor]).T
    dataset.ground_plane_rotation = ''  # expects a 3x3 numpy matrix
    #pc.scan_dataset.is_test_set = True
    dataset.is_labeled = IS_LABELED
    dataset.id = unique_id
    return dataset

# The name of the dataset allows loading of a saved image / scan.
pc.scan_dataset = create_default_scan_dataset( DATASET_ID, z_above_floor)  #1.32

'''
    PC.LOAD_RAW_DATA()
    Browses for content with the following names:
        data/UNIQUE_ID_image.png
        data/UNIQUE_ID_laserscans.pkl
    Sets in pc: self.img,
                self.laserscans,
                and self.image_angle (set or zeroed)
'''
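# [Sketch] Given the naming scheme above, for this run the loader would look
# for files like the following (hypothetical paths, assembled from the
# constants defined earlier in this script):
#   DATA_LOCATION + 'data/' + DATASET_ID + '_image.png'
#   DATA_LOCATION + 'data/' + DATASET_ID + '_laserscans.pkl'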
if cfg.device != 'PR2' and cfg.device != 'PR2example':
    pc.load_raw_data(pc.scan_dataset.id)
    '''If necessary, convert laser scan to pointcloud'''
    (pc.pts3d, pc.scan_indices, pc.intensities) = pc.create_pointcloud(pc.laserscans, True, True)
    # At this stage the cloud has been loaded.  I can overwrite with my own cloud.
elif cfg.device == 'PR2example':
    pc.load_raw_PR2_data()
    roslib.load_manifest('display_stuff')
    import pr2_small_table_cloud_object as pr2_example
    filename = pr2_example.grab_pr2_example_image_name()
    pc.img = cv.highgui.cvLoadImage(filename)
    pc.laserscans = []
    pc.pts3d = pr2_example.grab_pr2_example_points()  # 3xN np array
    pc.intensities = pr2_example.grab_pr2_example_intensities()  # N-length np array
    pc.scan_indices = np.array(range(pc.intensities.size))  # N-length np array
    # [angle] important to know the angle of RANSAC planes (confirm?)
    pc.image_angle = 45  #-45
    # I can also retroactively set pc.config.camTlaser from here  # 4x4 python array

elif cfg.device == 'PR2bag':
    ''' Instructions: $ roslaunch pr2_lcs_helper get_server2_client.launch
                      <Opens up services which will be called>
       $ roslaunch pr2_lcs_helper laser_assembler_optical_frame_launch.launch
       <listens to tilt_scan, provides 'assemble_scan' service, in optical frame
        (which is actually the left eye)>
       $ rosbag play <bag with tf, camera, camera_info, tilt_scan> --clock
       $ rosrun laser_camera_segmentation test_display_pr2_try1.py
    '''
    roslib.load_manifest('pr2_lcs_helper')
    import test_uv as PR2_access  # rename soon
    cloud, roscv_img, pc.pts3d, pc.intensities, camPts, colors, idx_list = \
                                             PR2_access.fetch_PR2_data('all')
    N = len(idx_list)
    Nbound = len( idx_list[idx_list] )  # counts the True entries of the boolean mask
    pc.img = cv.cvCloneImage(np.array(roscv_img) )
    pc.laserscans = []
    pc.scan_indices = np.array( range(N) )

    camPts_bound = camPts[:,idx_list]
    pts3d_bound = pc.pts3d[:,idx_list]
    scan_indices_bound = np.array(range(N))[idx_list]
    intensities_bound = pc.intensities[idx_list]
    pc.map = (camPts_bound, camPts, idx_list, pts3d_bound,
                            scan_indices_bound, intensities_bound)
elif cfg.device == 'PR2':
    rospy.init_node('rosSegmentCloud')
    print 'STARTING NEW COLLECTION NODE'
    ac = acquire_pr2_data.AcquireCloud()
    ac.print_test()
    print 'FINISHED COLLECTING, SENDING DATA BACK'

    np_img, pc.pts3d, pc.intensities, camPts, colors, idx_list = ac.return_data_for_segmentation()
        # >> return self.img, self.pts3d, self.intensities, self.camPts, self.colors, self.camPts_idx
    pc.pts3d = np.array(pc.pts3d)
    N = len(idx_list)
    Nbound = len( idx_list[idx_list] )  # counts the True entries of the boolean mask
    pc.img = cv.cvCloneImage(np.array(np_img) )
    pc.laserscans = []
    pc.scan_indices = np.array( range(N) )
    camPts_bound = camPts[:,idx_list]
    pts3d_bound = pc.pts3d[:,idx_list]
    scan_indices_bound = np.array(range(N))[idx_list]
    intensities_bound = pc.intensities[idx_list]
    pc.map = (camPts_bound, camPts, idx_list, pts3d_bound,
                            scan_indices_bound, intensities_bound)

#hack
#for i in range(len(pc.pts3d[1])): pc.pts3d[1,i] = -pc.pts3d[1,i]

'''
    Define a region-of-interest polygon which crops the camera image.
    A smaller size will result in a quicker processing time.
    Takes four crop values as fractions of the image size: xmin, xmax, ymin, ymax,
    each ranging between 0 and 1.  Perhaps this would be more intuitive as
    actual crop margins, so that 0 on every edge meant the original image and
    0.2 on every edge cropped away 20% on all sides.
'''
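# [Sketch] The margin-based interface suggested above might look like this.
# This is a hypothetical helper, not part of the existing processor API:
# each argument is the fraction to trim from that edge, so all zeros keeps
# the full image and 0.2 everywhere crops 20% from every side.
def crop_margins_to_bounds(image_size, left=0.0, right=0.0, top=0.0, bottom=0.0):
    width_min = image_size.width * left
    width_max = image_size.width * (1.0 - right)
    height_min = image_size.height * top
    height_max = image_size.height * (1.0 - bottom)
    return width_min, width_max, height_min, height_max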
#create a dummy surface polygon to define the ROI
image_size = cv.cvGetSize(pc.img)
##MAKE SURE THAT THESE MATCH THE ORIGINAL TAKE: CROP CAMERA IMAGE TO ROI
width_min = image_size.width * 0.0    # alternative: 0.3
width_max = image_size.width * .9     # alternatives: 1, 0.95
height_min = image_size.height * 0.0  # alternative: 0.3
height_max = image_size.height * .9   # alternatives: 1, 0.95
#create dummy edges for placement
# Note this assumes we will be using a SQUARE region of interest -- not true in general.
def define_ROI_polygons(width_min, width_max, height_min, height_max):
    p = label_object()
    p.set_label('surface')
    p.add_point((width_min,height_min))
    p.add_point((width_min,height_max))
    p.add_point((width_max,height_max))
    p.add_point((width_max,height_min))
    table_surface = p  # a polygon (rectangle) defining the most generous guess of the table surface.
    print 'surface', p.get_points()
    p = label_object()
    p.set_label('edge_down')
    p.add_point((width_min,height_min))
    p.add_point((width_min,height_max))
    p.add_point((width_max,height_max))
    p.add_point((width_max,height_min))
    p.add_point((width_min,height_min))
    list_of_edges = (p,)
    # [Above] Any number of polygons which define the surface border. More can be added (see the sketch below).
    print 'edge', p.get_points()
    return table_surface, list_of_edges

# [Below] These polygons will be used to generate a 'mask' cv image and used to
# limit which 3D points are considered when labeling.
surface_ROI, list_of_edges = define_ROI_polygons(width_min, width_max,
                                                 height_min, height_max)
pc.scan_dataset.polygons.append(surface_ROI)
for p in list_of_edges: pc.scan_dataset.polygons.append(p)
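# [Sketch] Additional border polygons can be appended the same way; a
# hypothetical second edge built with the same label_object API:
#   q = label_object()
#   q.set_label('edge_down')
#   q.add_point((width_min, height_max)); q.add_point((width_max, height_max))
#   pc.scan_dataset.polygons.append(q)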
''' [Below] Runs 'map_laser_into_cam2D' to get camPts, and computes the bounded
    versions of all values.  Optionally applies a translation and rotation
    relative to a ground plane (if defined).
'''
if cfg.device != 'PR2':
    pc.do_all_point_cloud()
else:
    pc.do_all_point_cloud(map_already_exists=True)

''' [Below] MAKE SURE THESE MATCH THE ORIGINAL SCAN:
    truncate 3D data to a volume of interest (center, and h, w, d).  Numbers are in meters.
    Suggested right-arm values are: np.array([0.55,-0.4,1.0]), 0.6, 0.8, 1.3.
    Note, this increases speed by throwing out points outside the likely table area.
    It is also meant to remove the 'floor' as a possible candidate for table surface fits.
'''
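# [Sketch] Using the suggested right-arm volume from the note above would read:
#   pc.truncate_pointcloud_to_voi(np.array([0.55, -0.4, 1.0]), 0.6, 0.8, 1.3)
# The actual call below keeps a wider 1 x 1 x 1.3 m volume instead.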
pc.truncate_pointcloud_to_voi(np.array([0.55,-0.4,1.0]), 1, 1, 1.3)
print 'finished truncate_pointcloud in top level function'
#map polygons after translating

''' Visual check: quickly verify the TF by overlaying the mapped laser points
    onto the camera image.
'''
if True:
    print 'overlay_img soon to be obtained'
    overlay_img = pc.draw_mapped_laser_into_image(pc.map, pc.pts3d, pc.img)
    print 'overlay_img obtained'
    import opencv.highgui as hg
    hg.cvStartWindowThread()
    hg.cvNamedWindow('ww',0)
    hg.cvShowImage('ww',overlay_img)
    print 'wait for key - line 264'
    cv.highgui.cvWaitKey()
print 'finished showing mapped_laser image'

if True:
    print 'do polygon mapping'
    pc.do_polygon_mapping()  #-
    ###pc.display_3d('labels')### This is based on accurate polygons.

    print 'map laser into image again'
    pc.img_mapped = pc.draw_mapped_laser_into_image(pc.map, pc.pts3d, pc.img)  #-
    # Below: create B & W images corresponding to the artificial 'table edge and center' values we specified.
    # Below: REMOVED DIAGNOSTIC IMAGES. cvPolyLine and cvFillPoly were acting up in OpenCV 2.0.
    pc.create_polygon_images()  # called twice?  #-
    pc.img_mapped_polygons = pc.draw_mapped_laser_polygons_into_image(pc.map, pc.pts3d, pc.img)  #-
print 'Begin optional save of intensity image'
if cfg.device != 'PR2' and cfg.device != 'PR2example':
    pc.img_intensities = pc.create_intensity_image(pc.laserscans[0])  #-
#+
else:
    n, m = np.shape(pc.pts3d_bound)  # hack! hack!
    polygon_mapping = list(1 for x in range(m))  # 0 == background/floor/unlabeled/other
    pc.map_polys = polygon_mapping  # Jas: filled with CLUTTER / SURFACE labels later
#+

###pc.display_all_data()###

pc.scan_dataset.ground_plane_normal = np.matrix([0.,0.,1.]).T

''' Comment: nonzero_indices is derived from pc.map_polys, which comes from the
    other (polygon-based) labeling technique.  I have changed the second param
    below to: generate_and_save_all_neighborhood_indices=True.
'''
print 'lucid - about to generate features'
SAVE_UNUSED_NEIGHBORS_TO = False  # Jason had run with True, True.
                                  # Takes a lot longer, obviously!
                                  # Might slightly affect the 'global'
                                  # feature vector on each point.
feature_data = pc.generate_features(NUMBER_OF_POINTS, False, True)  ### previously 1000, False, True
print 'lucid - done generating features'
labels, testresults = pc.load_classifier_and_test_on_dataset('all', feature_data)  #'all_post'
###pc.display_3d('all')###
#Labels are now generated --> can they be saved to file?

if displayOn:
    pc.display_segmentation_image('all')
    print "TEST_DISPLAY: showing segmentation image |", NUMBER_OF_POINTS, "points.  Please press a key"
    cv.highgui.cvWaitKey(0)
pc.save_segmentation_image('all')

#feature_data = {'point_indices':[], 'set_size':1}  # dummy dict (-> invalid statistics); the baseline doesn't need real features
#labels, testresults = pc.load_classifier_and_test_on_dataset('baseline', feature_data)
###pc.display_3d('baseline')###

#test placement, using the classification results:
pc.map_polys = pc.load_Classifier_labels('all')  #'all_post'
#this function prints: "loading labels: len: ..."

print 'pts3d_bound is size', pc.pts3d_bound.shape
print 'map_polys is size', np.shape(pc.map_polys)

#+-
SHOW_IN_ROS = True
if SHOW_IN_ROS:
    import roslib
    roslib.load_manifest('pr2_lcs_helper')
    from sensor_msgs.msg import PointCloud
    import acquire_pr2_data, rospy
    try: rospy.init_node('whatever')
    except: print 'rospy init_node has already been called: skipping'
    pub = rospy.Publisher('temp_pub', PointCloud)
    while not rospy.is_shutdown():
        colors = pc.map_polys
        acquire_pr2_data.publish_cloud(pub, '/base_footprint', pc.pts3d_bound[:,colors!=0], intensities=colors[colors!=0])
        rospy.sleep(1)
        print 'published'
    # >> publish_cloud(pub, frame, pts3d, intensities=None, labels=None, colors=None):
#+-

pc.scan_dataset.ground_plane_rotation = np.matrix([[1.,0.,0.],[0.,1.,0.],[0.,0.,1.]]).T
#estimate the table plane translation from the mean of the classified points:
#surface points are collected, and their average z-value is taken as the table height
data_idx = np.where(np.asarray(pc.map_polys) == processor.LABEL_SURFACE)[0]
data = np.asarray(pc.pts3d_bound)[:,data_idx]
table_height = np.mean(data, axis=1)[2]
pc.scan_dataset.table_plane_translation = np.matrix([0.,0.,table_height]).T

#----------------------------------------------------

'''
# Save results in a nice format.
'''
if False:
    filename = pc.config.path + 'data/' + pc.scan_dataset.id + '_segmentation_results.py'
    roslib.load_manifest('display_stuff'); import save_labeled_cloud
    save_labeled_cloud.save_results(pc.pts3d_bound, pc.intensities, labels,
                                    pc.idx_bound, pc.pts3d)
    save_labeled_cloud.save_feature_data(feature_data['features'],
                                         feature_data['point_indices'])
    save_labeled_cloud.save_map2D(pc.map[0], table_height)  # for placement
''' Comments
    * pc.pts3d_bound  # 3 by Nbound
    * pc.idx_bound  # 1 by Nbound, indexes to re-project information into the full pointcloud.
    * labels  # Nbound; use when loading a classifier on feature_data.
    * pc.map_polys  # --> Nbound, labels created from polygons.
                    #     Overridden by the learning-based labels, so they are identical
                    #     at the end of this script.
    * Nbound = len(intensities_bound)  # --> example: range(19598)
    * feature_data['features']  # Nbound x 35; actually SLIGHTLY fewer, only 19541.
    * feature_data['point_indices']
    * Nfeature = feature_data['set_size']  # --> slightly less than the maximum.
                                           # I assume this is at most NUMBER_OF_POINTS.
    -Others:
    * pc.image_labels
    * pc.img_mapped
    * pc.img_mapped_polygons
    * testresults  # Not sure what this is?
    * data_idx  # indices into pts3d_bound ONLY where the label is nonzero
    * data  # 3d values of pts3d_bound where the label is nonzero...
            # however as a list, so 3x longer.
    * scan_indices  # an artifact from the hokuyo; the original acquired line order?
'''
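# [Sketch] Re-projecting the per-point labels back into the full cloud via
# pc.idx_bound, as described in the comments above.  This assumes idx_bound
# indexes the N columns of pc.pts3d; hypothetical and untested here:
#   full_labels = np.zeros(pc.pts3d.shape[1])
#   full_labels[np.asarray(pc.idx_bound).ravel()] = labels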

if False:  ####### Placement routine.
    object_height = 0.1
    #SCALE = 1
    resolution = [.01*SCALE, .01*SCALE]  # sets the resolution of the occupancy grid
    print 'NOTE: Resolution is ', 100*resolution[0], 'cm'  ###
    polygon = label_object()
    polygon.add_point([0,0])
    polygon.add_point([0,5*SCALE])
    polygon.add_point([10*SCALE,5*SCALE])
    polygon.add_point([10*SCALE,0])
    ###object_height = 0.1

    print 'creating placement object'
    pl = Placement(pc, resolution)  ### REPLACE WITH MY OWN CLASS DEFINITION WITH FUNCTIONS

    if displayOn:
        placement_point = pl.test_placement(polygon, object_height)
    else:
        placement_point = pl.find_placement(polygon, object_height)  # add param True to get debug popups

    placement_point -= pc.scan_dataset.ground_plane_translation

    # Assumes 'codyRobot'==ROBOT
    # This should be optional
    import mekabot.coord_frames as mcf
    placement_point_global = mcf.thok0Tglobal(placement_point)
    print 'placement point in global coordinate frame:', placement_point_global.T


    if displayOn:
        print 'displaying current placement'
        pl.display_current_placement_in_heightmap()
        cv.highgui.cvWaitKey()

    if displayOn:
        print 'importing mayavi'
        ###from enthought.mayavi import mlab
        print 'showing 3D mayavi'
        ###mlab.show()
        # What does this do?


    print getTime(), 'DONE'
    #print pts3d

    #==============

