LOC_ARTAGS_SIMPLE = "/home/martin/artags/ARToolKitPlus_2.1.1/bin/simple"
LOC_ARTAGS_LIB = "/home/martin/artags/ARToolKitPlus_2.1.1/lib:/usr/local/lib:"

import roslib; roslib.load_manifest('opencv2')
import cv as roscv
import opencv as cv
from opencv import highgui

from gaussian_histogram_features import gaussian_histogram_features
from boosted_tree_classifier import boosted_tree_classifier
from baseline_classifier import baseline_classifier

import scans_database
import ransac
import hrl_lib.util as ut
import util as u2

import numpy as np
import math, re, copy, subprocess

import tf

import matplotlib.pyplot as plt
import scipy.stats as stats
import scipy.spatial.kdtree as kdtree

try:
    from enthought.mayavi import mlab
    MLAB_LOADED = True
except ImportError:
    print "Warning: Failed to import enthought.mayavi.mlab. 3D visualization calls such as draw_plane() will be ignored."
    MLAB_LOADED = False


LABEL_NONE = 0
LABEL_SURFACE = 1
LABEL_CLUTTER = 2
LABEL_ROI = 3


def get_voi_indices_fancy(pts, poi, depth, width, height):
    '''Return a boolean mask selecting the points of pts (3xN) that lie inside an
    axis-aligned box of the given depth (x), width (y) and height (z),
    centered on the point of interest poi.'''
    pts = np.asarray(pts)
    # a point is kept if it lies within +/- half the box extent along each axis
    conditions = np.multiply(np.multiply(np.multiply(np.multiply(np.multiply(pts[0] < poi[0]+depth/2.0, pts[0] > poi[0]-depth/2.0),
                    pts[1] < poi[1]+width/2.0), pts[1] > poi[1]-width/2.0),
                    pts[2] < poi[2]+height/2.0), pts[2] > poi[2]-height/2.0)
    return conditions
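
# Minimal usage sketch for get_voi_indices_fancy (hypothetical values, not from a real scan):
#   pts = np.random.rand(3, 1000)                      # 3xN point cloud
#   poi = np.array([0.8, 0.0, 1.0])                    # center of the box of interest
#   mask = get_voi_indices_fancy(pts, poi, 1.0, 1.4, 2.0)
#   pts_in_voi = pts[:, mask]                          # keep only points inside the box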

def rotate_to_plane(normal, points):
    '''Rotate points (3xN) so that the given plane normal is aligned with +z.'''
    n = np.copy(normal)
    n = n/np.linalg.norm(n)
    up = np.matrix([0,0,1]).T
    axis = np.matrix(np.cross(up.T, n.T)).T
    axis = axis / np.linalg.norm(axis)
    angle = np.arccos((up.T * n)[0,0])
    rot = tf.transformations.rotation_matrix(angle,axis)
    rot = rot[0:3,0:3].T

    pts_rotated = np.asarray(rot * np.asmatrix(points))
    return pts_rotated

def apply_transform_matrix(T, p):
    '''Multiply a 4x4 transform matrix T by a 3xN point or point set p.
    xyzToHomogenous converts the 3xN matrix to a 4xN matrix in homogeneous
    coordinates first; the result is mapped back to 3xN.
    '''
    pn = T * xyzToHomogenous(p)
    return pn[0:3] / pn[3]


def xyzToHomogenous(v, floating_vector=False):
    """Convert a 3xN matrix to a 4xN matrix in homogeneous coordinates.
    (Redundantly defined in hrl_lib.transforms.py, as part of gt-ros-pkg.)
    For position vectors the fourth row is 1; for free (floating) vectors it is 0.
    """
    if floating_vector == False:
        return np.row_stack((v, np.ones(v.shape[1])))
    else:
        return np.row_stack((v, np.zeros(v.shape[1])))
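
# Quick illustration (hypothetical points):
#   v = np.matrix([[1.0, 2.0], [0.0, 0.5], [3.0, 3.0]])  # two 3D points as a 3x2 matrix
#   xyzToHomogenous(v)                                   # -> 4x2 matrix, last row all 1
#   xyzToHomogenous(v, floating_vector=True)             # -> 4x2 matrix, last row all 0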

def draw_plane(table_plane_normal, table_plane_point):
    '''Draw a semi-transparent quad approximating the table plane in mayavi.'''
    if not MLAB_LOADED: return

    table_plane_normal = np.copy(table_plane_normal)
    table_plane_normal /= np.linalg.norm(table_plane_normal)

    def intersect(line, plane_normal, plane_point):
        # scale factor gamma such that gamma*line lies on the plane:
        # dot(n, gamma*line) == dot(n, plane_point)
        gamma = (np.dot(plane_normal.T, plane_point)/np.dot(plane_normal.T, line))[0,0]
        return gamma * line

    p1 = intersect(np.matrix([[0,1,1]]).T,table_plane_normal,table_plane_point)
    p2 = intersect(np.matrix([[0,-1,1]]).T,table_plane_normal,table_plane_point)
    p3 = intersect(np.matrix([[1,1,1]]).T,table_plane_normal,table_plane_point)
    p4 = intersect(np.matrix([[1,-1,1]]).T,table_plane_normal,table_plane_point)

    mlab.triangular_mesh([[float(p1[0]),float(p2[0]),float(p3[0]),float(p4[0])]],
                         [[float(p1[1]),float(p2[1]),float(p3[1]),float(p4[1])]],
                         [[float(p1[2]),float(p2[2]),float(p3[2]),float(p4[2])]],
                         [(0,1,2),(1,2,3)], opacity=.2, colormap='gray')

'''
Key Functions:
    create_pointcloud --> creates a pts3d cloud in the frame of the hokuyo from hokuyo laserscans.
        - Depends heavily on the hokuyo library classes to do this. Preserves the intensity field.

Key Variables:
    map = (camPts_bound, camPts, idx_list, pts3d_bound, scan_indices_bound, intensities_bound)
    map2d = first part of map = camPts_bound = (x,y) list of length Nbound
        * originally map[0] == camPts_bound had a '1' in the 3rd dimension.
    map_polys == poly_labels_bound
    pts3d
    laserscans
    intensities

    camPts_bound
    idx_list,            # relative to the full-length lists: intensity or pts3d
    pts3d_bound,         # 3xNbound
    scan_indices_bound,  # length Nbound
    intensities_bound    # length Nbound
'''
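
# Sketch of how the 'map' tuple described above is typically unpacked (names as above):
#   camPts_bound, camPts, idx_list, pts3d_bound, scan_indices_bound, intensities_bound = map
#   u, v = map2d[0,i], map2d[1,i]   # image coordinates of the i-th bounded 3D point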


class processor(object):
    '''
    Loads scans from a database, projects them into the camera image, labels them,
    and runs the surface/clutter classifiers on the resulting features.
    '''
    pts = None

    classifiers = None

    features = None

    feature_type = 'gaussian_histograms'
    feature_neighborhood = 20
    feature_radius = 0.03

    classifier_training_size = 900000000

    point_of_interest = np.array([0.8,0.0,1.0])
    voi_width = 1.4

    ground_exclude_threshold = 0.3

    features_k_nearest_neighbors = 50

    def __init__(self, configuration):
        '''
        Constructor
        '''
        self.features = gaussian_histogram_features(self)
        self.classifiers = {'range' : boosted_tree_classifier(self, 'range'),
                            'color' : boosted_tree_classifier(self, 'color'),
                            'all' : boosted_tree_classifier(self, 'all'),
                            'baseline' : baseline_classifier(self, 'baseline')}

        self.config = configuration

        self.artag_transformation = np.array([])

        self.img_artag = False
        self.map = None

        try:
            self.scans_database = scans_database.scans_database()
            self.scans_database.load(self.config.path,'database.pkl')
        except IOError:
            print 'WARNING: processor::init: no database found, none loaded'

    '''
    This function creates a pointcloud from laser-scan line (distance) and angle data.
    The conversion relies on a method defined in hrl_tilting_hokuyo.py. This is NOT
    imported by default because the PR2 does not require this conversion (it has its own).
    This function also checks self.config.device in order to decide whether to
    flip the y-axis as a last step.

    @param reject_zero_ten: Set reject_zero_ten to False to not remove any points and
    preserve the intensity->3d mapping. Be aware that points filtered by remove-graze-effect
    have range values of +/-1000 mapped into 3d-space.
    '''
    def create_pointcloud(self,laserscans, reject_zero_ten=True, get_intensities=True):
        import roslib; roslib.load_manifest('hrl_tilting_hokuyo')
        import hrl_tilting_hokuyo.processing_3d as hokuyo_p3d

        pts = np.array([[],[],[]])
        intensities = np.array([[]])
        scan_indices = np.array([])
        for i, laserscan in enumerate(laserscans):
            pos_list,scan_list = laserscan

            if get_intensities:
                new_pts, new_intensities = hokuyo_p3d.generate_pointcloud(pos_list, scan_list,
                                  math.radians(-180), math.radians(180),
                                  l1=self.config.thok_l1, l2=self.config.thok_l2, save_scan=False,
                                  max_dist=np.Inf, min_dist=-np.Inf,get_intensities=True, reject_zero_ten=reject_zero_ten)
                intensities = np.hstack((intensities,new_intensities))
            else:
                new_pts = hokuyo_p3d.generate_pointcloud(pos_list, scan_list,
                                  math.radians(-180), math.radians(180),
                                  l1=self.config.thok_l1, l2=self.config.thok_l2, save_scan=False,
                                  max_dist=np.Inf, min_dist=-np.Inf,get_intensities=False, reject_zero_ten=reject_zero_ten)

            pts = np.hstack((pts,new_pts))

            # tag each point with the index of the scan it came from
            n, m = new_pts[0,:].shape
            scan_indices = np.hstack((scan_indices,np.ones(m)*i))

        if self.config.device == 'codyRobot' or self.config.device == 'dummyScanner':
            pts[1,:] = -pts[1,:] #flip the y-axis

        return (pts, scan_indices, np.asarray(intensities)[0])

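    # Minimal usage sketch (assumes self.laserscans was filled by load_raw_data):
    #   (self.pts3d, self.scan_indices, self.intensities) = self.create_pointcloud(self.laserscans)
    #   pts3d is 3xN; scan_indices and intensities are length-N arrays aligned with it.
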
    ''' TODO: Consistency - stop using 'map'
    Internal: Modifies length of: pts3d_bound, intensities_bound, map2d == camPts_bound.
    Modifies idx_list (still has length N, but now only Nbound_new 'True' entries).
        idx_list - length N
        idx_voi - only length Nbound
        idx_list = way to get from the full-length lists to their _bound versions.

    Truncates the full pts3d (generally much larger than the table in span, shape 3xN)
    to a tighter cubic region called pts3d_bound, saved as a numpy array of shape 3xNbound_new.
    '''
    def truncate_pointcloud_to_voi(self,poi, depth, width, height):
        pts = np.asarray(self.pts3d_bound)
        _, Nbound = pts.shape
        (x,y,z) = (0,1,2)
        idx_voi = (pts[x] < poi[x]+depth/2.0) & (pts[x] > poi[x]-depth/2.0) \
                & (pts[y] < poi[y]+width/2.0) & (pts[y] > poi[y]-width/2.0) \
                & (pts[z] < poi[z]+height/2.0) & (pts[z] > poi[z]-height/2.0)

        self.idx_voi = idx_voi
        self.pts3d_bound = self.pts3d_bound[:,idx_voi]
        _, Nbound_new = np.shape(self.pts3d_bound)
        print 'Truncated pts3d_bound by VOI from',Nbound,'to',Nbound_new,'points'
        if Nbound_new == 0:
            print "WARNING: all 3D points removed by VOI cropping. Exiting."
            import sys; sys.exit()

        self.intensities_bound = self.intensities_bound[idx_voi]
        self.camPts_bound = self.camPts_bound[:,idx_voi]
        self.map2d = self.camPts_bound

        # fold the VOI crop back into the full-length index list: entries that were
        # True (inside camera bounds) stay True only if they are also inside the VOI
        self.idx_list[self.idx_list] = self.idx_list[self.idx_list] & idx_voi

    def get_ransac_table_plane_from_labels(self):
        '''Fit a plane to the points hand-labeled as surface, using RANSAC.
        Returns the model (point, normal) or False if no acceptable fit was found.'''
        labels = self.map_polys
        model = ransac.PlaneLeastSquaresModel(False)
        data_idx = np.where(np.asarray(labels) == LABEL_SURFACE)[0]
        data = np.asarray(self.pts3d_bound).T[data_idx]
        print 'About to run RANSAC plane with %i labeled points' %(len(data))

        try:
            # n=3 samples per iteration, k=1000 iterations, inlier threshold t=0.08,
            # require d=len(data_idx)/4 inliers for a model to be accepted
            model = ransac.ransac(data,model,
                                  3, 1000, 0.08, len(data_idx)/4,
                                  debug=False,return_all=False)
        except ValueError:
            print '[processor] Best ransac plane did not fit threshold criteria listed in processor.py'
            return False
        print 'ransac: model',model
        return model

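    # Usage sketch (mirrors calculate_and_save_ground_and_table_transformations_for_all_scans):
    #   plane = self.get_ransac_table_plane_from_labels()
    #   if plane != False:
    #       table_plane_point = np.matrix(plane[0]).T    # a point on the fitted plane
    #       table_plane_normal = np.matrix(plane[1]).T   # the fitted plane normal
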
    def calculate_and_save_ground_and_table_transformations_for_all_scans(self, use_RANSAC_table_plane = True):
        self.scans_database.add_attribute_to_every_dataset('ground_plane_rotation')
        self.scans_database.add_attribute_to_every_dataset('ground_plane_translation')
        self.scans_database.add_attribute_to_every_dataset('table_plane_translation')
        self.scans_database.save()
        self.scan_dataset = self.scans_database.get_dataset(0)
        while False != self.scan_dataset:
            if self.scan_dataset.is_labeled:
                self.load_data(self.scan_dataset.id, False)
                self.process_raw_data(False) #don't translate

                rotation = np.matrix([[1,0,0],[0,1,0],[0,0,1]])
                rotation = rotate_to_plane(self.scan_dataset.ground_plane_normal, rotation)
                ransac_plane = False
                if use_RANSAC_table_plane:
                    ransac_plane = self.get_ransac_table_plane_from_labels()
                if False != ransac_plane:
                    table_plane_point = np.matrix(ransac_plane[0]).T
                    table_plane_normal = np.matrix(ransac_plane[1]).T
                    rotation = rotate_to_plane(table_plane_normal, rotation)
                    table_plane_point = rotate_to_plane(table_plane_normal,table_plane_point)
                elif False == use_RANSAC_table_plane:
                    print "WARNING: RANSAC disabled for table plane estimation!"
                    print "using 3pts-groundplane and manually labeled surface height instead"
                else:
                    print "ERROR: NO RANSAC PLANE FOUND!"
                    print "using 3pts-groundplane and manually labeled surface height instead"

                self.load_data(self.scan_dataset.id, True)
                self.scans_database.set_internal_pointer_to_dataset(self.scan_dataset.id)

                self.scan_dataset.ground_plane_rotation = rotation
                self.scan_dataset.ground_plane_translation = self.get_groundplane_translation()
                if False != ransac_plane:
                    table_plane_point += self.scan_dataset.ground_plane_translation
                    self.scan_dataset.table_plane_translation = np.matrix([0.0,0.0,table_plane_point[2]]).T
                else:
                    #table height from manual labels
                    self.scan_dataset.table_plane_translation = np.matrix([0.0,0.0,float(self.scan_dataset.surface_height) / 100.0]).T
                print 'rot', self.scan_dataset.ground_plane_rotation
                print 'gp transl',self.scan_dataset.ground_plane_translation
                print 'table transl', self.scan_dataset.table_plane_translation
                self.scans_database.save()
            self.scan_dataset = self.scans_database.get_next_dataset()

    def get_mean_table_plane_height_from_labels(self):
        idx = np.where(np.asarray(self.map_polys) == LABEL_SURFACE)
        return np.mean(self.pts3d_bound[2,idx])


    def map_image_point_on_3d_plane(self, point, ground_plane_rotation, ground_plane_translation, table_plane_translation):
        '''Cast a ray through a 2D image point and intersect it with the table plane,
        returning the corresponding 3D point.'''
        point = np.matrix(np.copy(point))
        ground_plane_rotation = np.matrix(np.copy(ground_plane_rotation))
        ground_plane_translation = np.matrix(np.copy(ground_plane_translation))
        table_plane_translation = np.matrix(np.copy(table_plane_translation))

        # shift the image point relative to the camera's optical center
        point[0][0] -= self.config.cam_centers[0]
        point[1][0] -= self.config.cam_centers[1]

        # invert the camera projection to get a viewing ray
        cam_proj_inv = np.copy(self.config.cam_proj_mat)
        cam_proj_inv[0:3,0:3] = np.linalg.inv(cam_proj_inv[0:3,0:3])
        line = cam_proj_inv * xyzToHomogenous(point)

        # transform the ray from the camera frame into the laser frame
        if self.config.device == 'codyRobot':
            import mekabot.coord_frames as mcf
            line = mcf.thok0Tglobal(mcf.globalTutmcam0(line,self.image_angle))
        elif self.config.device == 'dummyScanner':
            import dummyScanner_coord_frames as dcf
            line = dcf.thok0Tglobal(dcf.globalTutmcam0(line,self.image_angle))
        else:
            laser_to_camera = self.config.camTlaser
            camera_to_laser = np.copy(laser_to_camera)
            camera_to_laser = np.linalg.inv(camera_to_laser)
            line = apply_transform_matrix( camera_to_laser, line)

        # rotate into the groundplane-aligned frame
        line = ground_plane_rotation * line

        # express the table plane point relative to the (untranslated) cloud
        table_plane_point = table_plane_translation - ground_plane_translation

        table_normal_up = np.matrix([[0,0,1]]).T

        # ray/plane intersection: find gamma with dot(n, gamma*line) == dot(n, plane_point)
        gamma = (np.dot(table_normal_up.T, table_plane_point)/np.dot(table_normal_up.T, line))[0,0]
        point3d = gamma * line

        point3d += ground_plane_translation
        return point3d

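    # Usage sketch (hypothetical pixel; the transforms come from a labeled scan_dataset):
    #   p3d = self.map_image_point_on_3d_plane(np.matrix([[320.0],[240.0]]),
    #             self.scan_dataset.ground_plane_rotation,
    #             self.scan_dataset.ground_plane_translation,
    #             self.scan_dataset.table_plane_translation)
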
    '''
    This function has different behavior based on the robot platform specified.
    This is because it needs the camera properties info, as well as static transforms.
    * Transforms 3D points to the coord frame of the camera (camTlaser)
    * Sets camera points from 3D points
    * Crops 3D points to the "bounds" of the camera image. All "bounded" lists are cropped in this way.

    Inputs: pts3d, img, scan_indices, intensities
    Outputs: camPts_bound,  #list of (u,v) coords of each 3d point (2xNbound numpy matrix)
             camPts,        #2xN numpy matrix
             idx_list,      #relative to full pts3d
             pts3d_bound,   #cropped to a 3xNbound array where Nbound is the number
                            #of 3d points within the frame of the camera
             scan_indices_bound,
             intensities_bound
    *Note: some material borrowed from Travis-scanr
    '''
    def map_laser_into_cam_2D(self, pts3d, img, scan_indices, intensities):
        w = img.width
        h = img.height

        if self.config.device == 'PR2':
            print 'Warning, still working on best way to do this'
            XformPts = apply_transform_matrix( self.config.camTlaser, pts3d)
        elif self.config.device == 'codyRobot':
            import mekabot.coord_frames as mcf
            XformPts = mcf.utmcam0Tglobal(mcf.globalTthok0(pts3d),self.image_angle)
        elif self.config.device == 'dummyScanner':
            import dummyScanner_coord_frames as dcf
            XformPts = dcf.utmcam0Tglobal(dcf.globalTthok0(pts3d),self.image_angle)
        else:
            XformPts = apply_transform_matrix( self.config.camTlaser, pts3d)

        # perspective projection into pixel coordinates
        (x,y) = (0,1)
        camPts = self.config.cam_proj_mat * xyzToHomogenous(XformPts)
        camPts = camPts / camPts[2]

        camPts[x] = camPts[x] + self.config.cam_centers[x]
        camPts[y] = camPts[y] + self.config.cam_centers[y]
        camPts = np.matrix( np.round(camPts), 'int')

        # keep only points that project inside the image (all conditions must hold)
        conditions = (camPts[x] >= 0) & (camPts[x] < w) & (camPts[y] >= 0) & (camPts[y] < h)
        idx_list = conditions

        self.camPts = camPts[0:2]
        self.camPts_bound = camPts[:, idx_list]
        self.pts3d_bound = pts3d[:, idx_list]
        self.scan_indices_bound = scan_indices[idx_list]
        self.intensities_bound = intensities[idx_list]
        self.idx_list = idx_list
        self.map2d = self.camPts_bound

        return self.camPts_bound, self.camPts, idx_list, self.pts3d_bound, self.scan_indices_bound, self.intensities_bound

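    # Minimal call sketch, after load_raw_data() and create_pointcloud():
    #   camPts_bound, camPts, idx_list, pts3d_bound, scan_indices_bound, intensities_bound = \
    #       self.map_laser_into_cam_2D(self.pts3d, self.img, self.scan_indices, self.intensities)
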
    def draw_mapped_laser_into_image(self,map,pts3d,img):
        '''colormap points in image
        Note: map[2] == self.idx_list #array of indices inside bounds
        '''
        imgTmp = cv.cvCloneImage(img)
        imNP = u2.cv2np(imgTmp,format='BGR')

        # paint each projected laser point green
        n,m = self.map2d.shape
        color = (0,255,0)
        for i in range(0,m):
            imNP[self.map2d[1,i],self.map2d[0,i],:] = color

        img_mapped = u2.np2cv(imNP)
        return img_mapped

    def draw_mapped_masks_into_image(self, feature_type, show_clutter=False):
        '''@author: Jason Okerman
        Draws either clutter or surface points on a black backdrop, for use as masks.
        '''
        if feature_type in ('range','color','all','all_post','baseline'):
            labels = self.load_Classifier_labels(feature_type)
        elif feature_type == 'labels':
            labels = self.map_polys
        else:
            print ut.getTime(), 'ERROR: draw_mapped_masks_into_image(): unsupported feature type:', feature_type
            return cv.cvCloneImage(self.img)

        try:
            img_mapped = highgui.cvLoadImage(self.config.path+'/data/'+'blank.png')
        except:
            print "blank.png does not exist in data directory"
            img_mapped = cv.cvCloneImage(self.img)

        # cycle the circle colors so dense regions remain distinguishable
        n,m = self.map2d.shape
        red = 100
        green = 0
        blue = 0
        for i in range(0,m):
            if labels[i] == LABEL_SURFACE and not show_clutter:
                if red < 255:
                    red += 1
                else:
                    red = 100
                if green < 250:
                    green += 25
                else:
                    green = 100
                if blue < 250:
                    blue += 50
                cv.cvCircle(img_mapped, cv.cvPoint(self.map2d[0,i],self.map2d[1,i]),2, cv.cvScalar(blue,red,green,0),3)
            elif labels[i] == LABEL_CLUTTER and show_clutter:
                if red < 255:
                    red += 1
                else:
                    red = 0
                if green < 255:
                    green += 20
                cv.cvCircle(img_mapped, cv.cvPoint(self.map2d[0,i],self.map2d[1,i]),2, cv.cvScalar(0, red, green, 0),3)

        return img_mapped

    ''' DRAW MAPPED LABELS INTO IMAGE
    Inputs:
        (passed)   type = 'range', 'color', 'all', 'all_post', 'baseline'
        (internal) self.img
        (internal) self.map
            - However only uses the camPts_bound portion of self.map (called map2d)
    Outputs:
        image with color circle indicators.
    Relies on the ability to load saved classifier information.
    '''
    def draw_mapped_labels_into_image(self, type):
        img_mapped = cv.cvCloneImage(self.img)

        if type == 'labels':
            labels = self.map_polys
        elif type == 'range':
            labels = self.load_Classifier_labels('range')
        elif type == 'color':
            labels = self.load_Classifier_labels('color')
        elif type == 'all':
            labels = self.load_Classifier_labels('all')
        elif type == 'all_post':
            labels = self.load_Classifier_labels('all_post')
        elif type == 'baseline':
            labels = self.load_Classifier_labels('baseline')
        else:
            print ut.getTime(), 'WARNING: no method for drawing selected feature type:', type
            print 'Ignoring call to draw labels and returning original image.'
            return img_mapped

        _, m = self.map2d.shape
        for i in range(0,m):
            if labels[i] == LABEL_SURFACE:
                try:
                    cv.cvCircle(img_mapped, (self.map2d[0,i],self.map2d[1,i]), 3, (0, 255, 0))
                except:
                    # fall back to the ROS OpenCV bindings if the SWIG call fails
                    print 'having a hard time printing circles'
                    temp = np.array(img_mapped)
                    roscv.Circle(temp, (self.map2d[0,i],self.map2d[1,i]),2, (0, 255, 0))
                    img_mapped = cv.cvCloneImage(temp)
            elif labels[i] == LABEL_CLUTTER:
                try:
                    cv.cvCircle(img_mapped, (self.map2d[0,i],self.map2d[1,i]), 3, (0, 0, 255))
                except:
                    print 'having a hard time printing circles'
                    temp = np.array(img_mapped)
                    roscv.Circle(temp, (self.map2d[0,i],self.map2d[1,i]),2, (0, 0, 255))
                    img_mapped = cv.cvCloneImage(temp)
        return img_mapped


    def draw_mapped_laser_polygons_into_image(self,map,pts3d,img):
        color_list = [(255,255,0),(255,0,0),(0,255,255),(0,255,0),(0,0,255),(0,100,100),(100,100,0),
                      (100,0,100),(100,200,100),(200,100,100),(100,100,200),(100,0,200),(0,200,100),
                      (0,100,200),(200,0,100),(100,0,100),(255,152,7) ]

        imgTmp = cv.cvCloneImage(img)
        imNP = u2.cv2np(imgTmp,format='BGR')

        n,m = self.map2d.shape

        for i in range(0,m):
            ximg = self.map2d[1,i]
            yimg = self.map2d[0,i]

            imNP[ximg,yimg,0] = 255
            imNP[ximg,yimg,1] = 255
            imNP[ximg,yimg,2] = 255

            for index, polygon in enumerate(self.scan_dataset.polygons):
                if len(polygon.get_points()) and polygon.get_type() == 'polygon':
                    color_index = min(index, len(color_list)-1)
                    if 255 == polygon.cvImage[ximg][yimg]:
                        imNP[ximg,yimg,0] = color_list[color_index][0]
                        imNP[ximg,yimg,1] = color_list[color_index][1]
                        imNP[ximg,yimg,2] = color_list[color_index][2]

        img_mapped = u2.np2cv(imNP)
        return img_mapped



    def map_polygons_into_laser(self, map=None):
        n, m = self.pts3d_bound[0,:].shape
        polygon_mapping = list(0 for x in range(m))

        Ndim, N = self.map2d.shape

        for i in range(0,m):
            ximg = self.map2d[1,i]
            yimg = self.map2d[0,i]

            poly_count = 1
            for polygon in self.scan_dataset.polygons:
                if len(polygon.get_points()) and polygon.get_type() == 'polygon':
                    poly_count = poly_count + 1
                    if 255 == polygon.cvImage[ximg][yimg]:
                        polygon_mapping[i] = poly_count

        return polygon_mapping

    '''
    Internal. Uses: self.map2d, self.scan_dataset.is_labeled, self.scan_dataset.polygons

    polygon_labels_bound  # formerly called 'polygon_mapping' or 'map_polys'

    (?) If exclude_edges is True, then the boundaries of the photo and table surface will be
    tightened so that edge artifacts are (hopefully) not introduced.
    Returns an array of length Nbound, where 0 = unlabeled, 1 = surface, 2 = clutter.

    This function should be used for hand-labeled data which is
    specified as polygons (lists of (x,y) pairs). Used as ground truth when training/verifying.
    These polygons most likely are NOT closed, so they need to be auto-closed.
    They are given the identifiers 'surface', 'object' and 'roi'.
    Formerly called: map_polygon_labels_into_laser
    '''
    def create_polygon_labels(self, map=None, exclude_edges = False):
        _, Nbound = np.shape(self.pts3d_bound)
        polygon_labels_bound = np.zeros(Nbound)

        if self.scan_dataset.is_labeled == False:
            return polygon_labels_bound

        _, Nbound = self.map2d.shape

        # sort the hand-labeled polygons by label
        roi_polygons = []
        surface_polygons = []
        object_polygons = []
        for p in self.scan_dataset.polygons:
            if len(p.get_points()) and p.get_type() == 'polygon':
                if p.get_label() == 'roi': roi_polygons += [p]
                if p.get_label() == 'surface': surface_polygons += [p]
                if p.get_label() == 'object': object_polygons += [p]

        for i in range(Nbound):
            ximg = self.map2d[1,i]
            yimg = self.map2d[0,i]

            # later labels take precedence: roi < surface < clutter
            for p in roi_polygons:
                if 255 == p.cvImage[ximg][yimg] and (exclude_edges == False or 255 != self.scan_dataset.cvEdgeImage[ximg][yimg]):
                    polygon_labels_bound[i] = LABEL_ROI

            for p in surface_polygons:
                bool1 = (255 == p.cvImage[ximg][yimg])
                bool2 = (255 != self.scan_dataset.cvEdgeImage[ximg][yimg])
                if bool1 and (exclude_edges == False or bool2):
                    polygon_labels_bound[i] = LABEL_SURFACE

            for p in object_polygons:
                if 255 == p.cvImage[ximg][yimg] and (exclude_edges == False or 255 != self.scan_dataset.cvEdgeImage[ximg][yimg]):
                    polygon_labels_bound[i] = LABEL_CLUTTER

        return polygon_labels_bound


    def remove_labels_from_low_points(self):
        # disregard points that lie below the ground-exclusion threshold
        (z) = (2)
        idx = np.where(self.pts3d_bound[z,:] < self.ground_exclude_threshold)
        self.map_polys = np.asarray(self.map_polys)
        self.map_polys[idx] = LABEL_NONE


    def create_intensity_image(self, laserscan):
        pos_list, scan_list = laserscan

        # stack the per-scanline intensity vectors into a 2D image
        for i,s in enumerate(scan_list):
            if 0 == i:
                imgNP = s.intensities
            else:
                imgNP = np.vstack((imgNP,s.intensities))

        imgNP = imgNP / np.max(imgNP) * 255
        imgNP = np.asarray(imgNP)
        imgNP = imgNP[::-1]

        return u2.np2cv(imgNP)


    def do_all_laser_image_mapping(self, translate = True):
        self.map_laser_into_cam_2D(self.pts3d, self.img, self.scan_indices, self.intensities)

        self.rotate_pointcloud_match_groundplane()
        if translate == True:
            self.z_translate_pointcloud_groundplane()
        ''' map_laser_into_cam_2D sets the following:
            self.scan_indices_bound
            self.intensities_bound
        '''
        self.img_mapped = self.draw_mapped_laser_into_image(None, self.pts3d, self.img)
        self.create_polygon_images()
        self.img_mapped_polygons = self.draw_mapped_laser_polygons_into_image(None, self.pts3d, self.img)

        self.map_polys = self.create_polygon_labels(self.map)

        if translate == True:
            self.remove_labels_from_low_points()

        self.img_intensities = self.create_intensity_image(self.laserscans[0])


    def do_all_point_cloud_mapping(self, exclude_edges = False):
        self.do_all_point_cloud()
        self.do_polygon_mapping(exclude_edges)


    def do_polygon_mapping(self, exclude_edges = False):
        self.create_polygon_images()
        self.map_polys = self.create_polygon_labels(None, exclude_edges)
        self.remove_labels_from_low_points()

    '''
    Creates a map using map_laser_into_cam_2D
        - this is now internal (formerly standalone);
          takes pts3d, img, scan_indices, and intensities.
    Sets pts3d_bound based on points that project into the image
        - (returned as part of map)
    Does a z-translation and rotation (to groundplane) on pts3d_bound
        - internal shift of pts3d and pts3d_bound
        - Amount to shift defined as var: scan_dataset.ground_plane_translation
        - If defined:
              rotates by matrix in var: scan_dataset.ground_plane_rotation
        - Else if defined:
              rotates to match plane in var: scan_dataset.ground_plane_normal
    Sets intensities_bound and scan_indices_bound based on the returned map
    '''
    def do_all_point_cloud(self, map_already_exists=False):
        if not map_already_exists:
            self.map_laser_into_cam_2D(self.pts3d, self.img, self.scan_indices, self.intensities)
            ''' Sets the following:
                self.camPts_bound,
                self.camPts, idx_list,
                self.pts3d_bound,
                self.scan_indices_bound,
                self.intensities_bound
            '''
        self.rotate_pointcloud_match_groundplane()
        self.z_translate_pointcloud_groundplane()


    def load_data(self, name, reload_database = True):
        print ut.getTime(), 'processor: loading ', name
        self.load_metadata(name, reload_database)
        self.load_raw_data(name)

    '''
    In order to run on the PR2, we need to import data in a slightly different form.
    The easiest way to accommodate that is a separate loading function.
    '''
    def load_raw_PR2_data(self, unique_name=''):
        self.img = ''
        self.pts3d = []
        self.laserscans = []
        self.intensities = []
        self.scan_indices = []
        self.image_angle = 0

    '''
    This loads a laserscan from a pickle. ***The pickle relies on the classes found in
    hrl_hokuyo/hokuyo_scan.py, so we need to let python know where to find these.
    NOT included in manifest.xml because on the PR2 this function is not needed.
    '''
    def load_raw_data(self,name):
        import roslib; roslib.load_manifest('hrl_hokuyo')
        self.img = highgui.cvLoadImage(self.config.path+'/data/'+name+'_image.png')
        dict = ut.load_pickle(self.config.path+'/data/'+name+'_laserscans.pkl')
        self.laserscans = dict['laserscans']
        if 'image_angle' in dict:
            self.image_angle = dict['image_angle']
        else:
            self.image_angle = 0


    def display_all_data(self):
        window_name = "Image Poly Mapped"
        highgui.cvNamedWindow (window_name, highgui.CV_WINDOW_AUTOSIZE)
        highgui.cvShowImage (window_name, self.img_mapped_polygons)


    def display_segmentation_image(self, feature_type='all_post'):
        self.img_labels = self.draw_mapped_labels_into_image(feature_type)
        window_name = "Image Labels for classifier "+feature_type
        highgui.cvNamedWindow (window_name, highgui.CV_WINDOW_AUTOSIZE)
        highgui.cvShowImage (window_name, self.img_labels)


    def save_segmentation_image(self, feature_type='all_post', suffix=None):
        self.img_labels = self.draw_mapped_labels_into_image(feature_type)
        if not suffix: suffix = '_image_segmentation_'+feature_type
        filename = self.config.path+'/data/'+self.scan_dataset.id + suffix + '.png'
        print ut.getTime(), "Saving: "+filename
        highgui.cvSaveImage(filename,self.img_labels)

    def get_intensity_image(self):
        return self.img_intensities

    def display_intensities(self):
        window_name = "Image Intensities"
        highgui.cvNamedWindow (window_name, highgui.CV_WINDOW_AUTOSIZE)
        highgui.cvShowImage (window_name, self.img_intensities)


    def display_featurevector(self, featurevector):
        fv_range = np.asarray(featurevector)[self.features.get_indexvector('range')]

        idx = np.asarray(range(len(self.features.get_indexvector('range'))))
        plot1 = plt.bar(idx,fv_range,color='b')
        print 'fv_range',fv_range
        plt.show()


    def display_stats(self, global_stats = False):
        if not MLAB_LOADED: return

        import colorsys

        h_values_plot = []
        s_values_plot = []
        v_values_plot = []
        i_values_plot = []
        colors_plot = []
        rgb_plot = []

        range_hist_surface_plot = np.zeros(len(self.features.get_indexvector('range')))
        range_hist_clutter_plot = np.zeros(len(self.features.get_indexvector('range')))

        count_surface = 0
        count_clutter = 0

        normals_x = []
        normals_y = []
        normals_z = []
        normals_label = []

        measured_table_heights = []

        surface_heights = []
        clutter_heights = []

        filename = self.get_features_filename()
        dict = ut.load_pickle(filename)
        loop_condition = True
        first_loop_run = True

        dataset_count = 0
        last_surface_id = -1
        while loop_condition == True:
            count = 0
            if global_stats == True:
                if first_loop_run == True:
                    self.scan_dataset = self.scans_database.get_dataset(0)
                    first_loop_run = False
                else:
                    print 'next'
                    self.scan_dataset = self.scans_database.get_next_dataset()

                if False == self.scan_dataset:
                    break

                if True != self.scan_dataset.is_labeled:
                    continue

                print 'load',filename
                filename = self.get_features_filename()
                dict = ut.load_pickle(filename)
                dataset_count += 1

                # record each table height only once per surface
                if self.scan_dataset.surface_id != last_surface_id:
                    measured_table_heights += [float(self.scan_dataset.surface_height)]
                    last_surface_id = self.scan_dataset.surface_id
            else:
                loop_condition = False

            for index in dict['point_indices']:
                fv_hs = (dict['features'][count])[self.features.get_indexvector('hsvi')]
                if dict['labels'][count] == LABEL_SURFACE or dict['labels'][count] == LABEL_CLUTTER:
                    h_values_plot.append(fv_hs[0])
                    s_values_plot.append(fv_hs[1])
                    v_values_plot.append(fv_hs[2])
                    i_values_plot.append(fv_hs[3])

                    rvalue,gvalue,bvalue = colorsys.hsv_to_rgb(fv_hs[0], fv_hs[1], fv_hs[2])
                    rgb_plot.append([rvalue,gvalue,bvalue])

                fv_range = (dict['features'][count])[self.features.get_indexvector('range')]

                if dict['labels'][count] == LABEL_SURFACE:
                    colors_plot.append(1)
                    range_hist_surface_plot += np.asarray(fv_range)
                    count_surface += 1
                elif dict['labels'][count] == LABEL_CLUTTER:
                    colors_plot.append(2)
                    range_hist_clutter_plot += np.asarray(fv_range)
                    count_clutter += 1

                # fv_range[1:4] holds the estimated surface normal components
                if len(fv_range) > 2:
                    normals_x += [fv_range[1]]
                    normals_y += [fv_range[2]]
                    normals_z += [fv_range[3]]
                    normals_label += [dict['labels'][count]]

                count += 1

            print 'load data for ',self.scan_dataset.id
            self.load_data(self.scan_dataset.id, False)
            (self.pts3d, self.scan_indices, self.intensities) = self.create_pointcloud(self.laserscans)
            self.do_all_point_cloud_mapping()

            # mean surface height for this table, used to center the height histograms
            surface_height_sum_this_table = 0
            surface_count_this_table = 0
            for index, label in enumerate(self.map_polys):
                if label == LABEL_SURFACE:
                    surface_height_sum_this_table += np.asarray(self.pts3d_bound)[2,index]
                    surface_count_this_table += 1

            surface_height_mean_this_table = surface_height_sum_this_table / float(surface_count_this_table)

            for index, label in enumerate(self.map_polys):
                height = np.asarray(self.pts3d_bound)[2,index]
                height -= surface_height_mean_this_table
                if label == LABEL_CLUTTER:
                    clutter_heights += [height]
                elif label == LABEL_SURFACE:
                    surface_heights += [height]

        surface_heights = np.asarray(surface_heights)
        clutter_heights = np.asarray(clutter_heights)

        # convert from meters to centimeters
        surface_heights = surface_heights * 100
        clutter_heights = clutter_heights * 100

        print clutter_heights
        fig_clutterheights = plt.figure()
        plt.title('clutter and surface heights above measured height of table')
        plt.xlabel('height in cm')
        plt.ylabel('number of range measurements (points)')
        width = 0.35
        bins = np.asarray(range(-10,60))
        print 'len clutterheights',len(clutter_heights)
        # restrict both histograms to the [-10cm, 60cm) band around the table
        surface_heights = surface_heights[np.where(surface_heights < 60)]
        surface_heights = surface_heights[np.where(surface_heights > -10)]
        clutter_heights = clutter_heights[np.where(clutter_heights < 60)]
        clutter_heights = clutter_heights[np.where(clutter_heights > -10)]
        print 'len clutterheights',len(clutter_heights)
        hist_surface = stats.histogram2(surface_heights,bins)
        hist_clutter = stats.histogram2(clutter_heights,bins)
        print bins
        print hist_surface
        print hist_clutter

        plot1 = plt.bar(bins,hist_surface, color='g', width=width,linewidth=0.1)
        plot1 = plt.bar(bins+width,hist_clutter, color='r', width=width,linewidth=0.1)

        plt_filename = self.config.path+'/clutter_heights.png'
        print 'saving',plt_filename
        plt.savefig(plt_filename)
        plt.savefig(self.config.path+'/clutter_heights.pdf')
        plt.savefig(self.config.path+'/clutter_heights.eps')

        rgb_plot = np.array(rgb_plot)

        v_values_plot = np.asarray(v_values_plot)
        s_values_plot = np.asarray(s_values_plot)
        h_values_plot = np.asarray(h_values_plot)
        colors_plot = np.asarray(colors_plot)
        rgb_plot = np.asarray(rgb_plot)

        # split the s/h values by label for the probability plot
        dict = {'s_surface':s_values_plot[colors_plot == 1],
                'h_surface':h_values_plot[colors_plot == 1],
                's_clutter':s_values_plot[colors_plot == 2],
                'h_clutter':h_values_plot[colors_plot == 2]
                }

        # The following blocks select (x, y) data and axis labels for the optional 2D
        # color scatterplots (see polt_color_scatterplots_2d below); only the last
        # assignment takes effect unless a plotting call is re-enabled after a block.
        x = s_values_plot
        y = h_values_plot
        xlabel = 's'
        ylabel = 'h'

        xlabel = 's_in_rgb'
        ylabel = 'h_in_rgb'

        x = s_values_plot[colors_plot == 1]
        y = h_values_plot[colors_plot == 1]
        rgb = rgb_plot[colors_plot == 1]
        xlabel = 's, surface'
        ylabel = 'h, surface'

        x = s_values_plot[colors_plot == 2]
        y = h_values_plot[colors_plot == 2]
        rgb = rgb_plot[colors_plot == 2]
        xlabel = 's, clutter'
        ylabel = 'h, clutter'

        x = v_values_plot
        y = h_values_plot
        xlabel = 'v'
        ylabel = 'h'

        xlabel = 'v_in_rgb'
        ylabel = 'h_in_rgb'

        x = v_values_plot[colors_plot == 1]
        y = h_values_plot[colors_plot == 1]
        rgb = rgb_plot[colors_plot == 1]
        xlabel = 'v_in_rgb_surface'
        ylabel = 'h_in_rgb_surface'

        x = v_values_plot[colors_plot == 2]
        y = h_values_plot[colors_plot == 2]
        rgb = rgb_plot[colors_plot == 2]
        xlabel = 'v_in_rgb_clutter'
        ylabel = 'h_in_rgb_clutter'

        x = i_values_plot
        y = h_values_plot
        xlabel = 'i'
        ylabel = 'h'

        x = v_values_plot
        y = s_values_plot
        xlabel = 'v'
        ylabel = 's'

        xlabel = 'v_in_rgb'
        ylabel = 's_in_rgb'

        x = v_values_plot[colors_plot == 1]
        y = s_values_plot[colors_plot == 1]
        rgb = rgb_plot[colors_plot == 1]
        xlabel = 'v_in_rgb_surface'
        ylabel = 's_in_rgb_surface'

        x = v_values_plot[colors_plot == 2]
        y = s_values_plot[colors_plot == 2]
        rgb = rgb_plot[colors_plot == 2]
        xlabel = 'v_in_rgb_clutter'
        ylabel = 's_in_rgb_clutter'

        x = i_values_plot
        y = s_values_plot
        xlabel = 'i'
        ylabel = 's'

        x = i_values_plot
        y = v_values_plot
        xlabel = 'i'
        ylabel = 'v'

        if len(measured_table_heights):
            fig_heights = plt.figure()
            plt.title('table heights')
            plt.xlabel('height in cm')
            plt.ylabel('number of tables')
            print 'table heights', measured_table_heights
            hist = np.histogram(measured_table_heights,10)
            plot1 = plt.bar(hist[1][0:-1],hist[0],width=3)

            plt_filename = self.config.path+'/table_heights.png'
            print 'saving',plt_filename
            plt.savefig(plt_filename)
            plt.savefig(self.config.path+'/table_heights.pdf')
            plt.savefig(self.config.path+'/table_heights.eps')

        print 'number of points: ', len(normals_x)

        normals_x = np.asarray(normals_x)
        normals_y = np.asarray(normals_y)
        normals_z = np.asarray(normals_z)
        normals_label = np.asarray(normals_label)
        # subsample the normals so mayavi stays responsive
        import random
        sample_size = min(len(normals_label),500000)
        rand_idx = np.array(random.sample(xrange(len(normals_label)),sample_size))

        normals_x = normals_x[rand_idx]
        normals_y = normals_y[rand_idx]
        normals_z = normals_z[rand_idx]
        normals_label = normals_label[rand_idx]

        # append a dummy zero entry (presumably to anchor the label colormap at 0)
        normals_x = np.hstack((normals_x,0))
        normals_y = np.hstack((normals_y,0))
        normals_z = np.hstack((normals_z,0))
        normals_label = np.hstack((normals_label,0))

        print 'number of normals:',len(normals_x),len(normals_y),len(normals_z),len(normals_label)
        mlab.figure(fgcolor=(0, 0, 0), bgcolor=(1, 1, 1))
        mlab.points3d(normals_x,normals_y,normals_z,normals_label,resolution=4,scale_factor=0.005,mode='point',scale_mode='none',colormap='jet')
        self.plot_axis(0,0,0, np.eye(3),0.5)

        # mean of the accumulated per-class range-feature histograms
        range_hist_surface_plot /= count_surface
        range_hist_clutter_plot /= count_clutter

        range_hist_surface_std = 0
        range_hist_clutter_std = 0

        # second pass over the features to compute standard deviations
        loop_condition = True
        first_loop_run = True
        while loop_condition == True:
            count = 0
            if global_stats == True:
                if first_loop_run == True:
                    self.scan_dataset = self.scans_database.get_dataset(0)
                    first_loop_run = False
                else:
                    self.scan_dataset = self.scans_database.get_next_dataset()

                if False == self.scan_dataset:
                    break

                if True != self.scan_dataset.is_labeled:
                    continue

                print 'load',filename
                filename = self.get_features_filename()
                dict = ut.load_pickle(filename)
            else:
                loop_condition = False

            for index in dict['point_indices']:
                fv_range = (dict['features'][count])[self.features.get_indexvector('range')]
                if dict['labels'][count] == LABEL_SURFACE:
                    range_hist_surface_std += np.square(np.asarray(fv_range) - range_hist_surface_plot)
                elif dict['labels'][count] == LABEL_CLUTTER:
                    range_hist_clutter_std += np.square(np.asarray(fv_range) - range_hist_clutter_plot)
                count += 1

        range_hist_surface_std /= count_surface
        range_hist_clutter_std /= count_clutter
        range_hist_surface_std = np.sqrt(range_hist_surface_std)
        range_hist_clutter_std = np.sqrt(range_hist_clutter_std)

        width = 0.35
        idx = np.asarray(range(len(self.features.get_indexvector('range'))))
        print 'surface',range_hist_surface_plot
        print 'surface std',range_hist_surface_std
        print 'clutter',range_hist_clutter_plot
        print 'clutter std',range_hist_clutter_std

        fig_range = plt.figure()
        plt.title('range features (normalized): mean and standard deviation')

        # normalize each feature so its largest mean/std value is 1
        max = np.maximum(np.maximum(np.maximum(range_hist_surface_plot,range_hist_clutter_plot),range_hist_surface_std),range_hist_clutter_std)
        factor = np.divide(np.ones(len(range_hist_surface_plot)),max)

        range_hist_surface_plot = np.multiply(range_hist_surface_plot,factor)
        range_hist_clutter_plot = np.multiply(range_hist_clutter_plot,factor)
        range_hist_surface_std = np.multiply(range_hist_surface_std,factor)
        range_hist_clutter_std = np.multiply(range_hist_clutter_std,factor)

        plot1 = plt.bar(idx,range_hist_surface_plot,width,yerr=range_hist_surface_std,color='g')
        plot2 = plt.bar(idx+width,range_hist_clutter_plot,width,yerr=range_hist_clutter_std,color='r')
        plt.xticks(idx+width,('zhist','nx','ny','nz','ev1','ev2'))
        plt_filename = self.config.path+'/range_features.png'
        print 'saving',plt_filename
        plt.savefig(plt_filename)
        plt.savefig(self.config.path+'/range_features.pdf')
        plt.savefig(self.config.path+'/range_features.eps')

        print 'creating plots done.'

    def polt_color_scatterplots_2d(self,x,y,xlabel,ylabel, colors_plot, alpha=0.02):
        fig = plt.figure()
        plt.title(xlabel+', '+ylabel)
        plt.xlabel(xlabel+' values')
        plt.ylabel(ylabel+' values')

        plot = plt.scatter(x,y,5,c=colors_plot,linewidths=0, alpha=alpha)
        plt_filename = self.config.path+'/colors_'+xlabel+'_'+ylabel
        print 'saving',plt_filename,'...',
        plt.savefig(plt_filename+'.png')
        print 'done'

    def plot_s_h_probabilities(self,s_surface, h_surface, s_clutter, h_clutter):
        # pad both classes with 0 and 1 (presumably so both 2D histograms span the full [0,1] bin range)
        s_surface = np.hstack((s_surface,0))
        h_surface = np.hstack((h_surface,0))
        s_clutter = np.hstack((s_clutter,0))
        h_clutter = np.hstack((h_clutter,0))
        s_surface = np.hstack((s_surface,1))
        h_surface = np.hstack((h_surface,1))
        s_clutter = np.hstack((s_clutter,1))
        h_clutter = np.hstack((h_clutter,1))

        bins = 256
        hist2d_surface = np.histogram2d(s_surface,h_surface,bins=bins)[0]
        hist2d_clutter = np.histogram2d(s_clutter,h_clutter,bins=bins)[0]

        hist2d_surface = hist2d_surface[:,0:200]
        hist2d_clutter = hist2d_clutter[:,0:200]

        fig = plt.figure()
        plt.xlabel('s values')
        plt.ylabel('h values')

        # per-bin class probabilities:
        # P(surface | bin) = #surface / (#surface + #clutter), and likewise for clutter
        hist2d_sum = hist2d_clutter + hist2d_surface
        hist2d_sum[hist2d_sum == 0] = 1 #avoid division by zero

        hist_prob_surface = np.divide(hist2d_surface,hist2d_sum)
        hist_prob_clutter = np.divide(hist2d_clutter,hist2d_sum)

        # keep only confident bins that have enough measurements
        prob_threshold = 0.80
        hist_prob_surface[hist_prob_surface < prob_threshold] = 0
        hist_prob_clutter[hist_prob_clutter < prob_threshold] = 0

        meas_threshold = 5
        hist_prob_surface[hist2d_sum < meas_threshold] = 0
        hist_prob_clutter[hist2d_sum < meas_threshold] = 0

        # bins that are confident for neither class are drawn white
        blue = np.zeros(np.shape(hist_prob_surface))
        idx_white = np.multiply(hist_prob_surface == 0, hist_prob_clutter == 0)
        blue[idx_white] = 1
        hist_prob_surface[idx_white] = 1
        hist_prob_clutter[idx_white] = 1

        print hist_prob_surface
        print hist_prob_clutter

        histcolored = np.array([hist_prob_clutter, hist_prob_surface, blue]).T
        plt.imshow(histcolored, interpolation='nearest', origin='lower')
        plt_filename = self.config.path+'/sh_probabilities'
        print 'saving',plt_filename,'...',
        plt.savefig(plt_filename+'.png')
        plt.savefig(plt_filename+'.pdf')
        plt.savefig(plt_filename+'.eps')

        import sys
        print 'exit'
        plt.show()
        sys.exit()


    def plot_axis(self,x,y, z, directions, scale = 1):
        if not MLAB_LOADED: return
        scale *= 2.
        # each arrow is offset by half its direction vector so it is centered on (x,y,z)
        mlab.quiver3d([x-directions[0,0]/2.0], [y-directions[1,0]/2.0], [z-directions[2,0]/2.0], [directions[0,0]], [directions[1,0]], [directions[2,0]], scale_factor=scale, color=(1,0,0))
        if directions.shape[1] > 1:
            mlab.quiver3d([x-directions[0,1]/2.0], [y-directions[1,1]/2.0], [z-directions[2,1]/2.0], [directions[0,1]], [directions[1,1]], [directions[2,1]], scale_factor=scale, color=(0,1,0))
            mlab.quiver3d([x-directions[0,2]/2.0], [y-directions[1,2]/2.0], [z-directions[2,2]/2.0], [directions[0,2]], [directions[1,2]], [directions[2,2]], scale_factor=scale, color=(0,0,1))


    def draw_3d(self,type,spheres=False, new_figure = True):
        if not MLAB_LOADED: return

        mode = 'point'
        if spheres:
            mode = 'sphere'
        if new_figure:
            mlab.figure()

        print ut.getTime(), type
        if type == 'height':
            labels = self.pts3d_bound[2,:]
        elif type == 'intensities':
            labels = self.intensities_bound
        elif type == 'labels':
            labels = self.map_polys
        elif type == 'range':
            labels = self.load_Classifier_labels('range')
        elif type == 'color':
            labels = self.load_Classifier_labels('color')
        elif type == 'all':
            labels = self.load_Classifier_labels('all')
        elif type == 'all_post':
            labels = self.load_Classifier_labels('all_post')
        elif type == 'baseline':
            labels = self.load_Classifier_labels('baseline')
        elif type=='h' or type == 's' or type == 'v':
            # color each 3D point by the h, s or v channel of its image pixel
            image_size = cv.cvGetSize(self.img)
            img_h = cv.cvCreateImage (image_size, 8, 1)
            img_s = cv.cvCreateImage (image_size, 8, 1)
            img_v = cv.cvCreateImage (image_size, 8, 1)
            img_hsv = cv.cvCreateImage (image_size, 8, 3)
            cv.cvCvtColor (self.img, img_hsv, cv.CV_BGR2HSV)
            cv.cvSplit (img_hsv, img_h, img_s, img_v, None)

            labels = []

            if type=='h':
                imNP = u2.cv2np(img_h)
            elif type == 's':
                imNP = u2.cv2np(img_s)
            elif type == 'v':
                imNP = u2.cv2np(img_v)

            for index in range(len(self.pts3d_bound[2,:].A1)):
                labels.append(float(imNP[self.map2d[1,index],self.map2d[0,index]]))

        print 'size X', np.size(self.pts3d_bound[0,:]), 'len labels:', len(labels)

        # reference quad at the origin
        mlab.imshow([[1,1],[1,1]], extent=[-1, 1,
                                           -1, 1,
                                           0, 0], opacity=.5, colormap='jet')

        # draw the table plane at its estimated height, spanning the labeled surface points
        if self.scan_dataset.table_plane_translation != '':
            print 'self.scan_dataset.table_plane_translation', self.scan_dataset.table_plane_translation
            table_pts = self.pts3d_bound[:,self.map_polys == LABEL_SURFACE]
            table_height = float(self.scan_dataset.table_plane_translation[2])
            if table_pts != []:
                mlab.imshow([[1,1],[1,1]], extent=[np.min(table_pts[0,:]), np.max(table_pts[0,:]),
                                                   np.min(table_pts[1,:]), np.max(table_pts[1,:]),
                                                   table_height, table_height], opacity=.5, colormap='jet')

            # mark the groundplane translation with a sphere
            mlab.points3d([0],[0],[float(self.scan_dataset.ground_plane_translation[2])],[0],mode='sphere',resolution=8,scale_mode='none',scale_factor=0.1,colormap='winter')

        mlab.points3d(self.pts3d_bound[0,:],self.pts3d_bound[1,:],self.pts3d_bound[2,:],labels,mode=mode,resolution=8,scale_mode='none',scale_factor=0.01,colormap='jet')

        mean_x = np.mean(self.pts3d_bound[0,:])
        mean_y = np.mean(self.pts3d_bound[1,:])

        mlab.view(azimuth=0, elevation=-48, distance=3.75, focalpoint=(mean_x,mean_y,0.85))

    def save_3d(self,type,spheres=False):
        if not MLAB_LOADED: return
        mlab.clf()
        self.draw_3d(type, spheres)
        size=(960,800)
        mlab.savefig(self.config.path+'/results/'+self.scan_dataset.id+'_'+type+'.png',size=size)


    def display_3d(self,type,spheres=False, new_figure = True):
        if not MLAB_LOADED: return

        self.draw_3d(type, spheres, new_figure)

        print ut.getTime(), 'points:'
        print ut.getTime(), self.pts3d_bound[0,:].shape

        # if an ARTag was detected, draw its coordinate frame
        R = self.artag_transformation
        if np.any(R):
            print ut.getTime(), 'display ARTag detection coordinates...'
            scale = 0.003
            self.draw_3d_axis(scale)

            R[:,3] = R[:,3] * 0.0005
            R = np.vstack((R,np.array([0,0,0,1.0])))

            # x axis (red)
            v1 = np.array([0,0,0,1])
            v2 = np.array([80*scale,0,0,1])
            v1 = np.dot(R,v1)
            v2 = np.dot(R,v2)
            print ut.getTime(), v2
            self.draw_vector(v1[0:3],v2[0:3],scale, (1,0,0))

            # y axis (green)
            v1 = np.array([0,0,0,1])
            v2 = np.array([0,80*scale,0,1])
            v1 = np.dot(R,v1)
            v2 = np.dot(R,v2)
            print ut.getTime(), v2
            self.draw_vector(v1[0:3],v2[0:3],scale,(0,1,0))

            # z axis (blue)
            v1 = np.array([0,0,0,1])
            v2 = np.array([0,0,80*scale,1])
            v1 = np.dot(R,v1)
            v2 = np.dot(R,v2)
            print ut.getTime(), v2
            self.draw_vector(v1[0:3],v2[0:3],scale,(0,0,1))

        mlab.colorbar()
        mlab.show()


    def draw_vector(self, v1, v2, scale, color=(1,1,1)):
        if not MLAB_LOADED: return
        # draw the vector as a thin two-triangle ribbon from v1 to v2
        mlab.triangular_mesh([[v1[0]-1*scale,v1[0]+1*scale,v2[0]-1*scale,v2[0]+1*scale]], [[v1[1]-1*scale,v1[1]+1*scale,v2[1]-1*scale,v2[1]+1*scale]], [[v1[2]-1*scale,v1[2]+1*scale,v2[2]-1*scale,v2[2]+1*scale]], [(0,1,2),(1,2,3)],color=color)

    def draw_3d_axis(self, scale):
        if not MLAB_LOADED: return
        # three long thin triangles along the x, y and z axes
        mlab.triangular_mesh([np.array([0.0,0.0,100.0])*scale], [np.array([-0.3,0.3,0.0])*scale], [np.array([0.0,0.0,0.0])*scale], [(0,1,2)], color=(1,1,1))
        mlab.triangular_mesh([np.array([-0.3,0.3,0.0])*scale], [np.array([0.0,0.0,100.0])*scale], [np.array([0.0,0.0,0.0])*scale], [(0,1,2)], color=(1,1,1))
        mlab.triangular_mesh([np.array([-0.3,0.3,0.0])*scale], [np.array([0.0,0.0,0.0])*scale], [np.array([0.0,0.0,100.0])*scale], [(0,1,2)], color=(1,1,1))


    def read_artag(self, img):
        '''Run the external ARToolKitPlus 'simple' binary on a grayscale dump of img
        and parse the returned 3x4 tag transformation matrix from its output.'''
        grey = cv.cvCreateImage((img.width, img.height), 8,1)
        highgui.cvConvertImage(img, grey)

        file = open('test.raw', 'wb')

        for i in range(grey.height):
            for j in range(grey.width):
                file.write(chr(grey[i][j]))

        file.close()

        output = subprocess.Popen([LOC_ARTAGS_SIMPLE, ""], stdout=subprocess.PIPE, env={"LD_LIBRARY_PATH": LOC_ARTAGS_LIB}).communicate()[0]
        print ut.getTime(), output

        try:
            m = re.search('getARMatrix.*\n([0-9\-\.]*) ([0-9\-\.]*) ([0-9\-\.]*) ([0-9\-\.]*).*\n([0-9\-\.]*) ([0-9\-\.]*) ([0-9\-\.]*) ([0-9\-\.]*).*\n([0-9\-\.]*) ([0-9\-\.]*) ([0-9\-\.]*) ([0-9\-\.]*)', output)
        except:
            print ut.getTime(), "ERROR parsing ARToolKitPlus output"
            return np.array([])

        if None == m:
            print ut.getTime(), "ERROR parsing ARToolKitPlus output"
            return np.array([])

        R = np.array(m.groups(),dtype=np.float)
        R = R.reshape(3,4)

        if False == np.any(R):
            print ut.getTime(), "ERROR: failed to detect AR tag"
            return np.array([])

        print ut.getTime(), m.groups()
        print ut.getTime(), R

        print ut.getTime(), 'success'
        return R


    def process_raw_data(self, translate = True):
        (self.pts3d, self.scan_indices, self.intensities) = self.create_pointcloud(self.laserscans)
        self.do_all_laser_image_mapping(translate)

    def process_intensities(self):
        # create the intensity image without removing any points
        (self.pts3d, self.scan_indices, self.intensities) = self.create_pointcloud(self.laserscans, False)
        self.img_intensities = self.create_intensity_image(self.laserscans[0])

    def process_labels(self, features):
        (self.pts3d, self.scan_indices, self.intensities) = self.create_pointcloud(self.laserscans)
        self.do_all_point_cloud_mapping()
        self.img_labels = self.draw_mapped_labels_into_image(features)

    def process_masks(self, features, show_clutter=False):
        (self.pts3d, self.scan_indices, self.intensities) = self.create_pointcloud(self.laserscans)
        self.do_all_point_cloud_mapping()
        self.img_labels = self.draw_mapped_masks_into_image(features, show_clutter)

    def save_mapped_image(self,name):
        prefix = self.config.path+'/data/'+name
        print ut.getTime(), "Saving: "+prefix+'_image_mapped.png'
        highgui.cvSaveImage(prefix+'_image_mapped.png',self.img_mapped)

    def save_intensity_image(self, name):
        prefix = self.config.path+'/data/'+name
        print ut.getTime(), "Saving: "+prefix+'_image_intensities.png'
        highgui.cvSaveImage(prefix+'_image_intensities.png',self.img_intensities)
        return prefix+'_image_intensities.png'

    ''' SAVE LABELS IMAGE (in same directory as config.path)
        Note it is possible for this function to fail, since the
        path contains the keyword 'results' and that folder must exist.
    '''
    def save_labels_image(self, features):
        filename = self.config.path+'/results/'+self.scan_dataset.id+'_image_labels_'+features+'_'+self.feature_type+'_k'+str(self.feature_neighborhood)+'_r'+str(self.feature_radius)+'.png'
        print ut.getTime(), "Saving: "+filename
        highgui.cvSaveImage(filename,self.img_labels)
        return filename


    def save_masks_image(self, features, show_clutter=False):
        if not show_clutter:
            filename = self.config.path+'/results/'+self.scan_dataset.id+'_image_mask_surface'+'.png'
        else:
            filename = self.config.path+'/results/'+self.scan_dataset.id+'_image_mask_clutter'+'.png'
        print ut.getTime(), "Saving: "+filename
        try:
            highgui.cvSaveImage(filename,self.img_labels)
        except:
            print 'processor.py::save_masks_image(): Unable to save file "'+filename+\
                  '". Please check that the folder exists and that file permissions allow this location to be overwritten.'
        return filename

    def load_metadata(self, id, reload_database = True):
        if reload_database:
            self.scans_database = scans_database.scans_database()
            self.scans_database.load(self.config.path,'database.pkl')
        self.scan_dataset = self.scans_database.get_dataset_by_id(id)

    def create_polygon_images(self):
        ''' Purpose: exclude all edges when training.

            Creates additional images that could perhaps be dropped: they
            represent the ROI, which is specified by the boundary points of a
            square (currently a square, NOT a general polygon) rather than by
            complex images. In the future it could be useful to generate a
            map of the table with respect to the surface. A boundary of 30
            (i.e. 30/2 px on each side) is drawn on the borders of the image
            to prevent placement there, and a boundary of 10 (i.e. 10/2 px)
            on the borders of the table. With the current setup these
            boundaries are identical, which is awkward.
        '''
        self.scan_dataset.cvEdgeImage = cv.cvCreateImage(cv.cvSize(self.img.width, self.img.height), 8, 1)
        cv.cvSet(self.scan_dataset.cvEdgeImage, 0)
        self.scan_dataset.cvSurfaceEdgeImage = cv.cvCreateImage(cv.cvSize(self.img.width, self.img.height), 8, 1)
        cv.cvSet(self.scan_dataset.cvSurfaceEdgeImage, 0)

        for polygon in self.scan_dataset.polygons:
            print "Found hand-labeled polygon with length:", len(polygon.get_points()), 'label=', polygon.get_label()

        for polygon in self.scan_dataset.polygons:
            if len(polygon.get_points()):
                polygon.cvImage = cv.cvCreateImage(cv.cvSize(self.img.width, self.img.height), 8, 1)
                cv.cvSet(polygon.cvImage, 0)
                try:
                    # Rasterize the labeled polygon into its own mask image.
                    temp = np.array(polygon.cvImage)
                    roscv.FillPoly(temp, (polygon.get_points(),), 255)
                    polygon.cvImage = temp
                except:
                    cv.cvSet(polygon.cvImage, 255)
                    print 'Ignored command cvFillPoly() 1'

            if len(polygon.get_points()) and polygon.get_type() == 'polygon':
                try:
                    # Draw the polygon outline into the shared edge image
                    # (thickness 30, i.e. 15 px on each side of the edge).
                    temp = np.array(self.scan_dataset.cvEdgeImage)
                    roscv.PolyLine(temp, [polygon.get_points()], True, 255, 30)
                    self.scan_dataset.cvEdgeImage = temp
                except:
                    print 'Ignored command cvPolyLine() 2'

            if polygon.get_label() == 'surface' and len(polygon.get_points()) and polygon.get_type() == 'polygon':
                try:
                    # Surface outlines use a thinner border (thickness 10,
                    # i.e. 5 px on each side).
                    temp = np.array(self.scan_dataset.cvSurfaceEdgeImage)
                    roscv.PolyLine(temp, [polygon.get_points()], True, 255, 10)
                    self.scan_dataset.cvSurfaceEdgeImage = temp
                except:
                    print 'Ignored command cvPolyLine() 3'

    def create_combined_polygon_image(self):
        ''' author: Jokerman. Returns a grayscale mask image built from the
            labeled polygons (surface -> 120, object -> 255). '''
        (w, h) = (self.img.width, self.img.height)
        poly_image = cv.cvCreateImage(cv.cvSize(w, h), 8, 1)
        cv.cvSet(poly_image, 0)

        print 'draw poly combo image'
        for label_to_draw, color in [('surface', 120), ('object', 255)]:
            for polygon in self.scan_dataset.polygons:
                pts = polygon.get_points()
                if len(pts) > 0 and polygon.get_type() == 'polygon':
                    if polygon.get_label() == label_to_draw:
                        temp = np.array(poly_image)
                        roscv.FillPoly(temp, (pts,), color)
                        poly_image = temp

        self.combined_polygon_image = poly_image
        return poly_image

    def get_3d_point_index(self, image_point):
        # The point cloud is stored row-major with the image y-axis flipped,
        # so pixel (x, y) maps to column (height - y) * width + x of pts3d.
        width = self.img_intensities.width
        height = self.img_intensities.height
        index = (height - image_point[1]) * width + image_point[0]
        return index

    def get_3d_point(self, point2d):
        index = self.get_3d_point_index(point2d)
        return np.asarray(self.pts3d)[:, index]

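    # Worked example (hypothetical 640x480 intensity image): pixel
    # (x=10, y=2) maps to index (480 - 2) * 640 + 10 = 305930, so
    # np.asarray(self.pts3d)[:, 305930] is the corresponding 3D point.
    # Note that y = 0 would address one row past the end of the cloud;
    # callers are expected to pass y >= 1.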

    def get_3d_plane_normal(self, image_points):
        points3d = []

        print ut.getTime(), self.img_intensities.width * self.img_intensities.height
        for i, point2d in enumerate(image_points):
            # Same pixel -> point-cloud-column mapping as get_3d_point_index().
            width = self.img_intensities.width
            height = self.img_intensities.height
            index = (height - point2d[1]) * width + point2d[0]
            print ut.getTime(), 'index: ' + str(index)
            print ut.getTime(), np.shape(np.asarray(self.pts3d))
            points3d.append(np.asarray(self.pts3d)[:, index])

        print ut.getTime(), 'image points: ' + str(image_points) + ' points3d: ' + str(points3d)

        # The plane normal is the cross product of two in-plane vectors
        # spanned by the three sampled points.
        a = points3d[1] - points3d[0]
        b = points3d[2] - points3d[0]
        n = np.matrix(np.cross(a, b)).T
        n = n / np.linalg.norm(n)

        # Make sure the normal points upwards.
        if n[2] < 0:
            n = -n
        print ut.getTime(), 'plane normal: ' + str(n)
        return n, points3d

    def check_3d_plane_point(self, point2d):
        width = self.img_intensities.width
        height = self.img_intensities.height
        index = (height - point2d[1]) * width + point2d[0]
        point3d = np.asarray(self.pts3d)[:, index]
        print ut.getTime(), 'check 3d point: ', point3d

        # Reject obviously invalid readings (coordinates beyond 100 m).
        if np.max(np.abs(point3d)) > 100:
            return False
        print ut.getTime(), point2d, point3d
        return True

    def rotate_pointcloud_match_groundplane(self):
        ''' Rotate the point cloud so the ground plane becomes horizontal. '''
        if self.scan_dataset.ground_plane_rotation == '':
            print 'WARNING: no RANSAC groundplane calculated for', self.scan_dataset.id
            print 'WARNING: falling back to 3-point estimation!'

            n = self.scan_dataset.ground_plane_normal

            if n == '':
                print ut.getTime(), 'WARNING: no groundplane defined!'
                return False
            print ut.getTime(), 'normal: ', n.T

            print 'shapes pts3d, pts3d_bound', np.shape(self.pts3d), np.shape(self.pts3d_bound)
            self.pts3d = rotate_to_plane(n, self.pts3d)
            self.pts3d_bound = rotate_to_plane(n, self.pts3d_bound)
        else:
            # Use the precomputed RANSAC rotation matrix directly.
            self.pts3d = self.scan_dataset.ground_plane_rotation * self.pts3d
            print self.scan_dataset.ground_plane_rotation, self.pts3d_bound
            self.pts3d_bound = np.asarray(self.scan_dataset.ground_plane_rotation * np.asmatrix(self.pts3d_bound))

        return True

    def z_translate_pointcloud_groundplane(self):
        if self.scan_dataset.ground_plane_translation == '':
            print ut.getTime(), 'WARNING: no groundplane translation defined! Ignoring translation.'
            return False
        self.pts3d += self.scan_dataset.ground_plane_translation
        self.pts3d_bound += self.scan_dataset.ground_plane_translation
        return True

    def get_groundplane_translation(self):
        gpp = self.scan_dataset.ground_plane_three_points
        if gpp == '':
            print ut.getTime(), 'WARNING: no groundplane defined! Ignoring translation.'
            return False
        if self.scan_dataset.ground_plane_rotation == '':
            print ut.getTime(), 'WARNING: no groundplane rotation defined! Ignoring rotation.'
            return False

        # Rotate the three hand-picked ground points into the leveled frame
        # and shift the cloud so that their mean height becomes z = 0.
        gpp = np.asarray(gpp).T
        gpp = self.scan_dataset.ground_plane_rotation * np.matrix(gpp)
        z_mean = np.mean(gpp[2, :])

        return np.matrix([0.0, 0.0, -z_mean]).T

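    # Leveling a scan is a two-step process: rotate, then translate along z.
    # A minimal sketch (assuming metadata and raw data are already loaded;
    # `proc` is a hypothetical processor instance):
    #
    #   if proc.rotate_pointcloud_match_groundplane():
    #       proc.scan_dataset.ground_plane_translation = proc.get_groundplane_translation()
    #       proc.z_translate_pointcloud_groundplane()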

    def generate_save_features(self, generate_and_save_all_neightborhood_indices = False, regenerate_neightborhood_indices = False):

        # Count the training scans so the training samples can be split
        # evenly across them.
        training_set_count = 0
        self.scan_dataset = self.scans_database.get_dataset(0)
        while False != self.scan_dataset:
            if self.scan_dataset.is_training_set:
                training_set_count += 1
            self.scan_dataset = self.scans_database.get_next_dataset()

        training_size_per_scan = self.classifier_training_size / training_set_count

        # Loop through all labeled datasets; each scan is processed twice,
        # once with and once without edge points.
        self.scan_dataset = self.scans_database.get_dataset(0)

        state_exclude_edges = False

        while False != self.scan_dataset:
            if self.scan_dataset.is_labeled:

                self.load_raw_data(self.scan_dataset.id)
                (self.pts3d, self.scan_indices, self.intensities) = self.create_pointcloud(self.laserscans)
                if state_exclude_edges == False:
                    self.do_all_point_cloud_mapping(False)
                    print ut.getTime(), 'generating features with edges', self.scan_dataset.id
                else:
                    self.do_all_point_cloud_mapping(True)
                    print ut.getTime(), 'generating features without edges', self.scan_dataset.id

                dict = self.generate_features(training_size_per_scan, generate_and_save_all_neightborhood_indices, regenerate_neightborhood_indices)

                filename = self.get_features_filename(state_exclude_edges)
                print ut.getTime(), "Saving: "+filename
                ut.save_pickle(dict, filename)
                del dict

            if state_exclude_edges == False:
                # First pass done; repeat the same scan without edges.
                state_exclude_edges = True
            else:
                # Second pass done; advance to the next scan.
                state_exclude_edges = False
                self.scan_dataset = self.scans_database.get_next_dataset()

        print ut.getTime(), 'generating features done.'


    def generate_features(self, training_size_per_scan = 999999999999, generate_and_save_all_neightborhood_indices = False, regenerate_neightborhood_indices = False):

        feature_vector_length = len(self.features.get_indexvector('all'))
        # Only points that fall inside a labeled polygon have a nonzero label.
        nonzero_indices = np.nonzero(self.map_polys)[0]

        labels = np.array(self.map_polys)[nonzero_indices]
        current_set_size_total = len(nonzero_indices)
        all_nonzero_indices = copy.copy(nonzero_indices)

        # Subsample training scans; test scans always use all labeled points.
        if False == self.scan_dataset.is_test_set and training_size_per_scan < current_set_size_total:
            training_size_this_scan = min(training_size_per_scan, current_set_size_total)
            # Note: this samples with replacement, so duplicates are possible.
            t_idx = np.random.randint(0, current_set_size_total, size=training_size_this_scan)
            nonzero_indices = nonzero_indices[t_idx]
            labels = labels[t_idx]
            current_set_size = training_size_this_scan
        else:
            print 'generate on all points in this scan...'
            current_set_size = current_set_size_total

        print 'generating', current_set_size, 'features for', self.scan_dataset.id, ';fvlength', feature_vector_length

        if False == generate_and_save_all_neightborhood_indices:
            self.features.prepare(self.features_k_nearest_neighbors, nonzero_indices)
        else:
            self.features.prepare(self.features_k_nearest_neighbors, all_nonzero_indices, True, regenerate_neightborhood_indices)

        train_data = np.array(np.zeros((current_set_size, feature_vector_length)))
        train_labels = np.array(np.zeros(current_set_size))

        count = 0
        for index, label in zip(nonzero_indices, labels):

            # Copy the feature vector of this point into the training matrix.
            fv = self.features.get_featurevector(index, count)
            for fv_index, fv_value in enumerate(fv):
                train_data[count][fv_index] = fv_value
            train_labels[count] = label

            # Progress output every 1024 points.
            if count % 1024 == 0:
                print ut.getTime(), 'label', label, 'fv', fv
                print ut.getTime(), 'generate features:', count, 'of', current_set_size, '(', (float(count)/float(current_set_size)*100.0), '%)'

            count += 1

        dict = {'features' : train_data, 'labels' : train_labels,
                'feature_vector_length' : feature_vector_length,
                'set_size': current_set_size,
                'point_indices': nonzero_indices}
        return dict

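    # The returned dict is what generate_save_features() pickles per scan.
    # A minimal consumer sketch (the filename is hypothetical):
    #
    #   d = ut.load_pickle('scan017_features_....pkl')
    #   X = d['features']         # shape (set_size, feature_vector_length)
    #   y = d['labels']           # one label per sampled point
    #   idx = d['point_indices']  # indices back into the point cloud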

    def get_features_filename(self, state_exclude_edges = False):
        if state_exclude_edges == True:
            return self.config.path+'/data/'+self.scan_dataset.id+'_features_without_edges_'+self.feature_type+'_k'+str(self.feature_neighborhood)+'_r'+str(self.feature_radius)+'.pkl'
        else:
            return self.config.path+'/data/'+self.scan_dataset.id+'_features_'+self.feature_type+'_k'+str(self.feature_neighborhood)+'_r'+str(self.feature_radius)+'.pkl'

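    # Example filenames (hypothetical values: id='scan017',
    # feature_type='gaussian_histograms', feature_neighborhood=20,
    # feature_radius=0.03):
    #
    #   get_features_filename(False) -> <path>/data/scan017_features_gaussian_histograms_k20_r0.03.pkl
    #   get_features_filename(True)  -> <path>/data/scan017_features_without_edges_gaussian_histograms_k20_r0.03.pkl
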
    def train_and_save_Classifiers(self):
        for features, classifier in self.classifiers.iteritems():
            print ut.getTime(), 'training:', features

            classifier.train()

            try:
                classifier.save()
            except SystemError:
                print 'ERROR: saving classifier for', features, 'failed!'

    def load_Classifiers(self):
        for features, classifier in self.classifiers.iteritems():
            classifier.load()

    def test_crossvalidation_on_valid_scans(self, fold=None, update_postprocess=False):
        ''' Sliding leave-N-tables-out crossvalidation: in each fold,
            `tables_per_fold` tables form the test set and all other labeled
            tables the training set. The window slides until every table has
            been in a test set once.
        '''
        print 'start crossvalidation'

        tables_per_fold = 1

        testresults_crossvalidation = []
        last_fold_first_test_table_id = None
        last_table_id = None
        last_fold_last_test_table_id = None
        current_fold = 0

        debug_fold_count = 0
        while last_fold_last_test_table_id != last_table_id or last_fold_last_test_table_id == None:
            self.scan_dataset = self.scans_database.get_dataset(0)
            skip_test_table_count = 0
            current_fold_test_table_count = 0
            state = 'before_test_tables'
            print '======================================'
            while False != self.scan_dataset:

                if self.scan_dataset.is_labeled:
                    if state == 'before_test_tables':
                        # Skip over the tables that were test tables in
                        # earlier folds; they go back into the training set.
                        if (last_fold_first_test_table_id == self.scan_dataset.surface_id or skip_test_table_count > 0) and last_table_id != self.scan_dataset.surface_id:
                            skip_test_table_count += 1
                        if last_fold_first_test_table_id != None and skip_test_table_count <= tables_per_fold:
                            self.scan_dataset.is_test_set = False
                            self.scan_dataset.is_training_set = True
                        else:
                            state = 'test_tables'
                            last_fold_first_test_table_id = self.scan_dataset.surface_id
                            continue  # revisit this dataset in the new state

                    elif state == 'test_tables':
                        if last_table_id != self.scan_dataset.surface_id:
                            current_fold_test_table_count += 1

                        if current_fold_test_table_count <= tables_per_fold:
                            self.scan_dataset.is_test_set = True
                            self.scan_dataset.is_training_set = False
                            last_fold_last_test_table_id = self.scan_dataset.surface_id
                        else:
                            state = 'after_test_tables'
                            continue  # revisit this dataset in the new state

                    elif state == 'after_test_tables':
                        self.scan_dataset.is_test_set = False
                        self.scan_dataset.is_training_set = True

                    last_table_id = self.scan_dataset.surface_id

                self.scan_dataset = self.scans_database.get_next_dataset()

            # Train and test this fold (or only the requested fold).
            if fold == None or current_fold == fold:
                print ut.getTime(), 'start training, fold', fold

                if False == update_postprocess:
                    # Full run: train all classifiers, then test them.
                    self.train_and_save_Classifiers()
                    testresults_all = self.test_classifiers_on_testset()
                else:
                    # Only redo the postprocessing on stored test results.
                    testresults_all = self.update_test_postprocessing_on_testset()

                testresults_crossvalidation += [testresults_all]
                if fold != None:
                    self.save_testresults_all_crossvalidation_fold(testresults_all, fold)

            current_fold = current_fold + 1

        if fold == None:
            self.save_testresults_crossvalidation(testresults_crossvalidation)

        return testresults_crossvalidation

    def collect_and_save_testresults_crossvalidation(self, folds):
        testresults_crossvalidation = []
        for i in range(folds):
            try:
                testresults_all = self.load_testresults_all_crossvalidation_fold(i)
                testresults_crossvalidation += [testresults_all]
            except IOError:
                print 'ERROR: no data for fold', i, 'found!'

        self.save_testresults_crossvalidation(testresults_crossvalidation)

    def save_testresults_all_crossvalidation_fold(self, testresults_all, fold):
        filename = self.config.path+'/'+'testresults_all_crossvalidation_fold_'+str(fold)+'.pkl'
        print ut.getTime(), "Saving: "+filename
        ut.save_pickle(testresults_all, filename)

    def load_testresults_all_crossvalidation_fold(self, fold):
        filename = self.config.path+'/'+'testresults_all_crossvalidation_fold_'+str(fold)+'.pkl'
        print ut.getTime(), "loading: "+filename
        return ut.load_pickle(filename)

    def save_testresults_crossvalidation(self, testresults_crossvalidation):
        filename = self.config.path+'/'+'testresults_crossvalidation.pkl'
        print ut.getTime(), "Saving: "+filename
        ut.save_pickle(testresults_crossvalidation, filename)

    def load_testresults_crossvalidation(self):
        filename = self.config.path+'/'+'testresults_crossvalidation.pkl'
        return ut.load_pickle(filename)

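    # A minimal sketch for regenerating the LaTeX result tables from stored
    # crossvalidation results (assuming the pickles exist under config.path):
    #
    #   results = self.load_testresults_crossvalidation()
    #   self.print_testrestults_crossvalidation_latex(results)
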
    def load_classifier_and_test_on_dataset(self, features, feature_data):
        if features == 'all_post':
            # Postprocessing runs on top of the 'all' classifier's output.
            self.classifiers['all'].load()
            self.classifiers['all'].test(feature_data)
            labels, testresults = self.classifiers['all'].test_postprocess()
            self.save_classifier_labels(labels, testresults, 'all_post')
        else:
            self.classifiers[features].load()
            labels, testresults = self.classifiers[features].test(feature_data)
            self.save_classifier_labels(labels, testresults, features)
        return labels, testresults

    def test_classifiers_on_testset(self):
        testresults_all = {}

        self.scan_dataset = self.scans_database.get_dataset(0)

        for features, classifier in self.classifiers.iteritems():
            testresults_all[features] = {}
        testresults_all['all_post'] = {}

        while False != self.scan_dataset:
            if self.scan_dataset.is_test_set:

                self.load_data(self.scan_dataset.id, False)

                (self.pts3d, self.scan_indices, self.intensities) = self.create_pointcloud(self.laserscans)
                self.do_all_point_cloud_mapping()

                for features, classifier in self.classifiers.iteritems():
                    print ut.getTime(), 'start testing on ', features, 'id', self.scan_dataset.id

                    labels, testresults = classifier.test()

                    self.save_classifier_labels(labels, testresults, features)
                    testresults_all[features][self.scan_dataset.id] = testresults

                # Postprocess the output of the 'all' classifier.
                features = 'all_post'
                self.classifiers['all'].features = 'all_post'
                labels, testresults = self.classifiers['all'].test_postprocess()
                self.save_classifier_labels(labels, testresults, features)
                self.classifiers['all'].features = 'all'
                testresults_all[features][self.scan_dataset.id] = testresults

            self.scan_dataset = self.scans_database.get_next_dataset()

        self.save_testresults_all(testresults_all)
        return testresults_all

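    # Resulting structure: testresults_all[features][scan_id] = testresults,
    # where testresults is the 6-tuple (count_surface, count_clutter,
    # count_surface_correct, count_clutter_correct, percent_surface_correct,
    # percent_clutter_correct) consumed by print_testresults_all_latex().
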
    def update_test_postprocessing_on_testset(self):
        testresults_all = {}

        self.scan_dataset = self.scans_database.get_dataset(0)

        for features, classifier in self.classifiers.iteritems():
            testresults_all[features] = {}
        testresults_all['all_post'] = {}

        while False != self.scan_dataset:
            if self.scan_dataset.is_test_set:
                self.load_data(self.scan_dataset.id, False)

                (self.pts3d, self.scan_indices, self.intensities) = self.create_pointcloud(self.laserscans)
                self.do_all_point_cloud_mapping()

                # Recompute the test statistics from the stored labels
                # instead of re-running the classifiers.
                for features, classifier in self.classifiers.iteritems():
                    print ut.getTime(), 'start testing on ', features, 'id', self.scan_dataset.id
                    self.classifiers[features].test_labels = self.load_Classifier_labels(features)
                    self.classifiers[features].test_feature_dict = ut.load_pickle(self.get_features_filename())
                    testresults = classifier.test_results(self.classifiers[features].test_feature_dict, self.classifiers[features].test_labels)
                    testresults_all[features][self.scan_dataset.id] = testresults

                features = 'all_post'
                print 'perform postprocessing on ', self.scan_dataset.id
                self.classifiers['all'].features = 'all_post'
                labels, testresults = self.classifiers['all'].test_postprocess()
                self.save_classifier_labels(labels, testresults, features)
                self.classifiers['all'].features = 'all'
                testresults_all[features][self.scan_dataset.id] = testresults

            self.scan_dataset = self.scans_database.get_next_dataset()

        self.save_testresults_all(testresults_all)
        return testresults_all

    def save_testresults_all(self, testresults_all):
        filename = self.config.path+'/'+'testresults_all.pkl'
        print ut.getTime(), "Saving: "+filename
        ut.save_pickle(testresults_all, filename)

    def load_testresults_all(self):
        filename = self.config.path+'/'+'testresults_all.pkl'
        print ut.getTime(), "loading: "+filename
        return ut.load_pickle(filename)

    def print_testresults_all_latex(self, testresults_all):
        ''' Print the per-scan results as LaTeX table rows and return the
            accumulated per-feature totals. '''
        testresults_total = {}
        for features, results in testresults_all.iteritems():
            testresults_total[features] = {'count_surface' : 0, 'count_clutter' : 0, 'count_surface_correct' : 0, 'count_clutter_correct' : 0}

        # Reorder from testresults_all[features][id] to [id][features].
        testresults_all_reordered = {}
        for features, results in testresults_all.iteritems():
            for id, testresults in results.iteritems():
                testresults_all_reordered[id] = {}
            break

        for features, results in testresults_all.iteritems():
            for id, testresults in results.iteritems():
                testresults_all_reordered[id][features] = testresults

        ordered_features = ['range', 'color', 'all', 'all_post', 'baseline']

        ids = testresults_all_reordered.keys()
        ids.sort()
        for id in ids:
            results = testresults_all_reordered[id]

            first = True
            for features in ordered_features:
                testresults = results[features]
                if first == True:
                    print '\\multirow{5}{*}{', id.replace('_','-'), '}'
                    first = False
                count_surface, count_clutter, count_surface_correct, count_clutter_correct, percent_surface_correct, percent_clutter_correct = testresults

                testresults_total[features]['count_surface'] += count_surface
                testresults_total[features]['count_clutter'] += count_clutter
                testresults_total[features]['count_surface_correct'] += count_surface_correct
                testresults_total[features]['count_clutter_correct'] += count_clutter_correct
                if features != 'baseline':
                    name = features
                else:
                    name = 'baseline algo'
                if features == 'all_post':
                    print '&\\bf{', name.replace('_','-'), '}&\\bf{', count_surface, '}&\\bf{', count_surface_correct, '}&\\bf{', count_clutter, \
                          '}&\\bf{', count_clutter_correct, '}&\\bf{%(ps)02.2f}&\\bf{%(pc)02.2f}\\\\' % {'ps':percent_surface_correct, 'pc':percent_clutter_correct}
                else:
                    print '&', name.replace('_','-'), '&', count_surface, '&', count_surface_correct, '&', count_clutter, \
                          '&', count_clutter_correct, '&%(ps)02.2f&%(pc)02.2f\\\\' % {'ps':percent_surface_correct, 'pc':percent_clutter_correct}
                if features != 'baseline':
                    print '\\cline{2-8}'
                else:
                    print '\\hline\\hline'

        print '\\hline\\hline'

        # Totals over all scans.
        first = True
        for features in ordered_features:
            total_counts = testresults_total[features]

            count_surface = total_counts['count_surface']
            count_clutter = total_counts['count_clutter']
            count_surface_correct = total_counts['count_surface_correct']
            count_clutter_correct = total_counts['count_clutter_correct']
            percent_surface_correct = float(count_surface_correct)/float(count_surface) * 100
            percent_clutter_correct = float(count_clutter_correct)/float(count_clutter) * 100
            if first == True:
                print '\\multirow{5}{*}{total}'
                first = False
            if features != 'baseline':
                name = features
            else:
                name = 'baseline algo'
            if features == 'all_post':
                print '&\\bf{', name.replace('_','-'), '}&\\bf{', count_surface, '}&\\bf{', count_surface_correct, '}&\\bf{', count_clutter, \
                      '}&\\bf{', count_clutter_correct, '}&\\bf{%(ps)02.2f}&\\bf{%(pc)02.2f}\\\\' % {'ps':percent_surface_correct, 'pc':percent_clutter_correct}
            else:
                print '&', name.replace('_','-'), '&', count_surface, '&', count_surface_correct, '&', count_clutter, \
                      '&', count_clutter_correct, '&%(ps)02.2f&%(pc)02.2f\\\\' % {'ps':percent_surface_correct, 'pc':percent_clutter_correct}
            if features != 'baseline':
                print '\\cline{2-8}'
            else:
                print '\\hline\\hline'

        return testresults_total

    def print_testrestults_crossvalidation_latex(self, testresults_crossvalidation):
        ''' Print one LaTeX results table per fold, plus a final table with
            the totals over all folds. '''
        ordered_features = ['range', 'color', 'all', 'all_post', 'baseline']
        testresults_total_crossvalidation = {}
        for features in ordered_features:
            testresults_total_crossvalidation[features] = {'count_surface' : 0, 'count_clutter' : 0, 'count_surface_correct' : 0, 'count_clutter_correct' : 0}

        latex_clearpage = False

        for testresults_all in testresults_crossvalidation:
            print r"""\begin{table*}[p]
\centering

\caption{Classification results}
\label{tab:results}
%\resizebox{1.80\columnwidth}{!} {
\begin{tabular}{|c|c||c|c|c|c||c|c|}
\hline
\tbthc{Dataset}&\tbth{Features /}&\tbthc{\#points surface}&\tbth{\#points surface}&\tbthc{\#points clutter}&\tbth{\#points clutter}&\tbth{\% surface}&\tbth{\% clutter}
\\ &\tbth{Algorithm}&&\tbth{correct}&&\tbth{correct}&\tbth{correct}&\tbth{correct}
\\\hline"""
            testresults_total = self.print_testresults_all_latex(testresults_all)
            print r"""\end{tabular}
%}
\end{table*}"""
            # Emit \clearpage after every second table.
            if latex_clearpage:
                print r"""\clearpage"""
                latex_clearpage = False
            else:
                latex_clearpage = True
            for features in ordered_features:
                testresults_total_crossvalidation[features]['count_surface'] += testresults_total[features]['count_surface']
                testresults_total_crossvalidation[features]['count_clutter'] += testresults_total[features]['count_clutter']
                testresults_total_crossvalidation[features]['count_surface_correct'] += testresults_total[features]['count_surface_correct']
                testresults_total_crossvalidation[features]['count_clutter_correct'] += testresults_total[features]['count_clutter_correct']

        print r"""\begin{table*}[p]
\centering
\caption{Classification results crossvalidation total}
\label{tab:results}
%\resizebox{1.80\columnwidth}{!} {
\begin{tabular}{|c|c||c|c|c|c||c|c|}
\hline
\tbthc{Dataset}&\tbth{Features /}&\tbthc{\#points surface}&\tbth{\#points surface}&\tbthc{\#points clutter}&\tbth{\#points clutter}&\tbth{\% surface}&\tbth{\% clutter}
\\ &\tbth{Algorithm}&&\tbth{correct}&&\tbth{correct}&\tbth{correct}&\tbth{correct}
\\\hline"""
        first = True
        for features in ordered_features:
            count_surface = testresults_total_crossvalidation[features]['count_surface']
            count_clutter = testresults_total_crossvalidation[features]['count_clutter']
            count_surface_correct = testresults_total_crossvalidation[features]['count_surface_correct']
            count_clutter_correct = testresults_total_crossvalidation[features]['count_clutter_correct']
            percent_surface_correct = float(count_surface_correct)/float(count_surface) * 100
            percent_clutter_correct = float(count_clutter_correct)/float(count_clutter) * 100

            if first == True:
                print '\\multirow{5}{*}{crossvalidation}'
                first = False
            if features != 'baseline':
                name = features
            else:
                name = 'baseline algo'
            if features == 'all_post':
                print '&\\bf{', name.replace('_','-'), '}&\\bf{', count_surface, '}&\\bf{', count_surface_correct, '}&\\bf{', count_clutter, \
                      '}&\\bf{', count_clutter_correct, '}&\\bf{%(ps)02.2f}&\\bf{%(pc)02.2f}\\\\' % {'ps':percent_surface_correct, 'pc':percent_clutter_correct}
            else:
                print '&', name.replace('_','-'), '&', count_surface, '&', count_surface_correct, '&', count_clutter, \
                      '&', count_clutter_correct, '&%(ps)02.2f&%(pc)02.2f\\\\' % {'ps':percent_surface_correct, 'pc':percent_clutter_correct}
            if features != 'baseline':
                print '\\cline{2-8}'
            else:
                print '\\hline\\hline'

        print r"""\end{tabular}
%}
\end{table*}"""

    def generate_and_save_visualizations(self, id):
        feature_list = self.classifiers.keys()
        feature_list += ['labels', 'all_post']

        self.load_data(id, False)
        self.process_raw_data()

        for features in feature_list:
            self.img_labels = self.draw_mapped_labels_into_image(features)
            self.save_labels_image(features)
            self.save_3d(features, True)

    def generate_and_save_visualizations_all(self):
        self.scan_dataset = self.scans_database.get_dataset(0)

        while False != self.scan_dataset:
            if self.scan_dataset.is_labeled:
                self.generate_and_save_visualizations(self.scan_dataset.id)

            self.scan_dataset = self.scans_database.get_next_dataset()

    def generate_and_save_visualizations_height(self):
        self.scan_dataset = self.scans_database.get_dataset(0)

        while False != self.scan_dataset:
            if self.scan_dataset.is_test_set:
                self.load_data(self.scan_dataset.id, False)
                self.process_raw_data()
                self.save_3d('height', True)

            self.scan_dataset = self.scans_database.get_next_dataset()

    def test_Classifiers(self):
        (self.pts3d, self.scan_indices, self.intensities) = self.create_pointcloud(self.laserscans)
        self.do_all_point_cloud_mapping()
        for features, classifier in self.classifiers.iteritems():
            labels, testresults = classifier.test()
            self.save_classifier_labels(labels, testresults, features)

        # Postprocess the output of the 'all' classifier.
        features = 'all_post'
        labels, testresults = self.classifiers['all'].test_postprocess()
        self.save_classifier_labels(labels, testresults, features)

    def get_point_label(self, index):
        return self.map_polys[index]

    def save_classifier_labels(self, labels, testresults, features):
        dict = {'labels' : labels, 'testresults' : testresults}
        filename = self.get_Classifier_labels_filename(features)
        print ut.getTime(), "Saving: "+filename
        ut.save_pickle(dict, filename)
        print '...saving labels: len:', len(labels), 'testresults:', testresults

    def load_Classifier_labels(self, features):
        filename = self.get_Classifier_labels_filename(features)
        print 'loading', filename
        dict = ut.load_pickle(filename)
        print '...loading labels: len:', len(dict['labels']), 'testresults:', dict['testresults']
        return dict['labels']

    def get_Classifier_labels_filename(self, features):
        return self.config.path+'/data/'+self.scan_dataset.id+'_labels_Classifier_'+features+'_'+self.feature_type+'_k'+str(self.feature_neighborhood)+'_r'+str(self.feature_radius)+'.pkl'

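    # The pickle written above is a two-key dict; a minimal consumer sketch
    # (the features value 'all' is one of the configured classifier keys):
    #
    #   d = ut.load_pickle(self.get_Classifier_labels_filename('all'))
    #   d['labels']       # per-point labels predicted by the classifier
    #   d['testresults']  # the 6-tuple of counts/percentages described above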

    def save_all_labeled_pointclouds(self):
        self.scan_dataset = self.scans_database.get_dataset(0)
        while False != self.scan_dataset:
            if self.scan_dataset.is_labeled:
                self.load_data(self.scan_dataset.id, False)
                print "processing scan #", self.scan_dataset.id
                self.process_raw_data()
                print "successfully processed raw data"

                dict = {'points': self.pts3d,
                        'intensities': self.intensities,
                        'surface_height': self.scan_dataset.surface_height}

                filename = self.config.path+'/data/labeled_pointclouds/pointcloud_'+self.scan_dataset.id+'.pkl'
                print 'saving', filename
                ut.save_pickle(dict, filename)

            self.scan_dataset = self.scans_database.get_next_dataset()

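    # Each saved pointcloud pickle can be reloaded independently of the
    # database. A minimal sketch (the id 'scan017' is hypothetical):
    #
    #   d = ut.load_pickle(config.path+'/data/labeled_pointclouds/pointcloud_scan017.pkl')
    #   pts = d['points']          # 3xN point cloud
    #   inten = d['intensities']   # laser intensity per point
    #   h = d['surface_height']    # labeled table surface height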