#! /usr/bin/env python
#
# Copyright (c) 2010, Georgia Tech Research Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#     * Redistributions of source code must retain the above copyright
#       notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above copyright
#       notice, this list of conditions and the following disclaimer in the
#       documentation and/or other materials provided with the distribution.
#     * Neither the name of the Georgia Tech Research Corporation nor the
#       names of its contributors may be used to endorse or promote products
#       derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY GEORGIA TECH RESEARCH CORPORATION ''AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL GEORGIA TECH BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#

#  \author Martin Schuster (Healthcare Robotics Lab, Georgia Tech.)
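"""Capture and save a calibration scan for the laser_camera_segmentation package.

The active code at the bottom of this script points the configuration at the
'calib' dataset directory, selects webcam 0 and stores a scan under the id
'calib'. The large commented-out blocks are earlier experiments with the legacy
OpenCV CvSVM bindings and with the processor pipeline, kept for reference.
"""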
import scanner
#import processor
import configuration
#import hrl_lib.util as ut
#
#import roslib; roslib.load_manifest('laser_camera_segmentation')
#import hrl_hokuyo.hokuyo_scan as hs
#import hrl_hokuyo.hokuyo_scan as hokuyo_scan


#import opencv as cv
#from opencv import highgui
#
#import pylab
#from matplotlib.patches import Rectangle
#
#my_svm = cv.CvSVM()
##print CvSVM::train(const CvMat* _train_data, const CvMat* _responses, const CvMat* _var_idx=0, const CvMat* _sample_idx=0, CvSVMParams _params=CvSVMParams())
#train_data = cv.cvCreateMat(10,2,cv.CV_32FC1) #CvMat* cvCreateMat(int rows, int cols, int type)
#train_data[0][0] = 1
#train_data[1][0] = 2
#train_data[2][0] = 3
#train_data[3][0] = 4
#train_data[4][0] = 5
#train_data[5][0] = 6
#train_data[6][0] = 7
#train_data[7][0] = 8
#train_data[8][0] = 9
#train_data[9][0] = 10
#train_data[0][1] = 1
#train_data[1][1] = 2
#train_data[2][1] = 3
#train_data[3][1] = 4
#train_data[4][1] = 5
#train_data[5][1] = 6
#train_data[6][1] = 7
#train_data[7][1] = 8
#train_data[8][1] = 9
#train_data[9][1] = 10
#
#for i in range(10):
#    print train_data[i][0]
#    print train_data[i][1]
#    print '###'
#
#responses = cv.cvCreateMat(10,1,cv.CV_32FC1)
#responses[0] = 1
#responses[1] = 1
#responses[2] = 1
#responses[3] = 1
#responses[4] = 1
#responses[5] = 0
#responses[6] = 0
#responses[7] = 0
#responses[8] = 0
#responses[9] = 0
#
#
#params = cv.CvSVMParams()
#params.svm_type = cv.CvSVM.C_SVC
## Type of SVM, one of the following types:
## CvSVM::C_SVC - n-class classification (n>=2), allows imperfect separation of classes with penalty multiplier C for outliers.
## CvSVM::NU_SVC - n-class classification with possible imperfect separation. Parameter nu (in the range 0..1, the larger the value, the smoother the decision boundary) is used instead of C.
## CvSVM::ONE_CLASS - one-class SVM. All the training data are from the same class, SVM builds a boundary that separates the class from the rest of the feature space.
## CvSVM::EPS_SVR - regression. The distance between feature vectors from the training set and the fitting hyperplane must be less than p. For outliers the penalty multiplier C is used.
## CvSVM::NU_SVR - regression; nu is used instead of p.
#params.kernel_type = cv.CvSVM.SIGMOID
## CvSVM::LINEAR - no mapping is done, linear discrimination (or regression) is done in the original feature space. It is the fastest option: d(x,y) = x*y == (x,y)
## CvSVM::POLY - polynomial kernel: d(x,y) = (gamma*(x*y)+coef0)^degree
## CvSVM::RBF - radial-basis-function kernel; a good choice in most cases: d(x,y) = exp(-gamma*|x-y|^2)
## CvSVM::SIGMOID - sigmoid function is used as a kernel: d(x,y) = tanh(gamma*(x*y)+coef0)
#
#print my_svm.train_auto(train_data, responses, None, None, params)
#print my_svm.get_params()
#test = cv.cvCreateMat(1,2,cv.CV_32FC1)
#test[0] = 6
#test[1] = 8.7878
#print my_svm.predict(test)
#
#import matplotlib.pyplot as plt
#import matplotlib.image as mpimg
#import numpy as np
#
#n = 100
#m = 100
#results = np.array(-1*np.ones((n,m)))
#
#for i in range(n):
#    for j in range(m):
#        test[0] = i
#        test[1] = j
#        results[i][j] = my_svm.predict(test)
#        #print str(i) + ' ' + str(j) + ' ' + ' -> ' + str(results[i][j])
#
##print results
#
#imgplot = plt.imshow(results, cmap=pylab.cm.gray, interpolation='nearest')
##imgplot = plt.imshow(np.array(train_data).transpose())
##imgscatter = plt.scatter(np.array(train_data)[:,0], np.array(train_data)[:,1])
#plt.show()
#
#
#
##pylab.ion() #interactive
##pylab.figure(figsize=(8,4))
##pylab.hold(True)
##pylab.subplot(121)
##pylab.title('test')
##pylab.imshow(responses, cmap=pylab.cm.gray, interpolation='nearest')
##
##pylab.draw()
#
#
#
##cfg = configuration.configuration('/home/martin/robot1_data/usr/martin/laser_camera_segmentation/labeling')
###sc = scanner.scanner(cfg)
##pc = processor.processor(cfg)
##
###name = ut.formatted_time()
###sc.capture_and_save(name)
###pc.load_raw_data(name)
##
##id = '2009Sep14_095609'
##pc.load_raw_data(id)
##pc.load_metadata(id)
##print pc.scan_dataset.id
##print pc.scan_dataset.polygons
##pc.create_polygon_images()
##pc.process_raw_data()
###pc.save_mapped_image(name)
##pc.display_all_data()
##
##print pc.scan_dataset.polygons[0].cvImage[400]
#
#
##! /usr/bin/env python
#
#
#
# Capture and save a calibration scan: point the configuration at the 'calib'
# dataset directory, select webcam 0 and store the scan under the id 'calib'.
cfg = configuration.configuration('/home/martin/robot1_data/usr/martin/laser_camera_segmentation/calib')
cfg.webcam_id = 0
sc = scanner.scanner(cfg)
##pc = processor.processor(cfg)
##
###name = ut.formatted_time()
sc.capture_and_save('calib')
##
##pc.load_raw_data('2009Oct17_114217')
##pc.process_raw_data()
##pc.display_all_data()
#
#

print 'done'
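# The commented-out CvSVM experiment above was written against the old SWIG-based
# 'opencv' Python bindings, which are no longer available. A minimal sketch of the
# same toy experiment with the cv2.ml API (an assumption: OpenCV >= 3 and numpy
# installed; kept commented out here, like the original experiment) could look
# like this:
#
#import cv2
#import numpy as np
#
## same toy data as above: ten points on the diagonal, first half labelled 1, second half 0
#train_data = np.array([[i + 1, i + 1] for i in range(10)], dtype=np.float32)
#responses = np.array([1, 1, 1, 1, 1, 0, 0, 0, 0, 0], dtype=np.int32)
#
#svm = cv2.ml.SVM_create()
#svm.setType(cv2.ml.SVM_C_SVC)       # n-class classification with penalty multiplier C
#svm.setKernel(cv2.ml.SVM_SIGMOID)   # d(x,y) = tanh(gamma*(x*y)+coef0)
#svm.train(train_data, cv2.ml.ROW_SAMPLE, responses)
#
## predict the label of a single test sample; predict() returns (retval, results)
#test = np.array([[6.0, 8.7878]], dtype=np.float32)
#print svm.predict(test)[1]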