Classes
    class boosted_tree_classifier

Functions
    def load
    def postprocess
    def release_train_datastructures
    def save
    def test
    def test_postprocess
    def train

Variables
    cv_classifier
    test_feature_dict
    test_labels
    tuple type_mask = cv.cvCreateMat(1, feature_vector_length+1, cv.CV_8UC1)
def boosted_tree_classifier.load(self)
Definition at line 313 of file boosted_tree_classifier.py.
def boosted_tree_classifier.postprocess(self, labels)
Definition at line 277 of file boosted_tree_classifier.py.
def boosted_tree_classifier.release_train_datastructures(self, train_datastructures)
Definition at line 215 of file boosted_tree_classifier.py.
def boosted_tree_classifier.save(self)
Definition at line 304 of file boosted_tree_classifier.py.
def boosted_tree_classifier.test(self, feature_data = None)
Definition at line 223 of file boosted_tree_classifier.py.
def boosted_tree_classifier.test_postprocess(self)
Definition at line 273 of file boosted_tree_classifier.py.
def boosted_tree_classifier.train(self)
Definition at line 194 of file boosted_tree_classifier.py.
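
The methods above form a train/save/load/test cycle. The following is a minimal usage sketch, not taken from the file: only the method names and signatures documented above are real, while the import path and the constructor arguments (a processor object and a feature-set selector, inferred from the self.processor and self.features attributes used in the code fragment under type_mask below) are assumptions.

    from boosted_tree_classifier import boosted_tree_classifier

    # Assumed constructor arguments; the real signature is not documented here.
    clf = boosted_tree_classifier(processor, features)

    clf.train()             # fit the boosted trees on the training features
    clf.save()              # persist the trained classifier (cv_classifier)
    clf.load()              # or restore a previously saved classifier instead
    clf.test()              # classify the test feature data (feature_data is optional)
    clf.test_postprocess()  # post-process the most recent test results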
boosted_tree_classifier.cv_classifier
Definition at line 194 of file boosted_tree_classifier.py.

boosted_tree_classifier.test_feature_dict
Definition at line 223 of file boosted_tree_classifier.py.

boosted_tree_classifier.test_labels
Definition at line 223 of file boosted_tree_classifier.py.
tuple boosted_tree_classifier::type_mask = cv.cvCreateMat(1, feature_vector_length+1, cv.CV_8UC1)
    # subsample from the features, NOT USED/NOT WORKING?
    else:
        print ut.getTime(), 'more than', max_traning_size, 'features, sample from them...'
        # select 2040000 features:
        all_data = []
        all_labels = []
        for dict in data:
            for index in range(dict['set_size']):
                if dict['labels'][index] == processor.LABEL_SURFACE or dict['labels'][index] == processor.LABEL_CLUTTER:
                    fv = (dict['features'][index])[self.processor.features.get_indexvector(self.features)]
                    all_data += [fv]
                    all_labels += [dict['labels'][index]]
                current_training_set_index = current_training_set_index + 1
                if current_training_set_index % 16384 == 0:
                    print ut.getTime(), 'reading features:', current_training_set_index, 'of', training_set_size, '(', (float(current_training_set_index)/float(training_set_size)*100.0), '%)'

        del data
        indices = np.array(random.sample(xrange(len(all_labels)), max_traning_size))
        all_data = np.asarray(all_data)
        all_labels = np.asarray(all_labels)

        all_data = all_data[indices]
        all_labels = all_labels[indices]

        train_data = cv.cvCreateMat(max_traning_size, feature_vector_length, cv.CV_32FC1)  # CvMat* cvCreateMat(int rows, int cols, int type)
        train_labels = cv.cvCreateMat(max_traning_size, 1, cv.CV_32FC1)

        for index in range(max_traning_size):
            for fv_index, fv_value in enumerate(all_data[index]):
                train_data[index][fv_index] = fv_value
            train_labels[index] = all_labels[index]
            if index % 16384 == 0:
                print ut.getTime(), 'setting features:', (float(index)/float(max_traning_size))
Definition at line 186 of file boosted_tree_classifier.py.
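
The code fragment above subsamples a fixed number of labeled feature vectors and copies them into OpenCV CvMat training matrices. The sketch below restates just the subsampling step in plain NumPy to make the logic easier to follow; all names and sizes (max_training_size, the random data) are illustrative stand-ins, not values from the file.

    import numpy as np

    # Illustrative stand-ins for the quantities used in the fragment above.
    feature_vector_length = 35
    max_training_size = 1000
    all_data = np.random.rand(5000, feature_vector_length).astype(np.float32)
    all_labels = np.random.randint(0, 2, size=5000).astype(np.float32)

    # Sample max_training_size rows without replacement, mirroring
    # random.sample(xrange(len(all_labels)), max_traning_size) above.
    indices = np.random.permutation(len(all_labels))[:max_training_size]

    train_data = all_data[indices]               # plays the role of the CV_32FC1 train_data CvMat
    train_labels = all_labels[indices][:, None]  # one label per row, like the train_labels CvMat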