# clutter_segmentation: scans_database
# Author(s): Jason Okerman, Martin Schuster
# Advisors: Prof. Charlie Kemp and Jim Rehg
# Lab: Healthcare Robotics Lab at Georgia Tech

import shutil

import label_object
import scan_dataset
import util as ut

class scans_database(object):
    '''
    Holds a collection of scan_dataset objects and persists them to a
    pickle file on disk. An internal pointer (current_index) supports
    sequential navigation through the datasets.
    '''

    def __init__(self):
        self.datasets = []
        self.current_index = 0

    def load(self, path, filename):
        # Remember the location so save() can write back to the same file.
        self.filename = filename
        self.path = path

        database_dict = ut.load_pickle(self.path + '/' + self.filename)
        self.datasets = database_dict['datasets']

    def save(self):
        database_dict = {'datasets': self.datasets, 'version': 0.1}

        # Back up the current database file before overwriting it.
        database_filename = self.path + '/' + self.filename
        backup_filename = database_filename + '_backup_' + ut.formatted_time()
        print('Backing up old database to ' + backup_filename)
        shutil.copy(database_filename, backup_filename)

        print('Saving: ' + database_filename)
        ut.save_pickle(database_dict, database_filename)

    def get_path(self):
        return self.path

    def get_dataset(self, index):
        self.current_index = index
        return self.datasets[index]

    def get_dataset_by_id(self, id):
        # Return the dataset with the given id, or False if none matches.
        for dataset in self.datasets:
            if dataset.id == id:
                return dataset
        return False

    def set_internal_pointer_to_dataset(self, id):
        # Point current_index at the dataset with the given id.
        # Returns True on success, False if the id is not in the database.
        self.current_index = 0
        for dataset in self.datasets:
            if dataset.id == id:
                return True
            self.current_index += 1
        return False

    def get_next_dataset(self):
        if self.current_index < len(self.datasets) - 1:
            self.current_index += 1
            return self.datasets[self.current_index]
        else:
            return False

    def get_prev_dataset(self):
        if self.current_index > 0:
            self.current_index -= 1
            return self.datasets[self.current_index]
        else:
            return False

    def get_first_dataset(self):
        if len(self.datasets) > 0:
            self.current_index = 0
            return self.datasets[self.current_index]
        else:
            return False

    def get_last_dataset(self):
        if len(self.datasets) > 0:
            self.current_index = len(self.datasets) - 1
            return self.datasets[self.current_index]
        else:
            return False

    def get_count(self):
        return len(self.datasets)

    def add_dataset(self, dataset):
        self.datasets.append(dataset)

    def delete_current_dataset(self):
        # Delete the dataset at the internal pointer, then return the
        # previous dataset if there is one, otherwise the result of
        # get_next_dataset() (which may be False if nothing suitable remains).
        del self.datasets[self.current_index]
        dataset = self.get_prev_dataset()
        if dataset is not False:
            return dataset
        else:
            return self.get_next_dataset()

    def add_attribute_to_every_dataset(self, name):
        # Add a new, empty attribute entry to the dict of every dataset.
        for dataset in self.datasets:
            dataset.dict[name] = ''
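
# Illustrative usage sketch (not part of the original module). The directory
# and filename below are hypothetical placeholders; an existing database
# pickle produced by this package is required for load() to succeed.
if __name__ == '__main__':
    db = scans_database()
    db.load('/tmp/scan_data', 'database.pkl')
    print('datasets in database: ' + str(db.get_count()))

    # Walk through all datasets using the internal pointer.
    dataset = db.get_first_dataset()
    while dataset is not False:
        print('dataset id: ' + str(dataset.id))
        dataset = db.get_next_dataset()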