recognize_3d_result_plotter.py
"""Plot classifier performance curves and per-feature-set bar charts from
recognize_3d result pickles (package hai_sandbox, author: Hai Nguyen)."""
import roslib; roslib.load_manifest('hai_sandbox')
import hrl_lib.util as ut
import pylab as pb
import numpy as np

def conf_to_percent(rec):
    """Turn a confusion record {'mat', 'neg', 'pos'} into
    (true negative rate, true positive rate)."""
    # Use a float copy: avoids integer division and leaves rec['mat'] intact.
    conf = np.array(rec['mat'], dtype=float)
    conf[0, :] = conf[0, :] / rec['neg']
    conf[1, :] = conf[1, :] / rec['pos']
    return conf[0, 0], conf[1, 1]

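# A minimal sketch of the confusion-record convention conf_to_percent assumes
# (the counts are made up): row 0 of 'mat' holds outcomes for the negative
# examples, row 1 for the positive examples.
def _demo_conf_to_percent():
    rec = {'mat': np.array([[8., 2.],    # 8 of 10 negatives classified correctly
                            [1., 4.]]),  # 4 of  5 positives classified correctly
           'neg': 10., 'pos': 5.}
    print('rates: %s %s' % conf_to_percent(rec))  # rates: 0.8 0.8
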
def plot_classifier_performance(fname, pname, plot_all):
    """Plot true negative and true positive rates over training iterations
    for the results pickle fname, using pname as the curve label."""
    results = ut.load_pickle(fname)
    # Expected pickle layout:
    #   results['train_set_statistics']    # [{'conf', 'size'}, ...]
    #   results['current_scan_statistics'] # [{'conf'}, ...]
    #   results['perf_on_other_scans']     # [[{'name', 'conf'}, ...], ...]
    # where each conf is {'mat', 'neg', 'pos'}

    # Gather per-scan (neg_rate, pos_rate) pairs, keyed by scan name.
    scores = {}
    for rlist in results['perf_on_other_scans']:
        for d in rlist:
            scores.setdefault(d['name'], []).append(conf_to_percent(d['conf']))
    for k in scores.keys():
        # Transpose [(neg, pos), ...] into ([negs...], [poss...]).
        scores[k] = zip(*scores[k])

    if 'train_set_statistics' in results:
        train_neg, train_pos = zip(*[conf_to_percent(d['conf']) for d in results['train_set_statistics']])
    else:
        train_neg = train_pos = None

    if 'current_scan_statistics' in results:
        test_neg, test_pos = zip(*[conf_to_percent(d['conf']) for d in results['current_scan_statistics']])
    else:
        test_neg = test_pos = None

    n_iterations = np.arange(len(results.get('train_set_statistics', [])))

    #======================================================================
    # Figure 1: true negative rate vs. training iteration.
    pb.figure(1)
    if train_neg is not None:
        pb.plot(n_iterations, train_neg, label='train ' + pname)
    if test_neg is not None:
        pb.plot(n_iterations, test_neg, label='test ' + pname)
    if plot_all:
        for i, k in enumerate(scores.keys()):
            pb.plot(n_iterations, scores[k][0], '--', label=str(i))
    if 'converged_at_iter' in results:
        # Vertical red line at the iteration where training converged.
        pb.plot([results['converged_at_iter'], results['converged_at_iter']], [0., 1.], 'r')

    pb.title('True negatives')
    pb.legend()

    #======================================================================
    # Figure 2: true positive rate vs. training iteration.
    pb.figure(2)
    if train_pos is not None:
        pb.plot(n_iterations, train_pos, label='train ' + pname)
    if test_pos is not None:
        pb.plot(n_iterations, test_pos, label='test ' + pname)

    print('mapping from dataset to id')
    if plot_all:
        for i, k in enumerate(scores.keys()):
            pb.plot(n_iterations, scores[k][1], '--', label=str(i))
            print('ID %d dataset %s' % (i, k))

    if 'converged_at_iter' in results:
        pb.plot([results['converged_at_iter'], results['converged_at_iter']], [0., 1.], 'r')

    pb.title('True positives')
    pb.legend()

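# For reference, a minimal results dict in the shape plot_classifier_performance
# loads from its pickle; the field names follow the layout comments above, and
# the numbers are hypothetical.
def _demo_results():
    rec = {'mat': np.array([[8., 2.], [1., 4.]]), 'neg': 10., 'pos': 5.}
    return {'train_set_statistics':    [{'conf': rec, 'size': 100}],
            'current_scan_statistics': [{'conf': rec}],
            'perf_on_other_scans':     [[{'name': 'scan_a', 'conf': rec}]],
            'converged_at_iter':       0}
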
def plot_features_perf(fnames, pnames):
    """Compare feature sets: one results pickle (fnames) per feature set name
    (pnames), producing bar charts of per-dataset rates."""
    all_scores = {}
    dset_names = None
    for fname, pname in zip(fnames, pnames):
        results = ut.load_pickle(fname)
        train_neg, train_pos = zip(*[conf_to_percent(d['conf']) for d in results['train_set_statistics']])
        scores = {}
        for rlist in results['perf_on_other_scans']:
            for d in rlist:
                scores.setdefault(d['name'], []).append(conf_to_percent(d['conf']))
        for k in scores.keys():
            scores[k] = zip(*scores[k])
        scores['train'] = [train_neg, train_pos]
        all_scores[pname] = scores
        if dset_names is None:
            dset_names = scores.keys()

    # For each dataset, collect one value per feature set: index [0][0] is the
    # first recorded true negative rate, [1][0] the first true positive rate.
    neg_by_dset = {}
    pos_by_dset = {}
    for n in dset_names:
        neg_by_dset[n] = [all_scores[pname][n][0][0] for pname in pnames]
        pos_by_dset[n] = [all_scores[pname][n][1][0] for pname in pnames]

    ind = np.arange(len(pnames))  # one bar-group slot per feature set
    width = 0.05                  # width of a single bar

    # Figure 1: grouped bars of the true positive rate, one slot per feature
    # set (pnames), one randomly colored bar per dataset inside each slot.
    fig = pb.figure(1)
    ax = fig.add_subplot(111)
    rects = []
    for i, name in enumerate(dset_names):
        rect = ax.bar(ind + (width * i), pos_by_dset[name], width,
                      color=tuple(np.random.rand(3).tolist()))
        rects.append(rect)
    ax.set_ylabel('accuracy')
    ax.set_title('True positives by dataset and features used')
    ax.set_xticks(ind + width)
    ax.set_xticklabels(tuple(pnames))

    # Figure 2: the same layout for the true negative rate.
    fig = pb.figure(2)
    ax = fig.add_subplot(111)
    rects = []
    for i, name in enumerate(dset_names):
        rect = ax.bar(ind + (width * i), neg_by_dset[name], width,
                      color=tuple(np.random.rand(3).tolist()))
        rects.append(rect)
    ax.set_ylabel('accuracy')
    ax.set_title('True negatives by dataset and features used')
    ax.set_xticks(ind + width)
    ax.set_xticklabels(tuple(pnames))

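# Geometry note for the grouped bars above: with width = 0.05, the bar for
# dataset i in feature-set slot j is drawn at x = j + 0.05 * i, so with three
# datasets the bars of slot 0 sit at x = 0.00, 0.05, 0.10. With many datasets
# a group can outgrow the slot spacing of 1.0; width = 1.0 / (len(dset_names) + 1)
# would keep groups separated (a possible tweak, not what the code above does).
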
if __name__ == '__main__':
    import optparse
    p = optparse.OptionParser()
    p.add_option('-m', '--mode', action='store', type='string')
    p.add_option('-f', '--file', action='append', type='string')
    p.add_option('-n', '--name', action='append', type='string')
    opt, args = p.parse_args()

    if opt.mode == 'active':
        # Plot the per-scan dashed curves only when a single file is given.
        plot_all = len(opt.file) <= 1
        for i in range(len(opt.file)):
            plot_classifier_performance(opt.file[i], opt.name[i], plot_all)
        pb.show()

    if opt.mode == 'features':
        plot_features_perf(opt.file, opt.name)
        pb.show()

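# Example invocations (the pickle file and run names below are hypothetical):
#   python recognize_3d_result_plotter.py -m active -f run_a.pkl -n run_a
#   python recognize_3d_result_plotter.py -m features -f f1.pkl -f f2.pkl \
#          -n intensity -n laser
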
# For comparing between different algorithms, don't need to plot performance on all scans just

