import os

import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter, MaxNLocator
CSV_HEADER = "start,goal,computation_time,path_length,curv_discont".split(",")
def to_percent(x, position):
    # Format a fractional axis tick value x as a percentage string
    # (signature follows the FuncFormatter convention; the return line
    # is reconstructed from how the formatter is used below).
    p = "%.1f" % (100 * x)
    return p + "%"
def read_csv(filename):
    # Helper name assumed; reads a stats CSV into a list of row lists.
    with open(filename, 'r') as f:
        lines = [line.strip().split(",") for line in f.readlines()]
    return lines
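# The load() method below belongs to a small container class holding one
# steering function's benchmark results. The class name and __init__ are a
# minimal reconstruction (a sketch); only the attribute names are taken from
# the code that follows.
class Stats:
    def __init__(self):
        self.id = ""
        self.n_samples = 0
        self.comp_time = []
        self.path_length = []
        self.curv_discont = []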
    def load(self, fpath, fname):
        if not os.path.exists(fpath + fname):
            return False
        if "CC00_Dubins" in fname:
            self.id = "CC$^{00}$-Dubins"
        elif "CC0pm_Dubins" in fname:
            self.id = r"CC$^{0\pm}$-Dubins"
        elif "CCpm0_Dubins" in fname:
            self.id = r"CC$^{\pm0}$-Dubins"
        elif "CCpmpm_Dubins" in fname:
            self.id = r"CC$^{\pm\pm}$-Dubins"
        elif "CC_Dubins" in fname:
            self.id = "CC-Dubins"  # id reconstructed from the naming pattern
        elif "Dubins" in fname:
            self.id = "Dubins"
        elif "CC00_RS" in fname:
            self.id = "CC$^{00}$-RS"
        elif "HC00_RS" in fname:
            self.id = "HC$^{00}$-RS"
        elif "HC0pm_RS" in fname:
            self.id = r"HC$^{0\pm}$-RS"
        elif "HCpm0_RS" in fname:
            self.id = r"HC$^{\pm0}$-RS"
        elif "HCpmpm_RS" in fname:
            self.id = r"HC$^{\pm\pm}$-RS"
        elif "HC_RS" in fname:
            self.id = "HC-RS"  # id reconstructed from the naming pattern
        else:
            self.id = "RS"
        # Parsing loop reconstructed from the appends below: skip the CSV
        # header row and collect one entry per sampled start/goal pair.
        stats = read_csv(fpath + fname)[1:]
        self.n_samples = len(stats)
        for stat in stats:
            self.path_length.append(float(stat[CSV_HEADER.index("path_length")]))
            self.comp_time.append(float(stat[CSV_HEADER.index("computation_time")]))
            self.curv_discont.append(int(stat[CSV_HEADER.index("curv_discont")]))
        return True
if __name__ == "__main__":
    if Dubins.load(filepath, "Dubins_stats.csv"):
        dubins_outputs.append(Dubins)
    if CCpmpm_Dubins.load(filepath, "CCpmpm_Dubins_stats.csv"):
        dubins_outputs.append(CCpmpm_Dubins)
    if CCpm0_Dubins.load(filepath, "CCpm0_Dubins_stats.csv"):
        dubins_outputs.append(CCpm0_Dubins)
    if CC0pm_Dubins.load(filepath, "CC0pm_Dubins_stats.csv"):
        dubins_outputs.append(CC0pm_Dubins)
    if CC00_Dubins.load(filepath, "CC00_Dubins_stats.csv"):
        dubins_outputs.append(CC00_Dubins)
    if CC_Dubins.load(filepath, "CC_Dubins_stats.csv"):
        dubins_outputs.append(CC_Dubins)
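    # Same reconstruction for the Reeds-Shepp family (instance names taken
    # from the load calls below).
    rs_outputs = []
    RS = Stats()
    HC_RS = Stats()
    HC00_RS = Stats()
    HC0pm_RS = Stats()
    HCpm0_RS = Stats()
    HCpmpm_RS = Stats()
    CC00_RS = Stats()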
    if RS.load(filepath, "RS_stats.csv"):
        rs_outputs.append(RS)
    if HCpmpm_RS.load(filepath, "HCpmpm_RS_stats.csv"):
        rs_outputs.append(HCpmpm_RS)
    if HCpm0_RS.load(filepath, "HCpm0_RS_stats.csv"):
        rs_outputs.append(HCpm0_RS)
    if HC0pm_RS.load(filepath, "HC0pm_RS_stats.csv"):
        rs_outputs.append(HC0pm_RS)
    if HC00_RS.load(filepath, "HC00_RS_stats.csv"):
        rs_outputs.append(HC00_RS)
    if HC_RS.load(filepath, "HC_RS_stats.csv"):
        rs_outputs.append(HC_RS)
    if CC00_RS.load(filepath, "CC00_RS_stats.csv"):
        rs_outputs.append(CC00_RS)
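    # Print mean and standard deviation of the computation time per planner.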
    print("\nComputation Times [µs]: mean ± std\n")
    n_samples = rs_outputs[0].n_samples
    for output in dubins_outputs + rs_outputs:
        assert output.n_samples == n_samples
        print(output.id + ": %.2f ± %.2f" % (np.average(output.comp_time) * 1e6,
                                             np.std(output.comp_time) * 1e6))
    dubins_path_length_hist = []
    dubins_path_length_labels = []
    for output in dubins_outputs:
        if output.id != "Dubins":
            # Convert to arrays for element-wise arithmetic.
            rel_path_length = ((np.asarray(output.path_length) - np.asarray(Dubins.path_length))
                               / np.asarray(Dubins.path_length))
            dubins_path_length_hist.append(rel_path_length)
            dubins_path_length_labels.append(output.id)
    rs_path_length_hist = []
    rs_path_length_labels = []
    for output in rs_outputs:
        if output.id != "RS":
            rel_path_length = ((np.asarray(output.path_length) - np.asarray(RS.path_length))
                               / np.asarray(RS.path_length))
            rs_path_length_hist.append(rel_path_length)
            rs_path_length_labels.append(output.id)
    f, axarr = plt.subplots(2, 2, figsize=(10, 8), sharey=True)
    f.subplots_adjust(wspace=.4, hspace=.3)
    weights = np.ones(n_samples, dtype='float') / n_samples
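    # Top left: relative path length w.r.t. Dubins. The histogram range below
    # is a placeholder; the original value of xlim is elided from this listing.
    xlim = (0.0, 0.3)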
    axarr[0, 0].plot([], [])  # consume one color so the histograms skip the first cycle color
    axarr[0, 0].hist(dubins_path_length_hist, bins=9, range=xlim,
                     weights=[weights] * len(dubins_path_length_hist),
                     label=dubins_path_length_labels, linewidth=.1)
    axarr[0, 0].legend(loc='best')
    axarr[0, 0].set_xlim(xlim)
    axarr[0, 0].grid(True)
    axarr[0, 0].set_xlabel('Rel. Difference in Path Length to Dubins [%]')
    axarr[0, 0].set_ylabel('Normalized Frequency [%]')
    axarr[0, 0].xaxis.set_major_formatter(FuncFormatter(to_percent))
    axarr[0, 0].yaxis.set_major_formatter(FuncFormatter(to_percent))
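    # Top right: relative path length w.r.t. Reeds-Shepp.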
    axarr[0, 1].plot([], [])
    axarr[0, 1].hist(rs_path_length_hist, bins=9, range=xlim,
                     weights=[weights] * len(rs_path_length_hist),
                     label=rs_path_length_labels, linewidth=.1)
    axarr[0, 1].legend(loc='best')
    axarr[0, 1].set_xlim(xlim)
    axarr[0, 1].grid(True)
    axarr[0, 1].set_xlabel('Rel. Difference in Path Length to Reeds-Shepp [%]')
    axarr[0, 1].set_ylabel('Normalized Frequency [%]')
    plt.setp(axarr[0, 1].get_yticklabels(), visible=True)
    axarr[0, 1].xaxis.set_major_formatter(FuncFormatter(to_percent))
    axarr[0, 1].yaxis.set_major_formatter(FuncFormatter(to_percent))
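    # Bottom left: curvature discontinuities of the Dubins-based planners as a
    # grouped bar chart. The bar width below is a placeholder; the original
    # value is elided from this listing.
    width = 0.1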
    for i, output in enumerate(dubins_outputs):
        hist, bin_edges = np.histogram(output.curv_discont, weights=weights, bins=range(4))
        axarr[1, 0].bar(bin_edges[:-1] + width * (i - len(dubins_outputs) / 2.0), hist,
                        width=width, label=output.id)
    axarr[1, 0].legend(loc='best')
    axarr[1, 0].set_xlim(right=4)
    axarr[1, 0].grid(True)
    axarr[1, 0].set_xlabel('Number of Curvature Discontinuities [-]')
    axarr[1, 0].set_ylabel('Normalized Frequency [%]')
    axarr[1, 0].xaxis.set_major_locator(MaxNLocator(integer=True))
    axarr[1, 0].yaxis.set_major_formatter(FuncFormatter(to_percent))
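    # Bottom right: curvature discontinuities of the Reeds-Shepp-based planners.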
    for i, output in enumerate(rs_outputs):
        hist, bin_edges = np.histogram(output.curv_discont, weights=weights, bins=range(6))
        axarr[1, 1].bar(bin_edges[:-1] + width * (i - len(rs_outputs) / 2.0), hist,
                        width=width, label=output.id)
    axarr[1, 1].legend(loc='best')
    axarr[1, 1].set_xlim(right=5)
    axarr[1, 1].grid(True)
    axarr[1, 1].set_xlabel('Number of Curvature Discontinuities [-]')
    axarr[1, 1].set_ylabel('Normalized Frequency [%]')
    plt.setp(axarr[1, 1].get_yticklabels(), visible=True)
    axarr[1, 1].xaxis.set_major_locator(MaxNLocator(integer=True))
    axarr[1, 1].yaxis.set_major_formatter(FuncFormatter(to_percent))
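    # Assumed to conclude the script: render the figure.
    plt.show()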