compare_results.py
# Copyright 2023 Ekumen, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
from pathlib import Path
import re

from evo import main_ape
from evo.tools import file_interface
import matplotlib.pyplot as plt
from rosbag2_py._info import Info as Bag2Info
import pandas as pd
from itertools import cycle

from . import timem_results
from .exceptions import ScriptError

DIR_NAME_PATTERN = 'benchmark_([0-9]+)_particles_output'
SUPPORTED_TOPIC_NAMES = ('/pose', '/amcl_pose')
EVO_RESULTS_FILE_NAME = 'evo_results.zip'
TERMINAL_OUTPUT_LOG_FILE_NAME = 'output.log'


def get_bag_est_topic(bag_path: Path):
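    """Return the name of the estimated pose topic found in the given bag."""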
    metadata = Bag2Info().read_metadata(str(bag_path), '')
    for topic_name in SUPPORTED_TOPIC_NAMES:
        if any(
            topic_info.topic_metadata.name == topic_name
            for topic_info in metadata.topics_with_message_count
        ):
            return topic_name
    raise ScriptError(
        f'Estimate pose topic was not found in bag file, expected names: {SUPPORTED_TOPIC_NAMES}'
    )


def run_evo_ape(dir_path: Path):
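    """Run evo APE against '/odometry/ground_truth' on the 'rosbag' in dir_path,
    skipping the run if results already exist."""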
    evo_results_path = dir_path / EVO_RESULTS_FILE_NAME
    if evo_results_path.exists():
        return
    bag_path = dir_path / 'rosbag'
    est_topic = get_bag_est_topic(bag_path)
    arg_parser = main_ape.parser()
    args = arg_parser.parse_args(
        [
            'bag2',
            str(bag_path),
            '/odometry/ground_truth',
            est_topic,
            '--save_results',
            str(evo_results_path.resolve()),
        ]
    )
    main_ape.run(args)


def read_evo_stats(zip_file_path):
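    """Load the APE statistics dictionary from an evo results zip file."""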
    # keys: "rmse", "mean", "median", "std", "min", "max", "sse"
    return file_interface.load_res_file(zip_file_path).stats


def parse_latency_data(output_file_path):
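    """Extract particle filter update latencies (in seconds) from the terminal
    output log and return summary statistics."""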
    # Example line (as matched by the pattern below):
    # [<node>] [INFO] [<timestamp>] [amcl]: Particle filter update iteration stats: <N> particles <M> points - <latency>ms
    pattern = re.compile(
        r'\[.*\] \[INFO\] \[([0-9]*\.[0-9]*)\] \[amcl\]: Particle filter update iteration stats: '
        + r'[0-9]* particles [0-9]* points - ([0-9]*\.[0-9]*)ms'
    )
    latencies_seq = []
    with open(output_file_path, 'r') as f:
        for line in f:
            match = pattern.match(line)
            if not match:
                continue
            # group(1) is the timestamp, group(2) the latency in milliseconds
            latency = match.group(2)
            latencies_seq.append(float(latency) * 1e-3)  # convert to seconds
    return (
        {
            'latency_min': min(latencies_seq),
            'latency_max': max(latencies_seq),
            'latency_mean': sum(latencies_seq) / len(latencies_seq),
            'latency_median': sorted(latencies_seq)[len(latencies_seq) // 2],
        }
        if latencies_seq
        else {
            'latency_min': 0.0,
            'latency_max': 0.0,
            'latency_mean': 0.0,
            'latency_median': 0.0,
        }
    )


def create_parameterized_series(results_path: Path):
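    """Build a DataFrame indexed by particle count with CPU, memory, APE and
    latency statistics for each benchmark output directory."""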
    particles = []
    peak_rss = []
    cpu_usage = []
    ape_rmse = []
    ape_mean = []
    ape_std = []
    ape_median = []
    ape_max = []
    latency_min = []
    latency_max = []
    latency_mean = []
    latency_median = []

    for dir in results_path.iterdir():
        if not dir.is_dir():
            continue
        m = re.match(DIR_NAME_PATTERN, dir.name)
        if not m:
            continue
        particles.append(int(m[1]))
        timem_results_output = timem_results.read_timem_output(dir)
        rss, cpu = (
            timem_results.get_timem_metrics_values(timem_results_output)
            if timem_results_output
            else (None, None)
        )
        peak_rss.append(rss)
        cpu_usage.append(cpu)
        run_evo_ape(dir)
        evo_results_path = dir / EVO_RESULTS_FILE_NAME
        stats = read_evo_stats(evo_results_path)
        ape_rmse.append(stats["rmse"])
        ape_max.append(stats["max"])
        ape_mean.append(stats["mean"])
        ape_std.append(stats["std"])
        ape_median.append(stats["median"])
        terminal_output_path = dir / TERMINAL_OUTPUT_LOG_FILE_NAME
        latency_stats = parse_latency_data(terminal_output_path)
        latency_min.append(latency_stats['latency_min'])
        latency_max.append(latency_stats['latency_max'])
        latency_mean.append(latency_stats['latency_mean'])
        latency_median.append(latency_stats['latency_median'])

    return (
        pd.DataFrame(
            {
                'particles_n': particles,
                'peak_rss': peak_rss,
                'cpu_usage': cpu_usage,
                'ape_rmse': ape_rmse,
                'ape_mean': ape_mean,
                'ape_std': ape_std,
                'ape_median': ape_median,
                'ape_max': ape_max,
                'latency_min': latency_min,
                'latency_max': latency_max,
                'latency_mean': latency_mean,
                'latency_median': latency_median,
            }
        )
        .set_index('particles_n')
        .sort_index()
    )


def main():
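    """Parse command line arguments and either save the combined results to a
    CSV file or plot the selected metrics."""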
    arg_parser = argparse.ArgumentParser(
        description='Script to compare parameterized run results of beluga with another'
        ' implementation (e.g. nav2_amcl, another beluga version, etc.).'
    )
    arg_parser.add_argument(
        '--series',
        '-s',
        type=Path,
        action='append',
        help='Folder with parameterized benchmark results',
        required=True,
    )
    arg_parser.add_argument(
        '--label',
        '-l',
        type=str,
        action='append',
        help='Label for a series',
        required=True,
    )

    arg_parser.add_argument(
        '--use-ylog',
        action='store_true',
        help='Use log scale on y axis',
    )

    default_plots = ['cpu_usage', 'peak_rss', 'ape_rmse', 'latency_median']
    arg_parser.add_argument(
        '--plot-names',
        type=str,
        nargs='*',  # 0 or more values expected => creates a list
        default=default_plots,
        help='List of plots to generate. Default: ' + ', '.join(default_plots),
    )

    arg_parser.add_argument(
        '--save-csv',
        type=str,
        action='store',
        help='Instead of plotting, save all the results to a csv file',
    )

    args = arg_parser.parse_args()

    assert len(args.series) == len(args.label), 'Number of series and labels must match'

    color_gen = cycle(
        [
            'red',
            'green',
            'blue',
            'brown',
            'purple',
            'gray',
            'orange',
        ]
    )

    if args.save_csv:
        # create the list of dataframes without filtering any columns
        series = [
            create_parameterized_series(series).add_prefix(label + '_')
            for label, series in zip(args.label, args.series)
        ]
        # keep only the index
        full_table = series[0].filter(items=[])
        # concatenate the data into a single table
        for s in series:
            full_table = pd.concat([full_table, s], axis=1)
        print('Saving to CSV file: ', args.save_csv)
        full_table.to_csv(args.save_csv)
        return 0

    series = [
        create_parameterized_series(series)
        .filter(items=args.plot_names)
        .add_prefix(label + '_')
        for label, series in zip(args.label, args.series)
    ]

    print('Plotting data...')

    marker_gen = cycle('o^sDvP*')

    ax = series[0].plot(
        subplots=True,
        color=next(color_gen),
        marker=next(marker_gen),
        linestyle='dashed',
    )

    for other_series in series[1:]:
        other_series.plot(
            ax=ax,
            subplots=True,
            color=next(color_gen),
            marker=next(marker_gen),
            linestyle='dashed',
        )

    for ax in plt.gcf().axes:
        ax.set_xscale('log')
        if args.use_ylog:
            ax.set_yscale('log')
        ax.grid(True, which="both", ls="-")
        ax.legend(fontsize='small', loc='upper left', bbox_to_anchor=(1.01, 1))
        current_bounds = ax.get_position().bounds
        new_bounds = (0.05, *current_bounds[1:])
        ax.set_position(new_bounds)
        current_ylimits = ax.get_ylim()
        new_limits = (
            min(0.0, current_ylimits[0]),  # include zero below
            max(0.0, current_ylimits[1]),  # include zero above
        )
        ax.set_ylim(new_limits)

    plt.show()