compare_bench.py
#!/usr/bin/env python
"""
compare_bench.py - Compare two benchmarks or their results and report the
                   difference.
"""
import argparse
from argparse import ArgumentParser
import sys
import gbench
from gbench import util, report
from gbench.util import *

def check_inputs(in1, in2, flags):
    """
    Perform checking on the user-provided inputs and diagnose any abnormalities.
    """
    in1_kind, in1_err = classify_input_file(in1)
    in2_kind, in2_err = classify_input_file(in2)
    output_file = find_benchmark_flag('--benchmark_out=', flags)
    output_type = find_benchmark_flag('--benchmark_out_format=', flags)
    if in1_kind == IT_Executable and in2_kind == IT_Executable and output_file:
        print(("WARNING: '--benchmark_out=%s' will be passed to both "
               "benchmarks, causing it to be overwritten") % output_file)
    if in1_kind == IT_JSON and in2_kind == IT_JSON and len(flags) > 0:
        print("WARNING: passing --benchmark flags has no effect since both "
              "inputs are JSON")
    if output_type is not None and output_type != 'json':
        print(("ERROR: passing '--benchmark_out_format=%s' to 'compare_bench.py'"
               " is not supported.") % output_type)
        sys.exit(1)


def main():
    parser = ArgumentParser(
        description='compare the results of two benchmarks')
    parser.add_argument(
        'test1', metavar='test1', type=str, nargs=1,
        help='A benchmark executable or JSON output file')
    parser.add_argument(
        'test2', metavar='test2', type=str, nargs=1,
        help='A benchmark executable or JSON output file')
    # FIXME: this is a dummy argument which will never actually match
    # any --benchmark flags, but it helps generate a better usage message.
    parser.add_argument(
        'benchmark_options', metavar='benchmark_option', nargs='*',
        help='Arguments to pass when running benchmark executables'
    )
    args, unknown_args = parser.parse_known_args()
    # Parse the command line flags
    test1 = args.test1[0]
    test2 = args.test2[0]
    if args.benchmark_options:
        print("Unrecognized positional arguments: '%s'"
              % args.benchmark_options)
        sys.exit(1)
    benchmark_options = unknown_args
    check_inputs(test1, test2, benchmark_options)
    # Run the benchmarks and report the results
    json1 = gbench.util.run_or_load_benchmark(test1, benchmark_options)
    json2 = gbench.util.run_or_load_benchmark(test2, benchmark_options)
    output_lines = gbench.report.generate_difference_report(json1, json2)
    print('Comparing %s to %s' % (test1, test2))
    for ln in output_lines:
        print(ln)


if __name__ == '__main__':
    main()
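Taken together, main() boils down to three steps: classify the two inputs, run or load each one, and print a per-benchmark difference report, so a typical invocation is simply compare_bench.py <baseline> <contender>, where each argument is either a benchmark executable or a JSON output file. The same helpers can also be driven from Python directly; below is a hedged sketch (the two result file names are hypothetical, and the gbench package imported above must be on the import path):

# Hedged sketch: compare two previously saved JSON result files using the
# same helpers the script imports. The file names are hypothetical.
import gbench.report
import gbench.util

baseline = gbench.util.run_or_load_benchmark('baseline.json', [])
contender = gbench.util.run_or_load_benchmark('contender.json', [])

for line in gbench.report.generate_difference_report(baseline, contender):
    print(line)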
compare_bench.main
def main()
Definition: compare_bench.py:33
compare_bench.check_inputs
def check_inputs(in1, in2, flags)
Definition: compare_bench.py:13
gbench.report.generate_difference_report
def generate_difference_report(json1, json2, use_color=True)
Definition: report.py:69
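For illustration only, a stripped-down version of the per-benchmark comparison such a report is built from. This is not the actual gbench.report code; it assumes Google Benchmark's JSON layout (a top-level 'benchmarks' list whose entries carry 'name', 'real_time' and 'cpu_time'), and both helper names below are invented for this sketch.

def relative_change(old, new):
    # (new - old) / old, guarding against a zero baseline.
    return (new - old) / old if old else 0.0

def simple_difference_report(json1, json2):
    # Index the baseline results by benchmark name, then emit one line per
    # benchmark that appears in both result sets.
    baseline = {b['name']: b for b in json1['benchmarks']}
    lines = []
    for contender in json2['benchmarks']:
        base = baseline.get(contender['name'])
        if base is None:
            continue
        lines.append('%-40s %+8.4f %+8.4f' % (
            contender['name'],
            relative_change(base['real_time'], contender['real_time']),
            relative_change(base['cpu_time'], contender['cpu_time'])))
    return lines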
gbench.util.classify_input_file
def classify_input_file(filename)
Definition: util.py:54
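A minimal sketch of what an input classifier along these lines might do, assuming IT_Executable and IT_JSON are simple enum-like constants; the IT_Invalid value and the _sketch suffix are invented here, and the real gbench.util implementation may differ.

import json
import os

IT_Invalid, IT_JSON, IT_Executable = range(3)

def classify_input_file_sketch(filename):
    # Return (kind, error_message) for a benchmark executable or JSON file.
    if not os.path.isfile(filename):
        return IT_Invalid, "'%s' does not exist or is not a file" % filename
    if os.access(filename, os.X_OK):
        return IT_Executable, None
    try:
        with open(filename) as f:
            json.load(f)
        return IT_JSON, None
    except ValueError:
        return IT_Invalid, "'%s' is neither executable nor valid JSON" % filename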
gbench.util.run_or_load_benchmark
def run_or_load_benchmark(filename, benchmark_flags)
Definition: util.py:146
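One plausible way to realize the run-or-load idea, sketched under the assumption that executables are asked to write JSON results to a temporary file via the --benchmark_out= and --benchmark_out_format= flags the script already checks for; the real gbench.util.run_or_load_benchmark may handle this differently.

import json
import os
import subprocess
import tempfile

def run_or_load_sketch(filename, benchmark_flags):
    # JSON result files are loaded directly.
    if not os.access(filename, os.X_OK):
        with open(filename) as f:
            return json.load(f)
    # Executables are run and asked to dump their results as JSON to a
    # temporary file, which is then loaded and returned.
    fd, out_path = tempfile.mkstemp(suffix='.json')
    os.close(fd)
    try:
        subprocess.check_call(
            [filename,
             '--benchmark_out=%s' % out_path,
             '--benchmark_out_format=json'] + benchmark_flags)
        with open(out_path) as f:
            return json.load(f)
    finally:
        os.remove(out_path)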
gbench.util
Definition: util.py:1
gbench.util.find_benchmark_flag
def find_benchmark_flag(prefix, benchmark_flags)
Definition: util.py:87
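A minimal sketch of a prefix-based flag lookup with this signature; the real helper in gbench.util may differ in detail, for instance in how repeated flags are resolved.

def find_benchmark_flag_sketch(prefix, benchmark_flags):
    # Return the value of the last flag starting with `prefix`, e.g.
    # find_benchmark_flag_sketch('--benchmark_out=', ['--benchmark_out=a.json'])
    # yields 'a.json'; None if the flag is absent.
    assert prefix.startswith('--') and prefix.endswith('=')
    result = None
    for flag in benchmark_flags:
        if flag.startswith(prefix):
            result = flag[len(prefix):]
    return result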

