compare_bench.py - Compare two benchmarks or their results and report the
import sys

from argparse import ArgumentParser

import gbench
from gbench import util, report
from gbench.util import (IT_Executable, IT_JSON, classify_input_file,
                         find_benchmark_flag)
def check_inputs(in1, in2, flags):
    """
    Perform checking on the user provided inputs and diagnose any abnormalities.

    Prints a warning when a flag combination is harmless but useless, and
    aborts the process for combinations that are not supported.

    Args:
        in1: path to the first benchmark executable or JSON result file.
        in2: path to the second benchmark executable or JSON result file.
        flags: list of '--benchmark...' flag strings to forward to executables.
    """
    # NOTE(review): the paste lost the setup lines; reconstructed from the
    # variables the visible conditions read (classify_input_file /
    # find_benchmark_flag come from gbench.util) -- confirm against upstream.
    in1_kind, in1_err = classify_input_file(in1)
    in2_kind, in2_err = classify_input_file(in2)
    output_file = find_benchmark_flag('--benchmark_out=', flags)
    output_type = find_benchmark_flag('--benchmark_out_format=', flags)
    # Both inputs are executables: a single --benchmark_out file would be
    # written twice, so the first run's results would be lost.
    if in1_kind == IT_Executable and in2_kind == IT_Executable and output_file:
        print(("WARNING: '--benchmark_out=%s' will be passed to both "
               "benchmarks causing it to be overwritten") % output_file)
    # Both inputs are pre-recorded JSON: nothing is executed, so any
    # --benchmark flags are silently ignored -- warn the user.
    if in1_kind == IT_JSON and in2_kind == IT_JSON and len(flags) > 0:
        print("WARNING: passing --benchmark flags has no effect since both "
              "inputs are JSON")
    # Only JSON output can be parsed by this tool; any other format is fatal.
    if output_type is not None and output_type != 'json':
        print(("ERROR: passing '--benchmark_out_format=%s' to 'compare_bench.py`"
               " is not supported.") % output_type)
        sys.exit(1)
def main():
    """Parse the command line, run/load both benchmarks, and print the diff."""
    # Parse the command line flags.
    parser = ArgumentParser(
        description='compare the results of two benchmarks')
    parser.add_argument(
        'test1', metavar='test1', type=str, nargs=1,
        help='A benchmark executable or JSON output file')
    parser.add_argument(
        'test2', metavar='test2', type=str, nargs=1,
        help='A benchmark executable or JSON output file')
    parser.add_argument(
        'benchmark_options', metavar='benchmark_option', nargs='*',
        help='Arguments to pass when running benchmark executables')
    # parse_known_args: unrecognized flags (e.g. --benchmark_filter=...) are
    # collected separately and forwarded to the benchmark executables.
    args, unknown_args = parser.parse_known_args()
    # nargs=1 stores a one-element list; unwrap to the bare path strings.
    test1 = args.test1[0]
    test2 = args.test2[0]
    # Any extra *positional* arguments are a usage error; benchmark options
    # must be passed as flags so they land in unknown_args instead.
    if args.benchmark_options:
        print("Unrecognized positional argument arguments: '%s'"
              % args.benchmark_options)
        sys.exit(1)
    benchmark_options = unknown_args
    check_inputs(test1, test2, benchmark_options)
    # NOTE(review): the paste lost the middle of this function; the
    # run/load + report calls are reconstructed from the variables used by
    # the visible print/loop below -- confirm against upstream.
    # Run (or load, for JSON inputs) the two benchmarks.
    json1 = gbench.util.run_or_load_benchmark(test1, benchmark_options)
    json2 = gbench.util.run_or_load_benchmark(test2, benchmark_options)
    # Build the human-readable difference report and print it line by line.
    output_lines = gbench.report.generate_difference_report(
        json1['benchmarks'], json2['benchmarks'])
    print('Comparing %s to %s' % (test1, test2))
    for ln in output_lines:
        print(ln)
if __name__ == '__main__':
    # Script entry point; the call was lost in the mangled paste -- without
    # it the guard is a no-op.
    main()