#!/usr/bin/env python

"""
compare.py - versatile benchmark output compare tool
"""

import argparse
from argparse import ArgumentParser
import json
import os
import sys
import unittest

import gbench
from gbench import util, report
from gbench.util import *
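
# classify_input_file, find_benchmark_flag and the IT_* input-kind constants
# used in check_inputs() below are provided by gbench.util via the star-import.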


def check_inputs(in1, in2, flags):
    """
    Perform checks on the user-provided inputs and diagnose any abnormalities
    """
    in1_kind, in1_err = classify_input_file(in1)
    in2_kind, in2_err = classify_input_file(in2)
    output_file = find_benchmark_flag('--benchmark_out=', flags)
    output_type = find_benchmark_flag('--benchmark_out_format=', flags)
    if in1_kind == IT_Executable and in2_kind == IT_Executable and output_file:
        print(("WARNING: '--benchmark_out=%s' will be passed to both "
               "benchmarks, causing it to be overwritten") % output_file)
    if in1_kind == IT_JSON and in2_kind == IT_JSON and len(flags) > 0:
        print("WARNING: passing optional flags has no effect since both "
              "inputs are JSON")
    if output_type is not None and output_type != 'json':
        print(("ERROR: passing '--benchmark_out_format=%s' to 'compare.py' "
               "is not supported.") % output_type)
        sys.exit(1)


def create_parser():
    parser = ArgumentParser(
        description='versatile benchmark output compare tool')

    parser.add_argument(
        '--display_aggregates_only',
        dest='display_aggregates_only',
        action='store_true',
        help="If there are repetitions, by default we display everything: the "
             "actual runs and the computed aggregates. Sometimes it is "
             "desirable to only view the aggregates, e.g. when there are a "
             "lot of repetitions. Note that only the display is affected; "
             "internally, all the actual runs are still used, e.g. for the "
             "U test.")

    parser.add_argument(
        '--no-color',
        dest='color',
        default=True,
        action='store_false',
        help="Do not use colors in the terminal output")

    parser.add_argument(
        '-d',
        '--dump_to_json',
        dest='dump_to_json',
        help="Additionally dump the benchmark comparison output to this file "
             "in JSON format.")

    utest = parser.add_argument_group()
    utest.add_argument(
        '--no-utest',
        dest='utest',
        default=True,
        action='store_false',
        help=("The tool can do a two-tailed Mann-Whitney U test with the null "
              "hypothesis that it is equally likely that a randomly selected "
              "value from one sample will be less than or greater than a "
              "randomly selected value from a second sample.\n"
              "WARNING: requires a **LARGE** (no less than {}) number of "
              "repetitions to be meaningful!\n"
              "The test is done by default if at least {} repetitions were "
              "performed.\n"
              "This option disables the U test.").format(
                  report.UTEST_OPTIMAL_REPETITIONS,
                  report.UTEST_MIN_REPETITIONS))
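    # --alpha below only matters while the U test is enabled; --no-utest
    # above switches the test off entirely.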
    alpha_default = 0.05
    utest.add_argument(
        '--alpha',
        dest='utest_alpha',
        default=alpha_default,
        type=float,
        help="Significance level alpha. If the calculated p-value is below "
             "this value, the result is said to be statistically significant "
             "and the null hypothesis is rejected.\n"
             "(default: %0.4f)" % alpha_default)

    subparsers = parser.add_subparsers(
        help='This tool has multiple modes of operation:',
        dest='mode')
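
    # Three modes: 'benchmarks' compares two whole runs, 'filters' compares
    # two filters within a single run, and 'benchmarksfiltered' compares one
    # filter from each of two runs.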
    parser_a = subparsers.add_parser(
        'benchmarks',
        help='The simplest use-case: compare all the output of these two '
             'benchmarks')
    baseline = parser_a.add_argument_group(
        'baseline', 'The benchmark baseline')
    baseline.add_argument(
        'test_baseline',
        metavar='test_baseline',
        type=argparse.FileType('r'),
        nargs=1,
        help='A benchmark executable or JSON output file')
    contender = parser_a.add_argument_group(
        'contender', 'The benchmark that will be compared against the baseline')
    contender.add_argument(
        'test_contender',
        metavar='test_contender',
        type=argparse.FileType('r'),
        nargs=1,
        help='A benchmark executable or JSON output file')
    parser_a.add_argument(
        'benchmark_options',
        metavar='benchmark_options',
        nargs=argparse.REMAINDER,
        help='Arguments to pass when running benchmark executables')

    parser_b = subparsers.add_parser(
        'filters',
        help='Compare one filter of a benchmark against another filter of '
             'the same benchmark')
    baseline = parser_b.add_argument_group(
        'baseline', 'The benchmark baseline')
    baseline.add_argument(
        'test',
        metavar='test',
        type=argparse.FileType('r'),
        nargs=1,
        help='A benchmark executable or JSON output file')
    baseline.add_argument(
        'filter_baseline',
        metavar='filter_baseline',
        type=str,
        nargs=1,
        help='The first filter, which will be used as the baseline')
    contender = parser_b.add_argument_group(
        'contender', 'The benchmark that will be compared against the baseline')
    contender.add_argument(
        'filter_contender',
        metavar='filter_contender',
        type=str,
        nargs=1,
        help='The second filter, which will be compared against the baseline')
    parser_b.add_argument(
        'benchmark_options',
        metavar='benchmark_options',
        nargs=argparse.REMAINDER,
        help='Arguments to pass when running benchmark executables')

    parser_c = subparsers.add_parser(
        'benchmarksfiltered',
        help='Compare one filter of the first benchmark with another filter '
             'of the second benchmark')
    baseline = parser_c.add_argument_group(
        'baseline', 'The benchmark baseline')
    baseline.add_argument(
        'test_baseline',
        metavar='test_baseline',
        type=argparse.FileType('r'),
        nargs=1,
        help='A benchmark executable or JSON output file')
    baseline.add_argument(
        'filter_baseline',
        metavar='filter_baseline',
        type=str,
        nargs=1,
        help='The first filter, which will be used as the baseline')
    contender = parser_c.add_argument_group(
        'contender', 'The benchmark that will be compared against the baseline')
    contender.add_argument(
        'test_contender',
        metavar='test_contender',
        type=argparse.FileType('r'),
        nargs=1,
        help='The second benchmark executable or JSON output file, which '
             'will be compared against the baseline')
    contender.add_argument(
        'filter_contender',
        metavar='filter_contender',
        type=str,
        nargs=1,
        help='The second filter, which will be compared against the baseline')
    parser_c.add_argument(
        'benchmark_options',
        metavar='benchmark_options',
        nargs=argparse.REMAINDER,
        help='Arguments to pass when running benchmark executables')

    return parser
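

# Example invocations (paths and benchmark names are illustrative):
#   compare.py benchmarks ./bench_old ./bench_new
#   compare.py filters ./bench BM_Old BM_New
#   compare.py benchmarksfiltered ./bench_old BM_Foo ./bench_new BM_Foo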

def main():
    # Parse the command line flags.
    parser = create_parser()
    args, unknown_args = parser.parse_known_args()
    if args.mode is None:
        parser.print_help()
        sys.exit(1)
    assert not unknown_args
    benchmark_options = args.benchmark_options

    if args.mode == 'benchmarks':
        test_baseline = args.test_baseline[0].name
        test_contender = args.test_contender[0].name
        filter_baseline = ''
        filter_contender = ''

        # NOTE: if test_baseline == test_contender, you are analyzing the
        # stdev.

        description = 'Comparing %s to %s' % (test_baseline, test_contender)
    elif args.mode == 'filters':
        test_baseline = args.test[0].name
        test_contender = args.test[0].name
        filter_baseline = args.filter_baseline[0]
        filter_contender = args.filter_contender[0]

        # NOTE: if filter_baseline == filter_contender, you are analyzing the
        # stdev.

        description = 'Comparing %s to %s (from %s)' % (
            filter_baseline, filter_contender, args.test[0].name)
    elif args.mode == 'benchmarksfiltered':
        test_baseline = args.test_baseline[0].name
        test_contender = args.test_contender[0].name
        filter_baseline = args.filter_baseline[0]
        filter_contender = args.filter_contender[0]

        # NOTE: if test_baseline == test_contender and
        # filter_baseline == filter_contender, you are analyzing the stdev.

        description = 'Comparing %s (from %s) to %s (from %s)' % (
            filter_baseline, test_baseline, filter_contender, test_contender)
    else:
        # Should never happen: argparse restricts the mode choices.
        print("Unrecognized mode of operation: '%s'" % args.mode)
        parser.print_help()
        sys.exit(1)

    check_inputs(test_baseline, test_contender, benchmark_options)

    if args.display_aggregates_only:
        benchmark_options += ['--benchmark_display_aggregates_only=true']

    options_baseline = []
    options_contender = []

    if filter_baseline and filter_contender:
        options_baseline = ['--benchmark_filter=%s' % filter_baseline]
        options_contender = ['--benchmark_filter=%s' % filter_contender]

    # Run the benchmarks (or load the JSON dumps) and collect the results.
    json1 = json1_orig = gbench.util.run_or_load_benchmark(
        test_baseline, benchmark_options + options_baseline)
    json2 = json2_orig = gbench.util.run_or_load_benchmark(
        test_contender, benchmark_options + options_contender)

    # Now, filter the benchmarks so that the difference report can work.
    if filter_baseline and filter_contender:
        replacement = '[%s vs. %s]' % (filter_baseline, filter_contender)
        json1 = gbench.report.filter_benchmark(
            json1_orig, filter_baseline, replacement)
        json2 = gbench.report.filter_benchmark(
            json2_orig, filter_contender, replacement)
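        # Both filtered families now share the same '[x vs. y]' name, which
        # lets the diff report pair up corresponding benchmarks.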

    diff_report = gbench.report.get_difference_report(
        json1, json2, args.utest)
    output_lines = gbench.report.print_difference_report(
        diff_report,
        args.display_aggregates_only,
        args.utest, args.utest_alpha, args.color)
    print(description)
    for ln in output_lines:
        print(ln)

    # Optionally dump the diff report to a JSON file as well.
    if args.dump_to_json is not None:
        with open(args.dump_to_json, 'w') as f_json:
            json.dump(diff_report, f_json)
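

# Self-tests for the command line parser; they can be run with unittest
# (e.g. `python -m unittest compare`).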
class TestParser(unittest.TestCase):
    def setUp(self):
        self.parser = create_parser()
        testInputs = os.path.join(
            os.path.dirname(
                os.path.realpath(__file__)),
            'gbench',
            'Inputs')
        self.testInput0 = os.path.join(testInputs, 'test1_run1.json')
        self.testInput1 = os.path.join(testInputs, 'test1_run2.json')

    def test_benchmarks_basic(self):
        parsed = self.parser.parse_args(
            ['benchmarks', self.testInput0, self.testInput1])
        self.assertFalse(parsed.display_aggregates_only)
        self.assertTrue(parsed.utest)
        self.assertEqual(parsed.mode, 'benchmarks')
        self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
        self.assertEqual(parsed.test_contender[0].name, self.testInput1)
        self.assertFalse(parsed.benchmark_options)

    def test_benchmarks_basic_without_utest(self):
        parsed = self.parser.parse_args(
            ['--no-utest', 'benchmarks', self.testInput0, self.testInput1])
        self.assertFalse(parsed.display_aggregates_only)
        self.assertFalse(parsed.utest)
        self.assertEqual(parsed.utest_alpha, 0.05)
        self.assertEqual(parsed.mode, 'benchmarks')
        self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
        self.assertEqual(parsed.test_contender[0].name, self.testInput1)
        self.assertFalse(parsed.benchmark_options)

    def test_benchmarks_basic_display_aggregates_only(self):
        parsed = self.parser.parse_args(
            ['--display_aggregates_only',
             'benchmarks', self.testInput0, self.testInput1])
        self.assertTrue(parsed.display_aggregates_only)
        self.assertTrue(parsed.utest)
        self.assertEqual(parsed.mode, 'benchmarks')
        self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
        self.assertEqual(parsed.test_contender[0].name, self.testInput1)
        self.assertFalse(parsed.benchmark_options)

    def test_benchmarks_basic_with_utest_alpha(self):
        parsed = self.parser.parse_args(
            ['--alpha=0.314', 'benchmarks', self.testInput0, self.testInput1])
        self.assertFalse(parsed.display_aggregates_only)
        self.assertTrue(parsed.utest)
        self.assertEqual(parsed.utest_alpha, 0.314)
        self.assertEqual(parsed.mode, 'benchmarks')
        self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
        self.assertEqual(parsed.test_contender[0].name, self.testInput1)
        self.assertFalse(parsed.benchmark_options)

    def test_benchmarks_basic_without_utest_with_utest_alpha(self):
        parsed = self.parser.parse_args(
            ['--no-utest', '--alpha=0.314',
             'benchmarks', self.testInput0, self.testInput1])
        self.assertFalse(parsed.display_aggregates_only)
        self.assertFalse(parsed.utest)
        self.assertEqual(parsed.utest_alpha, 0.314)
        self.assertEqual(parsed.mode, 'benchmarks')
        self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
        self.assertEqual(parsed.test_contender[0].name, self.testInput1)
        self.assertFalse(parsed.benchmark_options)

    def test_benchmarks_with_remainder(self):
        parsed = self.parser.parse_args(
            ['benchmarks', self.testInput0, self.testInput1, 'd'])
        self.assertFalse(parsed.display_aggregates_only)
        self.assertTrue(parsed.utest)
        self.assertEqual(parsed.mode, 'benchmarks')
        self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
        self.assertEqual(parsed.test_contender[0].name, self.testInput1)
        self.assertEqual(parsed.benchmark_options, ['d'])

    def test_benchmarks_with_remainder_after_doubleminus(self):
        parsed = self.parser.parse_args(
            ['benchmarks', self.testInput0, self.testInput1, '--', 'e'])
        self.assertFalse(parsed.display_aggregates_only)
        self.assertTrue(parsed.utest)
        self.assertEqual(parsed.mode, 'benchmarks')
        self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
        self.assertEqual(parsed.test_contender[0].name, self.testInput1)
        self.assertEqual(parsed.benchmark_options, ['e'])

    def test_filters_basic(self):
        parsed = self.parser.parse_args(
            ['filters', self.testInput0, 'c', 'd'])
        self.assertFalse(parsed.display_aggregates_only)
        self.assertTrue(parsed.utest)
        self.assertEqual(parsed.mode, 'filters')
        self.assertEqual(parsed.test[0].name, self.testInput0)
        self.assertEqual(parsed.filter_baseline[0], 'c')
        self.assertEqual(parsed.filter_contender[0], 'd')
        self.assertFalse(parsed.benchmark_options)

    def test_filters_with_remainder(self):
        parsed = self.parser.parse_args(
            ['filters', self.testInput0, 'c', 'd', 'e'])
        self.assertFalse(parsed.display_aggregates_only)
        self.assertTrue(parsed.utest)
        self.assertEqual(parsed.mode, 'filters')
        self.assertEqual(parsed.test[0].name, self.testInput0)
        self.assertEqual(parsed.filter_baseline[0], 'c')
        self.assertEqual(parsed.filter_contender[0], 'd')
        self.assertEqual(parsed.benchmark_options, ['e'])

    def test_filters_with_remainder_after_doubleminus(self):
        parsed = self.parser.parse_args(
            ['filters', self.testInput0, 'c', 'd', '--', 'f'])
        self.assertFalse(parsed.display_aggregates_only)
        self.assertTrue(parsed.utest)
        self.assertEqual(parsed.mode, 'filters')
        self.assertEqual(parsed.test[0].name, self.testInput0)
        self.assertEqual(parsed.filter_baseline[0], 'c')
        self.assertEqual(parsed.filter_contender[0], 'd')
        self.assertEqual(parsed.benchmark_options, ['f'])

    def test_benchmarksfiltered_basic(self):
        parsed = self.parser.parse_args(
            ['benchmarksfiltered', self.testInput0, 'c', self.testInput1, 'e'])
        self.assertFalse(parsed.display_aggregates_only)
        self.assertTrue(parsed.utest)
        self.assertEqual(parsed.mode, 'benchmarksfiltered')
        self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
        self.assertEqual(parsed.filter_baseline[0], 'c')
        self.assertEqual(parsed.test_contender[0].name, self.testInput1)
        self.assertEqual(parsed.filter_contender[0], 'e')
        self.assertFalse(parsed.benchmark_options)

    def test_benchmarksfiltered_with_remainder(self):
        parsed = self.parser.parse_args(
            ['benchmarksfiltered',
             self.testInput0, 'c', self.testInput1, 'e', 'f'])
        self.assertFalse(parsed.display_aggregates_only)
        self.assertTrue(parsed.utest)
        self.assertEqual(parsed.mode, 'benchmarksfiltered')
        self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
        self.assertEqual(parsed.filter_baseline[0], 'c')
        self.assertEqual(parsed.test_contender[0].name, self.testInput1)
        self.assertEqual(parsed.filter_contender[0], 'e')
        self.assertEqual(parsed.benchmark_options[0], 'f')

    def test_benchmarksfiltered_with_remainder_after_doubleminus(self):
        parsed = self.parser.parse_args(
            ['benchmarksfiltered',
             self.testInput0, 'c', self.testInput1, 'e', '--', 'g'])
        self.assertFalse(parsed.display_aggregates_only)
        self.assertTrue(parsed.utest)
        self.assertEqual(parsed.mode, 'benchmarksfiltered')
        self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
        self.assertEqual(parsed.filter_baseline[0], 'c')
        self.assertEqual(parsed.test_contender[0].name, self.testInput1)
        self.assertEqual(parsed.filter_contender[0], 'e')
        self.assertEqual(parsed.benchmark_options[0], 'g')


if __name__ == '__main__':
    # unittest.main()
    main()