16 """ Python utility to run opt and counters benchmarks and save json output """
import argparse
import multiprocessing
import os
import random
import subprocess
import sys
# Make the shared run_tests python utilities importable. The path is relative
# to this script's own location, so the script works from any CWD.
# NOTE(review): the surrounding call and trailing path component were lost in
# extraction; reconstructed as the conventional sys.path.append + python_utils
# layout — TODO confirm against the original tree.
sys.path.append(
    os.path.join(os.path.dirname(sys.argv[0]), '..', '..', '..', 'run_tests',
                 'python_utils'))
import jobset  # noqa: E402  (must come after the sys.path tweak above)
# Command-line interface for the benchmark runner. Long option names are
# grounded by the attribute accesses further down (args.benchmarks, args.jobs,
# args.name, args.regex, args.loops, args.counters).
argp = argparse.ArgumentParser(description='Runs microbenchmarks')
argp.add_argument('-b',
                  '--benchmarks',
                  nargs='+',
                  choices=bm_constants._AVAILABLE_BENCHMARK_TESTS,
                  default=bm_constants._AVAILABLE_BENCHMARK_TESTS,
                  help='Benchmarks to run')
argp.add_argument('-j',
                  '--jobs',
                  type=int,
                  default=multiprocessing.cpu_count(),
                  help='Number of CPUs to use')
argp.add_argument(
    '-n',
    '--name',
    type=str,
    help=
    'Unique name of the build to run. Needs to match the handle passed to bm_build.py'
)
argp.add_argument('-r',
                  '--regex',
                  type=str,
                  default='',
                  help='Regex to filter benchmarks run')
argp.add_argument(
    '-l',
    '--loops',
    type=int,
    # NOTE(review): the numeric default was lost in extraction — TODO confirm.
    default=20,
    help=
    'Number of times to loops the benchmarks. More loops cuts down on noise')
# --counters / --no-counters are a store_true/store_false pair sharing one
# destination; counters collection is enabled by default.
argp.add_argument('--counters', dest='counters', action='store_true')
argp.add_argument('--no-counters', dest='counters', action='store_false')
argp.set_defaults(counters=True)
args = argp.parse_args()

# Too few loops makes the benchmark results statistically noisy; warn early.
# NOTE(review): the threshold and message tail were truncated in extraction —
# reconstructed, TODO confirm.
if args.loops < 3:
    print("WARNING: This run will likely be noisy. Increase loops to at "
          "least 3.")
def _collect_bm_data(bm, cfg, name, regex, idx, loops):
    """Builds one JobSpec per benchmark test matched by `regex`.

    Asks the prebuilt benchmark binary (bm_diff_<name>/<cfg>/<bm>) to list its
    tests, then creates a job that runs each test individually and writes its
    results to a uniquely named json file.

    NOTE(review): the enclosing `def` line was lost in extraction; the
    signature is reconstructed from the free variables used in the body —
    TODO confirm name/parameter order against the original.

    Args:
      bm: benchmark binary name.
      cfg: build configuration (e.g. opt or counters).
      name: unique build handle (matches bm_build.py).
      regex: filter limiting which benchmark tests run.
      idx: current loop index (0-based).
      loops: total number of loops (for progress display only).

    Returns:
      A list of jobset.JobSpec objects, one per matched benchmark test.
    """
    jobs_list = []
    for line in subprocess.check_output([
            'bm_diff_%s/%s/%s' % (name, cfg, bm),
            '--benchmark_list_tests',
            '--benchmark_filter=%s' % regex,
    ]).splitlines():
        # check_output returns bytes; decode before string manipulation.
        line = line.decode('UTF-8')
        # Sanitize the benchmark name so it is safe to embed in a filename:
        # template/path characters all collapse to underscores.
        stripped_line = line.strip().replace("/", "_").replace(
            "<", "_").replace(">", "_").replace(", ", "_")
        cmd = [
            'bm_diff_%s/%s/%s' % (name, cfg, bm),
            # Anchor the filter so only this exact test runs.
            '--benchmark_filter=^%s$' % line,
            '--benchmark_out=%s.%s.%s.%s.%d.json' %
            (bm, stripped_line, cfg, name, idx),
            '--benchmark_out_format=json',
        ]
        # NOTE(review): any JobSpec kwargs between cmd and shortname were lost
        # in extraction — TODO confirm (e.g. verbose_success).
        jobs_list.append(
            jobset.JobSpec(cmd,
                           shortname='%s %s %s %s %d/%d' %
                           (bm, line, cfg, name, idx + 1, loops),
                           timeout_seconds=60 * 60))
    return jobs_list
def create_jobs(name, benchmarks, loops, regex, counters):
    """Creates the full, shuffled list of benchmark jobs to run.

    Runs every benchmark `loops` times under the 'opt' config, plus the
    'counters' config when enabled (the module docstring names these two
    configs). NOTE(review): the `def` line and the per-config collection
    calls were lost in extraction; reconstructed — TODO confirm.

    Args:
      name: unique build handle (matches bm_build.py).
      benchmarks: iterable of benchmark binary names.
      loops: number of times to repeat each benchmark.
      regex: filter limiting which benchmark tests run.
      counters: whether to also run the counters configuration.

    Returns:
      A shuffled list of jobset.JobSpec objects.
    """
    jobs_list = []
    for loop in range(0, loops):
        for bm in benchmarks:
            jobs_list += _collect_bm_data(bm, 'opt', name, regex, loop, loops)
            if counters:
                jobs_list += _collect_bm_data(bm, 'counters', name, regex,
                                              loop, loops)
    # Shuffle with a SystemRandom-backed generator so repeated runs interleave
    # the loops differently and timing noise de-correlates from run order.
    # (The old two-argument random.shuffle(list, func) form was removed in
    # Python 3.11; SystemRandom().shuffle is the equivalent supported form.)
    random.SystemRandom().shuffle(jobs_list)
    return jobs_list
if __name__ == '__main__':
    # `args` is parsed at module scope above. NOTE(review): the final argument
    # of this call was truncated in extraction; args.counters matches the
    # parser's counters flag and create_jobs's arity — TODO confirm.
    jobs_list = create_jobs(args.name, args.benchmarks, args.loops, args.regex,
                            args.counters)
    # Fan the jobs out across the requested number of CPUs.
    jobset.run(jobs_list, maxjobs=args.jobs)