import multiprocessing
sys.path.append(
    os.path.join(os.path.dirname(sys.argv[0]), '..', 'profiling',
                 'microbenchmarks', 'bm_diff'))
flamegraph_dir = os.path.join(os.path.expanduser('~'), 'FlameGraph')
os.chdir(os.path.join(os.path.dirname(sys.argv[0]), '../..'))
if not os.path.exists('reports'):
    os.makedirs('reports')
start_port_server.start_port_server()
            if len(out) and out[-1] == '_':
<title>Microbenchmark Results</title>
    index_html += "<h1>%s</h1>\n" % name
    index_html += "<p><a href=\"%s\">%s</a></p>\n" % (html.escape(
        tgt, quote=True), html.escape(txt))
    index_html += "<p><pre>%s</pre></p>\n" % html.escape(txt)
79 """Build given benchmark with bazel"""
80 subprocess.check_call([
81 'tools/bazel',
'build',
83 '//test/cpp/microbenchmarks:%s' % bm_name
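    # The built binary lands under bazel-bin/test/cpp/microbenchmarks/, which
    # is where the collectors below invoke it from.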
88 """generate latency profiles"""
93 heading(
'Latency Profiles: %s' % bm_name)
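    # Ask the benchmark binary for its list of tests, then queue one profiling
    # run, one trace-analysis job, and one cleanup job per test.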
    for line in subprocess.check_output([
            'bazel-bin/test/cpp/microbenchmarks/%s' % bm_name,
            '--benchmark_list_tests'
    ]).decode('UTF-8').splitlines():
        benchmarks.append(
            jobset.JobSpec(
                [
                    'bazel-bin/test/cpp/microbenchmarks/%s' % bm_name,
                    '--benchmark_filter=^%s$' % line,
                    '--benchmark_min_time=0.05'
                ],
                environ={'GRPC_LATENCY_TRACE': '%s.trace' % fnize(line)},
                shortname='profile-%s' % fnize(line)))
        profile_analysis.append(
            jobset.JobSpec(
                [
                    'tools/profiling/latency_profile/profile_analyzer.py',
                    '%s.trace' % fnize(line),
                    '--fmt', 'simple',
                    '--out', 'reports/%s.txt' % fnize(line)
                ],
                timeout_seconds=20 * 60,
                shortname='analyze-%s' % fnize(line)))
        cleanup.append(jobset.JobSpec(['rm', '%s.trace' % fnize(line)]))
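        # Flush the queued work in batches so trace files and their analysis
        # jobs do not pile up across the whole benchmark list.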
        if len(benchmarks) >= min(16, multiprocessing.cpu_count()):
            jobset.run(benchmarks,
                       maxjobs=max(1, multiprocessing.cpu_count() // 2))
            jobset.run(profile_analysis, maxjobs=multiprocessing.cpu_count())
            jobset.run(cleanup, maxjobs=multiprocessing.cpu_count())
            benchmarks = []
            profile_analysis = []
            cleanup = []
    # Run anything still queued after the last in-loop flush.
    if len(benchmarks):
        jobset.run(benchmarks,
                   maxjobs=max(1, multiprocessing.cpu_count() // 2))
        jobset.run(profile_analysis, maxjobs=multiprocessing.cpu_count())
        jobset.run(cleanup, maxjobs=multiprocessing.cpu_count())
143 """generate flamegraphs"""
144 heading(
'Flamegraphs: %s' % bm_name)
    profile_analysis = []
    for line in subprocess.check_output([
            'bazel-bin/test/cpp/microbenchmarks/%s' % bm_name,
            '--benchmark_list_tests'
    ]).decode('UTF-8').splitlines():
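        # Record a perf profile while the benchmark runs: -g captures call
        # graphs and -F 997 samples at roughly 997 Hz.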
        benchmarks.append(
            jobset.JobSpec(
                [
                    'perf', 'record', '-o', '%s-perf.data' % fnize(line),
                    '-g', '-F', '997',
                    'bazel-bin/test/cpp/microbenchmarks/%s' % bm_name,
                    '--benchmark_filter=^%s$' % line,
                    '--benchmark_min_time=10'
                ],
                shortname='perf-%s' % fnize(line)))
        profile_analysis.append(
            jobset.JobSpec(
                [
                    'tools/run_tests/performance/process_local_perf_flamegraphs.sh'
                ],
                environ={
                    'PERF_BASE_NAME': fnize(line),
                    'OUTPUT_DIR': 'reports',
                    'OUTPUT_FILENAME': fnize(line),
                },
                shortname='flame-%s' % fnize(line)))
        cleanup.append(jobset.JobSpec(['rm', '%s-perf.data' % fnize(line)]))
        cleanup.append(jobset.JobSpec(['rm', '%s-out.perf' % fnize(line)]))
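        # perf.data files are large, so flush the queued work every 20 cases.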
        if len(benchmarks) >= 20:
            jobset.run(benchmarks, maxjobs=1)
            jobset.run(profile_analysis, maxjobs=multiprocessing.cpu_count())
            jobset.run(cleanup, maxjobs=multiprocessing.cpu_count())
            benchmarks = []
            profile_analysis = []
            cleanup = []
    # Run anything still queued after the last in-loop flush.
    if len(benchmarks):
        jobset.run(benchmarks, maxjobs=1)
        jobset.run(profile_analysis, maxjobs=multiprocessing.cpu_count())
        jobset.run(cleanup, maxjobs=multiprocessing.cpu_count())
    cmd = [
        'bazel-bin/test/cpp/microbenchmarks/%s' % bm_name,
        '--benchmark_out=%s.%s.json' % (base_json_name, cfg),
        '--benchmark_out_format=json'
    ]
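    # --benchmark_out writes machine-readable JSON results into the current
    # working directory; stdout (returned below) carries the human-readable
    # summary table.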
    if args.summary_time is not None:
        cmd += ['--benchmark_min_time=%d' % args.summary_time]
    return subprocess.check_output(cmd).decode('UTF-8')
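    # collect_summary runs the benchmark twice, once with the 'opt'
    # configuration and once with the 'counters' configuration, echoing each
    # summary to the terminal and into the HTML report.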
    nocounters_heading = 'Summary: %s [no counters]' % bm_name
    nocounters_summary = run_summary(bm_name, 'opt', bm_name)
    heading(nocounters_heading)
    text(nocounters_summary)
    print(nocounters_heading)
    print(nocounters_summary)
    counters_heading = 'Summary: %s [with counters]' % bm_name
    counters_summary = run_summary(bm_name, 'counters', bm_name)
    heading(counters_heading)
    text(counters_summary)
    print(counters_heading)
    print(counters_summary)
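    # Optionally convert the two JSON result files to CSV via bm2bq.py and
    # upload them to the configured BigQuery table.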
    if args.bq_result_table:
        with open('%s.csv' % bm_name, 'w') as f:
            f.write(
                subprocess.check_output([
                    'tools/profiling/microbenchmarks/bm2bq.py',
                    '%s.counters.json' % bm_name,
                    '%s.opt.json' % bm_name
                ]).decode('UTF-8'))
        # Upload the CSV to the requested table with the bq CLI.
        subprocess.check_call(
            ['bq', 'load', '%s' % args.bq_result_table, '%s.csv' % bm_name])
collectors = {
    'latency': collect_latency,
    'perf': collect_perf,
    'summary': collect_summary,
}
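# Command-line interface: pick the collectors and benchmarks to run, plus
# optional BigQuery upload and summary timing settings.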
argp = argparse.ArgumentParser(description='Collect data from microbenchmarks')
argp.add_argument('-c',
                  '--collect',
                  choices=sorted(collectors.keys()),
                  nargs='*',
                  default=sorted(collectors.keys()),
                  help='Which collectors should be run against each benchmark')
argp.add_argument('-b',
                  '--benchmarks',
                  choices=bm_constants._AVAILABLE_BENCHMARK_TESTS,
                  default=bm_constants._AVAILABLE_BENCHMARK_TESTS,
                  nargs='+',
                  help='Which microbenchmarks should be run')
argp.add_argument(
    '--bq_result_table',
    help='Upload results from the summary collection to the given BigQuery table.')
argp.add_argument(
    '--summary_time',
    default=None,
    type=int,
    help='Minimum time (in seconds) to run benchmarks for the summary collection')
args = argp.parse_args()
for collect in args.collect:
    for bm_name in args.benchmarks:
        collectors[collect](bm_name, args)
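# Finish by writing the accumulated HTML report under reports/.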
if not os.path.exists('reports'):
    os.makedirs('reports')
index_html += "</body>\n</html>\n"
with open('reports/index.html', 'w') as f:
    f.write(index_html)