bm_main.py
Go to the documentation of this file.
1 #!/usr/bin/env python3
2 #
3 # Copyright 2017 gRPC authors.
4 #
5 # Licensed under the Apache License, Version 2.0 (the "License");
6 # you may not use this file except in compliance with the License.
7 # You may obtain a copy of the License at
8 #
9 # http://www.apache.org/licenses/LICENSE-2.0
10 #
11 # Unless required by applicable law or agreed to in writing, software
12 # distributed under the License is distributed on an "AS IS" BASIS,
13 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 # See the License for the specific language governing permissions and
15 # limitations under the License.
""" Runs the entire bm_*.py pipeline, and possibly comments on the PR """
17 
import argparse
import errno
import functools
import multiprocessing
import os
import random
import subprocess
import sys
24 
# Make the shared run_tests/python_utils helpers importable regardless of
# whether this script lives two or three directories below the repo root.
sys.path.extend(
    os.path.join(os.path.dirname(sys.argv[0]), *ups, 'run_tests',
                 'python_utils')
    for ups in (('..', '..'), ('..', '..', '..')))
32 
33 import bm_build
34 import bm_constants
35 import bm_diff
36 import bm_run
37 import check_on_pr
38 import jobset
39 
40 
def _args():
    """Parse and validate command-line arguments for the benchmark pipeline.

    Returns:
      The parsed argparse.Namespace.
    """
    argp = argparse.ArgumentParser(
        description='Perform diff on microbenchmarks')
    argp.add_argument('-t',
                      '--track',
                      choices=sorted(bm_constants._INTERESTING),
                      nargs='+',
                      default=sorted(bm_constants._INTERESTING),
                      help='Which metrics to track')
    argp.add_argument('-b',
                      '--benchmarks',
                      nargs='+',
                      choices=bm_constants._AVAILABLE_BENCHMARK_TESTS,
                      default=bm_constants._AVAILABLE_BENCHMARK_TESTS,
                      help='Which benchmarks to run')
    argp.add_argument('-d',
                      '--diff_base',
                      type=str,
                      help='Commit or branch to compare the current one to')
    argp.add_argument(
        '-o',
        '--old',
        default='old',
        type=str,
        help='Name of baseline run to compare to. Usually just called "old"')
    argp.add_argument('-r',
                      '--regex',
                      type=str,
                      default="",
                      help='Regex to filter benchmarks run')
    argp.add_argument(
        '-l',
        '--loops',
        type=int,
        default=10,
        help=
        'Number of times to loops the benchmarks. More loops cuts down on noise'
    )
    argp.add_argument('-j',
                      '--jobs',
                      type=int,
                      default=multiprocessing.cpu_count(),
                      help='Number of CPUs to use')
    argp.add_argument('--pr_comment_name',
                      type=str,
                      default="microbenchmarks",
                      help='Name that Jenkins will use to comment on the PR')
    argp.add_argument('--counters', dest='counters', action='store_true')
    argp.add_argument('--no-counters', dest='counters', action='store_false')
    argp.set_defaults(counters=True)
    args = argp.parse_args()
    # Use the parser's own error reporting instead of `assert`: asserts are
    # stripped under `python -O`, and argp.error prints usage and exits with
    # a proper non-zero status.
    if not (args.diff_base or args.old):
        argp.error('One of diff_base or old must be set!')
    if args.loops < 3:
        print("WARNING: This run will likely be noisy. Increase loops.")
    return args
96 
97 
def eintr_be_gone(fn):
    """Wrap *fn* so that calls interrupted by EINTR are transparently retried.

    Args:
      fn: The callable to wrap.

    Returns:
      A wrapper that keeps invoking fn(*args) until it either returns
      normally or raises an IOError whose errno is not EINTR.
    """

    # NOTE: the original referenced errno.EINTR without importing errno,
    # turning every IOError into a NameError; the module-level `import errno`
    # fixes that.
    @functools.wraps(fn)
    def inner(*args):
        while True:
            try:
                return fn(*args)
            except IOError as e:
                # Retry only genuine signal interruptions; any other I/O
                # failure is a real error and must propagate to the caller.
                if e.errno != errno.EINTR:
                    raise

    return inner
110 
111 
def main(args):
    """Build old and new benchmark binaries, run them, diff, and report.

    Args:
      args: Parsed command-line namespace from _args().
    """

    bm_build.build('new', args.benchmarks, args.jobs, args.counters)

    old = args.old
    if args.diff_base:
        old = 'old'
        # check_output returns bytes on Python 3; decode so the restoring
        # `git checkout` below receives a str ref name.
        where_am_i = subprocess.check_output(
            ['git', 'rev-parse', '--abbrev-ref', 'HEAD']).decode().strip()
        subprocess.check_call(['git', 'checkout', args.diff_base])
        try:
            bm_build.build(old, args.benchmarks, args.jobs, args.counters)
        finally:
            # Always restore the original working tree, even if the baseline
            # build fails.
            subprocess.check_call(['git', 'checkout', where_am_i])
            subprocess.check_call(['git', 'submodule', 'update'])

    jobs_list = []
    jobs_list += bm_run.create_jobs('new', args.benchmarks, args.loops,
                                    args.regex, args.counters)
    jobs_list += bm_run.create_jobs(old, args.benchmarks, args.loops,
                                    args.regex, args.counters)

    # Shuffle all jobs to eliminate noise from GCE CPU drift.  The
    # two-argument form random.shuffle(x, random) was removed in Python 3.11,
    # so shuffle through the SystemRandom instance directly.
    random.SystemRandom().shuffle(jobs_list)
    jobset.run(jobs_list, maxjobs=args.jobs)

    diff, note, significance = bm_diff.diff(args.benchmarks, args.loops,
                                            args.regex, args.track, old, 'new',
                                            args.counters)
    if diff:
        text = '[%s] Performance differences noted:\n%s' % (
            args.pr_comment_name, diff)
    else:
        text = '[%s] No significant performance differences' % args.pr_comment_name
    if note:
        text = note + '\n\n' + text
    print('%s' % text)
    check_on_pr.check_on_pr('Benchmark', '```\n%s\n```' % text)
150 
151 
if __name__ == '__main__':
    # Parse flags and drive the full build/run/diff/report pipeline.
    main(_args())
bm_run.create_jobs
def create_jobs(name, benchmarks, loops, regex, counters)
Definition: bm_run.py:105
bm_diff.diff
def diff(bms, loops, regex, track, old, new, counters)
Definition: bm_diff.py:164
bm_main.main
def main(args)
Definition: bm_main.py:112
generate-asm-lcov.fn
fn
Definition: generate-asm-lcov.py:146
bm_main._args
def _args()
Definition: bm_main.py:41
main
Definition: main.py:1
bm_main.eintr_be_gone
def eintr_be_gone(fn)
Definition: bm_main.py:98
bm_build.build
def build(name, benchmarks, jobs, counters)
Definition: bm_build.py:78


grpc
Author(s):
autogenerated on Fri May 16 2025 02:57:49