benchmark/tools/gbench/util.py
1 """util.py - General utilities for running, loading, and processing benchmarks
2 """
3 import json
4 import os
5 import tempfile
6 import subprocess
7 import sys
8 import functools
9 
10 # Input file type enumeration
11 IT_Invalid = 0
12 IT_JSON = 1
13 IT_Executable = 2
14 
15 _num_magic_bytes = 2 if sys.platform.startswith('win') else 4
16 
17 
def is_executable_file(filename):
    """
    Return 'True' if 'filename' names a valid file which is likely
    an executable. A file is considered an executable if it starts with the
    magic bytes for an EXE, Mach-O, or ELF file.
    """
    if not os.path.isfile(filename):
        return False
    with open(filename, mode='rb') as f:
        magic_bytes = f.read(_num_magic_bytes)
    if sys.platform == 'darwin':
        return magic_bytes in [
            b'\xfe\xed\xfa\xce',  # MH_MAGIC
            b'\xce\xfa\xed\xfe',  # MH_CIGAM
            b'\xfe\xed\xfa\xcf',  # MH_MAGIC_64
            b'\xcf\xfa\xed\xfe',  # MH_CIGAM_64
            b'\xca\xfe\xba\xbe',  # FAT_MAGIC
            b'\xbe\xba\xfe\xca'   # FAT_CIGAM
        ]
    elif sys.platform.startswith('win'):
        return magic_bytes == b'MZ'
    else:
        return magic_bytes == b'\x7FELF'

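# Usage sketch (not part of the original module; './bm_basic' is a
# hypothetical benchmark binary and 'results.json' a plain-text file):
#
#   is_executable_file('./bm_basic')    # -> True: EXE/Mach-O/ELF magic found
#   is_executable_file('results.json')  # -> False: no executable magic bytes
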
def is_json_file(filename):
    """
    Returns 'True' if 'filename' names a valid JSON output file.
    'False' otherwise.
    """
    try:
        with open(filename, 'r') as f:
            json.load(f)
        return True
    except BaseException:
        pass
    return False


def classify_input_file(filename):
    """
    Return a tuple (type, msg) where 'type' specifies the classified type
    of 'filename'. If 'type' is 'IT_Invalid' then 'msg' is a human readable
    string representing the error.
    """
    ftype = IT_Invalid
    err_msg = None
    if not os.path.exists(filename):
        err_msg = "'%s' does not exist" % filename
    elif not os.path.isfile(filename):
        err_msg = "'%s' does not name a file" % filename
    elif is_executable_file(filename):
        ftype = IT_Executable
    elif is_json_file(filename):
        ftype = IT_JSON
    else:
        err_msg = "'%s' does not name a valid benchmark executable or JSON file" % filename
    return ftype, err_msg

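# Usage sketch (illustrative paths, not part of the original module):
#
#   classify_input_file('./bm_basic')    # -> (IT_Executable, None)
#   classify_input_file('results.json')  # -> (IT_JSON, None)
#   classify_input_file('missing.txt')   # -> (IT_Invalid, "'missing.txt' does not exist")
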
def check_input_file(filename):
    """
    Classify the file named by 'filename' and return the classification.
    If the file is classified as 'IT_Invalid' print an error message and exit
    the program.
    """
    ftype, msg = classify_input_file(filename)
    if ftype == IT_Invalid:
        print("Invalid input file: %s" % msg)
        sys.exit(1)
    return ftype


def find_benchmark_flag(prefix, benchmark_flags):
    """
    Search the specified list of flags for a flag matching `<prefix><arg>` and
    if it is found return the arg it specifies. If specified more than once the
    last value is returned. If the flag is not found None is returned.
    """
    assert prefix.startswith('--') and prefix.endswith('=')
    result = None
    for f in benchmark_flags:
        if f.startswith(prefix):
            result = f[len(prefix):]
    return result


def remove_benchmark_flags(prefix, benchmark_flags):
    """
    Return a new list containing the specified benchmark_flags except those
    with the specified prefix.
    """
    assert prefix.startswith('--') and prefix.endswith('=')
    return [f for f in benchmark_flags if not f.startswith(prefix)]

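# Usage sketch for the two flag helpers above (illustrative flag values, not
# part of the original module). Both require a '--name=' style prefix; when a
# flag is repeated, find_benchmark_flag returns the last value:
#
#   flags = ['--benchmark_filter=BM_Foo',
#            '--benchmark_out=a.json',
#            '--benchmark_out=b.json']
#   find_benchmark_flag('--benchmark_out=', flags)     # -> 'b.json'
#   remove_benchmark_flags('--benchmark_out=', flags)  # -> ['--benchmark_filter=BM_Foo']
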
def load_benchmark_results(fname):
    """
    Read benchmark output from a file and return the JSON object.
    REQUIRES: 'fname' names a file containing JSON benchmark output.
    """
    with open(fname, 'r') as f:
        return json.load(f)


def sort_benchmark_results(result):
    benchmarks = result['benchmarks']

    # From inner key to the outer key!
    benchmarks = sorted(
        benchmarks, key=lambda benchmark: benchmark['repetition_index'] if 'repetition_index' in benchmark else -1)
    benchmarks = sorted(
        benchmarks, key=lambda benchmark: 1 if 'run_type' in benchmark and benchmark['run_type'] == "aggregate" else 0)
    benchmarks = sorted(
        benchmarks, key=lambda benchmark: benchmark['per_family_instance_index'] if 'per_family_instance_index' in benchmark else -1)
    benchmarks = sorted(
        benchmarks, key=lambda benchmark: benchmark['family_index'] if 'family_index' in benchmark else -1)

    result['benchmarks'] = benchmarks
    return result

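# Note on the chained sorts above (explanatory, not part of the original
# module): Python's sort is stable, so sorting by the innermost key first and
# by 'family_index' last makes 'family_index' the primary key, then
# 'per_family_instance_index', then per-repetition rows ahead of aggregates,
# then 'repetition_index'. Entries therefore end up ordered roughly like:
#
#   {'family_index': 0, 'per_family_instance_index': 0,
#    'run_type': 'iteration', 'repetition_index': 0, ...}
#   {'family_index': 0, 'per_family_instance_index': 0,
#    'run_type': 'iteration', 'repetition_index': 1, ...}
#   {'family_index': 0, 'per_family_instance_index': 0,
#    'run_type': 'aggregate', ...}
#   {'family_index': 1, ...}
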
def run_benchmark(exe_name, benchmark_flags):
    """
    Run a benchmark specified by 'exe_name' with the specified
    'benchmark_flags'. The benchmark is run directly as a subprocess to
    preserve real time console output.
    RETURNS: A JSON object representing the benchmark output
    """
    output_name = find_benchmark_flag('--benchmark_out=',
                                      benchmark_flags)
    is_temp_output = False
    if output_name is None:
        is_temp_output = True
        thandle, output_name = tempfile.mkstemp()
        os.close(thandle)
        benchmark_flags = list(benchmark_flags) + \
            ['--benchmark_out=%s' % output_name]

    cmd = [exe_name] + benchmark_flags
    print("RUNNING: %s" % ' '.join(cmd))
    exitCode = subprocess.call(cmd)
    if exitCode != 0:
        print('TEST FAILED...')
        sys.exit(exitCode)
    json_res = load_benchmark_results(output_name)
    if is_temp_output:
        os.unlink(output_name)
    return json_res

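# Usage sketch (illustrative; './bm_basic' is a hypothetical binary, not part
# of the original module). If the caller does not pass '--benchmark_out=', a
# temporary output file is created, handed to the binary, parsed, and deleted:
#
#   res = run_benchmark('./bm_basic', ['--benchmark_filter=BM_Foo'])
#   names = [b['name'] for b in res['benchmarks']]
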
def run_or_load_benchmark(filename, benchmark_flags):
    """
    Get the results for a specified benchmark. If 'filename' specifies
    an executable benchmark then the results are generated by running the
    benchmark. Otherwise 'filename' must name a valid JSON output file,
    which is loaded and the result returned.
    """
    ftype = check_input_file(filename)
    if ftype == IT_JSON:
        return load_benchmark_results(filename)
    if ftype == IT_Executable:
        return run_benchmark(filename, benchmark_flags)
    raise ValueError('Unknown file type %s' % ftype)
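
# Usage sketch (illustrative paths, not part of the original module). The same
# call works for an executable and for a previously saved JSON file; the flags
# are only used when the input is an executable:
#
#   run_or_load_benchmark('./bm_basic', ['--benchmark_repetitions=3'])
#   run_or_load_benchmark('old_results.json', [])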