1 """util.py - General utilities for running, loading, and processing benchmarks

_num_magic_bytes = 2 if sys.platform.startswith('win') else 4


def is_executable_file(filename):
    """
    Return 'True' if 'filename' names a valid file which is likely
    an executable. A file is considered an executable if it starts with the
    magic bytes for an EXE, Mach-O, or ELF file.
    """
    if not os.path.isfile(filename):
        return False
    with open(filename, mode='rb') as f:
        magic_bytes = f.read(_num_magic_bytes)
    if sys.platform == 'darwin':
        return magic_bytes in [
            b'\xfe\xed\xfa\xce',  # MH_MAGIC
            b'\xce\xfa\xed\xfe',  # MH_CIGAM
            b'\xfe\xed\xfa\xcf',  # MH_MAGIC_64
            b'\xcf\xfa\xed\xfe',  # MH_CIGAM_64
            b'\xca\xfe\xba\xbe',  # FAT_MAGIC
            b'\xbe\xba\xfe\xca'   # FAT_CIGAM
        ]
    elif sys.platform.startswith('win'):
        return magic_bytes == b'MZ'
    else:
        return magic_bytes == b'\x7FELF'
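
# Illustrative checks (hypothetical paths, not part of the original module):
#   is_executable_file(sys.executable)    -> True   (ELF/Mach-O/EXE interpreter binary)
#   is_executable_file('CMakeLists.txt')  -> False  (plain text, no known magic bytes)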


def is_json_file(filename):
    """
    Returns 'True' if 'filename' names a valid JSON output file.
    """
    try:
        with open(filename, 'r') as f:
            json.load(f)
        return True
    except BaseException:
        pass
    return False


def classify_input_file(filename):
    """
    Return a tuple (type, msg) where 'type' specifies the classified type
    of 'filename'. If 'type' is 'IT_Invalid' then 'msg' is a human readable
    string representing the error.
    """
    ftype = IT_Invalid
    err_msg = None
    if not os.path.exists(filename):
        err_msg = "'%s' does not exist" % filename
    elif not os.path.isfile(filename):
        err_msg = "'%s' does not name a file" % filename
    elif is_executable_file(filename):
        ftype = IT_Executable
    elif is_json_file(filename):
        ftype = IT_JSON
    else:
        err_msg = "'%s' does not name a valid benchmark executable or JSON file" % filename
    return ftype, err_msg


def check_input_file(filename):
    """
    Classify the file named by 'filename' and return the classification.
    If the file is classified as 'IT_Invalid' print an error message and exit
    the program.
    """
    ftype, msg = classify_input_file(filename)
    if ftype == IT_Invalid:
        print("Invalid input file: %s" % msg)
        sys.exit(1)
    return ftype


def find_benchmark_flag(prefix, benchmark_flags):
    """
    Search the specified list of flags for a flag matching `<prefix><arg>` and
    if it is found return the arg it specifies. If specified more than once the
    last value is returned. If the flag is not found None is returned.
    """
    assert prefix.startswith('--') and prefix.endswith('=')
    result = None
    for f in benchmark_flags:
        if f.startswith(prefix):
            result = f[len(prefix):]
    return result
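
# Illustrative usage (made-up flag values, not part of the original module):
#   find_benchmark_flag('--benchmark_out=',
#                       ['--benchmark_out=a.json', '--benchmark_out=b.json'])
#       -> 'b.json'   (the last occurrence wins)
#   find_benchmark_flag('--benchmark_out=', ['--benchmark_repetitions=3'])
#       -> None       (flag not present)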


def remove_benchmark_flags(prefix, benchmark_flags):
    """
    Return a new list containing the specified benchmark_flags except those
    with the specified prefix.
    """
    assert prefix.startswith('--') and prefix.endswith('=')
    return [f for f in benchmark_flags if not f.startswith(prefix)]
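
# Illustrative usage (made-up flag values, not part of the original module):
#   remove_benchmark_flags('--benchmark_out=',
#                          ['--benchmark_out=a.json', '--benchmark_repetitions=3'])
#       -> ['--benchmark_repetitions=3']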


def load_benchmark_results(fname):
    """
    Read benchmark output from a file and return the JSON object.
    REQUIRES: 'fname' names a file containing JSON benchmark output.
    """
    with open(fname, 'r') as f:
        return json.load(f)


def sort_benchmark_results(result):
    benchmarks = result['benchmarks']

    # Python's sort is stable, so sort by the innermost key first and by the
    # outermost key ('family_index') last.
    benchmarks = sorted(
        benchmarks,
        key=lambda benchmark: benchmark['repetition_index']
        if 'repetition_index' in benchmark else -1)
    benchmarks = sorted(
        benchmarks,
        key=lambda benchmark: 1
        if 'run_type' in benchmark and benchmark['run_type'] == "aggregate"
        else 0)
    benchmarks = sorted(
        benchmarks,
        key=lambda benchmark: benchmark['per_family_instance_index']
        if 'per_family_instance_index' in benchmark else -1)
    benchmarks = sorted(
        benchmarks,
        key=lambda benchmark: benchmark['family_index']
        if 'family_index' in benchmark else -1)

    result['benchmarks'] = benchmarks
    return result
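
# Net effect of the chained stable sorts above: benchmarks are grouped by
# 'family_index', then by 'per_family_instance_index' within a family, with
# per-repetition entries listed before their "aggregate" rows and repetitions
# in ascending 'repetition_index' order. Entries lacking a key sort first (-1).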


def run_benchmark(exe_name, benchmark_flags):
    """
    Run a benchmark specified by 'exe_name' with the specified
    'benchmark_flags'. The benchmark is run directly as a subprocess to preserve
    real time console output.
    RETURNS: A JSON object representing the benchmark output
    """
    output_name = find_benchmark_flag('--benchmark_out=', benchmark_flags)
    is_temp_output = False
    if output_name is None:
        is_temp_output = True
        thandle, output_name = tempfile.mkstemp()
        os.close(thandle)
        benchmark_flags = list(benchmark_flags) + \
            ['--benchmark_out=%s' % output_name]

    cmd = [exe_name] + benchmark_flags
    print("RUNNING: %s" % ' '.join(cmd))
    exitCode = subprocess.call(cmd)
    if exitCode != 0:
        print('TEST FAILED...')
        sys.exit(exitCode)
    json_res = load_benchmark_results(output_name)
    if is_temp_output:
        os.unlink(output_name)
    return json_res
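
# Illustrative call (hypothetical benchmark binary, not part of the original module):
#   run_benchmark('./mybench', ['--benchmark_filter=BM_Foo'])
#   Console output streams in real time; the parsed JSON report is returned,
#   read back from either the caller's --benchmark_out file or a temporary one.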


def run_or_load_benchmark(filename, benchmark_flags):
    """
    Get the results for a specified benchmark. If 'filename' specifies
    an executable benchmark then the results are generated by running the
    benchmark. Otherwise 'filename' must name a valid JSON output file,
    which is loaded and the result returned.
    """
    ftype = check_input_file(filename)
    if ftype == IT_JSON:
        return load_benchmark_results(filename)
    if ftype == IT_Executable:
        return run_benchmark(filename, benchmark_flags)
    else:
        raise ValueError('Unknown file type %s' % ftype)
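
# Illustrative driver usage (hypothetical paths, not part of the original module):
#   live  = run_or_load_benchmark('./mybench', ['--benchmark_filter=BM_Foo'])
#   saved = run_or_load_benchmark('old_results.json', [])
#   Both calls return the same JSON structure, so comparison tooling can treat
#   fresh runs and previously saved output uniformly.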