1 """util.py - General utilities for running, loading, and processing benchmarks
# Windows PE executables are identified by a 2-byte magic ('MZ'); Mach-O and
# ELF magics are 4 bytes long.
_num_magic_bytes = 2 if sys.platform.startswith('win') else 4
Return 'True' if 'filename' names a valid file which is likely
an executable. A file is considered an executable if it starts with the
magic bytes for an EXE, Mach-O, or ELF file.
if not os.path.isfile(filename):
    return False
with open(filename, mode='rb') as f:
    magic_bytes = f.read(_num_magic_bytes)
if sys.platform == 'darwin':
    return magic_bytes in [
        b'\xfe\xed\xfa\xce',  # MH_MAGIC
        b'\xce\xfa\xed\xfe',  # MH_CIGAM
        b'\xfe\xed\xfa\xcf',  # MH_MAGIC_64
        b'\xcf\xfa\xed\xfe',  # MH_CIGAM_64
        b'\xca\xfe\xba\xbe',  # FAT_MAGIC
        b'\xbe\xba\xfe\xca'   # FAT_CIGAM
    ]
elif sys.platform.startswith('win'):
    return magic_bytes == b'MZ'
else:
    return magic_bytes == b'\x7FELF'
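# Usage sketch (illustrative; the enclosing helper is assumed to be named
# is_executable_file(filename), whose 'def' line is not part of this excerpt):
#
#   >>> is_executable_file(sys.executable)
#   True      # PE/Mach-O/ELF magic detected
#   >>> is_executable_file('results.json')
#   False     # plain text, no executable magic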
Returns 'True' if 'filename' names a valid JSON output file.
with open(filename, 'r') as f:
    json.load(f)
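# Illustrative sketch of the check above (assumed helper name
# is_json_file(filename)):
#
#   >>> is_json_file('benchmark_results.json')   # parseable JSON
#   True
#   >>> is_json_file('./my_benchmark')            # binary executable, not JSON
#   False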
Return a tuple (type, msg) where 'type' specifies the classified type
of 'filename'. If 'type' is 'IT_Invalid' then 'msg' is a human-readable
string representing the error.
if not os.path.exists(filename):
    err_msg = "'%s' does not exist" % filename
elif not os.path.isfile(filename):
    err_msg = "'%s' does not name a file" % filename
elif is_executable_file(filename):
    ftype = IT_Executable
elif is_json_file(filename):
    ftype = IT_JSON
else:
    err_msg = "'%s' does not name a valid benchmark executable or JSON file" % filename
Classify the file named by 'filename' and return the classification.
If the file is classified as 'IT_Invalid' print an error message and exit
the program.
if ftype == IT_Invalid:
    print("Invalid input file: %s" % msg)
    sys.exit(1)
Search the specified list of flags for a flag matching `<prefix><arg>` and
if it is found return the arg it specifies. If specified more than once, the
last value is returned. If the flag is not found, None is returned.
assert prefix.startswith('--') and prefix.endswith('=')
result = None
for f in benchmark_flags:
    if f.startswith(prefix):
        result = f[len(prefix):]
return result
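# Illustrative examples of the matching rules above (assumed name
# find_benchmark_flag(prefix, benchmark_flags)):
#
#   >>> find_benchmark_flag('--benchmark_out=',
#   ...                     ['--benchmark_out=a.json', '--benchmark_out=b.json'])
#   'b.json'    # the last occurrence wins
#
#   find_benchmark_flag('--benchmark_out=', ['--benchmark_filter=BM_Foo'])
#   returns None, since the flag is not present.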
Return a new list containing the specified benchmark_flags except those
with the specified prefix.
assert prefix.startswith('--') and prefix.endswith('=')
return [f for f in benchmark_flags if not f.startswith(prefix)]
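# Illustrative example (assumed name remove_benchmark_flags(prefix,
# benchmark_flags)); flags that do not start with the prefix are kept:
#
#   >>> remove_benchmark_flags('--benchmark_out=',
#   ...                        ['--benchmark_out=a.json', '--benchmark_filter=BM_Foo'])
#   ['--benchmark_filter=BM_Foo']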
Read benchmark output from a file and return the JSON object.
REQUIRES: 'fname' names a file containing JSON benchmark output.
with open(fname, 'r') as f:
    return json.load(f)
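# Usage sketch (assumed name load_benchmark_results(fname)); the returned
# object has the JSON layout written via --benchmark_out, i.e. a dict with
# 'context' and 'benchmarks' keys:
#
#   >>> res = load_benchmark_results('run1.json')       # hypothetical file
#   >>> [b['name'] for b in res['benchmarks']]
#   ['BM_Foo', 'BM_Bar']                                 # hypothetical names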
Run a benchmark specified by 'exe_name' with the specified
'benchmark_flags'. The benchmark is run directly as a subprocess to preserve
real-time console output.
RETURNS: A JSON object representing the benchmark output.
is_temp_output = False
if output_name is None:
    is_temp_output = True
    thandle, output_name = tempfile.mkstemp()
    os.close(thandle)
    benchmark_flags = list(benchmark_flags) + \
        ['--benchmark_out=%s' % output_name]
cmd = [exe_name] + benchmark_flags
print("RUNNING: %s" % ' '.join(cmd))
exitCode = subprocess.call(cmd)
if exitCode != 0:
    print('TEST FAILED...')
    sys.exit(exitCode)
if is_temp_output:
    os.unlink(output_name)
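# Usage sketch (assumed name run_benchmark(exe_name, benchmark_flags)); the
# temporary --benchmark_out file is invisible to the caller, who only sees
# the parsed JSON object:
#
#   >>> res = run_benchmark('./my_benchmark', ['--benchmark_filter=BM_Foo'])
#   RUNNING: ./my_benchmark --benchmark_filter=BM_Foo --benchmark_out=/tmp/...
#   >>> res['benchmarks'][0]['name']                     # hypothetical output
#   'BM_Foo'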
Get the results for a specified benchmark. If 'filename' specifies
an executable benchmark then the results are generated by running the
benchmark. Otherwise 'filename' must name a valid JSON output file,
which is loaded and the result returned.
ftype = check_input_file(filename)
if ftype == IT_JSON:
    return load_benchmark_results(filename)
elif ftype == IT_Executable:
    return run_benchmark(filename, benchmark_flags)
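# Dispatch sketch (assumed name run_or_load_benchmark(filename,
# benchmark_flags)): JSON result files are loaded directly, executables are
# run with the given flags, and anything else has already caused an exit in
# check_input_file.
#
#   >>> run_or_load_benchmark('baseline.json', [])                # load only
#   >>> run_or_load_benchmark('./my_benchmark', ['--benchmark_repetitions=3'])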