from __future__ import print_function

import argparse
import json
import os
import sys
import timeit
# Command-line interface for the Python protobuf benchmark runner.
parser = argparse.ArgumentParser(description="Python protobuf benchmark")
parser.add_argument("data_files", metavar="dataFile", nargs="+",
                    help="testing data files.")
# "--json" is a flag: presence stores the string "yes" (not a boolean).
parser.add_argument("--json", action="store_const", dest="json",
                    const="yes", default="no",
                    help="Whether to output json results")
parser.add_argument("--behavior_prefix", dest="behavior_prefix",
                    help="The output json format's behavior's name's prefix",
                    # NOTE(review): this closing default was missing in the
                    # damaged source (the call was left unterminated); "py" is
                    # the conventional prefix for this benchmark — confirm.
                    default="py")
parser.add_argument("--cpp_generated", action="store_const",
                    dest="cpp_generated", const="yes", default="no",
                    help="Whether to link generated code library")

args = parser.parse_args()
# When benchmarking the C++-backed implementation, the generated C++ code
# library must be importable before the generated Python modules so the
# message descriptors can be found (hence the sys.path manipulation here,
# before the dataset imports below).
if args.cpp_generated != "no":
  sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + "/.libs")
  import libbenchmark_messages
  sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + "/tmp")
# Generated benchmark message modules, one per dataset, plus the dataset
# container proto itself. (Each statement was split across two lines in the
# damaged source; reassembled here.)
import datasets.google_message1.proto2.benchmark_message1_proto2_pb2 as benchmark_message1_proto2_pb2
import datasets.google_message1.proto3.benchmark_message1_proto3_pb2 as benchmark_message1_proto3_pb2
import datasets.google_message2.benchmark_message2_pb2 as benchmark_message2_pb2
import datasets.google_message3.benchmark_message3_pb2 as benchmark_message3_pb2
import datasets.google_message4.benchmark_message4_pb2 as benchmark_message4_pb2
import benchmarks_pb2 as benchmarks_pb2
def run_one_test(filename):
  """Benchmark parse and serialize for one dataset file; return a result dict.

  The returned dict has keys "filename", "message_name", and "benchmarks"
  (a mapping from prefixed behavior name to MB/s throughput).

  NOTE(review): the def line, the file read, and the total_bytes/result
  initialization were missing from the damaged source and are reconstructed
  here — confirm against the original file.
  """
  data = open(filename, "rb").read()
  benchmark_dataset = benchmarks_pb2.BenchmarkDataset()
  benchmark_dataset.ParseFromString(data)
  total_bytes = 0
  for payload in benchmark_dataset.payload:
    total_bytes += len(payload)
  # One iteration of a timed method touches every payload once.
  benchmark_util = Benchmark(full_iteration=len(benchmark_dataset.payload),
                             module="py_benchmark",
                             # NOTE(review): setup_method line was missing;
                             # "init" matches the setup function below.
                             setup_method="init",
                             total_bytes=total_bytes)
  result = {}
  result["filename"] = filename
  result["message_name"] = benchmark_dataset.message_name
  result["benchmarks"] = {}
  benchmark_util.set_test_method("parse_from_benchmark")
  result["benchmarks"][args.behavior_prefix + "_parse_from_benchmark"] = \
      benchmark_util.run_benchmark(setup_method_args='"%s"' % (filename))
  benchmark_util.set_test_method("serialize_to_benchmark")
  result["benchmarks"][args.behavior_prefix + "_serialize_to_benchmark"] = \
      benchmark_util.run_benchmark(setup_method_args='"%s"' % (filename))
  return result
def init(filename):
  """Load a dataset file and pre-parse its payloads for the timed methods.

  Populates the module-level globals (benchmark_dataset, message_class,
  message_list, counter, total_bytes) that parse_from_benchmark and
  serialize_to_benchmark read.

  Raises:
    IOError: if the dataset's message_name is not one of the known types.

  NOTE(review): the def line, the local/global initializations, the file
  read, and the `else:` before the raise were missing from the damaged
  source and are reconstructed here.
  """
  global benchmark_dataset, message_class, message_list, counter, total_bytes
  message_list = []
  counter = 0
  total_bytes = 0
  data = open(filename, "rb").read()
  benchmark_dataset = benchmarks_pb2.BenchmarkDataset()
  benchmark_dataset.ParseFromString(data)
  # Map the dataset's message name onto the generated message class.
  if benchmark_dataset.message_name == "benchmarks.proto3.GoogleMessage1":
    message_class = benchmark_message1_proto3_pb2.GoogleMessage1
  elif benchmark_dataset.message_name == "benchmarks.proto2.GoogleMessage1":
    message_class = benchmark_message1_proto2_pb2.GoogleMessage1
  elif benchmark_dataset.message_name == "benchmarks.proto2.GoogleMessage2":
    message_class = benchmark_message2_pb2.GoogleMessage2
  elif benchmark_dataset.message_name == "benchmarks.google_message3.GoogleMessage3":
    message_class = benchmark_message3_pb2.GoogleMessage3
  elif benchmark_dataset.message_name == "benchmarks.google_message4.GoogleMessage4":
    message_class = benchmark_message4_pb2.GoogleMessage4
  else:
    raise IOError("Message %s not found!" % (benchmark_dataset.message_name))
  # Pre-parse every payload so serialize_to_benchmark times only serialization.
  for one_payload in benchmark_dataset.payload:
    temp = message_class()
    temp.ParseFromString(one_payload)
    message_list.append(temp)
    total_bytes += len(one_payload)
def parse_from_benchmark():
  """Timed method: parse one raw payload, cycling through the dataset.

  NOTE(review): the def line was missing from the damaged source and is
  reconstructed (the name matches set_test_method("parse_from_benchmark")).
  """
  global counter, message_class, benchmark_dataset
  m = message_class().ParseFromString(
      benchmark_dataset.payload[counter % len(benchmark_dataset.payload)])
def serialize_to_benchmark():
  """Timed method: serialize one pre-parsed message, cycling through the list.

  NOTE(review): the def line and the SerializeToString statement (orig line
  104) were missing from the damaged source and are reconstructed; only the
  globals declaration and counter increment were visible.
  """
  global counter, message_list, message_class
  s = message_list[counter % len(message_list)].SerializeToString()
  counter = counter + 1
class Benchmark:
  """Times a module-level test method with timeit and reports MB/s.

  NOTE(review): only fragments of this class survived in the damaged source
  (parts of __init__, the setup-code builder, dry_run, and run_benchmark);
  the surrounding structure is reconstructed to fit those fragments —
  confirm against the original file.
  """

  def __init__(self, module=None, test_method=None,
               setup_method=None, total_bytes=None, full_iteration=1):
    self.full_iteration = full_iteration
    self.module = module
    self.test_method = test_method
    self.setup_method = setup_method
    self.total_bytes = total_bytes

  def set_test_method(self, test_method):
    # Switch the timed method; setup, iteration count, and byte total persist.
    self.test_method = test_method

  def full_setup_code(self, setup_method_args=''):
    # timeit setup snippet: import both methods, then run setup once.
    setup_code = ''
    setup_code += "from %s import %s\n" % (self.module, self.test_method)
    setup_code += "from %s import %s\n" % (self.module, self.setup_method)
    setup_code += "%s(%s)\n" % (self.setup_method, setup_method_args)
    return setup_code

  def dry_run(self, test_method_args='', setup_method_args=''):
    # One calibration pass at the nominal iteration count.
    return timeit.timeit(stmt="%s(%s)" % (self.test_method, test_method_args),
                         setup=self.full_setup_code(setup_method_args),
                         number=self.full_iteration)

  def run_benchmark(self, test_method_args='', setup_method_args=''):
    """Return average throughput (MB/s) for the configured test method."""
    reps = self.full_iteration
    t = self.dry_run(test_method_args, setup_method_args)
    if t < 3:
      # Too quick to measure reliably: scale reps to ~3s and re-time.
      reps = int(self.full_iteration * 3 / t)
      t = timeit.timeit(stmt="%s(%s)" % (self.test_method, test_method_args),
                        setup=self.full_setup_code(setup_method_args),
                        number=reps)
    # total_bytes covers one full_iteration; normalize elapsed time to that.
    return self.total_bytes * 1.0 / 2 ** 20 / (1.0 * t / reps * self.full_iteration)
if __name__ == "__main__":
  # Run every dataset file, then report either machine-readable JSON or a
  # human-readable summary.
  # NOTE(review): the results-accumulation loop body and the `else:` before
  # the plain-text branch were missing from the damaged source and are
  # reconstructed here.
  results = []
  for file in args.data_files:
    results.append(run_one_test(file))
  if args.json != "no":
    print(json.dumps(results))
  else:
    for result in results:
      print("Message %s of dataset file %s" % \
          (result["message_name"], result["filename"]))
      print("Average throughput for parse_from_benchmark: %.2f MB/s" % \
          (result["benchmarks"][ \
              args.behavior_prefix + "_parse_from_benchmark"]))
      print("Average throughput for serialize_to_benchmark: %.2f MB/s" % \
          (result["benchmarks"][ \
              args.behavior_prefix + "_serialize_to_benchmark"]))