import sys
import os
import timeit
import math
import argparse
import fnmatch
import json

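# Command-line usage (a sketch based on the argparse setup below; the dataset
# file names are examples, not fixed paths): pass one or more serialized
# benchmarks.BenchmarkDataset files as positional arguments, e.g.
#   python py_benchmark.py [--json] [--cpp_generated] dataset1.pb dataset2.pb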
parser = argparse.ArgumentParser(description="Python protobuf benchmark")
parser.add_argument("data_files", metavar="dataFile", nargs="+",
                    help="testing data files.")
parser.add_argument("--json", action="store_const", dest="json",
                    const="yes", default="no",
                    help="Whether to output json results")
parser.add_argument("--behavior_prefix", dest="behavior_prefix",
                    help="The output json format's behavior's name's prefix",
                    default="")
# BEGIN CPP GENERATED MESSAGE
parser.add_argument("--cpp_generated", action="store_const",
                    dest="cpp_generated", const="yes", default="no",
                    help="Whether to link generated code library")
# END CPP GENERATED MESSAGE
args = parser.parse_args()
# BEGIN CPP GENERATED MESSAGE
# The C++ generated code library must be linked before importing the generated
# Python code so that the descriptors can be found in the descriptor pool.
if args.cpp_generated != "no":
  sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + "/.libs")
  import libbenchmark_messages
  sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + "/tmp")
# END CPP GENERATED MESSAGE

33
34import datasets.google_message1.proto2.benchmark_message1_proto2_pb2 as benchmark_message1_proto2_pb2
35import datasets.google_message1.proto3.benchmark_message1_proto3_pb2 as benchmark_message1_proto3_pb2
36import datasets.google_message2.benchmark_message2_pb2 as benchmark_message2_pb2
37import datasets.google_message3.benchmark_message3_pb2 as benchmark_message3_pb2
38import datasets.google_message4.benchmark_message4_pb2 as benchmark_message4_pb2
39import benchmarks_pb2 as benchmarks_pb2
40
41
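# Runs the parse and serialize benchmarks for a single dataset file and returns
# a dict with the file name, message name, and per-benchmark results (average
# nanoseconds per call).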
def run_one_test(filename):
  data = open(filename, "rb").read()
  benchmark_dataset = benchmarks_pb2.BenchmarkDataset()
  benchmark_dataset.ParseFromString(data)
  benchmark_util = Benchmark(full_iteration=len(benchmark_dataset.payload),
                             module="py_benchmark",
                             setup_method="init")
  result = {}
  result["filename"] = filename
  result["message_name"] = benchmark_dataset.message_name
  result["benchmarks"] = {}
  benchmark_util.set_test_method("parse_from_benchmark")
  result["benchmarks"][args.behavior_prefix + "_parse_from_benchmark"] = \
      benchmark_util.run_benchmark(setup_method_args='"%s"' % (filename))
  benchmark_util.set_test_method("serialize_to_benchmark")
  result["benchmarks"][args.behavior_prefix + "_serialize_to_benchmark"] = \
      benchmark_util.run_benchmark(setup_method_args='"%s"' % (filename))
  return result


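# timeit setup hook: loads the dataset file (resolved relative to the directory
# above this script), picks the matching generated message class, and
# pre-parses every payload into message_list for the serialize benchmark.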
def init(filename):
  global benchmark_dataset, message_class, message_list, counter
  message_list = []
  counter = 0
  data = open(os.path.join(os.path.dirname(os.path.abspath(sys.argv[0])),
                           "..", filename), "rb").read()
  benchmark_dataset = benchmarks_pb2.BenchmarkDataset()
  benchmark_dataset.ParseFromString(data)

  if benchmark_dataset.message_name == "benchmarks.proto3.GoogleMessage1":
    message_class = benchmark_message1_proto3_pb2.GoogleMessage1
  elif benchmark_dataset.message_name == "benchmarks.proto2.GoogleMessage1":
    message_class = benchmark_message1_proto2_pb2.GoogleMessage1
  elif benchmark_dataset.message_name == "benchmarks.proto2.GoogleMessage2":
    message_class = benchmark_message2_pb2.GoogleMessage2
  elif benchmark_dataset.message_name == "benchmarks.google_message3.GoogleMessage3":
    message_class = benchmark_message3_pb2.GoogleMessage3
  elif benchmark_dataset.message_name == "benchmarks.google_message4.GoogleMessage4":
    message_class = benchmark_message4_pb2.GoogleMessage4
  else:
    raise IOError("Message %s not found!" % (benchmark_dataset.message_name))

  for one_payload in benchmark_dataset.payload:
    temp = message_class()
    temp.ParseFromString(one_payload)
    message_list.append(temp)


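# Benchmarked statement: parse one serialized payload into a fresh message,
# cycling through the dataset's payloads on successive calls.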
def parse_from_benchmark():
  global counter, message_class, benchmark_dataset
  m = message_class()
  m.ParseFromString(benchmark_dataset.payload[counter % len(benchmark_dataset.payload)])
  counter = counter + 1


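# Benchmarked statement: serialize one of the pre-parsed messages back to a
# string, cycling through message_list on successive calls.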
def serialize_to_benchmark():
  global counter, message_list, message_class
  s = message_list[counter % len(benchmark_dataset.payload)].SerializeToString()
  counter = counter + 1


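# Thin wrapper around timeit: the setup code imports the setup and test methods
# from `module`, runs the setup once, then times repeated calls of the test
# method and reports the average time per call in nanoseconds.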
class Benchmark:
  def __init__(self, module=None, test_method=None,
               setup_method=None, full_iteration=1):
    self.full_iteration = full_iteration
    self.module = module
    self.test_method = test_method
    self.setup_method = setup_method

  def set_test_method(self, test_method):
    self.test_method = test_method

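  # Builds the timeit setup string: import the test and setup methods from the
  # benchmark module, then invoke the setup method with the given arguments.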
  def full_setup_code(self, setup_method_args=''):
    setup_code = ""
    setup_code += "from %s import %s\n" % (self.module, self.test_method)
    setup_code += "from %s import %s\n" % (self.module, self.setup_method)
    setup_code += "%s(%s)\n" % (self.setup_method, setup_method_args)
    return setup_code

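  # Times full_iteration calls once to estimate how long a single pass over the
  # dataset takes; used to pick the repetition count for the real measurement.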
  def dry_run(self, test_method_args='', setup_method_args=''):
    return timeit.timeit(stmt="%s(%s)" % (self.test_method, test_method_args),
                         setup=self.full_setup_code(setup_method_args),
                         number=self.full_iteration)

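  # If the dry run finishes in under 3 seconds, scales the repetition count up
  # so the timed region lasts at least ~3 seconds, then returns the average
  # time per call in nanoseconds.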
  def run_benchmark(self, test_method_args='', setup_method_args=''):
    reps = self.full_iteration
    t = self.dry_run(test_method_args, setup_method_args)
    if t < 3:
      reps = int(math.ceil(3 / t)) * self.full_iteration
      t = timeit.timeit(stmt="%s(%s)" % (self.test_method, test_method_args),
                        setup=self.full_setup_code(setup_method_args),
                        number=reps)
    return 1.0 * t / reps * (10 ** 9)


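# Entry point: run the benchmarks for each dataset file, then print either a
# JSON blob or a human-readable summary of the average parse/serialize times.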
if __name__ == "__main__":
  results = []
  for data_file in args.data_files:
    results.append(run_one_test(data_file))

  if args.json != "no":
    print(json.dumps(results))
  else:
    for result in results:
      print("Message %s of dataset file %s" %
            (result["message_name"], result["filename"]))
      print("Average time for parse_from_benchmark: %.2f ns" %
            (result["benchmarks"][
                args.behavior_prefix + "_parse_from_benchmark"]))
      print("Average time for serialize_to_benchmark: %.2f ns" %
            (result["benchmarks"][
                args.behavior_prefix + "_serialize_to_benchmark"]))
      print("")