abseil-cpp/absl/container/btree_benchmark.cc
// Copyright 2018 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include <stdint.h>

#include <algorithm>
#include <functional>
#include <map>
#include <numeric>
#include <random>
#include <set>
#include <string>
#include <type_traits>
#include <unordered_map>
#include <unordered_set>
#include <vector>

#include "benchmark/benchmark.h"
#include "absl/base/internal/raw_logging.h"
#include "absl/container/btree_map.h"
#include "absl/container/btree_set.h"
#include "absl/container/btree_test.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/internal/hashtable_debug.h"
#include "absl/flags/flag.h"
#include "absl/hash/hash.h"
#include "absl/memory/memory.h"
#include "absl/strings/cord.h"
#include "absl/strings/str_format.h"
#include "absl/time/time.h"

namespace absl {
ABSL_NAMESPACE_BEGIN
namespace container_internal {
namespace {

constexpr size_t kBenchmarkValues = 1 << 20;

// How many times we add and remove sub-batches in one batch of *AddRem
// benchmarks.
constexpr size_t kAddRemBatchSize = 1 << 2;

// Generates n values in the range [0, 4 * n].
template <typename V>
std::vector<V> GenerateValues(int n) {
  constexpr int kSeed = 23;
  return GenerateValuesWithSeed<V>(n, 4 * n, kSeed);
}

// Benchmark insertion of values into a container.
template <typename T>
void BM_InsertImpl(benchmark::State& state, bool sorted) {
  using V = typename remove_pair_const<typename T::value_type>::type;
  typename KeyOfValue<typename T::key_type, V>::type key_of_value;

  std::vector<V> values = GenerateValues<V>(kBenchmarkValues);
  if (sorted) {
    std::sort(values.begin(), values.end());
  }
  T container(values.begin(), values.end());

  // Remove and re-insert 10% of the keys per batch.
  const int batch_size = (kBenchmarkValues + 9) / 10;
  while (state.KeepRunningBatch(batch_size)) {
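    // The erases below happen while the timer is paused, so only the
    // re-insertions are measured.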
    state.PauseTiming();
    const auto i = static_cast<int>(state.iterations());

    for (int j = i; j < i + batch_size; j++) {
      int x = j % kBenchmarkValues;
      container.erase(key_of_value(values[x]));
    }

    state.ResumeTiming();

    for (int j = i; j < i + batch_size; j++) {
      int x = j % kBenchmarkValues;
      container.insert(values[x]);
    }
  }
}

template <typename T>
void BM_Insert(benchmark::State& state) {
  BM_InsertImpl<T>(state, false);
}

template <typename T>
void BM_InsertSorted(benchmark::State& state) {
  BM_InsertImpl<T>(state, true);
}

// Benchmark inserting the first few elements in a container. In b-tree, this is
// when the root node grows.
template <typename T>
void BM_InsertSmall(benchmark::State& state) {
  using V = typename remove_pair_const<typename T::value_type>::type;

  const int kSize = 8;
  std::vector<V> values = GenerateValues<V>(kSize);
  T container;

  while (state.KeepRunningBatch(kSize)) {
    for (int i = 0; i < kSize; ++i) {
      benchmark::DoNotOptimize(container.insert(values[i]));
    }
    state.PauseTiming();
    // Do not measure the time it takes to clear the container.
    container.clear();
    state.ResumeTiming();
  }
}

template <typename T>
void BM_LookupImpl(benchmark::State& state, bool sorted) {
  using V = typename remove_pair_const<typename T::value_type>::type;
  typename KeyOfValue<typename T::key_type, V>::type key_of_value;

  std::vector<V> values = GenerateValues<V>(kBenchmarkValues);
  if (sorted) {
    std::sort(values.begin(), values.end());
  }
  T container(values.begin(), values.end());

  while (state.KeepRunning()) {
    int idx = state.iterations() % kBenchmarkValues;
    benchmark::DoNotOptimize(container.find(key_of_value(values[idx])));
  }
}

// Benchmark lookup of values in a container.
template <typename T>
void BM_Lookup(benchmark::State& state) {
  BM_LookupImpl<T>(state, false);
}

// Benchmark lookup of values in a full container, meaning that values
// are inserted in-order to take advantage of biased insertion, which
// yields a full tree.
template <typename T>
void BM_FullLookup(benchmark::State& state) {
  BM_LookupImpl<T>(state, true);
}

// Benchmark erasing values from a container.
template <typename T>
void BM_Erase(benchmark::State& state) {
  using V = typename remove_pair_const<typename T::value_type>::type;
  typename KeyOfValue<typename T::key_type, V>::type key_of_value;
  std::vector<V> values = GenerateValues<V>(kBenchmarkValues);
  T container(values.begin(), values.end());

  // Remove and re-insert 10% of the keys per batch.
  const int batch_size = (kBenchmarkValues + 9) / 10;
  while (state.KeepRunningBatch(batch_size)) {
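    // Mirror image of BM_InsertImpl: the erases are timed and the
    // re-insertions happen while the timer is paused.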
    const int i = state.iterations();

    for (int j = i; j < i + batch_size; j++) {
      int x = j % kBenchmarkValues;
      container.erase(key_of_value(values[x]));
    }

    state.PauseTiming();
    for (int j = i; j < i + batch_size; j++) {
      int x = j % kBenchmarkValues;
      container.insert(values[x]);
    }
    state.ResumeTiming();
  }
}

// Benchmark erasing multiple values from a container.
template <typename T>
void BM_EraseRange(benchmark::State& state) {
  using V = typename remove_pair_const<typename T::value_type>::type;
  typename KeyOfValue<typename T::key_type, V>::type key_of_value;
  std::vector<V> values = GenerateValues<V>(kBenchmarkValues);
  T container(values.begin(), values.end());

  // Remove and re-insert 10% of the keys per batch.
  const int batch_size = (kBenchmarkValues + 9) / 10;
  while (state.KeepRunningBatch(batch_size)) {
    const int i = state.iterations();

    const int start_index = i % kBenchmarkValues;

    state.PauseTiming();
    {
      std::vector<V> removed;
      removed.reserve(batch_size);
      auto itr = container.find(key_of_value(values[start_index]));
      auto start = itr;
      for (int j = 0; j < batch_size; j++) {
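        // On reaching the end of the container, erase the range accumulated
        // so far (timed) and wrap around to the beginning.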
        if (itr == container.end()) {
          state.ResumeTiming();
          container.erase(start, itr);
          state.PauseTiming();
          itr = container.begin();
          start = itr;
        }
        removed.push_back(*itr++);
      }

      state.ResumeTiming();
      container.erase(start, itr);
      state.PauseTiming();

      container.insert(removed.begin(), removed.end());
    }
    state.ResumeTiming();
  }
}

// Predicate that erases every other element. We can't use a lambda because
// C++11 doesn't support generic lambdas.
// TODO(b/207389011): consider adding benchmarks that remove different fractions
// of keys (e.g. 10%, 90%).
struct EraseIfPred {
  uint64_t i = 0;
  template <typename T>
  bool operator()(const T&) {
    return ++i % 2;
  }
};

// Benchmark erasing multiple values from a container with a predicate.
template <typename T>
void BM_EraseIf(benchmark::State& state) {
  using V = typename remove_pair_const<typename T::value_type>::type;
  std::vector<V> values = GenerateValues<V>(kBenchmarkValues);

  // Removes half of the keys per batch.
  const int batch_size = (kBenchmarkValues + 1) / 2;
  EraseIfPred pred;
  while (state.KeepRunningBatch(batch_size)) {
    state.PauseTiming();
    {
      T container(values.begin(), values.end());
      state.ResumeTiming();
      erase_if(container, pred);
      benchmark::DoNotOptimize(container);
      state.PauseTiming();
    }
    state.ResumeTiming();
  }
}

// Benchmark steady-state insert (into first half of range) and remove (from
// second half of range), treating the container approximately like a queue with
// log-time access for all elements. This benchmark does not test the case where
// insertion and removal happen in the same region of the tree. This benchmark
// counts two value constructors.
template <typename T>
void BM_QueueAddRem(benchmark::State& state) {
  using V = typename remove_pair_const<typename T::value_type>::type;
  typename KeyOfValue<typename T::key_type, V>::type key_of_value;

  ABSL_RAW_CHECK(kBenchmarkValues % 2 == 0, "for performance");

  T container;

  const size_t half = kBenchmarkValues / 2;
  std::vector<int> remove_keys(half);
  std::vector<int> add_keys(half);

  // We want to do the exact same work repeatedly, and the benchmark can end
  // after a different number of iterations depending on the speed of the
  // individual run, so we use a large batch size here and ensure that we do
  // deterministic work every batch.
  while (state.KeepRunningBatch(half * kAddRemBatchSize)) {
    state.PauseTiming();

    container.clear();

    for (size_t i = 0; i < half; ++i) {
      remove_keys[i] = i;
      add_keys[i] = i;
    }
    constexpr int kSeed = 5;
    std::mt19937_64 rand(kSeed);
    std::shuffle(remove_keys.begin(), remove_keys.end(), rand);
    std::shuffle(add_keys.begin(), add_keys.end(), rand);

    // Note: this needs lazy generation of values.
    Generator<V> g(kBenchmarkValues * kAddRemBatchSize);

    for (size_t i = 0; i < half; ++i) {
      container.insert(g(add_keys[i]));
      container.insert(g(half + remove_keys[i]));
    }

    // There are three parts, each of size "half":
    //   1. being deleted from [offset - half, offset)
    //   2. standing in [offset, offset + half)
    //   3. being inserted into [offset + half, offset + 2 * half)
    size_t offset = 0;

    for (size_t i = 0; i < kAddRemBatchSize; ++i) {
      std::shuffle(remove_keys.begin(), remove_keys.end(), rand);
      std::shuffle(add_keys.begin(), add_keys.end(), rand);
      offset += half;

      state.ResumeTiming();
      for (size_t idx = 0; idx < half; ++idx) {
        container.erase(key_of_value(g(offset - half + remove_keys[idx])));
        container.insert(g(offset + half + add_keys[idx]));
      }
      state.PauseTiming();
    }
    state.ResumeTiming();
  }
}

// Mixed insertion and deletion in the same range using pre-constructed values.
template <typename T>
void BM_MixedAddRem(benchmark::State& state) {
  using V = typename remove_pair_const<typename T::value_type>::type;
  typename KeyOfValue<typename T::key_type, V>::type key_of_value;

  ABSL_RAW_CHECK(kBenchmarkValues % 2 == 0, "for performance");

  T container;

  // Create two random shuffles.
  std::vector<int> remove_keys(kBenchmarkValues);
  std::vector<int> add_keys(kBenchmarkValues);

  // We want to do the exact same work repeatedly, and the benchmark can end
  // after a different number of iterations depending on the speed of the
  // individual run, so we use a large batch size here and ensure that we do
  // deterministic work every batch.
  while (state.KeepRunningBatch(kBenchmarkValues * kAddRemBatchSize)) {
    state.PauseTiming();

    container.clear();

    constexpr int kSeed = 7;
    std::mt19937_64 rand(kSeed);

    std::vector<V> values = GenerateValues<V>(kBenchmarkValues * 2);

    // Insert the first half of the values (already in random order).
    container.insert(values.begin(), values.begin() + kBenchmarkValues);

    // Set up the add/remove key sequences.
    for (size_t i = 0; i < kBenchmarkValues; ++i) {
      // remove_keys and add_keys will be swapped before each round,
      // therefore fill add_keys here with the keys being inserted, so
      // they'll be the first to be removed.
      remove_keys[i] = i + kBenchmarkValues;
      add_keys[i] = i;
    }

    for (size_t i = 0; i < kAddRemBatchSize; ++i) {
      remove_keys.swap(add_keys);
      std::shuffle(remove_keys.begin(), remove_keys.end(), rand);
      std::shuffle(add_keys.begin(), add_keys.end(), rand);

      state.ResumeTiming();
      for (size_t idx = 0; idx < kBenchmarkValues; ++idx) {
        container.erase(key_of_value(values[remove_keys[idx]]));
        container.insert(values[add_keys[idx]]);
      }
      state.PauseTiming();
    }
    state.ResumeTiming();
  }
}

// Insertion at end, removal from the beginning. This benchmark
// counts two value constructors.
// TODO(ezb): we could add a GenerateNext version of generator that could reduce
// noise for string-like types.
template <typename T>
void BM_Fifo(benchmark::State& state) {
  using V = typename remove_pair_const<typename T::value_type>::type;

  T container;
  // Need lazy generation of values as state.max_iterations is large.
  Generator<V> g(kBenchmarkValues + state.max_iterations);

  for (int i = 0; i < kBenchmarkValues; i++) {
    container.insert(g(i));
  }

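  // Passing container.end() as the insertion hint keeps appending the new
  // largest key cheap in the sorted containers.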
  while (state.KeepRunning()) {
    container.erase(container.begin());
    container.insert(container.end(), g(state.iterations() + kBenchmarkValues));
  }
}

// Iteration (forward) through the tree.
template <typename T>
void BM_FwdIter(benchmark::State& state) {
  using V = typename remove_pair_const<typename T::value_type>::type;
  using R = typename T::value_type const*;

  std::vector<V> values = GenerateValues<V>(kBenchmarkValues);
  T container(values.begin(), values.end());

  auto iter = container.end();

  R r = nullptr;

  while (state.KeepRunning()) {
    if (iter == container.end()) iter = container.begin();
    r = &(*iter);
    ++iter;
  }

  benchmark::DoNotOptimize(r);
}

// Benchmark random range-construction of a container.
template <typename T>
void BM_RangeConstructionImpl(benchmark::State& state, bool sorted) {
  using V = typename remove_pair_const<typename T::value_type>::type;

  std::vector<V> values = GenerateValues<V>(kBenchmarkValues);
  if (sorted) {
    std::sort(values.begin(), values.end());
  }
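  // Construct and discard one container before the timed loop, presumably to
  // warm up allocation paths so the first timed construction is not an outlier.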
  {
    T container(values.begin(), values.end());
  }

  while (state.KeepRunning()) {
    T container(values.begin(), values.end());
    benchmark::DoNotOptimize(container);
  }
}

template <typename T>
void BM_InsertRangeRandom(benchmark::State& state) {
  BM_RangeConstructionImpl<T>(state, false);
}

template <typename T>
void BM_InsertRangeSorted(benchmark::State& state) {
  BM_RangeConstructionImpl<T>(state, true);
}

#define STL_ORDERED_TYPES(value)                     \
  using stl_set_##value = std::set<value>;           \
  using stl_map_##value = std::map<value, intptr_t>; \
  using stl_multiset_##value = std::multiset<value>; \
  using stl_multimap_##value = std::multimap<value, intptr_t>

using StdString = std::string;
STL_ORDERED_TYPES(int32_t);
STL_ORDERED_TYPES(int64_t);
STL_ORDERED_TYPES(StdString);
STL_ORDERED_TYPES(Cord);
STL_ORDERED_TYPES(Time);

#define STL_UNORDERED_TYPES(value)                                       \
  using stl_unordered_set_##value = std::unordered_set<value>;           \
  using stl_unordered_map_##value = std::unordered_map<value, intptr_t>; \
  using flat_hash_set_##value = flat_hash_set<value>;                    \
  using flat_hash_map_##value = flat_hash_map<value, intptr_t>;          \
  using stl_unordered_multiset_##value = std::unordered_multiset<value>; \
  using stl_unordered_multimap_##value =                                 \
      std::unordered_multimap<value, intptr_t>

#define STL_UNORDERED_TYPES_CUSTOM_HASH(value, hash)                           \
  using stl_unordered_set_##value = std::unordered_set<value, hash>;           \
  using stl_unordered_map_##value = std::unordered_map<value, intptr_t, hash>; \
  using flat_hash_set_##value = flat_hash_set<value, hash>;                    \
  using flat_hash_map_##value = flat_hash_map<value, intptr_t, hash>;          \
  using stl_unordered_multiset_##value = std::unordered_multiset<value, hash>; \
  using stl_unordered_multimap_##value =                                       \
      std::unordered_multimap<value, intptr_t, hash>

STL_UNORDERED_TYPES_CUSTOM_HASH(Cord, absl::Hash<absl::Cord>);

STL_UNORDERED_TYPES(int32_t);
STL_UNORDERED_TYPES(int64_t);
STL_UNORDERED_TYPES(StdString);
STL_UNORDERED_TYPES_CUSTOM_HASH(Time, absl::Hash<absl::Time>);

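// The "256" in the aliases below refers to the btree's default 256-byte
// target node size; the default template parameters are simply spelled out.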
#define BTREE_TYPES(value)                                            \
  using btree_256_set_##value =                                       \
      btree_set<value, std::less<value>, std::allocator<value>>;      \
  using btree_256_map_##value =                                       \
      btree_map<value, intptr_t, std::less<value>,                    \
                std::allocator<std::pair<const value, intptr_t>>>;    \
  using btree_256_multiset_##value =                                  \
      btree_multiset<value, std::less<value>, std::allocator<value>>; \
  using btree_256_multimap_##value =                                  \
      btree_multimap<value, intptr_t, std::less<value>,               \
                     std::allocator<std::pair<const value, intptr_t>>>

BTREE_TYPES(int32_t);
BTREE_TYPES(int64_t);
BTREE_TYPES(StdString);
BTREE_TYPES(Cord);
BTREE_TYPES(Time);

#define MY_BENCHMARK4(type, func)                                              \
  void BM_##type##_##func(benchmark::State& state) { BM_##func<type>(state); } \
  BENCHMARK(BM_##type##_##func)

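// For example, MY_BENCHMARK4(btree_256_set_int64_t, Lookup) expands to a
// wrapper function BM_btree_256_set_int64_t_Lookup that calls
// BM_Lookup<btree_256_set_int64_t> and registers it with BENCHMARK().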
#define MY_BENCHMARK3_STL(type)           \
  MY_BENCHMARK4(type, Insert);            \
  MY_BENCHMARK4(type, InsertSorted);      \
  MY_BENCHMARK4(type, InsertSmall);       \
  MY_BENCHMARK4(type, Lookup);            \
  MY_BENCHMARK4(type, FullLookup);        \
  MY_BENCHMARK4(type, Erase);             \
  MY_BENCHMARK4(type, EraseRange);        \
  MY_BENCHMARK4(type, QueueAddRem);       \
  MY_BENCHMARK4(type, MixedAddRem);       \
  MY_BENCHMARK4(type, Fifo);              \
  MY_BENCHMARK4(type, FwdIter);           \
  MY_BENCHMARK4(type, InsertRangeRandom); \
  MY_BENCHMARK4(type, InsertRangeSorted)

#define MY_BENCHMARK3(type)     \
  MY_BENCHMARK4(type, EraseIf); \
  MY_BENCHMARK3_STL(type)

#define MY_BENCHMARK2_SUPPORTS_MULTI_ONLY(type) \
  MY_BENCHMARK3_STL(stl_##type);                \
  MY_BENCHMARK3_STL(stl_unordered_##type);      \
  MY_BENCHMARK3(btree_256_##type)

#define MY_BENCHMARK2(type)                \
  MY_BENCHMARK2_SUPPORTS_MULTI_ONLY(type); \
  MY_BENCHMARK3(flat_hash_##type)

// Define MULTI_TESTING to see benchmarks for multi-containers also.
//
// You can use --copt=-DMULTI_TESTING.
#ifdef MULTI_TESTING
#define MY_BENCHMARK(type)                            \
  MY_BENCHMARK2(set_##type);                          \
  MY_BENCHMARK2(map_##type);                          \
  MY_BENCHMARK2_SUPPORTS_MULTI_ONLY(multiset_##type); \
  MY_BENCHMARK2_SUPPORTS_MULTI_ONLY(multimap_##type)
#else
#define MY_BENCHMARK(type)   \
  MY_BENCHMARK2(set_##type); \
  MY_BENCHMARK2(map_##type)
#endif

MY_BENCHMARK(int32_t);
MY_BENCHMARK(int64_t);
MY_BENCHMARK(StdString);
MY_BENCHMARK(Cord);
MY_BENCHMARK(Time);

// Define a type whose size and cost of moving are independently customizable.
// When sizeof(value_type) increases, we expect btree to no longer have as much
// cache-locality advantage over STL. When cost of moving increases, we expect
// btree to actually do more work than STL because it has to move values around
// and STL doesn't have to.
template <int Size, int Copies>
struct BigType {
  BigType() : BigType(0) {}
  explicit BigType(int x) { std::iota(values.begin(), values.end(), x); }

  void Copy(const BigType& other) {
    for (int i = 0; i < Size && i < Copies; ++i) values[i] = other.values[i];
    // If Copies > Size, do extra copies.
    for (int i = Size, idx = 0; i < Copies; ++i) {
      int64_t tmp = other.values[idx];
      benchmark::DoNotOptimize(tmp);
      idx = idx + 1 == Size ? 0 : idx + 1;
    }
  }

  BigType(const BigType& other) { Copy(other); }
  BigType& operator=(const BigType& other) {
    Copy(other);
    return *this;
  }

  // Compare only the first Copies elements if Copies is less than Size.
  bool operator<(const BigType& other) const {
    return std::lexicographical_compare(
        values.begin(), values.begin() + std::min(Size, Copies),
        other.values.begin(), other.values.begin() + std::min(Size, Copies));
  }
  bool operator==(const BigType& other) const {
    return std::equal(values.begin(), values.begin() + std::min(Size, Copies),
                      other.values.begin());
  }

  // Support absl::Hash.
  template <typename State>
  friend State AbslHashValue(State h, const BigType& b) {
    for (int i = 0; i < Size && i < Copies; ++i)
      h = State::combine(std::move(h), b.values[i]);
    return h;
  }

  std::array<int64_t, Size> values;
};
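// For example, BigType<32, 32> occupies 256 bytes and copies all 32 words on
// assignment, while BigType<32, 1> has the same footprint but copies only one.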

#define BIG_TYPE_BENCHMARKS(SIZE, COPIES)                                     \
  using stl_set_size##SIZE##copies##COPIES = std::set<BigType<SIZE, COPIES>>; \
  using stl_map_size##SIZE##copies##COPIES =                                  \
      std::map<BigType<SIZE, COPIES>, intptr_t>;                              \
  using stl_multiset_size##SIZE##copies##COPIES =                             \
      std::multiset<BigType<SIZE, COPIES>>;                                   \
  using stl_multimap_size##SIZE##copies##COPIES =                             \
      std::multimap<BigType<SIZE, COPIES>, intptr_t>;                         \
  using stl_unordered_set_size##SIZE##copies##COPIES =                        \
      std::unordered_set<BigType<SIZE, COPIES>,                               \
                         absl::Hash<BigType<SIZE, COPIES>>>;                  \
  using stl_unordered_map_size##SIZE##copies##COPIES =                        \
      std::unordered_map<BigType<SIZE, COPIES>, intptr_t,                     \
                         absl::Hash<BigType<SIZE, COPIES>>>;                  \
  using flat_hash_set_size##SIZE##copies##COPIES =                            \
      flat_hash_set<BigType<SIZE, COPIES>>;                                   \
  using flat_hash_map_size##SIZE##copies##COPIES =                            \
      flat_hash_map<BigType<SIZE, COPIES>, intptr_t>;                         \
  using stl_unordered_multiset_size##SIZE##copies##COPIES =                   \
      std::unordered_multiset<BigType<SIZE, COPIES>,                          \
                              absl::Hash<BigType<SIZE, COPIES>>>;             \
  using stl_unordered_multimap_size##SIZE##copies##COPIES =                   \
      std::unordered_multimap<BigType<SIZE, COPIES>, intptr_t,                \
                              absl::Hash<BigType<SIZE, COPIES>>>;             \
  using btree_256_set_size##SIZE##copies##COPIES =                            \
      btree_set<BigType<SIZE, COPIES>>;                                       \
  using btree_256_map_size##SIZE##copies##COPIES =                            \
      btree_map<BigType<SIZE, COPIES>, intptr_t>;                             \
  using btree_256_multiset_size##SIZE##copies##COPIES =                       \
      btree_multiset<BigType<SIZE, COPIES>>;                                  \
  using btree_256_multimap_size##SIZE##copies##COPIES =                       \
      btree_multimap<BigType<SIZE, COPIES>, intptr_t>;                        \
  MY_BENCHMARK(size##SIZE##copies##COPIES)

// Define BIG_TYPE_TESTING to see benchmarks for more big types.
//
// You can use --copt=-DBIG_TYPE_TESTING.
#ifndef NODESIZE_TESTING
#ifdef BIG_TYPE_TESTING
BIG_TYPE_BENCHMARKS(1, 4);
BIG_TYPE_BENCHMARKS(4, 1);
BIG_TYPE_BENCHMARKS(4, 4);
BIG_TYPE_BENCHMARKS(1, 8);
BIG_TYPE_BENCHMARKS(8, 1);
BIG_TYPE_BENCHMARKS(8, 8);
BIG_TYPE_BENCHMARKS(1, 16);
BIG_TYPE_BENCHMARKS(16, 1);
BIG_TYPE_BENCHMARKS(16, 16);
BIG_TYPE_BENCHMARKS(1, 32);
BIG_TYPE_BENCHMARKS(32, 1);
BIG_TYPE_BENCHMARKS(32, 32);
#else
BIG_TYPE_BENCHMARKS(32, 32);
#endif
#endif

// Benchmark using unique_ptrs to large value types. In order to be able to use
// the same benchmark code as the other types, use a type that holds a
// unique_ptr and has a copy constructor.
template <int Size>
struct BigTypePtr {
  BigTypePtr() : BigTypePtr(0) {}
  explicit BigTypePtr(int x) {
    ptr = absl::make_unique<BigType<Size, Size>>(x);
  }
  BigTypePtr(const BigTypePtr& other) {
    ptr = absl::make_unique<BigType<Size, Size>>(*other.ptr);
  }
  BigTypePtr(BigTypePtr&& other) noexcept = default;
  BigTypePtr& operator=(const BigTypePtr& other) {
    ptr = absl::make_unique<BigType<Size, Size>>(*other.ptr);
    return *this;
  }
  BigTypePtr& operator=(BigTypePtr&& other) noexcept = default;

  bool operator<(const BigTypePtr& other) const { return *ptr < *other.ptr; }
  bool operator==(const BigTypePtr& other) const { return *ptr == *other.ptr; }

  std::unique_ptr<BigType<Size, Size>> ptr;
};
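// Copying a BigTypePtr deep-copies the pointee, but moving it just moves the
// unique_ptr, so containers that relocate elements by move (like btree) pay
// far less than the copy cost would suggest.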
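// These overloads account for the out-of-line BigType allocations, which the
// btree's own bytes_used() accounting does not see.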
template <int Size>
double ContainerInfo(const btree_set<BigTypePtr<Size>>& b) {
  const double bytes_used =
      b.bytes_used() + b.size() * sizeof(BigType<Size, Size>);
  const double bytes_per_value = bytes_used / b.size();
  BtreeContainerInfoLog(b, bytes_used, bytes_per_value);
  return bytes_per_value;
}
template <int Size>
double ContainerInfo(const btree_map<int, BigTypePtr<Size>>& b) {
  const double bytes_used =
      b.bytes_used() + b.size() * sizeof(BigType<Size, Size>);
  const double bytes_per_value = bytes_used / b.size();
  BtreeContainerInfoLog(b, bytes_used, bytes_per_value);
  return bytes_per_value;
}

#define BIG_TYPE_PTR_BENCHMARKS(SIZE)                                          \
  using stl_set_size##SIZE##copies##SIZE##ptr = std::set<BigType<SIZE, SIZE>>; \
  using stl_map_size##SIZE##copies##SIZE##ptr =                                \
      std::map<int, BigType<SIZE, SIZE>>;                                      \
  using stl_unordered_set_size##SIZE##copies##SIZE##ptr =                      \
      std::unordered_set<BigType<SIZE, SIZE>,                                  \
                         absl::Hash<BigType<SIZE, SIZE>>>;                     \
  using stl_unordered_map_size##SIZE##copies##SIZE##ptr =                      \
      std::unordered_map<int, BigType<SIZE, SIZE>>;                            \
  using flat_hash_set_size##SIZE##copies##SIZE##ptr =                          \
      flat_hash_set<BigType<SIZE, SIZE>>;                                      \
  using flat_hash_map_size##SIZE##copies##SIZE##ptr =                          \
      flat_hash_map<int, BigTypePtr<SIZE>>;                                    \
  using btree_256_set_size##SIZE##copies##SIZE##ptr =                          \
      btree_set<BigTypePtr<SIZE>>;                                             \
  using btree_256_map_size##SIZE##copies##SIZE##ptr =                          \
      btree_map<int, BigTypePtr<SIZE>>;                                        \
  MY_BENCHMARK3_STL(stl_set_size##SIZE##copies##SIZE##ptr);                    \
  MY_BENCHMARK3_STL(stl_unordered_set_size##SIZE##copies##SIZE##ptr);          \
  MY_BENCHMARK3(flat_hash_set_size##SIZE##copies##SIZE##ptr);                  \
  MY_BENCHMARK3(btree_256_set_size##SIZE##copies##SIZE##ptr);                  \
  MY_BENCHMARK3_STL(stl_map_size##SIZE##copies##SIZE##ptr);                    \
  MY_BENCHMARK3_STL(stl_unordered_map_size##SIZE##copies##SIZE##ptr);          \
  MY_BENCHMARK3(flat_hash_map_size##SIZE##copies##SIZE##ptr);                  \
  MY_BENCHMARK3(btree_256_map_size##SIZE##copies##SIZE##ptr)

BIG_TYPE_PTR_BENCHMARKS(32);

}  // namespace
}  // namespace container_internal
ABSL_NAMESPACE_END
}  // namespace absl