24 #include <type_traits>
25 #include <unordered_map>
26 #include <unordered_set>
29 #include "benchmark/benchmark.h"
30 #include "absl/base/internal/raw_logging.h"
31 #include "absl/container/btree_map.h"
32 #include "absl/container/btree_set.h"
33 #include "absl/container/btree_test.h"
34 #include "absl/container/flat_hash_map.h"
35 #include "absl/container/flat_hash_set.h"
36 #include "absl/container/internal/hashtable_debug.h"
37 #include "absl/flags/flag.h"
38 #include "absl/hash/hash.h"
39 #include "absl/memory/memory.h"
40 #include "absl/strings/cord.h"
41 #include "absl/strings/str_format.h"
42 #include "absl/time/time.h"
46 namespace container_internal {
// Number of values most benchmarks operate on: 2^20 (~1M) elements.
constexpr size_t kBenchmarkValues = 1 << 20;

// Number of add/remove sub-batches performed within one batch of the
// *AddRem benchmarks.
constexpr size_t kAddRemBatchSize = 1 << 2;
57 std::vector<V> GenerateValues(
int n) {
58 constexpr
int kSeed = 23;
59 return GenerateValuesWithSeed<V>(n, 4 * n, kSeed);
68 std::vector<V>
values = GenerateValues<V>(kBenchmarkValues);
75 const int batch_size = (kBenchmarkValues + 9) / 10;
76 while (
state.KeepRunningBatch(batch_size)) {
78 const auto i =
static_cast<int>(
state.iterations());
80 for (
int j = i;
j <
i + batch_size;
j++) {
81 int x =
j % kBenchmarkValues;
87 for (
int j = i;
j <
i + batch_size;
j++) {
88 int x =
j % kBenchmarkValues;
96 BM_InsertImpl<T>(
state,
false);
101 BM_InsertImpl<T>(
state,
true);
106 template <
typename T>
111 std::vector<V>
values = GenerateValues<V>(kSize);
114 while (
state.KeepRunningBatch(kSize)) {
115 for (
int i = 0;
i <
kSize; ++
i) {
121 state.ResumeTiming();
125 template <
typename T>
130 std::vector<V>
values = GenerateValues<V>(kBenchmarkValues);
136 while (
state.KeepRunning()) {
137 int idx =
state.iterations() % kBenchmarkValues;
143 template <
typename T>
145 BM_LookupImpl<T>(
state,
false);
151 template <
typename T>
153 BM_LookupImpl<T>(
state,
true);
157 template <
typename T>
161 std::vector<V>
values = GenerateValues<V>(kBenchmarkValues);
165 const int batch_size = (kBenchmarkValues + 9) / 10;
166 while (
state.KeepRunningBatch(batch_size)) {
167 const int i =
state.iterations();
169 for (
int j = i;
j <
i + batch_size;
j++) {
170 int x =
j % kBenchmarkValues;
175 for (
int j = i;
j <
i + batch_size;
j++) {
176 int x =
j % kBenchmarkValues;
179 state.ResumeTiming();
184 template <
typename T>
188 std::vector<V>
values = GenerateValues<V>(kBenchmarkValues);
192 const int batch_size = (kBenchmarkValues + 9) / 10;
193 while (
state.KeepRunningBatch(batch_size)) {
194 const int i =
state.iterations();
196 const int start_index =
i % kBenchmarkValues;
200 std::vector<V> removed;
201 removed.reserve(batch_size);
204 for (
int j = 0;
j < batch_size;
j++) {
206 state.ResumeTiming();
212 removed.push_back(*itr++);
215 state.ResumeTiming();
219 container.insert(removed.begin(), removed.end());
221 state.ResumeTiming();
230 template <
typename T>
239 const size_t half = kBenchmarkValues / 2;
240 std::vector<int> remove_keys(
half);
241 std::vector<int> add_keys(
half);
247 while (
state.KeepRunningBatch(
half * kAddRemBatchSize)) {
252 for (
size_t i = 0;
i <
half; ++
i) {
256 constexpr
int kSeed = 5;
257 std::mt19937_64 rand(kSeed);
258 std::shuffle(remove_keys.begin(), remove_keys.end(), rand);
259 std::shuffle(add_keys.begin(), add_keys.end(), rand);
262 Generator<V>
g(kBenchmarkValues * kAddRemBatchSize);
264 for (
size_t i = 0;
i <
half; ++
i) {
275 for (
size_t i = 0;
i < kAddRemBatchSize; ++
i) {
276 std::shuffle(remove_keys.begin(), remove_keys.end(), rand);
277 std::shuffle(add_keys.begin(), add_keys.end(), rand);
280 state.ResumeTiming();
287 state.ResumeTiming();
292 template <
typename T>
302 std::vector<int> remove_keys(kBenchmarkValues);
303 std::vector<int> add_keys(kBenchmarkValues);
309 while (
state.KeepRunningBatch(kBenchmarkValues * kAddRemBatchSize)) {
314 constexpr
int kSeed = 7;
315 std::mt19937_64 rand(kSeed);
317 std::vector<V>
values = GenerateValues<V>(kBenchmarkValues * 2);
323 for (
size_t i = 0;
i < kBenchmarkValues; ++
i) {
327 remove_keys[
i] =
i + kBenchmarkValues;
331 for (
size_t i = 0;
i < kAddRemBatchSize; ++
i) {
332 remove_keys.swap(add_keys);
333 std::shuffle(remove_keys.begin(), remove_keys.end(), rand);
334 std::shuffle(add_keys.begin(), add_keys.end(), rand);
336 state.ResumeTiming();
337 for (
size_t idx = 0;
idx < kBenchmarkValues; ++
idx) {
343 state.ResumeTiming();
351 template <
typename T>
357 Generator<V>
g(kBenchmarkValues +
state.max_iterations);
359 for (
int i = 0;
i < kBenchmarkValues;
i++) {
363 while (
state.KeepRunning()) {
370 template <
typename T>
375 std::vector<V>
values = GenerateValues<V>(kBenchmarkValues);
382 while (
state.KeepRunning()) {
392 template <
typename T>
396 std::vector<V>
values = GenerateValues<V>(kBenchmarkValues);
404 while (
state.KeepRunning()) {
410 template <
typename T>
412 BM_RangeConstructionImpl<T>(
state,
false);
415 template <
typename T>
417 BM_RangeConstructionImpl<T>(
state,
true);
// Declares the four ordered STL container aliases used by the benchmarks for
// a given `value` type: stl_{set,multiset,map,multimap}_<value>. Maps use
// intptr_t mapped values.
#define STL_ORDERED_TYPES(value)                     \
  using stl_set_##value = std::set<value>;           \
  using stl_multiset_##value = std::multiset<value>; \
  using stl_map_##value = std::map<value, intptr_t>; \
  using stl_multimap_##value = std::multimap<value, intptr_t>
// Declares the unordered container aliases used by the benchmarks for a given
// `value` type: the STL hash containers plus the absl flat_hash ones. Maps
// use intptr_t mapped values.
#define STL_UNORDERED_TYPES(value)                                       \
  using stl_unordered_set_##value = std::unordered_set<value>;           \
  using stl_unordered_multiset_##value = std::unordered_multiset<value>; \
  using stl_unordered_map_##value = std::unordered_map<value, intptr_t>; \
  using stl_unordered_multimap_##value =                                 \
      std::unordered_multimap<value, intptr_t>;                          \
  using flat_hash_set_##value = flat_hash_set<value>;                    \
  using flat_hash_map_##value = flat_hash_map<value, intptr_t>
// Same aliases as STL_UNORDERED_TYPES, but with an explicit `hash` functor
// for value types std::hash does not cover.
#define STL_UNORDERED_TYPES_CUSTOM_HASH(value, hash)                           \
  using stl_unordered_set_##value = std::unordered_set<value, hash>;           \
  using stl_unordered_multiset_##value =                                       \
      std::unordered_multiset<value, hash>;                                    \
  using stl_unordered_map_##value = std::unordered_map<value, intptr_t, hash>; \
  using stl_unordered_multimap_##value =                                       \
      std::unordered_multimap<value, intptr_t, hash>;                          \
  using flat_hash_set_##value = flat_hash_set<value, hash>;                    \
  using flat_hash_map_##value = flat_hash_map<value, intptr_t, hash>
// Declares the absl btree container aliases (default 256-byte nodes) for a
// given `value` type, spelling out the comparator and allocator explicitly.
#define BTREE_TYPES(value)                                              \
  using btree_256_set_##value =                                         \
      btree_set<value, std::less<value>, std::allocator<value>>;        \
  using btree_256_multiset_##value =                                    \
      btree_multiset<value, std::less<value>, std::allocator<value>>;   \
  using btree_256_map_##value =                                         \
      btree_map<value, intptr_t, std::less<value>,                      \
                std::allocator<std::pair<const value, intptr_t>>>;      \
  using btree_256_multimap_##value =                                    \
      btree_multimap<value, intptr_t, std::less<value>,                 \
                     std::allocator<std::pair<const value, intptr_t>>>
// Defines and registers one benchmark function BM_<type>_<func> that forwards
// to the BM_<func> implementation instantiated for container alias `type`.
#define MY_BENCHMARK4(type, func)                    \
  void BM_##type##_##func(benchmark::State& state) { \
    BM_##func<type>(state);                          \
  }                                                  \
  BENCHMARK(BM_##type##_##func)
// Registers the full benchmark suite for one container alias `type`.
// NOTE: registration order determines run order; keep it stable.
#define MY_BENCHMARK3(type)               \
  MY_BENCHMARK4(type, Insert);            \
  MY_BENCHMARK4(type, InsertSorted);      \
  MY_BENCHMARK4(type, InsertSmall);       \
  MY_BENCHMARK4(type, Lookup);            \
  MY_BENCHMARK4(type, FullLookup);        \
  MY_BENCHMARK4(type, Delete);            \
  MY_BENCHMARK4(type, DeleteRange);       \
  MY_BENCHMARK4(type, QueueAddRem);       \
  MY_BENCHMARK4(type, MixedAddRem);       \
  MY_BENCHMARK4(type, Fifo);              \
  MY_BENCHMARK4(type, FwdIter);           \
  MY_BENCHMARK4(type, InsertRangeRandom); \
  MY_BENCHMARK4(type, InsertRangeSorted)
// Registers the suite for the container families that also have multi-
// variants (stl ordered, stl unordered, btree) -- flat_hash has no multi
// containers and is added separately by MY_BENCHMARK2.
#define MY_BENCHMARK2_SUPPORTS_MULTI_ONLY(type) \
  MY_BENCHMARK3(stl_##type);                    \
  MY_BENCHMARK3(stl_unordered_##type);          \
  MY_BENCHMARK3(btree_256_##type)
// Registers the suite for every container family, including flat_hash.
#define MY_BENCHMARK2(type)                \
  MY_BENCHMARK2_SUPPORTS_MULTI_ONLY(type); \
  MY_BENCHMARK3(flat_hash_##type)
// Registers set, map, multiset, and multimap suites for one value type.
#define MY_BENCHMARK(type)                            \
  MY_BENCHMARK2(set_##type);                          \
  MY_BENCHMARK2(map_##type);                          \
  MY_BENCHMARK2_SUPPORTS_MULTI_ONLY(multiset_##type); \
  MY_BENCHMARK2_SUPPORTS_MULTI_ONLY(multimap_##type)
// NOTE(review): MY_BENCHMARK is redefined here with only the non-multi
// suites; presumably this sits in a preprocessor branch (and/or behind an
// #undef) not visible in this excerpt -- confirm the guard before changing.
#define MY_BENCHMARK(type)   \
  MY_BENCHMARK2(set_##type); \
  MY_BENCHMARK2(map_##type)
530 template <
int Size,
int Copies>
532 BigType() : BigType(0) {}
533 explicit BigType(
int x) { std::iota(
values.begin(),
values.end(), x); }
535 void Copy(
const BigType& other) {
536 for (
int i = 0;
i < Size &&
i < Copies; ++
i)
values[i] = other.values[i];
538 for (
int i = Size,
idx = 0;
i < Copies; ++
i) {
545 BigType(
const BigType& other) {
Copy(other); }
546 BigType& operator=(
const BigType& other) {
552 bool operator<(
const BigType& other)
const {
553 return std::lexicographical_compare(
555 other.values.begin(), other.values.begin() +
std::min(Size, Copies));
559 other.values.begin());
563 template <
typename State>
565 for (
int i = 0;
i < Size &&
i < Copies; ++
i)
566 h = State::combine(
std::move(h),
b.values[i]);
// Declares all container aliases for BigType<SIZE, COPIES> (STL ordered,
// STL unordered with absl::Hash, absl flat_hash, absl btree) and then
// registers every benchmark suite for them via MY_BENCHMARK.
#define BIG_TYPE_BENCHMARKS(SIZE, COPIES)                                     \
  using stl_set_size##SIZE##copies##COPIES = std::set<BigType<SIZE, COPIES>>; \
  using stl_map_size##SIZE##copies##COPIES =                                  \
      std::map<BigType<SIZE, COPIES>, intptr_t>;                              \
  using stl_multiset_size##SIZE##copies##COPIES =                             \
      std::multiset<BigType<SIZE, COPIES>>;                                   \
  using stl_multimap_size##SIZE##copies##COPIES =                             \
      std::multimap<BigType<SIZE, COPIES>, intptr_t>;                         \
  using stl_unordered_set_size##SIZE##copies##COPIES =                        \
      std::unordered_set<BigType<SIZE, COPIES>,                               \
                         absl::Hash<BigType<SIZE, COPIES>>>;                  \
  using stl_unordered_map_size##SIZE##copies##COPIES =                        \
      std::unordered_map<BigType<SIZE, COPIES>, intptr_t,                     \
                         absl::Hash<BigType<SIZE, COPIES>>>;                  \
  using flat_hash_set_size##SIZE##copies##COPIES =                            \
      flat_hash_set<BigType<SIZE, COPIES>>;                                   \
  using flat_hash_map_size##SIZE##copies##COPIES =                            \
      flat_hash_map<BigType<SIZE, COPIES>, intptr_t>;                         \
  using stl_unordered_multiset_size##SIZE##copies##COPIES =                   \
      std::unordered_multiset<BigType<SIZE, COPIES>,                          \
                              absl::Hash<BigType<SIZE, COPIES>>>;             \
  using stl_unordered_multimap_size##SIZE##copies##COPIES =                   \
      std::unordered_multimap<BigType<SIZE, COPIES>, intptr_t,                \
                              absl::Hash<BigType<SIZE, COPIES>>>;             \
  using btree_256_set_size##SIZE##copies##COPIES =                            \
      btree_set<BigType<SIZE, COPIES>>;                                       \
  using btree_256_map_size##SIZE##copies##COPIES =                            \
      btree_map<BigType<SIZE, COPIES>, intptr_t>;                             \
  using btree_256_multiset_size##SIZE##copies##COPIES =                       \
      btree_multiset<BigType<SIZE, COPIES>>;                                  \
  using btree_256_multimap_size##SIZE##copies##COPIES =                       \
      btree_multimap<BigType<SIZE, COPIES>, intptr_t>;                        \
  MY_BENCHMARK(size##SIZE##copies##COPIES)
610 #ifndef NODESIZE_TESTING
611 #ifdef BIG_TYPE_TESTING
634 BigTypePtr() : BigTypePtr(0) {}
635 explicit BigTypePtr(
int x) {
636 ptr = absl::make_unique<BigType<Size, Size>>(
x);
638 BigTypePtr(
const BigTypePtr& other) {
639 ptr = absl::make_unique<BigType<Size, Size>>(*other.ptr);
641 BigTypePtr(BigTypePtr&& other) noexcept =
default;
642 BigTypePtr& operator=(
const BigTypePtr& other) {
643 ptr = absl::make_unique<BigType<Size, Size>>(*other.ptr);
645 BigTypePtr& operator=(BigTypePtr&& other) noexcept =
default;
647 bool operator<(
const BigTypePtr& other)
const {
return *
ptr < *other.ptr; }
648 bool operator==(
const BigTypePtr& other)
const {
return *
ptr == *other.ptr; }
650 std::unique_ptr<BigType<Size, Size>>
ptr;
654 double ContainerInfo(
const btree_set<BigTypePtr<Size>>&
b) {
655 const double bytes_used =
656 b.bytes_used() +
b.size() *
sizeof(BigType<Size, Size>);
657 const double bytes_per_value = bytes_used /
b.size();
658 BtreeContainerInfoLog(
b, bytes_used, bytes_per_value);
659 return bytes_per_value;
662 double ContainerInfo(
const btree_map<
int, BigTypePtr<Size>>&
b) {
663 const double bytes_used =
664 b.bytes_used() +
b.size() *
sizeof(BigType<Size, Size>);
665 const double bytes_per_value = bytes_used /
b.size();
666 BtreeContainerInfoLog(
b, bytes_used, bytes_per_value);
667 return bytes_per_value;
// Declares container aliases and registers suites for large values held
// behind pointers.
// NOTE(review): the stl_* and flat_hash_set aliases use the by-value
// BigType while flat_hash_map and btree_* use BigTypePtr -- presumably to
// compare inline storage against pointer indirection; confirm intent before
// "fixing" the asymmetry.
#define BIG_TYPE_PTR_BENCHMARKS(SIZE)                                          \
  using stl_set_size##SIZE##copies##SIZE##ptr = std::set<BigType<SIZE, SIZE>>; \
  using stl_map_size##SIZE##copies##SIZE##ptr =                                \
      std::map<int, BigType<SIZE, SIZE>>;                                      \
  using stl_unordered_set_size##SIZE##copies##SIZE##ptr =                      \
      std::unordered_set<BigType<SIZE, SIZE>,                                  \
                         absl::Hash<BigType<SIZE, SIZE>>>;                     \
  using stl_unordered_map_size##SIZE##copies##SIZE##ptr =                      \
      std::unordered_map<int, BigType<SIZE, SIZE>>;                            \
  using flat_hash_set_size##SIZE##copies##SIZE##ptr =                          \
      flat_hash_set<BigType<SIZE, SIZE>>;                                      \
  using flat_hash_map_size##SIZE##copies##SIZE##ptr =                          \
      flat_hash_map<int, BigTypePtr<SIZE>>;                                    \
  using btree_256_set_size##SIZE##copies##SIZE##ptr =                          \
      btree_set<BigTypePtr<SIZE>>;                                             \
  using btree_256_map_size##SIZE##copies##SIZE##ptr =                          \
      btree_map<int, BigTypePtr<SIZE>>;                                        \
  MY_BENCHMARK3(stl_set_size##SIZE##copies##SIZE##ptr);                        \
  MY_BENCHMARK3(stl_unordered_set_size##SIZE##copies##SIZE##ptr);              \
  MY_BENCHMARK3(flat_hash_set_size##SIZE##copies##SIZE##ptr);                  \
  MY_BENCHMARK3(btree_256_set_size##SIZE##copies##SIZE##ptr);                  \
  MY_BENCHMARK3(stl_map_size##SIZE##copies##SIZE##ptr);                        \
  MY_BENCHMARK3(stl_unordered_map_size##SIZE##copies##SIZE##ptr);              \
  MY_BENCHMARK3(flat_hash_map_size##SIZE##copies##SIZE##ptr);                  \
  MY_BENCHMARK3(btree_256_map_size##SIZE##copies##SIZE##ptr)