24 #include <type_traits>
25 #include <unordered_map>
26 #include <unordered_set>
29 #include "benchmark/benchmark.h"
30 #include "absl/base/internal/raw_logging.h"
31 #include "absl/container/btree_map.h"
32 #include "absl/container/btree_set.h"
33 #include "absl/container/btree_test.h"
34 #include "absl/container/flat_hash_map.h"
35 #include "absl/container/flat_hash_set.h"
36 #include "absl/container/internal/hashtable_debug.h"
37 #include "absl/flags/flag.h"
38 #include "absl/hash/hash.h"
39 #include "absl/memory/memory.h"
40 #include "absl/strings/cord.h"
41 #include "absl/strings/str_format.h"
42 #include "absl/time/time.h"
46 namespace container_internal {
// Number of values used by the bulk benchmarks (insert/lookup/erase): 2^20.
constexpr size_t kBenchmarkValues = 1 << 20;

// Number of add/remove batches executed per KeepRunningBatch() round in the
// QueueAddRem/MixedAddRem benchmarks below.
constexpr size_t kAddRemBatchSize = 1 << 2;
57 std::vector<V> GenerateValues(
int n) {
58 constexpr
int kSeed = 23;
59 return GenerateValuesWithSeed<V>(
n, 4 *
n, kSeed);
68 std::vector<V>
values = GenerateValues<V>(kBenchmarkValues);
75 const int batch_size = (kBenchmarkValues + 9) / 10;
76 while (
state.KeepRunningBatch(batch_size)) {
78 const auto i =
static_cast<int>(
state.iterations());
80 for (
int j =
i; j <
i + batch_size; j++) {
81 int x = j % kBenchmarkValues;
87 for (
int j =
i; j <
i + batch_size; j++) {
88 int x = j % kBenchmarkValues;
96 BM_InsertImpl<T>(
state,
false);
101 BM_InsertImpl<T>(
state,
true);
106 template <
typename T>
111 std::vector<V>
values = GenerateValues<V>(kSize);
114 while (
state.KeepRunningBatch(kSize)) {
115 for (
int i = 0;
i < kSize; ++
i) {
121 state.ResumeTiming();
125 template <
typename T>
130 std::vector<V>
values = GenerateValues<V>(kBenchmarkValues);
136 while (
state.KeepRunning()) {
137 int idx =
state.iterations() % kBenchmarkValues;
143 template <
typename T>
145 BM_LookupImpl<T>(
state,
false);
151 template <
typename T>
153 BM_LookupImpl<T>(
state,
true);
157 template <
typename T>
161 std::vector<V>
values = GenerateValues<V>(kBenchmarkValues);
165 const int batch_size = (kBenchmarkValues + 9) / 10;
166 while (
state.KeepRunningBatch(batch_size)) {
167 const int i =
state.iterations();
169 for (
int j =
i; j <
i + batch_size; j++) {
170 int x = j % kBenchmarkValues;
175 for (
int j =
i; j <
i + batch_size; j++) {
176 int x = j % kBenchmarkValues;
179 state.ResumeTiming();
184 template <
typename T>
188 std::vector<V>
values = GenerateValues<V>(kBenchmarkValues);
192 const int batch_size = (kBenchmarkValues + 9) / 10;
193 while (
state.KeepRunningBatch(batch_size)) {
194 const int i =
state.iterations();
196 const int start_index =
i % kBenchmarkValues;
200 std::vector<V> removed;
201 removed.reserve(batch_size);
204 for (
int j = 0; j < batch_size; j++) {
206 state.ResumeTiming();
212 removed.push_back(*itr++);
215 state.ResumeTiming();
219 container.insert(removed.begin(), removed.end());
221 state.ResumeTiming();
231 template <
typename T>
232 bool operator()(
const T&) {
238 template <
typename T>
241 std::vector<V>
values = GenerateValues<V>(kBenchmarkValues);
244 const int batch_size = (kBenchmarkValues + 1) / 2;
246 while (
state.KeepRunningBatch(batch_size)) {
250 state.ResumeTiming();
255 state.ResumeTiming();
264 template <
typename T>
273 const size_t half = kBenchmarkValues / 2;
274 std::vector<int> remove_keys(
half);
275 std::vector<int> add_keys(
half);
281 while (
state.KeepRunningBatch(
half * kAddRemBatchSize)) {
286 for (
size_t i = 0;
i <
half; ++
i) {
290 constexpr
int kSeed = 5;
291 std::mt19937_64 rand(kSeed);
292 std::shuffle(remove_keys.begin(), remove_keys.end(), rand);
293 std::shuffle(add_keys.begin(), add_keys.end(), rand);
296 Generator<V>
g(kBenchmarkValues * kAddRemBatchSize);
298 for (
size_t i = 0;
i <
half; ++
i) {
309 for (
size_t i = 0;
i < kAddRemBatchSize; ++
i) {
310 std::shuffle(remove_keys.begin(), remove_keys.end(), rand);
311 std::shuffle(add_keys.begin(), add_keys.end(), rand);
314 state.ResumeTiming();
321 state.ResumeTiming();
326 template <
typename T>
336 std::vector<int> remove_keys(kBenchmarkValues);
337 std::vector<int> add_keys(kBenchmarkValues);
343 while (
state.KeepRunningBatch(kBenchmarkValues * kAddRemBatchSize)) {
348 constexpr
int kSeed = 7;
349 std::mt19937_64 rand(kSeed);
351 std::vector<V>
values = GenerateValues<V>(kBenchmarkValues * 2);
357 for (
size_t i = 0;
i < kBenchmarkValues; ++
i) {
361 remove_keys[
i] =
i + kBenchmarkValues;
365 for (
size_t i = 0;
i < kAddRemBatchSize; ++
i) {
366 remove_keys.swap(add_keys);
367 std::shuffle(remove_keys.begin(), remove_keys.end(), rand);
368 std::shuffle(add_keys.begin(), add_keys.end(), rand);
370 state.ResumeTiming();
371 for (
size_t idx = 0;
idx < kBenchmarkValues; ++
idx) {
377 state.ResumeTiming();
385 template <
typename T>
391 Generator<V>
g(kBenchmarkValues +
state.max_iterations);
393 for (
int i = 0;
i < kBenchmarkValues;
i++) {
397 while (
state.KeepRunning()) {
404 template <
typename T>
409 std::vector<V>
values = GenerateValues<V>(kBenchmarkValues);
416 while (
state.KeepRunning()) {
426 template <
typename T>
430 std::vector<V>
values = GenerateValues<V>(kBenchmarkValues);
438 while (
state.KeepRunning()) {
444 template <
typename T>
446 BM_RangeConstructionImpl<T>(
state,
false);
449 template <
typename T>
451 BM_RangeConstructionImpl<T>(
state,
true);
// Declares aliases for the four std ordered containers keyed on `value`:
// stl_set_<value>, stl_map_<value>, stl_multiset_<value>, stl_multimap_<value>.
#define STL_ORDERED_TYPES(value)                     \
  using stl_set_##value = std::set<value>;           \
  using stl_map_##value = std::map<value, intptr_t>; \
  using stl_multiset_##value = std::multiset<value>; \
  using stl_multimap_##value = std::multimap<value, intptr_t>
// Declares aliases for the std unordered containers and the absl flat_hash
// containers keyed on `value`, all using the type's default hasher.
#define STL_UNORDERED_TYPES(value)                                       \
  using stl_unordered_set_##value = std::unordered_set<value>;           \
  using stl_unordered_map_##value = std::unordered_map<value, intptr_t>; \
  using flat_hash_set_##value = flat_hash_set<value>;                    \
  using flat_hash_map_##value = flat_hash_map<value, intptr_t>;          \
  using stl_unordered_multiset_##value = std::unordered_multiset<value>; \
  using stl_unordered_multimap_##value =                                 \
      std::unordered_multimap<value, intptr_t>
// Same as STL_UNORDERED_TYPES but with an explicit hasher `hash`, for key
// types that have no std::hash specialization.
#define STL_UNORDERED_TYPES_CUSTOM_HASH(value, hash)                           \
  using stl_unordered_set_##value = std::unordered_set<value, hash>;           \
  using stl_unordered_map_##value = std::unordered_map<value, intptr_t, hash>; \
  using flat_hash_set_##value = flat_hash_set<value, hash>;                    \
  using flat_hash_map_##value = flat_hash_map<value, intptr_t, hash>;          \
  using stl_unordered_multiset_##value = std::unordered_multiset<value, hash>; \
  using stl_unordered_multimap_##value =                                       \
      std::unordered_multimap<value, intptr_t, hash>
// Declares aliases for the four absl btree containers keyed on `value` with
// explicit default comparator/allocator. The `256` in the alias names
// presumably refers to the btree node size — confirm against btree docs.
#define BTREE_TYPES(value)                                            \
  using btree_256_set_##value =                                       \
      btree_set<value, std::less<value>, std::allocator<value>>;      \
  using btree_256_map_##value =                                       \
      btree_map<value, intptr_t, std::less<value>,                    \
                std::allocator<std::pair<const value, intptr_t>>>;    \
  using btree_256_multiset_##value =                                  \
      btree_multiset<value, std::less<value>, std::allocator<value>>; \
  using btree_256_multimap_##value =                                  \
      btree_multimap<value, intptr_t, std::less<value>,               \
                     std::allocator<std::pair<const value, intptr_t>>>
// Defines and registers a single benchmark BM_<type>_<func> that forwards to
// the templated BM_<func><type>(state) implementation defined earlier.
#define MY_BENCHMARK4(type, func)                                              \
  void BM_##type##_##func(benchmark::State& state) { BM_##func<type>(state); } \
  BENCHMARK(BM_##type##_##func)
// Registers the benchmark suite supported by every container type (std and
// absl alike); EraseIf is added separately by MY_BENCHMARK3 for containers
// that support it.
#define MY_BENCHMARK3_STL(type)           \
  MY_BENCHMARK4(type, Insert);            \
  MY_BENCHMARK4(type, InsertSorted);      \
  MY_BENCHMARK4(type, InsertSmall);       \
  MY_BENCHMARK4(type, Lookup);            \
  MY_BENCHMARK4(type, FullLookup);        \
  MY_BENCHMARK4(type, Erase);             \
  MY_BENCHMARK4(type, EraseRange);        \
  MY_BENCHMARK4(type, QueueAddRem);       \
  MY_BENCHMARK4(type, MixedAddRem);       \
  MY_BENCHMARK4(type, Fifo);              \
  MY_BENCHMARK4(type, FwdIter);           \
  MY_BENCHMARK4(type, InsertRangeRandom); \
  MY_BENCHMARK4(type, InsertRangeSorted)
// Registers the common suite plus the EraseIf benchmark.
#define MY_BENCHMARK3(type)     \
  MY_BENCHMARK4(type, EraseIf); \
  MY_BENCHMARK3_STL(type)
// Registers benchmarks for the container families that also exist in a
// multi- variant: std ordered, std unordered, and btree (flat_hash has no
// multi variant, so it is excluded here and added by MY_BENCHMARK2).
#define MY_BENCHMARK2_SUPPORTS_MULTI_ONLY(type) \
  MY_BENCHMARK3_STL(stl_##type);                \
  MY_BENCHMARK3_STL(stl_unordered_##type);      \
  MY_BENCHMARK3(btree_256_##type)
// Registers all container families for `type`, including flat_hash.
#define MY_BENCHMARK2(type)                 \
  MY_BENCHMARK2_SUPPORTS_MULTI_ONLY(type);  \
  MY_BENCHMARK3(flat_hash_##type)
// Registers set/map plus their multi- variants for `type`.
// NOTE(review): MY_BENCHMARK is defined a second time just below with a
// narrower expansion; in this chunk the selecting preprocessor conditionals
// (presumably #ifdef BIG_TYPE_TESTING / #else) are not visible — confirm the
// guards exist before building, otherwise this is a macro redefinition error.
#define MY_BENCHMARK(type)                            \
  MY_BENCHMARK2(set_##type);                          \
  MY_BENCHMARK2(map_##type);                          \
  MY_BENCHMARK2_SUPPORTS_MULTI_ONLY(multiset_##type); \
  MY_BENCHMARK2_SUPPORTS_MULTI_ONLY(multimap_##type)
// Narrower variant registering only set/map (no multi- containers).
// NOTE(review): this duplicates the MY_BENCHMARK name defined above; the two
// are presumably selected by preprocessor conditionals elided from this
// chunk — confirm.
#define MY_BENCHMARK(type)   \
  MY_BENCHMARK2(set_##type); \
  MY_BENCHMARK2(map_##type)
568 template <
int Size,
int Copies>
570 BigType() : BigType(0) {}
571 explicit BigType(
int x) { std::iota(
values.begin(),
values.end(), x); }
573 void Copy(
const BigType& other) {
574 for (
int i = 0;
i < Size &&
i < Copies; ++
i)
values[i] = other.values[i];
576 for (
int i = Size,
idx = 0;
i < Copies; ++
i) {
583 BigType(
const BigType& other) {
Copy(other); }
584 BigType& operator=(
const BigType& other) {
590 bool operator<(
const BigType& other)
const {
591 return std::lexicographical_compare(
593 other.values.begin(), other.values.begin() +
std::min(Size, Copies));
597 other.values.begin());
601 template <
typename State>
603 for (
int i = 0;
i < Size &&
i < Copies; ++
i)
604 h = State::combine(
std::move(h),
b.values[i]);
// Declares every container alias for key type BigType<SIZE, COPIES> (std
// ordered, std unordered with absl::Hash, flat_hash, btree — plus their
// multi- variants) and registers the full benchmark matrix for them via
// MY_BENCHMARK.
#define BIG_TYPE_BENCHMARKS(SIZE, COPIES)                                     \
  using stl_set_size##SIZE##copies##COPIES = std::set<BigType<SIZE, COPIES>>; \
  using stl_map_size##SIZE##copies##COPIES =                                  \
      std::map<BigType<SIZE, COPIES>, intptr_t>;                              \
  using stl_multiset_size##SIZE##copies##COPIES =                             \
      std::multiset<BigType<SIZE, COPIES>>;                                   \
  using stl_multimap_size##SIZE##copies##COPIES =                             \
      std::multimap<BigType<SIZE, COPIES>, intptr_t>;                         \
  using stl_unordered_set_size##SIZE##copies##COPIES =                        \
      std::unordered_set<BigType<SIZE, COPIES>,                               \
                         absl::Hash<BigType<SIZE, COPIES>>>;                  \
  using stl_unordered_map_size##SIZE##copies##COPIES =                        \
      std::unordered_map<BigType<SIZE, COPIES>, intptr_t,                     \
                         absl::Hash<BigType<SIZE, COPIES>>>;                  \
  using flat_hash_set_size##SIZE##copies##COPIES =                            \
      flat_hash_set<BigType<SIZE, COPIES>>;                                   \
  using flat_hash_map_size##SIZE##copies##COPIES =                            \
      flat_hash_map<BigType<SIZE, COPIES>, intptr_t>;                         \
  using stl_unordered_multiset_size##SIZE##copies##COPIES =                   \
      std::unordered_multiset<BigType<SIZE, COPIES>,                          \
                              absl::Hash<BigType<SIZE, COPIES>>>;             \
  using stl_unordered_multimap_size##SIZE##copies##COPIES =                   \
      std::unordered_multimap<BigType<SIZE, COPIES>, intptr_t,                \
                              absl::Hash<BigType<SIZE, COPIES>>>;             \
  using btree_256_set_size##SIZE##copies##COPIES =                            \
      btree_set<BigType<SIZE, COPIES>>;                                       \
  using btree_256_map_size##SIZE##copies##COPIES =                            \
      btree_map<BigType<SIZE, COPIES>, intptr_t>;                             \
  using btree_256_multiset_size##SIZE##copies##COPIES =                       \
      btree_multiset<BigType<SIZE, COPIES>>;                                  \
  using btree_256_multimap_size##SIZE##copies##COPIES =                       \
      btree_multimap<BigType<SIZE, COPIES>, intptr_t>;                        \
  MY_BENCHMARK(size##SIZE##copies##COPIES)
648 #ifndef NODESIZE_TESTING
649 #ifdef BIG_TYPE_TESTING
672 BigTypePtr() : BigTypePtr(0) {}
673 explicit BigTypePtr(
int x) {
674 ptr = absl::make_unique<BigType<Size, Size>>(
x);
676 BigTypePtr(
const BigTypePtr& other) {
677 ptr = absl::make_unique<BigType<Size, Size>>(*other.ptr);
679 BigTypePtr(BigTypePtr&& other) noexcept =
default;
680 BigTypePtr& operator=(
const BigTypePtr& other) {
681 ptr = absl::make_unique<BigType<Size, Size>>(*other.ptr);
683 BigTypePtr& operator=(BigTypePtr&& other) noexcept =
default;
685 bool operator<(
const BigTypePtr& other)
const {
return *
ptr < *other.ptr; }
686 bool operator==(
const BigTypePtr& other)
const {
return *
ptr == *other.ptr; }
688 std::unique_ptr<BigType<Size, Size>>
ptr;
692 double ContainerInfo(
const btree_set<BigTypePtr<Size>>&
b) {
693 const double bytes_used =
694 b.bytes_used() +
b.size() *
sizeof(BigType<Size, Size>);
695 const double bytes_per_value = bytes_used /
b.size();
696 BtreeContainerInfoLog(
b, bytes_used, bytes_per_value);
697 return bytes_per_value;
700 double ContainerInfo(
const btree_map<
int, BigTypePtr<Size>>&
b) {
701 const double bytes_used =
702 b.bytes_used() +
b.size() *
sizeof(BigType<Size, Size>);
703 const double bytes_per_value = bytes_used /
b.size();
704 BtreeContainerInfoLog(
b, bytes_used, bytes_per_value);
705 return bytes_per_value;
// Declares container aliases for the BigTypePtr comparison benchmarks and
// registers them. NOTE(review): only the btree aliases (and flat_hash_map's
// mapped type) actually store BigTypePtr; the std and flat_hash set/map
// aliases store BigType by value — presumably intentional, so the by-value
// containers serve as the baseline; confirm against upstream.
#define BIG_TYPE_PTR_BENCHMARKS(SIZE)                                          \
  using stl_set_size##SIZE##copies##SIZE##ptr = std::set<BigType<SIZE, SIZE>>; \
  using stl_map_size##SIZE##copies##SIZE##ptr =                                \
      std::map<int, BigType<SIZE, SIZE>>;                                      \
  using stl_unordered_set_size##SIZE##copies##SIZE##ptr =                      \
      std::unordered_set<BigType<SIZE, SIZE>,                                  \
                         absl::Hash<BigType<SIZE, SIZE>>>;                     \
  using stl_unordered_map_size##SIZE##copies##SIZE##ptr =                      \
      std::unordered_map<int, BigType<SIZE, SIZE>>;                            \
  using flat_hash_set_size##SIZE##copies##SIZE##ptr =                          \
      flat_hash_set<BigType<SIZE, SIZE>>;                                      \
  using flat_hash_map_size##SIZE##copies##SIZE##ptr =                          \
      flat_hash_map<int, BigTypePtr<SIZE>>;                                    \
  using btree_256_set_size##SIZE##copies##SIZE##ptr =                          \
      btree_set<BigTypePtr<SIZE>>;                                             \
  using btree_256_map_size##SIZE##copies##SIZE##ptr =                          \
      btree_map<int, BigTypePtr<SIZE>>;                                        \
  MY_BENCHMARK3_STL(stl_set_size##SIZE##copies##SIZE##ptr);                    \
  MY_BENCHMARK3_STL(stl_unordered_set_size##SIZE##copies##SIZE##ptr);          \
  MY_BENCHMARK3(flat_hash_set_size##SIZE##copies##SIZE##ptr);                  \
  MY_BENCHMARK3(btree_256_set_size##SIZE##copies##SIZE##ptr);                  \
  MY_BENCHMARK3_STL(stl_map_size##SIZE##copies##SIZE##ptr);                    \
  MY_BENCHMARK3_STL(stl_unordered_map_size##SIZE##copies##SIZE##ptr);          \
  MY_BENCHMARK3(flat_hash_map_size##SIZE##copies##SIZE##ptr);                  \
  MY_BENCHMARK3(btree_256_map_size##SIZE##copies##SIZE##ptr)