abseil-cpp/absl/container/internal/raw_hash_set_test.cc
Go to the documentation of this file.
1 // Copyright 2018 The Abseil Authors.
2 //
3 // Licensed under the Apache License, Version 2.0 (the "License");
4 // you may not use this file except in compliance with the License.
5 // You may obtain a copy of the License at
6 //
7 // https://www.apache.org/licenses/LICENSE-2.0
8 //
9 // Unless required by applicable law or agreed to in writing, software
10 // distributed under the License is distributed on an "AS IS" BASIS,
11 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 // See the License for the specific language governing permissions and
13 // limitations under the License.
14 
15 #include "absl/container/internal/raw_hash_set.h"
16 
17 #include <atomic>
18 #include <cmath>
19 #include <cstdint>
20 #include <deque>
21 #include <functional>
22 #include <memory>
23 #include <numeric>
24 #include <random>
25 #include <string>
26 #include <unordered_map>
27 #include <unordered_set>
28 
29 #include "gmock/gmock.h"
30 #include "gtest/gtest.h"
31 #include "absl/base/attributes.h"
32 #include "absl/base/config.h"
33 #include "absl/base/internal/cycleclock.h"
35 #include "absl/base/internal/raw_logging.h"
36 #include "absl/container/internal/container_memory.h"
37 #include "absl/container/internal/hash_function_defaults.h"
38 #include "absl/container/internal/hash_policy_testing.h"
39 #include "absl/container/internal/hashtable_debug.h"
40 #include "absl/strings/string_view.h"
41 
42 namespace absl {
44 namespace container_internal {
45 
// Test-only backdoor: exposes the private slots_ pointer of a raw_hash_set
// so tests can inspect slot storage directly.
struct RawHashSetTestOnlyAccess {
  template <typename Container>
  static auto GetSlots(const Container& table) -> decltype(table.slots_) {
    return table.slots_;
  }
};
52 
53 namespace {
54 
61 
62 // Convenience function to static cast to ctrl_t.
63 ctrl_t CtrlT(int i) { return static_cast<ctrl_t>(i); }
64 
65 TEST(Util, NormalizeCapacity) {
74  EXPECT_EQ(15 * 2 + 1, NormalizeCapacity(15 + 1));
75  EXPECT_EQ(15 * 2 + 1, NormalizeCapacity(15 + 2));
76 }
77 
78 TEST(Util, GrowthAndCapacity) {
79  // Verify that GrowthToCapacity gives the minimum capacity that has enough
80  // growth.
81  for (size_t growth = 0; growth < 10000; ++growth) {
82  SCOPED_TRACE(growth);
84  // The capacity is large enough for `growth`.
86  // For (capacity+1) < kWidth, growth should equal capacity.
87  if (capacity + 1 < Group::kWidth) {
89  } else {
91  }
92  if (growth != 0 && capacity > 1) {
93  // There is no smaller capacity that works.
94  EXPECT_THAT(CapacityToGrowth(capacity / 2), Lt(growth));
95  }
96  }
97 
98  for (size_t capacity = Group::kWidth - 1; capacity < 10000;
99  capacity = 2 * capacity + 1) {
101  size_t growth = CapacityToGrowth(capacity);
102  EXPECT_THAT(growth, Lt(capacity));
105  }
106 }
107 
108 TEST(Util, probe_seq) {
109  probe_seq<16> seq(0, 127);
110  auto gen = [&]() {
111  size_t res = seq.offset();
112  seq.next();
113  return res;
114  };
115  std::vector<size_t> offsets(8);
116  std::generate_n(offsets.begin(), 8, gen);
117  EXPECT_THAT(offsets, ElementsAre(0, 16, 48, 96, 32, 112, 80, 64));
118  seq = probe_seq<16>(128, 127);
119  std::generate_n(offsets.begin(), 8, gen);
120  EXPECT_THAT(offsets, ElementsAre(0, 16, 48, 96, 32, 112, 80, 64));
121 }
122 
123 TEST(BitMask, Smoke) {
124  EXPECT_FALSE((BitMask<uint8_t, 8>(0)));
125  EXPECT_TRUE((BitMask<uint8_t, 8>(5)));
126 
127  EXPECT_THAT((BitMask<uint8_t, 8>(0)), ElementsAre());
128  EXPECT_THAT((BitMask<uint8_t, 8>(0x1)), ElementsAre(0));
129  EXPECT_THAT((BitMask<uint8_t, 8>(0x2)), ElementsAre(1));
130  EXPECT_THAT((BitMask<uint8_t, 8>(0x3)), ElementsAre(0, 1));
131  EXPECT_THAT((BitMask<uint8_t, 8>(0x4)), ElementsAre(2));
132  EXPECT_THAT((BitMask<uint8_t, 8>(0x5)), ElementsAre(0, 2));
133  EXPECT_THAT((BitMask<uint8_t, 8>(0x55)), ElementsAre(0, 2, 4, 6));
134  EXPECT_THAT((BitMask<uint8_t, 8>(0xAA)), ElementsAre(1, 3, 5, 7));
135 }
136 
137 TEST(BitMask, WithShift) {
138  // See the non-SSE version of Group for details on what this math is for.
139  uint64_t ctrl = 0x1716151413121110;
140  uint64_t hash = 0x12;
141  constexpr uint64_t msbs = 0x8080808080808080ULL;
142  constexpr uint64_t lsbs = 0x0101010101010101ULL;
143  auto x = ctrl ^ (lsbs * hash);
144  uint64_t mask = (x - lsbs) & ~x & msbs;
145  EXPECT_EQ(0x0000000080800000, mask);
146 
147  BitMask<uint64_t, 8, 3> b(mask);
148  EXPECT_EQ(*b, 2);
149 }
150 
151 TEST(BitMask, LeadingTrailing) {
152  EXPECT_EQ((BitMask<uint32_t, 16>(0x00001a40).LeadingZeros()), 3);
153  EXPECT_EQ((BitMask<uint32_t, 16>(0x00001a40).TrailingZeros()), 6);
154 
155  EXPECT_EQ((BitMask<uint32_t, 16>(0x00000001).LeadingZeros()), 15);
156  EXPECT_EQ((BitMask<uint32_t, 16>(0x00000001).TrailingZeros()), 0);
157 
158  EXPECT_EQ((BitMask<uint32_t, 16>(0x00008000).LeadingZeros()), 0);
159  EXPECT_EQ((BitMask<uint32_t, 16>(0x00008000).TrailingZeros()), 15);
160 
161  EXPECT_EQ((BitMask<uint64_t, 8, 3>(0x0000008080808000).LeadingZeros()), 3);
162  EXPECT_EQ((BitMask<uint64_t, 8, 3>(0x0000008080808000).TrailingZeros()), 1);
163 
164  EXPECT_EQ((BitMask<uint64_t, 8, 3>(0x0000000000000080).LeadingZeros()), 7);
165  EXPECT_EQ((BitMask<uint64_t, 8, 3>(0x0000000000000080).TrailingZeros()), 0);
166 
167  EXPECT_EQ((BitMask<uint64_t, 8, 3>(0x8000000000000000).LeadingZeros()), 0);
168  EXPECT_EQ((BitMask<uint64_t, 8, 3>(0x8000000000000000).TrailingZeros()), 7);
169 }
170 
172  for (h2_t h = 0; h != 128; ++h) EXPECT_FALSE(Group{EmptyGroup()}.Match(h));
173 }
174 
175 TEST(Group, Match) {
176  if (Group::kWidth == 16) {
177  ctrl_t group[] = {ctrl_t::kEmpty, CtrlT(1), ctrl_t::kDeleted, CtrlT(3),
178  ctrl_t::kEmpty, CtrlT(5), ctrl_t::kSentinel, CtrlT(7),
179  CtrlT(7), CtrlT(5), CtrlT(3), CtrlT(1),
180  CtrlT(1), CtrlT(1), CtrlT(1), CtrlT(1)};
181  EXPECT_THAT(Group{group}.Match(0), ElementsAre());
182  EXPECT_THAT(Group{group}.Match(1), ElementsAre(1, 11, 12, 13, 14, 15));
183  EXPECT_THAT(Group{group}.Match(3), ElementsAre(3, 10));
184  EXPECT_THAT(Group{group}.Match(5), ElementsAre(5, 9));
185  EXPECT_THAT(Group{group}.Match(7), ElementsAre(7, 8));
186  } else if (Group::kWidth == 8) {
187  ctrl_t group[] = {ctrl_t::kEmpty, CtrlT(1), CtrlT(2),
188  ctrl_t::kDeleted, CtrlT(2), CtrlT(1),
189  ctrl_t::kSentinel, CtrlT(1)};
190  EXPECT_THAT(Group{group}.Match(0), ElementsAre());
191  EXPECT_THAT(Group{group}.Match(1), ElementsAre(1, 5, 7));
192  EXPECT_THAT(Group{group}.Match(2), ElementsAre(2, 4));
193  } else {
194  FAIL() << "No test coverage for Group::kWidth==" << Group::kWidth;
195  }
196 }
197 
198 TEST(Group, MaskEmpty) {
199  if (Group::kWidth == 16) {
200  ctrl_t group[] = {ctrl_t::kEmpty, CtrlT(1), ctrl_t::kDeleted, CtrlT(3),
201  ctrl_t::kEmpty, CtrlT(5), ctrl_t::kSentinel, CtrlT(7),
202  CtrlT(7), CtrlT(5), CtrlT(3), CtrlT(1),
203  CtrlT(1), CtrlT(1), CtrlT(1), CtrlT(1)};
204  EXPECT_THAT(Group{group}.MaskEmpty().LowestBitSet(), 0);
205  EXPECT_THAT(Group{group}.MaskEmpty().HighestBitSet(), 4);
206  } else if (Group::kWidth == 8) {
207  ctrl_t group[] = {ctrl_t::kEmpty, CtrlT(1), CtrlT(2),
208  ctrl_t::kDeleted, CtrlT(2), CtrlT(1),
209  ctrl_t::kSentinel, CtrlT(1)};
210  EXPECT_THAT(Group{group}.MaskEmpty().LowestBitSet(), 0);
211  EXPECT_THAT(Group{group}.MaskEmpty().HighestBitSet(), 0);
212  } else {
213  FAIL() << "No test coverage for Group::kWidth==" << Group::kWidth;
214  }
215 }
216 
217 TEST(Group, MaskEmptyOrDeleted) {
218  if (Group::kWidth == 16) {
219  ctrl_t group[] = {ctrl_t::kEmpty, CtrlT(1), ctrl_t::kEmpty, CtrlT(3),
220  ctrl_t::kDeleted, CtrlT(5), ctrl_t::kSentinel, CtrlT(7),
221  CtrlT(7), CtrlT(5), CtrlT(3), CtrlT(1),
222  CtrlT(1), CtrlT(1), CtrlT(1), CtrlT(1)};
223  EXPECT_THAT(Group{group}.MaskEmptyOrDeleted().LowestBitSet(), 0);
224  EXPECT_THAT(Group{group}.MaskEmptyOrDeleted().HighestBitSet(), 4);
225  } else if (Group::kWidth == 8) {
226  ctrl_t group[] = {ctrl_t::kEmpty, CtrlT(1), CtrlT(2),
227  ctrl_t::kDeleted, CtrlT(2), CtrlT(1),
228  ctrl_t::kSentinel, CtrlT(1)};
229  EXPECT_THAT(Group{group}.MaskEmptyOrDeleted().LowestBitSet(), 0);
230  EXPECT_THAT(Group{group}.MaskEmptyOrDeleted().HighestBitSet(), 3);
231  } else {
232  FAIL() << "No test coverage for Group::kWidth==" << Group::kWidth;
233  }
234 }
235 
236 TEST(Batch, DropDeletes) {
237  constexpr size_t kCapacity = 63;
238  constexpr size_t kGroupWidth = container_internal::Group::kWidth;
239  std::vector<ctrl_t> ctrl(kCapacity + 1 + kGroupWidth);
241  std::vector<ctrl_t> pattern = {
242  ctrl_t::kEmpty, CtrlT(2), ctrl_t::kDeleted, CtrlT(2),
243  ctrl_t::kEmpty, CtrlT(1), ctrl_t::kDeleted};
244  for (size_t i = 0; i != kCapacity; ++i) {
245  ctrl[i] = pattern[i % pattern.size()];
246  if (i < kGroupWidth - 1)
247  ctrl[i + kCapacity + 1] = pattern[i % pattern.size()];
248  }
251  for (size_t i = 0; i < kCapacity + kGroupWidth; ++i) {
252  ctrl_t expected = pattern[i % (kCapacity + 1) % pattern.size()];
253  if (i == kCapacity) expected = ctrl_t::kSentinel;
254  if (expected == ctrl_t::kDeleted) expected = ctrl_t::kEmpty;
255  if (IsFull(expected)) expected = ctrl_t::kDeleted;
256  EXPECT_EQ(ctrl[i], expected)
257  << i << " " << static_cast<int>(pattern[i % pattern.size()]);
258  }
259 }
260 
261 TEST(Group, CountLeadingEmptyOrDeleted) {
262  const std::vector<ctrl_t> empty_examples = {ctrl_t::kEmpty, ctrl_t::kDeleted};
263  const std::vector<ctrl_t> full_examples = {
264  CtrlT(0), CtrlT(1), CtrlT(2), CtrlT(3),
265  CtrlT(5), CtrlT(9), CtrlT(127), ctrl_t::kSentinel};
266 
267  for (ctrl_t empty : empty_examples) {
268  std::vector<ctrl_t> e(Group::kWidth, empty);
269  EXPECT_EQ(Group::kWidth, Group{e.data()}.CountLeadingEmptyOrDeleted());
270  for (ctrl_t full : full_examples) {
271  for (size_t i = 0; i != Group::kWidth; ++i) {
272  std::vector<ctrl_t> f(Group::kWidth, empty);
273  f[i] = full;
274  EXPECT_EQ(i, Group{f.data()}.CountLeadingEmptyOrDeleted());
275  }
276  std::vector<ctrl_t> f(Group::kWidth, empty);
277  f[Group::kWidth * 2 / 3] = full;
278  f[Group::kWidth / 2] = full;
279  EXPECT_EQ(
280  Group::kWidth / 2, Group{f.data()}.CountLeadingEmptyOrDeleted());
281  }
282  }
283 }
284 
285 template <class T>
286 struct ValuePolicy {
287  using slot_type = T;
288  using key_type = T;
289  using init_type = T;
290 
291  template <class Allocator, class... Args>
292  static void construct(Allocator* alloc, slot_type* slot, Args&&... args) {
294  std::forward<Args>(args)...);
295  }
296 
297  template <class Allocator>
298  static void destroy(Allocator* alloc, slot_type* slot) {
300  }
301 
302  template <class Allocator>
303  static void transfer(Allocator* alloc, slot_type* new_slot,
304  slot_type* old_slot) {
305  construct(alloc, new_slot, std::move(*old_slot));
306  destroy(alloc, old_slot);
307  }
308 
309  static T& element(slot_type* slot) { return *slot; }
310 
311  template <class F, class... Args>
313  std::declval<F>(), std::declval<Args>()...))
314  apply(F&& f, Args&&... args) {
316  std::forward<F>(f), std::forward<Args>(args)...);
317  }
318 };
319 
320 using IntPolicy = ValuePolicy<int64_t>;
321 using Uint8Policy = ValuePolicy<uint8_t>;
322 
323 class StringPolicy {
324  template <class F, class K, class V,
325  class = typename std::enable_if<
327  decltype(std::declval<F>()(
328  std::declval<const absl::string_view&>(), std::piecewise_construct,
329  std::declval<std::tuple<K>>(),
330  std::declval<V>())) static apply_impl(F&& f,
331  std::pair<std::tuple<K>, V> p) {
332  const absl::string_view& key = std::get<0>(p.first);
333  return std::forward<F>(f)(key, std::piecewise_construct, std::move(p.first),
334  std::move(p.second));
335  }
336 
337  public:
338  struct slot_type {
339  struct ctor {};
340 
341  template <class... Ts>
342  slot_type(ctor, Ts&&... ts) : pair(std::forward<Ts>(ts)...) {}
343 
344  std::pair<std::string, std::string> pair;
345  };
346 
347  using key_type = std::string;
348  using init_type = std::pair<std::string, std::string>;
349 
350  template <class allocator_type, class... Args>
351  static void construct(allocator_type* alloc, slot_type* slot, Args... args) {
353  *alloc, slot, typename slot_type::ctor(), std::forward<Args>(args)...);
354  }
355 
356  template <class allocator_type>
357  static void destroy(allocator_type* alloc, slot_type* slot) {
359  }
360 
361  template <class allocator_type>
362  static void transfer(allocator_type* alloc, slot_type* new_slot,
363  slot_type* old_slot) {
364  construct(alloc, new_slot, std::move(old_slot->pair));
365  destroy(alloc, old_slot);
366  }
367 
368  static std::pair<std::string, std::string>& element(slot_type* slot) {
369  return slot->pair;
370  }
371 
372  template <class F, class... Args>
373  static auto apply(F&& f, Args&&... args)
374  -> decltype(apply_impl(std::forward<F>(f),
375  PairArgs(std::forward<Args>(args)...))) {
376  return apply_impl(std::forward<F>(f),
377  PairArgs(std::forward<Args>(args)...));
378  }
379 };
380 
381 struct StringHash : absl::Hash<absl::string_view> {
382  using is_transparent = void;
383 };
384 struct StringEq : std::equal_to<absl::string_view> {
385  using is_transparent = void;
386 };
387 
388 struct StringTable
389  : raw_hash_set<StringPolicy, StringHash, StringEq, std::allocator<int>> {
390  using Base = typename StringTable::raw_hash_set;
391  StringTable() {}
392  using Base::Base;
393 };
394 
395 struct IntTable
396  : raw_hash_set<IntPolicy, container_internal::hash_default_hash<int64_t>,
397  std::equal_to<int64_t>, std::allocator<int64_t>> {
398  using Base = typename IntTable::raw_hash_set;
399  using Base::Base;
400 };
401 
402 struct Uint8Table
403  : raw_hash_set<Uint8Policy, container_internal::hash_default_hash<uint8_t>,
404  std::equal_to<uint8_t>, std::allocator<uint8_t>> {
405  using Base = typename Uint8Table::raw_hash_set;
406  using Base::Base;
407 };
408 
409 template <typename T>
410 struct CustomAlloc : std::allocator<T> {
411  CustomAlloc() {}
412 
413  template <typename U>
414  CustomAlloc(const CustomAlloc<U>& other) {}
415 
416  template<class U> struct rebind {
417  using other = CustomAlloc<U>;
418  };
419 };
420 
421 struct CustomAllocIntTable
422  : raw_hash_set<IntPolicy, container_internal::hash_default_hash<int64_t>,
423  std::equal_to<int64_t>, CustomAlloc<int64_t>> {
424  using Base = typename CustomAllocIntTable::raw_hash_set;
425  using Base::Base;
426 };
427 
428 struct BadFastHash {
429  template <class T>
430  size_t operator()(const T&) const {
431  return 0;
432  }
433 };
434 
435 struct BadTable : raw_hash_set<IntPolicy, BadFastHash, std::equal_to<int>,
436  std::allocator<int>> {
437  using Base = typename BadTable::raw_hash_set;
438  BadTable() {}
439  using Base::Base;
440 };
441 
442 TEST(Table, EmptyFunctorOptimization) {
443  static_assert(std::is_empty<std::equal_to<absl::string_view>>::value, "");
444  static_assert(std::is_empty<std::allocator<int>>::value, "");
445 
446  struct MockTable {
447  void* ctrl;
448  void* slots;
449  size_t size;
450  size_t capacity;
451  size_t growth_left;
452  void* infoz;
453  };
454  struct MockTableInfozDisabled {
455  void* ctrl;
456  void* slots;
457  size_t size;
458  size_t capacity;
459  size_t growth_left;
460  };
461  struct StatelessHash {
462  size_t operator()(absl::string_view) const { return 0; }
463  };
464  struct StatefulHash : StatelessHash {
465  size_t dummy;
466  };
467 
469  EXPECT_EQ(sizeof(MockTableInfozDisabled),
470  sizeof(raw_hash_set<StringPolicy, StatelessHash,
471  std::equal_to<absl::string_view>,
472  std::allocator<int>>));
473 
474  EXPECT_EQ(sizeof(MockTableInfozDisabled) + sizeof(StatefulHash),
475  sizeof(raw_hash_set<StringPolicy, StatefulHash,
476  std::equal_to<absl::string_view>,
477  std::allocator<int>>));
478  } else {
479  EXPECT_EQ(sizeof(MockTable),
480  sizeof(raw_hash_set<StringPolicy, StatelessHash,
481  std::equal_to<absl::string_view>,
482  std::allocator<int>>));
483 
484  EXPECT_EQ(sizeof(MockTable) + sizeof(StatefulHash),
485  sizeof(raw_hash_set<StringPolicy, StatefulHash,
486  std::equal_to<absl::string_view>,
487  std::allocator<int>>));
488  }
489 }
490 
491 TEST(Table, Empty) {
492  IntTable t;
493  EXPECT_EQ(0, t.size());
494  EXPECT_TRUE(t.empty());
495 }
496 
497 TEST(Table, LookupEmpty) {
498  IntTable t;
499  auto it = t.find(0);
500  EXPECT_TRUE(it == t.end());
501 }
502 
503 TEST(Table, Insert1) {
504  IntTable t;
505  EXPECT_TRUE(t.find(0) == t.end());
506  auto res = t.emplace(0);
507  EXPECT_TRUE(res.second);
508  EXPECT_THAT(*res.first, 0);
509  EXPECT_EQ(1, t.size());
510  EXPECT_THAT(*t.find(0), 0);
511 }
512 
513 TEST(Table, Insert2) {
514  IntTable t;
515  EXPECT_TRUE(t.find(0) == t.end());
516  auto res = t.emplace(0);
517  EXPECT_TRUE(res.second);
518  EXPECT_THAT(*res.first, 0);
519  EXPECT_EQ(1, t.size());
520  EXPECT_TRUE(t.find(1) == t.end());
521  res = t.emplace(1);
522  EXPECT_TRUE(res.second);
523  EXPECT_THAT(*res.first, 1);
524  EXPECT_EQ(2, t.size());
525  EXPECT_THAT(*t.find(0), 0);
526  EXPECT_THAT(*t.find(1), 1);
527 }
528 
529 TEST(Table, InsertCollision) {
530  BadTable t;
531  EXPECT_TRUE(t.find(1) == t.end());
532  auto res = t.emplace(1);
533  EXPECT_TRUE(res.second);
534  EXPECT_THAT(*res.first, 1);
535  EXPECT_EQ(1, t.size());
536 
537  EXPECT_TRUE(t.find(2) == t.end());
538  res = t.emplace(2);
539  EXPECT_THAT(*res.first, 2);
540  EXPECT_TRUE(res.second);
541  EXPECT_EQ(2, t.size());
542 
543  EXPECT_THAT(*t.find(1), 1);
544  EXPECT_THAT(*t.find(2), 2);
545 }
546 
547 // Test that we do not add existent element in case we need to search through
548 // many groups with deleted elements
549 TEST(Table, InsertCollisionAndFindAfterDelete) {
550  BadTable t; // all elements go to the same group.
551  // Have at least 2 groups with Group::kWidth collisions
552  // plus some extra collisions in the last group.
553  constexpr size_t kNumInserts = Group::kWidth * 2 + 5;
554  for (size_t i = 0; i < kNumInserts; ++i) {
555  auto res = t.emplace(i);
556  EXPECT_TRUE(res.second);
557  EXPECT_THAT(*res.first, i);
558  EXPECT_EQ(i + 1, t.size());
559  }
560 
561  // Remove elements one by one and check
562  // that we still can find all other elements.
563  for (size_t i = 0; i < kNumInserts; ++i) {
564  EXPECT_EQ(1, t.erase(i)) << i;
565  for (size_t j = i + 1; j < kNumInserts; ++j) {
566  EXPECT_THAT(*t.find(j), j);
567  auto res = t.emplace(j);
568  EXPECT_FALSE(res.second) << i << " " << j;
569  EXPECT_THAT(*res.first, j);
570  EXPECT_EQ(kNumInserts - i - 1, t.size());
571  }
572  }
573  EXPECT_TRUE(t.empty());
574 }
575 
576 TEST(Table, InsertWithinCapacity) {
577  IntTable t;
578  t.reserve(10);
579  const size_t original_capacity = t.capacity();
580  const auto addr = [&](int i) {
581  return reinterpret_cast<uintptr_t>(&*t.find(i));
582  };
583  // Inserting an element does not change capacity.
584  t.insert(0);
585  EXPECT_THAT(t.capacity(), original_capacity);
586  const uintptr_t original_addr_0 = addr(0);
587  // Inserting another element does not rehash.
588  t.insert(1);
589  EXPECT_THAT(t.capacity(), original_capacity);
590  EXPECT_THAT(addr(0), original_addr_0);
591  // Inserting lots of duplicate elements does not rehash.
592  for (int i = 0; i < 100; ++i) {
593  t.insert(i % 10);
594  }
595  EXPECT_THAT(t.capacity(), original_capacity);
596  EXPECT_THAT(addr(0), original_addr_0);
597  // Inserting a range of duplicate elements does not rehash.
598  std::vector<int> dup_range;
599  for (int i = 0; i < 100; ++i) {
600  dup_range.push_back(i % 10);
601  }
602  t.insert(dup_range.begin(), dup_range.end());
603  EXPECT_THAT(t.capacity(), original_capacity);
604  EXPECT_THAT(addr(0), original_addr_0);
605 }
606 
607 TEST(Table, LazyEmplace) {
608  StringTable t;
609  bool called = false;
610  auto it = t.lazy_emplace("abc", [&](const StringTable::constructor& f) {
611  called = true;
612  f("abc", "ABC");
613  });
614  EXPECT_TRUE(called);
615  EXPECT_THAT(*it, Pair("abc", "ABC"));
616  called = false;
617  it = t.lazy_emplace("abc", [&](const StringTable::constructor& f) {
618  called = true;
619  f("abc", "DEF");
620  });
621  EXPECT_FALSE(called);
622  EXPECT_THAT(*it, Pair("abc", "ABC"));
623 }
624 
625 TEST(Table, ContainsEmpty) {
626  IntTable t;
627 
628  EXPECT_FALSE(t.contains(0));
629 }
630 
631 TEST(Table, Contains1) {
632  IntTable t;
633 
634  EXPECT_TRUE(t.insert(0).second);
635  EXPECT_TRUE(t.contains(0));
636  EXPECT_FALSE(t.contains(1));
637 
638  EXPECT_EQ(1, t.erase(0));
639  EXPECT_FALSE(t.contains(0));
640 }
641 
642 TEST(Table, Contains2) {
643  IntTable t;
644 
645  EXPECT_TRUE(t.insert(0).second);
646  EXPECT_TRUE(t.contains(0));
647  EXPECT_FALSE(t.contains(1));
648 
649  t.clear();
650  EXPECT_FALSE(t.contains(0));
651 }
652 
// Counters tracking how DecomposeType objects are created and assigned.
// TestDecompose() resets and inspects them.
int decompose_constructed;
int decompose_copy_constructed;
int decompose_copy_assigned;
int decompose_move_constructed;
int decompose_move_assigned;

// A key type whose construction, copy, and move operations are all counted.
// Used to verify that lookups decompose to the key without creating
// unnecessary temporaries.
struct DecomposeType {
  DecomposeType(int i = 0) : i(i) {  // NOLINT
    ++decompose_constructed;
  }

  // Delegates to the int constructor with the first character's value.
  explicit DecomposeType(const char* d) : DecomposeType(*d) {}

  DecomposeType(const DecomposeType& other) : i(other.i) {
    ++decompose_copy_constructed;
  }
  DecomposeType& operator=(const DecomposeType& other) {
    ++decompose_copy_assigned;
    i = other.i;
    return *this;
  }
  DecomposeType(DecomposeType&& other) : i(other.i) {
    ++decompose_move_constructed;
  }
  DecomposeType& operator=(DecomposeType&& other) {
    ++decompose_move_assigned;
    i = other.i;
    return *this;
  }

  int i;
};
684 
685 struct DecomposeHash {
686  using is_transparent = void;
687  size_t operator()(const DecomposeType& a) const { return a.i; }
688  size_t operator()(int a) const { return a; }
689  size_t operator()(const char* a) const { return *a; }
690 };
691 
692 struct DecomposeEq {
693  using is_transparent = void;
694  bool operator()(const DecomposeType& a, const DecomposeType& b) const {
695  return a.i == b.i;
696  }
697  bool operator()(const DecomposeType& a, int b) const { return a.i == b; }
698  bool operator()(const DecomposeType& a, const char* b) const {
699  return a.i == *b;
700  }
701 };
702 
703 struct DecomposePolicy {
704  using slot_type = DecomposeType;
705  using key_type = DecomposeType;
706  using init_type = DecomposeType;
707 
708  template <typename T>
709  static void construct(void*, DecomposeType* slot, T&& v) {
710  ::new (slot) DecomposeType(std::forward<T>(v));
711  }
712  static void destroy(void*, DecomposeType* slot) { slot->~DecomposeType(); }
713  static DecomposeType& element(slot_type* slot) { return *slot; }
714 
715  template <class F, class T>
716  static auto apply(F&& f, const T& x) -> decltype(std::forward<F>(f)(x, x)) {
717  return std::forward<F>(f)(x, x);
718  }
719 };
720 
721 template <typename Hash, typename Eq>
722 void TestDecompose(bool construct_three) {
723  DecomposeType elem{0};
724  const int one = 1;
725  const char* three_p = "3";
726  const auto& three = three_p;
727  const int elem_vector_count = 256;
728  std::vector<DecomposeType> elem_vector(elem_vector_count, DecomposeType{0});
729  std::iota(elem_vector.begin(), elem_vector.end(), 0);
730 
731  using DecomposeSet =
732  raw_hash_set<DecomposePolicy, Hash, Eq, std::allocator<int>>;
733  DecomposeSet set1;
734 
735  decompose_constructed = 0;
736  int expected_constructed = 0;
737  EXPECT_EQ(expected_constructed, decompose_constructed);
738  set1.insert(elem);
739  EXPECT_EQ(expected_constructed, decompose_constructed);
740  set1.insert(1);
741  EXPECT_EQ(++expected_constructed, decompose_constructed);
742  set1.emplace("3");
743  EXPECT_EQ(++expected_constructed, decompose_constructed);
744  EXPECT_EQ(expected_constructed, decompose_constructed);
745 
746  { // insert(T&&)
747  set1.insert(1);
748  EXPECT_EQ(expected_constructed, decompose_constructed);
749  }
750 
751  { // insert(const T&)
752  set1.insert(one);
753  EXPECT_EQ(expected_constructed, decompose_constructed);
754  }
755 
756  { // insert(hint, T&&)
757  set1.insert(set1.begin(), 1);
758  EXPECT_EQ(expected_constructed, decompose_constructed);
759  }
760 
761  { // insert(hint, const T&)
762  set1.insert(set1.begin(), one);
763  EXPECT_EQ(expected_constructed, decompose_constructed);
764  }
765 
766  { // emplace(...)
767  set1.emplace(1);
768  EXPECT_EQ(expected_constructed, decompose_constructed);
769  set1.emplace("3");
770  expected_constructed += construct_three;
771  EXPECT_EQ(expected_constructed, decompose_constructed);
772  set1.emplace(one);
773  EXPECT_EQ(expected_constructed, decompose_constructed);
774  set1.emplace(three);
775  expected_constructed += construct_three;
776  EXPECT_EQ(expected_constructed, decompose_constructed);
777  }
778 
779  { // emplace_hint(...)
780  set1.emplace_hint(set1.begin(), 1);
781  EXPECT_EQ(expected_constructed, decompose_constructed);
782  set1.emplace_hint(set1.begin(), "3");
783  expected_constructed += construct_three;
784  EXPECT_EQ(expected_constructed, decompose_constructed);
785  set1.emplace_hint(set1.begin(), one);
786  EXPECT_EQ(expected_constructed, decompose_constructed);
787  set1.emplace_hint(set1.begin(), three);
788  expected_constructed += construct_three;
789  EXPECT_EQ(expected_constructed, decompose_constructed);
790  }
791 
792  decompose_copy_constructed = 0;
793  decompose_copy_assigned = 0;
794  decompose_move_constructed = 0;
795  decompose_move_assigned = 0;
796  int expected_copy_constructed = 0;
797  int expected_move_constructed = 0;
798  { // raw_hash_set(first, last) with random-access iterators
799  DecomposeSet set2(elem_vector.begin(), elem_vector.end());
800  // Expect exactly one copy-constructor call for each element if no
801  // rehashing is done.
802  expected_copy_constructed += elem_vector_count;
803  EXPECT_EQ(expected_copy_constructed, decompose_copy_constructed);
804  EXPECT_EQ(expected_move_constructed, decompose_move_constructed);
805  EXPECT_EQ(0, decompose_move_assigned);
806  EXPECT_EQ(0, decompose_copy_assigned);
807  }
808 
809  { // raw_hash_set(first, last) with forward iterators
810  std::list<DecomposeType> elem_list(elem_vector.begin(), elem_vector.end());
811  expected_copy_constructed = decompose_copy_constructed;
812  DecomposeSet set2(elem_list.begin(), elem_list.end());
813  // Expect exactly N elements copied into set, expect at most 2*N elements
814  // moving internally for all resizing needed (for a growth factor of 2).
815  expected_copy_constructed += elem_vector_count;
816  EXPECT_EQ(expected_copy_constructed, decompose_copy_constructed);
817  expected_move_constructed += elem_vector_count;
818  EXPECT_LT(expected_move_constructed, decompose_move_constructed);
819  expected_move_constructed += elem_vector_count;
820  EXPECT_GE(expected_move_constructed, decompose_move_constructed);
821  EXPECT_EQ(0, decompose_move_assigned);
822  EXPECT_EQ(0, decompose_copy_assigned);
823  expected_copy_constructed = decompose_copy_constructed;
824  expected_move_constructed = decompose_move_constructed;
825  }
826 
827  { // insert(first, last)
828  DecomposeSet set2;
829  set2.insert(elem_vector.begin(), elem_vector.end());
830  // Expect exactly N elements copied into set, expect at most 2*N elements
831  // moving internally for all resizing needed (for a growth factor of 2).
832  const int expected_new_elements = elem_vector_count;
833  const int expected_max_element_moves = 2 * elem_vector_count;
834  expected_copy_constructed += expected_new_elements;
835  EXPECT_EQ(expected_copy_constructed, decompose_copy_constructed);
836  expected_move_constructed += expected_max_element_moves;
837  EXPECT_GE(expected_move_constructed, decompose_move_constructed);
838  EXPECT_EQ(0, decompose_move_assigned);
839  EXPECT_EQ(0, decompose_copy_assigned);
840  expected_copy_constructed = decompose_copy_constructed;
841  expected_move_constructed = decompose_move_constructed;
842  }
843 }
844 
845 TEST(Table, Decompose) {
846  TestDecompose<DecomposeHash, DecomposeEq>(false);
847 
848  struct TransparentHashIntOverload {
849  size_t operator()(const DecomposeType& a) const { return a.i; }
850  size_t operator()(int a) const { return a; }
851  };
852  struct TransparentEqIntOverload {
853  bool operator()(const DecomposeType& a, const DecomposeType& b) const {
854  return a.i == b.i;
855  }
856  bool operator()(const DecomposeType& a, int b) const { return a.i == b; }
857  };
858  TestDecompose<TransparentHashIntOverload, DecomposeEq>(true);
859  TestDecompose<TransparentHashIntOverload, TransparentEqIntOverload>(true);
860  TestDecompose<DecomposeHash, TransparentEqIntOverload>(true);
861 }
862 
863 // Returns the largest m such that a table with m elements has the same number
864 // of buckets as a table with n elements.
865 size_t MaxDensitySize(size_t n) {
866  IntTable t;
867  t.reserve(n);
868  for (size_t i = 0; i != n; ++i) t.emplace(i);
869  const size_t c = t.bucket_count();
870  while (c == t.bucket_count()) t.emplace(n++);
871  return t.size() - 1;
872 }
873 
// Hash that collapses keys to their last three decimal digits, producing
// heavy, controllable collisions for the rehash tests.
struct Modulo1000Hash {
  size_t operator()(int x) const { return x % 1000; }
};
877 
878 struct Modulo1000HashTable
879  : public raw_hash_set<IntPolicy, Modulo1000Hash, std::equal_to<int>,
880  std::allocator<int>> {};
881 
882 // Test that rehash with no resize happen in case of many deleted slots.
883 TEST(Table, RehashWithNoResize) {
884  Modulo1000HashTable t;
885  // Adding the same length (and the same hash) strings
886  // to have at least kMinFullGroups groups
887  // with Group::kWidth collisions. Then fill up to MaxDensitySize;
888  const size_t kMinFullGroups = 7;
889  std::vector<int> keys;
890  for (size_t i = 0; i < MaxDensitySize(Group::kWidth * kMinFullGroups); ++i) {
891  int k = i * 1000;
892  t.emplace(k);
893  keys.push_back(k);
894  }
895  const size_t capacity = t.capacity();
896 
897  // Remove elements from all groups except the first and the last one.
898  // All elements removed from full groups will be marked as ctrl_t::kDeleted.
899  const size_t erase_begin = Group::kWidth / 2;
900  const size_t erase_end = (t.size() / Group::kWidth - 1) * Group::kWidth;
901  for (size_t i = erase_begin; i < erase_end; ++i) {
902  EXPECT_EQ(1, t.erase(keys[i])) << i;
903  }
904  keys.erase(keys.begin() + erase_begin, keys.begin() + erase_end);
905 
906  auto last_key = keys.back();
907  size_t last_key_num_probes = GetHashtableDebugNumProbes(t, last_key);
908 
909  // Make sure that we have to make a lot of probes for last key.
910  ASSERT_GT(last_key_num_probes, kMinFullGroups);
911 
912  int x = 1;
913  // Insert and erase one element, before inplace rehash happen.
914  while (last_key_num_probes == GetHashtableDebugNumProbes(t, last_key)) {
915  t.emplace(x);
916  ASSERT_EQ(capacity, t.capacity());
917  // All elements should be there.
918  ASSERT_TRUE(t.find(x) != t.end()) << x;
919  for (const auto& k : keys) {
920  ASSERT_TRUE(t.find(k) != t.end()) << k;
921  }
922  t.erase(x);
923  ++x;
924  }
925 }
926 
927 TEST(Table, InsertEraseStressTest) {
928  IntTable t;
929  const size_t kMinElementCount = 250;
930  std::deque<int> keys;
931  size_t i = 0;
932  for (; i < MaxDensitySize(kMinElementCount); ++i) {
933  t.emplace(i);
934  keys.push_back(i);
935  }
936  const size_t kNumIterations = 1000000;
937  for (; i < kNumIterations; ++i) {
938  ASSERT_EQ(1, t.erase(keys.front()));
939  keys.pop_front();
940  t.emplace(i);
941  keys.push_back(i);
942  }
943 }
944 
945 TEST(Table, InsertOverloads) {
946  StringTable t;
947  // These should all trigger the insert(init_type) overload.
948  t.insert({{}, {}});
949  t.insert({"ABC", {}});
950  t.insert({"DEF", "!!!"});
951 
952  EXPECT_THAT(t, UnorderedElementsAre(Pair("", ""), Pair("ABC", ""),
953  Pair("DEF", "!!!")));
954 }
955 
956 TEST(Table, LargeTable) {
957  IntTable t;
958  for (int64_t i = 0; i != 100000; ++i) t.emplace(i << 40);
959  for (int64_t i = 0; i != 100000; ++i) ASSERT_EQ(i << 40, *t.find(i << 40));
960 }
961 
962 // Timeout if copy is quadratic as it was in Rust.
963 TEST(Table, EnsureNonQuadraticAsInRust) {
964  static const size_t kLargeSize = 1 << 15;
965 
966  IntTable t;
967  for (size_t i = 0; i != kLargeSize; ++i) {
968  t.insert(i);
969  }
970 
971  // If this is quadratic, the test will timeout.
972  IntTable t2;
973  for (const auto& entry : t) t2.insert(entry);
974 }
975 
// Regression test: after clear(), reinserted elements should land near
// their original slots (the backing array is kept, not reallocated).
TEST(Table, ClearBug) {
  IntTable t;
  constexpr size_t capacity = container_internal::Group::kWidth - 1;
  constexpr size_t max_size = capacity / 2 + 1;
  for (size_t i = 0; i < max_size; ++i) {
    t.insert(i);
  }
  ASSERT_EQ(capacity, t.capacity());
  intptr_t original = reinterpret_cast<intptr_t>(&*t.find(2));
  t.clear();
  // clear() must keep the small table's capacity.
  ASSERT_EQ(capacity, t.capacity());
  for (size_t i = 0; i < max_size; ++i) {
    t.insert(i);
  }
  ASSERT_EQ(capacity, t.capacity());
  intptr_t second = reinterpret_cast<intptr_t>(&*t.find(2));
  // We are checking that original and second are close enough to each other
  // that they are probably still in the same group. This is not strictly
  // guaranteed.
  EXPECT_LT(std::abs(original - second),
            capacity * sizeof(IntTable::value_type));
}
998 
999 TEST(Table, Erase) {
1000  IntTable t;
1001  EXPECT_TRUE(t.find(0) == t.end());
1002  auto res = t.emplace(0);
1003  EXPECT_TRUE(res.second);
1004  EXPECT_EQ(1, t.size());
1005  t.erase(res.first);
1006  EXPECT_EQ(0, t.size());
1007  EXPECT_TRUE(t.find(0) == t.end());
1008 }
1009 
1010 TEST(Table, EraseMaintainsValidIterator) {
1011  IntTable t;
1012  const int kNumElements = 100;
1013  for (int i = 0; i < kNumElements; i ++) {
1014  EXPECT_TRUE(t.emplace(i).second);
1015  }
1016  EXPECT_EQ(t.size(), kNumElements);
1017 
1018  int num_erase_calls = 0;
1019  auto it = t.begin();
1020  while (it != t.end()) {
1021  t.erase(it++);
1022  num_erase_calls++;
1023  }
1024 
1025  EXPECT_TRUE(t.empty());
1026  EXPECT_EQ(num_erase_calls, kNumElements);
1027 }
1028 
1029 // Collect N bad keys by following algorithm:
1030 // 1. Create an empty table and reserve it to 2 * N.
1031 // 2. Insert N random elements.
1032 // 3. Take first Group::kWidth - 1 to bad_keys array.
1033 // 4. Clear the table without resize.
1034 // 5. Go to point 2 while N keys not collected
1035 std::vector<int64_t> CollectBadMergeKeys(size_t N) {
1036  static constexpr int kGroupSize = Group::kWidth - 1;
1037 
1038  auto topk_range = [](size_t b, size_t e,
1039  IntTable* t) -> std::vector<int64_t> {
1040  for (size_t i = b; i != e; ++i) {
1041  t->emplace(i);
1042  }
1043  std::vector<int64_t> res;
1044  res.reserve(kGroupSize);
1045  auto it = t->begin();
1046  for (size_t i = b; i != e && i != b + kGroupSize; ++i, ++it) {
1047  res.push_back(*it);
1048  }
1049  return res;
1050  };
1051 
1052  std::vector<int64_t> bad_keys;
1053  bad_keys.reserve(N);
1054  IntTable t;
1055  t.reserve(N * 2);
1056 
1057  for (size_t b = 0; bad_keys.size() < N; b += N) {
1058  auto keys = topk_range(b, b + N, &t);
1059  bad_keys.insert(bad_keys.end(), keys.begin(), keys.end());
1060  t.erase(t.begin(), t.end());
1061  EXPECT_TRUE(t.empty());
1062  }
1063  return bad_keys;
1064 }
1065 
// Accumulated probe-length statistics gathered over one or more tables.
struct ProbeStats {
  // Number of elements with specific probe length over all tested tables.
  std::vector<size_t> all_probes_histogram;
  // Ratios total_probe_length/size for every tested table.
  std::vector<double> single_table_ratios;

  // Merges two ProbeStats: histograms are added element-wise and the
  // per-table ratio lists are concatenated.
  friend ProbeStats operator+(const ProbeStats& a, const ProbeStats& b) {
    ProbeStats res = a;
    res.all_probes_histogram.resize(std::max(res.all_probes_histogram.size(),
                                             b.all_probes_histogram.size()));
    std::transform(b.all_probes_histogram.begin(), b.all_probes_histogram.end(),
                   res.all_probes_histogram.begin(),
                   res.all_probes_histogram.begin(), std::plus<size_t>());
    res.single_table_ratios.insert(res.single_table_ratios.end(),
                                   b.single_table_ratios.begin(),
                                   b.single_table_ratios.end());
    return res;
  }

  // Average ratio total_probe_length/size over tables.
  double AvgRatio() const {
    return std::accumulate(single_table_ratios.begin(),
                           single_table_ratios.end(), 0.0) /
           single_table_ratios.size();
  }

  // Maximum ratio total_probe_length/size over tables.
  double MaxRatio() const {
    return *std::max_element(single_table_ratios.begin(),
                             single_table_ratios.end());
  }

  // Percentile ratio total_probe_length/size over tables.
  // Falls back to MaxRatio() when the percentile index lands past the end.
  double PercentileRatio(double Percentile = 0.95) const {
    auto r = single_table_ratios;
    auto mid = r.begin() + static_cast<size_t>(r.size() * Percentile);
    if (mid != r.end()) {
      // nth_element places the percentile value at `mid` without a full sort.
      std::nth_element(r.begin(), mid, r.end());
      return *mid;
    } else {
      return MaxRatio();
    }
  }

  // Maximum probe length over all elements and all tables.
  size_t MaxProbe() const { return all_probes_histogram.size(); }

  // Fraction of elements with specified probe length.
  std::vector<double> ProbeNormalizedHistogram() const {
    double total_elements = std::accumulate(all_probes_histogram.begin(),
                                            all_probes_histogram.end(), 0ull);
    std::vector<double> res;
    for (size_t p : all_probes_histogram) {
      res.push_back(p / total_elements);
    }
    return res;
  }

  // Smallest probe length such that at least `Percentile` of all elements
  // probe that length or less.
  size_t PercentileProbe(double Percentile = 0.99) const {
    size_t idx = 0;
    for (double p : ProbeNormalizedHistogram()) {
      if (Percentile > p) {
        Percentile -= p;
        ++idx;
      } else {
        return idx;
      }
    }
    return idx;
  }

  // Human-readable summary, used in test failure messages.
  friend std::ostream& operator<<(std::ostream& out, const ProbeStats& s) {
    out << "{AvgRatio:" << s.AvgRatio() << ", MaxRatio:" << s.MaxRatio()
        << ", PercentileRatio:" << s.PercentileRatio()
        << ", MaxProbe:" << s.MaxProbe() << ", Probes=[";
    for (double p : s.ProbeNormalizedHistogram()) {
      out << p << ",";
    }
    out << "]}";

    return out;
  }
};
1149 
// Upper bounds that VerifyStats checks a ProbeStats against.
// NOTE(review): "pecentile" is a typo for "percentile"; renaming the members
// would require touching every user, so it is left as-is here.
struct ExpectedStats {
  double avg_ratio;
  double max_ratio;
  // Pairs of (percentile, max allowed ratio at that percentile).
  std::vector<std::pair<double, double>> pecentile_ratios;
  // Pairs of (percentile, max allowed probe length at that percentile).
  std::vector<std::pair<double, double>> pecentile_probes;

  // Human-readable summary, used in test failure messages.
  friend std::ostream& operator<<(std::ostream& out, const ExpectedStats& s) {
    out << "{AvgRatio:" << s.avg_ratio << ", MaxRatio:" << s.max_ratio
        << ", PercentileRatios: [";
    for (auto el : s.pecentile_ratios) {
      out << el.first << ":" << el.second << ", ";
    }
    out << "], PercentileProbes: [";
    for (auto el : s.pecentile_probes) {
      out << el.first << ":" << el.second << ", ";
    }
    out << "]}";

    return out;
  }
};
1171 
1172 void VerifyStats(size_t size, const ExpectedStats& exp,
1173  const ProbeStats& stats) {
1174  EXPECT_LT(stats.AvgRatio(), exp.avg_ratio) << size << " " << stats;
1175  EXPECT_LT(stats.MaxRatio(), exp.max_ratio) << size << " " << stats;
1176  for (auto pr : exp.pecentile_ratios) {
1177  EXPECT_LE(stats.PercentileRatio(pr.first), pr.second)
1178  << size << " " << pr.first << " " << stats;
1179  }
1180 
1181  for (auto pr : exp.pecentile_probes) {
1182  EXPECT_LE(stats.PercentileProbe(pr.first), pr.second)
1183  << size << " " << pr.first << " " << stats;
1184  }
1185 }
1186 
1187 using ProbeStatsPerSize = std::map<size_t, ProbeStats>;
1188 
1189 // Collect total ProbeStats on num_iters iterations of the following algorithm:
1190 // 1. Create new table and reserve it to keys.size() * 2
1191 // 2. Insert all keys xored with seed
1192 // 3. Collect ProbeStats from final table.
1193 ProbeStats CollectProbeStatsOnKeysXoredWithSeed(
1194  const std::vector<int64_t>& keys, size_t num_iters) {
1195  const size_t reserve_size = keys.size() * 2;
1196 
1197  ProbeStats stats;
1198 
1199  int64_t seed = 0x71b1a19b907d6e33;
1200  while (num_iters--) {
1201  seed = static_cast<int64_t>(static_cast<uint64_t>(seed) * 17 + 13);
1202  IntTable t1;
1203  t1.reserve(reserve_size);
1204  for (const auto& key : keys) {
1205  t1.emplace(key ^ seed);
1206  }
1207 
1208  auto probe_histogram = GetHashtableDebugNumProbesHistogram(t1);
1209  stats.all_probes_histogram.resize(
1210  std::max(stats.all_probes_histogram.size(), probe_histogram.size()));
1211  std::transform(probe_histogram.begin(), probe_histogram.end(),
1212  stats.all_probes_histogram.begin(),
1213  stats.all_probes_histogram.begin(), std::plus<size_t>());
1214 
1215  size_t total_probe_seq_length = 0;
1216  for (size_t i = 0; i < probe_histogram.size(); ++i) {
1217  total_probe_seq_length += i * probe_histogram[i];
1218  }
1219  stats.single_table_ratios.push_back(total_probe_seq_length * 1.0 /
1220  keys.size());
1221  t1.erase(t1.begin(), t1.end());
1222  }
1223  return stats;
1224 }
1225 
// Returns the per-Group::kWidth expected probe-statistics bounds for the
// xor-seed stress test.
ExpectedStats XorSeedExpectedStats() {
  constexpr bool kRandomizesInserts =
#ifdef NDEBUG
      false;
#else   // NDEBUG
      true;
#endif  // NDEBUG

  // The effective load factor is larger in non-opt mode because we insert
  // elements out of order.
  // NOTE(review): the enclosing switch (dispatching on Group::kWidth)
  // appears to be missing from this excerpt -- confirm against the source.
    case 8:
      if (kRandomizesInserts) {
        return {0.05,
                1.0,
                {{0.95, 0.5}},
                {{0.95, 0}, {0.99, 2}, {0.999, 4}, {0.9999, 10}}};
      } else {
        return {0.05,
                2.0,
                {{0.95, 0.1}},
                {{0.95, 0}, {0.99, 2}, {0.999, 4}, {0.9999, 10}}};
      }
    case 16:
      if (kRandomizesInserts) {
        return {0.1,
                2.0,
                {{0.95, 0.1}},
                {{0.95, 0}, {0.99, 1}, {0.999, 8}, {0.9999, 15}}};
      } else {
        return {0.05,
                1.0,
                {{0.95, 0.05}},
                {{0.95, 0}, {0.99, 1}, {0.999, 4}, {0.9999, 10}}};
      }
  }
  ABSL_RAW_LOG(FATAL, "%s", "Unknown Group width");
  return {};
}
1265 
1266 // TODO(b/80415403): Figure out why this test is so flaky, esp. on MSVC
1267 TEST(Table, DISABLED_EnsureNonQuadraticTopNXorSeedByProbeSeqLength) {
1268  ProbeStatsPerSize stats;
1269  std::vector<size_t> sizes = {Group::kWidth << 5, Group::kWidth << 10};
1270  for (size_t size : sizes) {
1271  stats[size] =
1272  CollectProbeStatsOnKeysXoredWithSeed(CollectBadMergeKeys(size), 200);
1273  }
1274  auto expected = XorSeedExpectedStats();
1275  for (size_t size : sizes) {
1276  auto& stat = stats[size];
1277  VerifyStats(size, expected, stat);
1278  }
1279 }
1280 
1281 // Collect total ProbeStats on num_iters iterations of the following algorithm:
1282 // 1. Create new table
1283 // 2. Select 10% of keys and insert 10 elements key * 17 + j * 13
1284 // 3. Collect ProbeStats from final table
1285 ProbeStats CollectProbeStatsOnLinearlyTransformedKeys(
1286  const std::vector<int64_t>& keys, size_t num_iters) {
1287  ProbeStats stats;
1288 
1289  std::random_device rd;
1290  std::mt19937 rng(rd());
1291  auto linear_transform = [](size_t x, size_t y) { return x * 17 + y * 13; };
1292  std::uniform_int_distribution<size_t> dist(0, keys.size()-1);
1293  while (num_iters--) {
1294  IntTable t1;
1295  size_t num_keys = keys.size() / 10;
1296  size_t start = dist(rng);
1297  for (size_t i = 0; i != num_keys; ++i) {
1298  for (size_t j = 0; j != 10; ++j) {
1299  t1.emplace(linear_transform(keys[(i + start) % keys.size()], j));
1300  }
1301  }
1302 
1303  auto probe_histogram = GetHashtableDebugNumProbesHistogram(t1);
1304  stats.all_probes_histogram.resize(
1305  std::max(stats.all_probes_histogram.size(), probe_histogram.size()));
1306  std::transform(probe_histogram.begin(), probe_histogram.end(),
1307  stats.all_probes_histogram.begin(),
1308  stats.all_probes_histogram.begin(), std::plus<size_t>());
1309 
1310  size_t total_probe_seq_length = 0;
1311  for (size_t i = 0; i < probe_histogram.size(); ++i) {
1312  total_probe_seq_length += i * probe_histogram[i];
1313  }
1314  stats.single_table_ratios.push_back(total_probe_seq_length * 1.0 /
1315  t1.size());
1316  t1.erase(t1.begin(), t1.end());
1317  }
1318  return stats;
1319 }
1320 
// Returns the per-Group::kWidth expected probe-statistics bounds for the
// linear-transform stress test.
ExpectedStats LinearTransformExpectedStats() {
  constexpr bool kRandomizesInserts =
#ifdef NDEBUG
      false;
#else   // NDEBUG
      true;
#endif  // NDEBUG

  // The effective load factor is larger in non-opt mode because we insert
  // elements out of order.
  // NOTE(review): the enclosing switch (dispatching on Group::kWidth)
  // appears to be missing from this excerpt -- confirm against the source.
    case 8:
      if (kRandomizesInserts) {
        return {0.1,
                0.5,
                {{0.95, 0.3}},
                {{0.95, 0}, {0.99, 1}, {0.999, 8}, {0.9999, 15}}};
      } else {
        return {0.4,
                0.6,
                {{0.95, 0.5}},
                {{0.95, 1}, {0.99, 14}, {0.999, 23}, {0.9999, 26}}};
      }
    case 16:
      if (kRandomizesInserts) {
        return {0.1,
                0.4,
                {{0.95, 0.3}},
                {{0.95, 1}, {0.99, 2}, {0.999, 9}, {0.9999, 15}}};
      } else {
        return {0.05,
                0.2,
                {{0.95, 0.1}},
                {{0.95, 0}, {0.99, 1}, {0.999, 6}, {0.9999, 10}}};
      }
  }
  ABSL_RAW_LOG(FATAL, "%s", "Unknown Group width");
  return {};
}
1360 
1361 // TODO(b/80415403): Figure out why this test is so flaky.
1362 TEST(Table, DISABLED_EnsureNonQuadraticTopNLinearTransformByProbeSeqLength) {
1363  ProbeStatsPerSize stats;
1364  std::vector<size_t> sizes = {Group::kWidth << 5, Group::kWidth << 10};
1365  for (size_t size : sizes) {
1366  stats[size] = CollectProbeStatsOnLinearlyTransformedKeys(
1367  CollectBadMergeKeys(size), 300);
1368  }
1369  auto expected = LinearTransformExpectedStats();
1370  for (size_t size : sizes) {
1371  auto& stat = stats[size];
1372  VerifyStats(size, expected, stat);
1373  }
1374 }
1375 
// Erasing colliding elements must leave the remaining ones findable through
// the resulting DELETED (tombstone) slots.
TEST(Table, EraseCollision) {
  BadTable t;  // presumably BadTable's hash forces these keys to collide --
               // see its alias earlier in this file.

  // 1 2 3
  t.emplace(1);
  t.emplace(2);
  t.emplace(3);
  EXPECT_THAT(*t.find(1), 1);
  EXPECT_THAT(*t.find(2), 2);
  EXPECT_THAT(*t.find(3), 3);
  EXPECT_EQ(3, t.size());

  // 1 DELETED 3
  t.erase(t.find(2));
  EXPECT_THAT(*t.find(1), 1);
  EXPECT_TRUE(t.find(2) == t.end());
  EXPECT_THAT(*t.find(3), 3);
  EXPECT_EQ(2, t.size());

  // DELETED DELETED 3
  t.erase(t.find(1));
  EXPECT_TRUE(t.find(1) == t.end());
  EXPECT_TRUE(t.find(2) == t.end());
  EXPECT_THAT(*t.find(3), 3);
  EXPECT_EQ(1, t.size());

  // DELETED DELETED DELETED
  t.erase(t.find(3));
  EXPECT_TRUE(t.find(1) == t.end());
  EXPECT_TRUE(t.find(2) == t.end());
  EXPECT_TRUE(t.find(3) == t.end());
  EXPECT_EQ(0, t.size());
}
1409 
1410 TEST(Table, EraseInsertProbing) {
1411  BadTable t(100);
1412 
1413  // 1 2 3 4
1414  t.emplace(1);
1415  t.emplace(2);
1416  t.emplace(3);
1417  t.emplace(4);
1418 
1419  // 1 DELETED 3 DELETED
1420  t.erase(t.find(2));
1421  t.erase(t.find(4));
1422 
1423  // 1 10 3 11 12
1424  t.emplace(10);
1425  t.emplace(11);
1426  t.emplace(12);
1427 
1428  EXPECT_EQ(5, t.size());
1429  EXPECT_THAT(t, UnorderedElementsAre(1, 10, 3, 11, 12));
1430 }
1431 
1432 TEST(Table, Clear) {
1433  IntTable t;
1434  EXPECT_TRUE(t.find(0) == t.end());
1435  t.clear();
1436  EXPECT_TRUE(t.find(0) == t.end());
1437  auto res = t.emplace(0);
1438  EXPECT_TRUE(res.second);
1439  EXPECT_EQ(1, t.size());
1440  t.clear();
1441  EXPECT_EQ(0, t.size());
1442  EXPECT_TRUE(t.find(0) == t.end());
1443 }
1444 
1445 TEST(Table, Swap) {
1446  IntTable t;
1447  EXPECT_TRUE(t.find(0) == t.end());
1448  auto res = t.emplace(0);
1449  EXPECT_TRUE(res.second);
1450  EXPECT_EQ(1, t.size());
1451  IntTable u;
1452  t.swap(u);
1453  EXPECT_EQ(0, t.size());
1454  EXPECT_EQ(1, u.size());
1455  EXPECT_TRUE(t.find(0) == t.end());
1456  EXPECT_THAT(*u.find(0), 0);
1457 }
1458 
1459 TEST(Table, Rehash) {
1460  IntTable t;
1461  EXPECT_TRUE(t.find(0) == t.end());
1462  t.emplace(0);
1463  t.emplace(1);
1464  EXPECT_EQ(2, t.size());
1465  t.rehash(128);
1466  EXPECT_EQ(2, t.size());
1467  EXPECT_THAT(*t.find(0), 0);
1468  EXPECT_THAT(*t.find(1), 1);
1469 }
1470 
1471 TEST(Table, RehashDoesNotRehashWhenNotNecessary) {
1472  IntTable t;
1473  t.emplace(0);
1474  t.emplace(1);
1475  auto* p = &*t.find(0);
1476  t.rehash(1);
1477  EXPECT_EQ(p, &*t.find(0));
1478 }
1479 
1480 TEST(Table, RehashZeroDoesNotAllocateOnEmptyTable) {
1481  IntTable t;
1482  t.rehash(0);
1483  EXPECT_EQ(0, t.bucket_count());
1484 }
1485 
1486 TEST(Table, RehashZeroDeallocatesEmptyTable) {
1487  IntTable t;
1488  t.emplace(0);
1489  t.clear();
1490  EXPECT_NE(0, t.bucket_count());
1491  t.rehash(0);
1492  EXPECT_EQ(0, t.bucket_count());
1493 }
1494 
1495 TEST(Table, RehashZeroForcesRehash) {
1496  IntTable t;
1497  t.emplace(0);
1498  t.emplace(1);
1499  auto* p = &*t.find(0);
1500  t.rehash(0);
1501  EXPECT_NE(p, &*t.find(0));
1502 }
1503 
1504 TEST(Table, ConstructFromInitList) {
1505  using P = std::pair<std::string, std::string>;
1506  struct Q {
1507  operator P() const { return {}; }
1508  };
1509  StringTable t = {P(), Q(), {}, {{}, {}}};
1510 }
1511 
1512 TEST(Table, CopyConstruct) {
1513  IntTable t;
1514  t.emplace(0);
1515  EXPECT_EQ(1, t.size());
1516  {
1517  IntTable u(t);
1518  EXPECT_EQ(1, u.size());
1519  EXPECT_THAT(*u.find(0), 0);
1520  }
1521  {
1522  IntTable u{t};
1523  EXPECT_EQ(1, u.size());
1524  EXPECT_THAT(*u.find(0), 0);
1525  }
1526  {
1527  IntTable u = t;
1528  EXPECT_EQ(1, u.size());
1529  EXPECT_THAT(*u.find(0), 0);
1530  }
1531 }
1532 
1533 TEST(Table, CopyConstructWithAlloc) {
1534  StringTable t;
1535  t.emplace("a", "b");
1536  EXPECT_EQ(1, t.size());
1537  StringTable u(t, Alloc<std::pair<std::string, std::string>>());
1538  EXPECT_EQ(1, u.size());
1539  EXPECT_THAT(*u.find("a"), Pair("a", "b"));
1540 }
1541 
// IntTable variant with a user-provided default constructor; used by
// AllocWithExplicitCtor below.  NOTE(review): presumably the point is that
// Alloc<int64_t> is not implicitly constructible -- confirm against Alloc's
// definition earlier in the file.
struct ExplicitAllocIntTable
    : raw_hash_set<IntPolicy, container_internal::hash_default_hash<int64_t>,
                   std::equal_to<int64_t>, Alloc<int64_t>> {
  ExplicitAllocIntTable() {}
};
1547 
1548 TEST(Table, AllocWithExplicitCtor) {
1549  ExplicitAllocIntTable t;
1550  EXPECT_EQ(0, t.size());
1551 }
1552 
1553 TEST(Table, MoveConstruct) {
1554  {
1555  StringTable t;
1556  t.emplace("a", "b");
1557  EXPECT_EQ(1, t.size());
1558 
1559  StringTable u(std::move(t));
1560  EXPECT_EQ(1, u.size());
1561  EXPECT_THAT(*u.find("a"), Pair("a", "b"));
1562  }
1563  {
1564  StringTable t;
1565  t.emplace("a", "b");
1566  EXPECT_EQ(1, t.size());
1567 
1568  StringTable u{std::move(t)};
1569  EXPECT_EQ(1, u.size());
1570  EXPECT_THAT(*u.find("a"), Pair("a", "b"));
1571  }
1572  {
1573  StringTable t;
1574  t.emplace("a", "b");
1575  EXPECT_EQ(1, t.size());
1576 
1577  StringTable u = std::move(t);
1578  EXPECT_EQ(1, u.size());
1579  EXPECT_THAT(*u.find("a"), Pair("a", "b"));
1580  }
1581 }
1582 
1583 TEST(Table, MoveConstructWithAlloc) {
1584  StringTable t;
1585  t.emplace("a", "b");
1586  EXPECT_EQ(1, t.size());
1587  StringTable u(std::move(t), Alloc<std::pair<std::string, std::string>>());
1588  EXPECT_EQ(1, u.size());
1589  EXPECT_THAT(*u.find("a"), Pair("a", "b"));
1590 }
1591 
1592 TEST(Table, CopyAssign) {
1593  StringTable t;
1594  t.emplace("a", "b");
1595  EXPECT_EQ(1, t.size());
1596  StringTable u;
1597  u = t;
1598  EXPECT_EQ(1, u.size());
1599  EXPECT_THAT(*u.find("a"), Pair("a", "b"));
1600 }
1601 
TEST(Table, CopySelfAssign) {
  StringTable t;
  t.emplace("a", "b");
  EXPECT_EQ(1, t.size());
  // Self-assignment must be a no-op; `*&t` defeats compiler warnings about
  // explicit self-assignment.
  t = *&t;
  EXPECT_EQ(1, t.size());
  EXPECT_THAT(*t.find("a"), Pair("a", "b"));
}
1610 
1611 TEST(Table, MoveAssign) {
1612  StringTable t;
1613  t.emplace("a", "b");
1614  EXPECT_EQ(1, t.size());
1615  StringTable u;
1616  u = std::move(t);
1617  EXPECT_EQ(1, u.size());
1618  EXPECT_THAT(*u.find("a"), Pair("a", "b"));
1619 }
1620 
1621 TEST(Table, Equality) {
1622  StringTable t;
1623  std::vector<std::pair<std::string, std::string>> v = {{"a", "b"},
1624  {"aa", "bb"}};
1625  t.insert(std::begin(v), std::end(v));
1626  StringTable u = t;
1627  EXPECT_EQ(u, t);
1628 }
1629 
1630 TEST(Table, Equality2) {
1631  StringTable t;
1632  std::vector<std::pair<std::string, std::string>> v1 = {{"a", "b"},
1633  {"aa", "bb"}};
1634  t.insert(std::begin(v1), std::end(v1));
1635  StringTable u;
1636  std::vector<std::pair<std::string, std::string>> v2 = {{"a", "a"},
1637  {"aa", "aa"}};
1638  u.insert(std::begin(v2), std::end(v2));
1639  EXPECT_NE(u, t);
1640 }
1641 
1642 TEST(Table, Equality3) {
1643  StringTable t;
1644  std::vector<std::pair<std::string, std::string>> v1 = {{"b", "b"},
1645  {"bb", "bb"}};
1646  t.insert(std::begin(v1), std::end(v1));
1647  StringTable u;
1648  std::vector<std::pair<std::string, std::string>> v2 = {{"a", "a"},
1649  {"aa", "aa"}};
1650  u.insert(std::begin(v2), std::end(v2));
1651  EXPECT_NE(u, t);
1652 }
1653 
// Regression test: emplacing over a deleted slot and then clearing the
// table must not trip internal accounting of deleted slots.
TEST(Table, NumDeletedRegression) {
  IntTable t;
  t.emplace(0);
  t.erase(t.find(0));
  // construct over a deleted slot.
  t.emplace(0);
  t.clear();
}
1662 
1663 TEST(Table, FindFullDeletedRegression) {
1664  IntTable t;
1665  for (int i = 0; i < 1000; ++i) {
1666  t.emplace(i);
1667  t.erase(t.find(i));
1668  }
1669  EXPECT_EQ(0, t.size());
1670 }
1671 
TEST(Table, ReplacingDeletedSlotDoesNotRehash) {
  size_t n;
  {
    // Compute n such that n is the maximum number of elements before rehash.
    IntTable t;
    t.emplace(0);
    size_t c = t.bucket_count();
    for (n = 1; c == t.bucket_count(); ++n) t.emplace(n);
    --n;
  }
  // Fill a fresh table exactly to that threshold.
  IntTable t;
  t.rehash(n);
  const size_t c = t.bucket_count();
  for (size_t i = 0; i != n; ++i) t.emplace(i);
  EXPECT_EQ(c, t.bucket_count()) << "rehashing threshold = " << n;
  // Erasing and re-inserting the same key must reuse the deleted slot
  // instead of growing the table.
  t.erase(0);
  t.emplace(0);
  EXPECT_EQ(c, t.bucket_count()) << "rehashing threshold = " << n;
}
1691 
// The hash, equality, and allocator functors must be nothrow copy
// constructible for the table's move constructor to be noexcept.
// NOTE(review): a final assertion appears to be missing from this excerpt --
// confirm against the original source.
TEST(Table, NoThrowMoveConstruct) {
  ASSERT_TRUE(
      std::is_nothrow_copy_constructible<absl::Hash<absl::string_view>>::value);
  ASSERT_TRUE(std::is_nothrow_copy_constructible<
              std::equal_to<absl::string_view>>::value);
  ASSERT_TRUE(std::is_nothrow_copy_constructible<std::allocator<int>>::value);
}
1700 
// The hash, equality, and allocator functors must be nothrow move
// assignable (and the allocator always-equal) for the table's move
// assignment to be noexcept.
// NOTE(review): a final assertion appears to be missing from this excerpt --
// confirm against the original source.
TEST(Table, NoThrowMoveAssign) {
  ASSERT_TRUE(
      std::is_nothrow_move_assignable<absl::Hash<absl::string_view>>::value);
  ASSERT_TRUE(
      std::is_nothrow_move_assignable<std::equal_to<absl::string_view>>::value);
  ASSERT_TRUE(std::is_nothrow_move_assignable<std::allocator<int>>::value);
  ASSERT_TRUE(
      absl::allocator_traits<std::allocator<int>>::is_always_equal::value);
}
1711 
// The functors and the table itself must be nothrow swappable.
// NOTE(review): the first assertion's argument (and likely an assertion for
// the hash functor) appear to be missing from this excerpt -- confirm
// against the original source.
TEST(Table, NoThrowSwappable) {
  ASSERT_TRUE(
      std::equal_to<absl::string_view>>());
  ASSERT_TRUE(container_internal::IsNoThrowSwappable<std::allocator<int>>());
  EXPECT_TRUE(container_internal::IsNoThrowSwappable<StringTable>());
}
1720 
TEST(Table, HeterogeneousLookup) {
  // Non-transparent functors: ADD_FAILURE fires if the table ever hashes or
  // compares a double, proving the query is converted to int64_t first.
  struct Hash {
    size_t operator()(int64_t i) const { return i; }
    size_t operator()(double i) const {
      ADD_FAILURE();
      return i;
    }
  };
  struct Eq {
    bool operator()(int64_t a, int64_t b) const { return a == b; }
    bool operator()(double a, int64_t b) const {
      ADD_FAILURE();
      return a == b;
    }
    bool operator()(int64_t a, double b) const {
      ADD_FAILURE();
      return a == b;
    }
    bool operator()(double a, double b) const {
      ADD_FAILURE();
      return a == b;
    }
  };

  // Transparent functors: is_transparent opts in to heterogeneous lookup,
  // so the double overloads are invoked directly.
  struct THash {
    using is_transparent = void;
    size_t operator()(int64_t i) const { return i; }
    size_t operator()(double i) const { return i; }
  };
  struct TEq {
    using is_transparent = void;
    bool operator()(int64_t a, int64_t b) const { return a == b; }
    bool operator()(double a, int64_t b) const { return a == b; }
    bool operator()(int64_t a, double b) const { return a == b; }
    bool operator()(double a, double b) const { return a == b; }
  };

  raw_hash_set<IntPolicy, Hash, Eq, Alloc<int64_t>> s{0, 1, 2};
  // It will convert to int64_t before the query.
  EXPECT_EQ(1, *s.find(double{1.1}));

  raw_hash_set<IntPolicy, THash, TEq, Alloc<int64_t>> ts{0, 1, 2};
  // It will try to use the double, and fail to find the object.
  EXPECT_TRUE(ts.find(1.1) == ts.end());
}
1766 
// Detection-idiom helpers: each alias below is well-formed only if the
// corresponding heterogeneous (int-keyed) call compiles for the Table.
template <class Table>
using CallFind = decltype(std::declval<Table&>().find(17));

template <class Table>
using CallErase = decltype(std::declval<Table&>().erase(17));

template <class Table>
using CallExtract = decltype(std::declval<Table&>().extract(17));

template <class Table>
using CallPrefetch = decltype(std::declval<Table&>().prefetch(17));

template <class Table>
using CallCount = decltype(std::declval<Table&>().count(17));

// VerifyResultOf<C, Table> is true_type iff C<Table> is well-formed
// (standard void_t detection idiom).
template <template <typename> class C, class Table, class = void>
struct VerifyResultOf : std::false_type {};

template <template <typename> class C, class Table>
struct VerifyResultOf<C, Table, absl::void_t<C<Table>>> : std::true_type {};
1787 
TEST(Table, HeterogeneousLookupOverloads) {
  using NonTransparentTable =
      raw_hash_set<StringPolicy, absl::Hash<absl::string_view>,
                   std::equal_to<absl::string_view>, std::allocator<int>>;

  // Without transparent hash/eq, int-keyed lookups must not compile.
  EXPECT_FALSE((VerifyResultOf<CallFind, NonTransparentTable>()));
  EXPECT_FALSE((VerifyResultOf<CallErase, NonTransparentTable>()));
  EXPECT_FALSE((VerifyResultOf<CallExtract, NonTransparentTable>()));
  EXPECT_FALSE((VerifyResultOf<CallPrefetch, NonTransparentTable>()));
  EXPECT_FALSE((VerifyResultOf<CallCount, NonTransparentTable>()));

  // NOTE(review): the transparent hash/equality template arguments appear
  // to be missing from this excerpt -- confirm against the original source.
  using TransparentTable = raw_hash_set<
      StringPolicy,
      std::allocator<int>>;

  // With transparent functors, all heterogeneous calls must compile.
  EXPECT_TRUE((VerifyResultOf<CallFind, TransparentTable>()));
  EXPECT_TRUE((VerifyResultOf<CallErase, TransparentTable>()));
  EXPECT_TRUE((VerifyResultOf<CallExtract, TransparentTable>()));
  EXPECT_TRUE((VerifyResultOf<CallPrefetch, TransparentTable>()));
  EXPECT_TRUE((VerifyResultOf<CallCount, TransparentTable>()));
}
1811 
1812 // TODO(alkis): Expand iterator tests.
TEST(Iterator, IsDefaultConstructible) {
  // NOTE(review): the body appears truncated in this excerpt; presumably it
  // default-constructs a StringTable::iterator and compares it to another,
  // mirroring the ConstIterator test below -- confirm against the source.
}
1817 
1818 TEST(ConstIterator, IsDefaultConstructible) {
1819  StringTable::const_iterator i;
1820  EXPECT_TRUE(i == StringTable::const_iterator());
1821 }
1822 
TEST(Iterator, ConvertsToConstIterator) {
  // NOTE(review): the declaration of `i` (presumably a default-constructed
  // StringTable::iterator) appears to be missing from this excerpt --
  // confirm against the original source.
  EXPECT_TRUE(i == StringTable::const_iterator());
}
1827 
1828 TEST(Iterator, Iterates) {
1829  IntTable t;
1830  for (size_t i = 3; i != 6; ++i) EXPECT_TRUE(t.emplace(i).second);
1831  EXPECT_THAT(t, UnorderedElementsAre(3, 4, 5));
1832 }
1833 
1834 TEST(Table, Merge) {
1835  StringTable t1, t2;
1836  t1.emplace("0", "-0");
1837  t1.emplace("1", "-1");
1838  t2.emplace("0", "~0");
1839  t2.emplace("2", "~2");
1840 
1841  EXPECT_THAT(t1, UnorderedElementsAre(Pair("0", "-0"), Pair("1", "-1")));
1842  EXPECT_THAT(t2, UnorderedElementsAre(Pair("0", "~0"), Pair("2", "~2")));
1843 
1844  t1.merge(t2);
1845  EXPECT_THAT(t1, UnorderedElementsAre(Pair("0", "-0"), Pair("1", "-1"),
1846  Pair("2", "~2")));
1847  EXPECT_THAT(t2, UnorderedElementsAre(Pair("0", "~0")));
1848 }
1849 
// Range insertion must work when value_type is only *explicitly*
// constructible from the iterator's reference type.
TEST(Table, IteratorEmplaceConstructibleRequirement) {
  struct Value {
    explicit Value(absl::string_view view) : value(view) {}
    // NOTE(review): the declaration of the `value` member appears to be
    // missing from this excerpt -- confirm against the original source.

    bool operator==(const Value& other) const { return value == other.value; }
  };
  struct H {
    size_t operator()(const Value& v) const {
      return absl::Hash<std::string>{}(v.value);
    }
  };

  struct Table : raw_hash_set<ValuePolicy<Value>, H, std::equal_to<Value>,
                              std::allocator<Value>> {
    using Base = typename Table::raw_hash_set;
    using Base::Base;
  };

  std::string input[3]{"A", "B", "C"};

  // Range constructor.
  Table t(std::begin(input), std::end(input));
  EXPECT_THAT(t, UnorderedElementsAre(Value{"A"}, Value{"B"}, Value{"C"}));

  input[0] = "D";
  input[1] = "E";
  input[2] = "F";
  // Range insert into a non-empty table.
  t.insert(std::begin(input), std::end(input));
  EXPECT_THAT(t, UnorderedElementsAre(Value{"A"}, Value{"B"}, Value{"C"},
                                      Value{"D"}, Value{"E"}, Value{"F"}));
}
1881 
1882 TEST(Nodes, EmptyNodeType) {
1883  using node_type = StringTable::node_type;
1884  node_type n;
1885  EXPECT_FALSE(n);
1886  EXPECT_TRUE(n.empty());
1887 
1888  EXPECT_TRUE((std::is_same<node_type::allocator_type,
1889  StringTable::allocator_type>::value));
1890 }
1891 
// End-to-end node-handle test: extract, move-insert, failed extract, empty
// insert, and duplicate insert.
// NOTE(review): a couple of lines appear to be missing from this excerpt
// (around the insert_return_type checks) -- confirm against the source.
TEST(Nodes, ExtractInsert) {
  constexpr char k0[] = "Very long string zero.";
  constexpr char k1[] = "Very long string one.";
  constexpr char k2[] = "Very long string two.";
  StringTable t = {{k0, ""}, {k1, ""}, {k2, ""}};
  EXPECT_THAT(t,
              UnorderedElementsAre(Pair(k0, ""), Pair(k1, ""), Pair(k2, "")));

  // Extracting k0 removes it from the table and yields a non-empty node.
  auto node = t.extract(k0);
  EXPECT_THAT(t, UnorderedElementsAre(Pair(k1, ""), Pair(k2, "")));
  EXPECT_TRUE(node);
  EXPECT_FALSE(node.empty());

  // Inserting the node into another table consumes it.
  StringTable t2;
  StringTable::insert_return_type res = t2.insert(std::move(node));
  EXPECT_TRUE(res.inserted);
  EXPECT_THAT(*res.position, Pair(k0, ""));
  EXPECT_FALSE(res.node);

  // Not there.
  EXPECT_THAT(t, UnorderedElementsAre(Pair(k1, ""), Pair(k2, "")));
  node = t.extract("Not there!");
  EXPECT_THAT(t, UnorderedElementsAre(Pair(k1, ""), Pair(k2, "")));
  EXPECT_FALSE(node);

  // Inserting nothing.
  res = t2.insert(std::move(node));
  EXPECT_FALSE(res.inserted);
  EXPECT_EQ(res.position, t2.end());
  EXPECT_FALSE(res.node);

  t.emplace(k0, "1");
  node = t.extract(k0);

  // Insert duplicate.
  res = t2.insert(std::move(node));
  EXPECT_FALSE(res.inserted);
  EXPECT_THAT(*res.position, Pair(k0, ""));
  EXPECT_TRUE(res.node);
  EXPECT_FALSE(node);
}
1935 
// Hinted node insert: a successful insert consumes the node; a failed one
// (duplicate key) leaves the node populated.
// NOTE(review): a couple of verification lines appear to be missing from
// this excerpt -- confirm against the original source.
TEST(Nodes, HintInsert) {
  IntTable t = {1, 2, 3};
  auto node = t.extract(1);
  // Successful hinted insert consumes the node.
  auto it = t.insert(t.begin(), std::move(node));
  EXPECT_THAT(t, UnorderedElementsAre(1, 2, 3));
  EXPECT_EQ(*it, 1);
  EXPECT_FALSE(node);

  node = t.extract(2);
  // reinsert 2 to make the next insert fail.
  t.insert(2);
  EXPECT_THAT(t, UnorderedElementsAre(1, 2, 3));
  it = t.insert(t.begin(), std::move(node));
  EXPECT_EQ(*it, 2);
  // The node was not emptied by the insert call.
  EXPECT_TRUE(node);
}
1955 
1956 IntTable MakeSimpleTable(size_t size) {
1957  IntTable t;
1958  while (t.size() < size) t.insert(t.size());
1959  return t;
1960 }
1961 
1962 std::vector<int> OrderOfIteration(const IntTable& t) {
1963  return {t.begin(), t.end()};
1964 }
1965 
1966 // These IterationOrderChanges tests depend on non-deterministic behavior.
1967 // We are injecting non-determinism from the pointer of the table, but do so in
1968 // a way that only the page matters. We have to retry enough times to make sure
1969 // we are touching different memory pages to cause the ordering to change.
1970 // We also need to keep the old tables around to avoid getting the same memory
1971 // blocks over and over.
1972 TEST(Table, IterationOrderChangesByInstance) {
1973  for (size_t size : {2, 6, 12, 20}) {
1974  const auto reference_table = MakeSimpleTable(size);
1975  const auto reference = OrderOfIteration(reference_table);
1976 
1977  std::vector<IntTable> tables;
1978  bool found_difference = false;
1979  for (int i = 0; !found_difference && i < 5000; ++i) {
1980  tables.push_back(MakeSimpleTable(size));
1981  found_difference = OrderOfIteration(tables.back()) != reference;
1982  }
1983  if (!found_difference) {
1984  FAIL()
1985  << "Iteration order remained the same across many attempts with size "
1986  << size;
1987  }
1988  }
1989 }
1990 
1991 TEST(Table, IterationOrderChangesOnRehash) {
1992  std::vector<IntTable> garbage;
1993  for (int i = 0; i < 5000; ++i) {
1994  auto t = MakeSimpleTable(20);
1995  const auto reference = OrderOfIteration(t);
1996  // Force rehash to the same size.
1997  t.rehash(0);
1998  auto trial = OrderOfIteration(t);
1999  if (trial != reference) {
2000  // We are done.
2001  return;
2002  }
2003  garbage.push_back(std::move(t));
2004  }
2005  FAIL() << "Iteration order remained the same across many attempts.";
2006 }
2007 
2008 // Verify that pointers are invalidated as soon as a second element is inserted.
2009 // This prevents dependency on pointer stability on small tables.
2010 TEST(Table, UnstablePointers) {
2011  IntTable table;
2012 
2013  const auto addr = [&](int i) {
2014  return reinterpret_cast<uintptr_t>(&*table.find(i));
2015  };
2016 
2017  table.insert(0);
2018  const uintptr_t old_ptr = addr(0);
2019 
2020  // This causes a rehash.
2021  table.insert(1);
2022 
2023  EXPECT_NE(old_ptr, addr(0));
2024 }
2025 
2026 // Confirm that we assert if we try to erase() end().
2027 TEST(TableDeathTest, EraseOfEndAsserts) {
2028  // Use an assert with side-effects to figure out if they are actually enabled.
2029  bool assert_enabled = false;
2030  assert([&]() {
2031  assert_enabled = true;
2032  return true;
2033  }());
2034  if (!assert_enabled) return;
2035 
2036  IntTable t;
2037  // Extra simple "regexp" as regexp support is highly varied across platforms.
2038  constexpr char kDeathMsg[] = "erase.. called on invalid iterator";
2039  EXPECT_DEATH_IF_SUPPORTED(t.erase(t.end()), kDeathMsg);
2040 }
2041 
2042 #if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
// Verifies that hashtablez sampling picks up a representative slice of the
// tables created below: the sampled fraction, the distribution of hash
// checksums, and the recorded reservation sizes are all checked statistically.
TEST(RawHashSamplerTest, Sample) {
  // Enable the feature even if the prod default is off.
  SetHashtablezEnabled(true);

  // Snapshot the sampler first so infos left over from other tests can be
  // excluded from the counts computed below.
  auto& sampler = GlobalHashtablezSampler();
  size_t start_size = 0;
  std::unordered_set<const HashtablezInfo*> preexisting_info;
  start_size += sampler.Iterate([&](const HashtablezInfo& info) {
    preexisting_info.insert(&info);
    ++start_size;
  });

  // Create many tables; some get reserve() and some get rehash() so the
  // sampler sees a variety of reservation values.
  std::vector<IntTable> tables;
  for (int i = 0; i < 1000000; ++i) {
    tables.emplace_back();

    const bool do_reserve = (i % 10 > 5);
    const bool do_rehash = !do_reserve && (i % 10 > 0);

    if (do_reserve) {
      // Don't reserve on all tables.
      tables.back().reserve(10 * (i % 10));
    }

    tables.back().insert(1);
    tables.back().insert(i % 5);

    if (do_rehash) {
      // Rehash some other tables.
      tables.back().rehash(10 * (i % 10));
    }
  }
  size_t end_size = 0;
  std::unordered_map<size_t, int> observed_checksums;
  std::unordered_map<ssize_t, int> reservations;
  end_size += sampler.Iterate([&](const HashtablezInfo& info) {
    // Only aggregate infos created by this test.
    if (preexisting_info.count(&info) == 0) {
      observed_checksums[info.hashes_bitwise_xor.load(
          std::memory_order_relaxed)]++;
      reservations[info.max_reserve.load(std::memory_order_relaxed)]++;
    }
    EXPECT_EQ(info.inline_element_size, sizeof(int64_t));
    ++end_size;
  });

  // Roughly 1% of the tables should have been sampled; presumably this
  // fraction comes from the sampler's configured rate — TODO confirm.
  EXPECT_NEAR((end_size - start_size) / static_cast<double>(tables.size()),
              0.01, 0.005);
  // Each table holds {1, i % 5}, i.e. one of 5 distinct contents, so 5
  // distinct hash checksums are expected, each at ~1/5 of the samples.
  EXPECT_EQ(observed_checksums.size(), 5);
  for (const auto& [_, count] : observed_checksums) {
    EXPECT_NEAR((100 * count) / static_cast<double>(tables.size()), 0.2, 0.05);
  }

  // reserve()/rehash() used 10 * (i % 10), giving 10 distinct values in
  // [0, 100), each expected at ~1/10 of the samples.
  EXPECT_EQ(reservations.size(), 10);
  for (const auto& [reservation, count] : reservations) {
    EXPECT_GE(reservation, 0);
    EXPECT_LT(reservation, 100);

    EXPECT_NEAR((100 * count) / static_cast<double>(tables.size()), 0.1, 0.05)
        << reservation;
  }
}
2105 #endif // ABSL_INTERNAL_HASHTABLEZ_SAMPLE
2106 
// Verifies that tables using a custom allocator are never sampled: the
// sampler size must not grow even after creating a large number of
// CustomAllocIntTable instances.
TEST(RawHashSamplerTest, DoNotSampleCustomAllocators) {
  // Enable the feature even if the prod default is off.
  SetHashtablezEnabled(true);

  // Record how many infos exist before the test creates any tables.
  auto& sampler = GlobalHashtablezSampler();
  size_t start_size = 0;
  start_size += sampler.Iterate([&](const HashtablezInfo&) { ++start_size; });

  std::vector<CustomAllocIntTable> tables;
  for (int i = 0; i < 1000000; ++i) {
    tables.emplace_back();
    tables.back().insert(1);
  }
  size_t end_size = 0;
  end_size += sampler.Iterate([&](const HashtablezInfo&) { ++end_size; });

  // Expect (nearly) zero new samples despite a million tables.
  EXPECT_NEAR((end_size - start_size) / static_cast<double>(tables.size()),
              0.00, 0.001);
}
2127 
2128 #ifdef ABSL_HAVE_ADDRESS_SANITIZER
// Verifies that, under ASan, every slot of the backing array except the one
// occupied element is poisoned (i.e. unused capacity is not readable).
TEST(Sanitizer, PoisoningUnused) {
  IntTable t;
  t.reserve(5);
  // Insert something to force an allocation.
  int64_t& v1 = *t.insert(0).first;

  // Make sure there is something to test.
  ASSERT_GT(t.capacity(), 1);

  // `slots` points at the table's slot array (obtained via test-only access).
  // Exactly the slot holding v1 must be unpoisoned; all others poisoned.
  for (size_t i = 0; i < t.capacity(); ++i) {
    EXPECT_EQ(slots + i != &v1, __asan_address_is_poisoned(slots + i));
  }
}
2143 
2144 TEST(Sanitizer, PoisoningOnErase) {
2145  IntTable t;
2146  int64_t& v = *t.insert(0).first;
2147 
2148  EXPECT_FALSE(__asan_address_is_poisoned(&v));
2149  t.erase(0);
2150  EXPECT_TRUE(__asan_address_is_poisoned(&v));
2151 }
2152 #endif // ABSL_HAVE_ADDRESS_SANITIZER
2153 
2154 TEST(Table, AlignOne) {
2155  // We previously had a bug in which we were copying a control byte over the
2156  // first slot when alignof(value_type) is 1. We test repeated
2157  // insertions/erases and verify that the behavior is correct.
2158  Uint8Table t;
2159  std::unordered_set<uint8_t> verifier; // NOLINT
2160 
2161  // Do repeated insertions/erases from the table.
2162  for (int64_t i = 0; i < 100000; ++i) {
2163  SCOPED_TRACE(i);
2164  const uint8_t u = (i * -i) & 0xFF;
2165  auto it = t.find(u);
2166  auto verifier_it = verifier.find(u);
2167  if (it == t.end()) {
2168  ASSERT_EQ(verifier_it, verifier.end());
2169  t.insert(u);
2170  verifier.insert(u);
2171  } else {
2172  ASSERT_NE(verifier_it, verifier.end());
2173  t.erase(it);
2174  verifier.erase(verifier_it);
2175  }
2176  }
2177 
2178  EXPECT_EQ(t.size(), verifier.size());
2179  for (uint8_t u : t) {
2180  EXPECT_EQ(verifier.count(u), 1);
2181  }
2182 }
2183 
2184 } // namespace
2185 } // namespace container_internal
2187 } // namespace absl
EXPECT_FALSE
#define EXPECT_FALSE(condition)
Definition: bloaty/third_party/googletest/googletest/include/gtest/gtest.h:1970
ASSERT_NE
#define ASSERT_NE(val1, val2)
Definition: bloaty/third_party/googletest/googletest/include/gtest/gtest.h:2060
absl::swap_internal::Swap
void Swap(T &lhs, T &rhs) noexcept(IsNothrowSwappable< T >::value)
Definition: abseil-cpp/absl/meta/type_traits.h:772
Hash
Hash
Definition: abseil-cpp/absl/container/internal/hash_function_defaults_test.cc:339
regen-readme.it
it
Definition: regen-readme.py:15
absl::str_format_internal::LengthMod::j
@ j
element
static std::function< Slot &(Slot *)> element
Definition: abseil-cpp/absl/container/internal/hash_policy_traits_test.cc:44
absl::hash_internal::Hash
Definition: abseil-cpp/absl/hash/internal/hash.h:1221
testing::Lt
internal::LtMatcher< Rhs > Lt(Rhs x)
Definition: cares/cares/test/gmock-1.8.0/gmock/gmock.h:8603
absl::container_internal::keys
auto keys(const Set &s) -> std::vector< typename std::decay< typename Set::key_type >::type >
Definition: abseil-cpp/absl/container/internal/hash_policy_testing.h:157
find
static void ** find(grpc_chttp2_stream_map *map, uint32_t key)
Definition: stream_map.cc:99
check_tracer_sanity.pattern
pattern
Definition: check_tracer_sanity.py:25
absl::container_internal::SetHashtablezEnabled
void SetHashtablezEnabled(bool enabled)
Definition: abseil-cpp/absl/container/internal/hashtablez_sampler.cc:191
fix_build_deps.c
list c
Definition: fix_build_deps.py:490
begin
char * begin
Definition: abseil-cpp/absl/strings/internal/str_format/float_conversion.cc:1007
EXPECT_THAT
#define EXPECT_THAT(value, matcher)
C
#define C(x)
Definition: abseil-cpp/absl/hash/internal/city_test.cc:49
capacity
uint16_t capacity
Definition: protobuf/src/google/protobuf/descriptor.cc:948
absl::operator<<
ABSL_NAMESPACE_BEGIN std::ostream & operator<<(std::ostream &os, absl::LogSeverity s)
Definition: abseil-cpp/absl/base/log_severity.cc:24
absl::container_internal::DecomposeValue
decltype(std::declval< F >()(std::declval< const Arg & >(), std::declval< Arg >())) DecomposeValue(F &&f, Arg &&arg)
Definition: abseil-cpp/absl/container/internal/container_memory.h:213
y
const double y
Definition: bloaty/third_party/googletest/googlemock/test/gmock-matchers_test.cc:3611
google::protobuf.internal::true_type
integral_constant< bool, true > true_type
Definition: bloaty/third_party/protobuf/src/google/protobuf/stubs/template_util.h:89
seed
static const uint8_t seed[20]
Definition: dsa_test.cc:79
absl::container_internal::RawHashSetTestOnlyAccess::GetSlots
static auto GetSlots(const C &c) -> decltype(c.slots_)
Definition: abseil-cpp/absl/container/internal/raw_hash_set_test.cc:48
absl::string_view
Definition: abseil-cpp/absl/strings/string_view.h:167
google::protobuf.internal::false_type
integral_constant< bool, false > false_type
Definition: bloaty/third_party/protobuf/src/google/protobuf/stubs/template_util.h:90
kCapacity
static constexpr size_t kCapacity
Definition: abseil-cpp/absl/random/internal/pool_urbg.cc:53
elem
Timer elem
Definition: event_engine/iomgr_event_engine/timer_heap_test.cc:109
testing::internal::string
::std::string string
Definition: bloaty/third_party/protobuf/third_party/googletest/googletest/include/gtest/internal/gtest-port.h:881
single_table_ratios
std::vector< double > single_table_ratios
Definition: abseil-cpp/absl/container/internal/raw_hash_set_test.cc:1070
u
OPENSSL_EXPORT pem_password_cb void * u
Definition: pem.h:351
absl::hash_internal::k2
static const uint64_t k2
Definition: abseil-cpp/absl/hash/internal/city.cc:55
absl::TEST
TEST(NotificationTest, SanityTest)
Definition: abseil-cpp/absl/synchronization/notification_test.cc:126
absl::FormatConversionChar::s
@ s
a
int a
Definition: abseil-cpp/absl/container/internal/hash_policy_traits_test.cc:88
testing::Ge
internal::GeMatcher< Rhs > Ge(Rhs x)
Definition: cares/cares/test/gmock-1.8.0/gmock/gmock.h:8585
EXPECT_LE
#define EXPECT_LE(val1, val2)
Definition: bloaty/third_party/googletest/googletest/include/gtest/gtest.h:2030
absl::container_internal::kEmpty
@ kEmpty
xds_manager.p
p
Definition: xds_manager.py:60
absl::allocator_traits
Definition: third_party/abseil-cpp/absl/memory/memory.h:427
second
StrT second
Definition: cxa_demangle.cpp:4885
pecentile_probes
std::vector< std::pair< double, double > > pecentile_probes
Definition: abseil-cpp/absl/container/internal/raw_hash_set_test.cc:1154
env.new
def new
Definition: env.py:51
ABSL_NAMESPACE_END
#define ABSL_NAMESPACE_END
Definition: third_party/abseil-cpp/absl/base/config.h:171
uint8_t
unsigned char uint8_t
Definition: stdint-msvc2008.h:78
absl::allocator_traits::construct
static void construct(Alloc &a, T *p, Args &&... args)
Definition: third_party/abseil-cpp/absl/memory/memory.h:539
iterator
const typedef MCPhysReg * iterator
Definition: MCRegisterInfo.h:27
setup.k
k
Definition: third_party/bloaty/third_party/capstone/bindings/python/setup.py:42
verifier
static void verifier(grpc_server *server, grpc_completion_queue *cq, void *)
Definition: badreq.cc:31
T
#define T(upbtypeconst, upbtype, ctype, default_value)
EXPECT_EQ
#define EXPECT_EQ(a, b)
Definition: iomgr/time_averaged_stats_test.cc:27
testing::ElementsAre
internal::ElementsAreMatcher< ::testing::tuple<> > ElementsAre()
Definition: cares/cares/test/gmock-1.8.0/gmock/gmock.h:13040
hash
uint64_t hash
Definition: ring_hash.cc:284
absl::container_internal::CapacityToGrowth
size_t CapacityToGrowth(size_t capacity)
Definition: abseil-cpp/absl/container/internal/raw_hash_set.h:758
ULL
#define ULL(x)
Definition: bloaty/third_party/protobuf/src/google/protobuf/io/coded_stream_unittest.cc:57
absl::hash_internal::k0
static const uint64_t k0
Definition: abseil-cpp/absl/hash/internal/city.cc:53
start
static uint64_t start
Definition: benchmark-pound.c:74
absl::FormatConversionChar::e
@ e
ABSL_NAMESPACE_BEGIN
#define ABSL_NAMESPACE_BEGIN
Definition: third_party/abseil-cpp/absl/base/config.h:170
SCOPED_TRACE
#define SCOPED_TRACE(message)
Definition: bloaty/third_party/googletest/googletest/include/gtest/gtest.h:2264
autogen_x86imm.f
f
Definition: autogen_x86imm.py:9
asyncio_get_stats.args
args
Definition: asyncio_get_stats.py:40
absl::container_internal::PairArgs
std::pair< std::tuple<>, std::tuple<> > PairArgs()
Definition: abseil-cpp/absl/container/internal/container_memory.h:178
absl::move
constexpr absl::remove_reference_t< T > && move(T &&t) noexcept
Definition: abseil-cpp/absl/utility/utility.h:221
end
char * end
Definition: abseil-cpp/absl/strings/internal/str_format/float_conversion.cc:1008
hpack_encoder_fixtures::Args
Args({0, 16384})
int64_t
signed __int64 int64_t
Definition: stdint-msvc2008.h:89
absl::container_internal::IsNoThrowSwappable
constexpr bool IsNoThrowSwappable(std::true_type={})
Definition: abseil-cpp/absl/container/internal/raw_hash_set.h:305
absl::container_internal::Group
GroupPortableImpl Group
Definition: abseil-cpp/absl/container/internal/raw_hash_set.h:715
max
int max
Definition: bloaty/third_party/zlib/examples/enough.c:170
EXPECT_NE
#define EXPECT_NE(val1, val2)
Definition: bloaty/third_party/googletest/googletest/include/gtest/gtest.h:2028
absl::inlined_vector_internal::ConstIterator
ConstPointer< A > ConstIterator
Definition: abseil-cpp/absl/container/internal/inlined_vector.h:66
setup.v
v
Definition: third_party/bloaty/third_party/capstone/bindings/python/setup.py:42
absl::container_internal::ctrl_t
ctrl_t
Definition: abseil-cpp/absl/container/internal/raw_hash_set.h:422
testing::Eq
internal::EqMatcher< T > Eq(T x)
Definition: cares/cares/test/gmock-1.8.0/gmock/gmock.h:8561
gmock_output_test._
_
Definition: bloaty/third_party/googletest/googlemock/test/gmock_output_test.py:175
gen_stats_data.stats
list stats
Definition: gen_stats_data.py:58
uint64_t
unsigned __int64 uint64_t
Definition: stdint-msvc2008.h:90
absl::container_internal::EmptyGroup
ctrl_t * EmptyGroup()
Definition: abseil-cpp/absl/container/internal/raw_hash_set.h:457
absl::container_internal::GetHashtableDebugNumProbesHistogram
std::vector< size_t > GetHashtableDebugNumProbesHistogram(const C &container)
Definition: abseil-cpp/absl/container/internal/hashtable_debug.h:58
extract
int extract(FILE *in, struct access *index, off_t offset, unsigned char *buf, int len)
Definition: bloaty/third_party/zlib/examples/zran.c:249
pair
std::pair< std::string, std::string > pair
Definition: abseil-cpp/absl/container/internal/raw_hash_set_test.cc:344
absl::compare_internal::value_type
int8_t value_type
Definition: abseil-cpp/absl/types/compare.h:45
FAIL
@ FAIL
Definition: call_creds.cc:42
absl::container_internal::GetHashtableDebugNumProbes
size_t GetHashtableDebugNumProbes(const C &c, const typename C::key_type &key)
Definition: abseil-cpp/absl/container/internal/hashtable_debug.h:49
intptr_t
_W64 signed int intptr_t
Definition: stdint-msvc2008.h:118
absl::synchronization_internal::Nodes
std::vector< int > Nodes
Definition: abseil-cpp/absl/synchronization/internal/graphcycles_test.cc:34
apply_impl
static std::function< int(int)> apply_impl
Definition: abseil-cpp/absl/container/internal/hash_policy_traits_test.cc:46
x
int x
Definition: bloaty/third_party/googletest/googlemock/test/gmock-matchers_test.cc:3610
absl::flags_internal::Alloc
void * Alloc(FlagOpFn op)
Definition: abseil-cpp/absl/flags/internal/flag.h:102
absl::operator==
bool operator==(const absl::InlinedVector< T, N, A > &a, const absl::InlinedVector< T, N, A > &b)
Definition: abseil-cpp/absl/container/inlined_vector.h:794
gen
OPENSSL_EXPORT GENERAL_NAME * gen
Definition: x509v3.h:495
uintptr_t
_W64 unsigned int uintptr_t
Definition: stdint-msvc2008.h:119
absl::container_internal::h2_t
uint8_t h2_t
Definition: abseil-cpp/absl/container/internal/raw_hash_set.h:403
b
uint64_t b
Definition: abseil-cpp/absl/container/internal/layout_test.cc:53
transfer
static std::function< void(void *, Slot *, Slot *)> transfer
Definition: abseil-cpp/absl/container/internal/hash_policy_traits_test.cc:58
ADD_FAILURE
#define ADD_FAILURE()
Definition: bloaty/third_party/googletest/googletest/include/gtest/gtest.h:1911
pecentile_ratios
std::vector< std::pair< double, double > > pecentile_ratios
Definition: abseil-cpp/absl/container/internal/raw_hash_set_test.cc:1153
google::protobuf.text_format.Merge
def Merge(text, message, allow_unknown_extension=False, allow_field_number=False, descriptor_pool=None, allow_unknown_field=False)
Definition: bloaty/third_party/protobuf/python/google/protobuf/text_format.py:659
F
#define F(b, c, d)
Definition: md4.c:112
n
int n
Definition: abseil-cpp/absl/container/btree_test.cc:1080
absl::container_internal::SetHashtablezSampleParameter
void SetHashtablezSampleParameter(int32_t rate)
Definition: abseil-cpp/absl/container/internal/hashtablez_sampler.cc:204
EXPECT_DEATH_IF_SUPPORTED
#define EXPECT_DEATH_IF_SUPPORTED(statement, regex)
Definition: bloaty/third_party/googletest/googletest/include/gtest/gtest-death-test.h:335
H
#define H(b, c, d)
Definition: md4.c:114
setup.idx
idx
Definition: third_party/bloaty/third_party/capstone/bindings/python/setup.py:197
absl::container_internal::GroupPortableImpl::kWidth
static constexpr size_t kWidth
Definition: abseil-cpp/absl/container/internal/raw_hash_set.h:655
Empty
Definition: abseil-cpp/absl/container/internal/compressed_tuple_test.cc:33
testing::Pair
internal::PairMatcher< FirstMatcher, SecondMatcher > Pair(FirstMatcher first_matcher, SecondMatcher second_matcher)
Definition: cares/cares/test/gmock-1.8.0/gmock/gmock.h:9152
google_benchmark.example.empty
def empty(state)
Definition: example.py:31
Match
static bool Match(const upb_msgdef *m, const char *name, const upb_fielddef **f, const upb_oneofdef **o, const char *prefix, const char *suffix)
Definition: protobuf/ruby/ext/google/protobuf_c/message.c:195
absl::container_internal::GrowthToLowerboundCapacity
size_t GrowthToLowerboundCapacity(size_t growth)
Definition: abseil-cpp/absl/container/internal/raw_hash_set.h:773
absl::container_internal::kSentinel
@ kSentinel
Definition: bloaty/third_party/abseil-cpp/absl/container/internal/raw_hash_set.h:263
absl::container_internal::hash_default_hash
typename container_internal::HashEq< T >::Hash hash_default_hash
Definition: abseil-cpp/absl/container/internal/hash_function_defaults.h:150
value
const char * value
Definition: hpack_parser_table.cc:165
absl::container_internal::IsFull
bool IsFull(ctrl_t c)
Definition: abseil-cpp/absl/container/internal/raw_hash_set.h:490
FATAL
#define FATAL(msg)
Definition: task.h:88
absl::void_t
typename type_traits_internal::VoidTImpl< Ts... >::type void_t
Definition: abseil-cpp/absl/meta/type_traits.h:218
absl::container_internal::kDeleted
@ kDeleted
Definition: bloaty/third_party/abseil-cpp/absl/container/internal/raw_hash_set.h:262
absl::flags_internal::CopyConstruct
void CopyConstruct(FlagOpFn op, const void *src, void *dst)
Definition: abseil-cpp/absl/flags/internal/flag.h:115
construct
static std::function< void(void *, Slot *, Slot)> construct
Definition: abseil-cpp/absl/container/internal/hash_policy_traits_test.cc:41
P
Definition: miscompile_with_no_unique_address_test.cc:29
key
const char * key
Definition: hpack_parser_table.cc:164
absl::operator+
constexpr uint128 operator+(uint128 lhs, uint128 rhs)
Definition: abseil-cpp/absl/numeric/int128.h:942
upload.group
group
Definition: bloaty/third_party/googletest/googlemock/scripts/upload.py:397
absl::container_internal::StringHash::is_transparent
void is_transparent
Definition: abseil-cpp/absl/container/internal/hash_function_defaults.h:71
N
#define N
Definition: sync_test.cc:37
google::protobuf.internal.python_message.Clear
Clear
Definition: bloaty/third_party/protobuf/python/google/protobuf/internal/python_message.py:1430
count
int * count
Definition: bloaty/third_party/googletest/googlemock/test/gmock_stress_test.cc:96
Q
#define Q
Definition: hrss.c:904
absl::container_internal::hash_default_eq
typename container_internal::HashEq< T >::Eq hash_default_eq
Definition: abseil-cpp/absl/container/internal/hash_function_defaults.h:157
bssl::acvp::StringEq
static bool StringEq(Span< const uint8_t > a, const char *b)
Definition: modulewrapper.cc:795
absl::str_format_internal::LengthMod::t
@ t
absl::container_internal::GlobalHashtablezSampler
HashtablezSampler & GlobalHashtablezSampler()
Definition: abseil-cpp/absl/container/internal/hashtablez_sampler.cc:63
fix_build_deps.r
r
Definition: fix_build_deps.py:491
std
Definition: grpcpp/impl/codegen/async_unary_call.h:407
EXPECT_LT
#define EXPECT_LT(val1, val2)
Definition: bloaty/third_party/googletest/googletest/include/gtest/gtest.h:2032
accumulate
static void accumulate(upb_pb_encoder *e)
Definition: bloaty/third_party/protobuf/php/ext/google/protobuf/upb.c:7694
absl::container_internal::TrailingZeros
uint32_t TrailingZeros(T x)
Definition: abseil-cpp/absl/container/internal/raw_hash_set.h:315
ASSERT_TRUE
#define ASSERT_TRUE(condition)
Definition: bloaty/third_party/googletest/googletest/include/gtest/gtest.h:1973
testing::UnorderedElementsAre
internal::UnorderedElementsAreMatcher< ::testing::tuple<> > UnorderedElementsAre()
Definition: cares/cares/test/gmock-1.8.0/gmock/gmock.h:13255
absl::hash_internal::k1
static const uint64_t k1
Definition: abseil-cpp/absl/hash/internal/city.cc:54
absl::container_internal::ConvertDeletedToEmptyAndFullToDeleted
void ConvertDeletedToEmptyAndFullToDeleted(ctrl_t *ctrl, size_t capacity)
Definition: abseil-cpp/absl/container/internal/raw_hash_set.cc:56
absl::container_internal::Sample
HashtablezInfoHandle Sample(size_t inline_element_size ABSL_ATTRIBUTE_UNUSED)
Definition: abseil-cpp/absl/container/internal/hashtablez_sampler.h:251
absl::ABSL_NAMESPACE_BEGIN::dummy
int dummy
Definition: function_type_benchmark.cc:28
EXPECT_GE
#define EXPECT_GE(val1, val2)
Definition: bloaty/third_party/googletest/googletest/include/gtest/gtest.h:2034
absl::inlined_vector_internal::Iterator
Pointer< A > Iterator
Definition: abseil-cpp/absl/container/internal/inlined_vector.h:64
input
std::string input
Definition: bloaty/third_party/protobuf/src/google/protobuf/io/tokenizer_unittest.cc:197
EXPECT_TRUE
#define EXPECT_TRUE(condition)
Definition: bloaty/third_party/googletest/googletest/include/gtest/gtest.h:1967
EXPECT_NEAR
#define EXPECT_NEAR(val1, val2, abs_error)
Definition: bloaty/third_party/googletest/googletest/include/gtest/gtest.h:2143
stat
#define stat
Definition: test-fs.c:50
table
uint8_t table[256]
Definition: hpack_parser.cc:456
absl
Definition: abseil-cpp/absl/algorithm/algorithm.h:31
max_ratio
double max_ratio
Definition: abseil-cpp/absl/container/internal/raw_hash_set_test.cc:1152
all_probes_histogram
std::vector< size_t > all_probes_histogram
Definition: abseil-cpp/absl/container/internal/raw_hash_set_test.cc:1068
make_curve25519_tables.d
int d
Definition: make_curve25519_tables.py:53
absl::forward
constexpr T && forward(absl::remove_reference_t< T > &t) noexcept
Definition: abseil-cpp/absl/utility/utility.h:230
Base::Base
Base(int an_x)
Definition: bloaty/third_party/googletest/googletest/test/gtest_unittest.cc:5143
asyncio_get_stats.type
type
Definition: asyncio_get_stats.py:37
Value
struct Value Value
Definition: bloaty/third_party/protobuf/php/ext/google/protobuf/protobuf.h:676
absl::out
char * out
Definition: abseil-cpp/absl/synchronization/mutex.h:1048
avg_ratio
double avg_ratio
Definition: abseil-cpp/absl/container/internal/raw_hash_set_test.cc:1151
absl::container_internal::NormalizeCapacity
size_t NormalizeCapacity(size_t n)
Definition: abseil-cpp/absl/container/internal/raw_hash_set.h:744
ASSERT_GT
#define ASSERT_GT(val1, val2)
Definition: bloaty/third_party/googletest/googletest/include/gtest/gtest.h:2076
size
voidpf void uLong size
Definition: bloaty/third_party/zlib/contrib/minizip/ioapi.h:136
key_type
upb_fieldtype_t key_type
Definition: bloaty/third_party/protobuf/php/ext/google/protobuf/protobuf.h:1071
absl::allocator_traits::destroy
static void destroy(Alloc &a, T *p)
Definition: third_party/abseil-cpp/absl/memory/memory.h:547
t1
Table t1
Definition: abseil-cpp/absl/container/internal/raw_hash_set_allocator_test.cc:185
grpc_core::P
std::function< Poll< absl::StatusOr< T > >()> P
Definition: try_join_test.cc:27
absl::str_format_internal::LengthMod::h
@ h
absl::apply
auto apply(Functor &&functor, Tuple &&t) -> decltype(utility_internal::apply_helper(absl::forward< Functor >(functor), absl::forward< Tuple >(t), absl::make_index_sequence< std::tuple_size< typename std::remove_reference< Tuple >::type >::value >
Definition: abseil-cpp/absl/utility/utility.h:289
ABSL_RAW_LOG
#define ABSL_RAW_LOG(severity,...)
Definition: abseil-cpp/absl/base/internal/raw_logging.h:44
addr
struct sockaddr_in addr
Definition: libuv/docs/code/tcp-echo-server/main.c:10
alloc
std::allocator< int > alloc
Definition: abseil-cpp/absl/container/internal/hash_policy_traits_test.cc:87
destroy
static std::function< void(void *, Slot *)> destroy
Definition: abseil-cpp/absl/container/internal/hash_policy_traits_test.cc:42
i
uint64_t i
Definition: abseil-cpp/absl/container/btree_benchmark.cc:230
prefetch.h
ASSERT_EQ
#define ASSERT_EQ(val1, val2)
Definition: bloaty/third_party/googletest/googletest/include/gtest/gtest.h:2056
absl::container_internal::StringEq::is_transparent
void is_transparent
Definition: abseil-cpp/absl/container/internal/hash_function_defaults.h:82


grpc
Author(s):
autogenerated on Fri May 16 2025 02:59:58