// Copyright 2018 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "absl/container/internal/raw_hash_set.h"

#include <cmath>
#include <cstdint>
#include <deque>
#include <functional>
#include <memory>
#include <numeric>
#include <random>
#include <string>

#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/base/attributes.h"
#include "absl/base/internal/cycleclock.h"
#include "absl/base/internal/raw_logging.h"
#include "absl/container/internal/container_memory.h"
#include "absl/container/internal/hash_function_defaults.h"
#include "absl/container/internal/hash_policy_testing.h"
#include "absl/container/internal/hashtable_debug.h"
#include "absl/strings/string_view.h"

namespace absl {
namespace container_internal {

struct RawHashSetTestOnlyAccess {
  template <typename C>
  static auto GetSlots(const C& c) -> decltype(c.slots_) {
    return c.slots_;
  }
};

namespace {

using ::testing::DoubleNear;
using ::testing::ElementsAre;
using ::testing::Ge;
using ::testing::Lt;
using ::testing::Pair;
using ::testing::UnorderedElementsAre;

TEST(Util, NormalizeCapacity) {
  EXPECT_EQ(1, NormalizeCapacity(0));
  EXPECT_EQ(1, NormalizeCapacity(1));
  EXPECT_EQ(3, NormalizeCapacity(2));
  EXPECT_EQ(3, NormalizeCapacity(3));
  EXPECT_EQ(7, NormalizeCapacity(4));
  EXPECT_EQ(7, NormalizeCapacity(7));
  EXPECT_EQ(15, NormalizeCapacity(8));
  EXPECT_EQ(15, NormalizeCapacity(15));
  EXPECT_EQ(15 * 2 + 1, NormalizeCapacity(15 + 1));
  EXPECT_EQ(15 * 2 + 1, NormalizeCapacity(15 + 2));
}

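// The capacities above are all of the form 2^k - 1. That is not incidental:
// an all-ones capacity lets the table reduce a hash to a slot index with a
// single mask. A follow-up sanity check of that invariant (an added check,
// assuming NormalizeCapacity keeps returning all-ones values, as the cases
// above suggest):
TEST(Util, NormalizeCapacityAllOnes) {
  for (size_t n = 0; n != 1000; ++n) {
    size_t c = NormalizeCapacity(n);
    // c is 2^k - 1 exactly when c + 1 clears every bit of c.
    EXPECT_EQ(size_t{0}, (c + 1) & c) << n;
  }
}
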
TEST(Util, GrowthAndCapacity) {
  // Verify that NormalizeCapacity(GrowthToLowerboundCapacity(growth)) gives
  // the minimum capacity that has enough growth.
  for (size_t growth = 0; growth < 10000; ++growth) {
    SCOPED_TRACE(growth);
    size_t capacity = NormalizeCapacity(GrowthToLowerboundCapacity(growth));
    // The capacity is large enough for `growth`.
    EXPECT_THAT(CapacityToGrowth(capacity), Ge(growth));
    if (growth != 0 && capacity > 1) {
      // There is no smaller capacity that works.
      EXPECT_THAT(CapacityToGrowth(capacity / 2), Lt(growth));
    }
  }

  for (size_t capacity = Group::kWidth - 1; capacity < 10000;
       capacity = 2 * capacity + 1) {
    SCOPED_TRACE(capacity);
    size_t growth = CapacityToGrowth(capacity);
    EXPECT_THAT(growth, Lt(capacity));
    EXPECT_LE(GrowthToLowerboundCapacity(growth), capacity);
    EXPECT_EQ(NormalizeCapacity(GrowthToLowerboundCapacity(growth)), capacity);
  }
}

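// A hedged spot-check of the numbers behind the test above: raw_hash_set.h
// documents a maximum load factor of 7/8, which for capacities 2^k - 1 with
// k >= 4 should make CapacityToGrowth come out to capacity - capacity / 8.
// The check below assumes exactly that relationship and starts at 15 to skip
// the small-capacity special case of 8-wide groups:
TEST(Util, CapacityToGrowthSevenEighths) {
  for (size_t capacity = 15; capacity < 10000; capacity = capacity * 2 + 1) {
    EXPECT_EQ(capacity - capacity / 8, CapacityToGrowth(capacity));
  }
}
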
TEST(Util, probe_seq) {
  probe_seq<16> seq(0, 127);
  auto gen = [&]() {
    size_t res = seq.offset();
    seq.next();
    return res;
  };
  std::vector<size_t> offsets(8);
  std::generate_n(offsets.begin(), 8, gen);
  EXPECT_THAT(offsets, ElementsAre(0, 16, 48, 96, 32, 112, 80, 64));
  seq = probe_seq<16>(128, 127);
  std::generate_n(offsets.begin(), 8, gen);
  EXPECT_THAT(offsets, ElementsAre(0, 16, 48, 96, 32, 112, 80, 64));
}

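// The offsets above are the classic triangular-number probe sequence: step i
// advances by i * Width, so offset(i) is Width * i * (i + 1) / 2 reduced
// modulo (mask + 1). A cross-check of that closed form against the sequence
// the test pins down:
TEST(Util, ProbeSeqMatchesTriangularNumbers) {
  probe_seq<16> seq(0, 127);
  for (size_t i = 0; i != 8; ++i) {
    EXPECT_EQ(seq.offset(), (16 * i * (i + 1) / 2) & 127) << i;
    seq.next();
  }
}
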
TEST(BitMask, Smoke) {
  EXPECT_FALSE((BitMask<uint8_t, 8>(0)));
  EXPECT_TRUE((BitMask<uint8_t, 8>(5)));

  EXPECT_THAT((BitMask<uint8_t, 8>(0)), ElementsAre());
  EXPECT_THAT((BitMask<uint8_t, 8>(0x1)), ElementsAre(0));
  EXPECT_THAT((BitMask<uint8_t, 8>(0x2)), ElementsAre(1));
  EXPECT_THAT((BitMask<uint8_t, 8>(0x3)), ElementsAre(0, 1));
  EXPECT_THAT((BitMask<uint8_t, 8>(0x4)), ElementsAre(2));
  EXPECT_THAT((BitMask<uint8_t, 8>(0x5)), ElementsAre(0, 2));
  EXPECT_THAT((BitMask<uint8_t, 8>(0x55)), ElementsAre(0, 2, 4, 6));
  EXPECT_THAT((BitMask<uint8_t, 8>(0xAA)), ElementsAre(1, 3, 5, 7));
}

TEST(BitMask, WithShift) {
  // See the non-SSE version of Group for details on what this math is for.
  uint64_t ctrl = 0x1716151413121110;
  uint64_t hash = 0x12;
  constexpr uint64_t msbs = 0x8080808080808080ULL;
  constexpr uint64_t lsbs = 0x0101010101010101ULL;
  auto x = ctrl ^ (lsbs * hash);
  uint64_t mask = (x - lsbs) & ~x & msbs;
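  // x has a zero byte exactly where ctrl matches the hash (byte 2 here, the
  // 0x12). Subtracting lsbs makes that byte borrow, and `& ~x & msbs` keeps
  // the resulting high bit. The borrow can ripple into the next byte, which
  // is why byte 3 is also set in the expected mask below; the shift-3
  // BitMask still reports the lowest true candidate, slot 2.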
  EXPECT_EQ(0x0000000080800000, mask);

  BitMask<uint64_t, 8, 3> b(mask);
  EXPECT_EQ(*b, 2);
}

TEST(BitMask, LeadingTrailing) {
  EXPECT_EQ((BitMask<uint32_t, 16>(0x00001a40).LeadingZeros()), 3);
  EXPECT_EQ((BitMask<uint32_t, 16>(0x00001a40).TrailingZeros()), 6);

  EXPECT_EQ((BitMask<uint32_t, 16>(0x00000001).LeadingZeros()), 15);
  EXPECT_EQ((BitMask<uint32_t, 16>(0x00000001).TrailingZeros()), 0);

  EXPECT_EQ((BitMask<uint32_t, 16>(0x00008000).LeadingZeros()), 0);
  EXPECT_EQ((BitMask<uint32_t, 16>(0x00008000).TrailingZeros()), 15);

  EXPECT_EQ((BitMask<uint64_t, 8, 3>(0x0000008080808000).LeadingZeros()), 3);
  EXPECT_EQ((BitMask<uint64_t, 8, 3>(0x0000008080808000).TrailingZeros()), 1);

  EXPECT_EQ((BitMask<uint64_t, 8, 3>(0x0000000000000080).LeadingZeros()), 7);
  EXPECT_EQ((BitMask<uint64_t, 8, 3>(0x0000000000000080).TrailingZeros()), 0);

  EXPECT_EQ((BitMask<uint64_t, 8, 3>(0x8000000000000000).LeadingZeros()), 0);
  EXPECT_EQ((BitMask<uint64_t, 8, 3>(0x8000000000000000).TrailingZeros()), 7);
}

TEST(Group, EmptyGroup) {
  for (h2_t h = 0; h != 128; ++h) EXPECT_FALSE(Group{EmptyGroup()}.Match(h));
}

TEST(Group, Match) {
  if (Group::kWidth == 16) {
    ctrl_t group[] = {kEmpty, 1, kDeleted, 3, kEmpty, 5, kSentinel, 7,
                      7,      5, 3,        1, 1,      1, 1,         1};
    EXPECT_THAT(Group{group}.Match(0), ElementsAre());
    EXPECT_THAT(Group{group}.Match(1), ElementsAre(1, 11, 12, 13, 14, 15));
    EXPECT_THAT(Group{group}.Match(3), ElementsAre(3, 10));
    EXPECT_THAT(Group{group}.Match(5), ElementsAre(5, 9));
    EXPECT_THAT(Group{group}.Match(7), ElementsAre(7, 8));
  } else if (Group::kWidth == 8) {
    ctrl_t group[] = {kEmpty, 1, 2, kDeleted, 2, 1, kSentinel, 1};
    EXPECT_THAT(Group{group}.Match(0), ElementsAre());
    EXPECT_THAT(Group{group}.Match(1), ElementsAre(1, 5, 7));
    EXPECT_THAT(Group{group}.Match(2), ElementsAre(2, 4));
  } else {
    FAIL() << "No test coverage for Group::kWidth==" << Group::kWidth;
  }
}

TEST(Group, MatchEmpty) {
  if (Group::kWidth == 16) {
    ctrl_t group[] = {kEmpty, 1, kDeleted, 3, kEmpty, 5, kSentinel, 7,
                      7,      5, 3,        1, 1,      1, 1,         1};
    EXPECT_THAT(Group{group}.MatchEmpty(), ElementsAre(0, 4));
  } else if (Group::kWidth == 8) {
    ctrl_t group[] = {kEmpty, 1, 2, kDeleted, 2, 1, kSentinel, 1};
    EXPECT_THAT(Group{group}.MatchEmpty(), ElementsAre(0));
  } else {
    FAIL() << "No test coverage for Group::kWidth==" << Group::kWidth;
  }
}

TEST(Group, MatchEmptyOrDeleted) {
  if (Group::kWidth == 16) {
    ctrl_t group[] = {kEmpty, 1, kDeleted, 3, kEmpty, 5, kSentinel, 7,
                      7,      5, 3,        1, 1,      1, 1,         1};
    EXPECT_THAT(Group{group}.MatchEmptyOrDeleted(), ElementsAre(0, 2, 4));
  } else if (Group::kWidth == 8) {
    ctrl_t group[] = {kEmpty, 1, 2, kDeleted, 2, 1, kSentinel, 1};
    EXPECT_THAT(Group{group}.MatchEmptyOrDeleted(), ElementsAre(0, 3));
  } else {
    FAIL() << "No test coverage for Group::kWidth==" << Group::kWidth;
  }
}

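// ConvertDeletedToEmptyAndFullToDeleted is the first half of an in-place
// rehash ("drop deletes without resize"): kDeleted control bytes become
// kEmpty again (reusable), while full control bytes become kDeleted, marking
// slots whose elements still have to be re-placed by the second phase. The
// test below checks that control-byte rewrite, including the cloned bytes
// past the sentinel that mirror the first Group::kWidth - 1 slots.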
TEST(Batch, DropDeletes) {
  constexpr size_t kCapacity = 63;
  constexpr size_t kGroupWidth = container_internal::Group::kWidth;
  std::vector<ctrl_t> ctrl(kCapacity + 1 + kGroupWidth);
  ctrl[kCapacity] = kSentinel;
  std::vector<ctrl_t> pattern = {kEmpty, 2, kDeleted, 2, kEmpty, 1, kDeleted};
  for (size_t i = 0; i != kCapacity; ++i) {
    ctrl[i] = pattern[i % pattern.size()];
    if (i < kGroupWidth - 1)
      ctrl[i + kCapacity + 1] = pattern[i % pattern.size()];
  }
  ConvertDeletedToEmptyAndFullToDeleted(ctrl.data(), kCapacity);
  ASSERT_EQ(ctrl[kCapacity], kSentinel);
  for (size_t i = 0; i < kCapacity + 1 + kGroupWidth; ++i) {
    ctrl_t expected = pattern[i % (kCapacity + 1) % pattern.size()];
    if (i == kCapacity) expected = kSentinel;
    if (expected == kDeleted) expected = kEmpty;
    if (IsFull(expected)) expected = kDeleted;
    EXPECT_EQ(ctrl[i], expected)
        << i << " " << int{pattern[i % pattern.size()]};
  }
}

TEST(Group, CountLeadingEmptyOrDeleted) {
  const std::vector<ctrl_t> empty_examples = {kEmpty, kDeleted};
  const std::vector<ctrl_t> full_examples = {0, 1, 2, 3, 5, 9, 127, kSentinel};

  for (ctrl_t empty : empty_examples) {
    std::vector<ctrl_t> e(Group::kWidth, empty);
    EXPECT_EQ(Group::kWidth, Group{e.data()}.CountLeadingEmptyOrDeleted());
    for (ctrl_t full : full_examples) {
      for (size_t i = 0; i != Group::kWidth; ++i) {
        std::vector<ctrl_t> f(Group::kWidth, empty);
        f[i] = full;
        EXPECT_EQ(i, Group{f.data()}.CountLeadingEmptyOrDeleted());
      }
      std::vector<ctrl_t> f(Group::kWidth, empty);
      f[Group::kWidth * 2 / 3] = full;
      f[Group::kWidth / 2] = full;
      EXPECT_EQ(Group::kWidth / 2,
                Group{f.data()}.CountLeadingEmptyOrDeleted());
    }
  }
}

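// IntPolicy below is the minimal "policy" a raw_hash_set instantiation
// needs: how to construct, destroy, and transfer a slot, how to read the
// element out of a slot, and an apply() that presents an argument as a
// (key, value) pair for the hasher and the equality functor. Slots here are
// bare int64_t, so every operation is a plain copy.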
struct IntPolicy {
  using slot_type = int64_t;
  using key_type = int64_t;
  using init_type = int64_t;

  static void construct(void*, int64_t* slot, int64_t v) { *slot = v; }
  static void destroy(void*, int64_t*) {}
  static void transfer(void*, int64_t* new_slot, int64_t* old_slot) {
    *new_slot = *old_slot;
  }

  static int64_t& element(slot_type* slot) { return *slot; }

  template <class F>
  static auto apply(F&& f, int64_t x) -> decltype(std::forward<F>(f)(x, x)) {
    return std::forward<F>(f)(x, x);
  }
};

class StringPolicy {
  template <class F, class K, class V,
            class = typename std::enable_if<
                std::is_convertible<const K&, absl::string_view>::value>::type>
  decltype(std::declval<F>()(
      std::declval<const absl::string_view&>(), std::piecewise_construct,
      std::declval<std::tuple<K>>(),
      std::declval<V>())) static apply_impl(F&& f,
                                            std::pair<std::tuple<K>, V> p) {
    const absl::string_view& key = std::get<0>(p.first);
    return std::forward<F>(f)(key, std::piecewise_construct, std::move(p.first),
                              std::move(p.second));
  }

 public:
  struct slot_type {
    struct ctor {};

    template <class... Ts>
    slot_type(ctor, Ts&&... ts) : pair(std::forward<Ts>(ts)...) {}

    std::pair<std::string, std::string> pair;
  };

  using key_type = std::string;
  using init_type = std::pair<std::string, std::string>;

  template <class allocator_type, class... Args>
  static void construct(allocator_type* alloc, slot_type* slot, Args... args) {
    absl::allocator_traits<allocator_type>::construct(
        *alloc, slot, typename slot_type::ctor(), std::forward<Args>(args)...);
  }

  template <class allocator_type>
  static void destroy(allocator_type* alloc, slot_type* slot) {
    absl::allocator_traits<allocator_type>::destroy(*alloc, slot);
  }

  template <class allocator_type>
  static void transfer(allocator_type* alloc, slot_type* new_slot,
                       slot_type* old_slot) {
    construct(alloc, new_slot, std::move(old_slot->pair));
    destroy(alloc, old_slot);
  }

  static std::pair<std::string, std::string>& element(slot_type* slot) {
    return slot->pair;
  }

  template <class F, class... Args>
  static auto apply(F&& f, Args&&... args)
      -> decltype(apply_impl(std::forward<F>(f),
                             PairArgs(std::forward<Args>(args)...))) {
    return apply_impl(std::forward<F>(f),
                      PairArgs(std::forward<Args>(args)...));
  }
};

struct StringHash : absl::Hash<absl::string_view> {
  using is_transparent = void;
};
struct StringEq : std::equal_to<absl::string_view> {
  using is_transparent = void;
};

struct StringTable
    : raw_hash_set<StringPolicy, StringHash, StringEq, std::allocator<int>> {
  using Base = typename StringTable::raw_hash_set;
  StringTable() {}
  using Base::Base;
};

struct IntTable
    : raw_hash_set<IntPolicy, container_internal::hash_default_hash<int64_t>,
                   std::equal_to<int64_t>, std::allocator<int64_t>> {
  using Base = typename IntTable::raw_hash_set;
  using Base::Base;
};

template <typename T>
struct CustomAlloc : std::allocator<T> {
  CustomAlloc() {}

  template <typename U>
  CustomAlloc(const CustomAlloc<U>& other) {}

  template <class U>
  struct rebind {
    using other = CustomAlloc<U>;
  };
};

struct CustomAllocIntTable
    : raw_hash_set<IntPolicy, container_internal::hash_default_hash<int64_t>,
                   std::equal_to<int64_t>, CustomAlloc<int64_t>> {
  using Base = typename CustomAllocIntTable::raw_hash_set;
  using Base::Base;
};

struct BadFastHash {
  template <class T>
  size_t operator()(const T&) const {
    return 0;
  }
};

struct BadTable : raw_hash_set<IntPolicy, BadFastHash, std::equal_to<int>,
                               std::allocator<int>> {
  using Base = typename BadTable::raw_hash_set;
  BadTable() {}
  using Base::Base;
};

TEST(Table, EmptyFunctorOptimization) {
  static_assert(std::is_empty<std::equal_to<absl::string_view>>::value, "");
  static_assert(std::is_empty<std::allocator<int>>::value, "");

  struct MockTable {
    void* ctrl;
    void* slots;
    size_t size;
    size_t capacity;
    size_t growth_left;
    void* infoz;
  };
  struct StatelessHash {
    size_t operator()(absl::string_view) const { return 0; }
  };
  struct StatefulHash : StatelessHash {
    size_t dummy;
  };

  EXPECT_EQ(
      sizeof(MockTable),
      sizeof(
          raw_hash_set<StringPolicy, StatelessHash,
                       std::equal_to<absl::string_view>, std::allocator<int>>));

  EXPECT_EQ(
      sizeof(MockTable) + sizeof(StatefulHash),
      sizeof(
          raw_hash_set<StringPolicy, StatefulHash,
                       std::equal_to<absl::string_view>, std::allocator<int>>));
}

TEST(Table, Empty) {
  IntTable t;
  EXPECT_EQ(0, t.size());
  EXPECT_TRUE(t.empty());
}

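// A DoNotOptimize sink in the spirit of benchmark::DoNotOptimize: the empty
// asm statement claims to read `v` (constraint "r,m") and to clobber memory,
// so calls whose results feed into it cannot be elided by the optimizer. The
// timing loop in the Prefetch test below relies on this.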
420 #ifdef __GNUC__
421 template <class T>
422 ABSL_ATTRIBUTE_ALWAYS_INLINE inline void DoNotOptimize(const T& v) {
423  asm volatile("" : : "r,m"(v) : "memory");
424 }
425 #endif
426 
TEST(Table, Prefetch) {
  IntTable t;
  t.emplace(1);
  // Works for both present and absent keys.
  t.prefetch(1);
  t.prefetch(2);

  // Do not run in debug mode, when prefetch is not implemented, when
  // sanitizers are enabled, or on WebAssembly.
#if defined(NDEBUG) && defined(__GNUC__) && defined(__x86_64__) &&          \
    !defined(ADDRESS_SANITIZER) && !defined(MEMORY_SANITIZER) &&            \
    !defined(THREAD_SANITIZER) && !defined(UNDEFINED_BEHAVIOR_SANITIZER) && \
    !defined(__EMSCRIPTEN__)
  const auto now = [] { return absl::base_internal::CycleClock::Now(); };

  // Make the table large enough that it does not fit in the L2 cache
  // (16.7 Mb).
  static constexpr int size = 1 << 22;
  for (int i = 0; i < size; ++i) t.insert(i);

  int64_t no_prefetch = 0, prefetch = 0;
  for (int iter = 0; iter < 10; ++iter) {
    int64_t time = now();
    for (int i = 0; i < size; ++i) {
      DoNotOptimize(t.find(i));
    }
    no_prefetch += now() - time;

    time = now();
    for (int i = 0; i < size; ++i) {
      t.prefetch(i + 20);
      DoNotOptimize(t.find(i));
    }
    prefetch += now() - time;
  }

  // The no-prefetch loop should be at least 30% slower.
  EXPECT_GE(1.0 * no_prefetch / prefetch, 1.3);
#endif
}

TEST(Table, LookupEmpty) {
  IntTable t;
  auto it = t.find(0);
  EXPECT_TRUE(it == t.end());
}

TEST(Table, Insert1) {
  IntTable t;
  EXPECT_TRUE(t.find(0) == t.end());
  auto res = t.emplace(0);
  EXPECT_TRUE(res.second);
  EXPECT_THAT(*res.first, 0);
  EXPECT_EQ(1, t.size());
  EXPECT_THAT(*t.find(0), 0);
}

TEST(Table, Insert2) {
  IntTable t;
  EXPECT_TRUE(t.find(0) == t.end());
  auto res = t.emplace(0);
  EXPECT_TRUE(res.second);
  EXPECT_THAT(*res.first, 0);
  EXPECT_EQ(1, t.size());
  EXPECT_TRUE(t.find(1) == t.end());
  res = t.emplace(1);
  EXPECT_TRUE(res.second);
  EXPECT_THAT(*res.first, 1);
  EXPECT_EQ(2, t.size());
  EXPECT_THAT(*t.find(0), 0);
  EXPECT_THAT(*t.find(1), 1);
}

TEST(Table, InsertCollision) {
  BadTable t;
  EXPECT_TRUE(t.find(1) == t.end());
  auto res = t.emplace(1);
  EXPECT_TRUE(res.second);
  EXPECT_THAT(*res.first, 1);
  EXPECT_EQ(1, t.size());

  EXPECT_TRUE(t.find(2) == t.end());
  res = t.emplace(2);
  EXPECT_THAT(*res.first, 2);
  EXPECT_TRUE(res.second);
  EXPECT_EQ(2, t.size());

  EXPECT_THAT(*t.find(1), 1);
  EXPECT_THAT(*t.find(2), 2);
}

// Test that we do not add an existing element when we have to search through
// many groups containing deleted elements.
TEST(Table, InsertCollisionAndFindAfterDelete) {
  BadTable t;  // all elements go to the same group.
  // Have at least 2 groups with Group::kWidth collisions
  // plus some extra collisions in the last group.
  constexpr size_t kNumInserts = Group::kWidth * 2 + 5;
  for (size_t i = 0; i < kNumInserts; ++i) {
    auto res = t.emplace(i);
    EXPECT_TRUE(res.second);
    EXPECT_THAT(*res.first, i);
    EXPECT_EQ(i + 1, t.size());
  }

  // Remove elements one by one and check
  // that we can still find all the remaining elements.
  for (size_t i = 0; i < kNumInserts; ++i) {
    EXPECT_EQ(1, t.erase(i)) << i;
    for (size_t j = i + 1; j < kNumInserts; ++j) {
      EXPECT_THAT(*t.find(j), j);
      auto res = t.emplace(j);
      EXPECT_FALSE(res.second) << i << " " << j;
      EXPECT_THAT(*res.first, j);
      EXPECT_EQ(kNumInserts - i - 1, t.size());
    }
  }
  EXPECT_TRUE(t.empty());
}

TEST(Table, LazyEmplace) {
  StringTable t;
  bool called = false;
  auto it = t.lazy_emplace("abc", [&](const StringTable::constructor& f) {
    called = true;
    f("abc", "ABC");
  });
  EXPECT_TRUE(called);
  EXPECT_THAT(*it, Pair("abc", "ABC"));
  called = false;
  it = t.lazy_emplace("abc", [&](const StringTable::constructor& f) {
    called = true;
    f("abc", "DEF");
  });
  EXPECT_FALSE(called);
  EXPECT_THAT(*it, Pair("abc", "ABC"));
}

TEST(Table, ContainsEmpty) {
  IntTable t;

  EXPECT_FALSE(t.contains(0));
}

TEST(Table, Contains1) {
  IntTable t;

  EXPECT_TRUE(t.insert(0).second);
  EXPECT_TRUE(t.contains(0));
  EXPECT_FALSE(t.contains(1));

  EXPECT_EQ(1, t.erase(0));
  EXPECT_FALSE(t.contains(0));
}

TEST(Table, Contains2) {
  IntTable t;

  EXPECT_TRUE(t.insert(0).second);
  EXPECT_TRUE(t.contains(0));
  EXPECT_FALSE(t.contains(1));

  t.clear();
  EXPECT_FALSE(t.contains(0));
}

int decompose_constructed;
struct DecomposeType {
  DecomposeType(int i) : i(i) {  // NOLINT
    ++decompose_constructed;
  }

  explicit DecomposeType(const char* d) : DecomposeType(*d) {}

  int i;
};

struct DecomposeHash {
  using is_transparent = void;
  size_t operator()(DecomposeType a) const { return a.i; }
  size_t operator()(int a) const { return a; }
  size_t operator()(const char* a) const { return *a; }
};

struct DecomposeEq {
  using is_transparent = void;
  bool operator()(DecomposeType a, DecomposeType b) const { return a.i == b.i; }
  bool operator()(DecomposeType a, int b) const { return a.i == b; }
  bool operator()(DecomposeType a, const char* b) const { return a.i == *b; }
};

struct DecomposePolicy {
  using slot_type = DecomposeType;
  using key_type = DecomposeType;
  using init_type = DecomposeType;

  template <typename T>
  static void construct(void*, DecomposeType* slot, T&& v) {
    *slot = DecomposeType(std::forward<T>(v));
  }
  static void destroy(void*, DecomposeType*) {}
  static DecomposeType& element(slot_type* slot) { return *slot; }

  template <class F, class T>
  static auto apply(F&& f, const T& x) -> decltype(std::forward<F>(f)(x, x)) {
    return std::forward<F>(f)(x, x);
  }
};

template <typename Hash, typename Eq>
void TestDecompose(bool construct_three) {
  DecomposeType elem{0};
  const int one = 1;
  const char* three_p = "3";
  const auto& three = three_p;

  raw_hash_set<DecomposePolicy, Hash, Eq, std::allocator<int>> set1;

  decompose_constructed = 0;
  int expected_constructed = 0;
  EXPECT_EQ(expected_constructed, decompose_constructed);
  set1.insert(elem);
  EXPECT_EQ(expected_constructed, decompose_constructed);
  set1.insert(1);
  EXPECT_EQ(++expected_constructed, decompose_constructed);
  set1.emplace("3");
  EXPECT_EQ(++expected_constructed, decompose_constructed);
  EXPECT_EQ(expected_constructed, decompose_constructed);

  {  // insert(T&&)
    set1.insert(1);
    EXPECT_EQ(expected_constructed, decompose_constructed);
  }

  {  // insert(const T&)
    set1.insert(one);
    EXPECT_EQ(expected_constructed, decompose_constructed);
  }

  {  // insert(hint, T&&)
    set1.insert(set1.begin(), 1);
    EXPECT_EQ(expected_constructed, decompose_constructed);
  }

  {  // insert(hint, const T&)
    set1.insert(set1.begin(), one);
    EXPECT_EQ(expected_constructed, decompose_constructed);
  }

  {  // emplace(...)
    set1.emplace(1);
    EXPECT_EQ(expected_constructed, decompose_constructed);
    set1.emplace("3");
    expected_constructed += construct_three;
    EXPECT_EQ(expected_constructed, decompose_constructed);
    set1.emplace(one);
    EXPECT_EQ(expected_constructed, decompose_constructed);
    set1.emplace(three);
    expected_constructed += construct_three;
    EXPECT_EQ(expected_constructed, decompose_constructed);
  }

  {  // emplace_hint(...)
    set1.emplace_hint(set1.begin(), 1);
    EXPECT_EQ(expected_constructed, decompose_constructed);
    set1.emplace_hint(set1.begin(), "3");
    expected_constructed += construct_three;
    EXPECT_EQ(expected_constructed, decompose_constructed);
    set1.emplace_hint(set1.begin(), one);
    EXPECT_EQ(expected_constructed, decompose_constructed);
    set1.emplace_hint(set1.begin(), three);
    expected_constructed += construct_three;
    EXPECT_EQ(expected_constructed, decompose_constructed);
  }
}

TEST(Table, Decompose) {
  TestDecompose<DecomposeHash, DecomposeEq>(false);

  struct TransparentHashIntOverload {
    size_t operator()(DecomposeType a) const { return a.i; }
    size_t operator()(int a) const { return a; }
  };
  struct TransparentEqIntOverload {
    bool operator()(DecomposeType a, DecomposeType b) const {
      return a.i == b.i;
    }
    bool operator()(DecomposeType a, int b) const { return a.i == b; }
  };
  TestDecompose<TransparentHashIntOverload, DecomposeEq>(true);
  TestDecompose<TransparentHashIntOverload, TransparentEqIntOverload>(true);
  TestDecompose<DecomposeHash, TransparentEqIntOverload>(true);
}

// Returns the largest m such that a table with m elements has the same number
// of buckets as a table with n elements.
size_t MaxDensitySize(size_t n) {
  IntTable t;
  t.reserve(n);
  for (size_t i = 0; i != n; ++i) t.emplace(i);
  const size_t c = t.bucket_count();
  while (c == t.bucket_count()) t.emplace(n++);
  return t.size() - 1;
}

struct Modulo1000Hash {
  size_t operator()(int x) const { return x % 1000; }
};

struct Modulo1000HashTable
    : public raw_hash_set<IntPolicy, Modulo1000Hash, std::equal_to<int>,
                          std::allocator<int>> {};

// Test that a rehash with no resize happens when there are many deleted slots.
TEST(Table, RehashWithNoResize) {
  Modulo1000HashTable t;
  // Add keys that all collide under Modulo1000Hash so that we get at least
  // kMinFullGroups groups with Group::kWidth collisions each, then fill up
  // to MaxDensitySize.
  const size_t kMinFullGroups = 7;
  std::vector<int> keys;
  for (size_t i = 0; i < MaxDensitySize(Group::kWidth * kMinFullGroups); ++i) {
    int k = i * 1000;
    t.emplace(k);
    keys.push_back(k);
  }
  const size_t capacity = t.capacity();

  // Remove elements from all groups except the first and the last one.
  // All elements removed from full groups will be marked as kDeleted.
  const size_t erase_begin = Group::kWidth / 2;
  const size_t erase_end = (t.size() / Group::kWidth - 1) * Group::kWidth;
  for (size_t i = erase_begin; i < erase_end; ++i) {
    EXPECT_EQ(1, t.erase(keys[i])) << i;
  }
  keys.erase(keys.begin() + erase_begin, keys.begin() + erase_end);

  auto last_key = keys.back();
  size_t last_key_num_probes = GetHashtableDebugNumProbes(t, last_key);

  // Make sure that finding the last key requires a lot of probes.
  ASSERT_GT(last_key_num_probes, kMinFullGroups);

  int x = 1;
  // Insert and erase one element at a time until an in-place rehash happens.
  while (last_key_num_probes == GetHashtableDebugNumProbes(t, last_key)) {
    t.emplace(x);
    ASSERT_EQ(capacity, t.capacity());
    // All elements should still be there.
    ASSERT_TRUE(t.find(x) != t.end()) << x;
    for (const auto& k : keys) {
      ASSERT_TRUE(t.find(k) != t.end()) << k;
    }
    t.erase(x);
    ++x;
  }
}

TEST(Table, InsertEraseStressTest) {
  IntTable t;
  const size_t kMinElementCount = 250;
  std::deque<int> keys;
  size_t i = 0;
  for (; i < MaxDensitySize(kMinElementCount); ++i) {
    t.emplace(i);
    keys.push_back(i);
  }
  const size_t kNumIterations = 1000000;
  for (; i < kNumIterations; ++i) {
    ASSERT_EQ(1, t.erase(keys.front()));
    keys.pop_front();
    t.emplace(i);
    keys.push_back(i);
  }
}

TEST(Table, InsertOverloads) {
  StringTable t;
  // These should all trigger the insert(init_type) overload.
  t.insert({{}, {}});
  t.insert({"ABC", {}});
  t.insert({"DEF", "!!!"});

  EXPECT_THAT(t, UnorderedElementsAre(Pair("", ""), Pair("ABC", ""),
                                      Pair("DEF", "!!!")));
}

TEST(Table, LargeTable) {
  IntTable t;
  for (int64_t i = 0; i != 100000; ++i) t.emplace(i << 40);
  for (int64_t i = 0; i != 100000; ++i) ASSERT_EQ(i << 40, *t.find(i << 40));
}

// Timeout if copy is quadratic as it was in Rust.
TEST(Table, EnsureNonQuadraticAsInRust) {
  static const size_t kLargeSize = 1 << 15;

  IntTable t;
  for (size_t i = 0; i != kLargeSize; ++i) {
    t.insert(i);
  }

  // If this is quadratic, the test will time out.
  IntTable t2;
  for (const auto& entry : t) t2.insert(entry);
}

TEST(Table, ClearBug) {
  IntTable t;
  constexpr size_t capacity = container_internal::Group::kWidth - 1;
  constexpr size_t max_size = capacity / 2 + 1;
  for (size_t i = 0; i < max_size; ++i) {
    t.insert(i);
  }
  ASSERT_EQ(capacity, t.capacity());
  intptr_t original = reinterpret_cast<intptr_t>(&*t.find(2));
  t.clear();
  ASSERT_EQ(capacity, t.capacity());
  for (size_t i = 0; i < max_size; ++i) {
    t.insert(i);
  }
  ASSERT_EQ(capacity, t.capacity());
  intptr_t second = reinterpret_cast<intptr_t>(&*t.find(2));
  // We are checking that original and second are close enough to each other
  // that they are probably still in the same group. This is not strictly
  // guaranteed.
  EXPECT_LT(std::abs(original - second),
            capacity * sizeof(IntTable::value_type));
}

TEST(Table, Erase) {
  IntTable t;
  EXPECT_TRUE(t.find(0) == t.end());
  auto res = t.emplace(0);
  EXPECT_TRUE(res.second);
  EXPECT_EQ(1, t.size());
  t.erase(res.first);
  EXPECT_EQ(0, t.size());
  EXPECT_TRUE(t.find(0) == t.end());
}

TEST(Table, EraseMaintainsValidIterator) {
  IntTable t;
  const int kNumElements = 100;
  for (int i = 0; i < kNumElements; ++i) {
    EXPECT_TRUE(t.emplace(i).second);
  }
  EXPECT_EQ(t.size(), kNumElements);

  int num_erase_calls = 0;
  auto it = t.begin();
  while (it != t.end()) {
    t.erase(it++);
    num_erase_calls++;
  }

  EXPECT_TRUE(t.empty());
  EXPECT_EQ(num_erase_calls, kNumElements);
}

// Collect N bad keys by the following algorithm:
// 1. Create an empty table and reserve it to 2 * N.
// 2. Insert N random elements.
// 3. Take the first Group::kWidth - 1 keys into the bad_keys array.
// 4. Clear the table without resizing.
// 5. Repeat from step 2 until N keys are collected.
std::vector<int64_t> CollectBadMergeKeys(size_t N) {
  static constexpr int kGroupSize = Group::kWidth - 1;

  auto topk_range = [](size_t b, size_t e,
                       IntTable* t) -> std::vector<int64_t> {
    for (size_t i = b; i != e; ++i) {
      t->emplace(i);
    }
    std::vector<int64_t> res;
    res.reserve(kGroupSize);
    auto it = t->begin();
    for (size_t i = b; i != e && i != b + kGroupSize; ++i, ++it) {
      res.push_back(*it);
    }
    return res;
  };

  std::vector<int64_t> bad_keys;
  bad_keys.reserve(N);
  IntTable t;
  t.reserve(N * 2);

  for (size_t b = 0; bad_keys.size() < N; b += N) {
    auto keys = topk_range(b, b + N, &t);
    bad_keys.insert(bad_keys.end(), keys.begin(), keys.end());
    t.erase(t.begin(), t.end());
    EXPECT_TRUE(t.empty());
  }
  return bad_keys;
}

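// ProbeStats below aggregates probe-length data over many table instances: a
// histogram of probe lengths across all elements of all tables, plus the
// per-table ratio total_probe_length / size. The DISABLED_EnsureNonQuadratic*
// tests compare these aggregates against hand-tuned ExpectedStats thresholds.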
struct ProbeStats {
  // Number of elements with specific probe length over all tested tables.
  std::vector<size_t> all_probes_histogram;
  // Ratios total_probe_length/size for every tested table.
  std::vector<double> single_table_ratios;

  friend ProbeStats operator+(const ProbeStats& a, const ProbeStats& b) {
    ProbeStats res = a;
    res.all_probes_histogram.resize(std::max(res.all_probes_histogram.size(),
                                             b.all_probes_histogram.size()));
    std::transform(b.all_probes_histogram.begin(), b.all_probes_histogram.end(),
                   res.all_probes_histogram.begin(),
                   res.all_probes_histogram.begin(), std::plus<size_t>());
    res.single_table_ratios.insert(res.single_table_ratios.end(),
                                   b.single_table_ratios.begin(),
                                   b.single_table_ratios.end());
    return res;
  }

  // Average ratio total_probe_length/size over tables.
  double AvgRatio() const {
    return std::accumulate(single_table_ratios.begin(),
                           single_table_ratios.end(), 0.0) /
           single_table_ratios.size();
  }

  // Maximum ratio total_probe_length/size over tables.
  double MaxRatio() const {
    return *std::max_element(single_table_ratios.begin(),
                             single_table_ratios.end());
  }

  // Percentile ratio total_probe_length/size over tables.
  double PercentileRatio(double Percentile = 0.95) const {
    auto r = single_table_ratios;
    auto mid = r.begin() + static_cast<size_t>(r.size() * Percentile);
    if (mid != r.end()) {
      std::nth_element(r.begin(), mid, r.end());
      return *mid;
    } else {
      return MaxRatio();
    }
  }

  // Maximum probe length over all elements and all tables.
  size_t MaxProbe() const { return all_probes_histogram.size(); }

  // Fraction of elements with specified probe length.
  std::vector<double> ProbeNormalizedHistogram() const {
    double total_elements = std::accumulate(all_probes_histogram.begin(),
                                            all_probes_histogram.end(), 0ull);
    std::vector<double> res;
    for (size_t p : all_probes_histogram) {
      res.push_back(p / total_elements);
    }
    return res;
  }

  size_t PercentileProbe(double Percentile = 0.99) const {
    size_t idx = 0;
    for (double p : ProbeNormalizedHistogram()) {
      if (Percentile > p) {
        Percentile -= p;
        ++idx;
      } else {
        return idx;
      }
    }
    return idx;
  }

  friend std::ostream& operator<<(std::ostream& out, const ProbeStats& s) {
    out << "{AvgRatio:" << s.AvgRatio() << ", MaxRatio:" << s.MaxRatio()
        << ", PercentileRatio:" << s.PercentileRatio()
        << ", MaxProbe:" << s.MaxProbe() << ", Probes=[";
    for (double p : s.ProbeNormalizedHistogram()) {
      out << p << ",";
    }
    out << "]}";

    return out;
  }
};

struct ExpectedStats {
  double avg_ratio;
  double max_ratio;
  std::vector<std::pair<double, double>> percentile_ratios;
  std::vector<std::pair<double, double>> percentile_probes;

  friend std::ostream& operator<<(std::ostream& out, const ExpectedStats& s) {
    out << "{AvgRatio:" << s.avg_ratio << ", MaxRatio:" << s.max_ratio
        << ", PercentileRatios: [";
    for (auto el : s.percentile_ratios) {
      out << el.first << ":" << el.second << ", ";
    }
    out << "], PercentileProbes: [";
    for (auto el : s.percentile_probes) {
      out << el.first << ":" << el.second << ", ";
    }
    out << "]}";

    return out;
  }
};

void VerifyStats(size_t size, const ExpectedStats& exp,
                 const ProbeStats& stats) {
  EXPECT_LT(stats.AvgRatio(), exp.avg_ratio) << size << " " << stats;
  EXPECT_LT(stats.MaxRatio(), exp.max_ratio) << size << " " << stats;
  for (auto pr : exp.percentile_ratios) {
    EXPECT_LE(stats.PercentileRatio(pr.first), pr.second)
        << size << " " << pr.first << " " << stats;
  }

  for (auto pr : exp.percentile_probes) {
    EXPECT_LE(stats.PercentileProbe(pr.first), pr.second)
        << size << " " << pr.first << " " << stats;
  }
}

using ProbeStatsPerSize = std::map<size_t, ProbeStats>;

// Collect total ProbeStats on num_iters iterations of the following algorithm:
// 1. Create a new table and reserve it to keys.size() * 2.
// 2. Insert all keys xored with seed.
// 3. Collect ProbeStats from the final table.
ProbeStats CollectProbeStatsOnKeysXoredWithSeed(
    const std::vector<int64_t>& keys, size_t num_iters) {
  const size_t reserve_size = keys.size() * 2;

  ProbeStats stats;

  int64_t seed = 0x71b1a19b907d6e33;
  while (num_iters--) {
    seed = static_cast<int64_t>(static_cast<uint64_t>(seed) * 17 + 13);
    IntTable t1;
    t1.reserve(reserve_size);
    for (const auto& key : keys) {
      t1.emplace(key ^ seed);
    }

    auto probe_histogram = GetHashtableDebugNumProbesHistogram(t1);
    stats.all_probes_histogram.resize(
        std::max(stats.all_probes_histogram.size(), probe_histogram.size()));
    std::transform(probe_histogram.begin(), probe_histogram.end(),
                   stats.all_probes_histogram.begin(),
                   stats.all_probes_histogram.begin(), std::plus<size_t>());

    size_t total_probe_seq_length = 0;
    for (size_t i = 0; i < probe_histogram.size(); ++i) {
      total_probe_seq_length += i * probe_histogram[i];
    }
    stats.single_table_ratios.push_back(total_probe_seq_length * 1.0 /
                                        keys.size());
    t1.erase(t1.begin(), t1.end());
  }
  return stats;
}

ExpectedStats XorSeedExpectedStats() {
  constexpr bool kRandomizesInserts =
#ifdef NDEBUG
      false;
#else   // NDEBUG
      true;
#endif  // NDEBUG

  // The effective load factor is larger in non-opt mode because we insert
  // elements out of order.
  switch (Group::kWidth) {
    case 8:
      if (kRandomizesInserts) {
        return {0.05,
                1.0,
                {{0.95, 0.5}},
                {{0.95, 0}, {0.99, 2}, {0.999, 4}, {0.9999, 10}}};
      } else {
        return {0.05,
                2.0,
                {{0.95, 0.1}},
                {{0.95, 0}, {0.99, 2}, {0.999, 4}, {0.9999, 10}}};
      }
    case 16:
      if (kRandomizesInserts) {
        return {0.1,
                1.0,
                {{0.95, 0.1}},
                {{0.95, 0}, {0.99, 1}, {0.999, 8}, {0.9999, 15}}};
      } else {
        return {0.05,
                1.0,
                {{0.95, 0.05}},
                {{0.95, 0}, {0.99, 1}, {0.999, 4}, {0.9999, 10}}};
      }
  }
  ABSL_RAW_LOG(FATAL, "%s", "Unknown Group width");
  return {};
}

TEST(Table, DISABLED_EnsureNonQuadraticTopNXorSeedByProbeSeqLength) {
  ProbeStatsPerSize stats;
  std::vector<size_t> sizes = {Group::kWidth << 5, Group::kWidth << 10};
  for (size_t size : sizes) {
    stats[size] =
        CollectProbeStatsOnKeysXoredWithSeed(CollectBadMergeKeys(size), 200);
  }
  auto expected = XorSeedExpectedStats();
  for (size_t size : sizes) {
    auto& stat = stats[size];
    VerifyStats(size, expected, stat);
  }
}

// Collect total ProbeStats on num_iters iterations of the following algorithm:
// 1. Create a new table.
// 2. Select 10% of keys and, for each, insert the 10 elements
//    key * 17 + j * 13.
// 3. Collect ProbeStats from the final table.
ProbeStats CollectProbeStatsOnLinearlyTransformedKeys(
    const std::vector<int64_t>& keys, size_t num_iters) {
  ProbeStats stats;

  std::random_device rd;
  std::mt19937 rng(rd());
  auto linear_transform = [](size_t x, size_t y) { return x * 17 + y * 13; };
  std::uniform_int_distribution<size_t> dist(0, keys.size() - 1);
  while (num_iters--) {
    IntTable t1;
    size_t num_keys = keys.size() / 10;
    size_t start = dist(rng);
    for (size_t i = 0; i != num_keys; ++i) {
      for (size_t j = 0; j != 10; ++j) {
        t1.emplace(linear_transform(keys[(i + start) % keys.size()], j));
      }
    }

    auto probe_histogram = GetHashtableDebugNumProbesHistogram(t1);
    stats.all_probes_histogram.resize(
        std::max(stats.all_probes_histogram.size(), probe_histogram.size()));
    std::transform(probe_histogram.begin(), probe_histogram.end(),
                   stats.all_probes_histogram.begin(),
                   stats.all_probes_histogram.begin(), std::plus<size_t>());

    size_t total_probe_seq_length = 0;
    for (size_t i = 0; i < probe_histogram.size(); ++i) {
      total_probe_seq_length += i * probe_histogram[i];
    }
    stats.single_table_ratios.push_back(total_probe_seq_length * 1.0 /
                                        t1.size());
    t1.erase(t1.begin(), t1.end());
  }
  return stats;
}

ExpectedStats LinearTransformExpectedStats() {
  constexpr bool kRandomizesInserts =
#ifdef NDEBUG
      false;
#else   // NDEBUG
      true;
#endif  // NDEBUG

  // The effective load factor is larger in non-opt mode because we insert
  // elements out of order.
  switch (Group::kWidth) {
    case 8:
      if (kRandomizesInserts) {
        return {0.1,
                0.5,
                {{0.95, 0.3}},
                {{0.95, 0}, {0.99, 1}, {0.999, 8}, {0.9999, 15}}};
      } else {
        return {0.15,
                0.5,
                {{0.95, 0.3}},
                {{0.95, 0}, {0.99, 3}, {0.999, 15}, {0.9999, 25}}};
      }
    case 16:
      if (kRandomizesInserts) {
        return {0.1,
                0.4,
                {{0.95, 0.3}},
                {{0.95, 0}, {0.99, 1}, {0.999, 8}, {0.9999, 15}}};
      } else {
        return {0.05,
                0.2,
                {{0.95, 0.1}},
                {{0.95, 0}, {0.99, 1}, {0.999, 6}, {0.9999, 10}}};
      }
  }
  ABSL_RAW_LOG(FATAL, "%s", "Unknown Group width");
  return {};
}

TEST(Table, DISABLED_EnsureNonQuadraticTopNLinearTransformByProbeSeqLength) {
  ProbeStatsPerSize stats;
  std::vector<size_t> sizes = {Group::kWidth << 5, Group::kWidth << 10};
  for (size_t size : sizes) {
    stats[size] = CollectProbeStatsOnLinearlyTransformedKeys(
        CollectBadMergeKeys(size), 300);
  }
  auto expected = LinearTransformExpectedStats();
  for (size_t size : sizes) {
    auto& stat = stats[size];
    VerifyStats(size, expected, stat);
  }
}

TEST(Table, EraseCollision) {
  BadTable t;

  // 1 2 3
  t.emplace(1);
  t.emplace(2);
  t.emplace(3);
  EXPECT_THAT(*t.find(1), 1);
  EXPECT_THAT(*t.find(2), 2);
  EXPECT_THAT(*t.find(3), 3);
  EXPECT_EQ(3, t.size());

  // 1 DELETED 3
  t.erase(t.find(2));
  EXPECT_THAT(*t.find(1), 1);
  EXPECT_TRUE(t.find(2) == t.end());
  EXPECT_THAT(*t.find(3), 3);
  EXPECT_EQ(2, t.size());

  // DELETED DELETED 3
  t.erase(t.find(1));
  EXPECT_TRUE(t.find(1) == t.end());
  EXPECT_TRUE(t.find(2) == t.end());
  EXPECT_THAT(*t.find(3), 3);
  EXPECT_EQ(1, t.size());

  // DELETED DELETED DELETED
  t.erase(t.find(3));
  EXPECT_TRUE(t.find(1) == t.end());
  EXPECT_TRUE(t.find(2) == t.end());
  EXPECT_TRUE(t.find(3) == t.end());
  EXPECT_EQ(0, t.size());
}

TEST(Table, EraseInsertProbing) {
  BadTable t(100);

  // 1 2 3 4
  t.emplace(1);
  t.emplace(2);
  t.emplace(3);
  t.emplace(4);

  // 1 DELETED 3 DELETED
  t.erase(t.find(2));
  t.erase(t.find(4));

  // 1 10 3 11 12
  t.emplace(10);
  t.emplace(11);
  t.emplace(12);

  EXPECT_EQ(5, t.size());
  EXPECT_THAT(t, UnorderedElementsAre(1, 10, 3, 11, 12));
}

TEST(Table, Clear) {
  IntTable t;
  EXPECT_TRUE(t.find(0) == t.end());
  t.clear();
  EXPECT_TRUE(t.find(0) == t.end());
  auto res = t.emplace(0);
  EXPECT_TRUE(res.second);
  EXPECT_EQ(1, t.size());
  t.clear();
  EXPECT_EQ(0, t.size());
  EXPECT_TRUE(t.find(0) == t.end());
}

TEST(Table, Swap) {
  IntTable t;
  EXPECT_TRUE(t.find(0) == t.end());
  auto res = t.emplace(0);
  EXPECT_TRUE(res.second);
  EXPECT_EQ(1, t.size());
  IntTable u;
  t.swap(u);
  EXPECT_EQ(0, t.size());
  EXPECT_EQ(1, u.size());
  EXPECT_TRUE(t.find(0) == t.end());
  EXPECT_THAT(*u.find(0), 0);
}

TEST(Table, Rehash) {
  IntTable t;
  EXPECT_TRUE(t.find(0) == t.end());
  t.emplace(0);
  t.emplace(1);
  EXPECT_EQ(2, t.size());
  t.rehash(128);
  EXPECT_EQ(2, t.size());
  EXPECT_THAT(*t.find(0), 0);
  EXPECT_THAT(*t.find(1), 1);
}

TEST(Table, RehashDoesNotRehashWhenNotNecessary) {
  IntTable t;
  t.emplace(0);
  t.emplace(1);
  auto* p = &*t.find(0);
  t.rehash(1);
  EXPECT_EQ(p, &*t.find(0));
}

TEST(Table, RehashZeroDoesNotAllocateOnEmptyTable) {
  IntTable t;
  t.rehash(0);
  EXPECT_EQ(0, t.bucket_count());
}

TEST(Table, RehashZeroDeallocatesEmptyTable) {
  IntTable t;
  t.emplace(0);
  t.clear();
  EXPECT_NE(0, t.bucket_count());
  t.rehash(0);
  EXPECT_EQ(0, t.bucket_count());
}

TEST(Table, RehashZeroForcesRehash) {
  IntTable t;
  t.emplace(0);
  t.emplace(1);
  auto* p = &*t.find(0);
  t.rehash(0);
  EXPECT_NE(p, &*t.find(0));
}

TEST(Table, ConstructFromInitList) {
  using P = std::pair<std::string, std::string>;
  struct Q {
    operator P() const { return {}; }
  };
  StringTable t = {P(), Q(), {}, {{}, {}}};
}

TEST(Table, CopyConstruct) {
  IntTable t;
  t.emplace(0);
  EXPECT_EQ(1, t.size());
  {
    IntTable u(t);
    EXPECT_EQ(1, u.size());
    EXPECT_THAT(*u.find(0), 0);
  }
  {
    IntTable u{t};
    EXPECT_EQ(1, u.size());
    EXPECT_THAT(*u.find(0), 0);
  }
  {
    IntTable u = t;
    EXPECT_EQ(1, u.size());
    EXPECT_THAT(*u.find(0), 0);
  }
}

TEST(Table, CopyConstructWithAlloc) {
  StringTable t;
  t.emplace("a", "b");
  EXPECT_EQ(1, t.size());
  StringTable u(t, Alloc<std::pair<std::string, std::string>>());
  EXPECT_EQ(1, u.size());
  EXPECT_THAT(*u.find("a"), Pair("a", "b"));
}

struct ExplicitAllocIntTable
    : raw_hash_set<IntPolicy, container_internal::hash_default_hash<int64_t>,
                   std::equal_to<int64_t>, Alloc<int64_t>> {
  ExplicitAllocIntTable() {}
};

TEST(Table, AllocWithExplicitCtor) {
  ExplicitAllocIntTable t;
  EXPECT_EQ(0, t.size());
}

TEST(Table, MoveConstruct) {
  {
    StringTable t;
    t.emplace("a", "b");
    EXPECT_EQ(1, t.size());

    StringTable u(std::move(t));
    EXPECT_EQ(1, u.size());
    EXPECT_THAT(*u.find("a"), Pair("a", "b"));
  }
  {
    StringTable t;
    t.emplace("a", "b");
    EXPECT_EQ(1, t.size());

    StringTable u{std::move(t)};
    EXPECT_EQ(1, u.size());
    EXPECT_THAT(*u.find("a"), Pair("a", "b"));
  }
  {
    StringTable t;
    t.emplace("a", "b");
    EXPECT_EQ(1, t.size());

    StringTable u = std::move(t);
    EXPECT_EQ(1, u.size());
    EXPECT_THAT(*u.find("a"), Pair("a", "b"));
  }
}

TEST(Table, MoveConstructWithAlloc) {
  StringTable t;
  t.emplace("a", "b");
  EXPECT_EQ(1, t.size());
  StringTable u(std::move(t), Alloc<std::pair<std::string, std::string>>());
  EXPECT_EQ(1, u.size());
  EXPECT_THAT(*u.find("a"), Pair("a", "b"));
}

TEST(Table, CopyAssign) {
  StringTable t;
  t.emplace("a", "b");
  EXPECT_EQ(1, t.size());
  StringTable u;
  u = t;
  EXPECT_EQ(1, u.size());
  EXPECT_THAT(*u.find("a"), Pair("a", "b"));
}

TEST(Table, CopySelfAssign) {
  StringTable t;
  t.emplace("a", "b");
  EXPECT_EQ(1, t.size());
  t = *&t;
  EXPECT_EQ(1, t.size());
  EXPECT_THAT(*t.find("a"), Pair("a", "b"));
}

TEST(Table, MoveAssign) {
  StringTable t;
  t.emplace("a", "b");
  EXPECT_EQ(1, t.size());
  StringTable u;
  u = std::move(t);
  EXPECT_EQ(1, u.size());
  EXPECT_THAT(*u.find("a"), Pair("a", "b"));
}

TEST(Table, Equality) {
  StringTable t;
  std::vector<std::pair<std::string, std::string>> v = {{"a", "b"},
                                                        {"aa", "bb"}};
  t.insert(std::begin(v), std::end(v));
  StringTable u = t;
  EXPECT_EQ(u, t);
}

TEST(Table, Equality2) {
  StringTable t;
  std::vector<std::pair<std::string, std::string>> v1 = {{"a", "b"},
                                                         {"aa", "bb"}};
  t.insert(std::begin(v1), std::end(v1));
  StringTable u;
  std::vector<std::pair<std::string, std::string>> v2 = {{"a", "a"},
                                                         {"aa", "aa"}};
  u.insert(std::begin(v2), std::end(v2));
  EXPECT_NE(u, t);
}

TEST(Table, Equality3) {
  StringTable t;
  std::vector<std::pair<std::string, std::string>> v1 = {{"b", "b"},
                                                         {"bb", "bb"}};
  t.insert(std::begin(v1), std::end(v1));
  StringTable u;
  std::vector<std::pair<std::string, std::string>> v2 = {{"a", "a"},
                                                         {"aa", "aa"}};
  u.insert(std::begin(v2), std::end(v2));
  EXPECT_NE(u, t);
}

TEST(Table, NumDeletedRegression) {
  IntTable t;
  t.emplace(0);
  t.erase(t.find(0));
  // Construct over a deleted slot.
  t.emplace(0);
  t.clear();
}

TEST(Table, FindFullDeletedRegression) {
  IntTable t;
  for (int i = 0; i < 1000; ++i) {
    t.emplace(i);
    t.erase(t.find(i));
  }
  EXPECT_EQ(0, t.size());
}

TEST(Table, ReplacingDeletedSlotDoesNotRehash) {
  size_t n;
  {
    // Compute n such that n is the maximum number of elements before rehash.
    IntTable t;
    t.emplace(0);
    size_t c = t.bucket_count();
    for (n = 1; c == t.bucket_count(); ++n) t.emplace(n);
    --n;
  }
  IntTable t;
  t.rehash(n);
  const size_t c = t.bucket_count();
  for (size_t i = 0; i != n; ++i) t.emplace(i);
  EXPECT_EQ(c, t.bucket_count()) << "rehashing threshold = " << n;
  t.erase(0);
  t.emplace(0);
  EXPECT_EQ(c, t.bucket_count()) << "rehashing threshold = " << n;
}

TEST(Table, NoThrowMoveConstruct) {
  ASSERT_TRUE(
      std::is_nothrow_copy_constructible<absl::Hash<absl::string_view>>::value);
  ASSERT_TRUE(std::is_nothrow_copy_constructible<
              std::equal_to<absl::string_view>>::value);
  ASSERT_TRUE(std::is_nothrow_copy_constructible<std::allocator<int>>::value);
  EXPECT_TRUE(std::is_nothrow_move_constructible<StringTable>::value);
}

TEST(Table, NoThrowMoveAssign) {
  ASSERT_TRUE(
      std::is_nothrow_move_assignable<absl::Hash<absl::string_view>>::value);
  ASSERT_TRUE(
      std::is_nothrow_move_assignable<std::equal_to<absl::string_view>>::value);
  ASSERT_TRUE(std::is_nothrow_move_assignable<std::allocator<int>>::value);
  ASSERT_TRUE(
      absl::allocator_traits<std::allocator<int>>::is_always_equal::value);
  EXPECT_TRUE(std::is_nothrow_move_assignable<StringTable>::value);
}

TEST(Table, NoThrowSwappable) {
  ASSERT_TRUE(
      container_internal::IsNoThrowSwappable<absl::Hash<absl::string_view>>());
  ASSERT_TRUE(container_internal::IsNoThrowSwappable<
              std::equal_to<absl::string_view>>());
  ASSERT_TRUE(container_internal::IsNoThrowSwappable<std::allocator<int>>());
  EXPECT_TRUE(container_internal::IsNoThrowSwappable<StringTable>());
}

TEST(Table, HeterogeneousLookup) {
  struct Hash {
    size_t operator()(int64_t i) const { return i; }
    size_t operator()(double i) const {
      ADD_FAILURE();
      return i;
    }
  };
  struct Eq {
    bool operator()(int64_t a, int64_t b) const { return a == b; }
    bool operator()(double a, int64_t b) const {
      ADD_FAILURE();
      return a == b;
    }
    bool operator()(int64_t a, double b) const {
      ADD_FAILURE();
      return a == b;
    }
    bool operator()(double a, double b) const {
      ADD_FAILURE();
      return a == b;
    }
  };

  struct THash {
    using is_transparent = void;
    size_t operator()(int64_t i) const { return i; }
    size_t operator()(double i) const { return i; }
  };
  struct TEq {
    using is_transparent = void;
    bool operator()(int64_t a, int64_t b) const { return a == b; }
    bool operator()(double a, int64_t b) const { return a == b; }
    bool operator()(int64_t a, double b) const { return a == b; }
    bool operator()(double a, double b) const { return a == b; }
  };

  raw_hash_set<IntPolicy, Hash, Eq, Alloc<int64_t>> s{0, 1, 2};
  // It will convert to int64_t before the query.
  EXPECT_EQ(1, *s.find(double{1.1}));

  raw_hash_set<IntPolicy, THash, TEq, Alloc<int64_t>> ts{0, 1, 2};
  // It will try to use the double, and fail to find the object.
  EXPECT_TRUE(ts.find(1.1) == ts.end());
}

template <class Table>
using CallFind = decltype(std::declval<Table&>().find(17));

template <class Table>
using CallErase = decltype(std::declval<Table&>().erase(17));

template <class Table>
using CallExtract = decltype(std::declval<Table&>().extract(17));

template <class Table>
using CallPrefetch = decltype(std::declval<Table&>().prefetch(17));

template <class Table>
using CallCount = decltype(std::declval<Table&>().count(17));

template <template <typename> class C, class Table, class = void>
struct VerifyResultOf : std::false_type {};

template <template <typename> class C, class Table>
struct VerifyResultOf<C, Table, absl::void_t<C<Table>>> : std::true_type {};

TEST(Table, HeterogeneousLookupOverloads) {
  using NonTransparentTable =
      raw_hash_set<StringPolicy, absl::Hash<absl::string_view>,
                   std::equal_to<absl::string_view>, std::allocator<int>>;

  EXPECT_FALSE((VerifyResultOf<CallFind, NonTransparentTable>()));
  EXPECT_FALSE((VerifyResultOf<CallErase, NonTransparentTable>()));
  EXPECT_FALSE((VerifyResultOf<CallExtract, NonTransparentTable>()));
  EXPECT_FALSE((VerifyResultOf<CallPrefetch, NonTransparentTable>()));
  EXPECT_FALSE((VerifyResultOf<CallCount, NonTransparentTable>()));

  using TransparentTable = raw_hash_set<
      StringPolicy,
      absl::container_internal::hash_default_hash<absl::string_view>,
      absl::container_internal::hash_default_eq<absl::string_view>,
      std::allocator<int>>;

  EXPECT_TRUE((VerifyResultOf<CallFind, TransparentTable>()));
  EXPECT_TRUE((VerifyResultOf<CallErase, TransparentTable>()));
  EXPECT_TRUE((VerifyResultOf<CallExtract, TransparentTable>()));
  EXPECT_TRUE((VerifyResultOf<CallPrefetch, TransparentTable>()));
  EXPECT_TRUE((VerifyResultOf<CallCount, TransparentTable>()));
}

// TODO(alkis): Expand iterator tests.
TEST(Iterator, IsDefaultConstructible) {
  StringTable::iterator i;
  EXPECT_TRUE(i == StringTable::iterator());
}

TEST(ConstIterator, IsDefaultConstructible) {
  StringTable::const_iterator i;
  EXPECT_TRUE(i == StringTable::const_iterator());
}

TEST(Iterator, ConvertsToConstIterator) {
  StringTable::iterator i;
  EXPECT_TRUE(i == StringTable::const_iterator());
}

TEST(Iterator, Iterates) {
  IntTable t;
  for (size_t i = 3; i != 6; ++i) EXPECT_TRUE(t.emplace(i).second);
  EXPECT_THAT(t, UnorderedElementsAre(3, 4, 5));
}

TEST(Table, Merge) {
  StringTable t1, t2;
  t1.emplace("0", "-0");
  t1.emplace("1", "-1");
  t2.emplace("0", "~0");
  t2.emplace("2", "~2");

  EXPECT_THAT(t1, UnorderedElementsAre(Pair("0", "-0"), Pair("1", "-1")));
  EXPECT_THAT(t2, UnorderedElementsAre(Pair("0", "~0"), Pair("2", "~2")));

  t1.merge(t2);
  EXPECT_THAT(t1, UnorderedElementsAre(Pair("0", "-0"), Pair("1", "-1"),
                                       Pair("2", "~2")));
  EXPECT_THAT(t2, UnorderedElementsAre(Pair("0", "~0")));
}

TEST(Nodes, EmptyNodeType) {
  using node_type = StringTable::node_type;
  node_type n;
  EXPECT_FALSE(n);
  EXPECT_TRUE(n.empty());

  EXPECT_TRUE((std::is_same<node_type::allocator_type,
                            StringTable::allocator_type>::value));
}

TEST(Nodes, ExtractInsert) {
  constexpr char k0[] = "Very long std::string zero.";
  constexpr char k1[] = "Very long std::string one.";
  constexpr char k2[] = "Very long std::string two.";
  StringTable t = {{k0, ""}, {k1, ""}, {k2, ""}};
  EXPECT_THAT(t,
              UnorderedElementsAre(Pair(k0, ""), Pair(k1, ""), Pair(k2, "")));

  auto node = t.extract(k0);
  EXPECT_THAT(t, UnorderedElementsAre(Pair(k1, ""), Pair(k2, "")));
  EXPECT_TRUE(node);
  EXPECT_FALSE(node.empty());

  StringTable t2;
  StringTable::insert_return_type res = t2.insert(std::move(node));
  EXPECT_TRUE(res.inserted);
  EXPECT_THAT(*res.position, Pair(k0, ""));
  EXPECT_FALSE(res.node);
  EXPECT_THAT(t2, UnorderedElementsAre(Pair(k0, "")));

  // Not there.
  EXPECT_THAT(t, UnorderedElementsAre(Pair(k1, ""), Pair(k2, "")));
  node = t.extract("Not there!");
  EXPECT_THAT(t, UnorderedElementsAre(Pair(k1, ""), Pair(k2, "")));
  EXPECT_FALSE(node);

  // Inserting nothing.
  res = t2.insert(std::move(node));
  EXPECT_FALSE(res.inserted);
  EXPECT_EQ(res.position, t2.end());
  EXPECT_FALSE(res.node);
  EXPECT_THAT(t2, UnorderedElementsAre(Pair(k0, "")));

  t.emplace(k0, "1");
  node = t.extract(k0);

  // Insert duplicate.
  res = t2.insert(std::move(node));
  EXPECT_FALSE(res.inserted);
  EXPECT_THAT(*res.position, Pair(k0, ""));
  EXPECT_TRUE(res.node);
  EXPECT_FALSE(node);
}

IntTable MakeSimpleTable(size_t size) {
  IntTable t;
  while (t.size() < size) t.insert(t.size());
  return t;
}

std::vector<int> OrderOfIteration(const IntTable& t) {
  return {t.begin(), t.end()};
}

// These IterationOrderChanges tests depend on non-deterministic behavior.
// We are injecting non-determinism from the pointer of the table, but do so in
// a way that only the page matters. We have to retry enough times to make sure
// we are touching different memory pages to cause the ordering to change.
// We also need to keep the old tables around to avoid getting the same memory
// blocks over and over.
TEST(Table, IterationOrderChangesByInstance) {
  for (size_t size : {2, 6, 12, 20}) {
    const auto reference_table = MakeSimpleTable(size);
    const auto reference = OrderOfIteration(reference_table);

    std::vector<IntTable> tables;
    bool found_difference = false;
    for (int i = 0; !found_difference && i < 5000; ++i) {
      tables.push_back(MakeSimpleTable(size));
      found_difference = OrderOfIteration(tables.back()) != reference;
    }
    if (!found_difference) {
      FAIL()
          << "Iteration order remained the same across many attempts with size "
          << size;
    }
  }
}

TEST(Table, IterationOrderChangesOnRehash) {
  std::vector<IntTable> garbage;
  for (int i = 0; i < 5000; ++i) {
    auto t = MakeSimpleTable(20);
    const auto reference = OrderOfIteration(t);
    // Force rehash to the same size.
    t.rehash(0);
    auto trial = OrderOfIteration(t);
    if (trial != reference) {
      // We are done.
      return;
    }
    garbage.push_back(std::move(t));
  }
  FAIL() << "Iteration order remained the same across many attempts.";
}

// Verify that pointers are invalidated as soon as a second element is
// inserted. This prevents dependency on pointer stability on small tables.
TEST(Table, UnstablePointers) {
  IntTable table;

  const auto addr = [&](int i) {
    return reinterpret_cast<uintptr_t>(&*table.find(i));
  };

  table.insert(0);
  const uintptr_t old_ptr = addr(0);

  // This causes a rehash.
  table.insert(1);

  EXPECT_NE(old_ptr, addr(0));
}

// Confirm that we assert if we try to erase() end().
TEST(TableDeathTest, EraseOfEndAsserts) {
  // Use an assert with side-effects to figure out if they are actually
  // enabled.
  bool assert_enabled = false;
  assert([&]() {
    assert_enabled = true;
    return true;
  }());
  if (!assert_enabled) return;

  IntTable t;
  // Extra simple "regexp" as regexp support is highly varied across platforms.
  constexpr char kDeathMsg[] = "it != end";
  EXPECT_DEATH_IF_SUPPORTED(t.erase(t.end()), kDeathMsg);
}

TEST(RawHashSamplerTest, Sample) {
  // Enable the feature even if the prod default is off.
  SetHashtablezEnabled(true);
  SetHashtablezSampleParameter(100);

  auto& sampler = HashtablezSampler::Global();
  size_t start_size = 0;
  start_size += sampler.Iterate([&](const HashtablezInfo&) { ++start_size; });

  std::vector<IntTable> tables;
  for (int i = 0; i < 1000000; ++i) {
    tables.emplace_back();
    tables.back().insert(1);
  }
  size_t end_size = 0;
  end_size += sampler.Iterate([&](const HashtablezInfo&) { ++end_size; });

  EXPECT_NEAR((end_size - start_size) / static_cast<double>(tables.size()),
              0.01, 0.005);
}

TEST(RawHashSamplerTest, DoNotSampleCustomAllocators) {
  // Enable the feature even if the prod default is off.
  SetHashtablezEnabled(true);
  SetHashtablezSampleParameter(100);

  auto& sampler = HashtablezSampler::Global();
  size_t start_size = 0;
  start_size += sampler.Iterate([&](const HashtablezInfo&) { ++start_size; });

  std::vector<CustomAllocIntTable> tables;
  for (int i = 0; i < 1000000; ++i) {
    tables.emplace_back();
    tables.back().insert(1);
  }
  size_t end_size = 0;
  end_size += sampler.Iterate([&](const HashtablezInfo&) { ++end_size; });

  EXPECT_NEAR((end_size - start_size) / static_cast<double>(tables.size()),
              0.00, 0.001);
}

#ifdef ADDRESS_SANITIZER
TEST(Sanitizer, PoisoningUnused) {
  IntTable t;
  t.reserve(5);
  // Insert something to force an allocation.
  int64_t& v1 = *t.insert(0).first;

  // Make sure there is something to test.
  ASSERT_GT(t.capacity(), 1);

  int64_t* slots = RawHashSetTestOnlyAccess::GetSlots(t);
  for (size_t i = 0; i < t.capacity(); ++i) {
    EXPECT_EQ(slots + i != &v1, __asan_address_is_poisoned(slots + i));
  }
}

TEST(Sanitizer, PoisoningOnErase) {
  IntTable t;
  int64_t& v = *t.insert(0).first;

  EXPECT_FALSE(__asan_address_is_poisoned(&v));
  t.erase(0);
  EXPECT_TRUE(__asan_address_is_poisoned(&v));
}
#endif  // ADDRESS_SANITIZER

}  // namespace
}  // namespace container_internal
}  // namespace absl