// Copyright 2018 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "absl/container/internal/raw_hash_set.h"

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <deque>
#include <functional>
#include <map>
#include <memory>
#include <numeric>
#include <ostream>
#include <random>
#include <string>
#include <tuple>
#include <utility>
#include <vector>

#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/base/attributes.h"
#include "absl/base/internal/cycleclock.h"
#include "absl/base/internal/raw_logging.h"
#include "absl/container/internal/container_memory.h"
#include "absl/container/internal/hash_function_defaults.h"
#include "absl/container/internal/hash_policy_testing.h"
#include "absl/container/internal/hashtable_debug.h"
#include "absl/strings/string_view.h"

namespace absl {
namespace container_internal {

struct RawHashSetTestOnlyAccess {
  template <typename C>
  static auto GetSlots(const C& c) -> decltype(c.slots_) {
    return c.slots_;
  }
};

namespace {

using ::testing::DoubleNear;
using ::testing::ElementsAre;
using ::testing::Ge;
using ::testing::Lt;
using ::testing::Optional;
using ::testing::Pair;
using ::testing::UnorderedElementsAre;

TEST(Util, NormalizeCapacity) {
  EXPECT_EQ(1, NormalizeCapacity(0));
  EXPECT_EQ(1, NormalizeCapacity(1));
  EXPECT_EQ(3, NormalizeCapacity(2));
  EXPECT_EQ(3, NormalizeCapacity(3));
  EXPECT_EQ(7, NormalizeCapacity(4));
  EXPECT_EQ(7, NormalizeCapacity(7));
  EXPECT_EQ(15, NormalizeCapacity(8));
  EXPECT_EQ(15, NormalizeCapacity(15));
  EXPECT_EQ(15 * 2 + 1, NormalizeCapacity(15 + 1));
  EXPECT_EQ(15 * 2 + 1, NormalizeCapacity(15 + 2));
}

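// A minimal sketch of the rule the expectations above encode: normalized
// capacities are always of the form 2^k - 1, i.e. the smallest such value
// that is >= the request, so the table roughly doubles when it grows. This
// is an illustration only, not the production implementation.
size_t NormalizeCapacitySketch(size_t n) {
  size_t c = 1;
  while (c < n) c = c * 2 + 1;  // 1, 3, 7, 15, ...: one less than a power of 2
  return c;
}

TEST(Util, NormalizeCapacitySketchAgrees) {
  for (size_t n = 0; n != 100; ++n) {
    EXPECT_EQ(NormalizeCapacity(n), NormalizeCapacitySketch(n));
  }
}
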
TEST(Util, GrowthAndCapacity) {
  // Verify that NormalizeCapacity(GrowthToLowerboundCapacity(growth)) gives
  // the minimum capacity that has room for `growth` elements.
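  // In the implementation under test this corresponds to a maximum load
  // factor of about 7/8: CapacityToGrowth(c) is roughly c - c / 8.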
  for (size_t growth = 0; growth < 10000; ++growth) {
    SCOPED_TRACE(growth);
    size_t capacity = NormalizeCapacity(GrowthToLowerboundCapacity(growth));
    // The capacity is large enough for `growth`.
    EXPECT_THAT(CapacityToGrowth(capacity), Ge(growth));
    if (growth != 0 && capacity > 1) {
      // There is no smaller capacity that works.
      EXPECT_THAT(CapacityToGrowth(capacity / 2), Lt(growth));
    }
  }

  for (size_t capacity = Group::kWidth - 1; capacity < 10000;
       capacity = 2 * capacity + 1) {
    SCOPED_TRACE(capacity);
    size_t growth = CapacityToGrowth(capacity);
    EXPECT_THAT(growth, Lt(capacity));
    EXPECT_LE(GrowthToLowerboundCapacity(growth), capacity);
    EXPECT_EQ(NormalizeCapacity(GrowthToLowerboundCapacity(growth)), capacity);
  }
}

TEST(Util, probe_seq) {
  probe_seq<16> seq(0, 127);
  auto gen = [&]() {
    size_t res = seq.offset();
    seq.next();
    return res;
  };
  std::vector<size_t> offsets(8);
  std::generate_n(offsets.begin(), 8, gen);
  EXPECT_THAT(offsets, ElementsAre(0, 16, 48, 96, 32, 112, 80, 64));
  seq = probe_seq<16>(128, 127);
  std::generate_n(offsets.begin(), 8, gen);
  EXPECT_THAT(offsets, ElementsAre(0, 16, 48, 96, 32, 112, 80, 64));
}

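// The offsets above follow triangular-number (quadratic) probing: after i
// steps the sequence sits at Width * (1 + 2 + ... + i), taken modulo the
// table size. A small check of that closed form against probe_seq, reusing
// the constants of the test above.
TEST(Util, probe_seq_triangular) {
  probe_seq<16> seq(0, 127);
  for (size_t i = 0; i != 8; ++i) {
    EXPECT_EQ(seq.offset(), (16 * i * (i + 1) / 2) & 127);
    seq.next();
  }
}
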
TEST(BitMask, Smoke) {
  EXPECT_FALSE((BitMask<uint8_t, 8>(0)));
  EXPECT_TRUE((BitMask<uint8_t, 8>(5)));

  EXPECT_THAT((BitMask<uint8_t, 8>(0)), ElementsAre());
  EXPECT_THAT((BitMask<uint8_t, 8>(0x1)), ElementsAre(0));
  EXPECT_THAT((BitMask<uint8_t, 8>(0x2)), ElementsAre(1));
  EXPECT_THAT((BitMask<uint8_t, 8>(0x3)), ElementsAre(0, 1));
  EXPECT_THAT((BitMask<uint8_t, 8>(0x4)), ElementsAre(2));
  EXPECT_THAT((BitMask<uint8_t, 8>(0x5)), ElementsAre(0, 2));
  EXPECT_THAT((BitMask<uint8_t, 8>(0x55)), ElementsAre(0, 2, 4, 6));
  EXPECT_THAT((BitMask<uint8_t, 8>(0xAA)), ElementsAre(1, 3, 5, 7));
}

TEST(BitMask, WithShift) {
  // See the non-SSE version of Group for details on what this math is for.
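  // Briefly: x = ctrl ^ (lsbs * hash) has a zero byte exactly where a ctrl
  // byte equals the hash byte, and (x - lsbs) & ~x & msbs raises the 0x80 bit
  // of every zero byte. The borrow can also raise the bit of the byte right
  // after a match (a benign false positive, since callers re-check
  // candidates); that is why byte 3 is set below alongside the true match at
  // byte 2.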
  uint64_t ctrl = 0x1716151413121110;
  uint64_t hash = 0x12;
  constexpr uint64_t msbs = 0x8080808080808080ULL;
  constexpr uint64_t lsbs = 0x0101010101010101ULL;
  auto x = ctrl ^ (lsbs * hash);
  uint64_t mask = (x - lsbs) & ~x & msbs;
  EXPECT_EQ(0x0000000080800000, mask);

  BitMask<uint64_t, 8, 3> b(mask);
  EXPECT_EQ(*b, 2);
}

TEST(BitMask, LeadingTrailing) {
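  // For the 16-bit masks each set bit is one slot. For the 64-bit masks with
  // Shift == 3 (the portable, non-SSE Group) every slot spans a byte, so
  // LeadingZeros()/TrailingZeros() count in slots, i.e. bit index / 8.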
  EXPECT_EQ((BitMask<uint32_t, 16>(0x00001a40).LeadingZeros()), 3);
  EXPECT_EQ((BitMask<uint32_t, 16>(0x00001a40).TrailingZeros()), 6);

  EXPECT_EQ((BitMask<uint32_t, 16>(0x00000001).LeadingZeros()), 15);
  EXPECT_EQ((BitMask<uint32_t, 16>(0x00000001).TrailingZeros()), 0);

  EXPECT_EQ((BitMask<uint32_t, 16>(0x00008000).LeadingZeros()), 0);
  EXPECT_EQ((BitMask<uint32_t, 16>(0x00008000).TrailingZeros()), 15);

  EXPECT_EQ((BitMask<uint64_t, 8, 3>(0x0000008080808000).LeadingZeros()), 3);
  EXPECT_EQ((BitMask<uint64_t, 8, 3>(0x0000008080808000).TrailingZeros()), 1);

  EXPECT_EQ((BitMask<uint64_t, 8, 3>(0x0000000000000080).LeadingZeros()), 7);
  EXPECT_EQ((BitMask<uint64_t, 8, 3>(0x0000000000000080).TrailingZeros()), 0);

  EXPECT_EQ((BitMask<uint64_t, 8, 3>(0x8000000000000000).LeadingZeros()), 0);
  EXPECT_EQ((BitMask<uint64_t, 8, 3>(0x8000000000000000).TrailingZeros()), 7);
}

TEST(Group, EmptyGroup) {
  for (h2_t h = 0; h != 128; ++h) EXPECT_FALSE(Group{EmptyGroup()}.Match(h));
}

TEST(Group, Match) {
  if (Group::kWidth == 16) {
    ctrl_t group[] = {kEmpty, 1, kDeleted, 3, kEmpty, 5, kSentinel, 7,
                      7,      5, 3,        1, 1,      1, 1,         1};
    EXPECT_THAT(Group{group}.Match(0), ElementsAre());
    EXPECT_THAT(Group{group}.Match(1), ElementsAre(1, 11, 12, 13, 14, 15));
    EXPECT_THAT(Group{group}.Match(3), ElementsAre(3, 10));
    EXPECT_THAT(Group{group}.Match(5), ElementsAre(5, 9));
    EXPECT_THAT(Group{group}.Match(7), ElementsAre(7, 8));
  } else if (Group::kWidth == 8) {
    ctrl_t group[] = {kEmpty, 1, 2, kDeleted, 2, 1, kSentinel, 1};
    EXPECT_THAT(Group{group}.Match(0), ElementsAre());
    EXPECT_THAT(Group{group}.Match(1), ElementsAre(1, 5, 7));
    EXPECT_THAT(Group{group}.Match(2), ElementsAre(2, 4));
  } else {
    FAIL() << "No test coverage for Group::kWidth==" << Group::kWidth;
  }
}

TEST(Group, MatchEmpty) {
  if (Group::kWidth == 16) {
    ctrl_t group[] = {kEmpty, 1, kDeleted, 3, kEmpty, 5, kSentinel, 7,
                      7,      5, 3,        1, 1,      1, 1,         1};
    EXPECT_THAT(Group{group}.MatchEmpty(), ElementsAre(0, 4));
  } else if (Group::kWidth == 8) {
    ctrl_t group[] = {kEmpty, 1, 2, kDeleted, 2, 1, kSentinel, 1};
    EXPECT_THAT(Group{group}.MatchEmpty(), ElementsAre(0));
  } else {
    FAIL() << "No test coverage for Group::kWidth==" << Group::kWidth;
  }
}

TEST(Group, MatchEmptyOrDeleted) {
  if (Group::kWidth == 16) {
    ctrl_t group[] = {kEmpty, 1, kDeleted, 3, kEmpty, 5, kSentinel, 7,
                      7,      5, 3,        1, 1,      1, 1,         1};
    EXPECT_THAT(Group{group}.MatchEmptyOrDeleted(), ElementsAre(0, 2, 4));
  } else if (Group::kWidth == 8) {
    ctrl_t group[] = {kEmpty, 1, 2, kDeleted, 2, 1, kSentinel, 1};
    EXPECT_THAT(Group{group}.MatchEmptyOrDeleted(), ElementsAre(0, 3));
  } else {
    FAIL() << "No test coverage for Group::kWidth==" << Group::kWidth;
  }
}

TEST(Batch, DropDeletes) {
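  // ConvertDeletedToEmptyAndFullToDeleted is the first step of an in-place
  // rehash: kDeleted slots become kEmpty (free to reuse), full slots become
  // kDeleted (their elements still have to be moved), and the kSentinel plus
  // the cloned control bytes at the end stay consistent. The loop at the
  // bottom checks exactly that mapping over a repeating control pattern.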
  constexpr size_t kCapacity = 63;
  constexpr size_t kGroupWidth = container_internal::Group::kWidth;
  std::vector<ctrl_t> ctrl(kCapacity + 1 + kGroupWidth);
  ctrl[kCapacity] = kSentinel;
  std::vector<ctrl_t> pattern = {kEmpty, 2, kDeleted, 2, kEmpty, 1, kDeleted};
  for (size_t i = 0; i != kCapacity; ++i) {
    ctrl[i] = pattern[i % pattern.size()];
    if (i < kGroupWidth - 1)
      ctrl[i + kCapacity + 1] = pattern[i % pattern.size()];
  }
  ConvertDeletedToEmptyAndFullToDeleted(ctrl.data(), kCapacity);
  ASSERT_EQ(ctrl[kCapacity], kSentinel);
  for (size_t i = 0; i < kCapacity + 1 + kGroupWidth; ++i) {
    ctrl_t expected = pattern[i % (kCapacity + 1) % pattern.size()];
    if (i == kCapacity) expected = kSentinel;
    if (expected == kDeleted) expected = kEmpty;
    if (IsFull(expected)) expected = kDeleted;
    EXPECT_EQ(ctrl[i], expected)
        << i << " " << int{pattern[i % pattern.size()]};
  }
}

TEST(Group, CountLeadingEmptyOrDeleted) {
  const std::vector<ctrl_t> empty_examples = {kEmpty, kDeleted};
  const std::vector<ctrl_t> full_examples = {0, 1, 2, 3, 5, 9, 127, kSentinel};

  for (ctrl_t empty : empty_examples) {
    std::vector<ctrl_t> e(Group::kWidth, empty);
    EXPECT_EQ(Group::kWidth, Group{e.data()}.CountLeadingEmptyOrDeleted());
    for (ctrl_t full : full_examples) {
      for (size_t i = 0; i != Group::kWidth; ++i) {
        std::vector<ctrl_t> f(Group::kWidth, empty);
        f[i] = full;
        EXPECT_EQ(i, Group{f.data()}.CountLeadingEmptyOrDeleted());
      }
      std::vector<ctrl_t> f(Group::kWidth, empty);
      f[Group::kWidth * 2 / 3] = full;
      f[Group::kWidth / 2] = full;
      EXPECT_EQ(
          Group::kWidth / 2, Group{f.data()}.CountLeadingEmptyOrDeleted());
    }
  }
}

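// A hash table policy supplies the pieces raw_hash_set cannot guess: the
// stored slot_type, the key_type/init_type used for lookup and insertion,
// construct()/destroy()/transfer() for slot lifetime management, element()
// for access to the stored value, and apply(), which decomposes call
// arguments into a key for hashing and equality. IntPolicy below is close to
// the minimal case: the slot is the element is the key.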
struct IntPolicy {
  using slot_type = int64_t;
  using key_type = int64_t;
  using init_type = int64_t;

  static void construct(void*, int64_t* slot, int64_t v) { *slot = v; }
  static void destroy(void*, int64_t*) {}
  static void transfer(void*, int64_t* new_slot, int64_t* old_slot) {
    *new_slot = *old_slot;
  }

  static int64_t& element(slot_type* slot) { return *slot; }

  template <class F>
  static auto apply(F&& f, int64_t x) -> decltype(std::forward<F>(f)(x, x)) {
    return std::forward<F>(f)(x, x);
  }
};

class StringPolicy {
  template <class F, class K, class V,
            class = typename std::enable_if<
                std::is_convertible<const K&, absl::string_view>::value>::type>
  decltype(std::declval<F>()(
      std::declval<const absl::string_view&>(), std::piecewise_construct,
      std::declval<std::tuple<K>>(),
      std::declval<V>())) static apply_impl(F&& f,
                                            std::pair<std::tuple<K>, V> p) {
    const absl::string_view& key = std::get<0>(p.first);
    return std::forward<F>(f)(key, std::piecewise_construct, std::move(p.first),
                              std::move(p.second));
  }

 public:
  struct slot_type {
    struct ctor {};

    template <class... Ts>
    slot_type(ctor, Ts&&... ts) : pair(std::forward<Ts>(ts)...) {}

    std::pair<std::string, std::string> pair;
  };

  using key_type = std::string;
  using init_type = std::pair<std::string, std::string>;

  template <class allocator_type, class... Args>
  static void construct(allocator_type* alloc, slot_type* slot, Args... args) {
    std::allocator_traits<allocator_type>::construct(
        *alloc, slot, typename slot_type::ctor(), std::forward<Args>(args)...);
  }

  template <class allocator_type>
  static void destroy(allocator_type* alloc, slot_type* slot) {
    std::allocator_traits<allocator_type>::destroy(*alloc, slot);
  }

  template <class allocator_type>
  static void transfer(allocator_type* alloc, slot_type* new_slot,
                       slot_type* old_slot) {
    construct(alloc, new_slot, std::move(old_slot->pair));
    destroy(alloc, old_slot);
  }

  static std::pair<std::string, std::string>& element(slot_type* slot) {
    return slot->pair;
  }

  template <class F, class... Args>
  static auto apply(F&& f, Args&&... args)
      -> decltype(apply_impl(std::forward<F>(f),
                             PairArgs(std::forward<Args>(args)...))) {
    return apply_impl(std::forward<F>(f),
                      PairArgs(std::forward<Args>(args)...));
  }
};

struct StringHash : absl::Hash<absl::string_view> {
  using is_transparent = void;
};
struct StringEq : std::equal_to<absl::string_view> {
  using is_transparent = void;
};

struct StringTable
    : raw_hash_set<StringPolicy, StringHash, StringEq, std::allocator<int>> {
  using Base = typename StringTable::raw_hash_set;
  StringTable() {}
  using Base::Base;
};

struct IntTable
    : raw_hash_set<IntPolicy, container_internal::hash_default_hash<int64_t>,
                   std::equal_to<int64_t>, std::allocator<int64_t>> {
  using Base = typename IntTable::raw_hash_set;
  using Base::Base;
};

template <typename T>
struct CustomAlloc : std::allocator<T> {
  CustomAlloc() {}

  template <typename U>
  CustomAlloc(const CustomAlloc<U>& other) {}

  template <class U>
  struct rebind {
    using other = CustomAlloc<U>;
  };
};

struct CustomAllocIntTable
    : raw_hash_set<IntPolicy, container_internal::hash_default_hash<int64_t>,
                   std::equal_to<int64_t>, CustomAlloc<int64_t>> {
  using Base = typename CustomAllocIntTable::raw_hash_set;
  using Base::Base;
};

struct BadFastHash {
  template <class T>
  size_t operator()(const T&) const {
    return 0;
  }
};

struct BadTable : raw_hash_set<IntPolicy, BadFastHash, std::equal_to<int>,
                               std::allocator<int>> {
  using Base = typename BadTable::raw_hash_set;
  BadTable() {}
  using Base::Base;
};

TEST(Table, EmptyFunctorOptimization) {
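  // raw_hash_set stores its hash, equality, and allocator functors in a
  // compressed tuple, so stateless (empty) functors add nothing on top of the
  // raw fields modeled by MockTable, while a stateful hasher adds exactly
  // sizeof(StatefulHash).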
  static_assert(std::is_empty<std::equal_to<absl::string_view>>::value, "");
  static_assert(std::is_empty<std::allocator<int>>::value, "");

  struct MockTable {
    void* ctrl;
    void* slots;
    size_t size;
    size_t capacity;
    size_t growth_left;
    void* infoz;
  };
  struct StatelessHash {
    size_t operator()(absl::string_view) const { return 0; }
  };
  struct StatefulHash : StatelessHash {
    size_t dummy;
  };

  EXPECT_EQ(
      sizeof(MockTable),
      sizeof(
          raw_hash_set<StringPolicy, StatelessHash,
                       std::equal_to<absl::string_view>, std::allocator<int>>));

  EXPECT_EQ(
      sizeof(MockTable) + sizeof(StatefulHash),
      sizeof(
          raw_hash_set<StringPolicy, StatefulHash,
                       std::equal_to<absl::string_view>, std::allocator<int>>));
}

TEST(Table, Empty) {
  IntTable t;
  EXPECT_EQ(0, t.size());
  EXPECT_TRUE(t.empty());
}

#ifdef __GNUC__
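// Forces the compiler to assume that `v` is read, so the timed lookup loops
// in the Prefetch benchmark below cannot be optimized away.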
template <class T>
ABSL_ATTRIBUTE_ALWAYS_INLINE inline void DoNotOptimize(const T& v) {
  asm volatile("" : : "r,m"(v) : "memory");
}
#endif

TEST(Table, Prefetch) {
  IntTable t;
  t.emplace(1);
  // Works for both present and absent keys.
  t.prefetch(1);
  t.prefetch(2);

  // Do not run in debug mode, when prefetch is not implemented, when
  // sanitizers are enabled, or on WebAssembly.
#if defined(NDEBUG) && defined(__GNUC__) && defined(__x86_64__) &&          \
    !defined(ADDRESS_SANITIZER) && !defined(MEMORY_SANITIZER) &&            \
    !defined(THREAD_SANITIZER) && !defined(UNDEFINED_BEHAVIOR_SANITIZER) && \
    !defined(__EMSCRIPTEN__)
  const auto now = [] { return absl::base_internal::CycleClock::Now(); };

  // Make the table large enough that it does not fit in the L2 cache
  // (16.7 MB).
  static constexpr int size = 1 << 22;
  for (int i = 0; i < size; ++i) t.insert(i);

  int64_t no_prefetch = 0, prefetch = 0;
  for (int iter = 0; iter < 10; ++iter) {
    int64_t time = now();
    for (int i = 0; i < size; ++i) {
      DoNotOptimize(t.find(i));
    }
    no_prefetch += now() - time;

    time = now();
    for (int i = 0; i < size; ++i) {
      t.prefetch(i + 20);
      DoNotOptimize(t.find(i));
    }
    prefetch += now() - time;
  }

  // no_prefetch is expected to be at least 30% slower.
  EXPECT_GE(1.0 * no_prefetch / prefetch, 1.3);
#endif
}

TEST(Table, LookupEmpty) {
  IntTable t;
  auto it = t.find(0);
  EXPECT_TRUE(it == t.end());
}

TEST(Table, Insert1) {
  IntTable t;
  EXPECT_TRUE(t.find(0) == t.end());
  auto res = t.emplace(0);
  EXPECT_TRUE(res.second);
  EXPECT_THAT(*res.first, 0);
  EXPECT_EQ(1, t.size());
  EXPECT_THAT(*t.find(0), 0);
}

TEST(Table, Insert2) {
  IntTable t;
  EXPECT_TRUE(t.find(0) == t.end());
  auto res = t.emplace(0);
  EXPECT_TRUE(res.second);
  EXPECT_THAT(*res.first, 0);
  EXPECT_EQ(1, t.size());
  EXPECT_TRUE(t.find(1) == t.end());
  res = t.emplace(1);
  EXPECT_TRUE(res.second);
  EXPECT_THAT(*res.first, 1);
  EXPECT_EQ(2, t.size());
  EXPECT_THAT(*t.find(0), 0);
  EXPECT_THAT(*t.find(1), 1);
}

TEST(Table, InsertCollision) {
  BadTable t;
  EXPECT_TRUE(t.find(1) == t.end());
  auto res = t.emplace(1);
  EXPECT_TRUE(res.second);
  EXPECT_THAT(*res.first, 1);
  EXPECT_EQ(1, t.size());

  EXPECT_TRUE(t.find(2) == t.end());
  res = t.emplace(2);
  EXPECT_THAT(*res.first, 2);
  EXPECT_TRUE(res.second);
  EXPECT_EQ(2, t.size());

  EXPECT_THAT(*t.find(1), 1);
  EXPECT_THAT(*t.find(2), 2);
}

// Test that we do not add an existing element when we have to search through
// many groups with deleted elements.
TEST(Table, InsertCollisionAndFindAfterDelete) {
  BadTable t;  // all elements go to the same group.
  // Have at least 2 groups with Group::kWidth collisions,
  // plus some extra collisions in the last group.
  constexpr size_t kNumInserts = Group::kWidth * 2 + 5;
  for (size_t i = 0; i < kNumInserts; ++i) {
    auto res = t.emplace(i);
    EXPECT_TRUE(res.second);
    EXPECT_THAT(*res.first, i);
    EXPECT_EQ(i + 1, t.size());
  }

  // Remove elements one by one and check
  // that we can still find all the other elements.
  for (size_t i = 0; i < kNumInserts; ++i) {
    EXPECT_EQ(1, t.erase(i)) << i;
    for (size_t j = i + 1; j < kNumInserts; ++j) {
      EXPECT_THAT(*t.find(j), j);
      auto res = t.emplace(j);
      EXPECT_FALSE(res.second) << i << " " << j;
      EXPECT_THAT(*res.first, j);
      EXPECT_EQ(kNumInserts - i - 1, t.size());
    }
  }
  EXPECT_TRUE(t.empty());
}

TEST(Table, LazyEmplace) {
  StringTable t;
  bool called = false;
  auto it = t.lazy_emplace("abc", [&](const StringTable::constructor& f) {
    called = true;
    f("abc", "ABC");
  });
  EXPECT_TRUE(called);
  EXPECT_THAT(*it, Pair("abc", "ABC"));
  called = false;
  it = t.lazy_emplace("abc", [&](const StringTable::constructor& f) {
    called = true;
    f("abc", "DEF");
  });
  EXPECT_FALSE(called);
  EXPECT_THAT(*it, Pair("abc", "ABC"));
}

TEST(Table, ContainsEmpty) {
  IntTable t;

  EXPECT_FALSE(t.contains(0));
}

TEST(Table, Contains1) {
  IntTable t;

  EXPECT_TRUE(t.insert(0).second);
  EXPECT_TRUE(t.contains(0));
  EXPECT_FALSE(t.contains(1));

  EXPECT_EQ(1, t.erase(0));
  EXPECT_FALSE(t.contains(0));
}

TEST(Table, Contains2) {
  IntTable t;

  EXPECT_TRUE(t.insert(0).second);
  EXPECT_TRUE(t.contains(0));
  EXPECT_FALSE(t.contains(1));

  t.clear();
  EXPECT_FALSE(t.contains(0));
}

int decompose_constructed;
struct DecomposeType {
  DecomposeType(int i) : i(i) {  // NOLINT
    ++decompose_constructed;
  }

  explicit DecomposeType(const char* d) : DecomposeType(*d) {}

  int i;
};

struct DecomposeHash {
  using is_transparent = void;
  size_t operator()(DecomposeType a) const { return a.i; }
  size_t operator()(int a) const { return a; }
  size_t operator()(const char* a) const { return *a; }
};

struct DecomposeEq {
  using is_transparent = void;
  bool operator()(DecomposeType a, DecomposeType b) const { return a.i == b.i; }
  bool operator()(DecomposeType a, int b) const { return a.i == b; }
  bool operator()(DecomposeType a, const char* b) const { return a.i == *b; }
};

struct DecomposePolicy {
  using slot_type = DecomposeType;
  using key_type = DecomposeType;
  using init_type = DecomposeType;

  template <typename T>
  static void construct(void*, DecomposeType* slot, T&& v) {
    *slot = DecomposeType(std::forward<T>(v));
  }
  static void destroy(void*, DecomposeType*) {}
  static DecomposeType& element(slot_type* slot) { return *slot; }

  template <class F, class T>
  static auto apply(F&& f, const T& x) -> decltype(std::forward<F>(f)(x, x)) {
    return std::forward<F>(f)(x, x);
  }
};

template <typename Hash, typename Eq>
void TestDecompose(bool construct_three) {
  DecomposeType elem{0};
  const int one = 1;
  const char* three_p = "3";
  const auto& three = three_p;

  raw_hash_set<DecomposePolicy, Hash, Eq, std::allocator<int>> set1;

  decompose_constructed = 0;
  int expected_constructed = 0;
  EXPECT_EQ(expected_constructed, decompose_constructed);
  set1.insert(elem);
  EXPECT_EQ(expected_constructed, decompose_constructed);
  set1.insert(1);
  EXPECT_EQ(++expected_constructed, decompose_constructed);
  set1.emplace("3");
  EXPECT_EQ(++expected_constructed, decompose_constructed);
  EXPECT_EQ(expected_constructed, decompose_constructed);

  {  // insert(T&&)
    set1.insert(1);
    EXPECT_EQ(expected_constructed, decompose_constructed);
  }

  {  // insert(const T&)
    set1.insert(one);
    EXPECT_EQ(expected_constructed, decompose_constructed);
  }

  {  // insert(hint, T&&)
    set1.insert(set1.begin(), 1);
    EXPECT_EQ(expected_constructed, decompose_constructed);
  }

  {  // insert(hint, const T&)
    set1.insert(set1.begin(), one);
    EXPECT_EQ(expected_constructed, decompose_constructed);
  }

  {  // emplace(...)
    set1.emplace(1);
    EXPECT_EQ(expected_constructed, decompose_constructed);
    set1.emplace("3");
    expected_constructed += construct_three;
    EXPECT_EQ(expected_constructed, decompose_constructed);
    set1.emplace(one);
    EXPECT_EQ(expected_constructed, decompose_constructed);
    set1.emplace(three);
    expected_constructed += construct_three;
    EXPECT_EQ(expected_constructed, decompose_constructed);
  }

  {  // emplace_hint(...)
    set1.emplace_hint(set1.begin(), 1);
    EXPECT_EQ(expected_constructed, decompose_constructed);
    set1.emplace_hint(set1.begin(), "3");
    expected_constructed += construct_three;
    EXPECT_EQ(expected_constructed, decompose_constructed);
    set1.emplace_hint(set1.begin(), one);
    EXPECT_EQ(expected_constructed, decompose_constructed);
    set1.emplace_hint(set1.begin(), three);
    expected_constructed += construct_three;
    EXPECT_EQ(expected_constructed, decompose_constructed);
  }
}

TEST(Table, Decompose) {
  TestDecompose<DecomposeHash, DecomposeEq>(false);

  struct TransparentHashIntOverload {
    size_t operator()(DecomposeType a) const { return a.i; }
    size_t operator()(int a) const { return a; }
  };
  struct TransparentEqIntOverload {
    bool operator()(DecomposeType a, DecomposeType b) const {
      return a.i == b.i;
    }
    bool operator()(DecomposeType a, int b) const { return a.i == b; }
  };
  TestDecompose<TransparentHashIntOverload, DecomposeEq>(true);
  TestDecompose<TransparentHashIntOverload, TransparentEqIntOverload>(true);
  TestDecompose<DecomposeHash, TransparentEqIntOverload>(true);
}

// Returns the largest m such that a table with m elements has the same number
// of buckets as a table with n elements.
size_t MaxDensitySize(size_t n) {
  IntTable t;
  t.reserve(n);
  for (size_t i = 0; i != n; ++i) t.emplace(i);
  const size_t c = t.bucket_count();
  while (c == t.bucket_count()) t.emplace(n++);
  return t.size() - 1;
}

struct Modulo1000Hash {
  size_t operator()(int x) const { return x % 1000; }
};

struct Modulo1000HashTable
    : public raw_hash_set<IntPolicy, Modulo1000Hash, std::equal_to<int>,
                          std::allocator<int>> {};

// Test that a rehash with no resize happens when there are many deleted slots.
TEST(Table, RehashWithNoResize) {
  Modulo1000HashTable t;
  // Add keys that all share the same hash so that we get at least
  // kMinFullGroups groups with Group::kWidth collisions, then fill up to
  // MaxDensitySize.
  const size_t kMinFullGroups = 7;
  std::vector<int> keys;
  for (size_t i = 0; i < MaxDensitySize(Group::kWidth * kMinFullGroups); ++i) {
    int k = i * 1000;
    t.emplace(k);
    keys.push_back(k);
  }
  const size_t capacity = t.capacity();

  // Remove elements from all groups except the first and the last one.
  // All elements removed from full groups will be marked as kDeleted.
  const size_t erase_begin = Group::kWidth / 2;
  const size_t erase_end = (t.size() / Group::kWidth - 1) * Group::kWidth;
  for (size_t i = erase_begin; i < erase_end; ++i) {
    EXPECT_EQ(1, t.erase(keys[i])) << i;
  }
  keys.erase(keys.begin() + erase_begin, keys.begin() + erase_end);

  auto last_key = keys.back();
  size_t last_key_num_probes = GetHashtableDebugNumProbes(t, last_key);

  // Make sure that we have to make a lot of probes for the last key.
  ASSERT_GT(last_key_num_probes, kMinFullGroups);

  int x = 1;
  // Insert and erase one element at a time until an in-place rehash happens.
  while (last_key_num_probes == GetHashtableDebugNumProbes(t, last_key)) {
    t.emplace(x);
    ASSERT_EQ(capacity, t.capacity());
    // All elements should be there.
    ASSERT_TRUE(t.find(x) != t.end()) << x;
    for (const auto& k : keys) {
      ASSERT_TRUE(t.find(k) != t.end()) << k;
    }
    t.erase(x);
    ++x;
  }
}

TEST(Table, InsertEraseStressTest) {
  IntTable t;
  const size_t kMinElementCount = 250;
  std::deque<int> keys;
  size_t i = 0;
  for (; i < MaxDensitySize(kMinElementCount); ++i) {
    t.emplace(i);
    keys.push_back(i);
  }
  const size_t kNumIterations = 1000000;
  for (; i < kNumIterations; ++i) {
    ASSERT_EQ(1, t.erase(keys.front()));
    keys.pop_front();
    t.emplace(i);
    keys.push_back(i);
  }
}

TEST(Table, InsertOverloads) {
  StringTable t;
  // These should all trigger the insert(init_type) overload.
  t.insert({{}, {}});
  t.insert({"ABC", {}});
  t.insert({"DEF", "!!!"});

  EXPECT_THAT(t, UnorderedElementsAre(Pair("", ""), Pair("ABC", ""),
                                      Pair("DEF", "!!!")));
}

TEST(Table, LargeTable) {
  IntTable t;
  for (int64_t i = 0; i != 100000; ++i) t.emplace(i << 40);
  for (int64_t i = 0; i != 100000; ++i) ASSERT_EQ(i << 40, *t.find(i << 40));
}

// Times out if copying is quadratic, as it once was in Rust's hash table.
TEST(Table, EnsureNonQuadraticAsInRust) {
  static const size_t kLargeSize = 1 << 15;

  IntTable t;
  for (size_t i = 0; i != kLargeSize; ++i) {
    t.insert(i);
  }

  // If this is quadratic, the test will time out.
  IntTable t2;
  for (const auto& entry : t) t2.insert(entry);
}

TEST(Table, ClearBug) {
  IntTable t;
  constexpr size_t capacity = container_internal::Group::kWidth - 1;
  constexpr size_t max_size = capacity / 2 + 1;
  for (size_t i = 0; i < max_size; ++i) {
    t.insert(i);
  }
  ASSERT_EQ(capacity, t.capacity());
  intptr_t original = reinterpret_cast<intptr_t>(&*t.find(2));
  t.clear();
  ASSERT_EQ(capacity, t.capacity());
  for (size_t i = 0; i < max_size; ++i) {
    t.insert(i);
  }
  ASSERT_EQ(capacity, t.capacity());
  intptr_t second = reinterpret_cast<intptr_t>(&*t.find(2));
  // We are checking that original and second are close enough to each other
  // that they are probably still in the same group.  This is not strictly
  // guaranteed.
  EXPECT_LT(std::abs(original - second),
            capacity * sizeof(IntTable::value_type));
}

TEST(Table, Erase) {
  IntTable t;
  EXPECT_TRUE(t.find(0) == t.end());
  auto res = t.emplace(0);
  EXPECT_TRUE(res.second);
  EXPECT_EQ(1, t.size());
  t.erase(res.first);
  EXPECT_EQ(0, t.size());
  EXPECT_TRUE(t.find(0) == t.end());
}

TEST(Table, EraseMaintainsValidIterator) {
  IntTable t;
  const int kNumElements = 100;
  for (int i = 0; i < kNumElements; i++) {
    EXPECT_TRUE(t.emplace(i).second);
  }
  EXPECT_EQ(t.size(), kNumElements);

  int num_erase_calls = 0;
  auto it = t.begin();
  while (it != t.end()) {
    t.erase(it++);
    num_erase_calls++;
  }

  EXPECT_TRUE(t.empty());
  EXPECT_EQ(num_erase_calls, kNumElements);
}

// Collect N bad keys using the following algorithm:
// 1. Create an empty table and reserve it to 2 * N.
// 2. Insert N elements.
// 3. Take the first Group::kWidth - 1 elements into the bad_keys array.
// 4. Clear the table without resizing.
// 5. Go to step 2 until N keys are collected.
std::vector<int64_t> CollectBadMergeKeys(size_t N) {
  static constexpr int kGroupSize = Group::kWidth - 1;

  auto topk_range = [](size_t b, size_t e,
                       IntTable* t) -> std::vector<int64_t> {
    for (size_t i = b; i != e; ++i) {
      t->emplace(i);
    }
    std::vector<int64_t> res;
    res.reserve(kGroupSize);
    auto it = t->begin();
    for (size_t i = b; i != e && i != b + kGroupSize; ++i, ++it) {
      res.push_back(*it);
    }
    return res;
  };

  std::vector<int64_t> bad_keys;
  bad_keys.reserve(N);
  IntTable t;
  t.reserve(N * 2);

  for (size_t b = 0; bad_keys.size() < N; b += N) {
    auto keys = topk_range(b, b + N, &t);
    bad_keys.insert(bad_keys.end(), keys.begin(), keys.end());
    t.erase(t.begin(), t.end());
    EXPECT_TRUE(t.empty());
  }
  return bad_keys;
}

struct ProbeStats {
  // Number of elements with specific probe length over all tested tables.
  std::vector<size_t> all_probes_histogram;
  // Ratios total_probe_length/size for every tested table.
  std::vector<double> single_table_ratios;

  friend ProbeStats operator+(const ProbeStats& a, const ProbeStats& b) {
    ProbeStats res = a;
    res.all_probes_histogram.resize(std::max(res.all_probes_histogram.size(),
                                             b.all_probes_histogram.size()));
    std::transform(b.all_probes_histogram.begin(), b.all_probes_histogram.end(),
                   res.all_probes_histogram.begin(),
                   res.all_probes_histogram.begin(), std::plus<size_t>());
    res.single_table_ratios.insert(res.single_table_ratios.end(),
                                   b.single_table_ratios.begin(),
                                   b.single_table_ratios.end());
    return res;
  }

  // Average ratio total_probe_length/size over tables.
  double AvgRatio() const {
    return std::accumulate(single_table_ratios.begin(),
                           single_table_ratios.end(), 0.0) /
           single_table_ratios.size();
  }

  // Maximum ratio total_probe_length/size over tables.
  double MaxRatio() const {
    return *std::max_element(single_table_ratios.begin(),
                             single_table_ratios.end());
  }

  // Percentile ratio total_probe_length/size over tables.
  double PercentileRatio(double Percentile = 0.95) const {
    auto r = single_table_ratios;
    auto mid = r.begin() + static_cast<size_t>(r.size() * Percentile);
    if (mid != r.end()) {
      std::nth_element(r.begin(), mid, r.end());
      return *mid;
    } else {
      return MaxRatio();
    }
  }

  // Maximum probe length over all elements and all tables.
  size_t MaxProbe() const { return all_probes_histogram.size(); }

  // Fraction of elements with specified probe length.
  std::vector<double> ProbeNormalizedHistogram() const {
    double total_elements = std::accumulate(all_probes_histogram.begin(),
                                            all_probes_histogram.end(), 0ull);
    std::vector<double> res;
    for (size_t p : all_probes_histogram) {
      res.push_back(p / total_elements);
    }
    return res;
  }

  size_t PercentileProbe(double Percentile = 0.99) const {
    size_t idx = 0;
    for (double p : ProbeNormalizedHistogram()) {
      if (Percentile > p) {
        Percentile -= p;
        ++idx;
      } else {
        return idx;
      }
    }
    return idx;
  }

  friend std::ostream& operator<<(std::ostream& out, const ProbeStats& s) {
    out << "{AvgRatio:" << s.AvgRatio() << ", MaxRatio:" << s.MaxRatio()
        << ", PercentileRatio:" << s.PercentileRatio()
        << ", MaxProbe:" << s.MaxProbe() << ", Probes=[";
    for (double p : s.ProbeNormalizedHistogram()) {
      out << p << ",";
    }
    out << "]}";

    return out;
  }
};

struct ExpectedStats {
  double avg_ratio;
  double max_ratio;
  std::vector<std::pair<double, double>> percentile_ratios;
  std::vector<std::pair<double, double>> percentile_probes;

  friend std::ostream& operator<<(std::ostream& out, const ExpectedStats& s) {
    out << "{AvgRatio:" << s.avg_ratio << ", MaxRatio:" << s.max_ratio
        << ", PercentileRatios: [";
    for (auto el : s.percentile_ratios) {
      out << el.first << ":" << el.second << ", ";
    }
    out << "], PercentileProbes: [";
    for (auto el : s.percentile_probes) {
      out << el.first << ":" << el.second << ", ";
    }
    out << "]}";

    return out;
  }
};

void VerifyStats(size_t size, const ExpectedStats& exp,
                 const ProbeStats& stats) {
  EXPECT_LT(stats.AvgRatio(), exp.avg_ratio) << size << " " << stats;
  EXPECT_LT(stats.MaxRatio(), exp.max_ratio) << size << " " << stats;
  for (auto pr : exp.percentile_ratios) {
    EXPECT_LE(stats.PercentileRatio(pr.first), pr.second)
        << size << " " << pr.first << " " << stats;
  }

  for (auto pr : exp.percentile_probes) {
    EXPECT_LE(stats.PercentileProbe(pr.first), pr.second)
        << size << " " << pr.first << " " << stats;
  }
}

using ProbeStatsPerSize = std::map<size_t, ProbeStats>;

// Collect total ProbeStats on num_iters iterations of the following algorithm:
// 1. Create a new table and reserve it to keys.size() * 2.
// 2. Insert all keys XORed with the seed.
// 3. Collect ProbeStats from the final table.
ProbeStats CollectProbeStatsOnKeysXoredWithSeed(
    const std::vector<int64_t>& keys, size_t num_iters) {
  const size_t reserve_size = keys.size() * 2;

  ProbeStats stats;

  int64_t seed = 0x71b1a19b907d6e33;
  while (num_iters--) {
    seed = static_cast<int64_t>(static_cast<uint64_t>(seed) * 17 + 13);
    IntTable t1;
    t1.reserve(reserve_size);
    for (const auto& key : keys) {
      t1.emplace(key ^ seed);
    }

    auto probe_histogram = GetHashtableDebugNumProbesHistogram(t1);
    stats.all_probes_histogram.resize(
        std::max(stats.all_probes_histogram.size(), probe_histogram.size()));
    std::transform(probe_histogram.begin(), probe_histogram.end(),
                   stats.all_probes_histogram.begin(),
                   stats.all_probes_histogram.begin(), std::plus<size_t>());

    size_t total_probe_seq_length = 0;
    for (size_t i = 0; i < probe_histogram.size(); ++i) {
      total_probe_seq_length += i * probe_histogram[i];
    }
    stats.single_table_ratios.push_back(total_probe_seq_length * 1.0 /
                                        keys.size());
    t1.erase(t1.begin(), t1.end());
  }
  return stats;
}

ExpectedStats XorSeedExpectedStats() {
  constexpr bool kRandomizesInserts =
#ifdef NDEBUG
      false;
#else   // NDEBUG
      true;
#endif  // NDEBUG

  // The effective load factor is larger in non-opt mode because we insert
  // elements out of order.
  switch (container_internal::Group::kWidth) {
    case 8:
      if (kRandomizesInserts) {
        return {0.05,
                1.0,
                {{0.95, 0.5}},
                {{0.95, 0}, {0.99, 2}, {0.999, 4}, {0.9999, 10}}};
      } else {
        return {0.05,
                2.0,
                {{0.95, 0.1}},
                {{0.95, 0}, {0.99, 2}, {0.999, 4}, {0.9999, 10}}};
      }
    case 16:
      if (kRandomizesInserts) {
        return {0.1,
                1.0,
                {{0.95, 0.1}},
                {{0.95, 0}, {0.99, 1}, {0.999, 8}, {0.9999, 15}}};
      } else {
        return {0.05,
                1.0,
                {{0.95, 0.05}},
                {{0.95, 0}, {0.99, 1}, {0.999, 4}, {0.9999, 10}}};
      }
  }
  ABSL_RAW_LOG(FATAL, "%s", "Unknown Group width");
  return {};
}

TEST(Table, DISABLED_EnsureNonQuadraticTopNXorSeedByProbeSeqLength) {
  ProbeStatsPerSize stats;
  std::vector<size_t> sizes = {Group::kWidth << 5, Group::kWidth << 10};
  for (size_t size : sizes) {
    stats[size] =
        CollectProbeStatsOnKeysXoredWithSeed(CollectBadMergeKeys(size), 200);
  }
  auto expected = XorSeedExpectedStats();
  for (size_t size : sizes) {
    auto& stat = stats[size];
    VerifyStats(size, expected, stat);
  }
}

// Collect total ProbeStats on num_iters iterations of the following algorithm:
// 1. Create a new table.
// 2. Select a random 10% chunk of the keys and, for each key, insert the 10
//    elements key * 17 + j * 13 for j in [0, 10).
// 3. Collect ProbeStats from the final table.
ProbeStats CollectProbeStatsOnLinearlyTransformedKeys(
    const std::vector<int64_t>& keys, size_t num_iters) {
  ProbeStats stats;

  std::random_device rd;
  std::mt19937 rng(rd());
  auto linear_transform = [](size_t x, size_t y) { return x * 17 + y * 13; };
  std::uniform_int_distribution<size_t> dist(0, keys.size() - 1);
  while (num_iters--) {
    IntTable t1;
    size_t num_keys = keys.size() / 10;
    size_t start = dist(rng);
    for (size_t i = 0; i != num_keys; ++i) {
      for (size_t j = 0; j != 10; ++j) {
        t1.emplace(linear_transform(keys[(i + start) % keys.size()], j));
      }
    }

    auto probe_histogram = GetHashtableDebugNumProbesHistogram(t1);
    stats.all_probes_histogram.resize(
        std::max(stats.all_probes_histogram.size(), probe_histogram.size()));
    std::transform(probe_histogram.begin(), probe_histogram.end(),
                   stats.all_probes_histogram.begin(),
                   stats.all_probes_histogram.begin(), std::plus<size_t>());

    size_t total_probe_seq_length = 0;
    for (size_t i = 0; i < probe_histogram.size(); ++i) {
      total_probe_seq_length += i * probe_histogram[i];
    }
    stats.single_table_ratios.push_back(total_probe_seq_length * 1.0 /
                                        t1.size());
    t1.erase(t1.begin(), t1.end());
  }
  return stats;
}

ExpectedStats LinearTransformExpectedStats() {
  constexpr bool kRandomizesInserts =
#ifdef NDEBUG
      false;
#else   // NDEBUG
      true;
#endif  // NDEBUG

  // The effective load factor is larger in non-opt mode because we insert
  // elements out of order.
  switch (container_internal::Group::kWidth) {
    case 8:
      if (kRandomizesInserts) {
        return {0.1,
                0.5,
                {{0.95, 0.3}},
                {{0.95, 0}, {0.99, 1}, {0.999, 8}, {0.9999, 15}}};
      } else {
        return {0.15,
                0.5,
                {{0.95, 0.3}},
                {{0.95, 0}, {0.99, 3}, {0.999, 15}, {0.9999, 25}}};
      }
    case 16:
      if (kRandomizesInserts) {
        return {0.1,
                0.4,
                {{0.95, 0.3}},
                {{0.95, 0}, {0.99, 1}, {0.999, 8}, {0.9999, 15}}};
      } else {
        return {0.05,
                0.2,
                {{0.95, 0.1}},
                {{0.95, 0}, {0.99, 1}, {0.999, 6}, {0.9999, 10}}};
      }
  }
  ABSL_RAW_LOG(FATAL, "%s", "Unknown Group width");
  return {};
}

TEST(Table, DISABLED_EnsureNonQuadraticTopNLinearTransformByProbeSeqLength) {
  ProbeStatsPerSize stats;
  std::vector<size_t> sizes = {Group::kWidth << 5, Group::kWidth << 10};
  for (size_t size : sizes) {
    stats[size] = CollectProbeStatsOnLinearlyTransformedKeys(
        CollectBadMergeKeys(size), 300);
  }
  auto expected = LinearTransformExpectedStats();
  for (size_t size : sizes) {
    auto& stat = stats[size];
    VerifyStats(size, expected, stat);
  }
}

TEST(Table, EraseCollision) {
  BadTable t;

  // 1 2 3
  t.emplace(1);
  t.emplace(2);
  t.emplace(3);
  EXPECT_THAT(*t.find(1), 1);
  EXPECT_THAT(*t.find(2), 2);
  EXPECT_THAT(*t.find(3), 3);
  EXPECT_EQ(3, t.size());

  // 1 DELETED 3
  t.erase(t.find(2));
  EXPECT_THAT(*t.find(1), 1);
  EXPECT_TRUE(t.find(2) == t.end());
  EXPECT_THAT(*t.find(3), 3);
  EXPECT_EQ(2, t.size());

  // DELETED DELETED 3
  t.erase(t.find(1));
  EXPECT_TRUE(t.find(1) == t.end());
  EXPECT_TRUE(t.find(2) == t.end());
  EXPECT_THAT(*t.find(3), 3);
  EXPECT_EQ(1, t.size());

  // DELETED DELETED DELETED
  t.erase(t.find(3));
  EXPECT_TRUE(t.find(1) == t.end());
  EXPECT_TRUE(t.find(2) == t.end());
  EXPECT_TRUE(t.find(3) == t.end());
  EXPECT_EQ(0, t.size());
}

TEST(Table, EraseInsertProbing) {
  BadTable t(100);

  // 1 2 3 4
  t.emplace(1);
  t.emplace(2);
  t.emplace(3);
  t.emplace(4);

  // 1 DELETED 3 DELETED
  t.erase(t.find(2));
  t.erase(t.find(4));

  // 1 10 3 11 12
  t.emplace(10);
  t.emplace(11);
  t.emplace(12);

  EXPECT_EQ(5, t.size());
  EXPECT_THAT(t, UnorderedElementsAre(1, 10, 3, 11, 12));
}

TEST(Table, Clear) {
  IntTable t;
  EXPECT_TRUE(t.find(0) == t.end());
  t.clear();
  EXPECT_TRUE(t.find(0) == t.end());
  auto res = t.emplace(0);
  EXPECT_TRUE(res.second);
  EXPECT_EQ(1, t.size());
  t.clear();
  EXPECT_EQ(0, t.size());
  EXPECT_TRUE(t.find(0) == t.end());
}

TEST(Table, Swap) {
  IntTable t;
  EXPECT_TRUE(t.find(0) == t.end());
  auto res = t.emplace(0);
  EXPECT_TRUE(res.second);
  EXPECT_EQ(1, t.size());
  IntTable u;
  t.swap(u);
  EXPECT_EQ(0, t.size());
  EXPECT_EQ(1, u.size());
  EXPECT_TRUE(t.find(0) == t.end());
  EXPECT_THAT(*u.find(0), 0);
}

TEST(Table, Rehash) {
  IntTable t;
  EXPECT_TRUE(t.find(0) == t.end());
  t.emplace(0);
  t.emplace(1);
  EXPECT_EQ(2, t.size());
  t.rehash(128);
  EXPECT_EQ(2, t.size());
  EXPECT_THAT(*t.find(0), 0);
  EXPECT_THAT(*t.find(1), 1);
}

TEST(Table, RehashDoesNotRehashWhenNotNecessary) {
  IntTable t;
  t.emplace(0);
  t.emplace(1);
  auto* p = &*t.find(0);
  t.rehash(1);
  EXPECT_EQ(p, &*t.find(0));
}

TEST(Table, RehashZeroDoesNotAllocateOnEmptyTable) {
  IntTable t;
  t.rehash(0);
  EXPECT_EQ(0, t.bucket_count());
}

TEST(Table, RehashZeroDeallocatesEmptyTable) {
  IntTable t;
  t.emplace(0);
  t.clear();
  EXPECT_NE(0, t.bucket_count());
  t.rehash(0);
  EXPECT_EQ(0, t.bucket_count());
}

TEST(Table, RehashZeroForcesRehash) {
  IntTable t;
  t.emplace(0);
  t.emplace(1);
  auto* p = &*t.find(0);
  t.rehash(0);
  EXPECT_NE(p, &*t.find(0));
}

TEST(Table, ConstructFromInitList) {
  using P = std::pair<std::string, std::string>;
  struct Q {
    operator P() const { return {}; }
  };
  StringTable t = {P(), Q(), {}, {{}, {}}};
}

TEST(Table, CopyConstruct) {
  IntTable t;
  t.emplace(0);
  EXPECT_EQ(1, t.size());
  {
    IntTable u(t);
    EXPECT_EQ(1, u.size());
    EXPECT_THAT(*u.find(0), 0);
  }
  {
    IntTable u{t};
    EXPECT_EQ(1, u.size());
    EXPECT_THAT(*u.find(0), 0);
  }
  {
    IntTable u = t;
    EXPECT_EQ(1, u.size());
    EXPECT_THAT(*u.find(0), 0);
  }
}

TEST(Table, CopyConstructWithAlloc) {
  StringTable t;
  t.emplace("a", "b");
  EXPECT_EQ(1, t.size());
  StringTable u(t, Alloc<std::pair<std::string, std::string>>());
  EXPECT_EQ(1, u.size());
  EXPECT_THAT(*u.find("a"), Pair("a", "b"));
}

struct ExplicitAllocIntTable
    : raw_hash_set<IntPolicy, container_internal::hash_default_hash<int64_t>,
                   std::equal_to<int64_t>, Alloc<int64_t>> {
  ExplicitAllocIntTable() {}
};

TEST(Table, AllocWithExplicitCtor) {
  ExplicitAllocIntTable t;
  EXPECT_EQ(0, t.size());
}

TEST(Table, MoveConstruct) {
  {
    StringTable t;
    t.emplace("a", "b");
    EXPECT_EQ(1, t.size());

    StringTable u(std::move(t));
    EXPECT_EQ(1, u.size());
    EXPECT_THAT(*u.find("a"), Pair("a", "b"));
  }
  {
    StringTable t;
    t.emplace("a", "b");
    EXPECT_EQ(1, t.size());

    StringTable u{std::move(t)};
    EXPECT_EQ(1, u.size());
    EXPECT_THAT(*u.find("a"), Pair("a", "b"));
  }
  {
    StringTable t;
    t.emplace("a", "b");
    EXPECT_EQ(1, t.size());

    StringTable u = std::move(t);
    EXPECT_EQ(1, u.size());
    EXPECT_THAT(*u.find("a"), Pair("a", "b"));
  }
}

TEST(Table, MoveConstructWithAlloc) {
  StringTable t;
  t.emplace("a", "b");
  EXPECT_EQ(1, t.size());
  StringTable u(std::move(t), Alloc<std::pair<std::string, std::string>>());
  EXPECT_EQ(1, u.size());
  EXPECT_THAT(*u.find("a"), Pair("a", "b"));
}

TEST(Table, CopyAssign) {
  StringTable t;
  t.emplace("a", "b");
  EXPECT_EQ(1, t.size());
  StringTable u;
  u = t;
  EXPECT_EQ(1, u.size());
  EXPECT_THAT(*u.find("a"), Pair("a", "b"));
}

TEST(Table, CopySelfAssign) {
  StringTable t;
  t.emplace("a", "b");
  EXPECT_EQ(1, t.size());
  t = *&t;
  EXPECT_EQ(1, t.size());
  EXPECT_THAT(*t.find("a"), Pair("a", "b"));
}

TEST(Table, MoveAssign) {
  StringTable t;
  t.emplace("a", "b");
  EXPECT_EQ(1, t.size());
  StringTable u;
  u = std::move(t);
  EXPECT_EQ(1, u.size());
  EXPECT_THAT(*u.find("a"), Pair("a", "b"));
}

TEST(Table, Equality) {
  StringTable t;
  std::vector<std::pair<std::string, std::string>> v = {{"a", "b"},
                                                        {"aa", "bb"}};
  t.insert(std::begin(v), std::end(v));
  StringTable u = t;
  EXPECT_EQ(u, t);
}

TEST(Table, Equality2) {
  StringTable t;
  std::vector<std::pair<std::string, std::string>> v1 = {{"a", "b"},
                                                         {"aa", "bb"}};
  t.insert(std::begin(v1), std::end(v1));
  StringTable u;
  std::vector<std::pair<std::string, std::string>> v2 = {{"a", "a"},
                                                         {"aa", "aa"}};
  u.insert(std::begin(v2), std::end(v2));
  EXPECT_NE(u, t);
}

TEST(Table, Equality3) {
  StringTable t;
  std::vector<std::pair<std::string, std::string>> v1 = {{"b", "b"},
                                                         {"bb", "bb"}};
  t.insert(std::begin(v1), std::end(v1));
  StringTable u;
  std::vector<std::pair<std::string, std::string>> v2 = {{"a", "a"},
                                                         {"aa", "aa"}};
  u.insert(std::begin(v2), std::end(v2));
  EXPECT_NE(u, t);
}

TEST(Table, NumDeletedRegression) {
  IntTable t;
  t.emplace(0);
  t.erase(t.find(0));
  // Construct over a deleted slot.
  t.emplace(0);
  t.clear();
}

01517 TEST(Table, FindFullDeletedRegression) {
01518   IntTable t;
01519   for (int i = 0; i < 1000; ++i) {
01520     t.emplace(i);
01521     t.erase(t.find(i));
01522   }
01523   EXPECT_EQ(0, t.size());
01524 }
01525 
01526 TEST(Table, ReplacingDeletedSlotDoesNotRehash) {
01527   size_t n;
01528   {
01529     // Compute n such that n is the maximum number of elements before rehash.
01530     IntTable t;
01531     t.emplace(0);
01532     size_t c = t.bucket_count();
01533     for (n = 1; c == t.bucket_count(); ++n) t.emplace(n);
01534     --n;
01535   }
01536   IntTable t;
01537   t.rehash(n);
01538   const size_t c = t.bucket_count();
01539   for (size_t i = 0; i != n; ++i) t.emplace(i);
01540   EXPECT_EQ(c, t.bucket_count()) << "rehashing threshold = " << n;
01541   t.erase(0);
01542   t.emplace(0);
01543   EXPECT_EQ(c, t.bucket_count()) << "rehashing threshold = " << n;
01544 }
01545 
01546 TEST(Table, NoThrowMoveConstruct) {
01547   ASSERT_TRUE(
01548       std::is_nothrow_copy_constructible<absl::Hash<absl::string_view>>::value);
01549   ASSERT_TRUE(std::is_nothrow_copy_constructible<
01550               std::equal_to<absl::string_view>>::value);
01551   ASSERT_TRUE(std::is_nothrow_copy_constructible<std::allocator<int>>::value);
01552   EXPECT_TRUE(std::is_nothrow_move_constructible<StringTable>::value);
01553 }
01554 
01555 TEST(Table, NoThrowMoveAssign) {
01556   ASSERT_TRUE(
01557       std::is_nothrow_move_assignable<absl::Hash<absl::string_view>>::value);
01558   ASSERT_TRUE(
01559       std::is_nothrow_move_assignable<std::equal_to<absl::string_view>>::value);
01560   ASSERT_TRUE(std::is_nothrow_move_assignable<std::allocator<int>>::value);
01561   ASSERT_TRUE(
01562       absl::allocator_traits<std::allocator<int>>::is_always_equal::value);
01563   EXPECT_TRUE(std::is_nothrow_move_assignable<StringTable>::value);
01564 }
01565 
01566 TEST(Table, NoThrowSwappable) {
01567   ASSERT_TRUE(
01568       container_internal::IsNoThrowSwappable<absl::Hash<absl::string_view>>());
01569   ASSERT_TRUE(container_internal::IsNoThrowSwappable<
01570               std::equal_to<absl::string_view>>());
01571   ASSERT_TRUE(container_internal::IsNoThrowSwappable<std::allocator<int>>());
01572   EXPECT_TRUE(container_internal::IsNoThrowSwappable<StringTable>());
01573 }
01574 
01575 TEST(Table, HeterogeneousLookup) {
01576   struct Hash {
01577     size_t operator()(int64_t i) const { return i; }
01578     size_t operator()(double i) const {
01579       ADD_FAILURE();
01580       return i;
01581     }
01582   };
01583   struct Eq {
01584     bool operator()(int64_t a, int64_t b) const { return a == b; }
01585     bool operator()(double a, int64_t b) const {
01586       ADD_FAILURE();
01587       return a == b;
01588     }
01589     bool operator()(int64_t a, double b) const {
01590       ADD_FAILURE();
01591       return a == b;
01592     }
01593     bool operator()(double a, double b) const {
01594       ADD_FAILURE();
01595       return a == b;
01596     }
01597   };
01598 
01599   struct THash {
01600     using is_transparent = void;
01601     size_t operator()(int64_t i) const { return i; }
01602     size_t operator()(double i) const { return i; }
01603   };
01604   struct TEq {
01605     using is_transparent = void;
01606     bool operator()(int64_t a, int64_t b) const { return a == b; }
01607     bool operator()(double a, int64_t b) const { return a == b; }
01608     bool operator()(int64_t a, double b) const { return a == b; }
01609     bool operator()(double a, double b) const { return a == b; }
01610   };
01611 
01612   raw_hash_set<IntPolicy, Hash, Eq, Alloc<int64_t>> s{0, 1, 2};
01613   // The double is converted to int64_t before the query.
01614   EXPECT_EQ(1, *s.find(double{1.1}));
01615 
01616   raw_hash_set<IntPolicy, THash, TEq, Alloc<int64_t>> ts{0, 1, 2};
01617   // Transparent functors use the double directly, so nothing matches 1.1.
01618   EXPECT_TRUE(ts.find(1.1) == ts.end());
01619 }
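
// Illustrative sketch, added for exposition (not part of the original
// suite): because StringTable's hash and equality are transparent over
// absl::string_view, lookups accept a string_view directly and no temporary
// std::string needs to be built for the probe.
TEST(Table, HeterogeneousLookupSketch) {
  StringTable t;
  t.emplace("abc", "def");
  absl::string_view key = "abc";
  EXPECT_TRUE(t.find(key) != t.end());
}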
01620 
01621 template <class Table>
01622 using CallFind = decltype(std::declval<Table&>().find(17));
01623 
01624 template <class Table>
01625 using CallErase = decltype(std::declval<Table&>().erase(17));
01626 
01627 template <class Table>
01628 using CallExtract = decltype(std::declval<Table&>().extract(17));
01629 
01630 template <class Table>
01631 using CallPrefetch = decltype(std::declval<Table&>().prefetch(17));
01632 
01633 template <class Table>
01634 using CallCount = decltype(std::declval<Table&>().count(17));
01635 
01636 template <template <typename> class C, class Table, class = void>
01637 struct VerifyResultOf : std::false_type {};
01638 
01639 template <template <typename> class C, class Table>
01640 struct VerifyResultOf<C, Table, absl::void_t<C<Table>>> : std::true_type {};
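
// How the detection above works (comment added for exposition): the primary
// template defaults to false_type; the partial specialization is viable only
// when C<Table> names a well-formed type, in which case absl::void_t<C<Table>>
// collapses to void and the true_type specialization is chosen. For example,
// IntTable::find(17) compiles, so the detector reports true:
static_assert(VerifyResultOf<CallFind, IntTable>::value,
              "find(int) should be detected for IntTable");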
01641 
01642 TEST(Table, HeterogeneousLookupOverloads) {
01643   using NonTransparentTable =
01644       raw_hash_set<StringPolicy, absl::Hash<absl::string_view>,
01645                    std::equal_to<absl::string_view>, std::allocator<int>>;
01646 
01647   EXPECT_FALSE((VerifyResultOf<CallFind, NonTransparentTable>()));
01648   EXPECT_FALSE((VerifyResultOf<CallErase, NonTransparentTable>()));
01649   EXPECT_FALSE((VerifyResultOf<CallExtract, NonTransparentTable>()));
01650   EXPECT_FALSE((VerifyResultOf<CallPrefetch, NonTransparentTable>()));
01651   EXPECT_FALSE((VerifyResultOf<CallCount, NonTransparentTable>()));
01652 
01653   using TransparentTable = raw_hash_set<
01654       StringPolicy,
01655       absl::container_internal::hash_default_hash<absl::string_view>,
01656       absl::container_internal::hash_default_eq<absl::string_view>,
01657       std::allocator<int>>;
01658 
01659   EXPECT_TRUE((VerifyResultOf<CallFind, TransparentTable>()));
01660   EXPECT_TRUE((VerifyResultOf<CallErase, TransparentTable>()));
01661   EXPECT_TRUE((VerifyResultOf<CallExtract, TransparentTable>()));
01662   EXPECT_TRUE((VerifyResultOf<CallPrefetch, TransparentTable>()));
01663   EXPECT_TRUE((VerifyResultOf<CallCount, TransparentTable>()));
01664 }
01665 
01666 // TODO(alkis): Expand iterator tests.
01667 TEST(Iterator, IsDefaultConstructible) {
01668   StringTable::iterator i;
01669   EXPECT_TRUE(i == StringTable::iterator());
01670 }
01671 
01672 TEST(ConstIterator, IsDefaultConstructible) {
01673   StringTable::const_iterator i;
01674   EXPECT_TRUE(i == StringTable::const_iterator());
01675 }
01676 
01677 TEST(Iterator, ConvertsToConstIterator) {
01678   StringTable::iterator i;
01679   EXPECT_TRUE(i == StringTable::const_iterator());
01680 }
01681 
01682 TEST(Iterator, Iterates) {
01683   IntTable t;
01684   for (size_t i = 3; i != 6; ++i) EXPECT_TRUE(t.emplace(i).second);
01685   EXPECT_THAT(t, UnorderedElementsAre(3, 4, 5));
01686 }
01687 
01688 TEST(Table, Merge) {
01689   StringTable t1, t2;
01690   t1.emplace("0", "-0");
01691   t1.emplace("1", "-1");
01692   t2.emplace("0", "~0");
01693   t2.emplace("2", "~2");
01694 
01695   EXPECT_THAT(t1, UnorderedElementsAre(Pair("0", "-0"), Pair("1", "-1")));
01696   EXPECT_THAT(t2, UnorderedElementsAre(Pair("0", "~0"), Pair("2", "~2")));
01697 
01698   t1.merge(t2);
01699   EXPECT_THAT(t1, UnorderedElementsAre(Pair("0", "-0"), Pair("1", "-1"),
01700                                        Pair("2", "~2")));
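  // The element whose key collided ("0") is not moved; it stays in the source.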
01701   EXPECT_THAT(t2, UnorderedElementsAre(Pair("0", "~0")));
01702 }
01703 
01704 TEST(Nodes, EmptyNodeType) {
01705   using node_type = StringTable::node_type;
01706   node_type n;
01707   EXPECT_FALSE(n);
01708   EXPECT_TRUE(n.empty());
01709 
01710   EXPECT_TRUE((std::is_same<node_type::allocator_type,
01711                             StringTable::allocator_type>::value));
01712 }
01713 
01714 TEST(Nodes, ExtractInsert) {
01715   constexpr char k0[] = "Very long string zero.";
01716   constexpr char k1[] = "Very long string one.";
01717   constexpr char k2[] = "Very long string two.";
01718   StringTable t = {{k0, ""}, {k1, ""}, {k2, ""}};
01719   EXPECT_THAT(t,
01720               UnorderedElementsAre(Pair(k0, ""), Pair(k1, ""), Pair(k2, "")));
01721 
01722   auto node = t.extract(k0);
01723   EXPECT_THAT(t, UnorderedElementsAre(Pair(k1, ""), Pair(k2, "")));
01724   EXPECT_TRUE(node);
01725   EXPECT_FALSE(node.empty());
01726 
01727   StringTable t2;
01728   StringTable::insert_return_type res = t2.insert(std::move(node));
01729   EXPECT_TRUE(res.inserted);
01730   EXPECT_THAT(*res.position, Pair(k0, ""));
01731   EXPECT_FALSE(res.node);
01732   EXPECT_THAT(t2, UnorderedElementsAre(Pair(k0, "")));
01733 
01734   // Not there.
01735   EXPECT_THAT(t, UnorderedElementsAre(Pair(k1, ""), Pair(k2, "")));
01736   node = t.extract("Not there!");
01737   EXPECT_THAT(t, UnorderedElementsAre(Pair(k1, ""), Pair(k2, "")));
01738   EXPECT_FALSE(node);
01739 
01740   // Inserting nothing.
01741   res = t2.insert(std::move(node));
01742   EXPECT_FALSE(res.inserted);
01743   EXPECT_EQ(res.position, t2.end());
01744   EXPECT_FALSE(res.node);
01745   EXPECT_THAT(t2, UnorderedElementsAre(Pair(k0, "")));
01746 
01747   t.emplace(k0, "1");
01748   node = t.extract(k0);
01749 
01750   // Insert duplicate.
01751   res = t2.insert(std::move(node));
01752   EXPECT_FALSE(res.inserted);
01753   EXPECT_THAT(*res.position, Pair(k0, ""));
01754   EXPECT_TRUE(res.node);
01755   EXPECT_FALSE(node);
01756 }
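
// Summary of the insert_return_type states exercised above (comment added
// for exposition):
//   - success:    inserted == true,  position -> the new element, node empty.
//   - empty node: inserted == false, position == end(), node empty.
//   - duplicate:  inserted == false, position -> the existing element, and
//                 the node keeps ownership of the rejected value.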
01757 
01758 IntTable MakeSimpleTable(size_t size) {
01759   IntTable t;
01760   while (t.size() < size) t.insert(t.size());
01761   return t;
01762 }
01763 
01764 std::vector<int> OrderOfIteration(const IntTable& t) {
01765   return {t.begin(), t.end()};
01766 }
01767 
01768 // These IterationOrderChanges tests depend on non-deterministic behavior.
01769 // Non-determinism is injected from the table's pointer, in such a way that
01770 // only the memory page matters. We have to retry enough times to make sure
01771 // we touch different memory pages, which is what causes the ordering to
01772 // change. We also keep the old tables alive so we are not handed the same
01773 // memory blocks over and over.
01774 TEST(Table, IterationOrderChangesByInstance) {
01775   for (size_t size : {2, 6, 12, 20}) {
01776     const auto reference_table = MakeSimpleTable(size);
01777     const auto reference = OrderOfIteration(reference_table);
01778 
01779     std::vector<IntTable> tables;
01780     bool found_difference = false;
01781     for (int i = 0; !found_difference && i < 5000; ++i) {
01782       tables.push_back(MakeSimpleTable(size));
01783       found_difference = OrderOfIteration(tables.back()) != reference;
01784     }
01785     if (!found_difference) {
01786       FAIL()
01787           << "Iteration order remained the same across many attempts with size "
01788           << size;
01789     }
01790   }
01791 }
01792 
01793 TEST(Table, IterationOrderChangesOnRehash) {
01794   std::vector<IntTable> garbage;
01795   for (int i = 0; i < 5000; ++i) {
01796     auto t = MakeSimpleTable(20);
01797     const auto reference = OrderOfIteration(t);
01798     // Force rehash to the same size.
01799     t.rehash(0);
01800     auto trial = OrderOfIteration(t);
01801     if (trial != reference) {
01802       // We are done.
01803       return;
01804     }
01805     garbage.push_back(std::move(t));
01806   }
01807   FAIL() << "Iteration order remained the same across many attempts.";
01808 }
01809 
01810 // Verify that pointers are invalidated as soon as a second element is inserted.
01811 // This prevents code from depending on pointer stability in small tables.
01812 TEST(Table, UnstablePointers) {
01813   IntTable table;
01814 
01815   const auto addr = [&](int i) {
01816     return reinterpret_cast<uintptr_t>(&*table.find(i));
01817   };
01818 
01819   table.insert(0);
01820   const uintptr_t old_ptr = addr(0);
01821 
01822   // The table is already at its growth limit, so this insert forces a rehash.
01823   table.insert(1);
01824 
01825   EXPECT_NE(old_ptr, addr(0));
01826 }
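
// A hedged sketch, added for exposition (not part of the original suite), of
// the pattern the test above implies for users: re-acquire positions after a
// mutation instead of holding element pointers across inserts.
TEST(Table, UnstablePointersSafePattern) {
  IntTable table;
  table.insert(0);
  table.insert(1);           // May rehash and move the slot that held 0.
  auto it = table.find(0);   // Re-find after mutating; old pointers are stale.
  EXPECT_TRUE(it != table.end());
  EXPECT_EQ(0, *it);
}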
01827 
01828 // Confirm that we assert if we try to erase() end().
01829 TEST(TableDeathTest, EraseOfEndAsserts) {
01830   // Use an assert with side-effects to figure out if they are actually enabled.
01831   bool assert_enabled = false;
01832   assert([&]() {
01833     assert_enabled = true;
01834     return true;
01835   }());
01836   if (!assert_enabled) return;
01837 
01838   IntTable t;
01839   // Extra simple "regexp" as regexp support is highly varied across platforms.
01840   constexpr char kDeathMsg[] = "it != end";
01841   EXPECT_DEATH_IF_SUPPORTED(t.erase(t.end()), kDeathMsg);
01842 }
01843 
01844 TEST(RawHashSamplerTest, Sample) {
01845   // Enable the feature even if the prod default is off.
01846   SetHashtablezEnabled(true);
01847   SetHashtablezSampleParameter(100);
01848 
01849   auto& sampler = HashtablezSampler::Global();
01850   size_t start_size = 0;
01851   start_size += sampler.Iterate([&](const HashtablezInfo&) { ++start_size; });
01852 
01853   std::vector<IntTable> tables;
01854   for (int i = 0; i < 1000000; ++i) {
01855     tables.emplace_back();
01856     tables.back().insert(1);
01857   }
01858   size_t end_size = 0;
01859   end_size += sampler.Iterate([&](const HashtablezInfo&) { ++end_size; });
01860 
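  // With a sample parameter of 100, roughly 1 table in 100 should be sampled,
  // so the sampled fraction should land near 0.01.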
01861   EXPECT_NEAR((end_size - start_size) / static_cast<double>(tables.size()),
01862               0.01, 0.005);
01863 }
01864 
01865 TEST(RawHashSamplerTest, DoNotSampleCustomAllocators) {
01866   // Enable the feature even if the prod default is off.
01867   SetHashtablezEnabled(true);
01868   SetHashtablezSampleParameter(100);
01869 
01870   auto& sampler = HashtablezSampler::Global();
01871   size_t start_size = 0;
01872   start_size += sampler.Iterate([&](const HashtablezInfo&) { ++start_size; });
01873 
01874   std::vector<CustomAllocIntTable> tables;
01875   for (int i = 0; i < 1000000; ++i) {
01876     tables.emplace_back();
01877     tables.back().insert(1);
01878   }
01879   size_t end_size = 0;
01880   end_size += sampler.Iterate([&](const HashtablezInfo&) { ++end_size; });
01881 
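  // Tables using a custom allocator must never be sampled, so the sampled
  // fraction should stay at (approximately) zero.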
01882   EXPECT_NEAR((end_size - start_size) / static_cast<double>(tables.size()),
01883               0.00, 0.001);
01884 }
01885 
01886 #ifdef ADDRESS_SANITIZER
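// Comment added for exposition: the container poisons unoccupied slot memory
// so that ASan reports reads of uninitialized table storage; the tests below
// check that only live slots are unpoisoned and that erase() re-poisons.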
01887 TEST(Sanitizer, PoisoningUnused) {
01888   IntTable t;
01889   t.reserve(5);
01890   // Insert something to force an allocation.
01891   int64_t& v1 = *t.insert(0).first;
01892 
01893   // Make sure there is something to test.
01894   ASSERT_GT(t.capacity(), 1);
01895 
01896   int64_t* slots = RawHashSetTestOnlyAccess::GetSlots(t);
01897   for (size_t i = 0; i < t.capacity(); ++i) {
01898     EXPECT_EQ(slots + i != &v1, __asan_address_is_poisoned(slots + i));
01899   }
01900 }
01901 
01902 TEST(Sanitizer, PoisoningOnErase) {
01903   IntTable t;
01904   int64_t& v = *t.insert(0).first;
01905 
01906   EXPECT_FALSE(__asan_address_is_poisoned(&v));
01907   t.erase(0);
01908   EXPECT_TRUE(__asan_address_is_poisoned(&v));
01909 }
01910 #endif  // ADDRESS_SANITIZER
01911 
01912 }  // namespace
01913 }  // namespace container_internal
01914 }  // namespace absl

