21 #include "gmock/gmock.h" 22 #include "gtest/gtest.h" 32 #if SWISSTABLE_HAVE_SSE2 39 namespace container_internal {
// Test peer that can see through a HashtablezInfoHandle to the sampled
// HashtablezInfo it refers to (if any).
class HashtablezInfoHandlePeer {
 public:
  static bool IsSampled(const HashtablezInfoHandle& h) {
    return h.info_ != nullptr;
  }

  static HashtablezInfo* GetInfo(HashtablezInfoHandle* h) { return h->info_; }
};
namespace {

using ::absl::synchronization_internal::ThreadPool;
using ::testing::IsEmpty;
using ::testing::UnorderedElementsAre;
// Snapshots the current size of every registered table.
std::vector<size_t> GetSizes(HashtablezSampler* s) {
  std::vector<size_t> res;
  s->Iterate([&](const HashtablezInfo& info) {
    res.push_back(info.size.load(std::memory_order_acquire));
  });
  return res;
}
// Registers a new sample and seeds its size so tests can tell entries apart.
HashtablezInfo* Register(HashtablezSampler* s, size_t size) {
  auto* info = s->Register();
  assert(info != nullptr);
  info->size.store(size);
  return info;
}
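
// The HashtablezInfoTest cases below exercise the Record*Slow hooks directly
// against a single HashtablezInfo and check the counters they maintain.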
TEST(HashtablezInfoTest, PrepareForSampling) {
  HashtablezInfo info;
  absl::MutexLock l(&info.init_mu);
  info.PrepareForSampling();

  EXPECT_EQ(info.size.load(), 0);

  info.capacity.store(1, std::memory_order_relaxed);
  info.size.store(1, std::memory_order_relaxed);
  info.num_erases.store(1, std::memory_order_relaxed);

  // PrepareForSampling() must reset all stats to their initial state so that
  // a recycled sample does not leak data from its previous owner.
  info.PrepareForSampling();
  EXPECT_EQ(info.capacity.load(), 0);
  EXPECT_EQ(info.size.load(), 0);
  EXPECT_EQ(info.num_erases.load(), 0);
}
TEST(HashtablezInfoTest, RecordStorageChanged) {
  HashtablezInfo info;
  absl::MutexLock l(&info.init_mu);
  info.PrepareForSampling();
  RecordStorageChangedSlow(&info, /*size=*/17, /*capacity=*/47);
  EXPECT_EQ(info.size.load(), 17);
  EXPECT_EQ(info.capacity.load(), 47);
  RecordStorageChangedSlow(&info, /*size=*/20, /*capacity=*/20);
  EXPECT_EQ(info.size.load(), 20);
  EXPECT_EQ(info.capacity.load(), 20);
}
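
// RecordInsertSlow() records probe distances in units of kProbeLength-wide
// groups (so a distance of 6 * kProbeLength counts as 6 probes) and folds
// each hash into running bitwise AND/OR accumulators.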
TEST(HashtablezInfoTest, RecordInsert) {
  HashtablezInfo info;
  absl::MutexLock l(&info.init_mu);
  info.PrepareForSampling();
  EXPECT_EQ(info.max_probe_length.load(), 0);
  RecordInsertSlow(&info, 0x0000FF00, 6 * kProbeLength);
  EXPECT_EQ(info.max_probe_length.load(), 6);
  EXPECT_EQ(info.hashes_bitwise_and.load(), 0x0000FF00);
  EXPECT_EQ(info.hashes_bitwise_or.load(), 0x0000FF00);
  RecordInsertSlow(&info, 0x000FF000, 4 * kProbeLength);
  EXPECT_EQ(info.max_probe_length.load(), 6);
  EXPECT_EQ(info.hashes_bitwise_and.load(), 0x0000F000);
  EXPECT_EQ(info.hashes_bitwise_or.load(), 0x000FFF00);
}
TEST(HashtablezInfoTest, RecordErase) {
  HashtablezInfo info;
  absl::MutexLock l(&info.init_mu);
  info.PrepareForSampling();
  EXPECT_EQ(info.num_erases.load(), 0);
  EXPECT_EQ(info.size.load(), 0);
  RecordInsertSlow(&info, 0x0000FF00, 6 * kProbeLength);
  EXPECT_EQ(info.size.load(), 1);
  RecordEraseSlow(&info);
  EXPECT_EQ(info.size.load(), 0);
  EXPECT_EQ(info.num_erases.load(), 1);
}
TEST(HashtablezInfoTest, RecordRehash) {
  HashtablezInfo info;
  absl::MutexLock l(&info.init_mu);
  info.PrepareForSampling();
  RecordInsertSlow(&info, 0x1, 0);
  RecordInsertSlow(&info, 0x2, kProbeLength);
  RecordInsertSlow(&info, 0x4, kProbeLength);
  RecordInsertSlow(&info, 0x8, 2 * kProbeLength);
  EXPECT_EQ(info.size.load(), 4);
  EXPECT_EQ(info.total_probe_length.load(), 4);

  RecordEraseSlow(&info);
  RecordEraseSlow(&info);
  EXPECT_EQ(info.size.load(), 2);
  EXPECT_EQ(info.total_probe_length.load(), 4);
  EXPECT_EQ(info.num_erases.load(), 2);

  // A rehash replaces the accumulated probe length and clears the erase
  // counter; the live size is unchanged.
  RecordRehashSlow(&info, 3 * kProbeLength);
  EXPECT_EQ(info.size.load(), 2);
  EXPECT_EQ(info.total_probe_length.load(), 3);
  EXPECT_EQ(info.num_erases.load(), 0);
}
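
// The tests below cover the global sampling machinery. The value set via
// SetHashtablezSampleParameter() is the mean stride between samples: a
// parameter of 100 samples roughly one table in a hundred.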
TEST(HashtablezSamplerTest, SmallSampleParameter) {
  SetHashtablezEnabled(true);
  SetHashtablezSampleParameter(100);

  for (int i = 0; i < 1000; ++i) {
    int64_t next_sample = 0;
    HashtablezInfo* sample = SampleSlow(&next_sample);
    EXPECT_GT(next_sample, 0);
    EXPECT_NE(sample, nullptr);
    UnsampleSlow(sample);
  }
}
TEST(HashtablezSamplerTest, LargeSampleParameter) {
  SetHashtablezEnabled(true);
  SetHashtablezSampleParameter(std::numeric_limits<int32_t>::max());

  for (int i = 0; i < 1000; ++i) {
    int64_t next_sample = 0;
    HashtablezInfo* sample = SampleSlow(&next_sample);
    // Even with the largest possible parameter the drawn stride must come
    // out positive, i.e. the computation must not overflow.
    EXPECT_GT(next_sample, 0);
    EXPECT_NE(sample, nullptr);
    UnsampleSlow(sample);
  }
}
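
// End-to-end check that the realized sampling rate tracks the configured
// 1-in-100 parameter.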
TEST(HashtablezSamplerTest, Sample) {
  SetHashtablezEnabled(true);
  SetHashtablezSampleParameter(100);
  int64_t num_sampled = 0;
  int64_t total = 0;
  double sample_rate = 0.0;
  for (int i = 0; i < 1000000; ++i) {
    HashtablezInfoHandle h = Sample();
    ++total;
    if (HashtablezInfoHandlePeer::IsSampled(h)) {
      ++num_sampled;
    }
    sample_rate = static_cast<double>(num_sampled) / total;
    // Stop early once the observed rate has settled inside the target band.
    if (0.005 < sample_rate && sample_rate < 0.015) break;
  }
  EXPECT_NEAR(sample_rate, 0.01, 0.005);
}
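
// A handle owns its sample: writes through the handle are visible via
// Iterate(), and the sample is reclaimed once the handle dies.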
TEST(HashtablezSamplerTest, Handle) {
  auto& sampler = HashtablezSampler::Global();
  HashtablezInfoHandle h(sampler.Register());
  auto* info = HashtablezInfoHandlePeer::GetInfo(&h);
  info->hashes_bitwise_and.store(0x12345678, std::memory_order_relaxed);

  bool found = false;
  sampler.Iterate([&](const HashtablezInfo& i) {
    if (&i == info) {
      EXPECT_EQ(i.hashes_bitwise_and.load(), 0x12345678);
      found = true;
    }
  });
  EXPECT_TRUE(found);

  // Destroying the handle returns the sample to the sampler; the marker
  // value should no longer be reachable through Iterate().
  h = HashtablezInfoHandle();
  found = false;
  sampler.Iterate([&](const HashtablezInfo& i) {
    if (&i == info && i.hashes_bitwise_and.load() == 0x12345678) {
      found = true;
    }
  });
  EXPECT_FALSE(found);
}
TEST(HashtablezSamplerTest, Registration) {
  HashtablezSampler sampler;
  auto* info1 = Register(&sampler, 1);
  EXPECT_THAT(GetSizes(&sampler), UnorderedElementsAre(1));

  auto* info2 = Register(&sampler, 2);
  EXPECT_THAT(GetSizes(&sampler), UnorderedElementsAre(1, 2));
  info1->size.store(3);
  EXPECT_THAT(GetSizes(&sampler), UnorderedElementsAre(3, 2));

  sampler.Unregister(info1);
  sampler.Unregister(info2);
}
TEST(HashtablezSamplerTest, Unregistration) {
  HashtablezSampler sampler;
  std::vector<HashtablezInfo*> infos;
  for (size_t i = 0; i < 3; ++i) {
    infos.push_back(Register(&sampler, i));
  }
  EXPECT_THAT(GetSizes(&sampler), UnorderedElementsAre(0, 1, 2));

  sampler.Unregister(infos[1]);
  EXPECT_THAT(GetSizes(&sampler), UnorderedElementsAre(0, 2));

  infos.push_back(Register(&sampler, 3));
  infos.push_back(Register(&sampler, 4));
  EXPECT_THAT(GetSizes(&sampler), UnorderedElementsAre(0, 2, 3, 4));
  sampler.Unregister(infos[3]);
  EXPECT_THAT(GetSizes(&sampler), UnorderedElementsAre(0, 2, 4));

  sampler.Unregister(infos[0]);
  sampler.Unregister(infos[2]);
  sampler.Unregister(infos[4]);
  EXPECT_THAT(GetSizes(&sampler), IsEmpty());
}
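
// Stress test: ten threads concurrently register, unregister, and iterate.
// There is nothing to assert beyond "no crash"; the value of this test is in
// running it under TSan.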
TEST(HashtablezSamplerTest, MultiThreaded) {
  HashtablezSampler sampler;
  Notification stop;
  ThreadPool pool(10);

  for (int i = 0; i < 10; ++i) {
    pool.Schedule([&sampler, &stop]() {
      std::random_device rd;
      std::mt19937 gen(rd());

      std::vector<HashtablezInfo*> infoz;
      while (!stop.HasBeenNotified()) {
        if (infoz.empty()) {
          infoz.push_back(sampler.Register());
        }
        switch (std::uniform_int_distribution<>(0, 2)(gen)) {
          case 0: {
            infoz.push_back(sampler.Register());
            break;
          }
          case 1: {
            // Unregister a randomly chosen sample, swap-and-popping it out
            // of our local list.
            size_t p =
                std::uniform_int_distribution<>(0, infoz.size() - 1)(gen);
            HashtablezInfo* info = infoz[p];
            infoz[p] = infoz.back();
            infoz.pop_back();
            sampler.Unregister(info);
            break;
          }
          case 2: {
            // Read-side traffic while other threads mutate the list.
            sampler.Iterate([&](const HashtablezInfo&) {});
            break;
          }
        }
      }
    });
  }
  // Let the threads hammer on the sampler for a while so TSan has a chance
  // to spot races.
  absl::SleepFor(absl::Seconds(3));
  stop.Notify();
}
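
// SetDisposeCallback() installs a hook that observes each sample as it is
// unregistered, and returns the previously installed hook.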
TEST(HashtablezSamplerTest, Callback) {
  HashtablezSampler sampler;

  auto* info1 = Register(&sampler, 1);
  auto* info2 = Register(&sampler, 2);

  static const HashtablezInfo* expected;

  auto callback = [](const HashtablezInfo& info) {
    // `info` is about to be disposed, so only its address may be inspected.
    EXPECT_EQ(&info, expected);
  };

  // Install the callback and verify it fires on Unregister().
  EXPECT_EQ(sampler.SetDisposeCallback(callback), nullptr);
  expected = info1;
  sampler.Unregister(info1);

  // Uninstall the callback; no further calls are expected.
  EXPECT_EQ(callback, sampler.SetDisposeCallback(nullptr));
  expected = nullptr;
  sampler.Unregister(info2);
}

}  // namespace
}  // namespace container_internal
}  // namespace absl