spinlock_test_common.cc
// Copyright 2017 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// A bunch of threads repeatedly hash an array of ints protected by a
// spinlock. If the spinlock is working properly, all elements of the
// array should be equal at the end of the test.
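// Each element starts at zero and, with a correct lock, receives exactly the
// same sequence of hash updates, so any divergence indicates a lost or
// interleaved update.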

#include <cstdint>
#include <limits>
#include <random>
#include <thread>  // NOLINT(build/c++11)
#include <vector>

#include "gtest/gtest.h"
#include "absl/base/attributes.h"
#include "absl/base/internal/low_level_scheduling.h"
#include "absl/base/internal/scheduling_mode.h"
#include "absl/base/internal/spinlock.h"
#include "absl/base/internal/sysinfo.h"
#include "absl/base/macros.h"
#include "absl/synchronization/blocking_counter.h"
#include "absl/synchronization/notification.h"

constexpr int32_t kNumThreads = 10;
constexpr int32_t kIters = 1000;

namespace absl {
namespace base_internal {

// This is defined outside of anonymous namespace so that it can be
// a friend of SpinLock to access protected methods for testing.
struct SpinLockTest {
  static uint32_t EncodeWaitCycles(int64_t wait_start_time,
                                   int64_t wait_end_time) {
    return SpinLock::EncodeWaitCycles(wait_start_time, wait_end_time);
  }
  static uint64_t DecodeWaitCycles(uint32_t lock_value) {
    return SpinLock::DecodeWaitCycles(lock_value);
  }
};

namespace {

static constexpr int kArrayLength = 10;
static uint32_t values[kArrayLength];

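// Note on modes (see absl/base/internal/scheduling_mode.h): roughly, a
// SCHEDULE_KERNEL_ONLY spinlock suppresses cooperative (user-space)
// rescheduling while held, whereas a SCHEDULE_COOPERATIVE_AND_KERNEL
// spinlock may cooperate with a user-space scheduler while waiting.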
static SpinLock static_spinlock(base_internal::kLinkerInitialized);
static SpinLock static_cooperative_spinlock(
    base_internal::kLinkerInitialized,
    base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL);
static SpinLock static_noncooperative_spinlock(
    base_internal::kLinkerInitialized, base_internal::SCHEDULE_KERNEL_ONLY);


// Simple integer hash function based on the public domain lookup2 hash.
// http://burtleburtle.net/bob/c/lookup2.c
static uint32_t Hash32(uint32_t a, uint32_t c) {
  uint32_t b = 0x9e3779b9UL;  // The golden ratio; an arbitrary value.
  a -= b; a -= c; a ^= (c >> 13);
  b -= c; b -= a; b ^= (a << 8);
  c -= a; c -= b; c ^= (b >> 13);
  a -= b; a -= c; a ^= (c >> 12);
  b -= c; b -= a; b ^= (a << 16);
  c -= a; c -= b; c ^= (b >> 5);
  a -= b; a -= c; a ^= (c >> 3);
  b -= c; b -= a; b ^= (a << 10);
  c -= a; c -= b; c ^= (b >> 15);
  return c;
}

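// Per-thread worker: under the lock, rehash every slot with this thread's
// salt. The yield() between slot updates widens the race window that a broken
// lock would expose.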
static void TestFunction(int thread_salt, SpinLock* spinlock) {
  for (int i = 0; i < kIters; i++) {
    SpinLockHolder h(spinlock);
    for (int j = 0; j < kArrayLength; j++) {
      const int index = (j + thread_salt) % kArrayLength;
      values[index] = Hash32(values[index], thread_salt);
      std::this_thread::yield();
    }
  }
}

static void ThreadedTest(SpinLock* spinlock) {
  std::vector<std::thread> threads;
  for (int i = 0; i < kNumThreads; ++i) {
    threads.push_back(std::thread(TestFunction, i, spinlock));
  }
  for (auto& thread : threads) {
    thread.join();
  }

  SpinLockHolder h(spinlock);
  for (int i = 1; i < kArrayLength; i++) {
    EXPECT_EQ(values[0], values[i]);
  }
}

TEST(SpinLock, StackNonCooperativeDisablesScheduling) {
  SpinLock spinlock(base_internal::SCHEDULE_KERNEL_ONLY);
  spinlock.Lock();
  EXPECT_FALSE(base_internal::SchedulingGuard::ReschedulingIsAllowed());
  spinlock.Unlock();
}

TEST(SpinLock, StaticNonCooperativeDisablesScheduling) {
  static_noncooperative_spinlock.Lock();
  EXPECT_FALSE(base_internal::SchedulingGuard::ReschedulingIsAllowed());
  static_noncooperative_spinlock.Unlock();
}

TEST(SpinLock, WaitCyclesEncoding) {
  // These are implementation details not exported by SpinLock.
  const int kProfileTimestampShift = 7;
  const int kLockwordReservedShift = 3;
  const uint32_t kSpinLockSleeper = 8;

  // We should be able to encode up to (2^kMaxCyclesShift - 1) without
  // clamping, but the lower kProfileTimestampShift bits will be dropped.
  const int kMaxCyclesShift =
      32 - kLockwordReservedShift + kProfileTimestampShift;
  const uint64_t kMaxCycles = (int64_t{1} << kMaxCyclesShift) - 1;
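  // With the constants above, kMaxCyclesShift is 32 - 3 + 7 = 36, so
  // kMaxCycles is 2^36 - 1: dropping the low 7 bits leaves 29 bits of cycle
  // count, which exactly fit the 32 - 3 usable bits of the lock word.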

  // These bits should be zero after encoding.
  const uint32_t kLockwordReservedMask = (1 << kLockwordReservedShift) - 1;

  // These bits are dropped when wait cycles are encoded.
  const uint64_t kProfileTimestampMask = (1 << kProfileTimestampShift) - 1;

  // Test a bunch of random values
  std::default_random_engine generator;
  // Shift to avoid overflow below.
  std::uniform_int_distribution<uint64_t> time_distribution(
      0, std::numeric_limits<uint64_t>::max() >> 4);
  std::uniform_int_distribution<uint64_t> cycle_distribution(0, kMaxCycles);

  for (int i = 0; i < 100; i++) {
    int64_t start_time = time_distribution(generator);
    int64_t cycles = cycle_distribution(generator);
    int64_t end_time = start_time + cycles;
    uint32_t lock_value = SpinLockTest::EncodeWaitCycles(start_time, end_time);
    EXPECT_EQ(0, lock_value & kLockwordReservedMask);
    uint64_t decoded = SpinLockTest::DecodeWaitCycles(lock_value);
    EXPECT_EQ(0, decoded & kProfileTimestampMask);
    EXPECT_EQ(cycles & ~kProfileTimestampMask, decoded);
  }

  // Test corner cases
  int64_t start_time = time_distribution(generator);
  EXPECT_EQ(kSpinLockSleeper,
            SpinLockTest::EncodeWaitCycles(start_time, start_time));
  EXPECT_EQ(0, SpinLockTest::DecodeWaitCycles(0));
  EXPECT_EQ(0, SpinLockTest::DecodeWaitCycles(kLockwordReservedMask));
  EXPECT_EQ(kMaxCycles & ~kProfileTimestampMask,
            SpinLockTest::DecodeWaitCycles(~kLockwordReservedMask));

  // Check that we cannot produce kSpinLockSleeper during encoding.
  int64_t sleeper_cycles =
      kSpinLockSleeper << (kProfileTimestampShift - kLockwordReservedShift);
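  // sleeper_cycles is 8 << (7 - 3) == 128; a 128-cycle wait would naively
  // encode as (128 >> 7) << 3 == 8, i.e. exactly kSpinLockSleeper, so the
  // encoder has to return some other value.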
  uint32_t sleeper_value =
      SpinLockTest::EncodeWaitCycles(start_time, start_time + sleeper_cycles);
  EXPECT_NE(sleeper_value, kSpinLockSleeper);

  // Test clamping
  uint32_t max_value =
      SpinLockTest::EncodeWaitCycles(start_time, start_time + kMaxCycles);
  uint64_t max_value_decoded = SpinLockTest::DecodeWaitCycles(max_value);
  uint64_t expected_max_value_decoded = kMaxCycles & ~kProfileTimestampMask;
  EXPECT_EQ(expected_max_value_decoded, max_value_decoded);

  const int64_t step = (1 << kProfileTimestampShift);
  uint32_t after_max_value = SpinLockTest::EncodeWaitCycles(
      start_time, start_time + kMaxCycles + step);
  uint64_t after_max_value_decoded =
      SpinLockTest::DecodeWaitCycles(after_max_value);
  EXPECT_EQ(expected_max_value_decoded, after_max_value_decoded);

  uint32_t before_max_value = SpinLockTest::EncodeWaitCycles(
      start_time, start_time + kMaxCycles - step);
  uint64_t before_max_value_decoded =
      SpinLockTest::DecodeWaitCycles(before_max_value);
  EXPECT_GT(expected_max_value_decoded, before_max_value_decoded);
}

TEST(SpinLockWithThreads, StaticSpinLock) {
  ThreadedTest(&static_spinlock);
}
TEST(SpinLockWithThreads, StackSpinLock) {
  SpinLock spinlock;
  ThreadedTest(&spinlock);
}

TEST(SpinLockWithThreads, StackCooperativeSpinLock) {
  SpinLock spinlock(base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL);
  ThreadedTest(&spinlock);
}

TEST(SpinLockWithThreads, StackNonCooperativeSpinLock) {
  SpinLock spinlock(base_internal::SCHEDULE_KERNEL_ONLY);
  ThreadedTest(&spinlock);
}

TEST(SpinLockWithThreads, StaticCooperativeSpinLock) {
  ThreadedTest(&static_cooperative_spinlock);
}

TEST(SpinLockWithThreads, StaticNonCooperativeSpinLock) {
  ThreadedTest(&static_noncooperative_spinlock);
}

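// One thread takes the lock and holds it until every contender (twice as many
// threads as CPUs) has signalled that it is about to acquire the same lock;
// if the waiting path mishandled this contention, the joins below would hang.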
TEST(SpinLockWithThreads, DoesNotDeadlock) {
  struct Helper {
    static void NotifyThenLock(Notification* locked, SpinLock* spinlock,
                               BlockingCounter* b) {
      locked->WaitForNotification();  // Wait for LockThenWait() to hold the lock.
      b->DecrementCount();
      SpinLockHolder l(spinlock);
    }

    static void LockThenWait(Notification* locked, SpinLock* spinlock,
                             BlockingCounter* b) {
      SpinLockHolder l(spinlock);
      locked->Notify();
      b->Wait();
    }

    static void DeadlockTest(SpinLock* spinlock, int num_spinners) {
      Notification locked;
      BlockingCounter counter(num_spinners);
      std::vector<std::thread> threads;

      threads.push_back(
          std::thread(Helper::LockThenWait, &locked, spinlock, &counter));
      for (int i = 0; i < num_spinners; ++i) {
        threads.push_back(
            std::thread(Helper::NotifyThenLock, &locked, spinlock, &counter));
      }

      for (auto& thread : threads) {
        thread.join();
      }
    }
  };

  SpinLock stack_cooperative_spinlock(
      base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL);
  SpinLock stack_noncooperative_spinlock(base_internal::SCHEDULE_KERNEL_ONLY);
  Helper::DeadlockTest(&stack_cooperative_spinlock,
                       base_internal::NumCPUs() * 2);
  Helper::DeadlockTest(&stack_noncooperative_spinlock,
                       base_internal::NumCPUs() * 2);
  Helper::DeadlockTest(&static_cooperative_spinlock,
                       base_internal::NumCPUs() * 2);
  Helper::DeadlockTest(&static_noncooperative_spinlock,
                       base_internal::NumCPUs() * 2);
}

}  // namespace
}  // namespace base_internal
}  // namespace absl