spinlock.h
//
// Copyright 2017 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//

// Most users requiring mutual exclusion should use Mutex.
// SpinLock is provided for use in three situations:
//   - for use in code that Mutex itself depends on
//   - to get a faster fast-path release under low contention (without an
//     atomic read-modify-write). In return, SpinLock has worse behavior under
//     contention, which is why Mutex is preferred in most situations.
//   - for async signal safety (see below)

// SpinLock is async signal safe. If a spinlock is used within a signal
// handler, all code that acquires the lock must ensure that the signal cannot
// arrive while the lock is held. Typically, this is done by blocking the
// signal.
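//
// For illustration only (not part of this header), one sketch of that
// pattern: block the signal around the critical section so the handler can
// never run while the lock is held. SIGUSR1 is an arbitrary example here.
//
//   sigset_t set, old;
//   sigemptyset(&set);
//   sigaddset(&set, SIGUSR1);
//   pthread_sigmask(SIG_BLOCK, &set, &old);   // signal cannot be delivered
//   lock.Lock();
//   // ... critical section shared with the signal handler ...
//   lock.Unlock();
//   pthread_sigmask(SIG_SETMASK, &old, nullptr);  // restore the old mask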

#ifndef ABSL_BASE_INTERNAL_SPINLOCK_H_
#define ABSL_BASE_INTERNAL_SPINLOCK_H_

#include <stdint.h>
#include <sys/types.h>

#include <atomic>

#include "absl/base/attributes.h"
#include "absl/base/dynamic_annotations.h"
#include "absl/base/internal/low_level_scheduling.h"
#include "absl/base/internal/raw_logging.h"
#include "absl/base/internal/scheduling_mode.h"
#include "absl/base/internal/tsan_mutex_interface.h"
#include "absl/base/macros.h"
#include "absl/base/port.h"
#include "absl/base/thread_annotations.h"

namespace absl {
namespace base_internal {

class LOCKABLE SpinLock {
 public:
  SpinLock() : lockword_(kSpinLockCooperative) {
    ABSL_TSAN_MUTEX_CREATE(this, __tsan_mutex_not_static);
  }

  // Special constructor for use with static SpinLock objects.  E.g.,
  //
  //    static SpinLock lock(base_internal::kLinkerInitialized);
  //
  // When initialized using this constructor, we depend on the fact
  // that the linker has already initialized the memory appropriately. The lock
  // is initialized in non-cooperative mode.
  //
  // A SpinLock constructed like this can be freely used from global
  // initializers without worrying about the order in which global
  // initializers run.
  explicit SpinLock(base_internal::LinkerInitialized) {
    // Does nothing; lockword_ is already initialized
    ABSL_TSAN_MUTEX_CREATE(this, 0);
  }

  // Constructors that allow non-cooperative spinlocks to be created for use
  // inside thread schedulers.  Normal clients should not use these.
  explicit SpinLock(base_internal::SchedulingMode mode);
  SpinLock(base_internal::LinkerInitialized,
           base_internal::SchedulingMode mode);

  ~SpinLock() { ABSL_TSAN_MUTEX_DESTROY(this, __tsan_mutex_not_static); }

  // Acquire this SpinLock.
  inline void Lock() EXCLUSIVE_LOCK_FUNCTION() {
    ABSL_TSAN_MUTEX_PRE_LOCK(this, 0);
    if (!TryLockImpl()) {
      SlowLock();
    }
    ABSL_TSAN_MUTEX_POST_LOCK(this, 0, 0);
  }

90  // Try to acquire this SpinLock without blocking and return true if the
91  // acquisition was successful. If the lock was not acquired, false is
92  // returned. If this SpinLock is free at the time of the call, TryLock
93  // will return true with high probability.
94  inline bool TryLock() EXCLUSIVE_TRYLOCK_FUNCTION(true) {
95  ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_try_lock);
96  bool res = TryLockImpl();
98  this, __tsan_mutex_try_lock | (res ? 0 : __tsan_mutex_try_lock_failed),
99  0);
100  return res;
101  }
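
  // Illustrative sketch (not part of the original header): opportunistic
  // locking with TryLock, falling back without blocking on failure.
  //
  //   if (lock.TryLock()) {
  //     ...               // critical section
  //     lock.Unlock();
  //   } else {
  //     ...               // do something else; do not touch shared state
  //   }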

  // Release this SpinLock, which must be held by the calling thread.
  inline void Unlock() UNLOCK_FUNCTION() {
    ABSL_TSAN_MUTEX_PRE_UNLOCK(this, 0);
    uint32_t lock_value = lockword_.load(std::memory_order_relaxed);
    lock_value = lockword_.exchange(lock_value & kSpinLockCooperative,
                                    std::memory_order_release);

    if ((lock_value & kSpinLockDisabledScheduling) != 0) {
      base_internal::SchedulingGuard::EnableRescheduling(true);
    }
    if ((lock_value & kWaitTimeMask) != 0) {
      // Collect contentionz profile info, and speed the wakeup of any waiter.
      // The wait_cycles value indicates how long this thread spent waiting
      // for the lock.
      SlowUnlock(lock_value);
    }
    ABSL_TSAN_MUTEX_POST_UNLOCK(this, 0);
  }

  // Determine if the lock is held.  When the lock is held by the invoking
  // thread, true will always be returned.  Intended to be used as
  // CHECK(lock.IsHeld()).
  inline bool IsHeld() const {
    return (lockword_.load(std::memory_order_relaxed) & kSpinLockHeld) != 0;
  }

 protected:
  // These should not be exported except for testing.

  // Store number of cycles between wait_start_time and wait_end_time in a
  // lock value.
  static uint32_t EncodeWaitCycles(int64_t wait_start_time,
                                   int64_t wait_end_time);

  // Extract number of wait cycles in a lock value.
  static uint64_t DecodeWaitCycles(uint32_t lock_value);
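
  // Illustrative note (not in the original header): because the wait time is
  // packed into the bits above the three flag bits, the encode/decode pair is
  // lossy.  Roughly, for timestamps t0 <= t1,
  //
  //   DecodeWaitCycles(EncodeWaitCycles(t0, t1)) ~= t1 - t0
  //
  // up to the granularity of the stored field, with very long waits clamped
  // to the maximum value the field can represent.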

  // Provide access to protected method above.  Use for testing only.
  friend struct SpinLockTest;

 private:
  // lockword_ is used to store the following:
  //
  // bit[0] encodes whether a lock is being held.
  // bit[1] encodes whether a lock uses cooperative scheduling.
  // bit[2] encodes whether a lock disables scheduling.
  // bit[3:31] encodes time a lock spent on waiting as a 29-bit unsigned int.
  enum { kSpinLockHeld = 1 };
  enum { kSpinLockCooperative = 2 };
  enum { kSpinLockDisabledScheduling = 4 };
  enum { kSpinLockSleeper = 8 };
  enum { kWaitTimeMask =  // Includes kSpinLockSleeper.
         ~(kSpinLockHeld | kSpinLockCooperative |
           kSpinLockDisabledScheduling) };
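
  // Worked example (added for illustration): a lockword_ value of 0xD
  // (binary 1101) decomposes as held (bit 0 set), non-cooperative (bit 1
  // clear), scheduling disabled (bit 2 set), and kSpinLockSleeper set in
  // the wait-time field (bit 3).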

  // Returns true if the provided scheduling mode is cooperative.
  static constexpr bool IsCooperative(
      base_internal::SchedulingMode scheduling_mode) {
    return scheduling_mode == base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL;
  }

  uint32_t TryLockInternal(uint32_t lock_value, uint32_t wait_cycles);
  void InitLinkerInitializedAndCooperative();
  void SlowLock() ABSL_ATTRIBUTE_COLD;
  void SlowUnlock(uint32_t lock_value) ABSL_ATTRIBUTE_COLD;
  uint32_t SpinLoop();

  inline bool TryLockImpl() {
    uint32_t lock_value = lockword_.load(std::memory_order_relaxed);
    return (TryLockInternal(lock_value, 0) & kSpinLockHeld) == 0;
  }

  std::atomic<uint32_t> lockword_;

  SpinLock(const SpinLock&) = delete;
  SpinLock& operator=(const SpinLock&) = delete;
};

// Corresponding locker object that arranges to acquire a spinlock for
// the duration of a C++ scope.
class SCOPED_LOCKABLE SpinLockHolder {
 public:
  inline explicit SpinLockHolder(SpinLock* l) EXCLUSIVE_LOCK_FUNCTION(l)
      : lock_(l) {
    l->Lock();
  }
  inline ~SpinLockHolder() UNLOCK_FUNCTION() { lock_->Unlock(); }

  SpinLockHolder(const SpinLockHolder&) = delete;
  SpinLockHolder& operator=(const SpinLockHolder&) = delete;

 private:
  SpinLock* lock_;
};
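
// Illustrative usage sketch (not part of the original header); the GUARDED_BY
// annotation is assumed to be available from thread_annotations.h.
//
//   static SpinLock counter_lock(base_internal::kLinkerInitialized);
//   int counter GUARDED_BY(counter_lock);
//
//   void Increment() {
//     SpinLockHolder h(&counter_lock);  // acquires the lock
//     ++counter;
//   }                                   // releases it at end of scope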

// Register a hook for profiling support.
//
// The function pointer registered here will be called whenever a spinlock is
// contended.  The callback is given an opaque handle to the contended spinlock
// and the number of wait cycles.  This is thread-safe, but only a single
// profiler can be registered.  It is an error to call this function multiple
// times with different arguments.
void RegisterSpinLockProfiler(void (*fn)(const void* lock,
                                         int64_t wait_cycles));
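
// Illustrative sketch (not part of the original header): registering a
// contention hook.  The callback body below is a placeholder; a real hook
// must be safe to call from inside lock release paths.
//
//   void MySpinLockProfiler(const void* lock, int64_t wait_cycles) {
//     // e.g., accumulate wait_cycles into a per-process counter
//   }
//
//   base_internal::RegisterSpinLockProfiler(&MySpinLockProfiler);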

//------------------------------------------------------------------------------
// Public interface ends here.
//------------------------------------------------------------------------------

// If (result & kSpinLockHeld) == 0, then *this was successfully locked.
// Otherwise, returns last observed value for lockword_.
inline uint32_t SpinLock::TryLockInternal(uint32_t lock_value,
                                          uint32_t wait_cycles) {
  if ((lock_value & kSpinLockHeld) != 0) {
    return lock_value;
  }

  uint32_t sched_disabled_bit = 0;
  if ((lock_value & kSpinLockCooperative) == 0) {
    // For non-cooperative locks we must make sure we mark ourselves as
    // non-reschedulable before we attempt to CompareAndSwap.
    if (base_internal::SchedulingGuard::DisableRescheduling()) {
      sched_disabled_bit = kSpinLockDisabledScheduling;
    }
  }

  if (!lockword_.compare_exchange_strong(
          lock_value,
          kSpinLockHeld | lock_value | wait_cycles | sched_disabled_bit,
          std::memory_order_acquire, std::memory_order_relaxed)) {
    // The CAS failed, so the lock was not acquired; re-enable rescheduling
    // if we disabled it above.
    base_internal::SchedulingGuard::EnableRescheduling(sched_disabled_bit != 0);
  }

  return lock_value;
}

}  // namespace base_internal
}  // namespace absl

#endif  // ABSL_BASE_INTERNAL_SPINLOCK_H_