// Doxygen source listing: absl/base/internal/spinlock.h (low-level spin lock, internal use only).
00002
00003
00004
00005
00006
00007
00008
00009
00010
00011
00012
00013
00014
00015
00016
00017
00018
00019
00020
00021
00022
00023
00024
00025
00026
00027
00028
00029
00030 #ifndef ABSL_BASE_INTERNAL_SPINLOCK_H_
00031 #define ABSL_BASE_INTERNAL_SPINLOCK_H_
00032
00033 #include <stdint.h>
00034 #include <sys/types.h>
00035
00036 #include <atomic>
00037
00038 #include "absl/base/attributes.h"
00039 #include "absl/base/dynamic_annotations.h"
00040 #include "absl/base/internal/low_level_scheduling.h"
00041 #include "absl/base/internal/raw_logging.h"
00042 #include "absl/base/internal/scheduling_mode.h"
00043 #include "absl/base/internal/tsan_mutex_interface.h"
00044 #include "absl/base/macros.h"
00045 #include "absl/base/port.h"
00046 #include "absl/base/thread_annotations.h"
00047
00048 namespace absl {
00049 namespace base_internal {
00050
// SpinLock: a low-level spin lock for use inside other synchronization
// primitives and in code that cannot use a full mutex.
//
// lockword_ bit layout (see the private enums below):
//   bit 0   - lock is held
//   bit 1   - lock operates in cooperative-scheduling mode
//   bit 2   - rescheduling was disabled when the lock was acquired
//   bits 3+ - encoded wait-time measurement (kWaitTimeMask)
class LOCKABLE SpinLock {
 public:
  // Default constructor: a cooperative spin lock, announced to TSan as a
  // dynamically created (non-static) mutex.
  SpinLock() : lockword_(kSpinLockCooperative) {
    ABSL_TSAN_MUTEX_CREATE(this, __tsan_mutex_not_static);
  }

  // Constructor for global SpinLock instances.  The body deliberately does
  // not touch lockword_: it relies on zero-initialization of objects with
  // static storage duration, and zero is the unlocked, non-cooperative state.
  explicit SpinLock(base_internal::LinkerInitialized) {
    // Does nothing; lockword_ is already zero-initialized (unlocked).
    ABSL_TSAN_MUTEX_CREATE(this, 0);
  }

  // Constructors that select the scheduling mode explicitly; defined
  // out-of-line in the implementation file.
  explicit SpinLock(base_internal::SchedulingMode mode);
  SpinLock(base_internal::LinkerInitialized,
           base_internal::SchedulingMode mode);

  ~SpinLock() { ABSL_TSAN_MUTEX_DESTROY(this, __tsan_mutex_not_static); }

  // Acquires this SpinLock, blocking until it becomes available.
  inline void Lock() EXCLUSIVE_LOCK_FUNCTION() {
    ABSL_TSAN_MUTEX_PRE_LOCK(this, 0);
    if (!TryLockImpl()) {
      SlowLock();  // cold path: the lock is contended
    }
    ABSL_TSAN_MUTEX_POST_LOCK(this, 0, 0);
  }

  // Attempts to acquire this SpinLock without blocking.  Returns true if
  // the lock was acquired; false if it was already held.
  inline bool TryLock() EXCLUSIVE_TRYLOCK_FUNCTION(true) {
    ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_try_lock);
    bool res = TryLockImpl();
    ABSL_TSAN_MUTEX_POST_LOCK(
        this, __tsan_mutex_try_lock | (res ? 0 : __tsan_mutex_try_lock_failed),
        0);
    return res;
  }

  // Releases this SpinLock, which must be held by the calling thread.
  inline void Unlock() UNLOCK_FUNCTION() {
    ABSL_TSAN_MUTEX_PRE_UNLOCK(this, 0);
    uint32_t lock_value = lockword_.load(std::memory_order_relaxed);
    // Clear every bit except the cooperative flag; the release ordering
    // publishes the critical section to the next acquirer.
    lock_value = lockword_.exchange(lock_value & kSpinLockCooperative,
                                    std::memory_order_release);

    if ((lock_value & kSpinLockDisabledScheduling) != 0) {
      // TryLockInternal disabled rescheduling on acquire; re-enable it now.
      base_internal::SchedulingGuard::EnableRescheduling(true);
    }
    if ((lock_value & kWaitTimeMask) != 0) {
      // A waiter recorded wait time (or the sleeper flag) in the lockword;
      // take the cold path to wake it / report contention.
      SlowUnlock(lock_value);
    }
    ABSL_TSAN_MUTEX_POST_UNLOCK(this, 0);
  }

  // Returns true if the lockword's held bit is set.  The load is relaxed,
  // so the result is only advisory for threads that do not hold the lock;
  // it is reliable as a sanity check from the holding thread.
  inline bool IsHeld() const {
    return (lockword_.load(std::memory_order_relaxed) & kSpinLockHeld) != 0;
  }

 protected:
  // Packs the wait interval [wait_start_time, wait_end_time] into the
  // wait-time bits of a lockword value (see kWaitTimeMask).
  static uint32_t EncodeWaitCycles(int64_t wait_start_time,
                                   int64_t wait_end_time);

  // Inverse of EncodeWaitCycles: extracts the wait time from a lockword.
  static uint64_t DecodeWaitCycles(uint32_t lock_value);

  // Unit tests exercise the encode/decode helpers directly.
  friend struct SpinLockTest;

 private:
  // Bit layout of lockword_; the first four values are single flag bits.
  enum { kSpinLockHeld = 1 };                // bit 0: lock is held
  enum { kSpinLockCooperative = 2 };         // bit 1: cooperative scheduling
  enum { kSpinLockDisabledScheduling = 4 };  // bit 2: holder disabled resched
  enum { kSpinLockSleeper = 8 };  // bit 3: slow-path flag; presumably marks a
                                  // sleeping waiter -- confirm in the .cc file
  // Everything above the three low flag bits carries the encoded wait time.
  // Note that kSpinLockSleeper overlaps this mask.
  enum { kWaitTimeMask =
      ~(kSpinLockHeld | kSpinLockCooperative | kSpinLockDisabledScheduling) };

  // True when the mode cooperates with the kernel scheduler.
  static constexpr bool IsCooperative(
      base_internal::SchedulingMode scheduling_mode) {
    return scheduling_mode == base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL;
  }

  // One CAS-based acquisition attempt; returns the lockword value observed
  // before the attempt (held bit clear on success).  Defined below.
  uint32_t TryLockInternal(uint32_t lock_value, uint32_t wait_cycles);
  // Helper for the LinkerInitialized/cooperative constructors (.cc file).
  void InitLinkerInitializedAndCooperative();
  // Contended-path helpers; marked cold to keep them off the fast path.
  void SlowLock() ABSL_ATTRIBUTE_COLD;
  void SlowUnlock(uint32_t lock_value) ABSL_ATTRIBUTE_COLD;
  uint32_t SpinLoop();

  // Fast-path acquisition attempt: one relaxed read plus one CAS.
  inline bool TryLockImpl() {
    uint32_t lock_value = lockword_.load(std::memory_order_relaxed);
    return (TryLockInternal(lock_value, 0) & kSpinLockHeld) == 0;
  }

  std::atomic<uint32_t> lockword_;  // all lock state lives in this one word

  SpinLock(const SpinLock&) = delete;
  SpinLock& operator=(const SpinLock&) = delete;
};
00179
00180
00181
00182 class SCOPED_LOCKABLE SpinLockHolder {
00183 public:
00184 inline explicit SpinLockHolder(SpinLock* l) EXCLUSIVE_LOCK_FUNCTION(l)
00185 : lock_(l) {
00186 l->Lock();
00187 }
00188 inline ~SpinLockHolder() UNLOCK_FUNCTION() { lock_->Unlock(); }
00189
00190 SpinLockHolder(const SpinLockHolder&) = delete;
00191 SpinLockHolder& operator=(const SpinLockHolder&) = delete;
00192
00193 private:
00194 SpinLock* lock_;
00195 };
00196
00197
00198
00199
00200
00201
00202
00203
// Registers a process-wide callback to be notified of SpinLock contention,
// receiving the lock's address and the wait time in cycles.
// NOTE(review): the invocation site is not visible in this header --
// presumably called from the contended-unlock path; confirm in spinlock.cc.
void RegisterSpinLockProfiler(void (*fn)(const void* lock,
                                         int64_t wait_cycles));
00206
00207
00208
00209
00210
00211
00212
00213 inline uint32_t SpinLock::TryLockInternal(uint32_t lock_value,
00214 uint32_t wait_cycles) {
00215 if ((lock_value & kSpinLockHeld) != 0) {
00216 return lock_value;
00217 }
00218
00219 uint32_t sched_disabled_bit = 0;
00220 if ((lock_value & kSpinLockCooperative) == 0) {
00221
00222
00223 if (base_internal::SchedulingGuard::DisableRescheduling()) {
00224 sched_disabled_bit = kSpinLockDisabledScheduling;
00225 }
00226 }
00227
00228 if (lockword_.compare_exchange_strong(
00229 lock_value,
00230 kSpinLockHeld | lock_value | wait_cycles | sched_disabled_bit,
00231 std::memory_order_acquire, std::memory_order_relaxed)) {
00232 } else {
00233 base_internal::SchedulingGuard::EnableRescheduling(sched_disabled_bit != 0);
00234 }
00235
00236 return lock_value;
00237 }
00238
00239 }
00240 }
00241
00242 #endif // ABSL_BASE_INTERNAL_SPINLOCK_H_