namespace base_internal {

// Hook invoked on contended acquisition; installed via RegisterSpinLockProfiler().
ABSL_CONST_INIT static base_internal::AtomicHook<void (*)(
    const void *lock, int64_t wait_cycles)>
    submit_profile_data;

void RegisterSpinLockProfiler(void (*fn)(const void *contendedlock,
                                         int64_t wait_cycles)) {
  submit_profile_data.Store(fn);
}
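// A minimal usage sketch (not from the source): installing a contention
// callback at startup. The callback name and body are hypothetical; only the
// signature is taken from RegisterSpinLockProfiler() above.
static void RecordSpinLockContention(const void *lock, int64_t wait_cycles) {
  // E.g., feed (lock, wait_cycles) into a lock-free histogram. The hook runs
  // on the contended path, so it should avoid blocking or taking SpinLocks.
}
// During program initialization:
//   absl::base_internal::RegisterSpinLockProfiler(&RecordSpinLockContention);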
SpinLock::SpinLock(base_internal::SchedulingMode mode)
    : lockword_(IsCooperative(mode) ? kSpinLockCooperative : 0) {
  ABSL_TSAN_MUTEX_CREATE(this, __tsan_mutex_not_static);
}
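// A minimal usage sketch, assuming the usual pairing with SpinLockHolder
// (RAII Lock()/Unlock()) from spinlock.h; the names counter_lock and counter
// are illustrative:
absl::base_internal::SpinLock counter_lock(
    absl::base_internal::SCHEDULE_KERNEL_ONLY);
int64_t counter = 0;

void Increment() {
  absl::base_internal::SpinLockHolder h(&counter_lock);  // Held to scope exit.
  ++counter;
}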
// Monitor the lock word for up to adaptive_spin_count iterations, returning
// the last value observed.
uint32_t SpinLock::SpinLoop() {
  int c = adaptive_spin_count;
  uint32_t lock_value;
  do {
    lock_value = lockword_.load(std::memory_order_relaxed);
  } while ((lock_value & kSpinLockHeld) != 0 && --c > 0);
  return lock_value;
}
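// For context, adaptive_spin_count is initialized once, on first use, via the
// LowLevelCallOnce() helper listed below; values mirror the upstream source
// (spin ~1000 iterations on multi-CPU machines, a single iteration otherwise):
ABSL_CONST_INIT static absl::once_flag init_adaptive_spin_count;
ABSL_CONST_INIT static int adaptive_spin_count = 0;
base_internal::LowLevelCallOnce(&init_adaptive_spin_count, []() {
  adaptive_spin_count = base_internal::NumCPUs() > 1 ? 1000 : 1;
});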
// SpinLock::SlowLock(): the lock was not obtained on the fast path, so this
// thread waits, tracking how long so the wait can be stored in the lockword.
uint32_t wait_cycles = 0;
int lock_wait_call_count = 0;
while ((lock_value & kSpinLockHeld) != 0) {
  // If the lock is held but not yet marked as contended, mark it so the
  // holder knows to wake sleeping waiters on unlock.
  if ((lock_value & kWaitTimeMask) == 0) {
    if (lockword_.compare_exchange_strong(
            lock_value, lock_value | kSpinLockSleeper,
            std::memory_order_relaxed, std::memory_order_relaxed)) {
      lock_value |= kSpinLockSleeper;
    } else if ((lock_value & kSpinLockHeld) == 0) {
      // The lock was released in the meantime; try to grab it before sleeping.
      lock_value = TryLockInternal(lock_value, wait_cycles);
      continue;
    }
  }
  // ... (delay/retry tail elided in this excerpt)
}
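// For reference, the lock-word bit layout these masks come from (values as in
// the upstream spinlock.h, reproduced here as an aid; verify against the
// header you actually build with):
//   kSpinLockHeld               = 1  // bit 0: lock is held
//   kSpinLockCooperative        = 2  // bit 1: cooperative scheduling mode
//   kSpinLockDisabledScheduling = 4  // bit 2: scheduling temporarily disabled
//   kSpinLockSleeper            = 8  // lowest wait-time bit: waiters asleep
//   kWaitTimeMask = ~(kSpinLockHeld | kSpinLockCooperative |
//                     kSpinLockDisabledScheduling)  // encoded wait time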
uint32_t SpinLock::EncodeWaitCycles(int64_t wait_start_time,
                                    int64_t wait_end_time) {
  static const int64_t kMaxWaitTime =
      std::numeric_limits<uint32_t>::max() >> kProfileTimestampShift;
  int64_t scaled_wait_time =
      (wait_end_time - wait_start_time) >> kProfileTimestampShift;

  // Clamp the wait time so it fits in the lock word's upper bits.
  uint32_t clamped = static_cast<uint32_t>(
      std::min(scaled_wait_time, kMaxWaitTime) << kLockwordReservedShift);

  if (clamped == 0) {
    return kSpinLockSleeper;  // Just wake waiters; don't record contention.
  }
  // Bump up the value if necessary so it cannot collide with kSpinLockSleeper.
  const uint32_t kMinWaitTime =
      kSpinLockSleeper + (1 << kLockwordReservedShift);
  return std::max(clamped, kMinWaitTime);
}
uint64_t SpinLock::DecodeWaitCycles(uint32_t lock_value) {
  // Mask off the flag bits, then undo the encoding shifts.
  const uint64_t scaled_wait_time = lock_value & kWaitTimeMask;
  return scaled_wait_time << (kProfileTimestampShift - kLockwordReservedShift);
}
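// A worked round trip (sketch; assumes kProfileTimestampShift == 7 and
// kLockwordReservedShift == 3, the upstream values). For a 10000-cycle wait:
//   encode: 10000 >> 7 == 78, stored as 78 << 3 == 624 in the lock word;
//   decode: (624 & kWaitTimeMask) << (7 - 3) == 9984 == 78 << 7.
// The low kProfileTimestampShift bits of the wait are lost to scaling.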
// Referenced declarations:
void Lock() EXCLUSIVE_LOCK_FUNCTION();
void Unlock() UNLOCK_FUNCTION();
void SlowLock() ABSL_ATTRIBUTE_COLD;
void SlowUnlock(uint32_t lock_value) ABSL_ATTRIBUTE_COLD;
uint32_t TryLockInternal(uint32_t lock_value, uint32_t wait_cycles);
static uint32_t EncodeWaitCycles(int64_t wait_start_time, int64_t wait_end_time);
static uint64_t DecodeWaitCycles(uint32_t lock_value);
static constexpr bool IsCooperative(base_internal::SchedulingMode scheduling_mode);
void InitLinkerInitializedAndCooperative();
std::atomic<uint32_t> lockword_;
static ABSL_CONST_INIT base_internal::AtomicHook<void (*)(const void *lock, int64_t wait_cycles)> submit_profile_data;
void RegisterSpinLockProfiler(void (*fn)(const void *contendedlock, int64_t wait_cycles));
void SpinLockDelay(std::atomic<uint32_t> *w, uint32_t value, int loop, base_internal::SchedulingMode scheduling_mode);
void SpinLockWake(std::atomic<uint32_t> *w, bool all);
void LowLevelCallOnce(absl::once_flag *flag, Callable &&fn, Args &&...args);
#define ABSL_TSAN_MUTEX_CREATE(...)
#define ABSL_TSAN_MUTEX_PRE_DIVERT(...)
#define ABSL_TSAN_MUTEX_POST_DIVERT(...)
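// A minimal sketch of the waiter/waker pairing behind the SpinLockDelay() and
// SpinLockWake() helpers listed above. The flag protocol, function names, and
// memory orders here are illustrative assumptions, not the upstream
// SlowLock()/SlowUnlock() code; only the helper signatures come from the list.
#include <atomic>
#include <cstdint>
#include "absl/base/internal/spinlock_wait.h"

std::atomic<uint32_t> flag{0};

void WaitForFlag() {
  int loop = 0;
  uint32_t v;
  while ((v = flag.load(std::memory_order_acquire)) == 0) {
    // Spins briefly, then yields or sleeps, escalating as `loop` grows.
    absl::base_internal::SpinLockDelay(
        &flag, v, ++loop, absl::base_internal::SCHEDULE_KERNEL_ONLY);
  }
}

void SetFlag() {
  flag.store(1, std::memory_order_release);
  absl::base_internal::SpinLockWake(&flag, /*all=*/true);  // Wake all waiters.
}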