#ifndef ABSL_LOW_LEVEL_ALLOC_MISSING

namespace synchronization_internal {

// ReclaimThreadIdentity(): a released identity is pushed onto the freelist
// so its storage can be reused by a later thread.
  thread_identity_freelist = identity;

// RoundUp(): round addr up to the next multiple of align
// (align must be a power of two).
  return (addr + align - 1) & ~(align - 1);

// ResetThreadIdentity(): return the per-thread state to its initial values
// before the identity is handed out again.
  pts->state.store(base_internal::PerThreadSynch::State::kAvailable,
                   std::memory_order_relaxed);
  identity->ticker.store(0, std::memory_order_relaxed);
  identity->wait_start.store(0, std::memory_order_relaxed);
  identity->is_idle.store(false, std::memory_order_relaxed);
  identity->next = nullptr;

// NewThreadIdentity(): reuse a previously released identity if one is
// available on the freelist.
  if (thread_identity_freelist) {
    identity = thread_identity_freelist;
    thread_identity_freelist = thread_identity_freelist->next;
  }

// Otherwise allocate fresh storage and round its address up to the
// required PerThreadSynch alignment.
  if (identity == nullptr) {
    identity = reinterpret_cast<base_internal::ThreadIdentity*>(
        RoundUp(reinterpret_cast<intptr_t>(allocation),
                base_internal::PerThreadSynch::kAlignment));
  }

}  // namespace synchronization_internal

#endif  // ABSL_LOW_LEVEL_ALLOC_MISSING
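The expression in RoundUp() works only when align is a power of two: adding align - 1 pushes any unaligned address past the next boundary, and masking with ~(align - 1) clears the low bits. A small standalone check of the same formula (this test harness is not part of the Abseil sources):

#include <cassert>
#include <cstdint>

// Same expression as RoundUp() above; align must be a power of two.
static std::intptr_t RoundUp(std::intptr_t addr, std::intptr_t align) {
  return (addr + align - 1) & ~(align - 1);
}

int main() {
  assert(RoundUp(0x1000, 16) == 0x1000);  // already aligned: unchanged
  assert(RoundUp(0x1001, 16) == 0x1010);  // bumped to the next 16-byte boundary
  assert(RoundUp(0x100F, 16) == 0x1010);
  return 0;
}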
static void ResetThreadIdentity(base_internal::ThreadIdentity *identity)
static base_internal::SpinLock freelist_lock(base_internal::kLinkerInitialized)
static void Init(base_internal::ThreadIdentity *identity)
bool suppress_fatal_errors
std::atomic< bool > is_idle
static void * Alloc(size_t request) ABSL_ATTRIBUTE_SECTION(malloc_hook)
int64_t next_priority_read_cycles
std::atomic< int > * blocked_count_ptr
static base_internal::ThreadIdentity * thread_identity_freelist
PerThreadSynch per_thread_synch
static intptr_t RoundUp(intptr_t addr, intptr_t align)
struct absl::base_internal::ThreadIdentity::WaiterState waiter_state
std::atomic< int > wait_start
base_internal::ThreadIdentity * CreateThreadIdentity()
static base_internal::ThreadIdentity * NewThreadIdentity()
void SetCurrentThreadIdentity(ThreadIdentity *identity, ThreadIdentityReclaimerFunction reclaimer)
void ClearCurrentThreadIdentity()
static void Free(void *s) ABSL_ATTRIBUTE_SECTION(malloc_hook)
static constexpr int kAlignment
static void ReclaimThreadIdentity(void *v)
std::atomic< int > ticker
std::atomic< State > state
SynchLocksHeld * all_locks
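Taken together, the declarations above describe a create/reclaim lifecycle: CreateThreadIdentity() obtains storage via NewThreadIdentity() (reusing the freelist under freelist_lock when possible), and ReclaimThreadIdentity() pushes the identity back onto thread_identity_freelist when the thread exits, so the storage is recycled rather than freed. Below is a minimal, self-contained sketch of that freelist-reuse pattern only; the Identity type, NewIdentity/ReclaimIdentity names, and the std::mutex standing in for the SpinLock are illustrative and not Abseil's API.

#include <mutex>

struct Identity {
  Identity* next = nullptr;  // freelist link, like ThreadIdentity::next
};

namespace {
std::mutex freelist_lock;      // stands in for the SpinLock freelist_lock
Identity* freelist = nullptr;  // stands in for thread_identity_freelist
}  // namespace

// Like NewThreadIdentity(): reuse a released object if possible,
// otherwise allocate fresh storage (never freed, only recycled).
Identity* NewIdentity() {
  {
    std::lock_guard<std::mutex> l(freelist_lock);
    if (freelist != nullptr) {
      Identity* id = freelist;
      freelist = freelist->next;
      id->next = nullptr;  // ResetThreadIdentity() clears all fields before reuse
      return id;
    }
  }
  return new Identity;
}

// Like ReclaimThreadIdentity(): return the object to the freelist for reuse.
void ReclaimIdentity(Identity* id) {
  std::lock_guard<std::mutex> l(freelist_lock);
  id->next = freelist;
  freelist = id;
}

int main() {
  Identity* a = NewIdentity();
  ReclaimIdentity(a);
  Identity* b = NewIdentity();  // b == a: storage is recycled, not freed
  (void)b;
  return 0;
}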