// Copyright 2017 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include <stdint.h>
#include <new>

// This file is a no-op if the required LowLevelAlloc support is missing.
#include "absl/base/internal/low_level_alloc.h"
#ifndef ABSL_LOW_LEVEL_ALLOC_MISSING

#include <string.h>

#include "absl/base/attributes.h"
#include "absl/base/internal/spinlock.h"
#include "absl/base/internal/thread_identity.h"
#include "absl/synchronization/internal/per_thread_sem.h"

namespace absl {
namespace synchronization_internal {

// ThreadIdentity storage is persistent, so we maintain a free-list of
// previously released ThreadIdentity objects.
static base_internal::SpinLock freelist_lock(base_internal::kLinkerInitialized);
static base_internal::ThreadIdentity* thread_identity_freelist;

// A per-thread destructor for reclaiming associated ThreadIdentity objects.
// Since we must preserve their storage, we cache them for re-use.
static void ReclaimThreadIdentity(void* v) {
  base_internal::ThreadIdentity* identity =
      static_cast<base_internal::ThreadIdentity*>(v);

  // all_locks might have been allocated by the Mutex implementation.
  // We free it here when we are notified that our thread is dying.
  if (identity->per_thread_synch.all_locks != nullptr) {
    base_internal::LowLevelAlloc::Free(identity->per_thread_synch.all_locks);
  }

  // We must explicitly clear the current thread's identity:
  // (a) Subsequent (unrelated) per-thread destructors may require an identity.
  //     We must guarantee a new identity is used in this case (this destructor
  //     will be reinvoked up to PTHREAD_DESTRUCTOR_ITERATIONS in this case).
  // (b) ThreadIdentity implementations may depend on memory that is not
  //     reinitialized before reuse. We must allow explicit clearing of the
  //     association state in this case.
  base_internal::ClearCurrentThreadIdentity();
  {
    base_internal::SpinLockHolder l(&freelist_lock);
    identity->next = thread_identity_freelist;
    thread_identity_freelist = identity;
  }
}

// Returns its argument rounded up to the next multiple of align.
// align must be a power of two.
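// For example, RoundUp(13, 8) == 16 and RoundUp(16, 8) == 16: adding
// align - 1 carries addr into the next aligned block unless it is already
// aligned, and the mask ~(align - 1) then clears the low bits. Masking is
// only a valid rounding step because align is a power of two.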
static intptr_t RoundUp(intptr_t addr, intptr_t align) {
  return (addr + align - 1) & ~(align - 1);
}

static void ResetThreadIdentity(base_internal::ThreadIdentity* identity) {
  base_internal::PerThreadSynch* pts = &identity->per_thread_synch;
  pts->next = nullptr;
  pts->skip = nullptr;
  pts->may_skip = false;
  pts->waitp = nullptr;
  pts->suppress_fatal_errors = false;
  pts->readers = 0;
  pts->priority = 0;
  pts->next_priority_read_cycles = 0;
  pts->state.store(base_internal::PerThreadSynch::State::kAvailable,
                   std::memory_order_relaxed);
  pts->maybe_unlocking = false;
  pts->wake = false;
  pts->cond_waiter = false;
  pts->all_locks = nullptr;
  identity->waiter_state = {};
  identity->blocked_count_ptr = nullptr;
  identity->ticker.store(0, std::memory_order_relaxed);
  identity->wait_start.store(0, std::memory_order_relaxed);
  identity->is_idle.store(false, std::memory_order_relaxed);
  identity->next = nullptr;
}

static base_internal::ThreadIdentity* NewThreadIdentity() {
  base_internal::ThreadIdentity* identity = nullptr;

  {
    // Re-use a previously released object if possible.
    base_internal::SpinLockHolder l(&freelist_lock);
    if (thread_identity_freelist) {
      identity = thread_identity_freelist;  // Take list-head.
      thread_identity_freelist = thread_identity_freelist->next;
    }
  }

  if (identity == nullptr) {
    // Allocate enough space to align ThreadIdentity to a multiple of
    // PerThreadSynch::kAlignment. This space is never released (it is
    // added to a freelist by ReclaimThreadIdentity instead).
    void* allocation = base_internal::LowLevelAlloc::Alloc(
        sizeof(*identity) + base_internal::PerThreadSynch::kAlignment - 1);
    // Round up the address to the required alignment.
    identity = reinterpret_cast<base_internal::ThreadIdentity*>(
        RoundUp(reinterpret_cast<intptr_t>(allocation),
                base_internal::PerThreadSynch::kAlignment));
  }
  ResetThreadIdentity(identity);

  return identity;
}

// Allocates and attaches a ThreadIdentity object for the calling thread.
// Returns the new identity.
// REQUIRES: CurrentThreadIdentity(false) == nullptr
base_internal::ThreadIdentity* CreateThreadIdentity() {
  base_internal::ThreadIdentity* identity = NewThreadIdentity();
  PerThreadSem::Init(identity);
  // Associate the value with the current thread, and attach our destructor.
  base_internal::SetCurrentThreadIdentity(identity, ReclaimThreadIdentity);
  return identity;
}

}  // namespace synchronization_internal
}  // namespace absl

#endif  // ABSL_LOW_LEVEL_ALLOC_MISSING