15 #include "absl/synchronization/mutex.h"
41 #include "absl/base/attributes.h"
42 #include "absl/base/call_once.h"
43 #include "absl/base/config.h"
44 #include "absl/base/dynamic_annotations.h"
45 #include "absl/base/internal/atomic_hook.h"
46 #include "absl/base/internal/cycleclock.h"
47 #include "absl/base/internal/hide_ptr.h"
48 #include "absl/base/internal/low_level_alloc.h"
49 #include "absl/base/internal/raw_logging.h"
50 #include "absl/base/internal/spinlock.h"
51 #include "absl/base/internal/sysinfo.h"
52 #include "absl/base/internal/thread_identity.h"
53 #include "absl/base/internal/tsan_mutex_interface.h"
54 #include "absl/base/port.h"
55 #include "absl/debugging/stacktrace.h"
56 #include "absl/debugging/symbolize.h"
57 #include "absl/synchronization/internal/graphcycles.h"
58 #include "absl/synchronization/internal/per_thread_sem.h"
59 #include "absl/time/time.h"
std::this_thread::yield();

#if defined(ABSL_HAVE_THREAD_SANITIZER)

bool locking, bool trylock,
enum DelayMode { AGGRESSIVE, GENTLE };

int spinloop_iterations = 0;
int32_t mutex_sleep_limit[2] = {};
const MutexGlobals &GetMutexGlobals() {
  const int num_cpus = absl::base_internal::NumCPUs();
  data.spinloop_iterations = num_cpus > 1 ? 1500 : 0;
  // Spinning only helps when another CPU can release the lock concurrently,
  // so the yield limits are zero on a uniprocessor.
  if (num_cpus > 1) {
    data.mutex_sleep_limit[AGGRESSIVE] = 5000;
    data.mutex_sleep_limit[GENTLE] = 250;
  } else {
    data.mutex_sleep_limit[AGGRESSIVE] = 0;
    data.mutex_sleep_limit[GENTLE] = 0;
  }
namespace synchronization_internal {

const int32_t limit = GetMutexGlobals().mutex_sleep_limit[mode];

SchedulingGuard::ScopedEnable enable_rescheduling;
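// Commentary added for this excerpt (not from the original source): the
// `limit` lookup and the ScopedEnable above belong to the MutexDelay()
// backoff helper.  A hedged sketch of the strategy, with the exact branch
// structure simplified and the constants illustrative only:
//
//   if (c < limit) {
//     c++;                         // still within the spin budget: just retry
//   } else {
//     std::this_thread::yield();   // out of budget: yield (or sleep briefly)
//     c = 0;
//   }
//   return c;                      // caller feeds c back in on the next call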
197 v = pv->load(std::memory_order_relaxed);
199 ((
v & wait_until_clear) != 0 ||
200 !pv->compare_exchange_weak(
v,
v |
bits,
201 std::memory_order_release,
202 std::memory_order_relaxed)));
213 v = pv->load(std::memory_order_relaxed);
214 }
while ((
v &
bits) != 0 &&
215 ((
v & wait_until_clear) != 0 ||
216 !pv->compare_exchange_weak(
v,
v & ~
bits,
217 std::memory_order_release,
218 std::memory_order_relaxed)));
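// Commentary added for this excerpt: the two loops above are the bodies of
// the bit-setting and bit-clearing helpers.  Each retries a weak CAS until
// the requested bits are set (respectively cleared) in *pv, but falls back to
// a plain reload whenever any bit in wait_until_clear is currently set, i.e.
// while another thread transiently owns the word.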
  SYNCH_EV_TRYLOCK_SUCCESS,
  SYNCH_EV_TRYLOCK_FAILED,
  SYNCH_EV_READERTRYLOCK_SUCCESS,
  SYNCH_EV_READERTRYLOCK_FAILED,
  SYNCH_EV_LOCK_RETURNING,
  SYNCH_EV_READERLOCK_RETURNING,
  SYNCH_EV_READERUNLOCK,
  SYNCH_EV_WAIT_RETURNING,
  SYNCH_F_UNLOCK = 0x08,

  SYNCH_F_LCK_W = SYNCH_F_LCK,
  SYNCH_F_LCK_R = SYNCH_F_LCK | SYNCH_F_R,
static const struct {
    {SYNCH_F_LCK_W | SYNCH_F_TRY, "TryLock succeeded "},
    {0, "TryLock failed "},
    {SYNCH_F_LCK_R | SYNCH_F_TRY, "ReaderTryLock succeeded "},
    {0, "ReaderTryLock failed "},
    {0, "Lock blocking "},
    {SYNCH_F_LCK_W, "Lock returning "},
    {0, "ReaderLock blocking "},
    {SYNCH_F_LCK_R, "ReaderLock returning "},
    {SYNCH_F_LCK_W | SYNCH_F_UNLOCK, "Unlock "},
    {SYNCH_F_LCK_R | SYNCH_F_UNLOCK, "ReaderUnlock "},
    {0, "Wait unblocked "},
    {0, "SignalAll on "},
  for (e = synch_event[h];

  if (name == nullptr) {
    size_t l = strlen(name);
    e->invariant = nullptr;
    strcpy(e->name, name);
    e->next = synch_event[h];

  base_internal::LowLevelAlloc::Free(e);

  bool del = (--(e->refcount) == 0);
  for (pe = &synch_event[h];

  del = (--(e->refcount) == 0);
  for (e = synch_event[h];

  if (e == nullptr || e->log) {

    for (int i = 0; i != n; i++) {

                 (e == nullptr ? "" : e->name), buffer);

  if ((flags & SYNCH_F_LCK) != 0 && e != nullptr && e->invariant != nullptr) {

    const bool locking = (flags & SYNCH_F_UNLOCK) == 0;
    const bool trylock = (flags & SYNCH_F_TRY) != 0;
    const bool read_lock = (flags & SYNCH_F_R) != 0;
struct SynchWaitParams {

                  std::atomic<intptr_t> *cv_word_arg)
        cv_word(cv_word_arg),
        contention_start_cycles(base_internal::CycleClock::Now()) {}

  std::atomic<intptr_t> *cv_word;

  int64_t contention_start_cycles;
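// Commentary added for this excerpt: SynchWaitParams bundles what a blocked
// thread needs while it sits on a Mutex or CondVar queue, including the
// CondVar word (cv_word) when the wait originated in a condition variable and
// the CycleClock timestamp at which contention began, which feeds the
// contention-profiling hooks.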
struct SynchLocksHeld {

  ret->overflow = false;
  if (s->all_locks == nullptr) {

  static_cast<void>(w);
  bool res = PerThreadSem::Wait(t);
void Mutex::InternalAttemptToUseMutexInFatalSignalHandler() {

  if (identity != nullptr) {

                                 std::memory_order_release);

  gettimeofday(&tv, nullptr);
673 "PerThreadSynch::kAlignment must be greater than kMuLow");
709 ~static_cast<intptr_t>(0),
#ifdef ABSL_INTERNAL_HAVE_TSAN_INTERFACE
  return how == kShared ? __tsan_mutex_read_lock : 0;
736 this->ForgetDeadlockInfo();
741 void Mutex::EnableDebugLog(
const char *
name) {
751 void Mutex::EnableInvariantDebugging(
void (*
invariant)(
void *),
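// Illustrative caller-side use of the two debugging hooks declared above
// (added for this excerpt, not part of the original file).  When invariant
// checking is enabled, the callback may be run as the mutex is acquired or
// released; the Account type below is a placeholder:
//
//   struct Account { absl::Mutex mu; int64_t balance = 0; };
//
//   void CheckBalanceNonNegative(void *arg) {
//     auto *acct = static_cast<Account *>(arg);
//     assert(acct->balance >= 0);
//   }
//
//   Account acct;
//   acct.mu.EnableDebugLog("account-mu");
//   acct.mu.EnableInvariantDebugging(&CheckBalanceNonNegative, &acct);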
772 return x->waitp->how ==
y->waitp->how &&
x->priority ==
y->priority &&
773 Condition::GuaranteedEqual(
x->waitp->cond,
y->waitp->cond);
  while ((x0 = x1, x1 = x2, x2 = x2->skip) != nullptr) {

  if (ancestor->skip == to_be_removed) {
    if (to_be_removed->skip != nullptr) {
      ancestor->skip = to_be_removed->skip;
    } else if (ancestor->next != to_be_removed) {
      ancestor->skip = ancestor->next;
    } else {
      ancestor->skip = nullptr;
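// Commentary added for this excerpt: waiters queued on a Mutex form a
// circular list of PerThreadSynch nodes, and each node may also carry a
// `skip` pointer to a later node that is "equivalent" to it (same lock mode,
// same priority, and a condition GuaranteedEqual to its own, as computed just
// above).  Skip() follows and path-compresses these pointers so that Enqueue
// can hop over long runs of equivalent waiters, and FixSkip() repairs a skip
// pointer that would otherwise dangle when a node is removed.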
893 if (waitp->cv_word !=
nullptr) {
900 s->waitp ==
nullptr ||
902 s->suppress_fatal_errors,
903 "detected illegal recursion into Mutex code");
909 if (head ==
nullptr) {
912 s->maybe_unlocking =
false;
916 #ifdef ABSL_HAVE_PTHREAD_GETSCHEDPARAM
918 if (
s->next_priority_read_cycles < now_cycles) {
923 struct sched_param param;
924 const int err = pthread_getschedparam(pthread_self(), &policy, ¶m);
928 s->priority = param.sched_priority;
929 s->next_priority_read_cycles =
931 static_cast<int64_t>(base_internal::CycleClock::Frequency());
945 enqueue_after = advance_to;
947 advance_to =
Skip(enqueue_after->
next);
948 }
while (
s->priority <= advance_to->
priority);
952 Condition::GuaranteedEqual(waitp->cond,
nullptr)) {
956 enqueue_after = head;
960 if (enqueue_after !=
nullptr) {
961 s->next = enqueue_after->
next;
962 enqueue_after->
next =
s;
973 "Mutex Enqueue failure");
975 if (enqueue_after != head && enqueue_after->
may_skip &&
978 enqueue_after->
skip = enqueue_after->
next;
986 s->next = head->
next;
997 s->state.store(PerThreadSynch::kQueued, std::memory_order_relaxed);
  head = (pw == w) ? nullptr : pw;

      w->next = *wake_tail;

      wake_tail = &w->next;

  } while (orig_h == head && (pw != head || !skipped));
  SchedulingGuard::ScopedDisable disable_rescheduling;

                                  std::memory_order_acquire,
                                  std::memory_order_relaxed)) {

    if ((w = pw->next) != s) {

      } while ((w = pw->next) != s && pw != h);

  s->state.store(PerThreadSynch::kAvailable, std::memory_order_release);

    v = mu_.load(std::memory_order_relaxed);

      h->maybe_unlocking = false;

  } while (!mu_.compare_exchange_weak(v, nv,
                                      std::memory_order_release,
                                      std::memory_order_relaxed));
  while (s->state.load(std::memory_order_acquire) == PerThreadSynch::kQueued) {
    if (!DecrementSynchSem(this, s, s->waitp->timeout)) {

      while (s->next != nullptr) {

      s->waitp->timeout = KernelTimeout::Never();
      s->waitp->cond = nullptr;

                 "detected illegal recursion in Mutex code");

  w->state.store(PerThreadSynch::kAvailable, std::memory_order_release);
  IncrementSynchSem(this, w);
  if (!deadlock_graph) {

  return deadlock_graph->GetId(mu);
  int n = held_locks->n;

  while (i != n && held_locks->locks[i].id != id) {

      held_locks->overflow = true;

      held_locks->locks[i].mu = mu;
      held_locks->locks[i].count = 1;
      held_locks->locks[i].id = id;
      held_locks->n = n + 1;

    held_locks->locks[i].count++;
  int n = held_locks->n;

  while (i != n && held_locks->locks[i].id != id) {

    if (!held_locks->overflow) {

      while (i != n && held_locks->locks[i].mu != mu) {

                     "thread releasing lock it does not hold: %p %s; "

                     static_cast<void *>(mu),
                     mu_events == nullptr ? "" : mu_events->name);

  } else if (held_locks->locks[i].count == 1) {
    held_locks->n = n - 1;
    held_locks->locks[i] = held_locks->locks[n - 1];
    held_locks->locks[n - 1].mu = nullptr;

    assert(held_locks->locks[i].count > 0);
    held_locks->locks[i].count--;
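// Commentary added for this excerpt: LockEnter()/LockLeave() keep a small
// fixed-size per-thread table of (mutex, GraphId, count) entries recording
// which locks the thread currently holds.  When the table fills up, the
// `overflow` flag is set and the bookkeeping (and hence deadlock detection
// for this thread) degrades gracefully instead of failing; releasing a lock
// that is not in the table triggers the "releasing lock it does not hold"
// diagnostic seen above.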
      OnDeadlockCycle::kIgnore) {
      OnDeadlockCycle::kIgnore) {
      OnDeadlockCycle::kIgnore) {
  static const int kSymLen = 200;

  for (int i = 0; i != n; i++) {

      snprintf(buf + len, maxlen - len, "%s\t@ %p %s\n",
               (i == 0 ? "\n" : ""),
enum { kMaxDeadlockPathLen = 10 };

struct DeadlockReportBuffers {

struct ScopedDeadlockReportBuffers {
  ScopedDeadlockReportBuffers() {
    b = reinterpret_cast<DeadlockReportBuffers *>(

  ~ScopedDeadlockReportBuffers() { base_internal::LowLevelAlloc::Free(b); }
  DeadlockReportBuffers *b;

int GetStack(void **stack, int max_depth) {
      OnDeadlockCycle::kIgnore) {

  if (all_locks->n == 0) {

  deadlock_graph->UpdateStackTrace(mu_id, all_locks->n + 1, GetStack);

  for (int i = 0; i != all_locks->n; i++) {
    const GraphId other_node_id = all_locks->locks[i].id;
    const Mutex *other =
        static_cast<const Mutex *>(deadlock_graph->Ptr(other_node_id));
    if (other == nullptr) {

    if (!deadlock_graph->InsertEdge(other_node_id, mu_id)) {
      ScopedDeadlockReportBuffers scoped_buffers;
      DeadlockReportBuffers *b = scoped_buffers.b;
      static int number_of_reported_deadlocks = 0;
      number_of_reported_deadlocks++;

      bool symbolize = number_of_reported_deadlocks <= 2;

      for (int j = 0; j != all_locks->n; j++) {
        void *pr = deadlock_graph->Ptr(all_locks->locks[j].id);
        if (pr != nullptr) {
          snprintf(b->buf + len, sizeof(b->buf) - len, " %p", pr);
          len += static_cast<int>(strlen(&b->buf[len]));
1378 "Acquiring absl::Mutex %p while holding %s; a cycle in the "
1379 "historical lock ordering graph has been observed",
1380 static_cast<void *
>(
mu),
b->buf);
1382 int path_len = deadlock_graph->FindPath(
1384 for (
int j = 0; j != path_len; j++) {
1386 Mutex *path_mu =
static_cast<Mutex *
>(deadlock_graph->Ptr(
id));
1387 if (path_mu ==
nullptr)
continue;
1389 int depth = deadlock_graph->GetStackTrace(
id, &
stack);
1390 snprintf(
b->buf,
sizeof(
b->buf),
1391 "mutex@%p stack: ",
static_cast<void *
>(path_mu));
1393 static_cast<int>(
sizeof(
b->buf) - strlen(
b->buf)),
1398 OnDeadlockCycle::kAbort) {
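// Commentary added for this excerpt: deadlock detection works on a global
// GraphCycles graph in which every Mutex gets a node.  Each time a thread
// acquires a mutex while already holding others, edges are inserted from the
// held mutexes to the one being acquired; if InsertEdge() reports that an
// edge would close a cycle, the code above builds a report (symbolizing only
// the first couple of occurrences) and either logs it or aborts depending on
// the configured mode.  Callers choose that mode with the public API, e.g.:
//
//   absl::SetMutexDeadlockDetectionMode(absl::OnDeadlockCycle::kReport);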
      OnDeadlockCycle::kIgnore) {

void Mutex::ForgetDeadlockInfo() {
      OnDeadlockCycle::kIgnore) {

    if (deadlock_graph != nullptr) {
      deadlock_graph->RemoveNode(this);
void Mutex::AssertNotHeld() const {

          OnDeadlockCycle::kIgnore) {

    for (int i = 0; i != locks->n; i++) {
      if (locks->locks[i].id == id) {

                     static_cast<const void *>(this),
                     (mu_events == nullptr ? "" : mu_events->name));
  int c = GetMutexGlobals().spinloop_iterations;

                                    std::memory_order_acquire,
                                    std::memory_order_relaxed)) {

                                    std::memory_order_acquire,
                                    std::memory_order_relaxed)) {

                                     std::memory_order_acquire,
                                     std::memory_order_relaxed)) {

    this->LockSlow(kShared, nullptr, 0);
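// Commentary added for this excerpt: Lock() and ReaderLock() first attempt a
// single compare_exchange on the packed word mu_ (acquire on success, relaxed
// on failure); the spinning helper that reads spinloop_iterations above
// retries that CAS a bounded number of times before the caller falls back to
// LockSlow(), the queueing/blocking path shown in LockSlowLoop() further
// down.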
bool Mutex::ReaderLockWhenWithTimeout(const Condition &cond,

bool Mutex::ReaderLockWhenWithDeadline(const Condition &cond,
1551 this->AssertReaderHeld();
1555 "condition untrue on return from Await");
1566 this->AssertReaderHeld();
1572 bool res = this->AwaitCommon(
cond, t);
1574 "condition untrue on return from Await");
1579 this->AssertReaderHeld();
  SynchWaitParams waitp(

  if (!Condition::GuaranteedEqual(&cond, nullptr)) {

    this->UnlockSlow(&waitp);
    this->Block(waitp.thread);

  this->LockSlowLoop(&waitp, flags);
  bool res = waitp.cond != nullptr ||
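// Illustrative use of the Await() API whose slow path (AwaitCommon) appears
// above; added for this excerpt, not part of the original file.  Await()
// atomically releases the mutex, blocks until the condition is true, and
// re-acquires the mutex in the same mode before returning:
//
//   absl::Mutex mu;
//   std::deque<int> queue;
//
//   int Pop() {
//     absl::MutexLock lock(&mu);
//     mu.Await(absl::Condition(
//         +[](std::deque<int> *q) { return !q->empty(); }, &queue));
//     int v = queue.front();   // condition holds and mu is still held here
//     queue.pop_front();
//     return v;
//   }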
                                     std::memory_order_acquire,
                                     std::memory_order_relaxed)) {

      mu_.compare_exchange_strong(
          std::memory_order_acquire, std::memory_order_relaxed)) {

      this, __tsan_mutex_try_lock | __tsan_mutex_try_lock_failed, 0);

                             __tsan_mutex_read_lock | __tsan_mutex_try_lock);

                                     std::memory_order_acquire,
                                     std::memory_order_relaxed)) {

        this, __tsan_mutex_read_lock | __tsan_mutex_try_lock, 0);
    v = mu_.load(std::memory_order_relaxed);

                                       std::memory_order_acquire,
                                       std::memory_order_relaxed)) {

          this, __tsan_mutex_read_lock | __tsan_mutex_try_lock, 0);

      v = mu_.load(std::memory_order_relaxed);

      this, __tsan_mutex_read_lock | __tsan_mutex_try_lock |
                __tsan_mutex_try_lock_failed,

                   static_cast<unsigned>(v));
                 static_cast<long long>(v), static_cast<long long>(x),
                 static_cast<long long>(y));

                                   std::memory_order_release,
                                   std::memory_order_relaxed)) {

    this->UnlockSlow(nullptr);

  return (v & kMuMultipleWaitersMask) == 0;
  if (mu_.compare_exchange_strong(v, v - clear,
                                  std::memory_order_release,
                                  std::memory_order_relaxed)) {

    this->UnlockSlow(nullptr);
    ~static_cast<intptr_t>(0),
    ~static_cast<intptr_t>(kMuDesig)

    ~static_cast<intptr_t>(0),
    ~static_cast<intptr_t>(kMuWrWait)

      this->LockSlowWithDeadline(how, cond, KernelTimeout::Never(), flags),
      "condition untrue on return from LockSlow");
                                   bool locking, bool trylock,

#ifdef ABSL_INTERNAL_HAVE_TSAN_INTERFACE
  const int flags = read_lock ? __tsan_mutex_read_lock : 0;
  const int tryflags = flags | (trylock ? __tsan_mutex_try_lock : 0);

  static_cast<void>(mu);
  static_cast<void>(trylock);
  static_cast<void>(read_lock);
  bool res = cond->Eval();

  static_cast<void>(mu);
bool Mutex::LockSlowWithDeadline(MuHow how, const Condition *cond,

  bool unlock = false;
  if ((v & how->fast_need_zero) == 0 &&
      mu_.compare_exchange_strong(
          std::memory_order_acquire, std::memory_order_relaxed)) {
    if (cond == nullptr ||

  SynchWaitParams waitp(

  if (!Condition::GuaranteedEqual(cond, nullptr)) {

    this->UnlockSlow(&waitp);
    this->Block(waitp.thread);

  this->LockSlowLoop(&waitp, flags);
  return waitp.cond != nullptr ||
#define RAW_CHECK_FMT(cond, ...)                                     \
    if (ABSL_PREDICT_FALSE(!(cond))) {                               \
      ABSL_RAW_LOG(FATAL, "Check " #cond " failed: " __VA_ARGS__);   \
1906 "%s: Mutex corrupt: both reader and writer lock held: %p",
1907 label,
reinterpret_cast<void *
>(
v));
1909 "%s: Mutex corrupt: waiting writer with no waiters: %p",
1910 label,
reinterpret_cast<void *
>(
v));
void Mutex::LockSlowLoop(SynchWaitParams *waitp, int flags) {
  SchedulingGuard::ScopedDisable disable_rescheduling;

                 waitp->how == kExclusive ? SYNCH_EV_LOCK : SYNCH_EV_READERLOCK);

      waitp->thread->waitp == nullptr || waitp->thread->suppress_fatal_errors,
      "detected illegal recursion into Mutex code");

    v = mu_.load(std::memory_order_relaxed);

    if ((v & waitp->how->slow_need_zero) == 0) {
      if (mu_.compare_exchange_strong(
              v, (waitp->how->fast_or |
                  waitp->how->fast_add,
              std::memory_order_acquire, std::memory_order_relaxed)) {
        if (waitp->cond == nullptr ||

          this->UnlockSlow(waitp);
          this->Block(waitp->thread);

        ABSL_RAW_CHECK(new_h != nullptr, "Enqueue to empty list failed");

        if (mu_.compare_exchange_strong(
                std::memory_order_release, std::memory_order_relaxed)) {

      waitp->thread->waitp = nullptr;

    } else if ((v & waitp->how->slow_inc_need_zero &

      if (mu_.compare_exchange_strong(
              std::memory_order_acquire, std::memory_order_relaxed)) {

          v = mu_.load(std::memory_order_relaxed);

                                              std::memory_order_release,
                                              std::memory_order_relaxed));
        if (waitp->cond == nullptr ||

          this->UnlockSlow(waitp);
          this->Block(waitp->thread);

          mu_.compare_exchange_strong(
              std::memory_order_acquire, std::memory_order_relaxed)) {

          v = mu_.load(std::memory_order_relaxed);
        } while (!mu_.compare_exchange_weak(
            std::memory_order_release, std::memory_order_relaxed));

        this->Block(waitp->thread);

      waitp->thread->waitp == nullptr || waitp->thread->suppress_fatal_errors,
      "detected illegal recursion into Mutex code");

      waitp->thread->waitp == nullptr || waitp->thread->suppress_fatal_errors,
      "detected illegal recursion into Mutex code");

                 waitp->how == kExclusive ? SYNCH_EV_LOCK_RETURNING
                                          : SYNCH_EV_READERLOCK_RETURNING);
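// Commentary added for this excerpt: LockSlowLoop() roughly alternates
// between (1) retrying the fast CAS when no conflicting bits are set,
// (2) incrementing the reader count when only that is required, and
// (3) enqueueing the current thread's PerThreadSynch node while holding the
// kMuSpin bit and then blocking on its per-thread semaphore via Block().
// After every wakeup the condition (waitp->cond), if any, is re-evaluated;
// when it is false the thread hands the lock straight back through
// UnlockSlow(waitp) and blocks again.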
  SchedulingGuard::ScopedDisable disable_rescheduling;
  this->AssertReaderHeld();

                 (v & kMuWriter) != 0 ? SYNCH_EV_UNLOCK : SYNCH_EV_READERUNLOCK);
  ABSL_RAW_CHECK(waitp == nullptr || waitp->thread->waitp == nullptr ||
                     waitp->thread->suppress_fatal_errors,
                 "detected illegal recursion into Mutex code");

    v = mu_.load(std::memory_order_relaxed);

                                            std::memory_order_release,
                                            std::memory_order_relaxed)) {

      if (mu_.compare_exchange_strong(v, v - clear,
                                      std::memory_order_release,
                                      std::memory_order_relaxed)) {

                                          std::memory_order_acquire,
                                          std::memory_order_relaxed)) {

      bool do_enqueue = true;

                     "UnlockSlow is confused");

        v = mu_.load(std::memory_order_relaxed);

        do_enqueue = (waitp->cv_word == nullptr);

          if (new_h != nullptr) {

      } while (!mu_.compare_exchange_weak(v, nv,
                                          std::memory_order_release,
                                          std::memory_order_relaxed));

      if (waitp != nullptr) {

                       "waiters disappeared during Enqueue()!");

        mu_.store(nv, std::memory_order_release);

                     "Mutex queue changed beneath us");

      if (old_h != nullptr &&

          Condition::GuaranteedEqual(h->next->waitp->cond, nullptr)) {

        if (pw == nullptr) {

        h->maybe_unlocking = false;
        if (waitp != nullptr) {

          if (new_h != nullptr) {

          mu_.store(nv, std::memory_order_release);

      if (old_h != nullptr) {

        w_walk = old_h->next;

      h->may_skip = false;

      h->maybe_unlocking = true;

      mu_.store(v, std::memory_order_release);

      while (pw_walk != h) {
        w_walk->wake = false;

            w_walk->wake = true;

            w_walk->wake = true;

          pw_walk = Skip(w_walk);

          w_walk = pw_walk->next;

      if (waitp != nullptr) {

                     "unexpected empty wake list");

        h->maybe_unlocking = false;

      mu_.store(nv, std::memory_order_release);

    wake_list = Wakeup(wake_list);
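// Commentary added for this excerpt: UnlockSlow() does its real work while
// holding the kMuSpin bit.  Roughly, it walks the waiter list setting
// w_walk->wake on the threads that should run next, splices them onto
// wake_list, clears maybe_unlocking, publishes the new mu_ word with a
// release store, and only then calls Wakeup() to post each woken thread's
// semaphore outside the spinlock.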
void Mutex::Trans(MuHow how) {

  SchedulingGuard::ScopedDisable disable_rescheduling;

                 "Mutex::Fer while waiting on Condition");
                 "Mutex::Fer while in timed wait");
                 "Mutex::Fer with pending CondVar queueing");

    if ((v & conflicting) == 0) {

      w->state.store(PerThreadSynch::kAvailable, std::memory_order_release);
      IncrementSynchSem(this, w);

      if (mu_.compare_exchange_strong(
              std::memory_order_release, std::memory_order_relaxed)) {

      v = mu_.load(std::memory_order_relaxed);
    } while (!mu_.compare_exchange_weak(
        std::memory_order_release, std::memory_order_relaxed));
void Mutex::AssertHeld() const {
  if ((mu_.load(std::memory_order_relaxed) & kMuWriter) == 0) {

                 static_cast<const void *>(this),
                 (e == nullptr ? "" : e->name));

void Mutex::AssertReaderHeld() const {

        FATAL, "thread should hold at least a read lock on Mutex %p %s",
        static_cast<const void *>(this), (e == nullptr ? "" : e->name));
2436 "PerThreadSynch::kAlignment must be greater than kCvLow");
2438 void CondVar::EnableDebugLog(
const char *
name) {
CondVar::~CondVar() {
  if ((cv_.load(std::memory_order_relaxed) & kCvEvent) != 0) {
  SchedulingGuard::ScopedDisable disable_rescheduling;

  for (v = cv_.load(std::memory_order_relaxed);;
       v = cv_.load(std::memory_order_relaxed)) {

                                      std::memory_order_acquire,
                                      std::memory_order_relaxed)) {

        while (w->next != s && w->next != h) {

            h = (w == s) ? nullptr : w;

          s->state.store(PerThreadSynch::kAvailable, std::memory_order_release);

                  std::memory_order_release);
  std::atomic<intptr_t> *cv_word = waitp->cv_word;
  waitp->cv_word = nullptr;

  intptr_t v = cv_word->load(std::memory_order_relaxed);

         !cv_word->compare_exchange_weak(v, v | kCvSpin,
                                         std::memory_order_acquire,
                                         std::memory_order_relaxed)) {

    v = cv_word->load(std::memory_order_relaxed);

  ABSL_RAW_CHECK(waitp->thread->waitp == nullptr, "waiting when shouldn't be");
  waitp->thread->waitp = waitp;

    waitp->thread->next = waitp->thread;

    waitp->thread->next = h->next;
    h->next = waitp->thread;

  waitp->thread->state.store(PerThreadSynch::kQueued,
                             std::memory_order_relaxed);

                 std::memory_order_release);
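// Commentary added for this excerpt: CondVarEnqueue() runs while the caller
// still holds the associated Mutex.  It claims the condition variable's
// kCvSpin bit with the CAS loop above, links the waiting thread's
// PerThreadSynch node onto the CondVar's circular queue, marks it kQueued,
// and clears waitp->cv_word so that the subsequent Mutex::UnlockSlow() knows
// the waiter has been parked on the CondVar rather than on the Mutex.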
  intptr_t mutex_v = mutex->mu_.load(std::memory_order_relaxed);

  SynchWaitParams waitp(mutex_how, nullptr, t, mutex,

  mutex->UnlockSlow(&waitp);

  while (waitp.thread->state.load(std::memory_order_acquire) ==
         PerThreadSynch::kQueued) {
    if (!Mutex::DecrementSynchSem(mutex, waitp.thread, t)) {

      this->Remove(waitp.thread);

  ABSL_RAW_CHECK(waitp.thread->waitp != nullptr, "not waiting when should be");
  waitp.thread->waitp = nullptr;

  mutex->Trans(mutex_how);
void CondVar::Wait(Mutex *mu) {
  WaitCommon(mu, KernelTimeout::Never());

  w->state.store(PerThreadSynch::kAvailable, std::memory_order_release);
  Mutex::IncrementSynchSem(mu, w);
void CondVar::Signal() {
  SchedulingGuard::ScopedDisable disable_rescheduling;

  for (v = cv_.load(std::memory_order_relaxed); v != 0;
       v = cv_.load(std::memory_order_relaxed)) {

                                    std::memory_order_acquire,
                                    std::memory_order_relaxed)) {

                  std::memory_order_release);
void CondVar::SignalAll() {

  for (v = cv_.load(std::memory_order_relaxed); v != 0;
       v = cv_.load(std::memory_order_relaxed)) {

        cv_.compare_exchange_strong(v, v & kCvEvent,
                                    std::memory_order_acquire,
                                    std::memory_order_relaxed)) {
void ReleasableMutexLock::Release() {
                 "ReleasableMutexLock::Release may only be called once");
  this->mu_->Unlock();
  this->mu_ = nullptr;
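// Illustrative use of ReleasableMutexLock (added for this excerpt, not part
// of the original file; the Item type and its methods are placeholders).  It
// behaves like MutexLock but allows the lock to be dropped early, exactly
// once, before the scope ends:
//
//   void Process(absl::Mutex *mu, Item *item) {
//     absl::ReleasableMutexLock lock(mu);
//     item->PrepareUnderLock();
//     lock.Release();              // further work does not need the mutex
//     item->ExpensiveWorkWithoutLock();
//   }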
#ifdef ABSL_HAVE_THREAD_SANITIZER

#define __tsan_read1(addr)  // do nothing if TSan not enabled

  return *(static_cast<bool *>(arg));
    : eval_(&CallVoidPtrFunction),

bool Condition::CallVoidPtrFunction(const Condition *c) {
  return (*c->function_)(c->arg_);

    : eval_(CallVoidPtrFunction),
bool Condition::Eval() const {
  return (this->eval_ == nullptr) || (*this->eval_)(this);

    return b == nullptr || b->eval_ == nullptr;

  if (b == nullptr || b->eval_ == nullptr) {
    return a->eval_ == nullptr;

  return a->eval_ == b->eval_ && a->function_ == b->function_ &&
         a->arg_ == b->arg_ && a->method_ == b->method_;