#include "absl/synchronization/mutex.h"

#include "absl/base/attributes.h"
#include "absl/base/call_once.h"
#include "absl/base/config.h"
#include "absl/base/dynamic_annotations.h"
#include "absl/base/internal/atomic_hook.h"
#include "absl/base/internal/cycleclock.h"
#include "absl/base/internal/hide_ptr.h"
#include "absl/base/internal/low_level_alloc.h"
#include "absl/base/internal/raw_logging.h"
#include "absl/base/internal/spinlock.h"
#include "absl/base/internal/sysinfo.h"
#include "absl/base/internal/thread_identity.h"
#include "absl/base/internal/tsan_mutex_interface.h"
#include "absl/base/port.h"
#include "absl/debugging/stacktrace.h"
#include "absl/debugging/symbolize.h"
#include "absl/synchronization/internal/graphcycles.h"
#include "absl/synchronization/internal/per_thread_sem.h"
#include "absl/time/time.h"
  std::this_thread::yield();

#if defined(ABSL_HAVE_THREAD_SANITIZER)

                                          bool locking, bool trylock,

enum DelayMode { AGGRESSIVE, GENTLE };

  int spinloop_iterations = 0;
  int32_t mutex_sleep_limit[2] = {};

const MutexGlobals &GetMutexGlobals() {
    const int num_cpus = absl::base_internal::NumCPUs();
    data.spinloop_iterations = num_cpus > 1 ? 1500 : 0;
    data.mutex_sleep_limit[AGGRESSIVE] = 5000;
    data.mutex_sleep_limit[GENTLE] = 250;
    data.mutex_sleep_limit[AGGRESSIVE] = 0;
    data.mutex_sleep_limit[GENTLE] = 0;

namespace synchronization_internal {

  const int32_t limit = GetMutexGlobals().mutex_sleep_limit[mode];
  SchedulingGuard::ScopedEnable enable_rescheduling;
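// Illustrative sketch (not part of the original file): the spin-then-yield
// shape that the tuned limits above feed. The real MutexDelay() consults
// mutex_sleep_limit[mode] and absl-internal primitives; the fixed yield_limit
// and the <thread>/<chrono> sleep below are assumptions made only for this
// example.
namespace {
inline void ExampleBoundedDelay(int *attempt, int yield_limit) {
  if (*attempt < yield_limit) {
    ++*attempt;
    std::this_thread::yield();  // cheap backoff while contention is short
  } else {
    *attempt = 0;
    std::this_thread::sleep_for(std::chrono::microseconds(10));  // deeper backoff
  }
}
}  // namespace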
    v = pv->load(std::memory_order_relaxed);
           ((v & wait_until_clear) != 0 ||
            !pv->compare_exchange_weak(v, v | bits,
                                       std::memory_order_release,
                                       std::memory_order_relaxed)));

    v = pv->load(std::memory_order_relaxed);
  } while ((v & bits) != 0 &&
           ((v & wait_until_clear) != 0 ||
            !pv->compare_exchange_weak(v, v & ~bits,
                                       std::memory_order_release,
                                       std::memory_order_relaxed)));
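// Sketch of the retry pattern used by the atomic bit-twiddling loops above,
// shown on a generic atomic word. "ExampleSetBits" is a hypothetical helper,
// not a function from this file.
namespace {
inline void ExampleSetBits(std::atomic<intptr_t> *word, intptr_t bits) {
  intptr_t v = word->load(std::memory_order_relaxed);
  // compare_exchange_weak may fail spuriously or because another thread
  // changed *word; either way v is reloaded and the OR is retried.
  while (!word->compare_exchange_weak(v, v | bits, std::memory_order_release,
                                      std::memory_order_relaxed)) {
  }
}
}  // namespace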
  SYNCH_EV_TRYLOCK_SUCCESS,
  SYNCH_EV_TRYLOCK_FAILED,
  SYNCH_EV_READERTRYLOCK_SUCCESS,
  SYNCH_EV_READERTRYLOCK_FAILED,
  SYNCH_EV_LOCK_RETURNING,
  SYNCH_EV_READERLOCK_RETURNING,
  SYNCH_EV_READERUNLOCK,
  SYNCH_EV_WAIT_RETURNING,

  SYNCH_F_UNLOCK = 0x08,
  SYNCH_F_LCK_W = SYNCH_F_LCK,
  SYNCH_F_LCK_R = SYNCH_F_LCK | SYNCH_F_R,

static const struct {
    {SYNCH_F_LCK_W | SYNCH_F_TRY, "TryLock succeeded "},
    {0, "TryLock failed "},
    {SYNCH_F_LCK_R | SYNCH_F_TRY, "ReaderTryLock succeeded "},
    {0, "ReaderTryLock failed "},
    {0, "Lock blocking "},
    {SYNCH_F_LCK_W, "Lock returning "},
    {0, "ReaderLock blocking "},
    {SYNCH_F_LCK_R, "ReaderLock returning "},
    {SYNCH_F_LCK_W | SYNCH_F_UNLOCK, "Unlock "},
    {SYNCH_F_LCK_R | SYNCH_F_UNLOCK, "ReaderUnlock "},
    {0, "Wait unblocked "},
    {0, "SignalAll on "},
  for (e = synch_event[h];
  if (name == nullptr) {
  size_t l = strlen(name);
    e->invariant = nullptr;
    strcpy(e->name, name);
    e->next = synch_event[h];

    base_internal::LowLevelAlloc::Free(e);
  bool del = (--(e->refcount) == 0);
  for (pe = &synch_event[h];
    del = (--(e->refcount) == 0);
  for (e = synch_event[h];

  if (e == nullptr || e->log) {
    for (int i = 0; i != n; i++) {
                 (e == nullptr ? "" : e->name), buffer);
  if ((flags & SYNCH_F_LCK) != 0 && e != nullptr && e->invariant != nullptr) {
    const bool locking = (flags & SYNCH_F_UNLOCK) == 0;
    const bool trylock = (flags & SYNCH_F_TRY) != 0;
    const bool read_lock = (flags & SYNCH_F_R) != 0;
                  std::atomic<intptr_t> *cv_word_arg)
      cv_word(cv_word_arg),
      contention_start_cycles(base_internal::CycleClock::Now()) {}

  ret->overflow = false;
  if (s->all_locks == nullptr) {

  static_cast<void>(w);
  bool res = PerThreadSem::Wait(t);

void Mutex::InternalAttemptToUseMutexInFatalSignalHandler() {
  if (identity != nullptr) {
                          std::memory_order_release);
  gettimeofday(&tv, nullptr);

              "PerThreadSynch::kAlignment must be greater than kMuLow");
    ~static_cast<intptr_t>(0),

#ifdef ABSL_INTERNAL_HAVE_TSAN_INTERFACE
  return how == kShared ? __tsan_mutex_read_lock : 0;

  this->ForgetDeadlockInfo();

void Mutex::EnableDebugLog(const char *name) {

void Mutex::EnableInvariantDebugging(void (*invariant)(void *),

  return x->waitp->how == y->waitp->how && x->priority == y->priority &&
         Condition::GuaranteedEqual(x->waitp->cond, y->waitp->cond);
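// Usage sketch (illustrative, not part of this file): registering the
// invariant callback that Mutex::EnableInvariantDebugging() above stores.
// "ExampleCounter" and the check function are hypothetical caller-side names.
namespace {
struct ExampleCounter {
  absl::Mutex mu;
  int value = 0;  // protected by mu
};

void ExampleCheckNonNegative(void *arg) {
  ABSL_RAW_CHECK(static_cast<ExampleCounter *>(arg)->value >= 0,
                 "counter went negative");
}

void ExampleRegisterInvariant(ExampleCounter *c) {
  absl::EnableMutexInvariantDebugging(true);  // global opt-in to checking
  c->mu.EnableInvariantDebugging(&ExampleCheckNonNegative, c);
}
}  // namespace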
  while ((x0 = x1, x1 = x2, x2 = x2->skip) != nullptr) {
    if (ancestor->skip == to_be_removed) {
      if (to_be_removed->skip != nullptr) {
        ancestor->skip = to_be_removed->skip;
      } else if (ancestor->next != to_be_removed) {
        ancestor->skip = nullptr;

  if (waitp->cv_word != nullptr) {
                     s->waitp == nullptr ||
                     s->suppress_fatal_errors,
                 "detected illegal recursion into Mutex code");
  if (head == nullptr) {
    s->maybe_unlocking = false;
#ifdef ABSL_HAVE_PTHREAD_GETSCHEDPARAM
    if (s->next_priority_read_cycles < now_cycles) {
      struct sched_param param;
      const int err = pthread_getschedparam(pthread_self(), &policy, &param);
      s->priority = param.sched_priority;
      s->next_priority_read_cycles =
          static_cast<int64_t>(base_internal::CycleClock::Frequency());
        enqueue_after = advance_to;
        advance_to = Skip(enqueue_after->next);
      } while (s->priority <= advance_to->priority);
               Condition::GuaranteedEqual(waitp->cond, nullptr)) {
      enqueue_after = head;
  if (enqueue_after != nullptr) {
    s->next = enqueue_after->next;
    enqueue_after->next = s;
                   "Mutex Enqueue failure");
    if (enqueue_after != head && enqueue_after->may_skip &&
      enqueue_after->skip = enqueue_after->next;
    s->next = head->next;
  s->state.store(PerThreadSynch::kQueued, std::memory_order_relaxed);

      head = (pw == w) ? nullptr : pw;
      w->next = *wake_tail;
      wake_tail = &w->next;
  } while (orig_h == head && (pw != head || !skipped));
  SchedulingGuard::ScopedDisable disable_rescheduling;
                                      std::memory_order_acquire,
                                      std::memory_order_relaxed)) {
    if ((w = pw->next) != s) {
      } while ((w = pw->next) != s && pw != h);
  s->state.store(PerThreadSynch::kAvailable, std::memory_order_release);
    v = mu_.load(std::memory_order_relaxed);
      h->maybe_unlocking = false;
  } while (!mu_.compare_exchange_weak(v, nv,
                                      std::memory_order_release,
                                      std::memory_order_relaxed));

  while (s->state.load(std::memory_order_acquire) == PerThreadSynch::kQueued) {
    if (!DecrementSynchSem(this, s, s->waitp->timeout)) {
      while (s->next != nullptr) {
      s->waitp->timeout = KernelTimeout::Never();
      s->waitp->cond = nullptr;
                 "detected illegal recursion in Mutex code");

  w->state.store(PerThreadSynch::kAvailable, std::memory_order_release);
  IncrementSynchSem(this, w);
  if (!deadlock_graph) {
  return deadlock_graph->GetId(mu);

  int n = held_locks->n;
  while (i != n && held_locks->locks[i].id != id) {
    held_locks->n = n + 1;

  int n = held_locks->n;
  while (i != n && held_locks->locks[i].id != id) {
                   "thread releasing lock it does not hold: %p %s; "
                   static_cast<void *>(mu),
                   mu_events == nullptr ? "" : mu_events->name);
      held_locks->n = n - 1;

      OnDeadlockCycle::kIgnore) {
      OnDeadlockCycle::kIgnore) {
      OnDeadlockCycle::kIgnore) {

  static const int kSymLen = 200;
  for (int i = 0; i != n; i++) {
    snprintf(buf + len, maxlen - len, "%s\t@ %p %s\n",
             (i == 0 ? "\n" : ""),
enum { kMaxDeadlockPathLen = 10 };

struct DeadlockReportBuffers {

struct ScopedDeadlockReportBuffers {
  ScopedDeadlockReportBuffers() {
    b = reinterpret_cast<DeadlockReportBuffers *>(
  ~ScopedDeadlockReportBuffers() { base_internal::LowLevelAlloc::Free(b); }
  DeadlockReportBuffers *b;

int GetStack(void** stack, int max_depth) {

      OnDeadlockCycle::kIgnore) {
  if (all_locks->n == 0) {
  deadlock_graph->UpdateStackTrace(mu_id, all_locks->n + 1, GetStack);

  for (int i = 0; i != all_locks->n; i++) {
    const Mutex *other =
        static_cast<const Mutex *>(deadlock_graph->Ptr(other_node_id));
    if (other == nullptr) {

    if (!deadlock_graph->InsertEdge(other_node_id, mu_id)) {
      ScopedDeadlockReportBuffers scoped_buffers;
      DeadlockReportBuffers *b = scoped_buffers.b;
      static int number_of_reported_deadlocks = 0;
      number_of_reported_deadlocks++;
      bool symbolize = number_of_reported_deadlocks <= 2;
      for (int j = 0; j != all_locks->n; j++) {
        void* pr = deadlock_graph->Ptr(all_locks->locks[j].id);
        if (pr != nullptr) {
          snprintf(b->buf + len, sizeof (b->buf) - len, " %p", pr);
          len += static_cast<int>(strlen(&b->buf[len]));
                   "Acquiring absl::Mutex %p while holding %s; a cycle in the "
                   "historical lock ordering graph has been observed",
                   static_cast<void *>(mu), b->buf);
      int path_len = deadlock_graph->FindPath(
      for (int j = 0; j != path_len; j++) {
        Mutex *path_mu = static_cast<Mutex *>(deadlock_graph->Ptr(id));
        if (path_mu == nullptr) continue;
        int depth = deadlock_graph->GetStackTrace(id, &stack);
        snprintf(b->buf, sizeof(b->buf),
                 "mutex@%p stack: ", static_cast<void *>(path_mu));
                  static_cast<int>(sizeof(b->buf) - strlen(b->buf)),

          OnDeadlockCycle::kAbort) {

      OnDeadlockCycle::kIgnore) {
void Mutex::ForgetDeadlockInfo() {
      OnDeadlockCycle::kIgnore) {
    if (deadlock_graph != nullptr) {
      deadlock_graph->RemoveNode(this);

void Mutex::AssertNotHeld() const {
      OnDeadlockCycle::kIgnore) {
    for (int i = 0; i != locks->n; i++) {
                     static_cast<const void *>(this),
                     (mu_events == nullptr ? "" : mu_events->name));
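// Usage sketch (illustrative): selecting the behavior of the deadlock checks
// above. SetMutexDeadlockDetectionMode() is the public knob declared in
// mutex.h; kAbort turns a detected potential cycle into a fatal error rather
// than a report.
namespace {
void ExampleEnableStrictDeadlockChecking() {
  absl::SetMutexDeadlockDetectionMode(absl::OnDeadlockCycle::kAbort);
}
}  // namespace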
  int c = GetMutexGlobals().spinloop_iterations;

                                    std::memory_order_acquire,
                                    std::memory_order_relaxed)) {
                                    std::memory_order_acquire,
                                    std::memory_order_relaxed)) {
                                    std::memory_order_acquire,
                                    std::memory_order_relaxed)) {
    this->LockSlow(kShared, nullptr, 0);

  this->AssertReaderHeld();
                 "condition untrue on return from Await");

  this->AssertReaderHeld();
  bool res = this->AwaitCommon(cond, t);
                 "condition untrue on return from Await");
  this->AssertReaderHeld();
  if (!Condition::GuaranteedEqual(&cond, nullptr)) {
  this->UnlockSlow(&waitp);
  this->Block(waitp.thread);
  this->LockSlowLoop(&waitp, flags);
  bool res = waitp.cond != nullptr ||

                                    std::memory_order_acquire,
                                    std::memory_order_relaxed)) {
    mu_.compare_exchange_strong(
        std::memory_order_acquire, std::memory_order_relaxed)) {
      this, __tsan_mutex_try_lock | __tsan_mutex_try_lock_failed, 0);

                             __tsan_mutex_read_lock | __tsan_mutex_try_lock);
                                    std::memory_order_acquire,
                                    std::memory_order_relaxed)) {
        this, __tsan_mutex_read_lock | __tsan_mutex_try_lock, 0);
    v = mu_.load(std::memory_order_relaxed);
                                      std::memory_order_acquire,
                                      std::memory_order_relaxed)) {
          this, __tsan_mutex_read_lock | __tsan_mutex_try_lock, 0);
      v = mu_.load(std::memory_order_relaxed);
      __tsan_mutex_read_lock | __tsan_mutex_try_lock |
          __tsan_mutex_try_lock_failed,

                 static_cast<unsigned>(v));
                 static_cast<long long>(v), static_cast<long long>(x),
                 static_cast<long long>(y));
                                   std::memory_order_release,
                                   std::memory_order_relaxed)) {
    this->UnlockSlow(nullptr);

  return (v & kMuMultipleWaitersMask) == 0;
  if (mu_.compare_exchange_strong(v, v - clear,
                                  std::memory_order_release,
                                  std::memory_order_relaxed)) {
    this->UnlockSlow(nullptr);
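// Usage sketch (illustrative): how callers use the TryLock()/Unlock() fast
// paths implemented above to skip work instead of blocking. "ExampleStats"
// is a hypothetical structure.
namespace {
struct ExampleStats {
  absl::Mutex mu;
  int hits = 0;  // protected by mu
};

bool ExampleTryRecordHit(ExampleStats *s) {
  if (!s->mu.TryLock()) {
    return false;  // another thread holds the lock; caller may retry later
  }
  ++s->hits;
  s->mu.Unlock();
  return true;
}
}  // namespace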
    return ~static_cast<intptr_t>(0);
  return ~static_cast<intptr_t>(kMuDesig);
    return ~static_cast<intptr_t>(0);
  return ~static_cast<intptr_t>(kMuWrWait);

      this->LockSlowWithDeadline(how, cond, KernelTimeout::Never(), flags),
      "condition untrue on return from LockSlow");

                                          bool locking, bool trylock,
#ifdef ABSL_INTERNAL_HAVE_TSAN_INTERFACE
  const int flags = read_lock ? __tsan_mutex_read_lock : 0;
  const int tryflags = flags | (trylock ? __tsan_mutex_try_lock : 0);
  static_cast<void>(mu);
  static_cast<void>(trylock);
  static_cast<void>(read_lock);

  bool res = cond->Eval();
  static_cast<void>(mu);

  bool unlock = false;
      mu_.compare_exchange_strong(
          std::memory_order_acquire, std::memory_order_relaxed)) {
    if (cond == nullptr ||
  if (!Condition::GuaranteedEqual(cond, nullptr)) {
  this->UnlockSlow(&waitp);
  this->Block(waitp.thread);
  this->LockSlowLoop(&waitp, flags);
  return waitp.cond != nullptr ||
#define RAW_CHECK_FMT(cond, ...)                                   \
    if (ABSL_PREDICT_FALSE(!(cond))) {                             \
      ABSL_RAW_LOG(FATAL, "Check " #cond " failed: " __VA_ARGS__); \

                "%s: Mutex corrupt: both reader and writer lock held: %p",
                label, reinterpret_cast<void *>(v));
                "%s: Mutex corrupt: waiting writer with no waiters: %p",
                label, reinterpret_cast<void *>(v));

  SchedulingGuard::ScopedDisable disable_rescheduling;
                   waitp->how == kExclusive ? SYNCH_EV_LOCK : SYNCH_EV_READERLOCK);
                 "detected illegal recursion into Mutex code");
  v = mu_.load(std::memory_order_relaxed);
    if (mu_.compare_exchange_strong(
            std::memory_order_acquire, std::memory_order_relaxed)) {
      if (waitp->cond == nullptr ||
        this->UnlockSlow(waitp);
        this->Block(waitp->thread);
      ABSL_RAW_CHECK(new_h != nullptr, "Enqueue to empty list failed");
      if (mu_.compare_exchange_strong(
              std::memory_order_release, std::memory_order_relaxed)) {
      if (mu_.compare_exchange_strong(
              std::memory_order_acquire, std::memory_order_relaxed)) {
          v = mu_.load(std::memory_order_relaxed);
                                            std::memory_order_release,
                                            std::memory_order_relaxed));
        if (waitp->cond == nullptr ||
          this->UnlockSlow(waitp);
          this->Block(waitp->thread);
        mu_.compare_exchange_strong(
            std::memory_order_acquire, std::memory_order_relaxed)) {
          v = mu_.load(std::memory_order_relaxed);
        } while (!mu_.compare_exchange_weak(
            std::memory_order_release, std::memory_order_relaxed));
        this->Block(waitp->thread);

                 "detected illegal recursion into Mutex code");
                 "detected illegal recursion into Mutex code");
                                            SYNCH_EV_READERLOCK_RETURNING);

  SchedulingGuard::ScopedDisable disable_rescheduling;
  this->AssertReaderHeld();
                   (v & kMuWriter) != 0 ? SYNCH_EV_UNLOCK : SYNCH_EV_READERUNLOCK);
                 "detected illegal recursion into Mutex code");
  v = mu_.load(std::memory_order_relaxed);
                                      std::memory_order_release,
                                      std::memory_order_relaxed)) {
    if (mu_.compare_exchange_strong(v, v - clear,
                                    std::memory_order_release,
                                    std::memory_order_relaxed)) {
                                            std::memory_order_acquire,
                                            std::memory_order_relaxed)) {
      bool do_enqueue = true;
                     "UnlockSlow is confused");
        v = mu_.load(std::memory_order_relaxed);
        do_enqueue = (waitp->cv_word == nullptr);
        if (new_h != nullptr) {
      } while (!mu_.compare_exchange_weak(v, nv,
                                          std::memory_order_release,
                                          std::memory_order_relaxed));
      if (waitp != nullptr) {
                     "waiters disappeared during Enqueue()!");
        mu_.store(nv, std::memory_order_release);
                   "Mutex queue changed beneath us");
      if (old_h != nullptr &&
          Condition::GuaranteedEqual(h->next->waitp->cond, nullptr)) {
        if (pw == nullptr) {
        h->maybe_unlocking = false;
        if (waitp != nullptr) {
          if (new_h != nullptr) {
          mu_.store(nv, std::memory_order_release);
      if (old_h != nullptr) {
        w_walk = old_h->next;
      h->may_skip = false;
      h->maybe_unlocking = true;
      mu_.store(v, std::memory_order_release);

      while (pw_walk != h) {
        w_walk->wake = false;
            w_walk->wake = true;
            w_walk->wake = true;
          pw_walk = Skip(w_walk);
          w_walk = pw_walk->next;

      if (waitp != nullptr) {
                     "unexpected empty wake list");
        h->maybe_unlocking = false;
        mu_.store(nv, std::memory_order_release);
    wake_list = Wakeup(wake_list);
  if (wait_cycles > 0) {

  SchedulingGuard::ScopedDisable disable_rescheduling;
                 "Mutex::Fer while waiting on Condition");
                 "Mutex::Fer while in timed wait");
                 "Mutex::Fer with pending CondVar queueing");
    if ((v & conflicting) == 0) {
      w->state.store(PerThreadSynch::kAvailable, std::memory_order_release);
      IncrementSynchSem(this, w);
      if (mu_.compare_exchange_strong(
              std::memory_order_release, std::memory_order_relaxed)) {
      v = mu_.load(std::memory_order_relaxed);
  } while (!mu_.compare_exchange_weak(
      std::memory_order_release, std::memory_order_relaxed));

void Mutex::AssertHeld() const {
  if ((mu_.load(std::memory_order_relaxed) & kMuWriter) == 0) {
                 static_cast<const void *>(this),
                 (e == nullptr ? "" : e->name));

void Mutex::AssertReaderHeld() const {
        FATAL, "thread should hold at least a read lock on Mutex %p %s",
        static_cast<const void *>(this), (e == nullptr ? "" : e->name));
2454 "PerThreadSynch::kAlignment must be greater than kCvLow");
2456 void CondVar::EnableDebugLog(
const char *
name) {
2462 CondVar::~CondVar() {
2463 if ((
cv_.load(std::memory_order_relaxed) &
kCvEvent) != 0) {
2471 SchedulingGuard::ScopedDisable disable_rescheduling;
2474 for (
v =
cv_.load(std::memory_order_relaxed);;
2475 v =
cv_.load(std::memory_order_relaxed)) {
2478 std::memory_order_acquire,
2479 std::memory_order_relaxed)) {
2489 h = (w ==
s) ?
nullptr : w;
2492 s->state.store(PerThreadSynch::kAvailable, std::memory_order_release);
2497 std::memory_order_release);
2525 std::atomic<intptr_t> *cv_word = waitp->
cv_word;
2528 intptr_t v = cv_word->load(std::memory_order_relaxed);
2531 !cv_word->compare_exchange_weak(
v,
v |
kCvSpin,
2532 std::memory_order_acquire,
2533 std::memory_order_relaxed)) {
2535 v = cv_word->load(std::memory_order_relaxed);
2546 waitp->
thread->
state.store(PerThreadSynch::kQueued,
2547 std::memory_order_relaxed);
2549 std::memory_order_release);
2555 intptr_t mutex_v =
mutex->mu_.load(std::memory_order_relaxed);
2572 mutex->UnlockSlow(&waitp);
2575 while (waitp.
thread->
state.load(std::memory_order_acquire) ==
2576 PerThreadSynch::kQueued) {
2577 if (!Mutex::DecrementSynchSem(
mutex, waitp.
thread, t)) {
2594 t = KernelTimeout::Never();
2615 mutex->Trans(mutex_how);
2629 WaitCommon(
mu, KernelTimeout::Never());
2642 w->
state.store(PerThreadSynch::kAvailable, std::memory_order_release);
2643 Mutex::IncrementSynchSem(
mu, w);
void CondVar::Signal() {
  SchedulingGuard::ScopedDisable disable_rescheduling;
  for (v = cv_.load(std::memory_order_relaxed); v != 0;
       v = cv_.load(std::memory_order_relaxed)) {
                                      std::memory_order_acquire,
                                      std::memory_order_relaxed)) {
                std::memory_order_release);

void CondVar::SignalAll() {
  for (v = cv_.load(std::memory_order_relaxed); v != 0;
       v = cv_.load(std::memory_order_relaxed)) {
        cv_.compare_exchange_strong(v, v & kCvEvent, std::memory_order_acquire,
                                    std::memory_order_relaxed)) {
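// Usage sketch (illustrative): the Mutex + CondVar handshake that Signal()
// and SignalAll() above complete. "ExampleMailbox" is a hypothetical type.
namespace {
struct ExampleMailbox {
  absl::Mutex mu;
  absl::CondVar cv;
  bool has_value = false;  // protected by mu
  int value = 0;           // protected by mu
};

void ExamplePut(ExampleMailbox *m, int v) {
  absl::MutexLock l(&m->mu);
  m->value = v;
  m->has_value = true;
  m->cv.Signal();  // wake one waiter; SignalAll() would wake every waiter
}

int ExampleGet(ExampleMailbox *m) {
  absl::MutexLock l(&m->mu);
  while (!m->has_value) {
    m->cv.Wait(&m->mu);  // releases mu while blocked, reacquires on return
  }
  m->has_value = false;
  return m->value;
}
}  // namespace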
void ReleasableMutexLock::Release() {
                 "ReleasableMutexLock::Release may only be called once");
  this->mu_->Unlock();
  this->mu_ = nullptr;
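// Usage sketch (illustrative): releasing early via the Release() defined
// above so that expensive work runs outside the critical section. Names are
// hypothetical.
namespace {
int ExampleComputeOutsideLock(absl::Mutex *mu, const int *shared_value) {
  absl::ReleasableMutexLock l(mu);
  const int snapshot = *shared_value;  // read while the lock is held
  l.Release();                         // drop the lock before the slow work
  return snapshot * snapshot;          // no shared state touched after Release()
}
}  // namespace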
#ifdef ABSL_HAVE_THREAD_SANITIZER

#define __tsan_read1(addr)  // do nothing if TSan not enabled

  return *(static_cast<bool *>(arg));

    : eval_(&CallVoidPtrFunction),
  return (*c->function_)(c->arg_);
    : eval_(CallVoidPtrFunction),

  return (this->eval_ == nullptr) || (*this->eval_)(this);

  return b == nullptr || b->eval_ == nullptr;
  if (b == nullptr || b->eval_ == nullptr) {
    return a->eval_ == nullptr;
  return a->eval_ == b->eval_ && a->function_ == b->function_ &&
         a->arg_ == b->arg_ && a->method_ == b->method_;
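// Usage sketch (illustrative): the three Condition forms whose evaluation and
// GuaranteedEqual() comparison are implemented above. Names are hypothetical.
namespace {
struct ExampleState {
  bool done = false;
  bool IsDone() { return done; }
};

bool ExampleIsDone(ExampleState *s) { return s->done; }

void ExampleBuildConditions(ExampleState *s) {
  absl::Condition from_bool(&s->done);                     // wait for a bool
  absl::Condition from_func(&ExampleIsDone, s);            // function pointer + arg
  absl::Condition from_method(s, &ExampleState::IsDone);   // object + method
  static_cast<void>(from_bool);
  static_cast<void>(from_func);
  static_cast<void>(from_method);
}
}  // namespace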