#if defined(THREAD_SANITIZER)
constexpr OnDeadlockCycle kDeadlockDetectionDefault = OnDeadlockCycle::kIgnore;
#else
constexpr OnDeadlockCycle kDeadlockDetectionDefault = OnDeadlockCycle::kAbort;
#endif
ABSL_CONST_INIT std::atomic<OnDeadlockCycle> synch_deadlock_detection(
    kDeadlockDetectionDefault);

static struct MutexGlobals {
  MutexGlobals() {}
  int num_cpus;
  int spinloop_iterations;
  // Pad the struct to a full cacheline.
  char padding[ABSL_CACHELINE_SIZE - 2 * sizeof(int)];
} ABSL_CACHELINE_ALIGNED mutex_globals;
static_assert(
    sizeof(MutexGlobals) == ABSL_CACHELINE_SIZE,
    "MutexGlobals must occupy an entire cacheline to prevent false sharing");

// Tracing/symbolization hooks, installed by the Register*() functions below.
ABSL_CONST_INIT absl::base_internal::AtomicHook<
    void (*)(const char *msg, const void *obj, int64_t wait_cycles)>
    mutex_tracer;
ABSL_CONST_INIT absl::base_internal::AtomicHook<
    void (*)(const char *msg, const void *cv)> cond_var_tracer;

static inline bool EvalConditionAnnotated(const Condition *cond, Mutex *mu,
                                          bool locking, bool trylock,
                                          bool read_lock);

void RegisterMutexProfiler(void (*fn)(int64_t wait_timestamp)) {
  submit_profile_data.Store(fn);
}
void RegisterMutexTracer(void (*fn)(const char *msg, const void *obj,
                                    int64_t wait_cycles)) {
  mutex_tracer.Store(fn);
}
void RegisterCondVarTracer(void (*fn)(const char *msg, const void *cv)) {
  cond_var_tracer.Store(fn);
}
void RegisterSymbolizer(bool (*fn)(const void *pc, char *out, int out_size)) {
  symbolizer.Store(fn);
}
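The hooks above are reached through the public registration functions. A minimal sketch of installing a tracer (the logging body and function names here are illustrative, not part of mutex.cc):

#include <cstdio>
#include "absl/synchronization/mutex.h"

// Hypothetical tracer: print every report the implementation submits through
// the mutex_tracer hook (e.g. "slow release" with the wait time in cycles).
static void MyMutexTracer(const char *msg, const void *obj,
                          int64_t wait_cycles) {
  std::fprintf(stderr, "mutex %p: %s (%lld cycles)\n", obj, msg,
               static_cast<long long>(wait_cycles));
}

void InstallTracingHooks() {
  absl::RegisterMutexTracer(&MyMutexTracer);
}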
static int Delay(int32_t c, DelayMode mode) {
  // On a uniprocessor only yield/sleep; otherwise spin before yielding,
  // and spin longer in AGGRESSIVE mode.
  int32_t limit = (mutex_globals.num_cpus > 1)
                      ? ((mode == AGGRESSIVE) ? 5000 : 250)
                      : 0;
// Ensure that "(*pv & bits) == bits" by doing an atomic update of "*pv" to
// "*pv | bits" if necessary.  Wait until (*pv & wait_until_clear) == 0.
static void AtomicSetBits(std::atomic<intptr_t> *pv, intptr_t bits,
                          intptr_t wait_until_clear) {
  intptr_t v;
  do {
    v = pv->load(std::memory_order_relaxed);
  } while ((v & bits) != bits &&
           ((v & wait_until_clear) != 0 ||
            !pv->compare_exchange_weak(v, v | bits,
                                       std::memory_order_release,
                                       std::memory_order_relaxed)));
}
// Ensure that "(*pv & bits) == 0" by doing an atomic update of "*pv" to
// "*pv & ~bits" if necessary.  Wait until (*pv & wait_until_clear) == 0.
static void AtomicClearBits(std::atomic<intptr_t> *pv, intptr_t bits,
                            intptr_t wait_until_clear) {
  intptr_t v;
  do {
    v = pv->load(std::memory_order_relaxed);
  } while ((v & bits) != 0 &&
           ((v & wait_until_clear) != 0 ||
            !pv->compare_exchange_weak(v, v & ~bits,
                                       std::memory_order_release,
                                       std::memory_order_relaxed)));
}
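A minimal standalone sketch of the same compare_exchange_weak retry idiom, shown outside the Mutex word for clarity; SetFlagBits is a hypothetical helper, not part of Abseil:

#include <atomic>
#include <cstdint>

// Set the given bits in *word, retrying until the CAS succeeds.  On failure
// compare_exchange_weak reloads the current value into v, so the loop simply
// retries with fresh state.
static void SetFlagBits(std::atomic<intptr_t> *word, intptr_t bits) {
  intptr_t v = word->load(std::memory_order_relaxed);
  while ((v & bits) != bits &&
         !word->compare_exchange_weak(v, v | bits,
                                      std::memory_order_release,
                                      std::memory_order_relaxed)) {
    // retry with the value reloaded by the failed CAS
  }
}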
enum {  // Mutex and CondVar events passed as "ev" to PostSynchEvent
  SYNCH_EV_TRYLOCK_SUCCESS,
  SYNCH_EV_TRYLOCK_FAILED,
  SYNCH_EV_READERTRYLOCK_SUCCESS,
  SYNCH_EV_READERTRYLOCK_FAILED,
  SYNCH_EV_LOCK,
  SYNCH_EV_LOCK_RETURNING,
  SYNCH_EV_READERLOCK,
  SYNCH_EV_READERLOCK_RETURNING,
  SYNCH_EV_UNLOCK,
  SYNCH_EV_READERUNLOCK,
  SYNCH_EV_WAIT,
  SYNCH_EV_WAIT_RETURNING,
  SYNCH_EV_SIGNAL,
  SYNCH_EV_SIGNALALL,
};

enum {                    // Event flags
  SYNCH_F_R = 0x01,       // reader event
  SYNCH_F_LCK = 0x02,     // PostSynchEvent called with mutex held
  SYNCH_F_TRY = 0x04,     // TryLock or ReaderTryLock
  SYNCH_F_UNLOCK = 0x08,  // Unlock or ReaderUnlock

  SYNCH_F_LCK_W = SYNCH_F_LCK,
  SYNCH_F_LCK_R = SYNCH_F_LCK | SYNCH_F_R,
};
// Properties of the events: the flags that apply and the message logged.
static const struct {
  int flags;
  const char *msg;
} event_properties[] = {
    {SYNCH_F_LCK_W | SYNCH_F_TRY, "TryLock succeeded "},
    {0, "TryLock failed "},
    {SYNCH_F_LCK_R | SYNCH_F_TRY, "ReaderTryLock succeeded "},
    {0, "ReaderTryLock failed "},
    {0, "Lock blocking "},
    {SYNCH_F_LCK_W, "Lock returning "},
    {0, "ReaderLock blocking "},
    {SYNCH_F_LCK_R, "ReaderLock returning "},
    {SYNCH_F_LCK_W | SYNCH_F_UNLOCK, "Unlock "},
    {SYNCH_F_LCK_R | SYNCH_F_UNLOCK, "ReaderUnlock "},
    {0, "Wait on "},
    {0, "Wait unblocked "},
    {0, "Signal on "},
    {0, "SignalAll on "},
};
// EnsureSynchEvent(): return the SynchEvent for "addr", creating and
// registering one under "name" if none exists.
static SynchEvent *EnsureSynchEvent(std::atomic<intptr_t> *addr,
                                    const char *name, intptr_t bits,
                                    intptr_t lockbit) {
  uint32_t h = reinterpret_cast<intptr_t>(addr) % kNSynchEvent;
  SynchEvent *e;
  synch_event_mu.Lock();
  for (e = synch_event[h];
       e != nullptr && e->masked_addr != base_internal::HidePtr(addr);
       e = e->next) {
  }
  if (e == nullptr) {  // no existing SynchEvent struct; allocate one
    if (name == nullptr) {
      name = "";
    }
    size_t l = strlen(name);
    // ... allocate e with room for the name and initialize its fields ...
    strcpy(e->name, name);  // NOLINT(runtime/printf)
    e->next = synch_event[h];
    synch_event[h] = e;
  }
  // ...
}

// UnrefSynchEvent(): decrement the reference count; delete on zero.
static void UnrefSynchEvent(SynchEvent *e) {
  if (e != nullptr) {
    synch_event_mu.Lock();
    bool del = (--(e->refcount) == 0);
    synch_event_mu.Unlock();
    if (del) {
      DeleteSynchEvent(e);
    }
  }
}

// ForgetSynchEvent(): forget the SynchEvent associated with "addr", if any.
static void ForgetSynchEvent(std::atomic<intptr_t> *addr, intptr_t bits,
                             intptr_t lockbit) {
  uint32_t h = reinterpret_cast<intptr_t>(addr) % kNSynchEvent;
  SynchEvent **pe;
  SynchEvent *e;
  synch_event_mu.Lock();
  for (pe = &synch_event[h];
       (e = *pe) != nullptr && e->masked_addr != base_internal::HidePtr(addr);
       pe = &e->next) {
  }
  if (e != nullptr) {
    del = (--(e->refcount) == 0);
    // ... unlink e from the hash chain, clear the event bit in *addr ...
  }
  // ...
}

// GetSynchEvent(): return the SynchEvent for "addr" (adding a reference),
// or nullptr if there is none.
static SynchEvent *GetSynchEvent(const void *addr) {
  uint32_t h = reinterpret_cast<intptr_t>(addr) % kNSynchEvent;
  SynchEvent *e;
  synch_event_mu.Lock();
  for (e = synch_event[h];
       e != nullptr && e->masked_addr != base_internal::HidePtr(addr);
       e = e->next) {
  }
  // ... bump the refcount, unlock, and return e ...
}
// PostSynchEvent(): log the event and, if requested, run the user invariant.
static void PostSynchEvent(void *obj, int ev) {
  SynchEvent *e = GetSynchEvent(obj);
  // Logging is on if there is no event struct, or if it explicitly asks.
  if (e == nullptr || e->log) {
    void *pcs[40];
    int n = absl::GetStackTrace(pcs, ABSL_ARRAYSIZE(pcs), 1);
    char buffer[ABSL_ARRAYSIZE(pcs) * 24];
    int pos = snprintf(buffer, sizeof (buffer), " @");
    for (int i = 0; i != n; i++) {
      pos += snprintf(&buffer[pos], sizeof (buffer) - pos, " %p", pcs[i]);
    }
    ABSL_RAW_LOG(INFO, "%s%p %s %s", event_properties[ev].msg, obj,
                 (e == nullptr ? "" : e->name), buffer);
  }
  const int flags = event_properties[ev].flags;
  if ((flags & SYNCH_F_LCK) != 0 && e != nullptr && e->invariant != nullptr) {
    // Run the user-supplied invariant through EvalConditionAnnotated so that
    // ThreadSanitizer sees the user code with the correct annotations.
    // ... wrap the invariant in a Condition "cond" for Mutex "mu" = obj ...
    const bool locking = (flags & SYNCH_F_UNLOCK) == 0;
    const bool trylock = (flags & SYNCH_F_TRY) != 0;
    const bool read_lock = (flags & SYNCH_F_R) != 0;
    EvalConditionAnnotated(&cond, mu, locking, trylock, read_lock);
  }
  UnrefSynchEvent(e);
}
  SynchWaitParams(Mutex::MuHow how_arg, const Condition *cond_arg,
                  KernelTimeout timeout_arg, Mutex *cvmu_arg,
                  PerThreadSynch *thread_arg,
                  std::atomic<intptr_t> *cv_word_arg)
      : how(how_arg),
        cond(cond_arg),
        timeout(timeout_arg),
        cvmu(cvmu_arg),
        thread(thread_arg),
        cv_word(cv_word_arg),
        contention_start_cycles(base_internal::CycleClock::Now()) {}
// Mutex::DecrementSynchSem(): block on the per-thread semaphore, honoring t.
  static_cast<void>(w);  // avoid an unused-parameter warning
  bool res = PerThreadSem::Wait(t);

// Mutex::InternalAttemptToUseMutexInFatalSignalHandler()
  ThreadIdentity *identity = CurrentThreadIdentityIfPresent();
  if (identity != nullptr) {
    identity->per_thread_synch.suppress_fatal_errors = true;
  }
  // Don't do deadlock detection when we are already failing.
  synch_deadlock_detection.store(OnDeadlockCycle::kIgnore,
                                 std::memory_order_release);

// DeadlineFromTimeout()
  gettimeofday(&tv, nullptr);
static_assert(PerThreadSynch::kAlignment > kMuLow,
              "PerThreadSynch::kAlignment must be greater than kMuLow");

// From the MuHowS attribute tables (kSharedS / kExclusiveS):
    ~static_cast<intptr_t>(0),  // slow_inc_need_zero

static constexpr bool kDebugMode = true;

#ifdef THREAD_SANITIZER
static unsigned TsanFlags(Mutex::MuHow how) {
  return how == kShared ? __tsan_mutex_read_lock : 0;
}
#endif
// Mutex::~Mutex()
  intptr_t v = mu_.load(std::memory_order_relaxed);
  // ...
  this->ForgetDeadlockInfo();

void EnableMutexInvariantDebugging(bool enabled) {
  synch_check_invariants.store(enabled, std::memory_order_release);
}

// Mutex::EnableInvariantDebugging(invariant, arg)
  if (synch_check_invariants.load(std::memory_order_acquire) &&
      invariant != nullptr) {
    // ... record the invariant in this Mutex's SynchEvent ...

void SetMutexDeadlockDetectionMode(OnDeadlockCycle mode) {
  synch_deadlock_detection.store(mode, std::memory_order_release);
}
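Deadlock detection and invariant checking are switched on process-wide through these setters. A small configuration sketch using the public API:

#include "absl/synchronization/mutex.h"

void ConfigureMutexDebugging() {
  // Crash with a report when a lock-ordering cycle is detected.
  absl::SetMutexDeadlockDetectionMode(absl::OnDeadlockCycle::kAbort);
  // Evaluate any invariants registered via Mutex::EnableInvariantDebugging().
  absl::EnableMutexInvariantDebugging(true);
}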
// Skip(): follow the skip chain from x to its end, path-compressing as it
// goes.
  while ((x0 = x1, x1 = x2, x2 = x2->skip) != nullptr) {
  }

// FixSkip(): repair ancestor->skip when to_be_removed is taken out of the
// waiter queue.
static void FixSkip(PerThreadSynch *ancestor, PerThreadSynch *to_be_removed) {
  if (ancestor->skip == to_be_removed) {     // ancestor->skip left dangling
    if (to_be_removed->skip != nullptr) {
      ancestor->skip = to_be_removed->skip;  // can skip past to_be_removed
    } else if (ancestor->next != to_be_removed) {  // they are not adjacent
      ancestor->skip = ancestor->next;             // can skip one past ancestor
    } else {
      ancestor->skip = nullptr;  // can't skip at all
    }
  }
}
// Enqueue(head, waitp, mu, flags): insert thread waitp->thread ("s" below)
// into the Mutex waiter queue and return the new head.
  if (waitp->cv_word != nullptr) {
    // The waiter is to be queued on a CondVar word rather than on the Mutex.
    // ...
  }
  ABSL_RAW_CHECK(s->waitp == nullptr ||    // normal case
                 s->waitp == waitp ||      // Fer()---transfer from CondVar
                 s->suppress_fatal_errors,
                 "detected illegal recursion into Mutex code");
  if (head == nullptr) {  // s is the only waiter
    // ...
  } else {
#ifdef ABSL_HAVE_PTHREAD_GETSCHEDPARAM
    // Refresh this thread's cached priority if it is stale.
    int policy;
    struct sched_param param;
    const int err = pthread_getschedparam(pthread_self(), &policy, &param);
    if (err != 0) {
      ABSL_RAW_LOG(ERROR, "pthread_getschedparam failed: %d", err);
    }
#endif
    // If s has higher priority than head, insert it in priority-FIFO order,
    // examining the end of each skip chain:
    // ...
      enqueue_after = advance_to;
      cur = enqueue_after->next;
      advance_to = Skip(cur);  // normally, advance to end of skip chain
    // ...
    } else if (waitp->how == kExclusive &&
               Condition::GuaranteedEqual(waitp->cond, nullptr)) {
      enqueue_after = head;
    }
    if (enqueue_after != nullptr) {
      s->next = enqueue_after->next;
      enqueue_after->next = s;
      ABSL_RAW_CHECK(enqueue_after->skip == nullptr ||
                         MuSameCondition(enqueue_after, s),
                     "Mutex Enqueue failure");
      if (enqueue_after != head && enqueue_after->may_skip &&
          MuSameCondition(enqueue_after, enqueue_after->next)) {
        enqueue_after->skip = enqueue_after->next;  // s can be skipped over
      }
    }
    // ...
  }
  s->state.store(PerThreadSynch::kQueued, std::memory_order_relaxed);
  return head;
// Dequeue(head, pw): remove pw->next ("w") from the queue rooted at head.
    head = (pw == w) ? nullptr : pw;  // removed the only element, or keep pw

// DequeueAllWakeable(head, pw, wake_tail): dequeue every wakeable thread
// after pw and append it to the list ending at *wake_tail.
  bool skipped = false;
  do {
    // ... for each waiter w marked for wakeup:
      w->next = *wake_tail;   // keep the wakeup list terminated
      *wake_tail = w;         // add w to the wakeup list
      wake_tail = &w->next;   // next addition goes at the end
    // ...
  } while (orig_h == head && (pw != head || !skipped));
// Mutex::TryRemove(s): try to remove thread s from the Mutex's waiter queue.
void Mutex::TryRemove(PerThreadSynch *s) {
  intptr_t v = mu_.load(std::memory_order_relaxed);
  // Acquire both the spinlock and the lock itself.
  if ((v & (kMuWait | kMuSpin | kMuWriter | kMuReader)) == kMuWait &&
      mu_.compare_exchange_strong(v, v | kMuSpin | kMuWriter,
                                  std::memory_order_acquire,
                                  std::memory_order_relaxed)) {
    PerThreadSynch *h = GetPerThreadSynch(v);
    if (h != nullptr) {
      PerThreadSynch *pw = h;  // pw is w's predecessor
      PerThreadSynch *w;
      if ((w = pw->next) != s) {  // search for s
        do {
          // ... advance pw, fixing skip pointers as needed ...
        } while ((w = pw->next) != s && pw != h);
      }
      if (w == s) {  // found s; dequeue it
        // ...
        s->state.store(PerThreadSynch::kAvailable, std::memory_order_release);
      }
    }
    intptr_t nv;
    do {  // release the spinlock and the lock
      v = mu_.load(std::memory_order_relaxed);
      nv = v & (kMuDesig | kMuEvent);
      if (h != nullptr) {
        nv |= kMuWait | reinterpret_cast<intptr_t>(h);
        // ...
      }
    } while (!mu_.compare_exchange_weak(v, nv,
                                        std::memory_order_release,
                                        std::memory_order_relaxed));
  }
}
// Mutex::Block(s): block the calling thread until it has been dequeued and
// made runnable (state kAvailable).
void Mutex::Block(PerThreadSynch *s) {
  while (s->state.load(std::memory_order_acquire) == PerThreadSynch::kQueued) {
    if (!DecrementSynchSem(this, s, s->waitp->timeout)) {
      // Timed out: remove ourselves from the queue, retrying until the
      // removal is seen to have taken effect.
      this->TryRemove(s);
      int c = 0;
      while (s->next != nullptr) {
        c = Delay(c, GENTLE);
        this->TryRemove(s);
      }
      s->waitp->timeout = KernelTimeout::Never();  // timeout is satisfied
      s->waitp->cond = nullptr;  // condition no longer relevant for wakeups
    }
  }
  ABSL_RAW_CHECK(s->waitp != nullptr || s->suppress_fatal_errors,
                 "detected illegal recursion in Mutex code");
  s->waitp = nullptr;
}
// Mutex::Wakeup(w): make w runnable and return its former successor.
  w->state.store(PerThreadSynch::kAvailable, std::memory_order_release);
  IncrementSynchSem(this, w);

// GetGraphIdLocked() / GetGraphId(): map a Mutex to its deadlock-graph node.
static GraphId GetGraphIdLocked(Mutex *mu)
    EXCLUSIVE_LOCKS_REQUIRED(deadlock_graph_mu) {
  if (!deadlock_graph) {  // (re)create the deadlock graph
    // ...
  }
  return deadlock_graph->GetId(mu);
}

static GraphId GetGraphId(Mutex *mu) LOCKS_EXCLUDED(deadlock_graph_mu) {
  deadlock_graph_mu.Lock();
  GraphId id = GetGraphIdLocked(mu);
  deadlock_graph_mu.Unlock();
  return id;
}
// LockEnter(): in debug mode, record that "mu" (graph node "id") has been
// acquired by this thread.
static void LockEnter(Mutex *mu, GraphId id, SynchLocksHeld *held_locks) {
  int n = held_locks->n;
  int i = 0;
  while (i != n && held_locks->locks[i].id != id) {
    i++;
  }
  if (i == n) {
    // ... append a new entry (or set the overflow flag if the array is full)
    held_locks->n = n + 1;
  } else {
    held_locks->locks[i].count++;
  }
}

// LockLeave(): record that "mu" has been released; complain if it is not
// actually held.
static void LockLeave(Mutex *mu, GraphId id, SynchLocksHeld *held_locks) {
  int n = held_locks->n;
  int i = 0;
  while (i != n && held_locks->locks[i].id != id) {
    i++;
  }
  if (i == n) {
    // The id may have been reassigned after ForgetDeadlockInfo(); fall back
    // to searching by Mutex pointer.
    i = 0;
    while (i != n && held_locks->locks[i].mu != mu) {
      i++;
    }
    if (i == n) {  // mu missing means releasing unheld lock
      SynchEvent *mu_events = GetSynchEvent(mu);
      ABSL_RAW_LOG(FATAL,
                   "thread releasing lock it does not hold: %p %s; ",
                   static_cast<void *>(mu),
                   mu_events == nullptr ? "" : mu_events->name);
    }
  } else if (held_locks->locks[i].count == 1) {
    held_locks->n = n - 1;
    // ... compact the array by moving the last entry into slot i ...
  } else {
    held_locks->locks[i].count--;
  }
}
// DebugOnlyLockEnter(mu), DebugOnlyLockEnter(mu, id) and DebugOnlyLockLeave(mu)
// each guard their bookkeeping with the same test:
  if (synch_deadlock_detection.load(std::memory_order_acquire) !=
      OnDeadlockCycle::kIgnore) {
    // ... LockEnter()/LockLeave() on the current thread's lock list ...
  }
// StackString(): render a stack trace into buf, symbolizing if requested.
static char *StackString(void **pcs, int n, char *buf, int maxlen,
                         bool symbolize) {
  static const int kSymLen = 200;
  char sym[kSymLen];
  int len = 0;
  for (int i = 0; i != n; i++) {
    if (symbolize) {
      if (!symbolizer(pcs[i], sym, kSymLen)) {
        sym[0] = '\0';
      }
      snprintf(buf + len, maxlen - len, "%s\t@ %p %s\n",
               (i == 0 ? "\n" : ""),
               pcs[i], sym);
    } else {
      snprintf(buf + len, maxlen - len, " %p", pcs[i]);
    }
    len += strlen(&buf[len]);
  }
  return buf;
}
enum { kMaxDeadlockPathLen = 10 };  // maximum length of a deadlock cycle

// The buffers required to report a deadlock.  They are not allocated on the
// stack to avoid a large stack frame.
struct DeadlockReportBuffers {
  char buf[6100];
  GraphId path[kMaxDeadlockPathLen];
};

struct ScopedDeadlockReportBuffers {
  ScopedDeadlockReportBuffers() {
    b = reinterpret_cast<DeadlockReportBuffers *>(
        base_internal::LowLevelAlloc::Alloc(sizeof(*b)));
  }
  ~ScopedDeadlockReportBuffers() { base_internal::LowLevelAlloc::Free(b); }
  DeadlockReportBuffers *b;
};

// Helper to pass to GraphCycles::UpdateStackTrace.
int GetStack(void **stack, int max_depth) {
  return absl::GetStackTrace(stack, max_depth, 3);
}
// DeadlockCheck(): record that this thread is about to acquire "mu" and
// check whether that closes a cycle in the acquires-before graph.
static GraphId DeadlockCheck(Mutex *mu) {
  if (synch_deadlock_detection.load(std::memory_order_acquire) ==
      OnDeadlockCycle::kIgnore) {
    return InvalidGraphId();
  }

  SynchLocksHeld *all_locks = Synch_GetAllLocks();

  absl::base_internal::SpinLockHolder lock(&deadlock_graph_mu);
  const GraphId mu_id = GetGraphIdLocked(mu);

  if (all_locks->n == 0) {
    // No other locks are held, so this acquisition cannot close a cycle.
    return mu_id;
  }

  // Prefer stack traces that show a thread holding and acquiring as many
  // locks as possible.
  deadlock_graph->UpdateStackTrace(mu_id, all_locks->n + 1, GetStack);

  // For each other mutex already held by this thread:
  for (int i = 0; i != all_locks->n; i++) {
    const GraphId other_node_id = all_locks->locks[i].id;
    const Mutex *other =
        static_cast<const Mutex *>(deadlock_graph->Ptr(other_node_id));
    if (other == nullptr) {
      continue;  // ignore stale lock
    }

    // Add the acquired-before edge to the graph.
    if (!deadlock_graph->InsertEdge(other_node_id, mu_id)) {
      ScopedDeadlockReportBuffers scoped_buffers;
      DeadlockReportBuffers *b = scoped_buffers.b;
      static int number_of_reported_deadlocks = 0;
      number_of_reported_deadlocks++;
      // Symbolize only the first two reports to avoid huge slowdowns.
      bool symbolize = number_of_reported_deadlocks <= 2;
      ABSL_RAW_LOG(ERROR, "Potential Mutex deadlock: %s",
                   CurrentStackString(b->buf, sizeof (b->buf), symbolize));
      int len = 0;
      for (int j = 0; j != all_locks->n; j++) {
        void *pr = deadlock_graph->Ptr(all_locks->locks[j].id);
        if (pr != nullptr) {
          snprintf(b->buf + len, sizeof (b->buf) - len, " %p", pr);
          len += static_cast<int>(strlen(&b->buf[len]));
        }
      }
      ABSL_RAW_LOG(ERROR, "Acquiring %p    Mutexes held: %s",
                   static_cast<void *>(mu), b->buf);
      int path_len = deadlock_graph->FindPath(
          mu_id, other_node_id, ABSL_ARRAYSIZE(b->path), b->path);
      for (int j = 0; j != path_len; j++) {
        GraphId id = b->path[j];
        Mutex *path_mu = static_cast<Mutex *>(deadlock_graph->Ptr(id));
        if (path_mu == nullptr) continue;
        void **stack;
        int depth = deadlock_graph->GetStackTrace(id, &stack);
        snprintf(b->buf, sizeof(b->buf),
                 "mutex@%p stack: ", static_cast<void *>(path_mu));
        StackString(stack, depth, b->buf + strlen(b->buf),
                    static_cast<int>(sizeof(b->buf) - strlen(b->buf)),
                    symbolize);
        ABSL_RAW_LOG(ERROR, "%s", b->buf);
      }
      if (synch_deadlock_detection.load(std::memory_order_acquire) ==
          OnDeadlockCycle::kAbort) {
        deadlock_graph_mu.Unlock();  // avoid deadlock in fatal signal handler
        ABSL_RAW_LOG(FATAL, "dying due to potential deadlock");
        return mu_id;
      }
      break;  // report at most one potential deadlock per acquisition
    }
  }

  return mu_id;
}
static GraphId DebugOnlyDeadlockCheck(Mutex *mu) {
  if (kDebugMode && synch_deadlock_detection.load(std::memory_order_acquire) !=
                        OnDeadlockCycle::kIgnore) {
    return DeadlockCheck(mu);
  } else {
    return InvalidGraphId();
  }
}

void Mutex::ForgetDeadlockInfo() {
  if (kDebugMode && synch_deadlock_detection.load(std::memory_order_acquire) !=
                        OnDeadlockCycle::kIgnore) {
    deadlock_graph_mu.Lock();
    if (deadlock_graph != nullptr) {
      deadlock_graph->RemoveNode(this);
    }
    deadlock_graph_mu.Unlock();
  }
}
void Mutex::AssertNotHeld() const {
  // This check is possible only in debug mode with deadlock detection enabled.
  if (kDebugMode &&
      (mu_.load(std::memory_order_relaxed) & (kMuWriter | kMuReader)) != 0 &&
      synch_deadlock_detection.load(std::memory_order_acquire) !=
          OnDeadlockCycle::kIgnore) {
    GraphId id = GetGraphId(const_cast<Mutex *>(this));
    SynchLocksHeld *locks = Synch_GetAllLocks();
    for (int i = 0; i != locks->n; i++) {
      if (locks->locks[i].id == id) {
        SynchEvent *mu_events = GetSynchEvent(this);
        ABSL_RAW_LOG(FATAL, "thread should not hold mutex %p %s",
                     static_cast<const void *>(this),
                     (mu_events == nullptr ? "" : mu_events->name));
      }
    }
  }
}
// Attempt to acquire *mu, spinning briefly if it cannot be acquired at once.
static bool TryAcquireWithSpinning(std::atomic<intptr_t> *mu) {
  int c = mutex_globals.spinloop_iterations;
  int result = -1;  // result of operation:  0=false, 1=true, -1=unknown

  do {
    intptr_t v = mu->load(std::memory_order_relaxed);
    if ((v & (kMuReader|kMuEvent)) != 0) {  // a reader or tracing -> give up
      result = 0;
    } else if (((v & kMuWriter) == 0) &&    // no holder -> try to acquire
               mu->compare_exchange_strong(v, kMuWriter | v,
                                           std::memory_order_acquire,
                                           std::memory_order_relaxed)) {
      result = 1;
    }
  } while (result == -1 && --c > 0);
  return result == 1;
}
void Mutex::Lock() {
  ABSL_TSAN_MUTEX_PRE_LOCK(this, 0);
  GraphId id = DebugOnlyDeadlockCheck(this);
  intptr_t v = mu_.load(std::memory_order_relaxed);
  // try fast acquire, then spin loop, then slow loop
  if ((v & (kMuWriter | kMuReader | kMuEvent)) != 0 ||
      !mu_.compare_exchange_strong(v, kMuWriter | v,
                                   std::memory_order_acquire,
                                   std::memory_order_relaxed)) {
    if (!TryAcquireWithSpinning(&this->mu_)) {
      this->LockSlow(kExclusive, nullptr, 0);
    }
  }
  DebugOnlyLockEnter(this, id);
  ABSL_TSAN_MUTEX_POST_LOCK(this, 0, 0);
}
void Mutex::ReaderLock() {
  ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_read_lock);
  GraphId id = DebugOnlyDeadlockCheck(this);
  intptr_t v = mu_.load(std::memory_order_relaxed);
  // try fast acquire, then slow loop
  if ((v & (kMuWriter | kMuWait | kMuEvent)) != 0 ||
      !mu_.compare_exchange_strong(v, (kMuReader | v) + kMuOne,
                                   std::memory_order_acquire,
                                   std::memory_order_relaxed)) {
    this->LockSlow(kShared, nullptr, 0);
  }
  DebugOnlyLockEnter(this, id);
  ABSL_TSAN_MUTEX_POST_LOCK(this, __tsan_mutex_read_lock, 0);
}
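For contrast with these fast paths, typical user code goes through the public locking interface; a minimal sketch (the counter and its mutex are illustrative):

#include "absl/synchronization/mutex.h"

absl::Mutex counter_mu;
int counter = 0;  // guarded by counter_mu

void Increment() {
  absl::MutexLock l(&counter_mu);  // Lock() in the constructor, Unlock() on scope exit
  ++counter;
}

int Read() {
  absl::ReaderMutexLock l(&counter_mu);  // shared (reader) acquisition
  return counter;
}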
// Mutex::LockWhen(cond)
  this->LockSlow(kExclusive, &cond, 0);

// Mutex::LockWhenWithTimeout(cond, timeout) / LockWhenWithDeadline(cond, deadline)
  bool res = LockSlowWithDeadline(kExclusive, &cond,
                                  KernelTimeout(deadline), 0);

// Mutex::ReaderLockWhen(cond)
  this->LockSlow(kShared, &cond, 0);

// Mutex::ReaderLockWhenWithDeadline(cond, deadline)
  bool res = LockSlowWithDeadline(kShared, &cond, KernelTimeout(deadline), 0);
// Mutex::Await(cond)
  if (cond.Eval()) {  // condition already true; nothing to do
    this->AssertReaderHeld();
  } else {
    ABSL_RAW_CHECK(this->AwaitCommon(cond, KernelTimeout::Never()),
                   "condition untrue on return from Await");
  }

// Mutex::AwaitWithDeadline(cond, deadline)
  this->AssertReaderHeld();
  // ...
  bool res = this->AwaitCommon(cond, t);
  ABSL_RAW_CHECK(res || t.has_timeout(),
                 "condition untrue on return from Await");
  return res;
bool Mutex::AwaitCommon(const Condition &cond, KernelTimeout t) {
  this->AssertReaderHeld();
  MuHow how =
      (mu_.load(std::memory_order_relaxed) & kMuWriter) ? kExclusive : kShared;
  SynchWaitParams waitp(
      how, &cond, t, nullptr /*no cvmu*/, Synch_GetPerThreadAnnotated(this),
      nullptr /*no cv_word*/);
  int flags = kMuHasBlocked;
  if (!Condition::GuaranteedEqual(&cond, nullptr)) {
    flags |= kMuIsCond;
  }
  this->UnlockSlow(&waitp);
  this->Block(waitp.thread);
  this->LockSlowLoop(&waitp, flags);
  bool res = waitp.cond != nullptr ||  // => cond known true from LockSlowLoop
             EvalConditionAnnotated(&cond, this, true, false, how == kShared);
  return res;
}
bool Mutex::TryLock() {
  ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_try_lock);
  intptr_t v = mu_.load(std::memory_order_relaxed);
  if ((v & (kMuWriter | kMuReader | kMuEvent)) == 0 &&  // try fast acquire
      mu_.compare_exchange_strong(v, kMuWriter | v,
                                  std::memory_order_acquire,
                                  std::memory_order_relaxed)) {
    DebugOnlyLockEnter(this);
    ABSL_TSAN_MUTEX_POST_LOCK(this, __tsan_mutex_try_lock, 0);
    return true;
  }
  if ((v & kMuEvent) != 0) {  // we're recording events
    if (/* ... the fast-acquire preconditions still hold ... */
        mu_.compare_exchange_strong(
            v, (kExclusive->fast_or | v) + kExclusive->fast_add,
            std::memory_order_acquire, std::memory_order_relaxed)) {
      DebugOnlyLockEnter(this);
      PostSynchEvent(this, SYNCH_EV_TRYLOCK_SUCCESS);
      ABSL_TSAN_MUTEX_POST_LOCK(this, __tsan_mutex_try_lock, 0);
      return true;
    } else {
      PostSynchEvent(this, SYNCH_EV_TRYLOCK_FAILED);
    }
  }
  ABSL_TSAN_MUTEX_POST_LOCK(
      this, __tsan_mutex_try_lock | __tsan_mutex_try_lock_failed, 0);
  return false;
}
bool Mutex::ReaderTryLock() {
  ABSL_TSAN_MUTEX_PRE_LOCK(this,
                           __tsan_mutex_read_lock | __tsan_mutex_try_lock);
  intptr_t v = mu_.load(std::memory_order_relaxed);
  // The loops retry only if the mutex word keeps changing under the CAS
  // (typically the reader count); the attempt count is bounded to avoid
  // having to reason about livelock.
  int loop_limit = 5;
  while ((v & (kMuWriter|kMuWait|kMuEvent)) == 0 && loop_limit != 0) {
    if (mu_.compare_exchange_strong(v, (kMuReader | v) + kMuOne,
                                    std::memory_order_acquire,
                                    std::memory_order_relaxed)) {
      DebugOnlyLockEnter(this);
      ABSL_TSAN_MUTEX_POST_LOCK(
          this, __tsan_mutex_read_lock | __tsan_mutex_try_lock, 0);
      return true;
    }
    loop_limit--;
    v = mu_.load(std::memory_order_relaxed);
  }
  if ((v & kMuEvent) != 0) {  // we're recording events
    loop_limit = 5;
    while (/* ... acquisition still possible ... */ loop_limit != 0) {
      if (mu_.compare_exchange_strong(v, (kMuReader | v) + kMuOne,
                                      std::memory_order_acquire,
                                      std::memory_order_relaxed)) {
        DebugOnlyLockEnter(this);
        PostSynchEvent(this, SYNCH_EV_READERTRYLOCK_SUCCESS);
        ABSL_TSAN_MUTEX_POST_LOCK(
            this, __tsan_mutex_read_lock | __tsan_mutex_try_lock, 0);
        return true;
      }
      loop_limit--;
      v = mu_.load(std::memory_order_relaxed);
    }
    if ((v & kMuEvent) != 0) {
      PostSynchEvent(this, SYNCH_EV_READERTRYLOCK_FAILED);
    }
  }
  ABSL_TSAN_MUTEX_POST_LOCK(this,
                            __tsan_mutex_read_lock | __tsan_mutex_try_lock |
                                __tsan_mutex_try_lock_failed,
                            0);
  return false;
}
void Mutex::Unlock() {
  ABSL_TSAN_MUTEX_PRE_UNLOCK(this, 0);
  DebugOnlyLockLeave(this);
  intptr_t v = mu_.load(std::memory_order_relaxed);

  if (kDebugMode && ((v & (kMuWriter | kMuReader)) != kMuWriter)) {
    ABSL_RAW_LOG(FATAL, "Mutex unlocked when destroyed or not locked: v=0x%x",
                 static_cast<unsigned>(v));
  }

  // should_try_cas is whether we'll try a compare-and-swap immediately.
  // NOTE: optimized out when kDebugMode is false.
  bool should_try_cas = ((v & (kMuEvent | kMuWriter)) == kMuWriter &&
                         (v & (kMuWait | kMuDesig)) != kMuWait);
  // An equivalent computation ("x < y" below) that current compilers find
  // cheaper; the debug check verifies that the two agree.
  intptr_t x = (v ^ (kMuWriter | kMuWait)) & (kMuWriter | kMuEvent);
  intptr_t y = (v ^ (kMuWriter | kMuWait)) & (kMuWait | kMuDesig);
  if (kDebugMode && should_try_cas != (x < y)) {
    // We would usually use PRIdPTR here, but is not correctly implemented
    // under Windows, so we use the rare RAW_LOG with %llx arguments.
    ABSL_RAW_LOG(FATAL, "internal logic error %llx %llx %llx\n",
                 static_cast<long long>(v), static_cast<long long>(x),
                 static_cast<long long>(y));
  }
  if (x < y &&
      mu_.compare_exchange_strong(v, v & ~(kMuWrWait | kMuWriter),
                                  std::memory_order_release,
                                  std::memory_order_relaxed)) {
    // fast writer release (writer with no waiters or with designated waker)
  } else {
    this->UnlockSlow(nullptr /*no waitp*/);  // take slow path
  }
  ABSL_TSAN_MUTEX_POST_UNLOCK(this, 0);
}
// Requires v to represent a reader-locked state.
static bool ExactlyOneReader(intptr_t v) {
  assert((v & (kMuWriter|kMuReader)) == kMuReader);
  assert((v & kMuHigh) != 0);
  // The more straightforward "(v & kMuHigh) == kMuOne" also works, but
  // on some architectures the following generates slightly smaller code.
  constexpr intptr_t kMuMultipleWaitersMask = kMuHigh ^ kMuOne;
  return (v & kMuMultipleWaitersMask) == 0;
}
void Mutex::ReaderUnlock() {
  ABSL_TSAN_MUTEX_PRE_UNLOCK(this, __tsan_mutex_read_lock);
  DebugOnlyLockLeave(this);
  intptr_t v = mu_.load(std::memory_order_relaxed);
  assert((v & (kMuWriter|kMuReader)) == kMuReader);
  if ((v & (kMuReader|kMuWait|kMuEvent)) == kMuReader) {
    // fast reader release (reader with no waiters)
    intptr_t clear = ExactlyOneReader(v) ? kMuReader|kMuOne : kMuOne;
    if (mu_.compare_exchange_strong(v, v - clear,
                                    std::memory_order_release,
                                    std::memory_order_relaxed)) {
      ABSL_TSAN_MUTEX_POST_UNLOCK(this, __tsan_mutex_read_lock);
      return;
    }
  }
  this->UnlockSlow(nullptr /*no waitp*/);  // take slow path
  ABSL_TSAN_MUTEX_POST_UNLOCK(this, __tsan_mutex_read_lock);
}
// Clears the designated-waker flag if this thread has blocked and may be the
// designated waker.
static const intptr_t zap_desig_waker[] = {
    ~static_cast<intptr_t>(0),        // not blocked
    ~static_cast<intptr_t>(kMuDesig)  // blocked; clear the designated waker bit
};

// Ignores waiting writers when a reader that has already blocked once wakes.
static const intptr_t ignore_waiting_writers[] = {
    ~static_cast<intptr_t>(0),         // not blocked
    ~static_cast<intptr_t>(kMuWrWait)  // blocked; pretend no writers are waiting
};
void Mutex::LockSlow(MuHow how, const Condition *cond, int flags) {
  ABSL_RAW_CHECK(
      this->LockSlowWithDeadline(how, cond, KernelTimeout::Never(), flags),
      "condition untrue on return from LockSlow");
}
// EvalConditionAnnotated(): evaluate cond->Eval() while keeping
// ThreadSanitizer's view of the enclosing lock/unlock operation consistent.
static inline bool EvalConditionAnnotated(const Condition *cond, Mutex *mu,
                                          bool locking, bool trylock,
                                          bool read_lock) {
  bool res = false;
#ifdef THREAD_SANITIZER
  const int flags = read_lock ? __tsan_mutex_read_lock : 0;
  const int tryflags = flags | (trylock ? __tsan_mutex_try_lock : 0);
#endif
  // ... issue matching ABSL_TSAN_MUTEX_* annotations around the evaluation,
  //     depending on whether we are inside a lock or an unlock ...
  // Prevent unused-parameter warnings in non-TSAN builds.
  static_cast<void>(mu);
  static_cast<void>(trylock);
  static_cast<void>(read_lock);
  return res;
}

// EvalConditionIgnored(): evaluate the condition with racy reads/writes
// ignored (used when checking invariants).
static inline bool EvalConditionIgnored(Mutex *mu, const Condition *cond) {
  ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN();
  bool res = cond->Eval();
  ANNOTATE_IGNORE_READS_AND_WRITES_END();
  static_cast<void>(mu);  // Prevent unused-parameter warning in non-TSAN builds.
  return res;
}
bool Mutex::LockSlowWithDeadline(MuHow how, const Condition *cond,
                                 KernelTimeout t, int flags) {
  intptr_t v = mu_.load(std::memory_order_relaxed);
  bool unlock = false;
  if ((v & how->fast_need_zero) == 0 &&  // try fast acquire
      mu_.compare_exchange_strong(
          v, (how->fast_or | (v & zap_desig_waker[flags & kMuHasBlocked])) +
                 how->fast_add,
          std::memory_order_acquire, std::memory_order_relaxed)) {
    if (cond == nullptr ||
        EvalConditionAnnotated(cond, this, true, false, how == kShared)) {
      return true;
    }
    unlock = true;
  }
  SynchWaitParams waitp(
      how, cond, t, nullptr /*no cvmu*/, Synch_GetPerThreadAnnotated(this),
      nullptr /*no cv_word*/);
  if (!Condition::GuaranteedEqual(cond, nullptr)) {
    flags |= kMuIsCond;
  }
  if (unlock) {
    this->UnlockSlow(&waitp);
    this->Block(waitp.thread);
    flags |= kMuHasBlocked;
  }
  this->LockSlowLoop(&waitp, flags);
  return waitp.cond != nullptr ||  // => cond known true from LockSlowLoop
         cond == nullptr ||
         EvalConditionAnnotated(cond, this, true, false, how == kShared);
}
#define RAW_CHECK_FMT(cond, ...)                                   \
  do {                                                             \
    if (ABSL_PREDICT_FALSE(!(cond))) {                             \
      ABSL_RAW_LOG(FATAL, "Check " #cond " failed: " __VA_ARGS__); \
    }                                                              \
  } while (0)

static void CheckForMutexCorruption(intptr_t v, const char *label) {
  // Test for two situations that should never occur in v:
  //   kMuWriter and kMuReader, or kMuWrWait and !kMuWait.
  const uintptr_t w = v ^ kMuWait;
  // Flipping that bit lets both tests be written against w, and the pairs are
  // chosen so they overlap when the word is shifted left by three, which
  // saves a branch in the common (correct) case.
  static_assert(kMuReader << 3 == kMuWriter, "must match");
  static_assert(kMuWait << 3 == kMuWrWait, "must match");
  if (ABSL_PREDICT_TRUE((w & (w << 3) & (kMuWriter | kMuWrWait)) == 0)) return;
  RAW_CHECK_FMT((v & (kMuWriter | kMuReader)) != (kMuWriter | kMuReader),
                "%s: Mutex corrupt: both reader and writer lock held: %p",
                label, reinterpret_cast<void *>(v));
  RAW_CHECK_FMT((v & (kMuWait | kMuWrWait)) != kMuWrWait,
                "%s: Mutex corrupt: waiting writer with no waiters: %p",
                label, reinterpret_cast<void *>(v));
  assert(false);
}
// LockSlowLoop(): the slow path of acquisition.  Spin, queue, or block until
// the lock (and, if given, the condition) can be satisfied.
void Mutex::LockSlowLoop(SynchWaitParams *waitp, int flags) {
  int c = 0;
  intptr_t v = mu_.load(std::memory_order_relaxed);
  if ((v & kMuEvent) != 0) {
    PostSynchEvent(this,
        waitp->how == kExclusive? SYNCH_EV_LOCK: SYNCH_EV_READERLOCK);
  }
  ABSL_RAW_CHECK(
      waitp->thread->waitp == nullptr || waitp->thread->suppress_fatal_errors,
      "detected illegal recursion into Mutex code");
  for (;;) {
    v = mu_.load(std::memory_order_relaxed);
    CheckForMutexCorruption(v, "Lock");
    if ((v & waitp->how->slow_need_zero) == 0) {
      if (mu_.compare_exchange_strong(
              v, (waitp->how->fast_or |
                  (v & zap_desig_waker[flags & kMuHasBlocked])) +
                     waitp->how->fast_add,
              std::memory_order_acquire, std::memory_order_relaxed)) {
        if (waitp->cond == nullptr ||
            EvalConditionAnnotated(waitp->cond, this, true, false,
                                   waitp->how == kShared)) {
          break;  // we timed out, or condition true, so return
        }
        this->UnlockSlow(waitp);  // got lock but condition false
        this->Block(waitp->thread);
        flags |= kMuHasBlocked;
        c = 0;
      }
    } else {  // need to access waiter list
      bool dowait = false;
      if ((v & (kMuSpin|kMuWait)) == 0) {  // no waiters
        // This thread tries to become the one and only waiter.
        PerThreadSynch *new_h = Enqueue(nullptr, waitp, v, flags);
        intptr_t nv = (v & zap_desig_waker[flags & kMuHasBlocked] & kMuLow) |
                      kMuWait;
        ABSL_RAW_CHECK(new_h != nullptr, "Enqueue to empty list failed");
        if (waitp->how == kExclusive && (v & kMuReader) != 0) {
          nv |= kMuWrWait;  // give priority to the waiting writer
        }
        if (mu_.compare_exchange_strong(
                v, reinterpret_cast<intptr_t>(new_h) | nv,
                std::memory_order_release, std::memory_order_relaxed)) {
          dowait = true;
        } else {  // attempted Enqueue() failed
          waitp->thread->waitp = nullptr;  // zero the field set by Enqueue()
        }
      } else if ((v & waitp->how->slow_inc_need_zero &
                  ignore_waiting_writers[flags & kMuHasBlocked]) == 0) {
        // This is a reader that can increment the reader count stored in the
        // last waiter; do so while holding the spinlock.
        if (mu_.compare_exchange_strong(
                v, (v & zap_desig_waker[flags & kMuHasBlocked]) | kMuSpin |
                       kMuReader,
                std::memory_order_acquire, std::memory_order_relaxed)) {
          PerThreadSynch *h = GetPerThreadSynch(v);
          h->readers += kMuOne;  // inc reader count in waiter
          do {                   // release spinlock
            v = mu_.load(std::memory_order_relaxed);
          } while (!mu_.compare_exchange_weak(v, (v & ~kMuSpin) | kMuReader,
                                              std::memory_order_release,
                                              std::memory_order_relaxed));
          if (waitp->cond == nullptr ||
              EvalConditionAnnotated(waitp->cond, this, true, false,
                                     waitp->how == kShared)) {
            break;  // we timed out, or condition true, so return
          }
          this->UnlockSlow(waitp);  // got lock but condition false
          this->Block(waitp->thread);
          flags |= kMuHasBlocked;
          c = 0;
        }
      } else if ((v & kMuSpin) == 0 &&  // attempt to queue ourselves
                 mu_.compare_exchange_strong(
                     v, (v & zap_desig_waker[flags & kMuHasBlocked]) | kMuSpin |
                            kMuWait,
                     std::memory_order_acquire, std::memory_order_relaxed)) {
        PerThreadSynch *h = GetPerThreadSynch(v);
        PerThreadSynch *new_h = Enqueue(h, waitp, v, flags);
        intptr_t wr_wait = 0;
        ABSL_RAW_CHECK(new_h != nullptr, "Enqueue to list failed");
        if (waitp->how == kExclusive && (v & kMuReader) != 0) {
          wr_wait = kMuWrWait;  // give priority to a waiting writer
        }
        do {  // release spinlock
          v = mu_.load(std::memory_order_relaxed);
        } while (!mu_.compare_exchange_weak(
            v, (v & (kMuLow & ~kMuSpin)) | kMuWait | wr_wait |
                   reinterpret_cast<intptr_t>(new_h),
            std::memory_order_release, std::memory_order_relaxed));
        dowait = true;
      }
      if (dowait) {
        this->Block(waitp->thread);  // wait until removed from list or timeout
        flags |= kMuHasBlocked;
        c = 0;
      }
    }
    ABSL_RAW_CHECK(
        waitp->thread->waitp == nullptr ||
            waitp->thread->suppress_fatal_errors,
        "detected illegal recursion into Mutex code");
    c = Delay(c, GENTLE);  // delay, then try again
  }
  ABSL_RAW_CHECK(
      waitp->thread->waitp == nullptr || waitp->thread->suppress_fatal_errors,
      "detected illegal recursion into Mutex code");
  if ((v & kMuEvent) != 0) {
    PostSynchEvent(this,
                   waitp->how == kExclusive? SYNCH_EV_LOCK_RETURNING :
                                             SYNCH_EV_READERLOCK_RETURNING);
  }
}
// UnlockSlow(): the slow path of release.  If waitp is non-null, the calling
// thread is about to wait (Await/CondVar) and must be queued as part of the
// release.
void Mutex::UnlockSlow(SynchWaitParams *waitp) {
  intptr_t v = mu_.load(std::memory_order_relaxed);
  this->AssertReaderHeld();
  CheckForMutexCorruption(v, "Unlock");
  if ((v & kMuEvent) != 0) {
    PostSynchEvent(this,
        (v & kMuWriter) != 0? SYNCH_EV_UNLOCK: SYNCH_EV_READERUNLOCK);
  }
  int c = 0;
  PerThreadSynch *w = nullptr;      // the waiter under consideration to wake
  PerThreadSynch *pw = nullptr;     // w's predecessor
  PerThreadSynch *old_h = nullptr;  // head of list searched previously
  PerThreadSynch *wake_list = kPerThreadSynchNull;  // threads to wake
  intptr_t wr_wait = 0;  // set to kMuWrWait if a waiting writer must get
                         // priority after we wake a reader
  ABSL_RAW_CHECK(waitp == nullptr || waitp->thread->waitp == nullptr ||
                     waitp->thread->suppress_fatal_errors,
                 "detected illegal recursion into Mutex code");
  // This loop finds threads to wake (wake_list) and removes them from the
  // waiter list; it also queues waitp->thread if waitp is non-null.
  for (;;) {
    v = mu_.load(std::memory_order_relaxed);
    if ((v & kMuWriter) != 0 && (v & (kMuWait | kMuDesig)) != kMuWait &&
        waitp == nullptr) {
      // fast writer release (no waiters, or there is a designated waker)
      if (mu_.compare_exchange_strong(v, v & ~(kMuWrWait | kMuWriter),
                                      std::memory_order_release,
                                      std::memory_order_relaxed)) {
        return;
      }
    } else if ((v & (kMuReader | kMuWait)) == kMuReader && waitp == nullptr) {
      // fast reader release (reader with no waiters)
      intptr_t clear = ExactlyOneReader(v) ? kMuReader | kMuOne : kMuOne;
      if (mu_.compare_exchange_strong(v, v - clear,
                                      std::memory_order_release,
                                      std::memory_order_relaxed)) {
        return;
      }
    } else if ((v & kMuSpin) == 0 &&  // attempt to get spinlock
               mu_.compare_exchange_strong(v, v | kMuSpin,
                                           std::memory_order_acquire,
                                           std::memory_order_relaxed)) {
      if ((v & kMuWait) == 0) {  // no one to wake
        intptr_t nv;
        bool do_enqueue = true;  // always Enqueue() the first time
        ABSL_RAW_CHECK(waitp != nullptr, "UnlockSlow is confused");
        do {  // must loop if Enqueue() returns nullptr
          v = mu_.load(std::memory_order_relaxed);
          // decrement the reader count if there are readers
          intptr_t new_readers = (v >= kMuOne)? v - kMuOne : v;
          PerThreadSynch *new_h = nullptr;
          if (do_enqueue) {
            // If waitp->cv_word is non-null the thread is queued on a CondVar
            // instead, and Enqueue() must not be retried on later iterations.
            do_enqueue = (waitp->cv_word == nullptr);
            new_h = Enqueue(nullptr, waitp, new_readers, kMuIsCond);
          }
          intptr_t clear = kMuWrWait | kMuWriter;  // by default clear write bit
          if ((v & kMuWriter) == 0 && ExactlyOneReader(v)) {  // last reader
            clear = kMuWrWait | kMuReader;                    // clear read bit
          }
          nv = (v & kMuLow & ~clear & ~kMuSpin);
          if (new_h != nullptr) {
            nv |= kMuWait | reinterpret_cast<intptr_t>(new_h);
          } else {
            // new_h is null if we queued ourselves on a CondVar; keep the
            // remaining reader count in the mutex word.
            nv |= new_readers & kMuHigh;
          }
          // release spinlock & our lock; retry if the reader count changed
        } while (!mu_.compare_exchange_weak(v, nv,
                                            std::memory_order_release,
                                            std::memory_order_relaxed));
        break;
      }

      // There are waiters; h is the head of the circular waiter list.
      PerThreadSynch *h = GetPerThreadSynch(v);
      if ((v & kMuReader) != 0 && (h->readers & kMuHigh) > kMuOne) {
        // a reader, but not the last one
        h->readers -= kMuOne;    // release our lock
        intptr_t nv = v;         // normally just release spinlock
        if (waitp != nullptr) {  // but waitp != nullptr => must queue ourselves
          PerThreadSynch *new_h = Enqueue(h, waitp, v, kMuIsCond);
          ABSL_RAW_CHECK(new_h != nullptr,
                         "waiters disappeared during Enqueue()!");
          nv &= kMuLow;
          nv |= kMuWait | reinterpret_cast<intptr_t>(new_h);
        }
        mu_.store(nv, std::memory_order_release);  // release spinlock
        break;
      }

      // Either we didn't search before, or we marked the queue as
      // "maybe_unlocking" and no one else should have changed it.
      ABSL_RAW_CHECK(old_h == nullptr || h->maybe_unlocking,
                     "Mutex queue changed beneath us");

      // The lock is becoming free, and there is a waiter.
      if (old_h != nullptr && !old_h->may_skip) {
        old_h->may_skip = true;  // we used old_h as a terminator; restore it
        // ... re-establish old_h->skip if its successor allows it ...
      }
      if (h->next->waitp->how == kExclusive &&
          Condition::GuaranteedEqual(h->next->waitp->cond, nullptr)) {
        // easy case: unconditional writer at the front; no need to search
        pw = h;
        w = h->next;
        w->wake = true;
        wr_wait = kMuWrWait;
      } else if (w != nullptr && (w->waitp->how == kExclusive || h == old_h)) {
        // we chose w on a previous iteration and either it is a writer, or we
        // have already searched the entire list (so all wakeable readers are
        // marked).
        if (pw == nullptr) {  // if w's predecessor is unknown, it must be h
          pw = h;
        }
      } else {
        // We don't yet know every waiter to wake.  Avoid re-searching waiters
        // examined on earlier iterations by starting from old_h if set.
        if (old_h == h) {  // searched before and nothing is new: no one to wake
          intptr_t nv = (v & ~(kMuReader|kMuWriter|kMuWrWait));
          h->readers = 0;
          h->maybe_unlocking = false;  // finished unlocking
          if (waitp != nullptr) {      // we must queue ourselves and sleep
            PerThreadSynch *new_h = Enqueue(h, waitp, v, kMuIsCond);
            nv &= kMuLow;
            if (new_h != nullptr) {
              nv |= kMuWait | reinterpret_cast<intptr_t>(new_h);
            }  // else we queued ourselves on a CondVar
          }
          mu_.store(nv, std::memory_order_release);  // release spinlock & lock
          break;
        }

        // set up to walk the list
        PerThreadSynch *w_walk;   // current waiter during list walk
        PerThreadSynch *pw_walk;  // previous waiter during list walk
        if (old_h != nullptr) {   // we've searched up to old_h before
          pw_walk = old_h;
          w_walk = old_h->next;
        } else {                  // no prior search; start at the beginning
          pw_walk = nullptr;
          w_walk = h->next;
        }
        h->may_skip = false;  // ensure we never skip past h during a search
        h->maybe_unlocking = true;  // we will scan without the spinlock held
        // Release the spinlock (keeping the lock) while we walk.
        mu_.store(v, std::memory_order_release);

        // walk the list, marking waiters that can be woken
        while (pw_walk != h) {
          w_walk->wake = false;
          if (w_walk->waitp->cond == nullptr ||  // no condition => vacuously true
              EvalConditionIgnored(this, w_walk->waitp->cond)) {
            if (w == nullptr) {
              w_walk->wake = true;  // can wake this waiter
              w = w_walk;
              pw = pw_walk;
              if (w_walk->waitp->how == kExclusive) {
                wr_wait = kMuWrWait;
                break;  // bail if waking this writer
              }
            } else if (w_walk->waitp->how == kShared) {  // wake if a reader
              w_walk->wake = true;
            } else {  // writer with a true condition; don't wake, but record it
              wr_wait = kMuWrWait;
            }
          }
          if (w_walk->wake) {  // we're waking reader w_walk
            pw_walk = w_walk;  // don't skip similar waiters
          } else {             // not waking; skip as much as possible
            pw_walk = Skip(w_walk);
          }
          w_walk = pw_walk->next;
        }
        continue;  // restart for(;;)-loop to wake w or to find more waiters
      }
      // The chosen waiter is w with predecessor pw; if w is a reader, also
      // dequeue every other waiter marked wake==true.
      h = DequeueAllWakeable(h, pw, &wake_list);

      intptr_t nv = (v & kMuEvent) | kMuDesig;  // assume no waiters left,
                                                // set designated waker
      if (waitp != nullptr) {  // we must queue ourselves and sleep
        h = Enqueue(h, waitp, v, kMuIsCond);
        // h is the new last waiter; may be null if we queued on a CondVar
      }

      ABSL_RAW_CHECK(wake_list != kPerThreadSynchNull,
                     "unexpected empty wake list");

      if (h != nullptr) {  // there are waiters left
        h->readers = 0;
        h->maybe_unlocking = false;  // finished unlocking
        nv |= wr_wait | kMuWait | reinterpret_cast<intptr_t>(h);
      }

      // release both spinlock & lock
      mu_.store(nv, std::memory_order_release);
      break;  // out of for(;;)-loop
    }
    c = Delay(c, AGGRESSIVE);  // aggressive: no one can proceed till we do
  }

  if (wake_list != kPerThreadSynchNull) {
    int64_t wait_cycles = base_internal::CycleClock::Now() -
                          wake_list->waitp->contention_start_cycles;
    do {
      wake_list = Wakeup(wake_list);  // wake the waiters
    } while (wake_list != kPerThreadSynchNull);
    // Report the contention to the installed tracer/profiler hooks.
    mutex_tracer("slow release", this, wait_cycles);
    // ...
  }
}
// Used by CondVar implementation to reacquire mutex after waking from
// condition variable.  This routine is used instead of Lock() because the
// waiting thread may have been moved -- it's already queued (by CondVar).
void Mutex::Trans(MuHow how) {
  this->LockSlow(how, nullptr, kMuHasBlocked | kMuIsCond);
}
2334 "Mutex::Fer while waiting on Condition");
2336 "Mutex::Fer while in timed wait");
2338 "Mutex::Fer with pending CondVar queueing");
2340 intptr_t
v = mu_.load(std::memory_order_relaxed);
2347 const intptr_t conflicting =
2349 if ((v & conflicting) == 0) {
2351 w->
state.store(PerThreadSynch::kAvailable, std::memory_order_release);
2352 IncrementSynchSem(
this, w);
2355 if ((v & (kMuSpin|kMuWait)) == 0) {
2360 if (mu_.compare_exchange_strong(
2361 v, reinterpret_cast<intptr_t>(new_h) | (v & kMuLow) | kMuWait,
2362 std::memory_order_release, std::memory_order_relaxed)) {
2365 }
else if ((v & kMuSpin) == 0 &&
2366 mu_.compare_exchange_strong(v, v | kMuSpin | kMuWait)) {
2372 v = mu_.load(std::memory_order_relaxed);
2373 }
while (!mu_.compare_exchange_weak(
2375 (v & kMuLow & ~kMuSpin) | kMuWait |
2376 reinterpret_cast<intptr_t>(new_h),
2377 std::memory_order_release, std::memory_order_relaxed));
2381 c =
Delay(c, GENTLE);
void Mutex::AssertHeld() const {
  if ((mu_.load(std::memory_order_relaxed) & kMuWriter) == 0) {
    SynchEvent *e = GetSynchEvent(this);
    ABSL_RAW_LOG(FATAL, "thread should hold write lock on Mutex %p %s",
                 static_cast<const void *>(this),
                 (e == nullptr ? "" : e->name));
  }
}

void Mutex::AssertReaderHeld() const {
  if ((mu_.load(std::memory_order_relaxed) & (kMuReader | kMuWriter)) == 0) {
    SynchEvent *e = GetSynchEvent(this);
    ABSL_RAW_LOG(
        FATAL, "thread should hold at least a read lock on Mutex %p %s",
        static_cast<const void *>(this), (e == nullptr ? "" : e->name));
  }
}
static_assert(PerThreadSynch::kAlignment > kCvLow,
              "PerThreadSynch::kAlignment must be greater than kCvLow");

// CondVar::~CondVar()
  if ((cv_.load(std::memory_order_relaxed) & kCvEvent) != 0) {
    ForgetSynchEvent(&this->cv_, kCvEvent, kCvSpin);
  }
// CondVar::Remove(s): remove thread s from this condition variable's waiter
// list, making it runnable.
  for (v = cv_.load(std::memory_order_relaxed);;
       v = cv_.load(std::memory_order_relaxed)) {
    if ((v & kCvSpin) == 0 &&  // attempt to acquire spinlock
        cv_.compare_exchange_strong(v, v | kCvSpin,
                                    std::memory_order_acquire,
                                    std::memory_order_relaxed)) {
      PerThreadSynch *h = reinterpret_cast<PerThreadSynch *>(v & ~kCvLow);
      if (h != nullptr) {
        PerThreadSynch *w = h;
        while (w->next != s && w->next != h) {  // search for thread
          w = w->next;
        }
        if (w->next == s) {  // found thread; remove it
          w->next = s->next;
          if (h == s) {
            h = (w == s) ? nullptr : w;
          }
          s->next = nullptr;
          s->state.store(PerThreadSynch::kAvailable, std::memory_order_release);
        }
      }
      // release spinlock
      cv_.store((v & kCvEvent) | reinterpret_cast<intptr_t>(h),
                std::memory_order_release);
      return;
    } else {
      c = Delay(c, GENTLE);  // try again after a delay
    }
  }
// CondVarEnqueue(): queue the calling thread on the CondVar word named by
// waitp->cv_word, clearing the field first so that a later Enqueue() on the
// Mutex takes its normal path.
static void CondVarEnqueue(SynchWaitParams *waitp) {
  std::atomic<intptr_t> *cv_word = waitp->cv_word;
  waitp->cv_word = nullptr;

  intptr_t v = cv_word->load(std::memory_order_relaxed);
  int c = 0;
  while ((v & kCvSpin) != 0 ||  // acquire spinlock
         !cv_word->compare_exchange_weak(v, v | kCvSpin,
                                         std::memory_order_acquire,
                                         std::memory_order_relaxed)) {
    c = Delay(c, GENTLE);
    v = cv_word->load(std::memory_order_relaxed);
  }
  ABSL_RAW_CHECK(waitp->thread->waitp == nullptr, "waiting when shouldn't be");
  waitp->thread->waitp = waitp;  // prepare ourselves for waiting
  PerThreadSynch *h = reinterpret_cast<PerThreadSynch *>(v & ~kCvLow);
  if (h == nullptr) {  // add this thread to the (circular) waiter list
    waitp->thread->next = waitp->thread;
  } else {
    waitp->thread->next = h->next;
    h->next = waitp->thread;
  }
  waitp->thread->state.store(PerThreadSynch::kQueued,
                             std::memory_order_relaxed);
  cv_word->store((v & kCvEvent) | reinterpret_cast<intptr_t>(waitp->thread),
                 std::memory_order_release);
}
// CondVar::WaitCommon(mutex, t): release mutex, wait (possibly with timeout),
// then reacquire mutex.  Returns true iff the wait timed out.
bool CondVar::WaitCommon(Mutex *mutex, KernelTimeout t) {
  bool rc = false;  // return value; true iff we timed out

  intptr_t mutex_v = mutex->mu_.load(std::memory_order_relaxed);
  Mutex::MuHow mutex_how = ((mutex_v & kMuWriter) != 0) ? kExclusive : kShared;

  // maybe trace this call
  intptr_t v = cv_.load(std::memory_order_relaxed);
  cond_var_tracer("Wait", this);
  if ((v & kCvEvent) != 0) {
    PostSynchEvent(this, SYNCH_EV_WAIT);
  }

  // Release mu and wait on the condition variable.  UnlockSlow() calls
  // CondVarEnqueue() just before releasing the Mutex, queuing this thread on
  // the condition variable.
  SynchWaitParams waitp(mutex_how, nullptr, t, mutex,
                        Synch_GetPerThreadAnnotated(mutex), &cv_);
  mutex->UnlockSlow(&waitp);

  // wait for signal or timeout
  while (waitp.thread->state.load(std::memory_order_acquire) ==
         PerThreadSynch::kQueued) {
    if (!Mutex::DecrementSynchSem(mutex, waitp.thread, t)) {
      this->Remove(waitp.thread);
      rc = true;
    }
  }

  ABSL_RAW_CHECK(waitp.thread->waitp != nullptr, "not waiting when should be");
  waitp.thread->waitp = nullptr;  // cleanup

  // maybe trace this call
  cond_var_tracer("Unwait", this);
  if ((v & kCvEvent) != 0) {
    PostSynchEvent(this, SYNCH_EV_WAIT_RETURNING);
  }

  mutex->Trans(mutex_how);  // Reacquire mutex
  return rc;
}

void CondVar::Wait(Mutex *mu) {
  WaitCommon(mu, KernelTimeout::Never());
}
// CondVar::Wakeup(w): wake w directly (or hand it to its Mutex via Fer()).
  w->state.store(PerThreadSynch::kAvailable, std::memory_order_release);
// CondVar::Signal(): wake at most one waiter.
  for (v = cv_.load(std::memory_order_relaxed); v != 0;
       v = cv_.load(std::memory_order_relaxed)) {
    if ((v & kCvSpin) == 0 &&  // attempt to acquire spinlock
        cv_.compare_exchange_strong(v, v | kCvSpin,
                                    std::memory_order_acquire,
                                    std::memory_order_relaxed)) {
      PerThreadSynch *h = reinterpret_cast<PerThreadSynch *>(v & ~kCvLow);
      PerThreadSynch *w = nullptr;
      if (h != nullptr) {  // remove first waiter
        w = h->next;
        if (w == h) {
          h = nullptr;
        } else {
          h->next = w->next;
        }
      }
      // release spinlock
      cv_.store((v & kCvEvent) | reinterpret_cast<intptr_t>(h),
                std::memory_order_release);
      if (w != nullptr) {
        CondVar::Wakeup(w);  // wake the waiter, if there was one
        cond_var_tracer("Signal wakeup", this);
      }
      if ((v & kCvEvent) != 0) {
        PostSynchEvent(this, SYNCH_EV_SIGNAL);
      }
      return;
    } else {
      c = Delay(c, GENTLE);
    }
  }
// CondVar::SignalAll(): wake every waiter by detaching the whole list with a
// single CAS and then waking each thread in turn.
  for (v = cv_.load(std::memory_order_relaxed); v != 0;
       v = cv_.load(std::memory_order_relaxed)) {
    // empty the list if the spinlock is free
    if ((v & kCvSpin) == 0 &&
        cv_.compare_exchange_strong(v, v & kCvEvent, std::memory_order_acquire,
                                    std::memory_order_relaxed)) {
      PerThreadSynch *h = reinterpret_cast<PerThreadSynch *>(v & ~kCvLow);
      if (h != nullptr) {
        PerThreadSynch *w;
        PerThreadSynch *n = h->next;
        do {  // for every thread, wake it up
          w = n;
          n = n->next;
          CondVar::Wakeup(w);
        } while (w != h);
        cond_var_tracer("SignalAll wakeup", this);
      }
      if ((v & kCvEvent) != 0) {
        PostSynchEvent(this, SYNCH_EV_SIGNALALL);
      }
      return;
    } else {
      c = Delay(c, GENTLE);  // try again after a delay
    }
  }
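CondVar pairs with Mutex in the usual monitor style. A minimal sketch of the public API (the queue and names are illustrative):

#include <deque>
#include "absl/synchronization/mutex.h"

absl::Mutex mu;
absl::CondVar nonempty_cv;
std::deque<int> items;  // guarded by mu

void Push(int v) {
  absl::MutexLock l(&mu);
  items.push_back(v);
  nonempty_cv.Signal();  // wake one waiter in Pop()
}

int Pop() {
  absl::MutexLock l(&mu);
  while (items.empty()) {
    nonempty_cv.Wait(&mu);  // atomically releases mu; reacquires it on wakeup
  }
  int v = items.front();
  items.pop_front();
  return v;
}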
2667 "ReleasableMutexLock::Release may only be called once");
2668 this->mu_->Unlock();
2669 this->mu_ =
nullptr;
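ReleasableMutexLock lets the critical section end before the enclosing scope does. A short usage sketch (names are illustrative):

#include "absl/synchronization/mutex.h"

absl::Mutex mu;
int shared_value = 0;  // guarded by mu

int CopyThenWork() {
  absl::ReleasableMutexLock l(&mu);
  int snapshot = shared_value;
  l.Release();  // drop the lock early; must not be called twice
  // ... expensive work on snapshot without holding mu ...
  return snapshot * 2;
}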
#ifdef THREAD_SANITIZER
extern "C" void __tsan_read1(void *addr);
#else
#define __tsan_read1(addr)  // do nothing if TSan not enabled
#endif

// Dereference(): return *arg, telling TSan about the read explicitly since
// this file is not instrumented for memory accesses.
static bool Dereference(void *arg) {
  __tsan_read1(arg);
  return *(static_cast<bool *>(arg));
}

Condition::Condition(bool (*func)(void *), void *arg)
    : eval_(&CallVoidPtrFunction),
      function_(func),
      method_(nullptr),
      arg_(arg) {}

Condition::Condition(const bool *cond)
    : eval_(CallVoidPtrFunction),
      function_(Dereference),
      method_(nullptr),
      // const_cast is safe since Dereference does not modify its argument.
      arg_(const_cast<bool *>(cond)) {}
bool Condition::Eval() const {
  // eval_ == null for kTrue
  return (this->eval_ == nullptr) || (*this->eval_)(this);
}

bool Condition::GuaranteedEqual(const Condition *a, const Condition *b) {
  if (a == nullptr) {
    return b == nullptr || b->eval_ == nullptr;
  }
  if (b == nullptr || b->eval_ == nullptr) {
    return a->eval_ == nullptr;
  }
  return a->eval_ == b->eval_ && a->function_ == b->function_ &&
         a->arg_ == b->arg_ && a->method_ == b->method_;
}
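Condition wraps a predicate in one of several forms (bool pointer, free function, or member function). A brief sketch of constructing and evaluating them (the types and names here are illustrative):

#include "absl/synchronization/mutex.h"

struct Job {
  bool done = false;
  bool Finished() const { return done; }
};

static bool AllDone(int *remaining) { return *remaining == 0; }

void ConditionExamples(Job *job, int *remaining, bool *flag) {
  absl::Condition c1(flag);                        // true when *flag is true
  absl::Condition c2(&AllDone, remaining);         // free function + argument
  absl::Condition c3(job, &Job::Finished);         // const member function
  bool any = c1.Eval() || c2.Eval() || c3.Eval();  // evaluate directly
  static_cast<void>(any);
}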