1 // Copyright 2017 The Abseil Authors.
2 //
3 // Licensed under the Apache License, Version 2.0 (the "License");
4 // you may not use this file except in compliance with the License.
5 // You may obtain a copy of the License at
6 //
7 // https://www.apache.org/licenses/LICENSE-2.0
8 //
9 // Unless required by applicable law or agreed to in writing, software
10 // distributed under the License is distributed on an "AS IS" BASIS,
11 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 // See the License for the specific language governing permissions and
13 // limitations under the License.
14 
15 #include "absl/synchronization/mutex.h"
16 
17 #ifdef _WIN32
18 #include <windows.h>
19 #ifdef ERROR
20 #undef ERROR
21 #endif
22 #else
23 #include <fcntl.h>
24 #include <pthread.h>
25 #include <sched.h>
26 #include <sys/time.h>
27 #endif
28 
29 #include <assert.h>
30 #include <errno.h>
31 #include <stdio.h>
32 #include <stdlib.h>
33 #include <string.h>
34 #include <time.h>
35 
36 #include <algorithm>
37 #include <atomic>
38 #include <cinttypes>
39 #include <thread> // NOLINT(build/c++11)
40 
41 #include "absl/base/attributes.h"
42 #include "absl/base/config.h"
52 #include "absl/base/port.h"
57 #include "absl/time/time.h"
58 
59 using absl::base_internal::CurrentThreadIdentityIfPresent;
60 using absl::base_internal::PerThreadSynch;
61 using absl::base_internal::ThreadIdentity;
62 using absl::synchronization_internal::GetOrCreateCurrentThreadIdentity;
63 using absl::synchronization_internal::GraphCycles;
64 using absl::synchronization_internal::GraphId;
65 using absl::synchronization_internal::InvalidGraphId;
66 using absl::synchronization_internal::KernelTimeout;
67 using absl::synchronization_internal::PerThreadSem;
68 
69 extern "C" {
70 ABSL_ATTRIBUTE_WEAK void AbslInternalMutexYield() { std::this_thread::yield(); }
71 } // extern "C"
72 
73 namespace absl {
74 
75 namespace {
76 
77 #if defined(THREAD_SANITIZER)
78 constexpr OnDeadlockCycle kDeadlockDetectionDefault = OnDeadlockCycle::kIgnore;
79 #else
80 constexpr OnDeadlockCycle kDeadlockDetectionDefault = OnDeadlockCycle::kAbort;
81 #endif
82 
83 ABSL_CONST_INIT std::atomic<OnDeadlockCycle> synch_deadlock_detection(
84  kDeadlockDetectionDefault);
85 ABSL_CONST_INIT std::atomic<bool> synch_check_invariants(false);
86 
87 // ------------------------------------------ spinlock support
88 
89 // Make sure read-only globals used in the Mutex code are contained on the
90 // same cacheline and cacheline aligned to eliminate any false sharing with
91 // other globals from this and other modules.
92 static struct MutexGlobals {
93  MutexGlobals() {
94  // Find machine-specific data needed for Delay() and
95  // TryAcquireWithSpinning(). This runs in the global constructor
96  // sequence, and before that zeros are safe values.
97  num_cpus = absl::base_internal::NumCPUs();
98  spinloop_iterations = num_cpus > 1 ? 1500 : 0;
99  }
100  int num_cpus;
101  int spinloop_iterations;
102  // Pad this struct to a full cacheline to prevent false sharing.
103  char padding[ABSL_CACHELINE_SIZE - 2 * sizeof(int)];
104 } ABSL_CACHELINE_ALIGNED mutex_globals;
105 static_assert(
106  sizeof(MutexGlobals) == ABSL_CACHELINE_SIZE,
107  "MutexGlobals must occupy an entire cacheline to prevent false sharing");
108 
109 ABSL_CONST_INIT absl::base_internal::AtomicHook<
110  void (*)(int64_t wait_cycles)> submit_profile_data;
111 ABSL_CONST_INIT absl::base_internal::AtomicHook<
112  void (*)(const char *msg, const void *obj, int64_t wait_cycles)> mutex_tracer;
113 ABSL_CONST_INIT absl::base_internal::AtomicHook<
114  void (*)(const char *msg, const void *cv)> cond_var_tracer;
115 ABSL_CONST_INIT absl::base_internal::AtomicHook<
116  bool (*)(const void *pc, char *out, int out_size)>
117  symbolizer(absl::Symbolize);
118 
119 } // namespace
120 
121 static inline bool EvalConditionAnnotated(const Condition *cond, Mutex *mu,
122  bool locking, bool trylock,
123  bool read_lock);
124 
125 void RegisterMutexProfiler(void (*fn)(int64_t wait_timestamp)) {
126  submit_profile_data.Store(fn);
127 }
128 
129 void RegisterMutexTracer(void (*fn)(const char *msg, const void *obj,
130  int64_t wait_cycles)) {
131  mutex_tracer.Store(fn);
132 }
133 
134 void RegisterCondVarTracer(void (*fn)(const char *msg, const void *cv)) {
135  cond_var_tracer.Store(fn);
136 }
137 
138 void RegisterSymbolizer(bool (*fn)(const void *pc, char *out, int out_size)) {
139  symbolizer.Store(fn);
140 }
141 
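// Illustrative sketch (not part of the original source): a client can install
// one of the hooks above to observe contention. The callback runs on the
// contended path, so it should be cheap and must not itself acquire a Mutex.
// For example, with a hypothetical user-supplied MyTraceFn of matching type:
//
//   void MyTraceFn(const char *msg, const void *obj, int64_t wait_cycles) {
//     // e.g. export (obj, wait_cycles) to a lock-free profiling buffer.
//   }
//   ...
//   absl::RegisterMutexTracer(&MyTraceFn);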
142 // spinlock delay on iteration c. Returns new c.
143 namespace {
144  enum DelayMode { AGGRESSIVE, GENTLE };
145 };
146 static int Delay(int32_t c, DelayMode mode) {
147  // If this is a uniprocessor, only yield/sleep. Otherwise, if the mode is
148  // aggressive then spin many times before yielding. If the mode is
149  // gentle then spin only a few times before yielding. Aggressive spinning is
150  // used to ensure that an Unlock() call, which must get the spin lock for
151  // any thread to make progress, gets it without undue delay.
152  int32_t limit = (mutex_globals.num_cpus > 1) ?
153  ((mode == AGGRESSIVE) ? 5000 : 250) : 0;
154  if (c < limit) {
155  c++; // spin
156  } else {
157  ABSL_TSAN_MUTEX_PRE_DIVERT(nullptr, 0);
158  if (c == limit) { // yield once
159  AbslInternalMutexYield();
160  c++;
161  } else { // then wait
162  absl::SleepFor(absl::Microseconds(10));
163  c = 0;
164  }
165  ABSL_TSAN_MUTEX_POST_DIVERT(nullptr, 0);
166  }
167  return (c);
168 }
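// Illustrative sketch (not part of the original source): callers typically use
// Delay() in a retry loop, threading the returned counter back in, e.g.
//
//   int c = 0;
//   while (!TryAcquireSomething()) {   // hypothetical retry condition
//     c = Delay(c, GENTLE);            // spin, then yield, then sleep
//   }
//
// Block() below uses exactly this pattern while waiting to be dequeued.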
169 
170 // --------------------------Generic atomic ops
171 // Ensure that "(*pv & bits) == bits" by doing an atomic update of "*pv" to
172 // "*pv | bits" if necessary. Wait until (*pv & wait_until_clear)==0
173 // before making any change.
174 // This is used to set flags in mutex and condition variable words.
175 static void AtomicSetBits(std::atomic<intptr_t>* pv, intptr_t bits,
176  intptr_t wait_until_clear) {
177  intptr_t v;
178  do {
179  v = pv->load(std::memory_order_relaxed);
180  } while ((v & bits) != bits &&
181  ((v & wait_until_clear) != 0 ||
182  !pv->compare_exchange_weak(v, v | bits,
183  std::memory_order_release,
184  std::memory_order_relaxed)));
185 }
186 
187 // Ensure that "(*pv & bits) == 0" by doing an atomic update of "*pv" to
188 // "*pv & ~bits" if necessary. Wait until (*pv & wait_until_clear)==0
189 // before making any change.
190 // This is used to unset flags in mutex and condition variable words.
191 static void AtomicClearBits(std::atomic<intptr_t>* pv, intptr_t bits,
192  intptr_t wait_until_clear) {
193  intptr_t v;
194  do {
195  v = pv->load(std::memory_order_relaxed);
196  } while ((v & bits) != 0 &&
197  ((v & wait_until_clear) != 0 ||
198  !pv->compare_exchange_weak(v, v & ~bits,
199  std::memory_order_release,
200  std::memory_order_relaxed)));
201 }
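// Illustrative sketch (not part of the original source): these helpers are how
// flag bits such as kMuEvent are turned on and off in a mutex word while
// waiting for the kMuSpin spinlock bit to be clear, as done by
// EnsureSynchEvent() and ForgetSynchEvent() below:
//
//   AtomicSetBits(&mu_, kMuEvent, kMuSpin);    // start recording events
//   AtomicClearBits(&mu_, kMuEvent, kMuSpin);  // stop recording events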
202 
203 //------------------------------------------------------------------
204 
205 // Data for doing deadlock detection.
208 
209 // graph used to detect deadlocks.
210 static GraphCycles *deadlock_graph GUARDED_BY(deadlock_graph_mu)
211  PT_GUARDED_BY(deadlock_graph_mu);
212 
213 //------------------------------------------------------------------
214 // An event mechanism for debugging mutex use.
215 // It also allows mutexes to be given names for those who can't handle
216 // addresses, and instead like to give their data structures names like
217 // "Henry", "Fido", or "Rupert IV, King of Yondavia".
218 
219 namespace { // to prevent name pollution
220 enum { // Mutex and CondVar events passed as "ev" to PostSynchEvent
221  // Mutex events
222  SYNCH_EV_TRYLOCK_SUCCESS,
223  SYNCH_EV_TRYLOCK_FAILED,
224  SYNCH_EV_READERTRYLOCK_SUCCESS,
225  SYNCH_EV_READERTRYLOCK_FAILED,
226  SYNCH_EV_LOCK,
227  SYNCH_EV_LOCK_RETURNING,
228  SYNCH_EV_READERLOCK,
229  SYNCH_EV_READERLOCK_RETURNING,
230  SYNCH_EV_UNLOCK,
231  SYNCH_EV_READERUNLOCK,
232 
233  // CondVar events
234  SYNCH_EV_WAIT,
235  SYNCH_EV_WAIT_RETURNING,
236  SYNCH_EV_SIGNAL,
237  SYNCH_EV_SIGNALALL,
238 };
239 
240 enum { // Event flags
241  SYNCH_F_R = 0x01, // reader event
242  SYNCH_F_LCK = 0x02, // PostSynchEvent called with mutex held
243  SYNCH_F_TRY = 0x04, // TryLock or ReaderTryLock
244  SYNCH_F_UNLOCK = 0x08, // Unlock or ReaderUnlock
245 
246  SYNCH_F_LCK_W = SYNCH_F_LCK,
247  SYNCH_F_LCK_R = SYNCH_F_LCK | SYNCH_F_R,
248 };
249 } // anonymous namespace
250 
251 // Properties of the events.
252 static const struct {
253  int flags;
254  const char *msg;
255 } event_properties[] = {
256  {SYNCH_F_LCK_W | SYNCH_F_TRY, "TryLock succeeded "},
257  {0, "TryLock failed "},
258  {SYNCH_F_LCK_R | SYNCH_F_TRY, "ReaderTryLock succeeded "},
259  {0, "ReaderTryLock failed "},
260  {0, "Lock blocking "},
261  {SYNCH_F_LCK_W, "Lock returning "},
262  {0, "ReaderLock blocking "},
263  {SYNCH_F_LCK_R, "ReaderLock returning "},
264  {SYNCH_F_LCK_W | SYNCH_F_UNLOCK, "Unlock "},
265  {SYNCH_F_LCK_R | SYNCH_F_UNLOCK, "ReaderUnlock "},
266  {0, "Wait on "},
267  {0, "Wait unblocked "},
268  {0, "Signal on "},
269  {0, "SignalAll on "},
270 };
271 
272 ABSL_CONST_INIT absl::base_internal::SpinLock synch_event_mu(
273     absl::base_internal::kLinkerInitialized);
274 // protects synch_event
275 
276 // Hash table size; should be prime > 2.
277 // Can't be too small, as it's used for deadlock detection information.
278 static const uint32_t kNSynchEvent = 1031;
279 
280 static struct SynchEvent { // this is a trivial hash table for the events
281  // struct is freed when refcount reaches 0
282  int refcount GUARDED_BY(synch_event_mu);
283 
284  // buckets have linear, 0-terminated chains
285  SynchEvent *next GUARDED_BY(synch_event_mu);
287  // Constant after initialization
288  uintptr_t masked_addr; // object at this address is called "name"
289 
290  // No explicit synchronization used. Instead we assume that the
291  // client who enables/disables invariants/logging on a Mutex does so
292  // while the Mutex is not being concurrently accessed by others.
293  void (*invariant)(void *arg); // called on each event
294  void *arg; // first arg to (*invariant)()
295  bool log; // logging turned on
297  // Constant after initialization
298  char name[1]; // actually longer---null-terminated std::string
299 } *synch_event[kNSynchEvent] GUARDED_BY(synch_event_mu);
300 
301 // Ensure that the object at "addr" has a SynchEvent struct associated with it,
302 // set "bits" in the word there (waiting until lockbit is clear before doing
303 // so), and return a refcounted reference that will remain valid until
304 // UnrefSynchEvent() is called. If a new SynchEvent is allocated,
305 // the string name is copied into it.
306 // When used with a mutex, the caller should also ensure that kMuEvent
307 // is set in the mutex word, and similarly for condition variables and kCVEvent.
308 static SynchEvent *EnsureSynchEvent(std::atomic<intptr_t> *addr,
309  const char *name, intptr_t bits,
310  intptr_t lockbit) {
311  uint32_t h = reinterpret_cast<intptr_t>(addr) % kNSynchEvent;
312  SynchEvent *e;
313  // first look for existing SynchEvent struct..
314  synch_event_mu.Lock();
315  for (e = synch_event[h];
316  e != nullptr && e->masked_addr != base_internal::HidePtr(addr);
317  e = e->next) {
318  }
319  if (e == nullptr) { // no SynchEvent struct found; make one.
320  if (name == nullptr) {
321  name = "";
322  }
323  size_t l = strlen(name);
324  e = reinterpret_cast<SynchEvent *>(
325  base_internal::LowLevelAlloc::Alloc(sizeof(*e) + l));
326  e->refcount = 2; // one for return value, one for linked list
327  e->masked_addr = base_internal::HidePtr(addr);
328  e->invariant = nullptr;
329  e->arg = nullptr;
330  e->log = false;
331  strcpy(e->name, name); // NOLINT(runtime/printf)
332  e->next = synch_event[h];
333  AtomicSetBits(addr, bits, lockbit);
334  synch_event[h] = e;
335  } else {
336  e->refcount++; // for return value
337  }
338  synch_event_mu.Unlock();
339  return e;
340 }
341 
342 // Deallocate the SynchEvent *e, whose refcount has fallen to zero.
343 static void DeleteSynchEvent(SynchEvent *e) {
344  base_internal::LowLevelAlloc::Free(e);
345 }
346 
347 // Decrement the reference count of *e, or do nothing if e==null.
348 static void UnrefSynchEvent(SynchEvent *e) {
349  if (e != nullptr) {
350  synch_event_mu.Lock();
351  bool del = (--(e->refcount) == 0);
352  synch_event_mu.Unlock();
353  if (del) {
354  DeleteSynchEvent(e);
355  }
356  }
357 }
358 
359 // Forget the mapping from the object (Mutex or CondVar) at address addr
360 // to SynchEvent object, and clear "bits" in its word (waiting until lockbit
361 // is clear before doing so).
362 static void ForgetSynchEvent(std::atomic<intptr_t> *addr, intptr_t bits,
363  intptr_t lockbit) {
364  uint32_t h = reinterpret_cast<intptr_t>(addr) % kNSynchEvent;
365  SynchEvent **pe;
366  SynchEvent *e;
367  synch_event_mu.Lock();
368  for (pe = &synch_event[h];
369  (e = *pe) != nullptr && e->masked_addr != base_internal::HidePtr(addr);
370  pe = &e->next) {
371  }
372  bool del = false;
373  if (e != nullptr) {
374  *pe = e->next;
375  del = (--(e->refcount) == 0);
376  }
377  AtomicClearBits(addr, bits, lockbit);
378  synch_event_mu.Unlock();
379  if (del) {
380  DeleteSynchEvent(e);
381  }
382 }
383 
384 // Return a refcounted reference to the SynchEvent of the object at address
385 // "addr", if any. The pointer returned is valid until the UnrefSynchEvent() is
386 // called.
387 static SynchEvent *GetSynchEvent(const void *addr) {
388  uint32_t h = reinterpret_cast<intptr_t>(addr) % kNSynchEvent;
389  SynchEvent *e;
390  synch_event_mu.Lock();
391  for (e = synch_event[h];
392  e != nullptr && e->masked_addr != base_internal::HidePtr(addr);
393  e = e->next) {
394  }
395  if (e != nullptr) {
396  e->refcount++;
397  }
398  synch_event_mu.Unlock();
399  return e;
400 }
401 
402 // Called when an event "ev" occurs on a Mutex or CondVar "obj"
403 // if event recording is on
404 static void PostSynchEvent(void *obj, int ev) {
405  SynchEvent *e = GetSynchEvent(obj);
406  // logging is on if event recording is on and either there's no event struct,
407  // or it explicitly says to log
408  if (e == nullptr || e->log) {
409  void *pcs[40];
410  int n = absl::GetStackTrace(pcs, ABSL_ARRAYSIZE(pcs), 1);
411  // A buffer with enough space for the ASCII for all the PCs, even on a
412  // 64-bit machine.
413  char buffer[ABSL_ARRAYSIZE(pcs) * 24];
414  int pos = snprintf(buffer, sizeof (buffer), " @");
415  for (int i = 0; i != n; i++) {
416  pos += snprintf(&buffer[pos], sizeof (buffer) - pos, " %p", pcs[i]);
417  }
418  ABSL_RAW_LOG(INFO, "%s%p %s %s", event_properties[ev].msg, obj,
419  (e == nullptr ? "" : e->name), buffer);
420  }
421  const int flags = event_properties[ev].flags;
422  if ((flags & SYNCH_F_LCK) != 0 && e != nullptr && e->invariant != nullptr) {
423  // Calling the invariant as is causes problems under ThreadSanitizer.
424  // We are currently inside of Mutex Lock/Unlock and are ignoring all
425  // memory accesses and synchronization. If the invariant transitively
426  // synchronizes something else and we ignore the synchronization, we will
427  // get false positive race reports later.
428  // Reuse EvalConditionAnnotated to properly call into user code.
429  struct local {
430  static bool pred(SynchEvent *ev) {
431  (*ev->invariant)(ev->arg);
432  return false;
433  }
434  };
435  Condition cond(&local::pred, e);
436  Mutex *mu = static_cast<Mutex *>(obj);
437  const bool locking = (flags & SYNCH_F_UNLOCK) == 0;
438  const bool trylock = (flags & SYNCH_F_TRY) != 0;
439  const bool read_lock = (flags & SYNCH_F_R) != 0;
440  EvalConditionAnnotated(&cond, mu, locking, trylock, read_lock);
441  }
442  UnrefSynchEvent(e);
443 }
444 
445 //------------------------------------------------------------------
446 
447 // The SynchWaitParams struct encapsulates the way in which a thread is waiting:
448 // whether it has a timeout, the condition, exclusive/shared, and whether a
449 // condition variable wait has an associated Mutex (as opposed to another
450 // type of lock). It also points to the PerThreadSynch struct of its thread.
451 // cv_word tells Enqueue() to enqueue on a CondVar using CondVarEnqueue().
452 //
453 // This structure is held on the stack rather than directly in
454 // PerThreadSynch because a thread can be waiting on multiple Mutexes if,
455 // while waiting on one Mutex, the implementation calls a client callback
456 // (such as a Condition function) that acquires another Mutex. We don't
457 // strictly need to allow this, but programmers become confused if we do not
458 // allow them to use functions such as LOG() within Condition functions. The
459 // PerThreadSynch struct points at the most recent SynchWaitParams struct when
460 // the thread is on a Mutex's waiter queue.
461 struct SynchWaitParams {
462  SynchWaitParams(Mutex::MuHow how_arg, const Condition *cond_arg,
463  KernelTimeout timeout_arg, Mutex *cvmu_arg,
464  PerThreadSynch *thread_arg,
465  std::atomic<intptr_t> *cv_word_arg)
466  : how(how_arg),
467  cond(cond_arg),
468  timeout(timeout_arg),
469  cvmu(cvmu_arg),
470  thread(thread_arg),
471  cv_word(cv_word_arg),
472  contention_start_cycles(base_internal::CycleClock::Now()) {}
473 
474  const Mutex::MuHow how; // How this thread needs to wait.
475  const Condition *cond; // The condition that this thread is waiting for.
476  // In Mutex, this field is set to zero if a timeout
477  // expires.
478  KernelTimeout timeout; // timeout expiry---absolute time
479  // In Mutex, this field is set to zero if a timeout
480  // expires.
481  Mutex *const cvmu; // used for transfer from cond var to mutex
482  PerThreadSynch *const thread; // thread that is waiting
483 
484  // If not null, thread should be enqueued on the CondVar whose state
485  // word is cv_word instead of queueing normally on the Mutex.
486  std::atomic<intptr_t> *cv_word;
487 
488  int64_t contention_start_cycles; // Time (in cycles) when this thread started
489  // to contend for the mutex.
490 };
491 
492 struct SynchLocksHeld {
493  int n; // number of valid entries in locks[]
494  bool overflow; // true iff we overflowed the array at some point
495  struct {
496  Mutex *mu; // lock acquired
497  int32_t count; // times acquired
498  GraphId id; // deadlock_graph id of acquired lock
499  } locks[40];
500  // If a thread overfills the array during deadlock detection, we
501  // continue, discarding information as needed. If no overflow has
502  // taken place, we can provide more error checking, such as
503  // detecting when a thread releases a lock it does not hold.
504 };
505 
506 // A sentinel value in lists that is not 0.
507 // A 0 value is used to mean "not on a list".
508 static PerThreadSynch *const kPerThreadSynchNull =
509     reinterpret_cast<PerThreadSynch *>(1);
510 
511 static SynchLocksHeld *LocksHeldAlloc() {
512  SynchLocksHeld *ret = reinterpret_cast<SynchLocksHeld *>(
513      base_internal::LowLevelAlloc::Alloc(sizeof(SynchLocksHeld)));
514  ret->n = 0;
515  ret->overflow = false;
516  return ret;
517 }
518 
519 // Return the PerThreadSynch-struct for this thread.
520 static PerThreadSynch *Synch_GetPerThread() {
521  ThreadIdentity *identity = GetOrCreateCurrentThreadIdentity();
522  return &identity->per_thread_synch;
523 }
524 
525 static PerThreadSynch *Synch_GetPerThreadAnnotated(Mutex *mu) {
526  if (mu) {
527  ABSL_TSAN_MUTEX_PRE_DIVERT(mu, 0);
528  }
529  PerThreadSynch *w = Synch_GetPerThread();
530  if (mu) {
531  ABSL_TSAN_MUTEX_POST_DIVERT(mu, 0);
532  }
533  return w;
534 }
535 
536 static SynchLocksHeld *Synch_GetAllLocks() {
537  PerThreadSynch *s = Synch_GetPerThread();
538  if (s->all_locks == nullptr) {
539  s->all_locks = LocksHeldAlloc(); // Freed by ReclaimThreadIdentity.
540  }
541  return s->all_locks;
542 }
543 
544 // Post on "w"'s associated PerThreadSem.
545 inline void Mutex::IncrementSynchSem(Mutex *mu, PerThreadSynch *w) {
546  if (mu) {
547  ABSL_TSAN_MUTEX_PRE_DIVERT(mu, 0);
548  }
549  PerThreadSem::Post(w->thread_identity());
550  if (mu) {
551  ABSL_TSAN_MUTEX_POST_DIVERT(mu, 0);
552  }
553 }
554 
555 // Wait on "w"'s associated PerThreadSem; returns false if timeout expired.
556 bool Mutex::DecrementSynchSem(Mutex *mu, PerThreadSynch *w, KernelTimeout t) {
557  if (mu) {
558  ABSL_TSAN_MUTEX_PRE_DIVERT(mu, 0);
559  }
560  assert(w == Synch_GetPerThread());
561  static_cast<void>(w);
562  bool res = PerThreadSem::Wait(t);
563  if (mu) {
564  ABSL_TSAN_MUTEX_POST_DIVERT(mu, 0);
565  }
566  return res;
567 }
568 
569 // We're in a fatal signal handler that hopes to use Mutex and to get
570 // lucky by not deadlocking. We try to improve its chances of success
571 // by effectively disabling some of the consistency checks. This will
572 // prevent certain ABSL_RAW_CHECK() statements from being triggered when
573 // re-entry is detected. The ABSL_RAW_CHECK() statements are those in the
574 // Mutex code checking that the "waitp" field has not been reused.
575 void Mutex::InternalAttemptToUseMutexInFatalSignalHandler() {
576  // Fix the per-thread state only if it exists.
577  ThreadIdentity *identity = CurrentThreadIdentityIfPresent();
578  if (identity != nullptr) {
579  identity->per_thread_synch.suppress_fatal_errors = true;
580  }
581  // Don't do deadlock detection when we are already failing.
582  synch_deadlock_detection.store(OnDeadlockCycle::kIgnore,
583  std::memory_order_release);
584 }
585 
586 // --------------------------time support
587 
588 // Return the current time plus the timeout. Use the same clock as
589 // PerThreadSem::Wait() for consistency. Unfortunately, we don't have
590 // such a choice when a deadline is given directly.
591 static absl::Time DeadlineFromTimeout(absl::Duration timeout) {
592 #ifndef _WIN32
593  struct timeval tv;
594  gettimeofday(&tv, nullptr);
595  return absl::TimeFromTimeval(tv) + timeout;
596 #else
597  return absl::Now() + timeout;
598 #endif
599 }
600 
601 // --------------------------Mutexes
602 
603 // In the layout below, the msb of the bottom byte is currently unused. Also,
604 // the following constraints were considered in choosing the layout:
605 // o Both the debug allocator's "uninitialized" and "freed" patterns (0xab and
606 // 0xcd) are illegal: reader and writer lock both held.
607 // o kMuWriter and kMuEvent should exceed kMuDesig and kMuWait, to enable the
608 // bit-twiddling trick in Mutex::Unlock().
609 // o kMuWriter / kMuReader == kMuWrWait / kMuWait,
610 // to enable the bit-twiddling trick in CheckForMutexCorruption().
611 static const intptr_t kMuReader = 0x0001L; // a reader holds the lock
612 static const intptr_t kMuDesig = 0x0002L; // there's a designated waker
613 static const intptr_t kMuWait = 0x0004L; // threads are waiting
614 static const intptr_t kMuWriter = 0x0008L; // a writer holds the lock
615 static const intptr_t kMuEvent = 0x0010L; // record this mutex's events
616 // INVARIANT1: there's a thread that was blocked on the mutex, is
617 // no longer, yet has not yet acquired the mutex. If there's a
618 // designated waker, all threads can avoid taking the slow path in
619 // unlock because the designated waker will subsequently acquire
620 // the lock and wake someone. To maintain INVARIANT1 the bit is
621 // set when a thread is unblocked(INV1a), and threads that were
622 // unblocked reset the bit when they either acquire or re-block
623 // (INV1b).
624 static const intptr_t kMuWrWait = 0x0020L; // runnable writer is waiting
625  // for a reader
626 static const intptr_t kMuSpin = 0x0040L; // spinlock protects wait list
627 static const intptr_t kMuLow = 0x00ffL; // mask all mutex bits
628 static const intptr_t kMuHigh = ~kMuLow; // mask pointer/reader count
629 
630 // Hack to make constant values available to gdb pretty printer
631 enum {
632  kGdbMuSpin = kMuSpin,
633  kGdbMuEvent = kMuEvent,
634  kGdbMuWait = kMuWait,
635  kGdbMuWriter = kMuWriter,
636  kGdbMuDesig = kMuDesig,
637  kGdbMuWrWait = kMuWrWait,
638  kGdbMuReader = kMuReader,
639  kGdbMuLow = kMuLow,
640 };
641 
642 // kMuWrWait implies kMuWait.
643 // kMuReader and kMuWriter are mutually exclusive.
644 // If kMuReader is zero, there are no readers.
645 // Otherwise, if kMuWait is zero, the high order bits contain a count of the
646 // number of readers. Otherwise, the reader count is held in
647 // PerThreadSynch::readers of the most recently queued waiter, again in the
648 // bits above kMuLow.
649 static const intptr_t kMuOne = 0x0100; // a count of one reader
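// Illustrative sketch (not part of the original source): while there are no
// waiters, the reader count lives directly in the bits above kMuLow. Two
// readers and no other state would be encoded roughly as
//
//   intptr_t v = kMuReader + 2 * kMuOne;   // reader bit + count of 2
//
// Once kMuWait is set, the count moves into PerThreadSynch::readers of the
// most recently queued waiter, as described above.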
650 
651 // flags passed to Enqueue and LockSlow{,WithTimeout,Loop}
652 static const int kMuHasBlocked = 0x01; // already blocked (MUST == 1)
653 static const int kMuIsCond = 0x02; // conditional waiter (CV or Condition)
654 
655 static_assert(PerThreadSynch::kAlignment > kMuLow,
656  "PerThreadSynch::kAlignment must be greater than kMuLow");
657 
658 // This struct contains various bitmasks to be used in
659 // acquiring and releasing a mutex in a particular mode.
660 struct MuHowS {
661  // if all the bits in fast_need_zero are zero, the lock can be acquired by
662  // adding fast_add and oring fast_or. The bit kMuDesig should be reset iff
663  // this is the designated waker.
664  intptr_t fast_need_zero;
665  intptr_t fast_or;
666  intptr_t fast_add;
667 
668  intptr_t slow_need_zero; // fast_need_zero with events (e.g. logging)
669 
670  intptr_t slow_inc_need_zero; // if all the bits in slow_inc_need_zero are
671  // zero a reader can acquire a read share by
672  // setting the reader bit and incrementing
673  // the reader count (in last waiter since
674  // we're now slow-path). kMuWrWait may
675  // be ignored if we already waited once.
676 };
677 
678 static const MuHowS kSharedS = {
679  // shared or read lock
680  kMuWriter | kMuWait | kMuEvent, // fast_need_zero
681  kMuReader, // fast_or
682  kMuOne, // fast_add
683  kMuWriter | kMuWait, // slow_need_zero
684  kMuSpin | kMuWriter | kMuWrWait, // slow_inc_need_zero
685 };
686 static const MuHowS kExclusiveS = {
687  // exclusive or write lock
688  kMuWriter | kMuReader | kMuEvent, // fast_need_zero
689  kMuWriter, // fast_or
690  0, // fast_add
691  kMuWriter | kMuReader, // slow_need_zero
692  ~static_cast<intptr_t>(0), // slow_inc_need_zero
693 };
694 static const Mutex::MuHow kShared = &kSharedS; // shared lock
695 static const Mutex::MuHow kExclusive = &kExclusiveS; // exclusive lock
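// Illustrative sketch (not part of the original source): the fast paths below
// (Lock(), ReaderLock(), TryLock()) all reduce to the same pattern on a MuHow:
//
//   intptr_t v = mu_.load(std::memory_order_relaxed);
//   if ((v & how->fast_need_zero) == 0 &&
//       mu_.compare_exchange_strong(v, (how->fast_or | v) + how->fast_add,
//                                   std::memory_order_acquire,
//                                   std::memory_order_relaxed)) {
//     // lock acquired in the requested mode
//   }
//
// For kExclusive this sets kMuWriter; for kShared it sets kMuReader and adds
// kMuOne to the reader count.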
696 
697 #ifdef NDEBUG
698 static constexpr bool kDebugMode = false;
699 #else
700 static constexpr bool kDebugMode = true;
701 #endif
702 
703 #ifdef THREAD_SANITIZER
704 static unsigned TsanFlags(Mutex::MuHow how) {
705  return how == kShared ? __tsan_mutex_read_lock : 0;
706 }
707 #endif
708 
709 static bool DebugOnlyIsExiting() {
710  return false;
711 }
712 
713 Mutex::~Mutex() {
714  intptr_t v = mu_.load(std::memory_order_relaxed);
715  if ((v & kMuEvent) != 0 && !DebugOnlyIsExiting()) {
716  ForgetSynchEvent(&this->mu_, kMuEvent, kMuSpin);
717  }
718  if (kDebugMode) {
719  this->ForgetDeadlockInfo();
720  }
721  ABSL_TSAN_MUTEX_DESTROY(this, __tsan_mutex_not_static);
722 }
723 
724 void Mutex::EnableDebugLog(const char *name) {
725  SynchEvent *e = EnsureSynchEvent(&this->mu_, name, kMuEvent, kMuSpin);
726  e->log = true;
727  UnrefSynchEvent(e);
728 }
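// Illustrative sketch (not part of the original source): naming a mutex for
// event logging, assuming a client-owned mutex:
//
//   absl::Mutex map_mu;
//   map_mu.EnableDebugLog("map_mu");   // subsequent Lock/Unlock events are
//                                      // logged via ABSL_RAW_LOG(INFO, ...)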
729 
730 void EnableMutexInvariantDebugging(bool enabled) {
731  synch_check_invariants.store(enabled, std::memory_order_release);
732 }
733 
734 void Mutex::EnableInvariantDebugging(void (*invariant)(void *),
735  void *arg) {
736  if (synch_check_invariants.load(std::memory_order_acquire) &&
737  invariant != nullptr) {
738  SynchEvent *e = EnsureSynchEvent(&this->mu_, nullptr, kMuEvent, kMuSpin);
739  e->invariant = invariant;
740  e->arg = arg;
741  UnrefSynchEvent(e);
742  }
743 }
744 
745 void SetMutexDeadlockDetectionMode(OnDeadlockCycle mode) {
746  synch_deadlock_detection.store(mode, std::memory_order_release);
747 }
748 
749 // Return true iff threads x and y are waiting on the same condition for the
750 // same type of lock. Requires that x and y be waiting on the same Mutex
751 // queue.
752 static bool MuSameCondition(PerThreadSynch *x, PerThreadSynch *y) {
753  return x->waitp->how == y->waitp->how &&
754  Condition::GuaranteedEqual(x->waitp->cond, y->waitp->cond);
755 }
756 
757 // Given the contents of a mutex word containing a PerThreadSynch pointer,
758 // return the pointer.
759 static inline PerThreadSynch *GetPerThreadSynch(intptr_t v) {
760  return reinterpret_cast<PerThreadSynch *>(v & kMuHigh);
761 }
762 
763 // The next several routines maintain the per-thread next and skip fields
764 // used in the Mutex waiter queue.
765 // The queue is a circular singly-linked list, of which the "head" is the
766 // last element, and head->next is the first element.
767 // The skip field has the invariant:
768 // For thread x, x->skip is one of:
769 // - invalid (iff x is not in a Mutex wait queue),
770 // - null, or
771 // - a pointer to a distinct thread waiting later in the same Mutex queue
772 // such that all threads in [x, x->skip] have the same condition and
773 // lock type (MuSameCondition() is true for all pairs in [x, x->skip]).
774 // In addition, if x->skip is valid, (x->may_skip || x->skip == null)
775 //
776 // By the spec of MuSameCondition(), it is not necessary when removing the
777 // first runnable thread y from the front of a Mutex queue to adjust the skip
778 // field of another thread x because if x->skip==y, x->skip must (have) become
779 // invalid before y is removed. The function TryRemove can remove a specified
780 // thread from an arbitrary position in the queue whether runnable or not, so
781 // it fixes up skip fields that would otherwise be left dangling.
782 // The statement
783 // if (x->may_skip && MuSameCondition(x, x->next)) { x->skip = x->next; }
784 // maintains the invariant provided x is not the last waiter in a Mutex queue
785 // The statement
786 // if (x->skip != null) { x->skip = x->skip->skip; }
787 // maintains the invariant.
788 
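// Illustrative sketch (not part of the original source): a circular queue of
// four waiters w1..w4, where w1..w3 share a condition and w4 (the head, i.e.
// the last element) has a different one, might look like
//
//   head == w4 -> w1 -> w2 -> w3 -> (back to w4)
//                  \-----skip----^
//
// i.e. w1->skip == w3, so a scan looking for a different condition can hop
// over w2 in one step.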
789 // Returns the last thread y in a mutex waiter queue such that all threads in
790 // [x, y] inclusive share the same condition. Sets skip fields of some threads
791 // in that range to optimize future evaluation of Skip() on x values in
792 // the range. Requires thread x is in a mutex waiter queue.
793 // The locking is unusual. Skip() is called under these conditions:
794 // - spinlock is held in call from Enqueue(), with maybe_unlocking == false
795 // - Mutex is held in call from UnlockSlow() by last unlocker, with
796 // maybe_unlocking == true
797 // - both Mutex and spinlock are held in call from DequeueAllWakeable() (from
798 // UnlockSlow()) and TryRemove()
799 // These cases are mutually exclusive, so Skip() never runs concurrently
800 // with itself on the same Mutex. The skip chain is used in these other places
801 // that cannot occur concurrently:
802 // - FixSkip() (from TryRemove()) - spinlock and Mutex are held)
803 // - Dequeue() (with spinlock and Mutex held)
804 // - UnlockSlow() (with spinlock and Mutex held)
805 // A more complex case is Enqueue()
806 // - Enqueue() (with spinlock held and maybe_unlocking == false)
807 // This is the first case in which Skip is called, above.
808 // - Enqueue() (without spinlock held; but queue is empty and being freshly
809 // formed)
810 // - Enqueue() (with spinlock held and maybe_unlocking == true)
811 // The first case has mutual exclusion, and the second isolation through
812 // working on an otherwise unreachable data structure.
813 // In the last case, Enqueue() is required to change no skip/next pointers
814 // except those in the added node and the former "head" node. This implies
815 // that the new node is added after head, and so must be the new head or the
816 // new front of the queue.
817 static PerThreadSynch *Skip(PerThreadSynch *x) {
818  PerThreadSynch *x0 = nullptr;
819  PerThreadSynch *x1 = x;
820  PerThreadSynch *x2 = x->skip;
821  if (x2 != nullptr) {
822  // Each iteration attempts to advance sequence (x0,x1,x2) to next sequence
823  // such that x1 == x0->skip && x2 == x1->skip
824  while ((x0 = x1, x1 = x2, x2 = x2->skip) != nullptr) {
825  x0->skip = x2; // short-circuit skip from x0 to x2
826  }
827  x->skip = x1; // short-circuit skip from x to result
828  }
829  return x1;
830 }
831 
832 // "ancestor" appears before "to_be_removed" in the same Mutex waiter queue.
833 // The latter is going to be removed out of order, because of a timeout.
834 // Check whether "ancestor" has a skip field pointing to "to_be_removed",
835 // and fix it if it does.
836 static void FixSkip(PerThreadSynch *ancestor, PerThreadSynch *to_be_removed) {
837  if (ancestor->skip == to_be_removed) { // ancestor->skip left dangling
838  if (to_be_removed->skip != nullptr) {
839  ancestor->skip = to_be_removed->skip; // can skip past to_be_removed
840  } else if (ancestor->next != to_be_removed) { // they are not adjacent
841  ancestor->skip = ancestor->next; // can skip one past ancestor
842  } else {
843  ancestor->skip = nullptr; // can't skip at all
844  }
845  }
846 }
847 
848 static void CondVarEnqueue(SynchWaitParams *waitp);
849 
850 // Enqueue thread "waitp->thread" on a waiter queue.
851 // Called with mutex spinlock held if head != nullptr
852 // If head==nullptr and waitp->cv_word==nullptr, then Enqueue() is
853 // idempotent; it alters no state associated with the existing (empty)
854 // queue.
855 //
856 // If waitp->cv_word == nullptr, queue the thread at either the front or
857 // the end (according to its priority) of the circular mutex waiter queue whose
858 // head is "head", and return the new head. mu is the previous mutex state,
859 // which contains the reader count (perhaps adjusted for the operation in
860 // progress) if the list was empty and a read lock held, and the holder hint if
861 // the list was empty and a write lock held. (flags & kMuIsCond) indicates
862 // whether this thread was transferred from a CondVar or is waiting for a
863 // non-trivial condition. In this case, Enqueue() never returns nullptr
864 //
865 // If waitp->cv_word != nullptr, CondVarEnqueue() is called, and "head" is
866 // returned. This mechanism is used by CondVar to queue a thread on the
867 // condition variable queue instead of the mutex queue in implementing Wait().
868 // In this case, Enqueue() can return nullptr (if head==nullptr).
869 static PerThreadSynch *Enqueue(PerThreadSynch *head,
870  SynchWaitParams *waitp, intptr_t mu, int flags) {
871  // If we have been given a cv_word, call CondVarEnqueue() and return
872  // the previous head of the Mutex waiter queue.
873  if (waitp->cv_word != nullptr) {
874  CondVarEnqueue(waitp);
875  return head;
876  }
877 
878  PerThreadSynch *s = waitp->thread;
879  ABSL_RAW_CHECK(
880  s->waitp == nullptr || // normal case
881  s->waitp == waitp || // Fer()---transfer from condition variable
882  s->suppress_fatal_errors,
883  "detected illegal recursion into Mutex code");
884  s->waitp = waitp;
885  s->skip = nullptr; // maintain skip invariant (see above)
886  s->may_skip = true; // always true on entering queue
887  s->wake = false; // not being woken
888  s->cond_waiter = ((flags & kMuIsCond) != 0);
889  if (head == nullptr) { // s is the only waiter
890  s->next = s; // it's the only entry in the cycle
891  s->readers = mu; // reader count is from mu word
892  s->maybe_unlocking = false; // no one is searching an empty list
893  head = s; // s is new head
894  } else {
895  PerThreadSynch *enqueue_after = nullptr; // we'll put s after this element
896 #ifdef ABSL_HAVE_PTHREAD_GETSCHEDPARAM
897  int64_t now_cycles = base_internal::CycleClock::Now();
898  if (s->next_priority_read_cycles < now_cycles) {
899  // Every so often, update our idea of the thread's priority.
900  // pthread_getschedparam() is 5% of the block/wakeup time;
901  // base_internal::CycleClock::Now() is 0.5%.
902  int policy;
903  struct sched_param param;
904  const int err = pthread_getschedparam(pthread_self(), &policy, &param);
905  if (err != 0) {
906  ABSL_RAW_LOG(ERROR, "pthread_getschedparam failed: %d", err);
907  } else {
908  s->priority = param.sched_priority;
909  s->next_priority_read_cycles =
910  now_cycles +
911  static_cast<int64_t>(base_internal::CycleClock::Frequency());
912  }
913  }
914  if (s->priority > head->priority) { // s's priority is above head's
915  // try to put s in priority-fifo order, or failing that at the front.
916  if (!head->maybe_unlocking) {
917  // No unlocker can be scanning the queue, so we can insert between
918  // skip-chains, and within a skip-chain if it has the same condition as
919  // s. We insert in priority-fifo order, examining the end of every
920  // skip-chain, plus every element with the same condition as s.
921  PerThreadSynch *advance_to = head; // next value of enqueue_after
922  PerThreadSynch *cur; // successor of enqueue_after
923  do {
924  enqueue_after = advance_to;
925  cur = enqueue_after->next; // this advance ensures progress
926  advance_to = Skip(cur); // normally, advance to end of skip chain
927  // (side-effect: optimizes skip chain)
928  if (advance_to != cur && s->priority > advance_to->priority &&
929  MuSameCondition(s, cur)) {
930  // but this skip chain is not a singleton, s has higher priority
931  // than its tail and has the same condition as the chain,
932  // so we can insert within the skip-chain
933  advance_to = cur; // advance by just one
934  }
935  } while (s->priority <= advance_to->priority);
936  // termination guaranteed because s->priority > head->priority
937  // and head is the end of a skip chain
938  } else if (waitp->how == kExclusive &&
939  Condition::GuaranteedEqual(waitp->cond, nullptr)) {
940  // An unlocker could be scanning the queue, but we know it will recheck
941  // the queue front for writers that have no condition, which is what s
942  // is, so an insert at front is safe.
943  enqueue_after = head; // add after head, at front
944  }
945  }
946 #endif
947  if (enqueue_after != nullptr) {
948  s->next = enqueue_after->next;
949  enqueue_after->next = s;
950 
951  // enqueue_after can be: head, Skip(...), or cur.
952  // The first two imply enqueue_after->skip == nullptr, and
953  // the last is used only if MuSameCondition(s, cur).
954  // We require this because clearing enqueue_after->skip
955  // is impossible; enqueue_after's predecessors might also
956  // incorrectly skip over s if we were to allow other
957  // insertion points.
958  ABSL_RAW_CHECK(
959  enqueue_after->skip == nullptr || MuSameCondition(enqueue_after, s),
960  "Mutex Enqueue failure");
961 
962  if (enqueue_after != head && enqueue_after->may_skip &&
963  MuSameCondition(enqueue_after, enqueue_after->next)) {
964  // enqueue_after can skip to its new successor, s
965  enqueue_after->skip = enqueue_after->next;
966  }
967  if (MuSameCondition(s, s->next)) { // s->may_skip is known to be true
968  s->skip = s->next; // s may skip to its successor
969  }
970  } else { // enqueue not done any other way, so
971  // we're inserting s at the back
972  // s will become new head; copy data from head into it
973  s->next = head->next; // add s after head
974  head->next = s;
975  s->readers = head->readers; // reader count is from previous head
976  s->maybe_unlocking = head->maybe_unlocking; // same for unlock hint
977  if (head->may_skip && MuSameCondition(head, s)) {
978  // head now has successor; may skip
979  head->skip = s;
980  }
981  head = s; // s is new head
982  }
983  }
984  s->state.store(PerThreadSynch::kQueued, std::memory_order_relaxed);
985  return head;
986 }
987 
988 // Dequeue the successor pw->next of thread pw from the Mutex waiter queue
989 // whose last element is head. The new head element is returned, or null
990 // if the list is made empty.
991 // Dequeue is called with both spinlock and Mutex held.
992 static PerThreadSynch *Dequeue(PerThreadSynch *head, PerThreadSynch *pw) {
993  PerThreadSynch *w = pw->next;
994  pw->next = w->next; // snip w out of list
995  if (head == w) { // we removed the head
996  head = (pw == w) ? nullptr : pw; // either emptied list, or pw is new head
997  } else if (pw != head && MuSameCondition(pw, pw->next)) {
998  // pw can skip to its new successor
999  if (pw->next->skip !=
1000  nullptr) { // either skip to its successors skip target
1001  pw->skip = pw->next->skip;
1002  } else { // or to pw's successor
1003  pw->skip = pw->next;
1004  }
1005  }
1006  return head;
1007 }
1008 
1009 // Traverse the elements [ pw->next, h] of the circular list whose last element
1010 // is head.
1011 // Remove all elements with wake==true and place them in the
1012 // singly-linked list wake_list in the order found. Assumes that
1013 // there is only one such element if the element has how == kExclusive.
1014 // Return the new head.
1015 static PerThreadSynch *DequeueAllWakeable(PerThreadSynch *head,
1016  PerThreadSynch *pw,
1017  PerThreadSynch **wake_tail) {
1018  PerThreadSynch *orig_h = head;
1019  PerThreadSynch *w = pw->next;
1020  bool skipped = false;
1021  do {
1022  if (w->wake) { // remove this element
1023  ABSL_RAW_CHECK(pw->skip == nullptr, "bad skip in DequeueAllWakeable");
1024  // we're removing pw's successor so either pw->skip is zero or we should
1025  // already have removed pw since if pw->skip!=null, pw has the same
1026  // condition as w.
1027  head = Dequeue(head, pw);
1028  w->next = *wake_tail; // keep list terminated
1029  *wake_tail = w; // add w to wake_list;
1030  wake_tail = &w->next; // next addition to end
1031  if (w->waitp->how == kExclusive) { // wake at most 1 writer
1032  break;
1033  }
1034  } else { // not waking this one; skip
1035  pw = Skip(w); // skip as much as possible
1036  skipped = true;
1037  }
1038  w = pw->next;
1039  // We want to stop processing after we've considered the original head,
1040  // orig_h. We can't test for w==orig_h in the loop because w may skip over
1041  // it; we are guaranteed only that w's predecessor will not skip over
1042  // orig_h. When we've considered orig_h, either we've processed it and
1043  // removed it (so orig_h != head), or we considered it and skipped it (so
1044  // skipped==true && pw == head because skipping from head always skips by
1045  // just one, leaving pw pointing at head). So we want to
1046  // continue the loop with the negation of that expression.
1047  } while (orig_h == head && (pw != head || !skipped));
1048  return head;
1049 }
1050 
1051 // Try to remove thread s from the list of waiters on this mutex.
1052 // Does nothing if s is not on the waiter list.
1053 void Mutex::TryRemove(PerThreadSynch *s) {
1054  intptr_t v = mu_.load(std::memory_order_relaxed);
1055  // acquire spinlock & lock
1056  if ((v & (kMuWait | kMuSpin | kMuWriter | kMuReader)) == kMuWait &&
1057  mu_.compare_exchange_strong(v, v | kMuSpin | kMuWriter,
1058  std::memory_order_acquire,
1059  std::memory_order_relaxed)) {
1060  PerThreadSynch *h = GetPerThreadSynch(v);
1061  if (h != nullptr) {
1062  PerThreadSynch *pw = h; // pw is w's predecessor
1063  PerThreadSynch *w;
1064  if ((w = pw->next) != s) { // search for thread,
1065  do { // processing at least one element
1066  if (!MuSameCondition(s, w)) { // seeking different condition
1067  pw = Skip(w); // so skip all that won't match
1068  // we don't have to worry about dangling skip fields
1069  // in the threads we skipped; none can point to s
1070  // because their condition differs from s
1071  } else { // seeking same condition
1072  FixSkip(w, s); // fix up any skip pointer from w to s
1073  pw = w;
1074  }
1075  // don't search further if we found the thread, or we're about to
1076  // process the first thread again.
1077  } while ((w = pw->next) != s && pw != h);
1078  }
1079  if (w == s) { // found thread; remove it
1080  // pw->skip may be non-zero here; the loop above ensured that
1081  // no ancestor of s can skip to s, so removal is safe anyway.
1082  h = Dequeue(h, pw);
1083  s->next = nullptr;
1084  s->state.store(PerThreadSynch::kAvailable, std::memory_order_release);
1085  }
1086  }
1087  intptr_t nv;
1088  do { // release spinlock and lock
1089  v = mu_.load(std::memory_order_relaxed);
1090  nv = v & (kMuDesig | kMuEvent);
1091  if (h != nullptr) {
1092  nv |= kMuWait | reinterpret_cast<intptr_t>(h);
1093  h->readers = 0; // we hold writer lock
1094  h->maybe_unlocking = false; // finished unlocking
1095  }
1096  } while (!mu_.compare_exchange_weak(v, nv,
1097  std::memory_order_release,
1098  std::memory_order_relaxed));
1099  }
1100 }
1101 
1102 // Wait until thread "s", which must be the current thread, is removed from
1103 // this mutex's waiter queue. If "s->waitp->timeout" has a timeout, wake up
1104 // if the wait extends past the absolute time specified, even if "s" is still
1105 // on the mutex queue. In this case, remove "s" from the queue and return
1106 // true, otherwise return false.
1107 void Mutex::Block(PerThreadSynch *s) {
1108  while (s->state.load(std::memory_order_acquire) == PerThreadSynch::kQueued) {
1109  if (!DecrementSynchSem(this, s, s->waitp->timeout)) {
1110  // After a timeout, we go into a spin loop until we remove ourselves
1111  // from the queue, or someone else removes us. We can't be sure to be
1112  // able to remove ourselves in a single lock acquisition because this
1113  // mutex may be held, and the holder has the right to read the centre
1114  // of the waiter queue without holding the spinlock.
1115  this->TryRemove(s);
1116  int c = 0;
1117  while (s->next != nullptr) {
1118  c = Delay(c, GENTLE);
1119  this->TryRemove(s);
1120  }
1121  if (kDebugMode) {
1122  // This ensures that we test the case that TryRemove() is called when s
1123  // is not on the queue.
1124  this->TryRemove(s);
1125  }
1126  s->waitp->timeout = KernelTimeout::Never(); // timeout is satisfied
1127  s->waitp->cond = nullptr; // condition no longer relevant for wakeups
1128  }
1129  }
1130  ABSL_RAW_CHECK(s->waitp != nullptr || s->suppress_fatal_errors,
1131  "detected illegal recursion in Mutex code");
1132  s->waitp = nullptr;
1133 }
1134 
1135 // Wake thread w, and return the next thread in the list.
1136 PerThreadSynch *Mutex::Wakeup(PerThreadSynch *w) {
1137  PerThreadSynch *next = w->next;
1138  w->next = nullptr;
1139  w->state.store(PerThreadSynch::kAvailable, std::memory_order_release);
1140  IncrementSynchSem(this, w);
1141 
1142  return next;
1143 }
1144 
1145 static GraphId GetGraphIdLocked(Mutex *mu)
1146  EXCLUSIVE_LOCKS_REQUIRED(deadlock_graph_mu) {
1147  if (!deadlock_graph) { // (re)create the deadlock graph.
1148  deadlock_graph =
1149  new (base_internal::LowLevelAlloc::Alloc(sizeof(*deadlock_graph)))
1150  GraphCycles;
1151  }
1152  return deadlock_graph->GetId(mu);
1153 }
1154 
1155 static GraphId GetGraphId(Mutex *mu) LOCKS_EXCLUDED(deadlock_graph_mu) {
1156  deadlock_graph_mu.Lock();
1157  GraphId id = GetGraphIdLocked(mu);
1158  deadlock_graph_mu.Unlock();
1159  return id;
1160 }
1161 
1162 // Record a lock acquisition. This is used in debug mode for deadlock
1163 // detection. The held_locks pointer points to the relevant data
1164 // structure for each case.
1165 static void LockEnter(Mutex* mu, GraphId id, SynchLocksHeld *held_locks) {
1166  int n = held_locks->n;
1167  int i = 0;
1168  while (i != n && held_locks->locks[i].id != id) {
1169  i++;
1170  }
1171  if (i == n) {
1172  if (n == ABSL_ARRAYSIZE(held_locks->locks)) {
1173  held_locks->overflow = true; // lost some data
1174  } else { // we have room for lock
1175  held_locks->locks[i].mu = mu;
1176  held_locks->locks[i].count = 1;
1177  held_locks->locks[i].id = id;
1178  held_locks->n = n + 1;
1179  }
1180  } else {
1181  held_locks->locks[i].count++;
1182  }
1183 }
1184 
1185 // Record a lock release. Each call to LockEnter(mu, id, x) should be
1186 // eventually followed by a call to LockLeave(mu, id, x) by the same thread.
1187 // It does not process the event if it is not needed, i.e. when deadlock
1188 // detection is disabled.
1189 static void LockLeave(Mutex* mu, GraphId id, SynchLocksHeld *held_locks) {
1190  int n = held_locks->n;
1191  int i = 0;
1192  while (i != n && held_locks->locks[i].id != id) {
1193  i++;
1194  }
1195  if (i == n) {
1196  if (!held_locks->overflow) {
1197  // The deadlock id may have been reassigned after ForgetDeadlockInfo,
1198  // but in that case mu should still be present.
1199  i = 0;
1200  while (i != n && held_locks->locks[i].mu != mu) {
1201  i++;
1202  }
1203  if (i == n) { // mu missing means releasing unheld lock
1204  SynchEvent *mu_events = GetSynchEvent(mu);
1205  ABSL_RAW_LOG(FATAL,
1206  "thread releasing lock it does not hold: %p %s; "
1207  ,
1208  static_cast<void *>(mu),
1209  mu_events == nullptr ? "" : mu_events->name);
1210  }
1211  }
1212  } else if (held_locks->locks[i].count == 1) {
1213  held_locks->n = n - 1;
1214  held_locks->locks[i] = held_locks->locks[n - 1];
1215  held_locks->locks[n - 1].id = InvalidGraphId();
1216  held_locks->locks[n - 1].mu =
1217  nullptr; // clear mu to please the leak detector.
1218  } else {
1219  assert(held_locks->locks[i].count > 0);
1220  held_locks->locks[i].count--;
1221  }
1222 }
1223 
1224 // Call LockEnter() if in debug mode and deadlock detection is enabled.
1225 static inline void DebugOnlyLockEnter(Mutex *mu) {
1226  if (kDebugMode) {
1227  if (synch_deadlock_detection.load(std::memory_order_acquire) !=
1228  OnDeadlockCycle::kIgnore) {
1229  LockEnter(mu, GetGraphId(mu), Synch_GetAllLocks());
1230  }
1231  }
1232 }
1233 
1234 // Call LockEnter() if in debug mode and deadlock detection is enabled.
1235 static inline void DebugOnlyLockEnter(Mutex *mu, GraphId id) {
1236  if (kDebugMode) {
1237  if (synch_deadlock_detection.load(std::memory_order_acquire) !=
1238  OnDeadlockCycle::kIgnore) {
1239  LockEnter(mu, id, Synch_GetAllLocks());
1240  }
1241  }
1242 }
1243 
1244 // Call LockLeave() if in debug mode and deadlock detection is enabled.
1245 static inline void DebugOnlyLockLeave(Mutex *mu) {
1246  if (kDebugMode) {
1247  if (synch_deadlock_detection.load(std::memory_order_acquire) !=
1248  OnDeadlockCycle::kIgnore) {
1249  LockLeave(mu, GetGraphId(mu), Synch_GetAllLocks());
1250  }
1251  }
1252 }
1253 
1254 static char *StackString(void **pcs, int n, char *buf, int maxlen,
1255  bool symbolize) {
1256  static const int kSymLen = 200;
1257  char sym[kSymLen];
1258  int len = 0;
1259  for (int i = 0; i != n; i++) {
1260  if (symbolize) {
1261  if (!symbolizer(pcs[i], sym, kSymLen)) {
1262  sym[0] = '\0';
1263  }
1264  snprintf(buf + len, maxlen - len, "%s\t@ %p %s\n",
1265  (i == 0 ? "\n" : ""),
1266  pcs[i], sym);
1267  } else {
1268  snprintf(buf + len, maxlen - len, " %p", pcs[i]);
1269  }
1270  len += strlen(&buf[len]);
1271  }
1272  return buf;
1273 }
1274 
1275 static char *CurrentStackString(char *buf, int maxlen, bool symbolize) {
1276  void *pcs[40];
1277  return StackString(pcs, absl::GetStackTrace(pcs, ABSL_ARRAYSIZE(pcs), 2), buf,
1278  maxlen, symbolize);
1279 }
1280 
1281 namespace {
1282 enum { kMaxDeadlockPathLen = 10 }; // maximum length of a deadlock cycle;
1283  // a path this long would be remarkable
1284 // Buffers required to report a deadlock.
1285 // We do not allocate them on stack to avoid large stack frame.
1286 struct DeadlockReportBuffers {
1287  char buf[6100];
1288  GraphId path[kMaxDeadlockPathLen];
1289 };
1290 
1291 struct ScopedDeadlockReportBuffers {
1292  ScopedDeadlockReportBuffers() {
1293  b = reinterpret_cast<DeadlockReportBuffers *>(
1294  base_internal::LowLevelAlloc::Alloc(sizeof(*b)));
1295  }
1296  ~ScopedDeadlockReportBuffers() { base_internal::LowLevelAlloc::Free(b); }
1297  DeadlockReportBuffers *b;
1298 };
1299 
1300 // Helper to pass to GraphCycles::UpdateStackTrace.
1301 int GetStack(void** stack, int max_depth) {
1302  return absl::GetStackTrace(stack, max_depth, 3);
1303 }
1304 } // anonymous namespace
1305 
1306 // Called in debug mode when a thread is about to acquire a lock in a way that
1307 // may block.
1308 static GraphId DeadlockCheck(Mutex *mu) {
1309  if (synch_deadlock_detection.load(std::memory_order_acquire) ==
1310  OnDeadlockCycle::kIgnore) {
1311  return InvalidGraphId();
1312  }
1313 
1314  SynchLocksHeld *all_locks = Synch_GetAllLocks();
1315 
1316  absl::base_internal::SpinLockHolder lock(&deadlock_graph_mu);
1317  const GraphId mu_id = GetGraphIdLocked(mu);
1318 
1319  if (all_locks->n == 0) {
1320  // There are no other locks held. Return now so that we don't need to
1321  // call GetSynchEvent(). This way we do not record the stack trace
1322  // for this Mutex. It's ok, since if this Mutex is involved in a deadlock,
1323  // it can't always be the first lock acquired by a thread.
1324  return mu_id;
1325  }
1326 
1327  // We prefer to keep stack traces that show a thread holding and acquiring
1328  // as many locks as possible. This increases the chances that a given edge
1329  // in the acquires-before graph will be represented in the stack traces
1330  // recorded for the locks.
1331  deadlock_graph->UpdateStackTrace(mu_id, all_locks->n + 1, GetStack);
1332 
1333  // For each other mutex already held by this thread:
1334  for (int i = 0; i != all_locks->n; i++) {
1335  const GraphId other_node_id = all_locks->locks[i].id;
1336  const Mutex *other =
1337  static_cast<const Mutex *>(deadlock_graph->Ptr(other_node_id));
1338  if (other == nullptr) {
1339  // Ignore stale lock
1340  continue;
1341  }
1342 
1343  // Add the acquired-before edge to the graph.
1344  if (!deadlock_graph->InsertEdge(other_node_id, mu_id)) {
1345  ScopedDeadlockReportBuffers scoped_buffers;
1346  DeadlockReportBuffers *b = scoped_buffers.b;
1347  static int number_of_reported_deadlocks = 0;
1348  number_of_reported_deadlocks++;
1349  // Symbolize only the first 2 deadlock reports to avoid huge slowdowns.
1350  bool symbolize = number_of_reported_deadlocks <= 2;
1351  ABSL_RAW_LOG(ERROR, "Potential Mutex deadlock: %s",
1352  CurrentStackString(b->buf, sizeof (b->buf), symbolize));
1353  int len = 0;
1354  for (int j = 0; j != all_locks->n; j++) {
1355  void* pr = deadlock_graph->Ptr(all_locks->locks[j].id);
1356  if (pr != nullptr) {
1357  snprintf(b->buf + len, sizeof (b->buf) - len, " %p", pr);
1358  len += static_cast<int>(strlen(&b->buf[len]));
1359  }
1360  }
1361  ABSL_RAW_LOG(ERROR, "Acquiring %p Mutexes held: %s",
1362  static_cast<void *>(mu), b->buf);
1363  ABSL_RAW_LOG(ERROR, "Cycle: ");
1364  int path_len = deadlock_graph->FindPath(
1365  mu_id, other_node_id, ABSL_ARRAYSIZE(b->path), b->path);
1366  for (int j = 0; j != path_len; j++) {
1367  GraphId id = b->path[j];
1368  Mutex *path_mu = static_cast<Mutex *>(deadlock_graph->Ptr(id));
1369  if (path_mu == nullptr) continue;
1370  void** stack;
1371  int depth = deadlock_graph->GetStackTrace(id, &stack);
1372  snprintf(b->buf, sizeof(b->buf),
1373  "mutex@%p stack: ", static_cast<void *>(path_mu));
1374  StackString(stack, depth, b->buf + strlen(b->buf),
1375  static_cast<int>(sizeof(b->buf) - strlen(b->buf)),
1376  symbolize);
1377  ABSL_RAW_LOG(ERROR, "%s", b->buf);
1378  }
1379  if (synch_deadlock_detection.load(std::memory_order_acquire) ==
1380  OnDeadlockCycle::kAbort) {
1381  deadlock_graph_mu.Unlock(); // avoid deadlock in fatal sighandler
1382  ABSL_RAW_LOG(FATAL, "dying due to potential deadlock");
1383  return mu_id;
1384  }
1385  break; // report at most one potential deadlock per acquisition
1386  }
1387  }
1388 
1389  return mu_id;
1390 }
1391 
1392 // Invoke DeadlockCheck() iff we're in debug mode and
1393 // deadlock checking has been enabled.
1394 static GraphId DebugOnlyDeadlockCheck(Mutex *mu) {
1395  if (kDebugMode && synch_deadlock_detection.load(std::memory_order_acquire) !=
1396  OnDeadlockCycle::kIgnore) {
1397  return DeadlockCheck(mu);
1398  } else {
1399  return InvalidGraphId();
1400  }
1401 }
1402 
1403 void Mutex::ForgetDeadlockInfo() {
1404  if (kDebugMode && synch_deadlock_detection.load(std::memory_order_acquire) !=
1405  OnDeadlockCycle::kIgnore) {
1406  deadlock_graph_mu.Lock();
1407  if (deadlock_graph != nullptr) {
1408  deadlock_graph->RemoveNode(this);
1409  }
1410  deadlock_graph_mu.Unlock();
1411  }
1412 }
1413 
1414 void Mutex::AssertNotHeld() const {
1415  // We have the data to allow this check only if in debug mode and deadlock
1416  // detection is enabled.
1417  if (kDebugMode &&
1418  (mu_.load(std::memory_order_relaxed) & (kMuWriter | kMuReader)) != 0 &&
1419  synch_deadlock_detection.load(std::memory_order_acquire) !=
1420  OnDeadlockCycle::kIgnore) {
1421  GraphId id = GetGraphId(const_cast<Mutex *>(this));
1422  SynchLocksHeld *locks = Synch_GetAllLocks();
1423  for (int i = 0; i != locks->n; i++) {
1424  if (locks->locks[i].id == id) {
1425  SynchEvent *mu_events = GetSynchEvent(this);
1426  ABSL_RAW_LOG(FATAL, "thread should not hold mutex %p %s",
1427  static_cast<const void *>(this),
1428  (mu_events == nullptr ? "" : mu_events->name));
1429  }
1430  }
1431  }
1432 }
1433 
1434 // Attempt to acquire *mu, and return whether successful. The implementation
1435 // may spin for a short while if the lock cannot be acquired immediately.
1436 static bool TryAcquireWithSpinning(std::atomic<intptr_t>* mu) {
1437  int c = mutex_globals.spinloop_iterations;
1438  int result = -1; // result of operation: 0=false, 1=true, -1=unknown
1439 
1440  do { // do/while somewhat faster on AMD
1441  intptr_t v = mu->load(std::memory_order_relaxed);
1442  if ((v & (kMuReader|kMuEvent)) != 0) { // a reader or tracing -> give up
1443  result = 0;
1444  } else if (((v & kMuWriter) == 0) && // no holder -> try to acquire
1445  mu->compare_exchange_strong(v, kMuWriter | v,
1446  std::memory_order_acquire,
1447  std::memory_order_relaxed)) {
1448  result = 1;
1449  }
1450  } while (result == -1 && --c > 0);
1451  return result == 1;
1452 }
1453 
1454 ABSL_XRAY_LOG_ARGS(1) void Mutex::Lock() {
1455  ABSL_TSAN_MUTEX_PRE_LOCK(this, 0);
1456  GraphId id = DebugOnlyDeadlockCheck(this);
1457  intptr_t v = mu_.load(std::memory_order_relaxed);
1458  // try fast acquire, then spin loop
1459  if ((v & (kMuWriter | kMuReader | kMuEvent)) != 0 ||
1460  !mu_.compare_exchange_strong(v, kMuWriter | v,
1461  std::memory_order_acquire,
1462  std::memory_order_relaxed)) {
1463  // try spin acquire, then slow loop
1464  if (!TryAcquireWithSpinning(&this->mu_)) {
1465  this->LockSlow(kExclusive, nullptr, 0);
1466  }
1467  }
1468  DebugOnlyLockEnter(this, id);
1469  ABSL_TSAN_MUTEX_POST_LOCK(this, 0, 0);
1470 }
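// Illustrative sketch (not part of the original source): from a client's point
// of view the layering above is invisible; a call sequence like
//
//   absl::Mutex mu;
//   mu.Lock();        // fast path: a single CAS that sets kMuWriter
//   // ... critical section ...
//   mu.Unlock();
//
// only reaches TryAcquireWithSpinning() and LockSlow() when that CAS fails
// (contention, waiters, or event recording).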
1471 
1472 ABSL_XRAY_LOG_ARGS(1) void Mutex::ReaderLock() {
1473  ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_read_lock);
1474  GraphId id = DebugOnlyDeadlockCheck(this);
1475  intptr_t v = mu_.load(std::memory_order_relaxed);
1476  // try fast acquire, then slow loop
1477  if ((v & (kMuWriter | kMuWait | kMuEvent)) != 0 ||
1478  !mu_.compare_exchange_strong(v, (kMuReader | v) + kMuOne,
1479  std::memory_order_acquire,
1480  std::memory_order_relaxed)) {
1481  this->LockSlow(kShared, nullptr, 0);
1482  }
1483  DebugOnlyLockEnter(this, id);
1484  ABSL_TSAN_MUTEX_POST_LOCK(this, __tsan_mutex_read_lock, 0);
1485 }
1486 
1487 void Mutex::LockWhen(const Condition &cond) {
1488  ABSL_TSAN_MUTEX_PRE_LOCK(this, 0);
1489  GraphId id = DebugOnlyDeadlockCheck(this);
1490  this->LockSlow(kExclusive, &cond, 0);
1491  DebugOnlyLockEnter(this, id);
1492  ABSL_TSAN_MUTEX_POST_LOCK(this, 0, 0);
1493 }
1494 
1495 bool Mutex::LockWhenWithTimeout(const Condition &cond, absl::Duration timeout) {
1496  return LockWhenWithDeadline(cond, DeadlineFromTimeout(timeout));
1497 }
1498 
1499 bool Mutex::LockWhenWithDeadline(const Condition &cond, absl::Time deadline) {
1500  ABSL_TSAN_MUTEX_PRE_LOCK(this, 0);
1501  GraphId id = DebugOnlyDeadlockCheck(this);
1502  bool res = LockSlowWithDeadline(kExclusive, &cond,
1503  KernelTimeout(deadline), 0);
1504  DebugOnlyLockEnter(this, id);
1505  ABSL_TSAN_MUTEX_POST_LOCK(this, 0, 0);
1506  return res;
1507 }
1508 
1509 void Mutex::ReaderLockWhen(const Condition &cond) {
1510  ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_read_lock);
1511  GraphId id = DebugOnlyDeadlockCheck(this);
1512  this->LockSlow(kShared, &cond, 0);
1513  DebugOnlyLockEnter(this, id);
1514  ABSL_TSAN_MUTEX_POST_LOCK(this, __tsan_mutex_read_lock, 0);
1515 }
1516 
1517 bool Mutex::ReaderLockWhenWithTimeout(const Condition &cond,
1518  absl::Duration timeout) {
1519  return ReaderLockWhenWithDeadline(cond, DeadlineFromTimeout(timeout));
1520 }
1521 
1522 bool Mutex::ReaderLockWhenWithDeadline(const Condition &cond,
1523  absl::Time deadline) {
1524  ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_read_lock);
1525  GraphId id = DebugOnlyDeadlockCheck(this);
1526  bool res = LockSlowWithDeadline(kShared, &cond, KernelTimeout(deadline), 0);
1527  DebugOnlyLockEnter(this, id);
1528  ABSL_TSAN_MUTEX_POST_LOCK(this, __tsan_mutex_read_lock, 0);
1529  return res;
1530 }
1531 
1532 void Mutex::Await(const Condition &cond) {
1533  if (cond.Eval()) { // condition already true; nothing to do
1534  if (kDebugMode) {
1535  this->AssertReaderHeld();
1536  }
1537  } else { // normal case
1538  ABSL_RAW_CHECK(this->AwaitCommon(cond, KernelTimeout::Never()),
1539  "condition untrue on return from Await");
1540  }
1541 }
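// Illustrative sketch, not part of mutex.cc: how a client typically drives
// Await() with a method-based Condition. Queue/NotEmpty are hypothetical
// names; only absl::Mutex, absl::MutexLock and absl::Condition from the
// public header are assumed.
#include <deque>

#include "absl/synchronization/mutex.h"

class Queue {
 public:
  void Push(int v) {
    absl::MutexLock l(&mu_);
    q_.push_back(v);  // no explicit signal: conditions are re-checked on unlock
  }
  int Pop() {
    absl::MutexLock l(&mu_);
    // Await() releases mu_ while blocked and reacquires it in the same mode
    // before returning, so q_ can be used immediately afterwards.
    mu_.Await(absl::Condition(this, &Queue::NotEmpty));
    int v = q_.front();
    q_.pop_front();
    return v;
  }

 private:
  bool NotEmpty() const { return !q_.empty(); }
  absl::Mutex mu_;
  std::deque<int> q_;
};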
1542 
1543 bool Mutex::AwaitWithTimeout(const Condition &cond, absl::Duration timeout) {
1544  return AwaitWithDeadline(cond, DeadlineFromTimeout(timeout));
1545 }
1546 
1547 bool Mutex::AwaitWithDeadline(const Condition &cond, absl::Time deadline) {
1548  if (cond.Eval()) { // condition already true; nothing to do
1549  if (kDebugMode) {
1550  this->AssertReaderHeld();
1551  }
1552  return true;
1553  }
1554 
1555  KernelTimeout t{deadline};
1556  bool res = this->AwaitCommon(cond, t);
1557  ABSL_RAW_CHECK(res || t.has_timeout(),
1558  "condition untrue on return from Await");
1559  return res;
1560 }
1561 
1562 bool Mutex::AwaitCommon(const Condition &cond, KernelTimeout t) {
1563  this->AssertReaderHeld();
1564  MuHow how =
1565  (mu_.load(std::memory_order_relaxed) & kMuWriter) ? kExclusive : kShared;
1566  ABSL_TSAN_MUTEX_PRE_UNLOCK(this, TsanFlags(how));
1567  SynchWaitParams waitp(
1568  how, &cond, t, nullptr /*no cvmu*/, Synch_GetPerThreadAnnotated(this),
1569  nullptr /*no cv_word*/);
1570  int flags = kMuHasBlocked;
1571  if (!Condition::GuaranteedEqual(&cond, nullptr)) {
1572  flags |= kMuIsCond;
1573  }
1574  this->UnlockSlow(&waitp);
1575  this->Block(waitp.thread);
1576  ABSL_TSAN_MUTEX_POST_UNLOCK(this, TsanFlags(how));
1577  ABSL_TSAN_MUTEX_PRE_LOCK(this, TsanFlags(how));
1578  this->LockSlowLoop(&waitp, flags);
1579  bool res = waitp.cond != nullptr || // => cond known true from LockSlowLoop
1580  EvalConditionAnnotated(&cond, this, true, false, how == kShared);
1581  ABSL_TSAN_MUTEX_POST_LOCK(this, TsanFlags(how), 0);
1582  return res;
1583 }
1584 
1585 ABSL_XRAY_LOG_ARGS(1) bool Mutex::TryLock() {
1586  ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_try_lock);
1587  intptr_t v = mu_.load(std::memory_order_relaxed);
1588  if ((v & (kMuWriter | kMuReader | kMuEvent)) == 0 && // try fast acquire
1589  mu_.compare_exchange_strong(v, kMuWriter | v,
1590  std::memory_order_acquire,
1591  std::memory_order_relaxed)) {
1592  DebugOnlyLockEnter(this);
1593  ABSL_TSAN_MUTEX_POST_LOCK(this, __tsan_mutex_try_lock, 0);
1594  return true;
1595  }
1596  if ((v & kMuEvent) != 0) { // we're recording events
1597  if ((v & kExclusive->slow_need_zero) == 0 && // try fast acquire
1598  mu_.compare_exchange_strong(
1599  v, (kExclusive->fast_or | v) + kExclusive->fast_add,
1600  std::memory_order_acquire, std::memory_order_relaxed)) {
1601  DebugOnlyLockEnter(this);
1602  PostSynchEvent(this, SYNCH_EV_TRYLOCK_SUCCESS);
1603  ABSL_TSAN_MUTEX_POST_LOCK(this, __tsan_mutex_try_lock, 0);
1604  return true;
1605  } else {
1606  PostSynchEvent(this, SYNCH_EV_TRYLOCK_FAILED);
1607  }
1608  }
1609  ABSL_TSAN_MUTEX_POST_LOCK(
1610  this, __tsan_mutex_try_lock | __tsan_mutex_try_lock_failed, 0);
1611  return false;
1612 }
1613 
1614 ABSL_XRAY_LOG_ARGS(1) bool Mutex::ReaderTryLock() {
1615  ABSL_TSAN_MUTEX_PRE_LOCK(this,
1616  __tsan_mutex_read_lock | __tsan_mutex_try_lock);
1617  intptr_t v = mu_.load(std::memory_order_relaxed);
1618  // The while-loops (here and below) iterate only if the mutex word keeps
1619  // changing (typically because the reader count changes) under the CAS. We
1620  // limit the number of attempts to avoid having to think about livelock.
1621  int loop_limit = 5;
1622  while ((v & (kMuWriter|kMuWait|kMuEvent)) == 0 && loop_limit != 0) {
1623  if (mu_.compare_exchange_strong(v, (kMuReader | v) + kMuOne,
1624  std::memory_order_acquire,
1625  std::memory_order_relaxed)) {
1626  DebugOnlyLockEnter(this);
1627  ABSL_TSAN_MUTEX_POST_LOCK(
1628  this, __tsan_mutex_read_lock | __tsan_mutex_try_lock, 0);
1629  return true;
1630  }
1631  loop_limit--;
1632  v = mu_.load(std::memory_order_relaxed);
1633  }
1634  if ((v & kMuEvent) != 0) { // we're recording events
1635  loop_limit = 5;
1636  while ((v & kShared->slow_need_zero) == 0 && loop_limit != 0) {
1637  if (mu_.compare_exchange_strong(v, (kMuReader | v) + kMuOne,
1638  std::memory_order_acquire,
1639  std::memory_order_relaxed)) {
1640  DebugOnlyLockEnter(this);
1641  PostSynchEvent(this, SYNCH_EV_READERTRYLOCK_SUCCESS);
1642  ABSL_TSAN_MUTEX_POST_LOCK(
1643  this, __tsan_mutex_read_lock | __tsan_mutex_try_lock, 0);
1644  return true;
1645  }
1646  loop_limit--;
1647  v = mu_.load(std::memory_order_relaxed);
1648  }
1649  if ((v & kMuEvent) != 0) {
1650  PostSynchEvent(this, SYNCH_EV_READERTRYLOCK_FAILED);
1651  }
1652  }
1653  ABSL_TSAN_MUTEX_POST_LOCK(this,
1654  __tsan_mutex_read_lock | __tsan_mutex_try_lock |
1655  __tsan_mutex_try_lock_failed,
1656  0);
1657  return false;
1658 }
1659 
1660 ABSL_XRAY_LOG_ARGS(1) void Mutex::Unlock() {
1661  ABSL_TSAN_MUTEX_PRE_UNLOCK(this, 0);
1662  DebugOnlyLockLeave(this);
1663  intptr_t v = mu_.load(std::memory_order_relaxed);
1664 
1665  if (kDebugMode && ((v & (kMuWriter | kMuReader)) != kMuWriter)) {
1666  ABSL_RAW_LOG(FATAL, "Mutex unlocked when destroyed or not locked: v=0x%x",
1667  static_cast<unsigned>(v));
1668  }
1669 
1670  // should_try_cas is whether we'll try a compare-and-swap immediately.
1671  // NOTE: optimized out when kDebugMode is false.
1672  bool should_try_cas = ((v & (kMuEvent | kMuWriter)) == kMuWriter &&
1673  (v & (kMuWait | kMuDesig)) != kMuWait);
1674  // But, we can use an alternate computation of it, that compilers
1675  // currently don't find on their own. When that changes, this function
1676  // can be simplified.
1677  intptr_t x = (v ^ (kMuWriter | kMuWait)) & (kMuWriter | kMuEvent);
1678  intptr_t y = (v ^ (kMuWriter | kMuWait)) & (kMuWait | kMuDesig);
1679  // Claim: "x == 0 && y > 0" is equivalent to should_try_cas.
1680  // Also, because kMuWriter and kMuEvent exceed kMuDesig and kMuWait,
1681  // all possible non-zero values for x exceed all possible values for y.
1682  // Therefore, (x == 0 && y > 0) == (x < y).
1683  if (kDebugMode && should_try_cas != (x < y)) {
1684  // We would usually use PRIdPTR here, but it is not correctly implemented
1685  // within the android toolchain.
1686  ABSL_RAW_LOG(FATAL, "internal logic error %llx %llx %llx\n",
1687  static_cast<long long>(v), static_cast<long long>(x),
1688  static_cast<long long>(y));
1689  }
1690  if (x < y &&
1691  mu_.compare_exchange_strong(v, v & ~(kMuWrWait | kMuWriter),
1692  std::memory_order_release,
1693  std::memory_order_relaxed)) {
1694  // fast writer release (writer with no waiters or with designated waker)
1695  } else {
1696  this->UnlockSlow(nullptr /*no waitp*/); // take slow path
1697  }
1698  ABSL_TSAN_MUTEX_POST_UNLOCK(this, 0);
1699 }
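// Illustrative check, not part of mutex.cc, of the "x < y" shortcut claimed
// above. The flag values here are assumptions made for the sketch; only their
// relative order matters (kMuWriter and kMuEvent above kMuDesig and kMuWait).
#include <cassert>
#include <cstdint>

inline void CheckUnlockFastPathPredicate() {
  constexpr intptr_t kDesig = 0x02, kWait = 0x04, kWriter = 0x08, kEvent = 0x10;
  for (intptr_t v = 0; v < 0x20; ++v) {  // enumerate the low flag bits
    const bool should_try_cas = ((v & (kEvent | kWriter)) == kWriter &&
                                 (v & (kWait | kDesig)) != kWait);
    const intptr_t x = (v ^ (kWriter | kWait)) & (kWriter | kEvent);
    const intptr_t y = (v ^ (kWriter | kWait)) & (kWait | kDesig);
    assert(should_try_cas == (x == 0 && y > 0));  // the stated claim
    assert(should_try_cas == (x < y));            // the branch actually used
  }
}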
1700 
1701 // Requires v to represent a reader-locked state.
1702 static bool ExactlyOneReader(intptr_t v) {
1703  assert((v & (kMuWriter|kMuReader)) == kMuReader);
1704  assert((v & kMuHigh) != 0);
1705  // The more straightforward "(v & kMuHigh) == kMuOne" also works, but
1706  // on some architectures the following generates slightly smaller code.
1707  // It may be faster too.
1708  constexpr intptr_t kMuMultipleWaitersMask = kMuHigh ^ kMuOne;
1709  return (v & kMuMultipleWaitersMask) == 0;
1710 }
1711 
1712 ABSL_XRAY_LOG_ARGS(1) void Mutex::ReaderUnlock() {
1713  ABSL_TSAN_MUTEX_PRE_UNLOCK(this, __tsan_mutex_read_lock);
1714  DebugOnlyLockLeave(this);
1715  intptr_t v = mu_.load(std::memory_order_relaxed);
1716  assert((v & (kMuWriter|kMuReader)) == kMuReader);
1717  if ((v & (kMuReader|kMuWait|kMuEvent)) == kMuReader) {
1718  // fast reader release (reader with no waiters)
1719  intptr_t clear = ExactlyOneReader(v) ? kMuReader|kMuOne : kMuOne;
1720  if (mu_.compare_exchange_strong(v, v - clear,
1721  std::memory_order_release,
1722  std::memory_order_relaxed)) {
1723  ABSL_TSAN_MUTEX_POST_UNLOCK(this, __tsan_mutex_read_lock);
1724  return;
1725  }
1726  }
1727  this->UnlockSlow(nullptr /*no waitp*/); // take slow path
1728  ABSL_TSAN_MUTEX_POST_UNLOCK(this, __tsan_mutex_read_lock);
1729 }
1730 
1731 // The zap_desig_waker bitmask is used to clear the designated waker flag in
1732 // the mutex if this thread has blocked, and therefore may be the designated
1733 // waker.
1734 static const intptr_t zap_desig_waker[] = {
1735  ~static_cast<intptr_t>(0), // not blocked
1736  ~static_cast<intptr_t>(
1737  kMuDesig) // blocked; turn off the designated waker bit
1738 };
1739 
1740 // The ignore_waiting_writers bitmask is used to ignore the existence
1741 // of waiting writers if a reader that has already blocked once
1742 // wakes up.
1743 static const intptr_t ignore_waiting_writers[] = {
1744  ~static_cast<intptr_t>(0), // not blocked
1745  ~static_cast<intptr_t>(
1746  kMuWrWait) // blocked; pretend there are no waiting writers
1747 };
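// Illustrative sketch, not part of mutex.cc, assuming kMuHasBlocked is the
// 0x1 flag bit: "flags & kMuHasBlocked" is 0 before the caller has blocked
// and 1 afterwards, so it indexes the two-entry tables above directly and the
// masking stays branch-free.
static inline intptr_t ZapDesigWakerIfBlocked(intptr_t v, int flags) {
  // Entry 0 keeps every bit of v; entry 1 clears kMuDesig.
  return v & zap_desig_waker[flags & kMuHasBlocked];
}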
1748 
1749 // Internal version of LockWhen(). See LockSlowWithDeadline()
1750 void Mutex::LockSlow(MuHow how, const Condition *cond, int flags) {
1751  ABSL_RAW_CHECK(
1752  this->LockSlowWithDeadline(how, cond, KernelTimeout::Never(), flags),
1753  "condition untrue on return from LockSlow");
1754 }
1755 
1756 // Compute cond->Eval() and tell race detectors that we do it under mutex mu.
1757 static inline bool EvalConditionAnnotated(const Condition *cond, Mutex *mu,
1758  bool locking, bool trylock,
1759  bool read_lock) {
1760  // Delicate annotation dance.
1761  // We are currently inside of read/write lock/unlock operation.
1762  // All memory accesses are ignored inside of mutex operations + for unlock
1763  // operation tsan considers that we've already released the mutex.
1764  bool res = false;
1765 #ifdef THREAD_SANITIZER
1766  const int flags = read_lock ? __tsan_mutex_read_lock : 0;
1767  const int tryflags = flags | (trylock ? __tsan_mutex_try_lock : 0);
1768 #endif
1769  if (locking) {
1770  // For lock we pretend that we have finished the operation,
1771  // evaluate the predicate, then unlock the mutex and start locking it again
1772  // to match the annotation at the end of outer lock operation.
1773  // Note: we can't simply do POST_LOCK, Eval, PRE_LOCK, because then tsan
1774  // will think the lock acquisition is recursive which will trigger
1775  // deadlock detector.
1776  ABSL_TSAN_MUTEX_POST_LOCK(mu, tryflags, 0);
1777  res = cond->Eval();
1778  // There is no "try" version of Unlock, so use flags instead of tryflags.
1779  ABSL_TSAN_MUTEX_PRE_UNLOCK(mu, flags);
1780  ABSL_TSAN_MUTEX_POST_UNLOCK(mu, flags);
1781  ABSL_TSAN_MUTEX_PRE_LOCK(mu, tryflags);
1782  } else {
1783  // Similarly, for unlock we pretend that we have unlocked the mutex,
1784  // lock the mutex, evaluate the predicate, and start unlocking it again
1785  // to match the annotation at the end of outer unlock operation.
1786  ABSL_TSAN_MUTEX_POST_UNLOCK(mu, flags);
1787  ABSL_TSAN_MUTEX_PRE_LOCK(mu, flags);
1788  ABSL_TSAN_MUTEX_POST_LOCK(mu, flags, 0);
1789  res = cond->Eval();
1790  ABSL_TSAN_MUTEX_PRE_UNLOCK(mu, flags);
1791  }
1792  // Prevent unused param warnings in non-TSAN builds.
1793  static_cast<void>(mu);
1794  static_cast<void>(trylock);
1795  static_cast<void>(read_lock);
1796  return res;
1797 }
1798 
1799 // Compute cond->Eval() hiding it from race detectors.
1800 // We are hiding it because inside of UnlockSlow we can evaluate a predicate
1801 // that was just added by a concurrent Lock operation; Lock adds the predicate
1802 // to the internal Mutex list without actually acquiring the Mutex
1803 // (it only acquires the internal spinlock, which is rightfully invisible for
1804 // tsan). As a result there is no tsan-visible synchronization between the
1805 // addition and this thread. So if we enabled race detection here,
1806 // it would race with the predicate initialization.
1807 static inline bool EvalConditionIgnored(Mutex *mu, const Condition *cond) {
1808  // Memory accesses are already ignored inside of lock/unlock operations,
1809  // but synchronization operations are also ignored. When we evaluate the
1810  // predicate we must ignore only memory accesses but not synchronization,
1811  // because missed synchronization can lead to false reports later.
1812  // So we "divert" (which un-ignores both memory accesses and synchronization)
1813  // and then separately turn on ignores of memory accesses.
1814  ABSL_TSAN_MUTEX_PRE_DIVERT(mu, 0);
1815  ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN();
1816  bool res = cond->Eval();
1817  ANNOTATE_IGNORE_READS_AND_WRITES_END();
1818  ABSL_TSAN_MUTEX_POST_DIVERT(mu, 0);
1819  static_cast<void>(mu); // Prevent unused param warning in non-TSAN builds.
1820  return res;
1821 }
1822 
1823 // Internal equivalent of *LockWhenWithDeadline(), where
1824 // "t" represents the absolute timeout; !t.has_timeout() means "forever".
1825 // "how" is "kShared" (for ReaderLockWhen) or "kExclusive" (for LockWhen)
1826 // In flags, bits are ored together:
1827 // - kMuHasBlocked indicates that the client has already blocked on the call so
1828 // the designated waker bit must be cleared and waiting writers should not
1829 // obstruct this call
1830 // - kMuIsCond indicates that this is a conditional acquire (condition variable,
1831 // Await, LockWhen) so contention profiling should be suppressed.
1832 bool Mutex::LockSlowWithDeadline(MuHow how, const Condition *cond,
1833  KernelTimeout t, int flags) {
1834  intptr_t v = mu_.load(std::memory_order_relaxed);
1835  bool unlock = false;
1836  if ((v & how->fast_need_zero) == 0 && // try fast acquire
1837  mu_.compare_exchange_strong(
1838  v, (how->fast_or | (v & zap_desig_waker[flags & kMuHasBlocked])) +
1839  how->fast_add,
1840  std::memory_order_acquire, std::memory_order_relaxed)) {
1841  if (cond == nullptr ||
1842  EvalConditionAnnotated(cond, this, true, false, how == kShared)) {
1843  return true;
1844  }
1845  unlock = true;
1846  }
1847  SynchWaitParams waitp(
1848  how, cond, t, nullptr /*no cvmu*/, Synch_GetPerThreadAnnotated(this),
1849  nullptr /*no cv_word*/);
1850  if (!Condition::GuaranteedEqual(cond, nullptr)) {
1851  flags |= kMuIsCond;
1852  }
1853  if (unlock) {
1854  this->UnlockSlow(&waitp);
1855  this->Block(waitp.thread);
1856  flags |= kMuHasBlocked;
1857  }
1858  this->LockSlowLoop(&waitp, flags);
1859  return waitp.cond != nullptr || // => cond known true from LockSlowLoop
1860  cond == nullptr ||
1861  EvalConditionAnnotated(cond, this, true, false, how == kShared);
1862 }
1863 
1864 // RAW_CHECK_FMT() takes a condition, a printf-style format string, and
1865 // the printf-style argument list. The format string must be a literal.
1866 // Arguments after the first are not evaluated unless the condition is true.
1867 #define RAW_CHECK_FMT(cond, ...) \
1868  do { \
1869  if (ABSL_PREDICT_FALSE(!(cond))) { \
1870  ABSL_RAW_LOG(FATAL, "Check " #cond " failed: " __VA_ARGS__); \
1871  } \
1872  } while (0)
1873 
1874 static void CheckForMutexCorruption(intptr_t v, const char* label) {
1875  // Test for either of two situations that should not occur in v:
1876  // kMuWriter and kMuReader
1877  // kMuWrWait and !kMuWait
1878  const uintptr_t w = v ^ kMuWait;
1879  // By flipping that bit, we can now test for:
1880  // kMuWriter and kMuReader in w
1881  // kMuWrWait and kMuWait in w
1882  // We've chosen these two pairs of values so that they will overlap,
1883  // respectively, when the word is left shifted by three. This allows us to
1884  // save a branch in the common (correct) case of them not being coincident.
1885  static_assert(kMuReader << 3 == kMuWriter, "must match");
1886  static_assert(kMuWait << 3 == kMuWrWait, "must match");
1887  if (ABSL_PREDICT_TRUE((w & (w << 3) & (kMuWriter | kMuWrWait)) == 0)) return;
1888  RAW_CHECK_FMT((v & (kMuWriter | kMuReader)) != (kMuWriter | kMuReader),
1889  "%s: Mutex corrupt: both reader and writer lock held: %p",
1890  label, reinterpret_cast<void *>(v));
1891  RAW_CHECK_FMT((v & (kMuWait | kMuWrWait)) != kMuWrWait,
1892  "%s: Mutex corrupt: waiting writer with no waiters: %p",
1893  label, reinterpret_cast<void *>(v));
1894  assert(false);
1895 }
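// Illustrative check, not part of mutex.cc, that the single branch above
// covers exactly the two corruption patterns. The flag values are assumptions
// for the sketch, chosen to satisfy the static_asserts above.
#include <cassert>
#include <cstdint>

inline void CheckCorruptionFastTest() {
  constexpr intptr_t kReader = 0x01, kWait = 0x04, kWriter = 0x08,
                     kWrWait = 0x20;
  static_assert(kReader << 3 == kWriter, "must match");
  static_assert(kWait << 3 == kWrWait, "must match");
  for (intptr_t v = 0; v < 0x40; ++v) {  // enumerate the low flag bits
    const bool corrupt = ((v & (kWriter | kReader)) == (kWriter | kReader)) ||
                         ((v & (kWait | kWrWait)) == kWrWait);
    const intptr_t w = v ^ kWait;  // flip kMuWait, as CheckForMutexCorruption does
    assert(corrupt == ((w & (w << 3) & (kWriter | kWrWait)) != 0));
  }
}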
1896 
1897 void Mutex::LockSlowLoop(SynchWaitParams *waitp, int flags) {
1898  int c = 0;
1899  intptr_t v = mu_.load(std::memory_order_relaxed);
1900  if ((v & kMuEvent) != 0) {
1901  PostSynchEvent(this,
1902  waitp->how == kExclusive? SYNCH_EV_LOCK: SYNCH_EV_READERLOCK);
1903  }
1904  ABSL_RAW_CHECK(
1905  waitp->thread->waitp == nullptr || waitp->thread->suppress_fatal_errors,
1906  "detected illegal recursion into Mutex code");
1907  for (;;) {
1908  v = mu_.load(std::memory_order_relaxed);
1909  CheckForMutexCorruption(v, "Lock");
1910  if ((v & waitp->how->slow_need_zero) == 0) {
1911  if (mu_.compare_exchange_strong(
1912  v, (waitp->how->fast_or |
1913  (v & zap_desig_waker[flags & kMuHasBlocked])) +
1914  waitp->how->fast_add,
1915  std::memory_order_acquire, std::memory_order_relaxed)) {
1916  if (waitp->cond == nullptr ||
1917  EvalConditionAnnotated(waitp->cond, this, true, false,
1918  waitp->how == kShared)) {
1919  break; // we timed out, or condition true, so return
1920  }
1921  this->UnlockSlow(waitp); // got lock but condition false
1922  this->Block(waitp->thread);
1923  flags |= kMuHasBlocked;
1924  c = 0;
1925  }
1926  } else { // need to access waiter list
1927  bool dowait = false;
1928  if ((v & (kMuSpin|kMuWait)) == 0) { // no waiters
1929  // This thread tries to become the one and only waiter.
1930  PerThreadSynch *new_h = Enqueue(nullptr, waitp, v, flags);
1931  intptr_t nv = (v & zap_desig_waker[flags & kMuHasBlocked] & kMuLow) |
1932  kMuWait;
1933  ABSL_RAW_CHECK(new_h != nullptr, "Enqueue to empty list failed");
1934  if (waitp->how == kExclusive && (v & kMuReader) != 0) {
1935  nv |= kMuWrWait;
1936  }
1937  if (mu_.compare_exchange_strong(
1938  v, reinterpret_cast<intptr_t>(new_h) | nv,
1939  std::memory_order_release, std::memory_order_relaxed)) {
1940  dowait = true;
1941  } else { // attempted Enqueue() failed
1942  // zero out the waitp field set by Enqueue()
1943  waitp->thread->waitp = nullptr;
1944  }
1945  } else if ((v & waitp->how->slow_inc_need_zero &
1946  ignore_waiting_writers[flags & kMuHasBlocked]) == 0) {
1947  // This is a reader that needs to increment the reader count,
1948  // but the count is currently held in the last waiter.
1949  if (mu_.compare_exchange_strong(
1950  v, (v & zap_desig_waker[flags & kMuHasBlocked]) | kMuSpin |
1951  kMuReader,
1952  std::memory_order_acquire, std::memory_order_relaxed)) {
1953  PerThreadSynch *h = GetPerThreadSynch(v);
1954  h->readers += kMuOne; // inc reader count in waiter
1955  do { // release spinlock
1956  v = mu_.load(std::memory_order_relaxed);
1957  } while (!mu_.compare_exchange_weak(v, (v & ~kMuSpin) | kMuReader,
1958  std::memory_order_release,
1959  std::memory_order_relaxed));
1960  if (waitp->cond == nullptr ||
1961  EvalConditionAnnotated(waitp->cond, this, true, false,
1962  waitp->how == kShared)) {
1963  break; // we timed out, or condition true, so return
1964  }
1965  this->UnlockSlow(waitp); // got lock but condition false
1966  this->Block(waitp->thread);
1967  flags |= kMuHasBlocked;
1968  c = 0;
1969  }
1970  } else if ((v & kMuSpin) == 0 && // attempt to queue ourselves
1971  mu_.compare_exchange_strong(
1972  v, (v & zap_desig_waker[flags & kMuHasBlocked]) | kMuSpin |
1973  kMuWait,
1974  std::memory_order_acquire, std::memory_order_relaxed)) {
1975  PerThreadSynch *h = GetPerThreadSynch(v);
1976  PerThreadSynch *new_h = Enqueue(h, waitp, v, flags);
1977  intptr_t wr_wait = 0;
1978  ABSL_RAW_CHECK(new_h != nullptr, "Enqueue to list failed");
1979  if (waitp->how == kExclusive && (v & kMuReader) != 0) {
1980  wr_wait = kMuWrWait; // give priority to a waiting writer
1981  }
1982  do { // release spinlock
1983  v = mu_.load(std::memory_order_relaxed);
1984  } while (!mu_.compare_exchange_weak(
1985  v, (v & (kMuLow & ~kMuSpin)) | kMuWait | wr_wait |
1986  reinterpret_cast<intptr_t>(new_h),
1987  std::memory_order_release, std::memory_order_relaxed));
1988  dowait = true;
1989  }
1990  if (dowait) {
1991  this->Block(waitp->thread); // wait until removed from list or timeout
1992  flags |= kMuHasBlocked;
1993  c = 0;
1994  }
1995  }
1996  ABSL_RAW_CHECK(
1997  waitp->thread->waitp == nullptr || waitp->thread->suppress_fatal_errors,
1998  "detected illegal recursion into Mutex code");
1999  c = Delay(c, GENTLE); // delay, then try again
2000  }
2001  ABSL_RAW_CHECK(
2002  waitp->thread->waitp == nullptr || waitp->thread->suppress_fatal_errors,
2003  "detected illegal recursion into Mutex code");
2004  if ((v & kMuEvent) != 0) {
2005  PostSynchEvent(this,
2006  waitp->how == kExclusive? SYNCH_EV_LOCK_RETURNING :
2007  SYNCH_EV_READERLOCK_RETURNING);
2008  }
2009 }
2010 
2011 // Unlock this mutex, which is held by the current thread.
2012 // If waitp is non-zero, it must be the wait parameters for the current thread
2013 // which holds the lock but is not runnable because its condition is false
2014 // or it is in the process of blocking on a condition variable; it must requeue
2015 // itself on the mutex/condvar to wait for its condition to become true.
2016 void Mutex::UnlockSlow(SynchWaitParams *waitp) {
2017  intptr_t v = mu_.load(std::memory_order_relaxed);
2018  this->AssertReaderHeld();
2019  CheckForMutexCorruption(v, "Unlock");
2020  if ((v & kMuEvent) != 0) {
2021  PostSynchEvent(this,
2022  (v & kMuWriter) != 0? SYNCH_EV_UNLOCK: SYNCH_EV_READERUNLOCK);
2023  }
2024  int c = 0;
2025  // the waiter under consideration to wake, or zero
2026  PerThreadSynch *w = nullptr;
2027  // the predecessor to w or zero
2028  PerThreadSynch *pw = nullptr;
2029  // head of the list searched previously, or zero
2030  PerThreadSynch *old_h = nullptr;
2031  // a condition that's known to be false.
2032  const Condition *known_false = nullptr;
2033  PerThreadSynch *wake_list = kPerThreadSynchNull; // list of threads to wake
2034  intptr_t wr_wait = 0; // set to kMuWrWait if we wake a reader and a
2035  // later writer could have acquired the lock
2036  // (starvation avoidance)
2037  ABSL_RAW_CHECK(waitp == nullptr || waitp->thread->waitp == nullptr ||
2038  waitp->thread->suppress_fatal_errors,
2039  "detected illegal recursion into Mutex code");
2040  // This loop finds threads wake_list to wakeup if any, and removes them from
2041  // the list of waiters. In addition, it places waitp.thread on the queue of
2042  // waiters if waitp is non-zero.
2043  for (;;) {
2044  v = mu_.load(std::memory_order_relaxed);
2045  if ((v & kMuWriter) != 0 && (v & (kMuWait | kMuDesig)) != kMuWait &&
2046  waitp == nullptr) {
2047  // fast writer release (writer with no waiters or with designated waker)
2048  if (mu_.compare_exchange_strong(v, v & ~(kMuWrWait | kMuWriter),
2049  std::memory_order_release,
2050  std::memory_order_relaxed)) {
2051  return;
2052  }
2053  } else if ((v & (kMuReader | kMuWait)) == kMuReader && waitp == nullptr) {
2054  // fast reader release (reader with no waiters)
2055  intptr_t clear = ExactlyOneReader(v) ? kMuReader | kMuOne : kMuOne;
2056  if (mu_.compare_exchange_strong(v, v - clear,
2057  std::memory_order_release,
2058  std::memory_order_relaxed)) {
2059  return;
2060  }
2061  } else if ((v & kMuSpin) == 0 && // attempt to get spinlock
2062  mu_.compare_exchange_strong(v, v | kMuSpin,
2063  std::memory_order_acquire,
2064  std::memory_order_relaxed)) {
2065  if ((v & kMuWait) == 0) { // no one to wake
2066  intptr_t nv;
2067  bool do_enqueue = true; // always Enqueue() the first time
2068  ABSL_RAW_CHECK(waitp != nullptr,
2069  "UnlockSlow is confused"); // about to sleep
2070  do { // must loop to release spinlock as reader count may change
2071  v = mu_.load(std::memory_order_relaxed);
2072  // decrement reader count if there are readers
2073  intptr_t new_readers = (v >= kMuOne)? v - kMuOne : v;
2074  PerThreadSynch *new_h = nullptr;
2075  if (do_enqueue) {
2076  // If we are enqueuing on a CondVar (waitp->cv_word != nullptr) then
2077  // we must not retry here. The initial attempt will always have
2078  // succeeded, further attempts would enqueue us against *this due to
2079  // Fer() handling.
2080  do_enqueue = (waitp->cv_word == nullptr);
2081  new_h = Enqueue(nullptr, waitp, new_readers, kMuIsCond);
2082  }
2083  intptr_t clear = kMuWrWait | kMuWriter; // by default clear write bit
2084  if ((v & kMuWriter) == 0 && ExactlyOneReader(v)) { // last reader
2085  clear = kMuWrWait | kMuReader; // clear read bit
2086  }
2087  nv = (v & kMuLow & ~clear & ~kMuSpin);
2088  if (new_h != nullptr) {
2089  nv |= kMuWait | reinterpret_cast<intptr_t>(new_h);
2090  } else { // new_h could be nullptr if we queued ourselves on a
2091  // CondVar
2092  // In that case, we must place the reader count back in the mutex
2093  // word, as Enqueue() did not store it in the new waiter.
2094  nv |= new_readers & kMuHigh;
2095  }
2096  // release spinlock & our lock; retry if reader-count changed
2097  // (writer count cannot change since we hold lock)
2098  } while (!mu_.compare_exchange_weak(v, nv,
2099  std::memory_order_release,
2100  std::memory_order_relaxed));
2101  break;
2102  }
2103 
2104  // There are waiters.
2105  // Set h to the head of the circular waiter list.
2106  PerThreadSynch *h = GetPerThreadSynch(v);
2107  if ((v & kMuReader) != 0 && (h->readers & kMuHigh) > kMuOne) {
2108  // a reader but not the last
2109  h->readers -= kMuOne; // release our lock
2110  intptr_t nv = v; // normally just release spinlock
2111  if (waitp != nullptr) { // but waitp!=nullptr => must queue ourselves
2112  PerThreadSynch *new_h = Enqueue(h, waitp, v, kMuIsCond);
2113  ABSL_RAW_CHECK(new_h != nullptr,
2114  "waiters disappeared during Enqueue()!");
2115  nv &= kMuLow;
2116  nv |= kMuWait | reinterpret_cast<intptr_t>(new_h);
2117  }
2118  mu_.store(nv, std::memory_order_release); // release spinlock
2119  // can release with a store because there were waiters
2120  break;
2121  }
2122 
2123  // Either we didn't search before, or we marked the queue
2124  // as "maybe_unlocking" and no one else should have changed it.
2125  ABSL_RAW_CHECK(old_h == nullptr || h->maybe_unlocking,
2126  "Mutex queue changed beneath us");
2127 
2128  // The lock is becoming free, and there's a waiter
2129  if (old_h != nullptr &&
2130  !old_h->may_skip) { // we used old_h as a terminator
2131  old_h->may_skip = true; // allow old_h to skip once more
2132  ABSL_RAW_CHECK(old_h->skip == nullptr, "illegal skip from head");
2133  if (h != old_h && MuSameCondition(old_h, old_h->next)) {
2134  old_h->skip = old_h->next; // old_h not head & can skip to successor
2135  }
2136  }
2137  if (h->next->waitp->how == kExclusive &&
2138  Condition::GuaranteedEqual(h->next->waitp->cond, nullptr)) {
2139  // easy case: writer with no condition; no need to search
2140  pw = h; // wake w, the successor of h (=pw)
2141  w = h->next;
2142  w->wake = true;
2143  // We are waking up a writer. This writer may be racing against
2144  // an already awake reader for the lock. We want the
2145  // writer to usually win this race,
2146  // because if it doesn't, we can potentially keep taking a reader
2147  // perpetually and writers will starve. Worse than
2148  // that, this can also starve other readers if kMuWrWait gets set
2149  // later.
2150  wr_wait = kMuWrWait;
2151  } else if (w != nullptr && (w->waitp->how == kExclusive || h == old_h)) {
2152  // we found a waiter w to wake on a previous iteration and either it's
2153  // a writer, or we've searched the entire list so we have all the
2154  // readers.
2155  if (pw == nullptr) { // if w's predecessor is unknown, it must be h
2156  pw = h;
2157  }
2158  } else {
2159  // At this point we don't know all the waiters to wake, and the first
2160  // waiter has a condition or is a reader. We avoid searching over
2161  // waiters we've searched on previous iterations by starting at
2162  // old_h if it's set. If old_h==h, there's no one to wakeup at all.
2163  if (old_h == h) { // we've searched before, and nothing's new
2164  // so there's no one to wake.
2165  intptr_t nv = (v & ~(kMuReader|kMuWriter|kMuWrWait));
2166  h->readers = 0;
2167  h->maybe_unlocking = false; // finished unlocking
2168  if (waitp != nullptr) { // we must queue ourselves and sleep
2169  PerThreadSynch *new_h = Enqueue(h, waitp, v, kMuIsCond);
2170  nv &= kMuLow;
2171  if (new_h != nullptr) {
2172  nv |= kMuWait | reinterpret_cast<intptr_t>(new_h);
2173  } // else new_h could be nullptr if we queued ourselves on a
2174  // CondVar
2175  }
2176  // release spinlock & lock
2177  // can release with a store because there were waiters
2178  mu_.store(nv, std::memory_order_release);
2179  break;
2180  }
2181 
2182  // set up to walk the list
2183  PerThreadSynch *w_walk; // current waiter during list walk
2184  PerThreadSynch *pw_walk; // previous waiter during list walk
2185  if (old_h != nullptr) { // we've searched up to old_h before
2186  pw_walk = old_h;
2187  w_walk = old_h->next;
2188  } else { // no prior search, start at beginning
2189  pw_walk =
2190  nullptr; // h->next's predecessor may change; don't record it
2191  w_walk = h->next;
2192  }
2193 
2194  h->may_skip = false; // ensure we never skip past h in future searches
2195  // even if other waiters are queued after it.
2196  ABSL_RAW_CHECK(h->skip == nullptr, "illegal skip from head");
2197 
2198  h->maybe_unlocking = true; // we're about to scan the waiter list
2199  // without the spinlock held.
2200  // Enqueue must be conservative about
2201  // priority queuing.
2202 
2203  // We must release the spinlock to evaluate the conditions.
2204  mu_.store(v, std::memory_order_release); // release just spinlock
2205  // can release with a store because there were waiters
2206 
2207  // h is the last waiter queued, and w_walk the first unsearched waiter.
2208  // Without the spinlock, the locations mu_ and h->next may now change
2209  // underneath us, but since we hold the lock itself, the only legal
2210  // change is to add waiters between h and w_walk. Therefore, it's safe
2211  // to walk the path from w_walk to h inclusive. (TryRemove() can remove
2212  // a waiter anywhere, but it acquires both the spinlock and the Mutex)
2213 
2214  old_h = h; // remember we searched to here
2215 
2216  // Walk the path up to and including h looking for waiters we can wake.
2217  while (pw_walk != h) {
2218  w_walk->wake = false;
2219  if (w_walk->waitp->cond ==
2220  nullptr || // no condition => vacuously true OR
2221  (w_walk->waitp->cond != known_false &&
2222  // this thread's condition is not known false, AND
2223  // is in fact true
2224  EvalConditionIgnored(this, w_walk->waitp->cond))) {
2225  if (w == nullptr) {
2226  w_walk->wake = true; // can wake this waiter
2227  w = w_walk;
2228  pw = pw_walk;
2229  if (w_walk->waitp->how == kExclusive) {
2230  wr_wait = kMuWrWait;
2231  break; // bail if waking this writer
2232  }
2233  } else if (w_walk->waitp->how == kShared) { // wake if a reader
2234  w_walk->wake = true;
2235  } else { // writer with true condition
2236  wr_wait = kMuWrWait;
2237  }
2238  } else { // can't wake; condition false
2239  known_false = w_walk->waitp->cond; // remember last false condition
2240  }
2241  if (w_walk->wake) { // we're waking reader w_walk
2242  pw_walk = w_walk; // don't skip similar waiters
2243  } else { // not waking; skip as much as possible
2244  pw_walk = Skip(w_walk);
2245  }
2246  // If pw_walk == h, then load of pw_walk->next can race with
2247  // concurrent write in Enqueue(). However, at the same time
2248  // we do not need to do the load, because we will bail out
2249  // from the loop anyway.
2250  if (pw_walk != h) {
2251  w_walk = pw_walk->next;
2252  }
2253  }
2254 
2255  continue; // restart for(;;)-loop to wakeup w or to find more waiters
2256  }
2257  ABSL_RAW_CHECK(pw->next == w, "pw not w's predecessor");
2258  // The first (and perhaps only) waiter we've chosen to wake is w, whose
2259  // predecessor is pw. If w is a reader, we must wake all the other
2260  // waiters with wake==true as well. We may also need to queue
2261  // ourselves if waitp != null. The spinlock and the lock are still
2262  // held.
2263 
2264  // This traverses the list in [ pw->next, h ], where h is the head,
2265  // removing all elements with wake==true and placing them in the
2266  // singly-linked list wake_list. Returns the new head.
2267  h = DequeueAllWakeable(h, pw, &wake_list);
2268 
2269  intptr_t nv = (v & kMuEvent) | kMuDesig;
2270  // assume no waiters left,
2271  // set kMuDesig for INV1a
2272 
2273  if (waitp != nullptr) { // we must queue ourselves and sleep
2274  h = Enqueue(h, waitp, v, kMuIsCond);
2275  // h is new last waiter; could be null if we queued ourselves on a
2276  // CondVar
2277  }
2278 
2279  ABSL_RAW_CHECK(wake_list != kPerThreadSynchNull,
2280  "unexpected empty wake list");
2281 
2282  if (h != nullptr) { // there are waiters left
2283  h->readers = 0;
2284  h->maybe_unlocking = false; // finished unlocking
2285  nv |= wr_wait | kMuWait | reinterpret_cast<intptr_t>(h);
2286  }
2287 
2288  // release both spinlock & lock
2289  // can release with a store because there were waiters
2290  mu_.store(nv, std::memory_order_release);
2291  break; // out of for(;;)-loop
2292  }
2293  c = Delay(c, AGGRESSIVE); // aggressive here; no one can proceed till we do
2294  } // end of for(;;)-loop
2295 
2296  if (wake_list != kPerThreadSynchNull) {
2297  int64_t enqueue_timestamp = wake_list->waitp->contention_start_cycles;
2298  bool cond_waiter = wake_list->cond_waiter;
2299  do {
2300  wake_list = Wakeup(wake_list); // wake waiters
2301  } while (wake_list != kPerThreadSynchNull);
2302  if (!cond_waiter) {
2303  // Sample lock contention events only if the (first) waiter was trying to
2304  // acquire the lock, not waiting on a condition variable or Condition.
2305  int64_t wait_cycles = base_internal::CycleClock::Now() - enqueue_timestamp;
2306  mutex_tracer("slow release", this, wait_cycles);
2307  ABSL_TSAN_MUTEX_PRE_DIVERT(this, 0);
2308  submit_profile_data(enqueue_timestamp);
2309  ABSL_TSAN_MUTEX_POST_DIVERT(this, 0);
2310  }
2311  }
2312 }
2313 
2314 // Used by CondVar implementation to reacquire mutex after waking from
2315 // condition variable. This routine is used instead of Lock() because the
2316 // waiting thread may have been moved from the condition variable queue to the
2317 // mutex queue without a wakeup, by Trans(). In that case, when the thread is
2318 // finally woken, the woken thread will believe it has been woken from the
2319 // condition variable (i.e. its PC will be in the CondVar code), when
2320 // in fact it has just been woken from the mutex. Thus, it must enter the slow
2321 // path of the mutex in the same state as if it had just woken from the mutex.
2322 // That is, it must ensure to clear kMuDesig (INV1b).
2323 void Mutex::Trans(MuHow how) {
2324  this->LockSlow(how, nullptr, kMuHasBlocked | kMuIsCond);
2325 }
2326 
2327 // Used by CondVar implementation to effectively wake thread w from the
2328 // condition variable. If this mutex is free, we simply wake the thread.
2329 // It will later acquire the mutex with high probability. Otherwise, we
2330 // enqueue thread w on this mutex.
2331 void Mutex::Fer(PerThreadSynch *w) {
2332  int c = 0;
2333  ABSL_RAW_CHECK(w->waitp->cond == nullptr,
2334  "Mutex::Fer while waiting on Condition");
2335  ABSL_RAW_CHECK(!w->waitp->timeout.has_timeout(),
2336  "Mutex::Fer while in timed wait");
2337  ABSL_RAW_CHECK(w->waitp->cv_word == nullptr,
2338  "Mutex::Fer with pending CondVar queueing");
2339  for (;;) {
2340  intptr_t v = mu_.load(std::memory_order_relaxed);
2341  // Note: must not queue if the mutex is unlocked (nobody will wake it).
2342  // For example, we can have only kMuWait (conditional) or maybe
2343  // kMuWait|kMuWrWait.
2344  // conflicting != 0 implies that the waking thread cannot currently take
2345  // the mutex, which in turn implies that someone else has it and can wake
2346  // us if we queue.
2347  const intptr_t conflicting =
2348  kMuWriter | (w->waitp->how == kShared ? 0 : kMuReader);
2349  if ((v & conflicting) == 0) {
2350  w->next = nullptr;
2351  w->state.store(PerThreadSynch::kAvailable, std::memory_order_release);
2352  IncrementSynchSem(this, w);
2353  return;
2354  } else {
2355  if ((v & (kMuSpin|kMuWait)) == 0) { // no waiters
2356  // This thread tries to become the one and only waiter.
2357  PerThreadSynch *new_h = Enqueue(nullptr, w->waitp, v, kMuIsCond);
2358  ABSL_RAW_CHECK(new_h != nullptr,
2359  "Enqueue failed"); // we must queue ourselves
2360  if (mu_.compare_exchange_strong(
2361  v, reinterpret_cast<intptr_t>(new_h) | (v & kMuLow) | kMuWait,
2362  std::memory_order_release, std::memory_order_relaxed)) {
2363  return;
2364  }
2365  } else if ((v & kMuSpin) == 0 &&
2366  mu_.compare_exchange_strong(v, v | kMuSpin | kMuWait)) {
2367  PerThreadSynch *h = GetPerThreadSynch(v);
2368  PerThreadSynch *new_h = Enqueue(h, w->waitp, v, kMuIsCond);
2369  ABSL_RAW_CHECK(new_h != nullptr,
2370  "Enqueue failed"); // we must queue ourselves
2371  do {
2372  v = mu_.load(std::memory_order_relaxed);
2373  } while (!mu_.compare_exchange_weak(
2374  v,
2375  (v & kMuLow & ~kMuSpin) | kMuWait |
2376  reinterpret_cast<intptr_t>(new_h),
2377  std::memory_order_release, std::memory_order_relaxed));
2378  return;
2379  }
2380  }
2381  c = Delay(c, GENTLE);
2382  }
2383 }
2384 
2385 void Mutex::AssertHeld() const {
2386  if ((mu_.load(std::memory_order_relaxed) & kMuWriter) == 0) {
2387  SynchEvent *e = GetSynchEvent(this);
2388  ABSL_RAW_LOG(FATAL, "thread should hold write lock on Mutex %p %s",
2389  static_cast<const void *>(this),
2390  (e == nullptr ? "" : e->name));
2391  }
2392 }
2393 
2394 void Mutex::AssertReaderHeld() const {
2395  if ((mu_.load(std::memory_order_relaxed) & (kMuReader | kMuWriter)) == 0) {
2396  SynchEvent *e = GetSynchEvent(this);
2397  ABSL_RAW_LOG(
2398  FATAL, "thread should hold at least a read lock on Mutex %p %s",
2399  static_cast<const void *>(this), (e == nullptr ? "" : e->name));
2400  }
2401 }
2402 
2403 // -------------------------------- condition variables
2404 static const intptr_t kCvSpin = 0x0001L; // spinlock protects waiter list
2405 static const intptr_t kCvEvent = 0x0002L; // record events
2406 
2407 static const intptr_t kCvLow = 0x0003L; // low order bits of CV
2408 
2409 // Hack to make constant values available to gdb pretty printer
2410 enum { kGdbCvSpin = kCvSpin, kGdbCvEvent = kCvEvent, kGdbCvLow = kCvLow, };
2411 
2412 static_assert(PerThreadSynch::kAlignment > kCvLow,
2413  "PerThreadSynch::kAlignment must be greater than kCvLow");
2414 
2415 void CondVar::EnableDebugLog(const char *name) {
2416  SynchEvent *e = EnsureSynchEvent(&this->cv_, name, kCvEvent, kCvSpin);
2417  e->log = true;
2418  UnrefSynchEvent(e);
2419 }
2420 
2421 CondVar::~CondVar() {
2422  if ((cv_.load(std::memory_order_relaxed) & kCvEvent) != 0) {
2423  ForgetSynchEvent(&this->cv_, kCvEvent, kCvSpin);
2424  }
2425 }
2426 
2427 
2428 // Remove thread s from the list of waiters on this condition variable.
2429 void CondVar::Remove(PerThreadSynch *s) {
2430  intptr_t v;
2431  int c = 0;
2432  for (v = cv_.load(std::memory_order_relaxed);;
2433  v = cv_.load(std::memory_order_relaxed)) {
2434  if ((v & kCvSpin) == 0 && // attempt to acquire spinlock
2435  cv_.compare_exchange_strong(v, v | kCvSpin,
2436  std::memory_order_acquire,
2437  std::memory_order_relaxed)) {
2438  PerThreadSynch *h = reinterpret_cast<PerThreadSynch *>(v & ~kCvLow);
2439  if (h != nullptr) {
2440  PerThreadSynch *w = h;
2441  while (w->next != s && w->next != h) { // search for thread
2442  w = w->next;
2443  }
2444  if (w->next == s) { // found thread; remove it
2445  w->next = s->next;
2446  if (h == s) {
2447  h = (w == s) ? nullptr : w;
2448  }
2449  s->next = nullptr;
2450  s->state.store(PerThreadSynch::kAvailable, std::memory_order_release);
2451  }
2452  }
2453  // release spinlock
2454  cv_.store((v & kCvEvent) | reinterpret_cast<intptr_t>(h),
2455  std::memory_order_release);
2456  return;
2457  } else {
2458  c = Delay(c, GENTLE); // try again after a delay
2459  }
2460  }
2461 }
2462 
2463 // Queue thread waitp->thread on condition variable word cv_word using
2464 // wait parameters waitp.
2465 // We split this into a separate routine, rather than simply doing it as part
2466 // of WaitCommon(). If we were to queue ourselves on the condition variable
2467 // before calling Mutex::UnlockSlow(), the Mutex code might be re-entered (via
2468 // the logging code, or via a Condition function) and might potentially attempt
2469 // to block this thread. That would be a problem if the thread were already on
2470 // the condition variable waiter queue. Thus, we use the waitp->cv_word
2471 // to tell the unlock code to call CondVarEnqueue() to queue the thread on the
2472 // condition variable queue just before the mutex is to be unlocked, and (most
2473 // importantly) after any call to an external routine that might re-enter the
2474 // mutex code.
2475 static void CondVarEnqueue(SynchWaitParams *waitp) {
2476  // This thread might be transferred to the Mutex queue by Fer() when
2477  // we are woken. To make sure that is what happens, Enqueue() doesn't
2478  // call CondVarEnqueue() again but instead uses its normal code. We
2479  // must do this before we queue ourselves so that cv_word will be null
2480  // when seen by the dequeuer, who may wish immediately to requeue
2481  // this thread on another queue.
2482  std::atomic<intptr_t> *cv_word = waitp->cv_word;
2483  waitp->cv_word = nullptr;
2484 
2485  intptr_t v = cv_word->load(std::memory_order_relaxed);
2486  int c = 0;
2487  while ((v & kCvSpin) != 0 || // acquire spinlock
2488  !cv_word->compare_exchange_weak(v, v | kCvSpin,
2489  std::memory_order_acquire,
2490  std::memory_order_relaxed)) {
2491  c = Delay(c, GENTLE);
2492  v = cv_word->load(std::memory_order_relaxed);
2493  }
2494  ABSL_RAW_CHECK(waitp->thread->waitp == nullptr, "waiting when shouldn't be");
2495  waitp->thread->waitp = waitp; // prepare ourselves for waiting
2496  PerThreadSynch *h = reinterpret_cast<PerThreadSynch *>(v & ~kCvLow);
2497  if (h == nullptr) { // add this thread to waiter list
2498  waitp->thread->next = waitp->thread;
2499  } else {
2500  waitp->thread->next = h->next;
2501  h->next = waitp->thread;
2502  }
2503  waitp->thread->state.store(PerThreadSynch::kQueued,
2504  std::memory_order_relaxed);
2505  cv_word->store((v & kCvEvent) | reinterpret_cast<intptr_t>(waitp->thread),
2506  std::memory_order_release);
2507 }
2508 
2509 bool CondVar::WaitCommon(Mutex *mutex, KernelTimeout t) {
2510  bool rc = false; // return value; true iff we timed-out
2511 
2512  intptr_t mutex_v = mutex->mu_.load(std::memory_order_relaxed);
2513  Mutex::MuHow mutex_how = ((mutex_v & kMuWriter) != 0) ? kExclusive : kShared;
2514  ABSL_TSAN_MUTEX_PRE_UNLOCK(mutex, TsanFlags(mutex_how));
2515 
2516  // maybe trace this call
2517  intptr_t v = cv_.load(std::memory_order_relaxed);
2518  cond_var_tracer("Wait", this);
2519  if ((v & kCvEvent) != 0) {
2520  PostSynchEvent(this, SYNCH_EV_WAIT);
2521  }
2522 
2523  // Release mu and wait on condition variable.
2524  SynchWaitParams waitp(mutex_how, nullptr, t, mutex,
2525  Synch_GetPerThreadAnnotated(mutex), &cv_);
2526  // UnlockSlow() will call CondVarEnqueue() just before releasing the
2527  // Mutex, thus queuing this thread on the condition variable. See
2528  // CondVarEnqueue() for the reasons.
2529  mutex->UnlockSlow(&waitp);
2530 
2531  // wait for signal
2532  while (waitp.thread->state.load(std::memory_order_acquire) ==
2533  PerThreadSynch::kQueued) {
2534  if (!Mutex::DecrementSynchSem(mutex, waitp.thread, t)) {
2535  this->Remove(waitp.thread);
2536  rc = true;
2537  }
2538  }
2539 
2540  ABSL_RAW_CHECK(waitp.thread->waitp != nullptr, "not waiting when should be");
2541  waitp.thread->waitp = nullptr; // cleanup
2542 
2543  // maybe trace this call
2544  cond_var_tracer("Unwait", this);
2545  if ((v & kCvEvent) != 0) {
2546  PostSynchEvent(this, SYNCH_EV_WAIT_RETURNING);
2547  }
2548 
2549  // From synchronization point of view Wait is unlock of the mutex followed
2550  // by lock of the mutex. We've annotated start of unlock in the beginning
2551  // of the function. Now, finish unlock and annotate lock of the mutex.
2552  // (Trans is effectively lock).
2553  ABSL_TSAN_MUTEX_POST_UNLOCK(mutex, TsanFlags(mutex_how));
2554  ABSL_TSAN_MUTEX_PRE_LOCK(mutex, TsanFlags(mutex_how));
2555  mutex->Trans(mutex_how); // Reacquire mutex
2556  ABSL_TSAN_MUTEX_POST_LOCK(mutex, TsanFlags(mutex_how), 0);
2557  return rc;
2558 }
2559 
2560 bool CondVar::WaitWithTimeout(Mutex *mu, absl::Duration timeout) {
2561  return WaitWithDeadline(mu, DeadlineFromTimeout(timeout));
2562 }
2563 
2564 bool CondVar::WaitWithDeadline(Mutex *mu, absl::Time deadline) {
2565  return WaitCommon(mu, KernelTimeout(deadline));
2566 }
2567 
2568 void CondVar::Wait(Mutex *mu) {
2569  WaitCommon(mu, KernelTimeout::Never());
2570 }
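// Illustrative sketch, not part of mutex.cc: the predicate-loop idiom that the
// CondVar wake-up machinery above and below serves. MailBox/Send/TryReceive
// are hypothetical names; the absl::Mutex, absl::CondVar and absl time APIs
// are assumed as declared in their public headers.
#include "absl/synchronization/mutex.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"

struct MailBox {
  absl::Mutex mu;
  absl::CondVar cv;
  bool has_value = false;  // guarded by mu
  int value = 0;           // guarded by mu
};

inline void Send(MailBox *box, int v) {
  absl::MutexLock l(&box->mu);
  box->value = v;
  box->has_value = true;
  box->cv.Signal();  // wake one waiter; see Signal() below
}

// Returns true if a value arrived before the deadline.
inline bool TryReceive(MailBox *box, int *out) {
  absl::MutexLock l(&box->mu);
  const absl::Time deadline = absl::Now() + absl::Seconds(1);
  // Always loop on the predicate: WaitWithDeadline() returns true on timeout
  // and may also return with the condition still false (spurious wakeup).
  while (!box->has_value) {
    if (box->cv.WaitWithDeadline(&box->mu, deadline)) return false;
  }
  *out = box->value;
  box->has_value = false;
  return true;
}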
2571 
2572 // Wake thread w
2573 // If it was a timed wait, w will be waiting on w->cv
2574 // Otherwise, if the wait has no associated Mutex, w will be waiting on w->sem
2575 // Otherwise, w is transferred to that Mutex via Mutex::Fer().
2576 void CondVar::Wakeup(PerThreadSynch *w) {
2577  if (w->waitp->timeout.has_timeout() || w->waitp->cvmu == nullptr) {
2578  // The waiting thread only needs to observe "w->state == kAvailable" to be
2579  // released, so we must cache "cvmu" before clearing "next".
2580  Mutex *mu = w->waitp->cvmu;
2581  w->next = nullptr;
2582  w->state.store(PerThreadSynch::kAvailable, std::memory_order_release);
2583  Mutex::IncrementSynchSem(mu, w);
2584  } else {
2585  w->waitp->cvmu->Fer(w);
2586  }
2587 }
2588 
2589 void CondVar::Signal() {
2590  ABSL_TSAN_MUTEX_PRE_SIGNAL(nullptr, 0);
2591  intptr_t v;
2592  int c = 0;
2593  for (v = cv_.load(std::memory_order_relaxed); v != 0;
2594  v = cv_.load(std::memory_order_relaxed)) {
2595  if ((v & kCvSpin) == 0 && // attempt to acquire spinlock
2596  cv_.compare_exchange_strong(v, v | kCvSpin,
2597  std::memory_order_acquire,
2598  std::memory_order_relaxed)) {
2599  PerThreadSynch *h = reinterpret_cast<PerThreadSynch *>(v & ~kCvLow);
2600  PerThreadSynch *w = nullptr;
2601  if (h != nullptr) { // remove first waiter
2602  w = h->next;
2603  if (w == h) {
2604  h = nullptr;
2605  } else {
2606  h->next = w->next;
2607  }
2608  }
2609  // release spinlock
2610  cv_.store((v & kCvEvent) | reinterpret_cast<intptr_t>(h),
2611  std::memory_order_release);
2612  if (w != nullptr) {
2613  CondVar::Wakeup(w); // wake waiter, if there was one
2614  cond_var_tracer("Signal wakeup", this);
2615  }
2616  if ((v & kCvEvent) != 0) {
2617  PostSynchEvent(this, SYNCH_EV_SIGNAL);
2618  }
2619  ABSL_TSAN_MUTEX_POST_SIGNAL(nullptr, 0);
2620  return;
2621  } else {
2622  c = Delay(c, GENTLE);
2623  }
2624  }
2625  ABSL_TSAN_MUTEX_POST_SIGNAL(nullptr, 0);
2626 }
2627 
2628 void CondVar::SignalAll () {
2629  ABSL_TSAN_MUTEX_PRE_SIGNAL(nullptr, 0);
2630  intptr_t v;
2631  int c = 0;
2632  for (v = cv_.load(std::memory_order_relaxed); v != 0;
2633  v = cv_.load(std::memory_order_relaxed)) {
2634  // empty the list if spinlock free
2635  // We do this by simply setting the list to empty using
2636  // compare and swap. We then have the entire list in our hands,
2637  // which cannot be changing since we grabbed it while no one
2638  // held the lock.
2639  if ((v & kCvSpin) == 0 &&
2640  cv_.compare_exchange_strong(v, v & kCvEvent, std::memory_order_acquire,
2641  std::memory_order_relaxed)) {
2642  PerThreadSynch *h = reinterpret_cast<PerThreadSynch *>(v & ~kCvLow);
2643  if (h != nullptr) {
2644  PerThreadSynch *w;
2645  PerThreadSynch *n = h->next;
2646  do { // for every thread, wake it up
2647  w = n;
2648  n = n->next;
2649  CondVar::Wakeup(w);
2650  } while (w != h);
2651  cond_var_tracer("SignalAll wakeup", this);
2652  }
2653  if ((v & kCvEvent) != 0) {
2654  PostSynchEvent(this, SYNCH_EV_SIGNALALL);
2655  }
2656  ABSL_TSAN_MUTEX_POST_SIGNAL(nullptr, 0);
2657  return;
2658  } else {
2659  c = Delay(c, GENTLE); // try again after a delay
2660  }
2661  }
2662  ABSL_TSAN_MUTEX_POST_SIGNAL(nullptr, 0);
2663 }
2664 
2665 void ReleasableMutexLock::Release() {
2666  ABSL_RAW_CHECK(this->mu_ != nullptr,
2667  "ReleasableMutexLock::Release may only be called once");
2668  this->mu_->Unlock();
2669  this->mu_ = nullptr;
2670 }
2671 
2672 #ifdef THREAD_SANITIZER
2673 extern "C" void __tsan_read1(void *addr);
2674 #else
2675 #define __tsan_read1(addr) // do nothing if TSan not enabled
2676 #endif
2677 
2678 // A function that just returns its argument, dereferenced
2679 static bool Dereference(void *arg) {
2680  // ThreadSanitizer does not instrument this file for memory accesses.
2681  // This function dereferences a user variable that can participate
2682  // in a data race, so we need to manually tell TSan about this memory access.
2683  __tsan_read1(arg);
2684  return *(static_cast<bool *>(arg));
2685 }
2686 
2687 Condition::Condition() {} // null constructor, used for kTrue only
2688 const Condition Condition::kTrue;
2689 
2690 Condition::Condition(bool (*func)(void *), void *arg)
2691  : eval_(&CallVoidPtrFunction),
2692  function_(func),
2693  method_(nullptr),
2694  arg_(arg) {}
2695 
2696 bool Condition::CallVoidPtrFunction(const Condition *c) {
2697  return (*c->function_)(c->arg_);
2698 }
2699 
2700 Condition::Condition(const bool *cond)
2701  : eval_(CallVoidPtrFunction),
2702  function_(Dereference),
2703  method_(nullptr),
2704  // const_cast is safe since Dereference does not modify arg
2705  arg_(const_cast<bool *>(cond)) {}
2706 
2707 bool Condition::Eval() const {
2708  // eval_ == null for kTrue
2709  return (this->eval_ == nullptr) || (*this->eval_)(this);
2710 }
2711 
2712 bool Condition::GuaranteedEqual(const Condition *a, const Condition *b) {
2713  if (a == nullptr) {
2714  return b == nullptr || b->eval_ == nullptr;
2715  }
2716  if (b == nullptr || b->eval_ == nullptr) {
2717  return a->eval_ == nullptr;
2718  }
2719  return a->eval_ == b->eval_ && a->function_ == b->function_ &&
2720  a->arg_ == b->arg_ && a->method_ == b->method_;
2721 }
2722 
2723 } // namespace absl
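// Illustrative sketch, not part of mutex.cc: the three common ways a client
// builds the Condition objects handled above. IsZero/ConditionExamples are
// hypothetical names; absl::Mutex::LockWhen and the absl::Condition
// constructors are assumed as declared in the public header.
#include "absl/synchronization/mutex.h"

static bool IsZero(void *arg) { return *static_cast<int *>(arg) == 0; }

inline void ConditionExamples() {
  absl::Mutex mu;
  int pending = 0;    // guarded by mu
  bool ready = true;  // guarded by mu

  // 1. bool(void*) function plus argument, dispatched via
  //    CallVoidPtrFunction() above. True here, so LockWhen returns at once.
  mu.LockWhen(absl::Condition(&IsZero, &pending));
  mu.Unlock();

  // 2. Pointer to a bool, wrapped by Dereference() above.
  mu.LockWhen(absl::Condition(&ready));
  mu.Unlock();

  // 3. Condition::kTrue has eval_ == nullptr and is vacuously true, so this
  //    behaves like a plain Lock(); GuaranteedEqual() treats it as "no
  //    condition".
  mu.LockWhen(absl::Condition::kTrue);
  mu.Unlock();
}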
int flags
Definition: mutex.cc:253
void AssertReaderHeld() const ASSERT_SHARED_LOCK()
static const struct absl::@21 event_properties[]
#define ABSL_TSAN_MUTEX_PRE_UNLOCK(...)
static const int kMuHasBlocked
Definition: mutex.cc:652
intptr_t slow_need_zero
Definition: mutex.cc:668
PerThreadSynch *const thread
Definition: mutex.cc:482
void LockSlowLoop(SynchWaitParams *waitp, int flags)
Definition: mutex.cc:1897
void RegisterCondVarTracer(void(*fn)(const char *msg, const void *cv))
Definition: mutex.cc:134
void * arg
Definition: mutex.cc:294
absl::Time TimeFromTimeval(timeval tv)
Definition: time.cc:286
intptr_t slow_inc_need_zero
Definition: mutex.cc:670
static const intptr_t kCvEvent
Definition: mutex.cc:2405
std::atomic< intptr_t > * cv_word
Definition: mutex.cc:486
ABSL_XRAY_LOG_ARGS(1) void Mutex
Definition: mutex.cc:1107
KernelTimeout timeout
Definition: mutex.cc:478
void UnlockSlow(SynchWaitParams *waitp) ABSL_ATTRIBUTE_COLD
Definition: mutex.cc:2016
ABSL_ATTRIBUTE_NOINLINE ABSL_ATTRIBUTE_NO_TAIL_CALL int GetStackTrace(void **result, int max_depth, int skip_count)
Definition: stacktrace.cc:98
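
Several of the entries indexed above (Mutex::Await, Mutex::LockWhenWithTimeout, ReaderLockWhenWithTimeout, Condition::eval_) belong to the condition critical section interface declared in absl/synchronization/mutex.h. The following is a minimal usage sketch, not code from mutex.cc; the Counter, ConsumeOne, and TryConsumeOne names are hypothetical and exist only to show a caller blocking on a predicate over shared state.

// Minimal sketch of the condition critical section API. Counter, ConsumeOne
// and TryConsumeOne are hypothetical names, not symbols from mutex.cc.
#include "absl/synchronization/mutex.h"
#include "absl/time/time.h"

struct Counter {
  absl::Mutex mu;
  int value = 0;  // Shared state protected by mu.
};

// Blocks until value is positive, then consumes one unit.
void ConsumeOne(Counter *c) {
  c->mu.LockWhen(
      absl::Condition(+[](Counter *p) { return p->value > 0; }, c));
  // The mutex is held here and the predicate has been observed true.
  --c->value;
  c->mu.Unlock();
}

// Same idea, but gives up after `timeout`. LockWhenWithTimeout acquires the
// mutex whether or not the condition became true, so Unlock is always needed.
bool TryConsumeOne(Counter *c, absl::Duration timeout) {
  const bool became_true = c->mu.LockWhenWithTimeout(
      absl::Condition(+[](Counter *p) { return p->value > 0; }, c), timeout);
  if (became_true) --c->value;
  c->mu.Unlock();
  return became_true;
}

Mutex::Await offers the same predicate wait to code that already holds the lock: it atomically releases the mutex, blocks until the condition holds, and reacquires the mutex before returning.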
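
The registration and mode-setting entry points indexed above (RegisterMutexProfiler, RegisterCondVarTracer, EnableMutexInvariantDebugging, SetMutexDeadlockDetectionMode) configure process-wide debugging behaviour, as does the closely related absl::RegisterMutexTracer declared in absl/synchronization/mutex.h. A minimal setup sketch follows; the TraceContention callback is a hypothetical example, not code from this file.

// Sketch only: installs a hypothetical tracer callback and enables the
// optional debugging modes.
#include <cstdint>
#include <cstdio>

#include "absl/synchronization/mutex.h"

static void TraceContention(const char *msg, const void *obj,
                            int64_t wait_cycles) {
  // Called by the mutex implementation with an event message, the mutex
  // address, and the associated wait time measured in cycles.
  std::fprintf(stderr, "%s mutex=%p wait_cycles=%lld\n", msg, obj,
               static_cast<long long>(wait_cycles));
}

int main() {
  // Report deadlock cycles instead of aborting (kIgnore/kReport/kAbort).
  absl::SetMutexDeadlockDetectionMode(absl::OnDeadlockCycle::kReport);
  // Check invariants registered via Mutex::EnableInvariantDebugging().
  absl::EnableMutexInvariantDebugging(true);
  // Receive trace events (message, mutex address, wait time in cycles).
  absl::RegisterMutexTracer(&TraceContention);
  return 0;
}

All of these hooks mutate process-wide state, so they are normally installed once during program startup.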


abseil_cpp
autogenerated on Tue Jun 18 2019 19:44:36