1 // Copyright 2017 The Abseil Authors.
2 //
3 // Licensed under the Apache License, Version 2.0 (the "License");
4 // you may not use this file except in compliance with the License.
5 // You may obtain a copy of the License at
6 //
7 //      https://www.apache.org/licenses/LICENSE-2.0
8 //
9 // Unless required by applicable law or agreed to in writing, software
10 // distributed under the License is distributed on an "AS IS" BASIS,
11 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 // See the License for the specific language governing permissions and
13 // limitations under the License.
14 //
15 // -----------------------------------------------------------------------------
16 // mutex.h
17 // -----------------------------------------------------------------------------
18 //
19 // This header file defines a `Mutex` -- a mutually exclusive lock -- and the
20 // most common type of synchronization primitive for facilitating locks on
21 // shared resources. A mutex is used to prevent multiple threads from accessing
22 // and/or writing to a shared resource concurrently.
23 //
24 // Unlike a `std::mutex`, the Abseil `Mutex` provides the following additional
25 // features:
26 //   * Conditional predicates intrinsic to the `Mutex` object
27 //   * Shared/reader locks, in addition to standard exclusive/writer locks
28 //   * Deadlock detection and debug support.
29 //
30 // The following helper classes are also defined within this file:
31 //
32 //  MutexLock - An RAII wrapper to acquire and release a `Mutex` for exclusive/
33 //              write access within the current scope.
34 //
35 //  ReaderMutexLock
36 //            - An RAII wrapper to acquire and release a `Mutex` for shared/read
37 //              access within the current scope.
38 //
39 //  WriterMutexLock
40 //            - Effectively an alias for `MutexLock` above, designed for use in
41 //              distinguishing reader and writer locks within code.
42 //
43 // In addition to simple mutex locks, this file also defines ways to perform
44 // locking under certain conditions.
45 //
46 //  Condition - (Preferred) Used to wait for a particular predicate that
47 //              depends on state protected by the `Mutex` to become true.
48 //  CondVar   - A lower-level variant of `Condition` that relies on
49 //              application code to explicitly signal the `CondVar` when
50 //              a condition has been met.
51 //
52 // See below for more information on using `Condition` or `CondVar`.
53 //
54 // Mutexes and mutex behavior can be quite complicated. The information within
55 // this header file is limited, as a result. Please consult the Mutex guide for
56 // more complete information and examples.
57 
58 #ifndef ABSL_SYNCHRONIZATION_MUTEX_H_
59 #define ABSL_SYNCHRONIZATION_MUTEX_H_
60 
61 #include <atomic>
62 #include <cstdint>
63 #include <cstring>
64 #include <iterator>
65 #include <string>
66 
67 #include "absl/base/attributes.h"
68 #include "absl/base/const_init.h"
69 #include "absl/base/internal/identity.h"
70 #include "absl/base/internal/low_level_alloc.h"
71 #include "absl/base/internal/thread_identity.h"
72 #include "absl/base/internal/tsan_mutex_interface.h"
73 #include "absl/base/port.h"
74 #include "absl/base/thread_annotations.h"
75 #include "absl/synchronization/internal/kernel_timeout.h"
76 #include "absl/synchronization/internal/per_thread_sem.h"
77 #include "absl/time/time.h"
78 
79 namespace absl {
80 ABSL_NAMESPACE_BEGIN
81 
82 class Condition;
83 struct SynchWaitParams;
84 
85 // -----------------------------------------------------------------------------
86 // Mutex
87 // -----------------------------------------------------------------------------
88 //
89 // A `Mutex` is a non-reentrant (aka non-recursive) Mutually Exclusive lock
90 // on some resource, typically a variable or data structure with associated
91 // invariants. Proper usage of mutexes prevents concurrent access by different
92 // threads to the same resource.
93 //
94 // A `Mutex` has two basic operations: `Mutex::Lock()` and `Mutex::Unlock()`.
95 // The `Lock()` operation *acquires* a `Mutex` (in a state known as an
96 // *exclusive* -- or *write* -- lock), and the `Unlock()` operation *releases* a
97 // Mutex. During the span of time between the Lock() and Unlock() operations,
98 // a mutex is said to be *held*. By design, all mutexes support exclusive/write
99 // locks, as this is the most common way to use a mutex.
100 //
101 // Mutex operations are only allowed under certain conditions; otherwise an
102 // operation is "invalid", and disallowed by the API. The conditions concern
103 // both the current state of the mutex and the identity of the threads that
104 // are performing the operations.
105 //
106 // The `Mutex` state machine for basic lock/unlock operations is quite simple:
107 //
108 // |                | Lock()                 | Unlock() |
109 // |----------------+------------------------+----------|
110 // | Free           | Exclusive              | invalid  |
111 // | Exclusive      | blocks, then exclusive | Free     |
112 //
113 // The full conditions are as follows.
114 //
115 // * Calls to `Unlock()` require that the mutex be held, and must be made in the
116 //   same thread that performed the corresponding `Lock()` operation which
117 //   acquired the mutex; otherwise the call is invalid.
118 //
119 // * The mutex being non-reentrant (or non-recursive) means that a call to
120 //   `Lock()` or `TryLock()` must not be made in a thread that already holds the
121 //   mutex; such a call is invalid.
122 //
123 // * In other words, the state of being "held" has both a temporal component
124 //   (from `Lock()` until `Unlock()`) as well as a thread identity component:
125 //   the mutex is held *by a particular thread*.
126 //
127 // An "invalid" operation has undefined behavior. The `Mutex` implementation
128 // is allowed to do anything on an invalid call, including, but not limited to,
129 // crashing with a useful error message, silently succeeding, or corrupting
130 // data structures. In debug mode, the implementation may crash with a useful
131 // error message.
132 //
133 // `Mutex` is not guaranteed to be "fair" in prioritizing waiting threads; it
134 // is, however, approximately fair over long periods, and starvation-free for
135 // threads at the same priority.
136 //
137 // The lock/unlock primitives are now annotated with lock annotations
138 // defined in (base/thread_annotations.h). When writing multi-threaded code,
139 // you should use lock annotations whenever possible to document your lock
140 // synchronization policy. Besides acting as documentation, these annotations
141 // also help compilers or static analysis tools to identify and warn about
142 // issues that could potentially result in race conditions and deadlocks.
143 //
144 // For more information about the lock annotations, please see
145 // [Thread Safety
146 // Analysis](http://clang.llvm.org/docs/ThreadSafetyAnalysis.html) in the Clang
147 // documentation.
148 //
149 // See also `MutexLock`, below, for scoped `Mutex` acquisition.
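//
// Example (a minimal sketch; `mu`, `counter`, and `Increment()` are
// illustrative names, not part of this API):
//
//   absl::Mutex mu;
//   int counter ABSL_GUARDED_BY(mu) = 0;
//
//   void Increment() {
//     mu.Lock();
//     ++counter;
//     mu.Unlock();
//   }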
150 
151 class ABSL_LOCKABLE ABSL_ATTRIBUTE_WARN_UNUSED Mutex {
152  public:
153   // Creates a `Mutex` that is not held by anyone. This constructor is
154   // typically used for Mutexes allocated on the heap or the stack.
155   //
156   // To create `Mutex` instances with static storage duration
157   // (e.g. a namespace-scoped or global variable), see
158   // `Mutex::Mutex(absl::kConstInit)` below instead.
159   Mutex();
160 
161   // Creates a mutex with static storage duration.  A global variable
162   // constructed this way avoids the lifetime issues that can occur on program
163   // startup and shutdown.  (See absl/base/const_init.h.)
164   //
165   // For Mutexes allocated on the heap and stack, instead use the default
166   // constructor, which can interact more fully with the thread sanitizer.
167   //
168   // Example usage:
169   //   namespace foo {
170   //   ABSL_CONST_INIT absl::Mutex mu(absl::kConstInit);
171   //   }
172   explicit constexpr Mutex(absl::ConstInitType);
173 
174   ~Mutex();
175 
176   // Mutex::Lock()
177   //
178   // Blocks the calling thread, if necessary, until this `Mutex` is free, and
179   // then acquires it exclusively. (This lock is also known as a "write lock.")
180   void Lock() ABSL_EXCLUSIVE_LOCK_FUNCTION();
181 
182   // Mutex::Unlock()
183   //
184   // Releases this `Mutex` and returns it from the exclusive/write state to the
185   // free state. Calling thread must hold the `Mutex` exclusively.
186   void Unlock() ABSL_UNLOCK_FUNCTION();
187 
188   // Mutex::TryLock()
189   //
190   // If the mutex can be acquired without blocking, does so exclusively and
191   // returns `true`. Otherwise, returns `false`. Returns `true` with high
192   // probability if the `Mutex` was free.
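  //
  // A usage sketch (`mu_` and `RefreshCache()` are hypothetical):
  //
  //   if (mu_.TryLock()) {
  //     RefreshCache();   // fast path: acquired without blocking
  //     mu_.Unlock();
  //   } else {
  //     // Another thread holds the lock; skip the refresh this time.
  //   }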
193   ABSL_MUST_USE_RESULT bool TryLock() ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(true);
194 
195   // Mutex::AssertHeld()
196   //
197   // Require that the mutex be held exclusively (write mode) by this thread.
198   //
199   // If the mutex is not currently held by this thread, this function may report
200   // an error (typically by crashing with a diagnostic) or it may do nothing.
201   // This function is intended only as a tool to assist debugging; it doesn't
202   // guarantee correctness.
203   void AssertHeld() const ABSL_ASSERT_EXCLUSIVE_LOCK();
204 
205   // ---------------------------------------------------------------------------
206   // Reader-Writer Locking
207   // ---------------------------------------------------------------------------
208 
209   // A Mutex can also be used as a starvation-free reader-writer lock.
210   // Neither read-locks nor write-locks are reentrant/recursive to avoid
211   // potential client programming errors.
212   //
213   // The Mutex API provides `Writer*()` aliases for the existing `Lock()`,
214   // `Unlock()` and `TryLock()` methods for use within applications mixing
215   // reader/writer locks. Using `Reader*()` and `Writer*()` operations in this
216   // manner can make locking behavior clearer when mixing read and write modes.
217   //
218   // Introducing reader locks necessarily complicates the `Mutex` state
219   // machine somewhat. The table below illustrates the allowed state transitions
220   // of a mutex in such cases. Note that ReaderLock() may block even if the lock
221   // is held in shared mode; this occurs when another thread is blocked on a
222   // call to WriterLock().
223   //
224   // ---------------------------------------------------------------------------
225   //     Operation: WriterLock() Unlock()  ReaderLock()           ReaderUnlock()
226   // ---------------------------------------------------------------------------
227   // State
228   // ---------------------------------------------------------------------------
229   // Free           Exclusive    invalid   Shared(1)              invalid
230   // Shared(1)      blocks       invalid   Shared(2) or blocks    Free
231   // Shared(n) n>1  blocks       invalid   Shared(n+1) or blocks  Shared(n-1)
232   // Exclusive      blocks       Free      blocks                 invalid
233   // ---------------------------------------------------------------------------
234   //
235   // In comments below, "shared" refers to a state of Shared(n) for any n > 0.
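  //
  // A usage sketch (`mu_` and `config_` are hypothetical members):
  //
  //   // Any number of threads may read concurrently:
  //   mu_.ReaderLock();
  //   Use(config_);
  //   mu_.ReaderUnlock();
  //
  //   // A writer excludes both readers and other writers:
  //   mu_.WriterLock();
  //   config_ = MakeNewConfig();
  //   mu_.WriterUnlock();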
236 
237   // Mutex::ReaderLock()
238   //
239   // Blocks the calling thread, if necessary, until this `Mutex` is either free,
240   // or in shared mode, and then acquires a share of it. Note that
241   // `ReaderLock()` will block if some other thread has an exclusive/writer lock
242   // on the mutex.
243 
244   void ReaderLock() ABSL_SHARED_LOCK_FUNCTION();
245 
246   // Mutex::ReaderUnlock()
247   //
248   // Releases a read share of this `Mutex`. `ReaderUnlock` may return a mutex to
249   // the free state if this thread holds the last reader lock on the mutex. Note
250   // that you cannot call `ReaderUnlock()` on a mutex held in write mode.
251   void ReaderUnlock() ABSL_UNLOCK_FUNCTION();
252 
253   // Mutex::ReaderTryLock()
254   //
255   // If the mutex can be acquired without blocking, acquires this mutex for
256   // shared access and returns `true`. Otherwise, returns `false`. Returns
257   // `true` with high probability if the `Mutex` was free or shared.
258   ABSL_MUST_USE_RESULT bool ReaderTryLock() ABSL_SHARED_TRYLOCK_FUNCTION(true);
259 
260   // Mutex::AssertReaderHeld()
261   //
262   // Require that the mutex be held at least in shared mode (read mode) by this
263   // thread.
264   //
265   // If the mutex is not currently held by this thread, this function may report
266   // an error (typically by crashing with a diagnostic) or it may do nothing.
267   // This function is intended only as a tool to assist debugging; it doesn't
268   // guarantee correctness.
269   void AssertReaderHeld() const ABSL_ASSERT_SHARED_LOCK();
270 
271   // Mutex::WriterLock()
272   // Mutex::WriterUnlock()
273   // Mutex::WriterTryLock()
274   //
275   // Aliases for `Mutex::Lock()`, `Mutex::Unlock()`, and `Mutex::TryLock()`.
276   //
277   // These methods may be used (along with the complementary `Reader*()`
278   // methods) to distinguish simple exclusive `Mutex` usage (`Lock()`,
279   // etc.) from reader/writer lock usage.
280   void WriterLock() ABSL_EXCLUSIVE_LOCK_FUNCTION() { this->Lock(); }
281 
282   void WriterUnlock() ABSL_UNLOCK_FUNCTION() { this->Unlock(); }
283 
284   ABSL_MUST_USE_RESULT bool WriterTryLock()
285       ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(true) {
286     return this->TryLock();
287   }
288 
289   // ---------------------------------------------------------------------------
290   // Conditional Critical Regions
291   // ---------------------------------------------------------------------------
292 
293   // Conditional usage of a `Mutex` can occur using two distinct paradigms:
294   //
295   //   * Use of `Mutex` member functions with `Condition` objects.
296   //   * Use of the separate `CondVar` abstraction.
297   //
298   // In general, prefer use of `Condition` and the `Mutex` member functions
299   // listed below over `CondVar`. When there are multiple threads waiting on
300   // distinctly different conditions, however, a battery of `CondVar`s may be
301   // more efficient. This section discusses use of `Condition` objects.
302   //
303   // `Mutex` contains member functions for performing lock operations only under
304   // certain conditions, expressed as `Condition` objects. For correctness, the
305   // `Condition` must be a pure boolean function only of state protected by
306   // the `Mutex`. The condition must be invariant w.r.t. environmental state
307   // such as thread, cpu id, or time, and must be `noexcept`. The condition will
308   // always be invoked with the mutex held in at least read mode, so it should
309   // not block for long periods or sleep on a timer.
310   //
311   // Since a condition must not depend directly on the current time, use
312   // `*WithTimeout()` member function variants to make your condition
313   // effectively true after a given duration, or `*WithDeadline()` variants to
314   // make your condition effectively true after a given time.
315   //
316   // The condition function should have no side-effects aside from debug
317   // logging; as a special exception, the function may acquire other mutexes
318   // provided it releases all those that it acquires.  (This exception was
319   // required to allow logging.)
320 
321   // Mutex::Await()
322   //
323   // Unlocks this `Mutex` and blocks until simultaneously both `cond` is `true`
324   // and this `Mutex` can be reacquired, then reacquires this `Mutex` in the
325   // same mode in which it was previously held. If the condition is initially
326   // `true`, `Await()` *may* skip the release/re-acquire step.
327   //
328   // `Await()` requires that this thread holds this `Mutex` in some mode.
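  //
  // A usage sketch (`mu_` and `done_` are hypothetical members; `done_` is
  // protected by `mu_`):
  //
  //   bool done_ ABSL_GUARDED_BY(mu_) = false;
  //
  //   mu_.Lock();
  //   mu_.Await(Condition(&done_));  // waits until another thread sets done_
  //   // done_ is true here, and mu_ is held again.
  //   mu_.Unlock();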
329   void Await(const Condition& cond) {
330     AwaitCommon(cond, synchronization_internal::KernelTimeout::Never());
331   }
332 
333   // Mutex::LockWhen()
334   // Mutex::ReaderLockWhen()
335   // Mutex::WriterLockWhen()
336   //
337   // Blocks until simultaneously both `cond` is `true` and this `Mutex` can
338   // be acquired, then atomically acquires this `Mutex`. `LockWhen()` is
339   // logically equivalent to `*Lock(); Await();` though they may have different
340   // performance characteristics.
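  //
  // A usage sketch of the reader variant (`mu_` and `count_` are hypothetical
  // members; `count_` is protected by `mu_`):
  //
  //   mu_.ReaderLockWhen(Condition(
  //       +[](int* count) { return *count > 0; }, &count_));
  //   // count_ > 0 here, and this thread holds a read lock.
  //   mu_.ReaderUnlock();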
341   void LockWhen(const Condition& cond) ABSL_EXCLUSIVE_LOCK_FUNCTION() {
342     LockWhenCommon(cond, synchronization_internal::KernelTimeout::Never(),
343                    true);
344   }
345 
346   void ReaderLockWhen(const Condition& cond) ABSL_SHARED_LOCK_FUNCTION() {
347     LockWhenCommon(cond, synchronization_internal::KernelTimeout::Never(),
348                    false);
349   }
350 
351   void WriterLockWhen(const Condition& cond) ABSL_EXCLUSIVE_LOCK_FUNCTION() {
352     this->LockWhen(cond);
353   }
354 
355   // ---------------------------------------------------------------------------
356   // Mutex Variants with Timeouts/Deadlines
357   // ---------------------------------------------------------------------------
358 
359   // Mutex::AwaitWithTimeout()
360   // Mutex::AwaitWithDeadline()
361   //
362   // Unlocks this `Mutex` and blocks until simultaneously:
363   //   - either `cond` is true or the {timeout has expired, deadline has passed}
364   //     and
365   //   - this `Mutex` can be reacquired,
366   // then reacquire this `Mutex` in the same mode in which it was previously
367   // held, returning `true` iff `cond` is `true` on return.
368   //
369   // If the condition is initially `true`, the implementation *may* skip the
370   // release/re-acquire step and return immediately.
371   //
372   // Deadlines in the past are equivalent to an immediate deadline.
373   // Negative timeouts are equivalent to a zero timeout.
374   //
375   // This method requires that this thread holds this `Mutex` in some mode.
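  //
  // A usage sketch (`mu_` and `ready_` are hypothetical members; `ready_` is
  // protected by `mu_`):
  //
  //   mu_.Lock();
  //   if (!mu_.AwaitWithTimeout(Condition(&ready_), absl::Seconds(2))) {
  //     // Timed out: ready_ is still false, but mu_ is held again.
  //   }
  //   mu_.Unlock();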
376   bool AwaitWithTimeout(const Condition& cond, absl::Duration timeout) {
377     return AwaitCommon(cond, synchronization_internal::KernelTimeout{timeout});
378   }
379 
380   bool AwaitWithDeadline(const Condition& cond, absl::Time deadline) {
381     return AwaitCommon(cond, synchronization_internal::KernelTimeout{deadline});
382   }
383 
384   // Mutex::LockWhenWithTimeout()
385   // Mutex::ReaderLockWhenWithTimeout()
386   // Mutex::WriterLockWhenWithTimeout()
387   //
388   // Blocks until simultaneously both:
389   //   - either `cond` is `true` or the timeout has expired, and
390   //   - this `Mutex` can be acquired,
391   // then atomically acquires this `Mutex`, returning `true` iff `cond` is
392   // `true` on return.
393   //
394   // Negative timeouts are equivalent to a zero timeout.
395   bool LockWhenWithTimeout(const Condition& cond, absl::Duration timeout)
396       ABSL_EXCLUSIVE_LOCK_FUNCTION() {
397     return LockWhenCommon(
398         cond, synchronization_internal::KernelTimeout{timeout}, true);
399   }
400   bool ReaderLockWhenWithTimeout(const Condition& cond, absl::Duration timeout)
401       ABSL_SHARED_LOCK_FUNCTION() {
402     return LockWhenCommon(
403         cond, synchronization_internal::KernelTimeout{timeout}, false);
404   }
405   bool WriterLockWhenWithTimeout(const Condition& cond, absl::Duration timeout)
406       ABSL_EXCLUSIVE_LOCK_FUNCTION() {
407     return this->LockWhenWithTimeout(cond, timeout);
408   }
409 
410   // Mutex::LockWhenWithDeadline()
411   // Mutex::ReaderLockWhenWithDeadline()
412   // Mutex::WriterLockWhenWithDeadline()
413   //
414   // Blocks until simultaneously both:
415   //   - either `cond` is `true` or the deadline has been passed, and
416   //   - this `Mutex` can be acquired,
417   // then atomically acquires this Mutex, returning `true` iff `cond` is `true`
418   // on return.
419   //
420   // Deadlines in the past are equivalent to an immediate deadline.
421   bool LockWhenWithDeadline(const Condition& cond, absl::Time deadline)
422       ABSL_EXCLUSIVE_LOCK_FUNCTION() {
423     return LockWhenCommon(
424         cond, synchronization_internal::KernelTimeout{deadline}, true);
425   }
426   bool ReaderLockWhenWithDeadline(const Condition& cond, absl::Time deadline)
427       ABSL_SHARED_LOCK_FUNCTION() {
428     return LockWhenCommon(
429         cond, synchronization_internal::KernelTimeout{deadline}, false);
430   }
431   bool WriterLockWhenWithDeadline(const Condition& cond, absl::Time deadline)
432       ABSL_EXCLUSIVE_LOCK_FUNCTION() {
433     return this->LockWhenWithDeadline(cond, deadline);
434   }
435 
436   // ---------------------------------------------------------------------------
437   // Debug Support: Invariant Checking, Deadlock Detection, Logging.
438   // ---------------------------------------------------------------------------
439 
440   // Mutex::EnableInvariantDebugging()
441   //
442   // If `invariant`!=null and if invariant debugging has been enabled globally,
443   // cause `(*invariant)(arg)` to be called at moments when the invariant for
444   // this `Mutex` should hold (for example: just after acquire, just before
445   // release).
446   //
447   // The routine `invariant` should have no side-effects since it is not
448   // guaranteed how many times it will be called; it should check the invariant
449   // and crash if it does not hold. Enabling global invariant debugging may
450   // substantially reduce `Mutex` performance; it should be set only for
451   // non-production runs.  Optimization options may also disable invariant
452   // checks.
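  //
  // A usage sketch (`CheckState`, `State`, and `state_` are hypothetical; the
  // invariant routine simply checks and crashes on violation):
  //
  //   static void CheckState(void* arg) {
  //     auto* s = static_cast<State*>(arg);
  //     assert(s->pending >= 0);
  //   }
  //   ...
  //   absl::EnableMutexInvariantDebugging(true);  // global opt-in (see below)
  //   mu_.EnableInvariantDebugging(&CheckState, &state_);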
453   void EnableInvariantDebugging(void (*invariant)(void*), void* arg);
454 
455   // Mutex::EnableDebugLog()
456   //
457   // Cause all subsequent uses of this `Mutex` to be logged via
458   // `ABSL_RAW_LOG(INFO)`. Log entries are tagged with `name` if no previous
459   // call to `EnableInvariantDebugging()` or `EnableDebugLog()` has been made.
460   //
461   // Note: This method substantially reduces `Mutex` performance.
462   void EnableDebugLog(const char* name);
463 
464   // Deadlock detection
465 
466   // Mutex::ForgetDeadlockInfo()
467   //
468   // Forget any deadlock-detection information previously gathered
469   // about this `Mutex`. Call this method in debug mode when the lock ordering
470   // of a `Mutex` changes.
471   void ForgetDeadlockInfo();
472 
473   // Mutex::AssertNotHeld()
474   //
475   // Return immediately if this thread does not hold this `Mutex` in any
476   // mode; otherwise, may report an error (typically by crashing with a
477   // diagnostic), or may return immediately.
478   //
479   // Currently this check is performed only if all of:
480   //    - in debug mode
481   //    - SetMutexDeadlockDetectionMode() has been set to kReport or kAbort
482   //    - number of locks concurrently held by this thread is not large.
483   // are true.
484   void AssertNotHeld() const;
485 
486   // Special cases.
487 
488   // A `MuHow` is a constant that indicates how a lock should be acquired.
489   // Internal implementation detail.  Clients should ignore.
490   typedef const struct MuHowS* MuHow;
491 
492   // Mutex::InternalAttemptToUseMutexInFatalSignalHandler()
493   //
494   // Causes the `Mutex` implementation to prepare itself for re-entry caused by
495   // future use of `Mutex` within a fatal signal handler. This method is
496   // intended for use only for last-ditch attempts to log crash information.
497   // It does not guarantee that attempts to use Mutexes within the handler will
498   // not deadlock; it merely makes other faults less likely.
499   //
500   // WARNING:  This routine must be invoked from a signal handler, and the
501   // signal handler must either loop forever or terminate the process.
502   // Attempts to return from (or `longjmp` out of) the signal handler once this
503   // call has been made may cause arbitrary program behaviour including
504   // crashes and deadlocks.
505   static void InternalAttemptToUseMutexInFatalSignalHandler();
506 
507  private:
508   std::atomic<intptr_t> mu_;  // The Mutex state.
509 
510   // Post()/Wait() versus associated PerThreadSem; in class for required
511   // friendship with PerThreadSem.
512   static void IncrementSynchSem(Mutex* mu, base_internal::PerThreadSynch* w);
513   static bool DecrementSynchSem(Mutex* mu, base_internal::PerThreadSynch* w,
514                                 synchronization_internal::KernelTimeout t);
515 
516   // slow path acquire
517   void LockSlowLoop(SynchWaitParams* waitp, int flags);
518   // wrappers around LockSlowLoop()
519   bool LockSlowWithDeadline(MuHow how, const Condition* cond,
520                             synchronization_internal::KernelTimeout t,
521                             int flags);
522   void LockSlow(MuHow how, const Condition* cond,
523                 int flags) ABSL_ATTRIBUTE_COLD;
524   // slow path release
525   void UnlockSlow(SynchWaitParams* waitp) ABSL_ATTRIBUTE_COLD;
526   // TryLock slow path.
527   bool TryLockSlow();
528   // ReaderTryLock slow path.
529   bool ReaderTryLockSlow();
530   // Common code between Await() and AwaitWithTimeout/Deadline()
531   bool AwaitCommon(const Condition& cond,
532                    synchronization_internal::KernelTimeout t);
533   bool LockWhenCommon(const Condition& cond,
534                       synchronization_internal::KernelTimeout t, bool write);
535   // Attempt to remove thread s from queue.
536   void TryRemove(base_internal::PerThreadSynch* s);
537   // Block a thread on mutex.
538   void Block(base_internal::PerThreadSynch* s);
539   // Wake a thread; return successor.
540   base_internal::PerThreadSynch* Wakeup(base_internal::PerThreadSynch* w);
541   void Dtor();
542 
543   friend class CondVar;   // for access to Trans()/Fer().
544   void Trans(MuHow how);  // used for CondVar->Mutex transfer
545   void Fer(
546       base_internal::PerThreadSynch* w);  // used for CondVar->Mutex transfer
547 
548   // Catch the error of writing Mutex when intending MutexLock.
549   explicit Mutex(const volatile Mutex* /*ignored*/) {}
550 
551   Mutex(const Mutex&) = delete;
552   Mutex& operator=(const Mutex&) = delete;
553 };
554 
555 // -----------------------------------------------------------------------------
556 // Mutex RAII Wrappers
557 // -----------------------------------------------------------------------------
558 
559 // MutexLock
560 //
561 // `MutexLock` is a helper class, which acquires and releases a `Mutex` via
562 // RAII.
563 //
564 // Example:
565 //
566 // class Foo {
567 //  public:
568 //   Foo::Bar* Baz() {
569 //     MutexLock lock(&mu_);
570 //     ...
571 //     return bar;
572 //   }
573 //
574 // private:
575 //   Mutex mu_;
576 // };
577 class ABSL_SCOPED_LOCKABLE MutexLock {
578  public:
579   // Constructors
580 
581   // Calls `mu->Lock()` and returns when that call returns. That is, `*mu` is
582   // guaranteed to be locked when this object is constructed. Requires that
583   // `mu` be dereferenceable.
584   explicit MutexLock(Mutex* mu) ABSL_EXCLUSIVE_LOCK_FUNCTION(mu) : mu_(mu) {
585     this->mu_->Lock();
586   }
587 
588   // Like above, but calls `mu->LockWhen(cond)` instead. That is, in addition to
589   // the above, the condition given by `cond` is also guaranteed to hold when
590   // this object is constructed.
591   explicit MutexLock(Mutex* mu, const Condition& cond)
592       ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
593       : mu_(mu) {
594     this->mu_->LockWhen(cond);
595   }
596 
597   MutexLock(const MutexLock&) = delete;  // NOLINT(runtime/mutex)
598   MutexLock(MutexLock&&) = delete;       // NOLINT(runtime/mutex)
599   MutexLock& operator=(const MutexLock&) = delete;
600   MutexLock& operator=(MutexLock&&) = delete;
601 
602   ~MutexLock() ABSL_UNLOCK_FUNCTION() { this->mu_->Unlock(); }
603 
604  private:
605   Mutex* const mu_;
606 };
607 
608 // ReaderMutexLock
609 //
610 // The `ReaderMutexLock` is a helper class, like `MutexLock`, which acquires and
611 // releases a shared lock on a `Mutex` via RAII.
612 class ABSL_SCOPED_LOCKABLE ReaderMutexLock {
613  public:
614   explicit ReaderMutexLock(Mutex* mu) ABSL_SHARED_LOCK_FUNCTION(mu) : mu_(mu) {
615     mu->ReaderLock();
616   }
617 
618   explicit ReaderMutexLock(Mutex* mu, const Condition& cond)
619       ABSL_SHARED_LOCK_FUNCTION(mu)
620       : mu_(mu) {
621     mu->ReaderLockWhen(cond);
622   }
623 
624   ReaderMutexLock(const ReaderMutexLock&) = delete;
625   ReaderMutexLock(ReaderMutexLock&&) = delete;
626   ReaderMutexLock& operator=(const ReaderMutexLock&) = delete;
627   ReaderMutexLock& operator=(ReaderMutexLock&&) = delete;
628 
629   ~ReaderMutexLock() ABSL_UNLOCK_FUNCTION() { this->mu_->ReaderUnlock(); }
630 
631  private:
632   Mutex* const mu_;
633 };
634 
635 // WriterMutexLock
636 //
637 // The `WriterMutexLock` is a helper class, like `MutexLock`, which acquires and
638 // releases a write (exclusive) lock on a `Mutex` via RAII.
639 class ABSL_SCOPED_LOCKABLE WriterMutexLock {
640  public:
641   explicit WriterMutexLock(Mutex* mu) ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
642       : mu_(mu) {
643     mu->WriterLock();
644   }
645 
646   explicit WriterMutexLock(Mutex* mu, const Condition& cond)
647       ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
648       : mu_(mu) {
649     mu->WriterLockWhen(cond);
650   }
651 
652   WriterMutexLock(const WriterMutexLock&) = delete;
653   WriterMutexLock(WriterMutexLock&&) = delete;
654   WriterMutexLock& operator=(const WriterMutexLock&) = delete;
655   WriterMutexLock& operator=(WriterMutexLock&&) = delete;
656 
657   ~WriterMutexLock() ABSL_UNLOCK_FUNCTION() { this->mu_->WriterUnlock(); }
658 
659  private:
660   Mutex* const mu_;
661 };
662 
663 // -----------------------------------------------------------------------------
664 // Condition
665 // -----------------------------------------------------------------------------
666 //
667 // `Mutex` contains a number of member functions which take a `Condition` as an
668 // argument; clients can wait for conditions to become `true` before attempting
669 // to acquire the mutex. These sections are known as "condition critical"
670 // sections. To use a `Condition`, you simply need to construct it, and use
671 // within an appropriate `Mutex` member function; everything else in the
672 // `Condition` class is an implementation detail.
673 //
674 // A `Condition` is specified as a function pointer which returns a boolean.
675 // `Condition` functions should be pure functions -- their results should depend
676 // only on passed arguments, should not consult any external state (such as
677 // clocks), and should have no side-effects, aside from debug logging. Any
678 // objects that the function may access should be limited to those which are
679 // constant while the mutex is blocked on the condition (e.g. a stack variable),
680 // or objects of state protected explicitly by the mutex.
681 //
682 // No matter which construction is used for `Condition`, the underlying
683 // function pointer / functor / callable must not throw any
684 // exceptions. Correctness of `Mutex` / `Condition` is not guaranteed in
685 // the face of a throwing `Condition`. (When Abseil is allowed to depend
686 // on C++17, these function pointers will be explicitly marked
687 // `noexcept`; until then this requirement cannot be enforced in the
688 // type system.)
689 //
690 // Note: to use a `Condition`, you need only construct it and pass it to a
691   // suitable `Mutex` member function, such as `Mutex::Await()`, or to the
692 // constructor of one of the scope guard classes.
693 //
694 // Example using LockWhen/Unlock:
695 //
696   //   // assume count_ is not an internal reference count
697 //   int count_ ABSL_GUARDED_BY(mu_);
698 //   Condition count_is_zero(+[](int *count) { return *count == 0; }, &count_);
699 //
700 //   mu_.LockWhen(count_is_zero);
701 //   // ...
702 //   mu_.Unlock();
703 //
704 // Example using a scope guard:
705 //
706 //   {
707 //     MutexLock lock(&mu_, count_is_zero);
708 //     // ...
709 //   }
710 //
711 // When multiple threads are waiting on exactly the same condition, make sure
712 // that they are constructed with the same parameters (same pointer to function
713 // + arg, or same pointer to object + method), so that the mutex implementation
714 // can avoid redundantly evaluating the same condition for each thread.
715 class Condition {
716  public:
717   // A Condition that returns the result of "(*func)(arg)"
718   Condition(bool (*func)(void*), void* arg);
719 
720   // Templated version for people who are averse to casts.
721   //
722   // To use a lambda, prepend it with unary plus, which converts the lambda
723   // into a function pointer:
724   //     Condition(+[](T* t) { return ...; }, arg).
725   //
726   // Note: lambdas in this case must contain no bound variables.
727   //
728   // See class comment for performance advice.
729   template <typename T>
730   Condition(bool (*func)(T*), T* arg);
731 
732   // Same as above, but allows for cases where `arg` comes from a pointer that
733   // is convertible to the function parameter type `T*` but not an exact match.
734   //
735   // For example, the argument might be `X*` but the function takes `const X*`,
736   // or the argument might be `Derived*` while the function takes `Base*`, and
737   // so on for cases where the argument pointer can be implicitly converted.
738   //
739   // Implementation notes: This constructor overload is required in addition to
740   // the one above to allow deduction of `T` from `arg` for cases such as where
741   // a function template is passed as `func`. Also, the dummy `typename = void`
742   // template parameter exists just to work around a MSVC mangling bug.
743   template <typename T, typename = void>
744   Condition(bool (*func)(T*),
745             typename absl::internal::type_identity<T>::type* arg);
746 
747   // Templated version for invoking a method that returns a `bool`.
748   //
749   // `Condition(object, &Class::Method)` constructs a `Condition` that evaluates
750   // `object->Method()`.
751   //
752   // Implementation Note: `absl::internal::type_identity` is used to allow
753   // methods to come from base classes. A simpler signature like
754   // `Condition(T*, bool (T::*)())` does not suffice.
755   template <typename T>
756   Condition(T* object,
757             bool (absl::internal::type_identity<T>::type::*method)());
758 
759   // Same as above, for const members
760   template <typename T>
761   Condition(const T* object,
762             bool (absl::internal::type_identity<T>::type::*method)() const);
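  //
  // A usage sketch (`Queue`, `HasWork()`, and `queue_` are hypothetical):
  //
  //   class Queue {
  //    public:
  //     bool HasWork() const { return !items_.empty(); }
  //     ...
  //   };
  //
  //   mu_.Await(Condition(&queue_, &Queue::HasWork));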
763 
764   // A Condition that returns the value of `*cond`
765   explicit Condition(const bool* cond);
766 
767   // Templated version for invoking a functor that returns a `bool`.
768   // This approach accepts pointers to non-mutable lambdas, `std::function`,
769   // the result of `std::bind`, and user-defined functors that define
770   // `bool F::operator()() const`.
771   //
772   // Example:
773   //
774   //   auto reached = [this, current]() {
775   //     mu_.AssertReaderHeld();                // For annotalysis.
776   //     return processed_ >= current;
777   //   };
778   //   mu_.Await(Condition(&reached));
779   //
780   // NOTE: never use "mu_.AssertHeld()" instead of "mu_.AssertReaderHeld()" in
781   // the lambda as it may be called when the mutex is being unlocked from a
782   // scope holding only a reader lock, which will make the assertion not
783   // fulfilled and crash the binary.
784 
785   // See class comment for performance advice. In particular, if there
786   // might be more than one waiter for the same condition, make sure
787   // that all waiters construct the condition with the same pointers.
788 
789   // Implementation note: The second template parameter ensures that this
790   // constructor doesn't participate in overload resolution if T doesn't have
791   // `bool operator() const`.
792   template <typename T, typename E = decltype(static_cast<bool (T::*)() const>(
793                             &T::operator()))>
794   explicit Condition(const T* obj)
795       : Condition(obj, static_cast<bool (T::*)() const>(&T::operator())) {}
796 
797   // A Condition that always returns `true`.
798   // kTrue is only useful in a narrow set of circumstances, mostly when
799   // it's passed conditionally. For example:
800   //
801   //   mu.LockWhen(some_flag ? kTrue : SomeOtherCondition);
802   //
803   // Note: {LockWhen,Await}With{Deadline,Timeout} methods with kTrue condition
804   // don't return immediately when the timeout happens, they still block until
805   // the Mutex becomes available. The return value of these methods does
806   // not indicate if the timeout was reached; rather it indicates whether or
807   // not the condition is true.
808   ABSL_CONST_INIT static const Condition kTrue;
809 
810   // Evaluates the condition.
811   bool Eval() const;
812 
813   // Returns `true` if the two conditions are guaranteed to return the same
814   // value if evaluated at the same time, `false` if the evaluation *may* return
815   // different results.
816   //
817   // Two `Condition` values are guaranteed equal if both their `func` and `arg`
818   // components are the same. A null pointer is equivalent to a `true`
819   // condition.
820   static bool GuaranteedEqual(const Condition* a, const Condition* b);
821 
822  private:
823   // Sizing an allocation for a method pointer can be subtle. In the Itanium
824   // specifications, a method pointer has a predictable, uniform size. On the
825   // other hand, under the MSVC ABI, method pointer sizes vary based on the
826   // inheritance of the class. Specifically, method pointers from classes with
827   // multiple inheritance are bigger than those of classes with single
828   // inheritance. Other variations also exist.
829 
830 #ifndef _MSC_VER
831   // Allocation for a function pointer or method pointer.
832   // The {0} initializer ensures that all unused bytes of this buffer are
833   // always zeroed out.  This is necessary, because GuaranteedEqual() compares
834   // all of the bytes, unaware of which bytes are relevant to a given `eval_`.
835   using MethodPtr = bool (Condition::*)();
836   char callback_[sizeof(MethodPtr)] = {0};
837 #else
838   // It is well known that the largest MSVC pointer-to-member is 24 bytes. This
839   // may be the largest known pointer-to-member of any platform. For this
840   // reason we will allocate 24 bytes for MSVC platform toolchains.
841   char callback_[24] = {0};
842 #endif
843 
844   // Function with which to evaluate callbacks and/or arguments.
845   bool (*eval_)(const Condition*) = nullptr;
846 
847   // Either an argument for a function call or an object for a method call.
848   void* arg_ = nullptr;
849 
850   // Various functions eval_ can point to:
851   static bool CallVoidPtrFunction(const Condition*);
852   template <typename T>
853   static bool CastAndCallFunction(const Condition* c);
854   template <typename T, typename ConditionMethodPtr>
855   static bool CastAndCallMethod(const Condition* c);
856 
857   // Helper methods for storing, validating, and reading callback arguments.
858   template <typename T>
859   inline void StoreCallback(T callback) {
860     static_assert(
861         sizeof(callback) <= sizeof(callback_),
862         "An overlarge pointer was passed as a callback to Condition.");
863     std::memcpy(callback_, &callback, sizeof(callback));
864   }
865 
866   template <typename T>
867   inline void ReadCallback(T* callback) const {
868     std::memcpy(callback, callback_, sizeof(*callback));
869   }
870 
871   static bool AlwaysTrue(const Condition*) { return true; }
872 
873   // Used only to create kTrue.
874   constexpr Condition() : eval_(AlwaysTrue), arg_(nullptr) {}
875 };
876 
877 // -----------------------------------------------------------------------------
878 // CondVar
879 // -----------------------------------------------------------------------------
880 //
881 // A condition variable, reflecting state evaluated separately outside of the
882 // `Mutex` object, which can be signaled to wake callers.
883 // This class is not normally needed; use `Mutex` member functions such as
884 // `Mutex::Await()` and intrinsic `Condition` abstractions. In rare cases
885 // with many threads and many conditions, `CondVar` may be faster.
886 //
887 // The implementation may deliver signals to any condition variable at
888 // any time, even when no call to `Signal()` or `SignalAll()` is made; as a
889 // result, upon being awoken, you must check the logical condition you have
890 // been waiting upon.
891 //
892 // Examples:
893 //
894 // Usage for a thread waiting for some condition C protected by mutex mu:
895 //       mu.Lock();
896 //       while (!C) { cv->Wait(&mu); }        // releases and reacquires mu
897 //       //  C holds; process data
898 //       mu.Unlock();
899 //
900 // Usage to wake a waiting thread is:
901 //       mu.Lock();
902 //       // process data, possibly establishing C
903 //       if (C) { cv->Signal(); }
904 //       mu.Unlock();
905 //
906 // If C may be useful to more than one waiter, use `SignalAll()` instead of
907 // `Signal()`.
908 //
909 // With this implementation it is efficient to use `Signal()/SignalAll()` inside
910 // the locked region; this usage can make reasoning about your program easier.
911 //
912 class CondVar {
913  public:
914   // A `CondVar` allocated on the heap or on the stack can use this
915   // constructor.
916   CondVar();
917 
918   // CondVar::Wait()
919   //
920   // Atomically releases a `Mutex` and blocks on this condition variable.
921   // Waits until awakened by a call to `Signal()` or `SignalAll()` (or a
922   // spurious wakeup), then reacquires the `Mutex` and returns.
923   //
924   // Requires and ensures that the current thread holds the `Mutex`.
925   void Wait(Mutex* mu) {
926     WaitCommon(mu, synchronization_internal::KernelTimeout::Never());
927   }
928 
929   // CondVar::WaitWithTimeout()
930   //
931   // Atomically releases a `Mutex` and blocks on this condition variable.
932   // Waits until awakened by a call to `Signal()` or `SignalAll()` (or a
933   // spurious wakeup), or until the timeout has expired, then reacquires
934   // the `Mutex` and returns.
935   //
936   // Returns true if the timeout has expired without this `CondVar`
937   // being signalled in any manner. If both the timeout has expired
938   // and this `CondVar` has been signalled, the implementation is free
939   // to return `true` or `false`.
940   //
941   // Requires and ensures that the current thread holds the `Mutex`.
942   bool WaitWithTimeout(Mutex* mu, absl::Duration timeout) {
943     return WaitCommon(mu, synchronization_internal::KernelTimeout(timeout));
944   }
945 
946   // CondVar::WaitWithDeadline()
947   //
948   // Atomically releases a `Mutex` and blocks on this condition variable.
949   // Waits until awakened by a call to `Signal()` or `SignalAll()` (or a
950   // spurious wakeup), or until the deadline has passed, then reacquires
951   // the `Mutex` and returns.
952   //
953   // Deadlines in the past are equivalent to an immediate deadline.
954   //
955   // Returns true if the deadline has passed without this `CondVar`
956   // being signalled in any manner. If both the deadline has passed
957   // and this `CondVar` has been signalled, the implementation is free
958   // to return `true` or `false`.
959   //
960   // Requires and ensures that the current thread holds the `Mutex`.
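  //
  // A usage sketch (`mu`, `cv`, and `ready` are hypothetical; `ready` is
  // protected by `mu`):
  //
  //   absl::Time deadline = absl::Now() + absl::Seconds(5);
  //   mu.Lock();
  //   while (!ready && !cv.WaitWithDeadline(&mu, deadline)) {
  //     // Woken before the deadline but ready is still false; wait again.
  //   }
  //   // Here ready is true or the deadline has passed; mu is held either way.
  //   mu.Unlock();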
961   bool WaitWithDeadline(Mutex* mu, absl::Time deadline) {
962     return WaitCommon(mu, synchronization_internal::KernelTimeout(deadline));
963   }
964 
965   // CondVar::Signal()
966   //
967   // Signal this `CondVar`; wake at least one waiter if one exists.
968   void Signal();
969 
970   // CondVar::SignalAll()
971   //
972   // Signal this `CondVar`; wake all waiters.
973   void SignalAll();
974 
975   // CondVar::EnableDebugLog()
976   //
977   // Causes all subsequent uses of this `CondVar` to be logged via
978   // `ABSL_RAW_LOG(INFO)`. Log entries are tagged with `name` if `name != 0`.
979   // Note: this method substantially reduces `CondVar` performance.
980   void EnableDebugLog(const char* name);
981 
982  private:
983   bool WaitCommon(Mutex* mutex, synchronization_internal::KernelTimeout t);
984   void Remove(base_internal::PerThreadSynch* s);
985   std::atomic<intptr_t> cv_;  // Condition variable state.
986   CondVar(const CondVar&) = delete;
987   CondVar& operator=(const CondVar&) = delete;
988 };
989 
990 // Variants of MutexLock.
991 //
992 // If you find yourself using one of these, consider instead using
993 // Mutex::Unlock() and/or if-statements for clarity.
994 
995 // MutexLockMaybe
996 //
997 // MutexLockMaybe is like MutexLock, but is a no-op when mu is null.
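//
// Example (a sketch; `DoWork()` is hypothetical):
//
//   void DoWork(absl::Mutex* mu) {     // mu may be null
//     absl::MutexLockMaybe lock(mu);   // no-op when mu == nullptr
//     // ...
//   }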
998 class ABSL_SCOPED_LOCKABLE MutexLockMaybe {
999  public:
1000   explicit MutexLockMaybe(Mutex* mu) ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
1001       : mu_(mu) {
1002     if (this->mu_ != nullptr) {
1003       this->mu_->Lock();
1004     }
1005   }
1006 
1007   explicit MutexLockMaybe(Mutex* mu, const Condition& cond)
1008       ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
1009       : mu_(mu) {
1010     if (this->mu_ != nullptr) {
1011       this->mu_->LockWhen(cond);
1012     }
1013   }
1014 
1015   ~MutexLockMaybe() ABSL_UNLOCK_FUNCTION() {
1016     if (this->mu_ != nullptr) {
1017       this->mu_->Unlock();
1018     }
1019   }
1020 
1021  private:
1022   Mutex* const mu_;
1023   MutexLockMaybe(const MutexLockMaybe&) = delete;
1024   MutexLockMaybe(MutexLockMaybe&&) = delete;
1025   MutexLockMaybe& operator=(const MutexLockMaybe&) = delete;
1026   MutexLockMaybe& operator=(MutexLockMaybe&&) = delete;
1027 };
1028 
1029 // ReleasableMutexLock
1030 //
1031 // ReleasableMutexLock is like MutexLock, but permits `Release()` of its
1032 // mutex before destruction. `Release()` may be called at most once.
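//
// Example (a sketch; `CanFinishEarly()` and `DoUnlockedWork()` are
// hypothetical):
//
//   absl::ReleasableMutexLock lock(&mu_);
//   if (CanFinishEarly()) {
//     lock.Release();     // unlocks now; the destructor will not unlock again
//     DoUnlockedWork();
//   }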
1033 class ABSL_SCOPED_LOCKABLE ReleasableMutexLock {
1034  public:
1035   explicit ReleasableMutexLock(Mutex* mu) ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
1036       : mu_(mu) {
1037     this->mu_->Lock();
1038   }
1039 
1040   explicit ReleasableMutexLock(Mutex* mu, const Condition& cond)
1041       ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
1042       : mu_(mu) {
1043     this->mu_->LockWhen(cond);
1044   }
1045 
1046   ~ReleasableMutexLock() ABSL_UNLOCK_FUNCTION() {
1047     if (this->mu_ != nullptr) {
1048       this->mu_->Unlock();
1049     }
1050   }
1051 
1052   void Release() ABSL_UNLOCK_FUNCTION();
1053 
1054  private:
1055   Mutex* mu_;
1056   ReleasableMutexLock(const ReleasableMutexLock&) = delete;
1057   ReleasableMutexLock(ReleasableMutexLock&&) = delete;
1058   ReleasableMutexLock& operator=(const ReleasableMutexLock&) = delete;
1059   ReleasableMutexLock& operator=(ReleasableMutexLock&&) = delete;
1060 };
1061 
1062 inline Mutex::Mutex() : mu_(0) {
1063   ABSL_TSAN_MUTEX_CREATE(this, __tsan_mutex_not_static);
1064 }
1065 
1066 inline constexpr Mutex::Mutex(absl::ConstInitType) : mu_(0) {}
1067 
1068 #if !defined(__APPLE__) && !defined(ABSL_BUILD_DLL)
1069 ABSL_ATTRIBUTE_ALWAYS_INLINE
1070 inline Mutex::~Mutex() { Dtor(); }
1071 #endif
1072 
1073 #if defined(NDEBUG) && !defined(ABSL_HAVE_THREAD_SANITIZER)
1074 // Use default (empty) destructor in release build for performance reasons.
1075 // We need to mark both Dtor and ~Mutex as always inline for inconsistent
1076 // builds that use both NDEBUG and !NDEBUG with dynamic libraries. In these
1077 // cases we want the empty functions to dissolve entirely rather than being
1078 // exported from dynamic libraries and potentially override the non-empty ones.
1079 ABSL_ATTRIBUTE_ALWAYS_INLINE
1080 inline void Mutex::Dtor() {}
1081 #endif
1082 
1083 inline CondVar::CondVar() : cv_(0) {}
1084 
1085 // static
1086 template <typename T, typename ConditionMethodPtr>
1087 bool Condition::CastAndCallMethod(const Condition* c) {
1088   T* object = static_cast<T*>(c->arg_);
1089   ConditionMethodPtr condition_method_pointer;
1090   c->ReadCallback(&condition_method_pointer);
1091   return (object->*condition_method_pointer)();
1092 }
1093 
1094 // static
1095 template <typename T>
1096 bool Condition::CastAndCallFunction(const Condition* c) {
1097   bool (*function)(T*);
1098   c->ReadCallback(&function);
1099   T* argument = static_cast<T*>(c->arg_);
1100   return (*function)(argument);
1101 }
1102 
1103 template <typename T>
1104 inline Condition::Condition(bool (*func)(T*), T* arg)
1105     : eval_(&CastAndCallFunction<T>),
1106       arg_(const_cast<void*>(static_cast<const void*>(arg))) {
1107   static_assert(sizeof(&func) <= sizeof(callback_),
1108                 "An overlarge function pointer was passed to Condition.");
1109   StoreCallback(func);
1110 }
1111 
1112 template <typename T, typename>
1113 inline Condition::Condition(
1114     bool (*func)(T*), typename absl::internal::type_identity<T>::type* arg)
1115     // Just delegate to the overload above.
1116     : Condition(func, arg) {}
1117 
1118 template <typename T>
1119 inline Condition::Condition(
1120     T* object, bool (absl::internal::type_identity<T>::type::*method)())
1121     : eval_(&CastAndCallMethod<T, decltype(method)>), arg_(object) {
1122   static_assert(sizeof(&method) <= sizeof(callback_),
1123                 "An overlarge method pointer was passed to Condition.");
1124   StoreCallback(method);
1125 }
1126 
1127 template <typename T>
1128 inline Condition::Condition(
1129     const T* object,
1130     bool (absl::internal::type_identity<T>::type::*method)() const)
1131     : eval_(&CastAndCallMethod<const T, decltype(method)>),
1132       arg_(reinterpret_cast<void*>(const_cast<T*>(object))) {
1133   StoreCallback(method);
1134 }
1135 
1136 // Register hooks for profiling support.
1137 //
1138 // The function pointer registered here will be called whenever a mutex is
1139 // contended.  The callback is given the cycles for which waiting happened (as
1140 // measured by //absl/base/internal/cycleclock.h, and which may not
1141 // be real "cycle" counts.)
1142 //
1143 // There is no ordering guarantee between when the hook is registered and when
1144 // callbacks will begin.  Only a single profiler can be installed in a running
1145 // binary; if this function is called a second time with a different function
1146 // pointer, the value is ignored (and will cause an assertion failure in debug
1147 // mode.)
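//
// A registration sketch (`MyMutexProfiler` and `RecordContentionSample()` are
// hypothetical):
//
//   void MyMutexProfiler(int64_t wait_cycles) {
//     RecordContentionSample(wait_cycles);
//   }
//   ...
//   absl::RegisterMutexProfiler(&MyMutexProfiler);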
1148 void RegisterMutexProfiler(void (*fn)(int64_t wait_cycles));
1149 
1150 // Register a hook for Mutex tracing.
1151 //
1152 // The function pointer registered here will be called whenever a mutex is
1153 // contended.  The callback is given an opaque handle to the contended mutex,
1154 // an event name, and the number of wait cycles (as measured by
1155 // //absl/base/internal/cycleclock.h, and which may not be real
1156 // "cycle" counts.)
1157 //
1158 // The only event name currently sent is "slow release".
1159 //
1160 // This has the same ordering and single-use limitations as
1161 // RegisterMutexProfiler() above.
1162 void RegisterMutexTracer(void (*fn)(const char* msg, const void* obj,
1163                                     int64_t wait_cycles));
1164 
1165 // Register a hook for CondVar tracing.
1166 //
1167 // The function pointer registered here will be called here on various CondVar
1168 // events.  The callback is given an opaque handle to the CondVar object and
1169 // a string identifying the event.  This is thread-safe, but only a single
1170 // tracer can be registered.
1171 //
1172 // Events that can be sent are "Wait", "Unwait", "Signal wakeup", and
1173 // "SignalAll wakeup".
1174 //
1175 // This has the same ordering and single-use limitations as
1176 // RegisterMutexProfiler() above.
1177 void RegisterCondVarTracer(void (*fn)(const char* msg, const void* cv));
1178 
1179 // EnableMutexInvariantDebugging()
1180 //
1181 // Enable or disable global support for Mutex invariant debugging.  If enabled,
1182 // then invariant predicates can be registered per-Mutex for debug checking.
1183 // See Mutex::EnableInvariantDebugging().
1184 void EnableMutexInvariantDebugging(bool enabled);
1185 
1186 // When in debug mode, and when the feature has been enabled globally, the
1187 // implementation will keep track of lock ordering and complain (or optionally
1188 // crash) if a cycle is detected in the acquired-before graph.
1189 
1190 // Possible modes of operation for the deadlock detector in debug mode.
1191 enum class OnDeadlockCycle {
1192   kIgnore,  // Neither report on nor attempt to track cycles in lock ordering
1193   kReport,  // Report lock cycles to stderr when detected
1194   kAbort,   // Report lock cycles to stderr when detected, then abort
1195 };
1196 
1197 // SetMutexDeadlockDetectionMode()
1198 //
1199 // Enable or disable global support for detection of potential deadlocks
1200 // due to Mutex lock ordering inversions.  When set to 'kIgnore', tracking of
1201 // lock ordering is disabled.  Otherwise, in debug builds, a lock ordering graph
1202 // will be maintained internally, and detected cycles will be reported in
1203 // the manner chosen here.
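//
// Example:
//
//   absl::SetMutexDeadlockDetectionMode(absl::OnDeadlockCycle::kAbort);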
1204 void SetMutexDeadlockDetectionMode(OnDeadlockCycle mode);
1205 
1206 ABSL_NAMESPACE_END
1207 }  // namespace absl
1208 
1209 // In some build configurations we pass --detect-odr-violations to the
1210 // gold linker.  This causes it to flag weak symbol overrides as ODR
1211 // violations.  Because ODR only applies to C++ and not C,
1212 // --detect-odr-violations ignores symbols not mangled with C++ names.
1213 // By changing our extension points to be extern "C", we dodge this
1214 // check.
1215 extern "C" {
1216 void ABSL_INTERNAL_C_SYMBOL(AbslInternalMutexYield)();
1217 }  // extern "C"
1218 
1219 #endif  // ABSL_SYNCHRONIZATION_MUTEX_H_
1220