// Copyright 2017 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// -----------------------------------------------------------------------------
// mutex.h
// -----------------------------------------------------------------------------
//
// This header file defines a `Mutex` -- a mutually exclusive lock -- and the
// most common type of synchronization primitive for facilitating locks on
// shared resources. A mutex is used to prevent multiple threads from accessing
// and/or writing to a shared resource concurrently.
//
// Unlike a `std::mutex`, the Abseil `Mutex` provides the following additional
// features:
//   * Conditional predicates intrinsic to the `Mutex` object
//   * Shared/reader locks, in addition to standard exclusive/writer locks
//   * Deadlock detection and debug support.
//
// The following helper classes are also defined within this file:
//
//  MutexLock - An RAII wrapper to acquire and release a `Mutex` for exclusive/
//              write access within the current scope.
//
//  ReaderMutexLock
//            - An RAII wrapper to acquire and release a `Mutex` for shared/read
//              access within the current scope.
//
//  WriterMutexLock
//            - Effectively an alias for `MutexLock` above, designed for use in
//              distinguishing reader and writer locks within code.
//
// In addition to simple mutex locks, this file also defines ways to perform
// locking under certain conditions.
//
//  Condition - (Preferred) Used to wait for a particular predicate that
//              depends on state protected by the `Mutex` to become true.
//  CondVar   - A lower-level variant of `Condition` that relies on
//              application code to explicitly signal the `CondVar` when
//              a condition has been met.
//
// See below for more information on using `Condition` or `CondVar`.
//
// Mutexes and mutex behavior can be quite complicated. The information within
// this header file is limited, as a result. Please consult the Mutex guide for
// more complete information and examples.

#ifndef ABSL_SYNCHRONIZATION_MUTEX_H_
#define ABSL_SYNCHRONIZATION_MUTEX_H_

#include <atomic>
#include <cstdint>
#include <cstring>
#include <iterator>
#include <string>

#include "absl/base/attributes.h"
#include "absl/base/const_init.h"
#include "absl/base/internal/identity.h"
#include "absl/base/internal/low_level_alloc.h"
#include "absl/base/internal/thread_identity.h"
#include "absl/base/internal/tsan_mutex_interface.h"
#include "absl/base/port.h"
#include "absl/base/thread_annotations.h"
#include "absl/synchronization/internal/kernel_timeout.h"
#include "absl/synchronization/internal/per_thread_sem.h"
#include "absl/time/time.h"

namespace absl {
ABSL_NAMESPACE_BEGIN

class Condition;
struct SynchWaitParams;

// -----------------------------------------------------------------------------
// Mutex
// -----------------------------------------------------------------------------
//
// A `Mutex` is a non-reentrant (aka non-recursive) Mutually Exclusive lock
// on some resource, typically a variable or data structure with associated
// invariants. Proper usage of mutexes prevents concurrent access by different
// threads to the same resource.
//
// A `Mutex` has two basic operations: `Mutex::Lock()` and `Mutex::Unlock()`.
// The `Lock()` operation *acquires* a `Mutex` (in a state known as an
// *exclusive* -- or *write* -- lock), and the `Unlock()` operation *releases* a
// Mutex. During the span of time between the Lock() and Unlock() operations,
// a mutex is said to be *held*. By design, all mutexes support exclusive/write
// locks, as this is the most common way to use a mutex.
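//
// Example (an illustrative sketch; `counter` and `counter_mu` are example
// names, not part of this API):
//
//   absl::Mutex counter_mu;
//   int counter ABSL_GUARDED_BY(counter_mu) = 0;
//
//   void Increment() {
//     counter_mu.Lock();    // blocks until the mutex is free, then acquires it
//     counter++;            // no other thread can hold counter_mu here
//     counter_mu.Unlock();  // must be called on the thread that locked
//   }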
//
// Mutex operations are only allowed under certain conditions; otherwise an
// operation is "invalid", and disallowed by the API. The conditions concern
// both the current state of the mutex and the identity of the threads that
// are performing the operations.
//
// The `Mutex` state machine for basic lock/unlock operations is quite simple:
//
//   |                | Lock()                 | Unlock() |
//   |----------------+------------------------+----------|
//   | Free           | Exclusive              | invalid  |
//   | Exclusive      | blocks, then exclusive | Free     |
//
// The full conditions are as follows.
//
// * Calls to `Unlock()` require that the mutex be held, and must be made in the
//   same thread that performed the corresponding `Lock()` operation which
//   acquired the mutex; otherwise the call is invalid.
//
// * The mutex being non-reentrant (or non-recursive) means that a call to
//   `Lock()` or `TryLock()` must not be made in a thread that already holds the
//   mutex; such a call is invalid.
//
// * In other words, the state of being "held" has both a temporal component
//   (from `Lock()` until `Unlock()`) as well as a thread identity component:
//   the mutex is held *by a particular thread*.
//
// An "invalid" operation has undefined behavior. The `Mutex` implementation
// is allowed to do anything on an invalid call, including, but not limited to,
// crashing with a useful error message, silently succeeding, or corrupting
// data structures. In debug mode, the implementation may crash with a useful
// error message.
//
// `Mutex` is not guaranteed to be "fair" in prioritizing waiting threads; it
// is, however, approximately fair over long periods, and starvation-free for
// threads at the same priority.
//
// The lock/unlock primitives are annotated with the lock annotations defined
// in `absl/base/thread_annotations.h`. When writing multi-threaded code, you
// should use these annotations whenever possible to document your lock
// synchronization policy. Besides acting as documentation, the annotations
// also help compilers and static analysis tools identify and warn about
// issues that could potentially result in race conditions and deadlocks.
//
// For more information about the lock annotations, please see
// [Thread Safety
// Analysis](http://clang.llvm.org/docs/ThreadSafetyAnalysis.html) in the Clang
// documentation.
//
// See also `MutexLock`, below, for scoped `Mutex` acquisition.

class ABSL_LOCKABLE ABSL_ATTRIBUTE_WARN_UNUSED Mutex {
 public:
  // Creates a `Mutex` that is not held by anyone. This constructor is
  // typically used for Mutexes allocated on the heap or the stack.
  //
  // To create `Mutex` instances with static storage duration
  // (e.g. a namespace-scoped or global variable), see
  // `Mutex::Mutex(absl::kConstInit)` below instead.
  Mutex();

  // Creates a mutex with static storage duration. A global variable
  // constructed this way avoids the lifetime issues that can occur on program
  // startup and shutdown. (See absl/base/const_init.h.)
  //
  // For Mutexes allocated on the heap and stack, instead use the default
  // constructor, which can interact more fully with the thread sanitizer.
  //
  // Example usage:
  //   namespace foo {
  //   ABSL_CONST_INIT absl::Mutex mu(absl::kConstInit);
  //   }
  explicit constexpr Mutex(absl::ConstInitType);

  ~Mutex();

  // Mutex::Lock()
  //
  // Blocks the calling thread, if necessary, until this `Mutex` is free, and
  // then acquires it exclusively. (This lock is also known as a "write lock.")
  void Lock() ABSL_EXCLUSIVE_LOCK_FUNCTION();

  // Mutex::Unlock()
  //
  // Releases this `Mutex` and returns it from the exclusive/write state to the
  // free state. Calling thread must hold the `Mutex` exclusively.
  void Unlock() ABSL_UNLOCK_FUNCTION();

  // Mutex::TryLock()
  //
  // If the mutex can be acquired without blocking, does so exclusively and
  // returns `true`. Otherwise, returns `false`. Returns `true` with high
  // probability if the `Mutex` was free.
  bool TryLock() ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(true);

  // Mutex::AssertHeld()
  //
  // Require that the mutex be held exclusively (write mode) by this thread.
  //
  // If the mutex is not currently held by this thread, this function may report
  // an error (typically by crashing with a diagnostic) or it may do nothing.
  // This function is intended only as a tool to assist debugging; it doesn't
  // guarantee correctness.
  void AssertHeld() const ABSL_ASSERT_EXCLUSIVE_LOCK();

  // ---------------------------------------------------------------------------
  // Reader-Writer Locking
  // ---------------------------------------------------------------------------

  // A Mutex can also be used as a starvation-free reader-writer lock.
  // Neither read-locks nor write-locks are reentrant/recursive to avoid
  // potential client programming errors.
  //
  // The Mutex API provides `Writer*()` aliases for the existing `Lock()`,
  // `Unlock()` and `TryLock()` methods for use within applications mixing
  // reader/writer locks. Using `Reader*()` and `Writer*()` operations in this
  // manner can make locking behavior clearer when mixing read and write modes.
  //
  // Introducing reader locks necessarily complicates the `Mutex` state
  // machine somewhat. The table below illustrates the allowed state transitions
  // of a mutex in such cases. Note that ReaderLock() may block even if the lock
  // is held in shared mode; this occurs when another thread is blocked on a
  // call to WriterLock().
  //
  // ---------------------------------------------------------------------------
  //   Operation:     WriterLock()  Unlock()  ReaderLock()           ReaderUnlock()
  // ---------------------------------------------------------------------------
  // State
  // ---------------------------------------------------------------------------
  //   Free           Exclusive     invalid   Shared(1)              invalid
  //   Shared(1)      blocks        invalid   Shared(2) or blocks    Free
  //   Shared(n) n>1  blocks        invalid   Shared(n+1) or blocks  Shared(n-1)
  //   Exclusive      blocks        Free      blocks                 invalid
  // ---------------------------------------------------------------------------
  //
  // In comments below, "shared" refers to a state of Shared(n) for any n > 0.
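  //
  // Example (an illustrative sketch; `config_mu` and `config` are example
  // names, not part of this API):
  //
  //   absl::Mutex config_mu;
  //   std::string config ABSL_GUARDED_BY(config_mu);
  //
  //   std::string ReadConfig() {
  //     config_mu.ReaderLock();   // shared mode: other readers may hold it too
  //     std::string copy = config;
  //     config_mu.ReaderUnlock();
  //     return copy;
  //   }
  //
  //   void SetConfig(const std::string& value) {
  //     config_mu.WriterLock();   // exclusive mode: excludes readers & writers
  //     config = value;
  //     config_mu.WriterUnlock();
  //   }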

  // Mutex::ReaderLock()
  //
  // Blocks the calling thread, if necessary, until this `Mutex` is either free,
  // or in shared mode, and then acquires a share of it. Note that
  // `ReaderLock()` will block if some other thread has an exclusive/writer lock
  // on the mutex.

  void ReaderLock() ABSL_SHARED_LOCK_FUNCTION();

  // Mutex::ReaderUnlock()
  //
  // Releases a read share of this `Mutex`. `ReaderUnlock` may return a mutex to
  // the free state if this thread holds the last reader lock on the mutex. Note
  // that you cannot call `ReaderUnlock()` on a mutex held in write mode.
  void ReaderUnlock() ABSL_UNLOCK_FUNCTION();

  // Mutex::ReaderTryLock()
  //
  // If the mutex can be acquired without blocking, acquires this mutex for
  // shared access and returns `true`. Otherwise, returns `false`. Returns
  // `true` with high probability if the `Mutex` was free or shared.
  bool ReaderTryLock() ABSL_SHARED_TRYLOCK_FUNCTION(true);

  // Mutex::AssertReaderHeld()
  //
  // Require that the mutex be held at least in shared mode (read mode) by this
  // thread.
  //
  // If the mutex is not currently held by this thread, this function may report
  // an error (typically by crashing with a diagnostic) or it may do nothing.
  // This function is intended only as a tool to assist debugging; it doesn't
  // guarantee correctness.
  void AssertReaderHeld() const ABSL_ASSERT_SHARED_LOCK();

  // Mutex::WriterLock()
  // Mutex::WriterUnlock()
  // Mutex::WriterTryLock()
  //
  // Aliases for `Mutex::Lock()`, `Mutex::Unlock()`, and `Mutex::TryLock()`.
  //
  // These methods may be used (along with the complementary `Reader*()`
  // methods) to distinguish simple exclusive `Mutex` usage (`Lock()`,
  // etc.) from reader/writer lock usage.
  void WriterLock() ABSL_EXCLUSIVE_LOCK_FUNCTION() { this->Lock(); }

  void WriterUnlock() ABSL_UNLOCK_FUNCTION() { this->Unlock(); }

  bool WriterTryLock() ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(true) {
    return this->TryLock();
  }

  // ---------------------------------------------------------------------------
  // Conditional Critical Regions
  // ---------------------------------------------------------------------------

  // Conditional usage of a `Mutex` can occur using two distinct paradigms:
  //
  //   * Use of `Mutex` member functions with `Condition` objects.
  //   * Use of the separate `CondVar` abstraction.
  //
  // In general, prefer use of `Condition` and the `Mutex` member functions
  // listed below over `CondVar`. When there are multiple threads waiting on
  // distinctly different conditions, however, a battery of `CondVar`s may be
  // more efficient. This section discusses use of `Condition` objects.
  //
  // `Mutex` contains member functions for performing lock operations only under
  // certain conditions, expressed as objects of class `Condition`. For
  // correctness, the `Condition` must be a pure function of state protected by
  // the `Mutex`: it must be invariant w.r.t. environmental state such as the
  // current thread, CPU id, or time, and it must be `noexcept`. The condition
  // will always be invoked with the mutex held in at least read mode, so it
  // should not block for long periods or sleep on a timer.
  //
  // Since a condition must not depend directly on the current time, use
  // `*WithTimeout()` member function variants to make your condition
  // effectively true after a given duration, or `*WithDeadline()` variants to
  // make your condition effectively true after a given time.
  //
  // The condition function should have no side-effects aside from debug
  // logging; as a special exception, the function may acquire other mutexes
  // provided it releases all those that it acquires. (This exception was
  // required to allow logging.)

  // Mutex::Await()
  //
  // Unlocks this `Mutex` and blocks until simultaneously both `cond` is `true`
  // and this `Mutex` can be reacquired, then reacquires this `Mutex` in the
  // same mode in which it was previously held. If the condition is initially
  // `true`, `Await()` *may* skip the release/re-acquire step.
  //
  // `Await()` requires that this thread holds this `Mutex` in some mode.
  void Await(const Condition& cond) {
    AwaitCommon(cond, synchronization_internal::KernelTimeout::Never());
  }
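
  // Example (a minimal sketch of `Await()`; `mu_` and `pending_` are
  // illustrative members, not part of this API):
  //
  //   int pending_ ABSL_GUARDED_BY(mu_) = 0;
  //
  //   void WaitForWork() ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
  //     // Releases mu_ while waiting and returns with mu_ reacquired once
  //     // pending_ > 0.
  //     mu_.Await(absl::Condition(
  //         +[](int* pending) { return *pending > 0; }, &pending_));
  //   }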

  // Mutex::LockWhen()
  // Mutex::ReaderLockWhen()
  // Mutex::WriterLockWhen()
  //
  // Blocks until simultaneously both `cond` is `true` and this `Mutex` can
  // be acquired, then atomically acquires this `Mutex`. `LockWhen()` is
  // logically equivalent to `*Lock(); Await();` though they may have different
  // performance characteristics.
  void LockWhen(const Condition& cond) ABSL_EXCLUSIVE_LOCK_FUNCTION() {
    LockWhenCommon(cond, synchronization_internal::KernelTimeout::Never(),
                   true);
  }

  void ReaderLockWhen(const Condition& cond) ABSL_SHARED_LOCK_FUNCTION() {
    LockWhenCommon(cond, synchronization_internal::KernelTimeout::Never(),
                   false);
  }

  void WriterLockWhen(const Condition& cond) ABSL_EXCLUSIVE_LOCK_FUNCTION() {
    this->LockWhen(cond);
  }

  // ---------------------------------------------------------------------------
  // Mutex Variants with Timeouts/Deadlines
  // ---------------------------------------------------------------------------

  // Mutex::AwaitWithTimeout()
  // Mutex::AwaitWithDeadline()
  //
  // Unlocks this `Mutex` and blocks until simultaneously:
  //   - either `cond` is true or the {timeout has expired, deadline has passed}
  //     and
  //   - this `Mutex` can be reacquired,
  // then reacquire this `Mutex` in the same mode in which it was previously
  // held, returning `true` iff `cond` is `true` on return.
  //
  // If the condition is initially `true`, the implementation *may* skip the
  // release/re-acquire step and return immediately.
  //
  // Deadlines in the past are equivalent to an immediate deadline.
  // Negative timeouts are equivalent to a zero timeout.
  //
  // This method requires that this thread holds this `Mutex` in some mode.
  bool AwaitWithTimeout(const Condition& cond, absl::Duration timeout) {
    return AwaitCommon(cond, synchronization_internal::KernelTimeout{timeout});
  }

  bool AwaitWithDeadline(const Condition& cond, absl::Time deadline) {
    return AwaitCommon(cond, synchronization_internal::KernelTimeout{deadline});
  }

  // Mutex::LockWhenWithTimeout()
  // Mutex::ReaderLockWhenWithTimeout()
  // Mutex::WriterLockWhenWithTimeout()
  //
  // Blocks until simultaneously both:
  //   - either `cond` is `true` or the timeout has expired, and
  //   - this `Mutex` can be acquired,
  // then atomically acquires this `Mutex`, returning `true` iff `cond` is
  // `true` on return.
  //
  // Negative timeouts are equivalent to a zero timeout.
  bool LockWhenWithTimeout(const Condition& cond, absl::Duration timeout)
      ABSL_EXCLUSIVE_LOCK_FUNCTION() {
    return LockWhenCommon(
        cond, synchronization_internal::KernelTimeout{timeout}, true);
  }
  bool ReaderLockWhenWithTimeout(const Condition& cond, absl::Duration timeout)
      ABSL_SHARED_LOCK_FUNCTION() {
    return LockWhenCommon(
        cond, synchronization_internal::KernelTimeout{timeout}, false);
  }
  bool WriterLockWhenWithTimeout(const Condition& cond, absl::Duration timeout)
      ABSL_EXCLUSIVE_LOCK_FUNCTION() {
    return this->LockWhenWithTimeout(cond, timeout);
  }
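
  // Example (an illustrative sketch; `mu_` and `ready_` are example members,
  // not part of this API):
  //
  //   bool ready_ ABSL_GUARDED_BY(mu_) = false;
  //
  //   bool WaitForReady() {
  //     // Returns with mu_ held either way; the return value reports whether
  //     // ready_ was true on return, not whether the timeout fired.
  //     bool ok = mu_.LockWhenWithTimeout(absl::Condition(&ready_),
  //                                       absl::Seconds(5));
  //     mu_.Unlock();
  //     return ok;
  //   }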

  // Mutex::LockWhenWithDeadline()
  // Mutex::ReaderLockWhenWithDeadline()
  // Mutex::WriterLockWhenWithDeadline()
  //
  // Blocks until simultaneously both:
  //   - either `cond` is `true` or the deadline has been passed, and
  //   - this `Mutex` can be acquired,
  // then atomically acquires this Mutex, returning `true` iff `cond` is `true`
  // on return.
  //
  // Deadlines in the past are equivalent to an immediate deadline.
  bool LockWhenWithDeadline(const Condition& cond, absl::Time deadline)
      ABSL_EXCLUSIVE_LOCK_FUNCTION() {
    return LockWhenCommon(
        cond, synchronization_internal::KernelTimeout{deadline}, true);
  }
  bool ReaderLockWhenWithDeadline(const Condition& cond, absl::Time deadline)
      ABSL_SHARED_LOCK_FUNCTION() {
    return LockWhenCommon(
        cond, synchronization_internal::KernelTimeout{deadline}, false);
  }
  bool WriterLockWhenWithDeadline(const Condition& cond, absl::Time deadline)
      ABSL_EXCLUSIVE_LOCK_FUNCTION() {
    return this->LockWhenWithDeadline(cond, deadline);
  }

  // ---------------------------------------------------------------------------
  // Debug Support: Invariant Checking, Deadlock Detection, Logging.
  // ---------------------------------------------------------------------------

  // Mutex::EnableInvariantDebugging()
  //
  // If `invariant` != null and if invariant debugging has been enabled
  // globally, cause `(*invariant)(arg)` to be called at moments when the
  // invariant for this `Mutex` should hold (for example: just after acquire,
  // just before release).
  //
  // The routine `invariant` should have no side-effects since it is not
  // guaranteed how many times it will be called; it should check the invariant
  // and crash if it does not hold. Enabling global invariant debugging may
  // substantially reduce `Mutex` performance; it should be set only for
  // non-production runs. Optimization options may also disable invariant
  // checks.
  void EnableInvariantDebugging(void (*invariant)(void*), void* arg);
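
  // Example (a minimal sketch; `Table`, `CheckTableInvariant`, and the fields
  // used below are hypothetical, not part of this API):
  //
  //   static void CheckTableInvariant(void* arg) {
  //     auto* table = static_cast<Table*>(arg);
  //     // Crash if the invariant does not hold; keep this side-effect free.
  //     if (table->live_entries > table->capacity) std::abort();
  //   }
  //   ...
  //   table->mu.EnableInvariantDebugging(&CheckTableInvariant, table);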

  // Mutex::EnableDebugLog()
  //
  // Cause all subsequent uses of this `Mutex` to be logged via
  // `ABSL_RAW_LOG(INFO)`. Log entries are tagged with `name` if no previous
  // call to `EnableInvariantDebugging()` or `EnableDebugLog()` has been made.
  //
  // Note: This method substantially reduces `Mutex` performance.
  void EnableDebugLog(const char* name);

  // Deadlock detection

  // Mutex::ForgetDeadlockInfo()
  //
  // Forget any deadlock-detection information previously gathered
  // about this `Mutex`. Call this method in debug mode when the lock ordering
  // of a `Mutex` changes.
  void ForgetDeadlockInfo();

  // Mutex::AssertNotHeld()
  //
  // Return immediately if this thread does not hold this `Mutex` in any
  // mode; otherwise, may report an error (typically by crashing with a
  // diagnostic), or may return immediately.
  //
  // Currently this check is performed only if all of:
  //    - in debug mode
  //    - SetMutexDeadlockDetectionMode() has been set to kReport or kAbort
  //    - number of locks concurrently held by this thread is not large.
  // are true.
  void AssertNotHeld() const;

  // Special cases.

  // A `MuHow` is a constant that indicates how a lock should be acquired.
  // Internal implementation detail. Clients should ignore.
  typedef const struct MuHowS* MuHow;

  // Mutex::InternalAttemptToUseMutexInFatalSignalHandler()
  //
  // Causes the `Mutex` implementation to prepare itself for re-entry caused by
  // future use of `Mutex` within a fatal signal handler. This method is
  // intended for use only for last-ditch attempts to log crash information.
  // It does not guarantee that attempts to use Mutexes within the handler will
  // not deadlock; it merely makes other faults less likely.
  //
  // WARNING: This routine must be invoked from a signal handler, and the
  // signal handler must either loop forever or terminate the process.
  // Attempts to return from (or `longjmp` out of) the signal handler once this
  // call has been made may cause arbitrary program behaviour including
  // crashes and deadlocks.
  static void InternalAttemptToUseMutexInFatalSignalHandler();

 private:
  std::atomic<intptr_t> mu_;  // The Mutex state.

  // Post()/Wait() versus associated PerThreadSem; in class for required
  // friendship with PerThreadSem.
  static void IncrementSynchSem(Mutex* mu, base_internal::PerThreadSynch* w);
  static bool DecrementSynchSem(Mutex* mu, base_internal::PerThreadSynch* w,
                                synchronization_internal::KernelTimeout t);

  // slow path acquire
  void LockSlowLoop(SynchWaitParams* waitp, int flags);
  // wrappers around LockSlowLoop()
  bool LockSlowWithDeadline(MuHow how, const Condition* cond,
                            synchronization_internal::KernelTimeout t,
                            int flags);
  void LockSlow(MuHow how, const Condition* cond,
                int flags) ABSL_ATTRIBUTE_COLD;
  // slow path release
  void UnlockSlow(SynchWaitParams* waitp) ABSL_ATTRIBUTE_COLD;
  // TryLock slow path.
  bool TryLockSlow();
  // ReaderTryLock slow path.
  bool ReaderTryLockSlow();
  // Common code between Await() and AwaitWithTimeout/Deadline()
  bool AwaitCommon(const Condition& cond,
                   synchronization_internal::KernelTimeout t);
  bool LockWhenCommon(const Condition& cond,
                      synchronization_internal::KernelTimeout t, bool write);
  // Attempt to remove thread s from queue.
  void TryRemove(base_internal::PerThreadSynch* s);
  // Block a thread on mutex.
  void Block(base_internal::PerThreadSynch* s);
  // Wake a thread; return successor.
  base_internal::PerThreadSynch* Wakeup(base_internal::PerThreadSynch* w);
  void Dtor();

  friend class CondVar;   // for access to Trans()/Fer().
  void Trans(MuHow how);  // used for CondVar->Mutex transfer
  void Fer(
      base_internal::PerThreadSynch* w);  // used for CondVar->Mutex transfer

  // Catch the error of writing Mutex when intending MutexLock.
  explicit Mutex(const volatile Mutex* /*ignored*/) {}

  Mutex(const Mutex&) = delete;
  Mutex& operator=(const Mutex&) = delete;
};

// -----------------------------------------------------------------------------
// Mutex RAII Wrappers
// -----------------------------------------------------------------------------

// MutexLock
//
// `MutexLock` is a helper class, which acquires and releases a `Mutex` via
// RAII.
//
// Example:
//
//   class Foo {
//    public:
//     Foo::Bar* Baz() {
//       MutexLock lock(&mu_);
//       ...
//       return bar;
//     }
//
//    private:
//     Mutex mu_;
//   };
class ABSL_SCOPED_LOCKABLE MutexLock {
 public:
  // Constructors

  // Calls `mu->Lock()` and returns when that call returns. That is, `*mu` is
  // guaranteed to be locked when this object is constructed. Requires that
  // `mu` be dereferenceable.
  explicit MutexLock(Mutex* mu) ABSL_EXCLUSIVE_LOCK_FUNCTION(mu) : mu_(mu) {
    this->mu_->Lock();
  }

  // Like above, but calls `mu->LockWhen(cond)` instead. That is, in addition to
  // the above, the condition given by `cond` is also guaranteed to hold when
  // this object is constructed.
  explicit MutexLock(Mutex* mu, const Condition& cond)
      ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
      : mu_(mu) {
    this->mu_->LockWhen(cond);
  }

  MutexLock(const MutexLock&) = delete;  // NOLINT(runtime/mutex)
  MutexLock(MutexLock&&) = delete;  // NOLINT(runtime/mutex)
  MutexLock& operator=(const MutexLock&) = delete;
  MutexLock& operator=(MutexLock&&) = delete;

  ~MutexLock() ABSL_UNLOCK_FUNCTION() { this->mu_->Unlock(); }

 private:
  Mutex* const mu_;
};

// ReaderMutexLock
//
// The `ReaderMutexLock` is a helper class, like `MutexLock`, which acquires and
// releases a shared lock on a `Mutex` via RAII.
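//
// Example (analogous to the `MutexLock` example above; the names are
// illustrative, not part of this API):
//
//   int Foo::GetCount() {
//     ReaderMutexLock lock(&mu_);  // shared access suffices for a read
//     return count_;
//   }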
class ABSL_SCOPED_LOCKABLE ReaderMutexLock {
 public:
  explicit ReaderMutexLock(Mutex* mu) ABSL_SHARED_LOCK_FUNCTION(mu) : mu_(mu) {
    mu->ReaderLock();
  }

  explicit ReaderMutexLock(Mutex* mu, const Condition& cond)
      ABSL_SHARED_LOCK_FUNCTION(mu)
      : mu_(mu) {
    mu->ReaderLockWhen(cond);
  }

  ReaderMutexLock(const ReaderMutexLock&) = delete;
  ReaderMutexLock(ReaderMutexLock&&) = delete;
  ReaderMutexLock& operator=(const ReaderMutexLock&) = delete;
  ReaderMutexLock& operator=(ReaderMutexLock&&) = delete;

  ~ReaderMutexLock() ABSL_UNLOCK_FUNCTION() { this->mu_->ReaderUnlock(); }

 private:
  Mutex* const mu_;
};

// WriterMutexLock
//
// The `WriterMutexLock` is a helper class, like `MutexLock`, which acquires and
// releases a write (exclusive) lock on a `Mutex` via RAII.
class ABSL_SCOPED_LOCKABLE WriterMutexLock {
 public:
  explicit WriterMutexLock(Mutex* mu) ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
      : mu_(mu) {
    mu->WriterLock();
  }

  explicit WriterMutexLock(Mutex* mu, const Condition& cond)
      ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
      : mu_(mu) {
    mu->WriterLockWhen(cond);
  }

  WriterMutexLock(const WriterMutexLock&) = delete;
  WriterMutexLock(WriterMutexLock&&) = delete;
  WriterMutexLock& operator=(const WriterMutexLock&) = delete;
  WriterMutexLock& operator=(WriterMutexLock&&) = delete;

  ~WriterMutexLock() ABSL_UNLOCK_FUNCTION() { this->mu_->WriterUnlock(); }

 private:
  Mutex* const mu_;
};

// -----------------------------------------------------------------------------
// Condition
// -----------------------------------------------------------------------------
//
// `Mutex` contains a number of member functions which take a `Condition` as an
// argument; clients can wait for conditions to become `true` before attempting
// to acquire the mutex. These sections are known as "condition critical"
// sections. To use a `Condition`, you simply need to construct it and use it
// within an appropriate `Mutex` member function; everything else in the
// `Condition` class is an implementation detail.
//
// A `Condition` is specified as a function pointer which returns a boolean.
// `Condition` functions should be pure functions -- their results should depend
// only on passed arguments, should not consult any external state (such as
// clocks), and should have no side-effects, aside from debug logging. Any
// objects that the function may access should be limited to those which are
// constant while the mutex is blocked on the condition (e.g. a stack variable),
// or objects of state protected explicitly by the mutex.
//
// No matter which construction is used for `Condition`, the underlying
// function pointer / functor / callable must not throw any
// exceptions. Correctness of `Mutex` / `Condition` is not guaranteed in
// the face of a throwing `Condition`. (When Abseil is allowed to depend
// on C++17, these function pointers will be explicitly marked
// `noexcept`; until then this requirement cannot be enforced in the
// type system.)
//
// Note: to use a `Condition`, you need only construct it and pass it to a
// suitable `Mutex` member function, such as `Mutex::Await()`, or to the
// constructor of one of the scope guard classes.
//
// Example using LockWhen/Unlock:
//
//   // assume count_ is not an internal reference count
//   int count_ ABSL_GUARDED_BY(mu_);
//   Condition count_is_zero(+[](int* count) { return *count == 0; }, &count_);
//
//   mu_.LockWhen(count_is_zero);
//   // ...
//   mu_.Unlock();
//
// Example using a scope guard:
//
//   {
//     MutexLock lock(&mu_, count_is_zero);
//     // ...
//   }
//
// When multiple threads are waiting on exactly the same condition, make sure
// that they are constructed with the same parameters (same pointer to function
// + arg, or same pointer to object + method), so that the mutex implementation
// can avoid redundantly evaluating the same condition for each thread.
class Condition {
 public:
  // A Condition that returns the result of "(*func)(arg)"
  Condition(bool (*func)(void*), void* arg);

  // Templated version for people who are averse to casts.
  //
  // To use a lambda, prepend it with unary plus, which converts the lambda
  // into a function pointer:
  //   Condition(+[](T* t) { return ...; }, arg).
  //
  // Note: lambdas in this case must contain no bound variables.
  //
  // See class comment for performance advice.
  template <typename T>
  Condition(bool (*func)(T*), T* arg);

  // Same as above, but allows for cases where `arg` comes from a pointer that
  // is convertible to the function parameter type `T*` but not an exact match.
  //
  // For example, the argument might be `X*` but the function takes `const X*`,
  // or the argument might be `Derived*` while the function takes `Base*`, and
  // so on for cases where the argument pointer can be implicitly converted.
  //
  // Implementation notes: This constructor overload is required in addition to
  // the one above to allow deduction of `T` from `arg` for cases such as where
  // a function template is passed as `func`. Also, the dummy `typename = void`
  // template parameter exists just to work around a MSVC mangling bug.
  template <typename T, typename = void>
  Condition(bool (*func)(T*),
            typename absl::internal::type_identity<T>::type* arg);

  // Templated version for invoking a method that returns a `bool`.
  //
  // `Condition(object, &Class::Method)` constructs a `Condition` that evaluates
  // `object->Method()`.
  //
  // Implementation Note: `absl::internal::type_identity` is used to allow
  // methods to come from base classes. A simpler signature like
  // `Condition(T*, bool (T::*)())` does not suffice.
  template <typename T>
  Condition(T* object,
            bool (absl::internal::type_identity<T>::type::*method)());

  // Same as above, for const members
  template <typename T>
  Condition(const T* object,
            bool (absl::internal::type_identity<T>::type::*method)() const);

  // A Condition that returns the value of `*cond`
  explicit Condition(const bool* cond);

  // Templated version for invoking a functor that returns a `bool`.
  // This approach accepts pointers to non-mutable lambdas, `std::function`,
  // the result of `std::bind` and user-defined functors that define
  // `bool F::operator()() const`.
  //
  // Example:
  //
  //   auto reached = [this, current]() {
  //     mu_.AssertReaderHeld();  // For annotalysis.
  //     return processed_ >= current;
  //   };
  //   mu_.Await(Condition(&reached));
  //
  // NOTE: never use "mu_.AssertHeld()" instead of "mu_.AssertReaderHeld()" in
  // the lambda as it may be called when the mutex is being unlocked from a
  // scope holding only a reader lock, which will make the assertion not
  // fulfilled and crash the binary.

  // See class comment for performance advice. In particular, if there
  // might be more than one waiter for the same condition, make sure
  // that all waiters construct the condition with the same pointers.

  // Implementation note: The second template parameter ensures that this
  // constructor doesn't participate in overload resolution if T doesn't have
  // `bool operator() const`.
  template <typename T, typename E = decltype(static_cast<bool (T::*)() const>(
                            &T::operator()))>
  explicit Condition(const T* obj)
      : Condition(obj, static_cast<bool (T::*)() const>(&T::operator())) {}

  // A Condition that always returns `true`.
  // kTrue is only useful in a narrow set of circumstances, mostly when
  // it's passed conditionally. For example:
  //
  //   mu.LockWhen(some_flag ? kTrue : SomeOtherCondition);
  //
  // Note: {LockWhen,Await}With{Deadline,Timeout} methods with kTrue condition
  // don't return immediately when the timeout happens; they still block until
  // the Mutex becomes available. The return value of these methods does
  // not indicate if the timeout was reached; rather it indicates whether or
  // not the condition is true.
  ABSL_CONST_INIT static const Condition kTrue;

  // Evaluates the condition.
  bool Eval() const;

  // Returns `true` if the two conditions are guaranteed to return the same
  // value if evaluated at the same time, `false` if the evaluation *may* return
  // different results.
  //
  // Two `Condition` values are guaranteed equal if both their `func` and `arg`
  // components are the same. A null pointer is equivalent to a `true`
  // condition.
  static bool GuaranteedEqual(const Condition* a, const Condition* b);

 private:
  // Sizing an allocation for a method pointer can be subtle. In the Itanium
  // specifications, a method pointer has a predictable, uniform size. In the
  // MSVC ABI, on the other hand, method pointer sizes vary based on the
  // inheritance structure of the class. Specifically, method pointers from
  // classes with multiple inheritance are bigger than those of classes with
  // single inheritance. Other variations also exist.

#ifndef _MSC_VER
  // Allocation for a function pointer or method pointer.
  // The {0} initializer ensures that all unused bytes of this buffer are
  // always zeroed out. This is necessary, because GuaranteedEqual() compares
  // all of the bytes, unaware of which bytes are relevant to a given `eval_`.
  using MethodPtr = bool (Condition::*)();
  char callback_[sizeof(MethodPtr)] = {0};
#else
  // It is well known that the largest MSVC pointer-to-member is 24 bytes. This
  // may be the largest known pointer-to-member of any platform. For this
  // reason we will allocate 24 bytes for MSVC platform toolchains.
  char callback_[24] = {0};
#endif

  // Function with which to evaluate callbacks and/or arguments.
  bool (*eval_)(const Condition*) = nullptr;

  // Either an argument for a function call or an object for a method call.
  void* arg_ = nullptr;

  // Various functions eval_ can point to:
  static bool CallVoidPtrFunction(const Condition*);
  template <typename T>
  static bool CastAndCallFunction(const Condition* c);
  template <typename T, typename ConditionMethodPtr>
  static bool CastAndCallMethod(const Condition* c);

  // Helper methods for storing, validating, and reading callback arguments.
  template <typename T>
  inline void StoreCallback(T callback) {
    static_assert(
        sizeof(callback) <= sizeof(callback_),
        "An overlarge pointer was passed as a callback to Condition.");
    std::memcpy(callback_, &callback, sizeof(callback));
  }

  template <typename T>
  inline void ReadCallback(T* callback) const {
    std::memcpy(callback, callback_, sizeof(*callback));
  }

  static bool AlwaysTrue(const Condition*) { return true; }

  // Used only to create kTrue.
  constexpr Condition() : eval_(AlwaysTrue), arg_(nullptr) {}
};

// -----------------------------------------------------------------------------
// CondVar
// -----------------------------------------------------------------------------
//
// A condition variable, reflecting state evaluated separately outside of the
// `Mutex` object, which can be signaled to wake callers.
// This class is not normally needed; use `Mutex` member functions such as
// `Mutex::Await()` and intrinsic `Condition` abstractions. In rare cases
// with many threads and many conditions, `CondVar` may be faster.
//
// The implementation may deliver signals to any condition variable at
// any time, even when no call to `Signal()` or `SignalAll()` is made; as a
// result, upon being awoken, you must check the logical condition you have
// been waiting upon.
//
// Examples:
//
// Usage for a thread waiting for some condition C protected by mutex mu:
//       mu.Lock();
//       while (!C) { cv->Wait(&mu); }        // releases and reacquires mu
//       // C holds; process data
//       mu.Unlock();
//
// Usage for a thread waking the waiter above is:
//       mu.Lock();
//       // process data, possibly establishing C
//       if (C) { cv->Signal(); }
//       mu.Unlock();
//
// If C may be useful to more than one waiter, use `SignalAll()` instead of
// `Signal()`.
//
// With this implementation it is efficient to use `Signal()/SignalAll()` inside
// the locked region; this usage can make reasoning about your program easier.
//
class CondVar {
 public:
  // A `CondVar` allocated on the heap or on the stack can use this
  // constructor.
  CondVar();

  // CondVar::Wait()
  //
  // Atomically releases a `Mutex` and blocks on this condition variable.
  // Waits until awakened by a call to `Signal()` or `SignalAll()` (or a
  // spurious wakeup), then reacquires the `Mutex` and returns.
  //
  // Requires and ensures that the current thread holds the `Mutex`.
  void Wait(Mutex* mu) {
    WaitCommon(mu, synchronization_internal::KernelTimeout::Never());
  }

  // CondVar::WaitWithTimeout()
  //
  // Atomically releases a `Mutex` and blocks on this condition variable.
  // Waits until awakened by a call to `Signal()` or `SignalAll()` (or a
  // spurious wakeup), or until the timeout has expired, then reacquires
  // the `Mutex` and returns.
  //
  // Returns true if the timeout has expired without this `CondVar`
  // being signalled in any manner. If both the timeout has expired
  // and this `CondVar` has been signalled, the implementation is free
  // to return `true` or `false`.
  //
  // Requires and ensures that the current thread holds the `Mutex`.
  bool WaitWithTimeout(Mutex* mu, absl::Duration timeout) {
    return WaitCommon(mu, synchronization_internal::KernelTimeout(timeout));
  }

  // CondVar::WaitWithDeadline()
  //
  // Atomically releases a `Mutex` and blocks on this condition variable.
  // Waits until awakened by a call to `Signal()` or `SignalAll()` (or a
  // spurious wakeup), or until the deadline has passed, then reacquires
  // the `Mutex` and returns.
  //
  // Deadlines in the past are equivalent to an immediate deadline.
  //
  // Returns true if the deadline has passed without this `CondVar`
  // being signalled in any manner. If both the deadline has passed
  // and this `CondVar` has been signalled, the implementation is free
  // to return `true` or `false`.
  //
  // Requires and ensures that the current thread holds the `Mutex`.
  bool WaitWithDeadline(Mutex* mu, absl::Time deadline) {
    return WaitCommon(mu, synchronization_internal::KernelTimeout(deadline));
  }
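
  // Example (an illustrative sketch of a bounded wait; `mu`, `cv`, and `done`
  // are example names, and `done` is assumed to be protected by `mu`):
  //
  //   mu.Lock();
  //   const absl::Time deadline = absl::Now() + absl::Seconds(10);
  //   while (!done && !cv.WaitWithDeadline(&mu, deadline)) {
  //     // Signalled (or woke spuriously) before `done` became true: re-check.
  //   }
  //   // Here either `done` is true or the deadline has passed.
  //   mu.Unlock();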

  // CondVar::Signal()
  //
  // Signal this `CondVar`; wake at least one waiter if one exists.
  void Signal();

  // CondVar::SignalAll()
  //
  // Signal this `CondVar`; wake all waiters.
  void SignalAll();

  // CondVar::EnableDebugLog()
  //
  // Causes all subsequent uses of this `CondVar` to be logged via
  // `ABSL_RAW_LOG(INFO)`. Log entries are tagged with `name` if `name != 0`.
  // Note: this method substantially reduces `CondVar` performance.
  void EnableDebugLog(const char* name);

 private:
  bool WaitCommon(Mutex* mutex, synchronization_internal::KernelTimeout t);
  void Remove(base_internal::PerThreadSynch* s);
  std::atomic<intptr_t> cv_;  // Condition variable state.
  CondVar(const CondVar&) = delete;
  CondVar& operator=(const CondVar&) = delete;
};

// Variants of MutexLock.
//
// If you find yourself using one of these, consider instead using
// Mutex::Unlock() and/or if-statements for clarity.

// MutexLockMaybe
//
// MutexLockMaybe is like MutexLock, but is a no-op when mu is null.
class ABSL_SCOPED_LOCKABLE MutexLockMaybe {
 public:
  explicit MutexLockMaybe(Mutex* mu) ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
      : mu_(mu) {
    if (this->mu_ != nullptr) {
      this->mu_->Lock();
    }
  }

  explicit MutexLockMaybe(Mutex* mu, const Condition& cond)
      ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
      : mu_(mu) {
    if (this->mu_ != nullptr) {
      this->mu_->LockWhen(cond);
    }
  }

  ~MutexLockMaybe() ABSL_UNLOCK_FUNCTION() {
    if (this->mu_ != nullptr) {
      this->mu_->Unlock();
    }
  }

 private:
  Mutex* const mu_;
  MutexLockMaybe(const MutexLockMaybe&) = delete;
  MutexLockMaybe(MutexLockMaybe&&) = delete;
  MutexLockMaybe& operator=(const MutexLockMaybe&) = delete;
  MutexLockMaybe& operator=(MutexLockMaybe&&) = delete;
};

// ReleasableMutexLock
//
// ReleasableMutexLock is like MutexLock, but permits `Release()` of its
// mutex before destruction. `Release()` may be called at most once.
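//
// Example (an illustrative sketch; the names below are not part of this API):
//
//   void Process() {
//     ReleasableMutexLock lock(&mu_);
//     Item item = TakeNextItemLocked();  // requires mu_
//     lock.Release();                    // drop mu_ before the slow work
//     HandleItem(item);                  // runs without holding mu_
//   }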
class ABSL_SCOPED_LOCKABLE ReleasableMutexLock {
 public:
  explicit ReleasableMutexLock(Mutex* mu) ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
      : mu_(mu) {
    this->mu_->Lock();
  }

  explicit ReleasableMutexLock(Mutex* mu, const Condition& cond)
      ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
      : mu_(mu) {
    this->mu_->LockWhen(cond);
  }

  ~ReleasableMutexLock() ABSL_UNLOCK_FUNCTION() {
    if (this->mu_ != nullptr) {
      this->mu_->Unlock();
    }
  }

  void Release() ABSL_UNLOCK_FUNCTION();

 private:
  Mutex* mu_;
  ReleasableMutexLock(const ReleasableMutexLock&) = delete;
  ReleasableMutexLock(ReleasableMutexLock&&) = delete;
  ReleasableMutexLock& operator=(const ReleasableMutexLock&) = delete;
  ReleasableMutexLock& operator=(ReleasableMutexLock&&) = delete;
};

inline Mutex::Mutex() : mu_(0) {
  ABSL_TSAN_MUTEX_CREATE(this, __tsan_mutex_not_static);
}

inline constexpr Mutex::Mutex(absl::ConstInitType) : mu_(0) {}

#if !defined(__APPLE__) && !defined(ABSL_BUILD_DLL)
ABSL_ATTRIBUTE_ALWAYS_INLINE
inline Mutex::~Mutex() { Dtor(); }
#endif

#if defined(NDEBUG) && !defined(ABSL_HAVE_THREAD_SANITIZER)
// Use default (empty) destructor in release build for performance reasons.
// We need to mark both Dtor and ~Mutex as always inline for inconsistent
// builds that use both NDEBUG and !NDEBUG with dynamic libraries. In these
// cases we want the empty functions to dissolve entirely rather than being
// exported from dynamic libraries and potentially override the non-empty ones.
ABSL_ATTRIBUTE_ALWAYS_INLINE
inline void Mutex::Dtor() {}
#endif

inline CondVar::CondVar() : cv_(0) {}

// static
template <typename T, typename ConditionMethodPtr>
bool Condition::CastAndCallMethod(const Condition* c) {
  T* object = static_cast<T*>(c->arg_);
  ConditionMethodPtr condition_method_pointer;
  c->ReadCallback(&condition_method_pointer);
  return (object->*condition_method_pointer)();
}

// static
template <typename T>
bool Condition::CastAndCallFunction(const Condition* c) {
  bool (*function)(T*);
  c->ReadCallback(&function);
  T* argument = static_cast<T*>(c->arg_);
  return (*function)(argument);
}

template <typename T>
inline Condition::Condition(bool (*func)(T*), T* arg)
    : eval_(&CastAndCallFunction<T>),
      arg_(const_cast<void*>(static_cast<const void*>(arg))) {
  static_assert(sizeof(&func) <= sizeof(callback_),
                "An overlarge function pointer was passed to Condition.");
  StoreCallback(func);
}

template <typename T, typename>
inline Condition::Condition(
    bool (*func)(T*), typename absl::internal::type_identity<T>::type* arg)
    // Just delegate to the overload above.
    : Condition(func, arg) {}

template <typename T>
inline Condition::Condition(
    T* object, bool (absl::internal::type_identity<T>::type::*method)())
    : eval_(&CastAndCallMethod<T, decltype(method)>), arg_(object) {
  static_assert(sizeof(&method) <= sizeof(callback_),
                "An overlarge method pointer was passed to Condition.");
  StoreCallback(method);
}

template <typename T>
inline Condition::Condition(
    const T* object,
    bool (absl::internal::type_identity<T>::type::*method)() const)
    : eval_(&CastAndCallMethod<const T, decltype(method)>),
      arg_(reinterpret_cast<void*>(const_cast<T*>(object))) {
  StoreCallback(method);
}

// Register hooks for profiling support.
//
// The function pointer registered here will be called whenever a mutex is
// contended. The callback is given the cycles for which waiting happened (as
// measured by //absl/base/internal/cycleclock.h, and which may not
// be real "cycle" counts.)
//
// There is no ordering guarantee between when the hook is registered and when
// callbacks will begin. Only a single profiler can be installed in a running
// binary; if this function is called a second time with a different function
// pointer, the value is ignored (and will cause an assertion failure in debug
// mode.)
void RegisterMutexProfiler(void (*fn)(int64_t wait_cycles));
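
// Example (a minimal sketch; `MyMutexProfiler` is a hypothetical callback, not
// part of this API):
//
//   void MyMutexProfiler(int64_t wait_cycles) {
//     // Export wait_cycles to a metrics system. Keep this cheap and
//     // non-blocking, since it runs whenever a mutex is contended.
//   }
//   ...
//   absl::RegisterMutexProfiler(&MyMutexProfiler);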

// Register a hook for Mutex tracing.
//
// The function pointer registered here will be called whenever a mutex is
// contended. The callback is given an opaque handle to the contended mutex,
// an event name, and the number of wait cycles (as measured by
// //absl/base/internal/cycleclock.h, and which may not be real
// "cycle" counts.)
//
// The only event name currently sent is "slow release".
//
// This has the same ordering and single-use limitations as
// RegisterMutexProfiler() above.
void RegisterMutexTracer(void (*fn)(const char* msg, const void* obj,
                                    int64_t wait_cycles));

// Register a hook for CondVar tracing.
//
// The function pointer registered here will be called on various CondVar
// events. The callback is given an opaque handle to the CondVar object and
// a string identifying the event. This is thread-safe, but only a single
// tracer can be registered.
//
// Events that can be sent are "Wait", "Unwait", "Signal wakeup", and
// "SignalAll wakeup".
//
// This has the same ordering and single-use limitations as
// RegisterMutexProfiler() above.
void RegisterCondVarTracer(void (*fn)(const char* msg, const void* cv));

// EnableMutexInvariantDebugging()
//
// Enable or disable global support for Mutex invariant debugging. If enabled,
// then invariant predicates can be registered per-Mutex for debug checking.
// See Mutex::EnableInvariantDebugging().
void EnableMutexInvariantDebugging(bool enabled);

// When in debug mode, and when the feature has been enabled globally, the
// implementation will keep track of lock ordering and complain (or optionally
// crash) if a cycle is detected in the acquired-before graph.

// Possible modes of operation for the deadlock detector in debug mode.
enum class OnDeadlockCycle {
  kIgnore,  // Neither report on nor attempt to track cycles in lock ordering
  kReport,  // Report lock cycles to stderr when detected
  kAbort,   // Report lock cycles to stderr when detected, then abort
};

// SetMutexDeadlockDetectionMode()
//
// Enable or disable global support for detection of potential deadlocks
// due to Mutex lock ordering inversions. When set to 'kIgnore', tracking of
// lock ordering is disabled. Otherwise, in debug builds, a lock ordering graph
// will be maintained internally, and detected cycles will be reported in
// the manner chosen here.
void SetMutexDeadlockDetectionMode(OnDeadlockCycle mode);
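
// Example (an illustrative sketch for early in a debug/test binary's startup):
//
//   int main(int argc, char** argv) {
//     // In debug builds, abort when a lock-ordering cycle is detected.
//     absl::SetMutexDeadlockDetectionMode(absl::OnDeadlockCycle::kAbort);
//     ...
//   }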

ABSL_NAMESPACE_END
}  // namespace absl

// In some build configurations we pass --detect-odr-violations to the
// gold linker. This causes it to flag weak symbol overrides as ODR
// violations. Because ODR only applies to C++ and not C,
// --detect-odr-violations ignores symbols not mangled with C++ names.
// By changing our extension points to be extern "C", we dodge this
// check.
extern "C" {
void ABSL_INTERNAL_C_SYMBOL(AbslInternalMutexYield)();
}  // extern "C"

#endif  // ABSL_SYNCHRONIZATION_MUTEX_H_