1 // Copyright 2017 The Abseil Authors.
2 //
3 // Licensed under the Apache License, Version 2.0 (the "License");
4 // you may not use this file except in compliance with the License.
5 // You may obtain a copy of the License at
6 //
7 // https://www.apache.org/licenses/LICENSE-2.0
8 //
9 // Unless required by applicable law or agreed to in writing, software
10 // distributed under the License is distributed on an "AS IS" BASIS,
11 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 // See the License for the specific language governing permissions and
13 // limitations under the License.
14
15 #include "absl/synchronization/mutex.h"
16
17 #ifdef _WIN32
18 #include <windows.h>
19 #ifdef ERROR
20 #undef ERROR
21 #endif
22 #else
23 #include <fcntl.h>
24 #include <pthread.h>
25 #include <sched.h>
26 #include <sys/time.h>
27 #endif
28
29 #include <assert.h>
30 #include <errno.h>
31 #include <stdio.h>
32 #include <stdlib.h>
33 #include <string.h>
34 #include <time.h>
35
36 #include <algorithm>
37 #include <atomic>
38 #include <cinttypes>
39 #include <cstddef>
40 #include <cstring>
41 #include <iterator>
42 #include <thread> // NOLINT(build/c++11)
43
44 #include "absl/base/attributes.h"
45 #include "absl/base/call_once.h"
46 #include "absl/base/config.h"
47 #include "absl/base/dynamic_annotations.h"
48 #include "absl/base/internal/atomic_hook.h"
49 #include "absl/base/internal/cycleclock.h"
50 #include "absl/base/internal/hide_ptr.h"
51 #include "absl/base/internal/low_level_alloc.h"
52 #include "absl/base/internal/raw_logging.h"
53 #include "absl/base/internal/spinlock.h"
54 #include "absl/base/internal/sysinfo.h"
55 #include "absl/base/internal/thread_identity.h"
56 #include "absl/base/internal/tsan_mutex_interface.h"
57 #include "absl/base/optimization.h"
58 #include "absl/base/port.h"
59 #include "absl/debugging/stacktrace.h"
60 #include "absl/debugging/symbolize.h"
61 #include "absl/synchronization/internal/graphcycles.h"
62 #include "absl/synchronization/internal/per_thread_sem.h"
63 #include "absl/time/time.h"
64
65 using absl::base_internal::CurrentThreadIdentityIfPresent;
66 using absl::base_internal::PerThreadSynch;
67 using absl::base_internal::SchedulingGuard;
68 using absl::base_internal::ThreadIdentity;
69 using absl::synchronization_internal::GetOrCreateCurrentThreadIdentity;
70 using absl::synchronization_internal::GraphCycles;
71 using absl::synchronization_internal::GraphId;
72 using absl::synchronization_internal::InvalidGraphId;
73 using absl::synchronization_internal::KernelTimeout;
74 using absl::synchronization_internal::PerThreadSem;
75
76 extern "C" {
77 ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalMutexYield)() {
78 std::this_thread::yield();
79 }
80 } // extern "C"
81
82 namespace absl {
83 ABSL_NAMESPACE_BEGIN
84
85 namespace {
86
87 #if defined(ABSL_HAVE_THREAD_SANITIZER)
88 constexpr OnDeadlockCycle kDeadlockDetectionDefault = OnDeadlockCycle::kIgnore;
89 #else
90 constexpr OnDeadlockCycle kDeadlockDetectionDefault = OnDeadlockCycle::kAbort;
91 #endif
92
93 ABSL_CONST_INIT std::atomic<OnDeadlockCycle> synch_deadlock_detection(
94 kDeadlockDetectionDefault);
95 ABSL_CONST_INIT std::atomic<bool> synch_check_invariants(false);
96
97 ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES
98 absl::base_internal::AtomicHook<void (*)(int64_t wait_cycles)>
99 submit_profile_data;
100 ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES absl::base_internal::AtomicHook<void (*)(
101 const char *msg, const void *obj, int64_t wait_cycles)>
102 mutex_tracer;
103 ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES
104 absl::base_internal::AtomicHook<void (*)(const char *msg, const void *cv)>
105 cond_var_tracer;
106 ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES absl::base_internal::AtomicHook<
107 bool (*)(const void *pc, char *out, int out_size)>
108 symbolizer(absl::Symbolize);
109
110 } // namespace
111
112 static inline bool EvalConditionAnnotated(const Condition *cond, Mutex *mu,
113 bool locking, bool trylock,
114 bool read_lock);
115
116 void RegisterMutexProfiler(void (*fn)(int64_t wait_cycles)) {
117 submit_profile_data.Store(fn);
118 }
119
120 void RegisterMutexTracer(void (*fn)(const char *msg, const void *obj,
121 int64_t wait_cycles)) {
122 mutex_tracer.Store(fn);
123 }
124
125 void RegisterCondVarTracer(void (*fn)(const char *msg, const void *cv)) {
126 cond_var_tracer.Store(fn);
127 }
128
129 void RegisterSymbolizer(bool (*fn)(const void *pc, char *out, int out_size)) {
130 symbolizer.Store(fn);
131 }
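// Illustrative only (hypothetical client code, not part of this file): these
// hooks are normally installed once near program startup, e.g.
//
//   void MyMutexProfiler(int64_t wait_cycles) {
//     // export the contention time (in CycleClock cycles) to a metrics system
//   }
//   ...
//   absl::RegisterMutexProfiler(&MyMutexProfiler);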
132
133 namespace {
134 // Represents the strategy for spin and yield.
135 // See the comment in GetMutexGlobals() for more information.
136 enum DelayMode { AGGRESSIVE, GENTLE };
137
138 struct ABSL_CACHELINE_ALIGNED MutexGlobals {
139 absl::once_flag once;
140 int spinloop_iterations = 0;
141 int32_t mutex_sleep_spins[2] = {};
142 absl::Duration mutex_sleep_time;
143 };
144
145 absl::Duration MeasureTimeToYield() {
146 absl::Time before = absl::Now();
147 ABSL_INTERNAL_C_SYMBOL(AbslInternalMutexYield)();
148 return absl::Now() - before;
149 }
150
151 const MutexGlobals &GetMutexGlobals() {
152 ABSL_CONST_INIT static MutexGlobals data;
153 absl::base_internal::LowLevelCallOnce(&data.once, [&]() {
154 const int num_cpus = absl::base_internal::NumCPUs();
155 data.spinloop_iterations = num_cpus > 1 ? 1500 : 0;
156 // If this is a uniprocessor, only yield/sleep.
157 // Real-time threads are often unable to yield, so the sleep time needs
158 // to be long enough to keep the calling thread asleep until scheduling
159 // happens.
160 // If this is multiprocessor, allow spinning. If the mode is
161 // aggressive then spin many times before yielding. If the mode is
162 // gentle then spin only a few times before yielding. Aggressive spinning
163 // is used to ensure that an Unlock() call, which must get the spin lock
164 // for any thread to make progress, gets it without undue delay.
165 if (num_cpus > 1) {
166 data.mutex_sleep_spins[AGGRESSIVE] = 5000;
167 data.mutex_sleep_spins[GENTLE] = 250;
168 data.mutex_sleep_time = absl::Microseconds(10);
169 } else {
170 data.mutex_sleep_spins[AGGRESSIVE] = 0;
171 data.mutex_sleep_spins[GENTLE] = 0;
172 data.mutex_sleep_time = MeasureTimeToYield() * 5;
173 data.mutex_sleep_time =
174 std::min(data.mutex_sleep_time, absl::Milliseconds(1));
175 data.mutex_sleep_time =
176 std::max(data.mutex_sleep_time, absl::Microseconds(10));
177 }
178 });
179 return data;
180 }
181 } // namespace
182
183 namespace synchronization_internal {
184 // Returns the Mutex delay on iteration `c` depending on the given `mode`.
185 // The returned value should be used as `c` for the next call to `MutexDelay`.
186 int MutexDelay(int32_t c, int mode) {
187 const int32_t limit = GetMutexGlobals().mutex_sleep_spins[mode];
188 const absl::Duration sleep_time = GetMutexGlobals().mutex_sleep_time;
189 if (c < limit) {
190 // Spin.
191 c++;
192 } else {
193 SchedulingGuard::ScopedEnable enable_rescheduling;
194 ABSL_TSAN_MUTEX_PRE_DIVERT(nullptr, 0);
195 if (c == limit) {
196 // Yield once.
197 ABSL_INTERNAL_C_SYMBOL(AbslInternalMutexYield)();
198 c++;
199 } else {
200 // Then wait.
201 absl::SleepFor(sleep_time);
202 c = 0;
203 }
204 ABSL_TSAN_MUTEX_POST_DIVERT(nullptr, 0);
205 }
206 return c;
207 }
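// Usage sketch (this mirrors the wait loops later in this file, e.g.
// Mutex::Block(); `work_pending` is illustrative):
//
//   int c = 0;
//   while (work_pending()) {
//     c = synchronization_internal::MutexDelay(c, GENTLE);
//   }
//
// The caller spins up to the per-mode limit, yields once at the limit, and
// sleeps for mutex_sleep_time on subsequent iterations.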
208 } // namespace synchronization_internal
209
210 // --------------------------Generic atomic ops
211 // Ensure that "(*pv & bits) == bits" by doing an atomic update of "*pv" to
212 // "*pv | bits" if necessary. Wait until (*pv & wait_until_clear)==0
213 // before making any change.
214 // This is used to set flags in mutex and condition variable words.
215 static void AtomicSetBits(std::atomic<intptr_t>* pv, intptr_t bits,
216 intptr_t wait_until_clear) {
217 intptr_t v;
218 do {
219 v = pv->load(std::memory_order_relaxed);
220 } while ((v & bits) != bits &&
221 ((v & wait_until_clear) != 0 ||
222 !pv->compare_exchange_weak(v, v | bits,
223 std::memory_order_release,
224 std::memory_order_relaxed)));
225 }
226
227 // Ensure that "(*pv & bits) == 0" by doing an atomic update of "*pv" to
228 // "*pv & ~bits" if necessary. Wait until (*pv & wait_until_clear)==0
229 // before making any change.
230 // This is used to unset flags in mutex and condition variable words.
231 static void AtomicClearBits(std::atomic<intptr_t>* pv, intptr_t bits,
232 intptr_t wait_until_clear) {
233 intptr_t v;
234 do {
235 v = pv->load(std::memory_order_relaxed);
236 } while ((v & bits) != 0 &&
237 ((v & wait_until_clear) != 0 ||
238 !pv->compare_exchange_weak(v, v & ~bits,
239 std::memory_order_release,
240 std::memory_order_relaxed)));
241 }
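// For example, EnsureSynchEvent() below uses AtomicSetBits(addr, kMuEvent,
// kMuSpin) to mark a Mutex word as having an associated SynchEvent while the
// kMuSpin bit is clear, and ForgetSynchEvent() undoes this with
// AtomicClearBits().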
242
243 //------------------------------------------------------------------
244
245 // Data for doing deadlock detection.
246 ABSL_CONST_INIT static absl::base_internal::SpinLock deadlock_graph_mu(
247 absl::kConstInit, base_internal::SCHEDULE_KERNEL_ONLY);
248
249 // Graph used to detect deadlocks.
250 ABSL_CONST_INIT static GraphCycles *deadlock_graph
251 ABSL_GUARDED_BY(deadlock_graph_mu) ABSL_PT_GUARDED_BY(deadlock_graph_mu);
252
253 //------------------------------------------------------------------
254 // An event mechanism for debugging mutex use.
255 // It also allows mutexes to be given names for those who can't handle
256 // addresses, and instead like to give their data structures names like
257 // "Henry", "Fido", or "Rupert IV, King of Yondavia".
258
259 namespace { // to prevent name pollution
260 enum { // Mutex and CondVar events passed as "ev" to PostSynchEvent
261 // Mutex events
262 SYNCH_EV_TRYLOCK_SUCCESS,
263 SYNCH_EV_TRYLOCK_FAILED,
264 SYNCH_EV_READERTRYLOCK_SUCCESS,
265 SYNCH_EV_READERTRYLOCK_FAILED,
266 SYNCH_EV_LOCK,
267 SYNCH_EV_LOCK_RETURNING,
268 SYNCH_EV_READERLOCK,
269 SYNCH_EV_READERLOCK_RETURNING,
270 SYNCH_EV_UNLOCK,
271 SYNCH_EV_READERUNLOCK,
272
273 // CondVar events
274 SYNCH_EV_WAIT,
275 SYNCH_EV_WAIT_RETURNING,
276 SYNCH_EV_SIGNAL,
277 SYNCH_EV_SIGNALALL,
278 };
279
280 enum { // Event flags
281 SYNCH_F_R = 0x01, // reader event
282 SYNCH_F_LCK = 0x02, // PostSynchEvent called with mutex held
283 SYNCH_F_TRY = 0x04, // TryLock or ReaderTryLock
284 SYNCH_F_UNLOCK = 0x08, // Unlock or ReaderUnlock
285
286 SYNCH_F_LCK_W = SYNCH_F_LCK,
287 SYNCH_F_LCK_R = SYNCH_F_LCK | SYNCH_F_R,
288 };
289 } // anonymous namespace
290
291 // Properties of the events.
292 static const struct {
293 int flags;
294 const char *msg;
295 } event_properties[] = {
296 {SYNCH_F_LCK_W | SYNCH_F_TRY, "TryLock succeeded "},
297 {0, "TryLock failed "},
298 {SYNCH_F_LCK_R | SYNCH_F_TRY, "ReaderTryLock succeeded "},
299 {0, "ReaderTryLock failed "},
300 {0, "Lock blocking "},
301 {SYNCH_F_LCK_W, "Lock returning "},
302 {0, "ReaderLock blocking "},
303 {SYNCH_F_LCK_R, "ReaderLock returning "},
304 {SYNCH_F_LCK_W | SYNCH_F_UNLOCK, "Unlock "},
305 {SYNCH_F_LCK_R | SYNCH_F_UNLOCK, "ReaderUnlock "},
306 {0, "Wait on "},
307 {0, "Wait unblocked "},
308 {0, "Signal on "},
309 {0, "SignalAll on "},
310 };
311
312 ABSL_CONST_INIT static absl::base_internal::SpinLock synch_event_mu(
313 absl::kConstInit, base_internal::SCHEDULE_KERNEL_ONLY);
314
315 // Hash table size; should be prime > 2.
316 // Can't be too small, as it's used for deadlock detection information.
317 static constexpr uint32_t kNSynchEvent = 1031;
318
319 static struct SynchEvent { // this is a trivial hash table for the events
320 // struct is freed when refcount reaches 0
321 int refcount ABSL_GUARDED_BY(synch_event_mu);
322
323 // buckets have linear, 0-terminated chains
324 SynchEvent *next ABSL_GUARDED_BY(synch_event_mu);
325
326 // Constant after initialization
327 uintptr_t masked_addr; // object at this address is called "name"
328
329 // No explicit synchronization used. Instead we assume that the
330 // client who enables/disables invariants/logging on a Mutex does so
331 // while the Mutex is not being concurrently accessed by others.
332 void (*invariant)(void *arg); // called on each event
333 void *arg; // first arg to (*invariant)()
334 bool log; // logging turned on
335
336 // Constant after initialization
337 char name[1]; // actually longer---NUL-terminated string
338 } * synch_event[kNSynchEvent] ABSL_GUARDED_BY(synch_event_mu);
339
340 // Ensure that the object at "addr" has a SynchEvent struct associated with it,
341 // set "bits" in the word there (waiting until lockbit is clear before doing
342 // so), and return a refcounted reference that will remain valid until
343 // UnrefSynchEvent() is called. If a new SynchEvent is allocated,
344 // the string name is copied into it.
345 // When used with a mutex, the caller should also ensure that kMuEvent
346 // is set in the mutex word, and similarly for condition variables and kCVEvent.
347 static SynchEvent *EnsureSynchEvent(std::atomic<intptr_t> *addr,
348 const char *name, intptr_t bits,
349 intptr_t lockbit) {
350 uint32_t h = reinterpret_cast<uintptr_t>(addr) % kNSynchEvent;
351 SynchEvent *e;
352 // first look for an existing SynchEvent struct.
353 synch_event_mu.Lock();
354 for (e = synch_event[h];
355 e != nullptr && e->masked_addr != base_internal::HidePtr(addr);
356 e = e->next) {
357 }
358 if (e == nullptr) { // no SynchEvent struct found; make one.
359 if (name == nullptr) {
360 name = "";
361 }
362 size_t l = strlen(name);
363 e = reinterpret_cast<SynchEvent *>(
364 base_internal::LowLevelAlloc::Alloc(sizeof(*e) + l));
365 e->refcount = 2; // one for return value, one for linked list
366 e->masked_addr = base_internal::HidePtr(addr);
367 e->invariant = nullptr;
368 e->arg = nullptr;
369 e->log = false;
370 strcpy(e->name, name); // NOLINT(runtime/printf)
371 e->next = synch_event[h];
372 AtomicSetBits(addr, bits, lockbit);
373 synch_event[h] = e;
374 } else {
375 e->refcount++; // for return value
376 }
377 synch_event_mu.Unlock();
378 return e;
379 }
380
381 // Deallocate the SynchEvent *e, whose refcount has fallen to zero.
382 static void DeleteSynchEvent(SynchEvent *e) {
383 base_internal::LowLevelAlloc::Free(e);
384 }
385
386 // Decrement the reference count of *e, or do nothing if e==null.
387 static void UnrefSynchEvent(SynchEvent *e) {
388 if (e != nullptr) {
389 synch_event_mu.Lock();
390 bool del = (--(e->refcount) == 0);
391 synch_event_mu.Unlock();
392 if (del) {
393 DeleteSynchEvent(e);
394 }
395 }
396 }
397
398 // Forget the mapping from the object (Mutex or CondVar) at address addr
399 // to SynchEvent object, and clear "bits" in its word (waiting until lockbit
400 // is clear before doing so).
401 static void ForgetSynchEvent(std::atomic<intptr_t> *addr, intptr_t bits,
402 intptr_t lockbit) {
403 uint32_t h = reinterpret_cast<uintptr_t>(addr) % kNSynchEvent;
404 SynchEvent **pe;
405 SynchEvent *e;
406 synch_event_mu.Lock();
407 for (pe = &synch_event[h];
408 (e = *pe) != nullptr && e->masked_addr != base_internal::HidePtr(addr);
409 pe = &e->next) {
410 }
411 bool del = false;
412 if (e != nullptr) {
413 *pe = e->next;
414 del = (--(e->refcount) == 0);
415 }
416 AtomicClearBits(addr, bits, lockbit);
417 synch_event_mu.Unlock();
418 if (del) {
419 DeleteSynchEvent(e);
420 }
421 }
422
423 // Return a refcounted reference to the SynchEvent of the object at address
424 // "addr", if any. The pointer returned is valid until the UnrefSynchEvent() is
425 // called.
426 static SynchEvent *GetSynchEvent(const void *addr) {
427 uint32_t h = reinterpret_cast<uintptr_t>(addr) % kNSynchEvent;
428 SynchEvent *e;
429 synch_event_mu.Lock();
430 for (e = synch_event[h];
431 e != nullptr && e->masked_addr != base_internal::HidePtr(addr);
432 e = e->next) {
433 }
434 if (e != nullptr) {
435 e->refcount++;
436 }
437 synch_event_mu.Unlock();
438 return e;
439 }
440
441 // Called when an event "ev" occurs on a Mutex or CondVar "obj"
442 // if event recording is on
443 static void PostSynchEvent(void *obj, int ev) {
444 SynchEvent *e = GetSynchEvent(obj);
445 // logging is on if event recording is on and either there's no event struct,
446 // or it explicitly says to log
447 if (e == nullptr || e->log) {
448 void *pcs[40];
449 int n = absl::GetStackTrace(pcs, ABSL_ARRAYSIZE(pcs), 1);
450 // A buffer with enough space for the ASCII for all the PCs, even on a
451 // 64-bit machine.
452 char buffer[ABSL_ARRAYSIZE(pcs) * 24];
453 int pos = snprintf(buffer, sizeof (buffer), " @");
454 for (int i = 0; i != n; i++) {
455 int b = snprintf(&buffer[pos], sizeof(buffer) - static_cast<size_t>(pos),
456 " %p", pcs[i]);
457 if (b < 0 ||
458 static_cast<size_t>(b) >= sizeof(buffer) - static_cast<size_t>(pos)) {
459 break;
460 }
461 pos += b;
462 }
463 ABSL_RAW_LOG(INFO, "%s%p %s %s", event_properties[ev].msg, obj,
464 (e == nullptr ? "" : e->name), buffer);
465 }
466 const int flags = event_properties[ev].flags;
467 if ((flags & SYNCH_F_LCK) != 0 && e != nullptr && e->invariant != nullptr) {
468 // Calling the invariant as is causes problems under ThreadSanitizer.
469 // We are currently inside of Mutex Lock/Unlock and are ignoring all
470 // memory accesses and synchronization. If the invariant transitively
471 // synchronizes something else and we ignore the synchronization, we will
472 // get false positive race reports later.
473 // Reuse EvalConditionAnnotated to properly call into user code.
474 struct local {
475 static bool pred(SynchEvent *ev) {
476 (*ev->invariant)(ev->arg);
477 return false;
478 }
479 };
480 Condition cond(&local::pred, e);
481 Mutex *mu = static_cast<Mutex *>(obj);
482 const bool locking = (flags & SYNCH_F_UNLOCK) == 0;
483 const bool trylock = (flags & SYNCH_F_TRY) != 0;
484 const bool read_lock = (flags & SYNCH_F_R) != 0;
485 EvalConditionAnnotated(&cond, mu, locking, trylock, read_lock);
486 }
487 UnrefSynchEvent(e);
488 }
489
490 //------------------------------------------------------------------
491
492 // The SynchWaitParams struct encapsulates the way in which a thread is waiting:
493 // whether it has a timeout, the condition, exclusive/shared, and whether a
494 // condition variable wait has an associated Mutex (as opposed to another
495 // type of lock). It also points to the PerThreadSynch struct of its thread.
496 // cv_word tells Enqueue() to enqueue on a CondVar using CondVarEnqueue().
497 //
498 // This structure is held on the stack rather than directly in
499 // PerThreadSynch because a thread can be waiting on multiple Mutexes if,
500 // while waiting on one Mutex, the implementation calls a client callback
501 // (such as a Condition function) that acquires another Mutex. We don't
502 // strictly need to allow this, but programmers become confused if we do not
503 // allow them to use functions such as LOG() within Condition functions. The
504 // PerThreadSynch struct points at the most recent SynchWaitParams struct when
505 // the thread is on a Mutex's waiter queue.
506 struct SynchWaitParams {
507 SynchWaitParams(Mutex::MuHow how_arg, const Condition *cond_arg,
508 KernelTimeout timeout_arg, Mutex *cvmu_arg,
509 PerThreadSynch *thread_arg,
510 std::atomic<intptr_t> *cv_word_arg)
511 : how(how_arg),
512 cond(cond_arg),
513 timeout(timeout_arg),
514 cvmu(cvmu_arg),
515 thread(thread_arg),
516 cv_word(cv_word_arg),
517 contention_start_cycles(base_internal::CycleClock::Now()),
518 should_submit_contention_data(false) {}
519
520 const Mutex::MuHow how; // How this thread needs to wait.
521 const Condition *cond; // The condition that this thread is waiting for.
522 // In Mutex, this field is set to zero if a timeout
523 // expires.
524 KernelTimeout timeout; // timeout expiry---absolute time
525 // In Mutex, this field is set to zero if a timeout
526 // expires.
527 Mutex *const cvmu; // used for transfer from cond var to mutex
528 PerThreadSynch *const thread; // thread that is waiting
529
530 // If not null, thread should be enqueued on the CondVar whose state
531 // word is cv_word instead of queueing normally on the Mutex.
532 std::atomic<intptr_t> *cv_word;
533
534 int64_t contention_start_cycles; // Time (in cycles) when this thread started
535 // to contend for the mutex.
536 bool should_submit_contention_data;
537 };
538
539 struct SynchLocksHeld {
540 int n; // number of valid entries in locks[]
541 bool overflow; // true iff we overflowed the array at some point
542 struct {
543 Mutex *mu; // lock acquired
544 int32_t count; // times acquired
545 GraphId id; // deadlock_graph id of acquired lock
546 } locks[40];
547 // If a thread overfills the array during deadlock detection, we
548 // continue, discarding information as needed. If no overflow has
549 // taken place, we can provide more error checking, such as
550 // detecting when a thread releases a lock it does not hold.
551 };
552
553 // A sentinel value in lists that is not 0.
554 // A 0 value is used to mean "not on a list".
555 static PerThreadSynch *const kPerThreadSynchNull =
556 reinterpret_cast<PerThreadSynch *>(1);
557
558 static SynchLocksHeld *LocksHeldAlloc() {
559 SynchLocksHeld *ret = reinterpret_cast<SynchLocksHeld *>(
560 base_internal::LowLevelAlloc::Alloc(sizeof(SynchLocksHeld)));
561 ret->n = 0;
562 ret->overflow = false;
563 return ret;
564 }
565
566 // Return the PerThreadSynch-struct for this thread.
567 static PerThreadSynch *Synch_GetPerThread() {
568 ThreadIdentity *identity = GetOrCreateCurrentThreadIdentity();
569 return &identity->per_thread_synch;
570 }
571
572 static PerThreadSynch *Synch_GetPerThreadAnnotated(Mutex *mu) {
573 if (mu) {
574 ABSL_TSAN_MUTEX_PRE_DIVERT(mu, 0);
575 }
576 PerThreadSynch *w = Synch_GetPerThread();
577 if (mu) {
578 ABSL_TSAN_MUTEX_POST_DIVERT(mu, 0);
579 }
580 return w;
581 }
582
583 static SynchLocksHeld *Synch_GetAllLocks() {
584 PerThreadSynch *s = Synch_GetPerThread();
585 if (s->all_locks == nullptr) {
586 s->all_locks = LocksHeldAlloc(); // Freed by ReclaimThreadIdentity.
587 }
588 return s->all_locks;
589 }
590
591 // Post on "w"'s associated PerThreadSem.
592 void Mutex::IncrementSynchSem(Mutex *mu, PerThreadSynch *w) {
593 if (mu) {
594 ABSL_TSAN_MUTEX_PRE_DIVERT(mu, 0);
595 // We miss synchronization around passing PerThreadSynch between threads
596 // since it happens inside of the Mutex code, so we need to ignore all
597 // accesses to the object.
598 ABSL_ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN();
599 PerThreadSem::Post(w->thread_identity());
600 ABSL_ANNOTATE_IGNORE_READS_AND_WRITES_END();
601 ABSL_TSAN_MUTEX_POST_DIVERT(mu, 0);
602 } else {
603 PerThreadSem::Post(w->thread_identity());
604 }
605 }
606
607 // Wait on "w"'s associated PerThreadSem; returns false if timeout expired.
608 bool Mutex::DecrementSynchSem(Mutex *mu, PerThreadSynch *w, KernelTimeout t) {
609 if (mu) {
610 ABSL_TSAN_MUTEX_PRE_DIVERT(mu, 0);
611 }
612 assert(w == Synch_GetPerThread());
613 static_cast<void>(w);
614 bool res = PerThreadSem::Wait(t);
615 if (mu) {
616 ABSL_TSAN_MUTEX_POST_DIVERT(mu, 0);
617 }
618 return res;
619 }
620
621 // We're in a fatal signal handler that hopes to use Mutex and to get
622 // lucky by not deadlocking. We try to improve its chances of success
623 // by effectively disabling some of the consistency checks. This will
624 // prevent certain ABSL_RAW_CHECK() statements from being triggered when
625 // re-entry is detected. The ABSL_RAW_CHECK() statements are those in the
626 // Mutex code checking that the "waitp" field has not been reused.
627 void Mutex::InternalAttemptToUseMutexInFatalSignalHandler() {
628 // Fix the per-thread state only if it exists.
629 ThreadIdentity *identity = CurrentThreadIdentityIfPresent();
630 if (identity != nullptr) {
631 identity->per_thread_synch.suppress_fatal_errors = true;
632 }
633 // Don't do deadlock detection when we are already failing.
634 synch_deadlock_detection.store(OnDeadlockCycle::kIgnore,
635 std::memory_order_release);
636 }
637
638 // --------------------------time support
639
640 // Return the current time plus the timeout. Use the same clock as
641 // PerThreadSem::Wait() for consistency. Unfortunately, we don't have
642 // such a choice when a deadline is given directly.
643 static absl::Time DeadlineFromTimeout(absl::Duration timeout) {
644 #ifndef _WIN32
645 struct timeval tv;
646 gettimeofday(&tv, nullptr);
647 return absl::TimeFromTimeval(tv) + timeout;
648 #else
649 return absl::Now() + timeout;
650 #endif
651 }
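// For example, DeadlineFromTimeout(absl::Milliseconds(500)) turns the
// relative timeout passed to the *WithTimeout entry points into the absolute
// deadline used by the rest of the wait machinery.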
652
653 // --------------------------Mutexes
654
655 // In the layout below, the msb of the bottom byte is currently unused. Also,
656 // the following constraints were considered in choosing the layout:
657 // o Both the debug allocator's "uninitialized" and "freed" patterns (0xab and
658 // 0xcd) are illegal: reader and writer lock both held.
659 // o kMuWriter and kMuEvent should exceed kMuDesig and kMuWait, to enable the
660 // bit-twiddling trick in Mutex::Unlock().
661 // o kMuWriter / kMuReader == kMuWrWait / kMuWait,
662 // to enable the bit-twiddling trick in CheckForMutexCorruption().
663 static const intptr_t kMuReader = 0x0001L; // a reader holds the lock
664 static const intptr_t kMuDesig = 0x0002L; // there's a designated waker
665 static const intptr_t kMuWait = 0x0004L; // threads are waiting
666 static const intptr_t kMuWriter = 0x0008L; // a writer holds the lock
667 static const intptr_t kMuEvent = 0x0010L; // record this mutex's events
668 // INVARIANT1: there's a thread that was blocked on the mutex, is
669 // no longer, yet has not yet acquired the mutex. If there's a
670 // designated waker, all threads can avoid taking the slow path in
671 // unlock because the designated waker will subsequently acquire
672 // the lock and wake someone. To maintain INVARIANT1 the bit is
673 // set when a thread is unblocked (INV1a), and threads that were
674 // unblocked reset the bit when they either acquire or re-block
675 // (INV1b).
676 static const intptr_t kMuWrWait = 0x0020L; // runnable writer is waiting
677 // for a reader
678 static const intptr_t kMuSpin = 0x0040L; // spinlock protects wait list
679 static const intptr_t kMuLow = 0x00ffL; // mask all mutex bits
680 static const intptr_t kMuHigh = ~kMuLow; // mask pointer/reader count
681
682 // Hack to make constant values available to gdb pretty printer
683 enum {
684 kGdbMuSpin = kMuSpin,
685 kGdbMuEvent = kMuEvent,
686 kGdbMuWait = kMuWait,
687 kGdbMuWriter = kMuWriter,
688 kGdbMuDesig = kMuDesig,
689 kGdbMuWrWait = kMuWrWait,
690 kGdbMuReader = kMuReader,
691 kGdbMuLow = kMuLow,
692 };
693
694 // kMuWrWait implies kMuWait.
695 // kMuReader and kMuWriter are mutually exclusive.
696 // If kMuReader is zero, there are no readers.
697 // Otherwise, if kMuWait is zero, the high order bits contain a count of the
698 // number of readers. Otherwise, the reader count is held in
699 // PerThreadSynch::readers of the most recently queued waiter, again in the
700 // bits above kMuLow.
701 static const intptr_t kMuOne = 0x0100; // a count of one reader
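// For example (illustrative only): with no waiters, a mutex word equal to
// (3 * kMuOne) | kMuReader describes a read lock held by three readers; once
// kMuWait is set, the bits above kMuLow instead hold the PerThreadSynch
// pointer of the most recently queued waiter, and the reader count moves to
// that waiter's `readers` field.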
702
703 // flags passed to Enqueue and LockSlow{,WithTimeout,Loop}
704 static const int kMuHasBlocked = 0x01; // already blocked (MUST == 1)
705 static const int kMuIsCond = 0x02; // conditional waiter (CV or Condition)
706
707 static_assert(PerThreadSynch::kAlignment > kMuLow,
708 "PerThreadSynch::kAlignment must be greater than kMuLow");
709
710 // This struct contains various bitmasks to be used in
711 // acquiring and releasing a mutex in a particular mode.
712 struct MuHowS {
713 // if all the bits in fast_need_zero are zero, the lock can be acquired by
714 // adding fast_add and oring fast_or. The bit kMuDesig should be reset iff
715 // this is the designated waker.
716 intptr_t fast_need_zero;
717 intptr_t fast_or;
718 intptr_t fast_add;
719
720 intptr_t slow_need_zero; // fast_need_zero with events (e.g. logging)
721
722 intptr_t slow_inc_need_zero; // if all the bits in slow_inc_need_zero are
723 // zero a reader can acquire a read share by
724 // setting the reader bit and incrementing
725 // the reader count (in last waiter since
726 // we're now slow-path). kMuWrWait may
727 // be ignored if we already waited once.
728 };
729
730 static const MuHowS kSharedS = {
731 // shared or read lock
732 kMuWriter | kMuWait | kMuEvent, // fast_need_zero
733 kMuReader, // fast_or
734 kMuOne, // fast_add
735 kMuWriter | kMuWait, // slow_need_zero
736 kMuSpin | kMuWriter | kMuWrWait, // slow_inc_need_zero
737 };
738 static const MuHowS kExclusiveS = {
739 // exclusive or write lock
740 kMuWriter | kMuReader | kMuEvent, // fast_need_zero
741 kMuWriter, // fast_or
742 0, // fast_add
743 kMuWriter | kMuReader, // slow_need_zero
744 ~static_cast<intptr_t>(0), // slow_inc_need_zero
745 };
746 static const Mutex::MuHow kShared = &kSharedS; // shared lock
747 static const Mutex::MuHow kExclusive = &kExclusiveS; // exclusive lock
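// Sketch of how these masks are combined on acquisition (the real fast paths
// and LockSlowLoop live elsewhere in this file; this fragment is illustrative):
//
//   intptr_t v = mu_.load(std::memory_order_relaxed);
//   if ((v & how->fast_need_zero) == 0 &&   // no conflicting holders/waiters
//       mu_.compare_exchange_strong(v, (v | how->fast_or) + how->fast_add,
//                                   std::memory_order_acquire,
//                                   std::memory_order_relaxed)) {
//     // lock acquired in mode `how` (kShared or kExclusive)
//   }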
748
749 #ifdef NDEBUG
750 static constexpr bool kDebugMode = false;
751 #else
752 static constexpr bool kDebugMode = true;
753 #endif
754
755 #ifdef ABSL_INTERNAL_HAVE_TSAN_INTERFACE
756 static unsigned TsanFlags(Mutex::MuHow how) {
757 return how == kShared ? __tsan_mutex_read_lock : 0;
758 }
759 #endif
760
761 static bool DebugOnlyIsExiting() {
762 return false;
763 }
764
765 Mutex::~Mutex() {
766 intptr_t v = mu_.load(std::memory_order_relaxed);
767 if ((v & kMuEvent) != 0 && !DebugOnlyIsExiting()) {
768 ForgetSynchEvent(&this->mu_, kMuEvent, kMuSpin);
769 }
770 if (kDebugMode) {
771 this->ForgetDeadlockInfo();
772 }
773 ABSL_TSAN_MUTEX_DESTROY(this, __tsan_mutex_not_static);
774 }
775
776 void Mutex::EnableDebugLog(const char *name) {
777 SynchEvent *e = EnsureSynchEvent(&this->mu_, name, kMuEvent, kMuSpin);
778 e->log = true;
779 UnrefSynchEvent(e);
780 }
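// Example (hypothetical caller): name a mutex so that the event log lines
// above identify it by more than its address:
//
//   absl::Mutex mu;
//   mu.EnableDebugLog("Rupert IV");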
781
782 void EnableMutexInvariantDebugging(bool enabled) {
783 synch_check_invariants.store(enabled, std::memory_order_release);
784 }
785
786 void Mutex::EnableInvariantDebugging(void (*invariant)(void *),
787 void *arg) {
788 if (synch_check_invariants.load(std::memory_order_acquire) &&
789 invariant != nullptr) {
790 SynchEvent *e = EnsureSynchEvent(&this->mu_, nullptr, kMuEvent, kMuSpin);
791 e->invariant = invariant;
792 e->arg = arg;
793 UnrefSynchEvent(e);
794 }
795 }
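// Example (hypothetical caller): once EnableMutexInvariantDebugging(true) is
// in effect, the registered function runs each time the mutex is acquired or
// released:
//
//   static void CheckNonNegative(void *arg) {
//     ABSL_RAW_CHECK(*static_cast<const int *>(arg) >= 0, "count went negative");
//   }
//   ...
//   mu.EnableInvariantDebugging(&CheckNonNegative, &guarded_count);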
796
797 void SetMutexDeadlockDetectionMode(OnDeadlockCycle mode) {
798 synch_deadlock_detection.store(mode, std::memory_order_release);
799 }
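// Example (hypothetical caller): a test that deliberately constructs a lock
// cycle can silence the detector for its duration:
//
//   absl::SetMutexDeadlockDetectionMode(absl::OnDeadlockCycle::kIgnore);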
800
801 // Return true iff threads x and y are part of the same equivalence
802 // class of waiters. An equivalence class is defined as the set of
803 // waiters with the same condition, type of lock, and thread priority.
804 //
805 // Requires that x and y be waiting on the same Mutex queue.
806 static bool MuEquivalentWaiter(PerThreadSynch *x, PerThreadSynch *y) {
807 return x->waitp->how == y->waitp->how && x->priority == y->priority &&
808 Condition::GuaranteedEqual(x->waitp->cond, y->waitp->cond);
809 }
810
811 // Given the contents of a mutex word containing a PerThreadSynch pointer,
812 // return the pointer.
813 static inline PerThreadSynch *GetPerThreadSynch(intptr_t v) {
814 return reinterpret_cast<PerThreadSynch *>(v & kMuHigh);
815 }
816
817 // The next several routines maintain the per-thread next and skip fields
818 // used in the Mutex waiter queue.
819 // The queue is a circular singly-linked list, of which the "head" is the
820 // last element, and head->next is the first element.
821 // The skip field has the invariant:
822 // For thread x, x->skip is one of:
823 // - invalid (iff x is not in a Mutex wait queue),
824 // - null, or
825 // - a pointer to a distinct thread waiting later in the same Mutex queue
826 // such that all threads in [x, x->skip] have the same condition, priority
827 // and lock type (MuEquivalentWaiter() is true for all pairs in [x,
828 // x->skip]).
829 // In addition, if x->skip is valid, (x->may_skip || x->skip == null)
830 //
831 // By the spec of MuEquivalentWaiter(), it is not necessary when removing the
832 // first runnable thread y from the front of a Mutex queue to adjust the skip
833 // field of another thread x because if x->skip==y, x->skip must (have) become
834 // invalid before y is removed. The function TryRemove can remove a specified
835 // thread from an arbitrary position in the queue whether runnable or not, so
836 // it fixes up skip fields that would otherwise be left dangling.
837 // The statement
838 // if (x->may_skip && MuEquivalentWaiter(x, x->next)) { x->skip = x->next; }
839 // maintains the invariant provided x is not the last waiter in a Mutex queue
840 // The statement
841 // if (x->skip != null) { x->skip = x->skip->skip; }
842 // maintains the invariant.
843
844 // Returns the last thread y in a mutex waiter queue such that all threads in
845 // [x, y] inclusive share the same condition. Sets skip fields of some threads
846 // in that range to optimize future evaluation of Skip() on x values in
847 // the range. Requires thread x is in a mutex waiter queue.
848 // The locking is unusual. Skip() is called under these conditions:
849 // - spinlock is held in call from Enqueue(), with maybe_unlocking == false
850 // - Mutex is held in call from UnlockSlow() by last unlocker, with
851 // maybe_unlocking == true
852 // - both Mutex and spinlock are held in call from DequeueAllWakeable() (from
853 // UnlockSlow()) and TryRemove()
854 // These cases are mutually exclusive, so Skip() never runs concurrently
855 // with itself on the same Mutex. The skip chain is used in these other places
856 // that cannot occur concurrently:
857 // - FixSkip() (from TryRemove()) (with spinlock and Mutex held)
858 // - Dequeue() (with spinlock and Mutex held)
859 // - UnlockSlow() (with spinlock and Mutex held)
860 // A more complex case is Enqueue()
861 // - Enqueue() (with spinlock held and maybe_unlocking == false)
862 // This is the first case in which Skip is called, above.
863 // - Enqueue() (without spinlock held; but queue is empty and being freshly
864 // formed)
865 // - Enqueue() (with spinlock held and maybe_unlocking == true)
866 // The first case has mutual exclusion, and the second isolation through
867 // working on an otherwise unreachable data structure.
868 // In the last case, Enqueue() is required to change no skip/next pointers
869 // except those in the added node and the former "head" node. This implies
870 // that the new node is added after head, and so must be the new head or the
871 // new front of the queue.
872 static PerThreadSynch *Skip(PerThreadSynch *x) {
873 PerThreadSynch *x0 = nullptr;
874 PerThreadSynch *x1 = x;
875 PerThreadSynch *x2 = x->skip;
876 if (x2 != nullptr) {
877 // Each iteration attempts to advance sequence (x0,x1,x2) to next sequence
878 // such that x1 == x0->skip && x2 == x1->skip
879 while ((x0 = x1, x1 = x2, x2 = x2->skip) != nullptr) {
880 x0->skip = x2; // short-circuit skip from x0 to x2
881 }
882 x->skip = x1; // short-circuit skip from x to result
883 }
884 return x1;
885 }
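// For example, if the queue segment is x -> a -> b -> c with x->skip == a,
// a->skip == b, and b->skip == c (all in one equivalence class), Skip(x)
// returns c and leaves x->skip == c, so later traversals jump the whole chain
// in one step.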
886
887 // "ancestor" appears before "to_be_removed" in the same Mutex waiter queue.
888 // The latter is going to be removed out of order, because of a timeout.
889 // Check whether "ancestor" has a skip field pointing to "to_be_removed",
890 // and fix it if it does.
891 static void FixSkip(PerThreadSynch *ancestor, PerThreadSynch *to_be_removed) {
892 if (ancestor->skip == to_be_removed) { // ancestor->skip left dangling
893 if (to_be_removed->skip != nullptr) {
894 ancestor->skip = to_be_removed->skip; // can skip past to_be_removed
895 } else if (ancestor->next != to_be_removed) { // they are not adjacent
896 ancestor->skip = ancestor->next; // can skip one past ancestor
897 } else {
898 ancestor->skip = nullptr; // can't skip at all
899 }
900 }
901 }
902
903 static void CondVarEnqueue(SynchWaitParams *waitp);
904
905 // Enqueue thread "waitp->thread" on a waiter queue.
906 // Called with mutex spinlock held if head != nullptr
907 // If head==nullptr and waitp->cv_word==nullptr, then Enqueue() is
908 // idempotent; it alters no state associated with the existing (empty)
909 // queue.
910 //
911 // If waitp->cv_word == nullptr, queue the thread at either the front or
912 // the end (according to its priority) of the circular mutex waiter queue whose
913 // head is "head", and return the new head. mu is the previous mutex state,
914 // which contains the reader count (perhaps adjusted for the operation in
915 // progress) if the list was empty and a read lock held, and the holder hint if
916 // the list was empty and a write lock held. (flags & kMuIsCond) indicates
917 // whether this thread was transferred from a CondVar or is waiting for a
918 // non-trivial condition. In this case, Enqueue() never returns nullptr.
919 //
920 // If waitp->cv_word != nullptr, CondVarEnqueue() is called, and "head" is
921 // returned. This mechanism is used by CondVar to queue a thread on the
922 // condition variable queue instead of the mutex queue in implementing Wait().
923 // In this case, Enqueue() can return nullptr (if head==nullptr).
924 static PerThreadSynch *Enqueue(PerThreadSynch *head,
925 SynchWaitParams *waitp, intptr_t mu, int flags) {
926 // If we have been given a cv_word, call CondVarEnqueue() and return
927 // the previous head of the Mutex waiter queue.
928 if (waitp->cv_word != nullptr) {
929 CondVarEnqueue(waitp);
930 return head;
931 }
932
933 PerThreadSynch *s = waitp->thread;
934 ABSL_RAW_CHECK(
935 s->waitp == nullptr || // normal case
936 s->waitp == waitp || // Fer()---transfer from condition variable
937 s->suppress_fatal_errors,
938 "detected illegal recursion into Mutex code");
939 s->waitp = waitp;
940 s->skip = nullptr; // maintain skip invariant (see above)
941 s->may_skip = true; // always true on entering queue
942 s->wake = false; // not being woken
943 s->cond_waiter = ((flags & kMuIsCond) != 0);
944 if (head == nullptr) { // s is the only waiter
945 s->next = s; // it's the only entry in the cycle
946 s->readers = mu; // reader count is from mu word
947 s->maybe_unlocking = false; // no one is searching an empty list
948 head = s; // s is new head
949 } else {
950 PerThreadSynch *enqueue_after = nullptr; // we'll put s after this element
951 #ifdef ABSL_HAVE_PTHREAD_GETSCHEDPARAM
952 int64_t now_cycles = base_internal::CycleClock::Now();
953 if (s->next_priority_read_cycles < now_cycles) {
954 // Every so often, update our idea of the thread's priority.
955 // pthread_getschedparam() is 5% of the block/wakeup time;
956 // base_internal::CycleClock::Now() is 0.5%.
957 int policy;
958 struct sched_param param;
959 const int err = pthread_getschedparam(pthread_self(), &policy, ¶m);
960 if (err != 0) {
961 ABSL_RAW_LOG(ERROR, "pthread_getschedparam failed: %d", err);
962 } else {
963 s->priority = param.sched_priority;
964 s->next_priority_read_cycles =
965 now_cycles +
966 static_cast<int64_t>(base_internal::CycleClock::Frequency());
967 }
968 }
969 if (s->priority > head->priority) { // s's priority is above head's
970 // try to put s in priority-fifo order, or failing that at the front.
971 if (!head->maybe_unlocking) {
972 // No unlocker can be scanning the queue, so we can insert into the
973 // middle of the queue.
974 //
975 // Within a skip chain, all waiters have the same priority, so we can
976 // skip forward through the chains until we find one with a lower
977 // priority than the waiter to be enqueued.
978 PerThreadSynch *advance_to = head; // next value of enqueue_after
979 do {
980 enqueue_after = advance_to;
981 // (side-effect: optimizes skip chain)
982 advance_to = Skip(enqueue_after->next);
983 } while (s->priority <= advance_to->priority);
984 // termination guaranteed because s->priority > head->priority
985 // and head is the end of a skip chain
986 } else if (waitp->how == kExclusive &&
987 Condition::GuaranteedEqual(waitp->cond, nullptr)) {
988 // An unlocker could be scanning the queue, but we know it will recheck
989 // the queue front for writers that have no condition, which is what s
990 // is, so an insert at front is safe.
991 enqueue_after = head; // add after head, at front
992 }
993 }
994 #endif
995 if (enqueue_after != nullptr) {
996 s->next = enqueue_after->next;
997 enqueue_after->next = s;
998
999 // enqueue_after can be: head, Skip(...), or cur.
1000 // The first two imply enqueue_after->skip == nullptr, and
1001 // the last is used only if MuEquivalentWaiter(s, cur).
1002 // We require this because clearing enqueue_after->skip
1003 // is impossible; enqueue_after's predecessors might also
1004 // incorrectly skip over s if we were to allow other
1005 // insertion points.
1006 ABSL_RAW_CHECK(enqueue_after->skip == nullptr ||
1007 MuEquivalentWaiter(enqueue_after, s),
1008 "Mutex Enqueue failure");
1009
1010 if (enqueue_after != head && enqueue_after->may_skip &&
1011 MuEquivalentWaiter(enqueue_after, enqueue_after->next)) {
1012 // enqueue_after can skip to its new successor, s
1013 enqueue_after->skip = enqueue_after->next;
1014 }
1015 if (MuEquivalentWaiter(s, s->next)) { // s->may_skip is known to be true
1016 s->skip = s->next; // s may skip to its successor
1017 }
1018 } else { // enqueue not done any other way, so
1019 // we're inserting s at the back
1020 // s will become new head; copy data from head into it
1021 s->next = head->next; // add s after head
1022 head->next = s;
1023 s->readers = head->readers; // reader count is from previous head
1024 s->maybe_unlocking = head->maybe_unlocking; // same for unlock hint
1025 if (head->may_skip && MuEquivalentWaiter(head, s)) {
1026 // head now has successor; may skip
1027 head->skip = s;
1028 }
1029 head = s; // s is new head
1030 }
1031 }
1032 s->state.store(PerThreadSynch::kQueued, std::memory_order_relaxed);
1033 return head;
1034 }
1035
1036 // Dequeue the successor pw->next of thread pw from the Mutex waiter queue
1037 // whose last element is head. The new head element is returned, or null
1038 // if the list is made empty.
1039 // Dequeue is called with both spinlock and Mutex held.
1040 static PerThreadSynch *Dequeue(PerThreadSynch *head, PerThreadSynch *pw) {
1041 PerThreadSynch *w = pw->next;
1042 pw->next = w->next; // snip w out of list
1043 if (head == w) { // we removed the head
1044 head = (pw == w) ? nullptr : pw; // either emptied list, or pw is new head
1045 } else if (pw != head && MuEquivalentWaiter(pw, pw->next)) {
1046 // pw can skip to its new successor
1047 if (pw->next->skip !=
1048 nullptr) {  // either skip to its successor's skip target
1049 pw->skip = pw->next->skip;
1050 } else { // or to pw's successor
1051 pw->skip = pw->next;
1052 }
1053 }
1054 return head;
1055 }
1056
1057 // Traverse the elements [pw->next, head] of the circular list whose last element
1058 // is head.
1059 // Remove all elements with wake==true and place them in the
1060 // singly-linked list wake_list in the order found. Assumes that
1061 // there is only one such element if the element has how == kExclusive.
1062 // Return the new head.
1063 static PerThreadSynch *DequeueAllWakeable(PerThreadSynch *head,
1064 PerThreadSynch *pw,
1065 PerThreadSynch **wake_tail) {
1066 PerThreadSynch *orig_h = head;
1067 PerThreadSynch *w = pw->next;
1068 bool skipped = false;
1069 do {
1070 if (w->wake) { // remove this element
1071 ABSL_RAW_CHECK(pw->skip == nullptr, "bad skip in DequeueAllWakeable");
1072 // we're removing pw's successor so either pw->skip is zero or we should
1073 // already have removed pw since if pw->skip!=null, pw has the same
1074 // condition as w.
1075 head = Dequeue(head, pw);
1076 w->next = *wake_tail; // keep list terminated
1077 *wake_tail = w; // add w to wake_list;
1078 wake_tail = &w->next; // next addition to end
1079 if (w->waitp->how == kExclusive) { // wake at most 1 writer
1080 break;
1081 }
1082 } else { // not waking this one; skip
1083 pw = Skip(w); // skip as much as possible
1084 skipped = true;
1085 }
1086 w = pw->next;
1087 // We want to stop processing after we've considered the original head,
1088 // orig_h. We can't test for w==orig_h in the loop because w may skip over
1089 // it; we are guaranteed only that w's predecessor will not skip over
1090 // orig_h. When we've considered orig_h, either we've processed it and
1091 // removed it (so orig_h != head), or we considered it and skipped it (so
1092 // skipped==true && pw == head because skipping from head always skips by
1093 // just one, leaving pw pointing at head). So we want to
1094 // continue the loop with the negation of that expression.
1095 } while (orig_h == head && (pw != head || !skipped));
1096 return head;
1097 }
1098
1099 // Try to remove thread s from the list of waiters on this mutex.
1100 // Does nothing if s is not on the waiter list.
1101 void Mutex::TryRemove(PerThreadSynch *s) {
1102 SchedulingGuard::ScopedDisable disable_rescheduling;
1103 intptr_t v = mu_.load(std::memory_order_relaxed);
1104 // acquire spinlock & lock
1105 if ((v & (kMuWait | kMuSpin | kMuWriter | kMuReader)) == kMuWait &&
1106 mu_.compare_exchange_strong(v, v | kMuSpin | kMuWriter,
1107 std::memory_order_acquire,
1108 std::memory_order_relaxed)) {
1109 PerThreadSynch *h = GetPerThreadSynch(v);
1110 if (h != nullptr) {
1111 PerThreadSynch *pw = h; // pw is w's predecessor
1112 PerThreadSynch *w;
1113 if ((w = pw->next) != s) { // search for thread,
1114 do { // processing at least one element
1115 // If the current element isn't equivalent to the waiter to be
1116 // removed, we can skip the entire chain.
1117 if (!MuEquivalentWaiter(s, w)) {
1118 pw = Skip(w); // so skip all that won't match
1119 // we don't have to worry about dangling skip fields
1120 // in the threads we skipped; none can point to s
1121 // because they are in a different equivalence class.
1122 } else { // seeking same condition
1123 FixSkip(w, s); // fix up any skip pointer from w to s
1124 pw = w;
1125 }
1126 // don't search further if we found the thread, or we're about to
1127 // process the first thread again.
1128 } while ((w = pw->next) != s && pw != h);
1129 }
1130 if (w == s) { // found thread; remove it
1131 // pw->skip may be non-zero here; the loop above ensured that
1132 // no ancestor of s can skip to s, so removal is safe anyway.
1133 h = Dequeue(h, pw);
1134 s->next = nullptr;
1135 s->state.store(PerThreadSynch::kAvailable, std::memory_order_release);
1136 }
1137 }
1138 intptr_t nv;
1139 do { // release spinlock and lock
1140 v = mu_.load(std::memory_order_relaxed);
1141 nv = v & (kMuDesig | kMuEvent);
1142 if (h != nullptr) {
1143 nv |= kMuWait | reinterpret_cast<intptr_t>(h);
1144 h->readers = 0; // we hold writer lock
1145 h->maybe_unlocking = false; // finished unlocking
1146 }
1147 } while (!mu_.compare_exchange_weak(v, nv,
1148 std::memory_order_release,
1149 std::memory_order_relaxed));
1150 }
1151 }
1152
1153 // Wait until thread "s", which must be the current thread, is removed from
1154 // this mutex's waiter queue. If "s->waitp->timeout" has a timeout, wake up
1155 // if the wait extends past the absolute time specified, even if "s" is still
1156 // on the mutex queue. In this case, remove "s" from the queue and return
1157 // true, otherwise return false.
1158 void Mutex::Block(PerThreadSynch *s) {
1159 while (s->state.load(std::memory_order_acquire) == PerThreadSynch::kQueued) {
1160 if (!DecrementSynchSem(this, s, s->waitp->timeout)) {
1161 // After a timeout, we go into a spin loop until we remove ourselves
1162 // from the queue, or someone else removes us. We can't be sure to be
1163 // able to remove ourselves in a single lock acquisition because this
1164 // mutex may be held, and the holder has the right to read the centre
1165 // of the waiter queue without holding the spinlock.
1166 this->TryRemove(s);
1167 int c = 0;
1168 while (s->next != nullptr) {
1169 c = synchronization_internal::MutexDelay(c, GENTLE);
1170 this->TryRemove(s);
1171 }
1172 if (kDebugMode) {
1173 // This ensures that we test the case that TryRemove() is called when s
1174 // is not on the queue.
1175 this->TryRemove(s);
1176 }
1177 s->waitp->timeout = KernelTimeout::Never(); // timeout is satisfied
1178 s->waitp->cond = nullptr; // condition no longer relevant for wakeups
1179 }
1180 }
1181 ABSL_RAW_CHECK(s->waitp != nullptr || s->suppress_fatal_errors,
1182 "detected illegal recursion in Mutex code");
1183 s->waitp = nullptr;
1184 }
1185
1186 // Wake thread w, and return the next thread in the list.
1187 PerThreadSynch *Mutex::Wakeup(PerThreadSynch *w) {
1188 PerThreadSynch *next = w->next;
1189 w->next = nullptr;
1190 w->state.store(PerThreadSynch::kAvailable, std::memory_order_release);
1191 IncrementSynchSem(this, w);
1192
1193 return next;
1194 }
1195
1196 static GraphId GetGraphIdLocked(Mutex *mu)
1197 ABSL_EXCLUSIVE_LOCKS_REQUIRED(deadlock_graph_mu) {
1198 if (!deadlock_graph) { // (re)create the deadlock graph.
1199 deadlock_graph =
1200 new (base_internal::LowLevelAlloc::Alloc(sizeof(*deadlock_graph)))
1201 GraphCycles;
1202 }
1203 return deadlock_graph->GetId(mu);
1204 }
1205
1206 static GraphId GetGraphId(Mutex *mu) ABSL_LOCKS_EXCLUDED(deadlock_graph_mu) {
1207 deadlock_graph_mu.Lock();
1208 GraphId id = GetGraphIdLocked(mu);
1209 deadlock_graph_mu.Unlock();
1210 return id;
1211 }
1212
1213 // Record a lock acquisition. This is used in debug mode for deadlock
1214 // detection. The held_locks pointer points to the relevant data
1215 // structure for each case.
1216 static void LockEnter(Mutex* mu, GraphId id, SynchLocksHeld *held_locks) {
1217 int n = held_locks->n;
1218 int i = 0;
1219 while (i != n && held_locks->locks[i].id != id) {
1220 i++;
1221 }
1222 if (i == n) {
1223 if (n == ABSL_ARRAYSIZE(held_locks->locks)) {
1224 held_locks->overflow = true; // lost some data
1225 } else { // we have room for lock
1226 held_locks->locks[i].mu = mu;
1227 held_locks->locks[i].count = 1;
1228 held_locks->locks[i].id = id;
1229 held_locks->n = n + 1;
1230 }
1231 } else {
1232 held_locks->locks[i].count++;
1233 }
1234 }
1235
1236 // Record a lock release. Each call to LockEnter(mu, id, x) should be
1237 // eventually followed by a call to LockLeave(mu, id, x) by the same thread.
1238 // It does not process the event if it is not needed when deadlock detection is
1239 // disabled.
1240 static void LockLeave(Mutex* mu, GraphId id, SynchLocksHeld *held_locks) {
1241 int n = held_locks->n;
1242 int i = 0;
1243 while (i != n && held_locks->locks[i].id != id) {
1244 i++;
1245 }
1246 if (i == n) {
1247 if (!held_locks->overflow) {
1248 // The deadlock id may have been reassigned after ForgetDeadlockInfo,
1249 // but in that case mu should still be present.
1250 i = 0;
1251 while (i != n && held_locks->locks[i].mu != mu) {
1252 i++;
1253 }
1254 if (i == n) { // mu missing means releasing unheld lock
1255 SynchEvent *mu_events = GetSynchEvent(mu);
1256 ABSL_RAW_LOG(FATAL,
1257 "thread releasing lock it does not hold: %p %s; "
1258 ,
1259 static_cast<void *>(mu),
1260 mu_events == nullptr ? "" : mu_events->name);
1261 }
1262 }
1263 } else if (held_locks->locks[i].count == 1) {
1264 held_locks->n = n - 1;
1265 held_locks->locks[i] = held_locks->locks[n - 1];
1266 held_locks->locks[n - 1].id = InvalidGraphId();
1267 held_locks->locks[n - 1].mu =
1268 nullptr; // clear mu to please the leak detector.
1269 } else {
1270 assert(held_locks->locks[i].count > 0);
1271 held_locks->locks[i].count--;
1272 }
1273 }
1274
1275 // Call LockEnter() if in debug mode and deadlock detection is enabled.
1276 static inline void DebugOnlyLockEnter(Mutex *mu) {
1277 if (kDebugMode) {
1278 if (synch_deadlock_detection.load(std::memory_order_acquire) !=
1279 OnDeadlockCycle::kIgnore) {
1280 LockEnter(mu, GetGraphId(mu), Synch_GetAllLocks());
1281 }
1282 }
1283 }
1284
1285 // Call LockEnter() if in debug mode and deadlock detection is enabled.
1286 static inline void DebugOnlyLockEnter(Mutex *mu, GraphId id) {
1287 if (kDebugMode) {
1288 if (synch_deadlock_detection.load(std::memory_order_acquire) !=
1289 OnDeadlockCycle::kIgnore) {
1290 LockEnter(mu, id, Synch_GetAllLocks());
1291 }
1292 }
1293 }
1294
1295 // Call LockLeave() if in debug mode and deadlock detection is enabled.
1296 static inline void DebugOnlyLockLeave(Mutex *mu) {
1297 if (kDebugMode) {
1298 if (synch_deadlock_detection.load(std::memory_order_acquire) !=
1299 OnDeadlockCycle::kIgnore) {
1300 LockLeave(mu, GetGraphId(mu), Synch_GetAllLocks());
1301 }
1302 }
1303 }
1304
1305 static char *StackString(void **pcs, int n, char *buf, int maxlen,
1306 bool symbolize) {
1307 static const int kSymLen = 200;
1308 char sym[kSymLen];
1309 int len = 0;
1310 for (int i = 0; i != n; i++) {
1311 if (len >= maxlen)
1312 return buf;
1313 size_t count = static_cast<size_t>(maxlen - len);
1314 if (symbolize) {
1315 if (!symbolizer(pcs[i], sym, kSymLen)) {
1316 sym[0] = '\0';
1317 }
1318 snprintf(buf + len, count, "%s\t@ %p %s\n", (i == 0 ? "\n" : ""), pcs[i],
1319 sym);
1320 } else {
1321 snprintf(buf + len, count, " %p", pcs[i]);
1322 }
1323 len += strlen(&buf[len]);
1324 }
1325 return buf;
1326 }
1327
1328 static char *CurrentStackString(char *buf, int maxlen, bool symbolize) {
1329 void *pcs[40];
1330 return StackString(pcs, absl::GetStackTrace(pcs, ABSL_ARRAYSIZE(pcs), 2), buf,
1331 maxlen, symbolize);
1332 }
1333
1334 namespace {
1335 enum { kMaxDeadlockPathLen = 10 }; // maximum length of a deadlock cycle;
1336 // a path this long would be remarkable
1337 // Buffers required to report a deadlock.
1338 // We do not allocate them on stack to avoid large stack frame.
1339 struct DeadlockReportBuffers {
1340 char buf[6100];
1341 GraphId path[kMaxDeadlockPathLen];
1342 };
1343
1344 struct ScopedDeadlockReportBuffers {
1345 ScopedDeadlockReportBuffers() {
1346 b = reinterpret_cast<DeadlockReportBuffers *>(
1347 base_internal::LowLevelAlloc::Alloc(sizeof(*b)));
1348 }
1349 ~ScopedDeadlockReportBuffers() { base_internal::LowLevelAlloc::Free(b); }
1350 DeadlockReportBuffers *b;
1351 };
1352
1353 // Helper to pass to GraphCycles::UpdateStackTrace.
GetStack(void ** stack,int max_depth)1354 int GetStack(void** stack, int max_depth) {
1355 return absl::GetStackTrace(stack, max_depth, 3);
1356 }
1357 } // anonymous namespace
1358
1359 // Called in debug mode when a thread is about to acquire a lock in a way that
1360 // may block.
1361 static GraphId DeadlockCheck(Mutex *mu) {
1362 if (synch_deadlock_detection.load(std::memory_order_acquire) ==
1363 OnDeadlockCycle::kIgnore) {
1364 return InvalidGraphId();
1365 }
1366
1367 SynchLocksHeld *all_locks = Synch_GetAllLocks();
1368
1369 absl::base_internal::SpinLockHolder lock(&deadlock_graph_mu);
1370 const GraphId mu_id = GetGraphIdLocked(mu);
1371
1372 if (all_locks->n == 0) {
1373 // There are no other locks held. Return now so that we don't need to
1374 // call GetSynchEvent(). This way we do not record the stack trace
1375 // for this Mutex. It's ok, since if this Mutex is involved in a deadlock,
1376 // it can't always be the first lock acquired by a thread.
1377 return mu_id;
1378 }
1379
1380 // We prefer to keep stack traces that show a thread holding and acquiring
1381 // as many locks as possible. This increases the chances that a given edge
1382 // in the acquires-before graph will be represented in the stack traces
1383 // recorded for the locks.
1384 deadlock_graph->UpdateStackTrace(mu_id, all_locks->n + 1, GetStack);
1385
1386 // For each other mutex already held by this thread:
1387 for (int i = 0; i != all_locks->n; i++) {
1388 const GraphId other_node_id = all_locks->locks[i].id;
1389 const Mutex *other =
1390 static_cast<const Mutex *>(deadlock_graph->Ptr(other_node_id));
1391 if (other == nullptr) {
1392 // Ignore stale lock
1393 continue;
1394 }
1395
1396 // Add the acquired-before edge to the graph.
1397 if (!deadlock_graph->InsertEdge(other_node_id, mu_id)) {
1398 ScopedDeadlockReportBuffers scoped_buffers;
1399 DeadlockReportBuffers *b = scoped_buffers.b;
1400 static int number_of_reported_deadlocks = 0;
1401 number_of_reported_deadlocks++;
1402 // Symbolize only the first 2 deadlock reports to avoid huge slowdowns.
1403 bool symbolize = number_of_reported_deadlocks <= 2;
1404 ABSL_RAW_LOG(ERROR, "Potential Mutex deadlock: %s",
1405 CurrentStackString(b->buf, sizeof (b->buf), symbolize));
1406 size_t len = 0;
1407 for (int j = 0; j != all_locks->n; j++) {
1408 void* pr = deadlock_graph->Ptr(all_locks->locks[j].id);
1409 if (pr != nullptr) {
1410 snprintf(b->buf + len, sizeof (b->buf) - len, " %p", pr);
1411 len += strlen(&b->buf[len]);
1412 }
1413 }
1414 ABSL_RAW_LOG(ERROR,
1415 "Acquiring absl::Mutex %p while holding %s; a cycle in the "
1416 "historical lock ordering graph has been observed",
1417 static_cast<void *>(mu), b->buf);
1418 ABSL_RAW_LOG(ERROR, "Cycle: ");
1419 int path_len = deadlock_graph->FindPath(
1420 mu_id, other_node_id, ABSL_ARRAYSIZE(b->path), b->path);
1421 for (int j = 0; j != path_len; j++) {
1422 GraphId id = b->path[j];
1423 Mutex *path_mu = static_cast<Mutex *>(deadlock_graph->Ptr(id));
1424 if (path_mu == nullptr) continue;
1425 void** stack;
1426 int depth = deadlock_graph->GetStackTrace(id, &stack);
1427 snprintf(b->buf, sizeof(b->buf),
1428 "mutex@%p stack: ", static_cast<void *>(path_mu));
1429 StackString(stack, depth, b->buf + strlen(b->buf),
1430 static_cast<int>(sizeof(b->buf) - strlen(b->buf)),
1431 symbolize);
1432 ABSL_RAW_LOG(ERROR, "%s", b->buf);
1433 }
1434 if (synch_deadlock_detection.load(std::memory_order_acquire) ==
1435 OnDeadlockCycle::kAbort) {
1436 deadlock_graph_mu.Unlock(); // avoid deadlock in fatal sighandler
1437 ABSL_RAW_LOG(FATAL, "dying due to potential deadlock");
1438 return mu_id;
1439 }
1440 break; // report at most one potential deadlock per acquisition
1441 }
1442 }
1443
1444 return mu_id;
1445 }
1446
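// Illustrative sketch (not part of this file): deadlock detection is driven by
// the mode set with absl::SetMutexDeadlockDetectionMode(). In a debug build,
// an inconsistent acquisition order such as the one below is what
// DeadlockCheck() reports:
//
//   absl::SetMutexDeadlockDetectionMode(absl::OnDeadlockCycle::kReport);
//   absl::Mutex a, b;
//   // Thread 1:  a.Lock();  b.Lock();  ...  b.Unlock();  a.Unlock();
//   // Thread 2:  b.Lock();  a.Lock();  // reversed order -> cycle reported
//
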
1447 // Invoke DeadlockCheck() iff we're in debug mode and
1448 // deadlock checking has been enabled.
1449 static inline GraphId DebugOnlyDeadlockCheck(Mutex *mu) {
1450 if (kDebugMode && synch_deadlock_detection.load(std::memory_order_acquire) !=
1451 OnDeadlockCycle::kIgnore) {
1452 return DeadlockCheck(mu);
1453 } else {
1454 return InvalidGraphId();
1455 }
1456 }
1457
1458 void Mutex::ForgetDeadlockInfo() {
1459 if (kDebugMode && synch_deadlock_detection.load(std::memory_order_acquire) !=
1460 OnDeadlockCycle::kIgnore) {
1461 deadlock_graph_mu.Lock();
1462 if (deadlock_graph != nullptr) {
1463 deadlock_graph->RemoveNode(this);
1464 }
1465 deadlock_graph_mu.Unlock();
1466 }
1467 }
1468
1469 void Mutex::AssertNotHeld() const {
1470 // We have the data to allow this check only if in debug mode and deadlock
1471 // detection is enabled.
1472 if (kDebugMode &&
1473 (mu_.load(std::memory_order_relaxed) & (kMuWriter | kMuReader)) != 0 &&
1474 synch_deadlock_detection.load(std::memory_order_acquire) !=
1475 OnDeadlockCycle::kIgnore) {
1476 GraphId id = GetGraphId(const_cast<Mutex *>(this));
1477 SynchLocksHeld *locks = Synch_GetAllLocks();
1478 for (int i = 0; i != locks->n; i++) {
1479 if (locks->locks[i].id == id) {
1480 SynchEvent *mu_events = GetSynchEvent(this);
1481 ABSL_RAW_LOG(FATAL, "thread should not hold mutex %p %s",
1482 static_cast<const void *>(this),
1483 (mu_events == nullptr ? "" : mu_events->name));
1484 }
1485 }
1486 }
1487 }
1488
1489 // Attempt to acquire *mu, and return whether successful. The implementation
1490 // may spin for a short while if the lock cannot be acquired immediately.
1491 static bool TryAcquireWithSpinning(std::atomic<intptr_t>* mu) {
1492 int c = GetMutexGlobals().spinloop_iterations;
1493 do { // do/while somewhat faster on AMD
1494 intptr_t v = mu->load(std::memory_order_relaxed);
1495 if ((v & (kMuReader|kMuEvent)) != 0) {
1496 return false; // a reader or tracing -> give up
1497 } else if (((v & kMuWriter) == 0) && // no holder -> try to acquire
1498 mu->compare_exchange_strong(v, kMuWriter | v,
1499 std::memory_order_acquire,
1500 std::memory_order_relaxed)) {
1501 return true;
1502 }
1503 } while (--c > 0);
1504 return false;
1505 }
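
// The spin budget comes from GetMutexGlobals().spinloop_iterations, which is
// typically zero on uniprocessors (where spinning cannot help); in that case
// the do/while body above runs just once before the caller falls back to
// LockSlow().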
1506
1507 void Mutex::Lock() {
1508 ABSL_TSAN_MUTEX_PRE_LOCK(this, 0);
1509 GraphId id = DebugOnlyDeadlockCheck(this);
1510 intptr_t v = mu_.load(std::memory_order_relaxed);
1511 // try fast acquire, then spin loop
1512 if ((v & (kMuWriter | kMuReader | kMuEvent)) != 0 ||
1513 !mu_.compare_exchange_strong(v, kMuWriter | v,
1514 std::memory_order_acquire,
1515 std::memory_order_relaxed)) {
1516 // try spin acquire, then slow loop
1517 if (!TryAcquireWithSpinning(&this->mu_)) {
1518 this->LockSlow(kExclusive, nullptr, 0);
1519 }
1520 }
1521 DebugOnlyLockEnter(this, id);
1522 ABSL_TSAN_MUTEX_POST_LOCK(this, 0, 0);
1523 }
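
// Most callers do not pair Lock()/Unlock() manually; a minimal sketch of the
// usual RAII pattern from mutex.h:
//
//   absl::Mutex mu;
//   {
//     absl::MutexLock l(&mu);   // or absl::ReaderMutexLock for shared access
//     // ... read/write state guarded by mu ...
//   }                           // mu released automatically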
1524
1525 void Mutex::ReaderLock() {
1526 ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_read_lock);
1527 GraphId id = DebugOnlyDeadlockCheck(this);
1528 intptr_t v = mu_.load(std::memory_order_relaxed);
1529 // try fast acquire, then slow loop
1530 if ((v & (kMuWriter | kMuWait | kMuEvent)) != 0 ||
1531 !mu_.compare_exchange_strong(v, (kMuReader | v) + kMuOne,
1532 std::memory_order_acquire,
1533 std::memory_order_relaxed)) {
1534 this->LockSlow(kShared, nullptr, 0);
1535 }
1536 DebugOnlyLockEnter(this, id);
1537 ABSL_TSAN_MUTEX_POST_LOCK(this, __tsan_mutex_read_lock, 0);
1538 }
1539
1540 void Mutex::LockWhen(const Condition &cond) {
1541 ABSL_TSAN_MUTEX_PRE_LOCK(this, 0);
1542 GraphId id = DebugOnlyDeadlockCheck(this);
1543 this->LockSlow(kExclusive, &cond, 0);
1544 DebugOnlyLockEnter(this, id);
1545 ABSL_TSAN_MUTEX_POST_LOCK(this, 0, 0);
1546 }
1547
1548 bool Mutex::LockWhenWithTimeout(const Condition &cond, absl::Duration timeout) {
1549 return LockWhenWithDeadline(cond, DeadlineFromTimeout(timeout));
1550 }
1551
1552 bool Mutex::LockWhenWithDeadline(const Condition &cond, absl::Time deadline) {
1553 ABSL_TSAN_MUTEX_PRE_LOCK(this, 0);
1554 GraphId id = DebugOnlyDeadlockCheck(this);
1555 bool res = LockSlowWithDeadline(kExclusive, &cond,
1556 KernelTimeout(deadline), 0);
1557 DebugOnlyLockEnter(this, id);
1558 ABSL_TSAN_MUTEX_POST_LOCK(this, 0, 0);
1559 return res;
1560 }
1561
1562 void Mutex::ReaderLockWhen(const Condition &cond) {
1563 ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_read_lock);
1564 GraphId id = DebugOnlyDeadlockCheck(this);
1565 this->LockSlow(kShared, &cond, 0);
1566 DebugOnlyLockEnter(this, id);
1567 ABSL_TSAN_MUTEX_POST_LOCK(this, __tsan_mutex_read_lock, 0);
1568 }
1569
1570 bool Mutex::ReaderLockWhenWithTimeout(const Condition &cond,
1571 absl::Duration timeout) {
1572 return ReaderLockWhenWithDeadline(cond, DeadlineFromTimeout(timeout));
1573 }
1574
1575 bool Mutex::ReaderLockWhenWithDeadline(const Condition &cond,
1576 absl::Time deadline) {
1577 ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_read_lock);
1578 GraphId id = DebugOnlyDeadlockCheck(this);
1579 bool res = LockSlowWithDeadline(kShared, &cond, KernelTimeout(deadline), 0);
1580 DebugOnlyLockEnter(this, id);
1581 ABSL_TSAN_MUTEX_POST_LOCK(this, __tsan_mutex_read_lock, 0);
1582 return res;
1583 }
1584
1585 void Mutex::Await(const Condition &cond) {
1586 if (cond.Eval()) { // condition already true; nothing to do
1587 if (kDebugMode) {
1588 this->AssertReaderHeld();
1589 }
1590 } else { // normal case
1591 ABSL_RAW_CHECK(this->AwaitCommon(cond, KernelTimeout::Never()),
1592 "condition untrue on return from Await");
1593 }
1594 }
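
// Minimal usage sketch for Await(): the caller must already hold this Mutex;
// the lock is released while blocked and is reacquired before returning.
//
//   bool done = false;                 // guarded by mu
//   absl::MutexLock l(&mu);
//   mu.Await(absl::Condition(&done));  // returns once done == true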
1595
1596 bool Mutex::AwaitWithTimeout(const Condition &cond, absl::Duration timeout) {
1597 return AwaitWithDeadline(cond, DeadlineFromTimeout(timeout));
1598 }
1599
1600 bool Mutex::AwaitWithDeadline(const Condition &cond, absl::Time deadline) {
1601 if (cond.Eval()) { // condition already true; nothing to do
1602 if (kDebugMode) {
1603 this->AssertReaderHeld();
1604 }
1605 return true;
1606 }
1607
1608 KernelTimeout t{deadline};
1609 bool res = this->AwaitCommon(cond, t);
1610 ABSL_RAW_CHECK(res || t.has_timeout(),
1611 "condition untrue on return from Await");
1612 return res;
1613 }
1614
1615 bool Mutex::AwaitCommon(const Condition &cond, KernelTimeout t) {
1616 this->AssertReaderHeld();
1617 MuHow how =
1618 (mu_.load(std::memory_order_relaxed) & kMuWriter) ? kExclusive : kShared;
1619 ABSL_TSAN_MUTEX_PRE_UNLOCK(this, TsanFlags(how));
1620 SynchWaitParams waitp(
1621 how, &cond, t, nullptr /*no cvmu*/, Synch_GetPerThreadAnnotated(this),
1622 nullptr /*no cv_word*/);
1623 int flags = kMuHasBlocked;
1624 if (!Condition::GuaranteedEqual(&cond, nullptr)) {
1625 flags |= kMuIsCond;
1626 }
1627 this->UnlockSlow(&waitp);
1628 this->Block(waitp.thread);
1629 ABSL_TSAN_MUTEX_POST_UNLOCK(this, TsanFlags(how));
1630 ABSL_TSAN_MUTEX_PRE_LOCK(this, TsanFlags(how));
1631 this->LockSlowLoop(&waitp, flags);
1632 bool res = waitp.cond != nullptr || // => cond known true from LockSlowLoop
1633 EvalConditionAnnotated(&cond, this, true, false, how == kShared);
1634 ABSL_TSAN_MUTEX_POST_LOCK(this, TsanFlags(how), 0);
1635 return res;
1636 }
1637
1638 bool Mutex::TryLock() {
1639 ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_try_lock);
1640 intptr_t v = mu_.load(std::memory_order_relaxed);
1641 if ((v & (kMuWriter | kMuReader | kMuEvent)) == 0 && // try fast acquire
1642 mu_.compare_exchange_strong(v, kMuWriter | v,
1643 std::memory_order_acquire,
1644 std::memory_order_relaxed)) {
1645 DebugOnlyLockEnter(this);
1646 ABSL_TSAN_MUTEX_POST_LOCK(this, __tsan_mutex_try_lock, 0);
1647 return true;
1648 }
1649 if ((v & kMuEvent) != 0) { // we're recording events
1650 if ((v & kExclusive->slow_need_zero) == 0 && // try fast acquire
1651 mu_.compare_exchange_strong(
1652 v, (kExclusive->fast_or | v) + kExclusive->fast_add,
1653 std::memory_order_acquire, std::memory_order_relaxed)) {
1654 DebugOnlyLockEnter(this);
1655 PostSynchEvent(this, SYNCH_EV_TRYLOCK_SUCCESS);
1656 ABSL_TSAN_MUTEX_POST_LOCK(this, __tsan_mutex_try_lock, 0);
1657 return true;
1658 } else {
1659 PostSynchEvent(this, SYNCH_EV_TRYLOCK_FAILED);
1660 }
1661 }
1662 ABSL_TSAN_MUTEX_POST_LOCK(
1663 this, __tsan_mutex_try_lock | __tsan_mutex_try_lock_failed, 0);
1664 return false;
1665 }
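
// Usage sketch: TryLock() never blocks, so callers must handle failure.
//
//   if (mu.TryLock()) {
//     // ... fast path while holding mu ...
//     mu.Unlock();
//   } else {
//     // ... fall back without the lock ...
//   }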
1666
1667 bool Mutex::ReaderTryLock() {
1668 ABSL_TSAN_MUTEX_PRE_LOCK(this,
1669 __tsan_mutex_read_lock | __tsan_mutex_try_lock);
1670 intptr_t v = mu_.load(std::memory_order_relaxed);
1671 // The while-loops (here and below) iterate only if the mutex word keeps
1672 // changing (typically because the reader count changes) under the CAS. We
1673 // limit the number of attempts to avoid having to think about livelock.
1674 int loop_limit = 5;
1675 while ((v & (kMuWriter|kMuWait|kMuEvent)) == 0 && loop_limit != 0) {
1676 if (mu_.compare_exchange_strong(v, (kMuReader | v) + kMuOne,
1677 std::memory_order_acquire,
1678 std::memory_order_relaxed)) {
1679 DebugOnlyLockEnter(this);
1680 ABSL_TSAN_MUTEX_POST_LOCK(
1681 this, __tsan_mutex_read_lock | __tsan_mutex_try_lock, 0);
1682 return true;
1683 }
1684 loop_limit--;
1685 v = mu_.load(std::memory_order_relaxed);
1686 }
1687 if ((v & kMuEvent) != 0) { // we're recording events
1688 loop_limit = 5;
1689 while ((v & kShared->slow_need_zero) == 0 && loop_limit != 0) {
1690 if (mu_.compare_exchange_strong(v, (kMuReader | v) + kMuOne,
1691 std::memory_order_acquire,
1692 std::memory_order_relaxed)) {
1693 DebugOnlyLockEnter(this);
1694 PostSynchEvent(this, SYNCH_EV_READERTRYLOCK_SUCCESS);
1695 ABSL_TSAN_MUTEX_POST_LOCK(
1696 this, __tsan_mutex_read_lock | __tsan_mutex_try_lock, 0);
1697 return true;
1698 }
1699 loop_limit--;
1700 v = mu_.load(std::memory_order_relaxed);
1701 }
1702 if ((v & kMuEvent) != 0) {
1703 PostSynchEvent(this, SYNCH_EV_READERTRYLOCK_FAILED);
1704 }
1705 }
1706 ABSL_TSAN_MUTEX_POST_LOCK(this,
1707 __tsan_mutex_read_lock | __tsan_mutex_try_lock |
1708 __tsan_mutex_try_lock_failed,
1709 0);
1710 return false;
1711 }
1712
1713 void Mutex::Unlock() {
1714 ABSL_TSAN_MUTEX_PRE_UNLOCK(this, 0);
1715 DebugOnlyLockLeave(this);
1716 intptr_t v = mu_.load(std::memory_order_relaxed);
1717
1718 if (kDebugMode && ((v & (kMuWriter | kMuReader)) != kMuWriter)) {
1719 ABSL_RAW_LOG(FATAL, "Mutex unlocked when destroyed or not locked: v=0x%x",
1720 static_cast<unsigned>(v));
1721 }
1722
1723 // should_try_cas is whether we'll try a compare-and-swap immediately.
1724 // NOTE: optimized out when kDebugMode is false.
1725 bool should_try_cas = ((v & (kMuEvent | kMuWriter)) == kMuWriter &&
1726 (v & (kMuWait | kMuDesig)) != kMuWait);
1727 // However, we can use an alternate computation that compilers
1728 // currently don't find on their own. When that changes, this function
1729 // can be simplified.
1730 intptr_t x = (v ^ (kMuWriter | kMuWait)) & (kMuWriter | kMuEvent);
1731 intptr_t y = (v ^ (kMuWriter | kMuWait)) & (kMuWait | kMuDesig);
1732 // Claim: "x == 0 && y > 0" is equal to should_try_cas.
1733 // Also, because kMuWriter and kMuEvent exceed kMuDesig and kMuWait,
1734 // all possible non-zero values for x exceed all possible values for y.
1735 // Therefore, (x == 0 && y > 0) == (x < y).
1736 if (kDebugMode && should_try_cas != (x < y)) {
1737 // We would usually use PRIdPTR here, but it is not correctly implemented
1738 // within the Android toolchain.
1739 ABSL_RAW_LOG(FATAL, "internal logic error %llx %llx %llx\n",
1740 static_cast<long long>(v), static_cast<long long>(x),
1741 static_cast<long long>(y));
1742 }
1743 if (x < y &&
1744 mu_.compare_exchange_strong(v, v & ~(kMuWrWait | kMuWriter),
1745 std::memory_order_release,
1746 std::memory_order_relaxed)) {
1747 // fast writer release (writer with no waiters or with designated waker)
1748 } else {
1749 this->UnlockSlow(nullptr /*no waitp*/); // take slow path
1750 }
1751 ABSL_TSAN_MUTEX_POST_UNLOCK(this, 0);
1752 }
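
// Re the x/y trick above: x == 0 exactly when kMuWriter is set and kMuEvent is
// clear, and y > 0 exactly when kMuWait is clear or kMuDesig is set, so
// "x == 0 && y > 0" restates should_try_cas bit for bit.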
1753
1754 // Requires v to represent a reader-locked state.
1755 static bool ExactlyOneReader(intptr_t v) {
1756 assert((v & (kMuWriter|kMuReader)) == kMuReader);
1757 assert((v & kMuHigh) != 0);
1758 // The more straightforward "(v & kMuHigh) == kMuOne" also works, but
1759 // on some architectures the following generates slightly smaller code.
1760 // It may be faster too.
1761 constexpr intptr_t kMuMultipleWaitersMask = kMuHigh ^ kMuOne;
1762 return (v & kMuMultipleWaitersMask) == 0;
1763 }
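
// In other words: the reader count lives in the kMuHigh bits with kMuOne as
// its unit, so a count of exactly one sets only the kMuOne bit there, and
// masking that bit out leaves nothing for a single reader.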
1764
1765 void Mutex::ReaderUnlock() {
1766 ABSL_TSAN_MUTEX_PRE_UNLOCK(this, __tsan_mutex_read_lock);
1767 DebugOnlyLockLeave(this);
1768 intptr_t v = mu_.load(std::memory_order_relaxed);
1769 assert((v & (kMuWriter|kMuReader)) == kMuReader);
1770 if ((v & (kMuReader|kMuWait|kMuEvent)) == kMuReader) {
1771 // fast reader release (reader with no waiters)
1772 intptr_t clear = ExactlyOneReader(v) ? kMuReader|kMuOne : kMuOne;
1773 if (mu_.compare_exchange_strong(v, v - clear,
1774 std::memory_order_release,
1775 std::memory_order_relaxed)) {
1776 ABSL_TSAN_MUTEX_POST_UNLOCK(this, __tsan_mutex_read_lock);
1777 return;
1778 }
1779 }
1780 this->UnlockSlow(nullptr /*no waitp*/); // take slow path
1781 ABSL_TSAN_MUTEX_POST_UNLOCK(this, __tsan_mutex_read_lock);
1782 }
1783
1784 // Clears the designated waker flag in the mutex if this thread has blocked, and
1785 // therefore may be the designated waker.
1786 static intptr_t ClearDesignatedWakerMask(int flag) {
1787 assert(flag >= 0);
1788 assert(flag <= 1);
1789 switch (flag) {
1790 case 0: // not blocked
1791 return ~static_cast<intptr_t>(0);
1792 case 1: // blocked; turn off the designated waker bit
1793 return ~static_cast<intptr_t>(kMuDesig);
1794 }
1795 ABSL_UNREACHABLE();
1796 }
1797
1798 // Conditionally ignores the existence of waiting writers if a reader that has
1799 // already blocked once wakes up.
1800 static intptr_t IgnoreWaitingWritersMask(int flag) {
1801 assert(flag >= 0);
1802 assert(flag <= 1);
1803 switch (flag) {
1804 case 0: // not blocked
1805 return ~static_cast<intptr_t>(0);
1806 case 1: // blocked; pretend there are no waiting writers
1807 return ~static_cast<intptr_t>(kMuWrWait);
1808 }
1809 ABSL_UNREACHABLE();
1810 }
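
// Both helpers above return an all-ones mask when the thread has not yet
// blocked, so "v & Mask(flags & kMuHasBlocked)" is a no-op in that case and
// strips only the relevant bit once the thread has already blocked.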
1811
1812 // Internal version of LockWhen(). See LockSlowWithDeadline()
1813 ABSL_ATTRIBUTE_NOINLINE void Mutex::LockSlow(MuHow how, const Condition *cond,
1814 int flags) {
1815 ABSL_RAW_CHECK(
1816 this->LockSlowWithDeadline(how, cond, KernelTimeout::Never(), flags),
1817 "condition untrue on return from LockSlow");
1818 }
1819
1820 // Compute cond->Eval() and tell race detectors that we do it under mutex mu.
1821 static inline bool EvalConditionAnnotated(const Condition *cond, Mutex *mu,
1822 bool locking, bool trylock,
1823 bool read_lock) {
1824 // Delicate annotation dance.
1825 // We are currently inside of read/write lock/unlock operation.
1826 // All memory accesses are ignored inside of mutex operations + for unlock
1827 // operation tsan considers that we've already released the mutex.
1828 bool res = false;
1829 #ifdef ABSL_INTERNAL_HAVE_TSAN_INTERFACE
1830 const uint32_t flags = read_lock ? __tsan_mutex_read_lock : 0;
1831 const uint32_t tryflags = flags | (trylock ? __tsan_mutex_try_lock : 0);
1832 #endif
1833 if (locking) {
1834 // For lock we pretend that we have finished the operation,
1835 // evaluate the predicate, then unlock the mutex and start locking it again
1836 // to match the annotation at the end of outer lock operation.
1837 // Note: we can't simply do POST_LOCK, Eval, PRE_LOCK, because then tsan
1838 // will think the lock acquisition is recursive which will trigger
1839 // deadlock detector.
1840 ABSL_TSAN_MUTEX_POST_LOCK(mu, tryflags, 0);
1841 res = cond->Eval();
1842 // There is no "try" version of Unlock, so use flags instead of tryflags.
1843 ABSL_TSAN_MUTEX_PRE_UNLOCK(mu, flags);
1844 ABSL_TSAN_MUTEX_POST_UNLOCK(mu, flags);
1845 ABSL_TSAN_MUTEX_PRE_LOCK(mu, tryflags);
1846 } else {
1847 // Similarly, for unlock we pretend that we have unlocked the mutex,
1848 // lock the mutex, evaluate the predicate, and start unlocking it again
1849 // to match the annotation at the end of outer unlock operation.
1850 ABSL_TSAN_MUTEX_POST_UNLOCK(mu, flags);
1851 ABSL_TSAN_MUTEX_PRE_LOCK(mu, flags);
1852 ABSL_TSAN_MUTEX_POST_LOCK(mu, flags, 0);
1853 res = cond->Eval();
1854 ABSL_TSAN_MUTEX_PRE_UNLOCK(mu, flags);
1855 }
1856 // Prevent unused param warnings in non-TSAN builds.
1857 static_cast<void>(mu);
1858 static_cast<void>(trylock);
1859 static_cast<void>(read_lock);
1860 return res;
1861 }
1862
1863 // Compute cond->Eval() hiding it from race detectors.
1864 // We are hiding it because inside of UnlockSlow we can evaluate a predicate
1865 // that was just added by a concurrent Lock operation; Lock adds the predicate
1866 // to the internal Mutex list without actually acquiring the Mutex
1867 // (it only acquires the internal spinlock, which is rightfully invisible for
1868 // tsan). As a result there is no tsan-visible synchronization between the
1869 // addition and this thread. So if we were to enable race detection here,
1870 // it would race with the predicate initialization.
1871 static inline bool EvalConditionIgnored(Mutex *mu, const Condition *cond) {
1872 // Memory accesses are already ignored inside of lock/unlock operations,
1873 // but synchronization operations are also ignored. When we evaluate the
1874 // predicate we must ignore only memory accesses but not synchronization,
1875 // because missed synchronization can lead to false reports later.
1876 // So we "divert" (which un-ignores both memory accesses and synchronization)
1877 // and then separately turn on ignores of memory accesses.
1878 ABSL_TSAN_MUTEX_PRE_DIVERT(mu, 0);
1879 ABSL_ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN();
1880 bool res = cond->Eval();
1881 ABSL_ANNOTATE_IGNORE_READS_AND_WRITES_END();
1882 ABSL_TSAN_MUTEX_POST_DIVERT(mu, 0);
1883 static_cast<void>(mu); // Prevent unused param warning in non-TSAN builds.
1884 return res;
1885 }
1886
1887 // Internal equivalent of *LockWhenWithDeadline(), where
1888 // "t" represents the absolute timeout; !t.has_timeout() means "forever".
1889 // "how" is "kShared" (for ReaderLockWhen) or "kExclusive" (for LockWhen)
1890 // In flags, bits are ored together:
1891 // - kMuHasBlocked indicates that the client has already blocked on the call so
1892 // the designated waker bit must be cleared and waiting writers should not
1893 // obstruct this call
1894 // - kMuIsCond indicates that this is a conditional acquire (condition variable,
1895 // Await, LockWhen) so contention profiling should be suppressed.
1896 bool Mutex::LockSlowWithDeadline(MuHow how, const Condition *cond,
1897 KernelTimeout t, int flags) {
1898 intptr_t v = mu_.load(std::memory_order_relaxed);
1899 bool unlock = false;
1900 if ((v & how->fast_need_zero) == 0 && // try fast acquire
1901 mu_.compare_exchange_strong(
1902 v,
1903 (how->fast_or |
1904 (v & ClearDesignatedWakerMask(flags & kMuHasBlocked))) +
1905 how->fast_add,
1906 std::memory_order_acquire, std::memory_order_relaxed)) {
1907 if (cond == nullptr ||
1908 EvalConditionAnnotated(cond, this, true, false, how == kShared)) {
1909 return true;
1910 }
1911 unlock = true;
1912 }
1913 SynchWaitParams waitp(
1914 how, cond, t, nullptr /*no cvmu*/, Synch_GetPerThreadAnnotated(this),
1915 nullptr /*no cv_word*/);
1916 if (!Condition::GuaranteedEqual(cond, nullptr)) {
1917 flags |= kMuIsCond;
1918 }
1919 if (unlock) {
1920 this->UnlockSlow(&waitp);
1921 this->Block(waitp.thread);
1922 flags |= kMuHasBlocked;
1923 }
1924 this->LockSlowLoop(&waitp, flags);
1925 return waitp.cond != nullptr || // => cond known true from LockSlowLoop
1926 cond == nullptr ||
1927 EvalConditionAnnotated(cond, this, true, false, how == kShared);
1928 }
1929
1930 // RAW_CHECK_FMT() takes a condition, a printf-style format string, and
1931 // the printf-style argument list. The format string must be a literal.
1932 // Arguments after the first are evaluated only if the condition is false.
1933 #define RAW_CHECK_FMT(cond, ...) \
1934 do { \
1935 if (ABSL_PREDICT_FALSE(!(cond))) { \
1936 ABSL_RAW_LOG(FATAL, "Check " #cond " failed: " __VA_ARGS__); \
1937 } \
1938 } while (0)
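
// Usage sketch for the macro above (the format arguments are evaluated only on
// failure; the names are hypothetical):
//
//   RAW_CHECK_FMT(ptr != nullptr, "unexpected null pointer from %s", where);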
1939
1940 static void CheckForMutexCorruption(intptr_t v, const char* label) {
1941 // Test for either of two situations that should not occur in v:
1942 // kMuWriter and kMuReader
1943 // kMuWrWait and !kMuWait
1944 const uintptr_t w = static_cast<uintptr_t>(v ^ kMuWait);
1945 // By flipping that bit, we can now test for:
1946 // kMuWriter and kMuReader in w
1947 // kMuWrWait and kMuWait in w
1948 // We've chosen these two pairs of values to be so that they will overlap,
1949 // respectively, when the word is left shifted by three. This allows us to
1950 // save a branch in the common (correct) case of them not being coincident.
1951 static_assert(kMuReader << 3 == kMuWriter, "must match");
1952 static_assert(kMuWait << 3 == kMuWrWait, "must match");
1953 if (ABSL_PREDICT_TRUE((w & (w << 3) & (kMuWriter | kMuWrWait)) == 0)) return;
1954 RAW_CHECK_FMT((v & (kMuWriter | kMuReader)) != (kMuWriter | kMuReader),
1955 "%s: Mutex corrupt: both reader and writer lock held: %p",
1956 label, reinterpret_cast<void *>(v));
1957 RAW_CHECK_FMT((v & (kMuWait | kMuWrWait)) != kMuWrWait,
1958 "%s: Mutex corrupt: waiting writer with no waiters: %p",
1959 label, reinterpret_cast<void *>(v));
1960 assert(false);
1961 }
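
// Why the single fast test above suffices: after flipping kMuWait in w, each
// bad pattern becomes "a bit and that bit << 3 both set" (kMuReader/kMuWriter
// for the first, kMuWait/kMuWrWait for the second, per the static_asserts), so
// "w & (w << 3)" has kMuWriter or kMuWrWait set exactly when v is corrupt.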
1962
1963 void Mutex::LockSlowLoop(SynchWaitParams *waitp, int flags) {
1964 SchedulingGuard::ScopedDisable disable_rescheduling;
1965 int c = 0;
1966 intptr_t v = mu_.load(std::memory_order_relaxed);
1967 if ((v & kMuEvent) != 0) {
1968 PostSynchEvent(this,
1969 waitp->how == kExclusive? SYNCH_EV_LOCK: SYNCH_EV_READERLOCK);
1970 }
1971 ABSL_RAW_CHECK(
1972 waitp->thread->waitp == nullptr || waitp->thread->suppress_fatal_errors,
1973 "detected illegal recursion into Mutex code");
1974 for (;;) {
1975 v = mu_.load(std::memory_order_relaxed);
1976 CheckForMutexCorruption(v, "Lock");
1977 if ((v & waitp->how->slow_need_zero) == 0) {
1978 if (mu_.compare_exchange_strong(
1979 v,
1980 (waitp->how->fast_or |
1981 (v & ClearDesignatedWakerMask(flags & kMuHasBlocked))) +
1982 waitp->how->fast_add,
1983 std::memory_order_acquire, std::memory_order_relaxed)) {
1984 if (waitp->cond == nullptr ||
1985 EvalConditionAnnotated(waitp->cond, this, true, false,
1986 waitp->how == kShared)) {
1987 break; // we timed out, or condition true, so return
1988 }
1989 this->UnlockSlow(waitp); // got lock but condition false
1990 this->Block(waitp->thread);
1991 flags |= kMuHasBlocked;
1992 c = 0;
1993 }
1994 } else { // need to access waiter list
1995 bool dowait = false;
1996 if ((v & (kMuSpin|kMuWait)) == 0) { // no waiters
1997 // This thread tries to become the one and only waiter.
1998 PerThreadSynch *new_h = Enqueue(nullptr, waitp, v, flags);
1999 intptr_t nv =
2000 (v & ClearDesignatedWakerMask(flags & kMuHasBlocked) & kMuLow) |
2001 kMuWait;
2002 ABSL_RAW_CHECK(new_h != nullptr, "Enqueue to empty list failed");
2003 if (waitp->how == kExclusive && (v & kMuReader) != 0) {
2004 nv |= kMuWrWait;
2005 }
2006 if (mu_.compare_exchange_strong(
2007 v, reinterpret_cast<intptr_t>(new_h) | nv,
2008 std::memory_order_release, std::memory_order_relaxed)) {
2009 dowait = true;
2010 } else { // attempted Enqueue() failed
2011 // zero out the waitp field set by Enqueue()
2012 waitp->thread->waitp = nullptr;
2013 }
2014 } else if ((v & waitp->how->slow_inc_need_zero &
2015 IgnoreWaitingWritersMask(flags & kMuHasBlocked)) == 0) {
2016 // This is a reader that needs to increment the reader count,
2017 // but the count is currently held in the last waiter.
2018 if (mu_.compare_exchange_strong(
2019 v,
2020 (v & ClearDesignatedWakerMask(flags & kMuHasBlocked)) |
2021 kMuSpin | kMuReader,
2022 std::memory_order_acquire, std::memory_order_relaxed)) {
2023 PerThreadSynch *h = GetPerThreadSynch(v);
2024 h->readers += kMuOne; // inc reader count in waiter
2025 do { // release spinlock
2026 v = mu_.load(std::memory_order_relaxed);
2027 } while (!mu_.compare_exchange_weak(v, (v & ~kMuSpin) | kMuReader,
2028 std::memory_order_release,
2029 std::memory_order_relaxed));
2030 if (waitp->cond == nullptr ||
2031 EvalConditionAnnotated(waitp->cond, this, true, false,
2032 waitp->how == kShared)) {
2033 break; // we timed out, or condition true, so return
2034 }
2035 this->UnlockSlow(waitp); // got lock but condition false
2036 this->Block(waitp->thread);
2037 flags |= kMuHasBlocked;
2038 c = 0;
2039 }
2040 } else if ((v & kMuSpin) == 0 && // attempt to queue ourselves
2041 mu_.compare_exchange_strong(
2042 v,
2043 (v & ClearDesignatedWakerMask(flags & kMuHasBlocked)) |
2044 kMuSpin | kMuWait,
2045 std::memory_order_acquire, std::memory_order_relaxed)) {
2046 PerThreadSynch *h = GetPerThreadSynch(v);
2047 PerThreadSynch *new_h = Enqueue(h, waitp, v, flags);
2048 intptr_t wr_wait = 0;
2049 ABSL_RAW_CHECK(new_h != nullptr, "Enqueue to list failed");
2050 if (waitp->how == kExclusive && (v & kMuReader) != 0) {
2051 wr_wait = kMuWrWait; // give priority to a waiting writer
2052 }
2053 do { // release spinlock
2054 v = mu_.load(std::memory_order_relaxed);
2055 } while (!mu_.compare_exchange_weak(
2056 v, (v & (kMuLow & ~kMuSpin)) | kMuWait | wr_wait |
2057 reinterpret_cast<intptr_t>(new_h),
2058 std::memory_order_release, std::memory_order_relaxed));
2059 dowait = true;
2060 }
2061 if (dowait) {
2062 this->Block(waitp->thread); // wait until removed from list or timeout
2063 flags |= kMuHasBlocked;
2064 c = 0;
2065 }
2066 }
2067 ABSL_RAW_CHECK(
2068 waitp->thread->waitp == nullptr || waitp->thread->suppress_fatal_errors,
2069 "detected illegal recursion into Mutex code");
2070 // delay, then try again
2071 c = synchronization_internal::MutexDelay(c, GENTLE);
2072 }
2073 ABSL_RAW_CHECK(
2074 waitp->thread->waitp == nullptr || waitp->thread->suppress_fatal_errors,
2075 "detected illegal recursion into Mutex code");
2076 if ((v & kMuEvent) != 0) {
2077 PostSynchEvent(this,
2078 waitp->how == kExclusive? SYNCH_EV_LOCK_RETURNING :
2079 SYNCH_EV_READERLOCK_RETURNING);
2080 }
2081 }
2082
2083 // Unlock this mutex, which is held by the current thread.
2084 // If waitp is non-zero, it must be the wait parameters for the current thread
2085 // which holds the lock but is not runnable because its condition is false
2086 // or it is in the process of blocking on a condition variable; it must requeue
2087 // itself on the mutex/condvar to wait for its condition to become true.
2088 ABSL_ATTRIBUTE_NOINLINE void Mutex::UnlockSlow(SynchWaitParams *waitp) {
2089 SchedulingGuard::ScopedDisable disable_rescheduling;
2090 intptr_t v = mu_.load(std::memory_order_relaxed);
2091 this->AssertReaderHeld();
2092 CheckForMutexCorruption(v, "Unlock");
2093 if ((v & kMuEvent) != 0) {
2094 PostSynchEvent(this,
2095 (v & kMuWriter) != 0? SYNCH_EV_UNLOCK: SYNCH_EV_READERUNLOCK);
2096 }
2097 int c = 0;
2098 // the waiter under consideration to wake, or zero
2099 PerThreadSynch *w = nullptr;
2100 // the predecessor to w or zero
2101 PerThreadSynch *pw = nullptr;
2102 // head of the list searched previously, or zero
2103 PerThreadSynch *old_h = nullptr;
2104 // a condition that's known to be false.
2105 const Condition *known_false = nullptr;
2106 PerThreadSynch *wake_list = kPerThreadSynchNull; // list of threads to wake
2107 intptr_t wr_wait = 0; // set to kMuWrWait if we wake a reader and a
2108 // later writer could have acquired the lock
2109 // (starvation avoidance)
2110 ABSL_RAW_CHECK(waitp == nullptr || waitp->thread->waitp == nullptr ||
2111 waitp->thread->suppress_fatal_errors,
2112 "detected illegal recursion into Mutex code");
2113 // This loop finds the threads to wake (collecting them in wake_list), if any,
2114 // and removes them from the list of waiters. In addition, it places
2115 // waitp.thread on the queue of waiters if waitp is non-null.
2116 for (;;) {
2117 v = mu_.load(std::memory_order_relaxed);
2118 if ((v & kMuWriter) != 0 && (v & (kMuWait | kMuDesig)) != kMuWait &&
2119 waitp == nullptr) {
2120 // fast writer release (writer with no waiters or with designated waker)
2121 if (mu_.compare_exchange_strong(v, v & ~(kMuWrWait | kMuWriter),
2122 std::memory_order_release,
2123 std::memory_order_relaxed)) {
2124 return;
2125 }
2126 } else if ((v & (kMuReader | kMuWait)) == kMuReader && waitp == nullptr) {
2127 // fast reader release (reader with no waiters)
2128 intptr_t clear = ExactlyOneReader(v) ? kMuReader | kMuOne : kMuOne;
2129 if (mu_.compare_exchange_strong(v, v - clear,
2130 std::memory_order_release,
2131 std::memory_order_relaxed)) {
2132 return;
2133 }
2134 } else if ((v & kMuSpin) == 0 && // attempt to get spinlock
2135 mu_.compare_exchange_strong(v, v | kMuSpin,
2136 std::memory_order_acquire,
2137 std::memory_order_relaxed)) {
2138 if ((v & kMuWait) == 0) { // no one to wake
2139 intptr_t nv;
2140 bool do_enqueue = true; // always Enqueue() the first time
2141 ABSL_RAW_CHECK(waitp != nullptr,
2142 "UnlockSlow is confused"); // about to sleep
2143 do { // must loop to release spinlock as reader count may change
2144 v = mu_.load(std::memory_order_relaxed);
2145 // decrement reader count if there are readers
2146 intptr_t new_readers = (v >= kMuOne)? v - kMuOne : v;
2147 PerThreadSynch *new_h = nullptr;
2148 if (do_enqueue) {
2149 // If we are enqueuing on a CondVar (waitp->cv_word != nullptr) then
2150 // we must not retry here. The initial attempt will always have
2151 // succeeded; further attempts would enqueue us against *this due to
2152 // Fer() handling.
2153 do_enqueue = (waitp->cv_word == nullptr);
2154 new_h = Enqueue(nullptr, waitp, new_readers, kMuIsCond);
2155 }
2156 intptr_t clear = kMuWrWait | kMuWriter; // by default clear write bit
2157 if ((v & kMuWriter) == 0 && ExactlyOneReader(v)) { // last reader
2158 clear = kMuWrWait | kMuReader; // clear read bit
2159 }
2160 nv = (v & kMuLow & ~clear & ~kMuSpin);
2161 if (new_h != nullptr) {
2162 nv |= kMuWait | reinterpret_cast<intptr_t>(new_h);
2163 } else { // new_h could be nullptr if we queued ourselves on a
2164 // CondVar
2165 // In that case, we must place the reader count back in the mutex
2166 // word, as Enqueue() did not store it in the new waiter.
2167 nv |= new_readers & kMuHigh;
2168 }
2169 // release spinlock & our lock; retry if reader-count changed
2170 // (writer count cannot change since we hold lock)
2171 } while (!mu_.compare_exchange_weak(v, nv,
2172 std::memory_order_release,
2173 std::memory_order_relaxed));
2174 break;
2175 }
2176
2177 // There are waiters.
2178 // Set h to the head of the circular waiter list.
2179 PerThreadSynch *h = GetPerThreadSynch(v);
2180 if ((v & kMuReader) != 0 && (h->readers & kMuHigh) > kMuOne) {
2181 // a reader but not the last
2182 h->readers -= kMuOne; // release our lock
2183 intptr_t nv = v; // normally just release spinlock
2184 if (waitp != nullptr) { // but waitp!=nullptr => must queue ourselves
2185 PerThreadSynch *new_h = Enqueue(h, waitp, v, kMuIsCond);
2186 ABSL_RAW_CHECK(new_h != nullptr,
2187 "waiters disappeared during Enqueue()!");
2188 nv &= kMuLow;
2189 nv |= kMuWait | reinterpret_cast<intptr_t>(new_h);
2190 }
2191 mu_.store(nv, std::memory_order_release); // release spinlock
2192 // can release with a store because there were waiters
2193 break;
2194 }
2195
2196 // Either we didn't search before, or we marked the queue
2197 // as "maybe_unlocking" and no one else should have changed it.
2198 ABSL_RAW_CHECK(old_h == nullptr || h->maybe_unlocking,
2199 "Mutex queue changed beneath us");
2200
2201 // The lock is becoming free, and there's a waiter
2202 if (old_h != nullptr &&
2203 !old_h->may_skip) { // we used old_h as a terminator
2204 old_h->may_skip = true; // allow old_h to skip once more
2205 ABSL_RAW_CHECK(old_h->skip == nullptr, "illegal skip from head");
2206 if (h != old_h && MuEquivalentWaiter(old_h, old_h->next)) {
2207 old_h->skip = old_h->next; // old_h not head & can skip to successor
2208 }
2209 }
2210 if (h->next->waitp->how == kExclusive &&
2211 Condition::GuaranteedEqual(h->next->waitp->cond, nullptr)) {
2212 // easy case: writer with no condition; no need to search
2213 pw = h; // wake w, the successor of h (=pw)
2214 w = h->next;
2215 w->wake = true;
2216 // We are waking up a writer. This writer may be racing against
2217 // an already awake reader for the lock. We want the
2218 // writer to usually win this race,
2219 // because if it doesn't, we can potentially keep taking a reader
2220 // perpetually and writers will starve. Worse than
2221 // that, this can also starve other readers if kMuWrWait gets set
2222 // later.
2223 wr_wait = kMuWrWait;
2224 } else if (w != nullptr && (w->waitp->how == kExclusive || h == old_h)) {
2225 // we found a waiter w to wake on a previous iteration and either it's
2226 // a writer, or we've searched the entire list so we have all the
2227 // readers.
2228 if (pw == nullptr) { // if w's predecessor is unknown, it must be h
2229 pw = h;
2230 }
2231 } else {
2232 // At this point we don't know all the waiters to wake, and the first
2233 // waiter has a condition or is a reader. We avoid searching over
2234 // waiters we've searched on previous iterations by starting at
2235 // old_h if it's set. If old_h==h, there's no one to wakeup at all.
2236 if (old_h == h) { // we've searched before, and nothing's new
2237 // so there's no one to wake.
2238 intptr_t nv = (v & ~(kMuReader|kMuWriter|kMuWrWait));
2239 h->readers = 0;
2240 h->maybe_unlocking = false; // finished unlocking
2241 if (waitp != nullptr) { // we must queue ourselves and sleep
2242 PerThreadSynch *new_h = Enqueue(h, waitp, v, kMuIsCond);
2243 nv &= kMuLow;
2244 if (new_h != nullptr) {
2245 nv |= kMuWait | reinterpret_cast<intptr_t>(new_h);
2246 } // else new_h could be nullptr if we queued ourselves on a
2247 // CondVar
2248 }
2249 // release spinlock & lock
2250 // can release with a store because there were waiters
2251 mu_.store(nv, std::memory_order_release);
2252 break;
2253 }
2254
2255 // set up to walk the list
2256 PerThreadSynch *w_walk; // current waiter during list walk
2257 PerThreadSynch *pw_walk; // previous waiter during list walk
2258 if (old_h != nullptr) { // we've searched up to old_h before
2259 pw_walk = old_h;
2260 w_walk = old_h->next;
2261 } else { // no prior search, start at beginning
2262 pw_walk =
2263 nullptr; // h->next's predecessor may change; don't record it
2264 w_walk = h->next;
2265 }
2266
2267 h->may_skip = false; // ensure we never skip past h in future searches
2268 // even if other waiters are queued after it.
2269 ABSL_RAW_CHECK(h->skip == nullptr, "illegal skip from head");
2270
2271 h->maybe_unlocking = true; // we're about to scan the waiter list
2272 // without the spinlock held.
2273 // Enqueue must be conservative about
2274 // priority queuing.
2275
2276 // We must release the spinlock to evaluate the conditions.
2277 mu_.store(v, std::memory_order_release); // release just spinlock
2278 // can release with a store because there were waiters
2279
2280 // h is the last waiter queued, and w_walk the first unsearched waiter.
2281 // Without the spinlock, the locations mu_ and h->next may now change
2282 // underneath us, but since we hold the lock itself, the only legal
2283 // change is to add waiters between h and w_walk. Therefore, it's safe
2284 // to walk the path from w_walk to h inclusive. (TryRemove() can remove
2285 // a waiter anywhere, but it acquires both the spinlock and the Mutex)
2286
2287 old_h = h; // remember we searched to here
2288
2289 // Walk the path up to and including h, looking for waiters we can wake.
2290 while (pw_walk != h) {
2291 w_walk->wake = false;
2292 if (w_walk->waitp->cond ==
2293 nullptr || // no condition => vacuously true OR
2294 (w_walk->waitp->cond != known_false &&
2295 // this thread's condition is not known false, AND
2296 // is in fact true
2297 EvalConditionIgnored(this, w_walk->waitp->cond))) {
2298 if (w == nullptr) {
2299 w_walk->wake = true; // can wake this waiter
2300 w = w_walk;
2301 pw = pw_walk;
2302 if (w_walk->waitp->how == kExclusive) {
2303 wr_wait = kMuWrWait;
2304 break; // bail if waking this writer
2305 }
2306 } else if (w_walk->waitp->how == kShared) { // wake if a reader
2307 w_walk->wake = true;
2308 } else { // writer with true condition
2309 wr_wait = kMuWrWait;
2310 }
2311 } else { // can't wake; condition false
2312 known_false = w_walk->waitp->cond; // remember last false condition
2313 }
2314 if (w_walk->wake) { // we're waking reader w_walk
2315 pw_walk = w_walk; // don't skip similar waiters
2316 } else { // not waking; skip as much as possible
2317 pw_walk = Skip(w_walk);
2318 }
2319 // If pw_walk == h, then load of pw_walk->next can race with
2320 // concurrent write in Enqueue(). However, at the same time
2321 // we do not need to do the load, because we will bail out
2322 // from the loop anyway.
2323 if (pw_walk != h) {
2324 w_walk = pw_walk->next;
2325 }
2326 }
2327
2328 continue; // restart for(;;)-loop to wakeup w or to find more waiters
2329 }
2330 ABSL_RAW_CHECK(pw->next == w, "pw not w's predecessor");
2331 // The first (and perhaps only) waiter we've chosen to wake is w, whose
2332 // predecessor is pw. If w is a reader, we must wake all the other
2333 // waiters with wake==true as well. We may also need to queue
2334 // ourselves if waitp != null. The spinlock and the lock are still
2335 // held.
2336
2337 // This traverses the list in [ pw->next, h ], where h is the head,
2338 // removing all elements with wake==true and placing them in the
2339 // singly-linked list wake_list. Returns the new head.
2340 h = DequeueAllWakeable(h, pw, &wake_list);
2341
2342 intptr_t nv = (v & kMuEvent) | kMuDesig;
2343 // assume no waiters left,
2344 // set kMuDesig for INV1a
2345
2346 if (waitp != nullptr) { // we must queue ourselves and sleep
2347 h = Enqueue(h, waitp, v, kMuIsCond);
2348 // h is new last waiter; could be null if we queued ourselves on a
2349 // CondVar
2350 }
2351
2352 ABSL_RAW_CHECK(wake_list != kPerThreadSynchNull,
2353 "unexpected empty wake list");
2354
2355 if (h != nullptr) { // there are waiters left
2356 h->readers = 0;
2357 h->maybe_unlocking = false; // finished unlocking
2358 nv |= wr_wait | kMuWait | reinterpret_cast<intptr_t>(h);
2359 }
2360
2361 // release both spinlock & lock
2362 // can release with a store because there were waiters
2363 mu_.store(nv, std::memory_order_release);
2364 break; // out of for(;;)-loop
2365 }
2366 // aggressive here; no one can proceed till we do
2367 c = synchronization_internal::MutexDelay(c, AGGRESSIVE);
2368 } // end of for(;;)-loop
2369
2370 if (wake_list != kPerThreadSynchNull) {
2371 int64_t total_wait_cycles = 0;
2372 int64_t max_wait_cycles = 0;
2373 int64_t now = base_internal::CycleClock::Now();
2374 do {
2375 // Profile lock contention events only if the waiter was trying to acquire
2376 // the lock, not waiting on a condition variable or Condition.
2377 if (!wake_list->cond_waiter) {
2378 int64_t cycles_waited =
2379 (now - wake_list->waitp->contention_start_cycles);
2380 total_wait_cycles += cycles_waited;
2381 if (max_wait_cycles == 0) max_wait_cycles = cycles_waited;
2382 wake_list->waitp->contention_start_cycles = now;
2383 wake_list->waitp->should_submit_contention_data = true;
2384 }
2385 wake_list = Wakeup(wake_list); // wake waiters
2386 } while (wake_list != kPerThreadSynchNull);
2387 if (total_wait_cycles > 0) {
2388 mutex_tracer("slow release", this, total_wait_cycles);
2389 ABSL_TSAN_MUTEX_PRE_DIVERT(this, 0);
2390 submit_profile_data(total_wait_cycles);
2391 ABSL_TSAN_MUTEX_POST_DIVERT(this, 0);
2392 }
2393 }
2394 }
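
// The wait-cycle totals computed above feed the hooks installed via
// absl::RegisterMutexProfiler() and absl::RegisterMutexTracer() (declared in
// mutex.h). A minimal sketch, assuming only those registration entry points:
//
//   absl::RegisterMutexProfiler(+[](int64_t wait_cycles) {
//     // e.g. export wait_cycles to a contention histogram
//   });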
2395
2396 // Used by CondVar implementation to reacquire mutex after waking from
2397 // condition variable. This routine is used instead of Lock() because the
2398 // waiting thread may have been moved from the condition variable queue to the
2399 // mutex queue without a wakeup, by Trans(). In that case, when the thread is
2400 // finally woken, the woken thread will believe it has been woken from the
2401 // condition variable (i.e. its PC will be in the CondVar code), when
2402 // in fact it has just been woken from the mutex. Thus, it must enter the slow
2403 // path of the mutex in the same state as if it had just woken from the mutex.
2404 // That is, it must ensure that kMuDesig is cleared (INV1b).
2405 void Mutex::Trans(MuHow how) {
2406 this->LockSlow(how, nullptr, kMuHasBlocked | kMuIsCond);
2407 }
2408
2409 // Used by CondVar implementation to effectively wake thread w from the
2410 // condition variable. If this mutex is free, we simply wake the thread.
2411 // It will later acquire the mutex with high probability. Otherwise, we
2412 // enqueue thread w on this mutex.
2413 void Mutex::Fer(PerThreadSynch *w) {
2414 SchedulingGuard::ScopedDisable disable_rescheduling;
2415 int c = 0;
2416 ABSL_RAW_CHECK(w->waitp->cond == nullptr,
2417 "Mutex::Fer while waiting on Condition");
2418 ABSL_RAW_CHECK(!w->waitp->timeout.has_timeout(),
2419 "Mutex::Fer while in timed wait");
2420 ABSL_RAW_CHECK(w->waitp->cv_word == nullptr,
2421 "Mutex::Fer with pending CondVar queueing");
2422 for (;;) {
2423 intptr_t v = mu_.load(std::memory_order_relaxed);
2424 // Note: must not queue if the mutex is unlocked (nobody will wake it).
2425 // For example, we can have only kMuWait (conditional) or maybe
2426 // kMuWait|kMuWrWait.
2427 // conflicting != 0 implies that the waking thread cannot currently take
2428 // the mutex, which in turn implies that someone else has it and can wake
2429 // us if we queue.
2430 const intptr_t conflicting =
2431 kMuWriter | (w->waitp->how == kShared ? 0 : kMuReader);
2432 if ((v & conflicting) == 0) {
2433 w->next = nullptr;
2434 w->state.store(PerThreadSynch::kAvailable, std::memory_order_release);
2435 IncrementSynchSem(this, w);
2436 return;
2437 } else {
2438 if ((v & (kMuSpin|kMuWait)) == 0) { // no waiters
2439 // This thread tries to become the one and only waiter.
2440 PerThreadSynch *new_h = Enqueue(nullptr, w->waitp, v, kMuIsCond);
2441 ABSL_RAW_CHECK(new_h != nullptr,
2442 "Enqueue failed"); // we must queue ourselves
2443 if (mu_.compare_exchange_strong(
2444 v, reinterpret_cast<intptr_t>(new_h) | (v & kMuLow) | kMuWait,
2445 std::memory_order_release, std::memory_order_relaxed)) {
2446 return;
2447 }
2448 } else if ((v & kMuSpin) == 0 &&
2449 mu_.compare_exchange_strong(v, v | kMuSpin | kMuWait)) {
2450 PerThreadSynch *h = GetPerThreadSynch(v);
2451 PerThreadSynch *new_h = Enqueue(h, w->waitp, v, kMuIsCond);
2452 ABSL_RAW_CHECK(new_h != nullptr,
2453 "Enqueue failed"); // we must queue ourselves
2454 do {
2455 v = mu_.load(std::memory_order_relaxed);
2456 } while (!mu_.compare_exchange_weak(
2457 v,
2458 (v & kMuLow & ~kMuSpin) | kMuWait |
2459 reinterpret_cast<intptr_t>(new_h),
2460 std::memory_order_release, std::memory_order_relaxed));
2461 return;
2462 }
2463 }
2464 c = synchronization_internal::MutexDelay(c, GENTLE);
2465 }
2466 }
2467
2468 void Mutex::AssertHeld() const {
2469 if ((mu_.load(std::memory_order_relaxed) & kMuWriter) == 0) {
2470 SynchEvent *e = GetSynchEvent(this);
2471 ABSL_RAW_LOG(FATAL, "thread should hold write lock on Mutex %p %s",
2472 static_cast<const void *>(this),
2473 (e == nullptr ? "" : e->name));
2474 }
2475 }
2476
2477 void Mutex::AssertReaderHeld() const {
2478 if ((mu_.load(std::memory_order_relaxed) & (kMuReader | kMuWriter)) == 0) {
2479 SynchEvent *e = GetSynchEvent(this);
2480 ABSL_RAW_LOG(
2481 FATAL, "thread should hold at least a read lock on Mutex %p %s",
2482 static_cast<const void *>(this), (e == nullptr ? "" : e->name));
2483 }
2484 }
2485
2486 // -------------------------------- condition variables
2487 static const intptr_t kCvSpin = 0x0001L; // spinlock protects waiter list
2488 static const intptr_t kCvEvent = 0x0002L; // record events
2489
2490 static const intptr_t kCvLow = 0x0003L; // low order bits of CV
2491
2492 // Hack to make constant values available to gdb pretty printer
2493 enum { kGdbCvSpin = kCvSpin, kGdbCvEvent = kCvEvent, kGdbCvLow = kCvLow, };
2494
2495 static_assert(PerThreadSynch::kAlignment > kCvLow,
2496 "PerThreadSynch::kAlignment must be greater than kCvLow");
2497
2498 void CondVar::EnableDebugLog(const char *name) {
2499 SynchEvent *e = EnsureSynchEvent(&this->cv_, name, kCvEvent, kCvSpin);
2500 e->log = true;
2501 UnrefSynchEvent(e);
2502 }
2503
2504 CondVar::~CondVar() {
2505 if ((cv_.load(std::memory_order_relaxed) & kCvEvent) != 0) {
2506 ForgetSynchEvent(&this->cv_, kCvEvent, kCvSpin);
2507 }
2508 }
2509
2510
2511 // Remove thread s from the list of waiters on this condition variable.
2512 void CondVar::Remove(PerThreadSynch *s) {
2513 SchedulingGuard::ScopedDisable disable_rescheduling;
2514 intptr_t v;
2515 int c = 0;
2516 for (v = cv_.load(std::memory_order_relaxed);;
2517 v = cv_.load(std::memory_order_relaxed)) {
2518 if ((v & kCvSpin) == 0 && // attempt to acquire spinlock
2519 cv_.compare_exchange_strong(v, v | kCvSpin,
2520 std::memory_order_acquire,
2521 std::memory_order_relaxed)) {
2522 PerThreadSynch *h = reinterpret_cast<PerThreadSynch *>(v & ~kCvLow);
2523 if (h != nullptr) {
2524 PerThreadSynch *w = h;
2525 while (w->next != s && w->next != h) { // search for thread
2526 w = w->next;
2527 }
2528 if (w->next == s) { // found thread; remove it
2529 w->next = s->next;
2530 if (h == s) {
2531 h = (w == s) ? nullptr : w;
2532 }
2533 s->next = nullptr;
2534 s->state.store(PerThreadSynch::kAvailable, std::memory_order_release);
2535 }
2536 }
2537 // release spinlock
2538 cv_.store((v & kCvEvent) | reinterpret_cast<intptr_t>(h),
2539 std::memory_order_release);
2540 return;
2541 } else {
2542 // try again after a delay
2543 c = synchronization_internal::MutexDelay(c, GENTLE);
2544 }
2545 }
2546 }
2547
2548 // Queue thread waitp->thread on condition variable word cv_word using
2549 // wait parameters waitp.
2550 // We split this into a separate routine, rather than simply doing it as part
2551 // of WaitCommon(). If we were to queue ourselves on the condition variable
2552 // before calling Mutex::UnlockSlow(), the Mutex code might be re-entered (via
2553 // the logging code, or via a Condition function) and might potentially attempt
2554 // to block this thread. That would be a problem if the thread were already on
2555 // a condition variable waiter queue. Thus, we use the waitp->cv_word to tell
2556 // the unlock code to call CondVarEnqueue() to queue the thread on the condition
2557 // variable queue just before the mutex is to be unlocked, and (most
2558 // importantly) after any call to an external routine that might re-enter the
2559 // mutex code.
2560 static void CondVarEnqueue(SynchWaitParams *waitp) {
2561 // This thread might be transferred to the Mutex queue by Fer() when
2562 // we are woken. To make sure that is what happens, Enqueue() doesn't
2563 // call CondVarEnqueue() again but instead uses its normal code. We
2564 // must do this before we queue ourselves so that cv_word will be null
2565 // when seen by the dequeuer, who may wish immediately to requeue
2566 // this thread on another queue.
2567 std::atomic<intptr_t> *cv_word = waitp->cv_word;
2568 waitp->cv_word = nullptr;
2569
2570 intptr_t v = cv_word->load(std::memory_order_relaxed);
2571 int c = 0;
2572 while ((v & kCvSpin) != 0 || // acquire spinlock
2573 !cv_word->compare_exchange_weak(v, v | kCvSpin,
2574 std::memory_order_acquire,
2575 std::memory_order_relaxed)) {
2576 c = synchronization_internal::MutexDelay(c, GENTLE);
2577 v = cv_word->load(std::memory_order_relaxed);
2578 }
2579 ABSL_RAW_CHECK(waitp->thread->waitp == nullptr, "waiting when shouldn't be");
2580 waitp->thread->waitp = waitp; // prepare ourselves for waiting
2581 PerThreadSynch *h = reinterpret_cast<PerThreadSynch *>(v & ~kCvLow);
2582 if (h == nullptr) { // add this thread to waiter list
2583 waitp->thread->next = waitp->thread;
2584 } else {
2585 waitp->thread->next = h->next;
2586 h->next = waitp->thread;
2587 }
2588 waitp->thread->state.store(PerThreadSynch::kQueued,
2589 std::memory_order_relaxed);
2590 cv_word->store((v & kCvEvent) | reinterpret_cast<intptr_t>(waitp->thread),
2591 std::memory_order_release);
2592 }
2593
2594 bool CondVar::WaitCommon(Mutex *mutex, KernelTimeout t) {
2595 bool rc = false; // return value; true iff we timed-out
2596
2597 intptr_t mutex_v = mutex->mu_.load(std::memory_order_relaxed);
2598 Mutex::MuHow mutex_how = ((mutex_v & kMuWriter) != 0) ? kExclusive : kShared;
2599 ABSL_TSAN_MUTEX_PRE_UNLOCK(mutex, TsanFlags(mutex_how));
2600
2601 // maybe trace this call
2602 intptr_t v = cv_.load(std::memory_order_relaxed);
2603 cond_var_tracer("Wait", this);
2604 if ((v & kCvEvent) != 0) {
2605 PostSynchEvent(this, SYNCH_EV_WAIT);
2606 }
2607
2608 // Release mu and wait on condition variable.
2609 SynchWaitParams waitp(mutex_how, nullptr, t, mutex,
2610 Synch_GetPerThreadAnnotated(mutex), &cv_);
2611 // UnlockSlow() will call CondVarEnqueue() just before releasing the
2612 // Mutex, thus queuing this thread on the condition variable. See
2613 // CondVarEnqueue() for the reasons.
2614 mutex->UnlockSlow(&waitp);
2615
2616 // wait for signal
2617 while (waitp.thread->state.load(std::memory_order_acquire) ==
2618 PerThreadSynch::kQueued) {
2619 if (!Mutex::DecrementSynchSem(mutex, waitp.thread, t)) {
2620 // DecrementSynchSem returned due to timeout.
2621 // Now we will either (1) remove ourselves from the wait list in Remove
2622 // below, in which case Remove will set thread.state = kAvailable and
2623 // we will not call DecrementSynchSem again; or (2) Signal/SignalAll
2624 // has removed us concurrently and is calling Wakeup, which will set
2625 // thread.state = kAvailable and post to the semaphore.
2626 // It's important to reset the timeout for case (2) because otherwise
2627 // we can live-lock in this loop since DecrementSynchSem will always
2628 // return immediately due to timeout, but Signal/SignalAll may not have
2629 // set thread.state = kAvailable yet (and may not be scheduled
2630 // due to thread priorities or other scheduler artifacts).
2631 // Note this could also be resolved if Signal/SignalAll would set
2632 // thread.state = kAvailable while holding the wait list spin lock.
2633 // But this can't be easily done for SignalAll since it grabs the whole
2634 // wait list with a single compare-exchange and does not really grab
2635 // the spin lock.
2636 t = KernelTimeout::Never();
2637 this->Remove(waitp.thread);
2638 rc = true;
2639 }
2640 }
2641
2642 ABSL_RAW_CHECK(waitp.thread->waitp != nullptr, "not waiting when should be");
2643 waitp.thread->waitp = nullptr; // cleanup
2644
2645 // maybe trace this call
2646 cond_var_tracer("Unwait", this);
2647 if ((v & kCvEvent) != 0) {
2648 PostSynchEvent(this, SYNCH_EV_WAIT_RETURNING);
2649 }
2650
2651 // From synchronization point of view Wait is unlock of the mutex followed
2652 // by lock of the mutex. We've annotated start of unlock in the beginning
2653 // of the function. Now, finish unlock and annotate lock of the mutex.
2654 // (Trans is effectively lock).
2655 ABSL_TSAN_MUTEX_POST_UNLOCK(mutex, TsanFlags(mutex_how));
2656 ABSL_TSAN_MUTEX_PRE_LOCK(mutex, TsanFlags(mutex_how));
2657 mutex->Trans(mutex_how); // Reacquire mutex
2658 ABSL_TSAN_MUTEX_POST_LOCK(mutex, TsanFlags(mutex_how), 0);
2659 return rc;
2660 }
2661
2662 bool CondVar::WaitWithTimeout(Mutex *mu, absl::Duration timeout) {
2663 return WaitWithDeadline(mu, DeadlineFromTimeout(timeout));
2664 }
2665
2666 bool CondVar::WaitWithDeadline(Mutex *mu, absl::Time deadline) {
2667 return WaitCommon(mu, KernelTimeout(deadline));
2668 }
2669
2670 void CondVar::Wait(Mutex *mu) {
2671 WaitCommon(mu, KernelTimeout::Never());
2672 }
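
// Minimal usage sketch for the CondVar wait loop; the predicate is always
// re-checked under the Mutex because wakeups may be spurious:
//
//   // Waiting thread:
//   mu.Lock();
//   while (!ready) cv.Wait(&mu);   // Wait() releases mu while blocked
//   mu.Unlock();
//
//   // Signaling thread:
//   mu.Lock();
//   ready = true;
//   cv.Signal();
//   mu.Unlock();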
2673
2674 // Wake thread w
2675 // If it was a timed wait, w will be waiting on w->cv
2676 // Otherwise, if the wait was not associated with a Mutex, w will be waiting
2677 // on w->sem. Otherwise, w is transferred to the Mutex via Mutex::Fer().
2678 void CondVar::Wakeup(PerThreadSynch *w) {
2679 if (w->waitp->timeout.has_timeout() || w->waitp->cvmu == nullptr) {
2680 // The waiting thread only needs to observe "w->state == kAvailable" to be
2681 // released, so we must cache "cvmu" before clearing "next".
2682 Mutex *mu = w->waitp->cvmu;
2683 w->next = nullptr;
2684 w->state.store(PerThreadSynch::kAvailable, std::memory_order_release);
2685 Mutex::IncrementSynchSem(mu, w);
2686 } else {
2687 w->waitp->cvmu->Fer(w);
2688 }
2689 }
2690
2691 void CondVar::Signal() {
2692 SchedulingGuard::ScopedDisable disable_rescheduling;
2693 ABSL_TSAN_MUTEX_PRE_SIGNAL(nullptr, 0);
2694 intptr_t v;
2695 int c = 0;
2696 for (v = cv_.load(std::memory_order_relaxed); v != 0;
2697 v = cv_.load(std::memory_order_relaxed)) {
2698 if ((v & kCvSpin) == 0 && // attempt to acquire spinlock
2699 cv_.compare_exchange_strong(v, v | kCvSpin,
2700 std::memory_order_acquire,
2701 std::memory_order_relaxed)) {
2702 PerThreadSynch *h = reinterpret_cast<PerThreadSynch *>(v & ~kCvLow);
2703 PerThreadSynch *w = nullptr;
2704 if (h != nullptr) { // remove first waiter
2705 w = h->next;
2706 if (w == h) {
2707 h = nullptr;
2708 } else {
2709 h->next = w->next;
2710 }
2711 }
2712 // release spinlock
2713 cv_.store((v & kCvEvent) | reinterpret_cast<intptr_t>(h),
2714 std::memory_order_release);
2715 if (w != nullptr) {
2716 CondVar::Wakeup(w); // wake waiter, if there was one
2717 cond_var_tracer("Signal wakeup", this);
2718 }
2719 if ((v & kCvEvent) != 0) {
2720 PostSynchEvent(this, SYNCH_EV_SIGNAL);
2721 }
2722 ABSL_TSAN_MUTEX_POST_SIGNAL(nullptr, 0);
2723 return;
2724 } else {
2725 c = synchronization_internal::MutexDelay(c, GENTLE);
2726 }
2727 }
2728 ABSL_TSAN_MUTEX_POST_SIGNAL(nullptr, 0);
2729 }
2730
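// Wake all waiters.
// Unlike Signal(), which briefly holds the kCvSpin spinlock to dequeue a
// single waiter, SignalAll() detaches the entire waiter list with a single
// compare-exchange that leaves only the kCvEvent bit in cv_, and then wakes
// each detached waiter without holding the spinlock.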
2731 void CondVar::SignalAll() {
2732 ABSL_TSAN_MUTEX_PRE_SIGNAL(nullptr, 0);
2733 intptr_t v;
2734 int c = 0;
2735 for (v = cv_.load(std::memory_order_relaxed); v != 0;
2736 v = cv_.load(std::memory_order_relaxed)) {
2737 // Empty the list if the spinlock is free.
2738 // We do this by simply setting the list to empty using
2739 // compare-and-swap. We then have the entire list in our hands,
2740 // which cannot be changing since we grabbed it while no one
2741 // held the lock.
2742 if ((v & kCvSpin) == 0 &&
2743 cv_.compare_exchange_strong(v, v & kCvEvent, std::memory_order_acquire,
2744 std::memory_order_relaxed)) {
2745 PerThreadSynch *h = reinterpret_cast<PerThreadSynch *>(v & ~kCvLow);
2746 if (h != nullptr) {
2747 PerThreadSynch *w;
2748 PerThreadSynch *n = h->next;
2749 do { // for every thread, wake it up
2750 w = n;
2751 n = n->next;
2752 CondVar::Wakeup(w);
2753 } while (w != h);
2754 cond_var_tracer("SignalAll wakeup", this);
2755 }
2756 if ((v & kCvEvent) != 0) {
2757 PostSynchEvent(this, SYNCH_EV_SIGNALALL);
2758 }
2759 ABSL_TSAN_MUTEX_POST_SIGNAL(nullptr, 0);
2760 return;
2761 } else {
2762 // try again after a delay
2763 c = synchronization_internal::MutexDelay(c, GENTLE);
2764 }
2765 }
2766 ABSL_TSAN_MUTEX_POST_SIGNAL(nullptr, 0);
2767 }
2768
2769 void ReleasableMutexLock::Release() {
2770 ABSL_RAW_CHECK(this->mu_ != nullptr,
2771 "ReleasableMutexLock::Release may only be called once");
2772 this->mu_->Unlock();
2773 this->mu_ = nullptr;
2774 }
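
// Illustrative usage sketch (not part of this file's logic):
// ReleasableMutexLock behaves like MutexLock, except that the critical
// section can be ended early with Release(); calling Release() a second time
// trips the ABSL_RAW_CHECK above. The function and helper names below are
// hypothetical.
//
//   int SnapshotThenCompute(absl::Mutex *mu, const std::vector<int> *data) {
//     absl::ReleasableMutexLock l(mu);
//     int first = data->empty() ? 0 : data->front();  // read under the lock
//     l.Release();  // drop the lock before the slow, lock-free work
//     return SlowTransform(first);  // hypothetical expensive function
//   }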
2775
2776 #ifdef ABSL_HAVE_THREAD_SANITIZER
2777 extern "C" void __tsan_read1(void *addr);
2778 #else
2779 #define __tsan_read1(addr) // do nothing if TSan not enabled
2780 #endif
2781
2782 // A function that just returns its argument, dereferenced
2783 static bool Dereference(void *arg) {
2784 // ThreadSanitizer does not instrument this file for memory accesses.
2785 // This function dereferences a user variable that can participate
2786 // in a data race, so we need to manually tell TSan about this memory access.
2787 __tsan_read1(arg);
2788 return *(static_cast<bool *>(arg));
2789 }
2790
2791 ABSL_CONST_INIT const Condition Condition::kTrue;
2792
2793 Condition::Condition(bool (*func)(void *), void *arg)
2794 : eval_(&CallVoidPtrFunction),
2795 arg_(arg) {
2796 static_assert(sizeof(&func) <= sizeof(callback_),
2797 "An overlarge function pointer passed to Condition.");
2798 StoreCallback(func);
2799 }
2800
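// Recovers the bool(*)(void*) callback that StoreCallback() copied into the
// callback_ byte buffer (via memcpy, so the raw storage never needs to be
// cast to a function-pointer type) and invokes it on arg_.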
2801 bool Condition::CallVoidPtrFunction(const Condition *c) {
2802 using FunctionPointer = bool (*)(void *);
2803 FunctionPointer function_pointer;
2804 std::memcpy(&function_pointer, c->callback_, sizeof(function_pointer));
2805 return (*function_pointer)(c->arg_);
2806 }
2807
2808 Condition::Condition(const bool *cond)
2809 : eval_(CallVoidPtrFunction),
2810 // const_cast is safe since Dereference does not modify arg
2811 arg_(const_cast<bool *>(cond)) {
2812 using FunctionPointer = bool (*)(void *);
2813 const FunctionPointer dereference = Dereference;
2814 StoreCallback(dereference);
2815 }
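
// Illustrative usage sketch (not part of this file's logic): Conditions built
// from the constructors above are typically passed to Mutex::Await() or
// Mutex::LockWhen(), which re-evaluate them as the Mutex changes hands. The
// names below ("ready", "counter", "IsPositive") are hypothetical.
//
//   bool IsPositive(void *arg) { return *static_cast<int *>(arg) > 0; }
//
//   absl::Mutex mu;
//   bool ready = false;  // guarded by mu
//   int counter = 0;     // guarded by mu
//
//   void BlockUntilReadyAndPositive() {
//     absl::MutexLock l(&mu);
//     mu.Await(absl::Condition(&ready));                // const bool* form
//     mu.Await(absl::Condition(IsPositive, &counter));  // bool(*)(void*) form
//   }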
2816
2817 bool Condition::Eval() const {
2818 // eval_ == null for kTrue
2819 return (this->eval_ == nullptr) || (*this->eval_)(this);
2820 }
2821
2822 bool Condition::GuaranteedEqual(const Condition *a, const Condition *b) {
2823 // kTrue logic.
2824 if (a == nullptr || a->eval_ == nullptr) {
2825 return b == nullptr || b->eval_ == nullptr;
2826 } else if (b == nullptr || b->eval_ == nullptr) {
2827 return false;
2828 }
2829 // Check equality of the representative fields.
2830 return a->eval_ == b->eval_ && a->arg_ == b->arg_ &&
2831 !memcmp(a->callback_, b->callback_, sizeof(a->callback_));
2832 }
2833
2834 ABSL_NAMESPACE_END
2835 } // namespace absl
2836