/*
 * Copyright (C) 2008 The Android Open Source Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <pthread.h>

#include <errno.h>
#include <limits.h>
#include <stdatomic.h>
#include <stdlib.h>
#include <string.h>
#include <sys/cdefs.h>
#include <sys/mman.h>
#include <unistd.h>

#include "pthread_internal.h"

#include "private/bionic_constants.h"
#include "private/bionic_fortify.h"
#include "private/bionic_futex.h"
#include "private/bionic_systrace.h"
#include "private/bionic_time_conversions.h"
#include "private/bionic_tls.h"

/* a mutex attribute holds the following fields
 *
 * bits:     name       description
 * 0-3       type       type of mutex
 * 4         shared     process-shared flag
 * 5         protocol   whether it is a priority-inheritance mutex
 */
#define  MUTEXATTR_TYPE_MASK     0x000f
#define  MUTEXATTR_SHARED_MASK   0x0010
#define  MUTEXATTR_PROTOCOL_MASK 0x0020

#define  MUTEXATTR_PROTOCOL_SHIFT 5

int pthread_mutexattr_init(pthread_mutexattr_t *attr)
{
    *attr = PTHREAD_MUTEX_DEFAULT;
    return 0;
}

int pthread_mutexattr_destroy(pthread_mutexattr_t *attr)
{
    *attr = -1;
    return 0;
}

int pthread_mutexattr_gettype(const pthread_mutexattr_t *attr, int *type_p)
{
    int type = (*attr & MUTEXATTR_TYPE_MASK);

    if (type < PTHREAD_MUTEX_NORMAL || type > PTHREAD_MUTEX_ERRORCHECK) {
        return EINVAL;
    }

    *type_p = type;
    return 0;
}

int pthread_mutexattr_settype(pthread_mutexattr_t *attr, int type)
{
    if (type < PTHREAD_MUTEX_NORMAL || type > PTHREAD_MUTEX_ERRORCHECK) {
        return EINVAL;
    }

    *attr = (*attr & ~MUTEXATTR_TYPE_MASK) | type;
    return 0;
}

/* process-shared mutexes are not supported at the moment */

int pthread_mutexattr_setpshared(pthread_mutexattr_t *attr, int pshared)
{
    switch (pshared) {
    case PTHREAD_PROCESS_PRIVATE:
        *attr &= ~MUTEXATTR_SHARED_MASK;
        return 0;

    case PTHREAD_PROCESS_SHARED:
        /* our current implementation of pthread actually supports shared
         * mutexes but won't clean up if a process dies with the mutex held.
         * Nevertheless, it's better than nothing. Shared mutexes are used
         * by surfaceflinger and audioflinger.
         */
        *attr |= MUTEXATTR_SHARED_MASK;
        return 0;
    }
    return EINVAL;
}

int pthread_mutexattr_getpshared(const pthread_mutexattr_t* attr, int* pshared) {
    *pshared = (*attr & MUTEXATTR_SHARED_MASK) ? PTHREAD_PROCESS_SHARED : PTHREAD_PROCESS_PRIVATE;
    return 0;
}

int pthread_mutexattr_setprotocol(pthread_mutexattr_t* attr, int protocol) {
    if (protocol != PTHREAD_PRIO_NONE && protocol != PTHREAD_PRIO_INHERIT) {
        return EINVAL;
    }
    *attr = (*attr & ~MUTEXATTR_PROTOCOL_MASK) | (protocol << MUTEXATTR_PROTOCOL_SHIFT);
    return 0;
}

int pthread_mutexattr_getprotocol(const pthread_mutexattr_t* attr, int* protocol) {
    *protocol = (*attr & MUTEXATTR_PROTOCOL_MASK) >> MUTEXATTR_PROTOCOL_SHIFT;
    return 0;
}

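// Usage sketch (illustrative only, not part of this file): the attribute calls above are
// typically combined like this to create a recursive, priority-inheritance mutex:
//   pthread_mutexattr_t attr;
//   pthread_mutexattr_init(&attr);
//   pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
//   pthread_mutexattr_setprotocol(&attr, PTHREAD_PRIO_INHERIT);
//   pthread_mutex_t mutex;
//   pthread_mutex_init(&mutex, &attr);
//   pthread_mutexattr_destroy(&attr);
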
// Priority Inheritance mutex implementation
struct PIMutex {
  // mutex type, can be 0 (normal), 1 (recursive), 2 (errorcheck), constant during lifetime
  uint8_t type;
  // process-shared flag, constant during lifetime
  bool shared;
  // <number of times a thread holding a recursive PI mutex> - 1
  uint16_t counter;
  // owner_tid is read/written by both userspace code and kernel code. It includes three fields:
  // FUTEX_WAITERS, FUTEX_OWNER_DIED and FUTEX_TID_MASK.
  atomic_int owner_tid;
};

static inline __always_inline int PIMutexTryLock(PIMutex& mutex) {
    pid_t tid = __get_thread()->tid;
    // Handle common case first.
    int old_owner = 0;
    if (__predict_true(atomic_compare_exchange_strong_explicit(&mutex.owner_tid,
                                                               &old_owner, tid,
                                                               memory_order_acquire,
                                                               memory_order_relaxed))) {
        return 0;
    }
    if (tid == (old_owner & FUTEX_TID_MASK)) {
        // We already own this mutex.
        if (mutex.type == PTHREAD_MUTEX_NORMAL) {
            return EBUSY;
        }
        if (mutex.type == PTHREAD_MUTEX_ERRORCHECK) {
            return EDEADLK;
        }
        if (mutex.counter == 0xffff) {
            return EAGAIN;
        }
        mutex.counter++;
        return 0;
    }
    return EBUSY;
}

// Inlining this function in pthread_mutex_lock() adds the cost of stack frame instructions on
// ARM/ARM64, which adds at most 20 percent overhead. So make it noinline.
static int  __attribute__((noinline)) PIMutexTimedLock(PIMutex& mutex,
                                                       bool use_realtime_clock,
                                                       const timespec* abs_timeout) {
    int ret = PIMutexTryLock(mutex);
    if (__predict_true(ret == 0)) {
        return 0;
    }
    if (ret == EBUSY) {
        char trace_msg[64];
        const pid_t owner = atomic_load_explicit(&mutex.owner_tid, memory_order_relaxed)
                & FUTEX_TID_MASK;
        snprintf(trace_msg, sizeof(trace_msg),
                 "Contending for pthread mutex owned by tid: %d", owner);
        ScopedTrace trace(trace_msg);
        ret = -__futex_pi_lock_ex(&mutex.owner_tid, mutex.shared, use_realtime_clock, abs_timeout);
    }
    return ret;
}

static int PIMutexUnlock(PIMutex& mutex) {
    pid_t tid = __get_thread()->tid;
    int old_owner = tid;
    // Handle common case first.
    if (__predict_true(mutex.type == PTHREAD_MUTEX_NORMAL)) {
        if (__predict_true(atomic_compare_exchange_strong_explicit(&mutex.owner_tid,
                                                                   &old_owner, 0,
                                                                   memory_order_release,
                                                                   memory_order_relaxed))) {
            return 0;
        }
    } else {
        old_owner = atomic_load_explicit(&mutex.owner_tid, memory_order_relaxed);
    }

    if (tid != (old_owner & FUTEX_TID_MASK)) {
        // The mutex can only be unlocked by the thread that owns it.
        return EPERM;
    }
    if (mutex.type == PTHREAD_MUTEX_RECURSIVE) {
        if (mutex.counter != 0u) {
            --mutex.counter;
            return 0;
        }
    }
    if (old_owner == tid) {
        // No thread is waiting.
        if (__predict_true(atomic_compare_exchange_strong_explicit(&mutex.owner_tid,
                                                                   &old_owner, 0,
                                                                   memory_order_release,
                                                                   memory_order_relaxed))) {
            return 0;
        }
    }
    return -__futex_pi_unlock(&mutex.owner_tid, mutex.shared);
}

static int PIMutexDestroy(PIMutex& mutex) {
    // The mutex should be in unlocked state (owner_tid == 0) when destroyed.
    // Store 0xffffffff to make the mutex unusable.
    int old_owner = 0;
    if (atomic_compare_exchange_strong_explicit(&mutex.owner_tid, &old_owner, 0xffffffff,
                                                memory_order_relaxed, memory_order_relaxed)) {
        return 0;
    }
    return EBUSY;
}

#if !defined(__LP64__)

namespace PIMutexAllocator {
// pthread_mutex_t has only 4 bytes in 32-bit programs, which is not enough to hold a PIMutex.
// So we use malloc to allocate PIMutexes and use a 16-bit field of pthread_mutex_t as an index
// to find the allocated PIMutex. This allows at most 65536 PI mutexes.
// When calling operations like pthread_mutex_lock/unlock, the 16-bit index is mapped to the
// corresponding PIMutex. To make the map operation fast, we use a lockless mapping method:
//   Once a PIMutex is allocated, all the data used to map the index to the PIMutex isn't changed
//   until it is destroyed.
// Below are the data structures:
//   // struct Node contains a PIMutex.
//   typedef Node NodeArray[256];
//   typedef NodeArray* NodeArrayP;
//   NodeArrayP nodes[256];
//
// A 16-bit index is mapped to a Node as below:
//   (*nodes[index >> 8])[index & 0xff]
//
// A free list is also used to allow O(1) reuse of destroyed PIMutexes.

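// Worked example (illustrative values): id 0x1234 maps to (*nodes[0x12])[0x34], i.e. entry
// 0x34 of the 0x12-th NodeArray. Because both levels of the lookup are indexed with 8 bits,
// the scheme covers exactly the 0x10000 ids that fit in the 16-bit pi_mutex_id field.
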
union Node {
    PIMutex mutex;
    int next_free_id;  // If not -1, refers to the next node in the free PIMutex list.
};
typedef Node NodeArray[256];
typedef NodeArray* NodeArrayP;

// lock protects the items below.
static Lock lock;
static NodeArrayP* nodes;
static int next_to_alloc_id;
static int first_free_id = -1;  // If not -1, refers to the first node in the free PIMutex list.

static inline __always_inline Node& IdToNode(int id) {
    return (*nodes[id >> 8])[id & 0xff];
}

static inline __always_inline PIMutex& IdToPIMutex(int id) {
    return IdToNode(id).mutex;
}

static int AllocIdLocked() {
    if (first_free_id != -1) {
        int result = first_free_id;
        first_free_id = IdToNode(result).next_free_id;
        return result;
    }
    if (next_to_alloc_id >= 0x10000) {
        return -1;
    }
    int array_pos = next_to_alloc_id >> 8;
    int node_pos = next_to_alloc_id & 0xff;
    if (node_pos == 0) {
        if (array_pos == 0) {
            nodes = static_cast<NodeArray**>(calloc(256, sizeof(NodeArray*)));
            if (nodes == nullptr) {
                return -1;
            }
        }
        nodes[array_pos] = static_cast<NodeArray*>(malloc(sizeof(NodeArray)));
        if (nodes[array_pos] == nullptr) {
            return -1;
        }
    }
    return next_to_alloc_id++;
}

// On success, return an id referring to a PIMutex; otherwise return -1.
// A valid id is in range [0, 0xffff].
static int AllocId() {
    lock.lock();
    int result = AllocIdLocked();
    lock.unlock();
    if (result != -1) {
        memset(&IdToPIMutex(result), 0, sizeof(PIMutex));
    }
    return result;
}

static void FreeId(int id) {
    lock.lock();
    IdToNode(id).next_free_id = first_free_id;
    first_free_id = id;
    lock.unlock();
}

}  // namespace PIMutexAllocator

#endif  // !defined(__LP64__)


/* Convenience macro, creates a mask of 'bits' bits that starts from
 * the 'shift'-th least significant bit in a 32-bit word.
 *
 * Examples: FIELD_MASK(0,4)  -> 0xf
 *           FIELD_MASK(16,9) -> 0x1ff0000
 */
#define  FIELD_MASK(shift,bits)           (((1 << (bits))-1) << (shift))

/* This one is used to create a bit pattern from a given field value */
#define  FIELD_TO_BITS(val,shift,bits)    (((val) & ((1 << (bits))-1)) << (shift))

/* And this one does the opposite, i.e. extracts a field's value from a bit pattern */
#define  FIELD_FROM_BITS(val,shift,bits)  (((val) >> (shift)) & ((1 << (bits))-1))

/* Convenience macros.
 *
 * These are used to form or modify the bit pattern of a given mutex value.
 */

/* Mutex state:
 *
 * 0 for unlocked
 * 1 for locked, no waiters
 * 2 for locked, maybe waiters
 */
#define  MUTEX_STATE_SHIFT      0
#define  MUTEX_STATE_LEN        2

#define  MUTEX_STATE_MASK           FIELD_MASK(MUTEX_STATE_SHIFT, MUTEX_STATE_LEN)
#define  MUTEX_STATE_FROM_BITS(v)   FIELD_FROM_BITS(v, MUTEX_STATE_SHIFT, MUTEX_STATE_LEN)
#define  MUTEX_STATE_TO_BITS(v)     FIELD_TO_BITS(v, MUTEX_STATE_SHIFT, MUTEX_STATE_LEN)

#define  MUTEX_STATE_UNLOCKED            0   /* must be 0 to match PTHREAD_MUTEX_INITIALIZER */
#define  MUTEX_STATE_LOCKED_UNCONTENDED  1   /* must be 1 due to atomic dec in unlock operation */
#define  MUTEX_STATE_LOCKED_CONTENDED    2   /* must be 1 + LOCKED_UNCONTENDED due to atomic dec */

#define  MUTEX_STATE_BITS_UNLOCKED            MUTEX_STATE_TO_BITS(MUTEX_STATE_UNLOCKED)
#define  MUTEX_STATE_BITS_LOCKED_UNCONTENDED  MUTEX_STATE_TO_BITS(MUTEX_STATE_LOCKED_UNCONTENDED)
#define  MUTEX_STATE_BITS_LOCKED_CONTENDED    MUTEX_STATE_TO_BITS(MUTEX_STATE_LOCKED_CONTENDED)

// Return true iff the mutex is unlocked.
#define MUTEX_STATE_BITS_IS_UNLOCKED(v) (((v) & MUTEX_STATE_MASK) == MUTEX_STATE_BITS_UNLOCKED)

// Return true iff the mutex is locked with no waiters.
#define MUTEX_STATE_BITS_IS_LOCKED_UNCONTENDED(v)  (((v) & MUTEX_STATE_MASK) == MUTEX_STATE_BITS_LOCKED_UNCONTENDED)

// Return true iff the mutex is locked with maybe waiters.
#define MUTEX_STATE_BITS_IS_LOCKED_CONTENDED(v)   (((v) & MUTEX_STATE_MASK) == MUTEX_STATE_BITS_LOCKED_CONTENDED)

/* Used to flip from LOCKED_UNCONTENDED to LOCKED_CONTENDED */
#define  MUTEX_STATE_BITS_FLIP_CONTENTION(v)      ((v) ^ (MUTEX_STATE_BITS_LOCKED_CONTENDED ^ MUTEX_STATE_BITS_LOCKED_UNCONTENDED))

/* Mutex counter:
 *
 * We need to check for overflow before incrementing, and we also need to
 * detect when the counter is 0.
 */
#define  MUTEX_COUNTER_SHIFT         2
#define  MUTEX_COUNTER_LEN           11
#define  MUTEX_COUNTER_MASK          FIELD_MASK(MUTEX_COUNTER_SHIFT, MUTEX_COUNTER_LEN)

#define  MUTEX_COUNTER_BITS_WILL_OVERFLOW(v)    (((v) & MUTEX_COUNTER_MASK) == MUTEX_COUNTER_MASK)
#define  MUTEX_COUNTER_BITS_IS_ZERO(v)          (((v) & MUTEX_COUNTER_MASK) == 0)

/* Used to increment the counter directly after overflow has been checked */
#define  MUTEX_COUNTER_BITS_ONE      FIELD_TO_BITS(1, MUTEX_COUNTER_SHIFT,MUTEX_COUNTER_LEN)

/* Mutex shared bit flag
 *
 * This flag is set to indicate that the mutex is shared among processes.
 * This changes the futex opcode we use for futex wait/wake operations
 * (non-shared operations are much faster).
 */
#define  MUTEX_SHARED_SHIFT    13
#define  MUTEX_SHARED_MASK     FIELD_MASK(MUTEX_SHARED_SHIFT,1)

/* Mutex type:
 * We support normal, recursive and errorcheck mutexes.
 */
#define  MUTEX_TYPE_SHIFT      14
#define  MUTEX_TYPE_LEN        2
#define  MUTEX_TYPE_MASK       FIELD_MASK(MUTEX_TYPE_SHIFT,MUTEX_TYPE_LEN)

#define  MUTEX_TYPE_TO_BITS(t)       FIELD_TO_BITS(t, MUTEX_TYPE_SHIFT, MUTEX_TYPE_LEN)

#define  MUTEX_TYPE_BITS_NORMAL      MUTEX_TYPE_TO_BITS(PTHREAD_MUTEX_NORMAL)
#define  MUTEX_TYPE_BITS_RECURSIVE   MUTEX_TYPE_TO_BITS(PTHREAD_MUTEX_RECURSIVE)
#define  MUTEX_TYPE_BITS_ERRORCHECK  MUTEX_TYPE_TO_BITS(PTHREAD_MUTEX_ERRORCHECK)
// Use a special mutex type to mark priority inheritance mutexes.
#define  PI_MUTEX_STATE     MUTEX_TYPE_TO_BITS(3)

// A PI mutex contains the following fields:
//   Atomic(uint16_t) state;
//   PIMutex pi_mutex;  // uint16_t pi_mutex_id in 32-bit programs
//
//   state holds the following fields:
//
//   bits:   name    description
//   15-14   type    mutex type, should be 3
//   13-0    padding should be 0
//
//   pi_mutex holds the state of a PI mutex.
//   pi_mutex_id holds an integer used to find the state of a PI mutex.
//
// A Non-PI mutex contains the following fields:
//   Atomic(uint16_t) state;
//   atomic_int owner_tid;  // Atomic(uint16_t) in 32-bit programs
//
//   state holds the following fields:
//
//   bits:     name     description
//   15-14     type     mutex type, can be 0 (normal), 1 (recursive), 2 (errorcheck)
//   13        shared   process-shared flag
//   12-2      counter  <number of times a thread holding a recursive Non-PI mutex> - 1
//   1-0       state    lock state (0, 1 or 2)
//
//   bits 15-13 are constant during the lifetime of the mutex.
//
//   owner_tid is used only in recursive and errorcheck Non-PI mutexes to hold the mutex owner
//   thread id.
//
// PI mutexes and Non-PI mutexes are distinguished by checking the type field in state.
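// Worked example (hypothetical state value): a Non-PI state of 0x4006 decodes as
//   type    = (0x4006 >> 14) & 0x3  = 1  -> recursive
//   shared  = (0x4006 >> 13) & 0x1  = 0  -> process-private
//   counter = (0x4006 >> 2) & 0x7ff = 1  -> held twice by the owner (count - 1)
//   state   =  0x4006 & 0x3         = 2  -> locked, maybe waiters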
#if defined(__LP64__)
struct pthread_mutex_internal_t {
    _Atomic(uint16_t) state;
    uint16_t __pad;
    union {
        atomic_int owner_tid;
        PIMutex pi_mutex;
    };
    char __reserved[28];

    PIMutex& ToPIMutex() {
        return pi_mutex;
    }

    void FreePIMutex() {
    }
} __attribute__((aligned(4)));

#else
struct pthread_mutex_internal_t {
    _Atomic(uint16_t) state;
    union {
        _Atomic(uint16_t) owner_tid;
        uint16_t pi_mutex_id;
    };

    PIMutex& ToPIMutex() {
        return PIMutexAllocator::IdToPIMutex(pi_mutex_id);
    }

    void FreePIMutex() {
        PIMutexAllocator::FreeId(pi_mutex_id);
    }
} __attribute__((aligned(4)));
#endif

static_assert(sizeof(pthread_mutex_t) == sizeof(pthread_mutex_internal_t),
              "pthread_mutex_t should actually be pthread_mutex_internal_t in implementation.");

// For binary compatibility with old versions of pthread_mutex_t, we can't use stricter alignment
// than 4-byte alignment.
static_assert(alignof(pthread_mutex_t) == 4,
              "pthread_mutex_t should fulfill the alignment of pthread_mutex_internal_t.");

static inline pthread_mutex_internal_t* __get_internal_mutex(pthread_mutex_t* mutex_interface) {
  return reinterpret_cast<pthread_mutex_internal_t*>(mutex_interface);
}

int pthread_mutex_init(pthread_mutex_t* mutex_interface, const pthread_mutexattr_t* attr) {
    pthread_mutex_internal_t* mutex = __get_internal_mutex(mutex_interface);

    memset(mutex, 0, sizeof(pthread_mutex_internal_t));

    if (__predict_true(attr == nullptr)) {
      atomic_store_explicit(&mutex->state, MUTEX_TYPE_BITS_NORMAL, memory_order_relaxed);
      return 0;
    }

    uint16_t state = 0;
    if ((*attr & MUTEXATTR_SHARED_MASK) != 0) {
        state |= MUTEX_SHARED_MASK;
    }

    switch (*attr & MUTEXATTR_TYPE_MASK) {
    case PTHREAD_MUTEX_NORMAL:
      state |= MUTEX_TYPE_BITS_NORMAL;
      break;
    case PTHREAD_MUTEX_RECURSIVE:
      state |= MUTEX_TYPE_BITS_RECURSIVE;
      break;
    case PTHREAD_MUTEX_ERRORCHECK:
      state |= MUTEX_TYPE_BITS_ERRORCHECK;
      break;
    default:
        return EINVAL;
    }

    if (((*attr & MUTEXATTR_PROTOCOL_MASK) >> MUTEXATTR_PROTOCOL_SHIFT) == PTHREAD_PRIO_INHERIT) {
#if !defined(__LP64__)
        if (state & MUTEX_SHARED_MASK) {
            return EINVAL;
        }
        int id = PIMutexAllocator::AllocId();
        if (id == -1) {
            return ENOMEM;
        }
        mutex->pi_mutex_id = id;
#endif
        atomic_store_explicit(&mutex->state, PI_MUTEX_STATE, memory_order_relaxed);
        PIMutex& pi_mutex = mutex->ToPIMutex();
        pi_mutex.type = *attr & MUTEXATTR_TYPE_MASK;
        pi_mutex.shared = (*attr & MUTEXATTR_SHARED_MASK) != 0;
    } else {
      atomic_store_explicit(&mutex->state, state, memory_order_relaxed);
      atomic_store_explicit(&mutex->owner_tid, 0, memory_order_relaxed);
    }
    return 0;
}

// namespace for Non-PI mutex routines.
namespace NonPI {

static inline __always_inline int NormalMutexTryLock(pthread_mutex_internal_t* mutex,
                                                     uint16_t shared) {
    const uint16_t unlocked           = shared | MUTEX_STATE_BITS_UNLOCKED;
    const uint16_t locked_uncontended = shared | MUTEX_STATE_BITS_LOCKED_UNCONTENDED;

    uint16_t old_state = unlocked;
    if (__predict_true(atomic_compare_exchange_strong_explicit(&mutex->state, &old_state,
                         locked_uncontended, memory_order_acquire, memory_order_relaxed))) {
        return 0;
    }
    return EBUSY;
}

/*
 * Lock a normal Non-PI mutex.
 *
 * As noted above, there are three states:
 *   0 (unlocked, no contention)
 *   1 (locked, no contention)
 *   2 (locked, contention)
 *
 * Non-recursive mutexes don't use the thread-id or counter fields, and the
 * "type" value is zero, so the only bits that will be set are the ones in
 * the lock state field.
 */
static inline __always_inline int NormalMutexLock(pthread_mutex_internal_t* mutex,
                                                  uint16_t shared,
                                                  bool use_realtime_clock,
                                                  const timespec* abs_timeout_or_null) {
    if (__predict_true(NormalMutexTryLock(mutex, shared) == 0)) {
        return 0;
    }
    int result = check_timespec(abs_timeout_or_null, true);
    if (result != 0) {
        return result;
    }

    ScopedTrace trace("Contending for pthread mutex");

    const uint16_t unlocked         = shared | MUTEX_STATE_BITS_UNLOCKED;
    const uint16_t locked_contended = shared | MUTEX_STATE_BITS_LOCKED_CONTENDED;

    // We want to go to sleep until the mutex is available, which requires
    // promoting it to locked_contended. We need to swap in the new state
    // and then wait until somebody wakes us up.
    // An atomic_exchange is used to compete with other threads for the lock.
    // If it returns unlocked, we have acquired the lock; otherwise another
    // thread still holds the lock and we should wait again.
    // If the lock is acquired, an acquire fence is needed to make all memory accesses
    // made by other threads visible to the current CPU.
    while (atomic_exchange_explicit(&mutex->state, locked_contended,
                                    memory_order_acquire) != unlocked) {
        if (__futex_wait_ex(&mutex->state, shared, locked_contended, use_realtime_clock,
                            abs_timeout_or_null) == -ETIMEDOUT) {
            return ETIMEDOUT;
        }
    }
    return 0;
}

/*
 * Release a normal Non-PI mutex.  The caller is responsible for determining
 * that we are in fact the owner of this lock.
 */
static inline __always_inline void NormalMutexUnlock(pthread_mutex_internal_t* mutex,
                                                     uint16_t shared) {
    const uint16_t unlocked         = shared | MUTEX_STATE_BITS_UNLOCKED;
    const uint16_t locked_contended = shared | MUTEX_STATE_BITS_LOCKED_CONTENDED;

    // We use an atomic_exchange to release the lock. If the locked_contended state
    // is returned, some thread is waiting for the lock and we need to wake up
    // one of them.
    // A release fence is required to make previous stores visible to the next
    // lock owner thread.
    if (atomic_exchange_explicit(&mutex->state, unlocked,
                                 memory_order_release) == locked_contended) {
        // Wake up one waiting thread. We don't know which thread will be
        // woken or when it'll start executing -- futexes make no guarantees
        // here. There may not even be a thread waiting.
        //
        // The newly-woken thread will replace the unlocked state we just set above
        // with locked_contended state, which means that when it eventually releases
        // the mutex it will also call FUTEX_WAKE. This results in one extra wake
        // call whenever a lock is contended, but lets us avoid forgetting anyone
        // without requiring us to track the number of sleepers.
        //
        // It's possible for another thread to sneak in and grab the lock between
        // the exchange above and the wake call below. If the new thread is "slow"
        // and holds the lock for a while, we'll wake up a sleeper, which will swap
        // in locked_uncontended state and then go back to sleep since the lock is
        // still held. If the new thread is "fast", running to completion before
        // we call wake, the thread we eventually wake will find an unlocked mutex
        // and will execute. Either way we have correct behavior and nobody is
        // orphaned on the wait queue.
        //
        // The pthread_mutex_internal_t object may have been deallocated between the
        // atomic exchange and the wake call. In that case, this wake call could
        // target unmapped memory or memory used by an otherwise unrelated futex
        // operation. Even if the kernel avoids spurious futex wakeups from its
        // point of view, this wake call could trigger a spurious wakeup in any
        // futex accessible from this process. References:
        //  - https://lkml.org/lkml/2014/11/27/472
        //  - http://austingroupbugs.net/view.php?id=811#c2267
        __futex_wake_ex(&mutex->state, shared, 1);
    }
}

/* This common inlined function is used to increment the counter of a recursive Non-PI mutex.
 *
 * If the counter overflows, it will return EAGAIN.
 * Otherwise, it atomically increments the counter and returns 0.
 */
static inline __always_inline int RecursiveIncrement(pthread_mutex_internal_t* mutex,
                                                     uint16_t old_state) {
    // Detect recursive lock overflow and return EAGAIN.
    // This is safe because only the owner thread can modify the
    // counter bits in the mutex value.
    if (MUTEX_COUNTER_BITS_WILL_OVERFLOW(old_state)) {
        return EAGAIN;
    }

    // Other threads are able to change the lower bits (e.g. promoting it to "contended"),
    // but the mutex counter will not overflow. So we use an atomic_fetch_add operation here.
    // The mutex is already locked by the current thread, so we don't need an acquire fence.
    atomic_fetch_add_explicit(&mutex->state, MUTEX_COUNTER_BITS_ONE, memory_order_relaxed);
    return 0;
}

// Wait on a recursive or errorcheck Non-PI mutex.
static inline __always_inline int RecursiveOrErrorcheckMutexWait(pthread_mutex_internal_t* mutex,
                                                                 uint16_t shared,
                                                                 uint16_t old_state,
                                                                 bool use_realtime_clock,
                                                                 const timespec* abs_timeout) {
// __futex_wait always waits on a 32-bit value, but state is 16-bit. For a normal mutex, the
// owner_tid field in mutex is not used. On 64-bit devices, the __pad field in mutex is not used.
// But when a recursive or errorcheck mutex is used on 32-bit devices, we need to add the
// owner_tid value to the value argument for __futex_wait, otherwise we may always get an
// EAGAIN error.

#if defined(__LP64__)
  return __futex_wait_ex(&mutex->state, shared, old_state, use_realtime_clock, abs_timeout);

#else
  // This implementation works only when the layout of pthread_mutex_internal_t matches the
  // expectation below. It also relies on the assumption that Android always runs on
  // little-endian devices.
  static_assert(offsetof(pthread_mutex_internal_t, state) == 0, "");
  static_assert(offsetof(pthread_mutex_internal_t, owner_tid) == 2, "");

  uint32_t owner_tid = atomic_load_explicit(&mutex->owner_tid, memory_order_relaxed);
  return __futex_wait_ex(&mutex->state, shared, (owner_tid << 16) | old_state,
                         use_realtime_clock, abs_timeout);
#endif
}

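// Illustrative example (assumed values, 32-bit only): with owner_tid == 0x1234 and
// old_state == 0x8002, the 32-bit futex value is (0x1234 << 16) | 0x8002 == 0x12348002,
// which matches the little-endian in-memory layout {state, owner_tid} starting at &mutex->state.
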
// Lock a Non-PI mutex.
static int MutexLockWithTimeout(pthread_mutex_internal_t* mutex, bool use_realtime_clock,
                                const timespec* abs_timeout_or_null) {
    uint16_t old_state = atomic_load_explicit(&mutex->state, memory_order_relaxed);
    uint16_t mtype = (old_state & MUTEX_TYPE_MASK);
    uint16_t shared = (old_state & MUTEX_SHARED_MASK);

    // Handle common case first.
    if (__predict_true(mtype == MUTEX_TYPE_BITS_NORMAL)) {
        return NormalMutexLock(mutex, shared, use_realtime_clock, abs_timeout_or_null);
    }

    // Do we already own this recursive or error-check mutex?
    pid_t tid = __get_thread()->tid;
    if (tid == atomic_load_explicit(&mutex->owner_tid, memory_order_relaxed)) {
        if (mtype == MUTEX_TYPE_BITS_ERRORCHECK) {
            return EDEADLK;
        }
        return RecursiveIncrement(mutex, old_state);
    }

    const uint16_t unlocked           = mtype | shared | MUTEX_STATE_BITS_UNLOCKED;
    const uint16_t locked_uncontended = mtype | shared | MUTEX_STATE_BITS_LOCKED_UNCONTENDED;
    const uint16_t locked_contended   = mtype | shared | MUTEX_STATE_BITS_LOCKED_CONTENDED;

    // First, if the mutex is unlocked, try to quickly acquire it.
    // In the optimistic case where this works, set the state to locked_uncontended.
    if (old_state == unlocked) {
        // If exchanged successfully, an acquire fence is required to make
        // all memory accesses made by other threads visible to the current CPU.
        if (__predict_true(atomic_compare_exchange_strong_explicit(&mutex->state, &old_state,
                             locked_uncontended, memory_order_acquire, memory_order_relaxed))) {
            atomic_store_explicit(&mutex->owner_tid, tid, memory_order_relaxed);
            return 0;
        }
    }

    ScopedTrace trace("Contending for pthread mutex");

    while (true) {
        if (old_state == unlocked) {
            // NOTE: We put the state to locked_contended since we _know_ there
            // is contention when we are in this loop. This ensures all waiters
            // will be unlocked.

            // If exchanged successfully, an acquire fence is required to make
            // all memory accesses made by other threads visible to the current CPU.
            if (__predict_true(atomic_compare_exchange_weak_explicit(&mutex->state,
                                                                     &old_state, locked_contended,
                                                                     memory_order_acquire,
                                                                     memory_order_relaxed))) {
                atomic_store_explicit(&mutex->owner_tid, tid, memory_order_relaxed);
                return 0;
            }
            continue;
        } else if (MUTEX_STATE_BITS_IS_LOCKED_UNCONTENDED(old_state)) {
            // We should set it to locked_contended before going to sleep. This makes
            // sure waiters will be woken up eventually.

            int new_state = MUTEX_STATE_BITS_FLIP_CONTENTION(old_state);
            if (__predict_false(!atomic_compare_exchange_weak_explicit(&mutex->state,
                                                                       &old_state, new_state,
                                                                       memory_order_relaxed,
                                                                       memory_order_relaxed))) {
                continue;
            }
            old_state = new_state;
        }

        int result = check_timespec(abs_timeout_or_null, true);
        if (result != 0) {
            return result;
        }
        // We are in locked_contended state, sleep until someone wakes us up.
        if (RecursiveOrErrorcheckMutexWait(mutex, shared, old_state, use_realtime_clock,
                                           abs_timeout_or_null) == -ETIMEDOUT) {
            return ETIMEDOUT;
        }
        old_state = atomic_load_explicit(&mutex->state, memory_order_relaxed);
    }
}

}  // namespace NonPI

static inline __always_inline bool IsMutexDestroyed(uint16_t mutex_state) {
    return mutex_state == 0xffff;
}

// Inlining this function in pthread_mutex_lock() adds the cost of stack frame instructions on
// ARM64. So make it noinline.
static int __attribute__((noinline)) HandleUsingDestroyedMutex(pthread_mutex_t* mutex,
                                                               const char* function_name) {
    if (android_get_application_target_sdk_version() >= 28) {
        __fortify_fatal("%s called on a destroyed mutex (%p)", function_name, mutex);
    }
    return EBUSY;
}

int pthread_mutex_lock(pthread_mutex_t* mutex_interface) {
#if !defined(__LP64__)
    // Some apps depend on being able to pass NULL as a mutex and get EINVAL
    // back. Don't need to worry about it for LP64 since the ABI is brand new,
    // but keep compatibility for LP32. http://b/19995172.
    if (mutex_interface == nullptr) {
        return EINVAL;
    }
#endif

    pthread_mutex_internal_t* mutex = __get_internal_mutex(mutex_interface);
    uint16_t old_state = atomic_load_explicit(&mutex->state, memory_order_relaxed);
    uint16_t mtype = (old_state & MUTEX_TYPE_MASK);
    // Avoid slowing down the fast path of the normal mutex lock operation.
    if (__predict_true(mtype == MUTEX_TYPE_BITS_NORMAL)) {
        uint16_t shared = (old_state & MUTEX_SHARED_MASK);
        if (__predict_true(NonPI::NormalMutexTryLock(mutex, shared) == 0)) {
            return 0;
        }
    }
    if (old_state == PI_MUTEX_STATE) {
        PIMutex& m = mutex->ToPIMutex();
        // Handle common case first.
        if (__predict_true(PIMutexTryLock(m) == 0)) {
            return 0;
        }
        return PIMutexTimedLock(mutex->ToPIMutex(), false, nullptr);
    }
    if (__predict_false(IsMutexDestroyed(old_state))) {
        return HandleUsingDestroyedMutex(mutex_interface, __FUNCTION__);
    }
    return NonPI::MutexLockWithTimeout(mutex, false, nullptr);
}

int pthread_mutex_unlock(pthread_mutex_t* mutex_interface) {
#if !defined(__LP64__)
    // Some apps depend on being able to pass NULL as a mutex and get EINVAL
    // back. Don't need to worry about it for LP64 since the ABI is brand new,
    // but keep compatibility for LP32. http://b/19995172.
    if (mutex_interface == nullptr) {
        return EINVAL;
    }
#endif

    pthread_mutex_internal_t* mutex = __get_internal_mutex(mutex_interface);
    uint16_t old_state = atomic_load_explicit(&mutex->state, memory_order_relaxed);
    uint16_t mtype  = (old_state & MUTEX_TYPE_MASK);
    uint16_t shared = (old_state & MUTEX_SHARED_MASK);

    // Handle common case first.
    if (__predict_true(mtype == MUTEX_TYPE_BITS_NORMAL)) {
        NonPI::NormalMutexUnlock(mutex, shared);
        return 0;
    }
    if (old_state == PI_MUTEX_STATE) {
        return PIMutexUnlock(mutex->ToPIMutex());
    }
    if (__predict_false(IsMutexDestroyed(old_state))) {
        return HandleUsingDestroyedMutex(mutex_interface, __FUNCTION__);
    }

    // Do we already own this recursive or error-check mutex?
    pid_t tid = __get_thread()->tid;
    if (tid != atomic_load_explicit(&mutex->owner_tid, memory_order_relaxed)) {
        return EPERM;
    }

    // If the counter is > 0, we can simply decrement it atomically.
    // Since other threads can mutate the lower state bits (and only the
    // lower state bits), we use an atomic subtraction rather than a plain store.
    if (!MUTEX_COUNTER_BITS_IS_ZERO(old_state)) {
        // We still own the mutex, so a release fence is not needed.
        atomic_fetch_sub_explicit(&mutex->state, MUTEX_COUNTER_BITS_ONE, memory_order_relaxed);
        return 0;
    }

    // The counter is 0, so we're going to unlock the mutex by resetting its
    // state to unlocked. We need to perform an atomic_exchange in order to read
    // the current state, which will be locked_contended if there may be waiters
    // to wake.
    // A release fence is required to make previous stores visible to the next
    // lock owner thread.
    atomic_store_explicit(&mutex->owner_tid, 0, memory_order_relaxed);
    const uint16_t unlocked = mtype | shared | MUTEX_STATE_BITS_UNLOCKED;
    old_state = atomic_exchange_explicit(&mutex->state, unlocked, memory_order_release);
    if (MUTEX_STATE_BITS_IS_LOCKED_CONTENDED(old_state)) {
        __futex_wake_ex(&mutex->state, shared, 1);
    }

    return 0;
}

int pthread_mutex_trylock(pthread_mutex_t* mutex_interface) {
    pthread_mutex_internal_t* mutex = __get_internal_mutex(mutex_interface);

    uint16_t old_state = atomic_load_explicit(&mutex->state, memory_order_relaxed);
    uint16_t mtype  = (old_state & MUTEX_TYPE_MASK);

    // Handle common case first.
    if (__predict_true(mtype == MUTEX_TYPE_BITS_NORMAL)) {
        uint16_t shared = (old_state & MUTEX_SHARED_MASK);
        return NonPI::NormalMutexTryLock(mutex, shared);
    }
    if (old_state == PI_MUTEX_STATE) {
        return PIMutexTryLock(mutex->ToPIMutex());
    }
    if (__predict_false(IsMutexDestroyed(old_state))) {
        return HandleUsingDestroyedMutex(mutex_interface, __FUNCTION__);
    }

    // Do we already own this recursive or error-check mutex?
    pid_t tid = __get_thread()->tid;
    if (tid == atomic_load_explicit(&mutex->owner_tid, memory_order_relaxed)) {
        if (mtype == MUTEX_TYPE_BITS_ERRORCHECK) {
            return EBUSY;
        }
        return NonPI::RecursiveIncrement(mutex, old_state);
    }

    uint16_t shared = (old_state & MUTEX_SHARED_MASK);
    const uint16_t unlocked           = mtype | shared | MUTEX_STATE_BITS_UNLOCKED;
    const uint16_t locked_uncontended = mtype | shared | MUTEX_STATE_BITS_LOCKED_UNCONTENDED;

    // Same as pthread_mutex_lock, except that we don't want to wait, and
    // the only operation that can succeed is a single compare_exchange to acquire the
    // lock if it is released / not owned by anyone. No need for a complex loop.
    // If exchanged successfully, an acquire fence is required to make
    // all memory accesses made by other threads visible to the current CPU.
    old_state = unlocked;
    if (__predict_true(atomic_compare_exchange_strong_explicit(&mutex->state, &old_state,
                                                               locked_uncontended,
                                                               memory_order_acquire,
                                                               memory_order_relaxed))) {
        atomic_store_explicit(&mutex->owner_tid, tid, memory_order_relaxed);
        return 0;
    }
    return EBUSY;
}

#if !defined(__LP64__)
// This exists only for backward binary compatibility on 32 bit platforms.
// (This function never existed for LP64.)
extern "C" int pthread_mutex_lock_timeout_np(pthread_mutex_t* mutex_interface, unsigned ms) {
    timespec ts;
    timespec_from_ms(ts, ms);
    timespec abs_timeout;
    absolute_timespec_from_timespec(abs_timeout, ts, CLOCK_MONOTONIC);
    int error = NonPI::MutexLockWithTimeout(__get_internal_mutex(mutex_interface), false,
                                            &abs_timeout);
    if (error == ETIMEDOUT) {
        error = EBUSY;
    }
    return error;
}
#endif

static int __pthread_mutex_timedlock(pthread_mutex_t* mutex_interface, bool use_realtime_clock,
                                     const timespec* abs_timeout, const char* function) {
    pthread_mutex_internal_t* mutex = __get_internal_mutex(mutex_interface);
    uint16_t old_state = atomic_load_explicit(&mutex->state, memory_order_relaxed);
    uint16_t mtype = (old_state & MUTEX_TYPE_MASK);
    // Handle common case first.
    if (__predict_true(mtype == MUTEX_TYPE_BITS_NORMAL)) {
        uint16_t shared = (old_state & MUTEX_SHARED_MASK);
        if (__predict_true(NonPI::NormalMutexTryLock(mutex, shared) == 0)) {
            return 0;
        }
    }
    if (old_state == PI_MUTEX_STATE) {
        return PIMutexTimedLock(mutex->ToPIMutex(), use_realtime_clock, abs_timeout);
    }
    if (__predict_false(IsMutexDestroyed(old_state))) {
        return HandleUsingDestroyedMutex(mutex_interface, function);
    }
    return NonPI::MutexLockWithTimeout(mutex, use_realtime_clock, abs_timeout);
}

int pthread_mutex_timedlock(pthread_mutex_t* mutex_interface, const struct timespec* abs_timeout) {
    return __pthread_mutex_timedlock(mutex_interface, true, abs_timeout, __FUNCTION__);
}

int pthread_mutex_timedlock_monotonic_np(pthread_mutex_t* mutex_interface,
                                         const struct timespec* abs_timeout) {
    return __pthread_mutex_timedlock(mutex_interface, false, abs_timeout, __FUNCTION__);
}

int pthread_mutex_clocklock(pthread_mutex_t* mutex_interface, clockid_t clock,
                            const struct timespec* abs_timeout) {
  switch (clock) {
    case CLOCK_MONOTONIC:
      return __pthread_mutex_timedlock(mutex_interface, false, abs_timeout, __FUNCTION__);
    case CLOCK_REALTIME:
      return __pthread_mutex_timedlock(mutex_interface, true, abs_timeout, __FUNCTION__);
    default: {
      pthread_mutex_internal_t* mutex = __get_internal_mutex(mutex_interface);
      uint16_t old_state = atomic_load_explicit(&mutex->state, memory_order_relaxed);
      if (IsMutexDestroyed(old_state)) {
        return HandleUsingDestroyedMutex(mutex_interface, __FUNCTION__);
      }
      return EINVAL;
    }
  }
}

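// Usage sketch (illustrative only; some_mutex is a hypothetical caller-owned mutex):
// waiting up to one second on the monotonic clock:
//   timespec ts;
//   clock_gettime(CLOCK_MONOTONIC, &ts);
//   ts.tv_sec += 1;
//   int rc = pthread_mutex_clocklock(&some_mutex, CLOCK_MONOTONIC, &ts);
//   if (rc == ETIMEDOUT) { /* the lock was not acquired within the timeout */ }
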
int pthread_mutex_destroy(pthread_mutex_t* mutex_interface) {
    pthread_mutex_internal_t* mutex = __get_internal_mutex(mutex_interface);
    uint16_t old_state = atomic_load_explicit(&mutex->state, memory_order_relaxed);
    if (__predict_false(IsMutexDestroyed(old_state))) {
        return HandleUsingDestroyedMutex(mutex_interface, __FUNCTION__);
    }
    if (old_state == PI_MUTEX_STATE) {
        int result = PIMutexDestroy(mutex->ToPIMutex());
        if (result == 0) {
            mutex->FreePIMutex();
            atomic_store(&mutex->state, 0xffff);
        }
        return result;
    }
    // Store 0xffff to make the mutex unusable. Although the POSIX standard says it is undefined
    // behavior to destroy a locked mutex, we prefer not to change mutex->state in that situation.
    if (MUTEX_STATE_BITS_IS_UNLOCKED(old_state) &&
        atomic_compare_exchange_strong_explicit(&mutex->state, &old_state, 0xffff,
                                                memory_order_relaxed, memory_order_relaxed)) {
      return 0;
    }
    return EBUSY;
}