xref: /aosp_15_r20/bionic/libc/include/pthread.h (revision 8d67ca893c1523eb926b9080dbe4e2ffd2a27ba1)
1 /*
2  * Copyright (C) 2008 The Android Open Source Project
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  *  * Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  *  * Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in
12  *    the documentation and/or other materials provided with the
13  *    distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
16  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
17  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
18  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
19  * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
20  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
21  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
22  * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
23  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
24  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
25  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28 
29 #pragma once
30 
31 /**
32  * @file pthread.h
33  * @brief POSIX threads.
34  */
35 
36 #include <sys/cdefs.h>
37 
38 #include <limits.h>
39 #include <bits/page_size.h>
40 #include <bits/pthread_types.h>
41 #include <sched.h>
42 #include <sys/types.h>
43 #include <time.h>
44 
45 __BEGIN_DECLS
46 
/* Mutex type constants, as used with pthread_mutexattr_settype(). */
47 enum {
  /* No ownership or recursion checks; relocking from the owning thread deadlocks. */
48   PTHREAD_MUTEX_NORMAL = 0,
  /* The owning thread may relock; unlock must be called a matching number of times. */
49   PTHREAD_MUTEX_RECURSIVE = 1,
  /* Relocking by the owner, or unlocking by a non-owner, fails with an error. */
50   PTHREAD_MUTEX_ERRORCHECK = 2,
51 
  /* Historical non-POSIX aliases for the standard names above. */
52   PTHREAD_MUTEX_ERRORCHECK_NP = PTHREAD_MUTEX_ERRORCHECK,
53   PTHREAD_MUTEX_RECURSIVE_NP  = PTHREAD_MUTEX_RECURSIVE,
54 
  /* POSIX leaves the default type implementation-defined; here it is NORMAL. */
55   PTHREAD_MUTEX_DEFAULT = PTHREAD_MUTEX_NORMAL
56 };
57 
/* Static initializers. The mutex type is encoded into the first state word:
 * the `(type & 3) << 14` below places the 2-bit type in bits 14-15. */
58 #define PTHREAD_MUTEX_INITIALIZER { { ((PTHREAD_MUTEX_NORMAL & 3) << 14) } }
59 #define PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP { { ((PTHREAD_MUTEX_RECURSIVE & 3) << 14) } }
60 #define PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP { { ((PTHREAD_MUTEX_ERRORCHECK & 3) << 14) } }
61 
62 #define PTHREAD_COND_INITIALIZER  { { 0 } }
/* Like PTHREAD_COND_INITIALIZER, but timeouts are measured against
 * CLOCK_MONOTONIC instead of CLOCK_REALTIME (see the comment above
 * pthread_cond_timedwait_monotonic_np below). */
63 #define PTHREAD_COND_INITIALIZER_MONOTONIC_NP  { { 1 << 1 } }
64 
65 #define PTHREAD_RWLOCK_INITIALIZER  { { 0 } }
66 
/* Reader/writer lock preference, as used with pthread_rwlockattr_setkind_np().
 * Only the non-recursive writer-preference variant is offered. */
67 enum {
68   PTHREAD_RWLOCK_PREFER_READER_NP = 0,
69   PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP = 1,
70 };
71 
/* Initializer for pthread_once_t. */
72 #define PTHREAD_ONCE_INIT 0
73 
/* Returned by pthread_barrier_wait() to exactly one of the released threads. */
74 #define PTHREAD_BARRIER_SERIAL_THREAD (-1)
75 
/* Smallest usable stack: 16KiB for 64-bit processes, 8KiB for 32-bit. */
76 #if defined(__LP64__)
77 #define PTHREAD_STACK_MIN 16384
78 #else
79 #define PTHREAD_STACK_MIN 8192
80 #endif
81 
/* Values for pthread_attr_setdetachstate(). */
82 #define PTHREAD_CREATE_DETACHED 1
83 #define PTHREAD_CREATE_JOINABLE 0
84 
/* Values for pthread_attr_setinheritsched(). */
85 #define PTHREAD_EXPLICIT_SCHED 0
86 #define PTHREAD_INHERIT_SCHED 1
87 
/* Values for pthread_mutexattr_setprotocol(). */
88 #define PTHREAD_PRIO_NONE 0
89 #define PTHREAD_PRIO_INHERIT 1
90 
/* Values for the various *_setpshared() attribute setters. */
91 #define PTHREAD_PROCESS_PRIVATE 0
92 #define PTHREAD_PROCESS_SHARED 1
93 
/* Values for pthread_attr_setscope(). */
94 #define PTHREAD_SCOPE_SYSTEM 0
95 #define PTHREAD_SCOPE_PROCESS 1
96 
/* Registers handlers to run around fork(): __prepare in the parent before the
 * fork, __parent in the parent afterwards, __child in the child afterwards.
 * Any handler may be NULL. Returns 0 on success, an error number on failure. */
97 int pthread_atfork(void (* _Nullable __prepare)(void), void (* _Nullable __parent)(void), void (* _Nullable __child)(void));
98 
/* Thread attribute accessors. Each returns 0 on success and an error number
 * on failure. Attributes only affect threads created after the attribute
 * object is passed to pthread_create(). */
99 int pthread_attr_destroy(pthread_attr_t* _Nonnull __attr);
100 int pthread_attr_getdetachstate(const pthread_attr_t* _Nonnull __attr, int* _Nonnull __state);
101 int pthread_attr_getguardsize(const pthread_attr_t* _Nonnull __attr, size_t* _Nonnull __size);
102 
103 #if __BIONIC_AVAILABILITY_GUARD(28)
104 int pthread_attr_getinheritsched(const pthread_attr_t* _Nonnull __attr, int* _Nonnull __flag) __INTRODUCED_IN(28);
105 #endif /* __BIONIC_AVAILABILITY_GUARD(28) */
106 
107 int pthread_attr_getschedparam(const pthread_attr_t* _Nonnull __attr, struct sched_param* _Nonnull __param);
108 int pthread_attr_getschedpolicy(const pthread_attr_t* _Nonnull __attr, int* _Nonnull __policy);
109 int pthread_attr_getscope(const pthread_attr_t* _Nonnull __attr, int* _Nonnull __scope);
110 int pthread_attr_getstack(const pthread_attr_t* _Nonnull __attr, void* _Nullable * _Nonnull __addr, size_t* _Nonnull __size);
111 int pthread_attr_getstacksize(const pthread_attr_t* _Nonnull __attr, size_t* _Nonnull __size);
112 int pthread_attr_init(pthread_attr_t* _Nonnull __attr);
113 int pthread_attr_setdetachstate(pthread_attr_t* _Nonnull __attr, int __state);
114 int pthread_attr_setguardsize(pthread_attr_t* _Nonnull __attr, size_t __size);
115 
116 #if __BIONIC_AVAILABILITY_GUARD(28)
117 int pthread_attr_setinheritsched(pthread_attr_t* _Nonnull __attr, int __flag) __INTRODUCED_IN(28);
118 #endif /* __BIONIC_AVAILABILITY_GUARD(28) */
119 
120 int pthread_attr_setschedparam(pthread_attr_t* _Nonnull __attr, const struct sched_param* _Nonnull __param);
121 int pthread_attr_setschedpolicy(pthread_attr_t* _Nonnull __attr, int __policy);
122 int pthread_attr_setscope(pthread_attr_t* _Nonnull __attr, int __scope);
123 int pthread_attr_setstack(pthread_attr_t* _Nonnull __attr, void* _Nonnull __addr, size_t __size);
/* Sets the stack size attribute. Returns 0 on success, an error number on
 * failure. (First parameter renamed from the misleading `__addr` to `__attr`:
 * prototype parameter names are not part of the ABI, and every sibling
 * pthread_attr_* accessor in this header names it `__attr`.) */
124 int pthread_attr_setstacksize(pthread_attr_t* _Nonnull __attr, size_t __size);
125 
/* Condition variable attribute accessors. Each returns 0 on success and an
 * error number on failure. setclock() selects the clock used for timed waits
 * on condition variables created with the attribute object. */
126 int pthread_condattr_destroy(pthread_condattr_t* _Nonnull __attr);
127 int pthread_condattr_getclock(const pthread_condattr_t* _Nonnull __attr, clockid_t* _Nonnull __clock);
128 int pthread_condattr_getpshared(const pthread_condattr_t* _Nonnull __attr, int* _Nonnull __shared);
129 int pthread_condattr_init(pthread_condattr_t* _Nonnull __attr);
130 int pthread_condattr_setclock(pthread_condattr_t* _Nonnull __attr, clockid_t __clock);
131 int pthread_condattr_setpshared(pthread_condattr_t* _Nonnull __attr, int __shared);
132 
/* Wakes all waiters on the condition variable. */
133 int pthread_cond_broadcast(pthread_cond_t* _Nonnull __cond);
134 
/* Like pthread_cond_timedwait(), but measuring the timeout against the
 * caller-supplied clock. Added in API 30. */
135 #if __BIONIC_AVAILABILITY_GUARD(30)
136 int pthread_cond_clockwait(pthread_cond_t* _Nonnull __cond, pthread_mutex_t* _Nonnull __mutex, clockid_t __clock,
137                            const struct timespec* _Nullable __timeout) __INTRODUCED_IN(30);
138 #endif /* __BIONIC_AVAILABILITY_GUARD(30) */
139 
140 int pthread_cond_destroy(pthread_cond_t* _Nonnull __cond);
141 int pthread_cond_init(pthread_cond_t* _Nonnull __cond, const pthread_condattr_t* _Nullable __attr);
/* Wakes at least one waiter on the condition variable. */
142 int pthread_cond_signal(pthread_cond_t* _Nonnull __cond);
143 int pthread_cond_timedwait(pthread_cond_t* _Nonnull __cond, pthread_mutex_t* _Nonnull __mutex, const struct timespec* _Nullable __timeout);
144 /*
145  * Condition variables use CLOCK_REALTIME by default for their timeouts, however that is
146  * typically inappropriate, since that clock can change dramatically, causing the timeout to
147  * either expire earlier or much later than intended.
148  * Condition variables have an initialization option to use CLOCK_MONOTONIC, and in addition,
149  * Android provides pthread_cond_timedwait_monotonic_np to use CLOCK_MONOTONIC on a condition
150  * variable for this single wait no matter how it was initialized.
151  * Note that pthread_cond_clockwait() allows specifying an arbitrary clock and has superseded this
152  * function.
153  */
154 
/* Available always on 32-bit, and since API 28 on 64-bit. */
155 #if (!defined(__LP64__)) || (defined(__LP64__) && __ANDROID_API__ >= 28)
156 int pthread_cond_timedwait_monotonic_np(pthread_cond_t* _Nonnull __cond, pthread_mutex_t* _Nonnull __mutex,
157                                         const struct timespec* _Nullable __timeout) __INTRODUCED_IN_64(28);
158 #endif /* (!defined(__LP64__)) || (defined(__LP64__) && __ANDROID_API__ >= 28) */
159 
160 int pthread_cond_wait(pthread_cond_t* _Nonnull __cond, pthread_mutex_t* _Nonnull __mutex);
161 
/* Creates a new thread running __start_routine with the final argument as its
 * sole parameter; the new thread's id is stored through __pthread_ptr.
 * Returns 0 on success, an error number on failure. */
162 int pthread_create(pthread_t* _Nonnull __pthread_ptr, pthread_attr_t const* _Nullable __attr, void* _Nullable (* _Nonnull __start_routine)(void* _Nullable), void* _Nullable);
163 
/* Marks the thread so its resources are reclaimed at exit without a join. */
164 int pthread_detach(pthread_t __pthread);
/* Terminates the calling thread, making __return_value available to a joiner. */
165 void pthread_exit(void* _Nullable __return_value) __noreturn;
166 
/* Returns non-zero if the two thread ids refer to the same thread. */
167 int pthread_equal(pthread_t __lhs, pthread_t __rhs);
168 
/* Fills in __attr with the attributes actually in use by the given thread. */
169 int pthread_getattr_np(pthread_t __pthread, pthread_attr_t* _Nonnull __attr);
170 
/* Returns (via __clock) the CPU-time clock id of the given thread. */
171 int pthread_getcpuclockid(pthread_t __pthread, clockid_t* _Nonnull __clock);
172 
/* Returns the calling thread's value for __key, or NULL if none was set. */
173 void* _Nullable pthread_getspecific(pthread_key_t __key);
174 
/* Returns the kernel tid of the given thread (Android extension). */
175 pid_t pthread_gettid_np(pthread_t __pthread);
176 
/* Waits for the thread to exit; its return value is stored through
 * __return_value_ptr if that is non-NULL. */
177 int pthread_join(pthread_t __pthread, void* _Nullable * _Nullable __return_value_ptr);
178 
179 /**
180  * [pthread_key_create(3)](https://man7.org/linux/man-pages/man3/pthread_key_create.3p.html)
181  * creates a key for thread-specific data.
182  *
183  * There is a limit of `PTHREAD_KEYS_MAX` keys per process, but most callers
184  * should just use the C or C++ `thread_local` storage specifier anyway. When
185  * targeting new enough OS versions, the compiler will automatically use
186  * ELF TLS; when targeting old OS versions the emutls implementation will
187  * multiplex pthread keys behind the scenes, using one per library rather than
188  * one per thread-local variable. If you are implementing the runtime for a
189  * different language, you should consider similar implementation choices and
190  * avoid a direct one-to-one mapping from thread locals to pthread keys.
 *
 * If `__key_destructor` is non-NULL, POSIX requires it to be called at thread
 * exit for each thread whose value for the key is then non-NULL.
191  *
192  * Returns 0 on success and returns an error number on failure.
193  */
194 int pthread_key_create(pthread_key_t* _Nonnull __key_ptr, void (* _Nullable __key_destructor)(void* _Nullable));
195 
196 /**
197  * [pthread_key_delete(3)](https://man7.org/linux/man-pages/man3/pthread_key_delete.3p.html)
198  * deletes a key for thread-specific data.
 *
 * Per POSIX, deleting a key does not invoke its destructor; freeing any
 * outstanding per-thread values is the caller's responsibility.
199  *
200  * Returns 0 on success and returns an error number on failure.
201  */
202 int pthread_key_delete(pthread_key_t __key);
203 
/* Mutex attribute accessors. Each returns 0 on success and an error number
 * on failure. Protocol accessors (PTHREAD_PRIO_*) were added in API 28. */
204 int pthread_mutexattr_destroy(pthread_mutexattr_t* _Nonnull __attr);
205 int pthread_mutexattr_getpshared(const pthread_mutexattr_t* _Nonnull __attr, int* _Nonnull __shared);
206 int pthread_mutexattr_gettype(const pthread_mutexattr_t* _Nonnull __attr, int* _Nonnull __type);
207 
208 #if __BIONIC_AVAILABILITY_GUARD(28)
209 int pthread_mutexattr_getprotocol(const pthread_mutexattr_t* _Nonnull __attr, int* _Nonnull __protocol) __INTRODUCED_IN(28);
210 #endif /* __BIONIC_AVAILABILITY_GUARD(28) */
211 
212 int pthread_mutexattr_init(pthread_mutexattr_t* _Nonnull __attr);
213 int pthread_mutexattr_setpshared(pthread_mutexattr_t* _Nonnull __attr, int __shared);
214 int pthread_mutexattr_settype(pthread_mutexattr_t* _Nonnull __attr, int __type);
215 
216 #if __BIONIC_AVAILABILITY_GUARD(28)
217 int pthread_mutexattr_setprotocol(pthread_mutexattr_t* _Nonnull __attr, int __protocol) __INTRODUCED_IN(28);
218 #endif /* __BIONIC_AVAILABILITY_GUARD(28) */
219 
220 
221 
/* Like pthread_mutex_timedlock(), but measuring the timeout against the
 * caller-supplied clock. Added in API 30. */
222 #if __BIONIC_AVAILABILITY_GUARD(30)
223 int pthread_mutex_clocklock(pthread_mutex_t* _Nonnull __mutex, clockid_t __clock,
224                             const struct timespec* _Nullable __abstime) __INTRODUCED_IN(30);
225 #endif /* __BIONIC_AVAILABILITY_GUARD(30) */
226 
227 int pthread_mutex_destroy(pthread_mutex_t* _Nonnull __mutex);
228 int pthread_mutex_init(pthread_mutex_t* _Nonnull __mutex, const pthread_mutexattr_t* _Nullable __attr);
229 int pthread_mutex_lock(pthread_mutex_t* _Nonnull __mutex);
230 int pthread_mutex_timedlock(pthread_mutex_t* _Nonnull __mutex, const struct timespec* _Nullable __timeout);
231 
232 /*
233  * POSIX historically only supported using pthread_mutex_timedlock() with CLOCK_REALTIME, however
234  * that is typically inappropriate, since that clock can change dramatically, causing the timeout to
235  * either expire earlier or much later than intended.
236  * This function is added to use a timespec based on CLOCK_MONOTONIC that does not suffer
237  * from this issue.
238  * Note that pthread_mutex_clocklock() allows specifying an arbitrary clock and has superseded this
239  * function.
240  */
241 
242 #if __BIONIC_AVAILABILITY_GUARD(28)
243 int pthread_mutex_timedlock_monotonic_np(pthread_mutex_t* _Nonnull __mutex, const struct timespec* _Nullable __timeout)
244     __INTRODUCED_IN(28);
245 #endif /* __BIONIC_AVAILABILITY_GUARD(28) */
246 
247 int pthread_mutex_trylock(pthread_mutex_t* _Nonnull __mutex);
248 int pthread_mutex_unlock(pthread_mutex_t* _Nonnull __mutex);
249 
/* Runs __init_routine exactly once per __once object, even if called from
 * multiple threads. Returns 0 on success, an error number on failure. */
250 int pthread_once(pthread_once_t* _Nonnull __once, void (* _Nonnull __init_routine)(void));
251 
/* Read-write lock attribute accessors. The kind accessors (added in API 23)
 * take the PTHREAD_RWLOCK_PREFER_* constants defined above. */
252 int pthread_rwlockattr_init(pthread_rwlockattr_t* _Nonnull __attr);
253 int pthread_rwlockattr_destroy(pthread_rwlockattr_t* _Nonnull __attr);
254 int pthread_rwlockattr_getpshared(const pthread_rwlockattr_t* _Nonnull __attr, int* _Nonnull __shared);
255 int pthread_rwlockattr_setpshared(pthread_rwlockattr_t* _Nonnull __attr, int __shared);
256 
257 #if __BIONIC_AVAILABILITY_GUARD(23)
258 int pthread_rwlockattr_getkind_np(const pthread_rwlockattr_t* _Nonnull __attr, int* _Nonnull __kind)
259   __INTRODUCED_IN(23);
260 int pthread_rwlockattr_setkind_np(pthread_rwlockattr_t* _Nonnull __attr, int __kind) __INTRODUCED_IN(23);
261 #endif /* __BIONIC_AVAILABILITY_GUARD(23) */
262 
263 
264 
/* Timed lock variants measuring the timeout against the caller-supplied
 * clock. Added in API 30. */
265 #if __BIONIC_AVAILABILITY_GUARD(30)
266 int pthread_rwlock_clockrdlock(pthread_rwlock_t* _Nonnull __rwlock, clockid_t __clock,
267                                const struct timespec* _Nullable __timeout) __INTRODUCED_IN(30);
268 int pthread_rwlock_clockwrlock(pthread_rwlock_t* _Nonnull __rwlock, clockid_t __clock,
269                                const struct timespec* _Nullable __timeout) __INTRODUCED_IN(30);
270 #endif /* __BIONIC_AVAILABILITY_GUARD(30) */
271 
272 int pthread_rwlock_destroy(pthread_rwlock_t* _Nonnull __rwlock);
273 int pthread_rwlock_init(pthread_rwlock_t* _Nonnull __rwlock, const pthread_rwlockattr_t* _Nullable __attr);
274 int pthread_rwlock_rdlock(pthread_rwlock_t* _Nonnull __rwlock);
275 int pthread_rwlock_timedrdlock(pthread_rwlock_t* _Nonnull __rwlock, const struct timespec* _Nullable __timeout);
276 /* See the comment on pthread_mutex_timedlock_monotonic_np for usage of this function. */
277 
278 #if __BIONIC_AVAILABILITY_GUARD(28)
279 int pthread_rwlock_timedrdlock_monotonic_np(pthread_rwlock_t* _Nonnull __rwlock,
280                                             const struct timespec* _Nullable __timeout) __INTRODUCED_IN(28);
281 #endif /* __BIONIC_AVAILABILITY_GUARD(28) */
282 
283 int pthread_rwlock_timedwrlock(pthread_rwlock_t* _Nonnull __rwlock, const struct timespec* _Nullable __timeout);
284 /* See the comment on pthread_mutex_timedlock_monotonic_np for usage of this function. */
285 
286 #if __BIONIC_AVAILABILITY_GUARD(28)
287 int pthread_rwlock_timedwrlock_monotonic_np(pthread_rwlock_t* _Nonnull __rwlock,
288                                             const struct timespec* _Nullable __timeout) __INTRODUCED_IN(28);
289 #endif /* __BIONIC_AVAILABILITY_GUARD(28) */
290 
291 int pthread_rwlock_tryrdlock(pthread_rwlock_t* _Nonnull __rwlock);
292 int pthread_rwlock_trywrlock(pthread_rwlock_t* _Nonnull __rwlock);
293 int pthread_rwlock_unlock(pthread_rwlock_t* _Nonnull __rwlock);
294 int pthread_rwlock_wrlock(pthread_rwlock_t* _Nonnull __rwlock);
295 
296 
/* Barriers and spin locks were added in API 24. Each function returns 0 on
 * success and an error number on failure, except pthread_barrier_wait(),
 * which returns PTHREAD_BARRIER_SERIAL_THREAD to exactly one released thread. */
297 #if __BIONIC_AVAILABILITY_GUARD(24)
298 int pthread_barrierattr_init(pthread_barrierattr_t* _Nonnull __attr) __INTRODUCED_IN(24);
299 int pthread_barrierattr_destroy(pthread_barrierattr_t* _Nonnull __attr) __INTRODUCED_IN(24);
300 int pthread_barrierattr_getpshared(const pthread_barrierattr_t* _Nonnull __attr, int* _Nonnull __shared) __INTRODUCED_IN(24);
301 int pthread_barrierattr_setpshared(pthread_barrierattr_t* _Nonnull __attr, int __shared) __INTRODUCED_IN(24);
302 
303 int pthread_barrier_init(pthread_barrier_t* _Nonnull __barrier, const pthread_barrierattr_t* _Nullable __attr, unsigned __count) __INTRODUCED_IN(24);
304 int pthread_barrier_destroy(pthread_barrier_t* _Nonnull __barrier) __INTRODUCED_IN(24);
305 int pthread_barrier_wait(pthread_barrier_t* _Nonnull __barrier) __INTRODUCED_IN(24);
306 
307 int pthread_spin_destroy(pthread_spinlock_t* _Nonnull __spinlock) __INTRODUCED_IN(24);
308 int pthread_spin_init(pthread_spinlock_t* _Nonnull __spinlock, int __shared) __INTRODUCED_IN(24);
309 int pthread_spin_lock(pthread_spinlock_t* _Nonnull __spinlock) __INTRODUCED_IN(24);
310 int pthread_spin_trylock(pthread_spinlock_t* _Nonnull __spinlock) __INTRODUCED_IN(24);
311 int pthread_spin_unlock(pthread_spinlock_t* _Nonnull __spinlock) __INTRODUCED_IN(24);
312 #endif /* __BIONIC_AVAILABILITY_GUARD(24) */
313 
314 
/* Returns the calling thread's id; declared __attribute_const__ since the
 * result never changes for a given caller. */
315 pthread_t pthread_self(void) __attribute_const__;
316 
317 #if defined(__USE_GNU) && __BIONIC_AVAILABILITY_GUARD(26)
318 /**
319  * [pthread_getname_np(3)](https://man7.org/linux/man-pages/man3/pthread_getname_np.3.html)
320  * gets the name of the given thread.
321  * Names are at most 16 bytes (including '\0').
322  *
323  * Returns 0 on success and returns an error number on failure.
324  *
325  * Available since API level 26.
326  */
327 int pthread_getname_np(pthread_t __pthread, char* _Nonnull __buf, size_t __n) __INTRODUCED_IN(26);
328 #endif /* defined(__USE_GNU) && __BIONIC_AVAILABILITY_GUARD(26) */
329 
330 /**
331  * [pthread_setname_np(3)](https://man7.org/linux/man-pages/man3/pthread_setname_np.3.html)
332  * sets the name of the given thread.
333  * Names are at most 16 bytes (including '\0').
334  * Truncation must be done by the caller;
335  * calls with longer names will fail with ERANGE.
336  *
337  * Returns 0 on success and returns an error number on failure.
338  *
339  * This should only have been available under _GNU_SOURCE,
340  * but is always available on Android by historical accident.
341  */
342 int pthread_setname_np(pthread_t __pthread, const char* _Nonnull __name);
343 
344 /**
345  * [pthread_getaffinity_np(3)](https://man7.org/linux/man-pages/man3/pthread_getaffinity_np.3.html)
346  * gets the CPU affinity mask for the given thread.
347  *
348  * Returns 0 on success and returns an error number on failure.
349  *
350  * Available since API level 36.
351  * See sched_getaffinity() and pthread_gettid_np() for greater portability.
352  */
353 #if defined(__USE_GNU) && __BIONIC_AVAILABILITY_GUARD(36)
354 int pthread_getaffinity_np(pthread_t __pthread, size_t __cpu_set_size, cpu_set_t* _Nonnull __cpu_set) __INTRODUCED_IN(36);
355 #endif /* defined(__USE_GNU) && __BIONIC_AVAILABILITY_GUARD(36) */
356 
357 /**
358  * [pthread_setaffinity_np(3)](https://man7.org/linux/man-pages/man3/pthread_setaffinity_np.3.html)
359  * sets the CPU affinity mask for the given thread.
360  *
361  * Returns 0 on success and returns an error number on failure.
362  *
363  * Available since API level 36.
364  * See sched_setaffinity() and pthread_gettid_np() for greater portability.
365  */
366 #if defined(__USE_GNU) && __BIONIC_AVAILABILITY_GUARD(36)
367 int pthread_setaffinity_np(pthread_t __pthread, size_t __cpu_set_size, const cpu_set_t* _Nonnull __cpu_set) __INTRODUCED_IN(36);
368 #endif /* defined(__USE_GNU) && __BIONIC_AVAILABILITY_GUARD(36) */
369 
370 /**
371  * [pthread_setschedparam(3)](https://man7.org/linux/man-pages/man3/pthread_setschedparam.3.html)
372  * sets the scheduler policy and parameters of the given thread.
373  *
374  * This call is not useful to applications on Android, because they don't
375  * have permission to set their scheduling policy, and the only priority
376  * for their policy is 0 anyway. If you only need to set your scheduling
377  * priority, see setpriority() instead.
378  *
379  * Returns 0 on success and returns an error number on failure.
380  */
381 int pthread_setschedparam(pthread_t __pthread, int __policy, const struct sched_param* _Nonnull __param);
382 
383 /**
384  * [pthread_getschedparam(3)](https://man7.org/linux/man-pages/man3/pthread_getschedparam.3.html)
385  * gets the scheduler policy and parameters of the given thread.
386  *
387  * Returns 0 on success and returns an error number on failure.
388  */
389 int pthread_getschedparam(pthread_t __pthread, int* _Nonnull __policy, struct sched_param* _Nonnull __param);
390 
391 /**
392  * [pthread_setschedprio(3)](https://man7.org/linux/man-pages/man3/pthread_setschedprio.3.html)
393  * sets the scheduler priority of the given thread.
394  *
395  * This call is not useful to applications on Android, because they don't
396  * have permission to set their scheduling policy, and the only priority
397  * for their policy is 0 anyway. If you only need to set your scheduling
398  * priority, see setpriority() instead.
399  *
400  * Returns 0 on success and returns an error number on failure.
401  *
402  * Available since API level 28.
403  */
404 
405 #if __BIONIC_AVAILABILITY_GUARD(28)
406 int pthread_setschedprio(pthread_t __pthread, int __priority) __INTRODUCED_IN(28);
407 #endif /* __BIONIC_AVAILABILITY_GUARD(28) */
408 
409 
/* Associates __value with __key for the calling thread; retrieve it with
 * pthread_getspecific(). Returns 0 on success, an error number on failure. */
410 int pthread_setspecific(pthread_key_t __key, const void* _Nullable __value);
411 
/* Implementation detail of the pthread_cleanup_push()/pthread_cleanup_pop()
 * macros below; not for direct use. */
412 typedef void (* _Nullable __pthread_cleanup_func_t)(void* _Nullable);
413 
/* One cleanup record. Records form a per-thread stack, linked through
 * __cleanup_prev (presumably maintained by the two functions below —
 * the list head lives outside this header). */
414 typedef struct __pthread_cleanup_t {
415   struct __pthread_cleanup_t*   _Nullable __cleanup_prev;
416   __pthread_cleanup_func_t      _Nullable __cleanup_routine;
417   void*                         _Nullable __cleanup_arg;
418 } __pthread_cleanup_t;
419 
420 void __pthread_cleanup_push(__pthread_cleanup_t* _Nonnull c, __pthread_cleanup_func_t _Nullable, void* _Nullable);
421 void __pthread_cleanup_pop(__pthread_cleanup_t* _Nonnull, int);
422 
423 /* Believe it or not, the definitions of pthread_cleanup_push and
424  * pthread_cleanup_pop below are correct. POSIX states that these
425  * can be implemented as macros that might introduce opening and
426  * closing braces, and that using setjmp/longjmp/return/break/continue
427  * between them results in undefined behavior.
428  */
/* Note the deliberately unbalanced braces: push opens a `do {` block that the
 * matching pthread_cleanup_pop() closes, so the pair must appear in the same
 * lexical scope. The record lives on the stack of that scope. */
429 #define  pthread_cleanup_push(routine, arg)                      \
430     do {                                                         \
431         __pthread_cleanup_t  __cleanup;                          \
432         __pthread_cleanup_push( &__cleanup, (routine), (arg) );  \
433 
434 #define  pthread_cleanup_pop(execute)                  \
435         __pthread_cleanup_pop( &__cleanup, (execute)); \
436     } while (0);                                       \
437 
438 __END_DECLS
439