/*
 * Copyright (C) 2008 The Android Open Source Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "pthread_internal.h"

#include <errno.h>
#include <inttypes.h>
#include <semaphore.h>
#include <signal.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/prctl.h>
#include <sys/types.h>
#include <unistd.h>

#include <async_safe/log.h>
#include <bionic/mte.h>
#include <bionic/reserved_signals.h>
#include <bionic/tls_defines.h>

#include "private/ErrnoRestorer.h"
#include "private/ScopedRWLock.h"
#include "private/bionic_futex.h"
#include "private/bionic_globals.h"
#include "private/bionic_tls.h"

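// The global thread list: a doubly-linked list of every live thread's
// pthread_internal_t, guarded by g_thread_list_lock. Threads are added at
// creation and removed when they exit detached or are joined.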
static pthread_internal_t* g_thread_list = nullptr;
static pthread_rwlock_t g_thread_list_lock = PTHREAD_RWLOCK_INITIALIZER;

pthread_t __pthread_internal_add(pthread_internal_t* thread) {
  ScopedWriteLock locker(&g_thread_list_lock);

  // We insert at the head.
  thread->next = g_thread_list;
  thread->prev = nullptr;
  if (thread->next != nullptr) {
    thread->next->prev = thread;
  }
  g_thread_list = thread;
  return reinterpret_cast<pthread_t>(thread);
}

void __pthread_internal_remove(pthread_internal_t* thread) {
  ScopedWriteLock locker(&g_thread_list_lock);

  if (thread->next != nullptr) {
    thread->next->prev = thread->prev;
  }
  if (thread->prev != nullptr) {
    thread->prev->next = thread->next;
  } else {
    g_thread_list = thread->next;
  }
}

static void __pthread_internal_free(pthread_internal_t* thread) {
  if (thread->mmap_size != 0) {
    // Free mapped space, including thread stack and pthread_internal_t.
    munmap(thread->mmap_base, thread->mmap_size);
  }
}

void __pthread_internal_remove_and_free(pthread_internal_t* thread) {
  __pthread_internal_remove(thread);
  __pthread_internal_free(thread);
}

pid_t __pthread_internal_gettid(pthread_t thread_id, const char* caller) {
  pthread_internal_t* thread = __pthread_internal_find(thread_id, caller);
  return thread ? thread->tid : -1;
}

pthread_internal_t* __pthread_internal_find(pthread_t thread_id, const char* caller) {
  pthread_internal_t* thread = reinterpret_cast<pthread_internal_t*>(thread_id);

  // Check if we're looking for ourselves before acquiring the lock.
  if (thread == __get_thread()) return thread;

  {
    // Make sure to release the lock before the abort below. Otherwise,
    // some apps might deadlock in their own crash handlers (see b/6565627).
    ScopedReadLock locker(&g_thread_list_lock);
    for (pthread_internal_t* t = g_thread_list; t != nullptr; t = t->next) {
      if (t == thread) return thread;
    }
  }

  // Historically we'd return null, but from API level 26 we catch this error.
  if (android_get_application_target_sdk_version() >= 26) {
    if (thread == nullptr) {
      // This seems to be a common mistake, and it's relatively harmless because
      // there will never be a valid thread at address 0, whereas other invalid
      // addresses might sometimes contain threads or things that look enough like
      // threads for us to do some real damage by continuing.
      // TODO: try getting rid of this when Treble lets us keep vendor blobs on an old API level.
      async_safe_format_log(ANDROID_LOG_WARN, "libc", "invalid pthread_t (0) passed to %s", caller);
    } else {
      async_safe_fatal("invalid pthread_t %p passed to %s", thread, caller);
    }
  }
  return nullptr;
}

static uintptr_t __get_main_stack_startstack() {
  FILE* fp = fopen("/proc/self/stat", "re");
  if (fp == nullptr) {
    async_safe_fatal("couldn't open /proc/self/stat: %m");
  }

  char line[BUFSIZ];
  if (fgets(line, sizeof(line), fp) == nullptr) {
    async_safe_fatal("couldn't read /proc/self/stat: %m");
  }

  fclose(fp);

  // See man 5 proc. There's no reason comm can't contain ' ' or ')',
  // so we search backwards for the end of it. We're looking for this field:
  //
  // startstack %lu (28) The address of the start (i.e., bottom) of the stack.
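  //
  // A stat line looks like "1234 (comm) R ppid pgrp ..."; the scan below
  // skips fields 3-27 so that the first (and only) converted value is
  // field 28, startstack.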
  uintptr_t startstack = 0;
  const char* end_of_comm = strrchr(line, ')');
  if (end_of_comm == nullptr) {
    async_safe_fatal("couldn't parse /proc/self/stat");
  }
  if (sscanf(end_of_comm + 1,
             " %*c "
             "%*d %*d %*d %*d %*d "
             "%*u %*u %*u %*u %*u %*u %*u "
             "%*d %*d %*d %*d %*d %*d "
             "%*u %*u %*d %*u %*u %*u %" SCNuPTR,
             &startstack) != 1) {
    async_safe_fatal("couldn't parse /proc/self/stat");
  }

  return startstack;
}

void __find_main_stack_limits(uintptr_t* low, uintptr_t* high) {
  // Ask the kernel where our main thread's stack started.
  uintptr_t startstack = __get_main_stack_startstack();

  // Hunt for the region that contains that address.
  FILE* fp = fopen("/proc/self/maps", "re");
  if (fp == nullptr) {
    async_safe_fatal("couldn't open /proc/self/maps: %m");
  }
  char line[BUFSIZ];
  while (fgets(line, sizeof(line), fp) != nullptr) {
    uintptr_t lo, hi;
    if (sscanf(line, "%" SCNxPTR "-%" SCNxPTR, &lo, &hi) == 2) {
      if (lo <= startstack && startstack <= hi) {
        *low = lo;
        *high = hi;
        fclose(fp);
        return;
      }
    }
  }
  async_safe_fatal("stack not found in /proc/self/maps");
}

#if defined(__aarch64__)
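// Allocates the ring buffer that records stack history for MTE stack tagging
// error reports. 'n' selects the size class of the buffer, and the VMA name
// makes the mapping identifiable in /proc/self/maps.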
__LIBC_HIDDEN__ void* __allocate_stack_mte_ringbuffer(size_t n, pthread_internal_t* thread) {
  const char* name;
  if (thread == nullptr) {
    name = "stack_mte_ring:main";
  } else {
    // The kernel doesn't copy the name string, but this variable will last at least as long as the
    // mapped area. We unmap the ring buffer before unmapping the rest of the thread storage.
    auto& name_buffer = thread->stack_mte_ringbuffer_vma_name_buffer;
    static_assert(arraysize(name_buffer) >= arraysize("stack_mte_ring:") + 11 + 1);
    async_safe_format_buffer(name_buffer, arraysize(name_buffer), "stack_mte_ring:%d", thread->tid);
    name = name_buffer;
  }
  void* ret = stack_mte_ringbuffer_allocate(n, name);
  if (!ret) async_safe_fatal("error: failed to allocate stack mte ring buffer");
  return ret;
}
#endif

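// Remaps all existing thread stacks with PROT_MTE when the process starts
// requiring stack MTE after startup (for example, when dlopen()ing a library
// built with memtag-stack). Returns true if the stacks were remapped.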
bool __pthread_internal_remap_stack_with_mte() {
#if defined(__aarch64__)
  ScopedWriteLock creation_locker(&g_thread_creation_lock);
  ScopedReadLock list_locker(&g_thread_list_lock);
  // If the process already uses the memtag-stack ABI, we don't need to do anything.
  if (__libc_memtag_stack_abi) return false;
  __libc_memtag_stack_abi = true;

  for (pthread_internal_t* t = g_thread_list; t != nullptr; t = t->next) {
    // should_allocate_stack_mte_ringbuffer indicates the thread is already
    // aware that this process requires stack MTE, and will allocate the
    // ring buffer in __pthread_start.
    if (t->terminating || t->should_allocate_stack_mte_ringbuffer) continue;
    t->bionic_tcb->tls_slot(TLS_SLOT_STACK_MTE) =
        __allocate_stack_mte_ringbuffer(0, t->is_main() ? nullptr : t);
  }
  if (!atomic_load(&__libc_globals->memtag)) return false;
  if (atomic_exchange(&__libc_memtag_stack, true)) return false;
  uintptr_t lo, hi;
  __find_main_stack_limits(&lo, &hi);

  if (mprotect(reinterpret_cast<void*>(lo), hi - lo,
               PROT_READ | PROT_WRITE | PROT_MTE | PROT_GROWSDOWN)) {
    async_safe_fatal("error: failed to set PROT_MTE on main thread");
  }
  for (pthread_internal_t* t = g_thread_list; t != nullptr; t = t->next) {
    if (t->terminating || t->is_main()) continue;
    if (mprotect(t->mmap_base_unguarded, t->mmap_size_unguarded,
                 PROT_READ | PROT_WRITE | PROT_MTE)) {
      async_safe_fatal("error: failed to set PROT_MTE on thread: %d", t->tid);
    }
  }
  return true;
#else
  return false;
#endif  // defined(__aarch64__)
}

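// Runs 'func' on every thread in the process: directly on the calling thread,
// and via BIONIC_SIGNAL_RUN_ON_ALL_THREADS on all others. Returns true only
// if every invocation returns true.
//
// An illustrative (hypothetical) caller -- on other threads the callback runs
// in signal context, so it must only do async-signal-safe work:
//
//   bool nudge(void*) { /* async-signal-safe work only */ return true; }
//   bool all_ok = android_run_on_all_threads(nudge, nullptr);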
bool android_run_on_all_threads(bool (*func)(void*), void* arg) {
  // Take the locks in this order to avoid inversion (pthread_create ->
  // __pthread_internal_add).
  ScopedWriteLock creation_locker(&g_thread_creation_lock);
  ScopedReadLock list_locker(&g_thread_list_lock);

  // Call the function directly for the current thread so that we don't need to worry about
  // the consequences of synchronizing with ourselves.
  if (!func(arg)) {
    return false;
  }

  static sem_t g_sem;
  if (sem_init(&g_sem, 0, 0) != 0) {
    return false;
  }

  static bool (*g_func)(void*);
  static void* g_arg;
  g_func = func;
  g_arg = arg;

  // Reset to true on every call: the static would otherwise keep a stale
  // 'false' from a previous run.
  static _Atomic(bool) g_retval;
  atomic_init(&g_retval, true);

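  // The handler runs on each signaled thread; it may call only
  // async-signal-safe functions (sem_post() is on POSIX's
  // async-signal-safe list).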
  auto handler = [](int, siginfo_t*, void*) {
    ErrnoRestorer restorer;
    if (!g_func(g_arg)) {
      atomic_store(&g_retval, false);
    }
    sem_post(&g_sem);
  };

  struct sigaction act = {}, oldact;
  act.sa_flags = SA_SIGINFO;
  act.sa_sigaction = handler;
  sigfillset(&act.sa_mask);
  if (sigaction(BIONIC_SIGNAL_RUN_ON_ALL_THREADS, &act, &oldact) != 0) {
    sem_destroy(&g_sem);
    return false;
  }

  pid_t my_pid = getpid();
  size_t num_tids = 0;
  for (pthread_internal_t* t = g_thread_list; t != nullptr; t = t->next) {
    // The function is called directly for the current thread above, so no need to send a signal to
    // ourselves to call it here.
    if (t == __get_thread()) continue;

    // If a thread is terminating (has blocked signals) or has already terminated, our signal will
    // never be received, so we need to check for that condition and skip the thread if it is the
    // case.
    if (atomic_load(&t->terminating)) continue;

    if (tgkill(my_pid, t->tid, BIONIC_SIGNAL_RUN_ON_ALL_THREADS) == 0) {
      ++num_tids;
    } else {
      atomic_store(&g_retval, false);
    }
  }

  for (size_t i = 0; i != num_tids; ++i) {
    if (TEMP_FAILURE_RETRY(sem_wait(&g_sem)) != 0) {
      atomic_store(&g_retval, false);
      break;
    }
  }

  sigaction(BIONIC_SIGNAL_RUN_ON_ALL_THREADS, &oldact, nullptr);
  sem_destroy(&g_sem);
  return atomic_load(&g_retval);
}