/*
 * Copyright (C) 2008 The Android Open Source Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "pthread_internal.h"

#include <errno.h>
#include <inttypes.h>
#include <semaphore.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

#include <async_safe/log.h>
#include <bionic/reserved_signals.h>

#include "private/ErrnoRestorer.h"
#include "private/ScopedRWLock.h"
#include "private/bionic_futex.h"
#include "private/bionic_globals.h"
#include "private/bionic_tls.h"

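// The global thread list: an intrusive doubly-linked list of every live
// thread in the process; new threads are pushed onto the head. Readers
// (like __pthread_internal_find below) take g_thread_list_lock shared,
// and anything that mutates the list takes it exclusive.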
static pthread_internal_t* g_thread_list = nullptr;
static pthread_rwlock_t g_thread_list_lock = PTHREAD_RWLOCK_INITIALIZER;

pthread_t __pthread_internal_add(pthread_internal_t* thread) {
  ScopedWriteLock locker(&g_thread_list_lock);

  // We insert at the head.
  thread->next = g_thread_list;
  thread->prev = nullptr;
  if (thread->next != nullptr) {
    thread->next->prev = thread;
  }
  g_thread_list = thread;
  return reinterpret_cast<pthread_t>(thread);
}

void __pthread_internal_remove(pthread_internal_t* thread) {
  ScopedWriteLock locker(&g_thread_list_lock);

  if (thread->next != nullptr) {
    thread->next->prev = thread->prev;
  }
  if (thread->prev != nullptr) {
    thread->prev->next = thread->next;
  } else {
    g_thread_list = thread->next;
  }
}

static void __pthread_internal_free(pthread_internal_t* thread) {
  if (thread->mmap_size != 0) {
    // Free mapped space, including thread stack and pthread_internal_t.
    munmap(thread->mmap_base, thread->mmap_size);
  }
}

void __pthread_internal_remove_and_free(pthread_internal_t* thread) {
  __pthread_internal_remove(thread);
  __pthread_internal_free(thread);
}

pid_t __pthread_internal_gettid(pthread_t thread_id, const char* caller) {
  pthread_internal_t* thread = __pthread_internal_find(thread_id, caller);
  return thread ? thread->tid : -1;
}

pthread_internal_t* __pthread_internal_find(pthread_t thread_id, const char* caller) {
  pthread_internal_t* thread = reinterpret_cast<pthread_internal_t*>(thread_id);

  // Check if we're looking for ourselves before acquiring the lock.
  if (thread == __get_thread()) return thread;

  {
    // Make sure to release the lock before the abort below. Otherwise,
    // some apps might deadlock in their own crash handlers (see b/6565627).
    ScopedReadLock locker(&g_thread_list_lock);
    for (pthread_internal_t* t = g_thread_list; t != nullptr; t = t->next) {
      if (t == thread) return thread;
    }
  }

  // Historically we'd return null, but from API level 26 we catch this error.
  if (android_get_application_target_sdk_version() >= 26) {
    if (thread == nullptr) {
      // This seems to be a common mistake, and it's relatively harmless because
      // there will never be a valid thread at address 0, whereas other invalid
      // addresses might sometimes contain threads or things that look enough like
      // threads for us to do some real damage by continuing.
      // TODO: try getting rid of this when Treble lets us keep vendor blobs on an old API level.
      async_safe_format_log(ANDROID_LOG_WARN, "libc", "invalid pthread_t (0) passed to %s", caller);
    } else {
      async_safe_fatal("invalid pthread_t %p passed to %s", thread, caller);
    }
  }
  return nullptr;
}
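
// A sketch of how a caller typically uses the lookup above, with a
// hypothetical pthread_kill-style wrapper (illustration only, not the
// actual bionic implementation):
//
//   int kill_thread(pthread_t t, int sig) {
//     ErrnoRestorer errno_restorer;
//     pid_t tid = __pthread_internal_gettid(t, "kill_thread");
//     if (tid == -1) return ESRCH;
//     return (tgkill(getpid(), tid, sig) == -1) ? errno : 0;
//   }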

static uintptr_t __get_main_stack_startstack() {
  FILE* fp = fopen("/proc/self/stat", "re");
  if (fp == nullptr) {
    async_safe_fatal("couldn't open /proc/self/stat: %m");
  }

  char line[BUFSIZ];
  if (fgets(line, sizeof(line), fp) == nullptr) {
    async_safe_fatal("couldn't read /proc/self/stat: %m");
  }

  fclose(fp);

  // See man 5 proc. There's no reason comm can't contain ' ' or ')',
  // so we search backwards for the end of it. We're looking for this field:
  //
  //  startstack %lu (28) The address of the start (i.e., bottom) of the stack.
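  //
  // After the final ')', the sscanf below skips 25 fields using
  // assignment-suppressed conversions and reads the 26th: startstack.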
  uintptr_t startstack = 0;
  const char* end_of_comm = strrchr(line, ')');
  if (end_of_comm == nullptr ||
      sscanf(end_of_comm + 1,
             " %*c "
             "%*d %*d %*d %*d %*d "
             "%*u %*u %*u %*u %*u %*u %*u "
             "%*d %*d %*d %*d %*d %*d "
             "%*u %*u %*d %*u %*u %*u %" SCNuPTR,
             &startstack) != 1) {
    async_safe_fatal("couldn't parse /proc/self/stat");
  }

  return startstack;
}

void __find_main_stack_limits(uintptr_t* low, uintptr_t* high) {
  // Ask the kernel where our main thread's stack started.
  uintptr_t startstack = __get_main_stack_startstack();

  // Hunt for the region that contains that address.
  FILE* fp = fopen("/proc/self/maps", "re");
  if (fp == nullptr) {
    async_safe_fatal("couldn't open /proc/self/maps: %m");
  }
  char line[BUFSIZ];
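  // Each line starts with the mapping's address range, e.g.
  // "7ffc59e0f000-7ffc59e31000 rw-p 00000000 00:00 0 [stack]" (an
  // illustrative line); the leading address pair is all we need.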
  while (fgets(line, sizeof(line), fp) != nullptr) {
    uintptr_t lo, hi;
    if (sscanf(line, "%" SCNxPTR "-%" SCNxPTR, &lo, &hi) == 2) {
      if (lo <= startstack && startstack <= hi) {
        *low = lo;
        *high = hi;
        fclose(fp);
        return;
      }
    }
  }
  async_safe_fatal("stack not found in /proc/self/maps");
}

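// Remaps all existing thread stacks with PROT_MTE when stack tagging is
// enabled after startup. __libc_memtag_stack is flipped first so the work
// happens only once; g_thread_creation_lock keeps new threads from being
// created mid-walk, and the list lock keeps the walk itself stable.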
void __pthread_internal_remap_stack_with_mte() {
#if defined(__aarch64__)
  // If the process doesn't have MTE enabled, we don't need to do anything.
  if (!atomic_load(&__libc_globals->memtag)) return;
  bool prev = atomic_exchange(&__libc_memtag_stack, true);
  if (prev) return;
  uintptr_t lo, hi;
  __find_main_stack_limits(&lo, &hi);

  if (mprotect(reinterpret_cast<void*>(lo), hi - lo,
               PROT_READ | PROT_WRITE | PROT_MTE | PROT_GROWSDOWN)) {
    async_safe_fatal("error: failed to set PROT_MTE on main thread");
  }
  ScopedWriteLock creation_locker(&g_thread_creation_lock);
  ScopedReadLock list_locker(&g_thread_list_lock);
  for (pthread_internal_t* t = g_thread_list; t != nullptr; t = t->next) {
    if (t->terminating || t->is_main()) continue;
    if (mprotect(t->mmap_base_unguarded, t->mmap_size_unguarded,
                 PROT_READ | PROT_WRITE | PROT_MTE)) {
      async_safe_fatal("error: failed to set PROT_MTE on thread: %d", t->tid);
    }
  }
#endif
}

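// Runs `func` once on every live thread: directly on the calling thread, and
// via BIONIC_SIGNAL_RUN_ON_ALL_THREADS on all the others. Each handler posts
// g_sem when it finishes, and we wait for one post per successfully signalled
// thread, so every call has completed by the time we return. The result is
// true only if every call succeeded and every signal was delivered. (The
// static state below is safe because g_thread_creation_lock, held for the
// whole function, serializes concurrent callers.)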
bool android_run_on_all_threads(bool (*func)(void*), void* arg) {
  // Take the locks in this order to avoid inversion (pthread_create ->
  // __pthread_internal_add).
  ScopedWriteLock creation_locker(&g_thread_creation_lock);
  ScopedReadLock list_locker(&g_thread_list_lock);

  // Call the function directly for the current thread so that we don't need to worry about
  // the consequences of synchronizing with ourselves.
  if (!func(arg)) {
    return false;
  }

  static sem_t g_sem;
  if (sem_init(&g_sem, 0, 0) != 0) {
    return false;
  }

  static bool (*g_func)(void*);
  static void* g_arg;
  g_func = func;
  g_arg = arg;

  static _Atomic(bool) g_retval;
  atomic_init(&g_retval, true);

  auto handler = [](int, siginfo_t*, void*) {
    ErrnoRestorer restorer;
    if (!g_func(g_arg)) {
      atomic_store(&g_retval, false);
    }
    sem_post(&g_sem);
  };

  struct sigaction act = {}, oldact;
  act.sa_flags = SA_SIGINFO;
  act.sa_sigaction = handler;
  sigfillset(&act.sa_mask);
  if (sigaction(BIONIC_SIGNAL_RUN_ON_ALL_THREADS, &act, &oldact) != 0) {
    sem_destroy(&g_sem);
    return false;
  }

  pid_t my_pid = getpid();
  size_t num_tids = 0;
  for (pthread_internal_t* t = g_thread_list; t != nullptr; t = t->next) {
    // The function was called directly for the current thread above, so there's
    // no need to signal ourselves here.
    if (t == __get_thread()) continue;

    // If a thread is terminating (and so has signals blocked) or has already
    // terminated, our signal will never be received, so skip any such thread.
    if (atomic_load(&t->terminating)) continue;

    if (tgkill(my_pid, t->tid, BIONIC_SIGNAL_RUN_ON_ALL_THREADS) == 0) {
      ++num_tids;
    } else {
      atomic_store(&g_retval, false);
    }
  }

  for (size_t i = 0; i != num_tids; ++i) {
    if (TEMP_FAILURE_RETRY(sem_wait(&g_sem)) != 0) {
      atomic_store(&g_retval, false);
      break;
    }
  }

  sigaction(BIONIC_SIGNAL_RUN_ON_ALL_THREADS, &oldact, nullptr);
  sem_destroy(&g_sem);
  return atomic_load(&g_retval);
}
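
// A minimal usage sketch (hypothetical callback; illustration only):
//
//   static bool count_thread(void* arg) {
//     __atomic_fetch_add(static_cast<size_t*>(arg), 1, __ATOMIC_RELAXED);
//     return true;  // Returning false makes the whole run report failure.
//   }
//
//   size_t count = 0;
//   bool ok = android_run_on_all_threads(count_thread, &count);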