/*
 * Copyright (C) 2008 The Android Open Source Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <pthread.h>

#include <errno.h>
#include <string.h>
#include <sys/auxv.h>
#include <sys/mman.h>
#include <sys/prctl.h>
#include <sys/random.h>
#include <unistd.h>

#include "pthread_internal.h"

#include <async_safe/log.h>

#include "private/ScopedRWLock.h"
#include "private/bionic_constants.h"
#include "private/bionic_defs.h"
#include "private/bionic_globals.h"
#include "platform/bionic/macros.h"
#include "private/bionic_ssp.h"
#include "private/bionic_systrace.h"
#include "private/bionic_tls.h"
#include "private/ErrnoRestorer.h"

// x86 uses segment descriptors rather than a direct pointer to TLS.
#if defined(__i386__)
#include <asm/ldt.h>
void __init_user_desc(struct user_desc*, bool, void*);
#endif

__attribute__((no_stack_protector))
void __init_tcb_stack_guard(bionic_tcb* tcb) {
  // GCC looks in the TLS for the stack guard on x86, so copy it there from our global.
  tcb->tls_slot(TLS_SLOT_STACK_GUARD) = reinterpret_cast<void*>(__stack_chk_guard);
}

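// Record the thread's bionic_tls both in its pthread_internal_t and in the
// TLS_SLOT_BIONIC_TLS slot, so it can be reached from either.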
void __init_bionic_tls_ptrs(bionic_tcb* tcb, bionic_tls* tls) {
  tcb->thread()->bionic_tls = tls;
  tcb->tls_slot(TLS_SLOT_BIONIC_TLS) = tls;
}

// Allocate a temporary bionic_tls that the dynamic linker's main thread can
// use while it's loading the initial set of ELF modules.
bionic_tls* __allocate_temp_bionic_tls() {
  size_t allocation_size = __BIONIC_ALIGN(sizeof(bionic_tls), PAGE_SIZE);
  void* allocation = mmap(nullptr, allocation_size,
                          PROT_READ | PROT_WRITE,
                          MAP_PRIVATE | MAP_ANONYMOUS,
                          -1, 0);
  if (allocation == MAP_FAILED) {
    // Avoid strerror because it might need bionic_tls.
    async_safe_fatal("failed to allocate bionic_tls: error %d", errno);
  }
  return static_cast<bionic_tls*>(allocation);
}

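// Unmap a temporary bionic_tls returned by __allocate_temp_bionic_tls().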
void __free_temp_bionic_tls(bionic_tls* tls) {
  munmap(tls, __BIONIC_ALIGN(sizeof(bionic_tls), PAGE_SIZE));
}

static void __init_alternate_signal_stack(pthread_internal_t* thread) {
  // Create and set an alternate signal stack.
  void* stack_base = mmap(nullptr, SIGNAL_STACK_SIZE, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
  if (stack_base != MAP_FAILED) {
    // Create a guard to catch stack overflows in signal handlers.
    if (mprotect(stack_base, PTHREAD_GUARD_SIZE, PROT_NONE) == -1) {
      munmap(stack_base, SIGNAL_STACK_SIZE);
      return;
    }
    stack_t ss;
    ss.ss_sp = reinterpret_cast<uint8_t*>(stack_base) + PTHREAD_GUARD_SIZE;
    ss.ss_size = SIGNAL_STACK_SIZE - PTHREAD_GUARD_SIZE;
    ss.ss_flags = 0;
    sigaltstack(&ss, nullptr);
    thread->alternate_signal_stack = stack_base;

    // We can only use a statically-allocated string as the mapped region's name, because the
    // Android kernel uses the string pointer directly when dumping /proc/pid/maps.
    prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, ss.ss_sp, ss.ss_size, "thread signal stack");
  }
}

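// Set up the thread's shadow call stack (SCS). On aarch64, code compiled with
// Clang's -fsanitize=shadow-call-stack pushes return addresses onto a separate
// stack addressed by x18, so an overflow of the ordinary stack can't overwrite
// them. The SCS lives at a random offset inside a larger guard region so its
// address is harder to guess.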
static void __init_shadow_call_stack(pthread_internal_t* thread __unused) {
#ifdef __aarch64__
  // Allocate the stack and the guard region.
  char* scs_guard_region = reinterpret_cast<char*>(
      mmap(nullptr, SCS_GUARD_REGION_SIZE, 0, MAP_PRIVATE | MAP_ANON, -1, 0));
  thread->shadow_call_stack_guard_region = scs_guard_region;

  // The address is aligned to SCS_SIZE so that we only need to store the lower log2(SCS_SIZE) bits
  // in jmp_buf.
  char* scs_aligned_guard_region =
      reinterpret_cast<char*>(align_up(reinterpret_cast<uintptr_t>(scs_guard_region), SCS_SIZE));

  // We need to ensure that [scs_offset,scs_offset+SCS_SIZE) is in the guard region and that there
  // is at least one unmapped page after the shadow call stack (to catch stack overflows). We can't
  // use arc4random_uniform in init because /dev/urandom might not have been created yet.
  size_t scs_offset =
      (getpid() == 1) ? 0 : (arc4random_uniform(SCS_GUARD_REGION_SIZE / SCS_SIZE - 1) * SCS_SIZE);

  // Make the stack readable and writable and store its address in register x18. This is
  // deliberately the only place where the address is stored.
  char* scs = scs_aligned_guard_region + scs_offset;
  mprotect(scs, SCS_SIZE, PROT_READ | PROT_WRITE);
  __asm__ __volatile__("mov x18, %0" ::"r"(scs));
#endif
}

void __init_additional_stacks(pthread_internal_t* thread) {
  __init_alternate_signal_stack(thread);
  __init_shadow_call_stack(thread);
}

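// Finish initializing a pthread_internal_t before the new thread runs user
// code: record the join state and apply the requested scheduler
// policy/priority. Returns 0 on success, or an errno value that
// pthread_create() passes through to its caller.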
int __init_thread(pthread_internal_t* thread) {
  thread->cleanup_stack = nullptr;

  if (__predict_true((thread->attr.flags & PTHREAD_ATTR_FLAG_DETACHED) == 0)) {
    atomic_init(&thread->join_state, THREAD_NOT_JOINED);
  } else {
    atomic_init(&thread->join_state, THREAD_DETACHED);
  }

  // Set the scheduling policy/priority of the thread if necessary.
  bool need_set = true;
  int policy;
  sched_param param;
  if ((thread->attr.flags & PTHREAD_ATTR_FLAG_INHERIT) != 0) {
    // Unless the parent has SCHED_RESET_ON_FORK set, we've already inherited from the parent.
    policy = sched_getscheduler(0);
    need_set = ((policy & SCHED_RESET_ON_FORK) != 0);
    if (need_set) {
      if (policy == -1) {
        async_safe_format_log(ANDROID_LOG_WARN, "libc",
                              "pthread_create sched_getscheduler failed: %s", strerror(errno));
        return errno;
      }
      if (sched_getparam(0, &param) == -1) {
        async_safe_format_log(ANDROID_LOG_WARN, "libc",
                              "pthread_create sched_getparam failed: %s", strerror(errno));
        return errno;
      }
    }
  } else {
    policy = thread->attr.sched_policy;
    param.sched_priority = thread->attr.sched_priority;
  }
  // Backwards compatibility: before P, Android didn't have pthread_attr_setinheritsched,
  // and our behavior was neither of the POSIX behaviors.
  if ((thread->attr.flags & (PTHREAD_ATTR_FLAG_INHERIT|PTHREAD_ATTR_FLAG_EXPLICIT)) == 0) {
    need_set = (thread->attr.sched_policy != SCHED_NORMAL);
  }
  if (need_set) {
    if (sched_setscheduler(thread->tid, policy, &param) == -1) {
      async_safe_format_log(ANDROID_LOG_WARN, "libc",
                            "pthread_create sched_setscheduler(%d, {%d}) call failed: %s", policy,
                            param.sched_priority, strerror(errno));
#if defined(__LP64__)
      // For backwards compatibility reasons, we only report failures on 64-bit devices.
      return errno;
#endif
    }
  }

  return 0;
}


// Allocate a thread's primary mapping. This mapping includes static TLS and
// optionally a stack. Static TLS includes ELF TLS segments and the bionic_tls
// struct.
//
// stack_guard_size must be a multiple of PAGE_SIZE.
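//
// The resulting mapping, from high addresses down to mmap_base (a sketch
// derived from the code below; any page-alignment padding widens the stack):
//
//   mmap_base + mmap_size -> +--------------------------------+
//                            | guard page (PROT_NONE)         | PTHREAD_GUARD_SIZE
//                            +--------------------------------+
//                            | static TLS: ELF TLS segments,  | layout.size()
//                            | bionic_tcb, bionic_tls         |
//   static_tls, stack_top -> +--------------------------------+
//                            | stack (grows downward)         | stack_size
//                            +--------------------------------+
//                            | stack guard (PROT_NONE)        | stack_guard_size
//   mmap_base, stack_base -> +--------------------------------+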
ThreadMapping __allocate_thread_mapping(size_t stack_size, size_t stack_guard_size) {
  const StaticTlsLayout& layout = __libc_shared_globals()->static_tls_layout;

  // Allocate in order: stack guard, stack, static TLS, guard page.
  size_t mmap_size;
  if (__builtin_add_overflow(stack_size, stack_guard_size, &mmap_size)) return {};
  if (__builtin_add_overflow(mmap_size, layout.size(), &mmap_size)) return {};
  if (__builtin_add_overflow(mmap_size, PTHREAD_GUARD_SIZE, &mmap_size)) return {};

  // Align the result to a page size.
  const size_t unaligned_size = mmap_size;
  mmap_size = __BIONIC_ALIGN(mmap_size, PAGE_SIZE);
  if (mmap_size < unaligned_size) return {};

  // Create a new private anonymous map. Make the entire mapping PROT_NONE, then carve out a
  // read+write area in the middle.
  const int flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE;
  char* const space = static_cast<char*>(mmap(nullptr, mmap_size, PROT_NONE, flags, -1, 0));
  if (space == MAP_FAILED) {
    async_safe_format_log(ANDROID_LOG_WARN,
                          "libc",
                          "pthread_create failed: couldn't allocate %zu-bytes mapped space: %s",
                          mmap_size, strerror(errno));
    return {};
  }
  const size_t writable_size = mmap_size - stack_guard_size - PTHREAD_GUARD_SIZE;
  if (mprotect(space + stack_guard_size,
               writable_size,
               PROT_READ | PROT_WRITE) != 0) {
    async_safe_format_log(ANDROID_LOG_WARN, "libc",
                          "pthread_create failed: couldn't mprotect R+W %zu-byte thread mapping region: %s",
                          writable_size, strerror(errno));
    munmap(space, mmap_size);
    return {};
  }

  ThreadMapping result = {};
  result.mmap_base = space;
  result.mmap_size = mmap_size;
  result.mmap_base_unguarded = space + stack_guard_size;
  result.mmap_size_unguarded = mmap_size - stack_guard_size - PTHREAD_GUARD_SIZE;
  result.static_tls = space + mmap_size - PTHREAD_GUARD_SIZE - layout.size();
  result.stack_base = space;
  result.stack_top = result.static_tls;
  return result;
}

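// Allocate the thread's stack (unless the caller supplied one, e.g. via
// pthread_attr_setstack), carve the pthread_internal_t out of the top of the
// stack, and initialize the static TLS memory. On success, *tcbp and
// *child_stack are ready to be handed to clone(2).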
static int __allocate_thread(pthread_attr_t* attr, bionic_tcb** tcbp, void** child_stack) {
  ThreadMapping mapping;
  char* stack_top;
  bool stack_clean = false;

  if (attr->stack_base == nullptr) {
    // The caller didn't provide a stack, so allocate one.

    // Make sure the guard size is a multiple of PAGE_SIZE.
    const size_t unaligned_guard_size = attr->guard_size;
    attr->guard_size = __BIONIC_ALIGN(attr->guard_size, PAGE_SIZE);
    if (attr->guard_size < unaligned_guard_size) return EAGAIN;

    mapping = __allocate_thread_mapping(attr->stack_size, attr->guard_size);
    if (mapping.mmap_base == nullptr) return EAGAIN;

    stack_top = mapping.stack_top;
    attr->stack_base = mapping.stack_base;
    stack_clean = true;
  } else {
    mapping = __allocate_thread_mapping(0, PTHREAD_GUARD_SIZE);
    if (mapping.mmap_base == nullptr) return EAGAIN;

    stack_top = static_cast<char*>(attr->stack_base) + attr->stack_size;
  }

  // Carve out space from the stack for the thread's pthread_internal_t. This
  // memory isn't counted in pthread_attr_getstacksize.

  // To safely access the pthread_internal_t and thread stack, we need to find a 16-byte aligned boundary.
  stack_top = align_down(stack_top - sizeof(pthread_internal_t), 16);

  pthread_internal_t* thread = reinterpret_cast<pthread_internal_t*>(stack_top);
  if (!stack_clean) {
    // If thread was not allocated by mmap(), it may not have been cleared to zero.
    // So assume the worst and zero it.
    memset(thread, 0, sizeof(pthread_internal_t));
  }

  // Locate static TLS structures within the mapped region.
  const StaticTlsLayout& layout = __libc_shared_globals()->static_tls_layout;
  auto tcb = reinterpret_cast<bionic_tcb*>(mapping.static_tls + layout.offset_bionic_tcb());
  auto tls = reinterpret_cast<bionic_tls*>(mapping.static_tls + layout.offset_bionic_tls());

  // Initialize TLS memory.
  __init_static_tls(mapping.static_tls);
  __init_tcb(tcb, thread);
  __init_tcb_dtv(tcb);
  __init_tcb_stack_guard(tcb);
  __init_bionic_tls_ptrs(tcb, tls);

  attr->stack_size = stack_top - static_cast<char*>(attr->stack_base);
  thread->attr = *attr;
  thread->mmap_base = mapping.mmap_base;
  thread->mmap_size = mapping.mmap_size;
  thread->mmap_base_unguarded = mapping.mmap_base_unguarded;
  thread->mmap_size_unguarded = mapping.mmap_size_unguarded;
  thread->stack_top = reinterpret_cast<uintptr_t>(stack_top);

  *tcbp = tcb;
  *child_stack = stack_top;
  return 0;
}

void __set_stack_and_tls_vma_name(bool is_main_thread) {
  // Name the thread's stack-and-tls area to help with debugging. This mapped area also includes
  // static TLS data, which is typically a few pages (e.g. bionic_tls).
  pthread_internal_t* thread = __get_thread();
  const char* name;
  if (is_main_thread) {
    name = "stack_and_tls:main";
  } else {
    // The kernel doesn't copy the name string, but this variable will last at least as long as the
    // mapped area. The mapped area's VMAs are unmapped with a single call to munmap.
    auto& name_buffer = thread->vma_name_buffer;
    static_assert(arraysize(name_buffer) >= arraysize("stack_and_tls:") + 11 + 1);
    async_safe_format_buffer(name_buffer, arraysize(name_buffer), "stack_and_tls:%d", thread->tid);
    name = name_buffer;
  }
  prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, thread->mmap_base_unguarded, thread->mmap_size_unguarded,
        name);
}

extern "C" int __rt_sigprocmask(int, const sigset64_t*, sigset64_t*, size_t);

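// The trampoline that clone(2) starts the new thread in: wait for the creating
// thread to publish the pthread_t, finish per-thread setup, then run the
// user's start routine.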
__attribute__((no_sanitize("hwaddress")))
#ifdef __aarch64__
// This function doesn't return, but it does appear in stack traces. Avoid using return PAC in this
// function because we may end up resetting IA, which may confuse unwinders due to mismatching keys.
__attribute__((target("branch-protection=bti")))
#endif
static int __pthread_start(void* arg) {
  pthread_internal_t* thread = reinterpret_cast<pthread_internal_t*>(arg);

  __hwasan_thread_enter();

  // Wait for our creating thread to release us. This lets it have time to
  // notify gdb about this thread before we start doing anything.
  // This also provides the memory barrier needed to ensure that all memory
  // accesses previously made by the creating thread are visible to us.
  thread->startup_handshake_lock.lock();

  __set_stack_and_tls_vma_name(false);
  __init_additional_stacks(thread);
  __rt_sigprocmask(SIG_SETMASK, &thread->start_mask, nullptr, sizeof(thread->start_mask));
#ifdef __aarch64__
  // Chrome's sandbox prevents this prctl, so only reset IA if the target SDK level is high enough.
  // Furthermore, processes loaded from vendor partitions may have their own sandboxes that would
  // reject the prctl. Because no devices launched with PAC enabled before S, we can avoid issues on
  // upgrading devices by checking for PAC support before issuing the prctl.
  static const bool pac_supported = getauxval(AT_HWCAP) & HWCAP_PACA;
  if (pac_supported && android_get_application_target_sdk_version() >= __ANDROID_API_S__) {
    prctl(PR_PAC_RESET_KEYS, PR_PAC_APIAKEY, 0, 0, 0);
  }
#endif

  void* result = thread->start_routine(thread->start_routine_arg);
  pthread_exit(result);

  return 0;
}

// A no-op start routine for pthread_create failures where we've created a thread but aren't
// going to run user code on it. We swap out the user's start routine for this and take advantage
// of the regular thread teardown to free up resources.
static void* __do_nothing(void*) {
  return nullptr;
}

pthread_rwlock_t g_thread_creation_lock = PTHREAD_RWLOCK_INITIALIZER;

__BIONIC_WEAK_FOR_NATIVE_BRIDGE
int pthread_create(pthread_t* thread_out, pthread_attr_t const* attr,
                   void* (*start_routine)(void*), void* arg) {
  ErrnoRestorer errno_restorer;

  pthread_attr_t thread_attr;
  ScopedTrace trace("pthread_create");
  if (attr == nullptr) {
    pthread_attr_init(&thread_attr);
  } else {
    thread_attr = *attr;
    attr = nullptr; // Prevent misuse below.
  }

  bionic_tcb* tcb = nullptr;
  void* child_stack = nullptr;
  int result = __allocate_thread(&thread_attr, &tcb, &child_stack);
  if (result != 0) {
    return result;
  }

  pthread_internal_t* thread = tcb->thread();

  // Create a lock for the thread to wait on once it starts so we can keep
  // it from doing anything until after we notify the debugger about it.
  //
  // This also provides the memory barrier we need to ensure that all
  // memory accesses previously performed by this thread are visible to
  // the new thread.
  thread->startup_handshake_lock.init(false);
  thread->startup_handshake_lock.lock();

  thread->start_routine = start_routine;
  thread->start_routine_arg = arg;

  thread->set_cached_pid(getpid());

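  // These clone(2) flags make the child a thread: it shares the address space,
  // filesystem context, file descriptor table, signal handlers, and System V
  // semaphore undo lists with its creator. CLONE_SETTLS installs the TLS
  // pointer, and CLONE_PARENT_SETTID/CLONE_CHILD_CLEARTID keep thread->tid
  // usable as the futex that pthread_join() waits on.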
  int flags = CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM |
      CLONE_SETTLS | CLONE_PARENT_SETTID | CLONE_CHILD_CLEARTID;
  void* tls = &tcb->tls_slot(0);
#if defined(__i386__)
  // On x86 (but not x86-64), CLONE_SETTLS takes a pointer to a struct user_desc rather than
  // a pointer to the TLS itself.
  user_desc tls_descriptor;
  __init_user_desc(&tls_descriptor, false, tls);
  tls = &tls_descriptor;
#endif

  ScopedReadLock locker(&g_thread_creation_lock);

  sigset64_t block_all_mask;
  sigfillset64(&block_all_mask);
  __rt_sigprocmask(SIG_SETMASK, &block_all_mask, &thread->start_mask, sizeof(thread->start_mask));
  int rc = clone(__pthread_start, child_stack, flags, thread, &(thread->tid), tls, &(thread->tid));
  __rt_sigprocmask(SIG_SETMASK, &thread->start_mask, nullptr, sizeof(thread->start_mask));
  if (rc == -1) {
    int clone_errno = errno;
    // We don't have to unlock the mutex at all because clone(2) failed so there's no child waiting to
    // be unblocked, but we're about to unmap the memory the mutex is stored in, so this serves as a
    // reminder that you can't rewrite this function to use a ScopedPthreadMutexLocker.
    thread->startup_handshake_lock.unlock();
    if (thread->mmap_size != 0) {
      munmap(thread->mmap_base, thread->mmap_size);
    }
    async_safe_format_log(ANDROID_LOG_WARN, "libc", "pthread_create failed: clone failed: %s",
                          strerror(clone_errno));
    return clone_errno;
  }

  int init_errno = __init_thread(thread);
  if (init_errno != 0) {
    // Mark the thread detached and replace its start_routine with a no-op.
    // Letting the thread run is the easiest way to clean up its resources.
    atomic_store(&thread->join_state, THREAD_DETACHED);
    __pthread_internal_add(thread);
    thread->start_routine = __do_nothing;
    thread->startup_handshake_lock.unlock();
    return init_errno;
  }

  // Publish the pthread_t and unlock the mutex to let the new thread start running.
  *thread_out = __pthread_internal_add(thread);
  thread->startup_handshake_lock.unlock();

  return 0;
}
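
// A minimal usage sketch (illustration only, not part of bionic). Note that
// pthread_create() returns 0 or an errno value directly (never -1/errno), and
// ErrnoRestorer above guarantees the caller's errno is left unchanged:
//
//   static void* worker(void* arg) {
//     return arg;  // Retrievable by pthread_join().
//   }
//
//   void example() {
//     pthread_t t;
//     int rc = pthread_create(&t, nullptr, worker, nullptr);
//     if (rc != 0) { /* e.g. EAGAIN if thread allocation failed. */ }
//     void* result;
//     pthread_join(t, &result);
//   }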