/*
 * Copyright (C) 2019 The Android Open Source Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "private/bionic_elf_tls.h"

#include <async_safe/CHECK.h>
#include <async_safe/log.h>
#include <string.h>
#include <sys/param.h>
#include <unistd.h>

#include "platform/bionic/macros.h"
#include "private/ScopedRWLock.h"
#include "private/ScopedSignalBlocker.h"
#include "private/bionic_globals.h"
#include "private/bionic_tls.h"
#include "pthread_internal.h"

// Every call to __tls_get_addr needs to check the generation counter, so
// accesses to the counter need to be as fast as possible. Keep a copy of it in
// a hidden variable, which can be accessed without using the GOT. The linker
// will update this variable when it updates its counter.
//
// To allow the linker to update this variable, libc.so's constructor passes its
// address to the linker. To accommodate a possible __tls_get_addr call before
// libc.so's constructor, this local copy is initialized to SIZE_MAX, forcing
// __tls_get_addr to initially use the slow path.
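//
// A sketch of that handoff (the helper name below is hypothetical, shown only
// to make the direction of the registration clear; the real plumbing goes
// through __libc_shared_globals() and the linker's startup code):
//   __attribute__((constructor)) static void register_generation_copy() {
//     __linker_note_generation_copy(&__libc_tls_generation_copy);  // hypothetical
//   }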
__LIBC_HIDDEN__ _Atomic(size_t) __libc_tls_generation_copy = SIZE_MAX;

// Search for a TLS segment in the given phdr table. Returns true if it has a
// TLS segment and false otherwise.
bool __bionic_get_tls_segment(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                              ElfW(Addr) load_bias, TlsSegment* out) {
  for (size_t i = 0; i < phdr_count; ++i) {
    const ElfW(Phdr)& phdr = phdr_table[i];
    if (phdr.p_type == PT_TLS) {
      *out = TlsSegment{
          phdr.p_memsz,
          phdr.p_align,
          reinterpret_cast<void*>(load_bias + phdr.p_vaddr),
          phdr.p_filesz,
      };
      return true;
    }
  }
  return false;
}
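
// Example use (a sketch; any dl_iterate_phdr callback sees the same fields the
// loader works with here):
//   int count_tls_modules(dl_phdr_info* info, size_t, void* data) {
//     TlsSegment seg;
//     if (__bionic_get_tls_segment(info->dlpi_phdr, info->dlpi_phnum,
//                                  info->dlpi_addr, &seg)) {
//       // seg.size, seg.alignment, seg.init_ptr, and seg.init_size are valid.
//     }
//     return 0;
//   }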

// Return true if the alignment of a TLS segment is a valid power-of-two. Also
// cap the alignment if it's too high.
bool __bionic_check_tls_alignment(size_t* alignment) {
  // N.B. The size does not need to be a multiple of the alignment. With
  // ld.bfd (or after using binutils' strip), the TLS segment's size isn't
  // rounded up.
  if (*alignment == 0 || !powerof2(*alignment)) {
    return false;
  }
  // Bionic only respects TLS alignment up to one page.
  *alignment = MIN(*alignment, PAGE_SIZE);
  return true;
}
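
// For example: an alignment of 48 is rejected because 48 is not a power of
// two, while an alignment of 16384 passes the check but is capped to
// PAGE_SIZE (4096 on typical configurations).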

size_t StaticTlsLayout::offset_thread_pointer() const {
  return offset_bionic_tcb_ + (-MIN_TLS_SLOT * sizeof(void*));
}
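
// For example, if MIN_TLS_SLOT were -1 (the real value is per-architecture;
// see bionic_asm_tls.h), the thread pointer would sit sizeof(void*) bytes past
// the start of the bionic_tcb allocation, leaving room for exactly one slot
// below it.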

// Reserves space for the Bionic TCB and the executable's TLS segment. Returns
// the offset of the executable's TLS segment.
size_t StaticTlsLayout::reserve_exe_segment_and_tcb(const TlsSegment* exe_segment,
                                                    const char* progname __attribute__((unused))) {
  // Special case: if the executable has no TLS segment, then just allocate a
  // TCB and skip the minimum alignment check on ARM.
  if (exe_segment == nullptr) {
    offset_bionic_tcb_ = reserve_type<bionic_tcb>();
    return 0;
  }

#if defined(__arm__) || defined(__aarch64__)

  // First reserve enough space for the TCB before the executable segment.
  reserve(sizeof(bionic_tcb), 1);

  // Then reserve the segment itself.
  const size_t result = reserve(exe_segment->size, exe_segment->alignment);

  // The variant 1 ABI that ARM linkers follow specifies a 2-word TCB between
  // the thread pointer and the start of the executable's TLS segment, but both
  // the thread pointer and the TLS segment are aligned appropriately for the
  // TLS segment. Calculate the distance between the thread pointer and the
  // EXE's segment.
  const size_t exe_tpoff = __BIONIC_ALIGN(sizeof(void*) * 2, exe_segment->alignment);
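  // For example (alignment assumed for illustration): with an executable
  // segment aligned to 64 bytes on a 64-bit target, exe_tpoff =
  // __BIONIC_ALIGN(16, 64) = 64, so the thread pointer ends up 64 bytes before
  // the segment rather than the minimal 16.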

  const size_t min_bionic_alignment = BIONIC_ROUND_UP_POWER_OF_2(MAX_TLS_SLOT) * sizeof(void*);
  if (exe_tpoff < min_bionic_alignment) {
    async_safe_fatal("error: \"%s\": executable's TLS segment is underaligned: "
                     "alignment is %zu, needs to be at least %zu for %s Bionic",
                     progname, exe_segment->alignment, min_bionic_alignment,
                     (sizeof(void*) == 4 ? "ARM" : "ARM64"));
  }

  offset_bionic_tcb_ = result - exe_tpoff - (-MIN_TLS_SLOT * sizeof(void*));
  return result;

#elif defined(__i386__) || defined(__x86_64__)

  // x86 uses variant 2 TLS layout. The executable's segment is located just
  // before the TCB.
  static_assert(MIN_TLS_SLOT == 0, "First slot of bionic_tcb must be slot #0 on x86");
  const size_t exe_size = round_up_with_overflow_check(exe_segment->size, exe_segment->alignment);
  reserve(exe_size, 1);
  const size_t max_align = MAX(alignof(bionic_tcb), exe_segment->alignment);
  offset_bionic_tcb_ = reserve(sizeof(bionic_tcb), max_align);
  return offset_bionic_tcb_ - exe_size;

#else
#error "Unrecognized architecture"
#endif
}
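
// Worked example for the x86 branch above (sizes assumed for illustration):
// starting from offset_ == 0, a 20-byte executable segment with 8-byte
// alignment rounds up to exe_size = 24 and is reserved at offset 0. If
// max_align works out to 32, the TCB is then reserved at offset 32, and the
// function returns 32 - 24 = 8, placing the segment so that it ends (after
// rounding) exactly at the TCB, as variant 2 requires.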

void StaticTlsLayout::reserve_bionic_tls() {
  offset_bionic_tls_ = reserve_type<bionic_tls>();
}

void StaticTlsLayout::finish_layout() {
  // Round the offset up to the alignment.
  offset_ = round_up_with_overflow_check(offset_, alignment_);

  if (overflowed_) {
    async_safe_fatal("error: TLS segments in static TLS overflowed");
  }
}

// The size is not required to be a multiple of the alignment. The alignment
// must be a positive power-of-two.
size_t StaticTlsLayout::reserve(size_t size, size_t alignment) {
  offset_ = round_up_with_overflow_check(offset_, alignment);
  const size_t result = offset_;
  if (__builtin_add_overflow(offset_, size, &offset_)) overflowed_ = true;
  alignment_ = MAX(alignment_, alignment);
  return result;
}
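
// For example: with offset_ == 4, reserve(10, 8) rounds offset_ up to 8,
// returns 8 as the reserved offset, and leaves offset_ == 18 with alignment_
// raised to at least 8.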

size_t StaticTlsLayout::round_up_with_overflow_check(size_t value, size_t alignment) {
  const size_t old_value = value;
  value = __BIONIC_ALIGN(value, alignment);
  if (value < old_value) overflowed_ = true;
  return value;
}
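
// For example: rounding SIZE_MAX - 2 up to an alignment of 8 wraps around to
// 0, which is less than the old value, so overflowed_ is set and
// finish_layout() will abort.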

// Copy each TLS module's initialization image into a newly-allocated block of
// static TLS memory. To reduce dirty pages, this function only writes to pages
// within the static TLS that need initialization. The memory should already be
// zero-initialized on entry.
void __init_static_tls(void* static_tls) {
  // The part of the table we care about (i.e. static TLS modules) never changes
  // after startup, but we still need the mutex because the table could grow,
  // moving the initial part. If this locking is too slow, we can duplicate the
  // static part of the table.
  TlsModules& modules = __libc_shared_globals()->tls_modules;
  ScopedSignalBlocker ssb;
  ScopedReadLock locker(&modules.rwlock);

  for (size_t i = 0; i < modules.module_count; ++i) {
    TlsModule& module = modules.module_table[i];
    if (module.static_offset == SIZE_MAX) {
      // All of the static modules come before all of the dynamic modules, so
      // once we see the first dynamic module, we're done.
      break;
    }
    if (module.segment.init_size == 0) {
      // Skip the memcpy call for TLS segments with no initializer, which is
      // common.
      continue;
    }
    memcpy(static_cast<char*>(static_tls) + module.static_offset,
           module.segment.init_ptr,
           module.segment.init_size);
  }
}

static inline size_t dtv_size_in_bytes(size_t module_count) {
  return sizeof(TlsDtv) + module_count * sizeof(void*);
}

// Calculates the number of module slots to allocate in a new DTV. For small
// objects (up to 1KiB), the TLS allocator allocates memory in power-of-2 sizes,
// so for better space usage, ensure that the DTV size (header + slots) is a
// power of 2.
//
// The lock on TlsModules must be held.
static size_t calculate_new_dtv_count() {
  size_t loaded_cnt = __libc_shared_globals()->tls_modules.module_count;
  size_t bytes = dtv_size_in_bytes(MAX(1, loaded_cnt));
  if (!powerof2(bytes)) {
    bytes = BIONIC_ROUND_UP_POWER_OF_2(bytes);
  }
  return (bytes - sizeof(TlsDtv)) / sizeof(void*);
}
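
// For example (assuming a 3-word TlsDtv header, i.e. 24 bytes on a 64-bit
// target): with 6 loaded modules, bytes = 24 + 6 * 8 = 72, which rounds up to
// 128, so the new DTV gets (128 - 24) / 8 = 13 module slots.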

// This function must be called with signals blocked and a write lock on
// TlsModules held.
static void update_tls_dtv(bionic_tcb* tcb) {
  const TlsModules& modules = __libc_shared_globals()->tls_modules;
  BionicAllocator& allocator = __libc_shared_globals()->tls_allocator;

  // Use the generation counter from the shared globals instead of the local
  // copy, which won't be initialized yet if __tls_get_addr is called before
  // libc.so's constructor.
  if (__get_tcb_dtv(tcb)->generation == atomic_load(&modules.generation)) {
    return;
  }

  const size_t old_cnt = __get_tcb_dtv(tcb)->count;

  // If the DTV isn't large enough, allocate a larger one. Because a signal
  // handler could interrupt the fast path of __tls_get_addr, we don't free the
  // old DTV. Instead, we add the old DTV to a list, then free all of a thread's
  // DTVs at thread-exit. Each time the DTV is reallocated, its size at least
  // doubles.
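  // (The doubling follows from calculate_new_dtv_count's power-of-2 sizing: a
  // DTV is reallocated only when the module count exceeds its slot count, so
  // the next power-of-2 size that fits is at least twice the old one.)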
  if (modules.module_count > old_cnt) {
    size_t new_cnt = calculate_new_dtv_count();
    TlsDtv* const old_dtv = __get_tcb_dtv(tcb);
    TlsDtv* const new_dtv = static_cast<TlsDtv*>(allocator.alloc(dtv_size_in_bytes(new_cnt)));
    memcpy(new_dtv, old_dtv, dtv_size_in_bytes(old_cnt));
    new_dtv->count = new_cnt;
    new_dtv->next = old_dtv;
    __set_tcb_dtv(tcb, new_dtv);
  }

  TlsDtv* const dtv = __get_tcb_dtv(tcb);

  const StaticTlsLayout& layout = __libc_shared_globals()->static_tls_layout;
  char* static_tls = reinterpret_cast<char*>(tcb) - layout.offset_bionic_tcb();

  // Initialize static TLS modules and free unloaded modules.
  for (size_t i = 0; i < dtv->count; ++i) {
    if (i < modules.module_count) {
      const TlsModule& mod = modules.module_table[i];
      if (mod.static_offset != SIZE_MAX) {
        dtv->modules[i] = static_tls + mod.static_offset;
        continue;
      }
      if (mod.first_generation != kTlsGenerationNone &&
          mod.first_generation <= dtv->generation) {
        continue;
      }
    }
    if (modules.on_destruction_cb != nullptr) {
      void* dtls_begin = dtv->modules[i];
      void* dtls_end =
          static_cast<void*>(static_cast<char*>(dtls_begin) + allocator.get_chunk_size(dtls_begin));
      modules.on_destruction_cb(dtls_begin, dtls_end);
    }
    allocator.free(dtv->modules[i]);
    dtv->modules[i] = nullptr;
  }

  dtv->generation = atomic_load(&modules.generation);
}

__attribute__((noinline)) static void* tls_get_addr_slow_path(const TlsIndex* ti) {
  TlsModules& modules = __libc_shared_globals()->tls_modules;
  bionic_tcb* tcb = __get_bionic_tcb();

  // Block signals and lock TlsModules. We may need the allocator, so take
  // a write lock.
  ScopedSignalBlocker ssb;
  ScopedWriteLock locker(&modules.rwlock);

  update_tls_dtv(tcb);

  TlsDtv* dtv = __get_tcb_dtv(tcb);
  const size_t module_idx = __tls_module_id_to_idx(ti->module_id);
  void* mod_ptr = dtv->modules[module_idx];
  if (mod_ptr == nullptr) {
    const TlsSegment& segment = modules.module_table[module_idx].segment;
    mod_ptr = __libc_shared_globals()->tls_allocator.memalign(segment.alignment, segment.size);
    if (segment.init_size > 0) {
      memcpy(mod_ptr, segment.init_ptr, segment.init_size);
    }
    dtv->modules[module_idx] = mod_ptr;

    // Reports the allocation to the listener, if any.
    if (modules.on_creation_cb != nullptr) {
      modules.on_creation_cb(mod_ptr,
                             static_cast<void*>(static_cast<char*>(mod_ptr) + segment.size));
    }
  }

  return static_cast<char*>(mod_ptr) + ti->offset;
}

// Returns the address of a thread's TLS memory given a module ID and an offset
// into that module's TLS segment. This function is called on every access to a
// dynamic TLS variable on targets that don't use TLSDESC. arm64 uses TLSDESC,
// so it only calls this function on a thread's first access to a module's TLS
// segment.
//
// On most targets, this accessor function is __tls_get_addr and
// TLS_GET_ADDR_CCONV is unset. 32-bit x86 uses ___tls_get_addr instead and a
// regparm() calling convention.
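//
// For illustration only (actual code generation varies by toolchain and TLS
// model), a general-dynamic access to "extern __thread int x;" behaves roughly
// like:
//   TlsIndex ti = {module_id_of_x, offset_of_x};  // filled in via relocations
//   int* px = static_cast<int*>(TLS_GET_ADDR(&ti));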
extern "C" void* TLS_GET_ADDR(const TlsIndex* ti) TLS_GET_ADDR_CCONV {
  TlsDtv* dtv = __get_tcb_dtv(__get_bionic_tcb());

  // TODO: See if we can use a relaxed memory ordering here instead.
  size_t generation = atomic_load(&__libc_tls_generation_copy);
  if (__predict_true(generation == dtv->generation)) {
    void* mod_ptr = dtv->modules[__tls_module_id_to_idx(ti->module_id)];
    if (__predict_true(mod_ptr != nullptr)) {
      return static_cast<char*>(mod_ptr) + ti->offset;
    }
  }

  return tls_get_addr_slow_path(ti);
}

// This function frees:
//  - TLS modules referenced by the current DTV.
//  - The list of DTV objects associated with the current thread.
//
// The caller must have already blocked signals.
void __free_dynamic_tls(bionic_tcb* tcb) {
  TlsModules& modules = __libc_shared_globals()->tls_modules;
  BionicAllocator& allocator = __libc_shared_globals()->tls_allocator;

  // If we didn't allocate any dynamic memory, skip out early without taking
  // the lock.
  TlsDtv* dtv = __get_tcb_dtv(tcb);
  if (dtv->generation == kTlsGenerationNone) {
    return;
  }

  // We need the write lock to use the allocator.
  ScopedWriteLock locker(&modules.rwlock);

  // First free everything in the current DTV.
  for (size_t i = 0; i < dtv->count; ++i) {
    if (i < modules.module_count && modules.module_table[i].static_offset != SIZE_MAX) {
      // This module's TLS memory is allocated statically, so don't free it here.
      continue;
    }

    if (modules.on_destruction_cb != nullptr) {
      void* dtls_begin = dtv->modules[i];
      void* dtls_end =
          static_cast<void*>(static_cast<char*>(dtls_begin) + allocator.get_chunk_size(dtls_begin));
      modules.on_destruction_cb(dtls_begin, dtls_end);
    }

    allocator.free(dtv->modules[i]);
  }

  // Now free the thread's list of DTVs.
  while (dtv->generation != kTlsGenerationNone) {
    TlsDtv* next = dtv->next;
    allocator.free(dtv);
    dtv = next;
  }

  // Clear the DTV slot. The DTV must not be used again with this thread.
  tcb->tls_slot(TLS_SLOT_DTV) = nullptr;
}

// Invokes all the registered thread_exit callbacks, if any.
void __notify_thread_exit_callbacks() {
  TlsModules& modules = __libc_shared_globals()->tls_modules;
  if (modules.first_thread_exit_callback == nullptr) {
    // If there is no first_thread_exit_callback, there shouldn't be a tail.
    CHECK(modules.thread_exit_callback_tail_node == nullptr);
    return;
  }

  // Callbacks are supposed to be invoked in the reverse order
  // in which they were registered.
  CallbackHolder* node = modules.thread_exit_callback_tail_node;
  while (node != nullptr) {
    node->cb();
    node = node->prev;
  }
  modules.first_thread_exit_callback();
}