1 /*
2 * Copyright (C) 2008, 2009 The Android Open Source Project
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in
12 * the documentation and/or other materials provided with the
13 * distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
16 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
17 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
18 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
19 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
21 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
22 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
23 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
24 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
25 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 */
28
29 #include <dlfcn.h>
30 #include <errno.h>
31 #include <fcntl.h>
32 #include <inttypes.h>
33 #include <pthread.h>
34 #include <stdio.h>
35 #include <stdlib.h>
36 #include <string.h>
37 #include <sys/mman.h>
38 #include <unistd.h>
39
40 #include <new>
41
42 // Private C library headers.
43 #include "private/bionic_tls.h"
44 #include "private/KernelArgumentBlock.h"
45 #include "private/ScopedPthreadMutexLocker.h"
46 #include "private/ScopedFd.h"
47 #include "private/ScopeGuard.h"
48 #include "private/UniquePtr.h"
49
50 #include "linker.h"
51 #include "linker_debug.h"
52 #include "linker_environ.h"
53 #include "linker_phdr.h"
54 #include "linker_allocator.h"
55
56 /* >>> IMPORTANT NOTE - READ ME BEFORE MODIFYING <<<
57 *
58 * Do NOT use malloc() and friends or pthread_*() code here.
59 * Don't use printf() either; it's caused mysterious memory
60 * corruption in the past.
61 * The linker runs before we bring up libc, so it's easiest
62 * to make sure it does not depend on any complex libc features.
63 *
64 * open issues / todo:
65 *
66 * - cleaner error reporting
67 * - after linking, set as much stuff as possible to READONLY
68 * and NOEXEC
69 */
70
71 #if defined(__LP64__)
72 #define SEARCH_NAME(x) x
73 #else
74 // Nvidia drivers rely on the bug described in
75 // http://code.google.com/p/android/issues/detail?id=6670,
76 // so we continue to use base-name lookup for LP32.
77 static const char* get_base_name(const char* name) {
78 const char* bname = strrchr(name, '/');
79 return bname ? bname + 1 : name;
80 }
81 #define SEARCH_NAME(x) get_base_name(x)
82 #endif
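// Illustrative example (not part of the original source): on LP32,
// SEARCH_NAME("/data/app/libfoo.so") yields "libfoo.so", so two libraries
// with the same base name are treated as the same library regardless of
// directory; on LP64, SEARCH_NAME() returns the name unchanged, so lookups
// compare full paths.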
83
84 static ElfW(Addr) get_elf_exec_load_bias(const ElfW(Ehdr)* elf);
85
86 static LinkerAllocator<soinfo> g_soinfo_allocator;
87 static LinkerAllocator<LinkedListEntry<soinfo>> g_soinfo_links_allocator;
88
89 static soinfo* solist;
90 static soinfo* sonext;
91 static soinfo* somain; // main process, always the one after libdl_info
92
93 static const char* const kDefaultLdPaths[] = {
94 #if defined(__LP64__)
95 "/vendor/lib64",
96 "/system/lib64",
97 #else
98 "/vendor/lib",
99 "/system/lib",
100 #endif
101 nullptr
102 };
103
104 #define LDPATH_BUFSIZE (LDPATH_MAX*64)
105 #define LDPATH_MAX 8
106
107 #define LDPRELOAD_BUFSIZE (LDPRELOAD_MAX*64)
108 #define LDPRELOAD_MAX 8
109
110 static char g_ld_library_paths_buffer[LDPATH_BUFSIZE];
111 static const char* g_ld_library_paths[LDPATH_MAX + 1];
112
113 static char g_ld_preloads_buffer[LDPRELOAD_BUFSIZE];
114 static const char* g_ld_preload_names[LDPRELOAD_MAX + 1];
115
116 static soinfo* g_ld_preloads[LDPRELOAD_MAX + 1];
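// Sizing note (illustrative, not part of the original source): LDPATH_BUFSIZE
// is LDPATH_MAX * 64 = 512 bytes, i.e. room for up to 8 path entries averaging
// roughly 64 characters each, and likewise for LDPRELOAD_BUFSIZE. The "+ 1" in
// the array sizes leaves space for the terminating nullptr entry that
// parse_path() writes.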
117
118 __LIBC_HIDDEN__ int g_ld_debug_verbosity;
119
120 __LIBC_HIDDEN__ abort_msg_t* g_abort_message = nullptr; // For debuggerd.
121
122 enum RelocationKind {
123 kRelocAbsolute = 0,
124 kRelocRelative,
125 kRelocCopy,
126 kRelocSymbol,
127 kRelocMax
128 };
129
130 #if STATS
131 struct linker_stats_t {
132 int count[kRelocMax];
133 };
134
135 static linker_stats_t linker_stats;
136
137 static void count_relocation(RelocationKind kind) {
138 ++linker_stats.count[kind];
139 }
140 #else
141 static void count_relocation(RelocationKind) {
142 }
143 #endif
144
145 #if COUNT_PAGES
146 static unsigned bitmask[4096];
147 #if defined(__LP64__)
148 #define MARK(offset) \
149 do { \
150 if ((((offset) >> 12) >> 5) < 4096) \
151 bitmask[((offset) >> 12) >> 5] |= (1 << (((offset) >> 12) & 31)); \
152 } while (0)
153 #else
154 #define MARK(offset) \
155 do { \
156 bitmask[((offset) >> 12) >> 3] |= (1 << (((offset) >> 12) & 7)); \
157 } while (0)
158 #endif
159 #else
160 #define MARK(x) do {} while (0)
161 #endif
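// How MARK() records page hits (illustrative note, not part of the original
// source): (offset) >> 12 is the index of the 4 KiB page touched by a
// relocation. On LP64 that page index is split into a word index (>> 5) and a
// bit index (& 31) within the 32-bit entries of 'bitmask'; the LP32 form
// splits by >> 3 and & 7, so it only ever uses the low 8 bits of each entry.
// For example, offset 0x5000 is page 5, which on LP64 sets bit 5 of bitmask[0].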
162
163 // You shouldn't try to call memory-allocating functions in the dynamic linker.
164 // Guard against the most obvious ones.
165 #define DISALLOW_ALLOCATION(return_type, name, ...) \
166 return_type name __VA_ARGS__ \
167 { \
168 __libc_fatal("ERROR: " #name " called from the dynamic linker!\n"); \
169 }
170 DISALLOW_ALLOCATION(void*, malloc, (size_t u __unused));
171 DISALLOW_ALLOCATION(void, free, (void* u __unused));
172 DISALLOW_ALLOCATION(void*, realloc, (void* u1 __unused, size_t u2 __unused));
173 DISALLOW_ALLOCATION(void*, calloc, (size_t u1 __unused, size_t u2 __unused));
174
175 static char __linker_dl_err_buf[768];
176
177 char* linker_get_error_buffer() {
178 return &__linker_dl_err_buf[0];
179 }
180
181 size_t linker_get_error_buffer_size() {
182 return sizeof(__linker_dl_err_buf);
183 }
184
185 // This function is an empty stub where GDB locates a breakpoint to get notified
186 // about linker activity.
187 extern "C" void __attribute__((noinline)) __attribute__((visibility("default"))) rtld_db_dlactivity();
188
189 static pthread_mutex_t g__r_debug_mutex = PTHREAD_MUTEX_INITIALIZER;
190 static r_debug _r_debug = {1, nullptr, reinterpret_cast<uintptr_t>(&rtld_db_dlactivity), r_debug::RT_CONSISTENT, 0};
191 static link_map* r_debug_tail = 0;
192
193 static void insert_soinfo_into_debug_map(soinfo* info) {
194 // Copy the necessary fields into the debug structure.
195 link_map* map = &(info->link_map_head);
196 map->l_addr = info->load_bias;
197 map->l_name = reinterpret_cast<char*>(info->name);
198 map->l_ld = info->dynamic;
199
200 // Stick the new library at the end of the list.
201 // gdb tends to care more about libc than it does
202 // about leaf libraries, and ordering it this way
203 // reduces the back-and-forth over the wire.
204 if (r_debug_tail) {
205 r_debug_tail->l_next = map;
206 map->l_prev = r_debug_tail;
207 map->l_next = 0;
208 } else {
209 _r_debug.r_map = map;
210 map->l_prev = 0;
211 map->l_next = 0;
212 }
213 r_debug_tail = map;
214 }
215
216 static void remove_soinfo_from_debug_map(soinfo* info) {
217 link_map* map = &(info->link_map_head);
218
219 if (r_debug_tail == map) {
220 r_debug_tail = map->l_prev;
221 }
222
223 if (map->l_prev) {
224 map->l_prev->l_next = map->l_next;
225 }
226 if (map->l_next) {
227 map->l_next->l_prev = map->l_prev;
228 }
229 }
230
231 static void notify_gdb_of_load(soinfo* info) {
232 if (info->flags & FLAG_EXE) {
233 // GDB already knows about the main executable
234 return;
235 }
236
237 ScopedPthreadMutexLocker locker(&g__r_debug_mutex);
238
239 _r_debug.r_state = r_debug::RT_ADD;
240 rtld_db_dlactivity();
241
242 insert_soinfo_into_debug_map(info);
243
244 _r_debug.r_state = r_debug::RT_CONSISTENT;
245 rtld_db_dlactivity();
246 }
247
248 static void notify_gdb_of_unload(soinfo* info) {
249 if (info->flags & FLAG_EXE) {
250 // GDB already knows about the main executable
251 return;
252 }
253
254 ScopedPthreadMutexLocker locker(&g__r_debug_mutex);
255
256 _r_debug.r_state = r_debug::RT_DELETE;
257 rtld_db_dlactivity();
258
259 remove_soinfo_from_debug_map(info);
260
261 _r_debug.r_state = r_debug::RT_CONSISTENT;
262 rtld_db_dlactivity();
263 }
264
265 void notify_gdb_of_libraries() {
266 _r_debug.r_state = r_debug::RT_ADD;
267 rtld_db_dlactivity();
268 _r_debug.r_state = r_debug::RT_CONSISTENT;
269 rtld_db_dlactivity();
270 }
271
272 LinkedListEntry<soinfo>* SoinfoListAllocator::alloc() {
273 return g_soinfo_links_allocator.alloc();
274 }
275
276 void SoinfoListAllocator::free(LinkedListEntry<soinfo>* entry) {
277 g_soinfo_links_allocator.free(entry);
278 }
279
280 static void protect_data(int protection) {
281 g_soinfo_allocator.protect_all(protection);
282 g_soinfo_links_allocator.protect_all(protection);
283 }
284
285 static soinfo* soinfo_alloc(const char* name, struct stat* file_stat, off64_t file_offset) {
286 if (strlen(name) >= SOINFO_NAME_LEN) {
287 DL_ERR("library name \"%s\" too long", name);
288 return nullptr;
289 }
290
291 soinfo* si = new (g_soinfo_allocator.alloc()) soinfo(name, file_stat, file_offset);
292
293 sonext->next = si;
294 sonext = si;
295
296 TRACE("name %s: allocated soinfo @ %p", name, si);
297 return si;
298 }
299
300 static void soinfo_free(soinfo* si) {
301 if (si == nullptr) {
302 return;
303 }
304
305 if (si->base != 0 && si->size != 0) {
306 munmap(reinterpret_cast<void*>(si->base), si->size);
307 }
308
309 soinfo *prev = nullptr, *trav;
310
311 TRACE("name %s: freeing soinfo @ %p", si->name, si);
312
313 for (trav = solist; trav != nullptr; trav = trav->next) {
314 if (trav == si) {
315 break;
316 }
317 prev = trav;
318 }
319 if (trav == nullptr) {
320 // si was not in solist
321 DL_ERR("name \"%s\" is not in solist!", si->name);
322 return;
323 }
324
325 // clear links to/from si
326 si->remove_all_links();
327
328 // prev will never be null, because the first entry in solist is
329 // always the static libdl_info.
330 prev->next = si->next;
331 if (si == sonext) {
332 sonext = prev;
333 }
334
335 g_soinfo_allocator.free(si);
336 }
337
338
339 static void parse_path(const char* path, const char* delimiters,
340 const char** array, char* buf, size_t buf_size, size_t max_count) {
341 if (path == nullptr) {
342 return;
343 }
344
345 size_t len = strlcpy(buf, path, buf_size);
346
347 size_t i = 0;
348 char* buf_p = buf;
349 while (i < max_count && (array[i] = strsep(&buf_p, delimiters))) {
350 if (*array[i] != '\0') {
351 ++i;
352 }
353 }
354
355 // Forget the last path if we had to truncate; this occurs if the 2nd to
356 // last char isn't '\0' (i.e. wasn't originally a delimiter).
357 if (i > 0 && len >= buf_size && buf[buf_size - 2] != '\0') {
358 array[i - 1] = nullptr;
359 } else {
360 array[i] = nullptr;
361 }
362 }
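// Illustrative example (not part of the original source): with
//   parse_path("/vendor/lib:/system/lib", ":", array, buf, buf_size, 8)
// strsep() splits the copy held in 'buf' in place, empty components (e.g.
// from "::" or a leading ':') are skipped, and the result is
//   array[0] = "/vendor/lib", array[1] = "/system/lib", array[2] = nullptr.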
363
364 static void parse_LD_LIBRARY_PATH(const char* path) {
365 parse_path(path, ":", g_ld_library_paths,
366 g_ld_library_paths_buffer, sizeof(g_ld_library_paths_buffer), LDPATH_MAX);
367 }
368
369 static void parse_LD_PRELOAD(const char* path) {
370 // We have historically supported ':' as well as ' ' in LD_PRELOAD.
371 parse_path(path, " :", g_ld_preload_names,
372 g_ld_preloads_buffer, sizeof(g_ld_preloads_buffer), LDPRELOAD_MAX);
373 }
374
375 #if defined(__arm__)
376
377 // For a given PC, find the .so that it belongs to.
378 // Returns the base address of the .ARM.exidx section
379 // for that .so, and the number of 8-byte entries
380 // in that section (via *pcount).
381 //
382 // Intended to be called by libc's __gnu_Unwind_Find_exidx().
383 //
384 // This function is exposed via dlfcn.cpp and libdl.so.
385 _Unwind_Ptr dl_unwind_find_exidx(_Unwind_Ptr pc, int* pcount) {
386 unsigned addr = (unsigned)pc;
387
388 for (soinfo* si = solist; si != 0; si = si->next) {
389 if ((addr >= si->base) && (addr < (si->base + si->size))) {
390 *pcount = si->ARM_exidx_count;
391 return (_Unwind_Ptr)si->ARM_exidx;
392 }
393 }
394 *pcount = 0;
395 return nullptr;
396 }
397
398 #endif
399
400 // Here, we only have to provide a callback to iterate across all the
401 // loaded libraries. gcc_eh does the rest.
402 int dl_iterate_phdr(int (*cb)(dl_phdr_info* info, size_t size, void* data), void* data) {
403 int rv = 0;
404 for (soinfo* si = solist; si != nullptr; si = si->next) {
405 dl_phdr_info dl_info;
406 dl_info.dlpi_addr = si->link_map_head.l_addr;
407 dl_info.dlpi_name = si->link_map_head.l_name;
408 dl_info.dlpi_phdr = si->phdr;
409 dl_info.dlpi_phnum = si->phnum;
410 rv = cb(&dl_info, sizeof(dl_phdr_info), data);
411 if (rv != 0) {
412 break;
413 }
414 }
415 return rv;
416 }
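// Illustrative caller-side usage (assumed example, not part of this file):
//
//   static int print_cb(dl_phdr_info* info, size_t, void*) {
//     // info->dlpi_name / dlpi_addr / dlpi_phdr come from the soinfo above.
//     return 0;  // returning non-zero stops the iteration early
//   }
//   ...
//   dl_iterate_phdr(print_cb, nullptr);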
417
418 static ElfW(Sym)* soinfo_elf_lookup(soinfo* si, unsigned hash, const char* name) {
419 ElfW(Sym)* symtab = si->symtab;
420
421 TRACE_TYPE(LOOKUP, "SEARCH %s in %s@%p %x %zd",
422 name, si->name, reinterpret_cast<void*>(si->base), hash, hash % si->nbucket);
423
424 for (unsigned n = si->bucket[hash % si->nbucket]; n != 0; n = si->chain[n]) {
425 ElfW(Sym)* s = symtab + n;
426 if (strcmp(si->get_string(s->st_name), name)) continue;
427
428 // only concern ourselves with global and weak symbol definitions
429 switch (ELF_ST_BIND(s->st_info)) {
430 case STB_GLOBAL:
431 case STB_WEAK:
432 if (s->st_shndx == SHN_UNDEF) {
433 continue;
434 }
435
436 TRACE_TYPE(LOOKUP, "FOUND %s in %s (%p) %zd",
437 name, si->name, reinterpret_cast<void*>(s->st_value),
438 static_cast<size_t>(s->st_size));
439 return s;
440 case STB_LOCAL:
441 continue;
442 default:
443 __libc_fatal("ERROR: Unexpected ST_BIND value: %d for '%s' in '%s'",
444 ELF_ST_BIND(s->st_info), name, si->name);
445 }
446 }
447
448 TRACE_TYPE(LOOKUP, "NOT FOUND %s in %s@%p %x %zd",
449 name, si->name, reinterpret_cast<void*>(si->base), hash, hash % si->nbucket);
450
451
452 return nullptr;
453 }
454
455 soinfo::soinfo(const char* name, const struct stat* file_stat, off64_t file_offset) {
456 memset(this, 0, sizeof(*this));
457
458 strlcpy(this->name, name, sizeof(this->name));
459 flags = FLAG_NEW_SOINFO;
460 version = SOINFO_VERSION;
461
462 if (file_stat != nullptr) {
463 this->st_dev = file_stat->st_dev;
464 this->st_ino = file_stat->st_ino;
465 this->file_offset = file_offset;
466 }
467 }
468
469 static unsigned elfhash(const char* _name) {
470 const unsigned char* name = reinterpret_cast<const unsigned char*>(_name);
471 unsigned h = 0, g;
472
473 while (*name) {
474 h = (h << 4) + *name++;
475 g = h & 0xf0000000;
476 h ^= g;
477 h ^= g >> 24;
478 }
479 return h;
480 }
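// Worked example (illustrative, not part of the original source): for the name
// "ab" the loop computes h = (0 << 4) + 'a' = 0x61, then
// h = (0x61 << 4) + 'b' = 0x672; the masking with g only has an effect once
// enough characters have shifted bits into the top four bits of h. This is the
// classic SysV ELF hash used to index 'bucket' in soinfo_elf_lookup() above.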
481
482 static ElfW(Sym)* soinfo_do_lookup(soinfo* si, const char* name, soinfo** lsi) {
483 unsigned elf_hash = elfhash(name);
484 ElfW(Sym)* s = nullptr;
485
486 /* "This element's presence in a shared object library alters the dynamic linker's
487 * symbol resolution algorithm for references within the library. Instead of starting
488 * a symbol search with the executable file, the dynamic linker starts from the shared
489 * object itself. If the shared object fails to supply the referenced symbol, the
490 * dynamic linker then searches the executable file and other shared objects as usual."
491 *
492 * http://www.sco.com/developers/gabi/2012-12-31/ch5.dynamic.html
493 *
494 * Note that this is unlikely, since the static linker avoids generating
495 * relocations for dynamic executables linked with -Bsymbolic.
496 */
497 if (si->has_DT_SYMBOLIC) {
498 DEBUG("%s: looking up %s in local scope (DT_SYMBOLIC)", si->name, name);
499 s = soinfo_elf_lookup(si, elf_hash, name);
500 if (s != nullptr) {
501 *lsi = si;
502 }
503 }
504
505 if (s == nullptr && somain != nullptr) {
506 // 1. Look for it in the main executable unless we already did.
507 if (si != somain || !si->has_DT_SYMBOLIC) {
508 DEBUG("%s: looking up %s in executable %s",
509 si->name, name, somain->name);
510 s = soinfo_elf_lookup(somain, elf_hash, name);
511 if (s != nullptr) {
512 *lsi = somain;
513 }
514 }
515
516 // 2. Look for it in the ld_preloads
517 if (s == nullptr) {
518 for (int i = 0; g_ld_preloads[i] != nullptr; i++) {
519 s = soinfo_elf_lookup(g_ld_preloads[i], elf_hash, name);
520 if (s != nullptr) {
521 *lsi = g_ld_preloads[i];
522 break;
523 }
524 }
525 }
526 }
527
528 /* Look for symbols in the local scope (the object that is
529 * searching). This happens with C++ templates on x86 for some
530 * reason.
531 *
532 * Notes on weak symbols:
533 * The ELF specs are ambiguous about treatment of weak definitions in
534 * dynamic linking. Some systems return the first definition found
535 * and some the first non-weak definition. This is system dependent.
536 * Here we return the first definition found for simplicity. */
537
538 if (s == nullptr && !si->has_DT_SYMBOLIC) {
539 DEBUG("%s: looking up %s in local scope", si->name, name);
540 s = soinfo_elf_lookup(si, elf_hash, name);
541 if (s != nullptr) {
542 *lsi = si;
543 }
544 }
545
546 if (s == nullptr) {
547 si->get_children().visit([&](soinfo* child) {
548 DEBUG("%s: looking up %s in %s", si->name, name, child->name);
549 s = soinfo_elf_lookup(child, elf_hash, name);
550 if (s != nullptr) {
551 *lsi = child;
552 return false;
553 }
554 return true;
555 });
556 }
557
558 if (s != nullptr) {
559 TRACE_TYPE(LOOKUP, "si %s sym %s s->st_value = %p, "
560 "found in %s, base = %p, load bias = %p",
561 si->name, name, reinterpret_cast<void*>(s->st_value),
562 (*lsi)->name, reinterpret_cast<void*>((*lsi)->base),
563 reinterpret_cast<void*>((*lsi)->load_bias));
564 }
565
566 return s;
567 }
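// Summary of the lookup order implemented above (descriptive note, not part of
// the original source):
//   1. this object itself, but only if it was linked with DT_SYMBOLIC;
//   2. the main executable (somain);
//   3. the LD_PRELOAD-ed libraries, in order;
//   4. this object itself, if it was not already searched in step 1;
//   5. this object's DT_NEEDED children, in load order.
// *lsi is set to whichever soinfo supplied the winning definition.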
568
569 // Each size has its own allocator.
570 template<size_t size>
571 class SizeBasedAllocator {
572 public:
573 static void* alloc() {
574 return allocator_.alloc();
575 }
576
577 static void free(void* ptr) {
578 allocator_.free(ptr);
579 }
580
581 private:
582 static LinkerBlockAllocator allocator_;
583 };
584
585 template<size_t size>
586 LinkerBlockAllocator SizeBasedAllocator<size>::allocator_(size);
587
588 template<typename T>
589 class TypeBasedAllocator {
590 public:
591 static T* alloc() {
592 return reinterpret_cast<T*>(SizeBasedAllocator<sizeof(T)>::alloc());
593 }
594
595 static void free(T* ptr) {
596 SizeBasedAllocator<sizeof(T)>::free(ptr);
597 }
598 };
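// Usage note (illustrative, not part of the original source): each distinct
// block size gets its own static LinkerBlockAllocator, so
//   LoadTask* t = TypeBasedAllocator<LoadTask>::alloc();
// hands back a sizeof(LoadTask)-sized block from the allocator dedicated to
// that size, and TypeBasedAllocator<LoadTask>::free(t) returns it there.
// LoadTask::create() and LoadTask::deleter below wrap exactly this pair of
// calls.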
599
600 class LoadTask {
601 public:
602 struct deleter_t {
603 void operator()(LoadTask* t) {
604 TypeBasedAllocator<LoadTask>::free(t);
605 }
606 };
607
608 typedef UniquePtr<LoadTask, deleter_t> unique_ptr;
609
610 static deleter_t deleter;
611
612 static LoadTask* create(const char* name, soinfo* needed_by) {
613 LoadTask* ptr = TypeBasedAllocator<LoadTask>::alloc();
614 return new (ptr) LoadTask(name, needed_by);
615 }
616
617 const char* get_name() const {
618 return name_;
619 }
620
621 soinfo* get_needed_by() const {
622 return needed_by_;
623 }
624 private:
625 LoadTask(const char* name, soinfo* needed_by)
626 : name_(name), needed_by_(needed_by) {}
627
628 const char* name_;
629 soinfo* needed_by_;
630
631 DISALLOW_IMPLICIT_CONSTRUCTORS(LoadTask);
632 };
633
634 LoadTask::deleter_t LoadTask::deleter;
635
636 template <typename T>
637 using linked_list_t = LinkedList<T, TypeBasedAllocator<LinkedListEntry<T>>>;
638
639 typedef linked_list_t<soinfo> SoinfoLinkedList;
640 typedef linked_list_t<const char> StringLinkedList;
641 typedef linked_list_t<LoadTask> LoadTaskList;
642
643
644 // This is used by dlsym(3). It performs symbol lookup only within the
645 // specified soinfo object and its dependencies in breadth-first order.
646 ElfW(Sym)* dlsym_handle_lookup(soinfo* si, soinfo** found, const char* name) {
647 SoinfoLinkedList visit_list;
648 SoinfoLinkedList visited;
649
650 visit_list.push_back(si);
651 soinfo* current_soinfo;
652 while ((current_soinfo = visit_list.pop_front()) != nullptr) {
653 if (visited.contains(current_soinfo)) {
654 continue;
655 }
656
657 ElfW(Sym)* result = soinfo_elf_lookup(current_soinfo, elfhash(name), name);
658
659 if (result != nullptr) {
660 *found = current_soinfo;
661 return result;
662 }
663 visited.push_back(current_soinfo);
664
665 current_soinfo->get_children().for_each([&](soinfo* child) {
666 visit_list.push_back(child);
667 });
668 }
669
670 return nullptr;
671 }
672
673 /* This is used by dlsym(3) to perform a global symbol lookup. If the
674 start value is null (for RTLD_DEFAULT), the search starts at the
675 beginning of the global solist. Otherwise the search starts at the
676 specified soinfo (for RTLD_NEXT).
677 */
678 ElfW(Sym)* dlsym_linear_lookup(const char* name, soinfo** found, soinfo* start) {
679 unsigned elf_hash = elfhash(name);
680
681 if (start == nullptr) {
682 start = solist;
683 }
684
685 ElfW(Sym)* s = nullptr;
686 for (soinfo* si = start; (s == nullptr) && (si != nullptr); si = si->next) {
687 s = soinfo_elf_lookup(si, elf_hash, name);
688 if (s != nullptr) {
689 *found = si;
690 break;
691 }
692 }
693
694 if (s != nullptr) {
695 TRACE_TYPE(LOOKUP, "%s s->st_value = %p, found->base = %p",
696 name, reinterpret_cast<void*>(s->st_value), reinterpret_cast<void*>((*found)->base));
697 }
698
699 return s;
700 }
701
702 soinfo* find_containing_library(const void* p) {
703 ElfW(Addr) address = reinterpret_cast<ElfW(Addr)>(p);
704 for (soinfo* si = solist; si != nullptr; si = si->next) {
705 if (address >= si->base && address - si->base < si->size) {
706 return si;
707 }
708 }
709 return nullptr;
710 }
711
712 ElfW(Sym)* dladdr_find_symbol(soinfo* si, const void* addr) {
713 ElfW(Addr) soaddr = reinterpret_cast<ElfW(Addr)>(addr) - si->base;
714
715 // Search the library's symbol table for any defined symbol which
716 // contains this address.
717 for (size_t i = 0; i < si->nchain; ++i) {
718 ElfW(Sym)* sym = &si->symtab[i];
719 if (sym->st_shndx != SHN_UNDEF &&
720 soaddr >= sym->st_value &&
721 soaddr < sym->st_value + sym->st_size) {
722 return sym;
723 }
724 }
725
726 return nullptr;
727 }
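// Descriptive note (not part of the original source): nchain in the ELF hash
// table equals the number of dynamic symbol table entries, so the loop above
// is effectively a linear scan over the whole .dynsym looking for a defined
// symbol whose [st_value, st_value + st_size) range covers the address.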
728
729 static int open_library_on_path(const char* name, const char* const paths[]) {
730 char buf[512];
731 for (size_t i = 0; paths[i] != nullptr; ++i) {
732 int n = __libc_format_buffer(buf, sizeof(buf), "%s/%s", paths[i], name);
733 if (n < 0 || n >= static_cast<int>(sizeof(buf))) {
734 PRINT("Warning: ignoring very long library path: %s/%s", paths[i], name);
735 continue;
736 }
737 int fd = TEMP_FAILURE_RETRY(open(buf, O_RDONLY | O_CLOEXEC));
738 if (fd != -1) {
739 return fd;
740 }
741 }
742 return -1;
743 }
744
745 static int open_library(const char* name) {
746 TRACE("[ opening %s ]", name);
747
748 // If the name contains a slash, we should attempt to open it directly and not search the paths.
749 if (strchr(name, '/') != nullptr) {
750 int fd = TEMP_FAILURE_RETRY(open(name, O_RDONLY | O_CLOEXEC));
751 if (fd != -1) {
752 return fd;
753 }
754 // ...but nvidia binary blobs (at least) rely on this behavior, so fall through for now.
755 #if defined(__LP64__)
756 return -1;
757 #endif
758 }
759
760 // Otherwise we try LD_LIBRARY_PATH first, and fall back to the built-in well known paths.
761 int fd = open_library_on_path(name, g_ld_library_paths);
762 if (fd == -1) {
763 fd = open_library_on_path(name, kDefaultLdPaths);
764 }
765 return fd;
766 }
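// Search order example (illustrative, not part of the original source):
//   open_library("libfoo.so") tries each LD_LIBRARY_PATH entry first and then
//   the default paths ("/vendor/lib*" before "/system/lib*").
//   open_library("/system/lib64/libfoo.so") is opened directly; on LP32 a
//   failed direct open still falls through to the path search (the nvidia
//   workaround above), while on LP64 it fails immediately.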
767
768 template<typename F>
769 static void for_each_dt_needed(const soinfo* si, F action) {
770 for (ElfW(Dyn)* d = si->dynamic; d->d_tag != DT_NULL; ++d) {
771 if (d->d_tag == DT_NEEDED) {
772 action(si->get_string(d->d_un.d_val));
773 }
774 }
775 }
776
777 static soinfo* load_library(LoadTaskList& load_tasks, const char* name, int dlflags, const android_dlextinfo* extinfo) {
778 int fd = -1;
779 off64_t file_offset = 0;
780 ScopedFd file_guard(-1);
781
782 if (extinfo != nullptr && (extinfo->flags & ANDROID_DLEXT_USE_LIBRARY_FD) != 0) {
783 fd = extinfo->library_fd;
784 if ((extinfo->flags & ANDROID_DLEXT_USE_LIBRARY_FD_OFFSET) != 0) {
785 file_offset = extinfo->library_fd_offset;
786 }
787 } else {
788 // Open the file.
789 fd = open_library(name);
790 if (fd == -1) {
791 DL_ERR("library \"%s\" not found", name);
792 return nullptr;
793 }
794
795 file_guard.reset(fd);
796 }
797
798 if ((file_offset % PAGE_SIZE) != 0) {
799 DL_ERR("file offset for the library \"%s\" is not page-aligned: %" PRId64, name, file_offset);
800 return nullptr;
801 }
802
803 struct stat file_stat;
804 if (TEMP_FAILURE_RETRY(fstat(fd, &file_stat)) != 0) {
805 DL_ERR("unable to stat file for the library \"%s\": %s", name, strerror(errno));
806 return nullptr;
807 }
808
809 // Check for symlinks and other situations where
810 // the same file can have different names.
811 for (soinfo* si = solist; si != nullptr; si = si->next) {
812 if (si->get_st_dev() != 0 &&
813 si->get_st_ino() != 0 &&
814 si->get_st_dev() == file_stat.st_dev &&
815 si->get_st_ino() == file_stat.st_ino &&
816 si->get_file_offset() == file_offset) {
817 TRACE("library \"%s\" is already loaded under different name/path \"%s\" - will return existing soinfo", name, si->name);
818 return si;
819 }
820 }
821
822 if ((dlflags & RTLD_NOLOAD) != 0) {
823 DL_ERR("library \"%s\" wasn't loaded and RTLD_NOLOAD prevented it", name);
824 return nullptr;
825 }
826
827 // Read the ELF header and load the segments.
828 ElfReader elf_reader(name, fd, file_offset);
829 if (!elf_reader.Load(extinfo)) {
830 return nullptr;
831 }
832
833 soinfo* si = soinfo_alloc(SEARCH_NAME(name), &file_stat, file_offset);
834 if (si == nullptr) {
835 return nullptr;
836 }
837 si->base = elf_reader.load_start();
838 si->size = elf_reader.load_size();
839 si->load_bias = elf_reader.load_bias();
840 si->phnum = elf_reader.phdr_count();
841 si->phdr = elf_reader.loaded_phdr();
842
843 if (!si->PrelinkImage()) {
844 soinfo_free(si);
845 return nullptr;
846 }
847
848 for_each_dt_needed(si, [&] (const char* name) {
849 load_tasks.push_back(LoadTask::create(name, si));
850 });
851
852 return si;
853 }
854
855 static soinfo *find_loaded_library_by_name(const char* name) {
856 const char* search_name = SEARCH_NAME(name);
857 for (soinfo* si = solist; si != nullptr; si = si->next) {
858 if (!strcmp(search_name, si->name)) {
859 return si;
860 }
861 }
862 return nullptr;
863 }
864
865 static soinfo* find_library_internal(LoadTaskList& load_tasks, const char* name, int dlflags, const android_dlextinfo* extinfo) {
866
867 soinfo* si = find_loaded_library_by_name(name);
868
869 // The library might still be loaded; accurate detection
870 // of this fact is done by load_library.
871 if (si == nullptr) {
872 TRACE("[ '%s' has not been found by name. Trying harder...]", name);
873 si = load_library(load_tasks, name, dlflags, extinfo);
874 }
875
876 return si;
877 }
878
879 static void soinfo_unload(soinfo* si);
880
881 static bool is_recursive(soinfo* si, soinfo* parent) {
882 if (parent == nullptr) {
883 return false;
884 }
885
886 if (si == parent) {
887 DL_ERR("recursive link to \"%s\"", si->name);
888 return true;
889 }
890
891 return !parent->get_parents().visit([&](soinfo* grandparent) {
892 return !is_recursive(si, grandparent);
893 });
894 }
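// Descriptive note (not part of the original source): is_recursive() walks up
// the parents graph from 'parent'; visit() stops and returns false as soon as
// any ancestor equals 'si', and the outer negation turns that early exit into
// 'true' (a dependency cycle was found).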
895
896 static bool find_libraries(const char* const library_names[], size_t library_names_size, soinfo* soinfos[],
897 soinfo* ld_preloads[], size_t ld_preloads_size, int dlflags, const android_dlextinfo* extinfo) {
898 // Step 0: prepare.
899 LoadTaskList load_tasks;
900 for (size_t i = 0; i < library_names_size; ++i) {
901 const char* name = library_names[i];
902 load_tasks.push_back(LoadTask::create(name, nullptr));
903 }
904
905 // Libraries are added to this list in reverse order so that we can
906 // start linking bottom-up - see step 2.
907 SoinfoLinkedList found_libs;
908 size_t soinfos_size = 0;
909
910 auto failure_guard = make_scope_guard([&]() {
911 // Housekeeping
912 load_tasks.for_each([] (LoadTask* t) {
913 LoadTask::deleter(t);
914 });
915
916 for (size_t i = 0; i<soinfos_size; ++i) {
917 soinfo_unload(soinfos[i]);
918 }
919 });
920
921 // Step 1: load and pre-link all DT_NEEDED libraries in breadth first order.
922 for (LoadTask::unique_ptr task(load_tasks.pop_front()); task.get() != nullptr; task.reset(load_tasks.pop_front())) {
923 soinfo* si = find_library_internal(load_tasks, task->get_name(), dlflags, extinfo);
924 if (si == nullptr) {
925 return false;
926 }
927
928 soinfo* needed_by = task->get_needed_by();
929
930 if (is_recursive(si, needed_by)) {
931 return false;
932 }
933
934 si->ref_count++;
935 if (needed_by != nullptr) {
936 needed_by->add_child(si);
937 }
938 found_libs.push_front(si);
939
940 // When ld_preloads is not null, the first
941 // ld_preloads_size libs are in fact ld_preloads.
942 if (ld_preloads != nullptr && soinfos_size < ld_preloads_size) {
943 ld_preloads[soinfos_size] = si;
944 }
945
946 if (soinfos_size<library_names_size) {
947 soinfos[soinfos_size++] = si;
948 }
949 }
950
951 // Step 2: link libraries.
952 soinfo* si;
953 while ((si = found_libs.pop_front()) != nullptr) {
954 if ((si->flags & FLAG_LINKED) == 0) {
955 if (!si->LinkImage(extinfo)) {
956 return false;
957 }
958 si->flags |= FLAG_LINKED;
959 }
960 }
961
962 // All is well - found_libs and load_tasks are empty at this point
963 // and all libs are successfully linked.
964 failure_guard.disable();
965 return true;
966 }
967
968 static soinfo* find_library(const char* name, int dlflags, const android_dlextinfo* extinfo) {
969 if (name == nullptr) {
970 somain->ref_count++;
971 return somain;
972 }
973
974 soinfo* si;
975
976 if (!find_libraries(&name, 1, &si, nullptr, 0, dlflags, extinfo)) {
977 return nullptr;
978 }
979
980 return si;
981 }
982
983 static void soinfo_unload(soinfo* si) {
984 if (si->ref_count == 1) {
985 TRACE("unloading '%s'", si->name);
986 si->CallDestructors();
987
988 if (si->has_min_version(0)) {
989 soinfo* child = nullptr;
990 while ((child = si->get_children().pop_front()) != nullptr) {
991 TRACE("%s needs to unload %s", si->name, child->name);
992 soinfo_unload(child);
993 }
994 } else {
995 for_each_dt_needed(si, [&] (const char* library_name) {
996 TRACE("deprecated (old format of soinfo): %s needs to unload %s", si->name, library_name);
997 soinfo* needed = find_library(library_name, RTLD_NOLOAD, nullptr);
998 if (needed != nullptr) {
999 soinfo_unload(needed);
1000 } else {
1001 // Not found: for example, if the symlink was deleted between dlopen and dlclose.
1002 // Since we cannot really handle errors at this point, print a warning and continue.
1003 PRINT("warning: couldn't find %s needed by %s on unload.", library_name, si->name);
1004 }
1005 });
1006 }
1007
1008 notify_gdb_of_unload(si);
1009 si->ref_count = 0;
1010 soinfo_free(si);
1011 } else {
1012 si->ref_count--;
1013 TRACE("not unloading '%s', decrementing ref_count to %zd", si->name, si->ref_count);
1014 }
1015 }
1016
1017 void do_android_get_LD_LIBRARY_PATH(char* buffer, size_t buffer_size) {
1018 // Use basic string manipulation calls to avoid snprintf.
1019 // snprintf indirectly calls pthread_getspecific to get the size of a buffer.
1020 // When debug malloc is enabled, this call returns 0. This in turn causes
1021 // snprintf to do nothing, which causes libraries to fail to load.
1022 // See b/17302493 for further details.
1023 // Once the above bug is fixed, this code can be modified to use
1024 // snprintf again.
1025 size_t required_len = strlen(kDefaultLdPaths[0]) + strlen(kDefaultLdPaths[1]) + 2;
1026 if (buffer_size < required_len) {
1027 __libc_fatal("android_get_LD_LIBRARY_PATH failed, buffer too small: buffer len %zu, required len %zu",
1028 buffer_size, required_len);
1029 }
1030 char* end = stpcpy(buffer, kDefaultLdPaths[0]);
1031 *end = ':';
1032 strcpy(end + 1, kDefaultLdPaths[1]);
1033 }
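// Example result (illustrative, not part of the original source): on LP64 the
// buffer ends up holding "/vendor/lib64:/system/lib64"; on LP32 it is
// "/vendor/lib:/system/lib". required_len counts both strings plus the ':'
// separator and the trailing NUL.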
1034
1035 void do_android_update_LD_LIBRARY_PATH(const char* ld_library_path) {
1036 if (!get_AT_SECURE()) {
1037 parse_LD_LIBRARY_PATH(ld_library_path);
1038 }
1039 }
1040
1041 soinfo* do_dlopen(const char* name, int flags, const android_dlextinfo* extinfo) {
1042 if ((flags & ~(RTLD_NOW|RTLD_LAZY|RTLD_LOCAL|RTLD_GLOBAL|RTLD_NOLOAD)) != 0) {
1043 DL_ERR("invalid flags to dlopen: %x", flags);
1044 return nullptr;
1045 }
1046 if (extinfo != nullptr) {
1047 if ((extinfo->flags & ~(ANDROID_DLEXT_VALID_FLAG_BITS)) != 0) {
1048 DL_ERR("invalid extended flags to android_dlopen_ext: 0x%" PRIx64, extinfo->flags);
1049 return nullptr;
1050 }
1051 if ((extinfo->flags & ANDROID_DLEXT_USE_LIBRARY_FD) == 0 &&
1052 (extinfo->flags & ANDROID_DLEXT_USE_LIBRARY_FD_OFFSET) != 0) {
1053 DL_ERR("invalid extended flag combination (ANDROID_DLEXT_USE_LIBRARY_FD_OFFSET without ANDROID_DLEXT_USE_LIBRARY_FD): 0x%" PRIx64, extinfo->flags);
1054 return nullptr;
1055 }
1056 }
1057 protect_data(PROT_READ | PROT_WRITE);
1058 soinfo* si = find_library(name, flags, extinfo);
1059 if (si != nullptr) {
1060 si->CallConstructors();
1061 }
1062 protect_data(PROT_READ);
1063 return si;
1064 }
1065
1066 void do_dlclose(soinfo* si) {
1067 protect_data(PROT_READ | PROT_WRITE);
1068 soinfo_unload(si);
1069 protect_data(PROT_READ);
1070 }
1071
1072 static ElfW(Addr) call_ifunc_resolver(ElfW(Addr) resolver_addr) {
1073 typedef ElfW(Addr) (*ifunc_resolver_t)(void);
1074 ifunc_resolver_t ifunc_resolver = reinterpret_cast<ifunc_resolver_t>(resolver_addr);
1075 ElfW(Addr) ifunc_addr = ifunc_resolver();
1076 TRACE_TYPE(RELO, "Called ifunc_resolver@%p. The result is %p", ifunc_resolver, reinterpret_cast<void*>(ifunc_addr));
1077
1078 return ifunc_addr;
1079 }
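// Background sketch (illustrative, not part of the original source): an
// IRELATIVE relocation stores the address of a resolver function rather than
// of the final symbol. A resolver looks roughly like
//   static ElfW(Addr) my_resolver() {  // hypothetical names
//     return reinterpret_cast<ElfW(Addr)>(cpu_has_neon() ? &fast_impl
//                                                        : &generic_impl);
//   }
// call_ifunc_resolver() invokes it, and the R_*_IRELATIVE cases below store
// the returned address into the relocated slot.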
1080
1081 #if defined(USE_RELA)
1082 int soinfo::Relocate(ElfW(Rela)* rela, unsigned count) {
1083 for (size_t idx = 0; idx < count; ++idx, ++rela) {
1084 unsigned type = ELFW(R_TYPE)(rela->r_info);
1085 unsigned sym = ELFW(R_SYM)(rela->r_info);
1086 ElfW(Addr) reloc = static_cast<ElfW(Addr)>(rela->r_offset + load_bias);
1087 ElfW(Addr) sym_addr = 0;
1088 const char* sym_name = nullptr;
1089
1090 DEBUG("Processing '%s' relocation at index %zd", name, idx);
1091 if (type == 0) { // R_*_NONE
1092 continue;
1093 }
1094
1095 ElfW(Sym)* s = nullptr;
1096 soinfo* lsi = nullptr;
1097
1098 if (sym != 0) {
1099 sym_name = get_string(symtab[sym].st_name);
1100 s = soinfo_do_lookup(this, sym_name, &lsi);
1101 if (s == nullptr) {
1102 // We only allow an undefined symbol if this is a weak reference...
1103 s = &symtab[sym];
1104 if (ELF_ST_BIND(s->st_info) != STB_WEAK) {
1105 DL_ERR("cannot locate symbol \"%s\" referenced by \"%s\"...", sym_name, name);
1106 return -1;
1107 }
1108
1109 /* IHI0044C AAELF 4.5.1.1:
1110
1111 Libraries are not searched to resolve weak references.
1112 It is not an error for a weak reference to remain unsatisfied.
1113
1114 During linking, the value of an undefined weak reference is:
1115 - Zero if the relocation type is absolute
1116 - The address of the place if the relocation is pc-relative
1117 - The address of nominal base address if the relocation
1118 type is base-relative.
1119 */
1120
1121 switch (type) {
1122 #if defined(__aarch64__)
1123 case R_AARCH64_JUMP_SLOT:
1124 case R_AARCH64_GLOB_DAT:
1125 case R_AARCH64_ABS64:
1126 case R_AARCH64_ABS32:
1127 case R_AARCH64_ABS16:
1128 case R_AARCH64_RELATIVE:
1129 case R_AARCH64_IRELATIVE:
1130 /*
1131 * The sym_addr was initialized to be zero above, or the relocation
1132 * code below does not care about value of sym_addr.
1133 * No need to do anything.
1134 */
1135 break;
1136 #elif defined(__x86_64__)
1137 case R_X86_64_JUMP_SLOT:
1138 case R_X86_64_GLOB_DAT:
1139 case R_X86_64_32:
1140 case R_X86_64_64:
1141 case R_X86_64_RELATIVE:
1142 case R_X86_64_IRELATIVE:
1143 // No need to do anything.
1144 break;
1145 case R_X86_64_PC32:
1146 sym_addr = reloc;
1147 break;
1148 #endif
1149 default:
1150 DL_ERR("unknown weak reloc type %d @ %p (%zu)", type, rela, idx);
1151 return -1;
1152 }
1153 } else {
1154 // We got a definition.
1155 sym_addr = lsi->resolve_symbol_address(s);
1156 }
1157 count_relocation(kRelocSymbol);
1158 }
1159
1160 switch (type) {
1161 #if defined(__aarch64__)
1162 case R_AARCH64_JUMP_SLOT:
1163 count_relocation(kRelocAbsolute);
1164 MARK(rela->r_offset);
1165 TRACE_TYPE(RELO, "RELO JMP_SLOT %16llx <- %16llx %s\n",
1166 reloc, (sym_addr + rela->r_addend), sym_name);
1167 *reinterpret_cast<ElfW(Addr)*>(reloc) = (sym_addr + rela->r_addend);
1168 break;
1169 case R_AARCH64_GLOB_DAT:
1170 count_relocation(kRelocAbsolute);
1171 MARK(rela->r_offset);
1172 TRACE_TYPE(RELO, "RELO GLOB_DAT %16llx <- %16llx %s\n",
1173 reloc, (sym_addr + rela->r_addend), sym_name);
1174 *reinterpret_cast<ElfW(Addr)*>(reloc) = (sym_addr + rela->r_addend);
1175 break;
1176 case R_AARCH64_ABS64:
1177 count_relocation(kRelocAbsolute);
1178 MARK(rela->r_offset);
1179 TRACE_TYPE(RELO, "RELO ABS64 %16llx <- %16llx %s\n",
1180 reloc, (sym_addr + rela->r_addend), sym_name);
1181 *reinterpret_cast<ElfW(Addr)*>(reloc) += (sym_addr + rela->r_addend);
1182 break;
1183 case R_AARCH64_ABS32:
1184 count_relocation(kRelocAbsolute);
1185 MARK(rela->r_offset);
1186 TRACE_TYPE(RELO, "RELO ABS32 %16llx <- %16llx %s\n",
1187 reloc, (sym_addr + rela->r_addend), sym_name);
1188 if ((static_cast<ElfW(Addr)>(INT32_MIN) <= (*reinterpret_cast<ElfW(Addr)*>(reloc) + (sym_addr + rela->r_addend))) &&
1189 ((*reinterpret_cast<ElfW(Addr)*>(reloc) + (sym_addr + rela->r_addend)) <= static_cast<ElfW(Addr)>(UINT32_MAX))) {
1190 *reinterpret_cast<ElfW(Addr)*>(reloc) += (sym_addr + rela->r_addend);
1191 } else {
1192 DL_ERR("0x%016llx out of range 0x%016llx to 0x%016llx",
1193 (*reinterpret_cast<ElfW(Addr)*>(reloc) + (sym_addr + rela->r_addend)),
1194 static_cast<ElfW(Addr)>(INT32_MIN),
1195 static_cast<ElfW(Addr)>(UINT32_MAX));
1196 return -1;
1197 }
1198 break;
1199 case R_AARCH64_ABS16:
1200 count_relocation(kRelocAbsolute);
1201 MARK(rela->r_offset);
1202 TRACE_TYPE(RELO, "RELO ABS16 %16llx <- %16llx %s\n",
1203 reloc, (sym_addr + rela->r_addend), sym_name);
1204 if ((static_cast<ElfW(Addr)>(INT16_MIN) <= (*reinterpret_cast<ElfW(Addr)*>(reloc) + (sym_addr + rela->r_addend))) &&
1205 ((*reinterpret_cast<ElfW(Addr)*>(reloc) + (sym_addr + rela->r_addend)) <= static_cast<ElfW(Addr)>(UINT16_MAX))) {
1206 *reinterpret_cast<ElfW(Addr)*>(reloc) += (sym_addr + rela->r_addend);
1207 } else {
1208 DL_ERR("0x%016llx out of range 0x%016llx to 0x%016llx",
1209 (*reinterpret_cast<ElfW(Addr)*>(reloc) + (sym_addr + rela->r_addend)),
1210 static_cast<ElfW(Addr)>(INT16_MIN),
1211 static_cast<ElfW(Addr)>(UINT16_MAX));
1212 return -1;
1213 }
1214 break;
1215 case R_AARCH64_PREL64:
1216 count_relocation(kRelocRelative);
1217 MARK(rela->r_offset);
1218 TRACE_TYPE(RELO, "RELO REL64 %16llx <- %16llx - %16llx %s\n",
1219 reloc, (sym_addr + rela->r_addend), rela->r_offset, sym_name);
1220 *reinterpret_cast<ElfW(Addr)*>(reloc) += (sym_addr + rela->r_addend) - rela->r_offset;
1221 break;
1222 case R_AARCH64_PREL32:
1223 count_relocation(kRelocRelative);
1224 MARK(rela->r_offset);
1225 TRACE_TYPE(RELO, "RELO REL32 %16llx <- %16llx - %16llx %s\n",
1226 reloc, (sym_addr + rela->r_addend), rela->r_offset, sym_name);
1227 if ((static_cast<ElfW(Addr)>(INT32_MIN) <= (*reinterpret_cast<ElfW(Addr)*>(reloc) + ((sym_addr + rela->r_addend) - rela->r_offset))) &&
1228 ((*reinterpret_cast<ElfW(Addr)*>(reloc) + ((sym_addr + rela->r_addend) - rela->r_offset)) <= static_cast<ElfW(Addr)>(UINT32_MAX))) {
1229 *reinterpret_cast<ElfW(Addr)*>(reloc) += ((sym_addr + rela->r_addend) - rela->r_offset);
1230 } else {
1231 DL_ERR("0x%016llx out of range 0x%016llx to 0x%016llx",
1232 (*reinterpret_cast<ElfW(Addr)*>(reloc) + ((sym_addr + rela->r_addend) - rela->r_offset)),
1233 static_cast<ElfW(Addr)>(INT32_MIN),
1234 static_cast<ElfW(Addr)>(UINT32_MAX));
1235 return -1;
1236 }
1237 break;
1238 case R_AARCH64_PREL16:
1239 count_relocation(kRelocRelative);
1240 MARK(rela->r_offset);
1241 TRACE_TYPE(RELO, "RELO REL16 %16llx <- %16llx - %16llx %s\n",
1242 reloc, (sym_addr + rela->r_addend), rela->r_offset, sym_name);
1243 if ((static_cast<ElfW(Addr)>(INT16_MIN) <= (*reinterpret_cast<ElfW(Addr)*>(reloc) + ((sym_addr + rela->r_addend) - rela->r_offset))) &&
1244 ((*reinterpret_cast<ElfW(Addr)*>(reloc) + ((sym_addr + rela->r_addend) - rela->r_offset)) <= static_cast<ElfW(Addr)>(UINT16_MAX))) {
1245 *reinterpret_cast<ElfW(Addr)*>(reloc) += ((sym_addr + rela->r_addend) - rela->r_offset);
1246 } else {
1247 DL_ERR("0x%016llx out of range 0x%016llx to 0x%016llx",
1248 (*reinterpret_cast<ElfW(Addr)*>(reloc) + ((sym_addr + rela->r_addend) - rela->r_offset)),
1249 static_cast<ElfW(Addr)>(INT16_MIN),
1250 static_cast<ElfW(Addr)>(UINT16_MAX));
1251 return -1;
1252 }
1253 break;
1254
1255 case R_AARCH64_RELATIVE:
1256 count_relocation(kRelocRelative);
1257 MARK(rela->r_offset);
1258 if (sym) {
1259 DL_ERR("odd RELATIVE form...");
1260 return -1;
1261 }
1262 TRACE_TYPE(RELO, "RELO RELATIVE %16llx <- %16llx\n",
1263 reloc, (base + rela->r_addend));
1264 *reinterpret_cast<ElfW(Addr)*>(reloc) = (base + rela->r_addend);
1265 break;
1266
1267 case R_AARCH64_IRELATIVE:
1268 count_relocation(kRelocRelative);
1269 MARK(rela->r_offset);
1270 TRACE_TYPE(RELO, "RELO IRELATIVE %16llx <- %16llx\n", reloc, (base + rela->r_addend));
1271 *reinterpret_cast<ElfW(Addr)*>(reloc) = call_ifunc_resolver(base + rela->r_addend);
1272 break;
1273
1274 case R_AARCH64_COPY:
1275 /*
1276 * ET_EXEC is not supported so this should not happen.
1277 *
1278 * http://infocenter.arm.com/help/topic/com.arm.doc.ihi0044d/IHI0044D_aaelf.pdf
1279 *
1280 * Section 4.7.1.10 "Dynamic relocations"
1281 * R_AARCH64_COPY may only appear in executable objects where e_type is
1282 * set to ET_EXEC.
1283 */
1284 DL_ERR("%s R_AARCH64_COPY relocations are not supported", name);
1285 return -1;
1286 case R_AARCH64_TLS_TPREL64:
1287 TRACE_TYPE(RELO, "RELO TLS_TPREL64 *** %16llx <- %16llx - %16llx\n",
1288 reloc, (sym_addr + rela->r_addend), rela->r_offset);
1289 break;
1290 case R_AARCH64_TLS_DTPREL32:
1291 TRACE_TYPE(RELO, "RELO TLS_DTPREL32 *** %16llx <- %16llx - %16llx\n",
1292 reloc, (sym_addr + rela->r_addend), rela->r_offset);
1293 break;
1294 #elif defined(__x86_64__)
1295 case R_X86_64_JUMP_SLOT:
1296 count_relocation(kRelocAbsolute);
1297 MARK(rela->r_offset);
1298 TRACE_TYPE(RELO, "RELO JMP_SLOT %08zx <- %08zx %s", static_cast<size_t>(reloc),
1299 static_cast<size_t>(sym_addr + rela->r_addend), sym_name);
1300 *reinterpret_cast<ElfW(Addr)*>(reloc) = sym_addr + rela->r_addend;
1301 break;
1302 case R_X86_64_GLOB_DAT:
1303 count_relocation(kRelocAbsolute);
1304 MARK(rela->r_offset);
1305 TRACE_TYPE(RELO, "RELO GLOB_DAT %08zx <- %08zx %s", static_cast<size_t>(reloc),
1306 static_cast<size_t>(sym_addr + rela->r_addend), sym_name);
1307 *reinterpret_cast<ElfW(Addr)*>(reloc) = sym_addr + rela->r_addend;
1308 break;
1309 case R_X86_64_RELATIVE:
1310 count_relocation(kRelocRelative);
1311 MARK(rela->r_offset);
1312 if (sym) {
1313 DL_ERR("odd RELATIVE form...");
1314 return -1;
1315 }
1316 TRACE_TYPE(RELO, "RELO RELATIVE %08zx <- +%08zx", static_cast<size_t>(reloc),
1317 static_cast<size_t>(base));
1318 *reinterpret_cast<ElfW(Addr)*>(reloc) = base + rela->r_addend;
1319 break;
1320 case R_X86_64_IRELATIVE:
1321 count_relocation(kRelocRelative);
1322 MARK(rela->r_offset);
1323 TRACE_TYPE(RELO, "RELO IRELATIVE %16llx <- %16llx\n", reloc, (base + rela->r_addend));
1324 *reinterpret_cast<ElfW(Addr)*>(reloc) = call_ifunc_resolver(base + rela->r_addend);
1325 break;
1326 case R_X86_64_32:
1327 count_relocation(kRelocRelative);
1328 MARK(rela->r_offset);
1329 TRACE_TYPE(RELO, "RELO R_X86_64_32 %08zx <- +%08zx %s", static_cast<size_t>(reloc),
1330 static_cast<size_t>(sym_addr), sym_name);
1331 *reinterpret_cast<ElfW(Addr)*>(reloc) = sym_addr + rela->r_addend;
1332 break;
1333 case R_X86_64_64:
1334 count_relocation(kRelocRelative);
1335 MARK(rela->r_offset);
1336 TRACE_TYPE(RELO, "RELO R_X86_64_64 %08zx <- +%08zx %s", static_cast<size_t>(reloc),
1337 static_cast<size_t>(sym_addr), sym_name);
1338 *reinterpret_cast<ElfW(Addr)*>(reloc) = sym_addr + rela->r_addend;
1339 break;
1340 case R_X86_64_PC32:
1341 count_relocation(kRelocRelative);
1342 MARK(rela->r_offset);
1343 TRACE_TYPE(RELO, "RELO R_X86_64_PC32 %08zx <- +%08zx (%08zx - %08zx) %s",
1344 static_cast<size_t>(reloc), static_cast<size_t>(sym_addr - reloc),
1345 static_cast<size_t>(sym_addr), static_cast<size_t>(reloc), sym_name);
1346 *reinterpret_cast<ElfW(Addr)*>(reloc) = sym_addr + rela->r_addend - reloc;
1347 break;
1348 #endif
1349
1350 default:
1351 DL_ERR("unknown reloc type %d @ %p (%zu)", type, rela, idx);
1352 return -1;
1353 }
1354 }
1355 return 0;
1356 }
1357
1358 #else // REL, not RELA.
1359 int soinfo::Relocate(ElfW(Rel)* rel, unsigned count) {
1360 for (size_t idx = 0; idx < count; ++idx, ++rel) {
1361 unsigned type = ELFW(R_TYPE)(rel->r_info);
1362 // TODO: don't use unsigned for 'sym'. Use uint32_t or ElfW(Addr) instead.
1363 unsigned sym = ELFW(R_SYM)(rel->r_info);
1364 ElfW(Addr) reloc = static_cast<ElfW(Addr)>(rel->r_offset + load_bias);
1365 ElfW(Addr) sym_addr = 0;
1366 const char* sym_name = nullptr;
1367
1368 DEBUG("Processing '%s' relocation at index %zd", name, idx);
1369 if (type == 0) { // R_*_NONE
1370 continue;
1371 }
1372
1373 ElfW(Sym)* s = nullptr;
1374 soinfo* lsi = nullptr;
1375
1376 if (sym != 0) {
1377 sym_name = get_string(symtab[sym].st_name);
1378 s = soinfo_do_lookup(this, sym_name, &lsi);
1379 if (s == nullptr) {
1380 // We only allow an undefined symbol if this is a weak reference...
1381 s = &symtab[sym];
1382 if (ELF_ST_BIND(s->st_info) != STB_WEAK) {
1383 DL_ERR("cannot locate symbol \"%s\" referenced by \"%s\"...", sym_name, name);
1384 return -1;
1385 }
1386
1387 /* IHI0044C AAELF 4.5.1.1:
1388
1389 Libraries are not searched to resolve weak references.
1390 It is not an error for a weak reference to remain
1391 unsatisfied.
1392
1393 During linking, the value of an undefined weak reference is:
1394 - Zero if the relocation type is absolute
1395 - The address of the place if the relocation is pc-relative
1396 - The address of nominal base address if the relocation
1397 type is base-relative.
1398 */
1399
1400 switch (type) {
1401 #if defined(__arm__)
1402 case R_ARM_JUMP_SLOT:
1403 case R_ARM_GLOB_DAT:
1404 case R_ARM_ABS32:
1405 case R_ARM_RELATIVE: /* Don't care. */
1406 // sym_addr was initialized to be zero above or relocation
1407 // code below does not care about value of sym_addr.
1408 // No need to do anything.
1409 break;
1410 #elif defined(__i386__)
1411 case R_386_JMP_SLOT:
1412 case R_386_GLOB_DAT:
1413 case R_386_32:
1414 case R_386_RELATIVE: /* Don't care. */
1415 case R_386_IRELATIVE:
1416 // sym_addr was initialized to be zero above or relocation
1417 // code below does not care about value of sym_addr.
1418 // No need to do anything.
1419 break;
1420 case R_386_PC32:
1421 sym_addr = reloc;
1422 break;
1423 #endif
1424
1425 #if defined(__arm__)
1426 case R_ARM_COPY:
1427 // Fall through. Can't really copy if weak symbol is not found at run-time.
1428 #endif
1429 default:
1430 DL_ERR("unknown weak reloc type %d @ %p (%zu)", type, rel, idx);
1431 return -1;
1432 }
1433 } else {
1434 // We got a definition.
1435 sym_addr = lsi->resolve_symbol_address(s);
1436 }
1437 count_relocation(kRelocSymbol);
1438 }
1439
1440 switch (type) {
1441 #if defined(__arm__)
1442 case R_ARM_JUMP_SLOT:
1443 count_relocation(kRelocAbsolute);
1444 MARK(rel->r_offset);
1445 TRACE_TYPE(RELO, "RELO JMP_SLOT %08x <- %08x %s", reloc, sym_addr, sym_name);
1446 *reinterpret_cast<ElfW(Addr)*>(reloc) = sym_addr;
1447 break;
1448 case R_ARM_GLOB_DAT:
1449 count_relocation(kRelocAbsolute);
1450 MARK(rel->r_offset);
1451 TRACE_TYPE(RELO, "RELO GLOB_DAT %08x <- %08x %s", reloc, sym_addr, sym_name);
1452 *reinterpret_cast<ElfW(Addr)*>(reloc) = sym_addr;
1453 break;
1454 case R_ARM_ABS32:
1455 count_relocation(kRelocAbsolute);
1456 MARK(rel->r_offset);
1457 TRACE_TYPE(RELO, "RELO ABS %08x <- %08x %s", reloc, sym_addr, sym_name);
1458 *reinterpret_cast<ElfW(Addr)*>(reloc) += sym_addr;
1459 break;
1460 case R_ARM_REL32:
1461 count_relocation(kRelocRelative);
1462 MARK(rel->r_offset);
1463 TRACE_TYPE(RELO, "RELO REL32 %08x <- %08x - %08x %s",
1464 reloc, sym_addr, rel->r_offset, sym_name);
1465 *reinterpret_cast<ElfW(Addr)*>(reloc) += sym_addr - rel->r_offset;
1466 break;
1467 case R_ARM_COPY:
1468 /*
1469 * ET_EXEC is not supported so this should not happen.
1470 *
1471 * http://infocenter.arm.com/help/topic/com.arm.doc.ihi0044d/IHI0044D_aaelf.pdf
1472 *
1473 * Section 4.7.1.10 "Dynamic relocations"
1474 * R_ARM_COPY may only appear in executable objects where e_type is
1475 * set to ET_EXEC.
1476 */
1477 DL_ERR("%s R_ARM_COPY relocations are not supported", name);
1478 return -1;
1479 #elif defined(__i386__)
1480 case R_386_JMP_SLOT:
1481 count_relocation(kRelocAbsolute);
1482 MARK(rel->r_offset);
1483 TRACE_TYPE(RELO, "RELO JMP_SLOT %08x <- %08x %s", reloc, sym_addr, sym_name);
1484 *reinterpret_cast<ElfW(Addr)*>(reloc) = sym_addr;
1485 break;
1486 case R_386_GLOB_DAT:
1487 count_relocation(kRelocAbsolute);
1488 MARK(rel->r_offset);
1489 TRACE_TYPE(RELO, "RELO GLOB_DAT %08x <- %08x %s", reloc, sym_addr, sym_name);
1490 *reinterpret_cast<ElfW(Addr)*>(reloc) = sym_addr;
1491 break;
1492 case R_386_32:
1493 count_relocation(kRelocRelative);
1494 MARK(rel->r_offset);
1495 TRACE_TYPE(RELO, "RELO R_386_32 %08x <- +%08x %s", reloc, sym_addr, sym_name);
1496 *reinterpret_cast<ElfW(Addr)*>(reloc) += sym_addr;
1497 break;
1498 case R_386_PC32:
1499 count_relocation(kRelocRelative);
1500 MARK(rel->r_offset);
1501 TRACE_TYPE(RELO, "RELO R_386_PC32 %08x <- +%08x (%08x - %08x) %s",
1502 reloc, (sym_addr - reloc), sym_addr, reloc, sym_name);
1503 *reinterpret_cast<ElfW(Addr)*>(reloc) += (sym_addr - reloc);
1504 break;
1505 #elif defined(__mips__)
1506 case R_MIPS_REL32:
1507 #if defined(__LP64__)
1508 // MIPS Elf64_Rel entries contain compound relocations.
1509 // We only handle the R_MIPS_NONE|R_MIPS_64|R_MIPS_REL32 case.
1510 if (ELF64_R_TYPE2(rel->r_info) != R_MIPS_64 ||
1511 ELF64_R_TYPE3(rel->r_info) != R_MIPS_NONE) {
1512 DL_ERR("Unexpected compound relocation type:%d type2:%d type3:%d @ %p (%zu)",
1513 type, (unsigned)ELF64_R_TYPE2(rel->r_info),
1514 (unsigned)ELF64_R_TYPE3(rel->r_info), rel, idx);
1515 return -1;
1516 }
1517 #endif
1518 count_relocation(kRelocAbsolute);
1519 MARK(rel->r_offset);
1520 TRACE_TYPE(RELO, "RELO REL32 %08zx <- %08zx %s", static_cast<size_t>(reloc),
1521 static_cast<size_t>(sym_addr), sym_name ? sym_name : "*SECTIONHDR*");
1522 if (s) {
1523 *reinterpret_cast<ElfW(Addr)*>(reloc) += sym_addr;
1524 } else {
1525 *reinterpret_cast<ElfW(Addr)*>(reloc) += base;
1526 }
1527 break;
1528 #endif
1529
1530 #if defined(__arm__)
1531 case R_ARM_RELATIVE:
1532 #elif defined(__i386__)
1533 case R_386_RELATIVE:
1534 #endif
1535 count_relocation(kRelocRelative);
1536 MARK(rel->r_offset);
1537 if (sym) {
1538 DL_ERR("odd RELATIVE form...");
1539 return -1;
1540 }
1541 TRACE_TYPE(RELO, "RELO RELATIVE %p <- +%p",
1542 reinterpret_cast<void*>(reloc), reinterpret_cast<void*>(base));
1543 *reinterpret_cast<ElfW(Addr)*>(reloc) += base;
1544 break;
1545 #if defined(__i386__)
1546 case R_386_IRELATIVE:
1547 count_relocation(kRelocRelative);
1548 MARK(rel->r_offset);
1549 TRACE_TYPE(RELO, "RELO IRELATIVE %p <- %p", reinterpret_cast<void*>(reloc), reinterpret_cast<void*>(base));
1550 *reinterpret_cast<ElfW(Addr)*>(reloc) = call_ifunc_resolver(base + *reinterpret_cast<ElfW(Addr)*>(reloc));
1551 break;
1552 #endif
1553
1554 default:
1555 DL_ERR("unknown reloc type %d @ %p (%zu)", type, rel, idx);
1556 return -1;
1557 }
1558 }
1559 return 0;
1560 }
1561 #endif
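// REL vs RELA (descriptive note, not part of the original source): RELA
// entries carry an explicit addend in rela->r_addend, while REL entries have
// no addend field; for REL the implicit addend is whatever value is already
// stored at the relocated location, which is why the REL cases above use '+='
// where the corresponding RELA cases add rela->r_addend explicitly.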
1562
1563 #if defined(__mips__)
1564 static bool mips_relocate_got(soinfo* si) {
1565 ElfW(Addr)** got = si->plt_got;
1566 if (got == nullptr) {
1567 return true;
1568 }
1569 unsigned local_gotno = si->mips_local_gotno;
1570 unsigned gotsym = si->mips_gotsym;
1571 unsigned symtabno = si->mips_symtabno;
1572 ElfW(Sym)* symtab = si->symtab;
1573
1574 // got[0] is the address of the lazy resolver function.
1575 // got[1] may be used for a GNU extension.
1576 // Set it to a recognizable address in case someone calls it (should be _rtld_bind_start).
1577 // FIXME: maybe this should be in a separate routine?
1578 if ((si->flags & FLAG_LINKER) == 0) {
1579 size_t g = 0;
1580 got[g++] = reinterpret_cast<ElfW(Addr)*>(0xdeadbeef);
1581 if (reinterpret_cast<intptr_t>(got[g]) < 0) {
1582 got[g++] = reinterpret_cast<ElfW(Addr)*>(0xdeadfeed);
1583 }
1584 // Relocate the local GOT entries.
1585 for (; g < local_gotno; g++) {
1586 got[g] = reinterpret_cast<ElfW(Addr)*>(reinterpret_cast<uintptr_t>(got[g]) + si->load_bias);
1587 }
1588 }
1589
1590 // Now for the global GOT entries...
1591 ElfW(Sym)* sym = symtab + gotsym;
1592 got = si->plt_got + local_gotno;
1593 for (size_t g = gotsym; g < symtabno; g++, sym++, got++) {
1594 // This is an undefined reference... try to locate it.
1595 const char* sym_name = si->get_string(sym->st_name);
1596 soinfo* lsi = nullptr;
1597 ElfW(Sym)* s = soinfo_do_lookup(si, sym_name, &lsi);
1598 if (s == nullptr) {
1599 // We only allow an undefined symbol if this is a weak reference.
1600 s = &symtab[g];
1601 if (ELF_ST_BIND(s->st_info) != STB_WEAK) {
1602 DL_ERR("cannot locate \"%s\"...", sym_name);
1603 return false;
1604 }
1605 *got = 0;
1606 } else {
1607 // FIXME: is this sufficient?
1608 // For reference see NetBSD link loader
1609 // http://cvsweb.netbsd.org/bsdweb.cgi/src/libexec/ld.elf_so/arch/mips/mips_reloc.c?rev=1.53&content-type=text/x-cvsweb-markup
1610 *got = reinterpret_cast<ElfW(Addr)*>(lsi->resolve_symbol_address(s));
1611 }
1612 }
1613 return true;
1614 }
1615 #endif
1616
1617 void soinfo::CallArray(const char* array_name __unused, linker_function_t* functions, size_t count, bool reverse) {
1618 if (functions == nullptr) {
1619 return;
1620 }
1621
1622 TRACE("[ Calling %s (size %zd) @ %p for '%s' ]", array_name, count, functions, name);
1623
1624 int begin = reverse ? (count - 1) : 0;
1625 int end = reverse ? -1 : count;
1626 int step = reverse ? -1 : 1;
1627
1628 for (int i = begin; i != end; i += step) {
1629 TRACE("[ %s[%d] == %p ]", array_name, i, functions[i]);
1630 CallFunction("function", functions[i]);
1631 }
1632
1633 TRACE("[ Done calling %s for '%s' ]", array_name, name);
1634 }
1635
1636 void soinfo::CallFunction(const char* function_name __unused, linker_function_t function) {
1637 if (function == nullptr || reinterpret_cast<uintptr_t>(function) == static_cast<uintptr_t>(-1)) {
1638 return;
1639 }
1640
1641 TRACE("[ Calling %s @ %p for '%s' ]", function_name, function, name);
1642 function();
1643 TRACE("[ Done calling %s @ %p for '%s' ]", function_name, function, name);
1644
1645 // The function may have called dlopen(3) or dlclose(3), so we need to ensure our data structures
1646 // are still writable. This happens with our debug malloc (see http://b/7941716).
1647 protect_data(PROT_READ | PROT_WRITE);
1648 }
1649
1650 void soinfo::CallPreInitConstructors() {
1651 // DT_PREINIT_ARRAY functions are called before any other constructors for executables,
1652 // but ignored in a shared library.
1653 CallArray("DT_PREINIT_ARRAY", preinit_array, preinit_array_count, false);
1654 }
1655
1656 void soinfo::CallConstructors() {
1657 if (constructors_called) {
1658 return;
1659 }
1660
1661 // We set constructors_called before actually calling the constructors, otherwise it doesn't
1662 // protect against recursive constructor calls. One simple example of constructor recursion
1663 // is the libc debug malloc, which is implemented in libc_malloc_debug_leak.so:
1664 // 1. The program depends on libc, so libc's constructor is called here.
1665 // 2. The libc constructor calls dlopen() to load libc_malloc_debug_leak.so.
1666 // 3. dlopen() calls the constructors on the newly created
1667 // soinfo for libc_malloc_debug_leak.so.
1668 // 4. The debug .so depends on libc, so CallConstructors is
1669 // called again with the libc soinfo. If it doesn't trigger the early-
1670 // out above, the libc constructor will be called again (recursively!).
1671 constructors_called = true;
1672
1673 if ((flags & FLAG_EXE) == 0 && preinit_array != nullptr) {
1674 // The GNU dynamic linker silently ignores these, but we warn the developer.
1675 PRINT("\"%s\": ignoring %zd-entry DT_PREINIT_ARRAY in shared library!",
1676 name, preinit_array_count);
1677 }
1678
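  // Run the constructors of this library's dependencies first: a DT_NEEDED
  // library must be initialized before the libraries that depend on it.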
1679 get_children().for_each([] (soinfo* si) {
1680 si->CallConstructors();
1681 });
1682
1683 TRACE("\"%s\": calling constructors", name);
1684
1685 // DT_INIT should be called before DT_INIT_ARRAY if both are present.
1686 CallFunction("DT_INIT", init_func);
1687 CallArray("DT_INIT_ARRAY", init_array, init_array_count, false);
1688 }
1689
1690 void soinfo::CallDestructors() {
1691 if (!constructors_called) {
1692 return;
1693 }
1694 TRACE("\"%s\": calling destructors", name);
1695
1696 // DT_FINI_ARRAY must be parsed in reverse order.
1697 CallArray("DT_FINI_ARRAY", fini_array, fini_array_count, true);
1698
1699 // DT_FINI should be called after DT_FINI_ARRAY if both are present.
1700 CallFunction("DT_FINI", fini_func);
1701
1702 // This is needed for a second call to dlopen() after the library
1703 // has been unloaded with RTLD_NODELETE.
1704 constructors_called = false;
1705 }
1706
1707 void soinfo::add_child(soinfo* child) {
1708 if (has_min_version(0)) {
1709 child->parents.push_back(this);
1710 this->children.push_back(child);
1711 }
1712 }
1713
1714 void soinfo::remove_all_links() {
1715 if (!has_min_version(0)) {
1716 return;
1717 }
1718
1719 // 1. Untie connected soinfos from 'this'.
1720 children.for_each([&] (soinfo* child) {
1721 child->parents.remove_if([&] (const soinfo* parent) {
1722 return parent == this;
1723 });
1724 });
1725
1726 parents.for_each([&] (soinfo* parent) {
1727 parent->children.remove_if([&] (const soinfo* child) {
1728 return child == this;
1729 });
1730 });
1731
1732 // 2. Once everything untied - clear local lists.
1733 parents.clear();
1734 children.clear();
1735 }
1736
1737 dev_t soinfo::get_st_dev() {
1738 if (has_min_version(0)) {
1739 return st_dev;
1740 }
1741
1742 return 0;
1743 }
1744
1745 ino_t soinfo::get_st_ino() {
1746 if (has_min_version(0)) {
1747 return st_ino;
1748 }
1749
1750 return 0;
1751 }
1752
1753 off64_t soinfo::get_file_offset() {
1754 if (has_min_version(1)) {
1755 return file_offset;
1756 }
1757
1758 return 0;
1759 }
1760
1761 // This is returned by get_children()/get_parents() if
1762 // 'this->flags' does not have FLAG_NEW_SOINFO set.
1763 static soinfo::soinfo_list_t g_empty_list;
1764
1765 soinfo::soinfo_list_t& soinfo::get_children() {
1766 if (has_min_version(0)) {
1767 return this->children;
1768 }
1769
1770 return g_empty_list;
1771 }
1772
1773 soinfo::soinfo_list_t& soinfo::get_parents() {
1774 if ((this->flags & FLAG_NEW_SOINFO) == 0) {
1775 return g_empty_list;
1776 }
1777
1778 return this->parents;
1779 }
1780
1781 ElfW(Addr) soinfo::resolve_symbol_address(ElfW(Sym)* s) {
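  // For STT_GNU_IFUNC symbols, st_value (plus the load bias) is the address of
  // a resolver function; calling it yields the address of the implementation
  // that should actually be used.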
1782 if (ELF_ST_TYPE(s->st_info) == STT_GNU_IFUNC) {
1783 return call_ifunc_resolver(s->st_value + load_bias);
1784 }
1785
1786 return static_cast<ElfW(Addr)>(s->st_value + load_bias);
1787 }
1788
1789 const char* soinfo::get_string(ElfW(Word) index) const {
1790 if (has_min_version(1) && (index >= strtab_size)) {
1791 __libc_fatal("%s: strtab out of bounds error; STRSZ=%zd, name=%d", name, strtab_size, index);
1792 }
1793
1794 return strtab + index;
1795 }
1796
1797 /* Force any stdio file descriptor (stdin, stdout, stderr) that is closed
1798    to be associated with /dev/null. */
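/* Rationale (the classic setuid stdio hole): if fds 0-2 are closed when a
   privileged program starts, a later open() can hand out one of those
   descriptors, and library code writing to "stderr" would then scribble over
   an unrelated file. See the FreeBSD-SA-02:23.stdio advisory referenced in
   __linker_init_post_relocation(). */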
1799 static int nullify_closed_stdio() {
1800 int dev_null, i, status;
1801 int return_value = 0;
1802
1803 dev_null = TEMP_FAILURE_RETRY(open("/dev/null", O_RDWR));
1804 if (dev_null < 0) {
1805 DL_ERR("cannot open /dev/null: %s", strerror(errno));
1806 return -1;
1807 }
1808 TRACE("[ Opened /dev/null file-descriptor=%d]", dev_null);
1809
1810 /* For each of the three stdio file descriptors: if it is not open,
1811    dup /dev/null onto it. */
1812 for (i = 0; i < 3; i++) {
1813 /* If it is /dev/null already, we are done. */
1814 if (i == dev_null) {
1815 continue;
1816 }
1817
1818 TRACE("[ Nullifying stdio file descriptor %d]", i);
1819 status = TEMP_FAILURE_RETRY(fcntl(i, F_GETFL));
1820
1821 /* If the descriptor is open, leave it alone. */
1822 if (status != -1) {
1823 continue;
1824 }
1825
1826 /* The only error we allow is that the file descriptor does not
1827 exist, in which case we dup /dev/null to it. */
1828 if (errno != EBADF) {
1829 DL_ERR("fcntl failed: %s", strerror(errno));
1830 return_value = -1;
1831 continue;
1832 }
1833
1834 /* Try dupping /dev/null to this stdio file descriptor and
1835 repeat if there is a signal. Note that any errors in closing
1836 the stdio descriptor are lost. */
1837 status = TEMP_FAILURE_RETRY(dup2(dev_null, i));
1838 if (status < 0) {
1839 DL_ERR("dup2 failed: %s", strerror(errno));
1840 return_value = -1;
1841 continue;
1842 }
1843 }
1844
1845 /* If /dev/null is not one of the stdio file descriptors, close it. */
1846 if (dev_null > 2) {
1847 TRACE("[ Closing /dev/null file-descriptor=%d]", dev_null);
1848 status = TEMP_FAILURE_RETRY(close(dev_null));
1849 if (status == -1) {
1850 DL_ERR("close failed: %s", strerror(errno));
1851 return_value = -1;
1852 }
1853 }
1854
1855 return return_value;
1856 }
1857
1858 bool soinfo::PrelinkImage() {
1859 /* Extract dynamic section */
1860 ElfW(Word) dynamic_flags = 0;
1861 phdr_table_get_dynamic_section(phdr, phnum, load_bias, &dynamic, &dynamic_flags);
1862
1863 /* We can't log anything until the linker is relocated */
1864 bool relocating_linker = (flags & FLAG_LINKER) != 0;
1865 if (!relocating_linker) {
1866 INFO("[ linking %s ]", name);
1867 DEBUG("si->base = %p si->flags = 0x%08x", reinterpret_cast<void*>(base), flags);
1868 }
1869
1870 if (dynamic == nullptr) {
1871 if (!relocating_linker) {
1872 DL_ERR("missing PT_DYNAMIC in \"%s\"", name);
1873 }
1874 return false;
1875 } else {
1876 if (!relocating_linker) {
1877 DEBUG("dynamic = %p", dynamic);
1878 }
1879 }
1880
1881 #if defined(__arm__)
1882 (void) phdr_table_get_arm_exidx(phdr, phnum, load_bias,
1883 &ARM_exidx, &ARM_exidx_count);
1884 #endif
1885
1886 // Extract useful information from dynamic section.
1887 uint32_t needed_count = 0;
1888 for (ElfW(Dyn)* d = dynamic; d->d_tag != DT_NULL; ++d) {
1889 DEBUG("d = %p, d[0](tag) = %p d[1](val) = %p",
1890 d, reinterpret_cast<void*>(d->d_tag), reinterpret_cast<void*>(d->d_un.d_val));
1891 switch (d->d_tag) {
1892 case DT_SONAME:
1893 // TODO: glibc dynamic linker uses this name for
1894 // initial library lookup; consider doing the same here.
1895 break;
1896
1897 case DT_HASH:
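        // ELF SysV hash table layout, as an array of 32-bit words:
        //   [0] nbucket, [1] nchain, then nbucket bucket entries, then nchain
        //   chain entries -- hence the fixed offsets of 8 and 8 + nbucket * 4.
        // A lookup does: n = bucket[hash % nbucket]; and then follows chain[n]
        // until it reaches 0 (STN_UNDEF).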
1898 nbucket = reinterpret_cast<uint32_t*>(load_bias + d->d_un.d_ptr)[0];
1899 nchain = reinterpret_cast<uint32_t*>(load_bias + d->d_un.d_ptr)[1];
1900 bucket = reinterpret_cast<uint32_t*>(load_bias + d->d_un.d_ptr + 8);
1901 chain = reinterpret_cast<uint32_t*>(load_bias + d->d_un.d_ptr + 8 + nbucket * 4);
1902 break;
1903
1904 case DT_STRTAB:
1905 strtab = reinterpret_cast<const char*>(load_bias + d->d_un.d_ptr);
1906 break;
1907
1908 case DT_STRSZ:
1909 strtab_size = d->d_un.d_val;
1910 break;
1911
1912 case DT_SYMTAB:
1913 symtab = reinterpret_cast<ElfW(Sym)*>(load_bias + d->d_un.d_ptr);
1914 break;
1915
1916 case DT_SYMENT:
1917 if (d->d_un.d_val != sizeof(ElfW(Sym))) {
1918 DL_ERR("invalid DT_SYMENT: %zd", static_cast<size_t>(d->d_un.d_val));
1919 return false;
1920 }
1921 break;
1922
1923 case DT_PLTREL:
1924 #if defined(USE_RELA)
1925 if (d->d_un.d_val != DT_RELA) {
1926 DL_ERR("unsupported DT_PLTREL in \"%s\"; expected DT_RELA", name);
1927 return false;
1928 }
1929 #else
1930 if (d->d_un.d_val != DT_REL) {
1931 DL_ERR("unsupported DT_PLTREL in \"%s\"; expected DT_REL", name);
1932 return false;
1933 }
1934 #endif
1935 break;
1936
1937 case DT_JMPREL:
1938 #if defined(USE_RELA)
1939 plt_rela = reinterpret_cast<ElfW(Rela)*>(load_bias + d->d_un.d_ptr);
1940 #else
1941 plt_rel = reinterpret_cast<ElfW(Rel)*>(load_bias + d->d_un.d_ptr);
1942 #endif
1943 break;
1944
1945 case DT_PLTRELSZ:
1946 #if defined(USE_RELA)
1947 plt_rela_count = d->d_un.d_val / sizeof(ElfW(Rela));
1948 #else
1949 plt_rel_count = d->d_un.d_val / sizeof(ElfW(Rel));
1950 #endif
1951 break;
1952
1953 case DT_PLTGOT:
1954 #if defined(__mips__)
1955 // Used by mips and mips64.
1956 plt_got = reinterpret_cast<ElfW(Addr)**>(load_bias + d->d_un.d_ptr);
1957 #endif
1958 // Ignore for other platforms... (because RTLD_LAZY is not supported)
1959 break;
1960
1961 case DT_DEBUG:
1962 // Set the DT_DEBUG entry to the address of _r_debug for GDB
1963 // if the dynamic table is writable
1964 // FIXME: not working currently for N64
1965 // The flags for the LOAD and DYNAMIC program headers do not agree.
1966 // The LOAD section containing the dynamic table has been mapped as
1967 // read-only, but the DYNAMIC header claims it is writable.
1968 #if !(defined(__mips__) && defined(__LP64__))
1969 if ((dynamic_flags & PF_W) != 0) {
1970 d->d_un.d_val = reinterpret_cast<uintptr_t>(&_r_debug);
1971 }
1972 break;
1973 #endif
1974 #if defined(USE_RELA)
1975 case DT_RELA:
1976 rela = reinterpret_cast<ElfW(Rela)*>(load_bias + d->d_un.d_ptr);
1977 break;
1978
1979 case DT_RELASZ:
1980 rela_count = d->d_un.d_val / sizeof(ElfW(Rela));
1981 break;
1982
1983 case DT_RELAENT:
1984 if (d->d_un.d_val != sizeof(ElfW(Rela))) {
1985 DL_ERR("invalid DT_RELAENT: %zd", static_cast<size_t>(d->d_un.d_val));
1986 return false;
1987 }
1988 break;
1989
1990 // ignored (see DT_RELCOUNT comments for details)
1991 case DT_RELACOUNT:
1992 break;
1993
1994 case DT_REL:
1995 DL_ERR("unsupported DT_REL in \"%s\"", name);
1996 return false;
1997
1998 case DT_RELSZ:
1999 DL_ERR("unsupported DT_RELSZ in \"%s\"", name);
2000 return false;
2001 #else
2002 case DT_REL:
2003 rel = reinterpret_cast<ElfW(Rel)*>(load_bias + d->d_un.d_ptr);
2004 break;
2005
2006 case DT_RELSZ:
2007 rel_count = d->d_un.d_val / sizeof(ElfW(Rel));
2008 break;
2009
2010 case DT_RELENT:
2011 if (d->d_un.d_val != sizeof(ElfW(Rel))) {
2012 DL_ERR("invalid DT_RELENT: %zd", static_cast<size_t>(d->d_un.d_val));
2013 return false;
2014 }
2015 break;
2016
2017 // "Indicates that all RELATIVE relocations have been concatenated together,
2018 // and specifies the RELATIVE relocation count."
2019 //
2020 // TODO: Spec also mentions that this can be used to optimize relocation process;
2021 // Not currently used by bionic linker - ignored.
2022 case DT_RELCOUNT:
2023 break;
2024 case DT_RELA:
2025 DL_ERR("unsupported DT_RELA in \"%s\"", name);
2026 return false;
2027 #endif
2028 case DT_INIT:
2029 init_func = reinterpret_cast<linker_function_t>(load_bias + d->d_un.d_ptr);
2030 DEBUG("%s constructors (DT_INIT) found at %p", name, init_func);
2031 break;
2032
2033 case DT_FINI:
2034 fini_func = reinterpret_cast<linker_function_t>(load_bias + d->d_un.d_ptr);
2035 DEBUG("%s destructors (DT_FINI) found at %p", name, fini_func);
2036 break;
2037
2038 case DT_INIT_ARRAY:
2039 init_array = reinterpret_cast<linker_function_t*>(load_bias + d->d_un.d_ptr);
2040 DEBUG("%s constructors (DT_INIT_ARRAY) found at %p", name, init_array);
2041 break;
2042
2043 case DT_INIT_ARRAYSZ:
2044 init_array_count = ((unsigned)d->d_un.d_val) / sizeof(ElfW(Addr));
2045 break;
2046
2047 case DT_FINI_ARRAY:
2048 fini_array = reinterpret_cast<linker_function_t*>(load_bias + d->d_un.d_ptr);
2049 DEBUG("%s destructors (DT_FINI_ARRAY) found at %p", name, fini_array);
2050 break;
2051
2052 case DT_FINI_ARRAYSZ:
2053 fini_array_count = ((unsigned)d->d_un.d_val) / sizeof(ElfW(Addr));
2054 break;
2055
2056 case DT_PREINIT_ARRAY:
2057 preinit_array = reinterpret_cast<linker_function_t*>(load_bias + d->d_un.d_ptr);
2058 DEBUG("%s constructors (DT_PREINIT_ARRAY) found at %p", name, preinit_array);
2059 break;
2060
2061 case DT_PREINIT_ARRAYSZ:
2062 preinit_array_count = ((unsigned)d->d_un.d_val) / sizeof(ElfW(Addr));
2063 break;
2064
2065 case DT_TEXTREL:
2066 #if defined(__LP64__)
2067 DL_ERR("text relocations (DT_TEXTREL) found in 64-bit ELF file \"%s\"", name);
2068 return false;
2069 #else
2070 has_text_relocations = true;
2071 break;
2072 #endif
2073
2074 case DT_SYMBOLIC:
2075 has_DT_SYMBOLIC = true;
2076 break;
2077
2078 case DT_NEEDED:
2079 ++needed_count;
2080 break;
2081
2082 case DT_FLAGS:
2083 if (d->d_un.d_val & DF_TEXTREL) {
2084 #if defined(__LP64__)
2085 DL_ERR("text relocations (DF_TEXTREL) found in 64-bit ELF file \"%s\"", name);
2086 return false;
2087 #else
2088 has_text_relocations = true;
2089 #endif
2090 }
2091 if (d->d_un.d_val & DF_SYMBOLIC) {
2092 has_DT_SYMBOLIC = true;
2093 }
2094 break;
2095
2096 case DT_FLAGS_1:
2097 if ((d->d_un.d_val & DF_1_GLOBAL) != 0) {
2098 rtld_flags |= RTLD_GLOBAL;
2099 }
2100 // TODO: Implement other flags
2101
2102 if ((d->d_un.d_val & ~(DF_1_NOW | DF_1_GLOBAL)) != 0) {
2103 DL_WARN("Unsupported flags DT_FLAGS_1=%p", reinterpret_cast<void*>(d->d_un.d_val));
2104 }
2105 break;
2106 #if defined(__mips__)
2107 case DT_MIPS_RLD_MAP:
2108 // Set the DT_MIPS_RLD_MAP entry to the address of _r_debug for GDB.
2109 {
2110 r_debug** dp = reinterpret_cast<r_debug**>(load_bias + d->d_un.d_ptr);
2111 *dp = &_r_debug;
2112 }
2113 break;
2114
2115 case DT_MIPS_RLD_VERSION:
2116 case DT_MIPS_FLAGS:
2117 case DT_MIPS_BASE_ADDRESS:
2118 case DT_MIPS_UNREFEXTNO:
2119 break;
2120
2121 case DT_MIPS_SYMTABNO:
2122 mips_symtabno = d->d_un.d_val;
2123 break;
2124
2125 case DT_MIPS_LOCAL_GOTNO:
2126 mips_local_gotno = d->d_un.d_val;
2127 break;
2128
2129 case DT_MIPS_GOTSYM:
2130 mips_gotsym = d->d_un.d_val;
2131 break;
2132 #endif
2133 // Ignored: "Its use has been superseded by the DF_BIND_NOW flag"
2134 case DT_BIND_NOW:
2135 break;
2136
2137 // Ignore: bionic does not support symbol versioning...
2138 case DT_VERSYM:
2139 case DT_VERDEF:
2140 case DT_VERDEFNUM:
2141 break;
2142
2143 default:
2144 if (!relocating_linker) {
2145 DL_WARN("%s: unused DT entry: type %p arg %p", name,
2146 reinterpret_cast<void*>(d->d_tag), reinterpret_cast<void*>(d->d_un.d_val));
2147 }
2148 break;
2149 }
2150 }
2151
2152 DEBUG("si->base = %p, si->strtab = %p, si->symtab = %p",
2153 reinterpret_cast<void*>(base), strtab, symtab);
2154
2155 // Sanity checks.
2156 if (relocating_linker && needed_count != 0) {
2157 DL_ERR("linker cannot have DT_NEEDED dependencies on other libraries");
2158 return false;
2159 }
2160 if (nbucket == 0) {
2161 DL_ERR("empty/missing DT_HASH in \"%s\" (built with --hash-style=gnu?)", name);
2162 return false;
2163 }
2164 if (strtab == 0) {
2165 DL_ERR("empty/missing DT_STRTAB in \"%s\"", name);
2166 return false;
2167 }
2168 if (symtab == 0) {
2169 DL_ERR("empty/missing DT_SYMTAB in \"%s\"", name);
2170 return false;
2171 }
2172 return true;
2173 }
2174
2175 bool soinfo::LinkImage(const android_dlextinfo* extinfo) {
2176
2177 #if !defined(__LP64__)
2178 if (has_text_relocations) {
2179 // Make segments writable to allow text relocations to work properly. We will later call
2180 // phdr_table_protect_segments() after all of them are applied and all constructors are run.
2181 DL_WARN("%s has text relocations. This is wasting memory and prevents "
2182 "security hardening. Please fix.", name);
2183 if (phdr_table_unprotect_segments(phdr, phnum, load_bias) < 0) {
2184 DL_ERR("can't unprotect loadable segments for \"%s\": %s",
2185 name, strerror(errno));
2186 return false;
2187 }
2188 }
2189 #endif
2190
2191 #if defined(USE_RELA)
2192 if (rela != nullptr) {
2193 DEBUG("[ relocating %s ]", name);
2194 if (Relocate(rela, rela_count)) {
2195 return false;
2196 }
2197 }
2198 if (plt_rela != nullptr) {
2199 DEBUG("[ relocating %s plt ]", name);
2200 if (Relocate(plt_rela, plt_rela_count)) {
2201 return false;
2202 }
2203 }
2204 #else
2205 if (rel != nullptr) {
2206 DEBUG("[ relocating %s ]", name);
2207 if (Relocate(rel, rel_count)) {
2208 return false;
2209 }
2210 }
2211 if (plt_rel != nullptr) {
2212 DEBUG("[ relocating %s plt ]", name);
2213 if (Relocate(plt_rel, plt_rel_count)) {
2214 return false;
2215 }
2216 }
2217 #endif
2218
2219 #if defined(__mips__)
2220 if (!mips_relocate_got(this)) {
2221 return false;
2222 }
2223 #endif
2224
2225 DEBUG("[ finished linking %s ]", name);
2226
2227 #if !defined(__LP64__)
2228 if (has_text_relocations) {
2229 // All relocations are done, we can protect our segments back to read-only.
2230 if (phdr_table_protect_segments(phdr, phnum, load_bias) < 0) {
2231 DL_ERR("can't protect segments for \"%s\": %s",
2232 name, strerror(errno));
2233 return false;
2234 }
2235 }
2236 #endif
2237
2238 /* We can also turn on GNU RELRO protection */
2239 if (phdr_table_protect_gnu_relro(phdr, phnum, load_bias) < 0) {
2240 DL_ERR("can't enable GNU RELRO protection for \"%s\": %s",
2241 name, strerror(errno));
2242 return false;
2243 }
2244
2245 /* Handle serializing/sharing the RELRO segment */
2246 if (extinfo && (extinfo->flags & ANDROID_DLEXT_WRITE_RELRO)) {
2247 if (phdr_table_serialize_gnu_relro(phdr, phnum, load_bias,
2248 extinfo->relro_fd) < 0) {
2249 DL_ERR("failed serializing GNU RELRO section for \"%s\": %s",
2250 name, strerror(errno));
2251 return false;
2252 }
2253 } else if (extinfo && (extinfo->flags & ANDROID_DLEXT_USE_RELRO)) {
2254 if (phdr_table_map_gnu_relro(phdr, phnum, load_bias,
2255 extinfo->relro_fd) < 0) {
2256 DL_ERR("failed mapping GNU RELRO section for \"%s\": %s",
2257 name, strerror(errno));
2258 return false;
2259 }
2260 }
2261
2262 notify_gdb_of_load(this);
2263 return true;
2264 }
2265
2266 /*
2267 * This function adds the vdso to the internal dso list.
2268 * It helps with stack unwinding through signal handlers.
2269 * It also makes bionic behave more like glibc.
2270 */
2271 static void add_vdso(KernelArgumentBlock& args __unused) {
2272 #if defined(AT_SYSINFO_EHDR)
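  // AT_SYSINFO_EHDR, when provided by the kernel, is the address at which the
  // vdso's ELF header has been mapped into this process.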
2273 ElfW(Ehdr)* ehdr_vdso = reinterpret_cast<ElfW(Ehdr)*>(args.getauxval(AT_SYSINFO_EHDR));
2274 if (ehdr_vdso == nullptr) {
2275 return;
2276 }
2277
2278 soinfo* si = soinfo_alloc("[vdso]", nullptr, 0);
2279
2280 si->phdr = reinterpret_cast<ElfW(Phdr)*>(reinterpret_cast<char*>(ehdr_vdso) + ehdr_vdso->e_phoff);
2281 si->phnum = ehdr_vdso->e_phnum;
2282 si->base = reinterpret_cast<ElfW(Addr)>(ehdr_vdso);
2283 si->size = phdr_table_get_load_size(si->phdr, si->phnum);
2284 si->load_bias = get_elf_exec_load_bias(ehdr_vdso);
2285
2286 si->PrelinkImage();
2287 si->LinkImage(nullptr);
2288 #endif
2289 }
2290
2291 /*
2292 * This is linker soinfo for GDB. See details below.
2293 */
2294 #if defined(__LP64__)
2295 #define LINKER_PATH "/system/bin/linker64"
2296 #else
2297 #define LINKER_PATH "/system/bin/linker"
2298 #endif
2299 static soinfo linker_soinfo_for_gdb(LINKER_PATH, nullptr, 0);
2300
2301 /* gdb expects the linker to be in the debug shared object list.
2302 * Without this, gdb has trouble locating the linker's ".text"
2303 * and ".plt" sections. Gdb could also potentially use this to
2304 * relocate the offset of our exported 'rtld_db_dlactivity' symbol.
2305 * Don't use soinfo_alloc(), because the linker shouldn't
2306 * be on the soinfo list.
2307 */
2308 static void init_linker_info_for_gdb(ElfW(Addr) linker_base) {
2309 linker_soinfo_for_gdb.base = linker_base;
2310
2311 /*
2312 * Set the dynamic field in the link map otherwise gdb will complain with
2313 * the following:
2314 * warning: .dynamic section for "/system/bin/linker" is not at the
2315 * expected address (wrong library or version mismatch?)
2316 */
2317 ElfW(Ehdr)* elf_hdr = reinterpret_cast<ElfW(Ehdr)*>(linker_base);
2318 ElfW(Phdr)* phdr = reinterpret_cast<ElfW(Phdr)*>(linker_base + elf_hdr->e_phoff);
2319 phdr_table_get_dynamic_section(phdr, elf_hdr->e_phnum, linker_base,
2320 &linker_soinfo_for_gdb.dynamic, nullptr);
2321 insert_soinfo_into_debug_map(&linker_soinfo_for_gdb);
2322 }
2323
2324 /*
2325 * This code is called after the linker has linked itself and
2326 * fixed its own GOT. It is safe to make references to externs
2327 * and other non-local data at this point.
2328 */
2329 static ElfW(Addr) __linker_init_post_relocation(KernelArgumentBlock& args, ElfW(Addr) linker_base) {
2330 #if TIMING
2331 struct timeval t0, t1;
2332 gettimeofday(&t0, 0);
2333 #endif
2334
2335 // Initialize environment functions, and get to the ELF aux vectors table.
2336 linker_env_init(args);
2337
2338 // If this is a setuid/setgid program, close the security hole described in
2339 // ftp://ftp.freebsd.org/pub/FreeBSD/CERT/advisories/FreeBSD-SA-02:23.stdio.asc
2340 if (get_AT_SECURE()) {
2341 nullify_closed_stdio();
2342 }
2343
2344 debuggerd_init();
2345
2346 // Get a few environment variables.
2347 const char* LD_DEBUG = linker_env_get("LD_DEBUG");
2348 if (LD_DEBUG != nullptr) {
2349 g_ld_debug_verbosity = atoi(LD_DEBUG);
2350 }
2351
2352 // Normally, these are already cleared by linker_env_init, but checking
2353 // again here doesn't cost us anything.
2354 const char* ldpath_env = nullptr;
2355 const char* ldpreload_env = nullptr;
2356 if (!get_AT_SECURE()) {
2357 ldpath_env = linker_env_get("LD_LIBRARY_PATH");
2358 ldpreload_env = linker_env_get("LD_PRELOAD");
2359 }
2360
2361 INFO("[ android linker & debugger ]");
2362
2363 soinfo* si = soinfo_alloc(args.argv[0], nullptr, 0);
2364 if (si == nullptr) {
2365 exit(EXIT_FAILURE);
2366 }
2367
2368 /* bootstrap the link map, the main exe always needs to be first */
2369 si->flags |= FLAG_EXE;
2370 link_map* map = &(si->link_map_head);
2371
2372 map->l_addr = 0;
2373 map->l_name = args.argv[0];
2374 map->l_prev = nullptr;
2375 map->l_next = nullptr;
2376
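  // gdb uses the DT_DEBUG/_r_debug handshake to walk this list and discover
  // which objects are loaded; the main executable must be the first entry.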
2377 _r_debug.r_map = map;
2378 r_debug_tail = map;
2379
2380 init_linker_info_for_gdb(linker_base);
2381
2382 // Extract information passed from the kernel.
2383 si->phdr = reinterpret_cast<ElfW(Phdr)*>(args.getauxval(AT_PHDR));
2384 si->phnum = args.getauxval(AT_PHNUM);
2385 si->entry = args.getauxval(AT_ENTRY);
2386
2387 /* Compute the value of si->base. We can't rely on the fact that
2388 * the first entry is the PHDR because this will not be true
2389 * for certain executables (e.g. some in the NDK unit test suite)
2390 */
2391 si->base = 0;
2392 si->size = phdr_table_get_load_size(si->phdr, si->phnum);
2393 si->load_bias = 0;
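  // PT_PHDR describes the program header table itself, so comparing its
  // p_vaddr/p_offset with the address where the table actually landed
  // (AT_PHDR) yields both the load bias and the address of the ELF header
  // (si->base).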
2394 for (size_t i = 0; i < si->phnum; ++i) {
2395 if (si->phdr[i].p_type == PT_PHDR) {
2396 si->load_bias = reinterpret_cast<ElfW(Addr)>(si->phdr) - si->phdr[i].p_vaddr;
2397 si->base = reinterpret_cast<ElfW(Addr)>(si->phdr) - si->phdr[i].p_offset;
2398 break;
2399 }
2400 }
2401 si->dynamic = nullptr;
2402 si->ref_count = 1;
2403
2404 ElfW(Ehdr)* elf_hdr = reinterpret_cast<ElfW(Ehdr)*>(si->base);
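  // A PIE is linked as ET_DYN (just like a shared object); any other e_type
  // here means a traditional non-PIE executable, which is rejected below.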
2405 if (elf_hdr->e_type != ET_DYN) {
2406 __libc_format_fd(2, "error: only position independent executables (PIE) are supported.\n");
2407 exit(EXIT_FAILURE);
2408 }
2409
2410 // Use LD_LIBRARY_PATH and LD_PRELOAD (but only if we aren't setuid/setgid).
2411 parse_LD_LIBRARY_PATH(ldpath_env);
2412 parse_LD_PRELOAD(ldpreload_env);
2413
2414 somain = si;
2415
2416 if (!si->PrelinkImage()) {
2417 __libc_format_fd(2, "CANNOT LINK EXECUTABLE: %s\n", linker_get_error_buffer());
2418 exit(EXIT_FAILURE);
2419 }
2420
2421 // Load ld_preloads and dependencies.
2422 StringLinkedList needed_library_name_list;
2423 size_t needed_libraries_count = 0;
2424 size_t ld_preloads_count = 0;
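  // LD_PRELOAD entries are queued ahead of the executable's DT_NEEDED
  // libraries, so they are loaded before them.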
2425 while (g_ld_preload_names[ld_preloads_count] != nullptr) {
2426 needed_library_name_list.push_back(g_ld_preload_names[ld_preloads_count++]);
2427 ++needed_libraries_count;
2428 }
2429
2430 for_each_dt_needed(si, [&](const char* name) {
2431 needed_library_name_list.push_back(name);
2432 ++needed_libraries_count;
2433 });
2434
2435 const char* needed_library_names[needed_libraries_count];
2436 soinfo* needed_library_si[needed_libraries_count];
2437
2438 memset(needed_library_names, 0, sizeof(needed_library_names));
2439 needed_library_name_list.copy_to_array(needed_library_names, needed_libraries_count);
2440
2441 if (needed_libraries_count > 0 && !find_libraries(needed_library_names, needed_libraries_count, needed_library_si, g_ld_preloads, ld_preloads_count, 0, nullptr)) {
2442 __libc_format_fd(2, "CANNOT LINK EXECUTABLE DEPENDENCIES: %s\n", linker_get_error_buffer());
2443 exit(EXIT_FAILURE);
2444 }
2445
2446 for (size_t i = 0; i < needed_libraries_count; ++i) {
2447 si->add_child(needed_library_si[i]);
2448 }
2449
2450 if (!si->LinkImage(nullptr)) {
2451 __libc_format_fd(2, "CANNOT LINK EXECUTABLE: %s\n", linker_get_error_buffer());
2452 exit(EXIT_FAILURE);
2453 }
2454
2455 add_vdso(args);
2456
2457 si->CallPreInitConstructors();
2458
2459 /* After PrelinkImage(), si->load_bias is initialized.
2460  * For shared libraries, map->l_addr is updated in notify_gdb_of_load;
2461  * for the executable we need to update it here so that
2462  * _Unwind_Backtrace (e.g. on x86) works correctly within the executable.
2463  */
2464 map->l_addr = si->load_bias;
2465 si->CallConstructors();
2466
2467 #if TIMING
2468 gettimeofday(&t1, nullptr);
2469 PRINT("LINKER TIME: %s: %d microseconds", args.argv[0], (int) (
2470 (((long long)t1.tv_sec * 1000000LL) + (long long)t1.tv_usec) -
2471 (((long long)t0.tv_sec * 1000000LL) + (long long)t0.tv_usec)));
2472 #endif
2473 #if STATS
2474 PRINT("RELO STATS: %s: %d abs, %d rel, %d copy, %d symbol", args.argv[0],
2475 linker_stats.count[kRelocAbsolute],
2476 linker_stats.count[kRelocRelative],
2477 linker_stats.count[kRelocCopy],
2478 linker_stats.count[kRelocSymbol]);
2479 #endif
2480 #if COUNT_PAGES
2481 {
2482 unsigned n;
2483 unsigned i;
2484 unsigned count = 0;
2485 for (n = 0; n < 4096; n++) {
2486 if (bitmask[n]) {
2487 unsigned x = bitmask[n];
2488 #if defined(__LP64__)
2489 for (i = 0; i < 32; i++) {
2490 #else
2491 for (i = 0; i < 8; i++) {
2492 #endif
2493 if (x & 1) {
2494 count++;
2495 }
2496 x >>= 1;
2497 }
2498 }
2499 }
2500 PRINT("PAGES MODIFIED: %s: %d (%dKB)", args.argv[0], count, count * 4);
2501 }
2502 #endif
2503
2504 #if TIMING || STATS || COUNT_PAGES
2505 fflush(stdout);
2506 #endif
2507
2508 TRACE("[ Ready to execute '%s' @ %p ]", si->name, reinterpret_cast<void*>(si->entry));
2509 return si->entry;
2510 }
2511
2512 /* Compute the load-bias of an existing executable. This shall only
2513 * be used to compute the load bias of an executable or shared library
2514 * that was loaded by the kernel itself.
2515 *
2516 * Input:
2517 * elf -> address of ELF header, assumed to be at the start of the file.
2518 * Return:
2519 * load bias, i.e. add the value of any p_vaddr in the file to get
2520 * the corresponding address in memory.
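 *
 * For example (illustrative numbers): if the ELF header is mapped at
 * 0x70000000 and the first PT_LOAD has p_offset 0x1000 and p_vaddr 0x1000,
 * the load bias is 0x70000000 + 0x1000 - 0x1000 = 0x70000000.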
2521 */
2522 static ElfW(Addr) get_elf_exec_load_bias(const ElfW(Ehdr)* elf) {
2523 ElfW(Addr) offset = elf->e_phoff;
2524 const ElfW(Phdr)* phdr_table = reinterpret_cast<const ElfW(Phdr)*>(reinterpret_cast<uintptr_t>(elf) + offset);
2525 const ElfW(Phdr)* phdr_end = phdr_table + elf->e_phnum;
2526
2527 for (const ElfW(Phdr)* phdr = phdr_table; phdr < phdr_end; phdr++) {
2528 if (phdr->p_type == PT_LOAD) {
2529 return reinterpret_cast<ElfW(Addr)>(elf) + phdr->p_offset - phdr->p_vaddr;
2530 }
2531 }
2532 return 0;
2533 }
2534
2535 extern "C" void _start();
2536
2537 /*
2538 * This is the entry point for the linker, called from begin.S. This
2539 * method is responsible for fixing the linker's own relocations, and
2540 * then calling __linker_init_post_relocation().
2541 *
2542 * Because this method is called before the linker has fixed its own
2543 * relocations, any attempt to reference an extern variable, extern
2544 * function, or other GOT reference will generate a segfault.
2545 */
2546 extern "C" ElfW(Addr) __linker_init(void* raw_args) {
2547 KernelArgumentBlock args(raw_args);
2548
2549 ElfW(Addr) linker_addr = args.getauxval(AT_BASE);
2550 ElfW(Addr) entry_point = args.getauxval(AT_ENTRY);
2551 ElfW(Ehdr)* elf_hdr = reinterpret_cast<ElfW(Ehdr)*>(linker_addr);
2552 ElfW(Phdr)* phdr = reinterpret_cast<ElfW(Phdr)*>(linker_addr + elf_hdr->e_phoff);
2553
2554 soinfo linker_so("[dynamic linker]", nullptr, 0);
2555
2556 // If the linker is not acting as PT_INTERP, entry_point is equal to
2557 // _start, which means the linker is being run as an executable itself
2558 // rather than as the interpreter (PT_INTERP) of another binary.
2559 //
2560 // This happens when a user tries to run 'adb shell /system/bin/linker';
2561 // see also https://code.google.com/p/android/issues/detail?id=63174
2562 if (reinterpret_cast<ElfW(Addr)>(&_start) == entry_point) {
2563 __libc_fatal("This is %s, the helper program for shared library executables.\n", args.argv[0]);
2564 }
2565
2566 linker_so.base = linker_addr;
2567 linker_so.size = phdr_table_get_load_size(phdr, elf_hdr->e_phnum);
2568 linker_so.load_bias = get_elf_exec_load_bias(elf_hdr);
2569 linker_so.dynamic = nullptr;
2570 linker_so.phdr = phdr;
2571 linker_so.phnum = elf_hdr->e_phnum;
2572 linker_so.flags |= FLAG_LINKER;
2573
2574 if (!(linker_so.PrelinkImage() && linker_so.LinkImage(nullptr))) {
2575 // It would be nice to print an error message, but if the linker
2576 // can't link itself, there's no guarantee that we'll be able to
2577 // call write() (because it involves a GOT reference). We may as
2578 // well try though...
2579 const char* msg = "CANNOT LINK EXECUTABLE: ";
2580 write(2, msg, strlen(msg));
2581 write(2, __linker_dl_err_buf, strlen(__linker_dl_err_buf));
2582 write(2, "\n", 1);
2583 _exit(EXIT_FAILURE);
2584 }
2585
2586 __libc_init_tls(args);
2587
2588 // Initialize the linker's own global variables
2589 linker_so.CallConstructors();
2590
2591 // Initialize static variables. Note that in order to
2592 // get correct libdl_info we need to call constructors
2593 // before get_libdl_info().
2594 solist = get_libdl_info();
2595 sonext = get_libdl_info();
2596
2597 // We have successfully fixed our own relocations. It's safe to run
2598 // the main part of the linker now.
2599 args.abort_message_ptr = &g_abort_message;
2600 ElfW(Addr) start_address = __linker_init_post_relocation(args, linker_addr);
2601
2602 protect_data(PROT_READ);
2603
2604 // Return the address that the calling assembly stub should jump to.
2605 return start_address;
2606 }
2607