/*
 * Copyright (C) 2012 The Android Open Source Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "linker_phdr.h"

#include <errno.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>

#include "linker.h"
#include "linker_dlwarning.h"
#include "linker_globals.h"
#include "linker_debug.h"
#include "linker_utils.h"

#include "private/bionic_prctl.h"
#include "private/CFIShadow.h" // For kLibraryAlignment

static int GetTargetElfMachine() {
#if defined(__arm__)
  return EM_ARM;
#elif defined(__aarch64__)
  return EM_AARCH64;
#elif defined(__i386__)
  return EM_386;
#elif defined(__mips__)
  return EM_MIPS;
#elif defined(__x86_64__)
  return EM_X86_64;
#endif
}

/**
  TECHNICAL NOTE ON ELF LOADING.

  An ELF file's program header table contains one or more PT_LOAD
  segments, which correspond to portions of the file that need to
  be mapped into the process' address space.

  Each loadable segment has the following important properties:

    p_offset  -> segment file offset
    p_filesz  -> segment file size
    p_memsz   -> segment memory size (always >= p_filesz)
    p_vaddr   -> segment's virtual address
    p_flags   -> segment flags (e.g. readable, writable, executable)

  We will ignore the p_paddr and p_align fields of ElfW(Phdr) for now.

  The loadable segments can be seen as a list of [p_vaddr ... p_vaddr+p_memsz)
  ranges of virtual addresses. A few rules apply:

  - the virtual address ranges should not overlap.

  - if a segment's p_filesz is smaller than its p_memsz, the extra bytes
    between them should always be initialized to 0.

  - ranges do not necessarily start or end at page boundaries. Two distinct
    segments can have their start and end on the same page. In this case, the
    page inherits the mapping flags of the latter segment.

  Finally, the real load address of each segment is not its p_vaddr. Instead,
  the loader decides where to load the first segment, then loads all others
  relative to the first one to respect the initial range layout.

  For example, consider the following list:

    [ offset:0,      filesz:0x4000, memsz:0x4000, vaddr:0x30000 ],
    [ offset:0x4000, filesz:0x2000, memsz:0x8000, vaddr:0x40000 ],

  This corresponds to two segments that cover these virtual address ranges:

       0x30000...0x34000
       0x40000...0x48000

  If the loader decides to load the first segment at address 0xa0000000
  then the segments' load address ranges will be:

       0xa0030000...0xa0034000
       0xa0040000...0xa0048000

  In other words, all segments must be loaded at an address that has the same
  constant offset from their p_vaddr value. This offset is computed as the
  difference between the first segment's load address and its p_vaddr value.

  However, in practice, segments do _not_ start at page boundaries. Since we
  can only memory-map at page boundaries, this means that the bias is
  computed as:

       load_bias = phdr0_load_address - PAGE_START(phdr0->p_vaddr)

  (NOTE: The value must be used as a 32-bit unsigned integer, to deal with
          possible wrap-around of UINT32_MAX for large p_vaddr values.)

  Also note that phdr0_load_address must start at a page boundary, with
  the segment's real content starting at:

       phdr0_load_address + PAGE_OFFSET(phdr0->p_vaddr)

  Note that ELF requires the following condition to make the mmap()-ing work:

      PAGE_OFFSET(phdr0->p_vaddr) == PAGE_OFFSET(phdr0->p_offset)

  The load_bias must be added to any p_vaddr value read from the ELF file to
  determine the corresponding memory address.
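
  As a concrete illustration using the example segments above: if the first
  segment's content ends up mapped at 0xa0030000, then

       load_bias = 0xa0030000 - PAGE_START(0x30000) = 0xa0000000

  and the second segment's p_vaddr of 0x40000 is therefore loaded at
  0xa0000000 + 0x40000 = 0xa0040000.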

 **/

#define MAYBE_MAP_FLAG(x, from, to)  (((x) & (from)) ? (to) : 0)
#define PFLAGS_TO_PROT(x)            (MAYBE_MAP_FLAG((x), PF_X, PROT_EXEC) | \
                                      MAYBE_MAP_FLAG((x), PF_R, PROT_READ) | \
                                      MAYBE_MAP_FLAG((x), PF_W, PROT_WRITE))
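// For example, PFLAGS_TO_PROT(PF_R | PF_X) evaluates to PROT_READ | PROT_EXEC.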

ElfReader::ElfReader()
    : did_read_(false), did_load_(false), fd_(-1), file_offset_(0), file_size_(0), phdr_num_(0),
      phdr_table_(nullptr), shdr_table_(nullptr), shdr_num_(0), dynamic_(nullptr), strtab_(nullptr),
      strtab_size_(0), load_start_(nullptr), load_size_(0), load_bias_(0), loaded_phdr_(nullptr),
      mapped_by_caller_(false) {
}

bool ElfReader::Read(const char* name, int fd, off64_t file_offset, off64_t file_size) {
  if (did_read_) {
    return true;
  }
  name_ = name;
  fd_ = fd;
  file_offset_ = file_offset;
  file_size_ = file_size;

  if (ReadElfHeader() &&
      VerifyElfHeader() &&
      ReadProgramHeaders() &&
      ReadSectionHeaders() &&
      ReadDynamicSection()) {
    did_read_ = true;
  }

  return did_read_;
}

bool ElfReader::Load(const android_dlextinfo* extinfo) {
  CHECK(did_read_);
  if (did_load_) {
    return true;
  }
  if (ReserveAddressSpace(extinfo) &&
      LoadSegments() &&
      FindPhdr()) {
    did_load_ = true;
  }

  return did_load_;
}

const char* ElfReader::get_string(ElfW(Word) index) const {
  CHECK(strtab_ != nullptr);
  CHECK(index < strtab_size_);

  return strtab_ + index;
}

bool ElfReader::ReadElfHeader() {
  ssize_t rc = TEMP_FAILURE_RETRY(pread64(fd_, &header_, sizeof(header_), file_offset_));
  if (rc < 0) {
    DL_ERR("can't read file \"%s\": %s", name_.c_str(), strerror(errno));
    return false;
  }

  if (rc != sizeof(header_)) {
    DL_ERR("\"%s\" is too small to be an ELF executable: only found %zd bytes", name_.c_str(),
           static_cast<size_t>(rc));
    return false;
  }
  return true;
}

static const char* EM_to_string(int em) {
  if (em == EM_386) return "EM_386";
  if (em == EM_AARCH64) return "EM_AARCH64";
  if (em == EM_ARM) return "EM_ARM";
  if (em == EM_MIPS) return "EM_MIPS";
  if (em == EM_X86_64) return "EM_X86_64";
  return "EM_???";
}

bool ElfReader::VerifyElfHeader() {
  if (memcmp(header_.e_ident, ELFMAG, SELFMAG) != 0) {
    DL_ERR("\"%s\" has bad ELF magic", name_.c_str());
    return false;
  }

  // Try to give a clear diagnostic for ELF class mismatches, since they're
  // an easy mistake to make during the 32-bit/64-bit transition period.
  int elf_class = header_.e_ident[EI_CLASS];
#if defined(__LP64__)
  if (elf_class != ELFCLASS64) {
    if (elf_class == ELFCLASS32) {
      DL_ERR("\"%s\" is 32-bit instead of 64-bit", name_.c_str());
    } else {
      DL_ERR("\"%s\" has unknown ELF class: %d", name_.c_str(), elf_class);
    }
    return false;
  }
#else
  if (elf_class != ELFCLASS32) {
    if (elf_class == ELFCLASS64) {
      DL_ERR("\"%s\" is 64-bit instead of 32-bit", name_.c_str());
    } else {
      DL_ERR("\"%s\" has unknown ELF class: %d", name_.c_str(), elf_class);
    }
    return false;
  }
#endif

  if (header_.e_ident[EI_DATA] != ELFDATA2LSB) {
    DL_ERR("\"%s\" not little-endian: %d", name_.c_str(), header_.e_ident[EI_DATA]);
    return false;
  }

  if (header_.e_type != ET_DYN) {
    DL_ERR("\"%s\" has unexpected e_type: %d", name_.c_str(), header_.e_type);
    return false;
  }

  if (header_.e_version != EV_CURRENT) {
    DL_ERR("\"%s\" has unexpected e_version: %d", name_.c_str(), header_.e_version);
    return false;
  }

  if (header_.e_machine != GetTargetElfMachine()) {
    DL_ERR("\"%s\" has unexpected e_machine: %d (%s)", name_.c_str(), header_.e_machine,
           EM_to_string(header_.e_machine));
    return false;
  }

  if (header_.e_shentsize != sizeof(ElfW(Shdr))) {
    // Fail if app is targeting Android O or above
    if (get_application_target_sdk_version() >= __ANDROID_API_O__) {
      DL_ERR_AND_LOG("\"%s\" has unsupported e_shentsize: 0x%x (expected 0x%zx)",
                     name_.c_str(), header_.e_shentsize, sizeof(ElfW(Shdr)));
      return false;
    }
    DL_WARN_documented_change(__ANDROID_API_O__,
                              "invalid-elf-header_section-headers-enforced-for-api-level-26",
                              "\"%s\" has unsupported e_shentsize 0x%x (expected 0x%zx)",
                              name_.c_str(), header_.e_shentsize, sizeof(ElfW(Shdr)));
    add_dlwarning(name_.c_str(), "has invalid ELF header");
  }

  if (header_.e_shstrndx == 0) {
    // Fail if app is targeting Android O or above
    if (get_application_target_sdk_version() >= __ANDROID_API_O__) {
      DL_ERR_AND_LOG("\"%s\" has invalid e_shstrndx", name_.c_str());
      return false;
    }

    DL_WARN_documented_change(__ANDROID_API_O__,
                              "invalid-elf-header_section-headers-enforced-for-api-level-26",
                              "\"%s\" has invalid e_shstrndx", name_.c_str());
    add_dlwarning(name_.c_str(), "has invalid ELF header");
  }

  return true;
}

bool ElfReader::CheckFileRange(ElfW(Addr) offset, size_t size, size_t alignment) {
  off64_t range_start;
  off64_t range_end;

  // Only the ELF header can be located at offset 0. This function is called to
  // check the DYNSYM and DYNAMIC sections and the phdr/shdr tables - none of
  // them can be at offset 0.

  return offset > 0 &&
         safe_add(&range_start, file_offset_, offset) &&
         safe_add(&range_end, range_start, size) &&
         (range_start < file_size_) &&
         (range_end <= file_size_) &&
         ((offset % alignment) == 0);
}

// Loads the program header table from an ELF file into a read-only private
// anonymous mmap-ed block.
bool ElfReader::ReadProgramHeaders() {
  phdr_num_ = header_.e_phnum;

  // Like the kernel, we only accept program header tables that
  // are smaller than 64KiB.
  if (phdr_num_ < 1 || phdr_num_ > 65536/sizeof(ElfW(Phdr))) {
    DL_ERR("\"%s\" has invalid e_phnum: %zd", name_.c_str(), phdr_num_);
    return false;
  }

  // Boundary checks
  size_t size = phdr_num_ * sizeof(ElfW(Phdr));
  if (!CheckFileRange(header_.e_phoff, size, alignof(ElfW(Phdr)))) {
    DL_ERR_AND_LOG("\"%s\" has invalid phdr offset/size: %zu/%zu",
                   name_.c_str(),
                   static_cast<size_t>(header_.e_phoff),
                   size);
    return false;
  }

  if (!phdr_fragment_.Map(fd_, file_offset_, header_.e_phoff, size)) {
    DL_ERR("\"%s\" phdr mmap failed: %s", name_.c_str(), strerror(errno));
    return false;
  }

  phdr_table_ = static_cast<ElfW(Phdr)*>(phdr_fragment_.data());
  return true;
}

bool ElfReader::ReadSectionHeaders() {
  shdr_num_ = header_.e_shnum;

  if (shdr_num_ == 0) {
    DL_ERR_AND_LOG("\"%s\" has no section headers", name_.c_str());
    return false;
  }

  size_t size = shdr_num_ * sizeof(ElfW(Shdr));
  if (!CheckFileRange(header_.e_shoff, size, alignof(const ElfW(Shdr)))) {
    DL_ERR_AND_LOG("\"%s\" has invalid shdr offset/size: %zu/%zu",
                   name_.c_str(),
                   static_cast<size_t>(header_.e_shoff),
                   size);
    return false;
  }

  if (!shdr_fragment_.Map(fd_, file_offset_, header_.e_shoff, size)) {
    DL_ERR("\"%s\" shdr mmap failed: %s", name_.c_str(), strerror(errno));
    return false;
  }

  shdr_table_ = static_cast<const ElfW(Shdr)*>(shdr_fragment_.data());
  return true;
}

bool ElfReader::ReadDynamicSection() {
  // 1. Find .dynamic section (in section headers)
  const ElfW(Shdr)* dynamic_shdr = nullptr;
  for (size_t i = 0; i < shdr_num_; ++i) {
    if (shdr_table_[i].sh_type == SHT_DYNAMIC) {
      dynamic_shdr = &shdr_table_[i];
      break;
    }
  }

  if (dynamic_shdr == nullptr) {
    DL_ERR_AND_LOG("\"%s\" .dynamic section header was not found", name_.c_str());
    return false;
  }

  // Make sure dynamic_shdr offset and size match the PT_DYNAMIC phdr
  size_t pt_dynamic_offset = 0;
  size_t pt_dynamic_filesz = 0;
  for (size_t i = 0; i < phdr_num_; ++i) {
    const ElfW(Phdr)* phdr = &phdr_table_[i];
    if (phdr->p_type == PT_DYNAMIC) {
      pt_dynamic_offset = phdr->p_offset;
      pt_dynamic_filesz = phdr->p_filesz;
    }
  }

  if (pt_dynamic_offset != dynamic_shdr->sh_offset) {
    if (get_application_target_sdk_version() >= __ANDROID_API_O__) {
      DL_ERR_AND_LOG("\"%s\" .dynamic section has invalid offset: 0x%zx, "
                     "expected to match PT_DYNAMIC offset: 0x%zx",
                     name_.c_str(),
                     static_cast<size_t>(dynamic_shdr->sh_offset),
                     pt_dynamic_offset);
      return false;
    }
    DL_WARN_documented_change(__ANDROID_API_O__,
                              "invalid-elf-header_section-headers-enforced-for-api-level-26",
                              "\"%s\" .dynamic section has invalid offset: 0x%zx "
                              "(expected to match PT_DYNAMIC offset 0x%zx)",
                              name_.c_str(),
                              static_cast<size_t>(dynamic_shdr->sh_offset),
                              pt_dynamic_offset);
    add_dlwarning(name_.c_str(), "invalid .dynamic section");
  }

  if (pt_dynamic_filesz != dynamic_shdr->sh_size) {
    if (get_application_target_sdk_version() >= __ANDROID_API_O__) {
      DL_ERR_AND_LOG("\"%s\" .dynamic section has invalid size: 0x%zx, "
                     "expected to match PT_DYNAMIC filesz: 0x%zx",
                     name_.c_str(),
                     static_cast<size_t>(dynamic_shdr->sh_size),
                     pt_dynamic_filesz);
      return false;
    }
    DL_WARN_documented_change(__ANDROID_API_O__,
                              "invalid-elf-header_section-headers-enforced-for-api-level-26",
                              "\"%s\" .dynamic section has invalid size: 0x%zx "
                              "(expected to match PT_DYNAMIC filesz 0x%zx)",
                              name_.c_str(),
                              static_cast<size_t>(dynamic_shdr->sh_size),
                              pt_dynamic_filesz);
    add_dlwarning(name_.c_str(), "invalid .dynamic section");
  }

  if (dynamic_shdr->sh_link >= shdr_num_) {
    DL_ERR_AND_LOG("\"%s\" .dynamic section has invalid sh_link: %d",
                   name_.c_str(),
                   dynamic_shdr->sh_link);
    return false;
  }

  const ElfW(Shdr)* strtab_shdr = &shdr_table_[dynamic_shdr->sh_link];

  if (strtab_shdr->sh_type != SHT_STRTAB) {
    DL_ERR_AND_LOG("\"%s\" .dynamic section has invalid link(%d) sh_type: %d (expected SHT_STRTAB)",
                   name_.c_str(), dynamic_shdr->sh_link, strtab_shdr->sh_type);
    return false;
  }

  if (!CheckFileRange(dynamic_shdr->sh_offset, dynamic_shdr->sh_size, alignof(const ElfW(Dyn)))) {
    DL_ERR_AND_LOG("\"%s\" has invalid offset/size of .dynamic section", name_.c_str());
    return false;
  }

  if (!dynamic_fragment_.Map(fd_, file_offset_, dynamic_shdr->sh_offset, dynamic_shdr->sh_size)) {
    DL_ERR("\"%s\" dynamic section mmap failed: %s", name_.c_str(), strerror(errno));
    return false;
  }

  dynamic_ = static_cast<const ElfW(Dyn)*>(dynamic_fragment_.data());

  if (!CheckFileRange(strtab_shdr->sh_offset, strtab_shdr->sh_size, alignof(const char))) {
    DL_ERR_AND_LOG("\"%s\" has invalid offset/size of the .strtab section linked from .dynamic section",
                   name_.c_str());
    return false;
  }

  if (!strtab_fragment_.Map(fd_, file_offset_, strtab_shdr->sh_offset, strtab_shdr->sh_size)) {
    DL_ERR("\"%s\" strtab section mmap failed: %s", name_.c_str(), strerror(errno));
    return false;
  }

  strtab_ = static_cast<const char*>(strtab_fragment_.data());
  strtab_size_ = strtab_fragment_.size();
  return true;
}

/* Returns the size of the extent of all the possibly non-contiguous
 * loadable segments in an ELF program header table. This corresponds
 * to the page-aligned size in bytes that needs to be reserved in the
 * process' address space. If there are no loadable segments, 0 is
 * returned.
 *
 * If out_min_vaddr or out_max_vaddr are not null, they will be
 * set to the minimum and maximum addresses of pages to be reserved,
 * or 0 if there is nothing to load.
 */
size_t phdr_table_get_load_size(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                ElfW(Addr)* out_min_vaddr,
                                ElfW(Addr)* out_max_vaddr) {
  ElfW(Addr) min_vaddr = UINTPTR_MAX;
  ElfW(Addr) max_vaddr = 0;

  bool found_pt_load = false;
  for (size_t i = 0; i < phdr_count; ++i) {
    const ElfW(Phdr)* phdr = &phdr_table[i];

    if (phdr->p_type != PT_LOAD) {
      continue;
    }
    found_pt_load = true;

    if (phdr->p_vaddr < min_vaddr) {
      min_vaddr = phdr->p_vaddr;
    }

    if (phdr->p_vaddr + phdr->p_memsz > max_vaddr) {
      max_vaddr = phdr->p_vaddr + phdr->p_memsz;
    }
  }
  if (!found_pt_load) {
    min_vaddr = 0;
  }

  min_vaddr = PAGE_START(min_vaddr);
  max_vaddr = PAGE_END(max_vaddr);

  if (out_min_vaddr != nullptr) {
    *out_min_vaddr = min_vaddr;
  }
  if (out_max_vaddr != nullptr) {
    *out_max_vaddr = max_vaddr;
  }
  return max_vaddr - min_vaddr;
}

// Reserve a virtual address range such that if its limits were extended to the next 2**align
// boundary, it would not overlap with any existing mappings.
static void* ReserveAligned(void* hint, size_t size, size_t align) {
  int mmap_flags = MAP_PRIVATE | MAP_ANONYMOUS;
  // Address hint is only used in Art for the image mapping, and it is pretty important. Don't mess
  // with it.
  // FIXME: try an aligned allocation and fall back to plain mmap() if the former does not provide a
  // mapping at the requested address?
  if (align == PAGE_SIZE || hint != nullptr) {
    void* mmap_ptr = mmap(hint, size, PROT_NONE, mmap_flags, -1, 0);
    if (mmap_ptr == MAP_FAILED) {
      return nullptr;
    }
    return mmap_ptr;
  }

  // Allocate enough space so that the end of the desired region aligned up is still inside the
  // mapping.
  size_t mmap_size = align_up(size, align) + align - PAGE_SIZE;
  uint8_t* mmap_ptr =
      reinterpret_cast<uint8_t*>(mmap(nullptr, mmap_size, PROT_NONE, mmap_flags, -1, 0));
  if (mmap_ptr == MAP_FAILED) {
    return nullptr;
  }

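  // Choose a random page-aligned start address between first and last, then unmap the unused
  // space before and after the chosen [start, start + size) region.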
  uint8_t* first = align_up(mmap_ptr, align);
  uint8_t* last = align_down(mmap_ptr + mmap_size, align) - size;
  size_t n = arc4random_uniform((last - first) / PAGE_SIZE + 1);
  uint8_t* start = first + n * PAGE_SIZE;
  munmap(mmap_ptr, start - mmap_ptr);
  munmap(start + size, mmap_ptr + mmap_size - (start + size));
  return start;
}

// Reserve a virtual address range big enough to hold all loadable
// segments of a program header table. This is done by creating a
// private anonymous mmap() with PROT_NONE.
bool ElfReader::ReserveAddressSpace(const android_dlextinfo* extinfo) {
  ElfW(Addr) min_vaddr;
  load_size_ = phdr_table_get_load_size(phdr_table_, phdr_num_, &min_vaddr);
  if (load_size_ == 0) {
    DL_ERR("\"%s\" has no loadable segments", name_.c_str());
    return false;
  }

  uint8_t* addr = reinterpret_cast<uint8_t*>(min_vaddr);
  void* start;
  size_t reserved_size = 0;
  bool reserved_hint = true;
  bool strict_hint = false;
  // Assume position independent executable by default.
  void* mmap_hint = nullptr;

  if (extinfo != nullptr) {
    if (extinfo->flags & ANDROID_DLEXT_RESERVED_ADDRESS) {
      reserved_size = extinfo->reserved_size;
      reserved_hint = false;
    } else if (extinfo->flags & ANDROID_DLEXT_RESERVED_ADDRESS_HINT) {
      reserved_size = extinfo->reserved_size;
    }

    if (addr != nullptr && (extinfo->flags & ANDROID_DLEXT_FORCE_FIXED_VADDR) != 0) {
      mmap_hint = addr;
    } else if ((extinfo->flags & ANDROID_DLEXT_LOAD_AT_FIXED_ADDRESS) != 0) {
      mmap_hint = extinfo->reserved_addr;
      strict_hint = true;
    }
  }

  if (load_size_ > reserved_size) {
    if (!reserved_hint) {
      DL_ERR("reserved address space %zd smaller than %zd bytes needed for \"%s\"",
             reserved_size, load_size_, name_.c_str());
      return false;
    }
    start = ReserveAligned(mmap_hint, load_size_, kLibraryAlignment);
    if (start == nullptr) {
      DL_ERR("couldn't reserve %zd bytes of address space for \"%s\"", load_size_, name_.c_str());
      return false;
    }
    if (strict_hint && (start != mmap_hint)) {
      munmap(start, load_size_);
      DL_ERR("couldn't reserve %zd bytes of address space at %p for \"%s\"",
             load_size_, mmap_hint, name_.c_str());
      return false;
    }
  } else {
    start = extinfo->reserved_addr;
    mapped_by_caller_ = true;
  }

  load_start_ = start;
  load_bias_ = reinterpret_cast<uint8_t*>(start) - addr;
  return true;
}

bool ElfReader::LoadSegments() {
  for (size_t i = 0; i < phdr_num_; ++i) {
    const ElfW(Phdr)* phdr = &phdr_table_[i];

    if (phdr->p_type != PT_LOAD) {
      continue;
    }

    // Segment addresses in memory.
    ElfW(Addr) seg_start = phdr->p_vaddr + load_bias_;
    ElfW(Addr) seg_end   = seg_start + phdr->p_memsz;

    ElfW(Addr) seg_page_start = PAGE_START(seg_start);
    ElfW(Addr) seg_page_end   = PAGE_END(seg_end);

    ElfW(Addr) seg_file_end   = seg_start + phdr->p_filesz;

    // File offsets.
    ElfW(Addr) file_start = phdr->p_offset;
    ElfW(Addr) file_end   = file_start + phdr->p_filesz;

    ElfW(Addr) file_page_start = PAGE_START(file_start);
    ElfW(Addr) file_length = file_end - file_page_start;

    if (file_size_ <= 0) {
      DL_ERR("\"%s\" invalid file size: %" PRId64, name_.c_str(), file_size_);
      return false;
    }

    if (file_end > static_cast<size_t>(file_size_)) {
      DL_ERR("invalid ELF file \"%s\" load segment[%zd]:"
          " p_offset (%p) + p_filesz (%p) ( = %p) past end of file (0x%" PRIx64 ")",
          name_.c_str(), i, reinterpret_cast<void*>(phdr->p_offset),
          reinterpret_cast<void*>(phdr->p_filesz),
          reinterpret_cast<void*>(file_end), file_size_);
      return false;
    }

    if (file_length != 0) {
      int prot = PFLAGS_TO_PROT(phdr->p_flags);
      if ((prot & (PROT_EXEC | PROT_WRITE)) == (PROT_EXEC | PROT_WRITE)) {
        // W + E PT_LOAD segments are not allowed in O.
        if (get_application_target_sdk_version() >= __ANDROID_API_O__) {
          DL_ERR_AND_LOG("\"%s\": W+E load segments are not allowed", name_.c_str());
          return false;
        }
        DL_WARN_documented_change(__ANDROID_API_O__,
                                  "writable-and-executable-segments-enforced-for-api-level-26",
                                  "\"%s\" has load segments that are both writable and executable",
                                  name_.c_str());
        add_dlwarning(name_.c_str(), "W+E load segments");
      }

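      // Map the file-backed portion of the segment at its page-aligned load address, using the
      // page-aligned file offset and the protection flags derived from p_flags.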
      void* seg_addr = mmap64(reinterpret_cast<void*>(seg_page_start),
                            file_length,
                            prot,
                            MAP_FIXED|MAP_PRIVATE,
                            fd_,
                            file_offset_ + file_page_start);
      if (seg_addr == MAP_FAILED) {
        DL_ERR("couldn't map \"%s\" segment %zd: %s", name_.c_str(), i, strerror(errno));
        return false;
      }
    }

    // If the segment is writable and does not end on a page boundary,
    // zero-fill it up to the page limit.
    if ((phdr->p_flags & PF_W) != 0 && PAGE_OFFSET(seg_file_end) > 0) {
      memset(reinterpret_cast<void*>(seg_file_end), 0, PAGE_SIZE - PAGE_OFFSET(seg_file_end));
    }

    seg_file_end = PAGE_END(seg_file_end);

    // seg_file_end is now the first page address after the file
    // content. If seg_end is larger, we need to zero anything
    // between them. This is done by using a private anonymous
    // map for all extra pages.
    if (seg_page_end > seg_file_end) {
      size_t zeromap_size = seg_page_end - seg_file_end;
      void* zeromap = mmap(reinterpret_cast<void*>(seg_file_end),
                           zeromap_size,
                           PFLAGS_TO_PROT(phdr->p_flags),
                           MAP_FIXED|MAP_ANONYMOUS|MAP_PRIVATE,
                           -1,
                           0);
      if (zeromap == MAP_FAILED) {
        DL_ERR("couldn't zero fill \"%s\" gap: %s", name_.c_str(), strerror(errno));
        return false;
      }

      prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, zeromap, zeromap_size, ".bss");
    }
  }
  return true;
}

/* Used internally by phdr_table_protect_segments and
 * phdr_table_unprotect_segments to set the protection bits of all loaded
 * segments, with optional extra flags (i.e. really PROT_WRITE).
 */
static int _phdr_table_set_load_prot(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                     ElfW(Addr) load_bias, int extra_prot_flags) {
  const ElfW(Phdr)* phdr = phdr_table;
  const ElfW(Phdr)* phdr_limit = phdr + phdr_count;

  for (; phdr < phdr_limit; phdr++) {
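    // Writable segments never need their protection changed here: they are writable both before
    // and after relocation. Skip them along with anything that is not a PT_LOAD segment.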
    if (phdr->p_type != PT_LOAD || (phdr->p_flags & PF_W) != 0) {
      continue;
    }

    ElfW(Addr) seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
    ElfW(Addr) seg_page_end   = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;

    int prot = PFLAGS_TO_PROT(phdr->p_flags);
    if ((extra_prot_flags & PROT_WRITE) != 0) {
      // make sure we're never simultaneously writable / executable
      prot &= ~PROT_EXEC;
    }

    int ret = mprotect(reinterpret_cast<void*>(seg_page_start),
                       seg_page_end - seg_page_start,
                       prot | extra_prot_flags);
    if (ret < 0) {
      return -1;
    }
  }
  return 0;
}

/* Restore the original protection modes for all loadable segments.
 * You should only call this after phdr_table_unprotect_segments and
 * applying all relocations.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in tables
 *   load_bias   -> load bias
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int phdr_table_protect_segments(const ElfW(Phdr)* phdr_table,
                                size_t phdr_count, ElfW(Addr) load_bias) {
  return _phdr_table_set_load_prot(phdr_table, phdr_count, load_bias, 0);
}

/* Change the protection of all loaded segments in memory to writable.
 * This is useful before performing relocations. Once completed, you
 * will have to call phdr_table_protect_segments to restore the original
 * protection flags on all segments.
 *
 * Note that some writable segments can also have their content turned
 * to read-only by calling phdr_table_protect_gnu_relro. This is not
 * performed here.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in tables
 *   load_bias   -> load bias
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int phdr_table_unprotect_segments(const ElfW(Phdr)* phdr_table,
                                  size_t phdr_count, ElfW(Addr) load_bias) {
  return _phdr_table_set_load_prot(phdr_table, phdr_count, load_bias, PROT_WRITE);
}

/* Used internally by phdr_table_protect_gnu_relro and
 * phdr_table_unprotect_gnu_relro.
 */
static int _phdr_table_set_gnu_relro_prot(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                          ElfW(Addr) load_bias, int prot_flags) {
  const ElfW(Phdr)* phdr = phdr_table;
  const ElfW(Phdr)* phdr_limit = phdr + phdr_count;

  for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
    if (phdr->p_type != PT_GNU_RELRO) {
      continue;
    }

    // Tricky: what happens when the relro segment does not start
    // or end at page boundaries? We're going to be over-protective
    // here and mark every page touched by the segment as read-only.

    // This seems to match Ian Lance Taylor's description of the
    // feature at http://www.airs.com/blog/archives/189.

    //    Extract:
    //       Note that the current dynamic linker code will only work
    //       correctly if the PT_GNU_RELRO segment starts on a page
    //       boundary. This is because the dynamic linker rounds the
    //       p_vaddr field down to the previous page boundary. If
    //       there is anything on the page which should not be read-only,
    //       the program is likely to fail at runtime. So in effect the
    //       linker must only emit a PT_GNU_RELRO segment if it ensures
    //       that it starts on a page boundary.
    ElfW(Addr) seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
    ElfW(Addr) seg_page_end   = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;

    int ret = mprotect(reinterpret_cast<void*>(seg_page_start),
                       seg_page_end - seg_page_start,
                       prot_flags);
    if (ret < 0) {
      return -1;
    }
  }
  return 0;
}

/* Apply GNU relro protection if specified by the program header. This will
 * turn some of the pages of a writable PT_LOAD segment to read-only, as
 * specified by one or more PT_GNU_RELRO segments. This must always be
 * performed after relocations.
 *
 * The areas typically covered are .got and .data.rel.ro; these are
 * read-only from the program's POV, but contain absolute addresses
 * that need to be relocated before use.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in tables
 *   load_bias   -> load bias
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int phdr_table_protect_gnu_relro(const ElfW(Phdr)* phdr_table,
                                 size_t phdr_count, ElfW(Addr) load_bias) {
  return _phdr_table_set_gnu_relro_prot(phdr_table, phdr_count, load_bias, PROT_READ);
}

/* Serialize the GNU relro segments to the given file descriptor. This can be
 * performed after relocations to allow another process to later share the
 * relocated segment, if it was loaded at the same address.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in tables
 *   load_bias   -> load bias
 *   fd          -> writable file descriptor to use
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int phdr_table_serialize_gnu_relro(const ElfW(Phdr)* phdr_table,
                                   size_t phdr_count,
                                   ElfW(Addr) load_bias,
                                   int fd) {
  const ElfW(Phdr)* phdr = phdr_table;
  const ElfW(Phdr)* phdr_limit = phdr + phdr_count;
  ssize_t file_offset = 0;

  for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
    if (phdr->p_type != PT_GNU_RELRO) {
      continue;
    }

    ElfW(Addr) seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
    ElfW(Addr) seg_page_end   = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;
    ssize_t size = seg_page_end - seg_page_start;

    ssize_t written = TEMP_FAILURE_RETRY(write(fd, reinterpret_cast<void*>(seg_page_start), size));
    if (written != size) {
      return -1;
    }
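    // Remap the segment so it is backed by the data just written to the file; identical pages can
    // then be shared with other processes that map the same file over their own relro region.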
    void* map = mmap(reinterpret_cast<void*>(seg_page_start), size, PROT_READ,
                     MAP_PRIVATE|MAP_FIXED, fd, file_offset);
    if (map == MAP_FAILED) {
      return -1;
    }
    file_offset += size;
  }
  return 0;
}

/* Where possible, replace the GNU relro segments with mappings of the given
 * file descriptor. This can be performed after relocations to allow a file
 * previously created by phdr_table_serialize_gnu_relro in another process to
 * replace the dirty relocated pages, saving memory, if it was loaded at the
 * same address. We have to compare the data before we map over it, since some
 * parts of the relro segment may not be identical due to other libraries in
 * the process being loaded at different addresses.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in tables
 *   load_bias   -> load bias
 *   fd          -> readable file descriptor to use
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int phdr_table_map_gnu_relro(const ElfW(Phdr)* phdr_table,
                             size_t phdr_count,
                             ElfW(Addr) load_bias,
                             int fd) {
  // Map the file at a temporary location so we can compare its contents.
  struct stat file_stat;
  if (TEMP_FAILURE_RETRY(fstat(fd, &file_stat)) != 0) {
    return -1;
  }
  off_t file_size = file_stat.st_size;
  void* temp_mapping = nullptr;
  if (file_size > 0) {
    temp_mapping = mmap(nullptr, file_size, PROT_READ, MAP_PRIVATE, fd, 0);
    if (temp_mapping == MAP_FAILED) {
      return -1;
    }
  }
  size_t file_offset = 0;

  // Iterate over the relro segments and compare/remap the pages.
  const ElfW(Phdr)* phdr = phdr_table;
  const ElfW(Phdr)* phdr_limit = phdr + phdr_count;

  for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
    if (phdr->p_type != PT_GNU_RELRO) {
      continue;
    }

    ElfW(Addr) seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
    ElfW(Addr) seg_page_end   = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;

    char* file_base = static_cast<char*>(temp_mapping) + file_offset;
    char* mem_base = reinterpret_cast<char*>(seg_page_start);
    size_t match_offset = 0;
    size_t size = seg_page_end - seg_page_start;

    if (file_size - file_offset < size) {
      // File is too short to compare to this segment. The contents are likely
      // different as well (it's probably for a different library version) so
      // just don't bother checking.
      break;
    }

    while (match_offset < size) {
      // Skip over dissimilar pages.
      while (match_offset < size &&
             memcmp(mem_base + match_offset, file_base + match_offset, PAGE_SIZE) != 0) {
        match_offset += PAGE_SIZE;
      }

      // Count similar pages.
      size_t mismatch_offset = match_offset;
      while (mismatch_offset < size &&
             memcmp(mem_base + mismatch_offset, file_base + mismatch_offset, PAGE_SIZE) == 0) {
        mismatch_offset += PAGE_SIZE;
      }

      // Map over similar pages.
      if (mismatch_offset > match_offset) {
        void* map = mmap(mem_base + match_offset, mismatch_offset - match_offset,
                         PROT_READ, MAP_PRIVATE|MAP_FIXED, fd, match_offset);
        if (map == MAP_FAILED) {
          munmap(temp_mapping, file_size);
          return -1;
        }
      }

      match_offset = mismatch_offset;
    }

    // Add to the base file offset in case there are multiple relro segments.
    file_offset += size;
  }
  munmap(temp_mapping, file_size);
  return 0;
}


#if defined(__arm__)

#  ifndef PT_ARM_EXIDX
#    define PT_ARM_EXIDX    0x70000001      /* .ARM.exidx segment */
#  endif

/* Return the address and size of the .ARM.exidx section in memory,
 * if present.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in tables
 *   load_bias   -> load bias
 * Output:
 *   arm_exidx       -> address of table in memory (null on failure).
 *   arm_exidx_count -> number of items in table (0 on failure).
 * Return:
 *   0 on success, -1 on failure (_no_ error code in errno)
 */
int phdr_table_get_arm_exidx(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                             ElfW(Addr) load_bias,
                             ElfW(Addr)** arm_exidx, size_t* arm_exidx_count) {
  const ElfW(Phdr)* phdr = phdr_table;
  const ElfW(Phdr)* phdr_limit = phdr + phdr_count;

  for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
    if (phdr->p_type != PT_ARM_EXIDX) {
      continue;
    }

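    // Each .ARM.exidx entry is a pair of 32-bit words (8 bytes), so the entry
    // count is the segment's memory size divided by 8.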
    *arm_exidx = reinterpret_cast<ElfW(Addr)*>(load_bias + phdr->p_vaddr);
    *arm_exidx_count = phdr->p_memsz / 8;
    return 0;
  }
  *arm_exidx = nullptr;
  *arm_exidx_count = 0;
  return -1;
}
#endif

/* Return the address and flags of the ELF file's .dynamic section in memory,
 * or null if missing.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in tables
 *   load_bias   -> load bias
 * Output:
 *   dynamic       -> address of table in memory (null on failure).
 *   dynamic_flags -> protection flags for section (unset on failure)
 * Return:
 *   void
 */
void phdr_table_get_dynamic_section(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                    ElfW(Addr) load_bias, ElfW(Dyn)** dynamic,
                                    ElfW(Word)* dynamic_flags) {
  *dynamic = nullptr;
  for (size_t i = 0; i < phdr_count; ++i) {
    const ElfW(Phdr)& phdr = phdr_table[i];
    if (phdr.p_type == PT_DYNAMIC) {
      *dynamic = reinterpret_cast<ElfW(Dyn)*>(load_bias + phdr.p_vaddr);
      if (dynamic_flags) {
        *dynamic_flags = phdr.p_flags;
      }
      return;
    }
  }
}

/* Return the program interpreter string, or nullptr if missing.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in tables
 *   load_bias   -> load bias
 * Return:
 *   pointer to the program interpreter string.
 */
const char* phdr_table_get_interpreter_name(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                            ElfW(Addr) load_bias) {
  for (size_t i = 0; i < phdr_count; ++i) {
    const ElfW(Phdr)& phdr = phdr_table[i];
    if (phdr.p_type == PT_INTERP) {
      return reinterpret_cast<const char*>(load_bias + phdr.p_vaddr);
    }
  }
  return nullptr;
}

// Sets loaded_phdr_ to the address of the program header table as it appears
// in the loaded segments in memory. This is in contrast with phdr_table_,
// which is temporary and will be released before the library is relocated.
bool ElfReader::FindPhdr() {
  const ElfW(Phdr)* phdr_limit = phdr_table_ + phdr_num_;

  // If there is a PT_PHDR, use it directly.
  for (const ElfW(Phdr)* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
    if (phdr->p_type == PT_PHDR) {
      return CheckPhdr(load_bias_ + phdr->p_vaddr);
    }
  }

  // Otherwise, check the first loadable segment. If its file offset
  // is 0, it starts with the ELF header, and we can trivially find the
  // loaded program header from it.
  for (const ElfW(Phdr)* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
    if (phdr->p_type == PT_LOAD) {
      if (phdr->p_offset == 0) {
        ElfW(Addr)  elf_addr = load_bias_ + phdr->p_vaddr;
        const ElfW(Ehdr)* ehdr = reinterpret_cast<const ElfW(Ehdr)*>(elf_addr);
        ElfW(Addr)  offset = ehdr->e_phoff;
        return CheckPhdr(reinterpret_cast<ElfW(Addr)>(ehdr) + offset);
      }
      break;
    }
  }

  DL_ERR("can't find loaded phdr for \"%s\"", name_.c_str());
  return false;
}

// Ensures that our program header is actually within a loadable
// segment. This should help catch badly-formed ELF files that
// would cause the linker to crash later when trying to access it.
bool ElfReader::CheckPhdr(ElfW(Addr) loaded) {
  const ElfW(Phdr)* phdr_limit = phdr_table_ + phdr_num_;
  ElfW(Addr) loaded_end = loaded + (phdr_num_ * sizeof(ElfW(Phdr)));
  for (const ElfW(Phdr)* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
    if (phdr->p_type != PT_LOAD) {
      continue;
    }
    ElfW(Addr) seg_start = phdr->p_vaddr + load_bias_;
    ElfW(Addr) seg_end = phdr->p_filesz + seg_start;
    if (seg_start <= loaded && loaded_end <= seg_end) {
      loaded_phdr_ = reinterpret_cast<const ElfW(Phdr)*>(loaded);
      return true;
    }
  }
  DL_ERR("\"%s\" loaded phdr %p not in loadable segment",
         name_.c_str(), reinterpret_cast<void*>(loaded));
  return false;
}