/*
 * Copyright (C) 2017 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

17 #include "berberis/tiny_loader/tiny_loader.h"
18
19 #include <elf.h>
20 #include <fcntl.h>
21 #include <inttypes.h>
22 #include <sys/param.h>
23 #include <sys/stat.h>
24 #include <sys/user.h>
25 #include <unistd.h>
26
27 #include "berberis/base/bit_util.h"
28 #include "berberis/base/checks.h"
29 #include "berberis/base/mapped_file_fragment.h"
30 #include "berberis/base/page_size.h"
31 #include "berberis/base/prctl_helpers.h"
32 #include "berberis/base/stringprintf.h"
33
34 #define MAYBE_MAP_FLAG(x, from, to) (((x) & (from)) ? (to) : 0)
35 #define PFLAGS_TO_PROT(x) \
36 (MAYBE_MAP_FLAG((x), PF_X, PROT_EXEC) | MAYBE_MAP_FLAG((x), PF_R, PROT_READ) | \
37 MAYBE_MAP_FLAG((x), PF_W, PROT_WRITE))
38
39 namespace {
40
set_error_msg(std::string * error_msg,const char * format,...)41 void set_error_msg(std::string* error_msg, const char* format, ...) {
42 if (error_msg == nullptr) {
43 return;
44 }
45
46 va_list ap;
47 va_start(ap, format);
48 berberis::StringAppendV(error_msg, format, ap);
49 va_end(ap);
50 }
51
// Rounds |addr| down to the start of its page.
template <typename T>
constexpr T page_align_down(T addr) {
  return berberis::AlignDown(addr, berberis::kPageSize);
}
56
// Rounds |addr| up to the next page boundary (identity if already aligned).
template <typename T>
constexpr T page_align_up(T addr) {
  return berberis::AlignUp(addr, berberis::kPageSize);
}
61
// Returns the offset of |addr| within its page.
template <typename T>
constexpr T page_offset(T addr) {
  return addr - page_align_down(addr);
}
66
// Human-readable name of an ELF EI_CLASS value, for error messages.
const char* EiClassString(int elf_class) {
  if (elf_class == ELFCLASSNONE) {
    return "ELFCLASSNONE";
  }
  if (elf_class == ELFCLASS32) {
    return "ELFCLASS32";
  }
  if (elf_class == ELFCLASS64) {
    return "ELFCLASS64";
  }
  return "(unknown)";
}
79
80 // Returns the size of the extent of all the possibly non-contiguous
81 // loadable segments in an ELF program header table. This corresponds
82 // to the page-aligned size in bytes that needs to be reserved in the
83 // process' address space. If there are no loadable segments, 0 is
84 // returned.
85 //
86 // If out_min_vaddr or out_max_vaddr are not null, they will be
87 // set to the minimum and maximum addresses of pages to be reserved,
88 // or 0 if there is nothing to load.
phdr_table_get_load_size(const ElfPhdr * phdr_table,size_t phdr_count,ElfAddr * out_min_vaddr)89 size_t phdr_table_get_load_size(const ElfPhdr* phdr_table, size_t phdr_count,
90 ElfAddr* out_min_vaddr) {
91 ElfAddr min_vaddr = UINTPTR_MAX;
92 ElfAddr max_vaddr = 0;
93
94 bool found_pt_load = false;
95 for (size_t i = 0; i < phdr_count; ++i) {
96 const ElfPhdr* phdr = &phdr_table[i];
97
98 if (phdr->p_type != PT_LOAD) {
99 continue;
100 }
101 found_pt_load = true;
102
103 if (phdr->p_vaddr < min_vaddr) {
104 min_vaddr = phdr->p_vaddr;
105 }
106
107 if (phdr->p_vaddr + phdr->p_memsz > max_vaddr) {
108 max_vaddr = phdr->p_vaddr + phdr->p_memsz;
109 }
110 }
111 if (!found_pt_load) {
112 min_vaddr = 0;
113 }
114
115 min_vaddr = page_align_down(min_vaddr);
116 max_vaddr = page_align_up(max_vaddr);
117
118 if (out_min_vaddr != nullptr) {
119 *out_min_vaddr = min_vaddr;
120 }
121 return max_vaddr - min_vaddr;
122 }
123
// Parses an ELF file and collects everything needed for later symbol lookup:
// program headers, the dynamic section, the GNU and/or SysV hash tables, the
// symbol table and the string table. A loader instance is single-use: it may
// perform at most one LoadFromFile()/LoadFromMemory().
class TinyElfLoader {
 public:
  explicit TinyElfLoader(const char* name);

  // Maps the ELF file from |fd| using the supplied mmap/munmap callbacks and
  // parses the result into |loaded_elf_file|. Returns false on failure (see
  // error_msg()).
  bool LoadFromFile(int fd, off64_t file_size, size_t align, TinyLoader::mmap64_fn_t mmap64_fn,
                    TinyLoader::munmap_fn_t munmap_fn, LoadedElfFile* loaded_elf_file);

  // Parses an ELF image that is already mapped at |load_addr|.
  bool LoadFromMemory(void* load_addr, size_t load_size, LoadedElfFile* loaded_elf_file);

  // Description of the first failure; empty until an operation fails.
  const std::string& error_msg() const { return error_msg_; }

 private:
  bool CheckElfHeader(const ElfEhdr* header);
  bool ReadElfHeader(int fd, ElfEhdr* header);
  bool ReadProgramHeadersFromFile(const ElfEhdr* header, int fd, off64_t file_size,
                                  const ElfPhdr** phdr_table, size_t* phdr_num);

  bool ReadProgramHeadersFromMemory(const ElfEhdr* header, uintptr_t load_addr, size_t load_size,
                                    const ElfPhdr** phdr_table, size_t* phdr_num);

  bool ReserveAddressSpace(ElfHalf e_type, const ElfPhdr* phdr_table, size_t phdr_num, size_t align,
                           TinyLoader::mmap64_fn_t mmap64_fn, TinyLoader::munmap_fn_t munmap_fn,
                           void** load_start, size_t* load_size, uintptr_t* load_bias);

  bool LoadSegments(int fd, size_t file_size, ElfHalf e_type, const ElfPhdr* phdr_table,
                    size_t phdr_num, size_t align, TinyLoader::mmap64_fn_t mmap64_fn,
                    TinyLoader::munmap_fn_t munmap_fn, void** load_start, size_t* load_size);

  bool FindDynamicSegment(const ElfEhdr* header);
  bool InitializeFields(const ElfEhdr* header);

  bool Parse(void* load_ptr, size_t load_size, LoadedElfFile* loaded_elf_file);

  static bool CheckFileRange(off64_t file_size, ElfAddr offset, size_t size, size_t alignment);
  static bool CheckMemoryRange(uintptr_t load_addr, size_t load_size, ElfAddr offset, size_t size,
                               size_t alignment);
  uint8_t* Reserve(void* hint, size_t size, TinyLoader::mmap64_fn_t mmap64_fn);

  // True after a successful load; guards against reusing the instance.
  bool did_load_;

  // File name used in error messages (not owned).
  const char* name_;

  // Keeps the mmap'ed program header table alive for file-based loads.
  MappedFileFragment phdr_fragment_;

  // Loaded phdr
  const ElfPhdr* loaded_phdr_;
  size_t loaded_phdr_num_;

  // Difference between run-time addresses and link-time virtual addresses.
  ElfAddr load_bias_;

  void* entry_point_;

  // Loaded dynamic section
  const ElfDyn* dynamic_;

  // Fields needed for symbol lookup
  bool has_gnu_hash_;
  size_t gnu_nbucket_;
  uint32_t* gnu_bucket_;
  uint32_t* gnu_chain_;
  uint32_t gnu_maskwords_;
  uint32_t gnu_shift2_;
  ElfAddr* gnu_bloom_filter_;

  uint32_t sysv_nbucket_;
  uint32_t sysv_nchain_;
  uint32_t* sysv_bucket_;
  uint32_t* sysv_chain_;

  ElfSym* symtab_;

  const char* strtab_;
  size_t strtab_size_;

  std::string error_msg_;
};
200
// Initializes every member to its "nothing loaded yet" state; all actual
// parsing happens later in LoadFromFile()/LoadFromMemory().
TinyElfLoader::TinyElfLoader(const char* name)
    : did_load_(false),
      name_(name),
      loaded_phdr_(nullptr),
      loaded_phdr_num_(0),
      load_bias_(0),
      entry_point_(nullptr),
      dynamic_(nullptr),
      has_gnu_hash_(false),
      gnu_nbucket_(0),
      gnu_bucket_(nullptr),
      gnu_chain_(nullptr),
      gnu_maskwords_(0),
      gnu_shift2_(0),
      gnu_bloom_filter_(nullptr),
      sysv_nbucket_(0),
      sysv_nchain_(0),
      sysv_bucket_(nullptr),
      sysv_chain_(nullptr),
      symtab_(nullptr),
      strtab_(nullptr),
      strtab_size_(0) {}
223
CheckElfHeader(const ElfEhdr * header)224 bool TinyElfLoader::CheckElfHeader(const ElfEhdr* header) {
225 if (memcmp(header->e_ident, ELFMAG, SELFMAG) != 0) {
226 set_error_msg(&error_msg_, "\"%s\" has bad ELF magic", name_);
227 return false;
228 }
229
230 int elf_class = header->e_ident[EI_CLASS];
231 if (elf_class != kSupportedElfClass) {
232 set_error_msg(&error_msg_, "\"%s\" %s is not supported, expected %s.", name_,
233 EiClassString(elf_class), EiClassString(kSupportedElfClass));
234 return false;
235 }
236
237 if (header->e_ident[EI_DATA] != ELFDATA2LSB) {
238 set_error_msg(&error_msg_, "\"%s\" not little-endian: %d", name_, header->e_ident[EI_DATA]);
239 return false;
240 }
241
242 if (header->e_version != EV_CURRENT) {
243 set_error_msg(&error_msg_, "\"%s\" has unexpected e_version: %d", name_, header->e_version);
244 return false;
245 }
246
247 if (header->e_shentsize != sizeof(ElfShdr)) {
248 set_error_msg(&error_msg_, "\"%s\" has unsupported e_shentsize: 0x%x (expected 0x%zx)", name_,
249 header->e_shentsize, sizeof(ElfShdr));
250 return false;
251 }
252
253 if (header->e_shstrndx == 0) {
254 set_error_msg(&error_msg_, "\"%s\" has invalid e_shstrndx", name_);
255 return false;
256 }
257
258 // Like the kernel, we only accept program header tables that
259 // are smaller than 64KiB.
260 if (header->e_phnum < 1 || header->e_phnum > 65536 / sizeof(ElfPhdr)) {
261 set_error_msg(&error_msg_, "\"%s\" has invalid e_phnum: %zd", name_, header->e_phnum);
262 return false;
263 }
264
265 return true;
266 }
267
ReadElfHeader(int fd,ElfEhdr * header)268 bool TinyElfLoader::ReadElfHeader(int fd, ElfEhdr* header) {
269 ssize_t rc = TEMP_FAILURE_RETRY(pread64(fd, header, sizeof(*header), 0));
270 if (rc < 0) {
271 set_error_msg(&error_msg_, "can't read file \"%s\": %s", name_, strerror(errno));
272 return false;
273 }
274
275 if (rc != sizeof(*header)) {
276 set_error_msg(&error_msg_, "\"%s\" is too small to be an ELF executable: only found %zd bytes",
277 name_, static_cast<size_t>(rc));
278 return false;
279 }
280
281 return CheckElfHeader(header);
282 }
283
CheckFileRange(off64_t file_size,ElfAddr offset,size_t size,size_t alignment)284 bool TinyElfLoader::CheckFileRange(off64_t file_size, ElfAddr offset, size_t size,
285 size_t alignment) {
286 off64_t range_start = offset;
287 off64_t range_end;
288
289 return offset > 0 && !__builtin_add_overflow(range_start, size, &range_end) &&
290 (range_start < file_size) && (range_end <= file_size) && ((offset % alignment) == 0);
291 }
292
CheckMemoryRange(uintptr_t load_addr,size_t load_size,ElfAddr offset,size_t size,size_t alignment)293 bool TinyElfLoader::CheckMemoryRange(uintptr_t load_addr, size_t load_size, ElfAddr offset,
294 size_t size, size_t alignment) {
295 uintptr_t dummy;
296 uintptr_t offset_end;
297
298 return offset < load_size && !__builtin_add_overflow(load_addr, load_size, &dummy) &&
299 !__builtin_add_overflow(offset, size, &offset_end) && offset_end <= load_size &&
300 ((offset % alignment) == 0);
301 }
302
ReadProgramHeadersFromFile(const ElfEhdr * header,int fd,off64_t file_size,const ElfPhdr ** phdr_table,size_t * phdr_num)303 bool TinyElfLoader::ReadProgramHeadersFromFile(const ElfEhdr* header, int fd, off64_t file_size,
304 const ElfPhdr** phdr_table, size_t* phdr_num) {
305 size_t phnum = header->e_phnum;
306 size_t size = phnum * sizeof(ElfPhdr);
307
308 if (!CheckFileRange(file_size, header->e_phoff, size, alignof(ElfPhdr))) {
309 set_error_msg(&error_msg_, "\"%s\" has invalid phdr offset/size: %zu/%zu", name_,
310 static_cast<size_t>(header->e_phoff), size);
311 return false;
312 }
313
314 if (!phdr_fragment_.Map(fd, 0, header->e_phoff, size)) {
315 set_error_msg(&error_msg_, "\"%s\" phdr mmap failed: %s", name_, strerror(errno));
316 return false;
317 }
318
319 *phdr_table = static_cast<ElfPhdr*>(phdr_fragment_.data());
320 *phdr_num = phnum;
321 return true;
322 }
323
ReadProgramHeadersFromMemory(const ElfEhdr * header,uintptr_t load_addr,size_t load_size,const ElfPhdr ** phdr_table,size_t * phdr_num)324 bool TinyElfLoader::ReadProgramHeadersFromMemory(const ElfEhdr* header, uintptr_t load_addr,
325 size_t load_size, const ElfPhdr** phdr_table,
326 size_t* phdr_num) {
327 size_t phnum = header->e_phnum;
328 size_t size = phnum * sizeof(ElfPhdr);
329
330 if (!CheckMemoryRange(load_addr, load_size, header->e_phoff, size, alignof(ElfPhdr))) {
331 set_error_msg(&error_msg_, "\"%s\" has invalid phdr offset/size: %zu/%zu", name_,
332 static_cast<size_t>(header->e_phoff), size);
333 return false;
334 }
335
336 *phdr_table = reinterpret_cast<const ElfPhdr*>(load_addr + header->e_phoff);
337 *phdr_num = phnum;
338 return true;
339 }
340
Reserve(void * hint,size_t size,TinyLoader::mmap64_fn_t mmap64_fn)341 uint8_t* TinyElfLoader::Reserve(void* hint, size_t size, TinyLoader::mmap64_fn_t mmap64_fn) {
342 int mmap_flags = MAP_PRIVATE | MAP_ANONYMOUS;
343
344 void* mmap_ptr = mmap64_fn(hint, size, PROT_NONE, mmap_flags, -1, 0);
345 if (mmap_ptr == MAP_FAILED) {
346 return nullptr;
347 }
348
349 return reinterpret_cast<uint8_t*>(mmap_ptr);
350 }
351
// Reserves one contiguous PROT_NONE region large enough for every PT_LOAD
// segment. ET_EXEC files must be placed exactly at their linked address;
// other files may go anywhere, optionally over-aligned beyond the page size.
// On success fills in |load_start|, |load_size| and |load_bias|.
bool TinyElfLoader::ReserveAddressSpace(ElfHalf e_type, const ElfPhdr* phdr_table, size_t phdr_num,
                                        size_t align, TinyLoader::mmap64_fn_t mmap64_fn,
                                        TinyLoader::munmap_fn_t munmap_fn, void** load_start,
                                        size_t* load_size, uintptr_t* load_bias) {
  ElfAddr min_vaddr;
  size_t size = phdr_table_get_load_size(phdr_table, phdr_num, &min_vaddr);
  if (size == 0) {
    set_error_msg(&error_msg_, "\"%s\" has no loadable segments", name_);
    return false;
  }

  uint8_t* addr = reinterpret_cast<uint8_t*>(min_vaddr);
  uint8_t* start;

  if (e_type == ET_EXEC) {
    // Position-dependent executable: the reservation must land exactly at
    // min_vaddr, so pass it as a hint and fail if the kernel chose elsewhere.
    start = Reserve(addr, size, mmap64_fn);
    if (start != addr) {
      if (start != nullptr) {
        munmap_fn(start, size);
      }
      set_error_msg(&error_msg_, "couldn't reserve %zd bytes of address space at %p for \"%s\"",
                    size, addr, name_);

      return false;
    }
  } else if (align <= berberis::kPageSize) {
    // Position-independent with no extra alignment requirement: any address
    // the kernel picks is fine.
    start = Reserve(nullptr, size, mmap64_fn);
    if (start == nullptr) {
      set_error_msg(&error_msg_, "couldn't reserve %zd bytes of address space for \"%s\"", size,
                    name_);
      return false;
    }
  } else {
    // Over-aligned load: reserve align + size bytes, round the start up to
    // the alignment, then unmap the unused head and tail so that exactly
    // [start, start + size) remains reserved.
    CHECK(berberis::IsPowerOf2(align));
    uint8_t* unaligned_start = Reserve(nullptr, align + size, mmap64_fn);
    if (unaligned_start == nullptr) {
      set_error_msg(&error_msg_,
                    "couldn't reserve %zd bytes of address space aligned on %zd for \"%s\"", size,
                    align, name_);
      return false;
    }
    start = berberis::AlignUp(unaligned_start, align);
    munmap_fn(unaligned_start, start - unaligned_start);
    munmap_fn(start + size, unaligned_start + align - start);
  }

  *load_start = start;
  *load_size = size;
  // Bias to add to a link-time p_vaddr to get the run-time address.
  *load_bias = start - addr;
  return true;
}
406
LoadSegments(int fd,size_t file_size,ElfHalf e_type,const ElfPhdr * phdr_table,size_t phdr_num,size_t align,TinyLoader::mmap64_fn_t mmap64_fn,TinyLoader::munmap_fn_t munmap_fn,void ** load_start,size_t * load_size)407 bool TinyElfLoader::LoadSegments(int fd, size_t file_size, ElfHalf e_type,
408 const ElfPhdr* phdr_table, size_t phdr_num, size_t align,
409 TinyLoader::mmap64_fn_t mmap64_fn,
410 TinyLoader::munmap_fn_t munmap_fn, void** load_start,
411 size_t* load_size) {
412 uintptr_t load_bias = 0;
413 if (!ReserveAddressSpace(e_type, phdr_table, phdr_num, align, mmap64_fn, munmap_fn, load_start,
414 load_size, &load_bias)) {
415 return false;
416 }
417
418 for (size_t i = 0; i < phdr_num; ++i) {
419 const ElfPhdr* phdr = &phdr_table[i];
420
421 if (phdr->p_type != PT_LOAD) {
422 continue;
423 }
424
425 // Segment addresses in memory.
426 ElfAddr seg_start = phdr->p_vaddr + load_bias;
427 ElfAddr seg_end = seg_start + phdr->p_memsz;
428
429 ElfAddr seg_page_start = page_align_down(seg_start);
430 ElfAddr seg_page_end = page_align_up(seg_end);
431
432 ElfAddr seg_file_end = seg_start + phdr->p_filesz;
433
434 // File offsets.
435 ElfAddr file_start = phdr->p_offset;
436 ElfAddr file_end = file_start + phdr->p_filesz;
437
438 ElfAddr file_page_start = page_align_down(file_start);
439 ElfAddr file_length = file_end - file_page_start;
440
441 if (file_size <= 0) {
442 set_error_msg(&error_msg_, "\"%s\" invalid file size: %" PRId64, name_, file_size);
443 return false;
444 }
445
446 if (file_end > static_cast<size_t>(file_size)) {
447 set_error_msg(&error_msg_,
448 "invalid ELF file \"%s\" load segment[%zd]:"
449 " p_offset (%p) + p_filesz (%p) ( = %p) past end of file (0x%" PRIx64 ")",
450 name_, i, reinterpret_cast<void*>(phdr->p_offset),
451 reinterpret_cast<void*>(phdr->p_filesz), reinterpret_cast<void*>(file_end),
452 file_size);
453 return false;
454 }
455
456 if (file_length != 0) {
457 int prot = PFLAGS_TO_PROT(phdr->p_flags);
458 if ((prot & (PROT_EXEC | PROT_WRITE)) == (PROT_EXEC | PROT_WRITE)) {
459 set_error_msg(&error_msg_, "\"%s\": W + E load segments are not allowed", name_);
460 return false;
461 }
462
463 void* seg_addr = mmap64_fn(reinterpret_cast<void*>(seg_page_start), file_length, prot,
464 MAP_FIXED | MAP_PRIVATE, fd, file_page_start);
465 if (seg_addr == MAP_FAILED) {
466 set_error_msg(&error_msg_, "couldn't map \"%s\" segment %zd: %s", name_, i,
467 strerror(errno));
468 return false;
469 }
470 }
471
472 // if the segment is writable, and does not end on a page boundary,
473 // zero-fill it until the page limit.
474 if ((phdr->p_flags & PF_W) != 0 && page_offset(seg_file_end) > 0) {
475 memset(reinterpret_cast<void*>(seg_file_end),
476 0,
477 berberis::kPageSize - page_offset(seg_file_end));
478 }
479
480 seg_file_end = page_align_up(seg_file_end);
481
482 // seg_file_end is now the first page address after the file
483 // content. If seg_end is larger, we need to zero anything
484 // between them. This is done by using a private anonymous
485 // map for all extra pages.
486 if (seg_page_end > seg_file_end) {
487 size_t zeromap_size = seg_page_end - seg_file_end;
488 void* zeromap =
489 mmap64_fn(reinterpret_cast<void*>(seg_file_end), zeromap_size,
490 PFLAGS_TO_PROT(phdr->p_flags), MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
491 if (zeromap == MAP_FAILED) {
492 set_error_msg(&error_msg_, "couldn't zero fill \"%s\" gap: %s", name_, strerror(errno));
493 return false;
494 }
495
496 berberis::SetVmaAnonName(zeromap, zeromap_size, ".bss");
497 }
498 }
499
500 return true;
501 }
502
FindDynamicSegment(const ElfEhdr * header)503 bool TinyElfLoader::FindDynamicSegment(const ElfEhdr* header) {
504 // Static executables do not have PT_DYNAMIC
505 if (header->e_type == ET_EXEC) {
506 return true;
507 }
508
509 for (size_t i = 0; i < loaded_phdr_num_; ++i) {
510 const ElfPhdr& phdr = loaded_phdr_[i];
511 if (phdr.p_type == PT_DYNAMIC) {
512 // TODO(dimitry): Check all addresses and sizes referencing loaded segments.
513 dynamic_ = reinterpret_cast<ElfDyn*>(load_bias_ + phdr.p_vaddr);
514 return true;
515 }
516 }
517
518 set_error_msg(&error_msg_, "dynamic segment was not found in \"%s\"", name_);
519 return false;
520 }
521
// Walks the dynamic section and caches everything needed for symbol lookup:
// the GNU and/or SysV hash tables, the symbol table and the string table.
// For dynamic files, DT_SYMTAB, DT_STRTAB and a non-zero DT_STRSZ are
// mandatory; their absence is an error.
bool TinyElfLoader::InitializeFields(const ElfEhdr* header) {
  if (header->e_entry != 0) {
    entry_point_ = reinterpret_cast<void*>(load_bias_ + header->e_entry);
  }

  // There is nothing else to do for a static executable.
  if (header->e_type == ET_EXEC) {
    return true;
  }

  for (const ElfDyn* d = dynamic_; d->d_tag != DT_NULL; ++d) {
    if (d->d_tag == DT_GNU_HASH) {
      // The table starts with four 32-bit words: [0] bucket count,
      // [1] index of the first hashed symbol, [2] bloom-filter word count,
      // [3] bloom shift. The bloom filter follows the 16-byte header, then
      // the buckets, then the chain array.
      has_gnu_hash_ = true;
      gnu_nbucket_ = reinterpret_cast<uint32_t*>(load_bias_ + d->d_un.d_ptr)[0];
      gnu_maskwords_ = reinterpret_cast<uint32_t*>(load_bias_ + d->d_un.d_ptr)[2];
      gnu_shift2_ = reinterpret_cast<uint32_t*>(load_bias_ + d->d_un.d_ptr)[3];
      gnu_bloom_filter_ = reinterpret_cast<ElfAddr*>(load_bias_ + d->d_un.d_ptr + 16);
      gnu_bucket_ = reinterpret_cast<uint32_t*>(gnu_bloom_filter_ + gnu_maskwords_);
      // Bias the chain pointer by the first hashed symbol index (word [1]) so
      // it can be indexed directly by symbol table index.
      gnu_chain_ =
          gnu_bucket_ + gnu_nbucket_ - reinterpret_cast<uint32_t*>(load_bias_ + d->d_un.d_ptr)[1];

      if (!powerof2(gnu_maskwords_)) {
        set_error_msg(&error_msg_,
                      "invalid maskwords for gnu_hash = 0x%x, in \"%s\" expecting power of two",
                      gnu_maskwords_, name_);

        return false;
      }

      // Convert the power-of-two word count into an index mask.
      --gnu_maskwords_;
    } else if (d->d_tag == DT_HASH) {
      // Classic SysV hash layout: [0] nbucket, [1] nchain, then the bucket
      // array, then the chain array (all 32-bit words).
      sysv_nbucket_ = reinterpret_cast<uint32_t*>(load_bias_ + d->d_un.d_ptr)[0];
      sysv_nchain_ = reinterpret_cast<uint32_t*>(load_bias_ + d->d_un.d_ptr)[1];
      sysv_bucket_ = reinterpret_cast<uint32_t*>(load_bias_ + d->d_un.d_ptr + 8);
      sysv_chain_ = reinterpret_cast<uint32_t*>(load_bias_ + d->d_un.d_ptr + 8 + sysv_nbucket_ * 4);
    } else if (d->d_tag == DT_SYMTAB) {
      symtab_ = reinterpret_cast<ElfSym*>(load_bias_ + d->d_un.d_ptr);
    } else if (d->d_tag == DT_STRTAB) {
      strtab_ = reinterpret_cast<const char*>(load_bias_ + d->d_un.d_ptr);
    } else if (d->d_tag == DT_STRSZ) {
      strtab_size_ = d->d_un.d_val;
    }
  }

  if (symtab_ == nullptr) {
    set_error_msg(&error_msg_, "missing DT_SYMTAB in \"%s\"", name_);
    return false;
  }

  if (strtab_ == nullptr) {
    set_error_msg(&error_msg_, "missing DT_STRTAB in \"%s\"", name_);
    return false;
  }

  if (strtab_size_ == 0) {
    set_error_msg(&error_msg_, "missing or invalid (0) DT_STRSZ in \"%s\"", name_);
    return false;
  }

  return true;
}
583
// Parses an ELF image already present in memory at |load_ptr| (either freshly
// mapped by LoadSegments() or supplied by the caller) and fills in
// |loaded_elf_file| on success.
bool TinyElfLoader::Parse(void* load_ptr, size_t load_size, LoadedElfFile* loaded_elf_file) {
  uintptr_t load_addr = reinterpret_cast<uintptr_t>(load_ptr);
  const ElfEhdr* header = reinterpret_cast<const ElfEhdr*>(load_addr);
  if (!CheckElfHeader(header)) {
    return false;
  }

  if (!ReadProgramHeadersFromMemory(header, load_addr, load_size, &loaded_phdr_,
                                    &loaded_phdr_num_)) {
    return false;
  }

  // The image is mapped at load_addr but linked at min_vaddr; the difference
  // is the bias applied to every virtual address taken from the file.
  ElfAddr min_vaddr;
  phdr_table_get_load_size(loaded_phdr_, loaded_phdr_num_, &min_vaddr);
  load_bias_ = load_addr - min_vaddr;

  if (!FindDynamicSegment(header) || !InitializeFields(header)) {
    return false;
  }

  // The two LoadedElfFile constructors differ only in which symbol hash table
  // (GNU vs SysV) they carry.
  if (has_gnu_hash_) {
    *loaded_elf_file = LoadedElfFile(header->e_type, load_ptr, load_bias_, entry_point_,
                                     loaded_phdr_, loaded_phdr_num_, dynamic_, gnu_nbucket_,
                                     gnu_bucket_, gnu_chain_, gnu_maskwords_, gnu_shift2_,
                                     gnu_bloom_filter_, symtab_, strtab_, strtab_size_);
  } else {
    *loaded_elf_file =
        LoadedElfFile(header->e_type, load_ptr, load_bias_, entry_point_, loaded_phdr_,
                      loaded_phdr_num_, dynamic_, sysv_nbucket_, sysv_nchain_, sysv_bucket_,
                      sysv_chain_, symtab_, strtab_, strtab_size_);
  }
  return true;
}
617
LoadFromFile(int fd,off64_t file_size,size_t align,TinyLoader::mmap64_fn_t mmap64_fn,TinyLoader::munmap_fn_t munmap_fn,LoadedElfFile * loaded_elf_file)618 bool TinyElfLoader::LoadFromFile(int fd, off64_t file_size, size_t align,
619 TinyLoader::mmap64_fn_t mmap64_fn,
620 TinyLoader::munmap_fn_t munmap_fn,
621 LoadedElfFile* loaded_elf_file) {
622 CHECK(!did_load_);
623 void* load_addr = nullptr;
624 size_t load_size = 0;
625 ElfEhdr header;
626 const ElfPhdr* phdr_table = nullptr;
627 size_t phdr_num = 0;
628
629 did_load_ = ReadElfHeader(fd, &header) &&
630 ReadProgramHeadersFromFile(&header, fd, file_size, &phdr_table, &phdr_num) &&
631 LoadSegments(fd, file_size, header.e_type, phdr_table, phdr_num, align, mmap64_fn,
632 munmap_fn, &load_addr, &load_size) &&
633 Parse(load_addr, load_size, loaded_elf_file);
634
635 return did_load_;
636 }
637
// Parses an ELF image the caller already placed in memory. Like LoadFromFile,
// may only be called once per loader instance.
bool TinyElfLoader::LoadFromMemory(void* load_addr, size_t load_size,
                                   LoadedElfFile* loaded_elf_file) {
  CHECK(!did_load_);
  did_load_ = Parse(load_addr, load_size, loaded_elf_file);
  return did_load_;
}
644
645 } // namespace
646
LoadFromFile(const char * path,size_t align,TinyLoader::mmap64_fn_t mmap64_fn,TinyLoader::munmap_fn_t munmap_fn,LoadedElfFile * loaded_elf_file,std::string * error_msg)647 bool TinyLoader::LoadFromFile(const char* path, size_t align, TinyLoader::mmap64_fn_t mmap64_fn,
648 TinyLoader::munmap_fn_t munmap_fn, LoadedElfFile* loaded_elf_file,
649 std::string* error_msg) {
650 int fd = TEMP_FAILURE_RETRY(open(path, O_RDONLY | O_CLOEXEC));
651 if (fd == -1) {
652 set_error_msg(error_msg, "unable to open the file \"%s\": %s", path, strerror(errno));
653 return false;
654 }
655
656 struct stat file_stat;
657 if (TEMP_FAILURE_RETRY(fstat(fd, &file_stat)) != 0) {
658 set_error_msg(error_msg, "unable to stat file for the library \"%s\": %s", path,
659 strerror(errno));
660 close(fd);
661 return false;
662 }
663
664 TinyElfLoader loader(path);
665
666 if (!loader.LoadFromFile(fd, file_stat.st_size, align, mmap64_fn, munmap_fn, loaded_elf_file)) {
667 if (error_msg != nullptr) {
668 *error_msg = loader.error_msg();
669 }
670
671 close(fd);
672 return false;
673 }
674
675 close(fd);
676 return true;
677 }
678
LoadFromMemory(const char * path,void * address,size_t size,LoadedElfFile * loaded_elf_file,std::string * error_msg)679 bool TinyLoader::LoadFromMemory(const char* path, void* address, size_t size,
680 LoadedElfFile* loaded_elf_file, std::string* error_msg) {
681 TinyElfLoader loader(path);
682 if (!loader.LoadFromMemory(address, size, loaded_elf_file)) {
683 if (error_msg != nullptr) {
684 *error_msg = loader.error_msg();
685 }
686
687 return false;
688 }
689
690 return true;
691 }
692