// Copyright (c) 2010, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// linux_dumper.cc: Implement google_breakpad::LinuxDumper.
// See linux_dumper.h for details.

// This code deals with the mechanics of getting information about a crashed
// process. Since this code may run in a compromised address space, the same
// rules apply as detailed at the top of minidump_writer.h: no libc calls and
// use the alternative allocator.

#include "client/linux/minidump_writer/linux_dumper.h"

#include <assert.h>
#include <elf.h>
#include <fcntl.h>
#include <limits.h>
#include <stddef.h>
#include <string.h>

#include "client/linux/minidump_writer/line_reader.h"
#include "common/linux/elfutils.h"
#include "common/linux/file_id.h"
#include "common/linux/linux_libc_support.h"
#include "common/linux/memory_mapped_file.h"
#include "common/linux/safe_readlink.h"
#include "google_breakpad/common/minidump_exception_linux.h"
#include "third_party/lss/linux_syscall_support.h"

#if defined(__ANDROID__)

// Android packed relocations definitions are not yet available from the
// NDK header files, so we have to provide them manually here.
#ifndef DT_LOOS
#define DT_LOOS 0x6000000d
#endif
#ifndef DT_ANDROID_REL
static const int DT_ANDROID_REL = DT_LOOS + 2;
#endif
#ifndef DT_ANDROID_RELA
static const int DT_ANDROID_RELA = DT_LOOS + 4;
#endif
#endif  // __ANDROID__

static const char kMappedFileUnsafePrefix[] = "/dev/";
static const char kDeletedSuffix[] = " (deleted)";

inline static bool IsMappedFileOpenUnsafe(
    const google_breakpad::MappingInfo& mapping) {
  // It is unsafe to attempt to open a mapped file that lives under /dev,
  // because the semantics of the open may be driver-specific, so we'd risk
  // hanging the crash dumper. And a file in /dev/ almost certainly has no
  // ELF file identifier anyway.
  return my_strncmp(mapping.name,
                    kMappedFileUnsafePrefix,
                    sizeof(kMappedFileUnsafePrefix) - 1) == 0;
}

namespace google_breakpad {

namespace {

bool MappingContainsAddress(const MappingInfo& mapping, uintptr_t address) {
  return mapping.system_mapping_info.start_addr <= address &&
         address < mapping.system_mapping_info.end_addr;
}

#if defined(__CHROMEOS__)

// Recover memory mappings before writing dump on ChromeOS
//
// On Linux, breakpad relies on /proc/[pid]/maps to associate addresses with
// symbols. ChromeOS' hugepage implementation replaces some segments with
// anonymous private pages, which is a restriction of the current
// implementation in the Linux kernel at the time of writing. Thus, breakpad
// can no longer symbolize addresses from those text segments replaced with
// hugepages.
//
// This post-processing step tries to recover the mappings. Because hugepages
// are always inserted in between some .text sections, it tries to infer the
// names and offsets of the segments by looking at the segments that
// immediately precede and succeed them.
//
// For example, a text segment before hugepage optimization
//   02001000-03002000 r-xp /opt/google/chrome/chrome
//
// can be broken into
//   02001000-02200000 r-xp /opt/google/chrome/chrome
//   02200000-03000000 r-xp
//   03000000-03002000 r-xp /opt/google/chrome/chrome
//
// For more details, see:
// crbug.com/628040 ChromeOS' use of hugepages confuses crash symbolization

// Copied from CrOS' hugepage implementation, which is unlikely to change.
// The hugepage size is 2M.
const unsigned int kHpageShift = 21;
const size_t kHpageSize = (1 << kHpageShift);
const size_t kHpageMask = (~(kHpageSize - 1));
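// For example, with kHpageShift == 21, kHpageSize is 0x200000 and kHpageMask
// is ~0x1fffff, so (addr & kHpageMask) rounds addr down to a 2 MiB boundary;
// a value v is hugepage-aligned exactly when (v & kHpageMask) == v.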

// Find and merge anonymous r-xp segments with surrounding named segments.
// There are two cases:

// Case 1: curr, next
//   curr is anonymous
//   curr is r-xp
//   curr.size >= 2M
//   curr.size is a multiple of 2M.
//   next is backed by some file.
//   curr and next are contiguous.
//   offset(next) == sizeof(curr)
void TryRecoverMappings(MappingInfo *curr, MappingInfo *next) {
  // Merged segments are marked with size = 0.
  if (curr->size == 0 || next->size == 0)
    return;

  if (curr->size >= kHpageSize &&
      curr->exec &&
      (curr->size & kHpageMask) == curr->size &&
      (curr->start_addr & kHpageMask) == curr->start_addr &&
      curr->name[0] == '\0' &&
      next->name[0] != '\0' &&
      curr->start_addr + curr->size == next->start_addr &&
      curr->size == next->offset) {

    // matched
    my_strlcpy(curr->name, next->name, NAME_MAX);
    if (next->exec) {
      // (curr, next)
      curr->size += next->size;
      next->size = 0;
    }
  }
}

// Case 2: prev, curr, next
//   curr is anonymous
//   curr is r-xp
//   curr.size >= 2M
//   curr.size is a multiple of 2M.
//   next and prev are backed by the same file.
//   prev, curr and next are contiguous.
//   offset(next) == offset(prev) + sizeof(prev) + sizeof(curr)
void TryRecoverMappings(MappingInfo *prev, MappingInfo *curr,
    MappingInfo *next) {
  // Merged segments are marked with size = 0.
  if (prev->size == 0 || curr->size == 0 || next->size == 0)
    return;

  if (curr->size >= kHpageSize &&
      curr->exec &&
      (curr->size & kHpageMask) == curr->size &&
      (curr->start_addr & kHpageMask) == curr->start_addr &&
      curr->name[0] == '\0' &&
      next->name[0] != '\0' &&
      curr->start_addr + curr->size == next->start_addr &&
      prev->start_addr + prev->size == curr->start_addr &&
      my_strncmp(prev->name, next->name, NAME_MAX) == 0 &&
      next->offset == prev->offset + prev->size + curr->size) {

    // matched
    my_strlcpy(curr->name, prev->name, NAME_MAX);
    if (prev->exec) {
      curr->offset = prev->offset;
      curr->start_addr = prev->start_addr;
      if (next->exec) {
        // (prev, curr, next)
        curr->size += prev->size + next->size;
        prev->size = 0;
        next->size = 0;
      } else {
        // (prev, curr), next
        curr->size += prev->size;
        prev->size = 0;
      }
    } else {
      curr->offset = prev->offset + prev->size;
      if (next->exec) {
        // prev, (curr, next)
        curr->size += next->size;
        next->size = 0;
      } else {
        // prev, curr, next
      }
    }
  }
}

// mappings_ is sorted except for the first entry.
// This function tries to merge segments into the first entry,
// then checks the other, sorted entries.
// See LinuxDumper::EnumerateMappings().
void CrOSPostProcessMappings(wasteful_vector<MappingInfo*>& mappings) {
  // Find the candidate "next" to first segment, which is the only one that
  // could be out-of-order.
  size_t l = 1;
  size_t r = mappings.size();
  size_t next = mappings.size();
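  // Binary search the sorted tail [1, size) for the first entry whose
  // start_addr is greater than mappings[0]->start_addr; the out-of-order
  // first entry is then rotated into place just before it.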
  while (l < r) {
    int m = (l + r) / 2;
    if (mappings[m]->start_addr > mappings[0]->start_addr)
      r = next = m;
    else
      l = m + 1;
  }

  // The range that contains the entry point is
  // [first_start_addr, first_end_addr).
  size_t first_start_addr = mappings[0]->start_addr;
  size_t first_end_addr = mappings[0]->start_addr + mappings[0]->size;

  // Put the out-of-order segment in order.
  std::rotate(mappings.begin(), mappings.begin() + 1, mappings.begin() + next);

  // Iterate through normal, sorted cases.
  // Normal case 1.
  for (size_t i = 0; i < mappings.size() - 1; i++)
    TryRecoverMappings(mappings[i], mappings[i + 1]);

  // Normal case 2.
  for (size_t i = 0; i < mappings.size() - 2; i++)
    TryRecoverMappings(mappings[i], mappings[i + 1], mappings[i + 2]);

  // Collect merged (size == 0) segments.
  size_t f, e;
  for (f = e = 0; e < mappings.size(); e++)
    if (mappings[e]->size > 0)
      mappings[f++] = mappings[e];
  mappings.resize(f);

  // The entry point is in the first mapping. We want to find the location
  // of the entry point after merging segments. To do this, we want to find
  // the mapping that covers the first mapping from the original mapping list.
  // If that mapping is not at the beginning, we move it to the beginning via
  // a right rotate by using reverse iterators.
  for (l = 0; l < mappings.size(); l++) {
    if (mappings[l]->start_addr <= first_start_addr
        && (mappings[l]->start_addr + mappings[l]->size >= first_end_addr))
      break;
  }
  if (l > 0) {
    r = mappings.size();
    std::rotate(mappings.rbegin() + r - l - 1, mappings.rbegin() + r - l,
                mappings.rend());
  }
}

#endif  // __CHROMEOS__

}  // namespace

// All interesting auxv entry types are below AT_SYSINFO_EHDR
#define AT_MAX AT_SYSINFO_EHDR

LinuxDumper::LinuxDumper(pid_t pid, const char* root_prefix)
    : pid_(pid),
      root_prefix_(root_prefix),
      crash_address_(0),
      crash_signal_(0),
      crash_signal_code_(0),
      crash_thread_(pid),
      threads_(&allocator_, 8),
      mappings_(&allocator_),
      auxv_(&allocator_, AT_MAX + 1) {
  assert(root_prefix_ && my_strlen(root_prefix_) < PATH_MAX);
  // The passed-in size to the constructor (above) is only a hint.
  // Must call .resize() to do actual initialization of the elements.
  auxv_.resize(AT_MAX + 1);
}

LinuxDumper::~LinuxDumper() {
}

bool LinuxDumper::Init() {
  return ReadAuxv() && EnumerateThreads() && EnumerateMappings();
}

bool LinuxDumper::LateInit() {
#if defined(__ANDROID__)
  LatePostprocessMappings();
#endif

#if defined(__CHROMEOS__)
  CrOSPostProcessMappings(mappings_);
#endif

  return true;
}
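
// A typical call sequence, as a sketch only (the concrete LinuxPtraceDumper
// subclass and the thread-suspension step live outside this file, so the
// names below are assumptions rather than part of this translation unit):
//   LinuxPtraceDumper dumper(crashing_pid);
//   if (!dumper.Init())
//     return false;
//   // ... suspend threads and collect register context ...
//   if (!dumper.LateInit())
//     return false;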

bool
LinuxDumper::ElfFileIdentifierForMapping(const MappingInfo& mapping,
                                         bool member,
                                         unsigned int mapping_id,
                                         wasteful_vector<uint8_t>& identifier) {
  assert(!member || mapping_id < mappings_.size());
  if (IsMappedFileOpenUnsafe(mapping))
    return false;

  // Special-case linux-gate because it's not a real file.
  if (my_strcmp(mapping.name, kLinuxGateLibraryName) == 0) {
    void* linux_gate = NULL;
    if (pid_ == sys_getpid()) {
      linux_gate = reinterpret_cast<void*>(mapping.start_addr);
    } else {
      linux_gate = allocator_.Alloc(mapping.size);
      CopyFromProcess(linux_gate, pid_,
                      reinterpret_cast<const void*>(mapping.start_addr),
                      mapping.size);
    }
    return FileID::ElfFileIdentifierFromMappedFile(linux_gate, identifier);
  }

  char filename[PATH_MAX];
  if (!GetMappingAbsolutePath(mapping, filename))
    return false;
  bool filename_modified = HandleDeletedFileInMapping(filename);

  MemoryMappedFile mapped_file(filename, mapping.offset);
  if (!mapped_file.data() || mapped_file.size() < SELFMAG)
    return false;

  bool success =
      FileID::ElfFileIdentifierFromMappedFile(mapped_file.data(), identifier);
  if (success && member && filename_modified) {
    mappings_[mapping_id]->name[my_strlen(mapping.name) -
                                sizeof(kDeletedSuffix) + 1] = '\0';
  }

  return success;
}

void LinuxDumper::SetCrashInfoFromSigInfo(const siginfo_t& siginfo) {
  set_crash_address(reinterpret_cast<uintptr_t>(siginfo.si_addr));
  set_crash_signal(siginfo.si_signo);
  set_crash_signal_code(siginfo.si_code);
}

const char* LinuxDumper::GetCrashSignalString() const {
  switch (static_cast<unsigned int>(crash_signal_)) {
    case MD_EXCEPTION_CODE_LIN_SIGHUP:
      return "SIGHUP";
    case MD_EXCEPTION_CODE_LIN_SIGINT:
      return "SIGINT";
    case MD_EXCEPTION_CODE_LIN_SIGQUIT:
      return "SIGQUIT";
    case MD_EXCEPTION_CODE_LIN_SIGILL:
      return "SIGILL";
    case MD_EXCEPTION_CODE_LIN_SIGTRAP:
      return "SIGTRAP";
    case MD_EXCEPTION_CODE_LIN_SIGABRT:
      return "SIGABRT";
    case MD_EXCEPTION_CODE_LIN_SIGBUS:
      return "SIGBUS";
    case MD_EXCEPTION_CODE_LIN_SIGFPE:
      return "SIGFPE";
    case MD_EXCEPTION_CODE_LIN_SIGKILL:
      return "SIGKILL";
    case MD_EXCEPTION_CODE_LIN_SIGUSR1:
      return "SIGUSR1";
    case MD_EXCEPTION_CODE_LIN_SIGSEGV:
      return "SIGSEGV";
    case MD_EXCEPTION_CODE_LIN_SIGUSR2:
      return "SIGUSR2";
    case MD_EXCEPTION_CODE_LIN_SIGPIPE:
      return "SIGPIPE";
    case MD_EXCEPTION_CODE_LIN_SIGALRM:
      return "SIGALRM";
    case MD_EXCEPTION_CODE_LIN_SIGTERM:
      return "SIGTERM";
    case MD_EXCEPTION_CODE_LIN_SIGSTKFLT:
      return "SIGSTKFLT";
    case MD_EXCEPTION_CODE_LIN_SIGCHLD:
      return "SIGCHLD";
    case MD_EXCEPTION_CODE_LIN_SIGCONT:
      return "SIGCONT";
    case MD_EXCEPTION_CODE_LIN_SIGSTOP:
      return "SIGSTOP";
    case MD_EXCEPTION_CODE_LIN_SIGTSTP:
      return "SIGTSTP";
    case MD_EXCEPTION_CODE_LIN_SIGTTIN:
      return "SIGTTIN";
    case MD_EXCEPTION_CODE_LIN_SIGTTOU:
      return "SIGTTOU";
    case MD_EXCEPTION_CODE_LIN_SIGURG:
      return "SIGURG";
    case MD_EXCEPTION_CODE_LIN_SIGXCPU:
      return "SIGXCPU";
    case MD_EXCEPTION_CODE_LIN_SIGXFSZ:
      return "SIGXFSZ";
    case MD_EXCEPTION_CODE_LIN_SIGVTALRM:
      return "SIGVTALRM";
    case MD_EXCEPTION_CODE_LIN_SIGPROF:
      return "SIGPROF";
    case MD_EXCEPTION_CODE_LIN_SIGWINCH:
      return "SIGWINCH";
    case MD_EXCEPTION_CODE_LIN_SIGIO:
      return "SIGIO";
    case MD_EXCEPTION_CODE_LIN_SIGPWR:
      return "SIGPWR";
    case MD_EXCEPTION_CODE_LIN_SIGSYS:
      return "SIGSYS";
    case MD_EXCEPTION_CODE_LIN_DUMP_REQUESTED:
      return "DUMP_REQUESTED";
    default:
      return "UNKNOWN";
  }
}

bool LinuxDumper::GetMappingAbsolutePath(const MappingInfo& mapping,
                                         char path[PATH_MAX]) const {
  return my_strlcpy(path, root_prefix_, PATH_MAX) < PATH_MAX &&
         my_strlcat(path, mapping.name, PATH_MAX) < PATH_MAX;
}

namespace {
// Find the shared object name (SONAME) by examining the ELF information
// for |mapping|. If the SONAME is found, copy it into the passed buffer
// |soname| and return true. The size of the buffer is |soname_size|.
// The SONAME will be truncated if it is too long to fit in the buffer.
bool ElfFileSoName(const LinuxDumper& dumper,
    const MappingInfo& mapping, char* soname, size_t soname_size) {
  if (IsMappedFileOpenUnsafe(mapping)) {
    // Not safe
    return false;
  }

  char filename[PATH_MAX];
  if (!dumper.GetMappingAbsolutePath(mapping, filename))
    return false;

  MemoryMappedFile mapped_file(filename, mapping.offset);
  if (!mapped_file.data() || mapped_file.size() < SELFMAG) {
    // mmap failed
    return false;
  }

  return ElfFileSoNameFromMappedFile(mapped_file.data(), soname, soname_size);
}

}  // namespace


void LinuxDumper::GetMappingEffectiveNameAndPath(const MappingInfo& mapping,
                                                 char* file_path,
                                                 size_t file_path_size,
                                                 char* file_name,
                                                 size_t file_name_size) {
  my_strlcpy(file_path, mapping.name, file_path_size);

  // Tools such as minidump_stackwalk use the name of the module to look up
  // symbols produced by dump_syms. dump_syms will prefer to use a module's
  // DT_SONAME as the module name, if one exists, and will fall back to the
  // filesystem name of the module.

  // Just use the filesystem name if no SONAME is present.
  if (!ElfFileSoName(*this, mapping, file_name, file_name_size)) {
    //   file_path := /path/to/libname.so
    //   file_name := libname.so
    const char* basename = my_strrchr(file_path, '/');
    basename = basename == NULL ? file_path : (basename + 1);
    my_strlcpy(file_name, basename, file_name_size);
    return;
  }

  if (mapping.exec && mapping.offset != 0) {
    // If an executable is mapped from a non-zero offset, this is likely because
    // the executable was loaded directly from inside an archive file (e.g., an
    // apk on Android).
    // In this case, we append the file_name to the mapped archive path:
    //   file_name := libname.so
    //   file_path := /path/to/ARCHIVE.APK/libname.so
    if (my_strlen(file_path) + 1 + my_strlen(file_name) < file_path_size) {
      my_strlcat(file_path, "/", file_path_size);
      my_strlcat(file_path, file_name, file_path_size);
    }
  } else {
    // Otherwise, replace the basename with the SONAME.
    char* basename = const_cast<char*>(my_strrchr(file_path, '/'));
    if (basename) {
      my_strlcpy(basename + 1, file_name,
                 file_path_size - my_strlen(file_path) +
                     my_strlen(basename + 1));
    } else {
      my_strlcpy(file_path, file_name, file_path_size);
    }
  }
}

bool LinuxDumper::ReadAuxv() {
  char auxv_path[NAME_MAX];
  if (!BuildProcPath(auxv_path, pid_, "auxv")) {
    return false;
  }

  int fd = sys_open(auxv_path, O_RDONLY, 0);
  if (fd < 0) {
    return false;
  }

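  // /proc/<pid>/auxv is a binary array of elf_aux_entry structures, i.e.
  // (a_type, a_un.a_val) pairs, terminated by an entry whose a_type is
  // AT_NULL.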
  elf_aux_entry one_aux_entry;
  bool res = false;
  while (sys_read(fd,
                  &one_aux_entry,
                  sizeof(elf_aux_entry)) == sizeof(elf_aux_entry) &&
         one_aux_entry.a_type != AT_NULL) {
    if (one_aux_entry.a_type <= AT_MAX) {
      auxv_[one_aux_entry.a_type] = one_aux_entry.a_un.a_val;
      res = true;
    }
  }
  sys_close(fd);
  return res;
}

bool LinuxDumper::EnumerateMappings() {
  char maps_path[NAME_MAX];
  if (!BuildProcPath(maps_path, pid_, "maps"))
    return false;

  // linux_gate_loc is the beginning of the kernel's mapping of
  // linux-gate.so in the process.  It doesn't actually show up in the
  // maps list as a filename, but it can be found using the AT_SYSINFO_EHDR
  // aux vector entry, which gives the information necessary to special
  // case its entry when creating the list of mappings.
  // See http://www.trilithium.com/johan/2005/08/linux-gate/ for more
  // information.
  const void* linux_gate_loc =
      reinterpret_cast<void *>(auxv_[AT_SYSINFO_EHDR]);
  // Although the initial executable is usually the first mapping, it's not
  // guaranteed (see http://crosbug.com/25355); therefore, try to use the
  // actual entry point to find the mapping.
  const void* entry_point_loc = reinterpret_cast<void *>(auxv_[AT_ENTRY]);

  const int fd = sys_open(maps_path, O_RDONLY, 0);
  if (fd < 0)
    return false;
  LineReader* const line_reader = new(allocator_) LineReader(fd);

  const char* line;
  unsigned line_len;
  while (line_reader->GetNextLine(&line, &line_len)) {
    uintptr_t start_addr, end_addr, offset;

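    // A line of /proc/<pid>/maps looks like, for example:
    //   08048000-08056000 r-xp 00000000 03:0c 64593   /usr/sbin/gpm
    // i.e. "<start>-<end> <perms> <offset> <dev> <inode> [<pathname>]",
    // which is parsed by hand below with my_read_hex_ptr().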
    const char* i1 = my_read_hex_ptr(&start_addr, line);
    if (*i1 == '-') {
      const char* i2 = my_read_hex_ptr(&end_addr, i1 + 1);
      if (*i2 == ' ') {
        bool exec = (*(i2 + 3) == 'x');
        const char* i3 = my_read_hex_ptr(&offset, i2 + 6 /* skip ' rwxp ' */);
        if (*i3 == ' ') {
          const char* name = NULL;
          // Only copy name if the name is a valid path name, or if
          // it's the VDSO image.
          if (((name = my_strchr(line, '/')) == NULL) &&
              linux_gate_loc &&
              reinterpret_cast<void*>(start_addr) == linux_gate_loc) {
            name = kLinuxGateLibraryName;
            offset = 0;
          }
          // Merge adjacent mappings into one module, assuming they're a single
          // library mapped by the dynamic linker. Do this only if their name
          // matches and either they have the same +x protection flag, or if the
          // previous mapping is not executable and the new one is, to handle
          // lld's output (see crbug.com/716484).
          if (name && !mappings_.empty()) {
            MappingInfo* module = mappings_.back();
            if ((start_addr == module->start_addr + module->size) &&
                (my_strlen(name) == my_strlen(module->name)) &&
                (my_strncmp(name, module->name, my_strlen(name)) == 0) &&
                ((exec == module->exec) || (!module->exec && exec))) {
              module->system_mapping_info.end_addr = end_addr;
              module->size = end_addr - module->start_addr;
              module->exec |= exec;
              line_reader->PopLine(line_len);
              continue;
            }
          }
          MappingInfo* const module = new(allocator_) MappingInfo;
          mappings_.push_back(module);
          my_memset(module, 0, sizeof(MappingInfo));
          module->system_mapping_info.start_addr = start_addr;
          module->system_mapping_info.end_addr = end_addr;
          module->start_addr = start_addr;
          module->size = end_addr - start_addr;
          module->offset = offset;
          module->exec = exec;
          if (name != NULL) {
            const unsigned l = my_strlen(name);
            if (l < sizeof(module->name))
              my_memcpy(module->name, name, l);
          }
        }
      }
    }
    line_reader->PopLine(line_len);
  }

  if (entry_point_loc) {
    for (size_t i = 0; i < mappings_.size(); ++i) {
      MappingInfo* module = mappings_[i];

      // If this module contains the entry-point, and it's not already the first
      // one, then we need to make it be first.  This is because the minidump
      // format assumes the first module is the one that corresponds to the main
      // executable (as codified in
      // processor/minidump.cc:MinidumpModuleList::GetMainModule()).
      if ((entry_point_loc >= reinterpret_cast<void*>(module->start_addr)) &&
          (entry_point_loc <
           reinterpret_cast<void*>(module->start_addr + module->size))) {
        for (size_t j = i; j > 0; j--)
          mappings_[j] = mappings_[j - 1];
        mappings_[0] = module;
        break;
      }
    }
  }

  sys_close(fd);

  return !mappings_.empty();
}

#if defined(__ANDROID__)

bool LinuxDumper::GetLoadedElfHeader(uintptr_t start_addr, ElfW(Ehdr)* ehdr) {
  CopyFromProcess(ehdr, pid_,
                  reinterpret_cast<const void*>(start_addr),
                  sizeof(*ehdr));
  return my_memcmp(&ehdr->e_ident, ELFMAG, SELFMAG) == 0;
}

void LinuxDumper::ParseLoadedElfProgramHeaders(ElfW(Ehdr)* ehdr,
                                               uintptr_t start_addr,
                                               uintptr_t* min_vaddr_ptr,
                                               uintptr_t* dyn_vaddr_ptr,
                                               size_t* dyn_count_ptr) {
  uintptr_t phdr_addr = start_addr + ehdr->e_phoff;

  const uintptr_t max_addr = UINTPTR_MAX;
  uintptr_t min_vaddr = max_addr;
  uintptr_t dyn_vaddr = 0;
  size_t dyn_count = 0;

  for (size_t i = 0; i < ehdr->e_phnum; ++i) {
    ElfW(Phdr) phdr;
    CopyFromProcess(&phdr, pid_,
                    reinterpret_cast<const void*>(phdr_addr),
                    sizeof(phdr));
    if (phdr.p_type == PT_LOAD && phdr.p_vaddr < min_vaddr) {
      min_vaddr = phdr.p_vaddr;
    }
    if (phdr.p_type == PT_DYNAMIC) {
      dyn_vaddr = phdr.p_vaddr;
      dyn_count = phdr.p_memsz / sizeof(ElfW(Dyn));
    }
    phdr_addr += sizeof(phdr);
  }

  *min_vaddr_ptr = min_vaddr;
  *dyn_vaddr_ptr = dyn_vaddr;
  *dyn_count_ptr = dyn_count;
}

bool LinuxDumper::HasAndroidPackedRelocations(uintptr_t load_bias,
                                              uintptr_t dyn_vaddr,
                                              size_t dyn_count) {
  uintptr_t dyn_addr = load_bias + dyn_vaddr;
  for (size_t i = 0; i < dyn_count; ++i) {
    ElfW(Dyn) dyn;
    CopyFromProcess(&dyn, pid_,
                    reinterpret_cast<const void*>(dyn_addr),
                    sizeof(dyn));
    if (dyn.d_tag == DT_ANDROID_REL || dyn.d_tag == DT_ANDROID_RELA) {
      return true;
    }
    dyn_addr += sizeof(dyn);
  }
  return false;
}

uintptr_t LinuxDumper::GetEffectiveLoadBias(ElfW(Ehdr)* ehdr,
                                            uintptr_t start_addr) {
  uintptr_t min_vaddr = 0;
  uintptr_t dyn_vaddr = 0;
  size_t dyn_count = 0;
  ParseLoadedElfProgramHeaders(ehdr, start_addr,
                               &min_vaddr, &dyn_vaddr, &dyn_count);
  // If |min_vaddr| is non-zero and we find Android packed relocation tags,
  // return the effective load bias.
  if (min_vaddr != 0) {
    const uintptr_t load_bias = start_addr - min_vaddr;
    if (HasAndroidPackedRelocations(load_bias, dyn_vaddr, dyn_count)) {
      return load_bias;
    }
  }
  // Either |min_vaddr| is zero, or it is non-zero but we did not find the
  // expected Android packed relocation tags.
  return start_addr;
}

void LinuxDumper::LatePostprocessMappings() {
  for (size_t i = 0; i < mappings_.size(); ++i) {
    // Only consider exec mappings that indicate a file path was mapped, and
    // where the ELF header indicates a mapped shared library.
    MappingInfo* mapping = mappings_[i];
    if (!(mapping->exec && mapping->name[0] == '/')) {
      continue;
    }
    ElfW(Ehdr) ehdr;
    if (!GetLoadedElfHeader(mapping->start_addr, &ehdr)) {
      continue;
    }
    if (ehdr.e_type == ET_DYN) {
      // Compute the effective load bias for this mapped library, and update
      // the mapping to hold that rather than |start_addr|, at the same time
      // adjusting |size| to account for the change in |start_addr|. Where
      // the library does not contain Android packed relocations,
      // GetEffectiveLoadBias() returns |start_addr| and the mapping entry
      // is not changed.
      const uintptr_t load_bias = GetEffectiveLoadBias(&ehdr,
                                                       mapping->start_addr);
      mapping->size += mapping->start_addr - load_bias;
      mapping->start_addr = load_bias;
    }
  }
}

#endif  // __ANDROID__

// Get information about the stack, given the stack pointer. We don't try to
// walk the stack since we might not have all the information needed to
// unwind. So we just grab up to 32k of stack.
bool LinuxDumper::GetStackInfo(const void** stack, size_t* stack_len,
                               uintptr_t int_stack_pointer) {
  // Move the stack pointer to the bottom of the page that it's in.
  const uintptr_t page_size = getpagesize();

  uint8_t* const stack_pointer =
      reinterpret_cast<uint8_t*>(int_stack_pointer & ~(page_size - 1));

  // The number of bytes of stack which we try to capture.
  static const ptrdiff_t kStackToCapture = 32 * 1024;

  const MappingInfo* mapping = FindMapping(stack_pointer);
  if (!mapping)
    return false;
  const ptrdiff_t offset = stack_pointer -
      reinterpret_cast<uint8_t*>(mapping->start_addr);
  const ptrdiff_t distance_to_end =
      static_cast<ptrdiff_t>(mapping->size) - offset;
  *stack_len = distance_to_end > kStackToCapture ?
      kStackToCapture : distance_to_end;
  *stack = stack_pointer;
  return true;
}

void LinuxDumper::SanitizeStackCopy(uint8_t* stack_copy, size_t stack_len,
                                    uintptr_t stack_pointer,
                                    uintptr_t sp_offset) {
  // We optimize the search for containing mappings in three ways:
  // 1) We expect that pointers into the stack mapping will be common, so
  //    we cache that address range.
  // 2) The last referenced mapping is a reasonable predictor for the next
  //    referenced mapping, so we test that first.
  // 3) We precompute a bitfield based upon bits 32:32-n of the start and
  //    stop addresses, and use that to short circuit any values that can
  //    not be pointers. (n=11)
  const uintptr_t defaced =
#if defined(__LP64__)
      0x0defaced0defaced;
#else
      0x0defaced;
#endif
  // the bitfield length is 2^test_bits long.
  const unsigned int test_bits = 11;
  // byte length of the corresponding array.
  const unsigned int array_size = 1 << (test_bits - 3);
  const unsigned int array_mask = array_size - 1;
  // The amount to right shift pointers by. This captures the top bits
  // on 32 bit architectures. On 64 bit architectures this would be
  // uninformative so we take the same range of bits.
  const unsigned int shift = 32 - 11;
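  // Taken together, could_hit_mapping[] below behaves like a small
  // Bloom-style filter over the executable mappings: a clear bit proves that
  // no executable mapping covers addresses with those top bits, while a set
  // bit only means "possibly", in which case FindMappingNoBias() gives the
  // exact answer.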
  const MappingInfo* last_hit_mapping = nullptr;
  const MappingInfo* hit_mapping = nullptr;
  const MappingInfo* stack_mapping = FindMappingNoBias(stack_pointer);
  // The magnitude below which integers are considered 'small' and do not
  // constitute a PII risk. These are included to avoid eliding useful
  // register values.
  const ssize_t small_int_magnitude = 4096;

  char could_hit_mapping[array_size];
  my_memset(could_hit_mapping, 0, array_size);

  // Initialize the bitfield such that if the (pointer >> shift)'th
  // bit, modulo the bitfield size, is not set then there does not
  // exist a mapping in mappings_ that would contain that pointer.
  for (size_t i = 0; i < mappings_.size(); ++i) {
    if (!mappings_[i]->exec) continue;
    // For each mapping, work out the (unmodulo'ed) range of bits to
    // set.
    uintptr_t start = mappings_[i]->start_addr;
    uintptr_t end = start + mappings_[i]->size;
    start >>= shift;
    end >>= shift;
    for (size_t bit = start; bit <= end; ++bit) {
      // Set each bit in the range, applying the modulus.
      could_hit_mapping[(bit >> 3) & array_mask] |= 1 << (bit & 7);
    }
  }

  // Zero memory that is below the current stack pointer.
  const uintptr_t offset =
      (sp_offset + sizeof(uintptr_t) - 1) & ~(sizeof(uintptr_t) - 1);
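  // |offset| is sp_offset rounded up to the next uintptr_t boundary, so only
  // complete, aligned words above the stack pointer are inspected below.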
  if (offset) {
    my_memset(stack_copy, 0, offset);
  }

  // Apply sanitization to each complete pointer-aligned word in the
  // stack.
  uint8_t* sp;
  for (sp = stack_copy + offset;
       sp <= stack_copy + stack_len - sizeof(uintptr_t);
       sp += sizeof(uintptr_t)) {
    uintptr_t addr;
    my_memcpy(&addr, sp, sizeof(uintptr_t));
    if (static_cast<intptr_t>(addr) <= small_int_magnitude &&
        static_cast<intptr_t>(addr) >= -small_int_magnitude) {
      continue;
    }
    if (stack_mapping && MappingContainsAddress(*stack_mapping, addr)) {
      continue;
    }
    if (last_hit_mapping && MappingContainsAddress(*last_hit_mapping, addr)) {
      continue;
    }
    uintptr_t test = addr >> shift;
    if (could_hit_mapping[(test >> 3) & array_mask] & (1 << (test & 7)) &&
        (hit_mapping = FindMappingNoBias(addr)) != nullptr &&
        hit_mapping->exec) {
      last_hit_mapping = hit_mapping;
      continue;
    }
    my_memcpy(sp, &defaced, sizeof(uintptr_t));
  }
  // Zero any partial word at the top of the stack, if alignment is
  // such that that is required.
  if (sp < stack_copy + stack_len) {
    my_memset(sp, 0, stack_copy + stack_len - sp);
  }
}

bool LinuxDumper::StackHasPointerToMapping(const uint8_t* stack_copy,
                                           size_t stack_len,
                                           uintptr_t sp_offset,
                                           const MappingInfo& mapping) {
  // Loop over all stack words that would have been on the stack in
  // the target process (i.e. are word aligned, and at addresses >=
  // the stack pointer).  Regardless of the alignment of |stack_copy|,
  // the memory starting at |stack_copy| + |offset| represents an
  // aligned word in the target process.
  const uintptr_t low_addr = mapping.system_mapping_info.start_addr;
  const uintptr_t high_addr = mapping.system_mapping_info.end_addr;
  const uintptr_t offset =
      (sp_offset + sizeof(uintptr_t) - 1) & ~(sizeof(uintptr_t) - 1);

  for (const uint8_t* sp = stack_copy + offset;
       sp <= stack_copy + stack_len - sizeof(uintptr_t);
       sp += sizeof(uintptr_t)) {
    uintptr_t addr;
    my_memcpy(&addr, sp, sizeof(uintptr_t));
    if (low_addr <= addr && addr <= high_addr)
      return true;
  }
  return false;
}

// Find the mapping which the given memory address falls in.
const MappingInfo* LinuxDumper::FindMapping(const void* address) const {
  const uintptr_t addr = (uintptr_t) address;

  for (size_t i = 0; i < mappings_.size(); ++i) {
    const uintptr_t start = static_cast<uintptr_t>(mappings_[i]->start_addr);
    if (addr >= start && addr - start < mappings_[i]->size)
      return mappings_[i];
  }

  return NULL;
}

// Find the mapping which the given memory address falls in. Uses the
// unadjusted mapping address range from the kernel, rather than the
// biased range.
const MappingInfo* LinuxDumper::FindMappingNoBias(uintptr_t address) const {
  for (size_t i = 0; i < mappings_.size(); ++i) {
    if (address >= mappings_[i]->system_mapping_info.start_addr &&
        address < mappings_[i]->system_mapping_info.end_addr) {
      return mappings_[i];
    }
  }
  return NULL;
}

bool LinuxDumper::HandleDeletedFileInMapping(char* path) const {
  static const size_t kDeletedSuffixLen = sizeof(kDeletedSuffix) - 1;

  // Check for ' (deleted)' in |path|.
  // |path| has to be at least as long as "/x (deleted)".
  const size_t path_len = my_strlen(path);
  if (path_len < kDeletedSuffixLen + 2)
    return false;
  if (my_strncmp(path + path_len - kDeletedSuffixLen, kDeletedSuffix,
                 kDeletedSuffixLen) != 0) {
    return false;
  }

  // Check |path| against the /proc/pid/exe 'symlink'.
  char exe_link[NAME_MAX];
  if (!BuildProcPath(exe_link, pid_, "exe"))
    return false;
  MappingInfo new_mapping = {};
  if (!SafeReadLink(exe_link, new_mapping.name))
    return false;
  char new_path[PATH_MAX];
  if (!GetMappingAbsolutePath(new_mapping, new_path))
    return false;
  if (my_strcmp(path, new_path) != 0)
    return false;

  // Check to see if someone actually named their executable 'foo (deleted)'.
  struct kernel_stat exe_stat;
  struct kernel_stat new_path_stat;
  if (sys_stat(exe_link, &exe_stat) == 0 &&
      sys_stat(new_path, &new_path_stat) == 0 &&
      exe_stat.st_dev == new_path_stat.st_dev &&
      exe_stat.st_ino == new_path_stat.st_ino) {
    return false;
  }

  my_memcpy(path, exe_link, NAME_MAX);
  return true;
}

}  // namespace google_breakpad