// Copyright (c) 2010 Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// The ExceptionHandler object installs signal handlers for a number of
// signals. We rely on the signal handler running on the thread which crashed
// in order to identify it. This is true of the synchronous signals (SEGV etc),
// but not true of ABRT. Thus, if you send ABRT to yourself in a program which
// uses ExceptionHandler, you need to use tgkill to direct it to the current
// thread.
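//
// For example (an illustrative sketch, not part of this file), instead of the
// process-directed kill(getpid(), SIGABRT), a program could raise the signal
// on the calling thread with:
//
//   #include <signal.h>
//   #include <sys/syscall.h>
//   #include <unistd.h>
//
//   syscall(SYS_tgkill, getpid(), syscall(SYS_gettid), SIGABRT);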
//
// The signal flow looks like this:
//
//   SignalHandler (uses a global stack of ExceptionHandler objects to find
//        |         one to handle the signal. If the first rejects it, try
//        |         the second etc...)
//        V
//   HandleSignal ----------------------------| (clones a new process which
//        |                                   | shares an address space with
//   (wait for cloned                         | the crashed process. This
//    process)                                | allows us to ptrace the crashed
//        |                                   | process)
//        V                                   V
//   (set signal handler to     ThreadEntry (static function to bounce
//    SIG_DFL and rethrow,           |        back into the object)
//    killing the crashed            |
//    process)                       V
//                                 DoDump (writes minidump)
//                                    |
//                                    V
//                                 sys_exit
//

// This code is a little fragmented. Different functions of the ExceptionHandler
// class run in a number of different contexts. Some of them run in a normal
// context and are easy to code, others run in a compromised context and the
// restrictions at the top of minidump_writer.cc apply: no libc and use the
// alternative malloc. Each function should have a comment above it detailing
// the context in which it runs.

#include "client/linux/handler/exception_handler.h"

#include <errno.h>
#include <fcntl.h>
#include <linux/limits.h>
#include <pthread.h>
#include <sched.h>
#include <signal.h>
#include <stdio.h>
#include <sys/mman.h>
#include <sys/prctl.h>
#include <sys/syscall.h>
#include <sys/wait.h>
#include <unistd.h>

#include <sys/ucontext.h>
#include <sys/user.h>
#include <ucontext.h>

#include <algorithm>
#include <utility>
#include <vector>

#include "common/basictypes.h"
#include "common/linux/breakpad_getcontext.h"
#include "common/linux/linux_libc_support.h"
#include "common/memory_allocator.h"
#include "client/linux/log/log.h"
#include "client/linux/microdump_writer/microdump_writer.h"
#include "client/linux/minidump_writer/linux_dumper.h"
#include "client/linux/minidump_writer/minidump_writer.h"
#include "common/linux/eintr_wrapper.h"
#include "third_party/lss/linux_syscall_support.h"

#if defined(__ANDROID__)
#include "linux/sched.h"
#endif

#ifndef PR_SET_PTRACER
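// Note: this value matches the kernel's definition of PR_SET_PTRACER; the
// bytes spell "Yama" in ASCII, after the Yama security module whose ptrace
// restrictions the option relaxes.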
#define PR_SET_PTRACER 0x59616d61
#endif

namespace google_breakpad {

namespace {
// The list of signals which we consider to be crashes. The default action for
// all these signals must be Core (see man 7 signal) because we rethrow the
// signal after handling it and expect that it'll be fatal.
const int kExceptionSignals[] = {
  SIGSEGV, SIGABRT, SIGFPE, SIGILL, SIGBUS, SIGTRAP
};
const int kNumHandledSignals =
    sizeof(kExceptionSignals) / sizeof(kExceptionSignals[0]);
struct sigaction old_handlers[kNumHandledSignals];
bool handlers_installed = false;

// InstallAlternateStackLocked will store the newly installed stack in new_stack
// and (if it exists) the previously installed stack in old_stack.
stack_t old_stack;
stack_t new_stack;
bool stack_installed = false;

// Create an alternative stack to run the signal handlers on. This is done since
// the signal might have been caused by a stack overflow.
// Runs before crashing: normal context.
void InstallAlternateStackLocked() {
  if (stack_installed)
    return;

  memset(&old_stack, 0, sizeof(old_stack));
  memset(&new_stack, 0, sizeof(new_stack));

  // SIGSTKSZ may be too small to prevent the signal handlers from overrunning
  // the alternative stack. Ensure that the size of the alternative stack is
  // large enough.
  static const unsigned kSigStackSize = std::max(16384, SIGSTKSZ);

  // Only set an alternative stack if there isn't already one, or if the current
  // one is too small.
  if (sys_sigaltstack(NULL, &old_stack) == -1 || !old_stack.ss_sp ||
      old_stack.ss_size < kSigStackSize) {
    new_stack.ss_sp = calloc(1, kSigStackSize);
    new_stack.ss_size = kSigStackSize;

    if (sys_sigaltstack(&new_stack, NULL) == -1) {
      free(new_stack.ss_sp);
      return;
    }
    stack_installed = true;
  }
}

// Runs before crashing: normal context.
void RestoreAlternateStackLocked() {
  if (!stack_installed)
    return;

  stack_t current_stack;
  if (sys_sigaltstack(NULL, &current_stack) == -1)
    return;

  // Only restore the old_stack if the current alternative stack is the one
  // installed by the call to InstallAlternateStackLocked.
  if (current_stack.ss_sp == new_stack.ss_sp) {
    if (old_stack.ss_sp) {
      if (sys_sigaltstack(&old_stack, NULL) == -1)
        return;
    } else {
      stack_t disable_stack;
      disable_stack.ss_flags = SS_DISABLE;
      if (sys_sigaltstack(&disable_stack, NULL) == -1)
        return;
    }
  }

  free(new_stack.ss_sp);
  stack_installed = false;
}

void InstallDefaultHandler(int sig) {
#if defined(__ANDROID__)
  // Android L+ exposes signal and sigaction symbols that override the system
  // ones. There is a bug in these functions where a request to set the handler
  // to SIG_DFL is ignored. In that case, an infinite loop is entered as the
  // signal is repeatedly sent to breakpad's signal handler.
  // To work around this, directly call the system's sigaction.
  struct kernel_sigaction sa;
  memset(&sa, 0, sizeof(sa));
  sys_sigemptyset(&sa.sa_mask);
  sa.sa_handler_ = SIG_DFL;
  sa.sa_flags = SA_RESTART;
  sys_rt_sigaction(sig, &sa, NULL, sizeof(kernel_sigset_t));
#else
  signal(sig, SIG_DFL);
#endif
}

// The global exception handler stack. This is needed because there may exist
// multiple ExceptionHandler instances in a process. Each will have itself
// registered in this stack.
std::vector<ExceptionHandler*>* g_handler_stack_ = NULL;
pthread_mutex_t g_handler_stack_mutex_ = PTHREAD_MUTEX_INITIALIZER;

// sizeof(CrashContext) can be too big w.r.t the size of the alternate stack
// for SignalHandler(). Keep the crash context as a .bss field. Exception
// handlers are serialized by the |g_handler_stack_mutex_| and at most one at a
// time can use |g_crash_context_|.
ExceptionHandler::CrashContext g_crash_context_;

FirstChanceHandler g_first_chance_handler_ = nullptr;
}  // namespace

// Runs before crashing: normal context.
ExceptionHandler::ExceptionHandler(const MinidumpDescriptor& descriptor,
                                   FilterCallback filter,
                                   MinidumpCallback callback,
                                   void* callback_context,
                                   bool install_handler,
                                   const int server_fd)
    : filter_(filter),
      callback_(callback),
      callback_context_(callback_context),
      minidump_descriptor_(descriptor),
      crash_handler_(NULL) {
  if (server_fd >= 0)
    crash_generation_client_.reset(CrashGenerationClient::TryCreate(server_fd));

  if (!IsOutOfProcess() && !minidump_descriptor_.IsFD() &&
      !minidump_descriptor_.IsMicrodumpOnConsole())
    minidump_descriptor_.UpdatePath();

#if defined(__ANDROID__)
  if (minidump_descriptor_.IsMicrodumpOnConsole())
    logger::initializeCrashLogWriter();
#endif

  pthread_mutex_lock(&g_handler_stack_mutex_);

  // Pre-fault the crash context struct. This is to avoid failing due to OOM
  // if handling an exception when the process ran out of virtual memory.
  memset(&g_crash_context_, 0, sizeof(g_crash_context_));

  if (!g_handler_stack_)
    g_handler_stack_ = new std::vector<ExceptionHandler*>;
  if (install_handler) {
    InstallAlternateStackLocked();
    InstallHandlersLocked();
  }
  g_handler_stack_->push_back(this);
  pthread_mutex_unlock(&g_handler_stack_mutex_);
}
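
// Illustrative in-process usage (a sketch; DumpCallback and the "/tmp"
// directory are hypothetical and not part of this file):
//
//   static bool DumpCallback(const google_breakpad::MinidumpDescriptor& desc,
//                            void* context, bool succeeded) {
//     return succeeded;
//   }
//
//   google_breakpad::MinidumpDescriptor descriptor("/tmp");
//   google_breakpad::ExceptionHandler eh(descriptor, NULL, DumpCallback,
//                                        NULL, true, -1);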

// Runs before crashing: normal context.
ExceptionHandler::~ExceptionHandler() {
  pthread_mutex_lock(&g_handler_stack_mutex_);
  std::vector<ExceptionHandler*>::iterator handler =
      std::find(g_handler_stack_->begin(), g_handler_stack_->end(), this);
  g_handler_stack_->erase(handler);
  if (g_handler_stack_->empty()) {
    delete g_handler_stack_;
    g_handler_stack_ = NULL;
    RestoreAlternateStackLocked();
    RestoreHandlersLocked();
  }
  pthread_mutex_unlock(&g_handler_stack_mutex_);
}

// Runs before crashing: normal context.
// static
bool ExceptionHandler::InstallHandlersLocked() {
  if (handlers_installed)
    return false;

  // Fail if unable to store all the old handlers.
  for (int i = 0; i < kNumHandledSignals; ++i) {
    if (sigaction(kExceptionSignals[i], NULL, &old_handlers[i]) == -1)
      return false;
  }

  struct sigaction sa;
  memset(&sa, 0, sizeof(sa));
  sigemptyset(&sa.sa_mask);

  // Mask all exception signals when we're handling one of them.
  for (int i = 0; i < kNumHandledSignals; ++i)
    sigaddset(&sa.sa_mask, kExceptionSignals[i]);

  sa.sa_sigaction = SignalHandler;
  sa.sa_flags = SA_ONSTACK | SA_SIGINFO;

  for (int i = 0; i < kNumHandledSignals; ++i) {
    if (sigaction(kExceptionSignals[i], &sa, NULL) == -1) {
      // At this point it is impractical to back out changes, and so failure to
      // install a signal handler is intentionally ignored.
    }
  }
  handlers_installed = true;
  return true;
}

// This function runs in a compromised context: see the top of the file.
// Runs on the crashing thread.
// static
void ExceptionHandler::RestoreHandlersLocked() {
  if (!handlers_installed)
    return;

  for (int i = 0; i < kNumHandledSignals; ++i) {
    if (sigaction(kExceptionSignals[i], &old_handlers[i], NULL) == -1) {
      InstallDefaultHandler(kExceptionSignals[i]);
    }
  }
  handlers_installed = false;
}

// void ExceptionHandler::set_crash_handler(HandlerCallback callback) {
//   crash_handler_ = callback;
// }

// This function runs in a compromised context: see the top of the file.
// Runs on the crashing thread.
// static
void ExceptionHandler::SignalHandler(int sig, siginfo_t* info, void* uc) {
  // Give the first chance handler a chance to recover from this signal
  //
  // This is primarily used by V8. V8 uses guard regions to guarantee memory
  // safety in WebAssembly. This means some signals might be expected if they
  // originate from Wasm code while accessing the guard region. We give V8 the
  // chance to handle and recover from these signals first.
  if (g_first_chance_handler_ != nullptr &&
      g_first_chance_handler_(sig, info, uc)) {
    return;
  }

  // All the exception signals are blocked at this point.
  pthread_mutex_lock(&g_handler_stack_mutex_);

  // Sometimes, Breakpad runs inside a process where some other buggy code
  // saves and restores signal handlers temporarily with 'signal'
  // instead of 'sigaction'. This loses the SA_SIGINFO flag associated
  // with this function. As a consequence, the values of 'info' and 'uc'
  // become totally bogus, generally inducing a crash.
  //
  // The following code tries to detect this case. When it does, it
  // resets the signal handlers with sigaction + SA_SIGINFO and returns.
  // This forces the signal to be thrown again, but this time the kernel
  // will call the function with the right arguments.
  struct sigaction cur_handler;
  if (sigaction(sig, NULL, &cur_handler) == 0 &&
      cur_handler.sa_sigaction == SignalHandler &&
      (cur_handler.sa_flags & SA_SIGINFO) == 0) {
    // Reset signal handler with the right flags.
    sigemptyset(&cur_handler.sa_mask);
    sigaddset(&cur_handler.sa_mask, sig);

    cur_handler.sa_sigaction = SignalHandler;
    cur_handler.sa_flags = SA_ONSTACK | SA_SIGINFO;

    if (sigaction(sig, &cur_handler, NULL) == -1) {
      // When resetting the handler fails, try to reset the
      // default one to avoid an infinite loop here.
      InstallDefaultHandler(sig);
    }
    pthread_mutex_unlock(&g_handler_stack_mutex_);
    return;
  }

  bool handled = false;
  for (int i = g_handler_stack_->size() - 1; !handled && i >= 0; --i) {
    handled = (*g_handler_stack_)[i]->HandleSignal(sig, info, uc);
  }

  // Upon returning from this signal handler, sig will become unmasked and then
  // it will be retriggered. If one of the ExceptionHandlers handled it
  // successfully, restore the default handler. Otherwise, restore the
  // previously installed handler. Then, when the signal is retriggered, it will
  // be delivered to the appropriate handler.
  if (handled) {
    InstallDefaultHandler(sig);
  } else {
    RestoreHandlersLocked();
  }

  pthread_mutex_unlock(&g_handler_stack_mutex_);

  // info->si_code <= 0 iff SI_FROMUSER (SI_FROMKERNEL otherwise).
  if (info->si_code <= 0 || sig == SIGABRT) {
    // This signal was triggered by somebody sending us the signal with kill().
    // In order to retrigger it, we have to queue a new signal by calling
    // kill() ourselves. The special case (si_pid == 0 && sig == SIGABRT) is
    // due to the kernel sending a SIGABRT from a user request via SysRQ.
    if (sys_tgkill(getpid(), syscall(__NR_gettid), sig) < 0) {
      // If we failed to kill ourselves (e.g. because a sandbox prevents us
      // from doing so), we instead resort to terminating our process. This
      // will result in an incorrect exit code.
      _exit(1);
    }
  } else {
    // This was a synchronous signal triggered by a hard fault (e.g. SIGSEGV).
    // No need to reissue the signal. It will automatically trigger again,
    // when we return from the signal handler.
  }
}

struct ThreadArgument {
  pid_t pid;  // the crashing process
  const MinidumpDescriptor* minidump_descriptor;
  ExceptionHandler* handler;
  const void* context;  // a CrashContext structure
  size_t context_size;
};

// This is the entry function for the cloned process. We are in a compromised
// context here: see the top of the file.
// static
int ExceptionHandler::ThreadEntry(void *arg) {
  const ThreadArgument *thread_arg = reinterpret_cast<ThreadArgument*>(arg);

  // Close the write end of the pipe. This allows us to fail if the parent dies
  // while waiting for the continue signal.
  sys_close(thread_arg->handler->fdes[1]);

  // Block here until the crashing process unblocks us when
  // we're allowed to use ptrace
  thread_arg->handler->WaitForContinueSignal();
  sys_close(thread_arg->handler->fdes[0]);

  return thread_arg->handler->DoDump(thread_arg->pid, thread_arg->context,
                                     thread_arg->context_size) == false;
}

// This function runs in a compromised context: see the top of the file.
// Runs on the crashing thread.
bool ExceptionHandler::HandleSignal(int /*sig*/, siginfo_t* info, void* uc) {
  if (filter_ && !filter_(callback_context_))
    return false;

  // Allow ourselves to be dumped if the signal is trusted.
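  // (Being dumpable is also what later allows the cloned child to ptrace this
  // process while it writes the dump.)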
  bool signal_trusted = info->si_code > 0;
  bool signal_pid_trusted = info->si_code == SI_USER ||
      info->si_code == SI_TKILL;
  if (signal_trusted || (signal_pid_trusted && info->si_pid == getpid())) {
    sys_prctl(PR_SET_DUMPABLE, 1, 0, 0, 0);
  }

  // Fill in all the holes in the struct to make Valgrind happy.
  memset(&g_crash_context_, 0, sizeof(g_crash_context_));
  memcpy(&g_crash_context_.siginfo, info, sizeof(siginfo_t));
  memcpy(&g_crash_context_.context, uc, sizeof(ucontext_t));
#if defined(__aarch64__)
  ucontext_t* uc_ptr = (ucontext_t*)uc;
  struct fpsimd_context* fp_ptr =
      (struct fpsimd_context*)&uc_ptr->uc_mcontext.__reserved;
  if (fp_ptr->head.magic == FPSIMD_MAGIC) {
    memcpy(&g_crash_context_.float_state, fp_ptr,
           sizeof(g_crash_context_.float_state));
  }
#elif !defined(__ARM_EABI__) && !defined(__mips__)
  // FP state is not part of user ABI on ARM Linux.
  // In case of MIPS Linux FP state is already part of ucontext_t
  // and 'float_state' is not a member of CrashContext.
  ucontext_t* uc_ptr = (ucontext_t*)uc;
  if (uc_ptr->uc_mcontext.fpregs) {
    memcpy(&g_crash_context_.float_state, uc_ptr->uc_mcontext.fpregs,
           sizeof(g_crash_context_.float_state));
  }
#endif
  g_crash_context_.tid = syscall(__NR_gettid);
  if (crash_handler_ != NULL) {
    if (crash_handler_(&g_crash_context_, sizeof(g_crash_context_),
                       callback_context_)) {
      return true;
    }
  }
  return GenerateDump(&g_crash_context_);
}

// This is a public interface to HandleSignal that allows the client to
// generate a crash dump. This function may run in a compromised context.
bool ExceptionHandler::SimulateSignalDelivery(int sig) {
  siginfo_t siginfo = {};
  // Mimic a trusted signal to allow tracing the process (see
  // ExceptionHandler::HandleSignal()).
  siginfo.si_code = SI_USER;
  siginfo.si_pid = getpid();
  ucontext_t context;
  getcontext(&context);
  return HandleSignal(sig, &siginfo, &context);
}

// This function may run in a compromised context: see the top of the file.
bool ExceptionHandler::GenerateDump(CrashContext* context) {
  if (IsOutOfProcess())
    return crash_generation_client_->RequestDump(context, sizeof(*context));

  // Allocating too much stack isn't a problem; it is better to err on the side
  // of caution than to have the child overrun its stack into random memory.
  static const unsigned kChildStackSize = 16000;
  PageAllocator allocator;
  uint8_t* stack = reinterpret_cast<uint8_t*>(allocator.Alloc(kChildStackSize));
  if (!stack)
    return false;
  // clone() needs the top-most address. (scrub just to be safe)
  stack += kChildStackSize;
  my_memset(stack - 16, 0, 16);

  ThreadArgument thread_arg;
  thread_arg.handler = this;
  thread_arg.minidump_descriptor = &minidump_descriptor_;
  thread_arg.pid = getpid();
  thread_arg.context = context;
  thread_arg.context_size = sizeof(*context);

  // We need to explicitly enable ptrace of parent processes on some
  // kernels, but we need to know the PID of the cloned process before we
  // can do this. Create a pipe here which we can use to block the
  // cloned process after creating it, until we have explicitly enabled ptrace
  if (sys_pipe(fdes) == -1) {
    // Creating the pipe failed. We'll log an error but carry on anyway,
    // as we'll probably still get a useful crash report. All that will happen
    // is the write() and read() calls will fail with EBADF
    static const char no_pipe_msg[] = "ExceptionHandler::GenerateDump "
                                      "sys_pipe failed:";
    logger::write(no_pipe_msg, sizeof(no_pipe_msg) - 1);
    logger::write(strerror(errno), strlen(strerror(errno)));
    logger::write("\n", 1);

    // Ensure fdes[0] and fdes[1] are invalid file descriptors.
    fdes[0] = fdes[1] = -1;
  }

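  // Note on the clone flags below: CLONE_FS shares filesystem attributes with
  // the child, and CLONE_UNTRACED prevents any tracer of this process from
  // automatically tracing the cloned child (see clone(2)).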
  const pid_t child = sys_clone(
      ThreadEntry, stack, CLONE_FS | CLONE_UNTRACED, &thread_arg, NULL, NULL,
      NULL);
  if (child == -1) {
    sys_close(fdes[0]);
    sys_close(fdes[1]);
    return false;
  }

  // Close the read end of the pipe.
  sys_close(fdes[0]);
  // Allow the child to ptrace us
  sys_prctl(PR_SET_PTRACER, child, 0, 0, 0);
  SendContinueSignalToChild();
  int status = 0;
  const int r = HANDLE_EINTR(sys_waitpid(child, &status, __WALL));

  sys_close(fdes[1]);

  if (r == -1) {
    static const char msg[] = "ExceptionHandler::GenerateDump waitpid failed:";
    logger::write(msg, sizeof(msg) - 1);
    logger::write(strerror(errno), strlen(strerror(errno)));
    logger::write("\n", 1);
  }

  bool success = r != -1 && WIFEXITED(status) && WEXITSTATUS(status) == 0;
  if (callback_)
    success = callback_(minidump_descriptor_, callback_context_, success);
  return success;
}

// This function runs in a compromised context: see the top of the file.
void ExceptionHandler::SendContinueSignalToChild() {
  static const char okToContinueMessage = 'a';
  int r;
  r = HANDLE_EINTR(sys_write(fdes[1], &okToContinueMessage, sizeof(char)));
  if (r == -1) {
    static const char msg[] = "ExceptionHandler::SendContinueSignalToChild "
                              "sys_write failed:";
    logger::write(msg, sizeof(msg) - 1);
    logger::write(strerror(errno), strlen(strerror(errno)));
    logger::write("\n", 1);
  }
}

// This function runs in a compromised context: see the top of the file.
// Runs on the cloned process.
void ExceptionHandler::WaitForContinueSignal() {
  int r;
  char receivedMessage;
  r = HANDLE_EINTR(sys_read(fdes[0], &receivedMessage, sizeof(char)));
  if (r == -1) {
    static const char msg[] = "ExceptionHandler::WaitForContinueSignal "
                              "sys_read failed:";
    logger::write(msg, sizeof(msg) - 1);
    logger::write(strerror(errno), strlen(strerror(errno)));
    logger::write("\n", 1);
  }
}

// This function runs in a compromised context: see the top of the file.
// Runs on the cloned process.
bool ExceptionHandler::DoDump(pid_t crashing_process, const void* context,
                              size_t context_size) {
  const bool may_skip_dump =
      minidump_descriptor_.skip_dump_if_principal_mapping_not_referenced();
  const uintptr_t principal_mapping_address =
      minidump_descriptor_.address_within_principal_mapping();
  const bool sanitize_stacks = minidump_descriptor_.sanitize_stacks();
  if (minidump_descriptor_.IsMicrodumpOnConsole()) {
    return google_breakpad::WriteMicrodump(
        crashing_process,
        context,
        context_size,
        mapping_list_,
        may_skip_dump,
        principal_mapping_address,
        sanitize_stacks,
        *minidump_descriptor_.microdump_extra_info());
  }
  if (minidump_descriptor_.IsFD()) {
    return google_breakpad::WriteMinidump(minidump_descriptor_.fd(),
                                          minidump_descriptor_.size_limit(),
                                          crashing_process,
                                          context,
                                          context_size,
                                          mapping_list_,
                                          app_memory_list_,
                                          may_skip_dump,
                                          principal_mapping_address,
                                          sanitize_stacks);
  }
  return google_breakpad::WriteMinidump(minidump_descriptor_.path(),
                                        minidump_descriptor_.size_limit(),
                                        crashing_process,
                                        context,
                                        context_size,
                                        mapping_list_,
                                        app_memory_list_,
                                        may_skip_dump,
                                        principal_mapping_address,
                                        sanitize_stacks);
}

// static
bool ExceptionHandler::WriteMinidump(const string& dump_path,
                                     MinidumpCallback callback,
                                     void* callback_context) {
  MinidumpDescriptor descriptor(dump_path);
  ExceptionHandler eh(descriptor, NULL, callback, callback_context, false, -1);
  return eh.WriteMinidump();
}
// To make using EBP to calculate the desired value for ESP a valid operation,
// ensure that this function is compiled with a frame pointer, using the
// following attribute. This attribute is supported on GCC but not on clang.
#if defined(__i386__) && defined(__GNUC__) && !defined(__clang__)
__attribute__((optimize("no-omit-frame-pointer")))
#endif
bool ExceptionHandler::WriteMinidump() {
  if (!IsOutOfProcess() && !minidump_descriptor_.IsFD() &&
      !minidump_descriptor_.IsMicrodumpOnConsole()) {
    // Update the path of the minidump so that this can be called multiple times
    // and new files are created for each minidump. This is done before the
    // generation happens, as clients may want to access the MinidumpDescriptor
    // after this call to find the exact path to the minidump file.
    minidump_descriptor_.UpdatePath();
  } else if (minidump_descriptor_.IsFD()) {
    // Reposition the FD to its beginning and resize it to get rid of the
    // previous minidump info.
    lseek(minidump_descriptor_.fd(), 0, SEEK_SET);
    ignore_result(ftruncate(minidump_descriptor_.fd(), 0));
  }

  // Allow this process to be dumped.
  sys_prctl(PR_SET_DUMPABLE, 1, 0, 0, 0);

  CrashContext context;
  int getcontext_result = getcontext(&context.context);
  if (getcontext_result)
    return false;

#if defined(__i386__)
  // In CPUFillFromUContext in minidump_writer.cc the stack pointer is retrieved
  // from REG_UESP instead of from REG_ESP. REG_UESP is the user stack pointer
  // and it only makes sense when running in kernel mode with a different stack
  // pointer. When WriteMinidump is called during normal processing REG_UESP is
  // zero, which leads to bad minidump files.
  if (!context.context.uc_mcontext.gregs[REG_UESP]) {
    // If REG_UESP is set to REG_ESP then that includes the stack space for the
    // CrashContext object in this function, which is about 128 KB. Since the
    // Linux dumper only records 32 KB of stack this would mean that nothing
    // useful would be recorded. A better option is to set REG_UESP to REG_EBP,
    // perhaps with a small negative offset in case there is any code that
    // objects to them being equal.
    context.context.uc_mcontext.gregs[REG_UESP] =
        context.context.uc_mcontext.gregs[REG_EBP] - 16;
    // The stack saving is based off of REG_ESP so it must be set to match the
    // new REG_UESP.
    context.context.uc_mcontext.gregs[REG_ESP] =
        context.context.uc_mcontext.gregs[REG_UESP];
  }
#endif

#if !defined(__ARM_EABI__) && !defined(__aarch64__) && !defined(__mips__)
  // FPU state is not part of ARM EABI ucontext_t.
  memcpy(&context.float_state, context.context.uc_mcontext.fpregs,
         sizeof(context.float_state));
#endif
  context.tid = sys_gettid();

  // Add an exception stream to the minidump for better reporting.
  memset(&context.siginfo, 0, sizeof(context.siginfo));
  context.siginfo.si_signo = MD_EXCEPTION_CODE_LIN_DUMP_REQUESTED;
#if defined(__i386__)
  context.siginfo.si_addr =
      reinterpret_cast<void*>(context.context.uc_mcontext.gregs[REG_EIP]);
#elif defined(__x86_64__)
  context.siginfo.si_addr =
      reinterpret_cast<void*>(context.context.uc_mcontext.gregs[REG_RIP]);
#elif defined(__arm__)
  context.siginfo.si_addr =
      reinterpret_cast<void*>(context.context.uc_mcontext.arm_pc);
#elif defined(__aarch64__)
  context.siginfo.si_addr =
      reinterpret_cast<void*>(context.context.uc_mcontext.pc);
#elif defined(__mips__)
  context.siginfo.si_addr =
      reinterpret_cast<void*>(context.context.uc_mcontext.pc);
#else
#error "This code has not been ported to your platform yet."
#endif

  return GenerateDump(&context);
}
void ExceptionHandler::AddMappingInfo(const string& name,
                                      const uint8_t identifier[sizeof(MDGUID)],
                                      uintptr_t start_address,
                                      size_t mapping_size,
                                      size_t file_offset) {
  MappingInfo info;
  info.start_addr = start_address;
  info.size = mapping_size;
  info.offset = file_offset;
  strncpy(info.name, name.c_str(), sizeof(info.name) - 1);
  info.name[sizeof(info.name) - 1] = '\0';

  MappingEntry mapping;
  mapping.first = info;
  memcpy(mapping.second, identifier, sizeof(MDGUID));
  mapping_list_.push_back(mapping);
}

void ExceptionHandler::RegisterAppMemory(void* ptr, size_t length) {
  AppMemoryList::iterator iter =
      std::find(app_memory_list_.begin(), app_memory_list_.end(), ptr);
  if (iter != app_memory_list_.end()) {
    // Don't allow registering the same pointer twice.
    return;
  }

  AppMemory app_memory;
  app_memory.ptr = ptr;
  app_memory.length = length;
  app_memory_list_.push_back(app_memory);
}

void ExceptionHandler::UnregisterAppMemory(void* ptr) {
  AppMemoryList::iterator iter =
      std::find(app_memory_list_.begin(), app_memory_list_.end(), ptr);
  if (iter != app_memory_list_.end()) {
    app_memory_list_.erase(iter);
  }
}

// static
bool ExceptionHandler::WriteMinidumpForChild(pid_t child,
                                             pid_t child_blamed_thread,
                                             const string& dump_path,
                                             MinidumpCallback callback,
                                             void* callback_context) {
  // This function is not run in a compromised context.
  MinidumpDescriptor descriptor(dump_path);
  descriptor.UpdatePath();
  if (!google_breakpad::WriteMinidump(descriptor.path(),
                                      child,
                                      child_blamed_thread))
    return false;

  return callback ? callback(descriptor, callback_context, true) : true;
}

void SetFirstChanceExceptionHandler(FirstChanceHandler callback) {
  g_first_chance_handler_ = callback;
}
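
// Illustrative usage of the first chance hook (a sketch; HandleWasmTrap is a
// hypothetical client function, not part of Breakpad):
//
//   static bool HandleWasmTrap(int sig, siginfo_t* info, void* ucontext) {
//     // Return true if the signal was recovered from and should be swallowed;
//     // return false to let Breakpad proceed with dump generation.
//     return false;
//   }
//
//   google_breakpad::SetFirstChanceExceptionHandler(HandleWasmTrap);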

}  // namespace google_breakpad
