/* -*- mode: C; c-basic-offset: 3; -*- */

/*--------------------------------------------------------------------*/
/*--- Implementation of POSIX signals.                 m_signals.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of Valgrind, a dynamic binary instrumentation
   framework.

   Copyright (C) 2000-2015 Julian Seward
      jseward@acm.org

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/

/*
   Signal handling.

   There are 4 distinct classes of signal:

   1. Synchronous, instruction-generated (SIGILL, FPE, BUS, SEGV and
   TRAP): these are signals as a result of an instruction fault.  If
   we get one while running client code, then we just do the
   appropriate thing.  If it happens while running Valgrind code, then
   it indicates a Valgrind bug.  Note that we "manually" implement
   automatic stack growth, such that if a fault happens near the
   client process stack, it is extended in the same way the kernel
   would, and the fault is never reported to the client program.

   2. Asynchronous variants of the above signals: If the kernel tries
   to deliver a sync signal while it is blocked, it just kills the
   process.  Therefore, we can't block those signals if we want to be
   able to report on bugs in Valgrind.  This means that we're also
   open to receiving those signals from other processes, sent with
   kill.  We could get away with just dropping them, since they aren't
   really signals that processes send to each other.

   3. Synchronous, general signals.  If a thread/process sends itself
   a signal with kill, it's expected to be synchronous: ie, the signal
   will have been delivered by the time the syscall finishes.

   4. Asynchronous, general signals.  All other signals, sent by
   another process with kill.  These are generally blocked, except for
   two special cases: we poll for them each time we're about to run a
   thread for a time quantum, and while running blocking syscalls.


   In addition, we reserve one signal for internal use: SIGVGKILL.
   SIGVGKILL is used to terminate threads.  When one thread wants
   another to exit, it will set its exitreason and send it SIGVGKILL
   if it appears to be blocked in a syscall.


   We use a kernel thread for each application thread.  When the
   thread allows itself to be open to signals, it sets the thread
   signal mask to what the client application set it to.  This means
   that we get the kernel to do all signal routing: under Valgrind,
   signals get delivered in the same way as in the non-Valgrind case
   (the exception being for the sync signal set, since they're almost
   always unblocked).
 */

/*
   Some more details...

   First off, we take note of the client's requests (via sys_sigaction
   and sys_sigprocmask) to set the signal state (handlers for each
   signal, which are process-wide, + a mask for each signal, which is
   per-thread).  This info is duly recorded in the SCSS (static Client
   signal state) in m_signals.c, and if the client later queries what
   the state is, we merely fish the relevant info out of SCSS and give
   it back.

   However, we set the real signal state in the kernel to something
   entirely different.  This is recorded in SKSS, the static Kernel
   signal state.  What's nice (to the extent that anything is nice w.r.t
   signals) is that there's a pure function to calculate SKSS from SCSS,
   calculate_SKSS_from_SCSS.  So when the client changes SCSS then we
   recompute the associated SKSS and apply any changes from the previous
   SKSS through to the kernel.

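   As an illustrative sketch (simplified; the real update lives in
   handle_SCSS_change below, and 'old_skss' here is a hypothetical
   name), the flow on each client sigaction/sigprocmask is roughly:

      SKSS old_skss = skss;
      calculate_SKSS_from_SCSS(&skss);   // pure function of SCSS
      for (sig = 1; sig <= _VKI_NSIG; sig++)
         if (skss entry for sig differs from old_skss's entry)
            VG_(sigaction)(sig, ...);    // push the change to the kernel
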
   Now, that said, the general scheme we have now is, that regardless of
   what the client puts into the SCSS (viz, asks for), what we would
   like to do is as follows:

   (1) run code on the virtual CPU with all signals blocked

   (2) at convenient moments for us (that is, when the VCPU stops, and
      control is back with the scheduler), ask the kernel "do you have
      any signals for me?"  and if it does, collect up the info, and
      deliver them to the client (by building sigframes).

   And that's almost what we do.  The signal polling is done by
   VG_(poll_signals), which calls through to VG_(sigtimedwait_zero) to
   do the dirty work (of which more later).

   By polling signals, rather than catching them, we get to deal with
   them only at convenient moments, rather than having to recover from
   taking a signal while generated code is running.

   Now unfortunately .. the above scheme only works for so-called async
   signals.  An async signal is one which isn't associated with any
   particular instruction, eg Control-C (SIGINT).  For those, it doesn't
   matter if we don't deliver the signal to the client immediately; it
   only matters that we deliver it eventually.  Hence polling is OK.

   But the other group -- sync signals -- are all related by the fact
   that they are various ways for the host CPU to fail to execute an
   instruction: SIGILL, SIGSEGV, SIGFPE.  And they can't be deferred,
   because obviously if a host instruction can't execute, well then we
   have to immediately do Plan B, whatever that is.

   So the next approximation of what happens is:

   (1) run code on vcpu with all async signals blocked

   (2) at convenient moments (when NOT running the vcpu), poll for async
      signals.

   (1) and (2) together imply that if the host does deliver a signal to
      async_signalhandler while the VCPU is running, something's
      seriously wrong.

   (3) when running code on vcpu, don't block sync signals.  Instead
      register sync_signalhandler and catch any such via that.  Of
      course, that means an ugly recovery path if we do -- the
      sync_signalhandler has to longjmp, exiting out of the generated
      code, and the assembly-dispatcher thingy that runs it, and gets
      caught in m_scheduler, which then tells m_signals to deliver the
      signal.

   Now naturally (ha ha) even that might be tolerable, but there's
   something worse: dealing with signals delivered to threads in
   syscalls.

   Obviously from the above, SKSS's signal mask (viz, what we really run
   with) is way different from SCSS's signal mask (viz, what the client
   thread thought it asked for).  (eg) It may well be that the client
   did not block control-C, so that it just expects to drop dead if it
   receives ^C whilst blocked in a syscall, but by default we are
   running with all async signals blocked, and so that signal could be
   arbitrarily delayed, or perhaps even lost (not sure).

   So what we have to do, when doing any syscall which SfMayBlock, is to
   quickly switch in the SCSS-specified signal mask just before the
   syscall, and switch it back just afterwards, and hope that we don't
   get caught up in some weird race condition.  This is the primary
   purpose of the ultra-magical pieces of assembly code in
   coregrind/m_syswrap/syscall-<plat>.S
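
   In C-level pseudocode (illustrative only -- the real sequence is
   done in assembly precisely so the mask switch and the syscall are
   inseparable), the dance around a blocking syscall is roughly:

      VG_(sigprocmask)(VKI_SIG_SETMASK, &client_mask, &saved_mask);
      res = do_the_syscall(...);              /* may block here */
      VG_(sigprocmask)(VKI_SIG_SETMASK, &saved_mask, NULL);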

   -----------

   The ways in which V can come to hear of signals that need to be
   forwarded to the client are as follows:

    sync signals: can arrive at any time whatsoever.  These are caught
                  by sync_signalhandler

    async signals:

       if    running generated code
       then  these are blocked, so we don't expect to catch them in
             async_signalhandler

       else
       if    thread is blocked in a syscall marked SfMayBlock
       then  signals may be delivered to async_signalhandler, since we
             temporarily unblocked them for the duration of the syscall,
             by using the real (SCSS) mask for this thread

       else  we're doing misc housekeeping activities (eg, making a translation,
             washing our hair, etc).  As in the normal case, these signals are
             blocked, but we can and do poll for them using VG_(poll_signals).

   Now, re VG_(poll_signals), it polls the kernel by doing
   VG_(sigtimedwait_zero).  This is trivial on Linux, since it's just a
   syscall.  But on Darwin and AIX, we have to cobble together the
   functionality in a tedious, longwinded and probably error-prone way.
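
   A hedged sketch of that polling step (simplified; 'pollset' and the
   delivery step are stand-ins for what the real VG_(poll_signals)
   does):

      vki_siginfo_t si;
      Int sig = VG_(sigtimedwait_zero)(&pollset, &si);  /* never blocks */
      if (sig > 0)
         ... deliver si to the client by building a sigframe ...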

   Finally, if gdb is debugging the process under valgrind, gdb may
   tell us that a signal is to be ignored.  So, before resuming the
   scheduler/delivering the signal, VG_(gdbserver_report_signal) is
   called.  If this returns True, the signal is delivered.
 */

#include "pub_core_basics.h"
#include "pub_core_vki.h"
#include "pub_core_vkiscnums.h"
#include "pub_core_debuglog.h"
#include "pub_core_threadstate.h"
#include "pub_core_xarray.h"
#include "pub_core_clientstate.h"
#include "pub_core_aspacemgr.h"
#include "pub_core_errormgr.h"
#include "pub_core_gdbserver.h"
#include "pub_core_libcbase.h"
#include "pub_core_libcassert.h"
#include "pub_core_libcprint.h"
#include "pub_core_libcproc.h"
#include "pub_core_libcsignal.h"
#include "pub_core_machine.h"
#include "pub_core_mallocfree.h"
#include "pub_core_options.h"
#include "pub_core_scheduler.h"
#include "pub_core_signals.h"
#include "pub_core_sigframe.h"      // For VG_(sigframe_create)()
#include "pub_core_stacks.h"        // For VG_(change_stack)()
#include "pub_core_stacktrace.h"    // For VG_(get_and_pp_StackTrace)()
#include "pub_core_syscall.h"
#include "pub_core_syswrap.h"
#include "pub_core_tooliface.h"
#include "pub_core_coredump.h"


/* ---------------------------------------------------------------------
   Forwards decls.
   ------------------------------------------------------------------ */

static void sync_signalhandler  ( Int sigNo, vki_siginfo_t *info,
                                             struct vki_ucontext * );
static void async_signalhandler ( Int sigNo, vki_siginfo_t *info,
                                             struct vki_ucontext * );
static void sigvgkill_handler   ( Int sigNo, vki_siginfo_t *info,
                                             struct vki_ucontext * );

/* Maximum usable signal. */
Int VG_(max_signal) = _VKI_NSIG;

#define N_QUEUED_SIGNALS   8

typedef struct SigQueue {
   Int next;
   vki_siginfo_t sigs[N_QUEUED_SIGNALS];
} SigQueue;
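
/* A SigQueue is a small fixed-size buffer of pending signals: ones
   that have to be held back because the thread currently has them
   blocked.  'next' is the index of the next slot to write, wrapping
   round modulo N_QUEUED_SIGNALS. */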

/* ------ Macros for pulling stuff out of ucontexts ------ */

/* Q: what does VG_UCONTEXT_SYSCALL_SYSRES do?  A: let's suppose the
   machine context (uc) reflects the situation that a syscall had just
   completed, quite literally -- that is, that the program counter was
   now at the instruction following the syscall.  (or we're slightly
   downstream, but we're sure no relevant register has yet changed
   value.)  Then VG_UCONTEXT_SYSCALL_SYSRES returns a SysRes reflecting
   the result of the syscall; it does this by fishing relevant bits of
   the machine state out of the uc.  Of course if the program counter
   was somewhere else entirely then the result is likely to be
   meaningless, so the caller of VG_UCONTEXT_SYSCALL_SYSRES has to be
   very careful to pay attention to the results only when it is sure
   that the said constraint on the program counter is indeed valid. */
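
/* A minimal usage sketch (illustrative only; 'uc' is assumed to be a
   ucontext captured while the thread sat just after a syscall insn;
   note the Darwin variants below also take a syscall-class argument):

      SysRes sres = VG_UCONTEXT_SYSCALL_SYSRES(uc);
      if (sr_isError(sres))
         ... the syscall failed with error sr_Err(sres) ...
      else
         ... it succeeded, returning sr_Res(sres) ...
*/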

#if defined(VGP_x86_linux)
#  define VG_UCONTEXT_INSTR_PTR(uc)       ((uc)->uc_mcontext.eip)
#  define VG_UCONTEXT_STACK_PTR(uc)       ((uc)->uc_mcontext.esp)
#  define VG_UCONTEXT_SYSCALL_SYSRES(uc)                        \
      /* Convert the value in uc_mcontext.eax into a SysRes. */ \
      VG_(mk_SysRes_x86_linux)( (uc)->uc_mcontext.eax )
#  define VG_UCONTEXT_TO_UnwindStartRegs(srP, uc)        \
      { (srP)->r_pc = (ULong)((uc)->uc_mcontext.eip);    \
        (srP)->r_sp = (ULong)((uc)->uc_mcontext.esp);    \
        (srP)->misc.X86.r_ebp = (uc)->uc_mcontext.ebp;   \
      }

#elif defined(VGP_amd64_linux)
#  define VG_UCONTEXT_INSTR_PTR(uc)       ((uc)->uc_mcontext.rip)
#  define VG_UCONTEXT_STACK_PTR(uc)       ((uc)->uc_mcontext.rsp)
#  define VG_UCONTEXT_SYSCALL_SYSRES(uc)                        \
      /* Convert the value in uc_mcontext.rax into a SysRes. */ \
      VG_(mk_SysRes_amd64_linux)( (uc)->uc_mcontext.rax )
#  define VG_UCONTEXT_TO_UnwindStartRegs(srP, uc)        \
      { (srP)->r_pc = (uc)->uc_mcontext.rip;             \
        (srP)->r_sp = (uc)->uc_mcontext.rsp;             \
        (srP)->misc.AMD64.r_rbp = (uc)->uc_mcontext.rbp; \
      }

#elif defined(VGP_ppc32_linux)
/* Comments from Paul Mackerras 25 Nov 05:

   > I'm tracking down a problem where V's signal handling doesn't
   > work properly on a ppc440gx running 2.4.20.  The problem is that
   > the ucontext being presented to V's sighandler seems completely
   > bogus.

   > V's kernel headers and hence ucontext layout are derived from
   > 2.6.9.  I compared include/asm-ppc/ucontext.h from 2.4.20 and
   > 2.6.13.

   > Can I just check my interpretation: the 2.4.20 one contains the
   > uc_mcontext field in line, whereas the 2.6.13 one has a pointer
   > to said struct?  And so if V is using the 2.6.13 struct then a
   > 2.4.20 one will make no sense to it.

   Not quite... what is inline in the 2.4.20 version is a
   sigcontext_struct, not an mcontext.  The sigcontext looks like
   this:

     struct sigcontext_struct {
        unsigned long   _unused[4];
        int             signal;
        unsigned long   handler;
        unsigned long   oldmask;
        struct pt_regs  *regs;
     };

   The regs pointer of that struct ends up at the same offset as the
   uc_regs of the 2.6 struct ucontext, and a struct pt_regs is the
   same as the mc_gregs field of the mcontext.  In fact the integer
   regs are followed in memory by the floating point regs on 2.4.20.

   Thus if you are using the 2.6 definitions, it should work on 2.4.20
   provided that you go via uc->uc_regs rather than looking in
   uc->uc_mcontext directly.

   There is another subtlety: 2.4.20 doesn't save the vector regs when
   delivering a signal, and 2.6.x only saves the vector regs if the
   process has ever used altivec instructions.  If 2.6.x does save
   the vector regs, it sets the MSR_VEC bit in
   uc->uc_regs->mc_gregs[PT_MSR], otherwise it clears it.  That bit
   will always be clear under 2.4.20.  So you can use that bit to tell
   whether uc->uc_regs->mc_vregs is valid. */
#  define VG_UCONTEXT_INSTR_PTR(uc)  ((uc)->uc_regs->mc_gregs[VKI_PT_NIP])
#  define VG_UCONTEXT_STACK_PTR(uc)  ((uc)->uc_regs->mc_gregs[VKI_PT_R1])
#  define VG_UCONTEXT_SYSCALL_SYSRES(uc)                            \
      /* Convert the values in uc_mcontext r3,cr into a SysRes. */  \
      VG_(mk_SysRes_ppc32_linux)(                                   \
         (uc)->uc_regs->mc_gregs[VKI_PT_R3],                        \
         (((uc)->uc_regs->mc_gregs[VKI_PT_CCR] >> 28) & 1)          \
      )
#  define VG_UCONTEXT_TO_UnwindStartRegs(srP, uc)                     \
      { (srP)->r_pc = (ULong)((uc)->uc_regs->mc_gregs[VKI_PT_NIP]);   \
        (srP)->r_sp = (ULong)((uc)->uc_regs->mc_gregs[VKI_PT_R1]);    \
        (srP)->misc.PPC32.r_lr = (uc)->uc_regs->mc_gregs[VKI_PT_LNK]; \
      }

#elif defined(VGP_ppc64be_linux) || defined(VGP_ppc64le_linux)
#  define VG_UCONTEXT_INSTR_PTR(uc)  ((uc)->uc_mcontext.gp_regs[VKI_PT_NIP])
#  define VG_UCONTEXT_STACK_PTR(uc)  ((uc)->uc_mcontext.gp_regs[VKI_PT_R1])
   /* Dubious hack: if there is an error, only consider the lowest 8
      bits of r3.  memcheck/tests/post-syscall shows a case where an
      interrupted syscall should have produced a ucontext with 0x4
      (VKI_EINTR) in r3 but is in fact producing 0x204. */
   /* Awaiting clarification from PaulM.  Evidently 0x204 is
      ERESTART_RESTARTBLOCK, which shouldn't have made it into user
      space. */
   static inline SysRes VG_UCONTEXT_SYSCALL_SYSRES( struct vki_ucontext* uc )
   {
      ULong err = (uc->uc_mcontext.gp_regs[VKI_PT_CCR] >> 28) & 1;
      ULong r3  = uc->uc_mcontext.gp_regs[VKI_PT_R3];
      if (err) r3 &= 0xFF;
      return VG_(mk_SysRes_ppc64_linux)( r3, err );
   }
#  define VG_UCONTEXT_TO_UnwindStartRegs(srP, uc)                       \
      { (srP)->r_pc = (uc)->uc_mcontext.gp_regs[VKI_PT_NIP];            \
        (srP)->r_sp = (uc)->uc_mcontext.gp_regs[VKI_PT_R1];             \
        (srP)->misc.PPC64.r_lr = (uc)->uc_mcontext.gp_regs[VKI_PT_LNK]; \
      }

#elif defined(VGP_arm_linux)
#  define VG_UCONTEXT_INSTR_PTR(uc)       ((uc)->uc_mcontext.arm_pc)
#  define VG_UCONTEXT_STACK_PTR(uc)       ((uc)->uc_mcontext.arm_sp)
#  define VG_UCONTEXT_SYSCALL_SYSRES(uc)                           \
      /* Convert the value in uc_mcontext.arm_r0 into a SysRes. */ \
      VG_(mk_SysRes_arm_linux)( (uc)->uc_mcontext.arm_r0 )
#  define VG_UCONTEXT_TO_UnwindStartRegs(srP, uc)       \
      { (srP)->r_pc = (uc)->uc_mcontext.arm_pc;         \
        (srP)->r_sp = (uc)->uc_mcontext.arm_sp;         \
        (srP)->misc.ARM.r14 = (uc)->uc_mcontext.arm_lr; \
        (srP)->misc.ARM.r12 = (uc)->uc_mcontext.arm_ip; \
        (srP)->misc.ARM.r11 = (uc)->uc_mcontext.arm_fp; \
        (srP)->misc.ARM.r7  = (uc)->uc_mcontext.arm_r7; \
      }

#elif defined(VGP_arm64_linux)
#  define VG_UCONTEXT_INSTR_PTR(uc)       ((UWord)((uc)->uc_mcontext.pc))
#  define VG_UCONTEXT_STACK_PTR(uc)       ((UWord)((uc)->uc_mcontext.sp))
#  define VG_UCONTEXT_SYSCALL_SYSRES(uc)                            \
      /* Convert the value in uc_mcontext.regs[0] into a SysRes. */ \
      VG_(mk_SysRes_arm64_linux)( (uc)->uc_mcontext.regs[0] )
#  define VG_UCONTEXT_TO_UnwindStartRegs(srP, uc)           \
      { (srP)->r_pc = (uc)->uc_mcontext.pc;                 \
        (srP)->r_sp = (uc)->uc_mcontext.sp;                 \
        (srP)->misc.ARM64.x29 = (uc)->uc_mcontext.regs[29]; \
        (srP)->misc.ARM64.x30 = (uc)->uc_mcontext.regs[30]; \
      }

#elif defined(VGP_x86_darwin)

   static inline Addr VG_UCONTEXT_INSTR_PTR( void* ucV ) {
      ucontext_t* uc = (ucontext_t*)ucV;
      struct __darwin_mcontext32* mc = uc->uc_mcontext;
      struct __darwin_i386_thread_state* ss = &mc->__ss;
      return ss->__eip;
   }
   static inline Addr VG_UCONTEXT_STACK_PTR( void* ucV ) {
      ucontext_t* uc = (ucontext_t*)ucV;
      struct __darwin_mcontext32* mc = uc->uc_mcontext;
      struct __darwin_i386_thread_state* ss = &mc->__ss;
      return ss->__esp;
   }
   static inline SysRes VG_UCONTEXT_SYSCALL_SYSRES( void* ucV,
                                                    UWord scclass ) {
      /* this is complicated by the problem that there are 3 different
         kinds of syscalls, each with its own return convention.
         NB: scclass is a host word, hence UWord is good for both
         amd64-darwin and x86-darwin */
      ucontext_t* uc = (ucontext_t*)ucV;
      struct __darwin_mcontext32* mc = uc->uc_mcontext;
      struct __darwin_i386_thread_state* ss = &mc->__ss;
      /* duplicates logic in m_syswrap.getSyscallStatusFromGuestState */
      UInt carry = 1 & ss->__eflags;
      UInt err = 0;
      UInt wLO = 0;
      UInt wHI = 0;
      switch (scclass) {
         case VG_DARWIN_SYSCALL_CLASS_UNIX:
            err = carry;
            wLO = ss->__eax;
            wHI = ss->__edx;
            break;
         case VG_DARWIN_SYSCALL_CLASS_MACH:
            wLO = ss->__eax;
            break;
         case VG_DARWIN_SYSCALL_CLASS_MDEP:
            wLO = ss->__eax;
            break;
         default:
            vg_assert(0);
            break;
      }
      return VG_(mk_SysRes_x86_darwin)( scclass, err ? True : False,
                                        wHI, wLO );
   }
   static inline
   void VG_UCONTEXT_TO_UnwindStartRegs( UnwindStartRegs* srP,
                                        void* ucV ) {
      ucontext_t* uc = (ucontext_t*)(ucV);
      struct __darwin_mcontext32* mc = uc->uc_mcontext;
      struct __darwin_i386_thread_state* ss = &mc->__ss;
      srP->r_pc = (ULong)(ss->__eip);
      srP->r_sp = (ULong)(ss->__esp);
      srP->misc.X86.r_ebp = (UInt)(ss->__ebp);
   }

#elif defined(VGP_amd64_darwin)

   static inline Addr VG_UCONTEXT_INSTR_PTR( void* ucV ) {
      ucontext_t* uc = (ucontext_t*)ucV;
      struct __darwin_mcontext64* mc = uc->uc_mcontext;
      struct __darwin_x86_thread_state64* ss = &mc->__ss;
      return ss->__rip;
   }
   static inline Addr VG_UCONTEXT_STACK_PTR( void* ucV ) {
      ucontext_t* uc = (ucontext_t*)ucV;
      struct __darwin_mcontext64* mc = uc->uc_mcontext;
      struct __darwin_x86_thread_state64* ss = &mc->__ss;
      return ss->__rsp;
   }
   static inline SysRes VG_UCONTEXT_SYSCALL_SYSRES( void* ucV,
                                                    UWord scclass ) {
      /* This is copied from the x86-darwin case.  I'm not sure if it
         is correct. */
      ucontext_t* uc = (ucontext_t*)ucV;
      struct __darwin_mcontext64* mc = uc->uc_mcontext;
      struct __darwin_x86_thread_state64* ss = &mc->__ss;
      /* duplicates logic in m_syswrap.getSyscallStatusFromGuestState */
      ULong carry = 1 & ss->__rflags;
      ULong err = 0;
      ULong wLO = 0;
      ULong wHI = 0;
      switch (scclass) {
         case VG_DARWIN_SYSCALL_CLASS_UNIX:
            err = carry;
            wLO = ss->__rax;
            wHI = ss->__rdx;
            break;
         case VG_DARWIN_SYSCALL_CLASS_MACH:
            wLO = ss->__rax;
            break;
         case VG_DARWIN_SYSCALL_CLASS_MDEP:
            wLO = ss->__rax;
            break;
         default:
            vg_assert(0);
            break;
      }
      return VG_(mk_SysRes_amd64_darwin)( scclass, err ? True : False,
                                          wHI, wLO );
   }
   static inline
   void VG_UCONTEXT_TO_UnwindStartRegs( UnwindStartRegs* srP,
                                        void* ucV ) {
      ucontext_t* uc = (ucontext_t*)ucV;
      struct __darwin_mcontext64* mc = uc->uc_mcontext;
      struct __darwin_x86_thread_state64* ss = &mc->__ss;
      srP->r_pc = (ULong)(ss->__rip);
      srP->r_sp = (ULong)(ss->__rsp);
      srP->misc.AMD64.r_rbp = (ULong)(ss->__rbp);
   }

#elif defined(VGP_s390x_linux)

#  define VG_UCONTEXT_INSTR_PTR(uc)       ((uc)->uc_mcontext.regs.psw.addr)
#  define VG_UCONTEXT_STACK_PTR(uc)       ((uc)->uc_mcontext.regs.gprs[15])
#  define VG_UCONTEXT_FRAME_PTR(uc)       ((uc)->uc_mcontext.regs.gprs[11])
#  define VG_UCONTEXT_SYSCALL_SYSRES(uc)                        \
      VG_(mk_SysRes_s390x_linux)((uc)->uc_mcontext.regs.gprs[2])
#  define VG_UCONTEXT_LINK_REG(uc) ((uc)->uc_mcontext.regs.gprs[14])

#  define VG_UCONTEXT_TO_UnwindStartRegs(srP, uc)                  \
      { (srP)->r_pc = (ULong)((uc)->uc_mcontext.regs.psw.addr);    \
        (srP)->r_sp = (ULong)((uc)->uc_mcontext.regs.gprs[15]);    \
        (srP)->misc.S390X.r_fp = (uc)->uc_mcontext.regs.gprs[11];  \
        (srP)->misc.S390X.r_lr = (uc)->uc_mcontext.regs.gprs[14];  \
      }

#elif defined(VGP_mips32_linux)
#  define VG_UCONTEXT_INSTR_PTR(uc)   ((UWord)(((uc)->uc_mcontext.sc_pc)))
#  define VG_UCONTEXT_STACK_PTR(uc)   ((UWord)((uc)->uc_mcontext.sc_regs[29]))
#  define VG_UCONTEXT_FRAME_PTR(uc)       ((uc)->uc_mcontext.sc_regs[30])
#  define VG_UCONTEXT_SYSCALL_NUM(uc)     ((uc)->uc_mcontext.sc_regs[2])
#  define VG_UCONTEXT_SYSCALL_SYSRES(uc)                                  \
      /* Convert the values in sc_regs[2], [3] and [7] into a SysRes. */  \
      VG_(mk_SysRes_mips32_linux)( (uc)->uc_mcontext.sc_regs[2],          \
                                   (uc)->uc_mcontext.sc_regs[3],          \
                                   (uc)->uc_mcontext.sc_regs[7])

#  define VG_UCONTEXT_TO_UnwindStartRegs(srP, uc)               \
      { (srP)->r_pc = (uc)->uc_mcontext.sc_pc;                  \
        (srP)->r_sp = (uc)->uc_mcontext.sc_regs[29];            \
        (srP)->misc.MIPS32.r30 = (uc)->uc_mcontext.sc_regs[30]; \
        (srP)->misc.MIPS32.r31 = (uc)->uc_mcontext.sc_regs[31]; \
        (srP)->misc.MIPS32.r28 = (uc)->uc_mcontext.sc_regs[28]; \
      }

#elif defined(VGP_mips64_linux)
#  define VG_UCONTEXT_INSTR_PTR(uc)       (((uc)->uc_mcontext.sc_pc))
#  define VG_UCONTEXT_STACK_PTR(uc)       ((uc)->uc_mcontext.sc_regs[29])
#  define VG_UCONTEXT_FRAME_PTR(uc)       ((uc)->uc_mcontext.sc_regs[30])
#  define VG_UCONTEXT_SYSCALL_NUM(uc)     ((uc)->uc_mcontext.sc_regs[2])
#  define VG_UCONTEXT_SYSCALL_SYSRES(uc)                                  \
      /* Convert the values in sc_regs[2], [3] and [7] into a SysRes. */  \
      VG_(mk_SysRes_mips64_linux)((uc)->uc_mcontext.sc_regs[2],           \
                                  (uc)->uc_mcontext.sc_regs[3],           \
                                  (uc)->uc_mcontext.sc_regs[7])

#  define VG_UCONTEXT_TO_UnwindStartRegs(srP, uc)               \
      { (srP)->r_pc = (uc)->uc_mcontext.sc_pc;                  \
        (srP)->r_sp = (uc)->uc_mcontext.sc_regs[29];            \
        (srP)->misc.MIPS64.r30 = (uc)->uc_mcontext.sc_regs[30]; \
        (srP)->misc.MIPS64.r31 = (uc)->uc_mcontext.sc_regs[31]; \
        (srP)->misc.MIPS64.r28 = (uc)->uc_mcontext.sc_regs[28]; \
      }

#elif defined(VGP_tilegx_linux)
#  define VG_UCONTEXT_INSTR_PTR(uc)       ((uc)->uc_mcontext.pc)
#  define VG_UCONTEXT_STACK_PTR(uc)       ((uc)->uc_mcontext.sp)
#  define VG_UCONTEXT_FRAME_PTR(uc)       ((uc)->uc_mcontext.gregs[52])
#  define VG_UCONTEXT_SYSCALL_NUM(uc)     ((uc)->uc_mcontext.gregs[10])
#  define VG_UCONTEXT_SYSCALL_SYSRES(uc)                              \
      /* Convert the value in uc_mcontext.gregs[0] into a SysRes. */  \
      VG_(mk_SysRes_tilegx_linux)((uc)->uc_mcontext.gregs[0])
#  define VG_UCONTEXT_TO_UnwindStartRegs(srP, uc)              \
      { (srP)->r_pc = (uc)->uc_mcontext.pc;                    \
        (srP)->r_sp = (uc)->uc_mcontext.sp;                    \
        (srP)->misc.TILEGX.r52 = (uc)->uc_mcontext.gregs[52];  \
        (srP)->misc.TILEGX.r55 = (uc)->uc_mcontext.lr;         \
      }

#elif defined(VGP_x86_solaris)
#  define VG_UCONTEXT_INSTR_PTR(uc)       ((Addr)(uc)->uc_mcontext.gregs[VKI_EIP])
#  define VG_UCONTEXT_STACK_PTR(uc)       ((Addr)(uc)->uc_mcontext.gregs[VKI_UESP])
#  define VG_UCONTEXT_SYSCALL_SYSRES(uc)                               \
      VG_(mk_SysRes_x86_solaris)((uc)->uc_mcontext.gregs[VKI_EFL] & 1, \
                                 (uc)->uc_mcontext.gregs[VKI_EAX],     \
                                 (uc)->uc_mcontext.gregs[VKI_EFL] & 1  \
                                 ? 0 : (uc)->uc_mcontext.gregs[VKI_EDX])
#  define VG_UCONTEXT_TO_UnwindStartRegs(srP, uc)                      \
      { (srP)->r_pc = (ULong)(uc)->uc_mcontext.gregs[VKI_EIP];         \
        (srP)->r_sp = (ULong)(uc)->uc_mcontext.gregs[VKI_UESP];        \
        (srP)->misc.X86.r_ebp = (uc)->uc_mcontext.gregs[VKI_EBP];      \
      }

#elif defined(VGP_amd64_solaris)
#  define VG_UCONTEXT_INSTR_PTR(uc)       ((Addr)(uc)->uc_mcontext.gregs[VKI_REG_RIP])
#  define VG_UCONTEXT_STACK_PTR(uc)       ((Addr)(uc)->uc_mcontext.gregs[VKI_REG_RSP])
#  define VG_UCONTEXT_SYSCALL_SYSRES(uc)                                     \
      VG_(mk_SysRes_amd64_solaris)((uc)->uc_mcontext.gregs[VKI_REG_RFL] & 1, \
                                   (uc)->uc_mcontext.gregs[VKI_REG_RAX],     \
                                   (uc)->uc_mcontext.gregs[VKI_REG_RFL] & 1  \
                                   ? 0 : (uc)->uc_mcontext.gregs[VKI_REG_RDX])
#  define VG_UCONTEXT_TO_UnwindStartRegs(srP, uc)                            \
      { (srP)->r_pc = (uc)->uc_mcontext.gregs[VKI_REG_RIP];                  \
        (srP)->r_sp = (uc)->uc_mcontext.gregs[VKI_REG_RSP];                  \
        (srP)->misc.AMD64.r_rbp = (uc)->uc_mcontext.gregs[VKI_REG_RBP];      \
      }
#else
#  error Unknown platform
#endif


/* ------ Macros for pulling stuff out of siginfos ------ */

/* These macros allow use of uniform names when working with
   both the Linux and Darwin vki definitions. */
#if defined(VGO_linux)
#  define VKI_SIGINFO_si_addr  _sifields._sigfault._addr
#  define VKI_SIGINFO_si_pid   _sifields._kill._pid
#elif defined(VGO_darwin) || defined(VGO_solaris)
#  define VKI_SIGINFO_si_addr  si_addr
#  define VKI_SIGINFO_si_pid   si_pid
#else
#  error Unknown OS
#endif


/* ---------------------------------------------------------------------
   HIGH LEVEL STUFF TO DO WITH SIGNALS: POLICY (MOSTLY)
   ------------------------------------------------------------------ */

/* ---------------------------------------------------------------------
   Signal state for this process.
   ------------------------------------------------------------------ */


/* Base-ment of these arrays[_VKI_NSIG].

   Valid signal numbers are 1 .. _VKI_NSIG inclusive.
   Rather than subtracting 1 for indexing these arrays, which
   is tedious and error-prone, they are simply dimensioned 1 larger,
   and entry [0] is not used.
 */
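
/* So, for example, the client's settings for SIGUSR1 live directly at
   scss.scss_per_sig[VKI_SIGUSR1] -- no off-by-one index juggling. */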


/* -----------------------------------------------------
   Static client signal state (SCSS).  This is the state
   that the client thinks it has the kernel in.
   SCSS records verbatim the client's settings.  These
   are mashed around only when SKSS is calculated from it.
   -------------------------------------------------- */

typedef
   struct {
      void* scss_handler;  /* VKI_SIG_DFL or VKI_SIG_IGN or ptr to
                              client's handler */
      UInt  scss_flags;
      vki_sigset_t scss_mask;
      void* scss_restorer; /* where sigreturn goes */
      void* scss_sa_tramp; /* sa_tramp setting, Darwin only */
      /* re _restorer and _sa_tramp, we merely record the values
         supplied when the client does 'sigaction' and give them back
         when requested.  Otherwise they are simply ignored. */
   }
   SCSS_Per_Signal;

typedef
   struct {
      /* per-signal info */
      SCSS_Per_Signal scss_per_sig[1+_VKI_NSIG];

      /* Additional elements to SCSS not stored here:
         - for each thread, the thread's blocking mask
         - for each thread in WaitSIG, the set of waited-on sigs
      */
   }
   SCSS;

static SCSS scss;


/* -----------------------------------------------------
   Static kernel signal state (SKSS).  This is the state
   that we have the kernel in.  It is computed from SCSS.
   -------------------------------------------------- */

/* Let's do:
     sigprocmask assigns to all thread masks
     so that at least everything is always consistent
   Flags:
     SA_SIGINFO -- we always set it, and honour it for the client
     SA_NOCLDSTOP -- passed to kernel
     SA_ONESHOT or SA_RESETHAND -- pass through
     SA_RESTART -- we observe this but set our handlers to always restart
                   (this doesn't apply to the Solaris port)
     SA_NOMASK or SA_NODEFER -- we observe this, but our handlers block everything
     SA_ONSTACK -- pass through
     SA_NOCLDWAIT -- pass through
*/


typedef
   struct {
      void* skss_handler;  /* VKI_SIG_DFL or VKI_SIG_IGN
                              or ptr to our handler */
      UInt skss_flags;
      /* There is no skss_mask, since we know that we will always ask
         for all signals to be blocked in our sighandlers. */
      /* Also there is no skss_restorer. */
   }
   SKSS_Per_Signal;

typedef
   struct {
      SKSS_Per_Signal skss_per_sig[1+_VKI_NSIG];
   }
   SKSS;

static SKSS skss;

/* Returns True if the signal is to be ignored.  To decide this, we
   may need to ask gdbserver (for the given tid). */
static Bool is_sig_ign(vki_siginfo_t *info, ThreadId tid)
{
   vg_assert(info->si_signo >= 1 && info->si_signo <= _VKI_NSIG);

   /* Even if VG_(gdbserver_report_signal) says to report the signal,
      we still ignore it when the client's handler is SIG_IGN.  GDB
      might have modified si_signo, so we check after the call to
      gdbserver. */
   return !VG_(gdbserver_report_signal) (info, tid)
      || scss.scss_per_sig[info->si_signo].scss_handler == VKI_SIG_IGN;
}

/* ---------------------------------------------------------------------
   Compute the SKSS required by the current SCSS.
   ------------------------------------------------------------------ */

static
void pp_SKSS ( void )
{
   Int sig;
   VG_(printf)("\n\nSKSS:\n");
   for (sig = 1; sig <= _VKI_NSIG; sig++) {
      VG_(printf)("sig %d:  handler %p,  flags 0x%x\n", sig,
                  skss.skss_per_sig[sig].skss_handler,
                  skss.skss_per_sig[sig].skss_flags );

   }
}

/* This is the core, clever bit.  Computation is as follows:

   For each signal
      handler = if client has a handler, then our handler
                else if client is DFL, then our handler as well
                else (client must be IGN)
                     then handler is IGN
*/
static
void calculate_SKSS_from_SCSS ( SKSS* dst )
{
   Int   sig;
   UInt  scss_flags;
   UInt  skss_flags;

   for (sig = 1; sig <= _VKI_NSIG; sig++) {
      void *skss_handler;
      void *scss_handler;

      scss_handler = scss.scss_per_sig[sig].scss_handler;
      scss_flags   = scss.scss_per_sig[sig].scss_flags;

      switch(sig) {
      case VKI_SIGSEGV:
      case VKI_SIGBUS:
      case VKI_SIGFPE:
      case VKI_SIGILL:
      case VKI_SIGTRAP:
         /* For these, we always want to catch them and report, even
            if the client code doesn't. */
         skss_handler = sync_signalhandler;
         break;

      case VKI_SIGCONT:
         /* Let the kernel handle SIGCONT unless the client is actually
            catching it. */
      case VKI_SIGCHLD:
      case VKI_SIGWINCH:
      case VKI_SIGURG:
         /* For signals which have a default action of Ignore,
            only set a handler if the client has set a signal handler.
            Otherwise the kernel will interrupt a syscall which
            wouldn't have otherwise been interrupted. */
         if (scss.scss_per_sig[sig].scss_handler == VKI_SIG_DFL)
            skss_handler = VKI_SIG_DFL;
         else if (scss.scss_per_sig[sig].scss_handler == VKI_SIG_IGN)
            skss_handler = VKI_SIG_IGN;
         else
            skss_handler = async_signalhandler;
         break;

      default:
         // VKI_SIGVG* are runtime variables, so we can't make them
         // cases in the switch, so we handle them in the 'default' case.
         if (sig == VG_SIGVGKILL)
            skss_handler = sigvgkill_handler;
         else {
            if (scss_handler == VKI_SIG_IGN)
               skss_handler = VKI_SIG_IGN;
            else
               skss_handler = async_signalhandler;
         }
         break;
      }

      /* Flags */

      skss_flags = 0;

      /* SA_NOCLDSTOP, SA_NOCLDWAIT: pass to kernel */
      skss_flags |= scss_flags & (VKI_SA_NOCLDSTOP | VKI_SA_NOCLDWAIT);

      /* SA_ONESHOT: ignore client setting */

#     if !defined(VGO_solaris)
      /* SA_RESTART: ignore client setting and always set it for us.
         Though we never rely on the kernel to restart a
         syscall, we observe whether it wanted to restart the syscall
         or not, which is needed by
         VG_(fixup_guest_state_after_syscall_interrupted) */
      skss_flags |= VKI_SA_RESTART;
#     else
      /* The above does not apply to the Solaris port, where the kernel does
         not directly restart syscalls, but instead it checks SA_RESTART flag
         and if it is set then it returns ERESTART to libc and the library
         actually restarts the syscall. */
      skss_flags |= scss_flags & VKI_SA_RESTART;
#     endif

      /* SA_NOMASK: ignore it */

      /* SA_ONSTACK: client setting is irrelevant here */
      /* We don't set a signal stack, so ignore */

      /* always ask for SA_SIGINFO */
      skss_flags |= VKI_SA_SIGINFO;

      /* use our own restorer */
      skss_flags |= VKI_SA_RESTORER;

      /* Create SKSS entry for this signal. */
      if (sig != VKI_SIGKILL && sig != VKI_SIGSTOP)
         dst->skss_per_sig[sig].skss_handler = skss_handler;
      else
         dst->skss_per_sig[sig].skss_handler = VKI_SIG_DFL;

      dst->skss_per_sig[sig].skss_flags   = skss_flags;
   }

   /* Sanity checks. */
   vg_assert(dst->skss_per_sig[VKI_SIGKILL].skss_handler == VKI_SIG_DFL);
   vg_assert(dst->skss_per_sig[VKI_SIGSTOP].skss_handler == VKI_SIG_DFL);

   if (0)
      pp_SKSS();
}


/* ---------------------------------------------------------------------
   After a possible SCSS change, update SKSS and the kernel itself.
   ------------------------------------------------------------------ */

// We need two levels of macro-expansion here to convert __NR_rt_sigreturn
// to a number before converting it to a string... sigh.
extern void my_sigreturn(void);

#if defined(VGP_x86_linux)
#  define _MY_SIGRETURN(name) \
   ".text\n" \
   ".globl my_sigreturn\n" \
   "my_sigreturn:\n" \
   "   movl $" #name ", %eax\n" \
   "   int  $0x80\n" \
   ".previous\n"

#elif defined(VGP_amd64_linux)
#  define _MY_SIGRETURN(name) \
   ".text\n" \
   ".globl my_sigreturn\n" \
   "my_sigreturn:\n" \
   "   movq $" #name ", %rax\n" \
   "   syscall\n" \
   ".previous\n"

#elif defined(VGP_ppc32_linux)
#  define _MY_SIGRETURN(name) \
   ".text\n" \
   ".globl my_sigreturn\n" \
   "my_sigreturn:\n" \
   "   li 0, " #name "\n" \
   "   sc\n" \
   ".previous\n"

#elif defined(VGP_ppc64be_linux)
#  define _MY_SIGRETURN(name) \
   ".align   2\n" \
   ".globl   my_sigreturn\n" \
   ".section \".opd\",\"aw\"\n" \
   ".align   3\n" \
   "my_sigreturn:\n" \
   ".quad    .my_sigreturn,.TOC.@tocbase,0\n" \
   ".previous\n" \
   ".type    .my_sigreturn,@function\n" \
   ".globl   .my_sigreturn\n" \
   ".my_sigreturn:\n" \
   "   li 0, " #name "\n" \
   "   sc\n"

#elif defined(VGP_ppc64le_linux)
/* Little Endian supports ELF version 2.  In the future, it may
 * support other versions.
 */
#  define _MY_SIGRETURN(name) \
   ".align   2\n" \
   ".globl   my_sigreturn\n" \
   ".type    .my_sigreturn,@function\n" \
   "my_sigreturn:\n" \
   "#if _CALL_ELF == 2 \n" \
   "0: addis        2,12,.TOC.-0b@ha\n" \
   "   addi         2,2,.TOC.-0b@l\n" \
   "   .localentry my_sigreturn,.-my_sigreturn\n" \
   "#endif \n" \
   "   sc\n" \
   "   .size my_sigreturn,.-my_sigreturn\n"

#elif defined(VGP_arm_linux)
#  define _MY_SIGRETURN(name) \
   ".text\n" \
   ".globl my_sigreturn\n" \
   "my_sigreturn:\n\t" \
   "    mov  r7, #" #name "\n\t" \
   "    svc  0x00000000\n" \
   ".previous\n"

#elif defined(VGP_arm64_linux)
#  define _MY_SIGRETURN(name) \
   ".text\n" \
   ".globl my_sigreturn\n" \
   "my_sigreturn:\n\t" \
   "    mov  x8, #" #name "\n\t" \
   "    svc  0x0\n" \
   ".previous\n"

#elif defined(VGP_x86_darwin)
#  define _MY_SIGRETURN(name) \
   ".text\n" \
   ".globl my_sigreturn\n" \
   "my_sigreturn:\n" \
   "    movl $" VG_STRINGIFY(__NR_DARWIN_FAKE_SIGRETURN) ",%eax\n" \
   "    int $0x80\n"

#elif defined(VGP_amd64_darwin)
#  define _MY_SIGRETURN(name) \
   ".text\n" \
   ".globl my_sigreturn\n" \
   "my_sigreturn:\n" \
   "    movq $" VG_STRINGIFY(__NR_DARWIN_FAKE_SIGRETURN) ",%rax\n" \
   "    syscall\n"

#elif defined(VGP_s390x_linux)
#  define _MY_SIGRETURN(name) \
   ".text\n" \
   ".globl my_sigreturn\n" \
   "my_sigreturn:\n" \
   " svc " #name "\n" \
   ".previous\n"

#elif defined(VGP_mips32_linux)
#  define _MY_SIGRETURN(name) \
   ".text\n" \
   "my_sigreturn:\n" \
   "   li $2, " #name "\n" /* apparently $2 is v0 */ \
   "   syscall\n" \
   ".previous\n"

#elif defined(VGP_mips64_linux)
#  define _MY_SIGRETURN(name) \
   ".text\n" \
   "my_sigreturn:\n" \
   "   li $2, " #name "\n" \
   "   syscall\n" \
   ".previous\n"

#elif defined(VGP_tilegx_linux)
#  define _MY_SIGRETURN(name) \
   ".text\n" \
   "my_sigreturn:\n" \
   " moveli r10 ," #name "\n" \
   " swint1\n" \
   ".previous\n"

#elif defined(VGP_x86_solaris) || defined(VGP_amd64_solaris)
/* Not used on Solaris. */
#  define _MY_SIGRETURN(name) \
   ".text\n" \
   ".globl my_sigreturn\n" \
   "my_sigreturn:\n" \
   "ud2\n" \
   ".previous\n"

#else
#  error Unknown platform
#endif

#define MY_SIGRETURN(name)  _MY_SIGRETURN(name)
asm(
   MY_SIGRETURN(__NR_rt_sigreturn)
);
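
/* To illustrate the two-level expansion: on x86-linux, where
   __NR_rt_sigreturn is 173, MY_SIGRETURN(__NR_rt_sigreturn) first
   expands its argument, giving _MY_SIGRETURN(173), and only then does
   #name stringify it, yielding "... movl $173, %eax ...".  Invoking
   _MY_SIGRETURN directly would instead stringify the unexpanded token,
   producing "movl $__NR_rt_sigreturn, %eax", which the assembler
   cannot resolve. */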


static void handle_SCSS_change ( Bool force_update )
{
   Int  res, sig;
   SKSS skss_old;
   vki_sigaction_toK_t   ksa;
   vki_sigaction_fromK_t ksa_old;

   /* Remember old SKSS and calculate new one. */
   skss_old = skss;
   calculate_SKSS_from_SCSS ( &skss );

   /* Compare the new SKSS entries vs the old ones, and update kernel
      where they differ. */
   for (sig = 1; sig <= VG_(max_signal); sig++) {

      /* Trying to do anything with SIGKILL or SIGSTOP is pointless;
         just ignore them. */
      if (sig == VKI_SIGKILL || sig == VKI_SIGSTOP)
         continue;

      if (!force_update) {
         if ((skss_old.skss_per_sig[sig].skss_handler
              == skss.skss_per_sig[sig].skss_handler)
             && (skss_old.skss_per_sig[sig].skss_flags
                 == skss.skss_per_sig[sig].skss_flags))
            /* no difference */
            continue;
      }

      ksa.ksa_handler = skss.skss_per_sig[sig].skss_handler;
      ksa.sa_flags    = skss.skss_per_sig[sig].skss_flags;
#     if !defined(VGP_ppc32_linux) && \
         !defined(VGP_x86_darwin) && !defined(VGP_amd64_darwin) && \
         !defined(VGP_mips32_linux) && !defined(VGO_solaris)
      ksa.sa_restorer = my_sigreturn;
#     endif
      /* Re above ifdef (also the assertion below), PaulM says:
         The sa_restorer field is not used at all on ppc.  Glibc
         converts the sigaction you give it into a kernel sigaction,
         but it doesn't put anything in the sa_restorer field.
      */

      /* block all signals in handler */
      VG_(sigfillset)( &ksa.sa_mask );
      VG_(sigdelset)( &ksa.sa_mask, VKI_SIGKILL );
      VG_(sigdelset)( &ksa.sa_mask, VKI_SIGSTOP );

      if (VG_(clo_trace_signals) && VG_(clo_verbosity) > 2)
         VG_(dmsg)("setting ksig %d to: hdlr %p, flags 0x%lx, "
                   "mask(msb..lsb) 0x%llx 0x%llx\n",
                   sig, ksa.ksa_handler,
                   (UWord)ksa.sa_flags,
                   _VKI_NSIG_WORDS > 1 ? (ULong)ksa.sa_mask.sig[1] : 0,
                   (ULong)ksa.sa_mask.sig[0]);

      res = VG_(sigaction)( sig, &ksa, &ksa_old );
      vg_assert(res == 0);

      /* Since we got the old sigaction more or less for free, might
         as well extract the maximum sanity-check value from it. */
      if (!force_update) {
         vg_assert(ksa_old.ksa_handler
                   == skss_old.skss_per_sig[sig].skss_handler);
#        if defined(VGO_solaris)
         if (ksa_old.ksa_handler == VKI_SIG_DFL
               || ksa_old.ksa_handler == VKI_SIG_IGN) {
            /* The Solaris kernel ignores signal flags (except SA_NOCLDWAIT
               and SA_NOCLDSTOP) and a signal mask if a handler is set to
               SIG_DFL or SIG_IGN. */
            skss_old.skss_per_sig[sig].skss_flags
               &= (VKI_SA_NOCLDWAIT | VKI_SA_NOCLDSTOP);
            vg_assert(VG_(isemptysigset)( &ksa_old.sa_mask ));
            VG_(sigfillset)( &ksa_old.sa_mask );
         }
#        endif
         vg_assert(ksa_old.sa_flags
                   == skss_old.skss_per_sig[sig].skss_flags);
#        if !defined(VGP_ppc32_linux) && \
            !defined(VGP_x86_darwin) && !defined(VGP_amd64_darwin) && \
            !defined(VGP_mips32_linux) && !defined(VGP_mips64_linux) && \
            !defined(VGO_solaris)
         vg_assert(ksa_old.sa_restorer == my_sigreturn);
#        endif
         VG_(sigaddset)( &ksa_old.sa_mask, VKI_SIGKILL );
         VG_(sigaddset)( &ksa_old.sa_mask, VKI_SIGSTOP );
         vg_assert(VG_(isfullsigset)( &ksa_old.sa_mask ));
      }
   }
}


/* ---------------------------------------------------------------------
   Update/query SCSS in accordance with client requests.
   ------------------------------------------------------------------ */

/* Logic for this alt-stack stuff copied directly from do_sigaltstack
   in kernel/signal.[ch] */

/* True if we are on the alternate signal stack.  */
static Bool on_sig_stack ( ThreadId tid, Addr m_SP )
{
   ThreadState *tst = VG_(get_ThreadState)(tid);

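   /* Note the unsigned-wraparound trick: if m_SP is below ss_sp, the
      subtraction wraps to a huge value and the comparison fails, so
      this single test is equivalent to
      ss_sp <= m_SP < ss_sp + ss_size. */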
   return (m_SP - (Addr)tst->altstack.ss_sp < (Addr)tst->altstack.ss_size);
}

static Int sas_ss_flags ( ThreadId tid, Addr m_SP )
{
   ThreadState *tst = VG_(get_ThreadState)(tid);

   return (tst->altstack.ss_size == 0
              ? VKI_SS_DISABLE
              : on_sig_stack(tid, m_SP) ? VKI_SS_ONSTACK : 0);
}


SysRes VG_(do_sys_sigaltstack) ( ThreadId tid, vki_stack_t* ss, vki_stack_t* oss )
{
   Addr m_SP;

   vg_assert(VG_(is_valid_tid)(tid));
   m_SP  = VG_(get_SP)(tid);

   if (VG_(clo_trace_signals))
      VG_(dmsg)("sys_sigaltstack: tid %u, "
                "ss %p{%p,sz=%llu,flags=0x%llx}, oss %p (current SP %p)\n",
                tid, (void*)ss,
                ss ? ss->ss_sp : 0,
                (ULong)(ss ? ss->ss_size : 0),
                (ULong)(ss ? ss->ss_flags : 0),
                (void*)oss, (void*)m_SP);

   if (oss != NULL) {
      oss->ss_sp    = VG_(threads)[tid].altstack.ss_sp;
      oss->ss_size  = VG_(threads)[tid].altstack.ss_size;
      oss->ss_flags = VG_(threads)[tid].altstack.ss_flags
                      | sas_ss_flags(tid, m_SP);
   }

   if (ss != NULL) {
      if (on_sig_stack(tid, VG_(get_SP)(tid))) {
         return VG_(mk_SysRes_Error)( VKI_EPERM );
      }
      if (ss->ss_flags != VKI_SS_DISABLE
          && ss->ss_flags != VKI_SS_ONSTACK
          && ss->ss_flags != 0) {
         return VG_(mk_SysRes_Error)( VKI_EINVAL );
      }
      if (ss->ss_flags == VKI_SS_DISABLE) {
         VG_(threads)[tid].altstack.ss_flags = VKI_SS_DISABLE;
      } else {
         if (ss->ss_size < VKI_MINSIGSTKSZ) {
            return VG_(mk_SysRes_Error)( VKI_ENOMEM );
         }

         VG_(threads)[tid].altstack.ss_sp    = ss->ss_sp;
         VG_(threads)[tid].altstack.ss_size  = ss->ss_size;
         VG_(threads)[tid].altstack.ss_flags = 0;
      }
   }
   return VG_(mk_SysRes_Success)( 0 );
}


SysRes VG_(do_sys_sigaction) ( Int signo,
                               const vki_sigaction_toK_t* new_act,
                               vki_sigaction_fromK_t* old_act )
{
   if (VG_(clo_trace_signals))
      VG_(dmsg)("sys_sigaction: sigNo %d, "
                "new %#lx, old %#lx, new flags 0x%llx\n",
                signo, (UWord)new_act, (UWord)old_act,
                (ULong)(new_act ? new_act->sa_flags : 0));

   /* Rule out various error conditions.  The aim is to ensure that
      when the call is passed to the kernel it will definitely
      succeed. */

   /* Reject out-of-range signal numbers. */
   if (signo < 1 || signo > VG_(max_signal)) goto bad_signo;

   /* don't let them use our signals */
   if ( (signo > VG_SIGVGRTUSERMAX)
        && new_act
        && !(new_act->ksa_handler == VKI_SIG_DFL
             || new_act->ksa_handler == VKI_SIG_IGN) )
      goto bad_signo_reserved;

   /* Reject attempts to set a handler (or set ignore) for SIGKILL
      or SIGSTOP. */
   if ( (signo == VKI_SIGKILL || signo == VKI_SIGSTOP)
       && new_act
       && new_act->ksa_handler != VKI_SIG_DFL)
      goto bad_sigkill_or_sigstop;

   /* If the client supplied non-NULL old_act, copy the relevant SCSS
      entry into it. */
   if (old_act) {
      old_act->ksa_handler = scss.scss_per_sig[signo].scss_handler;
      old_act->sa_flags    = scss.scss_per_sig[signo].scss_flags;
      old_act->sa_mask     = scss.scss_per_sig[signo].scss_mask;
#     if !defined(VGP_x86_darwin) && !defined(VGP_amd64_darwin) && \
         !defined(VGO_solaris)
      old_act->sa_restorer = scss.scss_per_sig[signo].scss_restorer;
#     endif
   }

   /* And now copy new SCSS entry from new_act. */
   if (new_act) {
      scss.scss_per_sig[signo].scss_handler  = new_act->ksa_handler;
      scss.scss_per_sig[signo].scss_flags    = new_act->sa_flags;
      scss.scss_per_sig[signo].scss_mask     = new_act->sa_mask;

      scss.scss_per_sig[signo].scss_restorer = NULL;
#     if !defined(VGP_x86_darwin) && !defined(VGP_amd64_darwin) && \
         !defined(VGO_solaris)
      scss.scss_per_sig[signo].scss_restorer = new_act->sa_restorer;
#     endif

      scss.scss_per_sig[signo].scss_sa_tramp = NULL;
#     if defined(VGP_x86_darwin) || defined(VGP_amd64_darwin)
      scss.scss_per_sig[signo].scss_sa_tramp = new_act->sa_tramp;
#     endif

      VG_(sigdelset)(&scss.scss_per_sig[signo].scss_mask, VKI_SIGKILL);
      VG_(sigdelset)(&scss.scss_per_sig[signo].scss_mask, VKI_SIGSTOP);
   }

   /* All happy bunnies ... */
   if (new_act) {
      handle_SCSS_change( False /* lazy update */ );
   }
   return VG_(mk_SysRes_Success)( 0 );

  bad_signo:
   if (VG_(showing_core_errors)() && !VG_(clo_xml)) {
      VG_(umsg)("Warning: bad signal number %d in sigaction()\n", signo);
   }
   return VG_(mk_SysRes_Error)( VKI_EINVAL );

  bad_signo_reserved:
   if (VG_(showing_core_errors)() && !VG_(clo_xml)) {
      VG_(umsg)("Warning: ignored attempt to set %s handler in sigaction();\n",
                VG_(signame)(signo));
      VG_(umsg)("         the %s signal is used internally by Valgrind\n",
                VG_(signame)(signo));
   }
   return VG_(mk_SysRes_Error)( VKI_EINVAL );

  bad_sigkill_or_sigstop:
   if (VG_(showing_core_errors)() && !VG_(clo_xml)) {
      VG_(umsg)("Warning: ignored attempt to set %s handler in sigaction();\n",
                VG_(signame)(signo));
      VG_(umsg)("         the %s signal is uncatchable\n",
                VG_(signame)(signo));
   }
   return VG_(mk_SysRes_Error)( VKI_EINVAL );
}


static
void do_sigprocmask_bitops ( Int vki_how,
                             vki_sigset_t* orig_set,
                             vki_sigset_t* modifier )
{
   switch (vki_how) {
      case VKI_SIG_BLOCK:
         VG_(sigaddset_from_set)( orig_set, modifier );
         break;
      case VKI_SIG_UNBLOCK:
         VG_(sigdelset_from_set)( orig_set, modifier );
         break;
      case VKI_SIG_SETMASK:
         *orig_set = *modifier;
         break;
      default:
         VG_(core_panic)("do_sigprocmask_bitops");
         break;
   }
}

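/* Render a signal set as hex digits, most significant word first.
   Note the result points at a static buffer, so it is overwritten by
   the next call and is only valid until then. */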
1304 static
format_sigset(const vki_sigset_t * set)1305 HChar* format_sigset ( const vki_sigset_t* set )
1306 {
1307    static HChar buf[_VKI_NSIG_WORDS * 16 + 1];
1308    int w;
1309 
1310    VG_(strcpy)(buf, "");
1311 
1312    for (w = _VKI_NSIG_WORDS - 1; w >= 0; w--)
1313    {
1314 #     if _VKI_NSIG_BPW == 32
1315       VG_(sprintf)(buf + VG_(strlen)(buf), "%08llx",
1316                    set ? (ULong)set->sig[w] : 0);
1317 #     elif _VKI_NSIG_BPW == 64
1318       VG_(sprintf)(buf + VG_(strlen)(buf), "%16llx",
1319                    set ? (ULong)set->sig[w] : 0);
1320 #     else
1321 #       error "Unsupported value for _VKI_NSIG_BPW"
1322 #     endif
1323    }
1324 
1325    return buf;
1326 }
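
/* For illustration: with 64 signals and 32-bit mask words
   (_VKI_NSIG_WORDS == 2), a set containing only SIGINT (signal 2,
   ie. bit 1 of word 0) formats as "0000000000000002".  Note the
   result lives in a static buffer, so it is overwritten by the next
   call; that's acceptable here since it only feeds trace output. */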

/*
   This updates the thread's signal mask.  There's no such thing as a
   process-wide signal mask.

   Note that the thread signal masks are an implicit part of SCSS,
   which is why this routine is allowed to mess with them.
*/
static
void do_setmask ( ThreadId tid,
                  Int how,
                  vki_sigset_t* newset,
                  vki_sigset_t* oldset )
{
   if (VG_(clo_trace_signals))
      VG_(dmsg)("do_setmask: tid = %u how = %d (%s), newset = %p (%s)\n",
                tid, how,
                how==VKI_SIG_BLOCK ? "SIG_BLOCK" : (
                   how==VKI_SIG_UNBLOCK ? "SIG_UNBLOCK" : (
                      how==VKI_SIG_SETMASK ? "SIG_SETMASK" : "???")),
                newset, newset ? format_sigset(newset) : "NULL" );

   /* Just do this thread. */
   vg_assert(VG_(is_valid_tid)(tid));
   if (oldset) {
      *oldset = VG_(threads)[tid].sig_mask;
      if (VG_(clo_trace_signals))
         VG_(dmsg)("\toldset=%p %s\n", oldset, format_sigset(oldset));
   }
   if (newset) {
      do_sigprocmask_bitops (how, &VG_(threads)[tid].sig_mask, newset );
      VG_(sigdelset)(&VG_(threads)[tid].sig_mask, VKI_SIGKILL);
      VG_(sigdelset)(&VG_(threads)[tid].sig_mask, VKI_SIGSTOP);
      VG_(threads)[tid].tmp_sig_mask = VG_(threads)[tid].sig_mask;
   }
}
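
/* For illustration: if the client calls
   sigprocmask(SIG_BLOCK, { SIGKILL, SIGUSR1 }, NULL), we end up here
   via VG_(do_sys_sigprocmask); SIGUSR1 is added to the thread's
   sig_mask, while the SIGKILL bit is silently dropped above, so the
   client sees the same can't-block-SIGKILL/SIGSTOP behaviour the
   kernel would give it. */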


SysRes VG_(do_sys_sigprocmask) ( ThreadId tid,
                                 Int how,
                                 vki_sigset_t* set,
                                 vki_sigset_t* oldset )
{
   switch(how) {
      case VKI_SIG_BLOCK:
      case VKI_SIG_UNBLOCK:
      case VKI_SIG_SETMASK:
         vg_assert(VG_(is_valid_tid)(tid));
         do_setmask ( tid, how, set, oldset );
         return VG_(mk_SysRes_Success)( 0 );

      default:
         VG_(dmsg)("sigprocmask: unknown 'how' field %d\n", how);
         return VG_(mk_SysRes_Error)( VKI_EINVAL );
   }
}


/* ---------------------------------------------------------------------
   LOW LEVEL STUFF TO DO WITH SIGNALS: IMPLEMENTATION
   ------------------------------------------------------------------ */

/* ---------------------------------------------------------------------
   Handy utilities to block/restore all host signals.
   ------------------------------------------------------------------ */

/* Block all host signals, dumping the old mask in *saved_mask. */
static void block_all_host_signals ( /* OUT */ vki_sigset_t* saved_mask )
{
   Int          ret;
   vki_sigset_t block_procmask;
   VG_(sigfillset)(&block_procmask);
   ret = VG_(sigprocmask)
            (VKI_SIG_SETMASK, &block_procmask, saved_mask);
   vg_assert(ret == 0);
}

/* Restore the blocking mask using the supplied saved one. */
static void restore_all_host_signals ( /* IN */ vki_sigset_t* saved_mask )
{
   Int ret;
   ret = VG_(sigprocmask)(VKI_SIG_SETMASK, saved_mask, NULL);
   vg_assert(ret == 0);
}

void VG_(clear_out_queued_signals)( ThreadId tid, vki_sigset_t* saved_mask )
{
   block_all_host_signals(saved_mask);
   if (VG_(threads)[tid].sig_queue != NULL) {
      VG_(free)(VG_(threads)[tid].sig_queue);
      VG_(threads)[tid].sig_queue = NULL;
   }
   restore_all_host_signals(saved_mask);
}
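
/* For illustration, the pattern used above (and in queue_signal below)
   for touching signal state safely is:

      vki_sigset_t saved;
      block_all_host_signals(&saved);
      ... inspect/mutate the queue or SCSS state ...
      restore_all_host_signals(&saved);

   which guarantees no async handler can run inside the critical
   section. */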

/* ---------------------------------------------------------------------
   The signal simulation proper.  A simplified version of what the
   Linux kernel does.
   ------------------------------------------------------------------ */

/* Set up a stack frame (VgSigContext) for the client's signal
   handler. */
static
void push_signal_frame ( ThreadId tid, const vki_siginfo_t *siginfo,
                                       const struct vki_ucontext *uc )
{
   Bool         on_altstack;
   Addr         esp_top_of_frame;
   ThreadState* tst;
   Int          sigNo = siginfo->si_signo;

   vg_assert(sigNo >= 1 && sigNo <= VG_(max_signal));
   vg_assert(VG_(is_valid_tid)(tid));
   tst = & VG_(threads)[tid];

   if (VG_(clo_trace_signals)) {
      VG_(dmsg)("push_signal_frame (thread %u): signal %d\n", tid, sigNo);
      VG_(get_and_pp_StackTrace)(tid, 10);
   }

   if (/* this signal asked to run on an alt stack */
       (scss.scss_per_sig[sigNo].scss_flags & VKI_SA_ONSTACK )
       && /* there is a defined and enabled alt stack, which we're not
             already using.  Logic from get_sigframe in
             arch/i386/kernel/signal.c. */
          sas_ss_flags(tid, VG_(get_SP)(tid)) == 0
      ) {
      on_altstack = True;
      esp_top_of_frame
         = (Addr)(tst->altstack.ss_sp) + tst->altstack.ss_size;
      if (VG_(clo_trace_signals))
         VG_(dmsg)("delivering signal %d (%s) to thread %u: "
                   "on ALT STACK (%p-%p; %ld bytes)\n",
                   sigNo, VG_(signame)(sigNo), tid, tst->altstack.ss_sp,
                   (UChar *)tst->altstack.ss_sp + tst->altstack.ss_size,
                   (Word)tst->altstack.ss_size );
   } else {
      on_altstack = False;
      esp_top_of_frame = VG_(get_SP)(tid) - VG_STACK_REDZONE_SZB;
   }

   /* Signal delivery to tools */
   VG_TRACK( pre_deliver_signal, tid, sigNo, on_altstack );

   vg_assert(scss.scss_per_sig[sigNo].scss_handler != VKI_SIG_IGN);
   vg_assert(scss.scss_per_sig[sigNo].scss_handler != VKI_SIG_DFL);

   /* This may fail if the client stack is busted; if that happens,
      the whole process will exit rather than simply calling the
      signal handler. */
   VG_(sigframe_create) (tid, on_altstack, esp_top_of_frame, siginfo, uc,
                         scss.scss_per_sig[sigNo].scss_handler,
                         scss.scss_per_sig[sigNo].scss_flags,
                         &tst->sig_mask,
                         scss.scss_per_sig[sigNo].scss_restorer);
}
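
/* For illustration: a client that does sigaltstack(&ss, NULL) and then
   sigaction(SIGSEGV, ...) with SA_ONSTACK set takes the first branch
   above, so its SIGSEGV frame is built down from
   ss.ss_sp + ss.ss_size rather than below the faulting SP -- the same
   decision get_sigframe makes in the kernel. */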


const HChar *VG_(signame)(Int sigNo)
{
   static HChar buf[20];  // large enough

   switch(sigNo) {
      case VKI_SIGHUP:    return "SIGHUP";
      case VKI_SIGINT:    return "SIGINT";
      case VKI_SIGQUIT:   return "SIGQUIT";
      case VKI_SIGILL:    return "SIGILL";
      case VKI_SIGTRAP:   return "SIGTRAP";
      case VKI_SIGABRT:   return "SIGABRT";
      case VKI_SIGBUS:    return "SIGBUS";
      case VKI_SIGFPE:    return "SIGFPE";
      case VKI_SIGKILL:   return "SIGKILL";
      case VKI_SIGUSR1:   return "SIGUSR1";
      case VKI_SIGUSR2:   return "SIGUSR2";
      case VKI_SIGSEGV:   return "SIGSEGV";
      case VKI_SIGSYS:    return "SIGSYS";
      case VKI_SIGPIPE:   return "SIGPIPE";
      case VKI_SIGALRM:   return "SIGALRM";
      case VKI_SIGTERM:   return "SIGTERM";
#     if defined(VKI_SIGSTKFLT)
      case VKI_SIGSTKFLT: return "SIGSTKFLT";
#     endif
      case VKI_SIGCHLD:   return "SIGCHLD";
      case VKI_SIGCONT:   return "SIGCONT";
      case VKI_SIGSTOP:   return "SIGSTOP";
      case VKI_SIGTSTP:   return "SIGTSTP";
      case VKI_SIGTTIN:   return "SIGTTIN";
      case VKI_SIGTTOU:   return "SIGTTOU";
      case VKI_SIGURG:    return "SIGURG";
      case VKI_SIGXCPU:   return "SIGXCPU";
      case VKI_SIGXFSZ:   return "SIGXFSZ";
      case VKI_SIGVTALRM: return "SIGVTALRM";
      case VKI_SIGPROF:   return "SIGPROF";
      case VKI_SIGWINCH:  return "SIGWINCH";
      case VKI_SIGIO:     return "SIGIO";
#     if defined(VKI_SIGPWR)
      case VKI_SIGPWR:    return "SIGPWR";
#     endif
#     if defined(VKI_SIGUNUSED) && (VKI_SIGUNUSED != VKI_SIGSYS)
      case VKI_SIGUNUSED: return "SIGUNUSED";
#     endif

      /* Solaris-specific signals. */
#     if defined(VKI_SIGEMT)
      case VKI_SIGEMT:    return "SIGEMT";
#     endif
#     if defined(VKI_SIGWAITING)
      case VKI_SIGWAITING: return "SIGWAITING";
#     endif
#     if defined(VKI_SIGLWP)
      case VKI_SIGLWP:    return "SIGLWP";
#     endif
#     if defined(VKI_SIGFREEZE)
      case VKI_SIGFREEZE: return "SIGFREEZE";
#     endif
#     if defined(VKI_SIGTHAW)
      case VKI_SIGTHAW:   return "SIGTHAW";
#     endif
#     if defined(VKI_SIGCANCEL)
      case VKI_SIGCANCEL: return "SIGCANCEL";
#     endif
#     if defined(VKI_SIGLOST)
      case VKI_SIGLOST:   return "SIGLOST";
#     endif
#     if defined(VKI_SIGXRES)
      case VKI_SIGXRES:   return "SIGXRES";
#     endif
#     if defined(VKI_SIGJVM1)
      case VKI_SIGJVM1:   return "SIGJVM1";
#     endif
#     if defined(VKI_SIGJVM2)
      case VKI_SIGJVM2:   return "SIGJVM2";
#     endif

#     if defined(VKI_SIGRTMIN) && defined(VKI_SIGRTMAX)
      case VKI_SIGRTMIN ... VKI_SIGRTMAX:
         VG_(sprintf)(buf, "SIGRT%d", sigNo-VKI_SIGRTMIN);
         return buf;
#     endif

      default:
         VG_(sprintf)(buf, "SIG%d", sigNo);
         return buf;
   }
}
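
/* For illustration: VG_(signame)(VKI_SIGSEGV) returns "SIGSEGV", and a
   realtime signal such as VKI_SIGRTMIN+2 formats as "SIGRT2".  The
   formatted cases share the static buffer above, so the result must
   be consumed (eg. printed) before the next call. */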

/* Hit ourselves with a signal using the default handler */
void VG_(kill_self)(Int sigNo)
{
   Int r;
   vki_sigset_t          mask, origmask;
   vki_sigaction_toK_t   sa, origsa2;
   vki_sigaction_fromK_t origsa;

   sa.ksa_handler = VKI_SIG_DFL;
   sa.sa_flags = 0;
#  if !defined(VGP_x86_darwin) && !defined(VGP_amd64_darwin) && \
      !defined(VGO_solaris)
   sa.sa_restorer = 0;
#  endif
   VG_(sigemptyset)(&sa.sa_mask);

   VG_(sigaction)(sigNo, &sa, &origsa);

   VG_(sigemptyset)(&mask);
   VG_(sigaddset)(&mask, sigNo);
   VG_(sigprocmask)(VKI_SIG_UNBLOCK, &mask, &origmask);

   r = VG_(kill)(VG_(getpid)(), sigNo);
#  if !defined(VGO_darwin)
   /* This sometimes fails with EPERM on Darwin.  I don't know why. */
   vg_assert(r == 0);
#  endif

   VG_(convert_sigaction_fromK_to_toK)( &origsa, &origsa2 );
   VG_(sigaction)(sigNo, &origsa2, NULL);
   VG_(sigprocmask)(VKI_SIG_SETMASK, &origmask, NULL);
}

// The si_code describes where the signal came from.  Some come from the
// kernel, eg.: seg faults, illegal opcodes.  Some come from the user, eg.:
// from kill() (SI_USER), or timer_settime() (SI_TIMER), or an async I/O
// request (SI_ASYNCIO).  There's lots of implementation-defined leeway in
// POSIX, but the user vs. kernel distinction is what we want here.  We also
// pass in some other details that can help when si_code is unreliable.
static Bool is_signal_from_kernel(ThreadId tid, int signum, int si_code)
{
#  if defined(VGO_linux) || defined(VGO_solaris)
   // On Linux, SI_USER is zero, negative values are from the user, positive
   // values are from the kernel.  There are SI_FROMUSER and SI_FROMKERNEL
   // macros but we don't use them here because other platforms don't have
   // them.
   return ( si_code > VKI_SI_USER ? True : False );

#  elif defined(VGO_darwin)
   // On Darwin 9.6.0, the si_code is completely unreliable.  It should be the
   // case that 0 means "user", and >0 means "kernel".  But:
   // - For SIGSEGV, it seems quite reliable.
   // - For SIGBUS, it's always 2.
   // - For SIGFPE, it's often 0, even for kernel ones (eg.
   //   div-by-integer-zero always gives zero).
   // - For SIGILL, it's unclear.
   // - For SIGTRAP, it's always 1.
   // You can see the "NOTIMP" (not implemented) status of a number of the
   // sub-cases in sys/signal.h.  Hopefully future versions of Darwin will
   // get this right.

   // If we're blocked waiting on a syscall, it must be a user signal, because
   // the kernel won't generate sync signals within syscalls.
   if (VG_(threads)[tid].status == VgTs_WaitSys) {
      return False;

   // If it's a SIGSEGV, use the proper condition, since it's fairly reliable.
   } else if (VKI_SIGSEGV == signum) {
      return ( si_code > 0 ? True : False );

   // If it's anything else, assume it's kernel-generated.  Reason being that
   // kernel-generated sync signals are more common, and it's probable that
   // misdiagnosing a user signal as a kernel signal is better than the
   // opposite.
   } else {
      return True;
   }
#  else
#    error Unknown OS
#  endif
}
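
/* For illustration, on Linux: kill(pid, SIGSEGV) arrives with
   si_code == SI_USER (0), so is_signal_from_kernel() returns False,
   whereas a genuine fault arrives with eg. SEGV_MAPERR (1), which is
   > 0 and so returns True. */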

/*
   Perform the default action of a signal.  If the signal is fatal, it
   marks all threads as needing to exit, but it doesn't actually kill
   the process or thread.

   If we're not being quiet, then print out some more detail about
   fatal signals (esp. core dumping signals).
 */
static void default_action(const vki_siginfo_t *info, ThreadId tid)
{
   Int  sigNo     = info->si_signo;
   Bool terminate = False;      /* kills process         */
   Bool core      = False;      /* kills process w/ core */
   struct vki_rlimit corelim;
   Bool could_core;

   vg_assert(VG_(is_running_thread)(tid));

   switch(sigNo) {
      case VKI_SIGQUIT:   /* core */
      case VKI_SIGILL:    /* core */
      case VKI_SIGABRT:   /* core */
      case VKI_SIGFPE:    /* core */
      case VKI_SIGSEGV:   /* core */
      case VKI_SIGBUS:    /* core */
      case VKI_SIGTRAP:   /* core */
      case VKI_SIGSYS:    /* core */
      case VKI_SIGXCPU:   /* core */
      case VKI_SIGXFSZ:   /* core */

      /* Solaris-specific signals. */
#     if defined(VKI_SIGEMT)
      case VKI_SIGEMT:    /* core */
#     endif

         terminate = True;
         core = True;
         break;

      case VKI_SIGHUP:    /* term */
      case VKI_SIGINT:    /* term */
      case VKI_SIGKILL:   /* term - we won't see this */
      case VKI_SIGPIPE:   /* term */
      case VKI_SIGALRM:   /* term */
      case VKI_SIGTERM:   /* term */
      case VKI_SIGUSR1:   /* term */
      case VKI_SIGUSR2:   /* term */
      case VKI_SIGIO:     /* term */
#     if defined(VKI_SIGPWR)
      case VKI_SIGPWR:    /* term */
#     endif
      case VKI_SIGPROF:   /* term */
      case VKI_SIGVTALRM: /* term */
#     if defined(VKI_SIGRTMIN) && defined(VKI_SIGRTMAX)
      case VKI_SIGRTMIN ... VKI_SIGRTMAX: /* term */
#     endif

      /* Solaris-specific signals. */
#     if defined(VKI_SIGLOST)
      case VKI_SIGLOST:   /* term */
#     endif

         terminate = True;
         break;
   }

   vg_assert(!core || (core && terminate));

   if (VG_(clo_trace_signals))
      VG_(dmsg)("delivering %d (code %d) to default handler; action: %s%s\n",
                sigNo, info->si_code, terminate ? "terminate" : "ignore",
                core ? "+core" : "");

   if (!terminate)
      return;                   /* nothing to do */

   could_core = core;

   if (core) {
      /* If they set the core-size limit to zero, don't generate a
         core file */

      VG_(getrlimit)(VKI_RLIMIT_CORE, &corelim);

      if (corelim.rlim_cur == 0)
         core = False;
   }

   if ( (VG_(clo_verbosity) >= 1 ||
         (could_core && is_signal_from_kernel(tid, sigNo, info->si_code))
        ) &&
        !VG_(clo_xml) ) {
      VG_(umsg)(
         "\n"
         "Process terminating with default action of signal %d (%s)%s\n",
         sigNo, VG_(signame)(sigNo), core ? ": dumping core" : "");

      /* Be helpful - decode some more details about this fault */
      if (is_signal_from_kernel(tid, sigNo, info->si_code)) {
         const HChar *event = NULL;
         Bool haveaddr = True;

         switch(sigNo) {
         case VKI_SIGSEGV:
            switch(info->si_code) {
            case VKI_SEGV_MAPERR: event = "Access not within mapped region";
                                  break;
            case VKI_SEGV_ACCERR: event = "Bad permissions for mapped region";
                                  break;
            case VKI_SEGV_MADE_UP_GPF:
               /* General Protection Fault: The CPU/kernel
                  isn't telling us anything useful, but this
                  is commonly the result of exceeding a
                  segment limit. */
               event = "General Protection Fault";
               haveaddr = False;
               break;
            }
#if 0
            {
              HChar buf[50];  // large enough
              VG_(am_show_nsegments)(0,"post segfault");
              VG_(sprintf)(buf, "/bin/cat /proc/%d/maps", VG_(getpid)());
              VG_(system)(buf);
            }
#endif
            break;

         case VKI_SIGILL:
            switch(info->si_code) {
            case VKI_ILL_ILLOPC: event = "Illegal opcode"; break;
            case VKI_ILL_ILLOPN: event = "Illegal operand"; break;
            case VKI_ILL_ILLADR: event = "Illegal addressing mode"; break;
            case VKI_ILL_ILLTRP: event = "Illegal trap"; break;
            case VKI_ILL_PRVOPC: event = "Privileged opcode"; break;
            case VKI_ILL_PRVREG: event = "Privileged register"; break;
            case VKI_ILL_COPROC: event = "Coprocessor error"; break;
            case VKI_ILL_BADSTK: event = "Internal stack error"; break;
            }
            break;

         case VKI_SIGFPE:
            switch (info->si_code) {
            case VKI_FPE_INTDIV: event = "Integer divide by zero"; break;
            case VKI_FPE_INTOVF: event = "Integer overflow"; break;
            case VKI_FPE_FLTDIV: event = "FP divide by zero"; break;
            case VKI_FPE_FLTOVF: event = "FP overflow"; break;
            case VKI_FPE_FLTUND: event = "FP underflow"; break;
            case VKI_FPE_FLTRES: event = "FP inexact"; break;
            case VKI_FPE_FLTINV: event = "FP invalid operation"; break;
            case VKI_FPE_FLTSUB: event = "FP subscript out of range"; break;

            /* Solaris-specific codes. */
#           if defined(VKI_FPE_FLTDEN)
            case VKI_FPE_FLTDEN: event = "FP denormalize"; break;
#           endif
            }
            break;

         case VKI_SIGBUS:
            switch (info->si_code) {
            case VKI_BUS_ADRALN: event = "Invalid address alignment"; break;
            case VKI_BUS_ADRERR: event = "Non-existent physical address"; break;
            case VKI_BUS_OBJERR: event = "Hardware error"; break;
            }
            break;
         } /* switch (sigNo) */

         if (event != NULL) {
            if (haveaddr)
               VG_(umsg)(" %s at address %p\n",
                         event, info->VKI_SIGINFO_si_addr);
            else
               VG_(umsg)(" %s\n", event);
         }
      }
      /* Print a stack trace.  Be cautious if the thread's SP is in an
         obviously stupid place (not mapped readable) that would
         likely cause a segfault. */
      if (VG_(is_valid_tid)(tid)) {
         Word first_ip_delta = 0;
#if defined(VGO_linux) || defined(VGO_solaris)
         /* Make sure that the address stored in the stack pointer is
            located in a mapped page. That is not necessarily so. E.g.
            consider the scenario where the stack pointer was decreased
            and now has a value that is just below the end of a page that has
            not been mapped yet. In that case VG_(am_is_valid_for_client)
            will consider the address of the stack pointer invalid and that
            would cause a back-trace of depth 1 to be printed, instead of a
            full back-trace. */
         if (tid == 1) {           // main thread
            Addr esp  = VG_(get_SP)(tid);
            Addr base = VG_PGROUNDDN(esp - VG_STACK_REDZONE_SZB);
            if (VG_(am_addr_is_in_extensible_client_stack)(base) &&
                VG_(extend_stack)(tid, base)) {
               if (VG_(clo_trace_signals))
                  VG_(dmsg)("       -> extended stack base to %#lx\n",
                            VG_PGROUNDDN(esp));
            }
         }
#endif
#if defined(VGA_s390x)
         if (sigNo == VKI_SIGILL) {
            /* The guest instruction address has been adjusted earlier to
               point to the insn following the one that could not be decoded.
               When printing the back-trace here we need to undo that
               adjustment so the first line in the back-trace reports the
               correct address. */
            Addr  addr = (Addr)info->VKI_SIGINFO_si_addr;
            UChar byte = ((UChar *)addr)[0];
            Int   insn_length = ((((byte >> 6) + 1) >> 1) + 1) << 1;

            first_ip_delta = -insn_length;
         }
#endif
         ExeContext* ec = VG_(am_is_valid_for_client)
                             (VG_(get_SP)(tid), sizeof(Addr), VKI_PROT_READ)
                        ? VG_(record_ExeContext)( tid, first_ip_delta )
                      : VG_(record_depth_1_ExeContext)( tid,
                                                        first_ip_delta );
         vg_assert(ec);
         VG_(pp_ExeContext)( ec );
      }
      if (sigNo == VKI_SIGSEGV
          && is_signal_from_kernel(tid, sigNo, info->si_code)
          && info->si_code == VKI_SEGV_MAPERR) {
         VG_(umsg)(" If you believe this happened as a result of a stack\n" );
         VG_(umsg)(" overflow in your program's main thread (unlikely but\n");
         VG_(umsg)(" possible), you can try to increase the size of the\n"  );
         VG_(umsg)(" main thread stack using the --main-stacksize= flag.\n" );
         // FIXME: assumes main ThreadId == 1
         if (VG_(is_valid_tid)(1)) {
            VG_(umsg)(
               " The main thread stack size used in this run was %lu.\n",
               VG_(threads)[1].client_stack_szB);
         }
      }
   }

   if (VG_(clo_vgdb) != Vg_VgdbNo
       && VG_(dyn_vgdb_error) <= VG_(get_n_errs_shown)() + 1) {
      /* Note: we add + 1 to n_errs_shown as the fatal signal was not
         reported through error msg, and so was not counted. */
      VG_(gdbserver_report_fatal_signal) (info, tid);
   }

   if (core) {
      static const struct vki_rlimit zero = { 0, 0 };

      VG_(make_coredump)(tid, info, corelim.rlim_cur);

      /* Make sure we don't get a confusing kernel-generated
         coredump when we finally exit */
      VG_(setrlimit)(VKI_RLIMIT_CORE, &zero);
   }

   /* stash fatal signal in main thread */
   // what's this for?
   //VG_(threads)[VG_(master_tid)].os_state.fatalsig = sigNo;

   /* everyone dies */
   VG_(nuke_all_threads_except)(tid, VgSrc_FatalSig);
   VG_(threads)[tid].exitreason = VgSrc_FatalSig;
   VG_(threads)[tid].os_state.fatalsig = sigNo;
}

/*
   This does the business of delivering a signal to a thread.  It may
   be called from either a real signal handler, or from normal code to
   cause the thread to enter the signal handler.

   This updates the thread state, but it does not set it to be
   Runnable.
*/
static void deliver_signal ( ThreadId tid, const vki_siginfo_t *info,
                                           const struct vki_ucontext *uc )
{
   Int              sigNo = info->si_signo;
   SCSS_Per_Signal* handler = &scss.scss_per_sig[sigNo];
   void*            handler_fn;
   ThreadState*     tst = VG_(get_ThreadState)(tid);

   if (VG_(clo_trace_signals))
      VG_(dmsg)("delivering signal %d (%s):%d to thread %u\n",
                sigNo, VG_(signame)(sigNo), info->si_code, tid );

   if (sigNo == VG_SIGVGKILL) {
      /* If this is a SIGVGKILL, we're expecting it to interrupt any
         blocked syscall.  It doesn't matter whether the VCPU state is
         set to restart or not, because we don't expect it will
         execute any more client instructions. */
      vg_assert(VG_(is_exiting)(tid));
      return;
   }

   /* If the client specifies SIG_IGN, treat it as SIG_DFL.

      If deliver_signal() is being called on a thread, we want
      the signal to get through no matter what; if they're ignoring
      it, then we do this override (this is so we can send it SIGSEGV,
      etc). */
   handler_fn = handler->scss_handler;
   if (handler_fn == VKI_SIG_IGN)
      handler_fn = VKI_SIG_DFL;

   vg_assert(handler_fn != VKI_SIG_IGN);

   if (handler_fn == VKI_SIG_DFL) {
      default_action(info, tid);
   } else {
      /* Create a signal delivery frame, and set the client's %ESP and
         %EIP so that when execution continues, we will enter the
         signal handler with the frame on top of the client's stack,
         as it expects.

         Signal delivery can fail if the client stack is too small or
         missing, and we can't push the frame.  If that happens,
         push_signal_frame will cause the whole process to exit when
         we next hit the scheduler.
      */
      vg_assert(VG_(is_valid_tid)(tid));

      push_signal_frame ( tid, info, uc );

      if (handler->scss_flags & VKI_SA_ONESHOT) {
         /* Do the ONESHOT thing. */
         handler->scss_handler = VKI_SIG_DFL;

         handle_SCSS_change( False /* lazy update */ );
      }

      /* At this point:
         tst->sig_mask is the current signal mask
         tst->tmp_sig_mask is the same as sig_mask, unless we're in sigsuspend
         handler->scss_mask is the mask set by the handler

         Handler gets a mask of tmp_sig_mask|handler_mask|signo
       */
      tst->sig_mask = tst->tmp_sig_mask;
      if (!(handler->scss_flags & VKI_SA_NOMASK)) {
         VG_(sigaddset_from_set)(&tst->sig_mask, &handler->scss_mask);
         VG_(sigaddset)(&tst->sig_mask, sigNo);
         tst->tmp_sig_mask = tst->sig_mask;
      }
   }

   /* Thread state is ready to go - just add Runnable */
}
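
/* For illustration (hypothetical masks): if tst->tmp_sig_mask is
   { SIGUSR1 } and the handler was installed with sa_mask = { SIGUSR2 }
   and without SA_NODEFER (VKI_SA_NOMASK), then while a SIGINT handler
   runs the thread's mask is { SIGUSR1, SIGUSR2, SIGINT } -- the union
   computed at the end of deliver_signal above. */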

static void resume_scheduler(ThreadId tid)
{
   ThreadState *tst = VG_(get_ThreadState)(tid);

   vg_assert(tst->os_state.lwpid == VG_(gettid)());

   if (tst->sched_jmpbuf_valid) {
      /* Can't continue; must longjmp back to the scheduler and thus
         enter the sighandler immediately. */
      VG_MINIMAL_LONGJMP(tst->sched_jmpbuf);
   }
}

static void synth_fault_common(ThreadId tid, Addr addr, Int si_code)
{
   vki_siginfo_t info;

   vg_assert(VG_(threads)[tid].status == VgTs_Runnable);

   VG_(memset)(&info, 0, sizeof(info));
   info.si_signo = VKI_SIGSEGV;
   info.si_code = si_code;
   info.VKI_SIGINFO_si_addr = (void*)addr;

   /* Even if gdbserver indicates to ignore the signal, we must deliver it.
      So ignore the return value of VG_(gdbserver_report_signal). */
   (void) VG_(gdbserver_report_signal) (&info, tid);

   /* If they're trying to block the signal, force it to be delivered */
   if (VG_(sigismember)(&VG_(threads)[tid].sig_mask, VKI_SIGSEGV))
      VG_(set_default_handler)(VKI_SIGSEGV);

   deliver_signal(tid, &info, NULL);
}

// Synthesize a fault where the address is OK, but the page
// permissions are bad.
void VG_(synth_fault_perms)(ThreadId tid, Addr addr)
{
   synth_fault_common(tid, addr, VKI_SEGV_ACCERR);
}

// Synthesize a fault where there's nothing mapped at the address.
void VG_(synth_fault_mapping)(ThreadId tid, Addr addr)
{
   synth_fault_common(tid, addr, VKI_SEGV_MAPERR);
}

// Synthesize a misc memory fault.
void VG_(synth_fault)(ThreadId tid)
{
   synth_fault_common(tid, 0, VKI_SEGV_MADE_UP_GPF);
}
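
/* For illustration: when the core detects that client code touches an
   unmapped address, it can call VG_(synth_fault_mapping)(tid, addr);
   the client then receives a SIGSEGV whose siginfo (si_code ==
   SEGV_MAPERR, si_addr == addr) matches what the kernel itself would
   have produced for the same access. */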

// Synthesise a SIGILL.
void VG_(synth_sigill)(ThreadId tid, Addr addr)
{
   vki_siginfo_t info;

   vg_assert(VG_(threads)[tid].status == VgTs_Runnable);

   VG_(memset)(&info, 0, sizeof(info));
   info.si_signo = VKI_SIGILL;
   info.si_code  = VKI_ILL_ILLOPC; /* jrs: no idea what this should be */
   info.VKI_SIGINFO_si_addr = (void*)addr;

   if (VG_(gdbserver_report_signal) (&info, tid)) {
      resume_scheduler(tid);
      deliver_signal(tid, &info, NULL);
   }
   else
      resume_scheduler(tid);
}

// Synthesise a SIGBUS.
void VG_(synth_sigbus)(ThreadId tid)
{
   vki_siginfo_t info;

   vg_assert(VG_(threads)[tid].status == VgTs_Runnable);

   VG_(memset)(&info, 0, sizeof(info));
   info.si_signo = VKI_SIGBUS;
   /* There are several meanings to SIGBUS (as per POSIX, presumably),
      but the most widely understood is "invalid address alignment",
      so let's use that. */
   info.si_code  = VKI_BUS_ADRALN;
   /* If we knew the invalid address in question, we could put it
      in .si_addr.  Oh well. */
   /* info.VKI_SIGINFO_si_addr = (void*)addr; */

   if (VG_(gdbserver_report_signal) (&info, tid)) {
      resume_scheduler(tid);
      deliver_signal(tid, &info, NULL);
   }
   else
      resume_scheduler(tid);
}

// Synthesise a SIGTRAP.
void VG_(synth_sigtrap)(ThreadId tid)
{
   vki_siginfo_t info;
   struct vki_ucontext uc;
#  if defined(VGP_x86_darwin)
   struct __darwin_mcontext32 mc;
#  elif defined(VGP_amd64_darwin)
   struct __darwin_mcontext64 mc;
#  endif

   vg_assert(VG_(threads)[tid].status == VgTs_Runnable);

   VG_(memset)(&info, 0, sizeof(info));
   VG_(memset)(&uc,   0, sizeof(uc));
   info.si_signo = VKI_SIGTRAP;
   info.si_code = VKI_TRAP_BRKPT; /* tjh: only ever called for a brkpt ins */

#  if defined(VGP_x86_linux) || defined(VGP_amd64_linux)
   uc.uc_mcontext.trapno = 3;     /* tjh: this is the x86 trap number
                                          for a breakpoint trap... */
   uc.uc_mcontext.err = 0;        /* tjh: no error code for x86
                                          breakpoint trap... */
#  elif defined(VGP_x86_darwin) || defined(VGP_amd64_darwin)
   /* the same thing, but using Darwin field/struct names */
   VG_(memset)(&mc, 0, sizeof(mc));
   uc.uc_mcontext = &mc;
   uc.uc_mcontext->__es.__trapno = 3;
   uc.uc_mcontext->__es.__err = 0;
#  elif defined(VGP_x86_solaris)
   uc.uc_mcontext.gregs[VKI_ERR] = 0;
   uc.uc_mcontext.gregs[VKI_TRAPNO] = VKI_T_BPTFLT;
#  endif

   /* fixs390: do we need to do anything here for s390 ? */
   if (VG_(gdbserver_report_signal) (&info, tid)) {
      resume_scheduler(tid);
      deliver_signal(tid, &info, &uc);
   }
   else
      resume_scheduler(tid);
}

// Synthesise a SIGFPE.
void VG_(synth_sigfpe)(ThreadId tid, UInt code)
{
// Only tested on mips32 and mips64
#if !defined(VGA_mips32) && !defined(VGA_mips64)
   vg_assert(0);
#else
   vki_siginfo_t info;
   struct vki_ucontext uc;

   vg_assert(VG_(threads)[tid].status == VgTs_Runnable);

   VG_(memset)(&info, 0, sizeof(info));
   VG_(memset)(&uc,   0, sizeof(uc));
   info.si_signo = VKI_SIGFPE;
   info.si_code = code;

   /* Pass the siginfo, not the bare signal number: gdbserver expects
      a vki_siginfo_t* here, as in the other synth_* functions above. */
   if (VG_(gdbserver_report_signal) (&info, tid)) {
      resume_scheduler(tid);
      deliver_signal(tid, &info, &uc);
   }
   else
      resume_scheduler(tid);
#endif
}

/* Make a signal pending for a thread, for later delivery.
   VG_(poll_signals) will arrange for it to be delivered at the right
   time.

   tid==0 means add it to the process-wide queue, not to a specific
   thread.
*/
static
void queue_signal(ThreadId tid, const vki_siginfo_t *si)
{
   ThreadState *tst;
   SigQueue *sq;
   vki_sigset_t savedmask;

   tst = VG_(get_ThreadState)(tid);

   /* Protect the signal queue against async deliveries */
   block_all_host_signals(&savedmask);

   if (tst->sig_queue == NULL) {
      tst->sig_queue = VG_(malloc)("signals.qs.1", sizeof(*tst->sig_queue));
      VG_(memset)(tst->sig_queue, 0, sizeof(*tst->sig_queue));
   }
   sq = tst->sig_queue;

   if (VG_(clo_trace_signals))
      VG_(dmsg)("Queueing signal %d (idx %d) to thread %u\n",
                si->si_signo, sq->next, tid);

   /* Add signal to the queue.  If the queue gets overrun, then old
      queued signals may get lost.

      XXX We should also keep a sigset of pending signals, so that at
      least a non-siginfo signal gets delivered.
   */
   if (sq->sigs[sq->next].si_signo != 0)
      VG_(umsg)("Signal %d being dropped from thread %u's queue\n",
                sq->sigs[sq->next].si_signo, tid);

   sq->sigs[sq->next] = *si;
   sq->next = (sq->next+1) % N_QUEUED_SIGNALS;

   restore_all_host_signals(&savedmask);
}
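
/* For illustration: the queue is a small ring of N_QUEUED_SIGNALS
   slots.  If the ring holds, say, 8 slots, then queueing a 9th signal
   before any have been delivered overwrites the oldest slot (after
   the warning above), so bursts of async signals can be lost, as the
   XXX comment notes. */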

/*
   Returns the next queued signal for thread tid which is in "set".
   tid==0 means process-wide signal.  The caller must set si_signo to
   0 once the signal has been delivered.

   Must be called with all signals blocked, to protect against async
   deliveries.
*/
static vki_siginfo_t *next_queued(ThreadId tid, const vki_sigset_t *set)
{
   ThreadState *tst = VG_(get_ThreadState)(tid);
   SigQueue *sq;
   Int idx;
   vki_siginfo_t *ret = NULL;

   sq = tst->sig_queue;
   if (sq == NULL)
      goto out;

   idx = sq->next;
   do {
      if (0)
         VG_(printf)("idx=%d si_signo=%d inset=%d\n", idx,
                     sq->sigs[idx].si_signo,
                     VG_(sigismember)(set, sq->sigs[idx].si_signo));

      if (sq->sigs[idx].si_signo != 0
          && VG_(sigismember)(set, sq->sigs[idx].si_signo)) {
         if (VG_(clo_trace_signals))
            VG_(dmsg)("Returning queued signal %d (idx %d) for thread %u\n",
                      sq->sigs[idx].si_signo, idx, tid);
         ret = &sq->sigs[idx];
         goto out;
      }

      idx = (idx + 1) % N_QUEUED_SIGNALS;
   } while(idx != sq->next);
  out:
   return ret;
}

static int sanitize_si_code(int si_code)
{
#if defined(VGO_linux)
   /* The Linux kernel uses the top 16 bits of si_code for its own
      use and only exports the bottom 16 bits to user space - at least
      that is the theory, but it turns out that there are some kernels
      around that forget to mask out the top 16 bits so we do it here.

      The kernel treats the bottom 16 bits as signed and (when it does
      mask them off) sign extends them when exporting to user space so
      we do the same thing here. */
   return (Short)si_code;
#elif defined(VGO_darwin) || defined(VGO_solaris)
   return si_code;
#else
#  error Unknown OS
#endif
}
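
/* For illustration: a buggy kernel exporting si_code 0x00010001 is
   sanitized here to 1 (kernel-generated), while 0x0000FFFF
   sign-extends through the (Short) cast to -1, ie. a user-space
   origin such as SI_QUEUE. */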

#if defined(VGO_solaris)
/* The following function is used to switch Valgrind from a client stack
   back onto a Valgrind stack.  It is used only when the door_return call
   was invoked by the client because this is the only syscall which is
   executed directly on the client stack (see syscall-{x86,amd64}-solaris.S).
   The switch onto the Valgrind stack has to be made as soon as possible
   because there is no guarantee that there is enough space on the client
   stack to run the complete signal machinery.  Also, Valgrind has to be
   switched back onto its stack before a simulated signal frame is created
   because that will overwrite the real sigframe built by the kernel. */
static void async_signalhandler_solaris_preprocess(ThreadId tid, Int *signo,
                                                   vki_siginfo_t *info,
                                                   struct vki_ucontext *uc)
{
#  define RECURSION_BIT 0x1000
   Addr sp;
   vki_sigframe_t *frame;
   ThreadState *tst = VG_(get_ThreadState)(tid);
   Int rec_signo;

   /* If not doing door_return then return instantly. */
   if (!tst->os_state.in_door_return)
      return;

   /* Check for the recursion:
      v ...
      | async_signalhandler - executed on the client stack
      v async_signalhandler_solaris_preprocess - first call switches the
      |   stacks and sets the RECURSION_BIT flag
      v async_signalhandler - executed on the Valgrind stack
      | async_signalhandler_solaris_preprocess - the RECURSION_BIT flag is
      v   set, clear it and return
    */
   if (*signo & RECURSION_BIT) {
      *signo &= ~RECURSION_BIT;
      return;
   }

   rec_signo = *signo | RECURSION_BIT;

#  if defined(VGP_x86_solaris)
   /* Register %ebx/%rbx points to the top of the original V stack. */
   sp = uc->uc_mcontext.gregs[VKI_EBX];
#  elif defined(VGP_amd64_solaris)
   sp = uc->uc_mcontext.gregs[VKI_REG_RBX];
#  else
#    error "Unknown platform"
#  endif

   /* Build a fake signal frame, as in sigframe-solaris.c. */
   /* Calculate a new stack pointer. */
   sp -= sizeof(vki_sigframe_t);
   sp = VG_ROUNDDN(sp, 16) - sizeof(UWord);

   /* Fill in the frame. */
   frame = (vki_sigframe_t*)sp;
   /* Set a bogus return address. */
   frame->return_addr = (void*)~0UL;
   frame->a1_signo = rec_signo;
   /* The first parameter has to be 16-byte aligned, resembling a function
      call. */
   {
      /* Using
         vg_assert(VG_IS_16_ALIGNED(&frame->a1_signo));
         seems to get miscompiled on amd64 with GCC 4.7.2. */
      Addr signo_addr = (Addr)&frame->a1_signo;
      vg_assert(VG_IS_16_ALIGNED(signo_addr));
   }
   frame->a2_siginfo = &frame->siginfo;
   frame->siginfo = *info;
   frame->ucontext = *uc;

#  if defined(VGP_x86_solaris)
   frame->a3_ucontext = &frame->ucontext;

   /* Switch onto the V stack and restart the signal processing. */
   __asm__ __volatile__(
      "xorl %%ebp, %%ebp\n"
      "movl %[sp], %%esp\n"
      "jmp async_signalhandler\n"
      :
      : [sp] "a" (sp)
      : /*"ebp"*/);

#  elif defined(VGP_amd64_solaris)
   __asm__ __volatile__(
      "xorq %%rbp, %%rbp\n"
      "movq %[sp], %%rsp\n"
      "jmp async_signalhandler\n"
      :
      : [sp] "a" (sp), "D" (rec_signo), "S" (&frame->siginfo),
        "d" (&frame->ucontext)
      : /*"rbp"*/);
#  else
#    error "Unknown platform"
#  endif

   /* We should never get here. */
   vg_assert(0);

#  undef RECURSION_BIT
}
#endif

/*
   Receive an async signal from the kernel.

   This should only happen when the thread is blocked in a syscall,
   since that's the only time this set of signals is unblocked.
*/
static
void async_signalhandler ( Int sigNo,
                           vki_siginfo_t *info, struct vki_ucontext *uc )
{
   ThreadId     tid = VG_(lwpid_to_vgtid)(VG_(gettid)());
   ThreadState* tst = VG_(get_ThreadState)(tid);
   SysRes       sres;

   vg_assert(tst->status == VgTs_WaitSys);

#  if defined(VGO_solaris)
   async_signalhandler_solaris_preprocess(tid, &sigNo, info, uc);
#  endif

   /* The thread isn't currently running, make it so before going on */
   VG_(acquire_BigLock)(tid, "async_signalhandler");

   info->si_code = sanitize_si_code(info->si_code);

   if (VG_(clo_trace_signals))
      VG_(dmsg)("async signal handler: signal=%d, tid=%u, si_code=%d\n",
                sigNo, tid, info->si_code);

   /* Update thread state properly.  The signal can only have been
      delivered whilst we were in
      coregrind/m_syswrap/syscall-<PLAT>.S, and only then in the
      window between the two sigprocmask calls, since at all other
      times, we run with async signals on the host blocked.  Hence
      make enquiries on the basis that we were in or very close to a
      syscall, and attempt to fix up the guest state accordingly.

      (normal async signals occurring during computation are blocked,
      but periodically polled for using VG_(sigtimedwait_zero), and
      delivered at a point convenient for us.  Hence this routine only
      deals with signals that are delivered to a thread during a
      syscall.) */

   /* First, extract a SysRes from the ucontext_t* given to this
      handler.  If it is subsequently established by
      VG_(fixup_guest_state_after_syscall_interrupted) that the
      syscall was complete but the results had not been committed yet
      to the guest state, then it'll have to commit the results itself
      "by hand", and so we need to extract the SysRes.  Of course if
      the thread was not in that particular window then the
      SysRes will be meaningless, but that's OK too because
      VG_(fixup_guest_state_after_syscall_interrupted) will detect
      that the thread was not in said window and ignore the SysRes. */

   /* To make matters more complex still, on Darwin we need to know
      the "class" of the syscall under consideration in order to be
      able to extract a correct SysRes.  The class will have been
      saved just before the syscall, by VG_(client_syscall), into this
      thread's tst->arch.vex.guest_SC_CLASS.  Hence: */
#  if defined(VGO_darwin)
   sres = VG_UCONTEXT_SYSCALL_SYSRES(uc, tst->arch.vex.guest_SC_CLASS);
#  else
   sres = VG_UCONTEXT_SYSCALL_SYSRES(uc);
#  endif

   /* (1) */
   VG_(fixup_guest_state_after_syscall_interrupted)(
      tid,
      VG_UCONTEXT_INSTR_PTR(uc),
      sres,
      !!(scss.scss_per_sig[sigNo].scss_flags & VKI_SA_RESTART),
      uc
   );

   /* (2) */
   /* Set up the thread's state to deliver a signal */
   if (!is_sig_ign(info, tid))
      deliver_signal(tid, info, uc);

   /* It's crucial that (1) and (2) happen in the order (1) then (2)
      and not the other way around.  (1) fixes up the guest thread
      state to reflect the fact that the syscall was interrupted --
      either to restart the syscall or to return EINTR.  (2) then sets
      up the thread state to deliver the signal.  Then we resume
      execution.  First, the signal handler is run, since that's the
      second adjustment we made to the thread state.  If that returns,
      then we resume at the guest state created by (1), viz, either
      the syscall returns EINTR or is restarted.

      If (2) was done before (1) the outcome would be completely
      different, and wrong. */

   /* longjmp back to the thread's main loop to start executing the
      handler. */
   resume_scheduler(tid);

   VG_(core_panic)("async_signalhandler: got unexpected signal "
                   "while outside of scheduler");
}

/* Extend the stack of thread #tid to cover addr. It is expected that
   addr either points into an already mapped anonymous segment or into a
   reservation segment abutting the stack segment. Everything else is a bug.

   Returns True on success, False on failure.

   Succeeds without doing anything if addr is already within a segment.

   Failure could be caused by:
   - addr not below a growable segment
   - new stack size would exceed the stack limit for the given thread
   - mmap failed for some other reason
*/
Bool VG_(extend_stack)(ThreadId tid, Addr addr)
{
   SizeT udelta;

   /* Get the segment containing addr. */
   const NSegment* seg = VG_(am_find_nsegment)(addr);
   vg_assert(seg != NULL);

   /* TODO: the test "seg->kind == SkAnonC" is really inadequate,
      because although it tests whether the segment is mapped
      _somehow_, it doesn't check that it has the right permissions
      (r,w, maybe x) ?  */
   if (seg->kind == SkAnonC)
      /* addr is already mapped.  Nothing to do. */
      return True;

   const NSegment* seg_next = VG_(am_next_nsegment)( seg, True/*fwds*/ );
   vg_assert(seg_next != NULL);

   udelta = VG_PGROUNDUP(seg_next->start - addr);

   VG_(debugLog)(1, "signals",
                    "extending a stack base 0x%lx down by %lu\n",
                    seg_next->start, udelta);
   Bool overflow;
   if (! VG_(am_extend_into_adjacent_reservation_client)
       ( seg_next->start, -(SSizeT)udelta, &overflow )) {
      Addr new_stack_base = seg_next->start - udelta;
      if (overflow)
         VG_(umsg)("Stack overflow in thread #%u: can't grow stack to %#lx\n",
                   tid, new_stack_base);
      else
         VG_(umsg)("Cannot map memory to grow the stack for thread #%u "
                   "to %#lx\n", tid, new_stack_base);
      return False;
   }

   /* When we change the main stack, we have to let the stack handling
      code know about it. */
   VG_(change_stack)(VG_(clstk_id), addr, VG_(clstk_end));

   if (VG_(clo_sanity_level) > 2)
      VG_(sanity_check_general)(False);

   return True;
}

static fault_catcher_t fault_catcher = NULL;

fault_catcher_t VG_(set_fault_catcher)(fault_catcher_t catcher)
{
   fault_catcher_t prev_catcher = fault_catcher;
   fault_catcher = catcher;
   return prev_catcher;
}
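
/* For illustration, the intended usage (as in memcheck's leak
   checker) brackets a scan of possibly-unaddressable memory:

      fault_catcher_t prev = VG_(set_fault_catcher)(scan_catcher);
      ... scan memory ...
      VG_(set_fault_catcher)(prev);

   where scan_catcher (a hypothetical name) longjmps past the faulting
   access instead of returning. */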

static
void sync_signalhandler_from_user ( ThreadId tid,
         Int sigNo, vki_siginfo_t *info, struct vki_ucontext *uc )
{
   ThreadId qtid;

   /* If some user process sent us a sync signal (ie. it's not the result
      of a faulting instruction), then how we treat it depends on when it
      arrives... */

   if (VG_(threads)[tid].status == VgTs_WaitSys
#     if defined(VGO_solaris)
      /* Check if the signal was really received while doing a blocking
         syscall.  Only then the async_signalhandler() path can be used. */
       && VG_(is_ip_in_blocking_syscall)(tid, VG_UCONTEXT_INSTR_PTR(uc))
#     endif
         ) {
      /* Signal arrived while we're blocked in a syscall.  This means that
         the client's signal mask was applied.  In other words, we can't
         get here unless the client wants this signal right now.  This means
         we can simply use the async_signalhandler. */
      if (VG_(clo_trace_signals))
         VG_(dmsg)("Delivering user-sent sync signal %d as async signal\n",
                   sigNo);

      async_signalhandler(sigNo, info, uc);
      VG_(core_panic)("async_signalhandler returned!?\n");

   } else {
      /* Signal arrived while in generated client code, or while running
         Valgrind core code.  That means that every thread has these signals
         unblocked, so we can't rely on the kernel to route them properly, so
         we need to queue them manually. */
      if (VG_(clo_trace_signals))
         VG_(dmsg)("Routing user-sent sync signal %d via queue\n", sigNo);

#     if defined(VGO_linux)
      /* On Linux, first we have to do a sanity check of the siginfo. */
      if (info->VKI_SIGINFO_si_pid == 0) {
         /* There's a per-user limit of pending siginfo signals.  If
            you exceed this, by having more than that number of
            pending signals with siginfo, then new signals are
            delivered without siginfo.  This condition can be caused
            by any unrelated program you're running at the same time
            as Valgrind, if it has a large number of pending siginfo
            signals which it isn't taking delivery of.

            Since we depend on siginfo to work out why we were sent a
            signal and what we should do about it, we really can't
            continue unless we get it. */
         VG_(umsg)("Signal %d (%s) appears to have lost its siginfo; "
                   "I can't go on.\n", sigNo, VG_(signame)(sigNo));
         VG_(printf)(
"  This may be because one of your programs has consumed your ration of\n"
"  siginfo structures.  For more information, see:\n"
"    http://kerneltrap.org/mailarchive/1/message/25599/thread\n"
"  Basically, some program on your system is building up a large queue of\n"
"  pending signals, and this causes the siginfo data for other signals to\n"
"  be dropped because it's exceeding a system limit.  However, Valgrind\n"
"  absolutely needs siginfo for SIGSEGV.  A workaround is to track down the\n"
"  offending program and avoid running it while using Valgrind, but there\n"
"  is no easy way to do this.  Apparently the problem was fixed in kernel\n"
"  2.6.12.\n");

         /* It's a fatal signal, so we force the default handler. */
         VG_(set_default_handler)(sigNo);
         deliver_signal(tid, info, uc);
         resume_scheduler(tid);
         VG_(exit)(99);       /* If we can't resume, then just exit */
      }
#     endif

      qtid = 0;         /* shared pending by default */
#     if defined(VGO_linux)
      if (info->si_code == VKI_SI_TKILL)
         qtid = tid;    /* directed to us specifically */
#     endif
      queue_signal(qtid, info);
   }
}

/* Returns the reported fault address for an exact address */
static Addr fault_mask(Addr in)
{
   /* We have to use VG_PGROUNDDN because faults on s390x only deliver
      the page address but not the address within a page.
    */
#  if defined(VGA_s390x)
   return VG_PGROUNDDN(in);
#  else
   return in;
#  endif
}

/* Returns True if the sync signal was due to the stack requiring extension
   and the extension was successful.
*/
static Bool extend_stack_if_appropriate(ThreadId tid, vki_siginfo_t* info)
{
   Addr fault;
   Addr esp;
   NSegment const *seg, *seg_next;

   if (info->si_signo != VKI_SIGSEGV)
      return False;

   fault    = (Addr)info->VKI_SIGINFO_si_addr;
   esp      = VG_(get_SP)(tid);
   seg      = VG_(am_find_nsegment)(fault);
   seg_next = seg ? VG_(am_next_nsegment)( seg, True/*fwds*/ )
                  : NULL;

   if (VG_(clo_trace_signals)) {
      if (seg == NULL)
         VG_(dmsg)("SIGSEGV: si_code=%d faultaddr=%#lx tid=%u ESP=%#lx "
                   "seg=NULL\n",
                   info->si_code, fault, tid, esp);
      else
         VG_(dmsg)("SIGSEGV: si_code=%d faultaddr=%#lx tid=%u ESP=%#lx "
                   "seg=%#lx-%#lx\n",
                   info->si_code, fault, tid, esp, seg->start, seg->end);
   }

   if (info->si_code == VKI_SEGV_MAPERR
       && seg
       && seg->kind == SkResvn
       && seg->smode == SmUpper
       && seg_next
       && seg_next->kind == SkAnonC
       && fault >= fault_mask(esp - VG_STACK_REDZONE_SZB)) {
      /* If the fault address is above esp but below the current known
         stack segment base, and it was a fault because there was
         nothing mapped there (as opposed to a permissions fault),
         then extend the stack segment.
       */
      Addr base = VG_PGROUNDDN(esp - VG_STACK_REDZONE_SZB);
      if (VG_(am_addr_is_in_extensible_client_stack)(base) &&
          VG_(extend_stack)(tid, base)) {
         if (VG_(clo_trace_signals))
            VG_(dmsg)("       -> extended stack base to %#lx\n",
                      VG_PGROUNDDN(fault));
         return True;
      } else {
         return False;
      }
   } else {
      return False;
   }
}
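
/* For illustration (hypothetical layout): with the client stack
   mapped as an SkAnonC segment and an SmUpper SkResvn reservation
   immediately below it, a push just below SP faults with SEGV_MAPERR
   inside the reservation; all the tests above then hold,
   VG_(extend_stack) maps the missing pages, this function returns
   True, and the faulting instruction is simply re-executed. */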
2699 
static
void sync_signalhandler_from_kernel ( ThreadId tid,
         Int sigNo, vki_siginfo_t *info, struct vki_ucontext *uc )
{
   /* Check to see if some part of Valgrind itself is interested in faults.
      The fault catcher should never be set whilst we're in generated code, so
      check for that.  AFAIK the only use of the catcher right now is
      memcheck's leak detector. */
   if (fault_catcher) {
      vg_assert(VG_(in_generated_code) == False);

      (*fault_catcher)(sigNo, (Addr)info->VKI_SIGINFO_si_addr);
      /* If the catcher returns, then it didn't handle the fault,
         so carry on panicking. */
   }

   if (extend_stack_if_appropriate(tid, info)) {
      /* Stack extension occurred, so we don't need to do anything else; upon
         returning from this function, we'll restart the host (hence guest)
         instruction. */
   } else {
      /* OK, this is a signal we really have to deal with.  If it came
         from the client's code, then we can jump back into the scheduler
         and have it delivered.  Otherwise it's a Valgrind bug. */
      ThreadState *tst = VG_(get_ThreadState)(tid);

      if (VG_(sigismember)(&tst->sig_mask, sigNo)) {
         /* signal is blocked, but they're not allowed to block faults */
         VG_(set_default_handler)(sigNo);
      }

      if (VG_(in_generated_code)) {
         if (VG_(gdbserver_report_signal) (info, tid)
             || VG_(sigismember)(&tst->sig_mask, sigNo)) {
            /* Can't continue; must longjmp back to the scheduler and thus
               enter the sighandler immediately. */
            deliver_signal(tid, info, uc);
            resume_scheduler(tid);
         }
         else
            resume_scheduler(tid);
      }

      /* If resume_scheduler returns, or the fault was our own, it
         means we don't have a longjmp set up, implying that we weren't
         running client code, and therefore the signal was generated by
         Valgrind internally.
       */
      VG_(dmsg)("VALGRIND INTERNAL ERROR: Valgrind received "
                "a signal %d (%s) - exiting\n",
                sigNo, VG_(signame)(sigNo));

      VG_(dmsg)("si_code=%d;  Faulting address: %p;  sp: %#lx\n",
                info->si_code, info->VKI_SIGINFO_si_addr,
                VG_UCONTEXT_STACK_PTR(uc));

      if (0)
         VG_(kill_self)(sigNo);  /* generate a core dump */

      //if (tid == 0)            /* could happen after everyone has exited */
      //  tid = VG_(master_tid);
      vg_assert(tid != 0);

      UnwindStartRegs startRegs;
      VG_(memset)(&startRegs, 0, sizeof(startRegs));

      VG_UCONTEXT_TO_UnwindStartRegs(&startRegs, uc);
      VG_(core_panic_at)("Killed by fatal signal", &startRegs);
   }
}

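/* Hedged sketch of the fault-catcher hook consumed by
   sync_signalhandler_from_kernel() above.  The registration call
   VG_(set_fault_catcher) and the minimal-setjmp macros are assumed to
   have roughly this shape; memcheck's leak detector is believed to be
   the only current user, wrapping its pointer scan like so: */
#if 0
static VG_MINIMAL_JMP_BUF(scan_jmpbuf);

static void scan_fault_catcher ( Int sigNo, Addr addr )
{
   /* Faulted while scanning: bail back out to the scan loop.  If we
      returned instead, the handler above would carry on panicking. */
   VG_MINIMAL_LONGJMP(scan_jmpbuf);
}

static void scan_memory_carefully ( Addr start, SizeT len )
{
   VG_(set_fault_catcher)(scan_fault_catcher);
   if (VG_MINIMAL_SETJMP(scan_jmpbuf) == 0) {
      /* ... dereference possibly-unmapped words in [start, start+len) ... */
   }
   VG_(set_fault_catcher)(NULL);   /* always uninstall on the way out */
}
#endif
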
/*
   Receive a sync signal from the host.
*/
static
void sync_signalhandler ( Int sigNo,
                          vki_siginfo_t *info, struct vki_ucontext *uc )
{
   ThreadId tid = VG_(lwpid_to_vgtid)(VG_(gettid)());
   Bool from_user;

   if (0)
      VG_(printf)("sync_sighandler(%d, %p, %p)\n", sigNo, info, uc);

   vg_assert(info != NULL);
   vg_assert(info->si_signo == sigNo);
   vg_assert(sigNo == VKI_SIGSEGV ||
             sigNo == VKI_SIGBUS  ||
             sigNo == VKI_SIGFPE  ||
             sigNo == VKI_SIGILL  ||
             sigNo == VKI_SIGTRAP);

   info->si_code = sanitize_si_code(info->si_code);

   from_user = !is_signal_from_kernel(tid, sigNo, info->si_code);

   if (VG_(clo_trace_signals)) {
      VG_(dmsg)("sync signal handler: "
                "signal=%d, si_code=%d, EIP=%#lx, eip=%#lx, from %s\n",
                sigNo, info->si_code, VG_(get_IP)(tid),
                VG_UCONTEXT_INSTR_PTR(uc),
                ( from_user ? "user" : "kernel" ));
   }
   vg_assert(sigNo >= 1 && sigNo <= VG_(max_signal));

   /* // debug code:
   if (0) {
      VG_(printf)("info->si_signo  %d\n", info->si_signo);
      VG_(printf)("info->si_errno  %d\n", info->si_errno);
      VG_(printf)("info->si_code   %d\n", info->si_code);
      VG_(printf)("info->si_pid    %d\n", info->si_pid);
      VG_(printf)("info->si_uid    %d\n", info->si_uid);
      VG_(printf)("info->si_status %d\n", info->si_status);
      VG_(printf)("info->si_addr   %p\n", info->si_addr);
   }
   */

   /* from_user was computed above: if the signal came from the user
      rather than the kernel, treat it more like an async signal than a
      sync signal -- that is, merely queue it for later delivery. */
   if (from_user) {
      sync_signalhandler_from_user(  tid, sigNo, info, uc);
   } else {
      sync_signalhandler_from_kernel(tid, sigNo, info, uc);
   }

#  if defined(VGO_solaris)
   /* On Solaris we have to return from the signal handler manually. */
   VG_(do_syscall2)(__NR_context, VKI_SETCONTEXT, (UWord)uc);
#  endif
}


/*
   Kill this thread.  Makes it leave any syscall it might be currently
   blocked in, and return to the scheduler.  This doesn't mark the thread
   as exiting; that's the caller's job.
 */
static void sigvgkill_handler(int signo, vki_siginfo_t *si,
                                         struct vki_ucontext *uc)
{
   ThreadId     tid = VG_(lwpid_to_vgtid)(VG_(gettid)());
   ThreadStatus at_signal = VG_(threads)[tid].status;

   if (VG_(clo_trace_signals))
      VG_(dmsg)("sigvgkill for lwp %d tid %u\n", VG_(gettid)(), tid);

   VG_(acquire_BigLock)(tid, "sigvgkill_handler");

   vg_assert(signo == VG_SIGVGKILL);
   vg_assert(si->si_signo == signo);

   /* jrs 2006 August 3: the following assertion seems incorrect to
      me, and fails on AIX.  sigvgkill could be sent to a thread which
      is runnable - see VG_(nuke_all_threads_except) in the scheduler.
      Hence comment these out ..

      vg_assert(VG_(threads)[tid].status == VgTs_WaitSys);
      VG_(post_syscall)(tid);

      and instead do:
   */
   if (at_signal == VgTs_WaitSys)
      VG_(post_syscall)(tid);
   /* jrs 2006 August 3 ends */

   resume_scheduler(tid);

   VG_(core_panic)("sigvgkill_handler couldn't return to the scheduler\n");
}

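/* Hedged sketch of the sending side, which lives in the scheduler
   rather than here (see VG_(nuke_all_threads_except)).  Asking a
   thread to die is believed to look roughly like this: set its
   exitreason first, then poke it with SIGVGKILL so a blocking syscall
   is interrupted and the handler above runs.  The helper name and its
   argument are hypothetical. */
#if 0
static void ask_thread_to_die ( ThreadId tid, VgSchedReturnCode why )
{
   VG_(threads)[tid].exitreason = why;            /* reason first ...  */
   if (VG_(threads)[tid].status == VgTs_WaitSys)
      VG_(tkill)(VG_(threads)[tid].os_state.lwpid,
                 VG_SIGVGKILL);                   /* ... then the poke */
}
#endif
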
static __attribute((unused))
void pp_ksigaction ( vki_sigaction_toK_t* sa )
{
   Int i;
   VG_(printf)("pp_ksigaction: handler %p, flags 0x%x, restorer %p\n",
               sa->ksa_handler,
               (UInt)sa->sa_flags,
#              if !defined(VGP_x86_darwin) && !defined(VGP_amd64_darwin) && \
                  !defined(VGO_solaris)
                  sa->sa_restorer
#              else
                  (void*)0
#              endif
              );
   VG_(printf)("pp_ksigaction: { ");
   for (i = 1; i <= VG_(max_signal); i++)
      if (VG_(sigismember(&(sa->sa_mask),i)))
         VG_(printf)("%d ", i);
   VG_(printf)("}\n");
}

/*
   Force signal handler to default
 */
void VG_(set_default_handler)(Int signo)
{
   vki_sigaction_toK_t sa;

   sa.ksa_handler = VKI_SIG_DFL;
   sa.sa_flags = 0;
#  if !defined(VGP_x86_darwin) && !defined(VGP_amd64_darwin) && \
      !defined(VGO_solaris)
   sa.sa_restorer = 0;
#  endif
   VG_(sigemptyset)(&sa.sa_mask);

   VG_(do_sys_sigaction)(signo, &sa, NULL);
}

/*
   Poll for pending signals, and set the next one up for delivery.
 */
void VG_(poll_signals)(ThreadId tid)
{
   vki_siginfo_t si, *sip;
   vki_sigset_t pollset;
   ThreadState *tst = VG_(get_ThreadState)(tid);
   vki_sigset_t saved_mask;

   /* look for all the signals this thread isn't blocking */
   /* pollset = ~tst->sig_mask */
   VG_(sigcomplementset)( &pollset, &tst->sig_mask );

   block_all_host_signals(&saved_mask); // protect signal queue

   /* First look for any queued pending signals */
   sip = next_queued(tid, &pollset); /* this thread */

   if (sip == NULL)
      sip = next_queued(0, &pollset); /* process-wide */

   /* If there was nothing queued, ask the kernel for a pending signal */
   if (sip == NULL && VG_(sigtimedwait_zero)(&pollset, &si) > 0) {
      if (VG_(clo_trace_signals))
         VG_(dmsg)("poll_signals: got signal %d for thread %u\n",
                   si.si_signo, tid);
      sip = &si;
   }

   if (sip != NULL) {
      /* OK, something to do; deliver it */
      if (VG_(clo_trace_signals))
         VG_(dmsg)("Polling found signal %d for tid %u\n", sip->si_signo, tid);
      if (!is_sig_ign(sip, tid))
         deliver_signal(tid, sip, NULL);
      else if (VG_(clo_trace_signals))
         VG_(dmsg)("   signal %d ignored\n", sip->si_signo);

      sip->si_signo = 0;   /* remove from signal queue, if that's
                              where it came from */
   }

   restore_all_host_signals(&saved_mask);
}

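/* Hedged sketch of the expected calling pattern for VG_(poll_signals):
   per the scheme described at the top of this file, the scheduler is
   assumed to poll once per time quantum before putting a thread back
   on the CPU, roughly like this (the counter and constant names are
   hypothetical): */
#if 0
   /* in the scheduler's run loop, for the thread about to execute: */
   if (--dispatch_ctr <= 0) {
      VG_(poll_signals)(tid);     /* set up the next deliverable signal */
      dispatch_ctr = SCHEDULING_QUANTUM;
   }
#endif
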
/* At startup, copy the process' real signal state to the SCSS.
   Whilst doing this, block all real signals.  Then calculate SKSS and
   set the kernel to that.  Also initialise DCSS.
*/
void VG_(sigstartup_actions) ( void )
{
   Int i, ret, vKI_SIGRTMIN;
   vki_sigset_t saved_procmask;
   vki_sigaction_fromK_t sa;

   VG_(memset)(&scss, 0, sizeof(scss));
   VG_(memset)(&skss, 0, sizeof(skss));

#  if defined(VKI_SIGRTMIN)
   vKI_SIGRTMIN = VKI_SIGRTMIN;
#  else
   vKI_SIGRTMIN = 0; /* eg Darwin */
#  endif

   /* VG_(printf)("SIGSTARTUP\n"); */
   /* Block all signals.  saved_procmask remembers the previous mask,
      which the first thread inherits.
   */
   block_all_host_signals( &saved_procmask );

   /* Copy per-signal settings to SCSS. */
   for (i = 1; i <= _VKI_NSIG; i++) {
      /* Get the old host action */
      ret = VG_(sigaction)(i, NULL, &sa);

#     if defined(VGP_x86_darwin) || defined(VGP_amd64_darwin)
      /* apparently we may not even ask about the disposition of these
         signals, let alone change them */
      if (ret != 0 && (i == VKI_SIGKILL || i == VKI_SIGSTOP))
         continue;
#     endif

      if (ret != 0)
         break;

      /* Try setting it back to see if this signal is really
         available */
      if (vKI_SIGRTMIN > 0 /* it actually exists on this platform */
          && i >= vKI_SIGRTMIN) {
         vki_sigaction_toK_t tsa, sa2;

         tsa.ksa_handler = (void *)sync_signalhandler;
         tsa.sa_flags = VKI_SA_SIGINFO;
#        if !defined(VGP_x86_darwin) && !defined(VGP_amd64_darwin) && \
            !defined(VGO_solaris)
         tsa.sa_restorer = 0;
#        endif
         VG_(sigfillset)(&tsa.sa_mask);

         /* try setting it to some arbitrary handler */
         if (VG_(sigaction)(i, &tsa, NULL) != 0) {
            /* failed - not really usable */
            break;
         }

         VG_(convert_sigaction_fromK_to_toK)( &sa, &sa2 );
         ret = VG_(sigaction)(i, &sa2, NULL);
         vg_assert(ret == 0);
      }

      VG_(max_signal) = i;

      if (VG_(clo_trace_signals) && VG_(clo_verbosity) > 2)
         VG_(printf)("snaffling handler 0x%lx for signal %d\n",
                     (Addr)(sa.ksa_handler), i );

      scss.scss_per_sig[i].scss_handler  = sa.ksa_handler;
      scss.scss_per_sig[i].scss_flags    = sa.sa_flags;
      scss.scss_per_sig[i].scss_mask     = sa.sa_mask;

      scss.scss_per_sig[i].scss_restorer = NULL;
#     if !defined(VGP_x86_darwin) && !defined(VGP_amd64_darwin) && \
         !defined(VGO_solaris)
      scss.scss_per_sig[i].scss_restorer = sa.sa_restorer;
#     endif

      scss.scss_per_sig[i].scss_sa_tramp = NULL;
#     if defined(VGP_x86_darwin) || defined(VGP_amd64_darwin)
      scss.scss_per_sig[i].scss_sa_tramp = NULL;
      /*sa.sa_tramp;*/
      /* We can't know what it was, because Darwin's sys_sigaction
         doesn't tell us. */
#     endif
   }

   if (VG_(clo_trace_signals))
      VG_(dmsg)("Max kernel-supported signal is %d\n", VG_(max_signal));

   /* Our private internal signals are treated as ignored */
   scss.scss_per_sig[VG_SIGVGKILL].scss_handler = VKI_SIG_IGN;
   scss.scss_per_sig[VG_SIGVGKILL].scss_flags   = VKI_SA_SIGINFO;
   VG_(sigfillset)(&scss.scss_per_sig[VG_SIGVGKILL].scss_mask);

   /* Copy the process' signal mask into the root thread. */
   vg_assert(VG_(threads)[1].status == VgTs_Init);
   for (i = 2; i < VG_N_THREADS; i++)
      vg_assert(VG_(threads)[i].status == VgTs_Empty);

   VG_(threads)[1].sig_mask = saved_procmask;
   VG_(threads)[1].tmp_sig_mask = saved_procmask;

   /* Calculate SKSS and apply it.  This also sets the initial kernel
      mask we need to run with. */
   handle_SCSS_change( True /* forced update */ );

   /* Leave with all signals still blocked; the thread scheduler loop
      will set the appropriate mask at the appropriate time. */
}

/*--------------------------------------------------------------------*/
/*--- end                                                          ---*/
/*--------------------------------------------------------------------*/