/* -*- mode: C; c-basic-offset: 3; -*- */

/*--------------------------------------------------------------------*/
/*--- Implementation of POSIX signals.                 m_signals.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of Valgrind, a dynamic binary instrumentation
   framework.

   Copyright (C) 2000-2013 Julian Seward
      jseward (at) acm.org

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/

/*
   Signal handling.

   There are 4 distinct classes of signal:

   1. Synchronous, instruction-generated (SIGILL, FPE, BUS, SEGV and
   TRAP): these are signals as a result of an instruction fault.  If
   we get one while running client code, then we just do the
   appropriate thing.  If it happens while running Valgrind code, then
   it indicates a Valgrind bug.  Note that we "manually" implement
   automatic stack growth, such that if a fault happens near the
   client process stack, it is extended in the same way the kernel
   would, and the fault is never reported to the client program.

   2. Asynchronous variants of the above signals: If the kernel tries
   to deliver a sync signal while it is blocked, it just kills the
   process.  Therefore, we can't block those signals if we want to be
   able to report on bugs in Valgrind.  This means that we're also
   open to receiving those signals from other processes, sent with
   kill.  We could get away with just dropping them, since they aren't
   really signals that processes send to each other.

   3. Synchronous, general signals.  If a thread/process sends itself
   a signal with kill, it's expected to be synchronous: i.e., the
   signal will have been delivered by the time the syscall finishes.

   4. Asynchronous, general signals.  All other signals, sent by
   another process with kill.  These are generally blocked, except for
   two special cases: we poll for them each time we're about to run a
   thread for a time quantum, and while running blocking syscalls.


   In addition, we reserve one signal for internal use: SIGVGKILL.
   SIGVGKILL is used to terminate threads.  When one thread wants
   another to exit, it will set its exitreason and send it SIGVGKILL
   if it appears to be blocked in a syscall.


   We use a kernel thread for each application thread.  When the
   thread allows itself to be open to signals, it sets the thread
   signal mask to what the client application set it to.  This means
   that we get the kernel to do all signal routing: under Valgrind,
   signals get delivered in the same way as in the non-Valgrind case
   (the exception being for the sync signal set, since they're almost
   always unblocked).
 */

/*
   Some more details...

   First off, we take note of the client's requests (via sys_sigaction
   and sys_sigprocmask) to set the signal state (handlers for each
   signal, which are process-wide, + a mask for each signal, which is
   per-thread).  This info is duly recorded in the SCSS (static Client
   signal state) in m_signals.c, and if the client later queries what
   the state is, we merely fish the relevant info out of SCSS and give
   it back.

   However, we set the real signal state in the kernel to something
   entirely different.  This is recorded in SKSS, the static Kernel
   signal state.  What's nice (to the extent that anything is nice
   w.r.t. signals) is that there's a pure function to calculate SKSS
   from SCSS, calculate_SKSS_from_SCSS.  So when the client changes
   SCSS then we recompute the associated SKSS and apply any changes
   from the previous SKSS through to the kernel.

   Now, that said, the general scheme we have now is, that regardless of
   what the client puts into the SCSS (viz, asks for), what we would
   like to do is as follows:

   (1) run code on the virtual CPU with all signals blocked

   (2) at convenient moments for us (that is, when the VCPU stops, and
      control is back with the scheduler), ask the kernel "do you have
      any signals for me?"  and if it does, collect up the info, and
      deliver them to the client (by building sigframes).

   And that's almost what we do.  The signal polling is done by
   VG_(poll_signals), which calls through to VG_(sigtimedwait_zero) to
   do the dirty work (of which more later).
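
   As a rough sketch (a hypothetical simplification -- the real code is
   VG_(poll_signals) further down this file, which also consults the
   queue of pending signals), the poll amounts to a sigtimedwait with a
   zero timeout, so it never blocks:

      vki_siginfo_t si;
      vki_sigset_t  pollset;
      // poll only for signals the client has not blocked
      VG_(sigcomplementset)( &pollset, &tst->sig_mask );
      if (VG_(sigtimedwait_zero)( &pollset, &si ) > 0)
         // got one: hand it to the delivery machinery, which
         // builds a sigframe on the client's stack
         deliver_signal( tid, &si, NULL );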

   By polling signals, rather than catching them, we get to deal with
   them only at convenient moments, rather than having to recover from
   taking a signal while generated code is running.

   Now unfortunately .. the above scheme only works for so-called async
   signals.  An async signal is one which isn't associated with any
   particular instruction, eg Control-C (SIGINT).  For those, it doesn't
   matter if we don't deliver the signal to the client immediately; it
   only matters that we deliver it eventually.  Hence polling is OK.

   But the other group -- sync signals -- are all related by the fact
   that they are various ways for the host CPU to fail to execute an
   instruction: SIGILL, SIGSEGV, SIGFPE.  And they can't be deferred,
   because obviously if a host instruction can't execute, well then we
   have to immediately do Plan B, whatever that is.

   So the next approximation of what happens is:

   (1) run code on vcpu with all async signals blocked

   (2) at convenient moments (when NOT running the vcpu), poll for async
      signals.

   (1) and (2) together imply that if the host does deliver a signal to
      async_signalhandler while the VCPU is running, something's
      seriously wrong.

   (3) when running code on vcpu, don't block sync signals.  Instead
      register sync_signalhandler and catch any such via that.  Of
      course, that means an ugly recovery path if we do -- the
      sync_signalhandler has to longjmp, exiting out of the generated
      code and the assembly-dispatcher thingy that runs it; the jump
      is caught in m_scheduler, which then tells m_signals to deliver
      the signal.

   Now naturally (ha ha) even that might be tolerable, but there's
   something worse: dealing with signals delivered to threads in
   syscalls.

   Obviously from the above, SKSS's signal mask (viz, what we really run
   with) is way different from SCSS's signal mask (viz, what the client
   thread thought it asked for).  (eg) It may well be that the client
   did not block control-C, so that it just expects to drop dead if it
   receives ^C whilst blocked in a syscall, but by default we are
   running with all async signals blocked, and so that signal could be
   arbitrarily delayed, or perhaps even lost (not sure).

   So what we have to do, when doing any syscall which SfMayBlock, is to
   quickly switch in the SCSS-specified signal mask just before the
   syscall, and switch it back just afterwards, and hope that we don't
   get caught up in some weird race condition.  This is the primary
   purpose of the ultra-magical pieces of assembly code in
   coregrind/m_syswrap/syscall-<plat>.S
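
   Schematically (a sketch only -- the real sequence is the hand-written
   assembly in syscall-<plat>.S, precisely because it has to be atomic
   with respect to signal delivery for the restart machinery to work):

      vki_sigset_t saved;
      // let the client's signals through for the duration
      VG_(sigprocmask)( VKI_SIG_SETMASK, &tst->sig_mask, &saved );
      sres = do_the_syscall( sysno, args );   // may be interrupted
      VG_(sigprocmask)( VKI_SIG_SETMASK, &saved, NULL );

   where do_the_syscall is a stand-in name.  The assembly version also
   records exactly which of the three steps the thread had reached when
   a signal arrived, so VG_(fixup_guest_state_after_syscall_interrupted)
   can decide whether to restart the syscall, fail it with EINTR, or
   keep its result.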

   -----------

   The ways in which V can come to hear of signals that need to be
   forwarded to the client are as follows:

    sync signals: can arrive at any time whatsoever.  These are caught
                  by sync_signalhandler

    async signals:

       if    running generated code
       then  these are blocked, so we don't expect to catch them in
             async_signalhandler

       else
       if    thread is blocked in a syscall marked SfMayBlock
       then  signals may be delivered to async_sighandler, since we
             temporarily unblocked them for the duration of the syscall,
             by using the real (SCSS) mask for this thread

       else  we're doing misc housekeeping activities (eg, making a
             translation, washing our hair, etc).  As in the normal case,
             these signals are blocked, but we can and do poll for them
             using VG_(poll_signals).

   Now, re VG_(poll_signals), it polls the kernel by doing
   VG_(sigtimedwait_zero).  This is trivial on Linux, since it's just a
   syscall.  But on Darwin and AIX, we have to cobble together the
   functionality in a tedious, longwinded and probably error-prone way.

   Finally, if GDB is debugging the process under Valgrind, GDB can
   request that a signal be ignored.  So, before resuming the scheduler
   or delivering the signal, we call VG_(gdbserver_report_signal); the
   signal is delivered only if that returns True.
 */

#include "pub_core_basics.h"
#include "pub_core_vki.h"
#include "pub_core_vkiscnums.h"
#include "pub_core_debuglog.h"
#include "pub_core_threadstate.h"
#include "pub_core_xarray.h"
#include "pub_core_clientstate.h"
#include "pub_core_aspacemgr.h"
#include "pub_core_debugger.h"      // For VG_(start_debugger)
#include "pub_core_errormgr.h"
#include "pub_core_gdbserver.h"
#include "pub_core_libcbase.h"
#include "pub_core_libcassert.h"
#include "pub_core_libcprint.h"
#include "pub_core_libcproc.h"
#include "pub_core_libcsignal.h"
#include "pub_core_machine.h"
#include "pub_core_mallocfree.h"
#include "pub_core_options.h"
#include "pub_core_scheduler.h"
#include "pub_core_signals.h"
#include "pub_core_sigframe.h"      // For VG_(sigframe_create)()
#include "pub_core_stacks.h"        // For VG_(change_stack)()
#include "pub_core_stacktrace.h"    // For VG_(get_and_pp_StackTrace)()
#include "pub_core_syscall.h"
#include "pub_core_syswrap.h"
#include "pub_core_tooliface.h"
#include "pub_core_coredump.h"


/* ---------------------------------------------------------------------
   Forwards decls.
   ------------------------------------------------------------------ */

static void sync_signalhandler  ( Int sigNo, vki_siginfo_t *info,
                                             struct vki_ucontext * );
static void async_signalhandler ( Int sigNo, vki_siginfo_t *info,
                                             struct vki_ucontext * );
static void sigvgkill_handler   ( Int sigNo, vki_siginfo_t *info,
                                             struct vki_ucontext * );

/* Maximum usable signal. */
Int VG_(max_signal) = _VKI_NSIG;

#define N_QUEUED_SIGNALS   8

typedef struct SigQueue {
   Int next;
   vki_siginfo_t sigs[N_QUEUED_SIGNALS];
} SigQueue;

/* ------ Macros for pulling stuff out of ucontexts ------ */

/* Q: what does VG_UCONTEXT_SYSCALL_SYSRES do?  A: let's suppose the
   machine context (uc) reflects the situation that a syscall had just
   completed, quite literally -- that is, that the program counter was
   now at the instruction following the syscall.  (or we're slightly
   downstream, but we're sure no relevant register has yet changed
   value.)  Then VG_UCONTEXT_SYSCALL_SYSRES returns a SysRes reflecting
   the result of the syscall; it does this by fishing relevant bits of
   the machine state out of the uc.  Of course if the program counter
   was somewhere else entirely then the result is likely to be
   meaningless, so the caller of VG_UCONTEXT_SYSCALL_SYSRES has to be
   very careful to pay attention to the results only when it is sure
   that the said constraint on the program counter is indeed valid. */
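
/* For example, a caller that believes a thread was interrupted just
   after a syscall might do the following (a sketch;
   pc_looks_like_post_syscall is a hypothetical check, and the real
   logic lives in the m_syswrap machinery):

      if (pc_looks_like_post_syscall( tid, VG_UCONTEXT_INSTR_PTR(uc) )) {
         SysRes sres = VG_UCONTEXT_SYSCALL_SYSRES(uc);
         if (sr_isError(sres))
            ... the syscall failed with error sr_Err(sres) ...
         else
            ... the syscall succeeded with value sr_Res(sres) ...
      }

   The point is only that the SysRes must not be consulted unless the
   program-counter constraint has been checked. */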

#if defined(VGP_x86_linux)
#  define VG_UCONTEXT_INSTR_PTR(uc)       ((uc)->uc_mcontext.eip)
#  define VG_UCONTEXT_STACK_PTR(uc)       ((uc)->uc_mcontext.esp)
#  define VG_UCONTEXT_SYSCALL_SYSRES(uc)                        \
      /* Convert the value in uc_mcontext.eax into a SysRes. */ \
      VG_(mk_SysRes_x86_linux)( (uc)->uc_mcontext.eax )
#  define VG_UCONTEXT_TO_UnwindStartRegs(srP, uc)        \
      { (srP)->r_pc = (ULong)((uc)->uc_mcontext.eip);    \
        (srP)->r_sp = (ULong)((uc)->uc_mcontext.esp);    \
        (srP)->misc.X86.r_ebp = (uc)->uc_mcontext.ebp;   \
      }

#elif defined(VGP_amd64_linux)
#  define VG_UCONTEXT_INSTR_PTR(uc)       ((uc)->uc_mcontext.rip)
#  define VG_UCONTEXT_STACK_PTR(uc)       ((uc)->uc_mcontext.rsp)
#  define VG_UCONTEXT_SYSCALL_SYSRES(uc)                        \
      /* Convert the value in uc_mcontext.rax into a SysRes. */ \
      VG_(mk_SysRes_amd64_linux)( (uc)->uc_mcontext.rax )
#  define VG_UCONTEXT_TO_UnwindStartRegs(srP, uc)        \
      { (srP)->r_pc = (uc)->uc_mcontext.rip;             \
        (srP)->r_sp = (uc)->uc_mcontext.rsp;             \
        (srP)->misc.AMD64.r_rbp = (uc)->uc_mcontext.rbp; \
      }

#elif defined(VGP_ppc32_linux)
/* Comments from Paul Mackerras 25 Nov 05:

   > I'm tracking down a problem where V's signal handling doesn't
   > work properly on a ppc440gx running 2.4.20.  The problem is that
   > the ucontext being presented to V's sighandler seems completely
   > bogus.

   > V's kernel headers and hence ucontext layout are derived from
   > 2.6.9.  I compared include/asm-ppc/ucontext.h from 2.4.20 and
   > 2.6.13.

   > Can I just check my interpretation: the 2.4.20 one contains the
   > uc_mcontext field in line, whereas the 2.6.13 one has a pointer
   > to said struct?  And so if V is using the 2.6.13 struct then a
   > 2.4.20 one will make no sense to it.

   Not quite... what is inline in the 2.4.20 version is a
   sigcontext_struct, not an mcontext.  The sigcontext looks like
   this:

     struct sigcontext_struct {
        unsigned long   _unused[4];
        int             signal;
        unsigned long   handler;
        unsigned long   oldmask;
        struct pt_regs  *regs;
     };

   The regs pointer of that struct ends up at the same offset as the
   uc_regs of the 2.6 struct ucontext, and a struct pt_regs is the
   same as the mc_gregs field of the mcontext.  In fact the integer
   regs are followed in memory by the floating point regs on 2.4.20.

   Thus if you are using the 2.6 definitions, it should work on 2.4.20
   provided that you go via uc->uc_regs rather than looking in
   uc->uc_mcontext directly.

   There is another subtlety: 2.4.20 doesn't save the vector regs when
   delivering a signal, and 2.6.x only saves the vector regs if the
   process has ever used an Altivec instruction.  If 2.6.x does save
   the vector regs, it sets the MSR_VEC bit in
   uc->uc_regs->mc_gregs[PT_MSR], otherwise it clears it.  That bit
   will always be clear under 2.4.20.  So you can use that bit to tell
   whether uc->uc_regs->mc_vregs is valid. */
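/* Concretely, a consumer of the vector state would guard it like this
   (a sketch only, assuming the 2.6-style layout described above, and
   with MSR_VEC_MASK as a hypothetical name for the MSR_VEC bit):

      if (uc->uc_regs->mc_gregs[VKI_PT_MSR] & MSR_VEC_MASK)
         ... uc->uc_regs->mc_vregs is valid and may be read ...
      else
         ... the process never used Altivec; mc_vregs is garbage ...
*/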
#  define VG_UCONTEXT_INSTR_PTR(uc)  ((uc)->uc_regs->mc_gregs[VKI_PT_NIP])
#  define VG_UCONTEXT_STACK_PTR(uc)  ((uc)->uc_regs->mc_gregs[VKI_PT_R1])
#  define VG_UCONTEXT_SYSCALL_SYSRES(uc)                            \
      /* Convert the values in uc_mcontext r3,cr into a SysRes. */  \
      VG_(mk_SysRes_ppc32_linux)(                                   \
         (uc)->uc_regs->mc_gregs[VKI_PT_R3],                        \
         (((uc)->uc_regs->mc_gregs[VKI_PT_CCR] >> 28) & 1)          \
      )
#  define VG_UCONTEXT_TO_UnwindStartRegs(srP, uc)                     \
      { (srP)->r_pc = (ULong)((uc)->uc_regs->mc_gregs[VKI_PT_NIP]);   \
        (srP)->r_sp = (ULong)((uc)->uc_regs->mc_gregs[VKI_PT_R1]);    \
        (srP)->misc.PPC32.r_lr = (uc)->uc_regs->mc_gregs[VKI_PT_LNK]; \
      }

#elif defined(VGP_ppc64be_linux) || defined(VGP_ppc64le_linux)
#  define VG_UCONTEXT_INSTR_PTR(uc)  ((uc)->uc_mcontext.gp_regs[VKI_PT_NIP])
#  define VG_UCONTEXT_STACK_PTR(uc)  ((uc)->uc_mcontext.gp_regs[VKI_PT_R1])
   /* Dubious hack: if there is an error, only consider the lowest 8
      bits of r3.  memcheck/tests/post-syscall shows a case where an
      interrupted syscall should have produced a ucontext with 0x4
      (VKI_EINTR) in r3 but is in fact producing 0x204. */
   /* Awaiting clarification from PaulM.  Evidently 0x204 is
      ERESTART_RESTARTBLOCK, which shouldn't have made it into user
      space. */
   static inline SysRes VG_UCONTEXT_SYSCALL_SYSRES( struct vki_ucontext* uc )
   {
      ULong err = (uc->uc_mcontext.gp_regs[VKI_PT_CCR] >> 28) & 1;
      ULong r3  = uc->uc_mcontext.gp_regs[VKI_PT_R3];
      if (err) r3 &= 0xFF;
      return VG_(mk_SysRes_ppc64_linux)( r3, err );
   }
#  define VG_UCONTEXT_TO_UnwindStartRegs(srP, uc)                       \
      { (srP)->r_pc = (uc)->uc_mcontext.gp_regs[VKI_PT_NIP];            \
        (srP)->r_sp = (uc)->uc_mcontext.gp_regs[VKI_PT_R1];             \
        (srP)->misc.PPC64.r_lr = (uc)->uc_mcontext.gp_regs[VKI_PT_LNK]; \
      }

#elif defined(VGP_arm_linux)
#  define VG_UCONTEXT_INSTR_PTR(uc)       ((uc)->uc_mcontext.arm_pc)
#  define VG_UCONTEXT_STACK_PTR(uc)       ((uc)->uc_mcontext.arm_sp)
#  define VG_UCONTEXT_SYSCALL_SYSRES(uc)                           \
      /* Convert the value in uc_mcontext.arm_r0 into a SysRes. */ \
      VG_(mk_SysRes_arm_linux)( (uc)->uc_mcontext.arm_r0 )
#  define VG_UCONTEXT_TO_UnwindStartRegs(srP, uc)       \
      { (srP)->r_pc = (uc)->uc_mcontext.arm_pc;         \
        (srP)->r_sp = (uc)->uc_mcontext.arm_sp;         \
        (srP)->misc.ARM.r14 = (uc)->uc_mcontext.arm_lr; \
        (srP)->misc.ARM.r12 = (uc)->uc_mcontext.arm_ip; \
        (srP)->misc.ARM.r11 = (uc)->uc_mcontext.arm_fp; \
        (srP)->misc.ARM.r7  = (uc)->uc_mcontext.arm_r7; \
      }

#elif defined(VGP_arm64_linux)
#  define VG_UCONTEXT_INSTR_PTR(uc)       ((UWord)((uc)->uc_mcontext.pc))
#  define VG_UCONTEXT_STACK_PTR(uc)       ((UWord)((uc)->uc_mcontext.sp))
#  define VG_UCONTEXT_SYSCALL_SYSRES(uc)                            \
      /* Convert the value in uc_mcontext.regs[0] into a SysRes. */ \
      VG_(mk_SysRes_arm64_linux)( (uc)->uc_mcontext.regs[0] )
#  define VG_UCONTEXT_TO_UnwindStartRegs(srP, uc)           \
      { (srP)->r_pc = (uc)->uc_mcontext.pc;                 \
        (srP)->r_sp = (uc)->uc_mcontext.sp;                 \
        (srP)->misc.ARM64.x29 = (uc)->uc_mcontext.regs[29]; \
        (srP)->misc.ARM64.x30 = (uc)->uc_mcontext.regs[30]; \
      }

#elif defined(VGP_x86_darwin)

   static inline Addr VG_UCONTEXT_INSTR_PTR( void* ucV ) {
      ucontext_t* uc = (ucontext_t*)ucV;
      struct __darwin_mcontext32* mc = uc->uc_mcontext;
      struct __darwin_i386_thread_state* ss = &mc->__ss;
      return ss->__eip;
   }
   static inline Addr VG_UCONTEXT_STACK_PTR( void* ucV ) {
      ucontext_t* uc = (ucontext_t*)ucV;
      struct __darwin_mcontext32* mc = uc->uc_mcontext;
      struct __darwin_i386_thread_state* ss = &mc->__ss;
      return ss->__esp;
   }
   static inline SysRes VG_UCONTEXT_SYSCALL_SYSRES( void* ucV,
                                                    UWord scclass ) {
      /* this is complicated by the problem that there are 3 different
         kinds of syscalls, each with its own return convention.
         NB: scclass is a host word, hence UWord is good for both
         amd64-darwin and x86-darwin */
      ucontext_t* uc = (ucontext_t*)ucV;
      struct __darwin_mcontext32* mc = uc->uc_mcontext;
      struct __darwin_i386_thread_state* ss = &mc->__ss;
      /* duplicates logic in m_syswrap.getSyscallStatusFromGuestState */
      UInt carry = 1 & ss->__eflags;
      UInt err = 0;
      UInt wLO = 0;
      UInt wHI = 0;
      switch (scclass) {
         case VG_DARWIN_SYSCALL_CLASS_UNIX:
            err = carry;
            wLO = ss->__eax;
            wHI = ss->__edx;
            break;
         case VG_DARWIN_SYSCALL_CLASS_MACH:
            wLO = ss->__eax;
            break;
         case VG_DARWIN_SYSCALL_CLASS_MDEP:
            wLO = ss->__eax;
            break;
         default:
            vg_assert(0);
            break;
      }
      return VG_(mk_SysRes_x86_darwin)( scclass, err ? True : False,
                                        wHI, wLO );
   }
   static inline
   void VG_UCONTEXT_TO_UnwindStartRegs( UnwindStartRegs* srP,
                                        void* ucV ) {
      ucontext_t* uc = (ucontext_t*)(ucV);
      struct __darwin_mcontext32* mc = uc->uc_mcontext;
      struct __darwin_i386_thread_state* ss = &mc->__ss;
      srP->r_pc = (ULong)(ss->__eip);
      srP->r_sp = (ULong)(ss->__esp);
      srP->misc.X86.r_ebp = (UInt)(ss->__ebp);
   }

#elif defined(VGP_amd64_darwin)

   static inline Addr VG_UCONTEXT_INSTR_PTR( void* ucV ) {
      ucontext_t* uc = (ucontext_t*)ucV;
      struct __darwin_mcontext64* mc = uc->uc_mcontext;
      struct __darwin_x86_thread_state64* ss = &mc->__ss;
      return ss->__rip;
   }
   static inline Addr VG_UCONTEXT_STACK_PTR( void* ucV ) {
      ucontext_t* uc = (ucontext_t*)ucV;
      struct __darwin_mcontext64* mc = uc->uc_mcontext;
      struct __darwin_x86_thread_state64* ss = &mc->__ss;
      return ss->__rsp;
   }
   static inline SysRes VG_UCONTEXT_SYSCALL_SYSRES( void* ucV,
                                                    UWord scclass ) {
      /* This is copied from the x86-darwin case.  I'm not sure if it
         is correct. */
      ucontext_t* uc = (ucontext_t*)ucV;
      struct __darwin_mcontext64* mc = uc->uc_mcontext;
      struct __darwin_x86_thread_state64* ss = &mc->__ss;
      /* duplicates logic in m_syswrap.getSyscallStatusFromGuestState */
      ULong carry = 1 & ss->__rflags;
      ULong err = 0;
      ULong wLO = 0;
      ULong wHI = 0;
      switch (scclass) {
         case VG_DARWIN_SYSCALL_CLASS_UNIX:
            err = carry;
            wLO = ss->__rax;
            wHI = ss->__rdx;
            break;
         case VG_DARWIN_SYSCALL_CLASS_MACH:
            wLO = ss->__rax;
            break;
         case VG_DARWIN_SYSCALL_CLASS_MDEP:
            wLO = ss->__rax;
            break;
         default:
            vg_assert(0);
            break;
      }
      return VG_(mk_SysRes_amd64_darwin)( scclass, err ? True : False,
                                          wHI, wLO );
   }
   static inline
   void VG_UCONTEXT_TO_UnwindStartRegs( UnwindStartRegs* srP,
                                        void* ucV ) {
      ucontext_t* uc = (ucontext_t*)ucV;
      struct __darwin_mcontext64* mc = uc->uc_mcontext;
      struct __darwin_x86_thread_state64* ss = &mc->__ss;
      srP->r_pc = (ULong)(ss->__rip);
      srP->r_sp = (ULong)(ss->__rsp);
      srP->misc.AMD64.r_rbp = (ULong)(ss->__rbp);
   }

#elif defined(VGP_s390x_linux)

#  define VG_UCONTEXT_INSTR_PTR(uc)       ((uc)->uc_mcontext.regs.psw.addr)
#  define VG_UCONTEXT_STACK_PTR(uc)       ((uc)->uc_mcontext.regs.gprs[15])
#  define VG_UCONTEXT_FRAME_PTR(uc)       ((uc)->uc_mcontext.regs.gprs[11])
#  define VG_UCONTEXT_SYSCALL_SYSRES(uc)                        \
      VG_(mk_SysRes_s390x_linux)((uc)->uc_mcontext.regs.gprs[2])
#  define VG_UCONTEXT_LINK_REG(uc) ((uc)->uc_mcontext.regs.gprs[14])

#  define VG_UCONTEXT_TO_UnwindStartRegs(srP, uc)                  \
      { (srP)->r_pc = (ULong)((uc)->uc_mcontext.regs.psw.addr);    \
        (srP)->r_sp = (ULong)((uc)->uc_mcontext.regs.gprs[15]);    \
        (srP)->misc.S390X.r_fp = (uc)->uc_mcontext.regs.gprs[11];  \
        (srP)->misc.S390X.r_lr = (uc)->uc_mcontext.regs.gprs[14];  \
      }

#elif defined(VGP_mips32_linux)
#  define VG_UCONTEXT_INSTR_PTR(uc)   ((UWord)(((uc)->uc_mcontext.sc_pc)))
#  define VG_UCONTEXT_STACK_PTR(uc)   ((UWord)((uc)->uc_mcontext.sc_regs[29]))
#  define VG_UCONTEXT_FRAME_PTR(uc)       ((uc)->uc_mcontext.sc_regs[30])
#  define VG_UCONTEXT_SYSCALL_NUM(uc)     ((uc)->uc_mcontext.sc_regs[2])
#  define VG_UCONTEXT_SYSCALL_SYSRES(uc)                            \
      /* Convert the values in sc_regs[2,3,7] into a SysRes. */     \
      VG_(mk_SysRes_mips32_linux)( (uc)->uc_mcontext.sc_regs[2],    \
                                   (uc)->uc_mcontext.sc_regs[3],    \
                                   (uc)->uc_mcontext.sc_regs[7])

#  define VG_UCONTEXT_TO_UnwindStartRegs(srP, uc)               \
      { (srP)->r_pc = (uc)->uc_mcontext.sc_pc;                  \
        (srP)->r_sp = (uc)->uc_mcontext.sc_regs[29];            \
        (srP)->misc.MIPS32.r30 = (uc)->uc_mcontext.sc_regs[30]; \
        (srP)->misc.MIPS32.r31 = (uc)->uc_mcontext.sc_regs[31]; \
        (srP)->misc.MIPS32.r28 = (uc)->uc_mcontext.sc_regs[28]; \
      }

#elif defined(VGP_mips64_linux)
#  define VG_UCONTEXT_INSTR_PTR(uc)       (((uc)->uc_mcontext.sc_pc))
#  define VG_UCONTEXT_STACK_PTR(uc)       ((uc)->uc_mcontext.sc_regs[29])
#  define VG_UCONTEXT_FRAME_PTR(uc)       ((uc)->uc_mcontext.sc_regs[30])
#  define VG_UCONTEXT_SYSCALL_NUM(uc)     ((uc)->uc_mcontext.sc_regs[2])
#  define VG_UCONTEXT_SYSCALL_SYSRES(uc)                        \
      /* Convert the values in sc_regs[2,3,7] into a SysRes. */ \
      VG_(mk_SysRes_mips64_linux)((uc)->uc_mcontext.sc_regs[2], \
                                  (uc)->uc_mcontext.sc_regs[3], \
                                  (uc)->uc_mcontext.sc_regs[7])

#  define VG_UCONTEXT_TO_UnwindStartRegs(srP, uc)               \
      { (srP)->r_pc = (uc)->uc_mcontext.sc_pc;                  \
        (srP)->r_sp = (uc)->uc_mcontext.sc_regs[29];            \
        (srP)->misc.MIPS64.r30 = (uc)->uc_mcontext.sc_regs[30]; \
        (srP)->misc.MIPS64.r31 = (uc)->uc_mcontext.sc_regs[31]; \
        (srP)->misc.MIPS64.r28 = (uc)->uc_mcontext.sc_regs[28]; \
      }
#elif defined(VGP_tilegx_linux)
#  define VG_UCONTEXT_INSTR_PTR(uc)       ((uc)->uc_mcontext.pc)
#  define VG_UCONTEXT_STACK_PTR(uc)       ((uc)->uc_mcontext.sp)
#  define VG_UCONTEXT_FRAME_PTR(uc)       ((uc)->uc_mcontext.gregs[52])
#  define VG_UCONTEXT_SYSCALL_NUM(uc)     ((uc)->uc_mcontext.gregs[10])
#  define VG_UCONTEXT_SYSCALL_SYSRES(uc)                              \
      /* Convert the value in uc_mcontext.gregs[0] into a SysRes. */  \
      VG_(mk_SysRes_tilegx_linux)((uc)->uc_mcontext.gregs[0])
#  define VG_UCONTEXT_TO_UnwindStartRegs(srP, uc)              \
      { (srP)->r_pc = (uc)->uc_mcontext.pc;                    \
        (srP)->r_sp = (uc)->uc_mcontext.sp;                    \
        (srP)->misc.TILEGX.r52 = (uc)->uc_mcontext.gregs[52];  \
        (srP)->misc.TILEGX.r55 = (uc)->uc_mcontext.lr;         \
      }
#else
#  error Unknown platform
#endif


/* ------ Macros for pulling stuff out of siginfos ------ */

/* These macros allow use of uniform names when working with
   both the Linux and Darwin vki definitions. */
#if defined(VGO_linux)
#  define VKI_SIGINFO_si_addr  _sifields._sigfault._addr
#  define VKI_SIGINFO_si_pid   _sifields._kill._pid
#elif defined(VGO_darwin)
#  define VKI_SIGINFO_si_addr  si_addr
#  define VKI_SIGINFO_si_pid   si_pid
#else
#  error Unknown OS
#endif


/* ---------------------------------------------------------------------
   HIGH LEVEL STUFF TO DO WITH SIGNALS: POLICY (MOSTLY)
   ------------------------------------------------------------------ */

/* ---------------------------------------------------------------------
   Signal state for this process.
   ------------------------------------------------------------------ */


/* Base-ment of these arrays[_VKI_NSIG].

   Valid signal numbers are 1 .. _VKI_NSIG inclusive.
   Rather than subtracting 1 for indexing these arrays, which
   is tedious and error-prone, they are simply dimensioned 1 larger,
   and entry [0] is not used.
 */


/* -----------------------------------------------------
   Static client signal state (SCSS).  This is the state
   that the client thinks it has the kernel in.
   SCSS records verbatim the client's settings.  These
   are mashed around only when SKSS is calculated from it.
   -------------------------------------------------- */

typedef
   struct {
      void* scss_handler;  /* VKI_SIG_DFL or VKI_SIG_IGN or ptr to
                              client's handler */
      UInt  scss_flags;
      vki_sigset_t scss_mask;
      void* scss_restorer; /* where sigreturn goes */
      void* scss_sa_tramp; /* sa_tramp setting, Darwin only */
      /* re _restorer and _sa_tramp, we merely record the values
         supplied when the client does 'sigaction' and give them back
         when requested.  Otherwise they are simply ignored. */
   }
   SCSS_Per_Signal;

typedef
   struct {
      /* per-signal info */
      SCSS_Per_Signal scss_per_sig[1+_VKI_NSIG];

      /* Additional elements to SCSS not stored here:
         - for each thread, the thread's blocking mask
         - for each thread in WaitSIG, the set of waited-on sigs
      */
   }
   SCSS;

static SCSS scss;


/* -----------------------------------------------------
   Static kernel signal state (SKSS).  This is the state
   that we have the kernel in.  It is computed from SCSS.
   -------------------------------------------------- */

/* Let's do:
     sigprocmask assigns to all thread masks
     so that at least everything is always consistent
   Flags:
     SA_SIGINFO -- we always set it, and honour it for the client
     SA_NOCLDSTOP -- passed to kernel
     SA_ONESHOT or SA_RESETHAND -- pass through
     SA_RESTART -- we observe this but set our handlers to always restart
     SA_NOMASK or SA_NODEFER -- we observe this, but our handlers block everything
     SA_ONSTACK -- pass through
     SA_NOCLDWAIT -- pass through
*/


typedef
   struct {
      void* skss_handler;  /* VKI_SIG_DFL or VKI_SIG_IGN
                              or ptr to our handler */
      UInt skss_flags;
      /* There is no skss_mask, since we know that we will always ask
         for all signals to be blocked in our sighandlers. */
      /* Also there is no skss_restorer. */
   }
   SKSS_Per_Signal;

typedef
   struct {
      SKSS_Per_Signal skss_per_sig[1+_VKI_NSIG];
   }
   SKSS;

static SKSS skss;

/* Returns True if the signal is to be ignored.  Deciding this may
   involve calling gdbserver for this tid. */
static Bool is_sig_ign(vki_siginfo_t *info, ThreadId tid)
{
   vg_assert(info->si_signo >= 1 && info->si_signo <= _VKI_NSIG);

   /* If VG_(gdbserver_report_signal) says to report the signal, check
      whether the client set this signal to be ignored.  GDB might have
      modified si_signo, so we check after the call to gdbserver. */
   return !VG_(gdbserver_report_signal) (info, tid)
      || scss.scss_per_sig[info->si_signo].scss_handler == VKI_SIG_IGN;
}

/* ---------------------------------------------------------------------
   Compute the SKSS required by the current SCSS.
   ------------------------------------------------------------------ */

static
void pp_SKSS ( void )
{
   Int sig;
   VG_(printf)("\n\nSKSS:\n");
   for (sig = 1; sig <= _VKI_NSIG; sig++) {
      VG_(printf)("sig %d:  handler %p,  flags 0x%x\n", sig,
                  skss.skss_per_sig[sig].skss_handler,
                  skss.skss_per_sig[sig].skss_flags );
   }
}

/* This is the core, clever bit.  Computation is as follows:

   For each signal
      handler = if client has a handler, then our handler
                else if client is DFL, then our handler as well
                else (client must be IGN), then handler is IGN
*/
static
void calculate_SKSS_from_SCSS ( SKSS* dst )
{
   Int   sig;
   UInt  scss_flags;
   UInt  skss_flags;

   for (sig = 1; sig <= _VKI_NSIG; sig++) {
      void *skss_handler;
      void *scss_handler;

      scss_handler = scss.scss_per_sig[sig].scss_handler;
      scss_flags   = scss.scss_per_sig[sig].scss_flags;

      switch(sig) {
      case VKI_SIGSEGV:
      case VKI_SIGBUS:
      case VKI_SIGFPE:
      case VKI_SIGILL:
      case VKI_SIGTRAP:
         /* For these, we always want to catch them and report, even
            if the client code doesn't. */
         skss_handler = sync_signalhandler;
         break;

      case VKI_SIGCONT:
         /* Let the kernel handle SIGCONT unless the client is actually
            catching it. */
      case VKI_SIGCHLD:
      case VKI_SIGWINCH:
      case VKI_SIGURG:
         /* For signals which have a default action of Ignore,
            only set a handler if the client has set a signal handler.
            Otherwise the kernel will interrupt a syscall which
            wouldn't have otherwise been interrupted. */
         if (scss.scss_per_sig[sig].scss_handler == VKI_SIG_DFL)
            skss_handler = VKI_SIG_DFL;
         else if (scss.scss_per_sig[sig].scss_handler == VKI_SIG_IGN)
            skss_handler = VKI_SIG_IGN;
         else
            skss_handler = async_signalhandler;
         break;

      default:
         // VKI_SIGVG* are runtime variables, so we can't make them
         // cases in the switch, so we handle them in the 'default' case.
         if (sig == VG_SIGVGKILL)
            skss_handler = sigvgkill_handler;
         else {
            if (scss_handler == VKI_SIG_IGN)
               skss_handler = VKI_SIG_IGN;
            else
               skss_handler = async_signalhandler;
         }
         break;
      }

      /* Flags */

      skss_flags = 0;

      /* SA_NOCLDSTOP, SA_NOCLDWAIT: pass to kernel */
      skss_flags |= scss_flags & (VKI_SA_NOCLDSTOP | VKI_SA_NOCLDWAIT);

      /* SA_ONESHOT: ignore client setting */

      /* SA_RESTART: ignore client setting and always set it for us.
         Though we never rely on the kernel to restart a
         syscall, we observe whether it wanted to restart the syscall
         or not, which is needed by
         VG_(fixup_guest_state_after_syscall_interrupted) */
      skss_flags |= VKI_SA_RESTART;

      /* SA_NOMASK: ignore it */

      /* SA_ONSTACK: client setting is irrelevant here */
      /* We don't set a signal stack, so ignore */

      /* always ask for SA_SIGINFO */
      skss_flags |= VKI_SA_SIGINFO;

      /* use our own restorer */
      skss_flags |= VKI_SA_RESTORER;

      /* Create SKSS entry for this signal. */
      if (sig != VKI_SIGKILL && sig != VKI_SIGSTOP)
         dst->skss_per_sig[sig].skss_handler = skss_handler;
      else
         dst->skss_per_sig[sig].skss_handler = VKI_SIG_DFL;

      dst->skss_per_sig[sig].skss_flags   = skss_flags;
   }

   /* Sanity checks. */
   vg_assert(dst->skss_per_sig[VKI_SIGKILL].skss_handler == VKI_SIG_DFL);
   vg_assert(dst->skss_per_sig[VKI_SIGSTOP].skss_handler == VKI_SIG_DFL);

   if (0)
      pp_SKSS();
}


/* ---------------------------------------------------------------------
   After a possible SCSS change, update SKSS and the kernel itself.
   ------------------------------------------------------------------ */

// We need two levels of macro-expansion here to convert __NR_rt_sigreturn
// to a number before converting it to a string... sigh.
extern void my_sigreturn(void);

#if defined(VGP_x86_linux)
#  define _MY_SIGRETURN(name) \
   ".text\n" \
   ".globl my_sigreturn\n" \
   "my_sigreturn:\n" \
   "   movl $" #name ", %eax\n" \
   "   int  $0x80\n" \
   ".previous\n"

#elif defined(VGP_amd64_linux)
#  define _MY_SIGRETURN(name) \
   ".text\n" \
   ".globl my_sigreturn\n" \
   "my_sigreturn:\n" \
   "   movq $" #name ", %rax\n" \
   "   syscall\n" \
   ".previous\n"

#elif defined(VGP_ppc32_linux)
#  define _MY_SIGRETURN(name) \
   ".text\n" \
   ".globl my_sigreturn\n" \
   "my_sigreturn:\n" \
   "   li 0, " #name "\n" \
   "   sc\n" \
   ".previous\n"

#elif defined(VGP_ppc64be_linux)
#  define _MY_SIGRETURN(name) \
   ".align   2\n" \
   ".globl   my_sigreturn\n" \
   ".section \".opd\",\"aw\"\n" \
   ".align   3\n" \
   "my_sigreturn:\n" \
   ".quad    .my_sigreturn,.TOC.@tocbase,0\n" \
   ".previous\n" \
   ".type    .my_sigreturn,@function\n" \
   ".globl   .my_sigreturn\n" \
   ".my_sigreturn:\n" \
   "   li 0, " #name "\n" \
   "   sc\n"

#elif defined(VGP_ppc64le_linux)
/* Little Endian supports ELF version 2.  In the future, it may
 * support other versions.
 */
#  define _MY_SIGRETURN(name) \
   ".align   2\n" \
   ".globl   my_sigreturn\n" \
   ".type    .my_sigreturn,@function\n" \
   "my_sigreturn:\n" \
   "#if _CALL_ELF == 2 \n" \
   "0: addis        2,12,.TOC.-0b@ha\n" \
   "   addi         2,2,.TOC.-0b@l\n" \
   "   .localentry my_sigreturn,.-my_sigreturn\n" \
   "#endif \n" \
   "   sc\n" \
   "   .size my_sigreturn,.-my_sigreturn\n"

#elif defined(VGP_arm_linux)
#  define _MY_SIGRETURN(name) \
   ".text\n" \
   ".globl my_sigreturn\n" \
   "my_sigreturn:\n\t" \
   "    mov  r7, #" #name "\n\t" \
   "    svc  0x00000000\n" \
   ".previous\n"

#elif defined(VGP_arm64_linux)
#  define _MY_SIGRETURN(name) \
   ".text\n" \
   ".globl my_sigreturn\n" \
   "my_sigreturn:\n\t" \
   "    mov  x8, #" #name "\n\t" \
   "    svc  0x0\n" \
   ".previous\n"

#elif defined(VGP_x86_darwin)
#  define _MY_SIGRETURN(name) \
   ".text\n" \
   ".globl my_sigreturn\n" \
   "my_sigreturn:\n" \
   "movl $" VG_STRINGIFY(__NR_DARWIN_FAKE_SIGRETURN) ",%eax\n" \
   "int $0x80"

#elif defined(VGP_amd64_darwin)
   // DDD: todo
#  define _MY_SIGRETURN(name) \
   ".text\n" \
   ".globl my_sigreturn\n" \
   "my_sigreturn:\n" \
   "ud2\n"

#elif defined(VGP_s390x_linux)
#  define _MY_SIGRETURN(name) \
   ".text\n" \
   ".globl my_sigreturn\n" \
   "my_sigreturn:\n" \
   " svc " #name "\n" \
   ".previous\n"

#elif defined(VGP_mips32_linux)
#  define _MY_SIGRETURN(name) \
   ".text\n" \
   "my_sigreturn:\n" \
   "   li $2, " #name "\n" /* apparently $2 is v0 */ \
   "   syscall\n" \
   ".previous\n"

#elif defined(VGP_mips64_linux)
#  define _MY_SIGRETURN(name) \
   ".text\n" \
   "my_sigreturn:\n" \
   "   li $2, " #name "\n" \
   "   syscall\n" \
   ".previous\n"

#elif defined(VGP_tilegx_linux)
#  define _MY_SIGRETURN(name) \
   ".text\n" \
   "my_sigreturn:\n" \
   " moveli r10, " #name "\n" \
   " swint1\n" \
   ".previous\n"

#else
#  error Unknown platform
#endif

#define MY_SIGRETURN(name)  _MY_SIGRETURN(name)
asm(
   MY_SIGRETURN(__NR_rt_sigreturn)
);
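
/* Why two levels of macro-expansion?  With only one level, the
   argument would be stringified before being expanded, yielding the
   literal text "__NR_rt_sigreturn" in the assembly rather than its
   numeric value.  A minimal illustration of the general trick:

      #define STR1(x)  #x
      #define STR2(x)  STR1(x)
      #define N        42
      STR1(N)   // expands to "N"  (# suppresses expansion of x)
      STR2(N)   // expands to "42" (N is expanded first, then stringified)
*/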


static void handle_SCSS_change ( Bool force_update )
{
   Int  res, sig;
   SKSS skss_old;
   vki_sigaction_toK_t   ksa;
   vki_sigaction_fromK_t ksa_old;

   /* Remember old SKSS and calculate new one. */
   skss_old = skss;
   calculate_SKSS_from_SCSS ( &skss );

   /* Compare the new SKSS entries vs the old ones, and update kernel
      where they differ. */
   for (sig = 1; sig <= VG_(max_signal); sig++) {

      /* Trying to do anything with SIGKILL is pointless; just ignore
         it. */
      if (sig == VKI_SIGKILL || sig == VKI_SIGSTOP)
         continue;

      if (!force_update) {
         if ((skss_old.skss_per_sig[sig].skss_handler
              == skss.skss_per_sig[sig].skss_handler)
             && (skss_old.skss_per_sig[sig].skss_flags
                 == skss.skss_per_sig[sig].skss_flags))
            /* no difference */
            continue;
      }

      ksa.ksa_handler = skss.skss_per_sig[sig].skss_handler;
      ksa.sa_flags    = skss.skss_per_sig[sig].skss_flags;
#     if !defined(VGP_ppc32_linux) && \
         !defined(VGP_x86_darwin) && !defined(VGP_amd64_darwin) && \
         !defined(VGP_mips32_linux)
      ksa.sa_restorer = my_sigreturn;
#     endif
      /* Re above ifdef (also the assertion below), PaulM says:
         The sa_restorer field is not used at all on ppc.  Glibc
         converts the sigaction you give it into a kernel sigaction,
         but it doesn't put anything in the sa_restorer field.
      */

      /* block all signals in handler */
      VG_(sigfillset)( &ksa.sa_mask );
      VG_(sigdelset)( &ksa.sa_mask, VKI_SIGKILL );
      VG_(sigdelset)( &ksa.sa_mask, VKI_SIGSTOP );

      if (VG_(clo_trace_signals) && VG_(clo_verbosity) > 2)
         VG_(dmsg)("setting ksig %d to: hdlr %p, flags 0x%lx, "
                   "mask(msb..lsb) 0x%llx 0x%llx\n",
                   sig, ksa.ksa_handler,
                   (UWord)ksa.sa_flags,
                   _VKI_NSIG_WORDS > 1 ? (ULong)ksa.sa_mask.sig[1] : 0,
                   (ULong)ksa.sa_mask.sig[0]);

      res = VG_(sigaction)( sig, &ksa, &ksa_old );
      vg_assert(res == 0);

      /* Since we got the old sigaction more or less for free, might
         as well extract the maximum sanity-check value from it. */
      if (!force_update) {
         vg_assert(ksa_old.ksa_handler
                   == skss_old.skss_per_sig[sig].skss_handler);
         vg_assert(ksa_old.sa_flags
                   == skss_old.skss_per_sig[sig].skss_flags);
#        if !defined(VGP_ppc32_linux) && \
            !defined(VGP_x86_darwin) && !defined(VGP_amd64_darwin) && \
            !defined(VGP_mips32_linux) && !defined(VGP_mips64_linux)
         vg_assert(ksa_old.sa_restorer == my_sigreturn);
#        endif
         VG_(sigaddset)( &ksa_old.sa_mask, VKI_SIGKILL );
         VG_(sigaddset)( &ksa_old.sa_mask, VKI_SIGSTOP );
         vg_assert(VG_(isfullsigset)( &ksa_old.sa_mask ));
      }
   }
}


/* ---------------------------------------------------------------------
   Update/query SCSS in accordance with client requests.
   ------------------------------------------------------------------ */

/* Logic for this alt-stack stuff copied directly from do_sigaltstack
   in kernel/signal.[ch] */

/* True if we are on the alternate signal stack.  */
static Bool on_sig_stack ( ThreadId tid, Addr m_SP )
{
   ThreadState *tst = VG_(get_ThreadState)(tid);

   return (m_SP - (Addr)tst->altstack.ss_sp < (Addr)tst->altstack.ss_size);
}
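
/* Note that the single unsigned comparison above covers both ends of
   the range: if m_SP is below ss_sp, the subtraction wraps around to
   a huge value and the "<" test fails.  For example, with
   ss_sp = 0x1000 and ss_size = 0x100: m_SP = 0x10ff gives 0xff (on
   stack), m_SP = 0x1100 gives 0x100 (just past the end, not on
   stack), and m_SP = 0x0f00 wraps to a huge value (not on stack). */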

static Int sas_ss_flags ( ThreadId tid, Addr m_SP )
{
   ThreadState *tst = VG_(get_ThreadState)(tid);

   return (tst->altstack.ss_size == 0
              ? VKI_SS_DISABLE
              : on_sig_stack(tid, m_SP) ? VKI_SS_ONSTACK : 0);
}


SysRes VG_(do_sys_sigaltstack) ( ThreadId tid, vki_stack_t* ss,
                                 vki_stack_t* oss )
{
   Addr m_SP;

   vg_assert(VG_(is_valid_tid)(tid));
   m_SP  = VG_(get_SP)(tid);

   if (VG_(clo_trace_signals))
      VG_(dmsg)("sys_sigaltstack: tid %d, "
                "ss %p{%p,sz=%llu,flags=0x%llx}, oss %p (current SP %p)\n",
                tid, (void*)ss,
                ss ? ss->ss_sp : 0,
                (ULong)(ss ? ss->ss_size : 0),
                (ULong)(ss ? ss->ss_flags : 0),
                (void*)oss, (void*)m_SP);

   if (oss != NULL) {
      oss->ss_sp    = VG_(threads)[tid].altstack.ss_sp;
      oss->ss_size  = VG_(threads)[tid].altstack.ss_size;
      oss->ss_flags = VG_(threads)[tid].altstack.ss_flags
                      | sas_ss_flags(tid, m_SP);
   }

   if (ss != NULL) {
      if (on_sig_stack(tid, VG_(get_SP)(tid))) {
         return VG_(mk_SysRes_Error)( VKI_EPERM );
      }
      if (ss->ss_flags != VKI_SS_DISABLE
          && ss->ss_flags != VKI_SS_ONSTACK
          && ss->ss_flags != 0) {
         return VG_(mk_SysRes_Error)( VKI_EINVAL );
      }
      if (ss->ss_flags == VKI_SS_DISABLE) {
         VG_(threads)[tid].altstack.ss_flags = VKI_SS_DISABLE;
      } else {
         if (ss->ss_size < VKI_MINSIGSTKSZ) {
            return VG_(mk_SysRes_Error)( VKI_ENOMEM );
         }

         VG_(threads)[tid].altstack.ss_sp    = ss->ss_sp;
         VG_(threads)[tid].altstack.ss_size  = ss->ss_size;
         VG_(threads)[tid].altstack.ss_flags = 0;
      }
   }
   return VG_(mk_SysRes_Success)( 0 );
}


SysRes VG_(do_sys_sigaction) ( Int signo,
                               const vki_sigaction_toK_t* new_act,
                               vki_sigaction_fromK_t* old_act )
{
   if (VG_(clo_trace_signals))
      VG_(dmsg)("sys_sigaction: sigNo %d, "
                "new %#lx, old %#lx, new flags 0x%llx\n",
                signo, (UWord)new_act, (UWord)old_act,
                (ULong)(new_act ? new_act->sa_flags : 0));

   /* Rule out various error conditions.  The aim is to ensure that
      when the call is passed to the kernel it will definitely
      succeed. */

   /* Reject out-of-range signal numbers. */
   if (signo < 1 || signo > VG_(max_signal)) goto bad_signo;

   /* don't let them use our signals */
   if ( (signo > VG_SIGVGRTUSERMAX)
        && new_act
        && !(new_act->ksa_handler == VKI_SIG_DFL
             || new_act->ksa_handler == VKI_SIG_IGN) )
      goto bad_signo_reserved;

   /* Reject attempts to set a handler (or set ignore) for SIGKILL. */
   if ( (signo == VKI_SIGKILL || signo == VKI_SIGSTOP)
       && new_act
       && new_act->ksa_handler != VKI_SIG_DFL)
      goto bad_sigkill_or_sigstop;

   /* If the client supplied non-NULL old_act, copy the relevant SCSS
      entry into it. */
   if (old_act) {
      old_act->ksa_handler = scss.scss_per_sig[signo].scss_handler;
      old_act->sa_flags    = scss.scss_per_sig[signo].scss_flags;
      old_act->sa_mask     = scss.scss_per_sig[signo].scss_mask;
#     if !defined(VGP_x86_darwin) && !defined(VGP_amd64_darwin)
      old_act->sa_restorer = scss.scss_per_sig[signo].scss_restorer;
#     endif
   }

   /* And now copy new SCSS entry from new_act. */
   if (new_act) {
      scss.scss_per_sig[signo].scss_handler  = new_act->ksa_handler;
      scss.scss_per_sig[signo].scss_flags    = new_act->sa_flags;
      scss.scss_per_sig[signo].scss_mask     = new_act->sa_mask;

      scss.scss_per_sig[signo].scss_restorer = NULL;
#     if !defined(VGP_x86_darwin) && !defined(VGP_amd64_darwin)
      scss.scss_per_sig[signo].scss_restorer = new_act->sa_restorer;
#     endif

      scss.scss_per_sig[signo].scss_sa_tramp = NULL;
#     if defined(VGP_x86_darwin) || defined(VGP_amd64_darwin)
      scss.scss_per_sig[signo].scss_sa_tramp = new_act->sa_tramp;
#     endif

      VG_(sigdelset)(&scss.scss_per_sig[signo].scss_mask, VKI_SIGKILL);
      VG_(sigdelset)(&scss.scss_per_sig[signo].scss_mask, VKI_SIGSTOP);
   }

   /* All happy bunnies ... */
   if (new_act) {
      handle_SCSS_change( False /* lazy update */ );
   }
   return VG_(mk_SysRes_Success)( 0 );

  bad_signo:
   if (VG_(showing_core_errors)() && !VG_(clo_xml)) {
      VG_(umsg)("Warning: bad signal number %d in sigaction()\n", signo);
   }
   return VG_(mk_SysRes_Error)( VKI_EINVAL );

  bad_signo_reserved:
   if (VG_(showing_core_errors)() && !VG_(clo_xml)) {
      VG_(umsg)("Warning: ignored attempt to set %s handler in sigaction();\n",
                VG_(signame)(signo));
      VG_(umsg)("         the %s signal is used internally by Valgrind\n",
                VG_(signame)(signo));
   }
   return VG_(mk_SysRes_Error)( VKI_EINVAL );

  bad_sigkill_or_sigstop:
   if (VG_(showing_core_errors)() && !VG_(clo_xml)) {
      VG_(umsg)("Warning: ignored attempt to set %s handler in sigaction();\n",
                VG_(signame)(signo));
      VG_(umsg)("         the %s signal is uncatchable\n",
                VG_(signame)(signo));
   }
   return VG_(mk_SysRes_Error)( VKI_EINVAL );
}


static
void do_sigprocmask_bitops ( Int vki_how,
                             vki_sigset_t* orig_set,
                             vki_sigset_t* modifier )
{
   switch (vki_how) {
      case VKI_SIG_BLOCK:
         VG_(sigaddset_from_set)( orig_set, modifier );
         break;
      case VKI_SIG_UNBLOCK:
         VG_(sigdelset_from_set)( orig_set, modifier );
         break;
      case VKI_SIG_SETMASK:
         *orig_set = *modifier;
         break;
      default:
         VG_(core_panic)("do_sigprocmask_bitops");
         break;
   }
}

   1243 static
   1244 HChar* format_sigset ( const vki_sigset_t* set )
   1245 {
   1246    static HChar buf[_VKI_NSIG_WORDS * 16 + 1];
   1247    int w;
   1248 
   1249    VG_(strcpy)(buf, "");
   1250 
   1251    for (w = _VKI_NSIG_WORDS - 1; w >= 0; w--)
   1252    {
   1253 #     if _VKI_NSIG_BPW == 32
   1254       VG_(sprintf)(buf + VG_(strlen)(buf), "%08llx",
   1255                    set ? (ULong)set->sig[w] : 0);
   1256 #     elif _VKI_NSIG_BPW == 64
   1257       VG_(sprintf)(buf + VG_(strlen)(buf), "%016llx",
   1258                    set ? (ULong)set->sig[w] : 0);
   1259 #     else
   1260 #       error "Unsupported value for _VKI_NSIG_BPW"
   1261 #     endif
   1262    }
   1263 
   1264    return buf;
   1265 }
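
        /* For example, on a target with a single 64-bit mask word, a set
           containing only SIGINT (signal 2) would format as
           "0000000000000002", given the zero-padded conversion above. */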
   1266 
   1267 /*
   1268    This updates the thread's signal mask.  There's no such thing as a
   1269    process-wide signal mask.
   1270 
   1271    Note that the thread signal masks are an implicit part of SCSS,
   1272    which is why this routine is allowed to mess with them.
   1273 */
   1274 static
   1275 void do_setmask ( ThreadId tid,
   1276                   Int how,
   1277                   vki_sigset_t* newset,
   1278 		  vki_sigset_t* oldset )
   1279 {
   1280    if (VG_(clo_trace_signals))
   1281       VG_(dmsg)("do_setmask: tid = %d how = %d (%s), newset = %p (%s)\n",
   1282                 tid, how,
   1283                 how==VKI_SIG_BLOCK ? "SIG_BLOCK" : (
   1284                    how==VKI_SIG_UNBLOCK ? "SIG_UNBLOCK" : (
   1285                       how==VKI_SIG_SETMASK ? "SIG_SETMASK" : "???")),
   1286                 newset, newset ? format_sigset(newset) : "NULL" );
   1287 
   1288    /* Just do this thread. */
   1289    vg_assert(VG_(is_valid_tid)(tid));
   1290    if (oldset) {
   1291       *oldset = VG_(threads)[tid].sig_mask;
   1292       if (VG_(clo_trace_signals))
   1293          VG_(dmsg)("\toldset=%p %s\n", oldset, format_sigset(oldset));
   1294    }
   1295    if (newset) {
   1296       do_sigprocmask_bitops (how, &VG_(threads)[tid].sig_mask, newset );
   1297       VG_(sigdelset)(&VG_(threads)[tid].sig_mask, VKI_SIGKILL);
   1298       VG_(sigdelset)(&VG_(threads)[tid].sig_mask, VKI_SIGSTOP);
   1299       VG_(threads)[tid].tmp_sig_mask = VG_(threads)[tid].sig_mask;
   1300    }
   1301 }
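
        /* For example, a client sigprocmask(SIG_BLOCK, {SIGINT}, &old) lands
           here with how == VKI_SIG_BLOCK: SIGINT is OR-ed into the thread's
           sig_mask, while any attempt to block SIGKILL or SIGSTOP is
           silently stripped, mirroring what the kernel itself does. */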
   1302 
   1303 
   1304 SysRes VG_(do_sys_sigprocmask) ( ThreadId tid,
   1305                                  Int how,
   1306                                  vki_sigset_t* set,
   1307                                  vki_sigset_t* oldset )
   1308 {
   1309    switch(how) {
   1310       case VKI_SIG_BLOCK:
   1311       case VKI_SIG_UNBLOCK:
   1312       case VKI_SIG_SETMASK:
   1313          vg_assert(VG_(is_valid_tid)(tid));
   1314          do_setmask ( tid, how, set, oldset );
   1315          return VG_(mk_SysRes_Success)( 0 );
   1316 
   1317       default:
   1318          VG_(dmsg)("sigprocmask: unknown 'how' field %d\n", how);
   1319          return VG_(mk_SysRes_Error)( VKI_EINVAL );
   1320    }
   1321 }
   1322 
   1323 
   1324 /* ---------------------------------------------------------------------
   1325    LOW LEVEL STUFF TO DO WITH SIGNALS: IMPLEMENTATION
   1326    ------------------------------------------------------------------ */
   1327 
   1328 /* ---------------------------------------------------------------------
   1329    Handy utilities to block/restore all host signals.
   1330    ------------------------------------------------------------------ */
   1331 
   1332 /* Block all host signals, dumping the old mask in *saved_mask. */
   1333 static void block_all_host_signals ( /* OUT */ vki_sigset_t* saved_mask )
   1334 {
   1335    Int           ret;
   1336    vki_sigset_t block_procmask;
   1337    VG_(sigfillset)(&block_procmask);
   1338    ret = VG_(sigprocmask)
   1339             (VKI_SIG_SETMASK, &block_procmask, saved_mask);
   1340    vg_assert(ret == 0);
   1341 }
   1342 
   1343 /* Restore the blocking mask using the supplied saved one. */
   1344 static void restore_all_host_signals ( /* IN */ vki_sigset_t* saved_mask )
   1345 {
   1346    Int ret;
   1347    ret = VG_(sigprocmask)(VKI_SIG_SETMASK, saved_mask, NULL);
   1348    vg_assert(ret == 0);
   1349 }
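
        /* VG_(clear_out_queued_signals) below shows the intended bracketing:
           block all host signals, mutate shared signal state, then restore
           the saved mask. */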
   1350 
   1351 void VG_(clear_out_queued_signals)( ThreadId tid, vki_sigset_t* saved_mask )
   1352 {
   1353    block_all_host_signals(saved_mask);
   1354    if (VG_(threads)[tid].sig_queue != NULL) {
   1355       VG_(free)(VG_(threads)[tid].sig_queue);
   1356       VG_(threads)[tid].sig_queue = NULL;
   1357    }
   1358    restore_all_host_signals(saved_mask);
   1359 }
   1360 
   1361 /* ---------------------------------------------------------------------
   1362    The signal simulation proper.  A simplified version of what the
   1363    Linux kernel does.
   1364    ------------------------------------------------------------------ */
   1365 
   1366 /* Set up a stack frame (VgSigContext) for the client's signal
   1367    handler. */
   1368 static
   1369 void push_signal_frame ( ThreadId tid, const vki_siginfo_t *siginfo,
   1370                                        const struct vki_ucontext *uc )
   1371 {
   1372    Addr         esp_top_of_frame;
   1373    ThreadState* tst;
   1374    Int		sigNo = siginfo->si_signo;
   1375 
   1376    vg_assert(sigNo >= 1 && sigNo <= VG_(max_signal));
   1377    vg_assert(VG_(is_valid_tid)(tid));
   1378    tst = & VG_(threads)[tid];
   1379 
   1380    if (VG_(clo_trace_signals)) {
   1381       VG_(dmsg)("push_signal_frame (thread %d): signal %d\n", tid, sigNo);
   1382       VG_(get_and_pp_StackTrace)(tid, 10);
   1383    }
   1384 
   1385    if (/* this signal asked to run on an alt stack */
   1386        (scss.scss_per_sig[sigNo].scss_flags & VKI_SA_ONSTACK )
   1387        && /* there is a defined and enabled alt stack, which we're not
   1388              already using.  Logic from get_sigframe in
   1389              arch/i386/kernel/signal.c. */
   1390           sas_ss_flags(tid, VG_(get_SP)(tid)) == 0
   1391       ) {
   1392       esp_top_of_frame
   1393          = (Addr)(tst->altstack.ss_sp) + tst->altstack.ss_size;
   1394       if (VG_(clo_trace_signals))
   1395          VG_(dmsg)("delivering signal %d (%s) to thread %d: "
   1396                    "on ALT STACK (%p-%p; %ld bytes)\n",
   1397                    sigNo, VG_(signame)(sigNo), tid, tst->altstack.ss_sp,
   1398                    (UChar *)tst->altstack.ss_sp + tst->altstack.ss_size,
   1399                    (Word)tst->altstack.ss_size );
   1400 
   1401       /* Signal delivery to tools */
   1402       VG_TRACK( pre_deliver_signal, tid, sigNo, /*alt_stack*/True );
   1403 
   1404    } else {
   1405       esp_top_of_frame = VG_(get_SP)(tid) - VG_STACK_REDZONE_SZB;
   1406 
   1407       /* Signal delivery to tools */
   1408       VG_TRACK( pre_deliver_signal, tid, sigNo, /*alt_stack*/False );
   1409    }
   1410 
   1411    vg_assert(scss.scss_per_sig[sigNo].scss_handler != VKI_SIG_IGN);
   1412    vg_assert(scss.scss_per_sig[sigNo].scss_handler != VKI_SIG_DFL);
   1413 
   1414    /* This may fail if the client stack is busted; if that happens,
   1415       the whole process will exit rather than simply calling the
   1416       signal handler. */
   1417    VG_(sigframe_create) (tid, esp_top_of_frame, siginfo, uc,
   1418                          scss.scss_per_sig[sigNo].scss_handler,
   1419                          scss.scss_per_sig[sigNo].scss_flags,
   1420                          &tst->sig_mask,
   1421                          scss.scss_per_sig[sigNo].scss_restorer);
   1422 }
   1423 
   1424 
   1425 const HChar *VG_(signame)(Int sigNo)
   1426 {
   1427    static HChar buf[20];  // large enough
   1428 
   1429    switch(sigNo) {
   1430       case VKI_SIGHUP:    return "SIGHUP";
   1431       case VKI_SIGINT:    return "SIGINT";
   1432       case VKI_SIGQUIT:   return "SIGQUIT";
   1433       case VKI_SIGILL:    return "SIGILL";
   1434       case VKI_SIGTRAP:   return "SIGTRAP";
   1435       case VKI_SIGABRT:   return "SIGABRT";
   1436       case VKI_SIGBUS:    return "SIGBUS";
   1437       case VKI_SIGFPE:    return "SIGFPE";
   1438       case VKI_SIGKILL:   return "SIGKILL";
   1439       case VKI_SIGUSR1:   return "SIGUSR1";
   1440       case VKI_SIGUSR2:   return "SIGUSR2";
   1441       case VKI_SIGSEGV:   return "SIGSEGV";
   1442       case VKI_SIGPIPE:   return "SIGPIPE";
   1443       case VKI_SIGALRM:   return "SIGALRM";
   1444       case VKI_SIGTERM:   return "SIGTERM";
   1445 #     if defined(VKI_SIGSTKFLT)
   1446       case VKI_SIGSTKFLT: return "SIGSTKFLT";
   1447 #     endif
   1448       case VKI_SIGCHLD:   return "SIGCHLD";
   1449       case VKI_SIGCONT:   return "SIGCONT";
   1450       case VKI_SIGSTOP:   return "SIGSTOP";
   1451       case VKI_SIGTSTP:   return "SIGTSTP";
   1452       case VKI_SIGTTIN:   return "SIGTTIN";
   1453       case VKI_SIGTTOU:   return "SIGTTOU";
   1454       case VKI_SIGURG:    return "SIGURG";
   1455       case VKI_SIGXCPU:   return "SIGXCPU";
   1456       case VKI_SIGXFSZ:   return "SIGXFSZ";
   1457       case VKI_SIGVTALRM: return "SIGVTALRM";
   1458       case VKI_SIGPROF:   return "SIGPROF";
   1459       case VKI_SIGWINCH:  return "SIGWINCH";
   1460       case VKI_SIGIO:     return "SIGIO";
   1461 #     if defined(VKI_SIGPWR)
   1462       case VKI_SIGPWR:    return "SIGPWR";
   1463 #     endif
   1464 #     if defined(VKI_SIGUNUSED)
   1465       case VKI_SIGUNUSED: return "SIGUNUSED";
   1466 #     endif
   1467 
   1468 #  if defined(VKI_SIGRTMIN) && defined(VKI_SIGRTMAX)
   1469    case VKI_SIGRTMIN ... VKI_SIGRTMAX:
   1470       VG_(sprintf)(buf, "SIGRT%d", sigNo-VKI_SIGRTMIN);
   1471       return buf;
   1472 #  endif
   1473 
   1474    default:
   1475       VG_(sprintf)(buf, "SIG%d", sigNo);
   1476       return buf;
   1477    }
   1478 }
   1479 
   1480 /* Hit ourselves with a signal using the default handler */
   1481 void VG_(kill_self)(Int sigNo)
   1482 {
   1483    Int r;
   1484    vki_sigset_t	         mask, origmask;
   1485    vki_sigaction_toK_t   sa, origsa2;
   1486    vki_sigaction_fromK_t origsa;
   1487 
   1488    sa.ksa_handler = VKI_SIG_DFL;
   1489    sa.sa_flags = 0;
   1490 #  if !defined(VGP_x86_darwin) && !defined(VGP_amd64_darwin)
   1491    sa.sa_restorer = 0;
   1492 #  endif
   1493    VG_(sigemptyset)(&sa.sa_mask);
   1494 
   1495    VG_(sigaction)(sigNo, &sa, &origsa);
   1496 
   1497    VG_(sigemptyset)(&mask);
   1498    VG_(sigaddset)(&mask, sigNo);
   1499    VG_(sigprocmask)(VKI_SIG_UNBLOCK, &mask, &origmask);
   1500 
   1501    r = VG_(kill)(VG_(getpid)(), sigNo);
   1502 #  if defined(VGO_linux)
   1503    /* This sometimes fails with EPERM on Darwin.  I don't know why. */
   1504    vg_assert(r == 0);
   1505 #  endif
   1506 
   1507    VG_(convert_sigaction_fromK_to_toK)( &origsa, &origsa2 );
   1508    VG_(sigaction)(sigNo, &origsa2, NULL);
   1509    VG_(sigprocmask)(VKI_SIG_SETMASK, &origmask, NULL);
   1510 }
   1511 
   1512 // The si_code describes where the signal came from.  Some come from the
   1513 // kernel, eg.: seg faults, illegal opcodes.  Some come from the user, eg.:
   1514 // from kill() (SI_USER), or timer_settime() (SI_TIMER), or an async I/O
   1515 // request (SI_ASYNCIO).  There's lots of implementation-defined leeway in
   1516 // POSIX, but the user vs. kernel distinction is what we want here.  We also
   1517 // pass in some other details that can help when si_code is unreliable.
   1518 static Bool is_signal_from_kernel(ThreadId tid, int signum, int si_code)
   1519 {
   1520 #  if defined(VGO_linux)
   1521    // On Linux, SI_USER is zero, negative values are from the user, positive
   1522    // values are from the kernel.  There are SI_FROMUSER and SI_FROMKERNEL
   1523    // macros but we don't use them here because other platforms don't have
   1524    // them.
   1525    return ( si_code > VKI_SI_USER ? True : False );
   1526 
   1527 #  elif defined(VGO_darwin)
   1528    // On Darwin 9.6.0, the si_code is completely unreliable.  It should be the
   1529    // case that 0 means "user", and >0 means "kernel".  But:
   1530    // - For SIGSEGV, it seems quite reliable.
   1531    // - For SIGBUS, it's always 2.
   1532    // - For SIGFPE, it's often 0, even for kernel ones (eg.
   1533    //   div-by-integer-zero always gives zero).
   1534    // - For SIGILL, it's unclear.
   1535    // - For SIGTRAP, it's always 1.
   1536    // You can see the "NOTIMP" (not implemented) status of a number of the
   1537    // sub-cases in sys/signal.h.  Hopefully future versions of Darwin will
   1538    // get this right.
   1539 
   1540    // If we're blocked waiting on a syscall, it must be a user signal, because
   1541    // the kernel won't generate sync signals within syscalls.
   1542    if (VG_(threads)[tid].status == VgTs_WaitSys) {
   1543       return False;
   1544 
   1545    // If it's a SIGSEGV, use the proper condition, since it's fairly reliable.
   1546    } else if (VKI_SIGSEGV == signum) {
   1547       return ( si_code > 0 ? True : False );
   1548 
   1549    // If it's anything else, assume it's kernel-generated.  Reason being that
   1550    // kernel-generated sync signals are more common, and it's probable that
   1551    // misdiagnosing a user signal as a kernel signal is better than the
   1552    // opposite.
   1553    } else {
   1554       return True;
   1555    }
   1556 #  else
   1557 #    error Unknown OS
   1558 #  endif
   1559 }
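
        /* For example, on Linux a signal sent with kill(2) arrives with
           si_code == SI_USER (0), so the function above returns False; a
           genuine MMU fault arrives with a positive code such as
           SEGV_MAPERR (1) and yields True. */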
   1560 
   1561 // This is an arbitrary si_code that we only use internally.  It corresponds
   1562 // to the value SI_KERNEL on Linux, but that's not really of any significance
   1563 // as far as I can determine.
   1564 #define VKI_SEGV_MADE_UP_GPF    0x80
   1565 
   1566 /*
   1567    Perform the default action of a signal.  If the signal is fatal, it
   1568    marks all threads as needing to exit, but it doesn't actually kill
   1569    the process or thread.
   1570 
   1571    If we're not being quiet, then print out some more detail about
   1572    fatal signals (esp. core dumping signals).
   1573  */
   1574 static void default_action(const vki_siginfo_t *info, ThreadId tid)
   1575 {
   1576    Int  sigNo     = info->si_signo;
   1577    Bool terminate = False;	/* kills process         */
   1578    Bool core      = False;	/* kills process w/ core */
   1579    struct vki_rlimit corelim;
   1580    Bool could_core;
   1581 
   1582    vg_assert(VG_(is_running_thread)(tid));
   1583 
   1584    switch(sigNo) {
   1585       case VKI_SIGQUIT:	/* core */
   1586       case VKI_SIGILL:	/* core */
   1587       case VKI_SIGABRT:	/* core */
   1588       case VKI_SIGFPE:	/* core */
   1589       case VKI_SIGSEGV:	/* core */
   1590       case VKI_SIGBUS:	/* core */
   1591       case VKI_SIGTRAP:	/* core */
   1592       case VKI_SIGXCPU:	/* core */
   1593       case VKI_SIGXFSZ:	/* core */
   1594          terminate = True;
   1595          core = True;
   1596          break;
   1597 
   1598       case VKI_SIGHUP:	/* term */
   1599       case VKI_SIGINT:	/* term */
   1600       case VKI_SIGKILL:	/* term - we won't see this */
   1601       case VKI_SIGPIPE:	/* term */
   1602       case VKI_SIGALRM:	/* term */
   1603       case VKI_SIGTERM:	/* term */
   1604       case VKI_SIGUSR1:	/* term */
   1605       case VKI_SIGUSR2:	/* term */
   1606       case VKI_SIGIO:	/* term */
   1607 #     if defined(VKI_SIGPWR)
   1608       case VKI_SIGPWR:	/* term */
   1609 #     endif
   1610       case VKI_SIGSYS:	/* term */
   1611       case VKI_SIGPROF:	/* term */
   1612       case VKI_SIGVTALRM:	/* term */
   1613 #     if defined(VKI_SIGRTMIN) && defined(VKI_SIGRTMAX)
   1614       case VKI_SIGRTMIN ... VKI_SIGRTMAX: /* term */
   1615 #     endif
   1616          terminate = True;
   1617          break;
   1618    }
   1619 
   1620    vg_assert(!core || (core && terminate));
   1621 
   1622    if (VG_(clo_trace_signals))
   1623       VG_(dmsg)("delivering %d (code %d) to default handler; action: %s%s\n",
   1624                 sigNo, info->si_code, terminate ? "terminate" : "ignore",
   1625                 core ? "+core" : "");
   1626 
   1627    if (!terminate)
   1628       return;			/* nothing to do */
   1629 
   1630    could_core = core;
   1631 
   1632    if (core) {
   1633       /* If they set the core-size limit to zero, don't generate a
   1634 	 core file */
   1635 
   1636       VG_(getrlimit)(VKI_RLIMIT_CORE, &corelim);
   1637 
   1638       if (corelim.rlim_cur == 0)
   1639 	 core = False;
   1640    }
   1641 
   1642    if ( (VG_(clo_verbosity) >= 1 ||
   1643          (could_core && is_signal_from_kernel(tid, sigNo, info->si_code))
   1644         ) &&
   1645         !VG_(clo_xml) ) {
   1646       VG_(umsg)(
   1647          "\n"
   1648          "Process terminating with default action of signal %d (%s)%s\n",
   1649          sigNo, VG_(signame)(sigNo), core ? ": dumping core" : "");
   1650 
   1651       /* Be helpful - decode some more details about this fault */
   1652       if (is_signal_from_kernel(tid, sigNo, info->si_code)) {
   1653 	 const HChar *event = NULL;
   1654 	 Bool haveaddr = True;
   1655 
   1656 	 switch(sigNo) {
   1657 	 case VKI_SIGSEGV:
   1658 	    switch(info->si_code) {
   1659 	    case VKI_SEGV_MAPERR: event = "Access not within mapped region";
   1660                                   break;
   1661 	    case VKI_SEGV_ACCERR: event = "Bad permissions for mapped region";
   1662                                   break;
   1663 	    case VKI_SEGV_MADE_UP_GPF:
   1664 	       /* General Protection Fault: The CPU/kernel
   1665 		  isn't telling us anything useful, but this
   1666 		  is commonly the result of exceeding a
   1667 		  segment limit. */
   1668 	       event = "General Protection Fault";
   1669 	       haveaddr = False;
   1670 	       break;
   1671 	    }
   1672 #if 0
   1673             {
   1674               HChar buf[50];  // large enough
   1675               VG_(am_show_nsegments)(0,"post segfault");
   1676               VG_(sprintf)(buf, "/bin/cat /proc/%d/maps", VG_(getpid)());
   1677               VG_(system)(buf);
   1678             }
   1679 #endif
   1680 	    break;
   1681 
   1682 	 case VKI_SIGILL:
   1683 	    switch(info->si_code) {
   1684 	    case VKI_ILL_ILLOPC: event = "Illegal opcode"; break;
   1685 	    case VKI_ILL_ILLOPN: event = "Illegal operand"; break;
   1686 	    case VKI_ILL_ILLADR: event = "Illegal addressing mode"; break;
   1687 	    case VKI_ILL_ILLTRP: event = "Illegal trap"; break;
   1688 	    case VKI_ILL_PRVOPC: event = "Privileged opcode"; break;
   1689 	    case VKI_ILL_PRVREG: event = "Privileged register"; break;
   1690 	    case VKI_ILL_COPROC: event = "Coprocessor error"; break;
   1691 	    case VKI_ILL_BADSTK: event = "Internal stack error"; break;
   1692 	    }
   1693 	    break;
   1694 
   1695 	 case VKI_SIGFPE:
   1696 	    switch (info->si_code) {
   1697 	    case VKI_FPE_INTDIV: event = "Integer divide by zero"; break;
   1698 	    case VKI_FPE_INTOVF: event = "Integer overflow"; break;
   1699 	    case VKI_FPE_FLTDIV: event = "FP divide by zero"; break;
   1700 	    case VKI_FPE_FLTOVF: event = "FP overflow"; break;
   1701 	    case VKI_FPE_FLTUND: event = "FP underflow"; break;
   1702 	    case VKI_FPE_FLTRES: event = "FP inexact"; break;
   1703 	    case VKI_FPE_FLTINV: event = "FP invalid operation"; break;
   1704 	    case VKI_FPE_FLTSUB: event = "FP subscript out of range"; break;
   1705 	    }
   1706 	    break;
   1707 
   1708 	 case VKI_SIGBUS:
   1709 	    switch (info->si_code) {
   1710 	    case VKI_BUS_ADRALN: event = "Invalid address alignment"; break;
   1711 	    case VKI_BUS_ADRERR: event = "Non-existent physical address"; break;
   1712 	    case VKI_BUS_OBJERR: event = "Hardware error"; break;
   1713 	    }
   1714 	    break;
   1715 	 } /* switch (sigNo) */
   1716 
   1717 	 if (event != NULL) {
   1718 	    if (haveaddr)
   1719                VG_(umsg)(" %s at address %p\n",
   1720                          event, info->VKI_SIGINFO_si_addr);
   1721 	    else
   1722                VG_(umsg)(" %s\n", event);
   1723 	 }
   1724       }
   1725       /* Print a stack trace.  Be cautious if the thread's SP is in an
   1726          obviously stupid place (not mapped readable) that would
   1727          likely cause a segfault. */
   1728       if (VG_(is_valid_tid)(tid)) {
   1729          Word first_ip_delta = 0;
   1730 #if defined(VGO_linux)
   1731          /* Make sure that the address stored in the stack pointer is
   1732             located in a mapped page. That is not necessarily so. E.g.
   1733             consider the scenario where the stack pointer was decreased
   1734             and now has a value that is just below the end of a page that has
   1735             not been mapped yet. In that case VG_(am_is_valid_for_client)
   1736             will consider the address of the stack pointer invalid and that
   1737             would cause a back-trace of depth 1 to be printed, instead of a
   1738             full back-trace. */
   1739          if (tid == 1) {           // main thread
   1740             Addr esp  = VG_(get_SP)(tid);
   1741             Addr base = VG_PGROUNDDN(esp - VG_STACK_REDZONE_SZB);
   1742             if (VG_(am_addr_is_in_extensible_client_stack)(base) &&
   1743                 VG_(extend_stack)(tid, base)) {
   1744                if (VG_(clo_trace_signals))
   1745                   VG_(dmsg)("       -> extended stack base to %#lx\n",
   1746                             VG_PGROUNDDN(esp));
   1747             }
   1748          }
   1749 #endif
   1750 #if defined(VGA_s390x)
   1751          if (sigNo == VKI_SIGILL) {
   1752             /* The guest instruction address has been adjusted earlier to
   1753                point to the insn following the one that could not be decoded.
   1754                When printing the back-trace here we need to undo that
   1755                adjustment so the first line in the back-trace reports the
   1756                correct address. */
   1757             Addr  addr = (Addr)info->VKI_SIGINFO_si_addr;
   1758             UChar byte = ((UChar *)addr)[0];
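                    /* The top two bits of the first opcode byte encode the
                       length: 00 -> 2 bytes, 01 or 10 -> 4 bytes, 11 -> 6
                       bytes.  E.g. byte >> 6 == 3 gives
                       (((3 + 1) >> 1) + 1) << 1 == 6. */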
   1759             Int   insn_length = ((((byte >> 6) + 1) >> 1) + 1) << 1;
   1760 
   1761             first_ip_delta = -insn_length;
   1762          }
   1763 #endif
   1764          ExeContext* ec = VG_(am_is_valid_for_client)
   1765                              (VG_(get_SP)(tid), sizeof(Addr), VKI_PROT_READ)
   1766                         ? VG_(record_ExeContext)( tid, first_ip_delta )
   1767                       : VG_(record_depth_1_ExeContext)( tid,
   1768                                                         first_ip_delta );
   1769          vg_assert(ec);
   1770          VG_(pp_ExeContext)( ec );
   1771       }
   1772       if (sigNo == VKI_SIGSEGV
   1773           && is_signal_from_kernel(tid, sigNo, info->si_code)
   1774           && info->si_code == VKI_SEGV_MAPERR) {
   1775          VG_(umsg)(" If you believe this happened as a result of a stack\n" );
   1776          VG_(umsg)(" overflow in your program's main thread (unlikely but\n");
   1777          VG_(umsg)(" possible), you can try to increase the size of the\n"  );
   1778          VG_(umsg)(" main thread stack using the --main-stacksize= flag.\n" );
   1779          // FIXME: assumes main ThreadId == 1
   1780          if (VG_(is_valid_tid)(1)) {
   1781             VG_(umsg)(
   1782                " The main thread stack size used in this run was %lu.\n",
   1783                VG_(threads)[1].client_stack_szB);
   1784          }
   1785       }
   1786    }
   1787 
   1788    if (VG_(clo_vgdb) != Vg_VgdbNo
   1789        && VG_(dyn_vgdb_error) <= VG_(get_n_errs_shown)() + 1) {
   1790       /* Note: we add + 1 to n_errs_shown as the fatal signal was not
   1791          reported through error msg, and so was not counted. */
   1792       VG_(gdbserver_report_fatal_signal) (info, tid);
   1793    }
   1794 
   1795    if (VG_(is_action_requested)( "Attach to debugger", & VG_(clo_db_attach) )) {
   1796       VG_(start_debugger)( tid );
   1797    }
   1798 
   1799    if (core) {
   1800       static const struct vki_rlimit zero = { 0, 0 };
   1801 
   1802       VG_(make_coredump)(tid, info, corelim.rlim_cur);
   1803 
   1804       /* Make sure we don't get a confusing kernel-generated
   1805 	 coredump when we finally exit */
   1806       VG_(setrlimit)(VKI_RLIMIT_CORE, &zero);
   1807    }
   1808 
   1809    /* stash fatal signal in main thread */
   1810    // what's this for?
   1811    //VG_(threads)[VG_(master_tid)].os_state.fatalsig = sigNo;
   1812 
   1813    /* everyone dies */
   1814    VG_(nuke_all_threads_except)(tid, VgSrc_FatalSig);
   1815    VG_(threads)[tid].exitreason = VgSrc_FatalSig;
   1816    VG_(threads)[tid].os_state.fatalsig = sigNo;
   1817 }
   1818 
   1819 /*
   1820    This does the business of delivering a signal to a thread.  It may
   1821    be called from either a real signal handler, or from normal code to
   1822    cause the thread to enter the signal handler.
   1823 
   1824    This updates the thread state, but it does not set it to be
   1825    Runnable.
   1826 */
   1827 static void deliver_signal ( ThreadId tid, const vki_siginfo_t *info,
   1828                                            const struct vki_ucontext *uc )
   1829 {
   1830    Int			sigNo = info->si_signo;
   1831    SCSS_Per_Signal	*handler = &scss.scss_per_sig[sigNo];
   1832    void			*handler_fn;
   1833    ThreadState		*tst = VG_(get_ThreadState)(tid);
   1834 
   1835    if (VG_(clo_trace_signals))
   1836       VG_(dmsg)("delivering signal %d (%s):%d to thread %d\n",
   1837                 sigNo, VG_(signame)(sigNo), info->si_code, tid );
   1838 
   1839    if (sigNo == VG_SIGVGKILL) {
   1840       /* If this is a SIGVGKILL, we're expecting it to interrupt any
   1841 	 blocked syscall.  It doesn't matter whether the VCPU state is
   1842 	 set to restart or not, because we don't expect it will
   1843 	 execute any more client instructions. */
   1844       vg_assert(VG_(is_exiting)(tid));
   1845       return;
   1846    }
   1847 
   1848    /* If the client specifies SIG_IGN, treat it as SIG_DFL.
   1849 
   1850       If deliver_signal() is being called on a thread, we want
   1851       the signal to get through no matter what; if they're ignoring
   1852       it, then we do this override (this is so we can send it SIGSEGV,
   1853       etc). */
   1854    handler_fn = handler->scss_handler;
   1855    if (handler_fn == VKI_SIG_IGN)
   1856       handler_fn = VKI_SIG_DFL;
   1857 
   1858    vg_assert(handler_fn != VKI_SIG_IGN);
   1859 
   1860    if (handler_fn == VKI_SIG_DFL) {
   1861       default_action(info, tid);
   1862    } else {
   1863       /* Create a signal delivery frame, and set the client's %ESP and
   1864 	 %EIP so that when execution continues, we will enter the
   1865 	 signal handler with the frame on top of the client's stack,
   1866 	 as it expects.
   1867 
   1868 	 Signal delivery can fail if the client stack is too small or
   1869 	 missing, and we can't push the frame.  If that happens,
   1870 	 push_signal_frame will cause the whole process to exit when
   1871 	 we next hit the scheduler.
   1872       */
   1873       vg_assert(VG_(is_valid_tid)(tid));
   1874 
   1875       push_signal_frame ( tid, info, uc );
   1876 
   1877       if (handler->scss_flags & VKI_SA_ONESHOT) {
   1878 	 /* Do the ONESHOT thing. */
   1879 	 handler->scss_handler = VKI_SIG_DFL;
   1880 
   1881 	 handle_SCSS_change( False /* lazy update */ );
   1882       }
   1883 
   1884       /* At this point:
   1885 	 tst->sig_mask is the current signal mask
   1886 	 tst->tmp_sig_mask is the same as sig_mask, unless we're in sigsuspend
   1887 	 handler->scss_mask is the mask set by the handler
   1888 
   1889 	 Handler gets a mask of tmp_sig_mask|handler_mask|signo
   1890        */
   1891       tst->sig_mask = tst->tmp_sig_mask;
   1892       if (!(handler->scss_flags & VKI_SA_NOMASK)) {
   1893 	 VG_(sigaddset_from_set)(&tst->sig_mask, &handler->scss_mask);
   1894 	 VG_(sigaddset)(&tst->sig_mask, sigNo);
   1895 	 tst->tmp_sig_mask = tst->sig_mask;
   1896       }
   1897    }
   1898 
   1899    /* Thread state is ready to go - just add Runnable */
   1900 }
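
        /* Worked example of the mask computation above: if tst->tmp_sig_mask
           is { SIGUSR1 }, the handler was installed with sa_mask { SIGUSR2 },
           and the signal being delivered is SIGINT, then (absent SA_NOMASK)
           the handler runs with { SIGUSR1, SIGUSR2, SIGINT } blocked. */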
   1901 
   1902 static void resume_scheduler(ThreadId tid)
   1903 {
   1904    ThreadState *tst = VG_(get_ThreadState)(tid);
   1905 
   1906    vg_assert(tst->os_state.lwpid == VG_(gettid)());
   1907 
   1908    if (tst->sched_jmpbuf_valid) {
   1909       /* Can't continue; must longjmp back to the scheduler and thus
   1910          enter the sighandler immediately. */
   1911       VG_MINIMAL_LONGJMP(tst->sched_jmpbuf);
   1912    }
   1913 }
   1914 
   1915 static void synth_fault_common(ThreadId tid, Addr addr, Int si_code)
   1916 {
   1917    vki_siginfo_t info;
   1918 
   1919    vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
   1920 
   1921    VG_(memset)(&info, 0, sizeof(info));
   1922    info.si_signo = VKI_SIGSEGV;
   1923    info.si_code = si_code;
   1924    info.VKI_SIGINFO_si_addr = (void*)addr;
   1925 
   1926    /* Even if gdbserver indicates to ignore the signal, we must deliver it.
   1927       So ignore the return value of VG_(gdbserver_report_signal). */
   1928    (void) VG_(gdbserver_report_signal) (&info, tid);
   1929 
   1930    /* If they're trying to block the signal, force it to be delivered */
   1931    if (VG_(sigismember)(&VG_(threads)[tid].sig_mask, VKI_SIGSEGV))
   1932       VG_(set_default_handler)(VKI_SIGSEGV);
   1933 
   1934    deliver_signal(tid, &info, NULL);
   1935 }
   1936 
   1937 // Synthesize a fault where the address is OK, but the page
   1938 // permissions are bad.
   1939 void VG_(synth_fault_perms)(ThreadId tid, Addr addr)
   1940 {
   1941    synth_fault_common(tid, addr, VKI_SEGV_ACCERR);
   1942 }
   1943 
   1944 // Synthesize a fault where there's nothing mapped at the address.
   1945 void VG_(synth_fault_mapping)(ThreadId tid, Addr addr)
   1946 {
   1947    synth_fault_common(tid, addr, VKI_SEGV_MAPERR);
   1948 }
   1949 
   1950 // Synthesize a misc memory fault.
   1951 void VG_(synth_fault)(ThreadId tid)
   1952 {
   1953    synth_fault_common(tid, 0, VKI_SEGV_MADE_UP_GPF);
   1954 }
   1955 
   1956 // Synthesise a SIGILL.
   1957 void VG_(synth_sigill)(ThreadId tid, Addr addr)
   1958 {
   1959    vki_siginfo_t info;
   1960 
   1961    vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
   1962 
   1963    VG_(memset)(&info, 0, sizeof(info));
   1964    info.si_signo = VKI_SIGILL;
   1965    info.si_code  = VKI_ILL_ILLOPC; /* jrs: no idea what this should be */
   1966    info.VKI_SIGINFO_si_addr = (void*)addr;
   1967 
   1968    if (VG_(gdbserver_report_signal) (&info, tid)) {
   1969       resume_scheduler(tid);
   1970       deliver_signal(tid, &info, NULL);
   1971    }
   1972    else
   1973       resume_scheduler(tid);
   1974 }
   1975 
   1976 // Synthesise a SIGBUS.
   1977 void VG_(synth_sigbus)(ThreadId tid)
   1978 {
   1979    vki_siginfo_t info;
   1980 
   1981    vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
   1982 
   1983    VG_(memset)(&info, 0, sizeof(info));
   1984    info.si_signo = VKI_SIGBUS;
   1985    /* There are several meanings to SIGBUS (as per POSIX, presumably),
   1986       but the most widely understood is "invalid address alignment",
   1987       so let's use that. */
   1988    info.si_code  = VKI_BUS_ADRALN;
   1989    /* If we knew the invalid address in question, we could put it
   1990       in .si_addr.  Oh well. */
   1991    /* info.VKI_SIGINFO_si_addr = (void*)addr; */
   1992 
   1993    if (VG_(gdbserver_report_signal) (&info, tid)) {
   1994       resume_scheduler(tid);
   1995       deliver_signal(tid, &info, NULL);
   1996    }
   1997    else
   1998       resume_scheduler(tid);
   1999 }
   2000 
   2001 // Synthesise a SIGTRAP.
   2002 void VG_(synth_sigtrap)(ThreadId tid)
   2003 {
   2004    vki_siginfo_t info;
   2005    struct vki_ucontext uc;
   2006 #  if defined(VGP_x86_darwin)
   2007    struct __darwin_mcontext32 mc;
   2008 #  elif defined(VGP_amd64_darwin)
   2009    struct __darwin_mcontext64 mc;
   2010 #  endif
   2011 
   2012    vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
   2013 
   2014    VG_(memset)(&info, 0, sizeof(info));
   2015    VG_(memset)(&uc,   0, sizeof(uc));
   2016    info.si_signo = VKI_SIGTRAP;
   2017    info.si_code = VKI_TRAP_BRKPT; /* tjh: only ever called for a brkpt ins */
   2018 
   2019 #  if defined(VGP_x86_linux) || defined(VGP_amd64_linux)
   2020    uc.uc_mcontext.trapno = 3;     /* tjh: this is the x86 trap number
   2021                                           for a breakpoint trap... */
   2022    uc.uc_mcontext.err = 0;        /* tjh: no error code for x86
   2023                                           breakpoint trap... */
   2024 #  elif defined(VGP_x86_darwin) || defined(VGP_amd64_darwin)
   2025    /* the same thing, but using Darwin field/struct names */
   2026    VG_(memset)(&mc, 0, sizeof(mc));
   2027    uc.uc_mcontext = &mc;
   2028    uc.uc_mcontext->__es.__trapno = 3;
   2029    uc.uc_mcontext->__es.__err = 0;
   2030 #  endif
   2031 
   2032    /* fixs390: do we need to do anything here for s390 ? */
   2033    if (VG_(gdbserver_report_signal) (&info, tid)) {
   2034       resume_scheduler(tid);
   2035       deliver_signal(tid, &info, &uc);
   2036    }
   2037    else
   2038       resume_scheduler(tid);
   2039 }
   2040 
   2041 // Synthesise a SIGFPE.
   2042 void VG_(synth_sigfpe)(ThreadId tid, UInt code)
   2043 {
   2044 // Only tested on mips32 and mips64
   2045 #if !defined(VGA_mips32) && !defined(VGA_mips64)
   2046    vg_assert(0);
   2047 #else
   2048    vki_siginfo_t info;
   2049    struct vki_ucontext uc;
   2050 
   2051    vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
   2052 
   2053    VG_(memset)(&info, 0, sizeof(info));
   2054    VG_(memset)(&uc,   0, sizeof(uc));
   2055    info.si_signo = VKI_SIGFPE;
   2056    info.si_code = code;
   2057 
   2058    if (VG_(gdbserver_report_signal) (&info, tid)) {
   2059       resume_scheduler(tid);
   2060       deliver_signal(tid, &info, &uc);
   2061    }
   2062    else
   2063       resume_scheduler(tid);
   2064 #endif
   2065 }
   2066 
   2067 /* Make a signal pending for a thread, for later delivery.
   2068    VG_(poll_signals) will arrange for it to be delivered at the right
   2069    time.
   2070 
   2071    tid==0 means add it to the process-wide queue, rather than sending it to a
   2072    specific thread.
   2073 */
   2074 static
   2075 void queue_signal(ThreadId tid, const vki_siginfo_t *si)
   2076 {
   2077    ThreadState *tst;
   2078    SigQueue *sq;
   2079    vki_sigset_t savedmask;
   2080 
   2081    tst = VG_(get_ThreadState)(tid);
   2082 
   2083    /* Protect the signal queue against async deliveries */
   2084    block_all_host_signals(&savedmask);
   2085 
   2086    if (tst->sig_queue == NULL) {
   2087       tst->sig_queue = VG_(malloc)("signals.qs.1", sizeof(*tst->sig_queue));
   2088       VG_(memset)(tst->sig_queue, 0, sizeof(*tst->sig_queue));
   2089    }
   2090    sq = tst->sig_queue;
   2091 
   2092    if (VG_(clo_trace_signals))
   2093       VG_(dmsg)("Queueing signal %d (idx %d) to thread %d\n",
   2094                 si->si_signo, sq->next, tid);
   2095 
   2096    /* Add signal to the queue.  If the queue gets overrun, then old
   2097       queued signals may get lost.
   2098 
   2099       XXX We should also keep a sigset of pending signals, so that at
   2100       least a non-siginfo signal gets delivered.
   2101    */
   2102    if (sq->sigs[sq->next].si_signo != 0)
   2103       VG_(umsg)("Signal %d being dropped from thread %d's queue\n",
   2104                 sq->sigs[sq->next].si_signo, tid);
   2105 
   2106    sq->sigs[sq->next] = *si;
   2107    sq->next = (sq->next+1) % N_QUEUED_SIGNALS;
   2108 
   2109    restore_all_host_signals(&savedmask);
   2110 }
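
        /* The queue is a fixed-size ring of N_QUEUED_SIGNALS slots, and
           sq->next always indexes the oldest slot; once the ring is full,
           each newly queued signal overwrites (and reports dropping) the
           oldest one. */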
   2111 
   2112 /*
   2113    Returns the next queued signal for thread tid which is in "set".
   2114    tid==0 means process-wide signal.  Set si_signo to 0 when the
   2115    signal has been delivered.
   2116 
   2117    Must be called with all signals blocked, to protect against async
   2118    deliveries.
   2119 */
   2120 static vki_siginfo_t *next_queued(ThreadId tid, const vki_sigset_t *set)
   2121 {
   2122    ThreadState *tst = VG_(get_ThreadState)(tid);
   2123    SigQueue *sq;
   2124    Int idx;
   2125    vki_siginfo_t *ret = NULL;
   2126 
   2127    sq = tst->sig_queue;
   2128    if (sq == NULL)
   2129       goto out;
   2130 
   2131    idx = sq->next;
   2132    do {
   2133       if (0)
   2134 	 VG_(printf)("idx=%d si_signo=%d inset=%d\n", idx,
   2135 		     sq->sigs[idx].si_signo,
   2136                      VG_(sigismember)(set, sq->sigs[idx].si_signo));
   2137 
   2138       if (sq->sigs[idx].si_signo != 0
   2139           && VG_(sigismember)(set, sq->sigs[idx].si_signo)) {
   2140 	 if (VG_(clo_trace_signals))
   2141             VG_(dmsg)("Returning queued signal %d (idx %d) for thread %d\n",
   2142                       sq->sigs[idx].si_signo, idx, tid);
   2143 	 ret = &sq->sigs[idx];
   2144 	 goto out;
   2145       }
   2146 
   2147       idx = (idx + 1) % N_QUEUED_SIGNALS;
   2148    } while(idx != sq->next);
   2149   out:
   2150    return ret;
   2151 }
   2152 
   2153 static int sanitize_si_code(int si_code)
   2154 {
   2155 #if defined(VGO_linux)
   2156    /* The Linux kernel uses the top 16 bits of si_code for its own
   2157       use and only exports the bottom 16 bits to user space - at least
   2158       that is the theory, but it turns out that there are some kernels
   2159       around that forget to mask out the top 16 bits so we do it here.
   2160 
   2161       The kernel treats the bottom 16 bits as signed and (when it does
   2162       mask them off) sign extends them when exporting to user space so
   2163       we do the same thing here. */
   2164    return (Short)si_code;
   2165 #elif defined(VGO_darwin)
   2166    return si_code;
   2167 #else
   2168 #  error Unknown OS
   2169 #endif
   2170 }
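
        /* Worked example: a buggy kernel might deliver si_code 0x00010006;
           (Short)0x00010006 keeps only the low 16 bits, giving 6.  A
           negative user code such as SI_TKILL (-6) survives too: its low 16
           bits are 0xFFFA, which sign-extends back to -6. */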
   2171 
   2172 /*
   2173    Receive an async signal from the kernel.
   2174 
   2175    This should only happen when the thread is blocked in a syscall,
   2176    since that's the only time this set of signals is unblocked.
   2177 */
   2178 static
   2179 void async_signalhandler ( Int sigNo,
   2180                            vki_siginfo_t *info, struct vki_ucontext *uc )
   2181 {
   2182    ThreadId     tid = VG_(lwpid_to_vgtid)(VG_(gettid)());
   2183    ThreadState* tst = VG_(get_ThreadState)(tid);
   2184    SysRes       sres;
   2185 
   2186    /* The thread isn't currently running; make it so before going on */
   2187    vg_assert(tst->status == VgTs_WaitSys);
   2188    VG_(acquire_BigLock)(tid, "async_signalhandler");
   2189 
   2190    info->si_code = sanitize_si_code(info->si_code);
   2191 
   2192    if (VG_(clo_trace_signals))
   2193       VG_(dmsg)("async signal handler: signal=%d, tid=%d, si_code=%d\n",
   2194                 sigNo, tid, info->si_code);
   2195 
   2196    /* Update thread state properly.  The signal can only have been
   2197       delivered whilst we were in
   2198       coregrind/m_syswrap/syscall-<PLAT>.S, and only then in the
   2199       window between the two sigprocmask calls, since at all other
   2200       times, we run with async signals on the host blocked.  Hence
   2201       make enquiries on the basis that we were in or very close to a
   2202       syscall, and attempt to fix up the guest state accordingly.
   2203 
   2204       (normal async signals occurring during computation are blocked,
   2205       but periodically polled for using VG_(sigtimedwait_zero), and
   2206       delivered at a point convenient for us.  Hence this routine only
   2207       deals with signals that are delivered to a thread during a
   2208       syscall.) */
   2209 
   2210    /* First, extract a SysRes from the ucontext_t* given to this
   2211       handler.  If it is subsequently established by
   2212       VG_(fixup_guest_state_after_syscall_interrupted) that the
   2213       syscall was complete but the results had not been committed yet
   2214       to the guest state, then it'll have to commit the results itself
   2215       "by hand", and so we need to extract the SysRes.  Of course if
   2216       the thread was not in that particular window then the
   2217       SysRes will be meaningless, but that's OK too because
   2218       VG_(fixup_guest_state_after_syscall_interrupted) will detect
   2219       that the thread was not in said window and ignore the SysRes. */
   2220 
   2221    /* To make matters more complex still, on Darwin we need to know
   2222       the "class" of the syscall under consideration in order to be
   2223       able to extract the correct SysRes.  The class will have been
   2224       saved just before the syscall, by VG_(client_syscall), into this
   2225       thread's tst->arch.vex.guest_SC_CLASS.  Hence: */
   2226 #  if defined(VGO_darwin)
   2227    sres = VG_UCONTEXT_SYSCALL_SYSRES(uc, tst->arch.vex.guest_SC_CLASS);
   2228 #  else
   2229    sres = VG_UCONTEXT_SYSCALL_SYSRES(uc);
   2230 #  endif
   2231 
   2232    /* (1) */
   2233    VG_(fixup_guest_state_after_syscall_interrupted)(
   2234       tid,
   2235       VG_UCONTEXT_INSTR_PTR(uc),
   2236       sres,
   2237       !!(scss.scss_per_sig[sigNo].scss_flags & VKI_SA_RESTART)
   2238    );
   2239 
   2240    /* (2) */
   2241    /* Set up the thread's state to deliver a signal */
   2242    if (!is_sig_ign(info, tid))
   2243       deliver_signal(tid, info, uc);
   2244 
   2245    /* It's crucial that (1) and (2) happen in the order (1) then (2)
   2246       and not the other way around.  (1) fixes up the guest thread
   2247       state to reflect the fact that the syscall was interrupted --
   2248       either to restart the syscall or to return EINTR.  (2) then sets
   2249       up the thread state to deliver the signal.  Then we resume
   2250       execution.  First, the signal handler is run, since that's the
   2251       second adjustment we made to the thread state.  If that returns,
   2252       then we resume at the guest state created by (1), viz, either
   2253       the syscall returns EINTR or is restarted.
   2254 
   2255       If (2) was done before (1) the outcome would be completely
   2256       different, and wrong. */
   2257 
   2258    /* longjmp back to the thread's main loop to start executing the
   2259       handler. */
   2260    resume_scheduler(tid);
   2261 
   2262    VG_(core_panic)("async_signalhandler: got unexpected signal "
   2263                    "while outside of scheduler");
   2264 }
   2265 
   2266 /* Extend the stack of thread #tid to cover addr. It is expected that
   2267    addr either points into an already mapped anonymous segment or into a
   2268    reservation segment abutting the stack segment. Everything else is a bug.
   2269 
   2270    Returns True on success, False on failure.
   2271 
   2272    Succeeds without doing anything if addr is already within a segment.
   2273 
   2274    Failure could be caused by:
   2275    - addr not below a growable segment
   2276    - new stack size would exceed the stack limit for the given thread
   2277    - mmap failed for some other reason
   2278 */
   2279 Bool VG_(extend_stack)(ThreadId tid, Addr addr)
   2280 {
   2281    SizeT udelta;
   2282 
   2283    /* Get the segment containing addr. */
   2284    const NSegment* seg = VG_(am_find_nsegment)(addr);
   2285    vg_assert(seg != NULL);
   2286 
   2287    /* TODO: the test "seg->kind == SkAnonC" is really inadequate,
   2288       because although it tests whether the segment is mapped
   2289       _somehow_, it doesn't check that it has the right permissions
   2290       (r, w, maybe x)?  */
   2291    if (seg->kind == SkAnonC)
   2292       /* addr is already mapped.  Nothing to do. */
   2293       return True;
   2294 
   2295    const NSegment* seg_next = VG_(am_next_nsegment)( seg, True/*fwds*/ );
   2296    vg_assert(seg_next != NULL);
   2297 
   2298    udelta = VG_PGROUNDUP(seg_next->start - addr);
   2299 
   2300    VG_(debugLog)(1, "signals",
   2301                     "extending a stack base 0x%llx down by %lld\n",
   2302                     (ULong)seg_next->start, (ULong)udelta);
   2303    Bool overflow;
   2304    if (! VG_(am_extend_into_adjacent_reservation_client)
   2305        ( seg_next->start, -(SSizeT)udelta, &overflow )) {
   2306       Addr new_stack_base = seg_next->start - udelta;
   2307       if (overflow)
   2308          VG_(umsg)("Stack overflow in thread #%d: can't grow stack to %#lx\n",
   2309                    tid, new_stack_base);
   2310       else
   2311          VG_(umsg)("Cannot map memory to grow the stack for thread #%d "
   2312                    "to %#lx\n", tid, new_stack_base);
   2313       return False;
   2314    }
   2315 
   2316    /* When we change the main stack, we have to let the stack handling
   2317       code know about it. */
   2318    VG_(change_stack)(VG_(clstk_id), addr, VG_(clstk_end));
   2319 
   2320    if (VG_(clo_sanity_level) > 2)
   2321       VG_(sanity_check_general)(False);
   2322 
   2323    return True;
   2324 }
   2325 
   2326 static void (*fault_catcher)(Int sig, Addr addr) = NULL;
   2327 
   2328 void VG_(set_fault_catcher)(void (*catcher)(Int, Addr))
   2329 {
   2330    if (0)
   2331       VG_(debugLog)(0, "signals", "set fault catcher to %p\n", catcher);
   2332    vg_assert2(NULL == catcher || NULL == fault_catcher,
   2333               "Fault catcher is already registered");
   2334 
   2335    fault_catcher = catcher;
   2336 }
   2337 
   2338 static
   2339 void sync_signalhandler_from_user ( ThreadId tid,
   2340          Int sigNo, vki_siginfo_t *info, struct vki_ucontext *uc )
   2341 {
   2342    ThreadId qtid;
   2343 
   2344    /* If some user-process sent us a sync signal (ie. it's not the result
   2345       of a faulting instruction), then how we treat it depends on when it
   2346       arrives... */
   2347 
   2348    if (VG_(threads)[tid].status == VgTs_WaitSys) {
   2349       /* Signal arrived while we're blocked in a syscall.  This means that
   2350          the client's signal mask was applied.  In other words, we can't
   2351          get here unless the client wants this signal right now.  This means
   2352          we can simply use the async_signalhandler. */
   2353       if (VG_(clo_trace_signals))
   2354          VG_(dmsg)("Delivering user-sent sync signal %d as async signal\n",
   2355                    sigNo);
   2356 
   2357       async_signalhandler(sigNo, info, uc);
   2358       VG_(core_panic)("async_signalhandler returned!?\n");
   2359 
   2360    } else {
   2361       /* Signal arrived while in generated client code, or while running
   2362          Valgrind core code.  That means that every thread has these signals
   2363          unblocked, so we can't rely on the kernel to route them
   2364          properly; we need to queue them manually.  */
   2365       if (VG_(clo_trace_signals))
   2366          VG_(dmsg)("Routing user-sent sync signal %d via queue\n", sigNo);
   2367 
   2368 #     if defined(VGO_linux)
   2369       /* On Linux, first we have to do a sanity check of the siginfo. */
   2370       if (info->VKI_SIGINFO_si_pid == 0) {
   2371          /* There's a per-user limit of pending siginfo signals.  If
   2372             you exceed this, by having more than that number of
   2373             pending signals with siginfo, then new signals are
   2374             delivered without siginfo.  This condition can be caused
   2375             by any unrelated program you're running at the same time
   2376             as Valgrind, if it has a large number of pending siginfo
   2377             signals which it isn't taking delivery of.
   2378 
   2379             Since we depend on siginfo to work out why we were sent a
   2380             signal and what we should do about it, we really can't
   2381             continue unless we get it. */
   2382          VG_(umsg)("Signal %d (%s) appears to have lost its siginfo; "
   2383                    "I can't go on.\n", sigNo, VG_(signame)(sigNo));
   2384          VG_(printf)(
   2385 "  This may be because one of your programs has consumed your ration of\n"
   2386 "  siginfo structures.  For more information, see:\n"
   2387 "    http://kerneltrap.org/mailarchive/1/message/25599/thread\n"
   2388 "  Basically, some program on your system is building up a large queue of\n"
   2389 "  pending signals, and this causes the siginfo data for other signals to\n"
   2390 "  be dropped because it's exceeding a system limit.  However, Valgrind\n"
   2391 "  absolutely needs siginfo for SIGSEGV.  A workaround is to track down the\n"
   2392 "  offending program and avoid running it while using Valgrind, but there\n"
   2393 "  is no easy way to do this.  Apparently the problem was fixed in kernel\n"
   2394 "  2.6.12.\n");
   2395 
   2396          /* It's a fatal signal, so we force the default handler. */
   2397          VG_(set_default_handler)(sigNo);
   2398          deliver_signal(tid, info, uc);
   2399          resume_scheduler(tid);
   2400          VG_(exit)(99);       /* If we can't resume, then just exit */
   2401       }
   2402 #     endif
   2403 
   2404       qtid = 0;         /* shared pending by default */
   2405 #     if defined(VGO_linux)
   2406       if (info->si_code == VKI_SI_TKILL)
   2407          qtid = tid;    /* directed to us specifically */
   2408 #     endif
   2409       queue_signal(qtid, info);
   2410    }
   2411 }
   2412 
   2413 /* Returns the fault address that would be reported for an exact address */
   2414 static Addr fault_mask(Addr in)
   2415 {
   2416    /*  We have to use VG_PGROUNDDN because faults on s390x only deliver
   2417        the page address but not the address within a page.
   2418     */
   2419 #  if defined(VGA_s390x)
   2420    return VG_PGROUNDDN(in);
   2421 #  else
   2422    return in;
   2423 #endif
   2424 }
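
        /* E.g. on s390x with 4KiB pages, a fault at 0x8000123 is reported
           (and must therefore be compared) as 0x8000000. */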
   2425 
   2426 /* Returns True if the sync signal was due to the stack requiring extension
   2427    and the extension was successful.
   2428 */
   2429 static Bool extend_stack_if_appropriate(ThreadId tid, vki_siginfo_t* info)
   2430 {
   2431    Addr fault;
   2432    Addr esp;
   2433    NSegment const *seg, *seg_next;
   2434 
   2435    if (info->si_signo != VKI_SIGSEGV)
   2436       return False;
   2437 
   2438    fault    = (Addr)info->VKI_SIGINFO_si_addr;
   2439    esp      = VG_(get_SP)(tid);
   2440    seg      = VG_(am_find_nsegment)(fault);
   2441    seg_next = seg ? VG_(am_next_nsegment)( seg, True/*fwds*/ )
   2442                   : NULL;
   2443 
   2444    if (VG_(clo_trace_signals)) {
   2445       if (seg == NULL)
   2446          VG_(dmsg)("SIGSEGV: si_code=%d faultaddr=%#lx tid=%d ESP=%#lx "
   2447                    "seg=NULL\n",
   2448                    info->si_code, fault, tid, esp);
   2449       else
   2450          VG_(dmsg)("SIGSEGV: si_code=%d faultaddr=%#lx tid=%d ESP=%#lx "
   2451                    "seg=%#lx-%#lx\n",
   2452                    info->si_code, fault, tid, esp, seg->start, seg->end);
   2453    }
   2454 
   2455    if (info->si_code == VKI_SEGV_MAPERR
   2456        && seg
   2457        && seg->kind == SkResvn
   2458        && seg->smode == SmUpper
   2459        && seg_next
   2460        && seg_next->kind == SkAnonC
   2461        && fault >= fault_mask(esp - VG_STACK_REDZONE_SZB)) {
   2462       /* If the fault address is above esp but below the current known
   2463          stack segment base, and it was a fault because there was
   2464          nothing mapped there (as opposed to a permissions fault),
   2465          then extend the stack segment.
   2466        */
   2467       Addr base = VG_PGROUNDDN(esp - VG_STACK_REDZONE_SZB);
   2468       if (VG_(am_addr_is_in_extensible_client_stack)(base) &&
   2469           VG_(extend_stack)(tid, base)) {
   2470          if (VG_(clo_trace_signals))
   2471             VG_(dmsg)("       -> extended stack base to %#lx\n",
   2472                       VG_PGROUNDDN(fault));
   2473          return True;
   2474       } else {
   2475          return False;
   2476       }
   2477    } else {
   2478       return False;
   2479    }
   2480 }
   2481 
   2482 static
   2483 void sync_signalhandler_from_kernel ( ThreadId tid,
   2484          Int sigNo, vki_siginfo_t *info, struct vki_ucontext *uc )
   2485 {
   2486    /* Check to see if some part of Valgrind itself is interested in faults.
   2487       The fault catcher should never be set whilst we're in generated code, so
   2488       check for that.  AFAIK the only use of the catcher right now is
   2489       memcheck's leak detector. */
   2490    if (fault_catcher) {
   2491       vg_assert(VG_(in_generated_code) == False);
   2492 
   2493       (*fault_catcher)(sigNo, (Addr)info->VKI_SIGINFO_si_addr);
   2494       /* If the catcher returns, then it didn't handle the fault,
   2495          so carry on panicking. */
   2496    }
   2497 
   2498    if (extend_stack_if_appropriate(tid, info)) {
   2499       /* Stack extension occurred, so we don't need to do anything else; upon
   2500          returning from this function, we'll restart the host (hence guest)
   2501          instruction. */
   2502    } else {
   2503       /* OK, this is a signal we really have to deal with.  If it came
   2504          from the client's code, then we can jump back into the scheduler
   2505          and have it delivered.  Otherwise it's a Valgrind bug. */
   2506       ThreadState *tst = VG_(get_ThreadState)(tid);
   2507 
   2508       if (VG_(sigismember)(&tst->sig_mask, sigNo)) {
   2509          /* signal is blocked, but they're not allowed to block faults */
   2510          VG_(set_default_handler)(sigNo);
   2511       }
   2512 
   2513       if (VG_(in_generated_code)) {
   2514          if (VG_(gdbserver_report_signal) (info, tid)
   2515              || VG_(sigismember)(&tst->sig_mask, sigNo)) {
   2516             /* Can't continue; must longjmp back to the scheduler and thus
   2517                enter the sighandler immediately. */
   2518             deliver_signal(tid, info, uc);
   2519             resume_scheduler(tid);
   2520          }
   2521          else
   2522             resume_scheduler(tid);
   2523       }
   2524 
   2525       /* If resume_scheduler returns, or it's our fault, it means we
   2526          don't have a longjmp set up, implying that we weren't running
   2527          client code, and therefore the signal was actually generated
   2528          by Valgrind internally.
   2529        */
   2530       VG_(dmsg)("VALGRIND INTERNAL ERROR: Valgrind received "
   2531                 "a signal %d (%s) - exiting\n",
   2532                 sigNo, VG_(signame)(sigNo));
   2533 
   2534       VG_(dmsg)("si_code=%x;  Faulting address: %p;  sp: %#lx\n",
   2535                 info->si_code, info->VKI_SIGINFO_si_addr,
   2536                 VG_UCONTEXT_STACK_PTR(uc));
   2537 
   2538       if (0)
   2539          VG_(kill_self)(sigNo);  /* generate a core dump */
   2540 
   2541       //if (tid == 0)            /* could happen after everyone has exited */
   2542       //  tid = VG_(master_tid);
   2543       vg_assert(tid != 0);
   2544 
   2545       UnwindStartRegs startRegs;
   2546       VG_(memset)(&startRegs, 0, sizeof(startRegs));
   2547 
   2548       VG_UCONTEXT_TO_UnwindStartRegs(&startRegs, uc);
   2549       VG_(core_panic_at)("Killed by fatal signal", &startRegs);
   2550    }
   2551 }

/*
   Receive a sync signal from the host.
*/
static
void sync_signalhandler ( Int sigNo,
                          vki_siginfo_t *info, struct vki_ucontext *uc )
{
   ThreadId tid = VG_(lwpid_to_vgtid)(VG_(gettid)());
   Bool from_user;

   if (0)
      VG_(printf)("sync_sighandler(%d, %p, %p)\n", sigNo, info, uc);

   vg_assert(info != NULL);
   vg_assert(info->si_signo == sigNo);
   vg_assert(sigNo == VKI_SIGSEGV ||
             sigNo == VKI_SIGBUS  ||
             sigNo == VKI_SIGFPE  ||
             sigNo == VKI_SIGILL  ||
             sigNo == VKI_SIGTRAP);

   info->si_code = sanitize_si_code(info->si_code);

   from_user = !is_signal_from_kernel(tid, sigNo, info->si_code);

   /* The trace shows both the guest IP ("EIP", from the thread state)
      and the host IP ("eip", from the signal frame's ucontext). */
   if (VG_(clo_trace_signals)) {
      VG_(dmsg)("sync signal handler: "
                "signal=%d, si_code=%d, EIP=%#lx, eip=%#lx, from %s\n",
                sigNo, info->si_code, VG_(get_IP)(tid),
                VG_UCONTEXT_INSTR_PTR(uc),
                ( from_user ? "user" : "kernel" ));
   }
   vg_assert(sigNo >= 1 && sigNo <= VG_(max_signal));

   /* // debug code:
   if (0) {
      VG_(printf)("info->si_signo  %d\n", info->si_signo);
      VG_(printf)("info->si_errno  %d\n", info->si_errno);
      VG_(printf)("info->si_code   %d\n", info->si_code);
      VG_(printf)("info->si_pid    %d\n", info->si_pid);
      VG_(printf)("info->si_uid    %d\n", info->si_uid);
      VG_(printf)("info->si_status %d\n", info->si_status);
      VG_(printf)("info->si_addr   %p\n", info->si_addr);
   }
   */

   /* Figure out if the signal is being sent from outside the process.
      If it comes from the user rather than the kernel, it wasn't caused
      by a faulting instruction, so treat it more like an async signal
      than a sync signal -- that is, merely queue it for later
      delivery. */
   if (from_user) {
      sync_signalhandler_from_user(  tid, sigNo, info, uc);
   } else {
      sync_signalhandler_from_kernel(tid, sigNo, info, uc);
   }
}
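
/* For reference, a minimal sketch of the user-vs-kernel test that
   is_signal_from_kernel() performs on Linux-like platforms: si_code > 0
   (eg SEGV_MAPERR) marks a kernel-generated fault, while SI_USER (0)
   and negative codes (SI_QUEUE, SI_TKILL, ...) mark signals sent from
   user space via kill()/tkill()/sigqueue().  The real test is
   platform-dependent; this simplification is an assumption, not the
   function's actual body. */
#if 0
static Bool is_from_kernel_sketch ( Int si_code )
{
   return si_code > VKI_SI_USER;   /* VKI_SI_USER == 0 on Linux */
}
#endif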


/*
   Kill this thread.  Makes it leave any syscall it might currently be
   blocked in, and return to the scheduler.  This doesn't mark the thread
   as exiting; that's the caller's job.
 */
static void sigvgkill_handler(Int signo, vki_siginfo_t *si,
                                         struct vki_ucontext *uc)
{
   ThreadId     tid = VG_(lwpid_to_vgtid)(VG_(gettid)());
   ThreadStatus at_signal = VG_(threads)[tid].status;

   if (VG_(clo_trace_signals))
      VG_(dmsg)("sigvgkill for lwp %d tid %d\n", VG_(gettid)(), tid);

   VG_(acquire_BigLock)(tid, "sigvgkill_handler");

   vg_assert(signo == VG_SIGVGKILL);
   vg_assert(si->si_signo == signo);

   /* jrs 2006 August 3: the following assertion seems incorrect to
      me, and fails on AIX.  sigvgkill could be sent to a thread which
      is runnable - see VG_(nuke_all_threads_except) in the scheduler.
      Hence these are commented out ..

      vg_assert(VG_(threads)[tid].status == VgTs_WaitSys);
      VG_(post_syscall)(tid);

      and instead we do:
   */
   if (at_signal == VgTs_WaitSys)
      VG_(post_syscall)(tid);
   /* jrs 2006 August 3 ends */

   resume_scheduler(tid);

   VG_(core_panic)("sigvgkill_handler couldn't return to the scheduler\n");
}
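
/* Sketch of the sending side, paraphrased from the scheduler (see
   VG_(nuke_all_threads_except)): SIGVGKILL is raised along these lines
   when the target thread appears to be blocked in a syscall.  The
   field and function names here are recalled from the scheduler and
   may not match it exactly. */
#if 0
static void kill_thread_sketch ( ThreadId tid, VgSchedReturnCode why )
{
   VG_(threads)[tid].exitreason = why;   /* mark it as exiting... */
   if (VG_(threads)[tid].status == VgTs_WaitSys)
      /* ...then kick it out of the blocked syscall */
      VG_(tkill)(VG_(threads)[tid].os_state.lwpid, VG_SIGVGKILL);
}
#endif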

static __attribute__((unused))
void pp_ksigaction ( vki_sigaction_toK_t* sa )
{
   Int i;
   VG_(printf)("pp_ksigaction: handler %p, flags 0x%x, restorer %p\n",
               sa->ksa_handler,
               (UInt)sa->sa_flags,
#              if !defined(VGP_x86_darwin) && !defined(VGP_amd64_darwin)
                  sa->sa_restorer
#              else
                  (void*)0
#              endif
              );
   VG_(printf)("pp_ksigaction: { ");
   for (i = 1; i <= VG_(max_signal); i++)
      if (VG_(sigismember)(&(sa->sa_mask), i))
         VG_(printf)("%d ", i);
   VG_(printf)("}\n");
}

/*
   Force a signal's handler back to the default.
 */
void VG_(set_default_handler)(Int signo)
{
   vki_sigaction_toK_t sa;

   sa.ksa_handler = VKI_SIG_DFL;
   sa.sa_flags = 0;
#  if !defined(VGP_x86_darwin) && !defined(VGP_amd64_darwin)
   sa.sa_restorer = 0;
#  endif
   VG_(sigemptyset)(&sa.sa_mask);

   VG_(do_sys_sigaction)(signo, &sa, NULL);
}
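
/* In plain POSIX terms the above amounts to the following sketch,
   using the libc API rather than Valgrind's vki_* layer: */
#if 0
#include <signal.h>

static void set_default_handler_posix ( int signo )
{
   struct sigaction sa;
   sa.sa_handler = SIG_DFL;     /* restore the default disposition */
   sa.sa_flags   = 0;
   sigemptyset(&sa.sa_mask);
   sigaction(signo, &sa, NULL);
}
#endif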

/*
   Poll for pending signals, and set the next one up for delivery.
 */
void VG_(poll_signals)(ThreadId tid)
{
   vki_siginfo_t si, *sip;
   vki_sigset_t pollset;
   ThreadState *tst = VG_(get_ThreadState)(tid);
   vki_sigset_t saved_mask;

   /* look for all the signals this thread isn't blocking */
   /* pollset = ~tst->sig_mask */
   VG_(sigcomplementset)( &pollset, &tst->sig_mask );

   block_all_host_signals(&saved_mask); // protect signal queue

   /* First look for any queued pending signals */
   sip = next_queued(tid, &pollset); /* this thread */

   if (sip == NULL)
      sip = next_queued(0, &pollset); /* process-wide */

   /* If there was nothing queued, ask the kernel for a pending signal */
   if (sip == NULL && VG_(sigtimedwait_zero)(&pollset, &si) > 0) {
      if (VG_(clo_trace_signals))
         VG_(dmsg)("poll_signals: got signal %d for thread %d\n",
                   si.si_signo, tid);
      sip = &si;
   }

   if (sip != NULL) {
      /* OK, something to do; deliver it */
      if (VG_(clo_trace_signals))
         VG_(dmsg)("Polling found signal %d for tid %d\n", sip->si_signo, tid);
      if (!is_sig_ign(sip, tid))
         deliver_signal(tid, sip, NULL);
      else if (VG_(clo_trace_signals))
         VG_(dmsg)("   signal %d ignored\n", sip->si_signo);

      sip->si_signo = 0;  /* remove from signal queue, if that's
                             where it came from */
   }

   restore_all_host_signals(&saved_mask);
}
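
/* The "ask the kernel" step above relies on VG_(sigtimedwait_zero),
   which behaves like POSIX sigtimedwait() with a zero timeout:
   dequeue a pending signal from the set if one is available, return
   immediately otherwise, never blocking.  A user-space sketch of that
   primitive: */
#if 0
#include <signal.h>
#include <time.h>

static int poll_one_signal ( const sigset_t* set, siginfo_t* info )
{
   struct timespec zero = { 0, 0 };
   /* Returns the dequeued signal number (> 0), or -1 with
      errno == EAGAIN if nothing in 'set' was pending. */
   return sigtimedwait(set, info, &zero);
}
#endif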

/* At startup, copy the process' real signal state to the SCSS.
   Whilst doing this, block all real signals.  Then calculate SKSS and
   set the kernel to that.  Also initialise DCSS.
*/
void VG_(sigstartup_actions) ( void )
{
   Int i, ret, vKI_SIGRTMIN;
   vki_sigset_t saved_procmask;
   vki_sigaction_fromK_t sa;

   VG_(memset)(&scss, 0, sizeof(scss));
   VG_(memset)(&skss, 0, sizeof(skss));

#  if defined(VKI_SIGRTMIN)
   vKI_SIGRTMIN = VKI_SIGRTMIN;
#  else
   vKI_SIGRTMIN = 0; /* eg Darwin */
#  endif

   /* VG_(printf)("SIGSTARTUP\n"); */
   /* Block all signals.  saved_procmask remembers the previous mask,
      which the first thread inherits.
   */
   block_all_host_signals( &saved_procmask );

   /* Copy per-signal settings to SCSS. */
   for (i = 1; i <= _VKI_NSIG; i++) {
      /* Get the old host action */
      ret = VG_(sigaction)(i, NULL, &sa);

#     if defined(VGP_x86_darwin) || defined(VGP_amd64_darwin)
      /* apparently we may not even ask about the disposition of these
         signals, let alone change them */
      if (ret != 0 && (i == VKI_SIGKILL || i == VKI_SIGSTOP))
         continue;
#     endif

      if (ret != 0)
         break;

      /* Try setting it back to see if this signal is really
         available */
      if (vKI_SIGRTMIN > 0 /* it actually exists on this platform */
          && i >= vKI_SIGRTMIN) {
         vki_sigaction_toK_t tsa, sa2;

         tsa.ksa_handler = (void *)sync_signalhandler;
         tsa.sa_flags = VKI_SA_SIGINFO;
#        if !defined(VGP_x86_darwin) && !defined(VGP_amd64_darwin)
         tsa.sa_restorer = 0;
#        endif
         VG_(sigfillset)(&tsa.sa_mask);

         /* try setting it to some arbitrary handler */
         if (VG_(sigaction)(i, &tsa, NULL) != 0) {
            /* failed - not really usable */
            break;
         }

         VG_(convert_sigaction_fromK_to_toK)( &sa, &sa2 );
         ret = VG_(sigaction)(i, &sa2, NULL);
         vg_assert(ret == 0);
      }

      VG_(max_signal) = i;

      if (VG_(clo_trace_signals) && VG_(clo_verbosity) > 2)
         VG_(printf)("snaffling handler 0x%lx for signal %d\n",
                     (Addr)(sa.ksa_handler), i );

      scss.scss_per_sig[i].scss_handler  = sa.ksa_handler;
      scss.scss_per_sig[i].scss_flags    = sa.sa_flags;
      scss.scss_per_sig[i].scss_mask     = sa.sa_mask;

      scss.scss_per_sig[i].scss_restorer = NULL;
#     if !defined(VGP_x86_darwin) && !defined(VGP_amd64_darwin)
      scss.scss_per_sig[i].scss_restorer = sa.sa_restorer;
#     endif

      scss.scss_per_sig[i].scss_sa_tramp = NULL;
#     if defined(VGP_x86_darwin) || defined(VGP_amd64_darwin)
      scss.scss_per_sig[i].scss_sa_tramp = NULL;
      /*sa.sa_tramp;*/
      /* We can't know what it was, because Darwin's sys_sigaction
         doesn't tell us. */
#     endif
   }

   if (VG_(clo_trace_signals))
      VG_(dmsg)("Max kernel-supported signal is %d\n", VG_(max_signal));

   /* Our private internal signals are treated as ignored */
   scss.scss_per_sig[VG_SIGVGKILL].scss_handler = VKI_SIG_IGN;
   scss.scss_per_sig[VG_SIGVGKILL].scss_flags   = VKI_SA_SIGINFO;
   VG_(sigfillset)(&scss.scss_per_sig[VG_SIGVGKILL].scss_mask);

   /* Copy the process' signal mask into the root thread. */
   vg_assert(VG_(threads)[1].status == VgTs_Init);
   for (i = 2; i < VG_N_THREADS; i++)
      vg_assert(VG_(threads)[i].status == VgTs_Empty);

   VG_(threads)[1].sig_mask = saved_procmask;
   VG_(threads)[1].tmp_sig_mask = saved_procmask;

   /* Calculate SKSS and apply it.  This also sets the initial kernel
      mask we need to run with. */
   handle_SCSS_change( True /* forced update */ );

   /* Leave with all signals still blocked; the thread scheduler loop
      will set the appropriate mask at the appropriate time. */
}
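
/* The realtime-signal probe above, restated in plain POSIX for
   clarity (a sketch, not the code actually used): a signal >=
   SIGRTMIN is deemed usable only if a handler can really be installed
   for it.  Signals reserved eg by the threading library refuse the
   sigaction() and are excluded from VG_(max_signal). */
#if 0
#include <signal.h>

static int rt_signal_is_usable ( int signo )
{
   struct sigaction probe, saved;
   probe.sa_handler = SIG_IGN;         /* some arbitrary valid handler */
   probe.sa_flags   = 0;
   sigfillset(&probe.sa_mask);
   if (sigaction(signo, &probe, &saved) != 0)
      return 0;                        /* reserved - not really usable */
   sigaction(signo, &saved, NULL);     /* put the original back */
   return 1;
}
#endif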
   2843 
   2844 /*--------------------------------------------------------------------*/
   2845 /*--- end                                                          ---*/
   2846 /*--------------------------------------------------------------------*/
   2847