
/*--------------------------------------------------------------------*/
/*--- Platform-specific syscalls stuff.      syswrap-x86-solaris.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of Valgrind, a dynamic binary instrumentation
   framework.

   Copyright (C) 2011-2017 Petr Pavlu
      setup (at) dagobah.cz

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/

#if defined(VGP_x86_solaris)

#include "libvex_guest_offsets.h"
#include "pub_core_basics.h"
#include "pub_core_vki.h"
#include "pub_core_threadstate.h"
#include "pub_core_aspacemgr.h"
#include "pub_core_xarray.h"
#include "pub_core_clientstate.h"
#include "pub_core_debuglog.h"
#include "pub_core_libcassert.h"
#include "pub_core_libcbase.h"
#include "pub_core_libcfile.h"
#include "pub_core_libcprint.h"
#include "pub_core_libcsignal.h"
#include "pub_core_machine.h"           // VG_(get_SP)
#include "pub_core_mallocfree.h"
#include "pub_core_options.h"
#include "pub_core_tooliface.h"
#include "pub_core_signals.h"
#include "pub_core_syscall.h"
#include "pub_core_syswrap.h"

#include "priv_types_n_macros.h"
#include "priv_syswrap-generic.h"
#include "priv_syswrap-solaris.h"

/* Call f(arg1), but first switch stacks, using 'stack' as the new stack, and
   use 'retaddr' as f's return-to address.  Also, clear all the integer
   registers before entering f. */
__attribute__((noreturn))
void ML_(call_on_new_stack_0_1)(Addr stack,             /* 4(%esp) */
                                Addr retaddr,           /* 8(%esp) */
                                void (*f)(Word),        /* 12(%esp) */
                                Word arg1);             /* 16(%esp) */
__asm__ (
".text\n"
".globl vgModuleLocal_call_on_new_stack_0_1\n"
"vgModuleLocal_call_on_new_stack_0_1:\n"
"   movl  %esp, %esi\n"         /* remember old stack pointer */
"   movl  4(%esi), %esp\n"      /* set stack */
"   pushl $0\n"                 /* align stack */
"   pushl $0\n"                 /* align stack */
"   pushl $0\n"                 /* align stack */
"   pushl 16(%esi)\n"           /* arg1 to stack */
"   pushl 8(%esi)\n"            /* retaddr to stack */
"   pushl 12(%esi)\n"           /* f to stack */
"   movl  $0, %eax\n"           /* zero all GP regs */
"   movl  $0, %ebx\n"
"   movl  $0, %ecx\n"
"   movl  $0, %edx\n"
"   movl  $0, %esi\n"
"   movl  $0, %edi\n"
"   movl  $0, %ebp\n"
"   ret\n"                      /* jump to f */
"   ud2\n"                      /* should never get here */
".previous\n"
);
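
/* A sketch of what the code above builds on the new stack, just before its
   'ret' executes (assuming 'stack' is 16-byte aligned on entry):

      stack ->  0        three words of padding, keeping the
                0        eventual frame 16-byte aligned
                0
                arg1     seen by f at 4(%esp)
                retaddr  seen by f at 0(%esp), i.e. f's return slot
      %esp ->   f        popped and jumped to by the 'ret'

   So f starts exactly as if it had been reached by an ordinary CALL. */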

/* This function is called to set up the context of a new Valgrind thread
   (which will run the client code). */
void ML_(setup_start_thread_context)(ThreadId tid, vki_ucontext_t *uc)
{
   ThreadState *tst = VG_(get_ThreadState)(tid);
   UWord *stack = (UWord*)tst->os_state.valgrind_stack_init_SP;
   UShort cs, ds, ss, es, fs, gs;

   VG_(memset)(uc, 0, sizeof(*uc));
   uc->uc_flags = VKI_UC_CPU | VKI_UC_SIGMASK;

   /* Start the thread with everything blocked. */
   VG_(sigfillset)(&uc->uc_sigmask);

   /* Set up the stack; it should always be 16-byte aligned before doing
      a function call, i.e. the first parameter is then also 16-byte
      aligned. */
   vg_assert(VG_IS_16_ALIGNED(stack));
   stack -= 1;
   stack[0] = 0; /* bogus return value */
   stack[1] = (UWord)tst; /* the parameter */
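   /* Resulting layout, for illustration: the new thread starts with
      ESP == stack (set just below), so stack[0] plays the role of a return
      address slot (never used, since ML_(start_thread_NORETURN) does not
      return) and stack[1] is its single argument, exactly as after a
      CALL. */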

   /* Set up the registers. */
   uc->uc_mcontext.gregs[VKI_EIP] = (UWord)ML_(start_thread_NORETURN);
   uc->uc_mcontext.gregs[VKI_UESP] = (UWord)stack;

   /* Copy segment registers. */
   __asm__ __volatile__(
      "movw %%cs, %[cs]\n"
      "movw %%ds, %[ds]\n"
      "movw %%ss, %[ss]\n"
      "movw %%es, %[es]\n"
      "movw %%fs, %[fs]\n"
      "movw %%gs, %[gs]\n"
      : [cs] "=m" (cs), [ds] "=m" (ds), [ss] "=m" (ss), [es] "=m" (es),
        [fs] "=m" (fs), [gs] "=m" (gs));
   uc->uc_mcontext.gregs[VKI_CS] = cs;
   uc->uc_mcontext.gregs[VKI_DS] = ds;
   uc->uc_mcontext.gregs[VKI_SS] = ss;
   uc->uc_mcontext.gregs[VKI_ES] = es;
   uc->uc_mcontext.gregs[VKI_FS] = fs;
   uc->uc_mcontext.gregs[VKI_GS] = gs;
}

/* Architecture-specific part of VG_(save_context). */
void ML_(save_machine_context)(ThreadId tid, vki_ucontext_t *uc,
                               CorePart part)
{
   ThreadState *tst = VG_(get_ThreadState)(tid);
   struct vki_fpchip_state *fs
      = &uc->uc_mcontext.fpregs.fp_reg_set.fpchip_state;
   SizeT i;

   /* CPU */
   /* Common registers */
   uc->uc_mcontext.gregs[VKI_EIP] = tst->arch.vex.guest_EIP;
   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_x86_EIP,
            (Addr)&uc->uc_mcontext.gregs[VKI_EIP], sizeof(UWord));
   uc->uc_mcontext.gregs[VKI_EAX] = tst->arch.vex.guest_EAX;
   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_x86_EAX,
            (Addr)&uc->uc_mcontext.gregs[VKI_EAX], sizeof(UWord));
   uc->uc_mcontext.gregs[VKI_EBX] = tst->arch.vex.guest_EBX;
   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_x86_EBX,
            (Addr)&uc->uc_mcontext.gregs[VKI_EBX], sizeof(UWord));
   uc->uc_mcontext.gregs[VKI_ECX] = tst->arch.vex.guest_ECX;
   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_x86_ECX,
            (Addr)&uc->uc_mcontext.gregs[VKI_ECX], sizeof(UWord));
   uc->uc_mcontext.gregs[VKI_EDX] = tst->arch.vex.guest_EDX;
   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_x86_EDX,
            (Addr)&uc->uc_mcontext.gregs[VKI_EDX], sizeof(UWord));
   uc->uc_mcontext.gregs[VKI_EBP] = tst->arch.vex.guest_EBP;
   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_x86_EBP,
            (Addr)&uc->uc_mcontext.gregs[VKI_EBP], sizeof(UWord));
   uc->uc_mcontext.gregs[VKI_ESI] = tst->arch.vex.guest_ESI;
   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_x86_ESI,
            (Addr)&uc->uc_mcontext.gregs[VKI_ESI], sizeof(UWord));
   uc->uc_mcontext.gregs[VKI_EDI] = tst->arch.vex.guest_EDI;
   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_x86_EDI,
            (Addr)&uc->uc_mcontext.gregs[VKI_EDI], sizeof(UWord));
   uc->uc_mcontext.gregs[VKI_UESP] = tst->arch.vex.guest_ESP;
   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_x86_ESP,
            (Addr)&uc->uc_mcontext.gregs[VKI_UESP], sizeof(UWord));
   uc->uc_mcontext.gregs[VKI_ESP] = 0;
   VG_TRACK(post_mem_write, part, tid, (Addr)&uc->uc_mcontext.gregs[VKI_ESP],
            sizeof(UWord));

   /* ERR and TRAPNO */
   uc->uc_mcontext.gregs[VKI_ERR] = 0;
   VG_TRACK(post_mem_write, part, tid, (Addr)&uc->uc_mcontext.gregs[VKI_ERR],
            sizeof(UWord));
   uc->uc_mcontext.gregs[VKI_TRAPNO] = 0;
   VG_TRACK(post_mem_write, part, tid, (Addr)&uc->uc_mcontext.gregs[VKI_TRAPNO],
            sizeof(UWord));

   /* Segment registers */
   /* Note that segment registers are 16b in VEX, but 32b in mcontext.  Thus
      we tell a tool that the lower 16 bits were copied and that the higher 16
      bits were set (to zero).  (This assumes a little-endian
      architecture.) */
   uc->uc_mcontext.gregs[VKI_CS] = tst->arch.vex.guest_CS;
   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_x86_CS,
            (Addr)&uc->uc_mcontext.gregs[VKI_CS], sizeof(UShort));
   VG_TRACK(post_mem_write, part, tid,
            (Addr)(&uc->uc_mcontext.gregs[VKI_CS]) + 2, sizeof(UShort));
   uc->uc_mcontext.gregs[VKI_DS] = tst->arch.vex.guest_DS;
   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_x86_DS,
            (Addr)&uc->uc_mcontext.gregs[VKI_DS], sizeof(UShort));
   VG_TRACK(post_mem_write, part, tid,
            (Addr)(&uc->uc_mcontext.gregs[VKI_DS]) + 2, sizeof(UShort));
   uc->uc_mcontext.gregs[VKI_SS] = tst->arch.vex.guest_SS;
   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_x86_SS,
            (Addr)&uc->uc_mcontext.gregs[VKI_SS], sizeof(UShort));
   VG_TRACK(post_mem_write, part, tid,
            (Addr)(&uc->uc_mcontext.gregs[VKI_SS]) + 2, sizeof(UShort));
   uc->uc_mcontext.gregs[VKI_ES] = tst->arch.vex.guest_ES;
   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_x86_ES,
            (Addr)&uc->uc_mcontext.gregs[VKI_ES], sizeof(UShort));
   VG_TRACK(post_mem_write, part, tid,
            (Addr)(&uc->uc_mcontext.gregs[VKI_ES]) + 2, sizeof(UShort));
   uc->uc_mcontext.gregs[VKI_FS] = tst->arch.vex.guest_FS;
   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_x86_FS,
            (Addr)&uc->uc_mcontext.gregs[VKI_FS], sizeof(UShort));
   VG_TRACK(post_mem_write, part, tid,
            (Addr)(&uc->uc_mcontext.gregs[VKI_FS]) + 2, sizeof(UShort));
   uc->uc_mcontext.gregs[VKI_GS] = tst->arch.vex.guest_GS;
   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_x86_GS,
            (Addr)&uc->uc_mcontext.gregs[VKI_GS], sizeof(UShort));
   VG_TRACK(post_mem_write, part, tid,
            (Addr)(&uc->uc_mcontext.gregs[VKI_GS]) + 2, sizeof(UShort));

   /* Handle eflags (optimistically make all flags defined). */
   uc->uc_mcontext.gregs[VKI_EFL] =
      LibVEX_GuestX86_get_eflags(&tst->arch.vex);
   VG_TRACK(post_mem_write, part, tid, (Addr)&uc->uc_mcontext.gregs[VKI_EFL],
            sizeof(UWord));
   /* The LibVEX_GuestX86_get_eflags() call calculates the eflags value from
      the CC_OP, CC_DEP1, CC_DEP2, CC_NDEP, DFLAG, IDFLAG and ACFLAG guest
      state values.  The *FLAG values represent one-bit information and are
      saved into eflags without loss of precision.  However, when the CC_*
      values are converted into eflags, precision is lost.  What we do here
      is save the unmodified CC_* values into unused ucontext members (the
      'long uc_filler[5]' and 'int fs->__pad[2]' arrays) so we can later
      restore the context in ML_(restore_machine_context)() without that
      loss of precision.  This imposes a requirement on client programs not
      to use these two members.  Luckily this is never the case in
      Solaris-gate programs and libraries. */
   /* CC_OP and CC_NDEP are always defined, but we don't want to tell a tool
      that we just defined uc_filler[0,1].  This helps if someone uses an
      uninitialized ucontext and tries to read (use) uc_filler[0,1];
      Memcheck should then detect this error. */
   VKI_UC_GUEST_CC_OP(uc) = tst->arch.vex.guest_CC_OP;
   VKI_UC_GUEST_CC_NDEP(uc) = tst->arch.vex.guest_CC_NDEP;
   /* We want to copy shadow values of CC_DEP1 and CC_DEP2 so we have to tell
      a tool about this copy. */
   VKI_UC_GUEST_CC_DEP1(uc) = tst->arch.vex.guest_CC_DEP1;
   VG_TRACK(copy_reg_to_mem, part, tid,
            offsetof(VexGuestX86State, guest_CC_DEP1),
            (Addr)&VKI_UC_GUEST_CC_DEP1(uc), sizeof(UWord));
   VKI_UC_GUEST_CC_DEP2(uc) = tst->arch.vex.guest_CC_DEP2;
   VG_TRACK(copy_reg_to_mem, part, tid,
            offsetof(VexGuestX86State, guest_CC_DEP2),
            (Addr)&VKI_UC_GUEST_CC_DEP2(uc), sizeof(UWord));
   /* Make another copy of eflags. */
   VKI_UC_GUEST_EFLAGS_NEG(uc) = ~uc->uc_mcontext.gregs[VKI_EFL];
   /* Calculate a checksum. */
   {
      UInt buf[5];
      UInt checksum;

      buf[0] = VKI_UC_GUEST_CC_OP(uc);
      buf[1] = VKI_UC_GUEST_CC_NDEP(uc);
      buf[2] = VKI_UC_GUEST_CC_DEP1(uc);
      buf[3] = VKI_UC_GUEST_CC_DEP2(uc);
      buf[4] = uc->uc_mcontext.gregs[VKI_EFL];
      checksum = ML_(fletcher32)((UShort*)&buf, sizeof(buf) / sizeof(UShort));
      /* Store the checksum. */
      VKI_UC_GUEST_EFLAGS_CHECKSUM(uc) = checksum;
   }
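   /* ML_(fletcher32) is assumed here to be the standard Fletcher-32 checksum
      over 16-bit words, conceptually:

         UInt fletcher32(UShort *data, SizeT words)
         {
            UInt sum1 = 0, sum2 = 0;
            SizeT i;
            for (i = 0; i < words; i++) {
               sum1 = (sum1 + data[i]) % 0xffff;
               sum2 = (sum2 + sum1) % 0xffff;
            }
            return (sum2 << 16) | sum1;
         }

      The stored checksum lets ML_(restore_machine_context)() verify that the
      stashed CC_* values and the eflags value still belong together before
      trusting them. */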

   /* FPU */
   /* x87 */
   vg_assert(sizeof(fs->state) == 108);
   LibVEX_GuestX86_get_x87(&tst->arch.vex, (UChar*)&fs->state);

   /* Flags and control words */
   VG_TRACK(post_mem_write, part, tid, (Addr)&fs->state, 28);
   /* ST registers */
   for (i = 0; i < 8; i++) {
      Addr addr = (Addr)&fs->state + 28 + i * 10;
      /* x87 uses 80b FP registers but VEX uses only 64b registers, thus we
         have to lie here. :< */
      VG_TRACK(copy_reg_to_mem, part, tid, offsetof(VexGuestX86State,
               guest_FPREG[i]), addr, sizeof(ULong));
      VG_TRACK(copy_reg_to_mem, part, tid, offsetof(VexGuestX86State,
               guest_FPREG[i]), addr + 8, sizeof(UShort));
   }
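   /* Layout assumed by the loop above: fs->state holds a 108-byte FNSAVE
      image, i.e. a 28-byte environment (control word, status word, tag word,
      instruction/data pointers) followed by eight 10-byte ST registers,
      hence the '28 + i * 10' addressing and the sizeof(fs->state) == 108
      assertion. */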

   /* Status word (sw) at exception */
   fs->status = 0;
   VG_TRACK(post_mem_write, part, tid, (Addr)&fs->status, sizeof(fs->status));

   /* SSE */
   fs->mxcsr = LibVEX_GuestX86_get_mxcsr(&tst->arch.vex);
   VG_TRACK(post_mem_write, part, tid, (Addr)&fs->mxcsr, sizeof(fs->mxcsr));

   /* MXCSR at exception */
   fs->xstatus = 0;
   VG_TRACK(post_mem_write, part, tid, (Addr)&fs->xstatus,
            sizeof(fs->xstatus));

   /* XMM registers */
#define COPY_OUT_XMM(dest, src) \
   do {                         \
      dest._l[0] = src[0];      \
      dest._l[1] = src[1];      \
      dest._l[2] = src[2];      \
      dest._l[3] = src[3];      \
   } while (0)
   COPY_OUT_XMM(fs->xmm[0], tst->arch.vex.guest_XMM0);
   VG_TRACK(copy_reg_to_mem, part, tid, offsetof(VexGuestX86State,
            guest_XMM0), (Addr)&fs->xmm[0], sizeof(U128));
   COPY_OUT_XMM(fs->xmm[1], tst->arch.vex.guest_XMM1);
   VG_TRACK(copy_reg_to_mem, part, tid, offsetof(VexGuestX86State,
            guest_XMM1), (Addr)&fs->xmm[1], sizeof(U128));
   COPY_OUT_XMM(fs->xmm[2], tst->arch.vex.guest_XMM2);
   VG_TRACK(copy_reg_to_mem, part, tid, offsetof(VexGuestX86State,
            guest_XMM2), (Addr)&fs->xmm[2], sizeof(U128));
   COPY_OUT_XMM(fs->xmm[3], tst->arch.vex.guest_XMM3);
   VG_TRACK(copy_reg_to_mem, part, tid, offsetof(VexGuestX86State,
            guest_XMM3), (Addr)&fs->xmm[3], sizeof(U128));
   COPY_OUT_XMM(fs->xmm[4], tst->arch.vex.guest_XMM4);
   VG_TRACK(copy_reg_to_mem, part, tid, offsetof(VexGuestX86State,
            guest_XMM4), (Addr)&fs->xmm[4], sizeof(U128));
   COPY_OUT_XMM(fs->xmm[5], tst->arch.vex.guest_XMM5);
   VG_TRACK(copy_reg_to_mem, part, tid, offsetof(VexGuestX86State,
            guest_XMM5), (Addr)&fs->xmm[5], sizeof(U128));
   COPY_OUT_XMM(fs->xmm[6], tst->arch.vex.guest_XMM6);
   VG_TRACK(copy_reg_to_mem, part, tid, offsetof(VexGuestX86State,
            guest_XMM6), (Addr)&fs->xmm[6], sizeof(U128));
   COPY_OUT_XMM(fs->xmm[7], tst->arch.vex.guest_XMM7);
   VG_TRACK(copy_reg_to_mem, part, tid, offsetof(VexGuestX86State,
            guest_XMM7), (Addr)&fs->xmm[7], sizeof(U128));
#undef COPY_OUT_XMM
}

/* Architecture-specific part of VG_(restore_context). */
void ML_(restore_machine_context)(ThreadId tid, vki_ucontext_t *uc,
                                  CorePart part, Bool esp_is_thrptr)
{
   ThreadState *tst = VG_(get_ThreadState)(tid);
   struct vki_fpchip_state *fs
      = &uc->uc_mcontext.fpregs.fp_reg_set.fpchip_state;

   /* CPU */
   if (uc->uc_flags & VKI_UC_CPU) {
      /* Common registers */
      tst->arch.vex.guest_EIP = uc->uc_mcontext.gregs[VKI_EIP];
      VG_TRACK(copy_mem_to_reg, part, tid,
               (Addr)&uc->uc_mcontext.gregs[VKI_EIP], OFFSET_x86_EIP,
               sizeof(UWord));
      tst->arch.vex.guest_EAX = uc->uc_mcontext.gregs[VKI_EAX];
      VG_TRACK(copy_mem_to_reg, part, tid,
               (Addr)&uc->uc_mcontext.gregs[VKI_EAX], OFFSET_x86_EAX,
               sizeof(UWord));
      tst->arch.vex.guest_EBX = uc->uc_mcontext.gregs[VKI_EBX];
      VG_TRACK(copy_mem_to_reg, part, tid,
               (Addr)&uc->uc_mcontext.gregs[VKI_EBX], OFFSET_x86_EBX,
               sizeof(UWord));
      tst->arch.vex.guest_ECX = uc->uc_mcontext.gregs[VKI_ECX];
      VG_TRACK(copy_mem_to_reg, part, tid,
               (Addr)&uc->uc_mcontext.gregs[VKI_ECX], OFFSET_x86_ECX,
               sizeof(UWord));
      tst->arch.vex.guest_EDX = uc->uc_mcontext.gregs[VKI_EDX];
      VG_TRACK(copy_mem_to_reg, part, tid,
               (Addr)&uc->uc_mcontext.gregs[VKI_EDX], OFFSET_x86_EDX,
               sizeof(UWord));
      tst->arch.vex.guest_EBP = uc->uc_mcontext.gregs[VKI_EBP];
      VG_TRACK(copy_mem_to_reg, part, tid,
               (Addr)&uc->uc_mcontext.gregs[VKI_EBP], OFFSET_x86_EBP,
               sizeof(UWord));
      tst->arch.vex.guest_ESI = uc->uc_mcontext.gregs[VKI_ESI];
      VG_TRACK(copy_mem_to_reg, part, tid,
               (Addr)&uc->uc_mcontext.gregs[VKI_ESI], OFFSET_x86_ESI,
               sizeof(UWord));
      tst->arch.vex.guest_EDI = uc->uc_mcontext.gregs[VKI_EDI];
      VG_TRACK(copy_mem_to_reg, part, tid,
               (Addr)&uc->uc_mcontext.gregs[VKI_EDI], OFFSET_x86_EDI,
               sizeof(UWord));
      tst->arch.vex.guest_ESP = uc->uc_mcontext.gregs[VKI_UESP];
      VG_TRACK(copy_mem_to_reg, part, tid,
               (Addr)&uc->uc_mcontext.gregs[VKI_UESP], OFFSET_x86_ESP,
               sizeof(UWord));

      if (esp_is_thrptr) {
         /* The thrptr value is passed by libc to the kernel in the otherwise
            unused ESP field.  This is used when a new thread is created. */
         VG_TRACK(pre_mem_read, part, tid,
                  "restore_machine_context(uc->uc_mcontext.gregs[VKI_ESP])",
                  (Addr)&uc->uc_mcontext.gregs[VKI_ESP], sizeof(UWord));
         if (uc->uc_mcontext.gregs[VKI_ESP]) {
            tst->os_state.thrptr = uc->uc_mcontext.gregs[VKI_ESP];
            ML_(update_gdt_lwpgs)(tid);
         }
      }

      /* Ignore ERR and TRAPNO. */

      /* Segment registers */
      tst->arch.vex.guest_CS = uc->uc_mcontext.gregs[VKI_CS];
      VG_TRACK(copy_mem_to_reg, part, tid,
               (Addr)&uc->uc_mcontext.gregs[VKI_CS], OFFSET_x86_CS,
               sizeof(UShort));
      tst->arch.vex.guest_DS = uc->uc_mcontext.gregs[VKI_DS];
      VG_TRACK(copy_mem_to_reg, part, tid,
               (Addr)&uc->uc_mcontext.gregs[VKI_DS], OFFSET_x86_DS,
               sizeof(UShort));
      tst->arch.vex.guest_SS = uc->uc_mcontext.gregs[VKI_SS];
      VG_TRACK(copy_mem_to_reg, part, tid,
               (Addr)&uc->uc_mcontext.gregs[VKI_SS], OFFSET_x86_SS,
               sizeof(UShort));
      tst->arch.vex.guest_ES = uc->uc_mcontext.gregs[VKI_ES];
      VG_TRACK(copy_mem_to_reg, part, tid,
               (Addr)&uc->uc_mcontext.gregs[VKI_ES], OFFSET_x86_ES,
               sizeof(UShort));
      tst->arch.vex.guest_FS = uc->uc_mcontext.gregs[VKI_FS];
      VG_TRACK(copy_mem_to_reg, part, tid,
               (Addr)&uc->uc_mcontext.gregs[VKI_FS], OFFSET_x86_FS,
               sizeof(UShort));
      tst->arch.vex.guest_GS = uc->uc_mcontext.gregs[VKI_GS];
      VG_TRACK(copy_mem_to_reg, part, tid,
               (Addr)&uc->uc_mcontext.gregs[VKI_GS], OFFSET_x86_GS,
               sizeof(UShort));

      /* Eflags */
      {
         UInt eflags;
         UInt orig_eflags;
         UInt new_eflags;
         Bool ok_restore = False;

         VG_TRACK(pre_mem_read, part, tid,
                  "restore_machine_context(uc->uc_mcontext.gregs[VKI_EFL])",
                  (Addr)&uc->uc_mcontext.gregs[VKI_EFL], sizeof(UWord));
         eflags = uc->uc_mcontext.gregs[VKI_EFL];
         orig_eflags = LibVEX_GuestX86_get_eflags(&tst->arch.vex);
         new_eflags = eflags;
         /* The kernel does not allow the ID flag to be changed via the
            setcontext call, so do the same here. */
         if (orig_eflags & VKI_EFLAGS_ID_BIT)
            new_eflags |= VKI_EFLAGS_ID_BIT;
         else
            new_eflags &= ~VKI_EFLAGS_ID_BIT;
         LibVEX_GuestX86_put_eflags(new_eflags, &tst->arch.vex);
         VG_TRACK(post_reg_write, part, tid,
                  offsetof(VexGuestX86State, guest_CC_DEP1), sizeof(UWord));
         VG_TRACK(post_reg_write, part, tid,
                  offsetof(VexGuestX86State, guest_CC_DEP2), sizeof(UWord));
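         /* LibVEX_GuestX86_put_eflags() presumably folds the flat eflags
            value back into the CC_* thunk (hence the post_reg_write calls on
            CC_DEP1 and CC_DEP2 above); the original CC_OP encoding is lost
            in that conversion, which is the imprecision the checksum path
            below tries to undo. */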

         /* Check if this context was created by us in VG_(save_context). In
            that case, try to restore the CC_OP, CC_DEP1, CC_DEP2 and CC_NDEP
            values which we previously stashed into unused members of the
            context. */
         if (eflags != ~VKI_UC_GUEST_EFLAGS_NEG(uc)) {
            VG_(debugLog)(1, "syswrap-solaris",
                             "The eflags value was restored from an "
                             "explicitly set value in thread %u.\n", tid);
            ok_restore = True;
         }
         else {
            UInt buf[5];
            UInt checksum;

            buf[0] = VKI_UC_GUEST_CC_OP(uc);
            buf[1] = VKI_UC_GUEST_CC_NDEP(uc);
            buf[2] = VKI_UC_GUEST_CC_DEP1(uc);
            buf[3] = VKI_UC_GUEST_CC_DEP2(uc);
            buf[4] = eflags;
            checksum = ML_(fletcher32)((UShort*)&buf,
                                       sizeof(buf) / sizeof(UShort));
            if (checksum == VKI_UC_GUEST_EFLAGS_CHECKSUM(uc)) {
               /* Check ok, the full restoration is possible. */
               VG_(debugLog)(1, "syswrap-solaris",
                                "The CC_* guest state values were fully "
                                "restored in thread %u.\n", tid);
               ok_restore = True;

               tst->arch.vex.guest_CC_OP = VKI_UC_GUEST_CC_OP(uc);
               tst->arch.vex.guest_CC_NDEP = VKI_UC_GUEST_CC_NDEP(uc);
               tst->arch.vex.guest_CC_DEP1 = VKI_UC_GUEST_CC_DEP1(uc);
               VG_TRACK(copy_mem_to_reg, part, tid,
                        (Addr)&VKI_UC_GUEST_CC_DEP1(uc),
                        offsetof(VexGuestX86State, guest_CC_DEP1),
                        sizeof(UWord));
               tst->arch.vex.guest_CC_DEP2 = VKI_UC_GUEST_CC_DEP2(uc);
               VG_TRACK(copy_mem_to_reg, part, tid,
                        (Addr)&VKI_UC_GUEST_CC_DEP2(uc),
                        offsetof(VexGuestX86State, guest_CC_DEP2),
                        sizeof(UWord));
            }
         }

         if (!ok_restore)
            VG_(debugLog)(1, "syswrap-solaris",
                             "Cannot fully restore the CC_* guest state "
                             "values, using approximate eflags in thread "
                             "%u.\n", tid);
      }
   }

   if (uc->uc_flags & VKI_UC_FPU) {
      /* FPU */
      VexEmNote note;
      SizeT i;

      /* x87 */
      /* Flags and control words */
      VG_TRACK(pre_mem_read, part, tid,
               "restore_machine_context(uc->uc_mcontext.fpregs..x87_state)",
               (Addr)&fs->state, 28);
      /* ST registers */
      for (i = 0; i < 8; i++) {
         Addr addr = (Addr)&fs->state + 28 + i * 10;
         VG_TRACK(copy_mem_to_reg, part, tid, addr,
                  offsetof(VexGuestX86State, guest_FPREG[i]), sizeof(ULong));
      }
      note = LibVEX_GuestX86_put_x87((UChar*)&fs->state, &tst->arch.vex);
      if (note != EmNote_NONE)
         VG_(message)(Vg_UserMsg,
                      "Error restoring x87 state in thread %u: %s.\n",
                      tid, LibVEX_EmNote_string(note));

      /* SSE */
      VG_TRACK(pre_mem_read, part, tid,
               "restore_machine_context(uc->uc_mcontext.fpregs..mxcsr)",
               (Addr)&fs->mxcsr, sizeof(fs->mxcsr));
      note = LibVEX_GuestX86_put_mxcsr(fs->mxcsr, &tst->arch.vex);
      if (note != EmNote_NONE)
         VG_(message)(Vg_UserMsg,
                      "Error restoring mxcsr state in thread %u: %s.\n",
                      tid, LibVEX_EmNote_string(note));
      /* XMM registers */
#define COPY_IN_XMM(src, dest) \
      do {                     \
         dest[0] = src._l[0];  \
         dest[1] = src._l[1];  \
         dest[2] = src._l[2];  \
         dest[3] = src._l[3];  \
      } while (0)
      COPY_IN_XMM(fs->xmm[0], tst->arch.vex.guest_XMM0);
      VG_TRACK(copy_mem_to_reg, part, tid, (Addr)&fs->xmm[0],
               offsetof(VexGuestX86State, guest_XMM0), sizeof(U128));
      COPY_IN_XMM(fs->xmm[1], tst->arch.vex.guest_XMM1);
      VG_TRACK(copy_mem_to_reg, part, tid, (Addr)&fs->xmm[1],
               offsetof(VexGuestX86State, guest_XMM1), sizeof(U128));
      COPY_IN_XMM(fs->xmm[2], tst->arch.vex.guest_XMM2);
      VG_TRACK(copy_mem_to_reg, part, tid, (Addr)&fs->xmm[2],
               offsetof(VexGuestX86State, guest_XMM2), sizeof(U128));
      COPY_IN_XMM(fs->xmm[3], tst->arch.vex.guest_XMM3);
      VG_TRACK(copy_mem_to_reg, part, tid, (Addr)&fs->xmm[3],
               offsetof(VexGuestX86State, guest_XMM3), sizeof(U128));
      COPY_IN_XMM(fs->xmm[4], tst->arch.vex.guest_XMM4);
      VG_TRACK(copy_mem_to_reg, part, tid, (Addr)&fs->xmm[4],
               offsetof(VexGuestX86State, guest_XMM4), sizeof(U128));
      COPY_IN_XMM(fs->xmm[5], tst->arch.vex.guest_XMM5);
      VG_TRACK(copy_mem_to_reg, part, tid, (Addr)&fs->xmm[5],
               offsetof(VexGuestX86State, guest_XMM5), sizeof(U128));
      COPY_IN_XMM(fs->xmm[6], tst->arch.vex.guest_XMM6);
      VG_TRACK(copy_mem_to_reg, part, tid, (Addr)&fs->xmm[6],
               offsetof(VexGuestX86State, guest_XMM6), sizeof(U128));
      COPY_IN_XMM(fs->xmm[7], tst->arch.vex.guest_XMM7);
      VG_TRACK(copy_mem_to_reg, part, tid, (Addr)&fs->xmm[7],
               offsetof(VexGuestX86State, guest_XMM7), sizeof(U128));
#undef COPY_IN_XMM
   }
}

/* Allocate GDT for a given thread. */
void ML_(setup_gdt)(VexGuestX86State *vex)
{
   Addr gdt = (Addr)VG_(calloc)("syswrap-solaris-x86.gdt",
                                VEX_GUEST_X86_GDT_NENT,
                                sizeof(VexGuestX86SegDescr));
   vex->guest_GDT = gdt;
}

/* Deallocate GDT for a given thread. */
void ML_(cleanup_gdt)(VexGuestX86State *vex)
{
   if (!vex->guest_GDT)
      return;
   VG_(free)((void *) (HWord) vex->guest_GDT);
   vex->guest_GDT = 0;
}

/* For a given thread, update the LWPGS descriptor in the thread's GDT
   according to the thread pointer. */
void ML_(update_gdt_lwpgs)(ThreadId tid)
{
   ThreadState *tst = VG_(get_ThreadState)(tid);
   Addr base = tst->os_state.thrptr;
   VexGuestX86SegDescr *gdt
      = (VexGuestX86SegDescr *) (HWord) tst->arch.vex.guest_GDT;
   VexGuestX86SegDescr desc;

   vg_assert(gdt);

   VG_(memset)(&desc, 0, sizeof(desc));
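   /* What follows builds, when base is non-zero, a standard IA-32 flat data
      segment descriptor: the all-ones limit fields with page granularity
      cover the full 4GiB address space, marked present, DPL 3 and
      read/write-accessed, based at 'base'.  A zero thrptr leaves the
      descriptor entirely cleared. */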
   if (base) {
      desc.LdtEnt.Bits.LimitLow = -1;
      desc.LdtEnt.Bits.LimitHi = -1;
      desc.LdtEnt.Bits.BaseLow = base & 0xffff;
      desc.LdtEnt.Bits.BaseMid = (base >> 16) & 0xff;
      desc.LdtEnt.Bits.BaseHi = (base >> 24) & 0xff;
      desc.LdtEnt.Bits.Pres = 1;
      desc.LdtEnt.Bits.Dpl = 3; /* SEL_UPL */
      desc.LdtEnt.Bits.Type = 19; /* SDT_MEMRWA */
      desc.LdtEnt.Bits.Granularity = 1; /* SDP_PAGES */
      desc.LdtEnt.Bits.Default_Big = 1; /* SDP_OP32 */
   }

   gdt[VKI_GDT_LWPGS] = desc;
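   /* The %gs selector written below (VKI_LWPGS_SEL) indexes this
      VKI_GDT_LWPGS slot, so the client's %gs-relative accesses now resolve
      against the new thread pointer. */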

   /* Write %gs. */
   tst->arch.vex.guest_GS = VKI_LWPGS_SEL;
   VG_TRACK(post_reg_write, Vg_CoreSysCall, tid, OFFSET_x86_GS,
            sizeof(UShort));
}


/* ---------------------------------------------------------------------
   PRE/POST wrappers for x86/Solaris-specific syscalls
   ------------------------------------------------------------------ */

#define PRE(name)       DEFN_PRE_TEMPLATE(x86_solaris, name)
#define POST(name)      DEFN_POST_TEMPLATE(x86_solaris, name)

/* implementation */

PRE(sys_fstatat64)
{
   /* int fstatat64(int fildes, const char *path, struct stat64 *buf,
                    int flag); */
   PRINT("sys_fstatat64 ( %ld, %#lx(%s), %#lx, %ld )", SARG1, ARG2,
         (HChar*)ARG2, ARG3, SARG4);
   PRE_REG_READ4(long, "fstatat64", int, fildes, const char *, path,
                 struct stat64 *, buf, int, flag);
   if (ARG2)
      PRE_MEM_RASCIIZ("fstatat64(path)", ARG2);
   PRE_MEM_WRITE("fstatat64(buf)", ARG3, sizeof(struct vki_stat64));

   /* Be strict. */
   if (ARG1 != VKI_AT_FDCWD &&
       !ML_(fd_allowed)(ARG1, "fstatat64", tid, False))
      SET_STATUS_Failure(VKI_EBADF);
}

POST(sys_fstatat64)
{
   POST_MEM_WRITE(ARG3, sizeof(struct vki_stat64));
}

PRE(sys_openat64)
{
   /* int openat64(int fildes, const char *filename, int flags);
      int openat64(int fildes, const char *filename, int flags, mode_t mode);
    */
   *flags |= SfMayBlock;

   if (ARG3 & VKI_O_CREAT) {
      /* 4-arg version */
      PRINT("sys_openat64 ( %ld, %#lx(%s), %ld, %ld )", SARG1, ARG2,
            (HChar*)ARG2, SARG3, SARG4);
      PRE_REG_READ4(long, "openat64", int, fildes, const char *, filename,
                    int, flags, vki_mode_t, mode);
   }
   else {
      /* 3-arg version */
      PRINT("sys_openat64 ( %ld, %#lx(%s), %ld )", SARG1, ARG2, (HChar*)ARG2,
            SARG3);
      PRE_REG_READ3(long, "openat64", int, fildes, const char *, filename,
                    int, flags);
   }

   PRE_MEM_RASCIIZ("openat64(filename)", ARG2);

   /* Be strict. */
   if (ARG1 != VKI_AT_FDCWD && !ML_(fd_allowed)(ARG1, "openat64", tid, False))
      SET_STATUS_Failure(VKI_EBADF);
}

POST(sys_openat64)
{
   if (!ML_(fd_allowed)(RES, "openat64", tid, True)) {
      VG_(close)(RES);
      SET_STATUS_Failure(VKI_EMFILE);
   }
   else if (VG_(clo_track_fds))
      ML_(record_fd_open_with_given_name)(tid, RES, (HChar*)ARG2);
}

PRE(sys_llseek32)
{
   /* offset_t llseek(int fildes, offset_t offset, int whence); */
   PRINT("sys_llseek32 ( %ld, %#lx, %#lx, %ld )", SARG1, ARG2, ARG3, SARG4);
   PRE_REG_READ4(long, "llseek", int, fildes, vki_u32, offset_low,
                 vki_u32, offset_high, int, whence);
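   /* The 64-bit offset is passed to the 32-bit syscall interface as two
      32-bit halves, which is why four register arguments are read for the
      three-argument C prototype above. */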

   /* Stay sane. */
   if (!ML_(fd_allowed)(ARG1, "llseek", tid, False))
      SET_STATUS_Failure(VKI_EBADF);
}

PRE(sys_mmap64)
{
   /* void *mmap64(void *addr, size_t len, int prot, int flags,
                   int fildes, uint32_t offlo, uint32_t offhi); */
   /* Note this wrapper assumes a little-endian architecture; offlo and
      offhi would have to be swapped on a big-endian architecture. */
#if !defined(VG_LITTLEENDIAN)
#error "Unexpected endianness."
#endif /* !VG_LITTLEENDIAN */

   SysRes r;
   ULong u;
   Off64T offset;

   /* Stay sane. */
   vg_assert(VKI_PAGE_SIZE == 4096);
   vg_assert(sizeof(u) == sizeof(offset));

   PRINT("sys_mmap ( %#lx, %#lx, %#lx, %#lx, %ld, %#lx, %#lx )",
         ARG1, ARG2, ARG3, ARG4, SARG5, ARG6, ARG7);
   PRE_REG_READ7(long, "mmap", void *, start, vki_size_t, length,
                 int, prot, int, flags, int, fd, uint32_t, offlo,
                 uint32_t, offhi);

   /* The offlo and offhi values can actually represent a negative value.
      Make sure it's passed correctly to the generic mmap wrapper. */
   u = ((ULong)ARG7 << 32) + ARG6;
   offset = *(Off64T*)&u;
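   /* For illustration: offlo == 0xfffff000 and offhi == 0xffffffff give
      u == 0xfffffffffffff000, which reinterpreted as a signed Off64T is the
      offset -4096. */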

   r = ML_(generic_PRE_sys_mmap)(tid, ARG1, ARG2, ARG3, ARG4, ARG5, offset);
   SET_STATUS_from_SysRes(r);
}

PRE(sys_stat64)
{
   /* int stat64(const char *path, struct stat64 *buf); */
   PRINT("sys_stat64 ( %#lx(%s), %#lx )", ARG1, (HChar*)ARG1, ARG2);
   PRE_REG_READ2(long, "stat64", const char *, path, struct stat64 *, buf);

   PRE_MEM_RASCIIZ("stat64(path)", ARG1);
   PRE_MEM_WRITE("stat64(buf)", ARG2, sizeof(struct vki_stat64));
}

POST(sys_stat64)
{
   POST_MEM_WRITE(ARG2, sizeof(struct vki_stat64));
}

PRE(sys_lstat64)
{
   /* int lstat64(const char *path, struct stat64 *buf); */
   PRINT("sys_lstat64 ( %#lx(%s), %#lx )", ARG1, (HChar*)ARG1, ARG2);
   PRE_REG_READ2(long, "lstat64", const char *, path, struct stat64 *, buf);

   PRE_MEM_RASCIIZ("lstat64(path)", ARG1);
   PRE_MEM_WRITE("lstat64(buf)", ARG2, sizeof(struct vki_stat64));
}

POST(sys_lstat64)
{
   POST_MEM_WRITE(ARG2, sizeof(struct vki_stat64));
}

PRE(sys_fstat64)
{
   /* int fstat64(int fildes, struct stat64 *buf); */
   PRINT("sys_fstat64 ( %ld, %#lx )", SARG1, ARG2);
   PRE_REG_READ2(long, "fstat64", int, fildes, struct stat64 *, buf);
   PRE_MEM_WRITE("fstat64(buf)", ARG2, sizeof(struct vki_stat64));

   /* Be strict. */
   if (!ML_(fd_allowed)(ARG1, "fstat64", tid, False))
      SET_STATUS_Failure(VKI_EBADF);
}

POST(sys_fstat64)
{
   POST_MEM_WRITE(ARG2, sizeof(struct vki_stat64));
}

static void do_statvfs64_post(struct vki_statvfs64 *stats, ThreadId tid)
{
   POST_FIELD_WRITE(stats->f_bsize);
   POST_FIELD_WRITE(stats->f_frsize);
   POST_FIELD_WRITE(stats->f_blocks);
   POST_FIELD_WRITE(stats->f_bfree);
   POST_FIELD_WRITE(stats->f_bavail);
   POST_FIELD_WRITE(stats->f_files);
   POST_FIELD_WRITE(stats->f_ffree);
   POST_FIELD_WRITE(stats->f_favail);
   POST_FIELD_WRITE(stats->f_fsid);
   POST_MEM_WRITE((Addr) stats->f_basetype, VG_(strlen)(stats->f_basetype) + 1);
   POST_FIELD_WRITE(stats->f_flag);
   POST_FIELD_WRITE(stats->f_namemax);
   POST_MEM_WRITE((Addr) stats->f_fstr, VG_(strlen)(stats->f_fstr) + 1);
}

PRE(sys_statvfs64)
{
   /* int statvfs64(const char *path, struct statvfs64 *buf); */
   *flags |= SfMayBlock;
   PRINT("sys_statvfs64 ( %#lx(%s), %#lx )", ARG1, (HChar *) ARG1, ARG2);
   PRE_REG_READ2(long, "statvfs64", const char *, path,
                 struct vki_statvfs64 *, buf);
   PRE_MEM_RASCIIZ("statvfs64(path)", ARG1);
   PRE_MEM_WRITE("statvfs64(buf)", ARG2, sizeof(struct vki_statvfs64));
}

POST(sys_statvfs64)
{
   do_statvfs64_post((struct vki_statvfs64 *) ARG2, tid);
}

PRE(sys_fstatvfs64)
{
   /* int fstatvfs64(int fd, struct statvfs64 *buf); */
   *flags |= SfMayBlock;
   PRINT("sys_fstatvfs64 ( %ld, %#lx )", SARG1, ARG2);
   PRE_REG_READ2(long, "fstatvfs64", int, fd, struct vki_statvfs64 *, buf);
   PRE_MEM_WRITE("fstatvfs64(buf)", ARG2, sizeof(struct vki_statvfs64));

   /* Be strict. */
   if (!ML_(fd_allowed)(ARG1, "fstatvfs64", tid, False))
      SET_STATUS_Failure(VKI_EBADF);
}

POST(sys_fstatvfs64)
{
   do_statvfs64_post((struct vki_statvfs64 *) ARG2, tid);
}

PRE(sys_setrlimit64)
{
   /* int setrlimit64(int resource, struct rlimit64 *rlim); */
   struct vki_rlimit64 *limit = (struct vki_rlimit64 *)ARG2;
   PRINT("sys_setrlimit64 ( %ld, %#lx )", SARG1, ARG2);
   PRE_REG_READ2(long, "setrlimit64", int, resource, struct rlimit64 *, rlim);
   PRE_MEM_READ("setrlimit64(rlim)", ARG2, sizeof(struct vki_rlimit64));
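   /* The NOFILE and DATA limits (and STACK, for the main thread) are
      emulated below and a status is set explicitly; for any other resource
      no status is set, which by the usual PRE-wrapper convention lets the
      syscall through to the kernel unchanged. */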

   if (limit && limit->rlim_cur > limit->rlim_max)
      SET_STATUS_Failure(VKI_EINVAL);
   else if (ARG1 == VKI_RLIMIT_NOFILE) {
      if (limit->rlim_cur > VG_(fd_hard_limit) ||
          limit->rlim_max != VG_(fd_hard_limit)) {
         SET_STATUS_Failure(VKI_EPERM);
      }
      else {
         VG_(fd_soft_limit) = limit->rlim_cur;
         SET_STATUS_Success(0);
      }
   }
   else if (ARG1 == VKI_RLIMIT_DATA) {
      if (limit->rlim_cur > VG_(client_rlimit_data).rlim_max ||
          limit->rlim_max > VG_(client_rlimit_data).rlim_max) {
         SET_STATUS_Failure(VKI_EPERM);
      }
      else {
         VG_(client_rlimit_data).rlim_max = limit->rlim_max;
         VG_(client_rlimit_data).rlim_cur = limit->rlim_cur;
         SET_STATUS_Success(0);
      }
   }
   else if (ARG1 == VKI_RLIMIT_STACK && tid == 1) {
      if (limit->rlim_cur > VG_(client_rlimit_stack).rlim_max ||
          limit->rlim_max > VG_(client_rlimit_stack).rlim_max) {
         SET_STATUS_Failure(VKI_EPERM);
      }
      else {
         /* Change the value of client_stack_szB to the rlim_cur value but
            only if it is smaller than the size of the allocated stack for the
            client. */
         if (limit->rlim_cur <= VG_(clstk_max_size))
            VG_(threads)[tid].client_stack_szB = limit->rlim_cur;

         VG_(client_rlimit_stack).rlim_max = limit->rlim_max;
         VG_(client_rlimit_stack).rlim_cur = limit->rlim_cur;
         SET_STATUS_Success(0);
      }
   }
}

PRE(sys_getrlimit64)
{
   /* int getrlimit64(int resource, struct rlimit64 *rlim); */
   PRINT("sys_getrlimit64 ( %ld, %#lx )", SARG1, ARG2);
   PRE_REG_READ2(long, "getrlimit64",
                 int, resource, struct rlimit64 *, rlim);
   PRE_MEM_WRITE("getrlimit64(rlim)", ARG2, sizeof(struct vki_rlimit64));
}

POST(sys_getrlimit64)
{
   /* Based on common_post_getrlimit() from syswrap-generic.c. */
   struct vki_rlimit64 *rlim = (struct vki_rlimit64*)ARG2;

   POST_MEM_WRITE(ARG2, sizeof(struct vki_rlimit64));

   switch (ARG1 /*resource*/) {
   case VKI_RLIMIT_NOFILE:
      rlim->rlim_cur = VG_(fd_soft_limit);
      rlim->rlim_max = VG_(fd_hard_limit);
      break;
   case VKI_RLIMIT_DATA:
      rlim->rlim_cur = VG_(client_rlimit_data).rlim_cur;
      rlim->rlim_max = VG_(client_rlimit_data).rlim_max;
      break;
   case VKI_RLIMIT_STACK:
      rlim->rlim_cur = VG_(client_rlimit_stack).rlim_cur;
      rlim->rlim_max = VG_(client_rlimit_stack).rlim_max;
      break;
   }
}

PRE(sys_pread64)
{
   /* ssize32_t pread64(int fd, void *buf, size32_t count,
                        uint32_t offset_1, uint32_t offset_2);
    */
   *flags |= SfMayBlock;
   PRINT("sys_pread64 ( %ld, %#lx, %lu, %#lx, %#lx )",
         SARG1, ARG2, ARG3, ARG4, ARG5);
   PRE_REG_READ5(long, "pread64", int, fd, void *, buf, vki_size32_t, count,
                 vki_uint32_t, offset_1, vki_uint32_t, offset_2);
   PRE_MEM_WRITE("pread64(buf)", ARG2, ARG3);

   /* Be strict. */
   if (!ML_(fd_allowed)(ARG1, "pread64", tid, False))
      SET_STATUS_Failure(VKI_EBADF);
}

POST(sys_pread64)
{
   POST_MEM_WRITE(ARG2, RES);
}

PRE(sys_pwrite64)
{
   /* ssize32_t pwrite64(int fd, void *buf, size32_t count,
                         uint32_t offset_1, uint32_t offset_2);
    */
   *flags |= SfMayBlock;
   PRINT("sys_pwrite64 ( %ld, %#lx, %lu, %#lx, %#lx )",
         SARG1, ARG2, ARG3, ARG4, ARG5);
   PRE_REG_READ5(long, "pwrite64", int, fd, void *, buf, vki_size32_t, count,
                 vki_uint32_t, offset_1, vki_uint32_t, offset_2);
   PRE_MEM_READ("pwrite64(buf)", ARG2, ARG3);

   /* Be strict. */
   if (!ML_(fd_allowed)(ARG1, "pwrite64", tid, False))
      SET_STATUS_Failure(VKI_EBADF);
}

PRE(sys_open64)
{
   /* int open64(const char *filename, int flags);
      int open64(const char *filename, int flags, mode_t mode); */
   *flags |= SfMayBlock;

   if (ARG2 & VKI_O_CREAT) {
      /* 3-arg version */
      PRINT("sys_open64 ( %#lx(%s), %#lx, %ld )", ARG1, (HChar*)ARG1, ARG2,
            SARG3);
      PRE_REG_READ3(long, "open64", const char *, filename, int, flags,
                    vki_mode_t, mode);
   }
   else {
      /* 2-arg version */
      PRINT("sys_open64 ( %#lx(%s), %#lx )", ARG1, (HChar*)ARG1, ARG2);
      PRE_REG_READ2(long, "open64", const char *, filename, int, flags);
   }
   PRE_MEM_RASCIIZ("open64(filename)", ARG1);
}

POST(sys_open64)
{
   if (!ML_(fd_allowed)(RES, "open64", tid, True)) {
      VG_(close)(RES);
      SET_STATUS_Failure(VKI_EMFILE);
   }
   else if (VG_(clo_track_fds))
      ML_(record_fd_open_with_given_name)(tid, RES, (HChar*)ARG1);
}

#undef PRE
#undef POST

#endif // defined(VGP_x86_solaris)

/*--------------------------------------------------------------------*/
/*--- end                                                          ---*/
/*--------------------------------------------------------------------*/