Home | History | Annotate | Download | only in m_syswrap
      1 
      2 /*--------------------------------------------------------------------*/
      3 /*--- Platform-specific syscalls stuff.       syswrap-ppc32-aix5.c ---*/
      4 /*--------------------------------------------------------------------*/
      5 
      6 /*
      7    This file is part of Valgrind, a dynamic binary instrumentation
      8    framework.
      9 
     10    Copyright (C) 2006-2010 OpenWorks LLP
     11       info (at) open-works.co.uk
     12 
     13    This program is free software; you can redistribute it and/or
     14    modify it under the terms of the GNU General Public License as
     15    published by the Free Software Foundation; either version 2 of the
     16    License, or (at your option) any later version.
     17 
     18    This program is distributed in the hope that it will be useful, but
     19    WITHOUT ANY WARRANTY; without even the implied warranty of
     20    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
     21    General Public License for more details.
     22 
     23    You should have received a copy of the GNU General Public License
     24    along with this program; if not, write to the Free Software
     25    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
     26    02111-1307, USA.
     27 
     28    The GNU General Public License is contained in the file COPYING.
     29 
     30    Neither the names of the U.S. Department of Energy nor the
     31    University of California nor the names of its contributors may be
     32    used to endorse or promote products derived from this software
     33    without prior written permission.
     34 */
     35 
     36 #if defined(VGP_ppc32_aix5)
     37 
     38 #include "pub_core_basics.h"
     39 #include "pub_core_vki.h"
     40 #include "pub_core_vkiscnums.h"
     41 #include "pub_core_threadstate.h"
     42 #include "pub_core_debuglog.h"
     43 #include "pub_core_libcassert.h"
     44 #include "pub_core_libcprint.h"
     45 #include "pub_core_libcproc.h"
     46 #include "pub_core_options.h"
     47 #include "pub_core_scheduler.h"
     48 #include "pub_core_sigframe.h"      // For VG_(sigframe_destroy)()
     49 #include "pub_core_signals.h"
     50 #include "pub_core_syscall.h"
     51 #include "pub_core_syswrap.h"
     52 #include "pub_core_tooliface.h"
     53 
     54 #include "priv_types_n_macros.h"
     55 #include "priv_syswrap-aix5.h"      /* for decls of aix5-common wrappers */
     56 #include "priv_syswrap-main.h"
     57 
     58 
     59 /* --------- HACKS --------- */
     60 /* XXXXXXXXXXXX these HACKS are copies of stuff in syswrap-linux.c;
     61    check for duplication. */
     62 /* HACK: is in syswrap-generic.c, but that doesn't get built on AIX. */
     63 /* Dump out a summary, and a more detailed list, of open file descriptors. */
void VG_(show_open_fds) ( void )
{
  /* Not implemented on ppc32/AIX5: --track-fds support is absent
     here, so any call aborts Valgrind via I_die_here. */
  I_die_here;
}
     68 static Bool i_am_the_only_thread ( void )
     69 {
     70    Int c = VG_(count_living_threads)();
     71    vg_assert(c >= 1); /* stay sane */
     72    return c == 1;
     73 }
     74 void VG_(reap_threads)(ThreadId self)
     75 {
     76    while (!i_am_the_only_thread()) {
     77       /* Let other thread(s) run */
     78       VG_(vg_yield)();
     79       VG_(poll_signals)(self);
     80    }
     81    vg_assert(i_am_the_only_thread());
     82 }
void VG_(init_preopened_fds) ( void )
{
   /* Not implemented on ppc32/AIX5: startup fd tracking is absent
      here, so any call aborts Valgrind via I_die_here. */
   I_die_here;
}
     87 
     88 
     89 // Run a thread from beginning to end and return the thread's
     90 // scheduler-return-code.
/* Run one thread from beginning to end, on the simulated CPU, and
   return the reason the scheduler gave for stopping it.  On entry the
   thread must be in VgTs_Init state; on exit the caller still holds
   the BigLock. */
static VgSchedReturnCode thread_wrapper(Word /*ThreadId*/ tidW)
{
   VgSchedReturnCode ret;
   ThreadId     tid = (ThreadId)tidW;
   ThreadState* tst = VG_(get_ThreadState)(tid);

   VG_(debugLog)(1, "syswrap-aix32",
                    "thread_wrapper(tid=%lld): entry\n",
                    (ULong)tidW);

   vg_assert(tst->status == VgTs_Init);

   /* make sure we get the CPU lock before doing anything significant */
   VG_(acquire_BigLock)(tid, "thread_wrapper(starting new thread)");

   /* Debug aid, normally compiled out. */
   if (0)
      VG_(printf)("thread tid %d started: stack = %p\n",
                  tid, &tid);

   VG_TRACK( pre_thread_first_insn, tid );

   /* Record this thread's kernel-level identity so that other
      wrappers (e.g. thread_setstate) can find it by lwpid. */
   tst->os_state.lwpid = VG_(gettid)();
   tst->os_state.threadgroup = VG_(getpid)();

   /* Thread created with all signals blocked; scheduler will set the
      appropriate mask */
   ret = VG_(scheduler)(tid);

   vg_assert(VG_(is_exiting)(tid));

   vg_assert(tst->status == VgTs_Runnable);
   vg_assert(VG_(is_running_thread)(tid));

   VG_(debugLog)(1, "syswrap-aix32",
                    "thread_wrapper(tid=%lld): exit\n",
                    (ULong)tidW);

   /* Return to caller, still holding the lock. */
   return ret;
}
    131 
    132 
    133 /* Run a thread all the way to the end, then do appropriate exit actions
    134    (this is the last-one-out-turn-off-the-lights bit).  */
    135 static void run_a_thread_NORETURN ( Word tidW )
    136 {
    137    ThreadId          tid = (ThreadId)tidW;
    138    VgSchedReturnCode src;
    139    Int               c;
    140 
    141    VG_(debugLog)(1, "syswrap-aix32",
    142                     "run_a_thread_NORETURN(tid=%lld): pre-thread_wrapper\n",
    143                     (ULong)tidW);
    144 
    145    /* Run the thread all the way through. */
    146    src = thread_wrapper(tid);
    147 
    148    VG_(debugLog)(1, "syswrap-aix32",
    149                     "run_a_thread_NORETURN(tid=%lld): post-thread_wrapper\n",
    150                     (ULong)tidW);
    151 
    152    c = VG_(count_living_threads)();
    153    vg_assert(c >= 1); /* stay sane */
    154 
    155    vg_assert(src == VgSrc_ExitThread
    156              || src == VgSrc_ExitProcess
    157              || src == VgSrc_FatalSig);
    158 
    159    if (c == 1 || src == VgSrc_ExitProcess) {
    160 
    161       VG_(debugLog)(1, "syswrap-aix32",
    162                        "run_a_thread_NORETURN(tid=%lld): "
    163                           "exit process (%d threads remaining)\n",
    164                           (ULong)tidW, c);
    165 
    166       /* We are the last one standing.  Keep hold of the lock and
    167          carry on to show final tool results, then exit the entire system.
    168          Use the continuation pointer set at startup in m_main. */
    169       ( * VG_(address_of_m_main_shutdown_actions_NORETURN) ) (tid, src);
    170 
    171    } else {
    172 
    173       ThreadState *tst;
    174 
    175       VG_(debugLog)(1, "syswrap-aix32",
    176                        "run_a_thread_NORETURN(tid=%lld): "
    177                           "not last one standing\n",
    178                           (ULong)tidW);
    179 
    180       /* OK, thread is dead, but others still exist.  Just exit. */
    181       vg_assert(c >= 2);
    182       tst = VG_(get_ThreadState)(tid);
    183 
    184       /* This releases the run lock */
    185       VG_(exit_thread)(tid);
    186       vg_assert(tst->status == VgTs_Zombie);
    187 
    188       /* We have to use this sequence to terminate the thread to
    189          prevent a subtle race.  If VG_(exit_thread)() had left the
    190          ThreadState as Empty, then it could have been reallocated,
    191          reusing the stack while we're doing these last cleanups.
    192          Instead, VG_(exit_thread) leaves it as Zombie to prevent
    193          reallocation.  We need to make sure we don't touch the stack
    194          between marking it Empty and exiting.  Hence the
    195          assembler. */
    196       { UInt block[4];
    197         vg_assert(sizeof(tst->status == 4));
    198         vg_assert(__NR_AIX5_thread_terminate
    199                   != __NR_AIX5_UNKNOWN);
    200         block[0] = (UInt)VgTs_Empty;
    201         block[1] = (UInt) & (tst->status);
    202         block[2] = (UInt) tst->os_state.exitcode;
    203         block[3] = __NR_AIX5_thread_terminate;
    204         asm volatile (
    205           "mr 29,%0\n\t"           /* r29 = &block[0] */
    206           "lwz 20, 0(29)\n\t"      /* r20 = VgTs_Empty */
    207           "lwz 21, 4(29)\n\t"      /* r21 = & (tst->status) */
    208           "lwz 22, 8(29)\n\t"      /* r22 = tst->os_state.exitcode */
    209           "lwz 23, 12(29)\n\t"     /* r23 = __NR_exit */
    210           /* after this point we can't safely use the stack. */
    211           "stw 20, 0(21)\n\t"      /* tst->status = VgTs_Empty */
    212           "mr 2,23\n\t"            /* r2 = __NR_exit */
    213           "mr 3,22\n\t"            /* set r3 = tst->os_state.exitcode */
    214           /* set up for syscall */
    215           "crorc 6,6,6\n\t"
    216           ".long 0x48000005\n\t"   /* "bl here+4" */
    217           "mflr 29\n\t"
    218           "addi 29,29,16\n\t"
    219           "mtlr 29\n\t"
    220           "sc\n\t"                 /* exit(tst->os_state.exitcode) */
    221           :
    222           : "b" (&block[0])
    223           : "lr", "memory", "r2", "r3", "r20", "r21", "r22", "r23", "r29"
    224         );
    225       }
    226 
    227       VG_(core_panic)("Thread exit failed?\n");
    228    }
    229 
    230    /*NOTREACHED*/
    231    vg_assert(0);
    232 }
    233 
    234 
    235 static Word start_thread_NORETURN ( void* arg )
    236 {
    237    ThreadState* tst = (ThreadState*)arg;
    238    ThreadId     tid = tst->tid;
    239 
    240    run_a_thread_NORETURN ( (Word)tid );
    241    /*NOTREACHED*/
    242    vg_assert(0);
    243 }
    244 
    245 
/* Call f(arg1), but first switch stacks, using 'stack' as the new
   stack.  f itself needs to never return.  'f_NORETURN' is treated as
   an AIX-style function descriptor: word [0] is the entry address,
   [1] the TOC (r2) value, [2] the environment (r11) value — see the
   per-word comments below. */
__attribute__((noreturn))
static
void call_on_new_stack_0_1_NORETURN ( Addr stack,
                                      void (*f_NORETURN)(Word),
                                      Word arg1 )
{
   UWord* fdescr = (UWord*)f_NORETURN;
   /* Stage everything in a block first, since once r1 is switched we
      must not touch the old stack. */
   volatile UWord block[5];
   block[0] = fdescr[0];  /* nia */
   block[1] = stack;      /* r1 */
   block[2] = fdescr[1];  /* r2 */
   block[3] = arg1;       /* r3 */
   block[4] = fdescr[2];  /* r11 */
   __asm__ __volatile__(
      "mr  4,%0\n\t" /* r4 = block */
      "lwz 1,4(4)\n\t"
      "lwz 2,8(4)\n\t"
      "lwz 3,12(4)\n\t"
      "lwz 11,16(4)\n\t"
      "lwz 4,0(4)\n\t"
      "mtctr 4\n\t"
      "bctr\n"
      : /*out*/ : /*in*/ "b"(&block[0])
   );
   /*NOTREACHED*/
   __asm__ __volatile__("trap");
   while (1) {} /* convince gcc that this really doesn't return */
}
    276 
    277 
    278 /* Allocate a stack for the main thread, and run it all the way to the
    279    end.  Although we already have a working VgStack
    280    (VG_(interim_stack)) it's better to allocate a new one, so that
    281    overflow detection works uniformly for all threads.
    282 */
    283 void VG_(main_thread_wrapper_NORETURN)(ThreadId tid)
    284 {
    285    Addr sp;
    286    VG_(debugLog)(1, "syswrap-aix32",
    287                     "entering VG_(main_thread_wrapper_NORETURN)\n");
    288 
    289    sp = ML_(allocstack)(tid);
    290 
    291    /* make a stack frame */
    292    sp -= 16;
    293    sp &= ~0xF;
    294    *(UWord *)sp = 0;
    295 
    296    /* If we can't even allocate the first thread's stack, we're hosed.
    297       Give up. */
    298    vg_assert2(sp != 0, "Cannot allocate main thread's stack.");
    299 
    300    /* shouldn't be any other threads around yet */
    301    vg_assert( VG_(count_living_threads)() == 1 );
    302 
    303    call_on_new_stack_0_1_NORETURN(
    304       (Addr)sp,               /* stack */
    305       run_a_thread_NORETURN,  /* fn to call */
    306       (Word)tid               /* arg to give it */
    307    );
    308 
    309    /*NOTREACHED*/
    310    vg_assert(0);
    311 }
    312 
    313 /* --------- end HACKS --------- */
    314 
    315 
    316 /* ---------------------------------------------------------------------
    317    More thread stuff
    318    ------------------------------------------------------------------ */
    319 
void VG_(cleanup_thread) ( ThreadArchState* arch )
{
   /* No per-thread architecture-specific cleanup is needed on
      ppc32/AIX5. */
}
    323 
    324 
    325 /* ---------------------------------------------------------------------
    326    PRE/POST wrappers for ppc32/AIX5-specific syscalls
    327    ------------------------------------------------------------------ */
    328 
    329 /* --- !!! --- EXTERNAL HEADERS start --- !!! --- */
    330 #include <sys/thread.h>
    331 /* --- !!! --- EXTERNAL HEADERS end --- !!! --- */
    332 
    333 
    334 /* Add prototypes for the wrappers declared here, so that gcc doesn't
    335    harass us for not having prototypes.  Really this is a kludge --
    336    the right thing to do is to make these wrappers 'static' since they
    337    aren't visible outside this file, but that requires even more macro
    338    magic. */
    339 
    340 #define PRE(name)       DEFN_PRE_TEMPLATE(ppc32_aix5, name)
    341 #define POST(name)      DEFN_POST_TEMPLATE(ppc32_aix5, name)
    342 
    343 DECL_TEMPLATE(ppc32_aix5, sys___loadx);
    344 DECL_TEMPLATE(ppc32_aix5, sys___unload);
    345 DECL_TEMPLATE(ppc32_aix5, sys__clock_gettime);
    346 DECL_TEMPLATE(ppc32_aix5, sys_thread_setmymask_fast);
    347 DECL_TEMPLATE(ppc32_aix5, sys_thread_setstate);
    348 DECL_TEMPLATE(ppc32_aix5, sys_FAKE_SIGRETURN);
    349 
    350 
PRE(sys___loadx)
{
   /* __loadx is a multiplexed loader syscall; the sub-operation is
      selected by the DL_* function code extracted from ARG1 with
      VKI_DL_FUNCTION_MASK.  Each arm below just traces the call and
      marks any kernel-written output buffer. */
   *flags |= SfMayBlock;
   if ((ARG1 & VKI_DL_FUNCTION_MASK) == VKI_DL_LOAD) {
      PRINT("__loadx(0x%lx(DL_LOAD),0x%lx,%ld,0x%lx(%s),0x%lx(%s))",
            ARG1,ARG2,ARG3,
            ARG4,(HChar*)ARG4,
            ARG5, (ARG5 ? (HChar*)ARG5 : "nil") );
      /* It would appear that (ARG2, ARG3) describe a buffer
         which is written into by the kernel on success. */
      PRE_MEM_WRITE("__loadx(DL_LOAD)(ARG2,ARG3)", ARG2, ARG3);
   }
   else
   if ((ARG1 & VKI_DL_FUNCTION_MASK) == VKI_DL_POSTLOADQ) {
      PRINT("__loadx(0x%lx(DL_POSTLOADQ),0x%lx,%ld,0x%lx)",
            ARG1,ARG2,ARG3,ARG4);
    /* It would appear that (ARG2, ARG3) describe a buffer
        which is written into by the kernel on success. */
     PRE_MEM_WRITE("__loadx(DL_POSTLOADQ)(ARG2,ARG3)", ARG2, ARG3);
   }
   else
   if ((ARG1 & VKI_DL_FUNCTION_MASK) == VKI_DL_GLOBALSYM) {
      /* Symbol lookup: no output buffer to mark. */
      PRINT("__loadx(0x%lx(DL_GLOBALSYM),0x%lx(%s),0x%lx,0x%lx,0x%lx)",
            ARG1, ARG2,(Char*)ARG2,
            ARG3, ARG4, ARG5);
   }
   else
   if ((ARG1 & VKI_DL_FUNCTION_MASK) == VKI_DL_EXITQ) {
      PRINT("__loadx(0x%lx(DL_EXITQ),0x%lx,%ld)", ARG1, ARG2, ARG3);
      PRE_MEM_WRITE("__loadx(DL_EXITQ)(ARG2,ARG3)", ARG2, ARG3);
   }
   else
   if ((ARG1 & VKI_DL_FUNCTION_MASK) == VKI_DL_EXECQ) {
      PRINT("__loadx(0x%lx(DL_EXECQ),0x%lx,%ld)", ARG1, ARG2, ARG3);
      PRE_MEM_WRITE("__loadx(DL_EXECQ)(ARG2,ARG3)", ARG2, ARG3);
   }
   else
   if ((ARG1 & VKI_DL_FUNCTION_MASK) == VKI_DL_GETSYM) {
      /* Symbol lookup: no output buffer to mark. */
      PRINT("__loadx(0x%lx(DL_GETSYM),0x%lx(%s),%ld,0x%lx)",
            ARG1, ARG2,(Char*)ARG2, ARG3, ARG4);
   }
   else
   if ((ARG1 & VKI_DL_FUNCTION_MASK) == VKI_DL_PREUNLOADQ) {
      PRINT("__loadx(0x%lx(DL_PREUNLOADQ),0x%lx,%ld,0x%lx)",
            ARG1,ARG2,ARG3,ARG4);
      PRE_MEM_WRITE("__loadx(DL_PREUNLOADQ)(ARG2,ARG3)", ARG2, ARG3);
   }
   else
   if ((ARG1 & VKI_DL_FUNCTION_MASK) == 0x0D000000) {
      /* Function code observed in the wild but with no known name;
         hence no VKI_DL_* constant for it. */
      PRINT("__loadx(0x%lx(UNDOCUMENTED),0x%lx,0x%lx,0x%lx)",
            ARG1,ARG2,ARG3,ARG4);
      /* This doesn't appear to have any args, from the examples I've
         seen. */
   }
   else {
      PRINT("__loadx (BOGUS HANDLER) (0x%lx, ..)", ARG1);
   }
}
    409 POST(sys___loadx)
    410 {
    411    if ((ARG1 & VKI_DL_FUNCTION_MASK) == VKI_DL_PREUNLOADQ
    412        && SUCCESS) {
    413       POST_MEM_WRITE(ARG2, ARG3);
    414    }
    415    else
    416 
    417    if ((ARG1 & VKI_DL_FUNCTION_MASK) == VKI_DL_POSTLOADQ
    418        && SUCCESS) {
    419       POST_MEM_WRITE(ARG2, ARG3);
    420    }
    421    else
    422 
    423    if ((ARG1 & VKI_DL_FUNCTION_MASK) == VKI_DL_EXITQ
    424        && SUCCESS) {
    425       POST_MEM_WRITE(ARG2, ARG3);
    426    }
    427    else
    428 
    429    if ((ARG1 & VKI_DL_FUNCTION_MASK) == VKI_DL_EXECQ
    430        && SUCCESS) {
    431       POST_MEM_WRITE(ARG2, ARG3);
    432    }
    433    else
    434 
    435    if ((ARG1 & VKI_DL_FUNCTION_MASK) == VKI_DL_LOAD
    436        && SUCCESS) {
    437 
    438       /* See comment in pre-handler */
    439       POST_MEM_WRITE(ARG2, ARG3);
    440 
    441       /* A module load succeeded.  Tell m_debuginfo, m_transtab, and
    442          the tool. */
    443       ML_(aix5_rescan_procmap_after_load_or_unload)();
    444    }
    445 }
    446 
PRE(sys___unload)
{
   /* Trace only; ARG1 is presumably the module handle to unload —
      exact semantics undocumented. */
   PRINT("__unload (UNDOCUMENTED) ( %#lx )", ARG1);
}
POST(sys___unload)
{
   /* A module unload succeeded.  Tell m_debuginfo, m_transtab, and the
      tool. */
   ML_(aix5_rescan_procmap_after_load_or_unload)();
}
    457 
PRE(sys__clock_gettime)
{
   /* Seems like ARG3 points at a destination buffer? */
   /* _clock_gettime (UNDOCUMENTED) ( 0, 0xA, 0x2FF21808 ) */
   PRINT("_clock_gettime (UNDOCUMENTED) ( %ld, %#lx, %#lx )", ARG1, ARG2, ARG3 );
   PRE_REG_READ3(int, "_clock_gettime", int, arg1, int, arg2, void*, arg3);
   /* Assume the destination is a struct timespec-sized buffer. */
   PRE_MEM_WRITE( "_clock_gettime(dst)", ARG3, sizeof(struct timespec) );
}
POST(sys__clock_gettime)
{
   /* On success, the kernel filled in the timespec at ARG3; mark it
      as written. */
   vg_assert(SUCCESS);
   POST_MEM_WRITE( ARG3, sizeof(struct timespec) );
}
    471 
PRE(sys_thread_setmymask_fast)
{
   /* args: a 64-bit signal mask in ARG1/2.*/
   /* On the assumption that this simply sets the thread's signal
      mask, we act like sigprocmask(SIG_SETMASK, set, NULL) and don't
      hand this to the kernel.  Layout verified 30 July 06. */
   vki_sigset_t set;
   PRINT("thread_setmymask_fast (BOGUS HANDLER)( %08lx %08lx )", ARG1,ARG2 );
   vg_assert(sizeof(vki_sigset_t) == 8);
   set.sig[0] = ARG1; /* sigs 1-32 */
   set.sig[1] = ARG2; /* sigs 33-64 */
   /* Handled entirely inside Valgrind; the kernel never sees it. */
   SET_STATUS_from_SysRes(
      VG_(do_sys_sigprocmask) ( tid, VKI_SIG_SETMASK, &set, NULL )
   );
}
    487 
/* thread_setstate serves (at least) two purposes which this wrapper
   must distinguish: asynchronous cancellation of another thread
   (ats_new->flags == TSTATE_INTR), which is intercepted and handled
   entirely inside Valgrind, and initialisation of a freshly created
   thread's register state, which is rewritten so the child runs on
   the simulated CPU.  Anything else falls through to the kernel. */
PRE(sys_thread_setstate)
{
   UWord          dst_lwpid = (UWord)ARG1;
   struct tstate* ats_new   = (struct tstate*)ARG2;
   struct tstate* ats_old   = (struct tstate*)ARG3;
   ThreadId       dst_tid   = VG_INVALID_THREADID;
   ThreadState*   dst_ts    = NULL;
   Int i;

   /* Arrgh.  We MUST retain the lock during this syscall.  Reason is
      that this is sometimes used for asynchronous thread cancellation
      (nuking other threads).  If we don't have the lock during the
      syscall, then it's possible that the thread we're nuking might
      get the lock before it gets killed off, and so we can never
      re-acquire the lock after this syscall, and the system
      deadlocks. */

   /* 10 July 06: above comment is a misdiagnosis.  It appears that
      for thread cancellation (that is, with ->flags == TSTATE_INTR)
      the target thread is has its PC changed by the the kernel to
      something else, possibly to pthread_exit(), so that it can run
      its cancellation handlers and exit.  Currently is unknown how
      the kernel knows what to set the target thread's PC to.  I did
      establish that all the other data passed in the struct is not
      relevant: when ->flags == TSTATE_INTR, all the other words can
      be set to 0x0 or 0xFFFFFFFF and the syscall still works.  So the
      address is not passed like that.  Also I looked at args to
      thread_setmystate_fast, which is used when a thread sets its
      cancellation state, but none of those are code addresses.

      Also, it's ok for the kernel to simply change the target
      thread's PC to something else for async thread cancellation, but
      for deferred cancellation something else is needed, and I can't
      see how that would work either.

      Anyway, net result is, target thread ends up not running on the
      simulator (not dead), which is why it's necessary to hold onto
      the lock at this point. */

   /* 30 July 06: added kludge to intercept attempts to cancel another
      thread and instead just force that thread to run
      pthread_exit(PTHREAD_CANCELED).  This allows V to keep
      control. */

   PRINT("thread_setstate (BOGUS HANDLER) "
         "( %ld, %p,%p )", dst_lwpid, ats_new, ats_old);
   if (1 && VG_(clo_trace_syscalls) && ats_new)
      ML_(aix5debugstuff_show_tstate)((Addr)ats_new,
                                      "thread_setstate (NEW)");

   /* Intercept and handle ourselves any attempts to cancel
      another thread (including this one). */

   if (ats_new && (!ats_old) && ats_new->flags == TSTATE_INTR) {
      dst_ts = NULL;
      if (VG_(clo_trace_syscalls))
         VG_(printf)("(INTR for lwpid %ld)", dst_lwpid);
      dst_tid = VG_INVALID_THREADID;
      /* Find the live thread whose kernel lwpid matches the target
         (lwpid was recorded in thread_wrapper at thread startup). */
      for (i = 0; i < VG_N_THREADS; i++) {
         dst_ts = VG_(get_ThreadState)(i);
         if ((dst_ts->status == VgTs_Runnable
              || dst_ts->status == VgTs_Yielding
              || dst_ts->status == VgTs_WaitSys)
             && dst_ts->os_state.lwpid == dst_lwpid) {
            dst_tid = i;
            break;
         }
      }
      if (VG_(clo_trace_syscalls)) {
         if (dst_tid == VG_INVALID_THREADID)
            VG_(printf)("(== unknown tid)");
         else
            VG_(printf)("(== tid %d)", (Int)dst_tid);
      }
      if (dst_tid != VG_INVALID_THREADID) {
         /* A cancel has been requested for ctid.  If the target
            thread has cancellation enabled, honour it right now.  If
            not, mark the thread as having a cancellation request, so
            that if it later enables cancellation then the
            cancellation will take effect. */
         vg_assert(dst_ts);
         if (dst_ts->os_state.cancel_progress == Canc_NoRequest) {
            if (dst_ts->os_state.cancel_disabled) {
               if (VG_(clo_trace_syscalls))
                  VG_(printf)("(target has cancel disabled"
                              "; request lodged)");
               dst_ts->os_state.cancel_progress = Canc_Requested;
            } else {
               if (VG_(clo_trace_syscalls))
                  VG_(printf)("(forcing target into pthread_exit)");
               dst_ts->os_state.cancel_progress = Canc_Actioned;
               Bool ok = ML_(aix5_force_thread_into_pthread_exit)(dst_tid);
               if (!ok) {
                  /* now at serious risk of deadlock/livelock.  Give up
                     rather than continue. */
                  ML_(aix5_set_threadstate_for_emergency_exit)
                     (tid, "pthread_cancel(case2-32): "
                           "cannot find pthread_exit; aborting");
                  SET_STATUS_Success(0);
                  return;
               }
            }
         }
         /* Syscall handled entirely here; never reaches the kernel. */
         SET_STATUS_Success(0);
         return;
      }
   }

   /* Well, it's not a cancellation request.  Maybe it is the
      initialisation of a previously created thread? */

   if (ats_new && !ats_old) {
      dst_tid = VG_INVALID_THREADID;
      /* Find the not-yet-started child by lwpid. */
      for (i = 0; i < VG_N_THREADS; i++) {
         dst_ts = VG_(get_ThreadState)(i);
         if (dst_ts->status == VgTs_Init
             && dst_ts->os_state.lwpid == dst_lwpid) {
            dst_tid = i;
            break;
         }
      }
      if (dst_tid != VG_INVALID_THREADID) {
         /* Found the associated child */
         if (VG_(clo_trace_syscalls))
            VG_(printf)("(initialised child tid %d)", (Int)dst_tid);
         dst_ts = VG_(get_ThreadState)(dst_tid);
         UWord* stack = (UWord*)ML_(allocstack)(dst_tid);
         /* XXX TODO: check allocstack failure */

         /* copy the specified child register state into the guest
            slot (we need that context to run on the simulated CPU,
            not the real one) and put pointers to our own
            run-the-simulator function into what we'll hand off to the
            kernel instead. */

         /* The guest thread is to start running whatever context
            this syscall showed up with. */
         dst_ts->arch.vex.guest_GPR0  = ats_new->mst.gpr[0];
         dst_ts->arch.vex.guest_GPR1  = ats_new->mst.gpr[1]; /* sp */
         dst_ts->arch.vex.guest_GPR2  = ats_new->mst.gpr[2]; /* toc */
         dst_ts->arch.vex.guest_GPR3  = ats_new->mst.gpr[3]; /* initarg */
         dst_ts->arch.vex.guest_GPR4  = ats_new->mst.gpr[4];
         dst_ts->arch.vex.guest_GPR5  = ats_new->mst.gpr[5];
         dst_ts->arch.vex.guest_GPR6  = ats_new->mst.gpr[6];
         dst_ts->arch.vex.guest_GPR7  = ats_new->mst.gpr[7];
         dst_ts->arch.vex.guest_GPR8  = ats_new->mst.gpr[8];
         dst_ts->arch.vex.guest_GPR9  = ats_new->mst.gpr[9];
         dst_ts->arch.vex.guest_GPR10 = ats_new->mst.gpr[10];
         dst_ts->arch.vex.guest_GPR11 = ats_new->mst.gpr[11]; /* ?? */
         dst_ts->arch.vex.guest_GPR12 = ats_new->mst.gpr[12];
         dst_ts->arch.vex.guest_GPR13 = ats_new->mst.gpr[13];
         dst_ts->arch.vex.guest_GPR14 = ats_new->mst.gpr[14];
         dst_ts->arch.vex.guest_GPR15 = ats_new->mst.gpr[15];
         dst_ts->arch.vex.guest_GPR16 = ats_new->mst.gpr[16];
         dst_ts->arch.vex.guest_GPR17 = ats_new->mst.gpr[17];
         dst_ts->arch.vex.guest_GPR18 = ats_new->mst.gpr[18];
         dst_ts->arch.vex.guest_GPR19 = ats_new->mst.gpr[19];
         dst_ts->arch.vex.guest_GPR20 = ats_new->mst.gpr[20];
         dst_ts->arch.vex.guest_GPR21 = ats_new->mst.gpr[21];
         dst_ts->arch.vex.guest_GPR22 = ats_new->mst.gpr[22];
         dst_ts->arch.vex.guest_GPR23 = ats_new->mst.gpr[23];
         dst_ts->arch.vex.guest_GPR24 = ats_new->mst.gpr[24];
         dst_ts->arch.vex.guest_GPR25 = ats_new->mst.gpr[25];
         dst_ts->arch.vex.guest_GPR26 = ats_new->mst.gpr[26];
         dst_ts->arch.vex.guest_GPR27 = ats_new->mst.gpr[27];
         dst_ts->arch.vex.guest_GPR28 = ats_new->mst.gpr[28];
         dst_ts->arch.vex.guest_GPR29 = ats_new->mst.gpr[29];
         dst_ts->arch.vex.guest_GPR30 = ats_new->mst.gpr[30];
         dst_ts->arch.vex.guest_GPR31 = ats_new->mst.gpr[31];
         dst_ts->arch.vex.guest_CIA   = ats_new->mst.iar; /* pc */
         dst_ts->arch.vex.guest_LR    = ats_new->mst.lr;
         dst_ts->arch.vex.guest_CTR   = ats_new->mst.ctr;
         LibVEX_GuestPPC32_put_CR( ats_new->mst.cr, &dst_ts->arch.vex );
         LibVEX_GuestPPC32_put_XER( ats_new->mst.xer, &dst_ts->arch.vex );

         /* Record what seems like the highest legitimate stack
            address for this thread, so that the stack unwinder works
            properly.  It seems reasonable to use the R1 value
            supplied here. */
         dst_ts->client_stack_highest_word = dst_ts->arch.vex.guest_GPR1;

         /* The host thread is to start running
            start_thread_NORETURN */
         /* start_thread_NORETURN is read through its AIX function
            descriptor: [0] = entry address, [1] = TOC. */
         UWord* wrapper_fdescr = (UWord*) & start_thread_NORETURN;
         ats_new->mst.gpr[1] = (UWord)stack;
         ats_new->mst.gpr[2] = wrapper_fdescr[1];
         ats_new->mst.iar    = wrapper_fdescr[0];
         ats_new->mst.gpr[3] = (UWord)dst_ts;

         /* Set initial cancellation status for the thread. */
         dst_ts->os_state.cancel_async    = False;
         dst_ts->os_state.cancel_disabled = False;
         dst_ts->os_state.cancel_progress = Canc_NoRequest;
      }
   }
}
POST(sys_thread_setstate)
{
   /* If the caller supplied an old-state buffer (ARG3), the kernel
      filled it in; mark it as written. */
   if (ARG3)
      POST_MEM_WRITE( ARG3, sizeof(struct tstate) );
   /* Debug aid, normally compiled out. */
   if (0 && VG_(clo_trace_syscalls) && ARG3)
      ML_(aix5debugstuff_show_tstate)(ARG3, "thread_setstate (OLD)");
}
    691 
PRE(sys_FAKE_SIGRETURN)
{
   /* See comments on PRE(sys_rt_sigreturn) in syswrap-amd64-linux.c for
      an explanation of what follows. */
   /* This handles the fake signal-return system call created by
      sigframe-ppc32-aix5.c. */

   PRINT("FAKE_SIGRETURN ( )");

   vg_assert(VG_(is_valid_tid)(tid));
   vg_assert(tid >= 1 && tid < VG_N_THREADS);
   vg_assert(VG_(is_running_thread)(tid));

   /* Remove the signal frame from this thread's (guest) stack,
      in the process restoring the pre-signal guest state. */
   VG_(sigframe_destroy)(tid, True);

   /* Tell the driver not to update the guest state with the "result",
      and set a bogus result to keep it happy. */
   *flags |= SfNoWriteResult;
   SET_STATUS_Success(0);

   /* Check to see if any signals arose as a result of this. */
   *flags |= SfPollAfter;
}
    717 
    718 
    719 /* ---------------------------------------------------------------------
    720    The ppc32/AIX5 syscall table
    721    ------------------------------------------------------------------ */
    722 
typedef
   struct {
      UInt* pSysNo;                /* points at the syscall number,
                                      which on AIX is only known at
                                      run time */
      SyscallTableEntry wrappers;  /* the pre/post wrapper pair */
   }
   AIX5SCTabEntry;
    729 
/* Platform-specific (ppc32-aix5) table-entry builders, analogous to
   the generic AIXXY/AIXX_ macros.  PLAXY supplies both a PRE and a
   POST wrapper; PLAX_ supplies a PRE wrapper only. */
#undef PLAXY
#undef PLAX_

#define PLAXY(sysno, name)                     \
   { & sysno,                                  \
     { & WRAPPER_PRE_NAME(ppc32_aix5, name),   \
       & WRAPPER_POST_NAME(ppc32_aix5, name) }}

#define PLAX_(sysno, name)                     \
   { & sysno,                                  \
     { & WRAPPER_PRE_NAME(ppc32_aix5, name),   \
       NULL }}
    742 
/* The ppc32/AIX5 syscall table.  Each entry pairs a pointer to a
   syscall number (filled in at startup, since AIX assigns numbers
   dynamically) with its PRE/POST wrappers.  Deliberately non-const:
   the lookup function below reorders entries (move-to-front-by-one)
   to speed up repeated searches. */
static /* but not const */
AIX5SCTabEntry aix5_ppc32_syscall_table[]
= {
    AIXXY(__NR_AIX5___libc_sbrk,        sys___libc_sbrk),
    PLAXY(__NR_AIX5___loadx,            sys___loadx),
    AIXX_(__NR_AIX5___msleep,           sys___msleep),
    PLAXY(__NR_AIX5___unload,           sys___unload),
    PLAXY(__NR_AIX5__clock_gettime,     sys__clock_gettime),
    AIXX_(__NR_AIX5__clock_settime,     sys__clock_settime),
    AIXX_(__NR_AIX5__exit,              sys__exit),
    AIXX_(__NR_AIX5__fp_fpscrx_sc,      sys__fp_fpscrx_sc),
    AIXX_(__NR_AIX5__getpgrp,           sys__getpgrp),
    AIXX_(__NR_AIX5__getpid,            sys__getpid),
    AIXX_(__NR_AIX5__getppid,           sys__getppid),
    AIXX_(__NR_AIX5__getpriority,       sys__getpriority),
    AIXXY(__NR_AIX5__nsleep,            sys__nsleep),
    AIXX_(__NR_AIX5__pause,             sys__pause),
    AIXXY(__NR_AIX5__poll,              sys__poll),
    AIXX_(__NR_AIX5__select,            sys__select),
    AIXX_(__NR_AIX5__sem_wait,          sys__sem_wait),
    AIXX_(__NR_AIX5__setpgid,           sys__setpgid),
    AIXX_(__NR_AIX5__setsid,            sys__setsid),
    AIXXY(__NR_AIX5__sigaction,         sys__sigaction),
    AIXX_(__NR_AIX5__thread_self,       sys__thread_self),
    AIXX_(__NR_AIX5__thread_setsched,   sys__thread_setsched),
    AIXX_(__NR_AIX5_access,             sys_access),
    AIXX_(__NR_AIX5_accessx,            sys_accessx),
    AIXXY(__NR_AIX5_appgetrlimit,       sys_appgetrlimit),
    AIXXY(__NR_AIX5_appgetrusage,       sys_appgetrusage),
    AIXX_(__NR_AIX5_apprestimer,        sys_apprestimer),
    AIXX_(__NR_AIX5_appsetrlimit,       sys_appsetrlimit),
    AIXX_(__NR_AIX5_appulimit,          sys_appulimit),
    AIXX_(__NR_AIX5_bind,               sys_bind),
    AIXX_(__NR_AIX5_chdir,              sys_chdir),
    AIXX_(__NR_AIX5_chmod,              sys_chmod),
    AIXX_(__NR_AIX5_chown,              sys_chown),
    AIXX_(__NR_AIX5_close,              sys_close),
    AIXX_(__NR_AIX5_connext,            sys_connext),
    AIXX_(__NR_AIX5_execve,             sys_execve),
    AIXXY(__NR_AIX5_finfo,              sys_finfo),
    AIXXY(__NR_AIX5_fstatfs,            sys_fstatfs),
    AIXXY(__NR_AIX5_fstatx,             sys_fstatx),
    AIXX_(__NR_AIX5_fsync,              sys_fsync),
    AIXXY(__NR_AIX5_getdirent,          sys_getdirent),
    AIXXY(__NR_AIX5_getdirent64,        sys_getdirent64),
    AIXXY(__NR_AIX5_getdomainname,      sys_getdomainname),
    AIXX_(__NR_AIX5_getgidx,            sys_getgidx),
    AIXXY(__NR_AIX5_getgroups,          sys_getgroups),
    AIXXY(__NR_AIX5_gethostname,        sys_gethostname),
    AIXXY(__NR_AIX5_getpriv,            sys_getpriv),
    AIXXY(__NR_AIX5_getprocs,           sys_getprocs),
    /* NOTE(review): getprocs64 reuses the 32-bit getprocs wrapper; the
       original XXX suggests this was never confirmed correct. */
    AIXXY(__NR_AIX5_getprocs64,         sys_getprocs), /* XXX: correct? */
    AIXX_(__NR_AIX5_getrpid,            sys_getrpid),
    AIXXY(__NR_AIX5_getsockopt,         sys_getsockopt),
    AIXX_(__NR_AIX5_gettimerid,         sys_gettimerid),
    AIXX_(__NR_AIX5_getuidx,            sys_getuidx),
    AIXXY(__NR_AIX5_incinterval,        sys_incinterval),
    AIXXY(__NR_AIX5_kfcntl,             sys_kfcntl),
    AIXX_(__NR_AIX5_kfork,              sys_kfork),
    AIXX_(__NR_AIX5_kftruncate,         sys_kftruncate),
    AIXX_(__NR_AIX5_kgetsidx,           sys_kgetsidx),
    AIXX_(__NR_AIX5_kill,               sys_kill),
    AIXXY(__NR_AIX5_kioctl,             sys_kioctl),
    AIXX_(__NR_AIX5_klseek,             sys_klseek),
    AIXX_(__NR_AIX5_knlist,             sys_knlist),
    AIXXY(__NR_AIX5_kpread,             sys_kpread),
    AIXXY(__NR_AIX5_kread,              sys_kread),
    AIXXY(__NR_AIX5_kreadv,             sys_kreadv),
    AIXX_(__NR_AIX5_kthread_ctl,        sys_kthread_ctl),
    AIXX_(__NR_AIX5_ktruncate,          sys_ktruncate),
    AIXXY(__NR_AIX5_kwaitpid,           sys_kwaitpid),
    AIXX_(__NR_AIX5_kwrite,             sys_kwrite),
    AIXX_(__NR_AIX5_kwritev,            sys_kwritev),
    AIXX_(__NR_AIX5_listen,             sys_listen),
    AIXX_(__NR_AIX5_loadbind,           sys_loadbind),
    AIXXY(__NR_AIX5_loadquery,          sys_loadquery),
    AIXX_(__NR_AIX5_lseek,              sys_lseek),
    AIXX_(__NR_AIX5_mkdir,              sys_mkdir),
    AIXXY(__NR_AIX5_mmap,               sys_mmap),
    AIXXY(__NR_AIX5_mntctl,             sys_mntctl),
    AIXXY(__NR_AIX5_mprotect,           sys_mprotect),
    AIXXY(__NR_AIX5_munmap,             sys_munmap),
    AIXXY(__NR_AIX5_naccept,            sys_naccept),
    AIXXY(__NR_AIX5_ngetpeername,       sys_ngetpeername),
    AIXXY(__NR_AIX5_ngetsockname,       sys_ngetsockname),
    AIXXY(__NR_AIX5_nrecvfrom,          sys_nrecvfrom),
    AIXX_(__NR_AIX5_nrecvmsg,           sys_nrecvmsg),
    AIXX_(__NR_AIX5_nsendmsg,           sys_nsendmsg),
    AIXX_(__NR_AIX5_open,               sys_open),
    AIXXY(__NR_AIX5_pipe,               sys_pipe),
    AIXX_(__NR_AIX5_privcheck,          sys_privcheck),
    AIXXY(__NR_AIX5_readlink,           sys_readlink),
    AIXXY(__NR_AIX5_recv,               sys_recv),
    AIXX_(__NR_AIX5_rename,             sys_rename),
    AIXXY(__NR_AIX5_sbrk,               sys_sbrk),
    AIXX_(__NR_AIX5_sched_get_priority_max, sys_sched_get_priority_max),
    AIXX_(__NR_AIX5_sem_destroy,        sys_sem_destroy),
    AIXXY(__NR_AIX5_sem_init,           sys_sem_init),
    AIXXY(__NR_AIX5_sem_post,           sys_sem_post),
    AIXX_(__NR_AIX5_send,               sys_send),
    AIXX_(__NR_AIX5_setgid,             sys_setgid),
    AIXX_(__NR_AIX5_setsockopt,         sys_setsockopt),
    AIXX_(__NR_AIX5_setuid,             sys_setuid),
    AIXXY(__NR_AIX5_shmat,              sys_shmat),
    AIXXY(__NR_AIX5_shmctl,             sys_shmctl),
    AIXXY(__NR_AIX5_shmdt,              sys_shmdt),
    AIXX_(__NR_AIX5_shmget,             sys_shmget),
    AIXX_(__NR_AIX5_shutdown,           sys_shutdown),
    AIXX_(__NR_AIX5_sigcleanup,         sys_sigcleanup),
    AIXXY(__NR_AIX5_sigprocmask,        sys_sigprocmask),
    AIXX_(__NR_AIX5_socket,             sys_socket),
    AIXXY(__NR_AIX5_statx,              sys_statx),
    AIXXY(__NR_AIX5_statfs,             sys_statfs),
    AIXX_(__NR_AIX5_symlink,            sys_symlink),
    AIXXY(__NR_AIX5_sys_parm,           sys_sys_parm),
    AIXXY(__NR_AIX5_sysconfig,          sys_sysconfig),
    AIXXY(__NR_AIX5_thread_create,      sys_thread_create),
    AIXX_(__NR_AIX5_thread_init,        sys_thread_init),
    AIXX_(__NR_AIX5_thread_kill,        sys_thread_kill),
    PLAX_(__NR_AIX5_thread_setmymask_fast, sys_thread_setmymask_fast),
    AIXXY(__NR_AIX5_thread_setmystate,  sys_thread_setmystate),
    AIXX_(__NR_AIX5_thread_setmystate_fast, sys_thread_setmystate_fast),
    PLAXY(__NR_AIX5_thread_setstate,    sys_thread_setstate),
    AIXX_(__NR_AIX5_thread_terminate_unlock, sys_thread_terminate_unlock),
    AIXX_(__NR_AIX5_thread_tsleep,      sys_thread_tsleep),
    AIXX_(__NR_AIX5_thread_tsleep_event, sys_thread_tsleep_event),
    AIXX_(__NR_AIX5_thread_twakeup,     sys_thread_twakeup),
    AIXX_(__NR_AIX5_thread_twakeup_event, sys_thread_twakeup_event),
    AIXX_(__NR_AIX5_thread_unlock,      sys_thread_unlock),
    AIXX_(__NR_AIX5_thread_waitlock,    sys_thread_waitlock),
    AIXX_(__NR_AIX5_thread_waitlock_,   sys_thread_waitlock_),
    AIXXY(__NR_AIX5_times,              sys_times),
    AIXX_(__NR_AIX5_umask,              sys_umask),
    AIXXY(__NR_AIX5_uname,              sys_uname),
    AIXX_(__NR_AIX5_unlink,             sys_unlink),
    AIXX_(__NR_AIX5_utimes,             sys_utimes),
    AIXXY(__NR_AIX5_vmgetinfo,          sys_vmgetinfo),
    AIXX_(__NR_AIX5_yield,              sys_yield),
    PLAX_(__NR_AIX5_FAKE_SIGRETURN,     sys_FAKE_SIGRETURN)
  };
    883 
    884 SyscallTableEntry* ML_(get_ppc32_aix5_syscall_entry) ( UInt sysno )
    885 {
    886    Int            i;
    887    AIX5SCTabEntry tmp;
    888 
    889    const Int tab_size = sizeof(aix5_ppc32_syscall_table)
    890                         / sizeof(aix5_ppc32_syscall_table[0]);
    891 
    892    for (i = 0; i < tab_size; i++)
    893       if (sysno == *(aix5_ppc32_syscall_table[i].pSysNo))
    894          break;
    895 
    896    vg_assert(i >= 0 && i <= tab_size);
    897    if (i == tab_size)
    898       return NULL; /* can't find a wrapper */
    899 
    900    /* Move found one a bit closer to the front, so as to
    901       make future searches cheaper. */
    902    if (i > 0) {
    903       tmp = aix5_ppc32_syscall_table[i-1];
    904       aix5_ppc32_syscall_table[i-1] = aix5_ppc32_syscall_table[i];
    905       aix5_ppc32_syscall_table[i] = tmp;
    906       i--;
    907    }
    908 
    909    vg_assert(i >= 0 && i < tab_size);
    910    return &aix5_ppc32_syscall_table[i].wrappers;
    911 }
    912 
    913 #endif // defined(VGP_ppc32_aix5)
    914 
    915 /*--------------------------------------------------------------------*/
    916 /*--- end                                                          ---*/
    917 /*--------------------------------------------------------------------*/
    918