      1 /*
      2  * Copyright (C) 2008 The Android Open Source Project
      3  * All rights reserved.
      4  *
      5  * Redistribution and use in source and binary forms, with or without
      6  * modification, are permitted provided that the following conditions
      7  * are met:
      8  *  * Redistributions of source code must retain the above copyright
      9  *    notice, this list of conditions and the following disclaimer.
     10  *  * Redistributions in binary form must reproduce the above copyright
     11  *    notice, this list of conditions and the following disclaimer in
     12  *    the documentation and/or other materials provided with the
     13  *    distribution.
     14  *
     15  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
     16  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
     17  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
     18  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
     19  * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
     20  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
     21  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
     22  * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
     23  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
     24  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
     25  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     26  * SUCH DAMAGE.
     27  */
     28 #include <sys/types.h>
     29 #include <unistd.h>
     30 #include <signal.h>
     31 #include <stdint.h>
     32 #include <stdio.h>
     33 #include <stdlib.h>
     34 #include <errno.h>
     35 #include <sys/atomics.h>
     36 #include <bionic_tls.h>
     37 #include <sys/mman.h>
     38 #include <pthread.h>
     39 #include <time.h>
     40 #include "pthread_internal.h"
     41 #include "thread_private.h"
     42 #include <limits.h>
     43 #include <memory.h>
     44 #include <assert.h>
     45 #include <malloc.h>
     46 #include <linux/futex.h>
     47 
     48 extern int  __pthread_clone(int (*fn)(void*), void *child_stack, int flags, void *arg);
     49 extern void _exit_with_stack_teardown(void * stackBase, int stackSize, int retCode);
     50 extern void _exit_thread(int  retCode);
     51 extern int  __set_errno(int);
     52 
     53 #define  __likely(cond)    __builtin_expect(!!(cond), 1)
     54 #define  __unlikely(cond)  __builtin_expect(!!(cond), 0)
     55 
     56 void _thread_created_hook(pid_t thread_id) __attribute__((noinline));
     57 
     58 #define PTHREAD_ATTR_FLAG_DETACHED      0x00000001
     59 #define PTHREAD_ATTR_FLAG_USER_STACK    0x00000002
     60 
     61 #define DEFAULT_STACKSIZE (1024 * 1024)
     62 #define STACKBASE 0x10000000
     63 
     64 static uint8_t * gStackBase = (uint8_t *)STACKBASE;
     65 
     66 static pthread_mutex_t mmap_lock = PTHREAD_MUTEX_INITIALIZER;
     67 
     68 
     69 static const pthread_attr_t gDefaultPthreadAttr = {
     70     .flags = 0,
     71     .stack_base = NULL,
     72     .stack_size = DEFAULT_STACKSIZE,
     73     .guard_size = PAGE_SIZE,
     74     .sched_policy = SCHED_NORMAL,
     75     .sched_priority = 0
     76 };
     77 
     78 #define  INIT_THREADS  1
     79 
     80 static pthread_internal_t*  gThreadList = NULL;
     81 static pthread_mutex_t gThreadListLock = PTHREAD_MUTEX_INITIALIZER;
     82 static pthread_mutex_t gDebuggerNotificationLock = PTHREAD_MUTEX_INITIALIZER;
     83 
     84 
      85 /* We simply malloc/free the internal pthread_internal_t structures. We may
      86  * want to use a different allocation scheme in the future, but this one
      87  * should be more than enough for now.
      88  */
     89 static pthread_internal_t*
     90 _pthread_internal_alloc(void)
     91 {
     92     pthread_internal_t*   thread;
     93 
      94     thread = calloc( 1, sizeof(*thread) );
     95     if (thread)
     96         thread->intern = 1;
     97 
     98     return thread;
     99 }
    100 
    101 static void
    102 _pthread_internal_free( pthread_internal_t*  thread )
    103 {
    104     if (thread && thread->intern) {
    105         thread->intern = 0;  /* just in case */
    106         free (thread);
    107     }
    108 }
    109 
    110 
    111 static void
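/* gThreadList is an intrusive list: each node's 'pref' points at the location
 * that points to the node itself (either &gThreadList or the previous node's
 * 'next' field). This lets a node unlink itself in O(1) without walking the
 * list from the head.
 */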
    112 _pthread_internal_remove_locked( pthread_internal_t*  thread )
    113 {
     114     if (thread->next) thread->next->pref = thread->pref;  /* the last node has no successor to fix up */
     115     thread->pref[0]    = thread->next;
    116 }
    117 
    118 static void
    119 _pthread_internal_remove( pthread_internal_t*  thread )
    120 {
    121     pthread_mutex_lock(&gThreadListLock);
    122     _pthread_internal_remove_locked(thread);
    123     pthread_mutex_unlock(&gThreadListLock);
    124 }
    125 
    126 static void
    127 _pthread_internal_add( pthread_internal_t*  thread )
    128 {
    129     pthread_mutex_lock(&gThreadListLock);
    130     thread->pref = &gThreadList;
    131     thread->next = thread->pref[0];
    132     if (thread->next)
    133         thread->next->pref = &thread->next;
    134     thread->pref[0] = thread;
    135     pthread_mutex_unlock(&gThreadListLock);
    136 }
    137 
    138 pthread_internal_t*
    139 __get_thread(void)
    140 {
    141     void**  tls = (void**)__get_tls();
    142 
    143     return  (pthread_internal_t*) tls[TLS_SLOT_THREAD_ID];
    144 }
    145 
    146 
    147 void*
    148 __get_stack_base(int  *p_stack_size)
    149 {
    150     pthread_internal_t*  thread = __get_thread();
    151 
    152     *p_stack_size = thread->attr.stack_size;
    153     return thread->attr.stack_base;
    154 }
    155 
    156 
    157 void  __init_tls(void**  tls, void*  thread)
    158 {
    159     int  nn;
    160 
    161     ((pthread_internal_t*)thread)->tls = tls;
    162 
     163     // slot 0 must point to the TLS area itself; this is required by the
     164     // x86 Linux kernel's thread-local-storage implementation
    165     tls[TLS_SLOT_SELF]      = (void*)tls;
    166     tls[TLS_SLOT_THREAD_ID] = thread;
    167     for (nn = TLS_SLOT_ERRNO; nn < BIONIC_TLS_SLOTS; nn++)
    168        tls[nn] = 0;
    169 
    170     __set_tls( (void*)tls );
    171 }
    172 
    173 
    174 /*
    175  * This trampoline is called from the assembly clone() function
    176  */
    177 void __thread_entry(int (*func)(void*), void *arg, void **tls)
    178 {
    179     int retValue;
    180     pthread_internal_t * thrInfo;
    181 
    182     // Wait for our creating thread to release us. This lets it have time to
    183     // notify gdb about this thread before it starts doing anything.
    184     pthread_mutex_t * start_mutex = (pthread_mutex_t *)&tls[TLS_SLOT_SELF];
    185     pthread_mutex_lock(start_mutex);
    186     pthread_mutex_destroy(start_mutex);
    187 
    188     thrInfo = (pthread_internal_t *) tls[TLS_SLOT_THREAD_ID];
    189 
    190     __init_tls( tls, thrInfo );
    191 
    192     pthread_exit( (void*)func(arg) );
    193 }
    194 
    195 void _init_thread(pthread_internal_t * thread, pid_t kernel_id, pthread_attr_t * attr, void * stack_base)
    196 {
    197     if (attr == NULL) {
    198         thread->attr = gDefaultPthreadAttr;
    199     } else {
    200         thread->attr = *attr;
    201     }
    202     thread->attr.stack_base = stack_base;
    203     thread->kernel_id       = kernel_id;
    204 
    205     // set the scheduling policy/priority of the thread
    206     if (thread->attr.sched_policy != SCHED_NORMAL) {
    207         struct sched_param param;
    208         param.sched_priority = thread->attr.sched_priority;
    209         sched_setscheduler(kernel_id, thread->attr.sched_policy, &param);
    210     }
    211 
    212     pthread_cond_init(&thread->join_cond, NULL);
    213     thread->join_count = 0;
    214 
    215     thread->cleanup_stack = NULL;
    216 
    217     _pthread_internal_add(thread);
    218 }
    219 
    220 
    221 /* XXX stacks not reclaimed if thread spawn fails */
    222 /* XXX stacks address spaces should be reused if available again */
    223 
    224 static void *mkstack(size_t size, size_t guard_size)
    225 {
    226     void * stack;
    227 
    228     pthread_mutex_lock(&mmap_lock);
    229 
    230     stack = mmap((void *)gStackBase, size,
    231                  PROT_READ | PROT_WRITE,
    232                  MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
    233                  -1, 0);
    234 
    235     if(stack == MAP_FAILED) {
    236         stack = NULL;
    237         goto done;
    238     }
    239 
    240     if(mprotect(stack, guard_size, PROT_NONE)){
    241         munmap(stack, size);
    242         stack = NULL;
    243         goto done;
    244     }
    245 
    246 done:
    247     pthread_mutex_unlock(&mmap_lock);
    248     return stack;
    249 }
    250 
    251 /*
     252  * Create a new thread. The thread's stack is laid out like so:
    253  *
    254  * +---------------------------+
    255  * |     pthread_internal_t    |
    256  * +---------------------------+
    257  * |                           |
    258  * |          TLS area         |
    259  * |                           |
    260  * +---------------------------+
    261  * |                           |
    262  * .                           .
    263  * .         stack area        .
    264  * .                           .
    265  * |                           |
    266  * +---------------------------+
    267  * |         guard page        |
    268  * +---------------------------+
    269  *
    270  *  note that TLS[0] must be a pointer to itself, this is required
     271  *  note that TLS[0] must be a pointer to itself; this is required
     272  *  by the x86 Linux kernel's thread-local-storage implementation,
     273  *  where the TLS pointer is read from fs:[0]  */
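/* Worked example of the TLS placement below (illustrative only; the real
 * constants come from <bionic_tls.h>): with the default 1 MB stack and a
 * hypothetical BIONIC_TLS_SLOTS of 64 on a 32-bit target, pthread_create()
 * computes
 *
 *     tls = stack + 0x100000 - 64 * sizeof(void*)    // stack + 0xFFF00
 *
 * so the TLS slots occupy the top 256 bytes of the mapping, the guard page
 * is the lowest page, and everything in between is usable stack.
 */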
    274 int pthread_create(pthread_t *thread_out, pthread_attr_t const * attr,
    275                    void *(*start_routine)(void *), void * arg)
    276 {
    277     char*   stack;
    278     void**  tls;
    279     int tid;
    280     pthread_mutex_t * start_mutex;
    281     pthread_internal_t * thread;
    282     int                  madestack = 0;
    283     int     old_errno = errno;
    284 
    285     /* this will inform the rest of the C library that at least one thread
     286      * was created. this forces certain functions (e.g. atexit()) to
     287      * acquire/release locks in order to protect shared global structures.
    288      *
    289      * this works because pthread_create() is not called by the C library
    290      * initialization routine that sets up the main thread's data structures.
    291      */
    292     __isthreaded = 1;
    293 
    294     thread = _pthread_internal_alloc();
    295     if (thread == NULL)
    296         return ENOMEM;
    297 
    298     if (attr == NULL) {
    299         attr = &gDefaultPthreadAttr;
    300     }
    301 
    302     // make sure the stack is PAGE_SIZE aligned
    303     size_t stackSize = (attr->stack_size +
    304                         (PAGE_SIZE-1)) & ~(PAGE_SIZE-1);
    305 
    306     if (!attr->stack_base) {
    307         stack = mkstack(stackSize, attr->guard_size);
    308         if(stack == NULL) {
    309             _pthread_internal_free(thread);
    310             return ENOMEM;
    311         }
    312         madestack = 1;
    313     } else {
    314         stack = attr->stack_base;
    315     }
    316 
    317     // Make room for TLS
    318     tls = (void**)(stack + stackSize - BIONIC_TLS_SLOTS*sizeof(void*));
    319 
    320     // Create a mutex for the thread in TLS_SLOT_SELF to wait on once it starts so we can keep
    321     // it from doing anything until after we notify the debugger about it
    322     start_mutex = (pthread_mutex_t *) &tls[TLS_SLOT_SELF];
    323     pthread_mutex_init(start_mutex, NULL);
    324     pthread_mutex_lock(start_mutex);
    325 
    326     tls[TLS_SLOT_THREAD_ID] = thread;
    327 
    328     tid = __pthread_clone((int(*)(void*))start_routine, tls,
    329                 CLONE_FILES | CLONE_FS | CLONE_VM | CLONE_SIGHAND
    330                 | CLONE_THREAD | CLONE_SYSVSEM | CLONE_DETACHED,
    331                 arg);
    332 
    333     if(tid < 0) {
    334         int  result;
    335         if (madestack)
    336             munmap(stack, stackSize);
    337         _pthread_internal_free(thread);
    338         result = errno;
    339         errno = old_errno;
    340         return result;
    341     }
    342 
    343     _init_thread(thread, tid, (pthread_attr_t*)attr, stack);
    344 
    345     if (!madestack)
    346         thread->attr.flags |= PTHREAD_ATTR_FLAG_USER_STACK;
    347 
    348     // Notify any debuggers about the new thread
    349     pthread_mutex_lock(&gDebuggerNotificationLock);
    350     _thread_created_hook(tid);
    351     pthread_mutex_unlock(&gDebuggerNotificationLock);
    352 
     353     // Let the thread do its thing
    354     pthread_mutex_unlock(start_mutex);
    355 
    356     *thread_out = (pthread_t)thread;
    357     return 0;
    358 }
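/* A minimal usage sketch of the API implemented above (illustrative, not part
 * of the library): create a joinable thread with a 64 KiB stack and wait for
 * its result.
 *
 *     static void* work(void* arg) { return arg; }
 *
 *     pthread_attr_t a;
 *     pthread_t      t;
 *     void*          result;
 *
 *     pthread_attr_init(&a);
 *     pthread_attr_setstacksize(&a, 64 * 1024);   // must be page-aligned
 *     pthread_create(&t, &a, work, (void*)42);
 *     pthread_join(t, &result);                   // result == (void*)42
 *     pthread_attr_destroy(&a);
 */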
    359 
    360 
    361 int pthread_attr_init(pthread_attr_t * attr)
    362 {
    363     *attr = gDefaultPthreadAttr;
    364     return 0;
    365 }
    366 
    367 int pthread_attr_destroy(pthread_attr_t * attr)
    368 {
    369     memset(attr, 0x42, sizeof(pthread_attr_t));
    370     return 0;
    371 }
    372 
    373 int pthread_attr_setdetachstate(pthread_attr_t * attr, int state)
    374 {
    375     if (state == PTHREAD_CREATE_DETACHED) {
    376         attr->flags |= PTHREAD_ATTR_FLAG_DETACHED;
    377     } else if (state == PTHREAD_CREATE_JOINABLE) {
    378         attr->flags &= ~PTHREAD_ATTR_FLAG_DETACHED;
    379     } else {
    380         return EINVAL;
    381     }
    382     return 0;
    383 }
    384 
    385 int pthread_attr_getdetachstate(pthread_attr_t const * attr, int * state)
    386 {
    387     *state = (attr->flags & PTHREAD_ATTR_FLAG_DETACHED)
    388            ? PTHREAD_CREATE_DETACHED
    389            : PTHREAD_CREATE_JOINABLE;
    390     return 0;
    391 }
    392 
    393 int pthread_attr_setschedpolicy(pthread_attr_t * attr, int policy)
    394 {
    395     attr->sched_policy = policy;
    396     return 0;
    397 }
    398 
    399 int pthread_attr_getschedpolicy(pthread_attr_t const * attr, int * policy)
    400 {
    401     *policy = attr->sched_policy;
    402     return 0;
    403 }
    404 
    405 int pthread_attr_setschedparam(pthread_attr_t * attr, struct sched_param const * param)
    406 {
    407     attr->sched_priority = param->sched_priority;
    408     return 0;
    409 }
    410 
    411 int pthread_attr_getschedparam(pthread_attr_t const * attr, struct sched_param * param)
    412 {
    413     param->sched_priority = attr->sched_priority;
    414     return 0;
    415 }
    416 
    417 int pthread_attr_setstacksize(pthread_attr_t * attr, size_t stack_size)
    418 {
    419     if ((stack_size & (PAGE_SIZE - 1) || stack_size < PTHREAD_STACK_MIN)) {
    420         return EINVAL;
    421     }
    422     attr->stack_size = stack_size;
    423     return 0;
    424 }
    425 
    426 int pthread_attr_getstacksize(pthread_attr_t const * attr, size_t * stack_size)
    427 {
    428     *stack_size = attr->stack_size;
    429     return 0;
    430 }
    431 
    432 int pthread_attr_setstackaddr(pthread_attr_t * attr, void * stack_addr)
    433 {
    434 #if 1
    435     // It's not clear if this is setting the top or bottom of the stack, so don't handle it for now.
    436     return ENOSYS;
    437 #else
    438     if ((uint32_t)stack_addr & (PAGE_SIZE - 1)) {
    439         return EINVAL;
    440     }
    441     attr->stack_base = stack_addr;
    442     return 0;
    443 #endif
    444 }
    445 
    446 int pthread_attr_getstackaddr(pthread_attr_t const * attr, void ** stack_addr)
    447 {
    448     *stack_addr = (char*)attr->stack_base + attr->stack_size;
    449     return 0;
    450 }
    451 
    452 int pthread_attr_setstack(pthread_attr_t * attr, void * stack_base, size_t stack_size)
    453 {
    454     if ((stack_size & (PAGE_SIZE - 1) || stack_size < PTHREAD_STACK_MIN)) {
    455         return EINVAL;
    456     }
    457     if ((uint32_t)stack_base & (PAGE_SIZE - 1)) {
    458         return EINVAL;
    459     }
    460     attr->stack_base = stack_base;
    461     attr->stack_size = stack_size;
    462     return 0;
    463 }
    464 
    465 int pthread_attr_getstack(pthread_attr_t const * attr, void ** stack_base, size_t * stack_size)
    466 {
    467     *stack_base = attr->stack_base;
    468     *stack_size = attr->stack_size;
    469     return 0;
    470 }
    471 
    472 int pthread_attr_setguardsize(pthread_attr_t * attr, size_t guard_size)
    473 {
    474     if (guard_size & (PAGE_SIZE - 1) || guard_size < PAGE_SIZE) {
    475         return EINVAL;
    476     }
    477 
    478     attr->guard_size = guard_size;
    479     return 0;
    480 }
    481 
    482 int pthread_attr_getguardsize(pthread_attr_t const * attr, size_t * guard_size)
    483 {
    484     *guard_size = attr->guard_size;
    485     return 0;
    486 }
    487 
    488 int pthread_getattr_np(pthread_t thid, pthread_attr_t * attr)
    489 {
    490     pthread_internal_t * thread = (pthread_internal_t *)thid;
    491     *attr = thread->attr;
    492     return 0;
    493 }
    494 
    495 int pthread_attr_setscope(pthread_attr_t *attr, int  scope)
    496 {
    497     if (scope == PTHREAD_SCOPE_SYSTEM)
    498         return 0;
    499     if (scope == PTHREAD_SCOPE_PROCESS)
    500         return ENOTSUP;
    501 
    502     return EINVAL;
    503 }
    504 
    505 int pthread_attr_getscope(pthread_attr_t const *attr)
    506 {
    507     return PTHREAD_SCOPE_SYSTEM;
    508 }
    509 
    510 
    511 /* CAVEAT: our implementation of pthread_cleanup_push/pop doesn't support C++ exceptions
     512  *         and thread cancellation
    513  */
    514 
    515 void __pthread_cleanup_push( __pthread_cleanup_t*      c,
    516                              __pthread_cleanup_func_t  routine,
    517                              void*                     arg )
    518 {
    519     pthread_internal_t*  thread = __get_thread();
    520 
    521     c->__cleanup_routine  = routine;
    522     c->__cleanup_arg      = arg;
    523     c->__cleanup_prev     = thread->cleanup_stack;
    524     thread->cleanup_stack = c;
    525 }
    526 
    527 void __pthread_cleanup_pop( __pthread_cleanup_t*  c, int  execute )
    528 {
    529     pthread_internal_t*  thread = __get_thread();
    530 
    531     thread->cleanup_stack = c->__cleanup_prev;
    532     if (execute)
    533         c->__cleanup_routine(c->__cleanup_arg);
    534 }
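/* Usage sketch (illustrative): the public pthread_cleanup_push()/pop() macros
 * in <pthread.h> are built on the two primitives above. A typical caller looks
 * like this; the handler runs either at the explicit pop(1) or when the thread
 * exits via pthread_exit() before reaching it.
 *
 *     void* worker(void* arg)
 *     {
 *         char* buf = malloc(128);
 *         pthread_cleanup_push(free, buf);
 *         // ... work that may call pthread_exit() ...
 *         pthread_cleanup_pop(1);   // pop and execute free(buf)
 *         return NULL;
 *     }
 */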
    535 
    536 /* used by pthread_exit() to clean all TLS keys of the current thread */
    537 static void pthread_key_clean_all(void);
    538 
    539 void pthread_exit(void * retval)
    540 {
    541     pthread_internal_t*  thread     = __get_thread();
    542     void*                stack_base = thread->attr.stack_base;
    543     int                  stack_size = thread->attr.stack_size;
    544     int                  user_stack = (thread->attr.flags & PTHREAD_ATTR_FLAG_USER_STACK) != 0;
    545 
    546     // call the cleanup handlers first
    547     while (thread->cleanup_stack) {
    548         __pthread_cleanup_t*  c = thread->cleanup_stack;
    549         thread->cleanup_stack   = c->__cleanup_prev;
    550         c->__cleanup_routine(c->__cleanup_arg);
    551     }
    552 
     553     // call the TLS destructors. it is important to do this before removing the
     554     // thread from the global list, since it ensures that if someone else deletes
     555     // a TLS key, the corresponding value will be set to NULL in this thread's TLS
     556     // space (see pthread_key_delete)
    557     pthread_key_clean_all();
    558 
    559     // if the thread is detached, destroy the pthread_internal_t
    560     // otherwise, keep it in memory and signal any joiners
    561     if (thread->attr.flags & PTHREAD_ATTR_FLAG_DETACHED) {
    562         _pthread_internal_remove(thread);
    563         _pthread_internal_free(thread);
    564     } else {
    565        /* the join_count field is used to store the number of threads waiting for
     566         * the termination of this thread with pthread_join().
    567         *
    568         * if it is positive we need to signal the waiters, and we do not touch
    569         * the count (it will be decremented by the waiters, the last one will
     570         * also remove/free the thread structure).
    571         *
    572         * if it is zero, we set the count value to -1 to indicate that the
    573         * thread is in 'zombie' state: it has stopped executing, and its stack
    574         * is gone (as well as its TLS area). when another thread calls pthread_join()
    575         * on it, it will immediately free the thread and return.
    576         */
    577         pthread_mutex_lock(&gThreadListLock);
    578         thread->return_value = retval;
    579         if (thread->join_count > 0) {
    580             pthread_cond_broadcast(&thread->join_cond);
    581         } else {
    582             thread->join_count = -1;  /* zombie thread */
    583         }
    584         pthread_mutex_unlock(&gThreadListLock);
    585     }
    586 
    587     // destroy the thread stack
    588     if (user_stack)
    589         _exit_thread((int)retval);
    590     else
    591         _exit_with_stack_teardown(stack_base, stack_size, (int)retval);
    592 }
    593 
    594 int pthread_join(pthread_t thid, void ** ret_val)
    595 {
    596     pthread_internal_t*  thread = (pthread_internal_t*)thid;
    597     int                  count;
    598 
    599     // check that the thread still exists and is not detached
    600     pthread_mutex_lock(&gThreadListLock);
    601 
    602     for (thread = gThreadList; thread != NULL; thread = thread->next)
    603         if (thread == (pthread_internal_t*)thid)
    604             goto FoundIt;
    605 
    606     pthread_mutex_unlock(&gThreadListLock);
    607     return ESRCH;
    608 
    609 FoundIt:
    610     if (thread->attr.flags & PTHREAD_ATTR_FLAG_DETACHED) {
    611         pthread_mutex_unlock(&gThreadListLock);
    612         return EINVAL;
    613     }
    614 
    615    /* wait for thread death when needed
    616     *
    617     * if the 'join_count' is negative, this is a 'zombie' thread that
    618     * is already dead and without stack/TLS
    619     *
     620     * otherwise, we need to increment 'join_count' and wait to be signaled
     621     */
     622     count = thread->join_count;
    623     if (count >= 0) {
    624         thread->join_count += 1;
    625         pthread_cond_wait( &thread->join_cond, &gThreadListLock );
    626         count = --thread->join_count;
    627     }
    628     if (ret_val)
    629         *ret_val = thread->return_value;
    630 
    631     /* remove thread descriptor when we're the last joiner or when the
    632      * thread was already a zombie.
    633      */
    634     if (count <= 0) {
    635         _pthread_internal_remove_locked(thread);
    636         _pthread_internal_free(thread);
    637     }
    638     pthread_mutex_unlock(&gThreadListLock);
    639     return 0;
    640 }
    641 
    642 int  pthread_detach( pthread_t  thid )
    643 {
    644     pthread_internal_t*  thread;
    645     int                  result = 0;
    646     int                  flags;
    647 
    648     pthread_mutex_lock(&gThreadListLock);
    649     for (thread = gThreadList; thread != NULL; thread = thread->next)
    650         if (thread == (pthread_internal_t*)thid)
    651             goto FoundIt;
    652 
    653     result = ESRCH;
    654     goto Exit;
    655 
    656 FoundIt:
    657     do {
    658         flags = thread->attr.flags;
    659 
    660         if ( flags & PTHREAD_ATTR_FLAG_DETACHED ) {
    661             /* thread is not joinable ! */
    662             result = EINVAL;
    663             goto Exit;
    664         }
    665     }
    666     while ( __atomic_cmpxchg( flags, flags | PTHREAD_ATTR_FLAG_DETACHED,
    667                               (volatile int*)&thread->attr.flags ) != 0 );
    668 Exit:
    669     pthread_mutex_unlock(&gThreadListLock);
    670     return result;
    671 }
    672 
    673 pthread_t pthread_self(void)
    674 {
    675     return (pthread_t)__get_thread();
    676 }
    677 
    678 int pthread_equal(pthread_t one, pthread_t two)
    679 {
    680     return (one == two ? 1 : 0);
    681 }
    682 
    683 int pthread_getschedparam(pthread_t thid, int * policy,
    684                           struct sched_param * param)
    685 {
    686     int  old_errno = errno;
    687 
    688     pthread_internal_t * thread = (pthread_internal_t *)thid;
    689     int err = sched_getparam(thread->kernel_id, param);
    690     if (!err) {
    691         *policy = sched_getscheduler(thread->kernel_id);
    692     } else {
    693         err = errno;
    694         errno = old_errno;
    695     }
    696     return err;
    697 }
    698 
    699 int pthread_setschedparam(pthread_t thid, int policy,
    700                           struct sched_param const * param)
    701 {
    702     pthread_internal_t * thread = (pthread_internal_t *)thid;
    703     int                  old_errno = errno;
    704     int                  ret;
    705 
    706     ret = sched_setscheduler(thread->kernel_id, policy, param);
    707     if (ret < 0) {
    708         ret = errno;
    709         errno = old_errno;
    710     }
    711     return ret;
    712 }
    713 
    714 
    715 int __futex_wait(volatile void *ftx, int val, const struct timespec *timeout);
    716 int __futex_wake(volatile void *ftx, int count);
    717 
    718 int __futex_syscall3(volatile void *ftx, int op, int val);
    719 int __futex_syscall4(volatile void *ftx, int op, int val, const struct timespec *timeout);
    720 
    721 #ifndef FUTEX_PRIVATE_FLAG
    722 #define FUTEX_PRIVATE_FLAG  128
    723 #endif
    724 
    725 #ifndef FUTEX_WAIT_PRIVATE
    726 #define FUTEX_WAIT_PRIVATE  (FUTEX_WAIT|FUTEX_PRIVATE_FLAG)
    727 #endif
    728 
    729 #ifndef FUTEX_WAKE_PRIVATE
    730 #define FUTEX_WAKE_PRIVATE  (FUTEX_WAKE|FUTEX_PRIVATE_FLAG)
    731 #endif
    732 
    733 // mutex lock states
    734 //
    735 // 0: unlocked
    736 // 1: locked, no waiters
    737 // 2: locked, maybe waiters
    738 
    739 /* a mutex is implemented as a 32-bit integer holding the following fields
    740  *
    741  * bits:     name     description
    742  * 31-16     tid      owner thread's kernel id (recursive and errorcheck only)
    743  * 15-14     type     mutex type
    744  * 13        shared   process-shared flag
    745  * 12-2      counter  counter of recursive mutexes
    746  * 1-0       state    lock state (0, 1 or 2)
    747  */
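/* Worked example (illustrative): a value of 0x002A4006 decomposes as
 *
 *   bits 31-16 = 0x002A  -> owned by the thread with kernel id 42
 *   bits 15-14 = 0b01    -> MUTEX_TYPE_RECURSIVE
 *   bit  13    = 0       -> process-private
 *   bits 12-2  = 1       -> one extra recursive acquisition
 *   bits 1-0   = 2       -> locked, maybe waiters
 */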
    748 
    749 
    750 #define  MUTEX_OWNER(m)  (((m)->value >> 16) & 0xffff)
     751 #define  MUTEX_COUNTER(m) (((m)->value >> 2) & 0x7ff)   /* bits 12-2 only; bit 13 is the shared flag */
    752 
    753 #define  MUTEX_TYPE_MASK       0xc000
    754 #define  MUTEX_TYPE_NORMAL     0x0000
    755 #define  MUTEX_TYPE_RECURSIVE  0x4000
    756 #define  MUTEX_TYPE_ERRORCHECK 0x8000
    757 
    758 #define  MUTEX_COUNTER_SHIFT  2
    759 #define  MUTEX_COUNTER_MASK   0x1ffc
    760 #define  MUTEX_SHARED_MASK    0x2000
    761 
    762 /* a mutex attribute holds the following fields
    763  *
    764  * bits:     name       description
    765  * 0-3       type       type of mutex
    766  * 4         shared     process-shared flag
    767  */
    768 #define  MUTEXATTR_TYPE_MASK   0x000f
    769 #define  MUTEXATTR_SHARED_MASK 0x0010
    770 
    771 
    772 int pthread_mutexattr_init(pthread_mutexattr_t *attr)
    773 {
    774     if (attr) {
    775         *attr = PTHREAD_MUTEX_DEFAULT;
    776         return 0;
    777     } else {
    778         return EINVAL;
    779     }
    780 }
    781 
    782 int pthread_mutexattr_destroy(pthread_mutexattr_t *attr)
    783 {
    784     if (attr) {
    785         *attr = -1;
    786         return 0;
    787     } else {
    788         return EINVAL;
    789     }
    790 }
    791 
    792 int pthread_mutexattr_gettype(const pthread_mutexattr_t *attr, int *type)
    793 {
    794     if (attr) {
    795         int  atype = (*attr & MUTEXATTR_TYPE_MASK);
    796 
    797          if (atype >= PTHREAD_MUTEX_NORMAL &&
    798              atype <= PTHREAD_MUTEX_ERRORCHECK) {
    799             *type = atype;
    800             return 0;
    801         }
    802     }
    803     return EINVAL;
    804 }
    805 
    806 int pthread_mutexattr_settype(pthread_mutexattr_t *attr, int type)
    807 {
    808     if (attr && type >= PTHREAD_MUTEX_NORMAL &&
    809                 type <= PTHREAD_MUTEX_ERRORCHECK ) {
    810         *attr = (*attr & ~MUTEXATTR_TYPE_MASK) | type;
    811         return 0;
    812     }
    813     return EINVAL;
    814 }
    815 
    816 /* process-shared mutexes are not supported at the moment */
    817 
    818 int pthread_mutexattr_setpshared(pthread_mutexattr_t *attr, int  pshared)
    819 {
    820     if (!attr)
    821         return EINVAL;
    822 
    823     switch (pshared) {
    824     case PTHREAD_PROCESS_PRIVATE:
    825         *attr &= ~MUTEXATTR_SHARED_MASK;
    826         return 0;
    827 
    828     case PTHREAD_PROCESS_SHARED:
    829         /* our current implementation of pthread actually supports shared
     830          * mutexes but won't clean up if a process dies with the mutex held.
    831          * Nevertheless, it's better than nothing. Shared mutexes are used
    832          * by surfaceflinger and audioflinger.
    833          */
    834         *attr |= MUTEXATTR_SHARED_MASK;
    835         return 0;
    836     }
    837     return EINVAL;
    838 }
    839 
    840 int pthread_mutexattr_getpshared(pthread_mutexattr_t *attr, int *pshared)
    841 {
    842     if (!attr || !pshared)
    843         return EINVAL;
    844 
    845     *pshared = (*attr & MUTEXATTR_SHARED_MASK) ? PTHREAD_PROCESS_SHARED
    846                                                : PTHREAD_PROCESS_PRIVATE;
    847     return 0;
    848 }
    849 
    850 int pthread_mutex_init(pthread_mutex_t *mutex,
    851                        const pthread_mutexattr_t *attr)
    852 {
    853     int value = 0;
    854 
    855     if (mutex == NULL)
    856         return EINVAL;
    857 
    858     if (__likely(attr == NULL)) {
    859         mutex->value = MUTEX_TYPE_NORMAL;
    860         return 0;
    861     }
    862 
    863     if ((*attr & MUTEXATTR_SHARED_MASK) != 0)
    864         value |= MUTEX_SHARED_MASK;
    865 
    866     switch (*attr & MUTEXATTR_TYPE_MASK) {
    867     case PTHREAD_MUTEX_NORMAL:
    868         value |= MUTEX_TYPE_NORMAL;
    869         break;
    870     case PTHREAD_MUTEX_RECURSIVE:
    871         value |= MUTEX_TYPE_RECURSIVE;
    872         break;
    873     case PTHREAD_MUTEX_ERRORCHECK:
    874         value |= MUTEX_TYPE_ERRORCHECK;
    875         break;
    876     default:
    877         return EINVAL;
    878     }
    879 
    880     mutex->value = value;
    881     return 0;
    882 }
    883 
    884 int pthread_mutex_destroy(pthread_mutex_t *mutex)
    885 {
    886     if (__unlikely(mutex == NULL))
    887         return EINVAL;
    888 
    889     mutex->value = 0xdead10cc;
    890     return 0;
    891 }
    892 
    893 
    894 /*
    895  * Lock a non-recursive mutex.
    896  *
    897  * As noted above, there are three states:
    898  *   0 (unlocked, no contention)
    899  *   1 (locked, no contention)
    900  *   2 (locked, contention)
    901  *
    902  * Non-recursive mutexes don't use the thread-id or counter fields, and the
    903  * "type" value is zero, so the only bits that will be set are the ones in
    904  * the lock state field.
    905  */
    906 static __inline__ void
    907 _normal_lock(pthread_mutex_t*  mutex)
    908 {
    909     /* We need to preserve the shared flag during operations */
    910     int  shared = mutex->value & MUTEX_SHARED_MASK;
    911     /*
    912      * The common case is an unlocked mutex, so we begin by trying to
    913      * change the lock's state from 0 to 1.  __atomic_cmpxchg() returns 0
    914      * if it made the swap successfully.  If the result is nonzero, this
    915      * lock is already held by another thread.
    916      */
    917     if (__atomic_cmpxchg(shared|0, shared|1, &mutex->value ) != 0) {
    918         /*
    919          * We want to go to sleep until the mutex is available, which
    920          * requires promoting it to state 2.  We need to swap in the new
    921          * state value and then wait until somebody wakes us up.
    922          *
    923          * __atomic_swap() returns the previous value.  We swap 2 in and
    924          * see if we got zero back; if so, we have acquired the lock.  If
    925          * not, another thread still holds the lock and we wait again.
    926          *
    927          * The second argument to the __futex_wait() call is compared
    928          * against the current value.  If it doesn't match, __futex_wait()
    929          * returns immediately (otherwise, it sleeps for a time specified
    930          * by the third argument; 0 means sleep forever).  This ensures
    931          * that the mutex is in state 2 when we go to sleep on it, which
    932          * guarantees a wake-up call.
    933          */
    934         int  wait_op = shared ? FUTEX_WAIT : FUTEX_WAIT_PRIVATE;
    935 
    936         while (__atomic_swap(shared|2, &mutex->value ) != (shared|0))
    937             __futex_syscall4(&mutex->value, wait_op, shared|2, 0);
    938     }
    939 }
    940 
    941 /*
    942  * Release a non-recursive mutex.  The caller is responsible for determining
    943  * that we are in fact the owner of this lock.
    944  */
    945 static __inline__ void
    946 _normal_unlock(pthread_mutex_t*  mutex)
    947 {
    948     /* We need to preserve the shared flag during operations */
    949     int  shared = mutex->value & MUTEX_SHARED_MASK;
    950 
    951     /*
    952      * The mutex state will be 1 or (rarely) 2.  We use an atomic decrement
    953      * to release the lock.  __atomic_dec() returns the previous value;
    954      * if it wasn't 1 we have to do some additional work.
    955      */
    956     if (__atomic_dec(&mutex->value) != (shared|1)) {
    957         int  wake_op = shared ? FUTEX_WAKE : FUTEX_WAKE_PRIVATE;
    958         /*
    959          * Start by releasing the lock.  The decrement changed it from
    960          * "contended lock" to "uncontended lock", which means we still
    961          * hold it, and anybody who tries to sneak in will push it back
    962          * to state 2.
    963          *
    964          * Once we set it to zero the lock is up for grabs.  We follow
    965          * this with a __futex_wake() to ensure that one of the waiting
    966          * threads has a chance to grab it.
    967          *
    968          * This doesn't cause a race with the swap/wait pair in
    969          * _normal_lock(), because the __futex_wait() call there will
    970          * return immediately if the mutex value isn't 2.
    971          */
    972         mutex->value = shared;
    973 
    974         /*
    975          * Wake up one waiting thread.  We don't know which thread will be
    976          * woken or when it'll start executing -- futexes make no guarantees
    977          * here.  There may not even be a thread waiting.
    978          *
    979          * The newly-woken thread will replace the 0 we just set above
    980          * with 2, which means that when it eventually releases the mutex
    981          * it will also call FUTEX_WAKE.  This results in one extra wake
    982          * call whenever a lock is contended, but lets us avoid forgetting
    983          * anyone without requiring us to track the number of sleepers.
    984          *
    985          * It's possible for another thread to sneak in and grab the lock
    986          * between the zero assignment above and the wake call below.  If
    987          * the new thread is "slow" and holds the lock for a while, we'll
    988          * wake up a sleeper, which will swap in a 2 and then go back to
    989          * sleep since the lock is still held.  If the new thread is "fast",
    990          * running to completion before we call wake, the thread we
    991          * eventually wake will find an unlocked mutex and will execute.
    992          * Either way we have correct behavior and nobody is orphaned on
    993          * the wait queue.
    994          */
    995         __futex_syscall3(&mutex->value, wake_op, 1);
    996     }
    997 }
    998 
    999 static pthread_mutex_t  __recursive_lock = PTHREAD_MUTEX_INITIALIZER;
   1000 
   1001 static void
   1002 _recursive_lock(void)
   1003 {
   1004     _normal_lock(&__recursive_lock);
   1005 }
   1006 
   1007 static void
   1008 _recursive_unlock(void)
   1009 {
   1010     _normal_unlock(&__recursive_lock );
   1011 }
   1012 
   1013 int pthread_mutex_lock(pthread_mutex_t *mutex)
   1014 {
   1015     int mtype, tid, new_lock_type, shared, wait_op;
   1016 
   1017     if (__unlikely(mutex == NULL))
   1018         return EINVAL;
   1019 
   1020     mtype = (mutex->value & MUTEX_TYPE_MASK);
   1021     shared = (mutex->value & MUTEX_SHARED_MASK);
   1022 
   1023     /* Handle normal case first */
   1024     if ( __likely(mtype == MUTEX_TYPE_NORMAL) ) {
   1025         _normal_lock(mutex);
   1026         return 0;
   1027     }
   1028 
   1029     /* Do we already own this recursive or error-check mutex ? */
   1030     tid = __get_thread()->kernel_id;
   1031     if ( tid == MUTEX_OWNER(mutex) )
   1032     {
   1033         int  oldv, counter;
   1034 
   1035         if (mtype == MUTEX_TYPE_ERRORCHECK) {
   1036             /* trying to re-lock a mutex we already acquired */
   1037             return EDEADLK;
   1038         }
   1039         /*
   1040          * We own the mutex, but other threads are able to change
   1041          * the contents (e.g. promoting it to "contended"), so we
   1042          * need to hold the global lock.
   1043          */
   1044         _recursive_lock();
   1045         oldv         = mutex->value;
   1046         counter      = (oldv + (1 << MUTEX_COUNTER_SHIFT)) & MUTEX_COUNTER_MASK;
   1047         mutex->value = (oldv & ~MUTEX_COUNTER_MASK) | counter;
   1048         _recursive_unlock();
   1049         return 0;
   1050     }
   1051 
   1052     /* We don't own the mutex, so try to get it.
   1053      *
    1054      * First, we try to change its state from 0 to 1; if this
    1055      * doesn't work, we try to change it to state 2.
   1056      */
   1057     new_lock_type = 1;
   1058 
   1059     /* compute futex wait opcode and restore shared flag in mtype */
   1060     wait_op = shared ? FUTEX_WAIT : FUTEX_WAIT_PRIVATE;
   1061     mtype  |= shared;
   1062 
   1063     for (;;) {
   1064         int  oldv;
   1065 
   1066         _recursive_lock();
   1067         oldv = mutex->value;
   1068         if (oldv == mtype) { /* uncontended released lock => 1 or 2 */
   1069             mutex->value = ((tid << 16) | mtype | new_lock_type);
   1070         } else if ((oldv & 3) == 1) { /* locked state 1 => state 2 */
   1071             oldv ^= 3;
   1072             mutex->value = oldv;
   1073         }
   1074         _recursive_unlock();
   1075 
   1076         if (oldv == mtype)
   1077             break;
   1078 
   1079         /*
   1080          * The lock was held, possibly contended by others.  From
   1081          * now on, if we manage to acquire the lock, we have to
   1082          * assume that others are still contending for it so that
   1083          * we'll wake them when we unlock it.
   1084          */
   1085         new_lock_type = 2;
   1086 
   1087         __futex_syscall4(&mutex->value, wait_op, oldv, NULL);
   1088     }
   1089     return 0;
   1090 }
   1091 
   1092 
   1093 int pthread_mutex_unlock(pthread_mutex_t *mutex)
   1094 {
   1095     int mtype, tid, oldv, shared;
   1096 
   1097     if (__unlikely(mutex == NULL))
   1098         return EINVAL;
   1099 
   1100     mtype  = (mutex->value & MUTEX_TYPE_MASK);
   1101     shared = (mutex->value & MUTEX_SHARED_MASK);
   1102 
   1103     /* Handle common case first */
   1104     if (__likely(mtype == MUTEX_TYPE_NORMAL)) {
   1105         _normal_unlock(mutex);
   1106         return 0;
   1107     }
   1108 
   1109     /* Do we already own this recursive or error-check mutex ? */
   1110     tid = __get_thread()->kernel_id;
   1111     if ( tid != MUTEX_OWNER(mutex) )
   1112         return EPERM;
   1113 
    1114     /* We do; decrement the counter, or release the mutex if the counter is 0 */
   1115     _recursive_lock();
   1116     oldv = mutex->value;
   1117     if (oldv & MUTEX_COUNTER_MASK) {
   1118         mutex->value = oldv - (1 << MUTEX_COUNTER_SHIFT);
   1119         oldv = 0;
   1120     } else {
   1121         mutex->value = shared | mtype;
   1122     }
   1123     _recursive_unlock();
   1124 
   1125     /* Wake one waiting thread, if any */
   1126     if ((oldv & 3) == 2) {
   1127         int wake_op = shared ? FUTEX_WAKE : FUTEX_WAKE_PRIVATE;
   1128         __futex_syscall3(&mutex->value, wake_op, 1);
   1129     }
   1130     return 0;
   1131 }
   1132 
   1133 
   1134 int pthread_mutex_trylock(pthread_mutex_t *mutex)
   1135 {
   1136     int mtype, tid, oldv, shared;
   1137 
   1138     if (__unlikely(mutex == NULL))
   1139         return EINVAL;
   1140 
   1141     mtype  = (mutex->value & MUTEX_TYPE_MASK);
   1142     shared = (mutex->value & MUTEX_SHARED_MASK);
   1143 
   1144     /* Handle common case first */
   1145     if ( __likely(mtype == MUTEX_TYPE_NORMAL) )
   1146     {
   1147         if (__atomic_cmpxchg(shared|0, shared|1, &mutex->value) == 0)
   1148             return 0;
   1149 
   1150         return EBUSY;
   1151     }
   1152 
   1153     /* Do we already own this recursive or error-check mutex ? */
   1154     tid = __get_thread()->kernel_id;
   1155     if ( tid == MUTEX_OWNER(mutex) )
   1156     {
   1157         int counter;
   1158 
   1159         if (mtype == MUTEX_TYPE_ERRORCHECK) {
   1160             /* already locked by ourselves */
   1161             return EDEADLK;
   1162         }
   1163 
   1164         _recursive_lock();
   1165         oldv = mutex->value;
   1166         counter = (oldv + (1 << MUTEX_COUNTER_SHIFT)) & MUTEX_COUNTER_MASK;
   1167         mutex->value = (oldv & ~MUTEX_COUNTER_MASK) | counter;
   1168         _recursive_unlock();
   1169         return 0;
   1170     }
   1171 
   1172     /* Restore sharing bit in mtype */
   1173     mtype |= shared;
   1174 
   1175     /* Try to lock it, just once. */
   1176     _recursive_lock();
   1177     oldv = mutex->value;
   1178     if (oldv == mtype)  /* uncontended released lock => state 1 */
   1179         mutex->value = ((tid << 16) | mtype | 1);
   1180     _recursive_unlock();
   1181 
   1182     if (oldv != mtype)
   1183         return EBUSY;
   1184 
   1185     return 0;
   1186 }
   1187 
   1188 
   1189 /* initialize 'ts' with the difference between 'abstime' and the current time
   1190  * according to 'clock'. Returns -1 if abstime already expired, or 0 otherwise.
   1191  */
   1192 static int
   1193 __timespec_to_absolute(struct timespec*  ts, const struct timespec*  abstime, clockid_t  clock)
   1194 {
   1195     clock_gettime(clock, ts);
   1196     ts->tv_sec  = abstime->tv_sec - ts->tv_sec;
   1197     ts->tv_nsec = abstime->tv_nsec - ts->tv_nsec;
   1198     if (ts->tv_nsec < 0) {
   1199         ts->tv_sec--;
   1200         ts->tv_nsec += 1000000000;
   1201     }
   1202     if ((ts->tv_nsec < 0) || (ts->tv_sec < 0))
   1203         return -1;
   1204 
   1205     return 0;
   1206 }
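/* Worked example (illustrative): if 'clock' currently reads {103, 700000000}
 * and 'abstime' is {105, 200000000}, the function first computes
 * {2, -500000000}, normalizes it to {1, 500000000} and returns 0
 * (1.5 s left). If 'abstime' had already passed, one of the fields would end
 * up negative and -1 would be returned instead.
 */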
   1207 
   1208 /* initialize 'abstime' to the current time according to 'clock' plus 'msecs'
   1209  * milliseconds.
   1210  */
   1211 static void
   1212 __timespec_to_relative_msec(struct timespec*  abstime, unsigned  msecs, clockid_t  clock)
   1213 {
   1214     clock_gettime(clock, abstime);
   1215     abstime->tv_sec  += msecs/1000;
   1216     abstime->tv_nsec += (msecs%1000)*1000000;
   1217     if (abstime->tv_nsec >= 1000000000) {
   1218         abstime->tv_sec++;
   1219         abstime->tv_nsec -= 1000000000;
   1220     }
   1221 }
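/* Worked example (illustrative): with msecs = 2500 and the clock currently at
 * {100, 900000000}, the function first produces {102, 1400000000} and then
 * normalizes the overflowing nanoseconds to yield {103, 400000000}, i.e. the
 * current time plus 2.5 seconds.
 */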
   1222 
   1223 int pthread_mutex_lock_timeout_np(pthread_mutex_t *mutex, unsigned msecs)
   1224 {
   1225     clockid_t        clock = CLOCK_MONOTONIC;
   1226     struct timespec  abstime;
   1227     struct timespec  ts;
   1228     int              mtype, tid, oldv, new_lock_type, shared, wait_op;
   1229 
   1230     /* compute absolute expiration time */
   1231     __timespec_to_relative_msec(&abstime, msecs, clock);
   1232 
   1233     if (__unlikely(mutex == NULL))
   1234         return EINVAL;
   1235 
   1236     mtype  = (mutex->value & MUTEX_TYPE_MASK);
   1237     shared = (mutex->value & MUTEX_SHARED_MASK);
   1238 
   1239     /* Handle common case first */
   1240     if ( __likely(mtype == MUTEX_TYPE_NORMAL) )
   1241     {
   1242         int  wait_op = shared ? FUTEX_WAIT : FUTEX_WAIT_PRIVATE;
   1243 
    1244         /* fast path for an uncontended lock */
   1245         if (__atomic_cmpxchg(shared|0, shared|1, &mutex->value) == 0)
   1246             return 0;
   1247 
   1248         /* loop while needed */
   1249         while (__atomic_swap(shared|2, &mutex->value) != (shared|0)) {
   1250             if (__timespec_to_absolute(&ts, &abstime, clock) < 0)
   1251                 return EBUSY;
   1252 
   1253             __futex_syscall4(&mutex->value, wait_op, shared|2, &ts);
   1254         }
   1255         return 0;
   1256     }
   1257 
   1258     /* Do we already own this recursive or error-check mutex ? */
   1259     tid = __get_thread()->kernel_id;
   1260     if ( tid == MUTEX_OWNER(mutex) )
   1261     {
   1262         int  oldv, counter;
   1263 
   1264         if (mtype == MUTEX_TYPE_ERRORCHECK) {
   1265             /* already locked by ourselves */
   1266             return EDEADLK;
   1267         }
   1268 
   1269         _recursive_lock();
   1270         oldv = mutex->value;
   1271         counter = (oldv + (1 << MUTEX_COUNTER_SHIFT)) & MUTEX_COUNTER_MASK;
   1272         mutex->value = (oldv & ~MUTEX_COUNTER_MASK) | counter;
   1273         _recursive_unlock();
   1274         return 0;
   1275     }
   1276 
   1277     /* We don't own the mutex, so try to get it.
   1278      *
    1279      * First, we try to change its state from 0 to 1; if this
    1280      * doesn't work, we try to change it to state 2.
   1281      */
   1282     new_lock_type = 1;
   1283 
   1284     /* Compute wait op and restore sharing bit in mtype */
   1285     wait_op = shared ? FUTEX_WAIT : FUTEX_WAIT_PRIVATE;
   1286     mtype  |= shared;
   1287 
   1288     for (;;) {
   1289         int  oldv;
   1290         struct timespec  ts;
   1291 
   1292         _recursive_lock();
   1293         oldv = mutex->value;
   1294         if (oldv == mtype) { /* uncontended released lock => 1 or 2 */
   1295             mutex->value = ((tid << 16) | mtype | new_lock_type);
   1296         } else if ((oldv & 3) == 1) { /* locked state 1 => state 2 */
   1297             oldv ^= 3;
   1298             mutex->value = oldv;
   1299         }
   1300         _recursive_unlock();
   1301 
   1302         if (oldv == mtype)
   1303             break;
   1304 
   1305         /*
   1306          * The lock was held, possibly contended by others.  From
   1307          * now on, if we manage to acquire the lock, we have to
   1308          * assume that others are still contending for it so that
   1309          * we'll wake them when we unlock it.
   1310          */
   1311         new_lock_type = 2;
   1312 
   1313         if (__timespec_to_absolute(&ts, &abstime, clock) < 0)
   1314             return EBUSY;
   1315 
   1316         __futex_syscall4(&mutex->value, wait_op, oldv, &ts);
   1317     }
   1318     return 0;
   1319 }
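/* Usage note (illustrative): pthread_mutex_lock_timeout_np(&m, 500) returns 0
 * once the lock is acquired, or EBUSY if it could not be acquired within
 * roughly 500 ms (the deadline is measured against CLOCK_MONOTONIC above).
 */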
   1320 
   1321 int pthread_condattr_init(pthread_condattr_t *attr)
   1322 {
   1323     if (attr == NULL)
   1324         return EINVAL;
   1325 
   1326     *attr = PTHREAD_PROCESS_PRIVATE;
   1327     return 0;
   1328 }
   1329 
   1330 int pthread_condattr_getpshared(pthread_condattr_t *attr, int *pshared)
   1331 {
   1332     if (attr == NULL || pshared == NULL)
   1333         return EINVAL;
   1334 
   1335     *pshared = *attr;
   1336     return 0;
   1337 }
   1338 
   1339 int pthread_condattr_setpshared(pthread_condattr_t *attr, int pshared)
   1340 {
   1341     if (attr == NULL)
   1342         return EINVAL;
   1343 
   1344     if (pshared != PTHREAD_PROCESS_SHARED &&
   1345         pshared != PTHREAD_PROCESS_PRIVATE)
   1346         return EINVAL;
   1347 
   1348     *attr = pshared;
   1349     return 0;
   1350 }
   1351 
   1352 int pthread_condattr_destroy(pthread_condattr_t *attr)
   1353 {
   1354     if (attr == NULL)
   1355         return EINVAL;
   1356 
   1357     *attr = 0xdeada11d;
   1358     return 0;
   1359 }
   1360 
    1361 /* We use one bit of a condition variable's value as the 'shared' flag;
    1362  * the rest is a counter.
   1363  */
   1364 #define COND_SHARED_MASK        0x0001
   1365 #define COND_COUNTER_INCREMENT  0x0002
   1366 #define COND_COUNTER_MASK       (~COND_SHARED_MASK)
   1367 
   1368 #define COND_IS_SHARED(c)  (((c)->value & COND_SHARED_MASK) != 0)
   1369 
   1370 /* XXX *technically* there is a race condition that could allow
   1371  * XXX a signal to be missed.  If thread A is preempted in _wait()
   1372  * XXX after unlocking the mutex and before waiting, and if other
   1373  * XXX threads call signal or broadcast UINT_MAX/2 times (exactly),
   1374  * XXX before thread A is scheduled again and calls futex_wait(),
   1375  * XXX then the signal will be lost.
   1376  */
   1377 
   1378 int pthread_cond_init(pthread_cond_t *cond,
   1379                       const pthread_condattr_t *attr)
   1380 {
   1381     if (cond == NULL)
   1382         return EINVAL;
   1383 
   1384     cond->value = 0;
   1385 
   1386     if (attr != NULL && *attr == PTHREAD_PROCESS_SHARED)
   1387         cond->value |= COND_SHARED_MASK;
   1388 
   1389     return 0;
   1390 }
   1391 
   1392 int pthread_cond_destroy(pthread_cond_t *cond)
   1393 {
   1394     if (cond == NULL)
   1395         return EINVAL;
   1396 
   1397     cond->value = 0xdeadc04d;
   1398     return 0;
   1399 }
   1400 
   1401 /* This function is used by pthread_cond_broadcast and
   1402  * pthread_cond_signal to atomically decrement the counter
    1403  * and then wake up 'counter' threads.
   1404  */
   1405 static int
   1406 __pthread_cond_pulse(pthread_cond_t *cond, int  counter)
   1407 {
   1408     long flags;
   1409     int  wake_op;
   1410 
   1411     if (__unlikely(cond == NULL))
   1412         return EINVAL;
   1413 
   1414     flags = (cond->value & ~COND_COUNTER_MASK);
   1415     for (;;) {
   1416         long oldval = cond->value;
   1417         long newval = ((oldval - COND_COUNTER_INCREMENT) & COND_COUNTER_MASK)
   1418                       | flags;
   1419         if (__atomic_cmpxchg(oldval, newval, &cond->value) == 0)
   1420             break;
   1421     }
   1422 
   1423     wake_op = COND_IS_SHARED(cond) ? FUTEX_WAKE : FUTEX_WAKE_PRIVATE;
   1424     __futex_syscall3(&cond->value, wake_op, counter);
   1425     return 0;
   1426 }
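/* Note on the counter update above: a waiter in pthread_cond_wait() samples
 * cond->value before sleeping and passes it to the futex wait (see
 * __pthread_cond_timedwait_relative). Because the pulse changes the value
 * before issuing the wake, a waiter that was signalled between sampling and
 * sleeping will have its futex wait return immediately instead of missing
 * the wake-up.
 */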
   1427 
   1428 int pthread_cond_broadcast(pthread_cond_t *cond)
   1429 {
   1430     return __pthread_cond_pulse(cond, INT_MAX);
   1431 }
   1432 
   1433 int pthread_cond_signal(pthread_cond_t *cond)
   1434 {
   1435     return __pthread_cond_pulse(cond, 1);
   1436 }
   1437 
   1438 int pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
   1439 {
   1440     return pthread_cond_timedwait(cond, mutex, NULL);
   1441 }
   1442 
   1443 int __pthread_cond_timedwait_relative(pthread_cond_t *cond,
   1444                                       pthread_mutex_t * mutex,
   1445                                       const struct timespec *reltime)
   1446 {
   1447     int  status;
   1448     int  oldvalue = cond->value;
   1449     int  wait_op  = COND_IS_SHARED(cond) ? FUTEX_WAIT : FUTEX_WAIT_PRIVATE;
   1450 
   1451     pthread_mutex_unlock(mutex);
   1452     status = __futex_syscall4(&cond->value, wait_op, oldvalue, reltime);
   1453     pthread_mutex_lock(mutex);
   1454 
   1455     if (status == (-ETIMEDOUT)) return ETIMEDOUT;
   1456     return 0;
   1457 }
   1458 
   1459 int __pthread_cond_timedwait(pthread_cond_t *cond,
   1460                              pthread_mutex_t * mutex,
   1461                              const struct timespec *abstime,
   1462                              clockid_t clock)
   1463 {
   1464     struct timespec ts;
   1465     struct timespec * tsp;
   1466 
   1467     if (abstime != NULL) {
   1468         if (__timespec_to_absolute(&ts, abstime, clock) < 0)
   1469             return ETIMEDOUT;
   1470         tsp = &ts;
   1471     } else {
   1472         tsp = NULL;
   1473     }
   1474 
   1475     return __pthread_cond_timedwait_relative(cond, mutex, tsp);
   1476 }
   1477 
   1478 int pthread_cond_timedwait(pthread_cond_t *cond,
   1479                            pthread_mutex_t * mutex,
   1480                            const struct timespec *abstime)
   1481 {
   1482     return __pthread_cond_timedwait(cond, mutex, abstime, CLOCK_REALTIME);
   1483 }
   1484 
   1485 
   1486 /* this one exists only for backward binary compatibility */
   1487 int pthread_cond_timedwait_monotonic(pthread_cond_t *cond,
   1488                                      pthread_mutex_t * mutex,
   1489                                      const struct timespec *abstime)
   1490 {
   1491     return __pthread_cond_timedwait(cond, mutex, abstime, CLOCK_MONOTONIC);
   1492 }
   1493 
   1494 int pthread_cond_timedwait_monotonic_np(pthread_cond_t *cond,
   1495                                      pthread_mutex_t * mutex,
   1496                                      const struct timespec *abstime)
   1497 {
   1498     return __pthread_cond_timedwait(cond, mutex, abstime, CLOCK_MONOTONIC);
   1499 }
   1500 
   1501 int pthread_cond_timedwait_relative_np(pthread_cond_t *cond,
   1502                                       pthread_mutex_t * mutex,
   1503                                       const struct timespec *reltime)
   1504 {
   1505     return __pthread_cond_timedwait_relative(cond, mutex, reltime);
   1506 }
   1507 
   1508 int pthread_cond_timeout_np(pthread_cond_t *cond,
   1509                             pthread_mutex_t * mutex,
   1510                             unsigned msecs)
   1511 {
   1512     struct timespec ts;
   1513 
   1514     ts.tv_sec = msecs / 1000;
   1515     ts.tv_nsec = (msecs % 1000) * 1000000;
   1516 
   1517     return __pthread_cond_timedwait_relative(cond, mutex, &ts);
   1518 }
   1519 
   1520 
   1521 
   1522 /* A technical note regarding our thread-local-storage (TLS) implementation:
   1523  *
   1524  * There can be up to TLSMAP_SIZE independent TLS keys in a given process,
   1525  * though the first TLSMAP_START keys are reserved for Bionic to hold
   1526  * special thread-specific variables like errno or a pointer to
   1527  * the current thread's descriptor.
   1528  *
   1529  * while stored in the TLS area, these entries cannot be accessed through
    1530  * pthread_getspecific() / pthread_setspecific() or pthread_key_delete()
   1531  *
   1532  * also, some entries in the key table are pre-allocated (see tlsmap_lock)
    1533  * to greatly simplify and speed up some OpenGL-related operations, though
    1534  * their initial value will be NULL on all threads.
   1535  *
    1536  * You can use pthread_getspecific()/setspecific() on these, and in theory
    1537  * you could call pthread_key_delete() on them as well, though doing so
    1538  * would probably break some apps.
   1539  *
   1540  * The 'tlsmap_t' type defined below implements a shared global map of
   1541  * currently created/allocated TLS keys and the destructors associated
   1542  * with them. You should use tlsmap_lock/unlock to access it to avoid
   1543  * any race condition.
   1544  *
   1545  * the global TLS map simply contains a bitmap of allocated keys, and
   1546  * an array of destructors.
   1547  *
   1548  * each thread has a TLS area that is a simple array of TLSMAP_SIZE void*
   1549  * pointers. the TLS area of the main thread is stack-allocated in
   1550  * __libc_init_common, while the TLS area of other threads is placed at
   1551  * the top of their stack in pthread_create.
   1552  *
    1553  * when pthread_key_create() is called, it finds the first free key in the
    1554  * bitmap, sets its bit to 1, and records the destructor along with it.
   1555  *
    1556  * when pthread_key_delete() is called, it erases the key's bitmap bit
    1557  * and its destructor, and also clears the key's data in the TLS area of
    1558  * all created threads. As mandated by POSIX, it is the responsibility of
    1559  * the caller of pthread_key_delete() to properly reclaim the objects that
    1560  * were pointed to by these data fields (either before or after the call).
   1561  *
   1562  */
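/* Usage sketch of the public API described above (illustrative only; the key
 * and buffer names are hypothetical). Each thread that calls setspecific()
 * gets its own value; the destructor runs when such a thread exits.
 *
 *   static pthread_key_t buffer_key;
 *
 *   pthread_key_create(&buffer_key, free);         // 'free' is the destructor
 *   pthread_setspecific(buffer_key, malloc(128));  // per-thread value
 *   char* buf = pthread_getspecific(buffer_key);   // read back in same thread
 *   ...
 *   pthread_key_delete(buffer_key);                // never calls 'free' itself
 */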
   1563 
   1564 /* TLS Map implementation
   1565  */
   1566 
   1567 #define TLSMAP_START      (TLS_SLOT_MAX_WELL_KNOWN+1)
   1568 #define TLSMAP_SIZE       BIONIC_TLS_SLOTS
   1569 #define TLSMAP_BITS       32
   1570 #define TLSMAP_WORDS      ((TLSMAP_SIZE+TLSMAP_BITS-1)/TLSMAP_BITS)
   1571 #define TLSMAP_WORD(m,k)  (m)->map[(k)/TLSMAP_BITS]
   1572 #define TLSMAP_MASK(k)    (1U << ((k)&(TLSMAP_BITS-1)))
   1573 
    1574 /* this macro is used to quickly check that a key falls within the valid user key range */
   1575 #define TLSMAP_VALIDATE_KEY(key)  \
   1576     ((key) >= TLSMAP_START && (key) < TLSMAP_SIZE)
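/* Worked example (illustrative): with TLSMAP_BITS == 32, key 37 lives in map
 * word 37/32 == 1 and uses bit 37 & 31 == 5, so TLSMAP_WORD(m,37) selects
 * m->map[1] and TLSMAP_MASK(37) evaluates to 1U << 5 == 0x20.
 */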
   1577 
   1578 /* the type of tls key destructor functions */
   1579 typedef void (*tls_dtor_t)(void*);
   1580 
   1581 typedef struct {
   1582     int         init;                  /* see comment in tlsmap_lock() */
   1583     uint32_t    map[TLSMAP_WORDS];     /* bitmap of allocated keys */
   1584     tls_dtor_t  dtors[TLSMAP_SIZE];    /* key destructors */
   1585 } tlsmap_t;
   1586 
   1587 static pthread_mutex_t  _tlsmap_lock = PTHREAD_MUTEX_INITIALIZER;
   1588 static tlsmap_t         _tlsmap;
   1589 
   1590 /* lock the global TLS map lock and return a handle to it */
   1591 static __inline__ tlsmap_t* tlsmap_lock(void)
   1592 {
   1593     tlsmap_t*   m = &_tlsmap;
   1594 
   1595     pthread_mutex_lock(&_tlsmap_lock);
   1596     /* we need to initialize the first entry of the 'map' array
   1597      * with the value TLS_DEFAULT_ALLOC_MAP. doing it statically
   1598      * when declaring _tlsmap is a bit awkward and is going to
   1599      * produce warnings, so do it the first time we use the map
   1600      * instead
   1601      */
   1602     if (__unlikely(!m->init)) {
   1603         TLSMAP_WORD(m,0) = TLS_DEFAULT_ALLOC_MAP;
   1604         m->init          = 1;
   1605     }
   1606     return m;
   1607 }
   1608 
   1609 /* unlock the global TLS map */
   1610 static __inline__ void tlsmap_unlock(tlsmap_t*  m)
   1611 {
   1612     pthread_mutex_unlock(&_tlsmap_lock);
   1613     (void)m;  /* a good compiler is a happy compiler */
   1614 }
   1615 
    1616 /* test to see whether a key is allocated */
   1617 static __inline__ int tlsmap_test(tlsmap_t*  m, int  key)
   1618 {
   1619     return (TLSMAP_WORD(m,key) & TLSMAP_MASK(key)) != 0;
   1620 }
   1621 
   1622 /* set the destructor and bit flag on a newly allocated key */
   1623 static __inline__ void tlsmap_set(tlsmap_t*  m, int  key, tls_dtor_t  dtor)
   1624 {
   1625     TLSMAP_WORD(m,key) |= TLSMAP_MASK(key);
   1626     m->dtors[key]       = dtor;
   1627 }
   1628 
   1629 /* clear the destructor and bit flag on an existing key */
   1630 static __inline__ void  tlsmap_clear(tlsmap_t*  m, int  key)
   1631 {
   1632     TLSMAP_WORD(m,key) &= ~TLSMAP_MASK(key);
   1633     m->dtors[key]       = NULL;
   1634 }
   1635 
   1636 /* allocate a new TLS key, return -1 if no room left */
   1637 static int tlsmap_alloc(tlsmap_t*  m, tls_dtor_t  dtor)
   1638 {
   1639     int  key;
   1640 
   1641     for ( key = TLSMAP_START; key < TLSMAP_SIZE; key++ ) {
   1642         if ( !tlsmap_test(m, key) ) {
   1643             tlsmap_set(m, key, dtor);
   1644             return key;
   1645         }
   1646     }
   1647     return -1;
   1648 }
   1649 
   1650 
   1651 int pthread_key_create(pthread_key_t *key, void (*destructor_function)(void *))
   1652 {
   1653     uint32_t   err = ENOMEM;
   1654     tlsmap_t*  map = tlsmap_lock();
   1655     int        k   = tlsmap_alloc(map, destructor_function);
   1656 
   1657     if (k >= 0) {
   1658         *key = k;
   1659         err  = 0;
   1660     }
   1661     tlsmap_unlock(map);
   1662     return err;
   1663 }
   1664 
   1665 
    1666 /* This deletes a pthread_key_t. Note that the standard mandates that this does
    1667  * not call the destructor of non-NULL key values. Instead, it is the
    1668  * responsibility of the caller to properly dispose of the corresponding data
    1669  * and resources, using any means it finds suitable (see the sketch below).
    1670  *
    1671  * On the other hand, this function will clear the corresponding key data
    1672  * values in all known threads. This prevents later (invalid) calls to
    1673  * pthread_getspecific() from returning invalid/stale values.
   1674  */
   1675 int pthread_key_delete(pthread_key_t key)
   1676 {
   1677     uint32_t             err;
   1678     pthread_internal_t*  thr;
   1679     tlsmap_t*            map;
   1680 
   1681     if (!TLSMAP_VALIDATE_KEY(key)) {
   1682         return EINVAL;
   1683     }
   1684 
   1685     map = tlsmap_lock();
   1686 
   1687     if (!tlsmap_test(map, key)) {
   1688         err = EINVAL;
   1689         goto err1;
   1690     }
   1691 
   1692     /* clear value in all threads */
   1693     pthread_mutex_lock(&gThreadListLock);
   1694     for ( thr = gThreadList; thr != NULL; thr = thr->next ) {
   1695         /* avoid zombie threads with a negative 'join_count'. these are really
   1696          * already dead and don't have a TLS area anymore.
   1697          *
   1698          * similarly, it is possible to have thr->tls == NULL for threads that
   1699          * were just recently created through pthread_create() but whose
   1700          * startup trampoline (__thread_entry) hasn't been run yet by the
   1701          * scheduler. so check for this too.
   1702          */
   1703         if (thr->join_count < 0 || !thr->tls)
   1704             continue;
   1705 
   1706         thr->tls[key] = NULL;
   1707     }
   1708     tlsmap_clear(map, key);
   1709 
   1710     pthread_mutex_unlock(&gThreadListLock);
   1711     err = 0;
   1712 
   1713 err1:
   1714     tlsmap_unlock(map);
   1715     return err;
   1716 }
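/* Usage sketch for the note above (illustrative only, hypothetical 'log_key'):
 * the calling thread reclaims its own value before deleting the key, since
 * pthread_key_delete() never runs the destructor; values still held by other
 * threads must be disposed of by some other means.
 *
 *   void* data = pthread_getspecific(log_key);
 *   pthread_setspecific(log_key, NULL);
 *   free(data);                     // caller reclaims its own object
 *   pthread_key_delete(log_key);    // only clears the slot in every thread
 */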
   1717 
   1718 
   1719 int pthread_setspecific(pthread_key_t key, const void *ptr)
   1720 {
   1721     int        err = EINVAL;
   1722     tlsmap_t*  map;
   1723 
   1724     if (TLSMAP_VALIDATE_KEY(key)) {
   1725         /* check that we're trying to set data for an allocated key */
   1726         map = tlsmap_lock();
   1727         if (tlsmap_test(map, key)) {
   1728             ((uint32_t *)__get_tls())[key] = (uint32_t)ptr;
   1729             err = 0;
   1730         }
   1731         tlsmap_unlock(map);
   1732     }
   1733     return err;
   1734 }
   1735 
   1736 void * pthread_getspecific(pthread_key_t key)
   1737 {
   1738     if (!TLSMAP_VALIDATE_KEY(key)) {
   1739         return NULL;
   1740     }
   1741 
    1742     /* for performance reasons, we do not lock/unlock the global TLS map
   1743      * to check that the key is properly allocated. if the key was not
   1744      * allocated, the value read from the TLS should always be NULL
   1745      * due to pthread_key_delete() clearing the values for all threads.
   1746      */
   1747     return (void *)(((unsigned *)__get_tls())[key]);
   1748 }
   1749 
    1750 /* POSIX mandates that this be defined in <limits.h> but we don't have
   1751  * it just yet.
   1752  */
   1753 #ifndef PTHREAD_DESTRUCTOR_ITERATIONS
   1754 #  define PTHREAD_DESTRUCTOR_ITERATIONS  4
   1755 #endif
   1756 
   1757 /* this function is called from pthread_exit() to remove all TLS key data
   1758  * from this thread's TLS area. this must call the destructor of all keys
   1759  * that have a non-NULL data value (and a non-NULL destructor).
   1760  *
   1761  * because destructors can do funky things like deleting/creating other
   1762  * keys, we need to implement this in a loop
   1763  */
   1764 static void pthread_key_clean_all(void)
   1765 {
   1766     tlsmap_t*    map;
   1767     void**       tls = (void**)__get_tls();
   1768     int          rounds = PTHREAD_DESTRUCTOR_ITERATIONS;
   1769 
   1770     map = tlsmap_lock();
   1771 
   1772     for (rounds = PTHREAD_DESTRUCTOR_ITERATIONS; rounds > 0; rounds--)
   1773     {
   1774         int  kk, count = 0;
   1775 
   1776         for (kk = TLSMAP_START; kk < TLSMAP_SIZE; kk++) {
   1777             if ( tlsmap_test(map, kk) )
   1778             {
   1779                 void*       data = tls[kk];
   1780                 tls_dtor_t  dtor = map->dtors[kk];
   1781 
   1782                 if (data != NULL && dtor != NULL)
   1783                 {
   1784                    /* we need to clear the key data now, this will prevent the
   1785                     * destructor (or a later one) from seeing the old value if
   1786                     * it calls pthread_getspecific() for some odd reason
   1787                     *
   1788                     * we do not do this if 'dtor == NULL' just in case another
   1789                     * destructor function might be responsible for manually
   1790                     * releasing the corresponding data.
   1791                     */
   1792                     tls[kk] = NULL;
   1793 
   1794                    /* because the destructor is free to call pthread_key_create
   1795                     * and/or pthread_key_delete, we need to temporarily unlock
   1796                     * the TLS map
   1797                     */
   1798                     tlsmap_unlock(map);
   1799                     (*dtor)(data);
   1800                     map = tlsmap_lock();
   1801 
   1802                     count += 1;
   1803                 }
   1804             }
   1805         }
   1806 
   1807         /* if we didn't call any destructor, there is no need to check the
   1808          * TLS data again
   1809          */
   1810         if (count == 0)
   1811             break;
   1812     }
   1813     tlsmap_unlock(map);
   1814 }
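/* Illustrative scenario for the loop above (hypothetical key and destructor
 * names): if the destructor of one key stores a fresh value under another key,
 * that value may only be noticed on a later pass over the keys, which is why
 * up to PTHREAD_DESTRUCTOR_ITERATIONS rounds are made before giving up.
 *
 *   static void first_dtor(void* p) {
 *       free(p);
 *       pthread_setspecific(other_key, strdup("still here"));
 *   }
 */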
   1815 
   1816 // man says this should be in <linux/unistd.h>, but it isn't
   1817 extern int tkill(int tid, int sig);
   1818 
   1819 int pthread_kill(pthread_t tid, int sig)
   1820 {
   1821     int  ret;
   1822     int  old_errno = errno;
   1823     pthread_internal_t * thread = (pthread_internal_t *)tid;
   1824 
   1825     ret = tkill(thread->kernel_id, sig);
   1826     if (ret < 0) {
   1827         ret = errno;
   1828         errno = old_errno;
   1829     }
   1830 
   1831     return ret;
   1832 }
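/* Usage sketch (illustrative only, hypothetical 'worker' thread): deliver
 * SIGUSR1 to one specific thread; any error is reported via the return value.
 *
 *   int rc = pthread_kill(worker, SIGUSR1);
 *   if (rc != 0) {
 *       // e.g. rc == ESRCH if the kernel thread no longer exists
 *   }
 */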
   1833 
   1834 extern int __rt_sigprocmask(int, const sigset_t *, sigset_t *, size_t);
   1835 
   1836 int pthread_sigmask(int how, const sigset_t *set, sigset_t *oset)
   1837 {
   1838     /* pthread_sigmask must return the error code, but the syscall
   1839      * will set errno instead and return 0/-1
   1840      */
   1841     int ret, old_errno = errno;
   1842 
   1843     ret = __rt_sigprocmask(how, set, oset, _NSIG / 8);
   1844     if (ret < 0)
   1845         ret = errno;
   1846 
   1847     errno = old_errno;
   1848     return ret;
   1849 }
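/* Usage sketch (illustrative only): block SIGUSR1 in the calling thread. As
 * described in the comment above, any failure is reported through the return
 * value, not through errno.
 *
 *   sigset_t set;
 *   sigemptyset(&set);
 *   sigaddset(&set, SIGUSR1);
 *   int rc = pthread_sigmask(SIG_BLOCK, &set, NULL);
 *   if (rc != 0) {
 *       // handle the error code held in 'rc'
 *   }
 */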
   1850 
   1851 
   1852 int pthread_getcpuclockid(pthread_t  tid, clockid_t  *clockid)
   1853 {
   1854     const int            CLOCK_IDTYPE_BITS = 3;
   1855     pthread_internal_t*  thread = (pthread_internal_t*)tid;
   1856 
   1857     if (!thread)
   1858         return ESRCH;
   1859 
   1860     *clockid = CLOCK_THREAD_CPUTIME_ID | (thread->kernel_id << CLOCK_IDTYPE_BITS);
   1861     return 0;
   1862 }
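/* Usage sketch (illustrative only, hypothetical 'worker' thread): sample the
 * CPU time consumed so far by another thread.
 *
 *   clockid_t cid;
 *   struct timespec ts;
 *   if (pthread_getcpuclockid(worker, &cid) == 0 &&
 *       clock_gettime(cid, &ts) == 0) {
 *       // ts now holds the CPU time used by 'worker'
 *   }
 */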
   1863 
   1864 
    1865 /* NOTE: this implementation doesn't support an init function that throws a C++ exception
   1866  *       or calls fork()
   1867  */
   1868 int  pthread_once( pthread_once_t*  once_control,  void (*init_routine)(void) )
   1869 {
   1870     static pthread_mutex_t   once_lock = PTHREAD_MUTEX_INITIALIZER;
   1871 
   1872     if (*once_control == PTHREAD_ONCE_INIT) {
   1873         _normal_lock( &once_lock );
   1874         if (*once_control == PTHREAD_ONCE_INIT) {
   1875             (*init_routine)();
   1876             *once_control = ~PTHREAD_ONCE_INIT;
   1877         }
   1878         _normal_unlock( &once_lock );
   1879     }
   1880     return 0;
   1881 }
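/* Usage sketch (illustrative only, hypothetical names): 'init_state' runs
 * exactly once no matter how many threads reach this call.
 *
 *   static pthread_once_t once = PTHREAD_ONCE_INIT;
 *
 *   static void init_state(void) {
 *       // one-time setup goes here
 *   }
 *
 *   pthread_once(&once, init_state);
 */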
   1882