/*
 * Copyright (C) 2008 The Android Open Source Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <pthread.h>

#include <errno.h>
#include <limits.h>
#include <sys/mman.h>
#include <unistd.h>

#include "pthread_internal.h"

#include "private/bionic_atomic_inline.h"
#include "private/bionic_futex.h"
#include "private/bionic_tls.h"

extern void pthread_debug_mutex_lock_check(pthread_mutex_t *mutex);
extern void pthread_debug_mutex_unlock_check(pthread_mutex_t *mutex);

/* A mutex is implemented as a 32-bit integer holding the following fields:
 *
 * bits:     name     description
 * 31-16     tid      owner thread's tid (recursive and errorcheck only)
 * 15-14     type     mutex type
 * 13        shared   process-shared flag
 * 12-2      counter  recursion counter (recursive mutexes only)
 * 1-0       state    lock state (0, 1 or 2)
 */
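
/* Worked example (illustrative, not part of the implementation): a recursive
 * mutex owned by the thread with tid 0x1234, with a recursion counter of 3,
 * locked with no waiters, would have the value:
 *
 *     (0x1234 << 16)   owner tid        0x12340000
 *   | (1 << 14)        type: recursive  0x00004000
 *   | (3 << 2)         counter: 3       0x0000000c
 *   | 1                state: locked    0x00000001
 *   ----------------------------------------------
 *                                       0x1234400d
 */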

/* Convenience macro, creates a mask of 'bits' bits that starts from
 * the 'shift'-th least significant bit in a 32-bit word.
 *
 * Examples: FIELD_MASK(0,4)  -> 0xf
 *           FIELD_MASK(16,9) -> 0x1ff0000
 */
#define  FIELD_MASK(shift,bits)           (((1 << (bits))-1) << (shift))

/* This one is used to create a bit pattern from a given field value */
#define  FIELD_TO_BITS(val,shift,bits)    (((val) & ((1 << (bits))-1)) << (shift))

/* And this one does the opposite, i.e. extracts a field's value from a bit pattern */
#define  FIELD_FROM_BITS(val,shift,bits)  (((val) >> (shift)) & ((1 << (bits))-1))
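
/* Examples (illustrative): the two macros are inverses of each other for
 * in-range values, e.g.
 *
 *   FIELD_TO_BITS(2,14,2)        -> 0x8000
 *   FIELD_FROM_BITS(0x8000,14,2) -> 2
 */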

/* Mutex state:
 *
 * 0 for unlocked
 * 1 for locked, no waiters
 * 2 for locked, maybe waiters
 */
#define  MUTEX_STATE_SHIFT      0
#define  MUTEX_STATE_LEN        2

#define  MUTEX_STATE_MASK           FIELD_MASK(MUTEX_STATE_SHIFT, MUTEX_STATE_LEN)
#define  MUTEX_STATE_FROM_BITS(v)   FIELD_FROM_BITS(v, MUTEX_STATE_SHIFT, MUTEX_STATE_LEN)
#define  MUTEX_STATE_TO_BITS(v)     FIELD_TO_BITS(v, MUTEX_STATE_SHIFT, MUTEX_STATE_LEN)

#define  MUTEX_STATE_UNLOCKED            0   /* must be 0 to match __PTHREAD_MUTEX_INIT_VALUE */
#define  MUTEX_STATE_LOCKED_UNCONTENDED  1   /* must be 1 due to atomic dec in unlock operation */
#define  MUTEX_STATE_LOCKED_CONTENDED    2   /* must be 1 + LOCKED_UNCONTENDED due to atomic dec */

#define  MUTEX_STATE_BITS_UNLOCKED            MUTEX_STATE_TO_BITS(MUTEX_STATE_UNLOCKED)
#define  MUTEX_STATE_BITS_LOCKED_UNCONTENDED  MUTEX_STATE_TO_BITS(MUTEX_STATE_LOCKED_UNCONTENDED)
#define  MUTEX_STATE_BITS_LOCKED_CONTENDED    MUTEX_STATE_TO_BITS(MUTEX_STATE_LOCKED_CONTENDED)

/* return true iff the mutex is locked with no waiters */
#define  MUTEX_STATE_BITS_IS_LOCKED_UNCONTENDED(v)  (((v) & MUTEX_STATE_MASK) == MUTEX_STATE_BITS_LOCKED_UNCONTENDED)

/* return true iff the mutex is locked with maybe waiters */
#define  MUTEX_STATE_BITS_IS_LOCKED_CONTENDED(v)   (((v) & MUTEX_STATE_MASK) == MUTEX_STATE_BITS_LOCKED_CONTENDED)

/* used to flip from LOCKED_UNCONTENDED to LOCKED_CONTENDED */
#define  MUTEX_STATE_BITS_FLIP_CONTENTION(v)      ((v) ^ (MUTEX_STATE_BITS_LOCKED_CONTENDED ^ MUTEX_STATE_BITS_LOCKED_UNCONTENDED))
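
/* The flip works because MUTEX_STATE_BITS_LOCKED_CONTENDED ^
 * MUTEX_STATE_BITS_LOCKED_UNCONTENDED == 3, so XOR-ing with it toggles the
 * state field between 1 and 2 without touching any other bits.
 * For example (illustrative): MUTEX_STATE_BITS_FLIP_CONTENTION(0x4001) == 0x4002.
 */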

/* Mutex counter:
 *
 * We need to check for overflow before incrementing, and we also need to
 * detect when the counter is 0.
 */
#define  MUTEX_COUNTER_SHIFT         2
#define  MUTEX_COUNTER_LEN           11
#define  MUTEX_COUNTER_MASK          FIELD_MASK(MUTEX_COUNTER_SHIFT, MUTEX_COUNTER_LEN)

#define  MUTEX_COUNTER_BITS_WILL_OVERFLOW(v)    (((v) & MUTEX_COUNTER_MASK) == MUTEX_COUNTER_MASK)
#define  MUTEX_COUNTER_BITS_IS_ZERO(v)          (((v) & MUTEX_COUNTER_MASK) == 0)

/* Used to increment the counter directly after overflow has been checked */
#define  MUTEX_COUNTER_BITS_ONE      FIELD_TO_BITS(1,MUTEX_COUNTER_SHIFT,MUTEX_COUNTER_LEN)

/* Mutex shared bit flag
 *
 * This flag is set to indicate that the mutex is shared among processes.
 * This changes the futex opcode we use for futex wait/wake operations
 * (non-shared operations are much faster).
 */
#define  MUTEX_SHARED_SHIFT    13
#define  MUTEX_SHARED_MASK     FIELD_MASK(MUTEX_SHARED_SHIFT,1)

/* Mutex type:
 *
 * We support normal, recursive and errorcheck mutexes.
 *
 * The constants defined here *cannot* be changed because they must match
 * the C library ABI which defines the following initialization values in
 * <pthread.h>:
 *
 *   __PTHREAD_MUTEX_INIT_VALUE
 *   __PTHREAD_RECURSIVE_MUTEX_INIT_VALUE
 *   __PTHREAD_ERRORCHECK_MUTEX_INIT_VALUE
 */
#define  MUTEX_TYPE_SHIFT      14
#define  MUTEX_TYPE_LEN        2
#define  MUTEX_TYPE_MASK       FIELD_MASK(MUTEX_TYPE_SHIFT,MUTEX_TYPE_LEN)

#define  MUTEX_TYPE_NORMAL          0  /* Must be 0 to match __PTHREAD_MUTEX_INIT_VALUE */
#define  MUTEX_TYPE_RECURSIVE       1
#define  MUTEX_TYPE_ERRORCHECK      2

#define  MUTEX_TYPE_TO_BITS(t)       FIELD_TO_BITS(t, MUTEX_TYPE_SHIFT, MUTEX_TYPE_LEN)

#define  MUTEX_TYPE_BITS_NORMAL      MUTEX_TYPE_TO_BITS(MUTEX_TYPE_NORMAL)
#define  MUTEX_TYPE_BITS_RECURSIVE   MUTEX_TYPE_TO_BITS(MUTEX_TYPE_RECURSIVE)
#define  MUTEX_TYPE_BITS_ERRORCHECK  MUTEX_TYPE_TO_BITS(MUTEX_TYPE_ERRORCHECK)

/* Mutex owner field:
 *
 * This is only used for recursive and errorcheck mutexes. It holds the
 * tid of the owning thread. Note that this works because the Linux
 * kernel _only_ uses 16-bit values for tids.
 *
 * More specifically, the tid will wrap back to 10000 once it exceeds 32768
 * in application processes. You can check this by running the following
 * inside an adb shell session:
 *
    OLDPID=$$;
    while true; do
    NEWPID=$(sh -c 'echo $$')
    if [ "$NEWPID" -gt 32768 ]; then
        echo "AARGH: new PID $NEWPID is too high!"
        exit 1
    fi
    if [ "$NEWPID" -lt "$OLDPID" ]; then
        echo "****** Wrapping from PID $OLDPID to $NEWPID. *******"
    else
        echo -n "$NEWPID!"
    fi
    OLDPID=$NEWPID
    done

 * Note that you can run the same example on a desktop Linux system; the
 * wrapping will also happen at 32768, but it will go back to 300 instead
 * of 10000.
 */
#define  MUTEX_OWNER_SHIFT     16
#define  MUTEX_OWNER_LEN       16

#define  MUTEX_OWNER_FROM_BITS(v)    FIELD_FROM_BITS(v,MUTEX_OWNER_SHIFT,MUTEX_OWNER_LEN)
#define  MUTEX_OWNER_TO_BITS(v)      FIELD_TO_BITS(v,MUTEX_OWNER_SHIFT,MUTEX_OWNER_LEN)

/* A mutex attribute holds the following fields:
 *
 * bits:     name       description
 * 0-3       type       type of mutex
 * 4         shared     process-shared flag
 */
#define  MUTEXATTR_TYPE_MASK   0x000f
#define  MUTEXATTR_SHARED_MASK 0x0010
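
/* For example (illustrative), a process-shared recursive attribute is encoded
 * as MUTEXATTR_SHARED_MASK | PTHREAD_MUTEX_RECURSIVE == 0x0011, assuming
 * PTHREAD_MUTEX_RECURSIVE == 1 as defined in <pthread.h>.
 */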

int pthread_mutexattr_init(pthread_mutexattr_t *attr)
{
    *attr = PTHREAD_MUTEX_DEFAULT;
    return 0;
}

int pthread_mutexattr_destroy(pthread_mutexattr_t *attr)
{
    *attr = -1;
    return 0;
}

int pthread_mutexattr_gettype(const pthread_mutexattr_t *attr, int *type_p)
{
    int type = (*attr & MUTEXATTR_TYPE_MASK);

    if (type < PTHREAD_MUTEX_NORMAL || type > PTHREAD_MUTEX_ERRORCHECK) {
        return EINVAL;
    }

    *type_p = type;
    return 0;
}

int pthread_mutexattr_settype(pthread_mutexattr_t *attr, int type)
{
    if (type < PTHREAD_MUTEX_NORMAL || type > PTHREAD_MUTEX_ERRORCHECK) {
        return EINVAL;
    }

    *attr = (*attr & ~MUTEXATTR_TYPE_MASK) | type;
    return 0;
}

/* Process-shared mutexes are supported, with the caveat described below. */

int pthread_mutexattr_setpshared(pthread_mutexattr_t *attr, int pshared)
{
    switch (pshared) {
    case PTHREAD_PROCESS_PRIVATE:
        *attr &= ~MUTEXATTR_SHARED_MASK;
        return 0;

    case PTHREAD_PROCESS_SHARED:
        /* our current implementation of pthread actually supports shared
         * mutexes but won't cleanup if a process dies with the mutex held.
         * Nevertheless, it's better than nothing. Shared mutexes are used
         * by surfaceflinger and audioflinger.
         */
        *attr |= MUTEXATTR_SHARED_MASK;
        return 0;
    }
    return EINVAL;
}

int pthread_mutexattr_getpshared(const pthread_mutexattr_t* attr, int* pshared) {
    *pshared = (*attr & MUTEXATTR_SHARED_MASK) ? PTHREAD_PROCESS_SHARED : PTHREAD_PROCESS_PRIVATE;
    return 0;
}

int pthread_mutex_init(pthread_mutex_t* mutex, const pthread_mutexattr_t* attr) {
    if (__predict_true(attr == NULL)) {
        mutex->value = MUTEX_TYPE_BITS_NORMAL;
        return 0;
    }

    int value = 0;
    if ((*attr & MUTEXATTR_SHARED_MASK) != 0) {
        value |= MUTEX_SHARED_MASK;
    }

    switch (*attr & MUTEXATTR_TYPE_MASK) {
    case PTHREAD_MUTEX_NORMAL:
        value |= MUTEX_TYPE_BITS_NORMAL;
        break;
    case PTHREAD_MUTEX_RECURSIVE:
        value |= MUTEX_TYPE_BITS_RECURSIVE;
        break;
    case PTHREAD_MUTEX_ERRORCHECK:
        value |= MUTEX_TYPE_BITS_ERRORCHECK;
        break;
    default:
        return EINVAL;
    }

    mutex->value = value;
    return 0;
}
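
/* Typical caller-side usage (an illustrative sketch, not part of this file):
 *
 *   pthread_mutexattr_t attr;
 *   pthread_mutex_t m;
 *   pthread_mutexattr_init(&attr);
 *   pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
 *   pthread_mutex_init(&m, &attr);   // m.value == MUTEX_TYPE_BITS_RECURSIVE
 *   pthread_mutexattr_destroy(&attr);
 */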

/*
 * Lock a non-recursive mutex.
 *
 * As noted above, there are three states:
 *   0 (unlocked, no contention)
 *   1 (locked, no contention)
 *   2 (locked, contention)
 *
 * Non-recursive mutexes don't use the thread-id or counter fields, and the
 * "type" value is zero, so the only bits that will be set are the ones in
 * the lock state field.
 */
static inline void _normal_lock(pthread_mutex_t* mutex, int shared) {
    /* convenience shortcuts */
    const int unlocked           = shared | MUTEX_STATE_BITS_UNLOCKED;
    const int locked_uncontended = shared | MUTEX_STATE_BITS_LOCKED_UNCONTENDED;
    /*
     * The common case is an unlocked mutex, so we begin by trying to
     * change the lock's state from 0 (UNLOCKED) to 1 (LOCKED).
     * __bionic_cmpxchg() returns 0 if it made the swap successfully.
     * If the result is nonzero, this lock is already held by another thread.
     */
    if (__bionic_cmpxchg(unlocked, locked_uncontended, &mutex->value) != 0) {
        const int locked_contended = shared | MUTEX_STATE_BITS_LOCKED_CONTENDED;
        /*
         * We want to go to sleep until the mutex is available, which
         * requires promoting it to state 2 (CONTENDED). We need to
         * swap in the new state value and then wait until somebody wakes us up.
         *
         * __bionic_swap() returns the previous value.  We swap 2 in and
         * see if we got zero back; if so, we have acquired the lock.  If
         * not, another thread still holds the lock and we wait again.
         *
         * The second argument to the __futex_wait() call is compared
         * against the current value.  If it doesn't match, __futex_wait()
         * returns immediately (otherwise, it sleeps for a time specified
         * by the third argument; 0 means sleep forever).  This ensures
         * that the mutex is in state 2 when we go to sleep on it, which
         * guarantees a wake-up call.
         */
        while (__bionic_swap(locked_contended, &mutex->value) != unlocked) {
            __futex_wait_ex(&mutex->value, shared, locked_contended, NULL);
        }
    }
    ANDROID_MEMBAR_FULL();
}
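
/* Summary of the state machine implemented by _normal_lock() above and
 * _normal_unlock() below (an illustrative sketch):
 *
 *   0 --lock: cmpxchg 0->1---------------------> 1
 *   1 --contended lock: swap in 2, futex_wait--> 2
 *   1 --unlock: atomic dec---------------------> 0
 *   2 --unlock: dec, then store 0, futex_wake--> 0
 *
 * State 2 only means that waiters *may* exist, so the worst case is one
 * spurious futex_wake call, never a lost wake-up.
 */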

/*
 * Release a non-recursive mutex.  The caller is responsible for determining
 * that we are in fact the owner of this lock.
 */
static inline void _normal_unlock(pthread_mutex_t* mutex, int shared) {
    ANDROID_MEMBAR_FULL();

    /*
     * The mutex state will be 1 or (rarely) 2.  We use an atomic decrement
     * to release the lock.  __bionic_atomic_dec() returns the previous value;
     * if it wasn't 1 we have to do some additional work.
     */
    if (__bionic_atomic_dec(&mutex->value) != (shared|MUTEX_STATE_BITS_LOCKED_UNCONTENDED)) {
        /*
         * Start by releasing the lock.  The decrement changed it from
         * "contended lock" to "uncontended lock", which means we still
         * hold it, and anybody who tries to sneak in will push it back
         * to state 2.
         *
         * Once we set it to zero the lock is up for grabs.  We follow
         * this with a __futex_wake() to ensure that one of the waiting
         * threads has a chance to grab it.
         *
         * This doesn't cause a race with the swap/wait pair in
         * _normal_lock(), because the __futex_wait() call there will
         * return immediately if the mutex value isn't 2.
         */
        mutex->value = shared;

        /*
         * Wake up one waiting thread.  We don't know which thread will be
         * woken or when it'll start executing -- futexes make no guarantees
         * here.  There may not even be a thread waiting.
         *
         * The newly-woken thread will replace the 0 we just set above
         * with 2, which means that when it eventually releases the mutex
         * it will also call FUTEX_WAKE.  This results in one extra wake
         * call whenever a lock is contended, but lets us avoid forgetting
         * anyone without requiring us to track the number of sleepers.
         *
         * It's possible for another thread to sneak in and grab the lock
         * between the zero assignment above and the wake call below.  If
         * the new thread is "slow" and holds the lock for a while, we'll
         * wake up a sleeper, which will swap in a 2 and then go back to
         * sleep since the lock is still held.  If the new thread is "fast",
         * running to completion before we call wake, the thread we
         * eventually wake will find an unlocked mutex and will execute.
         * Either way we have correct behavior and nobody is orphaned on
         * the wait queue.
         */
        __futex_wake_ex(&mutex->value, shared, 1);
    }
}

/* This common inlined function is used to increment the counter of an
 * errorcheck or recursive mutex.
 *
 * For errorcheck mutexes, it will return EDEADLK.
 * If the counter overflows, it will return EAGAIN.
 * Otherwise, it atomically increments the counter and returns 0
 * after providing an acquire barrier.
 *
 * mtype is the current mutex type.
 * mvalue is the current mutex value (already loaded).
 * mutex points to the mutex.
 */
static inline __always_inline int _recursive_increment(pthread_mutex_t* mutex, int mvalue, int mtype) {
    if (mtype == MUTEX_TYPE_BITS_ERRORCHECK) {
        /* trying to re-lock a mutex we already acquired */
        return EDEADLK;
    }

    /* Detect recursive lock overflow and return EAGAIN.
     * This is safe because only the owner thread can modify the
     * counter bits in the mutex value.
     */
    if (MUTEX_COUNTER_BITS_WILL_OVERFLOW(mvalue)) {
        return EAGAIN;
    }

    /* We own the mutex, but other threads are able to change
     * the lower bits (e.g. promoting it to "contended"), so we
     * need to use an atomic cmpxchg loop to update the counter.
     */
    for (;;) {
        /* increment counter, overflow was already checked */
        int newval = mvalue + MUTEX_COUNTER_BITS_ONE;
        if (__predict_true(__bionic_cmpxchg(mvalue, newval, &mutex->value) == 0)) {
            /* mutex is still locked, no need for a memory barrier */
            return 0;
        }
        /* the value was changed; this happens when another thread changes
         * the lower state bits from 1 to 2 to indicate contention. This
         * cannot change the counter, so simply reload and try again.
         */
        mvalue = mutex->value;
    }
}
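
/* Worked example (illustrative): for a recursive mutex value with a counter
 * of 3, such as 0x1234400d from the layout example above, adding
 * MUTEX_COUNTER_BITS_ONE (1 << MUTEX_COUNTER_SHIFT == 4) yields 0x12344011,
 * i.e. the counter field becomes 4 while the owner, type and state bits are
 * left intact. The overflow check above guarantees the addition cannot carry
 * into the shared bit.
 */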

int pthread_mutex_lock(pthread_mutex_t* mutex) {
#if !defined(__LP64__)
    if (mutex == NULL) {
        return EINVAL;
    }
#endif

    int mvalue, mtype, tid, shared;

    mvalue = mutex->value;
    mtype = (mvalue & MUTEX_TYPE_MASK);
    shared = (mvalue & MUTEX_SHARED_MASK);

    /* Handle non-recursive case first */
    if (__predict_true(mtype == MUTEX_TYPE_BITS_NORMAL)) {
        _normal_lock(mutex, shared);
        return 0;
    }

    /* Do we already own this recursive or error-check mutex? */
    tid = __get_thread()->tid;
    if (tid == MUTEX_OWNER_FROM_BITS(mvalue))
        return _recursive_increment(mutex, mvalue, mtype);

    /* Add in shared state to avoid extra 'or' operations below */
    mtype |= shared;

    /* First, if the mutex is unlocked, try to quickly acquire it.
     * In the optimistic case where this works, set the state to 1 to
     * indicate locked with no contention */
    if (mvalue == mtype) {
        int newval = MUTEX_OWNER_TO_BITS(tid) | mtype | MUTEX_STATE_BITS_LOCKED_UNCONTENDED;
        if (__bionic_cmpxchg(mvalue, newval, &mutex->value) == 0) {
            ANDROID_MEMBAR_FULL();
            return 0;
        }
        /* argh, the value changed, reload before entering the loop */
        mvalue = mutex->value;
    }

    for (;;) {
        int newval;

        /* if the mutex is unlocked, its value should be 'mtype' and
         * we try to acquire it by setting its owner and state atomically.
         * NOTE: We put the state to 2 since we _know_ there is contention
         * when we are in this loop. This ensures the unlocking thread
         * will wake up any waiters.
         */
        if (mvalue == mtype) {
            newval = MUTEX_OWNER_TO_BITS(tid) | mtype | MUTEX_STATE_BITS_LOCKED_CONTENDED;
            /* TODO: Change this to __bionic_cmpxchg_acquire when we
             *        implement it to get rid of the explicit memory
             *        barrier below.
             */
            if (__predict_false(__bionic_cmpxchg(mvalue, newval, &mutex->value) != 0)) {
                mvalue = mutex->value;
                continue;
            }
            ANDROID_MEMBAR_FULL();
            return 0;
        }

        /* the mutex is already locked by another thread; if its state is 1,
         * we change it to 2 to indicate contention. */
        if (MUTEX_STATE_BITS_IS_LOCKED_UNCONTENDED(mvalue)) {
            newval = MUTEX_STATE_BITS_FLIP_CONTENTION(mvalue); /* locked state 1 => state 2 */
            if (__predict_false(__bionic_cmpxchg(mvalue, newval, &mutex->value) != 0)) {
                mvalue = mutex->value;
                continue;
            }
            mvalue = newval;
        }

        /* wait until the mutex is unlocked */
        __futex_wait_ex(&mutex->value, shared, mvalue, NULL);

        mvalue = mutex->value;
    }
    /* NOTREACHED */
}

int pthread_mutex_unlock(pthread_mutex_t* mutex) {
#if !defined(__LP64__)
    if (mutex == NULL) {
        return EINVAL;
    }
#endif

    int mvalue, mtype, tid, shared;

    mvalue = mutex->value;
    mtype  = (mvalue & MUTEX_TYPE_MASK);
    shared = (mvalue & MUTEX_SHARED_MASK);

    /* Handle common case first */
    if (__predict_true(mtype == MUTEX_TYPE_BITS_NORMAL)) {
        _normal_unlock(mutex, shared);
        return 0;
    }

    /* Do we already own this recursive or error-check mutex? */
    tid = __get_thread()->tid;
    if (tid != MUTEX_OWNER_FROM_BITS(mvalue))
        return EPERM;

    /* If the counter is > 0, we can simply decrement it atomically.
     * Since other threads can mutate the lower state bits (and only the
     * lower state bits), use a cmpxchg to do it.
     */
    if (!MUTEX_COUNTER_BITS_IS_ZERO(mvalue)) {
        for (;;) {
            int newval = mvalue - MUTEX_COUNTER_BITS_ONE;
            if (__predict_true(__bionic_cmpxchg(mvalue, newval, &mutex->value) == 0)) {
                /* success: we still own the mutex, so no memory barrier */
                return 0;
            }
            /* the value changed, so reload and loop */
            mvalue = mutex->value;
        }
    }

    /* the counter is 0, so we're going to unlock the mutex by resetting
     * its value to 'unlocked'. We need to perform a swap in order
     * to read the current state, which will be 2 if there are waiters
     * to awake.
     *
     * TODO: Change this to __bionic_swap_release when we implement it
     *        to get rid of the explicit memory barrier below.
     */
    ANDROID_MEMBAR_FULL();  /* RELEASE BARRIER */
    mvalue = __bionic_swap(mtype | shared | MUTEX_STATE_BITS_UNLOCKED, &mutex->value);

    /* Wake one waiting thread, if any */
    if (MUTEX_STATE_BITS_IS_LOCKED_CONTENDED(mvalue)) {
        __futex_wake_ex(&mutex->value, shared, 1);
    }
    return 0;
}
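
/* Illustrative sketch of the errorcheck behavior implemented above (not part
 * of the implementation, and assuming the PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP
 * initializer from <pthread.h>):
 *
 *   pthread_mutex_t m = PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP;
 *   pthread_mutex_lock(&m);     // 0: owner tid is recorded in m.value
 *   pthread_mutex_lock(&m);     // EDEADLK: relock attempt by the owner
 *   pthread_mutex_unlock(&m);   // 0: owner bits cleared
 *   pthread_mutex_unlock(&m);   // EPERM: caller no longer owns the mutex
 */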

int pthread_mutex_trylock(pthread_mutex_t* mutex) {
    int mvalue, mtype, tid, shared;

    mvalue = mutex->value;
    mtype  = (mvalue & MUTEX_TYPE_MASK);
    shared = (mvalue & MUTEX_SHARED_MASK);

    /* Handle common case first */
    if (__predict_true(mtype == MUTEX_TYPE_BITS_NORMAL)) {
        if (__bionic_cmpxchg(shared|MUTEX_STATE_BITS_UNLOCKED,
                             shared|MUTEX_STATE_BITS_LOCKED_UNCONTENDED,
                             &mutex->value) == 0) {
            ANDROID_MEMBAR_FULL();
            return 0;
        }

        return EBUSY;
    }

    /* Do we already own this recursive or error-check mutex? */
    tid = __get_thread()->tid;
    if (tid == MUTEX_OWNER_FROM_BITS(mvalue))
        return _recursive_increment(mutex, mvalue, mtype);

    /* Same as pthread_mutex_lock, except that we don't want to wait, and
     * the only operation that can succeed is a single cmpxchg to acquire the
     * lock if it is released / not owned by anyone. No need for a complex loop.
     */
    mtype |= shared | MUTEX_STATE_BITS_UNLOCKED;
    mvalue = MUTEX_OWNER_TO_BITS(tid) | mtype | MUTEX_STATE_BITS_LOCKED_UNCONTENDED;

    if (__predict_true(__bionic_cmpxchg(mtype, mvalue, &mutex->value) == 0)) {
        ANDROID_MEMBAR_FULL();
        return 0;
    }

    return EBUSY;
}

static int __pthread_mutex_timedlock(pthread_mutex_t* mutex, const timespec* abs_timeout, clockid_t clock) {
  timespec ts;

  int mvalue = mutex->value;
  int mtype  = (mvalue & MUTEX_TYPE_MASK);
  int shared = (mvalue & MUTEX_SHARED_MASK);

  // Handle common case first.
  if (__predict_true(mtype == MUTEX_TYPE_BITS_NORMAL)) {
    const int unlocked           = shared | MUTEX_STATE_BITS_UNLOCKED;
    const int locked_uncontended = shared | MUTEX_STATE_BITS_LOCKED_UNCONTENDED;
    const int locked_contended   = shared | MUTEX_STATE_BITS_LOCKED_CONTENDED;

    // Fast path for uncontended lock. Note: MUTEX_TYPE_BITS_NORMAL is 0.
    if (__bionic_cmpxchg(unlocked, locked_uncontended, &mutex->value) == 0) {
      ANDROID_MEMBAR_FULL();
      return 0;
    }

    // Loop while needed.
    while (__bionic_swap(locked_contended, &mutex->value) != unlocked) {
      if (__timespec_from_absolute(&ts, abs_timeout, clock) < 0) {
        return ETIMEDOUT;
      }
      __futex_wait_ex(&mutex->value, shared, locked_contended, &ts);
    }
    ANDROID_MEMBAR_FULL();
    return 0;
  }

  // Do we already own this recursive or error-check mutex?
  pid_t tid = __get_thread()->tid;
  if (tid == MUTEX_OWNER_FROM_BITS(mvalue)) {
    return _recursive_increment(mutex, mvalue, mtype);
  }

  // The following implements the same loop as pthread_mutex_lock
  // but adds checks to ensure that the operation never exceeds the
  // absolute expiration time.
  mtype |= shared;

  // First try a quick lock.
  if (mvalue == mtype) {
    mvalue = MUTEX_OWNER_TO_BITS(tid) | mtype | MUTEX_STATE_BITS_LOCKED_UNCONTENDED;
    if (__predict_true(__bionic_cmpxchg(mtype, mvalue, &mutex->value) == 0)) {
      ANDROID_MEMBAR_FULL();
      return 0;
    }
    mvalue = mutex->value;
  }

  while (true) {
    // If the value is 'unlocked', try to acquire it directly.
    // NOTE: put state to 2 since we know there is contention.
    if (mvalue == mtype) { // Unlocked.
      mvalue = MUTEX_OWNER_TO_BITS(tid) | mtype | MUTEX_STATE_BITS_LOCKED_CONTENDED;
      if (__bionic_cmpxchg(mtype, mvalue, &mutex->value) == 0) {
        ANDROID_MEMBAR_FULL();
        return 0;
      }
      // The value changed before we could lock it. We need to check
      // the time to avoid livelocks, reload the value, then loop again.
      if (__timespec_from_absolute(&ts, abs_timeout, clock) < 0) {
        return ETIMEDOUT;
      }

      mvalue = mutex->value;
      continue;
    }

    // The value is locked. If 'uncontended', try to switch its state
    // to 'contended' to ensure we get woken up later.
    if (MUTEX_STATE_BITS_IS_LOCKED_UNCONTENDED(mvalue)) {
      int newval = MUTEX_STATE_BITS_FLIP_CONTENTION(mvalue);
      if (__bionic_cmpxchg(mvalue, newval, &mutex->value) != 0) {
        // This failed because the value changed; reload it.
        mvalue = mutex->value;
      } else {
        // This succeeded; update mvalue.
        mvalue = newval;
      }
    }

    // Check time and update 'ts'.
    if (__timespec_from_absolute(&ts, abs_timeout, clock) < 0) {
      return ETIMEDOUT;
    }

    // Only wait to be woken up if the state is '2', otherwise we'll
    // simply loop right now. This can happen when the second cmpxchg
    // in our loop failed because the mutex was unlocked by another thread.
    if (MUTEX_STATE_BITS_IS_LOCKED_CONTENDED(mvalue)) {
      if (__futex_wait_ex(&mutex->value, shared, mvalue, &ts) == -ETIMEDOUT) {
        return ETIMEDOUT;
      }
      mvalue = mutex->value;
    }
  }
  /* NOTREACHED */
}

#if !defined(__LP64__)
extern "C" int pthread_mutex_lock_timeout_np(pthread_mutex_t* mutex, unsigned ms) {
  timespec abs_timeout;
  clock_gettime(CLOCK_MONOTONIC, &abs_timeout);
  abs_timeout.tv_sec  += ms / 1000;
  abs_timeout.tv_nsec += (ms % 1000) * 1000000;
  if (abs_timeout.tv_nsec >= 1000000000) {
    abs_timeout.tv_sec++;
    abs_timeout.tv_nsec -= 1000000000;
  }

  int error = __pthread_mutex_timedlock(mutex, &abs_timeout, CLOCK_MONOTONIC);
  if (error == ETIMEDOUT) {
    error = EBUSY;
  }
  return error;
}
#endif

int pthread_mutex_timedlock(pthread_mutex_t* mutex, const timespec* abs_timeout) {
  return __pthread_mutex_timedlock(mutex, abs_timeout, CLOCK_REALTIME);
}
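
// Illustrative usage sketch (not part of the implementation): the timeout is
// an absolute CLOCK_REALTIME time, so a relative delay must be added to the
// current time first:
//
//   timespec abs_timeout;
//   clock_gettime(CLOCK_REALTIME, &abs_timeout);
//   abs_timeout.tv_sec += 1;  // give up after one second
//   int error = pthread_mutex_timedlock(&mutex, &abs_timeout);  // 0 or ETIMEDOUT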

int pthread_mutex_destroy(pthread_mutex_t* mutex) {
  // Use trylock to ensure that the mutex is valid and not already locked.
  int error = pthread_mutex_trylock(mutex);
  if (error != 0) {
    return error;
  }
  mutex->value = 0xdead10cc;
  return 0;
}
    755