/*
 * Copyright (C) 2011 The Android Open Source Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/types.h>
#include <sys/atomics.h>
#include <sys/system_properties.h>
#include <sys/mman.h>

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>  // for memcpy/memset used below
#include <errno.h>
#include <pthread.h>
#include <unwind.h>
#include <unistd.h>

#include "bionic_tls.h"
#include "debug_mapinfo.h"
#include "debug_stacktrace.h"
#include "libc_logging.h"

/*
 * ===========================================================================
 *      Deadlock prediction
 * ===========================================================================
 */
/*
The idea is to predict the possibility of deadlock by recording the order
in which locks are acquired.  If we see an attempt to acquire a lock
out of order, we can identify the locks and offending code.

To make this work, we need to keep track of the locks held by each thread,
and create history trees for each lock.  When a thread tries to acquire
a new lock, we walk through the "history children" of the lock, looking
for a match with locks the thread already holds.  If we find a match,
it means the thread has made a request that could result in a deadlock.

To support recursive locks, we always allow re-locking a currently-held
lock, and maintain a recursion depth count.

An ASCII-art example, where letters represent locks:

        A
       /|\
      / | \
     B  |  D
      \ |
       \|
        C

The above is the tree we'd have after handling the lock synchronization
sequences "ABC", "AC", "AD".  A has three children, {B, C, D}.  C is also
a child of B.  (The lines represent pointers between parent and child.
Every node can have multiple parents and multiple children.)

If we hold AC, and want to lock B, we recursively search through B's
children to see if A or C appears.  C does, so we reject the attempt.
(A straightforward way to implement it: add a link from C to B, then
determine whether the graph starting at B contains a cycle.)

If we hold AC and want to lock D, we would succeed, creating a new link
from C to D.

Updates to MutexInfo structs are only allowed for the thread that holds
the lock, so we actually do most of our deadlock prediction work after
the lock has been acquired.
*/
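
/*
A concrete two-mutex instance of the scheme above: if one thread locks A
and then B, the link A -> B is recorded.  If any thread later locks B and
then A, adding the link B -> A closes the cycle A -> B -> A and the attempt
is reported -- even if no deadlock actually occurs at runtime, since this
is prediction based on acquisition order, not detection of a live deadlock.
*/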

// =============================================================================
// log functions
// =============================================================================

#define LOGD(format, ...)  \
    __libc_format_log(ANDROID_LOG_DEBUG, "pthread_debug", (format), ##__VA_ARGS__ )

#define LOGW(format, ...)  \
    __libc_format_log(ANDROID_LOG_WARN, "pthread_debug", (format), ##__VA_ARGS__ )

#define LOGE(format, ...)  \
    __libc_format_log(ANDROID_LOG_ERROR, "pthread_debug", (format), ##__VA_ARGS__ )

#define LOGI(format, ...)  \
    __libc_format_log(ANDROID_LOG_INFO, "pthread_debug", (format), ##__VA_ARGS__ )

static const char* const kStartBanner =
        "===============================================================";

static const char* const kEndBanner =
        "===============================================================";

extern const char* __progname;

#define STACK_TRACE_DEPTH 16

/****************************************************************************/

/*
 * level <= 0 : deadlock prediction disabled
 * level    1 : deadlock prediction enabled, w/o call stacks
 * level    2 : deadlock prediction enabled, w/ call stacks
 */
#define CAPTURE_CALLSTACK 2
static int sPthreadDebugLevel = 0;
static pid_t sPthreadDebugDisabledThread = -1;
static pthread_mutex_t sDbgLock = PTHREAD_MUTEX_INITIALIZER;

/****************************************************************************/

/* some simple/lame malloc replacement
 * NOT thread-safe and leaks everything
 */

#define DBG_ALLOC_BLOCK_SIZE PAGESIZE
static size_t sDbgAllocOffset = DBG_ALLOC_BLOCK_SIZE;
static char* sDbgAllocPtr = NULL;

template <typename T>
static T* DbgAllocLocked(size_t count = 1) {
    size_t size = sizeof(T) * count;
    if ((sDbgAllocOffset + size) > DBG_ALLOC_BLOCK_SIZE) {
        sDbgAllocPtr = reinterpret_cast<char*>(mmap(NULL, DBG_ALLOC_BLOCK_SIZE,
                                                    PROT_READ|PROT_WRITE,
                                                    MAP_ANON | MAP_PRIVATE, -1, 0));
        if (sDbgAllocPtr == MAP_FAILED) {
            // mark the allocator exhausted so the next call retries the
            // mapping instead of bumping a stale or invalid base pointer
            sDbgAllocPtr = NULL;
            sDbgAllocOffset = DBG_ALLOC_BLOCK_SIZE;
            return NULL;
        }
        sDbgAllocOffset = 0;
    }
    void* addr = sDbgAllocPtr + sDbgAllocOffset;
    sDbgAllocOffset += size;
    return reinterpret_cast<T*>(addr);
}
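
// Design note: this is a simple bump allocator over page-sized anonymous
// mappings; exhausted blocks are abandoned rather than freed (hence "leaks
// everything" above).  Despite the warning, every caller in this file
// invokes it while holding sDbgLock, which is what the "Locked" suffix
// refers to.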

static void* debug_realloc(void *ptr, size_t size, size_t old_size) {
    void* addr = mmap(NULL, size, PROT_READ|PROT_WRITE,
            MAP_ANON | MAP_PRIVATE, -1, 0);
    if (addr != MAP_FAILED) {
        if (ptr) {
            // lists only ever grow in practice, but copy at most the smaller
            // of the two sizes so a shrinking call cannot overrun the mapping
            memcpy(addr, ptr, size < old_size ? size : old_size);
            munmap(ptr, old_size);
        }
    } else {
        addr = NULL;
    }
    return addr;
}

/*****************************************************************************/

struct MutexInfo;

typedef struct CallStack {
    uintptr_t    depth;
    uintptr_t*   addrs;
} CallStack;

typedef struct MutexInfo* MutexInfoListEntry;
typedef struct CallStack  CallStackListEntry;

typedef struct GrowingList {
    int alloc;
    int count;
    union {
        void*               data;
        MutexInfoListEntry* list;
        CallStackListEntry* stack;
    };
} GrowingList;

typedef GrowingList MutexInfoList;
typedef GrowingList CallStackList;

typedef struct MutexInfo {
    // thread currently holding the lock or 0
    pid_t               owner;

    // most-recently-locked doubly-linked list
    struct MutexInfo*   prev;
    struct MutexInfo*   next;

    // for reentrant locks
    int                 lockCount;
    // when looking for loops in the graph, marks visited nodes
    int                 historyMark;
    // the actual mutex
    pthread_mutex_t*    mutex;
    // list of locks directly acquired AFTER this one in the same thread
    MutexInfoList       children;
    // list of locks directly acquired BEFORE this one in the same thread
    MutexInfoList       parents;
    // list of call stacks, one per link established to this lock from its parent
    CallStackList       stacks;
    // call stack when this lock was acquired last
    int                 stackDepth;
    uintptr_t           stackTrace[STACK_TRACE_DEPTH];
} MutexInfo;

static void growingListInit(GrowingList* list) {
    list->alloc = 0;
    list->count = 0;
    list->data = NULL;
}

static void growingListAdd(GrowingList* pList, size_t objSize) {
    if (pList->count == pList->alloc) {
        size_t oldsize = pList->alloc * objSize;
        pList->alloc += PAGESIZE / objSize;
        size_t size = pList->alloc * objSize;
        pList->data = debug_realloc(pList->data, size, oldsize);
    }
    pList->count++;
}
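
// Note: if debug_realloc() fails, pList->data becomes NULL and the caller's
// next store through the list will fault; this debug-only allocator trades
// that robustness for simplicity.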

static void initMutexInfo(MutexInfo* object, pthread_mutex_t* mutex) {
    object->owner = 0;
    object->prev = 0;
    object->next = 0;
    object->lockCount = 0;
    object->historyMark = 0;
    object->mutex = mutex;
    growingListInit(&object->children);
    growingListInit(&object->parents);
    growingListInit(&object->stacks);
    object->stackDepth = 0;
}

typedef struct ThreadInfo {
    pid_t       pid;
    MutexInfo*  mrl;
} ThreadInfo;

static void initThreadInfo(ThreadInfo* object, pid_t pid) {
    object->pid = pid;
    object->mrl = NULL;
}

/****************************************************************************/

static MutexInfo* get_mutex_info(pthread_mutex_t *mutex);
static void mutex_lock_checked(MutexInfo* mrl, MutexInfo* object);
static void mutex_unlock_checked(MutexInfo* object);

/****************************************************************************/

extern "C" int pthread_mutex_lock_impl(pthread_mutex_t *mutex);
extern "C" int pthread_mutex_unlock_impl(pthread_mutex_t *mutex);

static int pthread_mutex_lock_unchecked(pthread_mutex_t *mutex) {
    return pthread_mutex_lock_impl(mutex);
}

static int pthread_mutex_unlock_unchecked(pthread_mutex_t *mutex) {
    return pthread_mutex_unlock_impl(mutex);
}

/****************************************************************************/

static void dup_backtrace(CallStack* stack, size_t count, uintptr_t const* addrs) {
    stack->addrs = DbgAllocLocked<uintptr_t>(count);
    if (stack->addrs == NULL) {
        // allocation failed: record an empty stack instead of crashing
        stack->depth = 0;
        return;
    }
    stack->depth = count;
    memcpy(stack->addrs, addrs, count * sizeof(uintptr_t));
}

/****************************************************************************/

static int historyListHas(
        const MutexInfoList* list, MutexInfo const * obj) {
    int i;
    for (i=0; i<list->count; i++) {
        if (list->list[i] == obj) {
            return i;
        }
    }
    return -1;
}

static void historyListAdd(MutexInfoList* pList, MutexInfo* obj) {
    growingListAdd(pList, sizeof(MutexInfoListEntry));
    pList->list[pList->count - 1] = obj;
}

static int historyListRemove(MutexInfoList* pList, MutexInfo* obj) {
    int i;
    for (i = pList->count-1; i >= 0; i--) {
        if (pList->list[i] == obj) {
            break;
        }
    }
    if (i < 0) {
        // not found!
        return 0;
    }

    if (i != pList->count-1) {
        // copy the last entry to the new free slot
        pList->list[i] = pList->list[pList->count-1];
    }
    pList->count--;
    memset(&pList->list[pList->count], 0, sizeof(MutexInfoListEntry));
    return 1;
}

static void linkParentToChild(MutexInfo* parent, MutexInfo* child) {
    historyListAdd(&parent->children, child);
    historyListAdd(&child->parents, parent);
}

static void unlinkParentFromChild(MutexInfo* parent, MutexInfo* child) {
    historyListRemove(&parent->children, child);
    historyListRemove(&child->parents, parent);
}

/****************************************************************************/

static void callstackListAdd(CallStackList* pList,
        int count, uintptr_t const* addrs) {
    growingListAdd(pList, sizeof(CallStackListEntry));
    dup_backtrace(&pList->stack[pList->count - 1], count, addrs);
}

/****************************************************************************/

/*
 * Recursively traverse the object hierarchy starting at "obj".  We mark
 * ourselves on entry and clear the mark on exit.  If we ever encounter
 * a marked object, we have a cycle.
 *
 * Returns "true" if all is well, "false" if we found a cycle.
 */
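
/*
 * Example: suppose a thread once locked B then C, recording the link
 * B -> C.  If, while holding C, it now tries to lock B, the caller first
 * adds the link C -> B and then calls traverseTree(B, C): B is marked,
 * the walk descends into B's child C, then into C's child B, finds B's
 * mark still set, and reports the cycle.
 */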

static int traverseTree(MutexInfo* obj, MutexInfo const* objParent)
{
    /*
     * Have we been here before?
     */
    if (obj->historyMark) {
        int stackDepth;
        uintptr_t addrs[STACK_TRACE_DEPTH];

        /* Turn off prediction temporarily in this thread while logging */
        sPthreadDebugDisabledThread = gettid();

        backtrace_startup();

        LOGW("%s\n", kStartBanner);
        LOGW("pid: %d, tid: %d >>> %s <<<", getpid(), gettid(), __progname);
        LOGW("Illegal lock attempt:\n");
        LOGW("--- pthread_mutex_t at %p\n", obj->mutex);
        stackDepth = get_backtrace(addrs, STACK_TRACE_DEPTH);
        log_backtrace(addrs, stackDepth);

        LOGW("+++ Currently held locks in this thread (in reverse order):");
        MutexInfo* cur = obj;
        pid_t ourtid = gettid();
        int i;
        for (i=0 ; i<cur->parents.count ; i++) {
            MutexInfo* parent = cur->parents.list[i];
            if (parent->owner == ourtid) {
                LOGW("--- pthread_mutex_t at %p\n", parent->mutex);
                if (sPthreadDebugLevel >= CAPTURE_CALLSTACK) {
                    log_backtrace(parent->stackTrace, parent->stackDepth);
                }
                cur = parent;
                break;
            }
        }

        LOGW("+++ Earlier, the following lock order (from last to first) was established\n");
        return 0;
    }

    obj->historyMark = 1;

    MutexInfoList* pList = &obj->children;
    int result = 1;
    int i;
    for (i = pList->count-1; i >= 0; i--) {
        MutexInfo* child = pList->list[i];
        if (!traverseTree(child, obj)) {
            LOGW("--- pthread_mutex_t at %p\n", obj->mutex);
            if (sPthreadDebugLevel >= CAPTURE_CALLSTACK) {
                int index = historyListHas(&obj->parents, objParent);
                if ((size_t)index < (size_t)obj->stacks.count) {
                    log_backtrace(obj->stacks.stack[index].addrs, obj->stacks.stack[index].depth);
                } else {
                    log_backtrace(obj->stackTrace, obj->stackDepth);
                }
            }
            result = 0;
            break;
        }
    }

    obj->historyMark = 0;
    return result;
}

/****************************************************************************/

static void mutex_lock_checked(MutexInfo* mrl, MutexInfo* object)
{
    pid_t tid = gettid();
    if (object->owner == tid) {
        object->lockCount++;
        return;
    }

    object->owner = tid;
    object->lockCount = 0;

    if (sPthreadDebugLevel >= CAPTURE_CALLSTACK) {
        // always record the call stack when acquiring a lock.
        // it's not efficient, but is useful during diagnostics
        object->stackDepth = get_backtrace(object->stackTrace, STACK_TRACE_DEPTH);
    }

    // no other locks held in this thread -- no deadlock possible!
    if (mrl == NULL)
        return;

    // check if the lock we're trying to acquire is a direct descendant of
    // the most recently locked mutex in this thread, in which case we're
    // in a good situation -- no deadlock possible
    if (historyListHas(&mrl->children, object) >= 0)
        return;

    pthread_mutex_lock_unchecked(&sDbgLock);

    linkParentToChild(mrl, object);
    if (!traverseTree(object, mrl)) {
        backtrace_shutdown();
        LOGW("%s\n", kEndBanner);
        unlinkParentFromChild(mrl, object);
        // reenable pthread debugging for this thread
        sPthreadDebugDisabledThread = -1;
    } else {
        // record the call stack for this link
        // NOTE: the call stack is added at the same index
        // as mrl in object->parents[]
        // i.e.: object->parents.count == object->stacks.count, which is
        // also the index.
        if (sPthreadDebugLevel >= CAPTURE_CALLSTACK) {
            callstackListAdd(&object->stacks,
                    object->stackDepth, object->stackTrace);
        }
    }

    pthread_mutex_unlock_unchecked(&sDbgLock);
}

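// Note: lockCount counts re-acquisitions only (it stays 0 while the lock is
// held exactly once), which is why ownership is released below as soon as an
// unlock finds the count already at 0.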
static void mutex_unlock_checked(MutexInfo* object)
{
    pid_t tid = gettid();
    if (object->owner == tid) {
        if (object->lockCount == 0) {
            object->owner = 0;
        } else {
            object->lockCount--;
        }
    }
}


// =============================================================================
// Hash Table functions
// =============================================================================

/****************************************************************************/

#define HASHTABLE_SIZE      256

typedef struct HashEntry HashEntry;
struct HashEntry {
    size_t slot;
    HashEntry* prev;
    HashEntry* next;
    void* data;
};

typedef struct HashTable HashTable;
struct HashTable {
    HashEntry* slots[HASHTABLE_SIZE];
};

static HashTable sMutexMap;
static HashTable sThreadMap;

/****************************************************************************/

static uint32_t get_hashcode(void const * key, size_t keySize)
{
    uint32_t h = keySize;
    char const* data = (char const*)key;
    size_t i;
    for (i = 0; i < keySize; i++) {
        h = h * 31 + *data;
        data++;
    }
    return (uint32_t)h;
}

static size_t get_index(uint32_t h)
{
    // We apply this secondary hashing discovered by Doug Lea to defend
    // against bad hashes.
    h += ~(h << 9);
    h ^= (((unsigned int) h) >> 14);
    h += (h << 4);
    h ^= (((unsigned int) h) >> 10);
    return (size_t)h & (HASHTABLE_SIZE - 1);
}

/****************************************************************************/

static void hashmap_init(HashTable* table) {
    memset(table, 0, sizeof(HashTable));
}

static void hashmap_removeEntry(HashTable* table, HashEntry* entry)
{
    HashEntry* prev = entry->prev;
    HashEntry* next = entry->next;
    if (prev != NULL) entry->prev->next = next;
    if (next != NULL) entry->next->prev = prev;
    if (prev == NULL) {
        // we are the head of the list. set the head to be next
        table->slots[entry->slot] = entry->next;
    }
}

static HashEntry* hashmap_lookup(HashTable* table,
        void const* key, size_t ksize,
        int (*equals)(void const* data, void const* key))
{
    const uint32_t hash = get_hashcode(key, ksize);
    const size_t slot = get_index(hash);

    HashEntry* entry = table->slots[slot];
    while (entry) {
        if (equals(entry->data, key)) {
            break;
        }
        entry = entry->next;
    }

    if (entry == NULL) {
        // create a new entry
        entry = DbgAllocLocked<HashEntry>();
        entry->data = NULL;
        entry->slot = slot;
        entry->prev = NULL;
        entry->next = table->slots[slot];
        if (entry->next != NULL) {
            entry->next->prev = entry;
        }
        table->slots[slot] = entry;
    }
    return entry;
}

/****************************************************************************/

static int MutexInfo_equals(void const* data, void const* key) {
    return ((MutexInfo const *)data)->mutex == *(pthread_mutex_t **)key;
}

static MutexInfo* get_mutex_info(pthread_mutex_t *mutex)
{
    pthread_mutex_lock_unchecked(&sDbgLock);

    HashEntry* entry = hashmap_lookup(&sMutexMap,
            &mutex, sizeof(mutex),
            &MutexInfo_equals);
    if (entry->data == NULL) {
        MutexInfo* mutex_info = DbgAllocLocked<MutexInfo>();
        entry->data = mutex_info;
        initMutexInfo(mutex_info, mutex);
    }

    pthread_mutex_unlock_unchecked(&sDbgLock);

    return (MutexInfo *)entry->data;
}

/****************************************************************************/

static int ThreadInfo_equals(void const* data, void const* key) {
    return ((ThreadInfo const *)data)->pid == *(pid_t *)key;
}

static ThreadInfo* get_thread_info(pid_t pid)
{
    pthread_mutex_lock_unchecked(&sDbgLock);

    HashEntry* entry = hashmap_lookup(&sThreadMap,
            &pid, sizeof(pid),
            &ThreadInfo_equals);
    if (entry->data == NULL) {
        ThreadInfo* thread_info = DbgAllocLocked<ThreadInfo>();
        entry->data = thread_info;
        initThreadInfo(thread_info, pid);
    }

    pthread_mutex_unlock_unchecked(&sDbgLock);

    return (ThreadInfo *)entry->data;
}

static void push_most_recently_locked(MutexInfo* mrl) {
    ThreadInfo* tinfo = get_thread_info(gettid());
    if (tinfo->mrl == mrl) {
        // recursive re-lock of the current head; it is already in place
        return;
    }
    mrl->next = NULL;
    mrl->prev = tinfo->mrl;
    if (tinfo->mrl) {
        // keep the list doubly linked so out-of-order unlocks can splice
        tinfo->mrl->next = mrl;
    }
    tinfo->mrl = mrl;
}

static void remove_most_recently_locked(MutexInfo* mrl) {
    ThreadInfo* tinfo = get_thread_info(gettid());
    if (mrl->next) {
        (mrl->next)->prev = mrl->prev;
    }
    if (mrl->prev) {
        (mrl->prev)->next = mrl->next;
    }
    if (tinfo->mrl == mrl) {
        // the head has no successor; the new head is the lock acquired
        // just before it
        tinfo->mrl = mrl->prev;
    }
}

static MutexInfo* get_most_recently_locked() {
    ThreadInfo* tinfo = get_thread_info(gettid());
    return tinfo->mrl;
}

/****************************************************************************/

/* pthread_debug_init() is called from libc_init_dynamic() just
 * after system properties have been initialized
 */

extern "C" __LIBC_HIDDEN__ void pthread_debug_init() {
    char env[PROP_VALUE_MAX];
    if (__system_property_get("debug.libc.pthread", env)) {
        int level = atoi(env);
        if (level) {
            LOGI("pthread deadlock detection level %d enabled for pid %d (%s)",
                    level, getpid(), __progname);
            hashmap_init(&sMutexMap);
            hashmap_init(&sThreadMap);
            sPthreadDebugLevel = level;
        }
    }
}
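
// Usage sketch (an illustration, not part of this file): because the level
// is read only once at libc startup, the debug.libc.pthread property must be
// set before the target process starts, e.g.
//     adb shell setprop debug.libc.pthread 2
// and the process to be debugged restarted afterwards.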

/*
 * See if we were allowed to grab the lock at this time.  We do it
 * *after* acquiring the lock, rather than before, so that we can
 * freely update the MutexInfo struct.  This seems counter-intuitive,
 * but our goal is deadlock *prediction*, not deadlock *prevention*.
 * (If we actually deadlock, the situation is easy to diagnose from
 * a thread dump, so there's no point making a special effort to do
 * the checks before the lock is held.)
 */

extern "C" __LIBC_HIDDEN__ void pthread_debug_mutex_lock_check(pthread_mutex_t *mutex)
{
    if (sPthreadDebugLevel == 0) return;
    // prediction disabled for this thread
    if (sPthreadDebugDisabledThread == gettid())
        return;
    MutexInfo* object = get_mutex_info(mutex);
    MutexInfo* mrl = get_most_recently_locked();
    mutex_lock_checked(mrl, object);
    push_most_recently_locked(object);
}

/*
 * pthread_debug_mutex_unlock_check() must be called with the mutex
 * still held (i.e. before calling the real unlock)
 */

extern "C" __LIBC_HIDDEN__ void pthread_debug_mutex_unlock_check(pthread_mutex_t *mutex)
{
    if (sPthreadDebugLevel == 0) return;
    // prediction disabled for this thread
    if (sPthreadDebugDisabledThread == gettid())
        return;
    MutexInfo* object = get_mutex_info(mutex);
    remove_most_recently_locked(object);
    mutex_unlock_checked(object);
}
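
/*
 * End-to-end sketch (assumes bionic's pthread_mutex_lock()/unlock() route
 * through the *_check() hooks above when debugging is enabled):
 *
 *   static pthread_mutex_t gA = PTHREAD_MUTEX_INITIALIZER;
 *   static pthread_mutex_t gB = PTHREAD_MUTEX_INITIALIZER;
 *
 *   void* t1(void*) {                  // establishes the order A -> B
 *       pthread_mutex_lock(&gA); pthread_mutex_lock(&gB);
 *       pthread_mutex_unlock(&gB); pthread_mutex_unlock(&gA);
 *       return NULL;
 *   }
 *   void* t2(void*) {                  // B -> A: flagged by traverseTree()
 *       pthread_mutex_lock(&gB); pthread_mutex_lock(&gA);
 *       pthread_mutex_unlock(&gA); pthread_mutex_unlock(&gB);
 *       return NULL;
 *   }
 *
 * Run t1 to completion, then t2: the second lock in t2 links B -> A, the
 * traversal finds the existing A -> B edge, and the "Illegal lock attempt"
 * banner is logged even though no deadlock actually occurred.
 */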
    716