/*
 * Copyright (C) 2008 The Android Open Source Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "pthread_internal.h"

#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>

#include <async_safe/log.h>

#include "private/bionic_futex.h"
#include "private/bionic_sdk_version.h"
#include "private/bionic_tls.h"

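// All threads known to bionic are kept on a global doubly-linked list of
// pthread_internal_t structures, guarded by this read-write lock.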
static pthread_internal_t* g_thread_list = nullptr;
static pthread_rwlock_t g_thread_list_lock = PTHREAD_RWLOCK_INITIALIZER;

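// RAII guard for a pthread_rwlock_t: acquires the write lock when the
// template parameter is true, the read lock when it is false, and unlocks
// in the destructor. Used below through the ScopedWriteLock/ScopedReadLock
// aliases, e.g.:
//
//   ScopedReadLock locker(&g_thread_list_lock);
//   // ... walk g_thread_list while holding the read lock ...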
template <bool write> class ScopedRWLock {
 public:
  ScopedRWLock(pthread_rwlock_t* rwlock) : rwlock_(rwlock) {
    (write ? pthread_rwlock_wrlock : pthread_rwlock_rdlock)(rwlock_);
  }

  ~ScopedRWLock() {
    pthread_rwlock_unlock(rwlock_);
  }

 private:
  pthread_rwlock_t* rwlock_;
  DISALLOW_IMPLICIT_CONSTRUCTORS(ScopedRWLock);
};

typedef ScopedRWLock<true> ScopedWriteLock;
typedef ScopedRWLock<false> ScopedReadLock;

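// Links a newly created thread into the global list and returns the opaque
// pthread_t handle callers will use to refer to it (here simply the
// pthread_internal_t pointer reinterpreted).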
pthread_t __pthread_internal_add(pthread_internal_t* thread) {
  ScopedWriteLock locker(&g_thread_list_lock);

  // We insert at the head.
  thread->next = g_thread_list;
  thread->prev = nullptr;
  if (thread->next != nullptr) {
    thread->next->prev = thread;
  }
  g_thread_list = thread;
  return reinterpret_cast<pthread_t>(thread);
}

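// Unlinks a thread from the global list without releasing its memory.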
void __pthread_internal_remove(pthread_internal_t* thread) {
  ScopedWriteLock locker(&g_thread_list_lock);

  if (thread->next != nullptr) {
    thread->next->prev = thread->prev;
  }
  if (thread->prev != nullptr) {
    thread->prev->next = thread->next;
  } else {
    g_thread_list = thread->next;
  }
}

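// Releases the mapping that holds the thread's stack and its
// pthread_internal_t, if bionic allocated one (mmap_size is 0 when bionic
// did not map anything for this thread, e.g. a caller-supplied stack, so
// there is nothing to unmap).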
static void __pthread_internal_free(pthread_internal_t* thread) {
  if (thread->mmap_size != 0) {
    // Free mapped space, including thread stack and pthread_internal_t.
    munmap(thread->attr.stack_base, thread->mmap_size);
  }
}

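// Convenience wrapper: unlink the thread from the global list, then release
// its mapping.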
void __pthread_internal_remove_and_free(pthread_internal_t* thread) {
  __pthread_internal_remove(thread);
  __pthread_internal_free(thread);
}

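// Converts a pthread_t handle back to a pthread_internal_t*, checking it
// against the calling thread first and then against the global list. For
// apps targeting API 26 (Android O) or later, an invalid handle is fatal
// (a null handle is only logged, see below); older targets simply get
// nullptr back. A typical caller looks roughly like this (a sketch, not the
// exact bionic code):
//
//   int pthread_some_api(pthread_t t) {
//     pthread_internal_t* thread = __pthread_internal_find(t);
//     if (thread == nullptr) return ESRCH;
//     // ... operate on *thread ...
//   }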
pthread_internal_t* __pthread_internal_find(pthread_t thread_id) {
  pthread_internal_t* thread = reinterpret_cast<pthread_internal_t*>(thread_id);

  // Check if we're looking for ourselves before acquiring the lock.
  if (thread == __get_thread()) return thread;

  {
    // Make sure to release the lock before the abort below. Otherwise,
    // some apps might deadlock in their own crash handlers (see b/6565627).
    ScopedReadLock locker(&g_thread_list_lock);
    for (pthread_internal_t* t = g_thread_list; t != nullptr; t = t->next) {
      if (t == thread) return thread;
    }
  }

  // Historically we'd return null, but from API level 26 we catch this error.
  if (bionic_get_application_target_sdk_version() >= __ANDROID_API_O__) {
    if (thread == nullptr) {
      // This seems to be a common mistake, and it's relatively harmless because
      // there will never be a valid thread at address 0, whereas other invalid
      // addresses might sometimes contain threads or things that look enough like
      // threads for us to do some real damage by continuing.
      // TODO: try getting rid of this when Treble lets us keep vendor blobs on an old API level.
      async_safe_format_log(ANDROID_LOG_WARN, "libc", "invalid pthread_t (0) passed to libc");
    } else {
      async_safe_fatal("invalid pthread_t %p passed to libc", thread);
    }
  }
  return nullptr;
}