/*
 * Copyright (C) 2008 The Android Open Source Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <pthread.h>

#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

#include "pthread_internal.h"

#include "private/bionic_macros.h"
#include "private/bionic_ssp.h"
#include "private/bionic_tls.h"
#include "private/libc_logging.h"
#include "private/ErrnoRestorer.h"
#include "private/ScopedPthreadMutexLocker.h"

// x86 uses segment descriptors rather than a direct pointer to TLS.
#if defined(__i386__)
#include <asm/ldt.h>
extern "C" __LIBC_HIDDEN__ void __init_user_desc(struct user_desc*, int, void*);
#endif

extern "C" int __isthreaded;

// This code is used both by each new pthread and the code that initializes the main thread.
void __init_tls(pthread_internal_t* thread) {
  if (thread->user_allocated_stack()) {
    // We don't know where the user got their stack, so assume the worst and zero the TLS area.
    memset(&thread->tls[0], 0, BIONIC_TLS_SLOTS * sizeof(void*));
  }

  // Slot 0 must point to itself. The x86 Linux kernel reads the TLS from %fs:0.
  thread->tls[TLS_SLOT_SELF] = thread->tls;
  thread->tls[TLS_SLOT_THREAD_ID] = thread;
  // GCC looks in the TLS for the stack guard on x86, so copy it there from our global.
  thread->tls[TLS_SLOT_STACK_GUARD] = (void*) __stack_chk_guard;
}

void __init_alternate_signal_stack(pthread_internal_t* thread) {
  // Create and set an alternate signal stack.
  stack_t ss;
  ss.ss_sp = mmap(NULL, SIGSTKSZ, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
  if (ss.ss_sp != MAP_FAILED) {
    ss.ss_size = SIGSTKSZ;
    ss.ss_flags = 0;
    sigaltstack(&ss, NULL);
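    // Record the mapping so it can be unmapped when this thread exits.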
    thread->alternate_signal_stack = ss.ss_sp;
  }
}

int __init_thread(pthread_internal_t* thread, bool add_to_thread_list) {
  int error = 0;

  // Set the scheduling policy/priority of the thread.
  if (thread->attr.sched_policy != SCHED_NORMAL) {
    sched_param param;
    param.sched_priority = thread->attr.sched_priority;
    if (sched_setscheduler(thread->tid, thread->attr.sched_policy, &param) == -1) {
#if __LP64__
      // For backwards compatibility reasons, we only report failures on 64-bit devices.
      error = errno;
#endif
      __libc_format_log(ANDROID_LOG_WARN, "libc",
                        "pthread_create sched_setscheduler call failed: %s", strerror(errno));
    }
  }

  thread->cleanup_stack = NULL;

  if (add_to_thread_list) {
    _pthread_internal_add(thread);
  }

  return error;
}

static void* __create_thread_stack(pthread_internal_t* thread) {
  // Create a new private anonymous map.
  int prot = PROT_READ | PROT_WRITE;
  int flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE;
  void* stack = mmap(NULL, thread->attr.stack_size, prot, flags, -1, 0);
  if (stack == MAP_FAILED) {
    __libc_format_log(ANDROID_LOG_WARN,
                      "libc",
                      "pthread_create failed: couldn't allocate %zd-byte stack: %s",
                      thread->attr.stack_size, strerror(errno));
    return NULL;
  }

  // Set the guard region at the start of the mapping (the far end of the downward-growing stack) to PROT_NONE.
  if (mprotect(stack, thread->attr.guard_size, PROT_NONE) == -1) {
    __libc_format_log(ANDROID_LOG_WARN, "libc",
                      "pthread_create failed: couldn't mprotect PROT_NONE %zd-byte stack guard region: %s",
                      thread->attr.guard_size, strerror(errno));
    munmap(stack, thread->attr.stack_size);
    return NULL;
  }

  return stack;
}

static int __pthread_start(void* arg) {
  pthread_internal_t* thread = reinterpret_cast<pthread_internal_t*>(arg);

  // Wait for our creating thread to release us. This lets it have time to
  // notify gdb about this thread before we start doing anything.
  // This also provides the memory barrier needed to ensure that all memory
  // accesses previously made by the creating thread are visible to us.
  pthread_mutex_lock(&thread->startup_handshake_mutex);
  pthread_mutex_destroy(&thread->startup_handshake_mutex);

  __init_alternate_signal_stack(thread);

  void* result = thread->start_routine(thread->start_routine_arg);
  pthread_exit(result);

  return 0;
}

// A dummy start routine for pthread_create failures where we've created a thread but aren't
// going to run user code on it. We swap out the user's start routine for this and take advantage
// of the regular thread teardown to free up resources.
static void* __do_nothing(void*) {
  return NULL;
}

int pthread_create(pthread_t* thread_out, pthread_attr_t const* attr,
                   void* (*start_routine)(void*), void* arg) {
  ErrnoRestorer errno_restorer;

  // Inform the rest of the C library that at least one thread was created.
  __isthreaded = 1;

  pthread_internal_t* thread = reinterpret_cast<pthread_internal_t*>(calloc(sizeof(*thread), 1));
  if (thread == NULL) {
    __libc_format_log(ANDROID_LOG_WARN, "libc", "pthread_create failed: couldn't allocate thread");
    return EAGAIN;
  }

  if (attr == NULL) {
    pthread_attr_init(&thread->attr);
  } else {
    thread->attr = *attr;
    attr = NULL; // Prevent misuse below.
  }

  // Make sure the stack size and guard size are multiples of PAGE_SIZE.
  thread->attr.stack_size = BIONIC_ALIGN(thread->attr.stack_size, PAGE_SIZE);
  thread->attr.guard_size = BIONIC_ALIGN(thread->attr.guard_size, PAGE_SIZE);

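  // Note that when we allocate the stack ourselves (below), attr.stack_size covers the whole
  // mapping: the PROT_NONE guard region at the bottom, the usable stack, and the TLS slots
  // carved out of the top.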
  if (thread->attr.stack_base == NULL) {
    // The caller didn't provide a stack, so allocate one.
    thread->attr.stack_base = __create_thread_stack(thread);
    if (thread->attr.stack_base == NULL) {
      free(thread);
      return EAGAIN;
    }
  } else {
    // The caller did provide a stack, so remember we're not supposed to free it.
    thread->attr.flags |= PTHREAD_ATTR_FLAG_USER_ALLOCATED_STACK;
  }

  // Make room for the TLS area.
  // The child stack is the same address, just growing in the opposite direction.
  // At offsets >= 0, we have the TLS slots.
  // At offsets < 0, we have the child stack.
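  //
  //   stack_base                                          stack_base + stack_size
  //   +--------------+-----------------------------------+-----------------+
  //   | guard region | thread stack (grows downwards)    | TLS slots       |
  //   +--------------+-----------------------------------+-----------------+
  //                                                       ^ child_stack == thread->tls
  //
  // (The PROT_NONE guard region only exists for stacks we allocated ourselves.)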
  thread->tls = reinterpret_cast<void**>(reinterpret_cast<uint8_t*>(thread->attr.stack_base) +
                                         thread->attr.stack_size - BIONIC_TLS_SLOTS * sizeof(void*));
  void* child_stack = thread->tls;
  __init_tls(thread);

  // Create a mutex for the new thread to wait on once it starts, so we can keep it from doing
  // anything until after we notify the debugger about it.
  //
  // This also provides the memory barrier we need to ensure that all
  // memory accesses previously performed by this thread are visible to
  // the new thread.
  pthread_mutex_init(&thread->startup_handshake_mutex, NULL);
  pthread_mutex_lock(&thread->startup_handshake_mutex);

  thread->start_routine = start_routine;
  thread->start_routine_arg = arg;

  thread->set_cached_pid(getpid());

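  // CLONE_VM, CLONE_FS, CLONE_FILES, CLONE_SIGHAND, CLONE_THREAD, and CLONE_SYSVSEM make the new
  // task a thread of this process rather than a separate process. CLONE_SETTLS installs the TLS
  // pointer in the new thread, and CLONE_PARENT_SETTID/CLONE_CHILD_CLEARTID have the kernel write
  // the new tid into thread->tid and clear it (waking any futex waiters, such as pthread_join)
  // when the thread exits.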
  int flags = CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM |
      CLONE_SETTLS | CLONE_PARENT_SETTID | CLONE_CHILD_CLEARTID;
  void* tls = thread->tls;
#if defined(__i386__)
  // On x86 (but not x86-64), CLONE_SETTLS takes a pointer to a struct user_desc rather than
  // a pointer to the TLS itself.
  user_desc tls_descriptor;
  __init_user_desc(&tls_descriptor, false, tls);
  tls = &tls_descriptor;
#endif
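  // The new thread starts life in __pthread_start with `thread` as its argument; both the parent
  // tid slot and the child tid slot point at thread->tid.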
  int rc = clone(__pthread_start, child_stack, flags, thread, &(thread->tid), tls, &(thread->tid));
  if (rc == -1) {
    int clone_errno = errno;
    // We don't have to unlock the mutex at all because clone(2) failed so there's no child waiting to
    // be unblocked, but we're about to unmap the memory the mutex is stored in, so this serves as a
    // reminder that you can't rewrite this function to use a ScopedPthreadMutexLocker.
    pthread_mutex_unlock(&thread->startup_handshake_mutex);
    if (!thread->user_allocated_stack()) {
      munmap(thread->attr.stack_base, thread->attr.stack_size);
    }
    free(thread);
    __libc_format_log(ANDROID_LOG_WARN, "libc", "pthread_create failed: clone failed: %s",
                      strerror(clone_errno));
    return clone_errno;
  }

  int init_errno = __init_thread(thread, true);
  if (init_errno != 0) {
    // Mark the thread detached and replace its start_routine with a no-op.
    // Letting the thread run is the easiest way to clean up its resources.
    thread->attr.flags |= PTHREAD_ATTR_FLAG_DETACHED;
    thread->start_routine = __do_nothing;
    pthread_mutex_unlock(&thread->startup_handshake_mutex);
    return init_errno;
  }

  // Publish the pthread_t and unlock the mutex to let the new thread start running.
  *thread_out = reinterpret_cast<pthread_t>(thread);
  pthread_mutex_unlock(&thread->startup_handshake_mutex);

  return 0;
}