/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <gtest/gtest.h>

#include <errno.h>
#include <inttypes.h>
#include <limits.h>
#include <malloc.h>
#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <sys/mman.h>
#include <sys/prctl.h>
#include <sys/syscall.h>
#include <time.h>
#include <unistd.h>
#include <unwind.h>

#include <atomic>
#include <vector>

#include <android-base/scopeguard.h>

#include "private/bionic_constants.h"
#include "private/bionic_macros.h"
#include "BionicDeathTest.h"
#include "ScopedSignalHandler.h"
#include "utils.h"

TEST(pthread, pthread_key_create) {
  pthread_key_t key;
  ASSERT_EQ(0, pthread_key_create(&key, NULL));
  ASSERT_EQ(0, pthread_key_delete(key));
  // Can't delete a key that's already been deleted.
  ASSERT_EQ(EINVAL, pthread_key_delete(key));
}

TEST(pthread, pthread_keys_max) {
  // POSIX says PTHREAD_KEYS_MAX should be at least _POSIX_THREAD_KEYS_MAX.
  ASSERT_GE(PTHREAD_KEYS_MAX, _POSIX_THREAD_KEYS_MAX);
}

TEST(pthread, sysconf_SC_THREAD_KEYS_MAX_eq_PTHREAD_KEYS_MAX) {
  int sysconf_max = sysconf(_SC_THREAD_KEYS_MAX);
  ASSERT_EQ(sysconf_max, PTHREAD_KEYS_MAX);
}

TEST(pthread, pthread_key_many_distinct) {
  // As gtest uses pthread keys, we can't allocate exactly PTHREAD_KEYS_MAX
  // pthread keys, but we should be able to allocate at least this many.
  int nkeys = PTHREAD_KEYS_MAX / 2;
  std::vector<pthread_key_t> keys;

  auto scope_guard = android::base::make_scope_guard([&keys] {
    for (const auto& key : keys) {
      EXPECT_EQ(0, pthread_key_delete(key));
    }
  });

  for (int i = 0; i < nkeys; ++i) {
    pthread_key_t key;
    // If this fails, it's likely that LIBC_PTHREAD_KEY_RESERVED_COUNT is wrong.
    ASSERT_EQ(0, pthread_key_create(&key, NULL)) << i << " of " << nkeys;
    keys.push_back(key);
    ASSERT_EQ(0, pthread_setspecific(key, reinterpret_cast<void*>(i)));
  }

  for (int i = keys.size() - 1; i >= 0; --i) {
    ASSERT_EQ(reinterpret_cast<void*>(i), pthread_getspecific(keys.back()));
    pthread_key_t key = keys.back();
    keys.pop_back();
    ASSERT_EQ(0, pthread_key_delete(key));
  }
}

TEST(pthread, pthread_key_not_exceed_PTHREAD_KEYS_MAX) {
  std::vector<pthread_key_t> keys;
  int rv = 0;

  // Pthread keys are used by gtest, so PTHREAD_KEYS_MAX should
  // be more than we are allowed to allocate now.
  for (int i = 0; i < PTHREAD_KEYS_MAX; i++) {
    pthread_key_t key;
    rv = pthread_key_create(&key, NULL);
    if (rv == EAGAIN) {
      break;
    }
    EXPECT_EQ(0, rv);
    keys.push_back(key);
  }

  // Don't leak keys.
  for (const auto& key : keys) {
    EXPECT_EQ(0, pthread_key_delete(key));
  }
  keys.clear();

  // We should have eventually reached the maximum number of keys and received
  // EAGAIN.
  ASSERT_EQ(EAGAIN, rv);
}

TEST(pthread, pthread_key_delete) {
  void* expected = reinterpret_cast<void*>(1234);
  pthread_key_t key;
  ASSERT_EQ(0, pthread_key_create(&key, NULL));
  ASSERT_EQ(0, pthread_setspecific(key, expected));
  ASSERT_EQ(expected, pthread_getspecific(key));
  ASSERT_EQ(0, pthread_key_delete(key));
  // After deletion, pthread_getspecific returns NULL.
  ASSERT_EQ(NULL, pthread_getspecific(key));
  // And you can't use pthread_setspecific with the deleted key.
  ASSERT_EQ(EINVAL, pthread_setspecific(key, expected));
}

TEST(pthread, pthread_key_fork) {
  void* expected = reinterpret_cast<void*>(1234);
  pthread_key_t key;
  ASSERT_EQ(0, pthread_key_create(&key, NULL));
  ASSERT_EQ(0, pthread_setspecific(key, expected));
  ASSERT_EQ(expected, pthread_getspecific(key));

  pid_t pid = fork();
  ASSERT_NE(-1, pid) << strerror(errno);

  if (pid == 0) {
    // The surviving thread inherits all the forking thread's TLS values...
    ASSERT_EQ(expected, pthread_getspecific(key));
    _exit(99);
  }

  AssertChildExited(pid, 99);

  ASSERT_EQ(expected, pthread_getspecific(key));
  ASSERT_EQ(0, pthread_key_delete(key));
}

static void* DirtyKeyFn(void* key) {
  return pthread_getspecific(*reinterpret_cast<pthread_key_t*>(key));
}

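// A newly-created key must read as NULL in a new thread, even if that thread's
// stack is dirty (here deliberately filled with 0xff before the thread starts).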
TEST(pthread, pthread_key_dirty) {
  pthread_key_t key;
  ASSERT_EQ(0, pthread_key_create(&key, NULL));

  size_t stack_size = 640 * 1024;
  void* stack = mmap(NULL, stack_size, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
  ASSERT_NE(MAP_FAILED, stack);
  memset(stack, 0xff, stack_size);

  pthread_attr_t attr;
  ASSERT_EQ(0, pthread_attr_init(&attr));
  ASSERT_EQ(0, pthread_attr_setstack(&attr, stack, stack_size));

  pthread_t t;
  ASSERT_EQ(0, pthread_create(&t, &attr, DirtyKeyFn, &key));

  void* result;
  ASSERT_EQ(0, pthread_join(t, &result));
  ASSERT_EQ(nullptr, result); // Not ~0!

  ASSERT_EQ(0, munmap(stack, stack_size));
  ASSERT_EQ(0, pthread_key_delete(key));
}

TEST(pthread, static_pthread_key_used_before_creation) {
#if defined(__BIONIC__)
  // See http://b/19625804. The bug was a static/global pthread key being used before creation,
  // so this checks that the static/global default value 0 is detected as an invalid key.
  static pthread_key_t key;
  ASSERT_EQ(nullptr, pthread_getspecific(key));
  ASSERT_EQ(EINVAL, pthread_setspecific(key, nullptr));
  ASSERT_EQ(EINVAL, pthread_key_delete(key));
#else
  GTEST_LOG_(INFO) << "This test tests a bionic pthread key implementation detail.\n";
#endif
}

static void* IdFn(void* arg) {
  return arg;
}

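// Runs a thread in a busy loop until UnSpin() is called, so tests can hold a
// thread alive and running for as long as they need it.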
class SpinFunctionHelper {
 public:
  SpinFunctionHelper() {
    SpinFunctionHelper::spin_flag_ = true;
  }
  ~SpinFunctionHelper() {
    UnSpin();
  }
  auto GetFunction() -> void* (*)(void*) {
    return SpinFunctionHelper::SpinFn;
  }

  void UnSpin() {
    SpinFunctionHelper::spin_flag_ = false;
  }

 private:
  static void* SpinFn(void*) {
    while (spin_flag_) {}
    return NULL;
  }
  static std::atomic<bool> spin_flag_;
};

// It doesn't matter that spin_flag_ is shared across several tests, because
// it is always set to false at the end of each test, so every thread looping
// on spin_flag_ eventually sees it become false.
std::atomic<bool> SpinFunctionHelper::spin_flag_;

static void* JoinFn(void* arg) {
  return reinterpret_cast<void*>(pthread_join(reinterpret_cast<pthread_t>(arg), NULL));
}

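// Checks a thread's detach state, as reported by pthread_getattr_np.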
static void AssertDetached(pthread_t t, bool is_detached) {
  pthread_attr_t attr;
  ASSERT_EQ(0, pthread_getattr_np(t, &attr));
  int detach_state;
  ASSERT_EQ(0, pthread_attr_getdetachstate(&attr, &detach_state));
  pthread_attr_destroy(&attr);
  ASSERT_EQ(is_detached, (detach_state == PTHREAD_CREATE_DETACHED));
}

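// Creates and joins a thread, leaving `t` as a pthread_t that no longer refers
// to a live thread.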
static void MakeDeadThread(pthread_t& t) {
  ASSERT_EQ(0, pthread_create(&t, NULL, IdFn, NULL));
  ASSERT_EQ(0, pthread_join(t, NULL));
}

TEST(pthread, pthread_create) {
  void* expected_result = reinterpret_cast<void*>(123);
  // Can we create a thread?
  pthread_t t;
  ASSERT_EQ(0, pthread_create(&t, NULL, IdFn, expected_result));
  // If we join, do we get the expected value back?
  void* result;
  ASSERT_EQ(0, pthread_join(t, &result));
  ASSERT_EQ(expected_result, result);
}

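// Asking for the largest possible page-aligned stack size should make thread
// creation fail cleanly with EAGAIN rather than crash.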
TEST(pthread, pthread_create_EAGAIN) {
  pthread_attr_t attributes;
  ASSERT_EQ(0, pthread_attr_init(&attributes));
  ASSERT_EQ(0, pthread_attr_setstacksize(&attributes, static_cast<size_t>(-1) & ~(getpagesize() - 1)));

  pthread_t t;
  ASSERT_EQ(EAGAIN, pthread_create(&t, &attributes, IdFn, NULL));
}

TEST(pthread, pthread_no_join_after_detach) {
  SpinFunctionHelper spin_helper;

  pthread_t t1;
  ASSERT_EQ(0, pthread_create(&t1, NULL, spin_helper.GetFunction(), NULL));

  // After a pthread_detach...
  ASSERT_EQ(0, pthread_detach(t1));
  AssertDetached(t1, true);

  // ...pthread_join should fail.
  ASSERT_EQ(EINVAL, pthread_join(t1, NULL));
}

TEST(pthread, pthread_no_op_detach_after_join) {
  SpinFunctionHelper spin_helper;

  pthread_t t1;
  ASSERT_EQ(0, pthread_create(&t1, NULL, spin_helper.GetFunction(), NULL));

  // If thread 2 is already waiting to join thread 1...
  pthread_t t2;
  ASSERT_EQ(0, pthread_create(&t2, NULL, JoinFn, reinterpret_cast<void*>(t1)));

  sleep(1); // (Give t2 a chance to call pthread_join.)

#if defined(__BIONIC__)
  ASSERT_EQ(EINVAL, pthread_detach(t1));
#else
  ASSERT_EQ(0, pthread_detach(t1));
#endif
  AssertDetached(t1, false);

  spin_helper.UnSpin();

  // ...but t2's join on t1 still goes ahead (which we can tell because our join on t2 finishes).
  void* join_result;
  ASSERT_EQ(0, pthread_join(t2, &join_result));
  ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(join_result));
}

TEST(pthread, pthread_join_self) {
  ASSERT_EQ(EDEADLK, pthread_join(pthread_self(), NULL));
}

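// Regression test state for http://b/37410: the main thread calls pthread_exit
// while another thread is blocked in pthread_join on it; the process should
// still exit normally with status 0.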
struct TestBug37410 {
  pthread_t main_thread;
  pthread_mutex_t mutex;

  static void main() {
    TestBug37410 data;
    data.main_thread = pthread_self();
    ASSERT_EQ(0, pthread_mutex_init(&data.mutex, NULL));
    ASSERT_EQ(0, pthread_mutex_lock(&data.mutex));

    pthread_t t;
    ASSERT_EQ(0, pthread_create(&t, NULL, TestBug37410::thread_fn, reinterpret_cast<void*>(&data)));

    // Wait for the thread to be running...
    ASSERT_EQ(0, pthread_mutex_lock(&data.mutex));
    ASSERT_EQ(0, pthread_mutex_unlock(&data.mutex));

    // ...and exit.
    pthread_exit(NULL);
  }

 private:
  static void* thread_fn(void* arg) {
    TestBug37410* data = reinterpret_cast<TestBug37410*>(arg);

    // Let the main thread know we're running.
    pthread_mutex_unlock(&data->mutex);

    // And wait for the main thread to exit.
    pthread_join(data->main_thread, NULL);

    return NULL;
  }
};

// Even though this isn't really a death test, we have to say "DeathTest" here so gtest knows to
// run this test (which exits normally) in its own process.

class pthread_DeathTest : public BionicDeathTest {};

TEST_F(pthread_DeathTest, pthread_bug_37410) {
  // http://code.google.com/p/android/issues/detail?id=37410
  ASSERT_EXIT(TestBug37410::main(), ::testing::ExitedWithCode(0), "");
}

static void* SignalHandlerFn(void* arg) {
  sigset_t wait_set;
  sigfillset(&wait_set);
  return reinterpret_cast<void*>(sigwait(&wait_set, reinterpret_cast<int*>(arg)));
}

TEST(pthread, pthread_sigmask) {
  // Check that SIGUSR1 isn't blocked.
  sigset_t original_set;
  sigemptyset(&original_set);
  ASSERT_EQ(0, pthread_sigmask(SIG_BLOCK, NULL, &original_set));
  ASSERT_FALSE(sigismember(&original_set, SIGUSR1));

  // Block SIGUSR1.
  sigset_t set;
  sigemptyset(&set);
  sigaddset(&set, SIGUSR1);
  ASSERT_EQ(0, pthread_sigmask(SIG_BLOCK, &set, NULL));

  // Check that SIGUSR1 is blocked.
  sigset_t final_set;
  sigemptyset(&final_set);
  ASSERT_EQ(0, pthread_sigmask(SIG_BLOCK, NULL, &final_set));
  ASSERT_TRUE(sigismember(&final_set, SIGUSR1));
  // ...and that sigprocmask agrees with pthread_sigmask.
  sigemptyset(&final_set);
  ASSERT_EQ(0, sigprocmask(SIG_BLOCK, NULL, &final_set));
  ASSERT_TRUE(sigismember(&final_set, SIGUSR1));

  // Spawn a thread that calls sigwait and tells us what it received.
  pthread_t signal_thread;
  int received_signal = -1;
  ASSERT_EQ(0, pthread_create(&signal_thread, NULL, SignalHandlerFn, &received_signal));

  // Send that thread SIGUSR1.
  pthread_kill(signal_thread, SIGUSR1);

  // See what it got.
  void* join_result;
  ASSERT_EQ(0, pthread_join(signal_thread, &join_result));
  ASSERT_EQ(SIGUSR1, received_signal);
  ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(join_result));

  // Restore the original signal mask.
  ASSERT_EQ(0, pthread_sigmask(SIG_SETMASK, &original_set, NULL));
}

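// Round-trips names through pthread_setname_np/pthread_getname_np on the given
// thread, including the kernel's 15-character limit (its buffer is 16 bytes,
// including the NUL).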
static void test_pthread_setname_np__pthread_getname_np(pthread_t t) {
  ASSERT_EQ(0, pthread_setname_np(t, "short"));
  char name[32];
  ASSERT_EQ(0, pthread_getname_np(t, name, sizeof(name)));
  ASSERT_STREQ("short", name);

  // The limit is 15 characters --- the kernel's buffer is 16, but includes a NUL.
  ASSERT_EQ(0, pthread_setname_np(t, "123456789012345"));
  ASSERT_EQ(0, pthread_getname_np(t, name, sizeof(name)));
  ASSERT_STREQ("123456789012345", name);

  ASSERT_EQ(ERANGE, pthread_setname_np(t, "1234567890123456"));

  // The passed-in buffer should be at least 16 bytes.
  ASSERT_EQ(0, pthread_getname_np(t, name, 16));
  ASSERT_EQ(ERANGE, pthread_getname_np(t, name, 15));
}

TEST(pthread, pthread_setname_np__pthread_getname_np__self) {
  test_pthread_setname_np__pthread_getname_np(pthread_self());
}

TEST(pthread, pthread_setname_np__pthread_getname_np__other) {
  SpinFunctionHelper spin_helper;

  pthread_t t;
  ASSERT_EQ(0, pthread_create(&t, nullptr, spin_helper.GetFunction(), nullptr));
  test_pthread_setname_np__pthread_getname_np(t);
  spin_helper.UnSpin();
  ASSERT_EQ(0, pthread_join(t, nullptr));
}

// http://b/28051133: a kernel misfeature means that you can't change the
// name of another thread if you've set PR_SET_DUMPABLE to 0.
TEST(pthread, pthread_setname_np__pthread_getname_np__other_PR_SET_DUMPABLE) {
  ASSERT_EQ(0, prctl(PR_SET_DUMPABLE, 0)) << strerror(errno);

  SpinFunctionHelper spin_helper;

  pthread_t t;
  ASSERT_EQ(0, pthread_create(&t, nullptr, spin_helper.GetFunction(), nullptr));
  test_pthread_setname_np__pthread_getname_np(t);
  spin_helper.UnSpin();
  ASSERT_EQ(0, pthread_join(t, nullptr));
}

TEST_F(pthread_DeathTest, pthread_setname_np__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  EXPECT_DEATH(pthread_setname_np(dead_thread, "short 3"), "invalid pthread_t");
}

TEST_F(pthread_DeathTest, pthread_setname_np__null_thread) {
  pthread_t null_thread = 0;
  EXPECT_EQ(ENOENT, pthread_setname_np(null_thread, "short 3"));
}

TEST_F(pthread_DeathTest, pthread_getname_np__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  char name[64];
  EXPECT_DEATH(pthread_getname_np(dead_thread, name, sizeof(name)), "invalid pthread_t");
}

TEST_F(pthread_DeathTest, pthread_getname_np__null_thread) {
  pthread_t null_thread = 0;

  char name[64];
  EXPECT_EQ(ENOENT, pthread_getname_np(null_thread, name, sizeof(name)));
}

TEST(pthread, pthread_kill__0) {
  // Signal 0 just tests that the thread exists, so it's safe to call on ourselves.
  ASSERT_EQ(0, pthread_kill(pthread_self(), 0));
}

TEST(pthread, pthread_kill__invalid_signal) {
  ASSERT_EQ(EINVAL, pthread_kill(pthread_self(), -1));
}

static void pthread_kill__in_signal_handler_helper(int signal_number) {
  static int count = 0;
  ASSERT_EQ(SIGALRM, signal_number);
  if (++count == 1) {
    // Can we call pthread_kill from a signal handler?
    ASSERT_EQ(0, pthread_kill(pthread_self(), SIGALRM));
  }
}

TEST(pthread, pthread_kill__in_signal_handler) {
  ScopedSignalHandler ssh(SIGALRM, pthread_kill__in_signal_handler_helper);
  ASSERT_EQ(0, pthread_kill(pthread_self(), SIGALRM));
}

TEST_F(pthread_DeathTest, pthread_detach__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  EXPECT_DEATH(pthread_detach(dead_thread), "invalid pthread_t");
}

TEST_F(pthread_DeathTest, pthread_detach__null_thread) {
  pthread_t null_thread = 0;
  EXPECT_EQ(ESRCH, pthread_detach(null_thread));
}

TEST(pthread, pthread_getcpuclockid__clock_gettime) {
  SpinFunctionHelper spin_helper;

  pthread_t t;
  ASSERT_EQ(0, pthread_create(&t, NULL, spin_helper.GetFunction(), NULL));

  clockid_t c;
  ASSERT_EQ(0, pthread_getcpuclockid(t, &c));
  timespec ts;
  ASSERT_EQ(0, clock_gettime(c, &ts));
  spin_helper.UnSpin();
  ASSERT_EQ(0, pthread_join(t, nullptr));
}

TEST_F(pthread_DeathTest, pthread_getcpuclockid__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  clockid_t c;
  EXPECT_DEATH(pthread_getcpuclockid(dead_thread, &c), "invalid pthread_t");
}

TEST_F(pthread_DeathTest, pthread_getcpuclockid__null_thread) {
  pthread_t null_thread = 0;
  clockid_t c;
  EXPECT_EQ(ESRCH, pthread_getcpuclockid(null_thread, &c));
}

TEST_F(pthread_DeathTest, pthread_getschedparam__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  int policy;
  sched_param param;
  EXPECT_DEATH(pthread_getschedparam(dead_thread, &policy, &param), "invalid pthread_t");
}

TEST_F(pthread_DeathTest, pthread_getschedparam__null_thread) {
  pthread_t null_thread = 0;
  int policy;
  sched_param param;
  EXPECT_EQ(ESRCH, pthread_getschedparam(null_thread, &policy, &param));
}

TEST_F(pthread_DeathTest, pthread_setschedparam__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  int policy = 0;
  sched_param param;
  EXPECT_DEATH(pthread_setschedparam(dead_thread, policy, &param), "invalid pthread_t");
}

TEST_F(pthread_DeathTest, pthread_setschedparam__null_thread) {
  pthread_t null_thread = 0;
  int policy = 0;
  sched_param param;
  EXPECT_EQ(ESRCH, pthread_setschedparam(null_thread, policy, &param));
}

TEST_F(pthread_DeathTest, pthread_join__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  EXPECT_DEATH(pthread_join(dead_thread, NULL), "invalid pthread_t");
}

TEST_F(pthread_DeathTest, pthread_join__null_thread) {
  pthread_t null_thread = 0;
  EXPECT_EQ(ESRCH, pthread_join(null_thread, NULL));
}

TEST_F(pthread_DeathTest, pthread_kill__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  EXPECT_DEATH(pthread_kill(dead_thread, 0), "invalid pthread_t");
}

TEST_F(pthread_DeathTest, pthread_kill__null_thread) {
  pthread_t null_thread = 0;
  EXPECT_EQ(ESRCH, pthread_kill(null_thread, 0));
}

TEST(pthread, pthread_join__multijoin) {
  SpinFunctionHelper spin_helper;

  pthread_t t1;
  ASSERT_EQ(0, pthread_create(&t1, NULL, spin_helper.GetFunction(), NULL));

  pthread_t t2;
  ASSERT_EQ(0, pthread_create(&t2, NULL, JoinFn, reinterpret_cast<void*>(t1)));

  sleep(1); // (Give t2 a chance to call pthread_join.)

  // Multiple joins to the same thread should fail.
  ASSERT_EQ(EINVAL, pthread_join(t1, NULL));

  spin_helper.UnSpin();

  // ...but t2's join on t1 still goes ahead (which we can tell because our join on t2 finishes).
  void* join_result;
  ASSERT_EQ(0, pthread_join(t2, &join_result));
  ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(join_result));
}

TEST(pthread, pthread_join__race) {
  // http://b/11693195 --- pthread_join could return before the thread had actually exited.
  // If the joiner unmapped the thread's stack, that could lead to SIGSEGV in the thread.
  for (size_t i = 0; i < 1024; ++i) {
    size_t stack_size = 640*1024;
    void* stack = mmap(NULL, stack_size, PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, -1, 0);

    pthread_attr_t a;
    pthread_attr_init(&a);
    pthread_attr_setstack(&a, stack, stack_size);

    pthread_t t;
    ASSERT_EQ(0, pthread_create(&t, &a, IdFn, NULL));
    ASSERT_EQ(0, pthread_join(t, NULL));
    ASSERT_EQ(0, munmap(stack, stack_size));
  }
}

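// pthread_create may round the requested guard and stack sizes up, so these
// helpers start a thread with the given attributes and report the sizes the
// thread actually observes via pthread_getattr_np.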
static void* GetActualGuardSizeFn(void* arg) {
  pthread_attr_t attributes;
  pthread_getattr_np(pthread_self(), &attributes);
  pthread_attr_getguardsize(&attributes, reinterpret_cast<size_t*>(arg));
  return NULL;
}

static size_t GetActualGuardSize(const pthread_attr_t& attributes) {
  size_t result;
  pthread_t t;
  pthread_create(&t, &attributes, GetActualGuardSizeFn, &result);
  pthread_join(t, NULL);
  return result;
}

static void* GetActualStackSizeFn(void* arg) {
  pthread_attr_t attributes;
  pthread_getattr_np(pthread_self(), &attributes);
  pthread_attr_getstacksize(&attributes, reinterpret_cast<size_t*>(arg));
  return NULL;
}

static size_t GetActualStackSize(const pthread_attr_t& attributes) {
  size_t result;
  pthread_t t;
  pthread_create(&t, &attributes, GetActualStackSizeFn, &result);
  pthread_join(t, NULL);
  return result;
}

TEST(pthread, pthread_attr_setguardsize) {
  pthread_attr_t attributes;
  ASSERT_EQ(0, pthread_attr_init(&attributes));

  // Get the default guard size.
  size_t default_guard_size;
  ASSERT_EQ(0, pthread_attr_getguardsize(&attributes, &default_guard_size));

  // No such thing as too small: will be rounded up to one page by pthread_create.
  ASSERT_EQ(0, pthread_attr_setguardsize(&attributes, 128));
  size_t guard_size;
  ASSERT_EQ(0, pthread_attr_getguardsize(&attributes, &guard_size));
  ASSERT_EQ(128U, guard_size);
  ASSERT_EQ(4096U, GetActualGuardSize(attributes));

  // Large enough and a multiple of the page size.
  ASSERT_EQ(0, pthread_attr_setguardsize(&attributes, 32*1024));
  ASSERT_EQ(0, pthread_attr_getguardsize(&attributes, &guard_size));
  ASSERT_EQ(32*1024U, guard_size);

  // Large enough but not a multiple of the page size; will be rounded up by pthread_create.
  ASSERT_EQ(0, pthread_attr_setguardsize(&attributes, 32*1024 + 1));
  ASSERT_EQ(0, pthread_attr_getguardsize(&attributes, &guard_size));
  ASSERT_EQ(32*1024U + 1, guard_size);
}

TEST(pthread, pthread_attr_setstacksize) {
  pthread_attr_t attributes;
  ASSERT_EQ(0, pthread_attr_init(&attributes));

  // Get the default stack size.
  size_t default_stack_size;
  ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &default_stack_size));

  // Too small.
  ASSERT_EQ(EINVAL, pthread_attr_setstacksize(&attributes, 128));
  size_t stack_size;
  ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &stack_size));
  ASSERT_EQ(default_stack_size, stack_size);
  ASSERT_GE(GetActualStackSize(attributes), default_stack_size);

  // Large enough and a multiple of the page size; may be rounded up by pthread_create.
  ASSERT_EQ(0, pthread_attr_setstacksize(&attributes, 32*1024));
  ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &stack_size));
  ASSERT_EQ(32*1024U, stack_size);
  ASSERT_GE(GetActualStackSize(attributes), 32*1024U);

  // Large enough but not aligned; will be rounded up by pthread_create.
  ASSERT_EQ(0, pthread_attr_setstacksize(&attributes, 32*1024 + 1));
  ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &stack_size));
  ASSERT_EQ(32*1024U + 1, stack_size);
#if defined(__BIONIC__)
  ASSERT_GT(GetActualStackSize(attributes), 32*1024U + 1);
#else // __BIONIC__
  // glibc rounds down, in violation of POSIX. They document this in their BUGS section.
  ASSERT_EQ(GetActualStackSize(attributes), 32*1024U);
#endif // __BIONIC__
}

TEST(pthread, pthread_rwlockattr_smoke) {
  pthread_rwlockattr_t attr;
  ASSERT_EQ(0, pthread_rwlockattr_init(&attr));

  int pshared_value_array[] = {PTHREAD_PROCESS_PRIVATE, PTHREAD_PROCESS_SHARED};
  for (size_t i = 0; i < sizeof(pshared_value_array) / sizeof(pshared_value_array[0]); ++i) {
    ASSERT_EQ(0, pthread_rwlockattr_setpshared(&attr, pshared_value_array[i]));
    int pshared;
    ASSERT_EQ(0, pthread_rwlockattr_getpshared(&attr, &pshared));
    ASSERT_EQ(pshared_value_array[i], pshared);
  }

  int kind_array[] = {PTHREAD_RWLOCK_PREFER_READER_NP,
                      PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP};
  for (size_t i = 0; i < sizeof(kind_array) / sizeof(kind_array[0]); ++i) {
    ASSERT_EQ(0, pthread_rwlockattr_setkind_np(&attr, kind_array[i]));
    int kind;
    ASSERT_EQ(0, pthread_rwlockattr_getkind_np(&attr, &kind));
    ASSERT_EQ(kind_array[i], kind);
  }

  ASSERT_EQ(0, pthread_rwlockattr_destroy(&attr));
}

TEST(pthread, pthread_rwlock_init_same_as_PTHREAD_RWLOCK_INITIALIZER) {
  pthread_rwlock_t lock1 = PTHREAD_RWLOCK_INITIALIZER;
  pthread_rwlock_t lock2;
  ASSERT_EQ(0, pthread_rwlock_init(&lock2, NULL));
  ASSERT_EQ(0, memcmp(&lock1, &lock2, sizeof(lock1)));
}

TEST(pthread, pthread_rwlock_smoke) {
  pthread_rwlock_t l;
  ASSERT_EQ(0, pthread_rwlock_init(&l, NULL));

  // Single read lock
  ASSERT_EQ(0, pthread_rwlock_rdlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  // Multiple read lock
  ASSERT_EQ(0, pthread_rwlock_rdlock(&l));
  ASSERT_EQ(0, pthread_rwlock_rdlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  // Write lock
  ASSERT_EQ(0, pthread_rwlock_wrlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  // Try writer lock
  ASSERT_EQ(0, pthread_rwlock_trywrlock(&l));
  ASSERT_EQ(EBUSY, pthread_rwlock_trywrlock(&l));
  ASSERT_EQ(EBUSY, pthread_rwlock_tryrdlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  // Try reader lock
  ASSERT_EQ(0, pthread_rwlock_tryrdlock(&l));
  ASSERT_EQ(0, pthread_rwlock_tryrdlock(&l));
  ASSERT_EQ(EBUSY, pthread_rwlock_trywrlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  // Try writer lock after unlock
  ASSERT_EQ(0, pthread_rwlock_wrlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  // EDEADLK in "read after write"
  ASSERT_EQ(0, pthread_rwlock_wrlock(&l));
  ASSERT_EQ(EDEADLK, pthread_rwlock_rdlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  // EDEADLK in "write after write"
  ASSERT_EQ(0, pthread_rwlock_wrlock(&l));
  ASSERT_EQ(EDEADLK, pthread_rwlock_wrlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  ASSERT_EQ(0, pthread_rwlock_destroy(&l));
}

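// Shared state for the rwlock wakeup tests. The helper thread publishes its
// tid and progress, and the lock operations under test are injected as
// std::functions so the same helper covers rdlock, wrlock, and their timed
// variants.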
struct RwlockWakeupHelperArg {
  pthread_rwlock_t lock;
  enum Progress {
    LOCK_INITIALIZED,
    LOCK_WAITING,
    LOCK_RELEASED,
    LOCK_ACCESSED,
    LOCK_TIMEDOUT,
  };
  std::atomic<Progress> progress;
  std::atomic<pid_t> tid;
  std::function<int (pthread_rwlock_t*)> trylock_function;
  std::function<int (pthread_rwlock_t*)> lock_function;
  std::function<int (pthread_rwlock_t*, const timespec*)> timed_lock_function;
};

static void pthread_rwlock_wakeup_helper(RwlockWakeupHelperArg* arg) {
  arg->tid = gettid();
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_INITIALIZED, arg->progress);
  arg->progress = RwlockWakeupHelperArg::LOCK_WAITING;

  ASSERT_EQ(EBUSY, arg->trylock_function(&arg->lock));
  ASSERT_EQ(0, arg->lock_function(&arg->lock));
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_RELEASED, arg->progress);
  ASSERT_EQ(0, pthread_rwlock_unlock(&arg->lock));

  arg->progress = RwlockWakeupHelperArg::LOCK_ACCESSED;
}

static void test_pthread_rwlock_reader_wakeup_writer(std::function<int (pthread_rwlock_t*)> lock_function) {
  RwlockWakeupHelperArg wakeup_arg;
  ASSERT_EQ(0, pthread_rwlock_init(&wakeup_arg.lock, NULL));
  ASSERT_EQ(0, pthread_rwlock_rdlock(&wakeup_arg.lock));
  wakeup_arg.progress = RwlockWakeupHelperArg::LOCK_INITIALIZED;
  wakeup_arg.tid = 0;
  wakeup_arg.trylock_function = pthread_rwlock_trywrlock;
  wakeup_arg.lock_function = lock_function;

  pthread_t thread;
  ASSERT_EQ(0, pthread_create(&thread, NULL,
    reinterpret_cast<void* (*)(void*)>(pthread_rwlock_wakeup_helper), &wakeup_arg));
  WaitUntilThreadSleep(wakeup_arg.tid);
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_WAITING, wakeup_arg.progress);

  wakeup_arg.progress = RwlockWakeupHelperArg::LOCK_RELEASED;
  ASSERT_EQ(0, pthread_rwlock_unlock(&wakeup_arg.lock));

  ASSERT_EQ(0, pthread_join(thread, NULL));
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_ACCESSED, wakeup_arg.progress);
  ASSERT_EQ(0, pthread_rwlock_destroy(&wakeup_arg.lock));
}

TEST(pthread, pthread_rwlock_reader_wakeup_writer) {
  test_pthread_rwlock_reader_wakeup_writer(pthread_rwlock_wrlock);
}

TEST(pthread, pthread_rwlock_reader_wakeup_writer_timedwait) {
  timespec ts;
  ASSERT_EQ(0, clock_gettime(CLOCK_REALTIME, &ts));
  ts.tv_sec += 1;
  test_pthread_rwlock_reader_wakeup_writer([&](pthread_rwlock_t* lock) {
    return pthread_rwlock_timedwrlock(lock, &ts);
  });
}

static void test_pthread_rwlock_writer_wakeup_reader(std::function<int (pthread_rwlock_t*)> lock_function) {
  RwlockWakeupHelperArg wakeup_arg;
  ASSERT_EQ(0, pthread_rwlock_init(&wakeup_arg.lock, NULL));
  ASSERT_EQ(0, pthread_rwlock_wrlock(&wakeup_arg.lock));
  wakeup_arg.progress = RwlockWakeupHelperArg::LOCK_INITIALIZED;
  wakeup_arg.tid = 0;
  wakeup_arg.trylock_function = pthread_rwlock_tryrdlock;
  wakeup_arg.lock_function = lock_function;

  pthread_t thread;
  ASSERT_EQ(0, pthread_create(&thread, NULL,
    reinterpret_cast<void* (*)(void*)>(pthread_rwlock_wakeup_helper), &wakeup_arg));
  WaitUntilThreadSleep(wakeup_arg.tid);
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_WAITING, wakeup_arg.progress);

  wakeup_arg.progress = RwlockWakeupHelperArg::LOCK_RELEASED;
  ASSERT_EQ(0, pthread_rwlock_unlock(&wakeup_arg.lock));

  ASSERT_EQ(0, pthread_join(thread, NULL));
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_ACCESSED, wakeup_arg.progress);
  ASSERT_EQ(0, pthread_rwlock_destroy(&wakeup_arg.lock));
}

TEST(pthread, pthread_rwlock_writer_wakeup_reader) {
  test_pthread_rwlock_writer_wakeup_reader(pthread_rwlock_rdlock);
}

TEST(pthread, pthread_rwlock_writer_wakeup_reader_timedwait) {
  timespec ts;
  ASSERT_EQ(0, clock_gettime(CLOCK_REALTIME, &ts));
  ts.tv_sec += 1;
  test_pthread_rwlock_writer_wakeup_reader([&](pthread_rwlock_t* lock) {
    return pthread_rwlock_timedrdlock(lock, &ts);
  });
}

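// Blocks on the injected timed lock function and checks its failure modes: a
// deadline that has already passed or a negative tv_sec gives ETIMEDOUT, and
// a tv_nsec outside [0, NS_PER_S) gives EINVAL.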
static void pthread_rwlock_wakeup_timeout_helper(RwlockWakeupHelperArg* arg) {
  arg->tid = gettid();
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_INITIALIZED, arg->progress);
  arg->progress = RwlockWakeupHelperArg::LOCK_WAITING;

  ASSERT_EQ(EBUSY, arg->trylock_function(&arg->lock));

  timespec ts;
  ASSERT_EQ(0, clock_gettime(CLOCK_REALTIME, &ts));
  ASSERT_EQ(ETIMEDOUT, arg->timed_lock_function(&arg->lock, &ts));
  ts.tv_nsec = -1;
  ASSERT_EQ(EINVAL, arg->timed_lock_function(&arg->lock, &ts));
  ts.tv_nsec = NS_PER_S;
  ASSERT_EQ(EINVAL, arg->timed_lock_function(&arg->lock, &ts));
  ts.tv_nsec = NS_PER_S - 1;
  ts.tv_sec = -1;
  ASSERT_EQ(ETIMEDOUT, arg->timed_lock_function(&arg->lock, &ts));
  ASSERT_EQ(0, clock_gettime(CLOCK_REALTIME, &ts));
  ts.tv_sec += 1;
  ASSERT_EQ(ETIMEDOUT, arg->timed_lock_function(&arg->lock, &ts));
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_WAITING, arg->progress);
  arg->progress = RwlockWakeupHelperArg::LOCK_TIMEDOUT;
}

TEST(pthread, pthread_rwlock_timedrdlock_timeout) {
  RwlockWakeupHelperArg wakeup_arg;
  ASSERT_EQ(0, pthread_rwlock_init(&wakeup_arg.lock, nullptr));
  ASSERT_EQ(0, pthread_rwlock_wrlock(&wakeup_arg.lock));
  wakeup_arg.progress = RwlockWakeupHelperArg::LOCK_INITIALIZED;
  wakeup_arg.tid = 0;
  wakeup_arg.trylock_function = pthread_rwlock_tryrdlock;
  wakeup_arg.timed_lock_function = pthread_rwlock_timedrdlock;

  pthread_t thread;
  ASSERT_EQ(0, pthread_create(&thread, nullptr,
      reinterpret_cast<void* (*)(void*)>(pthread_rwlock_wakeup_timeout_helper), &wakeup_arg));
  WaitUntilThreadSleep(wakeup_arg.tid);
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_WAITING, wakeup_arg.progress);

  ASSERT_EQ(0, pthread_join(thread, nullptr));
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_TIMEDOUT, wakeup_arg.progress);
  ASSERT_EQ(0, pthread_rwlock_unlock(&wakeup_arg.lock));
  ASSERT_EQ(0, pthread_rwlock_destroy(&wakeup_arg.lock));
}

TEST(pthread, pthread_rwlock_timedwrlock_timeout) {
  RwlockWakeupHelperArg wakeup_arg;
  ASSERT_EQ(0, pthread_rwlock_init(&wakeup_arg.lock, nullptr));
  ASSERT_EQ(0, pthread_rwlock_rdlock(&wakeup_arg.lock));
  wakeup_arg.progress = RwlockWakeupHelperArg::LOCK_INITIALIZED;
  wakeup_arg.tid = 0;
  wakeup_arg.trylock_function = pthread_rwlock_trywrlock;
  wakeup_arg.timed_lock_function = pthread_rwlock_timedwrlock;

  pthread_t thread;
  ASSERT_EQ(0, pthread_create(&thread, nullptr,
      reinterpret_cast<void* (*)(void*)>(pthread_rwlock_wakeup_timeout_helper), &wakeup_arg));
  WaitUntilThreadSleep(wakeup_arg.tid);
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_WAITING, wakeup_arg.progress);

  ASSERT_EQ(0, pthread_join(thread, nullptr));
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_TIMEDOUT, wakeup_arg.progress);
  ASSERT_EQ(0, pthread_rwlock_unlock(&wakeup_arg.lock));
  ASSERT_EQ(0, pthread_rwlock_destroy(&wakeup_arg.lock));
}

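// Creates an rwlock of the requested kind (reader- or writer-preferring) and
// spawns reader and writer threads against it, so tests can observe which
// waiter is granted the lock first.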
class RwlockKindTestHelper {
 private:
  struct ThreadArg {
    RwlockKindTestHelper* helper;
    std::atomic<pid_t>& tid;

    ThreadArg(RwlockKindTestHelper* helper, std::atomic<pid_t>& tid)
      : helper(helper), tid(tid) { }
  };

 public:
  pthread_rwlock_t lock;

 public:
  explicit RwlockKindTestHelper(int kind_type) {
    InitRwlock(kind_type);
  }

  ~RwlockKindTestHelper() {
    DestroyRwlock();
  }

  void CreateWriterThread(pthread_t& thread, std::atomic<pid_t>& tid) {
    tid = 0;
    ThreadArg* arg = new ThreadArg(this, tid);
    ASSERT_EQ(0, pthread_create(&thread, NULL,
                                reinterpret_cast<void* (*)(void*)>(WriterThreadFn), arg));
  }

  void CreateReaderThread(pthread_t& thread, std::atomic<pid_t>& tid) {
    tid = 0;
    ThreadArg* arg = new ThreadArg(this, tid);
    ASSERT_EQ(0, pthread_create(&thread, NULL,
                                reinterpret_cast<void* (*)(void*)>(ReaderThreadFn), arg));
  }

 private:
  void InitRwlock(int kind_type) {
    pthread_rwlockattr_t attr;
    ASSERT_EQ(0, pthread_rwlockattr_init(&attr));
    ASSERT_EQ(0, pthread_rwlockattr_setkind_np(&attr, kind_type));
    ASSERT_EQ(0, pthread_rwlock_init(&lock, &attr));
    ASSERT_EQ(0, pthread_rwlockattr_destroy(&attr));
  }

  void DestroyRwlock() {
    ASSERT_EQ(0, pthread_rwlock_destroy(&lock));
  }

  static void WriterThreadFn(ThreadArg* arg) {
    arg->tid = gettid();

    RwlockKindTestHelper* helper = arg->helper;
    ASSERT_EQ(0, pthread_rwlock_wrlock(&helper->lock));
    ASSERT_EQ(0, pthread_rwlock_unlock(&helper->lock));
    delete arg;
  }

  static void ReaderThreadFn(ThreadArg* arg) {
    arg->tid = gettid();

    RwlockKindTestHelper* helper = arg->helper;
    ASSERT_EQ(0, pthread_rwlock_rdlock(&helper->lock));
    ASSERT_EQ(0, pthread_rwlock_unlock(&helper->lock));
    delete arg;
  }
};

TEST(pthread, pthread_rwlock_kind_PTHREAD_RWLOCK_PREFER_READER_NP) {
  RwlockKindTestHelper helper(PTHREAD_RWLOCK_PREFER_READER_NP);
  ASSERT_EQ(0, pthread_rwlock_rdlock(&helper.lock));

  pthread_t writer_thread;
  std::atomic<pid_t> writer_tid;
  helper.CreateWriterThread(writer_thread, writer_tid);
  WaitUntilThreadSleep(writer_tid);

  pthread_t reader_thread;
  std::atomic<pid_t> reader_tid;
  helper.CreateReaderThread(reader_thread, reader_tid);
  ASSERT_EQ(0, pthread_join(reader_thread, NULL));

  ASSERT_EQ(0, pthread_rwlock_unlock(&helper.lock));
  ASSERT_EQ(0, pthread_join(writer_thread, NULL));
}

TEST(pthread, pthread_rwlock_kind_PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP) {
  RwlockKindTestHelper helper(PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP);
  ASSERT_EQ(0, pthread_rwlock_rdlock(&helper.lock));

  pthread_t writer_thread;
  std::atomic<pid_t> writer_tid;
  helper.CreateWriterThread(writer_thread, writer_tid);
  WaitUntilThreadSleep(writer_tid);

  pthread_t reader_thread;
  std::atomic<pid_t> reader_tid;
  helper.CreateReaderThread(reader_thread, reader_tid);
  WaitUntilThreadSleep(reader_tid);

  ASSERT_EQ(0, pthread_rwlock_unlock(&helper.lock));
  ASSERT_EQ(0, pthread_join(writer_thread, NULL));
  ASSERT_EQ(0, pthread_join(reader_thread, NULL));
}

static int g_once_fn_call_count = 0;
static void OnceFn() {
  ++g_once_fn_call_count;
}

TEST(pthread, pthread_once_smoke) {
  pthread_once_t once_control = PTHREAD_ONCE_INIT;
  ASSERT_EQ(0, pthread_once(&once_control, OnceFn));
  ASSERT_EQ(0, pthread_once(&once_control, OnceFn));
  ASSERT_EQ(1, g_once_fn_call_count);
}

static std::string pthread_once_1934122_result = "";

static void Routine2() {
  pthread_once_1934122_result += "2";
}

static void Routine1() {
  pthread_once_t once_control_2 = PTHREAD_ONCE_INIT;
  pthread_once_1934122_result += "1";
  pthread_once(&once_control_2, &Routine2);
}

TEST(pthread, pthread_once_1934122) {
  // Very old versions of Android couldn't call pthread_once from a
  // pthread_once init routine. http://b/1934122.
  pthread_once_t once_control_1 = PTHREAD_ONCE_INIT;
  ASSERT_EQ(0, pthread_once(&once_control_1, &Routine1));
  ASSERT_EQ("12", pthread_once_1934122_result);
}

static int g_atfork_prepare_calls = 0;
static void AtForkPrepare1() { g_atfork_prepare_calls = (g_atfork_prepare_calls * 10) + 1; }
static void AtForkPrepare2() { g_atfork_prepare_calls = (g_atfork_prepare_calls * 10) + 2; }
static int g_atfork_parent_calls = 0;
static void AtForkParent1() { g_atfork_parent_calls = (g_atfork_parent_calls * 10) + 1; }
static void AtForkParent2() { g_atfork_parent_calls = (g_atfork_parent_calls * 10) + 2; }
static int g_atfork_child_calls = 0;
static void AtForkChild1() { g_atfork_child_calls = (g_atfork_child_calls * 10) + 1; }
static void AtForkChild2() { g_atfork_child_calls = (g_atfork_child_calls * 10) + 2; }

TEST(pthread, pthread_atfork_smoke) {
  ASSERT_EQ(0, pthread_atfork(AtForkPrepare1, AtForkParent1, AtForkChild1));
  ASSERT_EQ(0, pthread_atfork(AtForkPrepare2, AtForkParent2, AtForkChild2));

  pid_t pid = fork();
  ASSERT_NE(-1, pid) << strerror(errno);

  // Child and parent calls are made in the order they were registered.
  if (pid == 0) {
    ASSERT_EQ(12, g_atfork_child_calls);
    _exit(0);
  }
  ASSERT_EQ(12, g_atfork_parent_calls);

  // Prepare calls are made in the reverse order.
  ASSERT_EQ(21, g_atfork_prepare_calls);
  AssertChildExited(pid, 0);
}

TEST(pthread, pthread_attr_getscope) {
  pthread_attr_t attr;
  ASSERT_EQ(0, pthread_attr_init(&attr));

  int scope;
  ASSERT_EQ(0, pthread_attr_getscope(&attr, &scope));
  ASSERT_EQ(PTHREAD_SCOPE_SYSTEM, scope);
}

TEST(pthread, pthread_condattr_init) {
  pthread_condattr_t attr;
  pthread_condattr_init(&attr);

  clockid_t clock;
  ASSERT_EQ(0, pthread_condattr_getclock(&attr, &clock));
  ASSERT_EQ(CLOCK_REALTIME, clock);

  int pshared;
  ASSERT_EQ(0, pthread_condattr_getpshared(&attr, &pshared));
  ASSERT_EQ(PTHREAD_PROCESS_PRIVATE, pshared);
}

TEST(pthread, pthread_condattr_setclock) {
  pthread_condattr_t attr;
  pthread_condattr_init(&attr);

  ASSERT_EQ(0, pthread_condattr_setclock(&attr, CLOCK_REALTIME));
  clockid_t clock;
  ASSERT_EQ(0, pthread_condattr_getclock(&attr, &clock));
  ASSERT_EQ(CLOCK_REALTIME, clock);

  ASSERT_EQ(0, pthread_condattr_setclock(&attr, CLOCK_MONOTONIC));
  ASSERT_EQ(0, pthread_condattr_getclock(&attr, &clock));
  ASSERT_EQ(CLOCK_MONOTONIC, clock);

  ASSERT_EQ(EINVAL, pthread_condattr_setclock(&attr, CLOCK_PROCESS_CPUTIME_ID));
}

TEST(pthread, pthread_cond_broadcast__preserves_condattr_flags) {
#if defined(__BIONIC__)
  pthread_condattr_t attr;
  pthread_condattr_init(&attr);

  ASSERT_EQ(0, pthread_condattr_setclock(&attr, CLOCK_MONOTONIC));
  ASSERT_EQ(0, pthread_condattr_setpshared(&attr, PTHREAD_PROCESS_SHARED));

  pthread_cond_t cond_var;
  ASSERT_EQ(0, pthread_cond_init(&cond_var, &attr));

  ASSERT_EQ(0, pthread_cond_signal(&cond_var));
  ASSERT_EQ(0, pthread_cond_broadcast(&cond_var));

  attr = static_cast<pthread_condattr_t>(*reinterpret_cast<uint32_t*>(cond_var.__private));
  clockid_t clock;
  ASSERT_EQ(0, pthread_condattr_getclock(&attr, &clock));
  ASSERT_EQ(CLOCK_MONOTONIC, clock);
  int pshared;
  ASSERT_EQ(0, pthread_condattr_getpshared(&attr, &pshared));
  ASSERT_EQ(PTHREAD_PROCESS_SHARED, pshared);
#else  // !defined(__BIONIC__)
  GTEST_LOG_(INFO) << "This tests a bionic implementation detail.\n";
#endif  // !defined(__BIONIC__)
}

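// Fixture for condition variable wakeup tests: a helper thread blocks in an
// injected wait function (plain or timed, on a chosen clock) and the test
// body wakes it with pthread_cond_signal or pthread_cond_broadcast.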
class pthread_CondWakeupTest : public ::testing::Test {
 protected:
  pthread_mutex_t mutex;
  pthread_cond_t cond;

  enum Progress {
    INITIALIZED,
    WAITING,
    SIGNALED,
    FINISHED,
  };
  std::atomic<Progress> progress;
  pthread_t thread;
  std::function<int (pthread_cond_t* cond, pthread_mutex_t* mutex)> wait_function;

 protected:
  void SetUp() override {
    ASSERT_EQ(0, pthread_mutex_init(&mutex, nullptr));
  }

  void InitCond(clockid_t clock=CLOCK_REALTIME) {
    pthread_condattr_t attr;
    ASSERT_EQ(0, pthread_condattr_init(&attr));
    ASSERT_EQ(0, pthread_condattr_setclock(&attr, clock));
    ASSERT_EQ(0, pthread_cond_init(&cond, &attr));
    ASSERT_EQ(0, pthread_condattr_destroy(&attr));
  }

  void StartWaitingThread(std::function<int (pthread_cond_t* cond, pthread_mutex_t* mutex)> wait_function) {
    progress = INITIALIZED;
    this->wait_function = wait_function;
    ASSERT_EQ(0, pthread_create(&thread, NULL, reinterpret_cast<void* (*)(void*)>(WaitThreadFn), this));
    while (progress != WAITING) {
      usleep(5000);
    }
    usleep(5000);
  }

  void TearDown() override {
    ASSERT_EQ(0, pthread_join(thread, nullptr));
    ASSERT_EQ(FINISHED, progress);
    ASSERT_EQ(0, pthread_cond_destroy(&cond));
    ASSERT_EQ(0, pthread_mutex_destroy(&mutex));
  }

 private:
  static void WaitThreadFn(pthread_CondWakeupTest* test) {
    ASSERT_EQ(0, pthread_mutex_lock(&test->mutex));
    test->progress = WAITING;
    while (test->progress == WAITING) {
      ASSERT_EQ(0, test->wait_function(&test->cond, &test->mutex));
    }
    ASSERT_EQ(SIGNALED, test->progress);
    test->progress = FINISHED;
    ASSERT_EQ(0, pthread_mutex_unlock(&test->mutex));
  }
};

TEST_F(pthread_CondWakeupTest, signal_wait) {
  InitCond();
  StartWaitingThread([](pthread_cond_t* cond, pthread_mutex_t* mutex) {
    return pthread_cond_wait(cond, mutex);
  });
  progress = SIGNALED;
  ASSERT_EQ(0, pthread_cond_signal(&cond));
}

TEST_F(pthread_CondWakeupTest, broadcast_wait) {
  InitCond();
  StartWaitingThread([](pthread_cond_t* cond, pthread_mutex_t* mutex) {
    return pthread_cond_wait(cond, mutex);
  });
  progress = SIGNALED;
  ASSERT_EQ(0, pthread_cond_broadcast(&cond));
}

TEST_F(pthread_CondWakeupTest, signal_timedwait_CLOCK_REALTIME) {
  InitCond(CLOCK_REALTIME);
  timespec ts;
  ASSERT_EQ(0, clock_gettime(CLOCK_REALTIME, &ts));
  ts.tv_sec += 1;
  StartWaitingThread([&](pthread_cond_t* cond, pthread_mutex_t* mutex) {
    return pthread_cond_timedwait(cond, mutex, &ts);
  });
  progress = SIGNALED;
  ASSERT_EQ(0, pthread_cond_signal(&cond));
}

TEST_F(pthread_CondWakeupTest, signal_timedwait_CLOCK_MONOTONIC) {
  InitCond(CLOCK_MONOTONIC);
  timespec ts;
  ASSERT_EQ(0, clock_gettime(CLOCK_MONOTONIC, &ts));
  ts.tv_sec += 1;
  StartWaitingThread([&](pthread_cond_t* cond, pthread_mutex_t* mutex) {
    return pthread_cond_timedwait(cond, mutex, &ts);
  });
  progress = SIGNALED;
  ASSERT_EQ(0, pthread_cond_signal(&cond));
}

TEST(pthread, pthread_cond_timedwait_timeout) {
  pthread_mutex_t mutex;
  ASSERT_EQ(0, pthread_mutex_init(&mutex, nullptr));
  pthread_cond_t cond;
  ASSERT_EQ(0, pthread_cond_init(&cond, nullptr));
  ASSERT_EQ(0, pthread_mutex_lock(&mutex));
  timespec ts;
  ASSERT_EQ(0, clock_gettime(CLOCK_REALTIME, &ts));
  ASSERT_EQ(ETIMEDOUT, pthread_cond_timedwait(&cond, &mutex, &ts));
  ts.tv_nsec = -1;
  ASSERT_EQ(EINVAL, pthread_cond_timedwait(&cond, &mutex, &ts));
  ts.tv_nsec = NS_PER_S;
  ASSERT_EQ(EINVAL, pthread_cond_timedwait(&cond, &mutex, &ts));
  ts.tv_nsec = NS_PER_S - 1;
  ts.tv_sec = -1;
  ASSERT_EQ(ETIMEDOUT, pthread_cond_timedwait(&cond, &mutex, &ts));
  ASSERT_EQ(0, pthread_mutex_unlock(&mutex));
}

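// The main thread's stack is special: the kernel maps it in on demand, and its
// reported size is derived from RLIMIT_STACK rather than from a fixed mapping.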
TEST(pthread, pthread_attr_getstack__main_thread) {
  // This test is only meaningful for the main thread, so make sure we're running on it!
  ASSERT_EQ(getpid(), syscall(__NR_gettid));

  // Get the main thread's attributes.
  pthread_attr_t attributes;
  ASSERT_EQ(0, pthread_getattr_np(pthread_self(), &attributes));

  // Check that we correctly report that the main thread has no guard page.
  size_t guard_size;
  ASSERT_EQ(0, pthread_attr_getguardsize(&attributes, &guard_size));
  ASSERT_EQ(0U, guard_size); // The main thread has no guard page.

  // Get the stack base and the stack size (both ways).
  void* stack_base;
  size_t stack_size;
  ASSERT_EQ(0, pthread_attr_getstack(&attributes, &stack_base, &stack_size));
  size_t stack_size2;
  ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &stack_size2));

  // The two methods of asking for the stack size should agree.
  EXPECT_EQ(stack_size, stack_size2);

#if defined(__BIONIC__)
  // What does /proc/self/maps' [stack] line say?
  void* maps_stack_hi = NULL;
  std::vector<map_record> maps;
  ASSERT_TRUE(Maps::parse_maps(&maps));
  for (const auto& map : maps) {
    if (map.pathname == "[stack]") {
      maps_stack_hi = reinterpret_cast<void*>(map.addr_end);
      break;
    }
  }

  // The high address of the /proc/self/maps [stack] region should equal stack_base + stack_size.
  // Remember that the stack grows down (and is mapped in on demand), so the low address of the
  // region isn't very interesting.
  EXPECT_EQ(maps_stack_hi, reinterpret_cast<uint8_t*>(stack_base) + stack_size);

  // The stack size should correspond to RLIMIT_STACK.
  rlimit rl;
  ASSERT_EQ(0, getrlimit(RLIMIT_STACK, &rl));
  uint64_t original_rlim_cur = rl.rlim_cur;
  if (rl.rlim_cur == RLIM_INFINITY) {
    rl.rlim_cur = 8 * 1024 * 1024; // Bionic reports unlimited stacks as 8MiB.
  }
  EXPECT_EQ(rl.rlim_cur, stack_size);

  auto guard = android::base::make_scope_guard([&rl, original_rlim_cur]() {
    rl.rlim_cur = original_rlim_cur;
    ASSERT_EQ(0, setrlimit(RLIMIT_STACK, &rl));
  });

  //
  // What if RLIMIT_STACK is smaller than the stack's current extent?
  //
  rl.rlim_cur = rl.rlim_max = 1024; // 1KiB. We know the stack must be at least a page already.
  rl.rlim_max = RLIM_INFINITY;
  ASSERT_EQ(0, setrlimit(RLIMIT_STACK, &rl));

  ASSERT_EQ(0, pthread_getattr_np(pthread_self(), &attributes));
  ASSERT_EQ(0, pthread_attr_getstack(&attributes, &stack_base, &stack_size));
  ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &stack_size2));

  EXPECT_EQ(stack_size, stack_size2);
  ASSERT_EQ(1024U, stack_size);

  //
  // What if RLIMIT_STACK isn't a whole number of pages?
  //
  rl.rlim_cur = rl.rlim_max = 6666; // Not a whole number of pages.
  rl.rlim_max = RLIM_INFINITY;
  ASSERT_EQ(0, setrlimit(RLIMIT_STACK, &rl));

  ASSERT_EQ(0, pthread_getattr_np(pthread_self(), &attributes));
  ASSERT_EQ(0, pthread_attr_getstack(&attributes, &stack_base, &stack_size));
  ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &stack_size2));

  EXPECT_EQ(stack_size, stack_size2);
  ASSERT_EQ(6666U, stack_size);
#endif
}

   1401 struct GetStackSignalHandlerArg {
   1402   volatile bool done;
   1403   void* signal_stack_base;
   1404   size_t signal_stack_size;
   1405   void* main_stack_base;
   1406   size_t main_stack_size;
   1407 };
   1408 
   1409 static GetStackSignalHandlerArg getstack_signal_handler_arg;
   1410 
   1411 static void getstack_signal_handler(int sig) {
   1412   ASSERT_EQ(SIGUSR1, sig);
    1413   // Use sleep() to encourage the kernel to switch this thread out, provoking the mislabeling described before the test below.
   1414   sleep(1);
   1415   pthread_attr_t attr;
   1416   ASSERT_EQ(0, pthread_getattr_np(pthread_self(), &attr));
   1417   void* stack_base;
   1418   size_t stack_size;
   1419   ASSERT_EQ(0, pthread_attr_getstack(&attr, &stack_base, &stack_size));
   1420 
    1421   // Verify that the stack in use is the alternate stack just registered: &attr is a local, so it must lie within that stack.
   1422   ASSERT_LE(getstack_signal_handler_arg.signal_stack_base, &attr);
   1423   ASSERT_LT(static_cast<void*>(&attr),
   1424             static_cast<char*>(getstack_signal_handler_arg.signal_stack_base) +
   1425             getstack_signal_handler_arg.signal_stack_size);
   1426 
    1427   // Verify that the main thread's stack, as queried from inside the signal handler, is correct.
   1428   ASSERT_EQ(getstack_signal_handler_arg.main_stack_base, stack_base);
   1429   ASSERT_LE(getstack_signal_handler_arg.main_stack_size, stack_size);
   1430 
   1431   getstack_signal_handler_arg.done = true;
   1432 }
   1433 
    1434 // The previous code obtained the main thread's stack by reading the entry in
    1435 // /proc/self/task/<pid>/maps that was labeled [stack]. Unfortunately, on x86/x86_64 the
    1436 // kernel relies on sp0 in the task state segment (TSS) to label the stack map with [stack].
    1437 // If the kernel switches the process out while the main thread is running on an alternate
    1438 // stack, it will label the wrong map with [stack]. This test verifies that when that
    1439 // happens, the main thread's stack is still found correctly.
   1440 TEST(pthread, pthread_attr_getstack_in_signal_handler) {
   1441   // This test is only meaningful for the main thread, so make sure we're running on it!
   1442   ASSERT_EQ(getpid(), syscall(__NR_gettid));
   1443 
   1444   const size_t sig_stack_size = 16 * 1024;
   1445   void* sig_stack = mmap(NULL, sig_stack_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS,
   1446                          -1, 0);
   1447   ASSERT_NE(MAP_FAILED, sig_stack);
   1448   stack_t ss;
   1449   ss.ss_sp = sig_stack;
   1450   ss.ss_size = sig_stack_size;
   1451   ss.ss_flags = 0;
   1452   stack_t oss;
   1453   ASSERT_EQ(0, sigaltstack(&ss, &oss));
   1454 
   1455   pthread_attr_t attr;
   1456   ASSERT_EQ(0, pthread_getattr_np(pthread_self(), &attr));
   1457   void* main_stack_base;
   1458   size_t main_stack_size;
   1459   ASSERT_EQ(0, pthread_attr_getstack(&attr, &main_stack_base, &main_stack_size));
   1460 
   1461   ScopedSignalHandler handler(SIGUSR1, getstack_signal_handler, SA_ONSTACK);
   1462   getstack_signal_handler_arg.done = false;
   1463   getstack_signal_handler_arg.signal_stack_base = sig_stack;
   1464   getstack_signal_handler_arg.signal_stack_size = sig_stack_size;
   1465   getstack_signal_handler_arg.main_stack_base = main_stack_base;
   1466   getstack_signal_handler_arg.main_stack_size = main_stack_size;
   1467   kill(getpid(), SIGUSR1);
    1468   ASSERT_TRUE(getstack_signal_handler_arg.done);
   1469 
   1470   ASSERT_EQ(0, sigaltstack(&oss, nullptr));
   1471   ASSERT_EQ(0, munmap(sig_stack, sig_stack_size));
   1472 }
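
         // For reference, a minimal sketch of the older maps-based approach described above:
         // scan /proc/self/maps for the region the kernel has labeled [stack]. This helper is
         // hypothetical (it is not part of these tests) and inherits exactly the weakness the
         // comment describes: on x86/x86_64 the label can sit on the wrong map while the main
         // thread is running on an alternate stack. It assumes <string> is available, as
         // std::string is already used elsewhere in this file.
         __attribute__((__unused__))
         static bool FindLabeledStack(uintptr_t* lo, uintptr_t* hi) {
           FILE* fp = fopen("/proc/self/maps", "r");
           if (fp == nullptr) return false;
           bool found = false;
           char line[BUFSIZ];
           while (fgets(line, sizeof(line), fp) != nullptr) {
             uintptr_t start, end;
             // Each line starts "start-end perms offset dev inode pathname".
             if (sscanf(line, "%" SCNxPTR "-%" SCNxPTR, &start, &end) == 2 &&
                 std::string(line).find("[stack]") != std::string::npos) {
               *lo = start;
               *hi = end;
               found = true;
               break;
             }
           }
           fclose(fp);
           return found;
         }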
   1473 
   1474 static void pthread_attr_getstack_18908062_helper(void*) {
   1475   char local_variable;
   1476   pthread_attr_t attributes;
   1477   pthread_getattr_np(pthread_self(), &attributes);
   1478   void* stack_base;
   1479   size_t stack_size;
   1480   pthread_attr_getstack(&attributes, &stack_base, &stack_size);
   1481 
   1482   // Test whether &local_variable is in [stack_base, stack_base + stack_size).
   1483   ASSERT_LE(reinterpret_cast<char*>(stack_base), &local_variable);
   1484   ASSERT_LT(&local_variable, reinterpret_cast<char*>(stack_base) + stack_size);
   1485 }
   1486 
    1487 // Check that something on the stack is in the range
    1488 // [stack_base, stack_base + stack_size). See b/18908062.
   1489 TEST(pthread, pthread_attr_getstack_18908062) {
   1490   pthread_t t;
   1491   ASSERT_EQ(0, pthread_create(&t, NULL,
   1492             reinterpret_cast<void* (*)(void*)>(pthread_attr_getstack_18908062_helper),
   1493             NULL));
   1494   pthread_join(t, NULL);
   1495 }
   1496 
   1497 #if defined(__BIONIC__)
   1498 static pthread_mutex_t pthread_gettid_np_mutex = PTHREAD_MUTEX_INITIALIZER;
   1499 
   1500 static void* pthread_gettid_np_helper(void* arg) {
   1501   *reinterpret_cast<pid_t*>(arg) = gettid();
   1502 
   1503   // Wait for our parent to call pthread_gettid_np on us before exiting.
   1504   pthread_mutex_lock(&pthread_gettid_np_mutex);
   1505   pthread_mutex_unlock(&pthread_gettid_np_mutex);
   1506   return NULL;
   1507 }
   1508 #endif
   1509 
   1510 TEST(pthread, pthread_gettid_np) {
   1511 #if defined(__BIONIC__)
   1512   ASSERT_EQ(gettid(), pthread_gettid_np(pthread_self()));
   1513 
   1514   // Ensure the other thread doesn't exit until after we've called
   1515   // pthread_gettid_np on it.
   1516   pthread_mutex_lock(&pthread_gettid_np_mutex);
   1517 
   1518   pid_t t_gettid_result;
   1519   pthread_t t;
   1520   pthread_create(&t, NULL, pthread_gettid_np_helper, &t_gettid_result);
   1521 
   1522   pid_t t_pthread_gettid_np_result = pthread_gettid_np(t);
   1523 
   1524   // Release the other thread and wait for it to exit.
   1525   pthread_mutex_unlock(&pthread_gettid_np_mutex);
   1526   pthread_join(t, NULL);
   1527 
   1528   ASSERT_EQ(t_gettid_result, t_pthread_gettid_np_result);
   1529 #else
   1530   GTEST_LOG_(INFO) << "This test does nothing.\n";
   1531 #endif
   1532 }
   1533 
   1534 static size_t cleanup_counter = 0;
   1535 
   1536 static void AbortCleanupRoutine(void*) {
   1537   abort();
   1538 }
   1539 
   1540 static void CountCleanupRoutine(void*) {
   1541   ++cleanup_counter;
   1542 }
   1543 
   1544 static void PthreadCleanupTester() {
   1545   pthread_cleanup_push(CountCleanupRoutine, NULL);
   1546   pthread_cleanup_push(CountCleanupRoutine, NULL);
   1547   pthread_cleanup_push(AbortCleanupRoutine, NULL);
   1548 
   1549   pthread_cleanup_pop(0); // Pop the abort without executing it.
   1550   pthread_cleanup_pop(1); // Pop one count while executing it.
   1551   ASSERT_EQ(1U, cleanup_counter);
   1552   // Exit while the other count is still on the cleanup stack.
   1553   pthread_exit(NULL);
   1554 
   1555   // Calls to pthread_cleanup_pop/pthread_cleanup_push must always be balanced.
   1556   pthread_cleanup_pop(0);
   1557 }
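
         // The balance requirement above isn't just style: pthread_cleanup_push() and
         // pthread_cleanup_pop() are typically implemented as macros that expand to an opening
         // and a closing brace respectively, so an unmatched pair in one scope won't even compile.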
   1558 
   1559 static void* PthreadCleanupStartRoutine(void*) {
   1560   PthreadCleanupTester();
   1561   return NULL;
   1562 }
   1563 
   1564 TEST(pthread, pthread_cleanup_push__pthread_cleanup_pop) {
   1565   pthread_t t;
   1566   ASSERT_EQ(0, pthread_create(&t, NULL, PthreadCleanupStartRoutine, NULL));
   1567   pthread_join(t, NULL);
   1568   ASSERT_EQ(2U, cleanup_counter);
   1569 }
   1570 
   1571 TEST(pthread, PTHREAD_MUTEX_DEFAULT_is_PTHREAD_MUTEX_NORMAL) {
   1572   ASSERT_EQ(PTHREAD_MUTEX_NORMAL, PTHREAD_MUTEX_DEFAULT);
   1573 }
   1574 
   1575 TEST(pthread, pthread_mutexattr_gettype) {
   1576   pthread_mutexattr_t attr;
   1577   ASSERT_EQ(0, pthread_mutexattr_init(&attr));
   1578 
   1579   int attr_type;
   1580 
   1581   ASSERT_EQ(0, pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_NORMAL));
   1582   ASSERT_EQ(0, pthread_mutexattr_gettype(&attr, &attr_type));
   1583   ASSERT_EQ(PTHREAD_MUTEX_NORMAL, attr_type);
   1584 
   1585   ASSERT_EQ(0, pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK));
   1586   ASSERT_EQ(0, pthread_mutexattr_gettype(&attr, &attr_type));
   1587   ASSERT_EQ(PTHREAD_MUTEX_ERRORCHECK, attr_type);
   1588 
   1589   ASSERT_EQ(0, pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE));
   1590   ASSERT_EQ(0, pthread_mutexattr_gettype(&attr, &attr_type));
   1591   ASSERT_EQ(PTHREAD_MUTEX_RECURSIVE, attr_type);
   1592 
   1593   ASSERT_EQ(0, pthread_mutexattr_destroy(&attr));
   1594 }
   1595 
   1596 struct PthreadMutex {
   1597   pthread_mutex_t lock;
   1598 
   1599   explicit PthreadMutex(int mutex_type) {
   1600     init(mutex_type);
   1601   }
   1602 
   1603   ~PthreadMutex() {
   1604     destroy();
   1605   }
   1606 
   1607  private:
   1608   void init(int mutex_type) {
   1609     pthread_mutexattr_t attr;
   1610     ASSERT_EQ(0, pthread_mutexattr_init(&attr));
   1611     ASSERT_EQ(0, pthread_mutexattr_settype(&attr, mutex_type));
   1612     ASSERT_EQ(0, pthread_mutex_init(&lock, &attr));
   1613     ASSERT_EQ(0, pthread_mutexattr_destroy(&attr));
   1614   }
   1615 
   1616   void destroy() {
   1617     ASSERT_EQ(0, pthread_mutex_destroy(&lock));
   1618   }
   1619 
   1620   DISALLOW_COPY_AND_ASSIGN(PthreadMutex);
   1621 };
   1622 
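         // The next three tests exercise the specified behavior of each mutex type: NORMAL does
         // no checking (relocking would self-deadlock, so it isn't attempted), ERRORCHECK returns
         // EDEADLK on relock and EPERM when unlocking a mutex it doesn't own, and RECURSIVE
         // counts nested locks, requiring a matching number of unlocks.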
   1623 TEST(pthread, pthread_mutex_lock_NORMAL) {
   1624   PthreadMutex m(PTHREAD_MUTEX_NORMAL);
   1625 
   1626   ASSERT_EQ(0, pthread_mutex_lock(&m.lock));
   1627   ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));
   1628   ASSERT_EQ(0, pthread_mutex_trylock(&m.lock));
   1629   ASSERT_EQ(EBUSY, pthread_mutex_trylock(&m.lock));
   1630   ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));
   1631 }
   1632 
   1633 TEST(pthread, pthread_mutex_lock_ERRORCHECK) {
   1634   PthreadMutex m(PTHREAD_MUTEX_ERRORCHECK);
   1635 
   1636   ASSERT_EQ(0, pthread_mutex_lock(&m.lock));
   1637   ASSERT_EQ(EDEADLK, pthread_mutex_lock(&m.lock));
   1638   ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));
   1639   ASSERT_EQ(0, pthread_mutex_trylock(&m.lock));
   1640   ASSERT_EQ(EBUSY, pthread_mutex_trylock(&m.lock));
   1641   ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));
   1642   ASSERT_EQ(EPERM, pthread_mutex_unlock(&m.lock));
   1643 }
   1644 
   1645 TEST(pthread, pthread_mutex_lock_RECURSIVE) {
   1646   PthreadMutex m(PTHREAD_MUTEX_RECURSIVE);
   1647 
   1648   ASSERT_EQ(0, pthread_mutex_lock(&m.lock));
   1649   ASSERT_EQ(0, pthread_mutex_lock(&m.lock));
   1650   ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));
   1651   ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));
   1652   ASSERT_EQ(0, pthread_mutex_trylock(&m.lock));
   1653   ASSERT_EQ(0, pthread_mutex_trylock(&m.lock));
   1654   ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));
   1655   ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));
   1656   ASSERT_EQ(EPERM, pthread_mutex_unlock(&m.lock));
   1657 }
   1658 
   1659 TEST(pthread, pthread_mutex_init_same_as_static_initializers) {
   1660   pthread_mutex_t lock_normal = PTHREAD_MUTEX_INITIALIZER;
   1661   PthreadMutex m1(PTHREAD_MUTEX_NORMAL);
   1662   ASSERT_EQ(0, memcmp(&lock_normal, &m1.lock, sizeof(pthread_mutex_t)));
    1663   ASSERT_EQ(0, pthread_mutex_destroy(&lock_normal));
   1664 
   1665   pthread_mutex_t lock_errorcheck = PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP;
   1666   PthreadMutex m2(PTHREAD_MUTEX_ERRORCHECK);
   1667   ASSERT_EQ(0, memcmp(&lock_errorcheck, &m2.lock, sizeof(pthread_mutex_t)));
    1668   ASSERT_EQ(0, pthread_mutex_destroy(&lock_errorcheck));
   1669 
   1670   pthread_mutex_t lock_recursive = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;
   1671   PthreadMutex m3(PTHREAD_MUTEX_RECURSIVE);
   1672   ASSERT_EQ(0, memcmp(&lock_recursive, &m3.lock, sizeof(pthread_mutex_t)));
   1673   ASSERT_EQ(0, pthread_mutex_destroy(&lock_recursive));
   1674 }

    1675 class MutexWakeupHelper {
   1676  private:
   1677   PthreadMutex m;
   1678   enum Progress {
   1679     LOCK_INITIALIZED,
   1680     LOCK_WAITING,
   1681     LOCK_RELEASED,
   1682     LOCK_ACCESSED
   1683   };
   1684   std::atomic<Progress> progress;
   1685   std::atomic<pid_t> tid;
   1686 
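           // The helper thread publishes its tid, then blocks acquiring the mutex that test()
           // already holds; test() uses WaitUntilThreadSleep() to be sure the helper is really
           // asleep in the kernel before releasing the lock and checking the wakeup sequence.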
   1687   static void thread_fn(MutexWakeupHelper* helper) {
   1688     helper->tid = gettid();
   1689     ASSERT_EQ(LOCK_INITIALIZED, helper->progress);
   1690     helper->progress = LOCK_WAITING;
   1691 
   1692     ASSERT_EQ(0, pthread_mutex_lock(&helper->m.lock));
   1693     ASSERT_EQ(LOCK_RELEASED, helper->progress);
   1694     ASSERT_EQ(0, pthread_mutex_unlock(&helper->m.lock));
   1695 
   1696     helper->progress = LOCK_ACCESSED;
   1697   }
   1698 
   1699  public:
   1700   explicit MutexWakeupHelper(int mutex_type) : m(mutex_type) {
   1701   }
   1702 
   1703   void test() {
   1704     ASSERT_EQ(0, pthread_mutex_lock(&m.lock));
   1705     progress = LOCK_INITIALIZED;
   1706     tid = 0;
   1707 
   1708     pthread_t thread;
   1709     ASSERT_EQ(0, pthread_create(&thread, NULL,
   1710       reinterpret_cast<void* (*)(void*)>(MutexWakeupHelper::thread_fn), this));
   1711 
   1712     WaitUntilThreadSleep(tid);
   1713     ASSERT_EQ(LOCK_WAITING, progress);
   1714 
   1715     progress = LOCK_RELEASED;
   1716     ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));
   1717 
   1718     ASSERT_EQ(0, pthread_join(thread, NULL));
   1719     ASSERT_EQ(LOCK_ACCESSED, progress);
   1720   }
   1721 };
   1722 
   1723 TEST(pthread, pthread_mutex_NORMAL_wakeup) {
   1724   MutexWakeupHelper helper(PTHREAD_MUTEX_NORMAL);
   1725   helper.test();
   1726 }
   1727 
   1728 TEST(pthread, pthread_mutex_ERRORCHECK_wakeup) {
   1729   MutexWakeupHelper helper(PTHREAD_MUTEX_ERRORCHECK);
   1730   helper.test();
   1731 }
   1732 
   1733 TEST(pthread, pthread_mutex_RECURSIVE_wakeup) {
   1734   MutexWakeupHelper helper(PTHREAD_MUTEX_RECURSIVE);
   1735   helper.test();
   1736 }
   1737 
   1738 TEST(pthread, pthread_mutex_owner_tid_limit) {
   1739 #if defined(__BIONIC__) && !defined(__LP64__)
   1740   FILE* fp = fopen("/proc/sys/kernel/pid_max", "r");
   1741   ASSERT_TRUE(fp != NULL);
   1742   long pid_max;
   1743   ASSERT_EQ(1, fscanf(fp, "%ld", &pid_max));
   1744   fclose(fp);
    1745   // Bionic's pthread_mutex implementation on 32-bit devices uses 16 bits to represent the owner tid.
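           // 16 bits can hold tids 0..65535, and pid_max is an exclusive upper bound on
           // allocated tids, so pid_max <= 65536 guarantees every owner tid fits.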
   1746   ASSERT_LE(pid_max, 65536);
   1747 #else
    1748   GTEST_LOG_(INFO) << "This test does nothing because pthread_mutex supports 32-bit tids.\n";
   1749 #endif
   1750 }
   1751 
   1752 TEST(pthread, pthread_mutex_timedlock) {
   1753   pthread_mutex_t m;
   1754   ASSERT_EQ(0, pthread_mutex_init(&m, nullptr));
   1755 
   1756   // If the mutex is already locked, pthread_mutex_timedlock should time out.
   1757   ASSERT_EQ(0, pthread_mutex_lock(&m));
   1758 
   1759   timespec ts;
   1760   ASSERT_EQ(0, clock_gettime(CLOCK_REALTIME, &ts));
   1761   ASSERT_EQ(ETIMEDOUT, pthread_mutex_timedlock(&m, &ts));
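           // tv_nsec must be in [0, NS_PER_S); out-of-range values are invalid, while any
           // well-formed absolute time that is already in the past simply times out.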
   1762   ts.tv_nsec = -1;
   1763   ASSERT_EQ(EINVAL, pthread_mutex_timedlock(&m, &ts));
   1764   ts.tv_nsec = NS_PER_S;
   1765   ASSERT_EQ(EINVAL, pthread_mutex_timedlock(&m, &ts));
   1766   ts.tv_nsec = NS_PER_S - 1;
   1767   ts.tv_sec = -1;
   1768   ASSERT_EQ(ETIMEDOUT, pthread_mutex_timedlock(&m, &ts));
   1769 
   1770   // If the mutex is unlocked, pthread_mutex_timedlock should succeed.
   1771   ASSERT_EQ(0, pthread_mutex_unlock(&m));
   1772 
   1773   ASSERT_EQ(0, clock_gettime(CLOCK_REALTIME, &ts));
   1774   ts.tv_sec += 1;
   1775   ASSERT_EQ(0, pthread_mutex_timedlock(&m, &ts));
   1776 
   1777   ASSERT_EQ(0, pthread_mutex_unlock(&m));
   1778   ASSERT_EQ(0, pthread_mutex_destroy(&m));
   1779 }
   1780 
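         // Hands out pointers that are aligned to `alignment` but deliberately misaligned with
         // respect to anything stronger (p % (2 * alignment) == alignment), so a pthread type
         // that secretly requires more than 4-byte alignment would be caught by the test below.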
   1781 class StrictAlignmentAllocator {
   1782  public:
   1783   void* allocate(size_t size, size_t alignment) {
   1784     char* p = new char[size + alignment * 2];
   1785     allocated_array.push_back(p);
   1786     while (!is_strict_aligned(p, alignment)) {
   1787       ++p;
   1788     }
   1789     return p;
   1790   }
   1791 
   1792   ~StrictAlignmentAllocator() {
   1793     for (const auto& p : allocated_array) {
   1794       delete[] p;
   1795     }
   1796   }
   1797 
   1798  private:
   1799   bool is_strict_aligned(char* p, size_t alignment) {
   1800     return (reinterpret_cast<uintptr_t>(p) % (alignment * 2)) == alignment;
   1801   }
   1802 
   1803   std::vector<char*> allocated_array;
   1804 };
   1805 
   1806 TEST(pthread, pthread_types_allow_four_bytes_alignment) {
   1807 #if defined(__BIONIC__)
    1808   // For binary compatibility with old versions, we need to allow 4-byte-aligned data for pthread types.
   1809   StrictAlignmentAllocator allocator;
   1810   pthread_mutex_t* mutex = reinterpret_cast<pthread_mutex_t*>(
   1811                              allocator.allocate(sizeof(pthread_mutex_t), 4));
   1812   ASSERT_EQ(0, pthread_mutex_init(mutex, NULL));
   1813   ASSERT_EQ(0, pthread_mutex_lock(mutex));
   1814   ASSERT_EQ(0, pthread_mutex_unlock(mutex));
   1815   ASSERT_EQ(0, pthread_mutex_destroy(mutex));
   1816 
   1817   pthread_cond_t* cond = reinterpret_cast<pthread_cond_t*>(
   1818                            allocator.allocate(sizeof(pthread_cond_t), 4));
   1819   ASSERT_EQ(0, pthread_cond_init(cond, NULL));
   1820   ASSERT_EQ(0, pthread_cond_signal(cond));
   1821   ASSERT_EQ(0, pthread_cond_broadcast(cond));
   1822   ASSERT_EQ(0, pthread_cond_destroy(cond));
   1823 
   1824   pthread_rwlock_t* rwlock = reinterpret_cast<pthread_rwlock_t*>(
   1825                                allocator.allocate(sizeof(pthread_rwlock_t), 4));
   1826   ASSERT_EQ(0, pthread_rwlock_init(rwlock, NULL));
   1827   ASSERT_EQ(0, pthread_rwlock_rdlock(rwlock));
   1828   ASSERT_EQ(0, pthread_rwlock_unlock(rwlock));
   1829   ASSERT_EQ(0, pthread_rwlock_wrlock(rwlock));
   1830   ASSERT_EQ(0, pthread_rwlock_unlock(rwlock));
   1831   ASSERT_EQ(0, pthread_rwlock_destroy(rwlock));
   1832 
   1833 #else
   1834   GTEST_LOG_(INFO) << "This test tests bionic implementation details.";
   1835 #endif
   1836 }
   1837 
   1838 TEST(pthread, pthread_mutex_lock_null_32) {
   1839 #if defined(__BIONIC__) && !defined(__LP64__)
   1840   // For LP32, the pthread lock/unlock functions allow a NULL mutex and return
   1841   // EINVAL in that case: http://b/19995172.
   1842   //
    1843   // We decorate the public definition with _Nonnull so that people recompiling
    1844   // their code will get a warning and might fix their bug, but we need to pass
   1845   // NULL here to test that we remain compatible.
   1846   pthread_mutex_t* null_value = nullptr;
   1847   ASSERT_EQ(EINVAL, pthread_mutex_lock(null_value));
   1848 #else
   1849   GTEST_LOG_(INFO) << "This test tests bionic implementation details on 32 bit devices.";
   1850 #endif
   1851 }
   1852 
   1853 TEST(pthread, pthread_mutex_unlock_null_32) {
   1854 #if defined(__BIONIC__) && !defined(__LP64__)
   1855   // For LP32, the pthread lock/unlock functions allow a NULL mutex and return
   1856   // EINVAL in that case: http://b/19995172.
   1857   //
   1858   // We decorate the public defintion with _Nonnull so that people recompiling
   1859   // their code with get a warning and might fix their bug, but need to pass
   1860   // NULL here to test that we remain compatible.
   1861   pthread_mutex_t* null_value = nullptr;
   1862   ASSERT_EQ(EINVAL, pthread_mutex_unlock(null_value));
   1863 #else
   1864   GTEST_LOG_(INFO) << "This test tests bionic implementation details on 32 bit devices.";
   1865 #endif
   1866 }
   1867 
   1868 TEST_F(pthread_DeathTest, pthread_mutex_lock_null_64) {
   1869 #if defined(__BIONIC__) && defined(__LP64__)
   1870   pthread_mutex_t* null_value = nullptr;
   1871   ASSERT_EXIT(pthread_mutex_lock(null_value), testing::KilledBySignal(SIGSEGV), "");
   1872 #else
   1873   GTEST_LOG_(INFO) << "This test tests bionic implementation details on 64 bit devices.";
   1874 #endif
   1875 }
   1876 
   1877 TEST_F(pthread_DeathTest, pthread_mutex_unlock_null_64) {
   1878 #if defined(__BIONIC__) && defined(__LP64__)
   1879   pthread_mutex_t* null_value = nullptr;
   1880   ASSERT_EXIT(pthread_mutex_unlock(null_value), testing::KilledBySignal(SIGSEGV), "");
   1881 #else
   1882   GTEST_LOG_(INFO) << "This test tests bionic implementation details on 64 bit devices.";
   1883 #endif
   1884 }
   1885 
   1886 extern _Unwind_Reason_Code FrameCounter(_Unwind_Context* ctx, void* arg);
   1887 
   1888 static volatile bool signal_handler_on_altstack_done;
   1889 
   1890 __attribute__((__noinline__))
   1891 static void signal_handler_backtrace() {
   1892   // Check if we have enough stack space for unwinding.
   1893   int count = 0;
   1894   _Unwind_Backtrace(FrameCounter, &count);
   1895   ASSERT_GT(count, 0);
   1896 }
   1897 
   1898 __attribute__((__noinline__))
   1899 static void signal_handler_logging() {
   1900   // Check if we have enough stack space for logging.
   1901   std::string s(2048, '*');
   1902   GTEST_LOG_(INFO) << s;
   1903   signal_handler_on_altstack_done = true;
   1904 }
   1905 
   1906 __attribute__((__noinline__))
   1907 static void signal_handler_snprintf() {
   1908   // Check if we have enough stack space for snprintf to a PATH_MAX buffer, plus some extra.
   1909   char buf[PATH_MAX + 2048];
   1910   ASSERT_GT(snprintf(buf, sizeof(buf), "/proc/%d/status", getpid()), 0);
   1911 }
   1912 
   1913 static void SignalHandlerOnAltStack(int signo, siginfo_t*, void*) {
   1914   ASSERT_EQ(SIGUSR1, signo);
   1915   signal_handler_backtrace();
   1916   signal_handler_logging();
   1917   signal_handler_snprintf();
   1918 }
   1919 
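         // Bionic installs an alternate signal stack for each thread; the handlers above check
         // that it leaves enough room for typical signal-handler work: unwinding, logging, and
         // snprintf into a PATH_MAX-sized buffer.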
   1920 TEST(pthread, big_enough_signal_stack) {
   1921   signal_handler_on_altstack_done = false;
   1922   ScopedSignalHandler handler(SIGUSR1, SignalHandlerOnAltStack, SA_SIGINFO | SA_ONSTACK);
   1923   kill(getpid(), SIGUSR1);
   1924   ASSERT_TRUE(signal_handler_on_altstack_done);
   1925 }
   1926 
   1927 TEST(pthread, pthread_barrierattr_smoke) {
   1928   pthread_barrierattr_t attr;
   1929   ASSERT_EQ(0, pthread_barrierattr_init(&attr));
   1930   int pshared;
   1931   ASSERT_EQ(0, pthread_barrierattr_getpshared(&attr, &pshared));
   1932   ASSERT_EQ(PTHREAD_PROCESS_PRIVATE, pshared);
   1933   ASSERT_EQ(0, pthread_barrierattr_setpshared(&attr, PTHREAD_PROCESS_SHARED));
   1934   ASSERT_EQ(0, pthread_barrierattr_getpshared(&attr, &pshared));
   1935   ASSERT_EQ(PTHREAD_PROCESS_SHARED, pshared);
   1936   ASSERT_EQ(0, pthread_barrierattr_destroy(&attr));
   1937 }
   1938 
   1939 struct BarrierTestHelperData {
   1940   size_t thread_count;
   1941   pthread_barrier_t barrier;
   1942   std::atomic<int> finished_mask;
   1943   std::atomic<int> serial_thread_count;
   1944   size_t iteration_count;
   1945   std::atomic<size_t> finished_iteration_count;
   1946 
   1947   BarrierTestHelperData(size_t thread_count, size_t iteration_count)
   1948       : thread_count(thread_count), finished_mask(0), serial_thread_count(0),
   1949         iteration_count(iteration_count), finished_iteration_count(0) {
   1950   }
   1951 };
   1952 
   1953 struct BarrierTestHelperArg {
   1954   int id;
   1955   BarrierTestHelperData* data;
   1956 };
   1957 
   1958 static void BarrierTestHelper(BarrierTestHelperArg* arg) {
   1959   for (size_t i = 0; i < arg->data->iteration_count; ++i) {
   1960     int result = pthread_barrier_wait(&arg->data->barrier);
   1961     if (result == PTHREAD_BARRIER_SERIAL_THREAD) {
   1962       arg->data->serial_thread_count++;
   1963     } else {
   1964       ASSERT_EQ(0, result);
   1965     }
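             // Each thread sets its own bit; the thread whose fetch_or completes the mask is the
             // last one through, so it checks this iteration's invariants and resets the shared
             // state for the next round.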
   1966     int mask = arg->data->finished_mask.fetch_or(1 << arg->id);
   1967     mask |= 1 << arg->id;
   1968     if (mask == ((1 << arg->data->thread_count) - 1)) {
   1969       ASSERT_EQ(1, arg->data->serial_thread_count);
   1970       arg->data->finished_iteration_count++;
   1971       arg->data->finished_mask = 0;
   1972       arg->data->serial_thread_count = 0;
   1973     }
   1974   }
   1975 }
   1976 
   1977 TEST(pthread, pthread_barrier_smoke) {
   1978   const size_t BARRIER_ITERATION_COUNT = 10;
   1979   const size_t BARRIER_THREAD_COUNT = 10;
   1980   BarrierTestHelperData data(BARRIER_THREAD_COUNT, BARRIER_ITERATION_COUNT);
   1981   ASSERT_EQ(0, pthread_barrier_init(&data.barrier, nullptr, data.thread_count));
   1982   std::vector<pthread_t> threads(data.thread_count);
   1983   std::vector<BarrierTestHelperArg> args(threads.size());
   1984   for (size_t i = 0; i < threads.size(); ++i) {
   1985     args[i].id = i;
   1986     args[i].data = &data;
   1987     ASSERT_EQ(0, pthread_create(&threads[i], nullptr,
   1988                                 reinterpret_cast<void* (*)(void*)>(BarrierTestHelper), &args[i]));
   1989   }
   1990   for (size_t i = 0; i < threads.size(); ++i) {
   1991     ASSERT_EQ(0, pthread_join(threads[i], nullptr));
   1992   }
   1993   ASSERT_EQ(data.iteration_count, data.finished_iteration_count);
   1994   ASSERT_EQ(0, pthread_barrier_destroy(&data.barrier));
   1995 }
   1996 
   1997 struct BarrierDestroyTestArg {
   1998   std::atomic<int> tid;
   1999   pthread_barrier_t* barrier;
   2000 };
   2001 
   2002 static void BarrierDestroyTestHelper(BarrierDestroyTestArg* arg) {
   2003   arg->tid = gettid();
   2004   ASSERT_EQ(0, pthread_barrier_wait(arg->barrier));
   2005 }
   2006 
   2007 TEST(pthread, pthread_barrier_destroy) {
   2008   pthread_barrier_t barrier;
   2009   ASSERT_EQ(0, pthread_barrier_init(&barrier, nullptr, 2));
   2010   pthread_t thread;
   2011   BarrierDestroyTestArg arg;
   2012   arg.tid = 0;
   2013   arg.barrier = &barrier;
   2014   ASSERT_EQ(0, pthread_create(&thread, nullptr,
   2015                               reinterpret_cast<void* (*)(void*)>(BarrierDestroyTestHelper), &arg));
   2016   WaitUntilThreadSleep(arg.tid);
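           // The helper is now blocked inside pthread_barrier_wait(), so destruction must fail
           // with EBUSY until this thread passes through the barrier too.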
   2017   ASSERT_EQ(EBUSY, pthread_barrier_destroy(&barrier));
   2018   ASSERT_EQ(PTHREAD_BARRIER_SERIAL_THREAD, pthread_barrier_wait(&barrier));
    2019   // Verify that the barrier can be destroyed immediately after pthread_barrier_wait() returns.
   2020   ASSERT_EQ(0, pthread_barrier_destroy(&barrier));
   2021   ASSERT_EQ(0, pthread_join(thread, nullptr));
   2022 #if defined(__BIONIC__)
   2023   ASSERT_EQ(EINVAL, pthread_barrier_destroy(&barrier));
   2024 #endif
   2025 }
   2026 
   2027 struct BarrierOrderingTestHelperArg {
   2028   pthread_barrier_t* barrier;
   2029   size_t* array;
   2030   size_t array_length;
   2031   size_t id;
   2032 };
   2033 
    2034 static void BarrierOrderingTestHelper(BarrierOrderingTestHelperArg* arg) {
   2035   const size_t ITERATION_COUNT = 10000;
   2036   for (size_t i = 1; i <= ITERATION_COUNT; ++i) {
   2037     arg->array[arg->id] = i;
   2038     int result = pthread_barrier_wait(arg->barrier);
   2039     ASSERT_TRUE(result == 0 || result == PTHREAD_BARRIER_SERIAL_THREAD);
   2040     for (size_t j = 0; j < arg->array_length; ++j) {
   2041       ASSERT_EQ(i, arg->array[j]);
   2042     }
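             // Wait again before the next iteration: without this second barrier, a fast thread
             // could store i+1 into the array while a slow thread was still checking for i.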
   2043     result = pthread_barrier_wait(arg->barrier);
   2044     ASSERT_TRUE(result == 0 || result == PTHREAD_BARRIER_SERIAL_THREAD);
   2045   }
   2046 }
   2047 
   2048 TEST(pthread, pthread_barrier_check_ordering) {
   2049   const size_t THREAD_COUNT = 4;
   2050   pthread_barrier_t barrier;
   2051   ASSERT_EQ(0, pthread_barrier_init(&barrier, nullptr, THREAD_COUNT));
   2052   size_t array[THREAD_COUNT];
   2053   std::vector<pthread_t> threads(THREAD_COUNT);
   2054   std::vector<BarrierOrderingTestHelperArg> args(THREAD_COUNT);
   2055   for (size_t i = 0; i < THREAD_COUNT; ++i) {
   2056     args[i].barrier = &barrier;
   2057     args[i].array = array;
   2058     args[i].array_length = THREAD_COUNT;
   2059     args[i].id = i;
   2060     ASSERT_EQ(0, pthread_create(&threads[i], nullptr,
   2061                                 reinterpret_cast<void* (*)(void*)>(BarrierOrderingTestHelper),
   2062                                 &args[i]));
   2063   }
   2064   for (size_t i = 0; i < THREAD_COUNT; ++i) {
   2065     ASSERT_EQ(0, pthread_join(threads[i], nullptr));
   2066   }
   2067 }
   2068 
   2069 TEST(pthread, pthread_spinlock_smoke) {
   2070   pthread_spinlock_t lock;
   2071   ASSERT_EQ(0, pthread_spin_init(&lock, 0));
   2072   ASSERT_EQ(0, pthread_spin_trylock(&lock));
   2073   ASSERT_EQ(0, pthread_spin_unlock(&lock));
   2074   ASSERT_EQ(0, pthread_spin_lock(&lock));
   2075   ASSERT_EQ(EBUSY, pthread_spin_trylock(&lock));
   2076   ASSERT_EQ(0, pthread_spin_unlock(&lock));
   2077   ASSERT_EQ(0, pthread_spin_destroy(&lock));
   2078 }
   2079 
   2080 TEST(pthread, pthread_attr_setdetachstate) {
   2081   pthread_attr_t attr;
   2082   ASSERT_EQ(0, pthread_attr_init(&attr));
   2083 
   2084   ASSERT_EQ(0, pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED));
   2085   ASSERT_EQ(0, pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE));
   2086   ASSERT_EQ(EINVAL, pthread_attr_setdetachstate(&attr, 123));
   2087 }
   2088 
   2089 TEST(pthread, pthread_create__mmap_failures) {
   2090   pthread_attr_t attr;
   2091   ASSERT_EQ(0, pthread_attr_init(&attr));
   2092   ASSERT_EQ(0, pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED));
   2093 
   2094   const auto kPageSize = sysconf(_SC_PAGE_SIZE);
   2095 
   2096   // Use up all the VMAs. By default this is 64Ki.
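           // (The limit is /proc/sys/vm/max_map_count. Alternating the protection of adjacent
           // pages stops the kernel from merging them into a single VMA, so each mmap really
           // costs one map entry.)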
   2097   std::vector<void*> pages;
   2098   int prot = PROT_NONE;
   2099   while (true) {
   2100     void* page = mmap(nullptr, kPageSize, prot, MAP_ANON|MAP_PRIVATE, -1, 0);
   2101     if (page == MAP_FAILED) break;
   2102     pages.push_back(page);
   2103     prot = (prot == PROT_NONE) ? PROT_READ : PROT_NONE;
   2104   }
   2105 
   2106   // Try creating threads, freeing up a page each time we fail.
   2107   size_t EAGAIN_count = 0;
   2108   size_t i = 0;
   2109   for (; i < pages.size(); ++i) {
   2110     pthread_t t;
   2111     int status = pthread_create(&t, &attr, IdFn, nullptr);
   2112     if (status != EAGAIN) break;
   2113     ++EAGAIN_count;
   2114     ASSERT_EQ(0, munmap(pages[i], kPageSize));
   2115   }
   2116 
    2117   // Creating a thread uses at least six VMAs: the stack, the TLS, and a guard page on each side of both.
   2118   // So we should have seen at least six failures.
   2119   ASSERT_GE(EAGAIN_count, 6U);
   2120 
   2121   for (; i < pages.size(); ++i) {
   2122     ASSERT_EQ(0, munmap(pages[i], kPageSize));
   2123   }
   2124 }
   2125