Home | History | Annotate | Download | only in threading
      1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
      2 // Use of this source code is governed by a BSD-style license that can be
      3 // found in the LICENSE file.
      4 
      5 #include "base/threading/worker_pool_posix.h"
      6 
      7 #include <set>
      8 
      9 #include "base/bind.h"
     10 #include "base/callback.h"
     11 #include "base/macros.h"
     12 #include "base/synchronization/condition_variable.h"
     13 #include "base/synchronization/lock.h"
     14 #include "base/synchronization/waitable_event.h"
     15 #include "base/threading/platform_thread.h"
     16 #include "testing/gtest/include/gtest/gtest.h"
     17 
     18 namespace base {
     19 
// Peer class to provide passthrough access to PosixDynamicThreadPool
// internals.  As a member class of the pool it has access to the pool's
// private state, letting the tests below inspect and manipulate it directly.
class PosixDynamicThreadPool::PosixDynamicThreadPoolPeer {
 public:
  explicit PosixDynamicThreadPoolPeer(PosixDynamicThreadPool* pool)
      : pool_(pool) {}

  // Lock guarding the pool's shared state (task queue, idle-thread count).
  Lock* lock() { return &pool_->lock_; }
  // Condition variable used to wake idle workers when work arrives; the
  // Complex test signals it directly to nudge idle threads.
  ConditionVariable* pending_tasks_available_cv() {
    return &pool_->pending_tasks_available_cv_;
  }
  // Tasks posted to the pool that no worker has picked up yet.
  const std::queue<PendingTask>& pending_tasks() const {
    return pool_->pending_tasks_;
  }
  // Number of worker threads currently idle in the pool.
  int num_idle_threads() const { return pool_->num_idle_threads_; }
  // CV the fixture waits on in WaitForIdleThreads(); presumably signalled by
  // the pool when the idle count changes — see worker_pool_posix.cc.
  ConditionVariable* num_idle_threads_cv() {
    return pool_->num_idle_threads_cv_.get();
  }
  // Replaces the pool's idle-thread CV with one the test can wait on.
  // Takes ownership of |cv| (stored in the pool's scoped pointer).
  void set_num_idle_threads_cv(ConditionVariable* cv) {
    pool_->num_idle_threads_cv_.reset(cv);
  }

 private:
  PosixDynamicThreadPool* pool_;  // Not owned; outlives this peer.

  DISALLOW_COPY_AND_ASSIGN(PosixDynamicThreadPoolPeer);
};
     46 
     47 namespace {
     48 
     49 // IncrementingTask's main purpose is to increment a counter.  It also updates a
     50 // set of unique thread ids, and signals a ConditionVariable on completion.
     51 // Note that since it does not block, there is no way to control the number of
     52 // threads used if more than one IncrementingTask is consecutively posted to the
     53 // thread pool, since the first one might finish executing before the subsequent
     54 // PostTask() calls get invoked.
     55 void IncrementingTask(Lock* counter_lock,
     56                       int* counter,
     57                       Lock* unique_threads_lock,
     58                       std::set<PlatformThreadId>* unique_threads) {
     59   {
     60     base::AutoLock locked(*unique_threads_lock);
     61     unique_threads->insert(PlatformThread::CurrentId());
     62   }
     63   base::AutoLock locked(*counter_lock);
     64   (*counter)++;
     65 }
     66 
// BlockingIncrementingTask is a simple wrapper around IncrementingTask that
// allows for waiting at the start of Run() for a WaitableEvent to be signalled.
// The struct bundles every pointer the task needs so it can be bound by value
// into a Closure; all pointees are owned by the test fixture and must outlive
// the task.
struct BlockingIncrementingTaskArgs {
  Lock* counter_lock;              // Guards |counter|.
  int* counter;                    // Incremented once per task execution.
  Lock* unique_threads_lock;       // Guards |unique_threads|.
  std::set<PlatformThreadId>* unique_threads;  // Ids of threads that ran.
  Lock* num_waiting_to_start_lock; // Guards |num_waiting_to_start|.
  int* num_waiting_to_start;       // How many tasks have begun running.
  ConditionVariable* num_waiting_to_start_cv;  // Signalled per task start.
  base::WaitableEvent* start;      // Tasks block on this until signalled.
};
     79 
     80 void BlockingIncrementingTask(const BlockingIncrementingTaskArgs& args) {
     81   {
     82     base::AutoLock num_waiting_to_start_locked(*args.num_waiting_to_start_lock);
     83     (*args.num_waiting_to_start)++;
     84   }
     85   args.num_waiting_to_start_cv->Signal();
     86   args.start->Wait();
     87   IncrementingTask(args.counter_lock, args.counter, args.unique_threads_lock,
     88                    args.unique_threads);
     89 }
     90 
// Test fixture owning a PosixDynamicThreadPool plus all the shared state the
// tasks above mutate.  The helpers block the test thread until the pool
// reaches a known state, making the threading assertions deterministic.
class PosixDynamicThreadPoolTest : public testing::Test {
 protected:
  PosixDynamicThreadPoolTest()
      : pool_(new base::PosixDynamicThreadPool("dynamic_pool", 60 * 60)),
        peer_(pool_.get()),
        counter_(0),
        num_waiting_to_start_(0),
        num_waiting_to_start_cv_(&num_waiting_to_start_lock_),
        start_(WaitableEvent::ResetPolicy::MANUAL,
               WaitableEvent::InitialState::NOT_SIGNALED) {}

  void SetUp() override {
    // Install a fresh idle-thread CV (tied to the pool's own lock) so the
    // fixture can wait on idle-count changes via the peer.  The pool's
    // scoped pointer takes ownership.
    peer_.set_num_idle_threads_cv(new ConditionVariable(peer_.lock()));
  }

  // Blocks until at least |num_tasks| BlockingIncrementingTasks have entered
  // Run() (i.e. are parked on |start_|).
  void WaitForTasksToStart(int num_tasks) {
    base::AutoLock num_waiting_to_start_locked(num_waiting_to_start_lock_);
    while (num_waiting_to_start_ < num_tasks) {
      num_waiting_to_start_cv_.Wait();
    }
  }

  // Blocks until the pool reports at least |num_idle_threads| idle workers.
  void WaitForIdleThreads(int num_idle_threads) {
    base::AutoLock pool_locked(*peer_.lock());
    while (peer_.num_idle_threads() < num_idle_threads) {
      peer_.num_idle_threads_cv()->Wait();
    }
  }

  // Non-blocking task: increments |counter_| and records the worker's id.
  base::Closure CreateNewIncrementingTaskCallback() {
    return base::Bind(&IncrementingTask, &counter_lock_, &counter_,
                      &unique_threads_lock_, &unique_threads_);
  }

  // Blocking task: parks on |start_| before doing the increment, so the test
  // can force several tasks to be in flight simultaneously.
  base::Closure CreateNewBlockingIncrementingTaskCallback() {
    BlockingIncrementingTaskArgs args = {
        &counter_lock_, &counter_, &unique_threads_lock_, &unique_threads_,
        &num_waiting_to_start_lock_, &num_waiting_to_start_,
        &num_waiting_to_start_cv_, &start_
    };
    return base::Bind(&BlockingIncrementingTask, args);
  }

  scoped_refptr<base::PosixDynamicThreadPool> pool_;
  base::PosixDynamicThreadPool::PosixDynamicThreadPoolPeer peer_;
  Lock counter_lock_;              // Guards |counter_|.
  int counter_;                    // Total completed increments.
  Lock unique_threads_lock_;       // Guards |unique_threads_|.
  std::set<PlatformThreadId> unique_threads_;  // Worker ids seen so far.
  Lock num_waiting_to_start_lock_; // Guards |num_waiting_to_start_|.
  int num_waiting_to_start_;       // Blocking tasks that reached Run().
  ConditionVariable num_waiting_to_start_cv_;
  base::WaitableEvent start_;      // Manual-reset gate for blocking tasks.
};
    145 
    146 }  // namespace
    147 
// Posting a single task should spin up exactly one worker, which runs the
// task and then goes idle.
TEST_F(PosixDynamicThreadPoolTest, Basic) {
  // Freshly-created pool: no workers, no recorded threads, no queued tasks.
  EXPECT_EQ(0, peer_.num_idle_threads());
  EXPECT_EQ(0U, unique_threads_.size());
  EXPECT_EQ(0U, peer_.pending_tasks().size());

  // Add one task and wait for it to be completed.
  pool_->PostTask(FROM_HERE, CreateNewIncrementingTaskCallback());

  // The worker is idle only after the task finished, so this doubles as a
  // completion barrier for the EXPECTs below.
  WaitForIdleThreads(1);

  EXPECT_EQ(1U, unique_threads_.size()) <<
      "There should be only one thread allocated for one task.";
  EXPECT_EQ(1, counter_);
}
    162 
// An idle worker should be reused for a subsequent task instead of always
// creating a new thread; only the second concurrent task forces a new one.
TEST_F(PosixDynamicThreadPoolTest, ReuseIdle) {
  // Add one task and wait for it to be completed.
  pool_->PostTask(FROM_HERE, CreateNewIncrementingTaskCallback());

  WaitForIdleThreads(1);

  // Add another 2 tasks.  One should reuse the existing worker thread.
  pool_->PostTask(FROM_HERE, CreateNewBlockingIncrementingTaskCallback());
  pool_->PostTask(FROM_HERE, CreateNewBlockingIncrementingTaskCallback());

  // Both blocking tasks park on |start_| simultaneously, guaranteeing two
  // distinct worker threads are in use before they are released.
  WaitForTasksToStart(2);
  start_.Signal();
  WaitForIdleThreads(2);

  // Two workers total (one reused), both idle again, three increments done.
  EXPECT_EQ(2U, unique_threads_.size());
  EXPECT_EQ(2, peer_.num_idle_threads());
  EXPECT_EQ(3, counter_);
}
    181 
// Two simultaneously-blocked tasks must each get their own worker thread.
TEST_F(PosixDynamicThreadPoolTest, TwoActiveTasks) {
  // Add two blocking tasks.
  pool_->PostTask(FROM_HERE, CreateNewBlockingIncrementingTaskCallback());
  pool_->PostTask(FROM_HERE, CreateNewBlockingIncrementingTaskCallback());

  // Safe to read |counter_| unlocked here: workers only write it after
  // |start_| is signalled, which has not happened yet.
  EXPECT_EQ(0, counter_) << "Blocking tasks should not have started yet.";

  WaitForTasksToStart(2);
  start_.Signal();
  WaitForIdleThreads(2);

  EXPECT_EQ(2U, unique_threads_.size());
  EXPECT_EQ(2, peer_.num_idle_threads()) << "Existing threads are now idle.";
  EXPECT_EQ(2, counter_);
}
    197 
// End-to-end scenario: reuse an idle worker, run two concurrent blocking
// tasks, drain all idle workers so they exit, then verify a fresh worker is
// created for the next task.
TEST_F(PosixDynamicThreadPoolTest, Complex) {
  // Add two non blocking tasks and wait for them to finish.
  pool_->PostTask(FROM_HERE, CreateNewIncrementingTaskCallback());

  WaitForIdleThreads(1);

  // Add two blocking tasks, start them simultaneously, and wait for them to
  // finish.
  pool_->PostTask(FROM_HERE, CreateNewBlockingIncrementingTaskCallback());
  pool_->PostTask(FROM_HERE, CreateNewBlockingIncrementingTaskCallback());

  WaitForTasksToStart(2);
  start_.Signal();
  WaitForIdleThreads(2);

  EXPECT_EQ(3, counter_);
  EXPECT_EQ(2, peer_.num_idle_threads());
  EXPECT_EQ(2U, unique_threads_.size());

  // Wake up all idle threads so they can exit.  With no pending tasks, a
  // signalled idle worker is expected to terminate; we loop, signalling one
  // worker at a time and waiting for the idle count to drop to zero.
  {
    base::AutoLock locked(*peer_.lock());
    while (peer_.num_idle_threads() > 0) {
      peer_.pending_tasks_available_cv()->Signal();
      peer_.num_idle_threads_cv()->Wait();
    }
  }

  // Add another non blocking task.  There are no threads to reuse.
  pool_->PostTask(FROM_HERE, CreateNewIncrementingTaskCallback());
  WaitForIdleThreads(1);

  // The POSIX implementation of PlatformThread::CurrentId() uses pthread_self()
  // which is not guaranteed to be unique after a thread joins. The OS X
  // implemntation of pthread_self() returns the address of the pthread_t, which
  // is merely a malloc()ed pointer stored in the first TLS slot. When a thread
  // joins and that structure is freed, the block of memory can be put on the
  // OS free list, meaning the same address could be reused in a subsequent
  // allocation. This in fact happens when allocating in a loop as this test
  // does.
  //
  // Because there are two concurrent threads, there's at least the guarantee
  // of having two unique thread IDs in the set. But after those two threads are
  // joined, the next-created thread can get a re-used ID if the allocation of
  // the pthread_t structure is taken from the free list. Therefore, there can
  // be either 2 or 3 unique thread IDs in the set at this stage in the test.
  EXPECT_TRUE(unique_threads_.size() >= 2 && unique_threads_.size() <= 3)
      << "unique_threads_.size() = " << unique_threads_.size();
  EXPECT_EQ(1, peer_.num_idle_threads());
  EXPECT_EQ(4, counter_);
}
    249 
    250 }  // namespace base
    251