// Code-browser listing header: Home | History | Annotate | Download | only in threading
      1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
      2 // Use of this source code is governed by a BSD-style license that can be
      3 // found in the LICENSE file.
      4 
      5 #include "base/threading/worker_pool_posix.h"
      6 
      7 #include <set>
      8 
      9 #include "base/bind.h"
     10 #include "base/callback.h"
     11 #include "base/synchronization/condition_variable.h"
     12 #include "base/synchronization/lock.h"
     13 #include "base/threading/platform_thread.h"
     14 #include "base/synchronization/waitable_event.h"
     15 #include "testing/gtest/include/gtest/gtest.h"
     16 
     17 namespace base {
     18 
// Peer class to provide passthrough access to PosixDynamicThreadPool internals.
// Tests use it to inspect and synchronize with the pool's private state
// (its lock, condition variables, pending-task queue and idle-thread count)
// without widening the pool's public interface.
class PosixDynamicThreadPool::PosixDynamicThreadPoolPeer {
 public:
  explicit PosixDynamicThreadPoolPeer(PosixDynamicThreadPool* pool)
      : pool_(pool) {}

  // The lock guarding all of the pool's internal state below.
  Lock* lock() { return &pool_->lock_; }
  // Signalled by the pool when a task is posted; idle workers wait on it.
  ConditionVariable* pending_tasks_available_cv() {
    return &pool_->pending_tasks_available_cv_;
  }
  // Tasks posted but not yet picked up by a worker thread.
  const std::queue<PendingTask>& pending_tasks() const {
    return pool_->pending_tasks_;
  }
  // Number of worker threads currently idle (waiting for work).
  int num_idle_threads() const { return pool_->num_idle_threads_; }
  // Condition variable the pool signals when num_idle_threads_ changes.
  // NOTE(review): tests install their own CV here via
  // set_num_idle_threads_cv() in SetUp() — presumably the pool only signals
  // it when one is present; confirm against worker_pool_posix.h.
  ConditionVariable* num_idle_threads_cv() {
    return pool_->num_idle_threads_cv_.get();
  }
  // Replaces the pool's idle-thread CV; takes ownership of |cv|.
  void set_num_idle_threads_cv(ConditionVariable* cv) {
    pool_->num_idle_threads_cv_.reset(cv);
  }

 private:
  PosixDynamicThreadPool* pool_;

  DISALLOW_COPY_AND_ASSIGN(PosixDynamicThreadPoolPeer);
};
     45 
     46 namespace {
     47 
     48 // IncrementingTask's main purpose is to increment a counter.  It also updates a
     49 // set of unique thread ids, and signals a ConditionVariable on completion.
     50 // Note that since it does not block, there is no way to control the number of
     51 // threads used if more than one IncrementingTask is consecutively posted to the
     52 // thread pool, since the first one might finish executing before the subsequent
     53 // PostTask() calls get invoked.
     54 void IncrementingTask(Lock* counter_lock,
     55                       int* counter,
     56                       Lock* unique_threads_lock,
     57                       std::set<PlatformThreadId>* unique_threads) {
     58   {
     59     base::AutoLock locked(*unique_threads_lock);
     60     unique_threads->insert(PlatformThread::CurrentId());
     61   }
     62   base::AutoLock locked(*counter_lock);
     63   (*counter)++;
     64 }
     65 
// BlockingIncrementingTask is a simple wrapper around IncrementingTask that
// allows for waiting at the start of Run() for a WaitableEvent to be signalled.
// All pointers refer to state owned by the test fixture; field order matters —
// instances are brace-initialized positionally.
struct BlockingIncrementingTaskArgs {
  Lock* counter_lock;                        // Guards |counter|.
  int* counter;                              // Incremented once per task run.
  Lock* unique_threads_lock;                 // Guards |unique_threads|.
  std::set<PlatformThreadId>* unique_threads;  // Ids of threads that ran tasks.
  Lock* num_waiting_to_start_lock;           // Guards |num_waiting_to_start|.
  int* num_waiting_to_start;                 // Tasks that reached the Wait().
  ConditionVariable* num_waiting_to_start_cv;  // Signalled per task started.
  base::WaitableEvent* start;                // Released by the test to proceed.
};
     78 
     79 void BlockingIncrementingTask(const BlockingIncrementingTaskArgs& args) {
     80   {
     81     base::AutoLock num_waiting_to_start_locked(*args.num_waiting_to_start_lock);
     82     (*args.num_waiting_to_start)++;
     83   }
     84   args.num_waiting_to_start_cv->Signal();
     85   args.start->Wait();
     86   IncrementingTask(args.counter_lock, args.counter, args.unique_threads_lock,
     87                    args.unique_threads);
     88 }
     89 
// Test fixture owning a PosixDynamicThreadPool plus all the shared state the
// Incrementing/BlockingIncrementing tasks mutate, and helpers to rendezvous
// with worker threads.
class PosixDynamicThreadPoolTest : public testing::Test {
 protected:
  PosixDynamicThreadPoolTest()
      // 60*60 seconds idle timeout: long enough that workers never time out
      // and exit on their own during a test run.
      : pool_(new base::PosixDynamicThreadPool("dynamic_pool", 60*60)),
        peer_(pool_.get()),
        counter_(0),
        num_waiting_to_start_(0),
        num_waiting_to_start_cv_(&num_waiting_to_start_lock_),
        // Manual-reset event, initially unsignalled: one Signal() releases
        // every blocked task at once.
        start_(true, false) {}

  virtual void SetUp() OVERRIDE {
    // Install a CV on the pool's lock so WaitForIdleThreads() can block
    // until the pool's idle-thread count changes.
    peer_.set_num_idle_threads_cv(new ConditionVariable(peer_.lock()));
  }

  virtual void TearDown() OVERRIDE {
    // Wake up the idle threads so they can terminate.
    if (pool_.get()) pool_->Terminate();
  }

  // Blocks until |num_tasks| BlockingIncrementingTasks have reported that
  // they are about to wait on |start_|.
  void WaitForTasksToStart(int num_tasks) {
    base::AutoLock num_waiting_to_start_locked(num_waiting_to_start_lock_);
    while (num_waiting_to_start_ < num_tasks) {
      num_waiting_to_start_cv_.Wait();
    }
  }

  // Blocks until the pool reports at least |num_idle_threads| idle workers.
  // Since a worker goes idle only after finishing its task, this also waits
  // for the corresponding tasks to complete.
  void WaitForIdleThreads(int num_idle_threads) {
    base::AutoLock pool_locked(*peer_.lock());
    while (peer_.num_idle_threads() < num_idle_threads) {
      peer_.num_idle_threads_cv()->Wait();
    }
  }

  // Returns a closure running IncrementingTask against this fixture's state.
  base::Closure CreateNewIncrementingTaskCallback() {
    return base::Bind(&IncrementingTask, &counter_lock_, &counter_,
                      &unique_threads_lock_, &unique_threads_);
  }

  // Returns a closure running BlockingIncrementingTask against this
  // fixture's state (the args struct is copied into the bound closure).
  base::Closure CreateNewBlockingIncrementingTaskCallback() {
    BlockingIncrementingTaskArgs args = {
        &counter_lock_, &counter_, &unique_threads_lock_, &unique_threads_,
        &num_waiting_to_start_lock_, &num_waiting_to_start_,
        &num_waiting_to_start_cv_, &start_
    };
    return base::Bind(&BlockingIncrementingTask, args);
  }

  scoped_refptr<base::PosixDynamicThreadPool> pool_;
  base::PosixDynamicThreadPool::PosixDynamicThreadPoolPeer peer_;
  Lock counter_lock_;                // Guards counter_.
  int counter_;                      // Total completed task increments.
  Lock unique_threads_lock_;         // Guards unique_threads_.
  std::set<PlatformThreadId> unique_threads_;  // Ids of threads used so far.
  Lock num_waiting_to_start_lock_;   // Guards num_waiting_to_start_.
  int num_waiting_to_start_;         // Blocking tasks that reached Wait().
  ConditionVariable num_waiting_to_start_cv_;
  base::WaitableEvent start_;        // Gate releasing all blocking tasks.
};
    148 
    149 }  // namespace
    150 
// A single posted task should create exactly one worker thread, run once,
// and leave that thread idle afterwards.
TEST_F(PosixDynamicThreadPoolTest, Basic) {
  // Fresh pool: no workers yet, nothing queued, nothing run.
  EXPECT_EQ(0, peer_.num_idle_threads());
  EXPECT_EQ(0U, unique_threads_.size());
  EXPECT_EQ(0U, peer_.pending_tasks().size());

  // Add one task and wait for it to be completed.
  pool_->PostTask(FROM_HERE, CreateNewIncrementingTaskCallback());

  // The worker goes idle only after the task finishes, so this also waits
  // for the increment to land.
  WaitForIdleThreads(1);

  EXPECT_EQ(1U, unique_threads_.size()) <<
      "There should be only one thread allocated for one task.";
  EXPECT_EQ(1, peer_.num_idle_threads());
  EXPECT_EQ(1, counter_);
}
    166 
// An idle worker should be reused for a subsequent task rather than a new
// thread being created for it.
TEST_F(PosixDynamicThreadPoolTest, ReuseIdle) {
  // Add one task and wait for it to be completed.
  pool_->PostTask(FROM_HERE, CreateNewIncrementingTaskCallback());

  WaitForIdleThreads(1);

  // Add another 2 tasks.  One should reuse the existing worker thread.
  pool_->PostTask(FROM_HERE, CreateNewBlockingIncrementingTaskCallback());
  pool_->PostTask(FROM_HERE, CreateNewBlockingIncrementingTaskCallback());

  // Both blocking tasks must be running concurrently before we release them;
  // this pins the thread count at exactly two.
  WaitForTasksToStart(2);
  start_.Signal();
  WaitForIdleThreads(2);

  // Two threads total (not three): the first worker was reused.
  EXPECT_EQ(2U, unique_threads_.size());
  EXPECT_EQ(2, peer_.num_idle_threads());
  EXPECT_EQ(3, counter_);
}
    185 
// Two simultaneously-blocked tasks should each get their own worker thread.
TEST_F(PosixDynamicThreadPoolTest, TwoActiveTasks) {
  // Add two blocking tasks.
  pool_->PostTask(FROM_HERE, CreateNewBlockingIncrementingTaskCallback());
  pool_->PostTask(FROM_HERE, CreateNewBlockingIncrementingTaskCallback());

  // Neither task can increment until start_ is signalled below.
  EXPECT_EQ(0, counter_) << "Blocking tasks should not have started yet.";

  WaitForTasksToStart(2);
  start_.Signal();
  WaitForIdleThreads(2);

  EXPECT_EQ(2U, unique_threads_.size());
  EXPECT_EQ(2, peer_.num_idle_threads()) << "Existing threads are now idle.";
  EXPECT_EQ(2, counter_);
}
    201 
// End-to-end scenario: reuse an idle worker, run two concurrent blocking
// tasks, force all idle workers to exit, then verify a fresh worker is
// created for the next task.
TEST_F(PosixDynamicThreadPoolTest, Complex) {
  // Add one non blocking task and wait for it to finish.
  // (Only a single task is posted here; the counter math below — 1 + 2 + 1
  // = 4 — depends on that.)
  pool_->PostTask(FROM_HERE, CreateNewIncrementingTaskCallback());

  WaitForIdleThreads(1);

  // Add two blocking tasks, start them simultaneously, and wait for them to
  // finish.
  pool_->PostTask(FROM_HERE, CreateNewBlockingIncrementingTaskCallback());
  pool_->PostTask(FROM_HERE, CreateNewBlockingIncrementingTaskCallback());

  WaitForTasksToStart(2);
  start_.Signal();
  WaitForIdleThreads(2);

  EXPECT_EQ(3, counter_);
  EXPECT_EQ(2, peer_.num_idle_threads());
  EXPECT_EQ(2U, unique_threads_.size());

  // Wake up all idle threads so they can exit.
  // Each Signal() on the pending-tasks CV wakes one idle worker (which,
  // finding no work from a terminating/empty state, exits); the idle-threads
  // CV wait lets the count drain one thread at a time.
  {
    base::AutoLock locked(*peer_.lock());
    while (peer_.num_idle_threads() > 0) {
      peer_.pending_tasks_available_cv()->Signal();
      peer_.num_idle_threads_cv()->Wait();
    }
  }

  // Add another non blocking task.  There are no threads to reuse.
  pool_->PostTask(FROM_HERE, CreateNewIncrementingTaskCallback());
  WaitForIdleThreads(1);

  // The POSIX implementation of PlatformThread::CurrentId() uses pthread_self()
  // which is not guaranteed to be unique after a thread joins. The OS X
  // implementation of pthread_self() returns the address of the pthread_t, which
  // is merely a malloc()ed pointer stored in the first TLS slot. When a thread
  // joins and that structure is freed, the block of memory can be put on the
  // OS free list, meaning the same address could be reused in a subsequent
  // allocation. This in fact happens when allocating in a loop as this test
  // does.
  //
  // Because there are two concurrent threads, there's at least the guarantee
  // of having two unique thread IDs in the set. But after those two threads are
  // joined, the next-created thread can get a re-used ID if the allocation of
  // the pthread_t structure is taken from the free list. Therefore, there can
  // be either 2 or 3 unique thread IDs in the set at this stage in the test.
  EXPECT_TRUE(unique_threads_.size() >= 2 && unique_threads_.size() <= 3)
      << "unique_threads_.size() = " << unique_threads_.size();
  EXPECT_EQ(1, peer_.num_idle_threads());
  EXPECT_EQ(4, counter_);
}
    253 
    254 }  // namespace base
    255