// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "cc/resources/raster_worker_pool.h"

#include <algorithm>

#include "base/atomic_sequence_num.h"
#include "base/debug/trace_event_synthetic_delay.h"
#include "base/lazy_instance.h"
#include "base/strings/stringprintf.h"
#include "base/threading/simple_thread.h"
#include "base/threading/thread_local.h"
#include "cc/base/scoped_ptr_deque.h"

namespace cc {
namespace {

// Synthetic delay for raster tasks that are required for activation. Global to
// avoid static initializer on critical path.
struct RasterRequiredForActivationSyntheticDelayInitializer {
  RasterRequiredForActivationSyntheticDelayInitializer()
      : delay(base::debug::TraceEventSyntheticDelay::Lookup(
            "cc.RasterRequiredForActivation")) {}
  base::debug::TraceEventSyntheticDelay* delay;
};
static base::LazyInstance<RasterRequiredForActivationSyntheticDelayInitializer>
    g_raster_required_for_activation_delay = LAZY_INSTANCE_INITIALIZER;

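// Shared task graph runner for all raster worker pools. It owns the raster
// worker threads and hands each one a distinct picture clone index through
// thread-local storage.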
class RasterTaskGraphRunner : public TaskGraphRunner,
                              public base::DelegateSimpleThread::Delegate {
 public:
  RasterTaskGraphRunner() {
    size_t num_threads = RasterWorkerPool::GetNumRasterThreads();
    while (workers_.size() < num_threads) {
      scoped_ptr<base::DelegateSimpleThread> worker =
          make_scoped_ptr(new base::DelegateSimpleThread(
              this,
              base::StringPrintf("CompositorRasterWorker%u",
                                 static_cast<unsigned>(workers_.size() + 1))
                  .c_str()));
      worker->Start();
#if defined(OS_ANDROID) || defined(OS_LINUX)
      worker->SetThreadPriority(base::kThreadPriority_Background);
#endif
      workers_.push_back(worker.Pass());
    }
  }

  virtual ~RasterTaskGraphRunner() { NOTREACHED(); }

  size_t GetPictureCloneIndexForCurrentThread() {
    // Use index 0 if called on a non-raster thread.
    ThreadLocalState* thread_local_state = current_tls_.Get();
    return thread_local_state ? thread_local_state->picture_clone_index : 0;
  }

 private:
  struct ThreadLocalState {
    explicit ThreadLocalState(size_t picture_clone_index)
        : picture_clone_index(picture_clone_index) {}

    size_t picture_clone_index;
  };

  // Overridden from base::DelegateSimpleThread::Delegate:
  virtual void Run() OVERRIDE {
    // Use picture clone indices 0..num_threads-1.
    int picture_clone_index = picture_clone_index_sequence_.GetNext();
    DCHECK_LE(0, picture_clone_index);
    DCHECK_GT(RasterWorkerPool::GetNumRasterThreads(), picture_clone_index);
    current_tls_.Set(new ThreadLocalState(picture_clone_index));

    TaskGraphRunner::Run();
  }

  ScopedPtrDeque<base::DelegateSimpleThread> workers_;
  base::AtomicSequenceNumber picture_clone_index_sequence_;
  base::ThreadLocalPointer<ThreadLocalState> current_tls_;
};

base::LazyInstance<RasterTaskGraphRunner>::Leaky g_task_graph_runner =
    LAZY_INSTANCE_INITIALIZER;

const int kDefaultNumRasterThreads = 1;

int g_num_raster_threads = 0;

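// Task that signals raster completion: when run on a worker thread it posts
// |on_raster_finished_callback_| back to the origin thread via |task_runner_|.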
class RasterFinishedTaskImpl : public RasterizerTask {
 public:
  explicit RasterFinishedTaskImpl(
      base::SequencedTaskRunner* task_runner,
      const base::Closure& on_raster_finished_callback)
      : task_runner_(task_runner),
        on_raster_finished_callback_(on_raster_finished_callback) {}

  // Overridden from Task:
  virtual void RunOnWorkerThread() OVERRIDE {
    TRACE_EVENT0("cc", "RasterFinishedTaskImpl::RunOnWorkerThread");
    RasterFinished();
  }

  // Overridden from RasterizerTask:
  virtual void ScheduleOnOriginThread(RasterizerTaskClient* client) OVERRIDE {}
  virtual void CompleteOnOriginThread(RasterizerTaskClient* client) OVERRIDE {}
  virtual void RunReplyOnOriginThread() OVERRIDE {}

 protected:
  virtual ~RasterFinishedTaskImpl() {}

  void RasterFinished() {
    task_runner_->PostTask(FROM_HERE, on_raster_finished_callback_);
  }

 private:
  scoped_refptr<base::SequencedTaskRunner> task_runner_;
  const base::Closure on_raster_finished_callback_;

  DISALLOW_COPY_AND_ASSIGN(RasterFinishedTaskImpl);
};

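// Variant of RasterFinishedTaskImpl for the subset of raster tasks required
// for activation. It additionally brackets the work with the
// "cc.RasterRequiredForActivation" synthetic delay declared above.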
class RasterRequiredForActivationFinishedTaskImpl
    : public RasterFinishedTaskImpl {
 public:
  RasterRequiredForActivationFinishedTaskImpl(
      base::SequencedTaskRunner* task_runner,
      const base::Closure& on_raster_finished_callback,
      size_t tasks_required_for_activation_count)
      : RasterFinishedTaskImpl(task_runner, on_raster_finished_callback),
        tasks_required_for_activation_count_(
            tasks_required_for_activation_count) {
    if (tasks_required_for_activation_count_) {
      g_raster_required_for_activation_delay.Get().delay->BeginParallel(
          &activation_delay_end_time_);
    }
  }

  // Overridden from Task:
  virtual void RunOnWorkerThread() OVERRIDE {
    TRACE_EVENT0(
        "cc", "RasterRequiredForActivationFinishedTaskImpl::RunOnWorkerThread");

    if (tasks_required_for_activation_count_) {
      g_raster_required_for_activation_delay.Get().delay->EndParallel(
          activation_delay_end_time_);
    }
    RasterFinished();
  }

 private:
  virtual ~RasterRequiredForActivationFinishedTaskImpl() {}

  base::TimeTicks activation_delay_end_time_;
  const size_t tasks_required_for_activation_count_;

  DISALLOW_COPY_AND_ASSIGN(RasterRequiredForActivationFinishedTaskImpl);
};

}  // namespace

// This allows an external rasterize-on-demand system to run raster tasks
// with the highest priority using the same task graph runner instance.
unsigned RasterWorkerPool::kOnDemandRasterTaskPriority = 0u;
// This allows a micro benchmark system to run tasks with the highest priority,
// since it should finish as quickly as possible.
unsigned RasterWorkerPool::kBenchmarkRasterTaskPriority = 0u;
// Task priorities that make sure raster finished tasks run before any
// remaining raster tasks.
unsigned RasterWorkerPool::kRasterFinishedTaskPriority = 2u;
unsigned RasterWorkerPool::kRasterRequiredForActivationFinishedTaskPriority =
    1u;
unsigned RasterWorkerPool::kRasterTaskPriorityBase = 3u;

RasterWorkerPool::RasterWorkerPool() {}

RasterWorkerPool::~RasterWorkerPool() {}

// static
void RasterWorkerPool::SetNumRasterThreads(int num_threads) {
  DCHECK_LT(0, num_threads);
  DCHECK_EQ(0, g_num_raster_threads);

  g_num_raster_threads = num_threads;
}

// static
int RasterWorkerPool::GetNumRasterThreads() {
  if (!g_num_raster_threads)
    g_num_raster_threads = kDefaultNumRasterThreads;

  return g_num_raster_threads;
}

// static
TaskGraphRunner* RasterWorkerPool::GetTaskGraphRunner() {
  return g_task_graph_runner.Pointer();
}

// static
size_t RasterWorkerPool::GetPictureCloneIndexForCurrentThread() {
  return g_task_graph_runner.Pointer()->GetPictureCloneIndexForCurrentThread();
}

// static
scoped_refptr<RasterizerTask> RasterWorkerPool::CreateRasterFinishedTask(
    base::SequencedTaskRunner* task_runner,
    const base::Closure& on_raster_finished_callback) {
  return make_scoped_refptr(
      new RasterFinishedTaskImpl(task_runner, on_raster_finished_callback));
}

// static
scoped_refptr<RasterizerTask>
RasterWorkerPool::CreateRasterRequiredForActivationFinishedTask(
    size_t tasks_required_for_activation_count,
    base::SequencedTaskRunner* task_runner,
    const base::Closure& on_raster_finished_callback) {
  return make_scoped_refptr(new RasterRequiredForActivationFinishedTaskImpl(
      task_runner,
      on_raster_finished_callback,
      tasks_required_for_activation_count));
}

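// Runs ScheduleOnOriginThread() for every task in |graph| that has not yet
// been scheduled, so origin-thread setup happens before the graph is handed
// to the worker threads.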
// static
void RasterWorkerPool::ScheduleTasksOnOriginThread(RasterizerTaskClient* client,
                                                   TaskGraph* graph) {
  TRACE_EVENT0("cc", "Rasterizer::ScheduleTasksOnOriginThread");

  for (TaskGraph::Node::Vector::iterator it = graph->nodes.begin();
       it != graph->nodes.end();
       ++it) {
    TaskGraph::Node& node = *it;
    RasterizerTask* task = static_cast<RasterizerTask*>(node.task);

    if (!task->HasBeenScheduled()) {
      task->WillSchedule();
      task->ScheduleOnOriginThread(client);
      task->DidSchedule();
    }
  }
}

// static
void RasterWorkerPool::InsertNodeForTask(TaskGraph* graph,
                                         RasterizerTask* task,
                                         unsigned priority,
                                         size_t dependencies) {
  DCHECK(std::find_if(graph->nodes.begin(),
                      graph->nodes.end(),
                      TaskGraph::Node::TaskComparator(task)) ==
         graph->nodes.end());
  graph->nodes.push_back(TaskGraph::Node(task, priority, dependencies));
}

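// Inserts |raster_task| and its pending image decode tasks into |graph|,
// adding an edge from each pending decode task to the raster task so the
// raster task only runs once its decodes have completed.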
// static
void RasterWorkerPool::InsertNodesForRasterTask(
    TaskGraph* graph,
    RasterTask* raster_task,
    const ImageDecodeTask::Vector& decode_tasks,
    unsigned priority) {
  size_t dependencies = 0u;

  // Insert image decode tasks.
  for (ImageDecodeTask::Vector::const_iterator it = decode_tasks.begin();
       it != decode_tasks.end();
       ++it) {
    ImageDecodeTask* decode_task = it->get();

    // Skip if already decoded.
    if (decode_task->HasCompleted())
      continue;

    dependencies++;

    // Add the decode task if it doesn't already exist in the graph.
    TaskGraph::Node::Vector::iterator decode_it =
        std::find_if(graph->nodes.begin(),
                     graph->nodes.end(),
                     TaskGraph::Node::TaskComparator(decode_task));
    if (decode_it == graph->nodes.end())
      InsertNodeForTask(graph, decode_task, priority, 0u);

    graph->edges.push_back(TaskGraph::Edge(decode_task, raster_task));
  }

  InsertNodeForTask(graph, raster_task, priority, dependencies);
}

}  // namespace cc