Home | History | Annotate | Download | only in resources
      1 // Copyright 2014 The Chromium Authors. All rights reserved.
      2 // Use of this source code is governed by a BSD-style license that can be
      3 // found in the LICENSE file.
      4 
      5 #include "cc/resources/gpu_raster_worker_pool.h"
      6 
      7 #include <algorithm>
      8 
      9 #include "base/debug/trace_event.h"
     10 #include "cc/output/context_provider.h"
     11 #include "cc/resources/raster_buffer.h"
     12 #include "cc/resources/resource.h"
     13 #include "cc/resources/resource_provider.h"
     14 #include "cc/resources/scoped_gpu_raster.h"
     15 #include "gpu/command_buffer/client/gles2_interface.h"
     16 #include "third_party/skia/include/core/SkMultiPictureDraw.h"
     17 #include "third_party/skia/include/core/SkPictureRecorder.h"
     18 #include "third_party/skia/include/core/SkSurface.h"
     19 #include "third_party/skia/include/gpu/GrContext.h"
     20 #include "third_party/skia/include/utils/SkNullCanvas.h"
     21 
     22 namespace cc {
     23 namespace {
     24 
     25 class RasterBufferImpl : public RasterBuffer {
     26  public:
     27   RasterBufferImpl(ResourceProvider* resource_provider,
     28                    const Resource* resource,
     29                    SkMultiPictureDraw* multi_picture_draw)
     30       : resource_provider_(resource_provider),
     31         resource_(resource),
     32         surface_(resource_provider->LockForWriteToSkSurface(resource->id())),
     33         multi_picture_draw_(multi_picture_draw) {}
     34   virtual ~RasterBufferImpl() {
     35     resource_provider_->UnlockForWriteToSkSurface(resource_->id());
     36   }
     37 
     38   // Overridden from RasterBuffer:
     39   virtual skia::RefPtr<SkCanvas> AcquireSkCanvas() OVERRIDE {
     40     if (!surface_)
     41       return skia::AdoptRef(SkCreateNullCanvas());
     42 
     43     skia::RefPtr<SkCanvas> canvas = skia::SharePtr(recorder_.beginRecording(
     44         resource_->size().width(), resource_->size().height()));
     45 
     46     // Balanced with restore() call in ReleaseSkCanvas. save()/restore() calls
     47     // are needed to ensure that canvas returns to its previous state after use.
     48     canvas->save();
     49     return canvas;
     50   }
     51   virtual void ReleaseSkCanvas(const skia::RefPtr<SkCanvas>& canvas) OVERRIDE {
     52     if (!surface_)
     53       return;
     54 
     55     // Balanced with save() call in AcquireSkCanvas.
     56     canvas->restore();
     57 
     58     // Add the canvas and recorded picture to |multi_picture_draw_|.
     59     skia::RefPtr<SkPicture> picture = skia::AdoptRef(recorder_.endRecording());
     60     multi_picture_draw_->add(surface_->getCanvas(), picture.get());
     61   }
     62 
     63  private:
     64   ResourceProvider* resource_provider_;
     65   const Resource* resource_;
     66   SkSurface* surface_;
     67   SkMultiPictureDraw* multi_picture_draw_;
     68   SkPictureRecorder recorder_;
     69 
     70   DISALLOW_COPY_AND_ASSIGN(RasterBufferImpl);
     71 };
     72 
     73 }  // namespace
     74 
     75 // static
     76 scoped_ptr<RasterWorkerPool> GpuRasterWorkerPool::Create(
     77     base::SequencedTaskRunner* task_runner,
     78     ContextProvider* context_provider,
     79     ResourceProvider* resource_provider) {
     80   return make_scoped_ptr<RasterWorkerPool>(new GpuRasterWorkerPool(
     81       task_runner, context_provider, resource_provider));
     82 }
     83 
     84 GpuRasterWorkerPool::GpuRasterWorkerPool(base::SequencedTaskRunner* task_runner,
     85                                          ContextProvider* context_provider,
     86                                          ResourceProvider* resource_provider)
     87     : task_runner_(task_runner),
     88       task_graph_runner_(new TaskGraphRunner),
     89       namespace_token_(task_graph_runner_->GetNamespaceToken()),
     90       context_provider_(context_provider),
     91       resource_provider_(resource_provider),
     92       run_tasks_on_origin_thread_pending_(false),
     93       raster_finished_weak_ptr_factory_(this),
     94       weak_ptr_factory_(this) {
     95   DCHECK(context_provider_);
     96 }
     97 
GpuRasterWorkerPool::~GpuRasterWorkerPool() {
  // All collected tasks must have been drained via CheckForCompletedTasks()
  // before destruction; |completed_tasks_| is cleared at the end of that call.
  DCHECK_EQ(0u, completed_tasks_.size());
}
    101 
Rasterizer* GpuRasterWorkerPool::AsRasterizer() {
  // This class implements the Rasterizer interface itself.
  return this;
}
    105 
void GpuRasterWorkerPool::SetClient(RasterizerClient* client) {
  // Not owned. Notified from OnRasterFinished(); presumably must outlive this
  // pool — confirm against the caller's lifetime.
  client_ = client;
}
    109 
    110 void GpuRasterWorkerPool::Shutdown() {
    111   TRACE_EVENT0("cc", "GpuRasterWorkerPool::Shutdown");
    112 
    113   TaskGraph empty;
    114   task_graph_runner_->ScheduleTasks(namespace_token_, &empty);
    115   task_graph_runner_->WaitForTasksToFinishRunning(namespace_token_);
    116 }
    117 
void GpuRasterWorkerPool::ScheduleTasks(RasterTaskQueue* queue) {
  TRACE_EVENT0("cc", "GpuRasterWorkerPool::ScheduleTasks");

  // Mark all task sets as pending.
  raster_pending_.set();

  // Priorities increase monotonically, so earlier items in |queue| get higher
  // priority.
  unsigned priority = kRasterTaskPriorityBase;

  graph_.Reset();

  // Cancel existing OnRasterFinished callbacks.
  raster_finished_weak_ptr_factory_.InvalidateWeakPtrs();

  // One "raster finished" task per task set; each is bound to
  // OnRasterFinished() for its set via a fresh weak pointer (valid only until
  // the next ScheduleTasks() call invalidates it above).
  scoped_refptr<RasterizerTask> new_raster_finished_tasks[kNumberOfTaskSets];

  // Number of raster tasks in each task set; used as the dependency count for
  // that set's finished task below.
  size_t task_count[kNumberOfTaskSets] = {0};

  for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) {
    new_raster_finished_tasks[task_set] = CreateRasterFinishedTask(
        task_runner_.get(),
        base::Bind(&GpuRasterWorkerPool::OnRasterFinished,
                   raster_finished_weak_ptr_factory_.GetWeakPtr(),
                   task_set));
  }

  for (RasterTaskQueue::Item::Vector::const_iterator it = queue->items.begin();
       it != queue->items.end();
       ++it) {
    const RasterTaskQueue::Item& item = *it;
    RasterTask* task = item.task;
    DCHECK(!task->HasCompleted());

    // Make each task set's finished task depend on every raster task that
    // belongs to that set.
    for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) {
      if (!item.task_sets[task_set])
        continue;

      ++task_count[task_set];

      graph_.edges.push_back(
          TaskGraph::Edge(task, new_raster_finished_tasks[task_set].get()));
    }

    InsertNodesForRasterTask(&graph_, task, task->dependencies(), priority++);
  }

  // Insert the finished tasks themselves, each gated on the dependency count
  // accumulated above.
  for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) {
    InsertNodeForTask(&graph_,
                      new_raster_finished_tasks[task_set].get(),
                      kRasterFinishedTaskPriority,
                      task_count[task_set]);
  }

  ScheduleTasksOnOriginThread(this, &graph_);
  task_graph_runner_->ScheduleTasks(namespace_token_, &graph_);

  ScheduleRunTasksOnOriginThread();

  // Keep the new finished tasks alive until the next ScheduleTasks() call
  // replaces them.
  std::copy(new_raster_finished_tasks,
            new_raster_finished_tasks + kNumberOfTaskSets,
            raster_finished_tasks_);
}
    179 
    180 void GpuRasterWorkerPool::CheckForCompletedTasks() {
    181   TRACE_EVENT0("cc", "GpuRasterWorkerPool::CheckForCompletedTasks");
    182 
    183   task_graph_runner_->CollectCompletedTasks(namespace_token_,
    184                                             &completed_tasks_);
    185   for (Task::Vector::const_iterator it = completed_tasks_.begin();
    186        it != completed_tasks_.end();
    187        ++it) {
    188     RasterizerTask* task = static_cast<RasterizerTask*>(it->get());
    189 
    190     task->WillComplete();
    191     task->CompleteOnOriginThread(this);
    192     task->DidComplete();
    193 
    194     task->RunReplyOnOriginThread();
    195   }
    196   completed_tasks_.clear();
    197 }
    198 
    199 scoped_ptr<RasterBuffer> GpuRasterWorkerPool::AcquireBufferForRaster(
    200     const Resource* resource) {
    201   // RasterBuffer implementation depends on a SkSurface having been acquired for
    202   // the resource.
    203   resource_provider_->AcquireSkSurface(resource->id());
    204 
    205   return make_scoped_ptr<RasterBuffer>(
    206       new RasterBufferImpl(resource_provider_, resource, &multi_picture_draw_));
    207 }
    208 
void GpuRasterWorkerPool::ReleaseBufferForRaster(
    scoped_ptr<RasterBuffer> buffer) {
  // Nothing to do here. |buffer| is destroyed when it goes out of scope, and
  // the RasterBufferImpl destructor cleans up after itself.
}
    213 
void GpuRasterWorkerPool::OnRasterFinished(TaskSet task_set) {
  TRACE_EVENT1(
      "cc", "GpuRasterWorkerPool::OnRasterFinished", "task_set", task_set);

  // A task set may only finish once per ScheduleTasks() call; stale callbacks
  // are cancelled there via the weak-pointer factory.
  DCHECK(raster_pending_[task_set]);
  raster_pending_[task_set] = false;
  client_->DidFinishRunningTasks(task_set);
}
    222 
    223 void GpuRasterWorkerPool::ScheduleRunTasksOnOriginThread() {
    224   if (run_tasks_on_origin_thread_pending_)
    225     return;
    226 
    227   task_runner_->PostTask(
    228       FROM_HERE,
    229       base::Bind(&GpuRasterWorkerPool::RunTasksOnOriginThread,
    230                  weak_ptr_factory_.GetWeakPtr()));
    231   run_tasks_on_origin_thread_pending_ = true;
    232 }
    233 
void GpuRasterWorkerPool::RunTasksOnOriginThread() {
  TRACE_EVENT0("cc", "GpuRasterWorkerPool::RunTasksOnOriginThread");

  DCHECK(run_tasks_on_origin_thread_pending_);
  run_tasks_on_origin_thread_pending_ = false;

  // NOTE(review): ScopedGpuRaster presumably sets up GPU raster state on the
  // context for the duration of this scope (see scoped_gpu_raster.h) — it must
  // stay alive through both RunUntilIdle() and the draw() below.
  ScopedGpuRaster gpu_raster(context_provider_);
  task_graph_runner_->RunUntilIdle();

  // Draw all of the pictures that were collected. This also clears the
  // pictures and canvases added to |multi_picture_draw_|.
  multi_picture_draw_.draw();
}
    247 
    248 }  // namespace cc
    249