Home | History | Annotate | Download | only in service
      1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
      2 // Use of this source code is governed by a BSD-style license that can be
      3 // found in the LICENSE file.
      4 
      5 #include "gpu/command_buffer/service/gpu_scheduler.h"
      6 
      7 #include "base/bind.h"
      8 #include "base/command_line.h"
      9 #include "base/compiler_specific.h"
     10 #include "base/debug/trace_event.h"
     11 #include "base/message_loop/message_loop.h"
     12 #include "base/time/time.h"
     13 #include "ui/gl/gl_bindings.h"
     14 #include "ui/gl/gl_fence.h"
     15 #include "ui/gl/gl_switches.h"
     16 
     17 #if defined(OS_WIN)
     18 #include "base/win/windows_version.h"
     19 #endif
     20 
     21 using ::base::SharedMemory;
     22 
     23 namespace gpu {
     24 
namespace {
// Delay in milliseconds before a timed-out unscheduled scheduler is forced
// back into the scheduled state on pre-Vista Windows (see SetScheduled).
const int64 kRescheduleTimeOutDelay = 1000;
// Maximum time in milliseconds to wait for an unschedule fence to complete
// before running its deferred task anyway (see PollUnscheduleFences).
const int64 kUnscheduleFenceTimeOutDelay = 10000;
}
     29 
// NOTE(review): raw pointers are stored without ownership transfer;
// presumably the caller keeps |command_buffer|, |handler| and |decoder|
// alive for the scheduler's lifetime -- confirm against callers.
// |decoder| may be null; every use of decoder_ in this file is null-checked
// except the parse-error path in PutChanged.
GpuScheduler::GpuScheduler(CommandBuffer* command_buffer,
                           AsyncAPIInterface* handler,
                           gles2::GLES2Decoder* decoder)
    : command_buffer_(command_buffer),
      handler_(handler),
      decoder_(decoder),
      unscheduled_count_(0),
      rescheduled_count_(0),
      reschedule_task_factory_(this),
      was_preempted_(false) {}
     40 
// Destroying reschedule_task_factory_ invalidates its weak pointers, which
// cancels any pending RescheduleTimeOut task; remaining members clean up via
// their own destructors.
GpuScheduler::~GpuScheduler() {
}
     43 
// Processes commands from the ring buffer until it is empty, an error is
// recorded, a command defers itself, or the scheduler is unscheduled or
// preempted. Called when the client advances the put pointer.
void GpuScheduler::PutChanged() {
  TRACE_EVENT1(
     "gpu", "GpuScheduler:PutChanged",
     "decoder", decoder_ ? decoder_->GetLogger()->GetLogPrefix() : "None");

  CommandBuffer::State state = command_buffer_->GetState();

  // If there is no parser, exit.
  if (!parser_.get()) {
    DCHECK_EQ(state.get_offset, state.put_offset);
    return;
  }

  parser_->set_put(state.put_offset);
  if (state.error != error::kNoError)
    return;

  // Check that the GPU has passed all fences.
  if (!PollUnscheduleFences())
    return;

  // One of the unschedule fence tasks might have unscheduled us.
  if (!IsScheduled())
    return;

  // Time spent in this loop is charged to the decoder below.
  base::TimeTicks begin_time(base::TimeTicks::HighResNow());
  error::Error error = error::kNoError;
  while (!parser_->IsEmpty()) {
    if (IsPreempted())
      break;

    DCHECK(IsScheduled());
    DCHECK(unschedule_fences_.empty());

    error = parser_->ProcessCommand();

    // kDeferCommandUntilLater means the handler unscheduled itself; stop
    // without advancing so the same command is retried when rescheduled.
    if (error == error::kDeferCommandUntilLater) {
      DCHECK_GT(unscheduled_count_, 0);
      break;
    }

    // TODO(piman): various classes duplicate various pieces of state, leading
    // to needlessly complex update logic. It should be possible to simply
    // share the state across all of them.
    command_buffer_->SetGetOffset(static_cast<int32>(parser_->get()));

    if (error::IsError(error)) {
      // NOTE(review): decoder_ is dereferenced here without the null check
      // used elsewhere in this file -- presumably a parse error implies a
      // decoder exists; confirm for decoder-less configurations.
      LOG(ERROR) << "[" << decoder_ << "] "
                 << "GPU PARSE ERROR: " << error;
      command_buffer_->SetContextLostReason(decoder_->GetContextLostReason());
      command_buffer_->SetParseError(error);
      break;
    }

    if (!command_processed_callback_.is_null())
      command_processed_callback_.Run();

    // A command may have unscheduled us (e.g. via DeferToFence).
    if (unscheduled_count_ > 0)
      break;
  }

  if (decoder_) {
    // Surface a context loss detected by the decoder even when command
    // parsing itself reported no error.
    if (!error::IsError(error) && decoder_->WasContextLost()) {
      command_buffer_->SetContextLostReason(decoder_->GetContextLostReason());
      command_buffer_->SetParseError(error::kLostContext);
    }
    decoder_->AddProcessingCommandsTime(
        base::TimeTicks::HighResNow() - begin_time);
  }
}
    114 
// Adjusts the unscheduled nesting count: SetScheduled(false) increments it,
// SetScheduled(true) decrements it; commands only run while the count is
// zero. Fires scheduling_changed_callback_ on 0<->1 transitions. Calls must
// balance, except that RescheduleTimeOut may force the count to zero early,
// in which case the late SetScheduled(true) calls are absorbed via
// rescheduled_count_.
void GpuScheduler::SetScheduled(bool scheduled) {
  TRACE_EVENT2("gpu", "GpuScheduler:SetScheduled", "this", this,
               "new unscheduled_count_",
               unscheduled_count_ + (scheduled? -1 : 1));
  if (scheduled) {
    // If the scheduler was rescheduled after a timeout, ignore the subsequent
    // calls to SetScheduled when they eventually arrive until they are all
    // accounted for.
    if (rescheduled_count_ > 0) {
      --rescheduled_count_;
      return;
    } else {
      --unscheduled_count_;
    }

    DCHECK_GE(unscheduled_count_, 0);

    if (unscheduled_count_ == 0) {
      TRACE_EVENT_ASYNC_END1("gpu", "ProcessingSwap", this,
                             "GpuScheduler", this);
      // When the scheduler transitions from the unscheduled to the scheduled
      // state, cancel the task that would reschedule it after a timeout.
      reschedule_task_factory_.InvalidateWeakPtrs();

      if (!scheduling_changed_callback_.is_null())
        scheduling_changed_callback_.Run(true);
    }
  } else {
    ++unscheduled_count_;
    if (unscheduled_count_ == 1) {
      TRACE_EVENT_ASYNC_BEGIN1("gpu", "ProcessingSwap", this,
                               "GpuScheduler", this);
#if defined(OS_WIN)
      if (base::win::GetVersion() < base::win::VERSION_VISTA) {
        // When the scheduler transitions from scheduled to unscheduled, post a
        // delayed task that it will force it back into a scheduled state after
        // a timeout. This should only be necessary on pre-Vista.
        base::MessageLoop::current()->PostDelayedTask(
            FROM_HERE,
            base::Bind(&GpuScheduler::RescheduleTimeOut,
                       reschedule_task_factory_.GetWeakPtr()),
            base::TimeDelta::FromMilliseconds(kRescheduleTimeOutDelay));
      }
#endif
      if (!scheduling_changed_callback_.is_null())
        scheduling_changed_callback_.Run(false);
    }
  }
}
    164 
    165 bool GpuScheduler::IsScheduled() {
    166   return unscheduled_count_ == 0;
    167 }
    168 
    169 bool GpuScheduler::HasMoreWork() {
    170   return !unschedule_fences_.empty() ||
    171          (decoder_ && decoder_->ProcessPendingQueries()) ||
    172          HasMoreIdleWork();
    173 }
    174 
// Registers a callback invoked with the new scheduled state whenever the
// scheduler transitions between scheduled and unscheduled (see SetScheduled).
void GpuScheduler::SetSchedulingChangedCallback(
    const SchedulingChangedCallback& callback) {
  scheduling_changed_callback_ = callback;
}
    179 
// Resolves |shm_id| to its registered transfer buffer via the command
// buffer; used by command handlers to access client shared memory.
Buffer GpuScheduler::GetSharedMemoryBuffer(int32 shm_id) {
  return command_buffer_->GetTransferBuffer(shm_id);
}
    183 
// Forwards the latest processed token to the command buffer so the client
// can observe command-processing progress.
void GpuScheduler::set_token(int32 token) {
  command_buffer_->SetToken(token);
}
    187 
    188 bool GpuScheduler::SetGetBuffer(int32 transfer_buffer_id) {
    189   Buffer ring_buffer = command_buffer_->GetTransferBuffer(transfer_buffer_id);
    190   if (!ring_buffer.ptr) {
    191     return false;
    192   }
    193 
    194   if (!parser_.get()) {
    195     parser_.reset(new CommandParser(handler_));
    196   }
    197 
    198   parser_->SetBuffer(
    199       ring_buffer.ptr,
    200       ring_buffer.size,
    201       0,
    202       ring_buffer.size);
    203 
    204   SetGetOffset(0);
    205   return true;
    206 }
    207 
    208 bool GpuScheduler::SetGetOffset(int32 offset) {
    209   if (parser_->set_get(offset)) {
    210     command_buffer_->SetGetOffset(static_cast<int32>(parser_->get()));
    211     return true;
    212   }
    213   return false;
    214 }
    215 
// Returns the parser's current get offset. Requires that a parser exists
// (see SetGetBuffer).
int32 GpuScheduler::GetGetOffset() {
  return parser_->get();
}
    219 
// Registers a callback invoked after each successfully processed command
// (see the processing loop in PutChanged).
void GpuScheduler::SetCommandProcessedCallback(
    const base::Closure& callback) {
  command_processed_callback_ = callback;
}
    224 
// Queues |task| to run once a newly issued GL fence completes (polled in
// PollUnscheduleFences) and unschedules the scheduler until then.
// GLFence::Create may return null; PollUnscheduleFences handles that case
// with a glFinish fallback.
void GpuScheduler::DeferToFence(base::Closure task) {
  unschedule_fences_.push(make_linked_ptr(
       new UnscheduleFence(gfx::GLFence::Create(), task)));
  SetScheduled(false);
}
    230 
// Runs deferred tasks whose fences have completed (or timed out), calling
// SetScheduled(true) once per task to balance the SetScheduled(false) made
// in DeferToFence. Returns true when no fences remain pending.
bool GpuScheduler::PollUnscheduleFences() {
  if (unschedule_fences_.empty())
    return true;

  if (unschedule_fences_.front()->fence.get()) {
    // NOTE(review): base::Time is wall-clock and can jump; a monotonic
    // base::TimeTicks would make the timeout immune to clock changes --
    // confirm whether that matters here.
    base::Time now = base::Time::Now();
    base::TimeDelta timeout =
        base::TimeDelta::FromMilliseconds(kUnscheduleFenceTimeOutDelay);

    while (!unschedule_fences_.empty()) {
      const UnscheduleFence& fence = *unschedule_fences_.front();
      // Run the task if its fence completed, or force it once the timeout
      // elapses so a stuck fence cannot hang the scheduler forever.
      if (fence.fence->HasCompleted() ||
          now - fence.issue_time > timeout) {
        unschedule_fences_.front()->task.Run();
        unschedule_fences_.pop();
        SetScheduled(true);
      } else {
        // Stop at the first fence that is still pending; later entries were
        // issued after it and are not examined this poll.
        return false;
      }
    }
  } else {
    // No fence object was created (fences unsupported): wait for the GPU to
    // finish everything, then run all deferred tasks.
    glFinish();

    while (!unschedule_fences_.empty()) {
      unschedule_fences_.front()->task.Run();
      unschedule_fences_.pop();
      SetScheduled(true);
    }
  }

  return true;
}
    263 
    264 bool GpuScheduler::IsPreempted() {
    265   if (!preemption_flag_.get())
    266     return false;
    267 
    268   if (!was_preempted_ && preemption_flag_->IsSet()) {
    269     TRACE_COUNTER_ID1("gpu", "GpuScheduler::Preempted", this, 1);
    270     was_preempted_ = true;
    271   } else if (was_preempted_ && !preemption_flag_->IsSet()) {
    272     TRACE_COUNTER_ID1("gpu", "GpuScheduler::Preempted", this, 0);
    273     was_preempted_ = false;
    274   }
    275 
    276   return preemption_flag_->IsSet();
    277 }
    278 
    279 bool GpuScheduler::HasMoreIdleWork() {
    280   return (decoder_ && decoder_->HasMoreIdleWork());
    281 }
    282 
    283 void GpuScheduler::PerformIdleWork() {
    284   if (!decoder_)
    285     return;
    286   decoder_->PerformIdleWork();
    287 }
    288 
// Timeout task posted by SetScheduled on pre-Vista Windows. Forces the
// scheduler back into the scheduled state and records how many balancing
// SetScheduled(true) calls are still outstanding so that SetScheduled can
// swallow them when they eventually arrive.
void GpuScheduler::RescheduleTimeOut() {
  int new_count = unscheduled_count_ + rescheduled_count_;

  // Zero rescheduled_count_ first so the SetScheduled(true) calls below
  // actually decrement unscheduled_count_ instead of being absorbed.
  rescheduled_count_ = 0;

  while (unscheduled_count_)
    SetScheduled(true);

  rescheduled_count_ = new_count;
}
    299 
// Binds a deferred |task_| to |fence_| (may be null when GL fences are
// unsupported; presumably the fence member takes ownership) and records the
// issue time used for the timeout in PollUnscheduleFences.
GpuScheduler::UnscheduleFence::UnscheduleFence(gfx::GLFence* fence_,
                                               base::Closure task_)
  : fence(fence_),
    issue_time(base::Time::Now()),
    task(task_) {
}
    306 
// The fence and closure members are released by their own destructors.
GpuScheduler::UnscheduleFence::~UnscheduleFence() {
}
    309 
    310 }  // namespace gpu
    311