// Scraped from code-search navigation: gpu/command_buffer/service/gpu_scheduler.cc
      1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
      2 // Use of this source code is governed by a BSD-style license that can be
      3 // found in the LICENSE file.
      4 
      5 #include "gpu/command_buffer/service/gpu_scheduler.h"
      6 
      7 #include "base/bind.h"
      8 #include "base/command_line.h"
      9 #include "base/compiler_specific.h"
     10 #include "base/debug/trace_event.h"
     11 #include "base/message_loop/message_loop.h"
     12 #include "base/time/time.h"
     13 #include "ui/gl/gl_bindings.h"
     14 #include "ui/gl/gl_fence.h"
     15 #include "ui/gl/gl_switches.h"
     16 
     17 #if defined(OS_WIN)
     18 #include "base/win/windows_version.h"
     19 #endif
     20 
     21 using ::base::SharedMemory;
     22 
     23 namespace gpu {
     24 
// Milliseconds after which a pending unschedule fence is treated as passed
// even if the GPU has not signaled it (see PollUnscheduleFences()).
const int64 kUnscheduleFenceTimeOutDelay = 10000;

#if defined(OS_WIN)
// Milliseconds after which an unscheduled scheduler is forced back into the
// scheduled state on pre-Vista Windows (see SetScheduled()).
const int64 kRescheduleTimeOutDelay = 1000;
#endif
     30 
// Constructs a scheduler that feeds commands from |command_buffer| to
// |handler|. |decoder| may be NULL: it is null-checked at every use in this
// file, and is only used for decode bracketing, queries and idle work.
// NOTE(review): the raw pointers appear unowned — presumably all three must
// outlive this object; confirm against the owning code.
GpuScheduler::GpuScheduler(CommandBufferServiceBase* command_buffer,
                           AsyncAPIInterface* handler,
                           gles2::GLES2Decoder* decoder)
    : command_buffer_(command_buffer),
      handler_(handler),
      decoder_(decoder),
      unscheduled_count_(0),  // 0 means "scheduled" (see IsScheduled()).
      rescheduled_count_(0),
      was_preempted_(false),
      reschedule_task_factory_(this) {}
     41 
     42 GpuScheduler::~GpuScheduler() {
     43 }
     44 
     45 void GpuScheduler::PutChanged() {
     46   TRACE_EVENT1(
     47      "gpu", "GpuScheduler:PutChanged",
     48      "decoder", decoder_ ? decoder_->GetLogger()->GetLogPrefix() : "None");
     49 
     50   CommandBuffer::State state = command_buffer_->GetLastState();
     51 
     52   // If there is no parser, exit.
     53   if (!parser_.get()) {
     54     DCHECK_EQ(state.get_offset, state.put_offset);
     55     return;
     56   }
     57 
     58   parser_->set_put(state.put_offset);
     59   if (state.error != error::kNoError)
     60     return;
     61 
     62   // Check that the GPU has passed all fences.
     63   if (!PollUnscheduleFences())
     64     return;
     65 
     66   // One of the unschedule fence tasks might have unscheduled us.
     67   if (!IsScheduled())
     68     return;
     69 
     70   base::TimeTicks begin_time(base::TimeTicks::HighResNow());
     71   error::Error error = error::kNoError;
     72   if (decoder_)
     73     decoder_->BeginDecoding();
     74   while (!parser_->IsEmpty()) {
     75     if (IsPreempted())
     76       break;
     77 
     78     DCHECK(IsScheduled());
     79     DCHECK(unschedule_fences_.empty());
     80 
     81     error = parser_->ProcessCommands(CommandParser::kParseCommandsSlice);
     82 
     83     if (error == error::kDeferCommandUntilLater) {
     84       DCHECK_GT(unscheduled_count_, 0);
     85       break;
     86     }
     87 
     88     // TODO(piman): various classes duplicate various pieces of state, leading
     89     // to needlessly complex update logic. It should be possible to simply
     90     // share the state across all of them.
     91     command_buffer_->SetGetOffset(static_cast<int32>(parser_->get()));
     92 
     93     if (error::IsError(error)) {
     94       command_buffer_->SetContextLostReason(decoder_->GetContextLostReason());
     95       command_buffer_->SetParseError(error);
     96       break;
     97     }
     98 
     99     if (!command_processed_callback_.is_null())
    100       command_processed_callback_.Run();
    101 
    102     if (unscheduled_count_ > 0)
    103       break;
    104   }
    105 
    106   if (decoder_) {
    107     if (!error::IsError(error) && decoder_->WasContextLost()) {
    108       command_buffer_->SetContextLostReason(decoder_->GetContextLostReason());
    109       command_buffer_->SetParseError(error::kLostContext);
    110     }
    111     decoder_->EndDecoding();
    112     decoder_->AddProcessingCommandsTime(
    113         base::TimeTicks::HighResNow() - begin_time);
    114   }
    115 }
    116 
// Adjusts the scheduling state by one step: |scheduled| == true decrements
// unscheduled_count_, false increments it. The scheduler only runs commands
// while unscheduled_count_ == 0, so calls must be balanced. Edge transitions
// (0 -> 1 and 1 -> 0) emit trace events and fire the scheduling-changed
// callback.
void GpuScheduler::SetScheduled(bool scheduled) {
  TRACE_EVENT2("gpu", "GpuScheduler:SetScheduled", "this", this,
               "new unscheduled_count_",
               unscheduled_count_ + (scheduled? -1 : 1));
  if (scheduled) {
    // If the scheduler was rescheduled after a timeout, ignore the subsequent
    // calls to SetScheduled when they eventually arrive until they are all
    // accounted for.
    if (rescheduled_count_ > 0) {
      --rescheduled_count_;
      return;
    } else {
      --unscheduled_count_;
    }

    DCHECK_GE(unscheduled_count_, 0);

    if (unscheduled_count_ == 0) {
      TRACE_EVENT_ASYNC_END1("gpu", "ProcessingSwap", this,
                             "GpuScheduler", this);
      // When the scheduler transitions from the unscheduled to the scheduled
      // state, cancel the task that would reschedule it after a timeout.
      reschedule_task_factory_.InvalidateWeakPtrs();

      // Notify only on the edge into the scheduled state.
      if (!scheduling_changed_callback_.is_null())
        scheduling_changed_callback_.Run(true);
    }
  } else {
    ++unscheduled_count_;
    if (unscheduled_count_ == 1) {
      // First transition into the unscheduled state.
      TRACE_EVENT_ASYNC_BEGIN1("gpu", "ProcessingSwap", this,
                               "GpuScheduler", this);
#if defined(OS_WIN)
      if (base::win::GetVersion() < base::win::VERSION_VISTA) {
        // When the scheduler transitions from scheduled to unscheduled, post a
        // delayed task that it will force it back into a scheduled state after
        // a timeout. This should only be necessary on pre-Vista.
        base::MessageLoop::current()->PostDelayedTask(
            FROM_HERE,
            base::Bind(&GpuScheduler::RescheduleTimeOut,
                       reschedule_task_factory_.GetWeakPtr()),
            base::TimeDelta::FromMilliseconds(kRescheduleTimeOutDelay));
      }
#endif
      if (!scheduling_changed_callback_.is_null())
        scheduling_changed_callback_.Run(false);
    }
  }
}
    166 
    167 bool GpuScheduler::IsScheduled() {
    168   return unscheduled_count_ == 0;
    169 }
    170 
    171 bool GpuScheduler::HasMoreWork() {
    172   return !unschedule_fences_.empty() ||
    173          (decoder_ && decoder_->ProcessPendingQueries()) ||
    174          HasMoreIdleWork();
    175 }
    176 
// Stores |callback|; SetScheduled() runs it with the new scheduled state on
// each 0 <-> 1 transition of unscheduled_count_.
void GpuScheduler::SetSchedulingChangedCallback(
    const SchedulingChangedCallback& callback) {
  scheduling_changed_callback_ = callback;
}
    181 
    182 scoped_refptr<Buffer> GpuScheduler::GetSharedMemoryBuffer(int32 shm_id) {
    183   return command_buffer_->GetTransferBuffer(shm_id);
    184 }
    185 
// Writes |token| through to the command buffer's shared state.
void GpuScheduler::set_token(int32 token) {
  command_buffer_->SetToken(token);
}
    189 
    190 bool GpuScheduler::SetGetBuffer(int32 transfer_buffer_id) {
    191   scoped_refptr<Buffer> ring_buffer =
    192       command_buffer_->GetTransferBuffer(transfer_buffer_id);
    193   if (!ring_buffer.get()) {
    194     return false;
    195   }
    196 
    197   if (!parser_.get()) {
    198     parser_.reset(new CommandParser(handler_));
    199   }
    200 
    201   parser_->SetBuffer(
    202       ring_buffer->memory(), ring_buffer->size(), 0, ring_buffer->size());
    203 
    204   SetGetOffset(0);
    205   return true;
    206 }
    207 
    208 bool GpuScheduler::SetGetOffset(int32 offset) {
    209   if (parser_->set_get(offset)) {
    210     command_buffer_->SetGetOffset(static_cast<int32>(parser_->get()));
    211     return true;
    212   }
    213   return false;
    214 }
    215 
    216 int32 GpuScheduler::GetGetOffset() {
    217   return parser_->get();
    218 }
    219 
// Stores |callback|; PutChanged() runs it after each successfully processed
// slice of commands.
void GpuScheduler::SetCommandProcessedCallback(
    const base::Closure& callback) {
  command_processed_callback_ = callback;
}
    224 
    225 void GpuScheduler::DeferToFence(base::Closure task) {
    226   unschedule_fences_.push(make_linked_ptr(
    227        new UnscheduleFence(gfx::GLFence::Create(), task)));
    228   SetScheduled(false);
    229 }
    230 
// Retires queued unschedule fences in FIFO order: for each fence that has
// completed (or exceeded kUnscheduleFenceTimeOutDelay), runs its deferred
// task and re-schedules once. Returns true when no fences remain blocking
// command processing; false if the front fence is still pending.
bool GpuScheduler::PollUnscheduleFences() {
  if (unschedule_fences_.empty())
    return true;

  if (unschedule_fences_.front()->fence.get()) {
    // NOTE(review): this uses wall-clock base::Time for the timeout, which
    // can jump with system clock changes — TimeTicks would be monotonic.
    base::Time now = base::Time::Now();
    base::TimeDelta timeout =
        base::TimeDelta::FromMilliseconds(kUnscheduleFenceTimeOutDelay);

    // Stop at the first fence that is neither complete nor timed out.
    while (!unschedule_fences_.empty()) {
      const UnscheduleFence& fence = *unschedule_fences_.front();
      if (fence.fence->HasCompleted() ||
          now - fence.issue_time > timeout) {
        unschedule_fences_.front()->task.Run();
        unschedule_fences_.pop();
        // Balances the SetScheduled(false) done in DeferToFence().
        SetScheduled(true);
      } else {
        return false;
      }
    }
  } else {
    // No fence object was available (GLFence::Create() returned NULL in
    // DeferToFence()); fall back to a full glFinish, after which every
    // deferred task can run.
    glFinish();

    while (!unschedule_fences_.empty()) {
      unschedule_fences_.front()->task.Run();
      unschedule_fences_.pop();
      SetScheduled(true);
    }
  }

  return true;
}
    263 
// Returns whether the external preemption flag asks command processing to
// yield. Emits a 1/0 trace counter on each state transition. When no flag is
// installed, preemption never occurs.
bool GpuScheduler::IsPreempted() {
  if (!preemption_flag_.get())
    return false;

  // Trace only the edges of the preemption state.
  if (!was_preempted_ && preemption_flag_->IsSet()) {
    TRACE_COUNTER_ID1("gpu", "GpuScheduler::Preempted", this, 1);
    was_preempted_ = true;
  } else if (was_preempted_ && !preemption_flag_->IsSet()) {
    TRACE_COUNTER_ID1("gpu", "GpuScheduler::Preempted", this, 0);
    was_preempted_ = false;
  }

  // Deliberately re-reads the flag rather than caching it — presumably it
  // can be set concurrently from elsewhere; confirm before refactoring.
  return preemption_flag_->IsSet();
}
    278 
    279 bool GpuScheduler::HasMoreIdleWork() {
    280   return (decoder_ && decoder_->HasMoreIdleWork());
    281 }
    282 
    283 void GpuScheduler::PerformIdleWork() {
    284   if (!decoder_)
    285     return;
    286   decoder_->PerformIdleWork();
    287 }
    288 
    289 void GpuScheduler::RescheduleTimeOut() {
    290   int new_count = unscheduled_count_ + rescheduled_count_;
    291 
    292   rescheduled_count_ = 0;
    293 
    294   while (unscheduled_count_)
    295     SetScheduled(true);
    296 
    297   rescheduled_count_ = new_count;
    298 }
    299 
// Takes |fence_| (may be NULL — PollUnscheduleFences() handles that case
// with a glFinish fallback) and records the wall-clock issue time used for
// the fence timeout check. The |fence| member's .get() usage elsewhere
// indicates it is a smart pointer that assumes ownership of |fence_|.
GpuScheduler::UnscheduleFence::UnscheduleFence(gfx::GLFence* fence_,
                                               base::Closure task_)
  : fence(fence_),
    issue_time(base::Time::Now()),
    task(task_) {
}
    306 
    307 GpuScheduler::UnscheduleFence::~UnscheduleFence() {
    308 }
    309 
    310 }  // namespace gpu
    311