Home | History | Annotate | Download | only in service
      1 // Copyright 2013 The Chromium Authors. All rights reserved.
      2 // Use of this source code is governed by a BSD-style license that can be
      3 // found in the LICENSE file.
      4 
      5 #include "gpu/command_buffer/service/in_process_command_buffer.h"
      6 
      7 #include <queue>
      8 #include <utility>
      9 
     10 #include <GLES2/gl2.h>
     11 #ifndef GL_GLEXT_PROTOTYPES
     12 #define GL_GLEXT_PROTOTYPES 1
     13 #endif
     14 #include <GLES2/gl2ext.h>
     15 #include <GLES2/gl2extchromium.h>
     16 
     17 #include "base/bind.h"
     18 #include "base/bind_helpers.h"
     19 #include "base/lazy_instance.h"
     20 #include "base/logging.h"
     21 #include "base/memory/weak_ptr.h"
     22 #include "base/message_loop/message_loop_proxy.h"
     23 #include "base/sequence_checker.h"
     24 #include "base/threading/thread.h"
     25 #include "gpu/command_buffer/common/id_allocator.h"
     26 #include "gpu/command_buffer/service/command_buffer_service.h"
     27 #include "gpu/command_buffer/service/context_group.h"
     28 #include "gpu/command_buffer/service/gl_context_virtual.h"
     29 #include "gpu/command_buffer/service/gpu_scheduler.h"
     30 #include "gpu/command_buffer/service/image_manager.h"
     31 #include "gpu/command_buffer/service/transfer_buffer_manager.h"
     32 #include "ui/gfx/size.h"
     33 #include "ui/gl/gl_context.h"
     34 #include "ui/gl/gl_image.h"
     35 #include "ui/gl/gl_share_group.h"
     36 
     37 #if defined(OS_ANDROID)
     38 #include "gpu/command_buffer/service/stream_texture_manager_in_process_android.h"
     39 #include "ui/gl/android/surface_texture_bridge.h"
     40 #endif
     41 
     42 namespace gpu {
     43 
     44 namespace {
     45 
// All InProcessCommandBuffers created with |share_resources| set; used to
// find an existing context group to share with and to propagate context loss.
static base::LazyInstance<std::set<InProcessCommandBuffer*> >
    g_all_shared_contexts = LAZY_INSTANCE_INITIALIZER;

// Process-wide switches, set through the static InProcessCommandBuffer
// methods before any context is created.
static bool g_use_virtualized_gl_context = false;
static bool g_uses_explicit_scheduling = false;

// Runs |task|, stores its return value in |*result|, and signals |completion|
// so a caller blocked on another thread can wake up and read the result.
template <typename T>
static void RunTaskWithResult(base::Callback<T(void)> task,
                              T* result,
                              base::WaitableEvent* completion) {
  *result = task.Run();
  completion->Signal();
}
     59 
// Dedicated in-process GPU service thread. Ref-counted so multiple scheduler
// clients can share one instance (see SchedulerClientBase).
class GpuInProcessThread
    : public base::Thread,
      public base::RefCountedThreadSafe<GpuInProcessThread> {
 public:
  GpuInProcessThread();

 private:
  friend class base::RefCountedThreadSafe<GpuInProcessThread>;
  virtual ~GpuInProcessThread();

  DISALLOW_COPY_AND_ASSIGN(GpuInProcessThread);
};

GpuInProcessThread::GpuInProcessThread() : base::Thread("GpuThread") {
  // Start immediately; clients post tasks to this thread's message loop.
  Start();
}

GpuInProcessThread::~GpuInProcessThread() {
  Stop();
}
     80 
// Used with explicit scheduling when there is no dedicated GPU thread.
// Thread-safe FIFO of GPU tasks: tasks may be queued from any thread and are
// drained on whichever thread calls RunTasks().
class GpuCommandQueue {
 public:
  GpuCommandQueue();
  ~GpuCommandQueue();

  // Appends |task| and invokes the schedule callback to request a drain.
  void QueueTask(const base::Closure& task);
  // Runs queued tasks until the queue is observed empty.
  void RunTasks();
  // Single-shot setter; must be called before the first QueueTask()
  // (QueueTask DCHECKs that the callback is non-null).
  void SetScheduleCallback(const base::Closure& callback);

 private:
  base::Lock tasks_lock_;  // Guards |tasks_| only.
  std::queue<base::Closure> tasks_;
  base::Closure schedule_callback_;

  DISALLOW_COPY_AND_ASSIGN(GpuCommandQueue);
};

GpuCommandQueue::GpuCommandQueue() {}

GpuCommandQueue::~GpuCommandQueue() {
  // All queued work must have been drained before destruction.
  base::AutoLock lock(tasks_lock_);
  DCHECK(tasks_.empty());
}
    105 
    106 void GpuCommandQueue::QueueTask(const base::Closure& task) {
    107   {
    108     base::AutoLock lock(tasks_lock_);
    109     tasks_.push(task);
    110   }
    111 
    112   DCHECK(!schedule_callback_.is_null());
    113   schedule_callback_.Run();
    114 }
    115 
void GpuCommandQueue::RunTasks() {
  // Drain loop: take a snapshot of the queue size, then repeatedly pop one
  // task under the lock, re-sample the size, and run the task with the lock
  // released (so a task may safely call QueueTask without deadlocking).
  // Note the size is re-sampled before each task runs, so a task enqueued
  // while the queue was otherwise empty waits for the next schedule callback.
  size_t num_tasks;
  {
    base::AutoLock lock(tasks_lock_);
    num_tasks = tasks_.size();
  }

  while (num_tasks) {
    base::Closure task;
    {
      base::AutoLock lock(tasks_lock_);
      task = tasks_.front();
      tasks_.pop();
      num_tasks = tasks_.size();
    }

    task.Run();
  }
}
    135 
void GpuCommandQueue::SetScheduleCallback(const base::Closure& callback) {
  // Single-shot: setting the callback twice is a programming error.
  DCHECK(schedule_callback_.is_null());
  schedule_callback_ = callback;
}

// Process-wide queue used when explicit scheduling is enabled.
static base::LazyInstance<GpuCommandQueue> g_gpu_queue =
    LAZY_INSTANCE_INITIALIZER;
    143 
// Common bookkeeping for scheduler clients: tracks all live clients and
// shares a single GpuInProcessThread among those that need one.
class SchedulerClientBase : public InProcessCommandBuffer::SchedulerClient {
 public:
  explicit SchedulerClientBase(bool need_thread);
  virtual ~SchedulerClientBase();

  // True while any SchedulerClientBase instance is alive.
  static bool HasClients();

 protected:
  scoped_refptr<GpuInProcessThread> thread_;  // Null for queue-based clients.

 private:
  // Registry of live clients; |all_clients_lock_| guards |all_clients_|.
  static base::LazyInstance<std::set<SchedulerClientBase*> > all_clients_;
  static base::LazyInstance<base::Lock> all_clients_lock_;
};

base::LazyInstance<std::set<SchedulerClientBase*> >
    SchedulerClientBase::all_clients_ = LAZY_INSTANCE_INITIALIZER;
base::LazyInstance<base::Lock> SchedulerClientBase::all_clients_lock_ =
    LAZY_INSTANCE_INITIALIZER;
    163 
    164 SchedulerClientBase::SchedulerClientBase(bool need_thread) {
    165   base::AutoLock(all_clients_lock_.Get());
    166   if (need_thread) {
    167     if (!all_clients_.Get().empty()) {
    168       SchedulerClientBase* other = *all_clients_.Get().begin();
    169       thread_ = other->thread_;
    170       DCHECK(thread_.get());
    171     } else {
    172       thread_ = new GpuInProcessThread;
    173     }
    174   }
    175   all_clients_.Get().insert(this);
    176 }
    177 
    178 SchedulerClientBase::~SchedulerClientBase() {
    179   base::AutoLock(all_clients_lock_.Get());
    180   all_clients_.Get().erase(this);
    181 }
    182 
    183 bool SchedulerClientBase::HasClients() {
    184   base::AutoLock(all_clients_lock_.Get());
    185   return !all_clients_.Get().empty();
    186 }
    187 
// A client that talks to the GPU thread
class ThreadClient : public SchedulerClientBase {
 public:
  ThreadClient();
  // Posts |task| to the shared GPU thread's message loop.
  virtual void QueueTask(const base::Closure& task) OVERRIDE;
};

ThreadClient::ThreadClient() : SchedulerClientBase(true) {
  DCHECK(thread_.get());
}

void ThreadClient::QueueTask(const base::Closure& task) {
  thread_->message_loop()->PostTask(FROM_HERE, task);
}
    202 
// A client that talks to the GpuCommandQueue
class QueueClient : public SchedulerClientBase {
 public:
  QueueClient();
  // Appends |task| to the shared explicit-scheduling queue.
  virtual void QueueTask(const base::Closure& task) OVERRIDE;
};

QueueClient::QueueClient() : SchedulerClientBase(false) {
  // Queue-based clients never own a GPU thread.
  DCHECK(!thread_.get());
}

void QueueClient::QueueTask(const base::Closure& task) {
  g_gpu_queue.Get().QueueTask(task);
}
    217 
    218 static scoped_ptr<InProcessCommandBuffer::SchedulerClient>
    219 CreateSchedulerClient() {
    220   scoped_ptr<InProcessCommandBuffer::SchedulerClient> client;
    221   if (g_uses_explicit_scheduling)
    222     client.reset(new QueueClient);
    223   else
    224     client.reset(new ThreadClient);
    225 
    226   return client.Pass();
    227 }
    228 
    229 class ScopedEvent {
    230  public:
    231   ScopedEvent(base::WaitableEvent* event) : event_(event) {}
    232   ~ScopedEvent() { event_->Signal(); }
    233 
    234  private:
    235   base::WaitableEvent* event_;
    236 };
    237 
}  // anonymous namespace
    239 
InProcessCommandBuffer::InProcessCommandBuffer()
    : context_lost_(false),
      share_group_id_(0),
      last_put_offset_(-1),
      flush_event_(false, false),  // Auto-reset, initially unsignaled.
      queue_(CreateSchedulerClient()) {}

InProcessCommandBuffer::~InProcessCommandBuffer() {
  // Blocks until GPU-side teardown completes (see Destroy()).
  Destroy();
}
    250 
    251 bool InProcessCommandBuffer::IsContextLost() {
    252   CheckSequencedThread();
    253   if (context_lost_ || !command_buffer_) {
    254     return true;
    255   }
    256   CommandBuffer::State state = GetState();
    257   return error::IsError(state.error);
    258 }
    259 
// Decoder resize callback (onscreen surfaces only).
// NOTE(review): |scale_factor| is currently ignored — confirm intended.
void InProcessCommandBuffer::OnResizeView(gfx::Size size, float scale_factor) {
  CheckSequencedThread();
  DCHECK(!surface_->IsOffscreen());
  surface_->Resize(size);
}
    265 
    266 bool InProcessCommandBuffer::MakeCurrent() {
    267   CheckSequencedThread();
    268   command_buffer_lock_.AssertAcquired();
    269 
    270   if (!context_lost_ && decoder_->MakeCurrent())
    271     return true;
    272   DLOG(ERROR) << "Context lost because MakeCurrent failed.";
    273   command_buffer_->SetContextLostReason(decoder_->GetContextLostReason());
    274   command_buffer_->SetParseError(gpu::error::kLostContext);
    275   return false;
    276 }
    277 
// Put-offset-change callback: processes pending commands on the GPU thread.
// Caller must hold |command_buffer_lock_|.
void InProcessCommandBuffer::PumpCommands() {
  CheckSequencedThread();
  command_buffer_lock_.AssertAcquired();

  if (!MakeCurrent())
    return;

  gpu_scheduler_->PutChanged();
}

// Get-buffer-change callback: points the command buffer at a new buffer.
bool InProcessCommandBuffer::GetBufferChanged(int32 transfer_buffer_id) {
  CheckSequencedThread();
  command_buffer_lock_.AssertAcquired();
  command_buffer_->SetGetBuffer(transfer_buffer_id);
  return true;
}
    294 
// Client-thread entry point: records share/callback state, then runs
// InitializeOnGpuThread through the scheduler and blocks until it finishes.
// Returns the GPU-side initialization result.
bool InProcessCommandBuffer::Initialize(
    scoped_refptr<gfx::GLSurface> surface,
    bool is_offscreen,
    bool share_resources,
    gfx::AcceleratedWidget window,
    const gfx::Size& size,
    const char* allowed_extensions,
    const std::vector<int32>& attribs,
    gfx::GpuPreference gpu_preference,
    const base::Closure& context_lost_callback,
    unsigned int share_group_id) {

  share_resources_ = share_resources;
  context_lost_callback_ = WrapCallback(context_lost_callback);
  share_group_id_ = share_group_id;

  if (surface) {
    // GPU thread must be the same as client thread due to GLSurface not being
    // thread safe.
    sequence_checker_.reset(new base::SequenceChecker);
    surface_ = surface;
  }

  base::Callback<bool(void)> init_task =
      base::Bind(&InProcessCommandBuffer::InitializeOnGpuThread,
                 base::Unretained(this),
                 is_offscreen,
                 window,
                 size,
                 allowed_extensions,
                 attribs,
                 gpu_preference);

  // Manual-reset event; RunTaskWithResult signals it once |init_task| ran and
  // |result| is populated.
  base::WaitableEvent completion(true, false);
  bool result = false;
  QueueTask(
      base::Bind(&RunTaskWithResult<bool>, init_task, &result, &completion));
  completion.Wait();
  return result;
}
    335 
// GPU-thread half of Initialize(): builds the command buffer service,
// decoder, scheduler, surface, and GL context. On any failure, tears down
// partially constructed state via DestroyOnGpuThread() and returns false.
bool InProcessCommandBuffer::InitializeOnGpuThread(
    bool is_offscreen,
    gfx::AcceleratedWidget window,
    const gfx::Size& size,
    const char* allowed_extensions,
    const std::vector<int32>& attribs,
    gfx::GpuPreference gpu_preference) {
  CheckSequencedThread();
  // Use one share group for all contexts.
  CR_DEFINE_STATIC_LOCAL(scoped_refptr<gfx::GLShareGroup>, share_group,
                         (new gfx::GLShareGroup));

  DCHECK(size.width() >= 0 && size.height() >= 0);

  TransferBufferManager* manager = new TransferBufferManager();
  transfer_buffer_manager_.reset(manager);
  manager->Initialize();

  scoped_ptr<CommandBufferService> command_buffer(
      new CommandBufferService(transfer_buffer_manager_.get()));
  command_buffer->SetPutOffsetChangeCallback(base::Bind(
      &InProcessCommandBuffer::PumpCommands, base::Unretained(this)));
  command_buffer->SetParseErrorCallback(base::Bind(
      &InProcessCommandBuffer::OnContextLost, base::Unretained(this)));

  if (!command_buffer->Initialize()) {
    LOG(ERROR) << "Could not initialize command buffer.";
    DestroyOnGpuThread();
    return false;
  }

  // Find an existing context with the same |share_group_id_| to share
  // resources (context group, decoder state) with.
  InProcessCommandBuffer* context_group = NULL;

  if (share_resources_ && !g_all_shared_contexts.Get().empty()) {
    DCHECK(share_group_id_);
    for (std::set<InProcessCommandBuffer*>::iterator it =
             g_all_shared_contexts.Get().begin();
         it != g_all_shared_contexts.Get().end();
         ++it) {
      if ((*it)->share_group_id_ == share_group_id_) {
        context_group = *it;
        DCHECK(context_group->share_resources_);
        // Inherit the peer's loss state so we do not pretend to be healthy.
        context_lost_ = context_group->IsContextLost();
        break;
      }
    }
    if (!context_group)
      share_group = new gfx::GLShareGroup;
  }

  StreamTextureManager* stream_texture_manager = NULL;
#if defined(OS_ANDROID)
  stream_texture_manager = stream_texture_manager_ =
      context_group ? context_group->stream_texture_manager_.get()
                    : new StreamTextureManagerInProcess;
#endif

  bool bind_generates_resource = false;
  // Reuse the peer's context group when sharing; otherwise create a new one.
  decoder_.reset(gles2::GLES2Decoder::Create(
      context_group ? context_group->decoder_->GetContextGroup()
                    : new gles2::ContextGroup(NULL,
                                              NULL,
                                              NULL,
                                              stream_texture_manager,
                                              bind_generates_resource)));

  gpu_scheduler_.reset(
      new GpuScheduler(command_buffer.get(), decoder_.get(), decoder_.get()));
  command_buffer->SetGetBufferChangeCallback(base::Bind(
      &GpuScheduler::SetGetBuffer, base::Unretained(gpu_scheduler_.get())));
  command_buffer_ = command_buffer.Pass();

  decoder_->set_engine(gpu_scheduler_.get());

  // A surface may already have been supplied by the client in Initialize().
  if (!surface_) {
    if (is_offscreen)
      surface_ = gfx::GLSurface::CreateOffscreenGLSurface(size);
    else
      surface_ = gfx::GLSurface::CreateViewGLSurface(window);
  }

  if (!surface_.get()) {
    LOG(ERROR) << "Could not create GLSurface.";
    DestroyOnGpuThread();
    return false;
  }

  if (g_use_virtualized_gl_context) {
    // All virtual contexts share one real context, cached on the share group.
    context_ = share_group->GetSharedContext();
    if (!context_.get()) {
      context_ = gfx::GLContext::CreateGLContext(
          share_group.get(), surface_.get(), gpu_preference);
      share_group->SetSharedContext(context_.get());
    }

    context_ = new GLContextVirtual(
        share_group.get(), context_.get(), decoder_->AsWeakPtr());
    if (context_->Initialize(surface_.get(), gpu_preference)) {
      VLOG(1) << "Created virtual GL context.";
    } else {
      context_ = NULL;
    }
  } else {
    context_ = gfx::GLContext::CreateGLContext(
        share_group.get(), surface_.get(), gpu_preference);
  }

  if (!context_.get()) {
    LOG(ERROR) << "Could not create GLContext.";
    DestroyOnGpuThread();
    return false;
  }

  if (!context_->MakeCurrent(surface_.get())) {
    LOG(ERROR) << "Could not make context current.";
    DestroyOnGpuThread();
    return false;
  }

  gles2::DisallowedFeatures disallowed_features;
  disallowed_features.swap_buffer_complete_callback = true;
  disallowed_features.gpu_memory_manager = true;
  if (!decoder_->Initialize(surface_,
                            context_,
                            is_offscreen,
                            size,
                            disallowed_features,
                            allowed_extensions,
                            attribs)) {
    LOG(ERROR) << "Could not initialize decoder.";
    DestroyOnGpuThread();
    return false;
  }

  if (!is_offscreen) {
    decoder_->SetResizeCallback(base::Bind(
        &InProcessCommandBuffer::OnResizeView, base::Unretained(this)));
  }

  // Make this context discoverable for future resource sharing.
  if (share_resources_) {
    g_all_shared_contexts.Pointer()->insert(this);
  }

  return true;
}
    481 
// Tears down GPU-side state; blocks the client thread until teardown is done.
void InProcessCommandBuffer::Destroy() {
  CheckSequencedThread();
  base::WaitableEvent completion(true, false);
  bool result = false;
  base::Callback<bool(void)> destroy_task = base::Bind(
      &InProcessCommandBuffer::DestroyOnGpuThread, base::Unretained(this));
  QueueTask(
      base::Bind(&RunTaskWithResult<bool>, destroy_task, &result, &completion));
  completion.Wait();
}
    492 
// GPU-thread half of Destroy(). Always returns true (result is used only to
// satisfy RunTaskWithResult).
bool InProcessCommandBuffer::DestroyOnGpuThread() {
  CheckSequencedThread();
  command_buffer_.reset();
  // Clean up GL resources if possible.
  bool have_context = context_ && context_->MakeCurrent(surface_);
  if (decoder_) {
    decoder_->Destroy(have_context);
    decoder_.reset();
  }
  context_ = NULL;
  surface_ = NULL;

  // Stop participating in shared-context bookkeeping.
  g_all_shared_contexts.Pointer()->erase(this);
  return true;
}
    508 
// DCHECKs we are on the original client sequence when a client-supplied
// GLSurface bound us to it at Initialize(); no-op otherwise.
void InProcessCommandBuffer::CheckSequencedThread() {
  DCHECK(!sequence_checker_ ||
         sequence_checker_->CalledOnValidSequencedThread());
}
    513 
// Allocates an image id on the client thread, then asynchronously creates the
// GLImage on the GPU thread. Returns the id immediately.
unsigned int InProcessCommandBuffer::CreateImageForGpuMemoryBuffer(
    gfx::GpuMemoryBufferHandle buffer,
    gfx::Size size) {
  CheckSequencedThread();
  unsigned int image_id;
  {
    // TODO: ID allocation should go through CommandBuffer
    base::AutoLock lock(command_buffer_lock_);
    gles2::ContextGroup* group = decoder_->GetContextGroup();
    image_id =
        group->GetIdAllocator(gles2::id_namespaces::kImages)->AllocateID();
  }
  base::Closure image_task =
      base::Bind(&InProcessCommandBuffer::CreateImageOnGpuThread,
                 base::Unretained(this), buffer, size, image_id);
  QueueTask(image_task);
  return image_id;
}
    532 
    533 void InProcessCommandBuffer::CreateImageOnGpuThread(
    534     gfx::GpuMemoryBufferHandle buffer,
    535     gfx::Size size,
    536     unsigned int image_id) {
    537   CheckSequencedThread();
    538   scoped_refptr<gfx::GLImage> gl_image =
    539       gfx::GLImage::CreateGLImageForGpuMemoryBuffer(buffer, size);
    540    decoder_->GetContextGroup()->image_manager()->AddImage(gl_image, image_id);
    541 }
    542 
// Frees the image id on the client thread and queues GPU-side removal.
void InProcessCommandBuffer::RemoveImage(unsigned int image_id) {
  CheckSequencedThread();
  {
    // TODO: ID allocation should go through CommandBuffer
    base::AutoLock lock(command_buffer_lock_);
    gles2::ContextGroup* group = decoder_->GetContextGroup();
    group->GetIdAllocator(gles2::id_namespaces::kImages)->FreeID(image_id);
  }
  base::Closure image_manager_task =
      base::Bind(&InProcessCommandBuffer::RemoveImageOnGpuThread,
                 base::Unretained(this),
                 image_id);
  QueueTask(image_manager_task);
}
    557 
// GPU-thread half of RemoveImage.
void InProcessCommandBuffer::RemoveImageOnGpuThread(unsigned int image_id) {
  CheckSequencedThread();
  decoder_->GetContextGroup()->image_manager()->RemoveImage(image_id);
}
    562 
// Parse-error callback: notifies the client at most once, then marks this
// context (and, if sharing, the other shared contexts) as lost.
void InProcessCommandBuffer::OnContextLost() {
  CheckSequencedThread();
  if (!context_lost_callback_.is_null()) {
    context_lost_callback_.Run();
    context_lost_callback_.Reset();  // Notify at most once.
  }

  context_lost_ = true;
  if (share_resources_) {
    // NOTE(review): this flags every context in the global shared set, not
    // only those with a matching |share_group_id_| — confirm intended.
    for (std::set<InProcessCommandBuffer*>::iterator it =
             g_all_shared_contexts.Get().begin();
         it != g_all_shared_contexts.Get().end();
         ++it) {
      (*it)->context_lost_ = true;
    }
  }
}
    580 
// Refreshes |last_state_| from the GPU thread's snapshot. The unsigned
// generation difference (< 0x80000000U means "at least as new") guards
// against generation-counter wraparound.
CommandBuffer::State InProcessCommandBuffer::GetStateFast() {
  CheckSequencedThread();
  base::AutoLock lock(state_after_last_flush_lock_);
  if (state_after_last_flush_.generation - last_state_.generation < 0x80000000U)
    last_state_ = state_after_last_flush_;
  return last_state_;
}

CommandBuffer::State InProcessCommandBuffer::GetState() {
  CheckSequencedThread();
  return GetStateFast();
}

// Returns the cached state without synchronizing with the GPU side.
CommandBuffer::State InProcessCommandBuffer::GetLastState() {
  CheckSequencedThread();
  return last_state_;
}

int32 InProcessCommandBuffer::GetLastToken() {
  CheckSequencedThread();
  GetStateFast();
  return last_state_.token;
}
    604 
// Runs on the GPU thread: processes commands up to |put_offset|, publishes
// the resulting state, then — via ScopedEvent's destructor, which runs last —
// signals |flush_event_| so FlushSync waiters wake after the state update.
void InProcessCommandBuffer::FlushOnGpuThread(int32 put_offset) {
  CheckSequencedThread();
  ScopedEvent handle_flush(&flush_event_);
  base::AutoLock lock(command_buffer_lock_);
  command_buffer_->Flush(put_offset);
  {
    // Update state before signaling the flush event.
    base::AutoLock lock(state_after_last_flush_lock_);
    state_after_last_flush_ = command_buffer_->GetState();
  }
  // Error state and |context_lost_| must agree (both set or both clear).
  DCHECK((!error::IsError(state_after_last_flush_.error) && !context_lost_) ||
         (error::IsError(state_after_last_flush_.error) && context_lost_));
}
    618 
    619 void InProcessCommandBuffer::Flush(int32 put_offset) {
    620   CheckSequencedThread();
    621   if (last_state_.error != gpu::error::kNoError)
    622     return;
    623 
    624   if (last_put_offset_ == put_offset)
    625     return;
    626 
    627   last_put_offset_ = put_offset;
    628   base::Closure task = base::Bind(&InProcessCommandBuffer::FlushOnGpuThread,
    629                                   base::Unretained(this),
    630                                   put_offset);
    631   QueueTask(task);
    632 }
    633 
// Flushes and then blocks until the GPU side consumes past |last_known_get|
// or an error occurs. Returns the freshest state.
CommandBuffer::State InProcessCommandBuffer::FlushSync(int32 put_offset,
                                                       int32 last_known_get) {
  CheckSequencedThread();
  if (put_offset == last_known_get || last_state_.error != gpu::error::kNoError)
    return last_state_;

  Flush(put_offset);
  GetStateFast();
  while (last_known_get == last_state_.get_offset &&
         last_state_.error == gpu::error::kNoError) {
    // Each completed GPU-side flush signals |flush_event_| (see
    // FlushOnGpuThread), at which point we re-sample the state.
    flush_event_.Wait();
    GetStateFast();
  }

  return last_state_;
}
    650 
// Switches the command buffer to a new ring buffer and resets flush
// bookkeeping.
void InProcessCommandBuffer::SetGetBuffer(int32 shm_id) {
  CheckSequencedThread();
  if (last_state_.error != gpu::error::kNoError)
    return;

  {
    base::AutoLock lock(command_buffer_lock_);
    command_buffer_->SetGetBuffer(shm_id);
    last_put_offset_ = 0;
  }
  {
    // NOTE(review): the state snapshot below reads |command_buffer_| outside
    // |command_buffer_lock_| — confirm this is benign.
    base::AutoLock lock(state_after_last_flush_lock_);
    state_after_last_flush_ = command_buffer_->GetState();
  }
}
    666 
gpu::Buffer InProcessCommandBuffer::CreateTransferBuffer(size_t size,
                                                         int32* id) {
  CheckSequencedThread();
  base::AutoLock lock(command_buffer_lock_);
  return command_buffer_->CreateTransferBuffer(size, id);
}

// Destruction happens asynchronously on the GPU side.
// NOTE(review): the queued task holds an unretained pointer to
// |command_buffer_|; verify the buffer cannot be reset before the task runs.
void InProcessCommandBuffer::DestroyTransferBuffer(int32 id) {
  CheckSequencedThread();
  base::Closure task = base::Bind(&CommandBuffer::DestroyTransferBuffer,
                                  base::Unretained(command_buffer_.get()),
                                  id);

  QueueTask(task);
}
    682 
// The methods below marked NOTREACHED() are service-side CommandBuffer entry
// points that are not meaningful for the in-process implementation.
gpu::Buffer InProcessCommandBuffer::GetTransferBuffer(int32 id) {
  NOTREACHED();
  return gpu::Buffer();
}

uint32 InProcessCommandBuffer::InsertSyncPoint() {
  NOTREACHED();
  return 0;
}

// In-process work is serialized through a single FIFO, so the callback can
// simply be queued: by the time it runs, all previously queued GPU work has
// been processed.
void InProcessCommandBuffer::SignalSyncPoint(unsigned sync_point,
                                             const base::Closure& callback) {
  CheckSequencedThread();
  QueueTask(WrapCallback(callback));
}

gpu::error::Error InProcessCommandBuffer::GetLastError() {
  CheckSequencedThread();
  return last_state_.error;
}

bool InProcessCommandBuffer::Initialize() {
  NOTREACHED();
  return false;
}

void InProcessCommandBuffer::SetGetOffset(int32 get_offset) { NOTREACHED(); }

void InProcessCommandBuffer::SetToken(int32 token) { NOTREACHED(); }

void InProcessCommandBuffer::SetParseError(gpu::error::Error error) {
  NOTREACHED();
}

void InProcessCommandBuffer::SetContextLostReason(
    gpu::error::ContextLostReason reason) {
  NOTREACHED();
}
    720 
    721 namespace {
    722 
    723 void PostCallback(const scoped_refptr<base::MessageLoopProxy>& loop,
    724                          const base::Closure& callback) {
    725   if (!loop->BelongsToCurrentThread()) {
    726     loop->PostTask(FROM_HERE, callback);
    727   } else {
    728     callback.Run();
    729   }
    730 }
    731 
// Runs |callback| and, because ownership was passed in, destroys it on the
// thread this executes on.
void RunOnTargetThread(scoped_ptr<base::Closure> callback) {
  DCHECK(callback.get());
  callback->Run();
}
    736 
    737 }  // anonymous namespace
    738 
// Wraps |callback| so that, no matter which thread runs the wrapper, the
// original callback executes — and is destroyed — on the thread that called
// WrapCallback (the current message loop).
base::Closure InProcessCommandBuffer::WrapCallback(
    const base::Closure& callback) {
  // Make sure the callback gets deleted on the target thread by passing
  // ownership.
  scoped_ptr<base::Closure> scoped_callback(new base::Closure(callback));
  base::Closure callback_on_client_thread =
      base::Bind(&RunOnTargetThread, base::Passed(&scoped_callback));
  base::Closure wrapped_callback =
      base::Bind(&PostCallback, base::MessageLoopProxy::current(),
                 callback_on_client_thread);
  return wrapped_callback;
}
    751 
    752 #if defined(OS_ANDROID)
// Android only: looks up the surface texture registered under |stream_id|.
scoped_refptr<gfx::SurfaceTextureBridge>
InProcessCommandBuffer::GetSurfaceTexture(uint32 stream_id) {
  DCHECK(stream_texture_manager_);
  return stream_texture_manager_->GetSurfaceTexture(stream_id);
}
    758 #endif
    759 
// static
void InProcessCommandBuffer::EnableVirtualizedContext() {
  // Read at context-initialization time (InitializeOnGpuThread), so this must
  // be set before contexts are created to take effect.
  g_use_virtualized_gl_context = true;
}

// static
// Switches the process to explicit scheduling. Must be called at most once,
// and before any scheduler client exists.
void InProcessCommandBuffer::SetScheduleCallback(
    const base::Closure& callback) {
  DCHECK(!g_uses_explicit_scheduling);
  DCHECK(!SchedulerClientBase::HasClients());
  g_uses_explicit_scheduling = true;
  g_gpu_queue.Get().SetScheduleCallback(callback);
}

// static
// Drains the shared GPU command queue on the calling thread.
void InProcessCommandBuffer::ProcessGpuWorkOnCurrentThread() {
  g_gpu_queue.Get().RunTasks();
}
    778 
    779 }  // namespace gpu
    780