Home | History | Annotate | Download | only in gpu
      1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
      2 // Use of this source code is governed by a BSD-style license that can be
      3 // found in the LICENSE file.
      4 
      5 #include "base/bind.h"
      6 #include "base/bind_helpers.h"
      7 #include "base/command_line.h"
      8 #include "base/debug/trace_event.h"
      9 #include "base/hash.h"
     10 #include "base/memory/shared_memory.h"
     11 #include "base/time/time.h"
     12 #include "build/build_config.h"
     13 #include "content/common/gpu/devtools_gpu_instrumentation.h"
     14 #include "content/common/gpu/gpu_channel.h"
     15 #include "content/common/gpu/gpu_channel_manager.h"
     16 #include "content/common/gpu/gpu_command_buffer_stub.h"
     17 #include "content/common/gpu/gpu_memory_manager.h"
     18 #include "content/common/gpu/gpu_memory_tracking.h"
     19 #include "content/common/gpu/gpu_messages.h"
     20 #include "content/common/gpu/gpu_watchdog.h"
     21 #include "content/common/gpu/image_transport_surface.h"
     22 #include "content/common/gpu/media/gpu_video_decode_accelerator.h"
     23 #include "content/common/gpu/sync_point_manager.h"
     24 #include "content/public/common/content_client.h"
     25 #include "gpu/command_buffer/common/constants.h"
     26 #include "gpu/command_buffer/common/gles2_cmd_utils.h"
     27 #include "gpu/command_buffer/common/mailbox.h"
     28 #include "gpu/command_buffer/service/gl_context_virtual.h"
     29 #include "gpu/command_buffer/service/gl_state_restorer_impl.h"
     30 #include "gpu/command_buffer/service/gpu_control_service.h"
     31 #include "gpu/command_buffer/service/image_manager.h"
     32 #include "gpu/command_buffer/service/logger.h"
     33 #include "gpu/command_buffer/service/mailbox_manager.h"
     34 #include "gpu/command_buffer/service/memory_tracking.h"
     35 #include "gpu/command_buffer/service/query_manager.h"
     36 #include "ui/gl/gl_bindings.h"
     37 #include "ui/gl/gl_switches.h"
     38 
     39 #if defined(OS_WIN)
     40 #include "content/public/common/sandbox_init.h"
     41 #endif
     42 
     43 #if defined(OS_ANDROID)
     44 #include "content/common/gpu/stream_texture_manager_android.h"
     45 #endif
     46 
     47 namespace content {
     48 namespace {
     49 
     50 // The GpuCommandBufferMemoryTracker class provides a bridge between the
     51 // ContextGroup's memory type managers and the GpuMemoryManager class.
     52 class GpuCommandBufferMemoryTracker : public gpu::gles2::MemoryTracker {
     53  public:
     54   explicit GpuCommandBufferMemoryTracker(GpuChannel* channel) :
     55       tracking_group_(channel->gpu_channel_manager()->gpu_memory_manager()->
     56           CreateTrackingGroup(channel->renderer_pid(), this)) {
     57   }
     58 
     59   virtual void TrackMemoryAllocatedChange(
     60       size_t old_size,
     61       size_t new_size,
     62       gpu::gles2::MemoryTracker::Pool pool) OVERRIDE {
     63     tracking_group_->TrackMemoryAllocatedChange(
     64         old_size, new_size, pool);
     65   }
     66 
     67   virtual bool EnsureGPUMemoryAvailable(size_t size_needed) OVERRIDE {
     68     return tracking_group_->EnsureGPUMemoryAvailable(size_needed);
     69   };
     70 
     71  private:
     72   virtual ~GpuCommandBufferMemoryTracker() {
     73   }
     74   scoped_ptr<GpuMemoryTrackingGroup> tracking_group_;
     75 
     76   DISALLOW_COPY_AND_ASSIGN(GpuCommandBufferMemoryTracker);
     77 };
     78 
     79 // FastSetActiveURL will shortcut the expensive call to SetActiveURL when the
     80 // url_hash matches.
     81 void FastSetActiveURL(const GURL& url, size_t url_hash) {
     82   // Leave the previously set URL in the empty case -- empty URLs are given by
     83   // WebKitPlatformSupportImpl::createOffscreenGraphicsContext3D. Hopefully the
     84   // onscreen context URL was set previously and will show up even when a crash
     85   // occurs during offscreen command processing.
     86   if (url.is_empty())
     87     return;
     88   static size_t g_last_url_hash = 0;
     89   if (url_hash != g_last_url_hash) {
     90     g_last_url_hash = url_hash;
     91     GetContentClient()->SetActiveURL(url);
     92   }
     93 }
     94 
// The first time polling a fence, delay some extra time to allow other
// stubs to process some work, or else the timing of the fences could
// allow a pattern of alternating fast and slow frames to occur.
const int64 kHandleMoreWorkPeriodMs = 2;
// Shorter re-poll period used once the stub is already busy processing work.
const int64 kHandleMoreWorkPeriodBusyMs = 1;

// Prevents idle work from being starved.
const int64 kMaxTimeSinceIdleMs = 10;
    103 
    104 }  // namespace
    105 
GpuCommandBufferStub::GpuCommandBufferStub(
    GpuChannel* channel,
    GpuCommandBufferStub* share_group,
    const gfx::GLSurfaceHandle& handle,
    gpu::gles2::MailboxManager* mailbox_manager,
    gpu::gles2::ImageManager* image_manager,
    const gfx::Size& size,
    const gpu::gles2::DisallowedFeatures& disallowed_features,
    const std::vector<int32>& attribs,
    gfx::GpuPreference gpu_preference,
    bool use_virtualized_gl_context,
    int32 route_id,
    int32 surface_id,
    GpuWatchdog* watchdog,
    bool software,
    const GURL& active_url)
    : channel_(channel),
      handle_(handle),
      initial_size_(size),
      disallowed_features_(disallowed_features),
      requested_attribs_(attribs),
      gpu_preference_(gpu_preference),
      use_virtualized_gl_context_(use_virtualized_gl_context),
      route_id_(route_id),
      surface_id_(surface_id),
      software_(software),
      last_flush_count_(0),
      last_memory_allocation_valid_(false),
      watchdog_(watchdog),
      sync_point_wait_count_(0),
      delayed_work_scheduled_(false),
      previous_messages_processed_(0),
      active_url_(active_url),
      total_gpu_memory_(0) {
  // Cache the URL hash so FastSetActiveURL can cheaply skip redundant
  // SetActiveURL calls on every incoming message.
  active_url_hash_ = base::Hash(active_url.possibly_invalid_spec());
  FastSetActiveURL(active_url_, active_url_hash_);
  if (share_group) {
    // Share-group contexts reuse the existing ContextGroup (shared mailboxes,
    // memory tracking, etc.).
    context_group_ = share_group->context_group_;
  } else {
    gpu::StreamTextureManager* stream_texture_manager = NULL;
#if defined(OS_ANDROID)
    // Stream textures (e.g. for video) are only supported on Android.
    stream_texture_manager = channel_->stream_texture_manager();
#endif
    context_group_ = new gpu::gles2::ContextGroup(
        mailbox_manager,
        image_manager,
        new GpuCommandBufferMemoryTracker(channel),
        stream_texture_manager,
        NULL,
        true);
  }

  // Driver-workaround list can force virtualized contexts even when the
  // caller didn't request them.
  use_virtualized_gl_context_ |=
      context_group_->feature_info()->workarounds().use_virtualized_gl_contexts;
}
    161 
GpuCommandBufferStub::~GpuCommandBufferStub() {
  // Release all GL/decoder resources first; Destroy() is idempotent with the
  // explicit Destroy() path.
  Destroy();

  // Notify the browser process that this command buffer (and its surface id)
  // is gone.
  GpuChannelManager* gpu_channel_manager = channel_->gpu_channel_manager();
  gpu_channel_manager->Send(new GpuHostMsg_DestroyCommandBuffer(surface_id()));
}
    168 
    169 GpuMemoryManager* GpuCommandBufferStub::GetMemoryManager() const {
    170     return channel()->gpu_channel_manager()->gpu_memory_manager();
    171 }
    172 
bool GpuCommandBufferStub::OnMessageReceived(const IPC::Message& message) {
  devtools_gpu_instrumentation::ScopedGpuTask task(this);
  FastSetActiveURL(active_url_, active_url_hash_);

  // Ensure the appropriate GL context is current before handling any IPC
  // messages directed at the command buffer. This ensures that the message
  // handler can assume that the context is current (not necessary for
  // Echo, GetStateFast, RetireSyncPoint, or SetLatencyInfo, which are
  // excluded below).
  if (decoder_.get() &&
      message.type() != GpuCommandBufferMsg_Echo::ID &&
      message.type() != GpuCommandBufferMsg_GetStateFast::ID &&
      message.type() != GpuCommandBufferMsg_RetireSyncPoint::ID &&
      message.type() != GpuCommandBufferMsg_SetLatencyInfo::ID) {
    if (!MakeCurrent())
      return false;
  }

  // Always use IPC_MESSAGE_HANDLER_DELAY_REPLY for synchronous message handlers
  // here. This is so the reply can be delayed if the scheduler is unscheduled.
  bool handled = true;
  IPC_BEGIN_MESSAGE_MAP(GpuCommandBufferStub, message)
    IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_Initialize,
                                    OnInitialize);
    IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_SetGetBuffer,
                                    OnSetGetBuffer);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_ProduceFrontBuffer,
                        OnProduceFrontBuffer);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_Echo, OnEcho);
    IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_GetState, OnGetState);
    IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_GetStateFast,
                                    OnGetStateFast);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_AsyncFlush, OnAsyncFlush);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SetLatencyInfo, OnSetLatencyInfo);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_Rescheduled, OnRescheduled);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_RegisterTransferBuffer,
                        OnRegisterTransferBuffer);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_DestroyTransferBuffer,
                        OnDestroyTransferBuffer);
    IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_GetTransferBuffer,
                                    OnGetTransferBuffer);
    IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_CreateVideoDecoder,
                                    OnCreateVideoDecoder)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SetSurfaceVisible,
                        OnSetSurfaceVisible)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_RetireSyncPoint,
                        OnRetireSyncPoint)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SignalSyncPoint,
                        OnSignalSyncPoint)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SignalQuery,
                        OnSignalQuery)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SendClientManagedMemoryStats,
                        OnReceivedClientManagedMemoryStats)
    IPC_MESSAGE_HANDLER(
        GpuCommandBufferMsg_SetClientHasMemoryAllocationChangedCallback,
        OnSetClientHasMemoryAllocationChangedCallback)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_RegisterGpuMemoryBuffer,
                        OnRegisterGpuMemoryBuffer);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_DestroyGpuMemoryBuffer,
                        OnDestroyGpuMemoryBuffer);
    IPC_MESSAGE_UNHANDLED(handled = false)
  IPC_END_MESSAGE_MAP()

  // Ensure that any delayed work that was created will be handled.
  ScheduleDelayedWork(kHandleMoreWorkPeriodMs);

  DCHECK(handled);
  return handled;
}
    241 
    242 bool GpuCommandBufferStub::Send(IPC::Message* message) {
    243   return channel_->Send(message);
    244 }
    245 
    246 bool GpuCommandBufferStub::IsScheduled() {
    247   return (!scheduler_.get() || scheduler_->IsScheduled());
    248 }
    249 
    250 bool GpuCommandBufferStub::HasMoreWork() {
    251   return scheduler_.get() && scheduler_->HasMoreWork();
    252 }
    253 
void GpuCommandBufferStub::PollWork() {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::PollWork");
  delayed_work_scheduled_ = false;
  FastSetActiveURL(active_url_, active_url_hash_);
  // Bail out if the context can't be made current; idle work needs GL.
  if (decoder_.get() && !MakeCurrent())
    return;

  if (scheduler_) {
    bool fences_complete = scheduler_->PollUnscheduleFences();
    // Perform idle work if all fences are complete.
    if (fences_complete) {
      uint64 current_messages_processed =
          channel()->gpu_channel_manager()->MessagesProcessed();
      // We're idle when no messages were processed or scheduled.
      bool is_idle =
          (previous_messages_processed_ == current_messages_processed) &&
          !channel()->gpu_channel_manager()->HandleMessagesScheduled();
      if (!is_idle && !last_idle_time_.is_null()) {
        base::TimeDelta time_since_idle = base::TimeTicks::Now() -
            last_idle_time_;
        base::TimeDelta max_time_since_idle =
            base::TimeDelta::FromMilliseconds(kMaxTimeSinceIdleMs);

        // Force idle when it's been too long since last time we were idle.
        // This guarantees idle work isn't starved (kMaxTimeSinceIdleMs).
        if (time_since_idle > max_time_since_idle)
          is_idle = true;
      }

      if (is_idle) {
        last_idle_time_ = base::TimeTicks::Now();
        scheduler_->PerformIdleWork();
      }
    }
  }
  // Re-poll at the faster "busy" cadence; ScheduleDelayedWork is a no-op if
  // there is no more work.
  ScheduleDelayedWork(kHandleMoreWorkPeriodBusyMs);
}
    290 
    291 bool GpuCommandBufferStub::HasUnprocessedCommands() {
    292   if (command_buffer_) {
    293     gpu::CommandBuffer::State state = command_buffer_->GetLastState();
    294     return state.put_offset != state.get_offset &&
    295         !gpu::error::IsError(state.error);
    296   }
    297   return false;
    298 }
    299 
void GpuCommandBufferStub::ScheduleDelayedWork(int64 delay) {
  if (!HasMoreWork()) {
    // Reset the idle clock; with no work pending, the next poll cycle
    // re-establishes the idle baseline.
    last_idle_time_ = base::TimeTicks();
    return;
  }

  // Only one PollWork task may be in flight at a time.
  if (delayed_work_scheduled_)
    return;
  delayed_work_scheduled_ = true;

  // Idle when no messages are processed between now and when
  // PollWork is called.
  previous_messages_processed_ =
      channel()->gpu_channel_manager()->MessagesProcessed();
  if (last_idle_time_.is_null())
    last_idle_time_ = base::TimeTicks::Now();

  // IsScheduled() returns true after passing all unschedule fences
  // and this is when we can start performing idle work. Idle work
  // is done synchronously so we can set delay to 0 and instead poll
  // for more work at the rate idle work is performed. This also ensures
  // that idle work is done as efficiently as possible without any
  // unnecessary delays.
  if (scheduler_.get() &&
      scheduler_->IsScheduled() &&
      scheduler_->HasMoreIdleWork()) {
    delay = 0;
  }

  base::MessageLoop::current()->PostDelayedTask(
      FROM_HERE,
      base::Bind(&GpuCommandBufferStub::PollWork, AsWeakPtr()),
      base::TimeDelta::FromMilliseconds(delay));
}
    334 
    335 void GpuCommandBufferStub::OnEcho(const IPC::Message& message) {
    336   TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnEcho");
    337   Send(new IPC::Message(message));
    338 }
    339 
    340 bool GpuCommandBufferStub::MakeCurrent() {
    341   if (decoder_->MakeCurrent())
    342     return true;
    343   DLOG(ERROR) << "Context lost because MakeCurrent failed.";
    344   command_buffer_->SetContextLostReason(decoder_->GetContextLostReason());
    345   command_buffer_->SetParseError(gpu::error::kLostContext);
    346   CheckContextLost();
    347   return false;
    348 }
    349 
void GpuCommandBufferStub::Destroy() {
  // Tell the browser an offscreen context is going away, mirroring the
  // DidCreateOffscreenContext sent at the end of OnInitialize.
  if (handle_.is_null() && !active_url_.is_empty()) {
    GpuChannelManager* gpu_channel_manager = channel_->gpu_channel_manager();
    gpu_channel_manager->Send(new GpuHostMsg_DidDestroyOffscreenContext(
        active_url_));
  }

  memory_manager_client_state_.reset();

  // Retire any sync points this stub still owns so waiters aren't stuck.
  while (!sync_points_.empty())
    OnRetireSyncPoint(sync_points_.front());

  if (decoder_)
    decoder_->set_engine(NULL);

  // The scheduler has raw references to the decoder and the command buffer so
  // destroy it before those.
  scheduler_.reset();

  // Try to make the context current so the decoder can free GL resources;
  // skip if the context is already lost.
  bool have_context = false;
  if (decoder_ && command_buffer_ &&
      command_buffer_->GetState().error != gpu::error::kLostContext)
    have_context = decoder_->MakeCurrent();
  FOR_EACH_OBSERVER(DestructionObserver,
                    destruction_observers_,
                    OnWillDestroyStub());

  if (decoder_) {
    decoder_->Destroy(have_context);
    decoder_.reset();
  }

  command_buffer_.reset();

  // Remove this after crbug.com/248395 is sorted out.
  surface_ = NULL;
}
    387 
void GpuCommandBufferStub::OnInitializeFailed(IPC::Message* reply_message) {
  // Tear down any partially-initialized state, then reply with failure and
  // empty capabilities so the client isn't left blocked on the sync IPC.
  Destroy();
  GpuCommandBufferMsg_Initialize::WriteReplyParams(
      reply_message, false, gpu::Capabilities());
  Send(reply_message);
}
    394 
    395 void GpuCommandBufferStub::OnInitialize(
    396     base::SharedMemoryHandle shared_state_handle,
    397     IPC::Message* reply_message) {
    398   TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnInitialize");
    399   DCHECK(!command_buffer_.get());
    400 
    401   scoped_ptr<base::SharedMemory> shared_state_shm(
    402       new base::SharedMemory(shared_state_handle, false));
    403 
    404   command_buffer_.reset(new gpu::CommandBufferService(
    405       context_group_->transfer_buffer_manager()));
    406 
    407   if (!command_buffer_->Initialize()) {
    408     DLOG(ERROR) << "CommandBufferService failed to initialize.\n";
    409     OnInitializeFailed(reply_message);
    410     return;
    411   }
    412 
    413   decoder_.reset(::gpu::gles2::GLES2Decoder::Create(context_group_.get()));
    414 
    415   scheduler_.reset(new gpu::GpuScheduler(command_buffer_.get(),
    416                                          decoder_.get(),
    417                                          decoder_.get()));
    418   if (preemption_flag_.get())
    419     scheduler_->SetPreemptByFlag(preemption_flag_);
    420 
    421   decoder_->set_engine(scheduler_.get());
    422 
    423   if (!handle_.is_null()) {
    424 #if defined(OS_MACOSX) || defined(UI_COMPOSITOR_IMAGE_TRANSPORT)
    425     if (software_) {
    426       DLOG(ERROR) << "No software support.\n";
    427       OnInitializeFailed(reply_message);
    428       return;
    429     }
    430 #endif
    431 
    432     surface_ = ImageTransportSurface::CreateSurface(
    433         channel_->gpu_channel_manager(),
    434         this,
    435         handle_);
    436   } else {
    437     GpuChannelManager* manager = channel_->gpu_channel_manager();
    438     surface_ = manager->GetDefaultOffscreenSurface();
    439   }
    440 
    441   if (!surface_.get()) {
    442     DLOG(ERROR) << "Failed to create surface.\n";
    443     OnInitializeFailed(reply_message);
    444     return;
    445   }
    446 
    447   scoped_refptr<gfx::GLContext> context;
    448   if (use_virtualized_gl_context_ && channel_->share_group()) {
    449     context = channel_->share_group()->GetSharedContext();
    450     if (!context.get()) {
    451       context = gfx::GLContext::CreateGLContext(
    452           channel_->share_group(),
    453           channel_->gpu_channel_manager()->GetDefaultOffscreenSurface(),
    454           gpu_preference_);
    455       channel_->share_group()->SetSharedContext(context.get());
    456     }
    457     // This should be a non-virtual GL context.
    458     DCHECK(context->GetHandle());
    459     context = new gpu::GLContextVirtual(
    460         channel_->share_group(), context.get(), decoder_->AsWeakPtr());
    461     if (!context->Initialize(surface_.get(), gpu_preference_)) {
    462       // TODO(sievers): The real context created above for the default
    463       // offscreen surface might not be compatible with this surface.
    464       // Need to adjust at least GLX to be able to create the initial context
    465       // with a config that is compatible with onscreen and offscreen surfaces.
    466       context = NULL;
    467 
    468       DLOG(ERROR) << "Failed to initialize virtual GL context.";
    469       OnInitializeFailed(reply_message);
    470       return;
    471     }
    472   }
    473   if (!context.get()) {
    474     context = gfx::GLContext::CreateGLContext(
    475         channel_->share_group(), surface_.get(), gpu_preference_);
    476   }
    477   if (!context.get()) {
    478     DLOG(ERROR) << "Failed to create context.\n";
    479     OnInitializeFailed(reply_message);
    480     return;
    481   }
    482 
    483   if (!context->MakeCurrent(surface_.get())) {
    484     LOG(ERROR) << "Failed to make context current.";
    485     OnInitializeFailed(reply_message);
    486     return;
    487   }
    488 
    489   if (!context->GetGLStateRestorer()) {
    490     context->SetGLStateRestorer(
    491         new gpu::GLStateRestorerImpl(decoder_->AsWeakPtr()));
    492   }
    493 
    494   if (!context->GetTotalGpuMemory(&total_gpu_memory_))
    495     total_gpu_memory_ = 0;
    496 
    497   if (!context_group_->has_program_cache()) {
    498     context_group_->set_program_cache(
    499         channel_->gpu_channel_manager()->program_cache());
    500   }
    501 
    502   // Initialize the decoder with either the view or pbuffer GLContext.
    503   if (!decoder_->Initialize(surface_,
    504                             context,
    505                             !surface_id(),
    506                             initial_size_,
    507                             disallowed_features_,
    508                             requested_attribs_)) {
    509     DLOG(ERROR) << "Failed to initialize decoder.";
    510     OnInitializeFailed(reply_message);
    511     return;
    512   }
    513 
    514   gpu_control_.reset(
    515       new gpu::GpuControlService(context_group_->image_manager(),
    516                                  NULL,
    517                                  context_group_->mailbox_manager(),
    518                                  NULL,
    519                                  decoder_->GetCapabilities()));
    520 
    521   if (CommandLine::ForCurrentProcess()->HasSwitch(
    522       switches::kEnableGPUServiceLogging)) {
    523     decoder_->set_log_commands(true);
    524   }
    525 
    526   decoder_->GetLogger()->SetMsgCallback(
    527       base::Bind(&GpuCommandBufferStub::SendConsoleMessage,
    528                  base::Unretained(this)));
    529   decoder_->SetShaderCacheCallback(
    530       base::Bind(&GpuCommandBufferStub::SendCachedShader,
    531                  base::Unretained(this)));
    532   decoder_->SetWaitSyncPointCallback(
    533       base::Bind(&GpuCommandBufferStub::OnWaitSyncPoint,
    534                  base::Unretained(this)));
    535 
    536   command_buffer_->SetPutOffsetChangeCallback(
    537       base::Bind(&GpuCommandBufferStub::PutChanged, base::Unretained(this)));
    538   command_buffer_->SetGetBufferChangeCallback(
    539       base::Bind(&gpu::GpuScheduler::SetGetBuffer,
    540                  base::Unretained(scheduler_.get())));
    541   command_buffer_->SetParseErrorCallback(
    542       base::Bind(&GpuCommandBufferStub::OnParseError, base::Unretained(this)));
    543   scheduler_->SetSchedulingChangedCallback(
    544       base::Bind(&GpuChannel::StubSchedulingChanged,
    545                  base::Unretained(channel_)));
    546 
    547   if (watchdog_) {
    548     scheduler_->SetCommandProcessedCallback(
    549         base::Bind(&GpuCommandBufferStub::OnCommandProcessed,
    550                    base::Unretained(this)));
    551   }
    552 
    553   if (!command_buffer_->SetSharedStateBuffer(shared_state_shm.Pass())) {
    554     DLOG(ERROR) << "Failed to map shared stae buffer.";
    555     OnInitializeFailed(reply_message);
    556     return;
    557   }
    558 
    559   GpuCommandBufferMsg_Initialize::WriteReplyParams(
    560       reply_message, true, gpu_control_->GetCapabilities());
    561   Send(reply_message);
    562 
    563   if (handle_.is_null() && !active_url_.is_empty()) {
    564     GpuChannelManager* gpu_channel_manager = channel_->gpu_channel_manager();
    565     gpu_channel_manager->Send(new GpuHostMsg_DidCreateOffscreenContext(
    566         active_url_));
    567   }
    568 }
    569 
    570 void GpuCommandBufferStub::OnSetLatencyInfo(
    571     const ui::LatencyInfo& latency_info) {
    572   if (!latency_info_callback_.is_null())
    573     latency_info_callback_.Run(latency_info);
    574 }
    575 
// Registers the callback invoked by OnSetLatencyInfo for each incoming
// GpuCommandBufferMsg_SetLatencyInfo message.
void GpuCommandBufferStub::SetLatencyInfoCallback(
    const LatencyInfoCallback& callback) {
  latency_info_callback_ = callback;
}
    580 
    581 int32 GpuCommandBufferStub::GetRequestedAttribute(int attr) const {
    582   // The command buffer is pairs of enum, value
    583   // search for the requested attribute, return the value.
    584   for (std::vector<int32>::const_iterator it = requested_attribs_.begin();
    585        it != requested_attribs_.end(); ++it) {
    586     if (*it++ == attr) {
    587       return *it;
    588     }
    589   }
    590   return -1;
    591 }
    592 
    593 void GpuCommandBufferStub::OnSetGetBuffer(int32 shm_id,
    594                                           IPC::Message* reply_message) {
    595   TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnSetGetBuffer");
    596   if (command_buffer_)
    597     command_buffer_->SetGetBuffer(shm_id);
    598   Send(reply_message);
    599 }
    600 
    601 void GpuCommandBufferStub::OnProduceFrontBuffer(const gpu::Mailbox& mailbox) {
    602   TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnProduceFrontBuffer");
    603   if (!decoder_) {
    604     LOG(ERROR) << "Can't produce front buffer before initialization.";
    605     return;
    606   }
    607 
    608   decoder_->ProduceFrontBuffer(mailbox);
    609 }
    610 
    611 void GpuCommandBufferStub::OnGetState(IPC::Message* reply_message) {
    612   TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnGetState");
    613   if (command_buffer_) {
    614     gpu::CommandBuffer::State state = command_buffer_->GetState();
    615     CheckContextLost();
    616     GpuCommandBufferMsg_GetState::WriteReplyParams(reply_message, state);
    617   } else {
    618     DLOG(ERROR) << "no command_buffer.";
    619     reply_message->set_reply_error();
    620   }
    621   Send(reply_message);
    622 }
    623 
void GpuCommandBufferStub::OnParseError() {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnParseError");
  DCHECK(command_buffer_.get());
  gpu::CommandBuffer::State state = command_buffer_->GetState();
  // Notify the client that this command buffer is dead, with the reason.
  IPC::Message* msg = new GpuCommandBufferMsg_Destroyed(
      route_id_, state.context_lost_reason);
  // Unblock so the renderer processes it even while waiting on a sync IPC.
  msg->set_unblock(true);
  Send(msg);

  // Tell the browser about this context loss as well, so it can
  // determine whether client APIs like WebGL need to be immediately
  // blocked from automatically running.
  GpuChannelManager* gpu_channel_manager = channel_->gpu_channel_manager();
  gpu_channel_manager->Send(new GpuHostMsg_DidLoseContext(
      handle_.is_null(), state.context_lost_reason, active_url_));

  CheckContextLost();
}
    642 
    643 void GpuCommandBufferStub::OnGetStateFast(IPC::Message* reply_message) {
    644   TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnGetStateFast");
    645   DCHECK(command_buffer_.get());
    646   CheckContextLost();
    647   gpu::CommandBuffer::State state = command_buffer_->GetState();
    648   GpuCommandBufferMsg_GetStateFast::WriteReplyParams(reply_message, state);
    649   Send(reply_message);
    650 }
    651 
void GpuCommandBufferStub::OnAsyncFlush(int32 put_offset,
                                        uint32 flush_count) {
  TRACE_EVENT1("gpu", "GpuCommandBufferStub::OnAsyncFlush",
               "put_offset", put_offset);
  DCHECK(command_buffer_.get());
  // Unsigned subtraction handles wraparound of the 32-bit flush counter:
  // a small positive difference means the flush is in order.
  if (flush_count - last_flush_count_ < 0x8000000U) {
    last_flush_count_ = flush_count;
    command_buffer_->Flush(put_offset);
  } else {
    // We received this message out-of-order. This should not happen but is here
    // to catch regressions. Ignore the message.
    NOTREACHED() << "Received a Flush message out-of-order";
  }

  ReportState();
}
    668 
    669 void GpuCommandBufferStub::OnRescheduled() {
    670   gpu::CommandBuffer::State pre_state = command_buffer_->GetLastState();
    671   command_buffer_->Flush(pre_state.put_offset);
    672   gpu::CommandBuffer::State post_state = command_buffer_->GetLastState();
    673 
    674   if (pre_state.get_offset != post_state.get_offset)
    675     ReportState();
    676 }
    677 
void GpuCommandBufferStub::OnRegisterTransferBuffer(
    int32 id,
    base::SharedMemoryHandle transfer_buffer,
    uint32 size) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnRegisterTransferBuffer");
  // Wrap the incoming handle (read/write mapping); the command buffer
  // duplicates what it needs, so this local wrapper's scope is fine.
  base::SharedMemory shared_memory(transfer_buffer, false);

  if (command_buffer_)
    command_buffer_->RegisterTransferBuffer(id, &shared_memory, size);
}
    688 
    689 void GpuCommandBufferStub::OnDestroyTransferBuffer(int32 id) {
    690   TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnDestroyTransferBuffer");
    691 
    692   if (command_buffer_)
    693     command_buffer_->DestroyTransferBuffer(id);
    694 }
    695 
void GpuCommandBufferStub::OnGetTransferBuffer(
    int32 id,
    IPC::Message* reply_message) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnGetTransferBuffer");
  if (command_buffer_) {
    // Defaults reply with a null handle / zero size if the buffer is unknown.
    base::SharedMemoryHandle transfer_buffer = base::SharedMemoryHandle();
    uint32 size = 0;

    gpu::Buffer buffer = command_buffer_->GetTransferBuffer(id);
    if (buffer.shared_memory) {
#if defined(OS_WIN)
      // On Windows the handle must be duplicated into the renderer process
      // via the broker (sandboxed processes can't duplicate directly).
      transfer_buffer = NULL;
      BrokerDuplicateHandle(buffer.shared_memory->handle(),
          channel_->renderer_pid(), &transfer_buffer, FILE_MAP_READ |
          FILE_MAP_WRITE, 0);
      DCHECK(transfer_buffer != NULL);
#else
      buffer.shared_memory->ShareToProcess(channel_->renderer_pid(),
                                           &transfer_buffer);
#endif
      size = buffer.size;
    }

    GpuCommandBufferMsg_GetTransferBuffer::WriteReplyParams(reply_message,
                                                            transfer_buffer,
                                                            size);
  } else {
    reply_message->set_reply_error();
  }
  Send(reply_message);
}
    727 
    728 void GpuCommandBufferStub::OnCommandProcessed() {
    729   if (watchdog_)
    730     watchdog_->CheckArmed();
    731 }
    732 
    733 void GpuCommandBufferStub::ReportState() {
    734   if (!CheckContextLost())
    735     command_buffer_->UpdateState();
    736 }
    737 
// Scheduler entry point invoked when the client advances the put offset.
// The active URL is published first so any GPU crash while processing the
// new commands is attributed to the right page.
void GpuCommandBufferStub::PutChanged() {
  FastSetActiveURL(active_url_, active_url_hash_);
  scheduler_->PutChanged();
}
    742 
    743 void GpuCommandBufferStub::OnCreateVideoDecoder(
    744     media::VideoCodecProfile profile,
    745     IPC::Message* reply_message) {
    746   TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnCreateVideoDecoder");
    747   int decoder_route_id = channel_->GenerateRouteID();
    748   GpuVideoDecodeAccelerator* decoder = new GpuVideoDecodeAccelerator(
    749       decoder_route_id, this, channel_->io_message_loop());
    750   decoder->Initialize(profile, reply_message);
    751   // decoder is registered as a DestructionObserver of this stub and will
    752   // self-delete during destruction of this stub.
    753 }
    754 
    755 void GpuCommandBufferStub::OnSetSurfaceVisible(bool visible) {
    756   TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnSetSurfaceVisible");
    757   if (memory_manager_client_state_)
    758     memory_manager_client_state_->SetVisible(visible);
    759 }
    760 
// Records |sync_point| as outstanding for this stub. Sync points are
// retired strictly FIFO in OnRetireSyncPoint().
void GpuCommandBufferStub::AddSyncPoint(uint32 sync_point) {
  sync_points_.push_back(sync_point);
}
    764 
// IPC handler: retires the oldest outstanding sync point, which must be
// |sync_point| (insertion order is FIFO).
void GpuCommandBufferStub::OnRetireSyncPoint(uint32 sync_point) {
  DCHECK(!sync_points_.empty() && sync_points_.front() == sync_point);
  sync_points_.pop_front();
  // Push any pending texture updates through the mailbox manager before the
  // sync point retires, so waiters observe the updated textures. Requires a
  // current context.
  if (context_group_->mailbox_manager()->UsesSync() && MakeCurrent())
    context_group_->mailbox_manager()->PushTextureUpdates();
  GpuChannelManager* manager = channel_->gpu_channel_manager();
  manager->sync_point_manager()->RetireSyncPoint(sync_point);
}
    773 
// IPC handler: deschedules this command buffer until |sync_point| retires.
// Waits are counted so that with multiple pending waits the stub is only
// rescheduled after OnSyncPointRetired() has run once per wait.
// Returns whether the scheduler is still scheduled afterwards.
bool GpuCommandBufferStub::OnWaitSyncPoint(uint32 sync_point) {
  if (sync_point_wait_count_ == 0) {
    TRACE_EVENT_ASYNC_BEGIN1("gpu", "WaitSyncPoint", this,
                             "GpuCommandBufferStub", this);
  }
  scheduler_->SetScheduled(false);
  ++sync_point_wait_count_;
  GpuChannelManager* manager = channel_->gpu_channel_manager();
  // Bound through a weak pointer so the callback is dropped if this stub is
  // destroyed before the sync point retires.
  manager->sync_point_manager()->AddSyncPointCallback(
      sync_point,
      base::Bind(&GpuCommandBufferStub::OnSyncPointRetired,
                 this->AsWeakPtr()));
  // NOTE(review): this can return true if the callback ran synchronously —
  // presumably when the sync point was already retired; confirm against
  // SyncPointManager::AddSyncPointCallback.
  return scheduler_->IsScheduled();
}
    788 
// Callback run when an awaited sync point retires; balances
// OnWaitSyncPoint(). Reschedules the command buffer (SetScheduled(true))
// once per completed wait.
void GpuCommandBufferStub::OnSyncPointRetired() {
  --sync_point_wait_count_;
  if (sync_point_wait_count_ == 0) {
    TRACE_EVENT_ASYNC_END1("gpu", "WaitSyncPoint", this,
                           "GpuCommandBufferStub", this);
  }
  scheduler_->SetScheduled(true);
}
    797 
    798 void GpuCommandBufferStub::OnSignalSyncPoint(uint32 sync_point, uint32 id) {
    799   GpuChannelManager* manager = channel_->gpu_channel_manager();
    800   manager->sync_point_manager()->AddSyncPointCallback(
    801       sync_point,
    802       base::Bind(&GpuCommandBufferStub::OnSignalSyncPointAck,
    803                  this->AsWeakPtr(),
    804                  id));
    805 }
    806 
// Sends the signal ack message, identified by |id|, back to the client on
// this stub's route.
void GpuCommandBufferStub::OnSignalSyncPointAck(uint32 id) {
  Send(new GpuCommandBufferMsg_SignalSyncPointAck(route_id_, id));
}
    810 
    811 void GpuCommandBufferStub::OnSignalQuery(uint32 query_id, uint32 id) {
    812   if (decoder_) {
    813     gpu::gles2::QueryManager* query_manager = decoder_->GetQueryManager();
    814     if (query_manager) {
    815       gpu::gles2::QueryManager::Query* query =
    816           query_manager->GetQuery(query_id);
    817       if (query) {
    818         query->AddCallback(
    819           base::Bind(&GpuCommandBufferStub::OnSignalSyncPointAck,
    820                      this->AsWeakPtr(),
    821                      id));
    822         return;
    823       }
    824     }
    825   }
    826   // Something went wrong, run callback immediately.
    827   OnSignalSyncPointAck(id);
    828 }
    829 
    830 
    831 void GpuCommandBufferStub::OnReceivedClientManagedMemoryStats(
    832     const gpu::ManagedMemoryStats& stats) {
    833   TRACE_EVENT0(
    834       "gpu",
    835       "GpuCommandBufferStub::OnReceivedClientManagedMemoryStats");
    836   if (memory_manager_client_state_)
    837     memory_manager_client_state_->SetManagedMemoryStats(stats);
    838 }
    839 
    840 void GpuCommandBufferStub::OnSetClientHasMemoryAllocationChangedCallback(
    841     bool has_callback) {
    842   TRACE_EVENT0(
    843       "gpu",
    844       "GpuCommandBufferStub::OnSetClientHasMemoryAllocationChangedCallback");
    845   if (has_callback) {
    846     if (!memory_manager_client_state_) {
    847       memory_manager_client_state_.reset(GetMemoryManager()->CreateClientState(
    848           this, surface_id_ != 0, true));
    849     }
    850   } else {
    851     memory_manager_client_state_.reset();
    852   }
    853 }
    854 
    855 void GpuCommandBufferStub::OnRegisterGpuMemoryBuffer(
    856     int32 id,
    857     gfx::GpuMemoryBufferHandle gpu_memory_buffer,
    858     uint32 width,
    859     uint32 height,
    860     uint32 internalformat) {
    861   TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnRegisterGpuMemoryBuffer");
    862   if (gpu_control_) {
    863     gpu_control_->RegisterGpuMemoryBuffer(id,
    864                                           gpu_memory_buffer,
    865                                           width,
    866                                           height,
    867                                           internalformat);
    868   }
    869 }
    870 
    871 void GpuCommandBufferStub::OnDestroyGpuMemoryBuffer(int32 id) {
    872   TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnDestroyGpuMemoryBuffer");
    873   if (gpu_control_)
    874     gpu_control_->DestroyGpuMemoryBuffer(id);
    875 }
    876 
    877 void GpuCommandBufferStub::SendConsoleMessage(
    878     int32 id,
    879     const std::string& message) {
    880   GPUCommandBufferConsoleMessage console_message;
    881   console_message.id = id;
    882   console_message.message = message;
    883   IPC::Message* msg = new GpuCommandBufferMsg_ConsoleMsg(
    884       route_id_, console_message);
    885   msg->set_unblock(true);
    886   Send(msg);
    887 }
    888 
// Hands a compiled shader (keyed by |key|) to the channel's shader cache.
void GpuCommandBufferStub::SendCachedShader(
    const std::string& key, const std::string& shader) {
  channel_->CacheShader(key, shader);
}
    893 
// Registers |observer| to be notified when this stub is destroyed.
void GpuCommandBufferStub::AddDestructionObserver(
    DestructionObserver* observer) {
  destruction_observers_.AddObserver(observer);
}
    898 
// Unregisters a previously added destruction observer.
void GpuCommandBufferStub::RemoveDestructionObserver(
    DestructionObserver* observer) {
  destruction_observers_.RemoveObserver(observer);
}
    903 
// Stores the preemption flag on the stub and, when a scheduler exists,
// forwards it so the scheduler can yield when the flag is raised.
void GpuCommandBufferStub::SetPreemptByFlag(
    scoped_refptr<gpu::PreemptionFlag> flag) {
  preemption_flag_ = flag;
  if (scheduler_)
    scheduler_->SetPreemptByFlag(preemption_flag_);
}
    910 
    911 bool GpuCommandBufferStub::GetTotalGpuMemory(uint64* bytes) {
    912   *bytes = total_gpu_memory_;
    913   return !!total_gpu_memory_;
    914 }
    915 
    916 gfx::Size GpuCommandBufferStub::GetSurfaceSize() const {
    917   if (!surface_.get())
    918     return gfx::Size();
    919   return surface_->GetSize();
    920 }
    921 
// Returns the memory tracker shared by this stub's context group.
gpu::gles2::MemoryTracker* GpuCommandBufferStub::GetMemoryTracker() const {
  return context_group_->memory_tracker();
}
    925 
    926 void GpuCommandBufferStub::SetMemoryAllocation(
    927     const gpu::MemoryAllocation& allocation) {
    928   if (!last_memory_allocation_valid_ ||
    929       !allocation.Equals(last_memory_allocation_)) {
    930     Send(new GpuCommandBufferMsg_SetMemoryAllocation(
    931         route_id_, allocation));
    932   }
    933 
    934   last_memory_allocation_valid_ = true;
    935   last_memory_allocation_ = allocation;
    936 }
    937 
    938 void GpuCommandBufferStub::SuggestHaveFrontBuffer(
    939     bool suggest_have_frontbuffer) {
    940   // This can be called outside of OnMessageReceived, so the context needs
    941   // to be made current before calling methods on the surface.
    942   if (surface_.get() && MakeCurrent())
    943     surface_->SetFrontbufferAllocation(suggest_have_frontbuffer);
    944 }
    945 
    946 bool GpuCommandBufferStub::CheckContextLost() {
    947   DCHECK(command_buffer_);
    948   gpu::CommandBuffer::State state = command_buffer_->GetState();
    949   bool was_lost = state.error == gpu::error::kLostContext;
    950   // Lose all other contexts if the reset was triggered by the robustness
    951   // extension instead of being synthetic.
    952   if (was_lost && decoder_ && decoder_->WasContextLostByRobustnessExtension() &&
    953       (gfx::GLContext::LosesAllContextsOnContextLost() ||
    954        use_virtualized_gl_context_))
    955     channel_->LoseAllContexts();
    956   return was_lost;
    957 }
    958 
    959 void GpuCommandBufferStub::MarkContextLost() {
    960   if (!command_buffer_ ||
    961       command_buffer_->GetState().error == gpu::error::kLostContext)
    962     return;
    963 
    964   command_buffer_->SetContextLostReason(gpu::error::kUnknown);
    965   if (decoder_)
    966     decoder_->LoseContext(GL_UNKNOWN_CONTEXT_RESET_ARB);
    967   command_buffer_->SetParseError(gpu::error::kLostContext);
    968 }
    969 
// Returns this client's current memory usage as tracked by the GPU memory
// manager.
uint64 GpuCommandBufferStub::GetMemoryUsage() const {
  return GetMemoryManager()->GetClientMemoryUsage(this);
}
    973 
    974 }  // namespace content
    975