Home | History | Annotate | Download | only in client
      1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
      2 // Use of this source code is governed by a BSD-style license that can be
      3 // found in the LICENSE file.
      4 
      5 #include "content/common/gpu/client/gpu_channel_host.h"
      6 
      7 #include <algorithm>
      8 
      9 #include "base/bind.h"
     10 #include "base/debug/trace_event.h"
     11 #include "base/message_loop/message_loop.h"
     12 #include "base/message_loop/message_loop_proxy.h"
     13 #include "base/posix/eintr_wrapper.h"
     14 #include "base/threading/thread_restrictions.h"
     15 #include "content/common/gpu/client/command_buffer_proxy_impl.h"
     16 #include "content/common/gpu/client/gpu_video_encode_accelerator_host.h"
     17 #include "content/common/gpu/gpu_messages.h"
     18 #include "gpu/command_buffer/common/mailbox.h"
     19 #include "ipc/ipc_sync_message_filter.h"
     20 #include "url/gurl.h"
     21 
     22 #if defined(OS_WIN)
     23 #include "content/public/common/sandbox_init.h"
     24 #endif
     25 
     26 using base::AutoLock;
     27 using base::MessageLoopProxy;
     28 
     29 namespace content {
     30 
// GpuListenerInfo pairs a route's listener with the message loop the
// listener must be called on. Out-of-line empty ctor/dtor keep the
// implicit member construction/destruction in this translation unit.
GpuListenerInfo::GpuListenerInfo() {}

GpuListenerInfo::~GpuListenerInfo() {}
     34 
// static
// Creates a GpuChannelHost and immediately connects it to the GPU process
// over |channel_handle|. Must be called on the main thread — Connect()
// constructs the underlying IPC::SyncChannel on the calling thread.
scoped_refptr<GpuChannelHost> GpuChannelHost::Create(
    GpuChannelHostFactory* factory,
    int gpu_host_id,
    const gpu::GPUInfo& gpu_info,
    const IPC::ChannelHandle& channel_handle) {
  DCHECK(factory->IsMainThread());
  scoped_refptr<GpuChannelHost> host = new GpuChannelHost(
      factory, gpu_host_id, gpu_info);
  host->Connect(channel_handle);
  return host;
}
     47 
     48 // static
     49 bool GpuChannelHost::IsValidGpuMemoryBuffer(
     50     gfx::GpuMemoryBufferHandle handle) {
     51   switch (handle.type) {
     52     case gfx::SHARED_MEMORY_BUFFER:
     53 #if defined(OS_MACOSX)
     54     case gfx::IO_SURFACE_BUFFER:
     55 #endif
     56       return true;
     57     default:
     58       return false;
     59   }
     60 }
     61 
GpuChannelHost::GpuChannelHost(GpuChannelHostFactory* factory,
                               int gpu_host_id,
                               const gpu::GPUInfo& gpu_info)
    : factory_(factory),
      gpu_host_id_(gpu_host_id),
      gpu_info_(gpu_info) {
  // Consume the first value of each sequence so Reserve*Id() never hands
  // out id 0 — presumably reserved as an invalid id; confirm with callers.
  next_transfer_buffer_id_.GetNext();
  next_gpu_memory_buffer_id_.GetNext();
}
     71 
void GpuChannelHost::Connect(const IPC::ChannelHandle& channel_handle) {
  // Open a channel to the GPU process. We pass NULL as the main listener here
  // since we need to filter everything to route it to the right thread.
  scoped_refptr<base::MessageLoopProxy> io_loop = factory_->GetIOLoopProxy();
  channel_.reset(new IPC::SyncChannel(channel_handle,
                                      IPC::Channel::MODE_CLIENT,
                                      NULL,
                                      io_loop.get(),
                                      true,
                                      factory_->GetShutDownEvent()));

  // Allows Send() to be issued from threads other than the main thread
  // (see the non-main-thread branch in Send()).
  sync_filter_ = new IPC::SyncMessageFilter(
      factory_->GetShutDownEvent());

  channel_->AddFilter(sync_filter_.get());

  channel_filter_ = new MessageFilter();

  // Install the filter last, because we intercept all leftover
  // messages.
  channel_->AddFilter(channel_filter_.get());
}
     94 
bool GpuChannelHost::Send(IPC::Message* msg) {
  // Callee takes ownership of message, regardless of whether Send is
  // successful. See IPC::Sender.
  scoped_ptr<IPC::Message> message(msg);
  // The GPU process never sends synchronous IPCs so clear the unblock flag to
  // preserve order.
  message->set_unblock(false);

  // Currently we need to choose between two different mechanisms for sending.
  // On the main thread we use the regular channel Send() method, on another
  // thread we use SyncMessageFilter. We also have to be careful interpreting
  // IsMainThread() since it might return false during shutdown,
  // whilst we are actually calling from the main thread (discard message then).
  //
  // TODO: Can we just always use sync_filter_ since we setup the channel
  //       without a main listener?
  if (factory_->IsMainThread()) {
    // Sending a sync IPC may block the main thread; explicitly permit the
    // wait here. http://crbug.com/125264
    base::ThreadRestrictions::ScopedAllowWait allow_wait;
    return channel_->Send(message.release());
  } else if (base::MessageLoop::current()) {
    return sync_filter_->Send(message.release());
  }

  // No message loop on this thread (e.g. during shutdown): drop the message.
  return false;
}
    121 
// Creates a command buffer that renders to the view identified by
// |surface_id|. Returns NULL if the browser-side allocation fails. The
// returned proxy is tracked in |proxies_| and deleted by
// DestroyCommandBuffer().
CommandBufferProxyImpl* GpuChannelHost::CreateViewCommandBuffer(
    int32 surface_id,
    CommandBufferProxyImpl* share_group,
    const std::vector<int32>& attribs,
    const GURL& active_url,
    gfx::GpuPreference gpu_preference) {
  TRACE_EVENT1("gpu",
               "GpuChannelHost::CreateViewCommandBuffer",
               "surface_id",
               surface_id);

  GPUCreateCommandBufferConfig init_params;
  init_params.share_group_id =
      share_group ? share_group->GetRouteID() : MSG_ROUTING_NONE;
  init_params.attribs = attribs;
  init_params.active_url = active_url;
  init_params.gpu_preference = gpu_preference;
  // View command buffers are allocated via the factory (browser process),
  // unlike offscreen ones which go straight over the GPU channel.
  int32 route_id = factory_->CreateViewCommandBuffer(surface_id, init_params);
  if (route_id == MSG_ROUTING_NONE)
    return NULL;

  CommandBufferProxyImpl* command_buffer =
      new CommandBufferProxyImpl(this, route_id);
  AddRoute(route_id, command_buffer->AsWeakPtr());

  AutoLock lock(context_lock_);
  proxies_[route_id] = command_buffer;
  return command_buffer;
}
    151 
    152 CommandBufferProxyImpl* GpuChannelHost::CreateOffscreenCommandBuffer(
    153     const gfx::Size& size,
    154     CommandBufferProxyImpl* share_group,
    155     const std::vector<int32>& attribs,
    156     const GURL& active_url,
    157     gfx::GpuPreference gpu_preference) {
    158   TRACE_EVENT0("gpu", "GpuChannelHost::CreateOffscreenCommandBuffer");
    159 
    160   GPUCreateCommandBufferConfig init_params;
    161   init_params.share_group_id =
    162       share_group ? share_group->GetRouteID() : MSG_ROUTING_NONE;
    163   init_params.attribs = attribs;
    164   init_params.active_url = active_url;
    165   init_params.gpu_preference = gpu_preference;
    166   int32 route_id;
    167   if (!Send(new GpuChannelMsg_CreateOffscreenCommandBuffer(size,
    168                                                            init_params,
    169                                                            &route_id))) {
    170     return NULL;
    171   }
    172 
    173   if (route_id == MSG_ROUTING_NONE)
    174     return NULL;
    175 
    176   CommandBufferProxyImpl* command_buffer =
    177       new CommandBufferProxyImpl(this, route_id);
    178   AddRoute(route_id, command_buffer->AsWeakPtr());
    179 
    180   AutoLock lock(context_lock_);
    181   proxies_[route_id] = command_buffer;
    182   return command_buffer;
    183 }
    184 
// Creates a video decoder attached to an existing command buffer. The
// command buffer identified by |command_buffer_route_id| must already be
// registered in |proxies_| (DCHECKed below).
scoped_ptr<media::VideoDecodeAccelerator> GpuChannelHost::CreateVideoDecoder(
    int command_buffer_route_id,
    media::VideoCodecProfile profile,
    media::VideoDecodeAccelerator::Client* client) {
  AutoLock lock(context_lock_);
  ProxyMap::iterator it = proxies_.find(command_buffer_route_id);
  DCHECK(it != proxies_.end());
  CommandBufferProxyImpl* proxy = it->second;
  return proxy->CreateVideoDecoder(profile, client).Pass();
}
    195 
    196 scoped_ptr<media::VideoEncodeAccelerator> GpuChannelHost::CreateVideoEncoder(
    197     media::VideoEncodeAccelerator::Client* client) {
    198   TRACE_EVENT0("gpu", "GpuChannelHost::CreateVideoEncoder");
    199 
    200   scoped_ptr<media::VideoEncodeAccelerator> vea;
    201   int32 route_id = MSG_ROUTING_NONE;
    202   if (!Send(new GpuChannelMsg_CreateVideoEncoder(&route_id)))
    203     return vea.Pass();
    204   if (route_id == MSG_ROUTING_NONE)
    205     return vea.Pass();
    206 
    207   vea.reset(new GpuVideoEncodeAcceleratorHost(client, this, route_id));
    208   return vea.Pass();
    209 }
    210 
// Tears down a command buffer created by CreateViewCommandBuffer() or
// CreateOffscreenCommandBuffer(): notifies the GPU process, unregisters the
// route, and deletes |command_buffer|.
void GpuChannelHost::DestroyCommandBuffer(
    CommandBufferProxyImpl* command_buffer) {
  TRACE_EVENT0("gpu", "GpuChannelHost::DestroyCommandBuffer");

  int route_id = command_buffer->GetRouteID();
  Send(new GpuChannelMsg_DestroyCommandBuffer(route_id));
  RemoveRoute(route_id);

  AutoLock lock(context_lock_);
  proxies_.erase(route_id);
  delete command_buffer;
}
    223 
// Synchronously fetches rendering stats for |surface_id| from the GPU
// process into |stats|. Returns false if the IPC fails.
bool GpuChannelHost::CollectRenderingStatsForSurface(
    int surface_id, GpuRenderingStats* stats) {
  // NOTE(review): trace name omits the "ForSurface" suffix of the method
  // name — looks intentional/historical; confirm before renaming, as trace
  // names are consumed by tooling.
  TRACE_EVENT0("gpu", "GpuChannelHost::CollectRenderingStats");

  return Send(new GpuChannelMsg_CollectRenderingStatsForSurface(surface_id,
                                                                stats));
}
    231 
// Registers |listener| for messages with |route_id|. The registration is
// posted to the IO thread, where the channel filter runs; replies destined
// for the route are then bounced back to the current thread's loop.
void GpuChannelHost::AddRoute(
    int route_id, base::WeakPtr<IPC::Listener> listener) {
  // The current loop is captured below so the filter can post incoming
  // messages back to this thread.
  DCHECK(MessageLoopProxy::current().get());

  scoped_refptr<base::MessageLoopProxy> io_loop = factory_->GetIOLoopProxy();
  io_loop->PostTask(FROM_HERE,
                    base::Bind(&GpuChannelHost::MessageFilter::AddRoute,
                               channel_filter_.get(), route_id, listener,
                               MessageLoopProxy::current()));
}
    242 
    243 void GpuChannelHost::RemoveRoute(int route_id) {
    244   scoped_refptr<base::MessageLoopProxy> io_loop = factory_->GetIOLoopProxy();
    245   io_loop->PostTask(FROM_HERE,
    246                     base::Bind(&GpuChannelHost::MessageFilter::RemoveRoute,
    247                                channel_filter_.get(), route_id));
    248 }
    249 
// Duplicates |source_handle| so it can be passed to the GPU process.
// Returns a null handle if the channel is lost or duplication fails.
// Ownership of the duplicate passes to the caller (and ultimately to the
// message that carries it).
base::SharedMemoryHandle GpuChannelHost::ShareToGpuProcess(
    base::SharedMemoryHandle source_handle) {
  if (IsLost())
    return base::SharedMemory::NULLHandle();

#if defined(OS_WIN)
  // Windows needs to explicitly duplicate the handle out to another process.
  base::SharedMemoryHandle target_handle;
  if (!BrokerDuplicateHandle(source_handle,
                             channel_->peer_pid(),
                             &target_handle,
                             FILE_GENERIC_READ | FILE_GENERIC_WRITE,
                             0)) {
    return base::SharedMemory::NULLHandle();
  }

  return target_handle;
#else
  // POSIX: a plain dup() suffices; the fd is sent over the channel.
  // HANDLE_EINTR retries if dup() is interrupted by a signal.
  int duped_handle = HANDLE_EINTR(dup(source_handle.fd));
  if (duped_handle < 0)
    return base::SharedMemory::NULLHandle();

  // true: the receiver should close the fd when done.
  return base::FileDescriptor(duped_handle, true);
#endif
}
    275 
// Fills |names| with |num| fresh mailbox names. Names are served from the
// filter's local pool when possible; any shortfall is fetched with a
// synchronous IPC. Afterwards an async request may be issued to top the
// pool back up for future calls. Returns false only if the sync IPC fails.
bool GpuChannelHost::GenerateMailboxNames(unsigned num,
                                          std::vector<gpu::Mailbox>* names) {
  DCHECK(names->empty());
  TRACE_EVENT0("gpu", "GenerateMailboxName");
  // |generate_count| is how many names the filter wants prefetched to
  // refill its pool (0 if the pool is still healthy).
  size_t generate_count = channel_filter_->GetMailboxNames(num, names);

  if (names->size() < num) {
    // Pool could not cover the request; fetch the remainder synchronously.
    std::vector<gpu::Mailbox> new_names;
    if (!Send(new GpuChannelMsg_GenerateMailboxNames(num - names->size(),
                                                     &new_names)))
      return false;
    names->insert(names->end(), new_names.begin(), new_names.end());
  }

  if (generate_count > 0)
    Send(new GpuChannelMsg_GenerateMailboxNamesAsync(generate_count));

  return true;
}
    295 
    296 int32 GpuChannelHost::ReserveTransferBufferId() {
    297   return next_transfer_buffer_id_.GetNext();
    298 }
    299 
// Produces a GpuMemoryBufferHandle that is valid in the GPU process.
// Shared-memory handles need their underlying SharedMemoryHandle duplicated
// via ShareToGpuProcess(); IOSurface handles (Mac) are process-agnostic and
// pass through unchanged. See IsValidGpuMemoryBuffer() for the accepted
// types.
gfx::GpuMemoryBufferHandle GpuChannelHost::ShareGpuMemoryBufferToGpuProcess(
    gfx::GpuMemoryBufferHandle source_handle) {
  switch (source_handle.type) {
    case gfx::SHARED_MEMORY_BUFFER: {
      gfx::GpuMemoryBufferHandle handle;
      handle.type = gfx::SHARED_MEMORY_BUFFER;
      handle.handle = ShareToGpuProcess(source_handle.handle);
      return handle;
    }
#if defined(OS_MACOSX)
    case gfx::IO_SURFACE_BUFFER:
      return source_handle;
#endif
    default:
      NOTREACHED();
      return gfx::GpuMemoryBufferHandle();
  }
}
    318 
    319 int32 GpuChannelHost::ReserveGpuMemoryBufferId() {
    320   return next_gpu_memory_buffer_id_.GetNext();
    321 }
    322 
GpuChannelHost::~GpuChannelHost() {
  // channel_ must be destroyed on the main thread. If we are already there,
  // normal member destruction handles it; otherwise hand the released
  // pointer to the main loop for deferred deletion.
  if (!factory_->IsMainThread())
    factory_->GetMainLoop()->DeleteSoon(FROM_HERE, channel_.release());
}
    328 
    329 
// The filter starts in the "not lost" state with no outstanding async
// mailbox-name requests.
GpuChannelHost::MessageFilter::MessageFilter()
    : lost_(false),
      requested_mailboxes_(0) {
}

GpuChannelHost::MessageFilter::~MessageFilter() {}
    336 
// Registers |listener| for |route_id|; incoming messages for the route are
// posted to |loop|. Runs on the IO thread (posted from
// GpuChannelHost::AddRoute). Adding a duplicate route is a programming
// error (DCHECK).
void GpuChannelHost::MessageFilter::AddRoute(
    int route_id,
    base::WeakPtr<IPC::Listener> listener,
    scoped_refptr<MessageLoopProxy> loop) {
  DCHECK(listeners_.find(route_id) == listeners_.end());
  GpuListenerInfo info;
  info.listener = listener;
  info.loop = loop;
  listeners_[route_id] = info;
}
    347 
    348 void GpuChannelHost::MessageFilter::RemoveRoute(int route_id) {
    349   ListenerMap::iterator it = listeners_.find(route_id);
    350   if (it != listeners_.end())
    351     listeners_.erase(it);
    352 }
    353 
// IO-thread entry point for all messages not consumed by earlier filters.
// Returns true when the message was consumed here (routed messages are
// considered consumed even if no listener is registered for the route).
bool GpuChannelHost::MessageFilter::OnMessageReceived(
    const IPC::Message& message) {
  // Never handle sync message replies or we will deadlock here.
  if (message.is_reply())
    return false;

  if (message.routing_id() == MSG_ROUTING_CONTROL)
    return OnControlMessageReceived(message);

  ListenerMap::iterator it = listeners_.find(message.routing_id());

  if (it != listeners_.end()) {
    const GpuListenerInfo& info = it->second;
    // Dispatch on the thread the route was registered from; the weak
    // listener pointer makes this safe if the listener has since died.
    info.loop->PostTask(
        FROM_HERE,
        base::Bind(
            base::IgnoreResult(&IPC::Listener::OnMessageReceived),
            info.listener,
            message));
  }

  return true;
}
    377 
// IO-thread notification that the channel to the GPU process died. Marks
// the host lost, fans the error out to every registered listener on its own
// thread, and drops all routes.
void GpuChannelHost::MessageFilter::OnChannelError() {
  // Set the lost state before signalling the proxies. That way, if they
  // themselves post a task to recreate the context, they will not try to re-use
  // this channel host.
  {
    AutoLock lock(lock_);
    lost_ = true;
  }

  // Inform all the proxies that an error has occurred. This will be reported
  // via OpenGL as a lost context.
  for (ListenerMap::iterator it = listeners_.begin();
       it != listeners_.end();
       it++) {
    const GpuListenerInfo& info = it->second;
    info.loop->PostTask(
        FROM_HERE,
        base::Bind(&IPC::Listener::OnChannelError, info.listener));
  }

  // No further messages will arrive; the routes are dead.
  listeners_.clear();
}
    400 
// Returns whether the channel has errored out. Callable from any thread;
// |lost_| is guarded by |lock_| (set in OnChannelError on the IO thread).
bool GpuChannelHost::MessageFilter::IsLost() const {
  AutoLock lock(lock_);
  return lost_;
}
    405 
// Hands out up to |num| mailbox names from the local pool (prepended to
// |names|; the caller may receive fewer than requested). The return value is
// how many names the caller should request asynchronously to refill the
// pool: 0 while pool + outstanding requests are at least half of the ideal
// size, otherwise the amount needed to reach the ideal size.
size_t GpuChannelHost::MessageFilter::GetMailboxNames(
    size_t num, std::vector<gpu::Mailbox>* names) {
  AutoLock lock(lock_);
  // Serve from the tail of the pool.
  size_t count = std::min(num, mailbox_name_pool_.size());
  names->insert(names->begin(),
                mailbox_name_pool_.end() - count,
                mailbox_name_pool_.end());
  mailbox_name_pool_.erase(mailbox_name_pool_.end() - count,
                           mailbox_name_pool_.end());

  const size_t ideal_mailbox_pool_size = 100;
  // Account for names already requested but not yet delivered
  // (OnGenerateMailboxNamesReply decrements |requested_mailboxes_|).
  size_t total = mailbox_name_pool_.size() + requested_mailboxes_;
  DCHECK_LE(total, ideal_mailbox_pool_size);
  if (total >= ideal_mailbox_pool_size / 2)
    return 0;
  size_t request = ideal_mailbox_pool_size - total;
  requested_mailboxes_ += request;
  return request;
}
    425 
// Dispatches control (non-routed) messages on the IO thread. Only the async
// mailbox-name reply is expected; anything else trips the DCHECK since this
// filter is installed last and should see no other leftovers.
bool GpuChannelHost::MessageFilter::OnControlMessageReceived(
    const IPC::Message& message) {
  bool handled = true;

  IPC_BEGIN_MESSAGE_MAP(GpuChannelHost::MessageFilter, message)
  IPC_MESSAGE_HANDLER(GpuChannelMsg_GenerateMailboxNamesReply,
                      OnGenerateMailboxNamesReply)
  IPC_MESSAGE_UNHANDLED(handled = false)
  IPC_END_MESSAGE_MAP()

  DCHECK(handled);
  return handled;
}
    439 
// Receives the asynchronously generated mailbox names and appends them to
// the pool, settling the corresponding portion of |requested_mailboxes_|
// bookkeeping from GetMailboxNames().
void GpuChannelHost::MessageFilter::OnGenerateMailboxNamesReply(
    const std::vector<gpu::Mailbox>& names) {
  TRACE_EVENT0("gpu", "OnGenerateMailboxNamesReply");
  AutoLock lock(lock_);
  DCHECK_LE(names.size(), requested_mailboxes_);
  requested_mailboxes_ -= names.size();
  mailbox_name_pool_.insert(mailbox_name_pool_.end(),
                            names.begin(),
                            names.end());
}
    450 
    451 
    452 }  // namespace content
    453