// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "content/common/gpu/client/gpu_channel_host.h"

#include <algorithm>

#include "base/bind.h"
#include "base/debug/trace_event.h"
#include "base/message_loop/message_loop.h"
#include "base/message_loop/message_loop_proxy.h"
#include "base/posix/eintr_wrapper.h"
#include "base/threading/thread_restrictions.h"
#include "content/common/gpu/client/command_buffer_proxy_impl.h"
#include "content/common/gpu/gpu_messages.h"
#include "ipc/ipc_sync_message_filter.h"
#include "url/gurl.h"

#if defined(OS_WIN)
#include "content/public/common/sandbox_init.h"
#endif

using base::AutoLock;
using base::MessageLoopProxy;

namespace content {

GpuListenerInfo::GpuListenerInfo() {}

GpuListenerInfo::~GpuListenerInfo() {}

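// Establishes the IPC channel to the GPU process and wraps it in a
// ref-counted GpuChannelHost. Must be called on the factory's main thread.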
// static
scoped_refptr<GpuChannelHost> GpuChannelHost::Create(
    GpuChannelHostFactory* factory,
    const gpu::GPUInfo& gpu_info,
    const IPC::ChannelHandle& channel_handle,
    base::WaitableEvent* shutdown_event) {
  DCHECK(factory->IsMainThread());
  scoped_refptr<GpuChannelHost> host = new GpuChannelHost(factory, gpu_info);
  host->Connect(channel_handle, shutdown_event);
  return host;
}

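// Returns true if |handle| names a GpuMemoryBuffer type that this client can
// share with the GPU process on the current platform.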
// static
bool GpuChannelHost::IsValidGpuMemoryBuffer(
    gfx::GpuMemoryBufferHandle handle) {
  switch (handle.type) {
    case gfx::SHARED_MEMORY_BUFFER:
#if defined(OS_MACOSX)
    case gfx::IO_SURFACE_BUFFER:
#endif
#if defined(OS_ANDROID)
    case gfx::SURFACE_TEXTURE_BUFFER:
#endif
#if defined(USE_X11)
    case gfx::X11_PIXMAP_BUFFER:
#endif
      return true;
    default:
      return false;
  }
}

GpuChannelHost::GpuChannelHost(GpuChannelHostFactory* factory,
                               const gpu::GPUInfo& gpu_info)
    : factory_(factory),
      gpu_info_(gpu_info) {
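  // Consume the first value from each counter so that IDs handed out later
  // start at 1, presumably leaving 0 free to act as an invalid/sentinel id.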
  next_transfer_buffer_id_.GetNext();
  next_gpu_memory_buffer_id_.GetNext();
  next_route_id_.GetNext();
}

void GpuChannelHost::Connect(const IPC::ChannelHandle& channel_handle,
                             base::WaitableEvent* shutdown_event) {
  // Open a channel to the GPU process. We pass NULL as the main listener here
  // since we need to filter everything to route it to the right thread.
  scoped_refptr<base::MessageLoopProxy> io_loop = factory_->GetIOLoopProxy();
  channel_ = IPC::SyncChannel::Create(channel_handle,
                                      IPC::Channel::MODE_CLIENT,
                                      NULL,
                                      io_loop.get(),
                                      true,
                                      shutdown_event);

  sync_filter_ = new IPC::SyncMessageFilter(shutdown_event);

  channel_->AddFilter(sync_filter_.get());

  channel_filter_ = new MessageFilter();

  // Install the filter last, because we intercept all leftover
  // messages.
  channel_->AddFilter(channel_filter_.get());
}

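// Sends an IPC message to the GPU process. Ownership of |msg| is always
// taken, and false is returned if the message could not be delivered (for
// example when called during shutdown without a current message loop).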
bool GpuChannelHost::Send(IPC::Message* msg) {
  // Callee takes ownership of message, regardless of whether Send is
  // successful. See IPC::Sender.
  scoped_ptr<IPC::Message> message(msg);
  // The GPU process never sends synchronous IPCs so clear the unblock flag to
  // preserve order.
  message->set_unblock(false);

  // Currently we need to choose between two different mechanisms for sending.
  // On the main thread we use the regular channel Send() method; on any other
  // thread we use SyncMessageFilter. We also have to be careful interpreting
  // IsMainThread() since it might return false during shutdown even though we
  // are actually calling from the main thread (in that case the message is
  // discarded).
  //
  // TODO: Can we just always use sync_filter_ since we set up the channel
  //       without a main listener?
  if (factory_->IsMainThread()) {
    // http://crbug.com/125264
    base::ThreadRestrictions::ScopedAllowWait allow_wait;
    bool result = channel_->Send(message.release());
    if (!result)
      DVLOG(1) << "GpuChannelHost::Send failed: Channel::Send failed";
    return result;
  } else if (base::MessageLoop::current()) {
    bool result = sync_filter_->Send(message.release());
    if (!result)
      DVLOG(1) << "GpuChannelHost::Send failed: SyncMessageFilter::Send failed";
    return result;
  }

  return false;
}

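// Creates a command buffer that renders to the onscreen surface identified by
// |surface_id|. Returns NULL on failure; if the failure indicates that the
// channel itself was lost, the channel is marked as lost as well.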
CommandBufferProxyImpl* GpuChannelHost::CreateViewCommandBuffer(
    int32 surface_id,
    CommandBufferProxyImpl* share_group,
    const std::vector<int32>& attribs,
    const GURL& active_url,
    gfx::GpuPreference gpu_preference) {
  TRACE_EVENT1("gpu",
               "GpuChannelHost::CreateViewCommandBuffer",
               "surface_id",
               surface_id);

  GPUCreateCommandBufferConfig init_params;
  init_params.share_group_id =
      share_group ? share_group->GetRouteID() : MSG_ROUTING_NONE;
  init_params.attribs = attribs;
  init_params.active_url = active_url;
  init_params.gpu_preference = gpu_preference;
  int32 route_id = GenerateRouteID();
  CreateCommandBufferResult result = factory_->CreateViewCommandBuffer(
      surface_id, init_params, route_id);
  if (result != CREATE_COMMAND_BUFFER_SUCCEEDED) {
    LOG(ERROR) << "GpuChannelHost::CreateViewCommandBuffer failed.";

    if (result == CREATE_COMMAND_BUFFER_FAILED_AND_CHANNEL_LOST) {
      // The GPU channel needs to be considered lost. The caller will
      // then set up a new connection, and the GPU channel and any
      // view command buffers will all be associated with the same GPU
      // process.
      DCHECK(MessageLoopProxy::current().get());

      scoped_refptr<base::MessageLoopProxy> io_loop =
          factory_->GetIOLoopProxy();
      io_loop->PostTask(
          FROM_HERE,
          base::Bind(&GpuChannelHost::MessageFilter::OnChannelError,
                     channel_filter_.get()));
    }

    return NULL;
  }

  CommandBufferProxyImpl* command_buffer =
      new CommandBufferProxyImpl(this, route_id);
  AddRoute(route_id, command_buffer->AsWeakPtr());

  AutoLock lock(context_lock_);
  proxies_[route_id] = command_buffer;
  return command_buffer;
}

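// Creates an offscreen command buffer of the given |size| by sending a
// synchronous GpuChannelMsg_CreateOffscreenCommandBuffer to the GPU process.
// Returns NULL if the send fails or the GPU process reports failure.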
CommandBufferProxyImpl* GpuChannelHost::CreateOffscreenCommandBuffer(
    const gfx::Size& size,
    CommandBufferProxyImpl* share_group,
    const std::vector<int32>& attribs,
    const GURL& active_url,
    gfx::GpuPreference gpu_preference) {
  TRACE_EVENT0("gpu", "GpuChannelHost::CreateOffscreenCommandBuffer");

  GPUCreateCommandBufferConfig init_params;
  init_params.share_group_id =
      share_group ? share_group->GetRouteID() : MSG_ROUTING_NONE;
  init_params.attribs = attribs;
  init_params.active_url = active_url;
  init_params.gpu_preference = gpu_preference;
  int32 route_id = GenerateRouteID();
  bool succeeded = false;
  if (!Send(new GpuChannelMsg_CreateOffscreenCommandBuffer(size,
                                                           init_params,
                                                           route_id,
                                                           &succeeded))) {
    LOG(ERROR) << "Failed to send GpuChannelMsg_CreateOffscreenCommandBuffer.";
    return NULL;
  }

  if (!succeeded) {
    LOG(ERROR)
        << "GpuChannelMsg_CreateOffscreenCommandBuffer returned failure.";
    return NULL;
  }

  CommandBufferProxyImpl* command_buffer =
      new CommandBufferProxyImpl(this, route_id);
  AddRoute(route_id, command_buffer->AsWeakPtr());

  AutoLock lock(context_lock_);
  proxies_[route_id] = command_buffer;
  return command_buffer;
}

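// Creates a video decoder bound to the command buffer identified by
// |command_buffer_route_id| by delegating to that buffer's
// CommandBufferProxyImpl. CreateVideoEncoder below works the same way.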
scoped_ptr<media::VideoDecodeAccelerator> GpuChannelHost::CreateVideoDecoder(
    int command_buffer_route_id) {
  TRACE_EVENT0("gpu", "GpuChannelHost::CreateVideoDecoder");
  AutoLock lock(context_lock_);
  ProxyMap::iterator it = proxies_.find(command_buffer_route_id);
  DCHECK(it != proxies_.end());
  return it->second->CreateVideoDecoder();
}

scoped_ptr<media::VideoEncodeAccelerator> GpuChannelHost::CreateVideoEncoder(
    int command_buffer_route_id) {
  TRACE_EVENT0("gpu", "GpuChannelHost::CreateVideoEncoder");
  AutoLock lock(context_lock_);
  ProxyMap::iterator it = proxies_.find(command_buffer_route_id);
  DCHECK(it != proxies_.end());
  return it->second->CreateVideoEncoder();
}

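// Tells the GPU process to destroy the command buffer, unregisters its route
// and proxy entry, and deletes the proxy object itself.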
void GpuChannelHost::DestroyCommandBuffer(
    CommandBufferProxyImpl* command_buffer) {
  TRACE_EVENT0("gpu", "GpuChannelHost::DestroyCommandBuffer");

  int route_id = command_buffer->GetRouteID();
  Send(new GpuChannelMsg_DestroyCommandBuffer(route_id));
  RemoveRoute(route_id);

  AutoLock lock(context_lock_);
  proxies_.erase(route_id);
  delete command_buffer;
}

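// Registers |listener| for messages with |route_id|. The registration is
// posted to the IO thread, where the MessageFilter lives; the current message
// loop is recorded so that incoming messages are dispatched back on the
// thread that added the route.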
void GpuChannelHost::AddRoute(
    int route_id, base::WeakPtr<IPC::Listener> listener) {
  DCHECK(MessageLoopProxy::current().get());

  scoped_refptr<base::MessageLoopProxy> io_loop = factory_->GetIOLoopProxy();
  io_loop->PostTask(FROM_HERE,
                    base::Bind(&GpuChannelHost::MessageFilter::AddRoute,
                               channel_filter_.get(), route_id, listener,
                               MessageLoopProxy::current()));
}

void GpuChannelHost::RemoveRoute(int route_id) {
  scoped_refptr<base::MessageLoopProxy> io_loop = factory_->GetIOLoopProxy();
  io_loop->PostTask(FROM_HERE,
                    base::Bind(&GpuChannelHost::MessageFilter::RemoveRoute,
                               channel_filter_.get(), route_id));
}

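// Duplicates |source_handle| so that it can be used by the GPU process. On
// Windows the handle is brokered into the target process; on POSIX the file
// descriptor is simply dup()ed. Returns a null handle if the channel is lost
// or duplication fails.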
base::SharedMemoryHandle GpuChannelHost::ShareToGpuProcess(
    base::SharedMemoryHandle source_handle) {
  if (IsLost())
    return base::SharedMemory::NULLHandle();

#if defined(OS_WIN)
  // Windows needs to explicitly duplicate the handle out to another process.
  base::SharedMemoryHandle target_handle;
  if (!BrokerDuplicateHandle(source_handle,
                             channel_->GetPeerPID(),
                             &target_handle,
                             FILE_GENERIC_READ | FILE_GENERIC_WRITE,
                             0)) {
    return base::SharedMemory::NULLHandle();
  }

  return target_handle;
#else
  int duped_handle = HANDLE_EINTR(dup(source_handle.fd));
  if (duped_handle < 0)
    return base::SharedMemory::NULLHandle();

  return base::FileDescriptor(duped_handle, true);
#endif
}

int32 GpuChannelHost::ReserveTransferBufferId() {
  return next_transfer_buffer_id_.GetNext();
}

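// Returns a GpuMemoryBuffer handle that the GPU process can use. Shared
// memory buffers need their handle explicitly duplicated; the other,
// platform-specific buffer types can be passed through unchanged.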
gfx::GpuMemoryBufferHandle GpuChannelHost::ShareGpuMemoryBufferToGpuProcess(
    gfx::GpuMemoryBufferHandle source_handle) {
  switch (source_handle.type) {
    case gfx::SHARED_MEMORY_BUFFER: {
      gfx::GpuMemoryBufferHandle handle;
      handle.type = gfx::SHARED_MEMORY_BUFFER;
      handle.handle = ShareToGpuProcess(source_handle.handle);
      return handle;
    }
#if defined(USE_OZONE)
    case gfx::OZONE_NATIVE_BUFFER:
      return source_handle;
#endif
#if defined(OS_MACOSX)
    case gfx::IO_SURFACE_BUFFER:
      return source_handle;
#endif
#if defined(OS_ANDROID)
    case gfx::SURFACE_TEXTURE_BUFFER:
      return source_handle;
#endif
#if defined(USE_X11)
    case gfx::X11_PIXMAP_BUFFER:
      return source_handle;
#endif
    default:
      NOTREACHED();
      return gfx::GpuMemoryBufferHandle();
  }
}

int32 GpuChannelHost::ReserveGpuMemoryBufferId() {
  return next_gpu_memory_buffer_id_.GetNext();
}

int32 GpuChannelHost::GenerateRouteID() {
  return next_route_id_.GetNext();
}

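// The IPC channel must be destroyed on the main thread. If this host is
// released on another thread, hand the channel back to the main loop for
// deletion; otherwise it is destroyed along with the host on the main thread.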
GpuChannelHost::~GpuChannelHost() {
  // channel_ must be destroyed on the main thread.
  if (!factory_->IsMainThread())
    factory_->GetMainLoop()->DeleteSoon(FROM_HERE, channel_.release());
}

GpuChannelHost::MessageFilter::MessageFilter()
    : lost_(false) {
}

GpuChannelHost::MessageFilter::~MessageFilter() {}

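// Runs on the IO thread. Records which listener and message loop should
// receive messages for |route_id|.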
void GpuChannelHost::MessageFilter::AddRoute(
    int route_id,
    base::WeakPtr<IPC::Listener> listener,
    scoped_refptr<MessageLoopProxy> loop) {
  DCHECK(listeners_.find(route_id) == listeners_.end());
  GpuListenerInfo info;
  info.listener = listener;
  info.loop = loop;
  listeners_[route_id] = info;
}

void GpuChannelHost::MessageFilter::RemoveRoute(int route_id) {
  ListenerMap::iterator it = listeners_.find(route_id);
  if (it != listeners_.end())
    listeners_.erase(it);
}

bool GpuChannelHost::MessageFilter::OnMessageReceived(
    const IPC::Message& message) {
  // Never handle sync message replies or we will deadlock here.
  if (message.is_reply())
    return false;

  ListenerMap::iterator it = listeners_.find(message.routing_id());
  if (it == listeners_.end())
    return false;

  const GpuListenerInfo& info = it->second;
  info.loop->PostTask(
      FROM_HERE,
      base::Bind(
          base::IgnoreResult(&IPC::Listener::OnMessageReceived),
          info.listener,
          message));
  return true;
}

void GpuChannelHost::MessageFilter::OnChannelError() {
  // Set the lost state before signalling the proxies. That way, if they
  // themselves post a task to recreate the context, they will not try to re-use
  // this channel host.
  {
    AutoLock lock(lock_);
    lost_ = true;
  }

  // Inform all the proxies that an error has occurred. This will be reported
  // via OpenGL as a lost context.
  for (ListenerMap::iterator it = listeners_.begin();
       it != listeners_.end();
       it++) {
    const GpuListenerInfo& info = it->second;
    info.loop->PostTask(
        FROM_HERE,
        base::Bind(&IPC::Listener::OnChannelError, info.listener));
  }

  listeners_.clear();
}

bool GpuChannelHost::MessageFilter::IsLost() const {
  AutoLock lock(lock_);
  return lost_;
}

}  // namespace content