// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "content/common/gpu/gpu_channel_manager.h"

#include "base/bind.h"
#include "base/command_line.h"
#include "content/common/gpu/gpu_channel.h"
#include "content/common/gpu/gpu_memory_buffer_factory.h"
#include "content/common/gpu/gpu_memory_manager.h"
#include "content/common/gpu/gpu_messages.h"
#include "content/common/gpu/sync_point_manager.h"
#include "content/common/message_router.h"
#include "gpu/command_buffer/service/feature_info.h"
#include "gpu/command_buffer/service/gpu_switches.h"
#include "gpu/command_buffer/service/mailbox_manager.h"
#include "gpu/command_buffer/service/memory_program_cache.h"
#include "gpu/command_buffer/service/shader_translator_cache.h"
#include "ipc/message_filter.h"
#include "ui/gl/gl_bindings.h"
#include "ui/gl/gl_share_group.h"

namespace content {

namespace {

class GpuChannelManagerMessageFilter : public IPC::MessageFilter {
 public:
  GpuChannelManagerMessageFilter(
      GpuMemoryBufferFactory* gpu_memory_buffer_factory)
      : sender_(NULL), gpu_memory_buffer_factory_(gpu_memory_buffer_factory) {}

  virtual void OnFilterAdded(IPC::Sender* sender) OVERRIDE {
    DCHECK(!sender_);
    sender_ = sender;
  }

  virtual void OnFilterRemoved() OVERRIDE {
    DCHECK(sender_);
    sender_ = NULL;
  }

  virtual bool OnMessageReceived(const IPC::Message& message) OVERRIDE {
    DCHECK(sender_);
    bool handled = true;
    IPC_BEGIN_MESSAGE_MAP(GpuChannelManagerMessageFilter, message)
      IPC_MESSAGE_HANDLER(GpuMsg_CreateGpuMemoryBuffer, OnCreateGpuMemoryBuffer)
      IPC_MESSAGE_UNHANDLED(handled = false)
    IPC_END_MESSAGE_MAP()
    return handled;
  }

 protected:
  virtual ~GpuChannelManagerMessageFilter() {}

  void OnCreateGpuMemoryBuffer(const gfx::GpuMemoryBufferHandle& handle,
                               const gfx::Size& size,
                               unsigned internalformat,
                               unsigned usage) {
    TRACE_EVENT2("gpu",
                 "GpuChannelManagerMessageFilter::OnCreateGpuMemoryBuffer",
                 "primary_id",
                 handle.global_id.primary_id,
                 "secondary_id",
                 handle.global_id.secondary_id);
    sender_->Send(new GpuHostMsg_GpuMemoryBufferCreated(
        gpu_memory_buffer_factory_->CreateGpuMemoryBuffer(
            handle, size, internalformat, usage)));
  }

  IPC::Sender* sender_;
  GpuMemoryBufferFactory* gpu_memory_buffer_factory_;
};

}  // namespace

GpuChannelManager::GpuChannelManager(MessageRouter* router,
                                     GpuWatchdog* watchdog,
                                     base::MessageLoopProxy* io_message_loop,
                                     base::WaitableEvent* shutdown_event,
                                     IPC::SyncChannel* channel)
    : io_message_loop_(io_message_loop),
      shutdown_event_(shutdown_event),
      router_(router),
      gpu_memory_manager_(
          this,
          GpuMemoryManager::kDefaultMaxSurfacesWithFrontbufferSoftLimit),
      watchdog_(watchdog),
      sync_point_manager_(new SyncPointManager),
      gpu_memory_buffer_factory_(GpuMemoryBufferFactory::Create()),
      channel_(channel),
      filter_(new GpuChannelManagerMessageFilter(
          gpu_memory_buffer_factory_.get())),
      weak_factory_(this) {
  DCHECK(router_);
  DCHECK(io_message_loop);
  DCHECK(shutdown_event);
  channel_->AddFilter(filter_.get());
}

GpuChannelManager::~GpuChannelManager() {
  gpu_channels_.clear();
  if (default_offscreen_surface_.get()) {
    default_offscreen_surface_->Destroy();
    default_offscreen_surface_ = NULL;
  }
}

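// Lazily creates the in-memory program cache the first time it is requested,
// provided the driver exposes GL_ARB_get_program_binary or
// GL_OES_get_program_binary and the cache has not been disabled via
// switches::kDisableGpuProgramCache.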
gpu::gles2::ProgramCache* GpuChannelManager::program_cache() {
  if (!program_cache_.get() &&
      (gfx::g_driver_gl.ext.b_GL_ARB_get_program_binary ||
       gfx::g_driver_gl.ext.b_GL_OES_get_program_binary) &&
      !CommandLine::ForCurrentProcess()->HasSwitch(
          switches::kDisableGpuProgramCache)) {
    program_cache_.reset(new gpu::gles2::MemoryProgramCache());
  }
  return program_cache_.get();
}

gpu::gles2::ShaderTranslatorCache*
GpuChannelManager::shader_translator_cache() {
  if (!shader_translator_cache_.get())
    shader_translator_cache_ = new gpu::gles2::ShaderTranslatorCache;
  return shader_translator_cache_.get();
}

void GpuChannelManager::RemoveChannel(int client_id) {
  Send(new GpuHostMsg_DestroyChannel(client_id));
  gpu_channels_.erase(client_id);
}

int GpuChannelManager::GenerateRouteID() {
  // Route IDs come from a single process-wide counter, so they are unique
  // across all channels owned by this manager.
  static int last_id = 0;
  return ++last_id;
}

void GpuChannelManager::AddRoute(int32 routing_id, IPC::Listener* listener) {
  router_->AddRoute(routing_id, listener);
}

void GpuChannelManager::RemoveRoute(int32 routing_id) {
  router_->RemoveRoute(routing_id);
}

GpuChannel* GpuChannelManager::LookupChannel(int32 client_id) {
  GpuChannelMap::const_iterator iter = gpu_channels_.find(client_id);
  if (iter == gpu_channels_.end())
    return NULL;
  else
    return iter->second;
}

bool GpuChannelManager::OnMessageReceived(const IPC::Message& msg) {
  bool handled = true;
  IPC_BEGIN_MESSAGE_MAP(GpuChannelManager, msg)
    IPC_MESSAGE_HANDLER(GpuMsg_EstablishChannel, OnEstablishChannel)
    IPC_MESSAGE_HANDLER(GpuMsg_CloseChannel, OnCloseChannel)
    IPC_MESSAGE_HANDLER(GpuMsg_CreateViewCommandBuffer,
                        OnCreateViewCommandBuffer)
    IPC_MESSAGE_HANDLER(GpuMsg_DestroyGpuMemoryBuffer, OnDestroyGpuMemoryBuffer)
    IPC_MESSAGE_HANDLER(GpuMsg_LoadedShader, OnLoadedShader)
    IPC_MESSAGE_UNHANDLED(handled = false)
  IPC_END_MESSAGE_MAP()
  return handled;
}

bool GpuChannelManager::Send(IPC::Message* msg) { return router_->Send(msg); }

void GpuChannelManager::OnEstablishChannel(int client_id,
                                           bool share_context,
                                           bool allow_future_sync_points) {
  IPC::ChannelHandle channel_handle;

  gfx::GLShareGroup* share_group = NULL;
  gpu::gles2::MailboxManager* mailbox_manager = NULL;
  if (share_context) {
    // All clients that request sharing use a single process-wide share group
    // and mailbox manager, created lazily on the first such request.
    if (!share_group_.get()) {
      share_group_ = new gfx::GLShareGroup;
      DCHECK(!mailbox_manager_.get());
      mailbox_manager_ = new gpu::gles2::MailboxManager;
    }
    share_group = share_group_.get();
    mailbox_manager = mailbox_manager_.get();
  }

  scoped_ptr<GpuChannel> channel(new GpuChannel(this,
                                                watchdog_,
                                                share_group,
                                                mailbox_manager,
                                                client_id,
                                                false,
                                                allow_future_sync_points));
  channel->Init(io_message_loop_.get(), shutdown_event_);
  channel_handle.name = channel->GetChannelName();

#if defined(OS_POSIX)
  // On POSIX, pass the renderer-side FD. Also mark it as auto-close so
  // that it gets closed after it has been sent.
  int renderer_fd = channel->TakeRendererFileDescriptor();
  DCHECK_NE(-1, renderer_fd);
  channel_handle.socket = base::FileDescriptor(renderer_fd, true);
#endif

  gpu_channels_.set(client_id, channel.Pass());

  Send(new GpuHostMsg_ChannelEstablished(channel_handle));
}

void GpuChannelManager::OnCloseChannel(
    const IPC::ChannelHandle& channel_handle) {
  for (GpuChannelMap::iterator iter = gpu_channels_.begin();
       iter != gpu_channels_.end(); ++iter) {
    if (iter->second->GetChannelName() == channel_handle.name) {
      gpu_channels_.erase(iter);
      return;
    }
  }
}

void GpuChannelManager::OnCreateViewCommandBuffer(
    const gfx::GLSurfaceHandle& window,
    int32 surface_id,
    int32 client_id,
    const GPUCreateCommandBufferConfig& init_params,
    int32 route_id) {
  DCHECK(surface_id);
  CreateCommandBufferResult result = CREATE_COMMAND_BUFFER_FAILED;

  GpuChannelMap::const_iterator iter = gpu_channels_.find(client_id);
  if (iter != gpu_channels_.end()) {
    result = iter->second->CreateViewCommandBuffer(
        window, surface_id, init_params, route_id);
  }

  Send(new GpuHostMsg_CommandBufferCreated(result));
}

void GpuChannelManager::DestroyGpuMemoryBuffer(
    const gfx::GpuMemoryBufferHandle& handle) {
  // Buffers are created on the IO thread (see GpuChannelManagerMessageFilter),
  // so hop over to the IO thread to destroy them as well.
  io_message_loop_->PostTask(
      FROM_HERE,
      base::Bind(&GpuChannelManager::DestroyGpuMemoryBufferOnIO,
                 base::Unretained(this),
                 handle));
}

void GpuChannelManager::DestroyGpuMemoryBufferOnIO(
    const gfx::GpuMemoryBufferHandle& handle) {
  gpu_memory_buffer_factory_->DestroyGpuMemoryBuffer(handle);
}

void GpuChannelManager::OnDestroyGpuMemoryBuffer(
    const gfx::GpuMemoryBufferHandle& handle,
    int32 sync_point) {
  if (!sync_point) {
    DestroyGpuMemoryBuffer(handle);
  } else {
    // Defer destruction until the sync point is retired so that commands
    // already issued against the buffer have completed.
    sync_point_manager()->AddSyncPointCallback(
        sync_point,
        base::Bind(&GpuChannelManager::DestroyGpuMemoryBuffer,
                   base::Unretained(this),
                   handle));
  }
}

void GpuChannelManager::OnLoadedShader(std::string program_proto) {
  if (program_cache())
    program_cache()->LoadProgram(program_proto);
}

bool GpuChannelManager::HandleMessagesScheduled() {
  for (GpuChannelMap::iterator iter = gpu_channels_.begin();
       iter != gpu_channels_.end(); ++iter) {
    if (iter->second->handle_messages_scheduled())
      return true;
  }
  return false;
}

uint64 GpuChannelManager::MessagesProcessed() {
  uint64 messages_processed = 0;

  for (GpuChannelMap::iterator iter = gpu_channels_.begin();
       iter != gpu_channels_.end(); ++iter) {
    messages_processed += iter->second->messages_processed();
  }
  return messages_processed;
}

void GpuChannelManager::LoseAllContexts() {
  for (GpuChannelMap::iterator iter = gpu_channels_.begin();
       iter != gpu_channels_.end(); ++iter) {
    iter->second->MarkAllContextsLost();
  }
  // Destroy the channels in a separately posted task rather than here, so the
  // message currently being dispatched finishes before the channels go away.
  base::MessageLoop::current()->PostTask(
      FROM_HERE,
      base::Bind(&GpuChannelManager::OnLoseAllContexts,
                 weak_factory_.GetWeakPtr()));
}

void GpuChannelManager::OnLoseAllContexts() {
  gpu_channels_.clear();
}

gfx::GLSurface* GpuChannelManager::GetDefaultOffscreenSurface() {
  if (!default_offscreen_surface_.get()) {
    default_offscreen_surface_ =
        gfx::GLSurface::CreateOffscreenGLSurface(gfx::Size());
  }
  return default_offscreen_surface_.get();
}

}  // namespace content