// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef CONTENT_COMMON_GPU_GPU_COMMAND_BUFFER_STUB_H_
#define CONTENT_COMMON_GPU_GPU_COMMAND_BUFFER_STUB_H_

#include <deque>
#include <string>
#include <vector>

#include "base/memory/scoped_vector.h"
#include "base/memory/weak_ptr.h"
#include "base/observer_list.h"
#include "content/common/content_export.h"
#include "content/common/gpu/gpu_memory_manager.h"
#include "content/common/gpu/gpu_memory_manager_client.h"
#include "gpu/command_buffer/common/constants.h"
#include "gpu/command_buffer/common/gpu_memory_allocation.h"
#include "gpu/command_buffer/service/command_buffer_service.h"
#include "gpu/command_buffer/service/context_group.h"
#include "gpu/command_buffer/service/gpu_scheduler.h"
#include "ipc/ipc_listener.h"
#include "ipc/ipc_sender.h"
#include "media/base/video_decoder_config.h"
#include "ui/events/latency_info.h"
#include "ui/gfx/gpu_memory_buffer.h"
#include "ui/gfx/native_widget_types.h"
#include "ui/gfx/size.h"
#include "ui/gl/gl_surface.h"
#include "ui/gl/gpu_preference.h"
#include "url/gurl.h"

namespace gpu {
struct Mailbox;
namespace gles2 {
class MailboxManager;
}
}

namespace content {

class GpuChannel;
class GpuVideoDecodeAccelerator;
class GpuVideoEncodeAccelerator;
class GpuWatchdog;
struct WaitForCommandState;

class GpuCommandBufferStub
    : public GpuMemoryManagerClient,
      public IPC::Listener,
      public IPC::Sender,
      public base::SupportsWeakPtr<GpuCommandBufferStub> {
 public:
  class DestructionObserver {
   public:
    // Called in Destroy(), before the context/surface are released.
    virtual void OnWillDestroyStub() = 0;

   protected:
    virtual ~DestructionObserver() {}
  };

  typedef base::Callback<void(const std::vector<ui::LatencyInfo>&)>
      LatencyInfoCallback;

  GpuCommandBufferStub(
      GpuChannel* channel,
      GpuCommandBufferStub* share_group,
      const gfx::GLSurfaceHandle& handle,
      gpu::gles2::MailboxManager* mailbox_manager,
      const gfx::Size& size,
      const gpu::gles2::DisallowedFeatures& disallowed_features,
      const std::vector<int32>& attribs,
      gfx::GpuPreference gpu_preference,
      bool use_virtualized_gl_context,
      int32 route_id,
      int32 surface_id,
      GpuWatchdog* watchdog,
      bool software,
      const GURL& active_url);

  virtual ~GpuCommandBufferStub();

  // IPC::Listener implementation:
  virtual bool OnMessageReceived(const IPC::Message& message) OVERRIDE;

  // IPC::Sender implementation:
  virtual bool Send(IPC::Message* msg) OVERRIDE;

  // GpuMemoryManagerClient implementation:
  virtual gfx::Size GetSurfaceSize() const OVERRIDE;
  virtual gpu::gles2::MemoryTracker* GetMemoryTracker() const OVERRIDE;
  virtual void SetMemoryAllocation(
      const gpu::MemoryAllocation& allocation) OVERRIDE;
  virtual void SuggestHaveFrontBuffer(bool suggest_have_frontbuffer) OVERRIDE;
  virtual bool GetTotalGpuMemory(uint64* bytes) OVERRIDE;

  // Whether this command buffer can currently handle IPC messages.
  bool IsScheduled();

  // Whether the command buffer is preempted and cannot process commands.
  bool IsPreempted() const {
    return scheduler_.get() && scheduler_->IsPreempted();
  }

  // Whether there are commands in the buffer that haven't been processed.
  bool HasUnprocessedCommands();

  gpu::gles2::GLES2Decoder* decoder() const { return decoder_.get(); }
  gpu::GpuScheduler* scheduler() const { return scheduler_.get(); }
  GpuChannel* channel() const { return channel_; }

  // Identifies the target surface.
  int32 surface_id() const { return surface_id_; }

  // Identifies the various GpuCommandBufferStubs in the GPU process belonging
  // to the same renderer process.
  int32 route_id() const { return route_id_; }

  gfx::GpuPreference gpu_preference() { return gpu_preference_; }

  int32 GetRequestedAttribute(int attr) const;

  // Sends a message to the console.
  void SendConsoleMessage(int32 id, const std::string& message);

  void SendCachedShader(const std::string& key, const std::string& shader);

  gfx::GLSurface* surface() const { return surface_.get(); }

  void AddDestructionObserver(DestructionObserver* observer);
  void RemoveDestructionObserver(DestructionObserver* observer);

  // Associates a sync point with this stub. When the stub is destroyed, it
  // will retire all sync points that haven't been previously retired.
  void AddSyncPoint(uint32 sync_point);

  void SetPreemptByFlag(scoped_refptr<gpu::PreemptionFlag> flag);

  void SetLatencyInfoCallback(const LatencyInfoCallback& callback);

  void MarkContextLost();

  uint64 GetMemoryUsage() const;

 private:
  GpuMemoryManager* GetMemoryManager() const;
  bool MakeCurrent();
  void Destroy();

  // Cleans up and sends the reply if OnInitialize failed.
  void OnInitializeFailed(IPC::Message* reply_message);

  // Message handlers:
  void OnInitialize(base::SharedMemoryHandle shared_state_shm,
                    IPC::Message* reply_message);
  void OnSetGetBuffer(int32 shm_id, IPC::Message* reply_message);
  void OnProduceFrontBuffer(const gpu::Mailbox& mailbox);
  void OnGetState(IPC::Message* reply_message);
  void OnWaitForTokenInRange(int32 start,
                             int32 end,
                             IPC::Message* reply_message);
  void OnWaitForGetOffsetInRange(int32 start,
                                 int32 end,
                                 IPC::Message* reply_message);
  void OnAsyncFlush(int32 put_offset,
                    uint32 flush_count,
                    const std::vector<ui::LatencyInfo>& latency_info);
  void OnEcho(const IPC::Message& message);
  void OnRescheduled();
  void OnRegisterTransferBuffer(int32 id,
                                base::SharedMemoryHandle transfer_buffer,
                                uint32 size);
  void OnDestroyTransferBuffer(int32 id);
  void OnGetTransferBuffer(int32 id, IPC::Message* reply_message);

  void OnCreateVideoDecoder(media::VideoCodecProfile profile,
                            int32 route_id,
                            IPC::Message* reply_message);
  void OnCreateVideoEncoder(media::VideoFrame::Format input_format,
                            const gfx::Size& input_visible_size,
                            media::VideoCodecProfile output_profile,
                            uint32 initial_bitrate,
                            int32 route_id,
                            IPC::Message* reply_message);

  void OnSetSurfaceVisible(bool visible);

  void OnEnsureBackbuffer();

  void OnRetireSyncPoint(uint32 sync_point);
  bool OnWaitSyncPoint(uint32 sync_point);
  void OnSyncPointRetired();
  void OnSignalSyncPoint(uint32 sync_point, uint32 id);
  void OnSignalSyncPointAck(uint32 id);
  void OnSignalQuery(uint32 query, uint32 id);

  void OnSetClientHasMemoryAllocationChangedCallback(bool has_callback);

  void OnRegisterGpuMemoryBuffer(int32 id,
                                 gfx::GpuMemoryBufferHandle handle,
                                 uint32 width,
                                 uint32 height,
                                 uint32 internalformat);
  void OnUnregisterGpuMemoryBuffer(int32 id);

  void OnCommandProcessed();
  void OnParseError();
  void OnCreateStreamTexture(uint32 texture_id,
                             int32 stream_id,
                             bool* succeeded);

  void ReportState();

  // Wrapper for GpuScheduler::PutChanged that sets the crash report URL.
  void PutChanged();

  // Polls the command buffer to execute work.
  void PollWork();

  // Whether this command buffer needs to be polled again in the future.
  bool HasMoreWork();

  void ScheduleDelayedWork(int64 delay);

  bool CheckContextLost();
  void CheckCompleteWaits();

  // The lifetime of objects of this class is managed by a GpuChannel. The
  // GpuChannels destroy all the GpuCommandBufferStubs that they own when they
  // are destroyed. So a raw pointer is safe.
  GpuChannel* channel_;

  // The group of contexts that share namespaces with this context.
  scoped_refptr<gpu::gles2::ContextGroup> context_group_;

  gfx::GLSurfaceHandle handle_;
  gfx::Size initial_size_;
  gpu::gles2::DisallowedFeatures disallowed_features_;
  std::vector<int32> requested_attribs_;
  gfx::GpuPreference gpu_preference_;
  bool use_virtualized_gl_context_;
  int32 route_id_;
  int32 surface_id_;
  bool software_;
  uint32 last_flush_count_;

  scoped_ptr<gpu::CommandBufferService> command_buffer_;
  scoped_ptr<gpu::gles2::GLES2Decoder> decoder_;
  scoped_ptr<gpu::GpuScheduler> scheduler_;
  scoped_refptr<gfx::GLSurface> surface_;

  scoped_ptr<GpuMemoryManagerClientState> memory_manager_client_state_;
  // The last memory allocation received from the GpuMemoryManager (used to
  // elide redundant work).
  bool last_memory_allocation_valid_;
  gpu::MemoryAllocation last_memory_allocation_;

  GpuWatchdog* watchdog_;

  ObserverList<DestructionObserver> destruction_observers_;

  // A queue of sync points associated with this stub.
  std::deque<uint32> sync_points_;
  int sync_point_wait_count_;

  bool delayed_work_scheduled_;
  uint64 previous_messages_processed_;
  base::TimeTicks last_idle_time_;

  scoped_refptr<gpu::PreemptionFlag> preemption_flag_;

  LatencyInfoCallback latency_info_callback_;

  GURL active_url_;
  size_t active_url_hash_;

  size_t total_gpu_memory_;
  scoped_ptr<WaitForCommandState> wait_for_token_;
  scoped_ptr<WaitForCommandState> wait_for_get_offset_;

  DISALLOW_COPY_AND_ASSIGN(GpuCommandBufferStub);
};

}  // namespace content

#endif  // CONTENT_COMMON_GPU_GPU_COMMAND_BUFFER_STUB_H_
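
// ---------------------------------------------------------------------------
// Illustrative sketch only (not part of this interface): one way a client in
// the GPU process might use the DestructionObserver registration declared
// above to drop its reference before the stub releases its context and
// surface. The class and member names below are hypothetical.
//
//   class StubClient : public GpuCommandBufferStub::DestructionObserver {
//    public:
//     explicit StubClient(GpuCommandBufferStub* stub) : stub_(stub) {
//       stub_->AddDestructionObserver(this);
//     }
//
//     virtual ~StubClient() {
//       if (stub_)
//         stub_->RemoveDestructionObserver(this);
//     }
//
//     // Called from GpuCommandBufferStub::Destroy(), before the context and
//     // surface are released, so any cleanup that needs them happens here.
//     virtual void OnWillDestroyStub() OVERRIDE { stub_ = NULL; }
//
//    private:
//     GpuCommandBufferStub* stub_;
//   };
// ---------------------------------------------------------------------------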