// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef CONTENT_COMMON_GPU_GPU_MEMORY_MANAGER_H_
#define CONTENT_COMMON_GPU_GPU_MEMORY_MANAGER_H_

#include <list>
#include <map>

#include "base/basictypes.h"
#include "base/cancelable_callback.h"
#include "base/containers/hash_tables.h"
#include "base/gtest_prod_util.h"
#include "base/memory/weak_ptr.h"
#include "content/common/content_export.h"
#include "content/common/gpu/gpu_memory_allocation.h"
#include "content/public/common/gpu_memory_stats.h"
#include "gpu/command_buffer/service/memory_tracking.h"

namespace content {

class GpuChannelManager;
class GpuMemoryManagerClient;
class GpuMemoryManagerClientState;
class GpuMemoryTrackingGroup;

// Tracks GPU memory usage for all clients in the GPU process and periodically
// (via Manage()) recomputes each client's memory allocation. Clients are kept
// in three lists — visible surfaces, nonvisible surfaces (both in MRU order),
// and clients with no surface — and per-context-group usage is accounted for
// through GpuMemoryTrackingGroup.
class CONTENT_EXPORT GpuMemoryManager :
    public base::SupportsWeakPtr<GpuMemoryManager> {
 public:
  // Default soft limit on how many surfaces may keep a frontbuffer.
  // Android uses a much smaller limit than other platforms.
#if defined(OS_ANDROID)
  enum { kDefaultMaxSurfacesWithFrontbufferSoftLimit = 1 };
#else
  enum { kDefaultMaxSurfacesWithFrontbufferSoftLimit = 8 };
#endif
  enum ScheduleManageTime {
    // Add a call to Manage to the thread's message loop immediately.
    kScheduleManageNow,
    // Add a Manage call to the thread's message loop for execution 1/60th of
    // a second from now.
    kScheduleManageLater,
  };

  GpuMemoryManager(GpuChannelManager* channel_manager,
                   uint64 max_surfaces_with_frontbuffer_soft_limit);
  ~GpuMemoryManager();

  // Schedule a Manage() call. If immediate is true, we PostTask without delay.
  // Otherwise PostDelayedTask using a CancelableClosure and allow multiple
  // delayed calls to "queue" up. This way, we do not spam clients in certain
  // lower priority situations. An immediate schedule manage will cancel any
  // queued delayed manage.
  void ScheduleManage(ScheduleManageTime schedule_manage_time);

  // Retrieve GPU Resource consumption statistics for the task manager
  void GetVideoMemoryUsageStats(
      content::GPUVideoMemoryUsageStats* video_memory_usage_stats) const;

  // Create the per-client bookkeeping state for |client|.
  // NOTE(review): ownership of the returned object is not visible from this
  // header — presumably the caller holds it and its destructor calls back
  // into OnDestroyClientState(); confirm in the .cc file.
  GpuMemoryManagerClientState* CreateClientState(
      GpuMemoryManagerClient* client, bool has_surface, bool visible);

  // Create the tracking group used to account memory for |memory_tracker|'s
  // context group, attributed to process |pid|.
  GpuMemoryTrackingGroup* CreateTrackingGroup(
      base::ProcessId pid, gpu::gles2::MemoryTracker* memory_tracker);

 private:
  friend class GpuMemoryManagerTest;
  friend class GpuMemoryTrackingGroup;
  friend class GpuMemoryManagerClientState;

  FRIEND_TEST_ALL_PREFIXES(GpuMemoryManagerTest,
                           TestManageBasicFunctionality);
  FRIEND_TEST_ALL_PREFIXES(GpuMemoryManagerTest,
                           TestManageChangingVisibility);
  FRIEND_TEST_ALL_PREFIXES(GpuMemoryManagerTest,
                           TestManageManyVisibleStubs);
  FRIEND_TEST_ALL_PREFIXES(GpuMemoryManagerTest,
                           TestManageManyNotVisibleStubs);
  FRIEND_TEST_ALL_PREFIXES(GpuMemoryManagerTest,
                           TestManageChangingLastUsedTime);
  FRIEND_TEST_ALL_PREFIXES(GpuMemoryManagerTest,
                           TestManageChangingImportanceShareGroup);
  FRIEND_TEST_ALL_PREFIXES(GpuMemoryManagerTest,
                           TestForegroundStubsGetBonusAllocation);
  FRIEND_TEST_ALL_PREFIXES(GpuMemoryManagerTest,
                           TestUpdateAvailableGpuMemory);
  FRIEND_TEST_ALL_PREFIXES(GpuMemoryManagerTest,
                           GpuMemoryAllocationCompareTests);
  FRIEND_TEST_ALL_PREFIXES(GpuMemoryManagerTest,
                           StubMemoryStatsForLastManageTests);
  FRIEND_TEST_ALL_PREFIXES(GpuMemoryManagerTest,
                           TestManagedUsageTracking);
  FRIEND_TEST_ALL_PREFIXES(GpuMemoryManagerTest,
                           BackgroundMru);
  FRIEND_TEST_ALL_PREFIXES(GpuMemoryManagerTest,
                           BackgroundDiscardPersistent);
  FRIEND_TEST_ALL_PREFIXES(GpuMemoryManagerTest,
                           UnmanagedTracking);
  FRIEND_TEST_ALL_PREFIXES(GpuMemoryManagerTest,
                           DefaultAllocation);

  // Map from a context group's MemoryTracker to its tracking group.
  typedef std::map<gpu::gles2::MemoryTracker*, GpuMemoryTrackingGroup*>
      TrackingGroupMap;

  typedef std::list<GpuMemoryManagerClientState*> ClientStateList;

  // Recompute and assign memory allocations for all clients.
  void Manage();
  void SetClientsHibernatedState() const;
  void AssignSurfacesAllocations();
  void AssignNonSurfacesAllocations();

  // Math helper function to compute the maximum value of cap such that
  // sum_i min(bytes[i], cap) <= bytes_sum_limit
  static uint64 ComputeCap(std::vector<uint64> bytes, uint64 bytes_sum_limit);

  // Compute the allocation for clients when visible and not visible.
  void ComputeVisibleSurfacesAllocations();
  void ComputeNonvisibleSurfacesAllocations();
  void DistributeRemainingMemoryToVisibleSurfaces();

  // Compute the budget for a client. Allow at most bytes_above_required_cap
  // bytes above client_state's required level. Allow at most
  // bytes_above_minimum_cap bytes above client_state's minimum level. Allow
  // at most bytes_overall_cap bytes total.
  uint64 ComputeClientAllocationWhenVisible(
      GpuMemoryManagerClientState* client_state,
      uint64 bytes_above_required_cap,
      uint64 bytes_above_minimum_cap,
      uint64 bytes_overall_cap);
  uint64 ComputeClientAllocationWhenNonvisible(
      GpuMemoryManagerClientState* client_state);

  // Update the amount of GPU memory we think we have in the system, based
  // on what the stubs' contexts report.
  void UpdateAvailableGpuMemory();
  void UpdateUnmanagedMemoryLimits();

  // The amount of video memory which is available for allocation.
  uint64 GetAvailableGpuMemory() const;

  // Minimum value of available GPU memory, no matter how little the GPU
  // reports. This is the default value.
  uint64 GetDefaultAvailableGpuMemory() const;

  // Maximum cap on total GPU memory, no matter how much the GPU reports.
  uint64 GetMaximumTotalGpuMemory() const;

  // The maximum and minimum amount of memory that a client may be assigned.
  uint64 GetMaximumClientAllocation() const;
  uint64 GetMinimumClientAllocation() const {
    return bytes_minimum_per_client_;
  }
  // The default amount of memory that a client is assigned, if it has not
  // reported any memory usage stats yet.
  uint64 GetDefaultClientAllocation() const {
    return bytes_default_per_client_;
  }

  // Derive the allocatable budget from the total GPU memory reported.
  static uint64 CalcAvailableFromGpuTotal(uint64 total_gpu_memory);

  // Send memory usage stats to the browser process.
  void SendUmaStatsToBrowser();

  // Get the current number of bytes allocated.
  uint64 GetCurrentUsage() const {
    return bytes_allocated_managed_current_ +
        bytes_allocated_unmanaged_current_;
  }

  // GpuMemoryTrackingGroup interface
  void TrackMemoryAllocatedChange(
      GpuMemoryTrackingGroup* tracking_group,
      uint64 old_size,
      uint64 new_size,
      gpu::gles2::MemoryTracker::Pool tracking_pool);
  void OnDestroyTrackingGroup(GpuMemoryTrackingGroup* tracking_group);
  bool EnsureGPUMemoryAvailable(uint64 size_needed);

  // GpuMemoryManagerClientState interface
  void SetClientStateVisible(
      GpuMemoryManagerClientState* client_state, bool visible);
  void SetClientStateManagedMemoryStats(
      GpuMemoryManagerClientState* client_state,
      const GpuManagedMemoryStats& stats);
  void OnDestroyClientState(GpuMemoryManagerClientState* client);

  // Add or remove a client from its clients list (visible, nonvisible, or
  // nonsurface). When adding the client, add it to the front of the list.
  void AddClientToList(GpuMemoryManagerClientState* client_state);
  void RemoveClientFromList(GpuMemoryManagerClientState* client_state);
  ClientStateList* GetClientList(GpuMemoryManagerClientState* client_state);

  // Interfaces for testing
  void TestingDisableScheduleManage() { disable_schedule_manage_ = true; }
  void TestingSetAvailableGpuMemory(uint64 bytes) {
    bytes_available_gpu_memory_ = bytes;
    bytes_available_gpu_memory_overridden_ = true;
  }

  void TestingSetMinimumClientAllocation(uint64 bytes) {
    bytes_minimum_per_client_ = bytes;
  }

  void TestingSetDefaultClientAllocation(uint64 bytes) {
    bytes_default_per_client_ = bytes;
  }

  void TestingSetUnmanagedLimitStep(uint64 bytes) {
    bytes_unmanaged_limit_step_ = bytes;
  }

  // Not owned. NOTE(review): presumably outlives this manager — confirm
  // against GpuChannelManager's ownership of this object.
  GpuChannelManager* channel_manager_;

  // A list of all visible and nonvisible clients, in most-recently-used
  // order (most recently used is first).
  ClientStateList clients_visible_mru_;
  ClientStateList clients_nonvisible_mru_;

  // A list of all clients that don't have a surface.
  ClientStateList clients_nonsurface_;

  // All context groups' tracking structures
  TrackingGroupMap tracking_groups_;

  // Pending delayed Manage() task; cancelled when an immediate Manage() is
  // scheduled (see ScheduleManage()).
  base::CancelableClosure delayed_manage_callback_;
  bool manage_immediate_scheduled_;

  uint64 max_surfaces_with_frontbuffer_soft_limit_;

  // The maximum amount of memory that may be allocated for GPU resources
  uint64 bytes_available_gpu_memory_;
  // True when the available-memory value was set by a test override rather
  // than derived from the GPU's reported total.
  bool bytes_available_gpu_memory_overridden_;

  // Whether or not clients can be allocated memory when nonvisible.
  bool allow_nonvisible_memory_;

  // The minimum and default allocations for a single client.
  uint64 bytes_minimum_per_client_;
  uint64 bytes_default_per_client_;

  // The current total memory usage, and historical maximum memory usage
  uint64 bytes_allocated_managed_current_;
  uint64 bytes_allocated_managed_visible_;
  uint64 bytes_allocated_managed_nonvisible_;
  uint64 bytes_allocated_unmanaged_current_;
  uint64 bytes_allocated_historical_max_;

  // If bytes_allocated_unmanaged_current_ leaves the interval [low_, high_),
  // then ScheduleManage to take the change into account.
  uint64 bytes_allocated_unmanaged_high_;
  uint64 bytes_allocated_unmanaged_low_;

  // Update bytes_allocated_unmanaged_low/high_ in intervals of step_.
  uint64 bytes_unmanaged_limit_step_;

  // Used to disable automatic changes to Manage() in testing.
  bool disable_schedule_manage_;

  DISALLOW_COPY_AND_ASSIGN(GpuMemoryManager);
};

}  // namespace content

#endif  // CONTENT_COMMON_GPU_GPU_MEMORY_MANAGER_H_