// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef CONTENT_COMMON_GPU_GPU_MEMORY_MANAGER_H_
#define CONTENT_COMMON_GPU_GPU_MEMORY_MANAGER_H_

#include <list>
#include <map>
#include <vector>

#include "base/basictypes.h"
#include "base/cancelable_callback.h"
#include "base/containers/hash_tables.h"
#include "base/gtest_prod_util.h"
#include "base/memory/weak_ptr.h"
#include "content/common/content_export.h"
#include "content/public/common/gpu_memory_stats.h"
#include "gpu/command_buffer/common/gpu_memory_allocation.h"
#include "gpu/command_buffer/service/memory_tracking.h"

namespace content {

class GpuChannelManager;
class GpuMemoryManagerClient;
class GpuMemoryManagerClientState;
class GpuMemoryTrackingGroup;

class CONTENT_EXPORT GpuMemoryManager :
    public base::SupportsWeakPtr<GpuMemoryManager> {
 public:
#if defined(OS_ANDROID) || (defined(OS_LINUX) && !defined(OS_CHROMEOS))
  enum { kDefaultMaxSurfacesWithFrontbufferSoftLimit = 1 };
#else
  enum { kDefaultMaxSurfacesWithFrontbufferSoftLimit = 8 };
#endif
  enum ScheduleManageTime {
    // Add a call to Manage to the thread's message loop immediately.
    kScheduleManageNow,
    // Add a Manage call to the thread's message loop for execution 1/60th of
    // a second from now.
    kScheduleManageLater,
  };

  GpuMemoryManager(GpuChannelManager* channel_manager,
                   uint64 max_surfaces_with_frontbuffer_soft_limit);
  ~GpuMemoryManager();

  // Schedule a Manage() call. If |schedule_manage_time| is kScheduleManageNow,
  // we PostTask without delay. Otherwise we PostDelayedTask using a
  // CancelableClosure and allow multiple delayed calls to "queue" up. This
  // way, we do not spam clients in certain lower-priority situations. An
  // immediately scheduled Manage() will cancel any queued delayed Manage().
  void ScheduleManage(ScheduleManageTime schedule_manage_time);
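
  // Illustrative usage (hypothetical call sites, not code taken from this
  // header): a visibility change would typically request an immediate pass,
  // while lower-priority events coalesce into a delayed one:
  //
  //   memory_manager->ScheduleManage(GpuMemoryManager::kScheduleManageNow);
  //   memory_manager->ScheduleManage(GpuMemoryManager::kScheduleManageLater);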

  // Retrieve GPU resource consumption statistics for the task manager.
  void GetVideoMemoryUsageStats(
      content::GPUVideoMemoryUsageStats* video_memory_usage_stats) const;

  GpuMemoryManagerClientState* CreateClientState(
      GpuMemoryManagerClient* client, bool has_surface, bool visible);

  GpuMemoryTrackingGroup* CreateTrackingGroup(
      base::ProcessId pid, gpu::gles2::MemoryTracker* memory_tracker);

  uint64 GetClientMemoryUsage(const GpuMemoryManagerClient* client) const;

 private:
  friend class GpuMemoryManagerTest;
  friend class GpuMemoryTrackingGroup;
  friend class GpuMemoryManagerClientState;

  FRIEND_TEST_ALL_PREFIXES(GpuMemoryManagerTest,
                           TestManageBasicFunctionality);
  FRIEND_TEST_ALL_PREFIXES(GpuMemoryManagerTest,
                           TestManageChangingVisibility);
  FRIEND_TEST_ALL_PREFIXES(GpuMemoryManagerTest,
                           TestManageManyVisibleStubs);
  FRIEND_TEST_ALL_PREFIXES(GpuMemoryManagerTest,
                           TestManageManyNotVisibleStubs);
  FRIEND_TEST_ALL_PREFIXES(GpuMemoryManagerTest,
                           TestManageChangingLastUsedTime);
  FRIEND_TEST_ALL_PREFIXES(GpuMemoryManagerTest,
                           TestManageChangingImportanceShareGroup);
  FRIEND_TEST_ALL_PREFIXES(GpuMemoryManagerTest,
                           TestForegroundStubsGetBonusAllocation);
  FRIEND_TEST_ALL_PREFIXES(GpuMemoryManagerTest,
                           TestUpdateAvailableGpuMemory);
  FRIEND_TEST_ALL_PREFIXES(GpuMemoryManagerTest,
                           GpuMemoryAllocationCompareTests);
  FRIEND_TEST_ALL_PREFIXES(GpuMemoryManagerTest,
                           StubMemoryStatsForLastManageTests);
  FRIEND_TEST_ALL_PREFIXES(GpuMemoryManagerTest,
                           TestManagedUsageTracking);
  FRIEND_TEST_ALL_PREFIXES(GpuMemoryManagerTest,
                           BackgroundMru);
  FRIEND_TEST_ALL_PREFIXES(GpuMemoryManagerTest,
                           AllowNonvisibleMemory);
  FRIEND_TEST_ALL_PREFIXES(GpuMemoryManagerTest,
                           BackgroundDiscardPersistent);
  FRIEND_TEST_ALL_PREFIXES(GpuMemoryManagerTest,
                           UnmanagedTracking);
  FRIEND_TEST_ALL_PREFIXES(GpuMemoryManagerTest,
                           DefaultAllocation);

  typedef std::map<gpu::gles2::MemoryTracker*, GpuMemoryTrackingGroup*>
      TrackingGroupMap;

  typedef std::list<GpuMemoryManagerClientState*> ClientStateList;

  void Manage();
  void SetClientsHibernatedState() const;
  void AssignSurfacesAllocations();
  void AssignNonSurfacesAllocations();

  // Math helper function to compute the maximum value of cap such that
  // sum_i min(bytes[i], cap) <= bytes_sum_limit.
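  // Illustrative example (not from the original header): with
  // bytes = {10, 20, 30} and bytes_sum_limit = 40, the result is cap = 15,
  // since min(10, 15) + min(20, 15) + min(30, 15) = 10 + 15 + 15 = 40, and
  // any larger cap would push the sum over the limit.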
  static uint64 ComputeCap(std::vector<uint64> bytes, uint64 bytes_sum_limit);

  // Compute the allocation for clients when visible and not visible.
  void ComputeVisibleSurfacesAllocations();
  void DistributeRemainingMemoryToVisibleSurfaces();

  // Compute the budget for a client. Allow at most bytes_above_required_cap
  // bytes above client_state's required level. Allow at most
  // bytes_above_minimum_cap bytes above client_state's minimum level. Allow
  // at most bytes_overall_cap bytes total.
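  // A rough sketch of the intended clamping (an interpretation of the comment
  // above, not necessarily the exact implementation): start from the client's
  // minimum level, grow toward its required level by at most
  // bytes_above_minimum_cap, grow past the required level by at most
  // bytes_above_required_cap, and finally clamp the result to
  // bytes_overall_cap. The required and minimum levels come from the client's
  // most recently reported ManagedMemoryStats.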
  uint64 ComputeClientAllocationWhenVisible(
      GpuMemoryManagerClientState* client_state,
      uint64 bytes_above_required_cap,
      uint64 bytes_above_minimum_cap,
      uint64 bytes_overall_cap);

  // Update the amount of GPU memory we think we have in the system, based
  // on what the stubs' contexts report.
  void UpdateAvailableGpuMemory();
  void UpdateUnmanagedMemoryLimits();

  // The amount of video memory which is available for allocation.
  uint64 GetAvailableGpuMemory() const;

  // Minimum value of available GPU memory, no matter how little the GPU
  // reports. This is the default value.
  uint64 GetDefaultAvailableGpuMemory() const;

  // Maximum cap on total GPU memory, no matter how much the GPU reports.
  uint64 GetMaximumTotalGpuMemory() const;

  // The maximum and minimum amount of memory that a client may be assigned.
  uint64 GetMaximumClientAllocation() const;
  uint64 GetMinimumClientAllocation() const {
    return bytes_minimum_per_client_;
  }
  // The default amount of memory that a client is assigned, if it has not
  // reported any memory usage stats yet.
  uint64 GetDefaultClientAllocation() const {
    return bytes_default_per_client_;
  }

  static uint64 CalcAvailableFromGpuTotal(uint64 total_gpu_memory);

  // Send memory usage stats to the browser process.
  void SendUmaStatsToBrowser();

  // Get the current number of bytes allocated.
  uint64 GetCurrentUsage() const {
    return bytes_allocated_managed_current_ +
        bytes_allocated_unmanaged_current_;
  }

  // GpuMemoryTrackingGroup interface
  void TrackMemoryAllocatedChange(
      GpuMemoryTrackingGroup* tracking_group,
      uint64 old_size,
      uint64 new_size,
      gpu::gles2::MemoryTracker::Pool tracking_pool);
  void OnDestroyTrackingGroup(GpuMemoryTrackingGroup* tracking_group);
  bool EnsureGPUMemoryAvailable(uint64 size_needed);

  // GpuMemoryManagerClientState interface
  void SetClientStateVisible(
      GpuMemoryManagerClientState* client_state, bool visible);
  void SetClientStateManagedMemoryStats(
      GpuMemoryManagerClientState* client_state,
      const gpu::ManagedMemoryStats& stats);
  void OnDestroyClientState(GpuMemoryManagerClientState* client);

  // Add or remove a client from its clients list (visible, nonvisible, or
  // nonsurface). When adding the client, add it to the front of the list.
  void AddClientToList(GpuMemoryManagerClientState* client_state);
  void RemoveClientFromList(GpuMemoryManagerClientState* client_state);
  ClientStateList* GetClientList(GpuMemoryManagerClientState* client_state);

  // Interfaces for testing
  void TestingDisableScheduleManage() { disable_schedule_manage_ = true; }
  void TestingSetAvailableGpuMemory(uint64 bytes) {
    bytes_available_gpu_memory_ = bytes;
    bytes_available_gpu_memory_overridden_ = true;
  }

  void TestingSetMinimumClientAllocation(uint64 bytes) {
    bytes_minimum_per_client_ = bytes;
  }

  void TestingSetDefaultClientAllocation(uint64 bytes) {
    bytes_default_per_client_ = bytes;
  }

  void TestingSetUnmanagedLimitStep(uint64 bytes) {
    bytes_unmanaged_limit_step_ = bytes;
  }

  GpuChannelManager* channel_manager_;

  // Lists of visible and nonvisible clients, in most-recently-used order
  // (most recently used is first).
  ClientStateList clients_visible_mru_;
  ClientStateList clients_nonvisible_mru_;

  // A list of all clients that don't have a surface.
  ClientStateList clients_nonsurface_;

  // All context groups' tracking structures
  TrackingGroupMap tracking_groups_;

  base::CancelableClosure delayed_manage_callback_;
  bool manage_immediate_scheduled_;

  uint64 max_surfaces_with_frontbuffer_soft_limit_;

  // The priority cutoff used for all renderers.
  gpu::MemoryAllocation::PriorityCutoff priority_cutoff_;

  // The maximum amount of memory that may be allocated for GPU resources
  uint64 bytes_available_gpu_memory_;
  bool bytes_available_gpu_memory_overridden_;

  // The minimum and default allocations for a single client.
  uint64 bytes_minimum_per_client_;
  uint64 bytes_default_per_client_;

  // The current total memory usage, and historical maximum memory usage
  uint64 bytes_allocated_managed_current_;
  uint64 bytes_allocated_unmanaged_current_;
  uint64 bytes_allocated_historical_max_;

  // If bytes_allocated_unmanaged_current_ leaves the interval [low_, high_),
  // then call ScheduleManage() to take the change into account.
  uint64 bytes_allocated_unmanaged_high_;
  uint64 bytes_allocated_unmanaged_low_;

  // Update bytes_allocated_unmanaged_low/high_ in intervals of step_.
  uint64 bytes_unmanaged_limit_step_;
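
  // Illustrative example (assuming the low/high window is re-centered around
  // the current unmanaged usage in multiples of the step; the exact rounding
  // is an implementation detail): with a 16 MB step and 40 MB of unmanaged
  // memory currently allocated, low/high might become 32 MB / 48 MB, and a
  // Manage() pass would be scheduled once unmanaged usage drops below 32 MB
  // or reaches 48 MB.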

  // Used to disable automatic changes to Manage() in testing.
  bool disable_schedule_manage_;

  DISALLOW_COPY_AND_ASSIGN(GpuMemoryManager);
};

}  // namespace content

#endif  // CONTENT_COMMON_GPU_GPU_MEMORY_MANAGER_H_