// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "content/common/gpu/gpu_memory_manager.h"

#include <algorithm>
#include <limits>
#include <vector>

#include "base/bind.h"
#include "base/command_line.h"
#include "base/debug/trace_event.h"
#include "base/message_loop/message_loop.h"
#include "base/process/process_handle.h"
#include "base/strings/string_number_conversions.h"
#include "content/common/gpu/gpu_channel_manager.h"
#include "content/common/gpu/gpu_memory_allocation.h"
#include "content/common/gpu/gpu_memory_manager_client.h"
#include "content/common/gpu/gpu_memory_tracking.h"
#include "content/common/gpu/gpu_memory_uma_stats.h"
#include "content/common/gpu/gpu_messages.h"
#include "gpu/command_buffer/service/gpu_switches.h"

namespace content {
namespace {

const int kDelayedScheduleManageTimeoutMs = 67;

const uint64 kBytesAllocatedUnmanagedStep = 16 * 1024 * 1024;

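// Adjusts *total_size by the delta between new_size and old_size. When
// new_size < old_size this relies on unsigned wraparound to subtract; the
// DCHECK verifies that the running total cannot underflow.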
void TrackValueChanged(uint64 old_size, uint64 new_size, uint64* total_size) {
  DCHECK(new_size > old_size || *total_size >= (old_size - new_size));
  *total_size += (new_size - old_size);
}

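// Round n up (RoundUp) or down (RoundDown) to the nearest multiple of mul,
// using integer arithmetic. These are used below to snap the unmanaged
// memory limits to kBytesAllocatedUnmanagedStep boundaries.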
template<typename T>
T RoundUp(T n, T mul) {
  return ((n + mul - 1) / mul) * mul;
}

template<typename T>
T RoundDown(T n, T mul) {
  return (n / mul) * mul;
}

}  // namespace

GpuMemoryManager::GpuMemoryManager(
    GpuChannelManager* channel_manager,
    uint64 max_surfaces_with_frontbuffer_soft_limit)
    : channel_manager_(channel_manager),
      manage_immediate_scheduled_(false),
      max_surfaces_with_frontbuffer_soft_limit_(
          max_surfaces_with_frontbuffer_soft_limit),
      bytes_available_gpu_memory_(0),
      bytes_available_gpu_memory_overridden_(false),
      bytes_minimum_per_client_(0),
      bytes_default_per_client_(0),
      bytes_allocated_managed_current_(0),
      bytes_allocated_managed_visible_(0),
      bytes_allocated_managed_nonvisible_(0),
      bytes_allocated_unmanaged_current_(0),
      bytes_allocated_historical_max_(0),
      bytes_allocated_unmanaged_high_(0),
      bytes_allocated_unmanaged_low_(0),
      bytes_unmanaged_limit_step_(kBytesAllocatedUnmanagedStep),
      disable_schedule_manage_(false) {
  CommandLine* command_line = CommandLine::ForCurrentProcess();

#if defined(OS_ANDROID)
  bytes_default_per_client_ = 16 * 1024 * 1024;
  bytes_minimum_per_client_ = 16 * 1024 * 1024;
#elif defined(OS_CHROMEOS)
  bytes_default_per_client_ = 64 * 1024 * 1024;
  bytes_minimum_per_client_ = 4 * 1024 * 1024;
#else
  bytes_default_per_client_ = 64 * 1024 * 1024;
  bytes_minimum_per_client_ = 64 * 1024 * 1024;
#endif

  // On Android, always discard everything that is nonvisible.
  // On Mac, use as little memory as possible to avoid stability issues.
#if defined(OS_ANDROID) || defined(OS_MACOSX)
  allow_nonvisible_memory_ = false;
#else
  allow_nonvisible_memory_ = true;
#endif

  if (command_line->HasSwitch(switches::kForceGpuMemAvailableMb)) {
    base::StringToUint64(
        command_line->GetSwitchValueASCII(switches::kForceGpuMemAvailableMb),
        &bytes_available_gpu_memory_);
    bytes_available_gpu_memory_ *= 1024 * 1024;
    bytes_available_gpu_memory_overridden_ = true;
  } else {
    bytes_available_gpu_memory_ = GetDefaultAvailableGpuMemory();
  }
}

GpuMemoryManager::~GpuMemoryManager() {
  DCHECK(tracking_groups_.empty());
  DCHECK(clients_visible_mru_.empty());
  DCHECK(clients_nonvisible_mru_.empty());
  DCHECK(clients_nonsurface_.empty());
  DCHECK(!bytes_allocated_managed_current_);
  DCHECK(!bytes_allocated_unmanaged_current_);
  DCHECK(!bytes_allocated_managed_visible_);
  DCHECK(!bytes_allocated_managed_nonvisible_);
}

uint64 GpuMemoryManager::GetAvailableGpuMemory() const {
  // Allow unmanaged allocations to over-subscribe by at most (high_ - low_)
  // before restricting managed (compositor) memory based on unmanaged usage.
  if (bytes_allocated_unmanaged_low_ > bytes_available_gpu_memory_)
    return 0;
  return bytes_available_gpu_memory_ - bytes_allocated_unmanaged_low_;
}

uint64 GpuMemoryManager::GetDefaultAvailableGpuMemory() const {
#if defined(OS_ANDROID)
  return 16 * 1024 * 1024;
#elif defined(OS_CHROMEOS)
  return 1024 * 1024 * 1024;
#else
  return 256 * 1024 * 1024;
#endif
}

uint64 GpuMemoryManager::GetMaximumTotalGpuMemory() const {
#if defined(OS_ANDROID)
  return 256 * 1024 * 1024;
#else
  return 1024 * 1024 * 1024;
#endif
}

uint64 GpuMemoryManager::GetMaximumClientAllocation() const {
#if defined(OS_ANDROID) || defined(OS_CHROMEOS)
  return bytes_available_gpu_memory_;
#else
  // This is to avoid allowing a single page to use the full 256MB of memory
  // (the current total limit). Long-scrolling pages will hit this limit,
  // resulting in instability on some platforms (e.g., issue 141377).
  return bytes_available_gpu_memory_ / 2;
#endif
}

uint64 GpuMemoryManager::CalcAvailableFromGpuTotal(uint64 total_gpu_memory) {
#if defined(OS_ANDROID)
  // We don't need to reduce the total on Android, since
  // the total is an estimate to begin with.
  return total_gpu_memory;
#else
  // Allow Chrome to use 75% of total GPU memory, or all-but-64MB of GPU
  // memory, whichever is less.
  return std::min(3 * total_gpu_memory / 4,
                  total_gpu_memory - 64 * 1024 * 1024);
#endif
}

void GpuMemoryManager::UpdateAvailableGpuMemory() {
  // If the amount of video memory to use was specified at the command
  // line, never change it.
  if (bytes_available_gpu_memory_overridden_)
    return;

  // On non-Android, we use an operating system query when possible.
  // We do not have a reliable concept of multiple GPUs existing in
  // a system, so just be safe and go with the minimum encountered.
  uint64 bytes_min = 0;

  // Only use the clients that are visible, because otherwise the set of
  // clients we are querying could become extremely large.
  for (ClientStateList::const_iterator it = clients_visible_mru_.begin();
       it != clients_visible_mru_.end();
       ++it) {
    const GpuMemoryManagerClientState* client_state = *it;
    if (!client_state->has_surface_)
      continue;
    if (!client_state->visible_)
      continue;

    uint64 bytes = 0;
    if (client_state->client_->GetTotalGpuMemory(&bytes)) {
      if (!bytes_min || bytes < bytes_min)
        bytes_min = bytes;
    }
  }

  if (!bytes_min)
    return;

  bytes_available_gpu_memory_ = CalcAvailableFromGpuTotal(bytes_min);

  // Never go below the default allocation.
  bytes_available_gpu_memory_ = std::max(bytes_available_gpu_memory_,
                                         GetDefaultAvailableGpuMemory());

  // Never go above the maximum.
  bytes_available_gpu_memory_ = std::min(bytes_available_gpu_memory_,
                                         GetMaximumTotalGpuMemory());
}

void GpuMemoryManager::UpdateUnmanagedMemoryLimits() {
  // Set the limit to be [current_, current_ + step_ / 4), with the endpoints
  // of the interval rounded down and up to the nearest step_, to avoid
  // thrashing the interval.
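  // For example, with step_ = 16MB and current_ = 20MB, this produces
  // high_ = RoundUp(24MB, 16MB) = 32MB and low_ = RoundDown(20MB, 16MB)
  // = 16MB.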
  bytes_allocated_unmanaged_high_ = RoundUp(
      bytes_allocated_unmanaged_current_ + bytes_unmanaged_limit_step_ / 4,
      bytes_unmanaged_limit_step_);
  bytes_allocated_unmanaged_low_ = RoundDown(
      bytes_allocated_unmanaged_current_,
      bytes_unmanaged_limit_step_);
}

void GpuMemoryManager::ScheduleManage(
    ScheduleManageTime schedule_manage_time) {
  if (disable_schedule_manage_)
    return;
  if (manage_immediate_scheduled_)
    return;
  if (schedule_manage_time == kScheduleManageNow) {
    base::MessageLoop::current()->PostTask(
        FROM_HERE, base::Bind(&GpuMemoryManager::Manage, AsWeakPtr()));
    manage_immediate_scheduled_ = true;
    if (!delayed_manage_callback_.IsCancelled())
      delayed_manage_callback_.Cancel();
  } else {
    if (!delayed_manage_callback_.IsCancelled())
      return;
    delayed_manage_callback_.Reset(base::Bind(&GpuMemoryManager::Manage,
                                              AsWeakPtr()));
    base::MessageLoop::current()->PostDelayedTask(
        FROM_HERE,
        delayed_manage_callback_.callback(),
        base::TimeDelta::FromMilliseconds(kDelayedScheduleManageTimeoutMs));
  }
}

void GpuMemoryManager::TrackMemoryAllocatedChange(
    GpuMemoryTrackingGroup* tracking_group,
    uint64 old_size,
    uint64 new_size,
    gpu::gles2::MemoryTracker::Pool tracking_pool) {
  TrackValueChanged(old_size, new_size, &tracking_group->size_);
  switch (tracking_pool) {
    case gpu::gles2::MemoryTracker::kManaged:
      TrackValueChanged(old_size, new_size, &bytes_allocated_managed_current_);
      break;
    case gpu::gles2::MemoryTracker::kUnmanaged:
      TrackValueChanged(old_size,
                        new_size,
                        &bytes_allocated_unmanaged_current_);
      break;
    default:
      NOTREACHED();
      break;
  }
  if (new_size != old_size) {
    TRACE_COUNTER1("gpu",
                   "GpuMemoryUsage",
                   GetCurrentUsage());
  }

  // If we've gone past our current limit on unmanaged memory, schedule a
  // re-manage to take the unmanaged memory into account.
  if (bytes_allocated_unmanaged_current_ >= bytes_allocated_unmanaged_high_)
    ScheduleManage(kScheduleManageNow);
  if (bytes_allocated_unmanaged_current_ < bytes_allocated_unmanaged_low_)
    ScheduleManage(kScheduleManageLater);

  if (GetCurrentUsage() > bytes_allocated_historical_max_) {
    bytes_allocated_historical_max_ = GetCurrentUsage();
    // If we're blowing into new memory usage territory, spam the browser
    // process with the most up-to-date information about our memory usage.
    SendUmaStatsToBrowser();
  }
}

bool GpuMemoryManager::EnsureGPUMemoryAvailable(uint64 /* size_needed */) {
  // TODO: Check if there is enough space. Lose contexts until there is.
  return true;
}

GpuMemoryManagerClientState* GpuMemoryManager::CreateClientState(
    GpuMemoryManagerClient* client,
    bool has_surface,
    bool visible) {
  TrackingGroupMap::iterator tracking_group_it =
      tracking_groups_.find(client->GetMemoryTracker());
  DCHECK(tracking_group_it != tracking_groups_.end());
  GpuMemoryTrackingGroup* tracking_group = tracking_group_it->second;

  GpuMemoryManagerClientState* client_state = new GpuMemoryManagerClientState(
      this, client, tracking_group, has_surface, visible);
  TrackValueChanged(0, client_state->managed_memory_stats_.bytes_allocated,
                    client_state->visible_ ?
                        &bytes_allocated_managed_visible_ :
                        &bytes_allocated_managed_nonvisible_);
  AddClientToList(client_state);
  ScheduleManage(kScheduleManageNow);
  return client_state;
}

void GpuMemoryManager::OnDestroyClientState(
    GpuMemoryManagerClientState* client_state) {
  RemoveClientFromList(client_state);
  TrackValueChanged(client_state->managed_memory_stats_.bytes_allocated, 0,
                    client_state->visible_ ?
                        &bytes_allocated_managed_visible_ :
                        &bytes_allocated_managed_nonvisible_);
  ScheduleManage(kScheduleManageLater);
}

void GpuMemoryManager::SetClientStateVisible(
    GpuMemoryManagerClientState* client_state, bool visible) {
  DCHECK(client_state->has_surface_);
  if (client_state->visible_ == visible)
    return;

  RemoveClientFromList(client_state);
  client_state->visible_ = visible;
  AddClientToList(client_state);

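  // Note that client_state->visible_ has already been flipped above, so the
  // first call subtracts this client's allocation from the bucket it just
  // left, and the second adds it to the bucket it just joined.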
  TrackValueChanged(client_state->managed_memory_stats_.bytes_allocated, 0,
                    client_state->visible_ ?
                        &bytes_allocated_managed_nonvisible_ :
                        &bytes_allocated_managed_visible_);
  TrackValueChanged(0, client_state->managed_memory_stats_.bytes_allocated,
                    client_state->visible_ ?
                        &bytes_allocated_managed_visible_ :
                        &bytes_allocated_managed_nonvisible_);
  ScheduleManage(visible ? kScheduleManageNow : kScheduleManageLater);
}

void GpuMemoryManager::SetClientStateManagedMemoryStats(
    GpuMemoryManagerClientState* client_state,
    const GpuManagedMemoryStats& stats) {
  TrackValueChanged(client_state->managed_memory_stats_.bytes_allocated,
                    stats.bytes_allocated,
                    client_state->visible_ ?
                        &bytes_allocated_managed_visible_ :
                        &bytes_allocated_managed_nonvisible_);
  client_state->managed_memory_stats_ = stats;

  // If this is the first time that stats have been received for this
  // client, use them immediately.
  if (!client_state->managed_memory_stats_received_) {
    client_state->managed_memory_stats_received_ = true;
    ScheduleManage(kScheduleManageNow);
    return;
  }

  // If these statistics sit outside of the range that we used in our
  // computation of memory allocations, then recompute the allocations.
  if (client_state->managed_memory_stats_.bytes_nice_to_have >
      client_state->bytes_nicetohave_limit_high_) {
    ScheduleManage(kScheduleManageNow);
  } else if (client_state->managed_memory_stats_.bytes_nice_to_have <
             client_state->bytes_nicetohave_limit_low_) {
    ScheduleManage(kScheduleManageLater);
  }
}

GpuMemoryTrackingGroup* GpuMemoryManager::CreateTrackingGroup(
    base::ProcessId pid, gpu::gles2::MemoryTracker* memory_tracker) {
  GpuMemoryTrackingGroup* tracking_group = new GpuMemoryTrackingGroup(
      pid, memory_tracker, this);
  DCHECK(!tracking_groups_.count(tracking_group->GetMemoryTracker()));
  tracking_groups_.insert(std::make_pair(tracking_group->GetMemoryTracker(),
                                         tracking_group));
  return tracking_group;
}

void GpuMemoryManager::OnDestroyTrackingGroup(
    GpuMemoryTrackingGroup* tracking_group) {
  DCHECK(tracking_groups_.count(tracking_group->GetMemoryTracker()));
  tracking_groups_.erase(tracking_group->GetMemoryTracker());
}

void GpuMemoryManager::GetVideoMemoryUsageStats(
    GPUVideoMemoryUsageStats* video_memory_usage_stats) const {
  // For each context group, assign its memory usage to its PID.
  video_memory_usage_stats->process_map.clear();
  for (TrackingGroupMap::const_iterator i = tracking_groups_.begin();
       i != tracking_groups_.end();
       ++i) {
    const GpuMemoryTrackingGroup* tracking_group = i->second;
    video_memory_usage_stats->process_map[
        tracking_group->GetPid()].video_memory += tracking_group->GetSize();
  }

  // Assign the total across all processes in the GPU process.
  video_memory_usage_stats->process_map[
      base::GetCurrentProcId()].video_memory = GetCurrentUsage();
  video_memory_usage_stats->process_map[
      base::GetCurrentProcId()].has_duplicates = true;

  video_memory_usage_stats->bytes_allocated = GetCurrentUsage();
  video_memory_usage_stats->bytes_allocated_historical_max =
      bytes_allocated_historical_max_;
}

void GpuMemoryManager::Manage() {
  manage_immediate_scheduled_ = false;
  delayed_manage_callback_.Cancel();

  // Update the amount of GPU memory available on the system.
  UpdateAvailableGpuMemory();

  // Update the limit on unmanaged memory.
  UpdateUnmanagedMemoryLimits();

  // Determine which clients are "hibernated" (which determines the
  // distribution of frontbuffers and memory among clients that don't have
  // surfaces).
  SetClientsHibernatedState();

  // Assign memory allocations to clients that have surfaces.
  AssignSurfacesAllocations();

  // Assign memory allocations to clients that don't have surfaces.
  AssignNonSurfacesAllocations();

  SendUmaStatsToBrowser();
}

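// ComputeCap returns the largest uniform per-client cap such that capping
// every entry of |bytes| keeps the sum within |bytes_sum_limit|, with any
// leftover budget spread evenly across the capped entries. For example,
// ComputeCap({10, 20, 30}, 45) returns 17, since 10 + 17 + 17 = 44 <= 45,
// while a cap of 18 would give 46.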
// static
uint64 GpuMemoryManager::ComputeCap(
    std::vector<uint64> bytes, uint64 bytes_sum_limit) {
  size_t bytes_size = bytes.size();
  uint64 bytes_sum = 0;

  if (bytes_size == 0)
    return std::numeric_limits<uint64>::max();

  // Sort and add up all entries.
  std::sort(bytes.begin(), bytes.end());
  for (size_t i = 0; i < bytes_size; ++i)
    bytes_sum += bytes[i];

  // As we go through the loop below, let bytes_partial_sum be the
  // sum of bytes[0] + ... + bytes[bytes_size - i - 1].
  uint64 bytes_partial_sum = bytes_sum;

  // Try using each entry as a cap, and see where we get cut off.
  for (size_t i = 0; i < bytes_size; ++i) {
    // Try limiting the cap to bytes[bytes_size - i - 1].
    uint64 test_cap = bytes[bytes_size - i - 1];
    uint64 bytes_sum_with_test_cap = i * test_cap + bytes_partial_sum;

    // If that fits, raise test_cap to give an even distribution to the
    // last i entries.
    if (bytes_sum_with_test_cap <= bytes_sum_limit) {
      if (i == 0)
        return std::numeric_limits<uint64>::max();
      else
        return test_cap + (bytes_sum_limit - bytes_sum_with_test_cap) / i;
    } else {
      bytes_partial_sum -= test_cap;
    }
  }

  // If we got here, then we can't fully accommodate any of the clients,
  // so distribute bytes_sum_limit evenly.
  return bytes_sum_limit / bytes_size;
}

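// A visible client's allocation is built up in tiers: the minimum
// allocation, plus its required bytes (with 1/8 headroom) subject to
// bytes_above_minimum_cap, plus its nice-to-have bytes (with 1/3 headroom)
// subject to bytes_above_required_cap, with the total clamped to
// bytes_overall_cap.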
uint64 GpuMemoryManager::ComputeClientAllocationWhenVisible(
    GpuMemoryManagerClientState* client_state,
    uint64 bytes_above_required_cap,
    uint64 bytes_above_minimum_cap,
    uint64 bytes_overall_cap) {
  GpuManagedMemoryStats* stats = &client_state->managed_memory_stats_;

  if (!client_state->managed_memory_stats_received_)
    return GetDefaultClientAllocation();

  uint64 bytes_required = 9 * stats->bytes_required / 8;
  bytes_required = std::min(bytes_required, GetMaximumClientAllocation());
  bytes_required = std::max(bytes_required, GetMinimumClientAllocation());

  uint64 bytes_nicetohave = 4 * stats->bytes_nice_to_have / 3;
  bytes_nicetohave = std::min(bytes_nicetohave, GetMaximumClientAllocation());
  bytes_nicetohave = std::max(bytes_nicetohave, GetMinimumClientAllocation());
  bytes_nicetohave = std::max(bytes_nicetohave, bytes_required);

  uint64 allocation = GetMinimumClientAllocation();
  allocation += std::min(bytes_required - GetMinimumClientAllocation(),
                         bytes_above_minimum_cap);
  allocation += std::min(bytes_nicetohave - bytes_required,
                         bytes_above_required_cap);
  allocation = std::min(allocation,
                        bytes_overall_cap);
  return allocation;
}

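// A nonvisible client receives nothing until its first stats arrive, and
// thereafter only enough to keep what it strictly requires (with the same
// 1/8 headroom used for visible clients).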
uint64 GpuMemoryManager::ComputeClientAllocationWhenNonvisible(
    GpuMemoryManagerClientState* client_state) {
  if (!client_state->managed_memory_stats_received_)
    return 0;

  return 9 * client_state->managed_memory_stats_.bytes_required / 8;
}

void GpuMemoryManager::ComputeVisibleSurfacesAllocations() {
  uint64 bytes_available_total = GetAvailableGpuMemory();
  uint64 bytes_above_required_cap = std::numeric_limits<uint64>::max();
  uint64 bytes_above_minimum_cap = std::numeric_limits<uint64>::max();
  uint64 bytes_overall_cap_visible = GetMaximumClientAllocation();

  // Compute memory usage at three levels:
  // - painting everything that is nice-to-have for visible clients
  // - painting only what is required for visible clients
  // - giving every client the minimum allocation
  uint64 bytes_nicetohave_visible = 0;
  uint64 bytes_required_visible = 0;
  uint64 bytes_minimum_visible = 0;
  for (ClientStateList::const_iterator it = clients_visible_mru_.begin();
       it != clients_visible_mru_.end();
       ++it) {
    GpuMemoryManagerClientState* client_state = *it;
    client_state->bytes_allocation_ideal_nicetohave_ =
        ComputeClientAllocationWhenVisible(
            client_state,
            bytes_above_required_cap,
            bytes_above_minimum_cap,
            bytes_overall_cap_visible);
    client_state->bytes_allocation_ideal_required_ =
        ComputeClientAllocationWhenVisible(
            client_state,
            0,
            bytes_above_minimum_cap,
            bytes_overall_cap_visible);
    client_state->bytes_allocation_ideal_minimum_ =
        ComputeClientAllocationWhenVisible(
            client_state,
            0,
            0,
            bytes_overall_cap_visible);

    bytes_nicetohave_visible +=
        client_state->bytes_allocation_ideal_nicetohave_;
    bytes_required_visible +=
        client_state->bytes_allocation_ideal_required_;
    bytes_minimum_visible +=
        client_state->bytes_allocation_ideal_minimum_;
  }

  // Determine which of those three points we can satisfy, and limit
  // bytes_above_required_cap and bytes_above_minimum_cap so as not to go
  // over the limit.
  if (bytes_minimum_visible > bytes_available_total) {
    bytes_above_required_cap = 0;
    bytes_above_minimum_cap = 0;
  } else if (bytes_required_visible > bytes_available_total) {
    std::vector<uint64> bytes_to_fit;
    for (ClientStateList::const_iterator it = clients_visible_mru_.begin();
         it != clients_visible_mru_.end();
         ++it) {
      GpuMemoryManagerClientState* client_state = *it;
      bytes_to_fit.push_back(client_state->bytes_allocation_ideal_required_ -
                             client_state->bytes_allocation_ideal_minimum_);
    }
    bytes_above_required_cap = 0;
    bytes_above_minimum_cap = ComputeCap(
        bytes_to_fit, bytes_available_total - bytes_minimum_visible);
  } else if (bytes_nicetohave_visible > bytes_available_total) {
    std::vector<uint64> bytes_to_fit;
    for (ClientStateList::const_iterator it = clients_visible_mru_.begin();
         it != clients_visible_mru_.end();
         ++it) {
      GpuMemoryManagerClientState* client_state = *it;
      bytes_to_fit.push_back(client_state->bytes_allocation_ideal_nicetohave_ -
                             client_state->bytes_allocation_ideal_required_);
    }
    bytes_above_required_cap = ComputeCap(
        bytes_to_fit, bytes_available_total - bytes_required_visible);
    bytes_above_minimum_cap = std::numeric_limits<uint64>::max();
  }

  // Given those computed limits, set the actual memory allocations for the
  // visible clients, tracking the largest allocation and the total allocation
  // for future use.
  uint64 bytes_allocated_visible = 0;
  uint64 bytes_allocated_max_client_allocation = 0;
  for (ClientStateList::const_iterator it = clients_visible_mru_.begin();
       it != clients_visible_mru_.end();
       ++it) {
    GpuMemoryManagerClientState* client_state = *it;
    client_state->bytes_allocation_when_visible_ =
        ComputeClientAllocationWhenVisible(
            client_state,
            bytes_above_required_cap,
            bytes_above_minimum_cap,
            bytes_overall_cap_visible);
    bytes_allocated_visible += client_state->bytes_allocation_when_visible_;
    bytes_allocated_max_client_allocation = std::max(
        bytes_allocated_max_client_allocation,
        client_state->bytes_allocation_when_visible_);
  }

  // Set the limit for nonvisible clients for when they become visible.
  // Use the same formula, with a lowered overall cap, in case any of the
  // currently-nonvisible clients are much more resource-intensive than any
  // of the existing clients.
  uint64 bytes_overall_cap_nonvisible = bytes_allocated_max_client_allocation;
  if (bytes_available_total > bytes_allocated_visible) {
    bytes_overall_cap_nonvisible +=
        bytes_available_total - bytes_allocated_visible;
  }
  bytes_overall_cap_nonvisible = std::min(bytes_overall_cap_nonvisible,
                                          GetMaximumClientAllocation());
  for (ClientStateList::const_iterator it = clients_nonvisible_mru_.begin();
       it != clients_nonvisible_mru_.end();
       ++it) {
    GpuMemoryManagerClientState* client_state = *it;
    client_state->bytes_allocation_when_visible_ =
        ComputeClientAllocationWhenVisible(
            client_state,
            bytes_above_required_cap,
            bytes_above_minimum_cap,
            bytes_overall_cap_nonvisible);
  }
}

void GpuMemoryManager::ComputeNonvisibleSurfacesAllocations() {
  uint64 bytes_allocated_visible = 0;
  for (ClientStateList::const_iterator it = clients_visible_mru_.begin();
       it != clients_visible_mru_.end();
       ++it) {
    GpuMemoryManagerClientState* client_state = *it;
    bytes_allocated_visible += client_state->bytes_allocation_when_visible_;
  }

  // Allow up to 1/4 of the memory that was available for visible clients to
  // go to nonvisible clients.
  uint64 bytes_available_total = GetAvailableGpuMemory();
  uint64 bytes_available_nonvisible = 0;
  uint64 bytes_allocated_nonvisible = 0;
  if (bytes_available_total > bytes_allocated_visible) {
    bytes_available_nonvisible = std::min(
        bytes_available_total / 4,
        bytes_available_total - bytes_allocated_visible);
  }

  // Clamp the amount of memory available to nonvisible clients.
  if (!allow_nonvisible_memory_)
    bytes_available_nonvisible = 0;

  // Determine which now-visible clients should keep their contents when
  // they are made nonvisible.
  for (ClientStateList::const_iterator it = clients_visible_mru_.begin();
       it != clients_visible_mru_.end();
       ++it) {
    GpuMemoryManagerClientState* client_state = *it;

    // Compute the amount of space available for this renderer when it is
    // nonvisible. Do not count this client's allocation while visible against
    // the nonvisible clients' allocation total.
    uint64 bytes_available_nonvisible_adjusted = std::min(
        bytes_available_nonvisible +
            client_state->bytes_allocation_when_visible_ / 4,
        bytes_available_total / 4);

    // Allow this client to keep its contents if they fit in the allocation.
    client_state->bytes_allocation_when_nonvisible_ =
        ComputeClientAllocationWhenNonvisible(client_state);
    if (client_state->bytes_allocation_when_nonvisible_ >
        bytes_available_nonvisible_adjusted)
      client_state->bytes_allocation_when_nonvisible_ = 0;
  }

  // Compute which currently nonvisible clients should keep their contents.
  for (ClientStateList::const_iterator it = clients_nonvisible_mru_.begin();
       it != clients_nonvisible_mru_.end();
       ++it) {
    GpuMemoryManagerClientState* client_state = *it;

    // If this client is nonvisible and has already had its contents discarded,
    // don't re-generate the contents until the client becomes visible again.
    if (!client_state->bytes_allocation_when_nonvisible_)
      continue;

    client_state->bytes_allocation_when_nonvisible_ =
        ComputeClientAllocationWhenNonvisible(client_state);

    // Take into account all more recently used nonvisible clients, and only
    // allow this client to keep its contents if it still fits.
    if (bytes_allocated_nonvisible +
        client_state->bytes_allocation_when_nonvisible_ >
        bytes_available_nonvisible) {
      client_state->bytes_allocation_when_nonvisible_ = 0;
    }
    bytes_allocated_nonvisible +=
        client_state->bytes_allocation_when_nonvisible_;
  }
}

void GpuMemoryManager::DistributeRemainingMemoryToVisibleSurfaces() {
  uint64 bytes_available_total = GetAvailableGpuMemory();
  uint64 bytes_allocated_total = 0;

  for (ClientStateList::const_iterator it = clients_visible_mru_.begin();
       it != clients_visible_mru_.end();
       ++it) {
    GpuMemoryManagerClientState* client_state = *it;
    bytes_allocated_total += client_state->bytes_allocation_when_visible_;
  }
  for (ClientStateList::const_iterator it = clients_nonvisible_mru_.begin();
       it != clients_nonvisible_mru_.end();
       ++it) {
    GpuMemoryManagerClientState* client_state = *it;
    bytes_allocated_total += client_state->bytes_allocation_when_nonvisible_;
  }

  if (bytes_allocated_total >= bytes_available_total)
    return;

  std::vector<uint64> bytes_extra_requests;
  for (ClientStateList::const_iterator it = clients_visible_mru_.begin();
       it != clients_visible_mru_.end();
       ++it) {
    GpuMemoryManagerClientState* client_state = *it;
    CHECK(GetMaximumClientAllocation() >=
          client_state->bytes_allocation_when_visible_);
    uint64 bytes_extra = GetMaximumClientAllocation() -
                         client_state->bytes_allocation_when_visible_;
    bytes_extra_requests.push_back(bytes_extra);
  }
  uint64 bytes_extra_cap = ComputeCap(
      bytes_extra_requests, bytes_available_total - bytes_allocated_total);
  for (ClientStateList::const_iterator it = clients_visible_mru_.begin();
       it != clients_visible_mru_.end();
       ++it) {
    GpuMemoryManagerClientState* client_state = *it;
    uint64 bytes_extra = GetMaximumClientAllocation() -
                         client_state->bytes_allocation_when_visible_;
    client_state->bytes_allocation_when_visible_ += std::min(
        bytes_extra, bytes_extra_cap);
  }
}

void GpuMemoryManager::AssignSurfacesAllocations() {
  // Compute allocations for all clients.
  ComputeVisibleSurfacesAllocations();
  ComputeNonvisibleSurfacesAllocations();

  // Distribute the remaining memory to visible clients.
  DistributeRemainingMemoryToVisibleSurfaces();

  // Send the resulting allocations to the clients.
  ClientStateList clients = clients_visible_mru_;
  clients.insert(clients.end(),
                 clients_nonvisible_mru_.begin(),
                 clients_nonvisible_mru_.end());
  for (ClientStateList::const_iterator it = clients.begin();
       it != clients.end();
       ++it) {
    GpuMemoryManagerClientState* client_state = *it;

    // Re-assign memory limits to this client when its "nice to have" bucket
    // grows or shrinks by 1/4.
    client_state->bytes_nicetohave_limit_high_ =
        5 * client_state->managed_memory_stats_.bytes_nice_to_have / 4;
    client_state->bytes_nicetohave_limit_low_ =
        3 * client_state->managed_memory_stats_.bytes_nice_to_have / 4;

    // Populate and send the allocation to the client.
    GpuMemoryAllocation allocation;

    allocation.browser_allocation.suggest_have_frontbuffer =
        !client_state->hibernated_;

    allocation.renderer_allocation.bytes_limit_when_visible =
        client_state->bytes_allocation_when_visible_;
    // Use a more conservative memory allocation policy on Mac because the
    // platform is unstable when under memory pressure.
    // http://crbug.com/141377
    allocation.renderer_allocation.priority_cutoff_when_visible =
#if defined(OS_MACOSX)
        GpuMemoryAllocationForRenderer::kPriorityCutoffAllowNiceToHave;
#else
        GpuMemoryAllocationForRenderer::kPriorityCutoffAllowEverything;
#endif

    allocation.renderer_allocation.bytes_limit_when_not_visible =
        client_state->bytes_allocation_when_nonvisible_;
    allocation.renderer_allocation.priority_cutoff_when_not_visible =
        GpuMemoryAllocationForRenderer::kPriorityCutoffAllowOnlyRequired;

    client_state->client_->SetMemoryAllocation(allocation);
  }
}

void GpuMemoryManager::AssignNonSurfacesAllocations() {
  for (ClientStateList::const_iterator it = clients_nonsurface_.begin();
       it != clients_nonsurface_.end();
       ++it) {
    GpuMemoryManagerClientState* client_state = *it;
    GpuMemoryAllocation allocation;

    if (!client_state->hibernated_) {
      allocation.renderer_allocation.bytes_limit_when_visible =
          GetMinimumClientAllocation();
      allocation.renderer_allocation.priority_cutoff_when_visible =
          GpuMemoryAllocationForRenderer::kPriorityCutoffAllowEverything;
    }

    client_state->client_->SetMemoryAllocation(allocation);
  }
}

void GpuMemoryManager::SetClientsHibernatedState() const {
  // Re-set all tracking groups as being hibernated.
  for (TrackingGroupMap::const_iterator it = tracking_groups_.begin();
       it != tracking_groups_.end();
       ++it) {
    GpuMemoryTrackingGroup* tracking_group = it->second;
    tracking_group->hibernated_ = true;
  }
  // All clients with surfaces that are visible are non-hibernated.
  uint64 non_hibernated_clients = 0;
  for (ClientStateList::const_iterator it = clients_visible_mru_.begin();
       it != clients_visible_mru_.end();
       ++it) {
    GpuMemoryManagerClientState* client_state = *it;
    client_state->hibernated_ = false;
    client_state->tracking_group_->hibernated_ = false;
    non_hibernated_clients++;
  }
  // Then an additional few clients with surfaces are non-hibernated too, up to
  // a fixed limit.
  for (ClientStateList::const_iterator it = clients_nonvisible_mru_.begin();
       it != clients_nonvisible_mru_.end();
       ++it) {
    GpuMemoryManagerClientState* client_state = *it;
    if (non_hibernated_clients < max_surfaces_with_frontbuffer_soft_limit_) {
      client_state->hibernated_ = false;
      client_state->tracking_group_->hibernated_ = false;
      non_hibernated_clients++;
    } else {
      client_state->hibernated_ = true;
    }
  }
  // Clients that don't have surfaces are non-hibernated if they are
  // in a GL share group with a non-hibernated surface.
  for (ClientStateList::const_iterator it = clients_nonsurface_.begin();
       it != clients_nonsurface_.end();
       ++it) {
    GpuMemoryManagerClientState* client_state = *it;
    client_state->hibernated_ = client_state->tracking_group_->hibernated_;
  }
}

void GpuMemoryManager::SendUmaStatsToBrowser() {
  if (!channel_manager_)
    return;
  GPUMemoryUmaStats params;
  params.bytes_allocated_current = GetCurrentUsage();
  params.bytes_allocated_max = bytes_allocated_historical_max_;
  params.bytes_limit = bytes_available_gpu_memory_;
  params.client_count = clients_visible_mru_.size() +
                        clients_nonvisible_mru_.size() +
                        clients_nonsurface_.size();
  params.context_group_count = tracking_groups_.size();
  channel_manager_->Send(new GpuHostMsg_GpuMemoryUmaStats(params));
}

GpuMemoryManager::ClientStateList* GpuMemoryManager::GetClientList(
    GpuMemoryManagerClientState* client_state) {
  if (client_state->has_surface_) {
    if (client_state->visible_)
      return &clients_visible_mru_;
    else
      return &clients_nonvisible_mru_;
  }
  return &clients_nonsurface_;
}

void GpuMemoryManager::AddClientToList(
    GpuMemoryManagerClientState* client_state) {
  DCHECK(!client_state->list_iterator_valid_);
  ClientStateList* client_list = GetClientList(client_state);
  client_state->list_iterator_ = client_list->insert(
      client_list->begin(), client_state);
  client_state->list_iterator_valid_ = true;
}

void GpuMemoryManager::RemoveClientFromList(
    GpuMemoryManagerClientState* client_state) {
  DCHECK(client_state->list_iterator_valid_);
  ClientStateList* client_list = GetClientList(client_state);
  client_list->erase(client_state->list_iterator_);
  client_state->list_iterator_valid_ = false;
}

}  // namespace content