// Home | History | Annotate | Download | only in memory
      1 // Copyright (c) 2013 The Chromium Authors. All rights reserved.
      2 // Use of this source code is governed by a BSD-style license that can be
      3 // found in the LICENSE file.
      4 
      5 #include "base/memory/discardable_memory_android.h"
      6 
      7 #include <sys/mman.h>
      8 #include <sys/resource.h>
      9 #include <sys/time.h>
     10 #include <unistd.h>
     11 
     12 #include <limits>
     13 
     14 #include "base/basictypes.h"
     15 #include "base/compiler_specific.h"
     16 #include "base/file_util.h"
     17 #include "base/lazy_instance.h"
     18 #include "base/logging.h"
     19 #include "base/memory/discardable_memory.h"
     20 #include "base/memory/discardable_memory_allocator_android.h"
     21 #include "base/synchronization/lock.h"
     22 #include "third_party/ashmem/ashmem.h"
     23 
     24 namespace base {
     25 namespace {
     26 
// Page size assumed for ashmem pin/unpin granularity; cross-checked against
// getpagesize() with a DCHECK in AlignToNextPage() below.
const size_t kPageSize = 4096;

// Name given to the ashmem region owned by the global discardable memory
// allocator.
const char kAshmemAllocatorName[] = "DiscardableMemoryAllocator";
     30 
     31 struct GlobalContext {
     32   GlobalContext()
     33       : ashmem_fd_limit(GetSoftFDLimit()),
     34         allocator(kAshmemAllocatorName),
     35         ashmem_fd_count_(0) {
     36   }
     37 
     38   const int ashmem_fd_limit;
     39   internal::DiscardableMemoryAllocator allocator;
     40   Lock lock;
     41 
     42   int ashmem_fd_count() const {
     43     lock.AssertAcquired();
     44     return ashmem_fd_count_;
     45   }
     46 
     47   void decrement_ashmem_fd_count() {
     48     lock.AssertAcquired();
     49     --ashmem_fd_count_;
     50   }
     51 
     52   void increment_ashmem_fd_count() {
     53     lock.AssertAcquired();
     54     ++ashmem_fd_count_;
     55   }
     56 
     57  private:
     58   static int GetSoftFDLimit() {
     59     struct rlimit limit_info;
     60     if (getrlimit(RLIMIT_NOFILE, &limit_info) != 0)
     61       return 128;
     62     // Allow 25% of file descriptor capacity for ashmem.
     63     return limit_info.rlim_cur / 4;
     64   }
     65 
     66   int ashmem_fd_count_;
     67 };
     68 
// Leaky: the context (notably its Lock) must outlive static destructors.
LazyInstance<GlobalContext>::Leaky g_context = LAZY_INSTANCE_INITIALIZER;
     70 
// This is the default implementation of DiscardableMemory on Android which is
// used when file descriptor usage is under the soft limit. When file
// descriptor usage gets too high the discardable memory allocator is used
// instead. See the size/FD-count condition in
// DiscardableMemory::CreateLockedMemory() below for more details.
class DiscardableMemoryAndroidSimple : public DiscardableMemory {
 public:
  // Takes ownership of |fd| and of the |size|-byte mapping at |address|;
  // both are released in the destructor via CloseAshmemRegion().
  DiscardableMemoryAndroidSimple(int fd, void* address, size_t size)
      : fd_(fd),
        memory_(address),
        size_(size) {
    DCHECK_GE(fd_, 0);
    DCHECK(memory_);
  }

  virtual ~DiscardableMemoryAndroidSimple() {
    internal::CloseAshmemRegion(fd_, size_, memory_);
  }

  // DiscardableMemory:
  virtual LockDiscardableMemoryStatus Lock() OVERRIDE {
    return internal::LockAshmemRegion(fd_, 0, size_, memory_);
  }

  virtual void Unlock() OVERRIDE {
    internal::UnlockAshmemRegion(fd_, 0, size_, memory_);
  }

  virtual void* Memory() const OVERRIDE {
    return memory_;
  }

 private:
  const int fd_;         // Ashmem region file descriptor; owned.
  void* const memory_;   // Base of the mapping; owned.
  const size_t size_;    // Mapping size in bytes.

  DISALLOW_COPY_AND_ASSIGN(DiscardableMemoryAndroidSimple);
};
    109 
    110 int GetCurrentNumberOfAshmemFDs() {
    111   AutoLock lock(g_context.Get().lock);
    112   return g_context.Get().ashmem_fd_count();
    113 }
    114 
    115 // Returns whether the provided size can be safely page-aligned (without causing
    116 // an overflow).
    117 bool CheckSizeCanBeAlignedToNextPage(size_t size) {
    118   return size <= std::numeric_limits<size_t>::max() - kPageSize + 1;
    119 }
    120 
    121 }  // namespace
    122 
    123 namespace internal {
    124 
    125 size_t AlignToNextPage(size_t size) {
    126   DCHECK_EQ(static_cast<int>(kPageSize), getpagesize());
    127   DCHECK(CheckSizeCanBeAlignedToNextPage(size));
    128   const size_t mask = ~(kPageSize - 1);
    129   return (size + kPageSize - 1) & mask;
    130 }
    131 
// Creates a new ashmem region of |size| bytes, maps it read/write, and on
// success stores the region's FD in |*out_fd| and the mapping address in
// |*out_address| (ownership of both passes to the caller). Returns false —
// leaving the out-params untouched — if the soft FD limit would be exceeded
// or any system call fails.
bool CreateAshmemRegion(const char* name,
                        size_t size,
                        int* out_fd,
                        void** out_address) {
  AutoLock lock(g_context.Get().lock);
  // Refuse to open another FD once the soft limit would be exceeded; the
  // caller falls back to the global allocator in that case.
  if (g_context.Get().ashmem_fd_count() + 1 > g_context.Get().ashmem_fd_limit)
    return false;
  int fd = ashmem_create_region(name, size);
  if (fd < 0) {
    DLOG(ERROR) << "ashmem_create_region() failed";
    return false;
  }
  // Ensure |fd| is closed on every early-return path below.
  file_util::ScopedFD fd_closer(&fd);

  const int err = ashmem_set_prot_region(fd, PROT_READ | PROT_WRITE);
  if (err < 0) {
    DLOG(ERROR) << "Error " << err << " when setting protection of ashmem";
    return false;
  }

  // There is a problem using MAP_PRIVATE here. As we are constantly calling
  // Lock() and Unlock(), data could get lost if they are not written to the
  // underlying file when Unlock() gets called.
  void* const address = mmap(
      NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
  if (address == MAP_FAILED) {
    DPLOG(ERROR) << "Failed to map memory.";
    return false;
  }

  // Success: hand the FD to the caller instead of letting ScopedFD close it.
  ignore_result(fd_closer.release());
  g_context.Get().increment_ashmem_fd_count();
  *out_fd = fd;
  *out_address = address;
  return true;
}
    168 
    169 bool CloseAshmemRegion(int fd, size_t size, void* address) {
    170   AutoLock lock(g_context.Get().lock);
    171   g_context.Get().decrement_ashmem_fd_count();
    172   if (munmap(address, size) == -1) {
    173     DPLOG(ERROR) << "Failed to unmap memory.";
    174     close(fd);
    175     return false;
    176   }
    177   return close(fd) == 0;
    178 }
    179 
    180 LockDiscardableMemoryStatus LockAshmemRegion(int fd,
    181                                              size_t off,
    182                                              size_t size,
    183                                              const void* address) {
    184   const int result = ashmem_pin_region(fd, off, size);
    185   DCHECK_EQ(0, mprotect(address, size, PROT_READ | PROT_WRITE));
    186   return result == ASHMEM_WAS_PURGED ?
    187       DISCARDABLE_MEMORY_PURGED : DISCARDABLE_MEMORY_SUCCESS;
    188 }
    189 
    190 bool UnlockAshmemRegion(int fd, size_t off, size_t size, const void* address) {
    191   const int failed = ashmem_unpin_region(fd, off, size);
    192   if (failed)
    193     DLOG(ERROR) << "Failed to unpin memory.";
    194   // This allows us to catch accesses to unlocked memory.
    195   DCHECK_EQ(0, mprotect(address, size, PROT_NONE));
    196   return !failed;
    197 }
    198 
    199 }  // namespace internal
    200 
// static
bool DiscardableMemory::SupportedNatively() {
  // Android always has ashmem, so native discardable memory is available.
  return true;
}
    205 
// Allocation can happen in two ways:
// - Each client-requested allocation is backed by an individual ashmem region.
// This allows deleting ashmem regions individually by closing the ashmem file
// descriptor. This is the default path that is taken when file descriptor
// usage allows us to do so or when the allocation size would require an
// entire ashmem region.
// - Allocations are performed by the global allocator when file descriptor
// usage gets too high. This still allows unpinning but does not allow deleting
// (i.e. releasing the physical pages backing) individual regions.
//
// TODO(pliard): consider tuning the size threshold used below. For instance we
// might want to make it a fraction of kMinAshmemRegionSize and also
// systematically have small allocations go through the allocator to let big
// allocations systematically go through individual ashmem regions.
//
// static
scoped_ptr<DiscardableMemory> DiscardableMemory::CreateLockedMemory(
    size_t size) {
  // Bail out (returning NULL) if page-aligning |size| would overflow.
  if (!CheckSizeCanBeAlignedToNextPage(size))
    return scoped_ptr<DiscardableMemory>();
  // Pinning & unpinning works with page granularity therefore align the size
  // upfront.
  const size_t aligned_size = internal::AlignToNextPage(size);
  // Note that the following code is slightly racy. The worst that can happen
  // in practice though is taking the wrong decision (e.g. using the allocator
  // rather than DiscardableMemoryAndroidSimple). Moreover keeping the lock
  // acquired for the whole allocation would cause a deadlock when the
  // allocator tries to create an ashmem region.
  const size_t kAllocatorRegionSize =
      internal::DiscardableMemoryAllocator::kMinAshmemRegionSize;
  GlobalContext* const global_context = g_context.Pointer();
  // Use an individual region when the request is large enough to fill one, or
  // while FD usage is below 90% of the soft limit; otherwise fall through to
  // the shared allocator.
  if (aligned_size >= kAllocatorRegionSize ||
      GetCurrentNumberOfAshmemFDs() < 0.9 * global_context->ashmem_fd_limit) {
    int fd;
    void* address;
    if (internal::CreateAshmemRegion("", aligned_size, &fd, &address)) {
      return scoped_ptr<DiscardableMemory>(
          new DiscardableMemoryAndroidSimple(fd, address, aligned_size));
    }
  }
  // Fallback: sub-allocate from a shared ashmem region. Note that the
  // allocator receives the original (unaligned) |size|.
  return global_context->allocator.Allocate(size);
}
    248 
// static
bool DiscardableMemory::PurgeForTestingSupported() {
  // There is no way to force-purge ashmem regions from user space on Android.
  return false;
}
    253 
// static
void DiscardableMemory::PurgeForTesting() {
  // See PurgeForTestingSupported(): purging is not supported on Android.
  NOTIMPLEMENTED();
}
    258 
    259 }  // namespace base
    260