/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_MEM_MAP_H_
#define ART_RUNTIME_MEM_MAP_H_

#include <stddef.h>
#include <sys/types.h>

#include <map>
#include <mutex>
#include <string>

#include "android-base/thread_annotations.h"

namespace art {

#if defined(__LP64__) && (defined(__aarch64__) || defined(__mips__) || defined(__APPLE__))
#define USE_ART_LOW_4G_ALLOCATOR 1
#else
#if defined(__LP64__) && !defined(__x86_64__)
#error "Unrecognized 64-bit architecture."
#endif
#define USE_ART_LOW_4G_ALLOCATOR 0
#endif

#ifdef __linux__
static constexpr bool kMadviseZeroes = true;
#define HAVE_MREMAP_SYSCALL true
#else
static constexpr bool kMadviseZeroes = false;
// We cannot ever perform MemMap::ReplaceWith on non-linux hosts since the syscall is not
// present.
#define HAVE_MREMAP_SYSCALL false
#endif

// Used to keep track of mmap segments.
//
// On 64b systems not supporting MAP_32BIT, the implementation of MemMap will do a linear scan
// for free pages. For security, the start of this scan should be randomized. This requires a
// dynamic initializer.
// For this to work, it is paramount that there are no other static initializers that access MemMap.
// Otherwise, calls might see uninitialized values.
class MemMap {
 public:
  static constexpr bool kCanReplaceMapping = HAVE_MREMAP_SYSCALL;

  // Replace the data in this memmap with the data in the memmap pointed to by source. The caller
  // relinquishes ownership of the source mmap.
  //
  // For the call to be successful:
  //   * The range [dest->Begin(), dest->Begin() + source->Size()] must not overlap with
  //     [source->Begin(), source->End()].
  //   * Neither source nor dest may be 'reused' mappings (they must own all the pages associated
  //     with them).
  //   * kCanReplaceMapping must be true.
  //   * Neither source nor dest may use manual redzones.
  //   * Both source and dest must have the same offset from the nearest page boundary.
  //   * mremap must succeed when called on the mappings.
  //
  // If this call succeeds it will return true and:
  //   * Deallocate *source.
  //   * Set *source to nullptr.
  //   * The protection of this will remain the same.
  //   * The size of this will be the size of the source.
  //   * The data in this will be the data from source.
  //
  // If this call fails it will return false and make no changes to *source or this. The ownership
  // of the source mmap is returned to the caller.
  bool ReplaceWith(/*in-out*/MemMap** source, /*out*/std::string* error);

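  // Illustrative sketch (not part of the API): typical use of ReplaceWith, assuming 'dest' and
  // 'source' are suitable MemMap pointers owned by the caller.
  //
  //   std::string error;
  //   MemMap* source = ...;  // Mapping whose contents should move into 'dest'.
  //   if (dest->ReplaceWith(&source, &error)) {
  //     // Success: *source was deallocated and set to nullptr; 'dest' now holds the data.
  //   } else {
  //     // Failure: 'source' is unchanged and remains owned by the caller; 'error' says why.
  //   }
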
  // Request an anonymous region of length 'byte_count' and a requested base address.
  // Use null as the requested base address if you don't care.
  // "reuse" allows re-mapping an address range from an existing mapping.
  //
  // The word "anonymous" in this context means "not backed by a file". The supplied
  // 'name' will be used -- on systems that support it -- to give the mapping
  // a name.
  //
  // On success, returns a MemMap instance.  On failure, returns null.
  static MemMap* MapAnonymous(const char* name,
                              uint8_t* addr,
                              size_t byte_count,
                              int prot,
                              bool low_4gb,
                              bool reuse,
                              std::string* error_msg,
                              bool use_ashmem = true);

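  // Illustrative sketch (not part of the API): a typical anonymous allocation. The size,
  // name, and protection values are arbitrary examples; PROT_* comes from <sys/mman.h>.
  //
  //   std::string error_msg;
  //   MemMap* map = MemMap::MapAnonymous("example region",
  //                                      /*addr*/ nullptr,     // No preferred address.
  //                                      /*byte_count*/ 4096,  // One page, for example.
  //                                      PROT_READ | PROT_WRITE,
  //                                      /*low_4gb*/ false,
  //                                      /*reuse*/ false,
  //                                      &error_msg);
  //   if (map == nullptr) {
  //     // Allocation failed; 'error_msg' describes the reason.
  //   }
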
  // Create placeholder for a region allocated by direct call to mmap.
  // This is useful when we do not have control over the code calling mmap,
  // but when we still want to keep track of it in the list.
  // The region is not considered to be owned and will not be unmapped.
  static MemMap* MapDummy(const char* name, uint8_t* addr, size_t byte_count);

  // Map part of a file, taking care of non-page aligned offsets.  The
  // "start" offset is absolute, not relative.
  //
  // On success, returns a MemMap instance.  On failure, returns null.
  static MemMap* MapFile(size_t byte_count,
                         int prot,
                         int flags,
                         int fd,
                         off_t start,
                         bool low_4gb,
                         const char* filename,
                         std::string* error_msg) {
    return MapFileAtAddress(nullptr,
                            byte_count,
                            prot,
                            flags,
                            fd,
                            start,
                            /*low_4gb*/low_4gb,
                            /*reuse*/false,
                            filename,
                            error_msg);
  }

  // Map part of a file, taking care of non-page aligned offsets.  The "start" offset is absolute,
  // not relative. This version allows requesting a specific address for the base of the mapping.
  // "reuse" allows us to create a view into an existing mapping where we do not take ownership of
  // the memory. If error_msg is null then we do not print /proc/maps to the log if
  // MapFileAtAddress fails. This helps improve performance of the fail case since reading and
  // printing /proc/maps takes several milliseconds in the worst case.
  //
  // On success, returns a MemMap instance.  On failure, returns null.
  static MemMap* MapFileAtAddress(uint8_t* addr,
                                  size_t byte_count,
                                  int prot,
                                  int flags,
                                  int fd,
                                  off_t start,
                                  bool low_4gb,
                                  bool reuse,
                                  const char* filename,
                                  std::string* error_msg);

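  // Illustrative sketch (not part of the API): mapping a file read-only through MapFile. The
  // descriptor, size, and name are hypothetical; PROT_* and MAP_* come from <sys/mman.h>.
  //
  //   std::string error_msg;
  //   MemMap* map = MemMap::MapFile(file_size,        // Bytes to map, e.g. from fstat().
  //                                 PROT_READ,
  //                                 MAP_PRIVATE,
  //                                 fd,                // An already-open file descriptor.
  //                                 /*start*/ 0,       // Absolute offset into the file.
  //                                 /*low_4gb*/ false,
  //                                 "example.file",    // Name used in error messages.
  //                                 &error_msg);
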
  // Releases the memory mapping.
  ~MemMap() REQUIRES(!MemMap::mem_maps_lock_);

  const std::string& GetName() const {
    return name_;
  }

  bool Sync();

  bool Protect(int prot);

  void MadviseDontNeedAndZero();

  int GetProtect() const {
    return prot_;
  }

  uint8_t* Begin() const {
    return begin_;
  }

  size_t Size() const {
    return size_;
  }

  // Resize the mem-map by unmapping pages at the end. Currently only supports shrinking.
  void SetSize(size_t new_size);

  uint8_t* End() const {
    return Begin() + Size();
  }

  void* BaseBegin() const {
    return base_begin_;
  }

  size_t BaseSize() const {
    return base_size_;
  }

  void* BaseEnd() const {
    return reinterpret_cast<uint8_t*>(BaseBegin()) + BaseSize();
  }

  bool HasAddress(const void* addr) const {
    return Begin() <= addr && addr < End();
  }

  // Unmap the pages at the end and remap them to create another memory map.
  MemMap* RemapAtEnd(uint8_t* new_end,
                     const char* tail_name,
                     int tail_prot,
                     std::string* error_msg,
                     bool use_ashmem = true);

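  // Illustrative sketch (not part of the API): splitting off the second half of an existing
  // mapping. The split point is an example and is assumed to be page-aligned inside the map.
  //
  //   uint8_t* new_end = map->Begin() + map->Size() / 2;  // Assumed page-aligned.
  //   std::string error_msg;
  //   MemMap* tail = map->RemapAtEnd(new_end, "example tail", PROT_READ, &error_msg);
  //   // On success, 'map' now ends at 'new_end' and 'tail' covers the remapped remainder.
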
  static bool CheckNoGaps(MemMap* begin_map, MemMap* end_map)
      REQUIRES(!MemMap::mem_maps_lock_);
  static void DumpMaps(std::ostream& os, bool terse = false)
      REQUIRES(!MemMap::mem_maps_lock_);

  // Init and Shutdown are NOT thread safe.
  // Both may be called multiple times and MemMap objects may be created any
  // time after the first call to Init and before the first call to Shutdown.
  static void Init() REQUIRES(!MemMap::mem_maps_lock_);
  static void Shutdown() REQUIRES(!MemMap::mem_maps_lock_);

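  // Illustrative sketch (not part of the API): the expected lifecycle around Init/Shutdown.
  //
  //   MemMap::Init();
  //   // ... create and use MemMap objects (MapAnonymous, MapFile, ...) ...
  //   // ... delete them before shutting down ...
  //   MemMap::Shutdown();
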
  // If the map is PROT_READ, try to read each page of the map to check it is in fact readable (not
  // faulting). This is used to diagnose a bug b/19894268 where mprotect doesn't seem to be working
  // intermittently.
  void TryReadable();

  // Align the map by unmapping the unaligned parts at the lower and the higher ends.
  void AlignBy(size_t size);

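  // Illustrative sketch (not part of the API): trimming a mapping so it starts and ends on a
  // 2 MiB boundary; the alignment value is an arbitrary example.
  //
  //   map->AlignBy(2 * 1024 * 1024);
  //   // The unaligned head and tail pages have been unmapped.
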
  // For annotation reasons.
  static std::mutex* GetMemMapsLock() RETURN_CAPABILITY(mem_maps_lock_) {
    return nullptr;
  }

 private:
  MemMap(const std::string& name,
         uint8_t* begin,
         size_t size,
         void* base_begin,
         size_t base_size,
         int prot,
         bool reuse,
         size_t redzone_size = 0) REQUIRES(!MemMap::mem_maps_lock_);

  static void DumpMapsLocked(std::ostream& os, bool terse)
      REQUIRES(MemMap::mem_maps_lock_);
  static bool HasMemMap(MemMap* map)
      REQUIRES(MemMap::mem_maps_lock_);
  static MemMap* GetLargestMemMapAt(void* address)
      REQUIRES(MemMap::mem_maps_lock_);
  static bool ContainedWithinExistingMap(uint8_t* ptr, size_t size, std::string* error_msg)
      REQUIRES(!MemMap::mem_maps_lock_);

  // Internal version of mmap that supports low 4gb emulation.
  static void* MapInternal(void* addr,
                           size_t length,
                           int prot,
                           int flags,
                           int fd,
                           off_t offset,
                           bool low_4gb)
      REQUIRES(!MemMap::mem_maps_lock_);
  static void* MapInternalArtLow4GBAllocator(size_t length,
                                             int prot,
                                             int flags,
                                             int fd,
                                             off_t offset)
      REQUIRES(!MemMap::mem_maps_lock_);

  const std::string name_;
  uint8_t* begin_;  // Start of data. May be changed by AlignBy.
  size_t size_;  // Length of data.

  void* base_begin_;  // Page-aligned base address. May be changed by AlignBy.
  size_t base_size_;  // Length of mapping. May be changed by RemapAtEnd (ie Zygote).
  int prot_;  // Protection of the map.

  // When reuse_ is true, this is just a view of an existing mapping
  // and we do not take ownership and are not responsible for
  // unmapping.
  const bool reuse_;

  // When already_unmapped_ is true the destructor will not call munmap.
  bool already_unmapped_;

  const size_t redzone_size_;

#if USE_ART_LOW_4G_ALLOCATOR
  static uintptr_t next_mem_pos_;   // Next memory location to check for low_4g extent.
#endif

  static std::mutex* mem_maps_lock_;

  friend class MemMapTest;  // To allow access to base_begin_ and base_size_.
};

std::ostream& operator<<(std::ostream& os, const MemMap& mem_map);

// Zero and release pages if possible, no requirements on alignments.
void ZeroAndReleasePages(void* address, size_t length);

}  // namespace art

#endif  // ART_RUNTIME_MEM_MAP_H_