      1 /*
      2  * Copyright (C) 2008 The Android Open Source Project
      3  *
      4  * Licensed under the Apache License, Version 2.0 (the "License");
      5  * you may not use this file except in compliance with the License.
      6  * You may obtain a copy of the License at
      7  *
      8  *      http://www.apache.org/licenses/LICENSE-2.0
      9  *
     10  * Unless required by applicable law or agreed to in writing, software
     11  * distributed under the License is distributed on an "AS IS" BASIS,
     12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
     13  * See the License for the specific language governing permissions and
     14  * limitations under the License.
     15  */
     16 
     17 #include "mem_map.h"
     18 
     19 #include <inttypes.h>
     20 #include <stdlib.h>
     21 #if !defined(ANDROID_OS) && !defined(__Fuchsia__) && !defined(_WIN32)
     22 #include <sys/resource.h>
     23 #endif
     24 
     25 #if defined(__linux__)
     26 #include <sys/prctl.h>
     27 #endif
     28 
     29 #include <map>
     30 #include <memory>
     31 #include <sstream>
     32 
     33 #include "android-base/stringprintf.h"
     34 #include "android-base/unique_fd.h"
     35 
     36 #include "allocator.h"
     37 #include "bit_utils.h"
     38 #include "globals.h"
     39 #include "logging.h"  // For VLOG_IS_ON.
     40 #include "memory_tool.h"
     41 #include "mman.h"  // For the PROT_* and MAP_* constants.
     42 #include "utils.h"
     43 
     44 #ifndef MAP_ANONYMOUS
     45 #define MAP_ANONYMOUS MAP_ANON
     46 #endif
     47 
     48 namespace art {
     49 
     50 using android::base::StringPrintf;
     51 using android::base::unique_fd;
     52 
     53 template<class Key, class T, AllocatorTag kTag, class Compare = std::less<Key>>
     54 using AllocationTrackingMultiMap =
     55     std::multimap<Key, T, Compare, TrackingAllocator<std::pair<const Key, T>, kTag>>;
     56 
     57 using Maps = AllocationTrackingMultiMap<void*, MemMap*, kAllocatorTagMaps>;
     58 
      59 // All the non-empty MemMaps. Use a multimap, as we do a reserve-and-divide (e.g. ElfMap::Load()).
     60 static Maps* gMaps GUARDED_BY(MemMap::GetMemMapsLock()) = nullptr;
     61 
     62 // A map containing unique strings used for indentifying anonymous mappings
     63 static std::map<std::string, int> debugStrMap GUARDED_BY(MemMap::GetMemMapsLock());
     64 
     65 // Retrieve iterator to a `gMaps` entry that is known to exist.
     66 Maps::iterator GetGMapsEntry(const MemMap& map) REQUIRES(MemMap::GetMemMapsLock()) {
     67   DCHECK(map.IsValid());
     68   DCHECK(gMaps != nullptr);
     69   for (auto it = gMaps->lower_bound(map.BaseBegin()), end = gMaps->end();
     70        it != end && it->first == map.BaseBegin();
     71        ++it) {
     72     if (it->second == &map) {
     73       return it;
     74     }
     75   }
     76   LOG(FATAL) << "MemMap not found";
     77   UNREACHABLE();
     78 }
     79 
     80 std::ostream& operator<<(std::ostream& os, const Maps& mem_maps) {
     81   os << "MemMap:" << std::endl;
     82   for (auto it = mem_maps.begin(); it != mem_maps.end(); ++it) {
     83     void* base = it->first;
     84     MemMap* map = it->second;
     85     CHECK_EQ(base, map->BaseBegin());
     86     os << *map << std::endl;
     87   }
     88   return os;
     89 }
     90 
     91 std::mutex* MemMap::mem_maps_lock_ = nullptr;
     92 
     93 #if USE_ART_LOW_4G_ALLOCATOR
      94 // Handling mem_map in the 32-bit address range for 64-bit architectures that do not support MAP_32BIT.
     95 
     96 // The regular start of memory allocations. The first 64KB is protected by SELinux.
     97 static constexpr uintptr_t LOW_MEM_START = 64 * KB;
     98 
     99 // Generate random starting position.
     100 // To avoid interfering with the image position, take the image's address and only place it below. Current
    101 // formula (sketch):
    102 //
    103 // ART_BASE_ADDR      = 0001XXXXXXXXXXXXXXX
    104 // ----------------------------------------
    105 //                    = 0000111111111111111
    106 // & ~(kPageSize - 1) =~0000000000000001111
    107 // ----------------------------------------
    108 // mask               = 0000111111111110000
    109 // & random data      = YYYYYYYYYYYYYYYYYYY
    110 // -----------------------------------
    111 // tmp                = 0000YYYYYYYYYYY0000
    112 // + LOW_MEM_START    = 0000000000001000000
    113 // --------------------------------------
    114 // start
    115 //
    116 // arc4random as an entropy source is exposed in Bionic, but not in glibc. When we
    117 // do not have Bionic, simply start with LOW_MEM_START.
    118 
    119 // Function is standalone so it can be tested somewhat in mem_map_test.cc.
    120 #ifdef __BIONIC__
    121 uintptr_t CreateStartPos(uint64_t input) {
    122   CHECK_NE(0, ART_BASE_ADDRESS);
    123 
    124   // Start with all bits below highest bit in ART_BASE_ADDRESS.
    125   constexpr size_t leading_zeros = CLZ(static_cast<uint32_t>(ART_BASE_ADDRESS));
    126   constexpr uintptr_t mask_ones = (1 << (31 - leading_zeros)) - 1;
    127 
     128   // The lowest bits (usually 12) are not used, as allocations are page-aligned.
    129   constexpr uintptr_t mask = mask_ones & ~(kPageSize - 1);
    130 
    131   // Mask input data.
    132   return (input & mask) + LOW_MEM_START;
    133 }
    134 #endif
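
// A worked example of CreateStartPos(), assuming a hypothetical ART_BASE_ADDRESS of
// 0x70000000 and 4 KiB pages (neither value is guaranteed by this file):
//
//   leading_zeros = CLZ(0x70000000) = 1
//   mask_ones     = (1 << (31 - 1)) - 1 = 0x3fffffff
//   mask          = 0x3fffffff & ~0xfff = 0x3ffff000
//   CreateStartPos(input) = (input & 0x3ffff000) + LOW_MEM_START
//                         = (input & 0x3ffff000) + 0x10000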
    135 
    136 static uintptr_t GenerateNextMemPos() {
    137 #ifdef __BIONIC__
    138   uint64_t random_data;
    139   arc4random_buf(&random_data, sizeof(random_data));
    140   return CreateStartPos(random_data);
    141 #else
    142   // No arc4random on host, see above.
    143   return LOW_MEM_START;
    144 #endif
    145 }
    146 
    147 // Initialize linear scan to random position.
    148 uintptr_t MemMap::next_mem_pos_ = GenerateNextMemPos();
    149 #endif
    150 
     151 // Return true if the address range is contained in a single memory map, by either reading
     152 // the gMaps variable or the /proc/self/maps entries.
    153 bool MemMap::ContainedWithinExistingMap(uint8_t* ptr, size_t size, std::string* error_msg) {
    154   uintptr_t begin = reinterpret_cast<uintptr_t>(ptr);
    155   uintptr_t end = begin + size;
    156 
    157   {
    158     std::lock_guard<std::mutex> mu(*mem_maps_lock_);
    159     for (auto& pair : *gMaps) {
    160       MemMap* const map = pair.second;
    161       if (begin >= reinterpret_cast<uintptr_t>(map->Begin()) &&
    162           end <= reinterpret_cast<uintptr_t>(map->End())) {
    163         return true;
    164       }
    165     }
    166   }
    167 
    168   if (error_msg != nullptr) {
    169     PrintFileToLog("/proc/self/maps", LogSeverity::ERROR);
    170     *error_msg = StringPrintf("Requested region 0x%08" PRIxPTR "-0x%08" PRIxPTR " does not overlap "
    171                               "any existing map. See process maps in the log.", begin, end);
    172   }
    173   return false;
    174 }
    175 
     176 // CheckMapRequest validates a non-MAP_FAILED mmap result against the
     177 // expected value, calling munmap and reporting the reason in error_msg
     178 // if validation fails.
     179 //
     180 // If expected_ptr is null, nothing is checked beyond the fact that
     181 // actual_ptr is not MAP_FAILED. However, if expected_ptr is non-null,
     182 // we check that actual_ptr == expected_ptr and, if not, report in
     183 // error_msg the conflicting mapping if one was found, or a generic
     184 // error otherwise.
    185 bool MemMap::CheckMapRequest(uint8_t* expected_ptr, void* actual_ptr, size_t byte_count,
    186                             std::string* error_msg) {
    187   // Handled first by caller for more specific error messages.
    188   CHECK(actual_ptr != MAP_FAILED);
    189 
    190   if (expected_ptr == nullptr) {
    191     return true;
    192   }
    193 
    194   uintptr_t actual = reinterpret_cast<uintptr_t>(actual_ptr);
    195   uintptr_t expected = reinterpret_cast<uintptr_t>(expected_ptr);
    196 
    197   if (expected_ptr == actual_ptr) {
    198     return true;
    199   }
    200 
    201   // We asked for an address but didn't get what we wanted, all paths below here should fail.
    202   int result = TargetMUnmap(actual_ptr, byte_count);
    203   if (result == -1) {
    204     PLOG(WARNING) << StringPrintf("munmap(%p, %zd) failed", actual_ptr, byte_count);
    205   }
    206 
    207   if (error_msg != nullptr) {
     208     // We call this here so that we can try to generate a full error
     209     // message with the overlapping mapping. There's no guarantee that
     210     // there will be an overlap, though, since
     211     // - the kernel is not *required* to honor expected_ptr unless MAP_FIXED is
     212     //   used, even if there is no overlap, and
     213     // - there might have been an overlap at the point of mmap, but the
     214     //   overlapping region has since been unmapped.
    215 
    216     // Tell the client the mappings that were in place at the time.
    217     if (kIsDebugBuild) {
    218       PrintFileToLog("/proc/self/maps", LogSeverity::WARNING);
    219     }
    220 
    221     std::ostringstream os;
    222     os <<  StringPrintf("Failed to mmap at expected address, mapped at "
    223                         "0x%08" PRIxPTR " instead of 0x%08" PRIxPTR,
    224                         actual, expected);
    225     *error_msg = os.str();
    226   }
    227   return false;
    228 }
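
// A minimal sketch of why this validation is needed, using plain POSIX mmap semantics
// (the hint value below is made up): without MAP_FIXED, the requested address is only
// advisory and the kernel may place the mapping elsewhere.
//
//   void* hint = reinterpret_cast<void*>(0x71000000);
//   void* got = mmap(hint, kPageSize, PROT_READ | PROT_WRITE,
//                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
//   // `got` may legally differ from `hint`; CheckMapRequest() detects that case,
//   // unmaps the result and reports the mismatch in error_msg.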
    229 
    230 bool MemMap::CheckReservation(uint8_t* expected_ptr,
    231                               size_t byte_count,
    232                               const char* name,
    233                               const MemMap& reservation,
    234                               /*out*/std::string* error_msg) {
    235   if (!reservation.IsValid()) {
    236     *error_msg = StringPrintf("Invalid reservation for %s", name);
    237     return false;
    238   }
    239   DCHECK_ALIGNED(reservation.Begin(), kPageSize);
    240   if (reservation.Begin() != expected_ptr) {
    241     *error_msg = StringPrintf("Bad image reservation start for %s: %p instead of %p",
    242                               name,
    243                               reservation.Begin(),
    244                               expected_ptr);
    245     return false;
    246   }
    247   if (byte_count > reservation.Size()) {
    248     *error_msg = StringPrintf("Insufficient reservation, required %zu, available %zu",
    249                               byte_count,
    250                               reservation.Size());
    251     return false;
    252   }
    253   return true;
    254 }
    255 
    256 
    257 #if USE_ART_LOW_4G_ALLOCATOR
    258 void* MemMap::TryMemMapLow4GB(void* ptr,
     259                               size_t page_aligned_byte_count,
     260                               int prot,
     261                               int flags,
     262                               int fd,
     263                               off_t offset) {
    264   void* actual = TargetMMap(ptr, page_aligned_byte_count, prot, flags, fd, offset);
    265   if (actual != MAP_FAILED) {
    266     // Since we didn't use MAP_FIXED the kernel may have mapped it somewhere not in the low
    267     // 4GB. If this is the case, unmap and retry.
    268     if (reinterpret_cast<uintptr_t>(actual) + page_aligned_byte_count >= 4 * GB) {
    269       TargetMUnmap(actual, page_aligned_byte_count);
    270       actual = MAP_FAILED;
    271     }
    272   }
    273   return actual;
    274 }
    275 #endif
    276 
    277 void MemMap::SetDebugName(void* map_ptr, const char* name, size_t size) {
    278   // Debug naming is only used for Android target builds. For Linux targets,
     279   // we'll still call prctl, but it won't do anything until we upstream the prctl.
    280   if (kIsTargetFuchsia || !kIsTargetBuild) {
    281     return;
    282   }
    283 
     284   // Lock as std::map is not thread-safe.
    285   std::lock_guard<std::mutex> mu(*mem_maps_lock_);
    286 
    287   std::string debug_friendly_name("dalvik-");
    288   debug_friendly_name += name;
    289   auto it = debugStrMap.find(debug_friendly_name);
    290 
    291   if (it == debugStrMap.end()) {
    292     it = debugStrMap.insert(std::make_pair(std::move(debug_friendly_name), 1)).first;
    293   }
    294 
    295   DCHECK(it != debugStrMap.end());
    296 #if defined(PR_SET_VMA) && defined(__linux__)
    297   prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, map_ptr, size, it->first.c_str());
    298 #else
     299   // Prevent unused-variable compiler errors.
    300   UNUSED(map_ptr, size);
    301 #endif
    302 }
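
// On kernels that support PR_SET_VMA_ANON_NAME, the name set above shows up in
// /proc/self/maps roughly like this (addresses and name are made up):
//
//   7a1c2f000000-7a1c2f040000 rw-p 00000000 00:00 0    [anon:dalvik-example space]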
    303 
    304 MemMap MemMap::MapAnonymous(const char* name,
    305                             uint8_t* addr,
    306                             size_t byte_count,
    307                             int prot,
    308                             bool low_4gb,
    309                             bool reuse,
    310                             /*inout*/MemMap* reservation,
    311                             /*out*/std::string* error_msg,
    312                             bool use_debug_name) {
    313 #ifndef __LP64__
    314   UNUSED(low_4gb);
    315 #endif
    316   if (byte_count == 0) {
    317     *error_msg = "Empty MemMap requested.";
    318     return Invalid();
    319   }
    320   size_t page_aligned_byte_count = RoundUp(byte_count, kPageSize);
    321 
    322   int flags = MAP_PRIVATE | MAP_ANONYMOUS;
    323   if (reuse) {
    324     // reuse means it is okay that it overlaps an existing page mapping.
    325     // Only use this if you actually made the page reservation yourself.
    326     CHECK(addr != nullptr);
    327     DCHECK(reservation == nullptr);
    328 
    329     DCHECK(ContainedWithinExistingMap(addr, byte_count, error_msg)) << *error_msg;
    330     flags |= MAP_FIXED;
    331   } else if (reservation != nullptr) {
    332     CHECK(addr != nullptr);
    333     if (!CheckReservation(addr, byte_count, name, *reservation, error_msg)) {
    334       return MemMap::Invalid();
    335     }
    336     flags |= MAP_FIXED;
    337   }
    338 
    339   unique_fd fd;
    340 
     341   // We need to store and potentially set an error number for pretty-printing of errors.
    342   int saved_errno = 0;
    343 
    344   void* actual = MapInternal(addr,
    345                              page_aligned_byte_count,
    346                              prot,
    347                              flags,
    348                              fd.get(),
    349                              0,
    350                              low_4gb);
    351   saved_errno = errno;
    352 
    353   if (actual == MAP_FAILED) {
    354     if (error_msg != nullptr) {
    355       if (kIsDebugBuild || VLOG_IS_ON(oat)) {
    356         PrintFileToLog("/proc/self/maps", LogSeverity::WARNING);
    357       }
    358 
    359       *error_msg = StringPrintf("Failed anonymous mmap(%p, %zd, 0x%x, 0x%x, %d, 0): %s. "
    360                                     "See process maps in the log.",
    361                                 addr,
    362                                 page_aligned_byte_count,
    363                                 prot,
    364                                 flags,
    365                                 fd.get(),
    366                                 strerror(saved_errno));
    367     }
    368     return Invalid();
    369   }
    370   if (!CheckMapRequest(addr, actual, page_aligned_byte_count, error_msg)) {
    371     return Invalid();
    372   }
    373 
    374   if (use_debug_name) {
    375     SetDebugName(actual, name, page_aligned_byte_count);
    376   }
    377 
    378   if (reservation != nullptr) {
     379     // Re-mapping was successful; transfer ownership of the memory to the new MemMap.
    380     DCHECK_EQ(actual, reservation->Begin());
    381     reservation->ReleaseReservedMemory(byte_count);
    382   }
    383   return MemMap(name,
    384                 reinterpret_cast<uint8_t*>(actual),
    385                 byte_count,
    386                 actual,
    387                 page_aligned_byte_count,
    388                 prot,
    389                 reuse);
    390 }
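
// A minimal usage sketch for MapAnonymous() (the map name and size are arbitrary; the
// header may provide defaults for the trailing parameters):
//
//   std::string error_msg;
//   MemMap map = MemMap::MapAnonymous("example-map",
//                                     /* addr= */ nullptr,
//                                     16 * kPageSize,
//                                     PROT_READ | PROT_WRITE,
//                                     /* low_4gb= */ false,
//                                     /* reuse= */ false,
//                                     /* reservation= */ nullptr,
//                                     &error_msg,
//                                     /* use_debug_name= */ true);
//   CHECK(map.IsValid()) << error_msg;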
    391 
    392 MemMap MemMap::MapDummy(const char* name, uint8_t* addr, size_t byte_count) {
    393   if (byte_count == 0) {
    394     return Invalid();
    395   }
    396   const size_t page_aligned_byte_count = RoundUp(byte_count, kPageSize);
    397   return MemMap(name, addr, byte_count, addr, page_aligned_byte_count, 0, /* reuse= */ true);
    398 }
    399 
    400 template<typename A, typename B>
    401 static ptrdiff_t PointerDiff(A* a, B* b) {
    402   return static_cast<ptrdiff_t>(reinterpret_cast<intptr_t>(a) - reinterpret_cast<intptr_t>(b));
    403 }
    404 
    405 bool MemMap::ReplaceWith(MemMap* source, /*out*/std::string* error) {
    406 #if !HAVE_MREMAP_SYSCALL
    407   UNUSED(source);
    408   *error = "Cannot perform atomic replace because we are missing the required mremap syscall";
    409   return false;
    410 #else  // !HAVE_MREMAP_SYSCALL
    411   CHECK(source != nullptr);
    412   CHECK(source->IsValid());
    413   if (!MemMap::kCanReplaceMapping) {
    414     *error = "Unable to perform atomic replace due to runtime environment!";
    415     return false;
    416   }
     417   // Neither mapping can be a reuse mapping.
    418   if (source->reuse_ || reuse_) {
    419     *error = "One or both mappings is not a real mmap!";
    420     return false;
    421   }
     422   // TODO: Support redzones.
    423   if (source->redzone_size_ != 0 || redzone_size_ != 0) {
    424     *error = "source and dest have different redzone sizes";
    425     return false;
    426   }
    427   // Make sure they have the same offset from the actual mmap'd address
    428   if (PointerDiff(BaseBegin(), Begin()) != PointerDiff(source->BaseBegin(), source->Begin())) {
    429     *error =
    430         "source starts at a different offset from the mmap. Cannot atomically replace mappings";
    431     return false;
    432   }
     433   // mremap doesn't allow the new [start, end] range to overlap the old [start, end] range
     434   // (similar to memcpy's restriction, except that here the check is explicit and enforced).
    435   if (source->BaseBegin() > BaseBegin() &&
    436       reinterpret_cast<uint8_t*>(BaseBegin()) + source->BaseSize() >
    437       reinterpret_cast<uint8_t*>(source->BaseBegin())) {
    438     *error = "destination memory pages overlap with source memory pages";
    439     return false;
    440   }
    441   // Change the protection to match the new location.
    442   int old_prot = source->GetProtect();
    443   if (!source->Protect(GetProtect())) {
    444     *error = "Could not change protections for source to those required for dest.";
    445     return false;
    446   }
    447 
    448   // Do the mremap.
    449   void* res = mremap(/*old_address*/source->BaseBegin(),
    450                      /*old_size*/source->BaseSize(),
    451                      /*new_size*/source->BaseSize(),
    452                      /*flags*/MREMAP_MAYMOVE | MREMAP_FIXED,
    453                      /*new_address*/BaseBegin());
    454   if (res == MAP_FAILED) {
    455     int saved_errno = errno;
    456     // Wasn't able to move mapping. Change the protection of source back to the original one and
    457     // return.
    458     source->Protect(old_prot);
    459     *error = std::string("Failed to mremap source to dest. Error was ") + strerror(saved_errno);
    460     return false;
    461   }
    462   CHECK(res == BaseBegin());
    463 
    464   // The new base_size is all the pages of the 'source' plus any remaining dest pages. We will unmap
    465   // them later.
    466   size_t new_base_size = std::max(source->base_size_, base_size_);
    467 
    468   // Invalidate *source, don't unmap it though since it is already gone.
    469   size_t source_size = source->size_;
    470   source->Invalidate();
    471 
    472   size_ = source_size;
    473   base_size_ = new_base_size;
    474   // Reduce base_size if needed (this will unmap the extra pages).
    475   SetSize(source_size);
    476 
    477   return true;
    478 #endif  // !HAVE_MREMAP_SYSCALL
    479 }
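
// A minimal usage sketch for ReplaceWith(), assuming `dest` and `source` are two valid,
// non-reuse mappings (both names are hypothetical):
//
//   std::string error;
//   if (!dest.ReplaceWith(&source, &error)) {
//     LOG(WARNING) << "Could not move mapping: " << error;  // Fall back to copying.
//   } else {
//     // `source` has been invalidated; `dest` now covers the moved pages and has been
//     // resized to the source's size.
//   }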
    480 
    481 MemMap MemMap::MapFileAtAddress(uint8_t* expected_ptr,
    482                                 size_t byte_count,
    483                                 int prot,
    484                                 int flags,
    485                                 int fd,
    486                                 off_t start,
    487                                 bool low_4gb,
    488                                 const char* filename,
    489                                 bool reuse,
    490                                 /*inout*/MemMap* reservation,
    491                                 /*out*/std::string* error_msg) {
    492   CHECK_NE(0, prot);
    493   CHECK_NE(0, flags & (MAP_SHARED | MAP_PRIVATE));
    494 
    495   // Note that we do not allow MAP_FIXED unless reuse == true or we have an existing
     496   // reservation, i.e. we expect this mapping to be contained within an existing map.
    497   if (reuse) {
    498     // reuse means it is okay that it overlaps an existing page mapping.
    499     // Only use this if you actually made the page reservation yourself.
    500     CHECK(expected_ptr != nullptr);
    501     DCHECK(reservation == nullptr);
    502     DCHECK(error_msg != nullptr);
    503     DCHECK(ContainedWithinExistingMap(expected_ptr, byte_count, error_msg))
    504         << ((error_msg != nullptr) ? *error_msg : std::string());
    505     flags |= MAP_FIXED;
    506   } else if (reservation != nullptr) {
    507     DCHECK(error_msg != nullptr);
    508     if (!CheckReservation(expected_ptr, byte_count, filename, *reservation, error_msg)) {
    509       return Invalid();
    510     }
    511     flags |= MAP_FIXED;
    512   } else {
    513     CHECK_EQ(0, flags & MAP_FIXED);
    514     // Don't bother checking for an overlapping region here. We'll
    515     // check this if required after the fact inside CheckMapRequest.
    516   }
    517 
    518   if (byte_count == 0) {
    519     *error_msg = "Empty MemMap requested";
    520     return Invalid();
    521   }
    522   // Adjust 'offset' to be page-aligned as required by mmap.
    523   int page_offset = start % kPageSize;
    524   off_t page_aligned_offset = start - page_offset;
    525   // Adjust 'byte_count' to be page-aligned as we will map this anyway.
    526   size_t page_aligned_byte_count = RoundUp(byte_count + page_offset, kPageSize);
     527   // The 'expected_ptr' is modified (if specified, i.e. non-null) to be page-aligned to the file
     528   // but not necessarily to virtual memory. mmap will page-align 'expected' for us.
    529   uint8_t* page_aligned_expected =
    530       (expected_ptr == nullptr) ? nullptr : (expected_ptr - page_offset);
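  // For example (illustrative, with 4 KiB pages): start = 0x1234 and byte_count = 0x100 give
  // page_offset = 0x234, page_aligned_offset = 0x1000 and page_aligned_byte_count =
  // RoundUp(0x100 + 0x234, kPageSize) = 0x1000, so the file is mapped from offset 0x1000 and
  // Begin() ends up 0x234 bytes past the page-aligned mapping address.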
    531 
    532   size_t redzone_size = 0;
    533   if (kRunningOnMemoryTool && kMemoryToolAddsRedzones && expected_ptr == nullptr) {
    534     redzone_size = kPageSize;
    535     page_aligned_byte_count += redzone_size;
    536   }
    537 
    538   uint8_t* actual = reinterpret_cast<uint8_t*>(MapInternal(page_aligned_expected,
    539                                                            page_aligned_byte_count,
    540                                                            prot,
    541                                                            flags,
    542                                                            fd,
    543                                                            page_aligned_offset,
    544                                                            low_4gb));
    545   if (actual == MAP_FAILED) {
    546     if (error_msg != nullptr) {
    547       auto saved_errno = errno;
    548 
    549       if (kIsDebugBuild || VLOG_IS_ON(oat)) {
    550         PrintFileToLog("/proc/self/maps", LogSeverity::WARNING);
    551       }
    552 
    553       *error_msg = StringPrintf("mmap(%p, %zd, 0x%x, 0x%x, %d, %" PRId64
    554                                 ") of file '%s' failed: %s. See process maps in the log.",
    555                                 page_aligned_expected, page_aligned_byte_count, prot, flags, fd,
    556                                 static_cast<int64_t>(page_aligned_offset), filename,
    557                                 strerror(saved_errno));
    558     }
    559     return Invalid();
    560   }
    561   if (!CheckMapRequest(expected_ptr, actual, page_aligned_byte_count, error_msg)) {
    562     return Invalid();
    563   }
    564   if (redzone_size != 0) {
     565     const uint8_t* real_start = actual + page_offset;
     566     const uint8_t* real_end = actual + page_offset + byte_count;
     567     const uint8_t* mapping_end = actual + page_aligned_byte_count;
    568 
    569     MEMORY_TOOL_MAKE_NOACCESS(actual, real_start - actual);
    570     MEMORY_TOOL_MAKE_NOACCESS(real_end, mapping_end - real_end);
    571     page_aligned_byte_count -= redzone_size;
    572   }
    573 
    574   if (reservation != nullptr) {
     575     // Re-mapping was successful; transfer ownership of the memory to the new MemMap.
    576     DCHECK_EQ(actual, reservation->Begin());
    577     reservation->ReleaseReservedMemory(byte_count);
    578   }
    579   return MemMap(filename,
    580                 actual + page_offset,
    581                 byte_count,
    582                 actual,
    583                 page_aligned_byte_count,
    584                 prot,
    585                 reuse,
    586                 redzone_size);
    587 }
    588 
    589 MemMap::MemMap(MemMap&& other) noexcept
    590     : MemMap() {
    591   swap(other);
    592 }
    593 
    594 MemMap::~MemMap() {
    595   Reset();
    596 }
    597 
    598 void MemMap::DoReset() {
    599   DCHECK(IsValid());
    600 
    601   // Unlike Valgrind, AddressSanitizer requires that all manually poisoned memory is unpoisoned
    602   // before it is returned to the system.
    603   if (redzone_size_ != 0) {
    604     MEMORY_TOOL_MAKE_UNDEFINED(
    605         reinterpret_cast<char*>(base_begin_) + base_size_ - redzone_size_,
    606         redzone_size_);
    607   }
    608 
    609   if (!reuse_) {
    610     MEMORY_TOOL_MAKE_UNDEFINED(base_begin_, base_size_);
    611     if (!already_unmapped_) {
    612       int result = TargetMUnmap(base_begin_, base_size_);
    613       if (result == -1) {
    614         PLOG(FATAL) << "munmap failed";
    615       }
    616     }
    617   }
    618 
    619   Invalidate();
    620 }
    621 
    622 void MemMap::Invalidate() {
    623   DCHECK(IsValid());
    624 
    625   // Remove it from gMaps.
    626   std::lock_guard<std::mutex> mu(*mem_maps_lock_);
    627   auto it = GetGMapsEntry(*this);
    628   gMaps->erase(it);
    629 
    630   // Mark it as invalid.
    631   base_size_ = 0u;
    632   DCHECK(!IsValid());
    633 }
    634 
    635 void MemMap::swap(MemMap& other) {
    636   if (IsValid() || other.IsValid()) {
    637     std::lock_guard<std::mutex> mu(*mem_maps_lock_);
    638     DCHECK(gMaps != nullptr);
    639     auto this_it = IsValid() ? GetGMapsEntry(*this) : gMaps->end();
    640     auto other_it = other.IsValid() ? GetGMapsEntry(other) : gMaps->end();
    641     if (IsValid()) {
    642       DCHECK(this_it != gMaps->end());
    643       DCHECK_EQ(this_it->second, this);
    644       this_it->second = &other;
    645     }
    646     if (other.IsValid()) {
    647       DCHECK(other_it != gMaps->end());
    648       DCHECK_EQ(other_it->second, &other);
    649       other_it->second = this;
    650     }
    651     // Swap members with the `mem_maps_lock_` held so that `base_begin_` matches
    652     // with the `gMaps` key when other threads try to use `gMaps`.
    653     SwapMembers(other);
    654   } else {
    655     SwapMembers(other);
    656   }
    657 }
    658 
    659 void MemMap::SwapMembers(MemMap& other) {
    660   name_.swap(other.name_);
    661   std::swap(begin_, other.begin_);
    662   std::swap(size_, other.size_);
    663   std::swap(base_begin_, other.base_begin_);
    664   std::swap(base_size_, other.base_size_);
    665   std::swap(prot_, other.prot_);
    666   std::swap(reuse_, other.reuse_);
    667   std::swap(already_unmapped_, other.already_unmapped_);
    668   std::swap(redzone_size_, other.redzone_size_);
    669 }
    670 
    671 MemMap::MemMap(const std::string& name, uint8_t* begin, size_t size, void* base_begin,
    672                size_t base_size, int prot, bool reuse, size_t redzone_size)
    673     : name_(name), begin_(begin), size_(size), base_begin_(base_begin), base_size_(base_size),
    674       prot_(prot), reuse_(reuse), already_unmapped_(false), redzone_size_(redzone_size) {
    675   if (size_ == 0) {
    676     CHECK(begin_ == nullptr);
    677     CHECK(base_begin_ == nullptr);
    678     CHECK_EQ(base_size_, 0U);
    679   } else {
    680     CHECK(begin_ != nullptr);
    681     CHECK(base_begin_ != nullptr);
    682     CHECK_NE(base_size_, 0U);
    683 
    684     // Add it to gMaps.
    685     std::lock_guard<std::mutex> mu(*mem_maps_lock_);
    686     DCHECK(gMaps != nullptr);
    687     gMaps->insert(std::make_pair(base_begin_, this));
    688   }
    689 }
    690 
    691 MemMap MemMap::RemapAtEnd(uint8_t* new_end,
    692                           const char* tail_name,
    693                           int tail_prot,
    694                           std::string* error_msg,
    695                           bool use_debug_name) {
    696   return RemapAtEnd(new_end,
    697                     tail_name,
    698                     tail_prot,
    699                     MAP_PRIVATE | MAP_FIXED | MAP_ANONYMOUS,
    700                     /* fd= */ -1,
    701                     /* offset= */ 0,
    702                     error_msg,
    703                     use_debug_name);
    704 }
    705 
    706 MemMap MemMap::RemapAtEnd(uint8_t* new_end,
    707                           const char* tail_name,
    708                           int tail_prot,
    709                           int flags,
    710                           int fd,
    711                           off_t offset,
    712                           std::string* error_msg,
    713                           bool use_debug_name) {
    714   DCHECK_GE(new_end, Begin());
    715   DCHECK_LE(new_end, End());
    716   DCHECK_LE(begin_ + size_, reinterpret_cast<uint8_t*>(base_begin_) + base_size_);
    717   DCHECK_ALIGNED(begin_, kPageSize);
    718   DCHECK_ALIGNED(base_begin_, kPageSize);
    719   DCHECK_ALIGNED(reinterpret_cast<uint8_t*>(base_begin_) + base_size_, kPageSize);
    720   DCHECK_ALIGNED(new_end, kPageSize);
    721   uint8_t* old_end = begin_ + size_;
    722   uint8_t* old_base_end = reinterpret_cast<uint8_t*>(base_begin_) + base_size_;
    723   uint8_t* new_base_end = new_end;
    724   DCHECK_LE(new_base_end, old_base_end);
    725   if (new_base_end == old_base_end) {
    726     return Invalid();
    727   }
    728   size_t new_size = new_end - reinterpret_cast<uint8_t*>(begin_);
    729   size_t new_base_size = new_base_end - reinterpret_cast<uint8_t*>(base_begin_);
    730   DCHECK_LE(begin_ + new_size, reinterpret_cast<uint8_t*>(base_begin_) + new_base_size);
    731   size_t tail_size = old_end - new_end;
    732   uint8_t* tail_base_begin = new_base_end;
    733   size_t tail_base_size = old_base_end - new_base_end;
    734   DCHECK_EQ(tail_base_begin + tail_base_size, old_base_end);
    735   DCHECK_ALIGNED(tail_base_size, kPageSize);
    736 
    737   MEMORY_TOOL_MAKE_UNDEFINED(tail_base_begin, tail_base_size);
     738   // Note: Do not explicitly unmap the tail region; mmap() with MAP_FIXED automatically
    739   // removes old mappings for the overlapping region. This makes the operation atomic
    740   // and prevents other threads from racing to allocate memory in the requested region.
    741   uint8_t* actual = reinterpret_cast<uint8_t*>(TargetMMap(tail_base_begin,
    742                                                           tail_base_size,
    743                                                           tail_prot,
    744                                                           flags,
    745                                                           fd,
    746                                                           offset));
    747   if (actual == MAP_FAILED) {
    748     PrintFileToLog("/proc/self/maps", LogSeverity::WARNING);
    749     *error_msg = StringPrintf("map(%p, %zd, 0x%x, 0x%x, %d, 0) failed. See process "
    750                               "maps in the log.", tail_base_begin, tail_base_size, tail_prot, flags,
    751                               fd);
    752     return Invalid();
    753   }
    754   // Update *this.
    755   if (new_base_size == 0u) {
    756     std::lock_guard<std::mutex> mu(*mem_maps_lock_);
    757     auto it = GetGMapsEntry(*this);
    758     gMaps->erase(it);
    759   }
    760 
    761   if (use_debug_name) {
    762     SetDebugName(actual, tail_name, tail_base_size);
    763   }
    764 
    765   size_ = new_size;
    766   base_size_ = new_base_size;
    767   // Return the new mapping.
    768   return MemMap(tail_name, actual, tail_size, actual, tail_base_size, tail_prot, false);
    769 }
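
// A minimal usage sketch for RemapAtEnd(), assuming `map` is a valid 4-page anonymous
// mapping (the split point and names are hypothetical):
//
//   std::string error_msg;
//   uint8_t* split = map.Begin() + 2 * kPageSize;
//   MemMap tail = map.RemapAtEnd(split, "example-tail", PROT_READ, &error_msg,
//                                /* use_debug_name= */ true);
//   CHECK(tail.IsValid()) << error_msg;
//   // `map` now ends at `split`; `tail` covers the remaining two pages with PROT_READ.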
    770 
    771 MemMap MemMap::TakeReservedMemory(size_t byte_count) {
    772   uint8_t* begin = Begin();
    773   ReleaseReservedMemory(byte_count);  // Performs necessary DCHECK()s on this reservation.
    774   size_t base_size = RoundUp(byte_count, kPageSize);
    775   return MemMap(name_, begin, byte_count, begin, base_size, prot_, /* reuse= */ false);
    776 }
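
// A minimal reserve-and-divide sketch combining MapAnonymous() and TakeReservedMemory()
// (sizes and names are arbitrary):
//
//   std::string error_msg;
//   MemMap reservation = MemMap::MapAnonymous("example-reservation",
//                                             /* addr= */ nullptr,
//                                             4 * kPageSize,
//                                             PROT_NONE,
//                                             /* low_4gb= */ false,
//                                             /* reuse= */ false,
//                                             /* reservation= */ nullptr,
//                                             &error_msg,
//                                             /* use_debug_name= */ true);
//   CHECK(reservation.IsValid()) << error_msg;
//   MemMap first = reservation.TakeReservedMemory(kPageSize);
//   // `first` owns the first page; `reservation` now starts one page later and is one
//   // page smaller, with its `gMaps` key updated accordingly.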
    777 
    778 void MemMap::ReleaseReservedMemory(size_t byte_count) {
    779   // Check the reservation mapping.
    780   DCHECK(IsValid());
    781   DCHECK(!reuse_);
    782   DCHECK(!already_unmapped_);
    783   DCHECK_EQ(redzone_size_, 0u);
    784   DCHECK_EQ(begin_, base_begin_);
    785   DCHECK_EQ(size_, base_size_);
    786   DCHECK_ALIGNED(begin_, kPageSize);
    787   DCHECK_ALIGNED(size_, kPageSize);
    788 
    789   // Check and round up the `byte_count`.
    790   DCHECK_NE(byte_count, 0u);
    791   DCHECK_LE(byte_count, size_);
    792   byte_count = RoundUp(byte_count, kPageSize);
    793 
    794   if (byte_count == size_) {
    795     Invalidate();
    796   } else {
    797     // Shrink the reservation MemMap and update its `gMaps` entry.
    798     std::lock_guard<std::mutex> mu(*mem_maps_lock_);
    799     auto it = GetGMapsEntry(*this);
    800     auto node = gMaps->extract(it);
    801     begin_ += byte_count;
    802     size_ -= byte_count;
    803     base_begin_ = begin_;
    804     base_size_ = size_;
    805     node.key() = base_begin_;
    806     gMaps->insert(std::move(node));
    807   }
    808 }
    809 
    810 void MemMap::MadviseDontNeedAndZero() {
    811   if (base_begin_ != nullptr || base_size_ != 0) {
    812     if (!kMadviseZeroes) {
    813       memset(base_begin_, 0, base_size_);
    814     }
    815 #ifdef _WIN32
    816     // It is benign not to madvise away the pages here.
    817     PLOG(WARNING) << "MemMap::MadviseDontNeedAndZero does not madvise on Windows.";
    818 #else
    819     int result = madvise(base_begin_, base_size_, MADV_DONTNEED);
    820     if (result == -1) {
    821       PLOG(WARNING) << "madvise failed";
    822     }
    823 #endif
    824   }
    825 }
    826 
    827 bool MemMap::Sync() {
    828 #ifdef _WIN32
    829   // TODO: add FlushViewOfFile support.
    830   PLOG(ERROR) << "MemMap::Sync unsupported on Windows.";
    831   return false;
    832 #else
     833   // Historical note: To avoid Valgrind errors, we temporarily lifted the lower-end noaccess
     834   // protection before passing the range to msync() when `redzone_size_` was non-zero (Valgrind
     835   // only accepts a page-aligned base address), and excluded the higher-end noaccess protection
     836   // from the msync range. b/27552451.
    837   return msync(BaseBegin(), BaseSize(), MS_SYNC) == 0;
    838 #endif
    839 }
    840 
    841 bool MemMap::Protect(int prot) {
    842   if (base_begin_ == nullptr && base_size_ == 0) {
    843     prot_ = prot;
    844     return true;
    845   }
    846 
    847 #ifndef _WIN32
    848   if (mprotect(base_begin_, base_size_, prot) == 0) {
    849     prot_ = prot;
    850     return true;
    851   }
    852 #endif
    853 
    854   PLOG(ERROR) << "mprotect(" << reinterpret_cast<void*>(base_begin_) << ", " << base_size_ << ", "
    855               << prot << ") failed";
    856   return false;
    857 }
    858 
    859 bool MemMap::CheckNoGaps(MemMap& begin_map, MemMap& end_map) {
    860   std::lock_guard<std::mutex> mu(*mem_maps_lock_);
    861   CHECK(begin_map.IsValid());
    862   CHECK(end_map.IsValid());
    863   CHECK(HasMemMap(begin_map));
    864   CHECK(HasMemMap(end_map));
    865   CHECK_LE(begin_map.BaseBegin(), end_map.BaseBegin());
    866   MemMap* map = &begin_map;
    867   while (map->BaseBegin() != end_map.BaseBegin()) {
    868     MemMap* next_map = GetLargestMemMapAt(map->BaseEnd());
    869     if (next_map == nullptr) {
    870       // Found a gap.
    871       return false;
    872     }
    873     map = next_map;
    874   }
    875   return true;
    876 }
    877 
    878 void MemMap::DumpMaps(std::ostream& os, bool terse) {
    879   std::lock_guard<std::mutex> mu(*mem_maps_lock_);
    880   DumpMapsLocked(os, terse);
    881 }
    882 
    883 void MemMap::DumpMapsLocked(std::ostream& os, bool terse) {
    884   const auto& mem_maps = *gMaps;
    885   if (!terse) {
    886     os << mem_maps;
    887     return;
    888   }
    889 
    890   // Terse output example:
    891   //   [MemMap: 0x409be000+0x20P~0x11dP+0x20P~0x61cP+0x20P prot=0x3 LinearAlloc]
    892   //   [MemMap: 0x451d6000+0x6bP(3) prot=0x3 large object space allocation]
    893   // The details:
    894   //   "+0x20P" means 0x20 pages taken by a single mapping,
    895   //   "~0x11dP" means a gap of 0x11d pages,
    896   //   "+0x6bP(3)" means 3 mappings one after another, together taking 0x6b pages.
    897   os << "MemMap:" << std::endl;
    898   for (auto it = mem_maps.begin(), maps_end = mem_maps.end(); it != maps_end;) {
    899     MemMap* map = it->second;
    900     void* base = it->first;
    901     CHECK_EQ(base, map->BaseBegin());
    902     os << "[MemMap: " << base;
    903     ++it;
    904     // Merge consecutive maps with the same protect flags and name.
    905     constexpr size_t kMaxGaps = 9;
    906     size_t num_gaps = 0;
    907     size_t num = 1u;
    908     size_t size = map->BaseSize();
    909     CHECK_ALIGNED(size, kPageSize);
    910     void* end = map->BaseEnd();
    911     while (it != maps_end &&
    912         it->second->GetProtect() == map->GetProtect() &&
    913         it->second->GetName() == map->GetName() &&
    914         (it->second->BaseBegin() == end || num_gaps < kMaxGaps)) {
    915       if (it->second->BaseBegin() != end) {
    916         ++num_gaps;
    917         os << "+0x" << std::hex << (size / kPageSize) << "P";
    918         if (num != 1u) {
    919           os << "(" << std::dec << num << ")";
    920         }
    921         size_t gap =
    922             reinterpret_cast<uintptr_t>(it->second->BaseBegin()) - reinterpret_cast<uintptr_t>(end);
    923         CHECK_ALIGNED(gap, kPageSize);
    924         os << "~0x" << std::hex << (gap / kPageSize) << "P";
    925         num = 0u;
    926         size = 0u;
    927       }
    928       CHECK_ALIGNED(it->second->BaseSize(), kPageSize);
    929       ++num;
    930       size += it->second->BaseSize();
    931       end = it->second->BaseEnd();
    932       ++it;
    933     }
    934     os << "+0x" << std::hex << (size / kPageSize) << "P";
    935     if (num != 1u) {
    936       os << "(" << std::dec << num << ")";
    937     }
    938     os << " prot=0x" << std::hex << map->GetProtect() << " " << map->GetName() << "]" << std::endl;
    939   }
    940 }
    941 
    942 bool MemMap::HasMemMap(MemMap& map) {
    943   void* base_begin = map.BaseBegin();
    944   for (auto it = gMaps->lower_bound(base_begin), end = gMaps->end();
    945        it != end && it->first == base_begin; ++it) {
    946     if (it->second == &map) {
    947       return true;
    948     }
    949   }
    950   return false;
    951 }
    952 
    953 MemMap* MemMap::GetLargestMemMapAt(void* address) {
    954   size_t largest_size = 0;
    955   MemMap* largest_map = nullptr;
    956   DCHECK(gMaps != nullptr);
    957   for (auto it = gMaps->lower_bound(address), end = gMaps->end();
    958        it != end && it->first == address; ++it) {
    959     MemMap* map = it->second;
    960     CHECK(map != nullptr);
    961     if (largest_size < map->BaseSize()) {
    962       largest_size = map->BaseSize();
    963       largest_map = map;
    964     }
    965   }
    966   return largest_map;
    967 }
    968 
    969 void MemMap::Init() {
    970   if (mem_maps_lock_ != nullptr) {
     971     // dex2oat calls MemMap::Init twice since it's needed before the runtime is created.
    972     return;
    973   }
    974   mem_maps_lock_ = new std::mutex();
    975   // Not for thread safety, but for the annotation that gMaps is GUARDED_BY(mem_maps_lock_).
    976   std::lock_guard<std::mutex> mu(*mem_maps_lock_);
    977   DCHECK(gMaps == nullptr);
    978   gMaps = new Maps;
    979 
    980   TargetMMapInit();
    981 }
    982 
    983 void MemMap::Shutdown() {
    984   if (mem_maps_lock_ == nullptr) {
    985     // If MemMap::Shutdown is called more than once, there is no effect.
    986     return;
    987   }
    988   {
    989     // Not for thread safety, but for the annotation that gMaps is GUARDED_BY(mem_maps_lock_).
    990     std::lock_guard<std::mutex> mu(*mem_maps_lock_);
    991     DCHECK(gMaps != nullptr);
    992     delete gMaps;
    993     gMaps = nullptr;
    994   }
    995   delete mem_maps_lock_;
    996   mem_maps_lock_ = nullptr;
    997 }
    998 
    999 void MemMap::SetSize(size_t new_size) {
   1000   CHECK_LE(new_size, size_);
   1001   size_t new_base_size = RoundUp(new_size + static_cast<size_t>(PointerDiff(Begin(), BaseBegin())),
   1002                                  kPageSize);
   1003   if (new_base_size == base_size_) {
   1004     size_ = new_size;
   1005     return;
   1006   }
   1007   CHECK_LT(new_base_size, base_size_);
   1008   MEMORY_TOOL_MAKE_UNDEFINED(
   1009       reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(BaseBegin()) +
   1010                               new_base_size),
   1011       base_size_ - new_base_size);
   1012   CHECK_EQ(TargetMUnmap(reinterpret_cast<void*>(
   1013                         reinterpret_cast<uintptr_t>(BaseBegin()) + new_base_size),
   1014                         base_size_ - new_base_size), 0)
   1015                         << new_base_size << " " << base_size_;
   1016   base_size_ = new_base_size;
   1017   size_ = new_size;
   1018 }
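
// For example (illustrative, with begin_ == base_begin_ and 4 KiB pages): calling
// SetSize(0x1800) on a 0x3000-byte map rounds the new base size up to 0x2000, unmaps the
// final page, and leaves size_ = 0x1800 with base_size_ = 0x2000.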
   1019 
   1020 void* MemMap::MapInternalArtLow4GBAllocator(size_t length,
   1021                                             int prot,
   1022                                             int flags,
   1023                                             int fd,
   1024                                             off_t offset) {
   1025 #if USE_ART_LOW_4G_ALLOCATOR
   1026   void* actual = MAP_FAILED;
   1027 
   1028   bool first_run = true;
   1029 
   1030   std::lock_guard<std::mutex> mu(*mem_maps_lock_);
   1031   for (uintptr_t ptr = next_mem_pos_; ptr < 4 * GB; ptr += kPageSize) {
   1032     // Use gMaps as an optimization to skip over large maps.
    1033     // Find the first map whose address is > ptr.
   1034     auto it = gMaps->upper_bound(reinterpret_cast<void*>(ptr));
   1035     if (it != gMaps->begin()) {
   1036       auto before_it = it;
   1037       --before_it;
   1038       // Start at the end of the map before the upper bound.
   1039       ptr = std::max(ptr, reinterpret_cast<uintptr_t>(before_it->second->BaseEnd()));
   1040       CHECK_ALIGNED(ptr, kPageSize);
   1041     }
   1042     while (it != gMaps->end()) {
   1043       // How much space do we have until the next map?
   1044       size_t delta = reinterpret_cast<uintptr_t>(it->first) - ptr;
   1045       // If the space may be sufficient, break out of the loop.
   1046       if (delta >= length) {
   1047         break;
   1048       }
   1049       // Otherwise, skip to the end of the map.
   1050       ptr = reinterpret_cast<uintptr_t>(it->second->BaseEnd());
   1051       CHECK_ALIGNED(ptr, kPageSize);
   1052       ++it;
   1053     }
   1054 
   1055     // Try to see if we get lucky with this address since none of the ART maps overlap.
   1056     actual = TryMemMapLow4GB(reinterpret_cast<void*>(ptr), length, prot, flags, fd, offset);
   1057     if (actual != MAP_FAILED) {
   1058       next_mem_pos_ = reinterpret_cast<uintptr_t>(actual) + length;
   1059       return actual;
   1060     }
   1061 
   1062     if (4U * GB - ptr < length) {
   1063       // Not enough memory until 4GB.
   1064       if (first_run) {
    1065         // Try one more time, starting from the bottom.
   1066         ptr = LOW_MEM_START - kPageSize;
   1067         first_run = false;
   1068         continue;
   1069       } else {
   1070         // Second try failed.
   1071         break;
   1072       }
   1073     }
   1074 
   1075     uintptr_t tail_ptr;
   1076 
   1077     // Check pages are free.
   1078     bool safe = true;
   1079     for (tail_ptr = ptr; tail_ptr < ptr + length; tail_ptr += kPageSize) {
   1080       if (msync(reinterpret_cast<void*>(tail_ptr), kPageSize, 0) == 0) {
   1081         safe = false;
   1082         break;
   1083       } else {
   1084         DCHECK_EQ(errno, ENOMEM);
   1085       }
   1086     }
   1087 
    1088     next_mem_pos_ = tail_ptr;  // Update early, as we break out once we have found and mapped a region.
   1089 
    1090     if (safe) {
   1091       actual = TryMemMapLow4GB(reinterpret_cast<void*>(ptr), length, prot, flags, fd, offset);
   1092       if (actual != MAP_FAILED) {
   1093         return actual;
   1094       }
   1095     } else {
   1096       // Skip over last page.
   1097       ptr = tail_ptr;
   1098     }
   1099   }
   1100 
   1101   if (actual == MAP_FAILED) {
   1102     LOG(ERROR) << "Could not find contiguous low-memory space.";
   1103     errno = ENOMEM;
   1104   }
   1105   return actual;
   1106 #else
   1107   UNUSED(length, prot, flags, fd, offset);
   1108   LOG(FATAL) << "Unreachable";
   1109   UNREACHABLE();
   1110 #endif
   1111 }
   1112 
   1113 void* MemMap::MapInternal(void* addr,
   1114                           size_t length,
   1115                           int prot,
   1116                           int flags,
   1117                           int fd,
   1118                           off_t offset,
   1119                           bool low_4gb) {
   1120 #ifdef __LP64__
    1121   // When requesting low_4gb memory with an expected address, the requested range should fit
    1122   // entirely below 4GB.
   1123   if (low_4gb && (
   1124       // Start out of bounds.
   1125       (reinterpret_cast<uintptr_t>(addr) >> 32) != 0 ||
   1126       // End out of bounds. For simplicity, this will fail for the last page of memory.
   1127       ((reinterpret_cast<uintptr_t>(addr) + length) >> 32) != 0)) {
   1128     LOG(ERROR) << "The requested address space (" << addr << ", "
   1129                << reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(addr) + length)
   1130                << ") cannot fit in low_4gb";
   1131     return MAP_FAILED;
   1132   }
   1133 #else
   1134   UNUSED(low_4gb);
   1135 #endif
   1136   DCHECK_ALIGNED(length, kPageSize);
   1137   // TODO:
   1138   // A page allocator would be a useful abstraction here, as
   1139   // 1) It is doubtful that MAP_32BIT on x86_64 is doing the right job for us
   1140   void* actual = MAP_FAILED;
   1141 #if USE_ART_LOW_4G_ALLOCATOR
    1142   // MAP_32BIT is only available on x86_64.
   1143   if (low_4gb && addr == nullptr) {
   1144     // The linear-scan allocator has an issue when executable pages are denied (e.g., by selinux
   1145     // policies in sensitive processes). In that case, the error code will still be ENOMEM. So
   1146     // the allocator will scan all low 4GB twice, and still fail. This is *very* slow.
   1147     //
   1148     // To avoid the issue, always map non-executable first, and mprotect if necessary.
   1149     const int orig_prot = prot;
   1150     const int prot_non_exec = prot & ~PROT_EXEC;
   1151     actual = MapInternalArtLow4GBAllocator(length, prot_non_exec, flags, fd, offset);
   1152 
   1153     if (actual == MAP_FAILED) {
   1154       return MAP_FAILED;
   1155     }
   1156 
   1157     // See if we need to remap with the executable bit now.
   1158     if (orig_prot != prot_non_exec) {
   1159       if (mprotect(actual, length, orig_prot) != 0) {
   1160         PLOG(ERROR) << "Could not protect to requested prot: " << orig_prot;
   1161         TargetMUnmap(actual, length);
   1162         errno = ENOMEM;
   1163         return MAP_FAILED;
   1164       }
   1165     }
   1166     return actual;
   1167   }
   1168 
   1169   actual = TargetMMap(addr, length, prot, flags, fd, offset);
   1170 #else
   1171 #if defined(__LP64__)
   1172   if (low_4gb && addr == nullptr) {
   1173     flags |= MAP_32BIT;
   1174   }
   1175 #endif
   1176   actual = TargetMMap(addr, length, prot, flags, fd, offset);
   1177 #endif
   1178   return actual;
   1179 }
   1180 
   1181 std::ostream& operator<<(std::ostream& os, const MemMap& mem_map) {
   1182   os << StringPrintf("[MemMap: %p-%p prot=0x%x %s]",
   1183                      mem_map.BaseBegin(), mem_map.BaseEnd(), mem_map.GetProtect(),
   1184                      mem_map.GetName().c_str());
   1185   return os;
   1186 }
   1187 
   1188 void MemMap::TryReadable() {
   1189   if (base_begin_ == nullptr && base_size_ == 0) {
   1190     return;
   1191   }
   1192   CHECK_NE(prot_ & PROT_READ, 0);
   1193   volatile uint8_t* begin = reinterpret_cast<volatile uint8_t*>(base_begin_);
   1194   volatile uint8_t* end = begin + base_size_;
   1195   DCHECK(IsAligned<kPageSize>(begin));
   1196   DCHECK(IsAligned<kPageSize>(end));
   1197   // Read the first byte of each page. Use volatile to prevent the compiler from optimizing away the
   1198   // reads.
   1199   for (volatile uint8_t* ptr = begin; ptr < end; ptr += kPageSize) {
   1200     // This read could fault if protection wasn't set correctly.
   1201     uint8_t value = *ptr;
   1202     UNUSED(value);
   1203   }
   1204 }
   1205 
   1206 void ZeroAndReleasePages(void* address, size_t length) {
   1207   if (length == 0) {
   1208     return;
   1209   }
   1210   uint8_t* const mem_begin = reinterpret_cast<uint8_t*>(address);
   1211   uint8_t* const mem_end = mem_begin + length;
   1212   uint8_t* const page_begin = AlignUp(mem_begin, kPageSize);
   1213   uint8_t* const page_end = AlignDown(mem_end, kPageSize);
   1214   if (!kMadviseZeroes || page_begin >= page_end) {
   1215     // No possible area to madvise.
   1216     std::fill(mem_begin, mem_end, 0);
   1217   } else {
   1218     // Spans one or more pages.
   1219     DCHECK_LE(mem_begin, page_begin);
   1220     DCHECK_LE(page_begin, page_end);
   1221     DCHECK_LE(page_end, mem_end);
   1222     std::fill(mem_begin, page_begin, 0);
   1223 #ifdef _WIN32
   1224     LOG(WARNING) << "ZeroAndReleasePages does not madvise on Windows.";
   1225 #else
   1226     CHECK_NE(madvise(page_begin, page_end - page_begin, MADV_DONTNEED), -1) << "madvise failed";
   1227 #endif
   1228     std::fill(page_end, mem_end, 0);
   1229   }
   1230 }
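
// For example (illustrative, with 4 KiB pages): address = 0x1010 and length = 0x3000 give
// mem_begin = 0x1010, mem_end = 0x4010, page_begin = 0x2000 and page_end = 0x4000, so
// [0x1010, 0x2000) and [0x4000, 0x4010) are zeroed with std::fill while the whole pages in
// [0x2000, 0x4000) are released with MADV_DONTNEED.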
   1231 
   1232 void MemMap::AlignBy(size_t size) {
   1233   CHECK_EQ(begin_, base_begin_) << "Unsupported";
   1234   CHECK_EQ(size_, base_size_) << "Unsupported";
   1235   CHECK_GT(size, static_cast<size_t>(kPageSize));
   1236   CHECK_ALIGNED(size, kPageSize);
   1237   CHECK(!reuse_);
   1238   if (IsAlignedParam(reinterpret_cast<uintptr_t>(base_begin_), size) &&
   1239       IsAlignedParam(base_size_, size)) {
   1240     // Already aligned.
   1241     return;
   1242   }
   1243   uint8_t* base_begin = reinterpret_cast<uint8_t*>(base_begin_);
   1244   uint8_t* base_end = base_begin + base_size_;
   1245   uint8_t* aligned_base_begin = AlignUp(base_begin, size);
   1246   uint8_t* aligned_base_end = AlignDown(base_end, size);
   1247   CHECK_LE(base_begin, aligned_base_begin);
   1248   CHECK_LE(aligned_base_end, base_end);
   1249   size_t aligned_base_size = aligned_base_end - aligned_base_begin;
   1250   CHECK_LT(aligned_base_begin, aligned_base_end)
   1251       << "base_begin = " << reinterpret_cast<void*>(base_begin)
   1252       << " base_end = " << reinterpret_cast<void*>(base_end);
   1253   CHECK_GE(aligned_base_size, size);
   1254   // Unmap the unaligned parts.
   1255   if (base_begin < aligned_base_begin) {
   1256     MEMORY_TOOL_MAKE_UNDEFINED(base_begin, aligned_base_begin - base_begin);
   1257     CHECK_EQ(TargetMUnmap(base_begin, aligned_base_begin - base_begin), 0)
   1258         << "base_begin=" << reinterpret_cast<void*>(base_begin)
   1259         << " aligned_base_begin=" << reinterpret_cast<void*>(aligned_base_begin);
   1260   }
   1261   if (aligned_base_end < base_end) {
   1262     MEMORY_TOOL_MAKE_UNDEFINED(aligned_base_end, base_end - aligned_base_end);
   1263     CHECK_EQ(TargetMUnmap(aligned_base_end, base_end - aligned_base_end), 0)
   1264         << "base_end=" << reinterpret_cast<void*>(base_end)
   1265         << " aligned_base_end=" << reinterpret_cast<void*>(aligned_base_end);
   1266   }
   1267   std::lock_guard<std::mutex> mu(*mem_maps_lock_);
   1268   if (base_begin < aligned_base_begin) {
   1269     auto it = GetGMapsEntry(*this);
   1270     auto node = gMaps->extract(it);
   1271     node.key() = aligned_base_begin;
   1272     gMaps->insert(std::move(node));
   1273   }
   1274   base_begin_ = aligned_base_begin;
   1275   base_size_ = aligned_base_size;
   1276   begin_ = aligned_base_begin;
   1277   size_ = aligned_base_size;
   1278   DCHECK(gMaps != nullptr);
   1279 }
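
// For example (illustrative): aligning a map with base_begin_ = 0x10003000 and
// base_size_ = 0x25000 to a 0x10000 boundary unmaps [0x10003000, 0x10010000) and
// [0x10020000, 0x10028000), re-keys the `gMaps` entry, and leaves an aligned map at
// [0x10010000, 0x10020000).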
   1280 
   1281 }  // namespace art
   1282