/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "mem_map.h"

#include "base/memory_tool.h"
#include <backtrace/BacktraceMap.h>
#include <inttypes.h>
#include <stdlib.h>

#include <memory>
#include <sstream>

#include "base/stringprintf.h"

#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wshadow"
#include "ScopedFd.h"
#pragma GCC diagnostic pop

#include "thread-inl.h"
#include "utils.h"

#include <cutils/ashmem.h>

#ifndef ANDROID_OS
#include <sys/resource.h>
#endif

#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS MAP_ANON
#endif

namespace art {

static std::ostream& operator<<(
    std::ostream& os,
    std::pair<BacktraceMap::const_iterator, BacktraceMap::const_iterator> iters) {
  for (BacktraceMap::const_iterator it = iters.first; it != iters.second; ++it) {
    os << StringPrintf("0x%08x-0x%08x %c%c%c %s\n",
                       static_cast<uint32_t>(it->start),
                       static_cast<uint32_t>(it->end),
                       (it->flags & PROT_READ) ? 'r' : '-',
                       (it->flags & PROT_WRITE) ? 'w' : '-',
                       (it->flags & PROT_EXEC) ? 'x' : '-', it->name.c_str());
  }
  return os;
}

std::ostream& operator<<(std::ostream& os, const MemMap::Maps& mem_maps) {
  os << "MemMap:" << std::endl;
  for (auto it = mem_maps.begin(); it != mem_maps.end(); ++it) {
    void* base = it->first;
    MemMap* map = it->second;
    CHECK_EQ(base, map->BaseBegin());
    os << *map << std::endl;
  }
  return os;
}

MemMap::Maps* MemMap::maps_ = nullptr;

#if USE_ART_LOW_4G_ALLOCATOR
// Handling mem_map in 32b address range for 64b architectures that do not support MAP_32BIT.

// The regular start of memory allocations. The first 64KB is protected by SELinux.
static constexpr uintptr_t LOW_MEM_START = 64 * KB;

// Generate random starting position.
// To not interfere with image position, take the image's address and only place it below. Current
// formula (sketch):
//
// ART_BASE_ADDR      = 0001XXXXXXXXXXXXXXX
// ----------------------------------------
//                    = 0000111111111111111
// & ~(kPageSize - 1) = ~0000000000000001111
// ----------------------------------------
// mask               = 0000111111111110000
// & random data      = YYYYYYYYYYYYYYYYYYY
// ----------------------------------------
// tmp                = 0000YYYYYYYYYYY0000
// + LOW_MEM_START    = 0000000000001000000
// ----------------------------------------
// start
//
// arc4random as an entropy source is exposed in Bionic, but not in glibc. When we
// do not have Bionic, simply start with LOW_MEM_START.

// Function is standalone so it can be tested somewhat in mem_map_test.cc.
#ifdef __BIONIC__
uintptr_t CreateStartPos(uint64_t input) {
  CHECK_NE(0, ART_BASE_ADDRESS);

  // Start with all bits below highest bit in ART_BASE_ADDRESS.
  constexpr size_t leading_zeros = CLZ(static_cast<uint32_t>(ART_BASE_ADDRESS));
  constexpr uintptr_t mask_ones = (1 << (31 - leading_zeros)) - 1;

  // Lowest (usually 12) bits are not used, as aligned by page size.
  constexpr uintptr_t mask = mask_ones & ~(kPageSize - 1);

  // Mask input data.
  return (input & mask) + LOW_MEM_START;
}
#endif
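// Worked example (illustrative values only): with the common 32-bit image base
// ART_BASE_ADDRESS = 0x70000000 and kPageSize = 4096,
//   leading_zeros = CLZ(0x70000000)     = 1
//   mask_ones     = (1 << 30) - 1       = 0x3fffffff
//   mask          = 0x3fffffff & ~0xfff = 0x3ffff000
// so random input 0x12345678 yields
//   start = (0x12345678 & 0x3ffff000) + 0x10000 = 0x12345000 + 0x10000 = 0x12355000,
// i.e. a page-aligned address at least LOW_MEM_START and below the image base.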

static uintptr_t GenerateNextMemPos() {
#ifdef __BIONIC__
  uint64_t random_data;
  arc4random_buf(&random_data, sizeof(random_data));
  return CreateStartPos(random_data);
#else
  // No arc4random on host, see above.
  return LOW_MEM_START;
#endif
}

// Initialize linear scan to random position.
uintptr_t MemMap::next_mem_pos_ = GenerateNextMemPos();
#endif

// Return true if the address range is contained in a single memory map by either reading
// the maps_ variable or the /proc/self/maps entries.
bool MemMap::ContainedWithinExistingMap(uint8_t* ptr, size_t size, std::string* error_msg) {
  uintptr_t begin = reinterpret_cast<uintptr_t>(ptr);
  uintptr_t end = begin + size;

  // There is a suspicion that BacktraceMap::Create is occasionally missing maps. TODO: Investigate
  // further.
  {
    MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
    for (auto& pair : *maps_) {
      MemMap* const map = pair.second;
      if (begin >= reinterpret_cast<uintptr_t>(map->Begin()) &&
          end <= reinterpret_cast<uintptr_t>(map->End())) {
        return true;
      }
    }
  }

  std::unique_ptr<BacktraceMap> map(BacktraceMap::Create(getpid(), true));
  if (map == nullptr) {
    if (error_msg != nullptr) {
      *error_msg = StringPrintf("Failed to build process map");
    }
    return false;
  }

  ScopedBacktraceMapIteratorLock lock(map.get());
  for (BacktraceMap::const_iterator it = map->begin(); it != map->end(); ++it) {
    if ((begin >= it->start && begin < it->end)  // start of new within old
        && (end > it->start && end <= it->end)) {  // end of new within old
      return true;
    }
  }
  if (error_msg != nullptr) {
    PrintFileToLog("/proc/self/maps", LogSeverity::ERROR);
    *error_msg = StringPrintf("Requested region 0x%08" PRIxPTR "-0x%08" PRIxPTR " does not overlap "
                              "any existing map. See process maps in the log.", begin, end);
  }
  return false;
}

// Return true if the address range does not conflict with any /proc/self/maps entry.
static bool CheckNonOverlapping(uintptr_t begin,
                                uintptr_t end,
                                std::string* error_msg) {
  std::unique_ptr<BacktraceMap> map(BacktraceMap::Create(getpid(), true));
  if (map.get() == nullptr) {
    *error_msg = StringPrintf("Failed to build process map");
    return false;
  }
  ScopedBacktraceMapIteratorLock lock(map.get());
  for (BacktraceMap::const_iterator it = map->begin(); it != map->end(); ++it) {
    if ((begin >= it->start && begin < it->end)      // start of new within old
        || (end > it->start && end <= it->end)       // end of new within old
        || (begin <= it->start && end > it->end)) {  // start/end of new includes all of old
      std::ostringstream map_info;
      map_info << std::make_pair(it, map->end());
      *error_msg = StringPrintf("Requested region 0x%08" PRIxPTR "-0x%08" PRIxPTR " overlaps with "
                                "existing map 0x%08" PRIxPTR "-0x%08" PRIxPTR " (%s)\n%s",
                                begin, end,
                                static_cast<uintptr_t>(it->start), static_cast<uintptr_t>(it->end),
                                it->name.c_str(),
                                map_info.str().c_str());
      return false;
    }
  }
  return true;
}
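// Worked example for the overlap predicate above (illustrative addresses only).
// Given an existing map [0x2000, 0x5000):
//   begin = 0x1000, end = 0x2000: no clause fires (the ranges merely touch), so no conflict;
//   begin = 0x4000, end = 0x6000: "start of new within old" fires, conflict reported;
//   begin = 0x1000, end = 0x6000: "start/end of new includes all of old" fires, conflict reported.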

// CheckMapRequest to validate a non-MAP_FAILED mmap result based on
// the expected value, calling munmap if validation fails, giving the
// reason in error_msg.
//
// If the expected_ptr is null, nothing is checked beyond the fact
// that the actual_ptr is not MAP_FAILED. However, if expected_ptr is
// non-null, we check that actual_ptr == expected_ptr, and if not,
// report in error_msg what the conflicting mapping was if found, or a
// generic error in other cases.
static bool CheckMapRequest(uint8_t* expected_ptr, void* actual_ptr, size_t byte_count,
                            std::string* error_msg) {
  // Handled first by caller for more specific error messages.
  CHECK(actual_ptr != MAP_FAILED);

  if (expected_ptr == nullptr) {
    return true;
  }

  uintptr_t actual = reinterpret_cast<uintptr_t>(actual_ptr);
  uintptr_t expected = reinterpret_cast<uintptr_t>(expected_ptr);
  uintptr_t limit = expected + byte_count;

  if (expected_ptr == actual_ptr) {
    return true;
  }

  // We asked for an address but didn't get what we wanted; all paths below here should fail.
  int result = munmap(actual_ptr, byte_count);
  if (result == -1) {
    PLOG(WARNING) << StringPrintf("munmap(%p, %zd) failed", actual_ptr, byte_count);
  }

  if (error_msg != nullptr) {
    // We call this here so that we can try and generate a full error
    // message with the overlapping mapping. There's no guarantee that
    // there will be an overlap though, since
    // - the kernel is not *required* to honor expected_ptr unless MAP_FIXED is
    //   used, so the mapping can move even when nothing overlaps, and
    // - there might have been an overlap at the point of mmap, but the
    //   overlapping region has since been unmapped.
    std::string error_detail;
    CheckNonOverlapping(expected, limit, &error_detail);
    std::ostringstream os;
    os << StringPrintf("Failed to mmap at expected address, mapped at "
                       "0x%08" PRIxPTR " instead of 0x%08" PRIxPTR,
                       actual, expected);
    if (!error_detail.empty()) {
      os << " : " << error_detail;
    }
    *error_msg = os.str();
  }
  return false;
}

#if USE_ART_LOW_4G_ALLOCATOR
static inline void* TryMemMapLow4GB(void* ptr,
                                    size_t page_aligned_byte_count,
                                    int prot,
                                    int flags,
                                    int fd,
                                    off_t offset) {
  void* actual = mmap(ptr, page_aligned_byte_count, prot, flags, fd, offset);
  if (actual != MAP_FAILED) {
    // Since we didn't use MAP_FIXED the kernel may have mapped it somewhere not in the low
    // 4GB. If this is the case, unmap and retry.
    if (reinterpret_cast<uintptr_t>(actual) + page_aligned_byte_count >= 4 * GB) {
      munmap(actual, page_aligned_byte_count);
      actual = MAP_FAILED;
    }
  }
  return actual;
}
#endif
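// Note that without MAP_FIXED, mmap treats the requested address purely as a hint: the kernel
// may place the mapping anywhere it likes, including above 4GB, which is why the result above
// has to be range-checked and unmapped on failure rather than trusted.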

MemMap* MemMap::MapAnonymous(const char* name,
                             uint8_t* expected_ptr,
                             size_t byte_count,
                             int prot,
                             bool low_4gb,
                             bool reuse,
                             std::string* error_msg,
                             bool use_ashmem) {
#ifndef __LP64__
  UNUSED(low_4gb);
#endif
  if (byte_count == 0) {
    return new MemMap(name, nullptr, 0, nullptr, 0, prot, false);
  }
  size_t page_aligned_byte_count = RoundUp(byte_count, kPageSize);

  int flags = MAP_PRIVATE | MAP_ANONYMOUS;
  if (reuse) {
    // reuse means it is okay that it overlaps an existing page mapping.
    // Only use this if you actually made the page reservation yourself.
    CHECK(expected_ptr != nullptr);

    DCHECK(ContainedWithinExistingMap(expected_ptr, byte_count, error_msg))
        << ((error_msg != nullptr) ? *error_msg : std::string());
    flags |= MAP_FIXED;
  }

  ScopedFd fd(-1);

  if (use_ashmem) {
    if (!kIsTargetBuild) {
      // When not on Android, ashmem is faked using files in /tmp. Ensure that such files won't
      // fail due to ulimit restrictions. If they would, use a regular mmap instead.
      struct rlimit rlimit_fsize;
      CHECK_EQ(getrlimit(RLIMIT_FSIZE, &rlimit_fsize), 0);
      use_ashmem = (rlimit_fsize.rlim_cur == RLIM_INFINITY) ||
        (page_aligned_byte_count < rlimit_fsize.rlim_cur);
    }
  }

  if (use_ashmem) {
    // android_os_Debug.cpp read_mapinfo assumes all ashmem regions associated with the VM are
    // prefixed "dalvik-".
    std::string debug_friendly_name("dalvik-");
    debug_friendly_name += name;
    fd.reset(ashmem_create_region(debug_friendly_name.c_str(), page_aligned_byte_count));
    if (fd.get() == -1) {
      if (error_msg != nullptr) {
        *error_msg = StringPrintf("ashmem_create_region failed for '%s': %s",
                                  name, strerror(errno));
      }
      return nullptr;
    }
    flags &= ~MAP_ANONYMOUS;
  }

  // We need to store and potentially set an error number for pretty printing of errors.
  int saved_errno = 0;

  void* actual = MapInternal(expected_ptr,
                             page_aligned_byte_count,
                             prot,
                             flags,
                             fd.get(),
                             0,
                             low_4gb);
  saved_errno = errno;

  if (actual == MAP_FAILED) {
    if (error_msg != nullptr) {
      PrintFileToLog("/proc/self/maps", LogSeverity::WARNING);

      *error_msg = StringPrintf("Failed anonymous mmap(%p, %zd, 0x%x, 0x%x, %d, 0): %s. "
                                "See process maps in the log.",
                                expected_ptr,
                                page_aligned_byte_count,
                                prot,
                                flags,
                                fd.get(),
                                strerror(saved_errno));
    }
    return nullptr;
  }
  if (!CheckMapRequest(expected_ptr, actual, page_aligned_byte_count, error_msg)) {
    return nullptr;
  }
  return new MemMap(name, reinterpret_cast<uint8_t*>(actual), byte_count, actual,
                    page_aligned_byte_count, prot, reuse);
}
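// Usage sketch (hypothetical caller; the region name and sizes are made up):
//
//   std::string error_msg;
//   std::unique_ptr<MemMap> map(MemMap::MapAnonymous("some-arena",
//                                                    /* expected_ptr */ nullptr,
//                                                    16 * kPageSize,
//                                                    PROT_READ | PROT_WRITE,
//                                                    /* low_4gb */ false,
//                                                    /* reuse */ false,
//                                                    &error_msg));
//   CHECK(map != nullptr) << error_msg;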

MemMap* MemMap::MapDummy(const char* name, uint8_t* addr, size_t byte_count) {
  if (byte_count == 0) {
    return new MemMap(name, nullptr, 0, nullptr, 0, 0, false);
  }
  const size_t page_aligned_byte_count = RoundUp(byte_count, kPageSize);
  return new MemMap(name, addr, byte_count, addr, page_aligned_byte_count, 0, true /* reuse */);
}

MemMap* MemMap::MapFileAtAddress(uint8_t* expected_ptr,
                                 size_t byte_count,
                                 int prot,
                                 int flags,
                                 int fd,
                                 off_t start,
                                 bool low_4gb,
                                 bool reuse,
                                 const char* filename,
                                 std::string* error_msg) {
  CHECK_NE(0, prot);
  CHECK_NE(0, flags & (MAP_SHARED | MAP_PRIVATE));

  // Note that we do not allow MAP_FIXED unless reuse == true, i.e. we
  // expect this mapping to be contained within an existing map.
  if (reuse) {
    // reuse means it is okay that it overlaps an existing page mapping.
    // Only use this if you actually made the page reservation yourself.
    CHECK(expected_ptr != nullptr);

    DCHECK(ContainedWithinExistingMap(expected_ptr, byte_count, error_msg))
        << ((error_msg != nullptr) ? *error_msg : std::string());
    flags |= MAP_FIXED;
  } else {
    CHECK_EQ(0, flags & MAP_FIXED);
    // Don't bother checking for an overlapping region here. We'll
    // check this if required after the fact inside CheckMapRequest.
  }

  if (byte_count == 0) {
    return new MemMap(filename, nullptr, 0, nullptr, 0, prot, false);
  }
  // Adjust 'offset' to be page-aligned as required by mmap.
  int page_offset = start % kPageSize;
  off_t page_aligned_offset = start - page_offset;
  // Adjust 'byte_count' to be page-aligned as we will map this anyway.
  size_t page_aligned_byte_count = RoundUp(byte_count + page_offset, kPageSize);
  // The 'expected_ptr' is modified (if specified, i.e. non-null) to be page aligned to the file
  // but not necessarily to virtual memory. mmap will page align 'expected' for us.
  uint8_t* page_aligned_expected =
      (expected_ptr == nullptr) ? nullptr : (expected_ptr - page_offset);
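  // Worked example (illustrative numbers, assuming kPageSize = 4096): mapping
  // byte_count = 0x800 bytes at file offset start = 0x1234 gives
  //   page_offset             = 0x1234 % 0x1000                 = 0x234
  //   page_aligned_offset     = 0x1234 - 0x234                  = 0x1000
  //   page_aligned_byte_count = RoundUp(0x800 + 0x234, 0x1000)  = 0x1000
  // so the mapping starts at the enclosing page of the file and Begin() is
  // page_offset bytes past the start of the mapping.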

  size_t redzone_size = 0;
  if (RUNNING_ON_MEMORY_TOOL && kMemoryToolAddsRedzones && expected_ptr == nullptr) {
    redzone_size = kPageSize;
    page_aligned_byte_count += redzone_size;
  }

  uint8_t* actual = reinterpret_cast<uint8_t*>(MapInternal(page_aligned_expected,
                                                           page_aligned_byte_count,
                                                           prot,
                                                           flags,
                                                           fd,
                                                           page_aligned_offset,
                                                           low_4gb));
  if (actual == MAP_FAILED) {
    if (error_msg != nullptr) {
      auto saved_errno = errno;

      if (kIsDebugBuild || VLOG_IS_ON(oat)) {
        PrintFileToLog("/proc/self/maps", LogSeverity::WARNING);
      }

      *error_msg = StringPrintf("mmap(%p, %zd, 0x%x, 0x%x, %d, %" PRId64
                                ") of file '%s' failed: %s. See process maps in the log.",
                                page_aligned_expected, page_aligned_byte_count, prot, flags, fd,
                                static_cast<int64_t>(page_aligned_offset), filename,
                                strerror(saved_errno));
    }
    return nullptr;
  }
  if (!CheckMapRequest(expected_ptr, actual, page_aligned_byte_count, error_msg)) {
    return nullptr;
  }
  if (redzone_size != 0) {
    const uint8_t *real_start = actual + page_offset;
    const uint8_t *real_end = actual + page_offset + byte_count;
    const uint8_t *mapping_end = actual + page_aligned_byte_count;

    MEMORY_TOOL_MAKE_NOACCESS(actual, real_start - actual);
    MEMORY_TOOL_MAKE_NOACCESS(real_end, mapping_end - real_end);
    page_aligned_byte_count -= redzone_size;
  }

  return new MemMap(filename, actual + page_offset, byte_count, actual, page_aligned_byte_count,
                    prot, reuse, redzone_size);
}

MemMap::~MemMap() {
  if (base_begin_ == nullptr && base_size_ == 0) {
    return;
  }

  // Unlike Valgrind, AddressSanitizer requires that all manually poisoned memory is unpoisoned
  // before it is returned to the system.
  if (redzone_size_ != 0) {
    MEMORY_TOOL_MAKE_UNDEFINED(
        reinterpret_cast<char*>(base_begin_) + base_size_ - redzone_size_,
        redzone_size_);
  }

  if (!reuse_) {
    MEMORY_TOOL_MAKE_UNDEFINED(base_begin_, base_size_);
    int result = munmap(base_begin_, base_size_);
    if (result == -1) {
      PLOG(FATAL) << "munmap failed";
    }
  }

  // Remove it from maps_.
  MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
  bool found = false;
  DCHECK(maps_ != nullptr);
  for (auto it = maps_->lower_bound(base_begin_), end = maps_->end();
       it != end && it->first == base_begin_; ++it) {
    if (it->second == this) {
      found = true;
      maps_->erase(it);
      break;
    }
  }
  CHECK(found) << "MemMap not found";
}

MemMap::MemMap(const std::string& name, uint8_t* begin, size_t size, void* base_begin,
               size_t base_size, int prot, bool reuse, size_t redzone_size)
    : name_(name), begin_(begin), size_(size), base_begin_(base_begin), base_size_(base_size),
      prot_(prot), reuse_(reuse), redzone_size_(redzone_size) {
  if (size_ == 0) {
    CHECK(begin_ == nullptr);
    CHECK(base_begin_ == nullptr);
    CHECK_EQ(base_size_, 0U);
  } else {
    CHECK(begin_ != nullptr);
    CHECK(base_begin_ != nullptr);
    CHECK_NE(base_size_, 0U);

    // Add it to maps_.
    MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
    DCHECK(maps_ != nullptr);
    maps_->insert(std::make_pair(base_begin_, this));
  }
}

MemMap* MemMap::RemapAtEnd(uint8_t* new_end, const char* tail_name, int tail_prot,
                           std::string* error_msg, bool use_ashmem) {
  DCHECK_GE(new_end, Begin());
  DCHECK_LE(new_end, End());
  DCHECK_LE(begin_ + size_, reinterpret_cast<uint8_t*>(base_begin_) + base_size_);
  DCHECK_ALIGNED(begin_, kPageSize);
  DCHECK_ALIGNED(base_begin_, kPageSize);
  DCHECK_ALIGNED(reinterpret_cast<uint8_t*>(base_begin_) + base_size_, kPageSize);
  DCHECK_ALIGNED(new_end, kPageSize);
  uint8_t* old_end = begin_ + size_;
  uint8_t* old_base_end = reinterpret_cast<uint8_t*>(base_begin_) + base_size_;
  uint8_t* new_base_end = new_end;
  DCHECK_LE(new_base_end, old_base_end);
  if (new_base_end == old_base_end) {
    return new MemMap(tail_name, nullptr, 0, nullptr, 0, tail_prot, false);
  }
  size_ = new_end - reinterpret_cast<uint8_t*>(begin_);
  base_size_ = new_base_end - reinterpret_cast<uint8_t*>(base_begin_);
  DCHECK_LE(begin_ + size_, reinterpret_cast<uint8_t*>(base_begin_) + base_size_);
  size_t tail_size = old_end - new_end;
  uint8_t* tail_base_begin = new_base_end;
  size_t tail_base_size = old_base_end - new_base_end;
  DCHECK_EQ(tail_base_begin + tail_base_size, old_base_end);
  DCHECK_ALIGNED(tail_base_size, kPageSize);

  int int_fd = -1;
  int flags = MAP_PRIVATE | MAP_ANONYMOUS;
  if (use_ashmem) {
    // android_os_Debug.cpp read_mapinfo assumes all ashmem regions associated with the VM are
    // prefixed "dalvik-".
    std::string debug_friendly_name("dalvik-");
    debug_friendly_name += tail_name;
    int_fd = ashmem_create_region(debug_friendly_name.c_str(), tail_base_size);
    flags = MAP_PRIVATE | MAP_FIXED;
    if (int_fd == -1) {
      *error_msg = StringPrintf("ashmem_create_region failed for '%s': %s",
                                tail_name, strerror(errno));
      return nullptr;
    }
  }
  ScopedFd fd(int_fd);

  MEMORY_TOOL_MAKE_UNDEFINED(tail_base_begin, tail_base_size);
  // Unmap/map the tail region.
  int result = munmap(tail_base_begin, tail_base_size);
  if (result == -1) {
    PrintFileToLog("/proc/self/maps", LogSeverity::WARNING);
    *error_msg = StringPrintf("munmap(%p, %zd) failed for '%s'. See process maps in the log.",
                              tail_base_begin, tail_base_size, name_.c_str());
    return nullptr;
  }
  // Don't cause memory allocation between the munmap and the mmap
  // calls. Otherwise, libc (or something else) might take this memory
  // region. Note this isn't perfect as there's no way to prevent
  // other threads from trying to take this memory region here.
  uint8_t* actual = reinterpret_cast<uint8_t*>(mmap(tail_base_begin, tail_base_size, tail_prot,
                                              flags, fd.get(), 0));
  if (actual == MAP_FAILED) {
    PrintFileToLog("/proc/self/maps", LogSeverity::WARNING);
    *error_msg = StringPrintf("anonymous mmap(%p, %zd, 0x%x, 0x%x, %d, 0) failed. See process "
                              "maps in the log.", tail_base_begin, tail_base_size, tail_prot, flags,
                              fd.get());
    return nullptr;
  }
  return new MemMap(tail_name, actual, tail_size, actual, tail_base_size, tail_prot, false);
}
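// Usage sketch (hypothetical caller; names and sizes are made up): splitting a
// read-only tail off an existing, sufficiently large map.
//
//   std::string error_msg;
//   std::unique_ptr<MemMap> tail(map->RemapAtEnd(map->Begin() + 8 * kPageSize,
//                                                "some-tail",
//                                                PROT_READ,
//                                                &error_msg));
//   CHECK(tail != nullptr) << error_msg;
//   // 'map' now ends where 'tail' begins.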

void MemMap::MadviseDontNeedAndZero() {
  if (base_begin_ != nullptr || base_size_ != 0) {
    if (!kMadviseZeroes) {
      memset(base_begin_, 0, base_size_);
    }
    int result = madvise(base_begin_, base_size_, MADV_DONTNEED);
    if (result == -1) {
      PLOG(WARNING) << "madvise failed";
    }
  }
}

bool MemMap::Sync() {
  bool result;
  if (redzone_size_ != 0) {
    // To avoid valgrind errors, temporarily lift the lower-end noaccess protection before passing
    // it to msync() as it only accepts page-aligned base address, and exclude the higher-end
    // noaccess protection from the msync range. b/27552451.
    uint8_t* base_begin = reinterpret_cast<uint8_t*>(base_begin_);
    MEMORY_TOOL_MAKE_DEFINED(base_begin, begin_ - base_begin);
    result = msync(BaseBegin(), End() - base_begin, MS_SYNC) == 0;
    MEMORY_TOOL_MAKE_NOACCESS(base_begin, begin_ - base_begin);
  } else {
    result = msync(BaseBegin(), BaseSize(), MS_SYNC) == 0;
  }
  return result;
}

bool MemMap::Protect(int prot) {
  if (base_begin_ == nullptr && base_size_ == 0) {
    prot_ = prot;
    return true;
  }

  if (mprotect(base_begin_, base_size_, prot) == 0) {
    prot_ = prot;
    return true;
  }

  PLOG(ERROR) << "mprotect(" << reinterpret_cast<void*>(base_begin_) << ", " << base_size_ << ", "
              << prot << ") failed";
  return false;
}

bool MemMap::CheckNoGaps(MemMap* begin_map, MemMap* end_map) {
  MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
  CHECK(begin_map != nullptr);
  CHECK(end_map != nullptr);
  CHECK(HasMemMap(begin_map));
  CHECK(HasMemMap(end_map));
  CHECK_LE(begin_map->BaseBegin(), end_map->BaseBegin());
  MemMap* map = begin_map;
  while (map->BaseBegin() != end_map->BaseBegin()) {
    MemMap* next_map = GetLargestMemMapAt(map->BaseEnd());
    if (next_map == nullptr) {
      // Found a gap.
      return false;
    }
    map = next_map;
  }
  return true;
}

void MemMap::DumpMaps(std::ostream& os, bool terse) {
  MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
  DumpMapsLocked(os, terse);
}

void MemMap::DumpMapsLocked(std::ostream& os, bool terse) {
  const auto& mem_maps = *maps_;
  if (!terse) {
    os << mem_maps;
    return;
  }

  // Terse output example:
  //   [MemMap: 0x409be000+0x20P~0x11dP+0x20P~0x61cP+0x20P prot=0x3 LinearAlloc]
  //   [MemMap: 0x451d6000+0x6bP(3) prot=0x3 large object space allocation]
  // The details:
  //   "+0x20P" means 0x20 pages taken by a single mapping,
  //   "~0x11dP" means a gap of 0x11d pages,
  //   "+0x6bP(3)" means 3 mappings one after another, together taking 0x6b pages.
  os << "MemMap:" << std::endl;
  for (auto it = mem_maps.begin(), maps_end = mem_maps.end(); it != maps_end;) {
    MemMap* map = it->second;
    void* base = it->first;
    CHECK_EQ(base, map->BaseBegin());
    os << "[MemMap: " << base;
    ++it;
    // Merge consecutive maps with the same protect flags and name.
    constexpr size_t kMaxGaps = 9;
    size_t num_gaps = 0;
    size_t num = 1u;
    size_t size = map->BaseSize();
    CHECK_ALIGNED(size, kPageSize);
    void* end = map->BaseEnd();
    while (it != maps_end &&
        it->second->GetProtect() == map->GetProtect() &&
        it->second->GetName() == map->GetName() &&
        (it->second->BaseBegin() == end || num_gaps < kMaxGaps)) {
      if (it->second->BaseBegin() != end) {
        ++num_gaps;
        os << "+0x" << std::hex << (size / kPageSize) << "P";
        if (num != 1u) {
          os << "(" << std::dec << num << ")";
        }
        size_t gap =
            reinterpret_cast<uintptr_t>(it->second->BaseBegin()) - reinterpret_cast<uintptr_t>(end);
        CHECK_ALIGNED(gap, kPageSize);
        os << "~0x" << std::hex << (gap / kPageSize) << "P";
        num = 0u;
        size = 0u;
      }
      CHECK_ALIGNED(it->second->BaseSize(), kPageSize);
      ++num;
      size += it->second->BaseSize();
      end = it->second->BaseEnd();
      ++it;
    }
    os << "+0x" << std::hex << (size / kPageSize) << "P";
    if (num != 1u) {
      os << "(" << std::dec << num << ")";
    }
    os << " prot=0x" << std::hex << map->GetProtect() << " " << map->GetName() << "]" << std::endl;
  }
}

bool MemMap::HasMemMap(MemMap* map) {
  void* base_begin = map->BaseBegin();
  for (auto it = maps_->lower_bound(base_begin), end = maps_->end();
       it != end && it->first == base_begin; ++it) {
    if (it->second == map) {
      return true;
    }
  }
  return false;
}

MemMap* MemMap::GetLargestMemMapAt(void* address) {
  size_t largest_size = 0;
  MemMap* largest_map = nullptr;
  DCHECK(maps_ != nullptr);
  for (auto it = maps_->lower_bound(address), end = maps_->end();
       it != end && it->first == address; ++it) {
    MemMap* map = it->second;
    CHECK(map != nullptr);
    if (largest_size < map->BaseSize()) {
      largest_size = map->BaseSize();
      largest_map = map;
    }
  }
  return largest_map;
}

void MemMap::Init() {
  MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
  if (maps_ == nullptr) {
    // dex2oat calls MemMap::Init twice since it's needed before the runtime is created.
    maps_ = new Maps;
  }
}

void MemMap::Shutdown() {
  MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
  delete maps_;
  maps_ = nullptr;
}

void MemMap::SetSize(size_t new_size) {
  if (new_size == base_size_) {
    return;
  }
  CHECK_ALIGNED(new_size, kPageSize);
  CHECK_EQ(base_size_, size_) << "Unsupported";
  CHECK_LE(new_size, base_size_);
  MEMORY_TOOL_MAKE_UNDEFINED(
      reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(BaseBegin()) +
                              new_size),
      base_size_ - new_size);
  CHECK_EQ(munmap(reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(BaseBegin()) + new_size),
                  base_size_ - new_size), 0) << new_size << " " << base_size_;
  base_size_ = new_size;
  size_ = new_size;
}

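// Placement strategy for MapInternal (summarized from the code below): on 64-bit targets
// without MAP_32BIT, USE_ART_LOW_4G_ALLOCATOR selects a linear scan of the low 4GB that
// starts at the randomized next_mem_pos_, consults maps_ to skip over our own mappings, and
// probes with msync() for foreign ones; on x86-64 the kernel's MAP_32BIT flag does the work
// instead; everywhere else the request goes straight to mmap().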
void* MemMap::MapInternal(void* addr,
                          size_t length,
                          int prot,
                          int flags,
                          int fd,
                          off_t offset,
                          bool low_4gb) {
#ifdef __LP64__
  // When low_4gb is requested together with an expected address, the requested range should fit
  // into 4GB.
  if (low_4gb && (
      // Start out of bounds.
      (reinterpret_cast<uintptr_t>(addr) >> 32) != 0 ||
      // End out of bounds. For simplicity, this will fail for the last page of memory.
      ((reinterpret_cast<uintptr_t>(addr) + length) >> 32) != 0)) {
    LOG(ERROR) << "The requested address space (" << addr << ", "
               << reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(addr) + length)
               << ") cannot fit in low_4gb";
    return MAP_FAILED;
  }
#else
  UNUSED(low_4gb);
#endif
  DCHECK_ALIGNED(length, kPageSize);
  if (low_4gb) {
    DCHECK_EQ(flags & MAP_FIXED, 0);
  }
  // TODO:
  // A page allocator would be a useful abstraction here, as
  // 1) It is doubtful that MAP_32BIT on x86_64 is doing the right job for us
  void* actual = MAP_FAILED;
#if USE_ART_LOW_4G_ALLOCATOR
  // MAP_32BIT only available on x86_64.
  if (low_4gb && addr == nullptr) {
    bool first_run = true;

    MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
    for (uintptr_t ptr = next_mem_pos_; ptr < 4 * GB; ptr += kPageSize) {
      // Use maps_ as an optimization to skip over large maps.
      // Find the first map which is address > ptr.
      auto it = maps_->upper_bound(reinterpret_cast<void*>(ptr));
      if (it != maps_->begin()) {
        auto before_it = it;
        --before_it;
        // Start at the end of the map before the upper bound.
        ptr = std::max(ptr, reinterpret_cast<uintptr_t>(before_it->second->BaseEnd()));
        CHECK_ALIGNED(ptr, kPageSize);
      }
      while (it != maps_->end()) {
        // How much space do we have until the next map?
        size_t delta = reinterpret_cast<uintptr_t>(it->first) - ptr;
        // If the space may be sufficient, break out of the loop.
        if (delta >= length) {
          break;
        }
        // Otherwise, skip to the end of the map.
        ptr = reinterpret_cast<uintptr_t>(it->second->BaseEnd());
        CHECK_ALIGNED(ptr, kPageSize);
        ++it;
      }

      // Try to see if we get lucky with this address since none of the ART maps overlap.
      actual = TryMemMapLow4GB(reinterpret_cast<void*>(ptr), length, prot, flags, fd, offset);
      if (actual != MAP_FAILED) {
        next_mem_pos_ = reinterpret_cast<uintptr_t>(actual) + length;
        return actual;
      }

      if (4U * GB - ptr < length) {
        // Not enough memory until 4GB.
        if (first_run) {
          // Try again from the bottom.
          ptr = LOW_MEM_START - kPageSize;
          first_run = false;
          continue;
        } else {
          // Second try failed.
          break;
        }
      }

      uintptr_t tail_ptr;

      // Check pages are free.
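      // (msync() fails with ENOMEM when the range is not mapped, so a successful
      // call means the page is already occupied by some other mapping.)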
      bool safe = true;
      for (tail_ptr = ptr; tail_ptr < ptr + length; tail_ptr += kPageSize) {
        if (msync(reinterpret_cast<void*>(tail_ptr), kPageSize, 0) == 0) {
          safe = false;
          break;
        } else {
          DCHECK_EQ(errno, ENOMEM);
        }
      }

      next_mem_pos_ = tail_ptr;  // update early, as we break out when we found and mapped a region

      if (safe) {
        actual = TryMemMapLow4GB(reinterpret_cast<void*>(ptr), length, prot, flags, fd, offset);
        if (actual != MAP_FAILED) {
          return actual;
        }
      } else {
        // Skip over last page.
        ptr = tail_ptr;
      }
    }

    if (actual == MAP_FAILED) {
      LOG(ERROR) << "Could not find contiguous low-memory space.";
      errno = ENOMEM;
    }
  } else {
    actual = mmap(addr, length, prot, flags, fd, offset);
  }

#else
#if defined(__LP64__)
  if (low_4gb && addr == nullptr) {
    flags |= MAP_32BIT;
  }
#endif
  actual = mmap(addr, length, prot, flags, fd, offset);
#endif
  return actual;
}

std::ostream& operator<<(std::ostream& os, const MemMap& mem_map) {
  os << StringPrintf("[MemMap: %p-%p prot=0x%x %s]",
                     mem_map.BaseBegin(), mem_map.BaseEnd(), mem_map.GetProtect(),
                     mem_map.GetName().c_str());
  return os;
}

void MemMap::TryReadable() {
  if (base_begin_ == nullptr && base_size_ == 0) {
    return;
  }
  CHECK_NE(prot_ & PROT_READ, 0);
  volatile uint8_t* begin = reinterpret_cast<volatile uint8_t*>(base_begin_);
  volatile uint8_t* end = begin + base_size_;
  DCHECK(IsAligned<kPageSize>(begin));
  DCHECK(IsAligned<kPageSize>(end));
  // Read the first byte of each page. Use volatile to prevent the compiler from optimizing away
  // the reads.
  for (volatile uint8_t* ptr = begin; ptr < end; ptr += kPageSize) {
    // This read could fault if protection wasn't set correctly.
    uint8_t value = *ptr;
    UNUSED(value);
  }
}

}  // namespace art