/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "mem_map.h"

#include <backtrace/BacktraceMap.h>
#include <inttypes.h>

#include <memory>
#include <sstream>

// See CreateStartPos below.
#ifdef __BIONIC__
#include <sys/auxv.h>
#endif

#include "base/stringprintf.h"

#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wshadow"
#include "ScopedFd.h"
#pragma GCC diagnostic pop

#include "thread-inl.h"
#include "utils.h"

#define USE_ASHMEM 1

#ifdef USE_ASHMEM
#include <cutils/ashmem.h>
#ifndef ANDROID_OS
#include <sys/resource.h>
#endif
#endif

#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS MAP_ANON
#endif

namespace art {

static std::ostream& operator<<(
    std::ostream& os,
    std::pair<BacktraceMap::const_iterator, BacktraceMap::const_iterator> iters) {
  for (BacktraceMap::const_iterator it = iters.first; it != iters.second; ++it) {
    os << StringPrintf("0x%08x-0x%08x %c%c%c %s\n",
                       static_cast<uint32_t>(it->start),
                       static_cast<uint32_t>(it->end),
                       (it->flags & PROT_READ) ? 'r' : '-',
                       (it->flags & PROT_WRITE) ? 'w' : '-',
                       (it->flags & PROT_EXEC) ? 'x' : '-', it->name.c_str());
  }
  return os;
}

std::ostream& operator<<(std::ostream& os, const MemMap::Maps& mem_maps) {
  os << "MemMap:" << std::endl;
  for (auto it = mem_maps.begin(); it != mem_maps.end(); ++it) {
    void* base = it->first;
    MemMap* map = it->second;
    CHECK_EQ(base, map->BaseBegin());
    os << *map << std::endl;
  }
  return os;
}

MemMap::Maps* MemMap::maps_ = nullptr;

#if USE_ART_LOW_4G_ALLOCATOR
// Handling mem_map in 32b address range for 64b architectures that do not support MAP_32BIT.

// The regular start of memory allocations. The first 64KB is protected by SELinux.
static constexpr uintptr_t LOW_MEM_START = 64 * KB;

// Generate a random starting position.
// To not interfere with the image position, take the image's address and only place the generated
// position below it. Current formula (sketch):
//
// ART_BASE_ADDR      = 0001XXXXXXXXXXXXXXX
// ----------------------------------------
//                    = 0000111111111111111
// & ~(kPageSize - 1) = ~0000000000000001111
// ----------------------------------------
// mask               = 0000111111111110000
// & random data      = YYYYYYYYYYYYYYYYYYY
// ----------------------------------------
// tmp                = 0000YYYYYYYYYYY0000
// + LOW_MEM_START    = 0000000000001000000
// ----------------------------------------
// start
//
// getauxval as an entropy source is exposed in Bionic, but not in glibc before 2.16. When we
// do not have Bionic, simply start with LOW_MEM_START.
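//
// Worked example (a sketch; assumes ART_BASE_ADDRESS is 0x70000000 and kPageSize is 4096, which
// are typical but build-configuration dependent):
//   CLZ(0x70000000) == 1, so mask_ones == (1 << 30) - 1 == 0x3fffffff,
//   mask  == 0x3fffffff & ~0xfff == 0x3ffff000,
//   start == (input & 0x3ffff000) + LOW_MEM_START, a page-aligned address in roughly the first 1GB.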

// The function is standalone so that it can be tested somewhat in mem_map_test.cc.
#ifdef __BIONIC__
uintptr_t CreateStartPos(uint64_t input) {
  CHECK_NE(0, ART_BASE_ADDRESS);

  // Start with all bits below the highest bit in ART_BASE_ADDRESS.
  constexpr size_t leading_zeros = CLZ(static_cast<uint32_t>(ART_BASE_ADDRESS));
  constexpr uintptr_t mask_ones = (1 << (31 - leading_zeros)) - 1;

  // The lowest (usually 12) bits are not used, as the result is aligned to the page size.
  constexpr uintptr_t mask = mask_ones & ~(kPageSize - 1);

  // Mask the input data.
  return (input & mask) + LOW_MEM_START;
}
#endif

static uintptr_t GenerateNextMemPos() {
#ifdef __BIONIC__
  uint8_t* random_data = reinterpret_cast<uint8_t*>(getauxval(AT_RANDOM));
  // The lower 8B are taken for the stack guard. Use the upper 8B (with mask).
  return CreateStartPos(*reinterpret_cast<uintptr_t*>(random_data + 8));
#else
  // No auxv on host, see above.
  return LOW_MEM_START;
#endif
}

// Initialize the linear scan to a random position.
uintptr_t MemMap::next_mem_pos_ = GenerateNextMemPos();
#endif

// Return true if the address range is contained in a single /proc/self/maps entry.
static bool ContainedWithinExistingMap(uint8_t* ptr, size_t size,
                                       std::string* error_msg) {
  uintptr_t begin = reinterpret_cast<uintptr_t>(ptr);
  uintptr_t end = begin + size;
  std::unique_ptr<BacktraceMap> map(BacktraceMap::Create(getpid(), true));
  if (map.get() == nullptr) {
    *error_msg = StringPrintf("Failed to build process map");
    return false;
  }
  for (BacktraceMap::const_iterator it = map->begin(); it != map->end(); ++it) {
    if ((begin >= it->start && begin < it->end)  // start of new within old
        && (end > it->start && end <= it->end)) {  // end of new within old
      return true;
    }
  }
  PrintFileToLog("/proc/self/maps", LogSeverity::ERROR);
  *error_msg = StringPrintf("Requested region 0x%08" PRIxPTR "-0x%08" PRIxPTR " does not overlap "
                            "any existing map. See process maps in the log.", begin, end);
  return false;
}

// Return true if the address range does not conflict with any /proc/self/maps entry.
static bool CheckNonOverlapping(uintptr_t begin,
                                uintptr_t end,
                                std::string* error_msg) {
  std::unique_ptr<BacktraceMap> map(BacktraceMap::Create(getpid(), true));
  if (map.get() == nullptr) {
    *error_msg = StringPrintf("Failed to build process map");
    return false;
  }
  for (BacktraceMap::const_iterator it = map->begin(); it != map->end(); ++it) {
    if ((begin >= it->start && begin < it->end)      // start of new within old
        || (end > it->start && end < it->end)        // end of new within old
        || (begin <= it->start && end > it->end)) {  // start/end of new includes all of old
      std::ostringstream map_info;
      map_info << std::make_pair(it, map->end());
      *error_msg = StringPrintf("Requested region 0x%08" PRIxPTR "-0x%08" PRIxPTR " overlaps with "
                                "existing map 0x%08" PRIxPTR "-0x%08" PRIxPTR " (%s)\n%s",
                                begin, end,
                                static_cast<uintptr_t>(it->start), static_cast<uintptr_t>(it->end),
                                it->name.c_str(),
                                map_info.str().c_str());
      return false;
    }
  }
  return true;
}

// CheckMapRequest validates a non-MAP_FAILED mmap result against the
// expected value, calling munmap and giving the reason in error_msg
// if validation fails.
//
// If expected_ptr is null, nothing is checked beyond the fact that
// actual_ptr is not MAP_FAILED. However, if expected_ptr is non-null,
// we check that actual_ptr == expected_ptr and, if not, report in
// error_msg the conflicting mapping if one was found, or a generic
// error in other cases.
static bool CheckMapRequest(uint8_t* expected_ptr, void* actual_ptr, size_t byte_count,
                            std::string* error_msg) {
  // Handled first by the caller for more specific error messages.
  CHECK(actual_ptr != MAP_FAILED);

  if (expected_ptr == nullptr) {
    return true;
  }

  uintptr_t actual = reinterpret_cast<uintptr_t>(actual_ptr);
  uintptr_t expected = reinterpret_cast<uintptr_t>(expected_ptr);
  uintptr_t limit = expected + byte_count;

  if (expected_ptr == actual_ptr) {
    return true;
  }

  // We asked for an address but didn't get what we wanted; all paths below here should fail.
  int result = munmap(actual_ptr, byte_count);
  if (result == -1) {
    PLOG(WARNING) << StringPrintf("munmap(%p, %zd) failed", actual_ptr, byte_count);
  }

  // We call this here so that we can try to generate a full error
  // message with the overlapping mapping. There's no guarantee that
  // there will be an overlap though, since
  // - The kernel is not *required* to honor expected_ptr unless MAP_FIXED is
  //   used, even if there is no overlap
  // - There might have been an overlap at the point of mmap, but the
  //   overlapping region has since been unmapped.
  std::string error_detail;
  CheckNonOverlapping(expected, limit, &error_detail);

  std::ostringstream os;
  os << StringPrintf("Failed to mmap at expected address, mapped at "
                     "0x%08" PRIxPTR " instead of 0x%08" PRIxPTR,
                     actual, expected);
  if (!error_detail.empty()) {
    os << " : " << error_detail;
  }

  *error_msg = os.str();
  return false;
}

#if USE_ART_LOW_4G_ALLOCATOR
static inline void* TryMemMapLow4GB(void* ptr, size_t page_aligned_byte_count, int prot, int flags,
                                    int fd) {
  void* actual = mmap(ptr, page_aligned_byte_count, prot, flags, fd, 0);
  if (actual != MAP_FAILED) {
    // Since we didn't use MAP_FIXED the kernel may have mapped it somewhere not in the low
    // 4GB. If this is the case, unmap and retry.
    if (reinterpret_cast<uintptr_t>(actual) + page_aligned_byte_count >= 4 * GB) {
      munmap(actual, page_aligned_byte_count);
      actual = MAP_FAILED;
    }
  }
  return actual;
}
#endif

MemMap* MemMap::MapAnonymous(const char* name, uint8_t* expected_ptr, size_t byte_count, int prot,
                             bool low_4gb, bool reuse, std::string* error_msg) {
#ifndef __LP64__
  UNUSED(low_4gb);
#endif
  if (byte_count == 0) {
    return new MemMap(name, nullptr, 0, nullptr, 0, prot, false);
  }
  size_t page_aligned_byte_count = RoundUp(byte_count, kPageSize);

  int flags = MAP_PRIVATE | MAP_ANONYMOUS;
  if (reuse) {
    // reuse means it is okay that it overlaps an existing page mapping.
    // Only use this if you actually made the page reservation yourself.
    CHECK(expected_ptr != nullptr);

    DCHECK(ContainedWithinExistingMap(expected_ptr, byte_count, error_msg)) << *error_msg;
    flags |= MAP_FIXED;
  }

  ScopedFd fd(-1);

#ifdef USE_ASHMEM
#ifdef HAVE_ANDROID_OS
  const bool use_ashmem = true;
#else
  // When not on Android, ashmem is faked using files in /tmp. Ensure that such files won't
  // fail due to ulimit restrictions. If they would, use a regular mmap instead.
  struct rlimit rlimit_fsize;
  CHECK_EQ(getrlimit(RLIMIT_FSIZE, &rlimit_fsize), 0);
  const bool use_ashmem = (rlimit_fsize.rlim_cur == RLIM_INFINITY) ||
      (page_aligned_byte_count < rlimit_fsize.rlim_cur);
#endif
  if (use_ashmem) {
    // android_os_Debug.cpp read_mapinfo assumes all ashmem regions associated with the VM are
    // prefixed "dalvik-".
    std::string debug_friendly_name("dalvik-");
    debug_friendly_name += name;
    fd.reset(ashmem_create_region(debug_friendly_name.c_str(), page_aligned_byte_count));
    if (fd.get() == -1) {
      *error_msg = StringPrintf("ashmem_create_region failed for '%s': %s", name, strerror(errno));
      return nullptr;
    }
    flags &= ~MAP_ANONYMOUS;
  }
#endif

  // We need to store and potentially set an error number for pretty printing of errors.
  int saved_errno = 0;

#ifdef __LP64__
  // When requesting low_4gb memory with an expected address, the requested range should fit into
  // 4GB.
  if (low_4gb && (
      // Start out of bounds.
      (reinterpret_cast<uintptr_t>(expected_ptr) >> 32) != 0 ||
      // End out of bounds. For simplicity, this will fail for the last page of memory.
      (reinterpret_cast<uintptr_t>(expected_ptr + page_aligned_byte_count) >> 32) != 0)) {
    *error_msg = StringPrintf("The requested address space (%p, %p) cannot fit in low_4gb",
                              expected_ptr, expected_ptr + page_aligned_byte_count);
    return nullptr;
  }
#endif

  // TODO:
  // A page allocator would be a useful abstraction here, as
  // 1) It is doubtful that MAP_32BIT on x86_64 is doing the right job for us
  // 2) The linear scheme, even with simple saving of the last known position, is very crude
#if USE_ART_LOW_4G_ALLOCATOR
  // MAP_32BIT is only available on x86_64.
  void* actual = MAP_FAILED;
  if (low_4gb && expected_ptr == nullptr) {
    bool first_run = true;

    MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
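    // A sketch of the scan below: starting from next_mem_pos_, walk the low 4GB page by page,
    // use maps_ to skip past regions ART already owns, probe candidate pages with msync to see
    // whether anything is mapped there, and try to mmap the first gap that looks large enough.
    // If the scan reaches 4GB without success, retry once from LOW_MEM_START.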
    for (uintptr_t ptr = next_mem_pos_; ptr < 4 * GB; ptr += kPageSize) {
      // Use maps_ as an optimization to skip over large maps.
      // Find the first map whose address is > ptr.
      auto it = maps_->upper_bound(reinterpret_cast<void*>(ptr));
      if (it != maps_->begin()) {
        auto before_it = it;
        --before_it;
        // Start at the end of the map before the upper bound.
        ptr = std::max(ptr, reinterpret_cast<uintptr_t>(before_it->second->BaseEnd()));
        CHECK_ALIGNED(ptr, kPageSize);
      }
      while (it != maps_->end()) {
        // How much space do we have until the next map?
        size_t delta = reinterpret_cast<uintptr_t>(it->first) - ptr;
        // If the space may be sufficient, break out of the loop.
        if (delta >= page_aligned_byte_count) {
          break;
        }
        // Otherwise, skip to the end of the map.
        ptr = reinterpret_cast<uintptr_t>(it->second->BaseEnd());
        CHECK_ALIGNED(ptr, kPageSize);
        ++it;
      }

      // Try to see if we get lucky with this address since none of the ART maps overlap.
      actual = TryMemMapLow4GB(reinterpret_cast<void*>(ptr), page_aligned_byte_count, prot, flags,
                               fd.get());
      if (actual != MAP_FAILED) {
        next_mem_pos_ = reinterpret_cast<uintptr_t>(actual) + page_aligned_byte_count;
        break;
      }

      if (4U * GB - ptr < page_aligned_byte_count) {
        // Not enough memory until 4GB.
        if (first_run) {
          // Try another time from the bottom.
          ptr = LOW_MEM_START - kPageSize;
          first_run = false;
          continue;
        } else {
          // Second try failed.
          break;
        }
      }

      uintptr_t tail_ptr;

      // Check pages are free.
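      // A page counts as free if msync on it fails with ENOMEM, i.e. the kernel has no mapping
      // there; if msync succeeds, something is already mapped at that address.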
      bool safe = true;
      for (tail_ptr = ptr; tail_ptr < ptr + page_aligned_byte_count; tail_ptr += kPageSize) {
        if (msync(reinterpret_cast<void*>(tail_ptr), kPageSize, 0) == 0) {
          safe = false;
          break;
        } else {
          DCHECK_EQ(errno, ENOMEM);
        }
      }

      next_mem_pos_ = tail_ptr;  // Update early, as we break out when we find and map a region.

      if (safe) {
        actual = TryMemMapLow4GB(reinterpret_cast<void*>(ptr), page_aligned_byte_count, prot, flags,
                                 fd.get());
        if (actual != MAP_FAILED) {
          break;
        }
      } else {
        // Skip over the last page.
        ptr = tail_ptr;
      }
    }

    if (actual == MAP_FAILED) {
      LOG(ERROR) << "Could not find contiguous low-memory space.";
      saved_errno = ENOMEM;
    }
  } else {
    actual = mmap(expected_ptr, page_aligned_byte_count, prot, flags, fd.get(), 0);
    saved_errno = errno;
  }

#else
#if defined(__LP64__)
  if (low_4gb && expected_ptr == nullptr) {
    flags |= MAP_32BIT;
  }
#endif

  void* actual = mmap(expected_ptr, page_aligned_byte_count, prot, flags, fd.get(), 0);
  saved_errno = errno;
#endif

  if (actual == MAP_FAILED) {
    PrintFileToLog("/proc/self/maps", LogSeverity::WARNING);

    *error_msg = StringPrintf("Failed anonymous mmap(%p, %zd, 0x%x, 0x%x, %d, 0): %s. See process "
                              "maps in the log.", expected_ptr, page_aligned_byte_count, prot,
                              flags, fd.get(), strerror(saved_errno));
    return nullptr;
  }
  std::ostringstream check_map_request_error_msg;
  if (!CheckMapRequest(expected_ptr, actual, page_aligned_byte_count, error_msg)) {
    return nullptr;
  }
  return new MemMap(name, reinterpret_cast<uint8_t*>(actual), byte_count, actual,
                    page_aligned_byte_count, prot, false);
}

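// MapDummy wraps an address range that is owned elsewhere. Since the MemMap is created with
// reuse == true, the destructor will not munmap the range.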
MemMap* MemMap::MapDummy(const char* name, uint8_t* addr, size_t byte_count) {
  if (byte_count == 0) {
    return new MemMap(name, nullptr, 0, nullptr, 0, 0, false);
  }
  const size_t page_aligned_byte_count = RoundUp(byte_count, kPageSize);
  return new MemMap(name, addr, byte_count, addr, page_aligned_byte_count, 0, true /* reuse */);
}

MemMap* MemMap::MapFileAtAddress(uint8_t* expected_ptr, size_t byte_count, int prot, int flags,
                                 int fd, off_t start, bool reuse, const char* filename,
                                 std::string* error_msg) {
  CHECK_NE(0, prot);
  CHECK_NE(0, flags & (MAP_SHARED | MAP_PRIVATE));

  // Note that we do not allow MAP_FIXED unless reuse == true, i.e. we
  // expect this mapping to be contained within an existing map.
  if (reuse) {
    // reuse means it is okay that it overlaps an existing page mapping.
    // Only use this if you actually made the page reservation yourself.
    CHECK(expected_ptr != nullptr);

    DCHECK(ContainedWithinExistingMap(expected_ptr, byte_count, error_msg)) << *error_msg;
    flags |= MAP_FIXED;
  } else {
    CHECK_EQ(0, flags & MAP_FIXED);
    // Don't bother checking for an overlapping region here. We'll
    // check this if required after the fact inside CheckMapRequest.
  }

  if (byte_count == 0) {
    return new MemMap(filename, nullptr, 0, nullptr, 0, prot, false);
  }
  // Adjust the file offset 'start' to be page-aligned as required by mmap.
  int page_offset = start % kPageSize;
  off_t page_aligned_offset = start - page_offset;
  // Adjust 'byte_count' to be page-aligned as we will map this anyway.
  size_t page_aligned_byte_count = RoundUp(byte_count + page_offset, kPageSize);
  // The 'expected_ptr' is modified (if specified, i.e. non-null) to be page-aligned to the file
  // but not necessarily to virtual memory. mmap will page-align 'expected' for us.
  uint8_t* page_aligned_expected =
      (expected_ptr == nullptr) ? nullptr : (expected_ptr - page_offset);
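  // For example (a sketch, assuming kPageSize is 4096): with start == 0x1234 and
  // byte_count == 0x100, page_offset == 0x234, page_aligned_offset == 0x1000 and
  // page_aligned_byte_count == 0x1000; the returned map's Begin() then points page_offset
  // bytes past the page-aligned mapping address.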

  uint8_t* actual = reinterpret_cast<uint8_t*>(mmap(page_aligned_expected,
                                              page_aligned_byte_count,
                                              prot,
                                              flags,
                                              fd,
                                              page_aligned_offset));
  if (actual == MAP_FAILED) {
    auto saved_errno = errno;

    PrintFileToLog("/proc/self/maps", LogSeverity::WARNING);

    *error_msg = StringPrintf("mmap(%p, %zd, 0x%x, 0x%x, %d, %" PRId64
                              ") of file '%s' failed: %s. See process maps in the log.",
                              page_aligned_expected, page_aligned_byte_count, prot, flags, fd,
                              static_cast<int64_t>(page_aligned_offset), filename,
                              strerror(saved_errno));
    return nullptr;
  }
  std::ostringstream check_map_request_error_msg;
  if (!CheckMapRequest(expected_ptr, actual, page_aligned_byte_count, error_msg)) {
    return nullptr;
  }
  return new MemMap(filename, actual + page_offset, byte_count, actual, page_aligned_byte_count,
                    prot, reuse);
}

MemMap::~MemMap() {
  if (base_begin_ == nullptr && base_size_ == 0) {
    return;
  }
  if (!reuse_) {
    int result = munmap(base_begin_, base_size_);
    if (result == -1) {
      PLOG(FATAL) << "munmap failed";
    }
  }

  // Remove it from maps_.
  MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
  bool found = false;
  DCHECK(maps_ != nullptr);
  for (auto it = maps_->lower_bound(base_begin_), end = maps_->end();
       it != end && it->first == base_begin_; ++it) {
    if (it->second == this) {
      found = true;
      maps_->erase(it);
      break;
    }
  }
  CHECK(found) << "MemMap not found";
}

MemMap::MemMap(const std::string& name, uint8_t* begin, size_t size, void* base_begin,
               size_t base_size, int prot, bool reuse)
    : name_(name), begin_(begin), size_(size), base_begin_(base_begin), base_size_(base_size),
      prot_(prot), reuse_(reuse) {
  if (size_ == 0) {
    CHECK(begin_ == nullptr);
    CHECK(base_begin_ == nullptr);
    CHECK_EQ(base_size_, 0U);
  } else {
    CHECK(begin_ != nullptr);
    CHECK(base_begin_ != nullptr);
    CHECK_NE(base_size_, 0U);

    // Add it to maps_.
    MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
    DCHECK(maps_ != nullptr);
    maps_->insert(std::make_pair(base_begin_, this));
  }
}

MemMap* MemMap::RemapAtEnd(uint8_t* new_end, const char* tail_name, int tail_prot,
                           std::string* error_msg) {
  DCHECK_GE(new_end, Begin());
  DCHECK_LE(new_end, End());
  DCHECK_LE(begin_ + size_, reinterpret_cast<uint8_t*>(base_begin_) + base_size_);
  DCHECK(IsAligned<kPageSize>(begin_));
  DCHECK(IsAligned<kPageSize>(base_begin_));
  DCHECK(IsAligned<kPageSize>(reinterpret_cast<uint8_t*>(base_begin_) + base_size_));
  DCHECK(IsAligned<kPageSize>(new_end));
  uint8_t* old_end = begin_ + size_;
  uint8_t* old_base_end = reinterpret_cast<uint8_t*>(base_begin_) + base_size_;
  uint8_t* new_base_end = new_end;
  DCHECK_LE(new_base_end, old_base_end);
  if (new_base_end == old_base_end) {
    return new MemMap(tail_name, nullptr, 0, nullptr, 0, tail_prot, false);
  }
  size_ = new_end - reinterpret_cast<uint8_t*>(begin_);
  base_size_ = new_base_end - reinterpret_cast<uint8_t*>(base_begin_);
  DCHECK_LE(begin_ + size_, reinterpret_cast<uint8_t*>(base_begin_) + base_size_);
  size_t tail_size = old_end - new_end;
  uint8_t* tail_base_begin = new_base_end;
  size_t tail_base_size = old_base_end - new_base_end;
  DCHECK_EQ(tail_base_begin + tail_base_size, old_base_end);
  DCHECK(IsAligned<kPageSize>(tail_base_size));
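  // At this point the original mapping has been logically split in two:
  //   [base_begin_, new_end)   stays with this MemMap (size_ and base_size_ were shrunk above),
  //   [new_end, old_base_end)  becomes the tail, which is remapped below under tail_name.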

#ifdef USE_ASHMEM
  // android_os_Debug.cpp read_mapinfo assumes all ashmem regions associated with the VM are
  // prefixed "dalvik-".
  std::string debug_friendly_name("dalvik-");
  debug_friendly_name += tail_name;
  ScopedFd fd(ashmem_create_region(debug_friendly_name.c_str(), tail_base_size));
  int flags = MAP_PRIVATE | MAP_FIXED;
  if (fd.get() == -1) {
    *error_msg = StringPrintf("ashmem_create_region failed for '%s': %s",
                              tail_name, strerror(errno));
    return nullptr;
  }
#else
  ScopedFd fd(-1);
  int flags = MAP_PRIVATE | MAP_ANONYMOUS;
#endif

  // Unmap/map the tail region.
  int result = munmap(tail_base_begin, tail_base_size);
  if (result == -1) {
    PrintFileToLog("/proc/self/maps", LogSeverity::WARNING);
    *error_msg = StringPrintf("munmap(%p, %zd) failed for '%s'. See process maps in the log.",
                              tail_base_begin, tail_base_size, name_.c_str());
    return nullptr;
  }
  // Don't cause memory allocation between the munmap and the mmap
  // calls. Otherwise, libc (or something else) might take this memory
  // region. Note this isn't perfect as there's no way to prevent
  // other threads from trying to take this memory region here.
  uint8_t* actual = reinterpret_cast<uint8_t*>(mmap(tail_base_begin, tail_base_size, tail_prot,
                                              flags, fd.get(), 0));
  if (actual == MAP_FAILED) {
    PrintFileToLog("/proc/self/maps", LogSeverity::WARNING);
    *error_msg = StringPrintf("anonymous mmap(%p, %zd, 0x%x, 0x%x, %d, 0) failed. See process "
                              "maps in the log.", tail_base_begin, tail_base_size, tail_prot, flags,
                              fd.get());
    return nullptr;
  }
  return new MemMap(tail_name, actual, tail_size, actual, tail_base_size, tail_prot, false);
}

void MemMap::MadviseDontNeedAndZero() {
  if (base_begin_ != nullptr || base_size_ != 0) {
    if (!kMadviseZeroes) {
      memset(base_begin_, 0, base_size_);
    }
    int result = madvise(base_begin_, base_size_, MADV_DONTNEED);
    if (result == -1) {
      PLOG(WARNING) << "madvise failed";
    }
  }
}

bool MemMap::Protect(int prot) {
  if (base_begin_ == nullptr && base_size_ == 0) {
    prot_ = prot;
    return true;
  }

  if (mprotect(base_begin_, base_size_, prot) == 0) {
    prot_ = prot;
    return true;
  }

  PLOG(ERROR) << "mprotect(" << reinterpret_cast<void*>(base_begin_) << ", " << base_size_ << ", "
              << prot << ") failed";
  return false;
}

bool MemMap::CheckNoGaps(MemMap* begin_map, MemMap* end_map) {
  MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
  CHECK(begin_map != nullptr);
  CHECK(end_map != nullptr);
  CHECK(HasMemMap(begin_map));
  CHECK(HasMemMap(end_map));
  CHECK_LE(begin_map->BaseBegin(), end_map->BaseBegin());
  MemMap* map = begin_map;
  while (map->BaseBegin() != end_map->BaseBegin()) {
    MemMap* next_map = GetLargestMemMapAt(map->BaseEnd());
    if (next_map == nullptr) {
      // Found a gap.
      return false;
    }
    map = next_map;
  }
  return true;
}

void MemMap::DumpMaps(std::ostream& os, bool terse) {
  MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
  DumpMapsLocked(os, terse);
}

void MemMap::DumpMapsLocked(std::ostream& os, bool terse) {
  const auto& mem_maps = *maps_;
  if (!terse) {
    os << mem_maps;
    return;
  }

  // Terse output example:
  //   [MemMap: 0x409be000+0x20P~0x11dP+0x20P~0x61cP+0x20P prot=0x3 LinearAlloc]
  //   [MemMap: 0x451d6000+0x6bP(3) prot=0x3 large object space allocation]
  // The details:
  //   "+0x20P" means 0x20 pages taken by a single mapping,
  //   "~0x11dP" means a gap of 0x11d pages,
  //   "+0x6bP(3)" means 3 mappings one after another, together taking 0x6b pages.
  os << "MemMap:" << std::endl;
  for (auto it = mem_maps.begin(), maps_end = mem_maps.end(); it != maps_end;) {
    MemMap* map = it->second;
    void* base = it->first;
    CHECK_EQ(base, map->BaseBegin());
    os << "[MemMap: " << base;
    ++it;
    // Merge consecutive maps with the same protect flags and name.
    constexpr size_t kMaxGaps = 9;
    size_t num_gaps = 0;
    size_t num = 1u;
    size_t size = map->BaseSize();
    CHECK(IsAligned<kPageSize>(size));
    void* end = map->BaseEnd();
    while (it != maps_end &&
        it->second->GetProtect() == map->GetProtect() &&
        it->second->GetName() == map->GetName() &&
        (it->second->BaseBegin() == end || num_gaps < kMaxGaps)) {
      if (it->second->BaseBegin() != end) {
        ++num_gaps;
        os << "+0x" << std::hex << (size / kPageSize) << "P";
        if (num != 1u) {
          os << "(" << std::dec << num << ")";
        }
        size_t gap =
            reinterpret_cast<uintptr_t>(it->second->BaseBegin()) - reinterpret_cast<uintptr_t>(end);
        CHECK(IsAligned<kPageSize>(gap));
        os << "~0x" << std::hex << (gap / kPageSize) << "P";
        num = 0u;
        size = 0u;
      }
      CHECK(IsAligned<kPageSize>(it->second->BaseSize()));
      ++num;
      size += it->second->BaseSize();
      end = it->second->BaseEnd();
      ++it;
    }
    os << "+0x" << std::hex << (size / kPageSize) << "P";
    if (num != 1u) {
      os << "(" << std::dec << num << ")";
    }
    os << " prot=0x" << std::hex << map->GetProtect() << " " << map->GetName() << "]" << std::endl;
  }
}

bool MemMap::HasMemMap(MemMap* map) {
  void* base_begin = map->BaseBegin();
  for (auto it = maps_->lower_bound(base_begin), end = maps_->end();
       it != end && it->first == base_begin; ++it) {
    if (it->second == map) {
      return true;
    }
  }
  return false;
}

MemMap* MemMap::GetLargestMemMapAt(void* address) {
  size_t largest_size = 0;
  MemMap* largest_map = nullptr;
  DCHECK(maps_ != nullptr);
  for (auto it = maps_->lower_bound(address), end = maps_->end();
       it != end && it->first == address; ++it) {
    MemMap* map = it->second;
    CHECK(map != nullptr);
    if (largest_size < map->BaseSize()) {
      largest_size = map->BaseSize();
      largest_map = map;
    }
  }
  return largest_map;
}

void MemMap::Init() {
  MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
  if (maps_ == nullptr) {
    // dex2oat calls MemMap::Init twice since it's needed before the runtime is created.
    maps_ = new Maps;
  }
}

void MemMap::Shutdown() {
  MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
  delete maps_;
  maps_ = nullptr;
}

void MemMap::SetSize(size_t new_size) {
  if (new_size == base_size_) {
    return;
  }
  CHECK_ALIGNED(new_size, kPageSize);
  CHECK_EQ(base_size_, size_) << "Unsupported";
  CHECK_LE(new_size, base_size_);
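  // Shrink by unmapping the tail: [BaseBegin() + new_size, BaseBegin() + base_size_).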
  CHECK_EQ(munmap(reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(BaseBegin()) + new_size),
                  base_size_ - new_size), 0) << new_size << " " << base_size_;
  base_size_ = new_size;
  size_ = new_size;
}

std::ostream& operator<<(std::ostream& os, const MemMap& mem_map) {
  os << StringPrintf("[MemMap: %p-%p prot=0x%x %s]",
                     mem_map.BaseBegin(), mem_map.BaseEnd(), mem_map.GetProtect(),
                     mem_map.GetName().c_str());
  return os;
}

}  // namespace art