/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "mem_map.h"
#include "thread-inl.h"

#include <inttypes.h>
#include <backtrace/BacktraceMap.h>
#include <memory>

// See CreateStartPos below.
#ifdef __BIONIC__
#include <sys/auxv.h>
#endif

#include "base/stringprintf.h"
#include "ScopedFd.h"
#include "utils.h"

#define USE_ASHMEM 1

#ifdef USE_ASHMEM
#include <cutils/ashmem.h>
#ifndef ANDROID_OS
#include <sys/resource.h>
#endif
#endif

#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS MAP_ANON
#endif

namespace art {

static std::ostream& operator<<(
    std::ostream& os,
    std::pair<BacktraceMap::const_iterator, BacktraceMap::const_iterator> iters) {
  for (BacktraceMap::const_iterator it = iters.first; it != iters.second; ++it) {
    os << StringPrintf("0x%08x-0x%08x %c%c%c %s\n",
                       static_cast<uint32_t>(it->start),
                       static_cast<uint32_t>(it->end),
                       (it->flags & PROT_READ) ? 'r' : '-',
                       (it->flags & PROT_WRITE) ? 'w' : '-',
                       (it->flags & PROT_EXEC) ? 'x' : '-', it->name.c_str());
  }
  return os;
}

std::ostream& operator<<(std::ostream& os, const MemMap::Maps& mem_maps) {
  os << "MemMap:" << std::endl;
  for (auto it = mem_maps.begin(); it != mem_maps.end(); ++it) {
    void* base = it->first;
    MemMap* map = it->second;
    CHECK_EQ(base, map->BaseBegin());
    os << *map << std::endl;
  }
  return os;
}

MemMap::Maps* MemMap::maps_ = nullptr;

#if USE_ART_LOW_4G_ALLOCATOR
// Handling mem_map in 32b address range for 64b architectures that do not support MAP_32BIT.

// The regular start of memory allocations. The first 64KB is protected by SELinux.
static constexpr uintptr_t LOW_MEM_START = 64 * KB;

// Generate random starting position.
// To not interfere with image position, take the image's address and only place it below. Current
// formula (sketch):
//
// ART_BASE_ADDR      = 0001XXXXXXXXXXXXXXX
// ----------------------------------------
//                    = 0000111111111111111
// & ~(kPageSize - 1) = ~0000000000000001111
// ----------------------------------------
// mask               = 0000111111111110000
// & random data      = YYYYYYYYYYYYYYYYYYY
// ----------------------------------------
// tmp                = 0000YYYYYYYYYYY0000
// + LOW_MEM_START    = 0000000000001000000
// ----------------------------------------
// start
//
// getauxval as an entropy source is exposed in Bionic, but not in glibc before 2.16. When we
// do not have Bionic, simply start with LOW_MEM_START.
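//
// Worked example, as a sketch only (assuming a hypothetical ART_BASE_ADDRESS of 0x70000000
// and 4KiB pages; the real value is configured per product at build time):
//   leading_zeros = CLZ(0x70000000)        = 1
//   mask_ones     = (1 << (31 - 1)) - 1    = 0x3fffffff
//   mask          = 0x3fffffff & ~0xfff    = 0x3ffff000
//   input         = 0x0123456789abcdef
//   start         = (input & mask) + LOW_MEM_START = 0x09abc000 + 0x10000 = 0x09acc000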

// Function is standalone so it can be tested somewhat in mem_map_test.cc.
#ifdef __BIONIC__
uintptr_t CreateStartPos(uint64_t input) {
  CHECK_NE(0, ART_BASE_ADDRESS);

  // Start with all bits below highest bit in ART_BASE_ADDRESS.
  constexpr size_t leading_zeros = CLZ(static_cast<uint32_t>(ART_BASE_ADDRESS));
  constexpr uintptr_t mask_ones = (1 << (31 - leading_zeros)) - 1;

  // Lowest (usually 12) bits are not used, as aligned by page size.
  constexpr uintptr_t mask = mask_ones & ~(kPageSize - 1);

  // Mask input data.
  return (input & mask) + LOW_MEM_START;
}
#endif

static uintptr_t GenerateNextMemPos() {
#ifdef __BIONIC__
  uint8_t* random_data = reinterpret_cast<uint8_t*>(getauxval(AT_RANDOM));
  // The lower 8B are taken for the stack guard. Use the upper 8B (with mask).
  return CreateStartPos(*reinterpret_cast<uintptr_t*>(random_data + 8));
#else
  // No auxv on host, see above.
  return LOW_MEM_START;
#endif
}

// Initialize linear scan to random position.
uintptr_t MemMap::next_mem_pos_ = GenerateNextMemPos();
#endif

#if !defined(__APPLE__)  // TODO: Re-enable after b/16861075 BacktraceMap issue is addressed.
// Return true if the address range is contained in a single /proc/self/maps entry.
static bool ContainedWithinExistingMap(uintptr_t begin,
                                       uintptr_t end,
                                       std::string* error_msg) {
  std::unique_ptr<BacktraceMap> map(BacktraceMap::Create(getpid(), true));
  if (map.get() == nullptr) {
    *error_msg = StringPrintf("Failed to build process map");
    return false;
  }
  for (BacktraceMap::const_iterator it = map->begin(); it != map->end(); ++it) {
    if ((begin >= it->start && begin < it->end)  // start of new within old
        && (end > it->start && end <= it->end)) {  // end of new within old
      return true;
    }
  }
  std::string maps;
  ReadFileToString("/proc/self/maps", &maps);
  *error_msg = StringPrintf("Requested region 0x%08" PRIxPTR "-0x%08" PRIxPTR " is not contained "
                            "within any existing map:\n%s\n",
                            begin, end, maps.c_str());
  return false;
}
#endif

// Return true if the address range does not conflict with any /proc/self/maps entry.
static bool CheckNonOverlapping(uintptr_t begin,
                                uintptr_t end,
                                std::string* error_msg) {
  std::unique_ptr<BacktraceMap> map(BacktraceMap::Create(getpid(), true));
  if (map.get() == nullptr) {
    *error_msg = StringPrintf("Failed to build process map");
    return false;
  }
  for (BacktraceMap::const_iterator it = map->begin(); it != map->end(); ++it) {
    if ((begin >= it->start && begin < it->end)       // start of new within old
        || (end > it->start && end < it->end)         // end of new within old
        || (begin <= it->start && end >= it->end)) {  // start/end of new includes all of old
      std::ostringstream map_info;
      map_info << std::make_pair(it, map->end());
      *error_msg = StringPrintf("Requested region 0x%08" PRIxPTR "-0x%08" PRIxPTR " overlaps with "
                                "existing map 0x%08" PRIxPTR "-0x%08" PRIxPTR " (%s)\n%s",
                                begin, end,
                                static_cast<uintptr_t>(it->start), static_cast<uintptr_t>(it->end),
                                it->name.c_str(),
                                map_info.str().c_str());
      return false;
    }
  }
  return true;
}

// CheckMapRequest to validate a non-MAP_FAILED mmap result based on
// the expected value, calling munmap if validation fails, giving the
// reason in error_msg.
//
// If the expected_ptr is nullptr, nothing is checked beyond the fact
// that the actual_ptr is not MAP_FAILED. However, if expected_ptr is
// non-null, we check that actual_ptr == expected_ptr, and if not,
// report in error_msg what the conflicting mapping was if found, or a
// generic error in other cases.
static bool CheckMapRequest(byte* expected_ptr, void* actual_ptr, size_t byte_count,
                            std::string* error_msg) {
  // Handled first by caller for more specific error messages.
  CHECK(actual_ptr != MAP_FAILED);

  if (expected_ptr == nullptr) {
    return true;
  }

  uintptr_t actual = reinterpret_cast<uintptr_t>(actual_ptr);
  uintptr_t expected = reinterpret_cast<uintptr_t>(expected_ptr);
  uintptr_t limit = expected + byte_count;

  if (expected_ptr == actual_ptr) {
    return true;
  }

  // We asked for an address but didn't get what we wanted; all paths below here should fail.
  int result = munmap(actual_ptr, byte_count);
  if (result == -1) {
    PLOG(WARNING) << StringPrintf("munmap(%p, %zd) failed", actual_ptr, byte_count);
  }

  // We call this here so that we can try to generate a full error
  // message with the overlapping mapping. There's no guarantee that
  // there will be an overlap though, since
  // - The kernel is not *required* to honor expected_ptr unless MAP_FIXED is
  //   given, so it may have placed the mapping elsewhere even without an overlap.
  // - There might have been an overlap at the point of mmap, but the
  //   overlapping region has since been unmapped.
  std::string error_detail;
  CheckNonOverlapping(expected, limit, &error_detail);

  std::ostringstream os;
  os << StringPrintf("Failed to mmap at expected address, mapped at "
                     "0x%08" PRIxPTR " instead of 0x%08" PRIxPTR,
                     actual, expected);
  if (!error_detail.empty()) {
    os << " : " << error_detail;
  }

  *error_msg = os.str();
  return false;
}

MemMap* MemMap::MapAnonymous(const char* name, byte* expected_ptr, size_t byte_count, int prot,
                             bool low_4gb, std::string* error_msg) {
  if (byte_count == 0) {
    return new MemMap(name, nullptr, 0, nullptr, 0, prot, false);
  }
  size_t page_aligned_byte_count = RoundUp(byte_count, kPageSize);

  int flags = MAP_PRIVATE | MAP_ANONYMOUS;
  ScopedFd fd(-1);

#ifdef USE_ASHMEM
#ifdef HAVE_ANDROID_OS
  const bool use_ashmem = true;
#else
  // When not on Android, ashmem is faked using files in /tmp. Ensure that such files won't
  // fail due to ulimit restrictions. If they would, use a regular mmap instead.
  struct rlimit rlimit_fsize;
  CHECK_EQ(getrlimit(RLIMIT_FSIZE, &rlimit_fsize), 0);
  const bool use_ashmem = (rlimit_fsize.rlim_cur == RLIM_INFINITY) ||
      (page_aligned_byte_count < rlimit_fsize.rlim_cur);
#endif
  if (use_ashmem) {
    // android_os_Debug.cpp read_mapinfo assumes all ashmem regions associated with the VM are
    // prefixed "dalvik-".
    std::string debug_friendly_name("dalvik-");
    debug_friendly_name += name;
    fd.reset(ashmem_create_region(debug_friendly_name.c_str(), page_aligned_byte_count));
    if (fd.get() == -1) {
      *error_msg = StringPrintf("ashmem_create_region failed for '%s': %s", name, strerror(errno));
      return nullptr;
    }
    flags = MAP_PRIVATE;
  }
#endif

  // We need to store and potentially set an error number for pretty printing of errors.
  int saved_errno = 0;

#ifdef __LP64__
  // When requesting low_4gb memory and having an expectation, the requested range should fit into
  // 4GB.
  if (low_4gb && (
      // Start out of bounds.
      (reinterpret_cast<uintptr_t>(expected_ptr) >> 32) != 0 ||
      // End out of bounds. For simplicity, this will fail for the last page of memory.
      (reinterpret_cast<uintptr_t>(expected_ptr + page_aligned_byte_count) >> 32) != 0)) {
    *error_msg = StringPrintf("The requested address space (%p, %p) cannot fit in low_4gb",
                              expected_ptr, expected_ptr + page_aligned_byte_count);
    return nullptr;
  }
#endif

  // TODO:
  // A page allocator would be a useful abstraction here, as
  // 1) It is doubtful that MAP_32BIT on x86_64 is doing the right job for us
  // 2) The linear scheme, even with simple saving of the last known position, is very crude
#if USE_ART_LOW_4G_ALLOCATOR
  // MAP_32BIT only available on x86_64.
  void* actual = MAP_FAILED;
  if (low_4gb && expected_ptr == nullptr) {
    bool first_run = true;

    for (uintptr_t ptr = next_mem_pos_; ptr < 4 * GB; ptr += kPageSize) {
      if (4U * GB - ptr < page_aligned_byte_count) {
        // Not enough memory until 4GB.
        if (first_run) {
          // Try another time from the bottom.
          ptr = LOW_MEM_START - kPageSize;
          first_run = false;
          continue;
        } else {
          // Second try failed.
          break;
        }
      }

      uintptr_t tail_ptr;

      // Check pages are free.
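      // A sketch of why the probe below works: msync() with no flags fails with ENOMEM
      // when any page in the range is unmapped, so a *failing* call means the page is
      // still free. The probe is inherently racy (another thread can map a page between
      // this check and the mmap() below), which is why the mmap() result is validated
      // again afterwards.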
      bool safe = true;
      for (tail_ptr = ptr; tail_ptr < ptr + page_aligned_byte_count; tail_ptr += kPageSize) {
        if (msync(reinterpret_cast<void*>(tail_ptr), kPageSize, 0) == 0) {
          safe = false;
          break;
        } else {
          DCHECK_EQ(errno, ENOMEM);
        }
      }

      // Update early, as we break out when we have found and mapped a region.
      next_mem_pos_ = tail_ptr;

      if (safe) {
        actual = mmap(reinterpret_cast<void*>(ptr), page_aligned_byte_count, prot, flags, fd.get(),
                      0);
        if (actual != MAP_FAILED) {
          // Since we didn't use MAP_FIXED the kernel may have mapped it somewhere not in the low
          // 4GB. If this is the case, unmap and retry.
          if (reinterpret_cast<uintptr_t>(actual) + page_aligned_byte_count < 4 * GB) {
            break;
          } else {
            munmap(actual, page_aligned_byte_count);
            actual = MAP_FAILED;
          }
        }
      } else {
        // Skip over last page.
        ptr = tail_ptr;
      }
    }

    if (actual == MAP_FAILED) {
      LOG(ERROR) << "Could not find contiguous low-memory space.";
      saved_errno = ENOMEM;
    }
  } else {
    actual = mmap(expected_ptr, page_aligned_byte_count, prot, flags, fd.get(), 0);
    saved_errno = errno;
  }

#else
#if defined(__LP64__)
  if (low_4gb && expected_ptr == nullptr) {
    flags |= MAP_32BIT;
  }
#endif

  void* actual = mmap(expected_ptr, page_aligned_byte_count, prot, flags, fd.get(), 0);
  saved_errno = errno;
#endif

  if (actual == MAP_FAILED) {
    std::string maps;
    ReadFileToString("/proc/self/maps", &maps);

    *error_msg = StringPrintf("Failed anonymous mmap(%p, %zd, 0x%x, 0x%x, %d, 0): %s\n%s",
                              expected_ptr, page_aligned_byte_count, prot, flags, fd.get(),
                              strerror(saved_errno), maps.c_str());
    return nullptr;
  }
  if (!CheckMapRequest(expected_ptr, actual, page_aligned_byte_count, error_msg)) {
    return nullptr;
  }
  return new MemMap(name, reinterpret_cast<byte*>(actual), byte_count, actual,
                    page_aligned_byte_count, prot, false);
}

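// Usage sketch for MapAnonymous (illustrative only; the name "test" and the sizes are
// made up, mirroring how mem_map_test.cc exercises this path):
//
//   std::string error_msg;
//   std::unique_ptr<MemMap> map(MemMap::MapAnonymous("test",
//                                                    nullptr,  // no expected address
//                                                    16 * kPageSize,
//                                                    PROT_READ | PROT_WRITE,
//                                                    false,  // low_4gb
//                                                    &error_msg));
//   CHECK(map.get() != nullptr) << error_msg;
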
MemMap* MemMap::MapFileAtAddress(byte* expected_ptr, size_t byte_count, int prot, int flags, int fd,
                                 off_t start, bool reuse, const char* filename,
                                 std::string* error_msg) {
  CHECK_NE(0, prot);
  CHECK_NE(0, flags & (MAP_SHARED | MAP_PRIVATE));

  // Note that we do not allow MAP_FIXED unless reuse == true, i.e. we
  // expect this mapping to be contained within an existing map.
  if (reuse) {
    // reuse means it is okay that it overlaps an existing page mapping.
    // Only use this if you actually made the page reservation yourself.
    CHECK(expected_ptr != nullptr);

#if !defined(__APPLE__)  // TODO: Re-enable after b/16861075 BacktraceMap issue is addressed.
    uintptr_t expected = reinterpret_cast<uintptr_t>(expected_ptr);
    uintptr_t limit = expected + byte_count;
    DCHECK(ContainedWithinExistingMap(expected, limit, error_msg));
#endif
    flags |= MAP_FIXED;
  } else {
    CHECK_EQ(0, flags & MAP_FIXED);
    // Don't bother checking for an overlapping region here. We'll
    // check this if required after the fact inside CheckMapRequest.
  }

  if (byte_count == 0) {
    return new MemMap(filename, nullptr, 0, nullptr, 0, prot, false);
  }
  // Adjust 'start' to be page-aligned as required by mmap.
  int page_offset = start % kPageSize;
  off_t page_aligned_offset = start - page_offset;
  // Adjust 'byte_count' to be page-aligned as we will map this anyway.
  size_t page_aligned_byte_count = RoundUp(byte_count + page_offset, kPageSize);
  // The 'expected_ptr' is modified (if specified, i.e. non-null) to be page aligned to the file
  // but not necessarily to virtual memory. mmap will page align 'expected_ptr' for us.
  byte* page_aligned_expected = (expected_ptr == nullptr) ? nullptr : (expected_ptr - page_offset);
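  // Worked example, as a sketch (kPageSize assumed to be 0x1000; start and byte_count are
  // made-up values): start == 0x1234 and byte_count == 0x2000 give
  //   page_offset             == 0x234
  //   page_aligned_offset     == 0x1000
  //   page_aligned_byte_count == RoundUp(0x2000 + 0x234, 0x1000) == 0x3000
  // so the file is mapped from offset 0x1000 and the caller's data begins at
  // actual + page_offset inside the returned mapping.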

  byte* actual = reinterpret_cast<byte*>(mmap(page_aligned_expected,
                                              page_aligned_byte_count,
                                              prot,
                                              flags,
                                              fd,
                                              page_aligned_offset));
  if (actual == MAP_FAILED) {
    auto saved_errno = errno;

    std::string maps;
    ReadFileToString("/proc/self/maps", &maps);

    *error_msg = StringPrintf("mmap(%p, %zd, 0x%x, 0x%x, %d, %" PRId64
                              ") of file '%s' failed: %s\n%s",
                              page_aligned_expected, page_aligned_byte_count, prot, flags, fd,
                              static_cast<int64_t>(page_aligned_offset), filename,
                              strerror(saved_errno), maps.c_str());
    return nullptr;
  }
  if (!CheckMapRequest(expected_ptr, actual, page_aligned_byte_count, error_msg)) {
    return nullptr;
  }
  return new MemMap(filename, actual + page_offset, byte_count, actual, page_aligned_byte_count,
                    prot, reuse);
}

MemMap::~MemMap() {
  if (base_begin_ == nullptr && base_size_ == 0) {
    return;
  }
  if (!reuse_) {
    int result = munmap(base_begin_, base_size_);
    if (result == -1) {
      PLOG(FATAL) << "munmap failed";
    }
  }

  // Remove it from maps_.
  MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
  bool found = false;
  DCHECK(maps_ != nullptr);
  for (auto it = maps_->lower_bound(base_begin_), end = maps_->end();
       it != end && it->first == base_begin_; ++it) {
    if (it->second == this) {
      found = true;
      maps_->erase(it);
      break;
    }
  }
  CHECK(found) << "MemMap not found";
}

MemMap::MemMap(const std::string& name, byte* begin, size_t size, void* base_begin,
               size_t base_size, int prot, bool reuse)
    : name_(name), begin_(begin), size_(size), base_begin_(base_begin), base_size_(base_size),
      prot_(prot), reuse_(reuse) {
  if (size_ == 0) {
    CHECK(begin_ == nullptr);
    CHECK(base_begin_ == nullptr);
    CHECK_EQ(base_size_, 0U);
  } else {
    CHECK(begin_ != nullptr);
    CHECK(base_begin_ != nullptr);
    CHECK_NE(base_size_, 0U);

    // Add it to maps_.
    MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
    DCHECK(maps_ != nullptr);
    maps_->insert(std::make_pair(base_begin_, this));
  }
}

MemMap* MemMap::RemapAtEnd(byte* new_end, const char* tail_name, int tail_prot,
                           std::string* error_msg) {
  DCHECK_GE(new_end, Begin());
  DCHECK_LE(new_end, End());
  DCHECK_LE(begin_ + size_, reinterpret_cast<byte*>(base_begin_) + base_size_);
  DCHECK(IsAligned<kPageSize>(begin_));
  DCHECK(IsAligned<kPageSize>(base_begin_));
  DCHECK(IsAligned<kPageSize>(reinterpret_cast<byte*>(base_begin_) + base_size_));
  DCHECK(IsAligned<kPageSize>(new_end));
  byte* old_end = begin_ + size_;
  byte* old_base_end = reinterpret_cast<byte*>(base_begin_) + base_size_;
  byte* new_base_end = new_end;
  DCHECK_LE(new_base_end, old_base_end);
  if (new_base_end == old_base_end) {
    return new MemMap(tail_name, nullptr, 0, nullptr, 0, tail_prot, false);
  }
  size_ = new_end - begin_;
  base_size_ = new_base_end - reinterpret_cast<byte*>(base_begin_);
  DCHECK_LE(begin_ + size_, reinterpret_cast<byte*>(base_begin_) + base_size_);
  size_t tail_size = old_end - new_end;
  byte* tail_base_begin = new_base_end;
  size_t tail_base_size = old_base_end - new_base_end;
  DCHECK_EQ(tail_base_begin + tail_base_size, old_base_end);
  DCHECK(IsAligned<kPageSize>(tail_base_size));

#ifdef USE_ASHMEM
  // android_os_Debug.cpp read_mapinfo assumes all ashmem regions associated with the VM are
  // prefixed "dalvik-".
  std::string debug_friendly_name("dalvik-");
  debug_friendly_name += tail_name;
  ScopedFd fd(ashmem_create_region(debug_friendly_name.c_str(), tail_base_size));
  int flags = MAP_PRIVATE | MAP_FIXED;
  if (fd.get() == -1) {
    *error_msg = StringPrintf("ashmem_create_region failed for '%s': %s",
                              tail_name, strerror(errno));
    return nullptr;
  }
#else
  ScopedFd fd(-1);
  int flags = MAP_PRIVATE | MAP_ANONYMOUS;
#endif

  // Unmap/map the tail region.
  int result = munmap(tail_base_begin, tail_base_size);
  if (result == -1) {
    std::string maps;
    ReadFileToString("/proc/self/maps", &maps);
    *error_msg = StringPrintf("munmap(%p, %zd) failed for '%s'\n%s",
                              tail_base_begin, tail_base_size, name_.c_str(),
                              maps.c_str());
    return nullptr;
  }
  // Don't cause memory allocation between the munmap and the mmap
  // calls. Otherwise, libc (or something else) might take this memory
  // region. Note this isn't perfect, as there's no way to prevent
  // other threads from trying to take this memory region here.
  byte* actual = reinterpret_cast<byte*>(mmap(tail_base_begin, tail_base_size, tail_prot,
                                              flags, fd.get(), 0));
  if (actual == MAP_FAILED) {
    std::string maps;
    ReadFileToString("/proc/self/maps", &maps);
    *error_msg = StringPrintf("anonymous mmap(%p, %zd, 0x%x, 0x%x, %d, 0) failed\n%s",
                              tail_base_begin, tail_base_size, tail_prot, flags, fd.get(),
                              maps.c_str());
    return nullptr;
  }
  return new MemMap(tail_name, actual, tail_size, actual, tail_base_size, tail_prot, false);
}

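// Usage sketch for RemapAtEnd (illustrative only; the split point and names are made up).
// It carves the tail of an existing mapping into its own MemMap, roughly as the GC does
// when splitting a space:
//
//   byte* split = map->Begin() + map->BaseSize() / 2;  // Must remain page aligned.
//   std::unique_ptr<MemMap> tail(map->RemapAtEnd(split, "tail", PROT_READ, &error_msg));
//   CHECK(tail.get() != nullptr) << error_msg;
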
void MemMap::MadviseDontNeedAndZero() {
  if (base_begin_ != nullptr || base_size_ != 0) {
    if (!kMadviseZeroes) {
      memset(base_begin_, 0, base_size_);
    }
    int result = madvise(base_begin_, base_size_, MADV_DONTNEED);
    if (result == -1) {
      PLOG(WARNING) << "madvise failed";
    }
  }
}

bool MemMap::Protect(int prot) {
  if (base_begin_ == nullptr && base_size_ == 0) {
    prot_ = prot;
    return true;
  }

  if (mprotect(base_begin_, base_size_, prot) == 0) {
    prot_ = prot;
    return true;
  }

  PLOG(ERROR) << "mprotect(" << reinterpret_cast<void*>(base_begin_) << ", " << base_size_ << ", "
              << prot << ") failed";
  return false;
}

bool MemMap::CheckNoGaps(MemMap* begin_map, MemMap* end_map) {
  MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
  CHECK(begin_map != nullptr);
  CHECK(end_map != nullptr);
  CHECK(HasMemMap(begin_map));
  CHECK(HasMemMap(end_map));
  CHECK_LE(begin_map->BaseBegin(), end_map->BaseBegin());
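  // Walk forward from begin_map: each step requires some map to start exactly at the
  // current map's BaseEnd(), so reaching end_map proves the address range between the
  // two is covered without holes.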
  MemMap* map = begin_map;
  while (map->BaseBegin() != end_map->BaseBegin()) {
    MemMap* next_map = GetLargestMemMapAt(map->BaseEnd());
    if (next_map == nullptr) {
      // Found a gap.
      return false;
    }
    map = next_map;
  }
  return true;
}

void MemMap::DumpMaps(std::ostream& os) {
  MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
  DumpMapsLocked(os);
}

void MemMap::DumpMapsLocked(std::ostream& os) {
  os << *maps_;
}

bool MemMap::HasMemMap(MemMap* map) {
  void* base_begin = map->BaseBegin();
  for (auto it = maps_->lower_bound(base_begin), end = maps_->end();
       it != end && it->first == base_begin; ++it) {
    if (it->second == map) {
      return true;
    }
  }
  return false;
}

MemMap* MemMap::GetLargestMemMapAt(void* address) {
  size_t largest_size = 0;
  MemMap* largest_map = nullptr;
  DCHECK(maps_ != nullptr);
  for (auto it = maps_->lower_bound(address), end = maps_->end();
       it != end && it->first == address; ++it) {
    MemMap* map = it->second;
    CHECK(map != nullptr);
    if (largest_size < map->BaseSize()) {
      largest_size = map->BaseSize();
      largest_map = map;
    }
  }
  return largest_map;
}

void MemMap::Init() {
  MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
  if (maps_ == nullptr) {
    // dex2oat calls MemMap::Init twice since it's needed before the runtime is created.
    maps_ = new Maps;
  }
}

void MemMap::Shutdown() {
  MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
  delete maps_;
  maps_ = nullptr;
}

std::ostream& operator<<(std::ostream& os, const MemMap& mem_map) {
  os << StringPrintf("[MemMap: %p-%p prot=0x%x %s]",
                     mem_map.BaseBegin(), mem_map.BaseEnd(), mem_map.GetProtect(),
                     mem_map.GetName().c_str());
  return os;
}

}  // namespace art