/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "mem_map.h"

#include <sys/mman.h>

#include <memory>
#include <random>

#include "base/memory_tool.h"
#include "base/unix_file/fd_file.h"
#include "common_runtime_test.h"

namespace art {

class MemMapTest : public CommonRuntimeTest {
 public:
  static uint8_t* BaseBegin(MemMap* mem_map) {
    return reinterpret_cast<uint8_t*>(mem_map->base_begin_);
  }

  static size_t BaseSize(MemMap* mem_map) {
    return mem_map->base_size_;
  }

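  // Heuristically check whether an address is mapped: msync() succeeds on a
  // mapped page and fails with ENOMEM on an unmapped one.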
  static bool IsAddressMapped(void* addr) {
    bool res = msync(addr, 1, MS_SYNC) == 0;
    if (!res && errno != ENOMEM) {
      PLOG(FATAL) << "Unexpected error occurred on msync";
    }
    return res;
  }

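  // Return a vector of the given size filled with random bytes.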
  static std::vector<uint8_t> RandomData(size_t size) {
    std::random_device rd;
    // Note: uniform_int_distribution is not specified for uint8_t, so draw
    // wider values and truncate them to a byte.
    std::uniform_int_distribution<uint32_t> dist;
    std::vector<uint8_t> res;
    res.resize(size);
    for (size_t i = 0; i < size; i++) {
      res[i] = static_cast<uint8_t>(dist(rd));
    }
    return res;
  }

  static uint8_t* GetValidMapAddress(size_t size, bool low_4gb) {
    // Find a valid map address and unmap it before returning.
    std::string error_msg;
    std::unique_ptr<MemMap> map(MemMap::MapAnonymous("temp",
                                                     nullptr,
                                                     size,
                                                     PROT_READ,
                                                     low_4gb,
                                                     false,
                                                     &error_msg));
    CHECK(map != nullptr);
    return map->Begin();
  }

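  // Shared body for the RemapAtEnd tests: map two pages, split the second
  // page off into its own MemMap, and verify that both halves stay intact.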
  static void RemapAtEndTest(bool low_4gb) {
    std::string error_msg;
    // Cast the page size to size_t.
    const size_t page_size = static_cast<size_t>(kPageSize);
    // Map a two-page memory region.
    MemMap* m0 = MemMap::MapAnonymous("MemMapTest_RemapAtEndTest_map0",
                                      nullptr,
                                      2 * page_size,
                                      PROT_READ | PROT_WRITE,
                                      low_4gb,
                                      false,
                                      &error_msg);
    // Check its state and write to it.
    uint8_t* base0 = m0->Begin();
    ASSERT_TRUE(base0 != nullptr) << error_msg;
    size_t size0 = m0->Size();
    EXPECT_EQ(m0->Size(), 2 * page_size);
    EXPECT_EQ(BaseBegin(m0), base0);
    EXPECT_EQ(BaseSize(m0), size0);
    memset(base0, 42, 2 * page_size);
    // Remap the latter half into a second MemMap.
    MemMap* m1 = m0->RemapAtEnd(base0 + page_size,
                                "MemMapTest_RemapAtEndTest_map1",
                                PROT_READ | PROT_WRITE,
                                &error_msg);
    // Check the states of the two maps.
    EXPECT_EQ(m0->Begin(), base0) << error_msg;
    EXPECT_EQ(m0->Size(), page_size);
    EXPECT_EQ(BaseBegin(m0), base0);
    EXPECT_EQ(BaseSize(m0), page_size);
    uint8_t* base1 = m1->Begin();
    size_t size1 = m1->Size();
    EXPECT_EQ(base1, base0 + page_size);
    EXPECT_EQ(size1, page_size);
    EXPECT_EQ(BaseBegin(m1), base1);
    EXPECT_EQ(BaseSize(m1), size1);
    // Write to the second region.
    memset(base1, 43, page_size);
    // Check the contents of the two regions.
    for (size_t i = 0; i < page_size; ++i) {
      EXPECT_EQ(base0[i], 42);
    }
    for (size_t i = 0; i < page_size; ++i) {
      EXPECT_EQ(base1[i], 43);
    }
    // Unmap the first region.
    delete m0;
    // Make sure the second region is still accessible after the first
    // region is unmapped.
    for (size_t i = 0; i < page_size; ++i) {
      EXPECT_EQ(base1[i], 43);
    }
    delete m1;
  }

  void CommonInit() {
    MemMap::Init();
  }

#if defined(__LP64__) && !defined(__x86_64__)
  static uintptr_t GetLinearScanPos() {
    return MemMap::next_mem_pos_;
  }
#endif
};

#if defined(__LP64__) && !defined(__x86_64__)

#ifdef __BIONIC__
extern uintptr_t CreateStartPos(uint64_t input);
#endif

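// Check that the initial linear-scan allocation position is sane: at or above
// 64 KB and below ART_BASE_ADDRESS.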
TEST_F(MemMapTest, Start) {
  CommonInit();
  uintptr_t start = GetLinearScanPos();
  EXPECT_LE(64 * KB, start);
  EXPECT_LT(start, static_cast<uintptr_t>(ART_BASE_ADDRESS));
#ifdef __BIONIC__
  // Test a couple of values. Make sure they are different.
  uintptr_t last = 0;
  for (size_t i = 0; i < 100; ++i) {
    uintptr_t random_start = CreateStartPos(i * kPageSize);
    EXPECT_NE(last, random_start);
    last = random_start;
  }

  // Even on max, should be below ART_BASE_ADDRESS.
  EXPECT_LT(CreateStartPos(~0), static_cast<uintptr_t>(ART_BASE_ADDRESS));
#endif
  // End of test.
}
#endif

// We need mremap to be able to test ReplaceMapping at all
#if HAVE_MREMAP_SYSCALL
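// Replace a mapping with an equally-sized one and check that the source's
// contents end up in the destination and the source is unmapped.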
TEST_F(MemMapTest, ReplaceMapping_SameSize) {
  std::string error_msg;
  std::unique_ptr<MemMap> dest(MemMap::MapAnonymous("MapAnonymousEmpty-atomic-replace-dest",
                                                    nullptr,
                                                    kPageSize,
                                                    PROT_READ,
                                                    false,
                                                    false,
                                                    &error_msg));
  ASSERT_TRUE(dest != nullptr);
  MemMap* source = MemMap::MapAnonymous("MapAnonymous-atomic-replace-source",
                                        nullptr,
                                        kPageSize,
                                        PROT_WRITE | PROT_READ,
                                        false,
                                        false,
                                        &error_msg);
  ASSERT_TRUE(source != nullptr);
  void* source_addr = source->Begin();
  void* dest_addr = dest->Begin();
  ASSERT_TRUE(IsAddressMapped(source_addr));
  ASSERT_TRUE(IsAddressMapped(dest_addr));

  std::vector<uint8_t> data = RandomData(kPageSize);
  memcpy(source->Begin(), data.data(), data.size());

  ASSERT_TRUE(dest->ReplaceWith(&source, &error_msg)) << error_msg;

  ASSERT_FALSE(IsAddressMapped(source_addr));
  ASSERT_TRUE(IsAddressMapped(dest_addr));
  ASSERT_TRUE(source == nullptr);

  ASSERT_EQ(dest->Size(), static_cast<size_t>(kPageSize));

  ASSERT_EQ(memcmp(dest->Begin(), data.data(), dest->Size()), 0);
}

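// Replacing with a larger source should grow the destination mapping to the
// source's size.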
TEST_F(MemMapTest, ReplaceMapping_MakeLarger) {
  std::string error_msg;
  std::unique_ptr<MemMap> dest(MemMap::MapAnonymous("MapAnonymousEmpty-atomic-replace-dest",
                                                    nullptr,
                                                    5 * kPageSize,  // Need to make it larger
                                                                    // initially so we know
                                                                    // there won't be mappings
                                                                    // in the way when we move
                                                                    // source.
                                                    PROT_READ,
                                                    false,
                                                    false,
                                                    &error_msg));
  ASSERT_TRUE(dest != nullptr);
  MemMap* source = MemMap::MapAnonymous("MapAnonymous-atomic-replace-source",
                                        nullptr,
                                        3 * kPageSize,
                                        PROT_WRITE | PROT_READ,
                                        false,
                                        false,
                                        &error_msg);
  ASSERT_TRUE(source != nullptr);
  uint8_t* source_addr = source->Begin();
  uint8_t* dest_addr = dest->Begin();
  ASSERT_TRUE(IsAddressMapped(source_addr));

  // Fill the source with random data.
  std::vector<uint8_t> data = RandomData(3 * kPageSize);
  memcpy(source->Begin(), data.data(), data.size());

  // Make the dest smaller so that we know we'll have space.
  dest->SetSize(kPageSize);

  ASSERT_TRUE(IsAddressMapped(dest_addr));
  ASSERT_FALSE(IsAddressMapped(dest_addr + 2 * kPageSize));
  ASSERT_EQ(dest->Size(), static_cast<size_t>(kPageSize));

  ASSERT_TRUE(dest->ReplaceWith(&source, &error_msg)) << error_msg;

  ASSERT_FALSE(IsAddressMapped(source_addr));
  ASSERT_EQ(dest->Size(), static_cast<size_t>(3 * kPageSize));
  ASSERT_TRUE(IsAddressMapped(dest_addr));
  ASSERT_TRUE(IsAddressMapped(dest_addr + 2 * kPageSize));
  ASSERT_TRUE(source == nullptr);

  ASSERT_EQ(memcmp(dest->Begin(), data.data(), dest->Size()), 0);
}

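// Replacing with a smaller source should shrink the destination mapping and
// unmap the pages beyond the new size.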
TEST_F(MemMapTest, ReplaceMapping_MakeSmaller) {
  std::string error_msg;
  std::unique_ptr<MemMap> dest(MemMap::MapAnonymous("MapAnonymousEmpty-atomic-replace-dest",
                                                    nullptr,
                                                    3 * kPageSize,
                                                    PROT_READ,
                                                    false,
                                                    false,
                                                    &error_msg));
  ASSERT_TRUE(dest != nullptr);
  MemMap* source = MemMap::MapAnonymous("MapAnonymous-atomic-replace-source",
                                        nullptr,
                                        kPageSize,
                                        PROT_WRITE | PROT_READ,
                                        false,
                                        false,
                                        &error_msg);
  ASSERT_TRUE(source != nullptr);
  uint8_t* source_addr = source->Begin();
  uint8_t* dest_addr = dest->Begin();
  ASSERT_TRUE(IsAddressMapped(source_addr));
  ASSERT_TRUE(IsAddressMapped(dest_addr));
  ASSERT_TRUE(IsAddressMapped(dest_addr + 2 * kPageSize));
  ASSERT_EQ(dest->Size(), static_cast<size_t>(3 * kPageSize));

  std::vector<uint8_t> data = RandomData(kPageSize);
  memcpy(source->Begin(), data.data(), kPageSize);

  ASSERT_TRUE(dest->ReplaceWith(&source, &error_msg)) << error_msg;

  ASSERT_FALSE(IsAddressMapped(source_addr));
  ASSERT_EQ(dest->Size(), static_cast<size_t>(kPageSize));
  ASSERT_TRUE(IsAddressMapped(dest_addr));
  ASSERT_FALSE(IsAddressMapped(dest_addr + 2 * kPageSize));
  ASSERT_TRUE(source == nullptr);

  ASSERT_EQ(memcmp(dest->Begin(), data.data(), dest->Size()), 0);
}

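// ReplaceWith must fail cleanly when the source overlaps the destination,
// leaving both mappings and their contents untouched.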
TEST_F(MemMapTest, ReplaceMapping_FailureOverlap) {
  std::string error_msg;
  std::unique_ptr<MemMap> dest(
      MemMap::MapAnonymous(
          "MapAnonymousEmpty-atomic-replace-dest",
          nullptr,
          3 * kPageSize,  // Need to make it larger initially so we know there won't be mappings in
                          // the way when we move source.
          PROT_READ | PROT_WRITE,
          false,
          false,
          &error_msg));
  ASSERT_TRUE(dest != nullptr);
  // Resize down to 1 page so we can remap the rest.
  dest->SetSize(kPageSize);
  // Create source from the last 2 pages.
  MemMap* source = MemMap::MapAnonymous("MapAnonymous-atomic-replace-source",
                                        dest->Begin() + kPageSize,
                                        2 * kPageSize,
                                        PROT_WRITE | PROT_READ,
                                        false,
                                        false,
                                        &error_msg);
  ASSERT_TRUE(source != nullptr);
  MemMap* orig_source = source;
  ASSERT_EQ(dest->Begin() + kPageSize, source->Begin());
  uint8_t* source_addr = source->Begin();
  uint8_t* dest_addr = dest->Begin();
  ASSERT_TRUE(IsAddressMapped(source_addr));

  // Fill the source and dest with random data.
  std::vector<uint8_t> data = RandomData(2 * kPageSize);
  memcpy(source->Begin(), data.data(), data.size());
  std::vector<uint8_t> dest_data = RandomData(kPageSize);
  memcpy(dest->Begin(), dest_data.data(), dest_data.size());

  ASSERT_TRUE(IsAddressMapped(dest_addr));
  ASSERT_EQ(dest->Size(), static_cast<size_t>(kPageSize));

  ASSERT_FALSE(dest->ReplaceWith(&source, &error_msg)) << error_msg;

  ASSERT_TRUE(source == orig_source);
  ASSERT_TRUE(IsAddressMapped(source_addr));
  ASSERT_TRUE(IsAddressMapped(dest_addr));
  ASSERT_EQ(source->Size(), data.size());
  ASSERT_EQ(dest->Size(), dest_data.size());

  ASSERT_EQ(memcmp(source->Begin(), data.data(), data.size()), 0);
  ASSERT_EQ(memcmp(dest->Begin(), dest_data.data(), dest_data.size()), 0);

  delete source;
}
#endif  // HAVE_MREMAP_SYSCALL

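// Anonymous mappings of zero bytes and of a single page should both succeed.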
TEST_F(MemMapTest, MapAnonymousEmpty) {
  CommonInit();
  std::string error_msg;
  std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousEmpty",
                                                   nullptr,
                                                   0,
                                                   PROT_READ,
                                                   false,
                                                   false,
                                                   &error_msg));
  ASSERT_TRUE(map.get() != nullptr) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  map.reset(MemMap::MapAnonymous("MapAnonymousEmpty",
                                 nullptr,
                                 kPageSize,
                                 PROT_READ | PROT_WRITE,
                                 false,
                                 false,
                                 &error_msg));
  ASSERT_TRUE(map.get() != nullptr) << error_msg;
  ASSERT_TRUE(error_msg.empty());
}

TEST_F(MemMapTest, MapAnonymousFailNullError) {
  CommonInit();
  // Test that we don't crash with a null error_str when mapping at an invalid location.
  std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousInvalid",
                                                    reinterpret_cast<uint8_t*>(kPageSize),
                                                    0x20000,
                                                    PROT_READ | PROT_WRITE,
                                                    false,
                                                    false,
                                                    nullptr));
  ASSERT_EQ(nullptr, map.get());
}

#ifdef __LP64__
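// With low_4gb set, an anonymous mapping must land below the 4 GiB boundary.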
TEST_F(MemMapTest, MapAnonymousEmpty32bit) {
  CommonInit();
  std::string error_msg;
  std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousEmpty",
                                                   nullptr,
                                                   kPageSize,
                                                   PROT_READ | PROT_WRITE,
                                                   true,
                                                   false,
                                                   &error_msg));
  ASSERT_TRUE(map.get() != nullptr) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  ASSERT_LT(reinterpret_cast<uintptr_t>(BaseBegin(map.get())), 1ULL << 32);
}
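// A file-backed mapping with low_4gb set must also land below 4 GiB.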
TEST_F(MemMapTest, MapFile32Bit) {
  CommonInit();
  std::string error_msg;
  ScratchFile scratch_file;
  constexpr size_t kMapSize = kPageSize;
  std::unique_ptr<uint8_t[]> data(new uint8_t[kMapSize]());
  ASSERT_TRUE(scratch_file.GetFile()->WriteFully(&data[0], kMapSize));
  std::unique_ptr<MemMap> map(MemMap::MapFile(/*byte_count*/kMapSize,
                                              PROT_READ,
                                              MAP_PRIVATE,
                                              scratch_file.GetFd(),
                                              /*start*/0,
                                              /*low_4gb*/true,
                                              scratch_file.GetFilename().c_str(),
                                              &error_msg));
  ASSERT_TRUE(map != nullptr) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  ASSERT_EQ(map->Size(), kMapSize);
  ASSERT_LT(reinterpret_cast<uintptr_t>(BaseBegin(map.get())), 1ULL << 32);
}
#endif

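// Request mappings at explicit addresses: a known-free address and a null
// hint should succeed, while reusing an already-mapped address should fail.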
TEST_F(MemMapTest, MapAnonymousExactAddr) {
  CommonInit();
  std::string error_msg;
  // Find a valid address.
  uint8_t* valid_address = GetValidMapAddress(kPageSize, /*low_4gb*/false);
  // Map at an address that should work, which should succeed.
  std::unique_ptr<MemMap> map0(MemMap::MapAnonymous("MapAnonymous0",
                                                    valid_address,
                                                    kPageSize,
                                                    PROT_READ | PROT_WRITE,
                                                    false,
                                                    false,
                                                    &error_msg));
  ASSERT_TRUE(map0.get() != nullptr) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  ASSERT_TRUE(map0->BaseBegin() == valid_address);
  // Map at an unspecified address, which should succeed.
  std::unique_ptr<MemMap> map1(MemMap::MapAnonymous("MapAnonymous1",
                                                    nullptr,
                                                    kPageSize,
                                                    PROT_READ | PROT_WRITE,
                                                    false,
                                                    false,
                                                    &error_msg));
  ASSERT_TRUE(map1.get() != nullptr) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  ASSERT_TRUE(map1->BaseBegin() != nullptr);
  // Attempt to map at the same address, which should fail.
  std::unique_ptr<MemMap> map2(MemMap::MapAnonymous("MapAnonymous2",
                                                    reinterpret_cast<uint8_t*>(map1->BaseBegin()),
                                                    kPageSize,
                                                    PROT_READ | PROT_WRITE,
                                                    false,
                                                    false,
                                                    &error_msg));
  ASSERT_TRUE(map2.get() == nullptr) << error_msg;
  ASSERT_TRUE(!error_msg.empty());
}

TEST_F(MemMapTest, RemapAtEnd) {
  RemapAtEndTest(false);
}

#ifdef __LP64__
TEST_F(MemMapTest, RemapAtEnd32bit) {
  RemapAtEndTest(true);
}
#endif

TEST_F(MemMapTest, MapAnonymousExactAddr32bitHighAddr) {
  // Some MIPS32 hardware (namely the Creator Ci20 development board)
  // cannot allocate in the 2GB-4GB region.
  TEST_DISABLED_FOR_MIPS();

  CommonInit();
  // This test may not work under valgrind.
  if (RUNNING_ON_MEMORY_TOOL == 0) {
    constexpr size_t size = 0x100000;
    // Try all addresses starting from 2GB to 4GB.
    size_t start_addr = 2 * GB;
    std::string error_msg;
    std::unique_ptr<MemMap> map;
    for (; start_addr <= std::numeric_limits<uint32_t>::max() - size; start_addr += size) {
      map.reset(MemMap::MapAnonymous("MapAnonymousExactAddr32bitHighAddr",
                                     reinterpret_cast<uint8_t*>(start_addr),
                                     size,
                                     PROT_READ | PROT_WRITE,
                                     /*low_4gb*/true,
                                     false,
                                     &error_msg));
      if (map != nullptr) {
        break;
      }
    }
    ASSERT_TRUE(map.get() != nullptr) << error_msg;
    ASSERT_GE(reinterpret_cast<uintptr_t>(map->End()), 2u * GB);
    ASSERT_TRUE(error_msg.empty());
    ASSERT_EQ(BaseBegin(map.get()), reinterpret_cast<void*>(start_addr));
  }
}

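// A request whose end would wrap past the top of the address space must fail.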
TEST_F(MemMapTest, MapAnonymousOverflow) {
  CommonInit();
  std::string error_msg;
  uintptr_t ptr = 0;
  ptr -= kPageSize;  // Now it's close to the top.
  std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousOverflow",
                                                   reinterpret_cast<uint8_t*>(ptr),
                                                   2 * kPageSize,  // brings it over the top.
                                                   PROT_READ | PROT_WRITE,
                                                   false,
                                                   false,
                                                   &error_msg));
  ASSERT_EQ(nullptr, map.get());
  ASSERT_FALSE(error_msg.empty());
}

#ifdef __LP64__
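// An explicit address at or above 4 GiB is incompatible with low_4gb and
// must be rejected.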
TEST_F(MemMapTest, MapAnonymousLow4GBExpectedTooHigh) {
  CommonInit();
  std::string error_msg;
  std::unique_ptr<MemMap> map(
      MemMap::MapAnonymous("MapAnonymousLow4GBExpectedTooHigh",
                           reinterpret_cast<uint8_t*>(UINT64_C(0x100000000)),
                           kPageSize,
                           PROT_READ | PROT_WRITE,
                           true,
                           false,
                           &error_msg));
  ASSERT_EQ(nullptr, map.get());
  ASSERT_FALSE(error_msg.empty());
}

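// A range that starts below 4 GiB but ends above it must also be rejected
// when low_4gb is requested.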
TEST_F(MemMapTest, MapAnonymousLow4GBRangeTooHigh) {
  CommonInit();
  std::string error_msg;
  std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousLow4GBRangeTooHigh",
                                                   reinterpret_cast<uint8_t*>(0xF0000000),
                                                   0x20000000,
                                                   PROT_READ | PROT_WRITE,
                                                   true,
                                                   false,
                                                   &error_msg));
  ASSERT_EQ(nullptr, map.get());
  ASSERT_FALSE(error_msg.empty());
}
#endif

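// With the reuse flag set, mapping over part of an existing reservation
// should succeed.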
TEST_F(MemMapTest, MapAnonymousReuse) {
  CommonInit();
  std::string error_msg;
  std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousReserve",
                                                   nullptr,
                                                   0x20000,
                                                   PROT_READ | PROT_WRITE,
                                                   false,
                                                   false,
                                                   &error_msg));
  ASSERT_NE(nullptr, map.get());
  ASSERT_TRUE(error_msg.empty());
  std::unique_ptr<MemMap> map2(MemMap::MapAnonymous("MapAnonymousReused",
                                                    reinterpret_cast<uint8_t*>(map->BaseBegin()),
                                                    0x10000,
                                                    PROT_READ | PROT_WRITE,
                                                    false,
                                                    true,
                                                    &error_msg));
  ASSERT_NE(nullptr, map2.get());
  ASSERT_TRUE(error_msg.empty());
}

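// CheckNoGaps should succeed over a run of adjacent mappings and fail once
// one of them is unmapped, leaving a hole.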
TEST_F(MemMapTest, CheckNoGaps) {
  CommonInit();
  std::string error_msg;
  constexpr size_t kNumPages = 3;
  // Map a 3-page mem map.
  std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymous0",
                                                   nullptr,
                                                   kPageSize * kNumPages,
                                                   PROT_READ | PROT_WRITE,
                                                   false,
                                                   false,
                                                   &error_msg));
  ASSERT_TRUE(map.get() != nullptr) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  // Record the base address.
  uint8_t* map_base = reinterpret_cast<uint8_t*>(map->BaseBegin());
  // Unmap it.
  map.reset();

  // Map at the same address, but in page-sized separate mem maps,
  // assuming the space at the address is still available.
  std::unique_ptr<MemMap> map0(MemMap::MapAnonymous("MapAnonymous0",
                                                    map_base,
                                                    kPageSize,
                                                    PROT_READ | PROT_WRITE,
                                                    false,
                                                    false,
                                                    &error_msg));
  ASSERT_TRUE(map0.get() != nullptr) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  std::unique_ptr<MemMap> map1(MemMap::MapAnonymous("MapAnonymous1",
                                                    map_base + kPageSize,
                                                    kPageSize,
                                                    PROT_READ | PROT_WRITE,
                                                    false,
                                                    false,
                                                    &error_msg));
  ASSERT_TRUE(map1.get() != nullptr) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  std::unique_ptr<MemMap> map2(MemMap::MapAnonymous("MapAnonymous2",
                                                    map_base + kPageSize * 2,
                                                    kPageSize,
                                                    PROT_READ | PROT_WRITE,
                                                    false,
                                                    false,
                                                    &error_msg));
  ASSERT_TRUE(map2.get() != nullptr) << error_msg;
  ASSERT_TRUE(error_msg.empty());

  // One-map cases.
  ASSERT_TRUE(MemMap::CheckNoGaps(map0.get(), map0.get()));
  ASSERT_TRUE(MemMap::CheckNoGaps(map1.get(), map1.get()));
  ASSERT_TRUE(MemMap::CheckNoGaps(map2.get(), map2.get()));

  // Two or three-map cases.
  ASSERT_TRUE(MemMap::CheckNoGaps(map0.get(), map1.get()));
  ASSERT_TRUE(MemMap::CheckNoGaps(map1.get(), map2.get()));
  ASSERT_TRUE(MemMap::CheckNoGaps(map0.get(), map2.get()));

  // Unmap the middle one.
  map1.reset();

  // Should return false now that there's a gap in the middle.
  ASSERT_FALSE(MemMap::CheckNoGaps(map0.get(), map2.get()));
}

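// AlignBy should trim each mapping so that both its begin and end become
// aligned to the requested (two-page) boundary.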
TEST_F(MemMapTest, AlignBy) {
  CommonInit();
  std::string error_msg;
  // Cast the page size to size_t.
  const size_t page_size = static_cast<size_t>(kPageSize);
  // Map a region.
  std::unique_ptr<MemMap> m0(MemMap::MapAnonymous("MemMapTest_AlignByTest_map0",
                                                  nullptr,
                                                  14 * page_size,
                                                  PROT_READ | PROT_WRITE,
                                                  false,
                                                  false,
                                                  &error_msg));
  uint8_t* base0 = m0->Begin();
  ASSERT_TRUE(base0 != nullptr) << error_msg;
  ASSERT_EQ(m0->Size(), 14 * page_size);
  ASSERT_EQ(BaseBegin(m0.get()), base0);
  ASSERT_EQ(BaseSize(m0.get()), m0->Size());

  // Break it into several regions by using RemapAtEnd.
  std::unique_ptr<MemMap> m1(m0->RemapAtEnd(base0 + 3 * page_size,
                                            "MemMapTest_AlignByTest_map1",
                                            PROT_READ | PROT_WRITE,
                                            &error_msg));
  uint8_t* base1 = m1->Begin();
  ASSERT_TRUE(base1 != nullptr) << error_msg;
  ASSERT_EQ(base1, base0 + 3 * page_size);
  ASSERT_EQ(m0->Size(), 3 * page_size);

  std::unique_ptr<MemMap> m2(m1->RemapAtEnd(base1 + 4 * page_size,
                                            "MemMapTest_AlignByTest_map2",
                                            PROT_READ | PROT_WRITE,
                                            &error_msg));
  uint8_t* base2 = m2->Begin();
  ASSERT_TRUE(base2 != nullptr) << error_msg;
  ASSERT_EQ(base2, base1 + 4 * page_size);
  ASSERT_EQ(m1->Size(), 4 * page_size);

  std::unique_ptr<MemMap> m3(m2->RemapAtEnd(base2 + 3 * page_size,
                                            "MemMapTest_AlignByTest_map3",
                                            PROT_READ | PROT_WRITE,
                                            &error_msg));
  uint8_t* base3 = m3->Begin();
  ASSERT_TRUE(base3 != nullptr) << error_msg;
  ASSERT_EQ(base3, base2 + 3 * page_size);
  ASSERT_EQ(m2->Size(), 3 * page_size);
  ASSERT_EQ(m3->Size(), 4 * page_size);

  uint8_t* end0 = base0 + m0->Size();
  uint8_t* end1 = base1 + m1->Size();
  uint8_t* end2 = base2 + m2->Size();
  uint8_t* end3 = base3 + m3->Size();

  ASSERT_EQ(static_cast<size_t>(end3 - base0), 14 * page_size);

  if (IsAlignedParam(base0, 2 * page_size)) {
    ASSERT_FALSE(IsAlignedParam(base1, 2 * page_size));
    ASSERT_FALSE(IsAlignedParam(base2, 2 * page_size));
    ASSERT_TRUE(IsAlignedParam(base3, 2 * page_size));
    ASSERT_TRUE(IsAlignedParam(end3, 2 * page_size));
  } else {
    ASSERT_TRUE(IsAlignedParam(base1, 2 * page_size));
    ASSERT_TRUE(IsAlignedParam(base2, 2 * page_size));
    ASSERT_FALSE(IsAlignedParam(base3, 2 * page_size));
    ASSERT_FALSE(IsAlignedParam(end3, 2 * page_size));
  }

  // Align by 2 * page_size.
  m0->AlignBy(2 * page_size);
  m1->AlignBy(2 * page_size);
  m2->AlignBy(2 * page_size);
  m3->AlignBy(2 * page_size);

  EXPECT_TRUE(IsAlignedParam(m0->Begin(), 2 * page_size));
  EXPECT_TRUE(IsAlignedParam(m1->Begin(), 2 * page_size));
  EXPECT_TRUE(IsAlignedParam(m2->Begin(), 2 * page_size));
  EXPECT_TRUE(IsAlignedParam(m3->Begin(), 2 * page_size));

  EXPECT_TRUE(IsAlignedParam(m0->Begin() + m0->Size(), 2 * page_size));
  EXPECT_TRUE(IsAlignedParam(m1->Begin() + m1->Size(), 2 * page_size));
  EXPECT_TRUE(IsAlignedParam(m2->Begin() + m2->Size(), 2 * page_size));
  EXPECT_TRUE(IsAlignedParam(m3->Begin() + m3->Size(), 2 * page_size));

  if (IsAlignedParam(base0, 2 * page_size)) {
    EXPECT_EQ(m0->Begin(), base0);
    EXPECT_EQ(m0->Begin() + m0->Size(), end0 - page_size);
    EXPECT_EQ(m1->Begin(), base1 + page_size);
    EXPECT_EQ(m1->Begin() + m1->Size(), end1 - page_size);
    EXPECT_EQ(m2->Begin(), base2 + page_size);
    EXPECT_EQ(m2->Begin() + m2->Size(), end2);
    EXPECT_EQ(m3->Begin(), base3);
    EXPECT_EQ(m3->Begin() + m3->Size(), end3);
  } else {
    EXPECT_EQ(m0->Begin(), base0 + page_size);
    EXPECT_EQ(m0->Begin() + m0->Size(), end0);
    EXPECT_EQ(m1->Begin(), base1);
    EXPECT_EQ(m1->Begin() + m1->Size(), end1);
    EXPECT_EQ(m2->Begin(), base2);
    EXPECT_EQ(m2->Begin() + m2->Size(), end2 - page_size);
    EXPECT_EQ(m3->Begin(), base3 + page_size);
    EXPECT_EQ(m3->Begin() + m3->Size(), end3 - page_size);
  }
}

}  // namespace art