/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <sys/mman.h>

#include "common_runtime_test.h"
#include "gc/collector/immune_spaces.h"
#include "gc/space/image_space.h"
#include "gc/space/space-inl.h"
#include "oat_file.h"
#include "thread-current-inl.h"

namespace art {
namespace mirror {
class Object;
}  // namespace mirror
namespace gc {
namespace collector {

// Minimal OatFile stand-in for tests. Only the [begin_, end_) range is
// populated, which is all ImmuneSpaces needs to reason about the oat region
// that follows an image space. Not executable and has no real contents.
class DummyOatFile : public OatFile {
 public:
  DummyOatFile(uint8_t* begin, uint8_t* end) : OatFile("Location", /*is_executable*/ false) {
    begin_ = begin;
    end_ = end;
  }
};

// Test-only ImageSpace backed by anonymous memory instead of a real image
// file. Owns both the fake oat file and the MemMap backing it so that all
// mappings live as long as the space does.
class DummyImageSpace : public space::ImageSpace {
 public:
  // Takes ownership of |map| and |live_bitmap| (passed through to ImageSpace),
  // of |oat_file| (stored in the inherited oat_file_ field), and of |oat_map|
  // (kept alive in oat_map_ so the oat range stays mapped).
  DummyImageSpace(MemMap* map,
                  accounting::ContinuousSpaceBitmap* live_bitmap,
                  std::unique_ptr<DummyOatFile>&& oat_file,
                  std::unique_ptr<MemMap>&& oat_map)
      : ImageSpace("DummyImageSpace",
                   /*image_location*/"",
                   map,
                   live_bitmap,
                   map->End()),
        oat_map_(std::move(oat_map)) {
    oat_file_ = std::move(oat_file);
    // Mirror the owning pointer in the non-owning field used by accessors.
    oat_file_non_owned_ = oat_file_.get();
  }

 private:
  // Keeps the anonymous mapping behind the dummy oat file alive.
  std::unique_ptr<MemMap> oat_map_;
};

// Fixture providing helpers to build DummyImageSpaces at chosen addresses so
// the tests can construct precise [image][oat][space] memory layouts.
class ImmuneSpacesTest : public CommonRuntimeTest {
  // Upper bound on how many image spaces a single test may create.
  static constexpr size_t kMaxBitmaps = 10;

 public:
  ImmuneSpacesTest() {}

  // Pre-allocates the live bitmaps handed out by CreateImageSpace. Must be
  // called once before the first CreateImageSpace call in a test.
  void ReserveBitmaps() {
    // Create a bunch of dummy bitmaps since these are required to create image spaces. The bitmaps
    // do not need to cover the image spaces though.
    for (size_t i = 0; i < kMaxBitmaps; ++i) {
      std::unique_ptr<accounting::ContinuousSpaceBitmap> bitmap(
          accounting::ContinuousSpaceBitmap::Create("bitmap",
                                                    reinterpret_cast<uint8_t*>(kPageSize),
                                                    kPageSize));
      CHECK(bitmap != nullptr);
      live_bitmaps_.push_back(std::move(bitmap));
    }
  }

  // Create an image space, the oat file is optional.
  // Maps an anonymous image at |image_begin| and an anonymous oat region at
  // |oat_begin|, writes a fake ImageHeader at the start of the image, and
  // wraps everything in a DummyImageSpace. Returns nullptr (after logging) if
  // either mapping fails. Caller owns the returned space.
  DummyImageSpace* CreateImageSpace(uint8_t* image_begin,
                                    size_t image_size,
                                    uint8_t* oat_begin,
                                    size_t oat_size) {
    std::string error_str;
    std::unique_ptr<MemMap> map(MemMap::MapAnonymous("DummyImageSpace",
                                                     image_begin,
                                                     image_size,
                                                     PROT_READ | PROT_WRITE,
                                                     /*low_4gb*/true,
                                                     /*reuse*/false,
                                                     &error_str));
    if (map == nullptr) {
      LOG(ERROR) << error_str;
      return nullptr;
    }
    // Consume one pre-reserved bitmap (see ReserveBitmaps()).
    CHECK(!live_bitmaps_.empty());
    std::unique_ptr<accounting::ContinuousSpaceBitmap> live_bitmap(std::move(live_bitmaps_.back()));
    live_bitmaps_.pop_back();
    std::unique_ptr<MemMap> oat_map(MemMap::MapAnonymous("OatMap",
                                                         oat_begin,
                                                         oat_size,
                                                         PROT_READ | PROT_WRITE,
                                                         /*low_4gb*/true,
                                                         /*reuse*/false,
                                                         &error_str));
    if (oat_map == nullptr) {
      LOG(ERROR) << error_str;
      return nullptr;
    }
    std::unique_ptr<DummyOatFile> oat_file(new DummyOatFile(oat_map->Begin(), oat_map->End()));
    // Create image header.
    // Sections are left zero-initialized; the tests only inspect the oat
    // begin/end fields of the header.
    ImageSection sections[ImageHeader::kSectionCount];
    new (map->Begin()) ImageHeader(
        /*image_begin*/PointerToLowMemUInt32(map->Begin()),
        /*image_size*/map->Size(),
        sections,
        /*image_roots*/PointerToLowMemUInt32(map->Begin()) + 1,
        /*oat_checksum*/0u,
        // The oat file data in the header is always right after the image space.
        /*oat_file_begin*/PointerToLowMemUInt32(oat_begin),
        /*oat_data_begin*/PointerToLowMemUInt32(oat_begin),
        /*oat_data_end*/PointerToLowMemUInt32(oat_begin + oat_size),
        /*oat_file_end*/PointerToLowMemUInt32(oat_begin + oat_size),
        /*boot_image_begin*/0u,
        /*boot_image_size*/0u,
        /*boot_oat_begin*/0u,
        /*boot_oat_size*/0u,
        /*pointer_size*/sizeof(void*),
        /*compile_pic*/false,
        /*is_pic*/false,
        ImageHeader::kStorageModeUncompressed,
        /*storage_size*/0u);
    return new DummyImageSpace(map.release(),
                               live_bitmap.release(),
                               std::move(oat_file),
                               std::move(oat_map));
  }

  // Does not reserve the memory, the caller needs to be sure no other threads will map at the
  // returned address.
  // Probes for |size| contiguous low-4GB bytes by mapping and immediately
  // unmapping (the MemMap is destroyed on return); returns the address found,
  // or nullptr on failure.
  static uint8_t* GetContinuousMemoryRegion(size_t size) {
    std::string error_str;
    std::unique_ptr<MemMap> map(MemMap::MapAnonymous("reserve",
                                                     nullptr,
                                                     size,
                                                     PROT_READ | PROT_WRITE,
                                                     /*low_4gb*/true,
                                                     /*reuse*/false,
                                                     &error_str));
    if (map == nullptr) {
      LOG(ERROR) << "Failed to allocate memory region " << error_str;
      return nullptr;
    }
    return map->Begin();
  }

 private:
  // Bitmap pool for pre-allocated dummy bitmaps. We need to pre-allocate them since we don't want
  // them to randomly get placed somewhere where we want an image space.
  std::vector<std::unique_ptr<accounting::ContinuousSpaceBitmap>> live_bitmaps_;
};

// Minimal non-image continuous space used as the "heap" neighbor in the
// layout tests. Never collected, never moves objects, and has no bitmaps.
class DummySpace : public space::ContinuousSpace {
 public:
  DummySpace(uint8_t* begin, uint8_t* end)
      : ContinuousSpace("DummySpace",
                        space::kGcRetentionPolicyNeverCollect,
                        begin,
                        end,
                        /*limit*/end) {}

  space::SpaceType GetType() const OVERRIDE {
    return space::kSpaceTypeMallocSpace;
  }

  bool CanMoveObjects() const OVERRIDE {
    return false;
  }

  accounting::ContinuousSpaceBitmap* GetLiveBitmap() const OVERRIDE {
    return nullptr;
  }

  accounting::ContinuousSpaceBitmap* GetMarkBitmap() const OVERRIDE {
    return nullptr;
  }
};

// Two adjacent plain spaces must coalesce into one immune region spanning
// from the first space's begin to the second space's limit.
TEST_F(ImmuneSpacesTest, AppendBasic) {
  ImmuneSpaces spaces;
  uint8_t* const base = reinterpret_cast<uint8_t*>(0x1000);
  DummySpace a(base, base + 45 * KB);
  DummySpace b(a.Limit(), a.Limit() + 813 * KB);
  {
    WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
    spaces.AddSpace(&a);
    spaces.AddSpace(&b);
  }
  EXPECT_TRUE(spaces.ContainsSpace(&a));
  EXPECT_TRUE(spaces.ContainsSpace(&b));
  EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().Begin()), a.Begin());
  EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().End()), b.Limit());
}

// Tests [image][oat][space] producing a single large immune region.
207 TEST_F(ImmuneSpacesTest, AppendAfterImage) { 208 ReserveBitmaps(); 209 ImmuneSpaces spaces; 210 constexpr size_t kImageSize = 123 * kPageSize; 211 constexpr size_t kImageOatSize = 321 * kPageSize; 212 constexpr size_t kOtherSpaceSize= 100 * kPageSize; 213 214 uint8_t* memory = GetContinuousMemoryRegion(kImageSize + kImageOatSize + kOtherSpaceSize); 215 216 std::unique_ptr<DummyImageSpace> image_space(CreateImageSpace(memory, 217 kImageSize, 218 memory + kImageSize, 219 kImageOatSize)); 220 ASSERT_TRUE(image_space != nullptr); 221 const ImageHeader& image_header = image_space->GetImageHeader(); 222 DummySpace space(image_header.GetOatFileEnd(), image_header.GetOatFileEnd() + kOtherSpaceSize); 223 224 EXPECT_EQ(image_header.GetImageSize(), kImageSize); 225 EXPECT_EQ(static_cast<size_t>(image_header.GetOatFileEnd() - image_header.GetOatFileBegin()), 226 kImageOatSize); 227 EXPECT_EQ(image_space->GetOatFile()->Size(), kImageOatSize); 228 // Check that we do not include the oat if there is no space after. 229 { 230 WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_); 231 spaces.AddSpace(image_space.get()); 232 } 233 EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().Begin()), 234 image_space->Begin()); 235 EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().End()), 236 image_space->Limit()); 237 // Add another space and ensure it gets appended. 238 EXPECT_NE(image_space->Limit(), space.Begin()); 239 { 240 WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_); 241 spaces.AddSpace(&space); 242 } 243 EXPECT_TRUE(spaces.ContainsSpace(image_space.get())); 244 EXPECT_TRUE(spaces.ContainsSpace(&space)); 245 // CreateLargestImmuneRegion should have coalesced the two spaces since the oat code after the 246 // image prevents gaps. 247 // Check that we have a continuous region. 
248 EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().Begin()), 249 image_space->Begin()); 250 EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().End()), space.Limit()); 251 } 252 253 // Test [image1][image2][image1 oat][image2 oat][image3] producing a single large immune region. 254 TEST_F(ImmuneSpacesTest, MultiImage) { 255 ReserveBitmaps(); 256 // Image 2 needs to be smaller or else it may be chosen for immune region. 257 constexpr size_t kImage1Size = kPageSize * 17; 258 constexpr size_t kImage2Size = kPageSize * 13; 259 constexpr size_t kImage3Size = kPageSize * 3; 260 constexpr size_t kImage1OatSize = kPageSize * 5; 261 constexpr size_t kImage2OatSize = kPageSize * 8; 262 constexpr size_t kImage3OatSize = kPageSize; 263 constexpr size_t kImageBytes = kImage1Size + kImage2Size + kImage3Size; 264 constexpr size_t kMemorySize = kImageBytes + kImage1OatSize + kImage2OatSize + kImage3OatSize; 265 uint8_t* memory = GetContinuousMemoryRegion(kMemorySize); 266 uint8_t* space1_begin = memory; 267 memory += kImage1Size; 268 uint8_t* space2_begin = memory; 269 memory += kImage2Size; 270 uint8_t* space1_oat_begin = memory; 271 memory += kImage1OatSize; 272 uint8_t* space2_oat_begin = memory; 273 memory += kImage2OatSize; 274 uint8_t* space3_begin = memory; 275 276 std::unique_ptr<DummyImageSpace> space1(CreateImageSpace(space1_begin, 277 kImage1Size, 278 space1_oat_begin, 279 kImage1OatSize)); 280 ASSERT_TRUE(space1 != nullptr); 281 282 283 std::unique_ptr<DummyImageSpace> space2(CreateImageSpace(space2_begin, 284 kImage2Size, 285 space2_oat_begin, 286 kImage2OatSize)); 287 ASSERT_TRUE(space2 != nullptr); 288 289 // Finally put a 3rd image space. 290 std::unique_ptr<DummyImageSpace> space3(CreateImageSpace(space3_begin, 291 kImage3Size, 292 space3_begin + kImage3Size, 293 kImage3OatSize)); 294 ASSERT_TRUE(space3 != nullptr); 295 296 // Check that we do not include the oat if there is no space after. 
297 ImmuneSpaces spaces; 298 { 299 WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_); 300 LOG(INFO) << "Adding space1 " << reinterpret_cast<const void*>(space1->Begin()); 301 spaces.AddSpace(space1.get()); 302 LOG(INFO) << "Adding space2 " << reinterpret_cast<const void*>(space2->Begin()); 303 spaces.AddSpace(space2.get()); 304 } 305 // There are no more heap bytes, the immune region should only be the first 2 image spaces and 306 // should exclude the image oat files. 307 EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().Begin()), 308 space1->Begin()); 309 EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().End()), 310 space2->Limit()); 311 312 // Add another space after the oat files, now it should contain the entire memory region. 313 { 314 WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_); 315 LOG(INFO) << "Adding space3 " << reinterpret_cast<const void*>(space3->Begin()); 316 spaces.AddSpace(space3.get()); 317 } 318 EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().Begin()), 319 space1->Begin()); 320 EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().End()), 321 space3->Limit()); 322 323 // Add a smaller non-adjacent space and ensure it does not become part of the immune region. 324 // Image size is kImageBytes - kPageSize 325 // Oat size is kPageSize. 326 // Guard pages to ensure it is not adjacent to an existing immune region. 
327 // Layout: [guard page][image][oat][guard page] 328 constexpr size_t kGuardSize = kPageSize; 329 constexpr size_t kImage4Size = kImageBytes - kPageSize; 330 constexpr size_t kImage4OatSize = kPageSize; 331 uint8_t* memory2 = GetContinuousMemoryRegion(kImage4Size + kImage4OatSize + kGuardSize * 2); 332 std::unique_ptr<DummyImageSpace> space4(CreateImageSpace(memory2 + kGuardSize, 333 kImage4Size, 334 memory2 + kGuardSize + kImage4Size, 335 kImage4OatSize)); 336 ASSERT_TRUE(space4 != nullptr); 337 { 338 WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_); 339 LOG(INFO) << "Adding space4 " << reinterpret_cast<const void*>(space4->Begin()); 340 spaces.AddSpace(space4.get()); 341 } 342 EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().Begin()), 343 space1->Begin()); 344 EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().End()), 345 space3->Limit()); 346 347 // Add a larger non-adjacent space and ensure it becomes the new largest immune region. 348 // Image size is kImageBytes + kPageSize 349 // Oat size is kPageSize. 350 // Guard pages to ensure it is not adjacent to an existing immune region. 
351 // Layout: [guard page][image][oat][guard page] 352 constexpr size_t kImage5Size = kImageBytes + kPageSize; 353 constexpr size_t kImage5OatSize = kPageSize; 354 uint8_t* memory3 = GetContinuousMemoryRegion(kImage5Size + kImage5OatSize + kGuardSize * 2); 355 std::unique_ptr<DummyImageSpace> space5(CreateImageSpace(memory3 + kGuardSize, 356 kImage5Size, 357 memory3 + kGuardSize + kImage5Size, 358 kImage5OatSize)); 359 ASSERT_TRUE(space5 != nullptr); 360 { 361 WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_); 362 LOG(INFO) << "Adding space5 " << reinterpret_cast<const void*>(space5->Begin()); 363 spaces.AddSpace(space5.get()); 364 } 365 EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().Begin()), space5->Begin()); 366 EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().End()), space5->Limit()); 367 } 368 369 } // namespace collector 370 } // namespace gc 371 } // namespace art 372