/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_SPACE_SPACE_TEST_H_
#define ART_RUNTIME_GC_SPACE_SPACE_TEST_H_

#include <stdint.h>
#include <memory>

#include "common_runtime_test.h"
#include "globals.h"
#include "mirror/array-inl.h"
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
#include "mirror/object-inl.h"
#include "scoped_thread_state_change.h"
#include "thread_list.h"
#include "zygote_space.h"

namespace art {
namespace gc {
namespace space {

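// Test fixture shared by the MallocSpace tests. It provides helpers to
// register a space with the heap and to allocate byte arrays from the space
// under test, so that test allocations look like real heap objects.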
template <class Super>
class SpaceTest : public Super {
 public:
  jobject byte_array_class_ = nullptr;

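  // Registers the space with the heap (suspending all threads while the space
  // list is modified) and makes it the default allocation space. Optionally
  // revokes all thread-local buffers first.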
  void AddSpace(ContinuousSpace* space, bool revoke = true) {
    Heap* heap = Runtime::Current()->GetHeap();
    if (revoke) {
      heap->RevokeAllThreadLocalBuffers();
    }
    {
      ScopedThreadStateChange sts(Thread::Current(), kSuspended);
      ScopedSuspendAll ssa("Add space");
      heap->AddSpace(space);
    }
    heap->SetSpaceAsDefault(space);
  }

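  // Returns the mirror::Class for byte[] ("[B"), caching it in a JNI local
  // reference on first use so that repeated allocations don't re-resolve it.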
  mirror::Class* GetByteArrayClass(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_) {
    StackHandleScope<1> hs(self);
    auto null_loader(hs.NewHandle<mirror::ClassLoader>(nullptr));
    if (byte_array_class_ == nullptr) {
      mirror::Class* byte_array_class =
          Runtime::Current()->GetClassLinker()->FindClass(self, "[B", null_loader);
      EXPECT_TRUE(byte_array_class != nullptr);
      byte_array_class_ = self->GetJniEnv()->NewLocalRef(byte_array_class);
      EXPECT_TRUE(byte_array_class_ != nullptr);
    }
    return reinterpret_cast<mirror::Class*>(self->DecodeJObject(byte_array_class_));
  }

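  // Allocates |bytes| from |alloc_space| and, on success, installs the byte
  // array class and length so the raw allocation is a valid byte[] object of
  // the requested size.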
  mirror::Object* Alloc(space::MallocSpace* alloc_space,
                        Thread* self,
                        size_t bytes,
                        size_t* bytes_allocated,
                        size_t* usable_size,
                        size_t* bytes_tl_bulk_allocated)
      SHARED_REQUIRES(Locks::mutator_lock_) {
    StackHandleScope<1> hs(self);
    Handle<mirror::Class> byte_array_class(hs.NewHandle(GetByteArrayClass(self)));
    mirror::Object* obj = alloc_space->Alloc(self,
                                             bytes,
                                             bytes_allocated,
                                             usable_size,
                                             bytes_tl_bulk_allocated);
    if (obj != nullptr) {
      InstallClass(obj, byte_array_class.Get(), bytes);
    }
    return obj;
  }

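  // Same as Alloc(), but uses AllocWithGrowth(), which may grow the space's
  // footprint beyond its current limit (up to the growth limit) to satisfy
  // the request.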
  mirror::Object* AllocWithGrowth(space::MallocSpace* alloc_space,
                                  Thread* self,
                                  size_t bytes,
                                  size_t* bytes_allocated,
                                  size_t* usable_size,
                                  size_t* bytes_tl_bulk_allocated)
      SHARED_REQUIRES(Locks::mutator_lock_) {
    StackHandleScope<1> hs(self);
    Handle<mirror::Class> byte_array_class(hs.NewHandle(GetByteArrayClass(self)));
    mirror::Object* obj = alloc_space->AllocWithGrowth(self, bytes, bytes_allocated, usable_size,
                                                       bytes_tl_bulk_allocated);
    if (obj != nullptr) {
      InstallClass(obj, byte_array_class.Get(), bytes);
    }
    return obj;
  }

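  // Turns a raw allocation into a valid byte[] object: sets the class, the
  // read barrier pointer (when in use), and an array length chosen so that
  // SizeOf() equals the allocation size.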
  void InstallClass(mirror::Object* o, mirror::Class* byte_array_class, size_t size)
      SHARED_REQUIRES(Locks::mutator_lock_) {
    // Note the minimum size, which is the size of a zero-length byte array.
    EXPECT_GE(size, SizeOfZeroLengthByteArray());
    EXPECT_TRUE(byte_array_class != nullptr);
    o->SetClass(byte_array_class);
    if (kUseBakerOrBrooksReadBarrier) {
      // Like the proper heap object allocation, install and verify
      // the correct read barrier pointer.
      if (kUseBrooksReadBarrier) {
        o->SetReadBarrierPointer(o);
      }
      o->AssertReadBarrierPointer();
    }
    mirror::Array* arr = o->AsArray<kVerifyNone>();
    size_t header_size = SizeOfZeroLengthByteArray();
    int32_t length = size - header_size;
    arr->SetLength(length);
    EXPECT_EQ(arr->SizeOf<kVerifyNone>(), size);
  }

  static size_t SizeOfZeroLengthByteArray() {
    return mirror::Array::DataOffset(Primitive::ComponentSize(Primitive::kPrimByte)).Uint32Value();
  }

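  // Factory signature used by the driver below to create the concrete space
  // under test (e.g. a DlMallocSpace or a RosAllocSpace).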
  typedef MallocSpace* (*CreateSpaceFn)(const std::string& name, size_t initial_size,
                                        size_t growth_limit, size_t capacity,
                                        uint8_t* requested_begin);

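  // A positive object_size requests fixed-size allocations; a negative value
  // requests random sizes in [0, -object_size). Rounds above 1 allocate with
  // growth; round 1 does not.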
  void SizeFootPrintGrowthLimitAndTrimBody(MallocSpace* space, intptr_t object_size,
                                           int round, size_t growth_limit);
  void SizeFootPrintGrowthLimitAndTrimDriver(size_t object_size, CreateSpaceFn create_space);
};

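// Simple linear congruential generator (the constants from the C standard's
// sample rand() implementation); a fixed seed keeps the "random" allocation
// sizes reproducible across runs.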
static inline size_t test_rand(size_t* seed) {
  *seed = *seed * 1103515245 + 12345;
  return *seed;
}

template <class Super>
void SpaceTest<Super>::SizeFootPrintGrowthLimitAndTrimBody(MallocSpace* space,
                                                           intptr_t object_size,
                                                           int round,
                                                           size_t growth_limit) {
  if ((object_size > 0 && object_size >= static_cast<intptr_t>(growth_limit)) ||
      (object_size < 0 && -object_size >= static_cast<intptr_t>(growth_limit))) {
    // No allocation can succeed.
    return;
  }

  // The space's footprint equals the amount of resources requested from the system.
  size_t footprint = space->GetFootprint();

  // The space must at least have its bookkeeping allocated.
  EXPECT_GT(footprint, 0u);

  // But it shouldn't exceed the initial size.
  EXPECT_LE(footprint, growth_limit);

  // The space's size shouldn't exceed the initial size either.
  EXPECT_LE(space->Size(), growth_limit);

  // This invariant should always hold: otherwise the space has grown larger than
  // it believes its size to be (which would break other invariants).
  EXPECT_GE(space->Size(), footprint);

  // Fill the space with lots of small objects up to the growth limit.
  size_t max_objects = (growth_limit / (object_size > 0 ? object_size : 8)) + 1;
  std::unique_ptr<mirror::Object*[]> lots_of_objects(new mirror::Object*[max_objects]);
  size_t last_object = 0;  // last object for which allocation succeeded
  size_t amount_allocated = 0;  // amount of space allocated
  Thread* self = Thread::Current();
  ScopedObjectAccess soa(self);
  size_t rand_seed = 123456789;
  for (size_t i = 0; i < max_objects; i++) {
    size_t alloc_fails = 0;  // number of failed allocations
    size_t max_fails = 30;  // number of times we fail allocation before giving up
    for (; alloc_fails < max_fails; alloc_fails++) {
      size_t alloc_size;
      if (object_size > 0) {
        alloc_size = object_size;
      } else {
        alloc_size = test_rand(&rand_seed) % static_cast<size_t>(-object_size);
        // Note the minimum size, which is the size of a zero-length byte array.
        size_t size_of_zero_length_byte_array = SizeOfZeroLengthByteArray();
        if (alloc_size < size_of_zero_length_byte_array) {
          alloc_size = size_of_zero_length_byte_array;
        }
      }
      StackHandleScope<1> hs(soa.Self());
      auto object(hs.NewHandle<mirror::Object>(nullptr));
      size_t bytes_allocated = 0;
      size_t bytes_tl_bulk_allocated;
      if (round <= 1) {
        object.Assign(Alloc(space, self, alloc_size, &bytes_allocated, nullptr,
                            &bytes_tl_bulk_allocated));
      } else {
        object.Assign(AllocWithGrowth(space, self, alloc_size, &bytes_allocated, nullptr,
                                      &bytes_tl_bulk_allocated));
      }
      footprint = space->GetFootprint();
      EXPECT_GE(space->Size(), footprint);  // invariant
      if (object.Get() != nullptr) {  // allocation succeeded
        lots_of_objects[i] = object.Get();
        size_t allocation_size = space->AllocationSize(object.Get(), nullptr);
        EXPECT_EQ(bytes_allocated, allocation_size);
        if (object_size > 0) {
          EXPECT_GE(allocation_size, static_cast<size_t>(object_size));
        } else {
          EXPECT_GE(allocation_size, 8u);
        }
        EXPECT_TRUE(bytes_tl_bulk_allocated == 0 ||
                    bytes_tl_bulk_allocated >= allocation_size);
        amount_allocated += allocation_size;
        break;
      }
    }
    if (alloc_fails == max_fails) {
      last_object = i;
      break;
    }
  }
  CHECK_NE(last_object, 0u);  // We should have filled the space.
  EXPECT_GT(amount_allocated, 0u);

  // We shouldn't have gone past the growth_limit.
  EXPECT_LE(amount_allocated, growth_limit);
  EXPECT_LE(footprint, growth_limit);
  EXPECT_LE(space->Size(), growth_limit);

  // Footprint and size should agree with the amount allocated.
  EXPECT_GE(footprint, amount_allocated);
  EXPECT_GE(space->Size(), amount_allocated);

  // Release storage in a somewhat ad hoc manner.
  size_t free_increment = 96;
  while (true) {
    {
      ScopedThreadStateChange tsc(self, kNative);
      // Give the space a haircut.
      space->Trim();
    }

    // Bounds sanity checks.
    footprint = space->GetFootprint();
    EXPECT_LE(amount_allocated, growth_limit);
    EXPECT_GE(footprint, amount_allocated);
    EXPECT_LE(footprint, growth_limit);
    EXPECT_GE(space->Size(), amount_allocated);
    EXPECT_LE(space->Size(), growth_limit);

    if (free_increment == 0) {
      break;
    }

    // Free every free_increment-th remaining object.
    for (size_t i = 0; i < last_object; i += free_increment) {
      mirror::Object* object = lots_of_objects.get()[i];
      if (object == nullptr) {
        continue;
      }
      size_t allocation_size = space->AllocationSize(object, nullptr);
      if (object_size > 0) {
        EXPECT_GE(allocation_size, static_cast<size_t>(object_size));
      } else {
        EXPECT_GE(allocation_size, 8u);
      }
      space->Free(self, object);
      lots_of_objects.get()[i] = nullptr;
      amount_allocated -= allocation_size;
      footprint = space->GetFootprint();
      EXPECT_GE(space->Size(), footprint);  // invariant
    }

    free_increment >>= 1;
  }

  // The space has become empty here, before the large object allocation
  // below. For RosAlloc, revoke thread-local runs, which are kept even
  // when empty for performance reasons, so that they won't cause the
  // following large object allocation to fail due to potential
  // fragmentation. They are normally revoked at each GC, but no GC
  // happens here.
  space->RevokeAllThreadLocalBuffers();

  // All memory was released; try a large allocation to check that freed memory
  // is being coalesced.
  StackHandleScope<1> hs(soa.Self());
  auto large_object(hs.NewHandle<mirror::Object>(nullptr));
  size_t three_quarters_space = (growth_limit / 2) + (growth_limit / 4);
  size_t bytes_allocated = 0;
  size_t bytes_tl_bulk_allocated;
  if (round <= 1) {
    large_object.Assign(Alloc(space, self, three_quarters_space, &bytes_allocated, nullptr,
                              &bytes_tl_bulk_allocated));
  } else {
    large_object.Assign(AllocWithGrowth(space, self, three_quarters_space, &bytes_allocated,
                                        nullptr, &bytes_tl_bulk_allocated));
  }
  EXPECT_TRUE(large_object.Get() != nullptr);

  // Sanity check the footprint.
  footprint = space->GetFootprint();
  EXPECT_LE(footprint, growth_limit);
  EXPECT_GE(space->Size(), footprint);
  EXPECT_LE(space->Size(), growth_limit);

  // Clean up.
  space->Free(self, large_object.Assign(nullptr));

  // Sanity check the footprint again.
  footprint = space->GetFootprint();
  EXPECT_LE(footprint, growth_limit);
  EXPECT_GE(space->Size(), footprint);
  EXPECT_LE(space->Size(), growth_limit);
}

template <class Super>
void SpaceTest<Super>::SizeFootPrintGrowthLimitAndTrimDriver(size_t object_size,
                                                             CreateSpaceFn create_space) {
  if (object_size < SizeOfZeroLengthByteArray()) {
    // Too small for the object layout/model.
    return;
  }
  size_t initial_size = 4 * MB;
  size_t growth_limit = 8 * MB;
  size_t capacity = 16 * MB;
  MallocSpace* space(create_space("test", initial_size, growth_limit, capacity, nullptr));
  ASSERT_TRUE(space != nullptr);

  // Basic sanity checks.
  EXPECT_EQ(space->Capacity(), growth_limit);
  EXPECT_EQ(space->NonGrowthLimitCapacity(), capacity);

  // Make the space findable to the heap; this will also delete the space when
  // the runtime is cleaned up.
  AddSpace(space);

  // In round 1 we don't allocate with growth and therefore can't grow past the
  // initial size. This effectively makes the growth_limit the initial_size, so
  // pass initial_size as the limit.
  SizeFootPrintGrowthLimitAndTrimBody(space, object_size, 1, initial_size);
  SizeFootPrintGrowthLimitAndTrimBody(space, object_size, 2, growth_limit);
  // Remove the growth limit.
  space->ClearGrowthLimit();
  EXPECT_EQ(space->Capacity(), capacity);
  SizeFootPrintGrowthLimitAndTrimBody(space, object_size, 3, capacity);
}

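// The macros below stamp out one gtest per allocation size. "Static" tests
// use a fixed allocation size; "Random" tests use random sizes up to the
// given maximum (passed to the driver as a negative object_size).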
#define TEST_SizeFootPrintGrowthLimitAndTrimStatic(name, spaceName, spaceFn, size) \
  TEST_F(spaceName##StaticTest, SizeFootPrintGrowthLimitAndTrim_AllocationsOf_##name) { \
    SizeFootPrintGrowthLimitAndTrimDriver(size, spaceFn); \
  }

#define TEST_SizeFootPrintGrowthLimitAndTrimRandom(name, spaceName, spaceFn, size) \
  TEST_F(spaceName##RandomTest, SizeFootPrintGrowthLimitAndTrim_RandomAllocationsWithMax_##name) { \
    SizeFootPrintGrowthLimitAndTrimDriver(-size, spaceFn); \
  }

#define TEST_SPACE_CREATE_FN_STATIC(spaceName, spaceFn) \
  class spaceName##StaticTest : public SpaceTest<CommonRuntimeTest> { \
  }; \
  \
  TEST_SizeFootPrintGrowthLimitAndTrimStatic(12B, spaceName, spaceFn, 12) \
  TEST_SizeFootPrintGrowthLimitAndTrimStatic(16B, spaceName, spaceFn, 16) \
  TEST_SizeFootPrintGrowthLimitAndTrimStatic(24B, spaceName, spaceFn, 24) \
  TEST_SizeFootPrintGrowthLimitAndTrimStatic(32B, spaceName, spaceFn, 32) \
  TEST_SizeFootPrintGrowthLimitAndTrimStatic(64B, spaceName, spaceFn, 64) \
  TEST_SizeFootPrintGrowthLimitAndTrimStatic(128B, spaceName, spaceFn, 128) \
  TEST_SizeFootPrintGrowthLimitAndTrimStatic(1KB, spaceName, spaceFn, 1 * KB) \
  TEST_SizeFootPrintGrowthLimitAndTrimStatic(4KB, spaceName, spaceFn, 4 * KB) \
  TEST_SizeFootPrintGrowthLimitAndTrimStatic(1MB, spaceName, spaceFn, 1 * MB) \
  TEST_SizeFootPrintGrowthLimitAndTrimStatic(4MB, spaceName, spaceFn, 4 * MB) \
  TEST_SizeFootPrintGrowthLimitAndTrimStatic(8MB, spaceName, spaceFn, 8 * MB)

#define TEST_SPACE_CREATE_FN_RANDOM(spaceName, spaceFn) \
  class spaceName##RandomTest : public SpaceTest<CommonRuntimeTest> { \
  }; \
  \
  TEST_SizeFootPrintGrowthLimitAndTrimRandom(16B, spaceName, spaceFn, 16) \
  TEST_SizeFootPrintGrowthLimitAndTrimRandom(24B, spaceName, spaceFn, 24) \
  TEST_SizeFootPrintGrowthLimitAndTrimRandom(32B, spaceName, spaceFn, 32) \
  TEST_SizeFootPrintGrowthLimitAndTrimRandom(64B, spaceName, spaceFn, 64) \
  TEST_SizeFootPrintGrowthLimitAndTrimRandom(128B, spaceName, spaceFn, 128) \
  TEST_SizeFootPrintGrowthLimitAndTrimRandom(1KB, spaceName, spaceFn, 1 * KB) \
  TEST_SizeFootPrintGrowthLimitAndTrimRandom(4KB, spaceName, spaceFn, 4 * KB) \
  TEST_SizeFootPrintGrowthLimitAndTrimRandom(1MB, spaceName, spaceFn, 1 * MB) \
  TEST_SizeFootPrintGrowthLimitAndTrimRandom(4MB, spaceName, spaceFn, 4 * MB) \
  TEST_SizeFootPrintGrowthLimitAndTrimRandom(8MB, spaceName, spaceFn, 8 * MB)
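
// Example usage (a sketch, not part of this header): a concrete space test
// defines a factory matching CreateSpaceFn and instantiates the suites with
// it. The factory below assumes a DlMallocSpace-style Create() and is
// illustrative only.
//
//   MallocSpace* CreateDlMallocSpace(const std::string& name, size_t initial_size,
//                                    size_t growth_limit, size_t capacity,
//                                    uint8_t* requested_begin) {
//     return DlMallocSpace::Create(name, initial_size, growth_limit, capacity,
//                                  requested_begin, /*can_move_objects=*/false);
//   }
//   TEST_SPACE_CREATE_FN_STATIC(DlMallocSpace, CreateDlMallocSpace)
//   TEST_SPACE_CREATE_FN_RANDOM(DlMallocSpace, CreateDlMallocSpace)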

}  // namespace space
}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_SPACE_SPACE_TEST_H_