/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_HEAP_INL_H_
#define ART_RUNTIME_GC_HEAP_INL_H_

#include "heap.h"

#include "allocation_listener.h"
#include "base/time_utils.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/allocation_record.h"
#include "gc/collector/semi_space.h"
#include "gc/space/bump_pointer_space-inl.h"
#include "gc/space/dlmalloc_space-inl.h"
#include "gc/space/large_object_space.h"
#include "gc/space/region_space-inl.h"
#include "gc/space/rosalloc_space-inl.h"
#include "handle_scope-inl.h"
#include "obj_ptr-inl.h"
#include "runtime.h"
#include "thread-inl.h"
#include "utils.h"
#include "verify_object.h"

namespace art {
namespace gc {

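// Allocates an object of class `klass` with at least `byte_count` bytes from `allocator` and runs
// `pre_fence_visitor` on it before the constructor fence is issued.
//   kInstrumented:     compile in the instrumented slow paths (runtime stats, allocation
//                      records/listeners, GC stress checks).
//   kCheckLargeObject: route sufficiently large primitive arrays and strings to the large object
//                      space first.
// Rough usage sketch (hypothetical caller, shown for illustration only):
//   ObjPtr<mirror::Object> o = heap->AllocObjectWithAllocator<true, true>(
//       self, klass, byte_count, heap->GetCurrentAllocator(), visitor);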
template <bool kInstrumented, bool kCheckLargeObject, typename PreFenceVisitor>
inline mirror::Object* Heap::AllocObjectWithAllocator(Thread* self,
                                                      ObjPtr<mirror::Class> klass,
                                                      size_t byte_count,
                                                      AllocatorType allocator,
                                                      const PreFenceVisitor& pre_fence_visitor) {
  if (kIsDebugBuild) {
    CheckPreconditionsForAllocObject(klass, byte_count);
    // Since allocation can cause a GC which will need to SuspendAll, make sure all allocations are
    // done in the runnable state where suspension is expected.
    CHECK_EQ(self->GetState(), kRunnable);
    self->AssertThreadSuspensionIsAllowable();
    self->AssertNoPendingException();
    // Make sure to preserve klass.
    StackHandleScope<1> hs(self);
    HandleWrapperObjPtr<mirror::Class> h = hs.NewHandleWrapper(&klass);
    self->PoisonObjectPointers();
  }
  // Need to check that we aren't the large object allocator since the large object allocation
  // code path includes this function. If we didn't check, we would have an infinite loop.
  ObjPtr<mirror::Object> obj;
  if (kCheckLargeObject && UNLIKELY(ShouldAllocLargeObject(klass, byte_count))) {
    obj = AllocLargeObject<kInstrumented, PreFenceVisitor>(self, &klass, byte_count,
                                                           pre_fence_visitor);
    if (obj != nullptr) {
      return obj.Ptr();
    } else {
      // There should be an OOM exception; since we are retrying, clear it.
      self->ClearException();
    }
    // If the large object allocation failed, try to use the normal spaces (main space,
    // non moving space). This can happen if there is significant virtual address space
    // fragmentation.
  }
  // Bytes allocated for the (individual) object.
  size_t bytes_allocated;
  size_t usable_size;
  size_t new_num_bytes_allocated = 0;
  if (IsTLABAllocator(allocator)) {
    byte_count = RoundUp(byte_count, space::BumpPointerSpace::kAlignment);
  }
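  // Three paths follow: (1) the TLAB fast path, which bumps the thread-local buffer; (2) an
  // uninstrumented RosAlloc thread-local run allocation; and (3) the generic path through
  // TryToAllocate, falling back to AllocateInternalWithGc on failure. Only path (3) updates
  // new_num_bytes_allocated, which is what the concurrent GC check below is based on.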
  // If we have a thread local allocation, we don't need to update bytes allocated.
  if (IsTLABAllocator(allocator) && byte_count <= self->TlabSize()) {
    obj = self->AllocTlab(byte_count);
    DCHECK(obj != nullptr) << "AllocTlab can't fail";
    obj->SetClass(klass);
    if (kUseBakerReadBarrier) {
      obj->AssertReadBarrierState();
    }
    bytes_allocated = byte_count;
    usable_size = bytes_allocated;
    pre_fence_visitor(obj, usable_size);
    QuasiAtomic::ThreadFenceForConstructor();
  } else if (
      !kInstrumented && allocator == kAllocatorTypeRosAlloc &&
      LIKELY((obj = rosalloc_space_->AllocThreadLocal(self, byte_count, &bytes_allocated)) !=
             nullptr)) {
    DCHECK(!is_running_on_memory_tool_);
    obj->SetClass(klass);
    if (kUseBakerReadBarrier) {
      obj->AssertReadBarrierState();
    }
    usable_size = bytes_allocated;
    pre_fence_visitor(obj, usable_size);
    QuasiAtomic::ThreadFenceForConstructor();
  } else {
    // Bytes allocated, taking bulk thread-local buffer allocations into account.
    size_t bytes_tl_bulk_allocated = 0;
    obj = TryToAllocate<kInstrumented, false>(self, allocator, byte_count, &bytes_allocated,
                                              &usable_size, &bytes_tl_bulk_allocated);
    if (UNLIKELY(obj == nullptr)) {
      // AllocateInternalWithGc can cause thread suspension; if someone instruments the entrypoints
      // or changes the allocator at a suspend point here, we need to retry the allocation.
      obj = AllocateInternalWithGc(self,
                                   allocator,
                                   kInstrumented,
                                   byte_count,
                                   &bytes_allocated,
                                   &usable_size,
                                   &bytes_tl_bulk_allocated,
                                   &klass);
      if (obj == nullptr) {
        // The only way that we can get a null return if there is no pending exception is if the
        // allocator or instrumentation changed.
        if (!self->IsExceptionPending()) {
          // AllocObject will pick up the new allocator type, and instrumented as true is the safe
          // default.
          return AllocObject</*kInstrumented*/true>(self,
                                                    klass,
                                                    byte_count,
                                                    pre_fence_visitor);
        }
        return nullptr;
      }
    }
    DCHECK_GT(bytes_allocated, 0u);
    DCHECK_GT(usable_size, 0u);
    obj->SetClass(klass);
    if (kUseBakerReadBarrier) {
      obj->AssertReadBarrierState();
    }
    if (collector::SemiSpace::kUseRememberedSet && UNLIKELY(allocator == kAllocatorTypeNonMoving)) {
      // (Note this if statement will be constant folded away for the
      // fast-path quick entry points.) Because SetClass() has no write
      // barrier, if this is a non-moving space allocation, we need a write
      // barrier as the class pointer may point to the bump pointer
      // space (where the class pointer is an "old-to-young" reference,
      // though rare) under the GSS collector with the remembered set
      // enabled. We don't need this for the kAllocatorTypeRosAlloc/DlMalloc
      // cases because we don't directly allocate into the main alloc
      // space (besides promotions) under the SS/GSS collector.
      WriteBarrierField(obj, mirror::Object::ClassOffset(), klass);
    }
    pre_fence_visitor(obj, usable_size);
    QuasiAtomic::ThreadFenceForConstructor();
    new_num_bytes_allocated = static_cast<size_t>(
        num_bytes_allocated_.FetchAndAddRelaxed(bytes_tl_bulk_allocated)) + bytes_tl_bulk_allocated;
  }
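  // Post-allocation bookkeeping follows: size sanity check (debug builds), runtime allocation
  // stats, allocation tracking and listener callbacks, the allocation stack (for allocators that
  // use one), GC stress checks, and possibly a request for a concurrent GC.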
  if (kIsDebugBuild && Runtime::Current()->IsStarted()) {
    CHECK_LE(obj->SizeOf(), usable_size);
  }
  // TODO: Deprecate.
  if (kInstrumented) {
    if (Runtime::Current()->HasStatsEnabled()) {
      RuntimeStats* thread_stats = self->GetStats();
      ++thread_stats->allocated_objects;
      thread_stats->allocated_bytes += bytes_allocated;
      RuntimeStats* global_stats = Runtime::Current()->GetStats();
      ++global_stats->allocated_objects;
      global_stats->allocated_bytes += bytes_allocated;
    }
  } else {
    DCHECK(!Runtime::Current()->HasStatsEnabled());
  }
  if (kInstrumented) {
    if (IsAllocTrackingEnabled()) {
      // allocation_records_ is not null since it never becomes null after allocation tracking is
      // enabled.
      DCHECK(allocation_records_ != nullptr);
      allocation_records_->RecordAllocation(self, &obj, bytes_allocated);
    }
    AllocationListener* l = alloc_listener_.LoadSequentiallyConsistent();
    if (l != nullptr) {
      // Same as above. We assume that a listener that was once stored will never be deleted.
      // Otherwise we'd have to perform this under a lock.
      l->ObjectAllocated(self, &obj, bytes_allocated);
    }
  } else {
    DCHECK(!IsAllocTrackingEnabled());
  }
  if (AllocatorHasAllocationStack(allocator)) {
    PushOnAllocationStack(self, &obj);
  }
  if (kInstrumented) {
    if (gc_stress_mode_) {
      CheckGcStressMode(self, &obj);
    }
  } else {
    DCHECK(!gc_stress_mode_);
  }
  // IsGcConcurrent() isn't known at compile time, so we can optimize by not checking it for
  // the BumpPointer or TLAB allocators. This is nice since it allows the entire if statement to be
  // optimized out. And for the other allocators, AllocatorMayHaveConcurrentGC is a constant since
  // the allocator_type should be constant propagated.
  if (AllocatorMayHaveConcurrentGC(allocator) && IsGcConcurrent()) {
    CheckConcurrentGC(self, new_num_bytes_allocated, &obj);
  }
  VerifyObject(obj);
  self->VerifyStack();
  return obj.Ptr();
}

// The size of a thread-local allocation stack in the number of references.
static constexpr size_t kThreadLocalAllocationStackSize = 128;

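// Records a newly allocated object on the allocation stack, either the thread-local stack or the
// shared atomic stack depending on kUseThreadLocalAllocationStack. If the stack is full, the
// *WithInternalGC variants flush it (which may trigger a GC); `obj` is passed by pointer so it
// can be updated if the object moves.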
inline void Heap::PushOnAllocationStack(Thread* self, ObjPtr<mirror::Object>* obj) {
  if (kUseThreadLocalAllocationStack) {
    if (UNLIKELY(!self->PushOnThreadLocalAllocationStack(obj->Ptr()))) {
      PushOnThreadLocalAllocationStackWithInternalGC(self, obj);
    }
  } else if (UNLIKELY(!allocation_stack_->AtomicPushBack(obj->Ptr()))) {
    PushOnAllocationStackWithInternalGC(self, obj);
  }
}

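// Allocates `byte_count` bytes in the large object space. `klass` is passed by pointer and
// wrapped in a handle because the nested AllocObjectWithAllocator call can suspend and the class
// may move.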
template <bool kInstrumented, typename PreFenceVisitor>
inline mirror::Object* Heap::AllocLargeObject(Thread* self,
                                              ObjPtr<mirror::Class>* klass,
                                              size_t byte_count,
                                              const PreFenceVisitor& pre_fence_visitor) {
  // Save and restore the class in case it moves.
  StackHandleScope<1> hs(self);
  auto klass_wrapper = hs.NewHandleWrapper(klass);
  return AllocObjectWithAllocator<kInstrumented, false, PreFenceVisitor>(self, *klass, byte_count,
                                                                         kAllocatorTypeLOS,
                                                                         pre_fence_visitor);
}

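// Attempts a single allocation of `alloc_size` bytes from the space backing `allocator_type`,
// without triggering a GC. Returns null if the space is exhausted or the allocation would push
// the heap past its footprint limit (see IsOutOfMemoryOnAllocation). On success, the out
// parameters report the per-object size, the usable size, and any bytes claimed in bulk for
// thread-local buffers.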
template <const bool kInstrumented, const bool kGrow>
inline mirror::Object* Heap::TryToAllocate(Thread* self,
                                           AllocatorType allocator_type,
                                           size_t alloc_size,
                                           size_t* bytes_allocated,
                                           size_t* usable_size,
                                           size_t* bytes_tl_bulk_allocated) {
  if (allocator_type != kAllocatorTypeTLAB &&
      allocator_type != kAllocatorTypeRegionTLAB &&
      allocator_type != kAllocatorTypeRosAlloc &&
      UNLIKELY(IsOutOfMemoryOnAllocation(allocator_type, alloc_size, kGrow))) {
    return nullptr;
  }
  mirror::Object* ret;
  switch (allocator_type) {
    case kAllocatorTypeBumpPointer: {
      DCHECK(bump_pointer_space_ != nullptr);
      alloc_size = RoundUp(alloc_size, space::BumpPointerSpace::kAlignment);
      ret = bump_pointer_space_->AllocNonvirtual(alloc_size);
      if (LIKELY(ret != nullptr)) {
        *bytes_allocated = alloc_size;
        *usable_size = alloc_size;
        *bytes_tl_bulk_allocated = alloc_size;
      }
      break;
    }
    case kAllocatorTypeRosAlloc: {
      if (kInstrumented && UNLIKELY(is_running_on_memory_tool_)) {
        // If running on valgrind or asan, we should be using the instrumented path.
        size_t max_bytes_tl_bulk_allocated = rosalloc_space_->MaxBytesBulkAllocatedFor(alloc_size);
        if (UNLIKELY(IsOutOfMemoryOnAllocation(allocator_type,
                                               max_bytes_tl_bulk_allocated,
                                               kGrow))) {
          return nullptr;
        }
        ret = rosalloc_space_->Alloc(self, alloc_size, bytes_allocated, usable_size,
                                     bytes_tl_bulk_allocated);
      } else {
        DCHECK(!is_running_on_memory_tool_);
        size_t max_bytes_tl_bulk_allocated =
            rosalloc_space_->MaxBytesBulkAllocatedForNonvirtual(alloc_size);
        if (UNLIKELY(IsOutOfMemoryOnAllocation(allocator_type,
                                               max_bytes_tl_bulk_allocated,
                                               kGrow))) {
          return nullptr;
        }
        if (!kInstrumented) {
          DCHECK(!rosalloc_space_->CanAllocThreadLocal(self, alloc_size));
        }
        ret = rosalloc_space_->AllocNonvirtual(self,
                                               alloc_size,
                                               bytes_allocated,
                                               usable_size,
                                               bytes_tl_bulk_allocated);
      }
      break;
    }
    case kAllocatorTypeDlMalloc: {
      if (kInstrumented && UNLIKELY(is_running_on_memory_tool_)) {
        // If running on valgrind or asan, we should be using the instrumented path.
        ret = dlmalloc_space_->Alloc(self,
                                     alloc_size,
                                     bytes_allocated,
                                     usable_size,
                                     bytes_tl_bulk_allocated);
      } else {
        DCHECK(!is_running_on_memory_tool_);
        ret = dlmalloc_space_->AllocNonvirtual(self,
                                               alloc_size,
                                               bytes_allocated,
                                               usable_size,
                                               bytes_tl_bulk_allocated);
      }
      break;
    }
    case kAllocatorTypeNonMoving: {
      ret = non_moving_space_->Alloc(self,
                                     alloc_size,
                                     bytes_allocated,
                                     usable_size,
                                     bytes_tl_bulk_allocated);
      break;
    }
    case kAllocatorTypeLOS: {
      ret = large_object_space_->Alloc(self,
                                       alloc_size,
                                       bytes_allocated,
                                       usable_size,
                                       bytes_tl_bulk_allocated);
      // Note that the bump pointer spaces aren't necessarily next to
      // the other continuous spaces like the non-moving alloc space or
      // the zygote space.
      DCHECK(ret == nullptr || large_object_space_->Contains(ret));
      break;
    }
    case kAllocatorTypeRegion: {
      DCHECK(region_space_ != nullptr);
      alloc_size = RoundUp(alloc_size, space::RegionSpace::kAlignment);
      ret = region_space_->AllocNonvirtual<false>(alloc_size,
                                                  bytes_allocated,
                                                  usable_size,
                                                  bytes_tl_bulk_allocated);
      break;
    }
    case kAllocatorTypeTLAB:
      FALLTHROUGH_INTENDED;
    case kAllocatorTypeRegionTLAB: {
      DCHECK_ALIGNED(alloc_size, kObjectAlignment);
      static_assert(space::RegionSpace::kAlignment == space::BumpPointerSpace::kAlignment,
                    "mismatched alignments");
      static_assert(kObjectAlignment == space::BumpPointerSpace::kAlignment,
                    "mismatched alignments");
      if (UNLIKELY(self->TlabSize() < alloc_size)) {
        // kAllocatorTypeTLAB may be the allocator for region space TLAB if the GC is not marking;
        // that is why the allocator is not passed down.
        return AllocWithNewTLAB(self,
                                alloc_size,
                                kGrow,
                                bytes_allocated,
                                usable_size,
                                bytes_tl_bulk_allocated);
      }
      // The allocation can't fail.
      ret = self->AllocTlab(alloc_size);
      DCHECK(ret != nullptr);
      *bytes_allocated = alloc_size;
      *bytes_tl_bulk_allocated = 0;  // Allocated in an existing buffer.
      *usable_size = alloc_size;
      break;
    }
    default: {
      LOG(FATAL) << "Invalid allocator type";
      ret = nullptr;
    }
  }
  return ret;
}

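// Returns true if an allocation of `byte_count` bytes of class `c` should be served by the large
// object space rather than the normal spaces.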
inline bool Heap::ShouldAllocLargeObject(ObjPtr<mirror::Class> c, size_t byte_count) const {
  // We need to have a zygote space or else our newly allocated large object can end up in the
  // Zygote resulting in it being prematurely freed.
  // We can only do this for primitive arrays and strings since large objects will not be within
  // the card table range. This also means that we rely on SetClass not dirtying the object's card.
  return byte_count >= large_object_threshold_ && (c->IsPrimitiveArray() || c->IsStringClass());
}

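// Decides whether an allocation of `alloc_size` bytes must fail with OOM. Exceeding growth_limit_
// always fails. Exceeding max_allowed_footprint_ fails only for non-concurrent collectors when
// `grow` is false; with `grow` set, the footprint is raised instead, while concurrent collectors
// are allowed to overshoot here and rely on the concurrent GC request to catch up.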
inline bool Heap::IsOutOfMemoryOnAllocation(AllocatorType allocator_type,
                                            size_t alloc_size,
                                            bool grow) {
  size_t new_footprint = num_bytes_allocated_.LoadSequentiallyConsistent() + alloc_size;
  if (UNLIKELY(new_footprint > max_allowed_footprint_)) {
    if (UNLIKELY(new_footprint > growth_limit_)) {
      return true;
    }
    if (!AllocatorMayHaveConcurrentGC(allocator_type) || !IsGcConcurrent()) {
      if (!grow) {
        return true;
      }
      // TODO: Grow for allocation is racy, fix it.
      VLOG(heap) << "Growing heap from " << PrettySize(max_allowed_footprint_) << " to "
          << PrettySize(new_footprint) << " for a " << PrettySize(alloc_size) << " allocation";
      max_allowed_footprint_ = new_footprint;
    }
  }
  return false;
}

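// Requests a background concurrent GC once the running total of allocated bytes crosses
// concurrent_start_bytes_. The object is passed along so it can be preserved across any resulting
// suspend point.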
inline void Heap::CheckConcurrentGC(Thread* self,
                                    size_t new_num_bytes_allocated,
                                    ObjPtr<mirror::Object>* obj) {
  if (UNLIKELY(new_num_bytes_allocated >= concurrent_start_bytes_)) {
    RequestConcurrentGCAndSaveObject(self, false, obj);
  }
}

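// Card-marking write barriers. The card table does not record which field or element was written,
// so each variant simply dirties the card for the destination object; the unused parameters only
// document what the caller changed.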
inline void Heap::WriteBarrierField(ObjPtr<mirror::Object> dst,
                                    MemberOffset offset ATTRIBUTE_UNUSED,
                                    ObjPtr<mirror::Object> new_value ATTRIBUTE_UNUSED) {
  card_table_->MarkCard(dst.Ptr());
}

inline void Heap::WriteBarrierArray(ObjPtr<mirror::Object> dst,
                                    int start_offset ATTRIBUTE_UNUSED,
                                    size_t length ATTRIBUTE_UNUSED) {
  card_table_->MarkCard(dst.Ptr());
}

inline void Heap::WriteBarrierEveryFieldOf(ObjPtr<mirror::Object> obj) {
  card_table_->MarkCard(obj.Ptr());
}

}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_HEAP_INL_H_