/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_HEAP_INL_H_
#define ART_RUNTIME_GC_HEAP_INL_H_

#include "heap.h"

#include "allocation_listener.h"
#include "base/time_utils.h"
#include "gc/accounting/atomic_stack.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/allocation_record.h"
#include "gc/collector/semi_space.h"
#include "gc/space/bump_pointer_space-inl.h"
#include "gc/space/dlmalloc_space-inl.h"
#include "gc/space/large_object_space.h"
#include "gc/space/region_space-inl.h"
#include "gc/space/rosalloc_space-inl.h"
#include "handle_scope-inl.h"
#include "obj_ptr-inl.h"
#include "runtime.h"
#include "thread-inl.h"
#include "utils.h"
#include "verify_object.h"

namespace art {
namespace gc {

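// Allocation entry point used by all managed-heap allocations. Allocates byte_count bytes for an
// instance of klass using the given allocator, runs pre_fence_visitor on the new object before
// the constructor fence, and handles the slow paths: redirection to the large object space,
// GC-assisted retry, instrumentation and allocation-tracking hooks, and the concurrent-GC start
// check. kInstrumented selects the instrumented path; kCheckLargeObject controls whether the
// request may be redirected to the large object space.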
template <bool kInstrumented, bool kCheckLargeObject, typename PreFenceVisitor>
inline mirror::Object* Heap::AllocObjectWithAllocator(Thread* self,
                                                      ObjPtr<mirror::Class> klass,
                                                      size_t byte_count,
                                                      AllocatorType allocator,
                                                      const PreFenceVisitor& pre_fence_visitor) {
  if (kIsDebugBuild) {
    CheckPreconditionsForAllocObject(klass, byte_count);
    // Since allocation can cause a GC that will need to SuspendAll, make sure all allocations are
    // done in the runnable state, where suspension is expected.
    CHECK_EQ(self->GetState(), kRunnable);
    self->AssertThreadSuspensionIsAllowable();
    self->AssertNoPendingException();
    // Make sure to preserve klass.
    StackHandleScope<1> hs(self);
    HandleWrapperObjPtr<mirror::Class> h = hs.NewHandleWrapper(&klass);
    self->PoisonObjectPointers();
  }
  // Need to check that we aren't the large object allocator, since the large object allocation
  // code path includes this function. If we didn't check, we would have an infinite loop.
  ObjPtr<mirror::Object> obj;
  if (kCheckLargeObject && UNLIKELY(ShouldAllocLargeObject(klass, byte_count))) {
    obj = AllocLargeObject<kInstrumented, PreFenceVisitor>(self, &klass, byte_count,
                                                           pre_fence_visitor);
    if (obj != nullptr) {
      return obj.Ptr();
    } else {
      // There should be an OOM exception; since we are retrying, clear it.
      self->ClearException();
    }
    // If the large object allocation failed, try to use the normal spaces (main space,
    // non-moving space). This can happen if there is significant virtual address space
    // fragmentation.
  }
  // Bytes allocated for the (individual) object.
  size_t bytes_allocated;
  size_t usable_size;
  size_t new_num_bytes_allocated = 0;
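  // TLAB requests are rounded up to the bump pointer alignment below so that the size compared
  // against TlabSize() and recorded in bytes_allocated matches the granularity at which the TLAB
  // hands out memory.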
  if (IsTLABAllocator(allocator)) {
    byte_count = RoundUp(byte_count, space::BumpPointerSpace::kAlignment);
  }
  // If we have a thread-local allocation, we don't need to update bytes allocated.
  if (IsTLABAllocator(allocator) && byte_count <= self->TlabSize()) {
    obj = self->AllocTlab(byte_count);
    DCHECK(obj != nullptr) << "AllocTlab can't fail";
    obj->SetClass(klass);
    if (kUseBakerReadBarrier) {
      obj->AssertReadBarrierState();
    }
    bytes_allocated = byte_count;
    usable_size = bytes_allocated;
    pre_fence_visitor(obj, usable_size);
    QuasiAtomic::ThreadFenceForConstructor();
  } else if (
      !kInstrumented && allocator == kAllocatorTypeRosAlloc &&
      LIKELY((obj = rosalloc_space_->AllocThreadLocal(self, byte_count, &bytes_allocated)) !=
             nullptr)) {
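    // Uninstrumented RosAlloc fast path: the allocation was satisfied from the thread-local
    // RosAlloc run, so only the per-object bookkeeping below is needed.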
    DCHECK(!is_running_on_memory_tool_);
    obj->SetClass(klass);
    if (kUseBakerReadBarrier) {
      obj->AssertReadBarrierState();
    }
    usable_size = bytes_allocated;
    pre_fence_visitor(obj, usable_size);
    QuasiAtomic::ThreadFenceForConstructor();
  } else {
    // Bytes allocated, taking bulk thread-local buffer allocations into account.
    size_t bytes_tl_bulk_allocated = 0;
    obj = TryToAllocate<kInstrumented, false>(self, allocator, byte_count, &bytes_allocated,
                                              &usable_size, &bytes_tl_bulk_allocated);
    if (UNLIKELY(obj == nullptr)) {
      // AllocateInternalWithGc can cause thread suspension; if someone instruments the entrypoints
      // or changes the allocator at a suspend point here, we need to retry the allocation.
      obj = AllocateInternalWithGc(self,
                                   allocator,
                                   kInstrumented,
                                   byte_count,
                                   &bytes_allocated,
                                   &usable_size,
                                   &bytes_tl_bulk_allocated,
                                   &klass);
      if (obj == nullptr) {
        // The only way we can get a null return with no pending exception is if the allocator
        // or instrumentation changed.
        if (!self->IsExceptionPending()) {
          // AllocObject will pick up the new allocator type; instrumented == true is the safe
          // default.
          return AllocObject</*kInstrumented*/true>(self,
                                                    klass,
                                                    byte_count,
                                                    pre_fence_visitor);
        }
        return nullptr;
      }
    }
    DCHECK_GT(bytes_allocated, 0u);
    DCHECK_GT(usable_size, 0u);
    obj->SetClass(klass);
    if (kUseBakerReadBarrier) {
      obj->AssertReadBarrierState();
    }
    if (collector::SemiSpace::kUseRememberedSet && UNLIKELY(allocator == kAllocatorTypeNonMoving)) {
      // (Note: this if statement will be constant folded away for the
      // fast-path quick entry points.) Because SetClass() has no write
      // barrier, a non-moving space allocation needs an explicit write
      // barrier: the class pointer may point into the bump pointer
      // space (where it is a rare "old-to-young" reference) under the
      // GSS collector with the remembered set enabled. We don't need
      // this for the kAllocatorTypeRosAlloc/DlMalloc cases because we
      // don't directly allocate into the main alloc space (besides
      // promotions) under the SS/GSS collector.
      WriteBarrierField(obj, mirror::Object::ClassOffset(), klass);
    }
    pre_fence_visitor(obj, usable_size);
    QuasiAtomic::ThreadFenceForConstructor();
    new_num_bytes_allocated = num_bytes_allocated_.FetchAndAddRelaxed(bytes_tl_bulk_allocated) +
        bytes_tl_bulk_allocated;
    if (bytes_tl_bulk_allocated > 0) {
      // Only trace when we get an increase in the number of bytes allocated. This happens when
      // we obtain a new TLAB and doesn't happen often enough to hurt performance, according to
      // Golem.
      TraceHeapSize(new_num_bytes_allocated + bytes_tl_bulk_allocated);
    }
  }
  if (kIsDebugBuild && Runtime::Current()->IsStarted()) {
    CHECK_LE(obj->SizeOf(), usable_size);
  }
  // TODO: Deprecate.
  if (kInstrumented) {
    if (Runtime::Current()->HasStatsEnabled()) {
      RuntimeStats* thread_stats = self->GetStats();
      ++thread_stats->allocated_objects;
      thread_stats->allocated_bytes += bytes_allocated;
      RuntimeStats* global_stats = Runtime::Current()->GetStats();
      ++global_stats->allocated_objects;
      global_stats->allocated_bytes += bytes_allocated;
    }
  } else {
    DCHECK(!Runtime::Current()->HasStatsEnabled());
  }
  if (kInstrumented) {
    if (IsAllocTrackingEnabled()) {
      // allocation_records_ is not null since it never becomes null after allocation tracking is
      // enabled.
      DCHECK(allocation_records_ != nullptr);
      allocation_records_->RecordAllocation(self, &obj, bytes_allocated);
    }
    AllocationListener* l = alloc_listener_.LoadSequentiallyConsistent();
    if (l != nullptr) {
      // Same as above. We assume that a listener that was once stored will never be deleted.
      // Otherwise we'd have to perform this under a lock.
      l->ObjectAllocated(self, &obj, bytes_allocated);
    }
  } else {
    DCHECK(!IsAllocTrackingEnabled());
  }
  if (AllocatorHasAllocationStack(allocator)) {
    PushOnAllocationStack(self, &obj);
  }
  if (kInstrumented) {
    if (gc_stress_mode_) {
      CheckGcStressMode(self, &obj);
    }
  } else {
    DCHECK(!gc_stress_mode_);
  }
  // IsGcConcurrent() isn't known at compile time, so we skip the check entirely for the
  // BumpPointer and TLAB allocators: AllocatorMayHaveConcurrentGC is a compile-time constant
  // for them, which allows the whole if statement to be optimized out. For the other allocators
  // it is also constant once allocator_type is constant propagated.
  if (AllocatorMayHaveConcurrentGC(allocator) && IsGcConcurrent()) {
    CheckConcurrentGC(self, new_num_bytes_allocated, &obj);
  }
  VerifyObject(obj);
  self->VerifyStack();
  return obj.Ptr();
}

// The size of a thread-local allocation stack in the number of references.
static constexpr size_t kThreadLocalAllocationStackSize = 128;

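// Records a freshly allocated object on the allocation stack (thread-local or shared, depending
// on kUseThreadLocalAllocationStack) so the GC can process it later; when the stack is full, the
// slow path may run an internal GC before pushing.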
inline void Heap::PushOnAllocationStack(Thread* self, ObjPtr<mirror::Object>* obj) {
  if (kUseThreadLocalAllocationStack) {
    if (UNLIKELY(!self->PushOnThreadLocalAllocationStack(obj->Ptr()))) {
      PushOnThreadLocalAllocationStackWithInternalGC(self, obj);
    }
  } else if (UNLIKELY(!allocation_stack_->AtomicPushBack(obj->Ptr()))) {
    PushOnAllocationStackWithInternalGC(self, obj);
  }
}

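// Allocates byte_count bytes in the large object space. The class is passed by pointer and
// wrapped in a handle because the allocation may trigger a GC that moves it.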
template <bool kInstrumented, typename PreFenceVisitor>
inline mirror::Object* Heap::AllocLargeObject(Thread* self,
                                              ObjPtr<mirror::Class>* klass,
                                              size_t byte_count,
                                              const PreFenceVisitor& pre_fence_visitor) {
  // Save and restore the class in case it moves.
  StackHandleScope<1> hs(self);
  auto klass_wrapper = hs.NewHandleWrapper(klass);
  return AllocObjectWithAllocator<kInstrumented, false, PreFenceVisitor>(self, *klass, byte_count,
                                                                         kAllocatorTypeLOS,
                                                                         pre_fence_visitor);
}

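// A single allocation attempt against the requested allocator, without triggering a GC. Returns
// null if the space is exhausted or the allocation would push the heap over its footprint limit.
// On success fills in bytes_allocated, usable_size, and bytes_tl_bulk_allocated (the bytes
// acquired in bulk for a thread-local buffer; 0 when served from an existing buffer).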
template <const bool kInstrumented, const bool kGrow>
inline mirror::Object* Heap::TryToAllocate(Thread* self,
                                           AllocatorType allocator_type,
                                           size_t alloc_size,
                                           size_t* bytes_allocated,
                                           size_t* usable_size,
                                           size_t* bytes_tl_bulk_allocated) {
  if (allocator_type != kAllocatorTypeTLAB &&
      allocator_type != kAllocatorTypeRegionTLAB &&
      allocator_type != kAllocatorTypeRosAlloc &&
      UNLIKELY(IsOutOfMemoryOnAllocation(allocator_type, alloc_size, kGrow))) {
    return nullptr;
  }
  mirror::Object* ret;
  switch (allocator_type) {
    case kAllocatorTypeBumpPointer: {
      DCHECK(bump_pointer_space_ != nullptr);
      alloc_size = RoundUp(alloc_size, space::BumpPointerSpace::kAlignment);
      ret = bump_pointer_space_->AllocNonvirtual(alloc_size);
      if (LIKELY(ret != nullptr)) {
        *bytes_allocated = alloc_size;
        *usable_size = alloc_size;
        *bytes_tl_bulk_allocated = alloc_size;
      }
      break;
    }
    case kAllocatorTypeRosAlloc: {
      if (kInstrumented && UNLIKELY(is_running_on_memory_tool_)) {
        // If running on valgrind or asan, we should be using the instrumented path.
        size_t max_bytes_tl_bulk_allocated = rosalloc_space_->MaxBytesBulkAllocatedFor(alloc_size);
        if (UNLIKELY(IsOutOfMemoryOnAllocation(allocator_type,
                                               max_bytes_tl_bulk_allocated,
                                               kGrow))) {
          return nullptr;
        }
        ret = rosalloc_space_->Alloc(self, alloc_size, bytes_allocated, usable_size,
                                     bytes_tl_bulk_allocated);
      } else {
        DCHECK(!is_running_on_memory_tool_);
        size_t max_bytes_tl_bulk_allocated =
            rosalloc_space_->MaxBytesBulkAllocatedForNonvirtual(alloc_size);
        if (UNLIKELY(IsOutOfMemoryOnAllocation(allocator_type,
                                               max_bytes_tl_bulk_allocated,
                                               kGrow))) {
          return nullptr;
        }
        if (!kInstrumented) {
          DCHECK(!rosalloc_space_->CanAllocThreadLocal(self, alloc_size));
        }
        ret = rosalloc_space_->AllocNonvirtual(self,
                                               alloc_size,
                                               bytes_allocated,
                                               usable_size,
                                               bytes_tl_bulk_allocated);
      }
      break;
    }
    case kAllocatorTypeDlMalloc: {
      if (kInstrumented && UNLIKELY(is_running_on_memory_tool_)) {
        // If running on valgrind, we should be using the instrumented path.
        ret = dlmalloc_space_->Alloc(self,
                                     alloc_size,
                                     bytes_allocated,
                                     usable_size,
                                     bytes_tl_bulk_allocated);
      } else {
        DCHECK(!is_running_on_memory_tool_);
        ret = dlmalloc_space_->AllocNonvirtual(self,
                                               alloc_size,
                                               bytes_allocated,
                                               usable_size,
                                               bytes_tl_bulk_allocated);
      }
      break;
    }
    case kAllocatorTypeNonMoving: {
      ret = non_moving_space_->Alloc(self,
                                     alloc_size,
                                     bytes_allocated,
                                     usable_size,
                                     bytes_tl_bulk_allocated);
      break;
    }
    case kAllocatorTypeLOS: {
      ret = large_object_space_->Alloc(self,
                                       alloc_size,
                                       bytes_allocated,
                                       usable_size,
                                       bytes_tl_bulk_allocated);
      // Note that the bump pointer spaces aren't necessarily next to
      // the other continuous spaces like the non-moving alloc space or
      // the zygote space.
      DCHECK(ret == nullptr || large_object_space_->Contains(ret));
      break;
    }
    case kAllocatorTypeRegion: {
      DCHECK(region_space_ != nullptr);
      alloc_size = RoundUp(alloc_size, space::RegionSpace::kAlignment);
      ret = region_space_->AllocNonvirtual<false>(alloc_size,
                                                  bytes_allocated,
                                                  usable_size,
                                                  bytes_tl_bulk_allocated);
      break;
    }
    case kAllocatorTypeTLAB:
      FALLTHROUGH_INTENDED;
    case kAllocatorTypeRegionTLAB: {
      DCHECK_ALIGNED(alloc_size, kObjectAlignment);
      static_assert(space::RegionSpace::kAlignment == space::BumpPointerSpace::kAlignment,
                    "mismatched alignments");
      static_assert(kObjectAlignment == space::BumpPointerSpace::kAlignment,
                    "mismatched alignments");
      if (UNLIKELY(self->TlabSize() < alloc_size)) {
        // kAllocatorTypeTLAB may be the allocator for region space TLAB if the GC is not marking;
        // that is why the allocator is not passed down.
        return AllocWithNewTLAB(self,
                                alloc_size,
                                kGrow,
                                bytes_allocated,
                                usable_size,
                                bytes_tl_bulk_allocated);
      }
      // The allocation can't fail.
      ret = self->AllocTlab(alloc_size);
      DCHECK(ret != nullptr);
      *bytes_allocated = alloc_size;
      *bytes_tl_bulk_allocated = 0;  // Allocated in an existing buffer.
      *usable_size = alloc_size;
      break;
    }
    default: {
      LOG(FATAL) << "Invalid allocator type";
      ret = nullptr;
    }
  }
  return ret;
}

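// Returns true if an allocation of byte_count bytes for class c should be routed to the large
// object space rather than a regular alloc space.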
inline bool Heap::ShouldAllocLargeObject(ObjPtr<mirror::Class> c, size_t byte_count) const {
  // We need to have a zygote space, or else our newly allocated large object can end up in the
  // Zygote, resulting in it being prematurely freed.
  // We can only do this for primitive arrays and strings, since large objects will not be within
  // the card table range. This also means that we rely on SetClass not dirtying the object's card.
  return byte_count >= large_object_threshold_ && (c->IsPrimitiveArray() || c->IsStringClass());
}

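// Returns true if allocating alloc_size additional bytes would run the heap out of memory:
// either the hard growth_limit_ would be exceeded, or max_allowed_footprint_ would be exceeded
// while the GC for this allocator is not concurrent and growing is not allowed. When growing is
// allowed and the GC is not concurrent, the footprint is raised instead.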
inline bool Heap::IsOutOfMemoryOnAllocation(AllocatorType allocator_type,
                                            size_t alloc_size,
                                            bool grow) {
  size_t new_footprint = num_bytes_allocated_.LoadSequentiallyConsistent() + alloc_size;
  if (UNLIKELY(new_footprint > max_allowed_footprint_)) {
    if (UNLIKELY(new_footprint > growth_limit_)) {
      return true;
    }
    if (!AllocatorMayHaveConcurrentGC(allocator_type) || !IsGcConcurrent()) {
      if (!grow) {
        return true;
      }
      // TODO: Grow for allocation is racy, fix it.
      VLOG(heap) << "Growing heap from " << PrettySize(max_allowed_footprint_) << " to "
          << PrettySize(new_footprint) << " for a " << PrettySize(alloc_size) << " allocation";
      max_allowed_footprint_ = new_footprint;
    }
  }
  return false;
}

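// Requests a concurrent GC once the number of allocated bytes crosses concurrent_start_bytes_.
// The newly allocated object is passed by pointer so that it can be updated if it moves.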
inline void Heap::CheckConcurrentGC(Thread* self,
                                    size_t new_num_bytes_allocated,
                                    ObjPtr<mirror::Object>* obj) {
  if (UNLIKELY(new_num_bytes_allocated >= concurrent_start_bytes_)) {
    RequestConcurrentGCAndSaveObject(self, false, obj);
  }
}

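// Card marking write barriers. The card table operates at whole-card granularity, so the exact
// field offset or array range being written does not matter; only the destination object's card
// needs to be dirtied, which is why the offset and value parameters are unused.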
inline void Heap::WriteBarrierField(ObjPtr<mirror::Object> dst,
                                    MemberOffset offset ATTRIBUTE_UNUSED,
                                    ObjPtr<mirror::Object> new_value ATTRIBUTE_UNUSED) {
  card_table_->MarkCard(dst.Ptr());
}

inline void Heap::WriteBarrierArray(ObjPtr<mirror::Object> dst,
                                    int start_offset ATTRIBUTE_UNUSED,
                                    size_t length ATTRIBUTE_UNUSED) {
  card_table_->MarkCard(dst.Ptr());
}

inline void Heap::WriteBarrierEveryFieldOf(ObjPtr<mirror::Object> obj) {
  card_table_->MarkCard(obj.Ptr());
}

}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_HEAP_INL_H_