/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_HEAP_INL_H_
#define ART_RUNTIME_GC_HEAP_INL_H_

#include "heap.h"

#include "allocation_listener.h"
#include "base/quasi_atomic.h"
#include "base/time_utils.h"
#include "base/utils.h"
#include "gc/accounting/atomic_stack.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/allocation_record.h"
#include "gc/collector/semi_space.h"
#include "gc/space/bump_pointer_space-inl.h"
#include "gc/space/dlmalloc_space-inl.h"
#include "gc/space/large_object_space.h"
#include "gc/space/region_space-inl.h"
#include "gc/space/rosalloc_space-inl.h"
#include "handle_scope-inl.h"
#include "obj_ptr-inl.h"
#include "runtime.h"
#include "thread-inl.h"
#include "verify_object.h"

namespace art {
namespace gc {

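// Allocates an object of class |klass| with |byte_count| bytes using |allocator|, running
// |pre_fence_visitor| on the new object before the constructor fence. Returns null with a pending
// exception on failure. Callers typically reach this through AllocObject() with the current
// allocator type; kCheckLargeObject redirects sufficiently large primitive arrays and strings to
// the large object space.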
template <bool kInstrumented, bool kCheckLargeObject, typename PreFenceVisitor>
inline mirror::Object* Heap::AllocObjectWithAllocator(Thread* self,
                                                      ObjPtr<mirror::Class> klass,
                                                      size_t byte_count,
                                                      AllocatorType allocator,
                                                      const PreFenceVisitor& pre_fence_visitor) {
  if (kIsDebugBuild) {
    CheckPreconditionsForAllocObject(klass, byte_count);
    // Since allocation can cause a GC which will need to SuspendAll, make sure all allocations are
    // done in the runnable state where suspension is expected.
    CHECK_EQ(self->GetState(), kRunnable);
    self->AssertThreadSuspensionIsAllowable();
    self->AssertNoPendingException();
    // Make sure to preserve klass.
    StackHandleScope<1> hs(self);
    HandleWrapperObjPtr<mirror::Class> h = hs.NewHandleWrapper(&klass);
    self->PoisonObjectPointers();
  }
  // Need to check that we aren't the large object allocator since the large object allocation
  // code path includes this function. If we didn't check, we would have an infinite loop.
  ObjPtr<mirror::Object> obj;
  if (kCheckLargeObject && UNLIKELY(ShouldAllocLargeObject(klass, byte_count))) {
    obj = AllocLargeObject<kInstrumented, PreFenceVisitor>(self, &klass, byte_count,
                                                           pre_fence_visitor);
    if (obj != nullptr) {
      return obj.Ptr();
    } else {
      // There should be an OOM exception; since we are retrying, clear it.
      self->ClearException();
    }
    // If the large object allocation failed, try to use the normal spaces (main space,
    // non moving space). This can happen if there is significant virtual address space
    // fragmentation.
  }
  // Bytes allocated for the (individual) object.
  size_t bytes_allocated;
  size_t usable_size;
  size_t new_num_bytes_allocated = 0;
  if (IsTLABAllocator(allocator)) {
    byte_count = RoundUp(byte_count, space::BumpPointerSpace::kAlignment);
  }
  // If we have a thread-local allocation, we don't need to update bytes allocated.
  if (IsTLABAllocator(allocator) && byte_count <= self->TlabSize()) {
    obj = self->AllocTlab(byte_count);
    DCHECK(obj != nullptr) << "AllocTlab can't fail";
    obj->SetClass(klass);
    if (kUseBakerReadBarrier) {
      obj->AssertReadBarrierState();
    }
    bytes_allocated = byte_count;
    usable_size = bytes_allocated;
    pre_fence_visitor(obj, usable_size);
    QuasiAtomic::ThreadFenceForConstructor();
  } else if (
      !kInstrumented && allocator == kAllocatorTypeRosAlloc &&
      LIKELY((obj = rosalloc_space_->AllocThreadLocal(self, byte_count, &bytes_allocated)) !=
                 nullptr)) {
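    // The thread-local RosAlloc allocation above succeeded; finish initializing the object.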
    DCHECK(!is_running_on_memory_tool_);
    obj->SetClass(klass);
    if (kUseBakerReadBarrier) {
      obj->AssertReadBarrierState();
    }
    usable_size = bytes_allocated;
    pre_fence_visitor(obj, usable_size);
    QuasiAtomic::ThreadFenceForConstructor();
  } else {
    // Bytes allocated, taking bulk thread-local buffer allocations into account.
    size_t bytes_tl_bulk_allocated = 0u;
    obj = TryToAllocate<kInstrumented, false>(self, allocator, byte_count, &bytes_allocated,
                                              &usable_size, &bytes_tl_bulk_allocated);
    if (UNLIKELY(obj == nullptr)) {
      // AllocateInternalWithGc can cause thread suspension; if someone instruments the entrypoints
      // or changes the allocator at a suspend point here, we need to retry the allocation.
      obj = AllocateInternalWithGc(self,
                                   allocator,
                                   kInstrumented,
                                   byte_count,
                                   &bytes_allocated,
                                   &usable_size,
                                   &bytes_tl_bulk_allocated,
                                   &klass);
      if (obj == nullptr) {
        // The only way we can get a null return with no pending exception is if the allocator or
        // the instrumentation changed.
        if (!self->IsExceptionPending()) {
          // AllocObject will pick up the new allocator type, and instrumented as true is the safe
          // default.
          return AllocObject</*kInstrumented*/true>(self,
                                                    klass,
                                                    byte_count,
                                                    pre_fence_visitor);
        }
        return nullptr;
      }
    }
    DCHECK_GT(bytes_allocated, 0u);
    DCHECK_GT(usable_size, 0u);
    obj->SetClass(klass);
    if (kUseBakerReadBarrier) {
      obj->AssertReadBarrierState();
    }
    if (collector::SemiSpace::kUseRememberedSet && UNLIKELY(allocator == kAllocatorTypeNonMoving)) {
      // (Note this if statement will be constant folded away for the
      // fast-path quick entry points.) Because SetClass() has no write
      // barrier, for a non-moving space allocation we need a write
      // barrier as the class pointer may point to the bump pointer
      // space (where the class pointer is an "old-to-young" reference,
      // though rare) under the GSS collector with the remembered set
      // enabled. We don't need this for kAllocatorTypeRosAlloc/DlMalloc
      // cases because we don't directly allocate into the main alloc
      // space (besides promotions) under the SS/GSS collector.
      WriteBarrierField(obj, mirror::Object::ClassOffset(), klass);
    }
    pre_fence_visitor(obj, usable_size);
    QuasiAtomic::ThreadFenceForConstructor();
    size_t num_bytes_allocated_before =
        num_bytes_allocated_.FetchAndAddRelaxed(bytes_tl_bulk_allocated);
    new_num_bytes_allocated = num_bytes_allocated_before + bytes_tl_bulk_allocated;
    if (bytes_tl_bulk_allocated > 0) {
      // Only trace when we get an increase in the number of bytes allocated. This happens when
      // obtaining a new TLAB and isn't frequent enough to hurt performance, according to Golem.
      TraceHeapSize(new_num_bytes_allocated);
    }
  }
  if (kIsDebugBuild && Runtime::Current()->IsStarted()) {
    CHECK_LE(obj->SizeOf(), usable_size);
  }
  // TODO: Deprecate.
  if (kInstrumented) {
    if (Runtime::Current()->HasStatsEnabled()) {
      RuntimeStats* thread_stats = self->GetStats();
      ++thread_stats->allocated_objects;
      thread_stats->allocated_bytes += bytes_allocated;
      RuntimeStats* global_stats = Runtime::Current()->GetStats();
      ++global_stats->allocated_objects;
      global_stats->allocated_bytes += bytes_allocated;
    }
  } else {
    DCHECK(!Runtime::Current()->HasStatsEnabled());
  }
  if (kInstrumented) {
    if (IsAllocTrackingEnabled()) {
      // allocation_records_ is not null since it never becomes null after allocation tracking is
      // enabled.
      DCHECK(allocation_records_ != nullptr);
      allocation_records_->RecordAllocation(self, &obj, bytes_allocated);
    }
    AllocationListener* l = alloc_listener_.LoadSequentiallyConsistent();
    if (l != nullptr) {
      // Same as above. We assume that a listener that was once stored will never be deleted.
      // Otherwise we'd have to perform this under a lock.
      l->ObjectAllocated(self, &obj, bytes_allocated);
    }
  } else {
    DCHECK(!IsAllocTrackingEnabled());
  }
  if (AllocatorHasAllocationStack(allocator)) {
    PushOnAllocationStack(self, &obj);
  }
  if (kInstrumented) {
    if (gc_stress_mode_) {
      CheckGcStressMode(self, &obj);
    }
  } else {
    DCHECK(!gc_stress_mode_);
  }
  // IsGcConcurrent() isn't known at compile time so we can optimize by not checking it for
  // the BumpPointer or TLAB allocators. This is nice since it allows the entire if statement to be
  // optimized out. And for the other allocators, AllocatorMayHaveConcurrentGC is a constant since
  // the allocator_type should be constant propagated.
  if (AllocatorMayHaveConcurrentGC(allocator) && IsGcConcurrent()) {
    CheckConcurrentGC(self, new_num_bytes_allocated, &obj);
  }
  VerifyObject(obj);
  self->VerifyStack();
  return obj.Ptr();
}

// The size of a thread-local allocation stack in the number of references.
static constexpr size_t kThreadLocalAllocationStackSize = 128;

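// Records a newly allocated object on the allocation stack so that a later GC can process it; if
// the stack is full, this may run an internal GC to drain it, which can move *obj.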
inline void Heap::PushOnAllocationStack(Thread* self, ObjPtr<mirror::Object>* obj) {
  if (kUseThreadLocalAllocationStack) {
    if (UNLIKELY(!self->PushOnThreadLocalAllocationStack(obj->Ptr()))) {
      PushOnThreadLocalAllocationStackWithInternalGC(self, obj);
    }
  } else if (UNLIKELY(!allocation_stack_->AtomicPushBack(obj->Ptr()))) {
    PushOnAllocationStackWithInternalGC(self, obj);
  }
}

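// Allocates |byte_count| bytes in the large object space. |klass| is passed by pointer and
// wrapped in a handle because the allocation may trigger a GC that moves the class.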
template <bool kInstrumented, typename PreFenceVisitor>
inline mirror::Object* Heap::AllocLargeObject(Thread* self,
                                              ObjPtr<mirror::Class>* klass,
                                              size_t byte_count,
                                              const PreFenceVisitor& pre_fence_visitor) {
  // Save and restore the class in case it moves.
  StackHandleScope<1> hs(self);
  auto klass_wrapper = hs.NewHandleWrapper(klass);
  return AllocObjectWithAllocator<kInstrumented, false, PreFenceVisitor>(self, *klass, byte_count,
                                                                         kAllocatorTypeLOS,
                                                                         pre_fence_visitor);
}

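// Attempts a single allocation of |alloc_size| bytes from the space selected by |allocator_type|,
// without triggering a GC. Returns null if the space is exhausted or the allocation would push
// the heap past its footprint limit. On success, fills in the actual bytes allocated, the usable
// size, and the bytes taken in bulk from the system (e.g. for a new TLAB).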
template <const bool kInstrumented, const bool kGrow>
inline mirror::Object* Heap::TryToAllocate(Thread* self,
                                           AllocatorType allocator_type,
                                           size_t alloc_size,
                                           size_t* bytes_allocated,
                                           size_t* usable_size,
                                           size_t* bytes_tl_bulk_allocated) {
  if (allocator_type != kAllocatorTypeTLAB &&
      allocator_type != kAllocatorTypeRegionTLAB &&
      allocator_type != kAllocatorTypeRosAlloc &&
      UNLIKELY(IsOutOfMemoryOnAllocation(allocator_type, alloc_size, kGrow))) {
    return nullptr;
  }
  mirror::Object* ret;
  switch (allocator_type) {
    case kAllocatorTypeBumpPointer: {
      DCHECK(bump_pointer_space_ != nullptr);
      alloc_size = RoundUp(alloc_size, space::BumpPointerSpace::kAlignment);
      ret = bump_pointer_space_->AllocNonvirtual(alloc_size);
      if (LIKELY(ret != nullptr)) {
        *bytes_allocated = alloc_size;
        *usable_size = alloc_size;
        *bytes_tl_bulk_allocated = alloc_size;
      }
      break;
    }
    case kAllocatorTypeRosAlloc: {
      if (kInstrumented && UNLIKELY(is_running_on_memory_tool_)) {
        // If running on Valgrind or ASan, we should be using the instrumented path.
        size_t max_bytes_tl_bulk_allocated = rosalloc_space_->MaxBytesBulkAllocatedFor(alloc_size);
        if (UNLIKELY(IsOutOfMemoryOnAllocation(allocator_type,
                                               max_bytes_tl_bulk_allocated,
                                               kGrow))) {
          return nullptr;
        }
        ret = rosalloc_space_->Alloc(self, alloc_size, bytes_allocated, usable_size,
                                     bytes_tl_bulk_allocated);
      } else {
        DCHECK(!is_running_on_memory_tool_);
        size_t max_bytes_tl_bulk_allocated =
            rosalloc_space_->MaxBytesBulkAllocatedForNonvirtual(alloc_size);
        if (UNLIKELY(IsOutOfMemoryOnAllocation(allocator_type,
                                               max_bytes_tl_bulk_allocated,
                                               kGrow))) {
          return nullptr;
        }
        if (!kInstrumented) {
          DCHECK(!rosalloc_space_->CanAllocThreadLocal(self, alloc_size));
        }
        ret = rosalloc_space_->AllocNonvirtual(self,
                                               alloc_size,
                                               bytes_allocated,
                                               usable_size,
                                               bytes_tl_bulk_allocated);
      }
      break;
    }
    case kAllocatorTypeDlMalloc: {
      if (kInstrumented && UNLIKELY(is_running_on_memory_tool_)) {
        // If running on Valgrind or ASan, we should be using the instrumented path.
        ret = dlmalloc_space_->Alloc(self,
                                     alloc_size,
                                     bytes_allocated,
                                     usable_size,
                                     bytes_tl_bulk_allocated);
      } else {
        DCHECK(!is_running_on_memory_tool_);
        ret = dlmalloc_space_->AllocNonvirtual(self,
                                               alloc_size,
                                               bytes_allocated,
                                               usable_size,
                                               bytes_tl_bulk_allocated);
      }
      break;
    }
    case kAllocatorTypeNonMoving: {
      ret = non_moving_space_->Alloc(self,
                                     alloc_size,
                                     bytes_allocated,
                                     usable_size,
                                     bytes_tl_bulk_allocated);
      break;
    }
    case kAllocatorTypeLOS: {
      ret = large_object_space_->Alloc(self,
                                       alloc_size,
                                       bytes_allocated,
                                       usable_size,
                                       bytes_tl_bulk_allocated);
      // Note that the bump pointer spaces aren't necessarily next to
      // the other continuous spaces like the non-moving alloc space or
      // the zygote space.
      DCHECK(ret == nullptr || large_object_space_->Contains(ret));
      break;
    }
    case kAllocatorTypeRegion: {
      DCHECK(region_space_ != nullptr);
      alloc_size = RoundUp(alloc_size, space::RegionSpace::kAlignment);
      ret = region_space_->AllocNonvirtual<false>(alloc_size,
                                                  bytes_allocated,
                                                  usable_size,
                                                  bytes_tl_bulk_allocated);
      break;
    }
    case kAllocatorTypeTLAB:
      FALLTHROUGH_INTENDED;
    case kAllocatorTypeRegionTLAB: {
      DCHECK_ALIGNED(alloc_size, kObjectAlignment);
      static_assert(space::RegionSpace::kAlignment == space::BumpPointerSpace::kAlignment,
                    "mismatched alignments");
      static_assert(kObjectAlignment == space::BumpPointerSpace::kAlignment,
                    "mismatched alignments");
      if (UNLIKELY(self->TlabSize() < alloc_size)) {
        // kAllocatorTypeTLAB may be the allocator for region space TLAB if the GC is not marking;
        // that is why the allocator is not passed down.
        return AllocWithNewTLAB(self,
                                alloc_size,
                                kGrow,
                                bytes_allocated,
                                usable_size,
                                bytes_tl_bulk_allocated);
      }
      // The allocation can't fail.
      ret = self->AllocTlab(alloc_size);
      DCHECK(ret != nullptr);
      *bytes_allocated = alloc_size;
      *bytes_tl_bulk_allocated = 0;  // Allocated in an existing buffer.
      *usable_size = alloc_size;
      break;
    }
    default: {
      LOG(FATAL) << "Invalid allocator type";
      ret = nullptr;
    }
  }
  return ret;
}

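// Returns true if an allocation of |byte_count| bytes for class |c| should be placed in the
// large object space rather than in the regular spaces.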
inline bool Heap::ShouldAllocLargeObject(ObjPtr<mirror::Class> c, size_t byte_count) const {
  // We need to have a zygote space, or else our newly allocated large object can end up in the
  // zygote space, resulting in it being prematurely freed.
  // We can only do this for primitive arrays and strings since large objects will not be within
  // the card table range. This also means that we rely on SetClass not dirtying the object's card.
  return byte_count >= large_object_threshold_ && (c->IsPrimitiveArray() || c->IsStringClass());
}

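// Returns true if allocating |alloc_size| more bytes would exceed the heap limits. Exceeding the
// hard growth limit always counts as OOM; exceeding only the current target footprint is allowed
// for allocators with a concurrent GC, or, when |grow| is set, by growing the target footprint.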
inline bool Heap::IsOutOfMemoryOnAllocation(AllocatorType allocator_type,
                                            size_t alloc_size,
                                            bool grow) {
  size_t new_footprint = num_bytes_allocated_.LoadSequentiallyConsistent() + alloc_size;
  if (UNLIKELY(new_footprint > max_allowed_footprint_)) {
    if (UNLIKELY(new_footprint > growth_limit_)) {
      return true;
    }
    if (!AllocatorMayHaveConcurrentGC(allocator_type) || !IsGcConcurrent()) {
      if (!grow) {
        return true;
      }
      // TODO: Grow for allocation is racy, fix it.
      VlogHeapGrowth(max_allowed_footprint_, new_footprint, alloc_size);
      max_allowed_footprint_ = new_footprint;
    }
  }
  return false;
}

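// Requests a background concurrent GC once the number of allocated bytes crosses the
// concurrent-GC start threshold.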
inline void Heap::CheckConcurrentGC(Thread* self,
                                    size_t new_num_bytes_allocated,
                                    ObjPtr<mirror::Object>* obj) {
  if (UNLIKELY(new_num_bytes_allocated >= concurrent_start_bytes_)) {
    RequestConcurrentGCAndSaveObject(self, false, obj);
  }
}

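// The write barriers below simply mark the card containing the destination object; the field
// offset, new value, and array-range arguments are unused because card marking is coarse-grained.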
inline void Heap::WriteBarrierField(ObjPtr<mirror::Object> dst,
                                    MemberOffset offset ATTRIBUTE_UNUSED,
                                    ObjPtr<mirror::Object> new_value ATTRIBUTE_UNUSED) {
  card_table_->MarkCard(dst.Ptr());
}

inline void Heap::WriteBarrierArray(ObjPtr<mirror::Object> dst,
                                    int start_offset ATTRIBUTE_UNUSED,
                                    size_t length ATTRIBUTE_UNUSED) {
  card_table_->MarkCard(dst.Ptr());
}

inline void Heap::WriteBarrierEveryFieldOf(ObjPtr<mirror::Object> obj) {
  card_table_->MarkCard(obj.Ptr());
}

}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_HEAP_INL_H_