/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_SPACE_REGION_SPACE_INL_H_
#define ART_RUNTIME_GC_SPACE_REGION_SPACE_INL_H_

#include "region_space.h"
#include "thread-current-inl.h"

namespace art {
namespace gc {
namespace space {

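// Rounds `num_bytes` up to the space's `kAlignment` and delegates to the non-virtual,
// non-evacuation allocation path. `self` is unused on this path (ATTRIBUTE_UNUSED).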
inline mirror::Object* RegionSpace::Alloc(Thread* self ATTRIBUTE_UNUSED,
                                          size_t num_bytes,
                                          /* out */ size_t* bytes_allocated,
                                          /* out */ size_t* usable_size,
                                          /* out */ size_t* bytes_tl_bulk_allocated) {
  num_bytes = RoundUp(num_bytes, kAlignment);
  return AllocNonvirtual<false>(num_bytes, bytes_allocated, usable_size,
                                bytes_tl_bulk_allocated);
}

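// Same as Alloc, but intended for use while no other thread can be allocating: asserts
// that the caller holds the mutator lock exclusively before delegating to Alloc.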
inline mirror::Object* RegionSpace::AllocThreadUnsafe(Thread* self,
                                                      size_t num_bytes,
                                                      /* out */ size_t* bytes_allocated,
                                                      /* out */ size_t* usable_size,
                                                      /* out */ size_t* bytes_tl_bulk_allocated) {
  Locks::mutator_lock_->AssertExclusiveHeld(self);
  return Alloc(self, num_bytes, bytes_allocated, usable_size, bytes_tl_bulk_allocated);
}

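// Core allocation routine. Requests no larger than `kRegionSize` are served from the
// current allocation region (or the evacuation region when `kForEvac` is true): first a
// lock-free attempt, then a retry under `region_lock_` in case another thread installed a
// fresh region, and finally a newly allocated region. Larger requests are forwarded to
// AllocLarge. Returns null if the space cannot satisfy the request.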
template<bool kForEvac>
inline mirror::Object* RegionSpace::AllocNonvirtual(size_t num_bytes,
                                                    /* out */ size_t* bytes_allocated,
                                                    /* out */ size_t* usable_size,
                                                    /* out */ size_t* bytes_tl_bulk_allocated) {
  DCHECK_ALIGNED(num_bytes, kAlignment);
  mirror::Object* obj;
  if (LIKELY(num_bytes <= kRegionSize)) {
    // Non-large object.
    obj = (kForEvac ? evac_region_ : current_region_)->Alloc(num_bytes,
                                                             bytes_allocated,
                                                             usable_size,
                                                             bytes_tl_bulk_allocated);
    if (LIKELY(obj != nullptr)) {
      return obj;
    }
    MutexLock mu(Thread::Current(), region_lock_);
    // Retry with the current region, since another thread may have updated it.
    obj = (kForEvac ? evac_region_ : current_region_)->Alloc(num_bytes,
                                                             bytes_allocated,
                                                             usable_size,
                                                             bytes_tl_bulk_allocated);
    if (LIKELY(obj != nullptr)) {
      return obj;
    }
    Region* r = AllocateRegion(kForEvac);
    if (LIKELY(r != nullptr)) {
      obj = r->Alloc(num_bytes, bytes_allocated, usable_size, bytes_tl_bulk_allocated);
      CHECK(obj != nullptr);
      // Do our allocation before setting the region, so that no other thread races ahead
      // and fills in the region before we allocate the object. b/63153464
      if (kForEvac) {
        evac_region_ = r;
      } else {
        current_region_ = r;
      }
      return obj;
    }
  } else {
    // Large object.
    obj = AllocLarge<kForEvac>(num_bytes, bytes_allocated, usable_size, bytes_tl_bulk_allocated);
    if (LIKELY(obj != nullptr)) {
      return obj;
    }
  }
  return nullptr;
}

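// Bump-pointer allocation within a single region. The region's `top_` pointer is advanced
// with a weak compare-and-swap loop, so concurrent allocations from the same region need no
// lock; returns null when the request does not fit below `end_`.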
inline mirror::Object* RegionSpace::Region::Alloc(size_t num_bytes,
                                                  /* out */ size_t* bytes_allocated,
                                                  /* out */ size_t* usable_size,
                                                  /* out */ size_t* bytes_tl_bulk_allocated) {
  DCHECK(IsAllocated() && IsInToSpace());
  DCHECK_ALIGNED(num_bytes, kAlignment);
  uint8_t* old_top;
  uint8_t* new_top;
  do {
    old_top = top_.LoadRelaxed();
    new_top = old_top + num_bytes;
    if (UNLIKELY(new_top > end_)) {
      return nullptr;
    }
  } while (!top_.CompareAndSetWeakRelaxed(old_top, new_top));
  objects_allocated_.FetchAndAddRelaxed(1);
  DCHECK_LE(Top(), end_);
  DCHECK_LT(old_top, end_);
  DCHECK_LE(new_top, end_);
  *bytes_allocated = num_bytes;
  if (usable_size != nullptr) {
    *usable_size = num_bytes;
  }
  *bytes_tl_bulk_allocated = num_bytes;
  return reinterpret_cast<mirror::Object*>(old_top);
}

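// Sums Region::BytesAllocated() over all non-free regions matching the compile-time
// `kRegionType` filter (all, from-space, unevacuated from-space, or to-space), while
// holding `region_lock_`.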
template<RegionSpace::RegionType kRegionType>
inline uint64_t RegionSpace::GetBytesAllocatedInternal() {
  uint64_t bytes = 0;
  MutexLock mu(Thread::Current(), region_lock_);
  for (size_t i = 0; i < num_regions_; ++i) {
    Region* r = &regions_[i];
    if (r->IsFree()) {
      continue;
    }
    switch (kRegionType) {
      case RegionType::kRegionTypeAll:
        bytes += r->BytesAllocated();
        break;
      case RegionType::kRegionTypeFromSpace:
        if (r->IsInFromSpace()) {
          bytes += r->BytesAllocated();
        }
        break;
      case RegionType::kRegionTypeUnevacFromSpace:
        if (r->IsInUnevacFromSpace()) {
          bytes += r->BytesAllocated();
        }
        break;
      case RegionType::kRegionTypeToSpace:
        if (r->IsInToSpace()) {
          bytes += r->BytesAllocated();
        }
        break;
      default:
        LOG(FATAL) << "Unexpected space type: " << kRegionType;
    }
  }
  return bytes;
}

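// Same shape as GetBytesAllocatedInternal, but sums Region::ObjectsAllocated() instead of
// byte counts.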
template<RegionSpace::RegionType kRegionType>
inline uint64_t RegionSpace::GetObjectsAllocatedInternal() {
  uint64_t objects = 0;
  MutexLock mu(Thread::Current(), region_lock_);
  for (size_t i = 0; i < num_regions_; ++i) {
    Region* r = &regions_[i];
    if (r->IsFree()) {
      continue;
    }
    switch (kRegionType) {
      case RegionType::kRegionTypeAll:
        objects += r->ObjectsAllocated();
        break;
      case RegionType::kRegionTypeFromSpace:
        if (r->IsInFromSpace()) {
          objects += r->ObjectsAllocated();
        }
        break;
      case RegionType::kRegionTypeUnevacFromSpace:
        if (r->IsInUnevacFromSpace()) {
          objects += r->ObjectsAllocated();
        }
        break;
      case RegionType::kRegionTypeToSpace:
        if (r->IsInToSpace()) {
          objects += r->ObjectsAllocated();
        }
        break;
      default:
        LOG(FATAL) << "Unexpected space type: " << kRegionType;
    }
  }
  return objects;
}

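// Visits every object in the space (or only to-space objects when `kToSpaceOnly` is true).
// A large region is visited as a single object and its tail regions are skipped; ordinary
// regions are walked either through the live bitmap (partially live, unevacuated regions)
// or by a linear scan from Begin() to Top().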
template<bool kToSpaceOnly, typename Visitor>
inline void RegionSpace::WalkInternal(Visitor&& visitor) {
  // TODO: MutexLock on region_lock_ won't work due to lock order
  // issues (the classloader classes lock and the monitor lock). We
  // call this with threads suspended.
  Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
  for (size_t i = 0; i < num_regions_; ++i) {
    Region* r = &regions_[i];
    if (r->IsFree() || (kToSpaceOnly && !r->IsInToSpace())) {
      continue;
    }
    if (r->IsLarge()) {
      // Avoid visiting dead large objects since they may contain dangling pointers to the
      // from-space.
      DCHECK_GT(r->LiveBytes(), 0u) << "Visiting dead large object";
      mirror::Object* obj = reinterpret_cast<mirror::Object*>(r->Begin());
      DCHECK(obj->GetClass() != nullptr);
      visitor(obj);
    } else if (r->IsLargeTail()) {
      // Do nothing.
    } else {
      // For newly allocated and evacuated regions, live bytes will be -1.
      uint8_t* pos = r->Begin();
      uint8_t* top = r->Top();
      const bool need_bitmap =
          r->LiveBytes() != static_cast<size_t>(-1) &&
          r->LiveBytes() != static_cast<size_t>(top - pos);
      if (need_bitmap) {
        GetLiveBitmap()->VisitMarkedRange(
            reinterpret_cast<uintptr_t>(pos),
            reinterpret_cast<uintptr_t>(top),
            visitor);
      } else {
        while (pos < top) {
          mirror::Object* obj = reinterpret_cast<mirror::Object*>(pos);
          if (obj->GetClass<kDefaultVerifyFlags, kWithoutReadBarrier>() != nullptr) {
            visitor(obj);
            pos = reinterpret_cast<uint8_t*>(GetNextObject(obj));
          } else {
            break;
          }
        }
      }
    }
  }
}

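// Returns the address just past `obj`, rounded up to `kAlignment`, i.e. where the next
// object in a linearly scanned region would start.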
inline mirror::Object* RegionSpace::GetNextObject(mirror::Object* obj) {
  const uintptr_t position = reinterpret_cast<uintptr_t>(obj) + obj->SizeOf();
  return reinterpret_cast<mirror::Object*>(RoundUp(position, kAlignment));
}

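// Allocates an object larger than `kRegionSize` by claiming a contiguous run of free
// regions under `region_lock_`: the first region is marked as a large region and the rest
// as large tails. For non-evacuation allocations, enough free regions are kept in reserve
// for a full evacuation. Returns null if no suitable run exists.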
template<bool kForEvac>
inline mirror::Object* RegionSpace::AllocLarge(size_t num_bytes,
                                               /* out */ size_t* bytes_allocated,
                                               /* out */ size_t* usable_size,
                                               /* out */ size_t* bytes_tl_bulk_allocated) {
  DCHECK_ALIGNED(num_bytes, kAlignment);
  DCHECK_GT(num_bytes, kRegionSize);
  size_t num_regs = RoundUp(num_bytes, kRegionSize) / kRegionSize;
  DCHECK_GT(num_regs, 0U);
  DCHECK_LT((num_regs - 1) * kRegionSize, num_bytes);
  DCHECK_LE(num_bytes, num_regs * kRegionSize);
  MutexLock mu(Thread::Current(), region_lock_);
  if (!kForEvac) {
    // Retain sufficient free regions for full evacuation.
    if ((num_non_free_regions_ + num_regs) * 2 > num_regions_) {
      return nullptr;
    }
  }
  // Find a large enough set of contiguous free regions.
  size_t left = 0;
  while (left + num_regs - 1 < num_regions_) {
    bool found = true;
    size_t right = left;
    DCHECK_LT(right, left + num_regs)
        << "The inner loop should iterate at least once";
    while (right < left + num_regs) {
      if (regions_[right].IsFree()) {
        ++right;
      } else {
        found = false;
        break;
      }
    }
    if (found) {
      // `right` points one region past the last free region.
      DCHECK_EQ(left + num_regs, right);
      Region* first_reg = &regions_[left];
      DCHECK(first_reg->IsFree());
      first_reg->UnfreeLarge(this, time_);
      if (kForEvac) {
        ++num_evac_regions_;
      } else {
        ++num_non_free_regions_;
      }
      size_t allocated = num_regs * kRegionSize;
      // We make 'top' all usable bytes, as the caller of this
      // allocation may use all of 'usable_size' (see mirror::Array::Alloc).
      first_reg->SetTop(first_reg->Begin() + allocated);
      for (size_t p = left + 1; p < right; ++p) {
        DCHECK_LT(p, num_regions_);
        DCHECK(regions_[p].IsFree());
        regions_[p].UnfreeLargeTail(this, time_);
        if (kForEvac) {
          ++num_evac_regions_;
        } else {
          ++num_non_free_regions_;
        }
      }
      *bytes_allocated = allocated;
      if (usable_size != nullptr) {
        *usable_size = allocated;
      }
      *bytes_tl_bulk_allocated = allocated;
      return reinterpret_cast<mirror::Object*>(first_reg->Begin());
    } else {
      // `right` points to the first non-free region found. Restart the search just past it.
      left = right + 1;
    }
  }
  return nullptr;
}

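// Frees a large object previously returned by AllocLarge: clears the leading large region
// and each of its tail regions, and updates the evacuation or non-free region counters
// accordingly.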
template<bool kForEvac>
inline void RegionSpace::FreeLarge(mirror::Object* large_obj, size_t bytes_allocated) {
  DCHECK(Contains(large_obj));
  DCHECK_ALIGNED(large_obj, kRegionSize);
  MutexLock mu(Thread::Current(), region_lock_);
  uint8_t* begin_addr = reinterpret_cast<uint8_t*>(large_obj);
  uint8_t* end_addr = AlignUp(reinterpret_cast<uint8_t*>(large_obj) + bytes_allocated, kRegionSize);
  CHECK_LT(begin_addr, end_addr);
  for (uint8_t* addr = begin_addr; addr < end_addr; addr += kRegionSize) {
    Region* reg = RefToRegionLocked(reinterpret_cast<mirror::Object*>(addr));
    if (addr == begin_addr) {
      DCHECK(reg->IsLarge());
    } else {
      DCHECK(reg->IsLargeTail());
    }
    reg->Clear(/*zero_and_release_pages*/true);
    if (kForEvac) {
      --num_evac_regions_;
    } else {
      --num_non_free_regions_;
    }
  }
  if (end_addr < Limit()) {
    // If we aren't at the end of the space, check that the next region is not a large tail.
    Region* following_reg = RefToRegionLocked(reinterpret_cast<mirror::Object*>(end_addr));
    DCHECK(!following_reg->IsLargeTail());
  }
}

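// Bytes consumed by this region: the full span up to Top() for a large region, zero for a
// large tail (its bytes are accounted to the leading large region), and Top() - begin_
// (or the owning thread's TLAB byte count) for an ordinary allocated region.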
inline size_t RegionSpace::Region::BytesAllocated() const {
  if (IsLarge()) {
    DCHECK_LT(begin_ + kRegionSize, Top());
    return static_cast<size_t>(Top() - begin_);
  } else if (IsLargeTail()) {
    DCHECK_EQ(begin_, Top());
    return 0;
  } else {
    DCHECK(IsAllocated()) << "state=" << state_;
    DCHECK_LE(begin_, Top());
    size_t bytes;
    if (is_a_tlab_) {
      bytes = thread_->GetThreadLocalBytesAllocated();
    } else {
      bytes = static_cast<size_t>(Top() - begin_);
    }
    DCHECK_LE(bytes, kRegionSize);
    return bytes;
  }
}

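// Number of objects in this region: one for a large region, zero for a large tail, and the
// per-region allocation counter otherwise.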
inline size_t RegionSpace::Region::ObjectsAllocated() const {
  if (IsLarge()) {
    DCHECK_LT(begin_ + kRegionSize, Top());
    DCHECK_EQ(objects_allocated_.LoadRelaxed(), 0U);
    return 1;
  } else if (IsLargeTail()) {
    DCHECK_EQ(begin_, Top());
    DCHECK_EQ(objects_allocated_.LoadRelaxed(), 0U);
    return 0;
  } else {
    DCHECK(IsAllocated()) << "state=" << state_;
    return objects_allocated_;
  }
}

}  // namespace space
}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_SPACE_REGION_SPACE_INL_H_