/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_SPACE_REGION_SPACE_INL_H_
#define ART_RUNTIME_GC_SPACE_REGION_SPACE_INL_H_

#include "region_space.h"

#include "base/mutex-inl.h"
#include "mirror/object-inl.h"
#include "thread-current-inl.h"

namespace art {
namespace gc {
namespace space {

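// Allocate `num_bytes` (rounded up to `kAlignment`) for a mutator via the
// non-virtual, non-evacuation allocation path.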
inline mirror::Object* RegionSpace::Alloc(Thread* self ATTRIBUTE_UNUSED,
                                          size_t num_bytes,
                                          /* out */ size_t* bytes_allocated,
                                          /* out */ size_t* usable_size,
                                          /* out */ size_t* bytes_tl_bulk_allocated) {
  num_bytes = RoundUp(num_bytes, kAlignment);
  return AllocNonvirtual<false>(num_bytes, bytes_allocated, usable_size,
                                bytes_tl_bulk_allocated);
}

inline mirror::Object* RegionSpace::AllocThreadUnsafe(Thread* self,
                                                      size_t num_bytes,
                                                      /* out */ size_t* bytes_allocated,
                                                      /* out */ size_t* usable_size,
                                                      /* out */ size_t* bytes_tl_bulk_allocated) {
  Locks::mutator_lock_->AssertExclusiveHeld(self);
  return Alloc(self, num_bytes, bytes_allocated, usable_size, bytes_tl_bulk_allocated);
}

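// Allocation fast path: bump-pointer allocate from the current (or, when
// kForEvac, the evacuation) region; on failure, retry under `region_lock_`
// and, if still unsuccessful, allocate a fresh region. Requests larger than
// `kRegionSize` are handled by AllocLarge.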
template<bool kForEvac>
inline mirror::Object* RegionSpace::AllocNonvirtual(size_t num_bytes,
                                                    /* out */ size_t* bytes_allocated,
                                                    /* out */ size_t* usable_size,
                                                    /* out */ size_t* bytes_tl_bulk_allocated) {
  DCHECK_ALIGNED(num_bytes, kAlignment);
  mirror::Object* obj;
  if (LIKELY(num_bytes <= kRegionSize)) {
    // Non-large object.
    obj = (kForEvac ? evac_region_ : current_region_)->Alloc(num_bytes,
                                                             bytes_allocated,
                                                             usable_size,
                                                             bytes_tl_bulk_allocated);
    if (LIKELY(obj != nullptr)) {
      return obj;
    }
    MutexLock mu(Thread::Current(), region_lock_);
    // Retry with current region since another thread may have updated
    // current_region_ or evac_region_.  TODO: fix race.
    obj = (kForEvac ? evac_region_ : current_region_)->Alloc(num_bytes,
                                                             bytes_allocated,
                                                             usable_size,
                                                             bytes_tl_bulk_allocated);
    if (LIKELY(obj != nullptr)) {
      return obj;
    }
    Region* r = AllocateRegion(kForEvac);
    if (LIKELY(r != nullptr)) {
      obj = r->Alloc(num_bytes, bytes_allocated, usable_size, bytes_tl_bulk_allocated);
      CHECK(obj != nullptr);
      // Do our allocation before setting the region; this makes sure no threads race ahead
      // and fill in the region before we allocate the object. b/63153464
      if (kForEvac) {
        evac_region_ = r;
      } else {
        current_region_ = r;
      }
      return obj;
    }
  } else {
    // Large object.
    obj = AllocLarge<kForEvac>(num_bytes, bytes_allocated, usable_size, bytes_tl_bulk_allocated);
    if (LIKELY(obj != nullptr)) {
      return obj;
    }
  }
  return nullptr;
}

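// Lock-free bump-pointer allocation within a single region: advance `top_`
// with a weak CAS, retrying on contention; returns null when fewer than
// `num_bytes` remain in the region.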
inline mirror::Object* RegionSpace::Region::Alloc(size_t num_bytes,
                                                  /* out */ size_t* bytes_allocated,
                                                  /* out */ size_t* usable_size,
                                                  /* out */ size_t* bytes_tl_bulk_allocated) {
  DCHECK(IsAllocated() && IsInToSpace());
  DCHECK_ALIGNED(num_bytes, kAlignment);
  uint8_t* old_top;
  uint8_t* new_top;
  do {
    old_top = top_.load(std::memory_order_relaxed);
    new_top = old_top + num_bytes;
    if (UNLIKELY(new_top > end_)) {
      return nullptr;
    }
  } while (!top_.CompareAndSetWeakRelaxed(old_top, new_top));
  objects_allocated_.fetch_add(1, std::memory_order_relaxed);
  DCHECK_LE(Top(), end_);
  DCHECK_LT(old_top, end_);
  DCHECK_LE(new_top, end_);
  *bytes_allocated = num_bytes;
  if (usable_size != nullptr) {
    *usable_size = num_bytes;
  }
  *bytes_tl_bulk_allocated = num_bytes;
  return reinterpret_cast<mirror::Object*>(old_top);
}

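// Sum the allocated byte counts of all non-free regions matching `kRegionType`,
// holding `region_lock_` for the duration of the walk.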
template<RegionSpace::RegionType kRegionType>
inline uint64_t RegionSpace::GetBytesAllocatedInternal() {
  uint64_t bytes = 0;
  MutexLock mu(Thread::Current(), region_lock_);
  for (size_t i = 0; i < num_regions_; ++i) {
    Region* r = &regions_[i];
    if (r->IsFree()) {
      continue;
    }
    switch (kRegionType) {
      case RegionType::kRegionTypeAll:
        bytes += r->BytesAllocated();
        break;
      case RegionType::kRegionTypeFromSpace:
        if (r->IsInFromSpace()) {
          bytes += r->BytesAllocated();
        }
        break;
      case RegionType::kRegionTypeUnevacFromSpace:
        if (r->IsInUnevacFromSpace()) {
          bytes += r->BytesAllocated();
        }
        break;
      case RegionType::kRegionTypeToSpace:
        if (r->IsInToSpace()) {
          bytes += r->BytesAllocated();
        }
        break;
      default:
        LOG(FATAL) << "Unexpected space type: " << kRegionType;
    }
  }
  return bytes;
}

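// Same region walk as GetBytesAllocatedInternal, but summing per-region
// object counts instead of byte counts.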
template<RegionSpace::RegionType kRegionType>
inline uint64_t RegionSpace::GetObjectsAllocatedInternal() {
  uint64_t objects = 0;
  MutexLock mu(Thread::Current(), region_lock_);
  for (size_t i = 0; i < num_regions_; ++i) {
    Region* r = &regions_[i];
    if (r->IsFree()) {
      continue;
    }
    switch (kRegionType) {
      case RegionType::kRegionTypeAll:
        objects += r->ObjectsAllocated();
        break;
      case RegionType::kRegionTypeFromSpace:
        if (r->IsInFromSpace()) {
          objects += r->ObjectsAllocated();
        }
        break;
      case RegionType::kRegionTypeUnevacFromSpace:
        if (r->IsInUnevacFromSpace()) {
          objects += r->ObjectsAllocated();
        }
        break;
      case RegionType::kRegionTypeToSpace:
        if (r->IsInToSpace()) {
          objects += r->ObjectsAllocated();
        }
        break;
      default:
        LOG(FATAL) << "Unexpected space type: " << kRegionType;
    }
  }
  return objects;
}

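// Visit marked objects in unevacuated from-space regions. Runs of adjacent
// un-evac regions are coalesced into a single VisitMarkedRange call on the
// bitmap rather than being visited region by region.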
template <typename Visitor>
inline void RegionSpace::ScanUnevacFromSpace(accounting::ContinuousSpaceBitmap* bitmap,
                                             Visitor&& visitor) {
  const size_t iter_limit = kUseTableLookupReadBarrier
      ? num_regions_ : std::min(num_regions_, non_free_region_index_limit_);
  // Instead of a region-wise scan, find contiguous blocks of un-evac regions and then
  // visit them. Everything before visit_block_begin has been processed, while
  // [visit_block_begin, visit_block_end) still needs to be visited.
  uint8_t* visit_block_begin = nullptr;
  uint8_t* visit_block_end = nullptr;
  for (size_t i = 0; i < iter_limit; ++i) {
    Region* r = &regions_[i];
    if (r->IsInUnevacFromSpace()) {
      // visit_block_begin set to nullptr means a new visit block needs to be started.
      if (visit_block_begin == nullptr) {
        visit_block_begin = r->Begin();
      }
      visit_block_end = r->End();
    } else if (visit_block_begin != nullptr) {
      // Visit the block range as r is not adjacent to the current visit block.
      bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(visit_block_begin),
                               reinterpret_cast<uintptr_t>(visit_block_end),
                               visitor);
      visit_block_begin = nullptr;
    }
  }
  // Visit last block, if not processed yet.
  if (visit_block_begin != nullptr) {
    bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(visit_block_begin),
                             reinterpret_cast<uintptr_t>(visit_block_end),
                             visitor);
  }
}

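// Visit every object in the space (or only to-space objects when
// kToSpaceOnly), handling large regions directly and ordinary regions via
// WalkNonLargeRegion. Called with the mutator lock held exclusively
// (threads suspended).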
template<bool kToSpaceOnly, typename Visitor>
inline void RegionSpace::WalkInternal(Visitor&& visitor) {
  // TODO: MutexLock on region_lock_ won't work due to lock order
  // issues (the classloader classes lock and the monitor lock). We
  // call this with threads suspended.
  Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
  for (size_t i = 0; i < num_regions_; ++i) {
    Region* r = &regions_[i];
    if (r->IsFree() || (kToSpaceOnly && !r->IsInToSpace())) {
      continue;
    }
    if (r->IsLarge()) {
      // We may visit a large object with live_bytes = 0 here. However, it is
      // safe as it cannot contain dangling pointers because corresponding regions
      // (and regions corresponding to dead referents) cannot be allocated for new
      // allocations without first clearing regions' live_bytes and state.
      mirror::Object* obj = reinterpret_cast<mirror::Object*>(r->Begin());
      DCHECK(obj->GetClass() != nullptr);
      visitor(obj);
    } else if (r->IsLargeTail()) {
      // Do nothing.
    } else {
      WalkNonLargeRegion(visitor, r);
    }
  }
}

template<typename Visitor>
inline void RegionSpace::WalkNonLargeRegion(Visitor&& visitor, const Region* r) {
  DCHECK(!r->IsLarge() && !r->IsLargeTail());
  // For newly allocated and evacuated regions, live bytes will be -1.
  uint8_t* pos = r->Begin();
  uint8_t* top = r->Top();
  // We need the region space bitmap to iterate over a region's objects
  // if
  // - its live bytes count is invalid (i.e. -1); or
  // - its live bytes count is lower than the allocated bytes count.
  //
  // In both of the previous cases, we do not have the guarantee that
  // all allocated objects are "alive" (i.e. valid), so we depend on
  // the region space bitmap to identify which ones to visit.
  //
  // On the other hand, when all allocated bytes are known to be alive,
  // we know that they form a range of consecutive objects (modulo
  // object alignment constraints) that can be visited iteratively: we
  // can compute the next object's location by using the current
  // object's address and size (and object alignment constraints).
  const bool need_bitmap =
      r->LiveBytes() != static_cast<size_t>(-1) &&
      r->LiveBytes() != static_cast<size_t>(top - pos);
  if (need_bitmap) {
    GetLiveBitmap()->VisitMarkedRange(
        reinterpret_cast<uintptr_t>(pos),
        reinterpret_cast<uintptr_t>(top),
        visitor);
  } else {
    while (pos < top) {
      mirror::Object* obj = reinterpret_cast<mirror::Object*>(pos);
      if (obj->GetClass<kDefaultVerifyFlags, kWithoutReadBarrier>() != nullptr) {
        visitor(obj);
        pos = reinterpret_cast<uint8_t*>(GetNextObject(obj));
      } else {
        break;
      }
    }
  }
}

template <typename Visitor>
inline void RegionSpace::Walk(Visitor&& visitor) {
  WalkInternal</* kToSpaceOnly= */ false>(visitor);
}
template <typename Visitor>
inline void RegionSpace::WalkToSpace(Visitor&& visitor) {
  WalkInternal</* kToSpaceOnly= */ true>(visitor);
}

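// Return the address just past `obj` (its start plus SizeOf()), rounded up
// to `kAlignment`: the start of the next object in a bump-pointer region.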
inline mirror::Object* RegionSpace::GetNextObject(mirror::Object* obj) {
  const uintptr_t position = reinterpret_cast<uintptr_t>(obj) + obj->SizeOf();
  return reinterpret_cast<mirror::Object*>(RoundUp(position, kAlignment));
}

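// Allocate a large object (> kRegionSize) by claiming a run of contiguous
// free regions. Non-evacuation requests are refused if they would leave
// fewer than half of the regions free; with kCyclicRegionAllocation the
// search starts at `cyclic_alloc_region_index_` and wraps around.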
template<bool kForEvac>
inline mirror::Object* RegionSpace::AllocLarge(size_t num_bytes,
                                               /* out */ size_t* bytes_allocated,
                                               /* out */ size_t* usable_size,
                                               /* out */ size_t* bytes_tl_bulk_allocated) {
  DCHECK_ALIGNED(num_bytes, kAlignment);
  DCHECK_GT(num_bytes, kRegionSize);
  size_t num_regs_in_large_region = RoundUp(num_bytes, kRegionSize) / kRegionSize;
  DCHECK_GT(num_regs_in_large_region, 0U);
  DCHECK_LT((num_regs_in_large_region - 1) * kRegionSize, num_bytes);
  DCHECK_LE(num_bytes, num_regs_in_large_region * kRegionSize);
  MutexLock mu(Thread::Current(), region_lock_);
  if (!kForEvac) {
    // Retain sufficient free regions for full evacuation.
    if ((num_non_free_regions_ + num_regs_in_large_region) * 2 > num_regions_) {
      return nullptr;
    }
  }

  // Find a large enough set of contiguous free regions.
  if (kCyclicRegionAllocation) {
    // Try to find a range of free regions within [cyclic_alloc_region_index_, num_regions_).
    size_t next_region1 = -1;
    mirror::Object* region1 = AllocLargeInRange<kForEvac>(cyclic_alloc_region_index_,
                                                          num_regions_,
                                                          num_regs_in_large_region,
                                                          bytes_allocated,
                                                          usable_size,
                                                          bytes_tl_bulk_allocated,
                                                          &next_region1);
    if (region1 != nullptr) {
      DCHECK_LT(0u, next_region1);
      DCHECK_LE(next_region1, num_regions_);
      // Move the cyclic allocation region marker to the region
      // following the large region that was just allocated.
      cyclic_alloc_region_index_ = next_region1 % num_regions_;
      return region1;
    }

    // If the previous attempt failed, try to find a range of free regions within
    // [0, min(cyclic_alloc_region_index_ + num_regs_in_large_region - 1, num_regions_)).
    size_t next_region2 = -1;
    mirror::Object* region2 = AllocLargeInRange<kForEvac>(
            0,
            std::min(cyclic_alloc_region_index_ + num_regs_in_large_region - 1, num_regions_),
            num_regs_in_large_region,
            bytes_allocated,
            usable_size,
            bytes_tl_bulk_allocated,
            &next_region2);
    if (region2 != nullptr) {
      DCHECK_LT(0u, next_region2);
      DCHECK_LE(next_region2, num_regions_);
      // Move the cyclic allocation region marker to the region
      // following the large region that was just allocated.
      cyclic_alloc_region_index_ = next_region2 % num_regions_;
      return region2;
    }
  } else {
    // Try to find a range of free regions within [0, num_regions_).
    mirror::Object* region = AllocLargeInRange<kForEvac>(0,
                                                         num_regions_,
                                                         num_regs_in_large_region,
                                                         bytes_allocated,
                                                         usable_size,
                                                         bytes_tl_bulk_allocated);
    if (region != nullptr) {
      return region;
    }
  }
  return nullptr;
}

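// Scan [begin, end) for `num_regs_in_large_region` contiguous free regions.
// On success, mark the first as a large region and the rest as large tails,
// report the allocated byte counts, and (if `next_region` is non-null) return
// the index of the region following the claimed run.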
template<bool kForEvac>
inline mirror::Object* RegionSpace::AllocLargeInRange(size_t begin,
                                                      size_t end,
                                                      size_t num_regs_in_large_region,
                                                      /* out */ size_t* bytes_allocated,
                                                      /* out */ size_t* usable_size,
                                                      /* out */ size_t* bytes_tl_bulk_allocated,
                                                      /* out */ size_t* next_region) {
  DCHECK_LE(0u, begin);
  DCHECK_LT(begin, end);
  DCHECK_LE(end, num_regions_);
  size_t left = begin;
  while (left + num_regs_in_large_region - 1 < end) {
    bool found = true;
    size_t right = left;
    DCHECK_LT(right, left + num_regs_in_large_region)
        << "The inner loop should iterate at least once";
    while (right < left + num_regs_in_large_region) {
      if (regions_[right].IsFree()) {
        ++right;
        // Ensure `right` is not going beyond the past-the-end index of the region space.
        DCHECK_LE(right, num_regions_);
      } else {
        found = false;
        break;
      }
    }
    if (found) {
      // `right` points one region past the last free region.
      DCHECK_EQ(left + num_regs_in_large_region, right);
      Region* first_reg = &regions_[left];
      DCHECK(first_reg->IsFree());
      first_reg->UnfreeLarge(this, time_);
      if (kForEvac) {
        ++num_evac_regions_;
      } else {
        ++num_non_free_regions_;
      }
      size_t allocated = num_regs_in_large_region * kRegionSize;
      // We make 'top' all usable bytes, as the caller of this
      // allocation may use all of 'usable_size' (see mirror::Array::Alloc).
      first_reg->SetTop(first_reg->Begin() + allocated);
      if (!kForEvac) {
        // Evac doesn't count as newly allocated.
        first_reg->SetNewlyAllocated();
      }
      for (size_t p = left + 1; p < right; ++p) {
        DCHECK_LT(p, num_regions_);
        DCHECK(regions_[p].IsFree());
        regions_[p].UnfreeLargeTail(this, time_);
        if (kForEvac) {
          ++num_evac_regions_;
        } else {
          ++num_non_free_regions_;
        }
        if (!kForEvac) {
          // Evac doesn't count as newly allocated.
          regions_[p].SetNewlyAllocated();
        }
      }
      *bytes_allocated = allocated;
      if (usable_size != nullptr) {
        *usable_size = allocated;
      }
      *bytes_tl_bulk_allocated = allocated;
      mirror::Object* large_region = reinterpret_cast<mirror::Object*>(first_reg->Begin());
      DCHECK(large_region != nullptr);
      if (next_region != nullptr) {
        // Return the index of the region following the allocated large region via `next_region`.
        *next_region = right;
      }
      return large_region;
    } else {
      // `right` points to the non-free region. Start with the one after it.
      left = right + 1;
    }
  }
  return nullptr;
}

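// Release the regions backing a large object: clear the head (large) region
// and each of its large-tail regions, updating the evac or non-free region
// counter as appropriate.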
template<bool kForEvac>
inline void RegionSpace::FreeLarge(mirror::Object* large_obj, size_t bytes_allocated) {
  DCHECK(Contains(large_obj));
  DCHECK_ALIGNED(large_obj, kRegionSize);
  MutexLock mu(Thread::Current(), region_lock_);
  uint8_t* begin_addr = reinterpret_cast<uint8_t*>(large_obj);
  uint8_t* end_addr = AlignUp(reinterpret_cast<uint8_t*>(large_obj) + bytes_allocated, kRegionSize);
  CHECK_LT(begin_addr, end_addr);
  for (uint8_t* addr = begin_addr; addr < end_addr; addr += kRegionSize) {
    Region* reg = RefToRegionLocked(reinterpret_cast<mirror::Object*>(addr));
    if (addr == begin_addr) {
      DCHECK(reg->IsLarge());
    } else {
      DCHECK(reg->IsLargeTail());
    }
    reg->Clear(/*zero_and_release_pages=*/true);
    if (kForEvac) {
      --num_evac_regions_;
    } else {
      --num_non_free_regions_;
    }
  }
  if (kIsDebugBuild && end_addr < Limit()) {
    // If we aren't at the end of the space, check that the next region is not a large tail.
    Region* following_reg = RefToRegionLocked(reinterpret_cast<mirror::Object*>(end_addr));
    DCHECK(!following_reg->IsLargeTail());
  }
}

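// Bytes allocated in this region: the full used extent for a large region,
// zero for a large tail, and the bump-pointer extent (or the owning thread's
// TLAB byte count) for an ordinary allocated region.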
inline size_t RegionSpace::Region::BytesAllocated() const {
  if (IsLarge()) {
    DCHECK_LT(begin_ + kRegionSize, Top());
    return static_cast<size_t>(Top() - begin_);
  } else if (IsLargeTail()) {
    DCHECK_EQ(begin_, Top());
    return 0;
  } else {
    DCHECK(IsAllocated()) << "state=" << state_;
    DCHECK_LE(begin_, Top());
    size_t bytes;
    if (is_a_tlab_) {
      bytes = thread_->GetThreadLocalBytesAllocated();
    } else {
      bytes = static_cast<size_t>(Top() - begin_);
    }
    DCHECK_LE(bytes, kRegionSize);
    return bytes;
  }
}

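// Objects allocated in this region: one for a large region, zero for a large
// tail, otherwise the per-region `objects_allocated_` counter.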
inline size_t RegionSpace::Region::ObjectsAllocated() const {
  if (IsLarge()) {
    DCHECK_LT(begin_ + kRegionSize, Top());
    DCHECK_EQ(objects_allocated_.load(std::memory_order_relaxed), 0U);
    return 1;
  } else if (IsLargeTail()) {
    DCHECK_EQ(begin_, Top());
    DCHECK_EQ(objects_allocated_.load(std::memory_order_relaxed), 0U);
    return 0;
  } else {
    DCHECK(IsAllocated()) << "state=" << state_;
    return objects_allocated_;
  }
}

}  // namespace space
}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_SPACE_REGION_SPACE_INL_H_