/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_SPACE_REGION_SPACE_INL_H_
#define ART_RUNTIME_GC_SPACE_REGION_SPACE_INL_H_

#include "region_space.h"
#include "thread-current-inl.h"

namespace art {
namespace gc {
namespace space {

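// Thread-safe allocation fast path: rounds the request up to kAlignment and delegates to the
// non-virtual allocator for the current (non-evacuation) region.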
inline mirror::Object* RegionSpace::Alloc(Thread*, size_t num_bytes, size_t* bytes_allocated,
                                          size_t* usable_size,
                                          size_t* bytes_tl_bulk_allocated) {
  num_bytes = RoundUp(num_bytes, kAlignment);
  return AllocNonvirtual<false>(num_bytes, bytes_allocated, usable_size,
                                bytes_tl_bulk_allocated);
}

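// Variant for callers that already hold the mutator lock exclusively; it only asserts lock
// ownership and then forwards to the regular Alloc() path.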
inline mirror::Object* RegionSpace::AllocThreadUnsafe(Thread* self, size_t num_bytes,
                                                      size_t* bytes_allocated,
                                                      size_t* usable_size,
                                                      size_t* bytes_tl_bulk_allocated) {
  Locks::mutator_lock_->AssertExclusiveHeld(self);
  return Alloc(self, num_bytes, bytes_allocated, usable_size, bytes_tl_bulk_allocated);
}

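// Core allocation routine. Requests no larger than kRegionSize are served from the current
// region (or the evacuation region when kForEvac is true): first lock-free, then retried
// under region_lock_ in case another thread installed a fresh region, and finally from a
// newly allocated region. Larger requests are handed off to AllocLarge.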
template<bool kForEvac>
inline mirror::Object* RegionSpace::AllocNonvirtual(size_t num_bytes, size_t* bytes_allocated,
                                                    size_t* usable_size,
                                                    size_t* bytes_tl_bulk_allocated) {
  DCHECK_ALIGNED(num_bytes, kAlignment);
  mirror::Object* obj;
  if (LIKELY(num_bytes <= kRegionSize)) {
    // Non-large object.
    obj = (kForEvac ? evac_region_ : current_region_)->Alloc(num_bytes,
                                                             bytes_allocated,
                                                             usable_size,
                                                             bytes_tl_bulk_allocated);
    if (LIKELY(obj != nullptr)) {
      return obj;
    }
    MutexLock mu(Thread::Current(), region_lock_);
    // Retry with the current region since another thread may have updated it.
    obj = (kForEvac ? evac_region_ : current_region_)->Alloc(num_bytes,
                                                             bytes_allocated,
                                                             usable_size,
                                                             bytes_tl_bulk_allocated);
    if (LIKELY(obj != nullptr)) {
      return obj;
    }
    Region* r = AllocateRegion(kForEvac);
    if (LIKELY(r != nullptr)) {
      obj = r->Alloc(num_bytes, bytes_allocated, usable_size, bytes_tl_bulk_allocated);
      CHECK(obj != nullptr);
      // Do our allocation before setting the region; this makes sure no threads race ahead
      // and fill in the region before we allocate the object. b/63153464
      if (kForEvac) {
        evac_region_ = r;
      } else {
        current_region_ = r;
      }
      return obj;
    }
  } else {
    // Large object.
    obj = AllocLarge<kForEvac>(num_bytes, bytes_allocated, usable_size,
                               bytes_tl_bulk_allocated);
    if (LIKELY(obj != nullptr)) {
      return obj;
    }
  }
  return nullptr;
}

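// Lock-free bump-pointer allocation within a single region: reserve num_bytes by advancing
// top_ with a weak compare-and-swap, returning null when the region cannot fit the request.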
inline mirror::Object* RegionSpace::Region::Alloc(size_t num_bytes, size_t* bytes_allocated,
                                                  size_t* usable_size,
                                                  size_t* bytes_tl_bulk_allocated) {
  DCHECK(IsAllocated() && IsInToSpace());
  DCHECK_ALIGNED(num_bytes, kAlignment);
  uint8_t* old_top;
  uint8_t* new_top;
  do {
    old_top = top_.LoadRelaxed();
    new_top = old_top + num_bytes;
    if (UNLIKELY(new_top > end_)) {
      return nullptr;
    }
  } while (!top_.CompareExchangeWeakRelaxed(old_top, new_top));
  objects_allocated_.FetchAndAddRelaxed(1);
  DCHECK_LE(Top(), end_);
  DCHECK_LT(old_top, end_);
  DCHECK_LE(new_top, end_);
  *bytes_allocated = num_bytes;
  if (usable_size != nullptr) {
    *usable_size = num_bytes;
  }
  *bytes_tl_bulk_allocated = num_bytes;
  return reinterpret_cast<mirror::Object*>(old_top);
}

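// Sums the allocated bytes of every non-free region whose type matches the kRegionType
// template argument (all, from-space, unevacuated from-space, or to-space).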
template<RegionSpace::RegionType kRegionType>
uint64_t RegionSpace::GetBytesAllocatedInternal() {
  uint64_t bytes = 0;
  MutexLock mu(Thread::Current(), region_lock_);
  for (size_t i = 0; i < num_regions_; ++i) {
    Region* r = &regions_[i];
    if (r->IsFree()) {
      continue;
    }
    switch (kRegionType) {
      case RegionType::kRegionTypeAll:
        bytes += r->BytesAllocated();
        break;
      case RegionType::kRegionTypeFromSpace:
        if (r->IsInFromSpace()) {
          bytes += r->BytesAllocated();
        }
        break;
      case RegionType::kRegionTypeUnevacFromSpace:
        if (r->IsInUnevacFromSpace()) {
          bytes += r->BytesAllocated();
        }
        break;
      case RegionType::kRegionTypeToSpace:
        if (r->IsInToSpace()) {
          bytes += r->BytesAllocated();
        }
        break;
      default:
        LOG(FATAL) << "Unexpected space type: " << kRegionType;
    }
  }
  return bytes;
}

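// Same traversal as GetBytesAllocatedInternal, but counts allocated objects instead of bytes.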
template<RegionSpace::RegionType kRegionType>
uint64_t RegionSpace::GetObjectsAllocatedInternal() {
  uint64_t objects = 0;
  MutexLock mu(Thread::Current(), region_lock_);
  for (size_t i = 0; i < num_regions_; ++i) {
    Region* r = &regions_[i];
    if (r->IsFree()) {
      continue;
    }
    switch (kRegionType) {
      case RegionType::kRegionTypeAll:
        objects += r->ObjectsAllocated();
        break;
      case RegionType::kRegionTypeFromSpace:
        if (r->IsInFromSpace()) {
          objects += r->ObjectsAllocated();
        }
        break;
      case RegionType::kRegionTypeUnevacFromSpace:
        if (r->IsInUnevacFromSpace()) {
          objects += r->ObjectsAllocated();
        }
        break;
      case RegionType::kRegionTypeToSpace:
        if (r->IsInToSpace()) {
          objects += r->ObjectsAllocated();
        }
        break;
      default:
        LOG(FATAL) << "Unexpected space type: " << kRegionType;
    }
  }
  return objects;
}

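// Visits the objects in the space (or only those in to-space regions when kToSpaceOnly is set).
// Free regions and large-object tail regions are skipped. Regions whose recorded live bytes do
// not cover the whole used range are walked through the live bitmap; newly allocated or fully
// live regions are scanned linearly from Begin() to Top().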
template<bool kToSpaceOnly, typename Visitor>
void RegionSpace::WalkInternal(Visitor&& visitor) {
  // TODO: MutexLock on region_lock_ won't work due to lock order
  // issues (the classloader classes lock and the monitor lock). We
  // call this with threads suspended.
  Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
  for (size_t i = 0; i < num_regions_; ++i) {
    Region* r = &regions_[i];
    if (r->IsFree() || (kToSpaceOnly && !r->IsInToSpace())) {
      continue;
    }
    if (r->IsLarge()) {
      // Avoid visiting dead large objects since they may contain dangling pointers to the
      // from-space.
      DCHECK_GT(r->LiveBytes(), 0u) << "Visiting dead large object";
      mirror::Object* obj = reinterpret_cast<mirror::Object*>(r->Begin());
      DCHECK(obj->GetClass() != nullptr);
      visitor(obj);
    } else if (r->IsLargeTail()) {
      // Do nothing.
    } else {
      // For newly allocated and evacuated regions, live bytes will be -1.
      uint8_t* pos = r->Begin();
      uint8_t* top = r->Top();
      const bool need_bitmap =
          r->LiveBytes() != static_cast<size_t>(-1) &&
          r->LiveBytes() != static_cast<size_t>(top - pos);
      if (need_bitmap) {
        GetLiveBitmap()->VisitMarkedRange(
            reinterpret_cast<uintptr_t>(pos),
            reinterpret_cast<uintptr_t>(top),
            visitor);
      } else {
        while (pos < top) {
          mirror::Object* obj = reinterpret_cast<mirror::Object*>(pos);
          if (obj->GetClass<kDefaultVerifyFlags, kWithoutReadBarrier>() != nullptr) {
            visitor(obj);
            pos = reinterpret_cast<uint8_t*>(GetNextObject(obj));
          } else {
            break;
          }
        }
      }
    }
  }
}

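// Returns the address immediately after 'obj', rounded up to kAlignment, i.e. where the next
// bump-pointer allocation in the same region would start.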
inline mirror::Object* RegionSpace::GetNextObject(mirror::Object* obj) {
  const uintptr_t position = reinterpret_cast<uintptr_t>(obj) + obj->SizeOf();
  return reinterpret_cast<mirror::Object*>(RoundUp(position, kAlignment));
}

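// Allocates an object larger than kRegionSize by claiming a run of contiguous free regions:
// the first region is marked large and its top is set to cover the whole run, while the
// remaining regions become large tails. Non-evacuation allocations bail out early if granting
// the run would leave fewer than half of all regions free for a future full evacuation.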
template<bool kForEvac>
mirror::Object* RegionSpace::AllocLarge(size_t num_bytes, size_t* bytes_allocated,
                                        size_t* usable_size,
                                        size_t* bytes_tl_bulk_allocated) {
  DCHECK_ALIGNED(num_bytes, kAlignment);
  DCHECK_GT(num_bytes, kRegionSize);
  size_t num_regs = RoundUp(num_bytes, kRegionSize) / kRegionSize;
  DCHECK_GT(num_regs, 0U);
  DCHECK_LT((num_regs - 1) * kRegionSize, num_bytes);
  DCHECK_LE(num_bytes, num_regs * kRegionSize);
  MutexLock mu(Thread::Current(), region_lock_);
  if (!kForEvac) {
    // Retain sufficient free regions for full evacuation.
    if ((num_non_free_regions_ + num_regs) * 2 > num_regions_) {
      return nullptr;
    }
  }
  // Find a large enough run of contiguous free regions.
  size_t left = 0;
  while (left + num_regs - 1 < num_regions_) {
    bool found = true;
    size_t right = left;
    DCHECK_LT(right, left + num_regs)
        << "The inner loop should iterate at least once";
    while (right < left + num_regs) {
      if (regions_[right].IsFree()) {
        ++right;
      } else {
        found = false;
        break;
      }
    }
    if (found) {
      // right points to one region past the last free region.
      DCHECK_EQ(left + num_regs, right);
      Region* first_reg = &regions_[left];
      DCHECK(first_reg->IsFree());
      first_reg->UnfreeLarge(this, time_);
      ++num_non_free_regions_;
      size_t allocated = num_regs * kRegionSize;
      // We make 'top' all usable bytes, as the caller of this
      // allocation may use all of 'usable_size' (see mirror::Array::Alloc).
      first_reg->SetTop(first_reg->Begin() + allocated);
      for (size_t p = left + 1; p < right; ++p) {
        DCHECK_LT(p, num_regions_);
        DCHECK(regions_[p].IsFree());
        regions_[p].UnfreeLargeTail(this, time_);
        ++num_non_free_regions_;
      }
      *bytes_allocated = allocated;
      if (usable_size != nullptr) {
        *usable_size = allocated;
      }
      *bytes_tl_bulk_allocated = allocated;
      return reinterpret_cast<mirror::Object*>(first_reg->Begin());
    } else {
      // right points to the first non-free region. Restart the search just past it.
      left = right + 1;
    }
  }
  return nullptr;
}

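// Bytes allocated in this region: the full span up to Top() for a large region, zero for a
// large tail, and for an ordinary allocated region either the owning thread's thread-local
// allocation bytes (TLAB case) or the bump-pointer span from begin_ to Top().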
inline size_t RegionSpace::Region::BytesAllocated() const {
  if (IsLarge()) {
    DCHECK_LT(begin_ + kRegionSize, Top());
    return static_cast<size_t>(Top() - begin_);
  } else if (IsLargeTail()) {
    DCHECK_EQ(begin_, Top());
    return 0;
  } else {
    DCHECK(IsAllocated()) << static_cast<uint>(state_);
    DCHECK_LE(begin_, Top());
    size_t bytes;
    if (is_a_tlab_) {
      bytes = thread_->GetThreadLocalBytesAllocated();
    } else {
      bytes = static_cast<size_t>(Top() - begin_);
    }
    DCHECK_LE(bytes, kRegionSize);
    return bytes;
  }
}

}  // namespace space
}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_SPACE_REGION_SPACE_INL_H_