/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_SPACE_REGION_SPACE_INL_H_
#define ART_RUNTIME_GC_SPACE_REGION_SPACE_INL_H_

#include "region_space.h"

namespace art {
namespace gc {
namespace space {

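// Thread-safe allocation entry point: rounds the request up to kAlignment and
// delegates to the non-virtual fast path.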
inline mirror::Object* RegionSpace::Alloc(Thread*, size_t num_bytes, size_t* bytes_allocated,
                                          size_t* usable_size,
                                          size_t* bytes_tl_bulk_allocated) {
  num_bytes = RoundUp(num_bytes, kAlignment);
  return AllocNonvirtual<false>(num_bytes, bytes_allocated, usable_size,
                                bytes_tl_bulk_allocated);
}

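// Variant for use while the mutator lock is held exclusively (all mutators
// suspended): asserts that precondition and forwards to Alloc().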
inline mirror::Object* RegionSpace::AllocThreadUnsafe(Thread* self, size_t num_bytes,
                                                      size_t* bytes_allocated,
                                                      size_t* usable_size,
                                                      size_t* bytes_tl_bulk_allocated) {
  Locks::mutator_lock_->AssertExclusiveHeld(self);
  return Alloc(self, num_bytes, bytes_allocated, usable_size, bytes_tl_bulk_allocated);
}

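// Core allocation routine. kForEvac selects between the mutator allocation
// region (current_region_) and the evacuation region (evac_region_). The fast
// path bump-allocates in that region without holding a lock; on failure it
// retries under region_lock_ and, if the region is still full, claims a fresh
// free region (for mutator allocations, only while enough regions remain free
// for a full evacuation). Requests larger than a region go to AllocLarge().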
template<bool kForEvac>
inline mirror::Object* RegionSpace::AllocNonvirtual(size_t num_bytes, size_t* bytes_allocated,
                                                    size_t* usable_size,
                                                    size_t* bytes_tl_bulk_allocated) {
  DCHECK(IsAligned<kAlignment>(num_bytes));
  mirror::Object* obj;
  if (LIKELY(num_bytes <= kRegionSize)) {
    // Non-large object.
    if (!kForEvac) {
      obj = current_region_->Alloc(num_bytes, bytes_allocated, usable_size,
                                   bytes_tl_bulk_allocated);
    } else {
      DCHECK(evac_region_ != nullptr);
      obj = evac_region_->Alloc(num_bytes, bytes_allocated, usable_size,
                                bytes_tl_bulk_allocated);
    }
    if (LIKELY(obj != nullptr)) {
      return obj;
    }
    MutexLock mu(Thread::Current(), region_lock_);
    // Retry with the current region, since another thread may have updated it.
    if (!kForEvac) {
      obj = current_region_->Alloc(num_bytes, bytes_allocated, usable_size,
                                   bytes_tl_bulk_allocated);
    } else {
      obj = evac_region_->Alloc(num_bytes, bytes_allocated, usable_size,
                                bytes_tl_bulk_allocated);
    }
    if (LIKELY(obj != nullptr)) {
      return obj;
    }
    if (!kForEvac) {
      // Retain sufficient free regions for full evacuation.
      if ((num_non_free_regions_ + 1) * 2 > num_regions_) {
        return nullptr;
      }
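      // The current allocation region is full; claim a free region and make it
      // the new allocation region.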
      for (size_t i = 0; i < num_regions_; ++i) {
        Region* r = &regions_[i];
        if (r->IsFree()) {
          r->Unfree(time_);
          r->SetNewlyAllocated();
          ++num_non_free_regions_;
          obj = r->Alloc(num_bytes, bytes_allocated, usable_size, bytes_tl_bulk_allocated);
          CHECK(obj != nullptr);
          current_region_ = r;
          return obj;
        }
      }
    } else {
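      // The evacuation region is full; claim a free region to evacuate into.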
      for (size_t i = 0; i < num_regions_; ++i) {
        Region* r = &regions_[i];
        if (r->IsFree()) {
          r->Unfree(time_);
          ++num_non_free_regions_;
          obj = r->Alloc(num_bytes, bytes_allocated, usable_size, bytes_tl_bulk_allocated);
          CHECK(obj != nullptr);
          evac_region_ = r;
          return obj;
        }
      }
    }
  } else {
    // Large object.
    obj = AllocLarge<kForEvac>(num_bytes, bytes_allocated, usable_size,
                               bytes_tl_bulk_allocated);
    if (LIKELY(obj != nullptr)) {
      return obj;
    }
  }
  return nullptr;
}

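// Lock-free bump-pointer allocation within a single region: advances top_ with
// a CAS loop and increments the region's object count.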
inline mirror::Object* RegionSpace::Region::Alloc(size_t num_bytes, size_t* bytes_allocated,
                                                  size_t* usable_size,
                                                  size_t* bytes_tl_bulk_allocated) {
  DCHECK(IsAllocated() && IsInToSpace());
  DCHECK(IsAligned<kAlignment>(num_bytes));
  Atomic<uint8_t*>* atomic_top = reinterpret_cast<Atomic<uint8_t*>*>(&top_);
  uint8_t* old_top;
  uint8_t* new_top;
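  // Bump top_ atomically; retry if another thread advanced it first. Fail if
  // fewer than num_bytes remain in the region.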
  do {
    old_top = atomic_top->LoadRelaxed();
    new_top = old_top + num_bytes;
    if (UNLIKELY(new_top > end_)) {
      return nullptr;
    }
  } while (!atomic_top->CompareExchangeWeakSequentiallyConsistent(old_top, new_top));
  reinterpret_cast<Atomic<uint64_t>*>(&objects_allocated_)->FetchAndAddSequentiallyConsistent(1);
  DCHECK_LE(atomic_top->LoadRelaxed(), end_);
  DCHECK_LT(old_top, end_);
  DCHECK_LE(new_top, end_);
  *bytes_allocated = num_bytes;
  if (usable_size != nullptr) {
    *usable_size = num_bytes;
  }
  *bytes_tl_bulk_allocated = num_bytes;
  return reinterpret_cast<mirror::Object*>(old_top);
}

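// Returns the object's size. If requested, also reports the usable size: the
// object size rounded up to kAlignment for regular objects, or to a whole
// number of regions for large objects.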
inline size_t RegionSpace::AllocationSizeNonvirtual(mirror::Object* obj, size_t* usable_size)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  size_t num_bytes = obj->SizeOf();
  if (usable_size != nullptr) {
    if (LIKELY(num_bytes <= kRegionSize)) {
      DCHECK(RefToRegion(obj)->IsAllocated());
      *usable_size = RoundUp(num_bytes, kAlignment);
    } else {
      DCHECK(RefToRegion(obj)->IsLarge());
      *usable_size = RoundUp(num_bytes, kRegionSize);
    }
  }
  return num_bytes;
}

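// Sums the allocated bytes of every non-free region that matches kRegionType.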
template<RegionSpace::RegionType kRegionType>
uint64_t RegionSpace::GetBytesAllocatedInternal() {
  uint64_t bytes = 0;
  MutexLock mu(Thread::Current(), region_lock_);
  for (size_t i = 0; i < num_regions_; ++i) {
    Region* r = &regions_[i];
    if (r->IsFree()) {
      continue;
    }
    switch (kRegionType) {
      case RegionType::kRegionTypeAll:
        bytes += r->BytesAllocated();
        break;
      case RegionType::kRegionTypeFromSpace:
        if (r->IsInFromSpace()) {
          bytes += r->BytesAllocated();
        }
        break;
      case RegionType::kRegionTypeUnevacFromSpace:
        if (r->IsInUnevacFromSpace()) {
          bytes += r->BytesAllocated();
        }
        break;
      case RegionType::kRegionTypeToSpace:
        if (r->IsInToSpace()) {
          bytes += r->BytesAllocated();
        }
        break;
      default:
        LOG(FATAL) << "Unexpected space type: " << kRegionType;
    }
  }
  return bytes;
}

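// Sums the allocated object count of every non-free region that matches
// kRegionType.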
template<RegionSpace::RegionType kRegionType>
uint64_t RegionSpace::GetObjectsAllocatedInternal() {
  uint64_t objects = 0;
  MutexLock mu(Thread::Current(), region_lock_);
  for (size_t i = 0; i < num_regions_; ++i) {
    Region* r = &regions_[i];
    if (r->IsFree()) {
      continue;
    }
    switch (kRegionType) {
      case RegionType::kRegionTypeAll:
        objects += r->ObjectsAllocated();
        break;
      case RegionType::kRegionTypeFromSpace:
        if (r->IsInFromSpace()) {
          objects += r->ObjectsAllocated();
        }
        break;
      case RegionType::kRegionTypeUnevacFromSpace:
        if (r->IsInUnevacFromSpace()) {
          objects += r->ObjectsAllocated();
        }
        break;
      case RegionType::kRegionTypeToSpace:
        if (r->IsInToSpace()) {
          objects += r->ObjectsAllocated();
        }
        break;
      default:
        LOG(FATAL) << "Unexpected space type: " << kRegionType;
    }
  }
  return objects;
}

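// Visits every object in the space (or, when kToSpaceOnly, only objects in
// to-space regions), invoking callback on each one.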
template<bool kToSpaceOnly>
void RegionSpace::WalkInternal(ObjectCallback* callback, void* arg) {
  // TODO: MutexLock on region_lock_ won't work due to lock order
  // issues (the classloader classes lock and the monitor lock). We
  // call this with threads suspended.
  Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
  for (size_t i = 0; i < num_regions_; ++i) {
    Region* r = &regions_[i];
    if (r->IsFree() || (kToSpaceOnly && !r->IsInToSpace())) {
      continue;
    }
    if (r->IsLarge()) {
      mirror::Object* obj = reinterpret_cast<mirror::Object*>(r->Begin());
      if (obj->GetClass() != nullptr) {
        callback(obj, arg);
      }
    } else if (r->IsLargeTail()) {
      // Do nothing.
    } else {
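      // A regular region: walk objects from Begin() to Top(). An object whose
      // class pointer is null marks the end of the allocated portion.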
      uint8_t* pos = r->Begin();
      uint8_t* top = r->Top();
      while (pos < top) {
        mirror::Object* obj = reinterpret_cast<mirror::Object*>(pos);
        if (obj->GetClass<kDefaultVerifyFlags, kWithoutReadBarrier>() != nullptr) {
          callback(obj, arg);
          pos = reinterpret_cast<uint8_t*>(GetNextObject(obj));
        } else {
          break;
        }
      }
    }
  }
}

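// Returns the address just past obj, rounded up to kAlignment: where the next
// object in the region would start.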
inline mirror::Object* RegionSpace::GetNextObject(mirror::Object* obj) {
  const uintptr_t position = reinterpret_cast<uintptr_t>(obj) + obj->SizeOf();
  return reinterpret_cast<mirror::Object*>(RoundUp(position, kAlignment));
}

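// Allocates an object spanning one or more whole regions. Scans for a run of
// contiguous free regions, marks the first as a large region and the rest as
// large tails, and returns the start of the first region. Returns null if no
// such run exists or (for non-evacuation allocations) if claiming it would
// leave fewer than half of the regions free.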
template<bool kForEvac>
mirror::Object* RegionSpace::AllocLarge(size_t num_bytes, size_t* bytes_allocated,
                                        size_t* usable_size,
                                        size_t* bytes_tl_bulk_allocated) {
  DCHECK(IsAligned<kAlignment>(num_bytes));
  DCHECK_GT(num_bytes, kRegionSize);
  size_t num_regs = RoundUp(num_bytes, kRegionSize) / kRegionSize;
  DCHECK_GT(num_regs, 0U);
  DCHECK_LT((num_regs - 1) * kRegionSize, num_bytes);
  DCHECK_LE(num_bytes, num_regs * kRegionSize);
  MutexLock mu(Thread::Current(), region_lock_);
  if (!kForEvac) {
    // Retain sufficient free regions for full evacuation.
    if ((num_non_free_regions_ + num_regs) * 2 > num_regions_) {
      return nullptr;
    }
  }
  // Find a large enough run of contiguous free regions.
  size_t left = 0;
  while (left + num_regs - 1 < num_regions_) {
    bool found = true;
    size_t right = left;
    DCHECK_LT(right, left + num_regs)
        << "The inner loop should iterate at least once";
    while (right < left + num_regs) {
      if (regions_[right].IsFree()) {
        ++right;
      } else {
        found = false;
        break;
      }
    }
    if (found) {
      // right points one region past the last free region in the run.
      DCHECK_EQ(left + num_regs, right);
      Region* first_reg = &regions_[left];
      DCHECK(first_reg->IsFree());
      first_reg->UnfreeLarge(time_);
      ++num_non_free_regions_;
      first_reg->SetTop(first_reg->Begin() + num_bytes);
      for (size_t p = left + 1; p < right; ++p) {
        DCHECK_LT(p, num_regions_);
        DCHECK(regions_[p].IsFree());
        regions_[p].UnfreeLargeTail(time_);
        ++num_non_free_regions_;
      }
      *bytes_allocated = num_bytes;
      if (usable_size != nullptr) {
        *usable_size = num_regs * kRegionSize;
      }
      *bytes_tl_bulk_allocated = num_bytes;
      return reinterpret_cast<mirror::Object*>(first_reg->Begin());
    } else {
      // right points to the first non-free region. Restart the search just after it.
      left = right + 1;
    }
  }
  return nullptr;
}

}  // namespace space
}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_SPACE_REGION_SPACE_INL_H_