/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_SPACE_REGION_SPACE_INL_H_
#define ART_RUNTIME_GC_SPACE_REGION_SPACE_INL_H_

#include "region_space.h"
#include "thread-inl.h"

namespace art {
namespace gc {
namespace space {

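// Thread-safe allocation fast path. Rounds the request up to kAlignment and
// delegates to AllocNonvirtual<false> (a mutator allocation, not an evacuation).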
inline mirror::Object* RegionSpace::Alloc(Thread*, size_t num_bytes, size_t* bytes_allocated,
                                          size_t* usable_size,
                                          size_t* bytes_tl_bulk_allocated) {
  num_bytes = RoundUp(num_bytes, kAlignment);
  return AllocNonvirtual<false>(num_bytes, bytes_allocated, usable_size,
                                bytes_tl_bulk_allocated);
}

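// Variant for callers that already hold the mutator lock exclusively (i.e. all
// other threads are suspended); it simply forwards to Alloc().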
inline mirror::Object* RegionSpace::AllocThreadUnsafe(Thread* self, size_t num_bytes,
                                                      size_t* bytes_allocated,
                                                      size_t* usable_size,
                                                      size_t* bytes_tl_bulk_allocated) {
  Locks::mutator_lock_->AssertExclusiveHeld(self);
  return Alloc(self, num_bytes, bytes_allocated, usable_size, bytes_tl_bulk_allocated);
}

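// Core allocation routine. Non-large requests (<= kRegionSize) first try a
// lock-free bump allocation in the current region (or the evacuation region if
// kForEvac); on failure, region_lock_ is taken, the same region is retried in
// case another thread installed a fresh one, and finally a free region is
// claimed. Larger requests go to AllocLarge. For mutator allocations, half of
// the regions are held back so a full evacuation always has room: with
// num_regions_ == 256, for example, the request fails once 128 regions are
// already non-free.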
template<bool kForEvac>
inline mirror::Object* RegionSpace::AllocNonvirtual(size_t num_bytes, size_t* bytes_allocated,
                                                    size_t* usable_size,
                                                    size_t* bytes_tl_bulk_allocated) {
  DCHECK_ALIGNED(num_bytes, kAlignment);
  mirror::Object* obj;
  if (LIKELY(num_bytes <= kRegionSize)) {
    // Non-large object.
    if (!kForEvac) {
      obj = current_region_->Alloc(num_bytes, bytes_allocated, usable_size,
                                   bytes_tl_bulk_allocated);
    } else {
      DCHECK(evac_region_ != nullptr);
      obj = evac_region_->Alloc(num_bytes, bytes_allocated, usable_size,
                                bytes_tl_bulk_allocated);
    }
    if (LIKELY(obj != nullptr)) {
      return obj;
    }
    MutexLock mu(Thread::Current(), region_lock_);
    // Retry with current region since another thread may have updated it.
    if (!kForEvac) {
      obj = current_region_->Alloc(num_bytes, bytes_allocated, usable_size,
                                   bytes_tl_bulk_allocated);
    } else {
      obj = evac_region_->Alloc(num_bytes, bytes_allocated, usable_size,
                                bytes_tl_bulk_allocated);
    }
    if (LIKELY(obj != nullptr)) {
      return obj;
    }
    if (!kForEvac) {
      // Retain sufficient free regions for full evacuation.
      if ((num_non_free_regions_ + 1) * 2 > num_regions_) {
        return nullptr;
      }
      for (size_t i = 0; i < num_regions_; ++i) {
        Region* r = &regions_[i];
        if (r->IsFree()) {
          r->Unfree(this, time_);
          r->SetNewlyAllocated();
          ++num_non_free_regions_;
          obj = r->Alloc(num_bytes, bytes_allocated, usable_size, bytes_tl_bulk_allocated);
          CHECK(obj != nullptr);
          current_region_ = r;
          return obj;
        }
      }
    } else {
      for (size_t i = 0; i < num_regions_; ++i) {
        Region* r = &regions_[i];
        if (r->IsFree()) {
          r->Unfree(this, time_);
          ++num_non_free_regions_;
          obj = r->Alloc(num_bytes, bytes_allocated, usable_size, bytes_tl_bulk_allocated);
          CHECK(obj != nullptr);
          evac_region_ = r;
          return obj;
        }
      }
    }
  } else {
    // Large object.
    obj = AllocLarge<kForEvac>(num_bytes, bytes_allocated, usable_size,
                               bytes_tl_bulk_allocated);
    if (LIKELY(obj != nullptr)) {
      return obj;
    }
  }
  return nullptr;
}

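// Bump-pointer allocation within one region. The top pointer is advanced with
// a relaxed weak compare-and-swap loop, so concurrent allocations into the
// same region need no lock; a nullptr return means the region is full and the
// caller must find or claim another region.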
inline mirror::Object* RegionSpace::Region::Alloc(size_t num_bytes, size_t* bytes_allocated,
                                                  size_t* usable_size,
                                                  size_t* bytes_tl_bulk_allocated) {
  DCHECK(IsAllocated() && IsInToSpace());
  DCHECK_ALIGNED(num_bytes, kAlignment);
  uint8_t* old_top;
  uint8_t* new_top;
  do {
    old_top = top_.LoadRelaxed();
    new_top = old_top + num_bytes;
    if (UNLIKELY(new_top > end_)) {
      return nullptr;
    }
  } while (!top_.CompareExchangeWeakRelaxed(old_top, new_top));
  objects_allocated_.FetchAndAddRelaxed(1);
  DCHECK_LE(Top(), end_);
  DCHECK_LT(old_top, end_);
  DCHECK_LE(new_top, end_);
  *bytes_allocated = num_bytes;
  if (usable_size != nullptr) {
    *usable_size = num_bytes;
  }
  *bytes_tl_bulk_allocated = num_bytes;
  return reinterpret_cast<mirror::Object*>(old_top);
}

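// Returns the object's exact size and, if requested, its usable size: the
// alignment-rounded size for an in-region object, or the whole-region-rounded
// size for a large object.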
inline size_t RegionSpace::AllocationSizeNonvirtual(mirror::Object* obj, size_t* usable_size) {
  size_t num_bytes = obj->SizeOf();
  if (usable_size != nullptr) {
    if (LIKELY(num_bytes <= kRegionSize)) {
      DCHECK(RefToRegion(obj)->IsAllocated());
      *usable_size = RoundUp(num_bytes, kAlignment);
    } else {
      DCHECK(RefToRegion(obj)->IsLarge());
      *usable_size = RoundUp(num_bytes, kRegionSize);
    }
  }
  return num_bytes;
}

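// Sums BytesAllocated() over every non-free region matching kRegionType
// (all, from-space, unevacuated from-space, or to-space). Holds region_lock_
// for the full linear walk over the regions.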
template<RegionSpace::RegionType kRegionType>
uint64_t RegionSpace::GetBytesAllocatedInternal() {
  uint64_t bytes = 0;
  MutexLock mu(Thread::Current(), region_lock_);
  for (size_t i = 0; i < num_regions_; ++i) {
    Region* r = &regions_[i];
    if (r->IsFree()) {
      continue;
    }
    switch (kRegionType) {
      case RegionType::kRegionTypeAll:
        bytes += r->BytesAllocated();
        break;
      case RegionType::kRegionTypeFromSpace:
        if (r->IsInFromSpace()) {
          bytes += r->BytesAllocated();
        }
        break;
      case RegionType::kRegionTypeUnevacFromSpace:
        if (r->IsInUnevacFromSpace()) {
          bytes += r->BytesAllocated();
        }
        break;
      case RegionType::kRegionTypeToSpace:
        if (r->IsInToSpace()) {
          bytes += r->BytesAllocated();
        }
        break;
      default:
        LOG(FATAL) << "Unexpected region type: " << kRegionType;
    }
  }
  return bytes;
}

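// Same walk as GetBytesAllocatedInternal(), but accumulates object counts
// rather than byte counts.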
template<RegionSpace::RegionType kRegionType>
uint64_t RegionSpace::GetObjectsAllocatedInternal() {
  uint64_t objects = 0;
  MutexLock mu(Thread::Current(), region_lock_);
  for (size_t i = 0; i < num_regions_; ++i) {
    Region* r = &regions_[i];
    if (r->IsFree()) {
      continue;
    }
    switch (kRegionType) {
      case RegionType::kRegionTypeAll:
        objects += r->ObjectsAllocated();
        break;
      case RegionType::kRegionTypeFromSpace:
        if (r->IsInFromSpace()) {
          objects += r->ObjectsAllocated();
        }
        break;
      case RegionType::kRegionTypeUnevacFromSpace:
        if (r->IsInUnevacFromSpace()) {
          objects += r->ObjectsAllocated();
        }
        break;
      case RegionType::kRegionTypeToSpace:
        if (r->IsInToSpace()) {
          objects += r->ObjectsAllocated();
        }
        break;
      default:
        LOG(FATAL) << "Unexpected region type: " << kRegionType;
    }
  }
  return objects;
}

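// Visits live objects region by region, optionally restricted to to-space.
// A large region contributes the single object at its start, large tails are
// skipped, and ordinary regions are walked linearly from Begin() to Top();
// when a region is only partially live (live bytes known and smaller than its
// used extent), the live bitmap is used instead so dead objects are not visited.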
template<bool kToSpaceOnly>
void RegionSpace::WalkInternal(ObjectCallback* callback, void* arg) {
  // TODO: MutexLock on region_lock_ won't work due to lock order
  // issues (the classloader classes lock and the monitor lock). We
  // call this with threads suspended.
  Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
  for (size_t i = 0; i < num_regions_; ++i) {
    Region* r = &regions_[i];
    if (r->IsFree() || (kToSpaceOnly && !r->IsInToSpace())) {
      continue;
    }
    if (r->IsLarge()) {
      // Avoid visiting dead large objects since they may contain dangling pointers to the
      // from-space.
      DCHECK_GT(r->LiveBytes(), 0u) << "Visiting dead large object";
      mirror::Object* obj = reinterpret_cast<mirror::Object*>(r->Begin());
      DCHECK(obj->GetClass() != nullptr);
      callback(obj, arg);
    } else if (r->IsLargeTail()) {
      // Do nothing.
    } else {
      // For newly allocated and evacuated regions, live bytes will be -1.
      uint8_t* pos = r->Begin();
      uint8_t* top = r->Top();
      const bool need_bitmap =
          r->LiveBytes() != static_cast<size_t>(-1) &&
          r->LiveBytes() != static_cast<size_t>(top - pos);
      if (need_bitmap) {
        GetLiveBitmap()->VisitMarkedRange(
            reinterpret_cast<uintptr_t>(pos),
            reinterpret_cast<uintptr_t>(top),
            [callback, arg](mirror::Object* obj) {
          callback(obj, arg);
        });
      } else {
        while (pos < top) {
          mirror::Object* obj = reinterpret_cast<mirror::Object*>(pos);
          if (obj->GetClass<kDefaultVerifyFlags, kWithoutReadBarrier>() != nullptr) {
            callback(obj, arg);
            pos = reinterpret_cast<uint8_t*>(GetNextObject(obj));
          } else {
            break;
          }
        }
      }
    }
  }
}

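// Returns the address immediately after obj, rounded up to kAlignment: the
// position where the next object in the region would begin.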
inline mirror::Object* RegionSpace::GetNextObject(mirror::Object* obj) {
  const uintptr_t position = reinterpret_cast<uintptr_t>(obj) + obj->SizeOf();
  return reinterpret_cast<mirror::Object*>(RoundUp(position, kAlignment));
}

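// Allocates an object spanning one or more whole regions. Scans for num_regs
// contiguous free regions; the first becomes a large region whose top records
// the actual size, and the remainder become large-tail regions. As in the
// non-large path, mutator allocations leave half of the regions free for a
// future evacuation.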
template<bool kForEvac>
mirror::Object* RegionSpace::AllocLarge(size_t num_bytes, size_t* bytes_allocated,
                                        size_t* usable_size,
                                        size_t* bytes_tl_bulk_allocated) {
  DCHECK_ALIGNED(num_bytes, kAlignment);
  DCHECK_GT(num_bytes, kRegionSize);
  size_t num_regs = RoundUp(num_bytes, kRegionSize) / kRegionSize;
  DCHECK_GT(num_regs, 0U);
  DCHECK_LT((num_regs - 1) * kRegionSize, num_bytes);
  DCHECK_LE(num_bytes, num_regs * kRegionSize);
  MutexLock mu(Thread::Current(), region_lock_);
  if (!kForEvac) {
    // Retain sufficient free regions for full evacuation.
    if ((num_non_free_regions_ + num_regs) * 2 > num_regions_) {
      return nullptr;
    }
  }
  // Find a large enough run of contiguous free regions.
  size_t left = 0;
  while (left + num_regs - 1 < num_regions_) {
    bool found = true;
    size_t right = left;
    DCHECK_LT(right, left + num_regs)
        << "The inner loop should iterate at least once";
    while (right < left + num_regs) {
      if (regions_[right].IsFree()) {
        ++right;
      } else {
        found = false;
        break;
      }
    }
    if (found) {
      // right points one region past the last free region.
      DCHECK_EQ(left + num_regs, right);
      Region* first_reg = &regions_[left];
      DCHECK(first_reg->IsFree());
      first_reg->UnfreeLarge(this, time_);
      ++num_non_free_regions_;
      first_reg->SetTop(first_reg->Begin() + num_bytes);
      for (size_t p = left + 1; p < right; ++p) {
        DCHECK_LT(p, num_regions_);
        DCHECK(regions_[p].IsFree());
        regions_[p].UnfreeLargeTail(this, time_);
        ++num_non_free_regions_;
      }
      *bytes_allocated = num_bytes;
      if (usable_size != nullptr) {
        *usable_size = num_regs * kRegionSize;
      }
      *bytes_tl_bulk_allocated = num_bytes;
      return reinterpret_cast<mirror::Object*>(first_reg->Begin());
    } else {
      // right points to the non-free region that ended the scan. Restart from the one after it.
      left = right + 1;
    }
  }
  return nullptr;
}

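// Bytes in use within this region: the extent up to Top() for a large region,
// zero for a large tail, and otherwise either the owning thread's thread-local
// bytes-allocated count (when the region backs a TLAB) or Top() - begin_.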
inline size_t RegionSpace::Region::BytesAllocated() const {
  if (IsLarge()) {
    DCHECK_LT(begin_ + kRegionSize, Top());
    return static_cast<size_t>(Top() - begin_);
  } else if (IsLargeTail()) {
    DCHECK_EQ(begin_, Top());
    return 0;
  } else {
    DCHECK(IsAllocated()) << static_cast<uint>(state_);
    DCHECK_LE(begin_, Top());
    size_t bytes;
    if (is_a_tlab_) {
      bytes = thread_->GetThreadLocalBytesAllocated();
    } else {
      bytes = static_cast<size_t>(Top() - begin_);
    }
    DCHECK_LE(bytes, kRegionSize);
    return bytes;
  }
}

}  // namespace space
}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_SPACE_REGION_SPACE_INL_H_