/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_COLLECTOR_CONCURRENT_COPYING_INL_H_
#define ART_RUNTIME_GC_COLLECTOR_CONCURRENT_COPYING_INL_H_

#include "concurrent_copying.h"

#include "gc/accounting/space_bitmap-inl.h"
#include "gc/heap.h"
#include "gc/space/region_space.h"
#include "lock_word.h"

namespace art {
namespace gc {
namespace collector {
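
// Returns the marked (to-space) reference for from_ref, marking it if it
// isn't marked yet. This is the read barrier slow path and the collector's
// marking entry point.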
inline mirror::Object* ConcurrentCopying::Mark(mirror::Object* from_ref) {
  if (from_ref == nullptr) {
    return nullptr;
  }
  DCHECK(heap_->collector_type_ == kCollectorTypeCC);
  if (UNLIKELY(kUseBakerReadBarrier && !is_active_)) {
    // In the lock word forwarding address state, the read barrier bits
    // in the lock word are part of the stored forwarding address and
    // therefore invalid. This is usually OK because from-space copies of
    // objects aren't accessed by mutators due to the to-space invariant.
    // However, during the dex2oat image writing relocation and the zygote
    // compaction, objects can be in the forwarding address state (to store
    // the forwarding/relocation addresses) while still being accessed, in
    // which case the invalid read barrier bits are consulted. If those bits
    // happen to look gray when the object isn't actually gray, the read
    // barrier slow path can trigger when it shouldn't. To guard against
    // this, return early if the CC collector isn't running.
    return from_ref;
  }
  DCHECK(region_space_ != nullptr) << "Read barrier slow path taken when CC isn't running?";
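  // Dispatch on the region type of from_ref: to-space objects are already
  // marked; from-space objects are forwarded (copied) to the to-space;
  // unevacuated from-space objects are marked in place via the mark bitmap;
  // objects outside the region space take the non-moving path.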
  space::RegionSpace::RegionType rtype = region_space_->GetRegionType(from_ref);
  switch (rtype) {
    case space::RegionSpace::RegionType::kRegionTypeToSpace:
      // It's already marked.
      return from_ref;
    case space::RegionSpace::RegionType::kRegionTypeFromSpace: {
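      // Another thread may already have copied the object; GetFwdPtr() returns
      // the to-space copy installed in the lock word, or null if none exists.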
      mirror::Object* to_ref = GetFwdPtr(from_ref);
      if (kUseBakerReadBarrier) {
        DCHECK_NE(to_ref, ReadBarrier::GrayPtr())
            << "from_ref=" << from_ref << " to_ref=" << to_ref;
      }
      if (to_ref == nullptr) {
        // It isn't marked yet. Mark it by copying it to the to-space.
        to_ref = Copy(from_ref);
      }
      DCHECK(region_space_->IsInToSpace(to_ref) || heap_->non_moving_space_->HasAddress(to_ref))
          << "from_ref=" << from_ref << " to_ref=" << to_ref;
      return to_ref;
    }
    case space::RegionSpace::RegionType::kRegionTypeUnevacFromSpace: {
      // Try to flip the object's read barrier state from white (unmarked) to
      // gray (marked). The CAS may fail if another thread has already grayed
      // the object, which is OK.
      if (kUseBakerReadBarrier) {
        from_ref->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(), ReadBarrier::GrayPtr());
      }
      mirror::Object* to_ref = from_ref;
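      // Atomically set the object's bit in the mark bitmap. Only the thread
      // that flips the bit from unset to set pushes the object onto the mark
      // stack, so the object is pushed at most once via this path.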
      if (region_space_bitmap_->AtomicTestAndSet(from_ref)) {
        // Already marked.
      } else {
        // Newly marked.
        if (kUseBakerReadBarrier) {
          DCHECK_EQ(to_ref->GetReadBarrierPointer(), ReadBarrier::GrayPtr());
        }
        PushOntoMarkStack(to_ref);
      }
      return to_ref;
    }
    case space::RegionSpace::RegionType::kRegionTypeNone:
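      // from_ref isn't in the region space at all (e.g. it's in the
      // non-moving space); mark it in place without copying.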
      return MarkNonMoving(from_ref);
    default:
      UNREACHABLE();
  }
}
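
// Returns the to-space address that from_ref has been forwarded to, or null
// if the object hasn't been copied yet. During evacuation, the collector
// stores the forwarding address in the object's lock word (the
// LockWord::kForwardingAddress state).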
inline mirror::Object* ConcurrentCopying::GetFwdPtr(mirror::Object* from_ref) {
  DCHECK(region_space_->IsInFromSpace(from_ref));
  LockWord lw = from_ref->GetLockWord(false);
  if (lw.GetState() == LockWord::kForwardingAddress) {
    mirror::Object* fwd_ptr = reinterpret_cast<mirror::Object*>(lw.ForwardingAddress());
    DCHECK(fwd_ptr != nullptr);
    return fwd_ptr;
  } else {
    return nullptr;
  }
}

}  // namespace collector
}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_COLLECTOR_CONCURRENT_COPYING_INL_H_