/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <ctime>

#include "object.h"

#include "art_field.h"
#include "art_field-inl.h"
#include "array-inl.h"
#include "class.h"
#include "class-inl.h"
#include "class_linker-inl.h"
#include "dex_file-inl.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/heap.h"
#include "iftable-inl.h"
#include "monitor.h"
#include "object-inl.h"
#include "object_array-inl.h"
#include "runtime.h"
#include "handle_scope-inl.h"
#include "throwable.h"
#include "well_known_classes.h"

namespace art {
namespace mirror {

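// Shared state for identity hash code generation. Seeding with the current
// time makes the sequence of identity hash codes differ from run to run.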
Atomic<uint32_t> Object::hash_code_seed(987654321U + std::time(nullptr));

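// Visitor that re-reads each reference field of the source object through a
// read barrier and stores the result into the destination object. CopyObject()
// uses it to fix up reference fields after the raw memcpy when read barriers
// are enabled.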
class CopyReferenceFieldsWithReadBarrierVisitor {
 public:
  explicit CopyReferenceFieldsWithReadBarrierVisitor(Object* dest_obj)
      : dest_obj_(dest_obj) {}

  void operator()(Object* obj, MemberOffset offset, bool /* is_static */) const
      ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_) {
    // GetFieldObject() contains a read barrier.
    Object* ref = obj->GetFieldObject<Object>(offset);
    // No write barrier here, as the large object space has no card table
    // coverage. Instead, cards will be marked separately.
    dest_obj_->SetFieldObjectWithoutWriteBarrier<false, false>(offset, ref);
  }

  void operator()(mirror::Class* klass, mirror::Reference* ref) const
      ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_) {
    // Copy java.lang.ref.Reference.referent, which isn't visited in
    // Object::VisitReferences().
    DCHECK(klass->IsTypeOfReferenceClass());
    this->operator()(ref, mirror::Reference::ReferentOffset(), false);
  }

  // Unused since we don't copy class native roots.
  void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED)
      const {}
  void VisitRoot(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED) const {}

 private:
  Object* const dest_obj_;
};

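// Shallow-copies the instance data of src into dest: a raw byte copy of
// everything past the header, then a read-barrier fix-up pass if needed,
// followed by the appropriate write barriers, and finally registration of
// dest with the heap if its class is finalizable.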
Object* Object::CopyObject(Thread* self, mirror::Object* dest, mirror::Object* src,
                           size_t num_bytes) {
  // Copy instance data.  We assume memcpy copies by words.
  // TODO: expose and use move32.
  uint8_t* src_bytes = reinterpret_cast<uint8_t*>(src);
  uint8_t* dst_bytes = reinterpret_cast<uint8_t*>(dest);
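  // Skip the Object header: the allocator has already installed dest's class
  // pointer and a fresh lock word, and those must not be clobbered by src's.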
  size_t offset = sizeof(Object);
  memcpy(dst_bytes + offset, src_bytes + offset, num_bytes - offset);
  if (kUseReadBarrier) {
    // We need read barriers here. After the memcpy that covers the whole
    // object above, copy the reference fields one by one again, this time
    // through a read barrier. TODO: Optimize this later?
    CopyReferenceFieldsWithReadBarrierVisitor visitor(dest);
    src->VisitReferences(visitor, visitor);
  }
  gc::Heap* heap = Runtime::Current()->GetHeap();
  // Perform write barriers on copied object references.
  Class* c = src->GetClass();
  if (c->IsArrayClass()) {
    if (!c->GetComponentType()->IsPrimitive()) {
      ObjectArray<Object>* array = dest->AsObjectArray<Object>();
      heap->WriteBarrierArray(dest, 0, array->GetLength());
    }
  } else {
    heap->WriteBarrierEveryFieldOf(dest);
  }
  if (c->IsFinalizable()) {
    heap->AddFinalizerReference(self, &dest);
  }
  return dest;
}

// An allocation pre-fence visitor that copies the object.
class CopyObjectVisitor {
 public:
  CopyObjectVisitor(Thread* self, Handle<Object>* orig, size_t num_bytes)
      : self_(self), orig_(orig), num_bytes_(num_bytes) {
  }

  void operator()(Object* obj, size_t usable_size ATTRIBUTE_UNUSED) const
      SHARED_REQUIRES(Locks::mutator_lock_) {
    Object::CopyObject(self_, obj, orig_->Get(), num_bytes_);
  }

 private:
  Thread* const self_;
  Handle<Object>* const orig_;
  const size_t num_bytes_;
  DISALLOW_COPY_AND_ASSIGN(CopyObjectVisitor);
};

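// Creates a shallow copy of this object; this is the native side of
// java.lang.Object.clone(). The copy runs as a pre-fence visitor so the new
// object's contents are in place before the allocation is published.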
Object* Object::Clone(Thread* self) {
  CHECK(!IsClass()) << "Can't clone classes.";
  // Object::SizeOf gets the right size even if we're an array. Using
  // GetClass()->AllocObject() here would be wrong, as it ignores the array length.
  gc::Heap* heap = Runtime::Current()->GetHeap();
  size_t num_bytes = SizeOf();
  StackHandleScope<1> hs(self);
  Handle<Object> this_object(hs.NewHandle(this));
  Object* copy;
  CopyObjectVisitor visitor(self, &this_object, num_bytes);
  if (heap->IsMovableObject(this)) {
    copy = heap->AllocObject<true>(self, GetClass(), num_bytes, visitor);
  } else {
    copy = heap->AllocNonMovableObject<true>(self, GetClass(), num_bytes, visitor);
  }
  return copy;
}

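// Advances the shared seed with a linear congruential generator (the classic
// a = 1103515245, c = 12345 constants from the C standard's example rand())
// and returns the previous value masked to the lock word's hash bits. Values
// that mask to zero are retried; zero appears to be reserved to mean "no hash
// code set" (e.g. in Monitor).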
uint32_t Object::GenerateIdentityHashCode() {
  uint32_t expected_value, new_value;
  do {
    expected_value = hash_code_seed.LoadRelaxed();
    new_value = expected_value * 1103515245 + 12345;
  } while (!hash_code_seed.CompareExchangeWeakRelaxed(expected_value, new_value) ||
      (expected_value & LockWord::kHashMask) == 0);
  return expected_value & LockWord::kHashMask;
}

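// Reseeds the generator, e.g. so tests can make the sequence of identity hash
// codes deterministic.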
void Object::SetHashCodeSeed(uint32_t new_seed) {
  hash_code_seed.StoreRelaxed(new_seed);
}

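// Returns the identity hash code for this object (what System.identityHashCode()
// ultimately reports), installing one on first use. The lock word state decides
// where the hash lives: an unlocked word can hold it directly, a thin lock must
// first be inflated to a monitor that can store both, and a fat lock already
// has a monitor to consult.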
int32_t Object::IdentityHashCode() const {
  mirror::Object* current_this = const_cast<mirror::Object*>(this);
  while (true) {
    LockWord lw = current_this->GetLockWord(false);
    switch (lw.GetState()) {
      case LockWord::kUnlocked: {
        // Try to compare and swap in a new hash. If the CAS succeeds, return
        // the new hash directly; if it fails, loop and try again.
        LockWord hash_word = LockWord::FromHashCode(GenerateIdentityHashCode(),
                                                    lw.ReadBarrierState());
        DCHECK_EQ(hash_word.GetState(), LockWord::kHashCode);
        if (const_cast<Object*>(this)->CasLockWordWeakRelaxed(lw, hash_word)) {
          return hash_word.GetHashCode();
        }
        break;
      }
      case LockWord::kThinLocked: {
        // Inflate the thin lock to a monitor and stick the hash code inside the
        // monitor. May fail spuriously.
        Thread* self = Thread::Current();
        StackHandleScope<1> hs(self);
        Handle<mirror::Object> h_this(hs.NewHandle(current_this));
        Monitor::InflateThinLocked(self, h_this, lw, GenerateIdentityHashCode());
        // A GC may have occurred while we were suspended in kBlocked, so reload
        // this object through the handle.
        current_this = h_this.Get();
        break;
      }
      case LockWord::kFatLocked: {
        // Already inflated, return the hash stored in the monitor.
        Monitor* monitor = lw.FatLockMonitor();
        DCHECK(monitor != nullptr);
        return monitor->GetHashCode();
      }
      case LockWord::kHashCode: {
        return lw.GetHashCode();
      }
      default: {
        LOG(FATAL) << "Invalid lock word state during IdentityHashCode: " << lw.GetState();
        break;
      }
    }
  }
  UNREACHABLE();
}

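// Debug check that a reference store at field_offset lands on a declared
// reference field whose resolved type is assignable from new_value's class.
// Skipped until the runtime, class linker, and this object's class are fully
// set up.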
void Object::CheckFieldAssignmentImpl(MemberOffset field_offset, Object* new_value) {
  Class* c = GetClass();
  Runtime* runtime = Runtime::Current();
  if (runtime->GetClassLinker() == nullptr || !runtime->IsStarted() ||
      !runtime->GetHeap()->IsObjectValidationEnabled() || !c->IsResolved()) {
    return;
  }
  for (Class* cur = c; cur != nullptr; cur = cur->GetSuperClass()) {
    for (ArtField& field : cur->GetIFields()) {
      StackHandleScope<1> hs(Thread::Current());
      Handle<Object> h_object(hs.NewHandle(new_value));
      if (field.GetOffset().Int32Value() == field_offset.Int32Value()) {
        // The field at this offset must itself be a reference field.
        CHECK_EQ(field.GetTypeAsPrimitiveType(), Primitive::kPrimNot);
        // TODO: resolve the field type for moving GC.
        mirror::Class* field_type = field.GetType<!kMovingCollector>();
        if (field_type != nullptr) {
          CHECK(field_type->IsAssignableFrom(new_value->GetClass()));
        }
        return;
      }
    }
  }
  if (c->IsArrayClass()) {
    // Bounds and assignability checks are done in the array setter.
    return;
  }
  if (IsClass()) {
    for (ArtField& field : AsClass()->GetSFields()) {
      if (field.GetOffset().Int32Value() == field_offset.Int32Value()) {
        // The field at this offset must itself be a reference field.
        CHECK_EQ(field.GetTypeAsPrimitiveType(), Primitive::kPrimNot);
        // TODO: resolve the field type for moving GC.
        mirror::Class* field_type = field.GetType<!kMovingCollector>();
        if (field_type != nullptr) {
          CHECK(field_type->IsAssignableFrom(new_value->GetClass()));
        }
        return;
      }
    }
  }
  LOG(FATAL) << "Failed to find field for assignment to " << reinterpret_cast<void*>(this)
      << " of type " << PrettyDescriptor(c) << " at offset " << field_offset;
  UNREACHABLE();
}

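// Maps a raw member offset back to the ArtField occupying it: static fields
// are searched when this object is itself a class, instance fields otherwise.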
ArtField* Object::FindFieldByOffset(MemberOffset offset) {
  return IsClass() ? ArtField::FindStaticFieldWithOffset(AsClass(), offset.Uint32Value())
      : ArtField::FindInstanceFieldWithOffset(GetClass(), offset.Uint32Value());
}

}  // namespace mirror
}  // namespace art