/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "lambda/box_table.h"

#include "base/mutex.h"
#include "common_throws.h"
#include "gc_root-inl.h"
#include "lambda/closure.h"
#include "lambda/leaking_allocator.h"
#include "mirror/method.h"
#include "mirror/object-inl.h"
#include "thread.h"

#include <vector>

namespace art {
namespace lambda {
// Temporarily represent the lambda Closure as its raw bytes in an array.
// TODO: Generate a proxy class for the closure when boxing the first time.
using BoxedClosurePointerType = mirror::ByteArray*;

static mirror::Class* GetBoxedClosureClass() SHARED_REQUIRES(Locks::mutator_lock_) {
  return mirror::ByteArray::GetArrayClass();
}

namespace {
  // Convenience functions for allocating and deleting box table copies of the closures.
  struct ClosureAllocator {
    // Deletes a Closure that was allocated through ::Allocate.
    static void Delete(Closure* ptr) {
      delete[] reinterpret_cast<char*>(ptr);
    }

    // Returns a well-aligned pointer to a newly allocated Closure on the 'new' heap.
    static Closure* Allocate(size_t size) {
      DCHECK_GE(size, sizeof(Closure));

      // TODO: Maybe point to the interior of the boxed closure object after we add proxy support?
      Closure* closure = reinterpret_cast<Closure*>(new char[size]);
      DCHECK_ALIGNED(closure, alignof(Closure));
      return closure;
    }
  };
}  // namespace

BoxTable::BoxTable()
  : allow_new_weaks_(true),
    new_weaks_condition_("lambda box table allowed weaks", *Locks::lambda_table_lock_) {}

BoxTable::~BoxTable() {
  // Free all the copies of our closures.
  for (auto map_iterator = map_.begin(); map_iterator != map_.end(); ) {
    std::pair<UnorderedMapKeyType, ValueType>& key_value_pair = *map_iterator;

    Closure* closure = key_value_pair.first;

    // Remove the entry from the map first so that nothing accesses a dangling pointer.
    map_iterator = map_.Erase(map_iterator);

    // Safe to delete; there are no dangling pointers left.
    ClosureAllocator::Delete(closure);
  }
}

mirror::Object* BoxTable::BoxLambda(const ClosureType& closure) {
  Thread* self = Thread::Current();

  {
    // TODO: Switch to ReaderMutexLock if ConditionVariable ever supports RW Mutexes
    /*Reader*/MutexLock mu(self, *Locks::lambda_table_lock_);
    BlockUntilWeaksAllowed();

    // Attempt to look up this closure; it may already have been boxed previously.
    // If that is the case we *must* return the same object as before to maintain
    // referential equality.
    //
    // In managed code:
    //   Functional f = () -> 5;  // vF = create-lambda
    //   Object a = f;            // vA = box-lambda vF
    //   Object b = f;            // vB = box-lambda vF
    //   assert(a == b)
    ValueType value = FindBoxedLambda(closure);
    if (!value.IsNull()) {
      return value.Read();
    }

    // Otherwise we need to box the closure ourselves and insert it into the hash map.
  }

  // Release the lambda table lock here, so that thread suspension is allowed.

  // Convert the Closure into a managed byte[] which will serve
  // as the temporary 'boxed' version of the lambda. This is good enough
  // to check all the basic object identities that a boxed lambda must retain.
  // It's also good enough to contain all the captured primitive variables.

  // TODO: Boxing an innate lambda (i.e. made with create-lambda) should make a proxy class
  // TODO: Boxing a learned lambda (i.e. made with unbox-lambda) should return the original object
  BoxedClosurePointerType closure_as_array_object =
      mirror::ByteArray::Alloc(self, closure->GetSize());

  // There are no thread suspension points after this, so we don't need to put it into a handle.

  if (UNLIKELY(closure_as_array_object == nullptr)) {
    // Most likely an OOM has occurred.
    CHECK(self->IsExceptionPending());
    return nullptr;
  }

  // Write the raw closure data into the byte[].
  closure->CopyTo(closure_as_array_object->GetRawData(sizeof(uint8_t),  // component size
                                                      0 /*index*/),
                  closure_as_array_object->GetLength());

  // The closure has been successfully boxed into an object; now insert it into the hash map.
  {
    MutexLock mu(self, *Locks::lambda_table_lock_);
    BlockUntilWeaksAllowed();

    // Look up the closure again; it's possible another thread boxed it while
    // we were allocating the object above.
    ValueType value = FindBoxedLambda(closure);
    if (UNLIKELY(!value.IsNull())) {
      // Let the GC clean up closure_as_array_object at a later time.
      return value.Read();
    }

    // Otherwise we need to insert it into the hash map in this thread.

    // Make a copy for the box table to keep, in case the closure gets collected from the stack.
    // TODO: GC may need to sweep for roots in the box table's copy of the closure.
    Closure* closure_table_copy = ClosureAllocator::Allocate(closure->GetSize());
    closure->CopyTo(closure_table_copy, closure->GetSize());

    // The closure_table_copy must be deleted manually when we erase it from the map.

    // Actually insert into the table.
    map_.Insert({closure_table_copy, ValueType(closure_as_array_object)});
  }

  return closure_as_array_object;
}

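// Illustrative example of a box-lambda call site; the box_table pointer and closure
// value below are hypothetical placeholders for values the interpreter already has
// on hand (they are not defined in this file).
//
//   lambda::Closure* closure = /* closure produced by create-lambda */;
//   mirror::Object* first = box_table->BoxLambda(closure);
//   mirror::Object* second = box_table->BoxLambda(closure);
//   DCHECK_EQ(first, second);  // Boxing the same closure twice yields the same object.
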
bool BoxTable::UnboxLambda(mirror::Object* object, ClosureType* out_closure) {
  DCHECK(object != nullptr);
  *out_closure = nullptr;

  Thread* self = Thread::Current();

  // Note that we do not need to take lambda_table_lock_ here,
  // since we never touch the map.

  mirror::Object* boxed_closure_object = object;

  // Raise ClassCastException if the object is not an instance of byte[].
  if (UNLIKELY(!boxed_closure_object->InstanceOf(GetBoxedClosureClass()))) {
    ThrowClassCastException(GetBoxedClosureClass(), boxed_closure_object->GetClass());
    return false;
  }

  // TODO(iam): We must check that the closure object extends/implements the type
  // specified in [type id]. This is not currently implemented since it's always a byte[].

  // If we got this far, the inputs are valid.
  // Shuffle the byte[] back into a raw closure: allocate a copy, fill it, and return it.
  BoxedClosurePointerType boxed_closure_as_array =
      down_cast<BoxedClosurePointerType>(boxed_closure_object);

  const int8_t* unaligned_interior_closure = boxed_closure_as_array->GetData();

  // Allocate a copy that can "escape" and copy the closure data into it.
  Closure* unboxed_closure =
      LeakingAllocator::MakeFlexibleInstance<Closure>(self, boxed_closure_as_array->GetLength());
  // TODO: don't just memcpy the closure; it's unsafe once we add references to the mix.
  memcpy(unboxed_closure, unaligned_interior_closure, boxed_closure_as_array->GetLength());

  DCHECK_EQ(unboxed_closure->GetSize(), static_cast<size_t>(boxed_closure_as_array->GetLength()));

  *out_closure = unboxed_closure;
  return true;
}

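// Illustrative example of an unbox-lambda call site; boxed_object and box_table are
// hypothetical placeholders (not defined in this file).
//
//   lambda::Closure* unboxed = nullptr;
//   if (!box_table->UnboxLambda(boxed_object, &unboxed)) {
//     return;  // A ClassCastException is already pending on the current thread.
//   }
//   // unboxed now points to a heap copy of the closure data; it is allocated with
//   // LeakingAllocator, so this code does not free it.
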
BoxTable::ValueType BoxTable::FindBoxedLambda(const ClosureType& closure) const {
  auto map_iterator = map_.Find(closure);
  if (map_iterator != map_.end()) {
    const std::pair<UnorderedMapKeyType, ValueType>& key_value_pair = *map_iterator;
    const ValueType& value = key_value_pair.second;

    DCHECK(!value.IsNull());  // Never store null boxes.
    return value;
  }

  return ValueType(nullptr);
}

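// Blocks while weak-root access into the table is disallowed. Without read barriers
// this is gated by the table-wide allow_new_weaks_ flag (toggled by
// DisallowNewWeakBoxedLambdas/AllowNewWeakBoxedLambdas); with read barriers it is
// gated by the calling thread's weak-ref-access flag. Callers already hold
// lambda_table_lock_, which the condition variable releases while waiting.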
void BoxTable::BlockUntilWeaksAllowed() {
  Thread* self = Thread::Current();
  while (UNLIKELY((!kUseReadBarrier && !allow_new_weaks_) ||
                  (kUseReadBarrier && !self->GetWeakRefAccessEnabled()))) {
    new_weaks_condition_.WaitHoldingLocks(self);  // wait while holding mutator lock
  }
}

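// Sweeps the weak roots held by the table; intended to be called by the GC during
// reference sweeping. Entries whose boxed byte[] was not marked are removed and their
// table-owned closure copy is freed; surviving entries are updated to the object's
// new address.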
void BoxTable::SweepWeakBoxedLambdas(IsMarkedVisitor* visitor) {
  DCHECK(visitor != nullptr);

  Thread* self = Thread::Current();
  MutexLock mu(self, *Locks::lambda_table_lock_);

  /*
   * Visit every weak root in our lambda box table.
   * Remove unmarked objects and update marked objects to their new addresses.
   */
  std::vector<ClosureType> remove_list;
  for (auto map_iterator = map_.begin(); map_iterator != map_.end(); ) {
    std::pair<UnorderedMapKeyType, ValueType>& key_value_pair = *map_iterator;

    const ValueType& old_value = key_value_pair.second;

    // This does not need a read barrier because this is called by GC.
    mirror::Object* old_value_raw = old_value.Read<kWithoutReadBarrier>();
    mirror::Object* new_value = visitor->IsMarked(old_value_raw);

    if (new_value == nullptr) {
      // The object has been swept away.
      const ClosureType& closure = key_value_pair.first;

      // Delete the entry from the map.
      map_iterator = map_.Erase(map_iterator);

      // Clean up the memory by deleting the closure.
      ClosureAllocator::Delete(closure);
    } else {
      // The object has been moved.
      // Update the map.
      key_value_pair.second = ValueType(new_value);
      ++map_iterator;
    }
  }

  // Occasionally shrink the map to avoid growing very large.
  if (map_.CalculateLoadFactor() < kMinimumLoadFactor) {
    map_.ShrinkToMaximumLoad();
  }
}

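// The three methods below are the weak-access toggles the GC is expected to drive:
// Disallow/Allow bracket GC phases in which new weak table entries must not be
// created (non-read-barrier collectors only), while Broadcast wakes blocked threads
// once a read-barrier collector re-enables per-thread weak-ref access.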
void BoxTable::DisallowNewWeakBoxedLambdas() {
  CHECK(!kUseReadBarrier);
  Thread* self = Thread::Current();
  MutexLock mu(self, *Locks::lambda_table_lock_);

  allow_new_weaks_ = false;
}

void BoxTable::AllowNewWeakBoxedLambdas() {
  CHECK(!kUseReadBarrier);
  Thread* self = Thread::Current();
  MutexLock mu(self, *Locks::lambda_table_lock_);

  allow_new_weaks_ = true;
  new_weaks_condition_.Broadcast(self);
}

void BoxTable::BroadcastForNewWeakBoxedLambdas() {
  CHECK(kUseReadBarrier);
  Thread* self = Thread::Current();
  MutexLock mu(self, *Locks::lambda_table_lock_);
  new_weaks_condition_.Broadcast(self);
}

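// The functors below are the policy hooks for the box table's hash map (see
// box_table.h): EmptyFn defines and detects empty slots, EqualsFn compares closure
// keys via Closure::ReferenceEquals, and HashFn hashes a closure via
// Closure::GetHashCode.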
void BoxTable::EmptyFn::MakeEmpty(std::pair<UnorderedMapKeyType, ValueType>& item) const {
  item.first = nullptr;

  Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
  item.second = ValueType();  // Also clear the GC root.
}

bool BoxTable::EmptyFn::IsEmpty(const std::pair<UnorderedMapKeyType, ValueType>& item) const {
  return item.first == nullptr;
}

bool BoxTable::EqualsFn::operator()(const UnorderedMapKeyType& lhs,
                                    const UnorderedMapKeyType& rhs) const {
  // Nothing needs this right now, but leave this assertion for later when
  // we need to look at the references inside the closure.
  Locks::mutator_lock_->AssertSharedHeld(Thread::Current());

  return lhs->ReferenceEquals(rhs);
}

size_t BoxTable::HashFn::operator()(const UnorderedMapKeyType& key) const {
  const lambda::Closure* closure = key;
  DCHECK_ALIGNED(closure, alignof(lambda::Closure));

  // Need to hold mutator_lock_ before calling into Closure::GetHashCode.
  Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
  return closure->GetHashCode();
}

}  // namespace lambda
}  // namespace art