/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef ART_RUNTIME_LAMBDA_BOX_TABLE_H_
#define ART_RUNTIME_LAMBDA_BOX_TABLE_H_

#include "base/allocator.h"
#include "base/hash_map.h"
#include "base/macros.h"
#include "base/mutex.h"
#include "gc_root.h"
#include "object_callbacks.h"

#include <stdint.h>

namespace art {

class ArtMethod;  // forward declaration

namespace mirror {
class Object;  // forward declaration
}  // namespace mirror

namespace lambda {
struct Closure;  // forward declaration

/*
 * Store a table of boxed lambdas. This is required to maintain object referential equality
 * when a lambda is re-boxed.
 *
 * Conceptually, we store a mapping of Closures -> Weak Reference<Boxed Lambda Object>.
 * When too many objects get GCd, we shrink the underlying table to use less space.
 */
class BoxTable FINAL {
 public:
  using ClosureType = art::lambda::Closure*;

  // Boxes a closure into an object. Returns null and throws an exception on failure.
  mirror::Object* BoxLambda(const ClosureType& closure)
      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Locks::lambda_table_lock_);

  // Unboxes an object back into the lambda. Returns false and throws an exception on failure.
  bool UnboxLambda(mirror::Object* object, ClosureType* out_closure)
      SHARED_REQUIRES(Locks::mutator_lock_);
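  //
  // A minimal usage sketch (illustrative only, not part of this interface; the
  // `box_table` instance and the closure value are assumed to be obtained from
  // the runtime elsewhere):
  //
  //   lambda::Closure* closure = /* closure produced by the runtime */;
  //   mirror::Object* box_a = box_table->BoxLambda(closure);
  //   mirror::Object* box_b = box_table->BoxLambda(closure);
  //   // Re-boxing the same closure yields the same object (referential equality),
  //   // as long as the weak box has not been collected in between.
  //   DCHECK_EQ(box_a, box_b);
  //
  //   lambda::Closure* unboxed_closure;
  //   bool success = box_table->UnboxLambda(box_a, &unboxed_closure);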

  // Sweep weak references to lambda boxes. Update the addresses if the objects have been
  // moved, and delete them from the table if the objects have been cleaned up.
  void SweepWeakBoxedLambdas(IsMarkedVisitor* visitor)
      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Locks::lambda_table_lock_);

  // GC callback: Temporarily block anyone from touching the map.
  void DisallowNewWeakBoxedLambdas()
      REQUIRES(!Locks::lambda_table_lock_);

  // GC callback: Unblock any readers who have been queued waiting to touch the map.
  void AllowNewWeakBoxedLambdas()
      REQUIRES(!Locks::lambda_table_lock_);

  // GC callback: Broadcast to wake up any readers that have been queued waiting to touch the map.
  void BroadcastForNewWeakBoxedLambdas()
      REQUIRES(!Locks::lambda_table_lock_);

  BoxTable();
  ~BoxTable();

 private:
  // Explanation:
  // - After all threads are suspended (exclusive mutator lock),
  //   the concurrent-copying GC can move objects from the "from" space to the "to" space.
  // If an object is moved at that time and *before* SweepSystemWeaks is called,
  // then we don't know whether the move has happened yet.
  // Subsequent reads would then (incorrectly) look at the object in the "from" space,
  // which is a problem since that object has already been forwarded and mutations
  // would not be visible in the right space.
  // Instead, use a GcRoot here, which will be automatically updated by the GC.
  //
  // Also, any reads should be protected by a read barrier to always give us the "to" space address.
  using ValueType = GcRoot<mirror::Object>;
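  //
  // Illustrative sketch of reading a stored root (assumed usage, not part of this
  // header): GcRoot<>::Read() applies the read barrier, so a lookup always observes
  // the "to" space address under the concurrent-copying collector.
  //
  //   ValueType root = FindBoxedLambda(closure);
  //   if (!root.IsNull()) {
  //     mirror::Object* boxed = root.Read();  // Read barrier yields the up-to-date address.
  //   }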

  // Attempt to look up the lambda in the map, or return null if it's not there yet.
  ValueType FindBoxedLambda(const ClosureType& closure) const
      SHARED_REQUIRES(Locks::lambda_table_lock_);

  // If the GC has come in and temporarily disallowed touching weaks, block until it is allowed.
  void BlockUntilWeaksAllowed()
      SHARED_REQUIRES(Locks::lambda_table_lock_);

  // The key is the raw closure pointer; how emptied entries are represented (so the HashMap can
  // reclaim their slots) is defined by EmptyFn below.
  using UnorderedMapKeyType = ClosureType;

  // EmptyFn implementation for art::HashMap
  struct EmptyFn {
    void MakeEmpty(std::pair<UnorderedMapKeyType, ValueType>& item) const
        NO_THREAD_SAFETY_ANALYSIS;  // SHARED_REQUIRES(Locks::mutator_lock_)

    bool IsEmpty(const std::pair<UnorderedMapKeyType, ValueType>& item) const;
  };
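  // A plausible sketch of what these hooks do for a raw-pointer key (an assumption
  // for illustration, not taken from the implementation): an empty slot is simply
  // represented by a null closure pointer.
  //
  //   void MakeEmpty(std::pair<UnorderedMapKeyType, ValueType>& item) const {
  //     item.first = nullptr;
  //   }
  //   bool IsEmpty(const std::pair<UnorderedMapKeyType, ValueType>& item) const {
  //     return item.first == nullptr;
  //   }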

  // HashFn implementation for art::HashMap
  struct HashFn {
    size_t operator()(const UnorderedMapKeyType& key) const
        NO_THREAD_SAFETY_ANALYSIS;  // SHARED_REQUIRES(Locks::mutator_lock_)
  };

  // EqualsFn implementation for art::HashMap
  struct EqualsFn {
    bool operator()(const UnorderedMapKeyType& lhs, const UnorderedMapKeyType& rhs) const
        NO_THREAD_SAFETY_ANALYSIS;  // SHARED_REQUIRES(Locks::mutator_lock_)
  };

  using UnorderedMap = art::HashMap<UnorderedMapKeyType,
                                    ValueType,
                                    EmptyFn,
                                    HashFn,
                                    EqualsFn,
                                    TrackingAllocator<std::pair<ClosureType, ValueType>,
                                                      kAllocatorTagLambdaBoxTable>>;

  UnorderedMap map_                                          GUARDED_BY(Locks::lambda_table_lock_);
  bool allow_new_weaks_                                      GUARDED_BY(Locks::lambda_table_lock_);
  ConditionVariable new_weaks_condition_                     GUARDED_BY(Locks::lambda_table_lock_);

  // Shrink the map when we get below this load factor.
  // (This is an arbitrary value that should be small enough to prevent aggressive map erases
  // from shrinking the table too often.)
  static constexpr double kMinimumLoadFactor = UnorderedMap::kDefaultMinLoadFactor / 2;
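  //
  // Hypothetical sketch of the shrink policy described above (the method names are
  // assumed for illustration and are not guaranteed by this header): after erasing
  // entries during a sweep, the table would be shrunk once its load factor drops
  // below the threshold.
  //
  //   if (map_.CalculateLoadFactor() < kMinimumLoadFactor) {
  //     map_.ShrinkToMaximumLoad();
  //   }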

  DISALLOW_COPY_AND_ASSIGN(BoxTable);
};

}  // namespace lambda
}  // namespace art

#endif  // ART_RUNTIME_LAMBDA_BOX_TABLE_H_