/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_SCOPED_THREAD_STATE_CHANGE_H_
#define ART_RUNTIME_SCOPED_THREAD_STATE_CHANGE_H_

#include "base/casts.h"
#include "jni_internal.h"
#include "thread-inl.h"

namespace art {

// Scoped change into and out of a particular state. Handles Runnable transitions that require
// more complicated suspension checking. The subclasses ScopedObjectAccessUnchecked and
// ScopedObjectAccess are used to handle the change into Runnable to get direct access to
// objects; the unchecked variant doesn't aid annotalysis.
class ScopedThreadStateChange {
 public:
  ScopedThreadStateChange(Thread* self, ThreadState new_thread_state)
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) ALWAYS_INLINE
      : self_(self), thread_state_(new_thread_state), expected_has_no_thread_(false) {
    if (UNLIKELY(self_ == NULL)) {
      // Value chosen arbitrarily; it won't be used in the destructor since self_ == NULL.
      old_thread_state_ = kTerminated;
      MutexLock mu(NULL, *Locks::runtime_shutdown_lock_);
      Runtime* runtime = Runtime::Current();
      CHECK(runtime == NULL || !runtime->IsStarted() || runtime->IsShuttingDown());
    } else {
      bool runnable_transition;
      DCHECK_EQ(self, Thread::Current());
      // Read state without locks, ok as state is effectively thread local and we're not interested
      // in the suspend count (this will be handled in the runnable transitions).
      old_thread_state_ = self->GetState();
      runnable_transition = old_thread_state_ == kRunnable || new_thread_state == kRunnable;
      if (!runnable_transition) {
        // A transition between two effectively suspended states; ok to set the state directly.
        self_->SetState(new_thread_state);
      }

      if (runnable_transition && old_thread_state_ != new_thread_state) {
        if (new_thread_state == kRunnable) {
          self_->TransitionFromSuspendedToRunnable();
        } else {
          DCHECK_EQ(old_thread_state_, kRunnable);
          self_->TransitionFromRunnableToSuspended(new_thread_state);
        }
      }
    }
  }

  ~ScopedThreadStateChange() LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) ALWAYS_INLINE {
    if (UNLIKELY(self_ == NULL)) {
      if (!expected_has_no_thread_) {
        MutexLock mu(NULL, *Locks::runtime_shutdown_lock_);
        Runtime* runtime = Runtime::Current();
        bool shutting_down = (runtime == NULL) || runtime->IsShuttingDown();
        CHECK(shutting_down);
      }
    } else {
      if (old_thread_state_ != thread_state_) {
        if (old_thread_state_ == kRunnable) {
          self_->TransitionFromSuspendedToRunnable();
        } else if (thread_state_ == kRunnable) {
          self_->TransitionFromRunnableToSuspended(old_thread_state_);
        } else {
          // A transition between two effectively suspended states; ok to set the state directly.
          self_->SetState(old_thread_state_);
        }
      }
    }
  }

  Thread* Self() const {
    return self_;
  }

 protected:
  // Constructor used by ScopedObjectAccessUnchecked for an unattached thread that has access to
  // the JavaVM*.
  ScopedThreadStateChange()
      : self_(NULL), thread_state_(kTerminated), old_thread_state_(kTerminated),
        expected_has_no_thread_(true) {}

  Thread* const self_;
  const ThreadState thread_state_;

 private:
  ThreadState old_thread_state_;
  const bool expected_has_no_thread_;

  DISALLOW_COPY_AND_ASSIGN(ScopedThreadStateChange);
};

// Entry/exit processing for transitions from Native to Runnable (i.e. within JNI functions).
//
// This class performs the necessary thread state switching to and from Runnable and lets us
// amortize the cost of working out the current thread. Additionally it lets us check (and repair)
// apps that are using a JNIEnv on the wrong thread. It also decodes and encodes Objects into
// jobjects via its member functions. Performing this here enforces the Runnable thread state for
// use of Object, thereby preventing the Object from being modified by the GC while native or VM
// code is manipulating it.
//
// The destructor transitions back to the previous thread state, typically Native. In this state
// GC and thread suspension may occur.
//
// For annotalysis the subclass ScopedObjectAccess (below) makes it explicit that a share of the
// mutator_lock_ will be acquired on construction.
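//
// A sketch of a typical JNI entry point using the annotalysis-aware subclass below (illustrative
// only; the function and variable names are hypothetical):
//
//   static jobject MyClass_identity(JNIEnv* env, jobject java_this) {
//     ScopedObjectAccess soa(env);  // Native -> Runnable; shares the mutator_lock_.
//     mirror::Object* o = soa.Decode<mirror::Object*>(java_this);
//     // ... work with the raw Object while Runnable ...
//     return soa.AddLocalReference<jobject>(o);
//   }  // Runnable -> Native on destruction; GC and thread suspension may occur again.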
class ScopedObjectAccessUnchecked : public ScopedThreadStateChange {
 public:
  explicit ScopedObjectAccessUnchecked(JNIEnv* env)
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) ALWAYS_INLINE
      : ScopedThreadStateChange(ThreadForEnv(env), kRunnable),
        env_(reinterpret_cast<JNIEnvExt*>(env)), vm_(env_->vm) {
    self_->VerifyStack();
  }

  explicit ScopedObjectAccessUnchecked(Thread* self)
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      : ScopedThreadStateChange(self, kRunnable),
        env_(reinterpret_cast<JNIEnvExt*>(self->GetJniEnv())),
        vm_(env_ != NULL ? env_->vm : NULL) {
    self_->VerifyStack();
  }

  // Used when we want a scoped JNI thread state but have no thread/JNIEnv. Consequently doesn't
  // change into Runnable or acquire a share on the mutator_lock_.
  explicit ScopedObjectAccessUnchecked(JavaVM* vm)
      : ScopedThreadStateChange(), env_(NULL), vm_(reinterpret_cast<JavaVMExt*>(vm)) {}

  // Here purely to force inlining.
  ~ScopedObjectAccessUnchecked() ALWAYS_INLINE {
  }

  JNIEnvExt* Env() const {
    return env_;
  }

  JavaVMExt* Vm() const {
    return vm_;
  }

  /*
   * Add a local reference for an object to the indirect reference table associated with the
   * current stack frame.  When the native function returns, the reference will be discarded.
   *
   * We need to allow the same reference to be added multiple times, and cope with NULL.
   *
   * This will be called on otherwise unreferenced objects. We cannot do GC allocations here, and
   * it's best if we don't grab a mutex.
   */
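  // A minimal sketch of typical use from a JNI implementation (illustrative only; the variable
  // soa and the surrounding code are hypothetical):
  //
  //   jstring local = soa.AddLocalReference<jstring>(string_object);
  //   return local;  // Valid until the native frame returns and its local references are dropped.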
  template<typename T>
  T AddLocalReference(mirror::Object* obj) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    DCHECK_EQ(thread_state_, kRunnable);  // Don't work with raw objects in non-runnable states.
    if (obj == NULL) {
      return NULL;
    }

    DCHECK_NE((reinterpret_cast<uintptr_t>(obj) & 0xffff0000), 0xebad0000);

    IndirectReferenceTable& locals = Env()->locals;

    uint32_t cookie = Env()->local_ref_cookie;
    IndirectRef ref = locals.Add(cookie, obj);

#if 0  // TODO: fix this to understand PushLocalFrame, so we can turn it on.
    if (Env()->check_jni) {
      size_t entry_count = locals.Capacity();
      if (entry_count > 16) {
        LOG(WARNING) << "Warning: more than 16 JNI local references: "
                     << entry_count << " (most recent was a " << PrettyTypeOf(obj) << ")\n"
                     << Dumpable<IndirectReferenceTable>(locals);
        // TODO: LOG(FATAL) in a later release?
      }
    }
#endif

    if (Vm()->work_around_app_jni_bugs) {
      // Hand out direct pointers to support broken old apps.
      return reinterpret_cast<T>(obj);
    }

    return reinterpret_cast<T>(ref);
  }

  template<typename T>
  T Decode(jobject obj) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    Locks::mutator_lock_->AssertSharedHeld(Self());
    DCHECK_EQ(thread_state_, kRunnable);  // Don't work with raw objects in non-runnable states.
    return down_cast<T>(Self()->DecodeJObject(obj));
  }

  mirror::ArtField* DecodeField(jfieldID fid) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    Locks::mutator_lock_->AssertSharedHeld(Self());
    DCHECK_EQ(thread_state_, kRunnable);  // Don't work with raw objects in non-runnable states.
#ifdef MOVING_GARBAGE_COLLECTOR
    // TODO: we should make these unique weak globals if Field instances can ever move.
    UNIMPLEMENTED(WARNING);
#endif
    return reinterpret_cast<mirror::ArtField*>(fid);
  }

  jfieldID EncodeField(mirror::ArtField* field) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    Locks::mutator_lock_->AssertSharedHeld(Self());
    DCHECK_EQ(thread_state_, kRunnable);  // Don't work with raw objects in non-runnable states.
#ifdef MOVING_GARBAGE_COLLECTOR
    UNIMPLEMENTED(WARNING);
#endif
    return reinterpret_cast<jfieldID>(field);
  }

  mirror::ArtMethod* DecodeMethod(jmethodID mid) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    Locks::mutator_lock_->AssertSharedHeld(Self());
    DCHECK_EQ(thread_state_, kRunnable);  // Don't work with raw objects in non-runnable states.
#ifdef MOVING_GARBAGE_COLLECTOR
    // TODO: we should make these unique weak globals if Method instances can ever move.
    UNIMPLEMENTED(WARNING);
#endif
    return reinterpret_cast<mirror::ArtMethod*>(mid);
  }

  jmethodID EncodeMethod(mirror::ArtMethod* method) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    Locks::mutator_lock_->AssertSharedHeld(Self());
    DCHECK_EQ(thread_state_, kRunnable);  // Don't work with raw objects in non-runnable states.
#ifdef MOVING_GARBAGE_COLLECTOR
    UNIMPLEMENTED(WARNING);
#endif
    return reinterpret_cast<jmethodID>(method);
  }

 private:
  static Thread* ThreadForEnv(JNIEnv* env) {
    JNIEnvExt* full_env(reinterpret_cast<JNIEnvExt*>(env));
    return full_env->self;
  }

  // The full JNIEnv.
  JNIEnvExt* const env_;
  // The full JavaVM.
  JavaVMExt* const vm_;

  DISALLOW_COPY_AND_ASSIGN(ScopedObjectAccessUnchecked);
};

// Annotalysis-aware variant of the above.
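//
// A minimal usage sketch from runtime code that already has a Thread* (illustrative only; the
// enclosing function is hypothetical):
//
//   static std::string DescribeObject(Thread* self, jobject obj) {
//     ScopedObjectAccess soa(self);  // Acquires a share of the mutator_lock_ for annotalysis.
//     mirror::Object* o = soa.Decode<mirror::Object*>(obj);
//     return PrettyTypeOf(o);
//   }  // The share of the lock is released when soa (via its base classes) is destroyed.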
class ScopedObjectAccess : public ScopedObjectAccessUnchecked {
 public:
  explicit ScopedObjectAccess(JNIEnv* env)
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      SHARED_LOCK_FUNCTION(Locks::mutator_lock_) ALWAYS_INLINE
      : ScopedObjectAccessUnchecked(env) {
    Locks::mutator_lock_->AssertSharedHeld(Self());
  }

  explicit ScopedObjectAccess(Thread* self)
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      SHARED_LOCK_FUNCTION(Locks::mutator_lock_)
      : ScopedObjectAccessUnchecked(self) {
    Locks::mutator_lock_->AssertSharedHeld(Self());
  }

  ~ScopedObjectAccess() UNLOCK_FUNCTION(Locks::mutator_lock_) ALWAYS_INLINE {
    // The base class destructor, which runs after this one, releases the share of the lock.
  }

 private:
  // TODO: remove this constructor. It is used by check JNI's ScopedCheck to make it believe that
  //       routines operating with just a VM are sound; they are not, but when you have just a VM
  //       you cannot call the unsound routines.
  explicit ScopedObjectAccess(JavaVM* vm)
      SHARED_LOCK_FUNCTION(Locks::mutator_lock_)
      : ScopedObjectAccessUnchecked(vm) {}

  friend class ScopedCheck;
  DISALLOW_COPY_AND_ASSIGN(ScopedObjectAccess);
};

}  // namespace art

#endif  // ART_RUNTIME_SCOPED_THREAD_STATE_CHANGE_H_