/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_THREAD_H_
#define ART_RUNTIME_THREAD_H_

#include <bitset>
#include <deque>
#include <iosfwd>
#include <list>
#include <memory>
#include <setjmp.h>
#include <string>

#include "arch/context.h"
#include "arch/instruction_set.h"
#include "atomic.h"
#include "base/macros.h"
#include "base/mutex.h"
#include "entrypoints/jni/jni_entrypoints.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "globals.h"
#include "handle_scope.h"
#include "instrumentation.h"
#include "jvalue.h"
#include "object_callbacks.h"
#include "offsets.h"
#include "runtime_stats.h"
#include "stack.h"
#include "thread_state.h"

class BacktraceMap;

namespace art {

namespace gc {
namespace accounting {
  template<class T> class AtomicStack;
}  // namespace accounting
namespace collector {
  class SemiSpace;
}  // namespace collector
}  // namespace gc

namespace mirror {
  class Array;
  class Class;
  class ClassLoader;
  class Object;
  template<class T> class ObjectArray;
  template<class T> class PrimitiveArray;
  typedef PrimitiveArray<int32_t> IntArray;
  class StackTraceElement;
  class String;
  class Throwable;
}  // namespace mirror

namespace verifier {
class MethodVerifier;
}  // namespace verifier

class ArtMethod;
class BaseMutex;
class ClassLinker;
class Closure;
class Context;
struct DebugInvokeReq;
class DeoptimizationContextRecord;
class DexFile;
class FrameIdToShadowFrame;
class JavaVMExt;
struct JNIEnvExt;
class Monitor;
class Runtime;
class ScopedObjectAccessAlreadyRunnable;
class ShadowFrame;
class SingleStepControl;
class StackedShadowFrameRecord;
class Thread;
class ThreadList;

// Thread priorities. These must match the Thread.MIN_PRIORITY,
// Thread.NORM_PRIORITY, and Thread.MAX_PRIORITY constants.
enum ThreadPriority {
  kMinThreadPriority = 1,
  kNormThreadPriority = 5,
  kMaxThreadPriority = 10,
};

enum ThreadFlag {
  kSuspendRequest   = 1,  // If set implies that suspend_count_ > 0 and the Thread should enter the
                          // safepoint handler.
  kCheckpointRequest = 2,  // Request that the thread do some checkpoint work and then continue.
  kActiveSuspendBarrier = 4  // Register that at least 1 suspend barrier needs to be passed.
};

enum class StackedShadowFrameType {
  kShadowFrameUnderConstruction,
  kDeoptimizationShadowFrame,
  kSingleFrameDeoptimizationShadowFrame
};

// This should match RosAlloc::kNumThreadLocalSizeBrackets.
static constexpr size_t kNumRosAllocThreadLocalSizeBracketsInThread = 16;

// Thread's stack layout for implicit stack overflow checks:
//
//   +---------------------+  <- highest address of stack memory
//   |                     |
//   .                     .  <- SP
//   |                     |
//   |                     |
//   +---------------------+  <- stack_end
//   |                     |
//   |  Gap                |
//   |                     |
//   +---------------------+  <- stack_begin
//   |                     |
//   | Protected region    |
//   |                     |
//   +---------------------+  <- lowest address of stack memory
//
// The stack always grows down in memory.  At the lowest address is a region of memory
// that is set mprotect(PROT_NONE).  Any attempt to read/write to this region will
// result in a segmentation fault signal.  At any point, the thread's SP will be somewhere
// between the stack_end and the highest address in stack memory.  An implicit stack
// overflow check is a read of memory at a certain offset below the current SP (4K typically).
// If the thread's SP is below the stack_end address this will be a read into the protected
// region.  If the SP is above the stack_end address, the thread is guaranteed to have
// at least 4K of space.  Because stack overflow checks are only performed in generated code,
// if the thread makes a call out to a native function (through JNI), that native function
// might only have 4K of memory (if the SP is adjacent to stack_end).
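//
// Illustrative sketch (not the actual emitted instructions): the implicit check
// generated in compiled code is conceptually equivalent to
//
//   *(volatile uint8_t*)(sp - 4096);  // Faults in the protected region on overflow.
//
// where the resulting SIGSEGV is translated into a StackOverflowError by the
// fault handler.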

class Thread {
 public:
  static const size_t kStackOverflowImplicitCheckSize;

  // Creates a new native thread corresponding to the given managed peer.
  // Used to implement Thread.start.
  static void CreateNativeThread(JNIEnv* env, jobject peer, size_t stack_size, bool daemon);

  // Attaches the calling native thread to the runtime, returning the new native peer.
  // Used to implement JNI AttachCurrentThread and AttachCurrentThreadAsDaemon calls.
  static Thread* Attach(const char* thread_name, bool as_daemon, jobject thread_group,
                        bool create_peer);

  // Reset internal state of child thread after fork.
  void InitAfterFork();

  // Get the currently executing thread, frequently referred to as 'self'. This call has reasonably
  // high cost and so we favor passing self around when possible.
  // TODO: mark as PURE so the compiler may coalesce and remove?
  static Thread* Current();

  // On a runnable thread, check for pending thread suspension request and handle if pending.
  void AllowThreadSuspension() SHARED_REQUIRES(Locks::mutator_lock_);

  // Process pending thread suspension request and handle if pending.
  void CheckSuspend() SHARED_REQUIRES(Locks::mutator_lock_);

  static Thread* FromManagedThread(const ScopedObjectAccessAlreadyRunnable& ts,
                                   mirror::Object* thread_peer)
      REQUIRES(Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_)
      SHARED_REQUIRES(Locks::mutator_lock_);
  static Thread* FromManagedThread(const ScopedObjectAccessAlreadyRunnable& ts, jobject thread)
      REQUIRES(Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_)
      SHARED_REQUIRES(Locks::mutator_lock_);

  // Translates 172 to pAllocArrayFromCode and so on.
  template<size_t size_of_pointers>
  static void DumpThreadOffset(std::ostream& os, uint32_t offset);

  // Dumps a one-line summary of thread state (used for operator<<).
  void ShortDump(std::ostream& os) const;

  // Dumps the detailed thread state and the thread stack (used for SIGQUIT).
  void Dump(std::ostream& os,
            bool dump_native_stack = true,
            BacktraceMap* backtrace_map = nullptr) const
      REQUIRES(!Locks::thread_suspend_count_lock_)
      SHARED_REQUIRES(Locks::mutator_lock_);

  void DumpJavaStack(std::ostream& os) const
      REQUIRES(!Locks::thread_suspend_count_lock_)
      SHARED_REQUIRES(Locks::mutator_lock_);

  // Dumps the SIGQUIT per-thread header. 'thread' can be null for a non-attached thread, in which
  // case we use 'tid' to identify the thread, and we'll include as much information as we can.
  static void DumpState(std::ostream& os, const Thread* thread, pid_t tid)
      REQUIRES(!Locks::thread_suspend_count_lock_)
      SHARED_REQUIRES(Locks::mutator_lock_);

  ThreadState GetState() const {
    DCHECK_GE(tls32_.state_and_flags.as_struct.state, kTerminated);
    DCHECK_LE(tls32_.state_and_flags.as_struct.state, kSuspended);
    return static_cast<ThreadState>(tls32_.state_and_flags.as_struct.state);
  }

  ThreadState SetState(ThreadState new_state);

  int GetSuspendCount() const REQUIRES(Locks::thread_suspend_count_lock_) {
    return tls32_.suspend_count;
  }

  int GetDebugSuspendCount() const REQUIRES(Locks::thread_suspend_count_lock_) {
    return tls32_.debug_suspend_count;
  }

  bool IsSuspended() const {
    union StateAndFlags state_and_flags;
    state_and_flags.as_int = tls32_.state_and_flags.as_int;
    return state_and_flags.as_struct.state != kRunnable &&
        (state_and_flags.as_struct.flags & kSuspendRequest) != 0;
  }

  bool ModifySuspendCount(Thread* self, int delta, AtomicInteger* suspend_barrier, bool for_debugger)
      REQUIRES(Locks::thread_suspend_count_lock_);

  bool RequestCheckpoint(Closure* function)
      REQUIRES(Locks::thread_suspend_count_lock_);

  void SetFlipFunction(Closure* function);
  Closure* GetFlipFunction();

  gc::accounting::AtomicStack<mirror::Object>* GetThreadLocalMarkStack() {
    CHECK(kUseReadBarrier);
    return tlsPtr_.thread_local_mark_stack;
  }
  void SetThreadLocalMarkStack(gc::accounting::AtomicStack<mirror::Object>* stack) {
    CHECK(kUseReadBarrier);
    tlsPtr_.thread_local_mark_stack = stack;
  }

  // Called when this thread detects that thread_suspend_count_ is non-zero. Gives up its share
  // of the mutator_lock_ and waits until it is resumed and thread_suspend_count_ is zero.
  void FullSuspendCheck()
      REQUIRES(!Locks::thread_suspend_count_lock_)
      SHARED_REQUIRES(Locks::mutator_lock_);

  // Transition from non-runnable to runnable state acquiring share on mutator_lock_.
  ALWAYS_INLINE ThreadState TransitionFromSuspendedToRunnable()
      REQUIRES(!Locks::thread_suspend_count_lock_)
      SHARED_LOCK_FUNCTION(Locks::mutator_lock_);

  // Transition from runnable into a state where mutator privileges are denied. Releases share of
  // mutator lock.
  ALWAYS_INLINE void TransitionFromRunnableToSuspended(ThreadState new_state)
      REQUIRES(!Locks::thread_suspend_count_lock_, !Roles::uninterruptible_)
      UNLOCK_FUNCTION(Locks::mutator_lock_);

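  // Usage sketch (illustrative) pairing the two transitions:
  //
  //   ThreadState old_state = self->TransitionFromSuspendedToRunnable();
  //   // ... touch managed objects while holding a share of mutator_lock_ ...
  //   self->TransitionFromRunnableToSuspended(old_state);
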
  // Once called thread suspension will cause an assertion failure.
  const char* StartAssertNoThreadSuspension(const char* cause) ACQUIRE(Roles::uninterruptible_) {
    Roles::uninterruptible_.Acquire();  // No-op.
    if (kIsDebugBuild) {
      CHECK(cause != nullptr);
      const char* previous_cause = tlsPtr_.last_no_thread_suspension_cause;
      tls32_.no_thread_suspension++;
      tlsPtr_.last_no_thread_suspension_cause = cause;
      return previous_cause;
    } else {
      return nullptr;
    }
  }

  // End region where no thread suspension is expected.
  void EndAssertNoThreadSuspension(const char* old_cause) RELEASE(Roles::uninterruptible_) {
    if (kIsDebugBuild) {
      CHECK(old_cause != nullptr || tls32_.no_thread_suspension == 1);
      CHECK_GT(tls32_.no_thread_suspension, 0U);
      tls32_.no_thread_suspension--;
      tlsPtr_.last_no_thread_suspension_cause = old_cause;
    }
    Roles::uninterruptible_.Release();  // No-op.
  }

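  // Usage sketch (illustrative; the cause string is hypothetical). The calls
  // must be strictly paired:
  //
  //   const char* old_cause = self->StartAssertNoThreadSuspension("Example cause");
  //   // ... code that must not suspend ...
  //   self->EndAssertNoThreadSuspension(old_cause);
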
  void AssertThreadSuspensionIsAllowable(bool check_locks = true) const;

  bool IsDaemon() const {
    return tls32_.daemon;
  }

  size_t NumberOfHeldMutexes() const;

  bool HoldsLock(mirror::Object*) const SHARED_REQUIRES(Locks::mutator_lock_);

  /*
   * Changes the priority of this thread to match that of the java.lang.Thread object.
   *
   * We map a priority value from 1-10 to Linux "nice" values, where lower
   * numbers indicate higher priority.
   */
  void SetNativePriority(int newPriority);

  /*
   * Returns the thread priority for the current thread by querying the system.
   * This is useful when attaching a thread through JNI.
   *
   * Returns a value from 1 to 10 (compatible with java.lang.Thread values).
   */
  static int GetNativePriority();

  // Guaranteed to be non-zero.
  uint32_t GetThreadId() const {
    return tls32_.thin_lock_thread_id;
  }

  pid_t GetTid() const {
    return tls32_.tid;
  }

  // Returns the java.lang.Thread's name, or null if this Thread* doesn't have a peer.
  mirror::String* GetThreadName(const ScopedObjectAccessAlreadyRunnable& ts) const
      SHARED_REQUIRES(Locks::mutator_lock_);

  // Sets 'name' to the java.lang.Thread's name. This requires no transition to managed code,
  // allocation, or locking.
  void GetThreadName(std::string& name) const;

  // Sets the thread's name.
  void SetThreadName(const char* name) SHARED_REQUIRES(Locks::mutator_lock_);

  // Returns the thread-specific CPU-time clock in microseconds or -1 if unavailable.
  uint64_t GetCpuMicroTime() const;

  mirror::Object* GetPeer() const SHARED_REQUIRES(Locks::mutator_lock_) {
    CHECK(tlsPtr_.jpeer == nullptr);
    return tlsPtr_.opeer;
  }

  bool HasPeer() const {
    return tlsPtr_.jpeer != nullptr || tlsPtr_.opeer != nullptr;
  }

  RuntimeStats* GetStats() {
    return &tls64_.stats;
  }

  bool IsStillStarting() const;

  bool IsExceptionPending() const {
    return tlsPtr_.exception != nullptr;
  }

  mirror::Throwable* GetException() const SHARED_REQUIRES(Locks::mutator_lock_) {
    return tlsPtr_.exception;
  }

  void AssertPendingException() const;
  void AssertPendingOOMException() const SHARED_REQUIRES(Locks::mutator_lock_);
  void AssertNoPendingException() const;
  void AssertNoPendingExceptionForNewException(const char* msg) const;

  void SetException(mirror::Throwable* new_exception) SHARED_REQUIRES(Locks::mutator_lock_);

  void ClearException() SHARED_REQUIRES(Locks::mutator_lock_) {
    tlsPtr_.exception = nullptr;
  }

  // Find the catch block and perform a long jump to the appropriate exception handler.
  NO_RETURN void QuickDeliverException() SHARED_REQUIRES(Locks::mutator_lock_);

  Context* GetLongJumpContext();
  void ReleaseLongJumpContext(Context* context) {
    if (tlsPtr_.long_jump_context != nullptr) {
      // Each QuickExceptionHandler gets a long jump context and uses
      // it for doing the long jump, after finding catch blocks/doing deoptimization.
      // Both finding catch blocks and deoptimization can trigger another
      // exception, e.g. as a result of class loading. So there can be nested
      // cases of exception handling and multiple contexts being used.
      // ReleaseLongJumpContext tries to save the context in tlsPtr_.long_jump_context
      // for reuse so there is no need to allocate a new one each time a context
      // is requested. Since we only keep one context for reuse, delete the
      // existing one since the passed in context is yet to be used for longjump.
      delete tlsPtr_.long_jump_context;
    }
    tlsPtr_.long_jump_context = context;
  }

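  // Usage sketch (illustrative): callers borrow a context and hand it back so
  // it can be cached for reuse.
  //
  //   Context* context = self->GetLongJumpContext();
  //   // ... unwind and prepare the long jump using the context ...
  //   self->ReleaseLongJumpContext(context);
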
  // Get the current method and dex pc. If there are errors in retrieving the dex pc, this will
  // abort the runtime iff abort_on_error is true.
  ArtMethod* GetCurrentMethod(uint32_t* dex_pc, bool abort_on_error = true) const
      SHARED_REQUIRES(Locks::mutator_lock_);

  // Returns whether the given exception was thrown by the current Java method being executed
  // (Note that this includes native Java methods).
  bool IsExceptionThrownByCurrentMethod(mirror::Throwable* exception) const
      SHARED_REQUIRES(Locks::mutator_lock_);

  void SetTopOfStack(ArtMethod** top_method) {
    tlsPtr_.managed_stack.SetTopQuickFrame(top_method);
  }

  void SetTopOfShadowStack(ShadowFrame* top) {
    tlsPtr_.managed_stack.SetTopShadowFrame(top);
  }

  bool HasManagedStack() const {
    return (tlsPtr_.managed_stack.GetTopQuickFrame() != nullptr) ||
        (tlsPtr_.managed_stack.GetTopShadowFrame() != nullptr);
  }

  // If 'msg' is null, no detail message is set.
  void ThrowNewException(const char* exception_class_descriptor, const char* msg)
      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);

  // If 'msg' is null, no detail message is set. An exception must be pending, and will be
  // used as the new exception's cause.
  void ThrowNewWrappedException(const char* exception_class_descriptor, const char* msg)
      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);

  void ThrowNewExceptionF(const char* exception_class_descriptor, const char* fmt, ...)
      __attribute__((format(printf, 3, 4)))
      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);

  void ThrowNewExceptionV(const char* exception_class_descriptor, const char* fmt, va_list ap)
      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);

  // OutOfMemoryError is special, because we need to pre-allocate an instance.
  // Only the GC should call this.
  void ThrowOutOfMemoryError(const char* msg) SHARED_REQUIRES(Locks::mutator_lock_)
      REQUIRES(!Roles::uninterruptible_);

  static void Startup();
  static void FinishStartup();
  static void Shutdown();

  // JNI methods
  JNIEnvExt* GetJniEnv() const {
    return tlsPtr_.jni_env;
  }

  // Convert a jobject into a mirror::Object*.
  mirror::Object* DecodeJObject(jobject obj) const SHARED_REQUIRES(Locks::mutator_lock_);
  // Checks if the weak global ref has been cleared by the GC without decoding it.
  bool IsJWeakCleared(jweak obj) const SHARED_REQUIRES(Locks::mutator_lock_);

  mirror::Object* GetMonitorEnterObject() const SHARED_REQUIRES(Locks::mutator_lock_) {
    return tlsPtr_.monitor_enter_object;
  }

  void SetMonitorEnterObject(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_) {
    tlsPtr_.monitor_enter_object = obj;
  }

  // Implements java.lang.Thread.interrupted.
  bool Interrupted() REQUIRES(!*wait_mutex_);
  // Implements java.lang.Thread.isInterrupted.
  bool IsInterrupted() REQUIRES(!*wait_mutex_);
  bool IsInterruptedLocked() REQUIRES(wait_mutex_) {
    return interrupted_;
  }
  void Interrupt(Thread* self) REQUIRES(!*wait_mutex_);
  void SetInterruptedLocked(bool i) REQUIRES(wait_mutex_) {
    interrupted_ = i;
  }
  void Notify() REQUIRES(!*wait_mutex_);

 private:
  void NotifyLocked(Thread* self) REQUIRES(wait_mutex_);

 public:
  Mutex* GetWaitMutex() const LOCK_RETURNED(wait_mutex_) {
    return wait_mutex_;
  }

  ConditionVariable* GetWaitConditionVariable() const REQUIRES(wait_mutex_) {
    return wait_cond_;
  }

  Monitor* GetWaitMonitor() const REQUIRES(wait_mutex_) {
    return wait_monitor_;
  }

  void SetWaitMonitor(Monitor* mon) REQUIRES(wait_mutex_) {
    wait_monitor_ = mon;
  }

  // Waiter linked-list support.
  Thread* GetWaitNext() const {
    return tlsPtr_.wait_next;
  }

  void SetWaitNext(Thread* next) {
    tlsPtr_.wait_next = next;
  }

  jobject GetClassLoaderOverride() {
    return tlsPtr_.class_loader_override;
  }

  void SetClassLoaderOverride(jobject class_loader_override);

  // Create the internal representation of a stack trace, which is more time-
  // and space-efficient to compute than a StackTraceElement[].
  template<bool kTransactionActive>
  jobject CreateInternalStackTrace(const ScopedObjectAccessAlreadyRunnable& soa) const
      SHARED_REQUIRES(Locks::mutator_lock_);

  // Convert an internal stack trace representation (returned by CreateInternalStackTrace) to a
  // StackTraceElement[]. If output_array is null, a new array is created, otherwise as many
  // frames as will fit are written into the given array. If stack_depth is non-null, it's updated
  // with the number of valid frames in the returned array.
  static jobjectArray InternalStackTraceToStackTraceElementArray(
      const ScopedObjectAccessAlreadyRunnable& soa, jobject internal,
      jobjectArray output_array = nullptr, int* stack_depth = nullptr)
      SHARED_REQUIRES(Locks::mutator_lock_);

  bool HasDebuggerShadowFrames() const {
    return tlsPtr_.frame_id_to_shadow_frame != nullptr;
  }

  void VisitRoots(RootVisitor* visitor) SHARED_REQUIRES(Locks::mutator_lock_);

  ALWAYS_INLINE void VerifyStack() SHARED_REQUIRES(Locks::mutator_lock_);

  //
  // Offsets of various members of native Thread class, used by compiled code.
  //

  template<size_t pointer_size>
  static ThreadOffset<pointer_size> ThinLockIdOffset() {
    return ThreadOffset<pointer_size>(
        OFFSETOF_MEMBER(Thread, tls32_) +
        OFFSETOF_MEMBER(tls_32bit_sized_values, thin_lock_thread_id));
  }

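  // Usage sketch (illustrative): compiled code addresses these fields relative
  // to the Thread* ("self") register, and runtime code can query the raw
  // offset, e.g.
  //
  //   uint32_t offset = Thread::ThinLockIdOffset<sizeof(void*)>().Uint32Value();
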
  template<size_t pointer_size>
  static ThreadOffset<pointer_size> ThreadFlagsOffset() {
    return ThreadOffset<pointer_size>(
        OFFSETOF_MEMBER(Thread, tls32_) +
        OFFSETOF_MEMBER(tls_32bit_sized_values, state_and_flags));
  }

  template<size_t pointer_size>
  static ThreadOffset<pointer_size> IsGcMarkingOffset() {
    return ThreadOffset<pointer_size>(
        OFFSETOF_MEMBER(Thread, tls32_) +
        OFFSETOF_MEMBER(tls_32bit_sized_values, is_gc_marking));
  }

  // Deoptimize the Java stack.
  void DeoptimizeWithDeoptimizationException(JValue* result) SHARED_REQUIRES(Locks::mutator_lock_);

 private:
  template<size_t pointer_size>
  static ThreadOffset<pointer_size> ThreadOffsetFromTlsPtr(size_t tls_ptr_offset) {
    size_t base = OFFSETOF_MEMBER(Thread, tlsPtr_);
    size_t scale;
    size_t shrink;
    if (pointer_size == sizeof(void*)) {
      scale = 1;
      shrink = 1;
    } else if (pointer_size > sizeof(void*)) {
      scale = pointer_size / sizeof(void*);
      shrink = 1;
    } else {
      DCHECK_GT(sizeof(void*), pointer_size);
      scale = 1;
      shrink = sizeof(void*) / pointer_size;
    }
    return ThreadOffset<pointer_size>(base + ((tls_ptr_offset * scale) / shrink));
  }
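
  // Worked example (illustrative): cross-compiling from a 32-bit host
  // (sizeof(void*) == 4) for a 64-bit target (pointer_size == 8) gives
  // scale == 2 and shrink == 1, so the host offset of the 4th pointer-sized
  // slot (12) maps to target offset 24, as expected for 8-byte slots.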

 public:
  static uint32_t QuickEntryPointOffsetWithSize(size_t quick_entrypoint_offset,
                                                size_t pointer_size) {
    DCHECK(pointer_size == 4 || pointer_size == 8) << pointer_size;
    if (pointer_size == 4) {
      return QuickEntryPointOffset<4>(quick_entrypoint_offset).Uint32Value();
    } else {
      return QuickEntryPointOffset<8>(quick_entrypoint_offset).Uint32Value();
    }
  }

  template<size_t pointer_size>
  static ThreadOffset<pointer_size> QuickEntryPointOffset(size_t quick_entrypoint_offset) {
    return ThreadOffsetFromTlsPtr<pointer_size>(
        OFFSETOF_MEMBER(tls_ptr_sized_values, quick_entrypoints) + quick_entrypoint_offset);
  }

  template<size_t pointer_size>
  static ThreadOffset<pointer_size> JniEntryPointOffset(size_t jni_entrypoint_offset) {
    return ThreadOffsetFromTlsPtr<pointer_size>(
        OFFSETOF_MEMBER(tls_ptr_sized_values, jni_entrypoints) + jni_entrypoint_offset);
  }

  template<size_t pointer_size>
  static ThreadOffset<pointer_size> SelfOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, self));
  }

  template<size_t pointer_size>
  static ThreadOffset<pointer_size> MterpCurrentIBaseOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(
        OFFSETOF_MEMBER(tls_ptr_sized_values, mterp_current_ibase));
  }

  template<size_t pointer_size>
  static ThreadOffset<pointer_size> MterpDefaultIBaseOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(
        OFFSETOF_MEMBER(tls_ptr_sized_values, mterp_default_ibase));
  }

  template<size_t pointer_size>
  static ThreadOffset<pointer_size> MterpAltIBaseOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(
        OFFSETOF_MEMBER(tls_ptr_sized_values, mterp_alt_ibase));
  }

  template<size_t pointer_size>
  static ThreadOffset<pointer_size> ExceptionOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, exception));
  }

  template<size_t pointer_size>
  static ThreadOffset<pointer_size> PeerOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, opeer));
  }


  template<size_t pointer_size>
  static ThreadOffset<pointer_size> CardTableOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, card_table));
  }

  template<size_t pointer_size>
  static ThreadOffset<pointer_size> ThreadSuspendTriggerOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(
        OFFSETOF_MEMBER(tls_ptr_sized_values, suspend_trigger));
  }

  template<size_t pointer_size>
  static ThreadOffset<pointer_size> ThreadLocalPosOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, thread_local_pos));
  }

  template<size_t pointer_size>
  static ThreadOffset<pointer_size> ThreadLocalEndOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, thread_local_end));
  }

  template<size_t pointer_size>
  static ThreadOffset<pointer_size> ThreadLocalObjectsOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, thread_local_objects));
  }

  template<size_t pointer_size>
  static ThreadOffset<pointer_size> RosAllocRunsOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
                                                                rosalloc_runs));
  }

  template<size_t pointer_size>
  static ThreadOffset<pointer_size> ThreadLocalAllocStackTopOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
                                                                thread_local_alloc_stack_top));
  }

  template<size_t pointer_size>
  static ThreadOffset<pointer_size> ThreadLocalAllocStackEndOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
                                                                thread_local_alloc_stack_end));
  }

  // Size of stack less any space reserved for stack overflow
  size_t GetStackSize() const {
    return tlsPtr_.stack_size - (tlsPtr_.stack_end - tlsPtr_.stack_begin);
  }

  uint8_t* GetStackEndForInterpreter(bool implicit_overflow_check) const {
    if (implicit_overflow_check) {
      // The interpreter needs the extra overflow bytes that stack_end does
      // not include.
      return tlsPtr_.stack_end + GetStackOverflowReservedBytes(kRuntimeISA);
    } else {
      return tlsPtr_.stack_end;
    }
  }

  uint8_t* GetStackEnd() const {
    return tlsPtr_.stack_end;
  }

  // Set the stack end to the value to be used while handling a stack overflow
  void SetStackEndForStackOverflow() SHARED_REQUIRES(Locks::mutator_lock_);

  // Set the stack end to the value to be used during regular execution
  void ResetDefaultStackEnd() {
    // Our stacks grow down, so we want stack_end_ to be near there, but reserving enough room
    // to throw a StackOverflowError.
    tlsPtr_.stack_end = tlsPtr_.stack_begin + GetStackOverflowReservedBytes(kRuntimeISA);
  }

  // Install the protected region for implicit stack checks.
  void InstallImplicitProtection();

  bool IsHandlingStackOverflow() const {
    return tlsPtr_.stack_end == tlsPtr_.stack_begin;
  }

  template<size_t pointer_size>
  static ThreadOffset<pointer_size> StackEndOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(
        OFFSETOF_MEMBER(tls_ptr_sized_values, stack_end));
  }

  template<size_t pointer_size>
  static ThreadOffset<pointer_size> JniEnvOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(
        OFFSETOF_MEMBER(tls_ptr_sized_values, jni_env));
  }

  template<size_t pointer_size>
  static ThreadOffset<pointer_size> TopOfManagedStackOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(
        OFFSETOF_MEMBER(tls_ptr_sized_values, managed_stack) +
        ManagedStack::TopQuickFrameOffset());
  }

  const ManagedStack* GetManagedStack() const {
    return &tlsPtr_.managed_stack;
  }

  // Linked list recording fragments of managed stack.
  void PushManagedStackFragment(ManagedStack* fragment) {
    tlsPtr_.managed_stack.PushManagedStackFragment(fragment);
  }
  void PopManagedStackFragment(const ManagedStack& fragment) {
    tlsPtr_.managed_stack.PopManagedStackFragment(fragment);
  }

  ShadowFrame* PushShadowFrame(ShadowFrame* new_top_frame) {
    return tlsPtr_.managed_stack.PushShadowFrame(new_top_frame);
  }

  ShadowFrame* PopShadowFrame() {
    return tlsPtr_.managed_stack.PopShadowFrame();
  }

  template<size_t pointer_size>
  static ThreadOffset<pointer_size> TopShadowFrameOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(
        OFFSETOF_MEMBER(tls_ptr_sized_values, managed_stack) +
        ManagedStack::TopShadowFrameOffset());
  }

  // Number of references allocated in JNI ShadowFrames on this thread.
  size_t NumJniShadowFrameReferences() const SHARED_REQUIRES(Locks::mutator_lock_) {
    return tlsPtr_.managed_stack.NumJniShadowFrameReferences();
  }

  // Number of references in handle scope on this thread.
  size_t NumHandleReferences();

  // Number of references allocated in handle scopes & JNI shadow frames on this thread.
  size_t NumStackReferences() SHARED_REQUIRES(Locks::mutator_lock_) {
    return NumHandleReferences() + NumJniShadowFrameReferences();
  }

  // Is the given obj in this thread's stack indirect reference table?
  bool HandleScopeContains(jobject obj) const;

  void HandleScopeVisitRoots(RootVisitor* visitor, uint32_t thread_id)
      SHARED_REQUIRES(Locks::mutator_lock_);

  HandleScope* GetTopHandleScope() {
    return tlsPtr_.top_handle_scope;
  }

  void PushHandleScope(HandleScope* handle_scope) {
    DCHECK_EQ(handle_scope->GetLink(), tlsPtr_.top_handle_scope);
    tlsPtr_.top_handle_scope = handle_scope;
  }

  HandleScope* PopHandleScope() {
    HandleScope* handle_scope = tlsPtr_.top_handle_scope;
    DCHECK(handle_scope != nullptr);
    tlsPtr_.top_handle_scope = tlsPtr_.top_handle_scope->GetLink();
    return handle_scope;
  }

  template<size_t pointer_size>
  static ThreadOffset<pointer_size> TopHandleScopeOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
                                                                top_handle_scope));
  }

  DebugInvokeReq* GetInvokeReq() const {
    return tlsPtr_.debug_invoke_req;
  }

  SingleStepControl* GetSingleStepControl() const {
    return tlsPtr_.single_step_control;
  }

  // Indicates whether this thread is ready to invoke a method for debugging. This
  // is only true if the thread has been suspended by a debug event.
  bool IsReadyForDebugInvoke() const {
    return tls32_.ready_for_debug_invoke;
  }

  void SetReadyForDebugInvoke(bool ready) {
    tls32_.ready_for_debug_invoke = ready;
  }

  bool IsDebugMethodEntry() const {
    return tls32_.debug_method_entry_;
  }

  void SetDebugMethodEntry() {
    tls32_.debug_method_entry_ = true;
  }

  void ClearDebugMethodEntry() {
    tls32_.debug_method_entry_ = false;
  }

  bool GetIsGcMarking() const {
    CHECK(kUseReadBarrier);
    return tls32_.is_gc_marking;
  }

  void SetIsGcMarking(bool is_marking) {
    CHECK(kUseReadBarrier);
    tls32_.is_gc_marking = is_marking;
  }

  bool GetWeakRefAccessEnabled() const {
    CHECK(kUseReadBarrier);
    return tls32_.weak_ref_access_enabled;
  }

  void SetWeakRefAccessEnabled(bool enabled) {
    CHECK(kUseReadBarrier);
    tls32_.weak_ref_access_enabled = enabled;
  }

  uint32_t GetDisableThreadFlipCount() const {
    CHECK(kUseReadBarrier);
    return tls32_.disable_thread_flip_count;
  }

  void IncrementDisableThreadFlipCount() {
    CHECK(kUseReadBarrier);
    ++tls32_.disable_thread_flip_count;
  }

  void DecrementDisableThreadFlipCount() {
    CHECK(kUseReadBarrier);
    DCHECK_GT(tls32_.disable_thread_flip_count, 0U);
    --tls32_.disable_thread_flip_count;
  }

  // Activates single step control for debugging. The thread takes ownership of
  // the given SingleStepControl*. It is deleted by a call to
  // DeactivateSingleStepControl or upon thread destruction.
  void ActivateSingleStepControl(SingleStepControl* ssc);

  // Deactivates single step control for debugging.
  void DeactivateSingleStepControl();

  // Sets the debug invoke request for debugging. When the thread is resumed,
  // it executes the method described by this request, then sends the reply
  // before suspending itself. The thread takes ownership of the given
  // DebugInvokeReq*. It is deleted by a call to ClearDebugInvokeReq.
  void SetDebugInvokeReq(DebugInvokeReq* req);

  // Clears the debug invoke request for debugging. When the thread completes
  // method invocation, it deletes its debug invoke request and suspends
  // itself.
  void ClearDebugInvokeReq();

  // Returns the fake exception used to activate deoptimization.
  static mirror::Throwable* GetDeoptimizationException() {
    return reinterpret_cast<mirror::Throwable*>(-1);
  }

  // Currently deoptimization invokes the verifier, which can trigger class loading
  // and execute Java code, so there might be nested deoptimizations happening.
  // We need to save the ongoing deoptimization shadow frames and return
  // values on stacks.
  // 'from_code' denotes whether the deoptimization was explicitly triggered from
  // compiled code.
  void PushDeoptimizationContext(const JValue& return_value,
                                 bool is_reference,
                                 bool from_code,
                                 mirror::Throwable* exception)
      SHARED_REQUIRES(Locks::mutator_lock_);
  void PopDeoptimizationContext(JValue* result, mirror::Throwable** exception, bool* from_code)
      SHARED_REQUIRES(Locks::mutator_lock_);
  void AssertHasDeoptimizationContext()
      SHARED_REQUIRES(Locks::mutator_lock_);
  void PushStackedShadowFrame(ShadowFrame* sf, StackedShadowFrameType type);
  ShadowFrame* PopStackedShadowFrame(StackedShadowFrameType type, bool must_be_present = true);

  // For debugger, find the shadow frame that corresponds to a frame id.
  // Or return null if there is none.
  ShadowFrame* FindDebuggerShadowFrame(size_t frame_id)
      SHARED_REQUIRES(Locks::mutator_lock_);
  // For debugger, find the bool array that keeps track of the updated vreg set
  // for a frame id.
  bool* GetUpdatedVRegFlags(size_t frame_id) SHARED_REQUIRES(Locks::mutator_lock_);
  // For debugger, find the shadow frame that corresponds to a frame id. If
  // one doesn't exist yet, create one and track it in frame_id_to_shadow_frame.
  ShadowFrame* FindOrCreateDebuggerShadowFrame(size_t frame_id,
                                               uint32_t num_vregs,
                                               ArtMethod* method,
                                               uint32_t dex_pc)
      SHARED_REQUIRES(Locks::mutator_lock_);

  // Delete the entry that maps from frame_id to shadow_frame.
  void RemoveDebuggerShadowFrameMapping(size_t frame_id)
      SHARED_REQUIRES(Locks::mutator_lock_);

  std::deque<instrumentation::InstrumentationStackFrame>* GetInstrumentationStack() {
    return tlsPtr_.instrumentation_stack;
  }

  std::vector<ArtMethod*>* GetStackTraceSample() const {
    return tlsPtr_.stack_trace_sample;
  }

  void SetStackTraceSample(std::vector<ArtMethod*>* sample) {
    tlsPtr_.stack_trace_sample = sample;
  }

  uint64_t GetTraceClockBase() const {
    return tls64_.trace_clock_base;
  }

  void SetTraceClockBase(uint64_t clock_base) {
    tls64_.trace_clock_base = clock_base;
  }

  BaseMutex* GetHeldMutex(LockLevel level) const {
    return tlsPtr_.held_mutexes[level];
  }

  void SetHeldMutex(LockLevel level, BaseMutex* mutex) {
    tlsPtr_.held_mutexes[level] = mutex;
  }

  void RunCheckpointFunction();

  bool PassActiveSuspendBarriers(Thread* self)
      REQUIRES(!Locks::thread_suspend_count_lock_);

  void ClearSuspendBarrier(AtomicInteger* target)
      REQUIRES(Locks::thread_suspend_count_lock_);

  bool ReadFlag(ThreadFlag flag) const {
    return (tls32_.state_and_flags.as_struct.flags & flag) != 0;
  }

  bool TestAllFlags() const {
    return (tls32_.state_and_flags.as_struct.flags != 0);
  }

  void AtomicSetFlag(ThreadFlag flag) {
    tls32_.state_and_flags.as_atomic_int.FetchAndOrSequentiallyConsistent(flag);
  }

  void AtomicClearFlag(ThreadFlag flag) {
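    // Note: -1 ^ flag is ~flag as an int, i.e. all bits set except this flag's bit.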
    tls32_.state_and_flags.as_atomic_int.FetchAndAndSequentiallyConsistent(-1 ^ flag);
  }

  void ResetQuickAllocEntryPointsForThread();

  // Returns the remaining space in the TLAB.
  size_t TlabSize() const;
  // Doesn't check that there is room.
  mirror::Object* AllocTlab(size_t bytes);
  void SetTlab(uint8_t* start, uint8_t* end);
  bool HasTlab() const;
  uint8_t* GetTlabStart() {
    return tlsPtr_.thread_local_start;
  }
  uint8_t* GetTlabPos() {
    return tlsPtr_.thread_local_pos;
  }

  // Remove the suspend trigger for this thread by making the suspend_trigger_ TLS value
  // equal to a valid pointer.
  // TODO: does this need to be atomic?  I don't think so.
  void RemoveSuspendTrigger() {
    tlsPtr_.suspend_trigger = reinterpret_cast<uintptr_t*>(&tlsPtr_.suspend_trigger);
  }

  // Trigger a suspend check by making the suspend_trigger_ TLS value an invalid pointer.
  // The next time a suspend check is done, it will load from the value at this address
  // and trigger a SIGSEGV.
  void TriggerSuspend() {
    tlsPtr_.suspend_trigger = nullptr;
  }

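  // Illustrative sketch (not the actual emitted instructions): the implicit
  // suspend check in compiled code is conceptually
  //
  //   *(volatile uintptr_t*)tlsPtr_.suspend_trigger;  // SIGSEGVs when the trigger is null.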

  // Push an object onto the allocation stack.
  bool PushOnThreadLocalAllocationStack(mirror::Object* obj)
      SHARED_REQUIRES(Locks::mutator_lock_);

  // Set the thread local allocation pointers to the given pointers.
  void SetThreadLocalAllocationStack(StackReference<mirror::Object>* start,
                                     StackReference<mirror::Object>* end);

  // Resets the thread local allocation pointers.
  void RevokeThreadLocalAllocationStack();

  size_t GetThreadLocalBytesAllocated() const {
    return tlsPtr_.thread_local_end - tlsPtr_.thread_local_start;
  }

  size_t GetThreadLocalObjectsAllocated() const {
    return tlsPtr_.thread_local_objects;
  }

  void* GetRosAllocRun(size_t index) const {
    return tlsPtr_.rosalloc_runs[index];
  }

  void SetRosAllocRun(size_t index, void* run) {
    tlsPtr_.rosalloc_runs[index] = run;
  }

  bool ProtectStack(bool fatal_on_error = true);
  bool UnprotectStack();

  void SetMterpDefaultIBase(void* ibase) {
    tlsPtr_.mterp_default_ibase = ibase;
  }

  void SetMterpCurrentIBase(void* ibase) {
    tlsPtr_.mterp_current_ibase = ibase;
  }

  void SetMterpAltIBase(void* ibase) {
    tlsPtr_.mterp_alt_ibase = ibase;
  }

  const void* GetMterpDefaultIBase() const {
    return tlsPtr_.mterp_default_ibase;
  }

  const void* GetMterpCurrentIBase() const {
    return tlsPtr_.mterp_current_ibase;
  }

  const void* GetMterpAltIBase() const {
    return tlsPtr_.mterp_alt_ibase;
  }

  void NoteSignalBeingHandled() {
    if (tls32_.handling_signal_) {
      LOG(FATAL) << "Detected signal while processing a signal";
    }
    tls32_.handling_signal_ = true;
  }

  void NoteSignalHandlerDone() {
    tls32_.handling_signal_ = false;
  }

  jmp_buf* GetNestedSignalState() {
    return tlsPtr_.nested_signal_state;
  }

  bool IsSuspendedAtSuspendCheck() const {
    return tls32_.suspended_at_suspend_check;
  }

  void PushVerifier(verifier::MethodVerifier* verifier);
  void PopVerifier(verifier::MethodVerifier* verifier);

  void InitStringEntryPoints();

  void ModifyDebugDisallowReadBarrier(int8_t delta) {
    debug_disallow_read_barrier_ += delta;
  }

  uint8_t GetDebugDisallowReadBarrierCount() const {
    return debug_disallow_read_barrier_;
  }

  // Returns true if the current thread is the jit sensitive thread.
  bool IsJitSensitiveThread() const {
    return this == jit_sensitive_thread_;
  }

  // Returns true if StrictMode events are traced for the current thread.
  static bool IsSensitiveThread() {
    if (is_sensitive_thread_hook_ != nullptr) {
      return (*is_sensitive_thread_hook_)();
    }
    return false;
  }

 private:
  explicit Thread(bool daemon);
  ~Thread() REQUIRES(!Locks::mutator_lock_, !Locks::thread_suspend_count_lock_);
  void Destroy();

  void CreatePeer(const char* name, bool as_daemon, jobject thread_group);

  template<bool kTransactionActive>
  void InitPeer(ScopedObjectAccess& soa, jboolean thread_is_daemon, jobject thread_group,
                jobject thread_name, jint thread_priority)
      SHARED_REQUIRES(Locks::mutator_lock_);

  // Avoid use; callers should use SetState. Used only by SignalCatcher::HandleSigQuit, ~Thread and
  // Dbg::Disconnected.
  ThreadState SetStateUnsafe(ThreadState new_state) {
    ThreadState old_state = GetState();
    if (old_state == kRunnable && new_state != kRunnable) {
      // Need to run pending checkpoint and suspend barriers. Run checkpoints in runnable state in
      // case they need to use a ScopedObjectAccess. If we are holding the mutator lock and a SOA
      // attempts to TransitionFromSuspendedToRunnable, it results in a deadlock.
      TransitionToSuspendedAndRunCheckpoints(new_state);
      // Since we transitioned to a suspended state, check the pass barrier requests.
      PassActiveSuspendBarriers();
    } else {
      tls32_.state_and_flags.as_struct.state = new_state;
    }
    return old_state;
  }

  void VerifyStackImpl() SHARED_REQUIRES(Locks::mutator_lock_);

  void DumpState(std::ostream& os) const SHARED_REQUIRES(Locks::mutator_lock_);
  void DumpStack(std::ostream& os,
                 bool dump_native_stack = true,
                 BacktraceMap* backtrace_map = nullptr) const
      REQUIRES(!Locks::thread_suspend_count_lock_)
      SHARED_REQUIRES(Locks::mutator_lock_);

  // Out-of-line conveniences for debugging in gdb.
  static Thread* CurrentFromGdb();  // Like Thread::Current.
  // Like Thread::Dump(std::cerr).
  void DumpFromGdb() const SHARED_REQUIRES(Locks::mutator_lock_);

  static void* CreateCallback(void* arg);

  void HandleUncaughtExceptions(ScopedObjectAccess& soa)
      SHARED_REQUIRES(Locks::mutator_lock_);
  void RemoveFromThreadGroup(ScopedObjectAccess& soa) SHARED_REQUIRES(Locks::mutator_lock_);

  // Initialize a thread.
  //
  // The third parameter is not mandatory. If given, the thread will use this JNIEnvExt. In case
  // Init succeeds, this means the thread takes ownership of it. If Init fails, it is the caller's
  // responsibility to destroy the given JNIEnvExt. If the parameter is null, Init will try to
  // create a JNIEnvExt on its own (and potentially fail at that stage, indicated by a return value
  // of false).
  bool Init(ThreadList*, JavaVMExt*, JNIEnvExt* jni_env_ext = nullptr)
      REQUIRES(Locks::runtime_shutdown_lock_);
  void InitCardTable();
  void InitCpu();
  void CleanupCpu();
  void InitTlsEntryPoints();
  void InitTid();
  void InitPthreadKeySelf();
  bool InitStackHwm();

  void SetUpAlternateSignalStack();
  void TearDownAlternateSignalStack();

  ALWAYS_INLINE void TransitionToSuspendedAndRunCheckpoints(ThreadState new_state)
      REQUIRES(!Locks::thread_suspend_count_lock_, !Roles::uninterruptible_);

  ALWAYS_INLINE void PassActiveSuspendBarriers()
      REQUIRES(!Locks::thread_suspend_count_lock_, !Roles::uninterruptible_);

  // Registers the current thread as the jit sensitive thread. Should be called just once.
  static void SetJitSensitiveThread() {
    if (jit_sensitive_thread_ == nullptr) {
      jit_sensitive_thread_ = Thread::Current();
    } else {
      LOG(WARNING) << "Attempt to set the sensitive thread twice. Tid:"
          << Thread::Current()->GetTid();
    }
  }

  static void SetSensitiveThreadHook(bool (*is_sensitive_thread_hook)()) {
    is_sensitive_thread_hook_ = is_sensitive_thread_hook;
  }

  // 32 bits of atomically changed state and flags. Keeping as 32 bits allows an atomic CAS to
  // change from being Suspended to Runnable without a suspend request occurring.
  union PACKED(4) StateAndFlags {
    StateAndFlags() {}
    struct PACKED(4) {
      // Bitfield of flag values. Must be changed atomically so that flag values aren't lost. See
      // ThreadFlags for bit field meanings.
      volatile uint16_t flags;
      // Holds the ThreadState. May be changed non-atomically between Suspended (i.e. not Runnable)
      // transitions. Changing to Runnable requires that the suspend_request be part of the atomic
      // operation. If a thread is suspended and a suspend_request is present, a thread may not
      // change to Runnable as a GC or other operation is in progress.
      volatile uint16_t state;
    } as_struct;
    AtomicInteger as_atomic_int;
    volatile int32_t as_int;

   private:
    // gcc does not handle struct with volatile member assignments correctly.
    // See http://gcc.gnu.org/bugzilla/show_bug.cgi?id=47409
    DISALLOW_COPY_AND_ASSIGN(StateAndFlags);
  };
  static_assert(sizeof(StateAndFlags) == sizeof(int32_t), "Weird state_and_flags size");

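  // Illustrative sketch (assuming Atomic<int32_t> exposes a weak compare-exchange,
  // as atomic.h does) of how the packing enables a combined check-and-transition:
  //
  //   union StateAndFlags old_sf, new_sf;
  //   old_sf.as_int = tls32_.state_and_flags.as_int;
  //   if ((old_sf.as_struct.flags & kSuspendRequest) != 0) {
  //     // Must not become Runnable while a suspend request is pending.
  //   }
  //   new_sf.as_int = old_sf.as_int;
  //   new_sf.as_struct.state = kRunnable;
  //   tls32_.state_and_flags.as_atomic_int.CompareExchangeWeakSequentiallyConsistent(
  //       old_sf.as_int, new_sf.as_int);
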
  static void ThreadExitCallback(void* arg);

  // Maximum number of checkpoint functions.
  static constexpr uint32_t kMaxCheckpoints = 3;

  // Maximum number of suspend barriers.
  static constexpr uint32_t kMaxSuspendBarriers = 3;

  // Has Thread::Startup been called?
  static bool is_started_;

  // TLS key used to retrieve the Thread*.
  static pthread_key_t pthread_key_self_;

  // Used to notify threads that they should attempt to resume; they will suspend again if
  // their suspend count is > 0.
   1238   static ConditionVariable* resume_cond_ GUARDED_BY(Locks::thread_suspend_count_lock_);
   1239 
   1240   // Hook passed by framework which returns true
   1241   // when StrictMode events are traced for the current thread.
   1242   static bool (*is_sensitive_thread_hook_)();
   1243   // Stores the jit sensitive thread (which for now is the UI thread).
   1244   static Thread* jit_sensitive_thread_;
   1245 
   1246   /***********************************************************************************************/
   1247   // Thread local storage. Fields are grouped by size to enable 32 <-> 64 searching to account for
   1248   // pointer size differences. To encourage shorter encoding, more frequently used values appear
   1249   // first if possible.
   1250   /***********************************************************************************************/
   1251 
   1252   struct PACKED(4) tls_32bit_sized_values {
   1253     // We have no control over the size of 'bool', but want our boolean fields
   1254     // to be 4-byte quantities.
   1255     typedef uint32_t bool32_t;
   1256 
   1257     explicit tls_32bit_sized_values(bool is_daemon) :
   1258       suspend_count(0), debug_suspend_count(0), thin_lock_thread_id(0), tid(0),
   1259       daemon(is_daemon), throwing_OutOfMemoryError(false), no_thread_suspension(0),
   1260       thread_exit_check_count(0), handling_signal_(false),
   1261       suspended_at_suspend_check(false), ready_for_debug_invoke(false),
   1262       debug_method_entry_(false), is_gc_marking(false), weak_ref_access_enabled(true),
   1263       disable_thread_flip_count(0) {
   1264     }
   1265 
   1266     union StateAndFlags state_and_flags;
   1267     static_assert(sizeof(union StateAndFlags) == sizeof(int32_t),
   1268                   "Size of state_and_flags and int32 are different");
   1269 
   1270     // A non-zero value is used to tell the current thread to enter a safe point
   1271     // at the next poll.
   1272     int suspend_count GUARDED_BY(Locks::thread_suspend_count_lock_);

    // How much of 'suspend_count' is by request of the debugger, used to set things right
    // when the debugger detaches. Must be <= suspend_count.
    int debug_suspend_count GUARDED_BY(Locks::thread_suspend_count_lock_);

    // Thin lock thread id. This is a small integer used by the thin lock implementation.
    // This is not to be confused with the native thread's tid, nor is it the value returned
    // by java.lang.Thread.getId --- this is a distinct value, used only for locking. One
    // important difference between this id and the ids visible to managed code is that these
    // ones get reused (to ensure that they fit in the number of bits available).
    uint32_t thin_lock_thread_id;

    // System thread id.
    uint32_t tid;

    // Is the thread a daemon?
    const bool32_t daemon;

    // A boolean telling us whether we're recursively throwing OOME.
    bool32_t throwing_OutOfMemoryError;

    // A positive value implies we're in a region where thread suspension isn't expected.
    uint32_t no_thread_suspension;

    // How many times has our pthread key's destructor been called?
    uint32_t thread_exit_check_count;

    // True if a signal is being handled by this thread.
    bool32_t handling_signal_;

    // True if the thread is suspended in FullSuspendCheck(). This is
    // used to distinguish runnable threads that are suspended due to
    // a normal suspend check from other threads.
    bool32_t suspended_at_suspend_check;

    // True if the thread has been suspended by a debugger event. This is
    // used to invoke a method from the debugger, which is only allowed when
    // the thread is suspended by an event.
    bool32_t ready_for_debug_invoke;

    // True when the thread enters a method. This is used to detect method entry
    // events for the debugger.
    bool32_t debug_method_entry_;

    // True if the GC is in the marking phase. This is used for the CC collector only. This is
    // thread local so that the fast-path check for read barriers on GC roots stays simple.
    bool32_t is_gc_marking;

    // True if the thread is allowed to access a weak ref (Reference::GetReferent() and system
    // weaks) and to potentially mark an object alive/gray. This is used for concurrent reference
    // processing of the CC collector only. This is thread local so that we can enable/disable weak
    // ref access by using a checkpoint and avoid a race around the time weak ref access gets
    // disabled and concurrent reference processing begins (if weak ref access is disabled during a
    // pause, this is not an issue). Other collectors use Runtime::DisallowNewSystemWeaks() and
    // ReferenceProcessor::EnableSlowPath().
    bool32_t weak_ref_access_enabled;

    // A thread local version of Heap::disable_thread_flip_count_. This keeps track of how many
    // levels of (nested) JNI critical sections the thread is in and is used to detect a nested JNI
    // critical section enter.
    uint32_t disable_thread_flip_count;
  } tls32_;

  struct PACKED(8) tls_64bit_sized_values {
    tls_64bit_sized_values() : trace_clock_base(0) {
    }

    // The clock base used for tracing.
    uint64_t trace_clock_base;

    RuntimeStats stats;
  } tls64_;

  struct PACKED(sizeof(void*)) tls_ptr_sized_values {
    tls_ptr_sized_values() : card_table(nullptr), exception(nullptr), stack_end(nullptr),
        managed_stack(), suspend_trigger(nullptr), jni_env(nullptr), tmp_jni_env(nullptr),
        self(nullptr), opeer(nullptr), jpeer(nullptr), stack_begin(nullptr), stack_size(0),
        stack_trace_sample(nullptr), wait_next(nullptr), monitor_enter_object(nullptr),
        top_handle_scope(nullptr), class_loader_override(nullptr), long_jump_context(nullptr),
        instrumentation_stack(nullptr), debug_invoke_req(nullptr), single_step_control(nullptr),
        stacked_shadow_frame_record(nullptr), deoptimization_context_stack(nullptr),
        frame_id_to_shadow_frame(nullptr), name(nullptr), pthread_self(0),
        last_no_thread_suspension_cause(nullptr), thread_local_objects(0),
        thread_local_start(nullptr), thread_local_pos(nullptr), thread_local_end(nullptr),
        mterp_current_ibase(nullptr), mterp_default_ibase(nullptr), mterp_alt_ibase(nullptr),
        thread_local_alloc_stack_top(nullptr), thread_local_alloc_stack_end(nullptr),
        nested_signal_state(nullptr), flip_function(nullptr), method_verifier(nullptr),
        thread_local_mark_stack(nullptr) {
      std::fill(held_mutexes, held_mutexes + kLockLevelCount, nullptr);
    }

    // The biased card table, see CardTable for details.
    uint8_t* card_table;

    // The pending exception or null.
    mirror::Throwable* exception;

    // The end of this thread's stack. This is the lowest safely-addressable address on the stack.
    // We leave extra space so there's room for the code that throws StackOverflowError.
    uint8_t* stack_end;

    // The top of the managed stack, often manipulated directly by compiler-generated code.
    ManagedStack managed_stack;

    // In certain modes, setting this to 0 will trigger a SEGV and thus a suspend check. It is
    // normally set to the address of itself.
    uintptr_t* suspend_trigger;
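
    // Illustrative sketch (an assumption, not the runtime's actual generated code): compiled code
    // can poll for suspension with a single load through this pointer; when a suspend is
    // requested the runtime clears the pointer, so the next poll faults into the SIGSEGV handler,
    // which then performs the suspend check:
    //   uintptr_t unused = *tlsPtr_.suspend_trigger;  // Faults if suspend_trigger == nullptr.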

    // Every thread may have an associated JNI environment.
    JNIEnvExt* jni_env;

    // Temporary storage to transfer a pre-allocated JNIEnvExt from the creating thread to the
    // created thread.
    JNIEnvExt* tmp_jni_env;

    // Initialized to "this". On certain architectures (such as x86) reading off of Thread::Current
    // is easy but getting the address of Thread::Current is hard. This field can be read off of
    // Thread::Current to give the address.
    Thread* self;

    // Our managed peer (an instance of java.lang.Thread). The jobject version is used during
    // thread startup, until the thread is registered and the local opeer is used.
    mirror::Object* opeer;
    jobject jpeer;

    // The "lowest addressable byte" of the stack.
    uint8_t* stack_begin;

    // Size of the stack.
    size_t stack_size;

    // Pointer to the previous stack trace captured by the sampling profiler.
    std::vector<ArtMethod*>* stack_trace_sample;

    // The next thread in the wait set this thread is part of or null if not waiting.
    Thread* wait_next;

    // If we're blocked in MonitorEnter, this is the object we're trying to lock.
    mirror::Object* monitor_enter_object;

    // Top of linked list of handle scopes or null for none.
    HandleScope* top_handle_scope;

    // Needed to get the right ClassLoader in JNI_OnLoad, but also
    // useful for testing.
    jobject class_loader_override;

    // Thread local, lazily allocated, long jump context. Used to deliver exceptions.
    Context* long_jump_context;

    // Additional stack used by method instrumentation to store method and return pc values.
    // Stored as a pointer since std::deque is not PACKED.
    std::deque<instrumentation::InstrumentationStackFrame>* instrumentation_stack;

    // JDWP invoke-during-breakpoint support.
    DebugInvokeReq* debug_invoke_req;

    // JDWP single-stepping support.
    SingleStepControl* single_step_control;

    // For GC purposes, a shadow frame record stack that keeps track of:
    // 1) shadow frames under construction.
    // 2) deoptimization shadow frames.
    StackedShadowFrameRecord* stacked_shadow_frame_record;

    // Deoptimization return value record stack.
    DeoptimizationContextRecord* deoptimization_context_stack;

    // For the debugger, a linked list that keeps the mapping from frame_id to shadow frame.
    // Shadow frames may be created before deoptimization happens so that the debugger can
    // set local values there first.
    FrameIdToShadowFrame* frame_id_to_shadow_frame;

    // A cached copy of the java.lang.Thread's name.
    std::string* name;

    // A cached pthread_t for the pthread underlying this Thread*.
    pthread_t pthread_self;

    // If 'no_thread_suspension' is > 0, what is causing that assertion.
    const char* last_no_thread_suspension_cause;

    // Pending checkpoint functions or null if none are pending. Installation is guarded by
    // Locks::thread_suspend_count_lock_.
    Closure* checkpoint_functions[kMaxCheckpoints];

    // Pending barriers that require passing, or null if none are pending. Installation is guarded
    // by Locks::thread_suspend_count_lock_. They work effectively as art::Barrier, but are
    // implemented directly using AtomicInteger and futex to avoid the additional cost of the mutex
    // and condition variable used by art::Barrier.
    AtomicInteger* active_suspend_barriers[kMaxSuspendBarriers];
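
    // Illustrative sketch (an assumption; simplified from how such barriers are typically
    // passed): each thread reaching the barrier atomically decrements the count, and whichever
    // thread brings it to zero futex-wakes the waiter. The Atomic method name is an assumption
    // based on art::Atomic:
    //   AtomicInteger* barrier = ...;  // One of active_suspend_barriers.
    //   if (barrier->FetchAndSubSequentiallyConsistent(1) == 1) {
    //     futex(reinterpret_cast<int32_t*>(barrier), FUTEX_WAKE, -1, nullptr, nullptr, 0);
    //   }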

    // Entrypoint function pointers.
    // TODO: move this to more of a global offset table model to avoid per-thread duplication.
    JniEntryPoints jni_entrypoints;
    QuickEntryPoints quick_entrypoints;

    // Thread-local allocation pointers.
    size_t thread_local_objects;
    uint8_t* thread_local_start;
    // thread_local_pos and thread_local_end must be consecutive for ldrd and are 8-byte aligned
    // for potentially better performance.
    uint8_t* thread_local_pos;
    uint8_t* thread_local_end;
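
    // Illustrative sketch (an assumption) of the bump-pointer fast path these fields enable; the
    // real allocator lives elsewhere in the runtime:
    //   uint8_t* new_pos = tlsPtr_.thread_local_pos + alloc_size;
    //   if (LIKELY(new_pos <= tlsPtr_.thread_local_end)) {
    //     uint8_t* result = tlsPtr_.thread_local_pos;  // Object is carved out of the TLAB.
    //     tlsPtr_.thread_local_pos = new_pos;
    //     ++tlsPtr_.thread_local_objects;
    //   }  // Otherwise take the slow path and refill the TLAB.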

    // Mterp jump table bases.
    void* mterp_current_ibase;
    void* mterp_default_ibase;
    void* mterp_alt_ibase;

    // There are RosAlloc::kNumThreadLocalSizeBrackets thread-local size brackets per thread.
    void* rosalloc_runs[kNumRosAllocThreadLocalSizeBracketsInThread];

    // Thread-local allocation stack data/routines.
    StackReference<mirror::Object>* thread_local_alloc_stack_top;
    StackReference<mirror::Object>* thread_local_alloc_stack_end;

    // Support for Mutex lock hierarchy bug detection.
    BaseMutex* held_mutexes[kLockLevelCount];

    // Recorded thread state for nested signals.
    jmp_buf* nested_signal_state;

    // The function used for thread flip.
    Closure* flip_function;

    // Current method verifier, used for root marking.
    verifier::MethodVerifier* method_verifier;

    // Thread-local mark stack for the concurrent copying collector.
    gc::accounting::AtomicStack<mirror::Object>* thread_local_mark_stack;
  } tlsPtr_;

  // Guards the 'interrupted_' and 'wait_monitor_' members.
  Mutex* wait_mutex_ DEFAULT_MUTEX_ACQUIRED_AFTER;

  // Condition variable waited upon during a wait.
  ConditionVariable* wait_cond_ GUARDED_BY(wait_mutex_);
  // Pointer to the monitor lock we're currently waiting on or null if not waiting.
  Monitor* wait_monitor_ GUARDED_BY(wait_mutex_);

  // Thread "interrupted" status; stays raised until queried or thrown.
  bool interrupted_ GUARDED_BY(wait_mutex_);

  // Debug disable read barrier count; it is only checked in debug builds, and only in the runtime.
  uint8_t debug_disallow_read_barrier_ = 0;

  friend class Dbg;  // For SetStateUnsafe.
  friend class gc::collector::SemiSpace;  // For getting stack traces.
  friend class Runtime;  // For CreatePeer.
  friend class QuickExceptionHandler;  // For dumping the stack.
  friend class ScopedThreadStateChange;
  friend class StubTest;  // For accessing entrypoints.
  friend class ThreadList;  // For ~Thread and Destroy.

  friend class EntrypointsOrderTest;  // To test the order of tls entries.

  DISALLOW_COPY_AND_ASSIGN(Thread);
};

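// Example use (illustrative): opens a region in which any attempt to suspend the current thread
// trips an assertion, with |cause| reported in the resulting error message:
//   {
//     ScopedAssertNoThreadSuspension ants(Thread::Current(), "Visiting roots");
//     ...  // Code that must not see a thread suspension.
//   }  // Suspension is allowed again once the scope exits.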
class SCOPED_CAPABILITY ScopedAssertNoThreadSuspension {
 public:
  ScopedAssertNoThreadSuspension(Thread* self, const char* cause) ACQUIRE(Roles::uninterruptible_)
      : self_(self), old_cause_(self->StartAssertNoThreadSuspension(cause)) {
  }
  ~ScopedAssertNoThreadSuspension() RELEASE(Roles::uninterruptible_) {
    self_->EndAssertNoThreadSuspension(old_cause_);
  }
  Thread* Self() {
    return self_;
  }

 private:
  Thread* const self_;
  const char* const old_cause_;
};

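// Example use (illustrative; the enumerator name is an assumption): keeps a shadow frame
// registered with the thread for the duration of a scope, e.g. while building frames for
// deoptimization:
//   ScopedStackedShadowFramePusher pusher(
//       self, shadow_frame, StackedShadowFrameType::kDeoptimizationShadowFrame);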
class ScopedStackedShadowFramePusher {
 public:
  ScopedStackedShadowFramePusher(Thread* self, ShadowFrame* sf, StackedShadowFrameType type)
      : self_(self), type_(type) {
    self_->PushStackedShadowFrame(sf, type);
  }
  ~ScopedStackedShadowFramePusher() {
    self_->PopStackedShadowFrame(type_);
  }

 private:
  Thread* const self_;
  const StackedShadowFrameType type_;

  DISALLOW_COPY_AND_ASSIGN(ScopedStackedShadowFramePusher);
};

// Only works for debug builds.
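// Example use (illustrative): asserts, in debug builds, that no read barrier is exercised while
// the scope is active:
//   {
//     ScopedDebugDisallowReadBarriers sddrb(Thread::Current());
//     ...  // Code expected not to perform read barriers.
//   }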
class ScopedDebugDisallowReadBarriers {
 public:
  explicit ScopedDebugDisallowReadBarriers(Thread* self) : self_(self) {
    self_->ModifyDebugDisallowReadBarrier(1);
  }
  ~ScopedDebugDisallowReadBarriers() {
    self_->ModifyDebugDisallowReadBarrier(-1);
  }

 private:
  Thread* const self_;
};

std::ostream& operator<<(std::ostream& os, const Thread& thread);
std::ostream& operator<<(std::ostream& os, const StackedShadowFrameType& type);

}  // namespace art

#endif  // ART_RUNTIME_THREAD_H_