/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_THREAD_H_
#define ART_RUNTIME_THREAD_H_

#include <setjmp.h>

#include <bitset>
#include <deque>
#include <iosfwd>
#include <list>
#include <memory>
#include <string>

#include "atomic.h"
#include "base/macros.h"
#include "base/mutex.h"
#include "entrypoints/interpreter/interpreter_entrypoints.h"
#include "entrypoints/jni/jni_entrypoints.h"
#include "entrypoints/portable/portable_entrypoints.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "globals.h"
#include "handle_scope.h"
#include "instruction_set.h"
#include "jvalue.h"
#include "object_callbacks.h"
#include "offsets.h"
#include "runtime_stats.h"
#include "stack.h"
#include "thread_state.h"
#include "throw_location.h"

namespace art {

namespace gc {
namespace collector {
  class SemiSpace;
}  // namespace collector
}  // namespace gc

namespace mirror {
  class ArtMethod;
  class Array;
  class Class;
  class ClassLoader;
  class Object;
  template<class T> class ObjectArray;
  template<class T> class PrimitiveArray;
  typedef PrimitiveArray<int32_t> IntArray;
  class StackTraceElement;
  class Throwable;
}  // namespace mirror
class BaseMutex;
class ClassLinker;
class Closure;
class Context;
struct DebugInvokeReq;
class DexFile;
class JavaVMExt;
struct JNIEnvExt;
class Monitor;
class Runtime;
class ScopedObjectAccessAlreadyRunnable;
class ShadowFrame;
struct SingleStepControl;
class Thread;
class ThreadList;

// Thread priorities. These must match the Thread.MIN_PRIORITY,
// Thread.NORM_PRIORITY, and Thread.MAX_PRIORITY constants.
enum ThreadPriority {
  kMinThreadPriority = 1,
  kNormThreadPriority = 5,
  kMaxThreadPriority = 10,
};

enum ThreadFlag {
  kSuspendRequest   = 1,  // If set, implies that suspend_count_ > 0 and the Thread should enter
                          // the safepoint handler.
  kCheckpointRequest = 2  // Request that the thread do some checkpoint work and then continue.
};

static constexpr size_t kNumRosAllocThreadLocalSizeBrackets = 34;

// Thread's stack layout for implicit stack overflow checks:
//
//   +---------------------+  <- highest address of stack memory
//   |                     |
//   .                     .  <- SP
//   |                     |
//   |                     |
//   +---------------------+  <- stack_end
//   |                     |
//   |  Gap                |
//   |                     |
//   +---------------------+  <- stack_begin
//   |                     |
//   | Protected region    |
//   |                     |
//   +---------------------+  <- lowest address of stack memory
//
// The stack always grows down in memory.  At the lowest address is a region of memory
// that is protected with mprotect(PROT_NONE).  Any attempt to read from or write to this
// region results in a SIGSEGV.  At any point, the thread's SP will be somewhere
// between stack_end and the highest address in stack memory.  An implicit stack
// overflow check is a read of memory at a certain offset below the current SP (typically 4K).
// If the thread's SP is below the stack_end address, this will be a read into the protected
// region.  If the SP is above the stack_end address, the thread is guaranteed to have
// at least 4K of space.  Because stack overflow checks are only performed in generated code,
// if the thread makes a call out to a native function (through JNI), that native function
// might have only 4K of stack space remaining (if the SP is adjacent to stack_end).
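//
// A minimal sketch of what the emitted probe amounts to (illustrative C++ only;
// the real check is a single load emitted by the compiler back end, and
// kProbeOffset stands in for the ISA-specific distance below SP):
//
//   volatile uint8_t sink = *(current_sp - kProbeOffset);
//   (void) sink;  // Faults with SIGSEGV if the probe lands in the protected
//                 // region; the fault handler then raises StackOverflowError.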

class Thread {
 public:
  // For implicit overflow checks we reserve an extra piece of memory at the bottom
  // of the stack (lowest memory).  The higher portion of the memory
  // is protected against reads and the lower is available for use while
  // throwing the StackOverflowError.
  static constexpr size_t kStackOverflowProtectedSize = 4 * KB;
  static const size_t kStackOverflowImplicitCheckSize;

  // Creates a new native thread corresponding to the given managed peer.
  // Used to implement Thread.start.
  static void CreateNativeThread(JNIEnv* env, jobject peer, size_t stack_size, bool daemon);

  // Attaches the calling native thread to the runtime, returning the new native peer.
  // Used to implement JNI AttachCurrentThread and AttachCurrentThreadAsDaemon calls.
  static Thread* Attach(const char* thread_name, bool as_daemon, jobject thread_group,
                        bool create_peer);

  // Resets internal state of the child thread after fork.
  void InitAfterFork();

  static Thread* Current();

  static Thread* FromManagedThread(const ScopedObjectAccessAlreadyRunnable& ts,
                                   mirror::Object* thread_peer)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_list_lock_)
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  static Thread* FromManagedThread(const ScopedObjectAccessAlreadyRunnable& ts, jobject thread)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_list_lock_)
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Translates a raw thread offset (e.g. 172) to its entrypoint name, such as pAllocArrayFromCode.
  template<size_t size_of_pointers>
  static void DumpThreadOffset(std::ostream& os, uint32_t offset);

  // Dumps a one-line summary of thread state (used for operator<<).
  void ShortDump(std::ostream& os) const;

  // Dumps the detailed thread state and the thread stack (used for SIGQUIT).
  void Dump(std::ostream& os) const
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void DumpJavaStack(std::ostream& os) const
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Dumps the SIGQUIT per-thread header. 'thread' can be NULL for a non-attached thread, in which
  // case we use 'tid' to identify the thread, and we'll include as much information as we can.
  static void DumpState(std::ostream& os, const Thread* thread, pid_t tid)
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  ThreadState GetState() const {
    DCHECK_GE(tls32_.state_and_flags.as_struct.state, kTerminated);
    DCHECK_LE(tls32_.state_and_flags.as_struct.state, kSuspended);
    return static_cast<ThreadState>(tls32_.state_and_flags.as_struct.state);
  }

  ThreadState SetState(ThreadState new_state);

  int GetSuspendCount() const EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_suspend_count_lock_) {
    return tls32_.suspend_count;
  }

  int GetDebugSuspendCount() const EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_suspend_count_lock_) {
    return tls32_.debug_suspend_count;
  }

  bool IsSuspended() const {
    union StateAndFlags state_and_flags;
    state_and_flags.as_int = tls32_.state_and_flags.as_int;
    return state_and_flags.as_struct.state != kRunnable &&
        (state_and_flags.as_struct.flags & kSuspendRequest) != 0;
  }

  void ModifySuspendCount(Thread* self, int delta, bool for_debugger)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_suspend_count_lock_);

  bool RequestCheckpoint(Closure* function)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_suspend_count_lock_);

  // Called when this thread detects that thread_suspend_count_ is non-zero. Gives up its share
  // of the mutator_lock_ and waits until it is resumed and thread_suspend_count_ is zero.
  void FullSuspendCheck()
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Transitions from a non-runnable to the runnable state, acquiring a share of the mutator_lock_.
  ThreadState TransitionFromSuspendedToRunnable()
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      SHARED_LOCK_FUNCTION(Locks::mutator_lock_)
      ALWAYS_INLINE;

  // Transitions from runnable into a state where mutator privileges are denied. Releases the
  // share of the mutator_lock_.
  void TransitionFromRunnableToSuspended(ThreadState new_state)
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      UNLOCK_FUNCTION(Locks::mutator_lock_)
      ALWAYS_INLINE;

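  // A sketch of how callers bracket blocking native work with these transitions
  // (in practice the pairing is wrapped by RAII helpers such as
  // ScopedThreadStateChange rather than written by hand):
  //
  //   self->TransitionFromRunnableToSuspended(kNative);
  //   ...  // blocking work; the GC may run concurrently while we are suspended
  //   self->TransitionFromSuspendedToRunnable();
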
  // Once called, thread suspension will cause an assertion failure.
  const char* StartAssertNoThreadSuspension(const char* cause) {
    if (kIsDebugBuild) {
      CHECK(cause != nullptr);
      const char* previous_cause = tlsPtr_.last_no_thread_suspension_cause;
      tls32_.no_thread_suspension++;
      tlsPtr_.last_no_thread_suspension_cause = cause;
      return previous_cause;
    } else {
      return nullptr;
    }
  }

  // End region where no thread suspension is expected.
  void EndAssertNoThreadSuspension(const char* old_cause) {
    if (kIsDebugBuild) {
      CHECK(old_cause != nullptr || tls32_.no_thread_suspension == 1);
      CHECK_GT(tls32_.no_thread_suspension, 0U);
      tls32_.no_thread_suspension--;
      tlsPtr_.last_no_thread_suspension_cause = old_cause;
    }
  }

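  // Typical paired usage (illustrative sketch):
  //
  //   const char* old_cause = self->StartAssertNoThreadSuspension("Visiting roots");
  //   ...  // code that must not suspend, e.g. while raw mirror::Object* are held
  //   self->EndAssertNoThreadSuspension(old_cause);
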
  void AssertThreadSuspensionIsAllowable(bool check_locks = true) const;

  bool IsDaemon() const {
    return tls32_.daemon;
  }

  bool HoldsLock(mirror::Object*) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  /*
   * Changes the priority of this thread to match that of the java.lang.Thread object.
   *
   * We map a priority value from 1-10 to Linux "nice" values, where lower
   * numbers indicate higher priority.
   */
  void SetNativePriority(int newPriority);

  /*
   * Returns the thread priority for the current thread by querying the system.
   * This is useful when attaching a thread through JNI.
   *
   * Returns a value from 1 to 10 (compatible with java.lang.Thread values).
   */
  static int GetNativePriority();

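  // A sketch of the 1-10 to "nice" mapping mentioned above (illustrative values
  // only; the real table lives in the platform-specific implementation):
  //
  //   static const int kNiceValues[10] = {  // index = Java priority - 1
  //     19, 16, 13, 10, 0, -2, -4, -6, -8, -10
  //   };
  //   setpriority(PRIO_PROCESS, GetTid(), kNiceValues[newPriority - 1]);
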
  uint32_t GetThreadId() const {
    return tls32_.thin_lock_thread_id;
  }

  pid_t GetTid() const {
    return tls32_.tid;
  }

  // Returns the java.lang.Thread's name, or NULL if this Thread* doesn't have a peer.
  mirror::String* GetThreadName(const ScopedObjectAccessAlreadyRunnable& ts) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Sets 'name' to the java.lang.Thread's name. This requires no transition to managed code,
  // allocation, or locking.
  void GetThreadName(std::string& name) const;

  // Sets the thread's name.
  void SetThreadName(const char* name) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Returns the thread-specific CPU-time clock in microseconds or -1 if unavailable.
  uint64_t GetCpuMicroTime() const;

  mirror::Object* GetPeer() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    CHECK(tlsPtr_.jpeer == nullptr);
    return tlsPtr_.opeer;
  }

  bool HasPeer() const {
    return tlsPtr_.jpeer != nullptr || tlsPtr_.opeer != nullptr;
  }

  RuntimeStats* GetStats() {
    return &tls64_.stats;
  }

  bool IsStillStarting() const;

  bool IsExceptionPending() const {
    return tlsPtr_.exception != nullptr;
  }

  mirror::Throwable* GetException(ThrowLocation* throw_location) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    if (throw_location != nullptr) {
      *throw_location = tlsPtr_.throw_location;
    }
    return tlsPtr_.exception;
  }

  void AssertNoPendingException() const;
  void AssertNoPendingExceptionForNewException(const char* msg) const;

  void SetException(const ThrowLocation& throw_location, mirror::Throwable* new_exception)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    CHECK(new_exception != nullptr);
    // TODO: DCHECK(!IsExceptionPending());
    tlsPtr_.exception = new_exception;
    tlsPtr_.throw_location = throw_location;
  }

  void ClearException() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    tlsPtr_.exception = nullptr;
    tlsPtr_.throw_location.Clear();
    SetExceptionReportedToInstrumentation(false);
  }

  // Finds the catch block and performs a long jump to the appropriate exception handler.
  void QuickDeliverException() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  Context* GetLongJumpContext();
  void ReleaseLongJumpContext(Context* context) {
    DCHECK(tlsPtr_.long_jump_context == nullptr);
    tlsPtr_.long_jump_context = context;
  }

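  // Usage sketch: exception delivery borrows the thread's cached context and
  // returns it when finished, so the common case avoids an allocation:
  //
  //   Context* context = self->GetLongJumpContext();  // takes ownership
  //   ...  // fill in registers and long jump to the catch handler
  //   self->ReleaseLongJumpContext(context);          // cache it for reuse
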
  // Get the current method and dex pc. If there are errors in retrieving the dex pc, this will
  // abort the runtime iff abort_on_error is true.
  mirror::ArtMethod* GetCurrentMethod(uint32_t* dex_pc, bool abort_on_error = true) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  ThrowLocation GetCurrentLocationForThrow() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void SetTopOfStack(StackReference<mirror::ArtMethod>* top_method, uintptr_t pc) {
    tlsPtr_.managed_stack.SetTopQuickFrame(top_method);
    tlsPtr_.managed_stack.SetTopQuickFramePc(pc);
  }

  void SetTopOfShadowStack(ShadowFrame* top) {
    tlsPtr_.managed_stack.SetTopShadowFrame(top);
  }

  bool HasManagedStack() const {
    return (tlsPtr_.managed_stack.GetTopQuickFrame() != nullptr) ||
        (tlsPtr_.managed_stack.GetTopShadowFrame() != nullptr);
  }

  // If 'msg' is NULL, no detail message is set.
  void ThrowNewException(const ThrowLocation& throw_location,
                         const char* exception_class_descriptor, const char* msg)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // If 'msg' is NULL, no detail message is set. An exception must be pending, and will be
  // used as the new exception's cause.
  void ThrowNewWrappedException(const ThrowLocation& throw_location,
                                const char* exception_class_descriptor,
                                const char* msg)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void ThrowNewExceptionF(const ThrowLocation& throw_location,
                          const char* exception_class_descriptor, const char* fmt, ...)
      __attribute__((format(printf, 4, 5)))
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void ThrowNewExceptionV(const ThrowLocation& throw_location,
                          const char* exception_class_descriptor, const char* fmt, va_list ap)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // OutOfMemoryError is special, because we need to pre-allocate an instance.
  // Only the GC should call this.
  void ThrowOutOfMemoryError(const char* msg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  static void Startup();
  static void FinishStartup();
  static void Shutdown();

  // JNI methods.
  JNIEnvExt* GetJniEnv() const {
    return tlsPtr_.jni_env;
  }

  // Converts a jobject into an Object*.
  mirror::Object* DecodeJObject(jobject obj) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  mirror::Object* GetMonitorEnterObject() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return tlsPtr_.monitor_enter_object;
  }

  void SetMonitorEnterObject(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    tlsPtr_.monitor_enter_object = obj;
  }

  // Implements java.lang.Thread.interrupted.
  bool Interrupted() LOCKS_EXCLUDED(wait_mutex_);
  // Implements java.lang.Thread.isInterrupted.
  bool IsInterrupted() LOCKS_EXCLUDED(wait_mutex_);
  bool IsInterruptedLocked() EXCLUSIVE_LOCKS_REQUIRED(wait_mutex_) {
    return interrupted_;
  }
  void Interrupt(Thread* self) LOCKS_EXCLUDED(wait_mutex_);
  void SetInterruptedLocked(bool i) EXCLUSIVE_LOCKS_REQUIRED(wait_mutex_) {
    interrupted_ = i;
  }
  void Notify() LOCKS_EXCLUDED(wait_mutex_);

 private:
  void NotifyLocked(Thread* self) EXCLUSIVE_LOCKS_REQUIRED(wait_mutex_);

 public:
  Mutex* GetWaitMutex() const LOCK_RETURNED(wait_mutex_) {
    return wait_mutex_;
  }

  ConditionVariable* GetWaitConditionVariable() const EXCLUSIVE_LOCKS_REQUIRED(wait_mutex_) {
    return wait_cond_;
  }

  Monitor* GetWaitMonitor() const EXCLUSIVE_LOCKS_REQUIRED(wait_mutex_) {
    return wait_monitor_;
  }

  void SetWaitMonitor(Monitor* mon) EXCLUSIVE_LOCKS_REQUIRED(wait_mutex_) {
    wait_monitor_ = mon;
  }

  // Waiter linked-list support.
  Thread* GetWaitNext() const {
    return tlsPtr_.wait_next;
  }

  void SetWaitNext(Thread* next) {
    tlsPtr_.wait_next = next;
  }

  mirror::ClassLoader* GetClassLoaderOverride() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return tlsPtr_.class_loader_override;
  }

  void SetClassLoaderOverride(mirror::ClassLoader* class_loader_override)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Creates the internal representation of a stack trace, which is more time- and space-efficient
  // to compute than a StackTraceElement[].
  template<bool kTransactionActive>
  jobject CreateInternalStackTrace(const ScopedObjectAccessAlreadyRunnable& soa) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Converts an internal stack trace representation (returned by CreateInternalStackTrace) to a
  // StackTraceElement[]. If output_array is NULL, a new array is created; otherwise as many
  // frames as will fit are written into the given array. If stack_depth is non-NULL, it is updated
  // with the number of valid frames in the returned array.
  static jobjectArray InternalStackTraceToStackTraceElementArray(
      const ScopedObjectAccessAlreadyRunnable& soa, jobject internal,
      jobjectArray output_array = nullptr, int* stack_depth = nullptr)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

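  // Sketch of the two-step pattern for building a java.lang.StackTraceElement[]:
  //
  //   jobject internal = soa.Self()->CreateInternalStackTrace<false>(soa);
  //   jobjectArray java_trace =
  //       Thread::InternalStackTraceToStackTraceElementArray(soa, internal);
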
  void VisitRoots(RootCallback* visitor, void* arg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  ALWAYS_INLINE void VerifyStack() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  //
  // Offsets of various members of native Thread class, used by compiled code.
  //

  template<size_t pointer_size>
  static ThreadOffset<pointer_size> ThinLockIdOffset() {
    return ThreadOffset<pointer_size>(
        OFFSETOF_MEMBER(Thread, tls32_) +
        OFFSETOF_MEMBER(tls_32bit_sized_values, thin_lock_thread_id));
  }

  template<size_t pointer_size>
  static ThreadOffset<pointer_size> ThreadFlagsOffset() {
    return ThreadOffset<pointer_size>(
        OFFSETOF_MEMBER(Thread, tls32_) +
        OFFSETOF_MEMBER(tls_32bit_sized_values, state_and_flags));
  }

 private:
  template<size_t pointer_size>
  static ThreadOffset<pointer_size> ThreadOffsetFromTlsPtr(size_t tls_ptr_offset) {
    size_t base = OFFSETOF_MEMBER(Thread, tlsPtr_);
    size_t scale;
    size_t shrink;
    if (pointer_size == sizeof(void*)) {
      scale = 1;
      shrink = 1;
    } else if (pointer_size > sizeof(void*)) {
      scale = pointer_size / sizeof(void*);
      shrink = 1;
    } else {
      DCHECK_GT(sizeof(void*), pointer_size);
      scale = 1;
      shrink = sizeof(void*) / pointer_size;
    }
    return ThreadOffset<pointer_size>(base + ((tls_ptr_offset * scale) / shrink));
  }

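  // Worked example of the scaling above: when a 32-bit host compiles for a
  // 64-bit target, pointer_size is 8 while sizeof(void*) is 4, so scale is 2
  // and a host tls_ptr_offset of 12 (the fourth pointer-sized slot) maps to
  // target offset base + 24. The shrink path handles the mirror-image
  // 64-bit host / 32-bit target case.
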
 public:
  template<size_t pointer_size>
  static ThreadOffset<pointer_size> QuickEntryPointOffset(size_t quick_entrypoint_offset) {
    return ThreadOffsetFromTlsPtr<pointer_size>(
        OFFSETOF_MEMBER(tls_ptr_sized_values, quick_entrypoints) + quick_entrypoint_offset);
  }

  template<size_t pointer_size>
  static ThreadOffset<pointer_size> InterpreterEntryPointOffset(size_t interp_entrypoint_offset) {
    return ThreadOffsetFromTlsPtr<pointer_size>(
        OFFSETOF_MEMBER(tls_ptr_sized_values, interpreter_entrypoints) + interp_entrypoint_offset);
  }

  template<size_t pointer_size>
  static ThreadOffset<pointer_size> JniEntryPointOffset(size_t jni_entrypoint_offset) {
    return ThreadOffsetFromTlsPtr<pointer_size>(
        OFFSETOF_MEMBER(tls_ptr_sized_values, jni_entrypoints) + jni_entrypoint_offset);
  }

  template<size_t pointer_size>
  static ThreadOffset<pointer_size> PortableEntryPointOffset(size_t port_entrypoint_offset) {
    return ThreadOffsetFromTlsPtr<pointer_size>(
        OFFSETOF_MEMBER(tls_ptr_sized_values, portable_entrypoints) + port_entrypoint_offset);
  }

  template<size_t pointer_size>
  static ThreadOffset<pointer_size> SelfOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, self));
  }

  template<size_t pointer_size>
  static ThreadOffset<pointer_size> ExceptionOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, exception));
  }

  template<size_t pointer_size>
  static ThreadOffset<pointer_size> PeerOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, opeer));
  }

  template<size_t pointer_size>
  static ThreadOffset<pointer_size> CardTableOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, card_table));
  }

  template<size_t pointer_size>
  static ThreadOffset<pointer_size> ThreadSuspendTriggerOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(
        OFFSETOF_MEMBER(tls_ptr_sized_values, suspend_trigger));
  }

  // Size of the stack, less any space reserved for stack overflow handling.
  size_t GetStackSize() const {
    return tlsPtr_.stack_size - (tlsPtr_.stack_end - tlsPtr_.stack_begin);
  }

  byte* GetStackEndForInterpreter(bool implicit_overflow_check) const {
    if (implicit_overflow_check) {
      // The interpreter needs the extra overflow bytes that stack_end does
      // not include.
      return tlsPtr_.stack_end + GetStackOverflowReservedBytes(kRuntimeISA);
    } else {
      return tlsPtr_.stack_end;
    }
  }

  byte* GetStackEnd() const {
    return tlsPtr_.stack_end;
  }

  // Sets the stack end to the value to be used while handling a stack overflow.
  void SetStackEndForStackOverflow() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Sets the stack end to the value to be used during regular execution.
  void ResetDefaultStackEnd() {
    // Our stacks grow down, so we want stack_end_ to be near there, but reserving enough room
    // to throw a StackOverflowError.
    tlsPtr_.stack_end = tlsPtr_.stack_begin + GetStackOverflowReservedBytes(kRuntimeISA);
  }


  // Install the protected region for implicit stack checks.
  void InstallImplicitProtection();

  bool IsHandlingStackOverflow() const {
    return tlsPtr_.stack_end == tlsPtr_.stack_begin;
  }

  template<size_t pointer_size>
  static ThreadOffset<pointer_size> StackEndOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(
        OFFSETOF_MEMBER(tls_ptr_sized_values, stack_end));
  }

  template<size_t pointer_size>
  static ThreadOffset<pointer_size> JniEnvOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(
        OFFSETOF_MEMBER(tls_ptr_sized_values, jni_env));
  }

  template<size_t pointer_size>
  static ThreadOffset<pointer_size> TopOfManagedStackOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(
        OFFSETOF_MEMBER(tls_ptr_sized_values, managed_stack) +
        ManagedStack::TopQuickFrameOffset());
  }

  template<size_t pointer_size>
  static ThreadOffset<pointer_size> TopOfManagedStackPcOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(
        OFFSETOF_MEMBER(tls_ptr_sized_values, managed_stack) +
        ManagedStack::TopQuickFramePcOffset());
  }

  const ManagedStack* GetManagedStack() const {
    return &tlsPtr_.managed_stack;
  }

  // Linked list recording fragments of managed stack.
  void PushManagedStackFragment(ManagedStack* fragment) {
    tlsPtr_.managed_stack.PushManagedStackFragment(fragment);
  }
  void PopManagedStackFragment(const ManagedStack& fragment) {
    tlsPtr_.managed_stack.PopManagedStackFragment(fragment);
  }

  ShadowFrame* PushShadowFrame(ShadowFrame* new_top_frame) {
    return tlsPtr_.managed_stack.PushShadowFrame(new_top_frame);
  }

  ShadowFrame* PopShadowFrame() {
    return tlsPtr_.managed_stack.PopShadowFrame();
  }

  template<size_t pointer_size>
  static ThreadOffset<pointer_size> TopShadowFrameOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(
        OFFSETOF_MEMBER(tls_ptr_sized_values, managed_stack) +
        ManagedStack::TopShadowFrameOffset());
  }

  // Number of references allocated in JNI ShadowFrames on this thread.
  size_t NumJniShadowFrameReferences() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return tlsPtr_.managed_stack.NumJniShadowFrameReferences();
  }

  // Number of references in handle scopes on this thread.
  size_t NumHandleReferences();

  // Number of references allocated in handle scopes & JNI shadow frames on this thread.
  size_t NumStackReferences() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return NumHandleReferences() + NumJniShadowFrameReferences();
  }

  // Is the given obj in this thread's stack indirect reference table?
  bool HandleScopeContains(jobject obj) const;

  void HandleScopeVisitRoots(RootCallback* visitor, void* arg, uint32_t thread_id)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  HandleScope* GetTopHandleScope() {
    return tlsPtr_.top_handle_scope;
  }

  void PushHandleScope(HandleScope* handle_scope) {
    handle_scope->SetLink(tlsPtr_.top_handle_scope);
    tlsPtr_.top_handle_scope = handle_scope;
  }

  HandleScope* PopHandleScope() {
    HandleScope* handle_scope = tlsPtr_.top_handle_scope;
    DCHECK(handle_scope != nullptr);
    tlsPtr_.top_handle_scope = tlsPtr_.top_handle_scope->GetLink();
    return handle_scope;
  }

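  // Handle scopes form a LIFO chain through their link field. A sketch of the
  // usual discipline (normally managed by StackHandleScope's RAII, not by hand):
  //
  //   self->PushHandleScope(scope);  // scope's link now points at the old top
  //   ...  // the GC can visit references in the chain while the scope is live
  //   HandleScope* popped = self->PopHandleScope();
  //   DCHECK(popped == scope);
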
  template<size_t pointer_size>
  static ThreadOffset<pointer_size> TopHandleScopeOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
                                                                top_handle_scope));
  }

  DebugInvokeReq* GetInvokeReq() const {
    return tlsPtr_.debug_invoke_req;
  }

  SingleStepControl* GetSingleStepControl() const {
    return tlsPtr_.single_step_control;
  }

  // Returns the fake exception used to activate deoptimization.
  static mirror::Throwable* GetDeoptimizationException() {
    return reinterpret_cast<mirror::Throwable*>(-1);
  }

  void SetDeoptimizationShadowFrame(ShadowFrame* sf);
  void SetDeoptimizationReturnValue(const JValue& ret_val);

  ShadowFrame* GetAndClearDeoptimizationShadowFrame(JValue* ret_val);

  bool HasDeoptimizationShadowFrame() const {
    return tlsPtr_.deoptimization_shadow_frame != nullptr;
  }

  void SetShadowFrameUnderConstruction(ShadowFrame* sf);
  void ClearShadowFrameUnderConstruction();

  bool HasShadowFrameUnderConstruction() const {
    return tlsPtr_.shadow_frame_under_construction != nullptr;
  }

  std::deque<instrumentation::InstrumentationStackFrame>* GetInstrumentationStack() {
    return tlsPtr_.instrumentation_stack;
  }

  std::vector<mirror::ArtMethod*>* GetStackTraceSample() const {
    return tlsPtr_.stack_trace_sample;
  }

  void SetStackTraceSample(std::vector<mirror::ArtMethod*>* sample) {
    tlsPtr_.stack_trace_sample = sample;
  }

  uint64_t GetTraceClockBase() const {
    return tls64_.trace_clock_base;
  }

  void SetTraceClockBase(uint64_t clock_base) {
    tls64_.trace_clock_base = clock_base;
  }

  BaseMutex* GetHeldMutex(LockLevel level) const {
    return tlsPtr_.held_mutexes[level];
  }

  void SetHeldMutex(LockLevel level, BaseMutex* mutex) {
    tlsPtr_.held_mutexes[level] = mutex;
  }

  void RunCheckpointFunction();

  bool ReadFlag(ThreadFlag flag) const {
    return (tls32_.state_and_flags.as_struct.flags & flag) != 0;
  }

  bool TestAllFlags() const {
    return (tls32_.state_and_flags.as_struct.flags != 0);
  }

  void AtomicSetFlag(ThreadFlag flag) {
    tls32_.state_and_flags.as_atomic_int.FetchAndOrSequentiallyConsistent(flag);
  }

  void AtomicClearFlag(ThreadFlag flag) {
    tls32_.state_and_flags.as_atomic_int.FetchAndAndSequentiallyConsistent(-1 ^ flag);
  }

  void ResetQuickAllocEntryPointsForThread();

  // Returns the remaining space in the TLAB.
  size_t TlabSize() const;
  // Doesn't check that there is room.
  mirror::Object* AllocTlab(size_t bytes);
  void SetTlab(byte* start, byte* end);
  bool HasTlab() const;

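  // Conceptual TLAB fast path (an illustrative sketch; the real fast path is
  // emitted in the quick allocation entrypoints):
  //
  //   if (self->TlabSize() >= bytes) {
  //     mirror::Object* obj = self->AllocTlab(bytes);  // bumps thread_local_pos
  //     ...  // initialize obj; no locking needed, the TLAB is thread-local
  //   } else {
  //     ...  // fall back to the shared allocator, possibly refilling the TLAB
  //   }
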
  // Remove the suspend trigger for this thread by making the suspend_trigger_ TLS value
  // equal to a valid pointer.
  // TODO: does this need to be atomic?  I don't think so.
  void RemoveSuspendTrigger() {
    tlsPtr_.suspend_trigger = reinterpret_cast<uintptr_t*>(&tlsPtr_.suspend_trigger);
  }

  // Trigger a suspend check by making the suspend_trigger_ TLS value an invalid pointer.
  // The next time a suspend check is done, it will load from the value at this address
  // and trigger a SIGSEGV.
  void TriggerSuspend() {
    tlsPtr_.suspend_trigger = nullptr;
  }

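  // The implicit suspend check emitted in generated code amounts to (sketch):
  //
  //   uintptr_t probe = *tlsPtr_.suspend_trigger;  // benign self-read while disarmed
  //
  // While no suspension is requested, the pointer points at itself and the load
  // is harmless. TriggerSuspend() nulls it, so the next poll faults and the
  // SIGSEGV handler routes the thread into its suspend/checkpoint logic.
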
  // Push an object onto the allocation stack.
  bool PushOnThreadLocalAllocationStack(mirror::Object* obj);

  // Set the thread local allocation pointers to the given pointers.
  void SetThreadLocalAllocationStack(mirror::Object** start, mirror::Object** end);

  // Resets the thread local allocation pointers.
  void RevokeThreadLocalAllocationStack();

  size_t GetThreadLocalBytesAllocated() const {
    return tlsPtr_.thread_local_end - tlsPtr_.thread_local_start;
  }

  size_t GetThreadLocalObjectsAllocated() const {
    return tlsPtr_.thread_local_objects;
  }

  void* GetRosAllocRun(size_t index) const {
    return tlsPtr_.rosalloc_runs[index];
  }

  void SetRosAllocRun(size_t index, void* run) {
    tlsPtr_.rosalloc_runs[index] = run;
  }

  bool IsExceptionReportedToInstrumentation() const {
    return tls32_.is_exception_reported_to_instrumentation_;
  }

  void SetExceptionReportedToInstrumentation(bool reported) {
    tls32_.is_exception_reported_to_instrumentation_ = reported;
  }

  void ProtectStack();
  bool UnprotectStack();

  void NoteSignalBeingHandled() {
    if (tls32_.handling_signal_) {
      LOG(FATAL) << "Detected signal while processing a signal";
    }
    tls32_.handling_signal_ = true;
  }

  void NoteSignalHandlerDone() {
    tls32_.handling_signal_ = false;
  }

  jmp_buf* GetNestedSignalState() {
    return tlsPtr_.nested_signal_state;
  }

 private:
  explicit Thread(bool daemon);
  ~Thread() LOCKS_EXCLUDED(Locks::mutator_lock_,
                           Locks::thread_suspend_count_lock_);
  void Destroy();

  void CreatePeer(const char* name, bool as_daemon, jobject thread_group);

  template<bool kTransactionActive>
  void InitPeer(ScopedObjectAccess& soa, jboolean thread_is_daemon, jobject thread_group,
                jobject thread_name, jint thread_priority)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Avoid use; callers should use SetState. Used only by SignalCatcher::HandleSigQuit, ~Thread and
  // Dbg::Disconnected.
  ThreadState SetStateUnsafe(ThreadState new_state) {
    ThreadState old_state = GetState();
    tls32_.state_and_flags.as_struct.state = new_state;
    return old_state;
  }

  void VerifyStackImpl() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void DumpState(std::ostream& os) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void DumpStack(std::ostream& os) const
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Out-of-line conveniences for debugging in gdb.
  static Thread* CurrentFromGdb();  // Like Thread::Current.
  // Like Thread::Dump(std::cerr).
  void DumpFromGdb() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  static void* CreateCallback(void* arg);

  void HandleUncaughtExceptions(ScopedObjectAccess& soa)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void RemoveFromThreadGroup(ScopedObjectAccess& soa) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void Init(ThreadList*, JavaVMExt*) EXCLUSIVE_LOCKS_REQUIRED(Locks::runtime_shutdown_lock_);
  void InitCardTable();
  void InitCpu();
  void CleanupCpu();
  void InitTlsEntryPoints();
  void InitTid();
  void InitPthreadKeySelf();
  void InitStackHwm();

  void SetUpAlternateSignalStack();
  void TearDownAlternateSignalStack();

  // 32 bits of atomically changed state and flags. Keeping them together as 32 bits allows an
  // atomic CAS to change from being Suspended to Runnable without a suspend request occurring.
  union PACKED(4) StateAndFlags {
    StateAndFlags() {}
    struct PACKED(4) {
      // Bitfield of flag values. Must be changed atomically so that flag values aren't lost. See
      // ThreadFlags for bit field meanings.
      volatile uint16_t flags;
      // Holds the ThreadState. May be changed non-atomically between Suspended (i.e. not Runnable)
      // transitions. Changing to Runnable requires that the suspend_request be part of the atomic
      // operation. If a thread is suspended and a suspend_request is present, a thread may not
      // change to Runnable as a GC or other operation is in progress.
      volatile uint16_t state;
    } as_struct;
    AtomicInteger as_atomic_int;
    volatile int32_t as_int;

   private:
    // gcc does not handle struct with volatile member assignments correctly.
    // See http://gcc.gnu.org/bugzilla/show_bug.cgi?id=47409
    DISALLOW_COPY_AND_ASSIGN(StateAndFlags);
  };
  COMPILE_ASSERT(sizeof(StateAndFlags) == sizeof(int32_t), weird_state_and_flags_size);

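  // Sketch of the suspended -> runnable transition this packing enables (the
  // real logic lives in TransitionFromSuspendedToRunnable):
  //
  //   union StateAndFlags old_val, new_val;
  //   old_val.as_int = tls32_.state_and_flags.as_int;  // one 32-bit snapshot
  //   if ((old_val.as_struct.flags & kSuspendRequest) != 0) {
  //     ...  // must wait: a suspension has been requested
  //   }
  //   new_val.as_int = old_val.as_int;
  //   new_val.as_struct.state = kRunnable;
  //   ...  // CAS as_atomic_int from old_val to new_val; retry if it fails
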
  static void ThreadExitCallback(void* arg);

  // Maximum number of checkpoint functions.
  static constexpr uint32_t kMaxCheckpoints = 3;

  // Has Thread::Startup been called?
  static bool is_started_;

  // TLS key used to retrieve the Thread*.
  static pthread_key_t pthread_key_self_;

  // Used to notify threads that they should attempt to resume; they will suspend again if
  // their suspend count is > 0.
  static ConditionVariable* resume_cond_ GUARDED_BY(Locks::thread_suspend_count_lock_);

  /***********************************************************************************************/
  // Thread local storage. Fields are grouped by size to enable 32 <-> 64 searching to account for
  // pointer size differences. To encourage shorter encoding, more frequently used values appear
  // first if possible.
  /***********************************************************************************************/

  struct PACKED(4) tls_32bit_sized_values {
    // We have no control over the size of 'bool', but want our boolean fields
    // to be 4-byte quantities.
    typedef uint32_t bool32_t;

    explicit tls_32bit_sized_values(bool is_daemon) :
      suspend_count(0), debug_suspend_count(0), thin_lock_thread_id(0), tid(0),
      daemon(is_daemon), throwing_OutOfMemoryError(false), no_thread_suspension(0),
      thread_exit_check_count(0), is_exception_reported_to_instrumentation_(false),
      handling_signal_(false), padding_(0) {
    }

    union StateAndFlags state_and_flags;
    COMPILE_ASSERT(sizeof(union StateAndFlags) == sizeof(int32_t),
                   sizeof_state_and_flags_and_int32_are_different);

    // A non-zero value is used to tell the current thread to enter a safepoint
    // at the next poll.
    int suspend_count GUARDED_BY(Locks::thread_suspend_count_lock_);

    // How much of 'suspend_count_' is by request of the debugger, used to set things right
    // when the debugger detaches. Must be <= suspend_count_.
    int debug_suspend_count GUARDED_BY(Locks::thread_suspend_count_lock_);

    // Thin lock thread id. This is a small integer used by the thin lock implementation.
    // This is not to be confused with the native thread's tid, nor is it the value returned
    // by java.lang.Thread.getId --- this is a distinct value, used only for locking. One
    // important difference between this id and the ids visible to managed code is that these
    // ones get reused (to ensure that they fit in the number of bits available).
    uint32_t thin_lock_thread_id;

    // System thread id.
    uint32_t tid;

    // Is the thread a daemon?
    const bool32_t daemon;

    // A boolean telling us whether we're recursively throwing OOME.
    bool32_t throwing_OutOfMemoryError;

    // A positive value implies we're in a region where thread suspension isn't expected.
    uint32_t no_thread_suspension;

    // How many times has our pthread key's destructor been called?
    uint32_t thread_exit_check_count;

    // When true, this field indicates that the exception associated with this thread has already
    // been reported to instrumentation.
    bool32_t is_exception_reported_to_instrumentation_;

    // True if a signal is being handled by this thread.
    bool32_t handling_signal_;

    // Padding to make the size aligned to 8.  Remove this if we add another 32 bit field.
    int32_t padding_;
  } tls32_;

  struct PACKED(8) tls_64bit_sized_values {
    tls_64bit_sized_values() : trace_clock_base(0), deoptimization_return_value() {
    }

    // The clock base used for tracing.
    uint64_t trace_clock_base;

    // Return value used by deoptimization.
    JValue deoptimization_return_value;

    RuntimeStats stats;
  } tls64_;

  struct PACKED(4) tls_ptr_sized_values {
      tls_ptr_sized_values() : card_table(nullptr), exception(nullptr), stack_end(nullptr),
      managed_stack(), suspend_trigger(nullptr), jni_env(nullptr), self(nullptr), opeer(nullptr),
      jpeer(nullptr), stack_begin(nullptr), stack_size(0), throw_location(),
      stack_trace_sample(nullptr), wait_next(nullptr), monitor_enter_object(nullptr),
      top_handle_scope(nullptr), class_loader_override(nullptr), long_jump_context(nullptr),
      instrumentation_stack(nullptr), debug_invoke_req(nullptr), single_step_control(nullptr),
      deoptimization_shadow_frame(nullptr), shadow_frame_under_construction(nullptr), name(nullptr),
      pthread_self(0), last_no_thread_suspension_cause(nullptr), thread_local_start(nullptr),
      thread_local_pos(nullptr), thread_local_end(nullptr), thread_local_objects(0),
      thread_local_alloc_stack_top(nullptr), thread_local_alloc_stack_end(nullptr) {
    }

    // The biased card table, see CardTable for details.
    byte* card_table;

    // The pending exception or NULL.
    mirror::Throwable* exception;

    // The end of this thread's stack. This is the lowest safely-addressable address on the stack.
    // We leave extra space so there's room for the code that throws StackOverflowError.
    byte* stack_end;

    // The top of the managed stack, often manipulated directly by compiler-generated code.
    ManagedStack managed_stack;

    // In certain modes, setting this to 0 will trigger a SEGV and thus a suspend check.  It is
    // normally set to the address of itself.
    uintptr_t* suspend_trigger;

    // Every thread may have an associated JNI environment.
    JNIEnvExt* jni_env;

    // Initialized to "this". On certain architectures (such as x86) reading off of Thread::Current
    // is easy but getting the address of Thread::Current is hard. This field can be read off of
    // Thread::Current to give the address.
    Thread* self;

    // Our managed peer (an instance of java.lang.Thread). The jobject version is used during
    // thread startup, until the thread is registered; the local opeer is used thereafter.
    mirror::Object* opeer;
    jobject jpeer;

    // The "lowest addressable byte" of the stack.
    byte* stack_begin;

    // Size of the stack.
    size_t stack_size;

    // The location the current exception was thrown from.
    ThrowLocation throw_location;

    // Pointer to the previous stack trace captured by the sampling profiler.
    std::vector<mirror::ArtMethod*>* stack_trace_sample;

    // The next thread in the wait set this thread is part of or NULL if not waiting.
    Thread* wait_next;

    // If we're blocked in MonitorEnter, this is the object we're trying to lock.
    mirror::Object* monitor_enter_object;

    // Top of the linked list of handle scopes or nullptr for none.
    HandleScope* top_handle_scope;

    // Needed to get the right ClassLoader in JNI_OnLoad, but also
    // useful for testing.
    mirror::ClassLoader* class_loader_override;

    // Thread local, lazily allocated, long jump context. Used to deliver exceptions.
    Context* long_jump_context;

    // Additional stack used by method instrumentation to store method and return pc values.
    // Stored as a pointer since std::deque is not PACKED.
    std::deque<instrumentation::InstrumentationStackFrame>* instrumentation_stack;

    // JDWP invoke-during-breakpoint support.
    DebugInvokeReq* debug_invoke_req;

    // JDWP single-stepping support.
    SingleStepControl* single_step_control;

    // Shadow frame stack that is used temporarily during the deoptimization of a method.
    ShadowFrame* deoptimization_shadow_frame;

    // Shadow frame stack that is currently under construction but not yet on the stack.
    ShadowFrame* shadow_frame_under_construction;

    // A cached copy of the java.lang.Thread's name.
    std::string* name;

    // A cached pthread_t for the pthread underlying this Thread*.
    pthread_t pthread_self;

    // If no_thread_suspension_ is > 0, what is causing that assertion.
    const char* last_no_thread_suspension_cause;

    // Pending checkpoint functions, or NULL if none are pending. Installation is guarded by
    // Locks::thread_suspend_count_lock_.
    Closure* checkpoint_functions[kMaxCheckpoints];

    // Entrypoint function pointers.
    // TODO: move this to more of a global offset table model to avoid per-thread duplication.
    InterpreterEntryPoints interpreter_entrypoints;
    JniEntryPoints jni_entrypoints;
    PortableEntryPoints portable_entrypoints;
    QuickEntryPoints quick_entrypoints;

    // Thread-local allocation pointers.
    byte* thread_local_start;
    byte* thread_local_pos;
    byte* thread_local_end;
    size_t thread_local_objects;

    // There are RosAlloc::kNumThreadLocalSizeBrackets thread-local size brackets per thread.
    void* rosalloc_runs[kNumRosAllocThreadLocalSizeBrackets];

    // Thread-local allocation stack data/routines.
    mirror::Object** thread_local_alloc_stack_top;
    mirror::Object** thread_local_alloc_stack_end;

    // Support for Mutex lock hierarchy bug detection.
    BaseMutex* held_mutexes[kLockLevelCount];

    // Recorded thread state for nested signals.
    jmp_buf* nested_signal_state;
  } tlsPtr_;

  // Guards the 'interrupted_' and 'wait_monitor_' members.
  Mutex* wait_mutex_ DEFAULT_MUTEX_ACQUIRED_AFTER;

  // Condition variable waited upon during a wait.
  ConditionVariable* wait_cond_ GUARDED_BY(wait_mutex_);
  // Pointer to the monitor lock we're currently waiting on or NULL if not waiting.
  Monitor* wait_monitor_ GUARDED_BY(wait_mutex_);

  // Thread "interrupted" status; stays raised until queried or thrown.
  bool interrupted_ GUARDED_BY(wait_mutex_);

  friend class Dbg;  // For SetStateUnsafe.
  friend class gc::collector::SemiSpace;  // For getting stack traces.
  friend class Runtime;  // For CreatePeer.
  friend class QuickExceptionHandler;  // For dumping the stack.
  friend class ScopedThreadStateChange;
  friend class SignalCatcher;  // For SetStateUnsafe.
  friend class StubTest;  // For accessing entrypoints.
  friend class ThreadList;  // For ~Thread and Destroy.

  friend class EntrypointsOrderTest;  // To test the order of tls entries.

  DISALLOW_COPY_AND_ASSIGN(Thread);
};

std::ostream& operator<<(std::ostream& os, const Thread& thread);
std::ostream& operator<<(std::ostream& os, const ThreadState& state);

}  // namespace art

#endif  // ART_RUNTIME_THREAD_H_