/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_STACK_H_
#define ART_RUNTIME_STACK_H_

#include <stdint.h>
#include <string>

#include "dex_file.h"
#include "gc_root.h"
#include "instruction_set.h"
#include "mirror/object_reference.h"
#include "throw_location.h"
#include "utils.h"
#include "verify_object.h"

namespace art {

namespace mirror {
  class ArtMethod;
  class Object;
}  // namespace mirror

class Context;
class ShadowFrame;
class HandleScope;
class ScopedObjectAccess;
class StackVisitor;
class Thread;

// The kind of vreg being accessed in calls to Set/GetVReg.
enum VRegKind {
  kReferenceVReg,
  kIntVReg,
  kFloatVReg,
  kLongLoVReg,
  kLongHiVReg,
  kDoubleLoVReg,
  kDoubleHiVReg,
  kConstant,
  kImpreciseConstant,
  kUndefined,
};
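
// A 64-bit Java value spans two consecutive vregs and is described by a low/high
// kind pair, e.g. kLongLoVReg/kLongHiVReg for a long. An illustrative sketch
// (visitor, m and v are assumed names, not part of this header):
//
//   uint64_t wide = visitor.GetVRegPair(m, v, kLongLoVReg, kLongHiVReg);  // reads vregs v and v+1
//   visitor.SetVRegPair(m, v, wide, kLongLoVReg, kLongHiVReg);            // writes both halves back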

/**
 * @brief Represents the virtual register numbers that carry special meaning.
 * @details This is used to give some virtual register numbers a specific
 * semantic meaning. It lets the compiler treat all virtual registers the
 * same way and only special-case them when needed. For example, calculating
 * SSA does not care whether a virtual register is a normal one or a compiler
 * temporary, so it can deal with both in a consistent manner. But if, for
 * example, the backend cares about temporaries because it has custom spill
 * locations, it can special-case them at that point.
 */
enum VRegBaseRegNum : int {
  /**
   * @brief Virtual registers originating from dex have number >= 0.
   */
  kVRegBaseReg = 0,

  /**
   * @brief Invalid virtual register number.
   */
  kVRegInvalid = -1,

  /**
   * @brief Used to denote the base register for compiler temporaries.
   * @details Compiler temporaries are virtual registers that do not originate
   * from dex but are created by the compiler. All virtual register numbers
   * that are <= kVRegTempBaseReg are categorized as compiler temporaries.
   */
  kVRegTempBaseReg = -2,

  /**
   * @brief Base register of the temporary that holds the method pointer.
   * @details This is a special compiler temporary because it has a specific
   * location on the stack.
   */
  kVRegMethodPtrBaseReg = kVRegTempBaseReg,

  /**
   * @brief Base register of a non-special compiler temporary.
   * @details A non-special compiler temporary is one whose spill location
   * is flexible.
   */
  kVRegNonSpecialTempBaseReg = -3,
};
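
// Under this scheme the register numbers line up as follows (an illustrative
// summary of the enum above; see also GetVRegOffset() further down):
//   reg >= 0  : dex virtual register (kVRegBaseReg and up)
//   reg == -1 : kVRegInvalid
//   reg == -2 : kVRegMethodPtrBaseReg, the method pointer temporary with a fixed stack slot
//   reg == -3 : kVRegNonSpecialTempBaseReg, the first non-special compiler temporary
//   reg <= -3 : further non-special temporaries, numbers growing downwards (-4, -5, ...)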

// A reference from the shadow stack to a MirrorType object within the Java heap.
template<class MirrorType>
class MANAGED StackReference : public mirror::ObjectReference<false, MirrorType> {
 public:
  StackReference<MirrorType>() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      : mirror::ObjectReference<false, MirrorType>(nullptr) {}

  static StackReference<MirrorType> FromMirrorPtr(MirrorType* p)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return StackReference<MirrorType>(p);
  }

 private:
  StackReference<MirrorType>(MirrorType* p) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      : mirror::ObjectReference<false, MirrorType>(p) {}
};
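
// A minimal usage sketch (the obj local is an assumed placeholder for some
// managed object already held under the mutator lock):
//
//   mirror::Object* obj = ...;
//   StackReference<mirror::Object> ref = StackReference<mirror::Object>::FromMirrorPtr(obj);
//   DCHECK_EQ(ref.AsMirrorPtr(), obj);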

// ShadowFrame has 3 possible layouts:
//  - portable - a unified array of VRegs and references. Precise references need GC maps.
//  - interpreter - separate VRegs and reference arrays. References are in the reference array.
//  - JNI - just VRegs, but where every VReg holds a reference.
class ShadowFrame {
 public:
  // Compute size of ShadowFrame in bytes assuming it has a reference array.
  static size_t ComputeSize(uint32_t num_vregs) {
    return sizeof(ShadowFrame) + (sizeof(uint32_t) * num_vregs) +
           (sizeof(StackReference<mirror::Object>) * num_vregs);
  }
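
  // For example, with num_vregs == 4 and assuming StackReference<mirror::Object>
  // is a 4-byte compressed reference, this yields
  // sizeof(ShadowFrame) + 4 * 4 + 4 * 4 = sizeof(ShadowFrame) + 32 bytes.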

  // Create ShadowFrame in heap for deoptimization.
  static ShadowFrame* Create(uint32_t num_vregs, ShadowFrame* link,
                             mirror::ArtMethod* method, uint32_t dex_pc) {
    uint8_t* memory = new uint8_t[ComputeSize(num_vregs)];
    return Create(num_vregs, link, method, dex_pc, memory);
  }

  // Create ShadowFrame for interpreter using provided memory.
  static ShadowFrame* Create(uint32_t num_vregs, ShadowFrame* link,
                             mirror::ArtMethod* method, uint32_t dex_pc, void* memory) {
    ShadowFrame* sf = new (memory) ShadowFrame(num_vregs, link, method, dex_pc, true);
    return sf;
  }
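
  // A usage sketch for the placement variant; alloca and the surrounding names
  // are illustrative assumptions, not requirements of this API:
  //
  //   void* memory = alloca(ShadowFrame::ComputeSize(num_vregs));
  //   ShadowFrame* frame =
  //       ShadowFrame::Create(num_vregs, /* link */ nullptr, method, /* dex_pc */ 0, memory);
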
  ~ShadowFrame() {}

  bool HasReferenceArray() const {
#if defined(ART_USE_PORTABLE_COMPILER)
    return (number_of_vregs_ & kHasReferenceArray) != 0;
#else
    return true;
#endif
  }

  uint32_t NumberOfVRegs() const {
#if defined(ART_USE_PORTABLE_COMPILER)
    return number_of_vregs_ & ~kHasReferenceArray;
#else
    return number_of_vregs_;
#endif
  }

  void SetNumberOfVRegs(uint32_t number_of_vregs) {
#if defined(ART_USE_PORTABLE_COMPILER)
    number_of_vregs_ = number_of_vregs | (number_of_vregs_ & kHasReferenceArray);
#else
    UNUSED(number_of_vregs);
    UNIMPLEMENTED(FATAL) << "Should only be called when portable is enabled";
#endif
  }

  uint32_t GetDexPC() const {
    return dex_pc_;
  }

  void SetDexPC(uint32_t dex_pc) {
    dex_pc_ = dex_pc;
  }

  ShadowFrame* GetLink() const {
    return link_;
  }

  void SetLink(ShadowFrame* frame) {
    DCHECK_NE(this, frame);
    link_ = frame;
  }

  int32_t GetVReg(size_t i) const {
    DCHECK_LT(i, NumberOfVRegs());
    const uint32_t* vreg = &vregs_[i];
    return *reinterpret_cast<const int32_t*>(vreg);
  }

  float GetVRegFloat(size_t i) const {
    DCHECK_LT(i, NumberOfVRegs());
    // NOTE: Strict-aliasing?
    const uint32_t* vreg = &vregs_[i];
    return *reinterpret_cast<const float*>(vreg);
  }

  int64_t GetVRegLong(size_t i) const {
    DCHECK_LT(i, NumberOfVRegs());
    const uint32_t* vreg = &vregs_[i];
    // Alignment attribute required for GCC 4.8
    typedef const int64_t unaligned_int64 __attribute__ ((aligned (4)));
    return *reinterpret_cast<unaligned_int64*>(vreg);
  }

  double GetVRegDouble(size_t i) const {
    DCHECK_LT(i, NumberOfVRegs());
    const uint32_t* vreg = &vregs_[i];
    // Alignment attribute required for GCC 4.8
    typedef const double unaligned_double __attribute__ ((aligned (4)));
    return *reinterpret_cast<unaligned_double*>(vreg);
  }

  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  mirror::Object* GetVRegReference(size_t i) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    DCHECK_LT(i, NumberOfVRegs());
    mirror::Object* ref;
    if (HasReferenceArray()) {
      ref = References()[i].AsMirrorPtr();
    } else {
      const uint32_t* vreg_ptr = &vregs_[i];
      ref = reinterpret_cast<const StackReference<mirror::Object>*>(vreg_ptr)->AsMirrorPtr();
    }
    if (kVerifyFlags & kVerifyReads) {
      VerifyObject(ref);
    }
    return ref;
  }

  // Get view of vregs as range of consecutive arguments starting at i.
  uint32_t* GetVRegArgs(size_t i) {
    return &vregs_[i];
  }

  void SetVReg(size_t i, int32_t val) {
    DCHECK_LT(i, NumberOfVRegs());
    uint32_t* vreg = &vregs_[i];
    *reinterpret_cast<int32_t*>(vreg) = val;
    // This is needed for moving collectors since these can update the vreg references if they
    // happen to agree with references in the reference array.
    if (kMovingCollector && HasReferenceArray()) {
      References()[i].Clear();
    }
  }

  void SetVRegFloat(size_t i, float val) {
    DCHECK_LT(i, NumberOfVRegs());
    uint32_t* vreg = &vregs_[i];
    *reinterpret_cast<float*>(vreg) = val;
    // This is needed for moving collectors since these can update the vreg references if they
    // happen to agree with references in the reference array.
    if (kMovingCollector && HasReferenceArray()) {
      References()[i].Clear();
    }
  }

  void SetVRegLong(size_t i, int64_t val) {
    DCHECK_LT(i, NumberOfVRegs());
    uint32_t* vreg = &vregs_[i];
    // Alignment attribute required for GCC 4.8
    typedef int64_t unaligned_int64 __attribute__ ((aligned (4)));
    *reinterpret_cast<unaligned_int64*>(vreg) = val;
    // This is needed for moving collectors since these can update the vreg references if they
    // happen to agree with references in the reference array.
    if (kMovingCollector && HasReferenceArray()) {
      References()[i].Clear();
      References()[i + 1].Clear();
    }
  }

  void SetVRegDouble(size_t i, double val) {
    DCHECK_LT(i, NumberOfVRegs());
    uint32_t* vreg = &vregs_[i];
    // Alignment attribute required for GCC 4.8
    typedef double unaligned_double __attribute__ ((aligned (4)));
    *reinterpret_cast<unaligned_double*>(vreg) = val;
    // This is needed for moving collectors since these can update the vreg references if they
    // happen to agree with references in the reference array.
    if (kMovingCollector && HasReferenceArray()) {
      References()[i].Clear();
      References()[i + 1].Clear();
    }
  }

  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  void SetVRegReference(size_t i, mirror::Object* val) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    DCHECK_LT(i, NumberOfVRegs());
    if (kVerifyFlags & kVerifyWrites) {
      VerifyObject(val);
    }
    uint32_t* vreg = &vregs_[i];
    reinterpret_cast<StackReference<mirror::Object>*>(vreg)->Assign(val);
    if (HasReferenceArray()) {
      References()[i].Assign(val);
    }
  }

  mirror::ArtMethod* GetMethod() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    DCHECK(method_ != nullptr);
    return method_;
  }

  mirror::ArtMethod** GetMethodAddress() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    DCHECK(method_ != nullptr);
    return &method_;
  }

  mirror::Object* GetThisObject() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  mirror::Object* GetThisObject(uint16_t num_ins) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  ThrowLocation GetCurrentLocationForThrow() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void SetMethod(mirror::ArtMethod* method) {
#if defined(ART_USE_PORTABLE_COMPILER)
    DCHECK(method != nullptr);
    method_ = method;
#else
    UNUSED(method);
    UNIMPLEMENTED(FATAL) << "Should only be called when portable is enabled";
#endif
  }

  bool Contains(StackReference<mirror::Object>* shadow_frame_entry_obj) const {
    if (HasReferenceArray()) {
      return ((&References()[0] <= shadow_frame_entry_obj) &&
              (shadow_frame_entry_obj <= (&References()[NumberOfVRegs() - 1])));
    } else {
      uint32_t* shadow_frame_entry = reinterpret_cast<uint32_t*>(shadow_frame_entry_obj);
      return ((&vregs_[0] <= shadow_frame_entry) &&
              (shadow_frame_entry <= (&vregs_[NumberOfVRegs() - 1])));
    }
  }

  static size_t LinkOffset() {
    return OFFSETOF_MEMBER(ShadowFrame, link_);
  }

  static size_t MethodOffset() {
    return OFFSETOF_MEMBER(ShadowFrame, method_);
  }

  static size_t DexPCOffset() {
    return OFFSETOF_MEMBER(ShadowFrame, dex_pc_);
  }

  static size_t NumberOfVRegsOffset() {
    return OFFSETOF_MEMBER(ShadowFrame, number_of_vregs_);
  }

  static size_t VRegsOffset() {
    return OFFSETOF_MEMBER(ShadowFrame, vregs_);
  }

 private:
  ShadowFrame(uint32_t num_vregs, ShadowFrame* link, mirror::ArtMethod* method,
              uint32_t dex_pc, bool has_reference_array)
      : number_of_vregs_(num_vregs), link_(link), method_(method), dex_pc_(dex_pc) {
    if (has_reference_array) {
#if defined(ART_USE_PORTABLE_COMPILER)
      CHECK_LT(num_vregs, static_cast<uint32_t>(kHasReferenceArray));
      number_of_vregs_ |= kHasReferenceArray;
#endif
      memset(vregs_, 0, num_vregs * (sizeof(uint32_t) + sizeof(StackReference<mirror::Object>)));
    } else {
      memset(vregs_, 0, num_vregs * sizeof(uint32_t));
    }
  }

  const StackReference<mirror::Object>* References() const {
    DCHECK(HasReferenceArray());
    const uint32_t* vreg_end = &vregs_[NumberOfVRegs()];
    return reinterpret_cast<const StackReference<mirror::Object>*>(vreg_end);
  }

  StackReference<mirror::Object>* References() {
    return const_cast<StackReference<mirror::Object>*>(const_cast<const ShadowFrame*>(this)->References());
  }

#if defined(ART_USE_PORTABLE_COMPILER)
  enum ShadowFrameFlag {
    kHasReferenceArray = 1ul << 31
  };
  // TODO: make const in the portable case.
  uint32_t number_of_vregs_;
#else
  const uint32_t number_of_vregs_;
#endif
  // Link to previous shadow frame or NULL.
  ShadowFrame* link_;
  mirror::ArtMethod* method_;
  uint32_t dex_pc_;
  uint32_t vregs_[0];

  DISALLOW_IMPLICIT_CONSTRUCTORS(ShadowFrame);
};

class JavaFrameRootInfo : public RootInfo {
 public:
  JavaFrameRootInfo(uint32_t thread_id, const StackVisitor* stack_visitor, size_t vreg)
     : RootInfo(kRootJavaFrame, thread_id), stack_visitor_(stack_visitor), vreg_(vreg) {
  }
  virtual void Describe(std::ostream& os) const OVERRIDE
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

 private:
  const StackVisitor* const stack_visitor_;
  const size_t vreg_;
};

// The managed stack is used to record fragments of managed code stacks. Managed code stacks
// may either be shadow frames or lists of frames using fixed frame sizes. Transition records are
// necessary for transitions between code using different frame layouts and transitions into native
// code.
class PACKED(4) ManagedStack {
 public:
  ManagedStack()
      : link_(NULL), top_shadow_frame_(NULL), top_quick_frame_(NULL), top_quick_frame_pc_(0) {}

  void PushManagedStackFragment(ManagedStack* fragment) {
    // Copy this top fragment into the given fragment.
    memcpy(fragment, this, sizeof(ManagedStack));
    // Clear this fragment, which has become the top.
    memset(this, 0, sizeof(ManagedStack));
    // Link our top fragment onto the given fragment.
    link_ = fragment;
  }

  void PopManagedStackFragment(const ManagedStack& fragment) {
    DCHECK(&fragment == link_);
    // Copy the given fragment back to the top.
    memcpy(this, &fragment, sizeof(ManagedStack));
  }
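
  // A sketch of the intended push/pop pairing around a transition (the fragment
  // local and managed_stack pointer are illustrative assumptions):
  //
  //   ManagedStack fragment;
  //   managed_stack->PushManagedStackFragment(&fragment);  // save the current tops and clear them
  //   ... run code that uses a different frame layout ...
  //   managed_stack->PopManagedStackFragment(fragment);    // restore the saved tops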

  ManagedStack* GetLink() const {
    return link_;
  }

  StackReference<mirror::ArtMethod>* GetTopQuickFrame() const {
    return top_quick_frame_;
  }

  void SetTopQuickFrame(StackReference<mirror::ArtMethod>* top) {
    DCHECK(top_shadow_frame_ == NULL);
    top_quick_frame_ = top;
  }

  uintptr_t GetTopQuickFramePc() const {
    return top_quick_frame_pc_;
  }

  void SetTopQuickFramePc(uintptr_t pc) {
    DCHECK(top_shadow_frame_ == NULL);
    top_quick_frame_pc_ = pc;
  }

  static size_t TopQuickFrameOffset() {
    return OFFSETOF_MEMBER(ManagedStack, top_quick_frame_);
  }

  static size_t TopQuickFramePcOffset() {
    return OFFSETOF_MEMBER(ManagedStack, top_quick_frame_pc_);
  }

  ShadowFrame* PushShadowFrame(ShadowFrame* new_top_frame) {
    DCHECK(top_quick_frame_ == NULL);
    ShadowFrame* old_frame = top_shadow_frame_;
    top_shadow_frame_ = new_top_frame;
    new_top_frame->SetLink(old_frame);
    return old_frame;
  }

  ShadowFrame* PopShadowFrame() {
    DCHECK(top_quick_frame_ == NULL);
    CHECK(top_shadow_frame_ != NULL);
    ShadowFrame* frame = top_shadow_frame_;
    top_shadow_frame_ = frame->GetLink();
    return frame;
  }
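
  // A push/pop sketch for shadow frames (frame construction elided; new_frame
  // and managed_stack are illustrative names):
  //
  //   managed_stack->PushShadowFrame(new_frame);   // links new_frame onto the previous top
  //   ... interpret the method ...
  //   ShadowFrame* popped = managed_stack->PopShadowFrame();
  //   DCHECK_EQ(popped, new_frame);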

  ShadowFrame* GetTopShadowFrame() const {
    return top_shadow_frame_;
  }

  void SetTopShadowFrame(ShadowFrame* top) {
    DCHECK(top_quick_frame_ == NULL);
    top_shadow_frame_ = top;
  }

  static size_t TopShadowFrameOffset() {
    return OFFSETOF_MEMBER(ManagedStack, top_shadow_frame_);
  }

  size_t NumJniShadowFrameReferences() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  bool ShadowFramesContain(StackReference<mirror::Object>* shadow_frame_entry) const;

 private:
  ManagedStack* link_;
  ShadowFrame* top_shadow_frame_;
  StackReference<mirror::ArtMethod>* top_quick_frame_;
  uintptr_t top_quick_frame_pc_;
};

class StackVisitor {
 protected:
  StackVisitor(Thread* thread, Context* context) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

 public:
  virtual ~StackVisitor() {}

  // Return 'true' if we should continue to visit more frames, 'false' to stop.
  virtual bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;

  void WalkStack(bool include_transitions = false)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

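  // A minimal visitor sketch that counts frames with a method, similar in spirit
  // to ComputeNumFrames() below. FrameCounter and self are illustrative names;
  // the nullptr Context means no register context is needed:
  //
  //   struct FrameCounter : public StackVisitor {
  //     explicit FrameCounter(Thread* thread) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
  //         : StackVisitor(thread, nullptr), frames(0) {}
  //     bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE {
  //       if (GetMethod() != nullptr) {
  //         ++frames;
  //       }
  //       return true;  // keep walking
  //     }
  //     size_t frames;
  //   };
  //   FrameCounter counter(self);  // self: the current Thread*
  //   counter.WalkStack();
  //   // counter.frames now holds the number of method frames visited.
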
  mirror::ArtMethod* GetMethod() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    if (cur_shadow_frame_ != nullptr) {
      return cur_shadow_frame_->GetMethod();
    } else if (cur_quick_frame_ != nullptr) {
      return cur_quick_frame_->AsMirrorPtr();
    } else {
      return nullptr;
    }
  }

  bool IsShadowFrame() const {
    return cur_shadow_frame_ != nullptr;
  }

  uint32_t GetDexPc(bool abort_on_failure = true) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  mirror::Object* GetThisObject() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  size_t GetNativePcOffset() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  uintptr_t* CalleeSaveAddress(int num, size_t frame_size) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    // Callee saves are held at the top of the frame.
    DCHECK(GetMethod() != nullptr);
    byte* save_addr =
        reinterpret_cast<byte*>(cur_quick_frame_) + frame_size - ((num + 1) * kPointerSize);
#if defined(__i386__) || defined(__x86_64__)
    save_addr -= kPointerSize;  // account for return address
#endif
    return reinterpret_cast<uintptr_t*>(save_addr);
  }

  // Returns the height of the stack in the managed stack frames, including transitions.
  size_t GetFrameHeight() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return GetNumFrames() - cur_depth_ - 1;
  }

  // Returns a frame ID for JDWP use, starting from 1.
  size_t GetFrameId() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return GetFrameHeight() + 1;
  }

  size_t GetNumFrames() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    if (num_frames_ == 0) {
      num_frames_ = ComputeNumFrames(thread_);
    }
    return num_frames_;
  }

  size_t GetFrameDepth() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return cur_depth_;
  }

  // Get the method and dex pc immediately after the one that's currently being visited.
  bool GetNextMethodAndDexPc(mirror::ArtMethod** next_method, uint32_t* next_dex_pc)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  bool GetVReg(mirror::ArtMethod* m, uint16_t vreg, VRegKind kind, uint32_t* val) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  uint32_t GetVReg(mirror::ArtMethod* m, uint16_t vreg, VRegKind kind) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    uint32_t val;
    bool success = GetVReg(m, vreg, kind, &val);
    CHECK(success) << "Failed to read vreg " << vreg << " of kind " << kind;
    return val;
  }

  bool GetVRegPair(mirror::ArtMethod* m, uint16_t vreg, VRegKind kind_lo, VRegKind kind_hi,
                   uint64_t* val) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  uint64_t GetVRegPair(mirror::ArtMethod* m, uint16_t vreg, VRegKind kind_lo,
                       VRegKind kind_hi) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    uint64_t val;
    bool success = GetVRegPair(m, vreg, kind_lo, kind_hi, &val);
    CHECK(success) << "Failed to read vreg pair " << vreg
                   << " of kind [" << kind_lo << "," << kind_hi << "]";
    return val;
  }
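
  // For instance, inside a VisitFrame() implementation (the vreg numbers and the
  // local m are illustrative; the kind must match how the register is used):
  //
  //   mirror::ArtMethod* m = GetMethod();
  //   uint32_t ival = GetVReg(m, 0, kIntVReg);
  //   uint64_t lval = GetVRegPair(m, 2, kLongLoVReg, kLongHiVReg);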

  bool SetVReg(mirror::ArtMethod* m, uint16_t vreg, uint32_t new_value, VRegKind kind)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  bool SetVRegPair(mirror::ArtMethod* m, uint16_t vreg, uint64_t new_value,
                   VRegKind kind_lo, VRegKind kind_hi)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  uintptr_t* GetGPRAddress(uint32_t reg) const;

  // This is a fast-path for getting/setting values in a quick frame.
  uint32_t* GetVRegAddr(StackReference<mirror::ArtMethod>* cur_quick_frame,
                        const DexFile::CodeItem* code_item,
                        uint32_t core_spills, uint32_t fp_spills, size_t frame_size,
                        uint16_t vreg) const {
    int offset = GetVRegOffset(code_item, core_spills, fp_spills, frame_size, vreg, kRuntimeISA);
    DCHECK_EQ(cur_quick_frame, GetCurrentQuickFrame());
    byte* vreg_addr = reinterpret_cast<byte*>(cur_quick_frame) + offset;
    return reinterpret_cast<uint32_t*>(vreg_addr);
  }

  uintptr_t GetReturnPc() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void SetReturnPc(uintptr_t new_ret_pc) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  /*
   * Return the offset, in bytes, relative to sp (which points at the callee's
   * StackReference<ArtMethod>) for a Dalvik virtual register, compiler spill or Method*.
   * Note that (reg >= 0) refers to a Dalvik register, (reg == -1)
   * denotes an invalid Dalvik register, (reg == -2) denotes Method*
   * and (reg <= -3) denotes a compiler temporary. A compiler temporary
   * can be thought of as a virtual register that does not exist in the
   * dex but holds intermediate values to help optimizations and code
   * generation. A special compiler temporary is one whose location
   * in the frame is well known, while non-special ones have no requirement
   * on their frame location as long as the code generator itself knows how
   * to access them.
   *
   *     +---------------------------+
   *     | IN[ins-1]                 |  {Note: resides in caller's frame}
   *     |       .                   |
   *     | IN[0]                     |
   *     | caller's ArtMethod        |  ... StackReference<ArtMethod>
   *     +===========================+  {Note: start of callee's frame}
   *     | core callee-save spill    |  {variable sized}
   *     +---------------------------+
   *     | fp callee-save spill      |
   *     +---------------------------+
   *     | filler word               |  {For compatibility, if V[locals-1] used as wide}
   *     +---------------------------+
   *     | V[locals-1]               |
   *     | V[locals-2]               |
   *     |      .                    |
   *     |      .                    |  ... (reg == 2)
   *     | V[1]                      |  ... (reg == 1)
   *     | V[0]                      |  ... (reg == 0) <---- "locals_start"
   *     +---------------------------+
   *     | Compiler temp region      |  ... (reg <= -3)
   *     |                           |
   *     |                           |
   *     +---------------------------+
   *     | stack alignment padding   |  {0 to (kStackAlignWords-1) of padding}
   *     +---------------------------+
   *     | OUT[outs-1]               |
   *     | OUT[outs-2]               |
   *     |       .                   |
   *     | OUT[0]                    |
   *     | StackReference<ArtMethod> |  ... (reg == -2) <<== sp, 16-byte aligned
   *     +===========================+
   */
  static int GetVRegOffset(const DexFile::CodeItem* code_item,
                           uint32_t core_spills, uint32_t fp_spills,
                           size_t frame_size, int reg, InstructionSet isa) {
    DCHECK_EQ(frame_size & (kStackAlignment - 1), 0U);
    DCHECK_NE(reg, static_cast<int>(kVRegInvalid));
    int spill_size = POPCOUNT(core_spills) * GetBytesPerGprSpillLocation(isa)
        + POPCOUNT(fp_spills) * GetBytesPerFprSpillLocation(isa)
        + sizeof(uint32_t);  // Filler.
    int num_ins = code_item->ins_size_;
    int num_regs = code_item->registers_size_ - num_ins;
    int locals_start = frame_size - spill_size - num_regs * sizeof(uint32_t);
    if (reg == static_cast<int>(kVRegMethodPtrBaseReg)) {
      // The current method pointer corresponds to a special location on the stack.
      return 0;
    } else if (reg <= static_cast<int>(kVRegNonSpecialTempBaseReg)) {
      /*
       * Special temporaries may have custom locations and the logic above deals with that.
       * However, non-special temporaries are placed relative to the locals. Since the
       * virtual register numbers for temporaries "grow" in the negative direction, the reg
       * number will always be <= the temp base reg. Thus, the logic ensures that the first
       * temp is at offset -4 bytes from locals, the second is at -8 bytes from locals,
       * and so on.
       */
      int relative_offset =
          (reg + std::abs(static_cast<int>(kVRegNonSpecialTempBaseReg)) - 1) * sizeof(uint32_t);
      return locals_start + relative_offset;
    } else if (reg < num_regs) {
      return locals_start + (reg * sizeof(uint32_t));
    } else {
      // Handle ins.
      return frame_size + ((reg - num_regs) * sizeof(uint32_t)) +
          sizeof(StackReference<mirror::ArtMethod>);
    }
  }
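
  // A worked example under assumed inputs: a 32-bit ISA with 4-byte spill slots,
  // one core and one fp callee-save spill, frame_size == 64, registers_size_ == 6
  // and ins_size_ == 2 (so num_regs == 4):
  //   spill_size   = 4 + 4 + 4 (filler)                 = 12
  //   locals_start = 64 - 12 - 4 * 4                    = 36
  //   reg == 0  -> 36,  reg == 3 -> 48                    (locals)
  //   reg == -2 -> 0                                      (Method*)
  //   reg == -3 -> 36 + (-3 + 3 - 1) * 4 = 32             (first compiler temp)
  //   reg == 4  -> 64 + 0 + sizeof(StackReference<mirror::ArtMethod>)  (first in)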

  static int GetOutVROffset(uint16_t out_num, InstructionSet isa) {
    // According to the stack model, the first out is above the Method reference.
    return sizeof(StackReference<mirror::ArtMethod>) + (out_num * sizeof(uint32_t));
  }

  uintptr_t GetCurrentQuickFramePc() const {
    return cur_quick_frame_pc_;
  }

  StackReference<mirror::ArtMethod>* GetCurrentQuickFrame() const {
    return cur_quick_frame_;
  }

  ShadowFrame* GetCurrentShadowFrame() const {
    return cur_shadow_frame_;
  }

  HandleScope* GetCurrentHandleScope() const {
    StackReference<mirror::ArtMethod>* sp = GetCurrentQuickFrame();
    ++sp;  // Skip Method*; the handle scope comes next.
    return reinterpret_cast<HandleScope*>(sp);
  }

  std::string DescribeLocation() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  static size_t ComputeNumFrames(Thread* thread) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  static void DescribeStack(Thread* thread) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

 private:
  // Private constructor used when num_frames_ has already been computed.
  StackVisitor(Thread* thread, Context* context, size_t num_frames)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  bool GetGPR(uint32_t reg, uintptr_t* val) const;
  bool SetGPR(uint32_t reg, uintptr_t value);
  bool GetFPR(uint32_t reg, uintptr_t* val) const;
  bool SetFPR(uint32_t reg, uintptr_t value);

  void SanityCheckFrame() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  Thread* const thread_;
  ShadowFrame* cur_shadow_frame_;
  StackReference<mirror::ArtMethod>* cur_quick_frame_;
  uintptr_t cur_quick_frame_pc_;
  // Lazily computed number of frames in the stack.
  size_t num_frames_;
  // Depth of the frame we're currently at.
  size_t cur_depth_;

 protected:
  Context* const context_;
};

}  // namespace art

#endif  // ART_RUNTIME_STACK_H_