/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_STACK_H_
#define ART_RUNTIME_STACK_H_

#include <stdint.h>
#include <string>

#include "dex_file.h"
#include "instruction_set.h"
#include "mirror/object_reference.h"
#include "throw_location.h"
#include "utils.h"
#include "verify_object.h"

namespace art {

namespace mirror {
  class ArtMethod;
  class Object;
}  // namespace mirror

class Context;
class ShadowFrame;
class HandleScope;
class ScopedObjectAccess;
class Thread;

// The kind of vreg being accessed in calls to Set/GetVReg.
enum VRegKind {
  kReferenceVReg,
  kIntVReg,
  kFloatVReg,
  kLongLoVReg,
  kLongHiVReg,
  kDoubleLoVReg,
  kDoubleHiVReg,
  kConstant,
  kImpreciseConstant,
  kUndefined,
};

/**
 * @brief Represents virtual register numbers that carry special meaning.
 * @details This allows certain virtual register numbers to have a specific
 * semantic meaning, so that the compiler can treat all virtual registers
 * uniformly and only special-case them when needed. For example, SSA
 * calculation does not care whether a virtual register is a normal one or
 * a compiler temporary, so it can deal with them in a consistent manner.
 * But if, for example, a backend cares about temporaries because it has
 * custom spill locations, it can special-case them just there.
 */
enum VRegBaseRegNum : int {
  /**
   * @brief Virtual registers originating from dex have numbers >= 0.
   */
  kVRegBaseReg = 0,

  /**
   * @brief Invalid virtual register number.
   */
  kVRegInvalid = -1,

  /**
   * @brief Used to denote the base register for compiler temporaries.
   * @details Compiler temporaries are virtual registers not originating
   * from dex but created by the compiler. All virtual register numbers
   * that are <= kVRegTempBaseReg are categorized as compiler temporaries.
   */
  kVRegTempBaseReg = -2,

  /**
   * @brief Base register of the temporary that holds the method pointer.
   * @details This is a special compiler temporary because it has a specific
   * location on the stack.
   */
  kVRegMethodPtrBaseReg = kVRegTempBaseReg,

  /**
   * @brief Base register of a non-special compiler temporary.
   * @details A non-special compiler temporary is one whose spill location
   * is flexible.
   */
  kVRegNonSpecialTempBaseReg = -3,
};
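
// For illustration (not part of the original header), this is how the enum
// values above partition the virtual register number range:
//
//   reg >= 0  (kVRegBaseReg)                -> dex virtual register
//   reg == -1 (kVRegInvalid)                -> invalid register number
//   reg == -2 (kVRegMethodPtrBaseReg)       -> method pointer temporary
//   reg <= -3 (kVRegNonSpecialTempBaseReg)  -> non-special compiler temporaries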

// A reference from the shadow stack to a MirrorType object within the Java heap.
template<class MirrorType>
class MANAGED StackReference : public mirror::ObjectReference<false, MirrorType> {
 public:
  StackReference<MirrorType>() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      : mirror::ObjectReference<false, MirrorType>(nullptr) {}

  static StackReference<MirrorType> FromMirrorPtr(MirrorType* p)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return StackReference<MirrorType>(p);
  }

 private:
  StackReference<MirrorType>(MirrorType* p) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      : mirror::ObjectReference<false, MirrorType>(p) {}
};
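
// Illustrative usage sketch (not part of the original header): the converting
// constructor is private, so the only way to build a non-null StackReference
// is FromMirrorPtr. 'obj' below is an assumed mirror::Object* already in hand.
//
//   StackReference<mirror::Object> ref =
//       StackReference<mirror::Object>::FromMirrorPtr(obj);
//   mirror::Object* same = ref.AsMirrorPtr();  // Round-trips back to 'obj'.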

// ShadowFrame has 3 possible layouts:
//  - portable - a unified array of VRegs and references. Precise references need GC maps.
//  - interpreter - separate VRegs and reference arrays. References are in the reference array.
//  - JNI - just VRegs, but where every VReg holds a reference.
class ShadowFrame {
 public:
  // Compute size of ShadowFrame in bytes assuming it has a reference array.
  static size_t ComputeSize(uint32_t num_vregs) {
    return sizeof(ShadowFrame) + (sizeof(uint32_t) * num_vregs) +
           (sizeof(StackReference<mirror::Object>) * num_vregs);
  }
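
  // Worked example (illustrative, assuming sizeof(StackReference<mirror::Object>)
  // is 4 bytes): for num_vregs == 4 the allocation is laid out as
  //   [ ShadowFrame header | 4 * 4 bytes of vregs | 4 * 4 bytes of refs ]
  // so ComputeSize(4) == sizeof(ShadowFrame) + 16 + 16.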

  // Create ShadowFrame in heap for deoptimization.
  static ShadowFrame* Create(uint32_t num_vregs, ShadowFrame* link,
                             mirror::ArtMethod* method, uint32_t dex_pc) {
    uint8_t* memory = new uint8_t[ComputeSize(num_vregs)];
    return Create(num_vregs, link, method, dex_pc, memory);
  }

  // Create ShadowFrame for interpreter using provided memory.
  static ShadowFrame* Create(uint32_t num_vregs, ShadowFrame* link,
                             mirror::ArtMethod* method, uint32_t dex_pc, void* memory) {
    ShadowFrame* sf = new (memory) ShadowFrame(num_vregs, link, method, dex_pc, true);
    return sf;
  }
  ~ShadowFrame() {}

  bool HasReferenceArray() const {
#if defined(ART_USE_PORTABLE_COMPILER)
    return (number_of_vregs_ & kHasReferenceArray) != 0;
#else
    return true;
#endif
  }

  uint32_t NumberOfVRegs() const {
#if defined(ART_USE_PORTABLE_COMPILER)
    return number_of_vregs_ & ~kHasReferenceArray;
#else
    return number_of_vregs_;
#endif
  }

  void SetNumberOfVRegs(uint32_t number_of_vregs) {
#if defined(ART_USE_PORTABLE_COMPILER)
    number_of_vregs_ = number_of_vregs | (number_of_vregs_ & kHasReferenceArray);
#else
    UNUSED(number_of_vregs);
    UNIMPLEMENTED(FATAL) << "Should only be called when portable is enabled";
#endif
  }

  uint32_t GetDexPC() const {
    return dex_pc_;
  }

  void SetDexPC(uint32_t dex_pc) {
    dex_pc_ = dex_pc;
  }

  ShadowFrame* GetLink() const {
    return link_;
  }

  void SetLink(ShadowFrame* frame) {
    DCHECK_NE(this, frame);
    link_ = frame;
  }

  int32_t GetVReg(size_t i) const {
    DCHECK_LT(i, NumberOfVRegs());
    const uint32_t* vreg = &vregs_[i];
    return *reinterpret_cast<const int32_t*>(vreg);
  }

  float GetVRegFloat(size_t i) const {
    DCHECK_LT(i, NumberOfVRegs());
    // NOTE: Strict-aliasing?
    const uint32_t* vreg = &vregs_[i];
    return *reinterpret_cast<const float*>(vreg);
  }

  int64_t GetVRegLong(size_t i) const {
    DCHECK_LT(i, NumberOfVRegs());
    const uint32_t* vreg = &vregs_[i];
    // Alignment attribute required for GCC 4.8
    typedef const int64_t unaligned_int64 __attribute__ ((aligned (4)));
    return *reinterpret_cast<unaligned_int64*>(vreg);
  }

  double GetVRegDouble(size_t i) const {
    DCHECK_LT(i, NumberOfVRegs());
    const uint32_t* vreg = &vregs_[i];
    // Alignment attribute required for GCC 4.8
    typedef const double unaligned_double __attribute__ ((aligned (4)));
    return *reinterpret_cast<unaligned_double*>(vreg);
  }

  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  mirror::Object* GetVRegReference(size_t i) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    DCHECK_LT(i, NumberOfVRegs());
    mirror::Object* ref;
    if (HasReferenceArray()) {
      ref = References()[i].AsMirrorPtr();
    } else {
      const uint32_t* vreg_ptr = &vregs_[i];
      ref = reinterpret_cast<const StackReference<mirror::Object>*>(vreg_ptr)->AsMirrorPtr();
    }
    if (kVerifyFlags & kVerifyReads) {
      VerifyObject(ref);
    }
    return ref;
  }

  // Get view of vregs as range of consecutive arguments starting at i.
  uint32_t* GetVRegArgs(size_t i) {
    return &vregs_[i];
  }

  void SetVReg(size_t i, int32_t val) {
    DCHECK_LT(i, NumberOfVRegs());
    uint32_t* vreg = &vregs_[i];
    *reinterpret_cast<int32_t*>(vreg) = val;
    // This is needed for moving collectors, since they can update vreg slots
    // whose values happen to agree with entries in the reference array.
    if (kMovingCollector && HasReferenceArray()) {
      References()[i].Clear();
    }
  }

  void SetVRegFloat(size_t i, float val) {
    DCHECK_LT(i, NumberOfVRegs());
    uint32_t* vreg = &vregs_[i];
    *reinterpret_cast<float*>(vreg) = val;
    // This is needed for moving collectors, since they can update vreg slots
    // whose values happen to agree with entries in the reference array.
    if (kMovingCollector && HasReferenceArray()) {
      References()[i].Clear();
    }
  }

  void SetVRegLong(size_t i, int64_t val) {
    DCHECK_LT(i, NumberOfVRegs());
    uint32_t* vreg = &vregs_[i];
    // Alignment attribute required for GCC 4.8
    typedef int64_t unaligned_int64 __attribute__ ((aligned (4)));
    *reinterpret_cast<unaligned_int64*>(vreg) = val;
    // This is needed for moving collectors, since they can update vreg slots
    // whose values happen to agree with entries in the reference array.
    if (kMovingCollector && HasReferenceArray()) {
      References()[i].Clear();
      References()[i + 1].Clear();
    }
  }

  void SetVRegDouble(size_t i, double val) {
    DCHECK_LT(i, NumberOfVRegs());
    uint32_t* vreg = &vregs_[i];
    // Alignment attribute required for GCC 4.8
    typedef double unaligned_double __attribute__ ((aligned (4)));
    *reinterpret_cast<unaligned_double*>(vreg) = val;
    // This is needed for moving collectors, since they can update vreg slots
    // whose values happen to agree with entries in the reference array.
    if (kMovingCollector && HasReferenceArray()) {
      References()[i].Clear();
      References()[i + 1].Clear();
    }
  }
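
  // Illustrative sketch (not part of the original header): wide values occupy
  // two consecutive vreg slots, which is why the wide setters above clear both
  // References()[i] and References()[i + 1]. Given a frame 'sf' with at least
  // two vregs:
  //
  //   sf->SetVRegLong(0, INT64_C(0x123456789));  // Occupies vregs 0 and 1.
  //   int64_t v = sf->GetVRegLong(0);            // Reassembles both halves.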

  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  void SetVRegReference(size_t i, mirror::Object* val) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    DCHECK_LT(i, NumberOfVRegs());
    if (kVerifyFlags & kVerifyWrites) {
      VerifyObject(val);
    }
    uint32_t* vreg = &vregs_[i];
    reinterpret_cast<StackReference<mirror::Object>*>(vreg)->Assign(val);
    if (HasReferenceArray()) {
      References()[i].Assign(val);
    }
  }
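
  // Note the dual write above: the reference lands in the vreg slot and, when
  // a reference array is present, in References()[i] as well. An illustrative
  // consequence (an inference, not stated in the original header) is that a
  // collector scanning References() sees every live reference, while the
  // primitive setters above keep stale entries from lingering there.
  //
  //   sf->SetVRegReference(2, obj);                 // 'obj' is assumed.
  //   mirror::Object* o = sf->GetVRegReference(2);  // Reads the ref array.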

  mirror::ArtMethod* GetMethod() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    DCHECK(method_ != nullptr);
    return method_;
  }

  mirror::ArtMethod** GetMethodAddress() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    DCHECK(method_ != nullptr);
    return &method_;
  }

  mirror::Object* GetThisObject() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  mirror::Object* GetThisObject(uint16_t num_ins) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  ThrowLocation GetCurrentLocationForThrow() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void SetMethod(mirror::ArtMethod* method) {
#if defined(ART_USE_PORTABLE_COMPILER)
    DCHECK(method != nullptr);
    method_ = method;
#else
    UNUSED(method);
    UNIMPLEMENTED(FATAL) << "Should only be called when portable is enabled";
#endif
  }

  bool Contains(StackReference<mirror::Object>* shadow_frame_entry_obj) const {
    if (HasReferenceArray()) {
      return ((&References()[0] <= shadow_frame_entry_obj) &&
              (shadow_frame_entry_obj <= (&References()[NumberOfVRegs() - 1])));
    } else {
      uint32_t* shadow_frame_entry = reinterpret_cast<uint32_t*>(shadow_frame_entry_obj);
      return ((&vregs_[0] <= shadow_frame_entry) &&
              (shadow_frame_entry <= (&vregs_[NumberOfVRegs() - 1])));
    }
  }

  static size_t LinkOffset() {
    return OFFSETOF_MEMBER(ShadowFrame, link_);
  }

  static size_t MethodOffset() {
    return OFFSETOF_MEMBER(ShadowFrame, method_);
  }

  static size_t DexPCOffset() {
    return OFFSETOF_MEMBER(ShadowFrame, dex_pc_);
  }

  static size_t NumberOfVRegsOffset() {
    return OFFSETOF_MEMBER(ShadowFrame, number_of_vregs_);
  }

  static size_t VRegsOffset() {
    return OFFSETOF_MEMBER(ShadowFrame, vregs_);
  }

 private:
  ShadowFrame(uint32_t num_vregs, ShadowFrame* link, mirror::ArtMethod* method,
              uint32_t dex_pc, bool has_reference_array)
      : number_of_vregs_(num_vregs), link_(link), method_(method), dex_pc_(dex_pc) {
    if (has_reference_array) {
#if defined(ART_USE_PORTABLE_COMPILER)
      CHECK_LT(num_vregs, static_cast<uint32_t>(kHasReferenceArray));
      number_of_vregs_ |= kHasReferenceArray;
#endif
      memset(vregs_, 0, num_vregs * (sizeof(uint32_t) + sizeof(StackReference<mirror::Object>)));
    } else {
      memset(vregs_, 0, num_vregs * sizeof(uint32_t));
    }
  }

  const StackReference<mirror::Object>* References() const {
    DCHECK(HasReferenceArray());
    const uint32_t* vreg_end = &vregs_[NumberOfVRegs()];
    return reinterpret_cast<const StackReference<mirror::Object>*>(vreg_end);
  }

  StackReference<mirror::Object>* References() {
    return const_cast<StackReference<mirror::Object>*>(
        const_cast<const ShadowFrame*>(this)->References());
  }

#if defined(ART_USE_PORTABLE_COMPILER)
  enum ShadowFrameFlag {
    kHasReferenceArray = 1ul << 31
  };
  // TODO: make const in the portable case.
  uint32_t number_of_vregs_;
#else
  const uint32_t number_of_vregs_;
#endif
  // Link to previous shadow frame or NULL.
  ShadowFrame* link_;
  mirror::ArtMethod* method_;
  uint32_t dex_pc_;
  uint32_t vregs_[0];

  DISALLOW_IMPLICIT_CONSTRUCTORS(ShadowFrame);
};

// The managed stack is used to record fragments of managed code stacks. Managed code stacks
// may either be shadow frames or lists of frames using fixed frame sizes. Transition records are
// necessary for transitions between code using different frame layouts and transitions into native
// code.
class PACKED(4) ManagedStack {
 public:
  ManagedStack()
      : link_(NULL), top_shadow_frame_(NULL), top_quick_frame_(NULL), top_quick_frame_pc_(0) {}

  void PushManagedStackFragment(ManagedStack* fragment) {
    // Copy this top fragment into the given fragment.
    memcpy(fragment, this, sizeof(ManagedStack));
    // Clear this fragment, which has become the top.
    memset(this, 0, sizeof(ManagedStack));
    // Link our top fragment onto the given fragment.
    link_ = fragment;
  }

  void PopManagedStackFragment(const ManagedStack& fragment) {
    DCHECK(&fragment == link_);
    // Copy the given fragment back to the top.
    memcpy(this, &fragment, sizeof(ManagedStack));
  }
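
  // Illustrative pairing sketch (not part of the original header): fragments
  // push and pop in strict LIFO order, typically around a transition into code
  // with a different frame layout. 'stack' is an assumed ManagedStack*.
  //
  //   ManagedStack fragment;
  //   stack->PushManagedStackFragment(&fragment);  // Save state, clear top.
  //   // ... run code that uses a different frame layout ...
  //   stack->PopManagedStackFragment(fragment);    // Restore saved state.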

  ManagedStack* GetLink() const {
    return link_;
  }

  StackReference<mirror::ArtMethod>* GetTopQuickFrame() const {
    return top_quick_frame_;
  }

  void SetTopQuickFrame(StackReference<mirror::ArtMethod>* top) {
    DCHECK(top_shadow_frame_ == NULL);
    top_quick_frame_ = top;
  }

  uintptr_t GetTopQuickFramePc() const {
    return top_quick_frame_pc_;
  }

  void SetTopQuickFramePc(uintptr_t pc) {
    DCHECK(top_shadow_frame_ == NULL);
    top_quick_frame_pc_ = pc;
  }

  static size_t TopQuickFrameOffset() {
    return OFFSETOF_MEMBER(ManagedStack, top_quick_frame_);
  }

  static size_t TopQuickFramePcOffset() {
    return OFFSETOF_MEMBER(ManagedStack, top_quick_frame_pc_);
  }

  ShadowFrame* PushShadowFrame(ShadowFrame* new_top_frame) {
    DCHECK(top_quick_frame_ == NULL);
    ShadowFrame* old_frame = top_shadow_frame_;
    top_shadow_frame_ = new_top_frame;
    new_top_frame->SetLink(old_frame);
    return old_frame;
  }

  ShadowFrame* PopShadowFrame() {
    DCHECK(top_quick_frame_ == NULL);
    CHECK(top_shadow_frame_ != NULL);
    ShadowFrame* frame = top_shadow_frame_;
    top_shadow_frame_ = frame->GetLink();
    return frame;
  }
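
  // Illustrative sketch (not part of the original header): shadow frames form
  // a singly linked list through their link_ field, so a push threads the new
  // frame onto the old top and a pop follows the link back. 'ms' and 'frame'
  // are assumed to exist.
  //
  //   ShadowFrame* prev = ms->PushShadowFrame(frame);  // frame->GetLink() == prev
  //   ShadowFrame* same = ms->PopShadowFrame();        // same == frame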

  ShadowFrame* GetTopShadowFrame() const {
    return top_shadow_frame_;
  }

  void SetTopShadowFrame(ShadowFrame* top) {
    DCHECK(top_quick_frame_ == NULL);
    top_shadow_frame_ = top;
  }

  static size_t TopShadowFrameOffset() {
    return OFFSETOF_MEMBER(ManagedStack, top_shadow_frame_);
  }

  size_t NumJniShadowFrameReferences() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  bool ShadowFramesContain(StackReference<mirror::Object>* shadow_frame_entry) const;

 private:
  ManagedStack* link_;
  ShadowFrame* top_shadow_frame_;
  StackReference<mirror::ArtMethod>* top_quick_frame_;
  uintptr_t top_quick_frame_pc_;
};

class StackVisitor {
 protected:
  StackVisitor(Thread* thread, Context* context) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

 public:
  virtual ~StackVisitor() {}

  // Return 'true' if we should continue to visit more frames, 'false' to stop.
  virtual bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;

  void WalkStack(bool include_transitions = false)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
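
  // Illustrative subclass sketch (not part of the original header): WalkStack
  // calls VisitFrame once per frame until it returns false. 'thread' and
  // 'context' are assumed to come from the caller.
  //
  //   struct FrameCounter : public StackVisitor {
  //     FrameCounter(Thread* thread, Context* context)
  //         SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
  //         : StackVisitor(thread, context), count(0) {}
  //     bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  //       count++;
  //       return true;  // Keep walking to the bottom of the stack.
  //     }
  //     size_t count;
  //   };
  //
  //   FrameCounter counter(thread, context);
  //   counter.WalkStack();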

  mirror::ArtMethod* GetMethod() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    if (cur_shadow_frame_ != nullptr) {
      return cur_shadow_frame_->GetMethod();
    } else if (cur_quick_frame_ != nullptr) {
      return cur_quick_frame_->AsMirrorPtr();
    } else {
      return nullptr;
    }
  }

  bool IsShadowFrame() const {
    return cur_shadow_frame_ != nullptr;
  }

  uint32_t GetDexPc(bool abort_on_failure = true) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  mirror::Object* GetThisObject() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  size_t GetNativePcOffset() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  uintptr_t* CalleeSaveAddress(int num, size_t frame_size) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    // Callee saves are held at the top of the frame
    DCHECK(GetMethod() != nullptr);
    byte* save_addr =
        reinterpret_cast<byte*>(cur_quick_frame_) + frame_size - ((num + 1) * kPointerSize);
#if defined(__i386__) || defined(__x86_64__)
    save_addr -= kPointerSize;  // account for return address
#endif
    return reinterpret_cast<uintptr_t*>(save_addr);
  }

  // Returns the height of the stack in the managed stack frames, including transitions.
  size_t GetFrameHeight() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return GetNumFrames() - cur_depth_ - 1;
  }

  // Returns a frame ID for JDWP use, starting from 1.
  size_t GetFrameId() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return GetFrameHeight() + 1;
  }

  size_t GetNumFrames() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    if (num_frames_ == 0) {
      num_frames_ = ComputeNumFrames(thread_);
    }
    return num_frames_;
  }

  size_t GetFrameDepth() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return cur_depth_;
  }

  // Get the method and dex pc immediately after the one that's currently being visited.
  bool GetNextMethodAndDexPc(mirror::ArtMethod** next_method, uint32_t* next_dex_pc)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  bool GetVReg(mirror::ArtMethod* m, uint16_t vreg, VRegKind kind, uint32_t* val) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  uint32_t GetVReg(mirror::ArtMethod* m, uint16_t vreg, VRegKind kind) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    uint32_t val;
    bool success = GetVReg(m, vreg, kind, &val);
    CHECK(success) << "Failed to read vreg " << vreg << " of kind " << kind;
    return val;
  }

  bool GetVRegPair(mirror::ArtMethod* m, uint16_t vreg, VRegKind kind_lo, VRegKind kind_hi,
                   uint64_t* val) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  uint64_t GetVRegPair(mirror::ArtMethod* m, uint16_t vreg, VRegKind kind_lo,
                       VRegKind kind_hi) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    uint64_t val;
    bool success = GetVRegPair(m, vreg, kind_lo, kind_hi, &val);
    CHECK(success) << "Failed to read vreg pair " << vreg
                   << " of kind [" << kind_lo << "," << kind_hi << "]";
    return val;
  }

  bool SetVReg(mirror::ArtMethod* m, uint16_t vreg, uint32_t new_value, VRegKind kind)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  bool SetVRegPair(mirror::ArtMethod* m, uint16_t vreg, uint64_t new_value,
                   VRegKind kind_lo, VRegKind kind_hi)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  uintptr_t* GetGPRAddress(uint32_t reg) const;

  // This is a fast-path for getting/setting values in a quick frame.
  uint32_t* GetVRegAddr(StackReference<mirror::ArtMethod>* cur_quick_frame,
                        const DexFile::CodeItem* code_item,
                        uint32_t core_spills, uint32_t fp_spills, size_t frame_size,
                        uint16_t vreg) const {
    int offset = GetVRegOffset(code_item, core_spills, fp_spills, frame_size, vreg, kRuntimeISA);
    DCHECK_EQ(cur_quick_frame, GetCurrentQuickFrame());
    byte* vreg_addr = reinterpret_cast<byte*>(cur_quick_frame) + offset;
    return reinterpret_cast<uint32_t*>(vreg_addr);
  }

  uintptr_t GetReturnPc() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void SetReturnPc(uintptr_t new_ret_pc) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  /*
   * Return the sp-relative offset in bytes for a Dalvik virtual register,
   * compiler spill or Method*, using Method*.
   * Note that (reg >= 0) refers to a Dalvik register, (reg == -1)
   * denotes an invalid Dalvik register, (reg == -2) denotes Method*
   * and (reg <= -3) denotes a compiler temporary. A compiler temporary
   * can be thought of as a virtual register that does not exist in the
   * dex but holds intermediate values to help optimizations and code
   * generation. A special compiler temporary is one whose location
   * in the frame is well known, while non-special ones have no
   * requirement on frame location as long as the code generator itself
   * knows how to access them.
   *
   *     +---------------------------+
   *     | IN[ins-1]                 |  {Note: resides in caller's frame}
   *     |       .                   |
   *     | IN[0]                     |
   *     | caller's ArtMethod        |  ... StackReference<ArtMethod>
   *     +===========================+  {Note: start of callee's frame}
   *     | core callee-save spill    |  {variable sized}
   *     +---------------------------+
   *     | fp callee-save spill      |
   *     +---------------------------+
   *     | filler word               |  {For compatibility, if V[locals-1] is used as wide}
   *     +---------------------------+
   *     | V[locals-1]               |
   *     | V[locals-2]               |
   *     |      .                    |
   *     |      .                    |  ... (reg == 2)
   *     | V[1]                      |  ... (reg == 1)
   *     | V[0]                      |  ... (reg == 0) <---- "locals_start"
   *     +---------------------------+
   *     | Compiler temp region      |  ... (reg <= -3)
   *     |                           |
   *     |                           |
   *     +---------------------------+
   *     | stack alignment padding   |  {0 to (kStackAlignWords-1) of padding}
   *     +---------------------------+
   *     | OUT[outs-1]               |
   *     | OUT[outs-2]               |
   *     |       .                   |
   *     | OUT[0]                    |
   *     | StackReference<ArtMethod> |  ... (reg == -2) <<== sp, 16-byte aligned
   *     +===========================+
   */
  static int GetVRegOffset(const DexFile::CodeItem* code_item,
                           uint32_t core_spills, uint32_t fp_spills,
                           size_t frame_size, int reg, InstructionSet isa) {
    DCHECK_EQ(frame_size & (kStackAlignment - 1), 0U);
    DCHECK_NE(reg, static_cast<int>(kVRegInvalid));
    int spill_size = POPCOUNT(core_spills) * GetBytesPerGprSpillLocation(isa)
        + POPCOUNT(fp_spills) * GetBytesPerFprSpillLocation(isa)
        + sizeof(uint32_t);  // Filler.
    int num_ins = code_item->ins_size_;
    int num_regs = code_item->registers_size_ - num_ins;
    int locals_start = frame_size - spill_size - num_regs * sizeof(uint32_t);
    if (reg == static_cast<int>(kVRegMethodPtrBaseReg)) {
      // The current method pointer corresponds to a special location on the stack.
      return 0;
    } else if (reg <= static_cast<int>(kVRegNonSpecialTempBaseReg)) {
      /*
       * Special temporaries may have custom locations and the logic above deals with that.
       * However, non-special temporaries are placed relative to the locals. Since the
       * virtual register numbers for temporaries "grow" in the negative direction, the
       * reg number will always be less than or equal to the temp base reg. Thus, the
       * logic ensures that the first temp is at offset -4 bytes from locals, the second
       * is at -8 bytes from locals, and so on.
       */
      int relative_offset =
          (reg + std::abs(static_cast<int>(kVRegNonSpecialTempBaseReg)) - 1) * sizeof(uint32_t);
      return locals_start + relative_offset;
    } else if (reg < num_regs) {
      return locals_start + (reg * sizeof(uint32_t));
    } else {
      // Handle ins.
      return frame_size + ((reg - num_regs) * sizeof(uint32_t)) +
          sizeof(StackReference<mirror::ArtMethod>);
    }
  }
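
  // Worked example (illustrative, with assumed numbers, not from the original
  // header): suppose frame_size == 64, spill_size == 12, registers_size_ == 6,
  // ins_size_ == 2 and a 4-byte StackReference<ArtMethod>, so num_regs == 4
  // and locals_start == 64 - 12 - 16 == 36. Then:
  //   GetVRegOffset(..., reg = 0, ...)  == 36            (V[0] at locals_start)
  //   GetVRegOffset(..., reg = 1, ...)  == 40            (V[1])
  //   GetVRegOffset(..., reg = -2, ...) == 0             (Method* at sp)
  //   GetVRegOffset(..., reg = -3, ...) == 36 - 4 == 32  (first compiler temp)
  //   GetVRegOffset(..., reg = 4, ...)  == 64 + 0 + 4    (IN[0], caller's frame)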

  static int GetOutVROffset(uint16_t out_num, InstructionSet isa) {
    // According to the stack model, the first out is above the Method reference.
    return sizeof(StackReference<mirror::ArtMethod>) + (out_num * sizeof(uint32_t));
  }
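
  // Illustrative arithmetic (assuming a 4-byte StackReference<ArtMethod>, not
  // stated in the original header): OUT[0] sits at sp + 4, OUT[1] at sp + 8,
  // and in general GetOutVROffset(n, isa) == 4 + 4 * n.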

  uintptr_t GetCurrentQuickFramePc() const {
    return cur_quick_frame_pc_;
  }

  StackReference<mirror::ArtMethod>* GetCurrentQuickFrame() const {
    return cur_quick_frame_;
  }

  ShadowFrame* GetCurrentShadowFrame() const {
    return cur_shadow_frame_;
  }

  HandleScope* GetCurrentHandleScope() const {
    StackReference<mirror::ArtMethod>* sp = GetCurrentQuickFrame();
    ++sp;  // Skip Method*; the handle scope comes next.
    return reinterpret_cast<HandleScope*>(sp);
  }

  std::string DescribeLocation() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  static size_t ComputeNumFrames(Thread* thread) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  static void DescribeStack(Thread* thread) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

 private:
  // Private constructor, used when num_frames_ has already been computed.
  StackVisitor(Thread* thread, Context* context, size_t num_frames)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  bool GetGPR(uint32_t reg, uintptr_t* val) const;
  bool SetGPR(uint32_t reg, uintptr_t value);
  bool GetFPR(uint32_t reg, uintptr_t* val) const;
  bool SetFPR(uint32_t reg, uintptr_t value);

  void SanityCheckFrame() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  Thread* const thread_;
  ShadowFrame* cur_shadow_frame_;
  StackReference<mirror::ArtMethod>* cur_quick_frame_;
  uintptr_t cur_quick_frame_pc_;
  // Lazily computed, number of frames in the stack.
  size_t num_frames_;
  // Depth of the frame we're currently at.
  size_t cur_depth_;

 protected:
  Context* const context_;
};

}  // namespace art

#endif  // ART_RUNTIME_STACK_H_