/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_COMPILER_UTILS_ASSEMBLER_H_
#define ART_COMPILER_UTILS_ASSEMBLER_H_

#include <vector>

#include "arch/instruction_set.h"
#include "arm/constants_arm.h"
#include "base/logging.h"
#include "base/macros.h"
#include "dwarf/debug_frame_opcode_writer.h"
#include "managed_register.h"
#include "memory_region.h"
#include "mips/constants_mips.h"
#include "offsets.h"
#include "x86/constants_x86.h"
#include "x86_64/constants_x86_64.h"

namespace art {

class Assembler;
class AssemblerBuffer;
class AssemblerFixup;

namespace arm {
  class ArmAssembler;
  class Arm32Assembler;
  class Thumb2Assembler;
}
namespace arm64 {
  class Arm64Assembler;
}
namespace mips {
  class MipsAssembler;
}
namespace mips64 {
  class Mips64Assembler;
}
namespace x86 {
  class X86Assembler;
}
namespace x86_64 {
  class X86_64Assembler;
}

class ExternalLabel {
 public:
  ExternalLabel(const char* name_in, uintptr_t address_in)
      : name_(name_in), address_(address_in) {
    DCHECK(name_in != nullptr);
  }

  const char* name() const { return name_; }
  uintptr_t address() const {
    return address_;
  }

 private:
  const char* name_;
  const uintptr_t address_;
};

class Label {
 public:
  Label() : position_(0) {}

  ~Label() {
    // Assert if label is being destroyed with unresolved branches pending.
    CHECK(!IsLinked());
  }

  // Returns the position for bound and linked labels. Cannot be used
  // for unused labels.
  int Position() const {
    CHECK(!IsUnused());
    return IsBound() ? -position_ - sizeof(void*) : position_ - sizeof(void*);
  }

  int LinkPosition() const {
    CHECK(IsLinked());
    return position_ - sizeof(void*);
  }

  bool IsBound() const { return position_ < 0; }
  bool IsUnused() const { return position_ == 0; }
  bool IsLinked() const { return position_ > 0; }

 private:
  int position_;

  void Reinitialize() {
    position_ = 0;
  }

  void BindTo(int position) {
    CHECK(!IsBound());
    position_ = -position - sizeof(void*);
    CHECK(IsBound());
  }

  void LinkTo(int position) {
    CHECK(!IsBound());
    position_ = position + sizeof(void*);
    CHECK(IsLinked());
  }

  friend class arm::ArmAssembler;
  friend class arm::Arm32Assembler;
  friend class arm::Thumb2Assembler;
  friend class arm64::Arm64Assembler;
  friend class mips::MipsAssembler;
  friend class mips64::Mips64Assembler;
  friend class x86::X86Assembler;
  friend class x86_64::X86_64Assembler;

  DISALLOW_COPY_AND_ASSIGN(Label);
};
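
// A minimal sketch of the position_ encoding, inferred from the accessors
// above (a reading aid, not a normative spec):
//
//   position_ == 0 : unused.
//   position_ >  0 : linked; the raw position is biased by sizeof(void*) so
//                    that a link at code offset 0 is distinguishable from
//                    the unused state.
//   position_ <  0 : bound; the biased position is stored negated.
//
// Typical flow, using a hypothetical per-architecture assembler `asm_`:
//
//   Label done;
//   asm_->b(&done);     // Forward branch: LinkTo() records the branch site.
//   ...                 // More instructions.
//   asm_->Bind(&done);  // BindTo() resolves the label to the current offset.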


// Assembler fixups are positions in generated code that require processing
// after the code has been copied to executable memory. This includes building
// relocation information.
class AssemblerFixup {
 public:
  virtual void Process(const MemoryRegion& region, int position) = 0;
  virtual ~AssemblerFixup() {}

 private:
  AssemblerFixup* previous_;
  int position_;

  AssemblerFixup* previous() const { return previous_; }
  void set_previous(AssemblerFixup* previous_in) { previous_ = previous_in; }

  int position() const { return position_; }
  void set_position(int position_in) { position_ = position_in; }

  friend class AssemblerBuffer;
};
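
// A minimal sketch of a concrete fixup (hypothetical; the real subclasses
// live in the per-architecture assemblers). Process() runs after the code
// has been copied to its final location, so it can patch position-dependent
// words in place:
//
//   class AbsoluteAddressFixup FINAL : public AssemblerFixup {
//    public:
//     void Process(const MemoryRegion& region, int position) OVERRIDE {
//       // Rewrite the 32-bit placeholder emitted at `position` with the
//       // final value (ComputeTarget is a hypothetical helper).
//       region.Store<int32_t>(position, ComputeTarget(region, position));
//     }
//   };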

// Parent of all queued slow paths, emitted during finalization.
class SlowPath {
 public:
  SlowPath() : next_(nullptr) {}
  virtual ~SlowPath() {}

  Label* Continuation() { return &continuation_; }
  Label* Entry() { return &entry_; }
  // Generate code for the slow path.
  virtual void Emit(Assembler* sp_asm) = 0;

 protected:
  // Entry branched to by the fast path.
  Label entry_;
  // Optional continuation that is branched to at the end of the slow path.
  Label continuation_;
  // Next in the linked list of slow paths.
  SlowPath* next_;

 private:
  friend class AssemblerBuffer;
  DISALLOW_COPY_AND_ASSIGN(SlowPath);
};
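
// How the pieces above fit together (a sketch inferred from the
// Entry()/Continuation() accessors; the branch mnemonics are hypothetical
// and architecture specific):
//
//   // Fast path: branch to the slow path's entry on the uncommon case.
//   asm_->j(kNotEqual, slow->Entry());
//   ...
//   asm_->Bind(slow->Continuation());  // The slow path jumps back here.
//
// During finalization, AssemblerBuffer::EmitSlowPaths() calls
// slow->Emit(asm_), which binds entry_, emits the out-of-line code, and
// branches back to continuation_.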

class AssemblerBuffer {
 public:
  AssemblerBuffer();
  ~AssemblerBuffer();

  // Basic support for emitting, loading, and storing.
  template<typename T> void Emit(T value) {
    CHECK(HasEnsuredCapacity());
    *reinterpret_cast<T*>(cursor_) = value;
    cursor_ += sizeof(T);
  }

  template<typename T> T Load(size_t position) {
    CHECK_LE(position, Size() - static_cast<int>(sizeof(T)));
    return *reinterpret_cast<T*>(contents_ + position);
  }

  template<typename T> void Store(size_t position, T value) {
    CHECK_LE(position, Size() - static_cast<int>(sizeof(T)));
    *reinterpret_cast<T*>(contents_ + position) = value;
  }

  void Move(size_t newposition, size_t oldposition) {
    CHECK(HasEnsuredCapacity());
    // Move the nbytes of buffer contents that follow oldposition so that
    // they start at newposition instead.
    size_t nbytes = Size() - oldposition;
    memmove(contents_ + newposition, contents_ + oldposition, nbytes);
    cursor_ += newposition - oldposition;
  }

  // Emit a fixup at the current location.
  void EmitFixup(AssemblerFixup* fixup) {
    fixup->set_previous(fixup_);
    fixup->set_position(Size());
    fixup_ = fixup;
  }

  void EnqueueSlowPath(SlowPath* slowpath) {
    if (slow_path_ == nullptr) {
      slow_path_ = slowpath;
    } else {
      SlowPath* cur = slow_path_;
      for ( ; cur->next_ != nullptr ; cur = cur->next_) {}
      cur->next_ = slowpath;
    }
  }

  void EmitSlowPaths(Assembler* sp_asm) {
    SlowPath* cur = slow_path_;
    SlowPath* next = nullptr;
    slow_path_ = nullptr;
    for ( ; cur != nullptr ; cur = next) {
      cur->Emit(sp_asm);
      next = cur->next_;
      delete cur;
    }
  }

  // Get the size of the emitted code.
  size_t Size() const {
    CHECK_GE(cursor_, contents_);
    return cursor_ - contents_;
  }

  uint8_t* contents() const { return contents_; }

  // Copy the assembled instructions into the specified memory block
  // and apply all fixups.
  void FinalizeInstructions(const MemoryRegion& region);

  // To emit an instruction to the assembler buffer, the EnsureCapacity helper
  // must be used to guarantee that the underlying data area is big enough to
  // hold the emitted instruction. Usage:
  //
  //     AssemblerBuffer buffer;
  //     AssemblerBuffer::EnsureCapacity ensured(&buffer);
  //     ... emit bytes for single instruction ...

#ifndef NDEBUG

  class EnsureCapacity {
   public:
    explicit EnsureCapacity(AssemblerBuffer* buffer) {
      if (buffer->cursor() >= buffer->limit()) {
        buffer->ExtendCapacity();
      }
      // In debug mode, we save the assembler buffer along with the gap
      // size before we start emitting to the buffer. This allows us to
      // check that any single generated instruction doesn't overflow the
      // limit implied by the minimum gap size.
      buffer_ = buffer;
      gap_ = ComputeGap();
      // Make sure that extending the capacity leaves a big enough gap
      // for any kind of instruction.
      CHECK_GE(gap_, kMinimumGap);
      // Mark the buffer as having ensured the capacity.
      CHECK(!buffer->HasEnsuredCapacity());  // Cannot nest.
      buffer->has_ensured_capacity_ = true;
    }

    ~EnsureCapacity() {
      // Unmark the buffer, so we cannot emit after this.
      buffer_->has_ensured_capacity_ = false;
      // Make sure the generated instruction doesn't take up more
      // space than the minimum gap.
      int delta = gap_ - ComputeGap();
      CHECK_LE(delta, kMinimumGap);
    }

   private:
    AssemblerBuffer* buffer_;
    int gap_;

    int ComputeGap() { return buffer_->Capacity() - buffer_->Size(); }
  };

  bool has_ensured_capacity_;
  bool HasEnsuredCapacity() const { return has_ensured_capacity_; }

#else

  class EnsureCapacity {
   public:
    explicit EnsureCapacity(AssemblerBuffer* buffer) {
      if (buffer->cursor() >= buffer->limit()) buffer->ExtendCapacity();
    }
  };

  // When building the C++ tests, assertion code is enabled. To allow
  // asserting that the user of the assembler buffer has ensured the
  // capacity needed for emitting, we add a dummy method in non-debug mode.
  bool HasEnsuredCapacity() const { return true; }

#endif

  // Returns the position in the instruction stream.
  int GetPosition() { return cursor_ - contents_; }

 private:
  // The limit is set to kMinimumGap bytes before the end of the data area.
  // This leaves enough space for the longest possible instruction and allows
  // for a single, fast space check per instruction.
  static const int kMinimumGap = 32;

  uint8_t* contents_;
  uint8_t* cursor_;
  uint8_t* limit_;
  AssemblerFixup* fixup_;
#ifndef NDEBUG
  bool fixups_processed_;
#endif

  // Head of the linked list of slow paths.
  SlowPath* slow_path_;

  uint8_t* cursor() const { return cursor_; }
  uint8_t* limit() const { return limit_; }
  size_t Capacity() const {
    CHECK_GE(limit_, contents_);
    return (limit_ - contents_) + kMinimumGap;
  }

  // Process the fixup chain rooted at fixup_, applying each fixup to the
  // given region. The offset is non-zero for fixups in the body if the
  // preamble is non-empty.
  void ProcessFixups(const MemoryRegion& region);

  // Compute the limit based on the data area and the capacity. See the
  // description of kMinimumGap for the reasoning behind the value.
  static uint8_t* ComputeLimit(uint8_t* data, size_t capacity) {
    return data + capacity - kMinimumGap;
  }

  void ExtendCapacity();

  friend class AssemblerFixup;
};
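
// A minimal sketch of the emit pattern described in the usage comment above
// (0x90 is the x86 NOP encoding, used purely for illustration):
//
//   void EmitNopForIllustration(AssemblerBuffer* buffer) {
//     AssemblerBuffer::EnsureCapacity ensured(buffer);
//     buffer->Emit<uint8_t>(0x90);  // Guarded by the capacity check above.
//   }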

// The purpose of this class is to ensure that we do not have to explicitly
// call the AdvancePC method (which is good for convenience and correctness).
class DebugFrameOpCodeWriterForAssembler FINAL
    : public dwarf::DebugFrameOpCodeWriter<> {
 public:
  // This method is called by the opcode writers.
  virtual void ImplicitlyAdvancePC() FINAL;

  explicit DebugFrameOpCodeWriterForAssembler(Assembler* buffer)
      : dwarf::DebugFrameOpCodeWriter<>(),
        assembler_(buffer) {
  }

 private:
  Assembler* assembler_;
};
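
// Sketch of the intended effect (hypothetical call sites; the real ones are
// in the per-architecture assemblers): when CFI opcodes are written through
// cfi(), the writer advances the DWARF code location on its own, so no
// explicit AdvancePC(CodeSize()) call is needed:
//
//   asm_->cfi().AdjustCFAOffset(frame_size);  // PC advanced implicitly.
//   asm_->cfi().RelOffset(dwarf::Reg::ArmCore(reg), offset);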

class Assembler {
 public:
  static Assembler* Create(InstructionSet instruction_set);

  // Emit slow paths queued during assembly
  virtual void EmitSlowPaths() { buffer_.EmitSlowPaths(this); }

  // Size of generated code
  virtual size_t CodeSize() const { return buffer_.Size(); }

  // Copy instructions out of assembly buffer into the given region of memory
  virtual void FinalizeInstructions(const MemoryRegion& region) {
    buffer_.FinalizeInstructions(region);
  }

  // TODO: Implement with disassembler.
  virtual void Comment(const char* format, ...) { UNUSED(format); }

  // Emit code that will create an activation on the stack
  virtual void BuildFrame(size_t frame_size, ManagedRegister method_reg,
                          const std::vector<ManagedRegister>& callee_save_regs,
                          const ManagedRegisterEntrySpills& entry_spills) = 0;

  // Emit code that will remove an activation from the stack
  virtual void RemoveFrame(size_t frame_size,
                           const std::vector<ManagedRegister>& callee_save_regs) = 0;

  virtual void IncreaseFrameSize(size_t adjust) = 0;
  virtual void DecreaseFrameSize(size_t adjust) = 0;

  // Store routines
  virtual void Store(FrameOffset offs, ManagedRegister src, size_t size) = 0;
  virtual void StoreRef(FrameOffset dest, ManagedRegister src) = 0;
  virtual void StoreRawPtr(FrameOffset dest, ManagedRegister src) = 0;

  virtual void StoreImmediateToFrame(FrameOffset dest, uint32_t imm,
                                     ManagedRegister scratch) = 0;

  virtual void StoreImmediateToThread32(ThreadOffset<4> dest, uint32_t imm,
                                        ManagedRegister scratch);
  virtual void StoreImmediateToThread64(ThreadOffset<8> dest, uint32_t imm,
                                        ManagedRegister scratch);

  virtual void StoreStackOffsetToThread32(ThreadOffset<4> thr_offs,
                                          FrameOffset fr_offs,
                                          ManagedRegister scratch);
  virtual void StoreStackOffsetToThread64(ThreadOffset<8> thr_offs,
                                          FrameOffset fr_offs,
                                          ManagedRegister scratch);

  virtual void StoreStackPointerToThread32(ThreadOffset<4> thr_offs);
  virtual void StoreStackPointerToThread64(ThreadOffset<8> thr_offs);

  virtual void StoreSpanning(FrameOffset dest, ManagedRegister src,
                             FrameOffset in_off, ManagedRegister scratch) = 0;

  // Load routines
  virtual void Load(ManagedRegister dest, FrameOffset src, size_t size) = 0;

  virtual void LoadFromThread32(ManagedRegister dest, ThreadOffset<4> src, size_t size);
  virtual void LoadFromThread64(ManagedRegister dest, ThreadOffset<8> src, size_t size);

  virtual void LoadRef(ManagedRegister dest, FrameOffset src) = 0;
  // If poison_reference is true and kPoisonReference is true, then we negate the read reference.
  virtual void LoadRef(ManagedRegister dest, ManagedRegister base, MemberOffset offs,
                       bool poison_reference) = 0;

  virtual void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) = 0;

  virtual void LoadRawPtrFromThread32(ManagedRegister dest, ThreadOffset<4> offs);
  virtual void LoadRawPtrFromThread64(ManagedRegister dest, ThreadOffset<8> offs);

  // Copying routines
  virtual void Move(ManagedRegister dest, ManagedRegister src, size_t size) = 0;

  virtual void CopyRawPtrFromThread32(FrameOffset fr_offs, ThreadOffset<4> thr_offs,
                                      ManagedRegister scratch);
  virtual void CopyRawPtrFromThread64(FrameOffset fr_offs, ThreadOffset<8> thr_offs,
                                      ManagedRegister scratch);

  virtual void CopyRawPtrToThread32(ThreadOffset<4> thr_offs, FrameOffset fr_offs,
                                    ManagedRegister scratch);
  virtual void CopyRawPtrToThread64(ThreadOffset<8> thr_offs, FrameOffset fr_offs,
                                    ManagedRegister scratch);

  virtual void CopyRef(FrameOffset dest, FrameOffset src,
                       ManagedRegister scratch) = 0;

  virtual void Copy(FrameOffset dest, FrameOffset src, ManagedRegister scratch, size_t size) = 0;

  virtual void Copy(FrameOffset dest, ManagedRegister src_base, Offset src_offset,
                    ManagedRegister scratch, size_t size) = 0;

  virtual void Copy(ManagedRegister dest_base, Offset dest_offset, FrameOffset src,
                    ManagedRegister scratch, size_t size) = 0;

  virtual void Copy(FrameOffset dest, FrameOffset src_base, Offset src_offset,
                    ManagedRegister scratch, size_t size) = 0;

  virtual void Copy(ManagedRegister dest, Offset dest_offset,
                    ManagedRegister src, Offset src_offset,
                    ManagedRegister scratch, size_t size) = 0;

  virtual void Copy(FrameOffset dest, Offset dest_offset, FrameOffset src, Offset src_offset,
                    ManagedRegister scratch, size_t size) = 0;

  virtual void MemoryBarrier(ManagedRegister scratch) = 0;

  // Sign extension
  virtual void SignExtend(ManagedRegister mreg, size_t size) = 0;

  // Zero extension
  virtual void ZeroExtend(ManagedRegister mreg, size_t size) = 0;

  // Exploit fast access in managed code to Thread::Current()
  virtual void GetCurrentThread(ManagedRegister tr) = 0;
  virtual void GetCurrentThread(FrameOffset dest_offset,
                                ManagedRegister scratch) = 0;
    493 
    494   // Set up out_reg to hold a Object** into the handle scope, or to be null if the
    495   // value is null and null_allowed. in_reg holds a possibly stale reference
    496   // that can be used to avoid loading the handle scope entry to see if the value is
    497   // null.
    498   virtual void CreateHandleScopeEntry(ManagedRegister out_reg, FrameOffset handlescope_offset,
    499                                ManagedRegister in_reg, bool null_allowed) = 0;
    500 
    501   // Set up out_off to hold a Object** into the handle scope, or to be null if the
    502   // value is null and null_allowed.
    503   virtual void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handlescope_offset,
    504                                ManagedRegister scratch, bool null_allowed) = 0;
    505 
    506   // src holds a handle scope entry (Object**) load this into dst
    507   virtual void LoadReferenceFromHandleScope(ManagedRegister dst,
    508                                      ManagedRegister src) = 0;

  // Heap::VerifyObject on src. In some cases (such as a reference to this)
  // we know that src cannot be null.
  virtual void VerifyObject(ManagedRegister src, bool could_be_null) = 0;
  virtual void VerifyObject(FrameOffset src, bool could_be_null) = 0;

  // Call to address held at [base+offset].
  virtual void Call(ManagedRegister base, Offset offset,
                    ManagedRegister scratch) = 0;
  virtual void Call(FrameOffset base, Offset offset,
                    ManagedRegister scratch) = 0;
  virtual void CallFromThread32(ThreadOffset<4> offset, ManagedRegister scratch);
  virtual void CallFromThread64(ThreadOffset<8> offset, ManagedRegister scratch);

  // Generate code to check if Thread::Current()->exception_ is non-null
  // and branch to an ExceptionSlowPath if it is.
  virtual void ExceptionPoll(ManagedRegister scratch, size_t stack_adjust) = 0;

  virtual ~Assembler() {}

  /**
   * @brief Buffer of DWARF's Call Frame Information opcodes.
   * @details It is used by debuggers and other tools to unwind the call stack.
   */
  DebugFrameOpCodeWriterForAssembler& cfi() { return cfi_; }

 protected:
  Assembler() : buffer_(), cfi_(this) {}

  AssemblerBuffer buffer_;

  DebugFrameOpCodeWriterForAssembler cfi_;
};
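
// End-to-end usage sketch, based only on the interface declared above
// (kThumb2 is one InstructionSet value; error handling elided):
//
//   std::unique_ptr<Assembler> assembler(Assembler::Create(kThumb2));
//   ...                                   // Emit instructions.
//   assembler->EmitSlowPaths();           // Flush queued slow paths.
//   std::vector<uint8_t> code(assembler->CodeSize());
//   MemoryRegion region(code.data(), code.size());
//   assembler->FinalizeInstructions(region);  // Copy code, apply fixups.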

}  // namespace art

#endif  // ART_COMPILER_UTILS_ASSEMBLER_H_