/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_COMPILER_UTILS_ASSEMBLER_H_
#define ART_COMPILER_UTILS_ASSEMBLER_H_

#include <vector>

#include "base/logging.h"
#include "base/macros.h"
#include "arm/constants_arm.h"
#include "mips/constants_mips.h"
#include "x86/constants_x86.h"
#include "instruction_set.h"
#include "managed_register.h"
#include "memory_region.h"
#include "offsets.h"

namespace art {

class Assembler;
class AssemblerBuffer;
class AssemblerFixup;

namespace arm {
  class ArmAssembler;
}
namespace mips {
  class MipsAssembler;
}
namespace x86 {
  class X86Assembler;
}

class Label {
 public:
  Label() : position_(0) {}

  ~Label() {
    // Assert if label is being destroyed with unresolved branches pending.
    CHECK(!IsLinked());
  }

  // Returns the position for bound and linked labels. Cannot be used
  // for unused labels.
  int Position() const {
    CHECK(!IsUnused());
    return IsBound() ? -position_ - kPointerSize : position_ - kPointerSize;
  }

  int LinkPosition() const {
    CHECK(IsLinked());
    return position_ - kWordSize;
  }

  bool IsBound() const { return position_ < 0; }
  bool IsUnused() const { return position_ == 0; }
  bool IsLinked() const { return position_ > 0; }

 private:
  int position_;

  void Reinitialize() {
    position_ = 0;
  }

  void BindTo(int position) {
    CHECK(!IsBound());
    position_ = -position - kPointerSize;
    CHECK(IsBound());
  }

  void LinkTo(int position) {
    CHECK(!IsBound());
    position_ = position + kPointerSize;
    CHECK(IsLinked());
  }

  friend class arm::ArmAssembler;
  friend class mips::MipsAssembler;
  friend class x86::X86Assembler;

  DISALLOW_COPY_AND_ASSIGN(Label);
};
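
// Note on the encoding above: the single position_ word captures all three
// label states. Zero means unused; a positive value means linked, with the
// link position biased by +kPointerSize; a negative value means bound, with
// the bound position stored as -(position + kPointerSize). The bias keeps a
// position of 0 distinguishable from the unused state. A small worked
// example (values assume kPointerSize == 4, purely for illustration):
//
//     Label label;           // position_ ==   0 -> IsUnused()
//     // After LinkTo(8):       position_ ==  12 -> IsLinked(), Position() == 8
//     // After BindTo(8):       position_ == -12 -> IsBound(),  Position() == 8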

// Assembler fixups are positions in generated code that require processing
// after the code has been copied to executable memory. This includes building
// relocation information.
class AssemblerFixup {
 public:
  virtual void Process(const MemoryRegion& region, int position) = 0;
  virtual ~AssemblerFixup() {}

 private:
  AssemblerFixup* previous_;
  int position_;

  AssemblerFixup* previous() const { return previous_; }
  void set_previous(AssemblerFixup* previous) { previous_ = previous; }

  int position() const { return position_; }
  void set_position(int position) { position_ = position; }

  friend class AssemblerBuffer;
};
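
// A minimal sketch of a concrete fixup, for illustration only (the name and
// patching policy are hypothetical; real subclasses live in the
// architecture-specific assemblers). It assumes MemoryRegion's templated
// Load/Store accessors:
//
//     class AddDeltaFixup : public AssemblerFixup {
//      public:
//       explicit AddDeltaFixup(int32_t delta) : delta_(delta) {}
//       virtual void Process(const MemoryRegion& region, int position) {
//         // Adjust the 32-bit word at 'position' in the finalized code.
//         int32_t value = region.Load<int32_t>(position);
//         region.Store<int32_t>(position, value + delta_);
//       }
//      private:
//       int32_t delta_;
//     };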

// Parent of all queued slow paths, emitted during finalization
class SlowPath {
 public:
  SlowPath() : next_(NULL) {}
  virtual ~SlowPath() {}

  Label* Continuation() { return &continuation_; }
  Label* Entry() { return &entry_; }
  // Generate code for slow path
  virtual void Emit(Assembler* sp_asm) = 0;

 protected:
  // Entry branched to by fast path
  Label entry_;
  // Optional continuation that is branched to at the end of the slow path
  Label continuation_;
  // Next in linked list of slow paths
  SlowPath* next_;

 private:
  friend class AssemblerBuffer;
  DISALLOW_COPY_AND_ASSIGN(SlowPath);
};
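
// A minimal sketch of how a SlowPath subclass is intended to work (the name
// is hypothetical; the emission helpers are architecture-specific and are
// elided): the fast path branches to Entry(), Emit() produces the
// out-of-line code during EmitSlowPaths(), and execution rejoins at
// Continuation().
//
//     class CheckFailureSlowPath : public SlowPath {
//      public:
//       virtual void Emit(Assembler* sp_asm) {
//         // 1. Bind entry_ at the current buffer position.
//         // 2. Emit the out-of-line work, e.g. a runtime call.
//         // 3. Branch back to continuation_ if one is used.
//       }
//     };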

class AssemblerBuffer {
 public:
  AssemblerBuffer();
  ~AssemblerBuffer();

  // Basic support for emitting, loading, and storing.
  template<typename T> void Emit(T value) {
    CHECK(HasEnsuredCapacity());
    *reinterpret_cast<T*>(cursor_) = value;
    cursor_ += sizeof(T);
  }

  template<typename T> T Load(size_t position) {
    // Phrased as an addition so the bounds check cannot underflow when
    // Size() < sizeof(T).
    CHECK_LE(position + sizeof(T), Size());
    return *reinterpret_cast<T*>(contents_ + position);
  }

  template<typename T> void Store(size_t position, T value) {
    CHECK_LE(position + sizeof(T), Size());
    *reinterpret_cast<T*>(contents_ + position) = value;
  }

  // Emit a fixup at the current location.
  void EmitFixup(AssemblerFixup* fixup) {
    fixup->set_previous(fixup_);
    fixup->set_position(Size());
    fixup_ = fixup;
  }

  void EnqueueSlowPath(SlowPath* slowpath) {
    if (slow_path_ == NULL) {
      slow_path_ = slowpath;
    } else {
      SlowPath* cur = slow_path_;
      for (; cur->next_ != NULL; cur = cur->next_) {}
      cur->next_ = slowpath;
    }
  }

  void EmitSlowPaths(Assembler* sp_asm) {
    SlowPath* cur = slow_path_;
    SlowPath* next = NULL;
    slow_path_ = NULL;
    for (; cur != NULL; cur = next) {
      cur->Emit(sp_asm);
      next = cur->next_;
      delete cur;
    }
  }

  // Get the size of the emitted code.
  size_t Size() const {
    CHECK_GE(cursor_, contents_);
    return cursor_ - contents_;
  }

  byte* contents() const { return contents_; }

  // Copy the assembled instructions into the specified memory block
  // and apply all fixups.
  void FinalizeInstructions(const MemoryRegion& region);

  // To emit an instruction to the assembler buffer, the EnsureCapacity helper
  // must be used to guarantee that the underlying data area is big enough to
  // hold the emitted instruction. Usage:
  //
  //     AssemblerBuffer buffer;
  //     AssemblerBuffer::EnsureCapacity ensured(&buffer);
  //     ... emit bytes for single instruction ...

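  // A slightly fuller sketch (the emitted values are made up for
  // illustration; real emission goes through the architecture-specific
  // assembler):
  //
  //     AssemblerBuffer buffer;
  //     {
  //       AssemblerBuffer::EnsureCapacity ensured(&buffer);
  //       buffer.Emit<uint8_t>(0x90);   // hypothetical one-byte opcode
  //       buffer.Emit<int32_t>(42);     // hypothetical 32-bit immediate
  //     }  // In debug builds the destructor checks that the emitted bytes
  //        // fit within kMinimumGap.
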
#ifndef NDEBUG

  class EnsureCapacity {
   public:
    explicit EnsureCapacity(AssemblerBuffer* buffer) {
      if (buffer->cursor() >= buffer->limit()) {
        buffer->ExtendCapacity();
      }
      // In debug mode, we save the assembler buffer along with the gap
      // size before we start emitting to the buffer. This allows us to
      // check that any single generated instruction doesn't overflow the
      // limit implied by the minimum gap size.
      buffer_ = buffer;
      gap_ = ComputeGap();
      // Make sure that extending the capacity leaves a big enough gap
      // for any kind of instruction.
      CHECK_GE(gap_, kMinimumGap);
      // Mark the buffer as having ensured the capacity.
      CHECK(!buffer->HasEnsuredCapacity());  // Cannot nest.
      buffer->has_ensured_capacity_ = true;
    }

    ~EnsureCapacity() {
      // Unmark the buffer, so we cannot emit after this.
      buffer_->has_ensured_capacity_ = false;
      // Make sure the generated instruction doesn't take up more
      // space than the minimum gap.
      int delta = gap_ - ComputeGap();
      CHECK_LE(delta, kMinimumGap);
    }

   private:
    AssemblerBuffer* buffer_;
    int gap_;

    int ComputeGap() { return buffer_->Capacity() - buffer_->Size(); }
  };

  bool has_ensured_capacity_;
  bool HasEnsuredCapacity() const { return has_ensured_capacity_; }

#else

  class EnsureCapacity {
   public:
    explicit EnsureCapacity(AssemblerBuffer* buffer) {
      if (buffer->cursor() >= buffer->limit()) buffer->ExtendCapacity();
    }
  };

  // When building the C++ tests, assertion code is enabled. To allow
  // asserting that the user of the assembler buffer has ensured the
  // capacity needed for emitting, we add a dummy method in non-debug mode.
  bool HasEnsuredCapacity() const { return true; }

#endif


  // Returns the position in the instruction stream.
  int GetPosition() { return cursor_ - contents_; }

 private:
  // The limit is set to kMinimumGap bytes before the end of the data area.
  // This leaves enough space for the longest possible instruction and allows
  // for a single, fast space check per instruction.
  static const int kMinimumGap = 32;
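  // Worked example with illustrative numbers: for a 4096-byte data area,
  // limit_ = contents_ + 4096 - 32, so the single cursor() >= limit() test
  // in EnsureCapacity still leaves 32 bytes of headroom for the instruction
  // about to be emitted.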

  byte* contents_;
  byte* cursor_;
  byte* limit_;
  AssemblerFixup* fixup_;
  bool fixups_processed_;

  // Head of linked list of slow paths
  SlowPath* slow_path_;

  byte* cursor() const { return cursor_; }
  byte* limit() const { return limit_; }
  size_t Capacity() const {
    CHECK_GE(limit_, contents_);
    return (limit_ - contents_) + kMinimumGap;
  }

  // Walk the fixup chain rooted at fixup_ and apply each fixup to the
  // final memory region.
  void ProcessFixups(const MemoryRegion& region);

  // Compute the limit based on the data area and the capacity. See
  // description of kMinimumGap for the reasoning behind the value.
  static byte* ComputeLimit(byte* data, size_t capacity) {
    return data + capacity - kMinimumGap;
  }

  void ExtendCapacity();

  friend class AssemblerFixup;
};

class Assembler {
 public:
  static Assembler* Create(InstructionSet instruction_set);

  // Emit slow paths queued during assembly
  void EmitSlowPaths() { buffer_.EmitSlowPaths(this); }

  // Size of generated code
  size_t CodeSize() const { return buffer_.Size(); }

  // Copy instructions out of assembly buffer into the given region of memory
  void FinalizeInstructions(const MemoryRegion& region) {
    buffer_.FinalizeInstructions(region);
  }

  // Emit code that will create an activation on the stack
  virtual void BuildFrame(size_t frame_size, ManagedRegister method_reg,
                          const std::vector<ManagedRegister>& callee_save_regs,
                          const std::vector<ManagedRegister>& entry_spills) = 0;

  // Emit code that will remove an activation from the stack
  virtual void RemoveFrame(size_t frame_size,
                           const std::vector<ManagedRegister>& callee_save_regs) = 0;

  virtual void IncreaseFrameSize(size_t adjust) = 0;
  virtual void DecreaseFrameSize(size_t adjust) = 0;

  // Store routines
  virtual void Store(FrameOffset offs, ManagedRegister src, size_t size) = 0;
  virtual void StoreRef(FrameOffset dest, ManagedRegister src) = 0;
  virtual void StoreRawPtr(FrameOffset dest, ManagedRegister src) = 0;

  virtual void StoreImmediateToFrame(FrameOffset dest, uint32_t imm,
                                     ManagedRegister scratch) = 0;

  virtual void StoreImmediateToThread(ThreadOffset dest, uint32_t imm,
                                      ManagedRegister scratch) = 0;

  virtual void StoreStackOffsetToThread(ThreadOffset thr_offs,
                                        FrameOffset fr_offs,
                                        ManagedRegister scratch) = 0;

  virtual void StoreStackPointerToThread(ThreadOffset thr_offs) = 0;

  virtual void StoreSpanning(FrameOffset dest, ManagedRegister src,
                             FrameOffset in_off, ManagedRegister scratch) = 0;

  // Load routines
  virtual void Load(ManagedRegister dest, FrameOffset src, size_t size) = 0;

  virtual void Load(ManagedRegister dest, ThreadOffset src, size_t size) = 0;

  virtual void LoadRef(ManagedRegister dest, FrameOffset src) = 0;

  virtual void LoadRef(ManagedRegister dest, ManagedRegister base,
                       MemberOffset offs) = 0;

  virtual void LoadRawPtr(ManagedRegister dest, ManagedRegister base,
                          Offset offs) = 0;

  virtual void LoadRawPtrFromThread(ManagedRegister dest,
                                    ThreadOffset offs) = 0;

  // Copying routines
  virtual void Move(ManagedRegister dest, ManagedRegister src, size_t size) = 0;

  virtual void CopyRawPtrFromThread(FrameOffset fr_offs, ThreadOffset thr_offs,
                                    ManagedRegister scratch) = 0;

  virtual void CopyRawPtrToThread(ThreadOffset thr_offs, FrameOffset fr_offs,
                                  ManagedRegister scratch) = 0;

  virtual void CopyRef(FrameOffset dest, FrameOffset src,
                       ManagedRegister scratch) = 0;

  virtual void Copy(FrameOffset dest, FrameOffset src, ManagedRegister scratch, size_t size) = 0;

  virtual void Copy(FrameOffset dest, ManagedRegister src_base, Offset src_offset,
                    ManagedRegister scratch, size_t size) = 0;

  virtual void Copy(ManagedRegister dest_base, Offset dest_offset, FrameOffset src,
                    ManagedRegister scratch, size_t size) = 0;

  virtual void Copy(FrameOffset dest, FrameOffset src_base, Offset src_offset,
                    ManagedRegister scratch, size_t size) = 0;

  virtual void Copy(ManagedRegister dest, Offset dest_offset,
                    ManagedRegister src, Offset src_offset,
                    ManagedRegister scratch, size_t size) = 0;

  virtual void Copy(FrameOffset dest, Offset dest_offset, FrameOffset src, Offset src_offset,
                    ManagedRegister scratch, size_t size) = 0;

  virtual void MemoryBarrier(ManagedRegister scratch) = 0;

  // Sign extension
  virtual void SignExtend(ManagedRegister mreg, size_t size) = 0;

  // Zero extension
  virtual void ZeroExtend(ManagedRegister mreg, size_t size) = 0;

  // Exploit fast access in managed code to Thread::Current()
  virtual void GetCurrentThread(ManagedRegister tr) = 0;
  virtual void GetCurrentThread(FrameOffset dest_offset,
                                ManagedRegister scratch) = 0;

  // Set up out_reg to hold an Object** into the SIRT, or to be NULL if the
  // value is null and null_allowed. in_reg holds a possibly stale reference
  // that can be used to avoid loading the SIRT entry to see if the value is
  // NULL.
  virtual void CreateSirtEntry(ManagedRegister out_reg, FrameOffset sirt_offset,
                               ManagedRegister in_reg, bool null_allowed) = 0;

  // Set up out_off to hold an Object** into the SIRT, or to be NULL if the
  // value is null and null_allowed.
  virtual void CreateSirtEntry(FrameOffset out_off, FrameOffset sirt_offset,
                               ManagedRegister scratch, bool null_allowed) = 0;

  // src holds a SIRT entry (Object**); load it into dst.
  virtual void LoadReferenceFromSirt(ManagedRegister dst,
                                     ManagedRegister src) = 0;

  // Heap::VerifyObject on src. In some cases (such as a reference to this)
  // we know that src cannot be null.
  virtual void VerifyObject(ManagedRegister src, bool could_be_null) = 0;
  virtual void VerifyObject(FrameOffset src, bool could_be_null) = 0;

  // Call to address held at [base+offset]
  virtual void Call(ManagedRegister base, Offset offset,
                    ManagedRegister scratch) = 0;
  virtual void Call(FrameOffset base, Offset offset,
                    ManagedRegister scratch) = 0;
  virtual void Call(ThreadOffset offset, ManagedRegister scratch) = 0;

  // Generate code to check if Thread::Current()->exception_ is non-null
  // and branch to an ExceptionSlowPath if it is.
  virtual void ExceptionPoll(ManagedRegister scratch, size_t stack_adjust) = 0;

  virtual ~Assembler() {}

 protected:
  Assembler() : buffer_() {}

  AssemblerBuffer buffer_;
};
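
// A minimal sketch of the intended driver flow (the instruction set and
// buffer sizing here are illustrative; this mirrors how a caller such as a
// JNI stub compiler might use the interface):
//
//     Assembler* assembler = Assembler::Create(kThumb2);
//     // ... emit code through the architecture-specific interface ...
//     assembler->EmitSlowPaths();
//     size_t size = assembler->CodeSize();
//     std::vector<uint8_t> code_storage(size);
//     MemoryRegion code(&code_storage[0], size);
//     assembler->FinalizeInstructions(code);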

}  // namespace art

#endif  // ART_COMPILER_UTILS_ASSEMBLER_H_