/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_COMPILER_UTILS_ASSEMBLER_H_
#define ART_COMPILER_UTILS_ASSEMBLER_H_

#include <vector>

#include <android-base/logging.h>

#include "arch/instruction_set.h"
#include "arch/instruction_set_features.h"
#include "arm/constants_arm.h"
#include "base/arena_allocator.h"
#include "base/arena_object.h"
#include "base/array_ref.h"
#include "base/enums.h"
#include "base/macros.h"
#include "debug/dwarf/debug_frame_opcode_writer.h"
#include "label.h"
#include "managed_register.h"
#include "memory_region.h"
#include "mips/constants_mips.h"
#include "offsets.h"
#include "x86/constants_x86.h"
#include "x86_64/constants_x86_64.h"

namespace art {

class Assembler;
class AssemblerBuffer;

// Assembler fixups are positions in generated code that require processing
// after the code has been copied to executable memory. This includes building
// relocation information.
class AssemblerFixup {
 public:
  virtual void Process(const MemoryRegion& region, int position) = 0;
  virtual ~AssemblerFixup() {}

 private:
  AssemblerFixup* previous_;
  int position_;

  AssemblerFixup* previous() const { return previous_; }
  void set_previous(AssemblerFixup* previous_in) { previous_ = previous_in; }

  int position() const { return position_; }
  void set_position(int position_in) { position_ = position_in; }

  friend class AssemblerBuffer;
};
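
// A minimal commented sketch of a concrete fixup. No concrete subclasses live
// in this header; the subclass name and the assumption that MemoryRegion
// exposes a templated Store are illustrative only:
//
//   class AbsoluteAddressFixup FINAL : public AssemblerFixup {
//    public:
//     void Process(const MemoryRegion& region, int position) OVERRIDE {
//       // Patch the 32-bit placeholder emitted at `position` now that the
//       // code has been copied into `region`.
//       region.Store<int32_t>(position, target_);
//     }
//    private:
//     int32_t target_ = 0;
//   };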

// Parent of all queued slow paths, emitted during finalization
class SlowPath : public DeletableArenaObject<kArenaAllocAssembler> {
 public:
  SlowPath() : next_(nullptr) {}
  virtual ~SlowPath() {}

  Label* Continuation() { return &continuation_; }
  Label* Entry() { return &entry_; }
  // Generate code for slow path
  virtual void Emit(Assembler *sp_asm) = 0;

 protected:
  // Entry branched to by fast path
  Label entry_;
  // Optional continuation that is branched to at the end of the slow path
  Label continuation_;
  // Next in linked list of slow paths
  SlowPath *next_;

 private:
  friend class AssemblerBuffer;
  DISALLOW_COPY_AND_ASSIGN(SlowPath);
};
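
// A commented sketch of how a slow path subclass typically uses the entry and
// continuation labels; the subclass name is hypothetical and the out-of-line
// code itself is elided:
//
//   class HypotheticalStackOverflowSlowPath FINAL : public SlowPath {
//    public:
//     void Emit(Assembler* sp_asm) OVERRIDE {
//       sp_asm->Bind(&entry_);         // fast path branches here
//       // ... emit the out-of-line handling code ...
//       sp_asm->Jump(&continuation_);  // resume the fast path
//     }
//   };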

class AssemblerBuffer {
 public:
  explicit AssemblerBuffer(ArenaAllocator* allocator);
  ~AssemblerBuffer();

  ArenaAllocator* GetAllocator() {
    return allocator_;
  }

  // Basic support for emitting, loading, and storing.
  template<typename T> void Emit(T value) {
    CHECK(HasEnsuredCapacity());
    *reinterpret_cast<T*>(cursor_) = value;
    cursor_ += sizeof(T);
  }

  template<typename T> T Load(size_t position) {
    CHECK_LE(position, Size() - static_cast<int>(sizeof(T)));
    return *reinterpret_cast<T*>(contents_ + position);
  }

  template<typename T> void Store(size_t position, T value) {
    CHECK_LE(position, Size() - static_cast<int>(sizeof(T)));
    *reinterpret_cast<T*>(contents_ + position) = value;
  }

  void Resize(size_t new_size) {
    if (new_size > Capacity()) {
      ExtendCapacity(new_size);
    }
    cursor_ = contents_ + new_size;
  }

  void Move(size_t newposition, size_t oldposition, size_t size) {
    // Move a chunk of the buffer from oldposition to newposition.
    DCHECK_LE(oldposition + size, Size());
    DCHECK_LE(newposition + size, Size());
    memmove(contents_ + newposition, contents_ + oldposition, size);
  }

  // Emit a fixup at the current location.
  void EmitFixup(AssemblerFixup* fixup) {
    fixup->set_previous(fixup_);
    fixup->set_position(Size());
    fixup_ = fixup;
  }

  void EnqueueSlowPath(SlowPath* slowpath) {
    if (slow_path_ == nullptr) {
      slow_path_ = slowpath;
    } else {
      SlowPath* cur = slow_path_;
      for ( ; cur->next_ != nullptr ; cur = cur->next_) {}
      cur->next_ = slowpath;
    }
  }

  void EmitSlowPaths(Assembler* sp_asm) {
    SlowPath* cur = slow_path_;
    SlowPath* next = nullptr;
    slow_path_ = nullptr;
    for ( ; cur != nullptr ; cur = next) {
      cur->Emit(sp_asm);
      next = cur->next_;
      delete cur;
    }
  }

  // Get the size of the emitted code.
  size_t Size() const {
    CHECK_GE(cursor_, contents_);
    return cursor_ - contents_;
  }

  uint8_t* contents() const { return contents_; }

  // Copy the assembled instructions into the specified memory block
  // and apply all fixups.
  void FinalizeInstructions(const MemoryRegion& region);

  // To emit an instruction to the assembler buffer, the EnsureCapacity helper
  // must be used to guarantee that the underlying data area is big enough to
  // hold the emitted instruction. Usage:
  //
  //     AssemblerBuffer buffer;
  //     AssemblerBuffer::EnsureCapacity ensured(&buffer);
  //     ... emit bytes for single instruction ...
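  //
  // For example, a hypothetical two-byte opcode could be emitted under the
  // guard like this (the byte values are illustrative only):
  //
  //     AssemblerBuffer::EnsureCapacity ensured(&buffer);
  //     buffer.Emit<uint8_t>(0x0f);
  //     buffer.Emit<uint8_t>(0x1f);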

#ifndef NDEBUG

  class EnsureCapacity {
   public:
    explicit EnsureCapacity(AssemblerBuffer* buffer) {
      if (buffer->cursor() > buffer->limit()) {
        buffer->ExtendCapacity(buffer->Size() + kMinimumGap);
      }
      // In debug mode, we save the assembler buffer along with the gap
      // size before we start emitting to the buffer. This allows us to
      // check that any single generated instruction doesn't overflow the
      // limit implied by the minimum gap size.
      buffer_ = buffer;
      gap_ = ComputeGap();
      // Make sure that extending the capacity leaves a big enough gap
      // for any kind of instruction.
      CHECK_GE(gap_, kMinimumGap);
      // Mark the buffer as having ensured the capacity.
      CHECK(!buffer->HasEnsuredCapacity());  // Cannot nest.
      buffer->has_ensured_capacity_ = true;
    }

    ~EnsureCapacity() {
      // Unmark the buffer, so we cannot emit after this.
      buffer_->has_ensured_capacity_ = false;
      // Make sure the generated instruction doesn't take up more
      // space than the minimum gap.
      int delta = gap_ - ComputeGap();
      CHECK_LE(delta, kMinimumGap);
    }

   private:
    AssemblerBuffer* buffer_;
    int gap_;

    int ComputeGap() { return buffer_->Capacity() - buffer_->Size(); }
  };

  bool has_ensured_capacity_;
  bool HasEnsuredCapacity() const { return has_ensured_capacity_; }

#else

  class EnsureCapacity {
   public:
    explicit EnsureCapacity(AssemblerBuffer* buffer) {
      if (buffer->cursor() > buffer->limit()) {
        buffer->ExtendCapacity(buffer->Size() + kMinimumGap);
      }
    }
  };

  // When building the C++ tests, assertion code is enabled. To allow
  // asserting that the user of the assembler buffer has ensured the
  // capacity needed for emitting, we add a dummy method in non-debug mode.
  bool HasEnsuredCapacity() const { return true; }

#endif

  // Returns the position in the instruction stream.
  int GetPosition() { return cursor_ - contents_; }

  size_t Capacity() const {
    CHECK_GE(limit_, contents_);
    return (limit_ - contents_) + kMinimumGap;
  }

  // Unconditionally increase the capacity.
  // The provided `min_capacity` must be higher than current `Capacity()`.
  void ExtendCapacity(size_t min_capacity);

 private:
  // The limit is set to kMinimumGap bytes before the end of the data area.
  // This leaves enough space for the longest possible instruction and allows
  // for a single, fast space check per instruction.
  static const int kMinimumGap = 32;

  ArenaAllocator* const allocator_;
  uint8_t* contents_;
  uint8_t* cursor_;
  uint8_t* limit_;
  AssemblerFixup* fixup_;
#ifndef NDEBUG
  bool fixups_processed_;
#endif

  // Head of linked list of slow paths
  SlowPath* slow_path_;

  uint8_t* cursor() const { return cursor_; }
  uint8_t* limit() const { return limit_; }

  // Process the fixup chain starting at the given fixup. The offset is
  // non-zero for fixups in the body if the preamble is non-empty.
  void ProcessFixups(const MemoryRegion& region);

  // Compute the limit based on the data area and the capacity. See
  // description of kMinimumGap for the reasoning behind the value.
  static uint8_t* ComputeLimit(uint8_t* data, size_t capacity) {
    return data + capacity - kMinimumGap;
  }

  friend class AssemblerFixup;
};

// The purpose of this class is to ensure that we do not have to explicitly
// call the AdvancePC method (which is good for convenience and correctness).
class DebugFrameOpCodeWriterForAssembler FINAL
    : public dwarf::DebugFrameOpCodeWriter<> {
 public:
  struct DelayedAdvancePC {
    uint32_t stream_pos;
    uint32_t pc;
  };
  // This method is called by the opcode writers.
  virtual void ImplicitlyAdvancePC() FINAL;

  explicit DebugFrameOpCodeWriterForAssembler(Assembler* buffer)
      : dwarf::DebugFrameOpCodeWriter<>(false /* enabled */),
        assembler_(buffer),
        delay_emitting_advance_pc_(false),
        delayed_advance_pcs_() {
  }

  ~DebugFrameOpCodeWriterForAssembler() {
    DCHECK(delayed_advance_pcs_.empty());
  }

  // Tell the writer to delay emitting advance PC info.
  // The assembler must explicitly process all the delayed advances.
  void DelayEmittingAdvancePCs() {
    delay_emitting_advance_pc_ = true;
  }

  // Override the last delayed PC. The new PC can be out of order.
  void OverrideDelayedPC(size_t pc) {
    DCHECK(delay_emitting_advance_pc_);
    if (enabled_) {
      DCHECK(!delayed_advance_pcs_.empty());
      delayed_advance_pcs_.back().pc = pc;
    }
  }

  // Return the number of delayed advance PC entries.
  size_t NumberOfDelayedAdvancePCs() const {
    return delayed_advance_pcs_.size();
  }

  // Release the CFI stream and advance PC infos so that the assembler can patch it.
  std::pair<std::vector<uint8_t>, std::vector<DelayedAdvancePC>>
  ReleaseStreamAndPrepareForDelayedAdvancePC() {
    DCHECK(delay_emitting_advance_pc_);
    delay_emitting_advance_pc_ = false;
    std::pair<std::vector<uint8_t>, std::vector<DelayedAdvancePC>> result;
    result.first.swap(opcodes_);
    result.second.swap(delayed_advance_pcs_);
    return result;
  }

  // Reserve space for the CFI stream.
  void ReserveCFIStream(size_t capacity) {
    opcodes_.reserve(capacity);
  }

  // Append raw data to the CFI stream.
  void AppendRawData(const std::vector<uint8_t>& raw_data, size_t first, size_t last) {
    DCHECK_LE(0u, first);
    DCHECK_LE(first, last);
    DCHECK_LE(last, raw_data.size());
    opcodes_.insert(opcodes_.end(), raw_data.begin() + first, raw_data.begin() + last);
  }

 private:
  Assembler* assembler_;
  bool delay_emitting_advance_pc_;
  std::vector<DelayedAdvancePC> delayed_advance_pcs_;
};
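
// A commented sketch of the delayed advance-PC protocol above; the driving
// assembler code is assumed here and is not part of this header:
//
//   cfi.DelayEmittingAdvancePCs();
//   // ... emit code; CFI opcodes record delayed advance entries ...
//   cfi.OverrideDelayedPC(new_pc);  // e.g. after a branch has been relocated
//   auto stream_and_pcs = cfi.ReleaseStreamAndPrepareForDelayedAdvancePC();
//   // stream_and_pcs.first holds the raw opcode stream; .second holds the
//   // DelayedAdvancePC entries for the assembler to patch in.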

class Assembler : public DeletableArenaObject<kArenaAllocAssembler> {
 public:
  // Finalize the code; emit slow paths, fixup branches, add literal pool, etc.
  virtual void FinalizeCode() { buffer_.EmitSlowPaths(this); }

  // Size of generated code
  virtual size_t CodeSize() const { return buffer_.Size(); }
  virtual const uint8_t* CodeBufferBaseAddress() const { return buffer_.contents(); }
  // CodePosition() is a non-const method similar to CodeSize(), which is used to
  // record positions within the code buffer for the purpose of signal handling
  // (stack overflow checks and implicit null checks may trigger signals and the
  // signal handlers expect them right before the recorded positions).
  // On most architectures CodePosition() should be equivalent to CodeSize(), but
  // the MIPS assembler needs to be aware of this recording, so it doesn't put
  // the instructions that can trigger signals into branch delay slots. Handling
  // signals from instructions in delay slots is a bit problematic and should be
  // avoided.
  virtual size_t CodePosition() { return CodeSize(); }

  // Copy instructions out of assembly buffer into the given region of memory
  virtual void FinalizeInstructions(const MemoryRegion& region) {
    buffer_.FinalizeInstructions(region);
  }

  // TODO: Implement with disassembler.
  virtual void Comment(const char* format ATTRIBUTE_UNUSED, ...) {}

  virtual void Bind(Label* label) = 0;
  virtual void Jump(Label* label) = 0;

  virtual ~Assembler() {}

  /**
   * @brief Buffer of DWARF's Call Frame Information opcodes.
   * @details It is used by debuggers and other tools to unwind the call stack.
   */
  DebugFrameOpCodeWriterForAssembler& cfi() { return cfi_; }

  ArenaAllocator* GetAllocator() {
    return buffer_.GetAllocator();
  }

  AssemblerBuffer* GetBuffer() {
    return &buffer_;
  }

 protected:
  explicit Assembler(ArenaAllocator* allocator) : buffer_(allocator), cfi_(this) {}

  AssemblerBuffer buffer_;

  DebugFrameOpCodeWriterForAssembler cfi_;
};
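
// Typical finalization sequence for a concrete assembler (a sketch only; the
// backing `storage` for the final code region is hypothetical and would be
// allocated by the caller):
//
//   assembler->FinalizeCode();                  // emit slow paths, fix up branches
//   MemoryRegion code(storage, assembler->CodeSize());
//   assembler->FinalizeInstructions(code);      // copy out and apply fixups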

}  // namespace art

#endif  // ART_COMPILER_UTILS_ASSEMBLER_H_