      1 // Copyright (c) 1994-2006 Sun Microsystems Inc.
      2 // All Rights Reserved.
      3 //
      4 // Redistribution and use in source and binary forms, with or without
      5 // modification, are permitted provided that the following conditions are
      6 // met:
      7 //
      8 // - Redistributions of source code must retain the above copyright notice,
      9 // this list of conditions and the following disclaimer.
     10 //
     11 // - Redistribution in binary form must reproduce the above copyright
     12 // notice, this list of conditions and the following disclaimer in the
     13 // documentation and/or other materials provided with the distribution.
     14 //
     15 // - Neither the name of Sun Microsystems or the names of contributors may
     16 // be used to endorse or promote products derived from this software without
     17 // specific prior written permission.
     18 //
     19 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
     20 // IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
     21 // THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     22 // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
     23 // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
     24 // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
     25 // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
     26 // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
     27 // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
     28 // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
     29 // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     30 
      31 // The original source code covered by the above license has been
     32 // modified significantly by Google Inc.
     33 // Copyright 2010 the V8 project authors. All rights reserved.
     34 
     35 
     36 #ifndef V8_MIPS_ASSEMBLER_MIPS_H_
     37 #define V8_MIPS_ASSEMBLER_MIPS_H_
     38 
     39 #include <stdio.h>
     40 #include "assembler.h"
     41 #include "constants-mips.h"
     42 #include "serialize.h"
     43 
     44 namespace v8 {
     45 namespace internal {
     46 
     47 // CPU Registers.
     48 //
     49 // 1) We would prefer to use an enum, but enum values are assignment-
     50 // compatible with int, which has caused code-generation bugs.
     51 //
     52 // 2) We would prefer to use a class instead of a struct but we don't like
     53 // the register initialization to depend on the particular initialization
     54 // order (which appears to be different on OS X, Linux, and Windows for the
     55 // installed versions of C++ we tried). Using a struct permits C-style
     56 // "initialization". Also, the Register objects cannot be const as this
     57 // forces initialization stubs in MSVC, making us dependent on initialization
     58 // order.
     59 //
     60 // 3) By not using an enum, we are possibly preventing the compiler from
     61 // doing certain constant folds, which may significantly reduce the
     62 // code generated for some assembly instructions (because they boil down
     63 // to a few constants). If this is a problem, we could change the code
     64 // such that we use an enum in optimized mode, and the struct in debug
     65 // mode. This way we get the compile-time error checking in debug mode
     66 // and best performance in optimized code.
     67 
     68 
     69 // -----------------------------------------------------------------------------
     70 // Implementation of Register and FPURegister
     71 
     72 // Core register.
     73 struct Register {
     74   static const int kNumRegisters = v8::internal::kNumRegisters;
     75   static const int kNumAllocatableRegisters = 14;  // v0 through t7
     76 
     77   static int ToAllocationIndex(Register reg) {
     78     return reg.code() - 2;  // zero_reg and 'at' are skipped.
     79   }
     80 
     81   static Register FromAllocationIndex(int index) {
     82     ASSERT(index >= 0 && index < kNumAllocatableRegisters);
     83     return from_code(index + 2);  // zero_reg and 'at' are skipped.
     84   }
     85 
     86   static const char* AllocationIndexToString(int index) {
     87     ASSERT(index >= 0 && index < kNumAllocatableRegisters);
     88     const char* const names[] = {
     89       "v0",
     90       "v1",
     91       "a0",
     92       "a1",
     93       "a2",
     94       "a3",
     95       "t0",
     96       "t1",
     97       "t2",
     98       "t3",
     99       "t4",
    100       "t5",
    101       "t6",
    102       "t7",
    103     };
    104     return names[index];
    105   }
    106 
    107   static Register from_code(int code) {
    108     Register r = { code };
    109     return r;
    110   }
    111 
    112   bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters; }
    113   bool is(Register reg) const { return code_ == reg.code_; }
    114   int code() const {
    115     ASSERT(is_valid());
    116     return code_;
    117   }
    118   int bit() const {
    119     ASSERT(is_valid());
    120     return 1 << code_;
    121   }
    122 
    123   // Unfortunately we can't make this private in a struct.
    124   int code_;
    125 };
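         // Illustrative sketch (not part of the interface): the allocatable
         // registers map to indices 0..13 by skipping zero_reg and 'at', e.g.
         //   Register::ToAllocationIndex(v0) == 0        // v0 has code 2.
         //   Register::FromAllocationIndex(13).is(t7)    // t7 has code 15.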
    126 
    127 const Register no_reg = { -1 };
    128 
    129 const Register zero_reg = { 0 };
    130 const Register at = { 1 };
    131 const Register v0 = { 2 };
    132 const Register v1 = { 3 };
    133 const Register a0 = { 4 };
    134 const Register a1 = { 5 };
    135 const Register a2 = { 6 };
    136 const Register a3 = { 7 };
    137 const Register t0 = { 8 };
    138 const Register t1 = { 9 };
    139 const Register t2 = { 10 };
    140 const Register t3 = { 11 };
    141 const Register t4 = { 12 };
    142 const Register t5 = { 13 };
    143 const Register t6 = { 14 };
    144 const Register t7 = { 15 };
    145 const Register s0 = { 16 };
    146 const Register s1 = { 17 };
    147 const Register s2 = { 18 };
    148 const Register s3 = { 19 };
    149 const Register s4 = { 20 };
    150 const Register s5 = { 21 };
    151 const Register s6 = { 22 };
    152 const Register s7 = { 23 };
    153 const Register t8 = { 24 };
    154 const Register t9 = { 25 };
    155 const Register k0 = { 26 };
    156 const Register k1 = { 27 };
    157 const Register gp = { 28 };
    158 const Register sp = { 29 };
    159 const Register s8_fp = { 30 };
    160 const Register ra = { 31 };
    161 
    162 
    163 int ToNumber(Register reg);
    164 
    165 Register ToRegister(int num);
    166 
    167 // Coprocessor register.
    168 struct FPURegister {
    169   static const int kNumRegisters = v8::internal::kNumFPURegisters;
     170   // f0 has been excluded from allocation, following ia32 where xmm0 is
     171   // excluded.
    172   static const int kNumAllocatableRegisters = 15;
    173 
    174   static int ToAllocationIndex(FPURegister reg) {
    175     ASSERT(reg.code() != 0);
    176     ASSERT(reg.code() % 2 == 0);
    177     return (reg.code() / 2) - 1;
    178   }
    179 
    180   static FPURegister FromAllocationIndex(int index) {
    181     ASSERT(index >= 0 && index < kNumAllocatableRegisters);
    182     return from_code((index + 1) * 2);
    183   }
    184 
    185   static const char* AllocationIndexToString(int index) {
    186     ASSERT(index >= 0 && index < kNumAllocatableRegisters);
    187     const char* const names[] = {
    188       "f2",
    189       "f4",
    190       "f6",
    191       "f8",
    192       "f10",
    193       "f12",
    194       "f14",
    195       "f16",
    196       "f18",
    197       "f20",
    198       "f22",
    199       "f24",
    200       "f26",
    201       "f28",
    202       "f30"
    203     };
    204     return names[index];
    205   }
    206 
    207   static FPURegister from_code(int code) {
    208     FPURegister r = { code };
    209     return r;
    210   }
    211 
     212   bool is_valid() const { return 0 <= code_ && code_ < kNumFPURegisters; }
    213   bool is(FPURegister creg) const { return code_ == creg.code_; }
    214   int code() const {
    215     ASSERT(is_valid());
    216     return code_;
    217   }
    218   int bit() const {
    219     ASSERT(is_valid());
    220     return 1 << code_;
    221   }
    222   void setcode(int f) {
    223     code_ = f;
    224     ASSERT(is_valid());
    225   }
    226   // Unfortunately we can't make this private in a struct.
    227   int code_;
    228 };
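         // Illustrative sketch (not part of the interface): doubles occupy
         // even/odd FPU register pairs, so only even registers are handed out
         // by the allocator (see the asserts above) and f0 is excluded, e.g.
         //   FPURegister::ToAllocationIndex(f2) == 0        // (2 / 2) - 1.
         //   FPURegister::FromAllocationIndex(14).is(f30)   // (14 + 1) * 2.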
    229 
    230 typedef FPURegister DoubleRegister;
    231 
    232 const FPURegister no_creg = { -1 };
    233 
    234 const FPURegister f0 = { 0 };  // Return value in hard float mode.
    235 const FPURegister f1 = { 1 };
    236 const FPURegister f2 = { 2 };
    237 const FPURegister f3 = { 3 };
    238 const FPURegister f4 = { 4 };
    239 const FPURegister f5 = { 5 };
    240 const FPURegister f6 = { 6 };
    241 const FPURegister f7 = { 7 };
    242 const FPURegister f8 = { 8 };
    243 const FPURegister f9 = { 9 };
    244 const FPURegister f10 = { 10 };
    245 const FPURegister f11 = { 11 };
    246 const FPURegister f12 = { 12 };  // Arg 0 in hard float mode.
    247 const FPURegister f13 = { 13 };
    248 const FPURegister f14 = { 14 };  // Arg 1 in hard float mode.
    249 const FPURegister f15 = { 15 };
    250 const FPURegister f16 = { 16 };
    251 const FPURegister f17 = { 17 };
    252 const FPURegister f18 = { 18 };
    253 const FPURegister f19 = { 19 };
    254 const FPURegister f20 = { 20 };
    255 const FPURegister f21 = { 21 };
    256 const FPURegister f22 = { 22 };
    257 const FPURegister f23 = { 23 };
    258 const FPURegister f24 = { 24 };
    259 const FPURegister f25 = { 25 };
    260 const FPURegister f26 = { 26 };
    261 const FPURegister f27 = { 27 };
    262 const FPURegister f28 = { 28 };
    263 const FPURegister f29 = { 29 };
    264 const FPURegister f30 = { 30 };
    265 const FPURegister f31 = { 31 };
    266 
    267 // FPU (coprocessor 1) control registers.
    268 // Currently only FCSR (#31) is implemented.
    269 struct FPUControlRegister {
    270   static const int kFCSRRegister = 31;
    271   static const int kInvalidFPUControlRegister = -1;
    272 
    273   bool is_valid() const { return code_ == kFCSRRegister; }
    274   bool is(FPUControlRegister creg) const { return code_ == creg.code_; }
    275   int code() const {
    276     ASSERT(is_valid());
    277     return code_;
    278   }
    279   int bit() const {
    280     ASSERT(is_valid());
    281     return 1 << code_;
    282   }
    283   void setcode(int f) {
    284     code_ = f;
    285     ASSERT(is_valid());
    286   }
    287   // Unfortunately we can't make this private in a struct.
    288   int code_;
    289 };
    290 
    291 const FPUControlRegister no_fpucreg = { -1 };
    292 const FPUControlRegister FCSR = { kFCSRRegister };
    293 
    294 
    295 // -----------------------------------------------------------------------------
    296 // Machine instruction Operands.
    297 
     298 // Class Operand represents a register or immediate operand in instructions.
    299 class Operand BASE_EMBEDDED {
    300  public:
    301   // Immediate.
    302   INLINE(explicit Operand(int32_t immediate,
    303          RelocInfo::Mode rmode = RelocInfo::NONE));
    304   INLINE(explicit Operand(const ExternalReference& f));
    305   INLINE(explicit Operand(const char* s));
    306   INLINE(explicit Operand(Object** opp));
    307   INLINE(explicit Operand(Context** cpp));
    308   explicit Operand(Handle<Object> handle);
    309   INLINE(explicit Operand(Smi* value));
    310 
    311   // Register.
    312   INLINE(explicit Operand(Register rm));
    313 
    314   // Return true if this is a register operand.
    315   INLINE(bool is_reg() const);
    316 
    317   Register rm() const { return rm_; }
    318 
    319  private:
    320   Register rm_;
    321   int32_t imm32_;  // Valid if rm_ == no_reg
    322   RelocInfo::Mode rmode_;
    323 
    324   friend class Assembler;
    325   friend class MacroAssembler;
    326 };
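         // Usage sketch (illustrative only): an Operand wraps either an
         // immediate or a register, which higher layers such as the
         // MacroAssembler consume.
         //   Operand imm(0x1234);   // Immediate operand; imm.is_reg() is false.
         //   Operand reg(a0);       // Register operand; reg.rm() is a0.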
    327 
    328 
     329 // On MIPS we have only one addressing mode with base_reg + offset.
    330 // Class MemOperand represents a memory operand in load and store instructions.
    331 class MemOperand : public Operand {
    332  public:
    333 
    334   explicit MemOperand(Register rn, int32_t offset = 0);
    335 
    336  private:
    337   int32_t offset_;
    338 
    339   friend class Assembler;
    340 };
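         // Usage sketch (illustrative only): loads and stores take a MemOperand
         // built from a base register and an offset, e.g.
         //   lw(t0, MemOperand(sp, 8));    // t0 = word at sp + 8.
         //   sw(t0, MemOperand(sp, 12));   // word at sp + 12 = t0.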
    341 
    342 
    343 // CpuFeatures keeps track of which features are supported by the target CPU.
    344 // Supported features must be enabled by a Scope before use.
    345 class CpuFeatures {
    346  public:
    347   // Detect features of the target CPU. Set safe defaults if the serializer
    348   // is enabled (snapshots must be portable).
    349   void Probe(bool portable);
    350 
    351   // Check whether a feature is supported by the target CPU.
    352   bool IsSupported(CpuFeature f) const {
    353     if (f == FPU && !FLAG_enable_fpu) return false;
    354     return (supported_ & (1u << f)) != 0;
    355   }
    356 
    357   // Check whether a feature is currently enabled.
    358   bool IsEnabled(CpuFeature f) const {
    359     return (enabled_ & (1u << f)) != 0;
    360   }
    361 
    362   // Enable a specified feature within a scope.
    363   class Scope BASE_EMBEDDED {
    364 #ifdef DEBUG
    365    public:
    366     explicit Scope(CpuFeature f)
    367         : cpu_features_(Isolate::Current()->cpu_features()),
    368           isolate_(Isolate::Current()) {
    369       ASSERT(cpu_features_->IsSupported(f));
    370       ASSERT(!Serializer::enabled() ||
    371              (cpu_features_->found_by_runtime_probing_ & (1u << f)) == 0);
    372       old_enabled_ = cpu_features_->enabled_;
    373       cpu_features_->enabled_ |= 1u << f;
    374     }
    375     ~Scope() {
    376       ASSERT_EQ(Isolate::Current(), isolate_);
    377       cpu_features_->enabled_ = old_enabled_;
    378      }
    379    private:
    380     unsigned old_enabled_;
    381     CpuFeatures* cpu_features_;
    382     Isolate* isolate_;
    383 #else
    384    public:
    385     explicit Scope(CpuFeature f) {}
    386 #endif
    387   };
    388 
    389  private:
    390   CpuFeatures();
    391 
    392   unsigned supported_;
    393   unsigned enabled_;
    394   unsigned found_by_runtime_probing_;
    395 
    396   friend class Isolate;
    397 
    398   DISALLOW_COPY_AND_ASSIGN(CpuFeatures);
    399 };
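         // Usage sketch (illustrative only, using only names from this header):
         // guard on IsSupported() and enable the feature within a Scope before
         // emitting FPU instructions.
         //   CpuFeatures* features = Isolate::Current()->cpu_features();
         //   if (features->IsSupported(FPU)) {
         //     CpuFeatures::Scope scope(FPU);
         //     // ... emit FPU instructions such as add_d() here ...
         //   }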
    400 
    401 
    402 class Assembler : public AssemblerBase {
    403  public:
    404   // Create an assembler. Instructions and relocation information are emitted
    405   // into a buffer, with the instructions starting from the beginning and the
    406   // relocation information starting from the end of the buffer. See CodeDesc
    407   // for a detailed comment on the layout (globals.h).
    408   //
    409   // If the provided buffer is NULL, the assembler allocates and grows its own
    410   // buffer, and buffer_size determines the initial buffer size. The buffer is
    411   // owned by the assembler and deallocated upon destruction of the assembler.
    412   //
    413   // If the provided buffer is not NULL, the assembler uses the provided buffer
    414   // for code generation and assumes its size to be buffer_size. If the buffer
    415   // is too small, a fatal error occurs. No deallocation of the buffer is done
    416   // upon destruction of the assembler.
    417   Assembler(void* buffer, int buffer_size);
    418   ~Assembler();
    419 
    420   // Overrides the default provided by FLAG_debug_code.
    421   void set_emit_debug_code(bool value) { emit_debug_code_ = value; }
    422 
    423   // GetCode emits any pending (non-emitted) code and fills the descriptor
    424   // desc. GetCode() is idempotent; it returns the same result if no other
    425   // Assembler functions are invoked in between GetCode() calls.
    426   void GetCode(CodeDesc* desc);
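           // Minimal usage sketch (illustrative only; the buffer size is an
           // assumed value): let the assembler own its buffer, emit a few
           // instructions, then extract the code descriptor.
           //   Assembler assm(NULL, 4 * KB);
           //   assm.addu(v0, a0, a1);   // v0 = a0 + a1.
           //   assm.jr(ra);             // Return.
           //   assm.nop();              // Branch delay slot.
           //   CodeDesc desc;
           //   assm.GetCode(&desc);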
    427 
    428   // Label operations & relative jumps (PPUM Appendix D).
    429   //
    430   // Takes a branch opcode (cc) and a label (L) and generates
    431   // either a backward branch or a forward branch and links it
    432   // to the label fixup chain. Usage:
    433   //
    434   // Label L;    // unbound label
    435   // j(cc, &L);  // forward branch to unbound label
    436   // bind(&L);   // bind label to the current pc
    437   // j(cc, &L);  // backward branch to bound label
    438   // bind(&L);   // illegal: a label may be bound only once
    439   //
    440   // Note: The same Label can be used for forward and backward branches
    441   // but it may be bound only once.
    442   void bind(Label* L);  // binds an unbound label L to the current code position
    443 
     444   // Returns the branch offset to the given label from the current code
     445   // position. Links the label to the current position if it is still unbound.
     446   // Manages the jump elimination optimization if the second parameter is true.
    447   int32_t branch_offset(Label* L, bool jump_elimination_allowed);
    448   int32_t shifted_branch_offset(Label* L, bool jump_elimination_allowed) {
    449     int32_t o = branch_offset(L, jump_elimination_allowed);
    450     ASSERT((o & 3) == 0);   // Assert the offset is aligned.
    451     return o >> 2;
    452   }
    453 
     454   // Puts a label's target address at the given position.
    455   // The high 8 bits are set to zero.
    456   void label_at_put(Label* L, int at_offset);
    457 
    458   // Read/Modify the code target address in the branch/call instruction at pc.
    459   static Address target_address_at(Address pc);
    460   static void set_target_address_at(Address pc, Address target);
    461 
    462   // This sets the branch destination (which gets loaded at the call address).
    463   // This is for calls and branches within generated code.
    464   inline static void set_target_at(Address instruction_payload,
    465                                    Address target) {
    466     set_target_address_at(instruction_payload, target);
    467   }
    468 
    469   // This sets the branch destination.
    470   // This is for calls and branches to runtime code.
    471   inline static void set_external_target_at(Address instruction_payload,
    472                                             Address target) {
    473     set_target_address_at(instruction_payload, target);
    474   }
    475 
    476   // Size of an instruction.
    477   static const int kInstrSize = sizeof(Instr);
    478 
    479   // Difference between address of current opcode and target address offset.
    480   static const int kBranchPCOffset = 4;
    481 
    482   // Here we are patching the address in the LUI/ORI instruction pair.
    483   // These values are used in the serialization process and must be zero for
     484   // the MIPS platform, as Code, Embedded Object or External-reference
     485   // pointers are split across two consecutive instructions and don't exist
     486   // separately in the code, so the serializer should not step forward in
     487   // memory after a target is resolved and written.
    488   static const int kCallTargetSize = 0 * kInstrSize;
    489   static const int kExternalTargetSize = 0 * kInstrSize;
    490 
     491   // Number of consecutive instructions used to store a 32-bit constant.
     492   // Used by RelocInfo::target_address_address() to tell the serializer the
     493   // address of the instruction that follows the LUI/ORI instruction pair.
    494   static const int kInstructionsFor32BitConstant = 2;
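           // Illustrative sketch of the two-instruction constant load referred
           // to above (the 32-bit value 0x12345678 is an assumed example):
           //   lui(at, 0x1234);       // Load upper half into 'at'.
           //   ori(at, at, 0x5678);   // OR in the lower half.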
    495 
    496   // Distance between the instruction referring to the address of the call
    497   // target and the return address.
    498   static const int kCallTargetAddressOffset = 4 * kInstrSize;
    499 
    500   // Distance between start of patched return sequence and the emitted address
    501   // to jump to.
    502   static const int kPatchReturnSequenceAddressOffset = 0;
    503 
    504   // Distance between start of patched debug break slot and the emitted address
    505   // to jump to.
     506   static const int kPatchDebugBreakSlotAddressOffset = 0 * kInstrSize;
    507 
    508   // Difference between address of current opcode and value read from pc
    509   // register.
    510   static const int kPcLoadDelta = 4;
    511 
    512   // Number of instructions used for the JS return sequence. The constant is
    513   // used by the debugger to patch the JS return sequence.
    514   static const int kJSReturnSequenceInstructions = 7;
    515   static const int kDebugBreakSlotInstructions = 4;
    516   static const int kDebugBreakSlotLength =
    517       kDebugBreakSlotInstructions * kInstrSize;
    518 
    519 
    520   // ---------------------------------------------------------------------------
    521   // Code generation.
    522 
    523   // Insert the smallest number of nop instructions
    524   // possible to align the pc offset to a multiple
    525   // of m. m must be a power of 2 (>= 4).
    526   void Align(int m);
    527   // Aligns code to something that's optimal for a jump target for the platform.
    528   void CodeTargetAlign();
    529 
    530   // Different nop operations are used by the code generator to detect certain
    531   // states of the generated code.
    532   enum NopMarkerTypes {
    533     NON_MARKING_NOP = 0,
    534     DEBUG_BREAK_NOP,
    535     // IC markers.
    536     PROPERTY_ACCESS_INLINED,
    537     PROPERTY_ACCESS_INLINED_CONTEXT,
    538     PROPERTY_ACCESS_INLINED_CONTEXT_DONT_DELETE,
    539     // Helper values.
    540     LAST_CODE_MARKER,
    541     FIRST_IC_MARKER = PROPERTY_ACCESS_INLINED
    542   };
    543 
    544   // type == 0 is the default non-marking type.
    545   void nop(unsigned int type = 0) {
    546     ASSERT(type < 32);
    547     sll(zero_reg, zero_reg, type, true);
    548   }
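           // For example (sketch): nop(DEBUG_BREAK_NOP) emits
           // "sll zero_reg, zero_reg, 1", which IsNop(instr, DEBUG_BREAK_NOP)
           // later recognizes when scanning generated code.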
    549 
    550 
    551   //------- Branch and jump  instructions --------
    552   // We don't use likely variant of instructions.
    553   void b(int16_t offset);
    554   void b(Label* L) { b(branch_offset(L, false)>>2); }
    555   void bal(int16_t offset);
    556   void bal(Label* L) { bal(branch_offset(L, false)>>2); }
    557 
    558   void beq(Register rs, Register rt, int16_t offset);
    559   void beq(Register rs, Register rt, Label* L) {
    560     beq(rs, rt, branch_offset(L, false) >> 2);
    561   }
    562   void bgez(Register rs, int16_t offset);
    563   void bgezal(Register rs, int16_t offset);
    564   void bgtz(Register rs, int16_t offset);
    565   void blez(Register rs, int16_t offset);
    566   void bltz(Register rs, int16_t offset);
    567   void bltzal(Register rs, int16_t offset);
    568   void bne(Register rs, Register rt, int16_t offset);
    569   void bne(Register rs, Register rt, Label* L) {
    570     bne(rs, rt, branch_offset(L, false)>>2);
    571   }
    572 
     573   // Never use the int16_t b(l)cond version with a branch offset
     574   // instead of the Label* version. See the Twiki for more information.
    575 
     576   // Jump targets must be in the current 256 MB-aligned region, i.e. 28 bits.
    577   void j(int32_t target);
    578   void jal(int32_t target);
    579   void jalr(Register rs, Register rd = ra);
    580   void jr(Register target);
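           // Worked note (sketch): j/jal encode a 26-bit instruction index that
           // is shifted left by 2 bits, giving 28 bits of byte offset; the
           // remaining upper bits come from the PC, hence the 256 MB region
           // restriction noted above.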
    581 
    582 
    583   //-------Data-processing-instructions---------
    584 
    585   // Arithmetic.
    586   void addu(Register rd, Register rs, Register rt);
    587   void subu(Register rd, Register rs, Register rt);
    588   void mult(Register rs, Register rt);
    589   void multu(Register rs, Register rt);
    590   void div(Register rs, Register rt);
    591   void divu(Register rs, Register rt);
    592   void mul(Register rd, Register rs, Register rt);
    593 
    594   void addiu(Register rd, Register rs, int32_t j);
    595 
    596   // Logical.
    597   void and_(Register rd, Register rs, Register rt);
    598   void or_(Register rd, Register rs, Register rt);
    599   void xor_(Register rd, Register rs, Register rt);
    600   void nor(Register rd, Register rs, Register rt);
    601 
    602   void andi(Register rd, Register rs, int32_t j);
    603   void ori(Register rd, Register rs, int32_t j);
    604   void xori(Register rd, Register rs, int32_t j);
    605   void lui(Register rd, int32_t j);
    606 
    607   // Shifts.
     608   // Please note: sll(zero_reg, zero_reg, x) instructions are reserved as nops
     609   // and may cause problems in normal code; coming_from_nop makes sure this
     610   // doesn't happen.
    611   void sll(Register rd, Register rt, uint16_t sa, bool coming_from_nop = false);
    612   void sllv(Register rd, Register rt, Register rs);
    613   void srl(Register rd, Register rt, uint16_t sa);
    614   void srlv(Register rd, Register rt, Register rs);
    615   void sra(Register rt, Register rd, uint16_t sa);
    616   void srav(Register rt, Register rd, Register rs);
    617   void rotr(Register rd, Register rt, uint16_t sa);
    618   void rotrv(Register rd, Register rt, Register rs);
    619 
    620 
    621   //------------Memory-instructions-------------
    622 
    623   void lb(Register rd, const MemOperand& rs);
    624   void lbu(Register rd, const MemOperand& rs);
    625   void lh(Register rd, const MemOperand& rs);
    626   void lhu(Register rd, const MemOperand& rs);
    627   void lw(Register rd, const MemOperand& rs);
    628   void lwl(Register rd, const MemOperand& rs);
    629   void lwr(Register rd, const MemOperand& rs);
    630   void sb(Register rd, const MemOperand& rs);
    631   void sh(Register rd, const MemOperand& rs);
    632   void sw(Register rd, const MemOperand& rs);
    633   void swl(Register rd, const MemOperand& rs);
    634   void swr(Register rd, const MemOperand& rs);
    635 
    636 
    637   //-------------Misc-instructions--------------
    638 
    639   // Break / Trap instructions.
    640   void break_(uint32_t code);
    641   void tge(Register rs, Register rt, uint16_t code);
    642   void tgeu(Register rs, Register rt, uint16_t code);
    643   void tlt(Register rs, Register rt, uint16_t code);
    644   void tltu(Register rs, Register rt, uint16_t code);
    645   void teq(Register rs, Register rt, uint16_t code);
    646   void tne(Register rs, Register rt, uint16_t code);
    647 
    648   // Move from HI/LO register.
    649   void mfhi(Register rd);
    650   void mflo(Register rd);
    651 
    652   // Set on less than.
    653   void slt(Register rd, Register rs, Register rt);
    654   void sltu(Register rd, Register rs, Register rt);
    655   void slti(Register rd, Register rs, int32_t j);
    656   void sltiu(Register rd, Register rs, int32_t j);
    657 
    658   // Conditional move.
    659   void movz(Register rd, Register rs, Register rt);
    660   void movn(Register rd, Register rs, Register rt);
    661   void movt(Register rd, Register rs, uint16_t cc = 0);
    662   void movf(Register rd, Register rs, uint16_t cc = 0);
    663 
    664   // Bit twiddling.
    665   void clz(Register rd, Register rs);
    666   void ins_(Register rt, Register rs, uint16_t pos, uint16_t size);
    667   void ext_(Register rt, Register rs, uint16_t pos, uint16_t size);
    668 
    669   //--------Coprocessor-instructions----------------
    670 
    671   // Load, store, and move.
    672   void lwc1(FPURegister fd, const MemOperand& src);
    673   void ldc1(FPURegister fd, const MemOperand& src);
    674 
    675   void swc1(FPURegister fs, const MemOperand& dst);
    676   void sdc1(FPURegister fs, const MemOperand& dst);
    677 
    678   void mtc1(Register rt, FPURegister fs);
    679   void mfc1(Register rt, FPURegister fs);
    680 
    681   void ctc1(Register rt, FPUControlRegister fs);
    682   void cfc1(Register rt, FPUControlRegister fs);
    683 
    684   // Arithmetic.
    685   void add_d(FPURegister fd, FPURegister fs, FPURegister ft);
    686   void sub_d(FPURegister fd, FPURegister fs, FPURegister ft);
    687   void mul_d(FPURegister fd, FPURegister fs, FPURegister ft);
    688   void div_d(FPURegister fd, FPURegister fs, FPURegister ft);
    689   void abs_d(FPURegister fd, FPURegister fs);
    690   void mov_d(FPURegister fd, FPURegister fs);
    691   void neg_d(FPURegister fd, FPURegister fs);
    692   void sqrt_d(FPURegister fd, FPURegister fs);
    693 
    694   // Conversion.
    695   void cvt_w_s(FPURegister fd, FPURegister fs);
    696   void cvt_w_d(FPURegister fd, FPURegister fs);
    697   void trunc_w_s(FPURegister fd, FPURegister fs);
    698   void trunc_w_d(FPURegister fd, FPURegister fs);
    699   void round_w_s(FPURegister fd, FPURegister fs);
    700   void round_w_d(FPURegister fd, FPURegister fs);
    701   void floor_w_s(FPURegister fd, FPURegister fs);
    702   void floor_w_d(FPURegister fd, FPURegister fs);
    703   void ceil_w_s(FPURegister fd, FPURegister fs);
    704   void ceil_w_d(FPURegister fd, FPURegister fs);
    705 
    706   void cvt_l_s(FPURegister fd, FPURegister fs);
    707   void cvt_l_d(FPURegister fd, FPURegister fs);
    708   void trunc_l_s(FPURegister fd, FPURegister fs);
    709   void trunc_l_d(FPURegister fd, FPURegister fs);
    710   void round_l_s(FPURegister fd, FPURegister fs);
    711   void round_l_d(FPURegister fd, FPURegister fs);
    712   void floor_l_s(FPURegister fd, FPURegister fs);
    713   void floor_l_d(FPURegister fd, FPURegister fs);
    714   void ceil_l_s(FPURegister fd, FPURegister fs);
    715   void ceil_l_d(FPURegister fd, FPURegister fs);
    716 
    717   void cvt_s_w(FPURegister fd, FPURegister fs);
    718   void cvt_s_l(FPURegister fd, FPURegister fs);
    719   void cvt_s_d(FPURegister fd, FPURegister fs);
    720 
    721   void cvt_d_w(FPURegister fd, FPURegister fs);
    722   void cvt_d_l(FPURegister fd, FPURegister fs);
    723   void cvt_d_s(FPURegister fd, FPURegister fs);
    724 
    725   // Conditions and branches.
    726   void c(FPUCondition cond, SecondaryField fmt,
    727          FPURegister ft, FPURegister fs, uint16_t cc = 0);
    728 
    729   void bc1f(int16_t offset, uint16_t cc = 0);
    730   void bc1f(Label* L, uint16_t cc = 0) { bc1f(branch_offset(L, false)>>2, cc); }
    731   void bc1t(int16_t offset, uint16_t cc = 0);
    732   void bc1t(Label* L, uint16_t cc = 0) { bc1t(branch_offset(L, false)>>2, cc); }
    733   void fcmp(FPURegister src1, const double src2, FPUCondition cond);
    734 
    735   // Check the code size generated from label to here.
    736   int InstructionsGeneratedSince(Label* l) {
    737     return (pc_offset() - l->pos()) / kInstrSize;
    738   }
    739 
     740   // Class for postponing the trampoline pool generation within a scope.
    741   class BlockTrampolinePoolScope {
    742    public:
    743     explicit BlockTrampolinePoolScope(Assembler* assem) : assem_(assem) {
    744       assem_->StartBlockTrampolinePool();
    745     }
    746     ~BlockTrampolinePoolScope() {
    747       assem_->EndBlockTrampolinePool();
    748     }
    749 
    750    private:
    751     Assembler* assem_;
    752 
    753     DISALLOW_IMPLICIT_CONSTRUCTORS(BlockTrampolinePoolScope);
    754   };
    755 
    756   // Debugging.
    757 
    758   // Mark address of the ExitJSFrame code.
    759   void RecordJSReturn();
    760 
    761   // Mark address of a debug break slot.
    762   void RecordDebugBreakSlot();
    763 
    764   // Record a comment relocation entry that can be used by a disassembler.
    765   // Use --code-comments to enable.
    766   void RecordComment(const char* msg);
    767 
    768   // Writes a single byte or word of data in the code stream.  Used for
    769   // inline tables, e.g., jump-tables.
    770   void db(uint8_t data);
    771   void dd(uint32_t data);
    772 
    773   int32_t pc_offset() const { return pc_ - buffer_; }
    774 
    775   PositionsRecorder* positions_recorder() { return &positions_recorder_; }
    776 
    777   bool can_peephole_optimize(int instructions) {
    778     if (!allow_peephole_optimization_) return false;
    779     if (last_bound_pos_ > pc_offset() - instructions * kInstrSize) return false;
    780     return reloc_info_writer.last_pc() <= pc_ - instructions * kInstrSize;
    781   }
    782 
    783   // Postpone the generation of the trampoline pool for the specified number of
    784   // instructions.
    785   void BlockTrampolinePoolFor(int instructions);
    786 
    787   // Check if there is less than kGap bytes available in the buffer.
    788   // If this is the case, we need to grow the buffer before emitting
    789   // an instruction or relocation information.
    790   inline bool overflow() const { return pc_ >= reloc_info_writer.pos() - kGap; }
    791 
    792   // Get the number of bytes available in the buffer.
    793   inline int available_space() const { return reloc_info_writer.pos() - pc_; }
    794 
    795   // Read/patch instructions.
    796   static Instr instr_at(byte* pc) { return *reinterpret_cast<Instr*>(pc); }
    797   static void instr_at_put(byte* pc, Instr instr) {
    798     *reinterpret_cast<Instr*>(pc) = instr;
    799   }
    800   Instr instr_at(int pos) { return *reinterpret_cast<Instr*>(buffer_ + pos); }
    801   void instr_at_put(int pos, Instr instr) {
    802     *reinterpret_cast<Instr*>(buffer_ + pos) = instr;
    803   }
    804 
    805   // Check if an instruction is a branch of some kind.
    806   static bool IsBranch(Instr instr);
    807 
    808   static bool IsNop(Instr instr, unsigned int type);
    809   static bool IsPop(Instr instr);
    810   static bool IsPush(Instr instr);
    811   static bool IsLwRegFpOffset(Instr instr);
    812   static bool IsSwRegFpOffset(Instr instr);
    813   static bool IsLwRegFpNegOffset(Instr instr);
    814   static bool IsSwRegFpNegOffset(Instr instr);
    815 
    816   static Register GetRt(Instr instr);
    817 
    818   static int32_t GetBranchOffset(Instr instr);
    819   static bool IsLw(Instr instr);
    820   static int16_t GetLwOffset(Instr instr);
    821   static Instr SetLwOffset(Instr instr, int16_t offset);
    822 
    823   static bool IsSw(Instr instr);
    824   static Instr SetSwOffset(Instr instr, int16_t offset);
    825   static bool IsAddImmediate(Instr instr);
    826   static Instr SetAddImmediateOffset(Instr instr, int16_t offset);
    827 
    828   void CheckTrampolinePool(bool force_emit = false);
    829 
    830  protected:
    831   bool emit_debug_code() const { return emit_debug_code_; }
    832 
    833   int32_t buffer_space() const { return reloc_info_writer.pos() - pc_; }
    834 
    835   // Decode branch instruction at pos and return branch target pos.
    836   int target_at(int32_t pos);
    837 
    838   // Patch branch instruction at pos to branch to given branch target pos.
    839   void target_at_put(int32_t pos, int32_t target_pos);
    840 
    841   // Say if we need to relocate with this mode.
    842   bool MustUseReg(RelocInfo::Mode rmode);
    843 
    844   // Record reloc info for current pc_.
    845   void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
    846 
    847   // Block the emission of the trampoline pool before pc_offset.
    848   void BlockTrampolinePoolBefore(int pc_offset) {
    849     if (no_trampoline_pool_before_ < pc_offset)
    850       no_trampoline_pool_before_ = pc_offset;
    851   }
    852 
    853   void StartBlockTrampolinePool() {
    854     trampoline_pool_blocked_nesting_++;
    855   }
    856   void EndBlockTrampolinePool() {
    857     trampoline_pool_blocked_nesting_--;
    858   }
    859 
    860   bool is_trampoline_pool_blocked() const {
    861     return trampoline_pool_blocked_nesting_ > 0;
    862   }
    863 
    864  private:
    865   // Code buffer:
    866   // The buffer into which code and relocation info are generated.
    867   byte* buffer_;
    868   int buffer_size_;
    869   // True if the assembler owns the buffer, false if buffer is external.
    870   bool own_buffer_;
    871 
    872   // Buffer size and constant pool distance are checked together at regular
    873   // intervals of kBufferCheckInterval emitted bytes.
    874   static const int kBufferCheckInterval = 1*KB/2;
    875 
    876   // Code generation.
    877   // The relocation writer's position is at least kGap bytes below the end of
    878   // the generated instructions. This is so that multi-instruction sequences do
    879   // not have to check for overflow. The same is true for writes of large
    880   // relocation info entries.
    881   static const int kGap = 32;
    882   byte* pc_;  // The program counter - moves forward.
    883 
    884 
    885   // Repeated checking whether the trampoline pool should be emitted is rather
    886   // expensive. By default we only check again once a number of instructions
    887   // has been generated.
    888   static const int kCheckConstIntervalInst = 32;
    889   static const int kCheckConstInterval = kCheckConstIntervalInst * kInstrSize;
    890 
    891   int next_buffer_check_;  // pc offset of next buffer check.
    892 
    893   // Emission of the trampoline pool may be blocked in some code sequences.
    894   int trampoline_pool_blocked_nesting_;  // Block emission if this is not zero.
    895   int no_trampoline_pool_before_;  // Block emission before this pc offset.
    896 
    897   // Keep track of the last emitted pool to guarantee a maximal distance.
    898   int last_trampoline_pool_end_;  // pc offset of the end of the last pool.
    899 
    900   // Relocation information generation.
    901   // Each relocation is encoded as a variable size value.
    902   static const int kMaxRelocSize = RelocInfoWriter::kMaxSize;
    903   RelocInfoWriter reloc_info_writer;
    904 
    905   // The bound position, before this we cannot do instruction elimination.
    906   int last_bound_pos_;
    907 
    908   // Code emission.
    909   inline void CheckBuffer();
    910   void GrowBuffer();
    911   inline void emit(Instr x);
    912   inline void CheckTrampolinePoolQuick();
    913 
    914   // Instruction generation.
     915   // We have 3 different kinds of encoding layouts on MIPS.
     916   // However, due to the many different types of objects encoded in the same
     917   // fields, we have quite a few aliases for each mode.
    918   // Using the same structure to refer to Register and FPURegister would spare a
    919   // few aliases, but mixing both does not look clean to me.
    920   // Anyway we could surely implement this differently.
    921 
    922   void GenInstrRegister(Opcode opcode,
    923                         Register rs,
    924                         Register rt,
    925                         Register rd,
    926                         uint16_t sa = 0,
    927                         SecondaryField func = NULLSF);
    928 
    929   void GenInstrRegister(Opcode opcode,
    930                         Register rs,
    931                         Register rt,
    932                         uint16_t msb,
    933                         uint16_t lsb,
    934                         SecondaryField func);
    935 
    936   void GenInstrRegister(Opcode opcode,
    937                         SecondaryField fmt,
    938                         FPURegister ft,
    939                         FPURegister fs,
    940                         FPURegister fd,
    941                         SecondaryField func = NULLSF);
    942 
    943   void GenInstrRegister(Opcode opcode,
    944                         SecondaryField fmt,
    945                         Register rt,
    946                         FPURegister fs,
    947                         FPURegister fd,
    948                         SecondaryField func = NULLSF);
    949 
    950   void GenInstrRegister(Opcode opcode,
    951                         SecondaryField fmt,
    952                         Register rt,
    953                         FPUControlRegister fs,
    954                         SecondaryField func = NULLSF);
    955 
    956 
    957   void GenInstrImmediate(Opcode opcode,
    958                          Register rs,
    959                          Register rt,
    960                          int32_t  j);
    961   void GenInstrImmediate(Opcode opcode,
    962                          Register rs,
    963                          SecondaryField SF,
    964                          int32_t  j);
    965   void GenInstrImmediate(Opcode opcode,
    966                          Register r1,
    967                          FPURegister r2,
    968                          int32_t  j);
    969 
    970 
    971   void GenInstrJump(Opcode opcode,
    972                      uint32_t address);
    973 
    974   // Helpers.
    975   void LoadRegPlusOffsetToAt(const MemOperand& src);
    976 
    977   // Labels.
    978   void print(Label* L);
    979   void bind_to(Label* L, int pos);
    980   void link_to(Label* L, Label* appendix);
    981   void next(Label* L);
    982 
    983   // One trampoline consists of:
    984   // - space for trampoline slots,
    985   // - space for labels.
    986   //
    987   // Space for trampoline slots is equal to slot_count * 2 * kInstrSize.
     988   // Space for trampoline slots precedes space for labels. Each label takes
     989   // one instruction, so the total space for labels is equal to
     990   // label_count * kInstrSize.
    991   class Trampoline {
    992    public:
    993     Trampoline(int start, int slot_count, int label_count) {
    994       start_ = start;
    995       next_slot_ = start;
    996       free_slot_count_ = slot_count;
    997       next_label_ = start + slot_count * 2 * kInstrSize;
    998       free_label_count_ = label_count;
    999       end_ = next_label_ + (label_count - 1) * kInstrSize;
   1000     }
   1001     int start() {
   1002       return start_;
   1003     }
   1004     int end() {
   1005       return end_;
   1006     }
   1007     int take_slot() {
   1008       int trampoline_slot = next_slot_;
   1009       ASSERT(free_slot_count_ > 0);
   1010       free_slot_count_--;
   1011       next_slot_ += 2 * kInstrSize;
   1012       return trampoline_slot;
   1013     }
   1014     int take_label() {
   1015       int label_pos = next_label_;
   1016       ASSERT(free_label_count_ > 0);
   1017       free_label_count_--;
   1018       next_label_ += kInstrSize;
   1019       return label_pos;
   1020     }
   1021    private:
   1022     int start_;
   1023     int end_;
   1024     int next_slot_;
   1025     int free_slot_count_;
   1026     int next_label_;
   1027     int free_label_count_;
   1028   };
   1029 
   1030   int32_t get_label_entry(int32_t pos, bool next_pool = true);
   1031   int32_t get_trampoline_entry(int32_t pos, bool next_pool = true);
   1032 
   1033   static const int kSlotsPerTrampoline = 2304;
   1034   static const int kLabelsPerTrampoline = 8;
   1035   static const int kTrampolineInst =
   1036       2 * kSlotsPerTrampoline + kLabelsPerTrampoline;
   1037   static const int kTrampolineSize = kTrampolineInst * kInstrSize;
   1038   static const int kMaxBranchOffset = (1 << (18 - 1)) - 1;
   1039   static const int kMaxDistBetweenPools =
   1040       kMaxBranchOffset - 2 * kTrampolineSize;
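           // Worked numbers for the constants above (kInstrSize == 4 on MIPS):
           //   kTrampolineInst      = 2 * 2304 + 8         = 4616 instructions
           //   kTrampolineSize      = 4616 * 4             = 18464 bytes
           //   kMaxBranchOffset     = (1 << 17) - 1        = 131071 bytes
           //   kMaxDistBetweenPools = 131071 - 2 * 18464   = 94143 bytes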
   1041 
   1042   List<Trampoline> trampolines_;
   1043 
   1044   friend class RegExpMacroAssemblerMIPS;
   1045   friend class RelocInfo;
   1046   friend class CodePatcher;
   1047   friend class BlockTrampolinePoolScope;
   1048 
   1049   PositionsRecorder positions_recorder_;
   1050   bool allow_peephole_optimization_;
   1051   bool emit_debug_code_;
   1052   friend class PositionsRecorder;
   1053   friend class EnsureSpace;
   1054 };
   1055 
   1056 
   1057 class EnsureSpace BASE_EMBEDDED {
   1058  public:
   1059   explicit EnsureSpace(Assembler* assembler) {
   1060     assembler->CheckBuffer();
   1061   }
   1062 };
   1063 
   1064 } }  // namespace v8::internal
   1065 
    1066 #endif  // V8_MIPS_ASSEMBLER_MIPS_H_
   1067