      1 // Copyright (c) 1994-2006 Sun Microsystems Inc.
      2 // All Rights Reserved.
      3 //
      4 // Redistribution and use in source and binary forms, with or without
      5 // modification, are permitted provided that the following conditions are
      6 // met:
      7 //
      8 // - Redistributions of source code must retain the above copyright notice,
      9 // this list of conditions and the following disclaimer.
     10 //
     11 // - Redistribution in binary form must reproduce the above copyright
     12 // notice, this list of conditions and the following disclaimer in the
     13 // documentation and/or other materials provided with the distribution.
     14 //
     15 // - Neither the name of Sun Microsystems or the names of contributors may
     16 // be used to endorse or promote products derived from this software without
     17 // specific prior written permission.
     18 //
     19 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
     20 // IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
     21 // THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     22 // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
     23 // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
     24 // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
     25 // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
     26 // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
     27 // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
     28 // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
     29 // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     30 
      31 // The original source code covered by the above license has been
     32 // modified significantly by Google Inc.
     33 // Copyright 2012 the V8 project authors. All rights reserved.
     34 
     35 
     36 #ifndef V8_MIPS_ASSEMBLER_MIPS_H_
     37 #define V8_MIPS_ASSEMBLER_MIPS_H_
     38 
     39 #include <stdio.h>
     40 
     41 #include "src/assembler.h"
     42 #include "src/mips/constants-mips.h"
     43 #include "src/serialize.h"
     44 
     45 namespace v8 {
     46 namespace internal {
     47 
     48 // CPU Registers.
     49 //
     50 // 1) We would prefer to use an enum, but enum values are assignment-
     51 // compatible with int, which has caused code-generation bugs.
     52 //
     53 // 2) We would prefer to use a class instead of a struct but we don't like
     54 // the register initialization to depend on the particular initialization
     55 // order (which appears to be different on OS X, Linux, and Windows for the
     56 // installed versions of C++ we tried). Using a struct permits C-style
     57 // "initialization". Also, the Register objects cannot be const as this
     58 // forces initialization stubs in MSVC, making us dependent on initialization
     59 // order.
     60 //
     61 // 3) By not using an enum, we are possibly preventing the compiler from
     62 // doing certain constant folds, which may significantly reduce the
     63 // code generated for some assembly instructions (because they boil down
     64 // to a few constants). If this is a problem, we could change the code
     65 // such that we use an enum in optimized mode, and the struct in debug
     66 // mode. This way we get the compile-time error checking in debug mode
     67 // and best performance in optimized code.
     68 
     69 
     70 // -----------------------------------------------------------------------------
     71 // Implementation of Register and FPURegister.
     72 
     73 // Core register.
     74 struct Register {
     75   static const int kNumRegisters = v8::internal::kNumRegisters;
     76   static const int kMaxNumAllocatableRegisters = 14;  // v0 through t6 and cp.
     77   static const int kSizeInBytes = 4;
     78   static const int kCpRegister = 23;  // cp (s7) is the 23rd register.
     79 
     80 #if defined(V8_TARGET_LITTLE_ENDIAN)
     81   static const int kMantissaOffset = 0;
     82   static const int kExponentOffset = 4;
     83 #elif defined(V8_TARGET_BIG_ENDIAN)
     84   static const int kMantissaOffset = 4;
     85   static const int kExponentOffset = 0;
     86 #else
     87 #error Unknown endianness
     88 #endif
     89 
     90   inline static int NumAllocatableRegisters();
     91 
     92   static int ToAllocationIndex(Register reg) {
     93     ASSERT((reg.code() - 2) < (kMaxNumAllocatableRegisters - 1) ||
     94            reg.is(from_code(kCpRegister)));
     95     return reg.is(from_code(kCpRegister)) ?
     96            kMaxNumAllocatableRegisters - 1 :  // Return last index for 'cp'.
     97            reg.code() - 2;  // zero_reg and 'at' are skipped.
     98   }
     99 
    100   static Register FromAllocationIndex(int index) {
    101     ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
    102     return index == kMaxNumAllocatableRegisters - 1 ?
    103            from_code(kCpRegister) :  // Last index is always the 'cp' register.
    104            from_code(index + 2);  // zero_reg and 'at' are skipped.
    105   }
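           // Illustrative mapping implied by the two functions above (not new API):
           // allocation index 0 <-> v0 (code 2), index 1 <-> v1 (code 3), ...,
           // index 12 <-> t6 (code 14), and the last index, 13, <-> cp (s7, code 23).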
    106 
    107   static const char* AllocationIndexToString(int index) {
    108     ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
    109     const char* const names[] = {
    110       "v0",
    111       "v1",
    112       "a0",
    113       "a1",
    114       "a2",
    115       "a3",
    116       "t0",
    117       "t1",
    118       "t2",
    119       "t3",
    120       "t4",
    121       "t5",
    122       "t6",
    123       "s7",
    124     };
    125     return names[index];
    126   }
    127 
    128   static Register from_code(int code) {
    129     Register r = { code };
    130     return r;
    131   }
    132 
    133   bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters; }
    134   bool is(Register reg) const { return code_ == reg.code_; }
    135   int code() const {
    136     ASSERT(is_valid());
    137     return code_;
    138   }
    139   int bit() const {
    140     ASSERT(is_valid());
    141     return 1 << code_;
    142   }
    143 
    144   // Unfortunately we can't make this private in a struct.
    145   int code_;
    146 };
    147 
    148 #define REGISTER(N, C) \
    149   const int kRegister_ ## N ## _Code = C; \
    150   const Register N = { C }
    151 
    152 REGISTER(no_reg, -1);
    153 // Always zero.
    154 REGISTER(zero_reg, 0);
    155 // at: Reserved for synthetic instructions.
    156 REGISTER(at, 1);
    157 // v0, v1: Used when returning multiple values from subroutines.
    158 REGISTER(v0, 2);
    159 REGISTER(v1, 3);
     160 // a0 - a3: Used to pass non-FP parameters.
    161 REGISTER(a0, 4);
    162 REGISTER(a1, 5);
    163 REGISTER(a2, 6);
    164 REGISTER(a3, 7);
    165 // t0 - t9: Can be used without reservation, act as temporary registers and are
    166 // allowed to be destroyed by subroutines.
    167 REGISTER(t0, 8);
    168 REGISTER(t1, 9);
    169 REGISTER(t2, 10);
    170 REGISTER(t3, 11);
    171 REGISTER(t4, 12);
    172 REGISTER(t5, 13);
    173 REGISTER(t6, 14);
    174 REGISTER(t7, 15);
    175 // s0 - s7: Subroutine register variables. Subroutines that write to these
    176 // registers must restore their values before exiting so that the caller can
    177 // expect the values to be preserved.
    178 REGISTER(s0, 16);
    179 REGISTER(s1, 17);
    180 REGISTER(s2, 18);
    181 REGISTER(s3, 19);
    182 REGISTER(s4, 20);
    183 REGISTER(s5, 21);
    184 REGISTER(s6, 22);
    185 REGISTER(s7, 23);
    186 REGISTER(t8, 24);
    187 REGISTER(t9, 25);
    188 // k0, k1: Reserved for system calls and interrupt handlers.
    189 REGISTER(k0, 26);
    190 REGISTER(k1, 27);
    191 // gp: Reserved.
    192 REGISTER(gp, 28);
    193 // sp: Stack pointer.
    194 REGISTER(sp, 29);
    195 // fp: Frame pointer.
    196 REGISTER(fp, 30);
    197 // ra: Return address pointer.
    198 REGISTER(ra, 31);
    199 
    200 #undef REGISTER
    201 
    202 
    203 int ToNumber(Register reg);
    204 
    205 Register ToRegister(int num);
    206 
    207 // Coprocessor register.
    208 struct FPURegister {
    209   static const int kMaxNumRegisters = v8::internal::kNumFPURegisters;
    210 
    211   // TODO(plind): Warning, inconsistent numbering here. kNumFPURegisters refers
    212   // to number of 32-bit FPU regs, but kNumAllocatableRegisters refers to
    213   // number of Double regs (64-bit regs, or FPU-reg-pairs).
    214 
    215   // A few double registers are reserved: one as a scratch register and one to
    216   // hold 0.0.
    217   //  f28: 0.0
    218   //  f30: scratch register.
    219   static const int kNumReservedRegisters = 2;
    220   static const int kMaxNumAllocatableRegisters = kMaxNumRegisters / 2 -
    221       kNumReservedRegisters;
    222 
    223   inline static int NumRegisters();
    224   inline static int NumAllocatableRegisters();
    225   inline static int ToAllocationIndex(FPURegister reg);
    226   static const char* AllocationIndexToString(int index);
    227 
    228   static FPURegister FromAllocationIndex(int index) {
    229     ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
    230     return from_code(index * 2);
    231   }
    232 
    233   static FPURegister from_code(int code) {
    234     FPURegister r = { code };
    235     return r;
    236   }
    237 
     238   bool is_valid() const { return 0 <= code_ && code_ < kMaxNumRegisters; }
    239   bool is(FPURegister creg) const { return code_ == creg.code_; }
    240   FPURegister low() const {
    241     // Find low reg of a Double-reg pair, which is the reg itself.
    242     ASSERT(code_ % 2 == 0);  // Specified Double reg must be even.
    243     FPURegister reg;
    244     reg.code_ = code_;
    245     ASSERT(reg.is_valid());
    246     return reg;
    247   }
    248   FPURegister high() const {
     249     // Find high reg of a Double-reg pair, which is reg + 1.
    250     ASSERT(code_ % 2 == 0);  // Specified Double reg must be even.
    251     FPURegister reg;
    252     reg.code_ = code_ + 1;
    253     ASSERT(reg.is_valid());
    254     return reg;
    255   }
    256 
    257   int code() const {
    258     ASSERT(is_valid());
    259     return code_;
    260   }
    261   int bit() const {
    262     ASSERT(is_valid());
    263     return 1 << code_;
    264   }
    265   void setcode(int f) {
    266     code_ = f;
    267     ASSERT(is_valid());
    268   }
    269   // Unfortunately we can't make this private in a struct.
    270   int code_;
    271 };
    272 
    273 // V8 now supports the O32 ABI, and the FPU Registers are organized as 32
    274 // 32-bit registers, f0 through f31. When used as 'double' they are used
    275 // in pairs, starting with the even numbered register. So a double operation
    276 // on f0 really uses f0 and f1.
     277 // (Modern MIPS hardware also supports 32 64-bit registers, by setting the
     278 // (privileged) Status Register FR bit to 1. This is used by the N32 ABI,
     279 // but it is not in common use. Someday we will want to support this in V8.)
    280 
    281 // For O32 ABI, Floats and Doubles refer to same set of 32 32-bit registers.
    282 typedef FPURegister DoubleRegister;
    283 typedef FPURegister FloatRegister;
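         // Illustrative sketch of the pairing described above (uses only the
         // declarations in this header; the variable names are examples):
         //
         //   DoubleRegister d = f0;      // A double value occupies f0 and f1.
         //   FPURegister lo = d.low();   // lo.code() == 0.
         //   FPURegister hi = d.high();  // hi.code() == 1.
         //   DoubleRegister d1 = DoubleRegister::FromAllocationIndex(1);  // f2/f3 pair.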
    284 
    285 const FPURegister no_freg = { -1 };
    286 
    287 const FPURegister f0 = { 0 };  // Return value in hard float mode.
    288 const FPURegister f1 = { 1 };
    289 const FPURegister f2 = { 2 };
    290 const FPURegister f3 = { 3 };
    291 const FPURegister f4 = { 4 };
    292 const FPURegister f5 = { 5 };
    293 const FPURegister f6 = { 6 };
    294 const FPURegister f7 = { 7 };
    295 const FPURegister f8 = { 8 };
    296 const FPURegister f9 = { 9 };
    297 const FPURegister f10 = { 10 };
    298 const FPURegister f11 = { 11 };
    299 const FPURegister f12 = { 12 };  // Arg 0 in hard float mode.
    300 const FPURegister f13 = { 13 };
    301 const FPURegister f14 = { 14 };  // Arg 1 in hard float mode.
    302 const FPURegister f15 = { 15 };
    303 const FPURegister f16 = { 16 };
    304 const FPURegister f17 = { 17 };
    305 const FPURegister f18 = { 18 };
    306 const FPURegister f19 = { 19 };
    307 const FPURegister f20 = { 20 };
    308 const FPURegister f21 = { 21 };
    309 const FPURegister f22 = { 22 };
    310 const FPURegister f23 = { 23 };
    311 const FPURegister f24 = { 24 };
    312 const FPURegister f25 = { 25 };
    313 const FPURegister f26 = { 26 };
    314 const FPURegister f27 = { 27 };
    315 const FPURegister f28 = { 28 };
    316 const FPURegister f29 = { 29 };
    317 const FPURegister f30 = { 30 };
    318 const FPURegister f31 = { 31 };
    319 
    320 // Register aliases.
    321 // cp is assumed to be a callee saved register.
    322 // Defined using #define instead of "static const Register&" because Clang
    323 // complains otherwise when a compilation unit that includes this header
    324 // doesn't use the variables.
    325 #define kRootRegister s6
    326 #define cp s7
    327 #define kLithiumScratchReg s3
    328 #define kLithiumScratchReg2 s4
    329 #define kLithiumScratchDouble f30
    330 #define kDoubleRegZero f28
    331 
    332 // FPU (coprocessor 1) control registers.
    333 // Currently only FCSR (#31) is implemented.
    334 struct FPUControlRegister {
    335   bool is_valid() const { return code_ == kFCSRRegister; }
    336   bool is(FPUControlRegister creg) const { return code_ == creg.code_; }
    337   int code() const {
    338     ASSERT(is_valid());
    339     return code_;
    340   }
    341   int bit() const {
    342     ASSERT(is_valid());
    343     return 1 << code_;
    344   }
    345   void setcode(int f) {
    346     code_ = f;
    347     ASSERT(is_valid());
    348   }
    349   // Unfortunately we can't make this private in a struct.
    350   int code_;
    351 };
    352 
    353 const FPUControlRegister no_fpucreg = { kInvalidFPUControlRegister };
    354 const FPUControlRegister FCSR = { kFCSRRegister };
    355 
    356 
    357 // -----------------------------------------------------------------------------
    358 // Machine instruction Operands.
    359 
    360 // Class Operand represents a shifter operand in data processing instructions.
    361 class Operand BASE_EMBEDDED {
    362  public:
    363   // Immediate.
    364   INLINE(explicit Operand(int32_t immediate,
    365          RelocInfo::Mode rmode = RelocInfo::NONE32));
    366   INLINE(explicit Operand(const ExternalReference& f));
    367   INLINE(explicit Operand(const char* s));
    368   INLINE(explicit Operand(Object** opp));
    369   INLINE(explicit Operand(Context** cpp));
    370   explicit Operand(Handle<Object> handle);
    371   INLINE(explicit Operand(Smi* value));
    372 
    373   // Register.
    374   INLINE(explicit Operand(Register rm));
    375 
    376   // Return true if this is a register operand.
    377   INLINE(bool is_reg() const);
    378 
    379   inline int32_t immediate() const {
    380     ASSERT(!is_reg());
    381     return imm32_;
    382   }
    383 
    384   Register rm() const { return rm_; }
    385 
    386  private:
    387   Register rm_;
    388   int32_t imm32_;  // Valid if rm_ == no_reg.
    389   RelocInfo::Mode rmode_;
    390 
    391   friend class Assembler;
    392   friend class MacroAssembler;
    393 };
    394 
    395 
     396 // On MIPS we have only one addressing mode with base_reg + offset.
    397 // Class MemOperand represents a memory operand in load and store instructions.
    398 class MemOperand : public Operand {
    399  public:
    400   // Immediate value attached to offset.
    401   enum OffsetAddend {
    402     offset_minus_one = -1,
    403     offset_zero = 0
    404   };
    405 
    406   explicit MemOperand(Register rn, int32_t offset = 0);
    407   explicit MemOperand(Register rn, int32_t unit, int32_t multiplier,
    408                       OffsetAddend offset_addend = offset_zero);
    409   int32_t offset() const { return offset_; }
    410 
    411   bool OffsetIsInt16Encodable() const {
    412     return is_int16(offset_);
    413   }
    414 
    415  private:
    416   int32_t offset_;
    417 
    418   friend class Assembler;
    419 };
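         // Illustrative sketch of how these operand classes are typically built
         // and consumed (assm stands for an Assembler instance, declared below;
         // the register choices are examples only):
         //
         //   Operand imm(0x1234);        // Immediate operand.
         //   Operand reg_op(t1);         // Register operand.
         //   MemOperand slot(sp, 8);     // Memory operand addressing 8(sp).
         //   assm.lw(t0, MemOperand(fp, -4));  // Load word from -4(fp).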
    420 
    421 
    422 class Assembler : public AssemblerBase {
    423  public:
    424   // Create an assembler. Instructions and relocation information are emitted
    425   // into a buffer, with the instructions starting from the beginning and the
    426   // relocation information starting from the end of the buffer. See CodeDesc
    427   // for a detailed comment on the layout (globals.h).
    428   //
    429   // If the provided buffer is NULL, the assembler allocates and grows its own
    430   // buffer, and buffer_size determines the initial buffer size. The buffer is
    431   // owned by the assembler and deallocated upon destruction of the assembler.
    432   //
    433   // If the provided buffer is not NULL, the assembler uses the provided buffer
    434   // for code generation and assumes its size to be buffer_size. If the buffer
    435   // is too small, a fatal error occurs. No deallocation of the buffer is done
    436   // upon destruction of the assembler.
    437   Assembler(Isolate* isolate, void* buffer, int buffer_size);
    438   virtual ~Assembler() { }
    439 
    440   // GetCode emits any pending (non-emitted) code and fills the descriptor
    441   // desc. GetCode() is idempotent; it returns the same result if no other
    442   // Assembler functions are invoked in between GetCode() calls.
    443   void GetCode(CodeDesc* desc);
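           // Illustrative lifecycle sketch, assuming an Isolate* named isolate (this
           // is an example, not code from this header): let the assembler own a
           // growable buffer, emit a few instructions, then extract the descriptor.
           //
           //   Assembler assm(isolate, NULL, 0);  // NULL buffer: assembler-owned.
           //   assm.addiu(v0, zero_reg, 42);      // Emit some instructions.
           //   assm.jr(ra);
           //   assm.nop();                        // Branch delay slot.
           //   CodeDesc desc;
           //   assm.GetCode(&desc);               // Fill desc with the code layout.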
    444 
    445   // Label operations & relative jumps (PPUM Appendix D).
    446   //
    447   // Takes a branch opcode (cc) and a label (L) and generates
    448   // either a backward branch or a forward branch and links it
    449   // to the label fixup chain. Usage:
    450   //
    451   // Label L;    // unbound label
    452   // j(cc, &L);  // forward branch to unbound label
    453   // bind(&L);   // bind label to the current pc
    454   // j(cc, &L);  // backward branch to bound label
    455   // bind(&L);   // illegal: a label may be bound only once
    456   //
    457   // Note: The same Label can be used for forward and backward branches
    458   // but it may be bound only once.
    459   void bind(Label* L);  // Binds an unbound label L to current code position.
     460   // Determines whether the Label is bound and near enough that a branch
     461   // instruction can be used to reach it, instead of a jump instruction.
    462   bool is_near(Label* L);
    463 
    464   // Returns the branch offset to the given label from the current code
    465   // position. Links the label to the current position if it is still unbound.
    466   // Manages the jump elimination optimization if the second parameter is true.
    467   int32_t branch_offset(Label* L, bool jump_elimination_allowed);
    468   int32_t shifted_branch_offset(Label* L, bool jump_elimination_allowed) {
    469     int32_t o = branch_offset(L, jump_elimination_allowed);
    470     ASSERT((o & 3) == 0);   // Assert the offset is aligned.
    471     return o >> 2;
    472   }
    473   uint32_t jump_address(Label* L);
    474 
     475   // Puts a label's target address at the given position.
    476   // The high 8 bits are set to zero.
    477   void label_at_put(Label* L, int at_offset);
    478 
    479   // Read/Modify the code target address in the branch/call instruction at pc.
    480   static Address target_address_at(Address pc);
    481   static void set_target_address_at(Address pc,
    482                                     Address target,
    483                                     ICacheFlushMode icache_flush_mode =
    484                                         FLUSH_ICACHE_IF_NEEDED);
    485   // On MIPS there is no Constant Pool so we skip that parameter.
    486   INLINE(static Address target_address_at(Address pc,
    487                                           ConstantPoolArray* constant_pool)) {
    488     return target_address_at(pc);
    489   }
    490   INLINE(static void set_target_address_at(Address pc,
    491                                            ConstantPoolArray* constant_pool,
    492                                            Address target,
    493                                            ICacheFlushMode icache_flush_mode =
    494                                                FLUSH_ICACHE_IF_NEEDED)) {
    495     set_target_address_at(pc, target, icache_flush_mode);
    496   }
    497   INLINE(static Address target_address_at(Address pc, Code* code)) {
    498     ConstantPoolArray* constant_pool = code ? code->constant_pool() : NULL;
    499     return target_address_at(pc, constant_pool);
    500   }
    501   INLINE(static void set_target_address_at(Address pc,
    502                                            Code* code,
    503                                            Address target,
    504                                            ICacheFlushMode icache_flush_mode =
    505                                                FLUSH_ICACHE_IF_NEEDED)) {
    506     ConstantPoolArray* constant_pool = code ? code->constant_pool() : NULL;
    507     set_target_address_at(pc, constant_pool, target, icache_flush_mode);
    508   }
    509 
    510   // Return the code target address at a call site from the return address
    511   // of that call in the instruction stream.
    512   inline static Address target_address_from_return_address(Address pc);
    513 
    514   static void JumpLabelToJumpRegister(Address pc);
    515 
    516   static void QuietNaN(HeapObject* nan);
    517 
    518   // This sets the branch destination (which gets loaded at the call address).
    519   // This is for calls and branches within generated code.  The serializer
    520   // has already deserialized the lui/ori instructions etc.
    521   inline static void deserialization_set_special_target_at(
    522       Address instruction_payload, Code* code, Address target) {
    523     set_target_address_at(
    524         instruction_payload - kInstructionsFor32BitConstant * kInstrSize,
    525         code,
    526         target);
    527   }
    528 
    529   // Size of an instruction.
    530   static const int kInstrSize = sizeof(Instr);
    531 
    532   // Difference between address of current opcode and target address offset.
    533   static const int kBranchPCOffset = 4;
    534 
     535   // Here we are patching the address in the LUI/ORI instruction pair.
     536   // These values are used in the serialization process and must be zero on
     537   // the MIPS platform, as Code, Embedded Object or External-reference
     538   // pointers are split across two consecutive instructions and don't exist
     539   // separately in the code, so the serializer should not step forward in
     540   // memory after a target is resolved and written.
    541   static const int kSpecialTargetSize = 0;
    542 
     543   // Number of consecutive instructions used to store a 32-bit constant.
     544   // Before the jump optimizations, this constant was used in the
     545   // RelocInfo::target_address_address() function to tell the serializer the
     546   // address of the instruction that follows the LUI/ORI instruction pair.
     547   // Now, with the new jump optimization, where the jump-through-register
     548   // instruction that usually follows the LUI/ORI pair is substituted with
     549   // J/JAL, this constant equals 3 instructions (LUI+ORI+J/JAL/JR/JALR).
    550   static const int kInstructionsFor32BitConstant = 3;
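           // Illustrative sketch of the sequence these constants describe (the use
           // of 'at' and the address value are examples only):
           //
           //   lui(at, (address >> 16) & 0xffff);  // Upper 16 bits of the constant.
           //   ori(at, at, address & 0xffff);      // Lower 16 bits of the constant.
           //   jr(at);                             // Jump through the register...
           //   nop();                              // ...with its branch delay slot.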
    551 
    552   // Distance between the instruction referring to the address of the call
    553   // target and the return address.
    554   static const int kCallTargetAddressOffset = 4 * kInstrSize;
    555 
    556   // Distance between start of patched return sequence and the emitted address
    557   // to jump to.
    558   static const int kPatchReturnSequenceAddressOffset = 0;
    559 
    560   // Distance between start of patched debug break slot and the emitted address
    561   // to jump to.
    562   static const int kPatchDebugBreakSlotAddressOffset =  0 * kInstrSize;
    563 
    564   // Difference between address of current opcode and value read from pc
    565   // register.
    566   static const int kPcLoadDelta = 4;
    567 
    568   static const int kPatchDebugBreakSlotReturnOffset = 4 * kInstrSize;
    569 
    570   // Number of instructions used for the JS return sequence. The constant is
    571   // used by the debugger to patch the JS return sequence.
    572   static const int kJSReturnSequenceInstructions = 7;
    573   static const int kDebugBreakSlotInstructions = 4;
    574   static const int kDebugBreakSlotLength =
    575       kDebugBreakSlotInstructions * kInstrSize;
    576 
    577 
    578   // ---------------------------------------------------------------------------
    579   // Code generation.
    580 
    581   // Insert the smallest number of nop instructions
    582   // possible to align the pc offset to a multiple
    583   // of m. m must be a power of 2 (>= 4).
    584   void Align(int m);
     585   // Aligns code to a position that is optimal as a jump target on this platform.
    586   void CodeTargetAlign();
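           // Illustrative sketch of the alignment behaviour described above (the pc
           // offsets and the value of m are example numbers):
           //
           //   // Suppose pc_offset() == 20.
           //   assm.Align(8);   // Emits one nop; pc_offset() becomes 24.
           //   assm.Align(8);   // Already aligned; emits nothing.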
    587 
    588   // Different nop operations are used by the code generator to detect certain
    589   // states of the generated code.
    590   enum NopMarkerTypes {
    591     NON_MARKING_NOP = 0,
    592     DEBUG_BREAK_NOP,
    593     // IC markers.
    594     PROPERTY_ACCESS_INLINED,
    595     PROPERTY_ACCESS_INLINED_CONTEXT,
    596     PROPERTY_ACCESS_INLINED_CONTEXT_DONT_DELETE,
    597     // Helper values.
    598     LAST_CODE_MARKER,
    599     FIRST_IC_MARKER = PROPERTY_ACCESS_INLINED,
    600     // Code aging
    601     CODE_AGE_MARKER_NOP = 6,
    602     CODE_AGE_SEQUENCE_NOP
    603   };
    604 
    605   // Type == 0 is the default non-marking nop. For mips this is a
    606   // sll(zero_reg, zero_reg, 0). We use rt_reg == at for non-zero
    607   // marking, to avoid conflict with ssnop and ehb instructions.
    608   void nop(unsigned int type = 0) {
    609     ASSERT(type < 32);
    610     Register nop_rt_reg = (type == 0) ? zero_reg : at;
    611     sll(zero_reg, nop_rt_reg, type, true);
    612   }
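           // Illustrative sketch of marking nops and how they are recognized later
           // (the DEBUG_BREAK_NOP marker is just an example choice):
           //
           //   assm.nop();                            // sll(zero_reg, zero_reg, 0).
           //   assm.nop(Assembler::DEBUG_BREAK_NOP);  // sll(zero_reg, at, 1).
           //   // A patcher can then test an instruction word with
           //   // Assembler::IsNop(instr, Assembler::DEBUG_BREAK_NOP).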
    613 
    614 
    615   // --------Branch-and-jump-instructions----------
     616   // We don't use the likely variants of these instructions.
    617   void b(int16_t offset);
    618   void b(Label* L) { b(branch_offset(L, false)>>2); }
    619   void bal(int16_t offset);
    620   void bal(Label* L) { bal(branch_offset(L, false)>>2); }
    621 
    622   void beq(Register rs, Register rt, int16_t offset);
    623   void beq(Register rs, Register rt, Label* L) {
    624     beq(rs, rt, branch_offset(L, false) >> 2);
    625   }
    626   void bgez(Register rs, int16_t offset);
    627   void bgezal(Register rs, int16_t offset);
    628   void bgtz(Register rs, int16_t offset);
    629   void blez(Register rs, int16_t offset);
    630   void bltz(Register rs, int16_t offset);
    631   void bltzal(Register rs, int16_t offset);
    632   void bne(Register rs, Register rt, int16_t offset);
    633   void bne(Register rs, Register rt, Label* L) {
    634     bne(rs, rt, branch_offset(L, false)>>2);
    635   }
    636 
     637   // Never use the raw int16_t b(l)cond versions with a branch offset;
     638   // always use the Label* versions instead.
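           // Illustrative sketch of the preferred Label-based form (register and
           // label names are examples only):
           //
           //   Label done;
           //   assm.beq(a0, zero_reg, &done);  // Offset is handled via the Label.
           //   assm.nop();                     // Branch delay slot.
           //   ...
           //   assm.bind(&done);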
    639 
     640   // Jump targets must be in the current 256 MB-aligned region, i.e., 28 bits.
    641   void j(int32_t target);
    642   void jal(int32_t target);
    643   void jalr(Register rs, Register rd = ra);
    644   void jr(Register target);
    645   void j_or_jr(int32_t target, Register rs);
    646   void jal_or_jalr(int32_t target, Register rs);
    647 
    648 
    649   // -------Data-processing-instructions---------
    650 
    651   // Arithmetic.
    652   void addu(Register rd, Register rs, Register rt);
    653   void subu(Register rd, Register rs, Register rt);
    654   void mult(Register rs, Register rt);
    655   void multu(Register rs, Register rt);
    656   void div(Register rs, Register rt);
    657   void divu(Register rs, Register rt);
    658   void mul(Register rd, Register rs, Register rt);
    659 
    660   void addiu(Register rd, Register rs, int32_t j);
    661 
    662   // Logical.
    663   void and_(Register rd, Register rs, Register rt);
    664   void or_(Register rd, Register rs, Register rt);
    665   void xor_(Register rd, Register rs, Register rt);
    666   void nor(Register rd, Register rs, Register rt);
    667 
    668   void andi(Register rd, Register rs, int32_t j);
    669   void ori(Register rd, Register rs, int32_t j);
    670   void xori(Register rd, Register rs, int32_t j);
    671   void lui(Register rd, int32_t j);
    672 
    673   // Shifts.
     674   // Please note: sll(zero_reg, zero_reg, x) instructions are reserved as nops
     675   // and may cause problems in normal code. The coming_from_nop flag makes sure
     676   // this doesn't happen.
    677   void sll(Register rd, Register rt, uint16_t sa, bool coming_from_nop = false);
    678   void sllv(Register rd, Register rt, Register rs);
    679   void srl(Register rd, Register rt, uint16_t sa);
    680   void srlv(Register rd, Register rt, Register rs);
    681   void sra(Register rt, Register rd, uint16_t sa);
    682   void srav(Register rt, Register rd, Register rs);
    683   void rotr(Register rd, Register rt, uint16_t sa);
    684   void rotrv(Register rd, Register rt, Register rs);
    685 
    686 
    687   // ------------Memory-instructions-------------
    688 
    689   void lb(Register rd, const MemOperand& rs);
    690   void lbu(Register rd, const MemOperand& rs);
    691   void lh(Register rd, const MemOperand& rs);
    692   void lhu(Register rd, const MemOperand& rs);
    693   void lw(Register rd, const MemOperand& rs);
    694   void lwl(Register rd, const MemOperand& rs);
    695   void lwr(Register rd, const MemOperand& rs);
    696   void sb(Register rd, const MemOperand& rs);
    697   void sh(Register rd, const MemOperand& rs);
    698   void sw(Register rd, const MemOperand& rs);
    699   void swl(Register rd, const MemOperand& rs);
    700   void swr(Register rd, const MemOperand& rs);
    701 
    702 
    703   // ----------------Prefetch--------------------
    704 
    705   void pref(int32_t hint, const MemOperand& rs);
    706 
    707 
    708   // -------------Misc-instructions--------------
    709 
    710   // Break / Trap instructions.
    711   void break_(uint32_t code, bool break_as_stop = false);
    712   void stop(const char* msg, uint32_t code = kMaxStopCode);
    713   void tge(Register rs, Register rt, uint16_t code);
    714   void tgeu(Register rs, Register rt, uint16_t code);
    715   void tlt(Register rs, Register rt, uint16_t code);
    716   void tltu(Register rs, Register rt, uint16_t code);
    717   void teq(Register rs, Register rt, uint16_t code);
    718   void tne(Register rs, Register rt, uint16_t code);
    719 
    720   // Move from HI/LO register.
    721   void mfhi(Register rd);
    722   void mflo(Register rd);
    723 
    724   // Set on less than.
    725   void slt(Register rd, Register rs, Register rt);
    726   void sltu(Register rd, Register rs, Register rt);
    727   void slti(Register rd, Register rs, int32_t j);
    728   void sltiu(Register rd, Register rs, int32_t j);
    729 
    730   // Conditional move.
    731   void movz(Register rd, Register rs, Register rt);
    732   void movn(Register rd, Register rs, Register rt);
    733   void movt(Register rd, Register rs, uint16_t cc = 0);
    734   void movf(Register rd, Register rs, uint16_t cc = 0);
    735 
    736   // Bit twiddling.
    737   void clz(Register rd, Register rs);
    738   void ins_(Register rt, Register rs, uint16_t pos, uint16_t size);
    739   void ext_(Register rt, Register rs, uint16_t pos, uint16_t size);
    740 
    741   // --------Coprocessor-instructions----------------
    742 
    743   // Load, store, and move.
    744   void lwc1(FPURegister fd, const MemOperand& src);
    745   void ldc1(FPURegister fd, const MemOperand& src);
    746 
    747   void swc1(FPURegister fs, const MemOperand& dst);
    748   void sdc1(FPURegister fs, const MemOperand& dst);
    749 
    750   void mtc1(Register rt, FPURegister fs);
    751   void mfc1(Register rt, FPURegister fs);
    752 
    753   void ctc1(Register rt, FPUControlRegister fs);
    754   void cfc1(Register rt, FPUControlRegister fs);
    755 
    756   // Arithmetic.
    757   void add_d(FPURegister fd, FPURegister fs, FPURegister ft);
    758   void sub_d(FPURegister fd, FPURegister fs, FPURegister ft);
    759   void mul_d(FPURegister fd, FPURegister fs, FPURegister ft);
    760   void madd_d(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft);
    761   void div_d(FPURegister fd, FPURegister fs, FPURegister ft);
    762   void abs_d(FPURegister fd, FPURegister fs);
    763   void mov_d(FPURegister fd, FPURegister fs);
    764   void neg_d(FPURegister fd, FPURegister fs);
    765   void sqrt_d(FPURegister fd, FPURegister fs);
    766 
    767   // Conversion.
    768   void cvt_w_s(FPURegister fd, FPURegister fs);
    769   void cvt_w_d(FPURegister fd, FPURegister fs);
    770   void trunc_w_s(FPURegister fd, FPURegister fs);
    771   void trunc_w_d(FPURegister fd, FPURegister fs);
    772   void round_w_s(FPURegister fd, FPURegister fs);
    773   void round_w_d(FPURegister fd, FPURegister fs);
    774   void floor_w_s(FPURegister fd, FPURegister fs);
    775   void floor_w_d(FPURegister fd, FPURegister fs);
    776   void ceil_w_s(FPURegister fd, FPURegister fs);
    777   void ceil_w_d(FPURegister fd, FPURegister fs);
    778 
    779   void cvt_l_s(FPURegister fd, FPURegister fs);
    780   void cvt_l_d(FPURegister fd, FPURegister fs);
    781   void trunc_l_s(FPURegister fd, FPURegister fs);
    782   void trunc_l_d(FPURegister fd, FPURegister fs);
    783   void round_l_s(FPURegister fd, FPURegister fs);
    784   void round_l_d(FPURegister fd, FPURegister fs);
    785   void floor_l_s(FPURegister fd, FPURegister fs);
    786   void floor_l_d(FPURegister fd, FPURegister fs);
    787   void ceil_l_s(FPURegister fd, FPURegister fs);
    788   void ceil_l_d(FPURegister fd, FPURegister fs);
    789 
    790   void cvt_s_w(FPURegister fd, FPURegister fs);
    791   void cvt_s_l(FPURegister fd, FPURegister fs);
    792   void cvt_s_d(FPURegister fd, FPURegister fs);
    793 
    794   void cvt_d_w(FPURegister fd, FPURegister fs);
    795   void cvt_d_l(FPURegister fd, FPURegister fs);
    796   void cvt_d_s(FPURegister fd, FPURegister fs);
    797 
    798   // Conditions and branches.
    799   void c(FPUCondition cond, SecondaryField fmt,
    800          FPURegister ft, FPURegister fs, uint16_t cc = 0);
    801 
    802   void bc1f(int16_t offset, uint16_t cc = 0);
    803   void bc1f(Label* L, uint16_t cc = 0) { bc1f(branch_offset(L, false)>>2, cc); }
    804   void bc1t(int16_t offset, uint16_t cc = 0);
    805   void bc1t(Label* L, uint16_t cc = 0) { bc1t(branch_offset(L, false)>>2, cc); }
    806   void fcmp(FPURegister src1, const double src2, FPUCondition cond);
    807 
    808   // Check the code size generated from label to here.
    809   int SizeOfCodeGeneratedSince(Label* label) {
    810     return pc_offset() - label->pos();
    811   }
    812 
    813   // Check the number of instructions generated from label to here.
    814   int InstructionsGeneratedSince(Label* label) {
    815     return SizeOfCodeGeneratedSince(label) / kInstrSize;
    816   }
    817 
     818   // Scope class for postponing trampoline pool generation.
    819   class BlockTrampolinePoolScope {
    820    public:
    821     explicit BlockTrampolinePoolScope(Assembler* assem) : assem_(assem) {
    822       assem_->StartBlockTrampolinePool();
    823     }
    824     ~BlockTrampolinePoolScope() {
    825       assem_->EndBlockTrampolinePool();
    826     }
    827 
    828    private:
    829     Assembler* assem_;
    830 
    831     DISALLOW_IMPLICIT_CONSTRUCTORS(BlockTrampolinePoolScope);
    832   };
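           // Illustrative usage sketch (assm stands for the surrounding Assembler):
           //
           //   {
           //     BlockTrampolinePoolScope block_trampoline_pool(&assm);
           //     assm.lui(at, 0x1234);       // This pair must stay contiguous and
           //     assm.ori(at, at, 0x5678);   // must not be split by a pool.
           //   }  // Trampoline pool emission is allowed again here.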
    833 
    834   // Class for postponing the assembly buffer growth. Typically used for
    835   // sequences of instructions that must be emitted as a unit, before
    836   // buffer growth (and relocation) can occur.
    837   // This blocking scope is not nestable.
    838   class BlockGrowBufferScope {
    839    public:
    840     explicit BlockGrowBufferScope(Assembler* assem) : assem_(assem) {
    841       assem_->StartBlockGrowBuffer();
    842     }
    843     ~BlockGrowBufferScope() {
    844       assem_->EndBlockGrowBuffer();
    845     }
    846 
    847    private:
    848     Assembler* assem_;
    849 
    850     DISALLOW_IMPLICIT_CONSTRUCTORS(BlockGrowBufferScope);
    851   };
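           // Illustrative usage sketch: emit a sequence as a unit so that buffer
           // growth (and the relocation it implies) cannot happen in the middle.
           //
           //   {
           //     BlockGrowBufferScope block_buffer_growth(&assm);
           //     assm.lui(at, 0);            // Placeholder constant, patched later.
           //     assm.ori(at, at, 0);
           //   }  // Buffer growth is allowed again here.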
    852 
    853   // Debugging.
    854 
    855   // Mark address of the ExitJSFrame code.
    856   void RecordJSReturn();
    857 
    858   // Mark address of a debug break slot.
    859   void RecordDebugBreakSlot();
    860 
    861   // Record the AST id of the CallIC being compiled, so that it can be placed
    862   // in the relocation information.
    863   void SetRecordedAstId(TypeFeedbackId ast_id) {
    864     ASSERT(recorded_ast_id_.IsNone());
    865     recorded_ast_id_ = ast_id;
    866   }
    867 
    868   TypeFeedbackId RecordedAstId() {
    869     ASSERT(!recorded_ast_id_.IsNone());
    870     return recorded_ast_id_;
    871   }
    872 
    873   void ClearRecordedAstId() { recorded_ast_id_ = TypeFeedbackId::None(); }
    874 
    875   // Record a comment relocation entry that can be used by a disassembler.
    876   // Use --code-comments to enable.
    877   void RecordComment(const char* msg);
    878 
    879   static int RelocateInternalReference(byte* pc, intptr_t pc_delta);
    880 
    881   // Writes a single byte or word of data in the code stream.  Used for
    882   // inline tables, e.g., jump-tables.
    883   void db(uint8_t data);
    884   void dd(uint32_t data);
    885 
    886   // Emits the address of the code stub's first instruction.
    887   void emit_code_stub_address(Code* stub);
    888 
    889   PositionsRecorder* positions_recorder() { return &positions_recorder_; }
    890 
    891   // Postpone the generation of the trampoline pool for the specified number of
    892   // instructions.
    893   void BlockTrampolinePoolFor(int instructions);
    894 
    895   // Check if there is less than kGap bytes available in the buffer.
    896   // If this is the case, we need to grow the buffer before emitting
    897   // an instruction or relocation information.
    898   inline bool overflow() const { return pc_ >= reloc_info_writer.pos() - kGap; }
    899 
    900   // Get the number of bytes available in the buffer.
    901   inline int available_space() const { return reloc_info_writer.pos() - pc_; }
    902 
    903   // Read/patch instructions.
    904   static Instr instr_at(byte* pc) { return *reinterpret_cast<Instr*>(pc); }
    905   static void instr_at_put(byte* pc, Instr instr) {
    906     *reinterpret_cast<Instr*>(pc) = instr;
    907   }
    908   Instr instr_at(int pos) { return *reinterpret_cast<Instr*>(buffer_ + pos); }
    909   void instr_at_put(int pos, Instr instr) {
    910     *reinterpret_cast<Instr*>(buffer_ + pos) = instr;
    911   }
    912 
    913   // Check if an instruction is a branch of some kind.
    914   static bool IsBranch(Instr instr);
    915   static bool IsBeq(Instr instr);
    916   static bool IsBne(Instr instr);
    917 
    918   static bool IsJump(Instr instr);
    919   static bool IsJ(Instr instr);
    920   static bool IsLui(Instr instr);
    921   static bool IsOri(Instr instr);
    922 
    923   static bool IsJal(Instr instr);
    924   static bool IsJr(Instr instr);
    925   static bool IsJalr(Instr instr);
    926 
    927   static bool IsNop(Instr instr, unsigned int type);
    928   static bool IsPop(Instr instr);
    929   static bool IsPush(Instr instr);
    930   static bool IsLwRegFpOffset(Instr instr);
    931   static bool IsSwRegFpOffset(Instr instr);
    932   static bool IsLwRegFpNegOffset(Instr instr);
    933   static bool IsSwRegFpNegOffset(Instr instr);
    934 
    935   static Register GetRtReg(Instr instr);
    936   static Register GetRsReg(Instr instr);
    937   static Register GetRdReg(Instr instr);
    938 
    939   static uint32_t GetRt(Instr instr);
    940   static uint32_t GetRtField(Instr instr);
    941   static uint32_t GetRs(Instr instr);
    942   static uint32_t GetRsField(Instr instr);
    943   static uint32_t GetRd(Instr instr);
    944   static uint32_t GetRdField(Instr instr);
    945   static uint32_t GetSa(Instr instr);
    946   static uint32_t GetSaField(Instr instr);
    947   static uint32_t GetOpcodeField(Instr instr);
    948   static uint32_t GetFunction(Instr instr);
    949   static uint32_t GetFunctionField(Instr instr);
    950   static uint32_t GetImmediate16(Instr instr);
    951   static uint32_t GetLabelConst(Instr instr);
    952 
    953   static int32_t GetBranchOffset(Instr instr);
    954   static bool IsLw(Instr instr);
    955   static int16_t GetLwOffset(Instr instr);
    956   static Instr SetLwOffset(Instr instr, int16_t offset);
    957 
    958   static bool IsSw(Instr instr);
    959   static Instr SetSwOffset(Instr instr, int16_t offset);
    960   static bool IsAddImmediate(Instr instr);
    961   static Instr SetAddImmediateOffset(Instr instr, int16_t offset);
    962 
    963   static bool IsAndImmediate(Instr instr);
    964   static bool IsEmittedConstant(Instr instr);
    965 
    966   void CheckTrampolinePool();
    967 
    968   // Allocate a constant pool of the correct size for the generated code.
    969   Handle<ConstantPoolArray> NewConstantPool(Isolate* isolate);
    970 
    971   // Generate the constant pool for the generated code.
    972   void PopulateConstantPool(ConstantPoolArray* constant_pool);
    973 
    974  protected:
    975   // Relocation for a type-recording IC has the AST id added to it.  This
    976   // member variable is a way to pass the information from the call site to
    977   // the relocation info.
    978   TypeFeedbackId recorded_ast_id_;
    979 
    980   int32_t buffer_space() const { return reloc_info_writer.pos() - pc_; }
    981 
    982   // Decode branch instruction at pos and return branch target pos.
    983   int target_at(int32_t pos);
    984 
    985   // Patch branch instruction at pos to branch to given branch target pos.
    986   void target_at_put(int32_t pos, int32_t target_pos);
    987 
    988   // Say if we need to relocate with this mode.
    989   bool MustUseReg(RelocInfo::Mode rmode);
    990 
    991   // Record reloc info for current pc_.
    992   void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
    993 
    994   // Block the emission of the trampoline pool before pc_offset.
    995   void BlockTrampolinePoolBefore(int pc_offset) {
    996     if (no_trampoline_pool_before_ < pc_offset)
    997       no_trampoline_pool_before_ = pc_offset;
    998   }
    999 
   1000   void StartBlockTrampolinePool() {
   1001     trampoline_pool_blocked_nesting_++;
   1002   }
   1003 
   1004   void EndBlockTrampolinePool() {
   1005     trampoline_pool_blocked_nesting_--;
   1006   }
   1007 
   1008   bool is_trampoline_pool_blocked() const {
   1009     return trampoline_pool_blocked_nesting_ > 0;
   1010   }
   1011 
   1012   bool has_exception() const {
   1013     return internal_trampoline_exception_;
   1014   }
   1015 
   1016   void DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi);
   1017 
   1018   bool is_trampoline_emitted() const {
   1019     return trampoline_emitted_;
   1020   }
   1021 
   1022   // Temporarily block automatic assembly buffer growth.
   1023   void StartBlockGrowBuffer() {
   1024     ASSERT(!block_buffer_growth_);
   1025     block_buffer_growth_ = true;
   1026   }
   1027 
   1028   void EndBlockGrowBuffer() {
   1029     ASSERT(block_buffer_growth_);
   1030     block_buffer_growth_ = false;
   1031   }
   1032 
   1033   bool is_buffer_growth_blocked() const {
   1034     return block_buffer_growth_;
   1035   }
   1036 
   1037  private:
   1038   // Buffer size and constant pool distance are checked together at regular
   1039   // intervals of kBufferCheckInterval emitted bytes.
   1040   static const int kBufferCheckInterval = 1*KB/2;
   1041 
   1042   // Code generation.
   1043   // The relocation writer's position is at least kGap bytes below the end of
   1044   // the generated instructions. This is so that multi-instruction sequences do
   1045   // not have to check for overflow. The same is true for writes of large
   1046   // relocation info entries.
   1047   static const int kGap = 32;
   1048 
   1049 
   1050   // Repeated checking whether the trampoline pool should be emitted is rather
   1051   // expensive. By default we only check again once a number of instructions
   1052   // has been generated.
   1053   static const int kCheckConstIntervalInst = 32;
   1054   static const int kCheckConstInterval = kCheckConstIntervalInst * kInstrSize;
   1055 
   1056   int next_buffer_check_;  // pc offset of next buffer check.
   1057 
   1058   // Emission of the trampoline pool may be blocked in some code sequences.
   1059   int trampoline_pool_blocked_nesting_;  // Block emission if this is not zero.
   1060   int no_trampoline_pool_before_;  // Block emission before this pc offset.
   1061 
   1062   // Keep track of the last emitted pool to guarantee a maximal distance.
   1063   int last_trampoline_pool_end_;  // pc offset of the end of the last pool.
   1064 
   1065   // Automatic growth of the assembly buffer may be blocked for some sequences.
   1066   bool block_buffer_growth_;  // Block growth when true.
   1067 
   1068   // Relocation information generation.
   1069   // Each relocation is encoded as a variable size value.
   1070   static const int kMaxRelocSize = RelocInfoWriter::kMaxSize;
   1071   RelocInfoWriter reloc_info_writer;
   1072 
    1073   // The bound position; before this we cannot do instruction elimination.
   1074   int last_bound_pos_;
   1075 
   1076   // Code emission.
   1077   inline void CheckBuffer();
   1078   void GrowBuffer();
   1079   inline void emit(Instr x);
   1080   inline void CheckTrampolinePoolQuick();
   1081 
   1082   // Instruction generation.
    1083   // We have 3 different kinds of encoding layouts on MIPS.
    1084   // However, due to the many different types of objects encoded in the same
    1085   // fields, we have quite a few aliases for each mode.
    1086   // Using the same structure to refer to Register and FPURegister would spare a
    1087   // few aliases, but mixing both does not look clean to me.
    1088   // Anyway, we could surely implement this differently.
   1089 
   1090   void GenInstrRegister(Opcode opcode,
   1091                         Register rs,
   1092                         Register rt,
   1093                         Register rd,
   1094                         uint16_t sa = 0,
   1095                         SecondaryField func = NULLSF);
   1096 
   1097   void GenInstrRegister(Opcode opcode,
   1098                         Register rs,
   1099                         Register rt,
   1100                         uint16_t msb,
   1101                         uint16_t lsb,
   1102                         SecondaryField func);
   1103 
   1104   void GenInstrRegister(Opcode opcode,
   1105                         SecondaryField fmt,
   1106                         FPURegister ft,
   1107                         FPURegister fs,
   1108                         FPURegister fd,
   1109                         SecondaryField func = NULLSF);
   1110 
   1111   void GenInstrRegister(Opcode opcode,
   1112                         FPURegister fr,
   1113                         FPURegister ft,
   1114                         FPURegister fs,
   1115                         FPURegister fd,
   1116                         SecondaryField func = NULLSF);
   1117 
   1118   void GenInstrRegister(Opcode opcode,
   1119                         SecondaryField fmt,
   1120                         Register rt,
   1121                         FPURegister fs,
   1122                         FPURegister fd,
   1123                         SecondaryField func = NULLSF);
   1124 
   1125   void GenInstrRegister(Opcode opcode,
   1126                         SecondaryField fmt,
   1127                         Register rt,
   1128                         FPUControlRegister fs,
   1129                         SecondaryField func = NULLSF);
   1130 
   1131 
   1132   void GenInstrImmediate(Opcode opcode,
   1133                          Register rs,
   1134                          Register rt,
   1135                          int32_t  j);
   1136   void GenInstrImmediate(Opcode opcode,
   1137                          Register rs,
   1138                          SecondaryField SF,
   1139                          int32_t  j);
   1140   void GenInstrImmediate(Opcode opcode,
   1141                          Register r1,
   1142                          FPURegister r2,
   1143                          int32_t  j);
   1144 
   1145 
   1146   void GenInstrJump(Opcode opcode,
   1147                      uint32_t address);
   1148 
   1149   // Helpers.
   1150   void LoadRegPlusOffsetToAt(const MemOperand& src);
   1151 
   1152   // Labels.
   1153   void print(Label* L);
   1154   void bind_to(Label* L, int pos);
   1155   void next(Label* L);
   1156 
   1157   // One trampoline consists of:
   1158   // - space for trampoline slots,
   1159   // - space for labels.
   1160   //
   1161   // Space for trampoline slots is equal to slot_count * 2 * kInstrSize.
    1162   // Space for trampoline slots precedes space for labels. Each label is one
    1163   // instruction in size, so the total space for labels is equal to
    1164   // label_count * kInstrSize.
   1165   class Trampoline {
   1166    public:
   1167     Trampoline() {
   1168       start_ = 0;
   1169       next_slot_ = 0;
   1170       free_slot_count_ = 0;
   1171       end_ = 0;
   1172     }
   1173     Trampoline(int start, int slot_count) {
   1174       start_ = start;
   1175       next_slot_ = start;
   1176       free_slot_count_ = slot_count;
   1177       end_ = start + slot_count * kTrampolineSlotsSize;
   1178     }
   1179     int start() {
   1180       return start_;
   1181     }
   1182     int end() {
   1183       return end_;
   1184     }
   1185     int take_slot() {
   1186       int trampoline_slot = kInvalidSlotPos;
   1187       if (free_slot_count_ <= 0) {
   1188         // We have run out of space on trampolines.
   1189         // Make sure we fail in debug mode, so we become aware of each case
   1190         // when this happens.
   1191         ASSERT(0);
   1192         // Internal exception will be caught.
   1193       } else {
   1194         trampoline_slot = next_slot_;
   1195         free_slot_count_--;
   1196         next_slot_ += kTrampolineSlotsSize;
   1197       }
   1198       return trampoline_slot;
   1199     }
   1200 
   1201    private:
   1202     int start_;
   1203     int end_;
   1204     int next_slot_;
   1205     int free_slot_count_;
   1206   };
   1207 
   1208   int32_t get_trampoline_entry(int32_t pos);
   1209   int unbound_labels_count_;
    1210   // If a trampoline has been emitted, the generated code is becoming large. As
    1211   // this is already a slow case which can possibly break our code generation
    1212   // for the extreme case, we use this information to trigger a different mode
    1213   // of branch instruction generation, where we use jump instructions rather
    1214   // than regular branch instructions.
   1215   bool trampoline_emitted_;
   1216   static const int kTrampolineSlotsSize = 4 * kInstrSize;
   1217   static const int kMaxBranchOffset = (1 << (18 - 1)) - 1;
   1218   static const int kInvalidSlotPos = -1;
   1219 
   1220   Trampoline trampoline_;
   1221   bool internal_trampoline_exception_;
   1222 
   1223   friend class RegExpMacroAssemblerMIPS;
   1224   friend class RelocInfo;
   1225   friend class CodePatcher;
   1226   friend class BlockTrampolinePoolScope;
   1227 
   1228   PositionsRecorder positions_recorder_;
   1229   friend class PositionsRecorder;
   1230   friend class EnsureSpace;
   1231 };
   1232 
   1233 
   1234 class EnsureSpace BASE_EMBEDDED {
   1235  public:
   1236   explicit EnsureSpace(Assembler* assembler) {
   1237     assembler->CheckBuffer();
   1238   }
   1239 };
   1240 
   1241 } }  // namespace v8::internal
   1242 
    1243 #endif  // V8_MIPS_ASSEMBLER_MIPS_H_
   1244