// Copyright (c) 1994-2006 Sun Microsystems Inc.
// All Rights Reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// - Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// - Redistribution in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// - Neither the name of Sun Microsystems or the names of contributors may
// be used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// The original source code covered by the above license has been
// modified significantly by Google Inc.
// Copyright 2012 the V8 project authors. All rights reserved.

#include "src/mips64/assembler-mips64.h"

#if V8_TARGET_ARCH_MIPS64

#include "src/base/cpu.h"
#include "src/mips64/assembler-mips64-inl.h"

namespace v8 {
namespace internal {


// Get the CPU features enabled by the build. For cross compilation the
// preprocessor symbol CAN_USE_FPU_INSTRUCTIONS can be defined to enable
// FPU instructions when building the snapshot.
static unsigned CpuFeaturesImpliedByCompiler() {
  unsigned answer = 0;
#ifdef CAN_USE_FPU_INSTRUCTIONS
  answer |= 1u << FPU;
#endif  // def CAN_USE_FPU_INSTRUCTIONS

  // If the compiler is allowed to use FPU then we can use FPU too in our code
  // generation even when generating snapshots.  This won't work for cross
  // compilation.
#if defined(__mips__) && defined(__mips_hard_float) && __mips_hard_float != 0
  answer |= 1u << FPU;
#endif

  return answer;
}


void CpuFeatures::ProbeImpl(bool cross_compile) {
  supported_ |= CpuFeaturesImpliedByCompiler();

  // Only use statically determined features for cross compile (snapshot).
  if (cross_compile) return;

  // If the compiler is allowed to use fpu then we can use fpu too in our
  // code generation.
#ifndef __mips__
  // For the simulator build, use FPU.
  supported_ |= 1u << FPU;
#else
  // Probe for additional features at runtime.
  base::CPU cpu;
  if (cpu.has_fpu()) supported_ |= 1u << FPU;
#endif
}


void CpuFeatures::PrintTarget() { }
void CpuFeatures::PrintFeatures() { }


int ToNumber(Register reg) {
  DCHECK(reg.is_valid());
  const int kNumbers[] = {
    0,    // zero_reg
    1,    // at
    2,    // v0
    3,    // v1
    4,    // a0
    5,    // a1
    6,    // a2
    7,    // a3
    8,    // a4
    9,    // a5
    10,   // a6
    11,   // a7
    12,   // t0
    13,   // t1
    14,   // t2
    15,   // t3
    16,   // s0
    17,   // s1
    18,   // s2
    19,   // s3
    20,   // s4
    21,   // s5
    22,   // s6
    23,   // s7
    24,   // t8
    25,   // t9
    26,   // k0
    27,   // k1
    28,   // gp
    29,   // sp
    30,   // fp
    31,   // ra
  };
  return kNumbers[reg.code()];
}


Register ToRegister(int num) {
  DCHECK(num >= 0 && num < kNumRegisters);
  const Register kRegisters[] = {
    zero_reg,
    at,
    v0, v1,
    a0, a1, a2, a3, a4, a5, a6, a7,
    t0, t1, t2, t3,
    s0, s1, s2, s3, s4, s5, s6, s7,
    t8, t9,
    k0, k1,
    gp,
    sp,
    fp,
    ra
  };
  return kRegisters[num];
}
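
// Illustrative sketch (not in the original source): ToNumber() and
// ToRegister() are inverse tables, so a round trip through either is the
// identity for every valid MIPS64 register:
//
//   Register r = ToRegister(5);              // a1
//   DCHECK_EQ(5, ToNumber(r));
//   DCHECK(ToRegister(ToNumber(a3)).is(a3));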


// -----------------------------------------------------------------------------
// Implementation of RelocInfo.

const int RelocInfo::kApplyMask = RelocInfo::kCodeTargetMask |
                                  1 << RelocInfo::INTERNAL_REFERENCE |
                                  1 << RelocInfo::INTERNAL_REFERENCE_ENCODED;


bool RelocInfo::IsCodedSpecially() {
  // The deserializer needs to know whether a pointer is specially coded.
  // Being specially coded on MIPS means that it is a lui/ori instruction
  // sequence, and that is always the case inside code objects.
  return true;
}


bool RelocInfo::IsInConstantPool() {
  return false;
}

Address RelocInfo::wasm_memory_reference() {
  DCHECK(IsWasmMemoryReference(rmode_));
  return Assembler::target_address_at(pc_, host_);
}

Address RelocInfo::wasm_global_reference() {
  DCHECK(IsWasmGlobalReference(rmode_));
  return Assembler::target_address_at(pc_, host_);
}

uint32_t RelocInfo::wasm_memory_size_reference() {
  DCHECK(IsWasmMemorySizeReference(rmode_));
  return static_cast<uint32_t>(
      reinterpret_cast<intptr_t>((Assembler::target_address_at(pc_, host_))));
}

uint32_t RelocInfo::wasm_function_table_size_reference() {
  DCHECK(IsWasmFunctionTableSizeReference(rmode_));
  return static_cast<uint32_t>(
      reinterpret_cast<intptr_t>((Assembler::target_address_at(pc_, host_))));
}

void RelocInfo::unchecked_update_wasm_memory_reference(
    Address address, ICacheFlushMode flush_mode) {
  Assembler::set_target_address_at(isolate_, pc_, host_, address, flush_mode);
}

void RelocInfo::unchecked_update_wasm_size(uint32_t size,
                                           ICacheFlushMode flush_mode) {
  Assembler::set_target_address_at(isolate_, pc_, host_,
                                   reinterpret_cast<Address>(size), flush_mode);
}

// -----------------------------------------------------------------------------
// Implementation of Operand and MemOperand.
// See assembler-mips64-inl.h for inlined constructors.

Operand::Operand(Handle<Object> handle) {
  AllowDeferredHandleDereference using_raw_address;
  rm_ = no_reg;
  // Verify that all Objects referenced by code are NOT in new space.
  Object* obj = *handle;
  if (obj->IsHeapObject()) {
    imm64_ = reinterpret_cast<intptr_t>(handle.location());
    rmode_ = RelocInfo::EMBEDDED_OBJECT;
  } else {
    // No relocation needed.
    imm64_ = reinterpret_cast<intptr_t>(obj);
    rmode_ = RelocInfo::NONE64;
  }
}


MemOperand::MemOperand(Register rm, int32_t offset) : Operand(rm) {
  offset_ = offset;
}


MemOperand::MemOperand(Register rm, int32_t unit, int32_t multiplier,
                       OffsetAddend offset_addend)
    : Operand(rm) {
  offset_ = unit * multiplier + offset_addend;
}


// -----------------------------------------------------------------------------
// Specific instructions, constants, and masks.

static const int kNegOffset = 0x00008000;
// daddiu(sp, sp, 8), i.e. the Pop() operation, or part of a Pop(r)
// operation, as a post-increment of sp.
const Instr kPopInstruction = DADDIU | (Register::kCode_sp << kRsShift) |
                              (Register::kCode_sp << kRtShift) |
                              (kPointerSize & kImm16Mask);  // NOLINT
// daddiu(sp, sp, -8), part of a Push(r) operation, as a pre-decrement of sp.
const Instr kPushInstruction = DADDIU | (Register::kCode_sp << kRsShift) |
                               (Register::kCode_sp << kRtShift) |
                               (-kPointerSize & kImm16Mask);  // NOLINT
// sd(r, MemOperand(sp, 0))
const Instr kPushRegPattern =
    SD | (Register::kCode_sp << kRsShift) | (0 & kImm16Mask);  // NOLINT
// ld(r, MemOperand(sp, 0))
const Instr kPopRegPattern =
    LD | (Register::kCode_sp << kRsShift) | (0 & kImm16Mask);  // NOLINT

const Instr kLwRegFpOffsetPattern =
    LW | (Register::kCode_fp << kRsShift) | (0 & kImm16Mask);  // NOLINT

const Instr kSwRegFpOffsetPattern =
    SW | (Register::kCode_fp << kRsShift) | (0 & kImm16Mask);  // NOLINT

const Instr kLwRegFpNegOffsetPattern = LW | (Register::kCode_fp << kRsShift) |
                                       (kNegOffset & kImm16Mask);  // NOLINT

const Instr kSwRegFpNegOffsetPattern = SW | (Register::kCode_fp << kRsShift) |
                                       (kNegOffset & kImm16Mask);  // NOLINT
// A mask for the Rt register for push, pop, lw, sw instructions.
const Instr kRtMask = kRtFieldMask;
const Instr kLwSwInstrTypeMask = 0xffe00000;
const Instr kLwSwInstrArgumentMask = ~kLwSwInstrTypeMask;
const Instr kLwSwOffsetMask = kImm16Mask;
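
// Illustrative sketch (not in the original source; assumes Register::kCode_a0
// exists alongside kCode_sp used above): IsPush()/IsPop() below compare
// against these patterns with the Rt field masked out, so any register
// stored at offset 0 from sp matches:
//
//   Instr push_a0 = SD | (Register::kCode_sp << kRsShift) |
//                   (Register::kCode_a0 << kRtShift) | (0 & kImm16Mask);
//   DCHECK((push_a0 & ~kRtMask) == kPushRegPattern);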

Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
    : AssemblerBase(isolate, buffer, buffer_size),
      recorded_ast_id_(TypeFeedbackId::None()) {
  reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);

  last_trampoline_pool_end_ = 0;
  no_trampoline_pool_before_ = 0;
  trampoline_pool_blocked_nesting_ = 0;
  // We leave space (16 * kTrampolineSlotsSize)
  // for BlockTrampolinePoolScope buffer.
  next_buffer_check_ = FLAG_force_long_branches
      ? kMaxInt : kMaxBranchOffset - kTrampolineSlotsSize * 16;
  internal_trampoline_exception_ = false;
  last_bound_pos_ = 0;

  trampoline_emitted_ = FLAG_force_long_branches;
  unbound_labels_count_ = 0;
  block_buffer_growth_ = false;

  ClearRecordedAstId();
}


void Assembler::GetCode(CodeDesc* desc) {
  EmitForbiddenSlotInstruction();
  DCHECK(pc_ <= reloc_info_writer.pos());  // No overlap.
  // Set up code descriptor.
  desc->buffer = buffer_;
  desc->buffer_size = buffer_size_;
  desc->instr_size = pc_offset();
  desc->reloc_size =
      static_cast<int>((buffer_ + buffer_size_) - reloc_info_writer.pos());
  desc->origin = this;
  desc->constant_pool_size = 0;
  desc->unwinding_info_size = 0;
  desc->unwinding_info = nullptr;
}


void Assembler::Align(int m) {
  DCHECK(m >= 4 && base::bits::IsPowerOfTwo32(m));
  EmitForbiddenSlotInstruction();
  while ((pc_offset() & (m - 1)) != 0) {
    nop();
  }
}


void Assembler::CodeTargetAlign() {
  // There is no known advantage to aligning branch/call targets to more
  // than a single instruction.
  Align(4);
}


Register Assembler::GetRtReg(Instr instr) {
  Register rt;
  rt.reg_code = (instr & kRtFieldMask) >> kRtShift;
  return rt;
}


Register Assembler::GetRsReg(Instr instr) {
  Register rs;
  rs.reg_code = (instr & kRsFieldMask) >> kRsShift;
  return rs;
}


Register Assembler::GetRdReg(Instr instr) {
  Register rd;
  rd.reg_code = (instr & kRdFieldMask) >> kRdShift;
  return rd;
}


uint32_t Assembler::GetRt(Instr instr) {
  return (instr & kRtFieldMask) >> kRtShift;
}


uint32_t Assembler::GetRtField(Instr instr) {
  return instr & kRtFieldMask;
}


uint32_t Assembler::GetRs(Instr instr) {
  return (instr & kRsFieldMask) >> kRsShift;
}


uint32_t Assembler::GetRsField(Instr instr) {
  return instr & kRsFieldMask;
}


uint32_t Assembler::GetRd(Instr instr) {
  return (instr & kRdFieldMask) >> kRdShift;
}


uint32_t Assembler::GetRdField(Instr instr) {
  return instr & kRdFieldMask;
}


uint32_t Assembler::GetSa(Instr instr) {
  return (instr & kSaFieldMask) >> kSaShift;
}


uint32_t Assembler::GetSaField(Instr instr) {
  return instr & kSaFieldMask;
}


uint32_t Assembler::GetOpcodeField(Instr instr) {
  return instr & kOpcodeMask;
}


uint32_t Assembler::GetFunction(Instr instr) {
  return (instr & kFunctionFieldMask) >> kFunctionShift;
}


uint32_t Assembler::GetFunctionField(Instr instr) {
  return instr & kFunctionFieldMask;
}


uint32_t Assembler::GetImmediate16(Instr instr) {
  return instr & kImm16Mask;
}


uint32_t Assembler::GetLabelConst(Instr instr) {
  return instr & ~kImm16Mask;
}


bool Assembler::IsPop(Instr instr) {
  return (instr & ~kRtMask) == kPopRegPattern;
}


bool Assembler::IsPush(Instr instr) {
  return (instr & ~kRtMask) == kPushRegPattern;
}


bool Assembler::IsSwRegFpOffset(Instr instr) {
  return ((instr & kLwSwInstrTypeMask) == kSwRegFpOffsetPattern);
}


bool Assembler::IsLwRegFpOffset(Instr instr) {
  return ((instr & kLwSwInstrTypeMask) == kLwRegFpOffsetPattern);
}


bool Assembler::IsSwRegFpNegOffset(Instr instr) {
  return ((instr & (kLwSwInstrTypeMask | kNegOffset)) ==
          kSwRegFpNegOffsetPattern);
}


bool Assembler::IsLwRegFpNegOffset(Instr instr) {
  return ((instr & (kLwSwInstrTypeMask | kNegOffset)) ==
          kLwRegFpNegOffsetPattern);
}


// Labels refer to positions in the (to be) generated code.
// There are bound, linked, and unused labels.
//
// Bound labels refer to known positions in the already
// generated code. pos() is the position the label refers to.
//
// Linked labels refer to unknown positions in the code
// to be generated; pos() is the position of the last
// instruction using the label.

// The link chain is terminated by an offset field value of -1, which is
// otherwise illegal (a branch to -1 would be an infinite loop). The 16-bit
// offset field of the instruction addresses 32-bit words, but in this code
// it is converted to an 18-bit value addressing bytes, hence the -4 value.

const int kEndOfChain = -4;
// Determines the end of the Jump chain (a subset of the label link chain).
const int kEndOfJumpChain = 0;
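
// Worked example (illustrative, not in the original source): an offset
// field of -1 is 0xFFFF; sign-extending it and scaling to bytes gives
// -1 << 2 == -4 == kEndOfChain, so the sentinel can never collide with a
// real link:
//
//   int16_t field = -1;                // all-ones 16-bit offset field
//   int32_t byte_offset = field << 2;  // -4 == kEndOfChain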


bool Assembler::IsBranch(Instr instr) {
  uint32_t opcode   = GetOpcodeField(instr);
  uint32_t rt_field = GetRtField(instr);
  uint32_t rs_field = GetRsField(instr);
  // Checks if the instruction is a branch.
  bool isBranch =
      opcode == BEQ || opcode == BNE || opcode == BLEZ || opcode == BGTZ ||
      opcode == BEQL || opcode == BNEL || opcode == BLEZL || opcode == BGTZL ||
      (opcode == REGIMM && (rt_field == BLTZ || rt_field == BGEZ ||
                            rt_field == BLTZAL || rt_field == BGEZAL)) ||
      (opcode == COP1 && rs_field == BC1) ||  // Coprocessor branch.
      (opcode == COP1 && rs_field == BC1EQZ) ||
      (opcode == COP1 && rs_field == BC1NEZ);
  if (!isBranch && kArchVariant == kMips64r6) {
    // All the 3 variants of POP10 (BOVC, BEQC, BEQZALC) and
    // POP30 (BNVC, BNEC, BNEZALC) are branch ops.
    isBranch |= opcode == POP10 || opcode == POP30 || opcode == BC ||
                opcode == BALC ||
                (opcode == POP66 && rs_field != 0) ||  // BEQZC
                (opcode == POP76 && rs_field != 0);    // BNEZC
  }
  return isBranch;
}


bool Assembler::IsBc(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  // Checks if the instruction is a BC or BALC.
  return opcode == BC || opcode == BALC;
}


bool Assembler::IsBzc(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  // Checks if the instruction is BEQZC or BNEZC.
  return (opcode == POP66 && GetRsField(instr) != 0) ||
         (opcode == POP76 && GetRsField(instr) != 0);
}


bool Assembler::IsEmittedConstant(Instr instr) {
  uint32_t label_constant = GetLabelConst(instr);
  return label_constant == 0;  // Emitted label const in reg-exp engine.
}


bool Assembler::IsBeq(Instr instr) {
  return GetOpcodeField(instr) == BEQ;
}


bool Assembler::IsBne(Instr instr) {
  return GetOpcodeField(instr) == BNE;
}


bool Assembler::IsBeqzc(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  return opcode == POP66 && GetRsField(instr) != 0;
}


bool Assembler::IsBnezc(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  return opcode == POP76 && GetRsField(instr) != 0;
}


bool Assembler::IsBeqc(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  uint32_t rs = GetRsField(instr);
  uint32_t rt = GetRtField(instr);
  return opcode == POP10 && rs != 0 && rs < rt;  // && rt != 0
}


bool Assembler::IsBnec(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  uint32_t rs = GetRsField(instr);
  uint32_t rt = GetRtField(instr);
  return opcode == POP30 && rs != 0 && rs < rt;  // && rt != 0
}


bool Assembler::IsJump(Instr instr) {
  uint32_t opcode   = GetOpcodeField(instr);
  uint32_t rt_field = GetRtField(instr);
  uint32_t rd_field = GetRdField(instr);
  uint32_t function_field = GetFunctionField(instr);
  // Checks if the instruction is a jump.
  return opcode == J || opcode == JAL ||
      (opcode == SPECIAL && rt_field == 0 &&
      ((function_field == JALR) || (rd_field == 0 && (function_field == JR))));
}


bool Assembler::IsJ(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  // Checks if the instruction is a jump.
  return opcode == J;
}


bool Assembler::IsJal(Instr instr) {
  return GetOpcodeField(instr) == JAL;
}


bool Assembler::IsJr(Instr instr) {
  return GetOpcodeField(instr) == SPECIAL && GetFunctionField(instr) == JR;
}


bool Assembler::IsJalr(Instr instr) {
  return GetOpcodeField(instr) == SPECIAL && GetFunctionField(instr) == JALR;
}


bool Assembler::IsLui(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  // Checks if the instruction is a load upper immediate.
  return opcode == LUI;
}


bool Assembler::IsOri(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  // Checks if the instruction is an ori (or immediate).
  return opcode == ORI;
}


bool Assembler::IsNop(Instr instr, unsigned int type) {
  // See Assembler::nop(type).
  DCHECK(type < 32);
  uint32_t opcode = GetOpcodeField(instr);
  uint32_t function = GetFunctionField(instr);
  uint32_t rt = GetRt(instr);
  uint32_t rd = GetRd(instr);
  uint32_t sa = GetSa(instr);

  // Traditional mips nop == sll(zero_reg, zero_reg, 0)
  // When marking non-zero type, use sll(zero_reg, at, type)
  // to avoid use of mips ssnop and ehb special encodings
  // of the sll instruction.

  Register nop_rt_reg = (type == 0) ? zero_reg : at;
  bool ret = (opcode == SPECIAL && function == SLL &&
              rd == static_cast<uint32_t>(ToNumber(zero_reg)) &&
              rt == static_cast<uint32_t>(ToNumber(nop_rt_reg)) &&
              sa == type);

  return ret;
}
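
// Illustrative sketch (not in the original source): the canonical nop is
// sll(zero_reg, zero_reg, 0), which encodes as the all-zero word, while a
// "marked" nop of type t is sll(zero_reg, at, t):
//
//   DCHECK(Assembler::IsNop(0x00000000, 0));  // plain nop
//   Instr marked =
//       SPECIAL | (ToNumber(at) << kRtShift) | (1 << kSaShift) | SLL;
//   DCHECK(Assembler::IsNop(marked, 1));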


int32_t Assembler::GetBranchOffset(Instr instr) {
  DCHECK(IsBranch(instr));
  return (static_cast<int16_t>(instr & kImm16Mask)) << 2;
}


bool Assembler::IsLw(Instr instr) {
  return (static_cast<uint32_t>(instr & kOpcodeMask) == LW);
}


int16_t Assembler::GetLwOffset(Instr instr) {
  DCHECK(IsLw(instr));
  return ((instr & kImm16Mask));
}


Instr Assembler::SetLwOffset(Instr instr, int16_t offset) {
  DCHECK(IsLw(instr));

  // We actually create a new lw instruction based on the original one.
  Instr temp_instr = LW | (instr & kRsFieldMask) | (instr & kRtFieldMask)
      | (offset & kImm16Mask);

  return temp_instr;
}


bool Assembler::IsSw(Instr instr) {
  return (static_cast<uint32_t>(instr & kOpcodeMask) == SW);
}


Instr Assembler::SetSwOffset(Instr instr, int16_t offset) {
  DCHECK(IsSw(instr));
  return ((instr & ~kImm16Mask) | (offset & kImm16Mask));
}


bool Assembler::IsAddImmediate(Instr instr) {
  return ((instr & kOpcodeMask) == ADDIU || (instr & kOpcodeMask) == DADDIU);
}


Instr Assembler::SetAddImmediateOffset(Instr instr, int16_t offset) {
  DCHECK(IsAddImmediate(instr));
  return ((instr & ~kImm16Mask) | (offset & kImm16Mask));
}


bool Assembler::IsAndImmediate(Instr instr) {
  return GetOpcodeField(instr) == ANDI;
}


static Assembler::OffsetSize OffsetSizeInBits(Instr instr) {
  if (kArchVariant == kMips64r6) {
    if (Assembler::IsBc(instr)) {
      return Assembler::OffsetSize::kOffset26;
    } else if (Assembler::IsBzc(instr)) {
      return Assembler::OffsetSize::kOffset21;
    }
  }
  return Assembler::OffsetSize::kOffset16;
}


static inline int32_t AddBranchOffset(int pos, Instr instr) {
  int bits = OffsetSizeInBits(instr);
  const int32_t mask = (1 << bits) - 1;
  bits = 32 - bits;

  // Do NOT change this to <<2. We rely on arithmetic shifts here, assuming
  // the compiler uses arithmetic shifts for signed integers.
  int32_t imm = ((instr & mask) << bits) >> (bits - 2);

  if (imm == kEndOfChain) {
    // EndOfChain sentinel is returned directly, not relative to pc or pos.
    return kEndOfChain;
  } else {
    return pos + Assembler::kBranchPCOffset + imm;
  }
}
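
// Worked example (illustrative, not in the original source): for a 16-bit
// offset field, bits starts at 16, so the shift pair is (imm << 16) >> 14.
// A field of 0x8002 becomes 0x80020000 >> 14 == -131064, i.e. the field is
// sign-extended and scaled to a byte offset (-32766 * 4) in one step.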


int Assembler::target_at(int pos, bool is_internal) {
  if (is_internal) {
    int64_t* p = reinterpret_cast<int64_t*>(buffer_ + pos);
    int64_t address = *p;
    if (address == kEndOfJumpChain) {
      return kEndOfChain;
    } else {
      int64_t instr_address = reinterpret_cast<int64_t>(p);
      DCHECK(instr_address - address < INT_MAX);
      int delta = static_cast<int>(instr_address - address);
      DCHECK(pos > delta);
      return pos - delta;
    }
  }
  Instr instr = instr_at(pos);
  if ((instr & ~kImm16Mask) == 0) {
    // Emitted label constant, not part of a branch.
    if (instr == 0) {
      return kEndOfChain;
    } else {
      int32_t imm18 = ((instr & static_cast<int32_t>(kImm16Mask)) << 16) >> 14;
      return (imm18 + pos);
    }
  }
  // Check we have a branch or jump instruction.
  DCHECK(IsBranch(instr) || IsJ(instr) || IsJal(instr) || IsLui(instr));
  // Do NOT change this to <<2. We rely on arithmetic shifts here, assuming
  // the compiler uses arithmetic shifts for signed integers.
  if (IsBranch(instr)) {
    return AddBranchOffset(pos, instr);
  } else if (IsLui(instr)) {
    Instr instr_lui = instr_at(pos + 0 * Assembler::kInstrSize);
    Instr instr_ori = instr_at(pos + 1 * Assembler::kInstrSize);
    Instr instr_ori2 = instr_at(pos + 3 * Assembler::kInstrSize);
    DCHECK(IsOri(instr_ori));
    DCHECK(IsOri(instr_ori2));

    // TODO(plind) create named constants for shift values.
    int64_t imm = static_cast<int64_t>(instr_lui & kImm16Mask) << 48;
    imm |= static_cast<int64_t>(instr_ori & kImm16Mask) << 32;
    imm |= static_cast<int64_t>(instr_ori2 & kImm16Mask) << 16;
    // Sign extend address.
    imm >>= 16;

    if (imm == kEndOfJumpChain) {
      // EndOfChain sentinel is returned directly, not relative to pc or pos.
      return kEndOfChain;
    } else {
      uint64_t instr_address = reinterpret_cast<uint64_t>(buffer_ + pos);
      DCHECK(instr_address - imm < INT_MAX);
      int delta = static_cast<int>(instr_address - imm);
      DCHECK(pos > delta);
      return pos - delta;
    }
  } else {
    DCHECK(IsJ(instr) || IsJal(instr));
    int32_t imm28 = (instr & static_cast<int32_t>(kImm26Mask)) << 2;
    if (imm28 == kEndOfJumpChain) {
      // EndOfChain sentinel is returned directly, not relative to pc or pos.
      return kEndOfChain;
    } else {
      // Sign extend 28-bit offset.
      int32_t delta = static_cast<int32_t>((imm28 << 4) >> 4);
      return pos + delta;
    }
  }
}


static inline Instr SetBranchOffset(int32_t pos, int32_t target_pos,
                                    Instr instr) {
  int32_t bits = OffsetSizeInBits(instr);
  int32_t imm = target_pos - (pos + Assembler::kBranchPCOffset);
  DCHECK((imm & 3) == 0);
  imm >>= 2;

  const int32_t mask = (1 << bits) - 1;
  instr &= ~mask;
  DCHECK(is_intn(imm, bits));

  return instr | (imm & mask);
}


void Assembler::target_at_put(int pos, int target_pos, bool is_internal) {
  if (is_internal) {
    uint64_t imm = reinterpret_cast<uint64_t>(buffer_) + target_pos;
    *reinterpret_cast<uint64_t*>(buffer_ + pos) = imm;
    return;
  }
  Instr instr = instr_at(pos);
  if ((instr & ~kImm16Mask) == 0) {
    DCHECK(target_pos == kEndOfChain || target_pos >= 0);
    // Emitted label constant, not part of a branch.
    // Make label relative to Code* of generated Code object.
    instr_at_put(pos, target_pos + (Code::kHeaderSize - kHeapObjectTag));
    return;
  }

  if (IsBranch(instr)) {
    instr = SetBranchOffset(pos, target_pos, instr);
    instr_at_put(pos, instr);
  } else if (IsLui(instr)) {
    Instr instr_lui = instr_at(pos + 0 * Assembler::kInstrSize);
    Instr instr_ori = instr_at(pos + 1 * Assembler::kInstrSize);
    Instr instr_ori2 = instr_at(pos + 3 * Assembler::kInstrSize);
    DCHECK(IsOri(instr_ori));
    DCHECK(IsOri(instr_ori2));

    uint64_t imm = reinterpret_cast<uint64_t>(buffer_) + target_pos;
    DCHECK((imm & 3) == 0);

    instr_lui &= ~kImm16Mask;
    instr_ori &= ~kImm16Mask;
    instr_ori2 &= ~kImm16Mask;

    instr_at_put(pos + 0 * Assembler::kInstrSize,
                 instr_lui | ((imm >> 32) & kImm16Mask));
    instr_at_put(pos + 1 * Assembler::kInstrSize,
                 instr_ori | ((imm >> 16) & kImm16Mask));
    instr_at_put(pos + 3 * Assembler::kInstrSize,
                 instr_ori2 | (imm & kImm16Mask));
  } else if (IsJ(instr) || IsJal(instr)) {
    int32_t imm28 = target_pos - pos;
    DCHECK((imm28 & 3) == 0);

    uint32_t imm26 = static_cast<uint32_t>(imm28 >> 2);
    DCHECK(is_uint26(imm26));
    // Place 26-bit signed offset with markings.
    // When code is committed it will be resolved to j/jal.
    int32_t mark = IsJ(instr) ? kJRawMark : kJalRawMark;
    instr_at_put(pos, mark | (imm26 & kImm26Mask));
  } else {
    int32_t imm28 = target_pos - pos;
    DCHECK((imm28 & 3) == 0);

    uint32_t imm26 = static_cast<uint32_t>(imm28 >> 2);
    DCHECK(is_uint26(imm26));
    // Place raw 26-bit signed offset.
    // When code is committed it will be resolved to j/jal.
    instr &= ~kImm26Mask;
    instr_at_put(pos, instr | (imm26 & kImm26Mask));
  }
}


void Assembler::print(Label* L) {
  if (L->is_unused()) {
    PrintF("unused label\n");
  } else if (L->is_bound()) {
    PrintF("bound label to %d\n", L->pos());
  } else if (L->is_linked()) {
    Label l = *L;
    PrintF("unbound label");
    while (l.is_linked()) {
      PrintF("@ %d ", l.pos());
      Instr instr = instr_at(l.pos());
      if ((instr & ~kImm16Mask) == 0) {
        PrintF("value\n");
      } else {
        PrintF("%d\n", instr);
      }
      next(&l, is_internal_reference(&l));
    }
  } else {
    PrintF("label in inconsistent state (pos = %d)\n", L->pos_);
  }
}


void Assembler::bind_to(Label* L, int pos) {
  DCHECK(0 <= pos && pos <= pc_offset());  // Must have valid binding position.
  int trampoline_pos = kInvalidSlotPos;
  bool is_internal = false;
  if (L->is_linked() && !trampoline_emitted_) {
    unbound_labels_count_--;
    if (!is_internal_reference(L)) {
      next_buffer_check_ += kTrampolineSlotsSize;
    }
  }

  while (L->is_linked()) {
    int fixup_pos = L->pos();
    int dist = pos - fixup_pos;
    is_internal = is_internal_reference(L);
    next(L, is_internal);  // Call next before overwriting link with target at
                           // fixup_pos.
    Instr instr = instr_at(fixup_pos);
    if (is_internal) {
      target_at_put(fixup_pos, pos, is_internal);
    } else {
      if (IsBranch(instr)) {
        int branch_offset = BranchOffset(instr);
        if (dist > branch_offset) {
          if (trampoline_pos == kInvalidSlotPos) {
            trampoline_pos = get_trampoline_entry(fixup_pos);
            CHECK(trampoline_pos != kInvalidSlotPos);
          }
          CHECK((trampoline_pos - fixup_pos) <= branch_offset);
          target_at_put(fixup_pos, trampoline_pos, false);
          fixup_pos = trampoline_pos;
        }
        target_at_put(fixup_pos, pos, false);
      } else {
        DCHECK(IsJ(instr) || IsJal(instr) || IsLui(instr) ||
               IsEmittedConstant(instr));
        target_at_put(fixup_pos, pos, false);
      }
    }
  }
  L->bind_to(pos);

  // Keep track of the last bound label so we don't eliminate any instructions
  // before a bound label.
  if (pos > last_bound_pos_)
    last_bound_pos_ = pos;
}


void Assembler::bind(Label* L) {
  DCHECK(!L->is_bound());  // Label can only be bound once.
  bind_to(L, pc_offset());
}


void Assembler::next(Label* L, bool is_internal) {
  DCHECK(L->is_linked());
  int link = target_at(L->pos(), is_internal);
  if (link == kEndOfChain) {
    L->Unuse();
  } else {
    DCHECK(link >= 0);
    L->link_to(link);
  }
}


bool Assembler::is_near(Label* L) {
  DCHECK(L->is_bound());
  return pc_offset() - L->pos() < kMaxBranchOffset - 4 * kInstrSize;
}


bool Assembler::is_near(Label* L, OffsetSize bits) {
  if (L == nullptr || !L->is_bound()) return true;
  return ((pc_offset() - L->pos()) <
          (1 << (bits + 2 - 1)) - 1 - 5 * kInstrSize);
}


bool Assembler::is_near_branch(Label* L) {
  DCHECK(L->is_bound());
  return kArchVariant == kMips64r6 ? is_near_r6(L) : is_near_pre_r6(L);
}


int Assembler::BranchOffset(Instr instr) {
  // The offset is 16 bits pre-R6, and also for R6 branches other than the
  // compact ones handled below.
  int bits = OffsetSize::kOffset16;

  if (kArchVariant == kMips64r6) {
    uint32_t opcode = GetOpcodeField(instr);
    switch (opcode) {
      // Checks BC or BALC.
      case BC:
      case BALC:
        bits = OffsetSize::kOffset26;
        break;

      // Checks BEQZC or BNEZC.
      case POP66:
      case POP76:
        if (GetRsField(instr) != 0) bits = OffsetSize::kOffset21;
        break;
      default:
        break;
    }
  }

  return (1 << (bits + 2 - 1)) - 1;
}
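
// Worked example (illustrative, not in the original source): for the
// default kOffset16 the maximum reach is (1 << 17) - 1 == 131071 bytes;
// kOffset21 gives (1 << 22) - 1 and kOffset26 gives (1 << 27) - 1, i.e.
// the word-addressed offset fields scaled by 4.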


// We have to use a temporary register for things that can be relocated even
// if they can be encoded in MIPS's 16-bit immediate-offset field, because
// there is no guarantee that the relocated location can be similarly
// encoded.
bool Assembler::MustUseReg(RelocInfo::Mode rmode) {
  return !RelocInfo::IsNone(rmode);
}

void Assembler::GenInstrRegister(Opcode opcode,
                                 Register rs,
                                 Register rt,
                                 Register rd,
                                 uint16_t sa,
                                 SecondaryField func) {
  DCHECK(rd.is_valid() && rs.is_valid() && rt.is_valid() && is_uint5(sa));
  Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift)
      | (rd.code() << kRdShift) | (sa << kSaShift) | func;
  emit(instr);
}
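
// Illustrative sketch (not in the original source): the R-type word packs
// six fields -- opcode(6) rs(5) rt(5) rd(5) sa(5) funct(6). For example,
// addu(a0, a1, a2) goes through GenInstrRegister(SPECIAL, a1, a2, a0, 0,
// ADDU) and, using the register codes from ToNumber() above, assembles to:
//
//   (5 << kRsShift) | (6 << kRtShift) | (4 << kRdShift) | ADDU  // 0x00A62021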


void Assembler::GenInstrRegister(Opcode opcode,
                                 Register rs,
                                 Register rt,
                                 uint16_t msb,
                                 uint16_t lsb,
                                 SecondaryField func) {
  DCHECK(rs.is_valid() && rt.is_valid() && is_uint5(msb) && is_uint5(lsb));
  Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift)
      | (msb << kRdShift) | (lsb << kSaShift) | func;
  emit(instr);
}


void Assembler::GenInstrRegister(Opcode opcode,
                                 SecondaryField fmt,
                                 FPURegister ft,
                                 FPURegister fs,
                                 FPURegister fd,
                                 SecondaryField func) {
  DCHECK(fd.is_valid() && fs.is_valid() && ft.is_valid());
  Instr instr = opcode | fmt | (ft.code() << kFtShift) | (fs.code() << kFsShift)
      | (fd.code() << kFdShift) | func;
  emit(instr);
}


void Assembler::GenInstrRegister(Opcode opcode,
                                 FPURegister fr,
                                 FPURegister ft,
                                 FPURegister fs,
                                 FPURegister fd,
                                 SecondaryField func) {
  DCHECK(fd.is_valid() && fr.is_valid() && fs.is_valid() && ft.is_valid());
  Instr instr = opcode | (fr.code() << kFrShift) | (ft.code() << kFtShift)
      | (fs.code() << kFsShift) | (fd.code() << kFdShift) | func;
  emit(instr);
}


void Assembler::GenInstrRegister(Opcode opcode,
                                 SecondaryField fmt,
                                 Register rt,
                                 FPURegister fs,
                                 FPURegister fd,
                                 SecondaryField func) {
  DCHECK(fd.is_valid() && fs.is_valid() && rt.is_valid());
  Instr instr = opcode | fmt | (rt.code() << kRtShift)
      | (fs.code() << kFsShift) | (fd.code() << kFdShift) | func;
  emit(instr);
}


void Assembler::GenInstrRegister(Opcode opcode,
                                 SecondaryField fmt,
                                 Register rt,
                                 FPUControlRegister fs,
                                 SecondaryField func) {
  DCHECK(fs.is_valid() && rt.is_valid());
  Instr instr =
      opcode | fmt | (rt.code() << kRtShift) | (fs.code() << kFsShift) | func;
  emit(instr);
}


// Instructions with immediate value.
// Registers are in the order of the instruction encoding, from left to right.
void Assembler::GenInstrImmediate(Opcode opcode, Register rs, Register rt,
                                  int32_t j,
                                  CompactBranchType is_compact_branch) {
  DCHECK(rs.is_valid() && rt.is_valid() && (is_int16(j) || is_uint16(j)));
  Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift)
      | (j & kImm16Mask);
  emit(instr, is_compact_branch);
}


void Assembler::GenInstrImmediate(Opcode opcode, Register rs, SecondaryField SF,
                                  int32_t j,
                                  CompactBranchType is_compact_branch) {
  DCHECK(rs.is_valid() && (is_int16(j) || is_uint16(j)));
  Instr instr = opcode | (rs.code() << kRsShift) | SF | (j & kImm16Mask);
  emit(instr, is_compact_branch);
}


void Assembler::GenInstrImmediate(Opcode opcode, Register rs, FPURegister ft,
                                  int32_t j,
                                  CompactBranchType is_compact_branch) {
  DCHECK(rs.is_valid() && ft.is_valid() && (is_int16(j) || is_uint16(j)));
  Instr instr = opcode | (rs.code() << kRsShift) | (ft.code() << kFtShift)
      | (j & kImm16Mask);
  emit(instr, is_compact_branch);
}


void Assembler::GenInstrImmediate(Opcode opcode, Register rs, int32_t offset21,
                                  CompactBranchType is_compact_branch) {
  DCHECK(rs.is_valid() && (is_int21(offset21)));
  Instr instr = opcode | (rs.code() << kRsShift) | (offset21 & kImm21Mask);
  emit(instr, is_compact_branch);
}


void Assembler::GenInstrImmediate(Opcode opcode, Register rs,
                                  uint32_t offset21) {
  DCHECK(rs.is_valid() && (is_uint21(offset21)));
  Instr instr = opcode | (rs.code() << kRsShift) | (offset21 & kImm21Mask);
  emit(instr);
}


void Assembler::GenInstrImmediate(Opcode opcode, int32_t offset26,
                                  CompactBranchType is_compact_branch) {
  DCHECK(is_int26(offset26));
  Instr instr = opcode | (offset26 & kImm26Mask);
  emit(instr, is_compact_branch);
}


void Assembler::GenInstrJump(Opcode opcode,
                             uint32_t address) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  DCHECK(is_uint26(address));
  Instr instr = opcode | address;
  emit(instr);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


// Returns the next free trampoline entry.
int32_t Assembler::get_trampoline_entry(int32_t pos) {
  int32_t trampoline_entry = kInvalidSlotPos;
  if (!internal_trampoline_exception_) {
    if (trampoline_.start() > pos) {
      trampoline_entry = trampoline_.take_slot();
    }

    if (kInvalidSlotPos == trampoline_entry) {
      internal_trampoline_exception_ = true;
    }
  }
  return trampoline_entry;
}


uint64_t Assembler::jump_address(Label* L) {
  int64_t target_pos;
  if (L->is_bound()) {
    target_pos = L->pos();
  } else {
    if (L->is_linked()) {
      target_pos = L->pos();  // L's link.
      L->link_to(pc_offset());
    } else {
      L->link_to(pc_offset());
      return kEndOfJumpChain;
    }
  }
  uint64_t imm = reinterpret_cast<uint64_t>(buffer_) + target_pos;
  DCHECK((imm & 3) == 0);

  return imm;
}


uint64_t Assembler::jump_offset(Label* L) {
  int64_t target_pos;
  int32_t pad = IsPrevInstrCompactBranch() ? kInstrSize : 0;

  if (L->is_bound()) {
    target_pos = L->pos();
  } else {
    if (L->is_linked()) {
      target_pos = L->pos();  // L's link.
      L->link_to(pc_offset() + pad);
    } else {
      L->link_to(pc_offset() + pad);
      return kEndOfJumpChain;
    }
  }
  int64_t imm = target_pos - (pc_offset() + pad);
  DCHECK((imm & 3) == 0);

  return static_cast<uint64_t>(imm);
}


int32_t Assembler::branch_offset_helper(Label* L, OffsetSize bits) {
  int32_t target_pos;
  int32_t pad = IsPrevInstrCompactBranch() ? kInstrSize : 0;

  if (L->is_bound()) {
    target_pos = L->pos();
  } else {
    if (L->is_linked()) {
      target_pos = L->pos();
      L->link_to(pc_offset() + pad);
    } else {
      L->link_to(pc_offset() + pad);
      if (!trampoline_emitted_) {
        unbound_labels_count_++;
        next_buffer_check_ -= kTrampolineSlotsSize;
      }
      return kEndOfChain;
    }
  }

  int32_t offset = target_pos - (pc_offset() + kBranchPCOffset + pad);
  DCHECK(is_intn(offset, bits + 2));
  DCHECK((offset & 3) == 0);

  return offset;
}
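
// Worked example (illustrative, not in the original source; assumes no
// compact-branch padding and the usual one-instruction kBranchPCOffset of
// 4 bytes): a branch emitted at pc_offset() 0x100 targeting a label bound
// at 0x120 yields offset 0x120 - (0x100 + 4) == 0x1C bytes, which callers
// shift right by 2 before placing it in the instruction's offset field.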
   1223 
   1224 
   1225 void Assembler::label_at_put(Label* L, int at_offset) {
   1226   int target_pos;
   1227   if (L->is_bound()) {
   1228     target_pos = L->pos();
   1229     instr_at_put(at_offset, target_pos + (Code::kHeaderSize - kHeapObjectTag));
   1230   } else {
   1231     if (L->is_linked()) {
   1232       target_pos = L->pos();  // L's link.
   1233       int32_t imm18 = target_pos - at_offset;
   1234       DCHECK((imm18 & 3) == 0);
   1235       int32_t imm16 = imm18 >> 2;
   1236       DCHECK(is_int16(imm16));
   1237       instr_at_put(at_offset, (imm16 & kImm16Mask));
   1238     } else {
   1239       target_pos = kEndOfChain;
   1240       instr_at_put(at_offset, 0);
   1241       if (!trampoline_emitted_) {
   1242         unbound_labels_count_++;
   1243         next_buffer_check_ -= kTrampolineSlotsSize;
   1244       }
   1245     }
   1246     L->link_to(at_offset);
   1247   }
   1248 }
   1249 
   1250 
   1251 //------- Branch and jump instructions --------
   1252 
   1253 void Assembler::b(int16_t offset) {
   1254   beq(zero_reg, zero_reg, offset);
   1255 }
   1256 
   1257 
   1258 void Assembler::bal(int16_t offset) {
   1259   bgezal(zero_reg, offset);
   1260 }
   1261 
   1262 
   1263 void Assembler::bc(int32_t offset) {
   1264   DCHECK(kArchVariant == kMips64r6);
   1265   GenInstrImmediate(BC, offset, CompactBranchType::COMPACT_BRANCH);
   1266 }
   1267 
   1268 
   1269 void Assembler::balc(int32_t offset) {
   1270   DCHECK(kArchVariant == kMips64r6);
   1271   GenInstrImmediate(BALC, offset, CompactBranchType::COMPACT_BRANCH);
   1272 }
   1273 
   1274 
   1275 void Assembler::beq(Register rs, Register rt, int16_t offset) {
   1276   BlockTrampolinePoolScope block_trampoline_pool(this);
   1277   GenInstrImmediate(BEQ, rs, rt, offset);
   1278   BlockTrampolinePoolFor(1);  // For associated delay slot.
   1279 }
   1280 
   1281 
   1282 void Assembler::bgez(Register rs, int16_t offset) {
   1283   BlockTrampolinePoolScope block_trampoline_pool(this);
   1284   GenInstrImmediate(REGIMM, rs, BGEZ, offset);
   1285   BlockTrampolinePoolFor(1);  // For associated delay slot.
   1286 }
   1287 
   1288 
   1289 void Assembler::bgezc(Register rt, int16_t offset) {
   1290   DCHECK(kArchVariant == kMips64r6);
   1291   DCHECK(!(rt.is(zero_reg)));
   1292   GenInstrImmediate(BLEZL, rt, rt, offset, CompactBranchType::COMPACT_BRANCH);
   1293 }
   1294 
   1295 
   1296 void Assembler::bgeuc(Register rs, Register rt, int16_t offset) {
   1297   DCHECK(kArchVariant == kMips64r6);
   1298   DCHECK(!(rs.is(zero_reg)));
   1299   DCHECK(!(rt.is(zero_reg)));
   1300   DCHECK(rs.code() != rt.code());
   1301   GenInstrImmediate(BLEZ, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
   1302 }
   1303 
   1304 
   1305 void Assembler::bgec(Register rs, Register rt, int16_t offset) {
   1306   DCHECK(kArchVariant == kMips64r6);
   1307   DCHECK(!(rs.is(zero_reg)));
   1308   DCHECK(!(rt.is(zero_reg)));
   1309   DCHECK(rs.code() != rt.code());
   1310   GenInstrImmediate(BLEZL, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
   1311 }
   1312 
   1313 
   1314 void Assembler::bgezal(Register rs, int16_t offset) {
   1315   DCHECK(kArchVariant != kMips64r6 || rs.is(zero_reg));
   1316   BlockTrampolinePoolScope block_trampoline_pool(this);
   1317   GenInstrImmediate(REGIMM, rs, BGEZAL, offset);
   1318   BlockTrampolinePoolFor(1);  // For associated delay slot.
   1319 }
   1320 
   1321 
   1322 void Assembler::bgtz(Register rs, int16_t offset) {
   1323   BlockTrampolinePoolScope block_trampoline_pool(this);
   1324   GenInstrImmediate(BGTZ, rs, zero_reg, offset);
   1325   BlockTrampolinePoolFor(1);  // For associated delay slot.
   1326 }
   1327 
   1328 
   1329 void Assembler::bgtzc(Register rt, int16_t offset) {
   1330   DCHECK(kArchVariant == kMips64r6);
   1331   DCHECK(!(rt.is(zero_reg)));
   1332   GenInstrImmediate(BGTZL, zero_reg, rt, offset,
   1333                     CompactBranchType::COMPACT_BRANCH);
   1334 }
   1335 
   1336 
   1337 void Assembler::blez(Register rs, int16_t offset) {
   1338   BlockTrampolinePoolScope block_trampoline_pool(this);
   1339   GenInstrImmediate(BLEZ, rs, zero_reg, offset);
   1340   BlockTrampolinePoolFor(1);  // For associated delay slot.
   1341 }
   1342 
   1343 
   1344 void Assembler::blezc(Register rt, int16_t offset) {
   1345   DCHECK(kArchVariant == kMips64r6);
   1346   DCHECK(!(rt.is(zero_reg)));
   1347   GenInstrImmediate(BLEZL, zero_reg, rt, offset,
   1348                     CompactBranchType::COMPACT_BRANCH);
   1349 }
   1350 
   1351 
   1352 void Assembler::bltzc(Register rt, int16_t offset) {
   1353   DCHECK(kArchVariant == kMips64r6);
   1354   DCHECK(!rt.is(zero_reg));
   1355   GenInstrImmediate(BGTZL, rt, rt, offset, CompactBranchType::COMPACT_BRANCH);
   1356 }
   1357 
   1358 
   1359 void Assembler::bltuc(Register rs, Register rt, int16_t offset) {
   1360   DCHECK(kArchVariant == kMips64r6);
   1361   DCHECK(!(rs.is(zero_reg)));
   1362   DCHECK(!(rt.is(zero_reg)));
   1363   DCHECK(rs.code() != rt.code());
   1364   GenInstrImmediate(BGTZ, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
   1365 }
   1366 
   1367 
   1368 void Assembler::bltc(Register rs, Register rt, int16_t offset) {
   1369   DCHECK(kArchVariant == kMips64r6);
   1370   DCHECK(!rs.is(zero_reg));
   1371   DCHECK(!rt.is(zero_reg));
   1372   DCHECK(rs.code() != rt.code());
   1373   GenInstrImmediate(BGTZL, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
   1374 }
   1375 
   1376 
   1377 void Assembler::bltz(Register rs, int16_t offset) {
   1378   BlockTrampolinePoolScope block_trampoline_pool(this);
   1379   GenInstrImmediate(REGIMM, rs, BLTZ, offset);
   1380   BlockTrampolinePoolFor(1);  // For associated delay slot.
   1381 }
   1382 
   1383 
   1384 void Assembler::bltzal(Register rs, int16_t offset) {
   1385   DCHECK(kArchVariant != kMips64r6 || rs.is(zero_reg));
   1386   BlockTrampolinePoolScope block_trampoline_pool(this);
   1387   GenInstrImmediate(REGIMM, rs, BLTZAL, offset);
   1388   BlockTrampolinePoolFor(1);  // For associated delay slot.
   1389 }
   1390 
   1391 
   1392 void Assembler::bne(Register rs, Register rt, int16_t offset) {
   1393   BlockTrampolinePoolScope block_trampoline_pool(this);
   1394   GenInstrImmediate(BNE, rs, rt, offset);
   1395   BlockTrampolinePoolFor(1);  // For associated delay slot.
   1396 }
   1397 
   1398 
   1399 void Assembler::bovc(Register rs, Register rt, int16_t offset) {
   1400   DCHECK(kArchVariant == kMips64r6);
   1401   if (rs.code() >= rt.code()) {
   1402     GenInstrImmediate(ADDI, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
   1403   } else {
   1404     GenInstrImmediate(ADDI, rt, rs, offset, CompactBranchType::COMPACT_BRANCH);
   1405   }
   1406 }
   1407 
   1408 
   1409 void Assembler::bnvc(Register rs, Register rt, int16_t offset) {
   1410   DCHECK(kArchVariant == kMips64r6);
   1411   if (rs.code() >= rt.code()) {
   1412     GenInstrImmediate(DADDI, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
   1413   } else {
   1414     GenInstrImmediate(DADDI, rt, rs, offset, CompactBranchType::COMPACT_BRANCH);
   1415   }
   1416 }
   1417 
   1418 
   1419 void Assembler::blezalc(Register rt, int16_t offset) {
   1420   DCHECK(kArchVariant == kMips64r6);
   1421   DCHECK(!(rt.is(zero_reg)));
   1422   GenInstrImmediate(BLEZ, zero_reg, rt, offset,
   1423                     CompactBranchType::COMPACT_BRANCH);
   1424 }
   1425 
   1426 
   1427 void Assembler::bgezalc(Register rt, int16_t offset) {
   1428   DCHECK(kArchVariant == kMips64r6);
   1429   DCHECK(!(rt.is(zero_reg)));
   1430   GenInstrImmediate(BLEZ, rt, rt, offset, CompactBranchType::COMPACT_BRANCH);
   1431 }
   1432 
   1433 
   1434 void Assembler::bgezall(Register rs, int16_t offset) {
   1435   DCHECK(kArchVariant != kMips64r6);
   1436   DCHECK(!(rs.is(zero_reg)));
   1437   BlockTrampolinePoolScope block_trampoline_pool(this);
   1438   GenInstrImmediate(REGIMM, rs, BGEZALL, offset);
   1439   BlockTrampolinePoolFor(1);  // For associated delay slot.
   1440 }
   1441 
   1442 
   1443 void Assembler::bltzalc(Register rt, int16_t offset) {
   1444   DCHECK(kArchVariant == kMips64r6);
   1445   DCHECK(!(rt.is(zero_reg)));
   1446   GenInstrImmediate(BGTZ, rt, rt, offset, CompactBranchType::COMPACT_BRANCH);
   1447 }
   1448 
   1449 
   1450 void Assembler::bgtzalc(Register rt, int16_t offset) {
   1451   DCHECK(kArchVariant == kMips64r6);
   1452   DCHECK(!(rt.is(zero_reg)));
   1453   GenInstrImmediate(BGTZ, zero_reg, rt, offset,
   1454                     CompactBranchType::COMPACT_BRANCH);
   1455 }
   1456 
   1457 
   1458 void Assembler::beqzalc(Register rt, int16_t offset) {
   1459   DCHECK(kArchVariant == kMips64r6);
   1460   DCHECK(!(rt.is(zero_reg)));
   1461   GenInstrImmediate(ADDI, zero_reg, rt, offset,
   1462                     CompactBranchType::COMPACT_BRANCH);
   1463 }
   1464 
   1465 
   1466 void Assembler::bnezalc(Register rt, int16_t offset) {
   1467   DCHECK(kArchVariant == kMips64r6);
   1468   DCHECK(!(rt.is(zero_reg)));
   1469   GenInstrImmediate(DADDI, zero_reg, rt, offset,
   1470                     CompactBranchType::COMPACT_BRANCH);
   1471 }
   1472 
   1473 
   1474 void Assembler::beqc(Register rs, Register rt, int16_t offset) {
   1475   DCHECK(kArchVariant == kMips64r6);
   1476   DCHECK(rs.code() != rt.code() && rs.code() != 0 && rt.code() != 0);
   1477   if (rs.code() < rt.code()) {
   1478     GenInstrImmediate(ADDI, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
   1479   } else {
   1480     GenInstrImmediate(ADDI, rt, rs, offset, CompactBranchType::COMPACT_BRANCH);
   1481   }
   1482 }
   1483 
   1484 
   1485 void Assembler::beqzc(Register rs, int32_t offset) {
   1486   DCHECK(kArchVariant == kMips64r6);
   1487   DCHECK(!(rs.is(zero_reg)));
   1488   GenInstrImmediate(POP66, rs, offset, CompactBranchType::COMPACT_BRANCH);
   1489 }
   1490 
   1491 
   1492 void Assembler::bnec(Register rs, Register rt, int16_t offset) {
   1493   DCHECK(kArchVariant == kMips64r6);
   1494   DCHECK(rs.code() != rt.code() && rs.code() != 0 && rt.code() != 0);
   1495   if (rs.code() < rt.code()) {
   1496     GenInstrImmediate(DADDI, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
   1497   } else {
   1498     GenInstrImmediate(DADDI, rt, rs, offset, CompactBranchType::COMPACT_BRANCH);
   1499   }
   1500 }
   1501 
   1502 
   1503 void Assembler::bnezc(Register rs, int32_t offset) {
   1504   DCHECK(kArchVariant == kMips64r6);
   1505   DCHECK(!(rs.is(zero_reg)));
   1506   GenInstrImmediate(POP76, rs, offset, CompactBranchType::COMPACT_BRANCH);
   1507 }
   1508 
   1509 
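        // J and JAL take a 26-bit instruction index, i.e. a word-aligned target
        // within the current 256 MB region, hence the 'target >> 2' below.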
   1510 void Assembler::j(int64_t target) {
   1511   BlockTrampolinePoolScope block_trampoline_pool(this);
   1512   GenInstrJump(J, static_cast<uint32_t>(target >> 2) & kImm26Mask);
   1513   BlockTrampolinePoolFor(1);  // For associated delay slot.
   1514 }
   1515 
   1516 
   1517 void Assembler::j(Label* target) {
   1518   uint64_t imm = jump_offset(target);
   1519   if (target->is_bound()) {
   1520     BlockTrampolinePoolScope block_trampoline_pool(this);
   1521     GenInstrJump(static_cast<Opcode>(kJRawMark),
   1522                  static_cast<uint32_t>(imm >> 2) & kImm26Mask);
   1523     BlockTrampolinePoolFor(1);  // For associated delay slot.
   1524   } else {
   1525     j(imm);
   1526   }
   1527 }
   1528 
   1529 
   1530 void Assembler::jal(Label* target) {
   1531   uint64_t imm = jump_offset(target);
   1532   if (target->is_bound()) {
   1533     BlockTrampolinePoolScope block_trampoline_pool(this);
   1534     GenInstrJump(static_cast<Opcode>(kJalRawMark),
   1535                  static_cast<uint32_t>(imm >> 2) & kImm26Mask);
   1536     BlockTrampolinePoolFor(1);  // For associated delay slot.
   1537   } else {
   1538     jal(imm);
   1539   }
   1540 }
   1541 
   1542 
   1543 void Assembler::jr(Register rs) {
   1544   if (kArchVariant != kMips64r6) {
   1545     BlockTrampolinePoolScope block_trampoline_pool(this);
   1546     GenInstrRegister(SPECIAL, rs, zero_reg, zero_reg, 0, JR);
   1547     BlockTrampolinePoolFor(1);  // For associated delay slot.
   1548   } else {
   1549     jalr(rs, zero_reg);
   1550   }
   1551 }
   1552 
   1553 
   1554 void Assembler::jal(int64_t target) {
   1555   BlockTrampolinePoolScope block_trampoline_pool(this);
   1556   GenInstrJump(JAL, static_cast<uint32_t>(target >> 2) & kImm26Mask);
   1557   BlockTrampolinePoolFor(1);  // For associated delay slot.
   1558 }
   1559 
   1560 
   1561 void Assembler::jalr(Register rs, Register rd) {
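          // The ISA leaves jalr with rs == rd unpredictable: if an exception hits
          // the delay slot, re-execution would jump to the just-written link value.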
   1562   DCHECK(rs.code() != rd.code());
   1563   BlockTrampolinePoolScope block_trampoline_pool(this);
   1564   GenInstrRegister(SPECIAL, rs, zero_reg, rd, 0, JALR);
   1565   BlockTrampolinePoolFor(1);  // For associated delay slot.
   1566 }
   1567 
   1568 
   1569 void Assembler::jic(Register rt, int16_t offset) {
   1570   DCHECK(kArchVariant == kMips64r6);
   1571   GenInstrImmediate(POP66, zero_reg, rt, offset);
   1572 }
   1573 
   1574 
   1575 void Assembler::jialc(Register rt, int16_t offset) {
   1576   DCHECK(kArchVariant == kMips64r6);
   1577   GenInstrImmediate(POP76, zero_reg, rt, offset);
   1578 }
   1579 
   1580 
   1581 // -------Data-processing-instructions---------
   1582 
   1583 // Arithmetic.
   1584 
   1585 void Assembler::addu(Register rd, Register rs, Register rt) {
   1586   GenInstrRegister(SPECIAL, rs, rt, rd, 0, ADDU);
   1587 }
   1588 
   1589 
   1590 void Assembler::addiu(Register rd, Register rs, int32_t j) {
   1591   GenInstrImmediate(ADDIU, rs, rd, j);
   1592 }
   1593 
   1594 
   1595 void Assembler::subu(Register rd, Register rs, Register rt) {
   1596   GenInstrRegister(SPECIAL, rs, rt, rd, 0, SUBU);
   1597 }
   1598 
   1599 
   1600 void Assembler::mul(Register rd, Register rs, Register rt) {
   1601   if (kArchVariant == kMips64r6) {
   1602     GenInstrRegister(SPECIAL, rs, rt, rd, MUL_OP, MUL_MUH);
   1603   } else {
   1604     GenInstrRegister(SPECIAL2, rs, rt, rd, 0, MUL);
   1605   }
   1606 }
   1607 
   1608 
   1609 void Assembler::muh(Register rd, Register rs, Register rt) {
   1610   DCHECK(kArchVariant == kMips64r6);
   1611   GenInstrRegister(SPECIAL, rs, rt, rd, MUH_OP, MUL_MUH);
   1612 }
   1613 
   1614 
   1615 void Assembler::mulu(Register rd, Register rs, Register rt) {
   1616   DCHECK(kArchVariant == kMips64r6);
   1617   GenInstrRegister(SPECIAL, rs, rt, rd, MUL_OP, MUL_MUH_U);
   1618 }
   1619 
   1620 
   1621 void Assembler::muhu(Register rd, Register rs, Register rt) {
   1622   DCHECK(kArchVariant == kMips64r6);
   1623   GenInstrRegister(SPECIAL, rs, rt, rd, MUH_OP, MUL_MUH_U);
   1624 }
   1625 
   1626 
   1627 void Assembler::dmul(Register rd, Register rs, Register rt) {
   1628   DCHECK(kArchVariant == kMips64r6);
   1629   GenInstrRegister(SPECIAL, rs, rt, rd, MUL_OP, D_MUL_MUH);
   1630 }
   1631 
   1632 
   1633 void Assembler::dmuh(Register rd, Register rs, Register rt) {
   1634   DCHECK(kArchVariant == kMips64r6);
   1635   GenInstrRegister(SPECIAL, rs, rt, rd, MUH_OP, D_MUL_MUH);
   1636 }
   1637 
   1638 
   1639 void Assembler::dmulu(Register rd, Register rs, Register rt) {
   1640   DCHECK(kArchVariant == kMips64r6);
   1641   GenInstrRegister(SPECIAL, rs, rt, rd, MUL_OP, D_MUL_MUH_U);
   1642 }
   1643 
   1644 
   1645 void Assembler::dmuhu(Register rd, Register rs, Register rt) {
   1646   DCHECK(kArchVariant == kMips64r6);
   1647   GenInstrRegister(SPECIAL, rs, rt, rd, MUH_OP, D_MUL_MUH_U);
   1648 }
   1649 
   1650 
   1651 void Assembler::mult(Register rs, Register rt) {
   1652   DCHECK(kArchVariant != kMips64r6);
   1653   GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, MULT);
   1654 }
   1655 
   1656 
   1657 void Assembler::multu(Register rs, Register rt) {
   1658   DCHECK(kArchVariant != kMips64r6);
   1659   GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, MULTU);
   1660 }
   1661 
   1662 
   1663 void Assembler::daddiu(Register rd, Register rs, int32_t j) {
   1664   GenInstrImmediate(DADDIU, rs, rd, j);
   1665 }
   1666 
   1667 
   1668 void Assembler::div(Register rs, Register rt) {
   1669   GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DIV);
   1670 }
   1671 
   1672 
   1673 void Assembler::div(Register rd, Register rs, Register rt) {
   1674   DCHECK(kArchVariant == kMips64r6);
   1675   GenInstrRegister(SPECIAL, rs, rt, rd, DIV_OP, DIV_MOD);
   1676 }
   1677 
   1678 
   1679 void Assembler::mod(Register rd, Register rs, Register rt) {
   1680   DCHECK(kArchVariant == kMips64r6);
   1681   GenInstrRegister(SPECIAL, rs, rt, rd, MOD_OP, DIV_MOD);
   1682 }
   1683 
   1684 
   1685 void Assembler::divu(Register rs, Register rt) {
   1686   GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DIVU);
   1687 }
   1688 
   1689 
   1690 void Assembler::divu(Register rd, Register rs, Register rt) {
   1691   DCHECK(kArchVariant == kMips64r6);
   1692   GenInstrRegister(SPECIAL, rs, rt, rd, DIV_OP, DIV_MOD_U);
   1693 }
   1694 
   1695 
   1696 void Assembler::modu(Register rd, Register rs, Register rt) {
   1697   DCHECK(kArchVariant == kMips64r6);
   1698   GenInstrRegister(SPECIAL, rs, rt, rd, MOD_OP, DIV_MOD_U);
   1699 }
   1700 
   1701 
   1702 void Assembler::daddu(Register rd, Register rs, Register rt) {
   1703   GenInstrRegister(SPECIAL, rs, rt, rd, 0, DADDU);
   1704 }
   1705 
   1706 
   1707 void Assembler::dsubu(Register rd, Register rs, Register rt) {
   1708   GenInstrRegister(SPECIAL, rs, rt, rd, 0, DSUBU);
   1709 }
   1710 
   1711 
   1712 void Assembler::dmult(Register rs, Register rt) {
   1713   GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DMULT);
   1714 }
   1715 
   1716 
   1717 void Assembler::dmultu(Register rs, Register rt) {
   1718   GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DMULTU);
   1719 }
   1720 
   1721 
   1722 void Assembler::ddiv(Register rs, Register rt) {
   1723   GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DDIV);
   1724 }
   1725 
   1726 
   1727 void Assembler::ddiv(Register rd, Register rs, Register rt) {
   1728   DCHECK(kArchVariant == kMips64r6);
   1729   GenInstrRegister(SPECIAL, rs, rt, rd, DIV_OP, D_DIV_MOD);
   1730 }
   1731 
   1732 
   1733 void Assembler::dmod(Register rd, Register rs, Register rt) {
   1734   DCHECK(kArchVariant == kMips64r6);
   1735   GenInstrRegister(SPECIAL, rs, rt, rd, MOD_OP, D_DIV_MOD);
   1736 }
   1737 
   1738 
   1739 void Assembler::ddivu(Register rs, Register rt) {
   1740   GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DDIVU);
   1741 }
   1742 
   1743 
   1744 void Assembler::ddivu(Register rd, Register rs, Register rt) {
   1745   DCHECK(kArchVariant == kMips64r6);
   1746   GenInstrRegister(SPECIAL, rs, rt, rd, DIV_OP, D_DIV_MOD_U);
   1747 }
   1748 
   1749 
   1750 void Assembler::dmodu(Register rd, Register rs, Register rt) {
   1751   DCHECK(kArchVariant == kMips64r6);
   1752   GenInstrRegister(SPECIAL, rs, rt, rd, MOD_OP, D_DIV_MOD_U);
   1753 }
   1754 
   1755 
   1756 // Logical.
   1757 
   1758 void Assembler::and_(Register rd, Register rs, Register rt) {
   1759   GenInstrRegister(SPECIAL, rs, rt, rd, 0, AND);
   1760 }
   1761 
   1762 
   1763 void Assembler::andi(Register rt, Register rs, int32_t j) {
   1764   DCHECK(is_uint16(j));
   1765   GenInstrImmediate(ANDI, rs, rt, j);
   1766 }
   1767 
   1768 
   1769 void Assembler::or_(Register rd, Register rs, Register rt) {
   1770   GenInstrRegister(SPECIAL, rs, rt, rd, 0, OR);
   1771 }
   1772 
   1773 
   1774 void Assembler::ori(Register rt, Register rs, int32_t j) {
   1775   DCHECK(is_uint16(j));
   1776   GenInstrImmediate(ORI, rs, rt, j);
   1777 }
   1778 
   1779 
   1780 void Assembler::xor_(Register rd, Register rs, Register rt) {
   1781   GenInstrRegister(SPECIAL, rs, rt, rd, 0, XOR);
   1782 }
   1783 
   1784 
   1785 void Assembler::xori(Register rt, Register rs, int32_t j) {
   1786   DCHECK(is_uint16(j));
   1787   GenInstrImmediate(XORI, rs, rt, j);
   1788 }
   1789 
   1790 
   1791 void Assembler::nor(Register rd, Register rs, Register rt) {
   1792   GenInstrRegister(SPECIAL, rs, rt, rd, 0, NOR);
   1793 }
   1794 
   1795 
   1796 // Shifts.
   1797 void Assembler::sll(Register rd,
   1798                     Register rt,
   1799                     uint16_t sa,
   1800                     bool coming_from_nop) {
   1801   // Don't allow nop instructions in the form sll zero_reg, zero_reg to be
   1802   // generated using the sll instruction. They must be generated using
   1803   // nop(int/NopMarkerTypes) or MarkCode(int/NopMarkerTypes) pseudo
   1804   // instructions.
   1805   DCHECK(coming_from_nop || !(rd.is(zero_reg) && rt.is(zero_reg)));
   1806   GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, SLL);
   1807 }
   1808 
   1809 
   1810 void Assembler::sllv(Register rd, Register rt, Register rs) {
   1811   GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLLV);
   1812 }
   1813 
   1814 
   1815 void Assembler::srl(Register rd, Register rt, uint16_t sa) {
   1816   GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, SRL);
   1817 }
   1818 
   1819 
   1820 void Assembler::srlv(Register rd, Register rt, Register rs) {
   1821   GenInstrRegister(SPECIAL, rs, rt, rd, 0, SRLV);
   1822 }
   1823 
   1824 
   1825 void Assembler::sra(Register rd, Register rt, uint16_t sa) {
   1826   GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, SRA);
   1827 }
   1828 
   1829 
   1830 void Assembler::srav(Register rd, Register rt, Register rs) {
   1831   GenInstrRegister(SPECIAL, rs, rt, rd, 0, SRAV);
   1832 }
   1833 
   1834 
   1835 void Assembler::rotr(Register rd, Register rt, uint16_t sa) {
   1836   // Should be called via MacroAssembler::Ror.
   1837   DCHECK(rd.is_valid() && rt.is_valid() && is_uint5(sa));
   1838   DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
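          // ROTR shares SRL's function field; the 1 placed in the rs slot
          // (instruction bit 21) is the R bit selecting rotate instead of shift.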
   1839   Instr instr = SPECIAL | (1 << kRsShift) | (rt.code() << kRtShift)
   1840       | (rd.code() << kRdShift) | (sa << kSaShift) | SRL;
   1841   emit(instr);
   1842 }
   1843 
   1844 
   1845 void Assembler::rotrv(Register rd, Register rt, Register rs) {
   1846   // Should be called via MacroAssembler::Ror.
   1847   DCHECK(rd.is_valid() && rt.is_valid() && rs.is_valid());
   1848   DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
   1849   Instr instr = SPECIAL | (rs.code() << kRsShift) | (rt.code() << kRtShift)
   1850      | (rd.code() << kRdShift) | (1 << kSaShift) | SRLV;
   1851   emit(instr);
   1852 }
   1853 
   1854 
   1855 void Assembler::dsll(Register rd, Register rt, uint16_t sa) {
   1856   GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, DSLL);
   1857 }
   1858 
   1859 
   1860 void Assembler::dsllv(Register rd, Register rt, Register rs) {
   1861   GenInstrRegister(SPECIAL, rs, rt, rd, 0, DSLLV);
   1862 }
   1863 
   1864 
   1865 void Assembler::dsrl(Register rd, Register rt, uint16_t sa) {
   1866   GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, DSRL);
   1867 }
   1868 
   1869 
   1870 void Assembler::dsrlv(Register rd, Register rt, Register rs) {
   1871   GenInstrRegister(SPECIAL, rs, rt, rd, 0, DSRLV);
   1872 }
   1873 
   1874 
   1875 void Assembler::drotr(Register rd, Register rt, uint16_t sa) {
   1876   DCHECK(rd.is_valid() && rt.is_valid() && is_uint5(sa));
   1877   Instr instr = SPECIAL | (1 << kRsShift) | (rt.code() << kRtShift)
   1878       | (rd.code() << kRdShift) | (sa << kSaShift) | DSRL;
   1879   emit(instr);
   1880 }
   1881 
   1882 void Assembler::drotr32(Register rd, Register rt, uint16_t sa) {
   1883   DCHECK(rd.is_valid() && rt.is_valid() && is_uint5(sa));
   1884   Instr instr = SPECIAL | (1 << kRsShift) | (rt.code() << kRtShift) |
   1885                 (rd.code() << kRdShift) | (sa << kSaShift) | DSRL32;
   1886   emit(instr);
   1887 }
   1888 
   1889 void Assembler::drotrv(Register rd, Register rt, Register rs) {
   1890   DCHECK(rd.is_valid() && rt.is_valid() && rs.is_valid());
   1891   Instr instr = SPECIAL | (rs.code() << kRsShift) | (rt.code() << kRtShift)
   1892       | (rd.code() << kRdShift) | (1 << kSaShift) | DSRLV;
   1893   emit(instr);
   1894 }
   1895 
   1896 
   1897 void Assembler::dsra(Register rd, Register rt, uint16_t sa) {
   1898   GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, DSRA);
   1899 }
   1900 
   1901 
   1902 void Assembler::dsrav(Register rd, Register rt, Register rs) {
   1903   GenInstrRegister(SPECIAL, rs, rt, rd, 0, DSRAV);
   1904 }
   1905 
   1906 
   1907 void Assembler::dsll32(Register rd, Register rt, uint16_t sa) {
   1908   GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, DSLL32);
   1909 }
   1910 
   1911 
   1912 void Assembler::dsrl32(Register rd, Register rt, uint16_t sa) {
   1913   GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, DSRL32);
   1914 }
   1915 
   1916 
   1917 void Assembler::dsra32(Register rd, Register rt, uint16_t sa) {
   1918   GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, DSRA32);
   1919 }
   1920 
   1921 
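        // lsa/dlsa compute rd = (rs << (sa + 1)) + rt; the two-bit 'sa' field
        // (0..3) encodes an effective shift of 1..4.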
   1922 void Assembler::lsa(Register rd, Register rt, Register rs, uint8_t sa) {
   1923   DCHECK(rd.is_valid() && rt.is_valid() && rs.is_valid());
   1924   DCHECK(sa <= 3);
   1925   DCHECK(kArchVariant == kMips64r6);
   1926   Instr instr = SPECIAL | rs.code() << kRsShift | rt.code() << kRtShift |
   1927                 rd.code() << kRdShift | sa << kSaShift | LSA;
   1928   emit(instr);
   1929 }
   1930 
   1931 
   1932 void Assembler::dlsa(Register rd, Register rt, Register rs, uint8_t sa) {
   1933   DCHECK(rd.is_valid() && rt.is_valid() && rs.is_valid());
   1934   DCHECK(sa <= 3);
   1935   DCHECK(kArchVariant == kMips64r6);
   1936   Instr instr = SPECIAL | rs.code() << kRsShift | rt.code() << kRtShift |
   1937                 rd.code() << kRdShift | sa << kSaShift | DLSA;
   1938   emit(instr);
   1939 }
   1940 
   1941 
   1942 // ------------Memory-instructions-------------
   1943 
   1944 // Helper for base-reg + offset, when offset is larger than int16.
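        // On r6 this emits daui/daddiu when the carry adjustment below applies;
        // otherwise (and on r2) it emits lui/ori/daddu. Either way, at ends up
        // holding base + offset.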
   1945 void Assembler::LoadRegPlusOffsetToAt(const MemOperand& src) {
   1946   DCHECK(!src.rm().is(at));
   1947   DCHECK(is_int32(src.offset_));
   1948 
   1949   if (kArchVariant == kMips64r6) {
   1950     int32_t hi = (src.offset_ >> kLuiShift) & kImm16Mask;
   1951     if (src.offset_ & kNegOffset) {
   1952       if ((hi & kNegOffset) != ((hi + 1) & kNegOffset)) {
   1953         lui(at, (src.offset_ >> kLuiShift) & kImm16Mask);
   1954         ori(at, at, src.offset_ & kImm16Mask);  // Load 32-bit offset.
   1955         daddu(at, at, src.rm());                // Add base register.
   1956         return;
   1957       }
   1958 
   1959       hi += 1;
   1960     }
   1961 
   1962     daui(at, src.rm(), hi);
   1963     daddiu(at, at, src.offset_ & kImm16Mask);
   1964   } else {
   1965     lui(at, (src.offset_ >> kLuiShift) & kImm16Mask);
   1966     ori(at, at, src.offset_ & kImm16Mask);  // Load 32-bit offset.
   1967     daddu(at, at, src.rm());                // Add base register.
   1968   }
   1969 }
   1970 
   1971 // Helper for base-reg + offset, when the offset does not fit in int16.
   1972 // Loads the upper part of the offset (combined with the base register)
   1973 // into the AT register, and returns the lower part of the offset to be
   1974 // used as the offset field in subsequent load/store instructions.
   1975 int32_t Assembler::LoadRegPlusUpperOffsetPartToAt(const MemOperand& src) {
   1976   DCHECK(!src.rm().is(at));
   1977   DCHECK(is_int32(src.offset_));
   1978   int32_t hi = (src.offset_ >> kLuiShift) & kImm16Mask;
   1979   // If the highest bit of the lower part of the offset is 1, this would make
   1980   // the offset in the load/store instruction negative. We need to compensate
   1981   // for this by adding 1 to the upper part of the offset.
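          // Example: offset 0x12348000 gives hi = 0x1234 + 1 = 0x1235 and
          // lo = 0x8000; the load sign-extends lo to -0x8000, and
          // 0x12350000 - 0x8000 restores 0x12348000. If adding 1 would flip
          // bit 15 of hi (e.g. offset 0x7fff8000), fall back to materializing
          // the full 32-bit offset below.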
   1982   if (src.offset_ & kNegOffset) {
   1983     if ((hi & kNegOffset) != ((hi + 1) & kNegOffset)) {
   1984       LoadRegPlusOffsetToAt(src);
   1985       return 0;
   1986     }
   1987 
   1988     hi += 1;
   1989   }
   1990 
   1991   if (kArchVariant == kMips64r6) {
   1992     daui(at, src.rm(), hi);
   1993   } else {
   1994     lui(at, hi);
   1995     daddu(at, at, src.rm());
   1996   }
   1997   return (src.offset_ & kImm16Mask);
   1998 }
   1999 
   2000 void Assembler::lb(Register rd, const MemOperand& rs) {
   2001   if (is_int16(rs.offset_)) {
   2002     GenInstrImmediate(LB, rs.rm(), rd, rs.offset_);
   2003   } else {  // Offset > 16 bits, use multiple instructions to load.
   2004     int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
   2005     GenInstrImmediate(LB, at, rd, off16);
   2006   }
   2007 }
   2008 
   2009 
   2010 void Assembler::lbu(Register rd, const MemOperand& rs) {
   2011   if (is_int16(rs.offset_)) {
   2012     GenInstrImmediate(LBU, rs.rm(), rd, rs.offset_);
   2013   } else {  // Offset > 16 bits, use multiple instructions to load.
   2014     int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
   2015     GenInstrImmediate(LBU, at, rd, off16);
   2016   }
   2017 }
   2018 
   2019 
   2020 void Assembler::lh(Register rd, const MemOperand& rs) {
   2021   if (is_int16(rs.offset_)) {
   2022     GenInstrImmediate(LH, rs.rm(), rd, rs.offset_);
   2023   } else {  // Offset > 16 bits, use multiple instructions to load.
   2024     int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
   2025     GenInstrImmediate(LH, at, rd, off16);
   2026   }
   2027 }
   2028 
   2029 
   2030 void Assembler::lhu(Register rd, const MemOperand& rs) {
   2031   if (is_int16(rs.offset_)) {
   2032     GenInstrImmediate(LHU, rs.rm(), rd, rs.offset_);
   2033   } else {  // Offset > 16 bits, use multiple instructions to load.
   2034     int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
   2035     GenInstrImmediate(LHU, at, rd, off16);
   2036   }
   2037 }
   2038 
   2039 
   2040 void Assembler::lw(Register rd, const MemOperand& rs) {
   2041   if (is_int16(rs.offset_)) {
   2042     GenInstrImmediate(LW, rs.rm(), rd, rs.offset_);
   2043   } else {  // Offset > 16 bits, use multiple instructions to load.
   2044     int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
   2045     GenInstrImmediate(LW, at, rd, off16);
   2046   }
   2047 }
   2048 
   2049 
   2050 void Assembler::lwu(Register rd, const MemOperand& rs) {
   2051   if (is_int16(rs.offset_)) {
   2052     GenInstrImmediate(LWU, rs.rm(), rd, rs.offset_);
   2053   } else {  // Offset > 16 bits, use multiple instructions to load.
   2054     int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
   2055     GenInstrImmediate(LWU, at, rd, off16);
   2056   }
   2057 }
   2058 
   2059 
   2060 void Assembler::lwl(Register rd, const MemOperand& rs) {
   2061   DCHECK(is_int16(rs.offset_));
   2062   DCHECK(kArchVariant == kMips64r2);
   2063   GenInstrImmediate(LWL, rs.rm(), rd, rs.offset_);
   2064 }
   2065 
   2066 
   2067 void Assembler::lwr(Register rd, const MemOperand& rs) {
   2068   DCHECK(is_int16(rs.offset_));
   2069   DCHECK(kArchVariant == kMips64r2);
   2070   GenInstrImmediate(LWR, rs.rm(), rd, rs.offset_);
   2071 }
   2072 
   2073 
   2074 void Assembler::sb(Register rd, const MemOperand& rs) {
   2075   if (is_int16(rs.offset_)) {
   2076     GenInstrImmediate(SB, rs.rm(), rd, rs.offset_);
   2077   } else {  // Offset > 16 bits, use multiple instructions to store.
   2078     int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
   2079     GenInstrImmediate(SB, at, rd, off16);
   2080   }
   2081 }
   2082 
   2083 
   2084 void Assembler::sh(Register rd, const MemOperand& rs) {
   2085   if (is_int16(rs.offset_)) {
   2086     GenInstrImmediate(SH, rs.rm(), rd, rs.offset_);
   2087   } else {  // Offset > 16 bits, use multiple instructions to store.
   2088     int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
   2089     GenInstrImmediate(SH, at, rd, off16);
   2090   }
   2091 }
   2092 
   2093 
   2094 void Assembler::sw(Register rd, const MemOperand& rs) {
   2095   if (is_int16(rs.offset_)) {
   2096     GenInstrImmediate(SW, rs.rm(), rd, rs.offset_);
   2097   } else {  // Offset > 16 bits, use multiple instructions to store.
   2098     int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
   2099     GenInstrImmediate(SW, at, rd, off16);
   2100   }
   2101 }
   2102 
   2103 
   2104 void Assembler::swl(Register rd, const MemOperand& rs) {
   2105   DCHECK(is_int16(rs.offset_));
   2106   DCHECK(kArchVariant == kMips64r2);
   2107   GenInstrImmediate(SWL, rs.rm(), rd, rs.offset_);
   2108 }
   2109 
   2110 
   2111 void Assembler::swr(Register rd, const MemOperand& rs) {
   2112   DCHECK(is_int16(rs.offset_));
   2113   DCHECK(kArchVariant == kMips64r2);
   2114   GenInstrImmediate(SWR, rs.rm(), rd, rs.offset_);
   2115 }
   2116 
   2117 
   2118 void Assembler::lui(Register rd, int32_t j) {
   2119   DCHECK(is_uint16(j));
   2120   GenInstrImmediate(LUI, zero_reg, rd, j);
   2121 }
   2122 
   2123 
   2124 void Assembler::aui(Register rt, Register rs, int32_t j) {
   2125   // This instruction uses the same opcode as 'lui'. The difference in
   2126   // encoding is that 'lui' has the zero register in the rs field.
   2127   DCHECK(is_uint16(j));
   2128   GenInstrImmediate(LUI, rs, rt, j);
   2129 }
   2130 
   2131 
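        // daui, dahi and dati (r6) add a 16-bit immediate shifted into bits
        // 31:16, 47:32 and 63:48 of the destination respectively; chained with
        // ori they can materialize a full 64-bit constant.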
   2132 void Assembler::daui(Register rt, Register rs, int32_t j) {
   2133   DCHECK(is_uint16(j));
   2134   DCHECK(!rs.is(zero_reg));
   2135   GenInstrImmediate(DAUI, rs, rt, j);
   2136 }
   2137 
   2138 
   2139 void Assembler::dahi(Register rs, int32_t j) {
   2140   DCHECK(is_uint16(j));
   2141   GenInstrImmediate(REGIMM, rs, DAHI, j);
   2142 }
   2143 
   2144 
   2145 void Assembler::dati(Register rs, int32_t j) {
   2146   DCHECK(is_uint16(j));
   2147   GenInstrImmediate(REGIMM, rs, DATI, j);
   2148 }
   2149 
   2150 
   2151 void Assembler::ldl(Register rd, const MemOperand& rs) {
   2152   DCHECK(is_int16(rs.offset_));
   2153   DCHECK(kArchVariant == kMips64r2);
   2154   GenInstrImmediate(LDL, rs.rm(), rd, rs.offset_);
   2155 }
   2156 
   2157 
   2158 void Assembler::ldr(Register rd, const MemOperand& rs) {
   2159   DCHECK(is_int16(rs.offset_));
   2160   DCHECK(kArchVariant == kMips64r2);
   2161   GenInstrImmediate(LDR, rs.rm(), rd, rs.offset_);
   2162 }
   2163 
   2164 
   2165 void Assembler::sdl(Register rd, const MemOperand& rs) {
   2166   DCHECK(is_int16(rs.offset_));
   2167   DCHECK(kArchVariant == kMips64r2);
   2168   GenInstrImmediate(SDL, rs.rm(), rd, rs.offset_);
   2169 }
   2170 
   2171 
   2172 void Assembler::sdr(Register rd, const MemOperand& rs) {
   2173   DCHECK(is_int16(rs.offset_));
   2174   DCHECK(kArchVariant == kMips64r2);
   2175   GenInstrImmediate(SDR, rs.rm(), rd, rs.offset_);
   2176 }
   2177 
   2178 
   2179 void Assembler::ld(Register rd, const MemOperand& rs) {
   2180   if (is_int16(rs.offset_)) {
   2181     GenInstrImmediate(LD, rs.rm(), rd, rs.offset_);
   2182   } else {  // Offset > 16 bits, use multiple instructions to load.
   2183     int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
   2184     GenInstrImmediate(LD, at, rd, off16);
   2185   }
   2186 }
   2187 
   2188 
   2189 void Assembler::sd(Register rd, const MemOperand& rs) {
   2190   if (is_int16(rs.offset_)) {
   2191     GenInstrImmediate(SD, rs.rm(), rd, rs.offset_);
   2192   } else {  // Offset > 16 bits, use multiple instructions to store.
   2193     int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
   2194     GenInstrImmediate(SD, at, rd, off16);
   2195   }
   2196 }
   2197 
   2198 
   2199 // ---------PC-Relative instructions-----------
   2200 
   2201 void Assembler::addiupc(Register rs, int32_t imm19) {
   2202   DCHECK(kArchVariant == kMips64r6);
   2203   DCHECK(rs.is_valid() && is_int19(imm19));
   2204   uint32_t imm21 = ADDIUPC << kImm19Bits | (imm19 & kImm19Mask);
   2205   GenInstrImmediate(PCREL, rs, imm21);
   2206 }
   2207 
   2208 
   2209 void Assembler::lwpc(Register rs, int32_t offset19) {
   2210   DCHECK(kArchVariant == kMips64r6);
   2211   DCHECK(rs.is_valid() && is_int19(offset19));
   2212   uint32_t imm21 = LWPC << kImm19Bits | (offset19 & kImm19Mask);
   2213   GenInstrImmediate(PCREL, rs, imm21);
   2214 }
   2215 
   2216 
   2217 void Assembler::lwupc(Register rs, int32_t offset19) {
   2218   DCHECK(kArchVariant == kMips64r6);
   2219   DCHECK(rs.is_valid() && is_int19(offset19));
   2220   uint32_t imm21 = LWUPC << kImm19Bits | (offset19 & kImm19Mask);
   2221   GenInstrImmediate(PCREL, rs, imm21);
   2222 }
   2223 
   2224 
   2225 void Assembler::ldpc(Register rs, int32_t offset18) {
   2226   DCHECK(kArchVariant == kMips64r6);
   2227   DCHECK(rs.is_valid() && is_int18(offset18));
   2228   uint32_t imm21 = LDPC << kImm18Bits | (offset18 & kImm18Mask);
   2229   GenInstrImmediate(PCREL, rs, imm21);
   2230 }
   2231 
   2232 
   2233 void Assembler::auipc(Register rs, int16_t imm16) {
   2234   DCHECK(kArchVariant == kMips64r6);
   2235   DCHECK(rs.is_valid());
   2236   uint32_t imm21 = AUIPC << kImm16Bits | (imm16 & kImm16Mask);
   2237   GenInstrImmediate(PCREL, rs, imm21);
   2238 }
   2239 
   2240 
   2241 void Assembler::aluipc(Register rs, int16_t imm16) {
   2242   DCHECK(kArchVariant == kMips64r6);
   2243   DCHECK(rs.is_valid());
   2244   uint32_t imm21 = ALUIPC << kImm16Bits | (imm16 & kImm16Mask);
   2245   GenInstrImmediate(PCREL, rs, imm21);
   2246 }
   2247 
   2248 
   2249 // -------------Misc-instructions--------------
   2250 
   2251 // Break / Trap instructions.
   2252 void Assembler::break_(uint32_t code, bool break_as_stop) {
   2253   DCHECK((code & ~0xfffff) == 0);
   2254   // We need to reject break codes that overlap the stop-code range unless
   2255   // break_as_stop is set, because the simulator expects a char pointer
   2256   // after the stop instruction. See constants-mips64.h for an explanation.
   2257   DCHECK((break_as_stop &&
   2258           code <= kMaxStopCode &&
   2259           code > kMaxWatchpointCode) ||
   2260          (!break_as_stop &&
   2261           (code > kMaxStopCode ||
   2262            code <= kMaxWatchpointCode)));
   2263   Instr break_instr = SPECIAL | BREAK | (code << 6);
   2264   emit(break_instr);
   2265 }
   2266 
   2267 
   2268 void Assembler::stop(const char* msg, uint32_t code) {
   2269   DCHECK(code > kMaxWatchpointCode);
   2270   DCHECK(code <= kMaxStopCode);
   2271 #if defined(V8_HOST_ARCH_MIPS) || defined(V8_HOST_ARCH_MIPS64)
   2272   break_(0x54321);
   2273 #else  // V8_HOST_ARCH_MIPS || V8_HOST_ARCH_MIPS64
   2274   BlockTrampolinePoolFor(3);
   2275   // The Simulator will handle the stop instruction and get the message address.
   2276   // On MIPS stop() is just a special kind of break_().
   2277   break_(code, true);
   2278   // Do not embed the message string address! We used to do this, but that
   2279   // made snapshots created from position-independent executable builds
   2280   // non-deterministic.
   2281   // TODO(yangguo): remove this field entirely.
   2282   nop();
   2283 #endif
   2284 }
   2285 
   2286 
   2287 void Assembler::tge(Register rs, Register rt, uint16_t code) {
   2288   DCHECK(is_uint10(code));
   2289   Instr instr = SPECIAL | TGE | rs.code() << kRsShift
   2290       | rt.code() << kRtShift | code << 6;
   2291   emit(instr);
   2292 }
   2293 
   2294 
   2295 void Assembler::tgeu(Register rs, Register rt, uint16_t code) {
   2296   DCHECK(is_uint10(code));
   2297   Instr instr = SPECIAL | TGEU | rs.code() << kRsShift
   2298       | rt.code() << kRtShift | code << 6;
   2299   emit(instr);
   2300 }
   2301 
   2302 
   2303 void Assembler::tlt(Register rs, Register rt, uint16_t code) {
   2304   DCHECK(is_uint10(code));
   2305   Instr instr =
   2306       SPECIAL | TLT | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
   2307   emit(instr);
   2308 }
   2309 
   2310 
   2311 void Assembler::tltu(Register rs, Register rt, uint16_t code) {
   2312   DCHECK(is_uint10(code));
   2313   Instr instr =
   2314       SPECIAL | TLTU | rs.code() << kRsShift
   2315       | rt.code() << kRtShift | code << 6;
   2316   emit(instr);
   2317 }
   2318 
   2319 
   2320 void Assembler::teq(Register rs, Register rt, uint16_t code) {
   2321   DCHECK(is_uint10(code));
   2322   Instr instr =
   2323       SPECIAL | TEQ | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
   2324   emit(instr);
   2325 }
   2326 
   2327 
   2328 void Assembler::tne(Register rs, Register rt, uint16_t code) {
   2329   DCHECK(is_uint10(code));
   2330   Instr instr =
   2331       SPECIAL | TNE | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
   2332   emit(instr);
   2333 }
   2334 
   2335 void Assembler::sync() {
   2336   Instr sync_instr = SPECIAL | SYNC;
   2337   emit(sync_instr);
   2338 }
   2339 
   2340 // Move from HI/LO register.
   2341 
   2342 void Assembler::mfhi(Register rd) {
   2343   GenInstrRegister(SPECIAL, zero_reg, zero_reg, rd, 0, MFHI);
   2344 }
   2345 
   2346 
   2347 void Assembler::mflo(Register rd) {
   2348   GenInstrRegister(SPECIAL, zero_reg, zero_reg, rd, 0, MFLO);
   2349 }
   2350 
   2351 
   2352 // Set on less than instructions.
   2353 void Assembler::slt(Register rd, Register rs, Register rt) {
   2354   GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLT);
   2355 }
   2356 
   2357 
   2358 void Assembler::sltu(Register rd, Register rs, Register rt) {
   2359   GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLTU);
   2360 }
   2361 
   2362 
   2363 void Assembler::slti(Register rt, Register rs, int32_t j) {
   2364   GenInstrImmediate(SLTI, rs, rt, j);
   2365 }
   2366 
   2367 
   2368 void Assembler::sltiu(Register rt, Register rs, int32_t j) {
   2369   GenInstrImmediate(SLTIU, rs, rt, j);
   2370 }
   2371 
   2372 
   2373 // Conditional move.
   2374 void Assembler::movz(Register rd, Register rs, Register rt) {
   2375   GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVZ);
   2376 }
   2377 
   2378 
   2379 void Assembler::movn(Register rd, Register rs, Register rt) {
   2380   GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVN);
   2381 }
   2382 
   2383 
   2384 void Assembler::movt(Register rd, Register rs, uint16_t cc) {
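          // MOVCI packs the FP condition into the rt slot: bits 4:2 of the
          // register code hold cc and bit 0 is the tf flag (1 = move if true).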
   2385   Register rt;
   2386   rt.reg_code = (cc & 0x0007) << 2 | 1;
   2387   GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVCI);
   2388 }
   2389 
   2390 
   2391 void Assembler::movf(Register rd, Register rs, uint16_t cc) {
   2392   Register rt;
   2393   rt.reg_code = (cc & 0x0007) << 2 | 0;
   2394   GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVCI);
   2395 }
   2396 
   2397 
   2398 void Assembler::min_s(FPURegister fd, FPURegister fs, FPURegister ft) {
   2399   min(S, fd, fs, ft);
   2400 }
   2401 
   2402 
   2403 void Assembler::min_d(FPURegister fd, FPURegister fs, FPURegister ft) {
   2404   min(D, fd, fs, ft);
   2405 }
   2406 
   2407 
   2408 void Assembler::max_s(FPURegister fd, FPURegister fs, FPURegister ft) {
   2409   max(S, fd, fs, ft);
   2410 }
   2411 
   2412 
   2413 void Assembler::max_d(FPURegister fd, FPURegister fs, FPURegister ft) {
   2414   max(D, fd, fs, ft);
   2415 }
   2416 
   2417 
   2418 void Assembler::mina_s(FPURegister fd, FPURegister fs, FPURegister ft) {
   2419   mina(S, fd, fs, ft);
   2420 }
   2421 
   2422 
   2423 void Assembler::mina_d(FPURegister fd, FPURegister fs, FPURegister ft) {
   2424   mina(D, fd, fs, ft);
   2425 }
   2426 
   2427 
   2428 void Assembler::maxa_s(FPURegister fd, FPURegister fs, FPURegister ft) {
   2429   maxa(S, fd, fs, ft);
   2430 }
   2431 
   2432 
   2433 void Assembler::maxa_d(FPURegister fd, FPURegister fs, FPURegister ft) {
   2434   maxa(D, fd, fs, ft);
   2435 }
   2436 
   2437 
   2438 void Assembler::max(SecondaryField fmt, FPURegister fd, FPURegister fs,
   2439                     FPURegister ft) {
   2440   DCHECK(kArchVariant == kMips64r6);
   2441   DCHECK((fmt == D) || (fmt == S));
   2442   GenInstrRegister(COP1, fmt, ft, fs, fd, MAX);
   2443 }
   2444 
   2445 
   2446 void Assembler::min(SecondaryField fmt, FPURegister fd, FPURegister fs,
   2447                     FPURegister ft) {
   2448   DCHECK(kArchVariant == kMips64r6);
   2449   DCHECK((fmt == D) || (fmt == S));
   2450   GenInstrRegister(COP1, fmt, ft, fs, fd, MIN);
   2451 }
   2452 
   2453 
   2454 // GPR.
   2455 void Assembler::seleqz(Register rd, Register rs, Register rt) {
   2456   DCHECK(kArchVariant == kMips64r6);
   2457   GenInstrRegister(SPECIAL, rs, rt, rd, 0, SELEQZ_S);
   2458 }
   2459 
   2460 
   2461 // GPR.
   2462 void Assembler::selnez(Register rd, Register rs, Register rt) {
   2463   DCHECK(kArchVariant == kMips64r6);
   2464   GenInstrRegister(SPECIAL, rs, rt, rd, 0, SELNEZ_S);
   2465 }
   2466 
   2467 
   2468 // Bit twiddling.
   2469 void Assembler::clz(Register rd, Register rs) {
   2470   if (kArchVariant != kMips64r6) {
   2471     // Clz instr requires same GPR number in 'rd' and 'rt' fields.
   2472     GenInstrRegister(SPECIAL2, rs, rd, rd, 0, CLZ);
   2473   } else {
   2474     GenInstrRegister(SPECIAL, rs, zero_reg, rd, 1, CLZ_R6);
   2475   }
   2476 }
   2477 
   2478 
   2479 void Assembler::dclz(Register rd, Register rs) {
   2480   if (kArchVariant != kMips64r6) {
   2481     // dclz instr requires same GPR number in 'rd' and 'rt' fields.
   2482     GenInstrRegister(SPECIAL2, rs, rd, rd, 0, DCLZ);
   2483   } else {
   2484     GenInstrRegister(SPECIAL, rs, zero_reg, rd, 1, DCLZ_R6);
   2485   }
   2486 }
   2487 
   2488 
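        // ins_ replaces bits [pos + size - 1 : pos] of rt with the low 'size'
        // bits of rs; ext_ zero-extends bits [pos + size - 1 : pos] of rs into
        // rt. The msb and lsb instruction fields encode these bit bounds.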
   2489 void Assembler::ins_(Register rt, Register rs, uint16_t pos, uint16_t size) {
   2490   // Should be called via MacroAssembler::Ins.
   2491   // Ins instr has 'rt' field as dest, and two uint5: msb, lsb.
   2492   DCHECK((kArchVariant == kMips64r2) || (kArchVariant == kMips64r6));
   2493   GenInstrRegister(SPECIAL3, rs, rt, pos + size - 1, pos, INS);
   2494 }
   2495 
   2496 
   2497 void Assembler::dins_(Register rt, Register rs, uint16_t pos, uint16_t size) {
   2498   // Should be called via MacroAssembler::Dins.
   2499   // Dins instr has 'rt' field as dest, and two uint5: msb, lsb.
   2500   DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
   2501   GenInstrRegister(SPECIAL3, rs, rt, pos + size - 1, pos, DINS);
   2502 }
   2503 
   2504 
   2505 void Assembler::ext_(Register rt, Register rs, uint16_t pos, uint16_t size) {
   2506   // Should be called via MacroAssembler::Ext.
   2507   // Ext instr has 'rt' field as dest, and two uint5: msb, lsb.
   2508   DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
   2509   GenInstrRegister(SPECIAL3, rs, rt, size - 1, pos, EXT);
   2510 }
   2511 
   2512 
   2513 void Assembler::dext_(Register rt, Register rs, uint16_t pos, uint16_t size) {
   2514   // Should be called via MacroAssembler::Dext.
   2515   // Dext instr has 'rt' field as dest, and two uint5: msb, lsb.
   2516   DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
   2517   GenInstrRegister(SPECIAL3, rs, rt, size - 1, pos, DEXT);
   2518 }
   2519 
   2520 
   2521 void Assembler::dextm(Register rt, Register rs, uint16_t pos, uint16_t size) {
   2522   // Should be called via MacroAssembler::Dextm.
   2523   // Dextm instr has 'rt' field as dest, and two uint5: msb - 32, lsb.
   2524   DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
   2525   GenInstrRegister(SPECIAL3, rs, rt, size - 1 - 32, pos, DEXTM);
   2526 }
   2527 
   2528 
   2529 void Assembler::dextu(Register rt, Register rs, uint16_t pos, uint16_t size) {
   2530   // Should be called via MacroAssembler::Dextu.
   2531   // Dextu instr has 'rt' field as dest, and two uint5: msb, lsb - 32.
   2532   DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
   2533   GenInstrRegister(SPECIAL3, rs, rt, size - 1, pos - 32, DEXTU);
   2534 }
   2535 
   2536 
   2537 void Assembler::bitswap(Register rd, Register rt) {
   2538   DCHECK(kArchVariant == kMips64r6);
   2539   GenInstrRegister(SPECIAL3, zero_reg, rt, rd, 0, BSHFL);
   2540 }
   2541 
   2542 
   2543 void Assembler::dbitswap(Register rd, Register rt) {
   2544   DCHECK(kArchVariant == kMips64r6);
   2545   GenInstrRegister(SPECIAL3, zero_reg, rt, rd, 0, DBSHFL);
   2546 }
   2547 
   2548 
   2549 void Assembler::pref(int32_t hint, const MemOperand& rs) {
   2550   DCHECK(is_uint5(hint) && is_uint16(rs.offset_));
   2551   Instr instr = PREF | (rs.rm().code() << kRsShift) | (hint << kRtShift)
   2552       | (rs.offset_);
   2553   emit(instr);
   2554 }
   2555 
   2556 
   2557 void Assembler::align(Register rd, Register rs, Register rt, uint8_t bp) {
   2558   DCHECK(kArchVariant == kMips64r6);
   2559   DCHECK(is_uint3(bp));
   2560   uint16_t sa = (ALIGN << kBp2Bits) | bp;
   2561   GenInstrRegister(SPECIAL3, rs, rt, rd, sa, BSHFL);
   2562 }
   2563 
   2564 
   2565 void Assembler::dalign(Register rd, Register rs, Register rt, uint8_t bp) {
   2566   DCHECK(kArchVariant == kMips64r6);
   2567   DCHECK(is_uint3(bp));
   2568   uint16_t sa = (DALIGN << kBp3Bits) | bp;
   2569   GenInstrRegister(SPECIAL3, rs, rt, rd, sa, DBSHFL);
   2570 }
   2571 
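        // wsbh swaps the bytes within each halfword (0x11223344 -> 0x22114433);
        // dsbh does the same for all four halfwords of a doubleword, and dshd
        // reverses the order of the four halfwords.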
   2572 void Assembler::wsbh(Register rd, Register rt) {
   2573   DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
   2574   GenInstrRegister(SPECIAL3, zero_reg, rt, rd, WSBH, BSHFL);
   2575 }
   2576 
   2577 void Assembler::dsbh(Register rd, Register rt) {
   2578   DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
   2579   GenInstrRegister(SPECIAL3, zero_reg, rt, rd, DSBH, DBSHFL);
   2580 }
   2581 
   2582 void Assembler::dshd(Register rd, Register rt) {
   2583   DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
   2584   GenInstrRegister(SPECIAL3, zero_reg, rt, rd, DSHD, DBSHFL);
   2585 }
   2586 
   2587 void Assembler::seh(Register rd, Register rt) {
   2588   DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
   2589   GenInstrRegister(SPECIAL3, zero_reg, rt, rd, SEH, BSHFL);
   2590 }
   2591 
   2592 void Assembler::seb(Register rd, Register rt) {
   2593   DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
   2594   GenInstrRegister(SPECIAL3, zero_reg, rt, rd, SEB, BSHFL);
   2595 }
   2596 
   2597 // --------Coprocessor-instructions----------------
   2598 
   2599 // Load, store, move.
   2600 void Assembler::lwc1(FPURegister fd, const MemOperand& src) {
   2601   if (is_int16(src.offset_)) {
   2602     GenInstrImmediate(LWC1, src.rm(), fd, src.offset_);
   2603   } else {  // Offset > 16 bits, use multiple instructions to load.
   2604     int32_t off16 = LoadRegPlusUpperOffsetPartToAt(src);
   2605     GenInstrImmediate(LWC1, at, fd, off16);
   2606   }
   2607 }
   2608 
   2609 
   2610 void Assembler::ldc1(FPURegister fd, const MemOperand& src) {
   2611   if (is_int16(src.offset_)) {
   2612     GenInstrImmediate(LDC1, src.rm(), fd, src.offset_);
   2613   } else {  // Offset > 16 bits, use multiple instructions to load.
   2614     int32_t off16 = LoadRegPlusUpperOffsetPartToAt(src);
   2615     GenInstrImmediate(LDC1, at, fd, off16);
   2616   }
   2617 }
   2618 
   2619 
   2620 void Assembler::swc1(FPURegister fd, const MemOperand& src) {
   2621   if (is_int16(src.offset_)) {
   2622     GenInstrImmediate(SWC1, src.rm(), fd, src.offset_);
   2623   } else {  // Offset > 16 bits, use multiple instructions to store.
   2624     int32_t off16 = LoadRegPlusUpperOffsetPartToAt(src);
   2625     GenInstrImmediate(SWC1, at, fd, off16);
   2626   }
   2627 }
   2628 
   2629 
   2630 void Assembler::sdc1(FPURegister fd, const MemOperand& src) {
   2631   DCHECK(!src.rm().is(at));
   2632   if (is_int16(src.offset_)) {
   2633     GenInstrImmediate(SDC1, src.rm(), fd, src.offset_);
   2634   } else {  // Offset > 16 bits, use multiple instructions to store.
   2635     int32_t off16 = LoadRegPlusUpperOffsetPartToAt(src);
   2636     GenInstrImmediate(SDC1, at, fd, off16);
   2637   }
   2638 }
   2639 
   2640 
   2641 void Assembler::mtc1(Register rt, FPURegister fs) {
   2642   GenInstrRegister(COP1, MTC1, rt, fs, f0);
   2643 }
   2644 
   2645 
   2646 void Assembler::mthc1(Register rt, FPURegister fs) {
   2647   GenInstrRegister(COP1, MTHC1, rt, fs, f0);
   2648 }
   2649 
   2650 
   2651 void Assembler::dmtc1(Register rt, FPURegister fs) {
   2652   GenInstrRegister(COP1, DMTC1, rt, fs, f0);
   2653 }
   2654 
   2655 
   2656 void Assembler::mfc1(Register rt, FPURegister fs) {
   2657   GenInstrRegister(COP1, MFC1, rt, fs, f0);
   2658 }
   2659 
   2660 
   2661 void Assembler::mfhc1(Register rt, FPURegister fs) {
   2662   GenInstrRegister(COP1, MFHC1, rt, fs, f0);
   2663 }
   2664 
   2665 
   2666 void Assembler::dmfc1(Register rt, FPURegister fs) {
   2667   GenInstrRegister(COP1, DMFC1, rt, fs, f0);
   2668 }
   2669 
   2670 
   2671 void Assembler::ctc1(Register rt, FPUControlRegister fs) {
   2672   GenInstrRegister(COP1, CTC1, rt, fs);
   2673 }
   2674 
   2675 
   2676 void Assembler::cfc1(Register rt, FPUControlRegister fs) {
   2677   GenInstrRegister(COP1, CFC1, rt, fs);
   2678 }
   2679 
   2680 
   2681 void Assembler::DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) {
   2682   uint64_t i;
   2683   memcpy(&i, &d, 8);
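          // e.g. d = 1.0 gives i = 0x3FF0000000000000, so *hi = 0x3FF00000 and
          // *lo = 0.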
   2684 
   2685   *lo = i & 0xffffffff;
   2686   *hi = i >> 32;
   2687 }
   2688 
   2689 
   2690 void Assembler::sel(SecondaryField fmt, FPURegister fd, FPURegister fs,
   2691                     FPURegister ft) {
   2692   DCHECK(kArchVariant == kMips64r6);
   2693   DCHECK((fmt == D) || (fmt == S));
   2694 
   2695   GenInstrRegister(COP1, fmt, ft, fs, fd, SEL);
   2696 }
   2697 
   2698 
   2699 void Assembler::sel_s(FPURegister fd, FPURegister fs, FPURegister ft) {
   2700   sel(S, fd, fs, ft);
   2701 }
   2702 
   2703 
   2704 void Assembler::sel_d(FPURegister fd, FPURegister fs, FPURegister ft) {
   2705   sel(D, fd, fs, ft);
   2706 }
   2707 
   2708 
   2709 // FPR.
   2710 void Assembler::seleqz(SecondaryField fmt, FPURegister fd, FPURegister fs,
   2711                        FPURegister ft) {
   2712   DCHECK((fmt == D) || (fmt == S));
   2713   GenInstrRegister(COP1, fmt, ft, fs, fd, SELEQZ_C);
   2714 }
   2715 
   2716 
   2717 void Assembler::seleqz_d(FPURegister fd, FPURegister fs, FPURegister ft) {
   2718   seleqz(D, fd, fs, ft);
   2719 }
   2720 
   2721 
   2722 void Assembler::seleqz_s(FPURegister fd, FPURegister fs, FPURegister ft) {
   2723   seleqz(S, fd, fs, ft);
   2724 }
   2725 
   2726 
   2727 void Assembler::selnez_d(FPURegister fd, FPURegister fs, FPURegister ft) {
   2728   selnez(D, fd, fs, ft);
   2729 }
   2730 
   2731 
   2732 void Assembler::selnez_s(FPURegister fd, FPURegister fs, FPURegister ft) {
   2733   selnez(S, fd, fs, ft);
   2734 }
   2735 
   2736 
   2737 void Assembler::movz_s(FPURegister fd, FPURegister fs, Register rt) {
   2738   DCHECK(kArchVariant == kMips64r2);
   2739   GenInstrRegister(COP1, S, rt, fs, fd, MOVZ_C);
   2740 }
   2741 
   2742 
   2743 void Assembler::movz_d(FPURegister fd, FPURegister fs, Register rt) {
   2744   DCHECK(kArchVariant == kMips64r2);
   2745   GenInstrRegister(COP1, D, rt, fs, fd, MOVZ_C);
   2746 }
   2747 
   2748 
   2749 void Assembler::movt_s(FPURegister fd, FPURegister fs, uint16_t cc) {
   2750   DCHECK(kArchVariant == kMips64r2);
   2751   FPURegister ft;
   2752   ft.reg_code = (cc & 0x0007) << 2 | 1;
   2753   GenInstrRegister(COP1, S, ft, fs, fd, MOVF);
   2754 }
   2755 
   2756 
   2757 void Assembler::movt_d(FPURegister fd, FPURegister fs, uint16_t cc) {
   2758   DCHECK(kArchVariant == kMips64r2);
   2759   FPURegister ft;
   2760   ft.reg_code = (cc & 0x0007) << 2 | 1;
   2761   GenInstrRegister(COP1, D, ft, fs, fd, MOVF);
   2762 }
   2763 
   2764 
   2765 void Assembler::movf_s(FPURegister fd, FPURegister fs, uint16_t cc) {
   2766   DCHECK(kArchVariant == kMips64r2);
   2767   FPURegister ft;
   2768   ft.reg_code = (cc & 0x0007) << 2 | 0;
   2769   GenInstrRegister(COP1, S, ft, fs, fd, MOVF);
   2770 }
   2771 
   2772 
   2773 void Assembler::movf_d(FPURegister fd, FPURegister fs, uint16_t cc) {
   2774   DCHECK(kArchVariant == kMips64r2);
   2775   FPURegister ft;
   2776   ft.reg_code = (cc & 0x0007) << 2 | 0;
   2777   GenInstrRegister(COP1, D, ft, fs, fd, MOVF);
   2778 }
   2779 
   2780 
   2781 void Assembler::movn_s(FPURegister fd, FPURegister fs, Register rt) {
   2782   DCHECK(kArchVariant == kMips64r2);
   2783   GenInstrRegister(COP1, S, rt, fs, fd, MOVN_C);
   2784 }
   2785 
   2786 
   2787 void Assembler::movn_d(FPURegister fd, FPURegister fs, Register rt) {
   2788   DCHECK(kArchVariant == kMips64r2);
   2789   GenInstrRegister(COP1, D, rt, fs, fd, MOVN_C);
   2790 }
   2791 
   2792 
   2793 // FPR.
   2794 void Assembler::selnez(SecondaryField fmt, FPURegister fd, FPURegister fs,
   2795                        FPURegister ft) {
   2796   DCHECK(kArchVariant == kMips64r6);
   2797   DCHECK((fmt == D) || (fmt == S));
   2798   GenInstrRegister(COP1, fmt, ft, fs, fd, SELNEZ_C);
   2799 }
   2800 
   2801 
   2802 // Arithmetic.
   2803 
   2804 void Assembler::add_s(FPURegister fd, FPURegister fs, FPURegister ft) {
   2805   GenInstrRegister(COP1, S, ft, fs, fd, ADD_D);
   2806 }
   2807 
   2808 
   2809 void Assembler::add_d(FPURegister fd, FPURegister fs, FPURegister ft) {
   2810   GenInstrRegister(COP1, D, ft, fs, fd, ADD_D);
   2811 }
   2812 
   2813 
   2814 void Assembler::sub_s(FPURegister fd, FPURegister fs, FPURegister ft) {
   2815   GenInstrRegister(COP1, S, ft, fs, fd, SUB_D);
   2816 }
   2817 
   2818 
   2819 void Assembler::sub_d(FPURegister fd, FPURegister fs, FPURegister ft) {
   2820   GenInstrRegister(COP1, D, ft, fs, fd, SUB_D);
   2821 }
   2822 
   2823 
   2824 void Assembler::mul_s(FPURegister fd, FPURegister fs, FPURegister ft) {
   2825   GenInstrRegister(COP1, S, ft, fs, fd, MUL_D);
   2826 }
   2827 
   2828 
   2829 void Assembler::mul_d(FPURegister fd, FPURegister fs, FPURegister ft) {
   2830   GenInstrRegister(COP1, D, ft, fs, fd, MUL_D);
   2831 }
   2832 
   2833 void Assembler::madd_s(FPURegister fd, FPURegister fr, FPURegister fs,
   2834                        FPURegister ft) {
   2835   DCHECK(kArchVariant == kMips64r2);
   2836   GenInstrRegister(COP1X, fr, ft, fs, fd, MADD_S);
   2837 }
   2838 
   2839 void Assembler::madd_d(FPURegister fd, FPURegister fr, FPURegister fs,
   2840     FPURegister ft) {
   2841   DCHECK(kArchVariant == kMips64r2);
   2842   GenInstrRegister(COP1X, fr, ft, fs, fd, MADD_D);
   2843 }
   2844 
   2845 void Assembler::msub_s(FPURegister fd, FPURegister fr, FPURegister fs,
   2846                        FPURegister ft) {
   2847   DCHECK(kArchVariant == kMips64r2);
   2848   GenInstrRegister(COP1X, fr, ft, fs, fd, MSUB_S);
   2849 }
   2850 
   2851 void Assembler::msub_d(FPURegister fd, FPURegister fr, FPURegister fs,
   2852                        FPURegister ft) {
   2853   DCHECK(kArchVariant == kMips64r2);
   2854   GenInstrRegister(COP1X, fr, ft, fs, fd, MSUB_D);
   2855 }
   2856 
   2857 void Assembler::maddf_s(FPURegister fd, FPURegister fs, FPURegister ft) {
   2858   DCHECK(kArchVariant == kMips64r6);
   2859   GenInstrRegister(COP1, S, ft, fs, fd, MADDF_S);
   2860 }
   2861 
   2862 void Assembler::maddf_d(FPURegister fd, FPURegister fs, FPURegister ft) {
   2863   DCHECK(kArchVariant == kMips64r6);
   2864   GenInstrRegister(COP1, D, ft, fs, fd, MADDF_D);
   2865 }
   2866 
   2867 void Assembler::msubf_s(FPURegister fd, FPURegister fs, FPURegister ft) {
   2868   DCHECK(kArchVariant == kMips64r6);
   2869   GenInstrRegister(COP1, S, ft, fs, fd, MSUBF_S);
   2870 }
   2871 
   2872 void Assembler::msubf_d(FPURegister fd, FPURegister fs, FPURegister ft) {
   2873   DCHECK(kArchVariant == kMips64r6);
   2874   GenInstrRegister(COP1, D, ft, fs, fd, MSUBF_D);
   2875 }
   2876 
   2877 void Assembler::div_s(FPURegister fd, FPURegister fs, FPURegister ft) {
   2878   GenInstrRegister(COP1, S, ft, fs, fd, DIV_D);
   2879 }
   2880 
   2881 
   2882 void Assembler::div_d(FPURegister fd, FPURegister fs, FPURegister ft) {
   2883   GenInstrRegister(COP1, D, ft, fs, fd, DIV_D);
   2884 }
   2885 
   2886 
   2887 void Assembler::abs_s(FPURegister fd, FPURegister fs) {
   2888   GenInstrRegister(COP1, S, f0, fs, fd, ABS_D);
   2889 }
   2890 
   2891 
   2892 void Assembler::abs_d(FPURegister fd, FPURegister fs) {
   2893   GenInstrRegister(COP1, D, f0, fs, fd, ABS_D);
   2894 }
   2895 
   2896 
   2897 void Assembler::mov_d(FPURegister fd, FPURegister fs) {
   2898   GenInstrRegister(COP1, D, f0, fs, fd, MOV_D);
   2899 }
   2900 
   2901 
   2902 void Assembler::mov_s(FPURegister fd, FPURegister fs) {
   2903   GenInstrRegister(COP1, S, f0, fs, fd, MOV_S);
   2904 }
   2905 
   2906 
   2907 void Assembler::neg_s(FPURegister fd, FPURegister fs) {
   2908   GenInstrRegister(COP1, S, f0, fs, fd, NEG_D);
   2909 }
   2910 
   2911 
   2912 void Assembler::neg_d(FPURegister fd, FPURegister fs) {
   2913   GenInstrRegister(COP1, D, f0, fs, fd, NEG_D);
   2914 }
   2915 
   2916 
   2917 void Assembler::sqrt_s(FPURegister fd, FPURegister fs) {
   2918   GenInstrRegister(COP1, S, f0, fs, fd, SQRT_D);
   2919 }
   2920 
   2921 
   2922 void Assembler::sqrt_d(FPURegister fd, FPURegister fs) {
   2923   GenInstrRegister(COP1, D, f0, fs, fd, SQRT_D);
   2924 }
   2925 
   2926 
   2927 void Assembler::rsqrt_s(FPURegister fd, FPURegister fs) {
   2928   GenInstrRegister(COP1, S, f0, fs, fd, RSQRT_S);
   2929 }
   2930 
   2931 
   2932 void Assembler::rsqrt_d(FPURegister fd, FPURegister fs) {
   2933   GenInstrRegister(COP1, D, f0, fs, fd, RSQRT_D);
   2934 }
   2935 
   2936 
   2937 void Assembler::recip_d(FPURegister fd, FPURegister fs) {
   2938   GenInstrRegister(COP1, D, f0, fs, fd, RECIP_D);
   2939 }
   2940 
   2941 
   2942 void Assembler::recip_s(FPURegister fd, FPURegister fs) {
   2943   GenInstrRegister(COP1, S, f0, fs, fd, RECIP_S);
   2944 }
   2945 
   2946 
   2947 // Conversions.
   2948 void Assembler::cvt_w_s(FPURegister fd, FPURegister fs) {
   2949   GenInstrRegister(COP1, S, f0, fs, fd, CVT_W_S);
   2950 }
   2951 
   2952 
   2953 void Assembler::cvt_w_d(FPURegister fd, FPURegister fs) {
   2954   GenInstrRegister(COP1, D, f0, fs, fd, CVT_W_D);
   2955 }
   2956 
   2957 
   2958 void Assembler::trunc_w_s(FPURegister fd, FPURegister fs) {
   2959   GenInstrRegister(COP1, S, f0, fs, fd, TRUNC_W_S);
   2960 }
   2961 
   2962 
   2963 void Assembler::trunc_w_d(FPURegister fd, FPURegister fs) {
   2964   GenInstrRegister(COP1, D, f0, fs, fd, TRUNC_W_D);
   2965 }
   2966 
   2967 
   2968 void Assembler::round_w_s(FPURegister fd, FPURegister fs) {
   2969   GenInstrRegister(COP1, S, f0, fs, fd, ROUND_W_S);
   2970 }
   2971 
   2972 
   2973 void Assembler::round_w_d(FPURegister fd, FPURegister fs) {
   2974   GenInstrRegister(COP1, D, f0, fs, fd, ROUND_W_D);
   2975 }
   2976 
   2977 
   2978 void Assembler::floor_w_s(FPURegister fd, FPURegister fs) {
   2979   GenInstrRegister(COP1, S, f0, fs, fd, FLOOR_W_S);
   2980 }
   2981 
   2982 
   2983 void Assembler::floor_w_d(FPURegister fd, FPURegister fs) {
   2984   GenInstrRegister(COP1, D, f0, fs, fd, FLOOR_W_D);
   2985 }
   2986 
   2987 
   2988 void Assembler::ceil_w_s(FPURegister fd, FPURegister fs) {
   2989   GenInstrRegister(COP1, S, f0, fs, fd, CEIL_W_S);
   2990 }
   2991 
   2992 
   2993 void Assembler::ceil_w_d(FPURegister fd, FPURegister fs) {
   2994   GenInstrRegister(COP1, D, f0, fs, fd, CEIL_W_D);
   2995 }
   2996 
   2997 
   2998 void Assembler::rint_s(FPURegister fd, FPURegister fs) { rint(S, fd, fs); }
   2999 
   3000 
   3001 void Assembler::rint_d(FPURegister fd, FPURegister fs) { rint(D, fd, fs); }
   3002 
   3003 
   3004 void Assembler::rint(SecondaryField fmt, FPURegister fd, FPURegister fs) {
   3005   DCHECK(kArchVariant == kMips64r6);
   3006   GenInstrRegister(COP1, fmt, f0, fs, fd, RINT);
   3007 }
   3008 
   3009 
   3010 void Assembler::cvt_l_s(FPURegister fd, FPURegister fs) {
   3011   DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
   3012   GenInstrRegister(COP1, S, f0, fs, fd, CVT_L_S);
   3013 }
   3014 
   3015 
   3016 void Assembler::cvt_l_d(FPURegister fd, FPURegister fs) {
   3017   DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
   3018   GenInstrRegister(COP1, D, f0, fs, fd, CVT_L_D);
   3019 }
   3020 
   3021 
   3022 void Assembler::trunc_l_s(FPURegister fd, FPURegister fs) {
   3023   DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
   3024   GenInstrRegister(COP1, S, f0, fs, fd, TRUNC_L_S);
   3025 }
   3026 
   3027 
   3028 void Assembler::trunc_l_d(FPURegister fd, FPURegister fs) {
   3029   DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
   3030   GenInstrRegister(COP1, D, f0, fs, fd, TRUNC_L_D);
   3031 }
   3032 
   3033 
   3034 void Assembler::round_l_s(FPURegister fd, FPURegister fs) {
   3035   GenInstrRegister(COP1, S, f0, fs, fd, ROUND_L_S);
   3036 }
   3037 
   3038 
   3039 void Assembler::round_l_d(FPURegister fd, FPURegister fs) {
   3040   GenInstrRegister(COP1, D, f0, fs, fd, ROUND_L_D);
   3041 }
   3042 
   3043 
   3044 void Assembler::floor_l_s(FPURegister fd, FPURegister fs) {
   3045   GenInstrRegister(COP1, S, f0, fs, fd, FLOOR_L_S);
   3046 }
   3047 
   3048 
   3049 void Assembler::floor_l_d(FPURegister fd, FPURegister fs) {
   3050   GenInstrRegister(COP1, D, f0, fs, fd, FLOOR_L_D);
   3051 }
   3052 
   3053 
   3054 void Assembler::ceil_l_s(FPURegister fd, FPURegister fs) {
   3055   GenInstrRegister(COP1, S, f0, fs, fd, CEIL_L_S);
   3056 }
   3057 
   3058 
   3059 void Assembler::ceil_l_d(FPURegister fd, FPURegister fs) {
   3060   GenInstrRegister(COP1, D, f0, fs, fd, CEIL_L_D);
   3061 }
   3062 
   3063 
   3064 void Assembler::class_s(FPURegister fd, FPURegister fs) {
   3065   DCHECK(kArchVariant == kMips64r6);
   3066   GenInstrRegister(COP1, S, f0, fs, fd, CLASS_S);
   3067 }
   3068 
   3069 
   3070 void Assembler::class_d(FPURegister fd, FPURegister fs) {
   3071   DCHECK(kArchVariant == kMips64r6);
   3072   GenInstrRegister(COP1, D, f0, fs, fd, CLASS_D);
   3073 }
   3074 
   3075 
   3076 void Assembler::mina(SecondaryField fmt, FPURegister fd, FPURegister fs,
   3077                      FPURegister ft) {
   3078   DCHECK(kArchVariant == kMips64r6);
   3079   DCHECK((fmt == D) || (fmt == S));
   3080   GenInstrRegister(COP1, fmt, ft, fs, fd, MINA);
   3081 }
   3082 
   3083 
void Assembler::maxa(SecondaryField fmt, FPURegister fd, FPURegister fs,
                     FPURegister ft) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK((fmt == D) || (fmt == S));
  GenInstrRegister(COP1, fmt, ft, fs, fd, MAXA);
}


void Assembler::cvt_s_w(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, W, f0, fs, fd, CVT_S_W);
}


void Assembler::cvt_s_l(FPURegister fd, FPURegister fs) {
  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
  GenInstrRegister(COP1, L, f0, fs, fd, CVT_S_L);
}


void Assembler::cvt_s_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, CVT_S_D);
}


void Assembler::cvt_d_w(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, W, f0, fs, fd, CVT_D_W);
}


void Assembler::cvt_d_l(FPURegister fd, FPURegister fs) {
  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
  GenInstrRegister(COP1, L, f0, fs, fd, CVT_D_L);
}


void Assembler::cvt_d_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, CVT_D_S);
}


// Conditions for >= MIPSr6.
void Assembler::cmp(FPUCondition cond, SecondaryField fmt, FPURegister fd,
                    FPURegister fs, FPURegister ft) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK((fmt & ~(31 << kRsShift)) == 0);
  Instr instr = COP1 | fmt | ft.code() << kFtShift | fs.code() << kFsShift |
                fd.code() << kFdShift | (0 << 5) | cond;
  emit(instr);
}


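// cmp_s and cmp_d pass W and L as the format because, in the r6 CMP.cond.fmt
// encoding, the single and double compare formats share the field values used
// for W and L elsewhere in the ISA.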
void Assembler::cmp_s(FPUCondition cond, FPURegister fd, FPURegister fs,
                      FPURegister ft) {
  cmp(cond, W, fd, fs, ft);
}

void Assembler::cmp_d(FPUCondition cond, FPURegister fd, FPURegister fs,
                      FPURegister ft) {
  cmp(cond, L, fd, fs, ft);
}


void Assembler::bc1eqz(int16_t offset, FPURegister ft) {
  DCHECK(kArchVariant == kMips64r6);
  Instr instr = COP1 | BC1EQZ | ft.code() << kFtShift | (offset & kImm16Mask);
  emit(instr);
}


void Assembler::bc1nez(int16_t offset, FPURegister ft) {
  DCHECK(kArchVariant == kMips64r6);
  Instr instr = COP1 | BC1NEZ | ft.code() << kFtShift | (offset & kImm16Mask);
  emit(instr);
}

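// Illustrative r6 sketch (assuming the Label overloads declared in the
// header): cmp_d leaves an all-ones/all-zero mask in its destination, which
// bc1nez/bc1eqz then test.
//   __ cmp_d(OLT, f0, f12, f14);  // f0 = (f12 < f14) ? ~0 : 0
//   __ bc1nez(&less, f0);         // taken when the compare was true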

// Conditions for < MIPSr6.
void Assembler::c(FPUCondition cond, SecondaryField fmt, FPURegister fs,
                  FPURegister ft, uint16_t cc) {
  DCHECK(kArchVariant != kMips64r6);
  DCHECK(is_uint3(cc));
  DCHECK(fmt == S || fmt == D);
  DCHECK((fmt & ~(31 << kRsShift)) == 0);
  Instr instr = COP1 | fmt | ft.code() << kFtShift | fs.code() << kFsShift |
                cc << 8 | 3 << 4 | cond;
  emit(instr);
}


void Assembler::c_s(FPUCondition cond, FPURegister fs, FPURegister ft,
                    uint16_t cc) {
  c(cond, S, fs, ft, cc);
}


void Assembler::c_d(FPUCondition cond, FPURegister fs, FPURegister ft,
                    uint16_t cc) {
  c(cond, D, fs, ft, cc);
}

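// fcmp only supports comparison against 0.0: it materializes +0.0 in the
// scratch register f14 and issues a pre-r6 c.cond.d against it.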
void Assembler::fcmp(FPURegister src1, const double src2, FPUCondition cond) {
  DCHECK(src2 == 0.0);
  mtc1(zero_reg, f14);
  cvt_d_w(f14, f14);
  c(cond, D, src1, f14, 0);
}


void Assembler::bc1f(int16_t offset, uint16_t cc) {
  DCHECK(is_uint3(cc));
  Instr instr = COP1 | BC1 | cc << 18 | 0 << 16 | (offset & kImm16Mask);
  emit(instr);
}


void Assembler::bc1t(int16_t offset, uint16_t cc) {
  DCHECK(is_uint3(cc));
  Instr instr = COP1 | BC1 | cc << 18 | 1 << 16 | (offset & kImm16Mask);
  emit(instr);
}

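// Illustrative pre-r6 sketch (assuming the Label overloads declared in the
// header): c.cond.fmt sets FPU condition flag 'cc', and bc1t/bc1f branch
// on it.
//   __ c(OLT, D, f12, f14, 0);  // FCC0 := (f12 < f14)
//   __ bc1t(&less, 0);          // taken when FCC0 is set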

int Assembler::RelocateInternalReference(RelocInfo::Mode rmode, byte* pc,
                                         intptr_t pc_delta) {
  if (RelocInfo::IsInternalReference(rmode)) {
    int64_t* p = reinterpret_cast<int64_t*>(pc);
    if (*p == kEndOfJumpChain) {
      return 0;  // Number of instructions patched.
    }
    *p += pc_delta;
    return 2;  // Number of instructions patched.
  }
  Instr instr = instr_at(pc);
  DCHECK(RelocInfo::IsInternalReferenceEncoded(rmode));
  if (IsLui(instr)) {
    Instr instr_lui = instr_at(pc + 0 * Assembler::kInstrSize);
    Instr instr_ori = instr_at(pc + 1 * Assembler::kInstrSize);
    Instr instr_ori2 = instr_at(pc + 3 * Assembler::kInstrSize);
    DCHECK(IsOri(instr_ori));
    DCHECK(IsOri(instr_ori2));
    // TODO(plind): symbolic names for the shifts.
    int64_t imm = (instr_lui & static_cast<int64_t>(kImm16Mask)) << 48;
    imm |= (instr_ori & static_cast<int64_t>(kImm16Mask)) << 32;
    imm |= (instr_ori2 & static_cast<int64_t>(kImm16Mask)) << 16;
    // The fields were assembled 16 bits high, so the arithmetic shift both
    // aligns the value and sign-extends bit 47 of the address.
    imm >>= 16;

    if (imm == kEndOfJumpChain) {
      return 0;  // Number of instructions patched.
    }
    imm += pc_delta;
    DCHECK((imm & 3) == 0);

    instr_lui &= ~kImm16Mask;
    instr_ori &= ~kImm16Mask;
    instr_ori2 &= ~kImm16Mask;

    instr_at_put(pc + 0 * Assembler::kInstrSize,
                 instr_lui | ((imm >> 32) & kImm16Mask));
    instr_at_put(pc + 1 * Assembler::kInstrSize,
                 instr_ori | ((imm >> 16) & kImm16Mask));
    instr_at_put(pc + 3 * Assembler::kInstrSize,
                 instr_ori2 | (imm & kImm16Mask));
    return 4;  // Number of instructions patched.
  } else if (IsJ(instr) || IsJal(instr)) {
    // Regular j/jal relocation.
    uint32_t imm28 = (instr & static_cast<int32_t>(kImm26Mask)) << 2;
    imm28 += pc_delta;
    imm28 &= kImm28Mask;
    instr &= ~kImm26Mask;
    DCHECK((imm28 & 3) == 0);
    uint32_t imm26 = static_cast<uint32_t>(imm28 >> 2);
    instr_at_put(pc, instr | (imm26 & kImm26Mask));
    return 1;  // Number of instructions patched.
  } else {
    DCHECK(((instr & kJumpRawMask) == kJRawMark) ||
           ((instr & kJumpRawMask) == kJalRawMark));
    // Unbox the raw offset and emit a j/jal.
    int32_t imm28 = (instr & static_cast<int32_t>(kImm26Mask)) << 2;
    // Sign extend the 28-bit offset to 32 bits.
    imm28 = (imm28 << 4) >> 4;
    uint64_t target =
        static_cast<int64_t>(imm28) + reinterpret_cast<uint64_t>(pc);
    target &= kImm28Mask;
    DCHECK((imm28 & 3) == 0);
    uint32_t imm26 = static_cast<uint32_t>(target >> 2);
    // Check the mark to decide whether to emit a j or a jal.
    uint32_t unbox = (instr & kJRawMark) ? J : JAL;
    instr_at_put(pc, unbox | (imm26 & kImm26Mask));
    return 1;  // Number of instructions patched.
  }
}


void Assembler::GrowBuffer() {
  if (!own_buffer_) FATAL("external code buffer is too small");

  // Compute new buffer size.
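  // Growth is geometric below 1 MB (doubling) and linear above it, e.g.
  // 512 KB -> 1 MB -> 2 MB -> 3 MB.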
  CodeDesc desc;  // The new buffer.
  if (buffer_size_ < 1 * MB) {
    desc.buffer_size = 2 * buffer_size_;
  } else {
    desc.buffer_size = buffer_size_ + 1 * MB;
  }
  CHECK_GT(desc.buffer_size, 0);  // No overflow.

  // Set up new buffer.
  desc.buffer = NewArray<byte>(desc.buffer_size);
  desc.origin = this;

  desc.instr_size = pc_offset();
  desc.reloc_size =
      static_cast<int>((buffer_ + buffer_size_) - reloc_info_writer.pos());

  // Copy the data.
  intptr_t pc_delta = desc.buffer - buffer_;
  intptr_t rc_delta =
      (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
  MemMove(desc.buffer, buffer_, desc.instr_size);
  MemMove(reloc_info_writer.pos() + rc_delta, reloc_info_writer.pos(),
          desc.reloc_size);

  // Switch buffers.
  DeleteArray(buffer_);
  buffer_ = desc.buffer;
  buffer_size_ = desc.buffer_size;
  pc_ += pc_delta;
  reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
                               reloc_info_writer.last_pc() + pc_delta);

  // Relocate internal references.
  for (RelocIterator it(desc); !it.done(); it.next()) {
    RelocInfo::Mode rmode = it.rinfo()->rmode();
    if (rmode == RelocInfo::INTERNAL_REFERENCE) {
      byte* p = reinterpret_cast<byte*>(it.rinfo()->pc());
      RelocateInternalReference(rmode, p, pc_delta);
    }
  }
  DCHECK(!overflow());
}


void Assembler::db(uint8_t data) {
  CheckForEmitInForbiddenSlot();
  EmitHelper(data);
}


void Assembler::dd(uint32_t data) {
  CheckForEmitInForbiddenSlot();
  EmitHelper(data);
}


void Assembler::dq(uint64_t data) {
  CheckForEmitInForbiddenSlot();
  EmitHelper(data);
}

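// Emit the 64-bit address that 'label' resolves to: the absolute buffer
// address when the label is already bound, otherwise a link in the label's
// jump chain that is fixed up once the label is bound.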
void Assembler::dd(Label* label) {
  uint64_t data;
  CheckForEmitInForbiddenSlot();
  if (label->is_bound()) {
    data = reinterpret_cast<uint64_t>(buffer_ + label->pos());
  } else {
    data = jump_address(label);
    unbound_labels_count_++;
    internal_reference_positions_.insert(label->pos());
  }
  RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
  EmitHelper(data);
}


void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
  // We do not try to reuse pool constants.
  RelocInfo rinfo(isolate(), pc_, rmode, data, NULL);
  if (rmode >= RelocInfo::COMMENT &&
      rmode <= RelocInfo::DEBUG_BREAK_SLOT_AT_TAIL_CALL) {
    // Adjust code for new modes.
    DCHECK(RelocInfo::IsDebugBreakSlot(rmode) || RelocInfo::IsComment(rmode));
    // These modes do not need an entry in the constant pool.
  }
  if (!RelocInfo::IsNone(rinfo.rmode())) {
    // Don't record external references unless the heap will be serialized.
    if (rmode == RelocInfo::EXTERNAL_REFERENCE &&
        !serializer_enabled() && !emit_debug_code()) {
      return;
    }
    DCHECK(buffer_space() >= kMaxRelocSize);  // Too late to grow buffer here.
    if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
      RelocInfo reloc_info_with_ast_id(isolate(), pc_, rmode,
                                       RecordedAstId().ToInt(), NULL);
      ClearRecordedAstId();
      reloc_info_writer.Write(&reloc_info_with_ast_id);
    } else {
      reloc_info_writer.Write(&rinfo);
    }
  }
}


void Assembler::BlockTrampolinePoolFor(int instructions) {
  CheckTrampolinePoolQuick(instructions);
  BlockTrampolinePoolBefore(pc_offset() + instructions * kInstrSize);
}


void Assembler::CheckTrampolinePool() {
  // Some small sequences of instructions must not be broken up by the
  // insertion of a trampoline pool; such sequences are protected by setting
  // either trampoline_pool_blocked_nesting_ or no_trampoline_pool_before_,
  // which are both checked here. Also, recursive calls to CheckTrampolinePool
  // are blocked by trampoline_pool_blocked_nesting_.
  if ((trampoline_pool_blocked_nesting_ > 0) ||
      (pc_offset() < no_trampoline_pool_before_)) {
    // Emission is currently blocked; make sure we try again as soon as
    // possible.
    if (trampoline_pool_blocked_nesting_ > 0) {
      next_buffer_check_ = pc_offset() + kInstrSize;
    } else {
      next_buffer_check_ = no_trampoline_pool_before_;
    }
    return;
  }

  DCHECK(!trampoline_emitted_);
  DCHECK(unbound_labels_count_ >= 0);
  if (unbound_labels_count_ > 0) {
    // First we emit a jump (2 instructions), then we emit the trampoline
    // pool, laid out as sketched below.
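    //   b/bc after_pool ; nop      -- jump over the pool
    //   (j after_pool ; nop) x N   -- one slot per unbound label; each j is
    //                                 patched later to its real target
    // after_pool: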
    { BlockTrampolinePoolScope block_trampoline_pool(this);
      Label after_pool;
      if (kArchVariant == kMips64r6) {
        bc(&after_pool);
      } else {
        b(&after_pool);
      }
      nop();

      int pool_start = pc_offset();
      for (int i = 0; i < unbound_labels_count_; i++) {
        { BlockGrowBufferScope block_buf_growth(this);
          // Buffer growth (and relocation) must be blocked for internal
          // references until the associated instructions are emitted and
          // available to be patched.
          RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
          j(&after_pool);
        }
        nop();
      }
      bind(&after_pool);
      trampoline_ = Trampoline(pool_start, unbound_labels_count_);

      trampoline_emitted_ = true;
      // As we are only going to emit the trampoline once, we need to prevent
      // any further emission.
      next_buffer_check_ = kMaxInt;
    }
  } else {
    // The number of branches to unbound labels at this point is zero, so we
    // can move the next buffer check to the maximum.
    next_buffer_check_ =
        pc_offset() + kMaxBranchOffset - kTrampolineSlotsSize * 16;
  }
}


Address Assembler::target_address_at(Address pc) {
  Instr instr0 = instr_at(pc);
  Instr instr1 = instr_at(pc + 1 * kInstrSize);
  Instr instr3 = instr_at(pc + 3 * kInstrSize);

  // Interpret the 4 instructions emitted by li for an address; see the
  // listing in Assembler::set_target_address_at() just below.
  if ((GetOpcodeField(instr0) == LUI) && (GetOpcodeField(instr1) == ORI) &&
      (GetOpcodeField(instr3) == ORI)) {
    // Assemble the 48-bit value.
    int64_t addr = static_cast<int64_t>(
        (static_cast<uint64_t>(GetImmediate16(instr0)) << 32) |
        (static_cast<uint64_t>(GetImmediate16(instr1)) << 16) |
        static_cast<uint64_t>(GetImmediate16(instr3)));

    // Sign-extend bit 47 to get the canonical address.
    addr = (addr << 16) >> 16;
    return reinterpret_cast<Address>(addr);
  }
  // We should never get here; force a bad address if we do.
  UNREACHABLE();
  return reinterpret_cast<Address>(0x0);
}


// MIPS and ia32 use opposite encodings for qNaN and sNaN: an ia32 qNaN is a
// MIPS sNaN, and an ia32 sNaN is a MIPS qNaN. If running from a heap snapshot
// generated on ia32, the resulting MIPS sNaN must be quieted;
// std::numeric_limits<double>::quiet_NaN() provides the qNaN used for that.
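// For example, the ia32 default qNaN 0x7FF8000000000000 has the top mantissa
// bit set, which a legacy (pre-NAN2008) MIPS FPU decodes as signaling.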
void Assembler::QuietNaN(HeapObject* object) {
  HeapNumber::cast(object)->set_value(std::numeric_limits<double>::quiet_NaN());
}


// On Mips64, a target address is stored in a 4-instruction sequence:
//    0: lui(rd, (j.imm64_ >> 32) & kImm16Mask);
//    1: ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask);
//    2: dsll(rd, rd, 16);
//    3: ori(rd, rd, j.imm64_ & kImm16Mask);
//
// Patching the address must replace all of the lui & ori instructions and
// flush the i-cache.
//
// There is an optimization below that emits a nop when the address fits in
// just 16 bits; it is unlikely to help and should be benchmarked and possibly
// removed.
void Assembler::set_target_address_at(Isolate* isolate, Address pc,
                                      Address target,
                                      ICacheFlushMode icache_flush_mode) {
  // There is an optimization where only 4 instructions are used to load an
  // address on MIPS64, because only the low 48 bits of the address are
  // effectively used. It relies on the fact that the upper bits [63:48] are
  // not used for virtual address translation and must be set according to the
  // value of bit 47 to form a canonical address.
  Instr instr1 = instr_at(pc + kInstrSize);
  uint32_t rt_code = GetRt(instr1);
  uint32_t* p = reinterpret_cast<uint32_t*>(pc);
  uint64_t itarget = reinterpret_cast<uint64_t>(target);

#ifdef DEBUG
  // Check we have the result of a li macro-instruction.
  Instr instr0 = instr_at(pc);
  Instr instr3 = instr_at(pc + kInstrSize * 3);
  CHECK((GetOpcodeField(instr0) == LUI && GetOpcodeField(instr1) == ORI &&
         GetOpcodeField(instr3) == ORI));
#endif

  // Must use 4 instructions to ensure patchable code.
  // lui rt, bits 47..32.
  // ori rt, rt, bits 31..16.
  // dsll rt, rt, 16.
  // ori rt, rt, bits 15..0.
  *p = LUI | (rt_code << kRtShift) | ((itarget >> 32) & kImm16Mask);
  *(p + 1) = ORI | (rt_code << kRtShift) | (rt_code << kRsShift) |
             ((itarget >> 16) & kImm16Mask);
  *(p + 3) = ORI | (rt_code << kRsShift) | (rt_code << kRtShift) |
             (itarget & kImm16Mask);

  if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
    Assembler::FlushICache(isolate, pc, 4 * Assembler::kInstrSize);
  }
}

}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_MIPS64