// Home | History | Annotate | Download | only in mips64
      1 // Copyright (c) 1994-2006 Sun Microsystems Inc.
      2 // All Rights Reserved.
      3 //
      4 // Redistribution and use in source and binary forms, with or without
      5 // modification, are permitted provided that the following conditions are
      6 // met:
      7 //
      8 // - Redistributions of source code must retain the above copyright notice,
      9 // this list of conditions and the following disclaimer.
     10 //
     11 // - Redistribution in binary form must reproduce the above copyright
     12 // notice, this list of conditions and the following disclaimer in the
     13 // documentation and/or other materials provided with the distribution.
     14 //
     15 // - Neither the name of Sun Microsystems or the names of contributors may
     16 // be used to endorse or promote products derived from this software without
     17 // specific prior written permission.
     18 //
     19 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
     20 // IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
     21 // THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     22 // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
     23 // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
     24 // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
     25 // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
     26 // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
     27 // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
     28 // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
     29 // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     30 
     31 // The original source code covered by the above license above has been
     32 // modified significantly by Google Inc.
     33 // Copyright 2012 the V8 project authors. All rights reserved.
     34 
     35 #include "src/mips64/assembler-mips64.h"
     36 
     37 #if V8_TARGET_ARCH_MIPS64
     38 
     39 #include "src/base/cpu.h"
     40 #include "src/mips64/assembler-mips64-inl.h"
     41 
     42 namespace v8 {
     43 namespace internal {
     44 
     45 
     46 // Get the CPU features enabled by the build. For cross compilation the
     47 // preprocessor symbols CAN_USE_FPU_INSTRUCTIONS
     48 // can be defined to enable FPU instructions when building the
     49 // snapshot.
     50 static unsigned CpuFeaturesImpliedByCompiler() {
     51   unsigned answer = 0;
     52 #ifdef CAN_USE_FPU_INSTRUCTIONS
     53   answer |= 1u << FPU;
     54 #endif  // def CAN_USE_FPU_INSTRUCTIONS
     55 
     56   // If the compiler is allowed to use FPU then we can use FPU too in our code
     57   // generation even when generating snapshots.  This won't work for cross
     58   // compilation.
     59 #if defined(__mips__) && defined(__mips_hard_float) && __mips_hard_float != 0
     60   answer |= 1u << FPU;
     61 #endif
     62 
     63   return answer;
     64 }
     65 
     66 
// Populates supported_ with the FPU feature, from static build knowledge
// and (for native, non-cross builds) a runtime CPU probe.
void CpuFeatures::ProbeImpl(bool cross_compile) {
  supported_ |= CpuFeaturesImpliedByCompiler();

  // Only use statically determined features for cross compile (snapshot).
  if (cross_compile) return;

  // If the compiler is allowed to use fpu then we can use fpu too in our
  // code generation.
#ifndef __mips__
  // For the simulator build, use FPU.
  supported_ |= 1u << FPU;
#else
  // Probe for additional features at runtime.
  base::CPU cpu;
  if (cpu.has_fpu()) supported_ |= 1u << FPU;
#endif
}
     84 
     85 
// No per-target banner is printed on MIPS64.
void CpuFeatures::PrintTarget() { }
// No per-feature report is printed on MIPS64.
void CpuFeatures::PrintFeatures() { }
     88 
     89 
     90 int ToNumber(Register reg) {
     91   DCHECK(reg.is_valid());
     92   const int kNumbers[] = {
     93     0,    // zero_reg
     94     1,    // at
     95     2,    // v0
     96     3,    // v1
     97     4,    // a0
     98     5,    // a1
     99     6,    // a2
    100     7,    // a3
    101     8,    // a4
    102     9,    // a5
    103     10,   // a6
    104     11,   // a7
    105     12,   // t0
    106     13,   // t1
    107     14,   // t2
    108     15,   // t3
    109     16,   // s0
    110     17,   // s1
    111     18,   // s2
    112     19,   // s3
    113     20,   // s4
    114     21,   // s5
    115     22,   // s6
    116     23,   // s7
    117     24,   // t8
    118     25,   // t9
    119     26,   // k0
    120     27,   // k1
    121     28,   // gp
    122     29,   // sp
    123     30,   // fp
    124     31,   // ra
    125   };
    126   return kNumbers[reg.code()];
    127 }
    128 
    129 
// Maps an architectural register number (0..31) back to the Register
// object; inverse of ToNumber().
Register ToRegister(int num) {
  DCHECK(num >= 0 && num < kNumRegisters);
  // Table index equals the hardware register number.
  const Register kRegisters[] = {
    zero_reg,
    at,
    v0, v1,
    a0, a1, a2, a3, a4, a5, a6, a7,
    t0, t1, t2, t3,
    s0, s1, s2, s3, s4, s5, s6, s7,
    t8, t9,
    k0, k1,
    gp,
    sp,
    fp,
    ra
  };
  return kRegisters[num];
}
    148 
    149 
    150 // -----------------------------------------------------------------------------
    151 // Implementation of RelocInfo.
    152 
// Modes whose targets must be fixed up when code moves: code targets plus
// both flavors of internal reference.
const int RelocInfo::kApplyMask = RelocInfo::kCodeTargetMask |
                                  1 << RelocInfo::INTERNAL_REFERENCE |
                                  1 << RelocInfo::INTERNAL_REFERENCE_ENCODED;
    156 
    157 
    158 bool RelocInfo::IsCodedSpecially() {
    159   // The deserializer needs to know whether a pointer is specially coded.  Being
    160   // specially coded on MIPS means that it is a lui/ori instruction, and that is
    161   // always the case inside code objects.
    162   return true;
    163 }
    164 
    165 
    166 bool RelocInfo::IsInConstantPool() {
    167   return false;
    168 }
    169 
// Reads the wasm memory start address encoded at this reloc site.
Address RelocInfo::wasm_memory_reference() {
  DCHECK(IsWasmMemoryReference(rmode_));
  return Assembler::target_address_at(pc_, host_);
}
    174 
// Reads the wasm globals base address encoded at this reloc site.
Address RelocInfo::wasm_global_reference() {
  DCHECK(IsWasmGlobalReference(rmode_));
  return Assembler::target_address_at(pc_, host_);
}
    179 
// Reads the wasm memory size encoded at this reloc site; the value is
// stored as a target address and truncated back to 32 bits.
uint32_t RelocInfo::wasm_memory_size_reference() {
  DCHECK(IsWasmMemorySizeReference(rmode_));
  return static_cast<uint32_t>(
      reinterpret_cast<intptr_t>((Assembler::target_address_at(pc_, host_))));
}
    185 
// Patches the wasm memory reference in place; "unchecked" because no
// rmode DCHECK is performed here.
void RelocInfo::unchecked_update_wasm_memory_reference(
    Address address, ICacheFlushMode flush_mode) {
  Assembler::set_target_address_at(isolate_, pc_, host_, address, flush_mode);
}
    190 
// Patches the wasm memory size in place, encoding it as an address.
void RelocInfo::unchecked_update_wasm_memory_size(uint32_t size,
                                                  ICacheFlushMode flush_mode) {
  Assembler::set_target_address_at(isolate_, pc_, host_,
                                   reinterpret_cast<Address>(size), flush_mode);
}
    196 
    197 // -----------------------------------------------------------------------------
    198 // Implementation of Operand and MemOperand.
    199 // See assembler-mips-inl.h for inlined constructors.
    200 
// Builds an immediate operand from a handle: heap objects are embedded via
// the handle location with relocation, smis are embedded directly.
Operand::Operand(Handle<Object> handle) {
  AllowDeferredHandleDereference using_raw_address;
  rm_ = no_reg;
  // Verify all Objects referred by code are NOT in new space.
  Object* obj = *handle;
  if (obj->IsHeapObject()) {
    DCHECK(!HeapObject::cast(obj)->GetHeap()->InNewSpace(obj));
    imm64_ = reinterpret_cast<intptr_t>(handle.location());
    rmode_ = RelocInfo::EMBEDDED_OBJECT;
  } else {
    // No relocation needed.
    imm64_ = reinterpret_cast<intptr_t>(obj);
    rmode_ = RelocInfo::NONE64;
  }
}
    216 
    217 
// Memory operand addressing [rm + offset].
MemOperand::MemOperand(Register rm, int32_t offset) : Operand(rm) {
  offset_ = offset;
}
    221 
    222 
// Memory operand addressing [rm + unit * multiplier + offset_addend].
MemOperand::MemOperand(Register rm, int32_t unit, int32_t multiplier,
                       OffsetAddend offset_addend)
    : Operand(rm) {
  offset_ = unit * multiplier + offset_addend;
}
    228 
    229 
    230 // -----------------------------------------------------------------------------
    231 // Specific instructions, constants, and masks.
    232 
// Sign bit of a 16-bit immediate; used to recognize negative offsets.
static const int kNegOffset = 0x00008000;
// daddiu(sp, sp, 8) aka Pop() operation or part of Pop(r)
// operations as post-increment of sp.
const Instr kPopInstruction = DADDIU | (Register::kCode_sp << kRsShift) |
                              (Register::kCode_sp << kRtShift) |
                              (kPointerSize & kImm16Mask);  // NOLINT
// daddiu(sp, sp, -8) part of Push(r) operation as pre-decrement of sp.
const Instr kPushInstruction = DADDIU | (Register::kCode_sp << kRsShift) |
                               (Register::kCode_sp << kRtShift) |
                               (-kPointerSize & kImm16Mask);  // NOLINT
// sd(r, MemOperand(sp, 0))
const Instr kPushRegPattern =
    SD | (Register::kCode_sp << kRsShift) | (0 & kImm16Mask);  // NOLINT
//  ld(r, MemOperand(sp, 0))
const Instr kPopRegPattern =
    LD | (Register::kCode_sp << kRsShift) | (0 & kImm16Mask);  // NOLINT

// lw(r, MemOperand(fp, 0)) with a non-negative offset.
const Instr kLwRegFpOffsetPattern =
    LW | (Register::kCode_fp << kRsShift) | (0 & kImm16Mask);  // NOLINT

// sw(r, MemOperand(fp, 0)) with a non-negative offset.
const Instr kSwRegFpOffsetPattern =
    SW | (Register::kCode_fp << kRsShift) | (0 & kImm16Mask);  // NOLINT

// lw(r, MemOperand(fp, negative offset)).
const Instr kLwRegFpNegOffsetPattern = LW | (Register::kCode_fp << kRsShift) |
                                       (kNegOffset & kImm16Mask);  // NOLINT

// sw(r, MemOperand(fp, negative offset)).
const Instr kSwRegFpNegOffsetPattern = SW | (Register::kCode_fp << kRsShift) |
                                       (kNegOffset & kImm16Mask);  // NOLINT
// A mask for the Rt register for push, pop, lw, sw instructions.
const Instr kRtMask = kRtFieldMask;
// Opcode + rs field; identifies a lw/sw-from-fp shape regardless of rt/imm.
const Instr kLwSwInstrTypeMask = 0xffe00000;
const Instr kLwSwInstrArgumentMask  = ~kLwSwInstrTypeMask;
const Instr kLwSwOffsetMask = kImm16Mask;
    266 
    267 
// Sets up an assembler over the given buffer: reloc info grows downward
// from the buffer end while instructions grow upward from the start, and
// all trampoline-pool bookkeeping starts in its idle state.
Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
    : AssemblerBase(isolate, buffer, buffer_size),
      recorded_ast_id_(TypeFeedbackId::None()),
      positions_recorder_(this) {
  reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);

  last_trampoline_pool_end_ = 0;
  no_trampoline_pool_before_ = 0;
  trampoline_pool_blocked_nesting_ = 0;
  // We leave space (16 * kTrampolineSlotsSize)
  // for BlockTrampolinePoolScope buffer.
  next_buffer_check_ = FLAG_force_long_branches
      ? kMaxInt : kMaxBranchOffset - kTrampolineSlotsSize * 16;
  internal_trampoline_exception_ = false;
  last_bound_pos_ = 0;

  trampoline_emitted_ = FLAG_force_long_branches;
  unbound_labels_count_ = 0;
  block_buffer_growth_ = false;

  ClearRecordedAstId();
}
    290 
    291 
// Finalizes assembly and fills in |desc| with the emitted code: instruction
// bytes at the buffer start, reloc info packed at the buffer end.
void Assembler::GetCode(CodeDesc* desc) {
  EmitForbiddenSlotInstruction();
  DCHECK(pc_ <= reloc_info_writer.pos());  // No overlap.
  // Set up code descriptor.
  desc->buffer = buffer_;
  desc->buffer_size = buffer_size_;
  desc->instr_size = pc_offset();
  desc->reloc_size =
      static_cast<int>((buffer_ + buffer_size_) - reloc_info_writer.pos());
  desc->origin = this;
  desc->constant_pool_size = 0;
  desc->unwinding_info_size = 0;
  desc->unwinding_info = nullptr;
}
    306 
    307 
// Pads the instruction stream with nops until pc_offset() is a multiple of
// m; m must be a power of two and at least one instruction (4 bytes) wide.
void Assembler::Align(int m) {
  DCHECK(m >= 4 && base::bits::IsPowerOfTwo32(m));
  EmitForbiddenSlotInstruction();
  while ((pc_offset() & (m - 1)) != 0) {
    nop();
  }
}
    315 
    316 
void Assembler::CodeTargetAlign() {
  // No advantage to aligning branch/call targets to more than
  // single instruction, that I am aware of.
  Align(4);
}
    322 
    323 
    324 Register Assembler::GetRtReg(Instr instr) {
    325   Register rt;
    326   rt.reg_code = (instr & kRtFieldMask) >> kRtShift;
    327   return rt;
    328 }
    329 
    330 
    331 Register Assembler::GetRsReg(Instr instr) {
    332   Register rs;
    333   rs.reg_code = (instr & kRsFieldMask) >> kRsShift;
    334   return rs;
    335 }
    336 
    337 
    338 Register Assembler::GetRdReg(Instr instr) {
    339   Register rd;
    340   rd.reg_code = (instr & kRdFieldMask) >> kRdShift;
    341   return rd;
    342 }
    343 
    344 
// Extracts the rt register number (shifted down) from |instr|.
uint32_t Assembler::GetRt(Instr instr) {
  return (instr & kRtFieldMask) >> kRtShift;
}
    348 
    349 
// Extracts the rt field in place (not shifted) from |instr|.
uint32_t Assembler::GetRtField(Instr instr) {
  return instr & kRtFieldMask;
}
    353 
    354 
// Extracts the rs register number (shifted down) from |instr|.
uint32_t Assembler::GetRs(Instr instr) {
  return (instr & kRsFieldMask) >> kRsShift;
}
    358 
    359 
// Extracts the rs field in place (not shifted) from |instr|.
uint32_t Assembler::GetRsField(Instr instr) {
  return instr & kRsFieldMask;
}
    363 
    364 
// Extracts the rd register number (shifted down) from |instr|.
uint32_t Assembler::GetRd(Instr instr) {
  return  (instr & kRdFieldMask) >> kRdShift;
}
    368 
    369 
// Extracts the rd field in place (not shifted) from |instr|.
uint32_t Assembler::GetRdField(Instr instr) {
  return  instr & kRdFieldMask;
}
    373 
    374 
// Extracts the shift amount (sa) value from |instr|.
uint32_t Assembler::GetSa(Instr instr) {
  return (instr & kSaFieldMask) >> kSaShift;
}
    378 
    379 
// Extracts the sa field in place (not shifted) from |instr|.
uint32_t Assembler::GetSaField(Instr instr) {
  return instr & kSaFieldMask;
}
    383 
    384 
// Extracts the opcode field in place from |instr|.
uint32_t Assembler::GetOpcodeField(Instr instr) {
  return instr & kOpcodeMask;
}
    388 
    389 
// Extracts the function code (shifted down) from |instr|.
uint32_t Assembler::GetFunction(Instr instr) {
  return (instr & kFunctionFieldMask) >> kFunctionShift;
}
    393 
    394 
// Extracts the function field in place (not shifted) from |instr|.
uint32_t Assembler::GetFunctionField(Instr instr) {
  return instr & kFunctionFieldMask;
}
    398 
    399 
// Extracts the 16-bit immediate field from |instr|.
uint32_t Assembler::GetImmediate16(Instr instr) {
  return instr & kImm16Mask;
}
    403 
    404 
// Returns everything except the immediate; zero for an emitted label const.
uint32_t Assembler::GetLabelConst(Instr instr) {
  return instr & ~kImm16Mask;
}
    408 
    409 
// True if |instr| is the ld-from-sp half of a Pop(), any rt register.
bool Assembler::IsPop(Instr instr) {
  return (instr & ~kRtMask) == kPopRegPattern;
}
    413 
    414 
// True if |instr| is the sd-to-sp half of a Push(), any rt register.
bool Assembler::IsPush(Instr instr) {
  return (instr & ~kRtMask) == kPushRegPattern;
}
    418 
    419 
// True if |instr| is sw(r, MemOperand(fp, ...)), ignoring rt and offset.
bool Assembler::IsSwRegFpOffset(Instr instr) {
  return ((instr & kLwSwInstrTypeMask) == kSwRegFpOffsetPattern);
}
    423 
    424 
// True if |instr| is lw(r, MemOperand(fp, ...)), ignoring rt and offset.
bool Assembler::IsLwRegFpOffset(Instr instr) {
  return ((instr & kLwSwInstrTypeMask) == kLwRegFpOffsetPattern);
}
    428 
    429 
// True if |instr| is sw(r, MemOperand(fp, ...)) with a negative offset
// (the immediate's sign bit is included in the compared mask).
bool Assembler::IsSwRegFpNegOffset(Instr instr) {
  return ((instr & (kLwSwInstrTypeMask | kNegOffset)) ==
          kSwRegFpNegOffsetPattern);
}
    434 
    435 
// True if |instr| is lw(r, MemOperand(fp, ...)) with a negative offset
// (the immediate's sign bit is included in the compared mask).
bool Assembler::IsLwRegFpNegOffset(Instr instr) {
  return ((instr & (kLwSwInstrTypeMask | kNegOffset)) ==
          kLwRegFpNegOffsetPattern);
}
    440 
    441 
    442 // Labels refer to positions in the (to be) generated code.
    443 // There are bound, linked, and unused labels.
    444 //
    445 // Bound labels refer to known positions in the already
    446 // generated code. pos() is the position the label refers to.
    447 //
    448 // Linked labels refer to unknown positions in the code
    449 // to be generated; pos() is the position of the last
    450 // instruction using the label.
    451 
    452 // The link chain is terminated by a value in the instruction of -1,
    453 // which is an otherwise illegal value (branch -1 is inf loop).
    454 // The instruction 16-bit offset field addresses 32-bit words, but in
    455 // code is conv to an 18-bit value addressing bytes, hence the -4 value.
    456 
// Sentinel byte offset marking the end of a label link chain; -4 because
// the 16-bit word offset -1 scales to -4 bytes (see comment above).
const int kEndOfChain = -4;
// Determines the end of the Jump chain (a subset of the label link chain).
const int kEndOfJumpChain = 0;
    460 
    461 
// True if |instr| is any conditional/compact branch (not J/JAL jumps),
// including the r6-only compact forms when targeting mips64r6.
bool Assembler::IsBranch(Instr instr) {
  uint32_t opcode   = GetOpcodeField(instr);
  uint32_t rt_field = GetRtField(instr);
  uint32_t rs_field = GetRsField(instr);
  // Checks if the instruction is a branch.
  bool isBranch =
      opcode == BEQ || opcode == BNE || opcode == BLEZ || opcode == BGTZ ||
      opcode == BEQL || opcode == BNEL || opcode == BLEZL || opcode == BGTZL ||
      (opcode == REGIMM && (rt_field == BLTZ || rt_field == BGEZ ||
                            rt_field == BLTZAL || rt_field == BGEZAL)) ||
      (opcode == COP1 && rs_field == BC1) ||  // Coprocessor branch.
      (opcode == COP1 && rs_field == BC1EQZ) ||
      (opcode == COP1 && rs_field == BC1NEZ);
  if (!isBranch && kArchVariant == kMips64r6) {
    // All the 3 variants of POP10 (BOVC, BEQC, BEQZALC) and
    // POP30 (BNVC, BNEC, BNEZALC) are branch ops.
    isBranch |= opcode == POP10 || opcode == POP30 || opcode == BC ||
                opcode == BALC ||
                (opcode == POP66 && rs_field != 0) ||  // BEQZC
                (opcode == POP76 && rs_field != 0);    // BNEZC
  }
  return isBranch;
}
    485 
    486 
bool Assembler::IsBc(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  // Checks if the instruction is a BC or BALC.
  return opcode == BC || opcode == BALC;
}
    492 
    493 
bool Assembler::IsBzc(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  // Checks if the instruction is BEQZC or BNEZC (rs != 0 distinguishes
  // them from other POP66/POP76 encodings).
  return (opcode == POP66 && GetRsField(instr) != 0) ||
         (opcode == POP76 && GetRsField(instr) != 0);
}
    500 
    501 
// True if |instr| is a raw label constant (all non-immediate bits zero).
bool Assembler::IsEmittedConstant(Instr instr) {
  uint32_t label_constant = GetLabelConst(instr);
  return label_constant == 0;  // Emitted label const in reg-exp engine.
}
    506 
    507 
// True if |instr| is a BEQ branch.
bool Assembler::IsBeq(Instr instr) {
  return GetOpcodeField(instr) == BEQ;
}
    511 
    512 
// True if |instr| is a BNE branch.
bool Assembler::IsBne(Instr instr) {
  return GetOpcodeField(instr) == BNE;
}
    516 
    517 
// True if |instr| is the compact BEQZC branch (r6 encoding).
bool Assembler::IsBeqzc(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  return opcode == POP66 && GetRsField(instr) != 0;
}
    522 
    523 
// True if |instr| is the compact BNEZC branch (r6 encoding).
bool Assembler::IsBnezc(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  return opcode == POP76 && GetRsField(instr) != 0;
}
    528 
    529 
// True if |instr| is the compact BEQC branch; within POP10 the encoding
// rs != 0 && rs < rt distinguishes BEQC from BOVC/BEQZALC.
bool Assembler::IsBeqc(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  uint32_t rs = GetRsField(instr);
  uint32_t rt = GetRtField(instr);
  return opcode == POP10 && rs != 0 && rs < rt;  // && rt != 0
}
    536 
    537 
// True if |instr| is the compact BNEC branch; within POP30 the encoding
// rs != 0 && rs < rt distinguishes BNEC from BNVC/BNEZALC.
bool Assembler::IsBnec(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  uint32_t rs = GetRsField(instr);
  uint32_t rt = GetRtField(instr);
  return opcode == POP30 && rs != 0 && rs < rt;  // && rt != 0
}
    544 
    545 
// True if |instr| is any jump: J, JAL, or the register forms JR/JALR.
bool Assembler::IsJump(Instr instr) {
  uint32_t opcode   = GetOpcodeField(instr);
  uint32_t rt_field = GetRtField(instr);
  uint32_t rd_field = GetRdField(instr);
  uint32_t function_field = GetFunctionField(instr);
  // Checks if the instruction is a jump.
  return opcode == J || opcode == JAL ||
      (opcode == SPECIAL && rt_field == 0 &&
      ((function_field == JALR) || (rd_field == 0 && (function_field == JR))));
}
    556 
    557 
bool Assembler::IsJ(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  // Checks if the instruction is an unconditional J jump.
  return opcode == J;
}
    563 
    564 
// True if |instr| is a JAL (jump and link).
bool Assembler::IsJal(Instr instr) {
  return GetOpcodeField(instr) == JAL;
}
    568 
    569 
// True if |instr| is a JR (jump register).
bool Assembler::IsJr(Instr instr) {
  return GetOpcodeField(instr) == SPECIAL && GetFunctionField(instr) == JR;
}
    573 
    574 
// True if |instr| is a JALR (jump and link register).
bool Assembler::IsJalr(Instr instr) {
  return GetOpcodeField(instr) == SPECIAL && GetFunctionField(instr) == JALR;
}
    578 
    579 
bool Assembler::IsLui(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  // Checks if the instruction is a load upper immediate.
  return opcode == LUI;
}
    585 
    586 
bool Assembler::IsOri(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  // Checks if the instruction is an or-immediate (ORI).
  return opcode == ORI;
}
    592 
    593 
// True if |instr| is a (possibly typed) nop.  type == 0 is the canonical
// sll zero_reg, zero_reg, 0; other types encode markers via sll with at.
bool Assembler::IsNop(Instr instr, unsigned int type) {
  // See Assembler::nop(type).
  DCHECK(type < 32);
  uint32_t opcode = GetOpcodeField(instr);
  uint32_t function = GetFunctionField(instr);
  uint32_t rt = GetRt(instr);
  uint32_t rd = GetRd(instr);
  uint32_t sa = GetSa(instr);

  // Traditional mips nop == sll(zero_reg, zero_reg, 0)
  // When marking non-zero type, use sll(zero_reg, at, type)
  // to avoid use of mips ssnop and ehb special encodings
  // of the sll instruction.

  Register nop_rt_reg = (type == 0) ? zero_reg : at;
  bool ret = (opcode == SPECIAL && function == SLL &&
              rd == static_cast<uint32_t>(ToNumber(zero_reg)) &&
              rt == static_cast<uint32_t>(ToNumber(nop_rt_reg)) &&
              sa == type);

  return ret;
}
    616 
    617 
// Returns the branch displacement in bytes: the sign-extended 16-bit
// immediate scaled by the 4-byte instruction size.
int32_t Assembler::GetBranchOffset(Instr instr) {
  DCHECK(IsBranch(instr));
  return (static_cast<int16_t>(instr & kImm16Mask)) << 2;
}
    622 
    623 
// True if |instr| is a LW (load word).
bool Assembler::IsLw(Instr instr) {
  return (static_cast<uint32_t>(instr & kOpcodeMask) == LW);
}
    627 
    628 
// Returns the (unscaled) 16-bit offset field of a LW instruction.
int16_t Assembler::GetLwOffset(Instr instr) {
  DCHECK(IsLw(instr));
  return ((instr & kImm16Mask));
}
    633 
    634 
// Returns a copy of the LW instruction |instr| with its offset replaced.
Instr Assembler::SetLwOffset(Instr instr, int16_t offset) {
  DCHECK(IsLw(instr));

  // We actually create a new lw instruction based on the original one.
  Instr temp_instr = LW | (instr & kRsFieldMask) | (instr & kRtFieldMask)
      | (offset & kImm16Mask);

  return temp_instr;
}
    644 
    645 
// True if |instr| is a SW (store word).
bool Assembler::IsSw(Instr instr) {
  return (static_cast<uint32_t>(instr & kOpcodeMask) == SW);
}
    649 
    650 
// Returns a copy of the SW instruction |instr| with its offset replaced.
Instr Assembler::SetSwOffset(Instr instr, int16_t offset) {
  DCHECK(IsSw(instr));
  return ((instr & ~kImm16Mask) | (offset & kImm16Mask));
}
    655 
    656 
// True if |instr| is an add-immediate (ADDIU or DADDIU).
bool Assembler::IsAddImmediate(Instr instr) {
  return ((instr & kOpcodeMask) == ADDIU || (instr & kOpcodeMask) == DADDIU);
}
    660 
    661 
// Returns a copy of the add-immediate |instr| with its immediate replaced.
Instr Assembler::SetAddImmediateOffset(Instr instr, int16_t offset) {
  DCHECK(IsAddImmediate(instr));
  return ((instr & ~kImm16Mask) | (offset & kImm16Mask));
}
    666 
    667 
// True if |instr| is an and-immediate (ANDI).
bool Assembler::IsAndImmediate(Instr instr) {
  return GetOpcodeField(instr) == ANDI;
}
    671 
    672 
    673 static Assembler::OffsetSize OffsetSizeInBits(Instr instr) {
    674   if (kArchVariant == kMips64r6) {
    675     if (Assembler::IsBc(instr)) {
    676       return Assembler::OffsetSize::kOffset26;
    677     } else if (Assembler::IsBzc(instr)) {
    678       return Assembler::OffsetSize::kOffset21;
    679     }
    680   }
    681   return Assembler::OffsetSize::kOffset16;
    682 }
    683 
    684 
// Resolves a branch at |pos| to an absolute position by decoding the
// sign-extended, 4-byte-scaled offset field; returns kEndOfChain as-is.
static inline int32_t AddBranchOffset(int pos, Instr instr) {
  int bits = OffsetSizeInBits(instr);
  const int32_t mask = (1 << bits) - 1;
  // Shift up then arithmetic-shift down to sign-extend and scale by 4.
  bits = 32 - bits;

  // Do NOT change this to <<2. We rely on arithmetic shifts here, assuming
  // the compiler uses arithmetic shifts for signed integers.
  int32_t imm = ((instr & mask) << bits) >> (bits - 2);

  if (imm == kEndOfChain) {
    // EndOfChain sentinel is returned directly, not relative to pc or pos.
    return kEndOfChain;
  } else {
    return pos + Assembler::kBranchPCOffset + imm;
  }
}
    701 
    702 
// Returns the position a link at |pos| points to, or kEndOfChain if |pos|
// terminates its chain.  Handles four link encodings: raw 64-bit internal
// references, emitted label constants, branches, and lui/ori sequences,
// plus j/jal raw offsets.
int Assembler::target_at(int pos, bool is_internal) {
  if (is_internal) {
    // Internal reference: an absolute 64-bit address stored in the stream.
    int64_t* p = reinterpret_cast<int64_t*>(buffer_ + pos);
    int64_t address = *p;
    if (address == kEndOfJumpChain) {
      return kEndOfChain;
    } else {
      int64_t instr_address = reinterpret_cast<int64_t>(p);
      DCHECK(instr_address - address < INT_MAX);
      int delta = static_cast<int>(instr_address - address);
      DCHECK(pos > delta);
      return pos - delta;
    }
  }
  Instr instr = instr_at(pos);
  if ((instr & ~kImm16Mask) == 0) {
    // Emitted label constant, not part of a branch.
    if (instr == 0) {
       return kEndOfChain;
     } else {
       // Sign-extend the 16-bit word offset and scale to bytes (<<16 >>14).
       int32_t imm18 =((instr & static_cast<int32_t>(kImm16Mask)) << 16) >> 14;
       return (imm18 + pos);
     }
  }
  // Check we have a branch or jump instruction.
  DCHECK(IsBranch(instr) || IsJ(instr) || IsJal(instr) || IsLui(instr));
  // Do NOT change this to <<2. We rely on arithmetic shifts here, assuming
  // the compiler uses arithmetic shifts for signed integers.
  if (IsBranch(instr)) {
    return AddBranchOffset(pos, instr);
  } else if (IsLui(instr)) {
    // lui/ori/ori sequence: 48-bit address split across three immediates
    // (the instruction at pos + 2 is not part of the address).
    Instr instr_lui = instr_at(pos + 0 * Assembler::kInstrSize);
    Instr instr_ori = instr_at(pos + 1 * Assembler::kInstrSize);
    Instr instr_ori2 = instr_at(pos + 3 * Assembler::kInstrSize);
    DCHECK(IsOri(instr_ori));
    DCHECK(IsOri(instr_ori2));

    // TODO(plind) create named constants for shift values.
    int64_t imm = static_cast<int64_t>(instr_lui & kImm16Mask) << 48;
    imm |= static_cast<int64_t>(instr_ori & kImm16Mask) << 32;
    imm |= static_cast<int64_t>(instr_ori2 & kImm16Mask) << 16;
    // Sign extend address;
    imm >>= 16;

    if (imm == kEndOfJumpChain) {
      // EndOfChain sentinel is returned directly, not relative to pc or pos.
      return kEndOfChain;
    } else {
      // NOTE(review): cast target is int64_t but stored as uint64_t —
      // value-preserving here, but confirm the intended signedness.
      uint64_t instr_address = reinterpret_cast<int64_t>(buffer_ + pos);
      DCHECK(instr_address - imm < INT_MAX);
      int delta = static_cast<int>(instr_address - imm);
      DCHECK(pos > delta);
      return pos - delta;
    }
  } else {
    DCHECK(IsJ(instr) || IsJal(instr));
    int32_t imm28 = (instr & static_cast<int32_t>(kImm26Mask)) << 2;
    if (imm28 == kEndOfJumpChain) {
      // EndOfChain sentinel is returned directly, not relative to pc or pos.
      return kEndOfChain;
    } else {
      // Sign extend 28-bit offset.
      int32_t delta = static_cast<int32_t>((imm28 << 4) >> 4);
      return pos + delta;
    }
  }
}
    770 
    771 
// Re-encodes |instr| so its offset field targets |target_pos|, relative to
// the branch at |pos|; the target must be word-aligned and in range.
static inline Instr SetBranchOffset(int32_t pos, int32_t target_pos,
                                    Instr instr) {
  int32_t bits = OffsetSizeInBits(instr);
  int32_t imm = target_pos - (pos + Assembler::kBranchPCOffset);
  DCHECK((imm & 3) == 0);
  imm >>= 2;

  const int32_t mask = (1 << bits) - 1;
  instr &= ~mask;
  DCHECK(is_intn(imm, bits));

  return instr | (imm & mask);
}
    785 
    786 
// Patches the link at |pos| to point at |target_pos|, using the encoding
// appropriate to what is stored there (internal reference, label constant,
// branch, lui/ori sequence, or j/jal raw offset).  Inverse of target_at().
void Assembler::target_at_put(int pos, int target_pos, bool is_internal) {
  if (is_internal) {
    // Internal reference: store the absolute 64-bit target address.
    uint64_t imm = reinterpret_cast<uint64_t>(buffer_) + target_pos;
    *reinterpret_cast<uint64_t*>(buffer_ + pos) = imm;
    return;
  }
  Instr instr = instr_at(pos);
  if ((instr & ~kImm16Mask) == 0) {
    DCHECK(target_pos == kEndOfChain || target_pos >= 0);
    // Emitted label constant, not part of a branch.
    // Make label relative to Code* of generated Code object.
    instr_at_put(pos, target_pos + (Code::kHeaderSize - kHeapObjectTag));
    return;
  }

  if (IsBranch(instr)) {
    instr = SetBranchOffset(pos, target_pos, instr);
    instr_at_put(pos, instr);
  } else if (IsLui(instr)) {
    // lui/ori/ori sequence: split the absolute address across the three
    // 16-bit immediates (pos + 2 is not part of the address).
    Instr instr_lui = instr_at(pos + 0 * Assembler::kInstrSize);
    Instr instr_ori = instr_at(pos + 1 * Assembler::kInstrSize);
    Instr instr_ori2 = instr_at(pos + 3 * Assembler::kInstrSize);
    DCHECK(IsOri(instr_ori));
    DCHECK(IsOri(instr_ori2));

    uint64_t imm = reinterpret_cast<uint64_t>(buffer_) + target_pos;
    DCHECK((imm & 3) == 0);

    instr_lui &= ~kImm16Mask;
    instr_ori &= ~kImm16Mask;
    instr_ori2 &= ~kImm16Mask;

    instr_at_put(pos + 0 * Assembler::kInstrSize,
                 instr_lui | ((imm >> 32) & kImm16Mask));
    instr_at_put(pos + 1 * Assembler::kInstrSize,
                 instr_ori | ((imm >> 16) & kImm16Mask));
    instr_at_put(pos + 3 * Assembler::kInstrSize,
                 instr_ori2 | (imm & kImm16Mask));
  } else if (IsJ(instr) || IsJal(instr)) {
    int32_t imm28 = target_pos - pos;
    DCHECK((imm28 & 3) == 0);

    uint32_t imm26 = static_cast<uint32_t>(imm28 >> 2);
    DCHECK(is_uint26(imm26));
    // Place 26-bit signed offset with markings.
    // When code is committed it will be resolved to j/jal.
    int32_t mark = IsJ(instr) ? kJRawMark : kJalRawMark;
    instr_at_put(pos, mark | (imm26 & kImm26Mask));
  } else {
    int32_t imm28 = target_pos - pos;
    DCHECK((imm28 & 3) == 0);

    uint32_t imm26 = static_cast<uint32_t>(imm28 >> 2);
    DCHECK(is_uint26(imm26));
    // Place raw 26-bit signed offset.
    // When code is committed it will be resolved to j/jal.
    instr &= ~kImm26Mask;
    instr_at_put(pos, instr | (imm26 & kImm26Mask));
  }
}
    847 
    848 
// Debug helper: prints the state of label |L|, walking the whole link
// chain (on a copy, so |L| itself is not advanced) if it is unbound.
void Assembler::print(Label* L) {
  if (L->is_unused()) {
    PrintF("unused label\n");
  } else if (L->is_bound()) {
    PrintF("bound label to %d\n", L->pos());
  } else if (L->is_linked()) {
    Label l = *L;
    PrintF("unbound label");
    while (l.is_linked()) {
      PrintF("@ %d ", l.pos());
      Instr instr = instr_at(l.pos());
      if ((instr & ~kImm16Mask) == 0) {
        // Emitted label constant rather than a branch/jump link.
        PrintF("value\n");
      } else {
        PrintF("%d\n", instr);
      }
      next(&l, internal_reference_positions_.find(l.pos()) !=
                   internal_reference_positions_.end());
    }
  } else {
    PrintF("label in inconsistent state (pos = %d)\n", L->pos_);
  }
}
    872 
    873 
// Binds label L to position pos and patches every instruction previously
// linked to L (branches, jumps, lui/ori sequences, internal references).
// Branches whose target is out of range are redirected through a
// trampoline slot.
void Assembler::bind_to(Label* L, int pos) {
  DCHECK(0 <= pos && pos <= pc_offset());  // Must have valid binding position.
  int trampoline_pos = kInvalidSlotPos;
  bool is_internal = false;
  if (L->is_linked() && !trampoline_emitted_) {
    // The label is being resolved, so it no longer needs a reserved
    // trampoline slot; relax the pool-emission trigger accordingly.
    unbound_labels_count_--;
    next_buffer_check_ += kTrampolineSlotsSize;
  }

  // Patch each fixup site in L's link chain to target pos.
  while (L->is_linked()) {
    int fixup_pos = L->pos();
    int dist = pos - fixup_pos;
    is_internal = internal_reference_positions_.find(fixup_pos) !=
                  internal_reference_positions_.end();
    next(L, is_internal);  // Call next before overwriting link with target at
                           // fixup_pos.
    Instr instr = instr_at(fixup_pos);
    if (is_internal) {
      target_at_put(fixup_pos, pos, is_internal);
    } else {
      if (IsBranch(instr)) {
        int branch_offset = BranchOffset(instr);
        if (dist > branch_offset) {
          // Target is too far for this branch: bounce via a trampoline.
          if (trampoline_pos == kInvalidSlotPos) {
            trampoline_pos = get_trampoline_entry(fixup_pos);
            CHECK(trampoline_pos != kInvalidSlotPos);
          }
          CHECK((trampoline_pos - fixup_pos) <= branch_offset);
          target_at_put(fixup_pos, trampoline_pos, false);
          fixup_pos = trampoline_pos;
          dist = pos - fixup_pos;
        }
        target_at_put(fixup_pos, pos, false);
      } else {
        DCHECK(IsJ(instr) || IsJal(instr) || IsLui(instr) ||
               IsEmittedConstant(instr));
        target_at_put(fixup_pos, pos, false);
      }
    }
  }
  L->bind_to(pos);

  // Keep track of the last bound label so we don't eliminate any instructions
  // before a bound label.
  if (pos > last_bound_pos_)
    last_bound_pos_ = pos;
}
    921 
    922 
// Binds L to the current assembly position.
void Assembler::bind(Label* L) {
  DCHECK(!L->is_bound());  // Label can only be bound once.
  bind_to(L, pc_offset());
}
    927 
    928 
    929 void Assembler::next(Label* L, bool is_internal) {
    930   DCHECK(L->is_linked());
    931   int link = target_at(L->pos(), is_internal);
    932   if (link == kEndOfChain) {
    933     L->Unuse();
    934   } else {
    935     DCHECK(link >= 0);
    936     L->link_to(link);
    937   }
    938 }
    939 
    940 
// Returns true if a standard 16-bit-offset branch emitted at the current
// position can reach bound label L (with a 4-instruction safety margin).
bool Assembler::is_near(Label* L) {
  DCHECK(L->is_bound());
  return pc_offset() - L->pos() < kMaxBranchOffset - 4 * kInstrSize;
}
    945 
    946 
    947 bool Assembler::is_near(Label* L, OffsetSize bits) {
    948   if (L == nullptr || !L->is_bound()) return true;
    949   return ((pc_offset() - L->pos()) <
    950           (1 << (bits + 2 - 1)) - 1 - 5 * kInstrSize);
    951 }
    952 
    953 
// Variant-aware reachability check: dispatches to the R6 or pre-R6 rule.
bool Assembler::is_near_branch(Label* L) {
  DCHECK(L->is_bound());
  return kArchVariant == kMips64r6 ? is_near_r6(L) : is_near_pre_r6(L);
}
    958 
    959 
    960 int Assembler::BranchOffset(Instr instr) {
    961   // At pre-R6 and for other R6 branches the offset is 16 bits.
    962   int bits = OffsetSize::kOffset16;
    963 
    964   if (kArchVariant == kMips64r6) {
    965     uint32_t opcode = GetOpcodeField(instr);
    966     switch (opcode) {
    967       // Checks BC or BALC.
    968       case BC:
    969       case BALC:
    970         bits = OffsetSize::kOffset26;
    971         break;
    972 
    973       // Checks BEQZC or BNEZC.
    974       case POP66:
    975       case POP76:
    976         if (GetRsField(instr) != 0) bits = OffsetSize::kOffset21;
    977         break;
    978       default:
    979         break;
    980     }
    981   }
    982 
    983   return (1 << (bits + 2 - 1)) - 1;
    984 }
    985 
    986 
// We have to use a temporary register for things that can be relocated even
// if they can be encoded in the MIPS's 16 bits of immediate-offset instruction
// space.  There is no guarantee that the relocated location can be similarly
// encoded.
bool Assembler::MustUseReg(RelocInfo::Mode rmode) {
  // Any relocation mode other than NONE may be rewritten later.
  return !RelocInfo::IsNone(rmode);
}
    994 
// R-type encoder: packs rs/rt/rd, a 5-bit shift amount and a function code.
void Assembler::GenInstrRegister(Opcode opcode,
                                 Register rs,
                                 Register rt,
                                 Register rd,
                                 uint16_t sa,
                                 SecondaryField func) {
  DCHECK(rd.is_valid() && rs.is_valid() && rt.is_valid() && is_uint5(sa));
  Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift)
      | (rd.code() << kRdShift) | (sa << kSaShift) | func;
  emit(instr);
}


// Bit-field encoder (ins/ext style): msb goes in the rd field, lsb in sa.
void Assembler::GenInstrRegister(Opcode opcode,
                                 Register rs,
                                 Register rt,
                                 uint16_t msb,
                                 uint16_t lsb,
                                 SecondaryField func) {
  DCHECK(rs.is_valid() && rt.is_valid() && is_uint5(msb) && is_uint5(lsb));
  Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift)
      | (msb << kRdShift) | (lsb << kSaShift) | func;
  emit(instr);
}


// FPU three-operand encoder: fmt selects the format, ft/fs/fd are packed.
void Assembler::GenInstrRegister(Opcode opcode,
                                 SecondaryField fmt,
                                 FPURegister ft,
                                 FPURegister fs,
                                 FPURegister fd,
                                 SecondaryField func) {
  DCHECK(fd.is_valid() && fs.is_valid() && ft.is_valid());
  Instr instr = opcode | fmt | (ft.code() << kFtShift) | (fs.code() << kFsShift)
      | (fd.code() << kFdShift) | func;
  emit(instr);
}


// FPU four-operand encoder (fr/ft/fs/fd), e.g. for fused operations.
void Assembler::GenInstrRegister(Opcode opcode,
                                 FPURegister fr,
                                 FPURegister ft,
                                 FPURegister fs,
                                 FPURegister fd,
                                 SecondaryField func) {
  DCHECK(fd.is_valid() && fr.is_valid() && fs.is_valid() && ft.is_valid());
  Instr instr = opcode | (fr.code() << kFrShift) | (ft.code() << kFtShift)
      | (fs.code() << kFsShift) | (fd.code() << kFdShift) | func;
  emit(instr);
}


// Mixed GPR/FPU encoder: general register rt with FPU registers fs/fd.
void Assembler::GenInstrRegister(Opcode opcode,
                                 SecondaryField fmt,
                                 Register rt,
                                 FPURegister fs,
                                 FPURegister fd,
                                 SecondaryField func) {
  DCHECK(fd.is_valid() && fs.is_valid() && rt.is_valid());
  Instr instr = opcode | fmt | (rt.code() << kRtShift)
      | (fs.code() << kFsShift) | (fd.code() << kFdShift) | func;
  emit(instr);
}


// FPU control-register encoder (e.g. for moves to/from FCSR).
void Assembler::GenInstrRegister(Opcode opcode,
                                 SecondaryField fmt,
                                 Register rt,
                                 FPUControlRegister fs,
                                 SecondaryField func) {
  DCHECK(fs.is_valid() && rt.is_valid());
  Instr instr =
      opcode | fmt | (rt.code() << kRtShift) | (fs.code() << kFsShift) | func;
  emit(instr);
}
   1070 
   1071 
// Instructions with immediate value.
// Registers are in the order of the instruction encoding, from left to right.

// I-type encoder with rs, rt and a 16-bit immediate.
void Assembler::GenInstrImmediate(Opcode opcode, Register rs, Register rt,
                                  int32_t j,
                                  CompactBranchType is_compact_branch) {
  DCHECK(rs.is_valid() && rt.is_valid() && (is_int16(j) || is_uint16(j)));
  Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift)
      | (j & kImm16Mask);
  emit(instr, is_compact_branch);
}


// I-type encoder where the rt slot carries a secondary-field selector
// (REGIMM-style instructions).
void Assembler::GenInstrImmediate(Opcode opcode, Register rs, SecondaryField SF,
                                  int32_t j,
                                  CompactBranchType is_compact_branch) {
  DCHECK(rs.is_valid() && (is_int16(j) || is_uint16(j)));
  Instr instr = opcode | (rs.code() << kRsShift) | SF | (j & kImm16Mask);
  emit(instr, is_compact_branch);
}


// I-type encoder with a GPR base (rs), an FPU register (ft) and a 16-bit
// immediate — used for FPU load/store/branch forms.
void Assembler::GenInstrImmediate(Opcode opcode, Register rs, FPURegister ft,
                                  int32_t j,
                                  CompactBranchType is_compact_branch) {
  DCHECK(rs.is_valid() && ft.is_valid() && (is_int16(j) || is_uint16(j)));
  Instr instr = opcode | (rs.code() << kRsShift) | (ft.code() << kFtShift)
      | (j & kImm16Mask);
  emit(instr, is_compact_branch);
}


// Encoder for a 21-bit signed offset (R6 compact branches, e.g. BEQZC).
void Assembler::GenInstrImmediate(Opcode opcode, Register rs, int32_t offset21,
                                  CompactBranchType is_compact_branch) {
  DCHECK(rs.is_valid() && (is_int21(offset21)));
  Instr instr = opcode | (rs.code() << kRsShift) | (offset21 & kImm21Mask);
  emit(instr, is_compact_branch);
}


// Encoder for a 21-bit unsigned offset.
void Assembler::GenInstrImmediate(Opcode opcode, Register rs,
                                  uint32_t offset21) {
  DCHECK(rs.is_valid() && (is_uint21(offset21)));
  Instr instr = opcode | (rs.code() << kRsShift) | (offset21 & kImm21Mask);
  emit(instr);
}


// Encoder for a 26-bit signed offset (R6 BC/BALC).
void Assembler::GenInstrImmediate(Opcode opcode, int32_t offset26,
                                  CompactBranchType is_compact_branch) {
  DCHECK(is_int26(offset26));
  Instr instr = opcode | (offset26 & kImm26Mask);
  emit(instr, is_compact_branch);
}
   1125 
   1126 
// J-type encoder: packs a 26-bit target field and keeps the trampoline
// pool from splitting the instruction and its delay slot.
void Assembler::GenInstrJump(Opcode opcode,
                             uint32_t address) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  DCHECK(is_uint26(address));
  Instr instr = opcode | address;
  emit(instr);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}
   1135 
   1136 
   1137 // Returns the next free trampoline entry.
   1138 int32_t Assembler::get_trampoline_entry(int32_t pos) {
   1139   int32_t trampoline_entry = kInvalidSlotPos;
   1140   if (!internal_trampoline_exception_) {
   1141     if (trampoline_.start() > pos) {
   1142      trampoline_entry = trampoline_.take_slot();
   1143     }
   1144 
   1145     if (kInvalidSlotPos == trampoline_entry) {
   1146       internal_trampoline_exception_ = true;
   1147     }
   1148   }
   1149   return trampoline_entry;
   1150 }
   1151 
   1152 
// Returns the absolute address of label L's target (buffer base plus the
// bound position). For an unbound label, links the current position into
// L's chain and returns the kEndOfJumpChain placeholder.
uint64_t Assembler::jump_address(Label* L) {
  int64_t target_pos;
  if (L->is_bound()) {
    target_pos = L->pos();
  } else {
    if (L->is_linked()) {
      target_pos = L->pos();  // L's link.
      L->link_to(pc_offset());
    } else {
      L->link_to(pc_offset());
      return kEndOfJumpChain;
    }
  }
  uint64_t imm = reinterpret_cast<uint64_t>(buffer_) + target_pos;
  DCHECK((imm & 3) == 0);  // Jump targets must be 4-byte aligned.

  return imm;
}
   1171 
   1172 
// Returns the pc-relative byte offset to label L, or kEndOfJumpChain for an
// unbound label (linking the current position into L's chain).
// `pad` shifts the reference point by one instruction when the previous
// instruction was a compact branch — presumably to account for an extra
// instruction emitted in that case; confirm against emit().
uint64_t Assembler::jump_offset(Label* L) {
  int64_t target_pos;
  int32_t pad = IsPrevInstrCompactBranch() ? kInstrSize : 0;

  if (L->is_bound()) {
    target_pos = L->pos();
  } else {
    if (L->is_linked()) {
      target_pos = L->pos();  // L's link.
      L->link_to(pc_offset() + pad);
    } else {
      L->link_to(pc_offset() + pad);
      return kEndOfJumpChain;
    }
  }
  int64_t imm = target_pos - (pc_offset() + pad);
  DCHECK((imm & 3) == 0);  // Offsets must be 4-byte aligned.

  return static_cast<uint64_t>(imm);
}
   1193 
   1194 
// Computes the branch offset to label L for a branch with a `bits`-wide
// offset field. For a previously-unlinked label, links the current position
// and returns kEndOfChain; it also reserves a trampoline slot by adjusting
// the unbound-label bookkeeping.
int32_t Assembler::branch_offset_helper(Label* L, OffsetSize bits) {
  int32_t target_pos;
  int32_t pad = IsPrevInstrCompactBranch() ? kInstrSize : 0;

  if (L->is_bound()) {
    target_pos = L->pos();
  } else {
    if (L->is_linked()) {
      target_pos = L->pos();
      L->link_to(pc_offset() + pad);
    } else {
      L->link_to(pc_offset() + pad);
      if (!trampoline_emitted_) {
        // New unbound label: tighten the trigger for trampoline emission.
        unbound_labels_count_++;
        next_buffer_check_ -= kTrampolineSlotsSize;
      }
      return kEndOfChain;
    }
  }

  // Branch offsets are relative to the delay-slot pc (kBranchPCOffset).
  int32_t offset = target_pos - (pc_offset() + kBranchPCOffset + pad);
  DCHECK(is_intn(offset, bits + 2));
  DCHECK((offset & 3) == 0);  // Must be 4-byte aligned.

  return offset;
}
   1221 
   1222 
// Stores label L's position into the instruction slot at at_offset.
// Bound labels store an absolute code-object offset; unbound labels store
// a 16-bit link and chain at_offset into L.
void Assembler::label_at_put(Label* L, int at_offset) {
  int target_pos;
  if (L->is_bound()) {
    target_pos = L->pos();
    instr_at_put(at_offset, target_pos + (Code::kHeaderSize - kHeapObjectTag));
  } else {
    if (L->is_linked()) {
      target_pos = L->pos();  // L's link.
      int32_t imm18 = target_pos - at_offset;
      DCHECK((imm18 & 3) == 0);  // Link distance must be 4-byte aligned.
      int32_t imm16 = imm18 >> 2;
      DCHECK(is_int16(imm16));
      instr_at_put(at_offset, (imm16 & kImm16Mask));
    } else {
      // First reference to this label: store 0 as end-of-chain marker.
      target_pos = kEndOfChain;
      instr_at_put(at_offset, 0);
      if (!trampoline_emitted_) {
        unbound_labels_count_++;
        next_buffer_check_ -= kTrampolineSlotsSize;
      }
    }
    L->link_to(at_offset);
  }
}
   1247 
   1248 
//------- Branch and jump instructions --------

// b: unconditional branch, encoded as `beq zero_reg, zero_reg, offset`.
void Assembler::b(int16_t offset) {
  beq(zero_reg, zero_reg, offset);
}


// bal: branch-and-link, encoded as `bgezal zero_reg, offset`.
void Assembler::bal(int16_t offset) {
  bgezal(zero_reg, offset);
}
   1259 
   1260 
// bc: R6 compact branch with 26-bit offset (no delay slot).
void Assembler::bc(int32_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrImmediate(BC, offset, CompactBranchType::COMPACT_BRANCH);
}


// balc: R6 compact branch-and-link with 26-bit offset.
void Assembler::balc(int32_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrImmediate(BALC, offset, CompactBranchType::COMPACT_BRANCH);
}
   1271 
   1272 
// beq: branch if rs == rt; trampoline pool is blocked over the delay slot.
void Assembler::beq(Register rs, Register rt, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(BEQ, rs, rt, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


// bgez: branch if rs >= 0 (REGIMM encoding).
void Assembler::bgez(Register rs, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(REGIMM, rs, BGEZ, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}
   1285 
   1286 
// bgezc: R6 compact branch if rt >= 0, encoded via BLEZL with rs == rt.
void Assembler::bgezc(Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!(rt.is(zero_reg)));
  GenInstrImmediate(BLEZL, rt, rt, offset, CompactBranchType::COMPACT_BRANCH);
}


// bgeuc: R6 compact branch if rs >= rt (unsigned); rs and rt must be
// distinct, non-zero registers to select this BLEZ-opcode variant.
void Assembler::bgeuc(Register rs, Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!(rs.is(zero_reg)));
  DCHECK(!(rt.is(zero_reg)));
  DCHECK(rs.code() != rt.code());
  GenInstrImmediate(BLEZ, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
}


// bgec: R6 compact branch if rs >= rt (signed); same encoding constraints
// as bgeuc but on the BLEZL opcode.
void Assembler::bgec(Register rs, Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!(rs.is(zero_reg)));
  DCHECK(!(rt.is(zero_reg)));
  DCHECK(rs.code() != rt.code());
  GenInstrImmediate(BLEZL, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
}
   1310 
   1311 
// bgezal: branch-and-link if rs >= 0. On R6 only the rs == zero form (bal)
// remains valid.
void Assembler::bgezal(Register rs, int16_t offset) {
  DCHECK(kArchVariant != kMips64r6 || rs.is(zero_reg));
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(REGIMM, rs, BGEZAL, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


// bgtz: branch if rs > 0.
void Assembler::bgtz(Register rs, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(BGTZ, rs, zero_reg, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}
   1325 
   1326 
// bgtzc: R6 compact branch if rt > 0 (BGTZL opcode with rs == 0).
void Assembler::bgtzc(Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!(rt.is(zero_reg)));
  GenInstrImmediate(BGTZL, zero_reg, rt, offset,
                    CompactBranchType::COMPACT_BRANCH);
}


// blez: branch if rs <= 0.
void Assembler::blez(Register rs, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(BLEZ, rs, zero_reg, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


// blezc: R6 compact branch if rt <= 0 (BLEZL opcode with rs == 0).
void Assembler::blezc(Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!(rt.is(zero_reg)));
  GenInstrImmediate(BLEZL, zero_reg, rt, offset,
                    CompactBranchType::COMPACT_BRANCH);
}
   1348 
   1349 
// bltzc: R6 compact branch if rt < 0 (BGTZL opcode with rs == rt).
void Assembler::bltzc(Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!rt.is(zero_reg));
  GenInstrImmediate(BGTZL, rt, rt, offset, CompactBranchType::COMPACT_BRANCH);
}


// bltuc: R6 compact branch if rs < rt (unsigned); registers must be
// distinct and non-zero to select this BGTZ-opcode variant.
void Assembler::bltuc(Register rs, Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!(rs.is(zero_reg)));
  DCHECK(!(rt.is(zero_reg)));
  DCHECK(rs.code() != rt.code());
  GenInstrImmediate(BGTZ, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
}


// bltc: R6 compact branch if rs < rt (signed), BGTZL-opcode variant.
void Assembler::bltc(Register rs, Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!rs.is(zero_reg));
  DCHECK(!rt.is(zero_reg));
  DCHECK(rs.code() != rt.code());
  GenInstrImmediate(BGTZL, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
}
   1373 
   1374 
// bltz: branch if rs < 0 (REGIMM encoding).
void Assembler::bltz(Register rs, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(REGIMM, rs, BLTZ, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


// bltzal: branch-and-link if rs < 0; on R6 only the rs == zero form is valid.
void Assembler::bltzal(Register rs, int16_t offset) {
  DCHECK(kArchVariant != kMips64r6 || rs.is(zero_reg));
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(REGIMM, rs, BLTZAL, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


// bne: branch if rs != rt.
void Assembler::bne(Register rs, Register rt, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(BNE, rs, rt, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}
   1395 
   1396 
// bovc: R6 compact branch on signed-add overflow (ADDI opcode reuse).
// The operands are canonicalized so the higher-numbered register is
// emitted in the rs field.
void Assembler::bovc(Register rs, Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  if (rs.code() >= rt.code()) {
    GenInstrImmediate(ADDI, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
  } else {
    GenInstrImmediate(ADDI, rt, rs, offset, CompactBranchType::COMPACT_BRANCH);
  }
}


// bnvc: R6 compact branch on no signed-add overflow (DADDI opcode reuse),
// with the same operand canonicalization as bovc.
void Assembler::bnvc(Register rs, Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  if (rs.code() >= rt.code()) {
    GenInstrImmediate(DADDI, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
  } else {
    GenInstrImmediate(DADDI, rt, rs, offset, CompactBranchType::COMPACT_BRANCH);
  }
}
   1415 
   1416 
// blezalc: R6 compact branch-and-link if rt <= 0 (BLEZ opcode, rs == 0).
void Assembler::blezalc(Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!(rt.is(zero_reg)));
  GenInstrImmediate(BLEZ, zero_reg, rt, offset,
                    CompactBranchType::COMPACT_BRANCH);
}


// bgezalc: R6 compact branch-and-link if rt >= 0 (BLEZ opcode, rs == rt).
void Assembler::bgezalc(Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!(rt.is(zero_reg)));
  GenInstrImmediate(BLEZ, rt, rt, offset, CompactBranchType::COMPACT_BRANCH);
}


// bgezall: pre-R6 branch-likely-and-link if rs >= 0 (REGIMM encoding).
void Assembler::bgezall(Register rs, int16_t offset) {
  DCHECK(kArchVariant != kMips64r6);
  DCHECK(!(rs.is(zero_reg)));
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(REGIMM, rs, BGEZALL, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


// bltzalc: R6 compact branch-and-link if rt < 0 (BGTZ opcode, rs == rt).
void Assembler::bltzalc(Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!(rt.is(zero_reg)));
  GenInstrImmediate(BGTZ, rt, rt, offset, CompactBranchType::COMPACT_BRANCH);
}


// bgtzalc: R6 compact branch-and-link if rt > 0 (BGTZ opcode, rs == 0).
void Assembler::bgtzalc(Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!(rt.is(zero_reg)));
  GenInstrImmediate(BGTZ, zero_reg, rt, offset,
                    CompactBranchType::COMPACT_BRANCH);
}


// beqzalc: R6 compact branch-and-link if rt == 0 (ADDI opcode, rs == 0).
void Assembler::beqzalc(Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!(rt.is(zero_reg)));
  GenInstrImmediate(ADDI, zero_reg, rt, offset,
                    CompactBranchType::COMPACT_BRANCH);
}


// bnezalc: R6 compact branch-and-link if rt != 0 (DADDI opcode, rs == 0).
void Assembler::bnezalc(Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!(rt.is(zero_reg)));
  GenInstrImmediate(DADDI, zero_reg, rt, offset,
                    CompactBranchType::COMPACT_BRANCH);
}
   1470 
   1471 
// beqc: R6 compact branch if rs == rt; operands are canonicalized so the
// lower-numbered register is emitted first (ADDI opcode reuse).
void Assembler::beqc(Register rs, Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(rs.code() != rt.code() && rs.code() != 0 && rt.code() != 0);
  if (rs.code() < rt.code()) {
    GenInstrImmediate(ADDI, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
  } else {
    GenInstrImmediate(ADDI, rt, rs, offset, CompactBranchType::COMPACT_BRANCH);
  }
}


// beqzc: R6 compact branch if rs == 0, with 21-bit offset (POP66 opcode).
void Assembler::beqzc(Register rs, int32_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!(rs.is(zero_reg)));
  GenInstrImmediate(POP66, rs, offset, CompactBranchType::COMPACT_BRANCH);
}


// bnec: R6 compact branch if rs != rt; same canonicalization as beqc
// (DADDI opcode reuse).
void Assembler::bnec(Register rs, Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(rs.code() != rt.code() && rs.code() != 0 && rt.code() != 0);
  if (rs.code() < rt.code()) {
    GenInstrImmediate(DADDI, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
  } else {
    GenInstrImmediate(DADDI, rt, rs, offset, CompactBranchType::COMPACT_BRANCH);
  }
}


// bnezc: R6 compact branch if rs != 0, with 21-bit offset (POP76 opcode).
void Assembler::bnezc(Register rs, int32_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!(rs.is(zero_reg)));
  GenInstrImmediate(POP76, rs, offset, CompactBranchType::COMPACT_BRANCH);
}
   1506 
   1507 
// j: absolute jump; the target's word offset is packed into the 26-bit field.
void Assembler::j(int64_t target) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrJump(J, static_cast<uint32_t>(target >> 2) & kImm26Mask);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}
   1513 
   1514 
// j to a label: for a bound target, emits a raw-marked jump (kJRawMark)
// that is resolved to a real j when the code is committed; otherwise the
// placeholder offset links the label chain via j(int64_t).
void Assembler::j(Label* target) {
  uint64_t imm = jump_offset(target);
  if (target->is_bound()) {
    BlockTrampolinePoolScope block_trampoline_pool(this);
    GenInstrJump(static_cast<Opcode>(kJRawMark),
                 static_cast<uint32_t>(imm >> 2) & kImm26Mask);
    BlockTrampolinePoolFor(1);  // For associated delay slot.
  } else {
    j(imm);
  }
}


// jal to a label: same scheme as j(Label*) but with kJalRawMark / jal.
void Assembler::jal(Label* target) {
  uint64_t imm = jump_offset(target);
  if (target->is_bound()) {
    BlockTrampolinePoolScope block_trampoline_pool(this);
    GenInstrJump(static_cast<Opcode>(kJalRawMark),
                 static_cast<uint32_t>(imm >> 2) & kImm26Mask);
    BlockTrampolinePoolFor(1);  // For associated delay slot.
  } else {
    jal(imm);
  }
}
   1539 
   1540 
// jr: jump to register. On R6 the dedicated JR encoding is gone, so it is
// emitted as jalr with rd == zero_reg.
void Assembler::jr(Register rs) {
  if (kArchVariant != kMips64r6) {
    BlockTrampolinePoolScope block_trampoline_pool(this);
    GenInstrRegister(SPECIAL, rs, zero_reg, zero_reg, 0, JR);
    BlockTrampolinePoolFor(1);  // For associated delay slot.
  } else {
    jalr(rs, zero_reg);
  }
}


// jal: absolute jump-and-link; target's word offset goes in the 26-bit field.
void Assembler::jal(int64_t target) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrJump(JAL, static_cast<uint32_t>(target >> 2) & kImm26Mask);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}
   1557 
   1558 
// jalr: jump to rs, storing the return address in rd (rd must differ
// from rs).
void Assembler::jalr(Register rs, Register rd) {
  DCHECK(rs.code() != rd.code());
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrRegister(SPECIAL, rs, zero_reg, rd, 0, JALR);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


// jic: R6 jump-indexed-compact to rt + offset (POP66 opcode, rs == 0).
void Assembler::jic(Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrImmediate(POP66, zero_reg, rt, offset);
}


// jialc: R6 jump-indexed-and-link-compact (POP76 opcode, rs == 0).
void Assembler::jialc(Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrImmediate(POP76, zero_reg, rt, offset);
}
   1577 
   1578 
// -------Data-processing-instructions---------

// Arithmetic.

// addu: rd = rs + rt (32-bit add, no overflow trap).
void Assembler::addu(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, ADDU);
}


// addiu: rd = rs + imm. Note: the destination parameter is named rd here
// but is emitted in the instruction's rt field.
void Assembler::addiu(Register rd, Register rs, int32_t j) {
  GenInstrImmediate(ADDIU, rs, rd, j);
}


// subu: rd = rs - rt (32-bit subtract, no overflow trap).
void Assembler::subu(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SUBU);
}


// mul: rd = low word of rs * rt. Uses the R6 three-operand encoding when
// available, otherwise the SPECIAL2 MUL.
void Assembler::mul(Register rd, Register rs, Register rt) {
  if (kArchVariant == kMips64r6) {
      GenInstrRegister(SPECIAL, rs, rt, rd, MUL_OP, MUL_MUH);
  } else {
      GenInstrRegister(SPECIAL2, rs, rt, rd, 0, MUL);
  }
}


// muh: rd = high word of rs * rt (signed, R6 only).
void Assembler::muh(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, MUH_OP, MUL_MUH);
}


// mulu: rd = low word of rs * rt (unsigned, R6 only).
void Assembler::mulu(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, MUL_OP, MUL_MUH_U);
}


// muhu: rd = high word of rs * rt (unsigned, R6 only).
void Assembler::muhu(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, MUH_OP, MUL_MUH_U);
}


// dmul: 64-bit rd = low part of rs * rt (signed, R6 only).
void Assembler::dmul(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, MUL_OP, D_MUL_MUH);
}


// dmuh: 64-bit rd = high part of rs * rt (signed, R6 only).
void Assembler::dmuh(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, MUH_OP, D_MUL_MUH);
}


// dmulu: 64-bit rd = low part of rs * rt (unsigned, R6 only).
void Assembler::dmulu(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, MUL_OP, D_MUL_MUH_U);
}


// dmuhu: 64-bit rd = high part of rs * rt (unsigned, R6 only).
void Assembler::dmuhu(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, MUH_OP, D_MUL_MUH_U);
}


// mult: HI/LO = rs * rt (signed, pre-R6 only).
void Assembler::mult(Register rs, Register rt) {
  DCHECK(kArchVariant != kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, MULT);
}


// multu: HI/LO = rs * rt (unsigned, pre-R6 only).
void Assembler::multu(Register rs, Register rt) {
  DCHECK(kArchVariant != kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, MULTU);
}


// daddiu: 64-bit rd = rs + imm. As with addiu, the destination parameter
// is named rd but emitted in the rt field.
void Assembler::daddiu(Register rd, Register rs, int32_t j) {
  GenInstrImmediate(DADDIU, rs, rd, j);
}
   1664 
   1665 
   1666 void Assembler::div(Register rs, Register rt) {
   1667   GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DIV);
   1668 }
   1669 
   1670 
   1671 void Assembler::div(Register rd, Register rs, Register rt) {
   1672   DCHECK(kArchVariant == kMips64r6);
   1673   GenInstrRegister(SPECIAL, rs, rt, rd, DIV_OP, DIV_MOD);
   1674 }
   1675 
   1676 
   1677 void Assembler::mod(Register rd, Register rs, Register rt) {
   1678   DCHECK(kArchVariant == kMips64r6);
   1679   GenInstrRegister(SPECIAL, rs, rt, rd, MOD_OP, DIV_MOD);
   1680 }
   1681 
   1682 
   1683 void Assembler::divu(Register rs, Register rt) {
   1684   GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DIVU);
   1685 }
   1686 
   1687 
   1688 void Assembler::divu(Register rd, Register rs, Register rt) {
   1689   DCHECK(kArchVariant == kMips64r6);
   1690   GenInstrRegister(SPECIAL, rs, rt, rd, DIV_OP, DIV_MOD_U);
   1691 }
   1692 
   1693 
   1694 void Assembler::modu(Register rd, Register rs, Register rt) {
   1695   DCHECK(kArchVariant == kMips64r6);
   1696   GenInstrRegister(SPECIAL, rs, rt, rd, MOD_OP, DIV_MOD_U);
   1697 }
   1698 
   1699 
// Doubleword add (no overflow trap): rd = rs + rt.
void Assembler::daddu(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, DADDU);
}


// Doubleword subtract (no overflow trap): rd = rs - rt.
void Assembler::dsubu(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, DSUBU);
}
   1708 
   1709 
// Doubleword (64-bit operand) multiply/divide family. The two-operand forms
// deposit their 128-bit/paired results in HI/LO; the three-operand forms are
// MIPS64r6 only.

// Signed 64x64 multiply into HI/LO.
void Assembler::dmult(Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DMULT);
}


// Unsigned 64x64 multiply into HI/LO.
void Assembler::dmultu(Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DMULTU);
}


// Signed 64-bit divide: quotient in LO, remainder in HI.
void Assembler::ddiv(Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DDIV);
}


// r6 signed 64-bit divide: rd = rs / rt.
void Assembler::ddiv(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, DIV_OP, D_DIV_MOD);
}


// r6 signed 64-bit modulo: rd = rs % rt.
void Assembler::dmod(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, MOD_OP, D_DIV_MOD);
}


// Unsigned 64-bit divide: quotient in LO, remainder in HI.
void Assembler::ddivu(Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DDIVU);
}


// r6 unsigned 64-bit divide: rd = rs / rt.
void Assembler::ddivu(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, DIV_OP, D_DIV_MOD_U);
}


// r6 unsigned 64-bit modulo: rd = rs % rt.
void Assembler::dmodu(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, MOD_OP, D_DIV_MOD_U);
}
   1752 
   1753 
   1754 // Logical.
   1755 
// Bitwise AND: rd = rs & rt.
void Assembler::and_(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, AND);
}


// Bitwise AND with a zero-extended 16-bit immediate: rt = rs & j.
void Assembler::andi(Register rt, Register rs, int32_t j) {
  DCHECK(is_uint16(j));
  GenInstrImmediate(ANDI, rs, rt, j);
}


// Bitwise OR: rd = rs | rt.
void Assembler::or_(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, OR);
}


// Bitwise OR with a zero-extended 16-bit immediate: rt = rs | j.
void Assembler::ori(Register rt, Register rs, int32_t j) {
  DCHECK(is_uint16(j));
  GenInstrImmediate(ORI, rs, rt, j);
}


// Bitwise XOR: rd = rs ^ rt.
void Assembler::xor_(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, XOR);
}


// Bitwise XOR with a zero-extended 16-bit immediate: rt = rs ^ j.
void Assembler::xori(Register rt, Register rs, int32_t j) {
  DCHECK(is_uint16(j));
  GenInstrImmediate(XORI, rs, rt, j);
}


// Bitwise NOR: rd = ~(rs | rt).
void Assembler::nor(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, NOR);
}
   1792 
   1793 
   1794 // Shifts.
// Shift word left logical: rd = rt << sa (sa masked to 5 bits below).
// |coming_from_nop| permits the all-zero encoding that the nop()/MarkCode()
// pseudo-instructions deliberately emit.
void Assembler::sll(Register rd,
                    Register rt,
                    uint16_t sa,
                    bool coming_from_nop) {
  // Don't allow nop instructions in the form sll zero_reg, zero_reg to be
  // generated using the sll instruction. They must be generated using
  // nop(int/NopMarkerTypes) or MarkCode(int/NopMarkerTypes) pseudo
  // instructions.
  DCHECK(coming_from_nop || !(rd.is(zero_reg) && rt.is(zero_reg)));
  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, SLL);
}
   1806 
   1807 
// Shift word left logical by register: rd = rt << rs.
void Assembler::sllv(Register rd, Register rt, Register rs) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLLV);
}


// Shift word right logical by immediate: rd = rt >> sa (zero fill).
void Assembler::srl(Register rd, Register rt, uint16_t sa) {
  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, SRL);
}


// Shift word right logical by register: rd = rt >> rs (zero fill).
void Assembler::srlv(Register rd, Register rt, Register rs) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SRLV);
}


// Shift word right arithmetic by immediate: rd = rt >> sa (sign fill).
void Assembler::sra(Register rd, Register rt, uint16_t sa) {
  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, SRA);
}


// Shift word right arithmetic by register: rd = rt >> rs (sign fill).
void Assembler::srav(Register rd, Register rt, Register rs) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SRAV);
}
   1831 
   1832 
   1833 void Assembler::rotr(Register rd, Register rt, uint16_t sa) {
   1834   // Should be called via MacroAssembler::Ror.
   1835   DCHECK(rd.is_valid() && rt.is_valid() && is_uint5(sa));
   1836   DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
   1837   Instr instr = SPECIAL | (1 << kRsShift) | (rt.code() << kRtShift)
   1838       | (rd.code() << kRdShift) | (sa << kSaShift) | SRL;
   1839   emit(instr);
   1840 }
   1841 
   1842 
// Rotate word right by register: rd = rt rotated right by rs bits.
// ROTRV shares the SRLV function code; a 1 in the sa field selects rotation.
void Assembler::rotrv(Register rd, Register rt, Register rs) {
  // Should be called via MacroAssembler::Ror.
  DCHECK(rd.is_valid() && rt.is_valid() && rs.is_valid());
  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
  Instr instr = SPECIAL | (rs.code() << kRsShift) | (rt.code() << kRtShift)
     | (rd.code() << kRdShift) | (1 << kSaShift) | SRLV;
  emit(instr);
}
   1851 
   1852 
// Doubleword shift left logical by immediate (shift amounts 0-31): rd = rt << sa.
void Assembler::dsll(Register rd, Register rt, uint16_t sa) {
  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, DSLL);
}


// Doubleword shift left logical by register: rd = rt << rs.
void Assembler::dsllv(Register rd, Register rt, Register rs) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, DSLLV);
}


// Doubleword shift right logical by immediate (0-31): rd = rt >> sa (zero fill).
void Assembler::dsrl(Register rd, Register rt, uint16_t sa) {
  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, DSRL);
}


// Doubleword shift right logical by register: rd = rt >> rs (zero fill).
void Assembler::dsrlv(Register rd, Register rt, Register rs) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, DSRLV);
}
   1871 
   1872 
// Doubleword rotate right by immediate (0-31): encoded as DSRL with a 1 in
// the rs field.
void Assembler::drotr(Register rd, Register rt, uint16_t sa) {
  DCHECK(rd.is_valid() && rt.is_valid() && is_uint5(sa));
  Instr instr = SPECIAL | (1 << kRsShift) | (rt.code() << kRtShift)
      | (rd.code() << kRdShift) | (sa << kSaShift) | DSRL;
  emit(instr);
}

// Doubleword rotate right by immediate + 32 (32-63): DSRL32 form.
void Assembler::drotr32(Register rd, Register rt, uint16_t sa) {
  DCHECK(rd.is_valid() && rt.is_valid() && is_uint5(sa));
  Instr instr = SPECIAL | (1 << kRsShift) | (rt.code() << kRtShift) |
                (rd.code() << kRdShift) | (sa << kSaShift) | DSRL32;
  emit(instr);
}

// Doubleword rotate right by register: DSRLV with a 1 in the sa field.
void Assembler::drotrv(Register rd, Register rt, Register rs) {
  DCHECK(rd.is_valid() && rt.is_valid() && rs.is_valid() );
  Instr instr = SPECIAL | (rs.code() << kRsShift) | (rt.code() << kRtShift)
      | (rd.code() << kRdShift) | (1 << kSaShift) | DSRLV;
  emit(instr);
}
   1893 
   1894 
// Doubleword shift right arithmetic by immediate (0-31): sign fill.
void Assembler::dsra(Register rd, Register rt, uint16_t sa) {
  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, DSRA);
}


// Doubleword shift right arithmetic by register: sign fill.
void Assembler::dsrav(Register rd, Register rt, Register rs) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, DSRAV);
}


// Doubleword shift left logical by immediate + 32 (shift amounts 32-63).
void Assembler::dsll32(Register rd, Register rt, uint16_t sa) {
  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, DSLL32);
}


// Doubleword shift right logical by immediate + 32 (32-63): zero fill.
void Assembler::dsrl32(Register rd, Register rt, uint16_t sa) {
  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, DSRL32);
}


// Doubleword shift right arithmetic by immediate + 32 (32-63): sign fill.
void Assembler::dsra32(Register rd, Register rt, uint16_t sa) {
  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, DSRA32);
}
   1918 
   1919 
// r6 load scaled address (word): encodes the 2-bit sa field directly.
// Per the MIPS64r6 ISA the effective shift applied to rs is sa + 1, so the
// caller supplies the desired shift minus one.
void Assembler::lsa(Register rd, Register rt, Register rs, uint8_t sa) {
  DCHECK(rd.is_valid() && rt.is_valid() && rs.is_valid());
  DCHECK(sa <= 3);
  DCHECK(kArchVariant == kMips64r6);
  Instr instr = SPECIAL | rs.code() << kRsShift | rt.code() << kRtShift |
                rd.code() << kRdShift | sa << kSaShift | LSA;
  emit(instr);
}


// r6 load scaled address (doubleword); same sa convention as lsa() above.
void Assembler::dlsa(Register rd, Register rt, Register rs, uint8_t sa) {
  DCHECK(rd.is_valid() && rt.is_valid() && rs.is_valid());
  DCHECK(sa <= 3);
  DCHECK(kArchVariant == kMips64r6);
  Instr instr = SPECIAL | rs.code() << kRsShift | rt.code() << kRtShift |
                rd.code() << kRdShift | sa << kSaShift | DLSA;
  emit(instr);
}
   1938 
   1939 
   1940 // ------------Memory-instructions-------------
   1941 
   1942 // Helper for base-reg + offset, when offset is larger than int16.
// Helper for base-reg + offset, when offset is larger than int16.
// Materializes at = sign_extend32(src.offset_) + src.rm() so callers can
// address through MemOperand(at, 0). Clobbers |at|, hence the DCHECK that
// the base register is not |at| itself.
void Assembler::LoadRegPlusOffsetToAt(const MemOperand& src) {
  DCHECK(!src.rm().is(at));
  DCHECK(is_int32(src.offset_));
  // Upper 16 bits via daddiu (sign-extends), then shift into position...
  daddiu(at, zero_reg, (src.offset_ >> kLuiShift) & kImm16Mask);
  dsll(at, at, kLuiShift);
  ori(at, at, src.offset_ & kImm16Mask);  // Load 32-bit offset.
  daddu(at, at, src.rm());  // Add base register.
}
   1951 
   1952 
// Load byte, sign-extended. Offsets outside int16 are first materialized
// into |at| (see LoadRegPlusOffsetToAt); likewise for all loads below.
void Assembler::lb(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(LB, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to load.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(LB, at, rd, 0);  // Equiv to lb(rd, MemOperand(at, 0));
  }
}


// Load byte, zero-extended.
void Assembler::lbu(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(LBU, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to load.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(LBU, at, rd, 0);  // Equiv to lbu(rd, MemOperand(at, 0));
  }
}


// Load halfword, sign-extended.
void Assembler::lh(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(LH, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to load.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(LH, at, rd, 0);  // Equiv to lh(rd, MemOperand(at, 0));
  }
}


// Load halfword, zero-extended.
void Assembler::lhu(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(LHU, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to load.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(LHU, at, rd, 0);  // Equiv to lhu(rd, MemOperand(at, 0));
  }
}


// Load word, sign-extended to 64 bits.
void Assembler::lw(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(LW, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to load.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(LW, at, rd, 0);  // Equiv to lw(rd, MemOperand(at, 0));
  }
}


// Load word, zero-extended to 64 bits.
void Assembler::lwu(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(LWU, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to load.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(LWU, at, rd, 0);  // Equiv to lwu(rd, MemOperand(at, 0));
  }
}
   2011 
   2012 
// Load word left (unaligned-access helper); r2 only — removed in r6.
void Assembler::lwl(Register rd, const MemOperand& rs) {
  DCHECK(is_int16(rs.offset_));
  DCHECK(kArchVariant == kMips64r2);
  GenInstrImmediate(LWL, rs.rm(), rd, rs.offset_);
}


// Load word right (unaligned-access helper); r2 only — removed in r6.
void Assembler::lwr(Register rd, const MemOperand& rs) {
  DCHECK(is_int16(rs.offset_));
  DCHECK(kArchVariant == kMips64r2);
  GenInstrImmediate(LWR, rs.rm(), rd, rs.offset_);
}
   2025 
   2026 
// Store byte. Large offsets are materialized into |at| first, as with loads.
void Assembler::sb(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(SB, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to store.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(SB, at, rd, 0);  // Equiv to sb(rd, MemOperand(at, 0));
  }
}


// Store halfword.
void Assembler::sh(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(SH, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to store.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(SH, at, rd, 0);  // Equiv to sh(rd, MemOperand(at, 0));
  }
}


// Store word.
void Assembler::sw(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(SW, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to store.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(SW, at, rd, 0);  // Equiv to sw(rd, MemOperand(at, 0));
  }
}
   2055 
   2056 
// Store word left (unaligned-access helper); r2 only — removed in r6.
void Assembler::swl(Register rd, const MemOperand& rs) {
  DCHECK(is_int16(rs.offset_));
  DCHECK(kArchVariant == kMips64r2);
  GenInstrImmediate(SWL, rs.rm(), rd, rs.offset_);
}


// Store word right (unaligned-access helper); r2 only — removed in r6.
void Assembler::swr(Register rd, const MemOperand& rs) {
  DCHECK(is_int16(rs.offset_));
  DCHECK(kArchVariant == kMips64r2);
  GenInstrImmediate(SWR, rs.rm(), rd, rs.offset_);
}
   2069 
   2070 
// Load upper immediate: rd = j << 16 (sign-extended to 64 bits per the ISA).
void Assembler::lui(Register rd, int32_t j) {
  DCHECK(is_uint16(j));
  GenInstrImmediate(LUI, zero_reg, rd, j);
}


// Add upper immediate: rt = rs + (j << 16).
void Assembler::aui(Register rt, Register rs, int32_t j) {
  // This instruction uses same opcode as 'lui'. The difference in encoding is
  // 'lui' has zero reg. for rs field.
  DCHECK(is_uint16(j));
  GenInstrImmediate(LUI, rs, rt, j);
}


// Doubleword add upper immediate: rt = rs + (j << 16), no sign truncation.
void Assembler::daui(Register rt, Register rs, int32_t j) {
  DCHECK(is_uint16(j));
  DCHECK(!rs.is(zero_reg));  // rs == zero_reg is a different (reserved) encoding.
  GenInstrImmediate(DAUI, rs, rt, j);
}


// Doubleword add higher immediate: rs += j << 32 (REGIMM-encoded, in place).
void Assembler::dahi(Register rs, int32_t j) {
  DCHECK(is_uint16(j));
  GenInstrImmediate(REGIMM, rs, DAHI, j);
}


// Doubleword add top immediate: rs += j << 48 (REGIMM-encoded, in place).
void Assembler::dati(Register rs, int32_t j) {
  DCHECK(is_uint16(j));
  GenInstrImmediate(REGIMM, rs, DATI, j);
}
   2102 
   2103 
// Load doubleword left (unaligned-access helper); r2 only — removed in r6.
void Assembler::ldl(Register rd, const MemOperand& rs) {
  DCHECK(is_int16(rs.offset_));
  DCHECK(kArchVariant == kMips64r2);
  GenInstrImmediate(LDL, rs.rm(), rd, rs.offset_);
}


// Load doubleword right (unaligned-access helper); r2 only.
void Assembler::ldr(Register rd, const MemOperand& rs) {
  DCHECK(is_int16(rs.offset_));
  DCHECK(kArchVariant == kMips64r2);
  GenInstrImmediate(LDR, rs.rm(), rd, rs.offset_);
}


// Store doubleword left (unaligned-access helper); r2 only.
void Assembler::sdl(Register rd, const MemOperand& rs) {
  DCHECK(is_int16(rs.offset_));
  DCHECK(kArchVariant == kMips64r2);
  GenInstrImmediate(SDL, rs.rm(), rd, rs.offset_);
}


// Store doubleword right (unaligned-access helper); r2 only.
void Assembler::sdr(Register rd, const MemOperand& rs) {
  DCHECK(is_int16(rs.offset_));
  DCHECK(kArchVariant == kMips64r2);
  GenInstrImmediate(SDR, rs.rm(), rd, rs.offset_);
}
   2130 
   2131 
// Load doubleword. Large offsets are materialized into |at| first.
void Assembler::ld(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(LD, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to load.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(LD, at, rd, 0);  // Equiv to ld(rd, MemOperand(at, 0));
  }
}


// Store doubleword. Large offsets are materialized into |at| first.
void Assembler::sd(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(SD, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to store.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(SD, at, rd, 0);  // Equiv to sd(rd, MemOperand(at, 0));
  }
}
   2150 
   2151 
   2152 // ---------PC-Relative instructions-----------
   2153 
// r6 PC-relative add: rs = PC + (imm19 << 2). The sub-opcode is packed into
// the upper bits of the 21-bit immediate field.
void Assembler::addiupc(Register rs, int32_t imm19) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(rs.is_valid() && is_int19(imm19));
  uint32_t imm21 = ADDIUPC << kImm19Bits | (imm19 & kImm19Mask);
  GenInstrImmediate(PCREL, rs, imm21);
}


// r6 PC-relative load word (sign-extended), 19-bit scaled offset.
void Assembler::lwpc(Register rs, int32_t offset19) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(rs.is_valid() && is_int19(offset19));
  uint32_t imm21 = LWPC << kImm19Bits | (offset19 & kImm19Mask);
  GenInstrImmediate(PCREL, rs, imm21);
}


// r6 PC-relative load word (zero-extended), 19-bit scaled offset.
void Assembler::lwupc(Register rs, int32_t offset19) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(rs.is_valid() && is_int19(offset19));
  uint32_t imm21 = LWUPC << kImm19Bits | (offset19 & kImm19Mask);
  GenInstrImmediate(PCREL, rs, imm21);
}


// r6 PC-relative load doubleword, 18-bit scaled offset.
void Assembler::ldpc(Register rs, int32_t offset18) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(rs.is_valid() && is_int18(offset18));
  uint32_t imm21 = LDPC << kImm18Bits | (offset18 & kImm18Mask);
  GenInstrImmediate(PCREL, rs, imm21);
}


// r6 add upper immediate to PC: rs = PC + (imm16 << 16).
void Assembler::auipc(Register rs, int16_t imm16) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(rs.is_valid());
  uint32_t imm21 = AUIPC << kImm16Bits | (imm16 & kImm16Mask);
  GenInstrImmediate(PCREL, rs, imm21);
}


// r6 aligned add upper immediate to PC (result 64K-aligned).
void Assembler::aluipc(Register rs, int16_t imm16) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(rs.is_valid());
  uint32_t imm21 = ALUIPC << kImm16Bits | (imm16 & kImm16Mask);
  GenInstrImmediate(PCREL, rs, imm21);
}
   2200 
   2201 
   2202 // -------------Misc-instructions--------------
   2203 
   2204 // Break / Trap instructions.
// Emit a BREAK instruction carrying a 20-bit |code| in bits 6-25.
// |break_as_stop| asserts that the code falls in the stop-code range, since
// stop() is implemented on top of break_ (see stop() below).
void Assembler::break_(uint32_t code, bool break_as_stop) {
  DCHECK((code & ~0xfffff) == 0);  // code must fit the 20-bit field.
  // We need to invalidate breaks that could be stops as well because the
  // simulator expects a char pointer after the stop instruction.
  // See constants-mips.h for explanation.
  DCHECK((break_as_stop &&
          code <= kMaxStopCode &&
          code > kMaxWatchpointCode) ||
         (!break_as_stop &&
          (code > kMaxStopCode ||
           code <= kMaxWatchpointCode)));
  Instr break_instr = SPECIAL | BREAK | (code << 6);
  emit(break_instr);
}
   2219 
   2220 
// Emit a simulator stop with message |msg| and stop |code|. On real MIPS
// hardware this degrades to a plain break (the code and message are unused);
// under the simulator the message pointer is embedded after the break.
void Assembler::stop(const char* msg, uint32_t code) {
  DCHECK(code > kMaxWatchpointCode);
  DCHECK(code <= kMaxStopCode);
#if defined(V8_HOST_ARCH_MIPS) || defined(V8_HOST_ARCH_MIPS64)
  break_(0x54321);
#else  // V8_HOST_ARCH_MIPS
  // Block for 3 instruction slots: the break plus the 64-bit msg pointer.
  BlockTrampolinePoolFor(3);
  // The Simulator will handle the stop instruction and get the message address.
  // On MIPS stop() is just a special kind of break_().
  break_(code, true);
  emit(reinterpret_cast<uint64_t>(msg));
#endif
}
   2234 
   2235 
   2236 void Assembler::tge(Register rs, Register rt, uint16_t code) {
   2237   DCHECK(is_uint10(code));
   2238   Instr instr = SPECIAL | TGE | rs.code() << kRsShift
   2239       | rt.code() << kRtShift | code << 6;
   2240   emit(instr);
   2241 }
   2242 
   2243 
   2244 void Assembler::tgeu(Register rs, Register rt, uint16_t code) {
   2245   DCHECK(is_uint10(code));
   2246   Instr instr = SPECIAL | TGEU | rs.code() << kRsShift
   2247       | rt.code() << kRtShift | code << 6;
   2248   emit(instr);
   2249 }
   2250 
   2251 
   2252 void Assembler::tlt(Register rs, Register rt, uint16_t code) {
   2253   DCHECK(is_uint10(code));
   2254   Instr instr =
   2255       SPECIAL | TLT | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
   2256   emit(instr);
   2257 }
   2258 
   2259 
   2260 void Assembler::tltu(Register rs, Register rt, uint16_t code) {
   2261   DCHECK(is_uint10(code));
   2262   Instr instr =
   2263       SPECIAL | TLTU | rs.code() << kRsShift
   2264       | rt.code() << kRtShift | code << 6;
   2265   emit(instr);
   2266 }
   2267 
   2268 
   2269 void Assembler::teq(Register rs, Register rt, uint16_t code) {
   2270   DCHECK(is_uint10(code));
   2271   Instr instr =
   2272       SPECIAL | TEQ | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
   2273   emit(instr);
   2274 }
   2275 
   2276 
   2277 void Assembler::tne(Register rs, Register rt, uint16_t code) {
   2278   DCHECK(is_uint10(code));
   2279   Instr instr =
   2280       SPECIAL | TNE | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
   2281   emit(instr);
   2282 }
   2283 
// Emit a SYNC memory barrier (stype 0).
void Assembler::sync() {
  Instr sync_instr = SPECIAL | SYNC;
  emit(sync_instr);
}

// Move from HI/LO register.

// rd = HI.
void Assembler::mfhi(Register rd) {
  GenInstrRegister(SPECIAL, zero_reg, zero_reg, rd, 0, MFHI);
}


// rd = LO.
void Assembler::mflo(Register rd) {
  GenInstrRegister(SPECIAL, zero_reg, zero_reg, rd, 0, MFLO);
}
   2299 
   2300 
   2301 // Set on less than instructions.
// rd = (rs < rt) ? 1 : 0, signed compare.
void Assembler::slt(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLT);
}


// rd = (rs < rt) ? 1 : 0, unsigned compare.
void Assembler::sltu(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLTU);
}


// rt = (rs < j) ? 1 : 0, signed compare against a 16-bit immediate.
void Assembler::slti(Register rt, Register rs, int32_t j) {
  GenInstrImmediate(SLTI, rs, rt, j);
}


// rt = (rs < j) ? 1 : 0, unsigned compare against a sign-extended immediate.
void Assembler::sltiu(Register rt, Register rs, int32_t j) {
  GenInstrImmediate(SLTIU, rs, rt, j);
}
   2320 
   2321 
   2322 // Conditional move.
// Conditional move: rd = rs if rt == 0.
void Assembler::movz(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVZ);
}


// Conditional move: rd = rs if rt != 0.
void Assembler::movn(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVN);
}


// Move on FPU condition true: rd = rs if condition flag |cc| is set.
// The rt field packs the condition code (cc << 2) with the tf bit (1 = true).
void Assembler::movt(Register rd, Register rs, uint16_t cc) {
  Register rt;
  rt.reg_code = (cc & 0x0007) << 2 | 1;
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVCI);
}


// Move on FPU condition false: rd = rs if condition flag |cc| is clear
// (tf bit = 0 in the packed rt field).
void Assembler::movf(Register rd, Register rs, uint16_t cc) {
  Register rt;
  rt.reg_code = (cc & 0x0007) << 2 | 0;
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVCI);
}
   2345 
   2346 
// Single/double precision wrappers over the generic r6 min/max/mina/maxa
// emitters below (the 'a' variants compare absolute values).

void Assembler::min_s(FPURegister fd, FPURegister fs, FPURegister ft) {
  min(S, fd, fs, ft);
}


void Assembler::min_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  min(D, fd, fs, ft);
}


void Assembler::max_s(FPURegister fd, FPURegister fs, FPURegister ft) {
  max(S, fd, fs, ft);
}


void Assembler::max_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  max(D, fd, fs, ft);
}


void Assembler::mina_s(FPURegister fd, FPURegister fs, FPURegister ft) {
  mina(S, fd, fs, ft);
}


void Assembler::mina_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  mina(D, fd, fs, ft);
}


void Assembler::maxa_s(FPURegister fd, FPURegister fs, FPURegister ft) {
  maxa(S, fd, fs, ft);
}


void Assembler::maxa_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  maxa(D, fd, fs, ft);
}
   2385 
   2386 
// r6 floating-point maximum: fd = max(fs, ft) in format |fmt| (S or D).
void Assembler::max(SecondaryField fmt, FPURegister fd, FPURegister fs,
                    FPURegister ft) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK((fmt == D) || (fmt == S));
  GenInstrRegister(COP1, fmt, ft, fs, fd, MAX);
}


// r6 floating-point minimum: fd = min(fs, ft) in format |fmt| (S or D).
void Assembler::min(SecondaryField fmt, FPURegister fd, FPURegister fs,
                    FPURegister ft) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK((fmt == D) || (fmt == S));
  GenInstrRegister(COP1, fmt, ft, fs, fd, MIN);
}
   2401 
   2402 
   2403 // GPR.
// GPR. r6 select: rd = (rt == 0) ? rs : 0.
void Assembler::seleqz(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SELEQZ_S);
}


// GPR. r6 select: rd = (rt != 0) ? rs : 0.
void Assembler::selnez(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SELNEZ_S);
}
   2415 
   2416 
   2417 // Bit twiddling.
// Count leading zeros in the 32-bit value of rs; pre-r6 and r6 encodings differ.
void Assembler::clz(Register rd, Register rs) {
  if (kArchVariant != kMips64r6) {
    // Clz instr requires same GPR number in 'rd' and 'rt' fields.
    GenInstrRegister(SPECIAL2, rs, rd, rd, 0, CLZ);
  } else {
    GenInstrRegister(SPECIAL, rs, zero_reg, rd, 1, CLZ_R6);
  }
}


// Count leading zeros in the 64-bit value of rs.
void Assembler::dclz(Register rd, Register rs) {
  if (kArchVariant != kMips64r6) {
    // dclz instr requires same GPR number in 'rd' and 'rt' fields.
    GenInstrRegister(SPECIAL2, rs, rd, rd, 0, DCLZ);
  } else {
    GenInstrRegister(SPECIAL, rs, zero_reg, rd, 1, DCLZ_R6);
  }
}
   2436 
   2437 
// Bit-field insert: copies |size| bits from rs into rt starting at bit |pos|.
// The rd field carries msb (pos + size - 1) and the sa field carries lsb (pos).
void Assembler::ins_(Register rt, Register rs, uint16_t pos, uint16_t size) {
  // Should be called via MacroAssembler::Ins.
  // Ins instr has 'rt' field as dest, and two uint5: msb, lsb.
  DCHECK((kArchVariant == kMips64r2) || (kArchVariant == kMips64r6));
  GenInstrRegister(SPECIAL3, rs, rt, pos + size - 1, pos, INS);
}


// Doubleword bit-field insert (fields entirely within bits 0-31).
void Assembler::dins_(Register rt, Register rs, uint16_t pos, uint16_t size) {
  // Should be called via MacroAssembler::Dins.
  // Dins instr has 'rt' field as dest, and two uint5: msb, lsb.
  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL3, rs, rt, pos + size - 1, pos, DINS);
}


// Bit-field extract: rt = |size| bits of rs starting at bit |pos|,
// zero-extended. The rd field carries msbd (size - 1), sa carries lsb (pos).
void Assembler::ext_(Register rt, Register rs, uint16_t pos, uint16_t size) {
  // Should be called via MacroAssembler::Ext.
  // Ext instr has 'rt' field as dest, and two uint5: msb, lsb.
  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL3, rs, rt, size - 1, pos, EXT);
}


// Doubleword bit-field extract (pos 0-31, size 1-32).
void Assembler::dext_(Register rt, Register rs, uint16_t pos, uint16_t size) {
  // Should be called via MacroAssembler::Dext.
  // Dext instr has 'rt' field as dest, and two uint5: msb, lsb.
  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL3, rs, rt, size - 1, pos, DEXT);
}


// Doubleword extract for sizes 33-64: the msbd field encodes size - 1 - 32.
void Assembler::dextm(Register rt, Register rs, uint16_t pos, uint16_t size) {
  // Should be called via MacroAssembler::Dextm.
  // Dextm instr has 'rt' field as dest, and two uint5: msb, lsb.
  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL3, rs, rt, size - 1 - 32, pos, DEXTM);
}


// Doubleword extract for positions 32-63: the lsb field encodes pos - 32.
void Assembler::dextu(Register rt, Register rs, uint16_t pos, uint16_t size) {
  // Should be called via MacroAssembler::Dextu.
  // Dextu instr has 'rt' field as dest, and two uint5: msb, lsb.
  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL3, rs, rt, size - 1, pos - 32, DEXTU);
}
   2484 
   2485 
// r6 reverse the bits within each byte of the 32-bit value of rt.
void Assembler::bitswap(Register rd, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL3, zero_reg, rt, rd, 0, BSHFL);
}


// r6 reverse the bits within each byte of the 64-bit value of rt.
void Assembler::dbitswap(Register rd, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL3, zero_reg, rt, rd, 0, DBSHFL);
}
   2496 
   2497 
// Emit a PREF cache-prefetch hint for the address in |rs|; |hint| is the
// 5-bit prefetch type, encoded in the rt field.
void Assembler::pref(int32_t hint, const MemOperand& rs) {
  DCHECK(is_uint5(hint) && is_uint16(rs.offset_));
  Instr instr = PREF | (rs.rm().code() << kRsShift) | (hint << kRtShift)
      | (rs.offset_);
  emit(instr);
}
   2504 
   2505 
// r6 ALIGN: concatenate rs and rt and extract a word at byte offset |bp|.
// The ALIGN sub-opcode and the 2-bit bp are packed into the sa field.
void Assembler::align(Register rd, Register rs, Register rt, uint8_t bp) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(is_uint3(bp));
  uint16_t sa = (ALIGN << kBp2Bits) | bp;
  GenInstrRegister(SPECIAL3, rs, rt, rd, sa, BSHFL);
}


// r6 DALIGN: doubleword variant with a 3-bit byte offset.
void Assembler::dalign(Register rd, Register rs, Register rt, uint8_t bp) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(is_uint3(bp));
  uint16_t sa = (DALIGN << kBp3Bits) | bp;
  GenInstrRegister(SPECIAL3, rs, rt, rd, sa, DBSHFL);
}
   2520 
// Word swap bytes within halfwords: rd = byteswap16 of each half of rt.
void Assembler::wsbh(Register rd, Register rt) {
  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL3, zero_reg, rt, rd, WSBH, BSHFL);
}

// Doubleword swap bytes within halfwords.
void Assembler::dsbh(Register rd, Register rt) {
  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL3, zero_reg, rt, rd, DSBH, DBSHFL);
}

// Doubleword swap halfwords within doublewords.
void Assembler::dshd(Register rd, Register rt) {
  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL3, zero_reg, rt, rd, DSHD, DBSHFL);
}

// Sign-extend halfword: rd = sign_extend16(rt).
void Assembler::seh(Register rd, Register rt) {
  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL3, zero_reg, rt, rd, SEH, BSHFL);
}

// Sign-extend byte: rd = sign_extend8(rt).
void Assembler::seb(Register rd, Register rt) {
  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL3, zero_reg, rt, rd, SEB, BSHFL);
}
   2545 
   2546 // --------Coprocessor-instructions----------------
   2547 
   2548 // Load, store, move.
// Load single-precision FPU register. Large offsets go through |at|.
void Assembler::lwc1(FPURegister fd, const MemOperand& src) {
  if (is_int16(src.offset_)) {
    GenInstrImmediate(LWC1, src.rm(), fd, src.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to load.
    LoadRegPlusOffsetToAt(src);
    GenInstrImmediate(LWC1, at, fd, 0);
  }
}


// Load double-precision FPU register. Large offsets go through |at|.
void Assembler::ldc1(FPURegister fd, const MemOperand& src) {
  if (is_int16(src.offset_)) {
    GenInstrImmediate(LDC1, src.rm(), fd, src.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to load.
    LoadRegPlusOffsetToAt(src);
    GenInstrImmediate(LDC1, at, fd, 0);
  }
}


// Store single-precision FPU register. Large offsets go through |at|.
void Assembler::swc1(FPURegister fd, const MemOperand& src) {
  if (is_int16(src.offset_)) {
    GenInstrImmediate(SWC1, src.rm(), fd, src.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to store.
    LoadRegPlusOffsetToAt(src);
    GenInstrImmediate(SWC1, at, fd, 0);
  }
}


// Store double-precision FPU register. Large offsets go through |at|.
// NOTE(review): only sdc1 asserts !src.rm().is(at) here; the helper
// LoadRegPlusOffsetToAt repeats the same check for the long-offset path.
void Assembler::sdc1(FPURegister fd, const MemOperand& src) {
  DCHECK(!src.rm().is(at));
  if (is_int16(src.offset_)) {
    GenInstrImmediate(SDC1, src.rm(), fd, src.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to store.
    LoadRegPlusOffsetToAt(src);
    GenInstrImmediate(SDC1, at, fd, 0);
  }
}
   2588 
   2589 
// MTC1: move the low 32 bits of GPR rt into FPR fs.
void Assembler::mtc1(Register rt, FPURegister fs) {
  GenInstrRegister(COP1, MTC1, rt, fs, f0);
}
   2593 
   2594 
// MTHC1: move GPR rt into the high word of FPR fs.
void Assembler::mthc1(Register rt, FPURegister fs) {
  GenInstrRegister(COP1, MTHC1, rt, fs, f0);
}
   2598 
   2599 
// DMTC1: move all 64 bits of GPR rt into FPR fs.
void Assembler::dmtc1(Register rt, FPURegister fs) {
  GenInstrRegister(COP1, DMTC1, rt, fs, f0);
}
   2603 
   2604 
// MFC1: move the low 32 bits of FPR fs into GPR rt.
void Assembler::mfc1(Register rt, FPURegister fs) {
  GenInstrRegister(COP1, MFC1, rt, fs, f0);
}
   2608 
   2609 
// MFHC1: move the high word of FPR fs into GPR rt.
void Assembler::mfhc1(Register rt, FPURegister fs) {
  GenInstrRegister(COP1, MFHC1, rt, fs, f0);
}
   2613 
   2614 
// DMFC1: move all 64 bits of FPR fs into GPR rt.
void Assembler::dmfc1(Register rt, FPURegister fs) {
  GenInstrRegister(COP1, DMFC1, rt, fs, f0);
}
   2618 
   2619 
// CTC1: move GPR rt into FPU control register fs (e.g. FCSR).
void Assembler::ctc1(Register rt, FPUControlRegister fs) {
  GenInstrRegister(COP1, CTC1, rt, fs);
}
   2623 
   2624 
// CFC1: move FPU control register fs into GPR rt.
void Assembler::cfc1(Register rt, FPUControlRegister fs) {
  GenInstrRegister(COP1, CFC1, rt, fs);
}
   2628 
   2629 
   2630 void Assembler::DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) {
   2631   uint64_t i;
   2632   memcpy(&i, &d, 8);
   2633 
   2634   *lo = i & 0xffffffff;
   2635   *hi = i >> 32;
   2636 }
   2637 
   2638 
// SEL.fmt (MIPSr6 only): fd = bit 0 of fd ? ft : fs.
void Assembler::sel(SecondaryField fmt, FPURegister fd, FPURegister fs,
                    FPURegister ft) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK((fmt == D) || (fmt == S));

  GenInstrRegister(COP1, fmt, ft, fs, fd, SEL);
}
   2646 
   2647 
// Single-precision convenience wrapper for sel().
void Assembler::sel_s(FPURegister fd, FPURegister fs, FPURegister ft) {
  sel(S, fd, fs, ft);
}
   2651 
   2652 
// Double-precision convenience wrapper for sel().
void Assembler::sel_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  sel(D, fd, fs, ft);
}
   2656 
   2657 
   2658 // FPR.
   2659 void Assembler::seleqz(SecondaryField fmt, FPURegister fd, FPURegister fs,
   2660                        FPURegister ft) {
   2661   DCHECK((fmt == D) || (fmt == S));
   2662   GenInstrRegister(COP1, fmt, ft, fs, fd, SELEQZ_C);
   2663 }
   2664 
   2665 
// Double-precision convenience wrapper for seleqz().
void Assembler::seleqz_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  seleqz(D, fd, fs, ft);
}
   2669 
   2670 
// Single-precision convenience wrapper for seleqz().
void Assembler::seleqz_s(FPURegister fd, FPURegister fs, FPURegister ft) {
  seleqz(S, fd, fs, ft);
}
   2674 
   2675 
// Double-precision convenience wrapper for selnez().
void Assembler::selnez_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  selnez(D, fd, fs, ft);
}
   2679 
   2680 
// Single-precision convenience wrapper for selnez().
void Assembler::selnez_s(FPURegister fd, FPURegister fs, FPURegister ft) {
  selnez(S, fd, fs, ft);
}
   2684 
   2685 
// MOVZ.S (r2 only): fd = fs if GPR rt == 0.
void Assembler::movz_s(FPURegister fd, FPURegister fs, Register rt) {
  DCHECK(kArchVariant == kMips64r2);
  GenInstrRegister(COP1, S, rt, fs, fd, MOVZ_C);
}
   2690 
   2691 
// MOVZ.D (r2 only): fd = fs if GPR rt == 0.
void Assembler::movz_d(FPURegister fd, FPURegister fs, Register rt) {
  DCHECK(kArchVariant == kMips64r2);
  GenInstrRegister(COP1, D, rt, fs, fd, MOVZ_C);
}
   2696 
   2697 
// MOVT.S (r2 only): fd = fs if FP condition bit cc is true.
// The cc index and the true/false flag are packed into the ft field:
// bits [4:2] = cc, bit 0 = 1 (test-for-true).
void Assembler::movt_s(FPURegister fd, FPURegister fs, uint16_t cc) {
  DCHECK(kArchVariant == kMips64r2);
  FPURegister ft;
  ft.reg_code = (cc & 0x0007) << 2 | 1;
  GenInstrRegister(COP1, S, ft, fs, fd, MOVF);
}
   2704 
   2705 
// MOVT.D (r2 only): fd = fs if FP condition bit cc is true.
// ft field packs bits [4:2] = cc, bit 0 = 1 (test-for-true).
void Assembler::movt_d(FPURegister fd, FPURegister fs, uint16_t cc) {
  DCHECK(kArchVariant == kMips64r2);
  FPURegister ft;
  ft.reg_code = (cc & 0x0007) << 2 | 1;
  GenInstrRegister(COP1, D, ft, fs, fd, MOVF);
}
   2712 
   2713 
// MOVF.S (r2 only): fd = fs if FP condition bit cc is false.
// ft field packs bits [4:2] = cc, bit 0 = 0 (test-for-false).
void Assembler::movf_s(FPURegister fd, FPURegister fs, uint16_t cc) {
  DCHECK(kArchVariant == kMips64r2);
  FPURegister ft;
  ft.reg_code = (cc & 0x0007) << 2 | 0;
  GenInstrRegister(COP1, S, ft, fs, fd, MOVF);
}
   2720 
   2721 
// MOVF.D (r2 only): fd = fs if FP condition bit cc is false.
// ft field packs bits [4:2] = cc, bit 0 = 0 (test-for-false).
void Assembler::movf_d(FPURegister fd, FPURegister fs, uint16_t cc) {
  DCHECK(kArchVariant == kMips64r2);
  FPURegister ft;
  ft.reg_code = (cc & 0x0007) << 2 | 0;
  GenInstrRegister(COP1, D, ft, fs, fd, MOVF);
}
   2728 
   2729 
// MOVN.S (r2 only): fd = fs if GPR rt != 0.
void Assembler::movn_s(FPURegister fd, FPURegister fs, Register rt) {
  DCHECK(kArchVariant == kMips64r2);
  GenInstrRegister(COP1, S, rt, fs, fd, MOVN_C);
}
   2734 
   2735 
// MOVN.D (r2 only): fd = fs if GPR rt != 0.
void Assembler::movn_d(FPURegister fd, FPURegister fs, Register rt) {
  DCHECK(kArchVariant == kMips64r2);
  GenInstrRegister(COP1, D, rt, fs, fd, MOVN_C);
}
   2740 
   2741 
   2742 // FPR.
// SELNEZ.fmt (MIPSr6 only): fd = (bit 0 of ft != 0) ? fs : 0.
void Assembler::selnez(SecondaryField fmt, FPURegister fd, FPURegister fs,
                       FPURegister ft) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK((fmt == D) || (fmt == S));
  GenInstrRegister(COP1, fmt, ft, fs, fd, SELNEZ_C);
}
   2749 
   2750 
   2751 // Arithmetic.
   2752 
// ADD.S: fd = fs + ft. The ADD_D function-field constant is shared by both
// formats; the fmt field (S) selects single precision.
void Assembler::add_s(FPURegister fd, FPURegister fs, FPURegister ft) {
  GenInstrRegister(COP1, S, ft, fs, fd, ADD_D);
}
   2756 
   2757 
// ADD.D: fd = fs + ft (double precision).
void Assembler::add_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  GenInstrRegister(COP1, D, ft, fs, fd, ADD_D);
}
   2761 
   2762 
// SUB.S: fd = fs - ft (function field shared with the D format).
void Assembler::sub_s(FPURegister fd, FPURegister fs, FPURegister ft) {
  GenInstrRegister(COP1, S, ft, fs, fd, SUB_D);
}
   2766 
   2767 
// SUB.D: fd = fs - ft (double precision).
void Assembler::sub_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  GenInstrRegister(COP1, D, ft, fs, fd, SUB_D);
}
   2771 
   2772 
// MUL.S: fd = fs * ft (function field shared with the D format).
void Assembler::mul_s(FPURegister fd, FPURegister fs, FPURegister ft) {
  GenInstrRegister(COP1, S, ft, fs, fd, MUL_D);
}
   2776 
   2777 
// MUL.D: fd = fs * ft (double precision).
void Assembler::mul_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  GenInstrRegister(COP1, D, ft, fs, fd, MUL_D);
}
   2781 
   2782 
// MADD.D (COP1X encoding): fd = (fs * ft) + fr.
void Assembler::madd_d(FPURegister fd, FPURegister fr, FPURegister fs,
    FPURegister ft) {
  GenInstrRegister(COP1X, fr, ft, fs, fd, MADD_D);
}
   2787 
   2788 
// DIV.S: fd = fs / ft (function field shared with the D format).
void Assembler::div_s(FPURegister fd, FPURegister fs, FPURegister ft) {
  GenInstrRegister(COP1, S, ft, fs, fd, DIV_D);
}
   2792 
   2793 
// DIV.D: fd = fs / ft (double precision).
void Assembler::div_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  GenInstrRegister(COP1, D, ft, fs, fd, DIV_D);
}
   2797 
   2798 
// ABS.S: fd = |fs| (function field shared with the D format).
void Assembler::abs_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, ABS_D);
}
   2802 
   2803 
// ABS.D: fd = |fs| (double precision).
void Assembler::abs_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, ABS_D);
}
   2807 
   2808 
// MOV.D: fd = fs (double-precision register move).
void Assembler::mov_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, MOV_D);
}
   2812 
   2813 
// MOV.S: fd = fs (single-precision register move).
void Assembler::mov_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, MOV_S);
}
   2817 
   2818 
// NEG.S: fd = -fs (function field shared with the D format).
void Assembler::neg_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, NEG_D);
}
   2822 
   2823 
// NEG.D: fd = -fs (double precision).
void Assembler::neg_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, NEG_D);
}
   2827 
   2828 
// SQRT.S: fd = sqrt(fs) (function field shared with the D format).
void Assembler::sqrt_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, SQRT_D);
}
   2832 
   2833 
// SQRT.D: fd = sqrt(fs) (double precision).
void Assembler::sqrt_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, SQRT_D);
}
   2837 
   2838 
// RSQRT.S: fd = 1 / sqrt(fs) (reciprocal square root, single).
void Assembler::rsqrt_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, RSQRT_S);
}
   2842 
   2843 
// RSQRT.D: fd = 1 / sqrt(fs) (reciprocal square root, double).
void Assembler::rsqrt_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, RSQRT_D);
}
   2847 
   2848 
// RECIP.D: fd = 1 / fs (reciprocal approximation, double).
void Assembler::recip_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, RECIP_D);
}
   2852 
   2853 
// RECIP.S: fd = 1 / fs (reciprocal approximation, single).
void Assembler::recip_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, RECIP_S);
}
   2857 
   2858 
   2859 // Conversions.
// CVT.W.S: convert single to 32-bit integer using the current FCSR
// rounding mode.
void Assembler::cvt_w_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, CVT_W_S);
}
   2863 
   2864 
// CVT.W.D: convert double to 32-bit integer using the current FCSR
// rounding mode.
void Assembler::cvt_w_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, CVT_W_D);
}
   2868 
   2869 
// TRUNC.W.S: convert single to 32-bit integer, rounding toward zero.
void Assembler::trunc_w_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, TRUNC_W_S);
}
   2873 
   2874 
// TRUNC.W.D: convert double to 32-bit integer, rounding toward zero.
void Assembler::trunc_w_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, TRUNC_W_D);
}
   2878 
   2879 
// ROUND.W.S: convert single to 32-bit integer, rounding to nearest.
void Assembler::round_w_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, ROUND_W_S);
}
   2883 
   2884 
// ROUND.W.D: convert double to 32-bit integer, rounding to nearest.
void Assembler::round_w_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, ROUND_W_D);
}
   2888 
   2889 
// FLOOR.W.S: convert single to 32-bit integer, rounding toward -inf.
void Assembler::floor_w_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, FLOOR_W_S);
}
   2893 
   2894 
// FLOOR.W.D: convert double to 32-bit integer, rounding toward -inf.
void Assembler::floor_w_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, FLOOR_W_D);
}
   2898 
   2899 
// CEIL.W.S: convert single to 32-bit integer, rounding toward +inf.
void Assembler::ceil_w_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, CEIL_W_S);
}
   2903 
   2904 
// CEIL.W.D: convert double to 32-bit integer, rounding toward +inf.
void Assembler::ceil_w_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, CEIL_W_D);
}
   2908 
   2909 
// RINT.S (MIPSr6): round to integral value in FP format; see rint().
void Assembler::rint_s(FPURegister fd, FPURegister fs) { rint(S, fd, fs); }
   2911 
   2912 
// RINT.D (MIPSr6): round to integral value in FP format; see rint().
void Assembler::rint_d(FPURegister fd, FPURegister fs) { rint(D, fd, fs); }
   2914 
   2915 
// RINT.fmt (MIPSr6 only): round fs to an integral value, result stays in
// floating-point format.
void Assembler::rint(SecondaryField fmt, FPURegister fd, FPURegister fs) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(COP1, fmt, f0, fs, fd, RINT);
}
   2920 
   2921 
// CVT.L.S: convert single to 64-bit integer using the current FCSR
// rounding mode.
void Assembler::cvt_l_s(FPURegister fd, FPURegister fs) {
  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
  GenInstrRegister(COP1, S, f0, fs, fd, CVT_L_S);
}
   2926 
   2927 
// CVT.L.D: convert double to 64-bit integer using the current FCSR
// rounding mode.
void Assembler::cvt_l_d(FPURegister fd, FPURegister fs) {
  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
  GenInstrRegister(COP1, D, f0, fs, fd, CVT_L_D);
}
   2932 
   2933 
// TRUNC.L.S: convert single to 64-bit integer, rounding toward zero.
void Assembler::trunc_l_s(FPURegister fd, FPURegister fs) {
  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
  GenInstrRegister(COP1, S, f0, fs, fd, TRUNC_L_S);
}
   2938 
   2939 
// TRUNC.L.D: convert double to 64-bit integer, rounding toward zero.
void Assembler::trunc_l_d(FPURegister fd, FPURegister fs) {
  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
  GenInstrRegister(COP1, D, f0, fs, fd, TRUNC_L_D);
}
   2944 
   2945 
   2946 void Assembler::round_l_s(FPURegister fd, FPURegister fs) {
   2947   GenInstrRegister(COP1, S, f0, fs, fd, ROUND_L_S);
   2948 }
   2949 
   2950 
   2951 void Assembler::round_l_d(FPURegister fd, FPURegister fs) {
   2952   GenInstrRegister(COP1, D, f0, fs, fd, ROUND_L_D);
   2953 }
   2954 
   2955 
   2956 void Assembler::floor_l_s(FPURegister fd, FPURegister fs) {
   2957   GenInstrRegister(COP1, S, f0, fs, fd, FLOOR_L_S);
   2958 }
   2959 
   2960 
   2961 void Assembler::floor_l_d(FPURegister fd, FPURegister fs) {
   2962   GenInstrRegister(COP1, D, f0, fs, fd, FLOOR_L_D);
   2963 }
   2964 
   2965 
   2966 void Assembler::ceil_l_s(FPURegister fd, FPURegister fs) {
   2967   GenInstrRegister(COP1, S, f0, fs, fd, CEIL_L_S);
   2968 }
   2969 
   2970 
   2971 void Assembler::ceil_l_d(FPURegister fd, FPURegister fs) {
   2972   GenInstrRegister(COP1, D, f0, fs, fd, CEIL_L_D);
   2973 }
   2974 
   2975 
// CLASS.S (MIPSr6 only): classify the single-precision value in fs
// (NaN/inf/zero/subnormal/normal, by sign).
void Assembler::class_s(FPURegister fd, FPURegister fs) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(COP1, S, f0, fs, fd, CLASS_S);
}
   2980 
   2981 
// CLASS.D (MIPSr6 only): classify the double-precision value in fs.
void Assembler::class_d(FPURegister fd, FPURegister fs) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(COP1, D, f0, fs, fd, CLASS_D);
}
   2986 
   2987 
// MINA.fmt (MIPSr6 only): fd = value with the smaller absolute magnitude
// of fs and ft.
void Assembler::mina(SecondaryField fmt, FPURegister fd, FPURegister fs,
                     FPURegister ft) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK((fmt == D) || (fmt == S));
  GenInstrRegister(COP1, fmt, ft, fs, fd, MINA);
}
   2994 
   2995 
// MAXA.fmt (MIPSr6 only): fd = value with the larger absolute magnitude
// of fs and ft.
void Assembler::maxa(SecondaryField fmt, FPURegister fd, FPURegister fs,
                     FPURegister ft) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK((fmt == D) || (fmt == S));
  GenInstrRegister(COP1, fmt, ft, fs, fd, MAXA);
}
   3002 
   3003 
// CVT.S.W: convert 32-bit integer to single precision.
void Assembler::cvt_s_w(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, W, f0, fs, fd, CVT_S_W);
}
   3007 
   3008 
// CVT.S.L: convert 64-bit integer to single precision.
void Assembler::cvt_s_l(FPURegister fd, FPURegister fs) {
  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
  GenInstrRegister(COP1, L, f0, fs, fd, CVT_S_L);
}
   3013 
   3014 
// CVT.S.D: narrow double precision to single precision.
void Assembler::cvt_s_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, CVT_S_D);
}
   3018 
   3019 
// CVT.D.W: convert 32-bit integer to double precision.
void Assembler::cvt_d_w(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, W, f0, fs, fd, CVT_D_W);
}
   3023 
   3024 
// CVT.D.L: convert 64-bit integer to double precision.
void Assembler::cvt_d_l(FPURegister fd, FPURegister fs) {
  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
  GenInstrRegister(COP1, L, f0, fs, fd, CVT_D_L);
}
   3029 
   3030 
// CVT.D.S: widen single precision to double precision.
void Assembler::cvt_d_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, CVT_D_S);
}
   3034 
   3035 
   3036 // Conditions for >= MIPSr6.
// CMP.cond.fmt (MIPSr6 only): compare fs and ft with condition 'cond' and
// write an all-ones/all-zeros mask into fd. The instruction word is built
// by hand because the cond code occupies the function field.
void Assembler::cmp(FPUCondition cond, SecondaryField fmt,
    FPURegister fd, FPURegister fs, FPURegister ft) {
  DCHECK(kArchVariant == kMips64r6);
  // fmt must fit entirely in the 5-bit rs field.
  DCHECK((fmt & ~(31 << kRsShift)) == 0);
  Instr instr = COP1 | fmt | ft.code() << kFtShift |
      fs.code() << kFsShift | fd.code() << kFdShift | (0 << 5) | cond;
  emit(instr);
}
   3045 
   3046 
// Single-precision compare. NOTE: the r6 CMP encoding reuses the field
// value named W for single-precision format (see the r6 ISA manual).
void Assembler::cmp_s(FPUCondition cond, FPURegister fd, FPURegister fs,
                      FPURegister ft) {
  cmp(cond, W, fd, fs, ft);
}
   3051 
// Double-precision compare. NOTE: the r6 CMP encoding reuses the field
// value named L for double-precision format (see the r6 ISA manual).
void Assembler::cmp_d(FPUCondition cond, FPURegister fd, FPURegister fs,
                      FPURegister ft) {
  cmp(cond, L, fd, fs, ft);
}
   3056 
   3057 
// BC1EQZ (MIPSr6 only): branch by 'offset' instructions if bit 0 of ft
// is zero.
void Assembler::bc1eqz(int16_t offset, FPURegister ft) {
  DCHECK(kArchVariant == kMips64r6);
  Instr instr = COP1 | BC1EQZ | ft.code() << kFtShift | (offset & kImm16Mask);
  emit(instr);
}
   3063 
   3064 
// BC1NEZ (MIPSr6 only): branch by 'offset' instructions if bit 0 of ft
// is non-zero.
void Assembler::bc1nez(int16_t offset, FPURegister ft) {
  DCHECK(kArchVariant == kMips64r6);
  Instr instr = COP1 | BC1NEZ | ft.code() << kFtShift | (offset & kImm16Mask);
  emit(instr);
}
   3070 
   3071 
   3072 // Conditions for < MIPSr6.
// C.cond.fmt (pre-r6 only): compare fs and ft and set FP condition bit cc.
// The instruction word is built by hand: cc goes in bits [10:8], and
// 3 << 4 supplies the fixed '11' bits of the C.cond function field.
void Assembler::c(FPUCondition cond, SecondaryField fmt,
    FPURegister fs, FPURegister ft, uint16_t cc) {
  DCHECK(kArchVariant != kMips64r6);
  DCHECK(is_uint3(cc));
  DCHECK(fmt == S || fmt == D);
  // fmt must fit entirely in the 5-bit rs field.
  DCHECK((fmt & ~(31 << kRsShift)) == 0);
  Instr instr = COP1 | fmt | ft.code() << kFtShift | fs.code() << kFsShift
      | cc << 8 | 3 << 4 | cond;
  emit(instr);
}
   3083 
   3084 
// Single-precision convenience wrapper for c().
void Assembler::c_s(FPUCondition cond, FPURegister fs, FPURegister ft,
                    uint16_t cc) {
  c(cond, S, fs, ft, cc);
}
   3089 
   3090 
// Double-precision convenience wrapper for c().
void Assembler::c_d(FPUCondition cond, FPURegister fs, FPURegister ft,
                    uint16_t cc) {
  c(cond, D, fs, ft, cc);
}
   3095 
   3096 
// Compare src1 against the constant 0.0 using pre-r6 C.cond.D, setting FP
// condition bit 0. Only src2 == 0.0 is supported. Clobbers f14, which is
// loaded with 0.0 via mtc1(zero_reg)/cvt_d_w.
void Assembler::fcmp(FPURegister src1, const double src2,
      FPUCondition cond) {
  DCHECK(src2 == 0.0);
  mtc1(zero_reg, f14);
  cvt_d_w(f14, f14);
  c(cond, D, src1, f14, 0);
}
   3104 
   3105 
// BC1F: branch by 'offset' instructions if FP condition bit cc is false.
// cc occupies bits [20:18]; bit 16 = 0 selects branch-on-false.
void Assembler::bc1f(int16_t offset, uint16_t cc) {
  DCHECK(is_uint3(cc));
  Instr instr = COP1 | BC1 | cc << 18 | 0 << 16 | (offset & kImm16Mask);
  emit(instr);
}
   3111 
   3112 
// BC1T: branch by 'offset' instructions if FP condition bit cc is true.
// cc occupies bits [20:18]; bit 16 = 1 selects branch-on-true.
void Assembler::bc1t(int16_t offset, uint16_t cc) {
  DCHECK(is_uint3(cc));
  Instr instr = COP1 | BC1 | cc << 18 | 1 << 16 | (offset & kImm16Mask);
  emit(instr);
}
   3118 
   3119 
// Adjust an internal reference at 'pc' after the code buffer moved by
// 'pc_delta' bytes. Handles three cases: a raw 64-bit internal reference,
// an encoded lui/ori/dsll/ori address-load sequence, and a j/jal (or boxed
// raw-offset) jump. Returns the number of instructions patched.
int Assembler::RelocateInternalReference(RelocInfo::Mode rmode, byte* pc,
                                         intptr_t pc_delta) {
  if (RelocInfo::IsInternalReference(rmode)) {
    // Raw 64-bit pointer stored directly in the instruction stream.
    int64_t* p = reinterpret_cast<int64_t*>(pc);
    if (*p == kEndOfJumpChain) {
      return 0;  // Number of instructions patched.
    }
    *p += pc_delta;
    return 2;  // Number of instructions patched.
  }
  Instr instr = instr_at(pc);
  DCHECK(RelocInfo::IsInternalReferenceEncoded(rmode));
  if (IsLui(instr)) {
    // 48-bit address spread over lui / ori / (dsll, untouched) / ori.
    Instr instr_lui = instr_at(pc + 0 * Assembler::kInstrSize);
    Instr instr_ori = instr_at(pc + 1 * Assembler::kInstrSize);
    Instr instr_ori2 = instr_at(pc + 3 * Assembler::kInstrSize);
    DCHECK(IsOri(instr_ori));
    DCHECK(IsOri(instr_ori2));
    // TODO(plind): symbolic names for the shifts.
    int64_t imm = (instr_lui & static_cast<int64_t>(kImm16Mask)) << 48;
    imm |= (instr_ori & static_cast<int64_t>(kImm16Mask)) << 32;
    imm |= (instr_ori2 & static_cast<int64_t>(kImm16Mask)) << 16;
    // Sign extend address.
    imm >>= 16;

    if (imm == kEndOfJumpChain) {
      return 0;  // Number of instructions patched.
    }
    imm += pc_delta;
    DCHECK((imm & 3) == 0);

    // Clear the immediate fields, then re-insert the shifted address.
    instr_lui &= ~kImm16Mask;
    instr_ori &= ~kImm16Mask;
    instr_ori2 &= ~kImm16Mask;

    instr_at_put(pc + 0 * Assembler::kInstrSize,
                 instr_lui | ((imm >> 32) & kImm16Mask));
    instr_at_put(pc + 1 * Assembler::kInstrSize,
                 instr_ori | (imm >> 16 & kImm16Mask));
    instr_at_put(pc + 3 * Assembler::kInstrSize,
                 instr_ori2 | (imm & kImm16Mask));
    return 4;  // Number of instructions patched.
  } else if (IsJ(instr) || IsJal(instr)) {
    // Regular j/jal relocation.
    uint32_t imm28 = (instr & static_cast<int32_t>(kImm26Mask)) << 2;
    imm28 += pc_delta;
    imm28 &= kImm28Mask;
    instr &= ~kImm26Mask;
    DCHECK((imm28 & 3) == 0);
    uint32_t imm26 = static_cast<uint32_t>(imm28 >> 2);
    instr_at_put(pc, instr | (imm26 & kImm26Mask));
    return 1;  // Number of instructions patched.
  } else {
    DCHECK(((instr & kJumpRawMask) == kJRawMark) ||
           ((instr & kJumpRawMask) == kJalRawMark));
    // Unbox raw offset and emit j/jal.
    int32_t imm28 = (instr & static_cast<int32_t>(kImm26Mask)) << 2;
    // Sign extend 28-bit offset to 32-bit.
    imm28 = (imm28 << 4) >> 4;
    uint64_t target =
        static_cast<int64_t>(imm28) + reinterpret_cast<uint64_t>(pc);
    target &= kImm28Mask;
    DCHECK((imm28 & 3) == 0);
    uint32_t imm26 = static_cast<uint32_t>(target >> 2);
    // Check markings whether to emit j or jal.
    uint32_t unbox = (instr & kJRawMark) ? J : JAL;
    instr_at_put(pc, unbox | (imm26 & kImm26Mask));
    return 1;  // Number of instructions patched.
  }
}
   3190 
   3191 
// Grow the code buffer: allocate a larger buffer (double up to 1MB, then
// +1MB per step), copy instructions forward and the reloc info (which
// grows down from the buffer end) backward, fix up internal pointers, and
// relocate INTERNAL_REFERENCE entries in the copied code.
void Assembler::GrowBuffer() {
  if (!own_buffer_) FATAL("external code buffer is too small");

  // Compute new buffer size.
  CodeDesc desc;  // The new buffer.
  if (buffer_size_ < 1 * MB) {
    desc.buffer_size = 2*buffer_size_;
  } else {
    desc.buffer_size = buffer_size_ + 1*MB;
  }
  CHECK_GT(desc.buffer_size, 0);  // No overflow.

  // Set up new buffer.
  desc.buffer = NewArray<byte>(desc.buffer_size);
  desc.origin = this;

  desc.instr_size = pc_offset();
  desc.reloc_size =
      static_cast<int>((buffer_ + buffer_size_) - reloc_info_writer.pos());

  // Copy the data.
  intptr_t pc_delta = desc.buffer - buffer_;
  // rc_delta differs from pc_delta because reloc info is anchored to the
  // END of the buffer, not the beginning.
  intptr_t rc_delta = (desc.buffer + desc.buffer_size) -
      (buffer_ + buffer_size_);
  MemMove(desc.buffer, buffer_, desc.instr_size);
  MemMove(reloc_info_writer.pos() + rc_delta,
              reloc_info_writer.pos(), desc.reloc_size);

  // Switch buffers.
  DeleteArray(buffer_);
  buffer_ = desc.buffer;
  buffer_size_ = desc.buffer_size;
  pc_ += pc_delta;
  reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
                               reloc_info_writer.last_pc() + pc_delta);

  // Relocate runtime entries.
  for (RelocIterator it(desc); !it.done(); it.next()) {
    RelocInfo::Mode rmode = it.rinfo()->rmode();
    if (rmode == RelocInfo::INTERNAL_REFERENCE) {
      byte* p = reinterpret_cast<byte*>(it.rinfo()->pc());
      RelocateInternalReference(rmode, p, pc_delta);
    }
  }
  DCHECK(!overflow());
}
   3238 
   3239 
// Emit a raw byte into the instruction stream.
void Assembler::db(uint8_t data) {
  CheckForEmitInForbiddenSlot();
  EmitHelper(data);
}
   3244 
   3245 
// Emit a raw 32-bit word into the instruction stream.
void Assembler::dd(uint32_t data) {
  CheckForEmitInForbiddenSlot();
  EmitHelper(data);
}
   3250 
   3251 
// Emit a raw 64-bit word into the instruction stream.
void Assembler::dq(uint64_t data) {
  CheckForEmitInForbiddenSlot();
  EmitHelper(data);
}
   3256 
   3257 
// Emit the address of 'label' as data with an INTERNAL_REFERENCE reloc
// entry. Note: despite the 'dd' name this emits a 64-bit value. Unbound
// labels are recorded via jump_address() to be patched on bind.
void Assembler::dd(Label* label) {
  uint64_t data;
  CheckForEmitInForbiddenSlot();
  if (label->is_bound()) {
    data = reinterpret_cast<uint64_t>(buffer_ + label->pos());
  } else {
    data = jump_address(label);
    unbound_labels_count_++;
    internal_reference_positions_.insert(label->pos());
  }
  RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
  EmitHelper(data);
}
   3271 
   3272 
// Record relocation information for the instruction at the current pc.
// External references are skipped unless serializing or emitting debug
// code; CODE_TARGET_WITH_ID entries additionally carry the recorded AST id.
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
  // We do not try to reuse pool constants.
  RelocInfo rinfo(isolate(), pc_, rmode, data, NULL);
  if (rmode >= RelocInfo::COMMENT &&
      rmode <= RelocInfo::DEBUG_BREAK_SLOT_AT_TAIL_CALL) {
    // Adjust code for new modes.
    // (Assertion only — these modes need no constant-pool entry.)
    DCHECK(RelocInfo::IsDebugBreakSlot(rmode)
           || RelocInfo::IsComment(rmode)
           || RelocInfo::IsPosition(rmode));
    // These modes do not need an entry in the constant pool.
  }
  if (!RelocInfo::IsNone(rinfo.rmode())) {
    // Don't record external references unless the heap will be serialized.
    if (rmode == RelocInfo::EXTERNAL_REFERENCE &&
        !serializer_enabled() && !emit_debug_code()) {
      return;
    }
    DCHECK(buffer_space() >= kMaxRelocSize);  // Too late to grow buffer here.
    if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
      RelocInfo reloc_info_with_ast_id(isolate(), pc_, rmode,
                                       RecordedAstId().ToInt(), NULL);
      ClearRecordedAstId();
      reloc_info_writer.Write(&reloc_info_with_ast_id);
    } else {
      reloc_info_writer.Write(&rinfo);
    }
  }
}
   3301 
   3302 
// Prevent trampoline-pool emission for the next 'instructions'
// instructions, after giving the pool one last chance to be emitted now.
void Assembler::BlockTrampolinePoolFor(int instructions) {
  CheckTrampolinePoolQuick(instructions);
  BlockTrampolinePoolBefore(pc_offset() + instructions * kInstrSize);
}
   3307 
   3308 
// Emit the trampoline pool (one j-slot per unbound label) if emission is
// not currently blocked. The pool is emitted at most once; afterwards
// next_buffer_check_ is pushed to kMaxInt so this never triggers again.
void Assembler::CheckTrampolinePool() {
  // Some small sequences of instructions must not be broken up by the
  // insertion of a trampoline pool; such sequences are protected by setting
  // either trampoline_pool_blocked_nesting_ or no_trampoline_pool_before_,
  // which are both checked here. Also, recursive calls to CheckTrampolinePool
  // are blocked by trampoline_pool_blocked_nesting_.
  if ((trampoline_pool_blocked_nesting_ > 0) ||
      (pc_offset() < no_trampoline_pool_before_)) {
    // Emission is currently blocked; make sure we try again as soon as
    // possible.
    if (trampoline_pool_blocked_nesting_ > 0) {
      next_buffer_check_ = pc_offset() + kInstrSize;
    } else {
      next_buffer_check_ = no_trampoline_pool_before_;
    }
    return;
  }

  DCHECK(!trampoline_emitted_);
  DCHECK(unbound_labels_count_ >= 0);
  if (unbound_labels_count_ > 0) {
    // First we emit jump (2 instructions), then we emit trampoline pool.
    { BlockTrampolinePoolScope block_trampoline_pool(this);
      Label after_pool;
      // Jump over the pool so straight-line execution skips it.
      if (kArchVariant == kMips64r6) {
        bc(&after_pool);
      } else {
        b(&after_pool);
      }
      nop();

      int pool_start = pc_offset();
      for (int i = 0; i < unbound_labels_count_; i++) {
        { BlockGrowBufferScope block_buf_growth(this);
          // Buffer growth (and relocation) must be blocked for internal
          // references until associated instructions are emitted and available
          // to be patched.
          RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
          j(&after_pool);
        }
        nop();
      }
      bind(&after_pool);
      trampoline_ = Trampoline(pool_start, unbound_labels_count_);

      trampoline_emitted_ = true;
      // As we are only going to emit trampoline once, we need to prevent any
      // further emission.
      next_buffer_check_ = kMaxInt;
    }
  } else {
    // Number of branches to unbound label at this point is zero, so we can
    // move next buffer check to maximum.
    next_buffer_check_ = pc_offset() +
        kMaxBranchOffset - kTrampolineSlotsSize * 16;
  }
  return;
}
   3367 
   3368 
// Read back the 48-bit target address encoded in the 4-instruction li
// sequence (lui/ori/dsll/ori) at 'pc'. The dsll at slot 2 carries no
// immediate bits, so only instructions 0, 1 and 3 are inspected.
Address Assembler::target_address_at(Address pc) {
  Instr instr0 = instr_at(pc);
  Instr instr1 = instr_at(pc + 1 * kInstrSize);
  Instr instr3 = instr_at(pc + 3 * kInstrSize);

  // Interpret 4 instructions for address generated by li: See listing in
  // Assembler::set_target_address_at() just below.
  if ((GetOpcodeField(instr0) == LUI) && (GetOpcodeField(instr1) == ORI) &&
      (GetOpcodeField(instr3) == ORI)) {
    // Assemble the 48 bit value.
     int64_t addr  = static_cast<int64_t>(
          ((uint64_t)(GetImmediate16(instr0)) << 32) |
          ((uint64_t)(GetImmediate16(instr1)) << 16) |
          ((uint64_t)(GetImmediate16(instr3))));

    // Sign extend to get canonical address.
    addr = (addr << 16) >> 16;
    return reinterpret_cast<Address>(addr);
  }
  // We should never get here, force a bad address if we do.
  UNREACHABLE();
  return (Address)0x0;
}
   3392 
   3393 
   3394 // MIPS and ia32 use opposite encoding for qNaN and sNaN, such that ia32
   3395 // qNaN is a MIPS sNaN, and ia32 sNaN is MIPS qNaN. If running from a heap
   3396 // snapshot generated on ia32, the resulting MIPS sNaN must be quieted.
   3397 // OS::nan_value() returns a qNaN.
// Overwrite the value of a HeapNumber with a quiet NaN (see the signaling/
// quiet NaN encoding note above).
void Assembler::QuietNaN(HeapObject* object) {
  HeapNumber::cast(object)->set_value(std::numeric_limits<double>::quiet_NaN());
}
   3401 
   3402 
   3403 // On Mips64, a target address is stored in a 4-instruction sequence:
   3404 //    0: lui(rd, (j.imm64_ >> 32) & kImm16Mask);
   3405 //    1: ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask);
   3406 //    2: dsll(rd, rd, 16);
   3407 //    3: ori(rd, rd, j.imm32_ & kImm16Mask);
   3408 //
   3409 // Patching the address must replace all the lui & ori instructions,
   3410 // and flush the i-cache.
   3411 //
   3412 // There is an optimization below, which emits a nop when the address
   3413 // fits in just 16 bits. This is unlikely to help, and should be benchmarked,
   3414 // and possibly removed.
// Patch the 4-instruction li sequence at 'pc' so it loads 'target', then
// flush the i-cache (unless suppressed). Slot 2 (the dsll) is left
// untouched; only the lui and the two ori immediates are rewritten.
void Assembler::set_target_address_at(Isolate* isolate, Address pc,
                                      Address target,
                                      ICacheFlushMode icache_flush_mode) {
// There is an optimization where only 4 instructions are used to load address
// in code on MIP64 because only 48-bits of address is effectively used.
// It relies on fact the upper [63:48] bits are not used for virtual address
// translation and they have to be set according to value of bit 47 in order
// get canonical address.
  Instr instr1 = instr_at(pc + kInstrSize);
  uint32_t rt_code = GetRt(instr1);
  uint32_t* p = reinterpret_cast<uint32_t*>(pc);
  uint64_t itarget = reinterpret_cast<uint64_t>(target);

#ifdef DEBUG
  // Check we have the result from a li macro-instruction.
  Instr instr0 = instr_at(pc);
  Instr instr3 = instr_at(pc + kInstrSize * 3);
  CHECK((GetOpcodeField(instr0) == LUI && GetOpcodeField(instr1) == ORI &&
         GetOpcodeField(instr3) == ORI));
#endif

  // Must use 4 instructions to insure patchable code.
  // lui rt, upper-16.
  // ori rt, rt, lower-16.
  // dsll rt, rt, 16.
  // ori rt rt, lower-16.
  *p = LUI | (rt_code << kRtShift) | ((itarget >> 32) & kImm16Mask);
  *(p + 1) = ORI | (rt_code << kRtShift) | (rt_code << kRsShift)
      | ((itarget >> 16) & kImm16Mask);
  *(p + 3) = ORI | (rt_code << kRsShift) | (rt_code << kRtShift)
      | (itarget & kImm16Mask);

  if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
    Assembler::FlushICache(isolate, pc, 4 * Assembler::kInstrSize);
  }
}
   3451 
   3452 }  // namespace internal
   3453 }  // namespace v8
   3454 
   3455 #endif  // V8_TARGET_ARCH_MIPS64
   3456