      1 // Copyright 2014 the V8 project authors. All rights reserved.
      2 // Use of this source code is governed by a BSD-style license that can be
      3 // found in the LICENSE file.
      4 
      5 #include "src/compiler/code-generator.h"
      6 #include "src/compilation-info.h"
      7 #include "src/compiler/code-generator-impl.h"
      8 #include "src/compiler/gap-resolver.h"
      9 #include "src/compiler/node-matchers.h"
     10 #include "src/compiler/osr.h"
     11 #include "src/mips/macro-assembler-mips.h"
     12 
     13 namespace v8 {
     14 namespace internal {
     15 namespace compiler {
     16 
     17 #define __ masm()->
     18 
     19 
     20 // TODO(plind): Possibly avoid using these lithium names.
     21 #define kScratchReg kLithiumScratchReg
     22 #define kCompareReg kLithiumScratchReg2
     23 #define kScratchReg2 kLithiumScratchReg2
     24 #define kScratchDoubleReg kLithiumScratchDouble
     25 
     26 
     27 // TODO(plind): consider renaming these macros.
     28 #define TRACE_MSG(msg)                                                      \
     29   PrintF("code_gen: \'%s\' in function %s at line %d\n", msg, __FUNCTION__, \
     30          __LINE__)
     31 
     32 #define TRACE_UNIMPL()                                                       \
     33   PrintF("UNIMPLEMENTED code_generator_mips: %s at line %d\n", __FUNCTION__, \
     34          __LINE__)
     35 
     36 
     37 // Adds Mips-specific methods to convert InstructionOperands.
     38 class MipsOperandConverter final : public InstructionOperandConverter {
     39  public:
     40   MipsOperandConverter(CodeGenerator* gen, Instruction* instr)
     41       : InstructionOperandConverter(gen, instr) {}
     42 
     43   FloatRegister OutputSingleRegister(size_t index = 0) {
     44     return ToSingleRegister(instr_->OutputAt(index));
     45   }
     46 
     47   FloatRegister InputSingleRegister(size_t index) {
     48     return ToSingleRegister(instr_->InputAt(index));
     49   }
     50 
     51   FloatRegister ToSingleRegister(InstructionOperand* op) {
      52     // The single- (float) and double-precision register namespaces are the
      53     // same on MIPS; both are typedefs of FPURegister.
     54     return ToDoubleRegister(op);
     55   }
     56 
     57   Register InputOrZeroRegister(size_t index) {
     58     if (instr_->InputAt(index)->IsImmediate()) {
     59       DCHECK((InputInt32(index) == 0));
     60       return zero_reg;
     61     }
     62     return InputRegister(index);
     63   }
     64 
     65   DoubleRegister InputOrZeroDoubleRegister(size_t index) {
     66     if (instr_->InputAt(index)->IsImmediate()) return kDoubleRegZero;
     67 
     68     return InputDoubleRegister(index);
     69   }
     70 
     71   DoubleRegister InputOrZeroSingleRegister(size_t index) {
     72     if (instr_->InputAt(index)->IsImmediate()) return kDoubleRegZero;
     73 
     74     return InputSingleRegister(index);
     75   }
     76 
     77   Operand InputImmediate(size_t index) {
     78     Constant constant = ToConstant(instr_->InputAt(index));
     79     switch (constant.type()) {
     80       case Constant::kInt32:
     81         return Operand(constant.ToInt32());
     82       case Constant::kFloat32:
     83         return Operand(
     84             isolate()->factory()->NewNumber(constant.ToFloat32(), TENURED));
     85       case Constant::kFloat64:
     86         return Operand(
     87             isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED));
     88       case Constant::kInt64:
     89       case Constant::kExternalReference:
     90       case Constant::kHeapObject:
     91         // TODO(plind): Maybe we should handle ExtRef & HeapObj here?
     92         //    maybe not done on arm due to const pool ??
     93         break;
     94       case Constant::kRpoNumber:
     95         UNREACHABLE();  // TODO(titzer): RPO immediates on mips?
     96         break;
     97     }
     98     UNREACHABLE();
     99     return Operand(zero_reg);
    100   }
    101 
    102   Operand InputOperand(size_t index) {
    103     InstructionOperand* op = instr_->InputAt(index);
    104     if (op->IsRegister()) {
    105       return Operand(ToRegister(op));
    106     }
    107     return InputImmediate(index);
    108   }
    109 
    110   MemOperand MemoryOperand(size_t* first_index) {
    111     const size_t index = *first_index;
    112     switch (AddressingModeField::decode(instr_->opcode())) {
    113       case kMode_None:
    114         break;
    115       case kMode_MRI:
    116         *first_index += 2;
    117         return MemOperand(InputRegister(index + 0), InputInt32(index + 1));
    118       case kMode_MRR:
    119         // TODO(plind): r6 address mode, to be implemented ...
    120         UNREACHABLE();
    121     }
    122     UNREACHABLE();
    123     return MemOperand(no_reg);
    124   }
    125 
    126   MemOperand MemoryOperand(size_t index = 0) { return MemoryOperand(&index); }
    127 
    128   MemOperand ToMemOperand(InstructionOperand* op) const {
    129     DCHECK_NOT_NULL(op);
    130     DCHECK(op->IsStackSlot() || op->IsFPStackSlot());
    131     return SlotToMemOperand(AllocatedOperand::cast(op)->index());
    132   }
    133 
    134   MemOperand SlotToMemOperand(int slot) const {
    135     FrameOffset offset = frame_access_state()->GetFrameOffset(slot);
    136     return MemOperand(offset.from_stack_pointer() ? sp : fp, offset.offset());
    137   }
    138 };
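         // Typical use (see AssembleArchInstruction() below):
         //   MipsOperandConverter i(this, instr);
         //   __ Addu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));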
    139 
    140 
    141 static inline bool HasRegisterInput(Instruction* instr, size_t index) {
    142   return instr->InputAt(index)->IsRegister();
    143 }
    144 
    145 
    146 namespace {
    147 
    148 class OutOfLineLoadSingle final : public OutOfLineCode {
    149  public:
    150   OutOfLineLoadSingle(CodeGenerator* gen, FloatRegister result)
    151       : OutOfLineCode(gen), result_(result) {}
    152 
    153   void Generate() final {
    154     __ Move(result_, std::numeric_limits<float>::quiet_NaN());
    155   }
    156 
    157  private:
    158   FloatRegister const result_;
    159 };
    160 
    161 
    162 class OutOfLineLoadDouble final : public OutOfLineCode {
    163  public:
    164   OutOfLineLoadDouble(CodeGenerator* gen, DoubleRegister result)
    165       : OutOfLineCode(gen), result_(result) {}
    166 
    167   void Generate() final {
    168     __ Move(result_, std::numeric_limits<double>::quiet_NaN());
    169   }
    170 
    171  private:
    172   DoubleRegister const result_;
    173 };
    174 
    175 
    176 class OutOfLineLoadInteger final : public OutOfLineCode {
    177  public:
    178   OutOfLineLoadInteger(CodeGenerator* gen, Register result)
    179       : OutOfLineCode(gen), result_(result) {}
    180 
    181   void Generate() final { __ mov(result_, zero_reg); }
    182 
    183  private:
    184   Register const result_;
    185 };
    186 
    187 
    188 class OutOfLineRound : public OutOfLineCode {
    189  public:
    190   OutOfLineRound(CodeGenerator* gen, DoubleRegister result)
    191       : OutOfLineCode(gen), result_(result) {}
    192 
    193   void Generate() final {
     194     // Handle the rounding-to-zero case, where the sign has to be preserved.
     195     // The high bits of the double input are already in kScratchReg.
    196     __ srl(at, kScratchReg, 31);
    197     __ sll(at, at, 31);
    198     __ Mthc1(at, result_);
    199   }
    200 
    201  private:
    202   DoubleRegister const result_;
    203 };
    204 
    205 
    206 class OutOfLineRound32 : public OutOfLineCode {
    207  public:
    208   OutOfLineRound32(CodeGenerator* gen, DoubleRegister result)
    209       : OutOfLineCode(gen), result_(result) {}
    210 
    211   void Generate() final {
     212     // Handle the rounding-to-zero case, where the sign has to be preserved.
     213     // The high bits of the float input are already in kScratchReg.
    214     __ srl(at, kScratchReg, 31);
    215     __ sll(at, at, 31);
    216     __ mtc1(at, result_);
    217   }
    218 
    219  private:
    220   DoubleRegister const result_;
    221 };
    222 
    223 
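         // Out-of-line write-barrier path: skips Smi values when the mode allows it,
         // checks the page flag of the stored value, and calls the RecordWriteStub,
         // saving and restoring ra when no frame has been built.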
    224 class OutOfLineRecordWrite final : public OutOfLineCode {
    225  public:
    226   OutOfLineRecordWrite(CodeGenerator* gen, Register object, Register index,
    227                        Register value, Register scratch0, Register scratch1,
    228                        RecordWriteMode mode)
    229       : OutOfLineCode(gen),
    230         object_(object),
    231         index_(index),
    232         value_(value),
    233         scratch0_(scratch0),
    234         scratch1_(scratch1),
    235         mode_(mode),
    236         must_save_lr_(!gen->frame_access_state()->has_frame()) {}
    237 
    238   void Generate() final {
    239     if (mode_ > RecordWriteMode::kValueIsPointer) {
    240       __ JumpIfSmi(value_, exit());
    241     }
    242     __ CheckPageFlag(value_, scratch0_,
    243                      MemoryChunk::kPointersToHereAreInterestingMask, eq,
    244                      exit());
    245     RememberedSetAction const remembered_set_action =
    246         mode_ > RecordWriteMode::kValueIsMap ? EMIT_REMEMBERED_SET
    247                                              : OMIT_REMEMBERED_SET;
    248     SaveFPRegsMode const save_fp_mode =
    249         frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
    250     if (must_save_lr_) {
    251       // We need to save and restore ra if the frame was elided.
    252       __ Push(ra);
    253     }
    254     RecordWriteStub stub(isolate(), object_, scratch0_, scratch1_,
    255                          remembered_set_action, save_fp_mode);
    256     __ Addu(scratch1_, object_, index_);
    257     __ CallStub(&stub);
    258     if (must_save_lr_) {
    259       __ Pop(ra);
    260     }
    261   }
    262 
    263  private:
    264   Register const object_;
    265   Register const index_;
    266   Register const value_;
    267   Register const scratch0_;
    268   Register const scratch1_;
    269   RecordWriteMode const mode_;
    270   bool must_save_lr_;
    271 };
    272 
    273 #define CREATE_OOL_CLASS(ool_name, masm_ool_name, T)                 \
    274   class ool_name final : public OutOfLineCode {                      \
    275    public:                                                           \
    276     ool_name(CodeGenerator* gen, T dst, T src1, T src2)              \
    277         : OutOfLineCode(gen), dst_(dst), src1_(src1), src2_(src2) {} \
    278                                                                      \
    279     void Generate() final { __ masm_ool_name(dst_, src1_, src2_); }  \
    280                                                                      \
    281    private:                                                          \
    282     T const dst_;                                                    \
    283     T const src1_;                                                   \
    284     T const src2_;                                                   \
    285   }
    286 
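         // Each CREATE_OOL_CLASS use below defines an OutOfLineCode subclass whose
         // Generate() simply forwards to the named MacroAssembler helper, e.g.
         // OutOfLineFloat32Max calls masm()->Float32MaxOutOfLine(dst, src1, src2).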
    287 CREATE_OOL_CLASS(OutOfLineFloat32Max, Float32MaxOutOfLine, FPURegister);
    288 CREATE_OOL_CLASS(OutOfLineFloat32Min, Float32MinOutOfLine, FPURegister);
    289 CREATE_OOL_CLASS(OutOfLineFloat64Max, Float64MaxOutOfLine, DoubleRegister);
    290 CREATE_OOL_CLASS(OutOfLineFloat64Min, Float64MinOutOfLine, DoubleRegister);
    291 
    292 #undef CREATE_OOL_CLASS
    293 
    294 Condition FlagsConditionToConditionCmp(FlagsCondition condition) {
    295   switch (condition) {
    296     case kEqual:
    297       return eq;
    298     case kNotEqual:
    299       return ne;
    300     case kSignedLessThan:
    301       return lt;
    302     case kSignedGreaterThanOrEqual:
    303       return ge;
    304     case kSignedLessThanOrEqual:
    305       return le;
    306     case kSignedGreaterThan:
    307       return gt;
    308     case kUnsignedLessThan:
    309       return lo;
    310     case kUnsignedGreaterThanOrEqual:
    311       return hs;
    312     case kUnsignedLessThanOrEqual:
    313       return ls;
    314     case kUnsignedGreaterThan:
    315       return hi;
    316     case kUnorderedEqual:
    317     case kUnorderedNotEqual:
    318       break;
    319     default:
    320       break;
    321   }
    322   UNREACHABLE();
    323   return kNoCondition;
    324 }
    325 
    326 
    327 Condition FlagsConditionToConditionTst(FlagsCondition condition) {
    328   switch (condition) {
    329     case kNotEqual:
    330       return ne;
    331     case kEqual:
    332       return eq;
    333     default:
    334       break;
    335   }
    336   UNREACHABLE();
    337   return kNoCondition;
    338 }
    339 
    340 
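         // Maps a FlagsCondition to the FPU compare condition to emit, plus a
         // |predicate| flag telling the caller whether to branch on the compare
         // result being set or cleared (e.g. kNotEqual is encoded as "EQ with
         // predicate == false").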
    341 FPUCondition FlagsConditionToConditionCmpFPU(bool& predicate,
    342                                              FlagsCondition condition) {
    343   switch (condition) {
    344     case kEqual:
    345       predicate = true;
    346       return EQ;
    347     case kNotEqual:
    348       predicate = false;
    349       return EQ;
    350     case kUnsignedLessThan:
    351       predicate = true;
    352       return OLT;
    353     case kUnsignedGreaterThanOrEqual:
    354       predicate = false;
    355       return ULT;
    356     case kUnsignedLessThanOrEqual:
    357       predicate = true;
    358       return OLE;
    359     case kUnsignedGreaterThan:
    360       predicate = false;
    361       return ULE;
    362     case kUnorderedEqual:
    363     case kUnorderedNotEqual:
    364       predicate = true;
    365       break;
    366     default:
    367       predicate = true;
    368       break;
    369   }
    370   UNREACHABLE();
    371   return kNoFPUCondition;
    372 }
    373 
    374 }  // namespace
    375 
    376 
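         // The checked load/store macros below emit an unsigned bounds check on the
         // offset: out-of-bounds loads branch to out-of-line code that produces a NaN
         // (floats) or zero (integers) result, while out-of-bounds stores are simply
         // skipped. In the register-offset form the effective address is computed in
         // the branch delay slot.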
    377 #define ASSEMBLE_CHECKED_LOAD_FLOAT(width, asm_instr)                         \
    378   do {                                                                        \
    379     auto result = i.Output##width##Register();                                \
    380     auto ool = new (zone()) OutOfLineLoad##width(this, result);               \
    381     if (instr->InputAt(0)->IsRegister()) {                                    \
    382       auto offset = i.InputRegister(0);                                       \
    383       __ Branch(USE_DELAY_SLOT, ool->entry(), hs, offset, i.InputOperand(1)); \
    384       __ addu(kScratchReg, i.InputRegister(2), offset);                       \
    385       __ asm_instr(result, MemOperand(kScratchReg, 0));                       \
    386     } else {                                                                  \
    387       auto offset = i.InputOperand(0).immediate();                            \
    388       __ Branch(ool->entry(), ls, i.InputRegister(1), Operand(offset));       \
    389       __ asm_instr(result, MemOperand(i.InputRegister(2), offset));           \
    390     }                                                                         \
    391     __ bind(ool->exit());                                                     \
    392   } while (0)
    393 
    394 
    395 #define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr)                              \
    396   do {                                                                        \
    397     auto result = i.OutputRegister();                                         \
    398     auto ool = new (zone()) OutOfLineLoadInteger(this, result);               \
    399     if (instr->InputAt(0)->IsRegister()) {                                    \
    400       auto offset = i.InputRegister(0);                                       \
    401       __ Branch(USE_DELAY_SLOT, ool->entry(), hs, offset, i.InputOperand(1)); \
    402       __ addu(kScratchReg, i.InputRegister(2), offset);                       \
    403       __ asm_instr(result, MemOperand(kScratchReg, 0));                       \
    404     } else {                                                                  \
    405       auto offset = i.InputOperand(0).immediate();                            \
    406       __ Branch(ool->entry(), ls, i.InputRegister(1), Operand(offset));       \
    407       __ asm_instr(result, MemOperand(i.InputRegister(2), offset));           \
    408     }                                                                         \
    409     __ bind(ool->exit());                                                     \
    410   } while (0)
    411 
    412 #define ASSEMBLE_CHECKED_STORE_FLOAT(width, asm_instr)                 \
    413   do {                                                                 \
    414     Label done;                                                        \
    415     if (instr->InputAt(0)->IsRegister()) {                             \
    416       auto offset = i.InputRegister(0);                                \
    417       auto value = i.InputOrZero##width##Register(2);                  \
    418       if (value.is(kDoubleRegZero) && !__ IsDoubleZeroRegSet()) {      \
    419         __ Move(kDoubleRegZero, 0.0);                                  \
    420       }                                                                \
    421       __ Branch(USE_DELAY_SLOT, &done, hs, offset, i.InputOperand(1)); \
    422       __ addu(kScratchReg, i.InputRegister(3), offset);                \
    423       __ asm_instr(value, MemOperand(kScratchReg, 0));                 \
    424     } else {                                                           \
    425       auto offset = i.InputOperand(0).immediate();                     \
    426       auto value = i.InputOrZero##width##Register(2);                  \
    427       if (value.is(kDoubleRegZero) && !__ IsDoubleZeroRegSet()) {      \
    428         __ Move(kDoubleRegZero, 0.0);                                  \
    429       }                                                                \
    430       __ Branch(&done, ls, i.InputRegister(1), Operand(offset));       \
    431       __ asm_instr(value, MemOperand(i.InputRegister(3), offset));     \
    432     }                                                                  \
    433     __ bind(&done);                                                    \
    434   } while (0)
    435 
    436 #define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr)                      \
    437   do {                                                                 \
    438     Label done;                                                        \
    439     if (instr->InputAt(0)->IsRegister()) {                             \
    440       auto offset = i.InputRegister(0);                                \
    441       auto value = i.InputOrZeroRegister(2);                           \
    442       __ Branch(USE_DELAY_SLOT, &done, hs, offset, i.InputOperand(1)); \
    443       __ addu(kScratchReg, i.InputRegister(3), offset);                \
    444       __ asm_instr(value, MemOperand(kScratchReg, 0));                 \
    445     } else {                                                           \
    446       auto offset = i.InputOperand(0).immediate();                     \
    447       auto value = i.InputOrZeroRegister(2);                           \
    448       __ Branch(&done, ls, i.InputRegister(1), Operand(offset));       \
    449       __ asm_instr(value, MemOperand(i.InputRegister(3), offset));     \
    450     }                                                                  \
    451     __ bind(&done);                                                    \
    452   } while (0)
    453 
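         // Rounding helpers: on r6 the rounding mode is set in FCSR and a single rint
         // instruction is used. On earlier variants the exponent is checked first
         // (values too large to have a fractional part are passed through unchanged),
         // the value is rounded via a convert-to-integer/convert-back pair, and the
         // OutOfLineRound classes restore the sign of a zero result.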
    454 #define ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(mode)                                  \
    455   if (IsMipsArchVariant(kMips32r6)) {                                          \
    456     __ cfc1(kScratchReg, FCSR);                                                \
    457     __ li(at, Operand(mode_##mode));                                           \
    458     __ ctc1(at, FCSR);                                                         \
    459     __ rint_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));             \
    460     __ ctc1(kScratchReg, FCSR);                                                \
    461   } else {                                                                     \
    462     auto ool = new (zone()) OutOfLineRound(this, i.OutputDoubleRegister());    \
    463     Label done;                                                                \
    464     __ Mfhc1(kScratchReg, i.InputDoubleRegister(0));                           \
    465     __ Ext(at, kScratchReg, HeapNumber::kExponentShift,                        \
    466            HeapNumber::kExponentBits);                                         \
    467     __ Branch(USE_DELAY_SLOT, &done, hs, at,                                   \
    468               Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits)); \
    469     __ mov_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));              \
    470     __ mode##_l_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));         \
    471     __ Move(at, kScratchReg2, i.OutputDoubleRegister());                       \
    472     __ or_(at, at, kScratchReg2);                                              \
    473     __ Branch(USE_DELAY_SLOT, ool->entry(), eq, at, Operand(zero_reg));        \
    474     __ cvt_d_l(i.OutputDoubleRegister(), i.OutputDoubleRegister());            \
    475     __ bind(ool->exit());                                                      \
    476     __ bind(&done);                                                            \
    477   }
    478 
    479 
    480 #define ASSEMBLE_ROUND_FLOAT_TO_FLOAT(mode)                                   \
    481   if (IsMipsArchVariant(kMips32r6)) {                                         \
    482     __ cfc1(kScratchReg, FCSR);                                               \
    483     __ li(at, Operand(mode_##mode));                                          \
    484     __ ctc1(at, FCSR);                                                        \
    485     __ rint_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0));            \
    486     __ ctc1(kScratchReg, FCSR);                                               \
    487   } else {                                                                    \
    488     int32_t kFloat32ExponentBias = 127;                                       \
    489     int32_t kFloat32MantissaBits = 23;                                        \
    490     int32_t kFloat32ExponentBits = 8;                                         \
    491     auto ool = new (zone()) OutOfLineRound32(this, i.OutputDoubleRegister()); \
    492     Label done;                                                               \
    493     __ mfc1(kScratchReg, i.InputDoubleRegister(0));                           \
    494     __ Ext(at, kScratchReg, kFloat32MantissaBits, kFloat32ExponentBits);      \
    495     __ Branch(USE_DELAY_SLOT, &done, hs, at,                                  \
    496               Operand(kFloat32ExponentBias + kFloat32MantissaBits));          \
    497     __ mov_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0));             \
    498     __ mode##_w_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0));        \
    499     __ mfc1(at, i.OutputDoubleRegister());                                    \
    500     __ Branch(USE_DELAY_SLOT, ool->entry(), eq, at, Operand(zero_reg));       \
    501     __ cvt_s_w(i.OutputDoubleRegister(), i.OutputDoubleRegister());           \
    502     __ bind(ool->exit());                                                     \
    503     __ bind(&done);                                                           \
    504   }
    505 
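         // Atomic accesses: the load macro issues a sync barrier after the load and
         // the store macro brackets the store with sync barriers.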
    506 #define ASSEMBLE_ATOMIC_LOAD_INTEGER(asm_instr)          \
    507   do {                                                   \
    508     __ asm_instr(i.OutputRegister(), i.MemoryOperand()); \
    509     __ sync();                                           \
    510   } while (0)
    511 
    512 #define ASSEMBLE_ATOMIC_STORE_INTEGER(asm_instr)               \
    513   do {                                                         \
    514     __ sync();                                                 \
    515     __ asm_instr(i.InputOrZeroRegister(2), i.MemoryOperand()); \
    516     __ sync();                                                 \
    517   } while (0)
    518 
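         // The IEEE754 macros call out to the C implementations, e.g.
         // ASSEMBLE_IEEE754_UNOP(log) calls ExternalReference::ieee754_log_function()
         // with the argument and result passed in the FP parameter/result registers.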
    519 #define ASSEMBLE_IEEE754_BINOP(name)                                          \
    520   do {                                                                        \
    521     FrameScope scope(masm(), StackFrame::MANUAL);                             \
    522     __ PrepareCallCFunction(0, 2, kScratchReg);                               \
    523     __ MovToFloatParameters(i.InputDoubleRegister(0),                         \
    524                             i.InputDoubleRegister(1));                        \
    525     __ CallCFunction(ExternalReference::ieee754_##name##_function(isolate()), \
    526                      0, 2);                                                   \
     527     /* Move the result into the double result register. */                    \
    528     __ MovFromFloatResult(i.OutputDoubleRegister());                          \
    529   } while (0)
    530 
    531 #define ASSEMBLE_IEEE754_UNOP(name)                                           \
    532   do {                                                                        \
    533     FrameScope scope(masm(), StackFrame::MANUAL);                             \
    534     __ PrepareCallCFunction(0, 1, kScratchReg);                               \
    535     __ MovToFloatParameter(i.InputDoubleRegister(0));                         \
    536     __ CallCFunction(ExternalReference::ieee754_##name##_function(isolate()), \
    537                      0, 1);                                                   \
     538     /* Move the result into the double result register. */                    \
    539     __ MovFromFloatResult(i.OutputDoubleRegister());                          \
    540   } while (0)
    541 
    542 void CodeGenerator::AssembleDeconstructFrame() {
    543   __ mov(sp, fp);
    544   __ Pop(ra, fp);
    545 }
    546 
    547 void CodeGenerator::AssemblePrepareTailCall() {
    548   if (frame_access_state()->has_frame()) {
    549     __ lw(ra, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
    550     __ lw(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
    551   }
    552   frame_access_state()->SetFrameAccessToSP();
    553 }
    554 
    555 void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg,
    556                                                      Register scratch1,
    557                                                      Register scratch2,
    558                                                      Register scratch3) {
    559   DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
    560   Label done;
    561 
    562   // Check if current frame is an arguments adaptor frame.
     563   // Check if the current frame is an arguments adaptor frame.
    564   __ Branch(&done, ne, scratch1,
    565             Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
    566 
     567   // Load the argument count from the current arguments adaptor frame (note
     568   // that it does not include the receiver).
    569   Register caller_args_count_reg = scratch1;
    570   __ lw(caller_args_count_reg,
    571         MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
    572   __ SmiUntag(caller_args_count_reg);
    573 
    574   ParameterCount callee_args_count(args_reg);
    575   __ PrepareForTailCall(callee_args_count, caller_args_count_reg, scratch2,
    576                         scratch3);
    577   __ bind(&done);
    578 }
    579 
    580 namespace {
    581 
    582 void AdjustStackPointerForTailCall(MacroAssembler* masm,
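         // Adjusts sp by the difference between |new_slot_above_sp| and the current
         // number of slots between sp and the fixed part of the frame, recording the
         // change in the FrameAccessState; the stack is only shrunk when
         // |allow_shrinkage| is set.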
    583                                    FrameAccessState* state,
    584                                    int new_slot_above_sp,
    585                                    bool allow_shrinkage = true) {
    586   int current_sp_offset = state->GetSPToFPSlotCount() +
    587                           StandardFrameConstants::kFixedSlotCountAboveFp;
    588   int stack_slot_delta = new_slot_above_sp - current_sp_offset;
    589   if (stack_slot_delta > 0) {
    590     masm->Subu(sp, sp, stack_slot_delta * kPointerSize);
    591     state->IncreaseSPDelta(stack_slot_delta);
    592   } else if (allow_shrinkage && stack_slot_delta < 0) {
    593     masm->Addu(sp, sp, -stack_slot_delta * kPointerSize);
    594     state->IncreaseSPDelta(stack_slot_delta);
    595   }
    596 }
    597 
    598 }  // namespace
    599 
    600 void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
    601                                               int first_unused_stack_slot) {
    602   AdjustStackPointerForTailCall(masm(), frame_access_state(),
    603                                 first_unused_stack_slot, false);
    604 }
    605 
    606 void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
    607                                              int first_unused_stack_slot) {
    608   AdjustStackPointerForTailCall(masm(), frame_access_state(),
    609                                 first_unused_stack_slot);
    610 }
    611 
    612 // Assembles an instruction after register allocation, producing machine code.
    613 CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
    614     Instruction* instr) {
    615   MipsOperandConverter i(this, instr);
    616   InstructionCode opcode = instr->opcode();
    617   ArchOpcode arch_opcode = ArchOpcodeField::decode(opcode);
    618   switch (arch_opcode) {
    619     case kArchCallCodeObject: {
    620       EnsureSpaceForLazyDeopt();
    621       if (instr->InputAt(0)->IsImmediate()) {
    622         __ Call(Handle<Code>::cast(i.InputHeapObject(0)),
    623                 RelocInfo::CODE_TARGET);
    624       } else {
    625         __ addiu(at, i.InputRegister(0), Code::kHeaderSize - kHeapObjectTag);
    626         __ Call(at);
    627       }
    628       RecordCallPosition(instr);
    629       frame_access_state()->ClearSPDelta();
    630       break;
    631     }
    632     case kArchTailCallCodeObjectFromJSFunction:
    633     case kArchTailCallCodeObject: {
    634       if (arch_opcode == kArchTailCallCodeObjectFromJSFunction) {
    635         AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
    636                                          i.TempRegister(0), i.TempRegister(1),
    637                                          i.TempRegister(2));
    638       }
    639       if (instr->InputAt(0)->IsImmediate()) {
    640         __ Jump(Handle<Code>::cast(i.InputHeapObject(0)),
    641                 RelocInfo::CODE_TARGET);
    642       } else {
    643         __ addiu(at, i.InputRegister(0), Code::kHeaderSize - kHeapObjectTag);
    644         __ Jump(at);
    645       }
    646       frame_access_state()->ClearSPDelta();
    647       frame_access_state()->SetFrameAccessToDefault();
    648       break;
    649     }
    650     case kArchTailCallAddress: {
    651       CHECK(!instr->InputAt(0)->IsImmediate());
    652       __ Jump(i.InputRegister(0));
    653       frame_access_state()->ClearSPDelta();
    654       frame_access_state()->SetFrameAccessToDefault();
    655       break;
    656     }
    657     case kArchCallJSFunction: {
    658       EnsureSpaceForLazyDeopt();
    659       Register func = i.InputRegister(0);
    660       if (FLAG_debug_code) {
     661         // Check that the function's context matches the context argument.
    662         __ lw(kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset));
    663         __ Assert(eq, kWrongFunctionContext, cp, Operand(kScratchReg));
    664       }
    665 
    666       __ lw(at, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
    667       __ Call(at);
    668       RecordCallPosition(instr);
    669       frame_access_state()->ClearSPDelta();
    670       frame_access_state()->SetFrameAccessToDefault();
    671       break;
    672     }
    673     case kArchTailCallJSFunctionFromJSFunction: {
    674       Register func = i.InputRegister(0);
    675       if (FLAG_debug_code) {
     676         // Check that the function's context matches the context argument.
    677         __ lw(kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset));
    678         __ Assert(eq, kWrongFunctionContext, cp, Operand(kScratchReg));
    679       }
    680       AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
    681                                        i.TempRegister(0), i.TempRegister(1),
    682                                        i.TempRegister(2));
    683       __ lw(at, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
    684       __ Jump(at);
    685       frame_access_state()->ClearSPDelta();
    686       break;
    687     }
    688     case kArchPrepareCallCFunction: {
    689       int const num_parameters = MiscField::decode(instr->opcode());
    690       __ PrepareCallCFunction(num_parameters, kScratchReg);
    691       // Frame alignment requires using FP-relative frame addressing.
    692       frame_access_state()->SetFrameAccessToFP();
    693       break;
    694     }
    695     case kArchPrepareTailCall:
    696       AssemblePrepareTailCall();
    697       break;
    698     case kArchCallCFunction: {
    699       int const num_parameters = MiscField::decode(instr->opcode());
    700       if (instr->InputAt(0)->IsImmediate()) {
    701         ExternalReference ref = i.InputExternalReference(0);
    702         __ CallCFunction(ref, num_parameters);
    703       } else {
    704         Register func = i.InputRegister(0);
    705         __ CallCFunction(func, num_parameters);
    706       }
    707       frame_access_state()->SetFrameAccessToDefault();
    708       frame_access_state()->ClearSPDelta();
    709       break;
    710     }
    711     case kArchJmp:
    712       AssembleArchJump(i.InputRpo(0));
    713       break;
    714     case kArchLookupSwitch:
    715       AssembleArchLookupSwitch(instr);
    716       break;
    717     case kArchTableSwitch:
    718       AssembleArchTableSwitch(instr);
    719       break;
    720     case kArchDebugBreak:
    721       __ stop("kArchDebugBreak");
    722       break;
    723     case kArchComment: {
    724       Address comment_string = i.InputExternalReference(0).address();
    725       __ RecordComment(reinterpret_cast<const char*>(comment_string));
    726       break;
    727     }
    728     case kArchNop:
    729     case kArchThrowTerminator:
    730       // don't emit code for nops.
    731       break;
    732     case kArchDeoptimize: {
    733       int deopt_state_id =
    734           BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
    735       CodeGenResult result =
    736           AssembleDeoptimizerCall(deopt_state_id, current_source_position_);
    737       if (result != kSuccess) return result;
    738       break;
    739     }
    740     case kArchRet:
    741       AssembleReturn(instr->InputAt(0));
    742       break;
    743     case kArchStackPointer:
    744       __ mov(i.OutputRegister(), sp);
    745       break;
    746     case kArchFramePointer:
    747       __ mov(i.OutputRegister(), fp);
    748       break;
    749     case kArchParentFramePointer:
    750       if (frame_access_state()->has_frame()) {
    751         __ lw(i.OutputRegister(), MemOperand(fp, 0));
    752       } else {
    753         __ mov(i.OutputRegister(), fp);
    754       }
    755       break;
    756     case kArchTruncateDoubleToI:
    757       __ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
    758       break;
    759     case kArchStoreWithWriteBarrier: {
    760       RecordWriteMode mode =
    761           static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
    762       Register object = i.InputRegister(0);
    763       Register index = i.InputRegister(1);
    764       Register value = i.InputRegister(2);
    765       Register scratch0 = i.TempRegister(0);
    766       Register scratch1 = i.TempRegister(1);
    767       auto ool = new (zone()) OutOfLineRecordWrite(this, object, index, value,
    768                                                    scratch0, scratch1, mode);
    769       __ Addu(at, object, index);
    770       __ sw(value, MemOperand(at));
    771       __ CheckPageFlag(object, scratch0,
    772                        MemoryChunk::kPointersFromHereAreInterestingMask, ne,
    773                        ool->entry());
    774       __ bind(ool->exit());
    775       break;
    776     }
    777     case kArchStackSlot: {
    778       FrameOffset offset =
    779           frame_access_state()->GetFrameOffset(i.InputInt32(0));
    780       __ Addu(i.OutputRegister(), offset.from_stack_pointer() ? sp : fp,
    781               Operand(offset.offset()));
    782       break;
    783     }
    784     case kIeee754Float64Acos:
    785       ASSEMBLE_IEEE754_UNOP(acos);
    786       break;
    787     case kIeee754Float64Acosh:
    788       ASSEMBLE_IEEE754_UNOP(acosh);
    789       break;
    790     case kIeee754Float64Asin:
    791       ASSEMBLE_IEEE754_UNOP(asin);
    792       break;
    793     case kIeee754Float64Asinh:
    794       ASSEMBLE_IEEE754_UNOP(asinh);
    795       break;
    796     case kIeee754Float64Atan:
    797       ASSEMBLE_IEEE754_UNOP(atan);
    798       break;
    799     case kIeee754Float64Atanh:
    800       ASSEMBLE_IEEE754_UNOP(atanh);
    801       break;
    802     case kIeee754Float64Atan2:
    803       ASSEMBLE_IEEE754_BINOP(atan2);
    804       break;
    805     case kIeee754Float64Cos:
    806       ASSEMBLE_IEEE754_UNOP(cos);
    807       break;
    808     case kIeee754Float64Cosh:
    809       ASSEMBLE_IEEE754_UNOP(cosh);
    810       break;
    811     case kIeee754Float64Cbrt:
    812       ASSEMBLE_IEEE754_UNOP(cbrt);
    813       break;
    814     case kIeee754Float64Exp:
    815       ASSEMBLE_IEEE754_UNOP(exp);
    816       break;
    817     case kIeee754Float64Expm1:
    818       ASSEMBLE_IEEE754_UNOP(expm1);
    819       break;
    820     case kIeee754Float64Log:
    821       ASSEMBLE_IEEE754_UNOP(log);
    822       break;
    823     case kIeee754Float64Log1p:
    824       ASSEMBLE_IEEE754_UNOP(log1p);
    825       break;
    826     case kIeee754Float64Log10:
    827       ASSEMBLE_IEEE754_UNOP(log10);
    828       break;
    829     case kIeee754Float64Log2:
    830       ASSEMBLE_IEEE754_UNOP(log2);
    831       break;
    832     case kIeee754Float64Pow: {
    833       MathPowStub stub(isolate(), MathPowStub::DOUBLE);
    834       __ CallStub(&stub);
    835       break;
    836     }
    837     case kIeee754Float64Sin:
    838       ASSEMBLE_IEEE754_UNOP(sin);
    839       break;
    840     case kIeee754Float64Sinh:
    841       ASSEMBLE_IEEE754_UNOP(sinh);
    842       break;
    843     case kIeee754Float64Tan:
    844       ASSEMBLE_IEEE754_UNOP(tan);
    845       break;
    846     case kIeee754Float64Tanh:
    847       ASSEMBLE_IEEE754_UNOP(tanh);
    848       break;
    849     case kMipsAdd:
    850       __ Addu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
    851       break;
    852     case kMipsAddOvf:
    853       // Pseudo-instruction used for overflow/branch. No opcode emitted here.
    854       break;
    855     case kMipsSub:
    856       __ Subu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
    857       break;
    858     case kMipsSubOvf:
    859       // Pseudo-instruction used for overflow/branch. No opcode emitted here.
    860       break;
    861     case kMipsMul:
    862       __ Mul(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
    863       break;
    864     case kMipsMulOvf:
    865       // Pseudo-instruction used for overflow/branch. No opcode emitted here.
    866       break;
    867     case kMipsMulHigh:
    868       __ Mulh(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
    869       break;
    870     case kMipsMulHighU:
    871       __ Mulhu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
    872       break;
    873     case kMipsDiv:
    874       __ Div(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
    875       if (IsMipsArchVariant(kMips32r6)) {
    876         __ selnez(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
    877       } else {
    878         __ Movz(i.OutputRegister(), i.InputRegister(1), i.InputRegister(1));
    879       }
    880       break;
    881     case kMipsDivU:
    882       __ Divu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
    883       if (IsMipsArchVariant(kMips32r6)) {
    884         __ selnez(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
    885       } else {
    886         __ Movz(i.OutputRegister(), i.InputRegister(1), i.InputRegister(1));
    887       }
    888       break;
    889     case kMipsMod:
    890       __ Mod(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
    891       break;
    892     case kMipsModU:
    893       __ Modu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
    894       break;
    895     case kMipsAnd:
    896       __ And(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
    897       break;
    898     case kMipsOr:
    899       __ Or(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
    900       break;
    901     case kMipsNor:
    902       if (instr->InputAt(1)->IsRegister()) {
    903         __ Nor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
    904       } else {
    905         DCHECK(i.InputOperand(1).immediate() == 0);
    906         __ Nor(i.OutputRegister(), i.InputRegister(0), zero_reg);
    907       }
    908       break;
    909     case kMipsXor:
    910       __ Xor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
    911       break;
    912     case kMipsClz:
    913       __ Clz(i.OutputRegister(), i.InputRegister(0));
    914       break;
    915     case kMipsCtz: {
    916       Register reg1 = kScratchReg;
    917       Register reg2 = kScratchReg2;
    918       Label skip_for_zero;
    919       Label end;
    920       // Branch if the operand is zero
    921       __ Branch(&skip_for_zero, eq, i.InputRegister(0), Operand(zero_reg));
     922       // Isolate the lowest set bit and count the zero bits above it.
    923       __ Subu(reg2, zero_reg, i.InputRegister(0));
    924       __ And(reg2, reg2, i.InputRegister(0));
    925       __ clz(reg2, reg2);
     926       // The trailing zero count is 31 minus that leading zero count.
    927       __ li(reg1, 0x1F);
    928       __ Subu(i.OutputRegister(), reg1, reg2);
    929       __ Branch(&end);
    930       __ bind(&skip_for_zero);
    931       // If the operand is zero, return word length as the result.
    932       __ li(i.OutputRegister(), 0x20);
    933       __ bind(&end);
    934     } break;
    935     case kMipsPopcnt: {
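               // Parallel ("SWAR") bit count: each step adds the counts of adjacent
               // bit groups in place. Roughly, in C:
               //   x = (x & 0x55555555) + ((x >> 1) & 0x55555555);
               //   x = (x & 0x33333333) + ((x >> 2) & 0x33333333);
               //   ... and so on for 4-, 8- and 16-bit groups.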
    936       Register reg1 = kScratchReg;
    937       Register reg2 = kScratchReg2;
    938       uint32_t m1 = 0x55555555;
    939       uint32_t m2 = 0x33333333;
    940       uint32_t m4 = 0x0f0f0f0f;
    941       uint32_t m8 = 0x00ff00ff;
    942       uint32_t m16 = 0x0000ffff;
    943 
    944       // Put count of ones in every 2 bits into those 2 bits.
    945       __ li(at, m1);
    946       __ srl(reg1, i.InputRegister(0), 1);
    947       __ And(reg2, i.InputRegister(0), at);
    948       __ And(reg1, reg1, at);
    949       __ addu(reg1, reg1, reg2);
    950 
    951       // Put count of ones in every 4 bits into those 4 bits.
    952       __ li(at, m2);
    953       __ srl(reg2, reg1, 2);
    954       __ And(reg2, reg2, at);
    955       __ And(reg1, reg1, at);
    956       __ addu(reg1, reg1, reg2);
    957 
    958       // Put count of ones in every 8 bits into those 8 bits.
    959       __ li(at, m4);
    960       __ srl(reg2, reg1, 4);
    961       __ And(reg2, reg2, at);
    962       __ And(reg1, reg1, at);
    963       __ addu(reg1, reg1, reg2);
    964 
    965       // Put count of ones in every 16 bits into those 16 bits.
    966       __ li(at, m8);
    967       __ srl(reg2, reg1, 8);
    968       __ And(reg2, reg2, at);
    969       __ And(reg1, reg1, at);
    970       __ addu(reg1, reg1, reg2);
    971 
    972       // Calculate total number of ones.
    973       __ li(at, m16);
    974       __ srl(reg2, reg1, 16);
    975       __ And(reg2, reg2, at);
    976       __ And(reg1, reg1, at);
    977       __ addu(i.OutputRegister(), reg1, reg2);
    978     } break;
    979     case kMipsShl:
    980       if (instr->InputAt(1)->IsRegister()) {
    981         __ sllv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
    982       } else {
    983         int32_t imm = i.InputOperand(1).immediate();
    984         __ sll(i.OutputRegister(), i.InputRegister(0), imm);
    985       }
    986       break;
    987     case kMipsShr:
    988       if (instr->InputAt(1)->IsRegister()) {
    989         __ srlv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
    990       } else {
    991         int32_t imm = i.InputOperand(1).immediate();
    992         __ srl(i.OutputRegister(), i.InputRegister(0), imm);
    993       }
    994       break;
    995     case kMipsSar:
    996       if (instr->InputAt(1)->IsRegister()) {
    997         __ srav(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
    998       } else {
    999         int32_t imm = i.InputOperand(1).immediate();
   1000         __ sra(i.OutputRegister(), i.InputRegister(0), imm);
   1001       }
   1002       break;
   1003     case kMipsShlPair: {
   1004       Register second_output =
   1005           instr->OutputCount() >= 2 ? i.OutputRegister(1) : i.TempRegister(0);
   1006       if (instr->InputAt(2)->IsRegister()) {
   1007         __ ShlPair(i.OutputRegister(0), second_output, i.InputRegister(0),
   1008                    i.InputRegister(1), i.InputRegister(2));
   1009       } else {
   1010         uint32_t imm = i.InputOperand(2).immediate();
   1011         __ ShlPair(i.OutputRegister(0), second_output, i.InputRegister(0),
   1012                    i.InputRegister(1), imm);
   1013       }
   1014     } break;
   1015     case kMipsShrPair: {
   1016       Register second_output =
   1017           instr->OutputCount() >= 2 ? i.OutputRegister(1) : i.TempRegister(0);
   1018       if (instr->InputAt(2)->IsRegister()) {
   1019         __ ShrPair(i.OutputRegister(0), second_output, i.InputRegister(0),
   1020                    i.InputRegister(1), i.InputRegister(2));
   1021       } else {
   1022         uint32_t imm = i.InputOperand(2).immediate();
   1023         __ ShrPair(i.OutputRegister(0), second_output, i.InputRegister(0),
   1024                    i.InputRegister(1), imm);
   1025       }
   1026     } break;
   1027     case kMipsSarPair: {
   1028       Register second_output =
   1029           instr->OutputCount() >= 2 ? i.OutputRegister(1) : i.TempRegister(0);
   1030       if (instr->InputAt(2)->IsRegister()) {
   1031         __ SarPair(i.OutputRegister(0), second_output, i.InputRegister(0),
   1032                    i.InputRegister(1), i.InputRegister(2));
   1033       } else {
   1034         uint32_t imm = i.InputOperand(2).immediate();
   1035         __ SarPair(i.OutputRegister(0), second_output, i.InputRegister(0),
   1036                    i.InputRegister(1), imm);
   1037       }
   1038     } break;
   1039     case kMipsExt:
   1040       __ Ext(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
   1041              i.InputInt8(2));
   1042       break;
   1043     case kMipsIns:
   1044       if (instr->InputAt(1)->IsImmediate() && i.InputInt8(1) == 0) {
   1045         __ Ins(i.OutputRegister(), zero_reg, i.InputInt8(1), i.InputInt8(2));
   1046       } else {
   1047         __ Ins(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
   1048                i.InputInt8(2));
   1049       }
   1050       break;
   1051     case kMipsRor:
   1052       __ Ror(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
   1053       break;
   1054     case kMipsTst:
   1055       // Pseudo-instruction used for tst/branch. No opcode emitted here.
   1056       break;
   1057     case kMipsCmp:
   1058       // Pseudo-instruction used for cmp/branch. No opcode emitted here.
   1059       break;
   1060     case kMipsMov:
   1061       // TODO(plind): Should we combine mov/li like this, or use separate instr?
   1062       //    - Also see x64 ASSEMBLE_BINOP & RegisterOrOperandType
   1063       if (HasRegisterInput(instr, 0)) {
   1064         __ mov(i.OutputRegister(), i.InputRegister(0));
   1065       } else {
   1066         __ li(i.OutputRegister(), i.InputOperand(0));
   1067       }
   1068       break;
   1069     case kMipsLsa:
   1070       DCHECK(instr->InputAt(2)->IsImmediate());
   1071       __ Lsa(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
   1072              i.InputInt8(2));
   1073       break;
   1074     case kMipsCmpS:
    1075       // Pseudo-instruction used for FP cmp/branch. No opcode emitted here.
   1076       break;
   1077     case kMipsAddS:
   1078       // TODO(plind): add special case: combine mult & add.
   1079       __ add_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
   1080                i.InputDoubleRegister(1));
   1081       break;
   1082     case kMipsSubS:
   1083       __ sub_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
   1084                i.InputDoubleRegister(1));
   1085       break;
   1086     case kMipsMulS:
   1087       // TODO(plind): add special case: right op is -1.0, see arm port.
   1088       __ mul_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
   1089                i.InputDoubleRegister(1));
   1090       break;
   1091     case kMipsDivS:
   1092       __ div_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
   1093                i.InputDoubleRegister(1));
   1094       break;
   1095     case kMipsModS: {
   1096       // TODO(bmeurer): We should really get rid of this special instruction,
   1097       // and generate a CallAddress instruction instead.
   1098       FrameScope scope(masm(), StackFrame::MANUAL);
   1099       __ PrepareCallCFunction(0, 2, kScratchReg);
   1100       __ MovToFloatParameters(i.InputDoubleRegister(0),
   1101                               i.InputDoubleRegister(1));
   1102       // TODO(balazs.kilvady): implement mod_two_floats_operation(isolate())
   1103       __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()),
   1104                        0, 2);
    1105       // Move the result into the double result register.
   1106       __ MovFromFloatResult(i.OutputSingleRegister());
   1107       break;
   1108     }
   1109     case kMipsAbsS:
   1110       __ abs_s(i.OutputSingleRegister(), i.InputSingleRegister(0));
   1111       break;
   1112     case kMipsSqrtS: {
   1113       __ sqrt_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
   1114       break;
   1115     }
   1116     case kMipsMaxS:
   1117       __ max_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
   1118                i.InputDoubleRegister(1));
   1119       break;
   1120     case kMipsMinS:
   1121       __ min_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
   1122                i.InputDoubleRegister(1));
   1123       break;
   1124     case kMipsCmpD:
    1125       // Pseudo-instruction used for FP cmp/branch. No opcode emitted here.
   1126       break;
   1127     case kMipsAddPair:
   1128       __ AddPair(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0),
   1129                  i.InputRegister(1), i.InputRegister(2), i.InputRegister(3));
   1130       break;
   1131     case kMipsSubPair:
   1132       __ SubPair(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0),
   1133                  i.InputRegister(1), i.InputRegister(2), i.InputRegister(3));
   1134       break;
   1135     case kMipsMulPair: {
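               // 64-bit multiply from 32-bit halves, with inputs ordered
               // (a_lo, a_hi, b_lo, b_hi): Mulu produces the low and high words of
               // a_lo * b_lo, then a_lo * b_hi and a_hi * b_lo are added into the
               // high word.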
   1136       __ Mulu(i.OutputRegister(1), i.OutputRegister(0), i.InputRegister(0),
   1137               i.InputRegister(2));
   1138       __ mul(kScratchReg, i.InputRegister(0), i.InputRegister(3));
   1139       __ mul(kScratchReg2, i.InputRegister(1), i.InputRegister(2));
   1140       __ Addu(i.OutputRegister(1), i.OutputRegister(1), kScratchReg);
   1141       __ Addu(i.OutputRegister(1), i.OutputRegister(1), kScratchReg2);
   1142     } break;
   1143     case kMipsAddD:
   1144       // TODO(plind): add special case: combine mult & add.
   1145       __ add_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
   1146                i.InputDoubleRegister(1));
   1147       break;
   1148     case kMipsSubD:
   1149       __ sub_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
   1150                i.InputDoubleRegister(1));
   1151       break;
   1152     case kMipsMaddS:
   1153       __ Madd_s(i.OutputFloatRegister(), i.InputFloatRegister(0),
   1154                 i.InputFloatRegister(1), i.InputFloatRegister(2),
   1155                 kScratchDoubleReg);
   1156       break;
   1157     case kMipsMaddD:
   1158       __ Madd_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
   1159                 i.InputDoubleRegister(1), i.InputDoubleRegister(2),
   1160                 kScratchDoubleReg);
   1161       break;
   1162     case kMipsMsubS:
   1163       __ Msub_s(i.OutputFloatRegister(), i.InputFloatRegister(0),
   1164                 i.InputFloatRegister(1), i.InputFloatRegister(2),
   1165                 kScratchDoubleReg);
   1166       break;
   1167     case kMipsMsubD:
   1168       __ Msub_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
   1169                 i.InputDoubleRegister(1), i.InputDoubleRegister(2),
   1170                 kScratchDoubleReg);
   1171       break;
   1172     case kMipsMulD:
   1173       // TODO(plind): add special case: right op is -1.0, see arm port.
   1174       __ mul_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
   1175                i.InputDoubleRegister(1));
   1176       break;
   1177     case kMipsDivD:
   1178       __ div_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
   1179                i.InputDoubleRegister(1));
   1180       break;
   1181     case kMipsModD: {
   1182       // TODO(bmeurer): We should really get rid of this special instruction,
   1183       // and generate a CallAddress instruction instead.
   1184       FrameScope scope(masm(), StackFrame::MANUAL);
   1185       __ PrepareCallCFunction(0, 2, kScratchReg);
   1186       __ MovToFloatParameters(i.InputDoubleRegister(0),
   1187                               i.InputDoubleRegister(1));
   1188       __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()),
   1189                        0, 2);
    1190       // Move the result into the double result register.
   1191       __ MovFromFloatResult(i.OutputDoubleRegister());
   1192       break;
   1193     }
   1194     case kMipsAbsD:
   1195       __ abs_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
   1196       break;
   1197     case kMipsNegS:
   1198       __ Neg_s(i.OutputSingleRegister(), i.InputSingleRegister(0));
   1199       break;
   1200     case kMipsNegD:
   1201       __ Neg_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
   1202       break;
   1203     case kMipsSqrtD: {
   1204       __ sqrt_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
   1205       break;
   1206     }
   1207     case kMipsMaxD:
   1208       __ max_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
   1209                i.InputDoubleRegister(1));
   1210       break;
   1211     case kMipsMinD:
   1212       __ min_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
   1213                i.InputDoubleRegister(1));
   1214       break;
   1215     case kMipsFloat64RoundDown: {
   1216       ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(floor);
   1217       break;
   1218     }
   1219     case kMipsFloat32RoundDown: {
   1220       ASSEMBLE_ROUND_FLOAT_TO_FLOAT(floor);
   1221       break;
   1222     }
   1223     case kMipsFloat64RoundTruncate: {
   1224       ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(trunc);
   1225       break;
   1226     }
   1227     case kMipsFloat32RoundTruncate: {
   1228       ASSEMBLE_ROUND_FLOAT_TO_FLOAT(trunc);
   1229       break;
   1230     }
   1231     case kMipsFloat64RoundUp: {
   1232       ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(ceil);
   1233       break;
   1234     }
   1235     case kMipsFloat32RoundUp: {
   1236       ASSEMBLE_ROUND_FLOAT_TO_FLOAT(ceil);
   1237       break;
   1238     }
   1239     case kMipsFloat64RoundTiesEven: {
   1240       ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(round);
   1241       break;
   1242     }
   1243     case kMipsFloat32RoundTiesEven: {
   1244       ASSEMBLE_ROUND_FLOAT_TO_FLOAT(round);
   1245       break;
   1246     }
   1247     case kMipsFloat32Max: {
   1248       FPURegister dst = i.OutputSingleRegister();
   1249       FPURegister src1 = i.InputSingleRegister(0);
   1250       FPURegister src2 = i.InputSingleRegister(1);
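               // Float32Max handles the common cases inline and branches to the
               // out-of-line code for inputs it cannot handle inline (e.g. NaNs).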
   1251       auto ool = new (zone()) OutOfLineFloat32Max(this, dst, src1, src2);
   1252       __ Float32Max(dst, src1, src2, ool->entry());
   1253       __ bind(ool->exit());
   1254       break;
   1255     }
   1256     case kMipsFloat64Max: {
   1257       DoubleRegister dst = i.OutputDoubleRegister();
   1258       DoubleRegister src1 = i.InputDoubleRegister(0);
   1259       DoubleRegister src2 = i.InputDoubleRegister(1);
   1260       auto ool = new (zone()) OutOfLineFloat64Max(this, dst, src1, src2);
   1261       __ Float64Max(dst, src1, src2, ool->entry());
   1262       __ bind(ool->exit());
   1263       break;
   1264     }
   1265     case kMipsFloat32Min: {
   1266       FPURegister dst = i.OutputSingleRegister();
   1267       FPURegister src1 = i.InputSingleRegister(0);
   1268       FPURegister src2 = i.InputSingleRegister(1);
   1269       auto ool = new (zone()) OutOfLineFloat32Min(this, dst, src1, src2);
   1270       __ Float32Min(dst, src1, src2, ool->entry());
   1271       __ bind(ool->exit());
   1272       break;
   1273     }
   1274     case kMipsFloat64Min: {
   1275       DoubleRegister dst = i.OutputDoubleRegister();
   1276       DoubleRegister src1 = i.InputDoubleRegister(0);
   1277       DoubleRegister src2 = i.InputDoubleRegister(1);
   1278       auto ool = new (zone()) OutOfLineFloat64Min(this, dst, src1, src2);
   1279       __ Float64Min(dst, src1, src2, ool->entry());
   1280       __ bind(ool->exit());
   1281       break;
   1282     }
   1283     case kMipsCvtSD: {
   1284       __ cvt_s_d(i.OutputSingleRegister(), i.InputDoubleRegister(0));
   1285       break;
   1286     }
   1287     case kMipsCvtDS: {
   1288       __ cvt_d_s(i.OutputDoubleRegister(), i.InputSingleRegister(0));
   1289       break;
   1290     }
   1291     case kMipsCvtDW: {
   1292       FPURegister scratch = kScratchDoubleReg;
   1293       __ mtc1(i.InputRegister(0), scratch);
   1294       __ cvt_d_w(i.OutputDoubleRegister(), scratch);
   1295       break;
   1296     }
   1297     case kMipsCvtSW: {
   1298       FPURegister scratch = kScratchDoubleReg;
   1299       __ mtc1(i.InputRegister(0), scratch);
   1300       __ cvt_s_w(i.OutputDoubleRegister(), scratch);
   1301       break;
   1302     }
   1303     case kMipsCvtSUw: {
   1304       FPURegister scratch = kScratchDoubleReg;
   1305       __ Cvt_d_uw(i.OutputDoubleRegister(), i.InputRegister(0), scratch);
   1306       __ cvt_s_d(i.OutputDoubleRegister(), i.OutputDoubleRegister());
   1307       break;
   1308     }
   1309     case kMipsCvtDUw: {
   1310       FPURegister scratch = kScratchDoubleReg;
   1311       __ Cvt_d_uw(i.OutputDoubleRegister(), i.InputRegister(0), scratch);
   1312       break;
   1313     }
   1314     case kMipsFloorWD: {
   1315       FPURegister scratch = kScratchDoubleReg;
   1316       __ floor_w_d(scratch, i.InputDoubleRegister(0));
   1317       __ mfc1(i.OutputRegister(), scratch);
   1318       break;
   1319     }
   1320     case kMipsCeilWD: {
   1321       FPURegister scratch = kScratchDoubleReg;
   1322       __ ceil_w_d(scratch, i.InputDoubleRegister(0));
   1323       __ mfc1(i.OutputRegister(), scratch);
   1324       break;
   1325     }
   1326     case kMipsRoundWD: {
   1327       FPURegister scratch = kScratchDoubleReg;
   1328       __ round_w_d(scratch, i.InputDoubleRegister(0));
   1329       __ mfc1(i.OutputRegister(), scratch);
   1330       break;
   1331     }
   1332     case kMipsTruncWD: {
   1333       FPURegister scratch = kScratchDoubleReg;
   1334       // Other arches use round to zero here, so we follow.
   1335       __ trunc_w_d(scratch, i.InputDoubleRegister(0));
   1336       __ mfc1(i.OutputRegister(), scratch);
   1337       break;
   1338     }
   1339     case kMipsFloorWS: {
   1340       FPURegister scratch = kScratchDoubleReg;
   1341       __ floor_w_s(scratch, i.InputDoubleRegister(0));
   1342       __ mfc1(i.OutputRegister(), scratch);
   1343       break;
   1344     }
   1345     case kMipsCeilWS: {
   1346       FPURegister scratch = kScratchDoubleReg;
   1347       __ ceil_w_s(scratch, i.InputDoubleRegister(0));
   1348       __ mfc1(i.OutputRegister(), scratch);
   1349       break;
   1350     }
   1351     case kMipsRoundWS: {
   1352       FPURegister scratch = kScratchDoubleReg;
   1353       __ round_w_s(scratch, i.InputDoubleRegister(0));
   1354       __ mfc1(i.OutputRegister(), scratch);
   1355       break;
   1356     }
   1357     case kMipsTruncWS: {
   1358       FPURegister scratch = kScratchDoubleReg;
   1359       __ trunc_w_s(scratch, i.InputDoubleRegister(0));
   1360       __ mfc1(i.OutputRegister(), scratch);
   1361       // Avoid INT32_MAX as an overflow indicator and use INT32_MIN instead,
   1362       // because INT32_MIN allows easier out-of-bounds detection.
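               // If the truncation overflowed and produced INT32_MAX, adding 1
               // wraps to INT32_MIN; slt detects the signed wrap and Movn then
               // substitutes INT32_MIN as the result.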
   1363       __ addiu(kScratchReg, i.OutputRegister(), 1);
   1364       __ slt(kScratchReg2, kScratchReg, i.OutputRegister());
   1365       __ Movn(i.OutputRegister(), kScratchReg, kScratchReg2);
   1366       break;
   1367     }
   1368     case kMipsTruncUwD: {
   1369       FPURegister scratch = kScratchDoubleReg;
   1370       // TODO(plind): Fix wrong param order of Trunc_uw_d() macro-asm function.
   1371       __ Trunc_uw_d(i.InputDoubleRegister(0), i.OutputRegister(), scratch);
   1372       break;
   1373     }
   1374     case kMipsTruncUwS: {
   1375       FPURegister scratch = kScratchDoubleReg;
   1376       // TODO(plind): Fix wrong param order of Trunc_uw_s() macro-asm function.
   1377       __ Trunc_uw_s(i.InputDoubleRegister(0), i.OutputRegister(), scratch);
   1378       // Avoid UINT32_MAX as an overflow indicator and use 0 instead,
   1379       // because 0 allows easier out-of-bounds detection.
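               // If the truncation produced UINT32_MAX, adding 1 wraps to 0 and
               // Movz rewrites the result to 0.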
   1380       __ addiu(kScratchReg, i.OutputRegister(), 1);
   1381       __ Movz(i.OutputRegister(), zero_reg, kScratchReg);
   1382       break;
   1383     }
   1384     case kMipsFloat64ExtractLowWord32:
   1385       __ FmoveLow(i.OutputRegister(), i.InputDoubleRegister(0));
   1386       break;
   1387     case kMipsFloat64ExtractHighWord32:
   1388       __ FmoveHigh(i.OutputRegister(), i.InputDoubleRegister(0));
   1389       break;
   1390     case kMipsFloat64InsertLowWord32:
   1391       __ FmoveLow(i.OutputDoubleRegister(), i.InputRegister(1));
   1392       break;
   1393     case kMipsFloat64InsertHighWord32:
   1394       __ FmoveHigh(i.OutputDoubleRegister(), i.InputRegister(1));
   1395       break;
   1396     case kMipsFloat64SilenceNaN:
   1397       __ FPUCanonicalizeNaN(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
   1398       break;
   1399 
   1400     // ... more basic instructions ...
   1401     case kMipsSeb:
   1402       __ Seb(i.OutputRegister(), i.InputRegister(0));
   1403       break;
   1404     case kMipsSeh:
   1405       __ Seh(i.OutputRegister(), i.InputRegister(0));
   1406       break;
   1407     case kMipsLbu:
   1408       __ lbu(i.OutputRegister(), i.MemoryOperand());
   1409       break;
   1410     case kMipsLb:
   1411       __ lb(i.OutputRegister(), i.MemoryOperand());
   1412       break;
   1413     case kMipsSb:
   1414       __ sb(i.InputOrZeroRegister(2), i.MemoryOperand());
   1415       break;
   1416     case kMipsLhu:
   1417       __ lhu(i.OutputRegister(), i.MemoryOperand());
   1418       break;
   1419     case kMipsUlhu:
   1420       __ Ulhu(i.OutputRegister(), i.MemoryOperand());
   1421       break;
   1422     case kMipsLh:
   1423       __ lh(i.OutputRegister(), i.MemoryOperand());
   1424       break;
   1425     case kMipsUlh:
   1426       __ Ulh(i.OutputRegister(), i.MemoryOperand());
   1427       break;
   1428     case kMipsSh:
   1429       __ sh(i.InputOrZeroRegister(2), i.MemoryOperand());
   1430       break;
   1431     case kMipsUsh:
   1432       __ Ush(i.InputOrZeroRegister(2), i.MemoryOperand(), kScratchReg);
   1433       break;
   1434     case kMipsLw:
   1435       __ lw(i.OutputRegister(), i.MemoryOperand());
   1436       break;
   1437     case kMipsUlw:
   1438       __ Ulw(i.OutputRegister(), i.MemoryOperand());
   1439       break;
   1440     case kMipsSw:
   1441       __ sw(i.InputOrZeroRegister(2), i.MemoryOperand());
   1442       break;
   1443     case kMipsUsw:
   1444       __ Usw(i.InputOrZeroRegister(2), i.MemoryOperand());
   1445       break;
   1446     case kMipsLwc1: {
   1447       __ lwc1(i.OutputSingleRegister(), i.MemoryOperand());
   1448       break;
   1449     }
   1450     case kMipsUlwc1: {
   1451       __ Ulwc1(i.OutputSingleRegister(), i.MemoryOperand(), kScratchReg);
   1452       break;
   1453     }
   1454     case kMipsSwc1: {
   1455       size_t index = 0;
   1456       MemOperand operand = i.MemoryOperand(&index);
   1457       FPURegister ft = i.InputOrZeroSingleRegister(index);
   1458       if (ft.is(kDoubleRegZero) && !__ IsDoubleZeroRegSet()) {
   1459         __ Move(kDoubleRegZero, 0.0);
   1460       }
   1461       __ swc1(ft, operand);
   1462       break;
   1463     }
   1464     case kMipsUswc1: {
   1465       size_t index = 0;
   1466       MemOperand operand = i.MemoryOperand(&index);
   1467       FPURegister ft = i.InputOrZeroSingleRegister(index);
   1468       if (ft.is(kDoubleRegZero) && !__ IsDoubleZeroRegSet()) {
   1469         __ Move(kDoubleRegZero, 0.0);
   1470       }
   1471       __ Uswc1(ft, operand, kScratchReg);
   1472       break;
   1473     }
   1474     case kMipsLdc1:
   1475       __ ldc1(i.OutputDoubleRegister(), i.MemoryOperand());
   1476       break;
   1477     case kMipsUldc1:
   1478       __ Uldc1(i.OutputDoubleRegister(), i.MemoryOperand(), kScratchReg);
   1479       break;
   1480     case kMipsSdc1: {
   1481       FPURegister ft = i.InputOrZeroDoubleRegister(2);
   1482       if (ft.is(kDoubleRegZero) && !__ IsDoubleZeroRegSet()) {
   1483         __ Move(kDoubleRegZero, 0.0);
   1484       }
   1485       __ sdc1(ft, i.MemoryOperand());
   1486       break;
   1487     }
   1488     case kMipsUsdc1: {
   1489       FPURegister ft = i.InputOrZeroDoubleRegister(2);
   1490       if (ft.is(kDoubleRegZero) && !__ IsDoubleZeroRegSet()) {
   1491         __ Move(kDoubleRegZero, 0.0);
   1492       }
   1493       __ Usdc1(ft, i.MemoryOperand(), kScratchReg);
   1494       break;
   1495     }
   1496     case kMipsPush:
   1497       if (instr->InputAt(0)->IsFPRegister()) {
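                 // Store the double just below the stack pointer, then move sp
                 // down; the SP delta is tracked in pointer-sized slots (two per
                 // double on MIPS32).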
   1498         __ sdc1(i.InputDoubleRegister(0), MemOperand(sp, -kDoubleSize));
   1499         __ Subu(sp, sp, Operand(kDoubleSize));
   1500         frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
   1501       } else {
   1502         __ Push(i.InputRegister(0));
   1503         frame_access_state()->IncreaseSPDelta(1);
   1504       }
   1505       break;
   1506     case kMipsStackClaim: {
   1507       __ Subu(sp, sp, Operand(i.InputInt32(0)));
   1508       frame_access_state()->IncreaseSPDelta(i.InputInt32(0) / kPointerSize);
   1509       break;
   1510     }
   1511     case kMipsStoreToStackSlot: {
   1512       if (instr->InputAt(0)->IsFPRegister()) {
   1513         LocationOperand* op = LocationOperand::cast(instr->InputAt(0));
   1514         if (op->representation() == MachineRepresentation::kFloat64) {
   1515           __ sdc1(i.InputDoubleRegister(0), MemOperand(sp, i.InputInt32(1)));
   1516         } else {
   1517           DCHECK_EQ(MachineRepresentation::kFloat32, op->representation());
   1518           __ swc1(i.InputSingleRegister(0), MemOperand(sp, i.InputInt32(1)));
   1519         }
   1520       } else {
   1521         __ sw(i.InputRegister(0), MemOperand(sp, i.InputInt32(1)));
   1522       }
   1523       break;
   1524     }
   1525     case kMipsByteSwap32: {
   1526       __ ByteSwapSigned(i.OutputRegister(0), i.InputRegister(0), 4);
   1527       break;
   1528     }
   1529     case kCheckedLoadInt8:
   1530       ASSEMBLE_CHECKED_LOAD_INTEGER(lb);
   1531       break;
   1532     case kCheckedLoadUint8:
   1533       ASSEMBLE_CHECKED_LOAD_INTEGER(lbu);
   1534       break;
   1535     case kCheckedLoadInt16:
   1536       ASSEMBLE_CHECKED_LOAD_INTEGER(lh);
   1537       break;
   1538     case kCheckedLoadUint16:
   1539       ASSEMBLE_CHECKED_LOAD_INTEGER(lhu);
   1540       break;
   1541     case kCheckedLoadWord32:
   1542       ASSEMBLE_CHECKED_LOAD_INTEGER(lw);
   1543       break;
   1544     case kCheckedLoadFloat32:
   1545       ASSEMBLE_CHECKED_LOAD_FLOAT(Single, lwc1);
   1546       break;
   1547     case kCheckedLoadFloat64:
   1548       ASSEMBLE_CHECKED_LOAD_FLOAT(Double, ldc1);
   1549       break;
   1550     case kCheckedStoreWord8:
   1551       ASSEMBLE_CHECKED_STORE_INTEGER(sb);
   1552       break;
   1553     case kCheckedStoreWord16:
   1554       ASSEMBLE_CHECKED_STORE_INTEGER(sh);
   1555       break;
   1556     case kCheckedStoreWord32:
   1557       ASSEMBLE_CHECKED_STORE_INTEGER(sw);
   1558       break;
   1559     case kCheckedStoreFloat32:
   1560       ASSEMBLE_CHECKED_STORE_FLOAT(Single, swc1);
   1561       break;
   1562     case kCheckedStoreFloat64:
   1563       ASSEMBLE_CHECKED_STORE_FLOAT(Double, sdc1);
   1564       break;
   1565     case kCheckedLoadWord64:
   1566     case kCheckedStoreWord64:
    1567       UNREACHABLE();  // Checked int64 load/store is currently unsupported.
   1568       break;
   1569     case kAtomicLoadInt8:
   1570       ASSEMBLE_ATOMIC_LOAD_INTEGER(lb);
   1571       break;
   1572     case kAtomicLoadUint8:
   1573       ASSEMBLE_ATOMIC_LOAD_INTEGER(lbu);
   1574       break;
   1575     case kAtomicLoadInt16:
   1576       ASSEMBLE_ATOMIC_LOAD_INTEGER(lh);
   1577       break;
   1578     case kAtomicLoadUint16:
   1579       ASSEMBLE_ATOMIC_LOAD_INTEGER(lhu);
   1580       break;
   1581     case kAtomicLoadWord32:
   1582       ASSEMBLE_ATOMIC_LOAD_INTEGER(lw);
   1583       break;
   1584     case kAtomicStoreWord8:
   1585       ASSEMBLE_ATOMIC_STORE_INTEGER(sb);
   1586       break;
   1587     case kAtomicStoreWord16:
   1588       ASSEMBLE_ATOMIC_STORE_INTEGER(sh);
   1589       break;
   1590     case kAtomicStoreWord32:
   1591       ASSEMBLE_ATOMIC_STORE_INTEGER(sw);
   1592       break;
   1593   }
   1594   return kSuccess;
   1595 }  // NOLINT(readability/fn_size)
   1596 
   1597 
   1598 #define UNSUPPORTED_COND(opcode, condition)                                  \
   1599   OFStream out(stdout);                                                      \
   1600   out << "Unsupported " << #opcode << " condition: \"" << condition << "\""; \
   1601   UNIMPLEMENTED();
   1602 
   1603 static bool convertCondition(FlagsCondition condition, Condition& cc) {
   1604   switch (condition) {
   1605     case kEqual:
   1606       cc = eq;
   1607       return true;
   1608     case kNotEqual:
   1609       cc = ne;
   1610       return true;
   1611     case kUnsignedLessThan:
   1612       cc = lt;
   1613       return true;
   1614     case kUnsignedGreaterThanOrEqual:
   1615       cc = uge;
   1616       return true;
   1617     case kUnsignedLessThanOrEqual:
   1618       cc = le;
   1619       return true;
   1620     case kUnsignedGreaterThan:
   1621       cc = ugt;
   1622       return true;
   1623     default:
   1624       break;
   1625   }
   1626   return false;
   1627 }
   1628 
   1629 void AssembleBranchToLabels(CodeGenerator* gen, MacroAssembler* masm,
   1630                             Instruction* instr, FlagsCondition condition,
   1631                             Label* tlabel, Label* flabel, bool fallthru) {
   1632 #undef __
   1633 #define __ masm->
   1634 
   1635   Condition cc = kNoCondition;
   1636   // MIPS does not have condition code flags, so compare and branch are
    1637   // implemented differently than on other architectures. The compare
    1638   // operations emit MIPS pseudo-instructions, which are handled here by
    1639   // branch instructions that do the actual comparison. It is essential that
    1640   // the input registers of the compare pseudo-op are not modified before this
    1641   // branch op, as they are tested here.
   1642 
   1643   MipsOperandConverter i(gen, instr);
   1644   if (instr->arch_opcode() == kMipsTst) {
   1645     cc = FlagsConditionToConditionTst(condition);
   1646     __ And(at, i.InputRegister(0), i.InputOperand(1));
   1647     __ Branch(tlabel, cc, at, Operand(zero_reg));
   1648   } else if (instr->arch_opcode() == kMipsAddOvf) {
   1649     switch (condition) {
   1650       case kOverflow:
   1651         __ AddBranchOvf(i.OutputRegister(), i.InputRegister(0),
   1652                         i.InputOperand(1), tlabel, flabel);
   1653         break;
   1654       case kNotOverflow:
   1655         __ AddBranchOvf(i.OutputRegister(), i.InputRegister(0),
   1656                         i.InputOperand(1), flabel, tlabel);
   1657         break;
   1658       default:
   1659         UNSUPPORTED_COND(kMipsAddOvf, condition);
   1660         break;
   1661     }
   1662   } else if (instr->arch_opcode() == kMipsSubOvf) {
   1663     switch (condition) {
   1664       case kOverflow:
   1665         __ SubBranchOvf(i.OutputRegister(), i.InputRegister(0),
   1666                         i.InputOperand(1), tlabel, flabel);
   1667         break;
   1668       case kNotOverflow:
   1669         __ SubBranchOvf(i.OutputRegister(), i.InputRegister(0),
   1670                         i.InputOperand(1), flabel, tlabel);
   1671         break;
   1672       default:
    1673         UNSUPPORTED_COND(kMipsSubOvf, condition);
   1674         break;
   1675     }
   1676   } else if (instr->arch_opcode() == kMipsMulOvf) {
   1677     switch (condition) {
   1678       case kOverflow:
   1679         __ MulBranchOvf(i.OutputRegister(), i.InputRegister(0),
   1680                         i.InputOperand(1), tlabel, flabel);
   1681         break;
   1682       case kNotOverflow:
   1683         __ MulBranchOvf(i.OutputRegister(), i.InputRegister(0),
   1684                         i.InputOperand(1), flabel, tlabel);
   1685         break;
   1686       default:
   1687         UNSUPPORTED_COND(kMipsMulOvf, condition);
   1688         break;
   1689     }
   1690   } else if (instr->arch_opcode() == kMipsCmp) {
   1691     cc = FlagsConditionToConditionCmp(condition);
   1692     __ Branch(tlabel, cc, i.InputRegister(0), i.InputOperand(1));
   1693   } else if (instr->arch_opcode() == kMipsCmpS) {
   1694     if (!convertCondition(condition, cc)) {
    1695       UNSUPPORTED_COND(kMipsCmpS, condition);
   1696     }
   1697     FPURegister left = i.InputOrZeroSingleRegister(0);
   1698     FPURegister right = i.InputOrZeroSingleRegister(1);
   1699     if ((left.is(kDoubleRegZero) || right.is(kDoubleRegZero)) &&
   1700         !__ IsDoubleZeroRegSet()) {
   1701       __ Move(kDoubleRegZero, 0.0);
   1702     }
   1703     __ BranchF32(tlabel, nullptr, cc, left, right);
   1704   } else if (instr->arch_opcode() == kMipsCmpD) {
   1705     if (!convertCondition(condition, cc)) {
    1706       UNSUPPORTED_COND(kMipsCmpD, condition);
   1707     }
   1708     FPURegister left = i.InputOrZeroDoubleRegister(0);
   1709     FPURegister right = i.InputOrZeroDoubleRegister(1);
   1710     if ((left.is(kDoubleRegZero) || right.is(kDoubleRegZero)) &&
   1711         !__ IsDoubleZeroRegSet()) {
   1712       __ Move(kDoubleRegZero, 0.0);
   1713     }
   1714     __ BranchF64(tlabel, nullptr, cc, left, right);
   1715   } else {
   1716     PrintF("AssembleArchBranch Unimplemented arch_opcode: %d\n",
   1717            instr->arch_opcode());
   1718     UNIMPLEMENTED();
   1719   }
   1720   if (!fallthru) __ Branch(flabel);  // no fallthru to flabel.
   1721 #undef __
   1722 #define __ masm()->
   1723 }
   1724 
   1725 // Assembles branches after an instruction.
   1726 void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
   1727   Label* tlabel = branch->true_label;
   1728   Label* flabel = branch->false_label;
   1729   AssembleBranchToLabels(this, masm(), instr, branch->condition, tlabel, flabel,
   1730                          branch->fallthru);
   1731 }
   1732 
   1733 
   1734 void CodeGenerator::AssembleArchJump(RpoNumber target) {
   1735   if (!IsNextInAssemblyOrder(target)) __ Branch(GetLabel(target));
   1736 }
   1737 
   1738 void CodeGenerator::AssembleArchTrap(Instruction* instr,
   1739                                      FlagsCondition condition) {
   1740   class OutOfLineTrap final : public OutOfLineCode {
   1741    public:
   1742     OutOfLineTrap(CodeGenerator* gen, bool frame_elided, Instruction* instr)
   1743         : OutOfLineCode(gen),
   1744           frame_elided_(frame_elided),
   1745           instr_(instr),
   1746           gen_(gen) {}
   1747 
   1748     void Generate() final {
   1749       MipsOperandConverter i(gen_, instr_);
   1750 
   1751       Builtins::Name trap_id =
   1752           static_cast<Builtins::Name>(i.InputInt32(instr_->InputCount() - 1));
   1753       bool old_has_frame = __ has_frame();
   1754       if (frame_elided_) {
   1755         __ set_has_frame(true);
   1756         __ EnterFrame(StackFrame::WASM_COMPILED);
   1757       }
   1758       GenerateCallToTrap(trap_id);
   1759       if (frame_elided_) {
   1760         __ set_has_frame(old_has_frame);
   1761       }
   1762     }
   1763 
   1764    private:
   1765     void GenerateCallToTrap(Builtins::Name trap_id) {
   1766       if (trap_id == Builtins::builtin_count) {
   1767         // We cannot test calls to the runtime in cctest/test-run-wasm.
   1768         // Therefore we emit a call to C here instead of a call to the runtime.
   1769         // We use the context register as the scratch register, because we do
   1770         // not have a context here.
   1771         __ PrepareCallCFunction(0, 0, cp);
   1772         __ CallCFunction(
   1773             ExternalReference::wasm_call_trap_callback_for_testing(isolate()),
   1774             0);
   1775         __ LeaveFrame(StackFrame::WASM_COMPILED);
   1776         __ Ret();
   1777       } else {
   1778         gen_->AssembleSourcePosition(instr_);
   1779         __ Call(handle(isolate()->builtins()->builtin(trap_id), isolate()),
   1780                 RelocInfo::CODE_TARGET);
   1781         ReferenceMap* reference_map =
   1782             new (gen_->zone()) ReferenceMap(gen_->zone());
   1783         gen_->RecordSafepoint(reference_map, Safepoint::kSimple, 0,
   1784                               Safepoint::kNoLazyDeopt);
   1785         if (FLAG_debug_code) {
   1786           __ stop(GetBailoutReason(kUnexpectedReturnFromWasmTrap));
   1787         }
   1788       }
   1789     }
   1790 
   1791     bool frame_elided_;
   1792     Instruction* instr_;
   1793     CodeGenerator* gen_;
   1794   };
   1795   bool frame_elided = !frame_access_state()->has_frame();
   1796   auto ool = new (zone()) OutOfLineTrap(this, frame_elided, instr);
   1797   Label* tlabel = ool->entry();
   1798   AssembleBranchToLabels(this, masm(), instr, condition, tlabel, nullptr, true);
   1799 }
   1800 
   1801 // Assembles boolean materializations after an instruction.
   1802 void CodeGenerator::AssembleArchBoolean(Instruction* instr,
   1803                                         FlagsCondition condition) {
   1804   MipsOperandConverter i(this, instr);
   1805   Label done;
   1806 
   1807   // Materialize a full 32-bit 1 or 0 value. The result register is always the
   1808   // last output of the instruction.
   1809   Label false_value;
   1810   DCHECK_NE(0u, instr->OutputCount());
   1811   Register result = i.OutputRegister(instr->OutputCount() - 1);
   1812   Condition cc = kNoCondition;
   1813   // MIPS does not have condition code flags, so compare and branch are
    1814   // implemented differently than on other architectures. The compare
    1815   // operations emit MIPS pseudo-instructions, checked and handled here.
   1816 
   1817   if (instr->arch_opcode() == kMipsTst) {
   1818     cc = FlagsConditionToConditionTst(condition);
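             // If the tested mask is a single bit, extract that bit directly;
             // otherwise AND the operands into kScratchReg and use Sltu to set
             // the result to 0 or 1.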
   1819     if (instr->InputAt(1)->IsImmediate() &&
   1820         base::bits::IsPowerOfTwo32(i.InputOperand(1).immediate())) {
   1821       uint16_t pos =
   1822           base::bits::CountTrailingZeros32(i.InputOperand(1).immediate());
   1823       __ Ext(result, i.InputRegister(0), pos, 1);
   1824     } else {
   1825       __ And(kScratchReg, i.InputRegister(0), i.InputOperand(1));
   1826       __ Sltu(result, zero_reg, kScratchReg);
   1827     }
   1828     if (cc == eq) {
   1829       // Sltu produces 0 for equality, invert the result.
   1830       __ xori(result, result, 1);
   1831     }
   1832     return;
   1833   } else if (instr->arch_opcode() == kMipsAddOvf ||
   1834              instr->arch_opcode() == kMipsSubOvf ||
   1835              instr->arch_opcode() == kMipsMulOvf) {
   1836     Label flabel, tlabel;
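             // The Add/Sub/MulBranchNoOvf macros branch to flabel when the
             // operation does not overflow, so the fall-through path materializes
             // 1 (overflow) and flabel materializes 0.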
   1837     switch (instr->arch_opcode()) {
   1838       case kMipsAddOvf:
   1839         __ AddBranchNoOvf(i.OutputRegister(), i.InputRegister(0),
   1840                           i.InputOperand(1), &flabel);
   1841 
   1842         break;
   1843       case kMipsSubOvf:
   1844         __ SubBranchNoOvf(i.OutputRegister(), i.InputRegister(0),
   1845                           i.InputOperand(1), &flabel);
   1846         break;
   1847       case kMipsMulOvf:
   1848         __ MulBranchNoOvf(i.OutputRegister(), i.InputRegister(0),
   1849                           i.InputOperand(1), &flabel);
   1850         break;
   1851       default:
   1852         UNREACHABLE();
   1853         break;
   1854     }
   1855     __ li(result, 1);
   1856     __ Branch(&tlabel);
   1857     __ bind(&flabel);
   1858     __ li(result, 0);
   1859     __ bind(&tlabel);
   1860   } else if (instr->arch_opcode() == kMipsCmp) {
   1861     cc = FlagsConditionToConditionCmp(condition);
   1862     switch (cc) {
   1863       case eq:
   1864       case ne: {
   1865         Register left = i.InputRegister(0);
   1866         Operand right = i.InputOperand(1);
   1867         Register select;
   1868         if (instr->InputAt(1)->IsImmediate() && right.immediate() == 0) {
   1869           // Pass left operand if right is zero.
   1870           select = left;
   1871         } else {
   1872           __ Subu(kScratchReg, left, right);
   1873           select = kScratchReg;
   1874         }
   1875         __ Sltu(result, zero_reg, select);
   1876         if (cc == eq) {
   1877           // Sltu produces 0 for equality, invert the result.
   1878           __ xori(result, result, 1);
   1879         }
   1880       } break;
   1881       case lt:
   1882       case ge: {
   1883         Register left = i.InputRegister(0);
   1884         Operand right = i.InputOperand(1);
   1885         __ Slt(result, left, right);
   1886         if (cc == ge) {
   1887           __ xori(result, result, 1);
   1888         }
   1889       } break;
   1890       case gt:
   1891       case le: {
   1892         Register left = i.InputRegister(1);
   1893         Operand right = i.InputOperand(0);
   1894         __ Slt(result, left, right);
   1895         if (cc == le) {
   1896           __ xori(result, result, 1);
   1897         }
   1898       } break;
   1899       case lo:
   1900       case hs: {
   1901         Register left = i.InputRegister(0);
   1902         Operand right = i.InputOperand(1);
   1903         __ Sltu(result, left, right);
   1904         if (cc == hs) {
   1905           __ xori(result, result, 1);
   1906         }
   1907       } break;
   1908       case hi:
   1909       case ls: {
   1910         Register left = i.InputRegister(1);
   1911         Operand right = i.InputOperand(0);
   1912         __ Sltu(result, left, right);
   1913         if (cc == ls) {
   1914           __ xori(result, result, 1);
   1915         }
   1916       } break;
   1917       default:
   1918         UNREACHABLE();
   1919     }
   1920     return;
   1921   } else if (instr->arch_opcode() == kMipsCmpD ||
   1922              instr->arch_opcode() == kMipsCmpS) {
   1923     FPURegister left = i.InputOrZeroDoubleRegister(0);
   1924     FPURegister right = i.InputOrZeroDoubleRegister(1);
   1925     if ((left.is(kDoubleRegZero) || right.is(kDoubleRegZero)) &&
   1926         !__ IsDoubleZeroRegSet()) {
   1927       __ Move(kDoubleRegZero, 0.0);
   1928     }
   1929     bool predicate;
   1930     FPUCondition cc = FlagsConditionToConditionCmpFPU(predicate, condition);
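             // Pre-r6 cores use c.cond.fmt, which sets an FCSR condition flag that
             // Movf/Movt then consume; r6 cores use cmp.cond.fmt, which writes an
             // all-ones/all-zeros mask into an FPU register that is moved to the
             // GPR and masked down to its least significant bit.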
   1931     if (!IsMipsArchVariant(kMips32r6)) {
   1932       __ li(result, Operand(1));
   1933       if (instr->arch_opcode() == kMipsCmpD) {
   1934         __ c(cc, D, left, right);
   1935       } else {
   1936         DCHECK(instr->arch_opcode() == kMipsCmpS);
   1937         __ c(cc, S, left, right);
   1938       }
   1939       if (predicate) {
   1940         __ Movf(result, zero_reg);
   1941       } else {
   1942         __ Movt(result, zero_reg);
   1943       }
   1944     } else {
   1945       if (instr->arch_opcode() == kMipsCmpD) {
   1946         __ cmp(cc, L, kDoubleCompareReg, left, right);
   1947       } else {
   1948         DCHECK(instr->arch_opcode() == kMipsCmpS);
   1949         __ cmp(cc, W, kDoubleCompareReg, left, right);
   1950       }
   1951       __ mfc1(result, kDoubleCompareReg);
   1952       __ andi(result, result, 1);  // Cmp returns all 1's/0's, use only LSB.
    1953       // Toggle result for not equal.
    1954       if (!predicate) __ xori(result, result, 1);
   1955     }
   1956     return;
   1957   } else {
    1958     PrintF("AssembleArchBoolean Unimplemented arch_opcode: %d\n",
   1959            instr->arch_opcode());
   1960     TRACE_UNIMPL();
   1961     UNIMPLEMENTED();
   1962   }
   1963 }
   1964 
   1965 
   1966 void CodeGenerator::AssembleArchLookupSwitch(Instruction* instr) {
   1967   MipsOperandConverter i(this, instr);
   1968   Register input = i.InputRegister(0);
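           // Input 0 is the value being switched on and input 1 is the default
           // block; the remaining inputs are (case value, target block) pairs.
           // A switch over the case values {1, 7} therefore emits roughly:
           //   li   at, 1
           //   beq  input, at, <block for 1>
           //   li   at, 7
           //   beq  input, at, <block for 7>
           //   nop                          // delay slot of the last beq
           //   b    <default block>         // omitted if it falls through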
   1969   for (size_t index = 2; index < instr->InputCount(); index += 2) {
   1970     __ li(at, Operand(i.InputInt32(index + 0)));
   1971     __ beq(input, at, GetLabel(i.InputRpo(index + 1)));
   1972   }
   1973   __ nop();  // Branch delay slot of the last beq.
   1974   AssembleArchJump(i.InputRpo(1));
   1975 }
   1976 
   1977 
   1978 void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
   1979   MipsOperandConverter i(this, instr);
   1980   Register input = i.InputRegister(0);
   1981   size_t const case_count = instr->InputCount() - 2;
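           // Branch to the default block (input 1) if the input is out of range,
           // then emit an inline jump table; entry n targets the block given by
           // input n + 2.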
   1982   __ Branch(GetLabel(i.InputRpo(1)), hs, input, Operand(case_count));
   1983   __ GenerateSwitchTable(input, case_count, [&i, this](size_t index) {
   1984     return GetLabel(i.InputRpo(index + 2));
   1985   });
   1986 }
   1987 
   1988 CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
   1989     int deoptimization_id, SourcePosition pos) {
   1990   DeoptimizeKind deoptimization_kind = GetDeoptimizationKind(deoptimization_id);
   1991   DeoptimizeReason deoptimization_reason =
   1992       GetDeoptimizationReason(deoptimization_id);
   1993   Deoptimizer::BailoutType bailout_type =
   1994       deoptimization_kind == DeoptimizeKind::kSoft ? Deoptimizer::SOFT
   1995                                                    : Deoptimizer::EAGER;
   1996   Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
   1997       isolate(), deoptimization_id, bailout_type);
   1998   if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
   1999   __ RecordDeoptReason(deoptimization_reason, pos, deoptimization_id);
   2000   __ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
   2001   return kSuccess;
   2002 }
   2003 
   2004 void CodeGenerator::FinishFrame(Frame* frame) {
   2005   CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
   2006 
   2007   const RegList saves_fpu = descriptor->CalleeSavedFPRegisters();
   2008   if (saves_fpu != 0) {
   2009     frame->AlignSavedCalleeRegisterSlots();
   2010   }
   2011 
   2012   if (saves_fpu != 0) {
   2013     int count = base::bits::CountPopulation32(saves_fpu);
   2014     DCHECK(kNumCalleeSavedFPU == count);
   2015     frame->AllocateSavedCalleeRegisterSlots(count *
   2016                                             (kDoubleSize / kPointerSize));
   2017   }
   2018 
   2019   const RegList saves = descriptor->CalleeSavedRegisters();
   2020   if (saves != 0) {
   2021     int count = base::bits::CountPopulation32(saves);
   2022     DCHECK(kNumCalleeSaved == count + 1);
   2023     frame->AllocateSavedCalleeRegisterSlots(count);
   2024   }
   2025 }
   2026 
   2027 void CodeGenerator::AssembleConstructFrame() {
   2028   CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
   2029   if (frame_access_state()->has_frame()) {
   2030     if (descriptor->IsCFunctionCall()) {
   2031       __ Push(ra, fp);
   2032       __ mov(fp, sp);
   2033     } else if (descriptor->IsJSFunctionCall()) {
   2034       __ Prologue(this->info()->GeneratePreagedPrologue());
   2035       if (descriptor->PushArgumentCount()) {
   2036         __ Push(kJavaScriptCallArgCountRegister);
   2037       }
   2038     } else {
   2039       __ StubPrologue(info()->GetOutputStackFrameType());
   2040     }
   2041   }
   2042 
   2043   int shrink_slots =
   2044       frame()->GetTotalFrameSlotCount() - descriptor->CalculateFixedFrameSize();
   2045 
   2046   if (info()->is_osr()) {
   2047     // TurboFan OSR-compiled functions cannot be entered directly.
   2048     __ Abort(kShouldNotDirectlyEnterOsrFunction);
   2049 
   2050     // Unoptimized code jumps directly to this entrypoint while the unoptimized
   2051     // frame is still on the stack. Optimized code uses OSR values directly from
   2052     // the unoptimized frame. Thus, all that needs to be done is to allocate the
   2053     // remaining stack slots.
   2054     if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
   2055     osr_pc_offset_ = __ pc_offset();
   2056     shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
   2057   }
   2058 
   2059   const RegList saves_fpu = descriptor->CalleeSavedFPRegisters();
   2060   if (shrink_slots > 0) {
   2061     __ Subu(sp, sp, Operand(shrink_slots * kPointerSize));
   2062   }
   2063 
   2064   // Save callee-saved FPU registers.
   2065   if (saves_fpu != 0) {
   2066     __ MultiPushFPU(saves_fpu);
   2067   }
   2068 
   2069   const RegList saves = descriptor->CalleeSavedRegisters();
   2070   if (saves != 0) {
   2071     // Save callee-saved registers.
   2072     __ MultiPush(saves);
   2073     DCHECK(kNumCalleeSaved == base::bits::CountPopulation32(saves) + 1);
   2074   }
   2075 }
   2076 
   2077 void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
   2078   CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
   2079   int pop_count = static_cast<int>(descriptor->StackParameterCount());
   2080 
   2081   // Restore GP registers.
   2082   const RegList saves = descriptor->CalleeSavedRegisters();
   2083   if (saves != 0) {
   2084     __ MultiPop(saves);
   2085   }
   2086 
   2087   // Restore FPU registers.
   2088   const RegList saves_fpu = descriptor->CalleeSavedFPRegisters();
   2089   if (saves_fpu != 0) {
   2090     __ MultiPopFPU(saves_fpu);
   2091   }
   2092 
   2093   MipsOperandConverter g(this, nullptr);
   2094   if (descriptor->IsCFunctionCall()) {
   2095     AssembleDeconstructFrame();
   2096   } else if (frame_access_state()->has_frame()) {
    2097     // Canonicalize JSFunction return sites for now unless they have a variable
   2098     // number of stack slot pops.
   2099     if (pop->IsImmediate() && g.ToConstant(pop).ToInt32() == 0) {
   2100       if (return_label_.is_bound()) {
   2101         __ Branch(&return_label_);
   2102         return;
   2103       } else {
   2104         __ bind(&return_label_);
   2105         AssembleDeconstructFrame();
   2106       }
   2107     } else {
   2108       AssembleDeconstructFrame();
   2109     }
   2110   }
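           // The pop operand is either an immediate count of extra stack slots to
           // drop (folded into pop_count) or a register holding that count, which
           // is scaled to bytes and added to sp before the return.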
   2111   if (pop->IsImmediate()) {
   2112     DCHECK_EQ(Constant::kInt32, g.ToConstant(pop).type());
   2113     pop_count += g.ToConstant(pop).ToInt32();
   2114   } else {
   2115     Register pop_reg = g.ToRegister(pop);
   2116     __ sll(pop_reg, pop_reg, kPointerSizeLog2);
   2117     __ Addu(sp, sp, Operand(pop_reg));
   2118   }
   2119   if (pop_count != 0) {
   2120     __ DropAndRet(pop_count);
   2121   } else {
   2122     __ Ret();
   2123   }
   2124 }
   2125 
   2126 
   2127 void CodeGenerator::AssembleMove(InstructionOperand* source,
   2128                                  InstructionOperand* destination) {
   2129   MipsOperandConverter g(this, nullptr);
   2130   // Dispatch on the source and destination operand kinds.  Not all
   2131   // combinations are possible.
   2132   if (source->IsRegister()) {
   2133     DCHECK(destination->IsRegister() || destination->IsStackSlot());
   2134     Register src = g.ToRegister(source);
   2135     if (destination->IsRegister()) {
   2136       __ mov(g.ToRegister(destination), src);
   2137     } else {
   2138       __ sw(src, g.ToMemOperand(destination));
   2139     }
   2140   } else if (source->IsStackSlot()) {
   2141     DCHECK(destination->IsRegister() || destination->IsStackSlot());
   2142     MemOperand src = g.ToMemOperand(source);
   2143     if (destination->IsRegister()) {
   2144       __ lw(g.ToRegister(destination), src);
   2145     } else {
   2146       Register temp = kScratchReg;
   2147       __ lw(temp, src);
   2148       __ sw(temp, g.ToMemOperand(destination));
   2149     }
   2150   } else if (source->IsConstant()) {
   2151     Constant src = g.ToConstant(source);
   2152     if (destination->IsRegister() || destination->IsStackSlot()) {
   2153       Register dst =
   2154           destination->IsRegister() ? g.ToRegister(destination) : kScratchReg;
   2155       switch (src.type()) {
   2156         case Constant::kInt32:
   2157           if (RelocInfo::IsWasmReference(src.rmode())) {
   2158             __ li(dst, Operand(src.ToInt32(), src.rmode()));
   2159           } else {
   2160             __ li(dst, Operand(src.ToInt32()));
   2161           }
   2162           break;
   2163         case Constant::kFloat32:
   2164           __ li(dst, isolate()->factory()->NewNumber(src.ToFloat32(), TENURED));
   2165           break;
   2166         case Constant::kInt64:
   2167           UNREACHABLE();
   2168           break;
   2169         case Constant::kFloat64:
   2170           __ li(dst, isolate()->factory()->NewNumber(src.ToFloat64(), TENURED));
   2171           break;
   2172         case Constant::kExternalReference:
   2173           __ li(dst, Operand(src.ToExternalReference()));
   2174           break;
   2175         case Constant::kHeapObject: {
   2176           Handle<HeapObject> src_object = src.ToHeapObject();
   2177           Heap::RootListIndex index;
   2178           if (IsMaterializableFromRoot(src_object, &index)) {
   2179             __ LoadRoot(dst, index);
   2180           } else {
   2181             __ li(dst, src_object);
   2182           }
   2183           break;
   2184         }
   2185         case Constant::kRpoNumber:
   2186           UNREACHABLE();  // TODO(titzer): loading RPO numbers on mips.
   2187           break;
   2188       }
   2189       if (destination->IsStackSlot()) __ sw(dst, g.ToMemOperand(destination));
   2190     } else if (src.type() == Constant::kFloat32) {
   2191       if (destination->IsFPStackSlot()) {
   2192         MemOperand dst = g.ToMemOperand(destination);
   2193         if (bit_cast<int32_t>(src.ToFloat32()) == 0) {
   2194           __ sw(zero_reg, dst);
   2195         } else {
   2196           __ li(at, Operand(bit_cast<int32_t>(src.ToFloat32())));
   2197           __ sw(at, dst);
   2198         }
   2199       } else {
   2200         DCHECK(destination->IsFPRegister());
   2201         FloatRegister dst = g.ToSingleRegister(destination);
   2202         __ Move(dst, src.ToFloat32());
   2203       }
   2204     } else {
   2205       DCHECK_EQ(Constant::kFloat64, src.type());
   2206       DoubleRegister dst = destination->IsFPRegister()
   2207                                ? g.ToDoubleRegister(destination)
   2208                                : kScratchDoubleReg;
   2209       __ Move(dst, src.ToFloat64());
   2210       if (destination->IsFPStackSlot()) {
   2211         __ sdc1(dst, g.ToMemOperand(destination));
   2212       }
   2213     }
   2214   } else if (source->IsFPRegister()) {
   2215     FPURegister src = g.ToDoubleRegister(source);
   2216     if (destination->IsFPRegister()) {
   2217       FPURegister dst = g.ToDoubleRegister(destination);
   2218       __ Move(dst, src);
   2219     } else {
   2220       DCHECK(destination->IsFPStackSlot());
   2221       MachineRepresentation rep =
   2222           LocationOperand::cast(source)->representation();
   2223       if (rep == MachineRepresentation::kFloat64) {
   2224         __ sdc1(src, g.ToMemOperand(destination));
   2225       } else if (rep == MachineRepresentation::kFloat32) {
   2226         __ swc1(src, g.ToMemOperand(destination));
   2227       } else {
   2228         DCHECK_EQ(MachineRepresentation::kSimd128, rep);
   2229         UNREACHABLE();
   2230       }
   2231     }
   2232   } else if (source->IsFPStackSlot()) {
   2233     DCHECK(destination->IsFPRegister() || destination->IsFPStackSlot());
   2234     MemOperand src = g.ToMemOperand(source);
   2235     MachineRepresentation rep = LocationOperand::cast(source)->representation();
   2236     if (destination->IsFPRegister()) {
   2237       if (rep == MachineRepresentation::kFloat64) {
   2238         __ ldc1(g.ToDoubleRegister(destination), src);
   2239       } else if (rep == MachineRepresentation::kFloat32) {
   2240         __ lwc1(g.ToDoubleRegister(destination), src);
   2241       } else {
   2242         DCHECK_EQ(MachineRepresentation::kSimd128, rep);
   2243         UNREACHABLE();
   2244       }
   2245     } else {
   2246       FPURegister temp = kScratchDoubleReg;
   2247       if (rep == MachineRepresentation::kFloat64) {
   2248         __ ldc1(temp, src);
   2249         __ sdc1(temp, g.ToMemOperand(destination));
   2250       } else if (rep == MachineRepresentation::kFloat32) {
   2251         __ lwc1(temp, src);
   2252         __ swc1(temp, g.ToMemOperand(destination));
   2253       } else {
   2254         DCHECK_EQ(MachineRepresentation::kSimd128, rep);
   2255         UNREACHABLE();
   2256       }
   2257     }
   2258   } else {
   2259     UNREACHABLE();
   2260   }
   2261 }
   2262 
   2263 
   2264 void CodeGenerator::AssembleSwap(InstructionOperand* source,
   2265                                  InstructionOperand* destination) {
   2266   MipsOperandConverter g(this, nullptr);
   2267   // Dispatch on the source and destination operand kinds.  Not all
   2268   // combinations are possible.
   2269   if (source->IsRegister()) {
   2270     // Register-register.
   2271     Register temp = kScratchReg;
   2272     Register src = g.ToRegister(source);
   2273     if (destination->IsRegister()) {
   2274       Register dst = g.ToRegister(destination);
   2275       __ Move(temp, src);
   2276       __ Move(src, dst);
   2277       __ Move(dst, temp);
   2278     } else {
   2279       DCHECK(destination->IsStackSlot());
   2280       MemOperand dst = g.ToMemOperand(destination);
   2281       __ mov(temp, src);
   2282       __ lw(src, dst);
   2283       __ sw(temp, dst);
   2284     }
   2285   } else if (source->IsStackSlot()) {
   2286     DCHECK(destination->IsStackSlot());
   2287     Register temp_0 = kScratchReg;
   2288     Register temp_1 = kCompareReg;
   2289     MemOperand src = g.ToMemOperand(source);
   2290     MemOperand dst = g.ToMemOperand(destination);
   2291     __ lw(temp_0, src);
   2292     __ lw(temp_1, dst);
   2293     __ sw(temp_0, dst);
   2294     __ sw(temp_1, src);
   2295   } else if (source->IsFPRegister()) {
   2296     FPURegister temp = kScratchDoubleReg;
   2297     FPURegister src = g.ToDoubleRegister(source);
   2298     if (destination->IsFPRegister()) {
   2299       FPURegister dst = g.ToDoubleRegister(destination);
   2300       __ Move(temp, src);
   2301       __ Move(src, dst);
   2302       __ Move(dst, temp);
   2303     } else {
   2304       DCHECK(destination->IsFPStackSlot());
   2305       MemOperand dst = g.ToMemOperand(destination);
   2306       MachineRepresentation rep =
   2307           LocationOperand::cast(source)->representation();
   2308       if (rep == MachineRepresentation::kFloat64) {
   2309         __ Move(temp, src);
   2310         __ ldc1(src, dst);
   2311         __ sdc1(temp, dst);
   2312       } else if (rep == MachineRepresentation::kFloat32) {
   2313         __ Move(temp, src);
   2314         __ lwc1(src, dst);
   2315         __ swc1(temp, dst);
   2316       } else {
   2317         DCHECK_EQ(MachineRepresentation::kSimd128, rep);
   2318         UNREACHABLE();
   2319       }
   2320     }
   2321   } else if (source->IsFPStackSlot()) {
   2322     DCHECK(destination->IsFPStackSlot());
   2323     Register temp_0 = kScratchReg;
   2324     FPURegister temp_1 = kScratchDoubleReg;
   2325     MemOperand src0 = g.ToMemOperand(source);
   2326     MemOperand dst0 = g.ToMemOperand(destination);
   2327     MachineRepresentation rep = LocationOperand::cast(source)->representation();
   2328     if (rep == MachineRepresentation::kFloat64) {
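               // A float64 stack slot spans two words on MIPS32: park the
               // destination double in an FPU register, copy the source word by
               // word, then store the saved destination back over the source slot.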
   2329       MemOperand src1(src0.rm(), src0.offset() + kIntSize);
   2330       MemOperand dst1(dst0.rm(), dst0.offset() + kIntSize);
   2331       __ ldc1(temp_1, dst0);  // Save destination in temp_1.
   2332       __ lw(temp_0, src0);    // Then use temp_0 to copy source to destination.
   2333       __ sw(temp_0, dst0);
   2334       __ lw(temp_0, src1);
   2335       __ sw(temp_0, dst1);
   2336       __ sdc1(temp_1, src0);
   2337     } else if (rep == MachineRepresentation::kFloat32) {
   2338       __ lwc1(temp_1, dst0);  // Save destination in temp_1.
   2339       __ lw(temp_0, src0);    // Then use temp_0 to copy source to destination.
   2340       __ sw(temp_0, dst0);
   2341       __ swc1(temp_1, src0);
   2342     } else {
   2343       DCHECK_EQ(MachineRepresentation::kSimd128, rep);
   2344       UNREACHABLE();
   2345     }
   2346   } else {
   2347     // No other combinations are possible.
   2348     UNREACHABLE();
   2349   }
   2350 }
   2351 
   2352 
   2353 void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
   2354   // On 32-bit MIPS we emit the jump tables inline.
   2355   UNREACHABLE();
   2356 }
   2357 
   2358 
   2359 void CodeGenerator::EnsureSpaceForLazyDeopt() {
   2360   if (!info()->ShouldEnsureSpaceForLazyDeopt()) {
   2361     return;
   2362   }
   2363 
   2364   int space_needed = Deoptimizer::patch_size();
   2365   // Ensure that we have enough space after the previous lazy-bailout
   2366   // instruction for patching the code here.
   2367   int current_pc = masm()->pc_offset();
   2368   if (current_pc < last_lazy_deopt_pc_ + space_needed) {
    2369     // Block trampoline pool emission for the duration of the padding.
   2370     v8::internal::Assembler::BlockTrampolinePoolScope block_trampoline_pool(
   2371         masm());
   2372     int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
   2373     DCHECK_EQ(0, padding_size % v8::internal::Assembler::kInstrSize);
   2374     while (padding_size > 0) {
   2375       __ nop();
   2376       padding_size -= v8::internal::Assembler::kInstrSize;
   2377     }
   2378   }
   2379 }
   2380 
   2381 #undef __
   2382 
   2383 }  // namespace compiler
   2384 }  // namespace internal
   2385 }  // namespace v8
   2386