Home | History | Annotate | Download | only in mips
      1 // Copyright 2014 the V8 project authors. All rights reserved.
      2 // Use of this source code is governed by a BSD-style license that can be
      3 // found in the LICENSE file.
      4 
      5 #include "src/base/adapters.h"
      6 #include "src/base/bits.h"
      7 #include "src/compiler/instruction-selector-impl.h"
      8 #include "src/compiler/node-matchers.h"
      9 #include "src/compiler/node-properties.h"
     10 
     11 namespace v8 {
     12 namespace internal {
     13 namespace compiler {
     14 
     15 #define TRACE_UNIMPL() \
     16   PrintF("UNIMPLEMENTED instr_sel: %s at line %d\n", __FUNCTION__, __LINE__)
     17 
     18 #define TRACE() PrintF("instr_sel: %s at line %d\n", __FUNCTION__, __LINE__)
     19 
     20 
     21 // Adds Mips-specific methods for generating InstructionOperands.
class MipsOperandGenerator final : public OperandGenerator {
 public:
  explicit MipsOperandGenerator(InstructionSelector* selector)
      : OperandGenerator(selector) {}

  // Returns an immediate operand when |node| is a constant that fits the
  // immediate field of |opcode|; otherwise allocates a register operand.
  InstructionOperand UseOperand(Node* node, InstructionCode opcode) {
    if (CanBeImmediate(node, opcode)) {
      return UseImmediate(node);
    }
    return UseRegister(node);
  }

  // Use the zero register if the node has the immediate value zero, otherwise
  // assign a register. Float constants are compared bit-wise so that only a
  // positive zero (all-zero bit pattern) qualifies, never -0.0.
  InstructionOperand UseRegisterOrImmediateZero(Node* node) {
    if ((IsIntegerConstant(node) && (GetIntegerConstantValue(node) == 0)) ||
        (IsFloatConstant(node) &&
         (bit_cast<int64_t>(GetFloatConstantValue(node)) == V8_INT64_C(0)))) {
      return UseImmediate(node);
    }
    return UseRegister(node);
  }

  // True when |node| is an Int32Constant (the only integer constant kind
  // handled by this 32-bit selector).
  bool IsIntegerConstant(Node* node) {
    return (node->opcode() == IrOpcode::kInt32Constant);
  }

  // Returns the value of an Int32Constant, widened to int64_t.
  int64_t GetIntegerConstantValue(Node* node) {
    DCHECK(node->opcode() == IrOpcode::kInt32Constant);
    return OpParameter<int32_t>(node);
  }

  bool IsFloatConstant(Node* node) {
    return (node->opcode() == IrOpcode::kFloat32Constant) ||
           (node->opcode() == IrOpcode::kFloat64Constant);
  }

  // Returns the value of a Float32Constant (widened to double) or a
  // Float64Constant.
  double GetFloatConstantValue(Node* node) {
    if (node->opcode() == IrOpcode::kFloat32Constant) {
      return OpParameter<float>(node);
    }
    DCHECK_EQ(IrOpcode::kFloat64Constant, node->opcode());
    return OpParameter<double>(node);
  }

  // Decides whether |node| is a constant that fits the immediate field of
  // the instruction selected by |opcode|.
  bool CanBeImmediate(Node* node, InstructionCode opcode) {
    Int32Matcher m(node);
    if (!m.HasValue()) return false;
    int32_t value = m.Value();
    switch (ArchOpcodeField::decode(opcode)) {
      // Shift amounts are encoded in a 5-bit field.
      case kMipsShl:
      case kMipsSar:
      case kMipsShr:
        return is_uint5(value);
      // ALU immediates occupy a 16-bit field.
      case kMipsAdd:
      case kMipsAnd:
      case kMipsOr:
      case kMipsTst:
      case kMipsSub:
      case kMipsXor:
        return is_uint16(value);
      case kMipsLb:
      case kMipsLbu:
      case kMipsSb:
      case kMipsLh:
      case kMipsLhu:
      case kMipsSh:
      case kMipsLw:
      case kMipsSw:
      case kMipsLwc1:
      case kMipsSwc1:
      case kMipsLdc1:
      case kMipsSdc1:
      case kCheckedLoadInt8:
      case kCheckedLoadUint8:
      case kCheckedLoadInt16:
      case kCheckedLoadUint16:
      case kCheckedLoadWord32:
      case kCheckedStoreWord8:
      case kCheckedStoreWord16:
      case kCheckedStoreWord32:
      case kCheckedLoadFloat32:
      case kCheckedLoadFloat64:
      case kCheckedStoreFloat32:
      case kCheckedStoreFloat64:
        // true even for 32b values, offsets > 16b
        // are handled in assembler-mips.cc
        return is_int32(value);
      default:
        return is_int16(value);
    }
  }

 private:
  bool ImmediateFitsAddrMode1Instruction(int32_t imm) const {
    TRACE_UNIMPL();
    return false;
  }
};
    121 
    122 
    123 static void VisitRRR(InstructionSelector* selector, ArchOpcode opcode,
    124                      Node* node) {
    125   MipsOperandGenerator g(selector);
    126   selector->Emit(opcode, g.DefineAsRegister(node),
    127                  g.UseRegister(node->InputAt(0)),
    128                  g.UseRegister(node->InputAt(1)));
    129 }
    130 
    131 
    132 static void VisitRR(InstructionSelector* selector, ArchOpcode opcode,
    133                     Node* node) {
    134   MipsOperandGenerator g(selector);
    135   selector->Emit(opcode, g.DefineAsRegister(node),
    136                  g.UseRegister(node->InputAt(0)));
    137 }
    138 
    139 
    140 static void VisitRRO(InstructionSelector* selector, ArchOpcode opcode,
    141                      Node* node) {
    142   MipsOperandGenerator g(selector);
    143   selector->Emit(opcode, g.DefineAsRegister(node),
    144                  g.UseRegister(node->InputAt(0)),
    145                  g.UseOperand(node->InputAt(1), opcode));
    146 }
    147 
    148 bool TryMatchImmediate(InstructionSelector* selector,
    149                        InstructionCode* opcode_return, Node* node,
    150                        size_t* input_count_return, InstructionOperand* inputs) {
    151   MipsOperandGenerator g(selector);
    152   if (g.CanBeImmediate(node, *opcode_return)) {
    153     *opcode_return |= AddressingModeField::encode(kMode_MRI);
    154     inputs[0] = g.UseImmediate(node);
    155     *input_count_return = 1;
    156     return true;
    157   }
    158   return false;
    159 }
    160 
// Shared routine for binary operations. |opcode| is used when the right
// operand (or neither operand) is an immediate; when |has_reverse_opcode| is
// set and only the left operand fits as an immediate, the operands are
// swapped and |reverse_opcode| is used instead. |cont| describes how the
// result/flags are consumed: branch, deoptimize, set, or trap.
static void VisitBinop(InstructionSelector* selector, Node* node,
                       InstructionCode opcode, bool has_reverse_opcode,
                       InstructionCode reverse_opcode,
                       FlagsContinuation* cont) {
  MipsOperandGenerator g(selector);
  Int32BinopMatcher m(node);
  InstructionOperand inputs[4];
  size_t input_count = 0;
  InstructionOperand outputs[2];
  size_t output_count = 0;

  // Prefer an immediate on the right; failing that, try the left operand with
  // the reversed opcode; otherwise both operands go through UseOperand /
  // UseRegister.
  if (TryMatchImmediate(selector, &opcode, m.right().node(), &input_count,
                        &inputs[1])) {
    inputs[0] = g.UseRegister(m.left().node());
    input_count++;
  } else if (has_reverse_opcode &&
             TryMatchImmediate(selector, &reverse_opcode, m.left().node(),
                               &input_count, &inputs[1])) {
    inputs[0] = g.UseRegister(m.right().node());
    opcode = reverse_opcode;
    input_count++;
  } else {
    inputs[input_count++] = g.UseRegister(m.left().node());
    inputs[input_count++] = g.UseOperand(m.right().node(), opcode);
  }

  // Continuation-specific extra inputs.
  if (cont->IsBranch()) {
    inputs[input_count++] = g.Label(cont->true_block());
    inputs[input_count++] = g.Label(cont->false_block());
  } else if (cont->IsTrap()) {
    inputs[input_count++] = g.TempImmediate(cont->trap_id());
  }

  if (cont->IsDeoptimize()) {
    // If we can deoptimize as a result of the binop, we need to make sure that
    // the deopt inputs are not overwritten by the binop result. One way
    // to achieve that is to declare the output register as same-as-first.
    outputs[output_count++] = g.DefineSameAsFirst(node);
  } else {
    outputs[output_count++] = g.DefineAsRegister(node);
  }
  if (cont->IsSet()) {
    outputs[output_count++] = g.DefineAsRegister(cont->result());
  }

  DCHECK_NE(0u, input_count);
  DCHECK_NE(0u, output_count);
  DCHECK_GE(arraysize(inputs), input_count);
  DCHECK_GE(arraysize(outputs), output_count);

  opcode = cont->Encode(opcode);
  if (cont->IsDeoptimize()) {
    selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
                             cont->kind(), cont->reason(), cont->frame_state());
  } else {
    selector->Emit(opcode, output_count, outputs, input_count, inputs);
  }
}
    219 
// Convenience overload: binary operation without a flags continuation.
static void VisitBinop(InstructionSelector* selector, Node* node,
                       InstructionCode opcode, bool has_reverse_opcode,
                       InstructionCode reverse_opcode) {
  FlagsContinuation cont;
  VisitBinop(selector, node, opcode, has_reverse_opcode, reverse_opcode, &cont);
}
    226 
// Convenience overload: binary operation with a continuation but no
// reversed opcode.
static void VisitBinop(InstructionSelector* selector, Node* node,
                       InstructionCode opcode, FlagsContinuation* cont) {
  VisitBinop(selector, node, opcode, false, kArchNop, cont);
}
    231 
// Convenience overload: binary operation with neither a reversed opcode nor
// a flags continuation.
static void VisitBinop(InstructionSelector* selector, Node* node,
                       InstructionCode opcode) {
  VisitBinop(selector, node, opcode, false, kArchNop);
}
    236 
    237 
void InstructionSelector::VisitLoad(Node* node) {
  LoadRepresentation load_rep = LoadRepresentationOf(node->op());
  MipsOperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);

  // Pick the load opcode that matches the machine representation.
  ArchOpcode opcode = kArchNop;
  switch (load_rep.representation()) {
    case MachineRepresentation::kFloat32:
      opcode = kMipsLwc1;
      break;
    case MachineRepresentation::kFloat64:
      opcode = kMipsLdc1;
      break;
    case MachineRepresentation::kBit:  // Fall through.
    case MachineRepresentation::kWord8:
      opcode = load_rep.IsUnsigned() ? kMipsLbu : kMipsLb;
      break;
    case MachineRepresentation::kWord16:
      opcode = load_rep.IsUnsigned() ? kMipsLhu : kMipsLh;
      break;
    case MachineRepresentation::kTaggedSigned:   // Fall through.
    case MachineRepresentation::kTaggedPointer:  // Fall through.
    case MachineRepresentation::kTagged:  // Fall through.
    case MachineRepresentation::kWord32:
      opcode = kMipsLw;
      break;
    case MachineRepresentation::kWord64:   // Fall through.
    case MachineRepresentation::kSimd128:  // Fall through.
    case MachineRepresentation::kSimd1x4:  // Fall through.
    case MachineRepresentation::kSimd1x8:  // Fall through.
    case MachineRepresentation::kSimd1x16:  // Fall through.
    case MachineRepresentation::kNone:
      UNREACHABLE();
      return;
  }

  if (g.CanBeImmediate(index, opcode)) {
    // base register + immediate offset addressing.
    Emit(opcode | AddressingModeField::encode(kMode_MRI),
         g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index));
  } else {
    // Index does not fit the immediate field: compute base + index into a
    // temp register and load from it with a zero offset.
    InstructionOperand addr_reg = g.TempRegister();
    Emit(kMipsAdd | AddressingModeField::encode(kMode_None), addr_reg,
         g.UseRegister(index), g.UseRegister(base));
    // Emit desired load opcode, using temp addr_reg.
    Emit(opcode | AddressingModeField::encode(kMode_MRI),
         g.DefineAsRegister(node), addr_reg, g.TempImmediate(0));
  }
}
    287 
// Protected (trap-on-OOB) loads are not implemented for MIPS yet.
void InstructionSelector::VisitProtectedLoad(Node* node) {
  // TODO(eholk)
  UNIMPLEMENTED();
}
    292 
void InstructionSelector::VisitStore(Node* node) {
  MipsOperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* value = node->InputAt(2);

  StoreRepresentation store_rep = StoreRepresentationOf(node->op());
  WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
  MachineRepresentation rep = store_rep.representation();

  // TODO(mips): I guess this could be done in a better way.
  if (write_barrier_kind != kNoWriteBarrier) {
    // Stores that need a write barrier go through the generic
    // kArchStoreWithWriteBarrier path. All three inputs are placed in unique
    // registers, and two temp registers are reserved for the barrier code.
    DCHECK(CanBeTaggedPointer(rep));
    InstructionOperand inputs[3];
    size_t input_count = 0;
    inputs[input_count++] = g.UseUniqueRegister(base);
    inputs[input_count++] = g.UseUniqueRegister(index);
    inputs[input_count++] = g.UseUniqueRegister(value);
    RecordWriteMode record_write_mode = RecordWriteMode::kValueIsAny;
    switch (write_barrier_kind) {
      case kNoWriteBarrier:
        UNREACHABLE();
        break;
      case kMapWriteBarrier:
        record_write_mode = RecordWriteMode::kValueIsMap;
        break;
      case kPointerWriteBarrier:
        record_write_mode = RecordWriteMode::kValueIsPointer;
        break;
      case kFullWriteBarrier:
        record_write_mode = RecordWriteMode::kValueIsAny;
        break;
    }
    InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
    size_t const temp_count = arraysize(temps);
    InstructionCode code = kArchStoreWithWriteBarrier;
    code |= MiscField::encode(static_cast<int>(record_write_mode));
    Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
  } else {
    // Plain store: pick the store opcode matching the representation.
    ArchOpcode opcode = kArchNop;
    switch (rep) {
      case MachineRepresentation::kFloat32:
        opcode = kMipsSwc1;
        break;
      case MachineRepresentation::kFloat64:
        opcode = kMipsSdc1;
        break;
      case MachineRepresentation::kBit:  // Fall through.
      case MachineRepresentation::kWord8:
        opcode = kMipsSb;
        break;
      case MachineRepresentation::kWord16:
        opcode = kMipsSh;
        break;
      case MachineRepresentation::kTaggedSigned:   // Fall through.
      case MachineRepresentation::kTaggedPointer:  // Fall through.
      case MachineRepresentation::kTagged:  // Fall through.
      case MachineRepresentation::kWord32:
        opcode = kMipsSw;
        break;
      case MachineRepresentation::kWord64:   // Fall through.
      case MachineRepresentation::kSimd128:  // Fall through.
      case MachineRepresentation::kSimd1x4:  // Fall through.
      case MachineRepresentation::kSimd1x8:  // Fall through.
      case MachineRepresentation::kSimd1x16:  // Fall through.
      case MachineRepresentation::kNone:
        UNREACHABLE();
        return;
    }

    if (g.CanBeImmediate(index, opcode)) {
      // base register + immediate offset addressing.
      Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
           g.UseRegister(base), g.UseImmediate(index),
           g.UseRegisterOrImmediateZero(value));
    } else {
      // Index does not fit the immediate field: compute base + index into a
      // temp register and store through it with a zero offset.
      InstructionOperand addr_reg = g.TempRegister();
      Emit(kMipsAdd | AddressingModeField::encode(kMode_None), addr_reg,
           g.UseRegister(index), g.UseRegister(base));
      // Emit desired store opcode, using temp addr_reg.
      Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
           addr_reg, g.TempImmediate(0), g.UseRegisterOrImmediateZero(value));
    }
  }
}
    377 
// Protected (trap-on-OOB) stores are not implemented for MIPS yet.
void InstructionSelector::VisitProtectedStore(Node* node) {
  // TODO(eholk)
  UNIMPLEMENTED();
}
    382 
void InstructionSelector::VisitWord32And(Node* node) {
  MipsOperandGenerator g(this);
  Int32BinopMatcher m(node);
  // Pattern 1: And(Shr(x, imm), mask) with a contiguous low-bit mask can be
  // selected as a single Ext (bit-field extract).
  if (m.left().IsWord32Shr() && CanCover(node, m.left().node()) &&
      m.right().HasValue()) {
    uint32_t mask = m.right().Value();
    uint32_t mask_width = base::bits::CountPopulation32(mask);
    uint32_t mask_msb = base::bits::CountLeadingZeros32(mask);
    if ((mask_width != 0) && (mask_msb + mask_width == 32)) {
      // The mask must be contiguous, and occupy the least-significant bits.
      DCHECK_EQ(0u, base::bits::CountTrailingZeros32(mask));

      // Select Ext for And(Shr(x, imm), mask) where the mask is in the least
      // significant bits.
      Int32BinopMatcher mleft(m.left().node());
      if (mleft.right().HasValue()) {
        // Any shift value can match; int32 shifts use `value % 32`.
        uint32_t lsb = mleft.right().Value() & 0x1f;

        // Ext cannot extract bits past the register size, however since
        // shifting the original value would have introduced some zeros we can
        // still use Ext with a smaller mask and the remaining bits will be
        // zeros.
        if (lsb + mask_width > 32) mask_width = 32 - lsb;

        if (lsb == 0 && mask_width == 32) {
          // The whole word is selected: emit a plain register move (nop).
          Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(mleft.left().node()));
        } else {
          Emit(kMipsExt, g.DefineAsRegister(node),
               g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
               g.TempImmediate(mask_width));
        }
        return;
      }
      // Other cases fall through to the normal And operation.
    }
  }
  // Pattern 2: And with a constant whose complement is a contiguous low-bit
  // mask can be selected as Ins of zeros into the low bits.
  if (m.right().HasValue()) {
    uint32_t mask = m.right().Value();
    uint32_t shift = base::bits::CountPopulation32(~mask);
    uint32_t msb = base::bits::CountLeadingZeros32(~mask);
    if (shift != 0 && shift != 32 && msb + shift == 32) {
      // Insert zeros for (x >> K) << K => x & ~(2^K - 1) expression reduction
      // and remove constant loading of inverted mask.
      Emit(kMipsIns, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
           g.TempImmediate(0), g.TempImmediate(shift));
      return;
    }
  }
  VisitBinop(this, node, kMipsAnd, true, kMipsAnd);
}
    434 
    435 
void InstructionSelector::VisitWord32Or(Node* node) {
  // Or is commutative, so the same opcode serves as its own reverse.
  VisitBinop(this, node, kMipsOr, true, kMipsOr);
}
    439 
    440 
void InstructionSelector::VisitWord32Xor(Node* node) {
  Int32BinopMatcher m(node);
  // Xor(Or(a, b), -1) == ~(a | b): select a single Nor when both Or inputs
  // are non-constant registers.
  if (m.left().IsWord32Or() && CanCover(node, m.left().node()) &&
      m.right().Is(-1)) {
    Int32BinopMatcher mleft(m.left().node());
    if (!mleft.right().HasValue()) {
      MipsOperandGenerator g(this);
      Emit(kMipsNor, g.DefineAsRegister(node),
           g.UseRegister(mleft.left().node()),
           g.UseRegister(mleft.right().node()));
      return;
    }
  }
  if (m.right().Is(-1)) {
    // Use Nor for bit negation and eliminate constant loading for xori.
    MipsOperandGenerator g(this);
    Emit(kMipsNor, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
         g.TempImmediate(0));
    return;
  }
  VisitBinop(this, node, kMipsXor, true, kMipsXor);
}
    463 
    464 
void InstructionSelector::VisitWord32Shl(Node* node) {
  Int32BinopMatcher m(node);
  if (m.left().IsWord32And() && CanCover(node, m.left().node()) &&
      m.right().IsInRange(1, 31)) {
    MipsOperandGenerator g(this);
    Int32BinopMatcher mleft(m.left().node());
    // Match Word32Shl(Word32And(x, mask), imm) to Shl where the mask is
    // contiguous, and the shift immediate non-zero.
    if (mleft.right().HasValue()) {
      uint32_t mask = mleft.right().Value();
      uint32_t mask_width = base::bits::CountPopulation32(mask);
      uint32_t mask_msb = base::bits::CountLeadingZeros32(mask);
      if ((mask_width != 0) && (mask_msb + mask_width == 32)) {
        uint32_t shift = m.right().Value();
        DCHECK_EQ(0u, base::bits::CountTrailingZeros32(mask));
        DCHECK_NE(0u, shift);
        if ((shift + mask_width) >= 32) {
          // If the mask is contiguous and reaches or extends beyond the top
          // bit, only the shift is needed.
          Emit(kMipsShl, g.DefineAsRegister(node),
               g.UseRegister(mleft.left().node()),
               g.UseImmediate(m.right().node()));
          return;
        }
      }
    }
  }
  VisitRRO(this, kMipsShl, node);
}
    494 
    495 
void InstructionSelector::VisitWord32Shr(Node* node) {
  Int32BinopMatcher m(node);
  if (m.left().IsWord32And() && m.right().HasValue()) {
    // int32 shifts use `value % 32`.
    uint32_t lsb = m.right().Value() & 0x1f;
    Int32BinopMatcher mleft(m.left().node());
    if (mleft.right().HasValue()) {
      // Select Ext for Shr(And(x, mask), imm) where the result of the mask is
      // shifted into the least-significant bits.
      uint32_t mask = (mleft.right().Value() >> lsb) << lsb;
      unsigned mask_width = base::bits::CountPopulation32(mask);
      unsigned mask_msb = base::bits::CountLeadingZeros32(mask);
      if ((mask_msb + mask_width + lsb) == 32) {
        MipsOperandGenerator g(this);
        DCHECK_EQ(lsb, base::bits::CountTrailingZeros32(mask));
        Emit(kMipsExt, g.DefineAsRegister(node),
             g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
             g.TempImmediate(mask_width));
        return;
      }
    }
  }
  VisitRRO(this, kMipsShr, node);
}
    519 
    520 
void InstructionSelector::VisitWord32Sar(Node* node) {
  Int32BinopMatcher m(node);
  // Sar(Shl(x, K), K) with K == 16 or 24 is a sign-extension of the low
  // half-word or byte: select Seh / Seb instead of two shifts.
  if (m.left().IsWord32Shl() && CanCover(node, m.left().node())) {
    Int32BinopMatcher mleft(m.left().node());
    if (m.right().HasValue() && mleft.right().HasValue()) {
      MipsOperandGenerator g(this);
      uint32_t sar = m.right().Value();
      uint32_t shl = mleft.right().Value();
      if ((sar == shl) && (sar == 16)) {
        Emit(kMipsSeh, g.DefineAsRegister(node),
             g.UseRegister(mleft.left().node()));
        return;
      } else if ((sar == shl) && (sar == 24)) {
        Emit(kMipsSeb, g.DefineAsRegister(node),
             g.UseRegister(mleft.left().node()));
        return;
      }
    }
  }
  VisitRRO(this, kMipsSar, node);
}
    542 
    543 static void VisitInt32PairBinop(InstructionSelector* selector,
    544                                 InstructionCode pair_opcode,
    545                                 InstructionCode single_opcode, Node* node) {
    546   MipsOperandGenerator g(selector);
    547 
    548   Node* projection1 = NodeProperties::FindProjection(node, 1);
    549 
    550   if (projection1) {
    551     // We use UseUniqueRegister here to avoid register sharing with the output
    552     // register.
    553     InstructionOperand inputs[] = {g.UseUniqueRegister(node->InputAt(0)),
    554                                    g.UseUniqueRegister(node->InputAt(1)),
    555                                    g.UseUniqueRegister(node->InputAt(2)),
    556                                    g.UseUniqueRegister(node->InputAt(3))};
    557 
    558     InstructionOperand outputs[] = {
    559         g.DefineAsRegister(node),
    560         g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
    561     selector->Emit(pair_opcode, 2, outputs, 4, inputs);
    562   } else {
    563     // The high word of the result is not used, so we emit the standard 32 bit
    564     // instruction.
    565     selector->Emit(single_opcode, g.DefineSameAsFirst(node),
    566                    g.UseRegister(node->InputAt(0)),
    567                    g.UseRegister(node->InputAt(2)));
    568   }
    569 }
    570 
// 64-bit add on {low, high} register pairs.
void InstructionSelector::VisitInt32PairAdd(Node* node) {
  VisitInt32PairBinop(this, kMipsAddPair, kMipsAdd, node);
}
    574 
// 64-bit subtract on {low, high} register pairs.
void InstructionSelector::VisitInt32PairSub(Node* node) {
  VisitInt32PairBinop(this, kMipsSubPair, kMipsSub, node);
}
    578 
// 64-bit multiply on {low, high} register pairs.
void InstructionSelector::VisitInt32PairMul(Node* node) {
  VisitInt32PairBinop(this, kMipsMulPair, kMipsMul, node);
}
    582 
    583 // Shared routine for multiple shift operations.
static void VisitWord32PairShift(InstructionSelector* selector,
                                 InstructionCode opcode, Node* node) {
  MipsOperandGenerator g(selector);
  // The shift amount (input 2) may be a constant immediate; otherwise it
  // needs its own unique register.
  Int32Matcher m(node->InputAt(2));
  InstructionOperand shift_operand;
  if (m.HasValue()) {
    shift_operand = g.UseImmediate(m.node());
  } else {
    shift_operand = g.UseUniqueRegister(m.node());
  }

  // We use UseUniqueRegister here to avoid register sharing with the output
  // register.
  InstructionOperand inputs[] = {g.UseUniqueRegister(node->InputAt(0)),
                                 g.UseUniqueRegister(node->InputAt(1)),
                                 shift_operand};

  Node* projection1 = NodeProperties::FindProjection(node, 1);

  InstructionOperand outputs[2];
  InstructionOperand temps[1];
  int32_t output_count = 0;
  int32_t temp_count = 0;

  outputs[output_count++] = g.DefineAsRegister(node);
  if (projection1) {
    outputs[output_count++] = g.DefineAsRegister(projection1);
  } else {
    // High word unused: give the instruction a scratch register instead of a
    // second output.
    temps[temp_count++] = g.TempRegister();
  }

  selector->Emit(opcode, output_count, outputs, 3, inputs, temp_count, temps);
}
    617 
// 64-bit left shift on {low, high} register pairs.
void InstructionSelector::VisitWord32PairShl(Node* node) {
  VisitWord32PairShift(this, kMipsShlPair, node);
}
    621 
// 64-bit logical right shift on {low, high} register pairs.
void InstructionSelector::VisitWord32PairShr(Node* node) {
  VisitWord32PairShift(this, kMipsShrPair, node);
}
    625 
// 64-bit arithmetic right shift on {low, high} register pairs.
void InstructionSelector::VisitWord32PairSar(Node* node) {
  VisitWord32PairShift(this, kMipsSarPair, node);
}
    629 
// Rotate right: register result, register input, register-or-immediate
// rotate amount.
void InstructionSelector::VisitWord32Ror(Node* node) {
  VisitRRO(this, kMipsRor, node);
}
    633 
    634 
// Count leading zeros.
void InstructionSelector::VisitWord32Clz(Node* node) {
  VisitRR(this, kMipsClz, node);
}
    638 
    639 
// Bit reversal is not selectable on this target; the opcode must never
// reach instruction selection.
void InstructionSelector::VisitWord32ReverseBits(Node* node) { UNREACHABLE(); }
    641 
// 64-bit byte swap does not apply to 32-bit MIPS; must never be reached.
void InstructionSelector::VisitWord64ReverseBytes(Node* node) { UNREACHABLE(); }
    643 
// 32-bit byte swap.
void InstructionSelector::VisitWord32ReverseBytes(Node* node) {
  MipsOperandGenerator g(this);
  Emit(kMipsByteSwap32, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}
    649 
// Count trailing zeros.
void InstructionSelector::VisitWord32Ctz(Node* node) {
  MipsOperandGenerator g(this);
  Emit(kMipsCtz, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
}
    654 
    655 
// Population count (number of set bits).
void InstructionSelector::VisitWord32Popcnt(Node* node) {
  MipsOperandGenerator g(this);
  Emit(kMipsPopcnt, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
}
    660 
    661 
void InstructionSelector::VisitInt32Add(Node* node) {
  MipsOperandGenerator g(this);
  Int32BinopMatcher m(node);

  // Select Lsa for (left + (left_of_right << imm)).
  if (m.right().opcode() == IrOpcode::kWord32Shl &&
      CanCover(node, m.left().node()) && CanCover(node, m.right().node())) {
    Int32BinopMatcher mright(m.right().node());
    if (mright.right().HasValue() && !m.left().HasValue()) {
      int32_t shift_value = static_cast<int32_t>(mright.right().Value());
      Emit(kMipsLsa, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
           g.UseRegister(mright.left().node()), g.TempImmediate(shift_value));
      return;
    }
  }

  // Select Lsa for ((left_of_left << imm) + right).
  if (m.left().opcode() == IrOpcode::kWord32Shl &&
      CanCover(node, m.right().node()) && CanCover(node, m.left().node())) {
    Int32BinopMatcher mleft(m.left().node());
    if (mleft.right().HasValue() && !m.right().HasValue()) {
      int32_t shift_value = static_cast<int32_t>(mleft.right().Value());
      Emit(kMipsLsa, g.DefineAsRegister(node), g.UseRegister(m.right().node()),
           g.UseRegister(mleft.left().node()), g.TempImmediate(shift_value));
      return;
    }
  }

  // Fallback: plain add; Add is commutative so it is its own reverse opcode.
  VisitBinop(this, node, kMipsAdd, true, kMipsAdd);
}
    692 
    693 
// Subtraction is not commutative, so no reverse opcode is provided.
void InstructionSelector::VisitInt32Sub(Node* node) {
  VisitBinop(this, node, kMipsSub);
}
    697 
    698 
void InstructionSelector::VisitInt32Mul(Node* node) {
  MipsOperandGenerator g(this);
  Int32BinopMatcher m(node);
  // Strength-reduce multiplication by a positive constant.
  if (m.right().HasValue() && m.right().Value() > 0) {
    int32_t value = m.right().Value();
    // x * 2^k  =>  x << k.
    if (base::bits::IsPowerOfTwo32(value)) {
      Emit(kMipsShl | AddressingModeField::encode(kMode_None),
           g.DefineAsRegister(node), g.UseRegister(m.left().node()),
           g.TempImmediate(WhichPowerOf2(value)));
      return;
    }
    // x * (2^k + 1)  =>  Lsa: x + (x << k).
    if (base::bits::IsPowerOfTwo32(value - 1)) {
      Emit(kMipsLsa, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
           g.UseRegister(m.left().node()),
           g.TempImmediate(WhichPowerOf2(value - 1)));
      return;
    }
    // x * (2^k - 1)  =>  (x << k) - x.
    if (base::bits::IsPowerOfTwo32(value + 1)) {
      InstructionOperand temp = g.TempRegister();
      Emit(kMipsShl | AddressingModeField::encode(kMode_None), temp,
           g.UseRegister(m.left().node()),
           g.TempImmediate(WhichPowerOf2(value + 1)));
      Emit(kMipsSub | AddressingModeField::encode(kMode_None),
           g.DefineAsRegister(node), temp, g.UseRegister(m.left().node()));
      return;
    }
  }
  VisitRRR(this, kMipsMul, node);
}
    728 
    729 
// High 32 bits of a signed 32x32 -> 64 multiply.
void InstructionSelector::VisitInt32MulHigh(Node* node) {
  VisitRRR(this, kMipsMulHigh, node);
}
    733 
    734 
    735 void InstructionSelector::VisitUint32MulHigh(Node* node) {
    736   MipsOperandGenerator g(this);
    737   Emit(kMipsMulHighU, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
    738        g.UseRegister(node->InputAt(1)));
    739 }
    740 
    741 
// Signed 32-bit division. The output is constrained same-as-first.
void InstructionSelector::VisitInt32Div(Node* node) {
  MipsOperandGenerator g(this);
  Int32BinopMatcher m(node);
  Emit(kMipsDiv, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
       g.UseRegister(m.right().node()));
}
    748 
    749 
    750 void InstructionSelector::VisitUint32Div(Node* node) {
    751   MipsOperandGenerator g(this);
    752   Int32BinopMatcher m(node);
    753   Emit(kMipsDivU, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
    754        g.UseRegister(m.right().node()));
    755 }
    756 
    757 
    758 void InstructionSelector::VisitInt32Mod(Node* node) {
    759   MipsOperandGenerator g(this);
    760   Int32BinopMatcher m(node);
    761   Emit(kMipsMod, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
    762        g.UseRegister(m.right().node()));
    763 }
    764 
    765 
    766 void InstructionSelector::VisitUint32Mod(Node* node) {
    767   MipsOperandGenerator g(this);
    768   Int32BinopMatcher m(node);
    769   Emit(kMipsModU, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
    770        g.UseRegister(m.right().node()));
    771 }
    772 
    773 
// Widen float32 to float64 (cvt.d.s).
void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
  VisitRR(this, kMipsCvtDS, node);
}
    777 
    778 
// Convert signed int32 to float32 (cvt.s.w).
void InstructionSelector::VisitRoundInt32ToFloat32(Node* node) {
  VisitRR(this, kMipsCvtSW, node);
}
    782 
    783 
// Convert unsigned int32 to float32.
void InstructionSelector::VisitRoundUint32ToFloat32(Node* node) {
  VisitRR(this, kMipsCvtSUw, node);
}
    787 
    788 
// Convert signed int32 to float64 (cvt.d.w).
void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
  VisitRR(this, kMipsCvtDW, node);
}
    792 
    793 
// Convert unsigned int32 to float64.
void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
  VisitRR(this, kMipsCvtDUw, node);
}
    797 
    798 
// Truncate float32 to signed int32 (trunc.w.s).
void InstructionSelector::VisitTruncateFloat32ToInt32(Node* node) {
  VisitRR(this, kMipsTruncWS, node);
}
    802 
    803 
// Truncate float32 to unsigned int32.
void InstructionSelector::VisitTruncateFloat32ToUint32(Node* node) {
  VisitRR(this, kMipsTruncUwS, node);
}
    807 
    808 
// Lowers ChangeFloat64ToInt32. When the input is a covered rounding
// operation (or a float32->float64 widening, possibly of a float32
// rounding operation), the round and the int conversion are fused into a
// single round/convert-to-word instruction.
void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
  MipsOperandGenerator g(this);
  Node* value = node->InputAt(0);
  // Match ChangeFloat64ToInt32(Float64Round##OP) to corresponding instruction
  // which does rounding and conversion to integer format.
  if (CanCover(node, value)) {
    switch (value->opcode()) {
      case IrOpcode::kFloat64RoundDown:
        Emit(kMipsFloorWD, g.DefineAsRegister(node),
             g.UseRegister(value->InputAt(0)));
        return;
      case IrOpcode::kFloat64RoundUp:
        Emit(kMipsCeilWD, g.DefineAsRegister(node),
             g.UseRegister(value->InputAt(0)));
        return;
      case IrOpcode::kFloat64RoundTiesEven:
        Emit(kMipsRoundWD, g.DefineAsRegister(node),
             g.UseRegister(value->InputAt(0)));
        return;
      case IrOpcode::kFloat64RoundTruncate:
        Emit(kMipsTruncWD, g.DefineAsRegister(node),
             g.UseRegister(value->InputAt(0)));
        return;
      default:
        break;
    }
    if (value->opcode() == IrOpcode::kChangeFloat32ToFloat64) {
      Node* next = value->InputAt(0);
      if (CanCover(value, next)) {
        // Match ChangeFloat64ToInt32(ChangeFloat32ToFloat64(Float64Round##OP))
        switch (next->opcode()) {
          case IrOpcode::kFloat32RoundDown:
            Emit(kMipsFloorWS, g.DefineAsRegister(node),
                 g.UseRegister(next->InputAt(0)));
            return;
          case IrOpcode::kFloat32RoundUp:
            Emit(kMipsCeilWS, g.DefineAsRegister(node),
                 g.UseRegister(next->InputAt(0)));
            return;
          case IrOpcode::kFloat32RoundTiesEven:
            Emit(kMipsRoundWS, g.DefineAsRegister(node),
                 g.UseRegister(next->InputAt(0)));
            return;
          case IrOpcode::kFloat32RoundTruncate:
            Emit(kMipsTruncWS, g.DefineAsRegister(node),
                 g.UseRegister(next->InputAt(0)));
            return;
          default:
            // Not a rounding op: truncate straight from the float32 input,
            // skipping the intermediate float64.
            Emit(kMipsTruncWS, g.DefineAsRegister(node),
                 g.UseRegister(value->InputAt(0)));
            return;
        }
      } else {
        // Match float32 -> float64 -> int32 representation change path.
        Emit(kMipsTruncWS, g.DefineAsRegister(node),
             g.UseRegister(value->InputAt(0)));
        return;
      }
    }
  }
  VisitRR(this, kMipsTruncWD, node);
}
    871 
    872 
// Truncate float64 to unsigned int32.
void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
  VisitRR(this, kMipsTruncUwD, node);
}
    876 
// Truncate float64 to unsigned int32 (same lowering as the Change variant).
void InstructionSelector::VisitTruncateFloat64ToUint32(Node* node) {
  VisitRR(this, kMipsTruncUwD, node);
}
    880 
    881 void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
    882   MipsOperandGenerator g(this);
    883   Node* value = node->InputAt(0);
    884   // Match TruncateFloat64ToFloat32(ChangeInt32ToFloat64) to corresponding
    885   // instruction.
    886   if (CanCover(node, value) &&
    887       value->opcode() == IrOpcode::kChangeInt32ToFloat64) {
    888     Emit(kMipsCvtSW, g.DefineAsRegister(node),
    889          g.UseRegister(value->InputAt(0)));
    890     return;
    891   }
    892   VisitRR(this, kMipsCvtSD, node);
    893 }
    894 
// JS-style float64 -> word32 truncation, via the shared arch-level
// kArchTruncateDoubleToI instruction.
void InstructionSelector::VisitTruncateFloat64ToWord32(Node* node) {
  VisitRR(this, kArchTruncateDoubleToI, node);
}
    898 
// Round float64 to int32 via truncation (trunc.w.d).
void InstructionSelector::VisitRoundFloat64ToInt32(Node* node) {
  VisitRR(this, kMipsTruncWD, node);
}
    902 
// Reinterpret float32 bits as int32; reuses the float64 low-word extract.
void InstructionSelector::VisitBitcastFloat32ToInt32(Node* node) {
  VisitRR(this, kMipsFloat64ExtractLowWord32, node);
}
    906 
    907 
// Reinterpret int32 bits as float32 by inserting them into the low word
// of an FP register.
// NOTE(review): the inline 0 immediate appears to fill the slot of the
// (unused) existing-FP-value input of the insert instruction — confirm
// against the code generator's handling of kMipsFloat64InsertLowWord32.
void InstructionSelector::VisitBitcastInt32ToFloat32(Node* node) {
  MipsOperandGenerator g(this);
  Emit(kMipsFloat64InsertLowWord32, g.DefineAsRegister(node),
       ImmediateOperand(ImmediateOperand::INLINE, 0),
       g.UseRegister(node->InputAt(0)));
}
    914 
    915 
    916 void InstructionSelector::VisitFloat32Add(Node* node) {
    917   MipsOperandGenerator g(this);
    918   if (IsMipsArchVariant(kMips32r2)) {  // Select Madd.S(z, x, y).
    919     Float32BinopMatcher m(node);
    920     if (m.left().IsFloat32Mul() && CanCover(node, m.left().node())) {
    921       // For Add.S(Mul.S(x, y), z):
    922       Float32BinopMatcher mleft(m.left().node());
    923       Emit(kMipsMaddS, g.DefineAsRegister(node),
    924            g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
    925            g.UseRegister(mleft.right().node()));
    926       return;
    927     }
    928     if (m.right().IsFloat32Mul() && CanCover(node, m.right().node())) {
    929       // For Add.S(x, Mul.S(y, z)):
    930       Float32BinopMatcher mright(m.right().node());
    931       Emit(kMipsMaddS, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
    932            g.UseRegister(mright.left().node()),
    933            g.UseRegister(mright.right().node()));
    934       return;
    935     }
    936   }
    937   VisitRRR(this, kMipsAddS, node);
    938 }
    939 
    940 
// Float64 addition; on MIPS32r2 a covered multiply feeding either input
// is fused into a double multiply-add.
void InstructionSelector::VisitFloat64Add(Node* node) {
  MipsOperandGenerator g(this);
  if (IsMipsArchVariant(kMips32r2)) {  // Select Madd.D(z, x, y).
    Float64BinopMatcher m(node);
    if (m.left().IsFloat64Mul() && CanCover(node, m.left().node())) {
      // For Add.D(Mul.D(x, y), z):
      Float64BinopMatcher mleft(m.left().node());
      Emit(kMipsMaddD, g.DefineAsRegister(node),
           g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
           g.UseRegister(mleft.right().node()));
      return;
    }
    if (m.right().IsFloat64Mul() && CanCover(node, m.right().node())) {
      // For Add.D(x, Mul.D(y, z)):
      Float64BinopMatcher mright(m.right().node());
      Emit(kMipsMaddD, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
           g.UseRegister(mright.left().node()),
           g.UseRegister(mright.right().node()));
      return;
    }
  }
  VisitRRR(this, kMipsAddD, node);
}
    964 
    965 
// Float32 subtraction; on MIPS32r2 a covered multiply on the left is
// fused into a multiply-subtract.
void InstructionSelector::VisitFloat32Sub(Node* node) {
  MipsOperandGenerator g(this);
  if (IsMipsArchVariant(kMips32r2)) {  // Select Msub.S(z, x, y).
    Float32BinopMatcher m(node);
    if (m.left().IsFloat32Mul() && CanCover(node, m.left().node())) {
      // For Sub.S(Mul.S(x,y), z) select Msub.S(z, x, y).
      Float32BinopMatcher mleft(m.left().node());
      Emit(kMipsMsubS, g.DefineAsRegister(node),
           g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
           g.UseRegister(mleft.right().node()));
      return;
    }
  }
  VisitRRR(this, kMipsSubS, node);
}
    981 
// Float64 subtraction; on MIPS32r2 a covered multiply on the left is
// fused into a double multiply-subtract.
void InstructionSelector::VisitFloat64Sub(Node* node) {
  MipsOperandGenerator g(this);
  if (IsMipsArchVariant(kMips32r2)) {  // Select Msub.D(z, x, y).
    Float64BinopMatcher m(node);
    if (m.left().IsFloat64Mul() && CanCover(node, m.left().node())) {
      // For Sub.D(Mul.D(x,y), z) select Msub.D(z, x, y).
      Float64BinopMatcher mleft(m.left().node());
      Emit(kMipsMsubD, g.DefineAsRegister(node),
           g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
           g.UseRegister(mleft.right().node()));
      return;
    }
  }
  VisitRRR(this, kMipsSubD, node);
}
    997 
// Float32 multiplication (mul.s).
void InstructionSelector::VisitFloat32Mul(Node* node) {
  VisitRRR(this, kMipsMulS, node);
}
   1001 
   1002 
// Float64 multiplication (mul.d).
void InstructionSelector::VisitFloat64Mul(Node* node) {
  VisitRRR(this, kMipsMulD, node);
}
   1006 
   1007 
// Float32 division (div.s).
void InstructionSelector::VisitFloat32Div(Node* node) {
  VisitRRR(this, kMipsDivS, node);
}
   1011 
   1012 
// Float64 division (div.d).
void InstructionSelector::VisitFloat64Div(Node* node) {
  VisitRRR(this, kMipsDivD, node);
}
   1016 
   1017 
   1018 void InstructionSelector::VisitFloat64Mod(Node* node) {
   1019   MipsOperandGenerator g(this);
   1020   Emit(kMipsModD, g.DefineAsFixed(node, f0), g.UseFixed(node->InputAt(0), f12),
   1021        g.UseFixed(node->InputAt(1), f14))->MarkAsCall();
   1022 }
   1023 
   1024 void InstructionSelector::VisitFloat32Max(Node* node) {
   1025   MipsOperandGenerator g(this);
   1026   Emit(kMipsFloat32Max, g.DefineAsRegister(node),
   1027        g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
   1028 }
   1029 
   1030 void InstructionSelector::VisitFloat64Max(Node* node) {
   1031   MipsOperandGenerator g(this);
   1032   Emit(kMipsFloat64Max, g.DefineAsRegister(node),
   1033        g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
   1034 }
   1035 
   1036 void InstructionSelector::VisitFloat32Min(Node* node) {
   1037   MipsOperandGenerator g(this);
   1038   Emit(kMipsFloat32Min, g.DefineAsRegister(node),
   1039        g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
   1040 }
   1041 
   1042 void InstructionSelector::VisitFloat64Min(Node* node) {
   1043   MipsOperandGenerator g(this);
   1044   Emit(kMipsFloat64Min, g.DefineAsRegister(node),
   1045        g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
   1046 }
   1047 
   1048 
// Float32 absolute value (abs.s).
void InstructionSelector::VisitFloat32Abs(Node* node) {
  VisitRR(this, kMipsAbsS, node);
}
   1052 
   1053 
// Float64 absolute value (abs.d).
void InstructionSelector::VisitFloat64Abs(Node* node) {
  VisitRR(this, kMipsAbsD, node);
}
   1057 
// Float32 square root (sqrt.s).
void InstructionSelector::VisitFloat32Sqrt(Node* node) {
  VisitRR(this, kMipsSqrtS, node);
}
   1061 
   1062 
// Float64 square root (sqrt.d).
void InstructionSelector::VisitFloat64Sqrt(Node* node) {
  VisitRR(this, kMipsSqrtD, node);
}
   1066 
   1067 
// Float32 round toward negative infinity (floor).
void InstructionSelector::VisitFloat32RoundDown(Node* node) {
  VisitRR(this, kMipsFloat32RoundDown, node);
}
   1071 
   1072 
// Float64 round toward negative infinity (floor).
void InstructionSelector::VisitFloat64RoundDown(Node* node) {
  VisitRR(this, kMipsFloat64RoundDown, node);
}
   1076 
   1077 
// Float32 round toward positive infinity (ceil).
void InstructionSelector::VisitFloat32RoundUp(Node* node) {
  VisitRR(this, kMipsFloat32RoundUp, node);
}
   1081 
   1082 
// Float64 round toward positive infinity (ceil).
void InstructionSelector::VisitFloat64RoundUp(Node* node) {
  VisitRR(this, kMipsFloat64RoundUp, node);
}
   1086 
   1087 
// Float32 round toward zero (truncate).
void InstructionSelector::VisitFloat32RoundTruncate(Node* node) {
  VisitRR(this, kMipsFloat32RoundTruncate, node);
}
   1091 
   1092 
// Float64 round toward zero (truncate).
void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
  VisitRR(this, kMipsFloat64RoundTruncate, node);
}
   1096 
   1097 
// Round-ties-away is not supported by this port; the instruction selector
// must never be asked to lower it.
void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
  UNREACHABLE();
}
   1101 
   1102 
// Float32 round to nearest, ties to even.
void InstructionSelector::VisitFloat32RoundTiesEven(Node* node) {
  VisitRR(this, kMipsFloat32RoundTiesEven, node);
}
   1106 
   1107 
// Float64 round to nearest, ties to even.
void InstructionSelector::VisitFloat64RoundTiesEven(Node* node) {
  VisitRR(this, kMipsFloat64RoundTiesEven, node);
}
   1111 
// Float32 negation (neg.s).
void InstructionSelector::VisitFloat32Neg(Node* node) {
  VisitRR(this, kMipsNegS, node);
}
   1115 
// Float64 negation (neg.d).
void InstructionSelector::VisitFloat64Neg(Node* node) {
  VisitRR(this, kMipsNegD, node);
}
   1119 
   1120 void InstructionSelector::VisitFloat64Ieee754Binop(Node* node,
   1121                                                    InstructionCode opcode) {
   1122   MipsOperandGenerator g(this);
   1123   Emit(opcode, g.DefineAsFixed(node, f0), g.UseFixed(node->InputAt(0), f2),
   1124        g.UseFixed(node->InputAt(1), f4))
   1125       ->MarkAsCall();
   1126 }
   1127 
   1128 void InstructionSelector::VisitFloat64Ieee754Unop(Node* node,
   1129                                                   InstructionCode opcode) {
   1130   MipsOperandGenerator g(this);
   1131   Emit(opcode, g.DefineAsFixed(node, f0), g.UseFixed(node->InputAt(0), f12))
   1132       ->MarkAsCall();
   1133 }
   1134 
   1135 void InstructionSelector::EmitPrepareArguments(
   1136     ZoneVector<PushParameter>* arguments, const CallDescriptor* descriptor,
   1137     Node* node) {
   1138   MipsOperandGenerator g(this);
   1139 
   1140   // Prepare for C function call.
   1141   if (descriptor->IsCFunctionCall()) {
   1142     Emit(kArchPrepareCallCFunction |
   1143              MiscField::encode(static_cast<int>(descriptor->ParameterCount())),
   1144          0, nullptr, 0, nullptr);
   1145 
   1146     // Poke any stack arguments.
   1147     int slot = kCArgSlotCount;
   1148     for (PushParameter input : (*arguments)) {
   1149       if (input.node()) {
   1150         Emit(kMipsStoreToStackSlot, g.NoOutput(), g.UseRegister(input.node()),
   1151              g.TempImmediate(slot << kPointerSizeLog2));
   1152         ++slot;
   1153       }
   1154     }
   1155   } else {
   1156     // Possibly align stack here for functions.
   1157     int push_count = static_cast<int>(descriptor->StackParameterCount());
   1158     if (push_count > 0) {
   1159       Emit(kMipsStackClaim, g.NoOutput(),
   1160            g.TempImmediate(push_count << kPointerSizeLog2));
   1161     }
   1162     for (size_t n = 0; n < arguments->size(); ++n) {
   1163       PushParameter input = (*arguments)[n];
   1164       if (input.node()) {
   1165         Emit(kMipsStoreToStackSlot, g.NoOutput(), g.UseRegister(input.node()),
   1166              g.TempImmediate(n << kPointerSizeLog2));
   1167       }
   1168     }
   1169   }
   1170 }
   1171 
   1172 
// Tail-call targets are never encoded as immediates on this port.
bool InstructionSelector::IsTailCallAddressImmediate() { return false; }
   1174 
// Tail calls out of a JSFunction need three temporary registers here.
int InstructionSelector::GetTempsCountForTailCallFromJSFunction() { return 3; }
   1176 
// Lowers an unaligned load to the matching MIPS unaligned-load opcode.
// Byte-sized loads never reach here (UNREACHABLE), and 64-bit/SIMD
// representations are not handled by this 32-bit port.
void InstructionSelector::VisitUnalignedLoad(Node* node) {
  UnalignedLoadRepresentation load_rep =
      UnalignedLoadRepresentationOf(node->op());
  MipsOperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);

  ArchOpcode opcode = kArchNop;
  switch (load_rep.representation()) {
    case MachineRepresentation::kBit:  // Fall through.
    case MachineRepresentation::kWord8:
      UNREACHABLE();
      break;
    case MachineRepresentation::kWord16:
      opcode = load_rep.IsUnsigned() ? kMipsUlhu : kMipsUlh;
      break;
    case MachineRepresentation::kTaggedSigned:   // Fall through.
    case MachineRepresentation::kTaggedPointer:  // Fall through.
    case MachineRepresentation::kTagged:  // Fall through.
    case MachineRepresentation::kWord32:
      opcode = kMipsUlw;
      break;
    case MachineRepresentation::kFloat32:
      opcode = kMipsUlwc1;
      break;
    case MachineRepresentation::kFloat64:
      opcode = kMipsUldc1;
      break;
    case MachineRepresentation::kWord64:   // Fall through.
    case MachineRepresentation::kSimd128:  // Fall through.
    case MachineRepresentation::kSimd1x4:  // Fall through.
    case MachineRepresentation::kSimd1x8:  // Fall through.
    case MachineRepresentation::kSimd1x16:  // Fall through.
    case MachineRepresentation::kNone:
      UNREACHABLE();
      return;
  }

  if (g.CanBeImmediate(index, opcode)) {
    // Index fits the immediate field: load from [base + #index].
    Emit(opcode | AddressingModeField::encode(kMode_MRI),
         g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index));
  } else {
    // Index too large for an immediate: materialize base + index first.
    InstructionOperand addr_reg = g.TempRegister();
    Emit(kMipsAdd | AddressingModeField::encode(kMode_None), addr_reg,
         g.UseRegister(index), g.UseRegister(base));
    // Emit desired load opcode, using temp addr_reg.
    Emit(opcode | AddressingModeField::encode(kMode_MRI),
         g.DefineAsRegister(node), addr_reg, g.TempImmediate(0));
  }
}
   1227 
// Lowers an unaligned store to the matching MIPS unaligned-store opcode.
// Byte-sized stores never reach here (UNREACHABLE), and 64-bit/SIMD
// representations are not handled by this 32-bit port. A constant-zero
// value is passed as an immediate (zero register).
void InstructionSelector::VisitUnalignedStore(Node* node) {
  MipsOperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* value = node->InputAt(2);

  UnalignedStoreRepresentation rep = UnalignedStoreRepresentationOf(node->op());

  // TODO(mips): I guess this could be done in a better way.
  ArchOpcode opcode = kArchNop;
  switch (rep) {
    case MachineRepresentation::kFloat32:
      opcode = kMipsUswc1;
      break;
    case MachineRepresentation::kFloat64:
      opcode = kMipsUsdc1;
      break;
    case MachineRepresentation::kBit:  // Fall through.
    case MachineRepresentation::kWord8:
      UNREACHABLE();
      break;
    case MachineRepresentation::kWord16:
      opcode = kMipsUsh;
      break;
    case MachineRepresentation::kTaggedSigned:   // Fall through.
    case MachineRepresentation::kTaggedPointer:  // Fall through.
    case MachineRepresentation::kTagged:  // Fall through.
    case MachineRepresentation::kWord32:
      opcode = kMipsUsw;
      break;
    case MachineRepresentation::kWord64:   // Fall through.
    case MachineRepresentation::kSimd128:  // Fall through.
    case MachineRepresentation::kSimd1x4:  // Fall through.
    case MachineRepresentation::kSimd1x8:  // Fall through.
    case MachineRepresentation::kSimd1x16:  // Fall through.
    case MachineRepresentation::kNone:
      UNREACHABLE();
      return;
  }

  if (g.CanBeImmediate(index, opcode)) {
    // Index fits the immediate field: store to [base + #index].
    Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
         g.UseRegister(base), g.UseImmediate(index),
         g.UseRegisterOrImmediateZero(value));
  } else {
    // Index too large for an immediate: materialize base + index first.
    InstructionOperand addr_reg = g.TempRegister();
    Emit(kMipsAdd | AddressingModeField::encode(kMode_None), addr_reg,
         g.UseRegister(index), g.UseRegister(base));
    // Emit desired store opcode, using temp addr_reg.
    Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
         addr_reg, g.TempImmediate(0), g.UseRegisterOrImmediateZero(value));
  }
}
   1281 
   1282 void InstructionSelector::VisitCheckedLoad(Node* node) {
   1283   CheckedLoadRepresentation load_rep = CheckedLoadRepresentationOf(node->op());
   1284   MipsOperandGenerator g(this);
   1285   Node* const buffer = node->InputAt(0);
   1286   Node* const offset = node->InputAt(1);
   1287   Node* const length = node->InputAt(2);
   1288   ArchOpcode opcode = kArchNop;
   1289   switch (load_rep.representation()) {
   1290     case MachineRepresentation::kWord8:
   1291       opcode = load_rep.IsSigned() ? kCheckedLoadInt8 : kCheckedLoadUint8;
   1292       break;
   1293     case MachineRepresentation::kWord16:
   1294       opcode = load_rep.IsSigned() ? kCheckedLoadInt16 : kCheckedLoadUint16;
   1295       break;
   1296     case MachineRepresentation::kWord32:
   1297       opcode = kCheckedLoadWord32;
   1298       break;
   1299     case MachineRepresentation::kFloat32:
   1300       opcode = kCheckedLoadFloat32;
   1301       break;
   1302     case MachineRepresentation::kFloat64:
   1303       opcode = kCheckedLoadFloat64;
   1304       break;
   1305     case MachineRepresentation::kBit:      // Fall through.
   1306     case MachineRepresentation::kTaggedSigned:   // Fall through.
   1307     case MachineRepresentation::kTaggedPointer:  // Fall through.
   1308     case MachineRepresentation::kTagged:   // Fall through.
   1309     case MachineRepresentation::kWord64:   // Fall through.
   1310     case MachineRepresentation::kSimd128:  // Fall through.
   1311     case MachineRepresentation::kSimd1x4:  // Fall through.
   1312     case MachineRepresentation::kSimd1x8:  // Fall through.
   1313     case MachineRepresentation::kSimd1x16:  // Fall through.
   1314     case MachineRepresentation::kNone:
   1315       UNREACHABLE();
   1316       return;
   1317   }
   1318   InstructionOperand offset_operand = g.CanBeImmediate(offset, opcode)
   1319                                           ? g.UseImmediate(offset)
   1320                                           : g.UseRegister(offset);
   1321 
   1322   InstructionOperand length_operand = (!g.CanBeImmediate(offset, opcode))
   1323                                           ? g.CanBeImmediate(length, opcode)
   1324                                                 ? g.UseImmediate(length)
   1325                                                 : g.UseRegister(length)
   1326                                           : g.UseRegister(length);
   1327 
   1328   Emit(opcode | AddressingModeField::encode(kMode_MRI),
   1329        g.DefineAsRegister(node), offset_operand, length_operand,
   1330        g.UseRegister(buffer));
   1331 }
   1332 
   1333 
   1334 void InstructionSelector::VisitCheckedStore(Node* node) {
   1335   MachineRepresentation rep = CheckedStoreRepresentationOf(node->op());
   1336   MipsOperandGenerator g(this);
   1337   Node* const buffer = node->InputAt(0);
   1338   Node* const offset = node->InputAt(1);
   1339   Node* const length = node->InputAt(2);
   1340   Node* const value = node->InputAt(3);
   1341   ArchOpcode opcode = kArchNop;
   1342   switch (rep) {
   1343     case MachineRepresentation::kWord8:
   1344       opcode = kCheckedStoreWord8;
   1345       break;
   1346     case MachineRepresentation::kWord16:
   1347       opcode = kCheckedStoreWord16;
   1348       break;
   1349     case MachineRepresentation::kWord32:
   1350       opcode = kCheckedStoreWord32;
   1351       break;
   1352     case MachineRepresentation::kFloat32:
   1353       opcode = kCheckedStoreFloat32;
   1354       break;
   1355     case MachineRepresentation::kFloat64:
   1356       opcode = kCheckedStoreFloat64;
   1357       break;
   1358     default:
   1359       UNREACHABLE();
   1360       return;
   1361   }
   1362   InstructionOperand offset_operand = g.CanBeImmediate(offset, opcode)
   1363                                           ? g.UseImmediate(offset)
   1364                                           : g.UseRegister(offset);
   1365 
   1366   InstructionOperand length_operand = (!g.CanBeImmediate(offset, opcode))
   1367                                           ? g.CanBeImmediate(length, opcode)
   1368                                                 ? g.UseImmediate(length)
   1369                                                 : g.UseRegister(length)
   1370                                           : g.UseRegister(length);
   1371 
   1372   Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
   1373        offset_operand, length_operand, g.UseRegisterOrImmediateZero(value),
   1374        g.UseRegister(buffer));
   1375 }
   1376 
   1377 
   1378 namespace {
   1379 // Shared routine for multiple compare operations.
// Emits a comparison whose result is consumed according to the flags
// continuation: a branch, a deoptimization, a materialized boolean, or a
// trap.
static void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
                         InstructionOperand left, InstructionOperand right,
                         FlagsContinuation* cont) {
  MipsOperandGenerator g(selector);
  opcode = cont->Encode(opcode);  // Fold the condition into the opcode.
  if (cont->IsBranch()) {
    // Branch: the true/false block labels are extra inputs.
    selector->Emit(opcode, g.NoOutput(), left, right,
                   g.Label(cont->true_block()), g.Label(cont->false_block()));
  } else if (cont->IsDeoptimize()) {
    // Deoptimize: attach kind, reason and frame state.
    selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->kind(),
                             cont->reason(), cont->frame_state());
  } else if (cont->IsSet()) {
    // Materialize the comparison result into a register.
    selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
  } else {
    DCHECK(cont->IsTrap());
    // Trap: the trap id is an extra immediate input.
    selector->Emit(opcode, g.NoOutput(), left, right,
                   g.TempImmediate(cont->trap_id()));
  }
}
   1399 
   1400 
   1401 // Shared routine for multiple float32 compare operations.
   1402 void VisitFloat32Compare(InstructionSelector* selector, Node* node,
   1403                          FlagsContinuation* cont) {
   1404   MipsOperandGenerator g(selector);
   1405   Float32BinopMatcher m(node);
   1406   InstructionOperand lhs, rhs;
   1407 
   1408   lhs = m.left().IsZero() ? g.UseImmediate(m.left().node())
   1409                           : g.UseRegister(m.left().node());
   1410   rhs = m.right().IsZero() ? g.UseImmediate(m.right().node())
   1411                            : g.UseRegister(m.right().node());
   1412   VisitCompare(selector, kMipsCmpS, lhs, rhs, cont);
   1413 }
   1414 
   1415 
   1416 // Shared routine for multiple float64 compare operations.
   1417 void VisitFloat64Compare(InstructionSelector* selector, Node* node,
   1418                          FlagsContinuation* cont) {
   1419   MipsOperandGenerator g(selector);
   1420   Float64BinopMatcher m(node);
   1421   InstructionOperand lhs, rhs;
   1422 
   1423   lhs = m.left().IsZero() ? g.UseImmediate(m.left().node())
   1424                           : g.UseRegister(m.left().node());
   1425   rhs = m.right().IsZero() ? g.UseImmediate(m.right().node())
   1426                            : g.UseRegister(m.right().node());
   1427   VisitCompare(selector, kMipsCmpD, lhs, rhs, cont);
   1428 }
   1429 
   1430 
   1431 // Shared routine for multiple word compare operations.
// Emits a word comparison, folding a constant operand into the immediate
// field where the instruction and the continuation's condition allow it.
// `commutative` says whether the operands may swap sides without commuting
// the condition; when the immediate ends up on the left of a
// non-commutative compare, the condition is commuted instead.
void VisitWordCompare(InstructionSelector* selector, Node* node,
                      InstructionCode opcode, FlagsContinuation* cont,
                      bool commutative) {
  MipsOperandGenerator g(selector);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);

  // Match immediates on left or right side of comparison.
  if (g.CanBeImmediate(right, opcode)) {
    if (opcode == kMipsTst) {
      VisitCompare(selector, opcode, g.UseRegister(left), g.UseImmediate(right),
                   cont);
    } else {
      switch (cont->condition()) {
        case kEqual:
        case kNotEqual:
          if (cont->IsSet()) {
            VisitCompare(selector, opcode, g.UseRegister(left),
                         g.UseImmediate(right), cont);
          } else {
            // NOTE(review): equality continuations that are not Set keep
            // the operand in a register — presumably the branch forms
            // cannot take an immediate; confirm in the code generator.
            VisitCompare(selector, opcode, g.UseRegister(left),
                         g.UseRegister(right), cont);
          }
          break;
        case kSignedLessThan:
        case kSignedGreaterThanOrEqual:
        case kUnsignedLessThan:
        case kUnsignedGreaterThanOrEqual:
          VisitCompare(selector, opcode, g.UseRegister(left),
                       g.UseImmediate(right), cont);
          break;
        default:
          VisitCompare(selector, opcode, g.UseRegister(left),
                       g.UseRegister(right), cont);
      }
    }
  } else if (g.CanBeImmediate(left, opcode)) {
    // Immediate on the left: commute the condition unless the compare is
    // commutative, then mirror the right-immediate logic above.
    if (!commutative) cont->Commute();
    if (opcode == kMipsTst) {
      VisitCompare(selector, opcode, g.UseRegister(right), g.UseImmediate(left),
                   cont);
    } else {
      switch (cont->condition()) {
        case kEqual:
        case kNotEqual:
          if (cont->IsSet()) {
            VisitCompare(selector, opcode, g.UseRegister(right),
                         g.UseImmediate(left), cont);
          } else {
            VisitCompare(selector, opcode, g.UseRegister(right),
                         g.UseRegister(left), cont);
          }
          break;
        case kSignedLessThan:
        case kSignedGreaterThanOrEqual:
        case kUnsignedLessThan:
        case kUnsignedGreaterThanOrEqual:
          VisitCompare(selector, opcode, g.UseRegister(right),
                       g.UseImmediate(left), cont);
          break;
        default:
          VisitCompare(selector, opcode, g.UseRegister(right),
                       g.UseRegister(left), cont);
      }
    }
  } else {
    // Neither side fits an immediate: compare two registers.
    VisitCompare(selector, opcode, g.UseRegister(left), g.UseRegister(right),
                 cont);
  }
}
   1502 
   1503 
   1504 void VisitWordCompare(InstructionSelector* selector, Node* node,
   1505                       FlagsContinuation* cont) {
   1506   VisitWordCompare(selector, node, kMipsCmp, cont, false);
   1507 }
   1508 
// Shared routine for word comparisons against zero. Tries to fuse the node
// producing |value| into the continuation |cont| (branch/deopt/set/trap);
// if no fusion is possible, emits an explicit kMipsCmp against 0.
void VisitWordCompareZero(InstructionSelector* selector, Node* user,
                          Node* value, FlagsContinuation* cont) {
  // Try to combine with comparisons against 0 by simply inverting the branch.
  while (value->opcode() == IrOpcode::kWord32Equal &&
         selector->CanCover(user, value)) {
    Int32BinopMatcher m(value);
    if (!m.right().Is(0)) break;

    // Peel off the (x == 0) wrapper and negate the continuation instead.
    user = value;
    value = m.left().node();
    cont->Negate();
  }

  // Fusion is only legal when this is the sole (schedulable) use of |value|.
  if (selector->CanCover(user, value)) {
    switch (value->opcode()) {
      case IrOpcode::kWord32Equal:
        cont->OverwriteAndNegateIfEqual(kEqual);
        return VisitWordCompare(selector, value, cont);
      case IrOpcode::kInt32LessThan:
        cont->OverwriteAndNegateIfEqual(kSignedLessThan);
        return VisitWordCompare(selector, value, cont);
      case IrOpcode::kInt32LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
        return VisitWordCompare(selector, value, cont);
      case IrOpcode::kUint32LessThan:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
        return VisitWordCompare(selector, value, cont);
      case IrOpcode::kUint32LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
        return VisitWordCompare(selector, value, cont);
      // Floating-point comparisons are mapped onto the unsigned condition
      // codes (same mapping as the VisitFloat*LessThan* setters below).
      case IrOpcode::kFloat32Equal:
        cont->OverwriteAndNegateIfEqual(kEqual);
        return VisitFloat32Compare(selector, value, cont);
      case IrOpcode::kFloat32LessThan:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
        return VisitFloat32Compare(selector, value, cont);
      case IrOpcode::kFloat32LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
        return VisitFloat32Compare(selector, value, cont);
      case IrOpcode::kFloat64Equal:
        cont->OverwriteAndNegateIfEqual(kEqual);
        return VisitFloat64Compare(selector, value, cont);
      case IrOpcode::kFloat64LessThan:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
        return VisitFloat64Compare(selector, value, cont);
      case IrOpcode::kFloat64LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
        return VisitFloat64Compare(selector, value, cont);
      case IrOpcode::kProjection:
        // Check if this is the overflow output projection of an
        // <Operation>WithOverflow node.
        if (ProjectionIndexOf(value->op()) == 1u) {
          // We cannot combine the <Operation>WithOverflow with this branch
          // unless the 0th projection (the use of the actual value of the
          // <Operation> is either nullptr, which means there's no use of the
          // actual value, or was already defined, which means it is scheduled
          // *AFTER* this branch).
          Node* const node = value->InputAt(0);
          Node* const result = NodeProperties::FindProjection(node, 0);
          if (!result || selector->IsDefined(result)) {
            switch (node->opcode()) {
              case IrOpcode::kInt32AddWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop(selector, node, kMipsAddOvf, cont);
              case IrOpcode::kInt32SubWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop(selector, node, kMipsSubOvf, cont);
              case IrOpcode::kInt32MulWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop(selector, node, kMipsMulOvf, cont);
              default:
                break;
            }
          }
        }
        break;
      case IrOpcode::kWord32And:
        // (x & mask) compared against zero: use a test instruction. kMipsTst
        // is commutative, so either operand may become the immediate.
        return VisitWordCompare(selector, value, kMipsTst, cont, true);
      default:
        break;
    }
  }

  // Continuation could not be combined with a compare, emit compare against 0.
  MipsOperandGenerator g(selector);
  InstructionCode const opcode = cont->Encode(kMipsCmp);
  InstructionOperand const value_operand = g.UseRegister(value);
  if (cont->IsBranch()) {
    selector->Emit(opcode, g.NoOutput(), value_operand, g.TempImmediate(0),
                   g.Label(cont->true_block()), g.Label(cont->false_block()));
  } else if (cont->IsDeoptimize()) {
    selector->EmitDeoptimize(opcode, g.NoOutput(), value_operand,
                             g.TempImmediate(0), cont->kind(), cont->reason(),
                             cont->frame_state());
  } else if (cont->IsSet()) {
    selector->Emit(opcode, g.DefineAsRegister(cont->result()), value_operand,
                   g.TempImmediate(0));
  } else {
    DCHECK(cont->IsTrap());
    selector->Emit(opcode, g.NoOutput(), value_operand, g.TempImmediate(0),
                   g.TempImmediate(cont->trap_id()));
  }
}
   1613 
   1614 }  // namespace
   1615 
   1616 void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
   1617                                       BasicBlock* fbranch) {
   1618   FlagsContinuation cont(kNotEqual, tbranch, fbranch);
   1619   VisitWordCompareZero(this, branch, branch->InputAt(0), &cont);
   1620 }
   1621 
   1622 void InstructionSelector::VisitDeoptimizeIf(Node* node) {
   1623   DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
   1624   FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
   1625       kNotEqual, p.kind(), p.reason(), node->InputAt(1));
   1626   VisitWordCompareZero(this, node, node->InputAt(0), &cont);
   1627 }
   1628 
   1629 void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
   1630   DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
   1631   FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
   1632       kEqual, p.kind(), p.reason(), node->InputAt(1));
   1633   VisitWordCompareZero(this, node, node->InputAt(0), &cont);
   1634 }
   1635 
   1636 void InstructionSelector::VisitTrapIf(Node* node, Runtime::FunctionId func_id) {
   1637   FlagsContinuation cont =
   1638       FlagsContinuation::ForTrap(kNotEqual, func_id, node->InputAt(1));
   1639   VisitWordCompareZero(this, node, node->InputAt(0), &cont);
   1640 }
   1641 
   1642 void InstructionSelector::VisitTrapUnless(Node* node,
   1643                                           Runtime::FunctionId func_id) {
   1644   FlagsContinuation cont =
   1645       FlagsContinuation::ForTrap(kEqual, func_id, node->InputAt(1));
   1646   VisitWordCompareZero(this, node, node->InputAt(0), &cont);
   1647 }
   1648 
   1649 void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
   1650   MipsOperandGenerator g(this);
   1651   InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
   1652 
   1653   // Emit either ArchTableSwitch or ArchLookupSwitch.
   1654   size_t table_space_cost = 9 + sw.value_range;
   1655   size_t table_time_cost = 3;
   1656   size_t lookup_space_cost = 2 + 2 * sw.case_count;
   1657   size_t lookup_time_cost = sw.case_count;
   1658   if (sw.case_count > 0 &&
   1659       table_space_cost + 3 * table_time_cost <=
   1660           lookup_space_cost + 3 * lookup_time_cost &&
   1661       sw.min_value > std::numeric_limits<int32_t>::min()) {
   1662     InstructionOperand index_operand = value_operand;
   1663     if (sw.min_value) {
   1664       index_operand = g.TempRegister();
   1665       Emit(kMipsSub, index_operand, value_operand,
   1666            g.TempImmediate(sw.min_value));
   1667     }
   1668     // Generate a table lookup.
   1669     return EmitTableSwitch(sw, index_operand);
   1670   }
   1671 
   1672   // Generate a sequence of conditional jumps.
   1673   return EmitLookupSwitch(sw, value_operand);
   1674 }
   1675 
   1676 
   1677 void InstructionSelector::VisitWord32Equal(Node* const node) {
   1678   FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
   1679   Int32BinopMatcher m(node);
   1680   if (m.right().Is(0)) {
   1681     return VisitWordCompareZero(this, m.node(), m.left().node(), &cont);
   1682   }
   1683   VisitWordCompare(this, node, &cont);
   1684 }
   1685 
   1686 
   1687 void InstructionSelector::VisitInt32LessThan(Node* node) {
   1688   FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
   1689   VisitWordCompare(this, node, &cont);
   1690 }
   1691 
   1692 
   1693 void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
   1694   FlagsContinuation cont =
   1695       FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
   1696   VisitWordCompare(this, node, &cont);
   1697 }
   1698 
   1699 
   1700 void InstructionSelector::VisitUint32LessThan(Node* node) {
   1701   FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
   1702   VisitWordCompare(this, node, &cont);
   1703 }
   1704 
   1705 
   1706 void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
   1707   FlagsContinuation cont =
   1708       FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
   1709   VisitWordCompare(this, node, &cont);
   1710 }
   1711 
   1712 
   1713 void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
   1714   if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
   1715     FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
   1716     return VisitBinop(this, node, kMipsAddOvf, &cont);
   1717   }
   1718   FlagsContinuation cont;
   1719   VisitBinop(this, node, kMipsAddOvf, &cont);
   1720 }
   1721 
   1722 
   1723 void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
   1724   if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
   1725     FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
   1726     return VisitBinop(this, node, kMipsSubOvf, &cont);
   1727   }
   1728   FlagsContinuation cont;
   1729   VisitBinop(this, node, kMipsSubOvf, &cont);
   1730 }
   1731 
   1732 void InstructionSelector::VisitInt32MulWithOverflow(Node* node) {
   1733   if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
   1734     FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
   1735     return VisitBinop(this, node, kMipsMulOvf, &cont);
   1736   }
   1737   FlagsContinuation cont;
   1738   VisitBinop(this, node, kMipsMulOvf, &cont);
   1739 }
   1740 
   1741 void InstructionSelector::VisitFloat32Equal(Node* node) {
   1742   FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
   1743   VisitFloat32Compare(this, node, &cont);
   1744 }
   1745 
   1746 
   1747 void InstructionSelector::VisitFloat32LessThan(Node* node) {
   1748   FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
   1749   VisitFloat32Compare(this, node, &cont);
   1750 }
   1751 
   1752 
   1753 void InstructionSelector::VisitFloat32LessThanOrEqual(Node* node) {
   1754   FlagsContinuation cont =
   1755       FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
   1756   VisitFloat32Compare(this, node, &cont);
   1757 }
   1758 
   1759 
   1760 void InstructionSelector::VisitFloat64Equal(Node* node) {
   1761   FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
   1762   VisitFloat64Compare(this, node, &cont);
   1763 }
   1764 
   1765 
   1766 void InstructionSelector::VisitFloat64LessThan(Node* node) {
   1767   FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
   1768   VisitFloat64Compare(this, node, &cont);
   1769 }
   1770 
   1771 
   1772 void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
   1773   FlagsContinuation cont =
   1774       FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
   1775   VisitFloat64Compare(this, node, &cont);
   1776 }
   1777 
   1778 
   1779 void InstructionSelector::VisitFloat64ExtractLowWord32(Node* node) {
   1780   MipsOperandGenerator g(this);
   1781   Emit(kMipsFloat64ExtractLowWord32, g.DefineAsRegister(node),
   1782        g.UseRegister(node->InputAt(0)));
   1783 }
   1784 
   1785 
   1786 void InstructionSelector::VisitFloat64ExtractHighWord32(Node* node) {
   1787   MipsOperandGenerator g(this);
   1788   Emit(kMipsFloat64ExtractHighWord32, g.DefineAsRegister(node),
   1789        g.UseRegister(node->InputAt(0)));
   1790 }
   1791 
   1792 
   1793 void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) {
   1794   MipsOperandGenerator g(this);
   1795   Node* left = node->InputAt(0);
   1796   Node* right = node->InputAt(1);
   1797   Emit(kMipsFloat64InsertLowWord32, g.DefineSameAsFirst(node),
   1798        g.UseRegister(left), g.UseRegister(right));
   1799 }
   1800 
   1801 
   1802 void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
   1803   MipsOperandGenerator g(this);
   1804   Node* left = node->InputAt(0);
   1805   Node* right = node->InputAt(1);
   1806   Emit(kMipsFloat64InsertHighWord32, g.DefineSameAsFirst(node),
   1807        g.UseRegister(left), g.UseRegister(right));
   1808 }
   1809 
   1810 void InstructionSelector::VisitFloat64SilenceNaN(Node* node) {
   1811   MipsOperandGenerator g(this);
   1812   Node* left = node->InputAt(0);
   1813   InstructionOperand temps[] = {g.TempRegister()};
   1814   Emit(kMipsFloat64SilenceNaN, g.DefineSameAsFirst(node), g.UseRegister(left),
   1815        arraysize(temps), temps);
   1816 }
   1817 
   1818 void InstructionSelector::VisitAtomicLoad(Node* node) {
   1819   LoadRepresentation load_rep = LoadRepresentationOf(node->op());
   1820   MipsOperandGenerator g(this);
   1821   Node* base = node->InputAt(0);
   1822   Node* index = node->InputAt(1);
   1823   ArchOpcode opcode = kArchNop;
   1824   switch (load_rep.representation()) {
   1825     case MachineRepresentation::kWord8:
   1826       opcode = load_rep.IsSigned() ? kAtomicLoadInt8 : kAtomicLoadUint8;
   1827       break;
   1828     case MachineRepresentation::kWord16:
   1829       opcode = load_rep.IsSigned() ? kAtomicLoadInt16 : kAtomicLoadUint16;
   1830       break;
   1831     case MachineRepresentation::kWord32:
   1832       opcode = kAtomicLoadWord32;
   1833       break;
   1834     default:
   1835       UNREACHABLE();
   1836       return;
   1837   }
   1838 
   1839   if (g.CanBeImmediate(index, opcode)) {
   1840     Emit(opcode | AddressingModeField::encode(kMode_MRI),
   1841          g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index));
   1842   } else {
   1843     InstructionOperand addr_reg = g.TempRegister();
   1844     Emit(kMipsAdd | AddressingModeField::encode(kMode_None), addr_reg,
   1845          g.UseRegister(index), g.UseRegister(base));
   1846     // Emit desired load opcode, using temp addr_reg.
   1847     Emit(opcode | AddressingModeField::encode(kMode_MRI),
   1848          g.DefineAsRegister(node), addr_reg, g.TempImmediate(0));
   1849   }
   1850 }
   1851 
   1852 void InstructionSelector::VisitAtomicStore(Node* node) {
   1853   MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
   1854   MipsOperandGenerator g(this);
   1855   Node* base = node->InputAt(0);
   1856   Node* index = node->InputAt(1);
   1857   Node* value = node->InputAt(2);
   1858   ArchOpcode opcode = kArchNop;
   1859   switch (rep) {
   1860     case MachineRepresentation::kWord8:
   1861       opcode = kAtomicStoreWord8;
   1862       break;
   1863     case MachineRepresentation::kWord16:
   1864       opcode = kAtomicStoreWord16;
   1865       break;
   1866     case MachineRepresentation::kWord32:
   1867       opcode = kAtomicStoreWord32;
   1868       break;
   1869     default:
   1870       UNREACHABLE();
   1871       return;
   1872   }
   1873 
   1874   if (g.CanBeImmediate(index, opcode)) {
   1875     Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
   1876          g.UseRegister(base), g.UseImmediate(index),
   1877          g.UseRegisterOrImmediateZero(value));
   1878   } else {
   1879     InstructionOperand addr_reg = g.TempRegister();
   1880     Emit(kMipsAdd | AddressingModeField::encode(kMode_None), addr_reg,
   1881          g.UseRegister(index), g.UseRegister(base));
   1882     // Emit desired store opcode, using temp addr_reg.
   1883     Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
   1884          addr_reg, g.TempImmediate(0), g.UseRegisterOrImmediateZero(value));
   1885   }
   1886 }
   1887 
   1888 // static
   1889 MachineOperatorBuilder::Flags
   1890 InstructionSelector::SupportedMachineOperatorFlags() {
   1891   MachineOperatorBuilder::Flags flags = MachineOperatorBuilder::kNoFlags;
   1892   if ((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
   1893       IsFp64Mode()) {
   1894     flags |= MachineOperatorBuilder::kFloat64RoundDown |
   1895              MachineOperatorBuilder::kFloat64RoundUp |
   1896              MachineOperatorBuilder::kFloat64RoundTruncate |
   1897              MachineOperatorBuilder::kFloat64RoundTiesEven;
   1898   }
   1899 
   1900   return flags | MachineOperatorBuilder::kWord32Ctz |
   1901          MachineOperatorBuilder::kWord32Popcnt |
   1902          MachineOperatorBuilder::kInt32DivIsSafe |
   1903          MachineOperatorBuilder::kUint32DivIsSafe |
   1904          MachineOperatorBuilder::kWord32ShiftIsSafe |
   1905          MachineOperatorBuilder::kFloat32RoundDown |
   1906          MachineOperatorBuilder::kFloat32RoundUp |
   1907          MachineOperatorBuilder::kFloat32RoundTruncate |
   1908          MachineOperatorBuilder::kFloat32RoundTiesEven |
   1909          MachineOperatorBuilder::kWord32ReverseBytes |
   1910          MachineOperatorBuilder::kWord64ReverseBytes;
   1911 }
   1912 
   1913 // static
   1914 MachineOperatorBuilder::AlignmentRequirements
   1915 InstructionSelector::AlignmentRequirements() {
   1916   if (IsMipsArchVariant(kMips32r6)) {
   1917     return MachineOperatorBuilder::AlignmentRequirements::
   1918         FullUnalignedAccessSupport();
   1919   } else {
   1920     DCHECK(IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r1) ||
   1921            IsMipsArchVariant(kMips32r2));
   1922     return MachineOperatorBuilder::AlignmentRequirements::
   1923         NoUnalignedAccessSupport();
   1924   }
   1925 }
   1926 
   1927 }  // namespace compiler
   1928 }  // namespace internal
   1929 }  // namespace v8
   1930