// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/base/adapters.h"
#include "src/base/bits.h"
#include "src/compiler/instruction-selector-impl.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"

namespace v8 {
namespace internal {
namespace compiler {

#define TRACE_UNIMPL() \
  PrintF("UNIMPLEMENTED instr_sel: %s at line %d\n", __FUNCTION__, __LINE__)

#define TRACE() PrintF("instr_sel: %s at line %d\n", __FUNCTION__, __LINE__)

// Adds MIPS64-specific methods for generating InstructionOperands.
class Mips64OperandGenerator final : public OperandGenerator {
 public:
  explicit Mips64OperandGenerator(InstructionSelector* selector)
      : OperandGenerator(selector) {}

  InstructionOperand UseOperand(Node* node, InstructionCode opcode) {
    if (CanBeImmediate(node, opcode)) {
      return UseImmediate(node);
    }
    return UseRegister(node);
  }

  // Use the zero register if the node has the immediate value zero, otherwise
  // assign a register.
  InstructionOperand UseRegisterOrImmediateZero(Node* node) {
    if ((IsIntegerConstant(node) && (GetIntegerConstantValue(node) == 0)) ||
        (IsFloatConstant(node) &&
         (bit_cast<int64_t>(GetFloatConstantValue(node)) == V8_INT64_C(0)))) {
      return UseImmediate(node);
    }
    return UseRegister(node);
  }

  bool IsIntegerConstant(Node* node) {
    return (node->opcode() == IrOpcode::kInt32Constant) ||
           (node->opcode() == IrOpcode::kInt64Constant);
  }

  int64_t GetIntegerConstantValue(Node* node) {
    if (node->opcode() == IrOpcode::kInt32Constant) {
      return OpParameter<int32_t>(node);
    }
    DCHECK(node->opcode() == IrOpcode::kInt64Constant);
    return OpParameter<int64_t>(node);
  }

  bool IsFloatConstant(Node* node) {
    return (node->opcode() == IrOpcode::kFloat32Constant) ||
           (node->opcode() == IrOpcode::kFloat64Constant);
  }

  double GetFloatConstantValue(Node* node) {
    if (node->opcode() == IrOpcode::kFloat32Constant) {
      return OpParameter<float>(node);
    }
    DCHECK_EQ(IrOpcode::kFloat64Constant, node->opcode());
    return OpParameter<double>(node);
  }

  bool CanBeImmediate(Node* node, InstructionCode mode) {
    return IsIntegerConstant(node) &&
           CanBeImmediate(GetIntegerConstantValue(node), mode);
  }

  bool CanBeImmediate(int64_t value, InstructionCode opcode) {
    switch (ArchOpcodeField::decode(opcode)) {
      case kMips64Shl:
      case kMips64Sar:
      case kMips64Shr:
        return is_uint5(value);
      case kMips64Dshl:
      case kMips64Dsar:
      case kMips64Dshr:
        return is_uint6(value);
      case kMips64Add:
      case kMips64And32:
      case kMips64And:
      case kMips64Dadd:
      case kMips64Or32:
      case kMips64Or:
      case kMips64Tst:
      case kMips64Xor:
        return is_uint16(value);
      case kMips64Lb:
      case kMips64Lbu:
      case kMips64Sb:
      case kMips64Lh:
      case kMips64Lhu:
      case kMips64Sh:
      case kMips64Lw:
      case kMips64Sw:
      case kMips64Ld:
      case kMips64Sd:
      case kMips64Lwc1:
      case kMips64Swc1:
      case kMips64Ldc1:
      case kMips64Sdc1:
      case kCheckedLoadInt8:
      case kCheckedLoadUint8:
      case kCheckedLoadInt16:
      case kCheckedLoadUint16:
      case kCheckedLoadWord32:
      case kCheckedLoadWord64:
      case kCheckedStoreWord8:
      case kCheckedStoreWord16:
      case kCheckedStoreWord32:
      case kCheckedStoreWord64:
      case kCheckedLoadFloat32:
      case kCheckedLoadFloat64:
      case kCheckedStoreFloat32:
      case kCheckedStoreFloat64:
        return is_int32(value);
      default:
        return is_int16(value);
    }
  }
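
  // A rough guide to the ranges above (an informal note, not from the
  // original source): the MIPS logical immediates (andi/ori/xori) are
  // 16-bit zero-extended, hence is_uint16; is_int16 is the conservative
  // default for daddiu-style 16-bit signed immediates; memory operands
  // allow is_int32 on the assumption that the macro assembler synthesizes
  // offsets that do not fit the hardware's 16-bit field.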

 private:
  bool ImmediateFitsAddrMode1Instruction(int32_t imm) const {
    TRACE_UNIMPL();
    return false;
  }
};


static void VisitRR(InstructionSelector* selector, ArchOpcode opcode,
                    Node* node) {
  Mips64OperandGenerator g(selector);
  selector->Emit(opcode, g.DefineAsRegister(node),
                 g.UseRegister(node->InputAt(0)));
}


static void VisitRRR(InstructionSelector* selector, ArchOpcode opcode,
                     Node* node) {
  Mips64OperandGenerator g(selector);
  selector->Emit(opcode, g.DefineAsRegister(node),
                 g.UseRegister(node->InputAt(0)),
                 g.UseRegister(node->InputAt(1)));
}


static void VisitRRO(InstructionSelector* selector, ArchOpcode opcode,
                     Node* node) {
  Mips64OperandGenerator g(selector);
  selector->Emit(opcode, g.DefineAsRegister(node),
                 g.UseRegister(node->InputAt(0)),
                 g.UseOperand(node->InputAt(1), opcode));
}

struct ExtendingLoadMatcher {
  ExtendingLoadMatcher(Node* node, InstructionSelector* selector)
      : matches_(false), selector_(selector), base_(nullptr), immediate_(0) {
    Initialize(node);
  }

  bool Matches() const { return matches_; }

  Node* base() const {
    DCHECK(Matches());
    return base_;
  }
  int64_t immediate() const {
    DCHECK(Matches());
    return immediate_;
  }
  ArchOpcode opcode() const {
    DCHECK(Matches());
    return opcode_;
  }

 private:
  bool matches_;
  InstructionSelector* selector_;
  Node* base_;
  int64_t immediate_;
  ArchOpcode opcode_;

  void Initialize(Node* node) {
    Int64BinopMatcher m(node);
    // When loading a 64-bit value and shifting by 32, we should
    // just load and sign-extend the interesting 4 bytes instead.
    // This happens, for example, when we're loading and untagging SMIs.
    DCHECK(m.IsWord64Sar());
    if (m.left().IsLoad() && m.right().Is(32) &&
        selector_->CanCover(m.node(), m.left().node())) {
      MachineRepresentation rep =
          LoadRepresentationOf(m.left().node()->op()).representation();
      DCHECK(ElementSizeLog2Of(rep) == 3);
      if (rep != MachineRepresentation::kTaggedSigned &&
          rep != MachineRepresentation::kTaggedPointer &&
          rep != MachineRepresentation::kTagged &&
          rep != MachineRepresentation::kWord64) {
        return;
      }

      Mips64OperandGenerator g(selector_);
      Node* load = m.left().node();
      Node* offset = load->InputAt(1);
      base_ = load->InputAt(0);
      opcode_ = kMips64Lw;
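      // The interesting 32 bits are the upper word of the 64-bit value: on
      // little-endian targets that word lives at offset + 4, on big-endian
      // targets it sits at the original offset.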
      if (g.CanBeImmediate(offset, opcode_)) {
#if defined(V8_TARGET_LITTLE_ENDIAN)
        immediate_ = g.GetIntegerConstantValue(offset) + 4;
#elif defined(V8_TARGET_BIG_ENDIAN)
        immediate_ = g.GetIntegerConstantValue(offset);
#endif
        matches_ = g.CanBeImmediate(immediate_, kMips64Lw);
      }
    }
  }
};

bool TryEmitExtendingLoad(InstructionSelector* selector, Node* node,
                          Node* output_node) {
  ExtendingLoadMatcher m(node, selector);
  Mips64OperandGenerator g(selector);
  if (m.Matches()) {
    InstructionOperand inputs[2];
    inputs[0] = g.UseRegister(m.base());
    InstructionCode opcode =
        m.opcode() | AddressingModeField::encode(kMode_MRI);
    DCHECK(is_int32(m.immediate()));
    inputs[1] = g.TempImmediate(static_cast<int32_t>(m.immediate()));
    InstructionOperand outputs[] = {g.DefineAsRegister(output_node)};
    selector->Emit(opcode, arraysize(outputs), outputs, arraysize(inputs),
                   inputs);
    return true;
  }
  return false;
}
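
// For instance (an illustrative sketch, not from the original source), on a
// little-endian target Word64Sar(Load[base + 8], 32) matches here and becomes
// a single lw from base + 12, loading and sign-extending just the upper word.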

bool TryMatchImmediate(InstructionSelector* selector,
                       InstructionCode* opcode_return, Node* node,
                       size_t* input_count_return, InstructionOperand* inputs) {
  Mips64OperandGenerator g(selector);
  if (g.CanBeImmediate(node, *opcode_return)) {
    *opcode_return |= AddressingModeField::encode(kMode_MRI);
    inputs[0] = g.UseImmediate(node);
    *input_count_return = 1;
    return true;
  }
  return false;
}

static void VisitBinop(InstructionSelector* selector, Node* node,
                       InstructionCode opcode, bool has_reverse_opcode,
                       InstructionCode reverse_opcode,
                       FlagsContinuation* cont) {
  Mips64OperandGenerator g(selector);
  Int32BinopMatcher m(node);
  InstructionOperand inputs[4];
  size_t input_count = 0;
  InstructionOperand outputs[2];
  size_t output_count = 0;

  if (TryMatchImmediate(selector, &opcode, m.right().node(), &input_count,
                        &inputs[1])) {
    inputs[0] = g.UseRegister(m.left().node());
    input_count++;
  } else if (has_reverse_opcode &&
             TryMatchImmediate(selector, &reverse_opcode, m.left().node(),
                               &input_count, &inputs[1])) {
    inputs[0] = g.UseRegister(m.right().node());
    opcode = reverse_opcode;
    input_count++;
  } else {
    inputs[input_count++] = g.UseRegister(m.left().node());
    inputs[input_count++] = g.UseOperand(m.right().node(), opcode);
  }

  if (cont->IsBranch()) {
    inputs[input_count++] = g.Label(cont->true_block());
    inputs[input_count++] = g.Label(cont->false_block());
  } else if (cont->IsTrap()) {
    inputs[input_count++] = g.TempImmediate(cont->trap_id());
  }

  if (cont->IsDeoptimize()) {
    // If we can deoptimize as a result of the binop, we need to make sure that
    // the deopt inputs are not overwritten by the binop result. One way
    // to achieve that is to declare the output register as same-as-first.
    outputs[output_count++] = g.DefineSameAsFirst(node);
  } else {
    outputs[output_count++] = g.DefineAsRegister(node);
  }
  if (cont->IsSet()) {
    outputs[output_count++] = g.DefineAsRegister(cont->result());
  }

  DCHECK_NE(0u, input_count);
  DCHECK_NE(0u, output_count);
  DCHECK_GE(arraysize(inputs), input_count);
  DCHECK_GE(arraysize(outputs), output_count);

  opcode = cont->Encode(opcode);
  if (cont->IsDeoptimize()) {
    selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
                             cont->kind(), cont->reason(), cont->frame_state());
  } else {
    selector->Emit(opcode, output_count, outputs, input_count, inputs);
  }
}

static void VisitBinop(InstructionSelector* selector, Node* node,
                       InstructionCode opcode, bool has_reverse_opcode,
                       InstructionCode reverse_opcode) {
  FlagsContinuation cont;
  VisitBinop(selector, node, opcode, has_reverse_opcode, reverse_opcode, &cont);
}

static void VisitBinop(InstructionSelector* selector, Node* node,
                       InstructionCode opcode, FlagsContinuation* cont) {
  VisitBinop(selector, node, opcode, false, kArchNop, cont);
}

static void VisitBinop(InstructionSelector* selector, Node* node,
                       InstructionCode opcode) {
  VisitBinop(selector, node, opcode, false, kArchNop);
}

void EmitLoad(InstructionSelector* selector, Node* node, InstructionCode opcode,
              Node* output = nullptr) {
  Mips64OperandGenerator g(selector);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);

  if (g.CanBeImmediate(index, opcode)) {
    selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
                   g.DefineAsRegister(output == nullptr ? node : output),
                   g.UseRegister(base), g.UseImmediate(index));
  } else {
    InstructionOperand addr_reg = g.TempRegister();
    selector->Emit(kMips64Dadd | AddressingModeField::encode(kMode_None),
                   addr_reg, g.UseRegister(index), g.UseRegister(base));
    // Emit desired load opcode, using temp addr_reg.
    selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
                   g.DefineAsRegister(output == nullptr ? node : output),
                   addr_reg, g.TempImmediate(0));
  }
}

void InstructionSelector::VisitLoad(Node* node) {
  LoadRepresentation load_rep = LoadRepresentationOf(node->op());

  ArchOpcode opcode = kArchNop;
  switch (load_rep.representation()) {
    case MachineRepresentation::kFloat32:
      opcode = kMips64Lwc1;
      break;
    case MachineRepresentation::kFloat64:
      opcode = kMips64Ldc1;
      break;
    case MachineRepresentation::kBit:  // Fall through.
    case MachineRepresentation::kWord8:
      opcode = load_rep.IsUnsigned() ? kMips64Lbu : kMips64Lb;
      break;
    case MachineRepresentation::kWord16:
      opcode = load_rep.IsUnsigned() ? kMips64Lhu : kMips64Lh;
      break;
    case MachineRepresentation::kWord32:
      opcode = load_rep.IsUnsigned() ? kMips64Lwu : kMips64Lw;
      break;
    case MachineRepresentation::kTaggedSigned:   // Fall through.
    case MachineRepresentation::kTaggedPointer:  // Fall through.
    case MachineRepresentation::kTagged:  // Fall through.
    case MachineRepresentation::kWord64:
      opcode = kMips64Ld;
      break;
    case MachineRepresentation::kSimd128:  // Fall through.
    case MachineRepresentation::kSimd1x4:  // Fall through.
    case MachineRepresentation::kSimd1x8:  // Fall through.
    case MachineRepresentation::kSimd1x16:  // Fall through.
    case MachineRepresentation::kNone:
      UNREACHABLE();
      return;
  }

  EmitLoad(this, node, opcode);
}

void InstructionSelector::VisitProtectedLoad(Node* node) {
  // TODO(eholk)
  UNIMPLEMENTED();
}

void InstructionSelector::VisitStore(Node* node) {
  Mips64OperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* value = node->InputAt(2);

  StoreRepresentation store_rep = StoreRepresentationOf(node->op());
  WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
  MachineRepresentation rep = store_rep.representation();

  // TODO(mips): I guess this could be done in a better way.
  if (write_barrier_kind != kNoWriteBarrier) {
    DCHECK(CanBeTaggedPointer(rep));
    InstructionOperand inputs[3];
    size_t input_count = 0;
    inputs[input_count++] = g.UseUniqueRegister(base);
    inputs[input_count++] = g.UseUniqueRegister(index);
    inputs[input_count++] = g.UseUniqueRegister(value);
    RecordWriteMode record_write_mode = RecordWriteMode::kValueIsAny;
    switch (write_barrier_kind) {
      case kNoWriteBarrier:
        UNREACHABLE();
        break;
      case kMapWriteBarrier:
        record_write_mode = RecordWriteMode::kValueIsMap;
        break;
      case kPointerWriteBarrier:
        record_write_mode = RecordWriteMode::kValueIsPointer;
        break;
      case kFullWriteBarrier:
        record_write_mode = RecordWriteMode::kValueIsAny;
        break;
    }
    InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
    size_t const temp_count = arraysize(temps);
    InstructionCode code = kArchStoreWithWriteBarrier;
    code |= MiscField::encode(static_cast<int>(record_write_mode));
    Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
  } else {
    ArchOpcode opcode = kArchNop;
    switch (rep) {
      case MachineRepresentation::kFloat32:
        opcode = kMips64Swc1;
        break;
      case MachineRepresentation::kFloat64:
        opcode = kMips64Sdc1;
        break;
      case MachineRepresentation::kBit:  // Fall through.
      case MachineRepresentation::kWord8:
        opcode = kMips64Sb;
        break;
      case MachineRepresentation::kWord16:
        opcode = kMips64Sh;
        break;
      case MachineRepresentation::kWord32:
        opcode = kMips64Sw;
        break;
      case MachineRepresentation::kTaggedSigned:   // Fall through.
      case MachineRepresentation::kTaggedPointer:  // Fall through.
      case MachineRepresentation::kTagged:  // Fall through.
      case MachineRepresentation::kWord64:
        opcode = kMips64Sd;
        break;
      case MachineRepresentation::kSimd128:  // Fall through.
      case MachineRepresentation::kSimd1x4:  // Fall through.
      case MachineRepresentation::kSimd1x8:  // Fall through.
      case MachineRepresentation::kSimd1x16:  // Fall through.
      case MachineRepresentation::kNone:
        UNREACHABLE();
        return;
    }

    if (g.CanBeImmediate(index, opcode)) {
      Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
           g.UseRegister(base), g.UseImmediate(index),
           g.UseRegisterOrImmediateZero(value));
    } else {
      InstructionOperand addr_reg = g.TempRegister();
      Emit(kMips64Dadd | AddressingModeField::encode(kMode_None), addr_reg,
           g.UseRegister(index), g.UseRegister(base));
      // Emit desired store opcode, using temp addr_reg.
      Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
           addr_reg, g.TempImmediate(0), g.UseRegisterOrImmediateZero(value));
    }
  }
}

void InstructionSelector::VisitProtectedStore(Node* node) {
  // TODO(eholk)
  UNIMPLEMENTED();
}

void InstructionSelector::VisitWord32And(Node* node) {
  Mips64OperandGenerator g(this);
  Int32BinopMatcher m(node);
  if (m.left().IsWord32Shr() && CanCover(node, m.left().node()) &&
      m.right().HasValue()) {
    uint32_t mask = m.right().Value();
    uint32_t mask_width = base::bits::CountPopulation32(mask);
    uint32_t mask_msb = base::bits::CountLeadingZeros32(mask);
    if ((mask_width != 0) && (mask_msb + mask_width == 32)) {
      // The mask must be contiguous, and occupy the least-significant bits.
      DCHECK_EQ(0u, base::bits::CountTrailingZeros32(mask));

      // Select Ext for And(Shr(x, imm), mask) where the mask is in the least
      // significant bits.
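      // For example, And(Shr(x, 4), 0xff) becomes Ext(x, 4, 8), extracting
      // bits [11:4] of x into the low bits of the result.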
      Int32BinopMatcher mleft(m.left().node());
      if (mleft.right().HasValue()) {
        // Any shift value can match; int32 shifts use `value % 32`.
        uint32_t lsb = mleft.right().Value() & 0x1f;

        // Ext cannot extract bits past the register size, however since
        // shifting the original value would have introduced some zeros we can
        // still use Ext with a smaller mask and the remaining bits will be
        // zeros.
        if (lsb + mask_width > 32) mask_width = 32 - lsb;

        Emit(kMips64Ext, g.DefineAsRegister(node),
             g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
             g.TempImmediate(mask_width));
        return;
      }
      // Other cases fall through to the normal And operation.
    }
  }
  if (m.right().HasValue()) {
    uint32_t mask = m.right().Value();
    uint32_t shift = base::bits::CountPopulation32(~mask);
    uint32_t msb = base::bits::CountLeadingZeros32(~mask);
    if (shift != 0 && shift != 32 && msb + shift == 32) {
      // Insert zeros for (x >> K) << K => x & ~(2^K - 1) expression reduction
      // and remove constant loading of inverted mask.
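      // For example, And(x, 0xffffff00) becomes Ins(x, zero, 0, 8), clearing
      // the low 8 bits without materializing the inverted mask in a register.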
      Emit(kMips64Ins, g.DefineSameAsFirst(node),
           g.UseRegister(m.left().node()), g.TempImmediate(0),
           g.TempImmediate(shift));
      return;
    }
  }
  VisitBinop(this, node, kMips64And32, true, kMips64And32);
}


void InstructionSelector::VisitWord64And(Node* node) {
  Mips64OperandGenerator g(this);
  Int64BinopMatcher m(node);
  if (m.left().IsWord64Shr() && CanCover(node, m.left().node()) &&
      m.right().HasValue()) {
    uint64_t mask = m.right().Value();
    uint32_t mask_width = base::bits::CountPopulation64(mask);
    uint32_t mask_msb = base::bits::CountLeadingZeros64(mask);
    if ((mask_width != 0) && (mask_msb + mask_width == 64)) {
      // The mask must be contiguous, and occupy the least-significant bits.
      DCHECK_EQ(0u, base::bits::CountTrailingZeros64(mask));

      // Select Dext for And(Shr(x, imm), mask) where the mask is in the least
      // significant bits.
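      // For example, And(Shr(x, 32), 0xffffffff) becomes Dext(x, 32, 32),
      // extracting the upper word of x.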
      Int64BinopMatcher mleft(m.left().node());
      if (mleft.right().HasValue()) {
        // Any shift value can match; int64 shifts use `value % 64`.
        uint32_t lsb = static_cast<uint32_t>(mleft.right().Value() & 0x3f);

        // Dext cannot extract bits past the register size, however since
        // shifting the original value would have introduced some zeros we can
        // still use Dext with a smaller mask and the remaining bits will be
        // zeros.
        if (lsb + mask_width > 64) mask_width = 64 - lsb;

        if (lsb == 0 && mask_width == 64) {
          Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(mleft.left().node()));
        } else {
          Emit(kMips64Dext, g.DefineAsRegister(node),
               g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
               g.TempImmediate(static_cast<int32_t>(mask_width)));
        }
        return;
      }
      // Other cases fall through to the normal And operation.
    }
  }
  if (m.right().HasValue()) {
    uint64_t mask = m.right().Value();
    uint32_t shift = base::bits::CountPopulation64(~mask);
    uint32_t msb = base::bits::CountLeadingZeros64(~mask);
    if (shift != 0 && shift < 32 && msb + shift == 64) {
      // Insert zeros for (x >> K) << K => x & ~(2^K - 1) expression reduction
      // and remove constant loading of inverted mask. Dins cannot insert bits
      // past word size, so shifts smaller than 32 are covered.
      Emit(kMips64Dins, g.DefineSameAsFirst(node),
           g.UseRegister(m.left().node()), g.TempImmediate(0),
           g.TempImmediate(shift));
      return;
    }
  }
  VisitBinop(this, node, kMips64And, true, kMips64And);
}


void InstructionSelector::VisitWord32Or(Node* node) {
  VisitBinop(this, node, kMips64Or32, true, kMips64Or32);
}


void InstructionSelector::VisitWord64Or(Node* node) {
  VisitBinop(this, node, kMips64Or, true, kMips64Or);
}


void InstructionSelector::VisitWord32Xor(Node* node) {
  Int32BinopMatcher m(node);
  if (m.left().IsWord32Or() && CanCover(node, m.left().node()) &&
      m.right().Is(-1)) {
    Int32BinopMatcher mleft(m.left().node());
    if (!mleft.right().HasValue()) {
      Mips64OperandGenerator g(this);
      Emit(kMips64Nor32, g.DefineAsRegister(node),
           g.UseRegister(mleft.left().node()),
           g.UseRegister(mleft.right().node()));
      return;
    }
  }
  if (m.right().Is(-1)) {
    // Use Nor for bit negation and eliminate constant loading for xori.
    Mips64OperandGenerator g(this);
    Emit(kMips64Nor32, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
         g.TempImmediate(0));
    return;
  }
  VisitBinop(this, node, kMips64Xor32, true, kMips64Xor32);
}


void InstructionSelector::VisitWord64Xor(Node* node) {
  Int64BinopMatcher m(node);
  if (m.left().IsWord64Or() && CanCover(node, m.left().node()) &&
      m.right().Is(-1)) {
    Int64BinopMatcher mleft(m.left().node());
    if (!mleft.right().HasValue()) {
      Mips64OperandGenerator g(this);
      Emit(kMips64Nor, g.DefineAsRegister(node),
           g.UseRegister(mleft.left().node()),
           g.UseRegister(mleft.right().node()));
      return;
    }
  }
  if (m.right().Is(-1)) {
    // Use Nor for bit negation and eliminate constant loading for xori.
    Mips64OperandGenerator g(this);
    Emit(kMips64Nor, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
         g.TempImmediate(0));
    return;
  }
  VisitBinop(this, node, kMips64Xor, true, kMips64Xor);
}


void InstructionSelector::VisitWord32Shl(Node* node) {
  Int32BinopMatcher m(node);
  if (m.left().IsWord32And() && CanCover(node, m.left().node()) &&
      m.right().IsInRange(1, 31)) {
    Mips64OperandGenerator g(this);
    Int32BinopMatcher mleft(m.left().node());
    // Match Word32Shl(Word32And(x, mask), imm) to Shl where the mask is
    // contiguous, and the shift immediate non-zero.
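    // For example, in Shl(And(x, 0xffff), 20) the And is redundant: the mask
    // keeps bits [15:0], and a shift by 20 pushes bits [15:12] out of the
    // register anyway, so only the shift is emitted.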
    if (mleft.right().HasValue()) {
      uint32_t mask = mleft.right().Value();
      uint32_t mask_width = base::bits::CountPopulation32(mask);
      uint32_t mask_msb = base::bits::CountLeadingZeros32(mask);
      if ((mask_width != 0) && (mask_msb + mask_width == 32)) {
        uint32_t shift = m.right().Value();
        DCHECK_EQ(0u, base::bits::CountTrailingZeros32(mask));
        DCHECK_NE(0u, shift);
        if ((shift + mask_width) >= 32) {
          // If the mask is contiguous and reaches or extends beyond the top
          // bit, only the shift is needed.
          Emit(kMips64Shl, g.DefineAsRegister(node),
               g.UseRegister(mleft.left().node()),
               g.UseImmediate(m.right().node()));
          return;
        }
      }
    }
  }
  VisitRRO(this, kMips64Shl, node);
}


void InstructionSelector::VisitWord32Shr(Node* node) {
  Int32BinopMatcher m(node);
  if (m.left().IsWord32And() && m.right().HasValue()) {
    uint32_t lsb = m.right().Value() & 0x1f;
    Int32BinopMatcher mleft(m.left().node());
    if (mleft.right().HasValue()) {
      // Select Ext for Shr(And(x, mask), imm) where the result of the mask is
      // shifted into the least-significant bits.
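      // For example, Shr(And(x, 0xff00), 8) becomes Ext(x, 8, 8), extracting
      // bits [15:8] of x.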
      uint32_t mask = (mleft.right().Value() >> lsb) << lsb;
      unsigned mask_width = base::bits::CountPopulation32(mask);
      unsigned mask_msb = base::bits::CountLeadingZeros32(mask);
      if ((mask_msb + mask_width + lsb) == 32) {
        Mips64OperandGenerator g(this);
        DCHECK_EQ(lsb, base::bits::CountTrailingZeros32(mask));
        Emit(kMips64Ext, g.DefineAsRegister(node),
             g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
             g.TempImmediate(mask_width));
        return;
      }
    }
  }
  VisitRRO(this, kMips64Shr, node);
}


void InstructionSelector::VisitWord32Sar(Node* node) {
  Int32BinopMatcher m(node);
  if (m.left().IsWord32Shl() && CanCover(node, m.left().node())) {
    Int32BinopMatcher mleft(m.left().node());
    if (m.right().HasValue() && mleft.right().HasValue()) {
      Mips64OperandGenerator g(this);
      uint32_t sar = m.right().Value();
      uint32_t shl = mleft.right().Value();
      if ((sar == shl) && (sar == 16)) {
        Emit(kMips64Seh, g.DefineAsRegister(node),
             g.UseRegister(mleft.left().node()));
        return;
      } else if ((sar == shl) && (sar == 24)) {
        Emit(kMips64Seb, g.DefineAsRegister(node),
             g.UseRegister(mleft.left().node()));
        return;
      } else if ((sar == shl) && (sar == 32)) {
        Emit(kMips64Shl, g.DefineAsRegister(node),
             g.UseRegister(mleft.left().node()), g.TempImmediate(0));
        return;
      }
    }
  }
  VisitRRO(this, kMips64Sar, node);
}


void InstructionSelector::VisitWord64Shl(Node* node) {
  Mips64OperandGenerator g(this);
  Int64BinopMatcher m(node);
  if ((m.left().IsChangeInt32ToInt64() || m.left().IsChangeUint32ToUint64()) &&
      m.right().IsInRange(32, 63) && CanCover(node, m.left().node())) {
    // There's no need to sign/zero-extend to 64-bit if we shift out the upper
    // 32 bits anyway.
    Emit(kMips64Dshl, g.DefineSameAsFirst(node),
         g.UseRegister(m.left().node()->InputAt(0)),
         g.UseImmediate(m.right().node()));
    return;
  }
  if (m.left().IsWord64And() && CanCover(node, m.left().node()) &&
      m.right().IsInRange(1, 63)) {
    // Match Word64Shl(Word64And(x, mask), imm) to Dshl where the mask is
    // contiguous, and the shift immediate non-zero.
    Int64BinopMatcher mleft(m.left().node());
    if (mleft.right().HasValue()) {
      uint64_t mask = mleft.right().Value();
      uint32_t mask_width = base::bits::CountPopulation64(mask);
      uint32_t mask_msb = base::bits::CountLeadingZeros64(mask);
      if ((mask_width != 0) && (mask_msb + mask_width == 64)) {
        uint64_t shift = m.right().Value();
        DCHECK_EQ(0u, base::bits::CountTrailingZeros64(mask));
        DCHECK_NE(0u, shift);

        if ((shift + mask_width) >= 64) {
          // If the mask is contiguous and reaches or extends beyond the top
          // bit, only the shift is needed.
          Emit(kMips64Dshl, g.DefineAsRegister(node),
               g.UseRegister(mleft.left().node()),
               g.UseImmediate(m.right().node()));
          return;
        }
      }
    }
  }
  VisitRRO(this, kMips64Dshl, node);
}


void InstructionSelector::VisitWord64Shr(Node* node) {
  Int64BinopMatcher m(node);
  if (m.left().IsWord64And() && m.right().HasValue()) {
    uint32_t lsb = m.right().Value() & 0x3f;
    Int64BinopMatcher mleft(m.left().node());
    if (mleft.right().HasValue()) {
      // Select Dext for Shr(And(x, mask), imm) where the result of the mask is
      // shifted into the least-significant bits.
      uint64_t mask = (mleft.right().Value() >> lsb) << lsb;
      unsigned mask_width = base::bits::CountPopulation64(mask);
      unsigned mask_msb = base::bits::CountLeadingZeros64(mask);
      if ((mask_msb + mask_width + lsb) == 64) {
        Mips64OperandGenerator g(this);
        DCHECK_EQ(lsb, base::bits::CountTrailingZeros64(mask));
        Emit(kMips64Dext, g.DefineAsRegister(node),
             g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
             g.TempImmediate(mask_width));
        return;
      }
    }
  }
  VisitRRO(this, kMips64Dshr, node);
}


void InstructionSelector::VisitWord64Sar(Node* node) {
  if (TryEmitExtendingLoad(this, node, node)) return;
  VisitRRO(this, kMips64Dsar, node);
}


void InstructionSelector::VisitWord32Ror(Node* node) {
  VisitRRO(this, kMips64Ror, node);
}


void InstructionSelector::VisitWord32Clz(Node* node) {
  VisitRR(this, kMips64Clz, node);
}


void InstructionSelector::VisitWord32ReverseBits(Node* node) { UNREACHABLE(); }


void InstructionSelector::VisitWord64ReverseBits(Node* node) { UNREACHABLE(); }

void InstructionSelector::VisitWord64ReverseBytes(Node* node) {
  Mips64OperandGenerator g(this);
  Emit(kMips64ByteSwap64, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}

void InstructionSelector::VisitWord32ReverseBytes(Node* node) {
  Mips64OperandGenerator g(this);
  Emit(kMips64ByteSwap32, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}

void InstructionSelector::VisitWord32Ctz(Node* node) {
  Mips64OperandGenerator g(this);
  Emit(kMips64Ctz, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
}


void InstructionSelector::VisitWord64Ctz(Node* node) {
  Mips64OperandGenerator g(this);
  Emit(kMips64Dctz, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
}


void InstructionSelector::VisitWord32Popcnt(Node* node) {
  Mips64OperandGenerator g(this);
  Emit(kMips64Popcnt, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}


void InstructionSelector::VisitWord64Popcnt(Node* node) {
  Mips64OperandGenerator g(this);
  Emit(kMips64Dpopcnt, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}


void InstructionSelector::VisitWord64Ror(Node* node) {
  VisitRRO(this, kMips64Dror, node);
}


void InstructionSelector::VisitWord64Clz(Node* node) {
  VisitRR(this, kMips64Dclz, node);
}


void InstructionSelector::VisitInt32Add(Node* node) {
  Mips64OperandGenerator g(this);
  Int32BinopMatcher m(node);

  // Select Lsa for (left + (left_of_right << imm)).
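  // For example, Int32Add(a, Word32Shl(b, 2)) becomes Lsa(a, b, 2), computing
  // a + (b << 2) in a single instruction.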
  if (m.right().opcode() == IrOpcode::kWord32Shl &&
      CanCover(node, m.left().node()) && CanCover(node, m.right().node())) {
    Int32BinopMatcher mright(m.right().node());
    if (mright.right().HasValue() && !m.left().HasValue()) {
      int32_t shift_value = static_cast<int32_t>(mright.right().Value());
      Emit(kMips64Lsa, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
           g.UseRegister(mright.left().node()), g.TempImmediate(shift_value));
      return;
    }
  }

  // Select Lsa for ((left_of_left << imm) + right).
  if (m.left().opcode() == IrOpcode::kWord32Shl &&
      CanCover(node, m.right().node()) && CanCover(node, m.left().node())) {
    Int32BinopMatcher mleft(m.left().node());
    if (mleft.right().HasValue() && !m.right().HasValue()) {
      int32_t shift_value = static_cast<int32_t>(mleft.right().Value());
      Emit(kMips64Lsa, g.DefineAsRegister(node),
           g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
           g.TempImmediate(shift_value));
      return;
    }
  }
  VisitBinop(this, node, kMips64Add, true, kMips64Add);
}


void InstructionSelector::VisitInt64Add(Node* node) {
  Mips64OperandGenerator g(this);
  Int64BinopMatcher m(node);

  // Select Dlsa for (left + (left_of_right << imm)).
  if (m.right().opcode() == IrOpcode::kWord64Shl &&
      CanCover(node, m.left().node()) && CanCover(node, m.right().node())) {
    Int64BinopMatcher mright(m.right().node());
    if (mright.right().HasValue() && !m.left().HasValue()) {
      int32_t shift_value = static_cast<int32_t>(mright.right().Value());
      Emit(kMips64Dlsa, g.DefineAsRegister(node),
           g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()),
           g.TempImmediate(shift_value));
      return;
    }
  }

  // Select Dlsa for ((left_of_left << imm) + right).
  if (m.left().opcode() == IrOpcode::kWord64Shl &&
      CanCover(node, m.right().node()) && CanCover(node, m.left().node())) {
    Int64BinopMatcher mleft(m.left().node());
    if (mleft.right().HasValue() && !m.right().HasValue()) {
      int32_t shift_value = static_cast<int32_t>(mleft.right().Value());
      Emit(kMips64Dlsa, g.DefineAsRegister(node),
           g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
           g.TempImmediate(shift_value));
      return;
    }
  }

  VisitBinop(this, node, kMips64Dadd, true, kMips64Dadd);
}


void InstructionSelector::VisitInt32Sub(Node* node) {
  VisitBinop(this, node, kMips64Sub);
}


void InstructionSelector::VisitInt64Sub(Node* node) {
  VisitBinop(this, node, kMips64Dsub);
}


void InstructionSelector::VisitInt32Mul(Node* node) {
  Mips64OperandGenerator g(this);
  Int32BinopMatcher m(node);
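  // Strength-reduce multiplication by a constant: x * 2^k becomes a shift,
  // x * (2^k + 1) becomes Lsa(x, x, k), and x * (2^k - 1) becomes
  // (x << k) - x.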
  if (m.right().HasValue() && m.right().Value() > 0) {
    int32_t value = m.right().Value();
    if (base::bits::IsPowerOfTwo32(value)) {
      Emit(kMips64Shl | AddressingModeField::encode(kMode_None),
           g.DefineAsRegister(node), g.UseRegister(m.left().node()),
           g.TempImmediate(WhichPowerOf2(value)));
      return;
    }
    if (base::bits::IsPowerOfTwo32(value - 1)) {
      Emit(kMips64Lsa, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
           g.UseRegister(m.left().node()),
           g.TempImmediate(WhichPowerOf2(value - 1)));
      return;
    }
    if (base::bits::IsPowerOfTwo32(value + 1)) {
      InstructionOperand temp = g.TempRegister();
      Emit(kMips64Shl | AddressingModeField::encode(kMode_None), temp,
           g.UseRegister(m.left().node()),
           g.TempImmediate(WhichPowerOf2(value + 1)));
      Emit(kMips64Sub | AddressingModeField::encode(kMode_None),
           g.DefineAsRegister(node), temp, g.UseRegister(m.left().node()));
      return;
    }
  }
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);
  if (CanCover(node, left) && CanCover(node, right)) {
    if (left->opcode() == IrOpcode::kWord64Sar &&
        right->opcode() == IrOpcode::kWord64Sar) {
      Int64BinopMatcher leftInput(left), rightInput(right);
      if (leftInput.right().Is(32) && rightInput.right().Is(32)) {
        // Combine untagging shifts with Dmul high.
        Emit(kMips64DMulHigh, g.DefineSameAsFirst(node),
             g.UseRegister(leftInput.left().node()),
             g.UseRegister(rightInput.left().node()));
        return;
      }
    }
  }
  VisitRRR(this, kMips64Mul, node);
}


void InstructionSelector::VisitInt32MulHigh(Node* node) {
  VisitRRR(this, kMips64MulHigh, node);
}


void InstructionSelector::VisitUint32MulHigh(Node* node) {
  VisitRRR(this, kMips64MulHighU, node);
}


void InstructionSelector::VisitInt64Mul(Node* node) {
  Mips64OperandGenerator g(this);
  Int64BinopMatcher m(node);
  // TODO(dusmil): Add optimization for shifts larger than 32.
  if (m.right().HasValue() && m.right().Value() > 0) {
    int32_t value = static_cast<int32_t>(m.right().Value());
    if (base::bits::IsPowerOfTwo32(value)) {
      Emit(kMips64Dshl | AddressingModeField::encode(kMode_None),
           g.DefineAsRegister(node), g.UseRegister(m.left().node()),
           g.TempImmediate(WhichPowerOf2(value)));
      return;
    }
    if (base::bits::IsPowerOfTwo32(value - 1)) {
      // The Dlsa macro handles shift amounts that are out of range.
      Emit(kMips64Dlsa, g.DefineAsRegister(node),
           g.UseRegister(m.left().node()), g.UseRegister(m.left().node()),
           g.TempImmediate(WhichPowerOf2(value - 1)));
      return;
    }
    if (base::bits::IsPowerOfTwo32(value + 1)) {
      InstructionOperand temp = g.TempRegister();
      Emit(kMips64Dshl | AddressingModeField::encode(kMode_None), temp,
           g.UseRegister(m.left().node()),
           g.TempImmediate(WhichPowerOf2(value + 1)));
      Emit(kMips64Dsub | AddressingModeField::encode(kMode_None),
           g.DefineAsRegister(node), temp, g.UseRegister(m.left().node()));
      return;
    }
  }
  Emit(kMips64Dmul, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
       g.UseRegister(m.right().node()));
}


void InstructionSelector::VisitInt32Div(Node* node) {
  Mips64OperandGenerator g(this);
  Int32BinopMatcher m(node);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);
  if (CanCover(node, left) && CanCover(node, right)) {
    if (left->opcode() == IrOpcode::kWord64Sar &&
        right->opcode() == IrOpcode::kWord64Sar) {
      Int64BinopMatcher rightInput(right), leftInput(left);
      if (rightInput.right().Is(32) && leftInput.right().Is(32)) {
        // Combine both shifted operands with Ddiv.
        Emit(kMips64Ddiv, g.DefineSameAsFirst(node),
             g.UseRegister(leftInput.left().node()),
             g.UseRegister(rightInput.left().node()));
        return;
      }
    }
  }
  Emit(kMips64Div, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
       g.UseRegister(m.right().node()));
}


void InstructionSelector::VisitUint32Div(Node* node) {
  Mips64OperandGenerator g(this);
  Int32BinopMatcher m(node);
  Emit(kMips64DivU, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
       g.UseRegister(m.right().node()));
}


void InstructionSelector::VisitInt32Mod(Node* node) {
  Mips64OperandGenerator g(this);
  Int32BinopMatcher m(node);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);
  if (CanCover(node, left) && CanCover(node, right)) {
    if (left->opcode() == IrOpcode::kWord64Sar &&
        right->opcode() == IrOpcode::kWord64Sar) {
      Int64BinopMatcher rightInput(right), leftInput(left);
      if (rightInput.right().Is(32) && leftInput.right().Is(32)) {
        // Combine both shifted operands with Dmod.
        Emit(kMips64Dmod, g.DefineSameAsFirst(node),
             g.UseRegister(leftInput.left().node()),
             g.UseRegister(rightInput.left().node()));
        return;
      }
    }
  }
  Emit(kMips64Mod, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
       g.UseRegister(m.right().node()));
}


void InstructionSelector::VisitUint32Mod(Node* node) {
  Mips64OperandGenerator g(this);
  Int32BinopMatcher m(node);
  Emit(kMips64ModU, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
       g.UseRegister(m.right().node()));
}


void InstructionSelector::VisitInt64Div(Node* node) {
  Mips64OperandGenerator g(this);
  Int64BinopMatcher m(node);
  Emit(kMips64Ddiv, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
       g.UseRegister(m.right().node()));
}


void InstructionSelector::VisitUint64Div(Node* node) {
  Mips64OperandGenerator g(this);
  Int64BinopMatcher m(node);
  Emit(kMips64DdivU, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
       g.UseRegister(m.right().node()));
}


void InstructionSelector::VisitInt64Mod(Node* node) {
  Mips64OperandGenerator g(this);
  Int64BinopMatcher m(node);
  Emit(kMips64Dmod, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
       g.UseRegister(m.right().node()));
}


void InstructionSelector::VisitUint64Mod(Node* node) {
  Mips64OperandGenerator g(this);
  Int64BinopMatcher m(node);
  Emit(kMips64DmodU, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
       g.UseRegister(m.right().node()));
}


void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
  VisitRR(this, kMips64CvtDS, node);
}


void InstructionSelector::VisitRoundInt32ToFloat32(Node* node) {
  VisitRR(this, kMips64CvtSW, node);
}


void InstructionSelector::VisitRoundUint32ToFloat32(Node* node) {
  VisitRR(this, kMips64CvtSUw, node);
}


void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
  VisitRR(this, kMips64CvtDW, node);
}


void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
  VisitRR(this, kMips64CvtDUw, node);
}


void InstructionSelector::VisitTruncateFloat32ToInt32(Node* node) {
  VisitRR(this, kMips64TruncWS, node);
}


void InstructionSelector::VisitTruncateFloat32ToUint32(Node* node) {
  VisitRR(this, kMips64TruncUwS, node);
}


void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
  Mips64OperandGenerator g(this);
  Node* value = node->InputAt(0);
  // Match ChangeFloat64ToInt32(Float64Round##OP) to corresponding instruction
  // which does rounding and conversion to integer format.
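  // For example, ChangeFloat64ToInt32(Float64RoundDown(x)) is emitted as a
  // single kMips64FloorWD (floor.w.d).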
  if (CanCover(node, value)) {
    switch (value->opcode()) {
      case IrOpcode::kFloat64RoundDown:
        Emit(kMips64FloorWD, g.DefineAsRegister(node),
             g.UseRegister(value->InputAt(0)));
        return;
      case IrOpcode::kFloat64RoundUp:
        Emit(kMips64CeilWD, g.DefineAsRegister(node),
             g.UseRegister(value->InputAt(0)));
        return;
      case IrOpcode::kFloat64RoundTiesEven:
        Emit(kMips64RoundWD, g.DefineAsRegister(node),
             g.UseRegister(value->InputAt(0)));
        return;
      case IrOpcode::kFloat64RoundTruncate:
        Emit(kMips64TruncWD, g.DefineAsRegister(node),
             g.UseRegister(value->InputAt(0)));
        return;
      default:
        break;
    }
    if (value->opcode() == IrOpcode::kChangeFloat32ToFloat64) {
      Node* next = value->InputAt(0);
      if (CanCover(value, next)) {
        // Match ChangeFloat64ToInt32(ChangeFloat32ToFloat64(Float64Round##OP))
        switch (next->opcode()) {
          case IrOpcode::kFloat32RoundDown:
            Emit(kMips64FloorWS, g.DefineAsRegister(node),
                 g.UseRegister(next->InputAt(0)));
            return;
          case IrOpcode::kFloat32RoundUp:
            Emit(kMips64CeilWS, g.DefineAsRegister(node),
                 g.UseRegister(next->InputAt(0)));
            return;
          case IrOpcode::kFloat32RoundTiesEven:
            Emit(kMips64RoundWS, g.DefineAsRegister(node),
                 g.UseRegister(next->InputAt(0)));
            return;
          case IrOpcode::kFloat32RoundTruncate:
            Emit(kMips64TruncWS, g.DefineAsRegister(node),
                 g.UseRegister(next->InputAt(0)));
            return;
          default:
            Emit(kMips64TruncWS, g.DefineAsRegister(node),
                 g.UseRegister(value->InputAt(0)));
            return;
        }
      } else {
        // Match float32 -> float64 -> int32 representation change path.
        Emit(kMips64TruncWS, g.DefineAsRegister(node),
             g.UseRegister(value->InputAt(0)));
        return;
      }
    }
  }
  VisitRR(this, kMips64TruncWD, node);
}


void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
  VisitRR(this, kMips64TruncUwD, node);
}

void InstructionSelector::VisitTruncateFloat64ToUint32(Node* node) {
  VisitRR(this, kMips64TruncUwD, node);
}

void InstructionSelector::VisitTryTruncateFloat32ToInt64(Node* node) {
  Mips64OperandGenerator g(this);
  InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
  InstructionOperand outputs[2];
  size_t output_count = 0;
  outputs[output_count++] = g.DefineAsRegister(node);

  Node* success_output = NodeProperties::FindProjection(node, 1);
  if (success_output) {
    outputs[output_count++] = g.DefineAsRegister(success_output);
  }

  this->Emit(kMips64TruncLS, output_count, outputs, 1, inputs);
}


void InstructionSelector::VisitTryTruncateFloat64ToInt64(Node* node) {
  Mips64OperandGenerator g(this);
  InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
  InstructionOperand outputs[2];
  size_t output_count = 0;
  outputs[output_count++] = g.DefineAsRegister(node);

  Node* success_output = NodeProperties::FindProjection(node, 1);
  if (success_output) {
    outputs[output_count++] = g.DefineAsRegister(success_output);
  }

  Emit(kMips64TruncLD, output_count, outputs, 1, inputs);
}


void InstructionSelector::VisitTryTruncateFloat32ToUint64(Node* node) {
  Mips64OperandGenerator g(this);
  InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
  InstructionOperand outputs[2];
  size_t output_count = 0;
  outputs[output_count++] = g.DefineAsRegister(node);

  Node* success_output = NodeProperties::FindProjection(node, 1);
  if (success_output) {
    outputs[output_count++] = g.DefineAsRegister(success_output);
  }

  Emit(kMips64TruncUlS, output_count, outputs, 1, inputs);
}


void InstructionSelector::VisitTryTruncateFloat64ToUint64(Node* node) {
  Mips64OperandGenerator g(this);

  InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
  InstructionOperand outputs[2];
  size_t output_count = 0;
  outputs[output_count++] = g.DefineAsRegister(node);

  Node* success_output = NodeProperties::FindProjection(node, 1);
  if (success_output) {
    outputs[output_count++] = g.DefineAsRegister(success_output);
  }

  Emit(kMips64TruncUlD, output_count, outputs, 1, inputs);
}


void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
  Node* value = node->InputAt(0);
  if (value->opcode() == IrOpcode::kLoad && CanCover(node, value)) {
    // Generate sign-extending load.
    LoadRepresentation load_rep = LoadRepresentationOf(value->op());
    InstructionCode opcode = kArchNop;
    switch (load_rep.representation()) {
      case MachineRepresentation::kBit:  // Fall through.
      case MachineRepresentation::kWord8:
        opcode = load_rep.IsUnsigned() ? kMips64Lbu : kMips64Lb;
        break;
      case MachineRepresentation::kWord16:
        opcode = load_rep.IsUnsigned() ? kMips64Lhu : kMips64Lh;
        break;
      case MachineRepresentation::kWord32:
        opcode = kMips64Lw;
        break;
      default:
        UNREACHABLE();
        return;
    }
    EmitLoad(this, value, opcode, node);
  } else {
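    // On MIPS64, 32-bit operations such as sll sign-extend their result, so
    // a word shift by zero implements the 32-to-64-bit sign extension.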
    Mips64OperandGenerator g(this);
    Emit(kMips64Shl, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
         g.TempImmediate(0));
  }
}


void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
  Mips64OperandGenerator g(this);
  Node* value = node->InputAt(0);
  switch (value->opcode()) {
    // 32-bit operations will write their result in a 64-bit register,
    // clearing the top 32 bits of the destination register.
    case IrOpcode::kUint32Div:
    case IrOpcode::kUint32Mod:
    case IrOpcode::kUint32MulHigh: {
      Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
      return;
    }
    case IrOpcode::kLoad: {
      LoadRepresentation load_rep = LoadRepresentationOf(value->op());
      if (load_rep.IsUnsigned()) {
        switch (load_rep.representation()) {
          case MachineRepresentation::kWord8:
          case MachineRepresentation::kWord16:
          case MachineRepresentation::kWord32:
            Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
            return;
          default:
            break;
        }
      }
    }
    default:
      break;
  }
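  // Otherwise zero-extend explicitly by extracting the low 32 bits with Dext.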
  Emit(kMips64Dext, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
       g.TempImmediate(0), g.TempImmediate(32));
}


void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
  Mips64OperandGenerator g(this);
  Node* value = node->InputAt(0);
  if (CanCover(node, value)) {
    switch (value->opcode()) {
      case IrOpcode::kWord64Sar: {
        if (TryEmitExtendingLoad(this, value, node)) {
          return;
        } else {
          Int64BinopMatcher m(value);
          if (m.right().IsInRange(32, 63)) {
            // After smi untagging there is no need to truncate; combine the
            // shift into this node.
            Emit(kMips64Dsar, g.DefineSameAsFirst(node),
                 g.UseRegister(m.left().node()),
                 g.UseImmediate(m.right().node()));
            return;
          }
        }
        break;
      }
      default:
        break;
    }
  }
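  // Fall back to simply extracting the low 32 bits.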
  Emit(kMips64Ext, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
       g.TempImmediate(0), g.TempImmediate(32));
}


void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
  Mips64OperandGenerator g(this);
  Node* value = node->InputAt(0);
  // Match TruncateFloat64ToFloat32(ChangeInt32ToFloat64) to corresponding
  // instruction.
  if (CanCover(node, value) &&
      value->opcode() == IrOpcode::kChangeInt32ToFloat64) {
    Emit(kMips64CvtSW, g.DefineAsRegister(node),
         g.UseRegister(value->InputAt(0)));
    return;
  }
  VisitRR(this, kMips64CvtSD, node);
}

void InstructionSelector::VisitTruncateFloat64ToWord32(Node* node) {
  VisitRR(this, kArchTruncateDoubleToI, node);
}

void InstructionSelector::VisitRoundFloat64ToInt32(Node* node) {
  VisitRR(this, kMips64TruncWD, node);
}

void InstructionSelector::VisitRoundInt64ToFloat32(Node* node) {
  VisitRR(this, kMips64CvtSL, node);
}


void InstructionSelector::VisitRoundInt64ToFloat64(Node* node) {
  VisitRR(this, kMips64CvtDL, node);
}


void InstructionSelector::VisitRoundUint64ToFloat32(Node* node) {
  VisitRR(this, kMips64CvtSUl, node);
}


void InstructionSelector::VisitRoundUint64ToFloat64(Node* node) {
  VisitRR(this, kMips64CvtDUl, node);
}


void InstructionSelector::VisitBitcastFloat32ToInt32(Node* node) {
  VisitRR(this, kMips64Float64ExtractLowWord32, node);
}


void InstructionSelector::VisitBitcastFloat64ToInt64(Node* node) {
  VisitRR(this, kMips64BitcastDL, node);
}


void InstructionSelector::VisitBitcastInt32ToFloat32(Node* node) {
  Mips64OperandGenerator g(this);
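  // Reuse Float64InsertLowWord32 to move the int32 bits into an FP register;
  // the inline immediate 0 takes the place of the usual float64 register
  // input.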
  Emit(kMips64Float64InsertLowWord32, g.DefineAsRegister(node),
       ImmediateOperand(ImmediateOperand::INLINE, 0),
       g.UseRegister(node->InputAt(0)));
}


void InstructionSelector::VisitBitcastInt64ToFloat64(Node* node) {
  VisitRR(this, kMips64BitcastLD, node);
}


void InstructionSelector::VisitFloat32Add(Node* node) {
  Mips64OperandGenerator g(this);
  if (kArchVariant == kMips64r2) {  // Select Madd.S(z, x, y).
    Float32BinopMatcher m(node);
    if (m.left().IsFloat32Mul() && CanCover(node, m.left().node())) {
      // For Add.S(Mul.S(x, y), z):
      Float32BinopMatcher mleft(m.left().node());
      Emit(kMips64MaddS, g.DefineAsRegister(node),
           g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
           g.UseRegister(mleft.right().node()));
      return;
    }
    if (m.right().IsFloat32Mul() && CanCover(node, m.right().node())) {
      // For Add.S(x, Mul.S(y, z)):
      Float32BinopMatcher mright(m.right().node());
      Emit(kMips64MaddS, g.DefineAsRegister(node),
           g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()),
           g.UseRegister(mright.right().node()));
      return;
    }
  }
  VisitRRR(this, kMips64AddS, node);
}


void InstructionSelector::VisitFloat64Add(Node* node) {
  Mips64OperandGenerator g(this);
  if (kArchVariant == kMips64r2) {  // Select Madd.D(z, x, y).
    Float64BinopMatcher m(node);
    if (m.left().IsFloat64Mul() && CanCover(node, m.left().node())) {
      // For Add.D(Mul.D(x, y), z):
      Float64BinopMatcher mleft(m.left().node());
      Emit(kMips64MaddD, g.DefineAsRegister(node),
           g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
           g.UseRegister(mleft.right().node()));
      return;
    }
    if (m.right().IsFloat64Mul() && CanCover(node, m.right().node())) {
      // For Add.D(x, Mul.D(y, z)):
      Float64BinopMatcher mright(m.right().node());
      Emit(kMips64MaddD, g.DefineAsRegister(node),
           g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()),
           g.UseRegister(mright.right().node()));
      return;
    }
  }
  VisitRRR(this, kMips64AddD, node);
}


void InstructionSelector::VisitFloat32Sub(Node* node) {
  Mips64OperandGenerator g(this);
  if (kArchVariant == kMips64r2) {  // Select Msub.S(z, x, y).
    Float32BinopMatcher m(node);
    if (m.left().IsFloat32Mul() && CanCover(node, m.left().node())) {
      // For Sub.S(Mul.S(x, y), z) select Msub.S(z, x, y).
      Float32BinopMatcher mleft(m.left().node());
      Emit(kMips64MsubS, g.DefineAsRegister(node),
           g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
           g.UseRegister(mleft.right().node()));
      return;
    }
  }
  VisitRRR(this, kMips64SubS, node);
}

void InstructionSelector::VisitFloat64Sub(Node* node) {
  Mips64OperandGenerator g(this);
  if (kArchVariant == kMips64r2) {  // Select Msub.D(z, x, y).
    Float64BinopMatcher m(node);
    if (m.left().IsFloat64Mul() && CanCover(node, m.left().node())) {
      // For Sub.D(Mul.D(x, y), z) select Msub.D(z, x, y).
      Float64BinopMatcher mleft(m.left().node());
      Emit(kMips64MsubD, g.DefineAsRegister(node),
           g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
           g.UseRegister(mleft.right().node()));
      return;
    }
  }
  VisitRRR(this, kMips64SubD, node);
}

void InstructionSelector::VisitFloat32Mul(Node* node) {
  VisitRRR(this, kMips64MulS, node);
}


void InstructionSelector::VisitFloat64Mul(Node* node) {
  VisitRRR(this, kMips64MulD, node);
}


void InstructionSelector::VisitFloat32Div(Node* node) {
  VisitRRR(this, kMips64DivS, node);
}


void InstructionSelector::VisitFloat64Div(Node* node) {
  VisitRRR(this, kMips64DivD, node);
}


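// Float64 modulus is emitted as a call (note MarkAsCall) with fixed FP
// argument registers f12 and f14 and the result in f0.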
void InstructionSelector::VisitFloat64Mod(Node* node) {
  Mips64OperandGenerator g(this);
  Emit(kMips64ModD, g.DefineAsFixed(node, f0),
       g.UseFixed(node->InputAt(0), f12),
       g.UseFixed(node->InputAt(1), f14))->MarkAsCall();
}

void InstructionSelector::VisitFloat32Max(Node* node) {
  Mips64OperandGenerator g(this);
  Emit(kMips64Float32Max, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
}

void InstructionSelector::VisitFloat64Max(Node* node) {
  Mips64OperandGenerator g(this);
  Emit(kMips64Float64Max, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
}

void InstructionSelector::VisitFloat32Min(Node* node) {
  Mips64OperandGenerator g(this);
  Emit(kMips64Float32Min, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
}

void InstructionSelector::VisitFloat64Min(Node* node) {
  Mips64OperandGenerator g(this);
  Emit(kMips64Float64Min, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
}


void InstructionSelector::VisitFloat32Abs(Node* node) {
  VisitRR(this, kMips64AbsS, node);
}


void InstructionSelector::VisitFloat64Abs(Node* node) {
  VisitRR(this, kMips64AbsD, node);
}

void InstructionSelector::VisitFloat32Sqrt(Node* node) {
  VisitRR(this, kMips64SqrtS, node);
}


void InstructionSelector::VisitFloat64Sqrt(Node* node) {
  VisitRR(this, kMips64SqrtD, node);
}


void InstructionSelector::VisitFloat32RoundDown(Node* node) {
  VisitRR(this, kMips64Float32RoundDown, node);
}


void InstructionSelector::VisitFloat64RoundDown(Node* node) {
  VisitRR(this, kMips64Float64RoundDown, node);
}


void InstructionSelector::VisitFloat32RoundUp(Node* node) {
  VisitRR(this, kMips64Float32RoundUp, node);
}


void InstructionSelector::VisitFloat64RoundUp(Node* node) {
  VisitRR(this, kMips64Float64RoundUp, node);
}


void InstructionSelector::VisitFloat32RoundTruncate(Node* node) {
  VisitRR(this, kMips64Float32RoundTruncate, node);
}


void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
  VisitRR(this, kMips64Float64RoundTruncate, node);
}


void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
  UNREACHABLE();
}


void InstructionSelector::VisitFloat32RoundTiesEven(Node* node) {
  VisitRR(this, kMips64Float32RoundTiesEven, node);
}


void InstructionSelector::VisitFloat64RoundTiesEven(Node* node) {
  VisitRR(this, kMips64Float64RoundTiesEven, node);
}

void InstructionSelector::VisitFloat32Neg(Node* node) {
  VisitRR(this, kMips64NegS, node);
}

void InstructionSelector::VisitFloat64Neg(Node* node) {
  VisitRR(this, kMips64NegD, node);
}

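// The IEEE754 binary and unary operations are likewise emitted as calls with
// fixed FP argument and result registers.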
void InstructionSelector::VisitFloat64Ieee754Binop(Node* node,
                                                   InstructionCode opcode) {
  Mips64OperandGenerator g(this);
  Emit(opcode, g.DefineAsFixed(node, f0), g.UseFixed(node->InputAt(0), f2),
       g.UseFixed(node->InputAt(1), f4))
      ->MarkAsCall();
}

void InstructionSelector::VisitFloat64Ieee754Unop(Node* node,
                                                  InstructionCode opcode) {
  Mips64OperandGenerator g(this);
  Emit(opcode, g.DefineAsFixed(node, f0), g.UseFixed(node->InputAt(0), f12))
      ->MarkAsCall();
}

void InstructionSelector::EmitPrepareArguments(
    ZoneVector<PushParameter>* arguments, const CallDescriptor* descriptor,
    Node* node) {
  Mips64OperandGenerator g(this);

  // Prepare for C function call.
  if (descriptor->IsCFunctionCall()) {
    Emit(kArchPrepareCallCFunction |
             MiscField::encode(static_cast<int>(descriptor->ParameterCount())),
         0, nullptr, 0, nullptr);

    // Poke any stack arguments.
    int slot = kCArgSlotCount;
    for (PushParameter input : (*arguments)) {
      Emit(kMips64StoreToStackSlot, g.NoOutput(), g.UseRegister(input.node()),
           g.TempImmediate(slot << kPointerSizeLog2));
      ++slot;
    }
  } else {
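    // For non-C calls, claim the required stack area up front, then store
    // each argument into its slot.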
    int push_count = static_cast<int>(descriptor->StackParameterCount());
    if (push_count > 0) {
      Emit(kMips64StackClaim, g.NoOutput(),
           g.TempImmediate(push_count << kPointerSizeLog2));
    }
    for (size_t n = 0; n < arguments->size(); ++n) {
      PushParameter input = (*arguments)[n];
      if (input.node()) {
        Emit(kMips64StoreToStackSlot, g.NoOutput(), g.UseRegister(input.node()),
             g.TempImmediate(static_cast<int>(n << kPointerSizeLog2)));
      }
    }
  }
}


bool InstructionSelector::IsTailCallAddressImmediate() { return false; }

int InstructionSelector::GetTempsCountForTailCallFromJSFunction() { return 3; }

void InstructionSelector::VisitUnalignedLoad(Node* node) {
  UnalignedLoadRepresentation load_rep =
      UnalignedLoadRepresentationOf(node->op());
  Mips64OperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);

  ArchOpcode opcode = kArchNop;
  switch (load_rep.representation()) {
    case MachineRepresentation::kFloat32:
      opcode = kMips64Ulwc1;
      break;
    case MachineRepresentation::kFloat64:
      opcode = kMips64Uldc1;
      break;
    case MachineRepresentation::kBit:  // Fall through.
    case MachineRepresentation::kWord8:
      UNREACHABLE();
      break;
    case MachineRepresentation::kWord16:
      opcode = load_rep.IsUnsigned() ? kMips64Ulhu : kMips64Ulh;
      break;
    case MachineRepresentation::kWord32:
      opcode = load_rep.IsUnsigned() ? kMips64Ulwu : kMips64Ulw;
      break;
    case MachineRepresentation::kTaggedSigned:   // Fall through.
    case MachineRepresentation::kTaggedPointer:  // Fall through.
    case MachineRepresentation::kTagged:  // Fall through.
    case MachineRepresentation::kWord64:
      opcode = kMips64Uld;
      break;
    case MachineRepresentation::kSimd128:  // Fall through.
    case MachineRepresentation::kSimd1x4:  // Fall through.
    case MachineRepresentation::kSimd1x8:  // Fall through.
    case MachineRepresentation::kSimd1x16:  // Fall through.
    case MachineRepresentation::kNone:
      UNREACHABLE();
      return;
  }

  if (g.CanBeImmediate(index, opcode)) {
    Emit(opcode | AddressingModeField::encode(kMode_MRI),
         g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index));
  } else {
    InstructionOperand addr_reg = g.TempRegister();
    Emit(kMips64Dadd | AddressingModeField::encode(kMode_None), addr_reg,
         g.UseRegister(index), g.UseRegister(base));
    // Emit desired load opcode, using temp addr_reg.
    Emit(opcode | AddressingModeField::encode(kMode_MRI),
         g.DefineAsRegister(node), addr_reg, g.TempImmediate(0));
  }
}

void InstructionSelector::VisitUnalignedStore(Node* node) {
  Mips64OperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* value = node->InputAt(2);

  UnalignedStoreRepresentation rep = UnalignedStoreRepresentationOf(node->op());
  ArchOpcode opcode = kArchNop;
  switch (rep) {
    case MachineRepresentation::kFloat32:
      opcode = kMips64Uswc1;
      break;
    case MachineRepresentation::kFloat64:
      opcode = kMips64Usdc1;
      break;
    case MachineRepresentation::kBit:  // Fall through.
    case MachineRepresentation::kWord8:
      UNREACHABLE();
      break;
    case MachineRepresentation::kWord16:
      opcode = kMips64Ush;
      break;
    case MachineRepresentation::kWord32:
      opcode = kMips64Usw;
      break;
    case MachineRepresentation::kTaggedSigned:   // Fall through.
    case MachineRepresentation::kTaggedPointer:  // Fall through.
    case MachineRepresentation::kTagged:  // Fall through.
    case MachineRepresentation::kWord64:
      opcode = kMips64Usd;
      break;
    case MachineRepresentation::kSimd128:  // Fall through.
    case MachineRepresentation::kSimd1x4:  // Fall through.
    case MachineRepresentation::kSimd1x8:  // Fall through.
    case MachineRepresentation::kSimd1x16:  // Fall through.
    case MachineRepresentation::kNone:
      UNREACHABLE();
      return;
  }

  if (g.CanBeImmediate(index, opcode)) {
    Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
         g.UseRegister(base), g.UseImmediate(index),
         g.UseRegisterOrImmediateZero(value));
  } else {
    InstructionOperand addr_reg = g.TempRegister();
    Emit(kMips64Dadd | AddressingModeField::encode(kMode_None), addr_reg,
         g.UseRegister(index), g.UseRegister(base));
    // Emit desired store opcode, using temp addr_reg.
    Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
         addr_reg, g.TempImmediate(0), g.UseRegisterOrImmediateZero(value));
  }
}

void InstructionSelector::VisitCheckedLoad(Node* node) {
  CheckedLoadRepresentation load_rep = CheckedLoadRepresentationOf(node->op());
  Mips64OperandGenerator g(this);
  Node* const buffer = node->InputAt(0);
  Node* const offset = node->InputAt(1);
  Node* const length = node->InputAt(2);
  ArchOpcode opcode = kArchNop;
  switch (load_rep.representation()) {
    case MachineRepresentation::kWord8:
      opcode = load_rep.IsSigned() ? kCheckedLoadInt8 : kCheckedLoadUint8;
      break;
    case MachineRepresentation::kWord16:
      opcode = load_rep.IsSigned() ? kCheckedLoadInt16 : kCheckedLoadUint16;
      break;
    case MachineRepresentation::kWord32:
      opcode = kCheckedLoadWord32;
      break;
    case MachineRepresentation::kWord64:
      opcode = kCheckedLoadWord64;
      break;
    case MachineRepresentation::kFloat32:
      opcode = kCheckedLoadFloat32;
      break;
    case MachineRepresentation::kFloat64:
      opcode = kCheckedLoadFloat64;
      break;
    case MachineRepresentation::kBit:
    case MachineRepresentation::kTaggedSigned:   // Fall through.
    case MachineRepresentation::kTaggedPointer:  // Fall through.
    case MachineRepresentation::kTagged:
    case MachineRepresentation::kSimd128:
    case MachineRepresentation::kSimd1x4:   // Fall through.
    case MachineRepresentation::kSimd1x8:   // Fall through.
    case MachineRepresentation::kSimd1x16:  // Fall through.
    case MachineRepresentation::kNone:
      UNREACHABLE();
      return;
  }
  InstructionOperand offset_operand = g.CanBeImmediate(offset, opcode)
                                          ? g.UseImmediate(offset)
                                          : g.UseRegister(offset);

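  // At most one of the offset and length operands may be an immediate: the
  // length is only used as an immediate when the offset is in a register.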
  InstructionOperand length_operand =
      (!g.CanBeImmediate(offset, opcode) && g.CanBeImmediate(length, opcode))
          ? g.UseImmediate(length)
          : g.UseRegister(length);

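  // Special case: a constant power-of-two length is always passed as an
  // immediate, presumably enabling a cheaper bounds check in the code
  // generator.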
  if (length->opcode() == IrOpcode::kInt32Constant) {
    Int32Matcher m(length);
    if (m.IsPowerOf2()) {
      Emit(opcode, g.DefineAsRegister(node), offset_operand,
           g.UseImmediate(length), g.UseRegister(buffer));
      return;
    }
  }

  Emit(opcode | AddressingModeField::encode(kMode_MRI),
       g.DefineAsRegister(node), offset_operand, length_operand,
       g.UseRegister(buffer));
}


void InstructionSelector::VisitCheckedStore(Node* node) {
  MachineRepresentation rep = CheckedStoreRepresentationOf(node->op());
  Mips64OperandGenerator g(this);
  Node* const buffer = node->InputAt(0);
  Node* const offset = node->InputAt(1);
  Node* const length = node->InputAt(2);
  Node* const value = node->InputAt(3);
  ArchOpcode opcode = kArchNop;
  switch (rep) {
    case MachineRepresentation::kWord8:
      opcode = kCheckedStoreWord8;
      break;
    case MachineRepresentation::kWord16:
      opcode = kCheckedStoreWord16;
      break;
    case MachineRepresentation::kWord32:
      opcode = kCheckedStoreWord32;
      break;
    case MachineRepresentation::kWord64:
      opcode = kCheckedStoreWord64;
      break;
    case MachineRepresentation::kFloat32:
      opcode = kCheckedStoreFloat32;
      break;
    case MachineRepresentation::kFloat64:
      opcode = kCheckedStoreFloat64;
      break;
    case MachineRepresentation::kBit:
    case MachineRepresentation::kTaggedSigned:   // Fall through.
    case MachineRepresentation::kTaggedPointer:  // Fall through.
    case MachineRepresentation::kTagged:
    case MachineRepresentation::kSimd128:
    case MachineRepresentation::kSimd1x4:   // Fall through.
    case MachineRepresentation::kSimd1x8:   // Fall through.
    case MachineRepresentation::kSimd1x16:  // Fall through.
    case MachineRepresentation::kNone:
      UNREACHABLE();
      return;
  }
  InstructionOperand offset_operand = g.CanBeImmediate(offset, opcode)
                                          ? g.UseImmediate(offset)
                                          : g.UseRegister(offset);

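  // As for checked loads, the length may only be an immediate if the offset
  // is not.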
  InstructionOperand length_operand =
      (!g.CanBeImmediate(offset, opcode) && g.CanBeImmediate(length, opcode))
          ? g.UseImmediate(length)
          : g.UseRegister(length);

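  // As in VisitCheckedLoad, a constant power-of-two length is passed as an
  // immediate.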
  if (length->opcode() == IrOpcode::kInt32Constant) {
    Int32Matcher m(length);
    if (m.IsPowerOf2()) {
      Emit(opcode, g.NoOutput(), offset_operand, g.UseImmediate(length),
           g.UseRegisterOrImmediateZero(value), g.UseRegister(buffer));
      return;
    }
  }

  Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
       offset_operand, length_operand, g.UseRegisterOrImmediateZero(value),
       g.UseRegister(buffer));
}


namespace {

// Shared routine for multiple compare operations.
static void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
                         InstructionOperand left, InstructionOperand right,
                         FlagsContinuation* cont) {
  Mips64OperandGenerator g(selector);
  opcode = cont->Encode(opcode);
  if (cont->IsBranch()) {
    selector->Emit(opcode, g.NoOutput(), left, right,
                   g.Label(cont->true_block()), g.Label(cont->false_block()));
  } else if (cont->IsDeoptimize()) {
    selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->kind(),
                             cont->reason(), cont->frame_state());
  } else if (cont->IsSet()) {
    selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
  } else {
    DCHECK(cont->IsTrap());
    selector->Emit(opcode, g.NoOutput(), left, right,
                   g.TempImmediate(cont->trap_id()));
  }
}


// Shared routine for multiple float32 compare operations.
void VisitFloat32Compare(InstructionSelector* selector, Node* node,
                         FlagsContinuation* cont) {
  Mips64OperandGenerator g(selector);
  Float32BinopMatcher m(node);
  InstructionOperand lhs, rhs;

  lhs = m.left().IsZero() ? g.UseImmediate(m.left().node())
                          : g.UseRegister(m.left().node());
  rhs = m.right().IsZero() ? g.UseImmediate(m.right().node())
                           : g.UseRegister(m.right().node());
  VisitCompare(selector, kMips64CmpS, lhs, rhs, cont);
}


// Shared routine for multiple float64 compare operations.
void VisitFloat64Compare(InstructionSelector* selector, Node* node,
                         FlagsContinuation* cont) {
  Mips64OperandGenerator g(selector);
  Float64BinopMatcher m(node);
  InstructionOperand lhs, rhs;

  lhs = m.left().IsZero() ? g.UseImmediate(m.left().node())
                          : g.UseRegister(m.left().node());
  rhs = m.right().IsZero() ? g.UseImmediate(m.right().node())
                           : g.UseRegister(m.right().node());
  VisitCompare(selector, kMips64CmpD, lhs, rhs, cont);
}


// Shared routine for multiple word compare operations.
void VisitWordCompare(InstructionSelector* selector, Node* node,
                      InstructionCode opcode, FlagsContinuation* cont,
                      bool commutative) {
  Mips64OperandGenerator g(selector);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);

  // Match immediates on left or right side of comparison.
  if (g.CanBeImmediate(right, opcode)) {
    if (opcode == kMips64Tst) {
      VisitCompare(selector, opcode, g.UseRegister(left), g.UseImmediate(right),
                   cont);
    } else {
      switch (cont->condition()) {
        case kEqual:
        case kNotEqual:
          if (cont->IsSet()) {
            VisitCompare(selector, opcode, g.UseRegister(left),
                         g.UseImmediate(right), cont);
          } else {
            VisitCompare(selector, opcode, g.UseRegister(left),
                         g.UseRegister(right), cont);
          }
          break;
        case kSignedLessThan:
        case kSignedGreaterThanOrEqual:
        case kUnsignedLessThan:
        case kUnsignedGreaterThanOrEqual:
          VisitCompare(selector, opcode, g.UseRegister(left),
                       g.UseImmediate(right), cont);
          break;
        default:
          VisitCompare(selector, opcode, g.UseRegister(left),
                       g.UseRegister(right), cont);
      }
    }
  } else if (g.CanBeImmediate(left, opcode)) {
    if (!commutative) cont->Commute();
    if (opcode == kMips64Tst) {
      VisitCompare(selector, opcode, g.UseRegister(right), g.UseImmediate(left),
                   cont);
    } else {
      switch (cont->condition()) {
        case kEqual:
        case kNotEqual:
          if (cont->IsSet()) {
            VisitCompare(selector, opcode, g.UseRegister(right),
                         g.UseImmediate(left), cont);
          } else {
            VisitCompare(selector, opcode, g.UseRegister(right),
                         g.UseRegister(left), cont);
          }
          break;
        case kSignedLessThan:
        case kSignedGreaterThanOrEqual:
        case kUnsignedLessThan:
        case kUnsignedGreaterThanOrEqual:
          VisitCompare(selector, opcode, g.UseRegister(right),
                       g.UseImmediate(left), cont);
          break;
        default:
          VisitCompare(selector, opcode, g.UseRegister(right),
                       g.UseRegister(left), cont);
      }
    }
  } else {
    VisitCompare(selector, opcode, g.UseRegister(left), g.UseRegister(right),
                 cont);
  }
}

bool IsNodeUnsigned(Node* n) {
  NodeMatcher m(n);

  if (m.IsLoad()) {
    LoadRepresentation load_rep = LoadRepresentationOf(n->op());
    return load_rep.IsUnsigned();
  } else if (m.IsUnalignedLoad()) {
    UnalignedLoadRepresentation load_rep =
        UnalignedLoadRepresentationOf(n->op());
    return load_rep.IsUnsigned();
  } else {
    return m.IsUint32Div() || m.IsUint32LessThan() ||
           m.IsUint32LessThanOrEqual() || m.IsUint32Mod() ||
           m.IsUint32MulHigh() || m.IsChangeFloat64ToUint32() ||
           m.IsTruncateFloat64ToUint32() || m.IsTruncateFloat32ToUint32();
  }
}

// Shared routine for a full Word32 comparison: shifts both operands into the
// upper word so that they are compared as true 32-bit values.
void VisitFullWord32Compare(InstructionSelector* selector, Node* node,
                            InstructionCode opcode, FlagsContinuation* cont) {
  Mips64OperandGenerator g(selector);
  InstructionOperand leftOp = g.TempRegister();
  InstructionOperand rightOp = g.TempRegister();

  selector->Emit(kMips64Dshl, leftOp, g.UseRegister(node->InputAt(0)),
                 g.TempImmediate(32));
  selector->Emit(kMips64Dshl, rightOp, g.UseRegister(node->InputAt(1)),
                 g.TempImmediate(32));

  VisitCompare(selector, opcode, leftOp, rightOp, cont);
}

void VisitOptimizedWord32Compare(InstructionSelector* selector, Node* node,
                                 InstructionCode opcode,
                                 FlagsContinuation* cont) {
  if (FLAG_debug_code) {
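    // With --debug-code, also perform the comparison on the operands shifted
    // into the upper word and assert that both variants agree.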
    Mips64OperandGenerator g(selector);
    InstructionOperand leftOp = g.TempRegister();
    InstructionOperand rightOp = g.TempRegister();
    InstructionOperand optimizedResult = g.TempRegister();
    InstructionOperand fullResult = g.TempRegister();
    FlagsCondition condition = cont->condition();
    InstructionCode testOpcode = opcode |
                                 FlagsConditionField::encode(condition) |
                                 FlagsModeField::encode(kFlags_set);

    selector->Emit(testOpcode, optimizedResult, g.UseRegister(node->InputAt(0)),
                   g.UseRegister(node->InputAt(1)));

    selector->Emit(kMips64Dshl, leftOp, g.UseRegister(node->InputAt(0)),
                   g.TempImmediate(32));
    selector->Emit(kMips64Dshl, rightOp, g.UseRegister(node->InputAt(1)),
                   g.TempImmediate(32));
    selector->Emit(testOpcode, fullResult, leftOp, rightOp);

    selector->Emit(
        kMips64AssertEqual, g.NoOutput(), optimizedResult, fullResult,
        g.TempImmediate(BailoutReason::kUnsupportedNonPrimitiveCompare));
  }

  VisitWordCompare(selector, node, opcode, cont, false);
}

void VisitWord32Compare(InstructionSelector* selector, Node* node,
                        FlagsContinuation* cont) {
  // MIPS64 doesn't support Word32 compare instructions. Instead it relies
  // on the values in registers being correctly sign-extended and uses a
  // Word64 comparison instead. This behavior is correct in most cases,
  // but doesn't work when comparing signed with unsigned operands.
  // We could simulate a full Word32 compare in all cases, but that would
  // create unnecessary overhead, since unsigned integers are rarely
  // used in JavaScript.
  // The solution proposed here tries to match a comparison of a signed
  // with an unsigned operand, and performs the full Word32 compare only
  // in those cases. Unfortunately, the solution is not complete because
  // it might skip cases where a full Word32 compare is needed, so
  // basically it is a hack.
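  // For example, a zero-extended unsigned value and a sign-extended signed
  // value with the same bit pattern in the low word compare as unequal when
  // treated directly as 64-bit words.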
  if (IsNodeUnsigned(node->InputAt(0)) != IsNodeUnsigned(node->InputAt(1))) {
    VisitFullWord32Compare(selector, node, kMips64Cmp, cont);
  } else {
    VisitOptimizedWord32Compare(selector, node, kMips64Cmp, cont);
  }
}


void VisitWord64Compare(InstructionSelector* selector, Node* node,
                        FlagsContinuation* cont) {
  VisitWordCompare(selector, node, kMips64Cmp, cont, false);
}


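// Emits a compare of |value| against zero, dispatching on the continuation
// kind (branch, deoptimize, trap, or materialized boolean).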
void EmitWordCompareZero(InstructionSelector* selector, Node* value,
                         FlagsContinuation* cont) {
  Mips64OperandGenerator g(selector);
  InstructionCode opcode = cont->Encode(kMips64Cmp);
  InstructionOperand const value_operand = g.UseRegister(value);
  if (cont->IsBranch()) {
    selector->Emit(opcode, g.NoOutput(), value_operand, g.TempImmediate(0),
                   g.Label(cont->true_block()), g.Label(cont->false_block()));
  } else if (cont->IsDeoptimize()) {
    selector->EmitDeoptimize(opcode, g.NoOutput(), value_operand,
                             g.TempImmediate(0), cont->kind(), cont->reason(),
                             cont->frame_state());
  } else if (cont->IsTrap()) {
    selector->Emit(opcode, g.NoOutput(), value_operand, g.TempImmediate(0),
                   g.TempImmediate(cont->trap_id()));
  } else {
    selector->Emit(opcode, g.DefineAsRegister(cont->result()), value_operand,
                   g.TempImmediate(0));
  }
}


// Shared routine for word comparisons against zero.
void VisitWordCompareZero(InstructionSelector* selector, Node* user,
                          Node* value, FlagsContinuation* cont) {
  // Try to combine with comparisons against 0 by simply inverting the branch.
  while (selector->CanCover(user, value)) {
    if (value->opcode() == IrOpcode::kWord32Equal) {
      Int32BinopMatcher m(value);
      if (!m.right().Is(0)) break;
      user = value;
      value = m.left().node();
    } else if (value->opcode() == IrOpcode::kWord64Equal) {
      Int64BinopMatcher m(value);
      if (!m.right().Is(0)) break;
      user = value;
      value = m.left().node();
    } else {
      break;
    }

    cont->Negate();
  }

  if (selector->CanCover(user, value)) {
    switch (value->opcode()) {
      case IrOpcode::kWord32Equal:
        cont->OverwriteAndNegateIfEqual(kEqual);
        return VisitWord32Compare(selector, value, cont);
      case IrOpcode::kInt32LessThan:
        cont->OverwriteAndNegateIfEqual(kSignedLessThan);
        return VisitWord32Compare(selector, value, cont);
      case IrOpcode::kInt32LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
        return VisitWord32Compare(selector, value, cont);
      case IrOpcode::kUint32LessThan:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
        return VisitWord32Compare(selector, value, cont);
      case IrOpcode::kUint32LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
        return VisitWord32Compare(selector, value, cont);
      case IrOpcode::kWord64Equal:
        cont->OverwriteAndNegateIfEqual(kEqual);
        return VisitWord64Compare(selector, value, cont);
      case IrOpcode::kInt64LessThan:
        cont->OverwriteAndNegateIfEqual(kSignedLessThan);
        return VisitWord64Compare(selector, value, cont);
      case IrOpcode::kInt64LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
        return VisitWord64Compare(selector, value, cont);
      case IrOpcode::kUint64LessThan:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
        return VisitWord64Compare(selector, value, cont);
      case IrOpcode::kUint64LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
        return VisitWord64Compare(selector, value, cont);
      case IrOpcode::kFloat32Equal:
        cont->OverwriteAndNegateIfEqual(kEqual);
        return VisitFloat32Compare(selector, value, cont);
      case IrOpcode::kFloat32LessThan:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
        return VisitFloat32Compare(selector, value, cont);
      case IrOpcode::kFloat32LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
        return VisitFloat32Compare(selector, value, cont);
      case IrOpcode::kFloat64Equal:
        cont->OverwriteAndNegateIfEqual(kEqual);
        return VisitFloat64Compare(selector, value, cont);
      case IrOpcode::kFloat64LessThan:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
        return VisitFloat64Compare(selector, value, cont);
      case IrOpcode::kFloat64LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
        return VisitFloat64Compare(selector, value, cont);
      case IrOpcode::kProjection:
        // Check if this is the overflow output projection of an
        // <Operation>WithOverflow node.
        if (ProjectionIndexOf(value->op()) == 1u) {
          // We cannot combine the <Operation>WithOverflow with this branch
          // unless the 0th projection (the use of the actual value of the
          // <Operation>) is either nullptr, which means there's no use of the
          // actual value, or was already defined, which means it is scheduled
          // *AFTER* this branch.
          Node* const node = value->InputAt(0);
          Node* const result = NodeProperties::FindProjection(node, 0);
          if (result == nullptr || selector->IsDefined(result)) {
            switch (node->opcode()) {
              case IrOpcode::kInt32AddWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop(selector, node, kMips64Dadd, cont);
              case IrOpcode::kInt32SubWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop(selector, node, kMips64Dsub, cont);
              case IrOpcode::kInt32MulWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop(selector, node, kMips64MulOvf, cont);
              case IrOpcode::kInt64AddWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop(selector, node, kMips64DaddOvf, cont);
              case IrOpcode::kInt64SubWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop(selector, node, kMips64DsubOvf, cont);
              default:
                break;
            }
          }
        }
        break;
      case IrOpcode::kWord32And:
      case IrOpcode::kWord64And:
        return VisitWordCompare(selector, value, kMips64Tst, cont, true);
      default:
        break;
    }
  }

  // The continuation could not be combined with a compare; emit a compare
  // against zero.
  EmitWordCompareZero(selector, value, cont);
}

}  // namespace

void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
                                      BasicBlock* fbranch) {
  FlagsContinuation cont(kNotEqual, tbranch, fbranch);
  VisitWordCompareZero(this, branch, branch->InputAt(0), &cont);
}

void InstructionSelector::VisitDeoptimizeIf(Node* node) {
  DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
  FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
      kNotEqual, p.kind(), p.reason(), node->InputAt(1));
  VisitWordCompareZero(this, node, node->InputAt(0), &cont);
}

void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
  DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
  FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
      kEqual, p.kind(), p.reason(), node->InputAt(1));
  VisitWordCompareZero(this, node, node->InputAt(0), &cont);
}

void InstructionSelector::VisitTrapIf(Node* node, Runtime::FunctionId func_id) {
  FlagsContinuation cont =
      FlagsContinuation::ForTrap(kNotEqual, func_id, node->InputAt(1));
  VisitWordCompareZero(this, node, node->InputAt(0), &cont);
}

void InstructionSelector::VisitTrapUnless(Node* node,
                                          Runtime::FunctionId func_id) {
  FlagsContinuation cont =
      FlagsContinuation::ForTrap(kEqual, func_id, node->InputAt(1));
  VisitWordCompareZero(this, node, node->InputAt(0), &cont);
}

void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
  Mips64OperandGenerator g(this);
  InstructionOperand value_operand = g.UseRegister(node->InputAt(0));

  // Emit either ArchTableSwitch or ArchLookupSwitch.
  size_t table_space_cost = 10 + 2 * sw.value_range;
  size_t table_time_cost = 3;
  size_t lookup_space_cost = 2 + 2 * sw.case_count;
  size_t lookup_time_cost = sw.case_count;
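  // Prefer a jump table when its space cost plus three times its time cost
  // does not exceed that of the chain of conditional jumps.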
  if (sw.case_count > 0 &&
      table_space_cost + 3 * table_time_cost <=
          lookup_space_cost + 3 * lookup_time_cost &&
      sw.min_value > std::numeric_limits<int32_t>::min()) {
    InstructionOperand index_operand = value_operand;
    if (sw.min_value) {
      index_operand = g.TempRegister();
      Emit(kMips64Sub, index_operand, value_operand,
           g.TempImmediate(sw.min_value));
    }
    // Generate a table lookup.
    return EmitTableSwitch(sw, index_operand);
  }

  // Generate a sequence of conditional jumps.
  return EmitLookupSwitch(sw, value_operand);
}


void InstructionSelector::VisitWord32Equal(Node* const node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
  Int32BinopMatcher m(node);
  if (m.right().Is(0)) {
    return VisitWordCompareZero(this, m.node(), m.left().node(), &cont);
  }

  VisitWord32Compare(this, node, &cont);
}


void InstructionSelector::VisitInt32LessThan(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
  VisitWord32Compare(this, node, &cont);
}


void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
  VisitWord32Compare(this, node, &cont);
}


void InstructionSelector::VisitUint32LessThan(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
  VisitWord32Compare(this, node, &cont);
}


void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
  VisitWord32Compare(this, node, &cont);
}


void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
    return VisitBinop(this, node, kMips64Dadd, &cont);
  }
  FlagsContinuation cont;
  VisitBinop(this, node, kMips64Dadd, &cont);
}


void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
    return VisitBinop(this, node, kMips64Dsub, &cont);
  }
  FlagsContinuation cont;
  VisitBinop(this, node, kMips64Dsub, &cont);
}

void InstructionSelector::VisitInt32MulWithOverflow(Node* node) {
  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
    return VisitBinop(this, node, kMips64MulOvf, &cont);
  }
  FlagsContinuation cont;
  VisitBinop(this, node, kMips64MulOvf, &cont);
}

void InstructionSelector::VisitInt64AddWithOverflow(Node* node) {
  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
    return VisitBinop(this, node, kMips64DaddOvf, &cont);
  }
  FlagsContinuation cont;
  VisitBinop(this, node, kMips64DaddOvf, &cont);
}


void InstructionSelector::VisitInt64SubWithOverflow(Node* node) {
  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
    return VisitBinop(this, node, kMips64DsubOvf, &cont);
  }
  FlagsContinuation cont;
  VisitBinop(this, node, kMips64DsubOvf, &cont);
}


void InstructionSelector::VisitWord64Equal(Node* const node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
  Int64BinopMatcher m(node);
  if (m.right().Is(0)) {
    return VisitWordCompareZero(this, m.node(), m.left().node(), &cont);
  }

  VisitWord64Compare(this, node, &cont);
}


void InstructionSelector::VisitInt64LessThan(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
  VisitWord64Compare(this, node, &cont);
}


void InstructionSelector::VisitInt64LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
  VisitWord64Compare(this, node, &cont);
}


void InstructionSelector::VisitUint64LessThan(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
  VisitWord64Compare(this, node, &cont);
}


void InstructionSelector::VisitUint64LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
  VisitWord64Compare(this, node, &cont);
}


void InstructionSelector::VisitFloat32Equal(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
  VisitFloat32Compare(this, node, &cont);
}


void InstructionSelector::VisitFloat32LessThan(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
  VisitFloat32Compare(this, node, &cont);
}


void InstructionSelector::VisitFloat32LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
  VisitFloat32Compare(this, node, &cont);
}


void InstructionSelector::VisitFloat64Equal(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
  VisitFloat64Compare(this, node, &cont);
}


void InstructionSelector::VisitFloat64LessThan(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
  VisitFloat64Compare(this, node, &cont);
}


void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
  VisitFloat64Compare(this, node, &cont);
}


void InstructionSelector::VisitFloat64ExtractLowWord32(Node* node) {
  VisitRR(this, kMips64Float64ExtractLowWord32, node);
}


void InstructionSelector::VisitFloat64ExtractHighWord32(Node* node) {
  VisitRR(this, kMips64Float64ExtractHighWord32, node);
}

void InstructionSelector::VisitFloat64SilenceNaN(Node* node) {
  VisitRR(this, kMips64Float64SilenceNaN, node);
}

void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) {
  Mips64OperandGenerator g(this);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);
  Emit(kMips64Float64InsertLowWord32, g.DefineSameAsFirst(node),
       g.UseRegister(left), g.UseRegister(right));
}


void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
  Mips64OperandGenerator g(this);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);
  Emit(kMips64Float64InsertHighWord32, g.DefineSameAsFirst(node),
       g.UseRegister(left), g.UseRegister(right));
}

void InstructionSelector::VisitAtomicLoad(Node* node) {
  LoadRepresentation load_rep = LoadRepresentationOf(node->op());
  Mips64OperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  ArchOpcode opcode = kArchNop;
  switch (load_rep.representation()) {
    case MachineRepresentation::kWord8:
      opcode = load_rep.IsSigned() ? kAtomicLoadInt8 : kAtomicLoadUint8;
      break;
    case MachineRepresentation::kWord16:
      opcode = load_rep.IsSigned() ? kAtomicLoadInt16 : kAtomicLoadUint16;
      break;
    case MachineRepresentation::kWord32:
      opcode = kAtomicLoadWord32;
      break;
    default:
      UNREACHABLE();
      return;
  }
  if (g.CanBeImmediate(index, opcode)) {
    Emit(opcode | AddressingModeField::encode(kMode_MRI),
         g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index));
  } else {
    InstructionOperand addr_reg = g.TempRegister();
    Emit(kMips64Dadd | AddressingModeField::encode(kMode_None), addr_reg,
         g.UseRegister(index), g.UseRegister(base));
    // Emit desired load opcode, using temp addr_reg.
    Emit(opcode | AddressingModeField::encode(kMode_MRI),
         g.DefineAsRegister(node), addr_reg, g.TempImmediate(0));
  }
}

void InstructionSelector::VisitAtomicStore(Node* node) {
  MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
  Mips64OperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* value = node->InputAt(2);
  ArchOpcode opcode = kArchNop;
  switch (rep) {
    case MachineRepresentation::kWord8:
      opcode = kAtomicStoreWord8;
      break;
    case MachineRepresentation::kWord16:
      opcode = kAtomicStoreWord16;
      break;
    case MachineRepresentation::kWord32:
      opcode = kAtomicStoreWord32;
      break;
    default:
      UNREACHABLE();
      return;
  }

  if (g.CanBeImmediate(index, opcode)) {
    Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
         g.UseRegister(base), g.UseImmediate(index),
         g.UseRegisterOrImmediateZero(value));
  } else {
    InstructionOperand addr_reg = g.TempRegister();
    Emit(kMips64Dadd | AddressingModeField::encode(kMode_None), addr_reg,
         g.UseRegister(index), g.UseRegister(base));
    // Emit desired store opcode, using temp addr_reg.
    Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
         addr_reg, g.TempImmediate(0), g.UseRegisterOrImmediateZero(value));
  }
}

// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
  MachineOperatorBuilder::Flags flags = MachineOperatorBuilder::kNoFlags;
  return flags | MachineOperatorBuilder::kWord32Ctz |
         MachineOperatorBuilder::kWord64Ctz |
         MachineOperatorBuilder::kWord32Popcnt |
         MachineOperatorBuilder::kWord64Popcnt |
         MachineOperatorBuilder::kWord32ShiftIsSafe |
         MachineOperatorBuilder::kInt32DivIsSafe |
         MachineOperatorBuilder::kUint32DivIsSafe |
         MachineOperatorBuilder::kFloat64RoundDown |
         MachineOperatorBuilder::kFloat32RoundDown |
         MachineOperatorBuilder::kFloat64RoundUp |
         MachineOperatorBuilder::kFloat32RoundUp |
         MachineOperatorBuilder::kFloat64RoundTruncate |
         MachineOperatorBuilder::kFloat32RoundTruncate |
         MachineOperatorBuilder::kFloat64RoundTiesEven |
         MachineOperatorBuilder::kFloat32RoundTiesEven |
         MachineOperatorBuilder::kWord32ReverseBytes |
         MachineOperatorBuilder::kWord64ReverseBytes;
}

// static
MachineOperatorBuilder::AlignmentRequirements
InstructionSelector::AlignmentRequirements() {
  if (kArchVariant == kMips64r6) {
    return MachineOperatorBuilder::AlignmentRequirements::
        FullUnalignedAccessSupport();
  } else {
    DCHECK(kArchVariant == kMips64r2);
    return MachineOperatorBuilder::AlignmentRequirements::
        NoUnalignedAccessSupport();
  }
}

}  // namespace compiler
}  // namespace internal
}  // namespace v8