Home | History | Annotate | Download | only in arm
      1 // Copyright 2014 the V8 project authors. All rights reserved.
      2 // Use of this source code is governed by a BSD-style license that can be
      3 // found in the LICENSE file.
      4 
      5 #include "src/base/adapters.h"
      6 #include "src/base/bits.h"
      7 #include "src/compiler/instruction-selector-impl.h"
      8 #include "src/compiler/node-matchers.h"
      9 #include "src/compiler/node-properties.h"
     10 
     11 namespace v8 {
     12 namespace internal {
     13 namespace compiler {
     14 
     15 // Adds Arm-specific methods for generating InstructionOperands.
     16 class ArmOperandGenerator : public OperandGenerator {
     17  public:
     18   explicit ArmOperandGenerator(InstructionSelector* selector)
     19       : OperandGenerator(selector) {}
     20 
     21   bool CanBeImmediate(int32_t value) const {
     22     return Assembler::ImmediateFitsAddrMode1Instruction(value);
     23   }
     24 
     25   bool CanBeImmediate(uint32_t value) const {
     26     return CanBeImmediate(bit_cast<int32_t>(value));
     27   }
     28 
     29   bool CanBeImmediate(Node* node, InstructionCode opcode) {
     30     Int32Matcher m(node);
     31     if (!m.HasValue()) return false;
     32     int32_t value = m.Value();
     33     switch (ArchOpcodeField::decode(opcode)) {
     34       case kArmAnd:
     35       case kArmMov:
     36       case kArmMvn:
     37       case kArmBic:
     38         return CanBeImmediate(value) || CanBeImmediate(~value);
     39 
     40       case kArmAdd:
     41       case kArmSub:
     42       case kArmCmp:
     43       case kArmCmn:
     44         return CanBeImmediate(value) || CanBeImmediate(-value);
     45 
     46       case kArmTst:
     47       case kArmTeq:
     48       case kArmOrr:
     49       case kArmEor:
     50       case kArmRsb:
     51         return CanBeImmediate(value);
     52 
     53       case kArmVldrF32:
     54       case kArmVstrF32:
     55       case kArmVldrF64:
     56       case kArmVstrF64:
     57         return value >= -1020 && value <= 1020 && (value % 4) == 0;
     58 
     59       case kArmLdrb:
     60       case kArmLdrsb:
     61       case kArmStrb:
     62       case kArmLdr:
     63       case kArmStr:
     64         return value >= -4095 && value <= 4095;
     65 
     66       case kArmLdrh:
     67       case kArmLdrsh:
     68       case kArmStrh:
     69         return value >= -255 && value <= 255;
     70 
     71       default:
     72         break;
     73     }
     74     return false;
     75   }
     76 
     77   // Use the stack pointer if the node is LoadStackPointer, otherwise assign a
     78   // register.
     79   InstructionOperand UseRegisterOrStackPointer(Node* node) {
     80     if (node->opcode() == IrOpcode::kLoadStackPointer) {
     81       return LocationOperand(LocationOperand::EXPLICIT,
     82                              LocationOperand::REGISTER,
     83                              MachineRepresentation::kWord32, sp.code());
     84     }
     85     return UseRegister(node);
     86   }
     87 };
     88 
     89 
     90 namespace {
     91 
     92 void VisitRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
     93   ArmOperandGenerator g(selector);
     94   selector->Emit(opcode, g.DefineAsRegister(node),
     95                  g.UseRegister(node->InputAt(0)));
     96 }
     97 
     98 void VisitRRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
     99   ArmOperandGenerator g(selector);
    100   selector->Emit(opcode, g.DefineAsRegister(node),
    101                  g.UseRegister(node->InputAt(0)),
    102                  g.UseRegister(node->InputAt(1)));
    103 }
    104 
    105 void VisitRRRShuffle(InstructionSelector* selector, ArchOpcode opcode,
    106                      Node* node) {
    107   ArmOperandGenerator g(selector);
    108   // Swap inputs to save an instruction in the CodeGenerator for High ops.
    109   if (opcode == kArmS32x4ZipRight || opcode == kArmS32x4UnzipRight ||
    110       opcode == kArmS32x4TransposeRight || opcode == kArmS16x8ZipRight ||
    111       opcode == kArmS16x8UnzipRight || opcode == kArmS16x8TransposeRight ||
    112       opcode == kArmS8x16ZipRight || opcode == kArmS8x16UnzipRight ||
    113       opcode == kArmS8x16TransposeRight) {
    114     Node* in0 = node->InputAt(0);
    115     Node* in1 = node->InputAt(1);
    116     node->ReplaceInput(0, in1);
    117     node->ReplaceInput(1, in0);
    118   }
    119   // Use DefineSameAsFirst for binary ops that clobber their inputs, e.g. the
    120   // NEON vzip, vuzp, and vtrn instructions.
    121   selector->Emit(opcode, g.DefineSameAsFirst(node),
    122                  g.UseRegister(node->InputAt(0)),
    123                  g.UseRegister(node->InputAt(1)));
    124 }
    125 
    126 void VisitRRI(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
    127   ArmOperandGenerator g(selector);
    128   int32_t imm = OpParameter<int32_t>(node->op());
    129   selector->Emit(opcode, g.DefineAsRegister(node),
    130                  g.UseRegister(node->InputAt(0)), g.UseImmediate(imm));
    131 }
    132 
    133 void VisitRRIR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
    134   ArmOperandGenerator g(selector);
    135   int32_t imm = OpParameter<int32_t>(node->op());
    136   selector->Emit(opcode, g.DefineAsRegister(node),
    137                  g.UseRegister(node->InputAt(0)), g.UseImmediate(imm),
    138                  g.UseRegister(node->InputAt(1)));
    139 }
    140 
    141 template <IrOpcode::Value kOpcode, int kImmMin, int kImmMax,
    142           AddressingMode kImmMode, AddressingMode kRegMode>
    143 bool TryMatchShift(InstructionSelector* selector,
    144                    InstructionCode* opcode_return, Node* node,
    145                    InstructionOperand* value_return,
    146                    InstructionOperand* shift_return) {
    147   ArmOperandGenerator g(selector);
    148   if (node->opcode() == kOpcode) {
    149     Int32BinopMatcher m(node);
    150     *value_return = g.UseRegister(m.left().node());
    151     if (m.right().IsInRange(kImmMin, kImmMax)) {
    152       *opcode_return |= AddressingModeField::encode(kImmMode);
    153       *shift_return = g.UseImmediate(m.right().node());
    154     } else {
    155       *opcode_return |= AddressingModeField::encode(kRegMode);
    156       *shift_return = g.UseRegister(m.right().node());
    157     }
    158     return true;
    159   }
    160   return false;
    161 }
    162 
    163 template <IrOpcode::Value kOpcode, int kImmMin, int kImmMax,
    164           AddressingMode kImmMode>
    165 bool TryMatchShiftImmediate(InstructionSelector* selector,
    166                             InstructionCode* opcode_return, Node* node,
    167                             InstructionOperand* value_return,
    168                             InstructionOperand* shift_return) {
    169   ArmOperandGenerator g(selector);
    170   if (node->opcode() == kOpcode) {
    171     Int32BinopMatcher m(node);
    172     if (m.right().IsInRange(kImmMin, kImmMax)) {
    173       *opcode_return |= AddressingModeField::encode(kImmMode);
    174       *value_return = g.UseRegister(m.left().node());
    175       *shift_return = g.UseImmediate(m.right().node());
    176       return true;
    177     }
    178   }
    179   return false;
    180 }
    181 
// Matches a Word32Ror node; rotate immediates must be in [1, 31].
bool TryMatchROR(InstructionSelector* selector, InstructionCode* opcode_return,
                 Node* node, InstructionOperand* value_return,
                 InstructionOperand* shift_return) {
  return TryMatchShift<IrOpcode::kWord32Ror, 1, 31, kMode_Operand2_R_ROR_I,
                       kMode_Operand2_R_ROR_R>(selector, opcode_return, node,
                                               value_return, shift_return);
}
    189 
    190 
// Matches a Word32Sar node; shift immediates must be in [1, 32].
bool TryMatchASR(InstructionSelector* selector, InstructionCode* opcode_return,
                 Node* node, InstructionOperand* value_return,
                 InstructionOperand* shift_return) {
  return TryMatchShift<IrOpcode::kWord32Sar, 1, 32, kMode_Operand2_R_ASR_I,
                       kMode_Operand2_R_ASR_R>(selector, opcode_return, node,
                                               value_return, shift_return);
}
    198 
    199 
// Matches a Word32Shl node; shift immediates must be in [0, 31].
bool TryMatchLSL(InstructionSelector* selector, InstructionCode* opcode_return,
                 Node* node, InstructionOperand* value_return,
                 InstructionOperand* shift_return) {
  return TryMatchShift<IrOpcode::kWord32Shl, 0, 31, kMode_Operand2_R_LSL_I,
                       kMode_Operand2_R_LSL_R>(selector, opcode_return, node,
                                               value_return, shift_return);
}
    207 
// Matches a Word32Shl node only when the shift amount is a constant in
// [0, 31]; used to fold shifted indexes into load/store addressing modes.
bool TryMatchLSLImmediate(InstructionSelector* selector,
                          InstructionCode* opcode_return, Node* node,
                          InstructionOperand* value_return,
                          InstructionOperand* shift_return) {
  return TryMatchShiftImmediate<IrOpcode::kWord32Shl, 0, 31,
                                kMode_Operand2_R_LSL_I>(
      selector, opcode_return, node, value_return, shift_return);
}
    216 
// Matches a Word32Shr node; shift immediates must be in [1, 32].
bool TryMatchLSR(InstructionSelector* selector, InstructionCode* opcode_return,
                 Node* node, InstructionOperand* value_return,
                 InstructionOperand* shift_return) {
  return TryMatchShift<IrOpcode::kWord32Shr, 1, 32, kMode_Operand2_R_LSR_I,
                       kMode_Operand2_R_LSR_R>(selector, opcode_return, node,
                                               value_return, shift_return);
}
    224 
    225 
    226 bool TryMatchShift(InstructionSelector* selector,
    227                    InstructionCode* opcode_return, Node* node,
    228                    InstructionOperand* value_return,
    229                    InstructionOperand* shift_return) {
    230   return (
    231       TryMatchASR(selector, opcode_return, node, value_return, shift_return) ||
    232       TryMatchLSL(selector, opcode_return, node, value_return, shift_return) ||
    233       TryMatchLSR(selector, opcode_return, node, value_return, shift_return) ||
    234       TryMatchROR(selector, opcode_return, node, value_return, shift_return));
    235 }
    236 
    237 
    238 bool TryMatchImmediateOrShift(InstructionSelector* selector,
    239                               InstructionCode* opcode_return, Node* node,
    240                               size_t* input_count_return,
    241                               InstructionOperand* inputs) {
    242   ArmOperandGenerator g(selector);
    243   if (g.CanBeImmediate(node, *opcode_return)) {
    244     *opcode_return |= AddressingModeField::encode(kMode_Operand2_I);
    245     inputs[0] = g.UseImmediate(node);
    246     *input_count_return = 1;
    247     return true;
    248   }
    249   if (TryMatchShift(selector, opcode_return, node, &inputs[0], &inputs[1])) {
    250     *input_count_return = 2;
    251     return true;
    252   }
    253   return false;
    254 }
    255 
    256 
// Shared lowering for flag-setting binary ops. |opcode| is used when the
// right operand can be encoded as an immediate or shift; |reverse_opcode| is
// the operand-swapped form, used when the *left* operand can be encoded
// instead. Falls back to register-register otherwise.
void VisitBinop(InstructionSelector* selector, Node* node,
                InstructionCode opcode, InstructionCode reverse_opcode,
                FlagsContinuation* cont) {
  ArmOperandGenerator g(selector);
  Int32BinopMatcher m(node);
  InstructionOperand inputs[3];
  size_t input_count = 0;
  InstructionOperand outputs[1];
  size_t output_count = 0;

  if (m.left().node() == m.right().node()) {
    // If both inputs refer to the same operand, enforce allocating a register
    // for both of them to ensure that we don't end up generating code like
    // this:
    //
    //   mov r0, r1, asr #16
    //   adds r0, r0, r1, asr #16
    //   bvs label
    InstructionOperand const input = g.UseRegister(m.left().node());
    opcode |= AddressingModeField::encode(kMode_Operand2_R);
    inputs[input_count++] = input;
    inputs[input_count++] = input;
  } else if (TryMatchImmediateOrShift(selector, &opcode, m.right().node(),
                                      &input_count, &inputs[1])) {
    // Right operand encoded into inputs[1..]; the left goes in a register.
    inputs[0] = g.UseRegister(m.left().node());
    input_count++;
  } else if (TryMatchImmediateOrShift(selector, &reverse_opcode,
                                      m.left().node(), &input_count,
                                      &inputs[1])) {
    // Left operand encodable: swap operands and use the reversed opcode.
    inputs[0] = g.UseRegister(m.right().node());
    opcode = reverse_opcode;
    input_count++;
  } else {
    // Neither side is encodable; plain register-register form.
    opcode |= AddressingModeField::encode(kMode_Operand2_R);
    inputs[input_count++] = g.UseRegister(m.left().node());
    inputs[input_count++] = g.UseRegister(m.right().node());
  }

  outputs[output_count++] = g.DefineAsRegister(node);

  DCHECK_NE(0u, input_count);
  DCHECK_EQ(1u, output_count);
  DCHECK_GE(arraysize(inputs), input_count);
  DCHECK_GE(arraysize(outputs), output_count);
  DCHECK_NE(kMode_None, AddressingModeField::decode(opcode));

  selector->EmitWithContinuation(opcode, output_count, outputs, input_count,
                                 inputs, cont);
}
    306 
    307 
// Convenience overload: lowers the binop with an empty (no-branch, no-flags)
// continuation.
void VisitBinop(InstructionSelector* selector, Node* node,
                InstructionCode opcode, InstructionCode reverse_opcode) {
  FlagsContinuation cont;
  VisitBinop(selector, node, opcode, reverse_opcode, &cont);
}
    313 
    314 
    315 void EmitDiv(InstructionSelector* selector, ArchOpcode div_opcode,
    316              ArchOpcode f64i32_opcode, ArchOpcode i32f64_opcode,
    317              InstructionOperand result_operand, InstructionOperand left_operand,
    318              InstructionOperand right_operand) {
    319   ArmOperandGenerator g(selector);
    320   if (selector->IsSupported(SUDIV)) {
    321     selector->Emit(div_opcode, result_operand, left_operand, right_operand);
    322     return;
    323   }
    324   InstructionOperand left_double_operand = g.TempDoubleRegister();
    325   InstructionOperand right_double_operand = g.TempDoubleRegister();
    326   InstructionOperand result_double_operand = g.TempDoubleRegister();
    327   selector->Emit(f64i32_opcode, left_double_operand, left_operand);
    328   selector->Emit(f64i32_opcode, right_double_operand, right_operand);
    329   selector->Emit(kArmVdivF64, result_double_operand, left_double_operand,
    330                  right_double_operand);
    331   selector->Emit(i32f64_opcode, result_operand, result_double_operand);
    332 }
    333 
    334 
    335 void VisitDiv(InstructionSelector* selector, Node* node, ArchOpcode div_opcode,
    336               ArchOpcode f64i32_opcode, ArchOpcode i32f64_opcode) {
    337   ArmOperandGenerator g(selector);
    338   Int32BinopMatcher m(node);
    339   EmitDiv(selector, div_opcode, f64i32_opcode, i32f64_opcode,
    340           g.DefineAsRegister(node), g.UseRegister(m.left().node()),
    341           g.UseRegister(m.right().node()));
    342 }
    343 
    344 
// Lowers a modulus node as left - (left / right) * right: a division via
// EmitDiv followed by a multiply-subtract.
void VisitMod(InstructionSelector* selector, Node* node, ArchOpcode div_opcode,
              ArchOpcode f64i32_opcode, ArchOpcode i32f64_opcode) {
  ArmOperandGenerator g(selector);
  Int32BinopMatcher m(node);
  InstructionOperand div_operand = g.TempRegister();
  InstructionOperand result_operand = g.DefineAsRegister(node);
  InstructionOperand left_operand = g.UseRegister(m.left().node());
  InstructionOperand right_operand = g.UseRegister(m.right().node());
  EmitDiv(selector, div_opcode, f64i32_opcode, i32f64_opcode, div_operand,
          left_operand, right_operand);
  if (selector->IsSupported(ARMv7)) {
    // Single multiply-and-subtract instruction.
    selector->Emit(kArmMls, result_operand, div_operand, right_operand,
                   left_operand);
  } else {
    // Pre-ARMv7 fallback: separate multiply, then subtract.
    InstructionOperand mul_operand = g.TempRegister();
    selector->Emit(kArmMul, mul_operand, div_operand, right_operand);
    selector->Emit(kArmSub | AddressingModeField::encode(kMode_Operand2_R),
                   result_operand, left_operand, mul_operand);
  }
}
    365 
// Emits a load from [base + index], picking the best addressing mode the
// index allows: immediate offset, shifted-register offset (word loads only),
// or plain register offset.
void EmitLoad(InstructionSelector* selector, InstructionCode opcode,
              InstructionOperand* output, Node* base, Node* index) {
  ArmOperandGenerator g(selector);
  InstructionOperand inputs[3];
  size_t input_count = 2;

  inputs[0] = g.UseRegister(base);
  if (g.CanBeImmediate(index, opcode)) {
    // Index is a constant fitting the instruction's offset field.
    inputs[1] = g.UseImmediate(index);
    opcode |= AddressingModeField::encode(kMode_Offset_RI);
  } else if ((opcode == kArmLdr) &&
             TryMatchLSLImmediate(selector, &opcode, index, &inputs[1],
                                  &inputs[2])) {
    // Word loads can fold an index of the form (x << imm) into the address.
    input_count = 3;
  } else {
    inputs[1] = g.UseRegister(index);
    opcode |= AddressingModeField::encode(kMode_Offset_RR);
  }
  selector->Emit(opcode, 1, output, input_count, inputs);
}
    386 
// Emits a store. The caller has already filled inputs[0..input_count-1]
// (value, and usually the base register); this appends the index operand and
// encodes the addressing mode, mirroring EmitLoad.
void EmitStore(InstructionSelector* selector, InstructionCode opcode,
               size_t input_count, InstructionOperand* inputs,
               Node* index) {
  ArmOperandGenerator g(selector);

  if (g.CanBeImmediate(index, opcode)) {
    inputs[input_count++] = g.UseImmediate(index);
    opcode |= AddressingModeField::encode(kMode_Offset_RI);
  } else if ((opcode == kArmStr) &&
             TryMatchLSLImmediate(selector, &opcode, index, &inputs[2],
                                  &inputs[3])) {
    // Word stores can fold an index of the form (x << imm); this branch
    // assumes the caller placed value and base in inputs[0] and inputs[1].
    input_count = 4;
  } else {
    inputs[input_count++] = g.UseRegister(index);
    opcode |= AddressingModeField::encode(kMode_Offset_RR);
  }
  selector->Emit(opcode, 0, nullptr, input_count, inputs);
}
    405 
// Lowers a 64-bit (register-pair) atomic read-modify-write op. The two
// 32-bit result projections are fixed to r2/r3 and two temps to r6/r7 —
// NOTE(review): presumably matching the code generator's register
// expectations for these opcodes; confirm against the Arm code generator.
void VisitPairAtomicBinOp(InstructionSelector* selector, Node* node,
                          ArchOpcode opcode) {
  ArmOperandGenerator g(selector);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* value = node->InputAt(2);
  Node* value_high = node->InputAt(3);
  AddressingMode addressing_mode = kMode_Offset_RR;
  // Value halves must not alias the address registers.
  InstructionOperand inputs[] = {g.UseUniqueRegister(value),
                                 g.UseUniqueRegister(value_high),
                                 g.UseRegister(base), g.UseRegister(index)};
  InstructionOperand outputs[] = {
      g.DefineAsFixed(NodeProperties::FindProjection(node, 0), r2),
      g.DefineAsFixed(NodeProperties::FindProjection(node, 1), r3)};
  InstructionOperand temps[] = {g.TempRegister(), g.TempRegister(r6),
                                g.TempRegister(r7), g.TempRegister()};
  InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
  selector->Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
                 arraysize(temps), temps);
}
    426 
// Lowers a narrow (sub-64-bit, pair-result) atomic read-modify-write op.
// The two result projections are fixed to r4/r5 — NOTE(review): presumably
// to match the code generator; confirm against the Arm code generator.
void VisitNarrowAtomicBinOp(InstructionSelector* selector, Node* node,
                            ArchOpcode opcode) {
  ArmOperandGenerator g(selector);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* value = node->InputAt(2);
  AddressingMode addressing_mode = kMode_Offset_RR;
  // The value must not alias the address registers.
  InstructionOperand inputs[3] = {g.UseRegister(base), g.UseRegister(index),
                                  g.UseUniqueRegister(value)};
  InstructionOperand outputs[] = {
      g.DefineAsFixed(NodeProperties::FindProjection(node, 0), r4),
      g.DefineAsFixed(NodeProperties::FindProjection(node, 1), r5)};
  InstructionOperand temps[] = {g.TempRegister(), g.TempRegister(),
                                g.TempRegister()};
  InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
  selector->Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
                 arraysize(temps), temps);
}
    445 
    446 }  // namespace
    447 
    448 void InstructionSelector::VisitStackSlot(Node* node) {
    449   StackSlotRepresentation rep = StackSlotRepresentationOf(node->op());
    450   int slot = frame_->AllocateSpillSlot(rep.size());
    451   OperandGenerator g(this);
    452 
    453   Emit(kArchStackSlot, g.DefineAsRegister(node),
    454        sequence()->AddImmediate(Constant(slot)), 0, nullptr);
    455 }
    456 
// Emits a debug abort; the single input is fixed in register r1.
void InstructionSelector::VisitDebugAbort(Node* node) {
  ArmOperandGenerator g(this);
  Emit(kArchDebugAbort, g.NoOutput(), g.UseFixed(node->InputAt(0), r1));
}
    461 
// Selects the load instruction matching the node's machine representation
// and delegates addressing-mode selection to EmitLoad.
void InstructionSelector::VisitLoad(Node* node) {
  LoadRepresentation load_rep = LoadRepresentationOf(node->op());
  ArmOperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);

  InstructionCode opcode = kArchNop;
  switch (load_rep.representation()) {
    case MachineRepresentation::kFloat32:
      opcode = kArmVldrF32;
      break;
    case MachineRepresentation::kFloat64:
      opcode = kArmVldrF64;
      break;
    case MachineRepresentation::kBit:  // Fall through.
    case MachineRepresentation::kWord8:
      // Signedness selects between zero- and sign-extending byte loads.
      opcode = load_rep.IsUnsigned() ? kArmLdrb : kArmLdrsb;
      break;
    case MachineRepresentation::kWord16:
      opcode = load_rep.IsUnsigned() ? kArmLdrh : kArmLdrsh;
      break;
    case MachineRepresentation::kTaggedSigned:   // Fall through.
    case MachineRepresentation::kTaggedPointer:  // Fall through.
    case MachineRepresentation::kTagged:  // Fall through.
    case MachineRepresentation::kWord32:
      opcode = kArmLdr;
      break;
    case MachineRepresentation::kSimd128:
      opcode = kArmVld1S128;
      break;
    case MachineRepresentation::kWord64:   // Fall through.
    case MachineRepresentation::kNone:
      // 64-bit word loads do not exist on 32-bit Arm.
      UNREACHABLE();
      return;
  }
  // Poisoned loads are routed here from VisitPoisonedLoad; tag the
  // instruction so the code generator applies the poison mask.
  if (node->opcode() == IrOpcode::kPoisonedLoad) {
    CHECK_NE(poisoning_level_, PoisoningMitigationLevel::kDontPoison);
    opcode |= MiscField::encode(kMemoryAccessPoisoned);
  }

  InstructionOperand output = g.DefineAsRegister(node);
  EmitLoad(this, opcode, &output, base, index);
}
    505 
// Poisoned loads share the normal load path; VisitLoad detects the opcode
// and tags the instruction as kMemoryAccessPoisoned.
void InstructionSelector::VisitPoisonedLoad(Node* node) { VisitLoad(node); }
    507 
// Trap-handler-protected loads are not implemented on Arm.
void InstructionSelector::VisitProtectedLoad(Node* node) {
  // TODO(eholk)
  UNIMPLEMENTED();
}
    512 
// Lowers a store. Stores needing a write barrier are emitted as
// kArchStoreWithWriteBarrier with unique registers (the barrier's
// out-of-line code reuses them); plain stores pick the instruction for the
// stored representation and delegate to EmitStore.
void InstructionSelector::VisitStore(Node* node) {
  ArmOperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* value = node->InputAt(2);

  StoreRepresentation store_rep = StoreRepresentationOf(node->op());
  WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
  MachineRepresentation rep = store_rep.representation();

  if (write_barrier_kind != kNoWriteBarrier) {
    DCHECK(CanBeTaggedPointer(rep));
    AddressingMode addressing_mode;
    InstructionOperand inputs[3];
    size_t input_count = 0;
    inputs[input_count++] = g.UseUniqueRegister(base);
    // OutOfLineRecordWrite uses the index in an 'add' instruction as well as
    // for the store itself, so we must check compatibility with both.
    if (g.CanBeImmediate(index, kArmAdd) && g.CanBeImmediate(index, kArmStr)) {
      inputs[input_count++] = g.UseImmediate(index);
      addressing_mode = kMode_Offset_RI;
    } else {
      inputs[input_count++] = g.UseUniqueRegister(index);
      addressing_mode = kMode_Offset_RR;
    }
    inputs[input_count++] = g.UseUniqueRegister(value);
    // Map the barrier kind to the record-write mode encoded in MiscField.
    RecordWriteMode record_write_mode = RecordWriteMode::kValueIsAny;
    switch (write_barrier_kind) {
      case kNoWriteBarrier:
        UNREACHABLE();
        break;
      case kMapWriteBarrier:
        record_write_mode = RecordWriteMode::kValueIsMap;
        break;
      case kPointerWriteBarrier:
        record_write_mode = RecordWriteMode::kValueIsPointer;
        break;
      case kFullWriteBarrier:
        record_write_mode = RecordWriteMode::kValueIsAny;
        break;
    }
    InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
    size_t const temp_count = arraysize(temps);
    InstructionCode code = kArchStoreWithWriteBarrier;
    code |= AddressingModeField::encode(addressing_mode);
    code |= MiscField::encode(static_cast<int>(record_write_mode));
    Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
  } else {
    InstructionCode opcode = kArchNop;
    switch (rep) {
      case MachineRepresentation::kFloat32:
        opcode = kArmVstrF32;
        break;
      case MachineRepresentation::kFloat64:
        opcode = kArmVstrF64;
        break;
      case MachineRepresentation::kBit:  // Fall through.
      case MachineRepresentation::kWord8:
        opcode = kArmStrb;
        break;
      case MachineRepresentation::kWord16:
        opcode = kArmStrh;
        break;
      case MachineRepresentation::kTaggedSigned:   // Fall through.
      case MachineRepresentation::kTaggedPointer:  // Fall through.
      case MachineRepresentation::kTagged:  // Fall through.
      case MachineRepresentation::kWord32:
        opcode = kArmStr;
        break;
      case MachineRepresentation::kSimd128:
        opcode = kArmVst1S128;
        break;
      case MachineRepresentation::kWord64:   // Fall through.
      case MachineRepresentation::kNone:
        // 64-bit word stores do not exist on 32-bit Arm.
        UNREACHABLE();
        return;
    }

    // EmitStore expects value and base pre-filled; it appends the index.
    InstructionOperand inputs[4];
    size_t input_count = 0;
    inputs[input_count++] = g.UseRegister(value);
    inputs[input_count++] = g.UseRegister(base);
    EmitStore(this, opcode, input_count, inputs, index);
  }
}
    598 
// Trap-handler-protected stores are not implemented on Arm.
void InstructionSelector::VisitProtectedStore(Node* node) {
  // TODO(eholk)
  UNIMPLEMENTED();
}
    603 
// Lowers an unaligned load. Integer loads need no special handling on Arm;
// FP/SIMD loads are emulated via integer loads (or vld1 when NEON is
// available).
void InstructionSelector::VisitUnalignedLoad(Node* node) {
  MachineRepresentation load_rep =
      LoadRepresentationOf(node->op()).representation();
  ArmOperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);

  InstructionCode opcode = kArmLdr;
  // Only floating point loads need to be specially handled; integer loads
  // support unaligned access. We support unaligned FP loads by loading to
  // integer registers first, then moving to the destination FP register. If
  // NEON is supported, we use the vld1.8 instruction.
  switch (load_rep) {
    case MachineRepresentation::kFloat32: {
      // A single word load, then a GPR->FPR move.
      InstructionOperand temp = g.TempRegister();
      EmitLoad(this, opcode, &temp, base, index);
      Emit(kArmVmovF32U32, g.DefineAsRegister(node), temp);
      return;
    }
    case MachineRepresentation::kFloat64:
    case MachineRepresentation::kSimd128: {
      // Compute the address of the least-significant byte of the FP value.
      // We assume that the base node is unlikely to be an encodable immediate
      // or the result of a shift operation, so only consider the addressing
      // mode that should be used for the index node.
      InstructionCode add_opcode = kArmAdd;
      InstructionOperand inputs[3];
      inputs[0] = g.UseRegister(base);

      size_t input_count;
      if (TryMatchImmediateOrShift(this, &add_opcode, index, &input_count,
                                   &inputs[1])) {
        // input_count has been set by TryMatchImmediateOrShift(), so
        // increment it to account for the base register in inputs[0].
        input_count++;
      } else {
        add_opcode |= AddressingModeField::encode(kMode_Operand2_R);
        inputs[1] = g.UseRegister(index);
        input_count = 2;  // Base register and index.
      }

      InstructionOperand addr = g.TempRegister();
      Emit(add_opcode, 1, &addr, input_count, inputs);

      if (CpuFeatures::IsSupported(NEON)) {
        // With NEON we can load directly from the calculated address.
        InstructionCode op = load_rep == MachineRepresentation::kFloat64
                                 ? kArmVld1F64
                                 : kArmVld1S128;
        op |= AddressingModeField::encode(kMode_Operand2_R);
        Emit(op, g.DefineAsRegister(node), addr);
      } else {
        // Without NEON, SIMD loads cannot be emulated here.
        DCHECK_NE(MachineRepresentation::kSimd128, load_rep);
        // Load both halves and move to an FP register.
        InstructionOperand fp_lo = g.TempRegister();
        InstructionOperand fp_hi = g.TempRegister();
        opcode |= AddressingModeField::encode(kMode_Offset_RI);
        Emit(opcode, fp_lo, addr, g.TempImmediate(0));
        Emit(opcode, fp_hi, addr, g.TempImmediate(4));
        Emit(kArmVmovF64U32U32, g.DefineAsRegister(node), fp_lo, fp_hi);
      }
      return;
    }
    default:
      // All other cases should support unaligned accesses.
      UNREACHABLE();
      return;
  }
}
    673 
// Selects instructions for an unaligned store. Integer stores support
// unaligned access directly on ARM, so only floating point (and SIMD)
// representations are handled here: the FP value is moved to integer
// registers and stored with plain STRs, or — when NEON is available — stored
// directly with VST1 from a precomputed address.
void InstructionSelector::VisitUnalignedStore(Node* node) {
  ArmOperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* value = node->InputAt(2);

  InstructionOperand inputs[4];
  size_t input_count = 0;

  UnalignedStoreRepresentation store_rep =
      UnalignedStoreRepresentationOf(node->op());

  // Only floating point stores need to be specially handled; integer stores
  // support unaligned access. We support unaligned FP stores by moving the
  // value to integer registers first, then storing to the destination address.
  // If NEON is supported, we use the vst1.8 instruction.
  switch (store_rep) {
    case MachineRepresentation::kFloat32: {
      // Move the 32-bit float into an integer temp, then store it as a word.
      inputs[input_count++] = g.TempRegister();
      Emit(kArmVmovU32F32, inputs[0], g.UseRegister(value));
      inputs[input_count++] = g.UseRegister(base);
      EmitStore(this, kArmStr, input_count, inputs, index);
      return;
    }
    case MachineRepresentation::kFloat64:
    case MachineRepresentation::kSimd128: {
      if (CpuFeatures::IsSupported(NEON)) {
        InstructionOperand address = g.TempRegister();
        {
          // First we have to calculate the actual address.
          // NOTE: the inner |inputs| and |input_count| deliberately shadow the
          // outer ones; the outer |input_count| is still 0 after this scope.
          InstructionCode add_opcode = kArmAdd;
          InstructionOperand inputs[3];
          inputs[0] = g.UseRegister(base);

          size_t input_count;
          if (TryMatchImmediateOrShift(this, &add_opcode, index, &input_count,
                                       &inputs[1])) {
            // input_count has been set by TryMatchImmediateOrShift(), so
            // increment it to account for the base register in inputs[0].
            input_count++;
          } else {
            add_opcode |= AddressingModeField::encode(kMode_Operand2_R);
            inputs[1] = g.UseRegister(index);
            input_count = 2;  // Base register and index.
          }

          Emit(add_opcode, 1, &address, input_count, inputs);
        }

        // With NEON we can store directly from the FP/SIMD register to the
        // computed address.
        inputs[input_count++] = g.UseRegister(value);
        inputs[input_count++] = address;
        InstructionCode op = store_rep == MachineRepresentation::kFloat64
                                 ? kArmVst1F64
                                 : kArmVst1S128;
        op |= AddressingModeField::encode(kMode_Operand2_R);
        Emit(op, 0, nullptr, input_count, inputs);
      } else {
        // Simd128 requires NEON; without it only kFloat64 can reach here.
        DCHECK_NE(MachineRepresentation::kSimd128, store_rep);
        // Store a 64-bit floating point value using two 32-bit integer stores.
        // Computing the store address here would require three live temporary
        // registers (fp<63:32>, fp<31:0>, address), so compute base + 4 after
        // storing the least-significant half of the value.

        // First, move the 64-bit FP value into two temporary integer registers.
        InstructionOperand fp[] = {g.TempRegister(), g.TempRegister()};
        inputs[input_count++] = g.UseRegister(value);
        Emit(kArmVmovU32U32F64, arraysize(fp), fp, input_count, inputs);

        // Store the least-significant half.
        inputs[0] = fp[0];  // Low 32-bits of FP value.
        inputs[input_count++] =
            g.UseRegister(base);  // First store base address.
        EmitStore(this, kArmStr, input_count, inputs, index);

        // Store the most-significant half.
        InstructionOperand base4 = g.TempRegister();
        Emit(kArmAdd | AddressingModeField::encode(kMode_Operand2_I), base4,
             g.UseRegister(base), g.TempImmediate(4));  // Compute base + 4.
        inputs[0] = fp[1];  // High 32-bits of FP value.
        inputs[1] = base4;  // Second store base + 4 address.
        EmitStore(this, kArmStr, input_count, inputs, index);
      }
      return;
    }
    default:
      // All other cases should support unaligned accesses.
      UNREACHABLE();
      return;
  }
}
    764 
    765 namespace {
    766 
    767 void EmitBic(InstructionSelector* selector, Node* node, Node* left,
    768              Node* right) {
    769   ArmOperandGenerator g(selector);
    770   InstructionCode opcode = kArmBic;
    771   InstructionOperand value_operand;
    772   InstructionOperand shift_operand;
    773   if (TryMatchShift(selector, &opcode, right, &value_operand, &shift_operand)) {
    774     selector->Emit(opcode, g.DefineAsRegister(node), g.UseRegister(left),
    775                    value_operand, shift_operand);
    776     return;
    777   }
    778   selector->Emit(opcode | AddressingModeField::encode(kMode_Operand2_R),
    779                  g.DefineAsRegister(node), g.UseRegister(left),
    780                  g.UseRegister(right));
    781 }
    782 
    783 
    784 void EmitUbfx(InstructionSelector* selector, Node* node, Node* left,
    785               uint32_t lsb, uint32_t width) {
    786   DCHECK_LE(1u, width);
    787   DCHECK_LE(width, 32u - lsb);
    788   ArmOperandGenerator g(selector);
    789   selector->Emit(kArmUbfx, g.DefineAsRegister(node), g.UseRegister(left),
    790                  g.TempImmediate(lsb), g.TempImmediate(width));
    791 }
    792 
    793 }  // namespace
    794 
    795 
// Selects instructions for Word32And, trying several ARM-specific fusions
// before falling back to a plain AND:
//   - x & ~y            => BIC
//   - (x >>> s) & 0xFF  => UXTB with rotation (s in {8, 16, 24})
//   - (x >>> s) & 0xFFFF=> UXTH with rotation (s in {8, 16})
//   - shift + contiguous mask => UBFX (ARMv7)
//   - mask with one contiguous run of clear bits => BFC (ARMv7)
void InstructionSelector::VisitWord32And(Node* node) {
  ArmOperandGenerator g(this);
  Int32BinopMatcher m(node);
  // (x ^ -1) is ~x, so (x ^ -1) & y and x & (y ^ -1) become BIC.
  if (m.left().IsWord32Xor() && CanCover(node, m.left().node())) {
    Int32BinopMatcher mleft(m.left().node());
    if (mleft.right().Is(-1)) {
      EmitBic(this, node, m.right().node(), mleft.left().node());
      return;
    }
  }
  if (m.right().IsWord32Xor() && CanCover(node, m.right().node())) {
    Int32BinopMatcher mright(m.right().node());
    if (mright.right().Is(-1)) {
      EmitBic(this, node, m.left().node(), mright.left().node());
      return;
    }
  }
  if (m.right().HasValue()) {
    uint32_t const value = m.right().Value();
    // |width| = number of set bits; |leading_zeros| + |width| == 32 iff the
    // mask is a contiguous run of set bits ending at bit 0.
    uint32_t width = base::bits::CountPopulation(value);
    uint32_t leading_zeros = base::bits::CountLeadingZeros32(value);

    // Try to merge SHR operations on the left hand input into this AND.
    if (m.left().IsWord32Shr()) {
      Int32BinopMatcher mshr(m.left().node());
      if (mshr.right().HasValue()) {
        uint32_t const shift = mshr.right().Value();

        if (((shift == 8) || (shift == 16) || (shift == 24)) &&
            (value == 0xFF)) {
          // Merge SHR into AND by emitting a UXTB instruction with a
          // bytewise rotation.
          Emit(kArmUxtb, g.DefineAsRegister(m.node()),
               g.UseRegister(mshr.left().node()),
               g.TempImmediate(mshr.right().Value()));
          return;
        } else if (((shift == 8) || (shift == 16)) && (value == 0xFFFF)) {
          // Merge SHR into AND by emitting a UXTH instruction with a
          // bytewise rotation.
          Emit(kArmUxth, g.DefineAsRegister(m.node()),
               g.UseRegister(mshr.left().node()),
               g.TempImmediate(mshr.right().Value()));
          return;
        } else if (IsSupported(ARMv7) && (width != 0) &&
                   ((leading_zeros + width) == 32)) {
          // Merge Shr into And by emitting a UBFX instruction.
          DCHECK_EQ(0u, base::bits::CountTrailingZeros32(value));
          if ((1 <= shift) && (shift <= 31)) {
            // UBFX cannot extract bits past the register size, however since
            // shifting the original value would have introduced some zeros we
            // can still use UBFX with a smaller mask and the remaining bits
            // will be zeros.
            EmitUbfx(this, node, mshr.left().node(), shift,
                     std::min(width, 32 - shift));
            return;
          }
        }
      }
    } else if (value == 0xFFFF) {
      // Emit UXTH for this AND. We don't bother testing for UXTB, as it's no
      // better than AND 0xFF for this operation.
      Emit(kArmUxth, g.DefineAsRegister(m.node()),
           g.UseRegister(m.left().node()), g.TempImmediate(0));
      return;
    }
    if (g.CanBeImmediate(~value)) {
      // Emit BIC for this AND by inverting the immediate value first.
      Emit(kArmBic | AddressingModeField::encode(kMode_Operand2_I),
           g.DefineAsRegister(node), g.UseRegister(m.left().node()),
           g.TempImmediate(~value));
      return;
    }
    if (!g.CanBeImmediate(value) && IsSupported(ARMv7)) {
      // If value has 9 to 23 contiguous set bits, and has the lsb set, we can
      // replace this AND with UBFX. Other contiguous bit patterns have already
      // been handled by BIC or will be handled by AND.
      if ((width != 0) && ((leading_zeros + width) == 32) &&
          (9 <= leading_zeros) && (leading_zeros <= 23)) {
        DCHECK_EQ(0u, base::bits::CountTrailingZeros32(value));
        EmitUbfx(this, node, m.left().node(), 0, width);
        return;
      }

      // Reinterpret the mask via its complement: if |value| has exactly one
      // contiguous run of clear bits, a BFC can clear them in place.
      width = 32 - width;  // Now the number of *clear* bits in |value|.
      leading_zeros = base::bits::CountLeadingZeros32(~value);
      uint32_t lsb = base::bits::CountTrailingZeros32(~value);
      if ((leading_zeros + width + lsb) == 32) {
        // This AND can be replaced with BFC.
        Emit(kArmBfc, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
             g.TempImmediate(lsb), g.TempImmediate(width));
        return;
      }
    }
  }
  // No fusion applied; emit a plain AND via the shared binop helper.
  VisitBinop(this, node, kArmAnd, kArmAnd);
}
    892 
    893 
// Word32Or has no special fused form here; emit ORR via the shared binop
// helper (same opcode for the operand-swapped case).
void InstructionSelector::VisitWord32Or(Node* node) {
  VisitBinop(this, node, kArmOrr, kArmOrr);
}
    897 
    898 
    899 void InstructionSelector::VisitWord32Xor(Node* node) {
    900   ArmOperandGenerator g(this);
    901   Int32BinopMatcher m(node);
    902   if (m.right().Is(-1)) {
    903     InstructionCode opcode = kArmMvn;
    904     InstructionOperand value_operand;
    905     InstructionOperand shift_operand;
    906     if (TryMatchShift(this, &opcode, m.left().node(), &value_operand,
    907                       &shift_operand)) {
    908       Emit(opcode, g.DefineAsRegister(node), value_operand, shift_operand);
    909       return;
    910     }
    911     Emit(opcode | AddressingModeField::encode(kMode_Operand2_R),
    912          g.DefineAsRegister(node), g.UseRegister(m.left().node()));
    913     return;
    914   }
    915   VisitBinop(this, node, kArmEor, kArmEor);
    916 }
    917 
    918 
namespace {

// Shared routine for all single-word shift operations. |try_match_shift| must
// recognize |node| (the CHECK enforces that it matched), filling in the
// addressing mode on |opcode| and both instruction inputs. Shifts are encoded
// as a MOV whose operand2 carries the shift.
template <typename TryMatchShift>
void VisitShift(InstructionSelector* selector, Node* node,
                TryMatchShift try_match_shift, FlagsContinuation* cont) {
  ArmOperandGenerator g(selector);
  InstructionCode opcode = kArmMov;
  InstructionOperand inputs[2];
  size_t input_count = 2;
  InstructionOperand outputs[1];
  size_t output_count = 0;

  // The matcher must succeed: callers only pass shift nodes of the matching
  // kind (LSL/LSR/ASR/ROR).
  CHECK(try_match_shift(selector, &opcode, node, &inputs[0], &inputs[1]));

  outputs[output_count++] = g.DefineAsRegister(node);

  DCHECK_NE(0u, input_count);
  DCHECK_NE(0u, output_count);
  DCHECK_GE(arraysize(inputs), input_count);
  DCHECK_GE(arraysize(outputs), output_count);
  DCHECK_NE(kMode_None, AddressingModeField::decode(opcode));

  selector->EmitWithContinuation(opcode, output_count, outputs, input_count,
                                 inputs, cont);
}

// Convenience overload without a flags continuation.
template <typename TryMatchShift>
void VisitShift(InstructionSelector* selector, Node* node,
                TryMatchShift try_match_shift) {
  FlagsContinuation cont;
  VisitShift(selector, node, try_match_shift, &cont);
}

}  // namespace
    954 
    955 
// Word32Shl is emitted as a MOV with an LSL operand2 shift (see VisitShift).
void InstructionSelector::VisitWord32Shl(Node* node) {
  VisitShift(this, node, TryMatchLSL);
}
    959 
    960 
    961 void InstructionSelector::VisitWord32Shr(Node* node) {
    962   ArmOperandGenerator g(this);
    963   Int32BinopMatcher m(node);
    964   if (IsSupported(ARMv7) && m.left().IsWord32And() &&
    965       m.right().IsInRange(0, 31)) {
    966     uint32_t lsb = m.right().Value();
    967     Int32BinopMatcher mleft(m.left().node());
    968     if (mleft.right().HasValue()) {
    969       uint32_t value = (mleft.right().Value() >> lsb) << lsb;
    970       uint32_t width = base::bits::CountPopulation(value);
    971       uint32_t msb = base::bits::CountLeadingZeros32(value);
    972       if (msb + width + lsb == 32) {
    973         DCHECK_EQ(lsb, base::bits::CountTrailingZeros32(value));
    974         return EmitUbfx(this, node, mleft.left().node(), lsb, width);
    975       }
    976     }
    977   }
    978   VisitShift(this, node, TryMatchLSR);
    979 }
    980 
    981 
// Selects instructions for Word32Sar. A shl/sar pair used only here is a
// sign-extending field extract:
//   (x << 16) >> 16 => SXTH, (x << 24) >> 24 => SXTB,
//   general (x << shl) >> sar with sar >= shl => SBFX of (32 - sar) bits
//   starting at bit (sar - shl) on ARMv7.
// Otherwise a plain ASR is emitted via VisitShift.
void InstructionSelector::VisitWord32Sar(Node* node) {
  ArmOperandGenerator g(this);
  Int32BinopMatcher m(node);
  if (CanCover(m.node(), m.left().node()) && m.left().IsWord32Shl()) {
    Int32BinopMatcher mleft(m.left().node());
    if (m.right().HasValue() && mleft.right().HasValue()) {
      uint32_t sar = m.right().Value();
      uint32_t shl = mleft.right().Value();
      if ((sar == shl) && (sar == 16)) {
        // Sign-extend halfword (rotation 0).
        Emit(kArmSxth, g.DefineAsRegister(node),
             g.UseRegister(mleft.left().node()), g.TempImmediate(0));
        return;
      } else if ((sar == shl) && (sar == 24)) {
        // Sign-extend byte (rotation 0).
        Emit(kArmSxtb, g.DefineAsRegister(node),
             g.UseRegister(mleft.left().node()), g.TempImmediate(0));
        return;
      } else if (IsSupported(ARMv7) && (sar >= shl)) {
        // Signed bitfield extract.
        Emit(kArmSbfx, g.DefineAsRegister(node),
             g.UseRegister(mleft.left().node()), g.TempImmediate(sar - shl),
             g.TempImmediate(32 - sar));
        return;
      }
    }
  }
  VisitShift(this, node, TryMatchASR);
}
   1008 
   1009 void InstructionSelector::VisitInt32PairAdd(Node* node) {
   1010   ArmOperandGenerator g(this);
   1011 
   1012   Node* projection1 = NodeProperties::FindProjection(node, 1);
   1013   if (projection1) {
   1014     // We use UseUniqueRegister here to avoid register sharing with the output
   1015     // registers.
   1016     InstructionOperand inputs[] = {
   1017         g.UseRegister(node->InputAt(0)), g.UseUniqueRegister(node->InputAt(1)),
   1018         g.UseRegister(node->InputAt(2)), g.UseUniqueRegister(node->InputAt(3))};
   1019 
   1020     InstructionOperand outputs[] = {
   1021         g.DefineAsRegister(node),
   1022         g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
   1023 
   1024     Emit(kArmAddPair, 2, outputs, 4, inputs);
   1025   } else {
   1026     // The high word of the result is not used, so we emit the standard 32 bit
   1027     // instruction.
   1028     Emit(kArmAdd | AddressingModeField::encode(kMode_Operand2_R),
   1029          g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(0)),
   1030          g.UseRegister(node->InputAt(2)));
   1031   }
   1032 }
   1033 
   1034 void InstructionSelector::VisitInt32PairSub(Node* node) {
   1035   ArmOperandGenerator g(this);
   1036 
   1037   Node* projection1 = NodeProperties::FindProjection(node, 1);
   1038   if (projection1) {
   1039     // We use UseUniqueRegister here to avoid register sharing with the output
   1040     // register.
   1041     InstructionOperand inputs[] = {
   1042         g.UseRegister(node->InputAt(0)), g.UseUniqueRegister(node->InputAt(1)),
   1043         g.UseRegister(node->InputAt(2)), g.UseUniqueRegister(node->InputAt(3))};
   1044 
   1045     InstructionOperand outputs[] = {
   1046         g.DefineAsRegister(node),
   1047         g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
   1048 
   1049     Emit(kArmSubPair, 2, outputs, 4, inputs);
   1050   } else {
   1051     // The high word of the result is not used, so we emit the standard 32 bit
   1052     // instruction.
   1053     Emit(kArmSub | AddressingModeField::encode(kMode_Operand2_R),
   1054          g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(0)),
   1055          g.UseRegister(node->InputAt(2)));
   1056   }
   1057 }
   1058 
   1059 void InstructionSelector::VisitInt32PairMul(Node* node) {
   1060   ArmOperandGenerator g(this);
   1061   Node* projection1 = NodeProperties::FindProjection(node, 1);
   1062   if (projection1) {
   1063     InstructionOperand inputs[] = {g.UseUniqueRegister(node->InputAt(0)),
   1064                                    g.UseUniqueRegister(node->InputAt(1)),
   1065                                    g.UseUniqueRegister(node->InputAt(2)),
   1066                                    g.UseUniqueRegister(node->InputAt(3))};
   1067 
   1068     InstructionOperand outputs[] = {
   1069         g.DefineAsRegister(node),
   1070         g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
   1071 
   1072     Emit(kArmMulPair, 2, outputs, 4, inputs);
   1073   } else {
   1074     // The high word of the result is not used, so we emit the standard 32 bit
   1075     // instruction.
   1076     Emit(kArmMul | AddressingModeField::encode(kMode_Operand2_R),
   1077          g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(0)),
   1078          g.UseRegister(node->InputAt(2)));
   1079   }
   1080 }
   1081 
namespace {
// Shared routine for the pair (64-bit-as-two-words) shift operations.
// Inputs are {low word, high word, shift amount}; the shift amount is an
// immediate when constant, otherwise a unique register.
void VisitWord32PairShift(InstructionSelector* selector, InstructionCode opcode,
                          Node* node) {
  ArmOperandGenerator g(selector);
  // We use g.UseUniqueRegister here to guarantee that there is
  // no register aliasing of input registers with output registers.
  Int32Matcher m(node->InputAt(2));
  InstructionOperand shift_operand;
  if (m.HasValue()) {
    shift_operand = g.UseImmediate(m.node());
  } else {
    shift_operand = g.UseUniqueRegister(m.node());
  }

  InstructionOperand inputs[] = {g.UseUniqueRegister(node->InputAt(0)),
                                 g.UseUniqueRegister(node->InputAt(1)),
                                 shift_operand};

  Node* projection1 = NodeProperties::FindProjection(node, 1);

  InstructionOperand outputs[2];
  InstructionOperand temps[1];
  int32_t output_count = 0;
  int32_t temp_count = 0;

  outputs[output_count++] = g.DefineAsRegister(node);
  if (projection1) {
    outputs[output_count++] = g.DefineAsRegister(projection1);
  } else {
    // The high word of the result is unused; hand the instruction a scratch
    // register instead of a second output.
    temps[temp_count++] = g.TempRegister();
  }

  selector->Emit(opcode, output_count, outputs, 3, inputs, temp_count, temps);
}
}  // namespace
// 64-bit pair shift left.
void InstructionSelector::VisitWord32PairShl(Node* node) {
  VisitWord32PairShift(this, kArmLslPair, node);
}
   1121 
// 64-bit pair logical shift right.
void InstructionSelector::VisitWord32PairShr(Node* node) {
  VisitWord32PairShift(this, kArmLsrPair, node);
}
   1125 
// 64-bit pair arithmetic shift right.
void InstructionSelector::VisitWord32PairSar(Node* node) {
  VisitWord32PairShift(this, kArmAsrPair, node);
}
   1129 
// Word32Ror is emitted as a MOV with a ROR operand2 shift (see VisitShift).
void InstructionSelector::VisitWord32Ror(Node* node) {
  VisitShift(this, node, TryMatchROR);
}
   1133 
// Word32Ctz is not selected on ARM (presumably lowered to other ops before
// instruction selection — cannot confirm from this file alone).
void InstructionSelector::VisitWord32Ctz(Node* node) { UNREACHABLE(); }
   1135 
// Word32ReverseBits maps to RBIT, which requires ARMv7 (hence the DCHECK).
void InstructionSelector::VisitWord32ReverseBits(Node* node) {
  DCHECK(IsSupported(ARMv7));
  VisitRR(this, kArmRbit, node);
}
   1140 
// 64-bit byte reversal is never selected on this 32-bit target.
void InstructionSelector::VisitWord64ReverseBytes(Node* node) { UNREACHABLE(); }
   1142 
// Word32ReverseBytes maps to the REV instruction.
void InstructionSelector::VisitWord32ReverseBytes(Node* node) {
  VisitRR(this, kArmRev, node);
}
   1146 
// Word32Popcnt is not selected on ARM (no direct scalar instruction;
// presumably lowered elsewhere — cannot confirm from this file alone).
void InstructionSelector::VisitWord32Popcnt(Node* node) { UNREACHABLE(); }
   1148 
// Emits a speculation barrier (kArmDsbIsb — by its name, a DSB followed by an
// ISB) with no outputs.
void InstructionSelector::VisitSpeculationFence(Node* node) {
  ArmOperandGenerator g(this);
  Emit(kArmDsbIsb, g.NoOutput());
}
   1153 
// Selects instructions for Int32Add, fusing an operand that is only used by
// this add (CanCover) into a combined instruction. The left-operand and
// right-operand cases below are exact mirrors of each other:
//   (a * b) + c                  => MLA
//   Int32MulHigh(a, b) + c       => SMMLA
//   (a & 0xFF) + c               => UXTAB (zero-extend byte and add)
//   (a & 0xFFFF) + c             => UXTAH (zero-extend halfword and add)
//   ((a << 24) >> 24) + c        => SXTAB (sign-extend byte and add)
//   ((a << 16) >> 16) + c        => SXTAH (sign-extend halfword and add)
// Otherwise a plain ADD is emitted via the shared binop helper.
void InstructionSelector::VisitInt32Add(Node* node) {
  ArmOperandGenerator g(this);
  Int32BinopMatcher m(node);
  if (CanCover(node, m.left().node())) {
    switch (m.left().opcode()) {
      case IrOpcode::kInt32Mul: {
        // (a * b) + c => MLA a, b, c.
        Int32BinopMatcher mleft(m.left().node());
        Emit(kArmMla, g.DefineAsRegister(node),
             g.UseRegister(mleft.left().node()),
             g.UseRegister(mleft.right().node()),
             g.UseRegister(m.right().node()));
        return;
      }
      case IrOpcode::kInt32MulHigh: {
        // MulHigh(a, b) + c => SMMLA a, b, c.
        Int32BinopMatcher mleft(m.left().node());
        Emit(kArmSmmla, g.DefineAsRegister(node),
             g.UseRegister(mleft.left().node()),
             g.UseRegister(mleft.right().node()),
             g.UseRegister(m.right().node()));
        return;
      }
      case IrOpcode::kWord32And: {
        // Byte/halfword masks become extend-and-add (rotation 0).
        Int32BinopMatcher mleft(m.left().node());
        if (mleft.right().Is(0xFF)) {
          Emit(kArmUxtab, g.DefineAsRegister(node),
               g.UseRegister(m.right().node()),
               g.UseRegister(mleft.left().node()), g.TempImmediate(0));
          return;
        } else if (mleft.right().Is(0xFFFF)) {
          Emit(kArmUxtah, g.DefineAsRegister(node),
               g.UseRegister(m.right().node()),
               g.UseRegister(mleft.left().node()), g.TempImmediate(0));
          return;
        }
        break;
      }
      case IrOpcode::kWord32Sar: {
        // Matching shl/sar pairs are sign-extensions; fuse into SXTAB/SXTAH.
        Int32BinopMatcher mleft(m.left().node());
        if (CanCover(mleft.node(), mleft.left().node()) &&
            mleft.left().IsWord32Shl()) {
          Int32BinopMatcher mleftleft(mleft.left().node());
          if (mleft.right().Is(24) && mleftleft.right().Is(24)) {
            Emit(kArmSxtab, g.DefineAsRegister(node),
                 g.UseRegister(m.right().node()),
                 g.UseRegister(mleftleft.left().node()), g.TempImmediate(0));
            return;
          } else if (mleft.right().Is(16) && mleftleft.right().Is(16)) {
            Emit(kArmSxtah, g.DefineAsRegister(node),
                 g.UseRegister(m.right().node()),
                 g.UseRegister(mleftleft.left().node()), g.TempImmediate(0));
            return;
          }
        }
        break;
      }
      default:
        break;
    }
  }
  // Mirror of the cases above with the roles of the operands swapped.
  if (CanCover(node, m.right().node())) {
    switch (m.right().opcode()) {
      case IrOpcode::kInt32Mul: {
        Int32BinopMatcher mright(m.right().node());
        Emit(kArmMla, g.DefineAsRegister(node),
             g.UseRegister(mright.left().node()),
             g.UseRegister(mright.right().node()),
             g.UseRegister(m.left().node()));
        return;
      }
      case IrOpcode::kInt32MulHigh: {
        Int32BinopMatcher mright(m.right().node());
        Emit(kArmSmmla, g.DefineAsRegister(node),
             g.UseRegister(mright.left().node()),
             g.UseRegister(mright.right().node()),
             g.UseRegister(m.left().node()));
        return;
      }
      case IrOpcode::kWord32And: {
        Int32BinopMatcher mright(m.right().node());
        if (mright.right().Is(0xFF)) {
          Emit(kArmUxtab, g.DefineAsRegister(node),
               g.UseRegister(m.left().node()),
               g.UseRegister(mright.left().node()), g.TempImmediate(0));
          return;
        } else if (mright.right().Is(0xFFFF)) {
          Emit(kArmUxtah, g.DefineAsRegister(node),
               g.UseRegister(m.left().node()),
               g.UseRegister(mright.left().node()), g.TempImmediate(0));
          return;
        }
        break;
      }
      case IrOpcode::kWord32Sar: {
        Int32BinopMatcher mright(m.right().node());
        if (CanCover(mright.node(), mright.left().node()) &&
            mright.left().IsWord32Shl()) {
          Int32BinopMatcher mrightleft(mright.left().node());
          if (mright.right().Is(24) && mrightleft.right().Is(24)) {
            Emit(kArmSxtab, g.DefineAsRegister(node),
                 g.UseRegister(m.left().node()),
                 g.UseRegister(mrightleft.left().node()), g.TempImmediate(0));
            return;
          } else if (mright.right().Is(16) && mrightleft.right().Is(16)) {
            Emit(kArmSxtah, g.DefineAsRegister(node),
                 g.UseRegister(m.left().node()),
                 g.UseRegister(mrightleft.left().node()), g.TempImmediate(0));
            return;
          }
        }
        break;
      }
      default:
        break;
    }
  }
  VisitBinop(this, node, kArmAdd, kArmAdd);
}
   1271 
   1272 
   1273 void InstructionSelector::VisitInt32Sub(Node* node) {
   1274   ArmOperandGenerator g(this);
   1275   Int32BinopMatcher m(node);
   1276   if (IsSupported(ARMv7) && m.right().IsInt32Mul() &&
   1277       CanCover(node, m.right().node())) {
   1278     Int32BinopMatcher mright(m.right().node());
   1279     Emit(kArmMls, g.DefineAsRegister(node), g.UseRegister(mright.left().node()),
   1280          g.UseRegister(mright.right().node()), g.UseRegister(m.left().node()));
   1281     return;
   1282   }
   1283   VisitBinop(this, node, kArmSub, kArmRsb);
   1284 }
   1285 
namespace {

// Emits code for Int32MulWithOverflow: a 64-bit SMULL whose low word defines
// this node (|result_operand|) and whose high word goes into a temp, followed
// by CMP temp, result ASR #31. The multiply overflowed 32 bits exactly when
// the high word differs from the sign-extension of the low word; the flags
// continuation consumes that comparison.
void EmitInt32MulWithOverflow(InstructionSelector* selector, Node* node,
                              FlagsContinuation* cont) {
  ArmOperandGenerator g(selector);
  Int32BinopMatcher m(node);
  InstructionOperand result_operand = g.DefineAsRegister(node);
  InstructionOperand temp_operand = g.TempRegister();
  InstructionOperand outputs[] = {result_operand, temp_operand};
  InstructionOperand inputs[] = {g.UseRegister(m.left().node()),
                                 g.UseRegister(m.right().node())};
  selector->Emit(kArmSmull, 2, outputs, 2, inputs);

  // result operand needs shift operator: compare the high word against
  // result >> 31 (arithmetic) using the operand2 ASR-immediate mode.
  InstructionOperand shift_31 = g.UseImmediate(31);
  InstructionCode opcode =
      kArmCmp | AddressingModeField::encode(kMode_Operand2_R_ASR_I);
  selector->EmitWithContinuation(opcode, temp_operand, result_operand, shift_31,
                                 cont);
}

}  // namespace
   1308 
// Selects instructions for Int32Mul, strength-reducing multiplications by
// constants adjacent to a power of two:
//   x * (2^k + 1) => ADD x, x LSL #k
//   x * (2^k - 1) => RSB x, x LSL #k   (i.e. (x << k) - x)
// Everything else becomes a plain MUL.
void InstructionSelector::VisitInt32Mul(Node* node) {
  ArmOperandGenerator g(this);
  Int32BinopMatcher m(node);
  if (m.right().HasValue() && m.right().Value() > 0) {
    int32_t value = m.right().Value();
    if (base::bits::IsPowerOfTwo(value - 1)) {
      // value == 2^k + 1.
      Emit(kArmAdd | AddressingModeField::encode(kMode_Operand2_R_LSL_I),
           g.DefineAsRegister(node), g.UseRegister(m.left().node()),
           g.UseRegister(m.left().node()),
           g.TempImmediate(WhichPowerOf2(value - 1)));
      return;
    }
    // The kMaxInt guard keeps |value + 1| from overflowing.
    if (value < kMaxInt && base::bits::IsPowerOfTwo(value + 1)) {
      // value == 2^k - 1; RSB computes (x LSL k) - x.
      Emit(kArmRsb | AddressingModeField::encode(kMode_Operand2_R_LSL_I),
           g.DefineAsRegister(node), g.UseRegister(m.left().node()),
           g.UseRegister(m.left().node()),
           g.TempImmediate(WhichPowerOf2(value + 1)));
      return;
    }
  }
  VisitRRR(this, kArmMul, node);
}
   1331 
   1332 void InstructionSelector::VisitUint32MulHigh(Node* node) {
   1333   ArmOperandGenerator g(this);
   1334   InstructionOperand outputs[] = {g.TempRegister(), g.DefineAsRegister(node)};
   1335   InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0)),
   1336                                  g.UseRegister(node->InputAt(1))};
   1337   Emit(kArmUmull, arraysize(outputs), outputs, arraysize(inputs), inputs);
   1338 }
   1339 
   1340 
// Signed 32-bit division. The conversion opcodes are passed for VisitDiv's
// FP-based fallback path (presumably used when SDIV is unavailable — the
// selection logic lives in VisitDiv, outside this view).
void InstructionSelector::VisitInt32Div(Node* node) {
  VisitDiv(this, node, kArmSdiv, kArmVcvtF64S32, kArmVcvtS32F64);
}
   1344 
   1345 
// Unsigned 32-bit division; unsigned counterparts of the VisitInt32Div
// opcodes.
void InstructionSelector::VisitUint32Div(Node* node) {
  VisitDiv(this, node, kArmUdiv, kArmVcvtF64U32, kArmVcvtU32F64);
}
   1349 
   1350 
// Signed 32-bit modulus, built by VisitMod on top of the same opcodes as
// VisitInt32Div.
void InstructionSelector::VisitInt32Mod(Node* node) {
  VisitMod(this, node, kArmSdiv, kArmVcvtF64S32, kArmVcvtS32F64);
}
   1354 
   1355 
// Unsigned 32-bit modulus, built by VisitMod on top of the same opcodes as
// VisitUint32Div.
void InstructionSelector::VisitUint32Mod(Node* node) {
  VisitMod(this, node, kArmUdiv, kArmVcvtF64U32, kArmVcvtU32F64);
}
   1359 
// Single-input, single-output operations that map onto one opcode each,
// emitted via VisitRR. (No comments inside the macro bodies: '//' before a
// '\' continuation would splice the comment over the rest of the list.)
#define RR_OP_LIST(V)                                \
  V(Word32Clz, kArmClz)                              \
  V(ChangeFloat32ToFloat64, kArmVcvtF64F32)          \
  V(RoundInt32ToFloat32, kArmVcvtF32S32)             \
  V(RoundUint32ToFloat32, kArmVcvtF32U32)            \
  V(ChangeInt32ToFloat64, kArmVcvtF64S32)            \
  V(ChangeUint32ToFloat64, kArmVcvtF64U32)           \
  V(TruncateFloat32ToInt32, kArmVcvtS32F32)          \
  V(TruncateFloat32ToUint32, kArmVcvtU32F32)         \
  V(ChangeFloat64ToInt32, kArmVcvtS32F64)            \
  V(ChangeFloat64ToUint32, kArmVcvtU32F64)           \
  V(TruncateFloat64ToUint32, kArmVcvtU32F64)         \
  V(TruncateFloat64ToFloat32, kArmVcvtF32F64)        \
  V(TruncateFloat64ToWord32, kArchTruncateDoubleToI) \
  V(RoundFloat64ToInt32, kArmVcvtS32F64)             \
  V(BitcastFloat32ToInt32, kArmVmovU32F32)           \
  V(BitcastInt32ToFloat32, kArmVmovF32U32)           \
  V(Float64ExtractLowWord32, kArmVmovLowU32F64)      \
  V(Float64ExtractHighWord32, kArmVmovHighU32F64)    \
  V(Float64SilenceNaN, kArmFloat64SilenceNaN)        \
  V(Float32Abs, kArmVabsF32)                         \
  V(Float64Abs, kArmVabsF64)                         \
  V(Float32Neg, kArmVnegF32)                         \
  V(Float64Neg, kArmVnegF64)                         \
  V(Float32Sqrt, kArmVsqrtF32)                       \
  V(Float64Sqrt, kArmVsqrtF64)

// Rounding operations whose generated visitors DCHECK for ARMv8 support.
#define RR_OP_LIST_V8(V)                 \
  V(Float32RoundDown, kArmVrintmF32)     \
  V(Float64RoundDown, kArmVrintmF64)     \
  V(Float32RoundUp, kArmVrintpF32)       \
  V(Float64RoundUp, kArmVrintpF64)       \
  V(Float32RoundTruncate, kArmVrintzF32) \
  V(Float64RoundTruncate, kArmVrintzF64) \
  V(Float64RoundTiesAway, kArmVrintaF64) \
  V(Float32RoundTiesEven, kArmVrintnF32) \
  V(Float64RoundTiesEven, kArmVrintnF64)

// Two-input, one-output operations emitted via VisitRRR.
#define RRR_OP_LIST(V)          \
  V(Int32MulHigh, kArmSmmul)    \
  V(Float32Mul, kArmVmulF32)    \
  V(Float64Mul, kArmVmulF64)    \
  V(Float32Div, kArmVdivF32)    \
  V(Float64Div, kArmVdivF64)    \
  V(Float32Max, kArmFloat32Max) \
  V(Float64Max, kArmFloat64Max) \
  V(Float32Min, kArmFloat32Min) \
  V(Float64Min, kArmFloat64Min)

// Expand InstructionSelector::Visit<Name> definitions for the RR operations.
#define RR_VISITOR(Name, opcode)                      \
  void InstructionSelector::Visit##Name(Node* node) { \
    VisitRR(this, opcode, node);                      \
  }
RR_OP_LIST(RR_VISITOR)
#undef RR_VISITOR
#undef RR_OP_LIST

// Expand visitors for the ARMv8-only rounding operations.
#define RR_VISITOR_V8(Name, opcode)                   \
  void InstructionSelector::Visit##Name(Node* node) { \
    DCHECK(CpuFeatures::IsSupported(ARMv8));          \
    VisitRR(this, opcode, node);                      \
  }
RR_OP_LIST_V8(RR_VISITOR_V8)
#undef RR_VISITOR_V8
#undef RR_OP_LIST_V8

// Expand visitors for the RRR operations.
#define RRR_VISITOR(Name, opcode)                     \
  void InstructionSelector::Visit##Name(Node* node) { \
    VisitRRR(this, opcode, node);                     \
  }
RRR_OP_LIST(RRR_VISITOR)
#undef RRR_VISITOR
#undef RRR_OP_LIST
   1433 
   1434 void InstructionSelector::VisitFloat32Add(Node* node) {
   1435   ArmOperandGenerator g(this);
   1436   Float32BinopMatcher m(node);
   1437   if (m.left().IsFloat32Mul() && CanCover(node, m.left().node())) {
   1438     Float32BinopMatcher mleft(m.left().node());
   1439     Emit(kArmVmlaF32, g.DefineSameAsFirst(node),
   1440          g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
   1441          g.UseRegister(mleft.right().node()));
   1442     return;
   1443   }
   1444   if (m.right().IsFloat32Mul() && CanCover(node, m.right().node())) {
   1445     Float32BinopMatcher mright(m.right().node());
   1446     Emit(kArmVmlaF32, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
   1447          g.UseRegister(mright.left().node()),
   1448          g.UseRegister(mright.right().node()));
   1449     return;
   1450   }
   1451   VisitRRR(this, kArmVaddF32, node);
   1452 }
   1453 
   1454 
   1455 void InstructionSelector::VisitFloat64Add(Node* node) {
   1456   ArmOperandGenerator g(this);
   1457   Float64BinopMatcher m(node);
   1458   if (m.left().IsFloat64Mul() && CanCover(node, m.left().node())) {
   1459     Float64BinopMatcher mleft(m.left().node());
   1460     Emit(kArmVmlaF64, g.DefineSameAsFirst(node),
   1461          g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
   1462          g.UseRegister(mleft.right().node()));
   1463     return;
   1464   }
   1465   if (m.right().IsFloat64Mul() && CanCover(node, m.right().node())) {
   1466     Float64BinopMatcher mright(m.right().node());
   1467     Emit(kArmVmlaF64, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
   1468          g.UseRegister(mright.left().node()),
   1469          g.UseRegister(mright.right().node()));
   1470     return;
   1471   }
   1472   VisitRRR(this, kArmVaddF64, node);
   1473 }
   1474 
   1475 void InstructionSelector::VisitFloat32Sub(Node* node) {
   1476   ArmOperandGenerator g(this);
   1477   Float32BinopMatcher m(node);
   1478   if (m.right().IsFloat32Mul() && CanCover(node, m.right().node())) {
   1479     Float32BinopMatcher mright(m.right().node());
   1480     Emit(kArmVmlsF32, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
   1481          g.UseRegister(mright.left().node()),
   1482          g.UseRegister(mright.right().node()));
   1483     return;
   1484   }
   1485   VisitRRR(this, kArmVsubF32, node);
   1486 }
   1487 
   1488 void InstructionSelector::VisitFloat64Sub(Node* node) {
   1489   ArmOperandGenerator g(this);
   1490   Float64BinopMatcher m(node);
   1491   if (m.right().IsFloat64Mul() && CanCover(node, m.right().node())) {
   1492     Float64BinopMatcher mright(m.right().node());
   1493     Emit(kArmVmlsF64, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
   1494          g.UseRegister(mright.left().node()),
   1495          g.UseRegister(mright.right().node()));
   1496     return;
   1497   }
   1498   VisitRRR(this, kArmVsubF64, node);
   1499 }
   1500 
// Float64 modulus is lowered to a call: both operands are fixed to d0/d1 and
// the result to d0, and the instruction is marked as a call so the register
// allocator treats the clobbered state accordingly.
void InstructionSelector::VisitFloat64Mod(Node* node) {
  ArmOperandGenerator g(this);
  Emit(kArmVmodF64, g.DefineAsFixed(node, d0), g.UseFixed(node->InputAt(0), d0),
       g.UseFixed(node->InputAt(1), d1))->MarkAsCall();
}
   1506 
// Lowers a binary IEEE-754 math operation to a call with a fixed
// double-register convention: inputs in d0/d1, result in d0.
void InstructionSelector::VisitFloat64Ieee754Binop(Node* node,
                                                   InstructionCode opcode) {
  ArmOperandGenerator g(this);
  Emit(opcode, g.DefineAsFixed(node, d0), g.UseFixed(node->InputAt(0), d0),
       g.UseFixed(node->InputAt(1), d1))
      ->MarkAsCall();
}
   1514 
// Lowers a unary IEEE-754 math operation to a call with a fixed
// double-register convention: input in d0, result in d0.
void InstructionSelector::VisitFloat64Ieee754Unop(Node* node,
                                                  InstructionCode opcode) {
  ArmOperandGenerator g(this);
  Emit(opcode, g.DefineAsFixed(node, d0), g.UseFixed(node->InputAt(0), d0))
      ->MarkAsCall();
}
   1521 
// Emits the instructions that place outgoing call arguments into their
// expected locations, before the call instruction itself is generated.
void InstructionSelector::EmitPrepareArguments(
    ZoneVector<PushParameter>* arguments, const CallDescriptor* call_descriptor,
    Node* node) {
  ArmOperandGenerator g(this);

  // Prepare for C function call.
  if (call_descriptor->IsCFunctionCall()) {
    // Reserve stack space for all C parameters up front...
    Emit(kArchPrepareCallCFunction | MiscField::encode(static_cast<int>(
                                         call_descriptor->ParameterCount())),
         0, nullptr, 0, nullptr);

    // Poke any stack arguments.
    // ...then store each present argument into its reserved slot {n}.
    for (size_t n = 0; n < arguments->size(); ++n) {
      PushParameter input = (*arguments)[n];
      if (input.node) {
        int slot = static_cast<int>(n);
        Emit(kArmPoke | MiscField::encode(slot), g.NoOutput(),
             g.UseRegister(input.node));
      }
    }
  } else {
    // Push any stack arguments.
    // Iterating the arguments reversed means the last argument is pushed
    // first and the first argument last.
    for (PushParameter input : base::Reversed(*arguments)) {
      // Skip any alignment holes in pushed nodes.
      if (input.node == nullptr) continue;
      Emit(kArmPush, g.NoOutput(), g.UseRegister(input.node));
    }
  }
}
   1551 
// Emits kArmPeek instructions to read call results that were returned in
// caller frame slots (rather than registers) into virtual registers.
void InstructionSelector::EmitPrepareResults(
    ZoneVector<PushParameter>* results, const CallDescriptor* call_descriptor,
    Node* node) {
  ArmOperandGenerator g(this);

  int reverse_slot = 0;
  for (PushParameter output : *results) {
    // Register-allocated results need no explicit load.
    if (!output.location.IsCallerFrameSlot()) continue;
    // Skip any alignment holes in nodes.
    if (output.node != nullptr) {
      DCHECK(!call_descriptor->IsCFunctionCall());
      if (output.location.GetType() == MachineType::Float32()) {
        MarkAsFloat32(output.node);
      } else if (output.location.GetType() == MachineType::Float64()) {
        MarkAsFloat64(output.node);
      }
      Emit(kArmPeek, g.DefineAsRegister(output.node),
           g.UseImmediate(reverse_slot));
    }
    // Slots are counted even for skipped holes so later offsets stay correct.
    reverse_slot += output.location.GetSizeInPointers();
  }
}
   1574 
// Tail-call target addresses are not encoded as immediates on ARM.
bool InstructionSelector::IsTailCallAddressImmediate() { return false; }

// NOTE(review): the number of temporaries needed by the JS-function tail-call
// sequence; presumably matches the ARM code generator's frame shuffling —
// confirm there before changing.
int InstructionSelector::GetTempsCountForTailCallFromJSFunction() { return 3; }
   1578 
   1579 namespace {
   1580 
   1581 // Shared routine for multiple compare operations.
void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
                  InstructionOperand left, InstructionOperand right,
                  FlagsContinuation* cont) {
  // The continuation encodes how the resulting flags are consumed
  // (branch, materialized boolean, deoptimization, ...).
  selector->EmitWithContinuation(opcode, left, right, cont);
}
   1587 
   1588 
   1589 // Shared routine for multiple float32 compare operations.
   1590 void VisitFloat32Compare(InstructionSelector* selector, Node* node,
   1591                          FlagsContinuation* cont) {
   1592   ArmOperandGenerator g(selector);
   1593   Float32BinopMatcher m(node);
   1594   if (m.right().Is(0.0f)) {
   1595     VisitCompare(selector, kArmVcmpF32, g.UseRegister(m.left().node()),
   1596                  g.UseImmediate(m.right().node()), cont);
   1597   } else if (m.left().Is(0.0f)) {
   1598     cont->Commute();
   1599     VisitCompare(selector, kArmVcmpF32, g.UseRegister(m.right().node()),
   1600                  g.UseImmediate(m.left().node()), cont);
   1601   } else {
   1602     VisitCompare(selector, kArmVcmpF32, g.UseRegister(m.left().node()),
   1603                  g.UseRegister(m.right().node()), cont);
   1604   }
   1605 }
   1606 
   1607 
   1608 // Shared routine for multiple float64 compare operations.
   1609 void VisitFloat64Compare(InstructionSelector* selector, Node* node,
   1610                          FlagsContinuation* cont) {
   1611   ArmOperandGenerator g(selector);
   1612   Float64BinopMatcher m(node);
   1613   if (m.right().Is(0.0)) {
   1614     VisitCompare(selector, kArmVcmpF64, g.UseRegister(m.left().node()),
   1615                  g.UseImmediate(m.right().node()), cont);
   1616   } else if (m.left().Is(0.0)) {
   1617     cont->Commute();
   1618     VisitCompare(selector, kArmVcmpF64, g.UseRegister(m.right().node()),
   1619                  g.UseImmediate(m.left().node()), cont);
   1620   } else {
   1621     VisitCompare(selector, kArmVcmpF64, g.UseRegister(m.left().node()),
   1622                  g.UseRegister(m.right().node()), cont);
   1623   }
   1624 }
   1625 
   1626 // Check whether we can convert:
   1627 // ((a <op> b) cmp 0), b.<cond>
   1628 // to:
   1629 // (a <ops> b), b.<cond'>
   1630 // where <ops> is the flag setting version of <op>.
   1631 // We only generate conditions <cond'> that are a combination of the N
   1632 // and Z flags. This avoids the need to make this function dependent on
   1633 // the flag-setting operation.
   1634 bool CanUseFlagSettingBinop(FlagsCondition cond) {
   1635   switch (cond) {
   1636     case kEqual:
   1637     case kNotEqual:
   1638     case kSignedLessThan:
   1639     case kSignedGreaterThanOrEqual:
   1640     case kUnsignedLessThanOrEqual:  // x <= 0 -> x == 0
   1641     case kUnsignedGreaterThan:      // x > 0 -> x != 0
   1642       return true;
   1643     default:
   1644       return false;
   1645   }
   1646 }
   1647 
   1648 // Map <cond> to <cond'> so that the following transformation is possible:
   1649 // ((a <op> b) cmp 0), b.<cond>
   1650 // to:
   1651 // (a <ops> b), b.<cond'>
   1652 // where <ops> is the flag setting version of <op>.
FlagsCondition MapForFlagSettingBinop(FlagsCondition cond) {
  DCHECK(CanUseFlagSettingBinop(cond));
  switch (cond) {
    case kEqual:
    case kNotEqual:
      // The Z flag is produced identically by the flag-setting binop.
      return cond;
    case kSignedLessThan:
      // A signed comparison against zero reduces to testing the N flag.
      return kNegative;
    case kSignedGreaterThanOrEqual:
      return kPositiveOrZero;
    case kUnsignedLessThanOrEqual:  // x <= 0 -> x == 0
      return kEqual;
    case kUnsignedGreaterThan:  // x > 0 -> x != 0
      return kNotEqual;
    default:
      UNREACHABLE();
  }
}
   1671 
   1672 // Check if we can perform the transformation:
   1673 // ((a <op> b) cmp 0), b.<cond>
   1674 // to:
   1675 // (a <ops> b), b.<cond'>
   1676 // where <ops> is the flag setting version of <op>, and if so,
   1677 // updates {node}, {opcode} and {cont} accordingly.
void MaybeReplaceCmpZeroWithFlagSettingBinop(InstructionSelector* selector,
                                             Node** node, Node* binop,
                                             InstructionCode* opcode,
                                             FlagsCondition cond,
                                             FlagsContinuation* cont) {
  InstructionCode binop_opcode;      // flag-setting form that keeps its result
  InstructionCode no_output_opcode;  // flag-setting form without a result
  switch (binop->opcode()) {
    case IrOpcode::kInt32Add:
      binop_opcode = kArmAdd;
      no_output_opcode = kArmCmn;
      break;
    case IrOpcode::kWord32And:
      binop_opcode = kArmAnd;
      no_output_opcode = kArmTst;
      break;
    case IrOpcode::kWord32Or:
      binop_opcode = kArmOrr;
      // NOTE(review): there is no tst/teq-style result-less OR compare, so
      // the result-producing kArmOrr is used for both variants.
      no_output_opcode = kArmOrr;
      break;
    case IrOpcode::kWord32Xor:
      binop_opcode = kArmEor;
      no_output_opcode = kArmTeq;
      break;
    default:
      UNREACHABLE();
      return;
  }
  if (selector->CanCover(*node, binop)) {
    // The comparison is the only user of {node}.
    // The binop's value is otherwise unused, so emit the result-less form.
    cont->Overwrite(MapForFlagSettingBinop(cond));
    *opcode = no_output_opcode;
    *node = binop;
  } else if (selector->IsOnlyUserOfNodeInSameBlock(*node, binop)) {
    // We can also handle the case where the {node} and the comparison are in
    // the same basic block, and the comparison is the only user of {node} in
    // this basic block ({node} has users in other basic blocks).
    // The binop's result is still needed, so emit the result-producing form.
    cont->Overwrite(MapForFlagSettingBinop(cond));
    *opcode = binop_opcode;
    *node = binop;
  }
}
   1720 
// Shared routine for multiple word compare operations.
void VisitWordCompare(InstructionSelector* selector, Node* node,
                      InstructionCode opcode, FlagsContinuation* cont) {
  ArmOperandGenerator g(selector);
  Int32BinopMatcher m(node);
  InstructionOperand inputs[3];
  size_t input_count = 0;
  InstructionOperand outputs[2];
  size_t output_count = 0;
  // Flag-setting binops (add/and/orr/eor) also write a result register; the
  // pure compares (cmp/cmn/tst/teq) only set flags.
  bool has_result = (opcode != kArmCmp) && (opcode != kArmCmn) &&
                    (opcode != kArmTst) && (opcode != kArmTeq);

  // Try to encode the right operand as an immediate or shifted register
  // (written into inputs[1..] by the matcher); failing that, try the left
  // operand with a commuted continuation; otherwise use plain registers.
  if (TryMatchImmediateOrShift(selector, &opcode, m.right().node(),
                               &input_count, &inputs[1])) {
    inputs[0] = g.UseRegisterOrStackPointer(m.left().node());
    input_count++;
  } else if (TryMatchImmediateOrShift(selector, &opcode, m.left().node(),
                                      &input_count, &inputs[1])) {
    if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
    inputs[0] = g.UseRegisterOrStackPointer(m.right().node());
    input_count++;
  } else {
    opcode |= AddressingModeField::encode(kMode_Operand2_R);
    inputs[input_count++] = g.UseRegisterOrStackPointer(m.left().node());
    inputs[input_count++] = g.UseRegisterOrStackPointer(m.right().node());
  }

  if (has_result) {
    if (cont->IsDeoptimize()) {
      // If we can deoptimize as a result of the binop, we need to make sure
      // that the deopt inputs are not overwritten by the binop result. One way
      // to achieve that is to declare the output register as same-as-first.
      outputs[output_count++] = g.DefineSameAsFirst(node);
    } else {
      outputs[output_count++] = g.DefineAsRegister(node);
    }
  }

  DCHECK_NE(0u, input_count);
  DCHECK_GE(arraysize(inputs), input_count);
  DCHECK_GE(arraysize(outputs), output_count);

  selector->EmitWithContinuation(opcode, output_count, outputs, input_count,
                                 inputs, cont);
}
   1766 
   1767 
// Lowers a generic word comparison, first attempting to replace a compare of
// a binop (add/or/and/xor) against zero with the flag-setting version of
// that binop (see MaybeReplaceCmpZeroWithFlagSettingBinop above).
void VisitWordCompare(InstructionSelector* selector, Node* node,
                      FlagsContinuation* cont) {
  InstructionCode opcode = kArmCmp;
  Int32BinopMatcher m(node);

  FlagsCondition cond = cont->condition();
  if (m.right().Is(0) && (m.left().IsInt32Add() || m.left().IsWord32Or() ||
                          m.left().IsWord32And() || m.left().IsWord32Xor())) {
    // Emit flag setting instructions for comparisons against zero.
    if (CanUseFlagSettingBinop(cond)) {
      Node* binop = m.left().node();
      MaybeReplaceCmpZeroWithFlagSettingBinop(selector, &node, binop, &opcode,
                                              cond, cont);
    }
  } else if (m.left().Is(0) &&
             (m.right().IsInt32Add() || m.right().IsWord32Or() ||
              m.right().IsWord32And() || m.right().IsWord32Xor())) {
    // Same as above, but we need to commute the condition before we
    // continue with the rest of the checks.
    cond = CommuteFlagsCondition(cond);
    if (CanUseFlagSettingBinop(cond)) {
      Node* binop = m.right().node();
      MaybeReplaceCmpZeroWithFlagSettingBinop(selector, &node, binop, &opcode,
                                              cond, cont);
    }
  }

  VisitWordCompare(selector, node, opcode, cont);
}
   1797 
   1798 }  // namespace
   1799 
// Shared routine for word comparisons against zero.
void InstructionSelector::VisitWordCompareZero(Node* user, Node* value,
                                               FlagsContinuation* cont) {
  // Try to combine with comparisons against 0 by simply inverting the branch.
  // Each peeled (x == 0) negates the continuation, so chains of equality
  // checks against zero collapse into a single (possibly negated) test of x.
  while (value->opcode() == IrOpcode::kWord32Equal && CanCover(user, value)) {
    Int32BinopMatcher m(value);
    if (!m.right().Is(0)) break;

    user = value;
    value = m.left().node();
    cont->Negate();
  }

  // If the comparison can consume {value} directly, fuse the flag-producing
  // node with the continuation instead of materializing a boolean first.
  if (CanCover(user, value)) {
    switch (value->opcode()) {
      case IrOpcode::kWord32Equal:
        cont->OverwriteAndNegateIfEqual(kEqual);
        return VisitWordCompare(this, value, cont);
      case IrOpcode::kInt32LessThan:
        cont->OverwriteAndNegateIfEqual(kSignedLessThan);
        return VisitWordCompare(this, value, cont);
      case IrOpcode::kInt32LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
        return VisitWordCompare(this, value, cont);
      case IrOpcode::kUint32LessThan:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
        return VisitWordCompare(this, value, cont);
      case IrOpcode::kUint32LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
        return VisitWordCompare(this, value, cont);
      case IrOpcode::kFloat32Equal:
        cont->OverwriteAndNegateIfEqual(kEqual);
        return VisitFloat32Compare(this, value, cont);
      case IrOpcode::kFloat32LessThan:
        cont->OverwriteAndNegateIfEqual(kFloatLessThan);
        return VisitFloat32Compare(this, value, cont);
      case IrOpcode::kFloat32LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kFloatLessThanOrEqual);
        return VisitFloat32Compare(this, value, cont);
      case IrOpcode::kFloat64Equal:
        cont->OverwriteAndNegateIfEqual(kEqual);
        return VisitFloat64Compare(this, value, cont);
      case IrOpcode::kFloat64LessThan:
        cont->OverwriteAndNegateIfEqual(kFloatLessThan);
        return VisitFloat64Compare(this, value, cont);
      case IrOpcode::kFloat64LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kFloatLessThanOrEqual);
        return VisitFloat64Compare(this, value, cont);
      case IrOpcode::kProjection:
        // Check if this is the overflow output projection of an
        // <Operation>WithOverflow node.
        if (ProjectionIndexOf(value->op()) == 1u) {
          // We cannot combine the <Operation>WithOverflow with this branch
          // unless the 0th projection (the use of the actual value of the
          // <Operation> is either nullptr, which means there's no use of the
          // actual value, or was already defined, which means it is scheduled
          // *AFTER* this branch).
          Node* const node = value->InputAt(0);
          Node* const result = NodeProperties::FindProjection(node, 0);
          if (!result || IsDefined(result)) {
            switch (node->opcode()) {
              case IrOpcode::kInt32AddWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop(this, node, kArmAdd, kArmAdd, cont);
              case IrOpcode::kInt32SubWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop(this, node, kArmSub, kArmRsb, cont);
              case IrOpcode::kInt32MulWithOverflow:
                // ARM doesn't set the overflow flag for multiplication, so we
                // need to test on kNotEqual. Here is the code sequence used:
                //   smull resultlow, resulthigh, left, right
                //   cmp resulthigh, Operand(resultlow, ASR, 31)
                cont->OverwriteAndNegateIfEqual(kNotEqual);
                return EmitInt32MulWithOverflow(this, node, cont);
              default:
                break;
            }
          }
        }
        break;
      // A binop compared against zero can set the flags itself; see the
      // matching opcodes in MaybeReplaceCmpZeroWithFlagSettingBinop.
      case IrOpcode::kInt32Add:
        return VisitWordCompare(this, value, kArmCmn, cont);
      case IrOpcode::kInt32Sub:
        return VisitWordCompare(this, value, kArmCmp, cont);
      case IrOpcode::kWord32And:
        return VisitWordCompare(this, value, kArmTst, cont);
      case IrOpcode::kWord32Or:
        return VisitBinop(this, value, kArmOrr, kArmOrr, cont);
      case IrOpcode::kWord32Xor:
        return VisitWordCompare(this, value, kArmTeq, cont);
      case IrOpcode::kWord32Sar:
        return VisitShift(this, value, TryMatchASR, cont);
      case IrOpcode::kWord32Shl:
        return VisitShift(this, value, TryMatchLSL, cont);
      case IrOpcode::kWord32Shr:
        return VisitShift(this, value, TryMatchLSR, cont);
      case IrOpcode::kWord32Ror:
        return VisitShift(this, value, TryMatchROR, cont);
      default:
        break;
    }
  }

  // NOTE(review): reached when the zero-equality above could not be fused
  // into {value}; lower the original equality as an ordinary word compare.
  if (user->opcode() == IrOpcode::kWord32Equal) {
    return VisitWordCompare(this, user, cont);
  }

  // Continuation could not be combined with a compare, emit compare against 0.
  // "tst value, value" sets the flags from the value itself.
  ArmOperandGenerator g(this);
  InstructionCode const opcode =
      kArmTst | AddressingModeField::encode(kMode_Operand2_R);
  InstructionOperand const value_operand = g.UseRegister(value);
  EmitWithContinuation(opcode, value_operand, value_operand, cont);
}
   1914 
void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
  ArmOperandGenerator g(this);
  InstructionOperand value_operand = g.UseRegister(node->InputAt(0));

  // Emit either ArchTableSwitch or ArchLookupSwitch.
  if (enable_switch_jump_table_ == kEnableSwitchJumpTable) {
    static const size_t kMaxTableSwitchValueRange = 2 << 16;
    // Rough space/time cost estimates used to choose between a jump table
    // and a binary search over the cases.
    size_t table_space_cost = 4 + sw.value_range();
    size_t table_time_cost = 3;
    size_t lookup_space_cost = 3 + 2 * sw.case_count();
    size_t lookup_time_cost = sw.case_count();
    if (sw.case_count() > 0 &&
        table_space_cost + 3 * table_time_cost <=
            lookup_space_cost + 3 * lookup_time_cost &&
        sw.min_value() > std::numeric_limits<int32_t>::min() &&
        sw.value_range() <= kMaxTableSwitchValueRange) {
      InstructionOperand index_operand = value_operand;
      if (sw.min_value()) {
        // Rebase the switch value so the jump table is indexed from zero.
        index_operand = g.TempRegister();
        Emit(kArmSub | AddressingModeField::encode(kMode_Operand2_I),
             index_operand, value_operand, g.TempImmediate(sw.min_value()));
      }
      // Generate a table lookup.
      return EmitTableSwitch(sw, index_operand);
    }
  }

  // Generate a tree of conditional jumps.
  return EmitBinarySearchSwitch(sw, value_operand);
}
   1945 
   1946 void InstructionSelector::VisitWord32Equal(Node* const node) {
   1947   FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
   1948   Int32BinopMatcher m(node);
   1949   if (m.right().Is(0)) {
   1950     return VisitWordCompareZero(m.node(), m.left().node(), &cont);
   1951   }
   1952   VisitWordCompare(this, node, &cont);
   1953 }
   1954 
   1955 
// The integer relational visitors all materialize a boolean: each builds a
// "set" continuation with the matching condition and defers to the shared
// word-compare lowering.
void InstructionSelector::VisitInt32LessThan(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
  VisitWordCompare(this, node, &cont);
}


void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
  VisitWordCompare(this, node, &cont);
}


void InstructionSelector::VisitUint32LessThan(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
  VisitWordCompare(this, node, &cont);
}


void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
  VisitWordCompare(this, node, &cont);
}
   1980 
   1981 
   1982 void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
   1983   if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
   1984     FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
   1985     return VisitBinop(this, node, kArmAdd, kArmAdd, &cont);
   1986   }
   1987   FlagsContinuation cont;
   1988   VisitBinop(this, node, kArmAdd, kArmAdd, &cont);
   1989 }
   1990 
   1991 void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
   1992   if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
   1993     FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
   1994     return VisitBinop(this, node, kArmSub, kArmRsb, &cont);
   1995   }
   1996   FlagsContinuation cont;
   1997   VisitBinop(this, node, kArmSub, kArmRsb, &cont);
   1998 }
   1999 
// Overflow-checked multiply: unlike add/sub this cannot use the V flag (see
// the inline comment for the emitted sequence).
void InstructionSelector::VisitInt32MulWithOverflow(Node* node) {
  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
    // ARM doesn't set the overflow flag for multiplication, so we need to test
    // on kNotEqual. Here is the code sequence used:
    //   smull resultlow, resulthigh, left, right
    //   cmp resulthigh, Operand(resultlow, ASR, 31)
    FlagsContinuation cont = FlagsContinuation::ForSet(kNotEqual, ovf);
    return EmitInt32MulWithOverflow(this, node, &cont);
  }
  // Overflow output is unused: emit the multiply without a continuation.
  FlagsContinuation cont;
  EmitInt32MulWithOverflow(this, node, &cont);
}
   2012 
// The floating-point relational visitors all materialize a boolean: each
// builds a "set" continuation with the matching condition and defers to the
// shared float32/float64 compare lowering.
void InstructionSelector::VisitFloat32Equal(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
  VisitFloat32Compare(this, node, &cont);
}


void InstructionSelector::VisitFloat32LessThan(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kFloatLessThan, node);
  VisitFloat32Compare(this, node, &cont);
}


void InstructionSelector::VisitFloat32LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kFloatLessThanOrEqual, node);
  VisitFloat32Compare(this, node, &cont);
}


void InstructionSelector::VisitFloat64Equal(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
  VisitFloat64Compare(this, node, &cont);
}


void InstructionSelector::VisitFloat64LessThan(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kFloatLessThan, node);
  VisitFloat64Compare(this, node, &cont);
}


void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kFloatLessThanOrEqual, node);
  VisitFloat64Compare(this, node, &cont);
}
   2049 
   2050 void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) {
   2051   ArmOperandGenerator g(this);
   2052   Node* left = node->InputAt(0);
   2053   Node* right = node->InputAt(1);
   2054   if (left->opcode() == IrOpcode::kFloat64InsertHighWord32 &&
   2055       CanCover(node, left)) {
   2056     left = left->InputAt(1);
   2057     Emit(kArmVmovF64U32U32, g.DefineAsRegister(node), g.UseRegister(right),
   2058          g.UseRegister(left));
   2059     return;
   2060   }
   2061   Emit(kArmVmovLowF64U32, g.DefineSameAsFirst(node), g.UseRegister(left),
   2062        g.UseRegister(right));
   2063 }
   2064 
   2065 
   2066 void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
   2067   ArmOperandGenerator g(this);
   2068   Node* left = node->InputAt(0);
   2069   Node* right = node->InputAt(1);
   2070   if (left->opcode() == IrOpcode::kFloat64InsertLowWord32 &&
   2071       CanCover(node, left)) {
   2072     left = left->InputAt(1);
   2073     Emit(kArmVmovF64U32U32, g.DefineAsRegister(node), g.UseRegister(left),
   2074          g.UseRegister(right));
   2075     return;
   2076   }
   2077   Emit(kArmVmovHighF64U32, g.DefineSameAsFirst(node), g.UseRegister(left),
   2078        g.UseRegister(right));
   2079 }
   2080 
void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
  // Select the opcode matching the loaded width/signedness and emit a single
  // load with register+register (base + index) addressing.
  LoadRepresentation load_rep = LoadRepresentationOf(node->op());
  ArmOperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  ArchOpcode opcode = kArchNop;
  switch (load_rep.representation()) {
    case MachineRepresentation::kWord8:
      opcode =
          load_rep.IsSigned() ? kWord32AtomicLoadInt8 : kWord32AtomicLoadUint8;
      break;
    case MachineRepresentation::kWord16:
      opcode = load_rep.IsSigned() ? kWord32AtomicLoadInt16
                                   : kWord32AtomicLoadUint16;
      break;
    case MachineRepresentation::kWord32:
      opcode = kWord32AtomicLoadWord32;
      break;
    default:
      UNREACHABLE();
      return;
  }
  Emit(opcode | AddressingModeField::encode(kMode_Offset_RR),
       g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(index));
}
   2106 
   2107 void InstructionSelector::VisitWord32AtomicStore(Node* node) {
   2108   MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
   2109   ArmOperandGenerator g(this);
   2110   Node* base = node->InputAt(0);
   2111   Node* index = node->InputAt(1);
   2112   Node* value = node->InputAt(2);
   2113   ArchOpcode opcode = kArchNop;
   2114   switch (rep) {
   2115     case MachineRepresentation::kWord8:
   2116       opcode = kWord32AtomicStoreWord8;
   2117       break;
   2118     case MachineRepresentation::kWord16:
   2119       opcode = kWord32AtomicStoreWord16;
   2120       break;
   2121     case MachineRepresentation::kWord32:
   2122       opcode = kWord32AtomicStoreWord32;
   2123       break;
   2124     default:
   2125       UNREACHABLE();
   2126       return;
   2127   }
   2128 
   2129   AddressingMode addressing_mode = kMode_Offset_RR;
   2130   InstructionOperand inputs[4];
   2131   size_t input_count = 0;
   2132   inputs[input_count++] = g.UseUniqueRegister(base);
   2133   inputs[input_count++] = g.UseUniqueRegister(index);
   2134   inputs[input_count++] = g.UseUniqueRegister(value);
   2135   InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
   2136   Emit(code, 0, nullptr, input_count, inputs);
   2137 }
   2138 
   2139 void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
   2140   ArmOperandGenerator g(this);
   2141   Node* base = node->InputAt(0);
   2142   Node* index = node->InputAt(1);
   2143   Node* value = node->InputAt(2);
   2144   ArchOpcode opcode = kArchNop;
   2145   MachineType type = AtomicOpType(node->op());
   2146   if (type == MachineType::Int8()) {
   2147     opcode = kWord32AtomicExchangeInt8;
   2148   } else if (type == MachineType::Uint8()) {
   2149     opcode = kWord32AtomicExchangeUint8;
   2150   } else if (type == MachineType::Int16()) {
   2151     opcode = kWord32AtomicExchangeInt16;
   2152   } else if (type == MachineType::Uint16()) {
   2153     opcode = kWord32AtomicExchangeUint16;
   2154   } else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
   2155     opcode = kWord32AtomicExchangeWord32;
   2156   } else {
   2157     UNREACHABLE();
   2158     return;
   2159   }
   2160 
   2161   AddressingMode addressing_mode = kMode_Offset_RR;
   2162   InstructionOperand inputs[3];
   2163   size_t input_count = 0;
   2164   inputs[input_count++] = g.UseRegister(base);
   2165   inputs[input_count++] = g.UseRegister(index);
   2166   inputs[input_count++] = g.UseUniqueRegister(value);
   2167   InstructionOperand outputs[1];
   2168   outputs[0] = g.DefineAsRegister(node);
   2169   InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
   2170   InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
   2171   Emit(code, 1, outputs, input_count, inputs, arraysize(temps), temps);
   2172 }
   2173 
   2174 void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
   2175   ArmOperandGenerator g(this);
   2176   Node* base = node->InputAt(0);
   2177   Node* index = node->InputAt(1);
   2178   Node* old_value = node->InputAt(2);
   2179   Node* new_value = node->InputAt(3);
   2180   ArchOpcode opcode = kArchNop;
   2181   MachineType type = AtomicOpType(node->op());
   2182   if (type == MachineType::Int8()) {
   2183     opcode = kWord32AtomicCompareExchangeInt8;
   2184   } else if (type == MachineType::Uint8()) {
   2185     opcode = kWord32AtomicCompareExchangeUint8;
   2186   } else if (type == MachineType::Int16()) {
   2187     opcode = kWord32AtomicCompareExchangeInt16;
   2188   } else if (type == MachineType::Uint16()) {
   2189     opcode = kWord32AtomicCompareExchangeUint16;
   2190   } else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
   2191     opcode = kWord32AtomicCompareExchangeWord32;
   2192   } else {
   2193     UNREACHABLE();
   2194     return;
   2195   }
   2196 
   2197   AddressingMode addressing_mode = kMode_Offset_RR;
   2198   InstructionOperand inputs[4];
   2199   size_t input_count = 0;
   2200   inputs[input_count++] = g.UseRegister(base);
   2201   inputs[input_count++] = g.UseRegister(index);
   2202   inputs[input_count++] = g.UseUniqueRegister(old_value);
   2203   inputs[input_count++] = g.UseUniqueRegister(new_value);
   2204   InstructionOperand outputs[1];
   2205   outputs[0] = g.DefineAsRegister(node);
   2206   InstructionOperand temps[] = {g.TempRegister(), g.TempRegister(),
   2207                                 g.TempRegister()};
   2208   InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
   2209   Emit(code, 1, outputs, input_count, inputs, arraysize(temps), temps);
   2210 }
   2211 
   2212 void InstructionSelector::VisitWord32AtomicBinaryOperation(
   2213     Node* node, ArchOpcode int8_op, ArchOpcode uint8_op, ArchOpcode int16_op,
   2214     ArchOpcode uint16_op, ArchOpcode word32_op) {
   2215   ArmOperandGenerator g(this);
   2216   Node* base = node->InputAt(0);
   2217   Node* index = node->InputAt(1);
   2218   Node* value = node->InputAt(2);
   2219   ArchOpcode opcode = kArchNop;
   2220   MachineType type = AtomicOpType(node->op());
   2221   if (type == MachineType::Int8()) {
   2222     opcode = int8_op;
   2223   } else if (type == MachineType::Uint8()) {
   2224     opcode = uint8_op;
   2225   } else if (type == MachineType::Int16()) {
   2226     opcode = int16_op;
   2227   } else if (type == MachineType::Uint16()) {
   2228     opcode = uint16_op;
   2229   } else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
   2230     opcode = word32_op;
   2231   } else {
   2232     UNREACHABLE();
   2233     return;
   2234   }
   2235 
   2236   AddressingMode addressing_mode = kMode_Offset_RR;
   2237   InstructionOperand inputs[3];
   2238   size_t input_count = 0;
   2239   inputs[input_count++] = g.UseRegister(base);
   2240   inputs[input_count++] = g.UseRegister(index);
   2241   inputs[input_count++] = g.UseUniqueRegister(value);
   2242   InstructionOperand outputs[1];
   2243   outputs[0] = g.DefineAsRegister(node);
   2244   InstructionOperand temps[] = {g.TempRegister(), g.TempRegister(),
   2245                                 g.TempRegister()};
   2246   InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
   2247   Emit(code, 1, outputs, input_count, inputs, arraysize(temps), temps);
   2248 }
   2249 
// Generates the VisitWord32Atomic{Add,Sub,And,Or,Xor} visitors. Each one
// forwards to VisitWord32AtomicBinaryOperation with the five per-type
// opcodes for that operation.
#define VISIT_ATOMIC_BINOP(op)                                   \
  void InstructionSelector::VisitWord32Atomic##op(Node* node) {  \
    VisitWord32AtomicBinaryOperation(                            \
        node, kWord32Atomic##op##Int8, kWord32Atomic##op##Uint8, \
        kWord32Atomic##op##Int16, kWord32Atomic##op##Uint16,     \
        kWord32Atomic##op##Word32);                              \
  }
VISIT_ATOMIC_BINOP(Add)
VISIT_ATOMIC_BINOP(Sub)
VISIT_ATOMIC_BINOP(And)
VISIT_ATOMIC_BINOP(Or)
VISIT_ATOMIC_BINOP(Xor)
#undef VISIT_ATOMIC_BINOP
   2263 
   2264 void InstructionSelector::VisitWord32AtomicPairLoad(Node* node) {
   2265   ArmOperandGenerator g(this);
   2266   Node* base = node->InputAt(0);
   2267   Node* index = node->InputAt(1);
   2268   InstructionOperand inputs[] = {g.UseRegister(base), g.UseRegister(index)};
   2269   InstructionOperand outputs[] = {
   2270       g.DefineAsFixed(NodeProperties::FindProjection(node, 0), r0),
   2271       g.DefineAsFixed(NodeProperties::FindProjection(node, 1), r1)};
   2272   InstructionOperand temps[] = {g.TempRegister()};
   2273   AddressingMode addressing_mode = kMode_Offset_RR;
   2274   InstructionCode code =
   2275       kArmWord32AtomicPairLoad | AddressingModeField::encode(addressing_mode);
   2276   Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
   2277        arraysize(temps), temps);
   2278 }
   2279 
   2280 void InstructionSelector::VisitWord32AtomicPairStore(Node* node) {
   2281   ArmOperandGenerator g(this);
   2282   Node* base = node->InputAt(0);
   2283   Node* index = node->InputAt(1);
   2284   Node* value_low = node->InputAt(2);
   2285   Node* value_high = node->InputAt(3);
   2286   AddressingMode addressing_mode = kMode_Offset_RR;
   2287   InstructionOperand inputs[] = {
   2288       g.UseUniqueRegister(base), g.UseUniqueRegister(index),
   2289       g.UseFixed(value_low, r2), g.UseFixed(value_high, r3)};
   2290   InstructionOperand temps[] = {g.TempRegister(), g.TempRegister(r0),
   2291                                 g.TempRegister(r1)};
   2292   InstructionCode code =
   2293       kArmWord32AtomicPairStore | AddressingModeField::encode(addressing_mode);
   2294   Emit(code, 0, nullptr, arraysize(inputs), inputs, arraysize(temps), temps);
   2295 }
   2296 
// The five 64-bit (pair) atomic read-modify-write visitors share one operand
// layout; each just selects its arch opcode and defers to the common
// VisitPairAtomicBinOp helper.
void InstructionSelector::VisitWord32AtomicPairAdd(Node* node) {
  VisitPairAtomicBinOp(this, node, kArmWord32AtomicPairAdd);
}

void InstructionSelector::VisitWord32AtomicPairSub(Node* node) {
  VisitPairAtomicBinOp(this, node, kArmWord32AtomicPairSub);
}

void InstructionSelector::VisitWord32AtomicPairAnd(Node* node) {
  VisitPairAtomicBinOp(this, node, kArmWord32AtomicPairAnd);
}

void InstructionSelector::VisitWord32AtomicPairOr(Node* node) {
  VisitPairAtomicBinOp(this, node, kArmWord32AtomicPairOr);
}

void InstructionSelector::VisitWord32AtomicPairXor(Node* node) {
  VisitPairAtomicBinOp(this, node, kArmWord32AtomicPairXor);
}
   2316 
   2317 void InstructionSelector::VisitWord64AtomicNarrowBinop(Node* node,
   2318                                                        ArchOpcode uint8_op,
   2319                                                        ArchOpcode uint16_op,
   2320                                                        ArchOpcode uint32_op) {
   2321   MachineType type = AtomicOpType(node->op());
   2322   DCHECK(type != MachineType::Uint64());
   2323   ArchOpcode opcode = kArchNop;
   2324   if (type == MachineType::Uint32()) {
   2325     opcode = uint32_op;
   2326   } else if (type == MachineType::Uint16()) {
   2327     opcode = uint16_op;
   2328   } else if (type == MachineType::Uint8()) {
   2329     opcode = uint8_op;
   2330   } else {
   2331     UNREACHABLE();
   2332     return;
   2333   }
   2334   VisitNarrowAtomicBinOp(this, node, opcode);
   2335 }
   2336 
// Generates the VisitWord64AtomicNarrow{Add,Sub,And,Or,Xor} visitors; each
// passes the three unsigned narrow-width opcodes for its operation to
// VisitWord64AtomicNarrowBinop.
#define VISIT_ATOMIC_BINOP(op)                                            \
  void InstructionSelector::VisitWord64AtomicNarrow##op(Node* node) {     \
    VisitWord64AtomicNarrowBinop(node, kArmWord64AtomicNarrow##op##Uint8, \
                                 kArmWord64AtomicNarrow##op##Uint16,      \
                                 kArmWord64AtomicNarrow##op##Uint32);     \
  }
VISIT_ATOMIC_BINOP(Add)
VISIT_ATOMIC_BINOP(Sub)
VISIT_ATOMIC_BINOP(And)
VISIT_ATOMIC_BINOP(Or)
VISIT_ATOMIC_BINOP(Xor)
#undef VISIT_ATOMIC_BINOP
   2349 
   2350 void InstructionSelector::VisitWord32AtomicPairExchange(Node* node) {
   2351   ArmOperandGenerator g(this);
   2352   Node* base = node->InputAt(0);
   2353   Node* index = node->InputAt(1);
   2354   Node* value = node->InputAt(2);
   2355   Node* value_high = node->InputAt(3);
   2356   AddressingMode addressing_mode = kMode_Offset_RR;
   2357   InstructionOperand inputs[] = {g.UseFixed(value, r0),
   2358                                  g.UseFixed(value_high, r1),
   2359                                  g.UseRegister(base), g.UseRegister(index)};
   2360   InstructionOperand outputs[] = {
   2361       g.DefineAsFixed(NodeProperties::FindProjection(node, 0), r6),
   2362       g.DefineAsFixed(NodeProperties::FindProjection(node, 1), r7)};
   2363   InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
   2364   InstructionCode code = kArmWord32AtomicPairExchange |
   2365                          AddressingModeField::encode(addressing_mode);
   2366   Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
   2367        arraysize(temps), temps);
   2368 }
   2369 
   2370 void InstructionSelector::VisitWord64AtomicNarrowExchange(Node* node) {
   2371   ArmOperandGenerator g(this);
   2372   Node* base = node->InputAt(0);
   2373   Node* index = node->InputAt(1);
   2374   Node* value = node->InputAt(2);
   2375   ArchOpcode opcode = kArchNop;
   2376   MachineType type = AtomicOpType(node->op());
   2377   if (type == MachineType::Uint8()) {
   2378     opcode = kArmWord64AtomicNarrowExchangeUint8;
   2379   } else if (type == MachineType::Uint16()) {
   2380     opcode = kArmWord64AtomicNarrowExchangeUint16;
   2381   } else if (type == MachineType::Uint32()) {
   2382     opcode = kArmWord64AtomicNarrowExchangeUint32;
   2383   } else {
   2384     UNREACHABLE();
   2385     return;
   2386   }
   2387   AddressingMode addressing_mode = kMode_Offset_RR;
   2388   InstructionOperand inputs[] = {g.UseRegister(base), g.UseRegister(index),
   2389                                  g.UseUniqueRegister(value)};
   2390   InstructionOperand outputs[] = {
   2391       g.DefineAsRegister(NodeProperties::FindProjection(node, 0)),
   2392       g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
   2393   InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
   2394   InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
   2395   Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
   2396        arraysize(temps), temps);
   2397 }
   2398 
   2399 void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) {
   2400   ArmOperandGenerator g(this);
   2401   AddressingMode addressing_mode = kMode_Offset_RR;
   2402   InstructionOperand inputs[] = {
   2403       g.UseFixed(node->InputAt(2), r4), g.UseFixed(node->InputAt(3), r5),
   2404       g.UseFixed(node->InputAt(4), r8), g.UseFixed(node->InputAt(5), r9),
   2405       g.UseRegister(node->InputAt(0)),  g.UseRegister(node->InputAt(1))};
   2406   InstructionOperand outputs[] = {
   2407       g.DefineAsFixed(NodeProperties::FindProjection(node, 0), r2),
   2408       g.DefineAsFixed(NodeProperties::FindProjection(node, 1), r3)};
   2409   InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
   2410   InstructionCode code = kArmWord32AtomicPairCompareExchange |
   2411                          AddressingModeField::encode(addressing_mode);
   2412   Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
   2413        arraysize(temps), temps);
   2414 }
   2415 
   2416 void InstructionSelector::VisitWord64AtomicNarrowCompareExchange(Node* node) {
   2417   ArmOperandGenerator g(this);
   2418   Node* base = node->InputAt(0);
   2419   Node* index = node->InputAt(1);
   2420   Node* old_value = node->InputAt(2);
   2421   Node* new_value = node->InputAt(3);
   2422   ArchOpcode opcode = kArchNop;
   2423   MachineType type = AtomicOpType(node->op());
   2424   if (type == MachineType::Uint8()) {
   2425     opcode = kArmWord64AtomicNarrowCompareExchangeUint8;
   2426   } else if (type == MachineType::Uint16()) {
   2427     opcode = kArmWord64AtomicNarrowCompareExchangeUint16;
   2428   } else if (type == MachineType::Uint32()) {
   2429     opcode = kArmWord64AtomicNarrowCompareExchangeUint32;
   2430   } else {
   2431     UNREACHABLE();
   2432     return;
   2433   }
   2434   AddressingMode addressing_mode = kMode_Offset_RR;
   2435   InstructionOperand inputs[] = {g.UseRegister(base), g.UseRegister(index),
   2436                                  g.UseUniqueRegister(old_value),
   2437                                  g.UseUniqueRegister(new_value)};
   2438   InstructionOperand outputs[] = {
   2439       g.DefineAsRegister(NodeProperties::FindProjection(node, 0)),
   2440       g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
   2441   InstructionOperand temps[] = {g.TempRegister(), g.TempRegister(),
   2442                                 g.TempRegister()};
   2443   InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
   2444   Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
   2445        arraysize(temps), temps);
   2446 }
   2447 
// SIMD types for which splat / extract-lane / replace-lane visitors are
// generated below.
#define SIMD_TYPE_LIST(V) \
  V(F32x4)                \
  V(I32x4)                \
  V(I16x8)                \
  V(I8x16)

// Unary SIMD operations: (machine operator, arch opcode) pairs, all lowered
// through VisitRR.
#define SIMD_UNOP_LIST(V)                               \
  V(F32x4SConvertI32x4, kArmF32x4SConvertI32x4)         \
  V(F32x4UConvertI32x4, kArmF32x4UConvertI32x4)         \
  V(F32x4Abs, kArmF32x4Abs)                             \
  V(F32x4Neg, kArmF32x4Neg)                             \
  V(F32x4RecipApprox, kArmF32x4RecipApprox)             \
  V(F32x4RecipSqrtApprox, kArmF32x4RecipSqrtApprox)     \
  V(I32x4SConvertF32x4, kArmI32x4SConvertF32x4)         \
  V(I32x4SConvertI16x8Low, kArmI32x4SConvertI16x8Low)   \
  V(I32x4SConvertI16x8High, kArmI32x4SConvertI16x8High) \
  V(I32x4Neg, kArmI32x4Neg)                             \
  V(I32x4UConvertF32x4, kArmI32x4UConvertF32x4)         \
  V(I32x4UConvertI16x8Low, kArmI32x4UConvertI16x8Low)   \
  V(I32x4UConvertI16x8High, kArmI32x4UConvertI16x8High) \
  V(I16x8SConvertI8x16Low, kArmI16x8SConvertI8x16Low)   \
  V(I16x8SConvertI8x16High, kArmI16x8SConvertI8x16High) \
  V(I16x8Neg, kArmI16x8Neg)                             \
  V(I16x8UConvertI8x16Low, kArmI16x8UConvertI8x16Low)   \
  V(I16x8UConvertI8x16High, kArmI16x8UConvertI8x16High) \
  V(I8x16Neg, kArmI8x16Neg)                             \
  V(S128Not, kArmS128Not)                               \
  V(S1x4AnyTrue, kArmS1x4AnyTrue)                       \
  V(S1x4AllTrue, kArmS1x4AllTrue)                       \
  V(S1x8AnyTrue, kArmS1x8AnyTrue)                       \
  V(S1x8AllTrue, kArmS1x8AllTrue)                       \
  V(S1x16AnyTrue, kArmS1x16AnyTrue)                     \
  V(S1x16AllTrue, kArmS1x16AllTrue)

// SIMD shifts: the shift amount is an immediate, so these lower through
// VisitRRI with kArm<Name> opcodes.
#define SIMD_SHIFT_OP_LIST(V) \
  V(I32x4Shl)                 \
  V(I32x4ShrS)                \
  V(I32x4ShrU)                \
  V(I16x8Shl)                 \
  V(I16x8ShrS)                \
  V(I16x8ShrU)                \
  V(I8x16Shl)                 \
  V(I8x16ShrS)                \
  V(I8x16ShrU)

// Binary SIMD operations: (machine operator, arch opcode) pairs, all lowered
// through VisitRRR.
#define SIMD_BINOP_LIST(V)                      \
  V(F32x4Add, kArmF32x4Add)                     \
  V(F32x4AddHoriz, kArmF32x4AddHoriz)           \
  V(F32x4Sub, kArmF32x4Sub)                     \
  V(F32x4Mul, kArmF32x4Mul)                     \
  V(F32x4Min, kArmF32x4Min)                     \
  V(F32x4Max, kArmF32x4Max)                     \
  V(F32x4Eq, kArmF32x4Eq)                       \
  V(F32x4Ne, kArmF32x4Ne)                       \
  V(F32x4Lt, kArmF32x4Lt)                       \
  V(F32x4Le, kArmF32x4Le)                       \
  V(I32x4Add, kArmI32x4Add)                     \
  V(I32x4AddHoriz, kArmI32x4AddHoriz)           \
  V(I32x4Sub, kArmI32x4Sub)                     \
  V(I32x4Mul, kArmI32x4Mul)                     \
  V(I32x4MinS, kArmI32x4MinS)                   \
  V(I32x4MaxS, kArmI32x4MaxS)                   \
  V(I32x4Eq, kArmI32x4Eq)                       \
  V(I32x4Ne, kArmI32x4Ne)                       \
  V(I32x4GtS, kArmI32x4GtS)                     \
  V(I32x4GeS, kArmI32x4GeS)                     \
  V(I32x4MinU, kArmI32x4MinU)                   \
  V(I32x4MaxU, kArmI32x4MaxU)                   \
  V(I32x4GtU, kArmI32x4GtU)                     \
  V(I32x4GeU, kArmI32x4GeU)                     \
  V(I16x8SConvertI32x4, kArmI16x8SConvertI32x4) \
  V(I16x8Add, kArmI16x8Add)                     \
  V(I16x8AddSaturateS, kArmI16x8AddSaturateS)   \
  V(I16x8AddHoriz, kArmI16x8AddHoriz)           \
  V(I16x8Sub, kArmI16x8Sub)                     \
  V(I16x8SubSaturateS, kArmI16x8SubSaturateS)   \
  V(I16x8Mul, kArmI16x8Mul)                     \
  V(I16x8MinS, kArmI16x8MinS)                   \
  V(I16x8MaxS, kArmI16x8MaxS)                   \
  V(I16x8Eq, kArmI16x8Eq)                       \
  V(I16x8Ne, kArmI16x8Ne)                       \
  V(I16x8GtS, kArmI16x8GtS)                     \
  V(I16x8GeS, kArmI16x8GeS)                     \
  V(I16x8UConvertI32x4, kArmI16x8UConvertI32x4) \
  V(I16x8AddSaturateU, kArmI16x8AddSaturateU)   \
  V(I16x8SubSaturateU, kArmI16x8SubSaturateU)   \
  V(I16x8MinU, kArmI16x8MinU)                   \
  V(I16x8MaxU, kArmI16x8MaxU)                   \
  V(I16x8GtU, kArmI16x8GtU)                     \
  V(I16x8GeU, kArmI16x8GeU)                     \
  V(I8x16SConvertI16x8, kArmI8x16SConvertI16x8) \
  V(I8x16Add, kArmI8x16Add)                     \
  V(I8x16AddSaturateS, kArmI8x16AddSaturateS)   \
  V(I8x16Sub, kArmI8x16Sub)                     \
  V(I8x16SubSaturateS, kArmI8x16SubSaturateS)   \
  V(I8x16Mul, kArmI8x16Mul)                     \
  V(I8x16MinS, kArmI8x16MinS)                   \
  V(I8x16MaxS, kArmI8x16MaxS)                   \
  V(I8x16Eq, kArmI8x16Eq)                       \
  V(I8x16Ne, kArmI8x16Ne)                       \
  V(I8x16GtS, kArmI8x16GtS)                     \
  V(I8x16GeS, kArmI8x16GeS)                     \
  V(I8x16UConvertI16x8, kArmI8x16UConvertI16x8) \
  V(I8x16AddSaturateU, kArmI8x16AddSaturateU)   \
  V(I8x16SubSaturateU, kArmI8x16SubSaturateU)   \
  V(I8x16MinU, kArmI8x16MinU)                   \
  V(I8x16MaxU, kArmI8x16MaxU)                   \
  V(I8x16GtU, kArmI8x16GtU)                     \
  V(I8x16GeU, kArmI8x16GeU)                     \
  V(S128And, kArmS128And)                       \
  V(S128Or, kArmS128Or)                         \
  V(S128Xor, kArmS128Xor)
   2560 
   2561 void InstructionSelector::VisitS128Zero(Node* node) {
   2562   ArmOperandGenerator g(this);
   2563   Emit(kArmS128Zero, g.DefineAsRegister(node), g.DefineAsRegister(node));
   2564 }
   2565 
// Splat visitors: one per SIMD type, all lowering through VisitRR.
#define SIMD_VISIT_SPLAT(Type)                               \
  void InstructionSelector::Visit##Type##Splat(Node* node) { \
    VisitRR(this, kArm##Type##Splat, node);                  \
  }
SIMD_TYPE_LIST(SIMD_VISIT_SPLAT)
#undef SIMD_VISIT_SPLAT

// Extract-lane visitors: the lane index travels as an immediate (VisitRRI).
#define SIMD_VISIT_EXTRACT_LANE(Type)                              \
  void InstructionSelector::Visit##Type##ExtractLane(Node* node) { \
    VisitRRI(this, kArm##Type##ExtractLane, node);                 \
  }
SIMD_TYPE_LIST(SIMD_VISIT_EXTRACT_LANE)
#undef SIMD_VISIT_EXTRACT_LANE

// Replace-lane visitors: lane immediate plus the replacement value register
// (VisitRRIR).
#define SIMD_VISIT_REPLACE_LANE(Type)                              \
  void InstructionSelector::Visit##Type##ReplaceLane(Node* node) { \
    VisitRRIR(this, kArm##Type##ReplaceLane, node);                \
  }
SIMD_TYPE_LIST(SIMD_VISIT_REPLACE_LANE)
#undef SIMD_VISIT_REPLACE_LANE
#undef SIMD_TYPE_LIST
   2587 
// Expands the unary SIMD visitors from SIMD_UNOP_LIST via VisitRR.
#define SIMD_VISIT_UNOP(Name, instruction)            \
  void InstructionSelector::Visit##Name(Node* node) { \
    VisitRR(this, instruction, node);                 \
  }
SIMD_UNOP_LIST(SIMD_VISIT_UNOP)
#undef SIMD_VISIT_UNOP
#undef SIMD_UNOP_LIST

// Expands the SIMD shift visitors from SIMD_SHIFT_OP_LIST via VisitRRI.
#define SIMD_VISIT_SHIFT_OP(Name)                     \
  void InstructionSelector::Visit##Name(Node* node) { \
    VisitRRI(this, kArm##Name, node);                 \
  }
SIMD_SHIFT_OP_LIST(SIMD_VISIT_SHIFT_OP)
#undef SIMD_VISIT_SHIFT_OP
#undef SIMD_SHIFT_OP_LIST

// Expands the binary SIMD visitors from SIMD_BINOP_LIST via VisitRRR.
#define SIMD_VISIT_BINOP(Name, instruction)           \
  void InstructionSelector::Visit##Name(Node* node) { \
    VisitRRR(this, instruction, node);                \
  }
SIMD_BINOP_LIST(SIMD_VISIT_BINOP)
#undef SIMD_VISIT_BINOP
#undef SIMD_BINOP_LIST
   2611 
   2612 void InstructionSelector::VisitS128Select(Node* node) {
   2613   ArmOperandGenerator g(this);
   2614   Emit(kArmS128Select, g.DefineSameAsFirst(node),
   2615        g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)),
   2616        g.UseRegister(node->InputAt(2)));
   2617 }
   2618 
   2619 namespace {
   2620 
// Maps one canonical 16-byte shuffle pattern to the Arm instruction that
// implements it directly (see arch_shuffles below).
struct ShuffleEntry {
  uint8_t shuffle[kSimd128Size];
  ArchOpcode opcode;
};
   2625 
// Shuffle patterns with a dedicated Arm instruction, grouped by lane width
// (32-bit, 16-bit, 8-bit): zips, unzips, transposes and in-lane reversals.
// Indices 0-15 select bytes of the first input, 16-31 of the second.
static const ShuffleEntry arch_shuffles[] = {
    {{0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23},
     kArmS32x4ZipLeft},
    {{8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31},
     kArmS32x4ZipRight},
    {{0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27},
     kArmS32x4UnzipLeft},
    {{4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31},
     kArmS32x4UnzipRight},
    {{0, 1, 2, 3, 16, 17, 18, 19, 8, 9, 10, 11, 24, 25, 26, 27},
     kArmS32x4TransposeLeft},
    {{4, 5, 6, 7, 20, 21, 22, 23, 12, 13, 14, 15, 28, 29, 30, 31},
     kArmS32x4TransposeRight},
    {{4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15, 8, 9, 10, 11}, kArmS32x2Reverse},

    {{0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23},
     kArmS16x8ZipLeft},
    {{8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31},
     kArmS16x8ZipRight},
    {{0, 1, 4, 5, 8, 9, 12, 13, 16, 17, 20, 21, 24, 25, 28, 29},
     kArmS16x8UnzipLeft},
    {{2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31},
     kArmS16x8UnzipRight},
    {{0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29},
     kArmS16x8TransposeLeft},
    {{2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31},
     kArmS16x8TransposeRight},
    {{6, 7, 4, 5, 2, 3, 0, 1, 14, 15, 12, 13, 10, 11, 8, 9}, kArmS16x4Reverse},
    {{2, 3, 0, 1, 6, 7, 4, 5, 10, 11, 8, 9, 14, 15, 12, 13}, kArmS16x2Reverse},

    {{0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23},
     kArmS8x16ZipLeft},
    {{8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31},
     kArmS8x16ZipRight},
    {{0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30},
     kArmS8x16UnzipLeft},
    {{1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31},
     kArmS8x16UnzipRight},
    {{0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30},
     kArmS8x16TransposeLeft},
    {{1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31},
     kArmS8x16TransposeRight},
    {{7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8}, kArmS8x8Reverse},
    {{3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12}, kArmS8x4Reverse},
    {{1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14}, kArmS8x2Reverse}};
   2671 
   2672 bool TryMatchArchShuffle(const uint8_t* shuffle, const ShuffleEntry* table,
   2673                          size_t num_entries, bool is_swizzle,
   2674                          ArchOpcode* opcode) {
   2675   uint8_t mask = is_swizzle ? kSimd128Size - 1 : 2 * kSimd128Size - 1;
   2676   for (size_t i = 0; i < num_entries; ++i) {
   2677     const ShuffleEntry& entry = table[i];
   2678     int j = 0;
   2679     for (; j < kSimd128Size; ++j) {
   2680       if ((entry.shuffle[j] & mask) != (shuffle[j] & mask)) {
   2681         break;
   2682       }
   2683     }
   2684     if (j == kSimd128Size) {
   2685       *opcode = entry.opcode;
   2686       return true;
   2687     }
   2688   }
   2689   return false;
   2690 }
   2691 
   2692 void ArrangeShuffleTable(ArmOperandGenerator* g, Node* input0, Node* input1,
   2693                          InstructionOperand* src0, InstructionOperand* src1) {
   2694   if (input0 == input1) {
   2695     // Unary, any q-register can be the table.
   2696     *src0 = *src1 = g->UseRegister(input0);
   2697   } else {
   2698     // Binary, table registers must be consecutive.
   2699     *src0 = g->UseFixed(input0, q0);
   2700     *src1 = g->UseFixed(input1, q1);
   2701   }
   2702 }
   2703 
   2704 }  // namespace
   2705 
// Lowers an S8x16Shuffle by trying progressively more generic strategies:
// a 32x4-lane dup / identity / s-register shuffle, a 16- or 8-lane dup, a
// table of shuffles with dedicated Arm instructions, a byte-offset concat,
// and finally a generic table lookup (vtbl).
void InstructionSelector::VisitS8x16Shuffle(Node* node) {
  uint8_t shuffle[kSimd128Size];
  bool is_swizzle;
  CanonicalizeShuffle(node, shuffle, &is_swizzle);
  Node* input0 = node->InputAt(0);
  Node* input1 = node->InputAt(1);
  uint8_t shuffle32x4[4];
  ArmOperandGenerator g(this);
  int index = 0;
  // The shuffle is expressible as a permutation of 32-bit lanes.
  if (TryMatch32x4Shuffle(shuffle, shuffle32x4)) {
    if (TryMatchDup<4>(shuffle, &index)) {
      // All lanes equal: duplicate one 32-bit lane of input0.
      DCHECK_GT(4, index);
      Emit(kArmS128Dup, g.DefineAsRegister(node), g.UseRegister(input0),
           g.UseImmediate(Neon32), g.UseImmediate(index % 4));
    } else if (TryMatchIdentity(shuffle)) {
      EmitIdentity(node);
    } else {
      // 32x4 shuffles are implemented as s-register moves. To simplify these,
      // make sure the destination is distinct from both sources.
      InstructionOperand src0 = g.UseUniqueRegister(input0);
      InstructionOperand src1 = is_swizzle ? src0 : g.UseUniqueRegister(input1);
      Emit(kArmS32x4Shuffle, g.DefineAsRegister(node), src0, src1,
           g.UseImmediate(Pack4Lanes(shuffle32x4)));
    }
    return;
  }
  // Duplication of a single 16-bit lane.
  if (TryMatchDup<8>(shuffle, &index)) {
    DCHECK_GT(8, index);
    Emit(kArmS128Dup, g.DefineAsRegister(node), g.UseRegister(input0),
         g.UseImmediate(Neon16), g.UseImmediate(index % 8));
    return;
  }
  // Duplication of a single byte lane.
  if (TryMatchDup<16>(shuffle, &index)) {
    DCHECK_GT(16, index);
    Emit(kArmS128Dup, g.DefineAsRegister(node), g.UseRegister(input0),
         g.UseImmediate(Neon8), g.UseImmediate(index % 16));
    return;
  }
  // A shuffle with a dedicated Arm instruction (zip/unzip/transpose/reverse).
  ArchOpcode opcode;
  if (TryMatchArchShuffle(shuffle, arch_shuffles, arraysize(arch_shuffles),
                          is_swizzle, &opcode)) {
    VisitRRRShuffle(this, opcode, node);
    return;
  }
  // 16 consecutive bytes taken across the two inputs: a single concat with
  // a byte offset.
  uint8_t offset;
  if (TryMatchConcat(shuffle, &offset)) {
    Emit(kArmS8x16Concat, g.DefineAsRegister(node), g.UseRegister(input0),
         g.UseRegister(input1), g.UseImmediate(offset));
    return;
  }
  // Code generator uses vtbl, arrange sources to form a valid lookup table.
  InstructionOperand src0, src1;
  ArrangeShuffleTable(&g, input0, input1, &src0, &src1);
  Emit(kArmS8x16Shuffle, g.DefineAsRegister(node), src0, src1,
       g.UseImmediate(Pack4Lanes(shuffle)),
       g.UseImmediate(Pack4Lanes(shuffle + 4)),
       g.UseImmediate(Pack4Lanes(shuffle + 8)),
       g.UseImmediate(Pack4Lanes(shuffle + 12)));
}
   2765 
   2766 void InstructionSelector::VisitSignExtendWord8ToInt32(Node* node) {
   2767   ArmOperandGenerator g(this);
   2768   Emit(kArmSxtb, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
   2769        g.TempImmediate(0));
   2770 }
   2771 
   2772 void InstructionSelector::VisitSignExtendWord16ToInt32(Node* node) {
   2773   ArmOperandGenerator g(this);
   2774   Emit(kArmSxth, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
   2775        g.TempImmediate(0));
   2776 }
   2777 
// Int32AbsWithOverflow is never produced for this target, so reaching this
// visitor indicates a bug elsewhere in the pipeline.
void InstructionSelector::VisitInt32AbsWithOverflow(Node* node) {
  UNREACHABLE();
}
   2781 
// Int64AbsWithOverflow is never produced for this target, so reaching this
// visitor indicates a bug elsewhere in the pipeline.
void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
  UNREACHABLE();
}
   2785 
   2786 // static
   2787 MachineOperatorBuilder::Flags
   2788 InstructionSelector::SupportedMachineOperatorFlags() {
   2789   MachineOperatorBuilder::Flags flags =
   2790       MachineOperatorBuilder::kSpeculationFence;
   2791   if (CpuFeatures::IsSupported(SUDIV)) {
   2792     // The sdiv and udiv instructions correctly return 0 if the divisor is 0,
   2793     // but the fall-back implementation does not.
   2794     flags |= MachineOperatorBuilder::kInt32DivIsSafe |
   2795              MachineOperatorBuilder::kUint32DivIsSafe;
   2796   }
   2797   if (CpuFeatures::IsSupported(ARMv7)) {
   2798     flags |= MachineOperatorBuilder::kWord32ReverseBits;
   2799   }
   2800   if (CpuFeatures::IsSupported(ARMv8)) {
   2801     flags |= MachineOperatorBuilder::kFloat32RoundDown |
   2802              MachineOperatorBuilder::kFloat64RoundDown |
   2803              MachineOperatorBuilder::kFloat32RoundUp |
   2804              MachineOperatorBuilder::kFloat64RoundUp |
   2805              MachineOperatorBuilder::kFloat32RoundTruncate |
   2806              MachineOperatorBuilder::kFloat64RoundTruncate |
   2807              MachineOperatorBuilder::kFloat64RoundTiesAway |
   2808              MachineOperatorBuilder::kFloat32RoundTiesEven |
   2809              MachineOperatorBuilder::kFloat64RoundTiesEven;
   2810   }
   2811   return flags;
   2812 }
   2813 
   2814 // static
   2815 MachineOperatorBuilder::AlignmentRequirements
   2816 InstructionSelector::AlignmentRequirements() {
   2817   EnumSet<MachineRepresentation> req_aligned;
   2818   req_aligned.Add(MachineRepresentation::kFloat32);
   2819   req_aligned.Add(MachineRepresentation::kFloat64);
   2820   return MachineOperatorBuilder::AlignmentRequirements::
   2821       SomeUnalignedAccessUnsupported(req_aligned, req_aligned);
   2822 }
   2823 
   2824 }  // namespace compiler
   2825 }  // namespace internal
   2826 }  // namespace v8
   2827