// Source viewer header (Home | History | Annotate | Download | only in arm) — scrape residue, not part of the original file.
      1 // Copyright 2014 the V8 project authors. All rights reserved.
      2 // Use of this source code is governed by a BSD-style license that can be
      3 // found in the LICENSE file.
      4 
      5 #include "src/base/adapters.h"
      6 #include "src/base/bits.h"
      7 #include "src/compiler/instruction-selector-impl.h"
      8 #include "src/compiler/node-matchers.h"
      9 #include "src/compiler/node-properties.h"
     10 
     11 namespace v8 {
     12 namespace internal {
     13 namespace compiler {
     14 
     15 // Adds Arm-specific methods for generating InstructionOperands.
     16 class ArmOperandGenerator : public OperandGenerator {
     17  public:
     18   explicit ArmOperandGenerator(InstructionSelector* selector)
     19       : OperandGenerator(selector) {}
     20 
     21   bool CanBeImmediate(int32_t value) const {
     22     return Assembler::ImmediateFitsAddrMode1Instruction(value);
     23   }
     24 
     25   bool CanBeImmediate(uint32_t value) const {
     26     return CanBeImmediate(bit_cast<int32_t>(value));
     27   }
     28 
     29   bool CanBeImmediate(Node* node, InstructionCode opcode) {
     30     Int32Matcher m(node);
     31     if (!m.HasValue()) return false;
     32     int32_t value = m.Value();
     33     switch (ArchOpcodeField::decode(opcode)) {
     34       case kArmAnd:
     35       case kArmMov:
     36       case kArmMvn:
     37       case kArmBic:
     38         return CanBeImmediate(value) || CanBeImmediate(~value);
     39 
     40       case kArmAdd:
     41       case kArmSub:
     42       case kArmCmp:
     43       case kArmCmn:
     44         return CanBeImmediate(value) || CanBeImmediate(-value);
     45 
     46       case kArmTst:
     47       case kArmTeq:
     48       case kArmOrr:
     49       case kArmEor:
     50       case kArmRsb:
     51         return CanBeImmediate(value);
     52 
     53       case kArmVldrF32:
     54       case kArmVstrF32:
     55       case kArmVldrF64:
     56       case kArmVstrF64:
     57         return value >= -1020 && value <= 1020 && (value % 4) == 0;
     58 
     59       case kArmLdrb:
     60       case kArmLdrsb:
     61       case kArmStrb:
     62       case kArmLdr:
     63       case kArmStr:
     64         return value >= -4095 && value <= 4095;
     65 
     66       case kArmLdrh:
     67       case kArmLdrsh:
     68       case kArmStrh:
     69         return value >= -255 && value <= 255;
     70 
     71       default:
     72         break;
     73     }
     74     return false;
     75   }
     76 };
     77 
     78 
     79 namespace {
     80 
     81 void VisitRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
     82   ArmOperandGenerator g(selector);
     83   selector->Emit(opcode, g.DefineAsRegister(node),
     84                  g.UseRegister(node->InputAt(0)));
     85 }
     86 
     87 
     88 void VisitRRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
     89   ArmOperandGenerator g(selector);
     90   selector->Emit(opcode, g.DefineAsRegister(node),
     91                  g.UseRegister(node->InputAt(0)),
     92                  g.UseRegister(node->InputAt(1)));
     93 }
     94 
     95 
     96 template <IrOpcode::Value kOpcode, int kImmMin, int kImmMax,
     97           AddressingMode kImmMode, AddressingMode kRegMode>
     98 bool TryMatchShift(InstructionSelector* selector,
     99                    InstructionCode* opcode_return, Node* node,
    100                    InstructionOperand* value_return,
    101                    InstructionOperand* shift_return) {
    102   ArmOperandGenerator g(selector);
    103   if (node->opcode() == kOpcode) {
    104     Int32BinopMatcher m(node);
    105     *value_return = g.UseRegister(m.left().node());
    106     if (m.right().IsInRange(kImmMin, kImmMax)) {
    107       *opcode_return |= AddressingModeField::encode(kImmMode);
    108       *shift_return = g.UseImmediate(m.right().node());
    109     } else {
    110       *opcode_return |= AddressingModeField::encode(kRegMode);
    111       *shift_return = g.UseRegister(m.right().node());
    112     }
    113     return true;
    114   }
    115   return false;
    116 }
    117 
    118 template <IrOpcode::Value kOpcode, int kImmMin, int kImmMax,
    119           AddressingMode kImmMode>
    120 bool TryMatchShiftImmediate(InstructionSelector* selector,
    121                             InstructionCode* opcode_return, Node* node,
    122                             InstructionOperand* value_return,
    123                             InstructionOperand* shift_return) {
    124   ArmOperandGenerator g(selector);
    125   if (node->opcode() == kOpcode) {
    126     Int32BinopMatcher m(node);
    127     if (m.right().IsInRange(kImmMin, kImmMax)) {
    128       *opcode_return |= AddressingModeField::encode(kImmMode);
    129       *value_return = g.UseRegister(m.left().node());
    130       *shift_return = g.UseImmediate(m.right().node());
    131       return true;
    132     }
    133   }
    134   return false;
    135 }
    136 
    137 bool TryMatchROR(InstructionSelector* selector, InstructionCode* opcode_return,
    138                  Node* node, InstructionOperand* value_return,
    139                  InstructionOperand* shift_return) {
    140   return TryMatchShift<IrOpcode::kWord32Ror, 1, 31, kMode_Operand2_R_ROR_I,
    141                        kMode_Operand2_R_ROR_R>(selector, opcode_return, node,
    142                                                value_return, shift_return);
    143 }
    144 
    145 
    146 bool TryMatchASR(InstructionSelector* selector, InstructionCode* opcode_return,
    147                  Node* node, InstructionOperand* value_return,
    148                  InstructionOperand* shift_return) {
    149   return TryMatchShift<IrOpcode::kWord32Sar, 1, 32, kMode_Operand2_R_ASR_I,
    150                        kMode_Operand2_R_ASR_R>(selector, opcode_return, node,
    151                                                value_return, shift_return);
    152 }
    153 
    154 
    155 bool TryMatchLSL(InstructionSelector* selector, InstructionCode* opcode_return,
    156                  Node* node, InstructionOperand* value_return,
    157                  InstructionOperand* shift_return) {
    158   return TryMatchShift<IrOpcode::kWord32Shl, 0, 31, kMode_Operand2_R_LSL_I,
    159                        kMode_Operand2_R_LSL_R>(selector, opcode_return, node,
    160                                                value_return, shift_return);
    161 }
    162 
    163 bool TryMatchLSLImmediate(InstructionSelector* selector,
    164                           InstructionCode* opcode_return, Node* node,
    165                           InstructionOperand* value_return,
    166                           InstructionOperand* shift_return) {
    167   return TryMatchShiftImmediate<IrOpcode::kWord32Shl, 0, 31,
    168                                 kMode_Operand2_R_LSL_I>(
    169       selector, opcode_return, node, value_return, shift_return);
    170 }
    171 
    172 bool TryMatchLSR(InstructionSelector* selector, InstructionCode* opcode_return,
    173                  Node* node, InstructionOperand* value_return,
    174                  InstructionOperand* shift_return) {
    175   return TryMatchShift<IrOpcode::kWord32Shr, 1, 32, kMode_Operand2_R_LSR_I,
    176                        kMode_Operand2_R_LSR_R>(selector, opcode_return, node,
    177                                                value_return, shift_return);
    178 }
    179 
    180 
    181 bool TryMatchShift(InstructionSelector* selector,
    182                    InstructionCode* opcode_return, Node* node,
    183                    InstructionOperand* value_return,
    184                    InstructionOperand* shift_return) {
    185   return (
    186       TryMatchASR(selector, opcode_return, node, value_return, shift_return) ||
    187       TryMatchLSL(selector, opcode_return, node, value_return, shift_return) ||
    188       TryMatchLSR(selector, opcode_return, node, value_return, shift_return) ||
    189       TryMatchROR(selector, opcode_return, node, value_return, shift_return));
    190 }
    191 
    192 
    193 bool TryMatchImmediateOrShift(InstructionSelector* selector,
    194                               InstructionCode* opcode_return, Node* node,
    195                               size_t* input_count_return,
    196                               InstructionOperand* inputs) {
    197   ArmOperandGenerator g(selector);
    198   if (g.CanBeImmediate(node, *opcode_return)) {
    199     *opcode_return |= AddressingModeField::encode(kMode_Operand2_I);
    200     inputs[0] = g.UseImmediate(node);
    201     *input_count_return = 1;
    202     return true;
    203   }
    204   if (TryMatchShift(selector, opcode_return, node, &inputs[0], &inputs[1])) {
    205     *input_count_return = 2;
    206     return true;
    207   }
    208   return false;
    209 }
    210 
    211 
// Selects a two-input ALU operation for |node|. |reverse_opcode| is the
// operand-swapped form (e.g. RSB for SUB) used when only the left input can
// be folded into the flexible second operand. |cont| threads through any
// flags user (branch / set / deoptimize) attached to the operation.
void VisitBinop(InstructionSelector* selector, Node* node,
                InstructionCode opcode, InstructionCode reverse_opcode,
                FlagsContinuation* cont) {
  ArmOperandGenerator g(selector);
  Int32BinopMatcher m(node);
  InstructionOperand inputs[5];
  size_t input_count = 0;
  InstructionOperand outputs[2];
  size_t output_count = 0;

  if (m.left().node() == m.right().node()) {
    // If both inputs refer to the same operand, enforce allocating a register
    // for both of them to ensure that we don't end up generating code like
    // this:
    //
    //   mov r0, r1, asr #16
    //   adds r0, r0, r1, asr #16
    //   bvs label
    InstructionOperand const input = g.UseRegister(m.left().node());
    opcode |= AddressingModeField::encode(kMode_Operand2_R);
    inputs[input_count++] = input;
    inputs[input_count++] = input;
  } else if (TryMatchImmediateOrShift(selector, &opcode, m.right().node(),
                                      &input_count, &inputs[1])) {
    // Right input folded into the second operand; left goes in a register.
    // (TryMatchImmediateOrShift filled inputs[1..] and set input_count.)
    inputs[0] = g.UseRegister(m.left().node());
    input_count++;
  } else if (TryMatchImmediateOrShift(selector, &reverse_opcode,
                                      m.left().node(), &input_count,
                                      &inputs[1])) {
    // Left input folded instead: switch to the operand-swapped opcode.
    inputs[0] = g.UseRegister(m.right().node());
    opcode = reverse_opcode;
    input_count++;
  } else {
    // Neither input folds; plain register-register form.
    opcode |= AddressingModeField::encode(kMode_Operand2_R);
    inputs[input_count++] = g.UseRegister(m.left().node());
    inputs[input_count++] = g.UseRegister(m.right().node());
  }

  if (cont->IsBranch()) {
    // Branch continuations append the true/false target labels as inputs.
    inputs[input_count++] = g.Label(cont->true_block());
    inputs[input_count++] = g.Label(cont->false_block());
  }

  outputs[output_count++] = g.DefineAsRegister(node);
  if (cont->IsSet()) {
    // Materialize the flags result into a register as a second output.
    outputs[output_count++] = g.DefineAsRegister(cont->result());
  }

  DCHECK_NE(0u, input_count);
  DCHECK_NE(0u, output_count);
  DCHECK_GE(arraysize(inputs), input_count);
  DCHECK_GE(arraysize(outputs), output_count);
  DCHECK_NE(kMode_None, AddressingModeField::decode(opcode));

  opcode = cont->Encode(opcode);
  if (cont->IsDeoptimize()) {
    selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
                             cont->reason(), cont->frame_state());
  } else {
    selector->Emit(opcode, output_count, outputs, input_count, inputs);
  }
}
    274 
    275 
// Convenience overload of VisitBinop() with an empty flags continuation
// (no branch, set, or deoptimize attached to the operation).
void VisitBinop(InstructionSelector* selector, Node* node,
                InstructionCode opcode, InstructionCode reverse_opcode) {
  FlagsContinuation cont;
  VisitBinop(selector, node, opcode, reverse_opcode, &cont);
}
    281 
    282 
    283 void EmitDiv(InstructionSelector* selector, ArchOpcode div_opcode,
    284              ArchOpcode f64i32_opcode, ArchOpcode i32f64_opcode,
    285              InstructionOperand result_operand, InstructionOperand left_operand,
    286              InstructionOperand right_operand) {
    287   ArmOperandGenerator g(selector);
    288   if (selector->IsSupported(SUDIV)) {
    289     selector->Emit(div_opcode, result_operand, left_operand, right_operand);
    290     return;
    291   }
    292   InstructionOperand left_double_operand = g.TempDoubleRegister();
    293   InstructionOperand right_double_operand = g.TempDoubleRegister();
    294   InstructionOperand result_double_operand = g.TempDoubleRegister();
    295   selector->Emit(f64i32_opcode, left_double_operand, left_operand);
    296   selector->Emit(f64i32_opcode, right_double_operand, right_operand);
    297   selector->Emit(kArmVdivF64, result_double_operand, left_double_operand,
    298                  right_double_operand);
    299   selector->Emit(i32f64_opcode, result_operand, result_double_operand);
    300 }
    301 
    302 
    303 void VisitDiv(InstructionSelector* selector, Node* node, ArchOpcode div_opcode,
    304               ArchOpcode f64i32_opcode, ArchOpcode i32f64_opcode) {
    305   ArmOperandGenerator g(selector);
    306   Int32BinopMatcher m(node);
    307   EmitDiv(selector, div_opcode, f64i32_opcode, i32f64_opcode,
    308           g.DefineAsRegister(node), g.UseRegister(m.left().node()),
    309           g.UseRegister(m.right().node()));
    310 }
    311 
    312 
    313 void VisitMod(InstructionSelector* selector, Node* node, ArchOpcode div_opcode,
    314               ArchOpcode f64i32_opcode, ArchOpcode i32f64_opcode) {
    315   ArmOperandGenerator g(selector);
    316   Int32BinopMatcher m(node);
    317   InstructionOperand div_operand = g.TempRegister();
    318   InstructionOperand result_operand = g.DefineAsRegister(node);
    319   InstructionOperand left_operand = g.UseRegister(m.left().node());
    320   InstructionOperand right_operand = g.UseRegister(m.right().node());
    321   EmitDiv(selector, div_opcode, f64i32_opcode, i32f64_opcode, div_operand,
    322           left_operand, right_operand);
    323   if (selector->IsSupported(ARMv7)) {
    324     selector->Emit(kArmMls, result_operand, div_operand, right_operand,
    325                    left_operand);
    326   } else {
    327     InstructionOperand mul_operand = g.TempRegister();
    328     selector->Emit(kArmMul, mul_operand, div_operand, right_operand);
    329     selector->Emit(kArmSub | AddressingModeField::encode(kMode_Operand2_R),
    330                    result_operand, left_operand, mul_operand);
    331   }
    332 }
    333 
    334 void EmitLoad(InstructionSelector* selector, InstructionCode opcode,
    335               InstructionOperand* output, Node* base, Node* index) {
    336   ArmOperandGenerator g(selector);
    337   InstructionOperand inputs[3];
    338   size_t input_count = 2;
    339 
    340   inputs[0] = g.UseRegister(base);
    341   if (g.CanBeImmediate(index, opcode)) {
    342     inputs[1] = g.UseImmediate(index);
    343     opcode |= AddressingModeField::encode(kMode_Offset_RI);
    344   } else if ((opcode == kArmLdr) &&
    345              TryMatchLSLImmediate(selector, &opcode, index, &inputs[1],
    346                                   &inputs[2])) {
    347     input_count = 3;
    348   } else {
    349     inputs[1] = g.UseRegister(index);
    350     opcode |= AddressingModeField::encode(kMode_Offset_RR);
    351   }
    352   selector->Emit(opcode, 1, output, input_count, inputs);
    353 }
    354 
    355 void EmitStore(InstructionSelector* selector, InstructionCode opcode,
    356                size_t input_count, InstructionOperand* inputs,
    357                Node* index) {
    358   ArmOperandGenerator g(selector);
    359 
    360   if (g.CanBeImmediate(index, opcode)) {
    361     inputs[input_count++] = g.UseImmediate(index);
    362     opcode |= AddressingModeField::encode(kMode_Offset_RI);
    363   } else if ((opcode == kArmStr) &&
    364              TryMatchLSLImmediate(selector, &opcode, index, &inputs[2],
    365                                   &inputs[3])) {
    366     input_count = 4;
    367   } else {
    368     inputs[input_count++] = g.UseRegister(index);
    369     opcode |= AddressingModeField::encode(kMode_Offset_RR);
    370   }
    371   selector->Emit(opcode, 0, nullptr, input_count, inputs);
    372 }
    373 
    374 }  // namespace
    375 
    376 
// Selects a plain load for |node|, mapping its machine representation onto
// the matching ARM load opcode and delegating addressing to EmitLoad().
void InstructionSelector::VisitLoad(Node* node) {
  LoadRepresentation load_rep = LoadRepresentationOf(node->op());
  ArmOperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);

  InstructionCode opcode = kArchNop;
  switch (load_rep.representation()) {
    case MachineRepresentation::kFloat32:
      opcode = kArmVldrF32;
      break;
    case MachineRepresentation::kFloat64:
      opcode = kArmVldrF64;
      break;
    case MachineRepresentation::kBit:  // Fall through.
    case MachineRepresentation::kWord8:
      // Sub-word loads need the zero- or sign-extending variant.
      opcode = load_rep.IsUnsigned() ? kArmLdrb : kArmLdrsb;
      break;
    case MachineRepresentation::kWord16:
      opcode = load_rep.IsUnsigned() ? kArmLdrh : kArmLdrsh;
      break;
    case MachineRepresentation::kTaggedSigned:   // Fall through.
    case MachineRepresentation::kTaggedPointer:  // Fall through.
    case MachineRepresentation::kTagged:  // Fall through.
    case MachineRepresentation::kWord32:
      // Tagged values are word-sized on 32-bit ARM.
      opcode = kArmLdr;
      break;
    case MachineRepresentation::kWord64:   // Fall through.
    case MachineRepresentation::kSimd128:  // Fall through.
    case MachineRepresentation::kNone:
      // Not representable on this architecture.
      UNREACHABLE();
      return;
  }

  InstructionOperand output = g.DefineAsRegister(node);
  EmitLoad(this, opcode, &output, base, index);
}
    414 
// Protected (trap-handler-guarded) loads are not yet implemented on ARM.
void InstructionSelector::VisitProtectedLoad(Node* node) {
  // TODO(eholk)
  UNIMPLEMENTED();
}
    419 
// Selects a store for |node|. Stores requiring a GC write barrier are
// lowered to kArchStoreWithWriteBarrier with unique registers (the
// out-of-line record-write stub reuses them); plain stores pick an opcode by
// representation and delegate addressing to EmitStore().
void InstructionSelector::VisitStore(Node* node) {
  ArmOperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* value = node->InputAt(2);

  StoreRepresentation store_rep = StoreRepresentationOf(node->op());
  WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
  MachineRepresentation rep = store_rep.representation();

  if (write_barrier_kind != kNoWriteBarrier) {
    DCHECK(CanBeTaggedPointer(rep));
    AddressingMode addressing_mode;
    InstructionOperand inputs[3];
    size_t input_count = 0;
    inputs[input_count++] = g.UseUniqueRegister(base);
    // OutOfLineRecordWrite uses the index in an 'add' instruction as well as
    // for the store itself, so we must check compatibility with both.
    if (g.CanBeImmediate(index, kArmAdd) && g.CanBeImmediate(index, kArmStr)) {
      inputs[input_count++] = g.UseImmediate(index);
      addressing_mode = kMode_Offset_RI;
    } else {
      inputs[input_count++] = g.UseUniqueRegister(index);
      addressing_mode = kMode_Offset_RR;
    }
    inputs[input_count++] = g.UseUniqueRegister(value);
    // Translate the barrier kind into the record-write mode encoded in the
    // instruction's misc field.
    RecordWriteMode record_write_mode = RecordWriteMode::kValueIsAny;
    switch (write_barrier_kind) {
      case kNoWriteBarrier:
        UNREACHABLE();
        break;
      case kMapWriteBarrier:
        record_write_mode = RecordWriteMode::kValueIsMap;
        break;
      case kPointerWriteBarrier:
        record_write_mode = RecordWriteMode::kValueIsPointer;
        break;
      case kFullWriteBarrier:
        record_write_mode = RecordWriteMode::kValueIsAny;
        break;
    }
    InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
    size_t const temp_count = arraysize(temps);
    InstructionCode code = kArchStoreWithWriteBarrier;
    code |= AddressingModeField::encode(addressing_mode);
    code |= MiscField::encode(static_cast<int>(record_write_mode));
    Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
  } else {
    // No write barrier: choose the plain store for the representation.
    InstructionCode opcode = kArchNop;
    switch (rep) {
      case MachineRepresentation::kFloat32:
        opcode = kArmVstrF32;
        break;
      case MachineRepresentation::kFloat64:
        opcode = kArmVstrF64;
        break;
      case MachineRepresentation::kBit:  // Fall through.
      case MachineRepresentation::kWord8:
        opcode = kArmStrb;
        break;
      case MachineRepresentation::kWord16:
        opcode = kArmStrh;
        break;
      case MachineRepresentation::kTaggedSigned:   // Fall through.
      case MachineRepresentation::kTaggedPointer:  // Fall through.
      case MachineRepresentation::kTagged:  // Fall through.
      case MachineRepresentation::kWord32:
        opcode = kArmStr;
        break;
      case MachineRepresentation::kWord64:   // Fall through.
      case MachineRepresentation::kSimd128:  // Fall through.
      case MachineRepresentation::kNone:
        UNREACHABLE();
        return;
    }

    // EmitStore() appends the index operand after value and base.
    InstructionOperand inputs[4];
    size_t input_count = 0;
    inputs[input_count++] = g.UseRegister(value);
    inputs[input_count++] = g.UseRegister(base);
    EmitStore(this, opcode, input_count, inputs, index);
  }
}
    503 
// Selects an unaligned load. Integer loads go through the normal LDR path;
// FP loads are performed via integer registers and then moved into VFP
// registers, since VFP loads require alignment.
void InstructionSelector::VisitUnalignedLoad(Node* node) {
  UnalignedLoadRepresentation load_rep =
      UnalignedLoadRepresentationOf(node->op());
  ArmOperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);

  InstructionCode opcode = kArmLdr;
  // Only floating point loads need to be specially handled; integer loads
  // support unaligned access. We support unaligned FP loads by loading to
  // integer registers first, then moving to the destination FP register.
  switch (load_rep.representation()) {
    case MachineRepresentation::kFloat32: {
      // Single word: load into a temp GP register, then move to FP.
      InstructionOperand temp = g.TempRegister();
      EmitLoad(this, opcode, &temp, base, index);
      Emit(kArmVmovF32U32, g.DefineAsRegister(node), temp);
      return;
    }
    case MachineRepresentation::kFloat64: {
      // TODO(arm): use vld1.8 for this when NEON is available.
      // Compute the address of the least-significant half of the FP value.
      // We assume that the base node is unlikely to be an encodable immediate
      // or the result of a shift operation, so only consider the addressing
      // mode that should be used for the index node.
      InstructionCode add_opcode = kArmAdd;
      InstructionOperand inputs[3];
      inputs[0] = g.UseRegister(base);

      size_t input_count;
      if (TryMatchImmediateOrShift(this, &add_opcode, index, &input_count,
                                   &inputs[1])) {
        // input_count has been set by TryMatchImmediateOrShift(), so increment
        // it to account for the base register in inputs[0].
        input_count++;
      } else {
        add_opcode |= AddressingModeField::encode(kMode_Operand2_R);
        inputs[1] = g.UseRegister(index);
        input_count = 2;  // Base register and index.
      }

      // Materialize base + index into a single address register.
      InstructionOperand addr = g.TempRegister();
      Emit(add_opcode, 1, &addr, input_count, inputs);

      // Load both halves and move to an FP register.
      InstructionOperand fp_lo = g.TempRegister();
      InstructionOperand fp_hi = g.TempRegister();
      opcode |= AddressingModeField::encode(kMode_Offset_RI);
      Emit(opcode, fp_lo, addr, g.TempImmediate(0));
      Emit(opcode, fp_hi, addr, g.TempImmediate(4));
      Emit(kArmVmovF64U32U32, g.DefineAsRegister(node), fp_lo, fp_hi);
      return;
    }
    default:
      // All other cases should support unaligned accesses.
      UNREACHABLE();
      return;
  }
}
    562 
// Selects an unaligned store. Integer stores go through the normal STR
// path; FP values are first moved into integer registers and stored with
// STR, since VFP stores require alignment.
void InstructionSelector::VisitUnalignedStore(Node* node) {
  ArmOperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* value = node->InputAt(2);

  InstructionOperand inputs[4];
  size_t input_count = 0;

  UnalignedStoreRepresentation store_rep =
      UnalignedStoreRepresentationOf(node->op());

  // Only floating point stores need to be specially handled; integer stores
  // support unaligned access. We support unaligned FP stores by moving the
  // value to integer registers first, then storing to the destination address.
  switch (store_rep) {
    case MachineRepresentation::kFloat32: {
      // Single word: move the FP value to a GP register, store with STR.
      inputs[input_count++] = g.TempRegister();
      Emit(kArmVmovU32F32, inputs[0], g.UseRegister(value));
      inputs[input_count++] = g.UseRegister(base);
      EmitStore(this, kArmStr, input_count, inputs, index);
      return;
    }
    case MachineRepresentation::kFloat64: {
      // TODO(arm): use vst1.8 for this when NEON is available.
      // Store a 64-bit floating point value using two 32-bit integer stores.
      // Computing the store address here would require three live temporary
      // registers (fp<63:32>, fp<31:0>, address), so compute base + 4 after
      // storing the least-significant half of the value.

      // First, move the 64-bit FP value into two temporary integer registers.
      InstructionOperand fp[] = {g.TempRegister(), g.TempRegister()};
      inputs[input_count++] = g.UseRegister(value);
      Emit(kArmVmovU32U32F64, arraysize(fp), fp, input_count,
           inputs);

      // Store the least-significant half.
      inputs[0] = fp[0];  // Low 32-bits of FP value.
      inputs[input_count++] = g.UseRegister(base);  // First store base address.
      EmitStore(this, kArmStr, input_count, inputs, index);

      // Store the most-significant half.
      InstructionOperand base4 = g.TempRegister();
      Emit(kArmAdd | AddressingModeField::encode(kMode_Operand2_I), base4,
           g.UseRegister(base), g.TempImmediate(4));  // Compute base + 4.
      inputs[0] = fp[1];  // High 32-bits of FP value.
      inputs[1] = base4;  // Second store base + 4 address.
      EmitStore(this, kArmStr, input_count, inputs, index);
      return;
    }
    default:
      // All other cases should support unaligned accesses.
      UNREACHABLE();
      return;
  }
}
    619 
// Selects a bounds-checked load: the emitted instruction receives the
// offset, the length to check against, the buffer base, and the offset
// again (positionally consumed by the code generator).
void InstructionSelector::VisitCheckedLoad(Node* node) {
  CheckedLoadRepresentation load_rep = CheckedLoadRepresentationOf(node->op());
  ArmOperandGenerator g(this);
  Node* const buffer = node->InputAt(0);
  Node* const offset = node->InputAt(1);
  Node* const length = node->InputAt(2);
  ArchOpcode opcode = kArchNop;
  switch (load_rep.representation()) {
    case MachineRepresentation::kWord8:
      opcode = load_rep.IsSigned() ? kCheckedLoadInt8 : kCheckedLoadUint8;
      break;
    case MachineRepresentation::kWord16:
      opcode = load_rep.IsSigned() ? kCheckedLoadInt16 : kCheckedLoadUint16;
      break;
    case MachineRepresentation::kWord32:
      opcode = kCheckedLoadWord32;
      break;
    case MachineRepresentation::kFloat32:
      opcode = kCheckedLoadFloat32;
      break;
    case MachineRepresentation::kFloat64:
      opcode = kCheckedLoadFloat64;
      break;
    case MachineRepresentation::kBit:      // Fall through.
    case MachineRepresentation::kTaggedSigned:   // Fall through.
    case MachineRepresentation::kTaggedPointer:  // Fall through.
    case MachineRepresentation::kTagged:   // Fall through.
    case MachineRepresentation::kWord64:   // Fall through.
    case MachineRepresentation::kSimd128:  // Fall through.
    case MachineRepresentation::kNone:
      UNREACHABLE();
      return;
  }
  InstructionOperand offset_operand = g.UseRegister(offset);
  // The length may be folded as an immediate when it fits a CMP encoding.
  InstructionOperand length_operand = g.CanBeImmediate(length, kArmCmp)
                                          ? g.UseImmediate(length)
                                          : g.UseRegister(length);
  Emit(opcode | AddressingModeField::encode(kMode_Offset_RR),
       g.DefineAsRegister(node), offset_operand, length_operand,
       g.UseRegister(buffer), offset_operand);
}
    661 
    662 
// Selects a bounds-checked store; mirrors VisitCheckedLoad() but has no
// output and additionally passes the value to store.
void InstructionSelector::VisitCheckedStore(Node* node) {
  MachineRepresentation rep = CheckedStoreRepresentationOf(node->op());
  ArmOperandGenerator g(this);
  Node* const buffer = node->InputAt(0);
  Node* const offset = node->InputAt(1);
  Node* const length = node->InputAt(2);
  Node* const value = node->InputAt(3);
  ArchOpcode opcode = kArchNop;
  switch (rep) {
    case MachineRepresentation::kWord8:
      opcode = kCheckedStoreWord8;
      break;
    case MachineRepresentation::kWord16:
      opcode = kCheckedStoreWord16;
      break;
    case MachineRepresentation::kWord32:
      opcode = kCheckedStoreWord32;
      break;
    case MachineRepresentation::kFloat32:
      opcode = kCheckedStoreFloat32;
      break;
    case MachineRepresentation::kFloat64:
      opcode = kCheckedStoreFloat64;
      break;
    case MachineRepresentation::kBit:      // Fall through.
    case MachineRepresentation::kTaggedSigned:   // Fall through.
    case MachineRepresentation::kTaggedPointer:  // Fall through.
    case MachineRepresentation::kTagged:   // Fall through.
    case MachineRepresentation::kWord64:   // Fall through.
    case MachineRepresentation::kSimd128:  // Fall through.
    case MachineRepresentation::kNone:
      UNREACHABLE();
      return;
  }
  InstructionOperand offset_operand = g.UseRegister(offset);
  // The length may be folded as an immediate when it fits a CMP encoding.
  InstructionOperand length_operand = g.CanBeImmediate(length, kArmCmp)
                                          ? g.UseImmediate(length)
                                          : g.UseRegister(length);
  Emit(opcode | AddressingModeField::encode(kMode_Offset_RR), g.NoOutput(),
       offset_operand, length_operand, g.UseRegister(value),
       g.UseRegister(buffer), offset_operand);
}
    705 
    706 
    707 namespace {
    708 
    709 void EmitBic(InstructionSelector* selector, Node* node, Node* left,
    710              Node* right) {
    711   ArmOperandGenerator g(selector);
    712   InstructionCode opcode = kArmBic;
    713   InstructionOperand value_operand;
    714   InstructionOperand shift_operand;
    715   if (TryMatchShift(selector, &opcode, right, &value_operand, &shift_operand)) {
    716     selector->Emit(opcode, g.DefineAsRegister(node), g.UseRegister(left),
    717                    value_operand, shift_operand);
    718     return;
    719   }
    720   selector->Emit(opcode | AddressingModeField::encode(kMode_Operand2_R),
    721                  g.DefineAsRegister(node), g.UseRegister(left),
    722                  g.UseRegister(right));
    723 }
    724 
    725 
    726 void EmitUbfx(InstructionSelector* selector, Node* node, Node* left,
    727               uint32_t lsb, uint32_t width) {
    728   DCHECK_LE(1u, width);
    729   DCHECK_LE(width, 32u - lsb);
    730   ArmOperandGenerator g(selector);
    731   selector->Emit(kArmUbfx, g.DefineAsRegister(node), g.UseRegister(left),
    732                  g.TempImmediate(lsb), g.TempImmediate(width));
    733 }
    734 
    735 }  // namespace
    736 
    737 
// Selects instructions for a 32-bit AND, attempting several ARM strength
// reductions before falling back to the generic AND binop:
//  - and(xor(x, -1), y) / and(y, xor(x, -1)) -> BIC (bit clear)
//  - and(shr(x, 8|16|24), 0xff|0xffff)       -> UXTB/UXTH with byte rotation
//  - contiguous-mask patterns (ARMv7 only)   -> UBFX / BFC
//  - masks whose complement is encodable     -> BIC with inverted immediate
void InstructionSelector::VisitWord32And(Node* node) {
  ArmOperandGenerator g(this);
  Int32BinopMatcher m(node);
  // xor(x, -1) is a bitwise NOT, so and(not(x), y) becomes bic(y, x).
  if (m.left().IsWord32Xor() && CanCover(node, m.left().node())) {
    Int32BinopMatcher mleft(m.left().node());
    if (mleft.right().Is(-1)) {
      EmitBic(this, node, m.right().node(), mleft.left().node());
      return;
    }
  }
  if (m.right().IsWord32Xor() && CanCover(node, m.right().node())) {
    Int32BinopMatcher mright(m.right().node());
    if (mright.right().Is(-1)) {
      EmitBic(this, node, m.left().node(), mright.left().node());
      return;
    }
  }
  if (m.right().HasValue()) {
    uint32_t const value = m.right().Value();
    // width/leading_zeros characterize the mask; a contiguous mask satisfies
    // leading_zeros + width == 32 (checked below).
    uint32_t width = base::bits::CountPopulation32(value);
    uint32_t leading_zeros = base::bits::CountLeadingZeros32(value);

    // Try to merge SHR operations on the left hand input into this AND.
    if (m.left().IsWord32Shr()) {
      Int32BinopMatcher mshr(m.left().node());
      if (mshr.right().HasValue()) {
        uint32_t const shift = mshr.right().Value();

        if (((shift == 8) || (shift == 16) || (shift == 24)) &&
            ((value == 0xff) || (value == 0xffff))) {
          // Merge SHR into AND by emitting a UXTB or UXTH instruction with a
          // bytewise rotation.
          Emit((value == 0xff) ? kArmUxtb : kArmUxth,
               g.DefineAsRegister(m.node()), g.UseRegister(mshr.left().node()),
               g.TempImmediate(mshr.right().Value()));
          return;
        } else if (IsSupported(ARMv7) && (width != 0) &&
                   ((leading_zeros + width) == 32)) {
          // Merge Shr into And by emitting a UBFX instruction.
          DCHECK_EQ(0u, base::bits::CountTrailingZeros32(value));
          if ((1 <= shift) && (shift <= 31)) {
            // UBFX cannot extract bits past the register size, however since
            // shifting the original value would have introduced some zeros we
            // can still use UBFX with a smaller mask and the remaining bits
            // will be zeros.
            EmitUbfx(this, node, mshr.left().node(), shift,
                     std::min(width, 32 - shift));
            return;
          }
        }
      }
    } else if (value == 0xffff) {
      // Emit UXTH for this AND. We don't bother testing for UXTB, as it's no
      // better than AND 0xff for this operation.
      Emit(kArmUxth, g.DefineAsRegister(m.node()),
           g.UseRegister(m.left().node()), g.TempImmediate(0));
      return;
    }
    if (g.CanBeImmediate(~value)) {
      // Emit BIC for this AND by inverting the immediate value first.
      Emit(kArmBic | AddressingModeField::encode(kMode_Operand2_I),
           g.DefineAsRegister(node), g.UseRegister(m.left().node()),
           g.TempImmediate(~value));
      return;
    }
    if (!g.CanBeImmediate(value) && IsSupported(ARMv7)) {
      // If value has 9 to 23 contiguous set bits, and has the lsb set, we can
      // replace this AND with UBFX. Other contiguous bit patterns have already
      // been handled by BIC or will be handled by AND.
      if ((width != 0) && ((leading_zeros + width) == 32) &&
          (9 <= leading_zeros) && (leading_zeros <= 23)) {
        DCHECK_EQ(0u, base::bits::CountTrailingZeros32(value));
        EmitUbfx(this, node, m.left().node(), 0, width);
        return;
      }

      // Re-characterize the *cleared* bit run: if ~value is one contiguous
      // run of set bits, the AND is a bitfield clear (BFC) of that run.
      width = 32 - width;
      leading_zeros = base::bits::CountLeadingZeros32(~value);
      uint32_t lsb = base::bits::CountTrailingZeros32(~value);
      if ((leading_zeros + width + lsb) == 32) {
        // This AND can be replaced with BFC.
        Emit(kArmBfc, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
             g.TempImmediate(lsb), g.TempImmediate(width));
        return;
      }
    }
  }
  VisitBinop(this, node, kArmAnd, kArmAnd);
}
    827 
    828 
// 32-bit OR has no special ARM fusion here; use the generic ORR binop path.
void InstructionSelector::VisitWord32Or(Node* node) {
  VisitBinop(this, node, kArmOrr, kArmOrr);
}
    832 
    833 
    834 void InstructionSelector::VisitWord32Xor(Node* node) {
    835   ArmOperandGenerator g(this);
    836   Int32BinopMatcher m(node);
    837   if (m.right().Is(-1)) {
    838     InstructionCode opcode = kArmMvn;
    839     InstructionOperand value_operand;
    840     InstructionOperand shift_operand;
    841     if (TryMatchShift(this, &opcode, m.left().node(), &value_operand,
    842                       &shift_operand)) {
    843       Emit(opcode, g.DefineAsRegister(node), value_operand, shift_operand);
    844       return;
    845     }
    846     Emit(opcode | AddressingModeField::encode(kMode_Operand2_R),
    847          g.DefineAsRegister(node), g.UseRegister(m.left().node()));
    848     return;
    849   }
    850   VisitBinop(this, node, kArmEor, kArmEor);
    851 }
    852 
    853 
    854 namespace {
    855 
// Shared routine for all shift visitors. Emits a MOV whose second operand
// carries the shift (the addressing mode encodes which shift), honoring the
// flags continuation: branch targets, a deoptimization, or a boolean result.
template <typename TryMatchShift>
void VisitShift(InstructionSelector* selector, Node* node,
                TryMatchShift try_match_shift, FlagsContinuation* cont) {
  ArmOperandGenerator g(selector);
  InstructionCode opcode = kArmMov;
  InstructionOperand inputs[4];
  size_t input_count = 2;
  InstructionOperand outputs[2];
  size_t output_count = 0;

  // The matcher fills inputs[0..1] and merges the shift addressing mode into
  // |opcode|; shift nodes are always expected to match.
  CHECK(try_match_shift(selector, &opcode, node, &inputs[0], &inputs[1]));

  if (cont->IsBranch()) {
    // Branch continuations pass both target labels as extra inputs.
    inputs[input_count++] = g.Label(cont->true_block());
    inputs[input_count++] = g.Label(cont->false_block());
  }

  outputs[output_count++] = g.DefineAsRegister(node);
  if (cont->IsSet()) {
    // "Set" continuations additionally materialize the flag as a register.
    outputs[output_count++] = g.DefineAsRegister(cont->result());
  }

  DCHECK_NE(0u, input_count);
  DCHECK_NE(0u, output_count);
  DCHECK_GE(arraysize(inputs), input_count);
  DCHECK_GE(arraysize(outputs), output_count);
  DCHECK_NE(kMode_None, AddressingModeField::decode(opcode));

  // Fold the continuation's condition/reason into the opcode.
  opcode = cont->Encode(opcode);
  if (cont->IsDeoptimize()) {
    selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
                             cont->reason(), cont->frame_state());
  } else {
    selector->Emit(opcode, output_count, outputs, input_count, inputs);
  }
}
    892 
    893 
// Convenience overload: visit a shift with an empty (no-op) continuation.
template <typename TryMatchShift>
void VisitShift(InstructionSelector* selector, Node* node,
                              TryMatchShift try_match_shift) {
  FlagsContinuation cont;
  VisitShift(selector, node, try_match_shift, &cont);
}
    900 
    901 }  // namespace
    902 
    903 
// Logical shift left; TryMatchLSL folds an immediate or register amount.
void InstructionSelector::VisitWord32Shl(Node* node) {
  VisitShift(this, node, TryMatchLSL);
}
    907 
    908 
// Logical shift right. On ARMv7, shr(and(x, mask), lsb) with a mask that is a
// contiguous run of bits starting at |lsb| is fused into a single UBFX;
// otherwise emit a plain LSR.
void InstructionSelector::VisitWord32Shr(Node* node) {
  ArmOperandGenerator g(this);
  Int32BinopMatcher m(node);
  if (IsSupported(ARMv7) && m.left().IsWord32And() &&
      m.right().IsInRange(0, 31)) {
    uint32_t lsb = m.right().Value();
    Int32BinopMatcher mleft(m.left().node());
    if (mleft.right().HasValue()) {
      // Clear the mask's low |lsb| bits: the shift discards them anyway.
      uint32_t value = (mleft.right().Value() >> lsb) << lsb;
      uint32_t width = base::bits::CountPopulation32(value);
      uint32_t msb = base::bits::CountLeadingZeros32(value);
      // msb + width + lsb == 32 iff the remaining mask is one contiguous run
      // of |width| bits starting exactly at bit |lsb|.
      if (msb + width + lsb == 32) {
        DCHECK_EQ(lsb, base::bits::CountTrailingZeros32(value));
        return EmitUbfx(this, node, mleft.left().node(), lsb, width);
      }
    }
  }
  VisitShift(this, node, TryMatchLSR);
}
    928 
    929 
// Arithmetic shift right. sar(shl(x, K), K) is a sign extension: 16/16 ->
// SXTH, 24/24 -> SXTB; other shl/sar pairs with sar >= shl become a signed
// bitfield extract (SBFX) on ARMv7. Otherwise emit a plain ASR.
void InstructionSelector::VisitWord32Sar(Node* node) {
  ArmOperandGenerator g(this);
  Int32BinopMatcher m(node);
  if (CanCover(m.node(), m.left().node()) && m.left().IsWord32Shl()) {
    Int32BinopMatcher mleft(m.left().node());
    if (m.right().HasValue() && mleft.right().HasValue()) {
      uint32_t sar = m.right().Value();
      uint32_t shl = mleft.right().Value();
      if ((sar == shl) && (sar == 16)) {
        // Sign-extend the low halfword (rotation 0).
        Emit(kArmSxth, g.DefineAsRegister(node),
             g.UseRegister(mleft.left().node()), g.TempImmediate(0));
        return;
      } else if ((sar == shl) && (sar == 24)) {
        // Sign-extend the low byte (rotation 0).
        Emit(kArmSxtb, g.DefineAsRegister(node),
             g.UseRegister(mleft.left().node()), g.TempImmediate(0));
        return;
      } else if (IsSupported(ARMv7) && (sar >= shl)) {
        // sar(shl(x, shl), sar) extracts (32 - sar) bits of x starting at
        // bit (sar - shl), sign extended.
        Emit(kArmSbfx, g.DefineAsRegister(node),
             g.UseRegister(mleft.left().node()), g.TempImmediate(sar - shl),
             g.TempImmediate(32 - sar));
        return;
      }
    }
  }
  VisitShift(this, node, TryMatchASR);
}
    956 
    957 void InstructionSelector::VisitInt32PairAdd(Node* node) {
    958   ArmOperandGenerator g(this);
    959 
    960   Node* projection1 = NodeProperties::FindProjection(node, 1);
    961   if (projection1) {
    962     // We use UseUniqueRegister here to avoid register sharing with the output
    963     // registers.
    964     InstructionOperand inputs[] = {
    965         g.UseRegister(node->InputAt(0)), g.UseUniqueRegister(node->InputAt(1)),
    966         g.UseRegister(node->InputAt(2)), g.UseUniqueRegister(node->InputAt(3))};
    967 
    968     InstructionOperand outputs[] = {
    969         g.DefineAsRegister(node),
    970         g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
    971 
    972     Emit(kArmAddPair, 2, outputs, 4, inputs);
    973   } else {
    974     // The high word of the result is not used, so we emit the standard 32 bit
    975     // instruction.
    976     Emit(kArmAdd | AddressingModeField::encode(kMode_Operand2_R),
    977          g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(0)),
    978          g.UseRegister(node->InputAt(2)));
    979   }
    980 }
    981 
    982 void InstructionSelector::VisitInt32PairSub(Node* node) {
    983   ArmOperandGenerator g(this);
    984 
    985   Node* projection1 = NodeProperties::FindProjection(node, 1);
    986   if (projection1) {
    987     // We use UseUniqueRegister here to avoid register sharing with the output
    988     // register.
    989     InstructionOperand inputs[] = {
    990         g.UseRegister(node->InputAt(0)), g.UseUniqueRegister(node->InputAt(1)),
    991         g.UseRegister(node->InputAt(2)), g.UseUniqueRegister(node->InputAt(3))};
    992 
    993     InstructionOperand outputs[] = {
    994         g.DefineAsRegister(node),
    995         g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
    996 
    997     Emit(kArmSubPair, 2, outputs, 4, inputs);
    998   } else {
    999     // The high word of the result is not used, so we emit the standard 32 bit
   1000     // instruction.
   1001     Emit(kArmSub | AddressingModeField::encode(kMode_Operand2_R),
   1002          g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(0)),
   1003          g.UseRegister(node->InputAt(2)));
   1004   }
   1005 }
   1006 
   1007 void InstructionSelector::VisitInt32PairMul(Node* node) {
   1008   ArmOperandGenerator g(this);
   1009   Node* projection1 = NodeProperties::FindProjection(node, 1);
   1010   if (projection1) {
   1011     InstructionOperand inputs[] = {g.UseUniqueRegister(node->InputAt(0)),
   1012                                    g.UseUniqueRegister(node->InputAt(1)),
   1013                                    g.UseUniqueRegister(node->InputAt(2)),
   1014                                    g.UseUniqueRegister(node->InputAt(3))};
   1015 
   1016     InstructionOperand outputs[] = {
   1017         g.DefineAsRegister(node),
   1018         g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
   1019 
   1020     Emit(kArmMulPair, 2, outputs, 4, inputs);
   1021   } else {
   1022     // The high word of the result is not used, so we emit the standard 32 bit
   1023     // instruction.
   1024     Emit(kArmMul | AddressingModeField::encode(kMode_Operand2_R),
   1025          g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(0)),
   1026          g.UseRegister(node->InputAt(2)));
   1027   }
   1028 }
   1029 
   1030 namespace {
   1031 // Shared routine for multiple shift operations.
// Shared routine for multiple shift operations.
void VisitWord32PairShift(InstructionSelector* selector, InstructionCode opcode,
                          Node* node) {
  ArmOperandGenerator g(selector);
  // We use g.UseUniqueRegister here to guarantee that there is
  // no register aliasing of input registers with output registers.
  Int32Matcher m(node->InputAt(2));
  InstructionOperand shift_operand;
  if (m.HasValue()) {
    // Constant shift amounts are encoded as an immediate.
    shift_operand = g.UseImmediate(m.node());
  } else {
    shift_operand = g.UseUniqueRegister(m.node());
  }

  InstructionOperand inputs[] = {g.UseUniqueRegister(node->InputAt(0)),
                                 g.UseUniqueRegister(node->InputAt(1)),
                                 shift_operand};

  Node* projection1 = NodeProperties::FindProjection(node, 1);

  InstructionOperand outputs[2];
  InstructionOperand temps[1];
  int32_t output_count = 0;
  int32_t temp_count = 0;

  outputs[output_count++] = g.DefineAsRegister(node);
  if (projection1) {
    outputs[output_count++] = g.DefineAsRegister(projection1);
  } else {
    // The high word is unused, but the pair instruction still writes two
    // registers, so give it a scratch register for the second result.
    temps[temp_count++] = g.TempRegister();
  }

  selector->Emit(opcode, output_count, outputs, 3, inputs, temp_count, temps);
}
   1065 }  // namespace
// Pair shifts: shift a 64-bit value held in two 32-bit registers.
void InstructionSelector::VisitWord32PairShl(Node* node) {
  VisitWord32PairShift(this, kArmLslPair, node);
}

void InstructionSelector::VisitWord32PairShr(Node* node) {
  VisitWord32PairShift(this, kArmLsrPair, node);
}

void InstructionSelector::VisitWord32PairSar(Node* node) {
  VisitWord32PairShift(this, kArmAsrPair, node);
}

// Rotate right; TryMatchROR folds an immediate or register amount.
void InstructionSelector::VisitWord32Ror(Node* node) {
  VisitShift(this, node, TryMatchROR);
}
   1081 
   1082 
void InstructionSelector::VisitWord32Clz(Node* node) {
  VisitRR(this, kArmClz, node);
}


// The operators below are marked unreachable: this selector does not expect
// them (presumably they are not advertised in the ARM machine-operator
// supported-flags — confirm against InstructionSelector::SupportedMachineOperatorFlags).
void InstructionSelector::VisitWord32Ctz(Node* node) { UNREACHABLE(); }


void InstructionSelector::VisitWord32ReverseBits(Node* node) {
  // RBIT requires ARMv7.
  DCHECK(IsSupported(ARMv7));
  VisitRR(this, kArmRbit, node);
}

void InstructionSelector::VisitWord64ReverseBytes(Node* node) { UNREACHABLE(); }

void InstructionSelector::VisitWord32ReverseBytes(Node* node) { UNREACHABLE(); }

void InstructionSelector::VisitWord32Popcnt(Node* node) { UNREACHABLE(); }
   1101 
   1102 
   1103 void InstructionSelector::VisitInt32Add(Node* node) {
   1104   ArmOperandGenerator g(this);
   1105   Int32BinopMatcher m(node);
   1106   if (CanCover(node, m.left().node())) {
   1107     switch (m.left().opcode()) {
   1108       case IrOpcode::kInt32Mul: {
   1109         Int32BinopMatcher mleft(m.left().node());
   1110         Emit(kArmMla, g.DefineAsRegister(node),
   1111              g.UseRegister(mleft.left().node()),
   1112              g.UseRegister(mleft.right().node()),
   1113              g.UseRegister(m.right().node()));
   1114         return;
   1115       }
   1116       case IrOpcode::kInt32MulHigh: {
   1117         Int32BinopMatcher mleft(m.left().node());
   1118         Emit(kArmSmmla, g.DefineAsRegister(node),
   1119              g.UseRegister(mleft.left().node()),
   1120              g.UseRegister(mleft.right().node()),
   1121              g.UseRegister(m.right().node()));
   1122         return;
   1123       }
   1124       case IrOpcode::kWord32And: {
   1125         Int32BinopMatcher mleft(m.left().node());
   1126         if (mleft.right().Is(0xff)) {
   1127           Emit(kArmUxtab, g.DefineAsRegister(node),
   1128                g.UseRegister(m.right().node()),
   1129                g.UseRegister(mleft.left().node()), g.TempImmediate(0));
   1130           return;
   1131         } else if (mleft.right().Is(0xffff)) {
   1132           Emit(kArmUxtah, g.DefineAsRegister(node),
   1133                g.UseRegister(m.right().node()),
   1134                g.UseRegister(mleft.left().node()), g.TempImmediate(0));
   1135           return;
   1136         }
   1137       }
   1138       case IrOpcode::kWord32Sar: {
   1139         Int32BinopMatcher mleft(m.left().node());
   1140         if (CanCover(mleft.node(), mleft.left().node()) &&
   1141             mleft.left().IsWord32Shl()) {
   1142           Int32BinopMatcher mleftleft(mleft.left().node());
   1143           if (mleft.right().Is(24) && mleftleft.right().Is(24)) {
   1144             Emit(kArmSxtab, g.DefineAsRegister(node),
   1145                  g.UseRegister(m.right().node()),
   1146                  g.UseRegister(mleftleft.left().node()), g.TempImmediate(0));
   1147             return;
   1148           } else if (mleft.right().Is(16) && mleftleft.right().Is(16)) {
   1149             Emit(kArmSxtah, g.DefineAsRegister(node),
   1150                  g.UseRegister(m.right().node()),
   1151                  g.UseRegister(mleftleft.left().node()), g.TempImmediate(0));
   1152             return;
   1153           }
   1154         }
   1155       }
   1156       default:
   1157         break;
   1158     }
   1159   }
   1160   if (CanCover(node, m.right().node())) {
   1161     switch (m.right().opcode()) {
   1162       case IrOpcode::kInt32Mul: {
   1163         Int32BinopMatcher mright(m.right().node());
   1164         Emit(kArmMla, g.DefineAsRegister(node),
   1165              g.UseRegister(mright.left().node()),
   1166              g.UseRegister(mright.right().node()),
   1167              g.UseRegister(m.left().node()));
   1168         return;
   1169       }
   1170       case IrOpcode::kInt32MulHigh: {
   1171         Int32BinopMatcher mright(m.right().node());
   1172         Emit(kArmSmmla, g.DefineAsRegister(node),
   1173              g.UseRegister(mright.left().node()),
   1174              g.UseRegister(mright.right().node()),
   1175              g.UseRegister(m.left().node()));
   1176         return;
   1177       }
   1178       case IrOpcode::kWord32And: {
   1179         Int32BinopMatcher mright(m.right().node());
   1180         if (mright.right().Is(0xff)) {
   1181           Emit(kArmUxtab, g.DefineAsRegister(node),
   1182                g.UseRegister(m.left().node()),
   1183                g.UseRegister(mright.left().node()), g.TempImmediate(0));
   1184           return;
   1185         } else if (mright.right().Is(0xffff)) {
   1186           Emit(kArmUxtah, g.DefineAsRegister(node),
   1187                g.UseRegister(m.left().node()),
   1188                g.UseRegister(mright.left().node()), g.TempImmediate(0));
   1189           return;
   1190         }
   1191       }
   1192       case IrOpcode::kWord32Sar: {
   1193         Int32BinopMatcher mright(m.right().node());
   1194         if (CanCover(mright.node(), mright.left().node()) &&
   1195             mright.left().IsWord32Shl()) {
   1196           Int32BinopMatcher mrightleft(mright.left().node());
   1197           if (mright.right().Is(24) && mrightleft.right().Is(24)) {
   1198             Emit(kArmSxtab, g.DefineAsRegister(node),
   1199                  g.UseRegister(m.left().node()),
   1200                  g.UseRegister(mrightleft.left().node()), g.TempImmediate(0));
   1201             return;
   1202           } else if (mright.right().Is(16) && mrightleft.right().Is(16)) {
   1203             Emit(kArmSxtah, g.DefineAsRegister(node),
   1204                  g.UseRegister(m.left().node()),
   1205                  g.UseRegister(mrightleft.left().node()), g.TempImmediate(0));
   1206             return;
   1207           }
   1208         }
   1209       }
   1210       default:
   1211         break;
   1212     }
   1213   }
   1214   VisitBinop(this, node, kArmAdd, kArmAdd);
   1215 }
   1216 
   1217 
// Selects instructions for a 32-bit subtract. sub(a, mul(b, c)) is fused into
// a single MLS (multiply-and-subtract) on ARMv7; otherwise use the generic
// binop path (kArmRsb when the operands must be reversed for an immediate).
void InstructionSelector::VisitInt32Sub(Node* node) {
  ArmOperandGenerator g(this);
  Int32BinopMatcher m(node);
  if (IsSupported(ARMv7) && m.right().IsInt32Mul() &&
      CanCover(node, m.right().node())) {
    Int32BinopMatcher mright(m.right().node());
    // MLS computes m.left - mright.left * mright.right.
    Emit(kArmMls, g.DefineAsRegister(node), g.UseRegister(mright.left().node()),
         g.UseRegister(mright.right().node()), g.UseRegister(m.left().node()));
    return;
  }
  VisitBinop(this, node, kArmSub, kArmRsb);
}
   1230 
   1231 namespace {
   1232 
// Emits a 32-bit multiply with overflow detection: SMULL produces the full
// 64-bit product (low word = result, high word = temp); the product overflows
// 32 bits iff the high word differs from the sign-extension of the low word,
// which is tested with cmp(temp, result ASR #31). The comparison outcome is
// routed through the flags continuation (branch/deopt/set).
void EmitInt32MulWithOverflow(InstructionSelector* selector, Node* node,
                              FlagsContinuation* cont) {
  ArmOperandGenerator g(selector);
  Int32BinopMatcher m(node);
  InstructionOperand result_operand = g.DefineAsRegister(node);
  InstructionOperand temp_operand = g.TempRegister();
  InstructionOperand outputs[] = {result_operand, temp_operand};
  InstructionOperand inputs[] = {g.UseRegister(m.left().node()),
                                 g.UseRegister(m.right().node())};
  selector->Emit(kArmSmull, 2, outputs, 2, inputs);

  // result operand needs shift operator.
  InstructionOperand shift_31 = g.UseImmediate(31);
  InstructionCode opcode = cont->Encode(kArmCmp) |
                           AddressingModeField::encode(kMode_Operand2_R_ASR_I);
  if (cont->IsBranch()) {
    selector->Emit(opcode, g.NoOutput(), temp_operand, result_operand, shift_31,
                   g.Label(cont->true_block()), g.Label(cont->false_block()));
  } else if (cont->IsDeoptimize()) {
    InstructionOperand in[] = {temp_operand, result_operand, shift_31};
    selector->EmitDeoptimize(opcode, 0, nullptr, 3, in, cont->reason(),
                             cont->frame_state());
  } else {
    // "Set" continuation: materialize the overflow flag into a register.
    DCHECK(cont->IsSet());
    selector->Emit(opcode, g.DefineAsRegister(cont->result()), temp_operand,
                   result_operand, shift_31);
  }
}
   1261 
   1262 }  // namespace
   1263 
// Selects instructions for a 32-bit multiply. A positive constant multiplier
// of the form 2^k + 1 becomes add(x, x LSL k), and 2^k - 1 becomes
// rsb(x, x LSL k) (i.e. (x << k) - x); otherwise emit a plain MUL.
void InstructionSelector::VisitInt32Mul(Node* node) {
  ArmOperandGenerator g(this);
  Int32BinopMatcher m(node);
  if (m.right().HasValue() && m.right().Value() > 0) {
    int32_t value = m.right().Value();
    if (base::bits::IsPowerOfTwo32(value - 1)) {
      // x * (2^k + 1) == x + (x << k).
      Emit(kArmAdd | AddressingModeField::encode(kMode_Operand2_R_LSL_I),
           g.DefineAsRegister(node), g.UseRegister(m.left().node()),
           g.UseRegister(m.left().node()),
           g.TempImmediate(WhichPowerOf2(value - 1)));
      return;
    }
    // The value < kMaxInt guard keeps value + 1 from overflowing.
    if (value < kMaxInt && base::bits::IsPowerOfTwo32(value + 1)) {
      // x * (2^k - 1) == (x << k) - x, expressed as RSB.
      Emit(kArmRsb | AddressingModeField::encode(kMode_Operand2_R_LSL_I),
           g.DefineAsRegister(node), g.UseRegister(m.left().node()),
           g.UseRegister(m.left().node()),
           g.TempImmediate(WhichPowerOf2(value + 1)));
      return;
    }
  }
  VisitRRR(this, kArmMul, node);
}
   1286 
   1287 
// Signed multiply returning the high 32 bits (SMMUL).
void InstructionSelector::VisitInt32MulHigh(Node* node) {
  VisitRRR(this, kArmSmmul, node);
}
   1291 
   1292 
   1293 void InstructionSelector::VisitUint32MulHigh(Node* node) {
   1294   ArmOperandGenerator g(this);
   1295   InstructionOperand outputs[] = {g.TempRegister(), g.DefineAsRegister(node)};
   1296   InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0)),
   1297                                  g.UseRegister(node->InputAt(1))};
   1298   Emit(kArmUmull, arraysize(outputs), outputs, arraysize(inputs), inputs);
   1299 }
   1300 
   1301 
// Division and modulus. The shared VisitDiv/VisitMod helpers take the
// hardware divide opcode plus the int<->float64 conversion opcodes used for
// the software fallback when SDIV/UDIV are unavailable.
void InstructionSelector::VisitInt32Div(Node* node) {
  VisitDiv(this, node, kArmSdiv, kArmVcvtF64S32, kArmVcvtS32F64);
}


void InstructionSelector::VisitUint32Div(Node* node) {
  VisitDiv(this, node, kArmUdiv, kArmVcvtF64U32, kArmVcvtU32F64);
}


void InstructionSelector::VisitInt32Mod(Node* node) {
  VisitMod(this, node, kArmSdiv, kArmVcvtF64S32, kArmVcvtS32F64);
}


void InstructionSelector::VisitUint32Mod(Node* node) {
  VisitMod(this, node, kArmUdiv, kArmVcvtF64U32, kArmVcvtU32F64);
}
   1320 
   1321 
// Numeric conversions and bitcasts; each maps 1:1 onto a VFP convert or move
// instruction via the generic RR (one input, one output) helper.
void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
  VisitRR(this, kArmVcvtF64F32, node);
}


void InstructionSelector::VisitRoundInt32ToFloat32(Node* node) {
  VisitRR(this, kArmVcvtF32S32, node);
}


void InstructionSelector::VisitRoundUint32ToFloat32(Node* node) {
  VisitRR(this, kArmVcvtF32U32, node);
}


void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
  VisitRR(this, kArmVcvtF64S32, node);
}


void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
  VisitRR(this, kArmVcvtF64U32, node);
}


void InstructionSelector::VisitTruncateFloat32ToInt32(Node* node) {
  VisitRR(this, kArmVcvtS32F32, node);
}


void InstructionSelector::VisitTruncateFloat32ToUint32(Node* node) {
  VisitRR(this, kArmVcvtU32F32, node);
}


void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
  VisitRR(this, kArmVcvtS32F64, node);
}


void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
  VisitRR(this, kArmVcvtU32F64, node);
}

void InstructionSelector::VisitTruncateFloat64ToUint32(Node* node) {
  VisitRR(this, kArmVcvtU32F64, node);
}

// JavaScript-style truncation (modulo 2^32) is an architecture-independent
// pseudo-instruction.
void InstructionSelector::VisitTruncateFloat64ToWord32(Node* node) {
  VisitRR(this, kArchTruncateDoubleToI, node);
}

void InstructionSelector::VisitRoundFloat64ToInt32(Node* node) {
  VisitRR(this, kArmVcvtS32F64, node);
}

// Bitcasts are register-bank moves (VMOV), not value conversions.
void InstructionSelector::VisitBitcastFloat32ToInt32(Node* node) {
  VisitRR(this, kArmVmovU32F32, node);
}

void InstructionSelector::VisitBitcastInt32ToFloat32(Node* node) {
  VisitRR(this, kArmVmovF32U32, node);
}
   1388 
// float32 add: fuses add(mul(a, b), c) on either side into VMLA
// (multiply-accumulate). VMLA accumulates into its destination, hence
// DefineSameAsFirst with the addend as the first operand.
void InstructionSelector::VisitFloat32Add(Node* node) {
  ArmOperandGenerator g(this);
  Float32BinopMatcher m(node);
  if (m.left().IsFloat32Mul() && CanCover(node, m.left().node())) {
    Float32BinopMatcher mleft(m.left().node());
    Emit(kArmVmlaF32, g.DefineSameAsFirst(node),
         g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
         g.UseRegister(mleft.right().node()));
    return;
  }
  if (m.right().IsFloat32Mul() && CanCover(node, m.right().node())) {
    Float32BinopMatcher mright(m.right().node());
    Emit(kArmVmlaF32, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
         g.UseRegister(mright.left().node()),
         g.UseRegister(mright.right().node()));
    return;
  }
  VisitRRR(this, kArmVaddF32, node);
}
   1408 
   1409 
// float64 add: same VMLA fusion as the float32 version above.
void InstructionSelector::VisitFloat64Add(Node* node) {
  ArmOperandGenerator g(this);
  Float64BinopMatcher m(node);
  if (m.left().IsFloat64Mul() && CanCover(node, m.left().node())) {
    Float64BinopMatcher mleft(m.left().node());
    Emit(kArmVmlaF64, g.DefineSameAsFirst(node),
         g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
         g.UseRegister(mleft.right().node()));
    return;
  }
  if (m.right().IsFloat64Mul() && CanCover(node, m.right().node())) {
    Float64BinopMatcher mright(m.right().node());
    Emit(kArmVmlaF64, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
         g.UseRegister(mright.left().node()),
         g.UseRegister(mright.right().node()));
    return;
  }
  VisitRRR(this, kArmVaddF64, node);
}
   1429 
// float32 subtract: sub(a, mul(b, c)) fuses into VMLS (multiply-subtract),
// which subtracts the product from the accumulator (DefineSameAsFirst).
void InstructionSelector::VisitFloat32Sub(Node* node) {
  ArmOperandGenerator g(this);
  Float32BinopMatcher m(node);
  if (m.right().IsFloat32Mul() && CanCover(node, m.right().node())) {
    Float32BinopMatcher mright(m.right().node());
    Emit(kArmVmlsF32, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
         g.UseRegister(mright.left().node()),
         g.UseRegister(mright.right().node()));
    return;
  }
  VisitRRR(this, kArmVsubF32, node);
}
   1442 
   1443 void InstructionSelector::VisitFloat64Sub(Node* node) {
   1444   ArmOperandGenerator g(this);
   1445   Float64BinopMatcher m(node);
   1446   if (m.right().IsFloat64Mul() && CanCover(node, m.right().node())) {
   1447     Float64BinopMatcher mright(m.right().node());
   1448     Emit(kArmVmlsF64, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
   1449          g.UseRegister(mright.left().node()),
   1450          g.UseRegister(mright.right().node()));
   1451     return;
   1452   }
   1453   VisitRRR(this, kArmVsubF64, node);
   1454 }
   1455 
// Float32Mul: lowered to kArmVmulF32 via the shared three-register helper.
void InstructionSelector::VisitFloat32Mul(Node* node) {
  VisitRRR(this, kArmVmulF32, node);
}
   1459 
   1460 
// Float64Mul: lowered to kArmVmulF64 via the shared three-register helper.
void InstructionSelector::VisitFloat64Mul(Node* node) {
  VisitRRR(this, kArmVmulF64, node);
}
   1464 
   1465 
// Float32Div: lowered to kArmVdivF32 via the shared three-register helper.
void InstructionSelector::VisitFloat32Div(Node* node) {
  VisitRRR(this, kArmVdivF32, node);
}
   1469 
   1470 
// Float64Div: lowered to kArmVdivF64 via the shared three-register helper.
void InstructionSelector::VisitFloat64Div(Node* node) {
  VisitRRR(this, kArmVdivF64, node);
}
   1474 
   1475 
// Float64Mod is lowered to a call (MarkAsCall): inputs are fixed to d0/d1
// and the result is produced in d0.
void InstructionSelector::VisitFloat64Mod(Node* node) {
  ArmOperandGenerator g(this);
  Emit(kArmVmodF64, g.DefineAsFixed(node, d0), g.UseFixed(node->InputAt(0), d0),
       g.UseFixed(node->InputAt(1), d1))->MarkAsCall();
}
   1481 
// Float32Max: lowered to kArmFloat32Max via the shared three-register helper.
void InstructionSelector::VisitFloat32Max(Node* node) {
  VisitRRR(this, kArmFloat32Max, node);
}
   1485 
// Float64Max: lowered to kArmFloat64Max via the shared three-register helper.
void InstructionSelector::VisitFloat64Max(Node* node) {
  VisitRRR(this, kArmFloat64Max, node);
}
   1489 
// Float64SilenceNaN: lowered to kArmFloat64SilenceNaN (unary, two-register).
void InstructionSelector::VisitFloat64SilenceNaN(Node* node) {
  VisitRR(this, kArmFloat64SilenceNaN, node);
}
   1493 
// Float32Min: lowered to kArmFloat32Min via the shared three-register helper.
void InstructionSelector::VisitFloat32Min(Node* node) {
  VisitRRR(this, kArmFloat32Min, node);
}
   1497 
// Float64Min: lowered to kArmFloat64Min via the shared three-register helper.
void InstructionSelector::VisitFloat64Min(Node* node) {
  VisitRRR(this, kArmFloat64Min, node);
}
   1501 
// Float32Abs: lowered to kArmVabsF32 (unary, two-register).
void InstructionSelector::VisitFloat32Abs(Node* node) {
  VisitRR(this, kArmVabsF32, node);
}
   1505 
   1506 
// Float64Abs: lowered to kArmVabsF64 (unary, two-register).
void InstructionSelector::VisitFloat64Abs(Node* node) {
  VisitRR(this, kArmVabsF64, node);
}
   1510 
// Float32Sqrt: lowered to kArmVsqrtF32 (unary, two-register).
void InstructionSelector::VisitFloat32Sqrt(Node* node) {
  VisitRR(this, kArmVsqrtF32, node);
}
   1514 
   1515 
// Float64Sqrt: lowered to kArmVsqrtF64 (unary, two-register).
void InstructionSelector::VisitFloat64Sqrt(Node* node) {
  VisitRR(this, kArmVsqrtF64, node);
}
   1519 
   1520 
// Float32RoundDown: kArmVrintmF32. Only reachable when the target supports
// ARMv8 (the rounding instructions are ARMv8-only), hence the DCHECK.
void InstructionSelector::VisitFloat32RoundDown(Node* node) {
  DCHECK(CpuFeatures::IsSupported(ARMv8));
  VisitRR(this, kArmVrintmF32, node);
}
   1525 
   1526 
// Float64RoundDown: kArmVrintmF64. Requires ARMv8 (see DCHECK).
void InstructionSelector::VisitFloat64RoundDown(Node* node) {
  DCHECK(CpuFeatures::IsSupported(ARMv8));
  VisitRR(this, kArmVrintmF64, node);
}
   1531 
   1532 
// Float32RoundUp: kArmVrintpF32. Requires ARMv8 (see DCHECK).
void InstructionSelector::VisitFloat32RoundUp(Node* node) {
  DCHECK(CpuFeatures::IsSupported(ARMv8));
  VisitRR(this, kArmVrintpF32, node);
}
   1537 
   1538 
// Float64RoundUp: kArmVrintpF64. Requires ARMv8 (see DCHECK).
void InstructionSelector::VisitFloat64RoundUp(Node* node) {
  DCHECK(CpuFeatures::IsSupported(ARMv8));
  VisitRR(this, kArmVrintpF64, node);
}
   1543 
   1544 
// Float32RoundTruncate: kArmVrintzF32. Requires ARMv8 (see DCHECK).
void InstructionSelector::VisitFloat32RoundTruncate(Node* node) {
  DCHECK(CpuFeatures::IsSupported(ARMv8));
  VisitRR(this, kArmVrintzF32, node);
}
   1549 
   1550 
// Float64RoundTruncate: kArmVrintzF64. Requires ARMv8 (see DCHECK).
void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
  DCHECK(CpuFeatures::IsSupported(ARMv8));
  VisitRR(this, kArmVrintzF64, node);
}
   1555 
   1556 
// Float64RoundTiesAway: kArmVrintaF64. Requires ARMv8 (see DCHECK).
void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
  DCHECK(CpuFeatures::IsSupported(ARMv8));
  VisitRR(this, kArmVrintaF64, node);
}
   1561 
   1562 
// Float32RoundTiesEven: kArmVrintnF32. Requires ARMv8 (see DCHECK).
void InstructionSelector::VisitFloat32RoundTiesEven(Node* node) {
  DCHECK(CpuFeatures::IsSupported(ARMv8));
  VisitRR(this, kArmVrintnF32, node);
}
   1567 
   1568 
// Float64RoundTiesEven: kArmVrintnF64. Requires ARMv8 (see DCHECK).
void InstructionSelector::VisitFloat64RoundTiesEven(Node* node) {
  DCHECK(CpuFeatures::IsSupported(ARMv8));
  VisitRR(this, kArmVrintnF64, node);
}
   1573 
// Float32Neg: lowered to kArmVnegF32 (unary, two-register).
void InstructionSelector::VisitFloat32Neg(Node* node) {
  VisitRR(this, kArmVnegF32, node);
}
   1577 
// Float64Neg: lowered to kArmVnegF64 (unary, two-register).
void InstructionSelector::VisitFloat64Neg(Node* node) {
  VisitRR(this, kArmVnegF64, node);
}
   1581 
// Emits an IEEE754 binary operation as a call (MarkAsCall): both inputs are
// fixed to d0/d1 and the result is fixed to d0. {opcode} selects which
// operation is performed.
void InstructionSelector::VisitFloat64Ieee754Binop(Node* node,
                                                   InstructionCode opcode) {
  ArmOperandGenerator g(this);
  Emit(opcode, g.DefineAsFixed(node, d0), g.UseFixed(node->InputAt(0), d0),
       g.UseFixed(node->InputAt(1), d1))
      ->MarkAsCall();
}
   1589 
// Emits an IEEE754 unary operation as a call (MarkAsCall): input and result
// are both fixed to d0. {opcode} selects which operation is performed.
void InstructionSelector::VisitFloat64Ieee754Unop(Node* node,
                                                  InstructionCode opcode) {
  ArmOperandGenerator g(this);
  Emit(opcode, g.DefineAsFixed(node, d0), g.UseFixed(node->InputAt(0), d0))
      ->MarkAsCall();
}
   1596 
// Materializes the outgoing arguments for a call. For C function calls this
// emits kArchPrepareCallCFunction (with the parameter count encoded in the
// MiscField) followed by one kArmPoke per argument slot; for all other calls
// the stack arguments are pushed in reverse order with kArmPush.
void InstructionSelector::EmitPrepareArguments(
    ZoneVector<PushParameter>* arguments, const CallDescriptor* descriptor,
    Node* node) {
  ArmOperandGenerator g(this);

  // Prepare for C function call.
  if (descriptor->IsCFunctionCall()) {
    Emit(kArchPrepareCallCFunction |
             MiscField::encode(static_cast<int>(descriptor->ParameterCount())),
         0, nullptr, 0, nullptr);

    // Poke any stack arguments. The slot index is encoded in the MiscField of
    // the kArmPoke instruction.
    for (size_t n = 0; n < arguments->size(); ++n) {
      PushParameter input = (*arguments)[n];
      if (input.node()) {
        int slot = static_cast<int>(n);
        Emit(kArmPoke | MiscField::encode(slot), g.NoOutput(),
             g.UseRegister(input.node()));
      }
    }
  } else {
    // Push any stack arguments.
    for (PushParameter input : base::Reversed(*arguments)) {
      // Skip any alignment holes in pushed nodes.
      if (input.node() == nullptr) continue;
      Emit(kArmPush, g.NoOutput(), g.UseRegister(input.node()));
    }
  }
}
   1626 
   1627 
// On ARM, tail-call addresses are not emitted as immediate operands.
bool InstructionSelector::IsTailCallAddressImmediate() { return false; }
   1629 
// Number of temporary registers needed when tail-calling from a JS function.
int InstructionSelector::GetTempsCountForTailCallFromJSFunction() { return 3; }
   1631 
   1632 namespace {
   1633 
// Shared routine for multiple compare operations. Encodes the flags
// continuation into {opcode} and emits it in one of three forms: a two-target
// branch, a deoptimize (with the continuation's reason and frame state), or a
// materialized boolean written to the continuation's result node.
void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
                  InstructionOperand left, InstructionOperand right,
                  FlagsContinuation* cont) {
  ArmOperandGenerator g(selector);
  opcode = cont->Encode(opcode);
  if (cont->IsBranch()) {
    selector->Emit(opcode, g.NoOutput(), left, right,
                   g.Label(cont->true_block()), g.Label(cont->false_block()));
  } else if (cont->IsDeoptimize()) {
    selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->reason(),
                             cont->frame_state());
  } else {
    DCHECK(cont->IsSet());
    selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
  }
}
   1651 
   1652 
// Shared routine for multiple float32 compare operations. A literal 0.0f on
// either side is encoded as an immediate operand; when the zero is on the
// left, the continuation's condition is commuted so the register operand can
// go first. Otherwise both operands are used as registers.
void VisitFloat32Compare(InstructionSelector* selector, Node* node,
                         FlagsContinuation* cont) {
  ArmOperandGenerator g(selector);
  Float32BinopMatcher m(node);
  if (m.right().Is(0.0f)) {
    VisitCompare(selector, kArmVcmpF32, g.UseRegister(m.left().node()),
                 g.UseImmediate(m.right().node()), cont);
  } else if (m.left().Is(0.0f)) {
    cont->Commute();
    VisitCompare(selector, kArmVcmpF32, g.UseRegister(m.right().node()),
                 g.UseImmediate(m.left().node()), cont);
  } else {
    VisitCompare(selector, kArmVcmpF32, g.UseRegister(m.left().node()),
                 g.UseRegister(m.right().node()), cont);
  }
}
   1670 
   1671 
// Shared routine for multiple float64 compare operations. Mirrors
// VisitFloat32Compare: a literal 0.0 becomes an immediate operand, with the
// condition commuted when the zero is the left input.
void VisitFloat64Compare(InstructionSelector* selector, Node* node,
                         FlagsContinuation* cont) {
  ArmOperandGenerator g(selector);
  Float64BinopMatcher m(node);
  if (m.right().Is(0.0)) {
    VisitCompare(selector, kArmVcmpF64, g.UseRegister(m.left().node()),
                 g.UseImmediate(m.right().node()), cont);
  } else if (m.left().Is(0.0)) {
    cont->Commute();
    VisitCompare(selector, kArmVcmpF64, g.UseRegister(m.right().node()),
                 g.UseImmediate(m.left().node()), cont);
  } else {
    VisitCompare(selector, kArmVcmpF64, g.UseRegister(m.left().node()),
                 g.UseRegister(m.right().node()), cont);
  }
}
   1689 
// Check whether we can convert:
// ((a <op> b) cmp 0), b.<cond>
// to:
// (a <ops> b), b.<cond'>
// where <ops> is the flag setting version of <op>.
// We only generate conditions <cond'> that are a combination of the N
// and Z flags. This avoids the need to make this function dependent on
// the flag-setting operation.
bool CanUseFlagSettingBinop(FlagsCondition cond) {
  switch (cond) {
    case kEqual:
    case kNotEqual:
    case kSignedLessThan:
    case kSignedGreaterThanOrEqual:
    case kUnsignedLessThanOrEqual:  // x <= 0 -> x == 0
    case kUnsignedGreaterThan:      // x > 0 -> x != 0
      return true;
    default:
      // Any other condition would need flags (e.g. C or V) that the
      // flag-setting data-processing forms do not provide uniformly.
      return false;
  }
}
   1711 
// Map <cond> to <cond'> so that the following transformation is possible:
// ((a <op> b) cmp 0), b.<cond>
// to:
// (a <ops> b), b.<cond'>
// where <ops> is the flag setting version of <op>.
// Only conditions accepted by CanUseFlagSettingBinop are valid here.
FlagsCondition MapForFlagSettingBinop(FlagsCondition cond) {
  DCHECK(CanUseFlagSettingBinop(cond));
  switch (cond) {
    case kEqual:
    case kNotEqual:
      return cond;
    case kSignedLessThan:
      // result < 0  <=>  N flag set.
      return kNegative;
    case kSignedGreaterThanOrEqual:
      // result >= 0  <=>  N flag clear.
      return kPositiveOrZero;
    case kUnsignedLessThanOrEqual:  // x <= 0 -> x == 0
      return kEqual;
    case kUnsignedGreaterThan:  // x > 0 -> x != 0
      return kNotEqual;
    default:
      UNREACHABLE();
      return cond;
  }
}
   1736 
// Check if we can perform the transformation:
// ((a <op> b) cmp 0), b.<cond>
// to:
// (a <ops> b), b.<cond'>
// where <ops> is the flag setting version of <op>, and if so,
// updates {node}, {opcode} and {cont} accordingly.
void MaybeReplaceCmpZeroWithFlagSettingBinop(InstructionSelector* selector,
                                             Node** node, Node* binop,
                                             InstructionCode* opcode,
                                             FlagsCondition cond,
                                             FlagsContinuation* cont) {
  // {binop_opcode} computes a result and sets flags; {no_output_opcode} is the
  // flag-only counterpart used when the binop's value itself is dead.
  InstructionCode binop_opcode;
  InstructionCode no_output_opcode;
  switch (binop->opcode()) {
    case IrOpcode::kInt32Add:
      binop_opcode = kArmAdd;
      no_output_opcode = kArmCmn;
      break;
    case IrOpcode::kWord32And:
      binop_opcode = kArmAnd;
      no_output_opcode = kArmTst;
      break;
    case IrOpcode::kWord32Or:
      // No separate flag-only form is used for OR; kArmOrr serves both roles.
      binop_opcode = kArmOrr;
      no_output_opcode = kArmOrr;
      break;
    case IrOpcode::kWord32Xor:
      binop_opcode = kArmEor;
      no_output_opcode = kArmTeq;
      break;
    default:
      UNREACHABLE();
      return;
  }
  if (selector->CanCover(*node, binop)) {
    // The comparison is the only user of {node}.
    cont->Overwrite(MapForFlagSettingBinop(cond));
    *opcode = no_output_opcode;
    *node = binop;
  } else if (selector->IsOnlyUserOfNodeInSameBlock(*node, binop)) {
    // We can also handle the case where the {node} and the comparison are in
    // the same basic block, and the comparison is the only user of {node} in
    // this basic block ({node} has users in other basic blocks).
    cont->Overwrite(MapForFlagSettingBinop(cond));
    *opcode = binop_opcode;
    *node = binop;
  }
}
   1785 
// Shared routine for multiple word compare operations. Tries to fold an
// immediate or shift into the right (or, after commuting, the left) operand,
// then emits {opcode} with the continuation encoded into it.
void VisitWordCompare(InstructionSelector* selector, Node* node,
                      InstructionCode opcode, FlagsContinuation* cont) {
  ArmOperandGenerator g(selector);
  Int32BinopMatcher m(node);
  InstructionOperand inputs[5];
  size_t input_count = 0;
  InstructionOperand outputs[2];
  size_t output_count = 0;
  // The pure compare opcodes (cmp/cmn/tst/teq) only set flags; everything
  // else also produces a value that needs an output operand.
  bool has_result = (opcode != kArmCmp) && (opcode != kArmCmn) &&
                    (opcode != kArmTst) && (opcode != kArmTeq);

  if (TryMatchImmediateOrShift(selector, &opcode, m.right().node(),
                               &input_count, &inputs[1])) {
    inputs[0] = g.UseRegister(m.left().node());
    input_count++;
  } else if (TryMatchImmediateOrShift(selector, &opcode, m.left().node(),
                                      &input_count, &inputs[1])) {
    // Operands were swapped; non-commutative ops need the condition commuted.
    if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
    inputs[0] = g.UseRegister(m.right().node());
    input_count++;
  } else {
    opcode |= AddressingModeField::encode(kMode_Operand2_R);
    inputs[input_count++] = g.UseRegister(m.left().node());
    inputs[input_count++] = g.UseRegister(m.right().node());
  }

  if (has_result) {
    if (cont->IsDeoptimize()) {
      // If we can deoptimize as a result of the binop, we need to make sure
      // that the deopt inputs are not overwritten by the binop result. One way
      // to achieve that is to declare the output register as same-as-first.
      outputs[output_count++] = g.DefineSameAsFirst(node);
    } else {
      outputs[output_count++] = g.DefineAsRegister(node);
    }
  }

  if (cont->IsBranch()) {
    inputs[input_count++] = g.Label(cont->true_block());
    inputs[input_count++] = g.Label(cont->false_block());
  } else if (cont->IsSet()) {
    outputs[output_count++] = g.DefineAsRegister(cont->result());
  }

  DCHECK_NE(0u, input_count);
  DCHECK_GE(arraysize(inputs), input_count);
  DCHECK_GE(arraysize(outputs), output_count);

  opcode = cont->Encode(opcode);
  if (cont->IsDeoptimize()) {
    selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
                             cont->reason(), cont->frame_state());
  } else {
    selector->Emit(opcode, output_count, outputs, input_count, inputs);
  }
}
   1843 
   1844 
// Word compare entry point that defaults to kArmCmp, but first tries to
// replace a comparison against zero with the flag-setting form of the
// compared binop (add/or/and/xor) via
// MaybeReplaceCmpZeroWithFlagSettingBinop.
void VisitWordCompare(InstructionSelector* selector, Node* node,
                      FlagsContinuation* cont) {
  InstructionCode opcode = kArmCmp;
  Int32BinopMatcher m(node);

  FlagsCondition cond = cont->condition();
  if (m.right().Is(0) && (m.left().IsInt32Add() || m.left().IsWord32Or() ||
                          m.left().IsWord32And() || m.left().IsWord32Xor())) {
    // Emit flag setting instructions for comparisons against zero.
    if (CanUseFlagSettingBinop(cond)) {
      Node* binop = m.left().node();
      MaybeReplaceCmpZeroWithFlagSettingBinop(selector, &node, binop, &opcode,
                                              cond, cont);
    }
  } else if (m.left().Is(0) &&
             (m.right().IsInt32Add() || m.right().IsWord32Or() ||
              m.right().IsWord32And() || m.right().IsWord32Xor())) {
    // Same as above, but we need to commute the condition before we
    // continue with the rest of the checks.
    cond = CommuteFlagsCondition(cond);
    if (CanUseFlagSettingBinop(cond)) {
      Node* binop = m.right().node();
      MaybeReplaceCmpZeroWithFlagSettingBinop(selector, &node, binop, &opcode,
                                              cond, cont);
    }
  }

  VisitWordCompare(selector, node, opcode, cont);
}
   1874 
   1875 
// Shared routine for word comparisons against zero. Peels off chains of
// (x == 0) by negating the continuation, then tries to combine the
// continuation directly with the compared value's own operation (compares,
// float compares, overflow projections, flag-setting binops, shifts). If no
// combination applies, emits a plain flag-setting test of {value} against
// itself (tst value, value).
void VisitWordCompareZero(InstructionSelector* selector, Node* user,
                          Node* value, FlagsContinuation* cont) {
  // Try to combine with comparisons against 0 by simply inverting the branch.
  while (value->opcode() == IrOpcode::kWord32Equal &&
         selector->CanCover(user, value)) {
    Int32BinopMatcher m(value);
    if (!m.right().Is(0)) break;

    user = value;
    value = m.left().node();
    cont->Negate();
  }

  if (selector->CanCover(user, value)) {
    switch (value->opcode()) {
      case IrOpcode::kWord32Equal:
        cont->OverwriteAndNegateIfEqual(kEqual);
        return VisitWordCompare(selector, value, cont);
      case IrOpcode::kInt32LessThan:
        cont->OverwriteAndNegateIfEqual(kSignedLessThan);
        return VisitWordCompare(selector, value, cont);
      case IrOpcode::kInt32LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
        return VisitWordCompare(selector, value, cont);
      case IrOpcode::kUint32LessThan:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
        return VisitWordCompare(selector, value, cont);
      case IrOpcode::kUint32LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
        return VisitWordCompare(selector, value, cont);
      case IrOpcode::kFloat32Equal:
        cont->OverwriteAndNegateIfEqual(kEqual);
        return VisitFloat32Compare(selector, value, cont);
      case IrOpcode::kFloat32LessThan:
        cont->OverwriteAndNegateIfEqual(kFloatLessThan);
        return VisitFloat32Compare(selector, value, cont);
      case IrOpcode::kFloat32LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kFloatLessThanOrEqual);
        return VisitFloat32Compare(selector, value, cont);
      case IrOpcode::kFloat64Equal:
        cont->OverwriteAndNegateIfEqual(kEqual);
        return VisitFloat64Compare(selector, value, cont);
      case IrOpcode::kFloat64LessThan:
        cont->OverwriteAndNegateIfEqual(kFloatLessThan);
        return VisitFloat64Compare(selector, value, cont);
      case IrOpcode::kFloat64LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kFloatLessThanOrEqual);
        return VisitFloat64Compare(selector, value, cont);
      case IrOpcode::kProjection:
        // Check if this is the overflow output projection of an
        // <Operation>WithOverflow node.
        if (ProjectionIndexOf(value->op()) == 1u) {
          // We cannot combine the <Operation>WithOverflow with this branch
          // unless the 0th projection (the use of the actual value of the
          // <Operation> is either nullptr, which means there's no use of the
          // actual value, or was already defined, which means it is scheduled
          // *AFTER* this branch).
          Node* const node = value->InputAt(0);
          Node* const result = NodeProperties::FindProjection(node, 0);
          if (!result || selector->IsDefined(result)) {
            switch (node->opcode()) {
              case IrOpcode::kInt32AddWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop(selector, node, kArmAdd, kArmAdd, cont);
              case IrOpcode::kInt32SubWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop(selector, node, kArmSub, kArmRsb, cont);
              case IrOpcode::kInt32MulWithOverflow:
                // ARM doesn't set the overflow flag for multiplication, so we
                // need to test on kNotEqual. Here is the code sequence used:
                //   smull resultlow, resulthigh, left, right
                //   cmp resulthigh, Operand(resultlow, ASR, 31)
                cont->OverwriteAndNegateIfEqual(kNotEqual);
                return EmitInt32MulWithOverflow(selector, node, cont);
              default:
                break;
            }
          }
        }
        break;
      case IrOpcode::kInt32Add:
        return VisitWordCompare(selector, value, kArmCmn, cont);
      case IrOpcode::kInt32Sub:
        return VisitWordCompare(selector, value, kArmCmp, cont);
      case IrOpcode::kWord32And:
        return VisitWordCompare(selector, value, kArmTst, cont);
      case IrOpcode::kWord32Or:
        return VisitBinop(selector, value, kArmOrr, kArmOrr, cont);
      case IrOpcode::kWord32Xor:
        return VisitWordCompare(selector, value, kArmTeq, cont);
      case IrOpcode::kWord32Sar:
        return VisitShift(selector, value, TryMatchASR, cont);
      case IrOpcode::kWord32Shl:
        return VisitShift(selector, value, TryMatchLSL, cont);
      case IrOpcode::kWord32Shr:
        return VisitShift(selector, value, TryMatchLSR, cont);
      case IrOpcode::kWord32Ror:
        return VisitShift(selector, value, TryMatchROR, cont);
      default:
        break;
    }
  }

  if (user->opcode() == IrOpcode::kWord32Equal) {
    return VisitWordCompare(selector, user, cont);
  }

  // Continuation could not be combined with a compare, emit compare against 0.
  ArmOperandGenerator g(selector);
  InstructionCode const opcode =
      cont->Encode(kArmTst) | AddressingModeField::encode(kMode_Operand2_R);
  InstructionOperand const value_operand = g.UseRegister(value);
  if (cont->IsBranch()) {
    selector->Emit(opcode, g.NoOutput(), value_operand, value_operand,
                   g.Label(cont->true_block()), g.Label(cont->false_block()));
  } else if (cont->IsDeoptimize()) {
    selector->EmitDeoptimize(opcode, g.NoOutput(), value_operand, value_operand,
                             cont->reason(), cont->frame_state());
  } else {
    DCHECK(cont->IsSet());
    selector->Emit(opcode, g.DefineAsRegister(cont->result()), value_operand,
                   value_operand);
  }
}
   2001 
   2002 }  // namespace
   2003 
// Lowers a branch by combining it with the zero-compare of its condition
// input. kNotEqual: branch to {tbranch} when the condition value is non-zero.
void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
                                      BasicBlock* fbranch) {
  FlagsContinuation cont(kNotEqual, tbranch, fbranch);
  VisitWordCompareZero(this, branch, branch->InputAt(0), &cont);
}
   2009 
// DeoptimizeIf: deoptimize (kNotEqual) when the condition input is non-zero;
// input 1 carries the frame state.
void InstructionSelector::VisitDeoptimizeIf(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
      kNotEqual, DeoptimizeReasonOf(node->op()), node->InputAt(1));
  VisitWordCompareZero(this, node, node->InputAt(0), &cont);
}
   2015 
// DeoptimizeUnless: deoptimize (kEqual) when the condition input is zero;
// input 1 carries the frame state.
void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
      kEqual, DeoptimizeReasonOf(node->op()), node->InputAt(1));
  VisitWordCompareZero(this, node, node->InputAt(0), &cont);
}
   2021 
// Lowers a switch either to a jump table (ArchTableSwitch) or to a sequence
// of conditional jumps (ArchLookupSwitch), picking whichever the size/time
// cost model below estimates to be cheaper.
void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
  ArmOperandGenerator g(this);
  InstructionOperand value_operand = g.UseRegister(node->InputAt(0));

  // Emit either ArchTableSwitch or ArchLookupSwitch.
  size_t table_space_cost = 4 + sw.value_range;
  size_t table_time_cost = 3;
  size_t lookup_space_cost = 3 + 2 * sw.case_count;
  size_t lookup_time_cost = sw.case_count;
  if (sw.case_count > 0 &&
      table_space_cost + 3 * table_time_cost <=
          lookup_space_cost + 3 * lookup_time_cost &&
      sw.min_value > std::numeric_limits<int32_t>::min()) {
    InstructionOperand index_operand = value_operand;
    // Bias the value so the table is indexed from zero when min_value != 0.
    if (sw.min_value) {
      index_operand = g.TempRegister();
      Emit(kArmSub | AddressingModeField::encode(kMode_Operand2_I),
           index_operand, value_operand, g.TempImmediate(sw.min_value));
    }
    // Generate a table lookup.
    return EmitTableSwitch(sw, index_operand);
  }

  // Generate a sequence of conditional jumps.
  return EmitLookupSwitch(sw, value_operand);
}
   2048 
   2049 
   2050 void InstructionSelector::VisitWord32Equal(Node* const node) {
   2051   FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
   2052   Int32BinopMatcher m(node);
   2053   if (m.right().Is(0)) {
   2054     return VisitWordCompareZero(this, m.node(), m.left().node(), &cont);
   2055   }
   2056   VisitWordCompare(this, node, &cont);
   2057 }
   2058 
   2059 
// Int32LessThan: materialize the kSignedLessThan flag via a word compare.
void InstructionSelector::VisitInt32LessThan(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
  VisitWordCompare(this, node, &cont);
}
   2064 
   2065 
// Int32LessThanOrEqual: materialize kSignedLessThanOrEqual via a word compare.
void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
  VisitWordCompare(this, node, &cont);
}
   2071 
   2072 
// Uint32LessThan: materialize the kUnsignedLessThan flag via a word compare.
void InstructionSelector::VisitUint32LessThan(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
  VisitWordCompare(this, node, &cont);
}
   2077 
   2078 
// Uint32LessThanOrEqual: materialize kUnsignedLessThanOrEqual via a compare.
void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
  VisitWordCompare(this, node, &cont);
}
   2084 
   2085 
   2086 void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
   2087   if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
   2088     FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
   2089     return VisitBinop(this, node, kArmAdd, kArmAdd, &cont);
   2090   }
   2091   FlagsContinuation cont;
   2092   VisitBinop(this, node, kArmAdd, kArmAdd, &cont);
   2093 }
   2094 
   2095 void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
   2096   if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
   2097     FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
   2098     return VisitBinop(this, node, kArmSub, kArmRsb, &cont);
   2099   }
   2100   FlagsContinuation cont;
   2101   VisitBinop(this, node, kArmSub, kArmRsb, &cont);
   2102 }
   2103 
   2104 void InstructionSelector::VisitInt32MulWithOverflow(Node* node) {
   2105   if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
   2106     // ARM doesn't set the overflow flag for multiplication, so we need to test
   2107     // on kNotEqual. Here is the code sequence used:
   2108     //   smull resultlow, resulthigh, left, right
   2109     //   cmp resulthigh, Operand(resultlow, ASR, 31)
   2110     FlagsContinuation cont = FlagsContinuation::ForSet(kNotEqual, ovf);
   2111     return EmitInt32MulWithOverflow(this, node, &cont);
   2112   }
   2113   FlagsContinuation cont;
   2114   EmitInt32MulWithOverflow(this, node, &cont);
   2115 }
   2116 
// Float32Equal: materialize the kEqual flag via a float32 compare.
void InstructionSelector::VisitFloat32Equal(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
  VisitFloat32Compare(this, node, &cont);
}
   2121 
   2122 
// Float32LessThan: materialize the kFloatLessThan flag via a float32 compare.
void InstructionSelector::VisitFloat32LessThan(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kFloatLessThan, node);
  VisitFloat32Compare(this, node, &cont);
}
   2127 
   2128 
// Float32LessThanOrEqual: materialize kFloatLessThanOrEqual via a compare.
void InstructionSelector::VisitFloat32LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kFloatLessThanOrEqual, node);
  VisitFloat32Compare(this, node, &cont);
}
   2134 
   2135 
// Float64Equal: materialize the kEqual flag via a float64 compare.
void InstructionSelector::VisitFloat64Equal(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
  VisitFloat64Compare(this, node, &cont);
}
   2140 
   2141 
// Float64LessThan: materialize the kFloatLessThan flag via a float64 compare.
void InstructionSelector::VisitFloat64LessThan(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kFloatLessThan, node);
  VisitFloat64Compare(this, node, &cont);
}
   2146 
   2147 
// Float64LessThanOrEqual: materialize kFloatLessThanOrEqual via a compare.
void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kFloatLessThanOrEqual, node);
  VisitFloat64Compare(this, node, &cont);
}
   2153 
   2154 
// Float64ExtractLowWord32: kArmVmovLowU32F64 moves the low 32 bits of the
// double into a general-purpose register.
void InstructionSelector::VisitFloat64ExtractLowWord32(Node* node) {
  VisitRR(this, kArmVmovLowU32F64, node);
}
   2158 
   2159 
// Float64ExtractHighWord32: kArmVmovHighU32F64 moves the high 32 bits of the
// double into a general-purpose register.
void InstructionSelector::VisitFloat64ExtractHighWord32(Node* node) {
  VisitRR(this, kArmVmovHighU32F64, node);
}
   2163 
   2164 
   2165 void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) {
   2166   ArmOperandGenerator g(this);
   2167   Node* left = node->InputAt(0);
   2168   Node* right = node->InputAt(1);
   2169   if (left->opcode() == IrOpcode::kFloat64InsertHighWord32 &&
   2170       CanCover(node, left)) {
   2171     left = left->InputAt(1);
   2172     Emit(kArmVmovF64U32U32, g.DefineAsRegister(node), g.UseRegister(right),
   2173          g.UseRegister(left));
   2174     return;
   2175   }
   2176   Emit(kArmVmovLowF64U32, g.DefineSameAsFirst(node), g.UseRegister(left),
   2177        g.UseRegister(right));
   2178 }
   2179 
   2180 
// Float64InsertHighWord32. When the input double comes from a covered
// Float64InsertLowWord32, both 32-bit halves are combined with a single
// kArmVmovF64U32U32; otherwise only the high word is replaced with
// kArmVmovHighF64U32.
void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
  ArmOperandGenerator g(this);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);
  if (left->opcode() == IrOpcode::kFloat64InsertLowWord32 &&
      CanCover(node, left)) {
    // Take the low word directly from the covered insert's input.
    left = left->InputAt(1);
    Emit(kArmVmovF64U32U32, g.DefineAsRegister(node), g.UseRegister(left),
         g.UseRegister(right));
    return;
  }
  Emit(kArmVmovHighF64U32, g.DefineSameAsFirst(node), g.UseRegister(left),
       g.UseRegister(right));
}
   2195 
   2196 void InstructionSelector::VisitAtomicLoad(Node* node) {
   2197   LoadRepresentation load_rep = LoadRepresentationOf(node->op());
   2198   ArmOperandGenerator g(this);
   2199   Node* base = node->InputAt(0);
   2200   Node* index = node->InputAt(1);
   2201   ArchOpcode opcode = kArchNop;
   2202   switch (load_rep.representation()) {
   2203     case MachineRepresentation::kWord8:
   2204       opcode = load_rep.IsSigned() ? kAtomicLoadInt8 : kAtomicLoadUint8;
   2205       break;
   2206     case MachineRepresentation::kWord16:
   2207       opcode = load_rep.IsSigned() ? kAtomicLoadInt16 : kAtomicLoadUint16;
   2208       break;
   2209     case MachineRepresentation::kWord32:
   2210       opcode = kAtomicLoadWord32;
   2211       break;
   2212     default:
   2213       UNREACHABLE();
   2214       return;
   2215   }
   2216   Emit(opcode | AddressingModeField::encode(kMode_Offset_RR),
   2217        g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(index));
   2218 }
   2219 
   2220 void InstructionSelector::VisitAtomicStore(Node* node) {
   2221   MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
   2222   ArmOperandGenerator g(this);
   2223   Node* base = node->InputAt(0);
   2224   Node* index = node->InputAt(1);
   2225   Node* value = node->InputAt(2);
   2226   ArchOpcode opcode = kArchNop;
   2227   switch (rep) {
   2228     case MachineRepresentation::kWord8:
   2229       opcode = kAtomicStoreWord8;
   2230       break;
   2231     case MachineRepresentation::kWord16:
   2232       opcode = kAtomicStoreWord16;
   2233       break;
   2234     case MachineRepresentation::kWord32:
   2235       opcode = kAtomicStoreWord32;
   2236       break;
   2237     default:
   2238       UNREACHABLE();
   2239       return;
   2240   }
   2241 
   2242   AddressingMode addressing_mode = kMode_Offset_RR;
   2243   InstructionOperand inputs[4];
   2244   size_t input_count = 0;
   2245   inputs[input_count++] = g.UseUniqueRegister(base);
   2246   inputs[input_count++] = g.UseUniqueRegister(index);
   2247   inputs[input_count++] = g.UseUniqueRegister(value);
   2248   InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
   2249   Emit(code, 0, nullptr, input_count, inputs);
   2250 }
   2251 
   2252 // static
   2253 MachineOperatorBuilder::Flags
   2254 InstructionSelector::SupportedMachineOperatorFlags() {
   2255   MachineOperatorBuilder::Flags flags;
   2256   if (CpuFeatures::IsSupported(SUDIV)) {
   2257     // The sdiv and udiv instructions correctly return 0 if the divisor is 0,
   2258     // but the fall-back implementation does not.
   2259     flags |= MachineOperatorBuilder::kInt32DivIsSafe |
   2260              MachineOperatorBuilder::kUint32DivIsSafe;
   2261   }
   2262   if (CpuFeatures::IsSupported(ARMv7)) {
   2263     flags |= MachineOperatorBuilder::kWord32ReverseBits;
   2264   }
   2265   if (CpuFeatures::IsSupported(ARMv8)) {
   2266     flags |= MachineOperatorBuilder::kFloat32RoundDown |
   2267              MachineOperatorBuilder::kFloat64RoundDown |
   2268              MachineOperatorBuilder::kFloat32RoundUp |
   2269              MachineOperatorBuilder::kFloat64RoundUp |
   2270              MachineOperatorBuilder::kFloat32RoundTruncate |
   2271              MachineOperatorBuilder::kFloat64RoundTruncate |
   2272              MachineOperatorBuilder::kFloat64RoundTiesAway |
   2273              MachineOperatorBuilder::kFloat32RoundTiesEven |
   2274              MachineOperatorBuilder::kFloat64RoundTiesEven;
   2275   }
   2276   return flags;
   2277 }
   2278 
   2279 // static
   2280 MachineOperatorBuilder::AlignmentRequirements
   2281 InstructionSelector::AlignmentRequirements() {
   2282   Vector<MachineType> req_aligned = Vector<MachineType>::New(2);
   2283   req_aligned[0] = MachineType::Float32();
   2284   req_aligned[1] = MachineType::Float64();
   2285   return MachineOperatorBuilder::AlignmentRequirements::
   2286       SomeUnalignedAccessUnsupported(req_aligned, req_aligned);
   2287 }
   2288 
   2289 }  // namespace compiler
   2290 }  // namespace internal
   2291 }  // namespace v8
   2292