Home | History | Annotate | Download | only in arm64
      1 // Copyright 2014 the V8 project authors. All rights reserved.
      2 // Use of this source code is governed by a BSD-style license that can be
      3 // found in the LICENSE file.
      4 
      5 #include "src/compiler/instruction-selector-impl.h"
      6 #include "src/compiler/node-matchers.h"
      7 #include "src/compiler/node-properties.h"
      8 
      9 namespace v8 {
     10 namespace internal {
     11 namespace compiler {
     12 
     13 enum ImmediateMode {
     14   kArithmeticImm,  // 12 bit unsigned immediate shifted left 0 or 12 bits
     15   kShift32Imm,     // 0 - 31
     16   kShift64Imm,     // 0 - 63
     17   kLogical32Imm,
     18   kLogical64Imm,
     19   kLoadStoreImm8,   // signed 8 bit or 12 bit unsigned scaled by access size
     20   kLoadStoreImm16,
     21   kLoadStoreImm32,
     22   kLoadStoreImm64,
     23   kNoImmediate
     24 };
     25 
     26 
     27 // Adds Arm64-specific methods for generating operands.
     28 class Arm64OperandGenerator final : public OperandGenerator {
     29  public:
     30   explicit Arm64OperandGenerator(InstructionSelector* selector)
     31       : OperandGenerator(selector) {}
     32 
     33   InstructionOperand UseOperand(Node* node, ImmediateMode mode) {
     34     if (CanBeImmediate(node, mode)) {
     35       return UseImmediate(node);
     36     }
     37     return UseRegister(node);
     38   }
     39 
     40   // Use the zero register if the node has the immediate value zero, otherwise
     41   // assign a register.
     42   InstructionOperand UseRegisterOrImmediateZero(Node* node) {
     43     if ((IsIntegerConstant(node) && (GetIntegerConstantValue(node) == 0)) ||
     44         (IsFloatConstant(node) &&
     45          (bit_cast<int64_t>(GetFloatConstantValue(node)) == V8_INT64_C(0)))) {
     46       return UseImmediate(node);
     47     }
     48     return UseRegister(node);
     49   }
     50 
     51   // Use the provided node if it has the required value, or create a
     52   // TempImmediate otherwise.
     53   InstructionOperand UseImmediateOrTemp(Node* node, int32_t value) {
     54     if (GetIntegerConstantValue(node) == value) {
     55       return UseImmediate(node);
     56     }
     57     return TempImmediate(value);
     58   }
     59 
     60   bool IsIntegerConstant(Node* node) {
     61     return (node->opcode() == IrOpcode::kInt32Constant) ||
     62            (node->opcode() == IrOpcode::kInt64Constant);
     63   }
     64 
     65   int64_t GetIntegerConstantValue(Node* node) {
     66     if (node->opcode() == IrOpcode::kInt32Constant) {
     67       return OpParameter<int32_t>(node);
     68     }
     69     DCHECK(node->opcode() == IrOpcode::kInt64Constant);
     70     return OpParameter<int64_t>(node);
     71   }
     72 
     73   bool IsFloatConstant(Node* node) {
     74     return (node->opcode() == IrOpcode::kFloat32Constant) ||
     75            (node->opcode() == IrOpcode::kFloat64Constant);
     76   }
     77 
     78   double GetFloatConstantValue(Node* node) {
     79     if (node->opcode() == IrOpcode::kFloat32Constant) {
     80       return OpParameter<float>(node);
     81     }
     82     DCHECK_EQ(IrOpcode::kFloat64Constant, node->opcode());
     83     return OpParameter<double>(node);
     84   }
     85 
     86   bool CanBeImmediate(Node* node, ImmediateMode mode) {
     87     return IsIntegerConstant(node) &&
     88            CanBeImmediate(GetIntegerConstantValue(node), mode);
     89   }
     90 
     91   bool CanBeImmediate(int64_t value, ImmediateMode mode) {
     92     unsigned ignored;
     93     switch (mode) {
     94       case kLogical32Imm:
     95         // TODO(dcarney): some unencodable values can be handled by
     96         // switching instructions.
     97         return Assembler::IsImmLogical(static_cast<uint64_t>(value), 32,
     98                                        &ignored, &ignored, &ignored);
     99       case kLogical64Imm:
    100         return Assembler::IsImmLogical(static_cast<uint64_t>(value), 64,
    101                                        &ignored, &ignored, &ignored);
    102       case kArithmeticImm:
    103         return Assembler::IsImmAddSub(value);
    104       case kLoadStoreImm8:
    105         return IsLoadStoreImmediate(value, LSByte);
    106       case kLoadStoreImm16:
    107         return IsLoadStoreImmediate(value, LSHalfword);
    108       case kLoadStoreImm32:
    109         return IsLoadStoreImmediate(value, LSWord);
    110       case kLoadStoreImm64:
    111         return IsLoadStoreImmediate(value, LSDoubleWord);
    112       case kNoImmediate:
    113         return false;
    114       case kShift32Imm:  // Fall through.
    115       case kShift64Imm:
    116         // Shift operations only observe the bottom 5 or 6 bits of the value.
    117         // All possible shifts can be encoded by discarding bits which have no
    118         // effect.
    119         return true;
    120     }
    121     return false;
    122   }
    123 
    124   bool CanBeLoadStoreShiftImmediate(Node* node, MachineRepresentation rep) {
    125     // TODO(arm64): Load and Store on 128 bit Q registers is not supported yet.
    126     DCHECK_GT(MachineRepresentation::kSimd128, rep);
    127     return IsIntegerConstant(node) &&
    128            (GetIntegerConstantValue(node) == ElementSizeLog2Of(rep));
    129   }
    130 
    131  private:
    132   bool IsLoadStoreImmediate(int64_t value, LSDataSize size) {
    133     return Assembler::IsImmLSScaled(value, size) ||
    134            Assembler::IsImmLSUnscaled(value);
    135   }
    136 };
    137 
    138 
    139 namespace {
    140 
    141 void VisitRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
    142   Arm64OperandGenerator g(selector);
    143   selector->Emit(opcode, g.DefineAsRegister(node),
    144                  g.UseRegister(node->InputAt(0)));
    145 }
    146 
    147 
    148 void VisitRRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
    149   Arm64OperandGenerator g(selector);
    150   selector->Emit(opcode, g.DefineAsRegister(node),
    151                  g.UseRegister(node->InputAt(0)),
    152                  g.UseRegister(node->InputAt(1)));
    153 }
    154 
    155 
    156 void VisitRRO(InstructionSelector* selector, ArchOpcode opcode, Node* node,
    157               ImmediateMode operand_mode) {
    158   Arm64OperandGenerator g(selector);
    159   selector->Emit(opcode, g.DefineAsRegister(node),
    160                  g.UseRegister(node->InputAt(0)),
    161                  g.UseOperand(node->InputAt(1), operand_mode));
    162 }
    163 
    164 struct ExtendingLoadMatcher {
    165   ExtendingLoadMatcher(Node* node, InstructionSelector* selector)
    166       : matches_(false), selector_(selector), base_(nullptr), immediate_(0) {
    167     Initialize(node);
    168   }
    169 
    170   bool Matches() const { return matches_; }
    171 
    172   Node* base() const {
    173     DCHECK(Matches());
    174     return base_;
    175   }
    176   int64_t immediate() const {
    177     DCHECK(Matches());
    178     return immediate_;
    179   }
    180   ArchOpcode opcode() const {
    181     DCHECK(Matches());
    182     return opcode_;
    183   }
    184 
    185  private:
    186   bool matches_;
    187   InstructionSelector* selector_;
    188   Node* base_;
    189   int64_t immediate_;
    190   ArchOpcode opcode_;
    191 
    192   void Initialize(Node* node) {
    193     Int64BinopMatcher m(node);
    194     // When loading a 64-bit value and shifting by 32, we should
    195     // just load and sign-extend the interesting 4 bytes instead.
    196     // This happens, for example, when we're loading and untagging SMIs.
    197     DCHECK(m.IsWord64Sar());
    198     if (m.left().IsLoad() && m.right().Is(32) &&
    199         selector_->CanCover(m.node(), m.left().node())) {
    200       Arm64OperandGenerator g(selector_);
    201       Node* load = m.left().node();
    202       Node* offset = load->InputAt(1);
    203       base_ = load->InputAt(0);
    204       opcode_ = kArm64Ldrsw;
    205       if (g.IsIntegerConstant(offset)) {
    206         immediate_ = g.GetIntegerConstantValue(offset) + 4;
    207         matches_ = g.CanBeImmediate(immediate_, kLoadStoreImm32);
    208       }
    209     }
    210   }
    211 };
    212 
    213 bool TryMatchExtendingLoad(InstructionSelector* selector, Node* node) {
    214   ExtendingLoadMatcher m(node, selector);
    215   return m.Matches();
    216 }
    217 
    218 bool TryEmitExtendingLoad(InstructionSelector* selector, Node* node) {
    219   ExtendingLoadMatcher m(node, selector);
    220   Arm64OperandGenerator g(selector);
    221   if (m.Matches()) {
    222     InstructionOperand inputs[2];
    223     inputs[0] = g.UseRegister(m.base());
    224     InstructionCode opcode =
    225         m.opcode() | AddressingModeField::encode(kMode_MRI);
    226     DCHECK(is_int32(m.immediate()));
    227     inputs[1] = g.TempImmediate(static_cast<int32_t>(m.immediate()));
    228     InstructionOperand outputs[] = {g.DefineAsRegister(node)};
    229     selector->Emit(opcode, arraysize(outputs), outputs, arraysize(inputs),
    230                    inputs);
    231     return true;
    232   }
    233   return false;
    234 }
    235 
    236 bool TryMatchAnyShift(InstructionSelector* selector, Node* node,
    237                       Node* input_node, InstructionCode* opcode, bool try_ror) {
    238   Arm64OperandGenerator g(selector);
    239 
    240   if (!selector->CanCover(node, input_node)) return false;
    241   if (input_node->InputCount() != 2) return false;
    242   if (!g.IsIntegerConstant(input_node->InputAt(1))) return false;
    243 
    244   switch (input_node->opcode()) {
    245     case IrOpcode::kWord32Shl:
    246     case IrOpcode::kWord64Shl:
    247       *opcode |= AddressingModeField::encode(kMode_Operand2_R_LSL_I);
    248       return true;
    249     case IrOpcode::kWord32Shr:
    250     case IrOpcode::kWord64Shr:
    251       *opcode |= AddressingModeField::encode(kMode_Operand2_R_LSR_I);
    252       return true;
    253     case IrOpcode::kWord32Sar:
    254       *opcode |= AddressingModeField::encode(kMode_Operand2_R_ASR_I);
    255       return true;
    256     case IrOpcode::kWord64Sar:
    257       if (TryMatchExtendingLoad(selector, input_node)) return false;
    258       *opcode |= AddressingModeField::encode(kMode_Operand2_R_ASR_I);
    259       return true;
    260     case IrOpcode::kWord32Ror:
    261     case IrOpcode::kWord64Ror:
    262       if (try_ror) {
    263         *opcode |= AddressingModeField::encode(kMode_Operand2_R_ROR_I);
    264         return true;
    265       }
    266       return false;
    267     default:
    268       return false;
    269   }
    270 }
    271 
    272 
    273 bool TryMatchAnyExtend(Arm64OperandGenerator* g, InstructionSelector* selector,
    274                        Node* node, Node* left_node, Node* right_node,
    275                        InstructionOperand* left_op,
    276                        InstructionOperand* right_op, InstructionCode* opcode) {
    277   if (!selector->CanCover(node, right_node)) return false;
    278 
    279   NodeMatcher nm(right_node);
    280 
    281   if (nm.IsWord32And()) {
    282     Int32BinopMatcher mright(right_node);
    283     if (mright.right().Is(0xff) || mright.right().Is(0xffff)) {
    284       int32_t mask = mright.right().Value();
    285       *left_op = g->UseRegister(left_node);
    286       *right_op = g->UseRegister(mright.left().node());
    287       *opcode |= AddressingModeField::encode(
    288           (mask == 0xff) ? kMode_Operand2_R_UXTB : kMode_Operand2_R_UXTH);
    289       return true;
    290     }
    291   } else if (nm.IsWord32Sar()) {
    292     Int32BinopMatcher mright(right_node);
    293     if (selector->CanCover(mright.node(), mright.left().node()) &&
    294         mright.left().IsWord32Shl()) {
    295       Int32BinopMatcher mleft_of_right(mright.left().node());
    296       if ((mright.right().Is(16) && mleft_of_right.right().Is(16)) ||
    297           (mright.right().Is(24) && mleft_of_right.right().Is(24))) {
    298         int32_t shift = mright.right().Value();
    299         *left_op = g->UseRegister(left_node);
    300         *right_op = g->UseRegister(mleft_of_right.left().node());
    301         *opcode |= AddressingModeField::encode(
    302             (shift == 24) ? kMode_Operand2_R_SXTB : kMode_Operand2_R_SXTH);
    303         return true;
    304       }
    305     }
    306   }
    307   return false;
    308 }
    309 
    310 bool TryMatchLoadStoreShift(Arm64OperandGenerator* g,
    311                             InstructionSelector* selector,
    312                             MachineRepresentation rep, Node* node, Node* index,
    313                             InstructionOperand* index_op,
    314                             InstructionOperand* shift_immediate_op) {
    315   if (!selector->CanCover(node, index)) return false;
    316   if (index->InputCount() != 2) return false;
    317   Node* left = index->InputAt(0);
    318   Node* right = index->InputAt(1);
    319   switch (index->opcode()) {
    320     case IrOpcode::kWord32Shl:
    321     case IrOpcode::kWord64Shl:
    322       if (!g->CanBeLoadStoreShiftImmediate(right, rep)) {
    323         return false;
    324       }
    325       *index_op = g->UseRegister(left);
    326       *shift_immediate_op = g->UseImmediate(right);
    327       return true;
    328     default:
    329       return false;
    330   }
    331 }
    332 
    333 // Bitfields describing binary operator properties:
    334 // CanCommuteField is true if we can switch the two operands, potentially
    335 // requiring commuting the flags continuation condition.
    336 typedef BitField8<bool, 1, 1> CanCommuteField;
    337 // MustCommuteCondField is true when we need to commute the flags continuation
    338 // condition in order to switch the operands.
    339 typedef BitField8<bool, 2, 1> MustCommuteCondField;
    340 // IsComparisonField is true when the operation is a comparison and has no other
    341 // result other than the condition.
    342 typedef BitField8<bool, 3, 1> IsComparisonField;
    343 // IsAddSubField is true when an instruction is encoded as ADD or SUB.
    344 typedef BitField8<bool, 4, 1> IsAddSubField;
    345 
    346 // Get properties of a binary operator.
    347 uint8_t GetBinopProperties(InstructionCode opcode) {
    348   uint8_t result = 0;
    349   switch (opcode) {
    350     case kArm64Cmp32:
    351     case kArm64Cmp:
    352       // We can commute CMP by switching the inputs and commuting
    353       // the flags continuation.
    354       result = CanCommuteField::update(result, true);
    355       result = MustCommuteCondField::update(result, true);
    356       result = IsComparisonField::update(result, true);
    357       // The CMP and CMN instructions are encoded as SUB or ADD
    358       // with zero output register, and therefore support the same
    359       // operand modes.
    360       result = IsAddSubField::update(result, true);
    361       break;
    362     case kArm64Cmn32:
    363     case kArm64Cmn:
    364       result = CanCommuteField::update(result, true);
    365       result = IsComparisonField::update(result, true);
    366       result = IsAddSubField::update(result, true);
    367       break;
    368     case kArm64Add32:
    369     case kArm64Add:
    370       result = CanCommuteField::update(result, true);
    371       result = IsAddSubField::update(result, true);
    372       break;
    373     case kArm64Sub32:
    374     case kArm64Sub:
    375       result = IsAddSubField::update(result, true);
    376       break;
    377     case kArm64Tst32:
    378     case kArm64Tst:
    379       result = CanCommuteField::update(result, true);
    380       result = IsComparisonField::update(result, true);
    381       break;
    382     case kArm64And32:
    383     case kArm64And:
    384     case kArm64Or32:
    385     case kArm64Or:
    386     case kArm64Eor32:
    387     case kArm64Eor:
    388       result = CanCommuteField::update(result, true);
    389       break;
    390     default:
    391       UNREACHABLE();
    392       return 0;
    393   }
    394   DCHECK_IMPLIES(MustCommuteCondField::decode(result),
    395                  CanCommuteField::decode(result));
    396   return result;
    397 }
    398 
    399 // Shared routine for multiple binary operations.
    400 template <typename Matcher>
    401 void VisitBinop(InstructionSelector* selector, Node* node,
    402                 InstructionCode opcode, ImmediateMode operand_mode,
    403                 FlagsContinuation* cont) {
    404   Arm64OperandGenerator g(selector);
    405   InstructionOperand inputs[5];
    406   size_t input_count = 0;
    407   InstructionOperand outputs[2];
    408   size_t output_count = 0;
    409 
    410   Node* left_node = node->InputAt(0);
    411   Node* right_node = node->InputAt(1);
    412 
    413   uint8_t properties = GetBinopProperties(opcode);
    414   bool can_commute = CanCommuteField::decode(properties);
    415   bool must_commute_cond = MustCommuteCondField::decode(properties);
    416   bool is_add_sub = IsAddSubField::decode(properties);
    417 
    418   if (g.CanBeImmediate(right_node, operand_mode)) {
    419     inputs[input_count++] = g.UseRegister(left_node);
    420     inputs[input_count++] = g.UseImmediate(right_node);
    421   } else if (can_commute && g.CanBeImmediate(left_node, operand_mode)) {
    422     if (must_commute_cond) cont->Commute();
    423     inputs[input_count++] = g.UseRegister(right_node);
    424     inputs[input_count++] = g.UseImmediate(left_node);
    425   } else if (is_add_sub &&
    426              TryMatchAnyExtend(&g, selector, node, left_node, right_node,
    427                                &inputs[0], &inputs[1], &opcode)) {
    428     input_count += 2;
    429   } else if (is_add_sub && can_commute &&
    430              TryMatchAnyExtend(&g, selector, node, right_node, left_node,
    431                                &inputs[0], &inputs[1], &opcode)) {
    432     if (must_commute_cond) cont->Commute();
    433     input_count += 2;
    434   } else if (TryMatchAnyShift(selector, node, right_node, &opcode,
    435                               !is_add_sub)) {
    436     Matcher m_shift(right_node);
    437     inputs[input_count++] = g.UseRegisterOrImmediateZero(left_node);
    438     inputs[input_count++] = g.UseRegister(m_shift.left().node());
    439     // We only need at most the last 6 bits of the shift.
    440     inputs[input_count++] =
    441         g.UseImmediate(static_cast<int>(m_shift.right().Value() & 0x3F));
    442   } else if (can_commute && TryMatchAnyShift(selector, node, left_node, &opcode,
    443                                              !is_add_sub)) {
    444     if (must_commute_cond) cont->Commute();
    445     Matcher m_shift(left_node);
    446     inputs[input_count++] = g.UseRegisterOrImmediateZero(right_node);
    447     inputs[input_count++] = g.UseRegister(m_shift.left().node());
    448     // We only need at most the last 6 bits of the shift.
    449     inputs[input_count++] =
    450         g.UseImmediate(static_cast<int>(m_shift.right().Value() & 0x3F));
    451   } else {
    452     inputs[input_count++] = g.UseRegisterOrImmediateZero(left_node);
    453     inputs[input_count++] = g.UseRegister(right_node);
    454   }
    455 
    456   if (cont->IsBranch()) {
    457     inputs[input_count++] = g.Label(cont->true_block());
    458     inputs[input_count++] = g.Label(cont->false_block());
    459   }
    460 
    461   if (!IsComparisonField::decode(properties)) {
    462     outputs[output_count++] = g.DefineAsRegister(node);
    463   }
    464 
    465   if (cont->IsSet()) {
    466     outputs[output_count++] = g.DefineAsRegister(cont->result());
    467   }
    468 
    469   DCHECK_NE(0u, input_count);
    470   DCHECK((output_count != 0) || IsComparisonField::decode(properties));
    471   DCHECK_GE(arraysize(inputs), input_count);
    472   DCHECK_GE(arraysize(outputs), output_count);
    473 
    474   opcode = cont->Encode(opcode);
    475   if (cont->IsDeoptimize()) {
    476     selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
    477                              cont->kind(), cont->reason(), cont->frame_state());
    478   } else if (cont->IsTrap()) {
    479     inputs[input_count++] = g.UseImmediate(cont->trap_id());
    480     selector->Emit(opcode, output_count, outputs, input_count, inputs);
    481   } else {
    482     selector->Emit(opcode, output_count, outputs, input_count, inputs);
    483   }
    484 }
    485 
    486 
    487 // Shared routine for multiple binary operations.
    488 template <typename Matcher>
    489 void VisitBinop(InstructionSelector* selector, Node* node, ArchOpcode opcode,
    490                 ImmediateMode operand_mode) {
    491   FlagsContinuation cont;
    492   VisitBinop<Matcher>(selector, node, opcode, operand_mode, &cont);
    493 }
    494 
    495 
    496 template <typename Matcher>
    497 void VisitAddSub(InstructionSelector* selector, Node* node, ArchOpcode opcode,
    498                  ArchOpcode negate_opcode) {
    499   Arm64OperandGenerator g(selector);
    500   Matcher m(node);
    501   if (m.right().HasValue() && (m.right().Value() < 0) &&
    502       g.CanBeImmediate(-m.right().Value(), kArithmeticImm)) {
    503     selector->Emit(negate_opcode, g.DefineAsRegister(node),
    504                    g.UseRegister(m.left().node()),
    505                    g.TempImmediate(static_cast<int32_t>(-m.right().Value())));
    506   } else {
    507     VisitBinop<Matcher>(selector, node, opcode, kArithmeticImm);
    508   }
    509 }
    510 
    511 
    512 // For multiplications by immediate of the form x * (2^k + 1), where k > 0,
    513 // return the value of k, otherwise return zero. This is used to reduce the
    514 // multiplication to addition with left shift: x + (x << k).
    515 template <typename Matcher>
    516 int32_t LeftShiftForReducedMultiply(Matcher* m) {
    517   DCHECK(m->IsInt32Mul() || m->IsInt64Mul());
    518   if (m->right().HasValue() && m->right().Value() >= 3) {
    519     uint64_t value_minus_one = m->right().Value() - 1;
    520     if (base::bits::IsPowerOfTwo64(value_minus_one)) {
    521       return WhichPowerOf2_64(value_minus_one);
    522     }
    523   }
    524   return 0;
    525 }
    526 
    527 }  // namespace
    528 
    529 void EmitLoad(InstructionSelector* selector, Node* node, InstructionCode opcode,
    530               ImmediateMode immediate_mode, MachineRepresentation rep,
    531               Node* output = nullptr) {
    532   Arm64OperandGenerator g(selector);
    533   Node* base = node->InputAt(0);
    534   Node* index = node->InputAt(1);
    535   InstructionOperand inputs[3];
    536   size_t input_count = 0;
    537   InstructionOperand outputs[1];
    538 
    539   // If output is not nullptr, use that as the output register. This
    540   // is used when we merge a conversion into the load.
    541   outputs[0] = g.DefineAsRegister(output == nullptr ? node : output);
    542   inputs[0] = g.UseRegister(base);
    543 
    544   if (g.CanBeImmediate(index, immediate_mode)) {
    545     input_count = 2;
    546     inputs[1] = g.UseImmediate(index);
    547     opcode |= AddressingModeField::encode(kMode_MRI);
    548   } else if (TryMatchLoadStoreShift(&g, selector, rep, node, index, &inputs[1],
    549                                     &inputs[2])) {
    550     input_count = 3;
    551     opcode |= AddressingModeField::encode(kMode_Operand2_R_LSL_I);
    552   } else {
    553     input_count = 2;
    554     inputs[1] = g.UseRegister(index);
    555     opcode |= AddressingModeField::encode(kMode_MRR);
    556   }
    557 
    558   selector->Emit(opcode, arraysize(outputs), outputs, input_count, inputs);
    559 }
    560 
    561 void InstructionSelector::VisitLoad(Node* node) {
    562   InstructionCode opcode = kArchNop;
    563   ImmediateMode immediate_mode = kNoImmediate;
    564   LoadRepresentation load_rep = LoadRepresentationOf(node->op());
    565   MachineRepresentation rep = load_rep.representation();
    566   switch (rep) {
    567     case MachineRepresentation::kFloat32:
    568       opcode = kArm64LdrS;
    569       immediate_mode = kLoadStoreImm32;
    570       break;
    571     case MachineRepresentation::kFloat64:
    572       opcode = kArm64LdrD;
    573       immediate_mode = kLoadStoreImm64;
    574       break;
    575     case MachineRepresentation::kBit:  // Fall through.
    576     case MachineRepresentation::kWord8:
    577       opcode = load_rep.IsSigned() ? kArm64Ldrsb : kArm64Ldrb;
    578       immediate_mode = kLoadStoreImm8;
    579       break;
    580     case MachineRepresentation::kWord16:
    581       opcode = load_rep.IsSigned() ? kArm64Ldrsh : kArm64Ldrh;
    582       immediate_mode = kLoadStoreImm16;
    583       break;
    584     case MachineRepresentation::kWord32:
    585       opcode = kArm64LdrW;
    586       immediate_mode = kLoadStoreImm32;
    587       break;
    588     case MachineRepresentation::kTaggedSigned:   // Fall through.
    589     case MachineRepresentation::kTaggedPointer:  // Fall through.
    590     case MachineRepresentation::kTagged:  // Fall through.
    591     case MachineRepresentation::kWord64:
    592       opcode = kArm64Ldr;
    593       immediate_mode = kLoadStoreImm64;
    594       break;
    595     case MachineRepresentation::kSimd128:  // Fall through.
    596     case MachineRepresentation::kSimd1x4:  // Fall through.
    597     case MachineRepresentation::kSimd1x8:  // Fall through.
    598     case MachineRepresentation::kSimd1x16:  // Fall through.
    599     case MachineRepresentation::kNone:
    600       UNREACHABLE();
    601       return;
    602   }
    603   EmitLoad(this, node, opcode, immediate_mode, rep);
    604 }
    605 
    606 void InstructionSelector::VisitProtectedLoad(Node* node) {
    607   // TODO(eholk)
    608   UNIMPLEMENTED();
    609 }
    610 
    611 void InstructionSelector::VisitStore(Node* node) {
    612   Arm64OperandGenerator g(this);
    613   Node* base = node->InputAt(0);
    614   Node* index = node->InputAt(1);
    615   Node* value = node->InputAt(2);
    616 
    617   StoreRepresentation store_rep = StoreRepresentationOf(node->op());
    618   WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
    619   MachineRepresentation rep = store_rep.representation();
    620 
    621   // TODO(arm64): I guess this could be done in a better way.
    622   if (write_barrier_kind != kNoWriteBarrier) {
    623     DCHECK(CanBeTaggedPointer(rep));
    624     AddressingMode addressing_mode;
    625     InstructionOperand inputs[3];
    626     size_t input_count = 0;
    627     inputs[input_count++] = g.UseUniqueRegister(base);
    628     // OutOfLineRecordWrite uses the index in an arithmetic instruction, so we
    629     // must check kArithmeticImm as well as kLoadStoreImm64.
    630     if (g.CanBeImmediate(index, kArithmeticImm) &&
    631         g.CanBeImmediate(index, kLoadStoreImm64)) {
    632       inputs[input_count++] = g.UseImmediate(index);
    633       addressing_mode = kMode_MRI;
    634     } else {
    635       inputs[input_count++] = g.UseUniqueRegister(index);
    636       addressing_mode = kMode_MRR;
    637     }
    638     inputs[input_count++] = g.UseUniqueRegister(value);
    639     RecordWriteMode record_write_mode = RecordWriteMode::kValueIsAny;
    640     switch (write_barrier_kind) {
    641       case kNoWriteBarrier:
    642         UNREACHABLE();
    643         break;
    644       case kMapWriteBarrier:
    645         record_write_mode = RecordWriteMode::kValueIsMap;
    646         break;
    647       case kPointerWriteBarrier:
    648         record_write_mode = RecordWriteMode::kValueIsPointer;
    649         break;
    650       case kFullWriteBarrier:
    651         record_write_mode = RecordWriteMode::kValueIsAny;
    652         break;
    653     }
    654     InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
    655     size_t const temp_count = arraysize(temps);
    656     InstructionCode code = kArchStoreWithWriteBarrier;
    657     code |= AddressingModeField::encode(addressing_mode);
    658     code |= MiscField::encode(static_cast<int>(record_write_mode));
    659     Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
    660   } else {
    661     InstructionOperand inputs[4];
    662     size_t input_count = 0;
    663     InstructionCode opcode = kArchNop;
    664     ImmediateMode immediate_mode = kNoImmediate;
    665     switch (rep) {
    666       case MachineRepresentation::kFloat32:
    667         opcode = kArm64StrS;
    668         immediate_mode = kLoadStoreImm32;
    669         break;
    670       case MachineRepresentation::kFloat64:
    671         opcode = kArm64StrD;
    672         immediate_mode = kLoadStoreImm64;
    673         break;
    674       case MachineRepresentation::kBit:  // Fall through.
    675       case MachineRepresentation::kWord8:
    676         opcode = kArm64Strb;
    677         immediate_mode = kLoadStoreImm8;
    678         break;
    679       case MachineRepresentation::kWord16:
    680         opcode = kArm64Strh;
    681         immediate_mode = kLoadStoreImm16;
    682         break;
    683       case MachineRepresentation::kWord32:
    684         opcode = kArm64StrW;
    685         immediate_mode = kLoadStoreImm32;
    686         break;
    687       case MachineRepresentation::kTaggedSigned:   // Fall through.
    688       case MachineRepresentation::kTaggedPointer:  // Fall through.
    689       case MachineRepresentation::kTagged:  // Fall through.
    690       case MachineRepresentation::kWord64:
    691         opcode = kArm64Str;
    692         immediate_mode = kLoadStoreImm64;
    693         break;
    694       case MachineRepresentation::kSimd128:  // Fall through.
    695       case MachineRepresentation::kSimd1x4:  // Fall through.
    696       case MachineRepresentation::kSimd1x8:  // Fall through.
    697       case MachineRepresentation::kSimd1x16:  // Fall through.
    698       case MachineRepresentation::kNone:
    699         UNREACHABLE();
    700         return;
    701     }
    702 
    703     inputs[0] = g.UseRegisterOrImmediateZero(value);
    704     inputs[1] = g.UseRegister(base);
    705 
    706     if (g.CanBeImmediate(index, immediate_mode)) {
    707       input_count = 3;
    708       inputs[2] = g.UseImmediate(index);
    709       opcode |= AddressingModeField::encode(kMode_MRI);
    710     } else if (TryMatchLoadStoreShift(&g, this, rep, node, index, &inputs[2],
    711                                       &inputs[3])) {
    712       input_count = 4;
    713       opcode |= AddressingModeField::encode(kMode_Operand2_R_LSL_I);
    714     } else {
    715       input_count = 3;
    716       inputs[2] = g.UseRegister(index);
    717       opcode |= AddressingModeField::encode(kMode_MRR);
    718     }
    719 
    720     Emit(opcode, 0, nullptr, input_count, inputs);
    721   }
    722 }
    723 
    724 void InstructionSelector::VisitProtectedStore(Node* node) {
    725   // TODO(eholk)
    726   UNIMPLEMENTED();
    727 }
    728 
    729 // Architecture supports unaligned access, therefore VisitLoad is used instead
    730 void InstructionSelector::VisitUnalignedLoad(Node* node) { UNREACHABLE(); }
    731 
    732 // Architecture supports unaligned access, therefore VisitStore is used instead
    733 void InstructionSelector::VisitUnalignedStore(Node* node) { UNREACHABLE(); }
    734 
    735 void InstructionSelector::VisitCheckedLoad(Node* node) {
    736   CheckedLoadRepresentation load_rep = CheckedLoadRepresentationOf(node->op());
    737   Arm64OperandGenerator g(this);
    738   Node* const buffer = node->InputAt(0);
    739   Node* const offset = node->InputAt(1);
    740   Node* const length = node->InputAt(2);
    741   ArchOpcode opcode = kArchNop;
    742   switch (load_rep.representation()) {
    743     case MachineRepresentation::kWord8:
    744       opcode = load_rep.IsSigned() ? kCheckedLoadInt8 : kCheckedLoadUint8;
    745       break;
    746     case MachineRepresentation::kWord16:
    747       opcode = load_rep.IsSigned() ? kCheckedLoadInt16 : kCheckedLoadUint16;
    748       break;
    749     case MachineRepresentation::kWord32:
    750       opcode = kCheckedLoadWord32;
    751       break;
    752     case MachineRepresentation::kWord64:
    753       opcode = kCheckedLoadWord64;
    754       break;
    755     case MachineRepresentation::kFloat32:
    756       opcode = kCheckedLoadFloat32;
    757       break;
    758     case MachineRepresentation::kFloat64:
    759       opcode = kCheckedLoadFloat64;
    760       break;
    761     case MachineRepresentation::kBit:      // Fall through.
    762     case MachineRepresentation::kTaggedSigned:   // Fall through.
    763     case MachineRepresentation::kTaggedPointer:  // Fall through.
    764     case MachineRepresentation::kTagged:   // Fall through.
    765     case MachineRepresentation::kSimd128:  // Fall through.
    766     case MachineRepresentation::kSimd1x4:  // Fall through.
    767     case MachineRepresentation::kSimd1x8:  // Fall through.
    768     case MachineRepresentation::kSimd1x16:  // Fall through.
    769     case MachineRepresentation::kNone:
    770       UNREACHABLE();
    771       return;
    772   }
    773   // If the length is a constant power of two, allow the code generator to
    774   // pick a more efficient bounds check sequence by passing the length as an
    775   // immediate.
    776   if (length->opcode() == IrOpcode::kInt32Constant) {
    777     Int32Matcher m(length);
    778     if (m.IsPowerOf2()) {
    779       Emit(opcode, g.DefineAsRegister(node), g.UseRegister(buffer),
    780            g.UseRegister(offset), g.UseImmediate(length));
    781       return;
    782     }
    783   }
    784   Emit(opcode, g.DefineAsRegister(node), g.UseRegister(buffer),
    785        g.UseRegister(offset), g.UseOperand(length, kArithmeticImm));
    786 }
    787 
    788 
    789 void InstructionSelector::VisitCheckedStore(Node* node) {
    790   MachineRepresentation rep = CheckedStoreRepresentationOf(node->op());
    791   Arm64OperandGenerator g(this);
    792   Node* const buffer = node->InputAt(0);
    793   Node* const offset = node->InputAt(1);
    794   Node* const length = node->InputAt(2);
    795   Node* const value = node->InputAt(3);
    796   ArchOpcode opcode = kArchNop;
    797   switch (rep) {
    798     case MachineRepresentation::kWord8:
    799       opcode = kCheckedStoreWord8;
    800       break;
    801     case MachineRepresentation::kWord16:
    802       opcode = kCheckedStoreWord16;
    803       break;
    804     case MachineRepresentation::kWord32:
    805       opcode = kCheckedStoreWord32;
    806       break;
    807     case MachineRepresentation::kWord64:
    808       opcode = kCheckedStoreWord64;
    809       break;
    810     case MachineRepresentation::kFloat32:
    811       opcode = kCheckedStoreFloat32;
    812       break;
    813     case MachineRepresentation::kFloat64:
    814       opcode = kCheckedStoreFloat64;
    815       break;
    816     case MachineRepresentation::kBit:      // Fall through.
    817     case MachineRepresentation::kTaggedSigned:   // Fall through.
    818     case MachineRepresentation::kTaggedPointer:  // Fall through.
    819     case MachineRepresentation::kTagged:   // Fall through.
    820     case MachineRepresentation::kSimd128:  // Fall through.
    821     case MachineRepresentation::kSimd1x4:  // Fall through.
    822     case MachineRepresentation::kSimd1x8:  // Fall through.
    823     case MachineRepresentation::kSimd1x16:  // Fall through.
    824     case MachineRepresentation::kNone:
    825       UNREACHABLE();
    826       return;
    827   }
    828   // If the length is a constant power of two, allow the code generator to
    829   // pick a more efficient bounds check sequence by passing the length as an
    830   // immediate.
    831   if (length->opcode() == IrOpcode::kInt32Constant) {
    832     Int32Matcher m(length);
    833     if (m.IsPowerOf2()) {
    834       Emit(opcode, g.NoOutput(), g.UseRegister(buffer), g.UseRegister(offset),
    835            g.UseImmediate(length), g.UseRegisterOrImmediateZero(value));
    836       return;
    837     }
    838   }
    839   Emit(opcode, g.NoOutput(), g.UseRegister(buffer), g.UseRegister(offset),
    840        g.UseOperand(length, kArithmeticImm),
    841        g.UseRegisterOrImmediateZero(value));
    842 }
    843 
    844 
    845 template <typename Matcher>
    846 static void VisitLogical(InstructionSelector* selector, Node* node, Matcher* m,
    847                          ArchOpcode opcode, bool left_can_cover,
    848                          bool right_can_cover, ImmediateMode imm_mode) {
    849   Arm64OperandGenerator g(selector);
    850 
    851   // Map instruction to equivalent operation with inverted right input.
    852   ArchOpcode inv_opcode = opcode;
    853   switch (opcode) {
    854     case kArm64And32:
    855       inv_opcode = kArm64Bic32;
    856       break;
    857     case kArm64And:
    858       inv_opcode = kArm64Bic;
    859       break;
    860     case kArm64Or32:
    861       inv_opcode = kArm64Orn32;
    862       break;
    863     case kArm64Or:
    864       inv_opcode = kArm64Orn;
    865       break;
    866     case kArm64Eor32:
    867       inv_opcode = kArm64Eon32;
    868       break;
    869     case kArm64Eor:
    870       inv_opcode = kArm64Eon;
    871       break;
    872     default:
    873       UNREACHABLE();
    874   }
    875 
    876   // Select Logical(y, ~x) for Logical(Xor(x, -1), y).
    877   if ((m->left().IsWord32Xor() || m->left().IsWord64Xor()) && left_can_cover) {
    878     Matcher mleft(m->left().node());
    879     if (mleft.right().Is(-1)) {
    880       // TODO(all): support shifted operand on right.
    881       selector->Emit(inv_opcode, g.DefineAsRegister(node),
    882                      g.UseRegister(m->right().node()),
    883                      g.UseRegister(mleft.left().node()));
    884       return;
    885     }
    886   }
    887 
    888   // Select Logical(x, ~y) for Logical(x, Xor(y, -1)).
    889   if ((m->right().IsWord32Xor() || m->right().IsWord64Xor()) &&
    890       right_can_cover) {
    891     Matcher mright(m->right().node());
    892     if (mright.right().Is(-1)) {
    893       // TODO(all): support shifted operand on right.
    894       selector->Emit(inv_opcode, g.DefineAsRegister(node),
    895                      g.UseRegister(m->left().node()),
    896                      g.UseRegister(mright.left().node()));
    897       return;
    898     }
    899   }
    900 
    901   if (m->IsWord32Xor() && m->right().Is(-1)) {
    902     selector->Emit(kArm64Not32, g.DefineAsRegister(node),
    903                    g.UseRegister(m->left().node()));
    904   } else if (m->IsWord64Xor() && m->right().Is(-1)) {
    905     selector->Emit(kArm64Not, g.DefineAsRegister(node),
    906                    g.UseRegister(m->left().node()));
    907   } else {
    908     VisitBinop<Matcher>(selector, node, opcode, imm_mode);
    909   }
    910 }
    911 
    912 
    913 void InstructionSelector::VisitWord32And(Node* node) {
    914   Arm64OperandGenerator g(this);
    915   Int32BinopMatcher m(node);
    916   if (m.left().IsWord32Shr() && CanCover(node, m.left().node()) &&
    917       m.right().HasValue()) {
    918     uint32_t mask = m.right().Value();
    919     uint32_t mask_width = base::bits::CountPopulation32(mask);
    920     uint32_t mask_msb = base::bits::CountLeadingZeros32(mask);
    921     if ((mask_width != 0) && (mask_msb + mask_width == 32)) {
    922       // The mask must be contiguous, and occupy the least-significant bits.
    923       DCHECK_EQ(0u, base::bits::CountTrailingZeros32(mask));
    924 
    925       // Select Ubfx for And(Shr(x, imm), mask) where the mask is in the least
    926       // significant bits.
    927       Int32BinopMatcher mleft(m.left().node());
    928       if (mleft.right().HasValue()) {
    929         // Any shift value can match; int32 shifts use `value % 32`.
    930         uint32_t lsb = mleft.right().Value() & 0x1f;
    931 
    932         // Ubfx cannot extract bits past the register size, however since
    933         // shifting the original value would have introduced some zeros we can
    934         // still use ubfx with a smaller mask and the remaining bits will be
    935         // zeros.
    936         if (lsb + mask_width > 32) mask_width = 32 - lsb;
    937 
    938         Emit(kArm64Ubfx32, g.DefineAsRegister(node),
    939              g.UseRegister(mleft.left().node()),
    940              g.UseImmediateOrTemp(mleft.right().node(), lsb),
    941              g.TempImmediate(mask_width));
    942         return;
    943       }
    944       // Other cases fall through to the normal And operation.
    945     }
    946   }
    947   VisitLogical<Int32BinopMatcher>(
    948       this, node, &m, kArm64And32, CanCover(node, m.left().node()),
    949       CanCover(node, m.right().node()), kLogical32Imm);
    950 }
    951 
    952 
    953 void InstructionSelector::VisitWord64And(Node* node) {
    954   Arm64OperandGenerator g(this);
    955   Int64BinopMatcher m(node);
    956   if (m.left().IsWord64Shr() && CanCover(node, m.left().node()) &&
    957       m.right().HasValue()) {
    958     uint64_t mask = m.right().Value();
    959     uint64_t mask_width = base::bits::CountPopulation64(mask);
    960     uint64_t mask_msb = base::bits::CountLeadingZeros64(mask);
    961     if ((mask_width != 0) && (mask_width != 64) &&
    962         (mask_msb + mask_width == 64)) {
    963       // The mask must be contiguous, and occupy the least-significant bits.
    964       DCHECK_EQ(0u, base::bits::CountTrailingZeros64(mask));
    965 
    966       // Select Ubfx for And(Shr(x, imm), mask) where the mask is in the least
    967       // significant bits.
    968       Int64BinopMatcher mleft(m.left().node());
    969       if (mleft.right().HasValue()) {
    970         // Any shift value can match; int64 shifts use `value % 64`.
    971         uint32_t lsb = static_cast<uint32_t>(mleft.right().Value() & 0x3f);
    972 
    973         // Ubfx cannot extract bits past the register size, however since
    974         // shifting the original value would have introduced some zeros we can
    975         // still use ubfx with a smaller mask and the remaining bits will be
    976         // zeros.
    977         if (lsb + mask_width > 64) mask_width = 64 - lsb;
    978 
    979         Emit(kArm64Ubfx, g.DefineAsRegister(node),
    980              g.UseRegister(mleft.left().node()),
    981              g.UseImmediateOrTemp(mleft.right().node(), lsb),
    982              g.TempImmediate(static_cast<int32_t>(mask_width)));
    983         return;
    984       }
    985       // Other cases fall through to the normal And operation.
    986     }
    987   }
    988   VisitLogical<Int64BinopMatcher>(
    989       this, node, &m, kArm64And, CanCover(node, m.left().node()),
    990       CanCover(node, m.right().node()), kLogical64Imm);
    991 }
    992 
    993 
    994 void InstructionSelector::VisitWord32Or(Node* node) {
    995   Int32BinopMatcher m(node);
    996   VisitLogical<Int32BinopMatcher>(
    997       this, node, &m, kArm64Or32, CanCover(node, m.left().node()),
    998       CanCover(node, m.right().node()), kLogical32Imm);
    999 }
   1000 
   1001 
   1002 void InstructionSelector::VisitWord64Or(Node* node) {
   1003   Int64BinopMatcher m(node);
   1004   VisitLogical<Int64BinopMatcher>(
   1005       this, node, &m, kArm64Or, CanCover(node, m.left().node()),
   1006       CanCover(node, m.right().node()), kLogical64Imm);
   1007 }
   1008 
   1009 
   1010 void InstructionSelector::VisitWord32Xor(Node* node) {
   1011   Int32BinopMatcher m(node);
   1012   VisitLogical<Int32BinopMatcher>(
   1013       this, node, &m, kArm64Eor32, CanCover(node, m.left().node()),
   1014       CanCover(node, m.right().node()), kLogical32Imm);
   1015 }
   1016 
   1017 
   1018 void InstructionSelector::VisitWord64Xor(Node* node) {
   1019   Int64BinopMatcher m(node);
   1020   VisitLogical<Int64BinopMatcher>(
   1021       this, node, &m, kArm64Eor, CanCover(node, m.left().node()),
   1022       CanCover(node, m.right().node()), kLogical64Imm);
   1023 }
   1024 
   1025 
   1026 void InstructionSelector::VisitWord32Shl(Node* node) {
   1027   Int32BinopMatcher m(node);
   1028   if (m.left().IsWord32And() && CanCover(node, m.left().node()) &&
   1029       m.right().IsInRange(1, 31)) {
   1030     Arm64OperandGenerator g(this);
   1031     Int32BinopMatcher mleft(m.left().node());
   1032     if (mleft.right().HasValue()) {
   1033       uint32_t mask = mleft.right().Value();
   1034       uint32_t mask_width = base::bits::CountPopulation32(mask);
   1035       uint32_t mask_msb = base::bits::CountLeadingZeros32(mask);
   1036       if ((mask_width != 0) && (mask_msb + mask_width == 32)) {
   1037         uint32_t shift = m.right().Value();
   1038         DCHECK_EQ(0u, base::bits::CountTrailingZeros32(mask));
   1039         DCHECK_NE(0u, shift);
   1040 
   1041         if ((shift + mask_width) >= 32) {
   1042           // If the mask is contiguous and reaches or extends beyond the top
   1043           // bit, only the shift is needed.
   1044           Emit(kArm64Lsl32, g.DefineAsRegister(node),
   1045                g.UseRegister(mleft.left().node()),
   1046                g.UseImmediate(m.right().node()));
   1047           return;
   1048         } else {
   1049           // Select Ubfiz for Shl(And(x, mask), imm) where the mask is
   1050           // contiguous, and the shift immediate non-zero.
   1051           Emit(kArm64Ubfiz32, g.DefineAsRegister(node),
   1052                g.UseRegister(mleft.left().node()),
   1053                g.UseImmediate(m.right().node()), g.TempImmediate(mask_width));
   1054           return;
   1055         }
   1056       }
   1057     }
   1058   }
   1059   VisitRRO(this, kArm64Lsl32, node, kShift32Imm);
   1060 }
   1061 
   1062 
   1063 void InstructionSelector::VisitWord64Shl(Node* node) {
   1064   Arm64OperandGenerator g(this);
   1065   Int64BinopMatcher m(node);
   1066   if ((m.left().IsChangeInt32ToInt64() || m.left().IsChangeUint32ToUint64()) &&
   1067       m.right().IsInRange(32, 63) && CanCover(node, m.left().node())) {
   1068     // There's no need to sign/zero-extend to 64-bit if we shift out the upper
   1069     // 32 bits anyway.
   1070     Emit(kArm64Lsl, g.DefineAsRegister(node),
   1071          g.UseRegister(m.left().node()->InputAt(0)),
   1072          g.UseImmediate(m.right().node()));
   1073     return;
   1074   }
   1075   VisitRRO(this, kArm64Lsl, node, kShift64Imm);
   1076 }
   1077 
   1078 
   1079 namespace {
   1080 
   1081 bool TryEmitBitfieldExtract32(InstructionSelector* selector, Node* node) {
   1082   Arm64OperandGenerator g(selector);
   1083   Int32BinopMatcher m(node);
   1084   if (selector->CanCover(node, m.left().node()) && m.left().IsWord32Shl()) {
   1085     // Select Ubfx or Sbfx for (x << (K & 0x1f)) OP (K & 0x1f), where
   1086     // OP is >>> or >> and (K & 0x1f) != 0.
   1087     Int32BinopMatcher mleft(m.left().node());
   1088     if (mleft.right().HasValue() && m.right().HasValue() &&
   1089         (mleft.right().Value() & 0x1f) != 0 &&
   1090         (mleft.right().Value() & 0x1f) == (m.right().Value() & 0x1f)) {
   1091       DCHECK(m.IsWord32Shr() || m.IsWord32Sar());
   1092       ArchOpcode opcode = m.IsWord32Sar() ? kArm64Sbfx32 : kArm64Ubfx32;
   1093 
   1094       int right_val = m.right().Value() & 0x1f;
   1095       DCHECK_NE(right_val, 0);
   1096 
   1097       selector->Emit(opcode, g.DefineAsRegister(node),
   1098                      g.UseRegister(mleft.left().node()), g.TempImmediate(0),
   1099                      g.TempImmediate(32 - right_val));
   1100       return true;
   1101     }
   1102   }
   1103   return false;
   1104 }
   1105 
   1106 }  // namespace
   1107 
   1108 
   1109 void InstructionSelector::VisitWord32Shr(Node* node) {
   1110   Int32BinopMatcher m(node);
   1111   if (m.left().IsWord32And() && m.right().HasValue()) {
   1112     uint32_t lsb = m.right().Value() & 0x1f;
   1113     Int32BinopMatcher mleft(m.left().node());
   1114     if (mleft.right().HasValue()) {
   1115       // Select Ubfx for Shr(And(x, mask), imm) where the result of the mask is
   1116       // shifted into the least-significant bits.
   1117       uint32_t mask = (mleft.right().Value() >> lsb) << lsb;
   1118       unsigned mask_width = base::bits::CountPopulation32(mask);
   1119       unsigned mask_msb = base::bits::CountLeadingZeros32(mask);
   1120       if ((mask_msb + mask_width + lsb) == 32) {
   1121         Arm64OperandGenerator g(this);
   1122         DCHECK_EQ(lsb, base::bits::CountTrailingZeros32(mask));
   1123         Emit(kArm64Ubfx32, g.DefineAsRegister(node),
   1124              g.UseRegister(mleft.left().node()),
   1125              g.UseImmediateOrTemp(m.right().node(), lsb),
   1126              g.TempImmediate(mask_width));
   1127         return;
   1128       }
   1129     }
   1130   } else if (TryEmitBitfieldExtract32(this, node)) {
   1131     return;
   1132   }
   1133 
   1134   if (m.left().IsUint32MulHigh() && m.right().HasValue() &&
   1135       CanCover(node, node->InputAt(0))) {
   1136     // Combine this shift with the multiply and shift that would be generated
   1137     // by Uint32MulHigh.
   1138     Arm64OperandGenerator g(this);
   1139     Node* left = m.left().node();
   1140     int shift = m.right().Value() & 0x1f;
   1141     InstructionOperand const smull_operand = g.TempRegister();
   1142     Emit(kArm64Umull, smull_operand, g.UseRegister(left->InputAt(0)),
   1143          g.UseRegister(left->InputAt(1)));
   1144     Emit(kArm64Lsr, g.DefineAsRegister(node), smull_operand,
   1145          g.TempImmediate(32 + shift));
   1146     return;
   1147   }
   1148 
   1149   VisitRRO(this, kArm64Lsr32, node, kShift32Imm);
   1150 }
   1151 
   1152 
   1153 void InstructionSelector::VisitWord64Shr(Node* node) {
   1154   Int64BinopMatcher m(node);
   1155   if (m.left().IsWord64And() && m.right().HasValue()) {
   1156     uint32_t lsb = m.right().Value() & 0x3f;
   1157     Int64BinopMatcher mleft(m.left().node());
   1158     if (mleft.right().HasValue()) {
   1159       // Select Ubfx for Shr(And(x, mask), imm) where the result of the mask is
   1160       // shifted into the least-significant bits.
   1161       uint64_t mask = (mleft.right().Value() >> lsb) << lsb;
   1162       unsigned mask_width = base::bits::CountPopulation64(mask);
   1163       unsigned mask_msb = base::bits::CountLeadingZeros64(mask);
   1164       if ((mask_msb + mask_width + lsb) == 64) {
   1165         Arm64OperandGenerator g(this);
   1166         DCHECK_EQ(lsb, base::bits::CountTrailingZeros64(mask));
   1167         Emit(kArm64Ubfx, g.DefineAsRegister(node),
   1168              g.UseRegister(mleft.left().node()),
   1169              g.UseImmediateOrTemp(m.right().node(), lsb),
   1170              g.TempImmediate(mask_width));
   1171         return;
   1172       }
   1173     }
   1174   }
   1175   VisitRRO(this, kArm64Lsr, node, kShift64Imm);
   1176 }
   1177 
   1178 
   1179 void InstructionSelector::VisitWord32Sar(Node* node) {
   1180   if (TryEmitBitfieldExtract32(this, node)) {
   1181     return;
   1182   }
   1183 
   1184   Int32BinopMatcher m(node);
   1185   if (m.left().IsInt32MulHigh() && m.right().HasValue() &&
   1186       CanCover(node, node->InputAt(0))) {
   1187     // Combine this shift with the multiply and shift that would be generated
   1188     // by Int32MulHigh.
   1189     Arm64OperandGenerator g(this);
   1190     Node* left = m.left().node();
   1191     int shift = m.right().Value() & 0x1f;
   1192     InstructionOperand const smull_operand = g.TempRegister();
   1193     Emit(kArm64Smull, smull_operand, g.UseRegister(left->InputAt(0)),
   1194          g.UseRegister(left->InputAt(1)));
   1195     Emit(kArm64Asr, g.DefineAsRegister(node), smull_operand,
   1196          g.TempImmediate(32 + shift));
   1197     return;
   1198   }
   1199 
   1200   if (m.left().IsInt32Add() && m.right().HasValue() &&
   1201       CanCover(node, node->InputAt(0))) {
   1202     Node* add_node = m.left().node();
   1203     Int32BinopMatcher madd_node(add_node);
   1204     if (madd_node.left().IsInt32MulHigh() &&
   1205         CanCover(add_node, madd_node.left().node())) {
   1206       // Combine the shift that would be generated by Int32MulHigh with the add
   1207       // on the left of this Sar operation. We do it here, as the result of the
   1208       // add potentially has 33 bits, so we have to ensure the result is
   1209       // truncated by being the input to this 32-bit Sar operation.
   1210       Arm64OperandGenerator g(this);
   1211       Node* mul_node = madd_node.left().node();
   1212 
   1213       InstructionOperand const smull_operand = g.TempRegister();
   1214       Emit(kArm64Smull, smull_operand, g.UseRegister(mul_node->InputAt(0)),
   1215            g.UseRegister(mul_node->InputAt(1)));
   1216 
   1217       InstructionOperand const add_operand = g.TempRegister();
   1218       Emit(kArm64Add | AddressingModeField::encode(kMode_Operand2_R_ASR_I),
   1219            add_operand, g.UseRegister(add_node->InputAt(1)), smull_operand,
   1220            g.TempImmediate(32));
   1221 
   1222       Emit(kArm64Asr32, g.DefineAsRegister(node), add_operand,
   1223            g.UseImmediate(node->InputAt(1)));
   1224       return;
   1225     }
   1226   }
   1227 
   1228   VisitRRO(this, kArm64Asr32, node, kShift32Imm);
   1229 }
   1230 
   1231 
   1232 void InstructionSelector::VisitWord64Sar(Node* node) {
   1233   if (TryEmitExtendingLoad(this, node)) return;
   1234   VisitRRO(this, kArm64Asr, node, kShift64Imm);
   1235 }
   1236 
   1237 
   1238 void InstructionSelector::VisitWord32Ror(Node* node) {
   1239   VisitRRO(this, kArm64Ror32, node, kShift32Imm);
   1240 }
   1241 
   1242 
   1243 void InstructionSelector::VisitWord64Ror(Node* node) {
   1244   VisitRRO(this, kArm64Ror, node, kShift64Imm);
   1245 }
   1246 
   1247 #define RR_OP_LIST(V)                                         \
   1248   V(Word64Clz, kArm64Clz)                                     \
   1249   V(Word32Clz, kArm64Clz32)                                   \
   1250   V(Word32ReverseBits, kArm64Rbit32)                          \
   1251   V(Word64ReverseBits, kArm64Rbit)                            \
   1252   V(ChangeFloat32ToFloat64, kArm64Float32ToFloat64)           \
   1253   V(RoundInt32ToFloat32, kArm64Int32ToFloat32)                \
   1254   V(RoundUint32ToFloat32, kArm64Uint32ToFloat32)              \
   1255   V(ChangeInt32ToFloat64, kArm64Int32ToFloat64)               \
   1256   V(ChangeUint32ToFloat64, kArm64Uint32ToFloat64)             \
   1257   V(TruncateFloat32ToInt32, kArm64Float32ToInt32)             \
   1258   V(ChangeFloat64ToInt32, kArm64Float64ToInt32)               \
   1259   V(TruncateFloat32ToUint32, kArm64Float32ToUint32)           \
   1260   V(ChangeFloat64ToUint32, kArm64Float64ToUint32)             \
   1261   V(TruncateFloat64ToUint32, kArm64Float64ToUint32)           \
   1262   V(TruncateFloat64ToFloat32, kArm64Float64ToFloat32)         \
   1263   V(TruncateFloat64ToWord32, kArchTruncateDoubleToI)          \
   1264   V(RoundFloat64ToInt32, kArm64Float64ToInt32)                \
   1265   V(RoundInt64ToFloat32, kArm64Int64ToFloat32)                \
   1266   V(RoundInt64ToFloat64, kArm64Int64ToFloat64)                \
   1267   V(RoundUint64ToFloat32, kArm64Uint64ToFloat32)              \
   1268   V(RoundUint64ToFloat64, kArm64Uint64ToFloat64)              \
   1269   V(BitcastFloat32ToInt32, kArm64Float64ExtractLowWord32)     \
   1270   V(BitcastFloat64ToInt64, kArm64U64MoveFloat64)              \
   1271   V(BitcastInt32ToFloat32, kArm64Float64MoveU64)              \
   1272   V(BitcastInt64ToFloat64, kArm64Float64MoveU64)              \
   1273   V(Float32Abs, kArm64Float32Abs)                             \
   1274   V(Float64Abs, kArm64Float64Abs)                             \
   1275   V(Float32Sqrt, kArm64Float32Sqrt)                           \
   1276   V(Float64Sqrt, kArm64Float64Sqrt)                           \
   1277   V(Float32RoundDown, kArm64Float32RoundDown)                 \
   1278   V(Float64RoundDown, kArm64Float64RoundDown)                 \
   1279   V(Float32RoundUp, kArm64Float32RoundUp)                     \
   1280   V(Float64RoundUp, kArm64Float64RoundUp)                     \
   1281   V(Float32RoundTruncate, kArm64Float32RoundTruncate)         \
   1282   V(Float64RoundTruncate, kArm64Float64RoundTruncate)         \
   1283   V(Float64RoundTiesAway, kArm64Float64RoundTiesAway)         \
   1284   V(Float32RoundTiesEven, kArm64Float32RoundTiesEven)         \
   1285   V(Float64RoundTiesEven, kArm64Float64RoundTiesEven)         \
   1286   V(Float32Neg, kArm64Float32Neg)                             \
   1287   V(Float64Neg, kArm64Float64Neg)                             \
   1288   V(Float64ExtractLowWord32, kArm64Float64ExtractLowWord32)   \
   1289   V(Float64ExtractHighWord32, kArm64Float64ExtractHighWord32) \
   1290   V(Float64SilenceNaN, kArm64Float64SilenceNaN)
   1291 
   1292 #define RRR_OP_LIST(V)            \
   1293   V(Int32Div, kArm64Idiv32)       \
   1294   V(Int64Div, kArm64Idiv)         \
   1295   V(Uint32Div, kArm64Udiv32)      \
   1296   V(Uint64Div, kArm64Udiv)        \
   1297   V(Int32Mod, kArm64Imod32)       \
   1298   V(Int64Mod, kArm64Imod)         \
   1299   V(Uint32Mod, kArm64Umod32)      \
   1300   V(Uint64Mod, kArm64Umod)        \
   1301   V(Float32Add, kArm64Float32Add) \
   1302   V(Float64Add, kArm64Float64Add) \
   1303   V(Float32Sub, kArm64Float32Sub) \
   1304   V(Float64Sub, kArm64Float64Sub) \
   1305   V(Float32Mul, kArm64Float32Mul) \
   1306   V(Float64Mul, kArm64Float64Mul) \
   1307   V(Float32Div, kArm64Float32Div) \
   1308   V(Float64Div, kArm64Float64Div) \
   1309   V(Float32Max, kArm64Float32Max) \
   1310   V(Float64Max, kArm64Float64Max) \
   1311   V(Float32Min, kArm64Float32Min) \
   1312   V(Float64Min, kArm64Float64Min)
   1313 
   1314 #define RR_VISITOR(Name, opcode)                      \
   1315   void InstructionSelector::Visit##Name(Node* node) { \
   1316     VisitRR(this, opcode, node);                      \
   1317   }
   1318 RR_OP_LIST(RR_VISITOR)
   1319 #undef RR_VISITOR
   1320 
   1321 #define RRR_VISITOR(Name, opcode)                     \
   1322   void InstructionSelector::Visit##Name(Node* node) { \
   1323     VisitRRR(this, opcode, node);                     \
   1324   }
   1325 RRR_OP_LIST(RRR_VISITOR)
   1326 #undef RRR_VISITOR
   1327 
   1328 void InstructionSelector::VisitWord32Ctz(Node* node) { UNREACHABLE(); }
   1329 
   1330 void InstructionSelector::VisitWord64Ctz(Node* node) { UNREACHABLE(); }
   1331 
   1332 void InstructionSelector::VisitWord64ReverseBytes(Node* node) { UNREACHABLE(); }
   1333 
   1334 void InstructionSelector::VisitWord32ReverseBytes(Node* node) { UNREACHABLE(); }
   1335 
   1336 void InstructionSelector::VisitWord32Popcnt(Node* node) { UNREACHABLE(); }
   1337 
   1338 void InstructionSelector::VisitWord64Popcnt(Node* node) { UNREACHABLE(); }
   1339 
   1340 void InstructionSelector::VisitInt32Add(Node* node) {
   1341   Arm64OperandGenerator g(this);
   1342   Int32BinopMatcher m(node);
   1343   // Select Madd(x, y, z) for Add(Mul(x, y), z).
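           // For example, (a * b) + c can then be emitted as a single
           // "madd w0, wa, wb, wc" instead of a separate mul and add
           // (register names are illustrative only).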
   1344   if (m.left().IsInt32Mul() && CanCover(node, m.left().node())) {
   1345     Int32BinopMatcher mleft(m.left().node());
   1346     // Check multiply can't be later reduced to addition with shift.
   1347     if (LeftShiftForReducedMultiply(&mleft) == 0) {
   1348       Emit(kArm64Madd32, g.DefineAsRegister(node),
   1349            g.UseRegister(mleft.left().node()),
   1350            g.UseRegister(mleft.right().node()),
   1351            g.UseRegister(m.right().node()));
   1352       return;
   1353     }
   1354   }
   1355   // Select Madd(x, y, z) for Add(z, Mul(x, y)).
   1356   if (m.right().IsInt32Mul() && CanCover(node, m.right().node())) {
   1357     Int32BinopMatcher mright(m.right().node());
   1358     // Check multiply can't be later reduced to addition with shift.
   1359     if (LeftShiftForReducedMultiply(&mright) == 0) {
   1360       Emit(kArm64Madd32, g.DefineAsRegister(node),
   1361            g.UseRegister(mright.left().node()),
   1362            g.UseRegister(mright.right().node()),
   1363            g.UseRegister(m.left().node()));
   1364       return;
   1365     }
   1366   }
   1367   VisitAddSub<Int32BinopMatcher>(this, node, kArm64Add32, kArm64Sub32);
   1368 }
   1369 
   1370 
   1371 void InstructionSelector::VisitInt64Add(Node* node) {
   1372   Arm64OperandGenerator g(this);
   1373   Int64BinopMatcher m(node);
   1374   // Select Madd(x, y, z) for Add(Mul(x, y), z).
   1375   if (m.left().IsInt64Mul() && CanCover(node, m.left().node())) {
   1376     Int64BinopMatcher mleft(m.left().node());
   1377     // Check multiply can't be later reduced to addition with shift.
   1378     if (LeftShiftForReducedMultiply(&mleft) == 0) {
   1379       Emit(kArm64Madd, g.DefineAsRegister(node),
   1380            g.UseRegister(mleft.left().node()),
   1381            g.UseRegister(mleft.right().node()),
   1382            g.UseRegister(m.right().node()));
   1383       return;
   1384     }
   1385   }
   1386   // Select Madd(x, y, z) for Add(z, Mul(x, y)).
   1387   if (m.right().IsInt64Mul() && CanCover(node, m.right().node())) {
   1388     Int64BinopMatcher mright(m.right().node());
   1389     // Check multiply can't be later reduced to addition with shift.
   1390     if (LeftShiftForReducedMultiply(&mright) == 0) {
   1391       Emit(kArm64Madd, g.DefineAsRegister(node),
   1392            g.UseRegister(mright.left().node()),
   1393            g.UseRegister(mright.right().node()),
   1394            g.UseRegister(m.left().node()));
   1395       return;
   1396     }
   1397   }
   1398   VisitAddSub<Int64BinopMatcher>(this, node, kArm64Add, kArm64Sub);
   1399 }
   1400 
   1401 
   1402 void InstructionSelector::VisitInt32Sub(Node* node) {
   1403   Arm64OperandGenerator g(this);
   1404   Int32BinopMatcher m(node);
   1405 
   1406   // Select Msub(x, y, a) for Sub(a, Mul(x, y)).
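           // For example, a - (x * y) can then be emitted as a single
           // "msub w0, wx, wy, wa" (register names are illustrative only).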
   1407   if (m.right().IsInt32Mul() && CanCover(node, m.right().node())) {
   1408     Int32BinopMatcher mright(m.right().node());
   1409     // Check multiply can't be later reduced to addition with shift.
   1410     if (LeftShiftForReducedMultiply(&mright) == 0) {
   1411       Emit(kArm64Msub32, g.DefineAsRegister(node),
   1412            g.UseRegister(mright.left().node()),
   1413            g.UseRegister(mright.right().node()),
   1414            g.UseRegister(m.left().node()));
   1415       return;
   1416     }
   1417   }
   1418 
   1419   VisitAddSub<Int32BinopMatcher>(this, node, kArm64Sub32, kArm64Add32);
   1420 }
   1421 
   1422 
   1423 void InstructionSelector::VisitInt64Sub(Node* node) {
   1424   Arm64OperandGenerator g(this);
   1425   Int64BinopMatcher m(node);
   1426 
   1427   // Select Msub(x, y, a) for Sub(a, Mul(x, y)).
   1428   if (m.right().IsInt64Mul() && CanCover(node, m.right().node())) {
   1429     Int64BinopMatcher mright(m.right().node());
   1430     // Check multiply can't be later reduced to addition with shift.
   1431     if (LeftShiftForReducedMultiply(&mright) == 0) {
   1432       Emit(kArm64Msub, g.DefineAsRegister(node),
   1433            g.UseRegister(mright.left().node()),
   1434            g.UseRegister(mright.right().node()),
   1435            g.UseRegister(m.left().node()));
   1436       return;
   1437     }
   1438   }
   1439 
   1440   VisitAddSub<Int64BinopMatcher>(this, node, kArm64Sub, kArm64Add);
   1441 }
   1442 
   1443 namespace {
   1444 
   1445 void EmitInt32MulWithOverflow(InstructionSelector* selector, Node* node,
   1446                               FlagsContinuation* cont) {
   1447   Arm64OperandGenerator g(selector);
   1448   Int32BinopMatcher m(node);
   1449   InstructionOperand result = g.DefineAsRegister(node);
   1450   InstructionOperand left = g.UseRegister(m.left().node());
   1451   InstructionOperand right = g.UseRegister(m.right().node());
   1452   selector->Emit(kArm64Smull, result, left, right);
   1453 
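           // The 32-bit multiplication overflows exactly when the 64-bit smull
           // result differs from its own low 32 bits sign-extended, so the
           // compare below leaves "not equal" meaning overflow.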
   1454   InstructionCode opcode = cont->Encode(kArm64Cmp) |
   1455                            AddressingModeField::encode(kMode_Operand2_R_SXTW);
   1456   if (cont->IsBranch()) {
   1457     selector->Emit(opcode, g.NoOutput(), result, result,
   1458                    g.Label(cont->true_block()), g.Label(cont->false_block()));
   1459   } else if (cont->IsDeoptimize()) {
   1460     InstructionOperand in[] = {result, result};
   1461     selector->EmitDeoptimize(opcode, 0, nullptr, 2, in, cont->kind(),
   1462                              cont->reason(), cont->frame_state());
   1463   } else if (cont->IsSet()) {
   1464     selector->Emit(opcode, g.DefineAsRegister(cont->result()), result, result);
   1465   } else {
   1466     DCHECK(cont->IsTrap());
   1467     selector->Emit(opcode, g.NoOutput(), result, result,
   1468                    g.UseImmediate(cont->trap_id()));
   1469   }
   1470 }
   1471 
   1472 }  // namespace
   1473 
   1474 void InstructionSelector::VisitInt32Mul(Node* node) {
   1475   Arm64OperandGenerator g(this);
   1476   Int32BinopMatcher m(node);
   1477 
   1478   // First, try to reduce the multiplication to addition with left shift.
   1479   // x * (2^k + 1) -> x + (x << k)
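           // For example, x * 9 == x * (2^3 + 1), which the Emit below turns
           // into "add w0, wx, wx, lsl #3" (register names are illustrative).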
   1480   int32_t shift = LeftShiftForReducedMultiply(&m);
   1481   if (shift > 0) {
   1482     Emit(kArm64Add32 | AddressingModeField::encode(kMode_Operand2_R_LSL_I),
   1483          g.DefineAsRegister(node), g.UseRegister(m.left().node()),
   1484          g.UseRegister(m.left().node()), g.TempImmediate(shift));
   1485     return;
   1486   }
   1487 
   1488   if (m.left().IsInt32Sub() && CanCover(node, m.left().node())) {
   1489     Int32BinopMatcher mleft(m.left().node());
   1490 
   1491     // Select Mneg(x, y) for Mul(Sub(0, x), y).
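             // mneg computes -(x * y) in one instruction, folding away the
             // explicit negation (for example, "mneg w0, wx, wy").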
   1492     if (mleft.left().Is(0)) {
   1493       Emit(kArm64Mneg32, g.DefineAsRegister(node),
   1494            g.UseRegister(mleft.right().node()),
   1495            g.UseRegister(m.right().node()));
   1496       return;
   1497     }
   1498   }
   1499 
   1500   if (m.right().IsInt32Sub() && CanCover(node, m.right().node())) {
   1501     Int32BinopMatcher mright(m.right().node());
   1502 
   1503     // Select Mneg(x, y) for Mul(x, Sub(0, y)).
   1504     if (mright.left().Is(0)) {
   1505       Emit(kArm64Mneg32, g.DefineAsRegister(node),
   1506            g.UseRegister(m.left().node()),
   1507            g.UseRegister(mright.right().node()));
   1508       return;
   1509     }
   1510   }
   1511 
   1512   VisitRRR(this, kArm64Mul32, node);
   1513 }
   1514 
   1515 
   1516 void InstructionSelector::VisitInt64Mul(Node* node) {
   1517   Arm64OperandGenerator g(this);
   1518   Int64BinopMatcher m(node);
   1519 
   1520   // First, try to reduce the multiplication to addition with left shift.
   1521   // x * (2^k + 1) -> x + (x << k)
   1522   int32_t shift = LeftShiftForReducedMultiply(&m);
   1523   if (shift > 0) {
   1524     Emit(kArm64Add | AddressingModeField::encode(kMode_Operand2_R_LSL_I),
   1525          g.DefineAsRegister(node), g.UseRegister(m.left().node()),
   1526          g.UseRegister(m.left().node()), g.TempImmediate(shift));
   1527     return;
   1528   }
   1529 
   1530   if (m.left().IsInt64Sub() && CanCover(node, m.left().node())) {
   1531     Int64BinopMatcher mleft(m.left().node());
   1532 
   1533     // Select Mneg(x, y) for Mul(Sub(0, x), y).
   1534     if (mleft.left().Is(0)) {
   1535       Emit(kArm64Mneg, g.DefineAsRegister(node),
   1536            g.UseRegister(mleft.right().node()),
   1537            g.UseRegister(m.right().node()));
   1538       return;
   1539     }
   1540   }
   1541 
   1542   if (m.right().IsInt64Sub() && CanCover(node, m.right().node())) {
   1543     Int64BinopMatcher mright(m.right().node());
   1544 
   1545     // Select Mneg(x, y) for Mul(x, Sub(0, y)).
   1546     if (mright.left().Is(0)) {
   1547       Emit(kArm64Mneg, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
   1548            g.UseRegister(mright.right().node()));
   1549       return;
   1550     }
   1551   }
   1552 
   1553   VisitRRR(this, kArm64Mul, node);
   1554 }
   1555 
   1556 void InstructionSelector::VisitInt32MulHigh(Node* node) {
   1557   Arm64OperandGenerator g(this);
   1558   InstructionOperand const smull_operand = g.TempRegister();
   1559   Emit(kArm64Smull, smull_operand, g.UseRegister(node->InputAt(0)),
   1560        g.UseRegister(node->InputAt(1)));
   1561   Emit(kArm64Asr, g.DefineAsRegister(node), smull_operand, g.TempImmediate(32));
   1562 }
   1563 
   1564 
   1565 void InstructionSelector::VisitUint32MulHigh(Node* node) {
   1566   Arm64OperandGenerator g(this);
   1567   InstructionOperand const smull_operand = g.TempRegister();
   1568   Emit(kArm64Umull, smull_operand, g.UseRegister(node->InputAt(0)),
   1569        g.UseRegister(node->InputAt(1)));
   1570   Emit(kArm64Lsr, g.DefineAsRegister(node), smull_operand, g.TempImmediate(32));
   1571 }
   1572 
   1573 
   1574 void InstructionSelector::VisitTryTruncateFloat32ToInt64(Node* node) {
   1575   Arm64OperandGenerator g(this);
   1576 
   1577   InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
   1578   InstructionOperand outputs[2];
   1579   size_t output_count = 0;
   1580   outputs[output_count++] = g.DefineAsRegister(node);
   1581 
   1582   Node* success_output = NodeProperties::FindProjection(node, 1);
   1583   if (success_output) {
   1584     outputs[output_count++] = g.DefineAsRegister(success_output);
   1585   }
   1586 
   1587   Emit(kArm64Float32ToInt64, output_count, outputs, 1, inputs);
   1588 }
   1589 
   1590 
   1591 void InstructionSelector::VisitTryTruncateFloat64ToInt64(Node* node) {
   1592   Arm64OperandGenerator g(this);
   1593 
   1594   InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
   1595   InstructionOperand outputs[2];
   1596   size_t output_count = 0;
   1597   outputs[output_count++] = g.DefineAsRegister(node);
   1598 
   1599   Node* success_output = NodeProperties::FindProjection(node, 1);
   1600   if (success_output) {
   1601     outputs[output_count++] = g.DefineAsRegister(success_output);
   1602   }
   1603 
   1604   Emit(kArm64Float64ToInt64, output_count, outputs, 1, inputs);
   1605 }
   1606 
   1607 
   1608 void InstructionSelector::VisitTryTruncateFloat32ToUint64(Node* node) {
   1609   Arm64OperandGenerator g(this);
   1610 
   1611   InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
   1612   InstructionOperand outputs[2];
   1613   size_t output_count = 0;
   1614   outputs[output_count++] = g.DefineAsRegister(node);
   1615 
   1616   Node* success_output = NodeProperties::FindProjection(node, 1);
   1617   if (success_output) {
   1618     outputs[output_count++] = g.DefineAsRegister(success_output);
   1619   }
   1620 
   1621   Emit(kArm64Float32ToUint64, output_count, outputs, 1, inputs);
   1622 }
   1623 
   1624 
   1625 void InstructionSelector::VisitTryTruncateFloat64ToUint64(Node* node) {
   1626   Arm64OperandGenerator g(this);
   1627 
   1628   InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
   1629   InstructionOperand outputs[2];
   1630   size_t output_count = 0;
   1631   outputs[output_count++] = g.DefineAsRegister(node);
   1632 
   1633   Node* success_output = NodeProperties::FindProjection(node, 1);
   1634   if (success_output) {
   1635     outputs[output_count++] = g.DefineAsRegister(success_output);
   1636   }
   1637 
   1638   Emit(kArm64Float64ToUint64, output_count, outputs, 1, inputs);
   1639 }
   1640 
   1641 
   1642 void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
   1643   Node* value = node->InputAt(0);
   1644   if (value->opcode() == IrOpcode::kLoad && CanCover(node, value)) {
   1645     // Generate sign-extending load.
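             // For example, a kWord32 load feeding this node is emitted as a
             // single ldrsw instead of a separate ldr plus sxtw.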
   1646     LoadRepresentation load_rep = LoadRepresentationOf(value->op());
   1647     MachineRepresentation rep = load_rep.representation();
   1648     InstructionCode opcode = kArchNop;
   1649     ImmediateMode immediate_mode = kNoImmediate;
   1650     switch (rep) {
   1651       case MachineRepresentation::kBit:  // Fall through.
   1652       case MachineRepresentation::kWord8:
   1653         opcode = load_rep.IsSigned() ? kArm64Ldrsb : kArm64Ldrb;
   1654         immediate_mode = kLoadStoreImm8;
   1655         break;
   1656       case MachineRepresentation::kWord16:
   1657         opcode = load_rep.IsSigned() ? kArm64Ldrsh : kArm64Ldrh;
   1658         immediate_mode = kLoadStoreImm16;
   1659         break;
   1660       case MachineRepresentation::kWord32:
   1661         opcode = kArm64Ldrsw;
   1662         immediate_mode = kLoadStoreImm32;
   1663         break;
   1664       default:
   1665         UNREACHABLE();
   1666         return;
   1667     }
   1668     EmitLoad(this, value, opcode, immediate_mode, rep, node);
   1669   } else {
   1670     VisitRR(this, kArm64Sxtw, node);
   1671   }
   1672 }
   1673 
   1674 
   1675 void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
   1676   Arm64OperandGenerator g(this);
   1677   Node* value = node->InputAt(0);
   1678   switch (value->opcode()) {
   1679     case IrOpcode::kWord32And:
   1680     case IrOpcode::kWord32Or:
   1681     case IrOpcode::kWord32Xor:
   1682     case IrOpcode::kWord32Shl:
   1683     case IrOpcode::kWord32Shr:
   1684     case IrOpcode::kWord32Sar:
   1685     case IrOpcode::kWord32Ror:
   1686     case IrOpcode::kWord32Equal:
   1687     case IrOpcode::kInt32Add:
   1688     case IrOpcode::kInt32AddWithOverflow:
   1689     case IrOpcode::kInt32Sub:
   1690     case IrOpcode::kInt32SubWithOverflow:
   1691     case IrOpcode::kInt32Mul:
   1692     case IrOpcode::kInt32MulHigh:
   1693     case IrOpcode::kInt32Div:
   1694     case IrOpcode::kInt32Mod:
   1695     case IrOpcode::kInt32LessThan:
   1696     case IrOpcode::kInt32LessThanOrEqual:
   1697     case IrOpcode::kUint32Div:
   1698     case IrOpcode::kUint32LessThan:
   1699     case IrOpcode::kUint32LessThanOrEqual:
   1700     case IrOpcode::kUint32Mod:
   1701     case IrOpcode::kUint32MulHigh: {
   1702       // 32-bit operations will write their result in a W register (implicitly
    1703       // clearing the top 32 bits of the corresponding X register), so the
   1704       // zero-extension is a no-op.
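               // For example, "add w0, w1, w2" already writes zeros to bits
               // 63:32 of x0, so no explicit zero-extension is needed here.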
   1705       Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
   1706       return;
   1707     }
   1708     case IrOpcode::kLoad: {
   1709       // As for the operations above, a 32-bit load will implicitly clear the
   1710       // top 32 bits of the destination register.
   1711       LoadRepresentation load_rep = LoadRepresentationOf(value->op());
   1712       switch (load_rep.representation()) {
   1713         case MachineRepresentation::kWord8:
   1714         case MachineRepresentation::kWord16:
   1715         case MachineRepresentation::kWord32:
   1716           Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
   1717           return;
   1718         default:
   1719           break;
   1720       }
   1721     }
   1722     default:
   1723       break;
   1724   }
   1725   Emit(kArm64Mov32, g.DefineAsRegister(node), g.UseRegister(value));
   1726 }
   1727 
   1728 void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
   1729   Arm64OperandGenerator g(this);
   1730   Node* value = node->InputAt(0);
   1731   // The top 32 bits in the 64-bit register will be undefined, and
   1732   // must not be used by a dependent node.
   1733   Emit(kArchNop, g.DefineSameAsFirst(node), g.UseRegister(value));
   1734 }
   1735 
   1736 void InstructionSelector::VisitFloat64Mod(Node* node) {
   1737   Arm64OperandGenerator g(this);
   1738   Emit(kArm64Float64Mod, g.DefineAsFixed(node, d0),
   1739        g.UseFixed(node->InputAt(0), d0),
   1740        g.UseFixed(node->InputAt(1), d1))->MarkAsCall();
   1741 }
   1742 
   1743 void InstructionSelector::VisitFloat64Ieee754Binop(Node* node,
   1744                                                    InstructionCode opcode) {
   1745   Arm64OperandGenerator g(this);
   1746   Emit(opcode, g.DefineAsFixed(node, d0), g.UseFixed(node->InputAt(0), d0),
   1747        g.UseFixed(node->InputAt(1), d1))
   1748       ->MarkAsCall();
   1749 }
   1750 
   1751 void InstructionSelector::VisitFloat64Ieee754Unop(Node* node,
   1752                                                   InstructionCode opcode) {
   1753   Arm64OperandGenerator g(this);
   1754   Emit(opcode, g.DefineAsFixed(node, d0), g.UseFixed(node->InputAt(0), d0))
   1755       ->MarkAsCall();
   1756 }
   1757 
   1758 void InstructionSelector::EmitPrepareArguments(
   1759     ZoneVector<PushParameter>* arguments, const CallDescriptor* descriptor,
   1760     Node* node) {
   1761   Arm64OperandGenerator g(this);
   1762 
   1763   bool from_native_stack = linkage()->GetIncomingDescriptor()->UseNativeStack();
   1764   bool to_native_stack = descriptor->UseNativeStack();
   1765 
   1766   bool always_claim = to_native_stack != from_native_stack;
   1767 
   1768   int claim_count = static_cast<int>(arguments->size());
   1769   int slot = claim_count - 1;
   1770   // Bump the stack pointer(s).
   1771   if (claim_count > 0 || always_claim) {
   1772     // TODO(titzer): claim and poke probably take small immediates.
   1773     // TODO(titzer): it would be better to bump the csp here only
    1774     //                and emit paired stores with increment for non-C frames.
   1775     ArchOpcode claim = to_native_stack ? kArm64ClaimCSP : kArm64ClaimJSSP;
   1776     // Claim(0) isn't a nop if there is a mismatch between CSP and JSSP.
   1777     Emit(claim, g.NoOutput(), g.TempImmediate(claim_count));
   1778   }
   1779 
   1780   // Poke the arguments into the stack.
   1781   ArchOpcode poke = to_native_stack ? kArm64PokeCSP : kArm64PokeJSSP;
   1782   while (slot >= 0) {
   1783     Emit(poke, g.NoOutput(), g.UseRegister((*arguments)[slot].node()),
   1784          g.TempImmediate(slot));
   1785     slot--;
   1786     // TODO(ahaas): Poke arguments in pairs if two subsequent arguments have the
   1787     //              same type.
   1788     // Emit(kArm64PokePair, g.NoOutput(), g.UseRegister((*arguments)[slot]),
   1789     //      g.UseRegister((*arguments)[slot - 1]), g.TempImmediate(slot));
   1790     // slot -= 2;
   1791   }
   1792 }
   1793 
   1794 
   1795 bool InstructionSelector::IsTailCallAddressImmediate() { return false; }
   1796 
   1797 int InstructionSelector::GetTempsCountForTailCallFromJSFunction() { return 3; }
   1798 
   1799 namespace {
   1800 
   1801 // Shared routine for multiple compare operations.
   1802 void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
   1803                   InstructionOperand left, InstructionOperand right,
   1804                   FlagsContinuation* cont) {
   1805   Arm64OperandGenerator g(selector);
   1806   opcode = cont->Encode(opcode);
   1807   if (cont->IsBranch()) {
   1808     selector->Emit(opcode, g.NoOutput(), left, right,
   1809                    g.Label(cont->true_block()), g.Label(cont->false_block()));
   1810   } else if (cont->IsDeoptimize()) {
   1811     selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->kind(),
   1812                              cont->reason(), cont->frame_state());
   1813   } else if (cont->IsSet()) {
   1814     selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
   1815   } else {
   1816     DCHECK(cont->IsTrap());
   1817     selector->Emit(opcode, g.NoOutput(), left, right,
   1818                    g.UseImmediate(cont->trap_id()));
   1819   }
   1820 }
   1821 
   1822 
   1823 // Shared routine for multiple word compare operations.
   1824 void VisitWordCompare(InstructionSelector* selector, Node* node,
   1825                       InstructionCode opcode, FlagsContinuation* cont,
   1826                       bool commutative, ImmediateMode immediate_mode) {
   1827   Arm64OperandGenerator g(selector);
   1828   Node* left = node->InputAt(0);
   1829   Node* right = node->InputAt(1);
   1830 
   1831   // Match immediates on left or right side of comparison.
   1832   if (g.CanBeImmediate(right, immediate_mode)) {
   1833     VisitCompare(selector, opcode, g.UseRegister(left), g.UseImmediate(right),
   1834                  cont);
   1835   } else if (g.CanBeImmediate(left, immediate_mode)) {
   1836     if (!commutative) cont->Commute();
   1837     VisitCompare(selector, opcode, g.UseRegister(right), g.UseImmediate(left),
   1838                  cont);
   1839   } else {
   1840     VisitCompare(selector, opcode, g.UseRegister(left), g.UseRegister(right),
   1841                  cont);
   1842   }
   1843 }
   1844 
   1845 // This function checks whether we can convert:
   1846 // ((a <op> b) cmp 0), b.<cond>
   1847 // to:
   1848 // (a <ops> b), b.<cond'>
   1849 // where <ops> is the flag setting version of <op>.
   1850 // We only generate conditions <cond'> that are a combination of the N
   1851 // and Z flags. This avoids the need to make this function dependent on
   1852 // the flag-setting operation.
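         // For example, ((a + b) == 0) followed by b.eq can instead be emitted
         // as "cmn a, b" (an add that only sets flags) followed by b.eq.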
   1853 bool CanUseFlagSettingBinop(FlagsCondition cond) {
   1854   switch (cond) {
   1855     case kEqual:
   1856     case kNotEqual:
   1857     case kSignedLessThan:
   1858     case kSignedGreaterThanOrEqual:
   1859     case kUnsignedLessThanOrEqual:  // x <= 0 -> x == 0
   1860     case kUnsignedGreaterThan:      // x > 0 -> x != 0
   1861       return true;
   1862     default:
   1863       return false;
   1864   }
   1865 }
   1866 
   1867 // Map <cond> to <cond'> so that the following transformation is possible:
   1868 // ((a <op> b) cmp 0), b.<cond>
   1869 // to:
   1870 // (a <ops> b), b.<cond'>
   1871 // where <ops> is the flag setting version of <op>.
   1872 FlagsCondition MapForFlagSettingBinop(FlagsCondition cond) {
   1873   DCHECK(CanUseFlagSettingBinop(cond));
   1874   switch (cond) {
   1875     case kEqual:
   1876     case kNotEqual:
   1877       return cond;
   1878     case kSignedLessThan:
   1879       return kNegative;
   1880     case kSignedGreaterThanOrEqual:
   1881       return kPositiveOrZero;
   1882     case kUnsignedLessThanOrEqual:  // x <= 0 -> x == 0
   1883       return kEqual;
   1884     case kUnsignedGreaterThan:  // x > 0 -> x != 0
   1885       return kNotEqual;
   1886     default:
   1887       UNREACHABLE();
   1888       return cond;
   1889   }
   1890 }
   1891 
   1892 // This function checks if we can perform the transformation:
   1893 // ((a <op> b) cmp 0), b.<cond>
   1894 // to:
   1895 // (a <ops> b), b.<cond'>
   1896 // where <ops> is the flag setting version of <op>, and if so,
   1897 // updates {node}, {opcode} and {cont} accordingly.
   1898 void MaybeReplaceCmpZeroWithFlagSettingBinop(InstructionSelector* selector,
   1899                                              Node** node, Node* binop,
   1900                                              ArchOpcode* opcode,
   1901                                              FlagsCondition cond,
   1902                                              FlagsContinuation* cont,
   1903                                              ImmediateMode* immediate_mode) {
   1904   ArchOpcode binop_opcode;
   1905   ArchOpcode no_output_opcode;
   1906   ImmediateMode binop_immediate_mode;
   1907   switch (binop->opcode()) {
   1908     case IrOpcode::kInt32Add:
   1909       binop_opcode = kArm64Add32;
   1910       no_output_opcode = kArm64Cmn32;
   1911       binop_immediate_mode = kArithmeticImm;
   1912       break;
   1913     case IrOpcode::kWord32And:
   1914       binop_opcode = kArm64And32;
   1915       no_output_opcode = kArm64Tst32;
   1916       binop_immediate_mode = kLogical32Imm;
   1917       break;
   1918     default:
   1919       UNREACHABLE();
   1920       return;
   1921   }
   1922   if (selector->CanCover(*node, binop)) {
   1923     // The comparison is the only user of the add or and, so we can generate
   1924     // a cmn or tst instead.
   1925     cont->Overwrite(MapForFlagSettingBinop(cond));
   1926     *opcode = no_output_opcode;
   1927     *node = binop;
   1928     *immediate_mode = binop_immediate_mode;
   1929   } else if (selector->IsOnlyUserOfNodeInSameBlock(*node, binop)) {
   1930     // We can also handle the case where the add and the compare are in the
    1931     // same basic block, and the compare is the only use of the add in this basic
   1932     // block (the add has users in other basic blocks).
   1933     cont->Overwrite(MapForFlagSettingBinop(cond));
   1934     *opcode = binop_opcode;
   1935     *node = binop;
   1936     *immediate_mode = binop_immediate_mode;
   1937   }
   1938 }
   1939 
   1940 // Map {cond} to kEqual or kNotEqual, so that we can select
   1941 // either TBZ or TBNZ when generating code for:
   1942 // (x cmp 0), b.{cond}
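         // For example, (x < 0) is true exactly when bit 31 of x is set, so the
         // caller can emit "tbnz wx, #31, <true_block>".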
   1943 FlagsCondition MapForTbz(FlagsCondition cond) {
   1944   switch (cond) {
   1945     case kSignedLessThan:  // generate TBNZ
   1946       return kNotEqual;
   1947     case kSignedGreaterThanOrEqual:  // generate TBZ
   1948       return kEqual;
   1949     default:
   1950       UNREACHABLE();
   1951       return cond;
   1952   }
   1953 }
   1954 
   1955 // Map {cond} to kEqual or kNotEqual, so that we can select
   1956 // either CBZ or CBNZ when generating code for:
   1957 // (x cmp 0), b.{cond}
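         // For example, an unsigned (x <= 0) is the same test as (x == 0), so a
         // single "cbz wx, <true_block>" suffices.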
   1958 FlagsCondition MapForCbz(FlagsCondition cond) {
   1959   switch (cond) {
   1960     case kEqual:     // generate CBZ
   1961     case kNotEqual:  // generate CBNZ
   1962       return cond;
   1963     case kUnsignedLessThanOrEqual:  // generate CBZ
   1964       return kEqual;
   1965     case kUnsignedGreaterThan:  // generate CBNZ
   1966       return kNotEqual;
   1967     default:
   1968       UNREACHABLE();
   1969       return cond;
   1970   }
   1971 }
   1972 
   1973 void EmitBranchOrDeoptimize(InstructionSelector* selector,
   1974                             InstructionCode opcode, InstructionOperand value,
   1975                             FlagsContinuation* cont) {
   1976   Arm64OperandGenerator g(selector);
   1977   if (cont->IsBranch()) {
   1978     selector->Emit(cont->Encode(opcode), g.NoOutput(), value,
   1979                    g.Label(cont->true_block()), g.Label(cont->false_block()));
   1980   } else {
   1981     DCHECK(cont->IsDeoptimize());
   1982     selector->EmitDeoptimize(cont->Encode(opcode), g.NoOutput(), value,
   1983                              cont->kind(), cont->reason(), cont->frame_state());
   1984   }
   1985 }
   1986 
   1987 // Try to emit TBZ, TBNZ, CBZ or CBNZ for certain comparisons of {node}
   1988 // against zero, depending on the condition.
   1989 bool TryEmitCbzOrTbz(InstructionSelector* selector, Node* node, Node* user,
   1990                      FlagsCondition cond, FlagsContinuation* cont) {
   1991   Int32BinopMatcher m_user(user);
   1992   USE(m_user);
   1993   DCHECK(m_user.right().Is(0) || m_user.left().Is(0));
   1994 
   1995   // Only handle branches and deoptimisations.
   1996   if (!cont->IsBranch() && !cont->IsDeoptimize()) return false;
   1997 
   1998   switch (cond) {
   1999     case kSignedLessThan:
   2000     case kSignedGreaterThanOrEqual: {
   2001       // We don't generate TBZ/TBNZ for deoptimisations, as they have a
   2002       // shorter range than conditional branches and generating them for
   2003       // deoptimisations results in more veneers.
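               // (TB(N)Z can only branch +/-32KB, versus +/-1MB for B.cond.)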
   2004       if (cont->IsDeoptimize()) return false;
   2005       Arm64OperandGenerator g(selector);
   2006       cont->Overwrite(MapForTbz(cond));
   2007       Int32Matcher m(node);
   2008       if (m.IsFloat64ExtractHighWord32() && selector->CanCover(user, node)) {
   2009         // SignedLessThan(Float64ExtractHighWord32(x), 0) and
   2010         // SignedGreaterThanOrEqual(Float64ExtractHighWord32(x), 0) essentially
   2011         // check the sign bit of a 64-bit floating point value.
   2012         InstructionOperand temp = g.TempRegister();
   2013         selector->Emit(kArm64U64MoveFloat64, temp,
   2014                        g.UseRegister(node->InputAt(0)));
   2015         selector->Emit(cont->Encode(kArm64TestAndBranch), g.NoOutput(), temp,
   2016                        g.TempImmediate(63), g.Label(cont->true_block()),
   2017                        g.Label(cont->false_block()));
   2018         return true;
   2019       }
   2020       selector->Emit(cont->Encode(kArm64TestAndBranch32), g.NoOutput(),
   2021                      g.UseRegister(node), g.TempImmediate(31),
   2022                      g.Label(cont->true_block()), g.Label(cont->false_block()));
   2023       return true;
   2024     }
   2025     case kEqual:
   2026     case kNotEqual:
   2027     case kUnsignedLessThanOrEqual:
   2028     case kUnsignedGreaterThan: {
   2029       Arm64OperandGenerator g(selector);
   2030       cont->Overwrite(MapForCbz(cond));
   2031       EmitBranchOrDeoptimize(selector, kArm64CompareAndBranch32,
   2032                              g.UseRegister(node), cont);
   2033       return true;
   2034     }
   2035     default:
   2036       return false;
   2037   }
   2038 }
   2039 
   2040 void VisitWord32Compare(InstructionSelector* selector, Node* node,
   2041                         FlagsContinuation* cont) {
   2042   Int32BinopMatcher m(node);
   2043   ArchOpcode opcode = kArm64Cmp32;
   2044   FlagsCondition cond = cont->condition();
   2045   if (m.right().Is(0)) {
   2046     if (TryEmitCbzOrTbz(selector, m.left().node(), node, cond, cont)) return;
   2047   } else if (m.left().Is(0)) {
   2048     FlagsCondition commuted_cond = CommuteFlagsCondition(cond);
   2049     if (TryEmitCbzOrTbz(selector, m.right().node(), node, commuted_cond, cont))
   2050       return;
   2051   }
   2052   ImmediateMode immediate_mode = kArithmeticImm;
   2053   if (m.right().Is(0) && (m.left().IsInt32Add() || m.left().IsWord32And())) {
   2054     // Emit flag setting add/and instructions for comparisons against zero.
   2055     if (CanUseFlagSettingBinop(cond)) {
   2056       Node* binop = m.left().node();
   2057       MaybeReplaceCmpZeroWithFlagSettingBinop(selector, &node, binop, &opcode,
   2058                                               cond, cont, &immediate_mode);
   2059     }
   2060   } else if (m.left().Is(0) &&
   2061              (m.right().IsInt32Add() || m.right().IsWord32And())) {
   2062     // Same as above, but we need to commute the condition before we
   2063     // continue with the rest of the checks.
   2064     FlagsCondition commuted_cond = CommuteFlagsCondition(cond);
   2065     if (CanUseFlagSettingBinop(commuted_cond)) {
   2066       Node* binop = m.right().node();
   2067       MaybeReplaceCmpZeroWithFlagSettingBinop(selector, &node, binop, &opcode,
   2068                                               commuted_cond, cont,
   2069                                               &immediate_mode);
   2070     }
   2071   } else if (m.right().IsInt32Sub() && (cond == kEqual || cond == kNotEqual)) {
   2072     // Select negated compare for comparisons with negated right input.
   2073     // Only do this for kEqual and kNotEqual, which do not depend on the
   2074     // C and V flags, as those flags will be different with CMN when the
   2075     // right-hand side of the original subtraction is INT_MIN.
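             // For example, (x == (0 - y)) holds exactly when x + y == 0, so it
             // can be emitted as "cmn wx, wy" followed by a b.eq or cset eq.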
   2076     Node* sub = m.right().node();
   2077     Int32BinopMatcher msub(sub);
   2078     if (msub.left().Is(0)) {
   2079       bool can_cover = selector->CanCover(node, sub);
   2080       node->ReplaceInput(1, msub.right().node());
   2081       // Even if the comparison node covers the subtraction, after the input
   2082       // replacement above, the node still won't cover the input to the
   2083       // subtraction; the subtraction still uses it.
   2084       // In order to get shifted operations to work, we must remove the rhs
   2085       // input to the subtraction, as TryMatchAnyShift requires this node to
   2086       // cover the input shift. We do this by setting it to the lhs input,
   2087       // as we know it's zero, and the result of the subtraction isn't used by
   2088       // any other node.
   2089       if (can_cover) sub->ReplaceInput(1, msub.left().node());
   2090       opcode = kArm64Cmn32;
   2091     }
   2092   }
   2093   VisitBinop<Int32BinopMatcher>(selector, node, opcode, immediate_mode, cont);
   2094 }
   2095 
   2096 
   2097 void VisitWordTest(InstructionSelector* selector, Node* node,
   2098                    InstructionCode opcode, FlagsContinuation* cont) {
   2099   Arm64OperandGenerator g(selector);
   2100   VisitCompare(selector, opcode, g.UseRegister(node), g.UseRegister(node),
   2101                cont);
   2102 }
   2103 
   2104 
   2105 void VisitWord32Test(InstructionSelector* selector, Node* node,
   2106                      FlagsContinuation* cont) {
   2107   VisitWordTest(selector, node, kArm64Tst32, cont);
   2108 }
   2109 
   2110 
   2111 void VisitWord64Test(InstructionSelector* selector, Node* node,
   2112                      FlagsContinuation* cont) {
   2113   VisitWordTest(selector, node, kArm64Tst, cont);
   2114 }
   2115 
   2116 template <typename Matcher, ArchOpcode kOpcode>
   2117 bool TryEmitTestAndBranch(InstructionSelector* selector, Node* node,
   2118                           FlagsContinuation* cont) {
   2119   Arm64OperandGenerator g(selector);
   2120   Matcher m(node);
   2121   if (cont->IsBranch() && m.right().HasValue() &&
   2122       (base::bits::CountPopulation(m.right().Value()) == 1)) {
   2123     // If the mask has only one bit set, we can use tbz/tbnz.
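             // For example, testing (x & 0x8) branches on bit 3 alone, so we
             // can emit "tbnz wx, #3, <true_block>" instead of tst plus b.ne.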
   2124     DCHECK((cont->condition() == kEqual) || (cont->condition() == kNotEqual));
   2125     selector->Emit(
   2126         cont->Encode(kOpcode), g.NoOutput(), g.UseRegister(m.left().node()),
   2127         g.TempImmediate(base::bits::CountTrailingZeros(m.right().Value())),
   2128         g.Label(cont->true_block()), g.Label(cont->false_block()));
   2129     return true;
   2130   }
   2131   return false;
   2132 }
   2133 
   2134 // Shared routine for multiple float32 compare operations.
   2135 void VisitFloat32Compare(InstructionSelector* selector, Node* node,
   2136                          FlagsContinuation* cont) {
   2137   Arm64OperandGenerator g(selector);
   2138   Float32BinopMatcher m(node);
   2139   if (m.right().Is(0.0f)) {
   2140     VisitCompare(selector, kArm64Float32Cmp, g.UseRegister(m.left().node()),
   2141                  g.UseImmediate(m.right().node()), cont);
   2142   } else if (m.left().Is(0.0f)) {
   2143     cont->Commute();
   2144     VisitCompare(selector, kArm64Float32Cmp, g.UseRegister(m.right().node()),
   2145                  g.UseImmediate(m.left().node()), cont);
   2146   } else {
   2147     VisitCompare(selector, kArm64Float32Cmp, g.UseRegister(m.left().node()),
   2148                  g.UseRegister(m.right().node()), cont);
   2149   }
   2150 }
   2151 
   2152 
   2153 // Shared routine for multiple float64 compare operations.
   2154 void VisitFloat64Compare(InstructionSelector* selector, Node* node,
   2155                          FlagsContinuation* cont) {
   2156   Arm64OperandGenerator g(selector);
   2157   Float64BinopMatcher m(node);
   2158   if (m.right().Is(0.0)) {
   2159     VisitCompare(selector, kArm64Float64Cmp, g.UseRegister(m.left().node()),
   2160                  g.UseImmediate(m.right().node()), cont);
   2161   } else if (m.left().Is(0.0)) {
   2162     cont->Commute();
   2163     VisitCompare(selector, kArm64Float64Cmp, g.UseRegister(m.right().node()),
   2164                  g.UseImmediate(m.left().node()), cont);
   2165   } else {
   2166     VisitCompare(selector, kArm64Float64Cmp, g.UseRegister(m.left().node()),
   2167                  g.UseRegister(m.right().node()), cont);
   2168   }
   2169 }
   2170 
   2171 void VisitWordCompareZero(InstructionSelector* selector, Node* user,
   2172                           Node* value, FlagsContinuation* cont) {
   2173   Arm64OperandGenerator g(selector);
   2174   // Try to combine with comparisons against 0 by simply inverting the branch.
   2175   while (value->opcode() == IrOpcode::kWord32Equal &&
   2176          selector->CanCover(user, value)) {
   2177     Int32BinopMatcher m(value);
   2178     if (!m.right().Is(0)) break;
   2179 
   2180     user = value;
   2181     value = m.left().node();
   2182     cont->Negate();
   2183   }
   2184 
   2185   if (selector->CanCover(user, value)) {
   2186     switch (value->opcode()) {
   2187       case IrOpcode::kWord32Equal:
   2188         cont->OverwriteAndNegateIfEqual(kEqual);
   2189         return VisitWord32Compare(selector, value, cont);
   2190       case IrOpcode::kInt32LessThan:
   2191         cont->OverwriteAndNegateIfEqual(kSignedLessThan);
   2192         return VisitWord32Compare(selector, value, cont);
   2193       case IrOpcode::kInt32LessThanOrEqual:
   2194         cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
   2195         return VisitWord32Compare(selector, value, cont);
   2196       case IrOpcode::kUint32LessThan:
   2197         cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
   2198         return VisitWord32Compare(selector, value, cont);
   2199       case IrOpcode::kUint32LessThanOrEqual:
   2200         cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
   2201         return VisitWord32Compare(selector, value, cont);
   2202       case IrOpcode::kWord64Equal: {
   2203         cont->OverwriteAndNegateIfEqual(kEqual);
   2204         Int64BinopMatcher m(value);
   2205         if (m.right().Is(0)) {
   2206           Node* const left = m.left().node();
   2207           if (selector->CanCover(value, left) &&
   2208               left->opcode() == IrOpcode::kWord64And) {
   2209             // Attempt to merge the Word64Equal(Word64And(x, y), 0) comparison
   2210             // into a tbz/tbnz instruction.
   2211             if (TryEmitTestAndBranch<Uint64BinopMatcher, kArm64TestAndBranch>(
   2212                     selector, left, cont)) {
   2213               return;
   2214             }
   2215             return VisitWordCompare(selector, left, kArm64Tst, cont, true,
   2216                                     kLogical64Imm);
   2217           }
   2218           // Merge the Word64Equal(x, 0) comparison into a cbz instruction.
   2219           if (cont->IsBranch() || cont->IsDeoptimize()) {
   2220             EmitBranchOrDeoptimize(selector,
   2221                                    cont->Encode(kArm64CompareAndBranch),
   2222                                    g.UseRegister(left), cont);
   2223             return;
   2224           }
   2225         }
   2226         return VisitWordCompare(selector, value, kArm64Cmp, cont, false,
   2227                                 kArithmeticImm);
   2228       }
   2229       case IrOpcode::kInt64LessThan:
   2230         cont->OverwriteAndNegateIfEqual(kSignedLessThan);
   2231         return VisitWordCompare(selector, value, kArm64Cmp, cont, false,
   2232                                 kArithmeticImm);
   2233       case IrOpcode::kInt64LessThanOrEqual:
   2234         cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
   2235         return VisitWordCompare(selector, value, kArm64Cmp, cont, false,
   2236                                 kArithmeticImm);
   2237       case IrOpcode::kUint64LessThan:
   2238         cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
   2239         return VisitWordCompare(selector, value, kArm64Cmp, cont, false,
   2240                                 kArithmeticImm);
   2241       case IrOpcode::kUint64LessThanOrEqual:
   2242         cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
   2243         return VisitWordCompare(selector, value, kArm64Cmp, cont, false,
   2244                                 kArithmeticImm);
   2245       case IrOpcode::kFloat32Equal:
   2246         cont->OverwriteAndNegateIfEqual(kEqual);
   2247         return VisitFloat32Compare(selector, value, cont);
   2248       case IrOpcode::kFloat32LessThan:
   2249         cont->OverwriteAndNegateIfEqual(kFloatLessThan);
   2250         return VisitFloat32Compare(selector, value, cont);
   2251       case IrOpcode::kFloat32LessThanOrEqual:
   2252         cont->OverwriteAndNegateIfEqual(kFloatLessThanOrEqual);
   2253         return VisitFloat32Compare(selector, value, cont);
   2254       case IrOpcode::kFloat64Equal:
   2255         cont->OverwriteAndNegateIfEqual(kEqual);
   2256         return VisitFloat64Compare(selector, value, cont);
   2257       case IrOpcode::kFloat64LessThan:
   2258         cont->OverwriteAndNegateIfEqual(kFloatLessThan);
   2259         return VisitFloat64Compare(selector, value, cont);
   2260       case IrOpcode::kFloat64LessThanOrEqual:
   2261         cont->OverwriteAndNegateIfEqual(kFloatLessThanOrEqual);
   2262         return VisitFloat64Compare(selector, value, cont);
   2263       case IrOpcode::kProjection:
   2264         // Check if this is the overflow output projection of an
   2265         // <Operation>WithOverflow node.
   2266         if (ProjectionIndexOf(value->op()) == 1u) {
    2267           // We can only combine the <Operation>WithOverflow with this branch
    2268           // if the 0th projection (the use of the actual value of the
    2269           // <Operation>) is either nullptr, which means there's no use of the
    2270           // actual value, or was already defined, which means it is scheduled
    2271           // *AFTER* this branch.
   2272           Node* const node = value->InputAt(0);
   2273           Node* const result = NodeProperties::FindProjection(node, 0);
   2274           if (result == nullptr || selector->IsDefined(result)) {
   2275             switch (node->opcode()) {
   2276               case IrOpcode::kInt32AddWithOverflow:
   2277                 cont->OverwriteAndNegateIfEqual(kOverflow);
   2278                 return VisitBinop<Int32BinopMatcher>(
   2279                     selector, node, kArm64Add32, kArithmeticImm, cont);
   2280               case IrOpcode::kInt32SubWithOverflow:
   2281                 cont->OverwriteAndNegateIfEqual(kOverflow);
   2282                 return VisitBinop<Int32BinopMatcher>(
   2283                     selector, node, kArm64Sub32, kArithmeticImm, cont);
   2284               case IrOpcode::kInt32MulWithOverflow:
   2285                 // ARM64 doesn't set the overflow flag for multiplication, so we
   2286                 // need to test on kNotEqual. Here is the code sequence used:
   2287                 //   smull result, left, right
   2288                 //   cmp result.X(), Operand(result, SXTW)
   2289                 cont->OverwriteAndNegateIfEqual(kNotEqual);
   2290                 return EmitInt32MulWithOverflow(selector, node, cont);
   2291               case IrOpcode::kInt64AddWithOverflow:
   2292                 cont->OverwriteAndNegateIfEqual(kOverflow);
   2293                 return VisitBinop<Int64BinopMatcher>(selector, node, kArm64Add,
   2294                                                      kArithmeticImm, cont);
   2295               case IrOpcode::kInt64SubWithOverflow:
   2296                 cont->OverwriteAndNegateIfEqual(kOverflow);
   2297                 return VisitBinop<Int64BinopMatcher>(selector, node, kArm64Sub,
   2298                                                      kArithmeticImm, cont);
   2299               default:
   2300                 break;
   2301             }
   2302           }
   2303         }
   2304         break;
   2305       case IrOpcode::kInt32Add:
   2306         return VisitWordCompare(selector, value, kArm64Cmn32, cont, true,
   2307                                 kArithmeticImm);
   2308       case IrOpcode::kInt32Sub:
   2309         return VisitWord32Compare(selector, value, cont);
   2310       case IrOpcode::kWord32And:
   2311         if (TryEmitTestAndBranch<Uint32BinopMatcher, kArm64TestAndBranch32>(
   2312                 selector, value, cont)) {
   2313           return;
   2314         }
   2315         return VisitWordCompare(selector, value, kArm64Tst32, cont, true,
   2316                                 kLogical32Imm);
   2317       case IrOpcode::kWord64And:
   2318         if (TryEmitTestAndBranch<Uint64BinopMatcher, kArm64TestAndBranch>(
   2319                 selector, value, cont)) {
   2320           return;
   2321         }
   2322         return VisitWordCompare(selector, value, kArm64Tst, cont, true,
   2323                                 kLogical64Imm);
   2324       default:
   2325         break;
   2326     }
   2327   }
   2328 
    2329   // The branch could not be combined with a compare, so compare against zero and branch.
   2330   if (cont->IsBranch()) {
   2331     selector->Emit(cont->Encode(kArm64CompareAndBranch32), g.NoOutput(),
   2332                    g.UseRegister(value), g.Label(cont->true_block()),
   2333                    g.Label(cont->false_block()));
   2334   } else if (cont->IsDeoptimize()) {
   2335     selector->EmitDeoptimize(cont->Encode(kArm64Tst32), g.NoOutput(),
   2336                              g.UseRegister(value), g.UseRegister(value),
   2337                              cont->kind(), cont->reason(), cont->frame_state());
   2338   } else {
   2339     DCHECK(cont->IsTrap());
   2340     selector->Emit(cont->Encode(kArm64Tst32), g.NoOutput(),
   2341                    g.UseRegister(value), g.UseRegister(value),
   2342                    g.UseImmediate(cont->trap_id()));
   2343   }
   2344 }
   2345 
   2346 }  // namespace
   2347 
   2348 void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
   2349                                       BasicBlock* fbranch) {
   2350   FlagsContinuation cont(kNotEqual, tbranch, fbranch);
   2351   VisitWordCompareZero(this, branch, branch->InputAt(0), &cont);
   2352 }
   2353 
   2354 void InstructionSelector::VisitDeoptimizeIf(Node* node) {
   2355   DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
   2356   FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
   2357       kNotEqual, p.kind(), p.reason(), node->InputAt(1));
   2358   VisitWordCompareZero(this, node, node->InputAt(0), &cont);
   2359 }
   2360 
   2361 void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
   2362   DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
   2363   FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
   2364       kEqual, p.kind(), p.reason(), node->InputAt(1));
   2365   VisitWordCompareZero(this, node, node->InputAt(0), &cont);
   2366 }
   2367 
   2368 void InstructionSelector::VisitTrapIf(Node* node, Runtime::FunctionId func_id) {
   2369   FlagsContinuation cont =
   2370       FlagsContinuation::ForTrap(kNotEqual, func_id, node->InputAt(1));
   2371   VisitWordCompareZero(this, node, node->InputAt(0), &cont);
   2372 }
   2373 
   2374 void InstructionSelector::VisitTrapUnless(Node* node,
   2375                                           Runtime::FunctionId func_id) {
   2376   FlagsContinuation cont =
   2377       FlagsContinuation::ForTrap(kEqual, func_id, node->InputAt(1));
   2378   VisitWordCompareZero(this, node, node->InputAt(0), &cont);
   2379 }
   2380 
   2381 void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
   2382   Arm64OperandGenerator g(this);
   2383   InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
   2384 
   2385   // Emit either ArchTableSwitch or ArchLookupSwitch.
   2386   size_t table_space_cost = 4 + sw.value_range;
   2387   size_t table_time_cost = 3;
   2388   size_t lookup_space_cost = 3 + 2 * sw.case_count;
   2389   size_t lookup_time_cost = sw.case_count;
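           // A worked example, assuming value_range is max - min + 1: four
           // cases spanning the values 0..9 give a table cost of (4 + 10) +
           // 3 * 3 = 23 and a lookup cost of (3 + 2 * 4) + 3 * 4 = 23, so the
           // table switch is (just barely) chosen.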
   2390   if (sw.case_count > 0 &&
   2391       table_space_cost + 3 * table_time_cost <=
   2392           lookup_space_cost + 3 * lookup_time_cost &&
   2393       sw.min_value > std::numeric_limits<int32_t>::min()) {
   2394     InstructionOperand index_operand = value_operand;
   2395     if (sw.min_value) {
   2396       index_operand = g.TempRegister();
   2397       Emit(kArm64Sub32, index_operand, value_operand,
   2398            g.TempImmediate(sw.min_value));
   2399     }
   2400     // Generate a table lookup.
   2401     return EmitTableSwitch(sw, index_operand);
   2402   }
   2403 
   2404   // Generate a sequence of conditional jumps.
   2405   return EmitLookupSwitch(sw, value_operand);
   2406 }
   2407 
   2408 
   2409 void InstructionSelector::VisitWord32Equal(Node* const node) {
   2410   Node* const user = node;
   2411   FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
   2412   Int32BinopMatcher m(user);
   2413   if (m.right().Is(0)) {
   2414     Node* const value = m.left().node();
   2415     if (CanCover(user, value)) {
   2416       switch (value->opcode()) {
   2417         case IrOpcode::kInt32Add:
   2418         case IrOpcode::kWord32And:
   2419           return VisitWord32Compare(this, node, &cont);
   2420         case IrOpcode::kInt32Sub:
   2421           return VisitWordCompare(this, value, kArm64Cmp32, &cont, false,
   2422                                   kArithmeticImm);
   2423         case IrOpcode::kWord32Equal: {
   2424           // Word32Equal(Word32Equal(x, y), 0) => Word32Compare(x, y, ne).
   2425           Int32BinopMatcher mequal(value);
   2426           node->ReplaceInput(0, mequal.left().node());
   2427           node->ReplaceInput(1, mequal.right().node());
   2428           cont.Negate();
   2429           // {node} still does not cover its new operands, because {mequal} is
   2430           // still using them.
   2431           // Since we won't generate any more code for {mequal}, set its
   2432           // operands to zero to make sure {node} can cover them.
   2433           // This improves pattern matching in VisitWord32Compare.
   2434           mequal.node()->ReplaceInput(0, m.right().node());
   2435           mequal.node()->ReplaceInput(1, m.right().node());
   2436           return VisitWord32Compare(this, node, &cont);
   2437         }
   2438         default:
   2439           break;
   2440       }
   2441       return VisitWord32Test(this, value, &cont);
   2442     }
   2443   }
   2444   VisitWord32Compare(this, node, &cont);
   2445 }
   2446 
   2447 
   2448 void InstructionSelector::VisitInt32LessThan(Node* node) {
   2449   FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
   2450   VisitWord32Compare(this, node, &cont);
   2451 }
   2452 
   2453 
   2454 void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
   2455   FlagsContinuation cont =
   2456       FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
   2457   VisitWord32Compare(this, node, &cont);
   2458 }
   2459 
   2460 
   2461 void InstructionSelector::VisitUint32LessThan(Node* node) {
   2462   FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
   2463   VisitWord32Compare(this, node, &cont);
   2464 }
   2465 
   2466 
   2467 void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
   2468   FlagsContinuation cont =
   2469       FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
   2470   VisitWord32Compare(this, node, &cont);
   2471 }
   2472 
   2473 
void InstructionSelector::VisitWord64Equal(Node* const node) {
  Node* const user = node;
  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
  Int64BinopMatcher m(user);
  if (m.right().Is(0)) {
    Node* const value = m.left().node();
    if (CanCover(user, value)) {
      switch (value->opcode()) {
        case IrOpcode::kWord64And:
          return VisitWordCompare(this, value, kArm64Tst, &cont, true,
                                  kLogical64Imm);
        default:
          break;
      }
      return VisitWord64Test(this, value, &cont);
    }
  }
  VisitWordCompare(this, node, kArm64Cmp, &cont, false, kArithmeticImm);
}


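// The *WithOverflow visitors attach an overflow flags continuation only when
// the overflow projection is actually used; otherwise the plain arithmetic
// instruction is emitted.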
void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
    return VisitBinop<Int32BinopMatcher>(this, node, kArm64Add32,
                                         kArithmeticImm, &cont);
  }
  FlagsContinuation cont;
  VisitBinop<Int32BinopMatcher>(this, node, kArm64Add32, kArithmeticImm, &cont);
}


void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
    return VisitBinop<Int32BinopMatcher>(this, node, kArm64Sub32,
                                         kArithmeticImm, &cont);
  }
  FlagsContinuation cont;
  VisitBinop<Int32BinopMatcher>(this, node, kArm64Sub32, kArithmeticImm, &cont);
}

void InstructionSelector::VisitInt32MulWithOverflow(Node* node) {
  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
    // ARM64 doesn't set the overflow flag for multiplication, so we need to
    // test on kNotEqual. Here is the code sequence used:
    //   smull result, left, right
    //   cmp result.X(), Operand(result, SXTW)
    FlagsContinuation cont = FlagsContinuation::ForSet(kNotEqual, ovf);
    return EmitInt32MulWithOverflow(this, node, &cont);
  }
  FlagsContinuation cont;
  EmitInt32MulWithOverflow(this, node, &cont);
}

void InstructionSelector::VisitInt64AddWithOverflow(Node* node) {
  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
    return VisitBinop<Int64BinopMatcher>(this, node, kArm64Add, kArithmeticImm,
                                         &cont);
  }
  FlagsContinuation cont;
  VisitBinop<Int64BinopMatcher>(this, node, kArm64Add, kArithmeticImm, &cont);
}


void InstructionSelector::VisitInt64SubWithOverflow(Node* node) {
  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
    return VisitBinop<Int64BinopMatcher>(this, node, kArm64Sub, kArithmeticImm,
                                         &cont);
  }
  FlagsContinuation cont;
  VisitBinop<Int64BinopMatcher>(this, node, kArm64Sub, kArithmeticImm, &cont);
}


void InstructionSelector::VisitInt64LessThan(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
  VisitWordCompare(this, node, kArm64Cmp, &cont, false, kArithmeticImm);
}


void InstructionSelector::VisitInt64LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
  VisitWordCompare(this, node, kArm64Cmp, &cont, false, kArithmeticImm);
}


void InstructionSelector::VisitUint64LessThan(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
  VisitWordCompare(this, node, kArm64Cmp, &cont, false, kArithmeticImm);
}


void InstructionSelector::VisitUint64LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
  VisitWordCompare(this, node, kArm64Cmp, &cont, false, kArithmeticImm);
}


void InstructionSelector::VisitFloat32Equal(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
  VisitFloat32Compare(this, node, &cont);
}


void InstructionSelector::VisitFloat32LessThan(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kFloatLessThan, node);
  VisitFloat32Compare(this, node, &cont);
}


void InstructionSelector::VisitFloat32LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kFloatLessThanOrEqual, node);
  VisitFloat32Compare(this, node, &cont);
}


void InstructionSelector::VisitFloat64Equal(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
  VisitFloat64Compare(this, node, &cont);
}


void InstructionSelector::VisitFloat64LessThan(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kFloatLessThan, node);
  VisitFloat64Compare(this, node, &cont);
}


void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kFloatLessThanOrEqual, node);
  VisitFloat64Compare(this, node, &cont);
}

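// Float64InsertLowWord32(Float64InsertHighWord32(x, hi), lo) is combined into
// a single bitfield insert (bfi) of the two 32-bit halves followed by one move
// into a double register, instead of two separate insert instructions.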
void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) {
  Arm64OperandGenerator g(this);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);
  if (left->opcode() == IrOpcode::kFloat64InsertHighWord32 &&
      CanCover(node, left)) {
    Node* right_of_left = left->InputAt(1);
    Emit(kArm64Bfi, g.DefineSameAsFirst(right), g.UseRegister(right),
         g.UseRegister(right_of_left), g.TempImmediate(32),
         g.TempImmediate(32));
    Emit(kArm64Float64MoveU64, g.DefineAsRegister(node), g.UseRegister(right));
    return;
  }
  Emit(kArm64Float64InsertLowWord32, g.DefineAsRegister(node),
       g.UseRegister(left), g.UseRegister(right));
}


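// Symmetric to the low-word case above: a covered Float64InsertLowWord32 input
// is combined via bfi and a single move into a double register.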
void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
  Arm64OperandGenerator g(this);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);
  if (left->opcode() == IrOpcode::kFloat64InsertLowWord32 &&
      CanCover(node, left)) {
    Node* right_of_left = left->InputAt(1);
    Emit(kArm64Bfi, g.DefineSameAsFirst(left), g.UseRegister(right_of_left),
         g.UseRegister(right), g.TempImmediate(32), g.TempImmediate(32));
    Emit(kArm64Float64MoveU64, g.DefineAsRegister(node), g.UseRegister(left));
    return;
  }
  Emit(kArm64Float64InsertHighWord32, g.DefineAsRegister(node),
       g.UseRegister(left), g.UseRegister(right));
}

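// Atomic loads select an opcode from the access size and signedness of the
// load representation and are emitted with a [base, index] (MRR) addressing
// mode.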
void InstructionSelector::VisitAtomicLoad(Node* node) {
  LoadRepresentation load_rep = LoadRepresentationOf(node->op());
  Arm64OperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  ArchOpcode opcode = kArchNop;
  switch (load_rep.representation()) {
    case MachineRepresentation::kWord8:
      opcode = load_rep.IsSigned() ? kAtomicLoadInt8 : kAtomicLoadUint8;
      break;
    case MachineRepresentation::kWord16:
      opcode = load_rep.IsSigned() ? kAtomicLoadInt16 : kAtomicLoadUint16;
      break;
    case MachineRepresentation::kWord32:
      opcode = kAtomicLoadWord32;
      break;
    default:
      UNREACHABLE();
      return;
  }
  Emit(opcode | AddressingModeField::encode(kMode_MRR),
       g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(index));
}

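// Atomic stores produce no outputs; base, index and value are all placed in
// unique registers and the instruction uses the MRR addressing mode.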
void InstructionSelector::VisitAtomicStore(Node* node) {
  MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
  Arm64OperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* value = node->InputAt(2);
  ArchOpcode opcode = kArchNop;
  switch (rep) {
    case MachineRepresentation::kWord8:
      opcode = kAtomicStoreWord8;
      break;
    case MachineRepresentation::kWord16:
      opcode = kAtomicStoreWord16;
      break;
    case MachineRepresentation::kWord32:
      opcode = kAtomicStoreWord32;
      break;
    default:
      UNREACHABLE();
      return;
  }

  AddressingMode addressing_mode = kMode_MRR;
  InstructionOperand inputs[3];
  size_t input_count = 0;
  inputs[input_count++] = g.UseUniqueRegister(base);
  inputs[input_count++] = g.UseUniqueRegister(index);
  inputs[input_count++] = g.UseUniqueRegister(value);
  InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
  Emit(code, 0, nullptr, input_count, inputs);
}

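// ARM64 has native instructions for all of the rounding modes and for bit
// reversal, and its shifts and integer divisions have well-defined behaviour
// for out-of-range shift amounts and division by zero, so the corresponding
// machine operators are reported as supported.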
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
  return MachineOperatorBuilder::kFloat32RoundDown |
         MachineOperatorBuilder::kFloat64RoundDown |
         MachineOperatorBuilder::kFloat32RoundUp |
         MachineOperatorBuilder::kFloat64RoundUp |
         MachineOperatorBuilder::kFloat32RoundTruncate |
         MachineOperatorBuilder::kFloat64RoundTruncate |
         MachineOperatorBuilder::kFloat64RoundTiesAway |
         MachineOperatorBuilder::kFloat32RoundTiesEven |
         MachineOperatorBuilder::kFloat64RoundTiesEven |
         MachineOperatorBuilder::kWord32ShiftIsSafe |
         MachineOperatorBuilder::kInt32DivIsSafe |
         MachineOperatorBuilder::kUint32DivIsSafe |
         MachineOperatorBuilder::kWord32ReverseBits |
         MachineOperatorBuilder::kWord64ReverseBits;
}

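// Ordinary loads and stores on ARM64 support unaligned addresses, so no extra
// alignment requirements are imposed.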
// static
MachineOperatorBuilder::AlignmentRequirements
InstructionSelector::AlignmentRequirements() {
  return MachineOperatorBuilder::AlignmentRequirements::
      FullUnalignedAccessSupport();
}

}  // namespace compiler
}  // namespace internal
}  // namespace v8