// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <algorithm>

#include "src/base/adapters.h"
#include "src/compiler/instruction-selector-impl.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"

namespace v8 {
namespace internal {
namespace compiler {

// Adds X64-specific methods for generating operands.
class X64OperandGenerator final : public OperandGenerator {
 public:
  explicit X64OperandGenerator(InstructionSelector* selector)
      : OperandGenerator(selector) {}

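  // x64 instructions encode at most a 32-bit immediate that is sign-extended
  // to 64 bits at run time, so a 64-bit constant can only be used as an
  // immediate if it survives the int64 -> int32 -> int64 round trip.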
  bool CanBeImmediate(Node* node) {
    switch (node->opcode()) {
      case IrOpcode::kInt32Constant:
      case IrOpcode::kRelocatableInt32Constant:
        return true;
      case IrOpcode::kInt64Constant: {
        const int64_t value = OpParameter<int64_t>(node);
        return value == static_cast<int64_t>(static_cast<int32_t>(value));
      }
      case IrOpcode::kNumberConstant: {
        const double value = OpParameter<double>(node);
        return bit_cast<int64_t>(value) == 0;
      }
      default:
        return false;
    }
  }

  int32_t GetImmediateIntegerValue(Node* node) {
    DCHECK(CanBeImmediate(node));
    if (node->opcode() == IrOpcode::kInt32Constant) {
      return OpParameter<int32_t>(node);
    }
    DCHECK_EQ(IrOpcode::kInt64Constant, node->opcode());
    return static_cast<int32_t>(OpParameter<int64_t>(node));
  }

  bool CanBeMemoryOperand(InstructionCode opcode, Node* node, Node* input,
                          int effect_level) {
    if (input->opcode() != IrOpcode::kLoad ||
        !selector()->CanCover(node, input)) {
      return false;
    }
    if (effect_level != selector()->GetEffectLevel(input)) {
      return false;
    }
    MachineRepresentation rep =
        LoadRepresentationOf(input->op()).representation();
    switch (opcode) {
      case kX64Push:
      case kX64Cmp:
      case kX64Test:
        return rep == MachineRepresentation::kWord64 || IsAnyTagged(rep);
      case kX64Cmp32:
      case kX64Test32:
        return rep == MachineRepresentation::kWord32;
      case kX64Cmp16:
      case kX64Test16:
        return rep == MachineRepresentation::kWord16;
      case kX64Cmp8:
      case kX64Test8:
        return rep == MachineRepresentation::kWord8;
      default:
        break;
    }
    return false;
  }

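  // Assembles the operand list for an x86 [base + index*scale + displacement]
  // effective address and returns the matching AddressingMode; the scale
  // exponent 0..3 corresponds to the SIB scale factors 1, 2, 4 and 8.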
  AddressingMode GenerateMemoryOperandInputs(Node* index, int scale_exponent,
                                             Node* base, Node* displacement,
                                             DisplacementMode displacement_mode,
                                             InstructionOperand inputs[],
                                             size_t* input_count) {
    AddressingMode mode = kMode_MRI;
    if (base != nullptr && (index != nullptr || displacement != nullptr)) {
      if (base->opcode() == IrOpcode::kInt32Constant &&
          OpParameter<int32_t>(base) == 0) {
        base = nullptr;
      } else if (base->opcode() == IrOpcode::kInt64Constant &&
                 OpParameter<int64_t>(base) == 0) {
        base = nullptr;
      }
    }
    if (base != nullptr) {
      inputs[(*input_count)++] = UseRegister(base);
      if (index != nullptr) {
        DCHECK(scale_exponent >= 0 && scale_exponent <= 3);
        inputs[(*input_count)++] = UseRegister(index);
        if (displacement != nullptr) {
          inputs[(*input_count)++] = displacement_mode == kNegativeDisplacement
                                         ? UseNegatedImmediate(displacement)
                                         : UseImmediate(displacement);
          static const AddressingMode kMRnI_modes[] = {kMode_MR1I, kMode_MR2I,
                                                       kMode_MR4I, kMode_MR8I};
          mode = kMRnI_modes[scale_exponent];
        } else {
          static const AddressingMode kMRn_modes[] = {kMode_MR1, kMode_MR2,
                                                      kMode_MR4, kMode_MR8};
          mode = kMRn_modes[scale_exponent];
        }
      } else {
        if (displacement == nullptr) {
          mode = kMode_MR;
        } else {
          inputs[(*input_count)++] = displacement_mode == kNegativeDisplacement
                                         ? UseNegatedImmediate(displacement)
                                         : UseImmediate(displacement);
          mode = kMode_MRI;
        }
      }
    } else {
      DCHECK(scale_exponent >= 0 && scale_exponent <= 3);
      if (displacement != nullptr) {
        if (index == nullptr) {
          inputs[(*input_count)++] = UseRegister(displacement);
          mode = kMode_MR;
        } else {
          inputs[(*input_count)++] = UseRegister(index);
          inputs[(*input_count)++] = displacement_mode == kNegativeDisplacement
                                         ? UseNegatedImmediate(displacement)
                                         : UseImmediate(displacement);
          static const AddressingMode kMnI_modes[] = {kMode_MRI, kMode_M2I,
                                                      kMode_M4I, kMode_M8I};
          mode = kMnI_modes[scale_exponent];
        }
      } else {
        inputs[(*input_count)++] = UseRegister(index);
        static const AddressingMode kMn_modes[] = {kMode_MR, kMode_MR1,
                                                   kMode_M4, kMode_M8};
        mode = kMn_modes[scale_exponent];
        if (mode == kMode_MR1) {
          // [%r1 + %r1*1] has a smaller encoding than [%r1*2+0]
          inputs[(*input_count)++] = UseRegister(index);
        }
      }
    }
    return mode;
  }

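  // Tries to express |operand| with one of the x64 addressing modes: first as
  // an int32 offset from the roots register, then via the matched
  // base/index/scale/displacement, falling back to a plain [r1 + r2] mode.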
  AddressingMode GetEffectiveAddressMemoryOperand(Node* operand,
                                                  InstructionOperand inputs[],
                                                  size_t* input_count) {
    if (selector()->CanAddressRelativeToRootsRegister()) {
      LoadMatcher<ExternalReferenceMatcher> m(operand);
      if (m.index().HasValue() && m.object().HasValue()) {
        Address const kRootsRegisterValue =
            kRootRegisterBias +
            reinterpret_cast<Address>(
                selector()->isolate()->heap()->roots_array_start());
        ptrdiff_t const delta =
            m.index().Value() +
            (m.object().Value().address() - kRootsRegisterValue);
        if (is_int32(delta)) {
          inputs[(*input_count)++] = TempImmediate(static_cast<int32_t>(delta));
          return kMode_Root;
        }
      }
    }
    BaseWithIndexAndDisplacement64Matcher m(operand, AddressOption::kAllowAll);
    DCHECK(m.matches());
    if (m.displacement() == nullptr || CanBeImmediate(m.displacement())) {
      return GenerateMemoryOperandInputs(
          m.index(), m.scale(), m.base(), m.displacement(),
          m.displacement_mode(), inputs, input_count);
    } else if (m.base() == nullptr &&
               m.displacement_mode() == kPositiveDisplacement) {
      // The displacement cannot be an immediate, but we can use the
      // displacement as base instead and still benefit from addressing
      // modes for the scale.
      return GenerateMemoryOperandInputs(m.index(), m.scale(), m.displacement(),
                                         nullptr, m.displacement_mode(), inputs,
                                         input_count);
    } else {
      inputs[(*input_count)++] = UseRegister(operand->InputAt(0));
      inputs[(*input_count)++] = UseRegister(operand->InputAt(1));
      return kMode_MR1;
    }
  }

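  // A node that is not live past this use may serve as the left operand of a
  // destructive two-address instruction, since clobbering it costs nothing.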
  bool CanBeBetterLeftOperand(Node* node) const {
    return !selector()->IsLive(node);
  }
};

namespace {
ArchOpcode GetLoadOpcode(LoadRepresentation load_rep) {
  ArchOpcode opcode = kArchNop;
  switch (load_rep.representation()) {
    case MachineRepresentation::kFloat32:
      opcode = kX64Movss;
      break;
    case MachineRepresentation::kFloat64:
      opcode = kX64Movsd;
      break;
    case MachineRepresentation::kBit:  // Fall through.
    case MachineRepresentation::kWord8:
      opcode = load_rep.IsSigned() ? kX64Movsxbl : kX64Movzxbl;
      break;
    case MachineRepresentation::kWord16:
      opcode = load_rep.IsSigned() ? kX64Movsxwl : kX64Movzxwl;
      break;
    case MachineRepresentation::kWord32:
      opcode = kX64Movl;
      break;
    case MachineRepresentation::kTaggedSigned:   // Fall through.
    case MachineRepresentation::kTaggedPointer:  // Fall through.
    case MachineRepresentation::kTagged:         // Fall through.
    case MachineRepresentation::kWord64:
      opcode = kX64Movq;
      break;
    case MachineRepresentation::kSimd128:   // Fall through.
    case MachineRepresentation::kSimd1x4:   // Fall through.
    case MachineRepresentation::kSimd1x8:   // Fall through.
    case MachineRepresentation::kSimd1x16:  // Fall through.
    case MachineRepresentation::kNone:
      UNREACHABLE();
      break;
  }
  return opcode;
}

ArchOpcode GetStoreOpcode(StoreRepresentation store_rep) {
  switch (store_rep.representation()) {
    case MachineRepresentation::kFloat32:
      return kX64Movss;
    case MachineRepresentation::kFloat64:
      return kX64Movsd;
    case MachineRepresentation::kBit:  // Fall through.
    case MachineRepresentation::kWord8:
      return kX64Movb;
    case MachineRepresentation::kWord16:
      return kX64Movw;
    case MachineRepresentation::kWord32:
      return kX64Movl;
    case MachineRepresentation::kTaggedSigned:   // Fall through.
    case MachineRepresentation::kTaggedPointer:  // Fall through.
    case MachineRepresentation::kTagged:         // Fall through.
    case MachineRepresentation::kWord64:
      return kX64Movq;
    case MachineRepresentation::kSimd128:   // Fall through.
    case MachineRepresentation::kSimd1x4:   // Fall through.
    case MachineRepresentation::kSimd1x8:   // Fall through.
    case MachineRepresentation::kSimd1x16:  // Fall through.
    case MachineRepresentation::kNone:
      UNREACHABLE();
      return kArchNop;
  }
  UNREACHABLE();
  return kArchNop;
}

}  // namespace

void InstructionSelector::VisitLoad(Node* node) {
  LoadRepresentation load_rep = LoadRepresentationOf(node->op());
  X64OperandGenerator g(this);

  ArchOpcode opcode = GetLoadOpcode(load_rep);
  InstructionOperand outputs[1];
  outputs[0] = g.DefineAsRegister(node);
  InstructionOperand inputs[4];
  size_t input_count = 0;
  AddressingMode mode =
      g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
  InstructionCode code = opcode | AddressingModeField::encode(mode);
  if (node->opcode() == IrOpcode::kProtectedLoad) {
    code |= MiscField::encode(X64MemoryProtection::kProtected);
    // Add the source position as an input.
    inputs[input_count++] = g.UseImmediate(node->InputAt(2));
  }
  Emit(code, 1, outputs, input_count, inputs);
}

void InstructionSelector::VisitProtectedLoad(Node* node) { VisitLoad(node); }

void InstructionSelector::VisitStore(Node* node) {
  X64OperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* value = node->InputAt(2);

  StoreRepresentation store_rep = StoreRepresentationOf(node->op());
  WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();

  if (write_barrier_kind != kNoWriteBarrier) {
    DCHECK(CanBeTaggedPointer(store_rep.representation()));
    AddressingMode addressing_mode;
    InstructionOperand inputs[3];
    size_t input_count = 0;
    inputs[input_count++] = g.UseUniqueRegister(base);
    if (g.CanBeImmediate(index)) {
      inputs[input_count++] = g.UseImmediate(index);
      addressing_mode = kMode_MRI;
    } else {
      inputs[input_count++] = g.UseUniqueRegister(index);
      addressing_mode = kMode_MR1;
    }
    inputs[input_count++] = g.UseUniqueRegister(value);
    RecordWriteMode record_write_mode = RecordWriteMode::kValueIsAny;
    switch (write_barrier_kind) {
      case kNoWriteBarrier:
        UNREACHABLE();
        break;
      case kMapWriteBarrier:
        record_write_mode = RecordWriteMode::kValueIsMap;
        break;
      case kPointerWriteBarrier:
        record_write_mode = RecordWriteMode::kValueIsPointer;
        break;
      case kFullWriteBarrier:
        record_write_mode = RecordWriteMode::kValueIsAny;
        break;
    }
    InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
    size_t const temp_count = arraysize(temps);
    InstructionCode code = kArchStoreWithWriteBarrier;
    code |= AddressingModeField::encode(addressing_mode);
    code |= MiscField::encode(static_cast<int>(record_write_mode));
    Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
  } else {
    ArchOpcode opcode = GetStoreOpcode(store_rep);
    InstructionOperand inputs[4];
    size_t input_count = 0;
    AddressingMode addressing_mode =
        g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
    InstructionCode code =
        opcode | AddressingModeField::encode(addressing_mode);
    if ((ElementSizeLog2Of(store_rep.representation()) < kPointerSizeLog2) &&
        (value->opcode() == IrOpcode::kTruncateInt64ToInt32) &&
        CanCover(node, value)) {
      value = value->InputAt(0);
    }
    InstructionOperand value_operand =
        g.CanBeImmediate(value) ? g.UseImmediate(value) : g.UseRegister(value);
    inputs[input_count++] = value_operand;
    Emit(code, 0, static_cast<InstructionOperand*>(nullptr), input_count,
         inputs);
  }
}

void InstructionSelector::VisitProtectedStore(Node* node) {
  X64OperandGenerator g(this);
  Node* value = node->InputAt(2);
  Node* position = node->InputAt(3);

  StoreRepresentation store_rep = StoreRepresentationOf(node->op());

  ArchOpcode opcode = GetStoreOpcode(store_rep);
  InstructionOperand inputs[5];
  size_t input_count = 0;
  AddressingMode addressing_mode =
      g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
  InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) |
                         MiscField::encode(X64MemoryProtection::kProtected);
  InstructionOperand value_operand =
      g.CanBeImmediate(value) ? g.UseImmediate(value) : g.UseRegister(value);
  inputs[input_count++] = value_operand;
  inputs[input_count++] = g.UseImmediate(position);
  Emit(code, 0, static_cast<InstructionOperand*>(nullptr), input_count, inputs);
}

// Architecture supports unaligned access, therefore VisitLoad is used instead
void InstructionSelector::VisitUnalignedLoad(Node* node) { UNREACHABLE(); }

// Architecture supports unaligned access, therefore VisitStore is used instead
void InstructionSelector::VisitUnalignedStore(Node* node) { UNREACHABLE(); }

void InstructionSelector::VisitCheckedLoad(Node* node) {
  CheckedLoadRepresentation load_rep = CheckedLoadRepresentationOf(node->op());
  X64OperandGenerator g(this);
  Node* const buffer = node->InputAt(0);
  Node* const offset = node->InputAt(1);
  Node* const length = node->InputAt(2);
  ArchOpcode opcode = kArchNop;
  switch (load_rep.representation()) {
    case MachineRepresentation::kWord8:
      opcode = load_rep.IsSigned() ? kCheckedLoadInt8 : kCheckedLoadUint8;
      break;
    case MachineRepresentation::kWord16:
      opcode = load_rep.IsSigned() ? kCheckedLoadInt16 : kCheckedLoadUint16;
      break;
    case MachineRepresentation::kWord32:
      opcode = kCheckedLoadWord32;
      break;
    case MachineRepresentation::kWord64:
      opcode = kCheckedLoadWord64;
      break;
    case MachineRepresentation::kFloat32:
      opcode = kCheckedLoadFloat32;
      break;
    case MachineRepresentation::kFloat64:
      opcode = kCheckedLoadFloat64;
      break;
    case MachineRepresentation::kBit:            // Fall through.
    case MachineRepresentation::kSimd128:        // Fall through.
    case MachineRepresentation::kSimd1x4:        // Fall through.
    case MachineRepresentation::kSimd1x8:        // Fall through.
    case MachineRepresentation::kSimd1x16:       // Fall through.
    case MachineRepresentation::kTaggedSigned:   // Fall through.
    case MachineRepresentation::kTaggedPointer:  // Fall through.
    case MachineRepresentation::kTagged:         // Fall through.
    case MachineRepresentation::kNone:
      UNREACHABLE();
      return;
  }
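  // When the offset is (index + constant) with a nonnegative constant that is
  // within the statically known length, fold the constant into the
  // instruction's immediate and bounds-check only the variable part.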
  if (offset->opcode() == IrOpcode::kInt32Add && CanCover(node, offset)) {
    Int32Matcher mlength(length);
    Int32BinopMatcher moffset(offset);
    if (mlength.HasValue() && moffset.right().HasValue() &&
        moffset.right().Value() >= 0 &&
        mlength.Value() >= moffset.right().Value()) {
      Emit(opcode, g.DefineAsRegister(node), g.UseRegister(buffer),
           g.UseRegister(moffset.left().node()),
           g.UseImmediate(moffset.right().node()), g.UseImmediate(length));
      return;
    }
  }
  InstructionOperand length_operand =
      g.CanBeImmediate(length) ? g.UseImmediate(length) : g.UseRegister(length);
  Emit(opcode, g.DefineAsRegister(node), g.UseRegister(buffer),
       g.UseRegister(offset), g.TempImmediate(0), length_operand);
}


void InstructionSelector::VisitCheckedStore(Node* node) {
  MachineRepresentation rep = CheckedStoreRepresentationOf(node->op());
  X64OperandGenerator g(this);
  Node* const buffer = node->InputAt(0);
  Node* const offset = node->InputAt(1);
  Node* const length = node->InputAt(2);
  Node* const value = node->InputAt(3);
  ArchOpcode opcode = kArchNop;
  switch (rep) {
    case MachineRepresentation::kWord8:
      opcode = kCheckedStoreWord8;
      break;
    case MachineRepresentation::kWord16:
      opcode = kCheckedStoreWord16;
      break;
    case MachineRepresentation::kWord32:
      opcode = kCheckedStoreWord32;
      break;
    case MachineRepresentation::kWord64:
      opcode = kCheckedStoreWord64;
      break;
    case MachineRepresentation::kFloat32:
      opcode = kCheckedStoreFloat32;
      break;
    case MachineRepresentation::kFloat64:
      opcode = kCheckedStoreFloat64;
      break;
    case MachineRepresentation::kBit:            // Fall through.
    case MachineRepresentation::kSimd128:        // Fall through.
    case MachineRepresentation::kSimd1x4:        // Fall through.
    case MachineRepresentation::kSimd1x8:        // Fall through.
    case MachineRepresentation::kSimd1x16:       // Fall through.
    case MachineRepresentation::kTaggedSigned:   // Fall through.
    case MachineRepresentation::kTaggedPointer:  // Fall through.
    case MachineRepresentation::kTagged:         // Fall through.
    case MachineRepresentation::kNone:
      UNREACHABLE();
      return;
  }
  InstructionOperand value_operand =
      g.CanBeImmediate(value) ? g.UseImmediate(value) : g.UseRegister(value);
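  // Same (index + constant) offset folding as in VisitCheckedLoad above.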
  if (offset->opcode() == IrOpcode::kInt32Add && CanCover(node, offset)) {
    Int32Matcher mlength(length);
    Int32BinopMatcher moffset(offset);
    if (mlength.HasValue() && moffset.right().HasValue() &&
        moffset.right().Value() >= 0 &&
        mlength.Value() >= moffset.right().Value()) {
      Emit(opcode, g.NoOutput(), g.UseRegister(buffer),
           g.UseRegister(moffset.left().node()),
           g.UseImmediate(moffset.right().node()), g.UseImmediate(length),
           value_operand);
      return;
    }
  }
  InstructionOperand length_operand =
      g.CanBeImmediate(length) ? g.UseImmediate(length) : g.UseRegister(length);
  Emit(opcode, g.NoOutput(), g.UseRegister(buffer), g.UseRegister(offset),
       g.TempImmediate(0), length_operand, value_operand);
}


// Shared routine for multiple binary operations.
static void VisitBinop(InstructionSelector* selector, Node* node,
                       InstructionCode opcode, FlagsContinuation* cont) {
  X64OperandGenerator g(selector);
  Int32BinopMatcher m(node);
  Node* left = m.left().node();
  Node* right = m.right().node();
  InstructionOperand inputs[4];
  size_t input_count = 0;
  InstructionOperand outputs[2];
  size_t output_count = 0;

  // TODO(turbofan): match complex addressing modes.
  if (left == right) {
    // If both inputs refer to the same operand, enforce allocating a register
    // for both of them to ensure that we don't end up generating code like
    // this:
    //
    //   mov rax, [rbp-0x10]
    //   add rax, [rbp-0x10]
    //   jo label
    InstructionOperand const input = g.UseRegister(left);
    inputs[input_count++] = input;
    inputs[input_count++] = input;
  } else if (g.CanBeImmediate(right)) {
    inputs[input_count++] = g.UseRegister(left);
    inputs[input_count++] = g.UseImmediate(right);
  } else {
    if (node->op()->HasProperty(Operator::kCommutative) &&
        g.CanBeBetterLeftOperand(right)) {
      std::swap(left, right);
    }
    inputs[input_count++] = g.UseRegister(left);
    inputs[input_count++] = g.Use(right);
  }

  if (cont->IsBranch()) {
    inputs[input_count++] = g.Label(cont->true_block());
    inputs[input_count++] = g.Label(cont->false_block());
  }

  outputs[output_count++] = g.DefineSameAsFirst(node);
  if (cont->IsSet()) {
    outputs[output_count++] = g.DefineAsRegister(cont->result());
  }

  DCHECK_NE(0u, input_count);
  DCHECK_NE(0u, output_count);
  DCHECK_GE(arraysize(inputs), input_count);
  DCHECK_GE(arraysize(outputs), output_count);

  opcode = cont->Encode(opcode);
  if (cont->IsDeoptimize()) {
    selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
                             cont->kind(), cont->reason(), cont->frame_state());
  } else {
    selector->Emit(opcode, output_count, outputs, input_count, inputs);
  }
}


// Shared routine for multiple binary operations.
static void VisitBinop(InstructionSelector* selector, Node* node,
                       InstructionCode opcode) {
  FlagsContinuation cont;
  VisitBinop(selector, node, opcode, &cont);
}

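// Masking with 0xff or 0xffff zero-extends the low byte/word, so it can be
// selected as a movzxbl/movzxwl instead of an andl with a mask.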
void InstructionSelector::VisitWord32And(Node* node) {
  X64OperandGenerator g(this);
  Uint32BinopMatcher m(node);
  if (m.right().Is(0xff)) {
    Emit(kX64Movzxbl, g.DefineAsRegister(node), g.Use(m.left().node()));
  } else if (m.right().Is(0xffff)) {
    Emit(kX64Movzxwl, g.DefineAsRegister(node), g.Use(m.left().node()));
  } else {
    VisitBinop(this, node, kX64And32);
  }
}


void InstructionSelector::VisitWord64And(Node* node) {
  VisitBinop(this, node, kX64And);
}


void InstructionSelector::VisitWord32Or(Node* node) {
  VisitBinop(this, node, kX64Or32);
}


void InstructionSelector::VisitWord64Or(Node* node) {
  VisitBinop(this, node, kX64Or);
}


void InstructionSelector::VisitWord32Xor(Node* node) {
  X64OperandGenerator g(this);
  Uint32BinopMatcher m(node);
  if (m.right().Is(-1)) {
    Emit(kX64Not32, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()));
  } else {
    VisitBinop(this, node, kX64Xor32);
  }
}


void InstructionSelector::VisitWord64Xor(Node* node) {
  X64OperandGenerator g(this);
  Uint64BinopMatcher m(node);
  if (m.right().Is(-1)) {
    Emit(kX64Not, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()));
  } else {
    VisitBinop(this, node, kX64Xor);
  }
}


namespace {

// Shared routine for multiple 32-bit shift operations.
// TODO(bmeurer): Merge this with VisitWord64Shift using template magic?
void VisitWord32Shift(InstructionSelector* selector, Node* node,
                      ArchOpcode opcode) {
  X64OperandGenerator g(selector);
  Int32BinopMatcher m(node);
  Node* left = m.left().node();
  Node* right = m.right().node();

  if (g.CanBeImmediate(right)) {
    selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
                   g.UseImmediate(right));
  } else {
    selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
                   g.UseFixed(right, rcx));
  }
}


// Shared routine for multiple 64-bit shift operations.
// TODO(bmeurer): Merge this with VisitWord32Shift using template magic?
void VisitWord64Shift(InstructionSelector* selector, Node* node,
                      ArchOpcode opcode) {
  X64OperandGenerator g(selector);
  Int64BinopMatcher m(node);
  Node* left = m.left().node();
  Node* right = m.right().node();

  if (g.CanBeImmediate(right)) {
    selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
                   g.UseImmediate(right));
  } else {
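    // x64 masks 64-bit shift counts to 6 bits in hardware, so an explicit
    // "count & 0x3F" on the shift amount is redundant and can be peeled off.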
    if (m.right().IsWord64And()) {
      Int64BinopMatcher mright(right);
      if (mright.right().Is(0x3F)) {
        right = mright.left().node();
      }
    }
    selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
                   g.UseFixed(right, rcx));
  }
}

void EmitLea(InstructionSelector* selector, InstructionCode opcode,
             Node* result, Node* index, int scale, Node* base,
             Node* displacement, DisplacementMode displacement_mode) {
  X64OperandGenerator g(selector);

  InstructionOperand inputs[4];
  size_t input_count = 0;
  AddressingMode mode =
      g.GenerateMemoryOperandInputs(index, scale, base, displacement,
                                    displacement_mode, inputs, &input_count);

  DCHECK_NE(0u, input_count);
  DCHECK_GE(arraysize(inputs), input_count);

  InstructionOperand outputs[1];
  outputs[0] = g.DefineAsRegister(result);

  opcode = AddressingModeField::encode(mode) | opcode;

  selector->Emit(opcode, 1, outputs, input_count, inputs);
}

}  // namespace


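// A shift left by 1..3 can also be expressed as a leal/leaq with a scaled
// index (e.g. x << 2 becomes leal (,%reg,4), %dst), which leaves the flags
// untouched and allows a destination distinct from the source.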
void InstructionSelector::VisitWord32Shl(Node* node) {
  Int32ScaleMatcher m(node, true);
  if (m.matches()) {
    Node* index = node->InputAt(0);
    Node* base = m.power_of_two_plus_one() ? index : nullptr;
    EmitLea(this, kX64Lea32, node, index, m.scale(), base, nullptr,
            kPositiveDisplacement);
    return;
  }
  VisitWord32Shift(this, node, kX64Shl32);
}


void InstructionSelector::VisitWord64Shl(Node* node) {
  X64OperandGenerator g(this);
  Int64ScaleMatcher m(node, true);
  if (m.matches()) {
    Node* index = node->InputAt(0);
    Node* base = m.power_of_two_plus_one() ? index : nullptr;
    EmitLea(this, kX64Lea, node, index, m.scale(), base, nullptr,
            kPositiveDisplacement);
    return;
  } else {
    Int64BinopMatcher m(node);
    if ((m.left().IsChangeInt32ToInt64() ||
         m.left().IsChangeUint32ToUint64()) &&
        m.right().IsInRange(32, 63)) {
      // There's no need to sign/zero-extend to 64-bit if we shift out the upper
      // 32 bits anyway.
      Emit(kX64Shl, g.DefineSameAsFirst(node),
           g.UseRegister(m.left().node()->InputAt(0)),
           g.UseImmediate(m.right().node()));
      return;
    }
  }
  VisitWord64Shift(this, node, kX64Shl);
}


void InstructionSelector::VisitWord32Shr(Node* node) {
  VisitWord32Shift(this, node, kX64Shr32);
}

namespace {
bool TryMatchLoadWord64AndShiftRight(InstructionSelector* selector, Node* node,
                                     InstructionCode opcode) {
  DCHECK(IrOpcode::kWord64Sar == node->opcode() ||
         IrOpcode::kWord64Shr == node->opcode());
  X64OperandGenerator g(selector);
  Int64BinopMatcher m(node);
  if (selector->CanCover(m.node(), m.left().node()) && m.left().IsLoad() &&
      m.right().Is(32)) {
    // Just load and sign-extend the interesting 4 bytes instead. This happens,
    // for example, when we're loading and untagging SMIs.
    BaseWithIndexAndDisplacement64Matcher mleft(m.left().node(),
                                                AddressOption::kAllowAll);
    if (mleft.matches() && (mleft.displacement() == nullptr ||
                            g.CanBeImmediate(mleft.displacement()))) {
      size_t input_count = 0;
      InstructionOperand inputs[3];
      AddressingMode mode = g.GetEffectiveAddressMemoryOperand(
          m.left().node(), inputs, &input_count);
      if (mleft.displacement() == nullptr) {
        // Make sure that the addressing mode indicates the presence of an
        // immediate displacement. It seems that we never use M1 and M2, but we
        // handle them here anyway.
        switch (mode) {
          case kMode_MR:
            mode = kMode_MRI;
            break;
          case kMode_MR1:
            mode = kMode_MR1I;
            break;
          case kMode_MR2:
            mode = kMode_MR2I;
            break;
          case kMode_MR4:
            mode = kMode_MR4I;
            break;
          case kMode_MR8:
            mode = kMode_MR8I;
            break;
          case kMode_M1:
            mode = kMode_M1I;
            break;
          case kMode_M2:
            mode = kMode_M2I;
            break;
          case kMode_M4:
            mode = kMode_M4I;
            break;
          case kMode_M8:
            mode = kMode_M8I;
            break;
          case kMode_None:
          case kMode_MRI:
          case kMode_MR1I:
          case kMode_MR2I:
          case kMode_MR4I:
          case kMode_MR8I:
          case kMode_M1I:
          case kMode_M2I:
          case kMode_M4I:
          case kMode_M8I:
          case kMode_Root:
            UNREACHABLE();
        }
        inputs[input_count++] = ImmediateOperand(ImmediateOperand::INLINE, 4);
      } else {
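        // On little-endian x64 the upper 32 bits of the loaded 64-bit value
        // live at displacement + 4, so bump the matched displacement.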
        int32_t displacement = g.GetImmediateIntegerValue(mleft.displacement());
        inputs[input_count - 1] =
            ImmediateOperand(ImmediateOperand::INLINE, displacement + 4);
      }
      InstructionOperand outputs[] = {g.DefineAsRegister(node)};
      InstructionCode code = opcode | AddressingModeField::encode(mode);
      selector->Emit(code, 1, outputs, input_count, inputs);
      return true;
    }
  }
  return false;
}
}  // namespace

void InstructionSelector::VisitWord64Shr(Node* node) {
  if (TryMatchLoadWord64AndShiftRight(this, node, kX64Movl)) return;
  VisitWord64Shift(this, node, kX64Shr);
}

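// (x << 16) >> 16 sign-extends the low 16 bits, which is exactly movsxwl;
// likewise (x << 24) >> 24 is movsxbl.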
void InstructionSelector::VisitWord32Sar(Node* node) {
  X64OperandGenerator g(this);
  Int32BinopMatcher m(node);
  if (CanCover(m.node(), m.left().node()) && m.left().IsWord32Shl()) {
    Int32BinopMatcher mleft(m.left().node());
    if (mleft.right().Is(16) && m.right().Is(16)) {
      Emit(kX64Movsxwl, g.DefineAsRegister(node), g.Use(mleft.left().node()));
      return;
    } else if (mleft.right().Is(24) && m.right().Is(24)) {
      Emit(kX64Movsxbl, g.DefineAsRegister(node), g.Use(mleft.left().node()));
      return;
    }
  }
  VisitWord32Shift(this, node, kX64Sar32);
}

void InstructionSelector::VisitWord64Sar(Node* node) {
  if (TryMatchLoadWord64AndShiftRight(this, node, kX64Movsxlq)) return;
  VisitWord64Shift(this, node, kX64Sar);
}


void InstructionSelector::VisitWord32Ror(Node* node) {
  VisitWord32Shift(this, node, kX64Ror32);
}


void InstructionSelector::VisitWord64Ror(Node* node) {
  VisitWord64Shift(this, node, kX64Ror);
}

void InstructionSelector::VisitWord32ReverseBits(Node* node) { UNREACHABLE(); }


void InstructionSelector::VisitWord64ReverseBits(Node* node) { UNREACHABLE(); }

void InstructionSelector::VisitWord64ReverseBytes(Node* node) { UNREACHABLE(); }

void InstructionSelector::VisitWord32ReverseBytes(Node* node) { UNREACHABLE(); }

void InstructionSelector::VisitInt32Add(Node* node) {
  X64OperandGenerator g(this);

  // Try to match the Add to a leal pattern
  BaseWithIndexAndDisplacement32Matcher m(node);
  if (m.matches() &&
      (m.displacement() == nullptr || g.CanBeImmediate(m.displacement()))) {
    EmitLea(this, kX64Lea32, node, m.index(), m.scale(), m.base(),
            m.displacement(), m.displacement_mode());
    return;
  }

  // No leal pattern match, use addl
  VisitBinop(this, node, kX64Add32);
}


void InstructionSelector::VisitInt64Add(Node* node) {
  X64OperandGenerator g(this);

  // Try to match the Add to a leaq pattern
  BaseWithIndexAndDisplacement64Matcher m(node);
  if (m.matches() &&
      (m.displacement() == nullptr || g.CanBeImmediate(m.displacement()))) {
    EmitLea(this, kX64Lea, node, m.index(), m.scale(), m.base(),
            m.displacement(), m.displacement_mode());
    return;
  }

  // No leaq pattern match, use addq
  VisitBinop(this, node, kX64Add);
}


void InstructionSelector::VisitInt64AddWithOverflow(Node* node) {
  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
    return VisitBinop(this, node, kX64Add, &cont);
  }
  FlagsContinuation cont;
  VisitBinop(this, node, kX64Add, &cont);
}


void InstructionSelector::VisitInt32Sub(Node* node) {
  X64OperandGenerator g(this);
  Int32BinopMatcher m(node);
  if (m.left().Is(0)) {
    Emit(kX64Neg32, g.DefineSameAsFirst(node), g.UseRegister(m.right().node()));
  } else {
    if (m.right().HasValue() && g.CanBeImmediate(m.right().node())) {
      // Turn subtractions of constant values into immediate "leal" instructions
      // by negating the value.
      Emit(kX64Lea32 | AddressingModeField::encode(kMode_MRI),
           g.DefineAsRegister(node), g.UseRegister(m.left().node()),
           g.TempImmediate(-m.right().Value()));
      return;
    }
    VisitBinop(this, node, kX64Sub32);
  }
}


void InstructionSelector::VisitInt64Sub(Node* node) {
  X64OperandGenerator g(this);
  Int64BinopMatcher m(node);
  if (m.left().Is(0)) {
    Emit(kX64Neg, g.DefineSameAsFirst(node), g.UseRegister(m.right().node()));
  } else {
    if (m.right().HasValue() && g.CanBeImmediate(m.right().node())) {
      // Turn subtractions of constant values into immediate "leaq" instructions
      // by negating the value.
      Emit(kX64Lea | AddressingModeField::encode(kMode_MRI),
           g.DefineAsRegister(node), g.UseRegister(m.left().node()),
           g.TempImmediate(-static_cast<int32_t>(m.right().Value())));
      return;
    }
    VisitBinop(this, node, kX64Sub);
  }
}


void InstructionSelector::VisitInt64SubWithOverflow(Node* node) {
  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
    return VisitBinop(this, node, kX64Sub, &cont);
  }
  FlagsContinuation cont;
  VisitBinop(this, node, kX64Sub, &cont);
}


namespace {

void VisitMul(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
  X64OperandGenerator g(selector);
  Int32BinopMatcher m(node);
  Node* left = m.left().node();
  Node* right = m.right().node();
  if (g.CanBeImmediate(right)) {
    selector->Emit(opcode, g.DefineAsRegister(node), g.Use(left),
                   g.UseImmediate(right));
  } else {
    if (g.CanBeBetterLeftOperand(right)) {
      std::swap(left, right);
    }
    selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
                   g.Use(right));
  }
}

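// x86 widening multiplies take one operand implicitly in rax and write the
// high half of the product to rdx, hence the fixed register constraints.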
void VisitMulHigh(InstructionSelector* selector, Node* node,
                  ArchOpcode opcode) {
  X64OperandGenerator g(selector);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);
  if (selector->IsLive(left) && !selector->IsLive(right)) {
    std::swap(left, right);
  }
  InstructionOperand temps[] = {g.TempRegister(rax)};
  // TODO(turbofan): We use UseUniqueRegister here to improve register
  // allocation.
  selector->Emit(opcode, g.DefineAsFixed(node, rdx), g.UseFixed(left, rax),
                 g.UseUniqueRegister(right), arraysize(temps), temps);
}


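// x86 division takes its dividend in rdx:rax and produces the quotient in rax
// and the remainder in rdx, so VisitDiv fixes the result to rax and VisitMod
// to rdx, reserving the other register as a temp.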
void VisitDiv(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
  X64OperandGenerator g(selector);
  InstructionOperand temps[] = {g.TempRegister(rdx)};
  selector->Emit(
      opcode, g.DefineAsFixed(node, rax), g.UseFixed(node->InputAt(0), rax),
      g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps);
}


void VisitMod(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
  X64OperandGenerator g(selector);
  InstructionOperand temps[] = {g.TempRegister(rax)};
  selector->Emit(
      opcode, g.DefineAsFixed(node, rdx), g.UseFixed(node->InputAt(0), rax),
      g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps);
}

}  // namespace


void InstructionSelector::VisitInt32Mul(Node* node) {
  Int32ScaleMatcher m(node, true);
  if (m.matches()) {
    Node* index = node->InputAt(0);
    Node* base = m.power_of_two_plus_one() ? index : nullptr;
    EmitLea(this, kX64Lea32, node, index, m.scale(), base, nullptr,
            kPositiveDisplacement);
    return;
  }
  VisitMul(this, node, kX64Imul32);
}

void InstructionSelector::VisitInt32MulWithOverflow(Node* node) {
  // TODO(mvstanton): Use Int32ScaleMatcher somehow.
  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
    return VisitBinop(this, node, kX64Imul32, &cont);
  }
  FlagsContinuation cont;
  VisitBinop(this, node, kX64Imul32, &cont);
}

void InstructionSelector::VisitInt64Mul(Node* node) {
  VisitMul(this, node, kX64Imul);
}

void InstructionSelector::VisitInt32MulHigh(Node* node) {
  VisitMulHigh(this, node, kX64ImulHigh32);
}


void InstructionSelector::VisitInt32Div(Node* node) {
  VisitDiv(this, node, kX64Idiv32);
}


void InstructionSelector::VisitInt64Div(Node* node) {
  VisitDiv(this, node, kX64Idiv);
}


void InstructionSelector::VisitUint32Div(Node* node) {
  VisitDiv(this, node, kX64Udiv32);
}


void InstructionSelector::VisitUint64Div(Node* node) {
  VisitDiv(this, node, kX64Udiv);
}


void InstructionSelector::VisitInt32Mod(Node* node) {
  VisitMod(this, node, kX64Idiv32);
}


void InstructionSelector::VisitInt64Mod(Node* node) {
  VisitMod(this, node, kX64Idiv);
}


void InstructionSelector::VisitUint32Mod(Node* node) {
  VisitMod(this, node, kX64Udiv32);
}


void InstructionSelector::VisitUint64Mod(Node* node) {
  VisitMod(this, node, kX64Udiv);
}


void InstructionSelector::VisitUint32MulHigh(Node* node) {
  VisitMulHigh(this, node, kX64UmulHigh32);
}

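// The TryTruncate* nodes have an optional second projection that observes
// whether the conversion succeeded; its output register is only allocated
// when that projection is actually used.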
void InstructionSelector::VisitTryTruncateFloat32ToInt64(Node* node) {
  X64OperandGenerator g(this);
  InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
  InstructionOperand outputs[2];
  size_t output_count = 0;
  outputs[output_count++] = g.DefineAsRegister(node);

  Node* success_output = NodeProperties::FindProjection(node, 1);
  if (success_output) {
    outputs[output_count++] = g.DefineAsRegister(success_output);
  }

  Emit(kSSEFloat32ToInt64, output_count, outputs, 1, inputs);
}


void InstructionSelector::VisitTryTruncateFloat64ToInt64(Node* node) {
  X64OperandGenerator g(this);
  InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
  InstructionOperand outputs[2];
  size_t output_count = 0;
  outputs[output_count++] = g.DefineAsRegister(node);

  Node* success_output = NodeProperties::FindProjection(node, 1);
  if (success_output) {
    outputs[output_count++] = g.DefineAsRegister(success_output);
  }

  Emit(kSSEFloat64ToInt64, output_count, outputs, 1, inputs);
}


void InstructionSelector::VisitTryTruncateFloat32ToUint64(Node* node) {
  X64OperandGenerator g(this);
  InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
  InstructionOperand outputs[2];
  size_t output_count = 0;
  outputs[output_count++] = g.DefineAsRegister(node);

  Node* success_output = NodeProperties::FindProjection(node, 1);
  if (success_output) {
    outputs[output_count++] = g.DefineAsRegister(success_output);
  }

  Emit(kSSEFloat32ToUint64, output_count, outputs, 1, inputs);
}


void InstructionSelector::VisitTryTruncateFloat64ToUint64(Node* node) {
  X64OperandGenerator g(this);
  InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
  InstructionOperand outputs[2];
  size_t output_count = 0;
  outputs[output_count++] = g.DefineAsRegister(node);

  Node* success_output = NodeProperties::FindProjection(node, 1);
  if (success_output) {
    outputs[output_count++] = g.DefineAsRegister(success_output);
  }

  Emit(kSSEFloat64ToUint64, output_count, outputs, 1, inputs);
}

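// If the 32-bit input comes straight from a covered load, the sign-extension
// can be folded into the load by selecting a movsx*/movzx* variant directly.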
void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
  X64OperandGenerator g(this);
  Node* const value = node->InputAt(0);
  if (value->opcode() == IrOpcode::kLoad && CanCover(node, value)) {
    LoadRepresentation load_rep = LoadRepresentationOf(value->op());
    MachineRepresentation rep = load_rep.representation();
    InstructionCode opcode = kArchNop;
    switch (rep) {
      case MachineRepresentation::kBit:  // Fall through.
      case MachineRepresentation::kWord8:
        opcode = load_rep.IsSigned() ? kX64Movsxbq : kX64Movzxbq;
        break;
      case MachineRepresentation::kWord16:
        opcode = load_rep.IsSigned() ? kX64Movsxwq : kX64Movzxwq;
        break;
      case MachineRepresentation::kWord32:
        opcode = load_rep.IsSigned() ? kX64Movsxlq : kX64Movl;
        break;
      default:
        UNREACHABLE();
        return;
    }
    InstructionOperand outputs[] = {g.DefineAsRegister(node)};
    size_t input_count = 0;
    InstructionOperand inputs[3];
    AddressingMode mode = g.GetEffectiveAddressMemoryOperand(
        node->InputAt(0), inputs, &input_count);
    opcode |= AddressingModeField::encode(mode);
    Emit(opcode, 1, outputs, input_count, inputs);
  } else {
    Emit(kX64Movsxlq, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
  }
}

namespace {

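// Returns true if |node| already leaves its 32-bit result zero-extended in a
// 64-bit register, because 32-bit x64 instructions clear the upper 32 bits of
// their destination register.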
bool ZeroExtendsWord32ToWord64(Node* node) {
  switch (node->opcode()) {
    case IrOpcode::kWord32And:
    case IrOpcode::kWord32Or:
    case IrOpcode::kWord32Xor:
    case IrOpcode::kWord32Shl:
    case IrOpcode::kWord32Shr:
    case IrOpcode::kWord32Sar:
    case IrOpcode::kWord32Ror:
    case IrOpcode::kWord32Equal:
    case IrOpcode::kInt32Add:
    case IrOpcode::kInt32Sub:
    case IrOpcode::kInt32Mul:
    case IrOpcode::kInt32MulHigh:
    case IrOpcode::kInt32Div:
    case IrOpcode::kInt32LessThan:
    case IrOpcode::kInt32LessThanOrEqual:
    case IrOpcode::kInt32Mod:
    case IrOpcode::kUint32Div:
    case IrOpcode::kUint32LessThan:
    case IrOpcode::kUint32LessThanOrEqual:
    case IrOpcode::kUint32Mod:
    case IrOpcode::kUint32MulHigh:
      // These 32-bit operations implicitly zero-extend to 64-bit on x64, so
      // the zero-extension is a no-op.
      return true;
    case IrOpcode::kProjection: {
      Node* const value = node->InputAt(0);
      switch (value->opcode()) {
        case IrOpcode::kInt32AddWithOverflow:
        case IrOpcode::kInt32SubWithOverflow:
        case IrOpcode::kInt32MulWithOverflow:
          return true;
        default:
          return false;
      }
    }
    case IrOpcode::kLoad: {
      // The movzxbl/movsxbl/movzxwl/movsxwl operations implicitly zero-extend
      // to 64-bit on x64, so the zero-extension is a no-op.
      LoadRepresentation load_rep = LoadRepresentationOf(node->op());
      switch (load_rep.representation()) {
        case MachineRepresentation::kWord8:
        case MachineRepresentation::kWord16:
          return true;
        default:
          return false;
      }
    }
    default:
      return false;
  }
}

}  // namespace

void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
  X64OperandGenerator g(this);
  Node* value = node->InputAt(0);
  if (ZeroExtendsWord32ToWord64(value)) {
    // These 32-bit operations implicitly zero-extend to 64-bit on x64, so the
    // zero-extension is a no-op.
    return EmitIdentity(node);
  }
  Emit(kX64Movl, g.DefineAsRegister(node), g.Use(value));
}


namespace {

void VisitRO(InstructionSelector* selector, Node* node,
             InstructionCode opcode) {
  X64OperandGenerator g(selector);
  selector->Emit(opcode, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}


void VisitRR(InstructionSelector* selector, Node* node,
             InstructionCode opcode) {
  X64OperandGenerator g(selector);
  selector->Emit(opcode, g.DefineAsRegister(node),
                 g.UseRegister(node->InputAt(0)));
}

void VisitRRO(InstructionSelector* selector, Node* node,
              InstructionCode opcode) {
  X64OperandGenerator g(selector);
  selector->Emit(opcode, g.DefineSameAsFirst(node),
                 g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
}

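// AVX provides nondestructive three-operand forms, so the result can go to
// any register; the SSE fallback is destructive and must reuse input 0.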
void VisitFloatBinop(InstructionSelector* selector, Node* node,
                     ArchOpcode avx_opcode, ArchOpcode sse_opcode) {
  X64OperandGenerator g(selector);
  InstructionOperand operand0 = g.UseRegister(node->InputAt(0));
  InstructionOperand operand1 = g.Use(node->InputAt(1));
  if (selector->IsSupported(AVX)) {
    selector->Emit(avx_opcode, g.DefineAsRegister(node), operand0, operand1);
  } else {
    selector->Emit(sse_opcode, g.DefineSameAsFirst(node), operand0, operand1);
  }
}


void VisitFloatUnop(InstructionSelector* selector, Node* node, Node* input,
                    ArchOpcode avx_opcode, ArchOpcode sse_opcode) {
  X64OperandGenerator g(selector);
  if (selector->IsSupported(AVX)) {
    selector->Emit(avx_opcode, g.DefineAsRegister(node), g.Use(input));
  } else {
    selector->Emit(sse_opcode, g.DefineSameAsFirst(node), g.UseRegister(input));
  }
}

}  // namespace

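// Each V(Name, opcode) pair below expands into a Visit##Name method: the RO
// list dispatches through VisitRO (result register, any operand input), the
// RR list through VisitRR (result and input both in registers).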
   1299 #define RO_OP_LIST(V)                                                    \
   1300   V(Word64Clz, kX64Lzcnt)                                                \
   1301   V(Word32Clz, kX64Lzcnt32)                                              \
   1302   V(Word64Ctz, kX64Tzcnt)                                                \
   1303   V(Word32Ctz, kX64Tzcnt32)                                              \
   1304   V(Word64Popcnt, kX64Popcnt)                                            \
   1305   V(Word32Popcnt, kX64Popcnt32)                                          \
   1306   V(Float64Sqrt, kSSEFloat64Sqrt)                                        \
   1307   V(Float32Sqrt, kSSEFloat32Sqrt)                                        \
   1308   V(ChangeFloat64ToInt32, kSSEFloat64ToInt32)                            \
   1309   V(ChangeFloat64ToUint32, kSSEFloat64ToUint32 | MiscField::encode(1))   \
   1310   V(TruncateFloat64ToUint32, kSSEFloat64ToUint32 | MiscField::encode(0)) \
   1311   V(TruncateFloat64ToFloat32, kSSEFloat64ToFloat32)                      \
   1312   V(ChangeFloat32ToFloat64, kSSEFloat32ToFloat64)                        \
   1313   V(TruncateFloat32ToInt32, kSSEFloat32ToInt32)                          \
   1314   V(TruncateFloat32ToUint32, kSSEFloat32ToUint32)                        \
   1315   V(ChangeInt32ToFloat64, kSSEInt32ToFloat64)                            \
   1316   V(ChangeUint32ToFloat64, kSSEUint32ToFloat64)                          \
   1317   V(RoundFloat64ToInt32, kSSEFloat64ToInt32)                             \
   1318   V(RoundInt32ToFloat32, kSSEInt32ToFloat32)                             \
   1319   V(RoundInt64ToFloat32, kSSEInt64ToFloat32)                             \
   1320   V(RoundInt64ToFloat64, kSSEInt64ToFloat64)                             \
   1321   V(RoundUint32ToFloat32, kSSEUint32ToFloat32)                           \
   1322   V(BitcastFloat32ToInt32, kX64BitcastFI)                                \
   1323   V(BitcastFloat64ToInt64, kX64BitcastDL)                                \
   1324   V(BitcastInt32ToFloat32, kX64BitcastIF)                                \
   1325   V(BitcastInt64ToFloat64, kX64BitcastLD)                                \
   1326   V(Float64ExtractLowWord32, kSSEFloat64ExtractLowWord32)                \
   1327   V(Float64ExtractHighWord32, kSSEFloat64ExtractHighWord32)
   1328 
   1329 #define RR_OP_LIST(V)                                                         \
   1330   V(Float32RoundDown, kSSEFloat32Round | MiscField::encode(kRoundDown))       \
   1331   V(Float64RoundDown, kSSEFloat64Round | MiscField::encode(kRoundDown))       \
   1332   V(Float32RoundUp, kSSEFloat32Round | MiscField::encode(kRoundUp))           \
   1333   V(Float64RoundUp, kSSEFloat64Round | MiscField::encode(kRoundUp))           \
   1334   V(Float32RoundTruncate, kSSEFloat32Round | MiscField::encode(kRoundToZero)) \
   1335   V(Float64RoundTruncate, kSSEFloat64Round | MiscField::encode(kRoundToZero)) \
   1336   V(Float32RoundTiesEven,                                                     \
   1337     kSSEFloat32Round | MiscField::encode(kRoundToNearest))                    \
   1338   V(Float64RoundTiesEven, kSSEFloat64Round | MiscField::encode(kRoundToNearest))
   1339 
   1340 #define RO_VISITOR(Name, opcode)                      \
   1341   void InstructionSelector::Visit##Name(Node* node) { \
   1342     VisitRO(this, node, opcode);                      \
   1343   }
   1344 RO_OP_LIST(RO_VISITOR)
   1345 #undef RO_VISITOR
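
// For example, the first RO_OP_LIST entry expands to:
//
//   void InstructionSelector::VisitWord64Clz(Node* node) {
//     VisitRO(this, node, kX64Lzcnt);
//   }
//
// i.e. each listed operator becomes a register-output visitor that simply
// forwards to the shared helper with its x64 opcode.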

#define RR_VISITOR(Name, opcode)                      \
  void InstructionSelector::Visit##Name(Node* node) { \
    VisitRR(this, node, opcode);                      \
  }
RR_OP_LIST(RR_VISITOR)
#undef RR_VISITOR

void InstructionSelector::VisitTruncateFloat64ToWord32(Node* node) {
  VisitRR(this, node, kArchTruncateDoubleToI);
}

void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
  X64OperandGenerator g(this);
  Node* value = node->InputAt(0);
  if (CanCover(node, value)) {
    switch (value->opcode()) {
      case IrOpcode::kWord64Sar:
      case IrOpcode::kWord64Shr: {
        Int64BinopMatcher m(value);
        if (m.right().Is(32)) {
          if (TryMatchLoadWord64AndShiftRight(this, value, kX64Movl)) {
            return EmitIdentity(node);
          }
          Emit(kX64Shr, g.DefineSameAsFirst(node),
               g.UseRegister(m.left().node()), g.TempImmediate(32));
          return;
        }
        break;
      }
      default:
        break;
    }
  }
  Emit(kX64Movl, g.DefineAsRegister(node), g.Use(value));
}
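
// Truncation only needs the low 32 bits of the input, so the general case
// is a single movl (writing a 32-bit register clears the upper half on
// x64). When the input is a 64-bit value shifted right by exactly 32, the
// truncated result is simply its high word: the shift itself, or a 32-bit
// load of the high word when the shifted value is a covered load, already
// produces the answer.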

void InstructionSelector::VisitRoundUint64ToFloat32(Node* node) {
  X64OperandGenerator g(this);
  InstructionOperand temps[] = {g.TempRegister()};
  Emit(kSSEUint64ToFloat32, g.DefineAsRegister(node), g.Use(node->InputAt(0)),
       arraysize(temps), temps);
}

void InstructionSelector::VisitRoundUint64ToFloat64(Node* node) {
  X64OperandGenerator g(this);
  InstructionOperand temps[] = {g.TempRegister()};
  Emit(kSSEUint64ToFloat64, g.DefineAsRegister(node), g.Use(node->InputAt(0)),
       arraysize(temps), temps);
}
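
// x64 has no unsigned 64-bit-to-float conversion (cvtsi2ss/cvtsi2sd are
// signed only), so the emitted code must special-case inputs with the top
// bit set; the extra temp register is there to support that fix-up
// sequence.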

void InstructionSelector::VisitFloat32Add(Node* node) {
  VisitFloatBinop(this, node, kAVXFloat32Add, kSSEFloat32Add);
}

void InstructionSelector::VisitFloat32Sub(Node* node) {
  VisitFloatBinop(this, node, kAVXFloat32Sub, kSSEFloat32Sub);
}

void InstructionSelector::VisitFloat32Mul(Node* node) {
  VisitFloatBinop(this, node, kAVXFloat32Mul, kSSEFloat32Mul);
}

void InstructionSelector::VisitFloat32Div(Node* node) {
  VisitFloatBinop(this, node, kAVXFloat32Div, kSSEFloat32Div);
}

void InstructionSelector::VisitFloat32Abs(Node* node) {
  VisitFloatUnop(this, node, node->InputAt(0), kAVXFloat32Abs, kSSEFloat32Abs);
}

void InstructionSelector::VisitFloat32Max(Node* node) {
  VisitRRO(this, node, kSSEFloat32Max);
}

void InstructionSelector::VisitFloat32Min(Node* node) {
  VisitRRO(this, node, kSSEFloat32Min);
}

void InstructionSelector::VisitFloat64Add(Node* node) {
  VisitFloatBinop(this, node, kAVXFloat64Add, kSSEFloat64Add);
}

void InstructionSelector::VisitFloat64Sub(Node* node) {
  VisitFloatBinop(this, node, kAVXFloat64Sub, kSSEFloat64Sub);
}

void InstructionSelector::VisitFloat64Mul(Node* node) {
  VisitFloatBinop(this, node, kAVXFloat64Mul, kSSEFloat64Mul);
}

void InstructionSelector::VisitFloat64Div(Node* node) {
  VisitFloatBinop(this, node, kAVXFloat64Div, kSSEFloat64Div);
}

void InstructionSelector::VisitFloat64Mod(Node* node) {
  X64OperandGenerator g(this);
  InstructionOperand temps[] = {g.TempRegister(rax)};
  Emit(kSSEFloat64Mod, g.DefineSameAsFirst(node),
       g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)),
       arraysize(temps), temps);
}
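
// Float64Mod is expanded by the code generator into an inline x87 fprem
// loop rather than a C call; rax is reserved as a fixed temp because the
// FPU status word is read through ax (fnstsw) to decide whether fprem has
// to iterate again.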

void InstructionSelector::VisitFloat64Max(Node* node) {
  VisitRRO(this, node, kSSEFloat64Max);
}

void InstructionSelector::VisitFloat64Min(Node* node) {
  VisitRRO(this, node, kSSEFloat64Min);
}

void InstructionSelector::VisitFloat64Abs(Node* node) {
  VisitFloatUnop(this, node, node->InputAt(0), kAVXFloat64Abs, kSSEFloat64Abs);
}

void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
  UNREACHABLE();
}

void InstructionSelector::VisitFloat32Neg(Node* node) {
  VisitFloatUnop(this, node, node->InputAt(0), kAVXFloat32Neg, kSSEFloat32Neg);
}

void InstructionSelector::VisitFloat64Neg(Node* node) {
  VisitFloatUnop(this, node, node->InputAt(0), kAVXFloat64Neg, kSSEFloat64Neg);
}

void InstructionSelector::VisitFloat64Ieee754Binop(Node* node,
                                                   InstructionCode opcode) {
  X64OperandGenerator g(this);
  Emit(opcode, g.DefineAsFixed(node, xmm0), g.UseFixed(node->InputAt(0), xmm0),
       g.UseFixed(node->InputAt(1), xmm1))
      ->MarkAsCall();
}

void InstructionSelector::VisitFloat64Ieee754Unop(Node* node,
                                                  InstructionCode opcode) {
  X64OperandGenerator g(this);
  Emit(opcode, g.DefineAsFixed(node, xmm0), g.UseFixed(node->InputAt(0), xmm0))
      ->MarkAsCall();
}
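
// The Ieee754 operations (sin, cos, pow, ...) are lowered to calls into
// C++ runtime routines: input(s) and output are pinned to xmm0/xmm1 as
// those routines expect, and MarkAsCall tells the register allocator that
// caller-saved registers are clobbered across the instruction.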

void InstructionSelector::EmitPrepareArguments(
    ZoneVector<PushParameter>* arguments, const CallDescriptor* descriptor,
    Node* node) {
  X64OperandGenerator g(this);

  // Prepare for C function call.
  if (descriptor->IsCFunctionCall()) {
    Emit(kArchPrepareCallCFunction |
             MiscField::encode(static_cast<int>(descriptor->ParameterCount())),
         0, nullptr, 0, nullptr);

    // Poke any stack arguments.
    for (size_t n = 0; n < arguments->size(); ++n) {
      PushParameter input = (*arguments)[n];
      if (input.node()) {
        int slot = static_cast<int>(n);
        InstructionOperand value = g.CanBeImmediate(input.node())
                                       ? g.UseImmediate(input.node())
                                       : g.UseRegister(input.node());
        Emit(kX64Poke | MiscField::encode(slot), g.NoOutput(), value);
      }
    }
  } else {
    // Push any stack arguments.
    int effect_level = GetEffectLevel(node);
    for (PushParameter input : base::Reversed(*arguments)) {
      Node* input_node = input.node();
      if (g.CanBeImmediate(input_node)) {
        Emit(kX64Push, g.NoOutput(), g.UseImmediate(input_node));
      } else if (IsSupported(ATOM) ||
                 sequence()->IsFP(GetVirtualRegister(input_node))) {
        // TODO(titzer): X64Push cannot handle stack->stack double moves
        // because there is no way to encode fixed double slots.
        Emit(kX64Push, g.NoOutput(), g.UseRegister(input_node));
      } else if (g.CanBeMemoryOperand(kX64Push, node, input_node,
                                      effect_level)) {
        InstructionOperand outputs[1];
        InstructionOperand inputs[4];
        size_t input_count = 0;
        InstructionCode opcode = kX64Push;
        AddressingMode mode = g.GetEffectiveAddressMemoryOperand(
            input_node, inputs, &input_count);
        opcode |= AddressingModeField::encode(mode);
        Emit(opcode, 0, outputs, input_count, inputs);
      } else {
        Emit(kX64Push, g.NoOutput(), g.Use(input_node));
      }
    }
  }
}
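
// The push path above picks the cheapest encoding per argument: a push of
// an immediate for constants that fit, a push from a register for
// floating-point values (and generally on Atom, where register pushes are
// preferred), a folded push of a memory operand when the argument is a
// covered load, and otherwise whatever operand is convenient.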

bool InstructionSelector::IsTailCallAddressImmediate() { return true; }

int InstructionSelector::GetTempsCountForTailCallFromJSFunction() { return 3; }

namespace {

void VisitCompareWithMemoryOperand(InstructionSelector* selector,
                                   InstructionCode opcode, Node* left,
                                   InstructionOperand right,
                                   FlagsContinuation* cont) {
  DCHECK(left->opcode() == IrOpcode::kLoad);
  X64OperandGenerator g(selector);
  size_t input_count = 0;
  InstructionOperand inputs[6];
  AddressingMode addressing_mode =
      g.GetEffectiveAddressMemoryOperand(left, inputs, &input_count);
  opcode |= AddressingModeField::encode(addressing_mode);
  opcode = cont->Encode(opcode);
  inputs[input_count++] = right;

  if (cont->IsBranch()) {
    inputs[input_count++] = g.Label(cont->true_block());
    inputs[input_count++] = g.Label(cont->false_block());
    selector->Emit(opcode, 0, nullptr, input_count, inputs);
  } else if (cont->IsDeoptimize()) {
    selector->EmitDeoptimize(opcode, 0, nullptr, input_count, inputs,
                             cont->kind(), cont->reason(), cont->frame_state());
  } else if (cont->IsSet()) {
    InstructionOperand output = g.DefineAsRegister(cont->result());
    selector->Emit(opcode, 1, &output, input_count, inputs);
  } else {
    DCHECK(cont->IsTrap());
    inputs[input_count++] = g.UseImmediate(cont->trap_id());
    selector->Emit(opcode, 0, nullptr, input_count, inputs);
  }
}

// Shared routine for multiple compare operations.
void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
                  InstructionOperand left, InstructionOperand right,
                  FlagsContinuation* cont) {
  X64OperandGenerator g(selector);
  opcode = cont->Encode(opcode);
  if (cont->IsBranch()) {
    selector->Emit(opcode, g.NoOutput(), left, right,
                   g.Label(cont->true_block()), g.Label(cont->false_block()));
  } else if (cont->IsDeoptimize()) {
    selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->kind(),
                             cont->reason(), cont->frame_state());
  } else if (cont->IsSet()) {
    selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
  } else {
    DCHECK(cont->IsTrap());
    selector->Emit(opcode, g.NoOutput(), left, right,
                   g.UseImmediate(cont->trap_id()));
  }
}
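
// A FlagsContinuation determines how the condition flags set by the
// comparison are consumed: a branch appends two label inputs, a
// deoptimization carries a frame state, "set" materializes the condition
// into a boolean output register, and a trap appends a trap id. The same
// comparison emission therefore serves all four kinds of users.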

// Shared routine for multiple compare operations.
void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
                  Node* left, Node* right, FlagsContinuation* cont,
                  bool commutative) {
  X64OperandGenerator g(selector);
  if (commutative && g.CanBeBetterLeftOperand(right)) {
    std::swap(left, right);
  }
  VisitCompare(selector, opcode, g.UseRegister(left), g.Use(right), cont);
}

MachineType MachineTypeForNarrow(Node* node, Node* hint_node) {
  if (hint_node->opcode() == IrOpcode::kLoad) {
    MachineType hint = LoadRepresentationOf(hint_node->op());
    if (node->opcode() == IrOpcode::kInt32Constant ||
        node->opcode() == IrOpcode::kInt64Constant) {
      int64_t constant = node->opcode() == IrOpcode::kInt32Constant
                             ? OpParameter<int32_t>(node)
                             : OpParameter<int64_t>(node);
      if (hint == MachineType::Int8()) {
        if (constant >= std::numeric_limits<int8_t>::min() &&
            constant <= std::numeric_limits<int8_t>::max()) {
          return hint;
        }
      } else if (hint == MachineType::Uint8()) {
        if (constant >= std::numeric_limits<uint8_t>::min() &&
            constant <= std::numeric_limits<uint8_t>::max()) {
          return hint;
        }
      } else if (hint == MachineType::Int16()) {
        if (constant >= std::numeric_limits<int16_t>::min() &&
            constant <= std::numeric_limits<int16_t>::max()) {
          return hint;
        }
      } else if (hint == MachineType::Uint16()) {
        if (constant >= std::numeric_limits<uint16_t>::min() &&
            constant <= std::numeric_limits<uint16_t>::max()) {
          return hint;
        }
      } else if (hint == MachineType::Int32()) {
        return hint;
      } else if (hint == MachineType::Uint32()) {
        if (constant >= 0) return hint;
      }
    }
  }
  return node->opcode() == IrOpcode::kLoad ? LoadRepresentationOf(node->op())
                                           : MachineType::None();
}

// Tries to match the size of the given opcode to that of the operands, if
// possible.
InstructionCode TryNarrowOpcodeSize(InstructionCode opcode, Node* left,
                                    Node* right, FlagsContinuation* cont) {
  // TODO(epertoso): we can probably get some size information out of phi
  // nodes.
  // If the load representations don't match, both operands will be
  // zero/sign-extended to 32 bits.
  MachineType left_type = MachineTypeForNarrow(left, right);
  MachineType right_type = MachineTypeForNarrow(right, left);
  if (left_type == right_type) {
    switch (left_type.representation()) {
      case MachineRepresentation::kBit:
      case MachineRepresentation::kWord8: {
        if (opcode == kX64Test32) return kX64Test8;
        if (opcode == kX64Cmp32) {
          if (left_type.semantic() == MachineSemantic::kUint32) {
            cont->OverwriteUnsignedIfSigned();
          } else {
            CHECK_EQ(MachineSemantic::kInt32, left_type.semantic());
          }
          return kX64Cmp8;
        }
        break;
      }
      case MachineRepresentation::kWord16:
        if (opcode == kX64Test32) return kX64Test16;
        if (opcode == kX64Cmp32) {
          if (left_type.semantic() == MachineSemantic::kUint32) {
            cont->OverwriteUnsignedIfSigned();
          } else {
            CHECK_EQ(MachineSemantic::kInt32, left_type.semantic());
          }
          return kX64Cmp16;
        }
        break;
      default:
        break;
    }
  }
  return opcode;
}
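
// For example, in Word32Equal(Load[Int8](p), Int32Constant(42)) both sides
// report Int8 (the constant fits in a byte), so kX64Cmp32 is narrowed to
// kX64Cmp8 and only a single byte is compared. For unsigned hints a signed
// condition is first rewritten to its unsigned counterpart, since a signed
// 32-bit compare of zero-extended values corresponds to an unsigned
// compare of the narrow values.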

// Shared routine for multiple word compare operations.
void VisitWordCompare(InstructionSelector* selector, Node* node,
                      InstructionCode opcode, FlagsContinuation* cont) {
  X64OperandGenerator g(selector);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);

  opcode = TryNarrowOpcodeSize(opcode, left, right, cont);

  // If one of the two inputs is an immediate, make sure it's on the right;
  // if one of the two inputs is a memory operand, make sure it's on the
  // left.
  int effect_level = selector->GetEffectLevel(node);
  if (cont->IsBranch()) {
    effect_level = selector->GetEffectLevel(
        cont->true_block()->PredecessorAt(0)->control_input());
  }

  if ((!g.CanBeImmediate(right) && g.CanBeImmediate(left)) ||
      (g.CanBeMemoryOperand(opcode, node, right, effect_level) &&
       !g.CanBeMemoryOperand(opcode, node, left, effect_level))) {
    if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
    std::swap(left, right);
  }

  // Match immediates on the right side of the comparison.
  if (g.CanBeImmediate(right)) {
    if (g.CanBeMemoryOperand(opcode, node, left, effect_level)) {
      return VisitCompareWithMemoryOperand(selector, opcode, left,
                                           g.UseImmediate(right), cont);
    }
    return VisitCompare(selector, opcode, g.Use(left), g.UseImmediate(right),
                        cont);
  }

  // Match memory operands on the left side of the comparison.
  if (g.CanBeMemoryOperand(opcode, node, left, effect_level)) {
    return VisitCompareWithMemoryOperand(selector, opcode, left,
                                         g.UseRegister(right), cont);
  }

  return VisitCompare(selector, opcode, left, right, cont,
                      node->op()->HasProperty(Operator::kCommutative));
}

// Shared routine for 64-bit word comparison operations.
void VisitWord64Compare(InstructionSelector* selector, Node* node,
                        FlagsContinuation* cont) {
  X64OperandGenerator g(selector);
  if (selector->CanUseRootsRegister()) {
    Heap* const heap = selector->isolate()->heap();
    Heap::RootListIndex root_index;
    HeapObjectBinopMatcher m(node);
    if (m.right().HasValue() &&
        heap->IsRootHandle(m.right().Value(), &root_index)) {
      if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
      InstructionCode opcode =
          kX64Cmp | AddressingModeField::encode(kMode_Root);
      return VisitCompare(
          selector, opcode,
          g.TempImmediate((root_index * kPointerSize) - kRootRegisterBias),
          g.UseRegister(m.left().node()), cont);
    } else if (m.left().HasValue() &&
               heap->IsRootHandle(m.left().Value(), &root_index)) {
      InstructionCode opcode =
          kX64Cmp | AddressingModeField::encode(kMode_Root);
      return VisitCompare(
          selector, opcode,
          g.TempImmediate((root_index * kPointerSize) - kRootRegisterBias),
          g.UseRegister(m.right().node()), cont);
    }
  }
  Int64BinopMatcher m(node);
  if (m.left().IsLoad() && m.right().IsLoadStackPointer()) {
    LoadMatcher<ExternalReferenceMatcher> mleft(m.left().node());
    ExternalReference js_stack_limit =
        ExternalReference::address_of_stack_limit(selector->isolate());
    if (mleft.object().Is(js_stack_limit) && mleft.index().Is(0)) {
      // Compare(Load(js_stack_limit), LoadStackPointer)
      if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
      InstructionCode opcode = cont->Encode(kX64StackCheck);
      if (cont->IsBranch()) {
        selector->Emit(opcode, g.NoOutput(), g.Label(cont->true_block()),
                       g.Label(cont->false_block()));
      } else if (cont->IsDeoptimize()) {
        selector->EmitDeoptimize(opcode, 0, nullptr, 0, nullptr, cont->kind(),
                                 cont->reason(), cont->frame_state());
      } else if (cont->IsSet()) {
        selector->Emit(opcode, g.DefineAsRegister(cont->result()));
      } else {
        DCHECK(cont->IsTrap());
        selector->Emit(opcode, g.NoOutput(), g.UseImmediate(cont->trap_id()));
      }
      return;
    }
  }
  VisitWordCompare(selector, node, kX64Cmp, cont);
}
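
// Two fast paths above: a comparison against a heap root becomes a single
// cmp of the other operand against a fixed offset from the roots register
// (kMode_Root), and the canonical stack-overflow test
// Compare(Load(js_stack_limit), LoadStackPointer) is folded into
// kX64StackCheck, which compares rsp directly against the stack-limit cell
// instead of first loading both sides into registers.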

// Shared routine for comparison with zero.
void VisitCompareZero(InstructionSelector* selector, Node* node,
                      InstructionCode opcode, FlagsContinuation* cont) {
  X64OperandGenerator g(selector);
  VisitCompare(selector, opcode, g.Use(node), g.TempImmediate(0), cont);
}

// Shared routine for multiple float32 compare operations (inputs commuted).
void VisitFloat32Compare(InstructionSelector* selector, Node* node,
                         FlagsContinuation* cont) {
  Node* const left = node->InputAt(0);
  Node* const right = node->InputAt(1);
  InstructionCode const opcode =
      selector->IsSupported(AVX) ? kAVXFloat32Cmp : kSSEFloat32Cmp;
  VisitCompare(selector, opcode, right, left, cont, false);
}

// Shared routine for multiple float64 compare operations (inputs commuted).
void VisitFloat64Compare(InstructionSelector* selector, Node* node,
                         FlagsContinuation* cont) {
  Node* const left = node->InputAt(0);
  Node* const right = node->InputAt(1);
  InstructionCode const opcode =
      selector->IsSupported(AVX) ? kAVXFloat64Cmp : kSSEFloat64Cmp;
  VisitCompare(selector, opcode, right, left, cont, false);
}

// Shared routine for word comparison against zero.
void VisitWordCompareZero(InstructionSelector* selector, Node* user,
                          Node* value, FlagsContinuation* cont) {
  // Try to combine with comparisons against 0 by simply inverting the branch.
  while (value->opcode() == IrOpcode::kWord32Equal &&
         selector->CanCover(user, value)) {
    Int32BinopMatcher m(value);
    if (!m.right().Is(0)) break;

    user = value;
    value = m.left().node();
    cont->Negate();
  }

  if (selector->CanCover(user, value)) {
    switch (value->opcode()) {
      case IrOpcode::kWord32Equal:
        cont->OverwriteAndNegateIfEqual(kEqual);
        return VisitWordCompare(selector, value, kX64Cmp32, cont);
      case IrOpcode::kInt32LessThan:
        cont->OverwriteAndNegateIfEqual(kSignedLessThan);
        return VisitWordCompare(selector, value, kX64Cmp32, cont);
      case IrOpcode::kInt32LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
        return VisitWordCompare(selector, value, kX64Cmp32, cont);
      case IrOpcode::kUint32LessThan:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
        return VisitWordCompare(selector, value, kX64Cmp32, cont);
      case IrOpcode::kUint32LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
        return VisitWordCompare(selector, value, kX64Cmp32, cont);
      case IrOpcode::kWord64Equal: {
        cont->OverwriteAndNegateIfEqual(kEqual);
        Int64BinopMatcher m(value);
        if (m.right().Is(0)) {
          // Try to combine the branch with a comparison.
          Node* const user = m.node();
          Node* const value = m.left().node();
          if (selector->CanCover(user, value)) {
            switch (value->opcode()) {
              case IrOpcode::kInt64Sub:
                return VisitWord64Compare(selector, value, cont);
              case IrOpcode::kWord64And:
                return VisitWordCompare(selector, value, kX64Test, cont);
              default:
                break;
            }
          }
          return VisitCompareZero(selector, value, kX64Cmp, cont);
        }
        return VisitWord64Compare(selector, value, cont);
      }
      case IrOpcode::kInt64LessThan:
        cont->OverwriteAndNegateIfEqual(kSignedLessThan);
        return VisitWord64Compare(selector, value, cont);
      case IrOpcode::kInt64LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
        return VisitWord64Compare(selector, value, cont);
      case IrOpcode::kUint64LessThan:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
        return VisitWord64Compare(selector, value, cont);
      case IrOpcode::kUint64LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
        return VisitWord64Compare(selector, value, cont);
      case IrOpcode::kFloat32Equal:
        cont->OverwriteAndNegateIfEqual(kUnorderedEqual);
        return VisitFloat32Compare(selector, value, cont);
      case IrOpcode::kFloat32LessThan:
        cont->OverwriteAndNegateIfEqual(kUnsignedGreaterThan);
        return VisitFloat32Compare(selector, value, cont);
      case IrOpcode::kFloat32LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kUnsignedGreaterThanOrEqual);
        return VisitFloat32Compare(selector, value, cont);
      case IrOpcode::kFloat64Equal:
        cont->OverwriteAndNegateIfEqual(kUnorderedEqual);
        return VisitFloat64Compare(selector, value, cont);
      case IrOpcode::kFloat64LessThan: {
        Float64BinopMatcher m(value);
        if (m.left().Is(0.0) && m.right().IsFloat64Abs()) {
          // This matches the pattern
          //
          //   Float64LessThan(#0.0, Float64Abs(x))
          //
          // which TurboFan generates for NumberToBoolean in the general case,
          // and which evaluates to false if x is 0, -0 or NaN. We can compile
          // this to a simple (v)ucomisd using not_equal flags condition, which
          // avoids the costly Float64Abs.
          cont->OverwriteAndNegateIfEqual(kNotEqual);
          InstructionCode const opcode =
              selector->IsSupported(AVX) ? kAVXFloat64Cmp : kSSEFloat64Cmp;
          return VisitCompare(selector, opcode, m.left().node(),
                              m.right().InputAt(0), cont, false);
        }
        cont->OverwriteAndNegateIfEqual(kUnsignedGreaterThan);
        return VisitFloat64Compare(selector, value, cont);
      }
      case IrOpcode::kFloat64LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kUnsignedGreaterThanOrEqual);
        return VisitFloat64Compare(selector, value, cont);
      case IrOpcode::kProjection:
        // Check if this is the overflow output projection of an
        // <Operation>WithOverflow node.
        if (ProjectionIndexOf(value->op()) == 1u) {
          // We cannot combine the <Operation>WithOverflow with this branch
          // unless the 0th projection (the use of the actual value of the
          // <Operation>) is either nullptr, which means there's no use of
          // the actual value, or was already defined, which means it is
          // scheduled *AFTER* this branch.
          Node* const node = value->InputAt(0);
          Node* const result = NodeProperties::FindProjection(node, 0);
          if (result == nullptr || selector->IsDefined(result)) {
            switch (node->opcode()) {
              case IrOpcode::kInt32AddWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop(selector, node, kX64Add32, cont);
              case IrOpcode::kInt32SubWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop(selector, node, kX64Sub32, cont);
              case IrOpcode::kInt32MulWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop(selector, node, kX64Imul32, cont);
              case IrOpcode::kInt64AddWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop(selector, node, kX64Add, cont);
              case IrOpcode::kInt64SubWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop(selector, node, kX64Sub, cont);
              default:
                break;
            }
          }
        }
        break;
      case IrOpcode::kInt32Sub:
        return VisitWordCompare(selector, value, kX64Cmp32, cont);
      case IrOpcode::kWord32And:
        return VisitWordCompare(selector, value, kX64Test32, cont);
      default:
        break;
    }
  }

  // The branch could not be combined with a compare; emit a compare against 0.
  VisitCompareZero(selector, value, kX64Cmp32, cont);
}
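
// Note how the loop at the top peels off Word32Equal(x, #0) wrappers by
// negating the continuation: a branch on Word32Equal(a < b, #0) becomes a
// branch on a < b with the condition inverted, so the boolean never has to
// be materialized.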

}  // namespace

void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
                                      BasicBlock* fbranch) {
  FlagsContinuation cont(kNotEqual, tbranch, fbranch);
  VisitWordCompareZero(this, branch, branch->InputAt(0), &cont);
}

void InstructionSelector::VisitDeoptimizeIf(Node* node) {
  DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
  FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
      kNotEqual, p.kind(), p.reason(), node->InputAt(1));
  VisitWordCompareZero(this, node, node->InputAt(0), &cont);
}

void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
  DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
  FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
      kEqual, p.kind(), p.reason(), node->InputAt(1));
  VisitWordCompareZero(this, node, node->InputAt(0), &cont);
}

void InstructionSelector::VisitTrapIf(Node* node, Runtime::FunctionId func_id) {
  FlagsContinuation cont =
      FlagsContinuation::ForTrap(kNotEqual, func_id, node->InputAt(1));
  VisitWordCompareZero(this, node, node->InputAt(0), &cont);
}

void InstructionSelector::VisitTrapUnless(Node* node,
                                          Runtime::FunctionId func_id) {
  FlagsContinuation cont =
      FlagsContinuation::ForTrap(kEqual, func_id, node->InputAt(1));
  VisitWordCompareZero(this, node, node->InputAt(0), &cont);
}

void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
  X64OperandGenerator g(this);
  InstructionOperand value_operand = g.UseRegister(node->InputAt(0));

  // Emit either ArchTableSwitch or ArchLookupSwitch.
  size_t table_space_cost = 4 + sw.value_range;
  size_t table_time_cost = 3;
  size_t lookup_space_cost = 3 + 2 * sw.case_count;
  size_t lookup_time_cost = sw.case_count;
  if (sw.case_count > 4 &&
      table_space_cost + 3 * table_time_cost <=
          lookup_space_cost + 3 * lookup_time_cost &&
      sw.min_value > std::numeric_limits<int32_t>::min()) {
    InstructionOperand index_operand = g.TempRegister();
    if (sw.min_value) {
      // The leal automatically zero extends, so the result is a valid
      // 64-bit index.
      Emit(kX64Lea32 | AddressingModeField::encode(kMode_MRI), index_operand,
           value_operand, g.TempImmediate(-sw.min_value));
    } else {
      // Zero extend, because we use it as a 64-bit index into the jump table.
      Emit(kX64Movl, index_operand, value_operand);
    }
    // Generate a table lookup.
    return EmitTableSwitch(sw, index_operand);
  }

  // Generate a sequence of conditional jumps.
  return EmitLookupSwitch(sw, value_operand);
}
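
// Worked example of the switch heuristic: with case_count == 7 and
// value_range == 10, the table variant costs 4 + 10 = 14 (space) and 3
// (time) while the lookup chain costs 3 + 2 * 7 = 17 and 7; since
// 14 + 3 * 3 = 23 <= 17 + 3 * 7 = 38 and there are more than 4 cases, a
// jump table is emitted. Sparser switches tip the balance toward the
// compare-and-jump chain.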

void InstructionSelector::VisitWord32Equal(Node* const node) {
  Node* user = node;
  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
  Int32BinopMatcher m(user);
  if (m.right().Is(0)) {
    return VisitWordCompareZero(this, m.node(), m.left().node(), &cont);
  }
  VisitWordCompare(this, node, kX64Cmp32, &cont);
}

void InstructionSelector::VisitInt32LessThan(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
  VisitWordCompare(this, node, kX64Cmp32, &cont);
}

void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
  VisitWordCompare(this, node, kX64Cmp32, &cont);
}

void InstructionSelector::VisitUint32LessThan(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
  VisitWordCompare(this, node, kX64Cmp32, &cont);
}

void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
  VisitWordCompare(this, node, kX64Cmp32, &cont);
}

void InstructionSelector::VisitWord64Equal(Node* const node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
  Int64BinopMatcher m(node);
  if (m.right().Is(0)) {
    // Try to combine the equality check with a comparison.
    Node* const user = m.node();
    Node* const value = m.left().node();
    if (CanCover(user, value)) {
      switch (value->opcode()) {
        case IrOpcode::kInt64Sub:
          return VisitWord64Compare(this, value, &cont);
        case IrOpcode::kWord64And:
          return VisitWordCompare(this, value, kX64Test, &cont);
        default:
          break;
      }
    }
  }
  VisitWord64Compare(this, node, &cont);
}

void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
    return VisitBinop(this, node, kX64Add32, &cont);
  }
  FlagsContinuation cont;
  VisitBinop(this, node, kX64Add32, &cont);
}

void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
    return VisitBinop(this, node, kX64Sub32, &cont);
  }
  FlagsContinuation cont;
  VisitBinop(this, node, kX64Sub32, &cont);
}

void InstructionSelector::VisitInt64LessThan(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
  VisitWord64Compare(this, node, &cont);
}

void InstructionSelector::VisitInt64LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
  VisitWord64Compare(this, node, &cont);
}

void InstructionSelector::VisitUint64LessThan(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
  VisitWord64Compare(this, node, &cont);
}

void InstructionSelector::VisitUint64LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
  VisitWord64Compare(this, node, &cont);
}

void InstructionSelector::VisitFloat32Equal(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kUnorderedEqual, node);
  VisitFloat32Compare(this, node, &cont);
}

void InstructionSelector::VisitFloat32LessThan(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kUnsignedGreaterThan, node);
  VisitFloat32Compare(this, node, &cont);
}

void InstructionSelector::VisitFloat32LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kUnsignedGreaterThanOrEqual, node);
  VisitFloat32Compare(this, node, &cont);
}

void InstructionSelector::VisitFloat64Equal(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kUnorderedEqual, node);
  VisitFloat64Compare(this, node, &cont);
}

void InstructionSelector::VisitFloat64LessThan(Node* node) {
  Float64BinopMatcher m(node);
  if (m.left().Is(0.0) && m.right().IsFloat64Abs()) {
    // This matches the pattern
    //
    //   Float64LessThan(#0.0, Float64Abs(x))
    //
    // which TurboFan generates for NumberToBoolean in the general case,
    // and which evaluates to false if x is 0, -0 or NaN. We can compile
    // this to a simple (v)ucomisd using not_equal flags condition, which
    // avoids the costly Float64Abs.
    FlagsContinuation cont = FlagsContinuation::ForSet(kNotEqual, node);
    InstructionCode const opcode =
        IsSupported(AVX) ? kAVXFloat64Cmp : kSSEFloat64Cmp;
    return VisitCompare(this, opcode, m.left().node(), m.right().InputAt(0),
                        &cont, false);
  }
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kUnsignedGreaterThan, node);
  VisitFloat64Compare(this, node, &cont);
}

void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kUnsignedGreaterThanOrEqual, node);
  VisitFloat64Compare(this, node, &cont);
}

void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) {
  X64OperandGenerator g(this);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);
  Float64Matcher mleft(left);
  if (mleft.HasValue() && (bit_cast<uint64_t>(mleft.Value()) >> 32) == 0u) {
    Emit(kSSEFloat64LoadLowWord32, g.DefineAsRegister(node), g.Use(right));
    return;
  }
  Emit(kSSEFloat64InsertLowWord32, g.DefineSameAsFirst(node),
       g.UseRegister(left), g.Use(right));
}

void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
  X64OperandGenerator g(this);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);
  Emit(kSSEFloat64InsertHighWord32, g.DefineSameAsFirst(node),
       g.UseRegister(left), g.Use(right));
}

void InstructionSelector::VisitFloat64SilenceNaN(Node* node) {
  X64OperandGenerator g(this);
  Emit(kSSEFloat64SilenceNaN, g.DefineSameAsFirst(node),
       g.UseRegister(node->InputAt(0)));
}

void InstructionSelector::VisitAtomicLoad(Node* node) {
  LoadRepresentation load_rep = LoadRepresentationOf(node->op());
  DCHECK(load_rep.representation() == MachineRepresentation::kWord8 ||
         load_rep.representation() == MachineRepresentation::kWord16 ||
         load_rep.representation() == MachineRepresentation::kWord32);
  USE(load_rep);
  VisitLoad(node);
}
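
// On x64, ordinary aligned loads of these word sizes are already atomic
// and sufficiently ordered (atomic stores below are emitted as xchg), so
// an atomic load needs no extra fencing and can reuse the regular load
// selection unchanged.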

void InstructionSelector::VisitAtomicStore(Node* node) {
  X64OperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* value = node->InputAt(2);

  MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
  ArchOpcode opcode = kArchNop;
  switch (rep) {
    case MachineRepresentation::kWord8:
      opcode = kX64Xchgb;
      break;
    case MachineRepresentation::kWord16:
      opcode = kX64Xchgw;
      break;
    case MachineRepresentation::kWord32:
      opcode = kX64Xchgl;
      break;
    default:
      UNREACHABLE();
      return;
  }
  AddressingMode addressing_mode;
  InstructionOperand inputs[4];
  size_t input_count = 0;
  inputs[input_count++] = g.UseUniqueRegister(base);
  if (g.CanBeImmediate(index)) {
    inputs[input_count++] = g.UseImmediate(index);
    addressing_mode = kMode_MRI;
  } else {
    inputs[input_count++] = g.UseUniqueRegister(index);
    addressing_mode = kMode_MR1;
  }
  inputs[input_count++] = g.UseUniqueRegister(value);
  InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
  Emit(code, 0, static_cast<InstructionOperand*>(nullptr), input_count, inputs);
}
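
// An xchg with a memory operand implies the lock prefix on x86, so a plain
// exchange already gives the store the required sequential consistency; no
// explicit mfence is needed.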

void InstructionSelector::VisitCreateInt32x4(Node* node) {
  X64OperandGenerator g(this);
  Emit(kX64Int32x4Create, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}

void InstructionSelector::VisitInt32x4ExtractLane(Node* node) {
  X64OperandGenerator g(this);
  int32_t lane = OpParameter<int32_t>(node);
  Emit(kX64Int32x4ExtractLane, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)), g.UseImmediate(lane));
}

void InstructionSelector::VisitInt32x4ReplaceLane(Node* node) {
  X64OperandGenerator g(this);
  int32_t lane = OpParameter<int32_t>(node);
  Emit(kX64Int32x4ReplaceLane, g.DefineSameAsFirst(node),
       g.UseRegister(node->InputAt(0)), g.UseImmediate(lane),
       g.Use(node->InputAt(1)));
}

void InstructionSelector::VisitInt32x4Add(Node* node) {
  X64OperandGenerator g(this);
  Emit(kX64Int32x4Add, g.DefineSameAsFirst(node),
       g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
}

void InstructionSelector::VisitInt32x4Sub(Node* node) {
  X64OperandGenerator g(this);
  Emit(kX64Int32x4Sub, g.DefineSameAsFirst(node),
       g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
}

// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
  MachineOperatorBuilder::Flags flags =
      MachineOperatorBuilder::kWord32ShiftIsSafe |
      MachineOperatorBuilder::kWord32Ctz | MachineOperatorBuilder::kWord64Ctz;
  if (CpuFeatures::IsSupported(POPCNT)) {
    flags |= MachineOperatorBuilder::kWord32Popcnt |
             MachineOperatorBuilder::kWord64Popcnt;
  }
  if (CpuFeatures::IsSupported(SSE4_1)) {
    flags |= MachineOperatorBuilder::kFloat32RoundDown |
             MachineOperatorBuilder::kFloat64RoundDown |
             MachineOperatorBuilder::kFloat32RoundUp |
             MachineOperatorBuilder::kFloat64RoundUp |
             MachineOperatorBuilder::kFloat32RoundTruncate |
             MachineOperatorBuilder::kFloat64RoundTruncate |
             MachineOperatorBuilder::kFloat32RoundTiesEven |
             MachineOperatorBuilder::kFloat64RoundTiesEven;
  }
  return flags;
}
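
// The rounding flags are gated on SSE4.1 because roundss/roundsd implement
// all four modes (down, up, toward zero, to nearest even) directly via an
// immediate operand; without SSE4.1 these operators are not advertised and
// rounding has to be lowered differently.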

// static
MachineOperatorBuilder::AlignmentRequirements
InstructionSelector::AlignmentRequirements() {
  return MachineOperatorBuilder::AlignmentRequirements::
      FullUnalignedAccessSupport();
}

}  // namespace compiler
}  // namespace internal
}  // namespace v8