// v8/src/compiler/ia32/instruction-selector-ia32.cc
      1 // Copyright 2014 the V8 project authors. All rights reserved.
      2 // Use of this source code is governed by a BSD-style license that can be
      3 // found in the LICENSE file.
      4 
      5 #include "src/base/adapters.h"
      6 #include "src/compiler/instruction-selector-impl.h"
      7 #include "src/compiler/node-matchers.h"
      8 #include "src/compiler/node-properties.h"
      9 
     10 namespace v8 {
     11 namespace internal {
     12 namespace compiler {
     13 
     14 // Adds IA32-specific methods for generating operands.
     15 class IA32OperandGenerator final : public OperandGenerator {
     16  public:
     17   explicit IA32OperandGenerator(InstructionSelector* selector)
     18       : OperandGenerator(selector) {}
     19 
     20   InstructionOperand UseByteRegister(Node* node) {
     21     // TODO(titzer): encode byte register use constraints.
     22     return UseFixed(node, edx);
     23   }
     24 
     25   InstructionOperand DefineAsByteRegister(Node* node) {
     26     // TODO(titzer): encode byte register def constraints.
     27     return DefineAsRegister(node);
     28   }
     29 
     30   bool CanBeMemoryOperand(InstructionCode opcode, Node* node, Node* input,
     31                           int effect_level) {
     32     if (input->opcode() != IrOpcode::kLoad ||
     33         !selector()->CanCover(node, input)) {
     34       return false;
     35     }
     36     if (effect_level != selector()->GetEffectLevel(input)) {
     37       return false;
     38     }
     39     MachineRepresentation rep =
     40         LoadRepresentationOf(input->op()).representation();
     41     switch (opcode) {
     42       case kIA32Cmp:
     43       case kIA32Test:
     44         return rep == MachineRepresentation::kWord32 ||
     45                rep == MachineRepresentation::kTagged;
     46       case kIA32Cmp16:
     47       case kIA32Test16:
     48         return rep == MachineRepresentation::kWord16;
     49       case kIA32Cmp8:
     50       case kIA32Test8:
     51         return rep == MachineRepresentation::kWord8;
     52       default:
     53         break;
     54     }
     55     return false;
     56   }
     57 
     58   bool CanBeImmediate(Node* node) {
     59     switch (node->opcode()) {
     60       case IrOpcode::kInt32Constant:
     61       case IrOpcode::kNumberConstant:
     62       case IrOpcode::kExternalConstant:
     63       case IrOpcode::kRelocatableInt32Constant:
     64       case IrOpcode::kRelocatableInt64Constant:
     65         return true;
     66       case IrOpcode::kHeapConstant: {
     67 // TODO(bmeurer): We must not dereference handles concurrently. If we
     68 // really have to this here, then we need to find a way to put this
     69 // information on the HeapConstant node already.
     70 #if 0
     71         // Constants in new space cannot be used as immediates in V8 because
     72         // the GC does not scan code objects when collecting the new generation.
     73         Handle<HeapObject> value = OpParameter<Handle<HeapObject>>(node);
     74         Isolate* isolate = value->GetIsolate();
     75         return !isolate->heap()->InNewSpace(*value);
     76 #endif
     77       }
     78       default:
     79         return false;
     80     }
     81   }
     82 
     83   AddressingMode GenerateMemoryOperandInputs(Node* index, int scale, Node* base,
     84                                              Node* displacement_node,
     85                                              DisplacementMode displacement_mode,
     86                                              InstructionOperand inputs[],
     87                                              size_t* input_count) {
     88     AddressingMode mode = kMode_MRI;
     89     int32_t displacement = (displacement_node == nullptr)
     90                                ? 0
     91                                : OpParameter<int32_t>(displacement_node);
     92     if (displacement_mode == kNegativeDisplacement) {
     93       displacement = -displacement;
     94     }
     95     if (base != nullptr) {
     96       if (base->opcode() == IrOpcode::kInt32Constant) {
     97         displacement += OpParameter<int32_t>(base);
     98         base = nullptr;
     99       }
    100     }
    101     if (base != nullptr) {
    102       inputs[(*input_count)++] = UseRegister(base);
    103       if (index != nullptr) {
    104         DCHECK(scale >= 0 && scale <= 3);
    105         inputs[(*input_count)++] = UseRegister(index);
    106         if (displacement != 0) {
    107           inputs[(*input_count)++] = TempImmediate(displacement);
    108           static const AddressingMode kMRnI_modes[] = {kMode_MR1I, kMode_MR2I,
    109                                                        kMode_MR4I, kMode_MR8I};
    110           mode = kMRnI_modes[scale];
    111         } else {
    112           static const AddressingMode kMRn_modes[] = {kMode_MR1, kMode_MR2,
    113                                                       kMode_MR4, kMode_MR8};
    114           mode = kMRn_modes[scale];
    115         }
    116       } else {
    117         if (displacement == 0) {
    118           mode = kMode_MR;
    119         } else {
    120           inputs[(*input_count)++] = TempImmediate(displacement);
    121           mode = kMode_MRI;
    122         }
    123       }
    124     } else {
    125       DCHECK(scale >= 0 && scale <= 3);
    126       if (index != nullptr) {
    127         inputs[(*input_count)++] = UseRegister(index);
    128         if (displacement != 0) {
    129           inputs[(*input_count)++] = TempImmediate(displacement);
    130           static const AddressingMode kMnI_modes[] = {kMode_MRI, kMode_M2I,
    131                                                       kMode_M4I, kMode_M8I};
    132           mode = kMnI_modes[scale];
    133         } else {
    134           static const AddressingMode kMn_modes[] = {kMode_MR, kMode_M2,
    135                                                      kMode_M4, kMode_M8};
    136           mode = kMn_modes[scale];
    137         }
    138       } else {
    139         inputs[(*input_count)++] = TempImmediate(displacement);
    140         return kMode_MI;
    141       }
    142     }
    143     return mode;
    144   }
    145 
    146   AddressingMode GetEffectiveAddressMemoryOperand(Node* node,
    147                                                   InstructionOperand inputs[],
    148                                                   size_t* input_count) {
    149     BaseWithIndexAndDisplacement32Matcher m(node, AddressOption::kAllowAll);
    150     DCHECK(m.matches());
    151     if ((m.displacement() == nullptr || CanBeImmediate(m.displacement()))) {
    152       return GenerateMemoryOperandInputs(
    153           m.index(), m.scale(), m.base(), m.displacement(),
    154           m.displacement_mode(), inputs, input_count);
    155     } else {
    156       inputs[(*input_count)++] = UseRegister(node->InputAt(0));
    157       inputs[(*input_count)++] = UseRegister(node->InputAt(1));
    158       return kMode_MR1;
    159     }
    160   }
    161 
    162   bool CanBeBetterLeftOperand(Node* node) const {
    163     return !selector()->IsLive(node);
    164   }
    165 };
    166 
    167 
    168 namespace {
    169 
    170 void VisitRO(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
    171   IA32OperandGenerator g(selector);
    172   selector->Emit(opcode, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
    173 }
    174 
    175 
    176 void VisitRR(InstructionSelector* selector, Node* node,
    177              InstructionCode opcode) {
    178   IA32OperandGenerator g(selector);
    179   selector->Emit(opcode, g.DefineAsRegister(node),
    180                  g.UseRegister(node->InputAt(0)));
    181 }
    182 
    183 
    184 void VisitRROFloat(InstructionSelector* selector, Node* node,
    185                    ArchOpcode avx_opcode, ArchOpcode sse_opcode) {
    186   IA32OperandGenerator g(selector);
    187   InstructionOperand operand0 = g.UseRegister(node->InputAt(0));
    188   InstructionOperand operand1 = g.Use(node->InputAt(1));
    189   if (selector->IsSupported(AVX)) {
    190     selector->Emit(avx_opcode, g.DefineAsRegister(node), operand0, operand1);
    191   } else {
    192     selector->Emit(sse_opcode, g.DefineSameAsFirst(node), operand0, operand1);
    193   }
    194 }
    195 
    196 
    197 void VisitFloatUnop(InstructionSelector* selector, Node* node, Node* input,
    198                     ArchOpcode avx_opcode, ArchOpcode sse_opcode) {
    199   IA32OperandGenerator g(selector);
    200   if (selector->IsSupported(AVX)) {
    201     selector->Emit(avx_opcode, g.DefineAsRegister(node), g.Use(input));
    202   } else {
    203     selector->Emit(sse_opcode, g.DefineSameAsFirst(node), g.UseRegister(input));
    204   }
    205 }
    206 
    207 
    208 }  // namespace
    209 
    210 
// Selects the ia32 move instruction for a plain (non-checked) load and folds
// the address computation into the instruction's addressing mode.
void InstructionSelector::VisitLoad(Node* node) {
  LoadRepresentation load_rep = LoadRepresentationOf(node->op());

  // Pick the move instruction matching the loaded representation.
  ArchOpcode opcode = kArchNop;
  switch (load_rep.representation()) {
    case MachineRepresentation::kFloat32:
      opcode = kIA32Movss;
      break;
    case MachineRepresentation::kFloat64:
      opcode = kIA32Movsd;
      break;
    case MachineRepresentation::kBit:  // Fall through.
    case MachineRepresentation::kWord8:
      // Sub-word loads extend to 32 bits, sign- or zero-extending
      // depending on the load's signedness.
      opcode = load_rep.IsSigned() ? kIA32Movsxbl : kIA32Movzxbl;
      break;
    case MachineRepresentation::kWord16:
      opcode = load_rep.IsSigned() ? kIA32Movsxwl : kIA32Movzxwl;
      break;
    case MachineRepresentation::kTaggedSigned:   // Fall through.
    case MachineRepresentation::kTaggedPointer:  // Fall through.
    case MachineRepresentation::kTagged:         // Fall through.
    case MachineRepresentation::kWord32:
      // Tagged values are pointer-sized, i.e. 32-bit, on ia32.
      opcode = kIA32Movl;
      break;
    case MachineRepresentation::kWord64:   // Fall through.
    case MachineRepresentation::kSimd128:  // Fall through.
    case MachineRepresentation::kSimd1x4:  // Fall through.
    case MachineRepresentation::kSimd1x8:  // Fall through.
    case MachineRepresentation::kSimd1x16:  // Fall through.
    case MachineRepresentation::kNone:
      // Representations that cannot be loaded on this architecture.
      UNREACHABLE();
      return;
  }

  IA32OperandGenerator g(this);
  InstructionOperand outputs[1];
  outputs[0] = g.DefineAsRegister(node);
  InstructionOperand inputs[3];
  size_t input_count = 0;
  // Fold base/index/displacement into the instruction's addressing mode.
  AddressingMode mode =
      g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
  InstructionCode code = opcode | AddressingModeField::encode(mode);
  Emit(code, 1, outputs, input_count, inputs);
}
    255 
// Trap-on-OOB (protected) loads are not yet supported on ia32.
void InstructionSelector::VisitProtectedLoad(Node* node) {
  // TODO(eholk)
  UNIMPLEMENTED();
}
    260 
// Selects instructions for a plain store. Stores that need a GC write
// barrier are emitted as a combined store-with-write-barrier pseudo
// instruction; all others become a single mov with a folded address.
void InstructionSelector::VisitStore(Node* node) {
  IA32OperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* value = node->InputAt(2);

  StoreRepresentation store_rep = StoreRepresentationOf(node->op());
  WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
  MachineRepresentation rep = store_rep.representation();

  if (write_barrier_kind != kNoWriteBarrier) {
    DCHECK(CanBeTaggedPointer(rep));
    AddressingMode addressing_mode;
    InstructionOperand inputs[3];
    size_t input_count = 0;
    // Unique registers: these inputs must stay distinct from the temp
    // registers used by the write-barrier code below.
    inputs[input_count++] = g.UseUniqueRegister(base);
    if (g.CanBeImmediate(index)) {
      inputs[input_count++] = g.UseImmediate(index);
      addressing_mode = kMode_MRI;
    } else {
      inputs[input_count++] = g.UseUniqueRegister(index);
      addressing_mode = kMode_MR1;
    }
    inputs[input_count++] = g.UseUniqueRegister(value);
    // Narrow the record-write work based on what the value is known to be.
    RecordWriteMode record_write_mode = RecordWriteMode::kValueIsAny;
    switch (write_barrier_kind) {
      case kNoWriteBarrier:
        UNREACHABLE();
        break;
      case kMapWriteBarrier:
        record_write_mode = RecordWriteMode::kValueIsMap;
        break;
      case kPointerWriteBarrier:
        record_write_mode = RecordWriteMode::kValueIsPointer;
        break;
      case kFullWriteBarrier:
        record_write_mode = RecordWriteMode::kValueIsAny;
        break;
    }
    InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
    size_t const temp_count = arraysize(temps);
    InstructionCode code = kArchStoreWithWriteBarrier;
    code |= AddressingModeField::encode(addressing_mode);
    code |= MiscField::encode(static_cast<int>(record_write_mode));
    Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
  } else {
    // Plain store: pick the move instruction for the stored representation.
    ArchOpcode opcode = kArchNop;
    switch (rep) {
      case MachineRepresentation::kFloat32:
        opcode = kIA32Movss;
        break;
      case MachineRepresentation::kFloat64:
        opcode = kIA32Movsd;
        break;
      case MachineRepresentation::kBit:  // Fall through.
      case MachineRepresentation::kWord8:
        opcode = kIA32Movb;
        break;
      case MachineRepresentation::kWord16:
        opcode = kIA32Movw;
        break;
      case MachineRepresentation::kTaggedSigned:   // Fall through.
      case MachineRepresentation::kTaggedPointer:  // Fall through.
      case MachineRepresentation::kTagged:         // Fall through.
      case MachineRepresentation::kWord32:
        opcode = kIA32Movl;
        break;
      case MachineRepresentation::kWord64:   // Fall through.
      case MachineRepresentation::kSimd128:  // Fall through.
      case MachineRepresentation::kSimd1x4:  // Fall through.
      case MachineRepresentation::kSimd1x8:  // Fall through.
      case MachineRepresentation::kSimd1x16:  // Fall through.
      case MachineRepresentation::kNone:
        UNREACHABLE();
        return;
    }

    InstructionOperand val;
    if (g.CanBeImmediate(value)) {
      val = g.UseImmediate(value);
    } else if (rep == MachineRepresentation::kWord8 ||
               rep == MachineRepresentation::kBit) {
      // Byte stores need a byte-addressable source register.
      val = g.UseByteRegister(value);
    } else {
      val = g.UseRegister(value);
    }

    InstructionOperand inputs[4];
    size_t input_count = 0;
    // Fold the address computation into the addressing mode; the stored
    // value is appended as the last input.
    AddressingMode addressing_mode =
        g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
    InstructionCode code =
        opcode | AddressingModeField::encode(addressing_mode);
    inputs[input_count++] = val;
    Emit(code, 0, static_cast<InstructionOperand*>(nullptr), input_count,
         inputs);
  }
}
    359 
// Trap-on-OOB (protected) stores are not yet supported on ia32.
void InstructionSelector::VisitProtectedStore(Node* node) {
  // TODO(eholk)
  UNIMPLEMENTED();
}
    364 
// Architecture supports unaligned access, therefore VisitLoad is used instead
void InstructionSelector::VisitUnalignedLoad(Node* node) { UNREACHABLE(); }
    367 
// Architecture supports unaligned access, therefore VisitStore is used instead
void InstructionSelector::VisitUnalignedStore(Node* node) { UNREACHABLE(); }
    370 
// Selects a bounds-checked load. When the offset is "base_offset + constant"
// with a provably in-range constant, the constant is folded into the
// instruction as an immediate; otherwise generic register operands are used.
void InstructionSelector::VisitCheckedLoad(Node* node) {
  CheckedLoadRepresentation load_rep = CheckedLoadRepresentationOf(node->op());
  IA32OperandGenerator g(this);
  Node* const buffer = node->InputAt(0);
  Node* const offset = node->InputAt(1);
  Node* const length = node->InputAt(2);
  ArchOpcode opcode = kArchNop;
  switch (load_rep.representation()) {
    case MachineRepresentation::kWord8:
      opcode = load_rep.IsSigned() ? kCheckedLoadInt8 : kCheckedLoadUint8;
      break;
    case MachineRepresentation::kWord16:
      opcode = load_rep.IsSigned() ? kCheckedLoadInt16 : kCheckedLoadUint16;
      break;
    case MachineRepresentation::kWord32:
      opcode = kCheckedLoadWord32;
      break;
    case MachineRepresentation::kFloat32:
      opcode = kCheckedLoadFloat32;
      break;
    case MachineRepresentation::kFloat64:
      opcode = kCheckedLoadFloat64;
      break;
    case MachineRepresentation::kBit:            // Fall through.
    case MachineRepresentation::kTaggedSigned:   // Fall through.
    case MachineRepresentation::kTaggedPointer:  // Fall through.
    case MachineRepresentation::kTagged:         // Fall through.
    case MachineRepresentation::kWord64:         // Fall through.
    case MachineRepresentation::kSimd128:        // Fall through.
    case MachineRepresentation::kSimd1x4:        // Fall through.
    case MachineRepresentation::kSimd1x8:        // Fall through.
    case MachineRepresentation::kSimd1x16:       // Fall through.
    case MachineRepresentation::kNone:
      UNREACHABLE();
      return;
  }
  // Try to fold "offset = x + constant" into an immediate when the constant
  // is non-negative and the (constant) length already bounds it.
  if (offset->opcode() == IrOpcode::kInt32Add && CanCover(node, offset)) {
    Int32BinopMatcher moffset(offset);
    InstructionOperand buffer_operand = g.CanBeImmediate(buffer)
                                            ? g.UseImmediate(buffer)
                                            : g.UseRegister(buffer);
    // Case 1: length is an ordinary Int32Constant.
    Int32Matcher mlength(length);
    if (mlength.HasValue() && moffset.right().HasValue() &&
        moffset.right().Value() >= 0 &&
        mlength.Value() >= moffset.right().Value()) {
      Emit(opcode, g.DefineAsRegister(node),
           g.UseImmediate(moffset.right().node()), g.UseImmediate(length),
           g.UseRegister(moffset.left().node()), buffer_operand);
      return;
    }
    // Case 2: length is a relocatable constant (e.g. patched at code-gen).
    IntMatcher<int32_t, IrOpcode::kRelocatableInt32Constant> mmlength(length);
    if (mmlength.HasValue() && moffset.right().HasValue() &&
        moffset.right().Value() >= 0 &&
        mmlength.Value() >= moffset.right().Value()) {
      Emit(opcode, g.DefineAsRegister(node),
           g.UseImmediate(moffset.right().node()), g.UseImmediate(length),
           g.UseRegister(moffset.left().node()), buffer_operand);
      return;
    }
  }
  // Generic case: offset in a register; note the offset operand appears
  // twice (once for the bounds check, once in the address).
  InstructionOperand offset_operand = g.UseRegister(offset);
  InstructionOperand length_operand =
      g.CanBeImmediate(length) ? g.UseImmediate(length) : g.UseRegister(length);
  if (g.CanBeImmediate(buffer)) {
    Emit(opcode | AddressingModeField::encode(kMode_MRI),
         g.DefineAsRegister(node), offset_operand, length_operand,
         offset_operand, g.UseImmediate(buffer));
  } else {
    Emit(opcode | AddressingModeField::encode(kMode_MR1),
         g.DefineAsRegister(node), offset_operand, length_operand,
         g.UseRegister(buffer), offset_operand);
  }
}
    444 
    445 
// Selects a bounds-checked store. Mirrors VisitCheckedLoad: a constant
// "offset + k" with k provably below a constant length is folded into an
// immediate; otherwise generic register operands are used.
void InstructionSelector::VisitCheckedStore(Node* node) {
  MachineRepresentation rep = CheckedStoreRepresentationOf(node->op());
  IA32OperandGenerator g(this);
  Node* const buffer = node->InputAt(0);
  Node* const offset = node->InputAt(1);
  Node* const length = node->InputAt(2);
  Node* const value = node->InputAt(3);
  ArchOpcode opcode = kArchNop;
  switch (rep) {
    case MachineRepresentation::kWord8:
      opcode = kCheckedStoreWord8;
      break;
    case MachineRepresentation::kWord16:
      opcode = kCheckedStoreWord16;
      break;
    case MachineRepresentation::kWord32:
      opcode = kCheckedStoreWord32;
      break;
    case MachineRepresentation::kFloat32:
      opcode = kCheckedStoreFloat32;
      break;
    case MachineRepresentation::kFloat64:
      opcode = kCheckedStoreFloat64;
      break;
    case MachineRepresentation::kBit:            // Fall through.
    case MachineRepresentation::kTaggedSigned:   // Fall through.
    case MachineRepresentation::kTaggedPointer:  // Fall through.
    case MachineRepresentation::kTagged:         // Fall through.
    case MachineRepresentation::kWord64:         // Fall through.
    case MachineRepresentation::kSimd128:        // Fall through.
    case MachineRepresentation::kSimd1x4:        // Fall through.
    case MachineRepresentation::kSimd1x8:        // Fall through.
    case MachineRepresentation::kSimd1x16:       // Fall through.
    case MachineRepresentation::kNone:
      UNREACHABLE();
      return;
  }
  // Byte-sized stores need the value in a byte-addressable register.
  InstructionOperand value_operand =
      g.CanBeImmediate(value) ? g.UseImmediate(value)
                              : ((rep == MachineRepresentation::kWord8 ||
                                  rep == MachineRepresentation::kBit)
                                     ? g.UseByteRegister(value)
                                     : g.UseRegister(value));
  // Try to fold "offset = x + constant" into an immediate (see VisitCheckedLoad).
  if (offset->opcode() == IrOpcode::kInt32Add && CanCover(node, offset)) {
    Int32BinopMatcher moffset(offset);
    InstructionOperand buffer_operand = g.CanBeImmediate(buffer)
                                            ? g.UseImmediate(buffer)
                                            : g.UseRegister(buffer);
    // Case 1: length is an ordinary Int32Constant.
    Int32Matcher mlength(length);
    if (mlength.HasValue() && moffset.right().HasValue() &&
        moffset.right().Value() >= 0 &&
        mlength.Value() >= moffset.right().Value()) {
      Emit(opcode, g.NoOutput(), g.UseImmediate(moffset.right().node()),
           g.UseImmediate(length), value_operand,
           g.UseRegister(moffset.left().node()), buffer_operand);
      return;
    }
    // Case 2: length is a relocatable constant.
    IntMatcher<int32_t, IrOpcode::kRelocatableInt32Constant> mmlength(length);
    if (mmlength.HasValue() && moffset.right().HasValue() &&
        moffset.right().Value() >= 0 &&
        mmlength.Value() >= moffset.right().Value()) {
      Emit(opcode, g.NoOutput(), g.UseImmediate(moffset.right().node()),
           g.UseImmediate(length), value_operand,
           g.UseRegister(moffset.left().node()), buffer_operand);
      return;
    }
  }
  // Generic case: the offset operand is used both for the bounds check and
  // inside the effective address.
  InstructionOperand offset_operand = g.UseRegister(offset);
  InstructionOperand length_operand =
      g.CanBeImmediate(length) ? g.UseImmediate(length) : g.UseRegister(length);
  if (g.CanBeImmediate(buffer)) {
    Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
         offset_operand, length_operand, value_operand, offset_operand,
         g.UseImmediate(buffer));
  } else {
    Emit(opcode | AddressingModeField::encode(kMode_MR1), g.NoOutput(),
         offset_operand, length_operand, value_operand, g.UseRegister(buffer),
         offset_operand);
  }
}
    526 
    527 namespace {
    528 
// Shared routine for multiple binary operations. Builds the operand list for
// a two-operand (destructive, same-as-first) ia32 binop and combines it with
// the given flags continuation (branch / set / deoptimize).
void VisitBinop(InstructionSelector* selector, Node* node,
                InstructionCode opcode, FlagsContinuation* cont) {
  IA32OperandGenerator g(selector);
  Int32BinopMatcher m(node);
  Node* left = m.left().node();
  Node* right = m.right().node();
  InstructionOperand inputs[4];
  size_t input_count = 0;
  InstructionOperand outputs[2];
  size_t output_count = 0;

  // TODO(turbofan): match complex addressing modes.
  if (left == right) {
    // If both inputs refer to the same operand, enforce allocating a register
    // for both of them to ensure that we don't end up generating code like
    // this:
    //
    //   mov eax, [ebp-0x10]
    //   add eax, [ebp-0x10]
    //   jo label
    InstructionOperand const input = g.UseRegister(left);
    inputs[input_count++] = input;
    inputs[input_count++] = input;
  } else if (g.CanBeImmediate(right)) {
    inputs[input_count++] = g.UseRegister(left);
    inputs[input_count++] = g.UseImmediate(right);
  } else {
    // For commutative ops, prefer a non-live value on the left since the
    // left operand is overwritten (same-as-first constraint).
    if (node->op()->HasProperty(Operator::kCommutative) &&
        g.CanBeBetterLeftOperand(right)) {
      std::swap(left, right);
    }
    inputs[input_count++] = g.UseRegister(left);
    inputs[input_count++] = g.Use(right);
  }

  // Branch continuations carry their target labels as extra inputs.
  if (cont->IsBranch()) {
    inputs[input_count++] = g.Label(cont->true_block());
    inputs[input_count++] = g.Label(cont->false_block());
  }

  outputs[output_count++] = g.DefineSameAsFirst(node);
  if (cont->IsSet()) {
    // The materialized flag result needs a byte-addressable register.
    outputs[output_count++] = g.DefineAsByteRegister(cont->result());
  }

  DCHECK_NE(0u, input_count);
  DCHECK_NE(0u, output_count);
  DCHECK_GE(arraysize(inputs), input_count);
  DCHECK_GE(arraysize(outputs), output_count);

  opcode = cont->Encode(opcode);
  if (cont->IsDeoptimize()) {
    selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
                             cont->kind(), cont->reason(), cont->frame_state());
  } else {
    selector->Emit(opcode, output_count, outputs, input_count, inputs);
  }
}
    588 
    589 
// Shared routine for multiple binary operations.
// Convenience overload: delegates with an empty (no-op) flags continuation.
void VisitBinop(InstructionSelector* selector, Node* node,
                InstructionCode opcode) {
  FlagsContinuation cont;
  VisitBinop(selector, node, opcode, &cont);
}
    596 
    597 }  // namespace
    598 
    599 void InstructionSelector::VisitWord32And(Node* node) {
    600   VisitBinop(this, node, kIA32And);
    601 }
    602 
    603 
    604 void InstructionSelector::VisitWord32Or(Node* node) {
    605   VisitBinop(this, node, kIA32Or);
    606 }
    607 
    608 
    609 void InstructionSelector::VisitWord32Xor(Node* node) {
    610   IA32OperandGenerator g(this);
    611   Int32BinopMatcher m(node);
    612   if (m.right().Is(-1)) {
    613     Emit(kIA32Not, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()));
    614   } else {
    615     VisitBinop(this, node, kIA32Xor);
    616   }
    617 }
    618 
    619 
    620 // Shared routine for multiple shift operations.
    621 static inline void VisitShift(InstructionSelector* selector, Node* node,
    622                               ArchOpcode opcode) {
    623   IA32OperandGenerator g(selector);
    624   Node* left = node->InputAt(0);
    625   Node* right = node->InputAt(1);
    626 
    627   if (g.CanBeImmediate(right)) {
    628     selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
    629                    g.UseImmediate(right));
    630   } else {
    631     selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
    632                    g.UseFixed(right, ecx));
    633   }
    634 }
    635 
    636 
    637 namespace {
    638 
    639 void VisitMulHigh(InstructionSelector* selector, Node* node,
    640                   ArchOpcode opcode) {
    641   IA32OperandGenerator g(selector);
    642   InstructionOperand temps[] = {g.TempRegister(eax)};
    643   selector->Emit(
    644       opcode, g.DefineAsFixed(node, edx), g.UseFixed(node->InputAt(0), eax),
    645       g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps);
    646 }
    647 
    648 
    649 void VisitDiv(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
    650   IA32OperandGenerator g(selector);
    651   InstructionOperand temps[] = {g.TempRegister(edx)};
    652   selector->Emit(opcode, g.DefineAsFixed(node, eax),
    653                  g.UseFixed(node->InputAt(0), eax),
    654                  g.UseUnique(node->InputAt(1)), arraysize(temps), temps);
    655 }
    656 
    657 
    658 void VisitMod(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
    659   IA32OperandGenerator g(selector);
    660   InstructionOperand temps[] = {g.TempRegister(eax)};
    661   selector->Emit(opcode, g.DefineAsFixed(node, edx),
    662                  g.UseFixed(node->InputAt(0), eax),
    663                  g.UseUnique(node->InputAt(1)), arraysize(temps), temps);
    664 }
    665 
    666 void EmitLea(InstructionSelector* selector, Node* result, Node* index,
    667              int scale, Node* base, Node* displacement,
    668              DisplacementMode displacement_mode) {
    669   IA32OperandGenerator g(selector);
    670   InstructionOperand inputs[4];
    671   size_t input_count = 0;
    672   AddressingMode mode =
    673       g.GenerateMemoryOperandInputs(index, scale, base, displacement,
    674                                     displacement_mode, inputs, &input_count);
    675 
    676   DCHECK_NE(0u, input_count);
    677   DCHECK_GE(arraysize(inputs), input_count);
    678 
    679   InstructionOperand outputs[1];
    680   outputs[0] = g.DefineAsRegister(result);
    681 
    682   InstructionCode opcode = AddressingModeField::encode(mode) | kIA32Lea;
    683 
    684   selector->Emit(opcode, 1, outputs, input_count, inputs);
    685 }
    686 
    687 }  // namespace
    688 
    689 
    690 void InstructionSelector::VisitWord32Shl(Node* node) {
    691   Int32ScaleMatcher m(node, true);
    692   if (m.matches()) {
    693     Node* index = node->InputAt(0);
    694     Node* base = m.power_of_two_plus_one() ? index : nullptr;
    695     EmitLea(this, node, index, m.scale(), base, nullptr, kPositiveDisplacement);
    696     return;
    697   }
    698   VisitShift(this, node, kIA32Shl);
    699 }
    700 
    701 
    702 void InstructionSelector::VisitWord32Shr(Node* node) {
    703   VisitShift(this, node, kIA32Shr);
    704 }
    705 
    706 
    707 void InstructionSelector::VisitWord32Sar(Node* node) {
    708   VisitShift(this, node, kIA32Sar);
    709 }
    710 
// 64-bit add split across register pairs. Inputs 0/2 are the low words and
// 1/3 the high words; projection 1 is the high word of the result.
void InstructionSelector::VisitInt32PairAdd(Node* node) {
  IA32OperandGenerator g(this);

  Node* projection1 = NodeProperties::FindProjection(node, 1);
  if (projection1) {
    // We use UseUniqueRegister here to avoid register sharing with the temp
    // register.
    InstructionOperand inputs[] = {
        g.UseRegister(node->InputAt(0)), g.UseUniqueRegister(node->InputAt(1)),
        g.UseRegister(node->InputAt(2)), g.UseUniqueRegister(node->InputAt(3))};

    InstructionOperand outputs[] = {g.DefineSameAsFirst(node),
                                    g.DefineAsRegister(projection1)};

    InstructionOperand temps[] = {g.TempRegister()};

    Emit(kIA32AddPair, 2, outputs, 4, inputs, 1, temps);
  } else {
    // The high word of the result is not used, so we emit the standard 32 bit
    // instruction.
    Emit(kIA32Add, g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(0)),
         g.Use(node->InputAt(2)));
  }
}
    735 
    736 void InstructionSelector::VisitInt32PairSub(Node* node) {
    737   IA32OperandGenerator g(this);
    738 
    739   Node* projection1 = NodeProperties::FindProjection(node, 1);
    740   if (projection1) {
    741     // We use UseUniqueRegister here to avoid register sharing with the temp
    742     // register.
    743     InstructionOperand inputs[] = {
    744         g.UseRegister(node->InputAt(0)), g.UseUniqueRegister(node->InputAt(1)),
    745         g.UseRegister(node->InputAt(2)), g.UseUniqueRegister(node->InputAt(3))};
    746 
    747     InstructionOperand outputs[] = {g.DefineSameAsFirst(node),
    748                                     g.DefineAsRegister(projection1)};
    749 
    750     InstructionOperand temps[] = {g.TempRegister()};
    751 
    752     Emit(kIA32SubPair, 2, outputs, 4, inputs, 1, temps);
    753   } else {
    754     // The high word of the result is not used, so we emit the standard 32 bit
    755     // instruction.
    756     Emit(kIA32Sub, g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(0)),
    757          g.Use(node->InputAt(2)));
    758   }
    759 }
    760 
    761 void InstructionSelector::VisitInt32PairMul(Node* node) {
    762   IA32OperandGenerator g(this);
    763 
    764   Node* projection1 = NodeProperties::FindProjection(node, 1);
    765   if (projection1) {
    766     // InputAt(3) explicitly shares ecx with OutputRegister(1) to save one
    767     // register and one mov instruction.
    768     InstructionOperand inputs[] = {g.UseUnique(node->InputAt(0)),
    769                                    g.UseUnique(node->InputAt(1)),
    770                                    g.UseUniqueRegister(node->InputAt(2)),
    771                                    g.UseFixed(node->InputAt(3), ecx)};
    772 
    773     InstructionOperand outputs[] = {
    774         g.DefineAsFixed(node, eax),
    775         g.DefineAsFixed(NodeProperties::FindProjection(node, 1), ecx)};
    776 
    777     InstructionOperand temps[] = {g.TempRegister(edx)};
    778 
    779     Emit(kIA32MulPair, 2, outputs, 4, inputs, 1, temps);
    780   } else {
    781     // The high word of the result is not used, so we emit the standard 32 bit
    782     // instruction.
    783     Emit(kIA32Imul, g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(0)),
    784          g.Use(node->InputAt(2)));
    785   }
    786 }
    787 
    788 void VisitWord32PairShift(InstructionSelector* selector, InstructionCode opcode,
    789                           Node* node) {
    790   IA32OperandGenerator g(selector);
    791 
    792   Node* shift = node->InputAt(2);
    793   InstructionOperand shift_operand;
    794   if (g.CanBeImmediate(shift)) {
    795     shift_operand = g.UseImmediate(shift);
    796   } else {
    797     shift_operand = g.UseFixed(shift, ecx);
    798   }
    799   InstructionOperand inputs[] = {g.UseFixed(node->InputAt(0), eax),
    800                                  g.UseFixed(node->InputAt(1), edx),
    801                                  shift_operand};
    802 
    803   InstructionOperand outputs[2];
    804   InstructionOperand temps[1];
    805   int32_t output_count = 0;
    806   int32_t temp_count = 0;
    807   outputs[output_count++] = g.DefineAsFixed(node, eax);
    808   Node* projection1 = NodeProperties::FindProjection(node, 1);
    809   if (projection1) {
    810     outputs[output_count++] = g.DefineAsFixed(projection1, edx);
    811   } else {
    812     temps[temp_count++] = g.TempRegister(edx);
    813   }
    814 
    815   selector->Emit(opcode, output_count, outputs, 3, inputs, temp_count, temps);
    816 }
    817 
// The three pair-shift visitors share VisitWord32PairShift and differ only
// in the pair opcode that is emitted.
void InstructionSelector::VisitWord32PairShl(Node* node) {
  VisitWord32PairShift(this, kIA32ShlPair, node);
}

void InstructionSelector::VisitWord32PairShr(Node* node) {
  VisitWord32PairShift(this, kIA32ShrPair, node);
}

void InstructionSelector::VisitWord32PairSar(Node* node) {
  VisitWord32PairShift(this, kIA32SarPair, node);
}

// Word32 rotate-right: delegates to the shared shift visitor.
void InstructionSelector::VisitWord32Ror(Node* node) {
  VisitShift(this, node, kIA32Ror);
}
    833 
// Unary operations selected via the shared VisitRO helper: each entry maps
// a machine operator to its IA32/SSE opcode.
#define RO_OP_LIST(V)                                     \
  V(Word32Clz, kIA32Lzcnt)                                \
  V(Word32Ctz, kIA32Tzcnt)                                \
  V(Word32Popcnt, kIA32Popcnt)                            \
  V(ChangeFloat32ToFloat64, kSSEFloat32ToFloat64)         \
  V(RoundInt32ToFloat32, kSSEInt32ToFloat32)              \
  V(ChangeInt32ToFloat64, kSSEInt32ToFloat64)             \
  V(ChangeUint32ToFloat64, kSSEUint32ToFloat64)           \
  V(TruncateFloat32ToInt32, kSSEFloat32ToInt32)           \
  V(TruncateFloat32ToUint32, kSSEFloat32ToUint32)         \
  V(ChangeFloat64ToInt32, kSSEFloat64ToInt32)             \
  V(ChangeFloat64ToUint32, kSSEFloat64ToUint32)           \
  V(TruncateFloat64ToUint32, kSSEFloat64ToUint32)         \
  V(TruncateFloat64ToFloat32, kSSEFloat64ToFloat32)       \
  V(RoundFloat64ToInt32, kSSEFloat64ToInt32)              \
  V(BitcastFloat32ToInt32, kIA32BitcastFI)                \
  V(BitcastInt32ToFloat32, kIA32BitcastIF)                \
  V(Float32Sqrt, kSSEFloat32Sqrt)                         \
  V(Float64Sqrt, kSSEFloat64Sqrt)                         \
  V(Float64ExtractLowWord32, kSSEFloat64ExtractLowWord32) \
  V(Float64ExtractHighWord32, kSSEFloat64ExtractHighWord32)

// Unary operations selected via the shared VisitRR helper; rounding modes
// are folded into the opcode's MiscField.
#define RR_OP_LIST(V)                                                         \
  V(TruncateFloat64ToWord32, kArchTruncateDoubleToI)                          \
  V(Float32RoundDown, kSSEFloat32Round | MiscField::encode(kRoundDown))       \
  V(Float64RoundDown, kSSEFloat64Round | MiscField::encode(kRoundDown))       \
  V(Float32RoundUp, kSSEFloat32Round | MiscField::encode(kRoundUp))           \
  V(Float64RoundUp, kSSEFloat64Round | MiscField::encode(kRoundUp))           \
  V(Float32RoundTruncate, kSSEFloat32Round | MiscField::encode(kRoundToZero)) \
  V(Float64RoundTruncate, kSSEFloat64Round | MiscField::encode(kRoundToZero)) \
  V(Float32RoundTiesEven,                                                     \
    kSSEFloat32Round | MiscField::encode(kRoundToNearest))                    \
  V(Float64RoundTiesEven, kSSEFloat64Round | MiscField::encode(kRoundToNearest))

// Binary float operations selected via VisitRROFloat, with separate AVX and
// SSE opcodes.
#define RRO_FLOAT_OP_LIST(V)                    \
  V(Float32Add, kAVXFloat32Add, kSSEFloat32Add) \
  V(Float64Add, kAVXFloat64Add, kSSEFloat64Add) \
  V(Float32Sub, kAVXFloat32Sub, kSSEFloat32Sub) \
  V(Float64Sub, kAVXFloat64Sub, kSSEFloat64Sub) \
  V(Float32Mul, kAVXFloat32Mul, kSSEFloat32Mul) \
  V(Float64Mul, kAVXFloat64Mul, kSSEFloat64Mul) \
  V(Float32Div, kAVXFloat32Div, kSSEFloat32Div) \
  V(Float64Div, kAVXFloat64Div, kSSEFloat64Div)

// Unary float operations selected via VisitFloatUnop, with separate AVX and
// SSE opcodes.
#define FLOAT_UNOP_LIST(V)                      \
  V(Float32Abs, kAVXFloat32Abs, kSSEFloat32Abs) \
  V(Float64Abs, kAVXFloat64Abs, kSSEFloat64Abs) \
  V(Float32Neg, kAVXFloat32Neg, kSSEFloat32Neg) \
  V(Float64Neg, kAVXFloat64Neg, kSSEFloat64Neg)

// Expand the operation lists above into the corresponding visitor
// definitions; each visitor forwards to the matching shared helper.
#define RO_VISITOR(Name, opcode)                      \
  void InstructionSelector::Visit##Name(Node* node) { \
    VisitRO(this, node, opcode);                      \
  }
RO_OP_LIST(RO_VISITOR)
#undef RO_VISITOR

#define RR_VISITOR(Name, opcode)                      \
  void InstructionSelector::Visit##Name(Node* node) { \
    VisitRR(this, node, opcode);                      \
  }
RR_OP_LIST(RR_VISITOR)
#undef RR_VISITOR

#define RRO_FLOAT_VISITOR(Name, avx, sse)             \
  void InstructionSelector::Visit##Name(Node* node) { \
    VisitRROFloat(this, node, avx, sse);              \
  }
RRO_FLOAT_OP_LIST(RRO_FLOAT_VISITOR)
#undef RRO_FLOAT_VISITOR

#define FLOAT_UNOP_VISITOR(Name, avx, sse)                  \
  void InstructionSelector::Visit##Name(Node* node) {       \
    VisitFloatUnop(this, node, node->InputAt(0), avx, sse); \
  }
FLOAT_UNOP_LIST(FLOAT_UNOP_VISITOR)
#undef FLOAT_UNOP_VISITOR
    911 
// These operations are not selectable on IA32 — presumably lowered away
// before instruction selection; these visitors must never be reached.
void InstructionSelector::VisitWord32ReverseBits(Node* node) { UNREACHABLE(); }

void InstructionSelector::VisitWord64ReverseBytes(Node* node) { UNREACHABLE(); }

void InstructionSelector::VisitWord32ReverseBytes(Node* node) { UNREACHABLE(); }
    917 
    918 void InstructionSelector::VisitInt32Add(Node* node) {
    919   IA32OperandGenerator g(this);
    920 
    921   // Try to match the Add to a lea pattern
    922   BaseWithIndexAndDisplacement32Matcher m(node);
    923   if (m.matches() &&
    924       (m.displacement() == nullptr || g.CanBeImmediate(m.displacement()))) {
    925     InstructionOperand inputs[4];
    926     size_t input_count = 0;
    927     AddressingMode mode = g.GenerateMemoryOperandInputs(
    928         m.index(), m.scale(), m.base(), m.displacement(), m.displacement_mode(),
    929         inputs, &input_count);
    930 
    931     DCHECK_NE(0u, input_count);
    932     DCHECK_GE(arraysize(inputs), input_count);
    933 
    934     InstructionOperand outputs[1];
    935     outputs[0] = g.DefineAsRegister(node);
    936 
    937     InstructionCode opcode = AddressingModeField::encode(mode) | kIA32Lea;
    938     Emit(opcode, 1, outputs, input_count, inputs);
    939     return;
    940   }
    941 
    942   // No lea pattern match, use add
    943   VisitBinop(this, node, kIA32Add);
    944 }
    945 
    946 
    947 void InstructionSelector::VisitInt32Sub(Node* node) {
    948   IA32OperandGenerator g(this);
    949   Int32BinopMatcher m(node);
    950   if (m.left().Is(0)) {
    951     Emit(kIA32Neg, g.DefineSameAsFirst(node), g.Use(m.right().node()));
    952   } else {
    953     VisitBinop(this, node, kIA32Sub);
    954   }
    955 }
    956 
    957 
    958 void InstructionSelector::VisitInt32Mul(Node* node) {
    959   Int32ScaleMatcher m(node, true);
    960   if (m.matches()) {
    961     Node* index = node->InputAt(0);
    962     Node* base = m.power_of_two_plus_one() ? index : nullptr;
    963     EmitLea(this, node, index, m.scale(), base, nullptr, kPositiveDisplacement);
    964     return;
    965   }
    966   IA32OperandGenerator g(this);
    967   Node* left = node->InputAt(0);
    968   Node* right = node->InputAt(1);
    969   if (g.CanBeImmediate(right)) {
    970     Emit(kIA32Imul, g.DefineAsRegister(node), g.Use(left),
    971          g.UseImmediate(right));
    972   } else {
    973     if (g.CanBeBetterLeftOperand(right)) {
    974       std::swap(left, right);
    975     }
    976     Emit(kIA32Imul, g.DefineSameAsFirst(node), g.UseRegister(left),
    977          g.Use(right));
    978   }
    979 }
    980 
    981 
// Multiply-high and division/modulus visitors: each delegates to a shared
// helper, parameterized only by the signed/unsigned IA32 opcode.
void InstructionSelector::VisitInt32MulHigh(Node* node) {
  VisitMulHigh(this, node, kIA32ImulHigh);
}


void InstructionSelector::VisitUint32MulHigh(Node* node) {
  VisitMulHigh(this, node, kIA32UmulHigh);
}


void InstructionSelector::VisitInt32Div(Node* node) {
  VisitDiv(this, node, kIA32Idiv);
}


void InstructionSelector::VisitUint32Div(Node* node) {
  VisitDiv(this, node, kIA32Udiv);
}


void InstructionSelector::VisitInt32Mod(Node* node) {
  VisitMod(this, node, kIA32Idiv);
}


void InstructionSelector::VisitUint32Mod(Node* node) {
  VisitMod(this, node, kIA32Udiv);
}
   1010 
   1011 
// Uint32 -> float32 conversion. Two scratch registers are reserved for the
// kSSEUint32ToFloat32 expansion (exact usage is defined by the code
// generator for that opcode).
void InstructionSelector::VisitRoundUint32ToFloat32(Node* node) {
  IA32OperandGenerator g(this);
  InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
  Emit(kSSEUint32ToFloat32, g.DefineAsRegister(node), g.Use(node->InputAt(0)),
       arraysize(temps), temps);
}
   1018 
// Float64 modulus. The output is tied to the first input, and eax is
// reserved as a temp — presumably needed by the kSSEFloat64Mod expansion;
// confirm against the code generator.
void InstructionSelector::VisitFloat64Mod(Node* node) {
  IA32OperandGenerator g(this);
  InstructionOperand temps[] = {g.TempRegister(eax)};
  Emit(kSSEFloat64Mod, g.DefineSameAsFirst(node),
       g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)), 1,
       temps);
}
   1026 
// Float min/max visitors. Each ties the output to the first input and
// reserves one temp register for the opcode's expansion (usage defined by
// the code generator for the kSSEFloat{32,64}{Max,Min} opcodes).
void InstructionSelector::VisitFloat32Max(Node* node) {
  IA32OperandGenerator g(this);
  InstructionOperand temps[] = {g.TempRegister()};
  Emit(kSSEFloat32Max, g.DefineSameAsFirst(node),
       g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)),
       arraysize(temps), temps);
}

void InstructionSelector::VisitFloat64Max(Node* node) {
  IA32OperandGenerator g(this);
  InstructionOperand temps[] = {g.TempRegister()};
  Emit(kSSEFloat64Max, g.DefineSameAsFirst(node),
       g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)),
       arraysize(temps), temps);
}

void InstructionSelector::VisitFloat32Min(Node* node) {
  IA32OperandGenerator g(this);
  InstructionOperand temps[] = {g.TempRegister()};
  Emit(kSSEFloat32Min, g.DefineSameAsFirst(node),
       g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)),
       arraysize(temps), temps);
}

void InstructionSelector::VisitFloat64Min(Node* node) {
  IA32OperandGenerator g(this);
  InstructionOperand temps[] = {g.TempRegister()};
  Emit(kSSEFloat64Min, g.DefineSameAsFirst(node),
       g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)),
       arraysize(temps), temps);
}
   1058 
// Round-ties-away is not supported by this backend; the operation must not
// reach instruction selection on IA32.
void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
  UNREACHABLE();
}
   1062 
// Binary IEEE-754 math operation: emitted with the opcode chosen by the
// architecture-independent selector, with the instruction marked as a call.
void InstructionSelector::VisitFloat64Ieee754Binop(Node* node,
                                                   InstructionCode opcode) {
  IA32OperandGenerator g(this);
  Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(0)),
       g.UseRegister(node->InputAt(1)))
      ->MarkAsCall();
}

// Unary IEEE-754 math operation, same scheme as the binary case above but
// with a single input.
void InstructionSelector::VisitFloat64Ieee754Unop(Node* node,
                                                  InstructionCode opcode) {
  IA32OperandGenerator g(this);
  Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(0)))
      ->MarkAsCall();
}
   1077 
   1078 void InstructionSelector::EmitPrepareArguments(
   1079     ZoneVector<PushParameter>* arguments, const CallDescriptor* descriptor,
   1080     Node* node) {
   1081   IA32OperandGenerator g(this);
   1082 
   1083   // Prepare for C function call.
   1084   if (descriptor->IsCFunctionCall()) {
   1085     InstructionOperand temps[] = {g.TempRegister()};
   1086     size_t const temp_count = arraysize(temps);
   1087     Emit(kArchPrepareCallCFunction |
   1088              MiscField::encode(static_cast<int>(descriptor->ParameterCount())),
   1089          0, nullptr, 0, nullptr, temp_count, temps);
   1090 
   1091     // Poke any stack arguments.
   1092     for (size_t n = 0; n < arguments->size(); ++n) {
   1093       PushParameter input = (*arguments)[n];
   1094       if (input.node()) {
   1095         int const slot = static_cast<int>(n);
   1096         InstructionOperand value = g.CanBeImmediate(node)
   1097                                        ? g.UseImmediate(input.node())
   1098                                        : g.UseRegister(input.node());
   1099         Emit(kIA32Poke | MiscField::encode(slot), g.NoOutput(), value);
   1100       }
   1101     }
   1102   } else {
   1103     // Push any stack arguments.
   1104     int effect_level = GetEffectLevel(node);
   1105     for (PushParameter input : base::Reversed(*arguments)) {
   1106       // Skip any alignment holes in pushed nodes.
   1107       Node* input_node = input.node();
   1108       if (input.node() == nullptr) continue;
   1109       if (g.CanBeMemoryOperand(kIA32Push, node, input_node, effect_level)) {
   1110         InstructionOperand outputs[1];
   1111         InstructionOperand inputs[4];
   1112         size_t input_count = 0;
   1113         InstructionCode opcode = kIA32Push;
   1114         AddressingMode mode = g.GetEffectiveAddressMemoryOperand(
   1115             input_node, inputs, &input_count);
   1116         opcode |= AddressingModeField::encode(mode);
   1117         Emit(opcode, 0, outputs, input_count, inputs);
   1118       } else {
   1119         InstructionOperand value =
   1120             g.CanBeImmediate(input.node())
   1121                 ? g.UseImmediate(input.node())
   1122                 : IsSupported(ATOM) ||
   1123                           sequence()->IsFP(GetVirtualRegister(input.node()))
   1124                       ? g.UseRegister(input.node())
   1125                       : g.Use(input.node());
   1126         if (input.type() == MachineType::Float32()) {
   1127           Emit(kIA32PushFloat32, g.NoOutput(), value);
   1128         } else if (input.type() == MachineType::Float64()) {
   1129           Emit(kIA32PushFloat64, g.NoOutput(), value);
   1130         } else {
   1131           Emit(kIA32Push, g.NoOutput(), value);
   1132         }
   1133       }
   1134     }
   1135   }
   1136 }
   1137 
   1138 
// On IA32 a tail-call target address can be encoded as an immediate.
bool InstructionSelector::IsTailCallAddressImmediate() { return true; }

// No scratch registers are required for tail calls from JS functions.
int InstructionSelector::GetTempsCountForTailCallFromJSFunction() { return 0; }
   1142 
   1143 namespace {
   1144 
// Emits a compare whose left operand is a load folded into the instruction
// as a memory operand; {right} has already been converted to an
// InstructionOperand by the caller.
void VisitCompareWithMemoryOperand(InstructionSelector* selector,
                                   InstructionCode opcode, Node* left,
                                   InstructionOperand right,
                                   FlagsContinuation* cont) {
  DCHECK(left->opcode() == IrOpcode::kLoad);
  IA32OperandGenerator g(selector);
  size_t input_count = 0;
  InstructionOperand inputs[6];
  // Fold the load's address computation into the addressing mode.
  AddressingMode addressing_mode =
      g.GetEffectiveAddressMemoryOperand(left, inputs, &input_count);
  opcode |= AddressingModeField::encode(addressing_mode);
  opcode = cont->Encode(opcode);
  inputs[input_count++] = right;

  // The continuation determines the instruction shape: branch targets,
  // a deoptimization, a materialized boolean, or a trap.
  if (cont->IsBranch()) {
    inputs[input_count++] = g.Label(cont->true_block());
    inputs[input_count++] = g.Label(cont->false_block());
    selector->Emit(opcode, 0, nullptr, input_count, inputs);
  } else if (cont->IsDeoptimize()) {
    selector->EmitDeoptimize(opcode, 0, nullptr, input_count, inputs,
                             cont->kind(), cont->reason(), cont->frame_state());
  } else if (cont->IsSet()) {
    InstructionOperand output = g.DefineAsRegister(cont->result());
    selector->Emit(opcode, 1, &output, input_count, inputs);
  } else {
    DCHECK(cont->IsTrap());
    inputs[input_count++] = g.UseImmediate(cont->trap_id());
    selector->Emit(opcode, 0, nullptr, input_count, inputs);
  }
}
   1175 
// Shared routine for multiple compare operations.
// Emits {opcode} over two already-converted operands; the flags continuation
// determines whether the result feeds a branch, a deoptimization, a
// materialized boolean, or a trap.
void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
                  InstructionOperand left, InstructionOperand right,
                  FlagsContinuation* cont) {
  IA32OperandGenerator g(selector);
  opcode = cont->Encode(opcode);
  if (cont->IsBranch()) {
    selector->Emit(opcode, g.NoOutput(), left, right,
                   g.Label(cont->true_block()), g.Label(cont->false_block()));
  } else if (cont->IsDeoptimize()) {
    selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->kind(),
                             cont->reason(), cont->frame_state());
  } else if (cont->IsSet()) {
    // Materializing the flag requires a byte register (DefineAsByteRegister).
    selector->Emit(opcode, g.DefineAsByteRegister(cont->result()), left, right);
  } else {
    DCHECK(cont->IsTrap());
    selector->Emit(opcode, g.NoOutput(), left, right,
                   g.UseImmediate(cont->trap_id()));
  }
}
   1196 
   1197 
   1198 // Shared routine for multiple compare operations.
   1199 void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
   1200                   Node* left, Node* right, FlagsContinuation* cont,
   1201                   bool commutative) {
   1202   IA32OperandGenerator g(selector);
   1203   if (commutative && g.CanBeBetterLeftOperand(right)) {
   1204     std::swap(left, right);
   1205   }
   1206   VisitCompare(selector, opcode, g.UseRegister(left), g.Use(right), cont);
   1207 }
   1208 
   1209 MachineType MachineTypeForNarrow(Node* node, Node* hint_node) {
   1210   if (hint_node->opcode() == IrOpcode::kLoad) {
   1211     MachineType hint = LoadRepresentationOf(hint_node->op());
   1212     if (node->opcode() == IrOpcode::kInt32Constant ||
   1213         node->opcode() == IrOpcode::kInt64Constant) {
   1214       int64_t constant = node->opcode() == IrOpcode::kInt32Constant
   1215                              ? OpParameter<int32_t>(node)
   1216                              : OpParameter<int64_t>(node);
   1217       if (hint == MachineType::Int8()) {
   1218         if (constant >= std::numeric_limits<int8_t>::min() &&
   1219             constant <= std::numeric_limits<int8_t>::max()) {
   1220           return hint;
   1221         }
   1222       } else if (hint == MachineType::Uint8()) {
   1223         if (constant >= std::numeric_limits<uint8_t>::min() &&
   1224             constant <= std::numeric_limits<uint8_t>::max()) {
   1225           return hint;
   1226         }
   1227       } else if (hint == MachineType::Int16()) {
   1228         if (constant >= std::numeric_limits<int16_t>::min() &&
   1229             constant <= std::numeric_limits<int16_t>::max()) {
   1230           return hint;
   1231         }
   1232       } else if (hint == MachineType::Uint16()) {
   1233         if (constant >= std::numeric_limits<uint16_t>::min() &&
   1234             constant <= std::numeric_limits<uint16_t>::max()) {
   1235           return hint;
   1236         }
   1237       } else if (hint == MachineType::Int32()) {
   1238         return hint;
   1239       } else if (hint == MachineType::Uint32()) {
   1240         if (constant >= 0) return hint;
   1241       }
   1242     }
   1243   }
   1244   return node->opcode() == IrOpcode::kLoad ? LoadRepresentationOf(node->op())
   1245                                            : MachineType::None();
   1246 }
   1247 
   1248 // Tries to match the size of the given opcode to that of the operands, if
   1249 // possible.
   1250 InstructionCode TryNarrowOpcodeSize(InstructionCode opcode, Node* left,
   1251                                     Node* right, FlagsContinuation* cont) {
   1252   // TODO(epertoso): we can probably get some size information out of phi nodes.
   1253   // If the load representations don't match, both operands will be
   1254   // zero/sign-extended to 32bit.
   1255   MachineType left_type = MachineTypeForNarrow(left, right);
   1256   MachineType right_type = MachineTypeForNarrow(right, left);
   1257   if (left_type == right_type) {
   1258     switch (left_type.representation()) {
   1259       case MachineRepresentation::kBit:
   1260       case MachineRepresentation::kWord8: {
   1261         if (opcode == kIA32Test) return kIA32Test8;
   1262         if (opcode == kIA32Cmp) {
   1263           if (left_type.semantic() == MachineSemantic::kUint32) {
   1264             cont->OverwriteUnsignedIfSigned();
   1265           } else {
   1266             CHECK_EQ(MachineSemantic::kInt32, left_type.semantic());
   1267           }
   1268           return kIA32Cmp8;
   1269         }
   1270         break;
   1271       }
   1272       case MachineRepresentation::kWord16:
   1273         if (opcode == kIA32Test) return kIA32Test16;
   1274         if (opcode == kIA32Cmp) {
   1275           if (left_type.semantic() == MachineSemantic::kUint32) {
   1276             cont->OverwriteUnsignedIfSigned();
   1277           } else {
   1278             CHECK_EQ(MachineSemantic::kInt32, left_type.semantic());
   1279           }
   1280           return kIA32Cmp16;
   1281         }
   1282         break;
   1283       default:
   1284         break;
   1285     }
   1286   }
   1287   return opcode;
   1288 }
   1289 
// Shared routine for multiple float32 compare operations (inputs commuted).
// Note the swapped operand order passed to VisitCompare; the callers (see
// VisitWordCompareZero) pick condition codes that account for this.
void VisitFloat32Compare(InstructionSelector* selector, Node* node,
                         FlagsContinuation* cont) {
  Node* const left = node->InputAt(0);
  Node* const right = node->InputAt(1);
  VisitCompare(selector, kSSEFloat32Cmp, right, left, cont, false);
}


// Shared routine for multiple float64 compare operations (inputs commuted).
// Same commuted-operand scheme as the float32 case above.
void VisitFloat64Compare(InstructionSelector* selector, Node* node,
                         FlagsContinuation* cont) {
  Node* const left = node->InputAt(0);
  Node* const right = node->InputAt(1);
  VisitCompare(selector, kSSEFloat64Cmp, right, left, cont, false);
}
   1306 
// Shared routine for multiple word compare operations.
// Narrows the compare to 8/16 bits when both operands allow it, then places
// immediates on the right and foldable loads on the left before emitting.
void VisitWordCompare(InstructionSelector* selector, Node* node,
                      InstructionCode opcode, FlagsContinuation* cont) {
  IA32OperandGenerator g(selector);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);

  InstructionCode narrowed_opcode =
      TryNarrowOpcodeSize(opcode, left, right, cont);

  // For branches, the compare is emitted at the branch's effect level, so
  // memory-operand folding must be judged at that level.
  int effect_level = selector->GetEffectLevel(node);
  if (cont->IsBranch()) {
    effect_level = selector->GetEffectLevel(
        cont->true_block()->PredecessorAt(0)->control_input());
  }

  // If one of the two inputs is an immediate, make sure it's on the right, or
  // if one of the two inputs is a memory operand, make sure it's on the left.
  if ((!g.CanBeImmediate(right) && g.CanBeImmediate(left)) ||
      (g.CanBeMemoryOperand(narrowed_opcode, node, right, effect_level) &&
       !g.CanBeMemoryOperand(narrowed_opcode, node, left, effect_level))) {
    // Swapping a non-commutative compare requires commuting the condition.
    if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
    std::swap(left, right);
  }

  // Match immediates on right side of comparison.
  if (g.CanBeImmediate(right)) {
    if (g.CanBeMemoryOperand(narrowed_opcode, node, left, effect_level)) {
      return VisitCompareWithMemoryOperand(selector, narrowed_opcode, left,
                                           g.UseImmediate(right), cont);
    }
    return VisitCompare(selector, opcode, g.Use(left), g.UseImmediate(right),
                        cont);
  }

  // Match memory operands on left side of comparison.
  if (g.CanBeMemoryOperand(narrowed_opcode, node, left, effect_level)) {
    // 8-bit compares need the register operand in a byte register.
    bool needs_byte_register =
        narrowed_opcode == kIA32Test8 || narrowed_opcode == kIA32Cmp8;
    return VisitCompareWithMemoryOperand(
        selector, narrowed_opcode, left,
        needs_byte_register ? g.UseByteRegister(right) : g.UseRegister(right),
        cont);
  }

  return VisitCompare(selector, opcode, left, right, cont,
                      node->op()->HasProperty(Operator::kCommutative));
}
   1355 
// Word compare entry point: recognizes the stack-check pattern
// Compare(Load(js_stack_limit), LoadStackPointer) and emits the dedicated
// kIA32StackCheck instruction for it; all other compares go through the
// generic kIA32Cmp path.
void VisitWordCompare(InstructionSelector* selector, Node* node,
                      FlagsContinuation* cont) {
  IA32OperandGenerator g(selector);
  Int32BinopMatcher m(node);
  if (m.left().IsLoad() && m.right().IsLoadStackPointer()) {
    LoadMatcher<ExternalReferenceMatcher> mleft(m.left().node());
    ExternalReference js_stack_limit =
        ExternalReference::address_of_stack_limit(selector->isolate());
    if (mleft.object().Is(js_stack_limit) && mleft.index().Is(0)) {
      // Compare(Load(js_stack_limit), LoadStackPointer)
      if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
      InstructionCode opcode = cont->Encode(kIA32StackCheck);
      if (cont->IsBranch()) {
        selector->Emit(opcode, g.NoOutput(), g.Label(cont->true_block()),
                       g.Label(cont->false_block()));
      } else if (cont->IsDeoptimize()) {
        selector->EmitDeoptimize(opcode, 0, nullptr, 0, nullptr, cont->kind(),
                                 cont->reason(), cont->frame_state());
      } else {
        DCHECK(cont->IsSet());
        selector->Emit(opcode, g.DefineAsRegister(cont->result()));
      }
      return;
    }
  }
  VisitWordCompare(selector, node, kIA32Cmp, cont);
}
   1383 
   1384 
// Shared routine for word comparison with zero.
//
// Attempts to fuse the operation that produced |value| directly into the
// continuation (branch/deoptimize/trap/set) instead of materializing a
// boolean and comparing it against zero.  |user| is the node consuming the
// comparison result; fusion is only legal when |user| is the sole effective
// consumer (checked via CanCover).
void VisitWordCompareZero(InstructionSelector* selector, Node* user,
                          Node* value, FlagsContinuation* cont) {
  // Try to combine with comparisons against 0 by simply inverting the branch.
  while (value->opcode() == IrOpcode::kWord32Equal &&
         selector->CanCover(user, value)) {
    Int32BinopMatcher m(value);
    if (!m.right().Is(0)) break;

    // (x == 0) is !x, so descend into x and negate the continuation's
    // condition rather than emitting an extra compare.
    user = value;
    value = m.left().node();
    cont->Negate();
  }

  if (selector->CanCover(user, value)) {
    switch (value->opcode()) {
      // Integer comparisons: emit the cmp/test and let the continuation use
      // the corresponding condition directly.
      case IrOpcode::kWord32Equal:
        cont->OverwriteAndNegateIfEqual(kEqual);
        return VisitWordCompare(selector, value, cont);
      case IrOpcode::kInt32LessThan:
        cont->OverwriteAndNegateIfEqual(kSignedLessThan);
        return VisitWordCompare(selector, value, cont);
      case IrOpcode::kInt32LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
        return VisitWordCompare(selector, value, cont);
      case IrOpcode::kUint32LessThan:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
        return VisitWordCompare(selector, value, cont);
      case IrOpcode::kUint32LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
        return VisitWordCompare(selector, value, cont);
      // Floating-point comparisons map "less than" onto the unsigned
      // greater-than conditions; presumably the code generator commutes the
      // operands of the SSE compare to make this line up — confirm against
      // the float compare emitters.
      case IrOpcode::kFloat32Equal:
        cont->OverwriteAndNegateIfEqual(kUnorderedEqual);
        return VisitFloat32Compare(selector, value, cont);
      case IrOpcode::kFloat32LessThan:
        cont->OverwriteAndNegateIfEqual(kUnsignedGreaterThan);
        return VisitFloat32Compare(selector, value, cont);
      case IrOpcode::kFloat32LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kUnsignedGreaterThanOrEqual);
        return VisitFloat32Compare(selector, value, cont);
      case IrOpcode::kFloat64Equal:
        cont->OverwriteAndNegateIfEqual(kUnorderedEqual);
        return VisitFloat64Compare(selector, value, cont);
      case IrOpcode::kFloat64LessThan:
        cont->OverwriteAndNegateIfEqual(kUnsignedGreaterThan);
        return VisitFloat64Compare(selector, value, cont);
      case IrOpcode::kFloat64LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kUnsignedGreaterThanOrEqual);
        return VisitFloat64Compare(selector, value, cont);
      case IrOpcode::kProjection:
        // Check if this is the overflow output projection of an
        // <Operation>WithOverflow node.
        if (ProjectionIndexOf(value->op()) == 1u) {
          // We cannot combine the <Operation>WithOverflow with this branch
          // unless the 0th projection (the use of the actual value of the
          // <Operation> is either nullptr, which means there's no use of the
          // actual value, or was already defined, which means it is scheduled
          // *AFTER* this branch).
          Node* const node = value->InputAt(0);
          Node* const result = NodeProperties::FindProjection(node, 0);
          if (result == nullptr || selector->IsDefined(result)) {
            switch (node->opcode()) {
              case IrOpcode::kInt32AddWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop(selector, node, kIA32Add, cont);
              case IrOpcode::kInt32SubWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop(selector, node, kIA32Sub, cont);
              case IrOpcode::kInt32MulWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop(selector, node, kIA32Imul, cont);
              default:
                break;
            }
          }
        }
        break;
      // A subtraction consumed only for its comparison against zero can be
      // lowered through the word-compare path (cmp sets the same flags).
      case IrOpcode::kInt32Sub:
        return VisitWordCompare(selector, value, cont);
      // (x & mask) == 0 style checks lower to a "test" instruction.
      case IrOpcode::kWord32And:
        return VisitWordCompare(selector, value, kIA32Test, cont);
      default:
        break;
    }
  }

  // Continuation could not be combined with a compare, emit compare against 0.
  IA32OperandGenerator g(selector);
  VisitCompare(selector, kIA32Cmp, g.Use(value), g.TempImmediate(0), cont);
}
   1475 
   1476 }  // namespace
   1477 
   1478 void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
   1479                                       BasicBlock* fbranch) {
   1480   FlagsContinuation cont(kNotEqual, tbranch, fbranch);
   1481   VisitWordCompareZero(this, branch, branch->InputAt(0), &cont);
   1482 }
   1483 
   1484 void InstructionSelector::VisitDeoptimizeIf(Node* node) {
   1485   DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
   1486   FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
   1487       kNotEqual, p.kind(), p.reason(), node->InputAt(1));
   1488   VisitWordCompareZero(this, node, node->InputAt(0), &cont);
   1489 }
   1490 
   1491 void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
   1492   DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
   1493   FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
   1494       kEqual, p.kind(), p.reason(), node->InputAt(1));
   1495   VisitWordCompareZero(this, node, node->InputAt(0), &cont);
   1496 }
   1497 
   1498 void InstructionSelector::VisitTrapIf(Node* node, Runtime::FunctionId func_id) {
   1499   FlagsContinuation cont =
   1500       FlagsContinuation::ForTrap(kNotEqual, func_id, node->InputAt(1));
   1501   VisitWordCompareZero(this, node, node->InputAt(0), &cont);
   1502 }
   1503 
   1504 void InstructionSelector::VisitTrapUnless(Node* node,
   1505                                           Runtime::FunctionId func_id) {
   1506   FlagsContinuation cont =
   1507       FlagsContinuation::ForTrap(kEqual, func_id, node->InputAt(1));
   1508   VisitWordCompareZero(this, node, node->InputAt(0), &cont);
   1509 }
   1510 
// Lowers a multi-way switch to either a jump table (ArchTableSwitch) or a
// chain of conditional jumps (ArchLookupSwitch), picking whichever the cost
// heuristic below favors.
void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
  IA32OperandGenerator g(this);
  InstructionOperand value_operand = g.UseRegister(node->InputAt(0));

  // Emit either ArchTableSwitch or ArchLookupSwitch.
  // Rough instruction-count cost model; time is weighted 3x against space
  // in the comparison below.
  size_t table_space_cost = 4 + sw.value_range;
  size_t table_time_cost = 3;
  size_t lookup_space_cost = 3 + 2 * sw.case_count;
  size_t lookup_time_cost = sw.case_count;
  // The min_value check keeps -sw.min_value below from overflowing int32.
  if (sw.case_count > 4 &&
      table_space_cost + 3 * table_time_cost <=
          lookup_space_cost + 3 * lookup_time_cost &&
      sw.min_value > std::numeric_limits<int32_t>::min()) {
    InstructionOperand index_operand = value_operand;
    if (sw.min_value) {
      // Rebase the switch value so the table index starts at zero.
      index_operand = g.TempRegister();
      Emit(kIA32Lea | AddressingModeField::encode(kMode_MRI), index_operand,
           value_operand, g.TempImmediate(-sw.min_value));
    }
    // Generate a table lookup.
    return EmitTableSwitch(sw, index_operand);
  }

  // Generate a sequence of conditional jumps.
  return EmitLookupSwitch(sw, value_operand);
}
   1537 
   1538 
   1539 void InstructionSelector::VisitWord32Equal(Node* const node) {
   1540   FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
   1541   Int32BinopMatcher m(node);
   1542   if (m.right().Is(0)) {
   1543     return VisitWordCompareZero(this, m.node(), m.left().node(), &cont);
   1544   }
   1545   VisitWordCompare(this, node, &cont);
   1546 }
   1547 
   1548 
   1549 void InstructionSelector::VisitInt32LessThan(Node* node) {
   1550   FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
   1551   VisitWordCompare(this, node, &cont);
   1552 }
   1553 
   1554 
   1555 void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
   1556   FlagsContinuation cont =
   1557       FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
   1558   VisitWordCompare(this, node, &cont);
   1559 }
   1560 
   1561 
   1562 void InstructionSelector::VisitUint32LessThan(Node* node) {
   1563   FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
   1564   VisitWordCompare(this, node, &cont);
   1565 }
   1566 
   1567 
   1568 void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
   1569   FlagsContinuation cont =
   1570       FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
   1571   VisitWordCompare(this, node, &cont);
   1572 }
   1573 
   1574 
   1575 void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
   1576   if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
   1577     FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
   1578     return VisitBinop(this, node, kIA32Add, &cont);
   1579   }
   1580   FlagsContinuation cont;
   1581   VisitBinop(this, node, kIA32Add, &cont);
   1582 }
   1583 
   1584 
   1585 void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
   1586   if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
   1587     FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
   1588     return VisitBinop(this, node, kIA32Sub, &cont);
   1589   }
   1590   FlagsContinuation cont;
   1591   VisitBinop(this, node, kIA32Sub, &cont);
   1592 }
   1593 
   1594 void InstructionSelector::VisitInt32MulWithOverflow(Node* node) {
   1595   if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
   1596     FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
   1597     return VisitBinop(this, node, kIA32Imul, &cont);
   1598   }
   1599   FlagsContinuation cont;
   1600   VisitBinop(this, node, kIA32Imul, &cont);
   1601 }
   1602 
   1603 void InstructionSelector::VisitFloat32Equal(Node* node) {
   1604   FlagsContinuation cont = FlagsContinuation::ForSet(kUnorderedEqual, node);
   1605   VisitFloat32Compare(this, node, &cont);
   1606 }
   1607 
   1608 
   1609 void InstructionSelector::VisitFloat32LessThan(Node* node) {
   1610   FlagsContinuation cont =
   1611       FlagsContinuation::ForSet(kUnsignedGreaterThan, node);
   1612   VisitFloat32Compare(this, node, &cont);
   1613 }
   1614 
   1615 
   1616 void InstructionSelector::VisitFloat32LessThanOrEqual(Node* node) {
   1617   FlagsContinuation cont =
   1618       FlagsContinuation::ForSet(kUnsignedGreaterThanOrEqual, node);
   1619   VisitFloat32Compare(this, node, &cont);
   1620 }
   1621 
   1622 
   1623 void InstructionSelector::VisitFloat64Equal(Node* node) {
   1624   FlagsContinuation cont = FlagsContinuation::ForSet(kUnorderedEqual, node);
   1625   VisitFloat64Compare(this, node, &cont);
   1626 }
   1627 
   1628 
   1629 void InstructionSelector::VisitFloat64LessThan(Node* node) {
   1630   FlagsContinuation cont =
   1631       FlagsContinuation::ForSet(kUnsignedGreaterThan, node);
   1632   VisitFloat64Compare(this, node, &cont);
   1633 }
   1634 
   1635 
   1636 void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
   1637   FlagsContinuation cont =
   1638       FlagsContinuation::ForSet(kUnsignedGreaterThanOrEqual, node);
   1639   VisitFloat64Compare(this, node, &cont);
   1640 }
   1641 
   1642 
   1643 
   1644 void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) {
   1645   IA32OperandGenerator g(this);
   1646   Node* left = node->InputAt(0);
   1647   Node* right = node->InputAt(1);
   1648   Float64Matcher mleft(left);
   1649   if (mleft.HasValue() && (bit_cast<uint64_t>(mleft.Value()) >> 32) == 0u) {
   1650     Emit(kSSEFloat64LoadLowWord32, g.DefineAsRegister(node), g.Use(right));
   1651     return;
   1652   }
   1653   Emit(kSSEFloat64InsertLowWord32, g.DefineSameAsFirst(node),
   1654        g.UseRegister(left), g.Use(right));
   1655 }
   1656 
   1657 
   1658 void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
   1659   IA32OperandGenerator g(this);
   1660   Node* left = node->InputAt(0);
   1661   Node* right = node->InputAt(1);
   1662   Emit(kSSEFloat64InsertHighWord32, g.DefineSameAsFirst(node),
   1663        g.UseRegister(left), g.Use(right));
   1664 }
   1665 
   1666 void InstructionSelector::VisitFloat64SilenceNaN(Node* node) {
   1667   IA32OperandGenerator g(this);
   1668   Emit(kSSEFloat64SilenceNaN, g.DefineSameAsFirst(node),
   1669        g.UseRegister(node->InputAt(0)));
   1670 }
   1671 
   1672 void InstructionSelector::VisitAtomicLoad(Node* node) {
   1673   LoadRepresentation load_rep = LoadRepresentationOf(node->op());
   1674   DCHECK(load_rep.representation() == MachineRepresentation::kWord8 ||
   1675          load_rep.representation() == MachineRepresentation::kWord16 ||
   1676          load_rep.representation() == MachineRepresentation::kWord32);
   1677   USE(load_rep);
   1678   VisitLoad(node);
   1679 }
   1680 
   1681 void InstructionSelector::VisitAtomicStore(Node* node) {
   1682   IA32OperandGenerator g(this);
   1683   Node* base = node->InputAt(0);
   1684   Node* index = node->InputAt(1);
   1685   Node* value = node->InputAt(2);
   1686 
   1687   MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
   1688   ArchOpcode opcode = kArchNop;
   1689   switch (rep) {
   1690     case MachineRepresentation::kWord8:
   1691       opcode = kIA32Xchgb;
   1692       break;
   1693     case MachineRepresentation::kWord16:
   1694       opcode = kIA32Xchgw;
   1695       break;
   1696     case MachineRepresentation::kWord32:
   1697       opcode = kIA32Xchgl;
   1698       break;
   1699     default:
   1700       UNREACHABLE();
   1701       break;
   1702   }
   1703   AddressingMode addressing_mode;
   1704   InstructionOperand inputs[4];
   1705   size_t input_count = 0;
   1706   inputs[input_count++] = g.UseUniqueRegister(base);
   1707   if (g.CanBeImmediate(index)) {
   1708     inputs[input_count++] = g.UseImmediate(index);
   1709     addressing_mode = kMode_MRI;
   1710   } else {
   1711     inputs[input_count++] = g.UseUniqueRegister(index);
   1712     addressing_mode = kMode_MR1;
   1713   }
   1714   inputs[input_count++] = g.UseUniqueRegister(value);
   1715   InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
   1716   Emit(code, 0, nullptr, input_count, inputs);
   1717 }
   1718 
   1719 // static
   1720 MachineOperatorBuilder::Flags
   1721 InstructionSelector::SupportedMachineOperatorFlags() {
   1722   MachineOperatorBuilder::Flags flags =
   1723       MachineOperatorBuilder::kWord32ShiftIsSafe |
   1724       MachineOperatorBuilder::kWord32Ctz;
   1725   if (CpuFeatures::IsSupported(POPCNT)) {
   1726     flags |= MachineOperatorBuilder::kWord32Popcnt;
   1727   }
   1728   if (CpuFeatures::IsSupported(SSE4_1)) {
   1729     flags |= MachineOperatorBuilder::kFloat32RoundDown |
   1730              MachineOperatorBuilder::kFloat64RoundDown |
   1731              MachineOperatorBuilder::kFloat32RoundUp |
   1732              MachineOperatorBuilder::kFloat64RoundUp |
   1733              MachineOperatorBuilder::kFloat32RoundTruncate |
   1734              MachineOperatorBuilder::kFloat64RoundTruncate |
   1735              MachineOperatorBuilder::kFloat32RoundTiesEven |
   1736              MachineOperatorBuilder::kFloat64RoundTiesEven;
   1737   }
   1738   return flags;
   1739 }
   1740 
// static
MachineOperatorBuilder::AlignmentRequirements
InstructionSelector::AlignmentRequirements() {
  // IA-32 reports full unaligned access support, so the machine-operator
  // builder never needs to emit alignment fix-up code.
  return MachineOperatorBuilder::AlignmentRequirements::
      FullUnalignedAccessSupport();
}
   1747 
   1748 }  // namespace compiler
   1749 }  // namespace internal
   1750 }  // namespace v8
   1751