Home | History | Annotate | Download | only in compiler
      1 // Copyright 2014 the V8 project authors. All rights reserved.
      2 // Use of this source code is governed by a BSD-style license that can be
      3 // found in the LICENSE file.
      4 
      5 #include "src/compiler/instruction-selector.h"
      6 
      7 #include <limits>
      8 
      9 #include "src/assembler-inl.h"
     10 #include "src/base/adapters.h"
     11 #include "src/compiler/compiler-source-position-table.h"
     12 #include "src/compiler/instruction-selector-impl.h"
     13 #include "src/compiler/node-matchers.h"
     14 #include "src/compiler/pipeline.h"
     15 #include "src/compiler/schedule.h"
     16 #include "src/compiler/state-values-utils.h"
     17 #include "src/deoptimizer.h"
     18 
     19 namespace v8 {
     20 namespace internal {
     21 namespace compiler {
     22 
// Constructs the selector for one compilation. The per-node side tables
// (defined_, used_, effect_level_, virtual_registers_) are sized up front
// from {node_count} so lookups by node id are O(1) without rechecking bounds.
InstructionSelector::InstructionSelector(
    Zone* zone, size_t node_count, Linkage* linkage,
    InstructionSequence* sequence, Schedule* schedule,
    SourcePositionTable* source_positions, Frame* frame,
    EnableSwitchJumpTable enable_switch_jump_table,
    SourcePositionMode source_position_mode, Features features,
    EnableScheduling enable_scheduling,
    EnableRootsRelativeAddressing enable_roots_relative_addressing,
    PoisoningMitigationLevel poisoning_level, EnableTraceTurboJson trace_turbo)
    : zone_(zone),
      linkage_(linkage),
      sequence_(sequence),
      source_positions_(source_positions),
      source_position_mode_(source_position_mode),
      features_(features),
      schedule_(schedule),
      current_block_(nullptr),
      // Scratch instruction buffer lives in the selector's own zone; the
      // continuation and origin vectors live in the sequence's zone since
      // their contents are handed to the InstructionSequence.
      instructions_(zone),
      continuation_inputs_(sequence->zone()),
      continuation_outputs_(sequence->zone()),
      defined_(node_count, false, zone),
      used_(node_count, false, zone),
      effect_level_(node_count, 0, zone),
      virtual_registers_(node_count,
                         InstructionOperand::kInvalidVirtualRegister, zone),
      virtual_register_rename_(zone),
      scheduler_(nullptr),
      enable_scheduling_(enable_scheduling),
      enable_roots_relative_addressing_(enable_roots_relative_addressing),
      enable_switch_jump_table_(enable_switch_jump_table),
      poisoning_level_(poisoning_level),
      frame_(frame),
      instruction_selection_failed_(false),
      instr_origins_(sequence->zone()),
      trace_turbo_(trace_turbo) {
  instructions_.reserve(node_count);
  continuation_inputs_.reserve(5);
  continuation_outputs_.reserve(2);

  // Instruction origins are only tracked when emitting Turbolizer JSON;
  // {-1, 0} marks "no origin recorded yet" for every node.
  if (trace_turbo_ == kEnableTraceTurboJson) {
    instr_origins_.assign(node_count, {-1, 0});
  }
}
     66 
// Top-level driver: selects instructions for every block, then replays the
// buffered instructions into the InstructionSequence (optionally via the
// instruction scheduler). Returns false if selection failed (e.g. operand
// count limits were exceeded).
bool InstructionSelector::SelectInstructions() {
  // Mark the inputs of all phis in loop headers as used.
  BasicBlockVector* blocks = schedule()->rpo_order();
  for (auto const block : *blocks) {
    if (!block->IsLoopHeader()) continue;
    DCHECK_LE(2u, block->PredecessorCount());
    for (Node* const phi : *block) {
      if (phi->opcode() != IrOpcode::kPhi) continue;

      // Mark all inputs as used.
      for (Node* const input : phi->inputs()) {
        MarkAsUsed(input);
      }
    }
  }

  // Visit each basic block in post order (reverse of the RPO list).
  for (auto i = blocks->rbegin(); i != blocks->rend(); ++i) {
    VisitBlock(*i);
    if (instruction_selection_failed()) return false;
  }

  // Schedule the selected instructions.
  if (UseInstructionScheduling()) {
    scheduler_ = new (zone()) InstructionScheduler(zone(), sequence());
  }

  for (auto const block : *blocks) {
    InstructionBlock* instruction_block =
        sequence()->InstructionBlockAt(RpoNumber::FromInt(block->rpo_number()));
    for (size_t i = 0; i < instruction_block->phis().size(); i++) {
      UpdateRenamesInPhi(instruction_block->PhiAt(i));
    }
    // NOTE: within the buffer, code_start() >= code_end() (checked below);
    // instructions are replayed from higher to lower index, and the entry at
    // {end} is added as the block terminator.
    size_t end = instruction_block->code_end();
    size_t start = instruction_block->code_start();
    DCHECK_LE(end, start);
    StartBlock(RpoNumber::FromInt(block->rpo_number()));
    if (end != start) {
      while (start-- > end + 1) {
        UpdateRenames(instructions_[start]);
        AddInstruction(instructions_[start]);
      }
      UpdateRenames(instructions_[end]);
      AddTerminator(instructions_[end]);
    }
    EndBlock(RpoNumber::FromInt(block->rpo_number()));
  }
#if DEBUG
  sequence()->ValidateSSA();
#endif
  return true;
}
    119 
    120 void InstructionSelector::StartBlock(RpoNumber rpo) {
    121   if (UseInstructionScheduling()) {
    122     DCHECK_NOT_NULL(scheduler_);
    123     scheduler_->StartBlock(rpo);
    124   } else {
    125     sequence()->StartBlock(rpo);
    126   }
    127 }
    128 
    129 
    130 void InstructionSelector::EndBlock(RpoNumber rpo) {
    131   if (UseInstructionScheduling()) {
    132     DCHECK_NOT_NULL(scheduler_);
    133     scheduler_->EndBlock(rpo);
    134   } else {
    135     sequence()->EndBlock(rpo);
    136   }
    137 }
    138 
    139 void InstructionSelector::AddTerminator(Instruction* instr) {
    140   if (UseInstructionScheduling()) {
    141     DCHECK_NOT_NULL(scheduler_);
    142     scheduler_->AddTerminator(instr);
    143   } else {
    144     sequence()->AddInstruction(instr);
    145   }
    146 }
    147 
    148 void InstructionSelector::AddInstruction(Instruction* instr) {
    149   if (UseInstructionScheduling()) {
    150     DCHECK_NOT_NULL(scheduler_);
    151     scheduler_->AddInstruction(instr);
    152   } else {
    153     sequence()->AddInstruction(instr);
    154   }
    155 }
    156 
    157 Instruction* InstructionSelector::Emit(InstructionCode opcode,
    158                                        InstructionOperand output,
    159                                        size_t temp_count,
    160                                        InstructionOperand* temps) {
    161   size_t output_count = output.IsInvalid() ? 0 : 1;
    162   return Emit(opcode, output_count, &output, 0, nullptr, temp_count, temps);
    163 }
    164 
    165 
    166 Instruction* InstructionSelector::Emit(InstructionCode opcode,
    167                                        InstructionOperand output,
    168                                        InstructionOperand a, size_t temp_count,
    169                                        InstructionOperand* temps) {
    170   size_t output_count = output.IsInvalid() ? 0 : 1;
    171   return Emit(opcode, output_count, &output, 1, &a, temp_count, temps);
    172 }
    173 
    174 
    175 Instruction* InstructionSelector::Emit(InstructionCode opcode,
    176                                        InstructionOperand output,
    177                                        InstructionOperand a,
    178                                        InstructionOperand b, size_t temp_count,
    179                                        InstructionOperand* temps) {
    180   size_t output_count = output.IsInvalid() ? 0 : 1;
    181   InstructionOperand inputs[] = {a, b};
    182   size_t input_count = arraysize(inputs);
    183   return Emit(opcode, output_count, &output, input_count, inputs, temp_count,
    184               temps);
    185 }
    186 
    187 
    188 Instruction* InstructionSelector::Emit(InstructionCode opcode,
    189                                        InstructionOperand output,
    190                                        InstructionOperand a,
    191                                        InstructionOperand b,
    192                                        InstructionOperand c, size_t temp_count,
    193                                        InstructionOperand* temps) {
    194   size_t output_count = output.IsInvalid() ? 0 : 1;
    195   InstructionOperand inputs[] = {a, b, c};
    196   size_t input_count = arraysize(inputs);
    197   return Emit(opcode, output_count, &output, input_count, inputs, temp_count,
    198               temps);
    199 }
    200 
    201 
    202 Instruction* InstructionSelector::Emit(
    203     InstructionCode opcode, InstructionOperand output, InstructionOperand a,
    204     InstructionOperand b, InstructionOperand c, InstructionOperand d,
    205     size_t temp_count, InstructionOperand* temps) {
    206   size_t output_count = output.IsInvalid() ? 0 : 1;
    207   InstructionOperand inputs[] = {a, b, c, d};
    208   size_t input_count = arraysize(inputs);
    209   return Emit(opcode, output_count, &output, input_count, inputs, temp_count,
    210               temps);
    211 }
    212 
    213 
    214 Instruction* InstructionSelector::Emit(
    215     InstructionCode opcode, InstructionOperand output, InstructionOperand a,
    216     InstructionOperand b, InstructionOperand c, InstructionOperand d,
    217     InstructionOperand e, size_t temp_count, InstructionOperand* temps) {
    218   size_t output_count = output.IsInvalid() ? 0 : 1;
    219   InstructionOperand inputs[] = {a, b, c, d, e};
    220   size_t input_count = arraysize(inputs);
    221   return Emit(opcode, output_count, &output, input_count, inputs, temp_count,
    222               temps);
    223 }
    224 
    225 
    226 Instruction* InstructionSelector::Emit(
    227     InstructionCode opcode, InstructionOperand output, InstructionOperand a,
    228     InstructionOperand b, InstructionOperand c, InstructionOperand d,
    229     InstructionOperand e, InstructionOperand f, size_t temp_count,
    230     InstructionOperand* temps) {
    231   size_t output_count = output.IsInvalid() ? 0 : 1;
    232   InstructionOperand inputs[] = {a, b, c, d, e, f};
    233   size_t input_count = arraysize(inputs);
    234   return Emit(opcode, output_count, &output, input_count, inputs, temp_count,
    235               temps);
    236 }
    237 
    238 
    239 Instruction* InstructionSelector::Emit(
    240     InstructionCode opcode, size_t output_count, InstructionOperand* outputs,
    241     size_t input_count, InstructionOperand* inputs, size_t temp_count,
    242     InstructionOperand* temps) {
    243   if (output_count >= Instruction::kMaxOutputCount ||
    244       input_count >= Instruction::kMaxInputCount ||
    245       temp_count >= Instruction::kMaxTempCount) {
    246     set_instruction_selection_failed();
    247     return nullptr;
    248   }
    249 
    250   Instruction* instr =
    251       Instruction::New(instruction_zone(), opcode, output_count, outputs,
    252                        input_count, inputs, temp_count, temps);
    253   return Emit(instr);
    254 }
    255 
    256 
// Appends {instr} to the buffered instruction list and returns it unchanged.
Instruction* InstructionSelector::Emit(Instruction* instr) {
  instructions_.push_back(instr);
  return instr;
}
    261 
    262 
    263 bool InstructionSelector::CanCover(Node* user, Node* node) const {
    264   // 1. Both {user} and {node} must be in the same basic block.
    265   if (schedule()->block(node) != schedule()->block(user)) {
    266     return false;
    267   }
    268   // 2. Pure {node}s must be owned by the {user}.
    269   if (node->op()->HasProperty(Operator::kPure)) {
    270     return node->OwnedBy(user);
    271   }
    272   // 3. Impure {node}s must match the effect level of {user}.
    273   if (GetEffectLevel(node) != GetEffectLevel(user)) {
    274     return false;
    275   }
    276   // 4. Only {node} must have value edges pointing to {user}.
    277   for (Edge const edge : node->use_edges()) {
    278     if (edge.from() != user && NodeProperties::IsValueEdge(edge)) {
    279       return false;
    280     }
    281   }
    282   return true;
    283 }
    284 
    285 bool InstructionSelector::IsOnlyUserOfNodeInSameBlock(Node* user,
    286                                                       Node* node) const {
    287   BasicBlock* bb_user = schedule()->block(user);
    288   BasicBlock* bb_node = schedule()->block(node);
    289   if (bb_user != bb_node) return false;
    290   for (Edge const edge : node->use_edges()) {
    291     Node* from = edge.from();
    292     if ((from != user) && (schedule()->block(from) == bb_user)) {
    293       return false;
    294     }
    295   }
    296   return true;
    297 }
    298 
    299 void InstructionSelector::UpdateRenames(Instruction* instruction) {
    300   for (size_t i = 0; i < instruction->InputCount(); i++) {
    301     TryRename(instruction->InputAt(i));
    302   }
    303 }
    304 
    305 void InstructionSelector::UpdateRenamesInPhi(PhiInstruction* phi) {
    306   for (size_t i = 0; i < phi->operands().size(); i++) {
    307     int vreg = phi->operands()[i];
    308     int renamed = GetRename(vreg);
    309     if (vreg != renamed) {
    310       phi->RenameInput(i, renamed);
    311     }
    312   }
    313 }
    314 
    315 int InstructionSelector::GetRename(int virtual_register) {
    316   int rename = virtual_register;
    317   while (true) {
    318     if (static_cast<size_t>(rename) >= virtual_register_rename_.size()) break;
    319     int next = virtual_register_rename_[rename];
    320     if (next == InstructionOperand::kInvalidVirtualRegister) {
    321       break;
    322     }
    323     rename = next;
    324   }
    325   return rename;
    326 }
    327 
    328 void InstructionSelector::TryRename(InstructionOperand* op) {
    329   if (!op->IsUnallocated()) return;
    330   UnallocatedOperand* unalloc = UnallocatedOperand::cast(op);
    331   int vreg = unalloc->virtual_register();
    332   int rename = GetRename(vreg);
    333   if (rename != vreg) {
    334     *unalloc = UnallocatedOperand(*unalloc, rename);
    335   }
    336 }
    337 
    338 void InstructionSelector::SetRename(const Node* node, const Node* rename) {
    339   int vreg = GetVirtualRegister(node);
    340   if (static_cast<size_t>(vreg) >= virtual_register_rename_.size()) {
    341     int invalid = InstructionOperand::kInvalidVirtualRegister;
    342     virtual_register_rename_.resize(vreg + 1, invalid);
    343   }
    344   virtual_register_rename_[vreg] = GetVirtualRegister(rename);
    345 }
    346 
    347 int InstructionSelector::GetVirtualRegister(const Node* node) {
    348   DCHECK_NOT_NULL(node);
    349   size_t const id = node->id();
    350   DCHECK_LT(id, virtual_registers_.size());
    351   int virtual_register = virtual_registers_[id];
    352   if (virtual_register == InstructionOperand::kInvalidVirtualRegister) {
    353     virtual_register = sequence()->NextVirtualRegister();
    354     virtual_registers_[id] = virtual_register;
    355   }
    356   return virtual_register;
    357 }
    358 
    359 
    360 const std::map<NodeId, int> InstructionSelector::GetVirtualRegistersForTesting()
    361     const {
    362   std::map<NodeId, int> virtual_registers;
    363   for (size_t n = 0; n < virtual_registers_.size(); ++n) {
    364     if (virtual_registers_[n] != InstructionOperand::kInvalidVirtualRegister) {
    365       NodeId const id = static_cast<NodeId>(n);
    366       virtual_registers.insert(std::make_pair(id, virtual_registers_[n]));
    367     }
    368   }
    369   return virtual_registers;
    370 }
    371 
    372 
    373 bool InstructionSelector::IsDefined(Node* node) const {
    374   DCHECK_NOT_NULL(node);
    375   size_t const id = node->id();
    376   DCHECK_LT(id, defined_.size());
    377   return defined_[id];
    378 }
    379 
    380 
    381 void InstructionSelector::MarkAsDefined(Node* node) {
    382   DCHECK_NOT_NULL(node);
    383   size_t const id = node->id();
    384   DCHECK_LT(id, defined_.size());
    385   defined_[id] = true;
    386 }
    387 
    388 
    389 bool InstructionSelector::IsUsed(Node* node) const {
    390   DCHECK_NOT_NULL(node);
    391   // TODO(bmeurer): This is a terrible monster hack, but we have to make sure
    392   // that the Retain is actually emitted, otherwise the GC will mess up.
    393   if (node->opcode() == IrOpcode::kRetain) return true;
    394   if (!node->op()->HasProperty(Operator::kEliminatable)) return true;
    395   size_t const id = node->id();
    396   DCHECK_LT(id, used_.size());
    397   return used_[id];
    398 }
    399 
    400 
    401 void InstructionSelector::MarkAsUsed(Node* node) {
    402   DCHECK_NOT_NULL(node);
    403   size_t const id = node->id();
    404   DCHECK_LT(id, used_.size());
    405   used_[id] = true;
    406 }
    407 
    408 int InstructionSelector::GetEffectLevel(Node* node) const {
    409   DCHECK_NOT_NULL(node);
    410   size_t const id = node->id();
    411   DCHECK_LT(id, effect_level_.size());
    412   return effect_level_[id];
    413 }
    414 
    415 void InstructionSelector::SetEffectLevel(Node* node, int effect_level) {
    416   DCHECK_NOT_NULL(node);
    417   size_t const id = node->id();
    418   DCHECK_LT(id, effect_level_.size());
    419   effect_level_[id] = effect_level;
    420 }
    421 
    422 bool InstructionSelector::CanAddressRelativeToRootsRegister() const {
    423   return enable_roots_relative_addressing_ == kEnableRootsRelativeAddressing &&
    424          CanUseRootsRegister();
    425 }
    426 
    427 bool InstructionSelector::CanUseRootsRegister() const {
    428   return linkage()->GetIncomingDescriptor()->flags() &
    429          CallDescriptor::kCanUseRoots;
    430 }
    431 
    432 void InstructionSelector::MarkAsRepresentation(MachineRepresentation rep,
    433                                                const InstructionOperand& op) {
    434   UnallocatedOperand unalloc = UnallocatedOperand::cast(op);
    435   sequence()->MarkAsRepresentation(rep, unalloc.virtual_register());
    436 }
    437 
    438 
    439 void InstructionSelector::MarkAsRepresentation(MachineRepresentation rep,
    440                                                Node* node) {
    441   sequence()->MarkAsRepresentation(rep, GetVirtualRegister(node));
    442 }
    443 
namespace {

// Translates one frame-state input node into an instruction operand for the
// deoptimization info, according to {kind} and the value's machine
// representation {rep}. A default-constructed (invalid) InstructionOperand
// is the signal for an optimized-out / impossible value.
InstructionOperand OperandForDeopt(Isolate* isolate, OperandGenerator* g,
                                   Node* input, FrameStateInputKind kind,
                                   MachineRepresentation rep) {
  if (rep == MachineRepresentation::kNone) {
    return g->TempImmediate(FrameStateDescriptor::kImpossibleValue);
  }

  switch (input->opcode()) {
    case IrOpcode::kInt32Constant:
    case IrOpcode::kInt64Constant:
    case IrOpcode::kNumberConstant:
    case IrOpcode::kFloat32Constant:
    case IrOpcode::kFloat64Constant:
      // Plain constants become immediates in the deopt translation.
      return g->UseImmediate(input);
    case IrOpcode::kHeapConstant: {
      if (!CanBeTaggedPointer(rep)) {
        // If we have inconsistent static and dynamic types, e.g. if we
        // smi-check a string, we can get here with a heap object that
        // says it is a smi. In that case, we return an invalid instruction
        // operand, which will be interpreted as an optimized-out value.

        // TODO(jarin) Ideally, we should turn the current instruction
        // into an abort (we should never execute it).
        return InstructionOperand();
      }

      Handle<HeapObject> constant = HeapConstantOf(input->op());
      Heap::RootListIndex root_index;
      if (isolate->heap()->IsRootHandle(constant, &root_index) &&
          root_index == Heap::kOptimizedOutRootIndex) {
        // For an optimized-out object we return an invalid instruction
        // operand, so that we take the fast path for optimized-out values.
        return InstructionOperand();
      }

      return g->UseImmediate(input);
    }
    case IrOpcode::kArgumentsElementsState:
    case IrOpcode::kArgumentsLengthState:
    case IrOpcode::kObjectState:
    case IrOpcode::kTypedObjectState:
      // Object/arguments state nodes are handled by the caller
      // (AddOperandToStateValueDescriptor) and must not reach this helper.
      UNREACHABLE();
      break;
    default:
      switch (kind) {
        case FrameStateInputKind::kStackSlot:
          return g->UseUniqueSlot(input);
        case FrameStateInputKind::kAny:
          // Currently deopts "wrap" other operations, so the deopt's inputs
          // are potentially needed until the end of the deoptimising code.
          return g->UseAnyAtEnd(input);
      }
  }
  UNREACHABLE();
}

}  // namespace
    503 
// Tracks objects appearing in deopt frame states so that repeated
// occurrences of the same object can be encoded as a reference to the first
// occurrence instead of being described again.
class StateObjectDeduplicator {
 public:
  explicit StateObjectDeduplicator(Zone* zone) : objects_(zone) {}
  // Sentinel returned by GetObjectId() when {node} has not been seen before.
  static const size_t kNotDuplicated = SIZE_MAX;

  // Returns the index of a previously inserted object matching {node} —
  // either by node identity or by equal ObjectId — or kNotDuplicated.
  size_t GetObjectId(Node* node) {
    DCHECK(node->opcode() == IrOpcode::kTypedObjectState ||
           node->opcode() == IrOpcode::kObjectId ||
           node->opcode() == IrOpcode::kArgumentsElementsState);
    for (size_t i = 0; i < objects_.size(); ++i) {
      if (objects_[i] == node) return i;
      // ObjectId nodes are the Turbofan way to express objects with the same
      // identity in the deopt info. So they should always be mapped to
      // previously appearing TypedObjectState nodes.
      if (HasObjectId(objects_[i]) && HasObjectId(node) &&
          ObjectIdOf(objects_[i]->op()) == ObjectIdOf(node->op())) {
        return i;
      }
    }
    // Only first occurrences may fall through; a bare ObjectId must always
    // have matched an earlier entry above.
    DCHECK(node->opcode() == IrOpcode::kTypedObjectState ||
           node->opcode() == IrOpcode::kArgumentsElementsState);
    return kNotDuplicated;
  }

  // Registers {node} and returns its newly assigned index.
  size_t InsertObject(Node* node) {
    DCHECK(node->opcode() == IrOpcode::kTypedObjectState ||
           node->opcode() == IrOpcode::kObjectId ||
           node->opcode() == IrOpcode::kArgumentsElementsState);
    size_t id = objects_.size();
    objects_.push_back(node);
    return id;
  }

 private:
  // True for node kinds that carry an ObjectId in their operator.
  static bool HasObjectId(Node* node) {
    return node->opcode() == IrOpcode::kTypedObjectState ||
           node->opcode() == IrOpcode::kObjectId;
  }

  ZoneVector<Node*> objects_;
};
    545 
// Appends the state-value description for one frame-state input {input} to
// {values}, pushing any live-value operands onto {inputs}. Recurses into
// object fields for TypedObjectState. A null {input} stands for an
// optimized-out value. Returns the number of instruction operands added to
// inputs.
size_t InstructionSelector::AddOperandToStateValueDescriptor(
    StateValueList* values, InstructionOperandVector* inputs,
    OperandGenerator* g, StateObjectDeduplicator* deduplicator, Node* input,
    MachineType type, FrameStateInputKind kind, Zone* zone) {
  if (input == nullptr) {
    values->PushOptimizedOut();
    return 0;
  }

  switch (input->opcode()) {
    case IrOpcode::kArgumentsElementsState: {
      values->PushArgumentsElements(ArgumentsStateTypeOf(input->op()));
      // The elements backing store of an arguments object participates in the
      // duplicate object counting, but can itself never appear duplicated.
      DCHECK_EQ(StateObjectDeduplicator::kNotDuplicated,
                deduplicator->GetObjectId(input));
      deduplicator->InsertObject(input);
      return 0;
    }
    case IrOpcode::kArgumentsLengthState: {
      values->PushArgumentsLength(ArgumentsStateTypeOf(input->op()));
      return 0;
    }
    case IrOpcode::kObjectState: {
      // Untyped ObjectState must have been typed away before selection.
      UNREACHABLE();
    }
    case IrOpcode::kTypedObjectState:
    case IrOpcode::kObjectId: {
      size_t id = deduplicator->GetObjectId(input);
      if (id == StateObjectDeduplicator::kNotDuplicated) {
        // First occurrence: register the object and recurse into its fields.
        DCHECK_EQ(IrOpcode::kTypedObjectState, input->opcode());
        size_t entries = 0;
        id = deduplicator->InsertObject(input);
        StateValueList* nested = values->PushRecursiveField(zone, id);
        int const input_count = input->op()->ValueInputCount();
        ZoneVector<MachineType> const* types = MachineTypesOf(input->op());
        for (int i = 0; i < input_count; ++i) {
          entries += AddOperandToStateValueDescriptor(
              nested, inputs, g, deduplicator, input->InputAt(i), types->at(i),
              kind, zone);
        }
        return entries;
      } else {
        // Deoptimizer counts duplicate objects for the running id, so we have
        // to push the input again.
        deduplicator->InsertObject(input);
        values->PushDuplicate(id);
        return 0;
      }
    }
    default: {
      // Ordinary value: materialize an operand for the deopt translation.
      InstructionOperand op =
          OperandForDeopt(isolate(), g, input, kind, type.representation());
      if (op.kind() == InstructionOperand::INVALID) {
        // Invalid operand means the value is impossible or optimized-out.
        values->PushOptimizedOut();
        return 0;
      } else {
        inputs->push_back(op);
        values->PushPlain(type);
        return 1;
      }
    }
  }
}
    612 
    613 
// Returns the number of instruction operands added to inputs.
// Flattens one FrameState node (and, recursively, its outer frame states)
// into {inputs}/{descriptor}'s state-value list. The emission order —
// outer states, then function, parameters, context, locals, stack — must
// match what the deoptimizer expects when reading the translation back.
size_t InstructionSelector::AddInputsToFrameStateDescriptor(
    FrameStateDescriptor* descriptor, Node* state, OperandGenerator* g,
    StateObjectDeduplicator* deduplicator, InstructionOperandVector* inputs,
    FrameStateInputKind kind, Zone* zone) {
  DCHECK_EQ(IrOpcode::kFrameState, state->op()->opcode());

  size_t entries = 0;
  size_t initial_size = inputs->size();
  USE(initial_size);  // initial_size is only used for debug.

  // Inlined/outer frames are flattened first, depth-first.
  if (descriptor->outer_state()) {
    entries += AddInputsToFrameStateDescriptor(
        descriptor->outer_state(), state->InputAt(kFrameStateOuterStateInput),
        g, deduplicator, inputs, kind, zone);
  }

  Node* parameters = state->InputAt(kFrameStateParametersInput);
  Node* locals = state->InputAt(kFrameStateLocalsInput);
  Node* stack = state->InputAt(kFrameStateStackInput);
  Node* context = state->InputAt(kFrameStateContextInput);
  Node* function = state->InputAt(kFrameStateFunctionInput);

  DCHECK_EQ(descriptor->parameters_count(),
            StateValuesAccess(parameters).size());
  DCHECK_EQ(descriptor->locals_count(), StateValuesAccess(locals).size());
  DCHECK_EQ(descriptor->stack_count(), StateValuesAccess(stack).size());

  StateValueList* values_descriptor = descriptor->GetStateValueDescriptors();

  DCHECK_EQ(values_descriptor->size(), 0u);
  values_descriptor->ReserveSize(descriptor->GetSize());

  // The closure is always materialized via a stack slot.
  entries += AddOperandToStateValueDescriptor(
      values_descriptor, inputs, g, deduplicator, function,
      MachineType::AnyTagged(), FrameStateInputKind::kStackSlot, zone);
  for (StateValuesAccess::TypedNode input_node :
       StateValuesAccess(parameters)) {
    entries += AddOperandToStateValueDescriptor(values_descriptor, inputs, g,
                                                deduplicator, input_node.node,
                                                input_node.type, kind, zone);
  }
  if (descriptor->HasContext()) {
    entries += AddOperandToStateValueDescriptor(
        values_descriptor, inputs, g, deduplicator, context,
        MachineType::AnyTagged(), FrameStateInputKind::kStackSlot, zone);
  }
  for (StateValuesAccess::TypedNode input_node : StateValuesAccess(locals)) {
    entries += AddOperandToStateValueDescriptor(values_descriptor, inputs, g,
                                                deduplicator, input_node.node,
                                                input_node.type, kind, zone);
  }
  for (StateValuesAccess::TypedNode input_node : StateValuesAccess(stack)) {
    entries += AddOperandToStateValueDescriptor(values_descriptor, inputs, g,
                                                deduplicator, input_node.node,
                                                input_node.type, kind, zone);
  }
  DCHECK_EQ(initial_size + entries, inputs->size());
  return entries;
}
    674 
// Emits {opcode} with no explicit operands; everything else is derived from
// the continuation by the general overload.
Instruction* InstructionSelector::EmitWithContinuation(
    InstructionCode opcode, FlagsContinuation* cont) {
  return EmitWithContinuation(opcode, 0, nullptr, 0, nullptr, cont);
}
    679 
// Single-input convenience overload for EmitWithContinuation.
Instruction* InstructionSelector::EmitWithContinuation(
    InstructionCode opcode, InstructionOperand a, FlagsContinuation* cont) {
  return EmitWithContinuation(opcode, 0, nullptr, 1, &a, cont);
}
    684 
    685 Instruction* InstructionSelector::EmitWithContinuation(
    686     InstructionCode opcode, InstructionOperand a, InstructionOperand b,
    687     FlagsContinuation* cont) {
    688   InstructionOperand inputs[] = {a, b};
    689   return EmitWithContinuation(opcode, 0, nullptr, arraysize(inputs), inputs,
    690                               cont);
    691 }
    692 
    693 Instruction* InstructionSelector::EmitWithContinuation(
    694     InstructionCode opcode, InstructionOperand a, InstructionOperand b,
    695     InstructionOperand c, FlagsContinuation* cont) {
    696   InstructionOperand inputs[] = {a, b, c};
    697   return EmitWithContinuation(opcode, 0, nullptr, arraysize(inputs), inputs,
    698                               cont);
    699 }
    700 
// Emits {opcode} extended with the operands implied by the flags
// continuation {cont}: branch targets, deoptimization arguments, a result
// register for materialized conditions, or a trap id. The caller's operands
// come first; continuation-specific operands are appended after them.
Instruction* InstructionSelector::EmitWithContinuation(
    InstructionCode opcode, size_t output_count, InstructionOperand* outputs,
    size_t input_count, InstructionOperand* inputs, FlagsContinuation* cont) {
  OperandGenerator g(this);

  // Fold the continuation's condition/mode bits into the opcode.
  opcode = cont->Encode(opcode);

  // continuation_inputs_/outputs_ are reused scratch buffers (see ctor).
  continuation_inputs_.resize(0);
  for (size_t i = 0; i < input_count; i++) {
    continuation_inputs_.push_back(inputs[i]);
  }

  continuation_outputs_.resize(0);
  for (size_t i = 0; i < output_count; i++) {
    continuation_outputs_.push_back(outputs[i]);
  }

  if (cont->IsBranch()) {
    // Branches take the true and false block labels as extra inputs.
    continuation_inputs_.push_back(g.Label(cont->true_block()));
    continuation_inputs_.push_back(g.Label(cont->false_block()));
  } else if (cont->IsDeoptimize()) {
    // Record how many of the inputs belong to the caller so the code
    // generator can separate them from the frame-state inputs added below.
    opcode |= MiscField::encode(static_cast<int>(input_count));
    AppendDeoptimizeArguments(&continuation_inputs_, cont->kind(),
                              cont->reason(), cont->feedback(),
                              cont->frame_state());
  } else if (cont->IsSet()) {
    // Materializing the condition needs a result register.
    continuation_outputs_.push_back(g.DefineAsRegister(cont->result()));
  } else if (cont->IsTrap()) {
    int trap_id = static_cast<int>(cont->trap_id());
    continuation_inputs_.push_back(g.UseImmediate(trap_id));
  } else {
    DCHECK(cont->IsNone());
  }

  size_t const emit_inputs_size = continuation_inputs_.size();
  auto* emit_inputs =
      emit_inputs_size ? &continuation_inputs_.front() : nullptr;
  size_t const emit_outputs_size = continuation_outputs_.size();
  auto* emit_outputs =
      emit_outputs_size ? &continuation_outputs_.front() : nullptr;
  return Emit(opcode, emit_outputs_size, emit_outputs, emit_inputs_size,
              emit_inputs, 0, nullptr);
}
    744 
    745 void InstructionSelector::AppendDeoptimizeArguments(
    746     InstructionOperandVector* args, DeoptimizeKind kind,
    747     DeoptimizeReason reason, VectorSlotPair const& feedback,
    748     Node* frame_state) {
    749   OperandGenerator g(this);
    750   FrameStateDescriptor* const descriptor = GetFrameStateDescriptor(frame_state);
    751   DCHECK_NE(DeoptimizeKind::kLazy, kind);
    752   int const state_id =
    753       sequence()->AddDeoptimizationEntry(descriptor, kind, reason, feedback);
    754   args->push_back(g.TempImmediate(state_id));
    755   StateObjectDeduplicator deduplicator(instruction_zone());
    756   AddInputsToFrameStateDescriptor(descriptor, frame_state, &g, &deduplicator,
    757                                   args, FrameStateInputKind::kAny,
    758                                   instruction_zone());
    759 }
    760 
    761 Instruction* InstructionSelector::EmitDeoptimize(
    762     InstructionCode opcode, size_t output_count, InstructionOperand* outputs,
    763     size_t input_count, InstructionOperand* inputs, DeoptimizeKind kind,
    764     DeoptimizeReason reason, VectorSlotPair const& feedback,
    765     Node* frame_state) {
    766   InstructionOperandVector args(instruction_zone());
    767   for (size_t i = 0; i < input_count; ++i) {
    768     args.push_back(inputs[i]);
    769   }
    770   opcode |= MiscField::encode(static_cast<int>(input_count));
    771   AppendDeoptimizeArguments(&args, kind, reason, feedback, frame_state);
    772   return Emit(opcode, output_count, outputs, args.size(), &args.front(), 0,
    773               nullptr);
    774 }
    775 
    776 // An internal helper class for generating the operands to calls.
    777 // TODO(bmeurer): Get rid of the CallBuffer business and make
    778 // InstructionSelector::VisitCall platform independent instead.
    779 struct CallBuffer {
    780   CallBuffer(Zone* zone, const CallDescriptor* call_descriptor,
    781              FrameStateDescriptor* frame_state)
    782       : descriptor(call_descriptor),
    783         frame_state_descriptor(frame_state),
    784         output_nodes(zone),
    785         outputs(zone),
    786         instruction_args(zone),
    787         pushed_nodes(zone) {
    788     output_nodes.reserve(call_descriptor->ReturnCount());
    789     outputs.reserve(call_descriptor->ReturnCount());
    790     pushed_nodes.reserve(input_count());
    791     instruction_args.reserve(input_count() + frame_state_value_count());
    792   }
    793 
    794 
    795   const CallDescriptor* descriptor;
    796   FrameStateDescriptor* frame_state_descriptor;
    797   ZoneVector<PushParameter> output_nodes;
    798   InstructionOperandVector outputs;
    799   InstructionOperandVector instruction_args;
    800   ZoneVector<PushParameter> pushed_nodes;
    801 
    802   size_t input_count() const { return descriptor->InputCount(); }
    803 
    804   size_t frame_state_count() const { return descriptor->FrameStateCount(); }
    805 
    806   size_t frame_state_value_count() const {
    807     return (frame_state_descriptor == nullptr)
    808                ? 0
    809                : (frame_state_descriptor->GetTotalSize() +
    810                   1);  // Include deopt id.
    811   }
    812 };
    813 
    814 
// Fills {buffer} with the operands for {call}: collects the return-value
// projections into output_nodes/outputs, selects how the callee is
// addressed (immediate, fixed register, or allocated register), appends the
// lazy-deopt frame state if present, and splits the remaining arguments
// into register/slot operands (instruction_args) and explicit stack pushes
// (pushed_nodes).
// TODO(bmeurer): Get rid of the CallBuffer business and make
// InstructionSelector::VisitCall platform independent instead.
void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
                                               CallBufferFlags flags,
                                               bool is_tail_call,
                                               int stack_param_delta) {
  OperandGenerator g(this);
  size_t ret_count = buffer->descriptor->ReturnCount();
  DCHECK_LE(call->op()->ValueOutputCount(), ret_count);
  DCHECK_EQ(
      call->op()->ValueInputCount(),
      static_cast<int>(buffer->input_count() + buffer->frame_state_count()));

  if (ret_count > 0) {
    // Collect the projections that represent multiple outputs from this call.
    if (ret_count == 1) {
      PushParameter result = {call, buffer->descriptor->GetReturnLocation(0)};
      buffer->output_nodes.push_back(result);
    } else {
      buffer->output_nodes.resize(ret_count);
      int stack_count = 0;
      for (size_t i = 0; i < ret_count; ++i) {
        LinkageLocation location = buffer->descriptor->GetReturnLocation(i);
        buffer->output_nodes[i] = PushParameter(nullptr, location);
        if (location.IsCallerFrameSlot()) {
          stack_count += location.GetSizeInPointers();
        }
      }
      // Match each Projection use of the call to its return slot.
      for (Edge const edge : call->use_edges()) {
        if (!NodeProperties::IsValueEdge(edge)) continue;
        Node* node = edge.from();
        DCHECK_EQ(IrOpcode::kProjection, node->opcode());
        size_t const index = ProjectionIndexOf(node->op());

        DCHECK_LT(index, buffer->output_nodes.size());
        DCHECK(!buffer->output_nodes[index].node);
        buffer->output_nodes[index].node = node;
      }
      frame_->EnsureReturnSlots(stack_count);
    }

    // Filter out the outputs that aren't live because no projection uses them.
    size_t outputs_needed_by_framestate =
        buffer->frame_state_descriptor == nullptr
            ? 0
            : buffer->frame_state_descriptor->state_combine()
                  .ConsumedOutputCount();
    for (size_t i = 0; i < buffer->output_nodes.size(); i++) {
      bool output_is_live = buffer->output_nodes[i].node != nullptr ||
                            i < outputs_needed_by_framestate;
      if (output_is_live) {
        LinkageLocation location = buffer->output_nodes[i].location;
        MachineRepresentation rep = location.GetType().representation();

        Node* output = buffer->output_nodes[i].node;
        InstructionOperand op = output == nullptr
                                    ? g.TempLocation(location)
                                    : g.DefineAsLocation(output, location);
        MarkAsRepresentation(rep, op);

        // Returns in caller-frame slots (fixed-slot policy) stay in
        // output_nodes and are handled separately by the caller.
        if (!UnallocatedOperand::cast(op).HasFixedSlotPolicy()) {
          buffer->outputs.push_back(op);
          buffer->output_nodes[i].node = nullptr;
        }
      }
    }
  }

  // The first argument is always the callee code.
  Node* callee = call->InputAt(0);
  bool call_code_immediate = (flags & kCallCodeImmediate) != 0;
  bool call_address_immediate = (flags & kCallAddressImmediate) != 0;
  bool call_use_fixed_target_reg = (flags & kCallFixedTargetRegister) != 0;
  switch (buffer->descriptor->kind()) {
    case CallDescriptor::kCallCodeObject:
      // TODO(jgruber, v8:7449): The below is a hack to support tail-calls from
      // JS-linkage callers with a register code target. The problem is that the
      // code target register may be clobbered before the final jmp by
      // AssemblePopArgumentsAdaptorFrame. As a more permanent fix we could
      // entirely remove support for tail-calls from JS-linkage callers.
      buffer->instruction_args.push_back(
          (call_code_immediate && callee->opcode() == IrOpcode::kHeapConstant)
              ? g.UseImmediate(callee)
              : call_use_fixed_target_reg
                    ? g.UseFixed(callee, kJavaScriptCallCodeStartRegister)
                    : is_tail_call ? g.UseUniqueRegister(callee)
                                   : g.UseRegister(callee));
      break;
    case CallDescriptor::kCallAddress:
      buffer->instruction_args.push_back(
          (call_address_immediate &&
           callee->opcode() == IrOpcode::kExternalConstant)
              ? g.UseImmediate(callee)
              : call_use_fixed_target_reg
                    ? g.UseFixed(callee, kJavaScriptCallCodeStartRegister)
                    : g.UseRegister(callee));
      break;
    case CallDescriptor::kCallWasmFunction:
      buffer->instruction_args.push_back(
          (call_address_immediate &&
           (callee->opcode() == IrOpcode::kRelocatableInt64Constant ||
            callee->opcode() == IrOpcode::kRelocatableInt32Constant))
              ? g.UseImmediate(callee)
              : call_use_fixed_target_reg
                    ? g.UseFixed(callee, kJavaScriptCallCodeStartRegister)
                    : g.UseRegister(callee));
      break;
    case CallDescriptor::kCallJSFunction:
      buffer->instruction_args.push_back(
          g.UseLocation(callee, buffer->descriptor->GetInputLocation(0)));
      break;
  }
  DCHECK_EQ(1u, buffer->instruction_args.size());

  // Argument 1 is used for the poison-alias index (encoded in a word-sized
  // immediate). This is the index of the operand that aliases with the
  // poison register, or -1 if there is no aliasing.
  buffer->instruction_args.push_back(g.TempImmediate(-1));
  const size_t poison_alias_index = 1;
  DCHECK_EQ(buffer->instruction_args.size() - 1, poison_alias_index);

  // If the call needs a frame state, we insert the state information as
  // follows (n is the number of value inputs to the frame state):
  // arg 2               : deoptimization id.
  // arg 3 - arg (n + 2) : value inputs to the frame state.
  size_t frame_state_entries = 0;
  USE(frame_state_entries);  // frame_state_entries is only used for debug.
  if (buffer->frame_state_descriptor != nullptr) {
    Node* frame_state =
        call->InputAt(static_cast<int>(buffer->descriptor->InputCount()));

    // If it was a syntactic tail call we need to drop the current frame and
    // all the frames on top of it that are either an arguments adaptor frame
    // or a tail caller frame.
    if (is_tail_call) {
      frame_state = NodeProperties::GetFrameStateInput(frame_state);
      buffer->frame_state_descriptor =
          buffer->frame_state_descriptor->outer_state();
      while (buffer->frame_state_descriptor != nullptr &&
             buffer->frame_state_descriptor->type() ==
                 FrameStateType::kArgumentsAdaptor) {
        frame_state = NodeProperties::GetFrameStateInput(frame_state);
        buffer->frame_state_descriptor =
            buffer->frame_state_descriptor->outer_state();
      }
    }

    int const state_id = sequence()->AddDeoptimizationEntry(
        buffer->frame_state_descriptor, DeoptimizeKind::kLazy,
        DeoptimizeReason::kUnknown, VectorSlotPair());
    buffer->instruction_args.push_back(g.TempImmediate(state_id));

    StateObjectDeduplicator deduplicator(instruction_zone());

    // +1 accounts for the deopt id pushed just above.
    frame_state_entries =
        1 + AddInputsToFrameStateDescriptor(
                buffer->frame_state_descriptor, frame_state, &g, &deduplicator,
                &buffer->instruction_args, FrameStateInputKind::kStackSlot,
                instruction_zone());

    DCHECK_EQ(2 + frame_state_entries, buffer->instruction_args.size());
  }

  size_t input_count = static_cast<size_t>(buffer->input_count());

  // Split the arguments into pushed_nodes and instruction_args. Pushed
  // arguments require an explicit push instruction before the call and do
  // not appear as arguments to the call. Everything else ends up
  // as an InstructionOperand argument to the call.
  auto iter(call->inputs().begin());
  size_t pushed_count = 0;
  bool call_tail = (flags & kCallTail) != 0;
  for (size_t index = 0; index < input_count; ++iter, ++index) {
    DCHECK(iter != call->inputs().end());
    DCHECK_NE(IrOpcode::kFrameState, (*iter)->op()->opcode());
    if (index == 0) continue;  // The first argument (callee) is already done.

    LinkageLocation location = buffer->descriptor->GetInputLocation(index);
    if (call_tail) {
      location = LinkageLocation::ConvertToTailCallerLocation(
          location, stack_param_delta);
    }
    InstructionOperand op = g.UseLocation(*iter, location);
    UnallocatedOperand unallocated = UnallocatedOperand::cast(op);
    if (unallocated.HasFixedSlotPolicy() && !call_tail) {
      // Fixed-slot arguments become explicit pushes; fixed_slot_index is
      // negative for caller-frame slots, hence the sign flip.
      int stack_index = -unallocated.fixed_slot_index() - 1;
      if (static_cast<size_t>(stack_index) >= buffer->pushed_nodes.size()) {
        buffer->pushed_nodes.resize(stack_index + 1);
      }
      PushParameter param = {*iter, location};
      buffer->pushed_nodes[stack_index] = param;
      pushed_count++;
    } else {
      // If we do load poisoning and the linkage uses the poisoning register,
      // then we request the input in memory location, and during code
      // generation, we move the input to the register.
      if (poisoning_level_ != PoisoningMitigationLevel::kDontPoison &&
          unallocated.HasFixedRegisterPolicy()) {
        int reg = unallocated.fixed_register_index();
        if (reg == kSpeculationPoisonRegister.code()) {
          buffer->instruction_args[poison_alias_index] = g.TempImmediate(
              static_cast<int32_t>(buffer->instruction_args.size()));
          op = g.UseRegisterOrSlotOrConstant(*iter);
        }
      }
      buffer->instruction_args.push_back(op);
    }
  }
  DCHECK_EQ(input_count, buffer->instruction_args.size() + pushed_count -
                             frame_state_entries - 1);
  if (V8_TARGET_ARCH_STORES_RETURN_ADDRESS_ON_STACK && call_tail &&
      stack_param_delta != 0) {
    // For tail calls that change the size of their parameter list and keep
    // their return address on the stack, move the return address to just above
    // the parameters.
    LinkageLocation saved_return_location =
        LinkageLocation::ForSavedCallerReturnAddress();
    InstructionOperand return_address =
        g.UsePointerLocation(LinkageLocation::ConvertToTailCallerLocation(
                                 saved_return_location, stack_param_delta),
                             saved_return_location);
    buffer->instruction_args.push_back(return_address);
  }
}
   1039 
   1040 bool InstructionSelector::IsSourcePositionUsed(Node* node) {
   1041   return (source_position_mode_ == kAllSourcePositions ||
   1042           node->opcode() == IrOpcode::kCall ||
   1043           node->opcode() == IrOpcode::kCallWithCallerSavedRegisters ||
   1044           node->opcode() == IrOpcode::kTrapIf ||
   1045           node->opcode() == IrOpcode::kTrapUnless ||
   1046           node->opcode() == IrOpcode::kProtectedLoad ||
   1047           node->opcode() == IrOpcode::kProtectedStore);
   1048 }
   1049 
// Selects instructions for all nodes of {block}. Nodes are visited in
// reverse control-flow order so that architecture-specific matchers can
// cover several nodes at once; the instructions emitted for each node are
// then reversed to restore forward order within the node.
void InstructionSelector::VisitBlock(BasicBlock* block) {
  DCHECK(!current_block_);
  current_block_ = block;
  auto current_num_instructions = [&] {
    DCHECK_GE(kMaxInt, instructions_.size());
    return static_cast<int>(instructions_.size());
  };
  int current_block_end = current_num_instructions();

  // Pre-compute effect levels: each effectful node (stores, calls,
  // protected accesses) starts a new level, so later matching can tell
  // whether two memory operations are separated by a side effect.
  int effect_level = 0;
  for (Node* const node : *block) {
    SetEffectLevel(node, effect_level);
    if (node->opcode() == IrOpcode::kStore ||
        node->opcode() == IrOpcode::kUnalignedStore ||
        node->opcode() == IrOpcode::kCall ||
        node->opcode() == IrOpcode::kCallWithCallerSavedRegisters ||
        node->opcode() == IrOpcode::kProtectedLoad ||
        node->opcode() == IrOpcode::kProtectedStore) {
      ++effect_level;
    }
  }

  // We visit the control first, then the nodes in the block, so the block's
  // control input should be on the same effect level as the last node.
  if (block->control_input() != nullptr) {
    SetEffectLevel(block->control_input(), effect_level);
  }

  // Restores forward order of the instructions emitted since
  // {instruction_start} and attaches {node}'s source position (when used) to
  // the first of them. Returns false if instruction selection failed.
  auto FinishEmittedInstructions = [&](Node* node, int instruction_start) {
    if (instruction_selection_failed()) return false;
    if (current_num_instructions() == instruction_start) return true;
    std::reverse(instructions_.begin() + instruction_start,
                 instructions_.end());
    if (!node) return true;
    SourcePosition source_position = source_positions_->GetSourcePosition(node);
    if (source_position.IsKnown() && IsSourcePositionUsed(node)) {
      sequence()->SetSourcePosition(instructions_[instruction_start],
                                    source_position);
    }
    return true;
  };

  // Generate code for the block control "top down", but schedule the code
  // "bottom up".
  VisitControl(block);
  if (!FinishEmittedInstructions(block->control_input(), current_block_end))
    return;

  // Visit code in reverse control flow order, because architecture-specific
  // matching may cover more than one node at a time.
  for (auto node : base::Reversed(*block)) {
    int current_node_end = current_num_instructions();
    // Skip nodes that are unused or already defined.
    if (IsUsed(node) && !IsDefined(node)) {
      // Generate code for this node "top down", but schedule the code "bottom
      // up".
      VisitNode(node);
      if (!FinishEmittedInstructions(node, current_node_end)) return;
    }
    if (trace_turbo_ == kEnableTraceTurboJson) {
      instr_origins_[node->id()] = {current_num_instructions(),
                                    current_node_end};
    }
  }

  // We're done with the block.
  InstructionBlock* instruction_block =
      sequence()->InstructionBlockAt(RpoNumber::FromInt(block->rpo_number()));
  if (current_num_instructions() == current_block_end) {
    // Avoid empty block: insert a {kArchNop} instruction.
    Emit(Instruction::New(sequence()->zone(), kArchNop));
  }
  // NOTE(review): "start" is the larger index here because instructions_ was
  // filled while walking the block backwards; presumably fixed up when the
  // sequence is assembled — confirm against the sequence-building code.
  instruction_block->set_code_start(current_num_instructions());
  instruction_block->set_code_end(current_block_end);
  current_block_ = nullptr;
}
   1126 
   1127 
// Generates the instruction(s) for {block}'s control-flow exit: goto,
// call (with exception edge), tail call, branch, switch, return,
// deoptimize, or throw. The exit block has no control input and emits
// nothing.
void InstructionSelector::VisitControl(BasicBlock* block) {
#ifdef DEBUG
  // SSA deconstruction requires targets of branches not to have phis.
  // Edge split form guarantees this property, but is more strict.
  if (block->SuccessorCount() > 1) {
    for (BasicBlock* const successor : block->successors()) {
      for (Node* const node : *successor) {
        if (IrOpcode::IsPhiOpcode(node->opcode())) {
          std::ostringstream str;
          str << "You might have specified merged variables for a label with "
              << "only one predecessor." << std::endl
              << "# Current Block: " << *successor << std::endl
              << "#          Node: " << *node;
          FATAL("%s", str.str().c_str());
        }
      }
    }
  }
#endif

  Node* input = block->control_input();
  int instruction_end = static_cast<int>(instructions_.size());
  switch (block->control()) {
    case BasicBlock::kGoto:
      VisitGoto(block->SuccessorAt(0));
      break;
    case BasicBlock::kCall: {
      DCHECK_EQ(IrOpcode::kCall, input->opcode());
      // Successor 0 is the normal continuation, successor 1 the handler.
      BasicBlock* success = block->SuccessorAt(0);
      BasicBlock* exception = block->SuccessorAt(1);
      VisitCall(input, exception);
      VisitGoto(success);
      break;
    }
    case BasicBlock::kTailCall: {
      DCHECK_EQ(IrOpcode::kTailCall, input->opcode());
      VisitTailCall(input);
      break;
    }
    case BasicBlock::kBranch: {
      DCHECK_EQ(IrOpcode::kBranch, input->opcode());
      BasicBlock* tbranch = block->SuccessorAt(0);
      BasicBlock* fbranch = block->SuccessorAt(1);
      // Degenerate branch (both arms identical) becomes an unconditional
      // jump.
      if (tbranch == fbranch) {
        VisitGoto(tbranch);
      } else {
        VisitBranch(input, tbranch, fbranch);
      }
      break;
    }
    case BasicBlock::kSwitch: {
      DCHECK_EQ(IrOpcode::kSwitch, input->opcode());
      // Last successor must be {IfDefault}.
      BasicBlock* default_branch = block->successors().back();
      DCHECK_EQ(IrOpcode::kIfDefault, default_branch->front()->opcode());
      // All other successors must be {IfValue}s.
      int32_t min_value = std::numeric_limits<int32_t>::max();
      int32_t max_value = std::numeric_limits<int32_t>::min();
      size_t case_count = block->SuccessorCount() - 1;
      ZoneVector<CaseInfo> cases(case_count, zone());
      for (size_t i = 0; i < case_count; ++i) {
        BasicBlock* branch = block->SuccessorAt(i);
        const IfValueParameters& p = IfValueParametersOf(branch->front()->op());
        cases[i] = CaseInfo{p.value(), p.comparison_order(), branch};
        if (min_value > p.value()) min_value = p.value();
        if (max_value < p.value()) max_value = p.value();
      }
      SwitchInfo sw(cases, min_value, max_value, default_branch);
      VisitSwitch(input, sw);
      break;
    }
    case BasicBlock::kReturn: {
      DCHECK_EQ(IrOpcode::kReturn, input->opcode());
      VisitReturn(input);
      break;
    }
    case BasicBlock::kDeoptimize: {
      DeoptimizeParameters p = DeoptimizeParametersOf(input->op());
      Node* value = input->InputAt(0);
      VisitDeoptimize(p.kind(), p.reason(), p.feedback(), value);
      break;
    }
    case BasicBlock::kThrow:
      DCHECK_EQ(IrOpcode::kThrow, input->opcode());
      VisitThrow(input);
      break;
    case BasicBlock::kNone: {
      // Exit block doesn't have control.
      DCHECK_NULL(input);
      break;
    }
    default:
      UNREACHABLE();
      break;
  }
  // Record the instruction range produced for the control node when tracing.
  if (trace_turbo_ == kEnableTraceTurboJson && input) {
    int instruction_start = static_cast<int>(instructions_.size());
    instr_origins_[input->id()] = {instruction_start, instruction_end};
  }
}
   1228 
   1229 void InstructionSelector::MarkPairProjectionsAsWord32(Node* node) {
   1230   Node* projection0 = NodeProperties::FindProjection(node, 0);
   1231   if (projection0) {
   1232     MarkAsWord32(projection0);
   1233   }
   1234   Node* projection1 = NodeProperties::FindProjection(node, 1);
   1235   if (projection1) {
   1236     MarkAsWord32(projection1);
   1237   }
   1238 }
   1239 
   1240 void InstructionSelector::VisitNode(Node* node) {
   1241   DCHECK_NOT_NULL(schedule()->block(node));  // should only use scheduled nodes.
   1242   switch (node->opcode()) {
   1243     case IrOpcode::kStart:
   1244     case IrOpcode::kLoop:
   1245     case IrOpcode::kEnd:
   1246     case IrOpcode::kBranch:
   1247     case IrOpcode::kIfTrue:
   1248     case IrOpcode::kIfFalse:
   1249     case IrOpcode::kIfSuccess:
   1250     case IrOpcode::kSwitch:
   1251     case IrOpcode::kIfValue:
   1252     case IrOpcode::kIfDefault:
   1253     case IrOpcode::kEffectPhi:
   1254     case IrOpcode::kMerge:
   1255     case IrOpcode::kTerminate:
   1256     case IrOpcode::kBeginRegion:
   1257       // No code needed for these graph artifacts.
   1258       return;
   1259     case IrOpcode::kIfException:
   1260       return MarkAsReference(node), VisitIfException(node);
   1261     case IrOpcode::kFinishRegion:
   1262       return MarkAsReference(node), VisitFinishRegion(node);
   1263     case IrOpcode::kParameter: {
   1264       MachineType type =
   1265           linkage()->GetParameterType(ParameterIndexOf(node->op()));
   1266       MarkAsRepresentation(type.representation(), node);
   1267       return VisitParameter(node);
   1268     }
   1269     case IrOpcode::kOsrValue:
   1270       return MarkAsReference(node), VisitOsrValue(node);
   1271     case IrOpcode::kPhi: {
   1272       MachineRepresentation rep = PhiRepresentationOf(node->op());
   1273       if (rep == MachineRepresentation::kNone) return;
   1274       MarkAsRepresentation(rep, node);
   1275       return VisitPhi(node);
   1276     }
   1277     case IrOpcode::kProjection:
   1278       return VisitProjection(node);
   1279     case IrOpcode::kInt32Constant:
   1280     case IrOpcode::kInt64Constant:
   1281     case IrOpcode::kExternalConstant:
   1282     case IrOpcode::kRelocatableInt32Constant:
   1283     case IrOpcode::kRelocatableInt64Constant:
   1284       return VisitConstant(node);
   1285     case IrOpcode::kFloat32Constant:
   1286       return MarkAsFloat32(node), VisitConstant(node);
   1287     case IrOpcode::kFloat64Constant:
   1288       return MarkAsFloat64(node), VisitConstant(node);
   1289     case IrOpcode::kHeapConstant:
   1290       return MarkAsReference(node), VisitConstant(node);
   1291     case IrOpcode::kNumberConstant: {
   1292       double value = OpParameter<double>(node->op());
   1293       if (!IsSmiDouble(value)) MarkAsReference(node);
   1294       return VisitConstant(node);
   1295     }
   1296     case IrOpcode::kCall:
   1297       return VisitCall(node);
   1298     case IrOpcode::kCallWithCallerSavedRegisters:
   1299       return VisitCallWithCallerSavedRegisters(node);
   1300     case IrOpcode::kDeoptimizeIf:
   1301       return VisitDeoptimizeIf(node);
   1302     case IrOpcode::kDeoptimizeUnless:
   1303       return VisitDeoptimizeUnless(node);
   1304     case IrOpcode::kTrapIf:
   1305       return VisitTrapIf(node, TrapIdOf(node->op()));
   1306     case IrOpcode::kTrapUnless:
   1307       return VisitTrapUnless(node, TrapIdOf(node->op()));
   1308     case IrOpcode::kFrameState:
   1309     case IrOpcode::kStateValues:
   1310     case IrOpcode::kObjectState:
   1311       return;
   1312     case IrOpcode::kDebugAbort:
   1313       VisitDebugAbort(node);
   1314       return;
   1315     case IrOpcode::kDebugBreak:
   1316       VisitDebugBreak(node);
   1317       return;
   1318     case IrOpcode::kUnreachable:
   1319       VisitUnreachable(node);
   1320       return;
   1321     case IrOpcode::kDeadValue:
   1322       VisitDeadValue(node);
   1323       return;
   1324     case IrOpcode::kComment:
   1325       VisitComment(node);
   1326       return;
   1327     case IrOpcode::kRetain:
   1328       VisitRetain(node);
   1329       return;
   1330     case IrOpcode::kLoad: {
   1331       LoadRepresentation type = LoadRepresentationOf(node->op());
   1332       MarkAsRepresentation(type.representation(), node);
   1333       return VisitLoad(node);
   1334     }
   1335     case IrOpcode::kPoisonedLoad: {
   1336       LoadRepresentation type = LoadRepresentationOf(node->op());
   1337       MarkAsRepresentation(type.representation(), node);
   1338       return VisitPoisonedLoad(node);
   1339     }
   1340     case IrOpcode::kStore:
   1341       return VisitStore(node);
   1342     case IrOpcode::kProtectedStore:
   1343       return VisitProtectedStore(node);
   1344     case IrOpcode::kWord32And:
   1345       return MarkAsWord32(node), VisitWord32And(node);
   1346     case IrOpcode::kWord32Or:
   1347       return MarkAsWord32(node), VisitWord32Or(node);
   1348     case IrOpcode::kWord32Xor:
   1349       return MarkAsWord32(node), VisitWord32Xor(node);
   1350     case IrOpcode::kWord32Shl:
   1351       return MarkAsWord32(node), VisitWord32Shl(node);
   1352     case IrOpcode::kWord32Shr:
   1353       return MarkAsWord32(node), VisitWord32Shr(node);
   1354     case IrOpcode::kWord32Sar:
   1355       return MarkAsWord32(node), VisitWord32Sar(node);
   1356     case IrOpcode::kWord32Ror:
   1357       return MarkAsWord32(node), VisitWord32Ror(node);
   1358     case IrOpcode::kWord32Equal:
   1359       return VisitWord32Equal(node);
   1360     case IrOpcode::kWord32Clz:
   1361       return MarkAsWord32(node), VisitWord32Clz(node);
   1362     case IrOpcode::kWord32Ctz:
   1363       return MarkAsWord32(node), VisitWord32Ctz(node);
   1364     case IrOpcode::kWord32ReverseBits:
   1365       return MarkAsWord32(node), VisitWord32ReverseBits(node);
   1366     case IrOpcode::kWord32ReverseBytes:
   1367       return MarkAsWord32(node), VisitWord32ReverseBytes(node);
   1368     case IrOpcode::kInt32AbsWithOverflow:
   1369       return MarkAsWord32(node), VisitInt32AbsWithOverflow(node);
   1370     case IrOpcode::kWord32Popcnt:
   1371       return MarkAsWord32(node), VisitWord32Popcnt(node);
   1372     case IrOpcode::kWord64Popcnt:
   1373       return MarkAsWord32(node), VisitWord64Popcnt(node);
   1374     case IrOpcode::kWord64And:
   1375       return MarkAsWord64(node), VisitWord64And(node);
   1376     case IrOpcode::kWord64Or:
   1377       return MarkAsWord64(node), VisitWord64Or(node);
   1378     case IrOpcode::kWord64Xor:
   1379       return MarkAsWord64(node), VisitWord64Xor(node);
   1380     case IrOpcode::kWord64Shl:
   1381       return MarkAsWord64(node), VisitWord64Shl(node);
   1382     case IrOpcode::kWord64Shr:
   1383       return MarkAsWord64(node), VisitWord64Shr(node);
   1384     case IrOpcode::kWord64Sar:
   1385       return MarkAsWord64(node), VisitWord64Sar(node);
   1386     case IrOpcode::kWord64Ror:
   1387       return MarkAsWord64(node), VisitWord64Ror(node);
   1388     case IrOpcode::kWord64Clz:
   1389       return MarkAsWord64(node), VisitWord64Clz(node);
   1390     case IrOpcode::kWord64Ctz:
   1391       return MarkAsWord64(node), VisitWord64Ctz(node);
   1392     case IrOpcode::kWord64ReverseBits:
   1393       return MarkAsWord64(node), VisitWord64ReverseBits(node);
   1394     case IrOpcode::kWord64ReverseBytes:
   1395       return MarkAsWord64(node), VisitWord64ReverseBytes(node);
   1396     case IrOpcode::kInt64AbsWithOverflow:
   1397       return MarkAsWord64(node), VisitInt64AbsWithOverflow(node);
   1398     case IrOpcode::kWord64Equal:
   1399       return VisitWord64Equal(node);
   1400     case IrOpcode::kInt32Add:
   1401       return MarkAsWord32(node), VisitInt32Add(node);
   1402     case IrOpcode::kInt32AddWithOverflow:
   1403       return MarkAsWord32(node), VisitInt32AddWithOverflow(node);
   1404     case IrOpcode::kInt32Sub:
   1405       return MarkAsWord32(node), VisitInt32Sub(node);
   1406     case IrOpcode::kInt32SubWithOverflow:
   1407       return VisitInt32SubWithOverflow(node);
   1408     case IrOpcode::kInt32Mul:
   1409       return MarkAsWord32(node), VisitInt32Mul(node);
   1410     case IrOpcode::kInt32MulWithOverflow:
   1411       return MarkAsWord32(node), VisitInt32MulWithOverflow(node);
   1412     case IrOpcode::kInt32MulHigh:
   1413       return VisitInt32MulHigh(node);
   1414     case IrOpcode::kInt32Div:
   1415       return MarkAsWord32(node), VisitInt32Div(node);
   1416     case IrOpcode::kInt32Mod:
   1417       return MarkAsWord32(node), VisitInt32Mod(node);
   1418     case IrOpcode::kInt32LessThan:
   1419       return VisitInt32LessThan(node);
   1420     case IrOpcode::kInt32LessThanOrEqual:
   1421       return VisitInt32LessThanOrEqual(node);
   1422     case IrOpcode::kUint32Div:
   1423       return MarkAsWord32(node), VisitUint32Div(node);
   1424     case IrOpcode::kUint32LessThan:
   1425       return VisitUint32LessThan(node);
   1426     case IrOpcode::kUint32LessThanOrEqual:
   1427       return VisitUint32LessThanOrEqual(node);
   1428     case IrOpcode::kUint32Mod:
   1429       return MarkAsWord32(node), VisitUint32Mod(node);
   1430     case IrOpcode::kUint32MulHigh:
   1431       return VisitUint32MulHigh(node);
   1432     case IrOpcode::kInt64Add:
   1433       return MarkAsWord64(node), VisitInt64Add(node);
   1434     case IrOpcode::kInt64AddWithOverflow:
   1435       return MarkAsWord64(node), VisitInt64AddWithOverflow(node);
   1436     case IrOpcode::kInt64Sub:
   1437       return MarkAsWord64(node), VisitInt64Sub(node);
   1438     case IrOpcode::kInt64SubWithOverflow:
   1439       return MarkAsWord64(node), VisitInt64SubWithOverflow(node);
   1440     case IrOpcode::kInt64Mul:
   1441       return MarkAsWord64(node), VisitInt64Mul(node);
   1442     case IrOpcode::kInt64Div:
   1443       return MarkAsWord64(node), VisitInt64Div(node);
   1444     case IrOpcode::kInt64Mod:
   1445       return MarkAsWord64(node), VisitInt64Mod(node);
   1446     case IrOpcode::kInt64LessThan:
   1447       return VisitInt64LessThan(node);
   1448     case IrOpcode::kInt64LessThanOrEqual:
   1449       return VisitInt64LessThanOrEqual(node);
   1450     case IrOpcode::kUint64Div:
   1451       return MarkAsWord64(node), VisitUint64Div(node);
   1452     case IrOpcode::kUint64LessThan:
   1453       return VisitUint64LessThan(node);
   1454     case IrOpcode::kUint64LessThanOrEqual:
   1455       return VisitUint64LessThanOrEqual(node);
   1456     case IrOpcode::kUint64Mod:
   1457       return MarkAsWord64(node), VisitUint64Mod(node);
   1458     case IrOpcode::kBitcastTaggedToWord:
   1459       return MarkAsRepresentation(MachineType::PointerRepresentation(), node),
   1460              VisitBitcastTaggedToWord(node);
   1461     case IrOpcode::kBitcastWordToTagged:
   1462       return MarkAsReference(node), VisitBitcastWordToTagged(node);
   1463     case IrOpcode::kBitcastWordToTaggedSigned:
   1464       return MarkAsRepresentation(MachineRepresentation::kTaggedSigned, node),
   1465              EmitIdentity(node);
   1466     case IrOpcode::kChangeFloat32ToFloat64:
   1467       return MarkAsFloat64(node), VisitChangeFloat32ToFloat64(node);
   1468     case IrOpcode::kChangeInt32ToFloat64:
   1469       return MarkAsFloat64(node), VisitChangeInt32ToFloat64(node);
   1470     case IrOpcode::kChangeUint32ToFloat64:
   1471       return MarkAsFloat64(node), VisitChangeUint32ToFloat64(node);
   1472     case IrOpcode::kChangeFloat64ToInt32:
   1473       return MarkAsWord32(node), VisitChangeFloat64ToInt32(node);
   1474     case IrOpcode::kChangeFloat64ToUint32:
   1475       return MarkAsWord32(node), VisitChangeFloat64ToUint32(node);
   1476     case IrOpcode::kChangeFloat64ToUint64:
   1477       return MarkAsWord64(node), VisitChangeFloat64ToUint64(node);
    1478     case IrOpcode::kFloat64SilenceNaN:
               // The result is a float64 no matter which lowering is chosen
               // below, so mark the representation up front.
    1479       MarkAsFloat64(node);
               // Silencing is only required when the input may actually be a
               // signaling NaN; otherwise the op is a no-op and the node is
               // replaced by its input instead of emitting an instruction.
    1480       if (CanProduceSignalingNaN(node->InputAt(0))) {
    1481         return VisitFloat64SilenceNaN(node);
    1482       } else {
    1483         return EmitIdentity(node);
    1484       }
   1485     case IrOpcode::kTruncateFloat64ToUint32:
   1486       return MarkAsWord32(node), VisitTruncateFloat64ToUint32(node);
   1487     case IrOpcode::kTruncateFloat32ToInt32:
   1488       return MarkAsWord32(node), VisitTruncateFloat32ToInt32(node);
   1489     case IrOpcode::kTruncateFloat32ToUint32:
   1490       return MarkAsWord32(node), VisitTruncateFloat32ToUint32(node);
   1491     case IrOpcode::kTryTruncateFloat32ToInt64:
   1492       return MarkAsWord64(node), VisitTryTruncateFloat32ToInt64(node);
   1493     case IrOpcode::kTryTruncateFloat64ToInt64:
   1494       return MarkAsWord64(node), VisitTryTruncateFloat64ToInt64(node);
   1495     case IrOpcode::kTryTruncateFloat32ToUint64:
   1496       return MarkAsWord64(node), VisitTryTruncateFloat32ToUint64(node);
   1497     case IrOpcode::kTryTruncateFloat64ToUint64:
   1498       return MarkAsWord64(node), VisitTryTruncateFloat64ToUint64(node);
   1499     case IrOpcode::kChangeInt32ToInt64:
   1500       return MarkAsWord64(node), VisitChangeInt32ToInt64(node);
   1501     case IrOpcode::kChangeUint32ToUint64:
   1502       return MarkAsWord64(node), VisitChangeUint32ToUint64(node);
   1503     case IrOpcode::kTruncateFloat64ToFloat32:
   1504       return MarkAsFloat32(node), VisitTruncateFloat64ToFloat32(node);
   1505     case IrOpcode::kTruncateFloat64ToWord32:
   1506       return MarkAsWord32(node), VisitTruncateFloat64ToWord32(node);
   1507     case IrOpcode::kTruncateInt64ToInt32:
   1508       return MarkAsWord32(node), VisitTruncateInt64ToInt32(node);
   1509     case IrOpcode::kRoundFloat64ToInt32:
   1510       return MarkAsWord32(node), VisitRoundFloat64ToInt32(node);
   1511     case IrOpcode::kRoundInt64ToFloat32:
   1512       return MarkAsFloat32(node), VisitRoundInt64ToFloat32(node);
   1513     case IrOpcode::kRoundInt32ToFloat32:
   1514       return MarkAsFloat32(node), VisitRoundInt32ToFloat32(node);
   1515     case IrOpcode::kRoundInt64ToFloat64:
   1516       return MarkAsFloat64(node), VisitRoundInt64ToFloat64(node);
   1517     case IrOpcode::kBitcastFloat32ToInt32:
   1518       return MarkAsWord32(node), VisitBitcastFloat32ToInt32(node);
   1519     case IrOpcode::kRoundUint32ToFloat32:
   1520       return MarkAsFloat32(node), VisitRoundUint32ToFloat32(node);
   1521     case IrOpcode::kRoundUint64ToFloat32:
   1522       return MarkAsFloat64(node), VisitRoundUint64ToFloat32(node);
   1523     case IrOpcode::kRoundUint64ToFloat64:
   1524       return MarkAsFloat64(node), VisitRoundUint64ToFloat64(node);
   1525     case IrOpcode::kBitcastFloat64ToInt64:
   1526       return MarkAsWord64(node), VisitBitcastFloat64ToInt64(node);
   1527     case IrOpcode::kBitcastInt32ToFloat32:
   1528       return MarkAsFloat32(node), VisitBitcastInt32ToFloat32(node);
   1529     case IrOpcode::kBitcastInt64ToFloat64:
   1530       return MarkAsFloat64(node), VisitBitcastInt64ToFloat64(node);
   1531     case IrOpcode::kFloat32Add:
   1532       return MarkAsFloat32(node), VisitFloat32Add(node);
   1533     case IrOpcode::kFloat32Sub:
   1534       return MarkAsFloat32(node), VisitFloat32Sub(node);
   1535     case IrOpcode::kFloat32Neg:
   1536       return MarkAsFloat32(node), VisitFloat32Neg(node);
   1537     case IrOpcode::kFloat32Mul:
   1538       return MarkAsFloat32(node), VisitFloat32Mul(node);
   1539     case IrOpcode::kFloat32Div:
   1540       return MarkAsFloat32(node), VisitFloat32Div(node);
   1541     case IrOpcode::kFloat32Abs:
   1542       return MarkAsFloat32(node), VisitFloat32Abs(node);
   1543     case IrOpcode::kFloat32Sqrt:
   1544       return MarkAsFloat32(node), VisitFloat32Sqrt(node);
   1545     case IrOpcode::kFloat32Equal:
   1546       return VisitFloat32Equal(node);
   1547     case IrOpcode::kFloat32LessThan:
   1548       return VisitFloat32LessThan(node);
   1549     case IrOpcode::kFloat32LessThanOrEqual:
   1550       return VisitFloat32LessThanOrEqual(node);
   1551     case IrOpcode::kFloat32Max:
   1552       return MarkAsFloat32(node), VisitFloat32Max(node);
   1553     case IrOpcode::kFloat32Min:
   1554       return MarkAsFloat32(node), VisitFloat32Min(node);
   1555     case IrOpcode::kFloat64Add:
   1556       return MarkAsFloat64(node), VisitFloat64Add(node);
   1557     case IrOpcode::kFloat64Sub:
   1558       return MarkAsFloat64(node), VisitFloat64Sub(node);
   1559     case IrOpcode::kFloat64Neg:
   1560       return MarkAsFloat64(node), VisitFloat64Neg(node);
   1561     case IrOpcode::kFloat64Mul:
   1562       return MarkAsFloat64(node), VisitFloat64Mul(node);
   1563     case IrOpcode::kFloat64Div:
   1564       return MarkAsFloat64(node), VisitFloat64Div(node);
   1565     case IrOpcode::kFloat64Mod:
   1566       return MarkAsFloat64(node), VisitFloat64Mod(node);
   1567     case IrOpcode::kFloat64Min:
   1568       return MarkAsFloat64(node), VisitFloat64Min(node);
   1569     case IrOpcode::kFloat64Max:
   1570       return MarkAsFloat64(node), VisitFloat64Max(node);
   1571     case IrOpcode::kFloat64Abs:
   1572       return MarkAsFloat64(node), VisitFloat64Abs(node);
   1573     case IrOpcode::kFloat64Acos:
   1574       return MarkAsFloat64(node), VisitFloat64Acos(node);
   1575     case IrOpcode::kFloat64Acosh:
   1576       return MarkAsFloat64(node), VisitFloat64Acosh(node);
   1577     case IrOpcode::kFloat64Asin:
   1578       return MarkAsFloat64(node), VisitFloat64Asin(node);
   1579     case IrOpcode::kFloat64Asinh:
   1580       return MarkAsFloat64(node), VisitFloat64Asinh(node);
   1581     case IrOpcode::kFloat64Atan:
   1582       return MarkAsFloat64(node), VisitFloat64Atan(node);
   1583     case IrOpcode::kFloat64Atanh:
   1584       return MarkAsFloat64(node), VisitFloat64Atanh(node);
   1585     case IrOpcode::kFloat64Atan2:
   1586       return MarkAsFloat64(node), VisitFloat64Atan2(node);
   1587     case IrOpcode::kFloat64Cbrt:
   1588       return MarkAsFloat64(node), VisitFloat64Cbrt(node);
   1589     case IrOpcode::kFloat64Cos:
   1590       return MarkAsFloat64(node), VisitFloat64Cos(node);
   1591     case IrOpcode::kFloat64Cosh:
   1592       return MarkAsFloat64(node), VisitFloat64Cosh(node);
   1593     case IrOpcode::kFloat64Exp:
   1594       return MarkAsFloat64(node), VisitFloat64Exp(node);
   1595     case IrOpcode::kFloat64Expm1:
   1596       return MarkAsFloat64(node), VisitFloat64Expm1(node);
   1597     case IrOpcode::kFloat64Log:
   1598       return MarkAsFloat64(node), VisitFloat64Log(node);
   1599     case IrOpcode::kFloat64Log1p:
   1600       return MarkAsFloat64(node), VisitFloat64Log1p(node);
   1601     case IrOpcode::kFloat64Log10:
   1602       return MarkAsFloat64(node), VisitFloat64Log10(node);
   1603     case IrOpcode::kFloat64Log2:
   1604       return MarkAsFloat64(node), VisitFloat64Log2(node);
   1605     case IrOpcode::kFloat64Pow:
   1606       return MarkAsFloat64(node), VisitFloat64Pow(node);
   1607     case IrOpcode::kFloat64Sin:
   1608       return MarkAsFloat64(node), VisitFloat64Sin(node);
   1609     case IrOpcode::kFloat64Sinh:
   1610       return MarkAsFloat64(node), VisitFloat64Sinh(node);
   1611     case IrOpcode::kFloat64Sqrt:
   1612       return MarkAsFloat64(node), VisitFloat64Sqrt(node);
   1613     case IrOpcode::kFloat64Tan:
   1614       return MarkAsFloat64(node), VisitFloat64Tan(node);
   1615     case IrOpcode::kFloat64Tanh:
   1616       return MarkAsFloat64(node), VisitFloat64Tanh(node);
   1617     case IrOpcode::kFloat64Equal:
   1618       return VisitFloat64Equal(node);
   1619     case IrOpcode::kFloat64LessThan:
   1620       return VisitFloat64LessThan(node);
   1621     case IrOpcode::kFloat64LessThanOrEqual:
   1622       return VisitFloat64LessThanOrEqual(node);
   1623     case IrOpcode::kFloat32RoundDown:
   1624       return MarkAsFloat32(node), VisitFloat32RoundDown(node);
   1625     case IrOpcode::kFloat64RoundDown:
   1626       return MarkAsFloat64(node), VisitFloat64RoundDown(node);
   1627     case IrOpcode::kFloat32RoundUp:
   1628       return MarkAsFloat32(node), VisitFloat32RoundUp(node);
   1629     case IrOpcode::kFloat64RoundUp:
   1630       return MarkAsFloat64(node), VisitFloat64RoundUp(node);
   1631     case IrOpcode::kFloat32RoundTruncate:
   1632       return MarkAsFloat32(node), VisitFloat32RoundTruncate(node);
   1633     case IrOpcode::kFloat64RoundTruncate:
   1634       return MarkAsFloat64(node), VisitFloat64RoundTruncate(node);
   1635     case IrOpcode::kFloat64RoundTiesAway:
   1636       return MarkAsFloat64(node), VisitFloat64RoundTiesAway(node);
   1637     case IrOpcode::kFloat32RoundTiesEven:
   1638       return MarkAsFloat32(node), VisitFloat32RoundTiesEven(node);
   1639     case IrOpcode::kFloat64RoundTiesEven:
   1640       return MarkAsFloat64(node), VisitFloat64RoundTiesEven(node);
   1641     case IrOpcode::kFloat64ExtractLowWord32:
   1642       return MarkAsWord32(node), VisitFloat64ExtractLowWord32(node);
   1643     case IrOpcode::kFloat64ExtractHighWord32:
   1644       return MarkAsWord32(node), VisitFloat64ExtractHighWord32(node);
   1645     case IrOpcode::kFloat64InsertLowWord32:
   1646       return MarkAsFloat64(node), VisitFloat64InsertLowWord32(node);
   1647     case IrOpcode::kFloat64InsertHighWord32:
   1648       return MarkAsFloat64(node), VisitFloat64InsertHighWord32(node);
   1649     case IrOpcode::kTaggedPoisonOnSpeculation:
   1650       return MarkAsReference(node), VisitTaggedPoisonOnSpeculation(node);
   1651     case IrOpcode::kWord32PoisonOnSpeculation:
   1652       return MarkAsWord32(node), VisitWord32PoisonOnSpeculation(node);
   1653     case IrOpcode::kWord64PoisonOnSpeculation:
   1654       return MarkAsWord64(node), VisitWord64PoisonOnSpeculation(node);
   1655     case IrOpcode::kStackSlot:
   1656       return VisitStackSlot(node);
   1657     case IrOpcode::kLoadStackPointer:
   1658       return VisitLoadStackPointer(node);
   1659     case IrOpcode::kLoadFramePointer:
   1660       return VisitLoadFramePointer(node);
   1661     case IrOpcode::kLoadParentFramePointer:
   1662       return VisitLoadParentFramePointer(node);
   1663     case IrOpcode::kUnalignedLoad: {
   1664       LoadRepresentation type = LoadRepresentationOf(node->op());
   1665       MarkAsRepresentation(type.representation(), node);
   1666       return VisitUnalignedLoad(node);
   1667     }
   1668     case IrOpcode::kUnalignedStore:
   1669       return VisitUnalignedStore(node);
   1670     case IrOpcode::kInt32PairAdd:
   1671       MarkAsWord32(node);
   1672       MarkPairProjectionsAsWord32(node);
   1673       return VisitInt32PairAdd(node);
   1674     case IrOpcode::kInt32PairSub:
   1675       MarkAsWord32(node);
   1676       MarkPairProjectionsAsWord32(node);
   1677       return VisitInt32PairSub(node);
   1678     case IrOpcode::kInt32PairMul:
   1679       MarkAsWord32(node);
   1680       MarkPairProjectionsAsWord32(node);
   1681       return VisitInt32PairMul(node);
   1682     case IrOpcode::kWord32PairShl:
   1683       MarkAsWord32(node);
   1684       MarkPairProjectionsAsWord32(node);
   1685       return VisitWord32PairShl(node);
   1686     case IrOpcode::kWord32PairShr:
   1687       MarkAsWord32(node);
   1688       MarkPairProjectionsAsWord32(node);
   1689       return VisitWord32PairShr(node);
   1690     case IrOpcode::kWord32PairSar:
   1691       MarkAsWord32(node);
   1692       MarkPairProjectionsAsWord32(node);
   1693       return VisitWord32PairSar(node);
   1694     case IrOpcode::kWord32AtomicLoad: {
   1695       LoadRepresentation type = LoadRepresentationOf(node->op());
   1696       MarkAsRepresentation(type.representation(), node);
   1697       return VisitWord32AtomicLoad(node);
   1698     }
   1699     case IrOpcode::kWord64AtomicLoad: {
   1700       LoadRepresentation type = LoadRepresentationOf(node->op());
   1701       MarkAsRepresentation(type.representation(), node);
   1702       return VisitWord64AtomicLoad(node);
   1703     }
   1704     case IrOpcode::kWord32AtomicStore:
   1705       return VisitWord32AtomicStore(node);
   1706     case IrOpcode::kWord64AtomicStore:
   1707       return VisitWord64AtomicStore(node);
   1708     case IrOpcode::kWord32AtomicPairStore:
   1709       return VisitWord32AtomicPairStore(node);
   1710     case IrOpcode::kWord32AtomicPairLoad: {
   1711       MarkAsWord32(node);
   1712       MarkPairProjectionsAsWord32(node);
   1713       return VisitWord32AtomicPairLoad(node);
   1714     }
   1715 #define ATOMIC_CASE(name, rep)                         \
   1716   case IrOpcode::k##rep##Atomic##name: {               \
   1717     MachineType type = AtomicOpType(node->op());       \
   1718     MarkAsRepresentation(type.representation(), node); \
   1719     return Visit##rep##Atomic##name(node);             \
   1720   }
   1721       ATOMIC_CASE(Add, Word32)
   1722       ATOMIC_CASE(Add, Word64)
   1723       ATOMIC_CASE(Sub, Word32)
   1724       ATOMIC_CASE(Sub, Word64)
   1725       ATOMIC_CASE(And, Word32)
   1726       ATOMIC_CASE(And, Word64)
   1727       ATOMIC_CASE(Or, Word32)
   1728       ATOMIC_CASE(Or, Word64)
   1729       ATOMIC_CASE(Xor, Word32)
   1730       ATOMIC_CASE(Xor, Word64)
   1731       ATOMIC_CASE(Exchange, Word32)
   1732       ATOMIC_CASE(Exchange, Word64)
   1733       ATOMIC_CASE(CompareExchange, Word32)
   1734       ATOMIC_CASE(CompareExchange, Word64)
   1735 #undef ATOMIC_CASE
   1736 #define ATOMIC_CASE(name)                     \
   1737   case IrOpcode::kWord32AtomicPair##name: {   \
   1738     MarkAsWord32(node);                       \
   1739     MarkPairProjectionsAsWord32(node);        \
   1740     return VisitWord32AtomicPair##name(node); \
   1741   }
   1742       ATOMIC_CASE(Add)
   1743       ATOMIC_CASE(Sub)
   1744       ATOMIC_CASE(And)
   1745       ATOMIC_CASE(Or)
   1746       ATOMIC_CASE(Xor)
   1747       ATOMIC_CASE(Exchange)
   1748       ATOMIC_CASE(CompareExchange)
   1749 #undef ATOMIC_CASE
   1750 #define ATOMIC_CASE(name)                              \
   1751   case IrOpcode::kWord64AtomicNarrow##name: {          \
   1752     MachineType type = AtomicOpType(node->op());       \
   1753     MarkAsRepresentation(type.representation(), node); \
   1754     MarkPairProjectionsAsWord32(node);                 \
   1755     return VisitWord64AtomicNarrow##name(node);        \
   1756   }
   1757       ATOMIC_CASE(Add)
   1758       ATOMIC_CASE(Sub)
   1759       ATOMIC_CASE(And)
   1760       ATOMIC_CASE(Or)
   1761       ATOMIC_CASE(Xor)
   1762       ATOMIC_CASE(Exchange)
   1763       ATOMIC_CASE(CompareExchange)
   1764 #undef ATOMIC_CASE
   1765     case IrOpcode::kSpeculationFence:
   1766       return VisitSpeculationFence(node);
   1767     case IrOpcode::kProtectedLoad: {
   1768       LoadRepresentation type = LoadRepresentationOf(node->op());
   1769       MarkAsRepresentation(type.representation(), node);
   1770       return VisitProtectedLoad(node);
   1771     }
   1772     case IrOpcode::kSignExtendWord8ToInt32:
   1773       return MarkAsWord32(node), VisitSignExtendWord8ToInt32(node);
   1774     case IrOpcode::kSignExtendWord16ToInt32:
   1775       return MarkAsWord32(node), VisitSignExtendWord16ToInt32(node);
   1776     case IrOpcode::kSignExtendWord8ToInt64:
   1777       return MarkAsWord64(node), VisitSignExtendWord8ToInt64(node);
   1778     case IrOpcode::kSignExtendWord16ToInt64:
   1779       return MarkAsWord64(node), VisitSignExtendWord16ToInt64(node);
   1780     case IrOpcode::kSignExtendWord32ToInt64:
   1781       return MarkAsWord64(node), VisitSignExtendWord32ToInt64(node);
   1782     case IrOpcode::kUnsafePointerAdd:
   1783       MarkAsRepresentation(MachineType::PointerRepresentation(), node);
   1784       return VisitUnsafePointerAdd(node);
   1785     case IrOpcode::kF32x4Splat:
   1786       return MarkAsSimd128(node), VisitF32x4Splat(node);
   1787     case IrOpcode::kF32x4ExtractLane:
   1788       return MarkAsFloat32(node), VisitF32x4ExtractLane(node);
   1789     case IrOpcode::kF32x4ReplaceLane:
   1790       return MarkAsSimd128(node), VisitF32x4ReplaceLane(node);
   1791     case IrOpcode::kF32x4SConvertI32x4:
   1792       return MarkAsSimd128(node), VisitF32x4SConvertI32x4(node);
   1793     case IrOpcode::kF32x4UConvertI32x4:
   1794       return MarkAsSimd128(node), VisitF32x4UConvertI32x4(node);
   1795     case IrOpcode::kF32x4Abs:
   1796       return MarkAsSimd128(node), VisitF32x4Abs(node);
   1797     case IrOpcode::kF32x4Neg:
   1798       return MarkAsSimd128(node), VisitF32x4Neg(node);
   1799     case IrOpcode::kF32x4RecipApprox:
   1800       return MarkAsSimd128(node), VisitF32x4RecipApprox(node);
   1801     case IrOpcode::kF32x4RecipSqrtApprox:
   1802       return MarkAsSimd128(node), VisitF32x4RecipSqrtApprox(node);
   1803     case IrOpcode::kF32x4Add:
   1804       return MarkAsSimd128(node), VisitF32x4Add(node);
   1805     case IrOpcode::kF32x4AddHoriz:
   1806       return MarkAsSimd128(node), VisitF32x4AddHoriz(node);
   1807     case IrOpcode::kF32x4Sub:
   1808       return MarkAsSimd128(node), VisitF32x4Sub(node);
   1809     case IrOpcode::kF32x4Mul:
   1810       return MarkAsSimd128(node), VisitF32x4Mul(node);
   1811     case IrOpcode::kF32x4Min:
   1812       return MarkAsSimd128(node), VisitF32x4Min(node);
   1813     case IrOpcode::kF32x4Max:
   1814       return MarkAsSimd128(node), VisitF32x4Max(node);
   1815     case IrOpcode::kF32x4Eq:
   1816       return MarkAsSimd128(node), VisitF32x4Eq(node);
   1817     case IrOpcode::kF32x4Ne:
   1818       return MarkAsSimd128(node), VisitF32x4Ne(node);
   1819     case IrOpcode::kF32x4Lt:
   1820       return MarkAsSimd128(node), VisitF32x4Lt(node);
   1821     case IrOpcode::kF32x4Le:
   1822       return MarkAsSimd128(node), VisitF32x4Le(node);
   1823     case IrOpcode::kI32x4Splat:
   1824       return MarkAsSimd128(node), VisitI32x4Splat(node);
   1825     case IrOpcode::kI32x4ExtractLane:
   1826       return MarkAsWord32(node), VisitI32x4ExtractLane(node);
   1827     case IrOpcode::kI32x4ReplaceLane:
   1828       return MarkAsSimd128(node), VisitI32x4ReplaceLane(node);
   1829     case IrOpcode::kI32x4SConvertF32x4:
   1830       return MarkAsSimd128(node), VisitI32x4SConvertF32x4(node);
   1831     case IrOpcode::kI32x4SConvertI16x8Low:
   1832       return MarkAsSimd128(node), VisitI32x4SConvertI16x8Low(node);
   1833     case IrOpcode::kI32x4SConvertI16x8High:
   1834       return MarkAsSimd128(node), VisitI32x4SConvertI16x8High(node);
   1835     case IrOpcode::kI32x4Neg:
   1836       return MarkAsSimd128(node), VisitI32x4Neg(node);
   1837     case IrOpcode::kI32x4Shl:
   1838       return MarkAsSimd128(node), VisitI32x4Shl(node);
   1839     case IrOpcode::kI32x4ShrS:
   1840       return MarkAsSimd128(node), VisitI32x4ShrS(node);
   1841     case IrOpcode::kI32x4Add:
   1842       return MarkAsSimd128(node), VisitI32x4Add(node);
   1843     case IrOpcode::kI32x4AddHoriz:
   1844       return MarkAsSimd128(node), VisitI32x4AddHoriz(node);
   1845     case IrOpcode::kI32x4Sub:
   1846       return MarkAsSimd128(node), VisitI32x4Sub(node);
   1847     case IrOpcode::kI32x4Mul:
   1848       return MarkAsSimd128(node), VisitI32x4Mul(node);
   1849     case IrOpcode::kI32x4MinS:
   1850       return MarkAsSimd128(node), VisitI32x4MinS(node);
   1851     case IrOpcode::kI32x4MaxS:
   1852       return MarkAsSimd128(node), VisitI32x4MaxS(node);
   1853     case IrOpcode::kI32x4Eq:
   1854       return MarkAsSimd128(node), VisitI32x4Eq(node);
   1855     case IrOpcode::kI32x4Ne:
   1856       return MarkAsSimd128(node), VisitI32x4Ne(node);
   1857     case IrOpcode::kI32x4GtS:
   1858       return MarkAsSimd128(node), VisitI32x4GtS(node);
   1859     case IrOpcode::kI32x4GeS:
   1860       return MarkAsSimd128(node), VisitI32x4GeS(node);
   1861     case IrOpcode::kI32x4UConvertF32x4:
   1862       return MarkAsSimd128(node), VisitI32x4UConvertF32x4(node);
   1863     case IrOpcode::kI32x4UConvertI16x8Low:
   1864       return MarkAsSimd128(node), VisitI32x4UConvertI16x8Low(node);
   1865     case IrOpcode::kI32x4UConvertI16x8High:
   1866       return MarkAsSimd128(node), VisitI32x4UConvertI16x8High(node);
   1867     case IrOpcode::kI32x4ShrU:
   1868       return MarkAsSimd128(node), VisitI32x4ShrU(node);
   1869     case IrOpcode::kI32x4MinU:
   1870       return MarkAsSimd128(node), VisitI32x4MinU(node);
   1871     case IrOpcode::kI32x4MaxU:
   1872       return MarkAsSimd128(node), VisitI32x4MaxU(node);
   1873     case IrOpcode::kI32x4GtU:
   1874       return MarkAsSimd128(node), VisitI32x4GtU(node);
   1875     case IrOpcode::kI32x4GeU:
   1876       return MarkAsSimd128(node), VisitI32x4GeU(node);
   1877     case IrOpcode::kI16x8Splat:
   1878       return MarkAsSimd128(node), VisitI16x8Splat(node);
   1879     case IrOpcode::kI16x8ExtractLane:
   1880       return MarkAsWord32(node), VisitI16x8ExtractLane(node);
   1881     case IrOpcode::kI16x8ReplaceLane:
   1882       return MarkAsSimd128(node), VisitI16x8ReplaceLane(node);
   1883     case IrOpcode::kI16x8SConvertI8x16Low:
   1884       return MarkAsSimd128(node), VisitI16x8SConvertI8x16Low(node);
   1885     case IrOpcode::kI16x8SConvertI8x16High:
   1886       return MarkAsSimd128(node), VisitI16x8SConvertI8x16High(node);
   1887     case IrOpcode::kI16x8Neg:
   1888       return MarkAsSimd128(node), VisitI16x8Neg(node);
   1889     case IrOpcode::kI16x8Shl:
   1890       return MarkAsSimd128(node), VisitI16x8Shl(node);
   1891     case IrOpcode::kI16x8ShrS:
   1892       return MarkAsSimd128(node), VisitI16x8ShrS(node);
   1893     case IrOpcode::kI16x8SConvertI32x4:
   1894       return MarkAsSimd128(node), VisitI16x8SConvertI32x4(node);
   1895     case IrOpcode::kI16x8Add:
   1896       return MarkAsSimd128(node), VisitI16x8Add(node);
   1897     case IrOpcode::kI16x8AddSaturateS:
   1898       return MarkAsSimd128(node), VisitI16x8AddSaturateS(node);
   1899     case IrOpcode::kI16x8AddHoriz:
   1900       return MarkAsSimd128(node), VisitI16x8AddHoriz(node);
   1901     case IrOpcode::kI16x8Sub:
   1902       return MarkAsSimd128(node), VisitI16x8Sub(node);
   1903     case IrOpcode::kI16x8SubSaturateS:
   1904       return MarkAsSimd128(node), VisitI16x8SubSaturateS(node);
   1905     case IrOpcode::kI16x8Mul:
   1906       return MarkAsSimd128(node), VisitI16x8Mul(node);
   1907     case IrOpcode::kI16x8MinS:
   1908       return MarkAsSimd128(node), VisitI16x8MinS(node);
   1909     case IrOpcode::kI16x8MaxS:
   1910       return MarkAsSimd128(node), VisitI16x8MaxS(node);
   1911     case IrOpcode::kI16x8Eq:
   1912       return MarkAsSimd128(node), VisitI16x8Eq(node);
   1913     case IrOpcode::kI16x8Ne:
   1914       return MarkAsSimd128(node), VisitI16x8Ne(node);
   1915     case IrOpcode::kI16x8GtS:
   1916       return MarkAsSimd128(node), VisitI16x8GtS(node);
   1917     case IrOpcode::kI16x8GeS:
   1918       return MarkAsSimd128(node), VisitI16x8GeS(node);
   1919     case IrOpcode::kI16x8UConvertI8x16Low:
   1920       return MarkAsSimd128(node), VisitI16x8UConvertI8x16Low(node);
   1921     case IrOpcode::kI16x8UConvertI8x16High:
   1922       return MarkAsSimd128(node), VisitI16x8UConvertI8x16High(node);
   1923     case IrOpcode::kI16x8ShrU:
   1924       return MarkAsSimd128(node), VisitI16x8ShrU(node);
   1925     case IrOpcode::kI16x8UConvertI32x4:
   1926       return MarkAsSimd128(node), VisitI16x8UConvertI32x4(node);
   1927     case IrOpcode::kI16x8AddSaturateU:
   1928       return MarkAsSimd128(node), VisitI16x8AddSaturateU(node);
   1929     case IrOpcode::kI16x8SubSaturateU:
   1930       return MarkAsSimd128(node), VisitI16x8SubSaturateU(node);
   1931     case IrOpcode::kI16x8MinU:
   1932       return MarkAsSimd128(node), VisitI16x8MinU(node);
   1933     case IrOpcode::kI16x8MaxU:
   1934       return MarkAsSimd128(node), VisitI16x8MaxU(node);
   1935     case IrOpcode::kI16x8GtU:
   1936       return MarkAsSimd128(node), VisitI16x8GtU(node);
   1937     case IrOpcode::kI16x8GeU:
   1938       return MarkAsSimd128(node), VisitI16x8GeU(node);
   1939     case IrOpcode::kI8x16Splat:
   1940       return MarkAsSimd128(node), VisitI8x16Splat(node);
   1941     case IrOpcode::kI8x16ExtractLane:
   1942       return MarkAsWord32(node), VisitI8x16ExtractLane(node);
   1943     case IrOpcode::kI8x16ReplaceLane:
   1944       return MarkAsSimd128(node), VisitI8x16ReplaceLane(node);
   1945     case IrOpcode::kI8x16Neg:
   1946       return MarkAsSimd128(node), VisitI8x16Neg(node);
   1947     case IrOpcode::kI8x16Shl:
   1948       return MarkAsSimd128(node), VisitI8x16Shl(node);
   1949     case IrOpcode::kI8x16ShrS:
   1950       return MarkAsSimd128(node), VisitI8x16ShrS(node);
   1951     case IrOpcode::kI8x16SConvertI16x8:
   1952       return MarkAsSimd128(node), VisitI8x16SConvertI16x8(node);
   1953     case IrOpcode::kI8x16Add:
   1954       return MarkAsSimd128(node), VisitI8x16Add(node);
   1955     case IrOpcode::kI8x16AddSaturateS:
   1956       return MarkAsSimd128(node), VisitI8x16AddSaturateS(node);
   1957     case IrOpcode::kI8x16Sub:
   1958       return MarkAsSimd128(node), VisitI8x16Sub(node);
   1959     case IrOpcode::kI8x16SubSaturateS:
   1960       return MarkAsSimd128(node), VisitI8x16SubSaturateS(node);
   1961     case IrOpcode::kI8x16Mul:
   1962       return MarkAsSimd128(node), VisitI8x16Mul(node);
   1963     case IrOpcode::kI8x16MinS:
   1964       return MarkAsSimd128(node), VisitI8x16MinS(node);
   1965     case IrOpcode::kI8x16MaxS:
   1966       return MarkAsSimd128(node), VisitI8x16MaxS(node);
   1967     case IrOpcode::kI8x16Eq:
   1968       return MarkAsSimd128(node), VisitI8x16Eq(node);
   1969     case IrOpcode::kI8x16Ne:
   1970       return MarkAsSimd128(node), VisitI8x16Ne(node);
   1971     case IrOpcode::kI8x16GtS:
   1972       return MarkAsSimd128(node), VisitI8x16GtS(node);
   1973     case IrOpcode::kI8x16GeS:
   1974       return MarkAsSimd128(node), VisitI8x16GeS(node);
   1975     case IrOpcode::kI8x16ShrU:
   1976       return MarkAsSimd128(node), VisitI8x16ShrU(node);
   1977     case IrOpcode::kI8x16UConvertI16x8:
   1978       return MarkAsSimd128(node), VisitI8x16UConvertI16x8(node);
   1979     case IrOpcode::kI8x16AddSaturateU:
   1980       return MarkAsSimd128(node), VisitI8x16AddSaturateU(node);
   1981     case IrOpcode::kI8x16SubSaturateU:
   1982       return MarkAsSimd128(node), VisitI8x16SubSaturateU(node);
   1983     case IrOpcode::kI8x16MinU:
   1984       return MarkAsSimd128(node), VisitI8x16MinU(node);
   1985     case IrOpcode::kI8x16MaxU:
   1986       return MarkAsSimd128(node), VisitI8x16MaxU(node);
   1987     case IrOpcode::kI8x16GtU:
   1988       return MarkAsSimd128(node), VisitI8x16GtU(node);
   1989     case IrOpcode::kI8x16GeU:
   1990       return MarkAsSimd128(node), VisitI16x8GeU(node);
   1991     case IrOpcode::kS128Zero:
   1992       return MarkAsSimd128(node), VisitS128Zero(node);
   1993     case IrOpcode::kS128And:
   1994       return MarkAsSimd128(node), VisitS128And(node);
   1995     case IrOpcode::kS128Or:
   1996       return MarkAsSimd128(node), VisitS128Or(node);
   1997     case IrOpcode::kS128Xor:
   1998       return MarkAsSimd128(node), VisitS128Xor(node);
   1999     case IrOpcode::kS128Not:
   2000       return MarkAsSimd128(node), VisitS128Not(node);
   2001     case IrOpcode::kS128Select:
   2002       return MarkAsSimd128(node), VisitS128Select(node);
   2003     case IrOpcode::kS8x16Shuffle:
   2004       return MarkAsSimd128(node), VisitS8x16Shuffle(node);
   2005     case IrOpcode::kS1x4AnyTrue:
   2006       return MarkAsWord32(node), VisitS1x4AnyTrue(node);
   2007     case IrOpcode::kS1x4AllTrue:
   2008       return MarkAsWord32(node), VisitS1x4AllTrue(node);
   2009     case IrOpcode::kS1x8AnyTrue:
   2010       return MarkAsWord32(node), VisitS1x8AnyTrue(node);
   2011     case IrOpcode::kS1x8AllTrue:
   2012       return MarkAsWord32(node), VisitS1x8AllTrue(node);
   2013     case IrOpcode::kS1x16AnyTrue:
   2014       return MarkAsWord32(node), VisitS1x16AnyTrue(node);
   2015     case IrOpcode::kS1x16AllTrue:
   2016       return MarkAsWord32(node), VisitS1x16AllTrue(node);
   2017     default:
   2018       FATAL("Unexpected operator #%d:%s @ node #%d", node->opcode(),
   2019             node->op()->mnemonic(), node->id());
   2020       break;
   2021   }
   2022 }
   2023 
   2024 void InstructionSelector::EmitWordPoisonOnSpeculation(Node* node) {
   2025   if (poisoning_level_ != PoisoningMitigationLevel::kDontPoison) {
   2026     OperandGenerator g(this);
   2027     Node* input_node = NodeProperties::GetValueInput(node, 0);
   2028     InstructionOperand input = g.UseRegister(input_node);
   2029     InstructionOperand output = g.DefineSameAsFirst(node);
   2030     Emit(kArchWordPoisonOnSpeculation, output, input);
   2031   } else {
   2032     EmitIdentity(node);
   2033   }
   2034 }
   2035 
// Word32 poisoning shares the generic word-sized poison sequence.
void InstructionSelector::VisitWord32PoisonOnSpeculation(Node* node) {
  EmitWordPoisonOnSpeculation(node);
}

// Word64 poisoning shares the generic word-sized poison sequence.
void InstructionSelector::VisitWord64PoisonOnSpeculation(Node* node) {
  EmitWordPoisonOnSpeculation(node);
}

// Tagged values are word-sized, so they share the same poison sequence.
void InstructionSelector::VisitTaggedPoisonOnSpeculation(Node* node) {
  EmitWordPoisonOnSpeculation(node);
}
   2047 
// Materializes the current stack pointer into the node's output register.
void InstructionSelector::VisitLoadStackPointer(Node* node) {
  OperandGenerator g(this);
  Emit(kArchStackPointer, g.DefineAsRegister(node));
}

// Materializes the current frame pointer into the node's output register.
void InstructionSelector::VisitLoadFramePointer(Node* node) {
  OperandGenerator g(this);
  Emit(kArchFramePointer, g.DefineAsRegister(node));
}

// Materializes the parent (caller) frame pointer into the output register.
void InstructionSelector::VisitLoadParentFramePointer(Node* node) {
  OperandGenerator g(this);
  Emit(kArchParentFramePointer, g.DefineAsRegister(node));
}
   2062 
// The Float64 transcendental math visitors below are all routed through the
// shared Ieee754 unop/binop helpers with the matching kIeee754Float64* opcode;
// each visitor differs only in the opcode it selects.

void InstructionSelector::VisitFloat64Acos(Node* node) {
  VisitFloat64Ieee754Unop(node, kIeee754Float64Acos);
}

void InstructionSelector::VisitFloat64Acosh(Node* node) {
  VisitFloat64Ieee754Unop(node, kIeee754Float64Acosh);
}

void InstructionSelector::VisitFloat64Asin(Node* node) {
  VisitFloat64Ieee754Unop(node, kIeee754Float64Asin);
}

void InstructionSelector::VisitFloat64Asinh(Node* node) {
  VisitFloat64Ieee754Unop(node, kIeee754Float64Asinh);
}

void InstructionSelector::VisitFloat64Atan(Node* node) {
  VisitFloat64Ieee754Unop(node, kIeee754Float64Atan);
}

void InstructionSelector::VisitFloat64Atanh(Node* node) {
  VisitFloat64Ieee754Unop(node, kIeee754Float64Atanh);
}

// Atan2 is the only two-operand operation besides Pow; it uses the binop
// helper.
void InstructionSelector::VisitFloat64Atan2(Node* node) {
  VisitFloat64Ieee754Binop(node, kIeee754Float64Atan2);
}

void InstructionSelector::VisitFloat64Cbrt(Node* node) {
  VisitFloat64Ieee754Unop(node, kIeee754Float64Cbrt);
}

void InstructionSelector::VisitFloat64Cos(Node* node) {
  VisitFloat64Ieee754Unop(node, kIeee754Float64Cos);
}

void InstructionSelector::VisitFloat64Cosh(Node* node) {
  VisitFloat64Ieee754Unop(node, kIeee754Float64Cosh);
}

void InstructionSelector::VisitFloat64Exp(Node* node) {
  VisitFloat64Ieee754Unop(node, kIeee754Float64Exp);
}

void InstructionSelector::VisitFloat64Expm1(Node* node) {
  VisitFloat64Ieee754Unop(node, kIeee754Float64Expm1);
}

void InstructionSelector::VisitFloat64Log(Node* node) {
  VisitFloat64Ieee754Unop(node, kIeee754Float64Log);
}

void InstructionSelector::VisitFloat64Log1p(Node* node) {
  VisitFloat64Ieee754Unop(node, kIeee754Float64Log1p);
}

void InstructionSelector::VisitFloat64Log2(Node* node) {
  VisitFloat64Ieee754Unop(node, kIeee754Float64Log2);
}

void InstructionSelector::VisitFloat64Log10(Node* node) {
  VisitFloat64Ieee754Unop(node, kIeee754Float64Log10);
}

void InstructionSelector::VisitFloat64Pow(Node* node) {
  VisitFloat64Ieee754Binop(node, kIeee754Float64Pow);
}

void InstructionSelector::VisitFloat64Sin(Node* node) {
  VisitFloat64Ieee754Unop(node, kIeee754Float64Sin);
}

void InstructionSelector::VisitFloat64Sinh(Node* node) {
  VisitFloat64Ieee754Unop(node, kIeee754Float64Sinh);
}

void InstructionSelector::VisitFloat64Tan(Node* node) {
  VisitFloat64Ieee754Unop(node, kIeee754Float64Tan);
}

void InstructionSelector::VisitFloat64Tanh(Node* node) {
  VisitFloat64Ieee754Unop(node, kIeee754Float64Tanh);
}
   2146 
// Emits a dense jump-table switch. The instruction's inputs are laid out as
// [index, slot_0 ... slot_{range-1}], where every slot in the value range
// holds a branch label; slots with no explicit case hold the default label.
void InstructionSelector::EmitTableSwitch(const SwitchInfo& sw,
                                          InstructionOperand& index_operand) {
  OperandGenerator g(this);
  size_t input_count = 2 + sw.value_range();
  // Guard the size_t arithmetic above against overflow.
  DCHECK_LE(sw.value_range(), std::numeric_limits<size_t>::max() - 2);
  auto* inputs = zone()->NewArray<InstructionOperand>(input_count);
  inputs[0] = index_operand;
  InstructionOperand default_operand = g.Label(sw.default_branch());
  // Pre-fill every table slot with the default label; explicit cases
  // overwrite their slot below.
  std::fill(&inputs[1], &inputs[input_count], default_operand);
  for (const CaseInfo& c : sw.CasesUnsorted()) {
    // Bias the case value by the minimum so it indexes from table start.
    size_t value = c.value - sw.min_value();
    DCHECK_LE(0u, value);
    DCHECK_LT(value + 2, input_count);
    inputs[value + 2] = g.Label(c.branch);
  }
  Emit(kArchTableSwitch, 0, nullptr, input_count, inputs, 0, nullptr);
}
   2164 
   2165 
// Emits a linear lookup switch. The instruction's inputs are laid out as
// [value, default_label, (case_value, case_label)...], with the pairs in the
// cases' original source order.
void InstructionSelector::EmitLookupSwitch(const SwitchInfo& sw,
                                           InstructionOperand& value_operand) {
  OperandGenerator g(this);
  std::vector<CaseInfo> cases = sw.CasesSortedByOriginalOrder();
  size_t input_count = 2 + sw.case_count() * 2;
  // Guard the size_t arithmetic above against overflow.
  DCHECK_LE(sw.case_count(), (std::numeric_limits<size_t>::max() - 2) / 2);
  auto* inputs = zone()->NewArray<InstructionOperand>(input_count);
  inputs[0] = value_operand;
  inputs[1] = g.Label(sw.default_branch());
  for (size_t index = 0; index < cases.size(); ++index) {
    const CaseInfo& c = cases[index];
    inputs[index * 2 + 2 + 0] = g.TempImmediate(c.value);
    inputs[index * 2 + 2 + 1] = g.Label(c.branch);
  }
  Emit(kArchLookupSwitch, 0, nullptr, input_count, inputs, 0, nullptr);
}
   2182 
   2183 void InstructionSelector::EmitBinarySearchSwitch(
   2184     const SwitchInfo& sw, InstructionOperand& value_operand) {
   2185   OperandGenerator g(this);
   2186   size_t input_count = 2 + sw.case_count() * 2;
   2187   DCHECK_LE(sw.case_count(), (std::numeric_limits<size_t>::max() - 2) / 2);
   2188   auto* inputs = zone()->NewArray<InstructionOperand>(input_count);
   2189   inputs[0] = value_operand;
   2190   inputs[1] = g.Label(sw.default_branch());
   2191   std::vector<CaseInfo> cases = sw.CasesSortedByValue();
   2192   std::stable_sort(cases.begin(), cases.end(),
   2193                    [](CaseInfo a, CaseInfo b) { return a.value < b.value; });
   2194   for (size_t index = 0; index < cases.size(); ++index) {
   2195     const CaseInfo& c = cases[index];
   2196     inputs[index * 2 + 2 + 0] = g.TempImmediate(c.value);
   2197     inputs[index * 2 + 2 + 1] = g.Label(c.branch);
   2198   }
   2199   Emit(kArchBinarySearchSwitch, 0, nullptr, input_count, inputs, 0, nullptr);
   2200 }
   2201 
// A tagged->word bitcast is a pure representation change: reuse the input's
// virtual register for the output.
void InstructionSelector::VisitBitcastTaggedToWord(Node* node) {
  EmitIdentity(node);
}

// A word->tagged bitcast emits a nop that defines the output in the same
// location as the input, so the register allocator keeps them together.
void InstructionSelector::VisitBitcastWordToTagged(Node* node) {
  OperandGenerator g(this);
  Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(node->InputAt(0)));
}
   2210 
   2211 // 32 bit targets do not implement the following instructions.
   2212 #if V8_TARGET_ARCH_32_BIT
   2213 
   2214 void InstructionSelector::VisitWord64And(Node* node) { UNIMPLEMENTED(); }
   2215 
   2216 
   2217 void InstructionSelector::VisitWord64Or(Node* node) { UNIMPLEMENTED(); }
   2218 
   2219 
   2220 void InstructionSelector::VisitWord64Xor(Node* node) { UNIMPLEMENTED(); }
   2221 
   2222 
   2223 void InstructionSelector::VisitWord64Shl(Node* node) { UNIMPLEMENTED(); }
   2224 
   2225 
   2226 void InstructionSelector::VisitWord64Shr(Node* node) { UNIMPLEMENTED(); }
   2227 
   2228 
   2229 void InstructionSelector::VisitWord64Sar(Node* node) { UNIMPLEMENTED(); }
   2230 
   2231 
   2232 void InstructionSelector::VisitWord64Ror(Node* node) { UNIMPLEMENTED(); }
   2233 
   2234 
   2235 void InstructionSelector::VisitWord64Clz(Node* node) { UNIMPLEMENTED(); }
   2236 
   2237 
   2238 void InstructionSelector::VisitWord64Ctz(Node* node) { UNIMPLEMENTED(); }
   2239 
   2240 
   2241 void InstructionSelector::VisitWord64ReverseBits(Node* node) {
   2242   UNIMPLEMENTED();
   2243 }
   2244 
   2245 
   2246 void InstructionSelector::VisitWord64Popcnt(Node* node) { UNIMPLEMENTED(); }
   2247 
   2248 
   2249 void InstructionSelector::VisitWord64Equal(Node* node) { UNIMPLEMENTED(); }
   2250 
   2251 
   2252 void InstructionSelector::VisitInt64Add(Node* node) { UNIMPLEMENTED(); }
   2253 
   2254 
   2255 void InstructionSelector::VisitInt64AddWithOverflow(Node* node) {
   2256   UNIMPLEMENTED();
   2257 }
   2258 
   2259 
   2260 void InstructionSelector::VisitInt64Sub(Node* node) { UNIMPLEMENTED(); }
   2261 
   2262 
   2263 void InstructionSelector::VisitInt64SubWithOverflow(Node* node) {
   2264   UNIMPLEMENTED();
   2265 }
   2266 
   2267 void InstructionSelector::VisitInt64Mul(Node* node) { UNIMPLEMENTED(); }
   2268 
   2269 
   2270 void InstructionSelector::VisitInt64Div(Node* node) { UNIMPLEMENTED(); }
   2271 
   2272 
   2273 void InstructionSelector::VisitInt64LessThan(Node* node) { UNIMPLEMENTED(); }
   2274 
   2275 
   2276 void InstructionSelector::VisitInt64LessThanOrEqual(Node* node) {
   2277   UNIMPLEMENTED();
   2278 }
   2279 
   2280 
   2281 void InstructionSelector::VisitUint64Div(Node* node) { UNIMPLEMENTED(); }
   2282 
   2283 
   2284 void InstructionSelector::VisitInt64Mod(Node* node) { UNIMPLEMENTED(); }
   2285 
   2286 
   2287 void InstructionSelector::VisitUint64LessThan(Node* node) { UNIMPLEMENTED(); }
   2288 
   2289 
   2290 void InstructionSelector::VisitUint64LessThanOrEqual(Node* node) {
   2291   UNIMPLEMENTED();
   2292 }
   2293 
   2294 
   2295 void InstructionSelector::VisitUint64Mod(Node* node) { UNIMPLEMENTED(); }
   2296 
   2297 
   2298 void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
   2299   UNIMPLEMENTED();
   2300 }
   2301 
   2302 
   2303 void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
   2304   UNIMPLEMENTED();
   2305 }
   2306 
   2307 void InstructionSelector::VisitChangeFloat64ToUint64(Node* node) {
   2308   UNIMPLEMENTED();
   2309 }
   2310 
   2311 void InstructionSelector::VisitTryTruncateFloat32ToInt64(Node* node) {
   2312   UNIMPLEMENTED();
   2313 }
   2314 
   2315 
   2316 void InstructionSelector::VisitTryTruncateFloat64ToInt64(Node* node) {
   2317   UNIMPLEMENTED();
   2318 }
   2319 
   2320 
   2321 void InstructionSelector::VisitTryTruncateFloat32ToUint64(Node* node) {
   2322   UNIMPLEMENTED();
   2323 }
   2324 
   2325 
   2326 void InstructionSelector::VisitTryTruncateFloat64ToUint64(Node* node) {
   2327   UNIMPLEMENTED();
   2328 }
   2329 
   2330 
   2331 void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
   2332   UNIMPLEMENTED();
   2333 }
   2334 
   2335 
   2336 void InstructionSelector::VisitRoundInt64ToFloat32(Node* node) {
   2337   UNIMPLEMENTED();
   2338 }
   2339 
   2340 
   2341 void InstructionSelector::VisitRoundInt64ToFloat64(Node* node) {
   2342   UNIMPLEMENTED();
   2343 }
   2344 
   2345 
   2346 void InstructionSelector::VisitRoundUint64ToFloat32(Node* node) {
   2347   UNIMPLEMENTED();
   2348 }
   2349 
   2350 
   2351 void InstructionSelector::VisitRoundUint64ToFloat64(Node* node) {
   2352   UNIMPLEMENTED();
   2353 }
   2354 
   2355 void InstructionSelector::VisitBitcastFloat64ToInt64(Node* node) {
   2356   UNIMPLEMENTED();
   2357 }
   2358 
   2359 
   2360 void InstructionSelector::VisitBitcastInt64ToFloat64(Node* node) {
   2361   UNIMPLEMENTED();
   2362 }
   2363 
   2364 void InstructionSelector::VisitSignExtendWord8ToInt64(Node* node) {
   2365   UNIMPLEMENTED();
   2366 }
   2367 
   2368 void InstructionSelector::VisitSignExtendWord16ToInt64(Node* node) {
   2369   UNIMPLEMENTED();
   2370 }
   2371 
   2372 void InstructionSelector::VisitSignExtendWord32ToInt64(Node* node) {
   2373   UNIMPLEMENTED();
   2374 }
   2375 #endif  // V8_TARGET_ARCH_32_BIT
   2376 
// 64 bit targets do not implement the following instructions.
#if V8_TARGET_ARCH_64_BIT
// The Int32-pair / Word32-pair operations emulate 64-bit arithmetic on
// 32-bit targets, so 64-bit targets never select them.
void InstructionSelector::VisitInt32PairAdd(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitInt32PairSub(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitInt32PairMul(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitWord32PairShl(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitWord32PairShr(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitWord32PairSar(Node* node) { UNIMPLEMENTED(); }
#endif  // V8_TARGET_ARCH_64_BIT
   2391 
#if !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM
// Per the guard above, only ia32 and arm provide backend implementations of
// the 64-bit atomic pair / narrow operations; other targets must not reach
// these visitors.
void InstructionSelector::VisitWord32AtomicPairLoad(Node* node) {
  UNIMPLEMENTED();
}

void InstructionSelector::VisitWord32AtomicPairStore(Node* node) {
  UNIMPLEMENTED();
}

void InstructionSelector::VisitWord32AtomicPairAdd(Node* node) {
  UNIMPLEMENTED();
}

void InstructionSelector::VisitWord32AtomicPairSub(Node* node) {
  UNIMPLEMENTED();
}

void InstructionSelector::VisitWord32AtomicPairAnd(Node* node) {
  UNIMPLEMENTED();
}

void InstructionSelector::VisitWord32AtomicPairOr(Node* node) {
  UNIMPLEMENTED();
}

void InstructionSelector::VisitWord32AtomicPairXor(Node* node) {
  UNIMPLEMENTED();
}

void InstructionSelector::VisitWord32AtomicPairExchange(Node* node) {
  UNIMPLEMENTED();
}

void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) {
  UNIMPLEMENTED();
}

void InstructionSelector::VisitWord64AtomicNarrowAdd(Node* node) {
  UNIMPLEMENTED();
}

void InstructionSelector::VisitWord64AtomicNarrowSub(Node* node) {
  UNIMPLEMENTED();
}

void InstructionSelector::VisitWord64AtomicNarrowAnd(Node* node) {
  UNIMPLEMENTED();
}

void InstructionSelector::VisitWord64AtomicNarrowOr(Node* node) {
  UNIMPLEMENTED();
}

void InstructionSelector::VisitWord64AtomicNarrowXor(Node* node) {
  UNIMPLEMENTED();
}

void InstructionSelector::VisitWord64AtomicNarrowExchange(Node* node) {
  UNIMPLEMENTED();
}

void InstructionSelector::VisitWord64AtomicNarrowCompareExchange(Node* node) {
  UNIMPLEMENTED();
}
#endif  // !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM
   2457 
#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && \
    !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_IA32
// Per the guard above, float<->int SIMD conversions are only implemented on
// the listed targets.
void InstructionSelector::VisitF32x4SConvertI32x4(Node* node) {
  UNIMPLEMENTED();
}

void InstructionSelector::VisitF32x4UConvertI32x4(Node* node) {
  UNIMPLEMENTED();
}
#endif  // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS
        // && !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_IA32
   2469 
#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM64
// Per the guard above, full 64-bit atomics are only implemented on x64 and
// arm64.
void InstructionSelector::VisitWord64AtomicLoad(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitWord64AtomicStore(Node* node) {
  UNIMPLEMENTED();
}

void InstructionSelector::VisitWord64AtomicAdd(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitWord64AtomicSub(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitWord64AtomicAnd(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitWord64AtomicOr(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitWord64AtomicXor(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitWord64AtomicExchange(Node* node) {
  UNIMPLEMENTED();
}

void InstructionSelector::VisitWord64AtomicCompareExchange(Node* node) {
  UNIMPLEMENTED();
}
#endif  // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM64
   2495 
#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && \
    !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_IA32
// Per the guard above, the remaining SIMD conversions, shifts, shuffles and
// any/all-true reductions are only implemented on the listed targets.
void InstructionSelector::VisitI32x4SConvertF32x4(Node* node) {
  UNIMPLEMENTED();
}

void InstructionSelector::VisitI32x4UConvertF32x4(Node* node) {
  UNIMPLEMENTED();
}

void InstructionSelector::VisitI32x4SConvertI16x8Low(Node* node) {
  UNIMPLEMENTED();
}

void InstructionSelector::VisitI32x4SConvertI16x8High(Node* node) {
  UNIMPLEMENTED();
}

void InstructionSelector::VisitI32x4UConvertI16x8Low(Node* node) {
  UNIMPLEMENTED();
}

void InstructionSelector::VisitI32x4UConvertI16x8High(Node* node) {
  UNIMPLEMENTED();
}

void InstructionSelector::VisitI16x8SConvertI8x16Low(Node* node) {
  UNIMPLEMENTED();
}

void InstructionSelector::VisitI16x8SConvertI8x16High(Node* node) {
  UNIMPLEMENTED();
}

void InstructionSelector::VisitI16x8UConvertI8x16Low(Node* node) {
  UNIMPLEMENTED();
}

void InstructionSelector::VisitI16x8UConvertI8x16High(Node* node) {
  UNIMPLEMENTED();
}

void InstructionSelector::VisitI16x8SConvertI32x4(Node* node) {
  UNIMPLEMENTED();
}
void InstructionSelector::VisitI16x8UConvertI32x4(Node* node) {
  UNIMPLEMENTED();
}

void InstructionSelector::VisitI8x16SConvertI16x8(Node* node) {
  UNIMPLEMENTED();
}

void InstructionSelector::VisitI8x16UConvertI16x8(Node* node) {
  UNIMPLEMENTED();
}

void InstructionSelector::VisitI8x16Shl(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitI8x16ShrS(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitI8x16ShrU(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitI8x16Mul(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitS8x16Shuffle(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitS1x4AnyTrue(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitS1x4AllTrue(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitS1x8AnyTrue(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitS1x8AllTrue(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitS1x16AnyTrue(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitS1x16AllTrue(Node* node) { UNIMPLEMENTED(); }
#endif  // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS
        // && !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_IA32
   2576 
// FinishRegion generates no code; its value input simply flows through.
void InstructionSelector::VisitFinishRegion(Node* node) { EmitIdentity(node); }
   2578 
   2579 void InstructionSelector::VisitParameter(Node* node) {
   2580   OperandGenerator g(this);
   2581   int index = ParameterIndexOf(node->op());
   2582   InstructionOperand op =
   2583       linkage()->ParameterHasSecondaryLocation(index)
   2584           ? g.DefineAsDualLocation(
   2585                 node, linkage()->GetParameterLocation(index),
   2586                 linkage()->GetParameterSecondaryLocation(index))
   2587           : g.DefineAsLocation(node, linkage()->GetParameterLocation(index));
   2588 
   2589   Emit(kArchNop, op);
   2590 }
   2591 
namespace {
// The exception object is handed to a handler in the first return register.
LinkageLocation ExceptionLocation() {
  return LinkageLocation::ForRegister(kReturnRegister0.code(),
                                      MachineType::IntPtr());
}
}  // namespace
   2598 
// Defines the caught exception value at its canonical location. The checked
// invariant below requires the node's second input to be the throwing call.
void InstructionSelector::VisitIfException(Node* node) {
  OperandGenerator g(this);
  DCHECK_EQ(IrOpcode::kCall, node->InputAt(1)->opcode());
  Emit(kArchNop, g.DefineAsLocation(node, ExceptionLocation()));
}
   2604 
   2605 
// Defines an OSR (on-stack-replacement) entry value at the location the
// linkage assigned to its index; the value is already there on entry.
void InstructionSelector::VisitOsrValue(Node* node) {
  OperandGenerator g(this);
  int index = OsrValueIndexOf(node->op());
  Emit(kArchNop,
       g.DefineAsLocation(node, linkage()->GetOsrValueLocation(index)));
}
   2612 
   2613 
// Lowers a Phi node to a PhiInstruction attached to the current instruction
// block, wiring one virtual-register input per predecessor.
void InstructionSelector::VisitPhi(Node* node) {
  const int input_count = node->op()->ValueInputCount();
  // Phis have exactly one value input per predecessor block.
  DCHECK_EQ(input_count, current_block_->PredecessorCount());
  PhiInstruction* phi = new (instruction_zone())
      PhiInstruction(instruction_zone(), GetVirtualRegister(node),
                     static_cast<size_t>(input_count));
  sequence()
      ->InstructionBlockAt(RpoNumber::FromInt(current_block_->rpo_number()))
      ->AddPhi(phi);
  for (int i = 0; i < input_count; ++i) {
    Node* const input = node->InputAt(i);
    // Inputs must be marked used so their defining instructions are emitted.
    MarkAsUsed(input);
    phi->SetInput(static_cast<size_t>(i), GetVirtualRegister(input));
  }
}
   2629 
   2630 
// Lowers a Projection node. For multi-output producers (arithmetic with
// overflow, try-truncate, 32-bit pair operations), projection 0 is the
// primary value; projection 1 is the secondary result (e.g. overflow bit).
void InstructionSelector::VisitProjection(Node* node) {
  OperandGenerator g(this);
  Node* value = node->InputAt(0);
  switch (value->opcode()) {
    case IrOpcode::kInt32AddWithOverflow:
    case IrOpcode::kInt32SubWithOverflow:
    case IrOpcode::kInt32MulWithOverflow:
    case IrOpcode::kInt64AddWithOverflow:
    case IrOpcode::kInt64SubWithOverflow:
    case IrOpcode::kTryTruncateFloat32ToInt64:
    case IrOpcode::kTryTruncateFloat64ToInt64:
    case IrOpcode::kTryTruncateFloat32ToUint64:
    case IrOpcode::kTryTruncateFloat64ToUint64:
    case IrOpcode::kInt32PairAdd:
    case IrOpcode::kInt32PairSub:
    case IrOpcode::kInt32PairMul:
    case IrOpcode::kWord32PairShl:
    case IrOpcode::kWord32PairShr:
    case IrOpcode::kWord32PairSar:
    case IrOpcode::kInt32AbsWithOverflow:
    case IrOpcode::kInt64AbsWithOverflow:
      if (ProjectionIndexOf(node->op()) == 0u) {
        // Projection 0 aliases the producer's first output.
        Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
      } else {
        // Projection 1: no instruction here; just record the use so the
        // producer is emitted (its selection presumably defines this value —
        // handled when visiting the producing node).
        DCHECK_EQ(1u, ProjectionIndexOf(node->op()));
        MarkAsUsed(value);
      }
      break;
    default:
      break;
  }
}
   2663 
   2664 
// Lowers a constant node to a nop defining the constant operand.
void InstructionSelector::VisitConstant(Node* node) {
  // We must emit a NOP here because every live range needs a defining
  // instruction in the register allocator.
  OperandGenerator g(this);
  Emit(kArchNop, g.DefineAsConstant(node));
}
   2671 
   2672 
// Lowers a call node to a call instruction. |handler| is the exception
// handler block when the call sits in a try-range, or nullptr otherwise.
void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
  OperandGenerator g(this);
  auto call_descriptor = CallDescriptorOf(node->op());

  // If the call needs a frame state (for lazy deopt), it is the input that
  // immediately follows the descriptor's declared inputs.
  FrameStateDescriptor* frame_state_descriptor = nullptr;
  if (call_descriptor->NeedsFrameState()) {
    frame_state_descriptor = GetFrameStateDescriptor(
        node->InputAt(static_cast<int>(call_descriptor->InputCount())));
  }

  CallBuffer buffer(zone(), call_descriptor, frame_state_descriptor);

  // Compute InstructionOperands for inputs and outputs.
  // TODO(turbofan): on some architectures it's probably better to use
  // the code object in a register if there are multiple uses of it.
  // Improve constant pool and the heuristics in the register allocator
  // for where to emit constants.
  CallBufferFlags call_buffer_flags(kCallCodeImmediate | kCallAddressImmediate);
  InitializeCallBuffer(node, &buffer, call_buffer_flags, false);

  EmitPrepareArguments(&(buffer.pushed_nodes), call_descriptor, node);

  // Pass label of exception handler block.
  CallDescriptor::Flags flags = call_descriptor->flags();
  if (handler) {
    DCHECK_EQ(IrOpcode::kIfException, handler->front()->opcode());
    flags |= CallDescriptor::kHasExceptionHandler;
    buffer.instruction_args.push_back(g.Label(handler));
  }

  // Select the appropriate opcode based on the call type.
  InstructionCode opcode = kArchNop;
  switch (call_descriptor->kind()) {
    case CallDescriptor::kCallAddress:
      // C calls encode the parameter count in the MiscField instead of the
      // descriptor flags.
      opcode = kArchCallCFunction | MiscField::encode(static_cast<int>(
                                        call_descriptor->ParameterCount()));
      break;
    case CallDescriptor::kCallCodeObject:
      opcode = kArchCallCodeObject | MiscField::encode(flags);
      break;
    case CallDescriptor::kCallJSFunction:
      opcode = kArchCallJSFunction | MiscField::encode(flags);
      break;
    case CallDescriptor::kCallWasmFunction:
      opcode = kArchCallWasmFunction | MiscField::encode(flags);
      break;
  }

  // Emit the call instruction.
  size_t const output_count = buffer.outputs.size();
  auto* outputs = output_count ? &buffer.outputs.front() : nullptr;
  Instruction* call_instr =
      Emit(opcode, output_count, outputs, buffer.instruction_args.size(),
           &buffer.instruction_args.front());
  if (instruction_selection_failed()) return;
  call_instr->MarkAsCall();

  EmitPrepareResults(&(buffer.output_nodes), call_descriptor, node);
}
   2732 
// Like VisitCall, but brackets the call with instructions that save and
// restore the caller-saved registers, using the descriptor's FP save mode.
void InstructionSelector::VisitCallWithCallerSavedRegisters(
    Node* node, BasicBlock* handler) {
  OperandGenerator g(this);
  const auto fp_mode = CallDescriptorOf(node->op())->get_save_fp_mode();
  Emit(kArchSaveCallerRegisters | MiscField::encode(static_cast<int>(fp_mode)),
       g.NoOutput());
  VisitCall(node, handler);
  Emit(kArchRestoreCallerRegisters |
           MiscField::encode(static_cast<int>(fp_mode)),
       g.NoOutput());
}
   2744 
// Lowers a tail call: the callee replaces the caller's frame, so stack
// parameters may need to be shifted by the callee/caller slot difference.
void InstructionSelector::VisitTailCall(Node* node) {
  OperandGenerator g(this);
  auto call_descriptor = CallDescriptorOf(node->op());

  CallDescriptor* caller = linkage()->GetIncomingDescriptor();
  DCHECK(caller->CanTailCall(node));
  const CallDescriptor* callee = CallDescriptorOf(node->op());
  // Difference in stack parameter slots between callee and caller frames.
  int stack_param_delta = callee->GetStackParameterDelta(caller);
  CallBuffer buffer(zone(), call_descriptor, nullptr);

  // Compute InstructionOperands for inputs and outputs.
  CallBufferFlags flags(kCallCodeImmediate | kCallTail);
  if (IsTailCallAddressImmediate()) {
    flags |= kCallAddressImmediate;
  }
  if (callee->flags() & CallDescriptor::kFixedTargetRegister) {
    flags |= kCallFixedTargetRegister;
  }
  InitializeCallBuffer(node, &buffer, flags, true, stack_param_delta);

  // Select the appropriate opcode based on the call type.
  InstructionCode opcode;
  InstructionOperandVector temps(zone());
  if (linkage()->GetIncomingDescriptor()->IsJSFunctionCall()) {
    switch (call_descriptor->kind()) {
      case CallDescriptor::kCallCodeObject:
        opcode = kArchTailCallCodeObjectFromJSFunction;
        break;
      default:
        UNREACHABLE();
        return;
    }
    // Tail calls out of a JS function need scratch registers; the count is
    // architecture-specific.
    int temps_count = GetTempsCountForTailCallFromJSFunction();
    for (int i = 0; i < temps_count; i++) {
      temps.push_back(g.TempRegister());
    }
  } else {
    switch (call_descriptor->kind()) {
      case CallDescriptor::kCallCodeObject:
        opcode = kArchTailCallCodeObject;
        break;
      case CallDescriptor::kCallAddress:
        opcode = kArchTailCallAddress;
        break;
      case CallDescriptor::kCallWasmFunction:
        opcode = kArchTailCallWasm;
        break;
      default:
        UNREACHABLE();
        return;
    }
  }
  opcode |= MiscField::encode(call_descriptor->flags());

  Emit(kArchPrepareTailCall, g.NoOutput());

  // Add an immediate operand that represents the first slot that is unused
  // with respect to the stack pointer that has been updated for the tail call
  // instruction. This is used by backends that need to pad arguments for stack
  // alignment, in order to store an optional slot of padding above the
  // arguments.
  int optional_padding_slot = callee->GetFirstUnusedStackSlot();
  buffer.instruction_args.push_back(g.TempImmediate(optional_padding_slot));

  // Account for the return address slot on targets that push it on the stack.
  int first_unused_stack_slot =
      (V8_TARGET_ARCH_STORES_RETURN_ADDRESS_ON_STACK ? 1 : 0) +
      stack_param_delta;
  buffer.instruction_args.push_back(g.TempImmediate(first_unused_stack_slot));

  // Emit the tailcall instruction.
  Emit(opcode, 0, nullptr, buffer.instruction_args.size(),
       &buffer.instruction_args.front(), temps.size(),
       temps.empty() ? nullptr : &temps.front());
}
   2819 
   2820 
   2821 void InstructionSelector::VisitGoto(BasicBlock* target) {
   2822   // jump to the next block.
   2823   OperandGenerator g(this);
   2824   Emit(kArchJmp, g.NoOutput(), g.Label(target));
   2825 }
   2826 
   2827 void InstructionSelector::VisitReturn(Node* ret) {
   2828   OperandGenerator g(this);
   2829   const int input_count = linkage()->GetIncomingDescriptor()->ReturnCount() == 0
   2830                               ? 1
   2831                               : ret->op()->ValueInputCount();
   2832   DCHECK_GE(input_count, 1);
   2833   auto value_locations = zone()->NewArray<InstructionOperand>(input_count);
   2834   Node* pop_count = ret->InputAt(0);
   2835   value_locations[0] = (pop_count->opcode() == IrOpcode::kInt32Constant ||
   2836                         pop_count->opcode() == IrOpcode::kInt64Constant)
   2837                            ? g.UseImmediate(pop_count)
   2838                            : g.UseRegister(pop_count);
   2839   for (int i = 1; i < input_count; ++i) {
   2840     value_locations[i] =
   2841         g.UseLocation(ret->InputAt(i), linkage()->GetReturnLocation(i - 1));
   2842   }
   2843   Emit(kArchRet, 0, nullptr, input_count, value_locations);
   2844 }
   2845 
   2846 void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
   2847                                       BasicBlock* fbranch) {
   2848   if (NeedsPoisoning(IsSafetyCheckOf(branch->op()))) {
   2849     FlagsContinuation cont =
   2850         FlagsContinuation::ForBranchAndPoison(kNotEqual, tbranch, fbranch);
   2851     VisitWordCompareZero(branch, branch->InputAt(0), &cont);
   2852   } else {
   2853     FlagsContinuation cont =
   2854         FlagsContinuation::ForBranch(kNotEqual, tbranch, fbranch);
   2855     VisitWordCompareZero(branch, branch->InputAt(0), &cont);
   2856   }
   2857 }
   2858 
   2859 void InstructionSelector::VisitDeoptimizeIf(Node* node) {
   2860   DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
   2861   if (NeedsPoisoning(p.is_safety_check())) {
   2862     FlagsContinuation cont = FlagsContinuation::ForDeoptimizeAndPoison(
   2863         kNotEqual, p.kind(), p.reason(), p.feedback(), node->InputAt(1));
   2864     VisitWordCompareZero(node, node->InputAt(0), &cont);
   2865   } else {
   2866     FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
   2867         kNotEqual, p.kind(), p.reason(), p.feedback(), node->InputAt(1));
   2868     VisitWordCompareZero(node, node->InputAt(0), &cont);
   2869   }
   2870 }
   2871 
   2872 void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
   2873   DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
   2874   if (NeedsPoisoning(p.is_safety_check())) {
   2875     FlagsContinuation cont = FlagsContinuation::ForDeoptimizeAndPoison(
   2876         kEqual, p.kind(), p.reason(), p.feedback(), node->InputAt(1));
   2877     VisitWordCompareZero(node, node->InputAt(0), &cont);
   2878   } else {
   2879     FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
   2880         kEqual, p.kind(), p.reason(), p.feedback(), node->InputAt(1));
   2881     VisitWordCompareZero(node, node->InputAt(0), &cont);
   2882   }
   2883 }
   2884 
   2885 void InstructionSelector::VisitTrapIf(Node* node, TrapId trap_id) {
   2886   FlagsContinuation cont =
   2887       FlagsContinuation::ForTrap(kNotEqual, trap_id, node->InputAt(1));
   2888   VisitWordCompareZero(node, node->InputAt(0), &cont);
   2889 }
   2890 
   2891 void InstructionSelector::VisitTrapUnless(Node* node, TrapId trap_id) {
   2892   FlagsContinuation cont =
   2893       FlagsContinuation::ForTrap(kEqual, trap_id, node->InputAt(1));
   2894   VisitWordCompareZero(node, node->InputAt(0), &cont);
   2895 }
   2896 
   2897 void InstructionSelector::EmitIdentity(Node* node) {
   2898   OperandGenerator g(this);
   2899   MarkAsUsed(node->InputAt(0));
   2900   SetRename(node, node->InputAt(0));
   2901 }
   2902 
// Emits an unconditional deoptimization exit of the given kind and reason.
// |value| is presumably the frame state describing how to rebuild the
// interpreter frame — confirm against EmitDeoptimize's signature.
void InstructionSelector::VisitDeoptimize(DeoptimizeKind kind,
                                          DeoptimizeReason reason,
                                          VectorSlotPair const& feedback,
                                          Node* value) {
  // No outputs and no fixed inputs: kArchDeoptimize only carries the
  // deoptimization info that EmitDeoptimize attaches.
  EmitDeoptimize(kArchDeoptimize, 0, nullptr, 0, nullptr, kind, reason,
                 feedback, value);
}
   2910 
   2911 void InstructionSelector::VisitThrow(Node* node) {
   2912   OperandGenerator g(this);
   2913   Emit(kArchThrowTerminator, g.NoOutput());
   2914 }
   2915 
   2916 void InstructionSelector::VisitDebugBreak(Node* node) {
   2917   OperandGenerator g(this);
   2918   Emit(kArchDebugBreak, g.NoOutput());
   2919 }
   2920 
   2921 void InstructionSelector::VisitUnreachable(Node* node) {
   2922   OperandGenerator g(this);
   2923   Emit(kArchDebugBreak, g.NoOutput());
   2924 }
   2925 
   2926 void InstructionSelector::VisitDeadValue(Node* node) {
   2927   OperandGenerator g(this);
   2928   MarkAsRepresentation(DeadValueRepresentationOf(node->op()), node);
   2929   Emit(kArchDebugBreak, g.DefineAsConstant(node));
   2930 }
   2931 
   2932 void InstructionSelector::VisitComment(Node* node) {
   2933   OperandGenerator g(this);
   2934   InstructionOperand operand(g.UseImmediate(node));
   2935   Emit(kArchComment, 0, nullptr, 1, &operand);
   2936 }
   2937 
// Lowers UnsafePointerAdd to an integer addition of pointer width: a 64-bit
// add on 64-bit targets and a 32-bit add everywhere else.
void InstructionSelector::VisitUnsafePointerAdd(Node* node) {
#if V8_TARGET_ARCH_64_BIT
  VisitInt64Add(node);
#else   // V8_TARGET_ARCH_64_BIT
  VisitInt32Add(node);
#endif  // V8_TARGET_ARCH_64_BIT
}
   2945 
   2946 void InstructionSelector::VisitRetain(Node* node) {
   2947   OperandGenerator g(this);
   2948   Emit(kArchNop, g.NoOutput(), g.UseAny(node->InputAt(0)));
   2949 }
   2950 
   2951 bool InstructionSelector::CanProduceSignalingNaN(Node* node) {
   2952   // TODO(jarin) Improve the heuristic here.
   2953   if (node->opcode() == IrOpcode::kFloat64Add ||
   2954       node->opcode() == IrOpcode::kFloat64Sub ||
   2955       node->opcode() == IrOpcode::kFloat64Mul) {
   2956     return false;
   2957   }
   2958   return true;
   2959 }
   2960 
   2961 FrameStateDescriptor* InstructionSelector::GetFrameStateDescriptor(
   2962     Node* state) {
   2963   DCHECK_EQ(IrOpcode::kFrameState, state->opcode());
   2964   DCHECK_EQ(kFrameStateInputCount, state->InputCount());
   2965   FrameStateInfo state_info = FrameStateInfoOf(state->op());
   2966 
   2967   int parameters = static_cast<int>(
   2968       StateValuesAccess(state->InputAt(kFrameStateParametersInput)).size());
   2969   int locals = static_cast<int>(
   2970       StateValuesAccess(state->InputAt(kFrameStateLocalsInput)).size());
   2971   int stack = static_cast<int>(
   2972       StateValuesAccess(state->InputAt(kFrameStateStackInput)).size());
   2973 
   2974   DCHECK_EQ(parameters, state_info.parameter_count());
   2975   DCHECK_EQ(locals, state_info.local_count());
   2976 
   2977   FrameStateDescriptor* outer_state = nullptr;
   2978   Node* outer_node = state->InputAt(kFrameStateOuterStateInput);
   2979   if (outer_node->opcode() == IrOpcode::kFrameState) {
   2980     outer_state = GetFrameStateDescriptor(outer_node);
   2981   }
   2982 
   2983   return new (instruction_zone()) FrameStateDescriptor(
   2984       instruction_zone(), state_info.type(), state_info.bailout_id(),
   2985       state_info.state_combine(), parameters, locals, stack,
   2986       state_info.shared_info(), outer_state);
   2987 }
   2988 
   2989 // static
   2990 void InstructionSelector::CanonicalizeShuffle(bool inputs_equal,
   2991                                               uint8_t* shuffle,
   2992                                               bool* needs_swap,
   2993                                               bool* is_swizzle) {
   2994   *needs_swap = false;
   2995   // Inputs equal, then it's a swizzle.
   2996   if (inputs_equal) {
   2997     *is_swizzle = true;
   2998   } else {
   2999     // Inputs are distinct; check that both are required.
   3000     bool src0_is_used = false;
   3001     bool src1_is_used = false;
   3002     for (int i = 0; i < kSimd128Size; ++i) {
   3003       if (shuffle[i] < kSimd128Size) {
   3004         src0_is_used = true;
   3005       } else {
   3006         src1_is_used = true;
   3007       }
   3008     }
   3009     if (src0_is_used && !src1_is_used) {
   3010       *is_swizzle = true;
   3011     } else if (src1_is_used && !src0_is_used) {
   3012       *needs_swap = true;
   3013       *is_swizzle = true;
   3014     } else {
   3015       *is_swizzle = false;
   3016       // Canonicalize general 2 input shuffles so that the first input lanes are
   3017       // encountered first. This makes architectural shuffle pattern matching
   3018       // easier, since we only need to consider 1 input ordering instead of 2.
   3019       if (shuffle[0] >= kSimd128Size) {
   3020         // The second operand is used first. Swap inputs and adjust the shuffle.
   3021         *needs_swap = true;
   3022         for (int i = 0; i < kSimd128Size; ++i) {
   3023           shuffle[i] ^= kSimd128Size;
   3024         }
   3025       }
   3026     }
   3027   }
   3028   if (*is_swizzle) {
   3029     for (int i = 0; i < kSimd128Size; ++i) shuffle[i] &= kSimd128Size - 1;
   3030   }
   3031 }
   3032 
// Node-level canonicalization: copies the raw shuffle indices out of the
// node's operator, canonicalizes them via the static overload, and rewrites
// the node's inputs to match (swapping them and/or duplicating input 0).
void InstructionSelector::CanonicalizeShuffle(Node* node, uint8_t* shuffle,
                                              bool* is_swizzle) {
  // Get raw shuffle indices.
  memcpy(shuffle, OpParameter<uint8_t*>(node->op()), kSimd128Size);
  bool needs_swap;
  // Inputs mapping to the same virtual register make this a swizzle.
  bool inputs_equal = GetVirtualRegister(node->InputAt(0)) ==
                      GetVirtualRegister(node->InputAt(1));
  CanonicalizeShuffle(inputs_equal, shuffle, &needs_swap, is_swizzle);
  if (needs_swap) {
    SwapShuffleInputs(node);
  }
  // Duplicate the first input; for some shuffles on some architectures, it's
  // easiest to implement a swizzle as a shuffle so it might be used.
  if (*is_swizzle) {
    node->ReplaceInput(1, node->InputAt(0));
  }
}
   3050 
   3051 // static
   3052 void InstructionSelector::SwapShuffleInputs(Node* node) {
   3053   Node* input0 = node->InputAt(0);
   3054   Node* input1 = node->InputAt(1);
   3055   node->ReplaceInput(0, input1);
   3056   node->ReplaceInput(1, input0);
   3057 }
   3058 
   3059 // static
   3060 bool InstructionSelector::TryMatchIdentity(const uint8_t* shuffle) {
   3061   for (int i = 0; i < kSimd128Size; ++i) {
   3062     if (shuffle[i] != i) return false;
   3063   }
   3064   return true;
   3065 }
   3066 
   3067 // static
   3068 bool InstructionSelector::TryMatch32x4Shuffle(const uint8_t* shuffle,
   3069                                               uint8_t* shuffle32x4) {
   3070   for (int i = 0; i < 4; ++i) {
   3071     if (shuffle[i * 4] % 4 != 0) return false;
   3072     for (int j = 1; j < 4; ++j) {
   3073       if (shuffle[i * 4 + j] - shuffle[i * 4 + j - 1] != 1) return false;
   3074     }
   3075     shuffle32x4[i] = shuffle[i * 4] / 4;
   3076   }
   3077   return true;
   3078 }
   3079 
   3080 // static
   3081 bool InstructionSelector::TryMatch16x8Shuffle(const uint8_t* shuffle,
   3082                                               uint8_t* shuffle16x8) {
   3083   for (int i = 0; i < 8; ++i) {
   3084     if (shuffle[i * 2] % 2 != 0) return false;
   3085     for (int j = 1; j < 2; ++j) {
   3086       if (shuffle[i * 2 + j] - shuffle[i * 2 + j - 1] != 1) return false;
   3087     }
   3088     shuffle16x8[i] = shuffle[i * 2] / 2;
   3089   }
   3090   return true;
   3091 }
   3092 
   3093 // static
   3094 bool InstructionSelector::TryMatchConcat(const uint8_t* shuffle,
   3095                                          uint8_t* offset) {
   3096   // Don't match the identity shuffle (e.g. [0 1 2 ... 15]).
   3097   uint8_t start = shuffle[0];
   3098   if (start == 0) return false;
   3099   DCHECK_GT(kSimd128Size, start);  // The shuffle should be canonicalized.
   3100   // A concatenation is a series of consecutive indices, with at most one jump
   3101   // in the middle from the last lane to the first.
   3102   for (int i = 1; i < kSimd128Size; ++i) {
   3103     if ((shuffle[i]) != ((shuffle[i - 1] + 1))) {
   3104       if (shuffle[i - 1] != 15) return false;
   3105       if (shuffle[i] % kSimd128Size != 0) return false;
   3106     }
   3107   }
   3108   *offset = start;
   3109   return true;
   3110 }
   3111 
   3112 // static
   3113 bool InstructionSelector::TryMatchBlend(const uint8_t* shuffle) {
   3114   for (int i = 0; i < 16; ++i) {
   3115     if ((shuffle[i] & 0xF) != i) return false;
   3116   }
   3117   return true;
   3118 }
   3119 
   3120 // static
   3121 int32_t InstructionSelector::Pack4Lanes(const uint8_t* shuffle) {
   3122   int32_t result = 0;
   3123   for (int i = 3; i >= 0; --i) {
   3124     result <<= 8;
   3125     result |= shuffle[i];
   3126   }
   3127   return result;
   3128 }
   3129 
// Returns whether a comparison marked with |safety_check| must have its
// result poisoned (masked) under the configured mitigation level.
bool InstructionSelector::NeedsPoisoning(IsSafetyCheck safety_check) const {
  switch (poisoning_level_) {
    case PoisoningMitigationLevel::kDontPoison:
      return false;
    case PoisoningMitigationLevel::kPoisonAll:
      // Any kind of safety check is poisoned.
      return safety_check != IsSafetyCheck::kNoSafetyCheck;
    case PoisoningMitigationLevel::kPoisonCriticalOnly:
      // Only checks explicitly marked critical are poisoned.
      return safety_check == IsSafetyCheck::kCriticalSafetyCheck;
  }
  UNREACHABLE();
}
   3141 
   3142 }  // namespace compiler
   3143 }  // namespace internal
   3144 }  // namespace v8
   3145