// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/interpreter/interpreter.h"

#include <fstream>

#include "src/ast/prettyprinter.h"
#include "src/code-factory.h"
#include "src/compiler.h"
#include "src/factory.h"
#include "src/interpreter/bytecode-generator.h"
#include "src/interpreter/bytecodes.h"
#include "src/interpreter/interpreter-assembler.h"
#include "src/interpreter/interpreter-intrinsics.h"
#include "src/log.h"
#include "src/zone.h"

namespace v8 {
namespace internal {
namespace interpreter {

using compiler::Node;
typedef CodeStubAssembler::Label Label;
typedef CodeStubAssembler::Variable Variable;

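// Shorthand so that the handler bodies below read like assembly listings:
// "__ Foo(...)" expands to "assembler->Foo(...)".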
#define __ assembler->

Interpreter::Interpreter(Isolate* isolate) : isolate_(isolate) {
  memset(dispatch_table_, 0, sizeof(dispatch_table_));
}

void Interpreter::Initialize() {
  if (IsDispatchTableInitialized()) return;
  Zone zone(isolate_->allocator());
  HandleScope scope(isolate_);

  if (FLAG_trace_ignition_dispatches) {
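    // Allocate a square matrix of dispatch counters, indexed as
    // [from * kBytecodeCount + to]; see GetDispatchCounter below.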
    static const int kBytecodeCount = static_cast<int>(Bytecode::kLast) + 1;
    bytecode_dispatch_counters_table_.Reset(
        new uintptr_t[kBytecodeCount * kBytecodeCount]);
    memset(bytecode_dispatch_counters_table_.get(), 0,
           sizeof(uintptr_t) * kBytecodeCount * kBytecodeCount);
  }

  // Generate bytecode handlers for all bytecodes and scales.
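  // Each operand scale corresponds to a bytecode-size prefix (e.g. the Wide
  // and ExtraWide forms), so a bytecode may have up to one handler per scale.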
  const OperandScale kOperandScales[] = {
#define VALUE(Name, _) OperandScale::k##Name,
      OPERAND_SCALE_LIST(VALUE)
#undef VALUE
  };

  for (OperandScale operand_scale : kOperandScales) {
#define GENERATE_CODE(Name, ...)                                               \
  {                                                                            \
    if (Bytecodes::BytecodeHasHandler(Bytecode::k##Name, operand_scale)) {     \
      InterpreterAssembler assembler(isolate_, &zone, Bytecode::k##Name,       \
                                     operand_scale);                           \
      Do##Name(&assembler);                                                    \
      Handle<Code> code = assembler.GenerateCode();                            \
      size_t index = GetDispatchTableIndex(Bytecode::k##Name, operand_scale);  \
      dispatch_table_[index] = code->entry();                                  \
      TraceCodegen(code);                                                      \
      PROFILE(                                                                 \
          isolate_,                                                            \
          CodeCreateEvent(                                                     \
              CodeEventListener::BYTECODE_HANDLER_TAG,                         \
              AbstractCode::cast(*code),                                       \
              Bytecodes::ToString(Bytecode::k##Name, operand_scale).c_str())); \
    }                                                                          \
  }
    BYTECODE_LIST(GENERATE_CODE)
#undef GENERATE_CODE
  }

  // Fill unused entries with the illegal bytecode handler.
  size_t illegal_index =
      GetDispatchTableIndex(Bytecode::kIllegal, OperandScale::kSingle);
  for (size_t index = 0; index < arraysize(dispatch_table_); ++index) {
    if (dispatch_table_[index] == nullptr) {
      dispatch_table_[index] = dispatch_table_[illegal_index];
    }
  }
}

Code* Interpreter::GetBytecodeHandler(Bytecode bytecode,
                                      OperandScale operand_scale) {
  DCHECK(IsDispatchTableInitialized());
  DCHECK(Bytecodes::BytecodeHasHandler(bytecode, operand_scale));
  size_t index = GetDispatchTableIndex(bytecode, operand_scale);
  Address code_entry = dispatch_table_[index];
  return Code::GetCodeFromTargetAddress(code_entry);
}

// static
size_t Interpreter::GetDispatchTableIndex(Bytecode bytecode,
                                          OperandScale operand_scale) {
  static const size_t kEntriesPerOperandScale = 1u << kBitsPerByte;
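  // With kBitsPerByte == 8 there are 256 entries per operand scale; for
  // example, a (hypothetical) bytecode value of 0x0b dispatches at index
  // 0x0b for kSingle, 0x10b for kDouble and 0x20b for kQuadruple.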
  size_t index = static_cast<size_t>(bytecode);
  switch (operand_scale) {
    case OperandScale::kSingle:
      return index;
    case OperandScale::kDouble:
      return index + kEntriesPerOperandScale;
    case OperandScale::kQuadruple:
      return index + 2 * kEntriesPerOperandScale;
  }
  UNREACHABLE();
  return 0;
}

void Interpreter::IterateDispatchTable(ObjectVisitor* v) {
  for (int i = 0; i < kDispatchTableSize; i++) {
    Address code_entry = dispatch_table_[i];
    Object* code = code_entry == nullptr
                       ? nullptr
                       : Code::GetCodeFromTargetAddress(code_entry);
    Object* old_code = code;
    v->VisitPointer(&code);
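    // The visitor may relocate the handler Code object (e.g. during GC); if
    // it did, rewrite the table entry to point at the new entry address.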
    if (code != old_code) {
      dispatch_table_[i] = reinterpret_cast<Code*>(code)->entry();
    }
  }
}

// static
int Interpreter::InterruptBudget() {
  // TODO(ignition): Tune code size multiplier.
  const int kCodeSizeMultiplier = 32;
  return FLAG_interrupt_budget * kCodeSizeMultiplier;
}

bool Interpreter::MakeBytecode(CompilationInfo* info) {
  RuntimeCallTimerScope runtimeTimer(info->isolate(),
                                     &RuntimeCallStats::CompileIgnition);
  TimerEventScope<TimerEventCompileIgnition> timer(info->isolate());
  TRACE_EVENT0("v8", "V8.CompileIgnition");

  if (FLAG_print_bytecode || FLAG_print_source || FLAG_print_ast) {
    OFStream os(stdout);
    base::SmartArrayPointer<char> name = info->GetDebugName();
    os << "[generating bytecode for function: " << name.get() << "]"
       << std::endl
       << std::flush;
  }

#ifdef DEBUG
  if (info->parse_info() && FLAG_print_source) {
    OFStream os(stdout);
    os << "--- Source from AST ---" << std::endl
       << PrettyPrinter(info->isolate()).PrintProgram(info->literal())
       << std::endl
       << std::flush;
  }

  if (info->parse_info() && FLAG_print_ast) {
    OFStream os(stdout);
    os << "--- AST ---" << std::endl
       << AstPrinter(info->isolate()).PrintProgram(info->literal()) << std::endl
       << std::flush;
  }
#endif  // DEBUG

  BytecodeGenerator generator(info);
  Handle<BytecodeArray> bytecodes = generator.MakeBytecode();

  if (generator.HasStackOverflow()) return false;

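  // An illustrative --print-bytecode dump for `function f(a) { return a; }`
  // (a sketch only; the exact format and register names vary by version):
  //
  //   Parameter count 2
  //   Frame size 0
  //      Ldar a0
  //      Return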
  if (FLAG_print_bytecode) {
    OFStream os(stdout);
    bytecodes->Print(os);
    os << std::flush;
  }

  info->SetBytecodeArray(bytecodes);
  info->SetCode(info->isolate()->builtins()->InterpreterEntryTrampoline());
  return true;
}

bool Interpreter::IsDispatchTableInitialized() {
  if (FLAG_trace_ignition || FLAG_trace_ignition_codegen ||
      FLAG_trace_ignition_dispatches) {
    // Regenerate the table to add bytecode tracing operations, print the
    // assembly code generated by TurboFan, or instrument handlers with
    // dispatch counters.
    return false;
  }
  return dispatch_table_[0] != nullptr;
}

void Interpreter::TraceCodegen(Handle<Code> code) {
#ifdef ENABLE_DISASSEMBLER
  if (FLAG_trace_ignition_codegen) {
    OFStream os(stdout);
    code->Disassemble(nullptr, os);
    os << std::flush;
  }
#endif  // ENABLE_DISASSEMBLER
}

const char* Interpreter::LookupNameOfBytecodeHandler(Code* code) {
#ifdef ENABLE_DISASSEMBLER
#define RETURN_NAME(Name, ...)                                 \
  if (dispatch_table_[Bytecodes::ToByte(Bytecode::k##Name)] == \
      code->entry()) {                                         \
    return #Name;                                              \
  }
  BYTECODE_LIST(RETURN_NAME)
#undef RETURN_NAME
#endif  // ENABLE_DISASSEMBLER
  return nullptr;
}

uintptr_t Interpreter::GetDispatchCounter(Bytecode from, Bytecode to) const {
  int from_index = Bytecodes::ToByte(from);
  int to_index = Bytecodes::ToByte(to);
  return bytecode_dispatch_counters_table_[from_index * kNumberOfBytecodes +
                                           to_index];
}

Local<v8::Object> Interpreter::GetDispatchCountersObject() {
  v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(isolate_);
  Local<v8::Context> context = isolate->GetCurrentContext();

  Local<v8::Object> counters_map = v8::Object::New(isolate);

  // Output is a JSON-encoded object of objects.
  //
  // The keys of the top-level object are source bytecodes, and the
  // corresponding values are objects. The keys of these inner objects are
  // dispatch destinations, and each associated value is a counter for the
  // corresponding source-destination dispatch pair.
  //
  // Only non-zero counters are written to the file, but an entry in the
  // top-level object is always present, even if the value is empty because
  // all counters for that source are zero.
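  //
  // An illustrative (made-up) example:
  //
  //   {"Ldar": {"Star": 3, "Return": 1}, "Return": {}}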

  for (int from_index = 0; from_index < kNumberOfBytecodes; ++from_index) {
    Bytecode from_bytecode = Bytecodes::FromByte(from_index);
    Local<v8::Object> counters_row = v8::Object::New(isolate);

    for (int to_index = 0; to_index < kNumberOfBytecodes; ++to_index) {
      Bytecode to_bytecode = Bytecodes::FromByte(to_index);
      uintptr_t counter = GetDispatchCounter(from_bytecode, to_bytecode);

      if (counter > 0) {
        std::string to_name = Bytecodes::ToString(to_bytecode);
        Local<v8::String> to_name_object =
            v8::String::NewFromUtf8(isolate, to_name.c_str(),
                                    NewStringType::kNormal)
                .ToLocalChecked();
        Local<v8::Number> counter_object = v8::Number::New(isolate, counter);
        CHECK(counters_row
                  ->DefineOwnProperty(context, to_name_object, counter_object)
                  .IsJust());
      }
    }

    std::string from_name = Bytecodes::ToString(from_bytecode);
    Local<v8::String> from_name_object =
        v8::String::NewFromUtf8(isolate, from_name.c_str(),
                                NewStringType::kNormal)
            .ToLocalChecked();

    CHECK(
        counters_map->DefineOwnProperty(context, from_name_object, counters_row)
            .IsJust());
  }

  return counters_map;
}

// LdaZero
//
// Load literal '0' into the accumulator.
void Interpreter::DoLdaZero(InterpreterAssembler* assembler) {
  Node* zero_value = __ NumberConstant(0.0);
  __ SetAccumulator(zero_value);
  __ Dispatch();
}

// LdaSmi <imm>
//
// Load an integer literal into the accumulator as a Smi.
void Interpreter::DoLdaSmi(InterpreterAssembler* assembler) {
  Node* raw_int = __ BytecodeOperandImm(0);
  Node* smi_int = __ SmiTag(raw_int);
  __ SetAccumulator(smi_int);
  __ Dispatch();
}
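//
// For example (a sketch; actual register allocation varies), `var x = 1;`
// might compile to:
//
//   LdaSmi [1]
//   Star r0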

// LdaConstant <idx>
//
// Load constant literal at |idx| in the constant pool into the accumulator.
void Interpreter::DoLdaConstant(InterpreterAssembler* assembler) {
  Node* index = __ BytecodeOperandIdx(0);
  Node* constant = __ LoadConstantPoolEntry(index);
  __ SetAccumulator(constant);
  __ Dispatch();
}

// LdaUndefined
//
// Load Undefined into the accumulator.
void Interpreter::DoLdaUndefined(InterpreterAssembler* assembler) {
  Node* undefined_value =
      __ HeapConstant(isolate_->factory()->undefined_value());
  __ SetAccumulator(undefined_value);
  __ Dispatch();
}

// LdrUndefined <reg>
//
// Loads undefined into register |reg|.
void Interpreter::DoLdrUndefined(InterpreterAssembler* assembler) {
  Node* undefined_value =
      __ HeapConstant(isolate_->factory()->undefined_value());
  Node* destination = __ BytecodeOperandReg(0);
  __ StoreRegister(undefined_value, destination);
  __ Dispatch();
}

// LdaNull
//
// Load Null into the accumulator.
void Interpreter::DoLdaNull(InterpreterAssembler* assembler) {
  Node* null_value = __ HeapConstant(isolate_->factory()->null_value());
  __ SetAccumulator(null_value);
  __ Dispatch();
}

// LdaTheHole
//
// Load TheHole into the accumulator.
void Interpreter::DoLdaTheHole(InterpreterAssembler* assembler) {
  Node* the_hole_value = __ HeapConstant(isolate_->factory()->the_hole_value());
  __ SetAccumulator(the_hole_value);
  __ Dispatch();
}

// LdaTrue
//
// Load True into the accumulator.
void Interpreter::DoLdaTrue(InterpreterAssembler* assembler) {
  Node* true_value = __ HeapConstant(isolate_->factory()->true_value());
  __ SetAccumulator(true_value);
  __ Dispatch();
}

// LdaFalse
//
// Load False into the accumulator.
void Interpreter::DoLdaFalse(InterpreterAssembler* assembler) {
  Node* false_value = __ HeapConstant(isolate_->factory()->false_value());
  __ SetAccumulator(false_value);
  __ Dispatch();
}

// Ldar <src>
//
// Load accumulator with value from register <src>.
void Interpreter::DoLdar(InterpreterAssembler* assembler) {
  Node* reg_index = __ BytecodeOperandReg(0);
  Node* value = __ LoadRegister(reg_index);
  __ SetAccumulator(value);
  __ Dispatch();
}

// Star <dst>
//
// Store accumulator to register <dst>.
void Interpreter::DoStar(InterpreterAssembler* assembler) {
  Node* reg_index = __ BytecodeOperandReg(0);
  Node* accumulator = __ GetAccumulator();
  __ StoreRegister(accumulator, reg_index);
  __ Dispatch();
}

// Mov <src> <dst>
//
// Stores the value of register <src> to register <dst>.
void Interpreter::DoMov(InterpreterAssembler* assembler) {
  Node* src_index = __ BytecodeOperandReg(0);
  Node* src_value = __ LoadRegister(src_index);
  Node* dst_index = __ BytecodeOperandReg(1);
  __ StoreRegister(src_value, dst_index);
  __ Dispatch();
}

Node* Interpreter::BuildLoadGlobal(Callable ic,
                                   InterpreterAssembler* assembler) {
  // Get the global object.
  Node* context = __ GetContext();

  // Load the global via the LoadGlobalIC.
  Node* code_target = __ HeapConstant(ic.code());
  Node* raw_slot = __ BytecodeOperandIdx(0);
  Node* smi_slot = __ SmiTag(raw_slot);
  Node* type_feedback_vector = __ LoadTypeFeedbackVector();
  return __ CallStub(ic.descriptor(), code_target, context, smi_slot,
                     type_feedback_vector);
}

// LdaGlobal <slot>
//
// Load the global into the accumulator using FeedbackVector slot <slot>
// outside of a typeof.
void Interpreter::DoLdaGlobal(InterpreterAssembler* assembler) {
  Callable ic =
      CodeFactory::LoadGlobalICInOptimizedCode(isolate_, NOT_INSIDE_TYPEOF);
  Node* result = BuildLoadGlobal(ic, assembler);
  __ SetAccumulator(result);
  __ Dispatch();
}

// LdrGlobal <slot> <reg>
//
// Load the global into register <reg> using FeedbackVector slot <slot>
// outside of a typeof.
void Interpreter::DoLdrGlobal(InterpreterAssembler* assembler) {
  Callable ic =
      CodeFactory::LoadGlobalICInOptimizedCode(isolate_, NOT_INSIDE_TYPEOF);
  Node* result = BuildLoadGlobal(ic, assembler);
  Node* destination = __ BytecodeOperandReg(1);
  __ StoreRegister(result, destination);
  __ Dispatch();
}

// LdaGlobalInsideTypeof <slot>
//
// Load the global into the accumulator using FeedbackVector slot <slot>
// inside of a typeof.
void Interpreter::DoLdaGlobalInsideTypeof(InterpreterAssembler* assembler) {
  Callable ic =
      CodeFactory::LoadGlobalICInOptimizedCode(isolate_, INSIDE_TYPEOF);
  Node* result = BuildLoadGlobal(ic, assembler);
  __ SetAccumulator(result);
  __ Dispatch();
}

void Interpreter::DoStaGlobal(Callable ic, InterpreterAssembler* assembler) {
  // Get the global object.
  Node* context = __ GetContext();
  Node* native_context =
      __ LoadContextSlot(context, Context::NATIVE_CONTEXT_INDEX);
  Node* global = __ LoadContextSlot(native_context, Context::EXTENSION_INDEX);

  // Store the global via the StoreIC.
  Node* code_target = __ HeapConstant(ic.code());
  Node* constant_index = __ BytecodeOperandIdx(0);
  Node* name = __ LoadConstantPoolEntry(constant_index);
  Node* value = __ GetAccumulator();
  Node* raw_slot = __ BytecodeOperandIdx(1);
  Node* smi_slot = __ SmiTag(raw_slot);
  Node* type_feedback_vector = __ LoadTypeFeedbackVector();
  __ CallStub(ic.descriptor(), code_target, context, global, name, value,
              smi_slot, type_feedback_vector);
  __ Dispatch();
}

// StaGlobalSloppy <name_index> <slot>
//
// Store the value in the accumulator into the global with name in constant
// pool entry <name_index> using FeedbackVector slot <slot> in sloppy mode.
void Interpreter::DoStaGlobalSloppy(InterpreterAssembler* assembler) {
  Callable ic = CodeFactory::StoreICInOptimizedCode(isolate_, SLOPPY);
  DoStaGlobal(ic, assembler);
}

// StaGlobalStrict <name_index> <slot>
//
// Store the value in the accumulator into the global with name in constant
// pool entry <name_index> using FeedbackVector slot <slot> in strict mode.
void Interpreter::DoStaGlobalStrict(InterpreterAssembler* assembler) {
  Callable ic = CodeFactory::StoreICInOptimizedCode(isolate_, STRICT);
  DoStaGlobal(ic, assembler);
}

compiler::Node* Interpreter::BuildLoadContextSlot(
    InterpreterAssembler* assembler) {
  Node* reg_index = __ BytecodeOperandReg(0);
  Node* context = __ LoadRegister(reg_index);
  Node* slot_index = __ BytecodeOperandIdx(1);
  return __ LoadContextSlot(context, slot_index);
}

// LdaContextSlot <context> <slot_index>
//
// Load the object in |slot_index| of |context| into the accumulator.
void Interpreter::DoLdaContextSlot(InterpreterAssembler* assembler) {
  Node* result = BuildLoadContextSlot(assembler);
  __ SetAccumulator(result);
  __ Dispatch();
}

// LdrContextSlot <context> <slot_index> <reg>
//
// Load the object in <slot_index> of <context> into register <reg>.
void Interpreter::DoLdrContextSlot(InterpreterAssembler* assembler) {
  Node* result = BuildLoadContextSlot(assembler);
  Node* destination = __ BytecodeOperandReg(2);
  __ StoreRegister(result, destination);
  __ Dispatch();
}

// StaContextSlot <context> <slot_index>
//
// Stores the object in the accumulator into |slot_index| of |context|.
void Interpreter::DoStaContextSlot(InterpreterAssembler* assembler) {
  Node* value = __ GetAccumulator();
  Node* reg_index = __ BytecodeOperandReg(0);
  Node* context = __ LoadRegister(reg_index);
  Node* slot_index = __ BytecodeOperandIdx(1);
  __ StoreContextSlot(context, slot_index, value);
  __ Dispatch();
}

void Interpreter::DoLdaLookupSlot(Runtime::FunctionId function_id,
                                  InterpreterAssembler* assembler) {
  Node* index = __ BytecodeOperandIdx(0);
  Node* name = __ LoadConstantPoolEntry(index);
  Node* context = __ GetContext();
  Node* result = __ CallRuntime(function_id, context, name);
  __ SetAccumulator(result);
  __ Dispatch();
}

// LdaLookupSlot <name_index>
//
// Lookup the object with the name in constant pool entry |name_index|
// dynamically.
void Interpreter::DoLdaLookupSlot(InterpreterAssembler* assembler) {
  DoLdaLookupSlot(Runtime::kLoadLookupSlot, assembler);
}

// LdaLookupSlotInsideTypeof <name_index>
//
// Lookup the object with the name in constant pool entry |name_index|
// dynamically without causing a ReferenceError.
void Interpreter::DoLdaLookupSlotInsideTypeof(InterpreterAssembler* assembler) {
  DoLdaLookupSlot(Runtime::kLoadLookupSlotInsideTypeof, assembler);
}

void Interpreter::DoStaLookupSlot(LanguageMode language_mode,
                                  InterpreterAssembler* assembler) {
  Node* value = __ GetAccumulator();
  Node* index = __ BytecodeOperandIdx(0);
  Node* name = __ LoadConstantPoolEntry(index);
  Node* context = __ GetContext();
  Node* result = __ CallRuntime(is_strict(language_mode)
                                    ? Runtime::kStoreLookupSlot_Strict
                                    : Runtime::kStoreLookupSlot_Sloppy,
                                context, name, value);
  __ SetAccumulator(result);
  __ Dispatch();
}

// StaLookupSlotSloppy <name_index>
//
// Store the object in accumulator to the object with the name in constant
// pool entry |name_index| in sloppy mode.
void Interpreter::DoStaLookupSlotSloppy(InterpreterAssembler* assembler) {
  DoStaLookupSlot(LanguageMode::SLOPPY, assembler);
}

// StaLookupSlotStrict <name_index>
//
// Store the object in accumulator to the object with the name in constant
// pool entry |name_index| in strict mode.
void Interpreter::DoStaLookupSlotStrict(InterpreterAssembler* assembler) {
  DoStaLookupSlot(LanguageMode::STRICT, assembler);
}

Node* Interpreter::BuildLoadNamedProperty(Callable ic,
                                          InterpreterAssembler* assembler) {
  Node* code_target = __ HeapConstant(ic.code());
  Node* register_index = __ BytecodeOperandReg(0);
  Node* object = __ LoadRegister(register_index);
  Node* constant_index = __ BytecodeOperandIdx(1);
  Node* name = __ LoadConstantPoolEntry(constant_index);
  Node* raw_slot = __ BytecodeOperandIdx(2);
  Node* smi_slot = __ SmiTag(raw_slot);
  Node* type_feedback_vector = __ LoadTypeFeedbackVector();
  Node* context = __ GetContext();
  return __ CallStub(ic.descriptor(), code_target, context, object, name,
                     smi_slot, type_feedback_vector);
}

// LdaNamedProperty <object> <name_index> <slot>
//
// Calls the LoadIC at FeedbackVector slot <slot> for <object> and the name at
// constant pool entry <name_index>.
void Interpreter::DoLdaNamedProperty(InterpreterAssembler* assembler) {
  Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_);
  Node* result = BuildLoadNamedProperty(ic, assembler);
  __ SetAccumulator(result);
  __ Dispatch();
}

// LdrNamedProperty <object> <name_index> <slot> <reg>
//
// Calls the LoadIC at FeedbackVector slot <slot> for <object> and the name at
// constant pool entry <name_index> and puts the result into register <reg>.
void Interpreter::DoLdrNamedProperty(InterpreterAssembler* assembler) {
  Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_);
  Node* result = BuildLoadNamedProperty(ic, assembler);
  Node* destination = __ BytecodeOperandReg(3);
  __ StoreRegister(result, destination);
  __ Dispatch();
}

Node* Interpreter::BuildLoadKeyedProperty(Callable ic,
                                          InterpreterAssembler* assembler) {
  Node* code_target = __ HeapConstant(ic.code());
  Node* reg_index = __ BytecodeOperandReg(0);
  Node* object = __ LoadRegister(reg_index);
  Node* name = __ GetAccumulator();
  Node* raw_slot = __ BytecodeOperandIdx(1);
  Node* smi_slot = __ SmiTag(raw_slot);
  Node* type_feedback_vector = __ LoadTypeFeedbackVector();
  Node* context = __ GetContext();
  return __ CallStub(ic.descriptor(), code_target, context, object, name,
                     smi_slot, type_feedback_vector);
}

// LdaKeyedProperty <object> <slot>
//
// Calls the KeyedLoadIC at FeedbackVector slot <slot> for <object> and the key
// in the accumulator.
void Interpreter::DoLdaKeyedProperty(InterpreterAssembler* assembler) {
  Callable ic = CodeFactory::KeyedLoadICInOptimizedCode(isolate_);
  Node* result = BuildLoadKeyedProperty(ic, assembler);
  __ SetAccumulator(result);
  __ Dispatch();
}

// LdrKeyedProperty <object> <slot> <reg>
//
// Calls the KeyedLoadIC at FeedbackVector slot <slot> for <object> and the key
// in the accumulator and puts the result in register <reg>.
void Interpreter::DoLdrKeyedProperty(InterpreterAssembler* assembler) {
  Callable ic = CodeFactory::KeyedLoadICInOptimizedCode(isolate_);
  Node* result = BuildLoadKeyedProperty(ic, assembler);
  Node* destination = __ BytecodeOperandReg(2);
  __ StoreRegister(result, destination);
  __ Dispatch();
}

void Interpreter::DoStoreIC(Callable ic, InterpreterAssembler* assembler) {
  Node* code_target = __ HeapConstant(ic.code());
  Node* object_reg_index = __ BytecodeOperandReg(0);
  Node* object = __ LoadRegister(object_reg_index);
  Node* constant_index = __ BytecodeOperandIdx(1);
  Node* name = __ LoadConstantPoolEntry(constant_index);
  Node* value = __ GetAccumulator();
  Node* raw_slot = __ BytecodeOperandIdx(2);
  Node* smi_slot = __ SmiTag(raw_slot);
  Node* type_feedback_vector = __ LoadTypeFeedbackVector();
  Node* context = __ GetContext();
  __ CallStub(ic.descriptor(), code_target, context, object, name, value,
              smi_slot, type_feedback_vector);
  __ Dispatch();
}

// StaNamedPropertySloppy <object> <name_index> <slot>
//
// Calls the sloppy mode StoreIC at FeedbackVector slot <slot> for <object> and
// the name in constant pool entry <name_index> with the value in the
// accumulator.
void Interpreter::DoStaNamedPropertySloppy(InterpreterAssembler* assembler) {
  Callable ic = CodeFactory::StoreICInOptimizedCode(isolate_, SLOPPY);
  DoStoreIC(ic, assembler);
}

// StaNamedPropertyStrict <object> <name_index> <slot>
//
// Calls the strict mode StoreIC at FeedbackVector slot <slot> for <object> and
// the name in constant pool entry <name_index> with the value in the
// accumulator.
void Interpreter::DoStaNamedPropertyStrict(InterpreterAssembler* assembler) {
  Callable ic = CodeFactory::StoreICInOptimizedCode(isolate_, STRICT);
  DoStoreIC(ic, assembler);
}

void Interpreter::DoKeyedStoreIC(Callable ic, InterpreterAssembler* assembler) {
  Node* code_target = __ HeapConstant(ic.code());
  Node* object_reg_index = __ BytecodeOperandReg(0);
  Node* object = __ LoadRegister(object_reg_index);
  Node* name_reg_index = __ BytecodeOperandReg(1);
  Node* name = __ LoadRegister(name_reg_index);
  Node* value = __ GetAccumulator();
  Node* raw_slot = __ BytecodeOperandIdx(2);
  Node* smi_slot = __ SmiTag(raw_slot);
  Node* type_feedback_vector = __ LoadTypeFeedbackVector();
  Node* context = __ GetContext();
  __ CallStub(ic.descriptor(), code_target, context, object, name, value,
              smi_slot, type_feedback_vector);
  __ Dispatch();
}

// StaKeyedPropertySloppy <object> <key> <slot>
//
// Calls the sloppy mode KeyedStoreIC at FeedbackVector slot <slot> for
// <object> and the key <key> with the value in the accumulator.
void Interpreter::DoStaKeyedPropertySloppy(InterpreterAssembler* assembler) {
  Callable ic = CodeFactory::KeyedStoreICInOptimizedCode(isolate_, SLOPPY);
  DoKeyedStoreIC(ic, assembler);
}

// StaKeyedPropertyStrict <object> <key> <slot>
//
// Calls the strict mode KeyedStoreIC at FeedbackVector slot <slot> for
// <object> and the key <key> with the value in the accumulator.
void Interpreter::DoStaKeyedPropertyStrict(InterpreterAssembler* assembler) {
  Callable ic = CodeFactory::KeyedStoreICInOptimizedCode(isolate_, STRICT);
  DoKeyedStoreIC(ic, assembler);
}

// PushContext <context>
//
// Saves the current context in <context>, and pushes the accumulator as the
// new current context.
void Interpreter::DoPushContext(InterpreterAssembler* assembler) {
  Node* reg_index = __ BytecodeOperandReg(0);
  Node* new_context = __ GetAccumulator();
  Node* old_context = __ GetContext();
  __ StoreRegister(old_context, reg_index);
  __ SetContext(new_context);
  __ Dispatch();
}

// PopContext <context>
//
// Pops the current context and sets <context> as the new context.
void Interpreter::DoPopContext(InterpreterAssembler* assembler) {
  Node* reg_index = __ BytecodeOperandReg(0);
  Node* context = __ LoadRegister(reg_index);
  __ SetContext(context);
  __ Dispatch();
}

template <class Generator>
void Interpreter::DoBinaryOp(InterpreterAssembler* assembler) {
  Node* reg_index = __ BytecodeOperandReg(0);
  Node* lhs = __ LoadRegister(reg_index);
  Node* rhs = __ GetAccumulator();
  Node* context = __ GetContext();
  Node* result = Generator::Generate(assembler, lhs, rhs, context);
  __ SetAccumulator(result);
  __ Dispatch();
}

// Add <src>
//
// Add register <src> to accumulator.
void Interpreter::DoAdd(InterpreterAssembler* assembler) {
  DoBinaryOp<AddStub>(assembler);
}

// Sub <src>
//
// Subtract register <src> from accumulator.
void Interpreter::DoSub(InterpreterAssembler* assembler) {
  DoBinaryOp<SubtractStub>(assembler);
}

// Mul <src>
//
// Multiply accumulator by register <src>.
void Interpreter::DoMul(InterpreterAssembler* assembler) {
  DoBinaryOp<MultiplyStub>(assembler);
}

// Div <src>
//
// Divide register <src> by accumulator.
void Interpreter::DoDiv(InterpreterAssembler* assembler) {
  DoBinaryOp<DivideStub>(assembler);
}

// Mod <src>
//
// Modulo register <src> by accumulator.
void Interpreter::DoMod(InterpreterAssembler* assembler) {
  DoBinaryOp<ModulusStub>(assembler);
}

// BitwiseOr <src>
//
// Bitwise-OR register <src> with the accumulator.
void Interpreter::DoBitwiseOr(InterpreterAssembler* assembler) {
  DoBinaryOp<BitwiseOrStub>(assembler);
}

// BitwiseXor <src>
//
// Bitwise-XOR register <src> with the accumulator.
void Interpreter::DoBitwiseXor(InterpreterAssembler* assembler) {
  DoBinaryOp<BitwiseXorStub>(assembler);
}

// BitwiseAnd <src>
//
// Bitwise-AND register <src> with the accumulator.
void Interpreter::DoBitwiseAnd(InterpreterAssembler* assembler) {
  DoBinaryOp<BitwiseAndStub>(assembler);
}

// ShiftLeft <src>
//
// Left shifts register <src> by the count specified in the accumulator.
// Register <src> is converted to an int32 and the accumulator to a uint32
// before the operation. Only the 5 least-significant bits of the accumulator
// are used as the count, i.e. <src> << (accumulator & 0x1F).
void Interpreter::DoShiftLeft(InterpreterAssembler* assembler) {
  DoBinaryOp<ShiftLeftStub>(assembler);
}

// ShiftRight <src>
//
// Right shifts register <src> by the count specified in the accumulator.
// The result is sign-extended. Register <src> is converted to an int32 and
// the accumulator to a uint32 before the operation. Only the 5
// least-significant bits of the accumulator are used as the count,
// i.e. <src> >> (accumulator & 0x1F).
void Interpreter::DoShiftRight(InterpreterAssembler* assembler) {
  DoBinaryOp<ShiftRightStub>(assembler);
}

// ShiftRightLogical <src>
//
// Right shifts register <src> by the count specified in the accumulator.
// The result is zero-filled. The accumulator and register <src> are converted
// to uint32 before the operation. Only the 5 least-significant bits of the
// accumulator are used as the count, i.e. <src> >>> (accumulator & 0x1F).
void Interpreter::DoShiftRightLogical(InterpreterAssembler* assembler) {
  DoBinaryOp<ShiftRightLogicalStub>(assembler);
}

void Interpreter::DoUnaryOp(Callable callable,
                            InterpreterAssembler* assembler) {
  Node* target = __ HeapConstant(callable.code());
  Node* accumulator = __ GetAccumulator();
  Node* context = __ GetContext();
  Node* result =
      __ CallStub(callable.descriptor(), target, context, accumulator);
  __ SetAccumulator(result);
  __ Dispatch();
}

template <class Generator>
void Interpreter::DoUnaryOp(InterpreterAssembler* assembler) {
  Node* value = __ GetAccumulator();
  Node* context = __ GetContext();
  Node* result = Generator::Generate(assembler, value, context);
  __ SetAccumulator(result);
  __ Dispatch();
}

// ToName
//
// Cast the object referenced by the accumulator to a name.
void Interpreter::DoToName(InterpreterAssembler* assembler) {
  DoUnaryOp(CodeFactory::ToName(isolate_), assembler);
}

// ToNumber
//
// Cast the object referenced by the accumulator to a number.
void Interpreter::DoToNumber(InterpreterAssembler* assembler) {
  DoUnaryOp(CodeFactory::ToNumber(isolate_), assembler);
}

// ToObject
//
// Cast the object referenced by the accumulator to a JSObject.
void Interpreter::DoToObject(InterpreterAssembler* assembler) {
  DoUnaryOp(CodeFactory::ToObject(isolate_), assembler);
}

// Inc
//
// Increments value in the accumulator by one.
void Interpreter::DoInc(InterpreterAssembler* assembler) {
  DoUnaryOp<IncStub>(assembler);
}

// Dec
//
// Decrements value in the accumulator by one.
void Interpreter::DoDec(InterpreterAssembler* assembler) {
  DoUnaryOp<DecStub>(assembler);
}

Node* Interpreter::BuildToBoolean(Node* value,
                                  InterpreterAssembler* assembler) {
  Node* context = __ GetContext();
  return ToBooleanStub::Generate(assembler, value, context);
}

Node* Interpreter::BuildLogicalNot(Node* value,
                                   InterpreterAssembler* assembler) {
  Variable result(assembler, MachineRepresentation::kTagged);
  Label if_true(assembler), if_false(assembler), end(assembler);
  Node* true_value = __ BooleanConstant(true);
  Node* false_value = __ BooleanConstant(false);
  __ BranchIfWordEqual(value, true_value, &if_true, &if_false);
  __ Bind(&if_true);
  {
    result.Bind(false_value);
    __ Goto(&end);
  }
  __ Bind(&if_false);
  {
    if (FLAG_debug_code) {
      __ AbortIfWordNotEqual(value, false_value,
                             BailoutReason::kExpectedBooleanValue);
    }
    result.Bind(true_value);
    __ Goto(&end);
  }
  __ Bind(&end);
  return result.value();
}

// ToBooleanLogicalNot
//
// Perform logical-not on the accumulator, first casting the
// accumulator to a boolean value if required.
void Interpreter::DoToBooleanLogicalNot(InterpreterAssembler* assembler) {
  Node* value = __ GetAccumulator();
  Node* to_boolean_value = BuildToBoolean(value, assembler);
  Node* result = BuildLogicalNot(to_boolean_value, assembler);
  __ SetAccumulator(result);
  __ Dispatch();
}

// LogicalNot
//
// Perform logical-not on the accumulator, which must already be a boolean
// value.
void Interpreter::DoLogicalNot(InterpreterAssembler* assembler) {
  Node* value = __ GetAccumulator();
  Node* result = BuildLogicalNot(value, assembler);
  __ SetAccumulator(result);
  __ Dispatch();
}

// TypeOf
//
// Load the accumulator with the string representing the type of the
// object in the accumulator.
void Interpreter::DoTypeOf(InterpreterAssembler* assembler) {
  DoUnaryOp(CodeFactory::Typeof(isolate_), assembler);
}

void Interpreter::DoDelete(Runtime::FunctionId function_id,
                           InterpreterAssembler* assembler) {
  Node* reg_index = __ BytecodeOperandReg(0);
  Node* object = __ LoadRegister(reg_index);
  Node* key = __ GetAccumulator();
  Node* context = __ GetContext();
  Node* result = __ CallRuntime(function_id, context, object, key);
  __ SetAccumulator(result);
  __ Dispatch();
}

// DeletePropertyStrict
//
// Delete the property specified in the accumulator from the object
// referenced by the register operand following strict mode semantics.
void Interpreter::DoDeletePropertyStrict(InterpreterAssembler* assembler) {
  DoDelete(Runtime::kDeleteProperty_Strict, assembler);
}

// DeletePropertySloppy
//
// Delete the property specified in the accumulator from the object
// referenced by the register operand following sloppy mode semantics.
void Interpreter::DoDeletePropertySloppy(InterpreterAssembler* assembler) {
  DoDelete(Runtime::kDeleteProperty_Sloppy, assembler);
}

void Interpreter::DoJSCall(InterpreterAssembler* assembler,
                           TailCallMode tail_call_mode) {
  Node* function_reg = __ BytecodeOperandReg(0);
  Node* function = __ LoadRegister(function_reg);
  Node* receiver_reg = __ BytecodeOperandReg(1);
  Node* receiver_arg = __ RegisterLocation(receiver_reg);
  Node* receiver_args_count = __ BytecodeOperandCount(2);
  Node* receiver_count = __ Int32Constant(1);
  Node* args_count = __ Int32Sub(receiver_args_count, receiver_count);
  Node* context = __ GetContext();
  // TODO(rmcilroy): Use the call type feedback slot to call via CallStub.
  Node* result =
      __ CallJS(function, context, receiver_arg, args_count, tail_call_mode);
  __ SetAccumulator(result);
  __ Dispatch();
}

// Call <callable> <receiver> <arg_count>
//
// Call a JSFunction or Callable in |callable| with the |receiver| and
// |arg_count| arguments in subsequent registers.
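//
// For example (an illustrative operand layout): `Call r0, r1, #3` calls the
// function in r0 with receiver r1 and two arguments in r2 and r3, since the
// count operand includes the receiver.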
void Interpreter::DoCall(InterpreterAssembler* assembler) {
  DoJSCall(assembler, TailCallMode::kDisallow);
}

// TailCall <callable> <receiver> <arg_count>
//
// Tail call a JSFunction or Callable in |callable| with the |receiver| and
// |arg_count| arguments in subsequent registers.
void Interpreter::DoTailCall(InterpreterAssembler* assembler) {
  DoJSCall(assembler, TailCallMode::kAllow);
}

void Interpreter::DoCallRuntimeCommon(InterpreterAssembler* assembler) {
  Node* function_id = __ BytecodeOperandRuntimeId(0);
  Node* first_arg_reg = __ BytecodeOperandReg(1);
  Node* first_arg = __ RegisterLocation(first_arg_reg);
  Node* args_count = __ BytecodeOperandCount(2);
  Node* context = __ GetContext();
  Node* result = __ CallRuntimeN(function_id, context, first_arg, args_count);
  __ SetAccumulator(result);
  __ Dispatch();
}

// CallRuntime <function_id> <first_arg> <arg_count>
//
// Call the runtime function |function_id| with the first argument in
// register |first_arg| and |arg_count| arguments in subsequent
// registers.
void Interpreter::DoCallRuntime(InterpreterAssembler* assembler) {
  DoCallRuntimeCommon(assembler);
}

// InvokeIntrinsic <function_id> <first_arg> <arg_count>
//
// Implements the semantic equivalent of calling the runtime function
// |function_id| with the first argument in |first_arg| and |arg_count|
// arguments in subsequent registers.
void Interpreter::DoInvokeIntrinsic(InterpreterAssembler* assembler) {
  Node* function_id = __ BytecodeOperandIntrinsicId(0);
  Node* first_arg_reg = __ BytecodeOperandReg(1);
  Node* arg_count = __ BytecodeOperandCount(2);
  Node* context = __ GetContext();
  IntrinsicsHelper helper(assembler);
  Node* result =
      helper.InvokeIntrinsic(function_id, context, first_arg_reg, arg_count);
  __ SetAccumulator(result);
  __ Dispatch();
}

void Interpreter::DoCallRuntimeForPairCommon(InterpreterAssembler* assembler) {
  // Call the runtime function.
  Node* function_id = __ BytecodeOperandRuntimeId(0);
  Node* first_arg_reg = __ BytecodeOperandReg(1);
  Node* first_arg = __ RegisterLocation(first_arg_reg);
  Node* args_count = __ BytecodeOperandCount(2);
  Node* context = __ GetContext();
  Node* result_pair =
      __ CallRuntimeN(function_id, context, first_arg, args_count, 2);

  // Store the results in <first_return> and <first_return + 1>.
  Node* first_return_reg = __ BytecodeOperandReg(3);
  Node* second_return_reg = __ NextRegister(first_return_reg);
  Node* result0 = __ Projection(0, result_pair);
  Node* result1 = __ Projection(1, result_pair);
  __ StoreRegister(result0, first_return_reg);
  __ StoreRegister(result1, second_return_reg);
  __ Dispatch();
}

// CallRuntimeForPair <function_id> <first_arg> <arg_count> <first_return>
//
// Call the runtime function |function_id| which returns a pair, with the
// first argument in register |first_arg| and |arg_count| arguments in
// subsequent registers. Returns the result in <first_return> and
// <first_return + 1>.
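//
// For example (hypothetical operands): `CallRuntimeForPair [id], r0, #2, r3`
// stores the two results in r3 and r4.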
void Interpreter::DoCallRuntimeForPair(InterpreterAssembler* assembler) {
  DoCallRuntimeForPairCommon(assembler);
}

void Interpreter::DoCallJSRuntimeCommon(InterpreterAssembler* assembler) {
  Node* context_index = __ BytecodeOperandIdx(0);
  Node* receiver_reg = __ BytecodeOperandReg(1);
  Node* first_arg = __ RegisterLocation(receiver_reg);
  Node* receiver_args_count = __ BytecodeOperandCount(2);
  Node* receiver_count = __ Int32Constant(1);
  Node* args_count = __ Int32Sub(receiver_args_count, receiver_count);

  // Get the function to call from the native context.
  Node* context = __ GetContext();
  Node* native_context =
      __ LoadContextSlot(context, Context::NATIVE_CONTEXT_INDEX);
  Node* function = __ LoadContextSlot(native_context, context_index);

  // Call the function.
  Node* result = __ CallJS(function, context, first_arg, args_count,
                           TailCallMode::kDisallow);
  __ SetAccumulator(result);
  __ Dispatch();
}

// CallJSRuntime <context_index> <receiver> <arg_count>
//
// Call the JS runtime function that has the |context_index| with the receiver
// in register |receiver| and |arg_count| arguments in subsequent registers.
void Interpreter::DoCallJSRuntime(InterpreterAssembler* assembler) {
  DoCallJSRuntimeCommon(assembler);
}

void Interpreter::DoCallConstruct(InterpreterAssembler* assembler) {
  Callable ic = CodeFactory::InterpreterPushArgsAndConstruct(isolate_);
  Node* new_target = __ GetAccumulator();
  Node* constructor_reg = __ BytecodeOperandReg(0);
  Node* constructor = __ LoadRegister(constructor_reg);
  Node* first_arg_reg = __ BytecodeOperandReg(1);
  Node* first_arg = __ RegisterLocation(first_arg_reg);
  Node* args_count = __ BytecodeOperandCount(2);
  Node* context = __ GetContext();
  Node* result =
      __ CallConstruct(constructor, context, new_target, first_arg, args_count);
  __ SetAccumulator(result);
  __ Dispatch();
}

// New <constructor> <first_arg> <arg_count>
//
// Call operator new with |constructor| and the first argument in
// register |first_arg| and |arg_count| arguments in subsequent
// registers. The new.target is in the accumulator.
//
void Interpreter::DoNew(InterpreterAssembler* assembler) {
  DoCallConstruct(assembler);
}

// TestEqual <src>
//
// Test if the value in the <src> register equals the accumulator.
void Interpreter::DoTestEqual(InterpreterAssembler* assembler) {
  DoBinaryOp<EqualStub>(assembler);
}

// TestNotEqual <src>
//
// Test if the value in the <src> register is not equal to the accumulator.
void Interpreter::DoTestNotEqual(InterpreterAssembler* assembler) {
  DoBinaryOp<NotEqualStub>(assembler);
}

// TestEqualStrict <src>
//
// Test if the value in the <src> register is strictly equal to the accumulator.
void Interpreter::DoTestEqualStrict(InterpreterAssembler* assembler) {
  DoBinaryOp<StrictEqualStub>(assembler);
}

// TestLessThan <src>
//
// Test if the value in the <src> register is less than the accumulator.
void Interpreter::DoTestLessThan(InterpreterAssembler* assembler) {
  DoBinaryOp<LessThanStub>(assembler);
}

// TestGreaterThan <src>
//
// Test if the value in the <src> register is greater than the accumulator.
void Interpreter::DoTestGreaterThan(InterpreterAssembler* assembler) {
  DoBinaryOp<GreaterThanStub>(assembler);
}

// TestLessThanOrEqual <src>
//
// Test if the value in the <src> register is less than or equal to the
// accumulator.
void Interpreter::DoTestLessThanOrEqual(InterpreterAssembler* assembler) {
  DoBinaryOp<LessThanOrEqualStub>(assembler);
}

// TestGreaterThanOrEqual <src>
//
// Test if the value in the <src> register is greater than or equal to the
// accumulator.
void Interpreter::DoTestGreaterThanOrEqual(InterpreterAssembler* assembler) {
  DoBinaryOp<GreaterThanOrEqualStub>(assembler);
}

// TestIn <src>
//
// Test if the object referenced by the register operand is a property of the
// object referenced by the accumulator.
void Interpreter::DoTestIn(InterpreterAssembler* assembler) {
  DoBinaryOp<HasPropertyStub>(assembler);
}

// TestInstanceOf <src>
//
// Test if the object referenced by the <src> register is an instance of the
// type referenced by the accumulator.
void Interpreter::DoTestInstanceOf(InterpreterAssembler* assembler) {
  DoBinaryOp<InstanceOfStub>(assembler);
}

// Jump <imm>
//
// Jump by number of bytes represented by the immediate operand |imm|.
void Interpreter::DoJump(InterpreterAssembler* assembler) {
  Node* relative_jump = __ BytecodeOperandImm(0);
  __ Jump(relative_jump);
}

// JumpConstant <idx>
//
// Jump by number of bytes in the Smi in the |idx| entry in the constant pool.
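// This form is typically emitted when the jump distance does not fit in an
// immediate operand.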
   1216 void Interpreter::DoJumpConstant(InterpreterAssembler* assembler) {
   1217   Node* index = __ BytecodeOperandIdx(0);
   1218   Node* constant = __ LoadConstantPoolEntry(index);
   1219   Node* relative_jump = __ SmiUntag(constant);
   1220   __ Jump(relative_jump);
   1221 }
   1222 
   1223 // JumpIfTrue <imm>
   1224 //
   1225 // Jump by number of bytes represented by an immediate operand if the
   1226 // accumulator contains true.
   1227 void Interpreter::DoJumpIfTrue(InterpreterAssembler* assembler) {
   1228   Node* accumulator = __ GetAccumulator();
   1229   Node* relative_jump = __ BytecodeOperandImm(0);
   1230   Node* true_value = __ BooleanConstant(true);
   1231   __ JumpIfWordEqual(accumulator, true_value, relative_jump);
   1232 }
   1233 
   1234 // JumpIfTrueConstant <idx>
   1235 //
   1236 // Jump by number of bytes in the Smi in the |idx| entry in the constant pool
   1237 // if the accumulator contains true.
   1238 void Interpreter::DoJumpIfTrueConstant(InterpreterAssembler* assembler) {
   1239   Node* accumulator = __ GetAccumulator();
   1240   Node* index = __ BytecodeOperandIdx(0);
   1241   Node* constant = __ LoadConstantPoolEntry(index);
   1242   Node* relative_jump = __ SmiUntag(constant);
   1243   Node* true_value = __ BooleanConstant(true);
   1244   __ JumpIfWordEqual(accumulator, true_value, relative_jump);
   1245 }
   1246 
// JumpIfFalse <imm>
//
// Jump by number of bytes represented by an immediate operand if the
// accumulator contains false.
void Interpreter::DoJumpIfFalse(InterpreterAssembler* assembler) {
  Node* accumulator = __ GetAccumulator();
  Node* relative_jump = __ BytecodeOperandImm(0);
  Node* false_value = __ BooleanConstant(false);
  __ JumpIfWordEqual(accumulator, false_value, relative_jump);
}

// JumpIfFalseConstant <idx>
//
// Jump by number of bytes in the Smi in the |idx| entry in the constant pool
// if the accumulator contains false.
void Interpreter::DoJumpIfFalseConstant(InterpreterAssembler* assembler) {
  Node* accumulator = __ GetAccumulator();
  Node* index = __ BytecodeOperandIdx(0);
  Node* constant = __ LoadConstantPoolEntry(index);
  Node* relative_jump = __ SmiUntag(constant);
  Node* false_value = __ BooleanConstant(false);
  __ JumpIfWordEqual(accumulator, false_value, relative_jump);
}

// JumpIfToBooleanTrue <imm>
//
// Jump by number of bytes represented by an immediate operand if the object
// referenced by the accumulator is true when the object is cast to boolean.
void Interpreter::DoJumpIfToBooleanTrue(InterpreterAssembler* assembler) {
  Node* accumulator = __ GetAccumulator();
  Node* to_boolean_value = BuildToBoolean(accumulator, assembler);
  Node* relative_jump = __ BytecodeOperandImm(0);
  Node* true_value = __ BooleanConstant(true);
  __ JumpIfWordEqual(to_boolean_value, true_value, relative_jump);
}

// JumpIfToBooleanTrueConstant <idx>
//
// Jump by number of bytes in the Smi in the |idx| entry in the constant pool
// if the object referenced by the accumulator is true when the object is cast
// to boolean.
void Interpreter::DoJumpIfToBooleanTrueConstant(
    InterpreterAssembler* assembler) {
  Node* accumulator = __ GetAccumulator();
  Node* to_boolean_value = BuildToBoolean(accumulator, assembler);
  Node* index = __ BytecodeOperandIdx(0);
  Node* constant = __ LoadConstantPoolEntry(index);
  Node* relative_jump = __ SmiUntag(constant);
  Node* true_value = __ BooleanConstant(true);
  __ JumpIfWordEqual(to_boolean_value, true_value, relative_jump);
}

// JumpIfToBooleanFalse <imm>
//
// Jump by number of bytes represented by an immediate operand if the object
// referenced by the accumulator is false when the object is cast to boolean.
void Interpreter::DoJumpIfToBooleanFalse(InterpreterAssembler* assembler) {
  Node* accumulator = __ GetAccumulator();
  Node* to_boolean_value = BuildToBoolean(accumulator, assembler);
  Node* relative_jump = __ BytecodeOperandImm(0);
  Node* false_value = __ BooleanConstant(false);
  __ JumpIfWordEqual(to_boolean_value, false_value, relative_jump);
}

// JumpIfToBooleanFalseConstant <idx>
//
// Jump by number of bytes in the Smi in the |idx| entry in the constant pool
// if the object referenced by the accumulator is false when the object is cast
// to boolean.
void Interpreter::DoJumpIfToBooleanFalseConstant(
    InterpreterAssembler* assembler) {
  Node* accumulator = __ GetAccumulator();
  Node* to_boolean_value = BuildToBoolean(accumulator, assembler);
  Node* index = __ BytecodeOperandIdx(0);
  Node* constant = __ LoadConstantPoolEntry(index);
  Node* relative_jump = __ SmiUntag(constant);
  Node* false_value = __ BooleanConstant(false);
  __ JumpIfWordEqual(to_boolean_value, false_value, relative_jump);
}

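// Note: the plain JumpIfTrue/JumpIfFalse handlers above compare the
// accumulator against the boolean oddballs with word equality and perform no
// coercion, so they only make sense when the accumulator is known to hold a
// boolean (e.g. the result of a test bytecode). The JumpIfToBoolean* family
// applies ToBoolean semantics first, which is what a bare JavaScript
// condition needs. For illustration (schematic, not exact codegen):
//
//   if (x) { ... }   =>   Ldar r(x)
//                         JumpIfToBooleanFalse @end_of_then
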
// JumpIfNull <imm>
//
// Jump by number of bytes represented by an immediate operand if the object
// referenced by the accumulator is the null constant.
void Interpreter::DoJumpIfNull(InterpreterAssembler* assembler) {
  Node* accumulator = __ GetAccumulator();
  Node* null_value = __ HeapConstant(isolate_->factory()->null_value());
  Node* relative_jump = __ BytecodeOperandImm(0);
  __ JumpIfWordEqual(accumulator, null_value, relative_jump);
}

// JumpIfNullConstant <idx>
//
// Jump by number of bytes in the Smi in the |idx| entry in the constant pool
// if the object referenced by the accumulator is the null constant.
void Interpreter::DoJumpIfNullConstant(InterpreterAssembler* assembler) {
  Node* accumulator = __ GetAccumulator();
  Node* null_value = __ HeapConstant(isolate_->factory()->null_value());
  Node* index = __ BytecodeOperandIdx(0);
  Node* constant = __ LoadConstantPoolEntry(index);
  Node* relative_jump = __ SmiUntag(constant);
  __ JumpIfWordEqual(accumulator, null_value, relative_jump);
}

// JumpIfUndefined <imm>
//
// Jump by number of bytes represented by an immediate operand if the object
// referenced by the accumulator is the undefined constant.
void Interpreter::DoJumpIfUndefined(InterpreterAssembler* assembler) {
  Node* accumulator = __ GetAccumulator();
  Node* undefined_value =
      __ HeapConstant(isolate_->factory()->undefined_value());
  Node* relative_jump = __ BytecodeOperandImm(0);
  __ JumpIfWordEqual(accumulator, undefined_value, relative_jump);
}

// JumpIfUndefinedConstant <idx>
//
// Jump by number of bytes in the Smi in the |idx| entry in the constant pool
// if the object referenced by the accumulator is the undefined constant.
void Interpreter::DoJumpIfUndefinedConstant(InterpreterAssembler* assembler) {
  Node* accumulator = __ GetAccumulator();
  Node* undefined_value =
      __ HeapConstant(isolate_->factory()->undefined_value());
  Node* index = __ BytecodeOperandIdx(0);
  Node* constant = __ LoadConstantPoolEntry(index);
  Node* relative_jump = __ SmiUntag(constant);
  __ JumpIfWordEqual(accumulator, undefined_value, relative_jump);
}

// JumpIfNotHole <imm>
//
// Jump by number of bytes represented by an immediate operand if the object
// referenced by the accumulator is not the hole.
void Interpreter::DoJumpIfNotHole(InterpreterAssembler* assembler) {
  Node* accumulator = __ GetAccumulator();
  Node* the_hole_value = __ HeapConstant(isolate_->factory()->the_hole_value());
  Node* relative_jump = __ BytecodeOperandImm(0);
  __ JumpIfWordNotEqual(accumulator, the_hole_value, relative_jump);
}

// JumpIfNotHoleConstant <idx>
//
// Jump by number of bytes in the Smi in the |idx| entry in the constant pool
// if the object referenced by the accumulator is not the hole constant.
void Interpreter::DoJumpIfNotHoleConstant(InterpreterAssembler* assembler) {
  Node* accumulator = __ GetAccumulator();
  Node* the_hole_value = __ HeapConstant(isolate_->factory()->the_hole_value());
  Node* index = __ BytecodeOperandIdx(0);
  Node* constant = __ LoadConstantPoolEntry(index);
  Node* relative_jump = __ SmiUntag(constant);
  __ JumpIfWordNotEqual(accumulator, the_hole_value, relative_jump);
}

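// Note: the hole-check jumps above are used to branch over error paths, for
// example around the temporal-dead-zone check of a let or const binding; on
// the fall-through path the accumulator still holds the hole. (Hedged usage
// summary, not an exhaustive list; the emitting sites live in the bytecode
// generator.)
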
// CreateRegExpLiteral <pattern_idx> <literal_idx> <flags>
//
// Creates a regular expression literal for literal index <literal_idx> with
// <flags> and the pattern in <pattern_idx>.
void Interpreter::DoCreateRegExpLiteral(InterpreterAssembler* assembler) {
  Callable callable = CodeFactory::FastCloneRegExp(isolate_);
  Node* target = __ HeapConstant(callable.code());
  Node* index = __ BytecodeOperandIdx(0);
  Node* pattern = __ LoadConstantPoolEntry(index);
  Node* literal_index_raw = __ BytecodeOperandIdx(1);
  Node* literal_index = __ SmiTag(literal_index_raw);
  Node* flags_raw = __ BytecodeOperandFlag(2);
  Node* flags = __ SmiTag(flags_raw);
  Node* closure = __ LoadRegister(Register::function_closure());
  Node* context = __ GetContext();
  Node* result = __ CallStub(callable.descriptor(), target, context, closure,
                             literal_index, pattern, flags);
  __ SetAccumulator(result);
  __ Dispatch();
}

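// For illustration, a literal such as
//
//   var re = /ab+c/gi;
//
// is expected to compile to a CreateRegExpLiteral whose pattern operand
// indexes the string "ab+c" in the constant pool, with the flags operand
// encoding g and i. (Sketch only; exact operand values depend on the
// surrounding function.)
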
// CreateArrayLiteral <element_idx> <literal_idx> <flags>
//
// Creates an array literal for literal index <literal_idx> with flags <flags>
// and constant elements in <element_idx>.
void Interpreter::DoCreateArrayLiteral(InterpreterAssembler* assembler) {
  Node* index = __ BytecodeOperandIdx(0);
  Node* constant_elements = __ LoadConstantPoolEntry(index);
  Node* literal_index_raw = __ BytecodeOperandIdx(1);
  Node* literal_index = __ SmiTag(literal_index_raw);
  Node* flags_raw = __ BytecodeOperandFlag(2);
  Node* flags = __ SmiTag(flags_raw);
  Node* closure = __ LoadRegister(Register::function_closure());
  Node* context = __ GetContext();
  Node* result = __ CallRuntime(Runtime::kCreateArrayLiteral, context, closure,
                                literal_index, constant_elements, flags);
  __ SetAccumulator(result);
  __ Dispatch();
}

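// For illustration, a literal such as
//
//   var a = [1, 2, 3];
//
// should reach this handler with <element_idx> naming a constant-pool entry
// describing the elements [1, 2, 3], while <literal_idx> identifies the
// per-function literal slot used to cache the boilerplate. (Sketch only.)
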
// CreateObjectLiteral <element_idx> <literal_idx> <flags>
//
// Creates an object literal for literal index <literal_idx> with
// CreateObjectLiteralFlags <flags> and constant elements in <element_idx>.
void Interpreter::DoCreateObjectLiteral(InterpreterAssembler* assembler) {
  Node* literal_index_raw = __ BytecodeOperandIdx(1);
  Node* literal_index = __ SmiTag(literal_index_raw);
  Node* bytecode_flags = __ BytecodeOperandFlag(2);
  Node* closure = __ LoadRegister(Register::function_closure());

  // Check if we can do a fast clone or have to call the runtime.
  Label if_fast_clone(assembler),
      if_not_fast_clone(assembler, Label::kDeferred);
  Node* fast_clone_properties_count =
      __ BitFieldDecode<CreateObjectLiteralFlags::FastClonePropertiesCountBits>(
          bytecode_flags);
  __ BranchIf(fast_clone_properties_count, &if_fast_clone, &if_not_fast_clone);

  __ Bind(&if_fast_clone);
  {
    // If we can do a fast clone do the fast-path in FastCloneShallowObjectStub.
    Node* result = FastCloneShallowObjectStub::GenerateFastPath(
        assembler, &if_not_fast_clone, closure, literal_index,
        fast_clone_properties_count);
    __ SetAccumulator(result);
    __ Dispatch();
  }

  __ Bind(&if_not_fast_clone);
  {
    // If we can't do a fast clone, call into the runtime.
    Node* index = __ BytecodeOperandIdx(0);
    Node* constant_elements = __ LoadConstantPoolEntry(index);
    Node* context = __ GetContext();

    STATIC_ASSERT(CreateObjectLiteralFlags::FlagsBits::kShift == 0);
    Node* flags_raw = __ Word32And(
        bytecode_flags,
        __ Int32Constant(CreateObjectLiteralFlags::FlagsBits::kMask));
    Node* flags = __ SmiTag(flags_raw);

    Node* result =
        __ CallRuntime(Runtime::kCreateObjectLiteral, context, closure,
                       literal_index, constant_elements, flags);
    __ SetAccumulator(result);
    __ Dispatch();
  }
}

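// Note: the fast path above clones a previously created boilerplate object in
// generated code, and is only taken when the flags operand records a small
// enough in-object property count; everything else (the deferred label) falls
// back to Runtime::kCreateObjectLiteral. For illustration, a simple literal
// like
//
//   var o = {a: 1, b: 2};
//
// is a candidate for the fast clone. (Hedged summary of the branch above.)
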
// CreateClosure <index> <tenured>
//
// Creates a new closure for SharedFunctionInfo at position |index| in the
// constant pool and with the PretenureFlag <tenured>.
void Interpreter::DoCreateClosure(InterpreterAssembler* assembler) {
  // TODO(rmcilroy): Possibly call FastNewClosureStub when possible instead of
  // calling into the runtime.
  Node* index = __ BytecodeOperandIdx(0);
  Node* shared = __ LoadConstantPoolEntry(index);
  Node* tenured_raw = __ BytecodeOperandFlag(1);
  Node* tenured = __ SmiTag(tenured_raw);
  Node* context = __ GetContext();
  Node* result =
      __ CallRuntime(Runtime::kInterpreterNewClosure, context, shared, tenured);
  __ SetAccumulator(result);
  __ Dispatch();
}

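// For illustration, every function literal, e.g.
//
//   var f = function(x) { return x + 1; };
//
// should compile to a CreateClosure whose |index| operand names the inner
// function's SharedFunctionInfo in the constant pool. (Sketch only.)
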
// CreateMappedArguments
//
// Creates a new mapped arguments object.
void Interpreter::DoCreateMappedArguments(InterpreterAssembler* assembler) {
  Node* closure = __ LoadRegister(Register::function_closure());
  Node* context = __ GetContext();

  Label if_duplicate_parameters(assembler, Label::kDeferred);
  Label if_not_duplicate_parameters(assembler);

  // Check if function has duplicate parameters.
  // TODO(rmcilroy): Remove this check when FastNewSloppyArgumentsStub supports
  // duplicate parameters.
  Node* shared_info =
      __ LoadObjectField(closure, JSFunction::kSharedFunctionInfoOffset);
  Node* compiler_hints = __ LoadObjectField(
      shared_info, SharedFunctionInfo::kHasDuplicateParametersByteOffset,
      MachineType::Uint8());
  Node* duplicate_parameters_bit = __ Int32Constant(
      1 << SharedFunctionInfo::kHasDuplicateParametersBitWithinByte);
  Node* compare = __ Word32And(compiler_hints, duplicate_parameters_bit);
  __ BranchIf(compare, &if_duplicate_parameters, &if_not_duplicate_parameters);

  __ Bind(&if_not_duplicate_parameters);
  {
    // TODO(rmcilroy): Inline FastNewSloppyArguments when it is a TurboFan stub.
    Callable callable = CodeFactory::FastNewSloppyArguments(isolate_, true);
    Node* target = __ HeapConstant(callable.code());
    Node* result = __ CallStub(callable.descriptor(), target, context, closure);
    __ SetAccumulator(result);
    __ Dispatch();
  }

  __ Bind(&if_duplicate_parameters);
  {
    Node* result =
        __ CallRuntime(Runtime::kNewSloppyArguments_Generic, context, closure);
    __ SetAccumulator(result);
    __ Dispatch();
  }
}

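// Note: a "mapped" (sloppy-mode) arguments object aliases its parameter
// slots, so writes through arguments[i] are visible in the named parameter
// and vice versa:
//
//   function f(a) { arguments[0] = 42; return a; }   // f(1) returns 42
//
// Strict-mode functions and functions with non-simple parameter lists get
// the unmapped variant below instead. (Illustrative summary of standard
// JavaScript semantics, not of this handler's internals.)
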
// CreateUnmappedArguments
//
// Creates a new unmapped arguments object.
void Interpreter::DoCreateUnmappedArguments(InterpreterAssembler* assembler) {
  // TODO(rmcilroy): Inline FastNewStrictArguments when it is a TurboFan stub.
  Callable callable = CodeFactory::FastNewStrictArguments(isolate_, true);
  Node* target = __ HeapConstant(callable.code());
  Node* context = __ GetContext();
  Node* closure = __ LoadRegister(Register::function_closure());
  Node* result = __ CallStub(callable.descriptor(), target, context, closure);
  __ SetAccumulator(result);
  __ Dispatch();
}

// CreateRestParameter
//
// Creates a new rest parameter array.
void Interpreter::DoCreateRestParameter(InterpreterAssembler* assembler) {
  // TODO(rmcilroy): Inline FastNewRestArguments when it is a TurboFan stub.
  Callable callable = CodeFactory::FastNewRestParameter(isolate_, true);
  Node* target = __ HeapConstant(callable.code());
  Node* closure = __ LoadRegister(Register::function_closure());
  Node* context = __ GetContext();
  Node* result = __ CallStub(callable.descriptor(), target, context, closure);
  __ SetAccumulator(result);
  __ Dispatch();
}

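// For illustration, a signature such as
//
//   function f(first, ...rest) { return rest.length; }
//
// should emit CreateRestParameter to materialize |rest| as an array of the
// trailing actual arguments. (Sketch only.)
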
// StackCheck
//
// Performs a stack guard check.
void Interpreter::DoStackCheck(InterpreterAssembler* assembler) {
  Label ok(assembler), stack_check_interrupt(assembler, Label::kDeferred);

  Node* interrupt = __ StackCheckTriggeredInterrupt();
  __ BranchIf(interrupt, &stack_check_interrupt, &ok);

  __ Bind(&ok);
  __ Dispatch();

  __ Bind(&stack_check_interrupt);
  {
    Node* context = __ GetContext();
    __ CallRuntime(Runtime::kStackGuard, context);
    __ Dispatch();
  }
}

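// Note: StackCheck bytecodes are emitted at function entry and at loop
// back-edges, so the Runtime::kStackGuard call above doubles as the
// interpreter's hook for interrupt processing (e.g. termination requests),
// not just a stack-overflow probe. (Hedged summary; the emitting sites live
// in the bytecode generator.)
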
// Throw
//
// Throws the exception in the accumulator.
void Interpreter::DoThrow(InterpreterAssembler* assembler) {
  Node* exception = __ GetAccumulator();
  Node* context = __ GetContext();
  __ CallRuntime(Runtime::kThrow, context, exception);
  // We shouldn't ever return from a throw.
  __ Abort(kUnexpectedReturnFromThrow);
}

// ReThrow
//
// Re-throws the exception in the accumulator.
void Interpreter::DoReThrow(InterpreterAssembler* assembler) {
  Node* exception = __ GetAccumulator();
  Node* context = __ GetContext();
  __ CallRuntime(Runtime::kReThrow, context, exception);
  // We shouldn't ever return from a throw.
  __ Abort(kUnexpectedReturnFromThrow);
}

// Return
//
// Return the value in the accumulator.
void Interpreter::DoReturn(InterpreterAssembler* assembler) {
  __ UpdateInterruptBudgetOnReturn();
  Node* accumulator = __ GetAccumulator();
  __ Return(accumulator);
}

// Debugger
//
// Call runtime to handle debugger statement.
void Interpreter::DoDebugger(InterpreterAssembler* assembler) {
  Node* context = __ GetContext();
  __ CallRuntime(Runtime::kHandleDebuggerStatement, context);
  __ Dispatch();
}

// DebugBreak
//
// Call runtime to handle a debug break.
#define DEBUG_BREAK(Name, ...)                                                \
  void Interpreter::Do##Name(InterpreterAssembler* assembler) {               \
    Node* context = __ GetContext();                                          \
    Node* accumulator = __ GetAccumulator();                                  \
    Node* original_handler =                                                  \
        __ CallRuntime(Runtime::kDebugBreakOnBytecode, context, accumulator); \
    __ DispatchToBytecodeHandler(original_handler);                           \
  }
DEBUG_BREAK_BYTECODE_LIST(DEBUG_BREAK);
#undef DEBUG_BREAK

// ForInPrepare <cache_info_triple>
//
// Returns state for for..in loop execution based on the object in the
// accumulator. The result is output in registers |cache_info_triple| to
// |cache_info_triple + 2|, with the registers holding cache_type, cache_array,
// and cache_length respectively.
void Interpreter::DoForInPrepare(InterpreterAssembler* assembler) {
  Node* object = __ GetAccumulator();
  Node* context = __ GetContext();
  Node* result_triple = __ CallRuntime(Runtime::kForInPrepare, context, object);

  // Set output registers:
  //   0 == cache_type, 1 == cache_array, 2 == cache_length
  Node* output_register = __ BytecodeOperandReg(0);
  for (int i = 0; i < 3; i++) {
    Node* cache_info = __ Projection(i, result_triple);
    __ StoreRegister(cache_info, output_register);
    output_register = __ NextRegister(output_register);
  }
  __ Dispatch();
}

// ForInNext <receiver> <index> <cache_info_pair>
//
// Returns the next enumerable property in the accumulator.
void Interpreter::DoForInNext(InterpreterAssembler* assembler) {
  Node* receiver_reg = __ BytecodeOperandReg(0);
  Node* receiver = __ LoadRegister(receiver_reg);
  Node* index_reg = __ BytecodeOperandReg(1);
  Node* index = __ LoadRegister(index_reg);
  Node* cache_type_reg = __ BytecodeOperandReg(2);
  Node* cache_type = __ LoadRegister(cache_type_reg);
  Node* cache_array_reg = __ NextRegister(cache_type_reg);
  Node* cache_array = __ LoadRegister(cache_array_reg);

  // Load the next key from the enumeration array.
  Node* key = __ LoadFixedArrayElement(cache_array, index, 0,
                                       CodeStubAssembler::SMI_PARAMETERS);

  // Check if we can use the for-in fast path potentially using the enum cache.
  Label if_fast(assembler), if_slow(assembler, Label::kDeferred);
  Node* receiver_map = __ LoadObjectField(receiver, HeapObject::kMapOffset);
  Node* condition = __ WordEqual(receiver_map, cache_type);
  __ BranchIf(condition, &if_fast, &if_slow);
  __ Bind(&if_fast);
  {
    // Enum cache in use for {receiver}, the {key} is definitely valid.
    __ SetAccumulator(key);
    __ Dispatch();
  }
  __ Bind(&if_slow);
  {
    // Record the fact that we hit the for-in slow path.
    Node* vector_index = __ BytecodeOperandIdx(3);
    Node* type_feedback_vector = __ LoadTypeFeedbackVector();
    Node* megamorphic_sentinel =
        __ HeapConstant(TypeFeedbackVector::MegamorphicSentinel(isolate_));
    __ StoreFixedArrayElement(type_feedback_vector, vector_index,
                              megamorphic_sentinel, SKIP_WRITE_BARRIER);

    // Need to filter the {key} for the {receiver}.
    Node* context = __ GetContext();
    Node* result =
        __ CallRuntime(Runtime::kForInFilter, context, receiver, key);
    __ SetAccumulator(result);
    __ Dispatch();
  }
}

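// Note: storing the megamorphic sentinel above marks the loop's feedback slot
// so that an optimizing tier can see the enum-cache fast path failed for this
// loop and avoid specializing on it. (Hedged interpretation of the slow path;
// the consumer is the optimizing compiler.)
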
// ForInDone <index> <cache_length>
//
// Returns true if the end of the enumerable properties has been reached.
void Interpreter::DoForInDone(InterpreterAssembler* assembler) {
  Node* index_reg = __ BytecodeOperandReg(0);
  Node* index = __ LoadRegister(index_reg);
  Node* cache_length_reg = __ BytecodeOperandReg(1);
  Node* cache_length = __ LoadRegister(cache_length_reg);

  // Check if {index} is at {cache_length} already.
  Label if_true(assembler), if_false(assembler), end(assembler);
  __ BranchIfWordEqual(index, cache_length, &if_true, &if_false);
  __ Bind(&if_true);
  {
    __ SetAccumulator(__ BooleanConstant(true));
    __ Goto(&end);
  }
  __ Bind(&if_false);
  {
    __ SetAccumulator(__ BooleanConstant(false));
    __ Goto(&end);
  }
  __ Bind(&end);
  __ Dispatch();
}

// ForInStep <index>
//
// Increments the loop counter in register |index| and stores the result
// in the accumulator.
void Interpreter::DoForInStep(InterpreterAssembler* assembler) {
  Node* index_reg = __ BytecodeOperandReg(0);
  Node* index = __ LoadRegister(index_reg);
  Node* one = __ SmiConstant(Smi::FromInt(1));
  Node* result = __ SmiAdd(index, one);
  __ SetAccumulator(result);
  __ Dispatch();
}

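// For illustration, a loop such as
//
//   for (var key in obj) { ... }
//
// is expected to lower to roughly the following shape (register names and
// operands are schematic, not exact codegen; a JumpIfUndefined that skips the
// body for keys filtered out by ForInNext is elided):
//
//   ForInPrepare r4        ;; r4=cache_type, r5=cache_array, r6=cache_length
//   LdaZero
//   Star r7                ;; index = 0
//  loop:
//   ForInDone r7, r6
//   JumpIfTrue @done
//   ForInNext r3, r7, r4, [feedback]   ;; receiver in r3; key -> accumulator
//   ...loop body...
//   ForInStep r7
//   Star r7
//   Jump @loop
//  done:
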
// Wide
//
// Prefix bytecode indicating next bytecode has wide (16-bit) operands.
void Interpreter::DoWide(InterpreterAssembler* assembler) {
  __ DispatchWide(OperandScale::kDouble);
}

// ExtraWide
//
// Prefix bytecode indicating next bytecode has extra-wide (32-bit) operands.
void Interpreter::DoExtraWide(InterpreterAssembler* assembler) {
  __ DispatchWide(OperandScale::kQuadruple);
}

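// For illustration, a bytecode whose register or index operand no longer fits
// in one byte is prefixed rather than redefined, e.g. (schematic):
//
//   Ldar r3                ;; 8-bit operand, no prefix
//   Wide Ldar r300         ;; operands decoded at 16 bits
//   ExtraWide Ldar r70000  ;; operands decoded at 32 bits
//
// The prefix only changes the operand scale used to decode the following
// bytecode; the handler logic is shared across scales. (Sketch of the
// encoding scheme.)
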
// Illegal
//
// An invalid bytecode aborting execution if dispatched.
void Interpreter::DoIllegal(InterpreterAssembler* assembler) {
  __ Abort(kInvalidBytecode);
}

// Nop
//
// No operation.
void Interpreter::DoNop(InterpreterAssembler* assembler) { __ Dispatch(); }

// SuspendGenerator <generator>
//
// Exports the register file and stores it into the generator.  Also stores the
// current context, the state given in the accumulator, and the current bytecode
// offset (for debugging purposes) into the generator.
void Interpreter::DoSuspendGenerator(InterpreterAssembler* assembler) {
  Node* generator_reg = __ BytecodeOperandReg(0);
  Node* generator = __ LoadRegister(generator_reg);

  Label if_stepping(assembler, Label::kDeferred), ok(assembler);
  Node* step_action_address = __ ExternalConstant(
      ExternalReference::debug_last_step_action_address(isolate_));
  Node* step_action = __ Load(MachineType::Int8(), step_action_address);
  STATIC_ASSERT(StepIn > StepNext);
  STATIC_ASSERT(StepFrame > StepNext);
  STATIC_ASSERT(LastStepAction == StepFrame);
  Node* step_next = __ Int32Constant(StepNext);
  __ BranchIfInt32LessThanOrEqual(step_next, step_action, &if_stepping, &ok);
  __ Bind(&ok);

  Node* array =
      __ LoadObjectField(generator, JSGeneratorObject::kOperandStackOffset);
  Node* context = __ GetContext();
  Node* state = __ GetAccumulator();

  __ ExportRegisterFile(array);
  __ StoreObjectField(generator, JSGeneratorObject::kContextOffset, context);
  __ StoreObjectField(generator, JSGeneratorObject::kContinuationOffset, state);

  Node* offset = __ SmiTag(__ BytecodeOffset());
  __ StoreObjectField(generator, JSGeneratorObject::kInputOrDebugPosOffset,
                      offset);

  __ Dispatch();

  __ Bind(&if_stepping);
  {
    Node* context = __ GetContext();
    __ CallRuntime(Runtime::kDebugRecordAsyncFunction, context, generator);
    __ Goto(&ok);
  }
}

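// For illustration, each yield in a generator such as
//
//   function* g() { yield 1; }
//
// is expected to compile to a SuspendGenerator (persisting registers,
// context, and resume state into the JSGeneratorObject) followed by a return
// of the yielded value, with ResumeGenerator below restoring that snapshot on
// the next g.next() call. (Schematic; exact codegen may differ.)
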
// ResumeGenerator <generator>
//
// Imports the register file stored in the generator. Also loads the
// generator's state and stores it in the accumulator, before overwriting it
// with kGeneratorExecuting.
void Interpreter::DoResumeGenerator(InterpreterAssembler* assembler) {
  Node* generator_reg = __ BytecodeOperandReg(0);
  Node* generator = __ LoadRegister(generator_reg);

  __ ImportRegisterFile(
      __ LoadObjectField(generator, JSGeneratorObject::kOperandStackOffset));

  Node* old_state =
      __ LoadObjectField(generator, JSGeneratorObject::kContinuationOffset);
  Node* new_state = __ Int32Constant(JSGeneratorObject::kGeneratorExecuting);
  __ StoreObjectField(generator, JSGeneratorObject::kContinuationOffset,
                      __ SmiTag(new_state));
  __ SetAccumulator(old_state);

  __ Dispatch();
}

}  // namespace interpreter
}  // namespace internal
}  // namespace v8