// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_INTERPRETER_INTERPRETER_ASSEMBLER_H_
#define V8_INTERPRETER_INTERPRETER_ASSEMBLER_H_

#include "src/allocation.h"
#include "src/builtins/builtins.h"
#include "src/code-stub-assembler.h"
#include "src/frames.h"
#include "src/globals.h"
#include "src/interpreter/bytecode-register.h"
#include "src/interpreter/bytecodes.h"
#include "src/runtime/runtime.h"

namespace v8 {
namespace internal {
namespace interpreter {

class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
 public:
  InterpreterAssembler(Isolate* isolate, Zone* zone, Bytecode bytecode,
                       OperandScale operand_scale);
  virtual ~InterpreterAssembler();

  // Returns the count immediate for bytecode operand |operand_index| in the
  // current bytecode.
  compiler::Node* BytecodeOperandCount(int operand_index);
  // Returns the 8-bit flag for bytecode operand |operand_index| in the
  // current bytecode.
  compiler::Node* BytecodeOperandFlag(int operand_index);
  // Returns the index immediate for bytecode operand |operand_index| in the
  // current bytecode.
  compiler::Node* BytecodeOperandIdx(int operand_index);
  // Returns the UImm8 immediate for bytecode operand |operand_index| in the
  // current bytecode.
  compiler::Node* BytecodeOperandUImm(int operand_index);
  // Returns the Imm8 immediate for bytecode operand |operand_index| in the
  // current bytecode.
  compiler::Node* BytecodeOperandImm(int operand_index);
  // Returns the register index for bytecode operand |operand_index| in the
  // current bytecode.
  compiler::Node* BytecodeOperandReg(int operand_index);
  // Returns the runtime id immediate for bytecode operand
  // |operand_index| in the current bytecode.
  compiler::Node* BytecodeOperandRuntimeId(int operand_index);
  // Returns the intrinsic id immediate for bytecode operand
  // |operand_index| in the current bytecode.
  compiler::Node* BytecodeOperandIntrinsicId(int operand_index);
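
  // Illustrative sketch: a hypothetical bytecode handler might decode its
  // operands with these accessors before acting on them, e.g. reading a
  // register operand and a feedback slot index (the operand indices are
  // chosen only for illustration):
  //
  //   compiler::Node* reg_index = BytecodeOperandReg(0);
  //   compiler::Node* slot_index = BytecodeOperandIdx(1);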

  // Accumulator.
  compiler::Node* GetAccumulator();
  void SetAccumulator(compiler::Node* value);

  // Context.
  compiler::Node* GetContext();
  void SetContext(compiler::Node* value);
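
  // Illustrative sketch: many handlers read the implicit accumulator register
  // and the current context, compute a result, and write it back
  // (|result| here is a hypothetical node):
  //
  //   compiler::Node* value = GetAccumulator();
  //   compiler::Node* context = GetContext();
  //   // ... compute |result| from |value| in |context| ...
  //   SetAccumulator(result);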

  // Context at |depth| in the context chain starting at |context|.
  compiler::Node* GetContextAtDepth(compiler::Node* context,
                                    compiler::Node* depth);

  // Goto the given |target| if the context chain starting at |context| has any
  // extensions up to the given |depth|.
  void GotoIfHasContextExtensionUpToDepth(compiler::Node* context,
                                          compiler::Node* depth, Label* target);
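
  // Illustrative sketch: a handler for a context-slot access might walk the
  // context chain using an unsigned depth operand (the operand index is
  // chosen only for illustration):
  //
  //   compiler::Node* depth = BytecodeOperandUImm(2);
  //   compiler::Node* slot_context = GetContextAtDepth(GetContext(), depth);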

  // Number of registers.
  compiler::Node* RegisterCount();

  // Backup/restore register file to/from a fixed array of the correct length.
  compiler::Node* ExportRegisterFile(compiler::Node* array);
  compiler::Node* ImportRegisterFile(compiler::Node* array);

  // Loads from and stores to the interpreter register file.
  compiler::Node* LoadRegister(Register reg);
  compiler::Node* LoadRegister(compiler::Node* reg_index);
  compiler::Node* StoreRegister(compiler::Node* value, Register reg);
  compiler::Node* StoreRegister(compiler::Node* value,
                                compiler::Node* reg_index);
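
  // Illustrative sketch: a Star-like handler could move the accumulator into
  // the register named by its first operand and then dispatch:
  //
  //   compiler::Node* reg_index = BytecodeOperandReg(0);
  //   StoreRegister(GetAccumulator(), reg_index);
  //   Dispatch();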

  // Returns the next consecutive register.
  compiler::Node* NextRegister(compiler::Node* reg_index);

  // Returns the location in memory of the register |reg_index| in the
  // interpreter register file.
  compiler::Node* RegisterLocation(compiler::Node* reg_index);

  // Load constant at |index| in the constant pool.
  compiler::Node* LoadConstantPoolEntry(compiler::Node* index);

  // Load and untag constant at |index| in the constant pool.
  compiler::Node* LoadAndUntagConstantPoolEntry(compiler::Node* index);
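
  // Illustrative sketch: an LdaConstant-like handler could load the constant
  // pool entry named by an index operand and leave it in the accumulator:
  //
  //   compiler::Node* index = BytecodeOperandIdx(0);
  //   SetAccumulator(LoadConstantPoolEntry(index));
  //   Dispatch();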

  // Load the TypeFeedbackVector for the current function.
  compiler::Node* LoadTypeFeedbackVector();

  // Increment the call count for a CALL_IC or construct call.
  // The call count is located at feedback_vector[slot_id + 1].
  compiler::Node* IncrementCallCount(compiler::Node* type_feedback_vector,
                                     compiler::Node* slot_id);
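
  // Illustrative sketch: a call handler that collects feedback would
  // typically pair these two helpers (the slot operand index is chosen only
  // for illustration):
  //
  //   compiler::Node* feedback_vector = LoadTypeFeedbackVector();
  //   compiler::Node* slot_id = BytecodeOperandIdx(3);
  //   IncrementCallCount(feedback_vector, slot_id);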

  // Call JSFunction or Callable |function| with |arg_count|
  // arguments (not including receiver) and the first argument
  // located at |first_arg|. Type feedback is collected in the
  // slot at index |slot_id|.
  compiler::Node* CallJSWithFeedback(compiler::Node* function,
                                     compiler::Node* context,
                                     compiler::Node* first_arg,
                                     compiler::Node* arg_count,
                                     compiler::Node* slot_id,
                                     compiler::Node* type_feedback_vector,
                                     TailCallMode tail_call_mode);
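
  // Illustrative sketch: a Call-like handler could be assembled roughly as
  // follows; the operand indices and the TailCallMode value are assumptions
  // made for this example:
  //
  //   compiler::Node* function = LoadRegister(BytecodeOperandReg(0));
  //   compiler::Node* first_arg = RegisterLocation(BytecodeOperandReg(1));
  //   compiler::Node* arg_count = BytecodeOperandCount(2);
  //   compiler::Node* slot_id = BytecodeOperandIdx(3);
  //   compiler::Node* result = CallJSWithFeedback(
  //       function, GetContext(), first_arg, arg_count, slot_id,
  //       LoadTypeFeedbackVector(), TailCallMode::kDisallow);
  //   SetAccumulator(result);
  //   Dispatch();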

  // Call JSFunction or Callable |function| with |arg_count|
  // arguments (not including receiver) and the first argument
  // located at |first_arg|.
  compiler::Node* CallJS(compiler::Node* function, compiler::Node* context,
                         compiler::Node* first_arg, compiler::Node* arg_count,
                         TailCallMode tail_call_mode);

  // Call constructor |constructor| with |arg_count| arguments (not
  // including receiver) and the first argument located at
  // |first_arg|. The |new_target| is the same as the
  // |constructor| for the new keyword, but differs for the super
  // keyword.
  compiler::Node* CallConstruct(compiler::Node* constructor,
                                compiler::Node* context,
                                compiler::Node* new_target,
                                compiler::Node* first_arg,
                                compiler::Node* arg_count,
                                compiler::Node* slot_id,
                                compiler::Node* type_feedback_vector);
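
  // Illustrative sketch: a New-like handler could pass the constructor as
  // |new_target| for the new keyword case described above (the operand
  // indices are again hypothetical):
  //
  //   compiler::Node* constructor = LoadRegister(BytecodeOperandReg(0));
  //   compiler::Node* first_arg = RegisterLocation(BytecodeOperandReg(1));
  //   compiler::Node* arg_count = BytecodeOperandCount(2);
  //   compiler::Node* slot_id = BytecodeOperandIdx(3);
  //   compiler::Node* result = CallConstruct(
  //       constructor, GetContext(), constructor, first_arg, arg_count,
  //       slot_id, LoadTypeFeedbackVector());
  //   SetAccumulator(result);
  //   Dispatch();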

  // Call the runtime function |function_id| with |arg_count| arguments and
  // the first argument located at |first_arg|.
  compiler::Node* CallRuntimeN(compiler::Node* function_id,
                               compiler::Node* context,
                               compiler::Node* first_arg,
                               compiler::Node* arg_count, int return_size = 1);
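
  // Illustrative sketch: a CallRuntime-like handler decodes the runtime
  // function id from its own operand (the operand indices are chosen only
  // for illustration):
  //
  //   compiler::Node* function_id = BytecodeOperandRuntimeId(0);
  //   compiler::Node* first_arg = RegisterLocation(BytecodeOperandReg(1));
  //   compiler::Node* arg_count = BytecodeOperandCount(2);
  //   compiler::Node* result =
  //       CallRuntimeN(function_id, GetContext(), first_arg, arg_count);
  //   SetAccumulator(result);
  //   Dispatch();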

  // Jump relative to the current bytecode by |jump_offset|.
  compiler::Node* Jump(compiler::Node* jump_offset);

  // Jump relative to the current bytecode by |jump_offset| if the
  // word values |lhs| and |rhs| are equal.
  void JumpIfWordEqual(compiler::Node* lhs, compiler::Node* rhs,
                       compiler::Node* jump_offset);

  // Jump relative to the current bytecode by |jump_offset| if the
  // word values |lhs| and |rhs| are not equal.
  void JumpIfWordNotEqual(compiler::Node* lhs, compiler::Node* rhs,
                          compiler::Node* jump_offset);
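
  // Illustrative sketch: a JumpIfTrue-like handler compares the accumulator
  // against the boolean true value and jumps by a signed immediate operand;
  // BooleanConstant() is assumed to be available from CodeStubAssembler:
  //
  //   compiler::Node* relative_jump = BytecodeOperandImm(0);
  //   JumpIfWordEqual(GetAccumulator(), BooleanConstant(true), relative_jump);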

  // Returns true if the stack guard check triggers an interrupt.
  compiler::Node* StackCheckTriggeredInterrupt();

  // Updates the profiler interrupt budget for a return.
  void UpdateInterruptBudgetOnReturn();

  // Returns the OSR nesting level from the bytecode header.
  compiler::Node* LoadOSRNestingLevel();

  // Dispatch to the next bytecode.
  compiler::Node* Dispatch();

  // Dispatch to the given bytecode handler at the current bytecode offset.
  compiler::Node* DispatchToBytecodeHandler(compiler::Node* handler) {
    return DispatchToBytecodeHandler(handler, BytecodeOffset());
  }

  // Dispatch to the next bytecode using its wide operand variant, as selected
  // by |operand_scale|.
  void DispatchWide(OperandScale operand_scale);

  // Truncate tagged |value| to word32 and store the type feedback in
  // |var_type_feedback|.
  compiler::Node* TruncateTaggedToWord32WithFeedback(
      compiler::Node* context, compiler::Node* value,
      Variable* var_type_feedback);

  // Abort with the given bailout reason.
  void Abort(BailoutReason bailout_reason);
  void AbortIfWordNotEqual(compiler::Node* lhs, compiler::Node* rhs,
                           BailoutReason bailout_reason);

  // Returns the offset of the current bytecode relative to the
  // BytecodeArrayPointer.
  compiler::Node* BytecodeOffset();

 protected:
  Bytecode bytecode() const { return bytecode_; }
  static bool TargetSupportsUnalignedAccess();

 private:
  // Returns a tagged pointer to the current function's BytecodeArray object.
  compiler::Node* BytecodeArrayTaggedPointer();

  // Returns a raw pointer to the first entry in the interpreter dispatch
  // table.
  compiler::Node* DispatchTableRawPointer();

  // Returns the accumulator value without checking whether the current
  // bytecode uses it. This is intended to be used only in dispatch and in
  // tracing, as these need to bypass accumulator use validity checks.
  compiler::Node* GetAccumulatorUnchecked();

  // Returns the frame pointer for the interpreted frame of the function being
  // interpreted.
  compiler::Node* GetInterpretedFramePointer();

  // Saves and restores the interpreter bytecode offset to the interpreter
  // stack frame when performing a call.
  void CallPrologue() override;
  void CallEpilogue() override;

  // Increment the dispatch counter for the (current, next) bytecode pair.
  void TraceBytecodeDispatch(compiler::Node* target_index);

  // Traces the current bytecode by calling |function_id|.
  void TraceBytecode(Runtime::FunctionId function_id);

  // Updates the bytecode array's interrupt budget by |weight| and calls
  // Runtime::kInterrupt if the counter reaches zero.
  void UpdateInterruptBudget(compiler::Node* weight);

  // Returns the offset of register |index| relative to RegisterFilePointer().
  compiler::Node* RegisterFrameOffset(compiler::Node* index);

  // Returns the offset of an operand relative to the current bytecode offset.
  compiler::Node* OperandOffset(int operand_index);

  // Returns a value built from a sequence of bytes in the bytecode array,
  // starting at |relative_offset| from the current bytecode. The
  // |result_type| determines the size and signedness of the value read. This
  // method should only be used on architectures that do not support unaligned
  // memory accesses.
  compiler::Node* BytecodeOperandReadUnaligned(int relative_offset,
                                               MachineType result_type);

  compiler::Node* BytecodeOperandUnsignedByte(int operand_index);
  compiler::Node* BytecodeOperandSignedByte(int operand_index);
  compiler::Node* BytecodeOperandUnsignedShort(int operand_index);
  compiler::Node* BytecodeOperandSignedShort(int operand_index);
  compiler::Node* BytecodeOperandUnsignedQuad(int operand_index);
  compiler::Node* BytecodeOperandSignedQuad(int operand_index);

  compiler::Node* BytecodeSignedOperand(int operand_index,
                                        OperandSize operand_size);
  compiler::Node* BytecodeUnsignedOperand(int operand_index,
                                          OperandSize operand_size);

  // Jump relative to the current bytecode by |jump_offset| if the
  // |condition| is true. Helper function for JumpIfWordEqual and
  // JumpIfWordNotEqual.
  void JumpConditional(compiler::Node* condition, compiler::Node* jump_offset);

  // Updates and returns BytecodeOffset() advanced by the current bytecode's
  // size. Traces the exit of the current bytecode.
  compiler::Node* Advance();

  // Updates and returns BytecodeOffset() advanced by |delta| bytecodes.
  // Traces the exit of the current bytecode.
  compiler::Node* Advance(int delta);
  compiler::Node* Advance(compiler::Node* delta);

  // Load the bytecode at |bytecode_offset|.
  compiler::Node* LoadBytecode(compiler::Node* bytecode_offset);

  // Look ahead for Star and inline it in a branch. Returns a new target
  // bytecode node for dispatch.
  compiler::Node* StarDispatchLookahead(compiler::Node* target_bytecode);

  // Build code for Star at the current BytecodeOffset() and Advance() to the
  // next dispatch offset.
  void InlineStar();

  // Dispatch to |target_bytecode| at |new_bytecode_offset|.
  // |target_bytecode| should be equivalent to loading from the offset.
  compiler::Node* DispatchToBytecode(compiler::Node* target_bytecode,
                                     compiler::Node* new_bytecode_offset);

  // Dispatch to the bytecode handler with code offset |handler|.
  compiler::Node* DispatchToBytecodeHandler(compiler::Node* handler,
                                            compiler::Node* bytecode_offset);

  // Dispatch to the bytecode handler with code entry point |handler_entry|.
  compiler::Node* DispatchToBytecodeHandlerEntry(
      compiler::Node* handler_entry, compiler::Node* bytecode_offset);

  OperandScale operand_scale() const { return operand_scale_; }

  Bytecode bytecode_;
  OperandScale operand_scale_;
  CodeStubAssembler::Variable bytecode_offset_;
  CodeStubAssembler::Variable interpreted_frame_pointer_;
  CodeStubAssembler::Variable accumulator_;
  AccumulatorUse accumulator_use_;
  bool made_call_;

  bool disable_stack_check_across_call_;
  compiler::Node* stack_pointer_before_call_;

  DISALLOW_COPY_AND_ASSIGN(InterpreterAssembler);
};

}  // namespace interpreter
}  // namespace internal
}  // namespace v8

#endif  // V8_INTERPRETER_INTERPRETER_ASSEMBLER_H_