// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_INTERPRETER_INTERPRETER_ASSEMBLER_H_
#define V8_INTERPRETER_INTERPRETER_ASSEMBLER_H_

#include "src/allocation.h"
#include "src/builtins/builtins.h"
#include "src/code-stub-assembler.h"
#include "src/globals.h"
#include "src/interpreter/bytecode-register.h"
#include "src/interpreter/bytecodes.h"
#include "src/runtime/runtime.h"

namespace v8 {
namespace internal {
namespace interpreter {

class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
 public:
  InterpreterAssembler(compiler::CodeAssemblerState* state, Bytecode bytecode,
                       OperandScale operand_scale);
  ~InterpreterAssembler();

  // Returns the 32-bit unsigned count immediate for bytecode operand
  // |operand_index| in the current bytecode.
  compiler::Node* BytecodeOperandCount(int operand_index);
  // Returns the 32-bit unsigned flag for bytecode operand |operand_index|
  // in the current bytecode.
  compiler::Node* BytecodeOperandFlag(int operand_index);
  // Returns the 32-bit zero-extended index immediate for bytecode operand
  // |operand_index| in the current bytecode.
  compiler::Node* BytecodeOperandIdxInt32(int operand_index);
  // Returns the word zero-extended index immediate for bytecode operand
  // |operand_index| in the current bytecode.
  compiler::Node* BytecodeOperandIdx(int operand_index);
  // Returns the smi index immediate for bytecode operand |operand_index|
  // in the current bytecode.
  compiler::Node* BytecodeOperandIdxSmi(int operand_index);
  // Returns the 32-bit unsigned immediate for bytecode operand |operand_index|
  // in the current bytecode.
  compiler::Node* BytecodeOperandUImm(int operand_index);
  // Returns the word-size unsigned immediate for bytecode operand
  // |operand_index| in the current bytecode.
  compiler::Node* BytecodeOperandUImmWord(int operand_index);
  // Returns the unsigned smi immediate for bytecode operand |operand_index| in
  // the current bytecode.
  compiler::Node* BytecodeOperandUImmSmi(int operand_index);
  // Returns the 32-bit signed immediate for bytecode operand |operand_index|
  // in the current bytecode.
  compiler::Node* BytecodeOperandImm(int operand_index);
  // Returns the word-size signed immediate for bytecode operand |operand_index|
  // in the current bytecode.
  compiler::Node* BytecodeOperandImmIntPtr(int operand_index);
  // Returns the smi immediate for bytecode operand |operand_index| in the
  // current bytecode.
  compiler::Node* BytecodeOperandImmSmi(int operand_index);
  // Returns the 32-bit unsigned runtime id immediate for bytecode operand
  // |operand_index| in the current bytecode.
  compiler::Node* BytecodeOperandRuntimeId(int operand_index);
  // Returns the 32-bit unsigned native context index immediate for bytecode
  // operand |operand_index| in the current bytecode.
  compiler::Node* BytecodeOperandNativeContextIndex(int operand_index);
  // Returns the 32-bit unsigned intrinsic id immediate for bytecode operand
  // |operand_index| in the current bytecode.
  compiler::Node* BytecodeOperandIntrinsicId(int operand_index);

  // Accumulator.
  compiler::Node* GetAccumulator();
  void SetAccumulator(compiler::Node* value);
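  //
  // A minimal usage sketch (this is essentially what the LdaSmi handler in
  // interpreter-generator.cc does, assuming the current bytecode has a Smi
  // immediate as operand 0): decode the operand, set the accumulator, and
  // dispatch to the next bytecode.
  //
  //   Node* smi_int = BytecodeOperandImmSmi(0);
  //   SetAccumulator(smi_int);
  //   Dispatch();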

  // Context.
  compiler::Node* GetContext();
  void SetContext(compiler::Node* value);

  // Context at |depth| in the context chain starting at |context|.
  compiler::Node* GetContextAtDepth(compiler::Node* context,
                                    compiler::Node* depth);

  // Goto the given |target| if the context chain starting at |context| has any
  // extensions up to the given |depth|.
  void GotoIfHasContextExtensionUpToDepth(compiler::Node* context,
                                          compiler::Node* depth, Label* target);
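  //
  // Sketch of a context-slot load (modeled on the LdaContextSlot handler;
  // LoadContextElement comes from CodeStubAssembler):
  //
  //   Node* context = LoadRegisterAtOperandIndex(0);
  //   Node* slot_index = BytecodeOperandIdx(1);
  //   Node* depth = BytecodeOperandUImm(2);
  //   Node* slot_context = GetContextAtDepth(context, depth);
  //   SetAccumulator(LoadContextElement(slot_context, slot_index));
  //   Dispatch();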

  // A RegListNodePair provides an abstraction over lists of registers.
  class RegListNodePair {
   public:
    RegListNodePair(Node* base_reg_location, Node* reg_count)
        : base_reg_location_(base_reg_location), reg_count_(reg_count) {}

    compiler::Node* reg_count() const { return reg_count_; }
    compiler::Node* base_reg_location() const { return base_reg_location_; }

   private:
    compiler::Node* base_reg_location_;
    compiler::Node* reg_count_;
  };

  // Backup/restore register file to/from a fixed array of the correct length.
  // There is an asymmetry between suspend/export and resume/import.
  // - Suspend copies arguments and registers to the generator.
  // - Resume copies only the registers from the generator; the arguments
  //   are copied by the ResumeGenerator trampoline.
  compiler::Node* ExportParametersAndRegisterFile(
      TNode<FixedArray> array, const RegListNodePair& registers,
      TNode<Int32T> formal_parameter_count);
  compiler::Node* ImportRegisterFile(TNode<FixedArray> array,
                                     const RegListNodePair& registers,
                                     TNode<Int32T> formal_parameter_count);

  // Loads from and stores to the interpreter register file.
  compiler::Node* LoadRegister(Register reg);
  compiler::Node* LoadAndUntagRegister(Register reg);
  compiler::Node* LoadRegisterAtOperandIndex(int operand_index);
  std::pair<compiler::Node*, compiler::Node*> LoadRegisterPairAtOperandIndex(
      int operand_index);
  void StoreRegister(compiler::Node* value, Register reg);
  void StoreAndTagRegister(compiler::Node* value, Register reg);
  void StoreRegisterAtOperandIndex(compiler::Node* value, int operand_index);
  void StoreRegisterPairAtOperandIndex(compiler::Node* value1,
                                       compiler::Node* value2,
                                       int operand_index);
  void StoreRegisterTripleAtOperandIndex(compiler::Node* value1,
                                         compiler::Node* value2,
                                         compiler::Node* value3,
                                         int operand_index);
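  //
  // Sketch of a register store (this is essentially what the Star handler
  // does): write the accumulator to the register named by operand 0.
  //
  //   Node* accumulator = GetAccumulator();
  //   StoreRegisterAtOperandIndex(accumulator, 0);
  //   Dispatch();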

  RegListNodePair GetRegisterListAtOperandIndex(int operand_index);
  Node* LoadRegisterFromRegisterList(const RegListNodePair& reg_list,
                                     int index);
  Node* RegisterLocationInRegisterList(const RegListNodePair& reg_list,
                                       int index);

  // Load constant at the index specified in operand |operand_index| from the
  // constant pool.
  compiler::Node* LoadConstantPoolEntryAtOperandIndex(int operand_index);
  // Load and untag constant at the index specified in operand |operand_index|
  // from the constant pool.
  compiler::Node* LoadAndUntagConstantPoolEntryAtOperandIndex(
      int operand_index);
  // Load constant at |index| in the constant pool.
  compiler::Node* LoadConstantPoolEntry(compiler::Node* index);
  // Load and untag constant at |index| in the constant pool.
  compiler::Node* LoadAndUntagConstantPoolEntry(compiler::Node* index);
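  //
  // Sketch (essentially the LdaConstant handler): load the constant pool
  // entry named by operand 0 into the accumulator.
  //
  //   Node* constant = LoadConstantPoolEntryAtOperandIndex(0);
  //   SetAccumulator(constant);
  //   Dispatch();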

  // Load the FeedbackVector for the current function.
  compiler::TNode<FeedbackVector> LoadFeedbackVector();

  // Increment the call count for a CALL_IC or construct call.
  // The call count is located at feedback_vector[slot_id + 1].
  void IncrementCallCount(compiler::Node* feedback_vector,
                          compiler::Node* slot_id);

  // Collect the callable |target| feedback for either a CALL_IC or
  // an INSTANCEOF_IC in the |feedback_vector| at |slot_id|.
  void CollectCallableFeedback(compiler::Node* target, compiler::Node* context,
                               compiler::Node* feedback_vector,
                               compiler::Node* slot_id);

  // Collect CALL_IC feedback for the |target| function in the
  // |feedback_vector| at |slot_id|, and the call counts in
  // the |feedback_vector| at |slot_id+1|.
  void CollectCallFeedback(compiler::Node* target, compiler::Node* context,
                           compiler::Node* feedback_vector,
                           compiler::Node* slot_id);

  // Call JSFunction or Callable |function| with |args| arguments, possibly
  // including the receiver depending on |receiver_mode|. After the call
  // returns, dispatches directly to the next bytecode.
  void CallJSAndDispatch(compiler::Node* function, compiler::Node* context,
                         const RegListNodePair& args,
                         ConvertReceiverMode receiver_mode);
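  //
  // Sketch of a call bytecode handler built from these pieces (modeled on
  // the generic JSCall handler in interpreter-generator.cc; |receiver_mode|
  // is assumed to be fixed by the bytecode being generated):
  //
  //   Node* function = LoadRegisterAtOperandIndex(0);
  //   RegListNodePair args = GetRegisterListAtOperandIndex(1);
  //   Node* slot_id = BytecodeOperandIdx(3);
  //   Node* feedback_vector = LoadFeedbackVector();
  //   Node* context = GetContext();
  //   CollectCallFeedback(function, context, feedback_vector, slot_id);
  //   CallJSAndDispatch(function, context, args, receiver_mode);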

  // Call JSFunction or Callable |function| with |arg_count| arguments (not
  // including receiver) passed as |args|, possibly including the receiver
  // depending on |receiver_mode|. After the call returns, dispatches directly
  // to the next bytecode.
  template <class... TArgs>
  void CallJSAndDispatch(Node* function, Node* context, Node* arg_count,
                         ConvertReceiverMode receiver_mode, TArgs... args);

  // Call JSFunction or Callable |function| with |args| arguments (not
  // including receiver), with the final argument being a spread. After the
  // call returns, dispatches directly to the next bytecode.
  void CallJSWithSpreadAndDispatch(compiler::Node* function,
                                   compiler::Node* context,
                                   const RegListNodePair& args,
                                   compiler::Node* slot_id,
                                   compiler::Node* feedback_vector);

  // Call constructor |target| with |args| arguments (not including receiver).
  // The |new_target| is the same as the |target| for the new keyword, but
  // differs for the super keyword.
  compiler::Node* Construct(compiler::Node* target, compiler::Node* context,
                            compiler::Node* new_target,
                            const RegListNodePair& args,
                            compiler::Node* slot_id,
                            compiler::Node* feedback_vector);
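  //
  // Sketch (essentially the Construct handler): the new.target value is
  // taken from the accumulator, the constructor and arguments from
  // registers.
  //
  //   Node* new_target = GetAccumulator();
  //   Node* constructor = LoadRegisterAtOperandIndex(0);
  //   RegListNodePair args = GetRegisterListAtOperandIndex(1);
  //   Node* slot_id = BytecodeOperandIdx(3);
  //   Node* feedback_vector = LoadFeedbackVector();
  //   Node* context = GetContext();
  //   SetAccumulator(Construct(constructor, context, new_target, args,
  //                            slot_id, feedback_vector));
  //   Dispatch();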

  // Call constructor |target| with |args| arguments (not including
  // receiver). The last argument is always a spread. The |new_target| is the
  // same as the |target| for the new keyword, but differs for the super
  // keyword.
  compiler::Node* ConstructWithSpread(compiler::Node* target,
                                      compiler::Node* context,
                                      compiler::Node* new_target,
                                      const RegListNodePair& args,
                                      compiler::Node* slot_id,
                                      compiler::Node* feedback_vector);

  // Call the runtime function |function_id| with |args| arguments, which
  // returns |return_size| values.
  compiler::Node* CallRuntimeN(compiler::Node* function_id,
                               compiler::Node* context,
                               const RegListNodePair& args,
                               int return_size = 1);
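  //
  // Sketch (essentially the CallRuntime handler): the runtime function id
  // comes from operand 0 and the arguments from a register list.
  //
  //   Node* function_id = BytecodeOperandRuntimeId(0);
  //   RegListNodePair args = GetRegisterListAtOperandIndex(1);
  //   Node* context = GetContext();
  //   Node* result = CallRuntimeN(function_id, context, args);
  //   SetAccumulator(result);
  //   Dispatch();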

  // Jump forward relative to the current bytecode by the |jump_offset|.
  compiler::Node* Jump(compiler::Node* jump_offset);

  // Jump backward relative to the current bytecode by the |jump_offset|.
  compiler::Node* JumpBackward(compiler::Node* jump_offset);

  // Jump forward relative to the current bytecode by |jump_offset| if the
  // word values |lhs| and |rhs| are equal.
  void JumpIfWordEqual(compiler::Node* lhs, compiler::Node* rhs,
                       compiler::Node* jump_offset);

  // Jump forward relative to the current bytecode by |jump_offset| if the
  // word values |lhs| and |rhs| are not equal.
  void JumpIfWordNotEqual(compiler::Node* lhs, compiler::Node* rhs,
                          compiler::Node* jump_offset);
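  //
  // Sketch of a conditional jump handler (in the spirit of JumpIfTrue;
  // TrueConstant() comes from CodeStubAssembler, and the accumulator is
  // assumed to hold a boolean):
  //
  //   Node* accumulator = GetAccumulator();
  //   Node* relative_jump = BytecodeOperandUImmWord(0);
  //   JumpIfWordEqual(accumulator, TrueConstant(), relative_jump);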

  // Updates the profiler interrupt budget for a return.
  void UpdateInterruptBudgetOnReturn();

  // Returns the OSR nesting level from the bytecode header.
  compiler::Node* LoadOSRNestingLevel();

  // Dispatch to the bytecode.
  compiler::Node* Dispatch();

  // Dispatch bytecode as wide operand variant.
  void DispatchWide(OperandScale operand_scale);
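  //
  // Sketch (essentially the Wide prefix handler): re-dispatch the following
  // bytecode with double-width operands.
  //
  //   DispatchWide(OperandScale::kDouble);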

  // Dispatch to |target_bytecode| at |new_bytecode_offset|.
  // |target_bytecode| should be equivalent to the bytecode loaded from
  // |new_bytecode_offset|.
  compiler::Node* DispatchToBytecode(compiler::Node* target_bytecode,
                                     compiler::Node* new_bytecode_offset);

  // Abort with the given abort reason.
  void Abort(AbortReason abort_reason);
  void AbortIfWordNotEqual(compiler::Node* lhs, compiler::Node* rhs,
                           AbortReason abort_reason);
  // Abort if |register_count| is invalid for the given register file array.
  void AbortIfRegisterCountInvalid(compiler::Node* parameters_and_registers,
                                   compiler::Node* formal_parameter_count,
                                   compiler::Node* register_count);

  // Dispatch to frame dropper trampoline if necessary.
  void MaybeDropFrames(compiler::Node* context);

  // Returns the offset from the BytecodeArrayPointer of the current bytecode.
  compiler::Node* BytecodeOffset();

 protected:
  Bytecode bytecode() const { return bytecode_; }
  static bool TargetSupportsUnalignedAccess();

  void ToNumberOrNumeric(Object::Conversion mode);

  // Lazily deserializes the current bytecode's handler and tail-calls into it.
  void DeserializeLazyAndDispatch();

 private:
  // Returns a tagged pointer to the current function's BytecodeArray object.
  compiler::Node* BytecodeArrayTaggedPointer();

  // Returns a raw pointer to the first entry in the interpreter dispatch
  // table.
  compiler::Node* DispatchTableRawPointer();

  // Returns the accumulator value without checking whether the bytecode
  // uses it. This is intended to be used only in dispatch and in
  // tracing, as these need to bypass accumulator use validity checks.
  compiler::Node* GetAccumulatorUnchecked();

  // Returns the frame pointer for the interpreted frame of the function being
  // interpreted.
  compiler::Node* GetInterpretedFramePointer();

  // Operations on registers.
  compiler::Node* RegisterLocation(Register reg);
  compiler::Node* RegisterLocation(compiler::Node* reg_index);
  compiler::Node* NextRegister(compiler::Node* reg_index);
  compiler::Node* LoadRegister(Node* reg_index);
  void StoreRegister(compiler::Node* value, compiler::Node* reg_index);

  // Saves and restores the interpreter bytecode offset to/from the
  // interpreter stack frame when performing a call.
  void CallPrologue();
  void CallEpilogue();

  // Increment the dispatch counter for the (current, next) bytecode pair.
  void TraceBytecodeDispatch(compiler::Node* target_index);

  // Traces the current bytecode by calling |function_id|.
  void TraceBytecode(Runtime::FunctionId function_id);

  // Updates the bytecode array's interrupt budget by a 32-bit unsigned
  // |weight| and calls Runtime::kInterrupt if the counter reaches zero. If
  // |backward|, then the interrupt budget is decremented, otherwise it is
  // incremented.
  void UpdateInterruptBudget(compiler::Node* weight, bool backward);

  // Returns the offset of register |index| relative to RegisterFilePointer().
  compiler::Node* RegisterFrameOffset(compiler::Node* index);

  // Returns the offset of an operand relative to the current bytecode offset.
  compiler::Node* OperandOffset(int operand_index);

  // Returns a value built from a sequence of bytes in the bytecode
  // array starting at |relative_offset| from the current bytecode.
  // The |result_type| determines the size and signedness of the
  // value read. This method should only be used on architectures that
  // do not support unaligned memory accesses.
  compiler::Node* BytecodeOperandReadUnaligned(
      int relative_offset, MachineType result_type,
      LoadSensitivity needs_poisoning = LoadSensitivity::kCritical);

  // Returns the operand value zero- or sign-extended to word32.
  compiler::Node* BytecodeOperandUnsignedByte(
      int operand_index,
      LoadSensitivity needs_poisoning = LoadSensitivity::kCritical);
  compiler::Node* BytecodeOperandSignedByte(
      int operand_index,
      LoadSensitivity needs_poisoning = LoadSensitivity::kCritical);
  compiler::Node* BytecodeOperandUnsignedShort(
      int operand_index,
      LoadSensitivity needs_poisoning = LoadSensitivity::kCritical);
  compiler::Node* BytecodeOperandSignedShort(
      int operand_index,
      LoadSensitivity needs_poisoning = LoadSensitivity::kCritical);
  compiler::Node* BytecodeOperandUnsignedQuad(
      int operand_index,
      LoadSensitivity needs_poisoning = LoadSensitivity::kCritical);
  compiler::Node* BytecodeOperandSignedQuad(
      int operand_index,
      LoadSensitivity needs_poisoning = LoadSensitivity::kCritical);

  // Returns the operand value of the given size, zero- or sign-extended to
  // word32.
  compiler::Node* BytecodeSignedOperand(
      int operand_index, OperandSize operand_size,
      LoadSensitivity needs_poisoning = LoadSensitivity::kCritical);
  compiler::Node* BytecodeUnsignedOperand(
      int operand_index, OperandSize operand_size,
      LoadSensitivity needs_poisoning = LoadSensitivity::kCritical);

  // Returns the word-size sign-extended register index for bytecode operand
  // |operand_index| in the current bytecode. Value is not poisoned on
  // speculation since the value loaded from the register is poisoned instead.
  compiler::Node* BytecodeOperandReg(
      int operand_index,
      LoadSensitivity needs_poisoning = LoadSensitivity::kCritical);

  // Returns the word zero-extended index immediate for bytecode operand
  // |operand_index| in the current bytecode for use when loading a constant
  // pool entry.
  compiler::Node* BytecodeOperandConstantPoolIdx(
      int operand_index,
      LoadSensitivity needs_poisoning = LoadSensitivity::kCritical);

  // Jump relative to the current bytecode by the |jump_offset|. If |backward|,
  // then jump backward (subtract the offset), otherwise jump forward (add the
  // offset). Helper function for Jump and JumpBackward.
  compiler::Node* Jump(compiler::Node* jump_offset, bool backward);

  // Jump forward relative to the current bytecode by |jump_offset| if the
  // |condition| is true. Helper function for JumpIfWordEqual and
  // JumpIfWordNotEqual.
  void JumpConditional(compiler::Node* condition, compiler::Node* jump_offset);

  // Save the bytecode offset to the interpreter frame.
  void SaveBytecodeOffset();
  // Reload the bytecode offset from the interpreter frame.
  Node* ReloadBytecodeOffset();

  // Updates and returns BytecodeOffset() advanced by the current bytecode's
  // size. Traces the exit of the current bytecode.
  compiler::Node* Advance();

  // Updates and returns BytecodeOffset() advanced by |delta| bytecodes.
  // Traces the exit of the current bytecode.
  compiler::Node* Advance(int delta);
  compiler::Node* Advance(compiler::Node* delta, bool backward = false);

  // Load the bytecode at |bytecode_offset|.
  compiler::Node* LoadBytecode(compiler::Node* bytecode_offset);

  // Look ahead for Star and inline it in a branch. Returns a new target
  // bytecode node for dispatch.
  compiler::Node* StarDispatchLookahead(compiler::Node* target_bytecode);

  // Build code for Star at the current BytecodeOffset() and Advance() to the
  // next dispatch offset.
  void InlineStar();

  // Dispatch to the bytecode handler with code offset |handler|.
  compiler::Node* DispatchToBytecodeHandler(compiler::Node* handler,
                                            compiler::Node* bytecode_offset,
                                            compiler::Node* target_bytecode);

  // Dispatch to the bytecode handler with code entry point |handler_entry|.
  compiler::Node* DispatchToBytecodeHandlerEntry(
      compiler::Node* handler_entry, compiler::Node* bytecode_offset,
      compiler::Node* target_bytecode);

  int CurrentBytecodeSize() const;

  OperandScale operand_scale() const { return operand_scale_; }

  Bytecode bytecode_;
  OperandScale operand_scale_;
  CodeStubAssembler::Variable interpreted_frame_pointer_;
  CodeStubAssembler::Variable bytecode_array_;
  CodeStubAssembler::Variable bytecode_offset_;
  CodeStubAssembler::Variable dispatch_table_;
  CodeStubAssembler::Variable accumulator_;
  AccumulatorUse accumulator_use_;
  bool made_call_;
  bool reloaded_frame_ptr_;
  bool bytecode_array_valid_;
  bool disable_stack_check_across_call_;
  compiler::Node* stack_pointer_before_call_;

  DISALLOW_COPY_AND_ASSIGN(InterpreterAssembler);
};

}  // namespace interpreter
}  // namespace internal
}  // namespace v8

#endif  // V8_INTERPRETER_INTERPRETER_ASSEMBLER_H_