// Copyright (c) 1994-2006 Sun Microsystems Inc.
// All Rights Reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// - Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// - Redistribution in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// - Neither the name of Sun Microsystems or the names of contributors may
// be used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// The original source code covered by the above license has been
// modified significantly by Google Inc.
// Copyright 2012 the V8 project authors. All rights reserved.

#ifndef V8_ASSEMBLER_H_
#define V8_ASSEMBLER_H_

#include <forward_list>
#include <iosfwd>
#include <map>

#include "src/allocation.h"
#include "src/code-reference.h"
#include "src/contexts.h"
#include "src/deoptimize-reason.h"
#include "src/double.h"
#include "src/external-reference.h"
#include "src/flags.h"
#include "src/globals.h"
#include "src/label.h"
#include "src/objects.h"
#include "src/register-configuration.h"
#include "src/reglist.h"
#include "src/reloc-info.h"

namespace v8 {

// Forward declarations.
class ApiFunction;

namespace internal {

// Forward declarations.
class EmbeddedData;
class InstructionStream;
class Isolate;
class SCTableReference;
class SourcePosition;
class StatsCounter;

// -----------------------------------------------------------------------------
// Optimization for far-jump-like instructions that can be replaced by shorter
// ones.

class JumpOptimizationInfo {
 public:
  bool is_collecting() const { return stage_ == kCollection; }
  bool is_optimizing() const { return stage_ == kOptimization; }
  void set_optimizing() { stage_ = kOptimization; }

  bool is_optimizable() const { return optimizable_; }
  void set_optimizable() { optimizable_ = true; }

  // Used to verify that the instruction sequence is the same in both stages.
  size_t hash_code() const { return hash_code_; }
  void set_hash_code(size_t hash_code) { hash_code_ = hash_code; }

  std::vector<uint32_t>& farjmp_bitmap() { return farjmp_bitmap_; }

 private:
  enum { kCollection, kOptimization } stage_ = kCollection;
  bool optimizable_ = false;
  std::vector<uint32_t> farjmp_bitmap_;
  size_t hash_code_ = 0u;
};

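// A minimal sketch of the intended two-pass use of JumpOptimizationInfo; the
// GenerateCode() driver below is a placeholder for the embedder's pipeline and
// is not part of this header. The same code is assembled twice with one shared
// info object: the first pass collects which far jumps could be shortened, and
// the second pass emits the short forms.
//
//   JumpOptimizationInfo jump_opt;               // starts in collection stage
//   assembler->set_jump_optimization_info(&jump_opt);
//   GenerateCode(assembler);                     // pass 1: collect far jumps
//   if (jump_opt.is_optimizable()) {
//     jump_opt.set_optimizing();
//     GenerateCode(assembler);                   // pass 2: emit short jumps
//   }
//
// hash_code() can be used to check that both passes observed the same
// instruction sequence.
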
class HeapObjectRequest {
 public:
  explicit HeapObjectRequest(double heap_number, int offset = -1);
  explicit HeapObjectRequest(CodeStub* code_stub, int offset = -1);

  enum Kind { kHeapNumber, kCodeStub };
  Kind kind() const { return kind_; }

  double heap_number() const {
    DCHECK_EQ(kind(), kHeapNumber);
    return value_.heap_number;
  }

  CodeStub* code_stub() const {
    DCHECK_EQ(kind(), kCodeStub);
    return value_.code_stub;
  }

  // The code buffer offset at the time of the request.
  int offset() const {
    DCHECK_GE(offset_, 0);
    return offset_;
  }
  void set_offset(int offset) {
    DCHECK_LT(offset_, 0);
    offset_ = offset;
    DCHECK_GE(offset_, 0);
  }

 private:
  Kind kind_;

  union {
    double heap_number;
    CodeStub* code_stub;
  } value_;

  int offset_;
};

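// Illustrative sketch of a request's lifecycle in a platform assembler; the
// emit() call and kDummyValue below are placeholders, only RequestHeapObject()
// and pc_offset() are declared in this header. The final heap object cannot be
// allocated while assembling, so the request records the value and the current
// pc offset, and a dummy value is emitted in its place to be patched later by
// AllocateAndInstallRequestedHeapObjects().
//
//   void Assembler::EmitEmbeddedHeapNumber(double value) {
//     RequestHeapObject(HeapObjectRequest(value, pc_offset()));
//     emit(kDummyValue);  // patched once the heap number exists
//   }
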
// -----------------------------------------------------------------------------
// Platform independent assembler base class.

enum class CodeObjectRequired { kNo, kYes };

struct V8_EXPORT_PRIVATE AssemblerOptions {
  // Recording reloc info for external references and off-heap targets is
  // needed whenever code is serialized, e.g. into the snapshot or as a WASM
  // module. This flag allows this reloc info to be disabled for code that
  // will not survive process destruction.
  bool record_reloc_info_for_serialization = true;
  // Recording reloc info can be disabled wholesale. This is needed when the
  // assembler is used on existing code directly (e.g. JumpTableAssembler)
  // without any buffer to hold reloc information.
  bool disable_reloc_info_for_patching = false;
  // Enables access to external references by computing a delta from the root
  // array. Only valid if code will not survive the process.
  bool enable_root_array_delta_access = false;
  // Enables specific assembler sequences only used for the simulator.
  bool enable_simulator_code = false;
  // Enables use of isolate-independent constants, indirected through the
  // root array.
  // (macro assembler feature).
  bool isolate_independent_code = false;
  // Enables the use of isolate-independent builtins through an off-heap
  // trampoline. (macro assembler feature).
  bool inline_offheap_trampolines = false;
  // On some platforms, all code is within a given range in the process,
  // and the start of this range is configured here.
  Address code_range_start = 0;
  // Enable pc-relative calls/jumps on platforms that support it. When setting
  // this flag, the code range must be small enough to fit all offsets into
  // the instruction immediates.
  bool use_pc_relative_calls_and_jumps = false;

  static AssemblerOptions Default(
      Isolate* isolate, bool explicitly_support_serialization = false);
};

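// A minimal sketch of configuring options for a one-off, process-local code
// object; the concrete Assembler constructor shown is assumed to mirror
// AssemblerBase's and is illustrative only.
//
//   AssemblerOptions options = AssemblerOptions::Default(isolate);
//   options.record_reloc_info_for_serialization = false;  // never serialized
//   Assembler assm(options, nullptr, 0);  // nullptr: assembler owns its buffer
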
class V8_EXPORT_PRIVATE AssemblerBase : public Malloced {
 public:
  AssemblerBase(const AssemblerOptions& options, void* buffer, int buffer_size);
  virtual ~AssemblerBase();

  const AssemblerOptions& options() const { return options_; }

  bool emit_debug_code() const { return emit_debug_code_; }
  void set_emit_debug_code(bool value) { emit_debug_code_ = value; }

  bool predictable_code_size() const { return predictable_code_size_; }
  void set_predictable_code_size(bool value) { predictable_code_size_ = value; }

  uint64_t enabled_cpu_features() const { return enabled_cpu_features_; }
  void set_enabled_cpu_features(uint64_t features) {
    enabled_cpu_features_ = features;
  }
  // Features are usually enabled by CpuFeatureScope, which also asserts that
  // the features are supported before they are enabled.
  bool IsEnabled(CpuFeature f) {
    return (enabled_cpu_features_ & (static_cast<uint64_t>(1) << f)) != 0;
  }
  void EnableCpuFeature(CpuFeature f) {
    enabled_cpu_features_ |= (static_cast<uint64_t>(1) << f);
  }

  bool is_constant_pool_available() const {
    if (FLAG_enable_embedded_constant_pool) {
      return constant_pool_available_;
    } else {
      // Embedded constant pool not supported on this architecture.
      UNREACHABLE();
    }
  }

  JumpOptimizationInfo* jump_optimization_info() {
    return jump_optimization_info_;
  }
  void set_jump_optimization_info(JumpOptimizationInfo* jump_opt) {
    jump_optimization_info_ = jump_opt;
  }

  // Overwrite a host NaN with a quiet target NaN.  Used by mksnapshot for
  // cross-snapshotting.
  static void QuietNaN(HeapObject* nan) { }

  int pc_offset() const { return static_cast<int>(pc_ - buffer_); }

  // This function is called when code generation is aborted, so that
  // the assembler can clean up internal data structures.
  virtual void AbortedCodeGeneration() { }

  // Debugging
  void Print(Isolate* isolate);

  static const int kMinimalBufferSize = 4*KB;

  static void FlushICache(void* start, size_t size);
  static void FlushICache(Address start, size_t size) {
    return FlushICache(reinterpret_cast<void*>(start), size);
  }

  // Used to print the name of some special registers.
  static const char* GetSpecialRegisterName(int code) { return "UNKNOWN"; }

 protected:
  // Add 'target' to the {code_targets_} vector, if necessary, and return the
  // offset at which it is stored.
  int AddCodeTarget(Handle<Code> target);
  Handle<Code> GetCodeTarget(intptr_t code_target_index) const;
  // Update the code target at {code_target_index} to {target}.
  void UpdateCodeTarget(intptr_t code_target_index, Handle<Code> target);
  // Reserves space in the code target vector.
  void ReserveCodeTargetSpace(size_t num_of_code_targets) {
    code_targets_.reserve(num_of_code_targets);
  }

  // The buffer into which code and relocation info are generated. It could
  // either be owned by the assembler or be provided externally.
  byte* buffer_;
  int buffer_size_;
  bool own_buffer_;
  std::forward_list<HeapObjectRequest> heap_object_requests_;
  // The program counter, which points into the buffer above and moves forward.
  // TODO(jkummerow): This should probably have type {Address}.
  byte* pc_;

  void set_constant_pool_available(bool available) {
    if (FLAG_enable_embedded_constant_pool) {
      constant_pool_available_ = available;
    } else {
      // Embedded constant pool not supported on this architecture.
      UNREACHABLE();
    }
  }

  // {RequestHeapObject} records the need for a future heap number allocation or
  // code stub generation. After code assembly, each platform's
  // {Assembler::AllocateAndInstallRequestedHeapObjects} will allocate these
  // objects and place them where they are expected (determined by the pc offset
  // associated with each request).
  void RequestHeapObject(HeapObjectRequest request);

 private:
  // Before we copy code into the code space, we sometimes cannot encode
  // call/jump code targets as we normally would, as the difference between the
  // instruction's location in the temporary buffer and the call target is not
  // guaranteed to fit in the instruction's offset field. We keep track of the
  // code handles we encounter in calls in this vector, and encode the index of
  // the code handle in the vector instead.
  std::vector<Handle<Code>> code_targets_;

  const AssemblerOptions options_;
  uint64_t enabled_cpu_features_;
  bool emit_debug_code_;
  bool predictable_code_size_;

  // Indicates whether the constant pool can be accessed, which is only possible
  // if the pp register points to the current code object's constant pool.
  bool constant_pool_available_;

  JumpOptimizationInfo* jump_optimization_info_;

  // Constant pool.
  friend class FrameAndConstantPoolScope;
  friend class ConstantPoolUnavailableScope;
};

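// Sketch of the code-target indirection described for {code_targets_} above;
// call() and emit_code_target_index() are placeholders for platform-specific
// emitters. Rather than encoding a target address that may not fit the
// instruction's offset field before the code is copied into code space, the
// assembler encodes an index into the code-target vector.
//
//   void Assembler::call(Handle<Code> target) {
//     int index = AddCodeTarget(target);   // deduplicates, returns the index
//     emit_code_target_index(index);       // resolved when the code is copied
//   }
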
// Avoids emitting debug code during the lifetime of this scope object.
class DontEmitDebugCodeScope BASE_EMBEDDED {
 public:
  explicit DontEmitDebugCodeScope(AssemblerBase* assembler)
      : assembler_(assembler), old_value_(assembler->emit_debug_code()) {
    assembler_->set_emit_debug_code(false);
  }
  ~DontEmitDebugCodeScope() {
    assembler_->set_emit_debug_code(old_value_);
  }
 private:
  AssemblerBase* assembler_;
  bool old_value_;
};

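// Example (EmitUncheckedSequence() is a placeholder for any emitter whose
// --debug-code assertions should be suppressed):
//
//   {
//     DontEmitDebugCodeScope no_debug_code(assembler);
//     EmitUncheckedSequence(assembler);  // no assertion code is emitted here
//   }  // previous emit_debug_code() value is restored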

// Avoids using instructions that vary in size in unpredictable ways between the
// snapshot and the running VM.
class PredictableCodeSizeScope {
 public:
  PredictableCodeSizeScope(AssemblerBase* assembler, int expected_size);
  ~PredictableCodeSizeScope();

 private:
  AssemblerBase* const assembler_;
  int const expected_size_;
  int const start_offset_;
  bool const old_value_;
};


// Enable a specified feature within a scope.
class CpuFeatureScope BASE_EMBEDDED {
 public:
  enum CheckPolicy {
    kCheckSupported,
    kDontCheckSupported,
  };

#ifdef DEBUG
  CpuFeatureScope(AssemblerBase* assembler, CpuFeature f,
                  CheckPolicy check = kCheckSupported);
  ~CpuFeatureScope();

 private:
  AssemblerBase* assembler_;
  uint64_t old_enabled_;
#else
  CpuFeatureScope(AssemblerBase* assembler, CpuFeature f,
                  CheckPolicy check = kCheckSupported) {}
  // Define a destructor to avoid unused variable warnings.
  ~CpuFeatureScope() {}
#endif
};

// CpuFeatures keeps track of which features are supported by the target CPU.
// Supported features must be enabled by a CpuFeatureScope before use.
// Example:
//   if (CpuFeatures::IsSupported(SSE3)) {
//     CpuFeatureScope fscope(assembler, SSE3);
//     // Generate code containing SSE3 instructions.
//   } else {
//     // Generate alternative code.
//   }
class CpuFeatures : public AllStatic {
 public:
  static void Probe(bool cross_compile) {
    STATIC_ASSERT(NUMBER_OF_CPU_FEATURES <= kBitsPerInt);
    if (initialized_) return;
    initialized_ = true;
    ProbeImpl(cross_compile);
  }

  static unsigned SupportedFeatures() {
    Probe(false);
    return supported_;
  }

  static bool IsSupported(CpuFeature f) {
    return (supported_ & (1u << f)) != 0;
  }

  static inline bool SupportsOptimizer();

  static inline bool SupportsWasmSimd128();

  static inline unsigned icache_line_size() {
    DCHECK_NE(icache_line_size_, 0);
    return icache_line_size_;
  }

  static inline unsigned dcache_line_size() {
    DCHECK_NE(dcache_line_size_, 0);
    return dcache_line_size_;
  }

  static void PrintTarget();
  static void PrintFeatures();

 private:
  friend class ExternalReference;
  friend class AssemblerBase;
  // Flush instruction cache.
  static void FlushICache(void* start, size_t size);

  // Platform-dependent implementation.
  static void ProbeImpl(bool cross_compile);

  static unsigned supported_;
  static unsigned icache_line_size_;
  static unsigned dcache_line_size_;
  static bool initialized_;
  DISALLOW_COPY_AND_ASSIGN(CpuFeatures);
};

// -----------------------------------------------------------------------------
// Utility functions

// Computes pow(x, y) with the special cases in the spec for Math.pow.
double power_helper(Isolate* isolate, double x, double y);
double power_double_int(double x, int y);
double power_double_double(double x, double y);

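// Worked example of why these helpers exist instead of calling pow() directly:
// the ECMAScript spec and the C library disagree on some inputs. For instance,
// Math.pow(1, Infinity) is NaN per the spec, while C's pow(1.0, INFINITY)
// returns 1.0; assuming these helpers implement the spec's special-case table,
// power_double_double(1.0, +infinity) is expected to yield NaN rather than 1.0.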

// -----------------------------------------------------------------------------
// Constant pool support

class ConstantPoolEntry {
 public:
  ConstantPoolEntry() {}
  ConstantPoolEntry(int position, intptr_t value, bool sharing_ok,
                    RelocInfo::Mode rmode = RelocInfo::NONE)
      : position_(position),
        merged_index_(sharing_ok ? SHARING_ALLOWED : SHARING_PROHIBITED),
        value_(value),
        rmode_(rmode) {}
  ConstantPoolEntry(int position, Double value,
                    RelocInfo::Mode rmode = RelocInfo::NONE)
      : position_(position),
        merged_index_(SHARING_ALLOWED),
        value64_(value.AsUint64()),
        rmode_(rmode) {}

  int position() const { return position_; }
  bool sharing_ok() const { return merged_index_ != SHARING_PROHIBITED; }
  bool is_merged() const { return merged_index_ >= 0; }
  int merged_index(void) const {
    DCHECK(is_merged());
    return merged_index_;
  }
  void set_merged_index(int index) {
    DCHECK(sharing_ok());
    merged_index_ = index;
    DCHECK(is_merged());
  }
  int offset(void) const {
    DCHECK_GE(merged_index_, 0);
    return merged_index_;
  }
  void set_offset(int offset) {
    DCHECK_GE(offset, 0);
    merged_index_ = offset;
  }
  intptr_t value() const { return value_; }
  uint64_t value64() const { return value64_; }
  RelocInfo::Mode rmode() const { return rmode_; }

  enum Type { INTPTR, DOUBLE, NUMBER_OF_TYPES };

  static int size(Type type) {
    return (type == INTPTR) ? kPointerSize : kDoubleSize;
  }

  enum Access { REGULAR, OVERFLOWED };

 private:
  int position_;
  int merged_index_;
  union {
    intptr_t value_;
    uint64_t value64_;
  };
  // TODO(leszeks): The way we use this, it could probably be packed into
  // merged_index_ if size is a concern.
  RelocInfo::Mode rmode_;
  enum { SHARING_PROHIBITED = -2, SHARING_ALLOWED = -1 };
};


// -----------------------------------------------------------------------------
// Embedded constant pool support

class ConstantPoolBuilder BASE_EMBEDDED {
 public:
  ConstantPoolBuilder(int ptr_reach_bits, int double_reach_bits);

  // Add pointer-sized constant to the embedded constant pool
  ConstantPoolEntry::Access AddEntry(int position, intptr_t value,
                                     bool sharing_ok) {
    ConstantPoolEntry entry(position, value, sharing_ok);
    return AddEntry(entry, ConstantPoolEntry::INTPTR);
  }

  // Add double constant to the embedded constant pool
  ConstantPoolEntry::Access AddEntry(int position, Double value) {
    ConstantPoolEntry entry(position, value);
    return AddEntry(entry, ConstantPoolEntry::DOUBLE);
  }

  // Add double constant to the embedded constant pool
  ConstantPoolEntry::Access AddEntry(int position, double value) {
    return AddEntry(position, Double(value));
  }

  // Previews the access type required for the next new entry to be added.
  ConstantPoolEntry::Access NextAccess(ConstantPoolEntry::Type type) const;

  bool IsEmpty() {
    return info_[ConstantPoolEntry::INTPTR].entries.empty() &&
           info_[ConstantPoolEntry::INTPTR].shared_entries.empty() &&
           info_[ConstantPoolEntry::DOUBLE].entries.empty() &&
           info_[ConstantPoolEntry::DOUBLE].shared_entries.empty();
  }

  // Emit the constant pool.  Invoke only after all entries have been
  // added and all instructions have been emitted.
  // Returns position of the emitted pool (zero implies no constant pool).
  int Emit(Assembler* assm);

  // Returns the label associated with the start of the constant pool.
  // Linking to this label in the function prologue may provide an
  // efficient means of constant pool pointer register initialization
  // on some architectures.
  inline Label* EmittedPosition() { return &emitted_label_; }

 private:
  ConstantPoolEntry::Access AddEntry(ConstantPoolEntry& entry,
                                     ConstantPoolEntry::Type type);
  void EmitSharedEntries(Assembler* assm, ConstantPoolEntry::Type type);
  void EmitGroup(Assembler* assm, ConstantPoolEntry::Access access,
                 ConstantPoolEntry::Type type);

  struct PerTypeEntryInfo {
    PerTypeEntryInfo() : regular_count(0), overflow_start(-1) {}
    bool overflow() const {
      return (overflow_start >= 0 &&
              overflow_start < static_cast<int>(entries.size()));
    }
    int regular_reach_bits;
    int regular_count;
    int overflow_start;
    std::vector<ConstantPoolEntry> entries;
    std::vector<ConstantPoolEntry> shared_entries;
  };

  Label emitted_label_;  // Records pc_offset of emitted pool
  PerTypeEntryInfo info_[ConstantPoolEntry::NUMBER_OF_TYPES];
};

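// Sketch of the expected usage on a port with an embedded constant pool; the
// reach-bit constants, imm_value, and the surrounding emission loop are
// illustrative only.
//
//   ConstantPoolBuilder builder(kPtrReachBits, kDoubleReachBits);
//   // While emitting, record each constant together with the pc offset of the
//   // instruction that will load it:
//   ConstantPoolEntry::Access access =
//       builder.AddEntry(assm->pc_offset(), imm_value, /* sharing_ok */ true);
//   // ... emit a load whose encoding depends on {access} ...
//   // After the last instruction, append the pool itself:
//   int pool_position = builder.Emit(assm);
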
// Base type for CPU Registers.
//
// 1) We would prefer to use an enum for registers, but enum values are
// assignment-compatible with int, which has caused code-generation bugs.
//
// 2) By not using an enum, we are possibly preventing the compiler from
// doing certain constant folds, which may significantly reduce the
// code generated for some assembly instructions (because they boil down
// to a few constants). If this is a problem, we could change the code
// such that we use an enum in optimized mode, and the class in debug
// mode. This way we get the compile-time error checking in debug mode
// and best performance in optimized code.
template <typename SubType, int kAfterLastRegister>
class RegisterBase {
  // Internal enum class; used for calling constexpr methods, where we need to
  // pass an integral type as template parameter.
  enum class RegisterCode : int { kFirst = 0, kAfterLast = kAfterLastRegister };

 public:
  static constexpr int kCode_no_reg = -1;
  static constexpr int kNumRegisters = kAfterLastRegister;

  static constexpr SubType no_reg() { return SubType{kCode_no_reg}; }

  template <int code>
  static constexpr SubType from_code() {
    static_assert(code >= 0 && code < kNumRegisters, "must be valid reg code");
    return SubType{code};
  }

  constexpr operator RegisterCode() const {
    return static_cast<RegisterCode>(reg_code_);
  }

  template <RegisterCode reg_code>
  static constexpr int code() {
    static_assert(
        reg_code >= RegisterCode::kFirst && reg_code < RegisterCode::kAfterLast,
        "must be valid reg");
    return static_cast<int>(reg_code);
  }

  template <RegisterCode reg_code>
  static constexpr RegList bit() {
    return RegList{1} << code<reg_code>();
  }

  static SubType from_code(int code) {
    DCHECK_LE(0, code);
    DCHECK_GT(kNumRegisters, code);
    return SubType{code};
  }

  // Constexpr version (pass registers as template parameters).
  template <RegisterCode... reg_codes>
  static constexpr RegList ListOf() {
    return CombineRegLists(RegisterBase::bit<reg_codes>()...);
  }

  // Non-constexpr version (pass registers as method parameters).
  template <typename... Register>
  static RegList ListOf(Register... regs) {
    return CombineRegLists(regs.bit()...);
  }

  bool is_valid() const { return reg_code_ != kCode_no_reg; }

  int code() const {
    DCHECK(is_valid());
    return reg_code_;
  }

  RegList bit() const { return RegList{1} << code(); }

  inline constexpr bool operator==(SubType other) const {
    return reg_code_ == other.reg_code_;
  }
  inline constexpr bool operator!=(SubType other) const {
    return reg_code_ != other.reg_code_;
  }

 protected:
  explicit constexpr RegisterBase(int code) : reg_code_(code) {}
  int reg_code_;
};

template <typename SubType, int kAfterLastRegister>
inline std::ostream& operator<<(std::ostream& os,
                                RegisterBase<SubType, kAfterLastRegister> reg) {
  return reg.is_valid() ? os << "r" << reg.code() : os << "<invalid reg>";
}

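// Sketch of how a port defines its register type on top of RegisterBase; the
// register names and count below are illustrative, not any real register set.
//
//   enum RegisterCode { kRegCode_r0, kRegCode_r1, kRegAfterLast };
//
//   class Register : public RegisterBase<Register, kRegAfterLast> {
//     friend class RegisterBase<Register, kRegAfterLast>;
//     explicit constexpr Register(int code) : RegisterBase(code) {}
//   };
//
//   constexpr Register r0 = Register::from_code<kRegCode_r0>();
//   constexpr Register r1 = Register::from_code<kRegCode_r1>();
//   RegList both = Register::ListOf(r0, r1);  // bitmask covering r0 and r1
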
}  // namespace internal
}  // namespace v8
#endif  // V8_ASSEMBLER_H_