// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/interpreter/bytecode-register-optimizer.h"

namespace v8 {
namespace internal {
namespace interpreter {

const uint32_t BytecodeRegisterOptimizer::kInvalidEquivalenceId;

// A class for tracking the state of a register. This class tracks
// which equivalence set a register is a member of and also whether a
// register is materialized in the bytecode stream.
class BytecodeRegisterOptimizer::RegisterInfo final : public ZoneObject {
 public:
  RegisterInfo(Register reg, uint32_t equivalence_id, bool materialized)
      : register_(reg),
        equivalence_id_(equivalence_id),
        materialized_(materialized),
        next_(this),
        prev_(this) {}

  void AddToEquivalenceSetOf(RegisterInfo* info);
  void MoveToNewEquivalenceSet(uint32_t equivalence_id, bool materialized);
  bool IsOnlyMemberOfEquivalenceSet() const;
  bool IsOnlyMaterializedMemberOfEquivalenceSet() const;
  bool IsInSameEquivalenceSet(RegisterInfo* info) const;

  // Get a member of this register's equivalence set that is
  // materialized. The materialized equivalent will be this register
  // if it is materialized. Returns nullptr if no materialized
  // equivalent exists.
  RegisterInfo* GetMaterializedEquivalent();

  // Get a member of this register's equivalence set that is
  // materialized and is not register |reg|. The materialized
  // equivalent will be this register if it is materialized and is
  // not |reg|. Returns nullptr if no such materialized equivalent exists.
  RegisterInfo* GetMaterializedEquivalentOtherThan(Register reg);

  // Get a member of this register's equivalence set that is intended
  // to be materialized in place of this register (which is currently
  // materialized). The best candidate is deemed to be the register
  // with the lowest index as this permits temporary registers to be
  // removed from the bytecode stream. Returns nullptr if no candidate
  // exists.
  RegisterInfo* GetEquivalentToMaterialize();

  // Get an equivalent register. Returns this if none exists.
  RegisterInfo* GetEquivalent();

  Register register_value() const { return register_; }
  bool materialized() const { return materialized_; }
  void set_materialized(bool materialized) { materialized_ = materialized; }
  void set_equivalence_id(uint32_t equivalence_id) {
    equivalence_id_ = equivalence_id;
  }
  uint32_t equivalence_id() const { return equivalence_id_; }

 private:
  Register register_;
  uint32_t equivalence_id_;
  bool materialized_;

  // Equivalence set pointers.
  RegisterInfo* next_;
  RegisterInfo* prev_;

  DISALLOW_COPY_AND_ASSIGN(RegisterInfo);
};

void BytecodeRegisterOptimizer::RegisterInfo::AddToEquivalenceSetOf(
    RegisterInfo* info) {
  DCHECK_NE(kInvalidEquivalenceId, info->equivalence_id());
  // Fix old list
  next_->prev_ = prev_;
  prev_->next_ = next_;
  // Add to new list.
  next_ = info->next_;
  prev_ = info;
  prev_->next_ = this;
  next_->prev_ = this;
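  // Adopt the equivalence id of |info|'s set. This register no longer
  // holds a materialized copy of the set's value until it is explicitly
  // written.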
  set_equivalence_id(info->equivalence_id());
  set_materialized(false);
}

void BytecodeRegisterOptimizer::RegisterInfo::MoveToNewEquivalenceSet(
    uint32_t equivalence_id, bool materialized) {
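  // Unlink from the current circular list and become the sole member of a
  // fresh equivalence set.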
  next_->prev_ = prev_;
  prev_->next_ = next_;
  next_ = prev_ = this;
  equivalence_id_ = equivalence_id;
  materialized_ = materialized;
}

bool BytecodeRegisterOptimizer::RegisterInfo::IsOnlyMemberOfEquivalenceSet()
    const {
  return this->next_ == this;
}

bool BytecodeRegisterOptimizer::RegisterInfo::
    IsOnlyMaterializedMemberOfEquivalenceSet() const {
  DCHECK(materialized());

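  // Walk the rest of the circular list looking for another materialized
  // member.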
  const RegisterInfo* visitor = this->next_;
  while (visitor != this) {
    if (visitor->materialized()) {
      return false;
    }
    visitor = visitor->next_;
  }
  return true;
}

bool BytecodeRegisterOptimizer::RegisterInfo::IsInSameEquivalenceSet(
    RegisterInfo* info) const {
  return equivalence_id() == info->equivalence_id();
}

BytecodeRegisterOptimizer::RegisterInfo*
BytecodeRegisterOptimizer::RegisterInfo::GetMaterializedEquivalent() {
  RegisterInfo* visitor = this;
  do {
    if (visitor->materialized()) {
      return visitor;
    }
    visitor = visitor->next_;
  } while (visitor != this);

  return nullptr;
}

BytecodeRegisterOptimizer::RegisterInfo*
BytecodeRegisterOptimizer::RegisterInfo::GetMaterializedEquivalentOtherThan(
    Register reg) {
  RegisterInfo* visitor = this;
  do {
    if (visitor->materialized() && visitor->register_value() != reg) {
      return visitor;
    }
    visitor = visitor->next_;
  } while (visitor != this);

  return nullptr;
}

BytecodeRegisterOptimizer::RegisterInfo*
BytecodeRegisterOptimizer::RegisterInfo::GetEquivalentToMaterialize() {
  DCHECK(this->materialized());
  RegisterInfo* visitor = this->next_;
  RegisterInfo* best_info = nullptr;
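  // Prefer the equivalent with the lowest register index so that higher
  // numbered temporaries can be freed. If another materialized member is
  // found, there is nothing to materialize.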
  while (visitor != this) {
    if (visitor->materialized()) {
      return nullptr;
    }
    if (best_info == nullptr ||
        visitor->register_value() < best_info->register_value()) {
      best_info = visitor;
    }
    visitor = visitor->next_;
  }
  return best_info;
}

BytecodeRegisterOptimizer::RegisterInfo*
BytecodeRegisterOptimizer::RegisterInfo::GetEquivalent() {
  return next_;
}

BytecodeRegisterOptimizer::BytecodeRegisterOptimizer(
    Zone* zone, TemporaryRegisterAllocator* register_allocator,
    int parameter_count, BytecodePipelineStage* next_stage)
    : accumulator_(Register::virtual_accumulator()),
      temporary_base_(register_allocator->allocation_base()),
      register_info_table_(zone),
      equivalence_id_(0),
      next_stage_(next_stage),
      flush_required_(false),
      zone_(zone) {
  register_allocator->set_observer(this);

  // Calculate offset so register index values can be mapped into
  // a vector of register metadata.
  if (parameter_count != 0) {
    register_info_table_offset_ =
        -Register::FromParameterIndex(0, parameter_count).index();
  } else {
    // TODO(oth): This path shouldn't be necessary in bytecode generated
    // from JavaScript, but some tests do not include the JS receiver.
    register_info_table_offset_ = -accumulator_.index();
  }

  // Initialize register map for parameters, locals, and the
  // accumulator.
  register_info_table_.resize(register_info_table_offset_ +
                              static_cast<size_t>(temporary_base_.index()));
  for (size_t i = 0; i < register_info_table_.size(); ++i) {
    register_info_table_[i] = new (zone) RegisterInfo(
        RegisterFromRegisterInfoTableIndex(i), NextEquivalenceId(), true);
    DCHECK_EQ(register_info_table_[i]->register_value().index(),
              RegisterFromRegisterInfoTableIndex(i).index());
  }
  accumulator_info_ = GetRegisterInfo(accumulator_);
  DCHECK(accumulator_info_->register_value() == accumulator_);
}

// override
Handle<BytecodeArray> BytecodeRegisterOptimizer::ToBytecodeArray(
    int fixed_register_count, int parameter_count,
    Handle<FixedArray> handler_table) {
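  // Flush so that every live value is materialized in its register before
  // the bytecode array is finalized.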
  FlushState();
  return next_stage_->ToBytecodeArray(fixed_register_count, parameter_count,
                                      handler_table);
}

// override
void BytecodeRegisterOptimizer::Write(BytecodeNode* node) {
  //
  // Transfers with observable registers as the destination will be
  // immediately materialized so the source position information will
  // be ordered correctly.
  //
  // Transfers without observable destination registers will initially
  // be emitted as Nops with the source position. They may, or may
  // not, be materialized by the optimizer. However, the source
  // position is not lost and being attached to a Nop is fine as the
  // destination register is not observable in the debugger.
  //
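  // An illustrative sketch: for the sequence
  //   Ldar r0   ;; accumulator becomes equivalent to r0
  //   Star r1   ;; r1 joins the same equivalence set
  //   <op> r1   ;; some later bytecode reads r1
  // the Star can be elided and the read rewritten to use r0, provided r1
  // is a temporary register and therefore not observable.
  //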
  switch (node->bytecode()) {
    case Bytecode::kLdar: {
      DoLdar(node);
      return;
    }
    case Bytecode::kStar: {
      DoStar(node);
      return;
    }
    case Bytecode::kMov: {
      DoMov(node);
      return;
    }
    default:
      break;
  }

  if (Bytecodes::IsJump(node->bytecode()) ||
      node->bytecode() == Bytecode::kDebugger ||
      node->bytecode() == Bytecode::kSuspendGenerator) {
    // All state must be flushed before emitting
    // - a jump (due to how bytecode offsets for jumps are evaluated),
    // - a call to the debugger (as it can manipulate locals and parameters),
    // - a generator suspend (as this involves saving all registers).
    FlushState();
  }

  PrepareOperands(node);
  WriteToNextStage(node);
}

// override
void BytecodeRegisterOptimizer::WriteJump(BytecodeNode* node,
                                          BytecodeLabel* label) {
  FlushState();
  next_stage_->WriteJump(node, label);
}

// override
void BytecodeRegisterOptimizer::BindLabel(BytecodeLabel* label) {
  FlushState();
  next_stage_->BindLabel(label);
}

// override
void BytecodeRegisterOptimizer::BindLabel(const BytecodeLabel& target,
                                          BytecodeLabel* label) {
  // There is no need to flush here; the state will have been flushed when
  // |target| was bound.
  next_stage_->BindLabel(target, label);
}

void BytecodeRegisterOptimizer::FlushState() {
  if (!flush_required_) {
    return;
  }

  // Materialize all live registers and break equivalences.
  size_t count = register_info_table_.size();
  for (size_t i = 0; i < count; ++i) {
    RegisterInfo* reg_info = register_info_table_[i];
    if (reg_info->materialized()) {
      // Walk the equivalents of each materialized register, materializing
      // each equivalent register as necessary and placing each in its own
      // equivalence set.
      RegisterInfo* equivalent;
      while ((equivalent = reg_info->GetEquivalent()) != reg_info) {
        if (!equivalent->materialized()) {
          OutputRegisterTransfer(reg_info, equivalent);
        }
        equivalent->MoveToNewEquivalenceSet(NextEquivalenceId(), true);
      }
    }
  }

  flush_required_ = false;
}

void BytecodeRegisterOptimizer::WriteToNextStage(BytecodeNode* node) const {
  next_stage_->Write(node);
}

void BytecodeRegisterOptimizer::WriteToNextStage(
    BytecodeNode* node, const BytecodeSourceInfo& source_info) const {
  if (source_info.is_valid()) {
    node->source_info().Clone(source_info);
  }
  next_stage_->Write(node);
}

void BytecodeRegisterOptimizer::OutputRegisterTransfer(
    RegisterInfo* input_info, RegisterInfo* output_info,
    const BytecodeSourceInfo& source_info) {
  Register input = input_info->register_value();
  Register output = output_info->register_value();
  DCHECK_NE(input.index(), output.index());

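  // Transfers involving the accumulator use the accumulator-specific
  // bytecodes (Star/Ldar); register-to-register transfers require a Mov.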
  if (input == accumulator_) {
    uint32_t operand = static_cast<uint32_t>(output.ToOperand());
    BytecodeNode node(Bytecode::kStar, operand);
    WriteToNextStage(&node, source_info);
  } else if (output == accumulator_) {
    uint32_t operand = static_cast<uint32_t>(input.ToOperand());
    BytecodeNode node(Bytecode::kLdar, operand);
    WriteToNextStage(&node, source_info);
  } else {
    uint32_t operand0 = static_cast<uint32_t>(input.ToOperand());
    uint32_t operand1 = static_cast<uint32_t>(output.ToOperand());
    BytecodeNode node(Bytecode::kMov, operand0, operand1);
    WriteToNextStage(&node, source_info);
  }
  output_info->set_materialized(true);
}

void BytecodeRegisterOptimizer::CreateMaterializedEquivalent(
    RegisterInfo* info) {
  DCHECK(info->materialized());
  RegisterInfo* unmaterialized = info->GetEquivalentToMaterialize();
  if (unmaterialized) {
    OutputRegisterTransfer(info, unmaterialized);
  }
}

BytecodeRegisterOptimizer::RegisterInfo*
BytecodeRegisterOptimizer::GetMaterializedEquivalent(RegisterInfo* info) {
  return info->materialized() ? info : info->GetMaterializedEquivalent();
}

BytecodeRegisterOptimizer::RegisterInfo*
BytecodeRegisterOptimizer::GetMaterializedEquivalentNotAccumulator(
    RegisterInfo* info) {
  if (info->materialized()) {
    return info;
  }

  RegisterInfo* result = info->GetMaterializedEquivalentOtherThan(accumulator_);
  if (result == nullptr) {
    Materialize(info);
    result = info;
  }
  DCHECK(result->register_value() != accumulator_);
  return result;
}

void BytecodeRegisterOptimizer::Materialize(RegisterInfo* info) {
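  // Emit a transfer from a materialized member of the equivalence set into
  // |info|'s register.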
  if (!info->materialized()) {
    RegisterInfo* materialized = info->GetMaterializedEquivalent();
    OutputRegisterTransfer(materialized, info);
  }
}

void BytecodeRegisterOptimizer::AddToEquivalenceSet(
    RegisterInfo* set_member, RegisterInfo* non_set_member) {
  non_set_member->AddToEquivalenceSetOf(set_member);
  // Flushing is only required when two or more registers are placed
  // in the same equivalence set.
  flush_required_ = true;
}

void BytecodeRegisterOptimizer::RegisterTransfer(
    RegisterInfo* input_info, RegisterInfo* output_info,
    const BytecodeSourceInfo& source_info) {
  // Materialize an alternate in the equivalence set that
  // |output_info| is leaving.
  if (output_info->materialized()) {
    CreateMaterializedEquivalent(output_info);
  }

  // Add |output_info| to new equivalence set.
  if (!output_info->IsInSameEquivalenceSet(input_info)) {
    AddToEquivalenceSet(input_info, output_info);
  }

  bool output_is_observable =
      RegisterIsObservable(output_info->register_value());
  if (output_is_observable) {
    // Force store to be emitted when register is observable.
    output_info->set_materialized(false);
    RegisterInfo* materialized_info = input_info->GetMaterializedEquivalent();
    OutputRegisterTransfer(materialized_info, output_info, source_info);
  } else if (source_info.is_valid()) {
    // Emit a placeholder nop to maintain source position info.
    EmitNopForSourceInfo(source_info);
  }
}

void BytecodeRegisterOptimizer::EmitNopForSourceInfo(
    const BytecodeSourceInfo& source_info) const {
  DCHECK(source_info.is_valid());
  BytecodeNode nop(Bytecode::kNop);
  nop.source_info().Clone(source_info);
  WriteToNextStage(&nop);
}

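// Ldar, Star and Mov are pure register transfers. The handlers below only
// update equivalence state and delegate to RegisterTransfer(), which decides
// whether a transfer (or a Nop carrying the source position) must be emitted.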
void BytecodeRegisterOptimizer::DoLdar(const BytecodeNode* const node) {
  Register input = GetRegisterInputOperand(
      0, node->bytecode(), node->operands(), node->operand_count());
  RegisterInfo* input_info = GetRegisterInfo(input);
  RegisterTransfer(input_info, accumulator_info_, node->source_info());
}

void BytecodeRegisterOptimizer::DoMov(const BytecodeNode* const node) {
  Register input = GetRegisterInputOperand(
      0, node->bytecode(), node->operands(), node->operand_count());
  RegisterInfo* input_info = GetRegisterInfo(input);
  Register output = GetRegisterOutputOperand(
      1, node->bytecode(), node->operands(), node->operand_count());
  RegisterInfo* output_info = GetOrCreateRegisterInfo(output);
  RegisterTransfer(input_info, output_info, node->source_info());
}

void BytecodeRegisterOptimizer::DoStar(const BytecodeNode* const node) {
  Register output = GetRegisterOutputOperand(
      0, node->bytecode(), node->operands(), node->operand_count());
  RegisterInfo* output_info = GetOrCreateRegisterInfo(output);
  RegisterTransfer(accumulator_info_, output_info, node->source_info());
}

void BytecodeRegisterOptimizer::PrepareRegisterOutputOperand(
    RegisterInfo* reg_info) {
  if (reg_info->materialized()) {
    CreateMaterializedEquivalent(reg_info);
  }
  reg_info->MoveToNewEquivalenceSet(NextEquivalenceId(), true);
}

void BytecodeRegisterOptimizer::PrepareRegisterRangeOutputOperand(
    Register start, int count) {
  for (int i = 0; i < count; ++i) {
    Register reg(start.index() + i);
    RegisterInfo* reg_info = GetOrCreateRegisterInfo(reg);
    PrepareRegisterOutputOperand(reg_info);
  }
}

Register BytecodeRegisterOptimizer::GetEquivalentRegisterForInputOperand(
    Register reg) {
  // For a temporary register, RegInfo state may need to be created. For
  // locals and parameters, the RegInfo state is created in the
  // BytecodeRegisterOptimizer constructor.
  RegisterInfo* reg_info = GetOrCreateRegisterInfo(reg);
  if (reg_info->materialized()) {
    return reg;
  } else {
    RegisterInfo* equivalent_info =
        GetMaterializedEquivalentNotAccumulator(reg_info);
    return equivalent_info->register_value();
  }
}

void BytecodeRegisterOptimizer::PrepareRegisterInputOperand(
    BytecodeNode* const node, Register reg, int operand_index) {
  Register equivalent = GetEquivalentRegisterForInputOperand(reg);
  node->operands()[operand_index] =
      static_cast<uint32_t>(equivalent.ToOperand());
}

void BytecodeRegisterOptimizer::PrepareRegisterRangeInputOperand(Register start,
                                                                 int count) {
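  // A register range is consumed by the dispatched bytecode as a block, so
  // every register in the range must hold its own value.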
  for (int i = 0; i < count; ++i) {
    Register current(start.index() + i);
    RegisterInfo* input_info = GetRegisterInfo(current);
    Materialize(input_info);
  }
}

void BytecodeRegisterOptimizer::PrepareRegisterOperands(
    BytecodeNode* const node) {
  //
  // For each input operand, get a materialized equivalent if it is
  // just a single register, otherwise materialize the register range.
  // Update operand_scale if necessary.
  //
  // For each output register about to be clobbered, materialize an
  // equivalent if it exists. Put each register in its own equivalence set.
  //
  int register_operand_bitmap =
      Bytecodes::GetRegisterOperandBitmap(node->bytecode());
  const OperandType* operand_types =
      Bytecodes::GetOperandTypes(node->bytecode());
  uint32_t* operands = node->operands();
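  // Walk the operands, visiting only those that are registers. A following
  // kRegCount operand extends the preceding register operand into a range.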
  for (int i = 0; register_operand_bitmap != 0;
       ++i, register_operand_bitmap >>= 1) {
    if ((register_operand_bitmap & 1) == 0) {
      continue;
    }
    OperandType operand_type = operand_types[i];
    int count = 0;
    if (operand_types[i + 1] == OperandType::kRegCount) {
      count = static_cast<int>(operands[i + 1]);
      if (count == 0) {
        continue;
      }
    } else {
      count = Bytecodes::GetNumberOfRegistersRepresentedBy(operand_type);
    }

    Register reg = Register::FromOperand(static_cast<int32_t>(operands[i]));
    if (Bytecodes::IsRegisterInputOperandType(operand_type)) {
      if (count == 1) {
        PrepareRegisterInputOperand(node, reg, i);
      } else if (count > 1) {
        PrepareRegisterRangeInputOperand(reg, count);
      }
    } else if (Bytecodes::IsRegisterOutputOperandType(operand_type)) {
      PrepareRegisterRangeOutputOperand(reg, count);
    }
  }
}

void BytecodeRegisterOptimizer::PrepareAccumulator(BytecodeNode* const node) {
  // Materialize the accumulator if it is read by the bytecode. The
  // accumulator is special and no other register can be materialized
  // in its place.
  if (Bytecodes::ReadsAccumulator(node->bytecode()) &&
      !accumulator_info_->materialized()) {
    Materialize(accumulator_info_);
  }

  // Materialize an equivalent to the accumulator if it will be
  // clobbered when the bytecode is dispatched.
  if (Bytecodes::WritesAccumulator(node->bytecode())) {
    PrepareRegisterOutputOperand(accumulator_info_);
  }
}

void BytecodeRegisterOptimizer::PrepareOperands(BytecodeNode* const node) {
  PrepareAccumulator(node);
  PrepareRegisterOperands(node);
}

// static
Register BytecodeRegisterOptimizer::GetRegisterInputOperand(
    int index, Bytecode bytecode, const uint32_t* operands, int operand_count) {
  DCHECK_LT(index, operand_count);
  DCHECK(Bytecodes::IsRegisterInputOperandType(
      Bytecodes::GetOperandType(bytecode, index)));
  return OperandToRegister(operands[index]);
}

// static
Register BytecodeRegisterOptimizer::GetRegisterOutputOperand(
    int index, Bytecode bytecode, const uint32_t* operands, int operand_count) {
  DCHECK_LT(index, operand_count);
  DCHECK(Bytecodes::IsRegisterOutputOperandType(
      Bytecodes::GetOperandType(bytecode, index)));
  return OperandToRegister(operands[index]);
}

BytecodeRegisterOptimizer::RegisterInfo*
BytecodeRegisterOptimizer::GetRegisterInfo(Register reg) {
  size_t index = GetRegisterInfoTableIndex(reg);
  return (index < register_info_table_.size()) ? register_info_table_[index]
                                               : nullptr;
}

BytecodeRegisterOptimizer::RegisterInfo*
BytecodeRegisterOptimizer::GetOrCreateRegisterInfo(Register reg) {
  size_t index = GetRegisterInfoTableIndex(reg);
  return index < register_info_table_.size() ? register_info_table_[index]
                                             : NewRegisterInfo(reg);
}

BytecodeRegisterOptimizer::RegisterInfo*
BytecodeRegisterOptimizer::NewRegisterInfo(Register reg) {
  size_t index = GetRegisterInfoTableIndex(reg);
  DCHECK_GE(index, register_info_table_.size());
  GrowRegisterMap(reg);
  return register_info_table_[index];
}

void BytecodeRegisterOptimizer::GrowRegisterMap(Register reg) {
  DCHECK(RegisterIsTemporary(reg));
  size_t index = GetRegisterInfoTableIndex(reg);
  DCHECK_GE(index, register_info_table_.size());
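  // Grow the table up to and including |reg|. Newly added temporaries start
  // out unmaterialized, each in its own equivalence set.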
  size_t new_size = index + 1;
  size_t old_size = register_info_table_.size();
  register_info_table_.resize(new_size);
  for (size_t i = old_size; i < new_size; ++i) {
    register_info_table_[i] = new (zone()) RegisterInfo(
        RegisterFromRegisterInfoTableIndex(i), NextEquivalenceId(), false);
  }
}

void BytecodeRegisterOptimizer::TemporaryRegisterFreeEvent(Register reg) {
  RegisterInfo* info = GetRegisterInfo(reg);
  if (info != nullptr) {
    // If the register is materialized and part of an equivalence set, make
    // sure another member of the set holds the value before the temporary
    // register is removed.
    if (info->materialized()) {
      CreateMaterializedEquivalent(info);
    }
    info->MoveToNewEquivalenceSet(kInvalidEquivalenceId, false);
  }
}

}  // namespace interpreter
}  // namespace internal
}  // namespace v8