/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "register_allocator_linear_scan.h"

#include <iostream>
#include <sstream>

#include "base/bit_vector-inl.h"
#include "base/enums.h"
#include "code_generator.h"
#include "linear_order.h"
#include "register_allocation_resolver.h"
#include "ssa_liveness_analysis.h"

namespace art {

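// Note: -1 converts to the largest size_t value, so kMaxLifetimePosition acts
// as an "unbounded" sentinel: a register whose free_until/next_use entry holds
// this value is available (or not needed) for the rest of the method.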
static constexpr size_t kMaxLifetimePosition = -1;
static constexpr size_t kDefaultNumberOfSpillSlots = 4;

// For simplicity, we implement register pairs as (reg, reg + 1).
// Note that this is a requirement for double registers on ARM, since we
// allocate SRegister.
static int GetHighForLowRegister(int reg) { return reg + 1; }
static bool IsLowRegister(int reg) { return (reg & 1) == 0; }
static bool IsLowOfUnalignedPairInterval(LiveInterval* low) {
  return GetHighForLowRegister(low->GetRegister()) != low->GetHighInterval()->GetRegister();
}

RegisterAllocatorLinearScan::RegisterAllocatorLinearScan(ScopedArenaAllocator* allocator,
                                                         CodeGenerator* codegen,
                                                         const SsaLivenessAnalysis& liveness)
      : RegisterAllocator(allocator, codegen, liveness),
        unhandled_core_intervals_(allocator->Adapter(kArenaAllocRegisterAllocator)),
        unhandled_fp_intervals_(allocator->Adapter(kArenaAllocRegisterAllocator)),
        unhandled_(nullptr),
        handled_(allocator->Adapter(kArenaAllocRegisterAllocator)),
        active_(allocator->Adapter(kArenaAllocRegisterAllocator)),
        inactive_(allocator->Adapter(kArenaAllocRegisterAllocator)),
        physical_core_register_intervals_(allocator->Adapter(kArenaAllocRegisterAllocator)),
        physical_fp_register_intervals_(allocator->Adapter(kArenaAllocRegisterAllocator)),
        temp_intervals_(allocator->Adapter(kArenaAllocRegisterAllocator)),
        int_spill_slots_(allocator->Adapter(kArenaAllocRegisterAllocator)),
        long_spill_slots_(allocator->Adapter(kArenaAllocRegisterAllocator)),
        float_spill_slots_(allocator->Adapter(kArenaAllocRegisterAllocator)),
        double_spill_slots_(allocator->Adapter(kArenaAllocRegisterAllocator)),
        catch_phi_spill_slots_(0),
        safepoints_(allocator->Adapter(kArenaAllocRegisterAllocator)),
        processing_core_registers_(false),
        number_of_registers_(-1),
        registers_array_(nullptr),
        blocked_core_registers_(codegen->GetBlockedCoreRegisters()),
        blocked_fp_registers_(codegen->GetBlockedFloatingPointRegisters()),
        reserved_out_slots_(0) {
  temp_intervals_.reserve(4);
  int_spill_slots_.reserve(kDefaultNumberOfSpillSlots);
  long_spill_slots_.reserve(kDefaultNumberOfSpillSlots);
  float_spill_slots_.reserve(kDefaultNumberOfSpillSlots);
  double_spill_slots_.reserve(kDefaultNumberOfSpillSlots);

  codegen->SetupBlockedRegisters();
  physical_core_register_intervals_.resize(codegen->GetNumberOfCoreRegisters(), nullptr);
  physical_fp_register_intervals_.resize(codegen->GetNumberOfFloatingPointRegisters(), nullptr);
  // Always reserve for the current method and the graph's max out registers.
  // TODO: compute it instead.
  // ArtMethod* takes 2 vregs for 64 bits.
  size_t ptr_size = static_cast<size_t>(InstructionSetPointerSize(codegen->GetInstructionSet()));
  reserved_out_slots_ = ptr_size / kVRegSize + codegen->GetGraph()->GetMaximumNumberOfOutVRegs();
}

RegisterAllocatorLinearScan::~RegisterAllocatorLinearScan() {}

static bool ShouldProcess(bool processing_core_registers, LiveInterval* interval) {
  if (interval == nullptr) return false;
  bool is_core_register = (interval->GetType() != DataType::Type::kFloat64)
      && (interval->GetType() != DataType::Type::kFloat32);
  return processing_core_registers == is_core_register;
}

void RegisterAllocatorLinearScan::AllocateRegisters() {
  AllocateRegistersInternal();
  RegisterAllocationResolver(codegen_, liveness_)
      .Resolve(ArrayRef<HInstruction* const>(safepoints_),
               reserved_out_slots_,
               int_spill_slots_.size(),
               long_spill_slots_.size(),
               float_spill_slots_.size(),
               double_spill_slots_.size(),
               catch_phi_spill_slots_,
               ArrayRef<LiveInterval* const>(temp_intervals_));

  if (kIsDebugBuild) {
    processing_core_registers_ = true;
    ValidateInternal(true);
    processing_core_registers_ = false;
    ValidateInternal(true);
    // Check that the linear order is still correct with regard to lifetime positions.
    // Since only parallel moves have been inserted during register allocation,
    // these checks are mostly for making sure these moves have been added correctly.
    size_t current_liveness = 0;
    for (HBasicBlock* block : codegen_->GetGraph()->GetLinearOrder()) {
      for (HInstructionIterator inst_it(block->GetPhis()); !inst_it.Done(); inst_it.Advance()) {
        HInstruction* instruction = inst_it.Current();
        DCHECK_LE(current_liveness, instruction->GetLifetimePosition());
        current_liveness = instruction->GetLifetimePosition();
      }
      for (HInstructionIterator inst_it(block->GetInstructions());
           !inst_it.Done();
           inst_it.Advance()) {
        HInstruction* instruction = inst_it.Current();
        DCHECK_LE(current_liveness, instruction->GetLifetimePosition()) << instruction->DebugName();
        current_liveness = instruction->GetLifetimePosition();
      }
    }
  }
}

void RegisterAllocatorLinearScan::BlockRegister(Location location, size_t start, size_t end) {
  int reg = location.reg();
  DCHECK(location.IsRegister() || location.IsFpuRegister());
  LiveInterval* interval = location.IsRegister()
      ? physical_core_register_intervals_[reg]
      : physical_fp_register_intervals_[reg];
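  // The exact type is irrelevant for a fixed interval; it only needs to select
  // the right register file (core vs floating-point).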
  DataType::Type type = location.IsRegister()
      ? DataType::Type::kInt32
      : DataType::Type::kFloat32;
  if (interval == nullptr) {
    interval = LiveInterval::MakeFixedInterval(allocator_, reg, type);
    if (location.IsRegister()) {
      physical_core_register_intervals_[reg] = interval;
    } else {
      physical_fp_register_intervals_[reg] = interval;
    }
  }
  DCHECK(interval->GetRegister() == reg);
  interval->AddRange(start, end);
}

void RegisterAllocatorLinearScan::BlockRegisters(size_t start, size_t end, bool caller_save_only) {
  for (size_t i = 0; i < codegen_->GetNumberOfCoreRegisters(); ++i) {
    if (!caller_save_only || !codegen_->IsCoreCalleeSaveRegister(i)) {
      BlockRegister(Location::RegisterLocation(i), start, end);
    }
  }
  for (size_t i = 0; i < codegen_->GetNumberOfFloatingPointRegisters(); ++i) {
    if (!caller_save_only || !codegen_->IsFloatingPointCalleeSaveRegister(i)) {
      BlockRegister(Location::FpuRegisterLocation(i), start, end);
    }
  }
}

void RegisterAllocatorLinearScan::AllocateRegistersInternal() {
  // Iterate post-order, to ensure the list is sorted, and the last added interval
  // is the one with the lowest start position.
  for (HBasicBlock* block : codegen_->GetGraph()->GetLinearPostOrder()) {
    for (HBackwardInstructionIterator back_it(block->GetInstructions()); !back_it.Done();
         back_it.Advance()) {
      ProcessInstruction(back_it.Current());
    }
    for (HInstructionIterator inst_it(block->GetPhis()); !inst_it.Done(); inst_it.Advance()) {
      ProcessInstruction(inst_it.Current());
    }

    if (block->IsCatchBlock() ||
        (block->IsLoopHeader() && block->GetLoopInformation()->IsIrreducible())) {
      // By blocking all registers at the top of each catch block or irreducible loop, we force
      // intervals belonging to the live-in set of the catch/header block to be spilled.
      // TODO(ngeoffray): Phis in this block could be allocated in registers.
      size_t position = block->GetLifetimeStart();
      BlockRegisters(position, position + 1);
    }
  }

  number_of_registers_ = codegen_->GetNumberOfCoreRegisters();
  registers_array_ = allocator_->AllocArray<size_t>(number_of_registers_,
                                                    kArenaAllocRegisterAllocator);
  processing_core_registers_ = true;
  unhandled_ = &unhandled_core_intervals_;
  for (LiveInterval* fixed : physical_core_register_intervals_) {
    if (fixed != nullptr) {
      // Fixed interval is added to inactive_ instead of unhandled_.
      // It's also the only type of inactive interval whose start position
      // can be after the current interval during linear scan.
      // Fixed interval is never split and never moves to unhandled_.
      inactive_.push_back(fixed);
    }
  }
  LinearScan();

  inactive_.clear();
  active_.clear();
  handled_.clear();

  number_of_registers_ = codegen_->GetNumberOfFloatingPointRegisters();
  registers_array_ = allocator_->AllocArray<size_t>(number_of_registers_,
                                                    kArenaAllocRegisterAllocator);
  processing_core_registers_ = false;
  unhandled_ = &unhandled_fp_intervals_;
  for (LiveInterval* fixed : physical_fp_register_intervals_) {
    if (fixed != nullptr) {
      // Fixed interval is added to inactive_ instead of unhandled_.
      // It's also the only type of inactive interval whose start position
      // can be after the current interval during linear scan.
      // Fixed interval is never split and never moves to unhandled_.
      inactive_.push_back(fixed);
    }
  }
  LinearScan();
}

void RegisterAllocatorLinearScan::ProcessInstruction(HInstruction* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  size_t position = instruction->GetLifetimePosition();

  if (locations == nullptr) return;

  // Create synthesized intervals for temporaries.
  for (size_t i = 0; i < locations->GetTempCount(); ++i) {
    Location temp = locations->GetTemp(i);
    if (temp.IsRegister() || temp.IsFpuRegister()) {
      BlockRegister(temp, position, position + 1);
      // Ensure that an explicit temporary register is marked as being allocated.
      codegen_->AddAllocatedRegister(temp);
    } else {
      DCHECK(temp.IsUnallocated());
      switch (temp.GetPolicy()) {
        case Location::kRequiresRegister: {
          LiveInterval* interval =
              LiveInterval::MakeTempInterval(allocator_, DataType::Type::kInt32);
          temp_intervals_.push_back(interval);
          interval->AddTempUse(instruction, i);
          unhandled_core_intervals_.push_back(interval);
          break;
        }

        case Location::kRequiresFpuRegister: {
          LiveInterval* interval =
              LiveInterval::MakeTempInterval(allocator_, DataType::Type::kFloat64);
          temp_intervals_.push_back(interval);
          interval->AddTempUse(instruction, i);
          if (codegen_->NeedsTwoRegisters(DataType::Type::kFloat64)) {
            interval->AddHighInterval(/* is_temp */ true);
            LiveInterval* high = interval->GetHighInterval();
            temp_intervals_.push_back(high);
            unhandled_fp_intervals_.push_back(high);
          }
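          // Push the low interval after the high one: the worklist is consumed
          // from the back, so the low half gets processed first.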
          unhandled_fp_intervals_.push_back(interval);
          break;
        }

        default:
          LOG(FATAL) << "Unexpected policy for temporary location "
                     << temp.GetPolicy();
      }
    }
  }

  bool core_register = (instruction->GetType() != DataType::Type::kFloat64)
      && (instruction->GetType() != DataType::Type::kFloat32);

  if (locations->NeedsSafepoint()) {
    if (codegen_->IsLeafMethod()) {
      // TODO: We do this here because we do not want the suspend check to artificially
      // create live registers. We should find another place, but this is currently the
      // simplest.
      DCHECK(instruction->IsSuspendCheckEntry());
      instruction->GetBlock()->RemoveInstruction(instruction);
      return;
    }
    safepoints_.push_back(instruction);
  }

  if (locations->WillCall()) {
    BlockRegisters(position, position + 1, /* caller_save_only */ true);
  }

  for (size_t i = 0; i < locations->GetInputCount(); ++i) {
    Location input = locations->InAt(i);
    if (input.IsRegister() || input.IsFpuRegister()) {
      BlockRegister(input, position, position + 1);
    } else if (input.IsPair()) {
      BlockRegister(input.ToLow(), position, position + 1);
      BlockRegister(input.ToHigh(), position, position + 1);
    }
  }

  LiveInterval* current = instruction->GetLiveInterval();
  if (current == nullptr) return;

  ScopedArenaVector<LiveInterval*>& unhandled = core_register
      ? unhandled_core_intervals_
      : unhandled_fp_intervals_;

  DCHECK(unhandled.empty() || current->StartsBeforeOrAt(unhandled.back()));

  if (codegen_->NeedsTwoRegisters(current->GetType())) {
    current->AddHighInterval();
  }

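  // `safepoints_` is filled while instructions are walked backwards, so it is
  // sorted by decreasing lifetime position. Iterating from the back therefore
  // visits safepoints in increasing position order, which lets us stop early
  // once `current` is dead.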
  for (size_t safepoint_index = safepoints_.size(); safepoint_index > 0; --safepoint_index) {
    HInstruction* safepoint = safepoints_[safepoint_index - 1u];
    size_t safepoint_position = safepoint->GetLifetimePosition();

    // Test that safepoints are ordered in the optimal way.
    DCHECK(safepoint_index == safepoints_.size() ||
           safepoints_[safepoint_index]->GetLifetimePosition() < safepoint_position);

    if (safepoint_position == current->GetStart()) {
      // The safepoint is for this instruction, so the location of the instruction
      // does not need to be saved.
      DCHECK_EQ(safepoint_index, safepoints_.size());
      DCHECK_EQ(safepoint, instruction);
      continue;
    } else if (current->IsDeadAt(safepoint_position)) {
      break;
    } else if (!current->Covers(safepoint_position)) {
      // Hole in the interval.
      continue;
    }
    current->AddSafepoint(safepoint);
  }
  current->ResetSearchCache();

  // Some instructions define their output in a fixed register or stack slot. We need
  // to ensure we know these locations before doing register allocation. For a
  // given register, we create an interval that covers these locations. The register
  // will be unavailable at these locations when trying to allocate one for an
  // interval.
  //
  // The backwards walking ensures the ranges are ordered by increasing start position.
  Location output = locations->Out();
  if (output.IsUnallocated() && output.GetPolicy() == Location::kSameAsFirstInput) {
    Location first = locations->InAt(0);
    if (first.IsRegister() || first.IsFpuRegister()) {
      current->SetFrom(position + 1);
      current->SetRegister(first.reg());
    } else if (first.IsPair()) {
      current->SetFrom(position + 1);
      current->SetRegister(first.low());
      LiveInterval* high = current->GetHighInterval();
      high->SetRegister(first.high());
      high->SetFrom(position + 1);
    }
  } else if (output.IsRegister() || output.IsFpuRegister()) {
    // Shift the interval's start by one to account for the blocked register.
    current->SetFrom(position + 1);
    current->SetRegister(output.reg());
    BlockRegister(output, position, position + 1);
  } else if (output.IsPair()) {
    current->SetFrom(position + 1);
    current->SetRegister(output.low());
    LiveInterval* high = current->GetHighInterval();
    high->SetRegister(output.high());
    high->SetFrom(position + 1);
    BlockRegister(output.ToLow(), position, position + 1);
    BlockRegister(output.ToHigh(), position, position + 1);
  } else if (output.IsStackSlot() || output.IsDoubleStackSlot()) {
    current->SetSpillSlot(output.GetStackIndex());
  } else {
    DCHECK(output.IsUnallocated() || output.IsConstant());
  }

  if (instruction->IsPhi() && instruction->AsPhi()->IsCatchPhi()) {
    AllocateSpillSlotForCatchPhi(instruction->AsPhi());
  }

  // If needed, add interval to the list of unhandled intervals.
  if (current->HasSpillSlot() || instruction->IsConstant()) {
    // Split just before first register use.
    size_t first_register_use = current->FirstRegisterUse();
    if (first_register_use != kNoLifetime) {
      LiveInterval* split = SplitBetween(current, current->GetStart(), first_register_use - 1);
      // Don't add directly to `unhandled`, it needs to be sorted and the start
      // of this new interval might be after intervals already in the list.
      AddSorted(&unhandled, split);
    } else {
      // Nothing to do, we won't allocate a register for this value.
    }
  } else {
    // Don't add directly to `unhandled`, temp or safepoint intervals
    // for this instruction may have been added, and those can be
    // processed first.
    AddSorted(&unhandled, current);
  }
}

class AllRangesIterator : public ValueObject {
 public:
  explicit AllRangesIterator(LiveInterval* interval)
      : current_interval_(interval),
        current_range_(interval->GetFirstRange()) {}

  bool Done() const { return current_interval_ == nullptr; }
  LiveRange* CurrentRange() const { return current_range_; }
  LiveInterval* CurrentInterval() const { return current_interval_; }

  void Advance() {
    current_range_ = current_range_->GetNext();
    if (current_range_ == nullptr) {
      current_interval_ = current_interval_->GetNextSibling();
      if (current_interval_ != nullptr) {
        current_range_ = current_interval_->GetFirstRange();
      }
    }
  }

 private:
  LiveInterval* current_interval_;
  LiveRange* current_range_;

  DISALLOW_COPY_AND_ASSIGN(AllRangesIterator);
};

bool RegisterAllocatorLinearScan::ValidateInternal(bool log_fatal_on_failure) const {
  // To simplify unit testing, we eagerly create the array of intervals, and
  // call the helper method.
  ScopedArenaAllocator allocator(allocator_->GetArenaStack());
  ScopedArenaVector<LiveInterval*> intervals(
      allocator.Adapter(kArenaAllocRegisterAllocatorValidate));
  for (size_t i = 0; i < liveness_.GetNumberOfSsaValues(); ++i) {
    HInstruction* instruction = liveness_.GetInstructionFromSsaIndex(i);
    if (ShouldProcess(processing_core_registers_, instruction->GetLiveInterval())) {
      intervals.push_back(instruction->GetLiveInterval());
    }
  }

  const ScopedArenaVector<LiveInterval*>* physical_register_intervals = processing_core_registers_
      ? &physical_core_register_intervals_
      : &physical_fp_register_intervals_;
  for (LiveInterval* fixed : *physical_register_intervals) {
    if (fixed != nullptr) {
      intervals.push_back(fixed);
    }
  }

  for (LiveInterval* temp : temp_intervals_) {
    if (ShouldProcess(processing_core_registers_, temp)) {
      intervals.push_back(temp);
    }
  }

  return ValidateIntervals(ArrayRef<LiveInterval* const>(intervals),
                           GetNumberOfSpillSlots(),
                           reserved_out_slots_,
                           *codegen_,
                           processing_core_registers_,
                           log_fatal_on_failure);
}

void RegisterAllocatorLinearScan::DumpInterval(std::ostream& stream, LiveInterval* interval) const {
  interval->Dump(stream);
  stream << ": ";
  if (interval->HasRegister()) {
    if (interval->IsFloatingPoint()) {
      codegen_->DumpFloatingPointRegister(stream, interval->GetRegister());
    } else {
      codegen_->DumpCoreRegister(stream, interval->GetRegister());
    }
  } else {
    stream << "spilled";
  }
  stream << std::endl;
}

void RegisterAllocatorLinearScan::DumpAllIntervals(std::ostream& stream) const {
  stream << "inactive: " << std::endl;
  for (LiveInterval* inactive_interval : inactive_) {
    DumpInterval(stream, inactive_interval);
  }
  stream << "active: " << std::endl;
  for (LiveInterval* active_interval : active_) {
    DumpInterval(stream, active_interval);
  }
  stream << "unhandled: " << std::endl;
  auto unhandled = (unhandled_ != nullptr) ?
      unhandled_ : &unhandled_core_intervals_;
  for (LiveInterval* unhandled_interval : *unhandled) {
    DumpInterval(stream, unhandled_interval);
  }
  stream << "handled: " << std::endl;
  for (LiveInterval* handled_interval : handled_) {
    DumpInterval(stream, handled_interval);
  }
}

// By the book implementation of a linear scan register allocator.
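// Invariant: `unhandled_` is sorted by decreasing start position (see
// AddSorted), so the interval with the lowest start is at the back and can be
// popped in O(1).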
void RegisterAllocatorLinearScan::LinearScan() {
  while (!unhandled_->empty()) {
    // (1) Remove interval with the lowest start position from unhandled.
    LiveInterval* current = unhandled_->back();
    unhandled_->pop_back();

    // Make sure the interval is in an expected state.
    DCHECK(!current->IsFixed() && !current->HasSpillSlot());
    // Make sure we are going in the right order.
    DCHECK(unhandled_->empty() || unhandled_->back()->GetStart() >= current->GetStart());
    // Make sure a low interval is always with a high.
    DCHECK(!current->IsLowInterval() || unhandled_->back()->IsHighInterval());
    // Make sure a high interval is always with a low.
    DCHECK(current->IsLowInterval() ||
           unhandled_->empty() ||
           !unhandled_->back()->IsHighInterval());

    size_t position = current->GetStart();

    // Remember the inactive_ size here since the ones moved to inactive_ from
    // active_ below shouldn't need to be re-checked.
    size_t inactive_intervals_to_handle = inactive_.size();

    // (2) Remove currently active intervals that are dead at this position.
    //     Move active intervals that have a lifetime hole at this position
    //     to inactive.
    auto active_kept_end = std::remove_if(
        active_.begin(),
        active_.end(),
        [this, position](LiveInterval* interval) {
          if (interval->IsDeadAt(position)) {
            handled_.push_back(interval);
            return true;
          } else if (!interval->Covers(position)) {
            inactive_.push_back(interval);
            return true;
          } else {
            return false;  // Keep this interval.
          }
        });
    active_.erase(active_kept_end, active_.end());

    // (3) Remove currently inactive intervals that are dead at this position.
    //     Move inactive intervals that cover this position to active.
    auto inactive_to_handle_end = inactive_.begin() + inactive_intervals_to_handle;
    auto inactive_kept_end = std::remove_if(
        inactive_.begin(),
        inactive_to_handle_end,
        [this, position](LiveInterval* interval) {
          DCHECK(interval->GetStart() < position || interval->IsFixed());
          if (interval->IsDeadAt(position)) {
            handled_.push_back(interval);
            return true;
          } else if (interval->Covers(position)) {
            active_.push_back(interval);
            return true;
          } else {
            return false;  // Keep this interval.
          }
        });
    inactive_.erase(inactive_kept_end, inactive_to_handle_end);

    if (current->IsHighInterval() && !current->GetLowInterval()->HasRegister()) {
      DCHECK(!current->HasRegister());
      // Allocating the low part was unsuccessful. The split interval for the high part
      // will be handled next (it is in the `unhandled_` list).
      continue;
    }

    // (4) Try to find an available register.
    bool success = TryAllocateFreeReg(current);

    // (5) If no register could be found, we need to spill.
    if (!success) {
      success = AllocateBlockedReg(current);
    }

    // (6) If the interval had a register allocated, add it to the list of active
    //     intervals.
    if (success) {
      codegen_->AddAllocatedRegister(processing_core_registers_
          ? Location::RegisterLocation(current->GetRegister())
          : Location::FpuRegisterLocation(current->GetRegister()));
      active_.push_back(current);
      if (current->HasHighInterval() && !current->GetHighInterval()->HasRegister()) {
        current->GetHighInterval()->SetRegister(GetHighForLowRegister(current->GetRegister()));
      }
    }
  }
}

static void FreeIfNotCoverAt(LiveInterval* interval, size_t position, size_t* free_until) {
  DCHECK(!interval->IsHighInterval());
  // Note that the same instruction may occur multiple times in the input list,
  // so `free_until` may have changed already.
  // Since `position` is not the current scan position, we need to use CoversSlow.
  if (interval->IsDeadAt(position)) {
    // Set the register to be free. Note that inactive intervals might later
    // update this.
    free_until[interval->GetRegister()] = kMaxLifetimePosition;
    if (interval->HasHighInterval()) {
      DCHECK(interval->GetHighInterval()->IsDeadAt(position));
      free_until[interval->GetHighInterval()->GetRegister()] = kMaxLifetimePosition;
    }
  } else if (!interval->CoversSlow(position)) {
    // The interval becomes inactive at `defined_by`. We make its register
    // available only until the next use strictly after `defined_by`.
    free_until[interval->GetRegister()] = interval->FirstUseAfter(position);
    if (interval->HasHighInterval()) {
      DCHECK(!interval->GetHighInterval()->CoversSlow(position));
      free_until[interval->GetHighInterval()->GetRegister()] = free_until[interval->GetRegister()];
    }
  }
}

// Find a free register. If multiple are found, pick the register that
// is free the longest.
bool RegisterAllocatorLinearScan::TryAllocateFreeReg(LiveInterval* current) {
  size_t* free_until = registers_array_;

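  // `free_until[reg]` is the first position at which `reg` stops being
  // available for `current`: 0 means it is taken right now, and
  // kMaxLifetimePosition means it is free for the rest of the method.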
  // First set all registers to be free.
  for (size_t i = 0; i < number_of_registers_; ++i) {
    free_until[i] = kMaxLifetimePosition;
  }

  // For each active interval, set its register to not free.
  for (LiveInterval* interval : active_) {
    DCHECK(interval->HasRegister());
    free_until[interval->GetRegister()] = 0;
  }

  // An interval that starts at an instruction (that is, it is not split) may
  // re-use the registers used by the inputs of that instruction, based on the
  // location summary.
  HInstruction* defined_by = current->GetDefinedBy();
  if (defined_by != nullptr && !current->IsSplit()) {
    LocationSummary* locations = defined_by->GetLocations();
    if (!locations->OutputCanOverlapWithInputs() && locations->Out().IsUnallocated()) {
      HInputsRef inputs = defined_by->GetInputs();
      for (size_t i = 0; i < inputs.size(); ++i) {
        if (locations->InAt(i).IsValid()) {
          // Take the last interval of the input. It is the location of that interval
          // that will be used at `defined_by`.
          LiveInterval* interval = inputs[i]->GetLiveInterval()->GetLastSibling();
          // Note that the interval may not have been processed yet.
          // TODO: Handle non-split intervals last in the work list.
          if (interval->HasRegister() && interval->SameRegisterKind(*current)) {
            // The input must be live until the end of `defined_by`, to comply with
            // the linear scan algorithm. So we use `defined_by`'s end lifetime
            // position to check whether the input is dead or is inactive after
            // `defined_by`.
            DCHECK(interval->CoversSlow(defined_by->GetLifetimePosition()));
            size_t position = defined_by->GetLifetimePosition() + 1;
            FreeIfNotCoverAt(interval, position, free_until);
          }
        }
      }
    }
  }

  // For each inactive interval, set its register to be free until
  // the next intersection with `current`.
  for (LiveInterval* inactive : inactive_) {
    // Temp/Slow-path-safepoint interval has no holes.
    DCHECK(!inactive->IsTemp());
    if (!current->IsSplit() && !inactive->IsFixed()) {
      // Neither current nor inactive are fixed.
      // Thanks to SSA, a non-split interval starting in a hole of an
      // inactive interval should never intersect with that inactive interval.
      // Only if it's not fixed though, because fixed intervals don't come from SSA.
      DCHECK_EQ(inactive->FirstIntersectionWith(current), kNoLifetime);
      continue;
    }

    DCHECK(inactive->HasRegister());
    if (free_until[inactive->GetRegister()] == 0) {
      // Already used by some active interval. No need to intersect.
      continue;
    }
    size_t next_intersection = inactive->FirstIntersectionWith(current);
    if (next_intersection != kNoLifetime) {
      free_until[inactive->GetRegister()] =
          std::min(free_until[inactive->GetRegister()], next_intersection);
    }
  }

  int reg = kNoRegister;
  if (current->HasRegister()) {
    // Some instructions have a fixed register output.
    reg = current->GetRegister();
    if (free_until[reg] == 0) {
      DCHECK(current->IsHighInterval());
      // AllocateBlockedReg will spill the holder of the register.
      return false;
    }
  } else {
    DCHECK(!current->IsHighInterval());
    int hint = current->FindFirstRegisterHint(free_until, liveness_);
    if ((hint != kNoRegister)
        // For simplicity, if the hint we are getting for a pair cannot be used,
        // we are just going to allocate a new pair.
        && !(current->IsLowInterval() && IsBlocked(GetHighForLowRegister(hint)))) {
      DCHECK(!IsBlocked(hint));
      reg = hint;
    } else if (current->IsLowInterval()) {
      reg = FindAvailableRegisterPair(free_until, current->GetStart());
    } else {
      reg = FindAvailableRegister(free_until, current);
    }
  }

  DCHECK_NE(reg, kNoRegister);
  // If we could not find a register, we need to spill.
  if (free_until[reg] == 0) {
    return false;
  }

  if (current->IsLowInterval()) {
    // If the high register of this interval is not available, we need to spill.
    int high_reg = current->GetHighInterval()->GetRegister();
    if (high_reg == kNoRegister) {
      high_reg = GetHighForLowRegister(reg);
    }
    if (free_until[high_reg] == 0) {
      return false;
    }
  }

  current->SetRegister(reg);
  if (!current->IsDeadAt(free_until[reg])) {
    // If the register is only available for a subset of live ranges
    // covered by `current`, split `current` before the position where
    // the register is not available anymore.
    LiveInterval* split = SplitBetween(current, current->GetStart(), free_until[reg]);
    DCHECK(split != nullptr);
    AddSorted(unhandled_, split);
  }
  return true;
}

bool RegisterAllocatorLinearScan::IsBlocked(int reg) const {
  return processing_core_registers_
      ? blocked_core_registers_[reg]
      : blocked_fp_registers_[reg];
}

int RegisterAllocatorLinearScan::FindAvailableRegisterPair(size_t* next_use, size_t starting_at) const {
  int reg = kNoRegister;
  // Pick the register pair whose next use is furthest away.
  for (size_t i = 0; i < number_of_registers_; ++i) {
    if (IsBlocked(i)) continue;
    if (!IsLowRegister(i)) continue;
    int high_register = GetHighForLowRegister(i);
    if (IsBlocked(high_register)) continue;
    int existing_high_register = GetHighForLowRegister(reg);
    if ((reg == kNoRegister) || (next_use[i] >= next_use[reg]
                        && next_use[high_register] >= next_use[existing_high_register])) {
      reg = i;
      if (next_use[i] == kMaxLifetimePosition
          && next_use[high_register] == kMaxLifetimePosition) {
        break;
      }
    } else if (next_use[reg] <= starting_at || next_use[existing_high_register] <= starting_at) {
      // If one of the registers of the current pair is known to be unavailable,
      // just unconditionally try a new one.
      reg = i;
    }
  }
  return reg;
}

bool RegisterAllocatorLinearScan::IsCallerSaveRegister(int reg) const {
  return processing_core_registers_
      ? !codegen_->IsCoreCalleeSaveRegister(reg)
      : !codegen_->IsFloatingPointCalleeSaveRegister(reg);
}

int RegisterAllocatorLinearScan::FindAvailableRegister(size_t* next_use, LiveInterval* current) const {
  // We special case intervals that do not span a safepoint to try to find a caller-save
  // register if one is available. We iterate from 0 to the number of registers,
  // so if there are caller-save registers available at the end, we continue the iteration.
  bool prefers_caller_save = !current->HasWillCallSafepoint();
  int reg = kNoRegister;
  for (size_t i = 0; i < number_of_registers_; ++i) {
    if (IsBlocked(i)) {
      // Register cannot be used. Continue.
      continue;
    }

    // Best case: we found a register fully available.
    if (next_use[i] == kMaxLifetimePosition) {
      if (prefers_caller_save && !IsCallerSaveRegister(i)) {
        // We can get shorter encodings on some platforms by using
        // small register numbers. So only update the candidate if the previous
        // one was not available for the whole method.
        if (reg == kNoRegister || next_use[reg] != kMaxLifetimePosition) {
          reg = i;
        }
        // Continue the iteration in the hope of finding a caller save register.
        continue;
      } else {
        reg = i;
        // We know the register is good enough. Return it.
        break;
      }
    }

    // If we had no register before, take this one as a reference.
    if (reg == kNoRegister) {
      reg = i;
      continue;
    }

    // Pick the register whose next use is furthest away.
    if (next_use[i] > next_use[reg]) {
      reg = i;
      continue;
    }
  }
  return reg;
}

// Remove interval and its other half if any. Return iterator to the following element.
static ScopedArenaVector<LiveInterval*>::iterator RemoveIntervalAndPotentialOtherHalf(
    ScopedArenaVector<LiveInterval*>* intervals, ScopedArenaVector<LiveInterval*>::iterator pos) {
  DCHECK(intervals->begin() <= pos && pos < intervals->end());
  LiveInterval* interval = *pos;
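  // The two halves of a register pair are adjacent in these worklists (the
  // DCHECKs below verify this), so the other half is found at pos - 1 or pos + 1.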
  if (interval->IsLowInterval()) {
    DCHECK(pos + 1 < intervals->end());
    DCHECK_EQ(*(pos + 1), interval->GetHighInterval());
    return intervals->erase(pos, pos + 2);
  } else if (interval->IsHighInterval()) {
    DCHECK(intervals->begin() < pos);
    DCHECK_EQ(*(pos - 1), interval->GetLowInterval());
    return intervals->erase(pos - 1, pos + 1);
  } else {
    return intervals->erase(pos);
  }
}

bool RegisterAllocatorLinearScan::TrySplitNonPairOrUnalignedPairIntervalAt(size_t position,
                                                                           size_t first_register_use,
                                                                           size_t* next_use) {
  for (auto it = active_.begin(), end = active_.end(); it != end; ++it) {
    LiveInterval* active = *it;
    DCHECK(active->HasRegister());
    if (active->IsFixed()) continue;
    if (active->IsHighInterval()) continue;
    if (first_register_use > next_use[active->GetRegister()]) continue;

    // Split the first interval found that is either:
    // 1) A non-pair interval.
    // 2) A pair interval whose high is not low + 1.
    // 3) A pair interval whose low is not even.
    if (!active->IsLowInterval() ||
        IsLowOfUnalignedPairInterval(active) ||
        !IsLowRegister(active->GetRegister())) {
      LiveInterval* split = Split(active, position);
      if (split != active) {
        handled_.push_back(active);
      }
      RemoveIntervalAndPotentialOtherHalf(&active_, it);
      AddSorted(unhandled_, split);
      return true;
    }
  }
  return false;
}

// Find the register whose next use is furthest away, and spill the interval
// that holds it. If the first use of `current` comes after that register's
// next use, we spill `current` instead.
bool RegisterAllocatorLinearScan::AllocateBlockedReg(LiveInterval* current) {
  size_t first_register_use = current->FirstRegisterUse();
  if (current->HasRegister()) {
    DCHECK(current->IsHighInterval());
    // The low interval has allocated the register for the high interval. In
    // case the low interval had to split both intervals, we may end up in a
    // situation where the high interval does not have a register use anymore.
    // We must still proceed in order to split currently active and inactive
    // uses of the high interval's register, and put the high interval in the
    // active set.
    DCHECK(first_register_use != kNoLifetime || (current->GetNextSibling() != nullptr));
  } else if (first_register_use == kNoLifetime) {
    AllocateSpillSlotFor(current);
    return false;
  }

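  // `next_use[reg]` records the next position at which `reg` is needed by an
  // active or inactive interval. The classic linear-scan spill heuristic picks
  // the register whose next use is furthest away, since its holder is the
  // cheapest to evict.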
  // First set all registers as not being used.
  size_t* next_use = registers_array_;
  for (size_t i = 0; i < number_of_registers_; ++i) {
    next_use[i] = kMaxLifetimePosition;
  }

  // For each active interval, find the next use of its register after the
  // start of current.
  for (LiveInterval* active : active_) {
    DCHECK(active->HasRegister());
    if (active->IsFixed()) {
      next_use[active->GetRegister()] = current->GetStart();
    } else {
      size_t use = active->FirstRegisterUseAfter(current->GetStart());
      if (use != kNoLifetime) {
        next_use[active->GetRegister()] = use;
      }
    }
  }

  // For each inactive interval, find the next use of its register after the
  // start of current.
  for (LiveInterval* inactive : inactive_) {
    // Temp/Slow-path-safepoint interval has no holes.
    DCHECK(!inactive->IsTemp());
    if (!current->IsSplit() && !inactive->IsFixed()) {
      // Neither current nor inactive are fixed.
      // Thanks to SSA, a non-split interval starting in a hole of an
      // inactive interval should never intersect with that inactive interval.
      // Only if it's not fixed though, because fixed intervals don't come from SSA.
      DCHECK_EQ(inactive->FirstIntersectionWith(current), kNoLifetime);
      continue;
    }
    DCHECK(inactive->HasRegister());
    size_t next_intersection = inactive->FirstIntersectionWith(current);
    if (next_intersection != kNoLifetime) {
      if (inactive->IsFixed()) {
        next_use[inactive->GetRegister()] =
            std::min(next_intersection, next_use[inactive->GetRegister()]);
      } else {
        size_t use = inactive->FirstUseAfter(current->GetStart());
        if (use != kNoLifetime) {
          next_use[inactive->GetRegister()] = std::min(use, next_use[inactive->GetRegister()]);
        }
      }
    }
  }

  int reg = kNoRegister;
  bool should_spill = false;
  if (current->HasRegister()) {
    DCHECK(current->IsHighInterval());
    reg = current->GetRegister();
    // When allocating the low part, we made sure the high register was available.
    DCHECK_LT(first_register_use, next_use[reg]);
  } else if (current->IsLowInterval()) {
    reg = FindAvailableRegisterPair(next_use, first_register_use);
    // We should spill if both registers are not available.
    should_spill = (first_register_use >= next_use[reg])
      || (first_register_use >= next_use[GetHighForLowRegister(reg)]);
  } else {
    DCHECK(!current->IsHighInterval());
    reg = FindAvailableRegister(next_use, current);
    should_spill = (first_register_use >= next_use[reg]);
  }

  DCHECK_NE(reg, kNoRegister);
  if (should_spill) {
    DCHECK(!current->IsHighInterval());
    bool is_allocation_at_use_site = (current->GetStart() >= (first_register_use - 1));
    if (is_allocation_at_use_site) {
      if (!current->IsLowInterval()) {
        DumpInterval(std::cerr, current);
        DumpAllIntervals(std::cerr);
        // This situation can lead to an infinite loop, so we make it a non-debug CHECK.
        HInstruction* at = liveness_.GetInstructionFromPosition(first_register_use / 2);
        CHECK(false) << "There are not enough registers available for "
          << current->GetParent()->GetDefinedBy()->DebugName() << " "
          << current->GetParent()->GetDefinedBy()->GetId()
          << " at " << first_register_use - 1 << " "
          << (at == nullptr ? "" : at->DebugName());
      }

      // If we're allocating a register for `current` because the instruction at
      // that position requires it, but we think we should spill, then there are
      // non-pair intervals or unaligned pair intervals blocking the allocation.
      // We split the first interval found, and put ourselves first in the
      // `unhandled_` list.
      bool success = TrySplitNonPairOrUnalignedPairIntervalAt(current->GetStart(),
                                                              first_register_use,
                                                              next_use);
      DCHECK(success);
      LiveInterval* existing = unhandled_->back();
      DCHECK(existing->IsHighInterval());
      DCHECK_EQ(existing->GetLowInterval(), current);
      unhandled_->push_back(current);
    } else {
      // If the first use of that instruction is after the last use of the found
      // register, we split this interval just before its first register use.
      AllocateSpillSlotFor(current);
      LiveInterval* split = SplitBetween(current, current->GetStart(), first_register_use - 1);
      DCHECK(current != split);
      AddSorted(unhandled_, split);
    }
    return false;
  } else {
    // Use this register and spill the active and inactive intervals that
    // hold that register.
    current->SetRegister(reg);

    for (auto it = active_.begin(), end = active_.end(); it != end; ++it) {
      LiveInterval* active = *it;
      if (active->GetRegister() == reg) {
        DCHECK(!active->IsFixed());
        LiveInterval* split = Split(active, current->GetStart());
        if (split != active) {
          handled_.push_back(active);
        }
        RemoveIntervalAndPotentialOtherHalf(&active_, it);
        AddSorted(unhandled_, split);
        break;
      }
    }

    // NOTE: Retrieve end() on each iteration because we're removing elements in the loop body.
    for (auto it = inactive_.begin(); it != inactive_.end(); ) {
      LiveInterval* inactive = *it;
      bool erased = false;
      if (inactive->GetRegister() == reg) {
        if (!current->IsSplit() && !inactive->IsFixed()) {
          // Neither current nor inactive are fixed.
          // Thanks to SSA, a non-split interval starting in a hole of an
          // inactive interval should never intersect with that inactive interval.
          // Only if it's not fixed though, because fixed intervals don't come from SSA.
          DCHECK_EQ(inactive->FirstIntersectionWith(current), kNoLifetime);
        } else {
          size_t next_intersection = inactive->FirstIntersectionWith(current);
          if (next_intersection != kNoLifetime) {
            if (inactive->IsFixed()) {
              LiveInterval* split = Split(current, next_intersection);
              DCHECK_NE(split, current);
              AddSorted(unhandled_, split);
            } else {
              // Split at the start of `current`, which will lead to splitting
              // at the end of the lifetime hole of `inactive`.
              LiveInterval* split = Split(inactive, current->GetStart());
              // If it's inactive, it must start before the current interval.
              DCHECK_NE(split, inactive);
              it = RemoveIntervalAndPotentialOtherHalf(&inactive_, it);
              erased = true;
              handled_.push_back(inactive);
              AddSorted(unhandled_, split);
            }
          }
        }
      }
      // If we have erased the element, `it` already points to the next element.
      // Otherwise we need to move to the next element.
      if (!erased) {
        ++it;
      }
    }

    return true;
  }
}

void RegisterAllocatorLinearScan::AddSorted(ScopedArenaVector<LiveInterval*>* array,
                                            LiveInterval* interval) {
  DCHECK(!interval->IsFixed() && !interval->HasSpillSlot());
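  // `array` is sorted by decreasing start position; scan from the back (the
  // next intervals to be processed) to find the insertion point.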
  size_t insert_at = 0;
  for (size_t i = array->size(); i > 0; --i) {
    LiveInterval* current = (*array)[i - 1u];
    // High intervals must be processed right after their low equivalent.
    if (current->StartsAfter(interval) && !current->IsHighInterval()) {
      insert_at = i;
      break;
    }
  }

  // Insert the high interval before the low, to ensure the low is processed before.
  auto insert_pos = array->begin() + insert_at;
  if (interval->HasHighInterval()) {
    array->insert(insert_pos, { interval->GetHighInterval(), interval });
  } else if (interval->HasLowInterval()) {
    array->insert(insert_pos, { interval, interval->GetLowInterval() });
  } else {
    array->insert(insert_pos, interval);
  }
}

void RegisterAllocatorLinearScan::AllocateSpillSlotFor(LiveInterval* interval) {
  if (interval->IsHighInterval()) {
    // The low interval already took care of allocating the spill slot.
    DCHECK(!interval->GetLowInterval()->HasRegister());
    DCHECK(interval->GetLowInterval()->GetParent()->HasSpillSlot());
    return;
  }

  LiveInterval* parent = interval->GetParent();

  // An instruction gets a spill slot for its entire lifetime. If the parent
  // of this interval already has a spill slot, there is nothing to do.
  if (parent->HasSpillSlot()) {
    return;
  }

  HInstruction* defined_by = parent->GetDefinedBy();
  DCHECK(!defined_by->IsPhi() || !defined_by->AsPhi()->IsCatchPhi());

  if (defined_by->IsParameterValue()) {
    // Parameters have their own stack slot.
    parent->SetSpillSlot(codegen_->GetStackSlotOfParameter(defined_by->AsParameterValue()));
    return;
  }

  if (defined_by->IsCurrentMethod()) {
    parent->SetSpillSlot(0);
    return;
  }

  if (defined_by->IsConstant()) {
    // Constants don't need a spill slot.
    return;
  }

  ScopedArenaVector<size_t>* spill_slots = nullptr;
  switch (interval->GetType()) {
    case DataType::Type::kFloat64:
      spill_slots = &double_spill_slots_;
      break;
    case DataType::Type::kInt64:
      spill_slots = &long_spill_slots_;
      break;
    case DataType::Type::kFloat32:
      spill_slots = &float_spill_slots_;
      break;
    case DataType::Type::kReference:
    case DataType::Type::kInt32:
    case DataType::Type::kUint16:
    case DataType::Type::kUint8:
    case DataType::Type::kInt8:
    case DataType::Type::kBool:
    case DataType::Type::kInt16:
      spill_slots = &int_spill_slots_;
      break;
    case DataType::Type::kUint32:
    case DataType::Type::kUint64:
    case DataType::Type::kVoid:
      LOG(FATAL) << "Unexpected type for interval " << interval->GetType();
  }

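  // Each entry of `spill_slots` holds the end position of the last interval
  // assigned to that slot, so a slot (or run of slots) can be reused once that
  // position is no greater than the start of `parent`.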
  // Find the first contiguous run of free spill slots that is large enough.
  size_t number_of_spill_slots_needed = parent->NumberOfSpillSlotsNeeded();
  size_t slot = 0;
  for (size_t e = spill_slots->size(); slot < e; ++slot) {
    bool found = true;
    for (size_t s = slot, u = std::min(slot + number_of_spill_slots_needed, e); s < u; s++) {
      if ((*spill_slots)[s] > parent->GetStart()) {
        found = false;  // failure
        break;
      }
    }
    if (found) {
      break;  // success
    }
  }

  // Need new spill slots?
  size_t upper = slot + number_of_spill_slots_needed;
  if (upper > spill_slots->size()) {
    spill_slots->resize(upper);
  }
  // Set slots to end.
  size_t end = interval->GetLastSibling()->GetEnd();
  for (size_t s = slot; s < upper; s++) {
    (*spill_slots)[s] = end;
  }

  // Note that the exact spill slot location will be computed when we resolve,
  // that is when we know the number of spill slots for each type.
  parent->SetSpillSlot(slot);
}

void RegisterAllocatorLinearScan::AllocateSpillSlotForCatchPhi(HPhi* phi) {
  LiveInterval* interval = phi->GetLiveInterval();

  HInstruction* previous_phi = phi->GetPrevious();
  DCHECK(previous_phi == nullptr ||
         previous_phi->AsPhi()->GetRegNumber() <= phi->GetRegNumber())
      << "Phis expected to be sorted by vreg number, so that equivalent phis are adjacent.";

  if (phi->IsVRegEquivalentOf(previous_phi)) {
    // This is an equivalent of the previous phi. We need to assign the same
    // catch phi slot.
    DCHECK(previous_phi->GetLiveInterval()->HasSpillSlot());
    interval->SetSpillSlot(previous_phi->GetLiveInterval()->GetSpillSlot());
  } else {
    // Allocate a new spill slot for this catch phi.
    // TODO: Reuse spill slots when intervals of phis from different catch
    //       blocks do not overlap.
    interval->SetSpillSlot(catch_phi_spill_slots_);
    catch_phi_spill_slots_ += interval->NumberOfSpillSlotsNeeded();
  }
}

}  // namespace art