/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "register_allocation_resolver.h"

#include "code_generator.h"
#include "linear_order.h"
#include "ssa_liveness_analysis.h"

namespace art {

RegisterAllocationResolver::RegisterAllocationResolver(ArenaAllocator* allocator,
                                                       CodeGenerator* codegen,
                                                       const SsaLivenessAnalysis& liveness)
      : allocator_(allocator),
        codegen_(codegen),
        liveness_(liveness) {}

void RegisterAllocationResolver::Resolve(ArrayRef<HInstruction* const> safepoints,
                                         size_t reserved_out_slots,
                                         size_t int_spill_slots,
                                         size_t long_spill_slots,
                                         size_t float_spill_slots,
                                         size_t double_spill_slots,
                                         size_t catch_phi_spill_slots,
                                         const ArenaVector<LiveInterval*>& temp_intervals) {
  size_t spill_slots = int_spill_slots
                     + long_spill_slots
                     + float_spill_slots
                     + double_spill_slots
                     + catch_phi_spill_slots;

  // Update safepoints and calculate the size of the spills.
  UpdateSafepointLiveRegisters();
  size_t maximum_safepoint_spill_size = CalculateMaximumSafepointSpillSize(safepoints);

  // Computes frame size and spill mask.
  codegen_->InitializeCodeGeneration(spill_slots,
                                     maximum_safepoint_spill_size,
                                     reserved_out_slots,  // Includes slot(s) for the art method.
                                     codegen_->GetGraph()->GetLinearOrder());

  // Resolve outputs, including stack locations.
  // TODO: Use pointers of Location inside LiveInterval to avoid doing another iteration.
  for (size_t i = 0, e = liveness_.GetNumberOfSsaValues(); i < e; ++i) {
    HInstruction* instruction = liveness_.GetInstructionFromSsaIndex(i);
    LiveInterval* current = instruction->GetLiveInterval();
    LocationSummary* locations = instruction->GetLocations();
    Location location = locations->Out();
    if (instruction->IsParameterValue()) {
      // Now that we know the frame size, adjust the parameter's location.
      if (location.IsStackSlot()) {
        location = Location::StackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
        current->SetSpillSlot(location.GetStackIndex());
        locations->UpdateOut(location);
      } else if (location.IsDoubleStackSlot()) {
        location = Location::DoubleStackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
        current->SetSpillSlot(location.GetStackIndex());
        locations->UpdateOut(location);
      } else if (current->HasSpillSlot()) {
        current->SetSpillSlot(current->GetSpillSlot() + codegen_->GetFrameSize());
      }
    } else if (instruction->IsCurrentMethod()) {
      // The current method is always at offset 0.
      DCHECK(!current->HasSpillSlot() || (current->GetSpillSlot() == 0));
    } else if (instruction->IsPhi() && instruction->AsPhi()->IsCatchPhi()) {
      DCHECK(current->HasSpillSlot());
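      // For illustration, with hypothetical counts: if spill_slots == 6,
      // reserved_out_slots == 3 and catch_phi_spill_slots == 2, then catch
      // phi slot 0 lands at index 0 + 6 + 3 - 2 == 7, the base of the catch
      // phi area that sits on top of the other spill slots (see the stack
      // layout described below).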
      size_t slot = current->GetSpillSlot()
                    + spill_slots
                    + reserved_out_slots
                    - catch_phi_spill_slots;
      current->SetSpillSlot(slot * kVRegSize);
    } else if (current->HasSpillSlot()) {
      // Adjust the stack slot, now that we know the number of slots for each type.
      // The way this implementation lays out the stack is the following:
      // [parameter slots       ]
      // [art method (caller)   ]
      // [entry spill (core)    ]
      // [entry spill (float)   ]
      // [should_deoptimize flag] (this is optional)
      // [catch phi spill slots ]
      // [double spill slots    ]
      // [long spill slots      ]
      // [float spill slots     ]
      // [int/ref values        ]
      // [maximum out values    ] (number of arguments for calls)
      // [art method            ].
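      // For illustration, with hypothetical counts: if reserved_out_slots == 3,
      // int_spill_slots == 2 and float_spill_slots == 1, a float interval
      // allocated spill slot 0 is adjusted below to slot 0 + 2 + 3 == 5, i.e.
      // byte offset 5 * kVRegSize, just above the int/ref values.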
      size_t slot = current->GetSpillSlot();
      switch (current->GetType()) {
        case Primitive::kPrimDouble:
          slot += long_spill_slots;
          FALLTHROUGH_INTENDED;
        case Primitive::kPrimLong:
          slot += float_spill_slots;
          FALLTHROUGH_INTENDED;
        case Primitive::kPrimFloat:
          slot += int_spill_slots;
          FALLTHROUGH_INTENDED;
        case Primitive::kPrimNot:
        case Primitive::kPrimInt:
        case Primitive::kPrimChar:
        case Primitive::kPrimByte:
        case Primitive::kPrimBoolean:
        case Primitive::kPrimShort:
          slot += reserved_out_slots;
          break;
        case Primitive::kPrimVoid:
          LOG(FATAL) << "Unexpected type for interval " << current->GetType();
      }
      current->SetSpillSlot(slot * kVRegSize);
    }

    Location source = current->ToLocation();

    if (location.IsUnallocated()) {
      if (location.GetPolicy() == Location::kSameAsFirstInput) {
        if (locations->InAt(0).IsUnallocated()) {
          locations->SetInAt(0, source);
        } else {
          DCHECK(locations->InAt(0).Equals(source));
        }
      }
      locations->UpdateOut(source);
    } else {
      DCHECK(source.Equals(location));
    }
  }

  // Connect siblings and resolve inputs.
  for (size_t i = 0, e = liveness_.GetNumberOfSsaValues(); i < e; ++i) {
    HInstruction* instruction = liveness_.GetInstructionFromSsaIndex(i);
    ConnectSiblings(instruction->GetLiveInterval());
  }

  // Resolve non-linear control flow across branches. Order does not matter.
  for (HBasicBlock* block : codegen_->GetGraph()->GetLinearOrder()) {
    if (block->IsCatchBlock() ||
        (block->IsLoopHeader() && block->GetLoopInformation()->IsIrreducible())) {
      // Instructions live at the top of catch blocks or irreducible loop
      // headers were forced to spill.
      if (kIsDebugBuild) {
        BitVector* live = liveness_.GetLiveInSet(*block);
        for (uint32_t idx : live->Indexes()) {
          LiveInterval* interval = liveness_.GetInstructionFromSsaIndex(idx)->GetLiveInterval();
          LiveInterval* sibling = interval->GetSiblingAt(block->GetLifetimeStart());
          // `GetSiblingAt` returns the sibling that contains a position, but there could be
          // a lifetime hole in it. `CoversSlow` returns whether the interval is live at that
          // position.
          if ((sibling != nullptr) && sibling->CoversSlow(block->GetLifetimeStart())) {
            DCHECK(!sibling->HasRegister());
          }
        }
      }
    } else {
      BitVector* live = liveness_.GetLiveInSet(*block);
      for (uint32_t idx : live->Indexes()) {
        LiveInterval* interval = liveness_.GetInstructionFromSsaIndex(idx)->GetLiveInterval();
        for (HBasicBlock* predecessor : block->GetPredecessors()) {
          ConnectSplitSiblings(interval, predecessor, block);
        }
      }
    }
  }

  // Resolve phi inputs. Order does not matter.
  for (HBasicBlock* block : codegen_->GetGraph()->GetLinearOrder()) {
    if (block->IsCatchBlock()) {
      // Catch phi values are set at runtime by the exception delivery mechanism.
    } else {
      for (HInstructionIterator inst_it(block->GetPhis()); !inst_it.Done(); inst_it.Advance()) {
        HInstruction* phi = inst_it.Current();
        for (size_t i = 0, e = block->GetPredecessors().size(); i < e; ++i) {
          HBasicBlock* predecessor = block->GetPredecessors()[i];
          DCHECK_EQ(predecessor->GetNormalSuccessors().size(), 1u);
          HInstruction* input = phi->InputAt(i);
          Location source = input->GetLiveInterval()->GetLocationAt(
              predecessor->GetLifetimeEnd() - 1);
          Location destination = phi->GetLiveInterval()->ToLocation();
          InsertParallelMoveAtExitOf(predecessor, phi, source, destination);
        }
      }
    }
  }

  // Resolve temp locations.
  for (LiveInterval* temp : temp_intervals) {
    if (temp->IsHighInterval()) {
      // High intervals can be skipped; they are already handled by the low interval.
      continue;
    }
    HInstruction* at = liveness_.GetTempUser(temp);
    size_t temp_index = liveness_.GetTempIndex(temp);
    LocationSummary* locations = at->GetLocations();
    switch (temp->GetType()) {
      case Primitive::kPrimInt:
        locations->SetTempAt(temp_index, Location::RegisterLocation(temp->GetRegister()));
        break;

      case Primitive::kPrimDouble:
        if (codegen_->NeedsTwoRegisters(Primitive::kPrimDouble)) {
          Location location = Location::FpuRegisterPairLocation(
              temp->GetRegister(), temp->GetHighInterval()->GetRegister());
          locations->SetTempAt(temp_index, location);
        } else {
          locations->SetTempAt(temp_index, Location::FpuRegisterLocation(temp->GetRegister()));
        }
        break;

      default:
        LOG(FATAL) << "Unexpected type for temporary location "
                   << temp->GetType();
    }
  }
}

void RegisterAllocationResolver::UpdateSafepointLiveRegisters() {
  for (size_t i = 0, e = liveness_.GetNumberOfSsaValues(); i < e; ++i) {
    HInstruction* instruction = liveness_.GetInstructionFromSsaIndex(i);
    for (LiveInterval* current = instruction->GetLiveInterval();
         current != nullptr;
         current = current->GetNextSibling()) {
      if (!current->HasRegister()) {
        continue;
      }
      Location source = current->ToLocation();
      for (SafepointPosition* safepoint_position = current->GetFirstSafepoint();
           safepoint_position != nullptr;
           safepoint_position = safepoint_position->GetNext()) {
        DCHECK(current->CoversSlow(safepoint_position->GetPosition()));
        LocationSummary* locations = safepoint_position->GetLocations();
        switch (source.GetKind()) {
          case Location::kRegister:
          case Location::kFpuRegister: {
            locations->AddLiveRegister(source);
            break;
          }
          case Location::kRegisterPair:
          case Location::kFpuRegisterPair: {
            locations->AddLiveRegister(source.ToLow());
            locations->AddLiveRegister(source.ToHigh());
            break;
          }
          case Location::kStackSlot:  // Fall-through
          case Location::kDoubleStackSlot:  // Fall-through
          case Location::kConstant: {
            // Nothing to do.
            break;
          }
          default: {
            LOG(FATAL) << "Unexpected location for object";
          }
        }
      }
    }
  }
}

size_t RegisterAllocationResolver::CalculateMaximumSafepointSpillSize(
    ArrayRef<HInstruction* const> safepoints) {
  size_t core_register_spill_size = codegen_->GetWordSize();
  size_t fp_register_spill_size = codegen_->GetFloatingPointSpillSlotSize();
  size_t maximum_safepoint_spill_size = 0u;
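  // For illustration, with hypothetical sizes: a 4-byte word size, an 8-byte
  // FP spill slot size, and a safepoint with 2 core and 1 FP slow-path
  // spills yields a spill_size of 4 * 2 + 8 * 1 == 16 bytes.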
  for (HInstruction* instruction : safepoints) {
    LocationSummary* locations = instruction->GetLocations();
    if (locations->OnlyCallsOnSlowPath()) {
      size_t core_spills =
          codegen_->GetNumberOfSlowPathSpills(locations, /* core_registers */ true);
      size_t fp_spills =
          codegen_->GetNumberOfSlowPathSpills(locations, /* core_registers */ false);
      size_t spill_size =
          core_register_spill_size * core_spills + fp_register_spill_size * fp_spills;
      maximum_safepoint_spill_size = std::max(maximum_safepoint_spill_size, spill_size);
    } else if (locations->CallsOnMainAndSlowPath()) {
      // Nothing to spill on the slow path if the main path already clobbers caller-saves.
      DCHECK_EQ(0u, codegen_->GetNumberOfSlowPathSpills(locations, /* core_registers */ true));
      DCHECK_EQ(0u, codegen_->GetNumberOfSlowPathSpills(locations, /* core_registers */ false));
    }
  }
  return maximum_safepoint_spill_size;
}

void RegisterAllocationResolver::ConnectSiblings(LiveInterval* interval) {
  LiveInterval* current = interval;
  if (current->HasSpillSlot()
      && current->HasRegister()
      // Currently, the code generators unconditionally spill the current method.
      && !interval->GetDefinedBy()->IsCurrentMethod()) {
    // We spill eagerly, so the move must be at the definition.
    Location loc;
    switch (interval->NumberOfSpillSlotsNeeded()) {
      case 1: loc = Location::StackSlot(interval->GetParent()->GetSpillSlot()); break;
      case 2: loc = Location::DoubleStackSlot(interval->GetParent()->GetSpillSlot()); break;
      case 4: loc = Location::SIMDStackSlot(interval->GetParent()->GetSpillSlot()); break;
      default: LOG(FATAL) << "Unexpected number of spill slots"; UNREACHABLE();
    }
    InsertMoveAfter(interval->GetDefinedBy(), interval->ToLocation(), loc);
  }
  UsePosition* use = current->GetFirstUse();
  EnvUsePosition* env_use = current->GetFirstEnvironmentUse();

  // Walk over all siblings, updating locations of use positions, and
  // connecting them when they are adjacent.
  do {
    Location source = current->ToLocation();

    // Walk over all uses covered by this interval, and update the location
    // information.

    LiveRange* range = current->GetFirstRange();
    while (range != nullptr) {
      while (use != nullptr && use->GetPosition() < range->GetStart()) {
        DCHECK(use->IsSynthesized());
        use = use->GetNext();
      }
      while (use != nullptr && use->GetPosition() <= range->GetEnd()) {
        DCHECK(current->CoversSlow(use->GetPosition()) || (use->GetPosition() == range->GetEnd()));
        if (!use->IsSynthesized()) {
          LocationSummary* locations = use->GetUser()->GetLocations();
          Location expected_location = locations->InAt(use->GetInputIndex());
          // The expected (actual) location may be invalid in case the input is unused. Currently
          // this only happens for intrinsics.
          if (expected_location.IsValid()) {
            if (expected_location.IsUnallocated()) {
              locations->SetInAt(use->GetInputIndex(), source);
            } else if (!expected_location.IsConstant()) {
              AddInputMoveFor(interval->GetDefinedBy(), use->GetUser(), source, expected_location);
            }
          } else {
            DCHECK(use->GetUser()->IsInvoke());
            DCHECK(use->GetUser()->AsInvoke()->GetIntrinsic() != Intrinsics::kNone);
          }
        }
        use = use->GetNext();
      }

      // Walk over the environment uses, and update their locations.
      while (env_use != nullptr && env_use->GetPosition() < range->GetStart()) {
        env_use = env_use->GetNext();
      }

      while (env_use != nullptr && env_use->GetPosition() <= range->GetEnd()) {
        DCHECK(current->CoversSlow(env_use->GetPosition())
               || (env_use->GetPosition() == range->GetEnd()));
        HEnvironment* environment = env_use->GetEnvironment();
        environment->SetLocationAt(env_use->GetInputIndex(), source);
        env_use = env_use->GetNext();
      }

      range = range->GetNext();
    }

    // If the next interval starts just after this one, and has a register,
    // insert a move.
    LiveInterval* next_sibling = current->GetNextSibling();
    if (next_sibling != nullptr
        && next_sibling->HasRegister()
        && current->GetEnd() == next_sibling->GetStart()) {
      Location destination = next_sibling->ToLocation();
      InsertParallelMoveAt(current->GetEnd(), interval->GetDefinedBy(), source, destination);
    }

    for (SafepointPosition* safepoint_position = current->GetFirstSafepoint();
         safepoint_position != nullptr;
         safepoint_position = safepoint_position->GetNext()) {
      DCHECK(current->CoversSlow(safepoint_position->GetPosition()));

      if (current->GetType() == Primitive::kPrimNot) {
        DCHECK(interval->GetDefinedBy()->IsActualObject())
            << interval->GetDefinedBy()->DebugName()
            << '(' << interval->GetDefinedBy()->GetId() << ')'
            << "@" << safepoint_position->GetInstruction()->DebugName()
            << '(' << safepoint_position->GetInstruction()->GetId() << ')';
        LocationSummary* locations = safepoint_position->GetLocations();
        if (current->GetParent()->HasSpillSlot()) {
          locations->SetStackBit(current->GetParent()->GetSpillSlot() / kVRegSize);
        }
        if (source.GetKind() == Location::kRegister) {
          locations->SetRegisterBit(source.reg());
        }
      }
    }
    current = next_sibling;
  } while (current != nullptr);

  if (kIsDebugBuild) {
    // Any remaining uses can only be synthesized uses.
    while (use != nullptr) {
      DCHECK(use->IsSynthesized());
      use = use->GetNext();
    }
  }
}

static bool IsMaterializableEntryBlockInstructionOfGraphWithIrreducibleLoop(
    HInstruction* instruction) {
  return instruction->GetBlock()->GetGraph()->HasIrreducibleLoops() &&
         (instruction->IsConstant() || instruction->IsCurrentMethod());
}

void RegisterAllocationResolver::ConnectSplitSiblings(LiveInterval* interval,
                                                      HBasicBlock* from,
                                                      HBasicBlock* to) const {
  if (interval->GetNextSibling() == nullptr) {
    // Nothing to connect. The whole range was allocated to the same location.
    return;
  }

  // Find the intervals that cover `from` and `to`.
  size_t destination_position = to->GetLifetimeStart();
  size_t source_position = from->GetLifetimeEnd() - 1;
  LiveInterval* destination = interval->GetSiblingAt(destination_position);
  LiveInterval* source = interval->GetSiblingAt(source_position);

  if (destination == source) {
    // Interval was not split.
    return;
  }

  LiveInterval* parent = interval->GetParent();
  HInstruction* defined_by = parent->GetDefinedBy();
  if (codegen_->GetGraph()->HasIrreducibleLoops() &&
      (destination == nullptr || !destination->CoversSlow(destination_position))) {
    // Our live_in fixed point calculation has found that the instruction is live
    // in the `to` block because it will eventually enter an irreducible loop. Our
    // live interval computation however does not compute a fixed point, and
    // therefore will not have a location for that instruction for `to`.
    // Because the instruction is a constant or the ArtMethod, we don't need to
    // do anything: it will be materialized in the irreducible loop.
    DCHECK(IsMaterializableEntryBlockInstructionOfGraphWithIrreducibleLoop(defined_by))
        << defined_by->DebugName() << ":" << defined_by->GetId()
        << " " << from->GetBlockId() << " -> " << to->GetBlockId();
    return;
  }

  if (!destination->HasRegister()) {
    // Values are eagerly spilled. The spill slot already contains the appropriate value.
    return;
  }

  Location location_source;
  // `GetSiblingAt` returns the interval whose start and end cover `position`,
  // but does not check whether the interval is inactive at that position.
  // The only situation where the interval is inactive at that position is in the
  // presence of irreducible loops for constants and ArtMethod.
  if (codegen_->GetGraph()->HasIrreducibleLoops() &&
      (source == nullptr || !source->CoversSlow(source_position))) {
    DCHECK(IsMaterializableEntryBlockInstructionOfGraphWithIrreducibleLoop(defined_by));
    if (defined_by->IsConstant()) {
      location_source = defined_by->GetLocations()->Out();
    } else {
      DCHECK(defined_by->IsCurrentMethod());
      switch (parent->NumberOfSpillSlotsNeeded()) {
        case 1: location_source = Location::StackSlot(parent->GetSpillSlot()); break;
        case 2: location_source = Location::DoubleStackSlot(parent->GetSpillSlot()); break;
        case 4: location_source = Location::SIMDStackSlot(parent->GetSpillSlot()); break;
        default: LOG(FATAL) << "Unexpected number of spill slots"; UNREACHABLE();
      }
    }
  } else {
    DCHECK(source != nullptr);
    DCHECK(source->CoversSlow(source_position));
    DCHECK(destination->CoversSlow(destination_position));
    location_source = source->ToLocation();
  }

  // If `from` has only one successor, we can put the moves at its exit.
  // Otherwise we need to put the moves at the entry of `to`.
  if (from->GetNormalSuccessors().size() == 1) {
    InsertParallelMoveAtExitOf(from,
                               defined_by,
                               location_source,
                               destination->ToLocation());
  } else {
    DCHECK_EQ(to->GetPredecessors().size(), 1u);
    InsertParallelMoveAtEntryOf(to,
                                defined_by,
                                location_source,
                                destination->ToLocation());
  }
}

static bool IsValidDestination(Location destination) {
  return destination.IsRegister()
      || destination.IsRegisterPair()
      || destination.IsFpuRegister()
      || destination.IsFpuRegisterPair()
      || destination.IsStackSlot()
      || destination.IsDoubleStackSlot()
      || destination.IsSIMDStackSlot();
}

void RegisterAllocationResolver::AddMove(HParallelMove* move,
                                         Location source,
                                         Location destination,
                                         HInstruction* instruction,
                                         Primitive::Type type) const {
  if (type == Primitive::kPrimLong
      && codegen_->ShouldSplitLongMoves()
      // The parallel move resolver knows how to deal with long constants.
      && !source.IsConstant()) {
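    // A sketch of the split (hypothetical scenario): a kPrimLong move from a
    // register pair to a double stack slot is recorded as two kPrimInt moves,
    // one for the low 32 bits and one for the high 32 bits, letting the
    // parallel move resolver schedule each half independently.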
    move->AddMove(source.ToLow(), destination.ToLow(), Primitive::kPrimInt, instruction);
    move->AddMove(source.ToHigh(), destination.ToHigh(), Primitive::kPrimInt, nullptr);
  } else {
    move->AddMove(source, destination, type, instruction);
  }
}

void RegisterAllocationResolver::AddInputMoveFor(HInstruction* input,
                                                 HInstruction* user,
                                                 Location source,
                                                 Location destination) const {
  if (source.Equals(destination)) return;

  DCHECK(!user->IsPhi());

  HInstruction* previous = user->GetPrevious();
  HParallelMove* move = nullptr;
  if (previous == nullptr
      || !previous->IsParallelMove()
      || previous->GetLifetimePosition() < user->GetLifetimePosition()) {
    move = new (allocator_) HParallelMove(allocator_);
    move->SetLifetimePosition(user->GetLifetimePosition());
    user->GetBlock()->InsertInstructionBefore(move, user);
  } else {
    move = previous->AsParallelMove();
  }
  DCHECK_EQ(move->GetLifetimePosition(), user->GetLifetimePosition());
  AddMove(move, source, destination, nullptr, input->GetType());
}

static bool IsInstructionStart(size_t position) {
  return (position & 1) == 0;
}

static bool IsInstructionEnd(size_t position) {
  return (position & 1) == 1;
}
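
// A sketch of the lifetime position encoding these helpers assume: each
// instruction occupies two positions, so the instruction at index i starts
// at position 2 * i (even) and ends at position 2 * i + 1 (odd). This is why
// `GetInstructionFromPosition(position / 2)` below recovers the instruction
// covering a given position.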

void RegisterAllocationResolver::InsertParallelMoveAt(size_t position,
                                                      HInstruction* instruction,
                                                      Location source,
                                                      Location destination) const {
  DCHECK(IsValidDestination(destination)) << destination;
  if (source.Equals(destination)) return;

  HInstruction* at = liveness_.GetInstructionFromPosition(position / 2);
  HParallelMove* move;
  if (at == nullptr) {
    if (IsInstructionStart(position)) {
      // Block boundary; don't do anything, the connection of split siblings will handle it.
      return;
    } else {
      // Move must happen before the first instruction of the block.
      at = liveness_.GetInstructionFromPosition((position + 1) / 2);
      // Note that parallel moves may have already been inserted, so we explicitly
      // ask for the first instruction of the block: `GetInstructionFromPosition` does
      // not contain the `HParallelMove` instructions.
      at = at->GetBlock()->GetFirstInstruction();

      if (at->GetLifetimePosition() < position) {
        // We may insert moves for split siblings and phi spills at the beginning of the block.
        // Since this is a different lifetime position, we need to go to the next instruction.
        DCHECK(at->IsParallelMove());
        at = at->GetNext();
      }

      if (at->GetLifetimePosition() != position) {
        DCHECK_GT(at->GetLifetimePosition(), position);
        move = new (allocator_) HParallelMove(allocator_);
        move->SetLifetimePosition(position);
        at->GetBlock()->InsertInstructionBefore(move, at);
      } else {
        DCHECK(at->IsParallelMove());
        move = at->AsParallelMove();
      }
    }
  } else if (IsInstructionEnd(position)) {
    // Move must happen after the instruction.
    DCHECK(!at->IsControlFlow());
    move = at->GetNext()->AsParallelMove();
    // This is a parallel move for connecting siblings in the same block. We need to
    // differentiate it from moves for connecting blocks, and from input moves.
    if (move == nullptr || move->GetLifetimePosition() > position) {
      move = new (allocator_) HParallelMove(allocator_);
      move->SetLifetimePosition(position);
      at->GetBlock()->InsertInstructionBefore(move, at->GetNext());
    }
  } else {
    // Move must happen before the instruction.
    HInstruction* previous = at->GetPrevious();
    if (previous == nullptr
        || !previous->IsParallelMove()
        || previous->GetLifetimePosition() != position) {
      // If the previous is a parallel move, then its position must be lower
      // than the given `position`: it was added just after the non-parallel
      // move instruction that precedes `instruction`.
      DCHECK(previous == nullptr
             || !previous->IsParallelMove()
             || previous->GetLifetimePosition() < position);
      move = new (allocator_) HParallelMove(allocator_);
      move->SetLifetimePosition(position);
      at->GetBlock()->InsertInstructionBefore(move, at);
    } else {
      move = previous->AsParallelMove();
    }
  }
  DCHECK_EQ(move->GetLifetimePosition(), position);
  AddMove(move, source, destination, instruction, instruction->GetType());
}

void RegisterAllocationResolver::InsertParallelMoveAtExitOf(HBasicBlock* block,
                                                            HInstruction* instruction,
                                                            Location source,
                                                            Location destination) const {
  DCHECK(IsValidDestination(destination)) << destination;
  if (source.Equals(destination)) return;

  DCHECK_EQ(block->GetNormalSuccessors().size(), 1u);
  HInstruction* last = block->GetLastInstruction();
  // We insert moves at exit for phi predecessors and connecting blocks.
  // A block ending with an if or a packed switch cannot branch to a block
  // with phis because we do not allow critical edges. Nor can it connect
  // a split interval between two blocks: that move has to happen in the successor.
  DCHECK(!last->IsIf() && !last->IsPackedSwitch());
  HInstruction* previous = last->GetPrevious();
  HParallelMove* move;
  // This is a parallel move for connecting blocks. We need to differentiate
  // it from moves for connecting siblings in the same block, and from output moves.
  size_t position = last->GetLifetimePosition();
  if (previous == nullptr || !previous->IsParallelMove()
      || previous->AsParallelMove()->GetLifetimePosition() != position) {
    move = new (allocator_) HParallelMove(allocator_);
    move->SetLifetimePosition(position);
    block->InsertInstructionBefore(move, last);
  } else {
    move = previous->AsParallelMove();
  }
  AddMove(move, source, destination, instruction, instruction->GetType());
}

void RegisterAllocationResolver::InsertParallelMoveAtEntryOf(HBasicBlock* block,
                                                             HInstruction* instruction,
                                                             Location source,
                                                             Location destination) const {
  DCHECK(IsValidDestination(destination)) << destination;
  if (source.Equals(destination)) return;

  HInstruction* first = block->GetFirstInstruction();
  HParallelMove* move = first->AsParallelMove();
  size_t position = block->GetLifetimeStart();
  // This is a parallel move for connecting blocks. We need to differentiate
  // it from moves for connecting siblings in the same block, and from input moves.
  if (move == nullptr || move->GetLifetimePosition() != position) {
    move = new (allocator_) HParallelMove(allocator_);
    move->SetLifetimePosition(position);
    block->InsertInstructionBefore(move, first);
  }
  AddMove(move, source, destination, instruction, instruction->GetType());
}

void RegisterAllocationResolver::InsertMoveAfter(HInstruction* instruction,
                                                 Location source,
                                                 Location destination) const {
  DCHECK(IsValidDestination(destination)) << destination;
  if (source.Equals(destination)) return;

  if (instruction->IsPhi()) {
    InsertParallelMoveAtEntryOf(instruction->GetBlock(), instruction, source, destination);
    return;
  }

  size_t position = instruction->GetLifetimePosition() + 1;
  HParallelMove* move = instruction->GetNext()->AsParallelMove();
  // This is a parallel move for moving the output of an instruction. We need
  // to differentiate it from input moves, moves for connecting siblings in
  // the same block, and moves for connecting blocks.
  if (move == nullptr || move->GetLifetimePosition() != position) {
    move = new (allocator_) HParallelMove(allocator_);
    move->SetLifetimePosition(position);
    instruction->GetBlock()->InsertInstructionBefore(move, instruction->GetNext());
  }
  AddMove(move, source, destination, instruction, instruction->GetType());
}

}  // namespace art