//===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the LiveInterval analysis pass which is used
// by the Linear Scan Register allocator. This pass linearizes the
// basic blocks of the function in DFS order and uses the
// LiveVariables pass to conservatively compute live intervals for
// each virtual and physical register.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "LiveRangeCalc.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/VirtRegMap.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/BlockFrequency.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include <algorithm>
#include <cmath>
#include <limits>
using namespace llvm;

#define DEBUG_TYPE "regalloc"

char LiveIntervals::ID = 0;
char &llvm::LiveIntervalsID = LiveIntervals::ID;
INITIALIZE_PASS_BEGIN(LiveIntervals, "liveintervals",
                "Live Interval Analysis", false, false)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LiveVariables)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
INITIALIZE_PASS_END(LiveIntervals, "liveintervals",
                "Live Interval Analysis", false, false)

// The cl::opt only exists in asserts builds; in release builds keep a plain
// bool with the same name so the use in runOnMachineFunction still compiles.
#ifndef NDEBUG
static cl::opt<bool> EnablePrecomputePhysRegs(
  "precompute-phys-liveness", cl::Hidden,
  cl::desc("Eagerly compute live intervals for all physreg units."));
#else
static bool EnablePrecomputePhysRegs = false;
#endif // NDEBUG

static cl::opt<bool> EnableSubRegLiveness(
  "enable-subreg-liveness", cl::Hidden, cl::init(true),
  cl::desc("Enable subregister liveness tracking."));

namespace llvm {
cl::opt<bool> UseSegmentSetForPhysRegs(
    "use-segment-set-for-physregs", cl::Hidden, cl::init(true),
    cl::desc(
        "Use segment set for the computation of the live ranges of physregs."));
}

void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequired<AAResultsWrapperPass>();
  AU.addPreserved<AAResultsWrapperPass>();
  // LiveVariables isn't really required by this analysis, it is only required
  // here to make sure it is live during TwoAddressInstructionPass and
  // PHIElimination. This is temporary.
  AU.addRequired<LiveVariables>();
  AU.addPreserved<LiveVariables>();
  AU.addPreservedID(MachineLoopInfoID);
  AU.addRequiredTransitiveID(MachineDominatorsID);
  AU.addPreservedID(MachineDominatorsID);
  AU.addPreserved<SlotIndexes>();
  AU.addRequiredTransitive<SlotIndexes>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

LiveIntervals::LiveIntervals() : MachineFunctionPass(ID),
  DomTree(nullptr), LRCalc(nullptr) {
  initializeLiveIntervalsPass(*PassRegistry::getPassRegistry());
}

LiveIntervals::~LiveIntervals() {
  delete LRCalc;
}

/// Drop all computed liveness information: per-virtreg intervals, the cached
/// regmask tables, regunit ranges, and the VNInfo allocator backing them.
void LiveIntervals::releaseMemory() {
  // Free the live intervals themselves.
  for (unsigned i = 0, e = VirtRegIntervals.size(); i != e; ++i)
    delete VirtRegIntervals[TargetRegisterInfo::index2VirtReg(i)];
  VirtRegIntervals.clear();
  RegMaskSlots.clear();
  RegMaskBits.clear();
  RegMaskBlocks.clear();

  for (unsigned i = 0, e = RegUnitRanges.size(); i != e; ++i)
    delete RegUnitRanges[i];
  RegUnitRanges.clear();

  // Release VNInfo memory regions, VNInfo objects don't need to be dtor'd.
  VNInfoAllocator.Reset();
}

/// runOnMachineFunction - calculates LiveIntervals
///
bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
  MF = &fn;
  MRI = &MF->getRegInfo();
  TRI = MF->getSubtarget().getRegisterInfo();
  TII = MF->getSubtarget().getInstrInfo();
  AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
  Indexes = &getAnalysis<SlotIndexes>();
  DomTree = &getAnalysis<MachineDominatorTree>();

  // Subregister liveness is only tracked when both the command-line option
  // and the subtarget allow it.
  if (EnableSubRegLiveness && MF->getSubtarget().enableSubRegLiveness())
    MRI->enableSubRegLiveness(true);

  // LRCalc is lazily allocated once and reused across functions.
  if (!LRCalc)
    LRCalc = new LiveRangeCalc();

  // Allocate space for all virtual registers.
  VirtRegIntervals.resize(MRI->getNumVirtRegs());

  computeVirtRegs();
  computeRegMasks();
  computeLiveInRegUnits();

  if (EnablePrecomputePhysRegs) {
    // For stress testing, precompute live ranges of all physical register
    // units, including reserved registers.
    for (unsigned i = 0, e = TRI->getNumRegUnits(); i != e; ++i)
      getRegUnit(i);
  }
  DEBUG(dump());
  return true;
}

/// print - Implement the dump method.
void LiveIntervals::print(raw_ostream &OS, const Module* ) const {
  OS << "********** INTERVALS **********\n";

  // Dump the regunits.
  for (unsigned i = 0, e = RegUnitRanges.size(); i != e; ++i)
    if (LiveRange *LR = RegUnitRanges[i])
      OS << PrintRegUnit(i, TRI) << ' ' << *LR << '\n';

  // Dump the virtregs.
  for (unsigned i = 0, e = MRI->getNumVirtRegs(); i != e; ++i) {
    unsigned Reg = TargetRegisterInfo::index2VirtReg(i);
    if (hasInterval(Reg))
      OS << getInterval(Reg) << '\n';
  }

  OS << "RegMasks:";
  for (unsigned i = 0, e = RegMaskSlots.size(); i != e; ++i)
    OS << ' ' << RegMaskSlots[i];
  OS << '\n';

  printInstrs(OS);
}

void LiveIntervals::printInstrs(raw_ostream &OS) const {
  OS << "********** MACHINEINSTRS **********\n";
  MF->print(OS, Indexes);
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void LiveIntervals::dumpInstrs() const {
  printInstrs(dbgs());
}
#endif

/// Allocate a fresh LiveInterval for \p reg. Physregs get an infinite spill
/// weight so the allocator never tries to spill them; virtregs start at zero.
LiveInterval* LiveIntervals::createInterval(unsigned reg) {
  float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ?
                  llvm::huge_valf : 0.0F;
  return new LiveInterval(reg, Weight);
}


/// computeVirtRegInterval - Compute the live interval of a virtual register,
/// based on defs and uses.
void LiveIntervals::computeVirtRegInterval(LiveInterval &LI) {
  assert(LRCalc && "LRCalc not initialized.");
  assert(LI.empty() && "Should only compute empty intervals.");
  bool ShouldTrackSubRegLiveness = MRI->shouldTrackSubRegLiveness(LI.reg);
  LRCalc->reset(MF, getSlotIndexes(), DomTree, &getVNInfoAllocator());
  LRCalc->calculate(LI, ShouldTrackSubRegLiveness);
  // Dead values can leave the interval in multiple connected components; if
  // so, split each component into its own virtual register.
  bool SeparatedComponents = computeDeadValues(LI, nullptr);
  if (SeparatedComponents) {
    assert(ShouldTrackSubRegLiveness
           && "Separated components should only occur for unused subreg defs");
    SmallVector<LiveInterval*, 8> SplitLIs;
    splitSeparateComponents(LI, SplitLIs);
  }
}

/// Compute intervals for every virtual register that has at least one
/// non-debug use or def.
void LiveIntervals::computeVirtRegs() {
  for (unsigned i = 0, e = MRI->getNumVirtRegs(); i != e; ++i) {
    unsigned Reg = TargetRegisterInfo::index2VirtReg(i);
    if (MRI->reg_nodbg_empty(Reg))
      continue;
    createAndComputeVirtRegInterval(Reg);
  }
}

/// Collect the slot index and clobber mask of every regmask in the function,
/// recording per-block (first-index, count) pairs in RegMaskBlocks.
void LiveIntervals::computeRegMasks()
{
  RegMaskBlocks.resize(MF->getNumBlockIDs());

  // Find all instructions with regmask operands.
  for (MachineBasicBlock &MBB : *MF) {
    std::pair<unsigned, unsigned> &RMB = RegMaskBlocks[MBB.getNumber()];
    RMB.first = RegMaskSlots.size();

    // Some block starts, such as EH funclets, create masks.
    if (const uint32_t *Mask = MBB.getBeginClobberMask(TRI)) {
      RegMaskSlots.push_back(Indexes->getMBBStartIdx(&MBB));
      RegMaskBits.push_back(Mask);
    }

    for (MachineInstr &MI : MBB) {
      for (const MachineOperand &MO : MI.operands()) {
        if (!MO.isRegMask())
          continue;
        RegMaskSlots.push_back(Indexes->getInstructionIndex(&MI).getRegSlot());
        RegMaskBits.push_back(MO.getRegMask());
      }
    }

    // Some block ends, such as funclet returns, create masks.
    if (const uint32_t *Mask = MBB.getEndClobberMask(TRI)) {
      RegMaskSlots.push_back(Indexes->getMBBEndIdx(&MBB));
      RegMaskBits.push_back(Mask);
    }

    // Compute the number of register mask instructions in this block.
    RMB.second = RegMaskSlots.size() - RMB.first;
  }
}

//===----------------------------------------------------------------------===//
//                           Register Unit Liveness
//===----------------------------------------------------------------------===//
//
// Fixed interference typically comes from ABI boundaries: Function arguments
// and return values are passed in fixed registers, and so are exception
// pointers entering landing pads. Certain instructions require values to be
// present in specific registers. That is also represented through fixed
// interference.
//

/// computeRegUnitInterval - Compute the live range of a register unit, based
/// on the uses and defs of aliasing registers. The range should be empty,
/// or contain only dead phi-defs from ABI blocks.
void LiveIntervals::computeRegUnitRange(LiveRange &LR, unsigned Unit) {
  assert(LRCalc && "LRCalc not initialized.");
  LRCalc->reset(MF, getSlotIndexes(), DomTree, &getVNInfoAllocator());

  // The physregs aliasing Unit are the roots and their super-registers.
  // Create all values as dead defs before extending to uses. Note that roots
  // may share super-registers. That's OK because createDeadDefs() is
  // idempotent. It is very rare for a register unit to have multiple roots, so
  // uniquing super-registers is probably not worthwhile.
  for (MCRegUnitRootIterator Roots(Unit, TRI); Roots.isValid(); ++Roots) {
    for (MCSuperRegIterator Supers(*Roots, TRI, /*IncludeSelf=*/true);
         Supers.isValid(); ++Supers) {
      if (!MRI->reg_empty(*Supers))
        LRCalc->createDeadDefs(LR, *Supers);
    }
  }

  // Now extend LR to reach all uses.
  // Ignore uses of reserved registers. We only track defs of those.
  for (MCRegUnitRootIterator Roots(Unit, TRI); Roots.isValid(); ++Roots) {
    for (MCSuperRegIterator Supers(*Roots, TRI, /*IncludeSelf=*/true);
         Supers.isValid(); ++Supers) {
      unsigned Reg = *Supers;
      if (!MRI->isReserved(Reg) && !MRI->reg_empty(Reg))
        LRCalc->extendToUses(LR, Reg);
    }
  }

  // Flush the segment set to the segment vector.
  if (UseSegmentSetForPhysRegs)
    LR.flushSegmentSet();
}


/// computeLiveInRegUnits - Precompute the live ranges of any register units
/// that are live-in to an ABI block somewhere. Register values can appear
/// without a corresponding def when entering the entry block or a landing pad.
///
void LiveIntervals::computeLiveInRegUnits() {
  RegUnitRanges.resize(TRI->getNumRegUnits());
  DEBUG(dbgs() << "Computing live-in reg-units in ABI blocks.\n");

  // Keep track of the live range sets allocated.
  SmallVector<unsigned, 8> NewRanges;

  // Check all basic blocks for live-ins.
  for (MachineFunction::const_iterator MFI = MF->begin(), MFE = MF->end();
       MFI != MFE; ++MFI) {
    const MachineBasicBlock *MBB = &*MFI;

    // We only care about ABI blocks: Entry + landing pads.
    if ((MFI != MF->begin() && !MBB->isEHPad()) || MBB->livein_empty())
      continue;

    // Create phi-defs at Begin for all live-in registers.
    SlotIndex Begin = Indexes->getMBBStartIdx(MBB);
    DEBUG(dbgs() << Begin << "\tBB#" << MBB->getNumber());
    for (const auto &LI : MBB->liveins()) {
      for (MCRegUnitIterator Units(LI.PhysReg, TRI); Units.isValid(); ++Units) {
        unsigned Unit = *Units;
        LiveRange *LR = RegUnitRanges[Unit];
        if (!LR) {
          // Use segment set to speed-up initial computation of the live range.
          LR = RegUnitRanges[Unit] = new LiveRange(UseSegmentSetForPhysRegs);
          NewRanges.push_back(Unit);
        }
        VNInfo *VNI = LR->createDeadDef(Begin, getVNInfoAllocator());
        (void)VNI;
        DEBUG(dbgs() << ' ' << PrintRegUnit(Unit, TRI) << '#' << VNI->id);
      }
    }
    DEBUG(dbgs() << '\n');
  }
  DEBUG(dbgs() << "Created " << NewRanges.size() << " new intervals.\n");

  // Compute the 'normal' part of the ranges.
  for (unsigned i = 0, e = NewRanges.size(); i != e; ++i) {
    unsigned Unit = NewRanges[i];
    computeRegUnitRange(*RegUnitRanges[Unit], Unit);
  }
}


/// Seed \p LR with a minimal (def, dead-slot) segment for every value in
/// \p VNIs that is still in use. extendSegmentsToUses() grows these later.
static void createSegmentsForValues(LiveRange &LR,
    iterator_range<LiveInterval::vni_iterator> VNIs) {
  for (auto VNI : VNIs) {
    if (VNI->isUnused())
      continue;
    SlotIndex Def = VNI->def;
    LR.addSegment(LiveRange::Segment(Def, Def.getDeadSlot(), VNI));
  }
}

typedef SmallVector<std::pair<SlotIndex, VNInfo*>, 16> ShrinkToUsesWorkList;

/// Grow the minimal segments in \p LR until every (use index, value) pair in
/// \p WorkList is covered, walking backwards across basic block boundaries.
/// \p OldRange supplies the pre-shrink values for PHI predecessors.
static void extendSegmentsToUses(LiveRange &LR, const SlotIndexes &Indexes,
                                 ShrinkToUsesWorkList &WorkList,
                                 const LiveRange &OldRange) {
  // Keep track of the PHIs that are in use.
  SmallPtrSet<VNInfo*, 8> UsedPHIs;
  // Blocks that have already been added to WorkList as live-out.
  SmallPtrSet<MachineBasicBlock*, 16> LiveOut;

  // Extend intervals to reach all uses in WorkList.
  while (!WorkList.empty()) {
    SlotIndex Idx = WorkList.back().first;
    VNInfo *VNI = WorkList.back().second;
    WorkList.pop_back();
    // Idx is an end point (open), so look up the block of the slot before it.
    const MachineBasicBlock *MBB = Indexes.getMBBFromIndex(Idx.getPrevSlot());
    SlotIndex BlockStart = Indexes.getMBBStartIdx(MBB);

    // Extend the live range for VNI to be live at Idx.
    if (VNInfo *ExtVNI = LR.extendInBlock(BlockStart, Idx)) {
      assert(ExtVNI == VNI && "Unexpected existing value number");
      (void)ExtVNI;
      // Is this a PHIDef we haven't seen before?
      if (!VNI->isPHIDef() || VNI->def != BlockStart ||
          !UsedPHIs.insert(VNI).second)
        continue;
      // The PHI is live, make sure the predecessors are live-out.
      for (auto &Pred : MBB->predecessors()) {
        if (!LiveOut.insert(Pred).second)
          continue;
        SlotIndex Stop = Indexes.getMBBEndIdx(Pred);
        // A predecessor is not required to have a live-out value for a PHI.
        if (VNInfo *PVNI = OldRange.getVNInfoBefore(Stop))
          WorkList.push_back(std::make_pair(Stop, PVNI));
      }
      continue;
    }

    // VNI is live-in to MBB.
    DEBUG(dbgs() << " live-in at " << BlockStart << '\n');
    LR.addSegment(LiveRange::Segment(BlockStart, Idx, VNI));

    // Make sure VNI is live-out from the predecessors.
    for (auto &Pred : MBB->predecessors()) {
      if (!LiveOut.insert(Pred).second)
        continue;
      SlotIndex Stop = Indexes.getMBBEndIdx(Pred);
      assert(OldRange.getVNInfoBefore(Stop) == VNI &&
             "Wrong value out of predecessor");
      WorkList.push_back(std::make_pair(Stop, VNI));
    }
  }
}

/// Shrink \p li (and its subranges) to the minimal set of segments covering
/// its remaining uses. Returns true when dead values may have split the
/// interval into separate connected components. Instructions whose defs all
/// became dead are appended to \p dead when it is non-null.
bool LiveIntervals::shrinkToUses(LiveInterval *li,
                                 SmallVectorImpl<MachineInstr*> *dead) {
  DEBUG(dbgs() << "Shrink: " << *li << '\n');
  assert(TargetRegisterInfo::isVirtualRegister(li->reg)
         && "Can only shrink virtual registers");

  // Shrink subregister live ranges.
  bool NeedsCleanup = false;
  for (LiveInterval::SubRange &S : li->subranges()) {
    shrinkToUses(S, li->reg);
    if (S.empty())
      NeedsCleanup = true;
  }
  if (NeedsCleanup)
    li->removeEmptySubRanges();

  // Find all the values used, including PHI kills.
  ShrinkToUsesWorkList WorkList;

  // Visit all instructions reading li->reg.
  for (MachineRegisterInfo::reg_instr_iterator
       I = MRI->reg_instr_begin(li->reg), E = MRI->reg_instr_end();
       I != E; ) {
    MachineInstr *UseMI = &*(I++);
    if (UseMI->isDebugValue() || !UseMI->readsVirtualRegister(li->reg))
      continue;
    SlotIndex Idx = getInstructionIndex(UseMI).getRegSlot();
    LiveQueryResult LRQ = li->Query(Idx);
    VNInfo *VNI = LRQ.valueIn();
    if (!VNI) {
      // This shouldn't happen: readsVirtualRegister returns true, but there is
      // no live value. It is likely caused by a target getting <undef> flags
      // wrong.
      DEBUG(dbgs() << Idx << '\t' << *UseMI
             << "Warning: Instr claims to read non-existent value in "
             << *li << '\n');
      continue;
    }
    // Special case: An early-clobber tied operand reads and writes the
    // register one slot early.
    if (VNInfo *DefVNI = LRQ.valueDefined())
      Idx = DefVNI->def;

    WorkList.push_back(std::make_pair(Idx, VNI));
  }

  // Create new live ranges with only minimal live segments per def.
  LiveRange NewLR;
  createSegmentsForValues(NewLR, make_range(li->vni_begin(), li->vni_end()));
  extendSegmentsToUses(NewLR, *Indexes, WorkList, *li);

  // Move the trimmed segments back.
  li->segments.swap(NewLR.segments);

  // Handle dead values.
  bool CanSeparate = computeDeadValues(*li, dead);
  DEBUG(dbgs() << "Shrunk: " << *li << '\n');
  return CanSeparate;
}

/// Mark unused values in \p LI, add kill/read-undef flags to their defining
/// instructions, and remove dead PHI segments. Returns true when the interval
/// may have been split into separate connected components. Fully-dead
/// instructions are appended to \p dead when it is non-null.
bool LiveIntervals::computeDeadValues(LiveInterval &LI,
                                      SmallVectorImpl<MachineInstr*> *dead) {
  bool MayHaveSplitComponents = false;
  for (auto VNI : LI.valnos) {
    if (VNI->isUnused())
      continue;
    SlotIndex Def = VNI->def;
    LiveRange::iterator I = LI.FindSegmentContaining(Def);
    assert(I != LI.end() && "Missing segment for VNI");

    // Is the register live before? Otherwise we may have to add a read-undef
    // flag for subregister defs.
    bool DeadBeforeDef = false;
    unsigned VReg = LI.reg;
    if (MRI->shouldTrackSubRegLiveness(VReg)) {
      if ((I == LI.begin() || std::prev(I)->end < Def) && !VNI->isPHIDef()) {
        MachineInstr *MI = getInstructionFromIndex(Def);
        MI->setRegisterDefReadUndef(VReg);
        DeadBeforeDef = true;
      }
    }

    // A value whose segment ends at its own dead slot is dead.
    if (I->end != Def.getDeadSlot())
      continue;
    if (VNI->isPHIDef()) {
      // This is a dead PHI. Remove it.
      VNI->markUnused();
      LI.removeSegment(I);
      DEBUG(dbgs() << "Dead PHI at " << Def << " may separate interval\n");
      MayHaveSplitComponents = true;
    } else {
      // This is a dead def. Make sure the instruction knows.
      MachineInstr *MI = getInstructionFromIndex(Def);
      assert(MI && "No instruction defining live value");
      MI->addRegisterDead(VReg, TRI);

      // If we have a dead def that is completely separate from the rest of
      // the liverange then we rewrite it to use a different VReg to not violate
      // the rule that the liveness of a virtual register forms a connected
      // component. This should only happen if subregister liveness is tracked.
      if (DeadBeforeDef)
        MayHaveSplitComponents = true;

      if (dead && MI->allDefsAreDead()) {
        DEBUG(dbgs() << "All defs dead: " << Def << '\t' << *MI);
        dead->push_back(MI);
      }
    }
  }
  return MayHaveSplitComponents;
}

/// Shrink the subregister range \p SR of virtual register \p Reg to the
/// segments actually covered by uses of its lanes, dropping dead PHI values.
void LiveIntervals::shrinkToUses(LiveInterval::SubRange &SR, unsigned Reg)
{
  DEBUG(dbgs() << "Shrink: " << SR << '\n');
  assert(TargetRegisterInfo::isVirtualRegister(Reg)
         && "Can only shrink virtual registers");
  // Find all the values used, including PHI kills.
  ShrinkToUsesWorkList WorkList;

  // Visit all instructions reading Reg.
  SlotIndex LastIdx;
  for (MachineOperand &MO : MRI->reg_operands(Reg)) {
    MachineInstr *UseMI = MO.getParent();
    if (UseMI->isDebugValue())
      continue;
    // Maybe the operand is for a subregister we don't care about.
    unsigned SubReg = MO.getSubReg();
    if (SubReg != 0) {
      LaneBitmask LaneMask = TRI->getSubRegIndexLaneMask(SubReg);
      if ((LaneMask & SR.LaneMask) == 0)
        continue;
    }
    // We only need to visit each instruction once.
    SlotIndex Idx = getInstructionIndex(UseMI).getRegSlot();
    if (Idx == LastIdx)
      continue;
    LastIdx = Idx;

    LiveQueryResult LRQ = SR.Query(Idx);
    VNInfo *VNI = LRQ.valueIn();
    // For Subranges it is possible that only undef values are left in that
    // part of the subregister, so there is no real liverange at the use
    if (!VNI)
      continue;

    // Special case: An early-clobber tied operand reads and writes the
    // register one slot early.
    if (VNInfo *DefVNI = LRQ.valueDefined())
      Idx = DefVNI->def;

    WorkList.push_back(std::make_pair(Idx, VNI));
  }

  // Create a new live ranges with only minimal live segments per def.
  LiveRange NewLR;
  createSegmentsForValues(NewLR, make_range(SR.vni_begin(), SR.vni_end()));
  extendSegmentsToUses(NewLR, *Indexes, WorkList, SR);

  // Move the trimmed ranges back.
  SR.segments.swap(NewLR.segments);

  // Remove dead PHI value numbers
  for (auto VNI : SR.valnos) {
    if (VNI->isUnused())
      continue;
    const LiveRange::Segment *Segment = SR.getSegmentContaining(VNI->def);
    assert(Segment != nullptr && "Missing segment for VNI");
    if (Segment->end != VNI->def.getDeadSlot())
      continue;
    if (VNI->isPHIDef()) {
      // This is a dead PHI. Remove it.
      VNI->markUnused();
      SR.removeSegment(*Segment);
      DEBUG(dbgs() << "Dead PHI at " << VNI->def << " may separate interval\n");
    }
  }

  DEBUG(dbgs() << "Shrunk: " << SR << '\n');
}

/// Extend \p LR so that it reaches every slot index in \p Indices, using
/// LiveRangeCalc to fill in live-through blocks.
void LiveIntervals::extendToIndices(LiveRange &LR,
                                    ArrayRef<SlotIndex> Indices) {
  assert(LRCalc && "LRCalc not initialized.");
  LRCalc->reset(MF, getSlotIndexes(), DomTree, &getVNInfoAllocator());
  for (unsigned i = 0, e = Indices.size(); i != e; ++i)
    LRCalc->extend(LR, Indices[i]);
}

/// Remove the portion of \p LR live after \p Kill, following the value across
/// successor blocks. The former segment end points are collected in
/// \p EndPoints so callers can re-extend the range later.
void LiveIntervals::pruneValue(LiveRange &LR, SlotIndex Kill,
                               SmallVectorImpl<SlotIndex> *EndPoints) {
  LiveQueryResult LRQ = LR.Query(Kill);
  VNInfo *VNI = LRQ.valueOutOrDead();
  if (!VNI)
    return;

  MachineBasicBlock *KillMBB = Indexes->getMBBFromIndex(Kill);
  SlotIndex MBBEnd = Indexes->getMBBEndIdx(KillMBB);

  // If VNI isn't live out from KillMBB, the value is trivially pruned.
  if (LRQ.endPoint() < MBBEnd) {
    LR.removeSegment(Kill, LRQ.endPoint());
    if (EndPoints) EndPoints->push_back(LRQ.endPoint());
    return;
  }

  // VNI is live out of KillMBB.
  LR.removeSegment(Kill, MBBEnd);
  if (EndPoints) EndPoints->push_back(MBBEnd);

  // Find all blocks that are reachable from KillMBB without leaving VNI's live
  // range. It is possible that KillMBB itself is reachable, so start a DFS
  // from each successor.
  typedef SmallPtrSet<MachineBasicBlock*, 9> VisitedTy;
  VisitedTy Visited;
  for (MachineBasicBlock::succ_iterator
       SuccI = KillMBB->succ_begin(), SuccE = KillMBB->succ_end();
       SuccI != SuccE; ++SuccI) {
    for (df_ext_iterator<MachineBasicBlock*, VisitedTy>
         I = df_ext_begin(*SuccI, Visited), E = df_ext_end(*SuccI, Visited);
         I != E;) {
      MachineBasicBlock *MBB = *I;

      // Check if VNI is live in to MBB.
      SlotIndex MBBStart, MBBEnd;
      std::tie(MBBStart, MBBEnd) = Indexes->getMBBRange(MBB);
      LiveQueryResult LRQ = LR.Query(MBBStart);
      if (LRQ.valueIn() != VNI) {
        // This block isn't part of the VNI segment. Prune the search.
        I.skipChildren();
        continue;
      }

      // Prune the search if VNI is killed in MBB.
      if (LRQ.endPoint() < MBBEnd) {
        LR.removeSegment(MBBStart, LRQ.endPoint());
        if (EndPoints) EndPoints->push_back(LRQ.endPoint());
        I.skipChildren();
        continue;
      }

      // VNI is live through MBB.
      LR.removeSegment(MBBStart, MBBEnd);
      if (EndPoints) EndPoints->push_back(MBBEnd);
      ++I;
    }
  }
}

//===----------------------------------------------------------------------===//
// Register allocator hooks.
//

/// Re-insert <kill> flags on the last use of each virtual register, using the
/// computed intervals plus \p VRM to suppress kills cancelled by overlapping
/// physreg/regunit liveness or partially-undefined subregister reads.
void LiveIntervals::addKillFlags(const VirtRegMap *VRM) {
  // Keep track of regunit ranges.
  SmallVector<std::pair<const LiveRange*, LiveRange::const_iterator>, 8> RU;
  // Keep track of subregister ranges.
  SmallVector<std::pair<const LiveInterval::SubRange*,
                        LiveRange::const_iterator>, 4> SRs;

  for (unsigned i = 0, e = MRI->getNumVirtRegs(); i != e; ++i) {
    unsigned Reg = TargetRegisterInfo::index2VirtReg(i);
    if (MRI->reg_nodbg_empty(Reg))
      continue;
    const LiveInterval &LI = getInterval(Reg);
    if (LI.empty())
      continue;

    // Find the regunit intervals for the assigned register. They may overlap
    // the virtual register live range, cancelling any kills.
    RU.clear();
    for (MCRegUnitIterator Units(VRM->getPhys(Reg), TRI); Units.isValid();
         ++Units) {
      const LiveRange &RURange = getRegUnit(*Units);
      if (RURange.empty())
        continue;
      RU.push_back(std::make_pair(&RURange, RURange.find(LI.begin()->end)));
    }

    if (MRI->subRegLivenessEnabled()) {
      SRs.clear();
      for (const LiveInterval::SubRange &SR : LI.subranges()) {
        SRs.push_back(std::make_pair(&SR, SR.find(LI.begin()->end)));
      }
    }

    // Every instruction that kills Reg corresponds to a segment range end
    // point.
    for (LiveInterval::const_iterator RI = LI.begin(), RE = LI.end(); RI != RE;
         ++RI) {
      // A block index indicates an MBB edge.
      if (RI->end.isBlock())
        continue;
      MachineInstr *MI = getInstructionFromIndex(RI->end);
      if (!MI)
        continue;

      // Check if any of the regunits are live beyond the end of RI. That could
      // happen when a physreg is defined as a copy of a virtreg:
      //
      //   %EAX = COPY %vreg5
      //   FOO %vreg5         <--- MI, cancel kill because %EAX is live.
      //   BAR %EAX<kill>
      //
      // There should be no kill flag on FOO when %vreg5 is rewritten as %EAX.
      for (auto &RUP : RU) {
        const LiveRange &RURange = *RUP.first;
        LiveRange::const_iterator &I = RUP.second;
        if (I == RURange.end())
          continue;
        I = RURange.advanceTo(I, RI->end);
        if (I == RURange.end() || I->start >= RI->end)
          continue;
        // I is overlapping RI.
        goto CancelKill;
      }

      if (MRI->subRegLivenessEnabled()) {
        // When reading a partial undefined value we must not add a kill flag.
        // The regalloc might have used the undef lane for something else.
        // Example:
        //     %vreg1 = ...              ; R32: %vreg1
        //     %vreg2:high16 = ...       ; R64: %vreg2
        //        = read %vreg2<kill>    ; R64: %vreg2
        //        = read %vreg1          ; R32: %vreg1
        // The <kill> flag is correct for %vreg2, but the register allocator may
        // assign R0L to %vreg1, and R0 to %vreg2 because the low 32bits of R0
        // are actually never written by %vreg2. After assignment the <kill>
        // flag at the read instruction is invalid.
        LaneBitmask DefinedLanesMask;
        if (!SRs.empty()) {
          // Compute a mask of lanes that are defined.
          DefinedLanesMask = 0;
          for (auto &SRP : SRs) {
            const LiveInterval::SubRange &SR = *SRP.first;
            LiveRange::const_iterator &I = SRP.second;
            if (I == SR.end())
              continue;
            I = SR.advanceTo(I, RI->end);
            if (I == SR.end() || I->start >= RI->end)
              continue;
            // I is overlapping RI
            DefinedLanesMask |= SR.LaneMask;
          }
        } else
          DefinedLanesMask = ~0u;

        bool IsFullWrite = false;
        for (const MachineOperand &MO : MI->operands()) {
          if (!MO.isReg() || MO.getReg() != Reg)
            continue;
          if (MO.isUse()) {
            // Reading any undefined lanes?
            LaneBitmask UseMask = TRI->getSubRegIndexLaneMask(MO.getSubReg());
            if ((UseMask & ~DefinedLanesMask) != 0)
              goto CancelKill;
          } else if (MO.getSubReg() == 0) {
            // Writing to the full register?
            assert(MO.isDef());
            IsFullWrite = true;
          }
        }

        // If an instruction writes to a subregister, a new segment starts in
        // the LiveInterval. But as this is only overriding part of the register
        // adding kill-flags is not correct here after registers have been
        // assigned.
        if (!IsFullWrite) {
          // Next segment has to be adjacent in the subregister write case.
          LiveRange::const_iterator N = std::next(RI);
          if (N != LI.end() && N->start == RI->end)
            goto CancelKill;
        }
      }

      MI->addRegisterKilled(Reg, nullptr);
      continue;
CancelKill:
      MI->clearRegisterKills(Reg, nullptr);
    }
  }
}

MachineBasicBlock*
LiveIntervals::intervalIsInOneMBB(const LiveInterval &LI) const {
  // A local live range must be fully contained inside the block, meaning it is
  // defined and killed at instructions, not at block boundaries. It is not
  // live in or or out of any block.
  //
  // It is technically possible to have a PHI-defined live range identical to a
  // single block, but we are going to return false in that case.

  SlotIndex Start = LI.beginIndex();
  if (Start.isBlock())
    return nullptr;

  SlotIndex Stop = LI.endIndex();
  if (Stop.isBlock())
    return nullptr;

  // getMBBFromIndex doesn't need to search the MBB table when both indexes
  // belong to proper instructions.
  MachineBasicBlock *MBB1 = Indexes->getMBBFromIndex(Start);
  MachineBasicBlock *MBB2 = Indexes->getMBBFromIndex(Stop);
  return MBB1 == MBB2 ? MBB1 : nullptr;
}

/// Returns true when \p VNI flows into a PHI-def of \p LI, i.e. it is the
/// live-out value of some predecessor of a block with a PHI-defined value.
bool
LiveIntervals::hasPHIKill(const LiveInterval &LI, const VNInfo *VNI) const {
  for (const VNInfo *PHI : LI.valnos) {
    if (PHI->isUnused() || !PHI->isPHIDef())
      continue;
    const MachineBasicBlock *PHIMBB = getMBBFromIndex(PHI->def);
    // Conservatively return true instead of scanning huge predecessor lists.
    if (PHIMBB->pred_size() > 100)
      return true;
    for (MachineBasicBlock::const_pred_iterator
         PI = PHIMBB->pred_begin(), PE = PHIMBB->pred_end(); PI != PE; ++PI)
      if (VNI == LI.getVNInfoBefore(Indexes->getMBBEndIdx(*PI)))
        return true;
  }
  return false;
}

/// Spill weight of one use/def of \p MI: its block frequency normalized by
/// the entry block frequency, doubled when the operand both reads and writes.
float
LiveIntervals::getSpillWeight(bool isDef, bool isUse,
                              const MachineBlockFrequencyInfo *MBFI,
                              const MachineInstr *MI) {
  BlockFrequency Freq = MBFI->getBlockFreq(MI->getParent());
  const float Scale = 1.0f / MBFI->getEntryFreq();
  return (isDef + isUse) * (Freq.getFrequency() * Scale);
}

/// Create an interval for \p reg (which must not already have one) containing
/// a single segment from \p startInst to the end of its basic block.
LiveRange::Segment
LiveIntervals::addSegmentToEndOfBlock(unsigned reg, MachineInstr* startInst) {
  LiveInterval& Interval = createEmptyInterval(reg);
  VNInfo* VN = Interval.getNextValue(
    SlotIndex(getInstructionIndex(startInst).getRegSlot()),
    getVNInfoAllocator());
  LiveRange::Segment S(
     SlotIndex(getInstructionIndex(startInst).getRegSlot()),
     getMBBEndIdx(startInst->getParent()), VN);
  Interval.addSegment(S);

  return S;
}


//===----------------------------------------------------------------------===//
//                          Register mask functions
//===----------------------------------------------------------------------===//

/// Returns true when \p LI overlaps at least one regmask slot, in which case
/// \p UsableRegs is set to the registers not clobbered by any overlapping
/// mask. Returns false (leaving \p UsableRegs untouched) otherwise.
bool LiveIntervals::checkRegMaskInterference(LiveInterval &LI,
                                             BitVector &UsableRegs) {
  if (LI.empty())
    return false;
  LiveInterval::iterator LiveI = LI.begin(), LiveE = LI.end();

  // Use a smaller arrays for local live ranges.
  ArrayRef<SlotIndex> Slots;
  ArrayRef<const uint32_t*> Bits;
  if (MachineBasicBlock *MBB = intervalIsInOneMBB(LI)) {
    Slots = getRegMaskSlotsInBlock(MBB->getNumber());
    Bits = getRegMaskBitsInBlock(MBB->getNumber());
  } else {
    Slots = getRegMaskSlots();
    Bits = getRegMaskBits();
  }

  // We are going to enumerate all the register mask slots contained in LI.
  // Start with a binary search of RegMaskSlots to find a starting point.
  ArrayRef<SlotIndex>::iterator SlotI =
    std::lower_bound(Slots.begin(), Slots.end(), LiveI->start);
  ArrayRef<SlotIndex>::iterator SlotE = Slots.end();

  // No slots in range, LI begins after the last call.
  if (SlotI == SlotE)
    return false;

  bool Found = false;
  for (;;) {
    assert(*SlotI >= LiveI->start);
    // Loop over all slots overlapping this segment.
    while (*SlotI < LiveI->end) {
      // *SlotI overlaps LI. Collect mask bits.
      if (!Found) {
        // This is the first overlap. Initialize UsableRegs to all ones.
        UsableRegs.clear();
        UsableRegs.resize(TRI->getNumRegs(), true);
        Found = true;
      }
      // Remove usable registers clobbered by this mask.
      UsableRegs.clearBitsNotInMask(Bits[SlotI-Slots.begin()]);
      if (++SlotI == SlotE)
        return Found;
    }
    // *SlotI is beyond the current LI segment.
    LiveI = LI.advanceTo(LiveI, *SlotI);
    if (LiveI == LiveE)
      return Found;
    // Advance SlotI until it overlaps.
    while (*SlotI < LiveI->start)
      if (++SlotI == SlotE)
        return Found;
  }
}

//===----------------------------------------------------------------------===//
//                         IntervalUpdate class.
//===----------------------------------------------------------------------===//

// HMEditor is a toolkit used by handleMove to trim or extend live intervals.
class LiveIntervals::HMEditor {
private:
  LiveIntervals& LIS;
  const MachineRegisterInfo& MRI;
  const TargetRegisterInfo& TRI;
  SlotIndex OldIdx;               // Index the instruction is moving from.
  SlotIndex NewIdx;               // Index the instruction is moving to.
  SmallPtrSet<LiveRange*, 8> Updated; // Ranges already processed; avoids
                                      // updating the same range twice.
  bool UpdateFlags;

public:
  HMEditor(LiveIntervals& LIS, const MachineRegisterInfo& MRI,
           const TargetRegisterInfo& TRI,
           SlotIndex OldIdx, SlotIndex NewIdx, bool UpdateFlags)
    : LIS(LIS), MRI(MRI), TRI(TRI), OldIdx(OldIdx), NewIdx(NewIdx),
      UpdateFlags(UpdateFlags) {}

  // FIXME: UpdateFlags is a workaround that creates live intervals for all
  // physregs, even those that aren't needed for regalloc, in order to update
  // kill flags. This is wasteful. Eventually, LiveVariables will strip all kill
  // flags, and postRA passes will use a live register utility instead.
  LiveRange *getRegUnitLI(unsigned Unit) {
    if (UpdateFlags)
      return &LIS.getRegUnit(Unit);
    return LIS.getCachedRegUnit(Unit);
  }

  /// Update all live ranges touched by MI, assuming a move from OldIdx to
  /// NewIdx.
  void updateAllRanges(MachineInstr *MI) {
    DEBUG(dbgs() << "handleMove " << OldIdx << " -> " << NewIdx << ": " << *MI);
    bool hasRegMask = false;
    for (MachineOperand &MO : MI->operands()) {
      if (MO.isRegMask())
        hasRegMask = true;
      if (!MO.isReg())
        continue;
      // Aggressively clear all kill flags.
      // They are reinserted by VirtRegRewriter.
      if (MO.isUse())
        MO.setIsKill(false);

      unsigned Reg = MO.getReg();
      if (!Reg)
        continue;
      if (TargetRegisterInfo::isVirtualRegister(Reg)) {
        LiveInterval &LI = LIS.getInterval(Reg);
        if (LI.hasSubRanges()) {
          // Update only the subranges whose lanes this operand touches.
          unsigned SubReg = MO.getSubReg();
          LaneBitmask LaneMask = TRI.getSubRegIndexLaneMask(SubReg);
          for (LiveInterval::SubRange &S : LI.subranges()) {
            if ((S.LaneMask & LaneMask) == 0)
              continue;
            updateRange(S, Reg, S.LaneMask);
          }
        }
        // The main range is always updated, regardless of subranges.
        updateRange(LI, Reg, 0);
        continue;
      }

      // For physregs, only update the regunits that actually have a
      // precomputed live range.
      for (MCRegUnitIterator Units(Reg, &TRI); Units.isValid(); ++Units)
        if (LiveRange *LR = getRegUnitLI(*Units))
          updateRange(*LR, *Units, 0);
    }
    if (hasRegMask)
      updateRegMaskSlots();
  }

private:
  /// Update a single live range, assuming an instruction has been moved from
  /// OldIdx to NewIdx.
  void updateRange(LiveRange &LR, unsigned Reg, LaneBitmask LaneMask) {
    // Skip ranges that were already updated for this move.
    if (!Updated.insert(&LR).second)
      return;
    DEBUG({
      dbgs() << "     ";
      if (TargetRegisterInfo::isVirtualRegister(Reg)) {
        dbgs() << PrintReg(Reg);
        if (LaneMask != 0)
          dbgs() << " L" << PrintLaneMask(LaneMask);
      } else {
        dbgs() << PrintRegUnit(Reg, &TRI);
      }
      dbgs() << ":\t" << LR << '\n';
    });
    // Choose the direction-specific update; the two cases are asymmetric.
    if (SlotIndex::isEarlierInstr(OldIdx, NewIdx))
      handleMoveDown(LR);
    else
      handleMoveUp(LR, Reg, LaneMask);
    DEBUG(dbgs() << "        -->\t" << LR << '\n');
    LR.verify();
  }

  /// Update LR to reflect an instruction has been moved downwards from OldIdx
  /// to NewIdx.
  ///
  /// 1. Live def at OldIdx:
  ///    Move def to NewIdx, assert endpoint after NewIdx.
  ///
  /// 2. Live def at OldIdx, killed at NewIdx:
  ///    Change to dead def at NewIdx.
  ///    (Happens when bundling def+kill together).
  ///
  /// 3. Dead def at OldIdx:
  ///    Move def to NewIdx, possibly across another live value.
  ///
  /// 4. Def at OldIdx AND at NewIdx:
  ///    Remove segment [OldIdx;NewIdx) and value defined at OldIdx.
  ///    (Happens when bundling multiple defs together).
  ///
  /// 5. Value read at OldIdx, killed before NewIdx:
  ///    Extend kill to NewIdx.
  ///
  void handleMoveDown(LiveRange &LR) {
    // First look for a kill at OldIdx.
    LiveRange::iterator I = LR.find(OldIdx.getBaseIndex());
    LiveRange::iterator E = LR.end();
    // Is LR even live at OldIdx?
    if (I == E || SlotIndex::isEarlierInstr(OldIdx, I->start))
      return;

    // Handle a live-in value.
    if (!SlotIndex::isSameInstr(I->start, OldIdx)) {
      bool isKill = SlotIndex::isSameInstr(OldIdx, I->end);
      // If the live-in value already extends to NewIdx, there is nothing to do.
      if (!SlotIndex::isEarlierInstr(I->end, NewIdx))
        return;
      // Aggressively remove all kill flags from the old kill point.
      // Kill flags shouldn't be used while live intervals exist, they will be
      // reinserted by VirtRegRewriter.
      if (MachineInstr *KillMI = LIS.getInstructionFromIndex(I->end))
        for (MIBundleOperands MO(KillMI); MO.isValid(); ++MO)
          if (MO->isReg() && MO->isUse())
            MO->setIsKill(false);
      // Adjust I->end to reach NewIdx. This may temporarily make LR invalid by
      // overlapping ranges. Case 5 above.
      I->end = NewIdx.getRegSlot(I->end.isEarlyClobber());
      // If this was a kill, there may also be a def. Otherwise we're done.
      if (!isKill)
        return;
      ++I;
    }

    // Check for a def at OldIdx.
    if (I == E || !SlotIndex::isSameInstr(OldIdx, I->start))
      return;
    // We have a def at OldIdx.
    VNInfo *DefVNI = I->valno;
    assert(DefVNI->def == I->start && "Inconsistent def");
    DefVNI->def = NewIdx.getRegSlot(I->start.isEarlyClobber());
    // If the defined value extends beyond NewIdx, just move the def down.
    // This is case 1 above.
    if (SlotIndex::isEarlierInstr(NewIdx, I->end)) {
      I->start = DefVNI->def;
      return;
    }
    // The remaining possibilities are now:
    // 2. Live def at OldIdx, killed at NewIdx: isSameInstr(I->end, NewIdx).
    // 3. Dead def at OldIdx: I->end = OldIdx.getDeadSlot().
    // In either case, it is possible that there is an existing def at NewIdx.
    assert((I->end == OldIdx.getDeadSlot() ||
            SlotIndex::isSameInstr(I->end, NewIdx)) &&
            "Cannot move def below kill");
    LiveRange::iterator NewI = LR.advanceTo(I, NewIdx.getRegSlot());
    if (NewI != E && SlotIndex::isSameInstr(NewI->start, NewIdx)) {
      // There is an existing def at NewIdx, case 4 above. The def at OldIdx is
      // coalesced into that value.
      assert(NewI->valno != DefVNI && "Multiple defs of value?");
      LR.removeValNo(DefVNI);
      return;
    }
    // There was no existing def at NewIdx. Turn *I into a dead def at NewIdx.
    // If the def at OldIdx was dead, we allow it to be moved across other LR
    // values. The new range should be placed immediately before NewI, move any
    // intermediate ranges up.
    assert(NewI != I && "Inconsistent iterators");
    std::copy(std::next(I), NewI, I);
    *std::prev(NewI)
      = LiveRange::Segment(DefVNI->def, NewIdx.getDeadSlot(), DefVNI);
  }

  /// Update LR to reflect an instruction has been moved upwards from OldIdx
  /// to NewIdx.
  ///
  /// 1. Live def at OldIdx:
  ///    Hoist def to NewIdx.
  ///
  /// 2. Dead def at OldIdx:
  ///    Hoist def+end to NewIdx, possibly move across other values.
  ///
  /// 3. Dead def at OldIdx AND existing def at NewIdx:
  ///    Remove value defined at OldIdx, coalescing it with existing value.
  ///
  /// 4. Live def at OldIdx AND existing def at NewIdx:
  ///    Remove value defined at NewIdx, hoist OldIdx def to NewIdx.
  ///    (Happens when bundling multiple defs together).
  ///
  /// 5. Value killed at OldIdx:
  ///    Hoist kill to NewIdx, then scan for last kill between NewIdx and
  ///    OldIdx.
  ///
  void handleMoveUp(LiveRange &LR, unsigned Reg, LaneBitmask LaneMask) {
    // First look for a kill at OldIdx.
    LiveRange::iterator I = LR.find(OldIdx.getBaseIndex());
    LiveRange::iterator E = LR.end();
    // Is LR even live at OldIdx?
    if (I == E || SlotIndex::isEarlierInstr(OldIdx, I->start))
      return;

    // Handle a live-in value.
    if (!SlotIndex::isSameInstr(I->start, OldIdx)) {
      // If the live-in value isn't killed here, there is nothing to do.
      if (!SlotIndex::isSameInstr(OldIdx, I->end))
        return;
      // Adjust I->end to end at NewIdx. If we are hoisting a kill above
      // another use, we need to search for that use. Case 5 above.
      I->end = NewIdx.getRegSlot(I->end.isEarlyClobber());
      ++I;
      // If OldIdx also defines a value, there couldn't have been another use.
      if (I == E || !SlotIndex::isSameInstr(I->start, OldIdx)) {
        // No def, search for the new kill.
        // This can never be an early clobber kill since there is no def.
        std::prev(I)->end = findLastUseBefore(Reg, LaneMask).getRegSlot();
        return;
      }
    }

    // Now deal with the def at OldIdx.
    assert(I != E && SlotIndex::isSameInstr(I->start, OldIdx) && "No def?");
    VNInfo *DefVNI = I->valno;
    assert(DefVNI->def == I->start && "Inconsistent def");
    DefVNI->def = NewIdx.getRegSlot(I->start.isEarlyClobber());

    // Check for an existing def at NewIdx.
    LiveRange::iterator NewI = LR.find(NewIdx.getRegSlot());
    if (SlotIndex::isSameInstr(NewI->start, NewIdx)) {
      assert(NewI->valno != DefVNI && "Same value defined more than once?");
      // There is an existing def at NewIdx.
      if (I->end.isDead()) {
        // Case 3: Remove the dead def at OldIdx.
        LR.removeValNo(DefVNI);
        return;
      }
      // Case 4: Replace def at NewIdx with live def at OldIdx.
      I->start = DefVNI->def;
      LR.removeValNo(NewI->valno);
      return;
    }

    // There is no existing def at NewIdx. Hoist DefVNI.
    if (!I->end.isDead()) {
      // Leave the end point of a live def.
      I->start = DefVNI->def;
      return;
    }

    // DefVNI is a dead def. It may have been moved across other values in LR,
    // so move I up to NewI. Slide [NewI;I) down one position.
    std::copy_backward(NewI, I, std::next(I));
    *NewI = LiveRange::Segment(DefVNI->def, NewIdx.getDeadSlot(), DefVNI);
  }

  /// Rewrite the RegMaskSlots entry for OldIdx to NewIdx, asserting that the
  /// move does not cross another regmask (call) slot in either direction.
  void updateRegMaskSlots() {
    SmallVectorImpl<SlotIndex>::iterator RI =
      std::lower_bound(LIS.RegMaskSlots.begin(), LIS.RegMaskSlots.end(),
                       OldIdx);
    assert(RI != LIS.RegMaskSlots.end() && *RI == OldIdx.getRegSlot() &&
           "No RegMask at OldIdx.");
    *RI = NewIdx.getRegSlot();
    assert((RI == LIS.RegMaskSlots.begin() ||
            SlotIndex::isEarlierInstr(*std::prev(RI), *RI)) &&
           "Cannot move regmask instruction above another call");
    assert((std::next(RI) == LIS.RegMaskSlots.end() ||
            SlotIndex::isEarlierInstr(*RI, *std::next(RI))) &&
           "Cannot move regmask instruction below another call");
  }

  // Return the last use of reg between NewIdx and OldIdx.
  SlotIndex findLastUseBefore(unsigned Reg, LaneBitmask LaneMask) {

    if (TargetRegisterInfo::isVirtualRegister(Reg)) {
      // Virtual register: walk the use list and keep the latest use that
      // falls strictly inside (NewIdx, OldIdx).
      SlotIndex LastUse = NewIdx;
      for (MachineOperand &MO : MRI.use_nodbg_operands(Reg)) {
        unsigned SubReg = MO.getSubReg();
        // With a LaneMask, ignore subregister uses of disjoint lanes.
        if (SubReg != 0 && LaneMask != 0
            && (TRI.getSubRegIndexLaneMask(SubReg) & LaneMask) == 0)
          continue;

        const MachineInstr *MI = MO.getParent();
        SlotIndex InstSlot = LIS.getSlotIndexes()->getInstructionIndex(MI);
        if (InstSlot > LastUse && InstSlot < OldIdx)
          LastUse = InstSlot;
      }
      return LastUse;
    }

    // This is a regunit interval, so scanning the use list could be very
    // expensive. Scan upwards from OldIdx instead.
    assert(NewIdx < OldIdx && "Expected upwards move");
    SlotIndexes *Indexes = LIS.getSlotIndexes();
    MachineBasicBlock *MBB = Indexes->getMBBFromIndex(NewIdx);

    // OldIdx may not correspond to an instruction any longer, so set MII to
    // point to the next instruction after OldIdx, or MBB->end().
    MachineBasicBlock::iterator MII = MBB->end();
    if (MachineInstr *MI = Indexes->getInstructionFromIndex(
                           Indexes->getNextNonNullIndex(OldIdx)))
      if (MI->getParent() == MBB)
        MII = MI;

    MachineBasicBlock::iterator Begin = MBB->begin();
    while (MII != Begin) {
      if ((--MII)->isDebugValue())
        continue;
      SlotIndex Idx = Indexes->getInstructionIndex(MII);

      // Stop searching when NewIdx is reached.
      if (!SlotIndex::isEarlierInstr(NewIdx, Idx))
        return NewIdx;

      // Check if MII uses Reg.
      for (MIBundleOperands MO(MII); MO.isValid(); ++MO)
        if (MO->isReg() &&
            TargetRegisterInfo::isPhysicalRegister(MO->getReg()) &&
            TRI.hasRegUnit(MO->getReg(), Reg))
          return Idx;
    }
    // Didn't reach NewIdx. It must be the first instruction in the block.
    return NewIdx;
  }
};

/// Update the slot index maps and all live ranges touched by \p MI after it
/// has been moved within its basic block. Moves across basic block
/// boundaries and bundled instructions are not supported (asserted).
void LiveIntervals::handleMove(MachineInstr* MI, bool UpdateFlags) {
  assert(!MI->isBundled() && "Can't handle bundled instructions yet.");
  SlotIndex OldIndex = Indexes->getInstructionIndex(MI);
  // Re-insert MI into the maps to obtain its new index.
  Indexes->removeMachineInstrFromMaps(MI);
  SlotIndex NewIndex = Indexes->insertMachineInstrInMaps(MI);
  assert(getMBBStartIdx(MI->getParent()) <= OldIndex &&
         OldIndex < getMBBEndIdx(MI->getParent()) &&
         "Cannot handle moves across basic block boundaries.");

  HMEditor HME(*this, *MRI, *TRI, OldIndex, NewIndex, UpdateFlags);
  HME.updateAllRanges(MI);
}

/// Update live ranges for \p MI as if it had been moved to the index of
/// \p BundleStart. Unlike handleMove, the slot index maps are not modified
/// here.
void LiveIntervals::handleMoveIntoBundle(MachineInstr* MI,
                                         MachineInstr* BundleStart,
                                         bool UpdateFlags) {
  SlotIndex OldIndex = Indexes->getInstructionIndex(MI);
  SlotIndex NewIndex = Indexes->getInstructionIndex(BundleStart);
  HMEditor HME(*this, *MRI, *TRI, OldIndex, NewIndex, UpdateFlags);
  HME.updateAllRanges(MI);
}

/// Walk [Begin, End) backwards and repair the segments of \p LR for register
/// \p Reg (restricted to \p LaneMask lanes) so they match the defs and uses
/// actually present in the instructions, up to the boundary index \p endIdx.
/// Used by repairIntervalsInRange after instructions were inserted/removed.
void LiveIntervals::repairOldRegInRange(const MachineBasicBlock::iterator Begin,
                                        const MachineBasicBlock::iterator End,
                                        const SlotIndex endIdx,
                                        LiveRange &LR, const unsigned Reg,
                                        LaneBitmask LaneMask) {
  LiveInterval::iterator LII = LR.find(endIdx);
  SlotIndex lastUseIdx;
  if (LII != LR.end() && LII->start < endIdx)
    lastUseIdx = LII->end;
  else
    --LII;

  // Scan the region backwards so kills are seen before their defs.
  for (MachineBasicBlock::iterator I = End; I != Begin;) {
    --I;
    MachineInstr *MI = I;
    if (MI->isDebugValue())
      continue;

    SlotIndex instrIdx = getInstructionIndex(MI);
    // A segment endpoint is "valid" if it still maps to an instruction.
    bool isStartValid = getInstructionFromIndex(LII->start);
    bool isEndValid = getInstructionFromIndex(LII->end);

    // FIXME: This doesn't currently handle early-clobber or multiple removed
    // defs inside of the region to repair.
    for (MachineInstr::mop_iterator OI = MI->operands_begin(),
         OE = MI->operands_end(); OI != OE; ++OI) {
      const MachineOperand &MO = *OI;
      if (!MO.isReg() || MO.getReg() != Reg)
        continue;

      // Skip operands touching only lanes outside LaneMask.
      unsigned SubReg = MO.getSubReg();
      LaneBitmask Mask = TRI->getSubRegIndexLaneMask(SubReg);
      if ((Mask & LaneMask) == 0)
        continue;

      if (MO.isDef()) {
        if (!isStartValid) {
          if (LII->end.isDead()) {
            // The stale segment is a dead def; drop it entirely.
            SlotIndex prevStart;
            if (LII != LR.begin())
              prevStart = std::prev(LII)->start;

            // FIXME: This could be more efficient if there was a
            // removeSegment method that returned an iterator.
            LR.removeSegment(*LII, true);
            if (prevStart.isValid())
              LII = LR.find(prevStart);
            else
              LII = LR.begin();
          } else {
            // Re-anchor the segment (and its value) at this def.
            LII->start = instrIdx.getRegSlot();
            LII->valno->def = instrIdx.getRegSlot();
            if (MO.getSubReg() && !MO.isUndef())
              lastUseIdx = instrIdx.getRegSlot();
            else
              lastUseIdx = SlotIndex();
            continue;
          }
        }

        if (!lastUseIdx.isValid()) {
          // No use seen after this def: create a dead-def segment.
          VNInfo *VNI = LR.getNextValue(instrIdx.getRegSlot(), VNInfoAllocator);
          LiveRange::Segment S(instrIdx.getRegSlot(),
                               instrIdx.getDeadSlot(), VNI);
          LII = LR.addSegment(S);
        } else if (LII->start != instrIdx.getRegSlot()) {
          // Live def: extend a new value from here to the last seen use.
          VNInfo *VNI = LR.getNextValue(instrIdx.getRegSlot(), VNInfoAllocator);
          LiveRange::Segment S(instrIdx.getRegSlot(), lastUseIdx, VNI);
          LII = LR.addSegment(S);
        }

        // A subregister def that isn't undef also reads the register.
        if (MO.getSubReg() && !MO.isUndef())
          lastUseIdx = instrIdx.getRegSlot();
        else
          lastUseIdx = SlotIndex();
      } else if (MO.isUse()) {
        // FIXME: This should probably be handled outside of this branch,
        // either as part of the def case (for defs inside of the region) or
        // after the loop over the region.
        if (!isEndValid && !LII->end.isBlock())
          LII->end = instrIdx.getRegSlot();
        if (!lastUseIdx.isValid())
          lastUseIdx = instrIdx.getRegSlot();
      }
    }
  }
}

/// Repair slot indexes and the live intervals of the registers in
/// \p OrigRegs after instructions in [Begin, End) of \p MBB were added,
/// removed, or moved. Virtual registers appearing in the region that have no
/// interval yet get one computed from scratch.
void
LiveIntervals::repairIntervalsInRange(MachineBasicBlock *MBB,
                                      MachineBasicBlock::iterator Begin,
                                      MachineBasicBlock::iterator End,
                                      ArrayRef<unsigned> OrigRegs) {
  // Find anchor points, which are at the beginning/end of blocks or at
  // instructions that already have indexes.
  while (Begin != MBB->begin() && !Indexes->hasIndex(Begin))
    --Begin;
  while (End != MBB->end() && !Indexes->hasIndex(End))
    ++End;

  SlotIndex endIdx;
  if (End == MBB->end())
    endIdx = getMBBEndIdx(MBB).getPrevSlot();
  else
    endIdx = getInstructionIndex(End);

  Indexes->repairIndexesInRange(MBB, Begin, End);

  // Give brand-new virtual registers in the region a computed interval.
  for (MachineBasicBlock::iterator I = End; I != Begin;) {
    --I;
    MachineInstr *MI = I;
    if (MI->isDebugValue())
      continue;
    for (MachineInstr::const_mop_iterator MOI = MI->operands_begin(),
         MOE = MI->operands_end(); MOI != MOE; ++MOI) {
      if (MOI->isReg() &&
          TargetRegisterInfo::isVirtualRegister(MOI->getReg()) &&
          !hasInterval(MOI->getReg())) {
        createAndComputeVirtRegInterval(MOI->getReg());
      }
    }
  }

  // Repair the pre-existing intervals of the registers the caller listed.
  for (unsigned i = 0, e = OrigRegs.size(); i != e; ++i) {
    unsigned Reg = OrigRegs[i];
    if (!TargetRegisterInfo::isVirtualRegister(Reg))
      continue;

    LiveInterval &LI = getInterval(Reg);
    // FIXME: Should we support undefs that gain defs?
    if (!LI.hasAtLeastOneValue())
      continue;

    for (LiveInterval::SubRange &S : LI.subranges()) {
      repairOldRegInRange(Begin, End, endIdx, S, Reg, S.LaneMask);
    }
    repairOldRegInRange(Begin, End, endIdx, LI, Reg);
  }
}

/// Remove any value number defined at \p Pos from every regunit live range
/// of physical register \p Reg that has a cached (precomputed) live range.
void LiveIntervals::removePhysRegDefAt(unsigned Reg, SlotIndex Pos) {
  for (MCRegUnitIterator Units(Reg, TRI); Units.isValid(); ++Units) {
    if (LiveRange *LR = getCachedRegUnit(*Units))
      if (VNInfo *VNI = LR->getVNInfoAt(Pos))
        LR->removeValNo(VNI);
  }
}

/// Remove the value number of \p LI (and of its subranges) defined at
/// \p Pos, if any, then discard subranges that became empty.
void LiveIntervals::removeVRegDefAt(LiveInterval &LI, SlotIndex Pos) {
  VNInfo *VNI = LI.getVNInfoAt(Pos);
  if (VNI == nullptr)
    return;
  LI.removeValNo(VNI);

  // Also remove the value in subranges.
  for (LiveInterval::SubRange &S : LI.subranges()) {
    if (VNInfo *SVNI = S.getVNInfoAt(Pos))
      S.removeValNo(SVNI);
  }
  LI.removeEmptySubRanges();
}

/// Split \p LI into its connected components. If it has more than one
/// component, a new virtual register of the same class is created for each
/// extra component, its (empty) interval is appended to \p SplitLIs, and
/// ConnectedVNInfoEqClasses::Distribute moves the values over.
void LiveIntervals::splitSeparateComponents(LiveInterval &LI,
    SmallVectorImpl<LiveInterval*> &SplitLIs) {
  ConnectedVNInfoEqClasses ConEQ(*this);
  unsigned NumComp = ConEQ.Classify(&LI);
  if (NumComp <= 1)
    return;
  DEBUG(dbgs() << "  Split " << NumComp << " components: " << LI << '\n');
  unsigned Reg = LI.reg;
  const TargetRegisterClass *RegClass = MRI->getRegClass(Reg);
  // Component 0 stays in LI; components 1..NumComp-1 get fresh registers.
  for (unsigned I = 1; I < NumComp; ++I) {
    unsigned NewVReg = MRI->createVirtualRegister(RegClass);
    LiveInterval &NewLI = createEmptyInterval(NewVReg);
    SplitLIs.push_back(&NewLI);
  }
  ConEQ.Distribute(LI, SplitLIs.data(), *MRI);
}