//===-- LiveIntervalAnalysis.cpp - Live Interval Analysis ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the LiveInterval analysis pass, which is used by the
// register allocators. The pass computes live intervals for each virtual and
// physical register.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "LiveRangeCalc.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/VirtRegMap.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/BlockFrequency.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include <algorithm>
#include <cmath>
using namespace llvm;

#define DEBUG_TYPE "regalloc"

char LiveIntervals::ID = 0;
char &llvm::LiveIntervalsID = LiveIntervals::ID;
INITIALIZE_PASS_BEGIN(LiveIntervals, "liveintervals",
                      "Live Interval Analysis", false, false)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
INITIALIZE_PASS_END(LiveIntervals, "liveintervals",
                    "Live Interval Analysis", false, false)

#ifndef NDEBUG
static cl::opt<bool> EnablePrecomputePhysRegs(
    "precompute-phys-liveness", cl::Hidden,
    cl::desc("Eagerly compute live intervals for all physreg units."));
#else
static bool EnablePrecomputePhysRegs = false;
#endif // NDEBUG

static cl::opt<bool> EnableSubRegLiveness(
    "enable-subreg-liveness", cl::Hidden, cl::init(true),
    cl::desc("Enable subregister liveness tracking."));

namespace llvm {
cl::opt<bool> UseSegmentSetForPhysRegs(
    "use-segment-set-for-physregs", cl::Hidden, cl::init(true),
    cl::desc(
        "Use segment set for the computation of the live ranges of physregs."));
}

void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequired<AAResultsWrapperPass>();
  AU.addPreserved<AAResultsWrapperPass>();
  AU.addPreserved<LiveVariables>();
  AU.addPreservedID(MachineLoopInfoID);
  AU.addRequiredTransitiveID(MachineDominatorsID);
  AU.addPreservedID(MachineDominatorsID);
  AU.addPreserved<SlotIndexes>();
  AU.addRequiredTransitive<SlotIndexes>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

LiveIntervals::LiveIntervals() : MachineFunctionPass(ID),
  DomTree(nullptr), LRCalc(nullptr) {
  initializeLiveIntervalsPass(*PassRegistry::getPassRegistry());
}

LiveIntervals::~LiveIntervals() {
  delete LRCalc;
}

void LiveIntervals::releaseMemory() {
  // Free the live intervals themselves.
  for (unsigned i = 0, e = VirtRegIntervals.size(); i != e; ++i)
    delete VirtRegIntervals[TargetRegisterInfo::index2VirtReg(i)];
  VirtRegIntervals.clear();
  RegMaskSlots.clear();
  RegMaskBits.clear();
  RegMaskBlocks.clear();

  for (unsigned i = 0, e = RegUnitRanges.size(); i != e; ++i)
    delete RegUnitRanges[i];
  RegUnitRanges.clear();

  // Release VNInfo memory regions, VNInfo objects don't need to be dtor'd.
  VNInfoAllocator.Reset();
}

/// runOnMachineFunction - calculates LiveIntervals
///
bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
  MF = &fn;
  MRI = &MF->getRegInfo();
  TRI = MF->getSubtarget().getRegisterInfo();
  TII = MF->getSubtarget().getInstrInfo();
  AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
  Indexes = &getAnalysis<SlotIndexes>();
  DomTree = &getAnalysis<MachineDominatorTree>();

  if (EnableSubRegLiveness && MF->getSubtarget().enableSubRegLiveness())
    MRI->enableSubRegLiveness(true);

  if (!LRCalc)
    LRCalc = new LiveRangeCalc();

  // Allocate space for all virtual registers.
  VirtRegIntervals.resize(MRI->getNumVirtRegs());

  computeVirtRegs();
  computeRegMasks();
  computeLiveInRegUnits();

  if (EnablePrecomputePhysRegs) {
    // For stress testing, precompute live ranges of all physical register
    // units, including reserved registers.
    for (unsigned i = 0, e = TRI->getNumRegUnits(); i != e; ++i)
      getRegUnit(i);
  }
  DEBUG(dump());
  return true;
}

/// print - Implement the dump method.
void LiveIntervals::print(raw_ostream &OS, const Module* ) const {
  OS << "********** INTERVALS **********\n";

  // Dump the regunits.
  for (unsigned i = 0, e = RegUnitRanges.size(); i != e; ++i)
    if (LiveRange *LR = RegUnitRanges[i])
      OS << PrintRegUnit(i, TRI) << ' ' << *LR << '\n';

  // Dump the virtregs.
  for (unsigned i = 0, e = MRI->getNumVirtRegs(); i != e; ++i) {
    unsigned Reg = TargetRegisterInfo::index2VirtReg(i);
    if (hasInterval(Reg))
      OS << getInterval(Reg) << '\n';
  }

  OS << "RegMasks:";
  for (unsigned i = 0, e = RegMaskSlots.size(); i != e; ++i)
    OS << ' ' << RegMaskSlots[i];
  OS << '\n';

  printInstrs(OS);
}

void LiveIntervals::printInstrs(raw_ostream &OS) const {
  OS << "********** MACHINEINSTRS **********\n";
  MF->print(OS, Indexes);
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void LiveIntervals::dumpInstrs() const {
  printInstrs(dbgs());
}
#endif

LiveInterval* LiveIntervals::createInterval(unsigned reg) {
  float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ?
                  llvm::huge_valf : 0.0F;
  return new LiveInterval(reg, Weight);
}


/// computeVirtRegInterval - Compute the live interval of a virtual register,
/// based on defs and uses.
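/// Dead values found during the computation are handled by computeDeadValues()
/// before the interval is returned.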
void LiveIntervals::computeVirtRegInterval(LiveInterval &LI) {
  assert(LRCalc && "LRCalc not initialized.");
  assert(LI.empty() && "Should only compute empty intervals.");
  LRCalc->reset(MF, getSlotIndexes(), DomTree, &getVNInfoAllocator());
  LRCalc->calculate(LI, MRI->shouldTrackSubRegLiveness(LI.reg));
  computeDeadValues(LI, nullptr);
}

void LiveIntervals::computeVirtRegs() {
  for (unsigned i = 0, e = MRI->getNumVirtRegs(); i != e; ++i) {
    unsigned Reg = TargetRegisterInfo::index2VirtReg(i);
    if (MRI->reg_nodbg_empty(Reg))
      continue;
    createAndComputeVirtRegInterval(Reg);
  }
}

void LiveIntervals::computeRegMasks() {
  RegMaskBlocks.resize(MF->getNumBlockIDs());

  // Find all instructions with regmask operands.
  for (MachineBasicBlock &MBB : *MF) {
    std::pair<unsigned, unsigned> &RMB = RegMaskBlocks[MBB.getNumber()];
    RMB.first = RegMaskSlots.size();

    // Some block starts, such as EH funclets, create masks.
    if (const uint32_t *Mask = MBB.getBeginClobberMask(TRI)) {
      RegMaskSlots.push_back(Indexes->getMBBStartIdx(&MBB));
      RegMaskBits.push_back(Mask);
    }

    for (MachineInstr &MI : MBB) {
      for (const MachineOperand &MO : MI.operands()) {
        if (!MO.isRegMask())
          continue;
        RegMaskSlots.push_back(Indexes->getInstructionIndex(MI).getRegSlot());
        RegMaskBits.push_back(MO.getRegMask());
      }
    }

    // Some block ends, such as funclet returns, create masks. Put the mask on
    // the last instruction of the block, because MBB slot index intervals are
    // half-open.
    if (const uint32_t *Mask = MBB.getEndClobberMask(TRI)) {
      assert(!MBB.empty() && "empty return block?");
      RegMaskSlots.push_back(
          Indexes->getInstructionIndex(MBB.back()).getRegSlot());
      RegMaskBits.push_back(Mask);
    }

    // Compute the number of register mask instructions in this block.
    RMB.second = RegMaskSlots.size() - RMB.first;
  }
}

//===----------------------------------------------------------------------===//
//                           Register Unit Liveness
//===----------------------------------------------------------------------===//
//
// Fixed interference typically comes from ABI boundaries: Function arguments
// and return values are passed in fixed registers, and so are exception
// pointers entering landing pads. Certain instructions require values to be
// present in specific registers. That is also represented through fixed
// interference.
//

/// computeRegUnitRange - Compute the live range of a register unit, based
/// on the uses and defs of aliasing registers. The range should be empty,
/// or contain only dead phi-defs from ABI blocks.
void LiveIntervals::computeRegUnitRange(LiveRange &LR, unsigned Unit) {
  assert(LRCalc && "LRCalc not initialized.");
  LRCalc->reset(MF, getSlotIndexes(), DomTree, &getVNInfoAllocator());

  // The physregs aliasing Unit are the roots and their super-registers.
  // Create all values as dead defs before extending to uses. Note that roots
  // may share super-registers. That's OK because createDeadDefs() is
  // idempotent. It is very rare for a register unit to have multiple roots, so
  // uniquing super-registers is probably not worthwhile.
  for (MCRegUnitRootIterator Roots(Unit, TRI); Roots.isValid(); ++Roots) {
    for (MCSuperRegIterator Supers(*Roots, TRI, /*IncludeSelf=*/true);
         Supers.isValid(); ++Supers) {
      if (!MRI->reg_empty(*Supers))
        LRCalc->createDeadDefs(LR, *Supers);
    }
  }

  // Now extend LR to reach all uses.
  // Ignore uses of reserved registers. We only track defs of those.
  for (MCRegUnitRootIterator Roots(Unit, TRI); Roots.isValid(); ++Roots) {
    for (MCSuperRegIterator Supers(*Roots, TRI, /*IncludeSelf=*/true);
         Supers.isValid(); ++Supers) {
      unsigned Reg = *Supers;
      if (!MRI->isReserved(Reg) && !MRI->reg_empty(Reg))
        LRCalc->extendToUses(LR, Reg);
    }
  }

  // Flush the segment set to the segment vector.
  if (UseSegmentSetForPhysRegs)
    LR.flushSegmentSet();
}


/// computeLiveInRegUnits - Precompute the live ranges of any register units
/// that are live-in to an ABI block somewhere. Register values can appear
/// without a corresponding def when entering the entry block or a landing pad.
///
void LiveIntervals::computeLiveInRegUnits() {
  RegUnitRanges.resize(TRI->getNumRegUnits());
  DEBUG(dbgs() << "Computing live-in reg-units in ABI blocks.\n");

  // Keep track of the live range sets allocated.
  SmallVector<unsigned, 8> NewRanges;

  // Check all basic blocks for live-ins.
  for (MachineFunction::const_iterator MFI = MF->begin(), MFE = MF->end();
       MFI != MFE; ++MFI) {
    const MachineBasicBlock *MBB = &*MFI;

    // We only care about ABI blocks: Entry + landing pads.
    if ((MFI != MF->begin() && !MBB->isEHPad()) || MBB->livein_empty())
      continue;

    // Create phi-defs at Begin for all live-in registers.
    SlotIndex Begin = Indexes->getMBBStartIdx(MBB);
    DEBUG(dbgs() << Begin << "\tBB#" << MBB->getNumber());
    for (const auto &LI : MBB->liveins()) {
      for (MCRegUnitIterator Units(LI.PhysReg, TRI); Units.isValid(); ++Units) {
        unsigned Unit = *Units;
        LiveRange *LR = RegUnitRanges[Unit];
        if (!LR) {
          // Use segment set to speed-up initial computation of the live range.
          LR = RegUnitRanges[Unit] = new LiveRange(UseSegmentSetForPhysRegs);
          NewRanges.push_back(Unit);
        }
        VNInfo *VNI = LR->createDeadDef(Begin, getVNInfoAllocator());
        (void)VNI;
        DEBUG(dbgs() << ' ' << PrintRegUnit(Unit, TRI) << '#' << VNI->id);
      }
    }
    DEBUG(dbgs() << '\n');
  }
  DEBUG(dbgs() << "Created " << NewRanges.size() << " new intervals.\n");

  // Compute the 'normal' part of the ranges.
  for (unsigned i = 0, e = NewRanges.size(); i != e; ++i) {
    unsigned Unit = NewRanges[i];
    computeRegUnitRange(*RegUnitRanges[Unit], Unit);
  }
}


static void createSegmentsForValues(LiveRange &LR,
    iterator_range<LiveInterval::vni_iterator> VNIs) {
  for (auto VNI : VNIs) {
    if (VNI->isUnused())
      continue;
    SlotIndex Def = VNI->def;
    LR.addSegment(LiveRange::Segment(Def, Def.getDeadSlot(), VNI));
  }
}

typedef SmallVector<std::pair<SlotIndex, VNInfo*>, 16> ShrinkToUsesWorkList;

static void extendSegmentsToUses(LiveRange &LR, const SlotIndexes &Indexes,
                                 ShrinkToUsesWorkList &WorkList,
                                 const LiveRange &OldRange) {
  // Keep track of the PHIs that are in use.
  SmallPtrSet<VNInfo*, 8> UsedPHIs;
  // Blocks that have already been added to WorkList as live-out.
  SmallPtrSet<MachineBasicBlock*, 16> LiveOut;

  // Extend intervals to reach all uses in WorkList.
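  // Each WorkList entry is a (slot, value) pair requesting that the value be
  // live at that slot. The value is extended within the slot's block; when it
  // is live-in to that block (or is a PHI defined at the block start), the
  // predecessors' block-end slots are queued so liveness propagates upwards
  // through the CFG.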
  while (!WorkList.empty()) {
    SlotIndex Idx = WorkList.back().first;
    VNInfo *VNI = WorkList.back().second;
    WorkList.pop_back();
    const MachineBasicBlock *MBB = Indexes.getMBBFromIndex(Idx.getPrevSlot());
    SlotIndex BlockStart = Indexes.getMBBStartIdx(MBB);

    // Extend the live range for VNI to be live at Idx.
    if (VNInfo *ExtVNI = LR.extendInBlock(BlockStart, Idx)) {
      assert(ExtVNI == VNI && "Unexpected existing value number");
      (void)ExtVNI;
      // Is this a PHIDef we haven't seen before?
      if (!VNI->isPHIDef() || VNI->def != BlockStart ||
          !UsedPHIs.insert(VNI).second)
        continue;
      // The PHI is live, make sure the predecessors are live-out.
      for (auto &Pred : MBB->predecessors()) {
        if (!LiveOut.insert(Pred).second)
          continue;
        SlotIndex Stop = Indexes.getMBBEndIdx(Pred);
        // A predecessor is not required to have a live-out value for a PHI.
        if (VNInfo *PVNI = OldRange.getVNInfoBefore(Stop))
          WorkList.push_back(std::make_pair(Stop, PVNI));
      }
      continue;
    }

    // VNI is live-in to MBB.
    DEBUG(dbgs() << " live-in at " << BlockStart << '\n');
    LR.addSegment(LiveRange::Segment(BlockStart, Idx, VNI));

    // Make sure VNI is live-out from the predecessors.
    for (auto &Pred : MBB->predecessors()) {
      if (!LiveOut.insert(Pred).second)
        continue;
      SlotIndex Stop = Indexes.getMBBEndIdx(Pred);
      assert(OldRange.getVNInfoBefore(Stop) == VNI &&
             "Wrong value out of predecessor");
      WorkList.push_back(std::make_pair(Stop, VNI));
    }
  }
}

bool LiveIntervals::shrinkToUses(LiveInterval *li,
                                 SmallVectorImpl<MachineInstr*> *dead) {
  DEBUG(dbgs() << "Shrink: " << *li << '\n');
  assert(TargetRegisterInfo::isVirtualRegister(li->reg)
         && "Can only shrink virtual registers");

  // Shrink subregister live ranges.
  bool NeedsCleanup = false;
  for (LiveInterval::SubRange &S : li->subranges()) {
    shrinkToUses(S, li->reg);
    if (S.empty())
      NeedsCleanup = true;
  }
  if (NeedsCleanup)
    li->removeEmptySubRanges();

  // Find all the values used, including PHI kills.
  ShrinkToUsesWorkList WorkList;

  // Visit all instructions reading li->reg.
  for (MachineRegisterInfo::reg_instr_iterator
       I = MRI->reg_instr_begin(li->reg), E = MRI->reg_instr_end();
       I != E; ) {
    MachineInstr *UseMI = &*(I++);
    if (UseMI->isDebugValue() || !UseMI->readsVirtualRegister(li->reg))
      continue;
    SlotIndex Idx = getInstructionIndex(*UseMI).getRegSlot();
    LiveQueryResult LRQ = li->Query(Idx);
    VNInfo *VNI = LRQ.valueIn();
    if (!VNI) {
      // This shouldn't happen: readsVirtualRegister returns true, but there is
      // no live value. It is likely caused by a target getting <undef> flags
      // wrong.
      DEBUG(dbgs() << Idx << '\t' << *UseMI
                   << "Warning: Instr claims to read non-existent value in "
                   << *li << '\n');
      continue;
    }
    // Special case: An early-clobber tied operand reads and writes the
    // register one slot early.
    if (VNInfo *DefVNI = LRQ.valueDefined())
      Idx = DefVNI->def;

    WorkList.push_back(std::make_pair(Idx, VNI));
  }

  // Create new live ranges with only minimal live segments per def.
  LiveRange NewLR;
  createSegmentsForValues(NewLR, make_range(li->vni_begin(), li->vni_end()));
  extendSegmentsToUses(NewLR, *Indexes, WorkList, *li);

  // Move the trimmed segments back.
  li->segments.swap(NewLR.segments);

  // Handle dead values.
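  // computeDeadValues() marks dead PHI value numbers as unused, adds dead
  // flags to dead defs, and reports whether removing dead PHIs may have split
  // the interval into multiple connected components.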
  bool CanSeparate = computeDeadValues(*li, dead);
  DEBUG(dbgs() << "Shrunk: " << *li << '\n');
  return CanSeparate;
}

bool LiveIntervals::computeDeadValues(LiveInterval &LI,
                                      SmallVectorImpl<MachineInstr*> *dead) {
  bool MayHaveSplitComponents = false;
  for (auto VNI : LI.valnos) {
    if (VNI->isUnused())
      continue;
    SlotIndex Def = VNI->def;
    LiveRange::iterator I = LI.FindSegmentContaining(Def);
    assert(I != LI.end() && "Missing segment for VNI");

    // Is the register live before? Otherwise we may have to add a read-undef
    // flag for subregister defs.
    unsigned VReg = LI.reg;
    if (MRI->shouldTrackSubRegLiveness(VReg)) {
      if ((I == LI.begin() || std::prev(I)->end < Def) && !VNI->isPHIDef()) {
        MachineInstr *MI = getInstructionFromIndex(Def);
        MI->setRegisterDefReadUndef(VReg);
      }
    }

    if (I->end != Def.getDeadSlot())
      continue;
    if (VNI->isPHIDef()) {
      // This is a dead PHI. Remove it.
      VNI->markUnused();
      LI.removeSegment(I);
      DEBUG(dbgs() << "Dead PHI at " << Def << " may separate interval\n");
      MayHaveSplitComponents = true;
    } else {
      // This is a dead def. Make sure the instruction knows.
      MachineInstr *MI = getInstructionFromIndex(Def);
      assert(MI && "No instruction defining live value");
      MI->addRegisterDead(LI.reg, TRI);
      if (dead && MI->allDefsAreDead()) {
        DEBUG(dbgs() << "All defs dead: " << Def << '\t' << *MI);
        dead->push_back(MI);
      }
    }
  }
  return MayHaveSplitComponents;
}

void LiveIntervals::shrinkToUses(LiveInterval::SubRange &SR, unsigned Reg)
{
  DEBUG(dbgs() << "Shrink: " << SR << '\n');
  assert(TargetRegisterInfo::isVirtualRegister(Reg)
         && "Can only shrink virtual registers");
  // Find all the values used, including PHI kills.
  ShrinkToUsesWorkList WorkList;

  // Visit all instructions reading Reg.
  SlotIndex LastIdx;
  for (MachineOperand &MO : MRI->reg_operands(Reg)) {
    MachineInstr *UseMI = MO.getParent();
    if (UseMI->isDebugValue())
      continue;
    // Maybe the operand is for a subregister we don't care about.
    unsigned SubReg = MO.getSubReg();
    if (SubReg != 0) {
      LaneBitmask LaneMask = TRI->getSubRegIndexLaneMask(SubReg);
      if ((LaneMask & SR.LaneMask) == 0)
        continue;
    }
    // We only need to visit each instruction once.
    SlotIndex Idx = getInstructionIndex(*UseMI).getRegSlot();
    if (Idx == LastIdx)
      continue;
    LastIdx = Idx;

    LiveQueryResult LRQ = SR.Query(Idx);
    VNInfo *VNI = LRQ.valueIn();
    // For subranges it is possible that only undef values are left in that
    // part of the subregister, so there is no real live range at the use.
    if (!VNI)
      continue;

    // Special case: An early-clobber tied operand reads and writes the
    // register one slot early.
    if (VNInfo *DefVNI = LRQ.valueDefined())
      Idx = DefVNI->def;

    WorkList.push_back(std::make_pair(Idx, VNI));
  }

  // Create a new live range with only minimal live segments per def.
  LiveRange NewLR;
  createSegmentsForValues(NewLR, make_range(SR.vni_begin(), SR.vni_end()));
  extendSegmentsToUses(NewLR, *Indexes, WorkList, SR);

  // Move the trimmed ranges back.
  SR.segments.swap(NewLR.segments);

  // Remove dead PHI value numbers.
  for (auto VNI : SR.valnos) {
    if (VNI->isUnused())
      continue;
    const LiveRange::Segment *Segment = SR.getSegmentContaining(VNI->def);
    assert(Segment != nullptr && "Missing segment for VNI");
    if (Segment->end != VNI->def.getDeadSlot())
      continue;
    if (VNI->isPHIDef()) {
      // This is a dead PHI. Remove it.
      DEBUG(dbgs() << "Dead PHI at " << VNI->def << " may separate interval\n");
      VNI->markUnused();
      SR.removeSegment(*Segment);
    }
  }

  DEBUG(dbgs() << "Shrunk: " << SR << '\n');
}

void LiveIntervals::extendToIndices(LiveRange &LR,
                                    ArrayRef<SlotIndex> Indices) {
  assert(LRCalc && "LRCalc not initialized.");
  LRCalc->reset(MF, getSlotIndexes(), DomTree, &getVNInfoAllocator());
  for (unsigned i = 0, e = Indices.size(); i != e; ++i)
    LRCalc->extend(LR, Indices[i]);
}

void LiveIntervals::pruneValue(LiveRange &LR, SlotIndex Kill,
                               SmallVectorImpl<SlotIndex> *EndPoints) {
  LiveQueryResult LRQ = LR.Query(Kill);
  VNInfo *VNI = LRQ.valueOutOrDead();
  if (!VNI)
    return;

  MachineBasicBlock *KillMBB = Indexes->getMBBFromIndex(Kill);
  SlotIndex MBBEnd = Indexes->getMBBEndIdx(KillMBB);

  // If VNI isn't live out from KillMBB, the value is trivially pruned.
  if (LRQ.endPoint() < MBBEnd) {
    LR.removeSegment(Kill, LRQ.endPoint());
    if (EndPoints) EndPoints->push_back(LRQ.endPoint());
    return;
  }

  // VNI is live out of KillMBB.
  LR.removeSegment(Kill, MBBEnd);
  if (EndPoints) EndPoints->push_back(MBBEnd);

  // Find all blocks that are reachable from KillMBB without leaving VNI's live
  // range. It is possible that KillMBB itself is reachable, so start a DFS
  // from each successor.
  typedef SmallPtrSet<MachineBasicBlock*, 9> VisitedTy;
  VisitedTy Visited;
  for (MachineBasicBlock::succ_iterator
       SuccI = KillMBB->succ_begin(), SuccE = KillMBB->succ_end();
       SuccI != SuccE; ++SuccI) {
    for (df_ext_iterator<MachineBasicBlock*, VisitedTy>
         I = df_ext_begin(*SuccI, Visited), E = df_ext_end(*SuccI, Visited);
         I != E;) {
      MachineBasicBlock *MBB = *I;

      // Check if VNI is live in to MBB.
      SlotIndex MBBStart, MBBEnd;
      std::tie(MBBStart, MBBEnd) = Indexes->getMBBRange(MBB);
      LiveQueryResult LRQ = LR.Query(MBBStart);
      if (LRQ.valueIn() != VNI) {
        // This block isn't part of the VNI segment. Prune the search.
        I.skipChildren();
        continue;
      }

      // Prune the search if VNI is killed in MBB.
      if (LRQ.endPoint() < MBBEnd) {
        LR.removeSegment(MBBStart, LRQ.endPoint());
        if (EndPoints) EndPoints->push_back(LRQ.endPoint());
        I.skipChildren();
        continue;
      }

      // VNI is live through MBB.
      LR.removeSegment(MBBStart, MBBEnd);
      if (EndPoints) EndPoints->push_back(MBBEnd);
      ++I;
    }
  }
}

//===----------------------------------------------------------------------===//
//                        Register allocator hooks.
//

void LiveIntervals::addKillFlags(const VirtRegMap *VRM) {
  // Keep track of regunit ranges.
  SmallVector<std::pair<const LiveRange*, LiveRange::const_iterator>, 8> RU;
  // Keep track of subregister ranges.
  SmallVector<std::pair<const LiveInterval::SubRange*,
                        LiveRange::const_iterator>, 4> SRs;

  for (unsigned i = 0, e = MRI->getNumVirtRegs(); i != e; ++i) {
    unsigned Reg = TargetRegisterInfo::index2VirtReg(i);
    if (MRI->reg_nodbg_empty(Reg))
      continue;
    const LiveInterval &LI = getInterval(Reg);
    if (LI.empty())
      continue;

    // Find the regunit intervals for the assigned register. They may overlap
    // the virtual register live range, cancelling any kills.
    RU.clear();
    for (MCRegUnitIterator Units(VRM->getPhys(Reg), TRI); Units.isValid();
         ++Units) {
      const LiveRange &RURange = getRegUnit(*Units);
      if (RURange.empty())
        continue;
      RU.push_back(std::make_pair(&RURange, RURange.find(LI.begin()->end)));
    }

    if (MRI->subRegLivenessEnabled()) {
      SRs.clear();
      for (const LiveInterval::SubRange &SR : LI.subranges()) {
        SRs.push_back(std::make_pair(&SR, SR.find(LI.begin()->end)));
      }
    }

    // Every instruction that kills Reg corresponds to a segment range end
    // point.
    for (LiveInterval::const_iterator RI = LI.begin(), RE = LI.end(); RI != RE;
         ++RI) {
      // A block index indicates an MBB edge.
      if (RI->end.isBlock())
        continue;
      MachineInstr *MI = getInstructionFromIndex(RI->end);
      if (!MI)
        continue;

      // Check if any of the regunits are live beyond the end of RI. That could
      // happen when a physreg is defined as a copy of a virtreg:
      //
      //   %EAX = COPY %vreg5
      //   FOO %vreg5            <--- MI, cancel kill because %EAX is live.
      //   BAR %EAX<kill>
      //
      // There should be no kill flag on FOO when %vreg5 is rewritten as %EAX.
      for (auto &RUP : RU) {
        const LiveRange &RURange = *RUP.first;
        LiveRange::const_iterator &I = RUP.second;
        if (I == RURange.end())
          continue;
        I = RURange.advanceTo(I, RI->end);
        if (I == RURange.end() || I->start >= RI->end)
          continue;
        // I is overlapping RI.
        goto CancelKill;
      }

      if (MRI->subRegLivenessEnabled()) {
        // When reading a partial undefined value we must not add a kill flag.
        // The regalloc might have used the undef lane for something else.
        // Example:
        //   %vreg1 = ...                  ; R32: %vreg1
        //   %vreg2:high16 = ...           ; R64: %vreg2
        //          = read %vreg2<kill>    ; R64: %vreg2
        //          = read %vreg1          ; R32: %vreg1
        // The <kill> flag is correct for %vreg2, but the register allocator may
        // assign R0L to %vreg1, and R0 to %vreg2 because the low 32bits of R0
        // are actually never written by %vreg2. After assignment the <kill>
        // flag at the read instruction is invalid.
        LaneBitmask DefinedLanesMask;
        if (!SRs.empty()) {
          // Compute a mask of lanes that are defined.
          DefinedLanesMask = 0;
          for (auto &SRP : SRs) {
            const LiveInterval::SubRange &SR = *SRP.first;
            LiveRange::const_iterator &I = SRP.second;
            if (I == SR.end())
              continue;
            I = SR.advanceTo(I, RI->end);
            if (I == SR.end() || I->start >= RI->end)
              continue;
            // I is overlapping RI.
            DefinedLanesMask |= SR.LaneMask;
          }
        } else
          DefinedLanesMask = ~0u;

        bool IsFullWrite = false;
        for (const MachineOperand &MO : MI->operands()) {
          if (!MO.isReg() || MO.getReg() != Reg)
            continue;
          if (MO.isUse()) {
            // Reading any undefined lanes?
            LaneBitmask UseMask = TRI->getSubRegIndexLaneMask(MO.getSubReg());
            if ((UseMask & ~DefinedLanesMask) != 0)
              goto CancelKill;
          } else if (MO.getSubReg() == 0) {
            // Writing to the full register?
            assert(MO.isDef());
            IsFullWrite = true;
          }
        }

        // If an instruction writes to a subregister, a new segment starts in
        // the LiveInterval. But as this is only overriding part of the
        // register, adding kill-flags is not correct here after registers
        // have been assigned.
        if (!IsFullWrite) {
          // Next segment has to be adjacent in the subregister write case.
          LiveRange::const_iterator N = std::next(RI);
          if (N != LI.end() && N->start == RI->end)
            goto CancelKill;
        }
      }

      MI->addRegisterKilled(Reg, nullptr);
      continue;
CancelKill:
      MI->clearRegisterKills(Reg, nullptr);
    }
  }
}

MachineBasicBlock*
LiveIntervals::intervalIsInOneMBB(const LiveInterval &LI) const {
  // A local live range must be fully contained inside the block, meaning it is
  // defined and killed at instructions, not at block boundaries. It is not
  // live in or out of any block.
  //
  // It is technically possible to have a PHI-defined live range identical to a
  // single block, but we are going to return nullptr in that case.

  SlotIndex Start = LI.beginIndex();
  if (Start.isBlock())
    return nullptr;

  SlotIndex Stop = LI.endIndex();
  if (Stop.isBlock())
    return nullptr;

  // getMBBFromIndex doesn't need to search the MBB table when both indexes
  // belong to proper instructions.
  MachineBasicBlock *MBB1 = Indexes->getMBBFromIndex(Start);
  MachineBasicBlock *MBB2 = Indexes->getMBBFromIndex(Stop);
  return MBB1 == MBB2 ? MBB1 : nullptr;
}

bool
LiveIntervals::hasPHIKill(const LiveInterval &LI, const VNInfo *VNI) const {
  for (const VNInfo *PHI : LI.valnos) {
    if (PHI->isUnused() || !PHI->isPHIDef())
      continue;
    const MachineBasicBlock *PHIMBB = getMBBFromIndex(PHI->def);
    // Conservatively return true instead of scanning huge predecessor lists.
    if (PHIMBB->pred_size() > 100)
      return true;
    for (MachineBasicBlock::const_pred_iterator
         PI = PHIMBB->pred_begin(), PE = PHIMBB->pred_end(); PI != PE; ++PI)
      if (VNI == LI.getVNInfoBefore(Indexes->getMBBEndIdx(*PI)))
        return true;
  }
  return false;
}

float LiveIntervals::getSpillWeight(bool isDef, bool isUse,
                                    const MachineBlockFrequencyInfo *MBFI,
                                    const MachineInstr &MI) {
  BlockFrequency Freq = MBFI->getBlockFreq(MI.getParent());
  const float Scale = 1.0f / MBFI->getEntryFreq();
  return (isDef + isUse) * (Freq.getFrequency() * Scale);
}

LiveRange::Segment
LiveIntervals::addSegmentToEndOfBlock(unsigned reg, MachineInstr &startInst) {
  LiveInterval& Interval = createEmptyInterval(reg);
  VNInfo *VN = Interval.getNextValue(
      SlotIndex(getInstructionIndex(startInst).getRegSlot()),
      getVNInfoAllocator());
  LiveRange::Segment S(SlotIndex(getInstructionIndex(startInst).getRegSlot()),
                       getMBBEndIdx(startInst.getParent()), VN);
  Interval.addSegment(S);

  return S;
}


//===----------------------------------------------------------------------===//
//                          Register mask functions
//===----------------------------------------------------------------------===//

bool LiveIntervals::checkRegMaskInterference(LiveInterval &LI,
                                             BitVector &UsableRegs) {
  if (LI.empty())
    return false;
  LiveInterval::iterator LiveI = LI.begin(), LiveE = LI.end();

  // Use smaller arrays for local live ranges.
  ArrayRef<SlotIndex> Slots;
  ArrayRef<const uint32_t*> Bits;
  if (MachineBasicBlock *MBB = intervalIsInOneMBB(LI)) {
    Slots = getRegMaskSlotsInBlock(MBB->getNumber());
    Bits = getRegMaskBitsInBlock(MBB->getNumber());
  } else {
    Slots = getRegMaskSlots();
    Bits = getRegMaskBits();
  }

  // We are going to enumerate all the register mask slots contained in LI.
  // Start with a binary search of RegMaskSlots to find a starting point.
  ArrayRef<SlotIndex>::iterator SlotI =
      std::lower_bound(Slots.begin(), Slots.end(), LiveI->start);
  ArrayRef<SlotIndex>::iterator SlotE = Slots.end();

  // No slots in range, LI begins after the last call.
  if (SlotI == SlotE)
    return false;

  bool Found = false;
  for (;;) {
    assert(*SlotI >= LiveI->start);
    // Loop over all slots overlapping this segment.
    while (*SlotI < LiveI->end) {
      // *SlotI overlaps LI. Collect mask bits.
      if (!Found) {
        // This is the first overlap. Initialize UsableRegs to all ones.
        UsableRegs.clear();
        UsableRegs.resize(TRI->getNumRegs(), true);
        Found = true;
      }
      // Remove usable registers clobbered by this mask.
      UsableRegs.clearBitsNotInMask(Bits[SlotI-Slots.begin()]);
      if (++SlotI == SlotE)
        return Found;
    }
    // *SlotI is beyond the current LI segment.
    LiveI = LI.advanceTo(LiveI, *SlotI);
    if (LiveI == LiveE)
      return Found;
    // Advance SlotI until it overlaps.
    while (*SlotI < LiveI->start)
      if (++SlotI == SlotE)
        return Found;
  }
}

//===----------------------------------------------------------------------===//
//                         IntervalUpdate class.
//===----------------------------------------------------------------------===//

// HMEditor is a toolkit used by handleMove to trim or extend live intervals.
class LiveIntervals::HMEditor {
private:
  LiveIntervals& LIS;
  const MachineRegisterInfo& MRI;
  const TargetRegisterInfo& TRI;
  SlotIndex OldIdx;
  SlotIndex NewIdx;
  SmallPtrSet<LiveRange*, 8> Updated;
  bool UpdateFlags;

public:
  HMEditor(LiveIntervals& LIS, const MachineRegisterInfo& MRI,
           const TargetRegisterInfo& TRI,
           SlotIndex OldIdx, SlotIndex NewIdx, bool UpdateFlags)
    : LIS(LIS), MRI(MRI), TRI(TRI), OldIdx(OldIdx), NewIdx(NewIdx),
      UpdateFlags(UpdateFlags) {}

  // FIXME: UpdateFlags is a workaround that creates live intervals for all
  // physregs, even those that aren't needed for regalloc, in order to update
  // kill flags. This is wasteful. Eventually, LiveVariables will strip all kill
  // flags, and postRA passes will use a live register utility instead.
  LiveRange *getRegUnitLI(unsigned Unit) {
    if (UpdateFlags)
      return &LIS.getRegUnit(Unit);
    return LIS.getCachedRegUnit(Unit);
  }

  /// Update all live ranges touched by MI, assuming a move from OldIdx to
  /// NewIdx.
  void updateAllRanges(MachineInstr *MI) {
    DEBUG(dbgs() << "handleMove " << OldIdx << " -> " << NewIdx << ": " << *MI);
    bool hasRegMask = false;
    for (MachineOperand &MO : MI->operands()) {
      if (MO.isRegMask())
        hasRegMask = true;
      if (!MO.isReg())
        continue;
      if (MO.isUse()) {
        if (!MO.readsReg())
          continue;
        // Aggressively clear all kill flags.
        // They are reinserted by VirtRegRewriter.
        MO.setIsKill(false);
      }

      unsigned Reg = MO.getReg();
      if (!Reg)
        continue;
      if (TargetRegisterInfo::isVirtualRegister(Reg)) {
        LiveInterval &LI = LIS.getInterval(Reg);
        if (LI.hasSubRanges()) {
          unsigned SubReg = MO.getSubReg();
          LaneBitmask LaneMask = TRI.getSubRegIndexLaneMask(SubReg);
          for (LiveInterval::SubRange &S : LI.subranges()) {
            if ((S.LaneMask & LaneMask) == 0)
              continue;
            updateRange(S, Reg, S.LaneMask);
          }
        }
        updateRange(LI, Reg, 0);
        continue;
      }

      // For physregs, only update the regunits that actually have a
      // precomputed live range.
      for (MCRegUnitIterator Units(Reg, &TRI); Units.isValid(); ++Units)
        if (LiveRange *LR = getRegUnitLI(*Units))
          updateRange(*LR, *Units, 0);
    }
    if (hasRegMask)
      updateRegMaskSlots();
  }

private:
  /// Update a single live range, assuming an instruction has been moved from
  /// OldIdx to NewIdx.
  void updateRange(LiveRange &LR, unsigned Reg, LaneBitmask LaneMask) {
    if (!Updated.insert(&LR).second)
      return;
    DEBUG({
      dbgs() << "     ";
      if (TargetRegisterInfo::isVirtualRegister(Reg)) {
        dbgs() << PrintReg(Reg);
        if (LaneMask != 0)
          dbgs() << " L" << PrintLaneMask(LaneMask);
      } else {
        dbgs() << PrintRegUnit(Reg, &TRI);
      }
      dbgs() << ":\t" << LR << '\n';
    });
    if (SlotIndex::isEarlierInstr(OldIdx, NewIdx))
      handleMoveDown(LR);
    else
      handleMoveUp(LR, Reg, LaneMask);
    DEBUG(dbgs() << "        -->\t" << LR << '\n');
    LR.verify();
  }

  /// Update LR to reflect an instruction has been moved downwards from OldIdx
  /// to NewIdx (OldIdx < NewIdx).
  void handleMoveDown(LiveRange &LR) {
    LiveRange::iterator E = LR.end();
    // Segment going into OldIdx.
    LiveRange::iterator OldIdxIn = LR.find(OldIdx.getBaseIndex());

    // No value live before or after OldIdx? Nothing to do.
    if (OldIdxIn == E || SlotIndex::isEarlierInstr(OldIdx, OldIdxIn->start))
      return;

    LiveRange::iterator OldIdxOut;
    // Do we have a value live-in to OldIdx?
    if (SlotIndex::isEarlierInstr(OldIdxIn->start, OldIdx)) {
      // If the live-in value already extends to NewIdx, there is nothing to do.
      if (SlotIndex::isEarlierEqualInstr(NewIdx, OldIdxIn->end))
        return;
      // Aggressively remove all kill flags from the old kill point.
      // Kill flags shouldn't be used while live intervals exist, they will be
      // reinserted by VirtRegRewriter.
      if (MachineInstr *KillMI = LIS.getInstructionFromIndex(OldIdxIn->end))
        for (MIBundleOperands MO(*KillMI); MO.isValid(); ++MO)
          if (MO->isReg() && MO->isUse())
            MO->setIsKill(false);

      // Is there a def before NewIdx which is not OldIdx?
      LiveRange::iterator Next = std::next(OldIdxIn);
      if (Next != E && !SlotIndex::isSameInstr(OldIdx, Next->start) &&
          SlotIndex::isEarlierInstr(Next->start, NewIdx)) {
        // If we are here then OldIdx was just a use but not a def. We only have
        // to ensure liveness extends to NewIdx.
        LiveRange::iterator NewIdxIn =
            LR.advanceTo(Next, NewIdx.getBaseIndex());
        // Extend the segment before NewIdx if necessary.
        if (NewIdxIn == E ||
            !SlotIndex::isEarlierInstr(NewIdxIn->start, NewIdx)) {
          LiveRange::iterator Prev = std::prev(NewIdxIn);
          Prev->end = NewIdx.getRegSlot();
        }
        return;
      }

      // Adjust OldIdxIn->end to reach NewIdx. This may temporarily make LR
      // invalid by overlapping ranges.
      bool isKill = SlotIndex::isSameInstr(OldIdx, OldIdxIn->end);
      OldIdxIn->end = NewIdx.getRegSlot(OldIdxIn->end.isEarlyClobber());
      // If this was not a kill, then there was no def and we're done.
      if (!isKill)
        return;

      // Did we have a Def at OldIdx?
      OldIdxOut = Next;
      if (OldIdxOut == E || !SlotIndex::isSameInstr(OldIdx, OldIdxOut->start))
        return;
    } else {
      OldIdxOut = OldIdxIn;
    }

    // If we are here then there is a Definition at OldIdx. OldIdxOut points
    // to the segment starting there.
    assert(OldIdxOut != E && SlotIndex::isSameInstr(OldIdx, OldIdxOut->start) &&
           "No def?");
    VNInfo *OldIdxVNI = OldIdxOut->valno;
    assert(OldIdxVNI->def == OldIdxOut->start && "Inconsistent def");

    // If the defined value extends beyond NewIdx, just move the beginning
    // of the segment to NewIdx.
    SlotIndex NewIdxDef = NewIdx.getRegSlot(OldIdxOut->start.isEarlyClobber());
    if (SlotIndex::isEarlierInstr(NewIdxDef, OldIdxOut->end)) {
      OldIdxVNI->def = NewIdxDef;
      OldIdxOut->start = OldIdxVNI->def;
      return;
    }

    // If we are here then we have a Definition at OldIdx which ends before
    // NewIdx.

    // Is there an existing Def at NewIdx?
    LiveRange::iterator AfterNewIdx
      = LR.advanceTo(OldIdxOut, NewIdx.getRegSlot());
    bool OldIdxDefIsDead = OldIdxOut->end.isDead();
    if (!OldIdxDefIsDead &&
        SlotIndex::isEarlierInstr(OldIdxOut->end, NewIdxDef)) {
      // OldIdx is not a dead def, and NewIdxDef is inside a new interval.
      VNInfo *DefVNI;
      if (OldIdxOut != LR.begin() &&
          !SlotIndex::isEarlierInstr(std::prev(OldIdxOut)->end,
                                     OldIdxOut->start)) {
        // There is no gap between OldIdxOut and its predecessor anymore,
        // merge them.
        LiveRange::iterator IPrev = std::prev(OldIdxOut);
        DefVNI = OldIdxVNI;
        IPrev->end = OldIdxOut->end;
      } else {
        // The value is live in to OldIdx.
        LiveRange::iterator INext = std::next(OldIdxOut);
        assert(INext != E && "Must have following segment");
        // We merge OldIdxOut and its successor. As we're dealing with subreg
        // reordering, there is always a successor to OldIdxOut in the same BB.
        // We don't need INext->valno anymore and will reuse it for the new
        // segment we create later.
        DefVNI = OldIdxVNI;
        INext->start = OldIdxOut->end;
        INext->valno->def = INext->start;
      }
      // If NewIdx is behind the last segment, extend that and append a new one.
      if (AfterNewIdx == E) {
        // OldIdxOut is undef at this point, Slide (OldIdxOut;AfterNewIdx] up
        // one position.
        //    |-  ?/OldIdxOut  -| |- X0 -| ... |- Xn -| end
        // => |- X0/OldIdxOut  -| ... |- Xn -| |- undef/NewS -| end
        std::copy(std::next(OldIdxOut), E, OldIdxOut);
        // The last segment is undefined now, reuse it for a dead def.
        LiveRange::iterator NewSegment = std::prev(E);
        *NewSegment = LiveRange::Segment(NewIdxDef, NewIdxDef.getDeadSlot(),
                                         DefVNI);
        DefVNI->def = NewIdxDef;

        LiveRange::iterator Prev = std::prev(NewSegment);
        Prev->end = NewIdxDef;
      } else {
        // OldIdxOut is undef at this point, Slide (OldIdxOut;AfterNewIdx] up
        // one position.
        //    |-  ?/OldIdxOut  -| |- X0 -| ... |- Xn/AfterNewIdx -| |- Next -|
        // => |- X0/OldIdxOut  -| ... |- Xn -| |- Xn/AfterNewIdx -| |- Next -|
        std::copy(std::next(OldIdxOut), std::next(AfterNewIdx), OldIdxOut);
        LiveRange::iterator Prev = std::prev(AfterNewIdx);
        // We have two cases:
        if (SlotIndex::isEarlierInstr(Prev->start, NewIdxDef)) {
          // Case 1: NewIdx is inside a liverange. Split this liverange at
          // NewIdxDef into the segment "Prev" followed by "NewSegment".
          LiveRange::iterator NewSegment = AfterNewIdx;
          *NewSegment = LiveRange::Segment(NewIdxDef, Prev->end, Prev->valno);
          Prev->valno->def = NewIdxDef;

          *Prev = LiveRange::Segment(Prev->start, NewIdxDef, DefVNI);
          DefVNI->def = Prev->start;
        } else {
          // Case 2: NewIdx is in a lifetime hole. Keep AfterNewIdx as is and
          // turn Prev into a segment from NewIdx to AfterNewIdx->start.
          *Prev = LiveRange::Segment(NewIdxDef, AfterNewIdx->start, DefVNI);
          DefVNI->def = NewIdxDef;
          assert(DefVNI != AfterNewIdx->valno);
        }
      }
      return;
    }

    if (AfterNewIdx != E &&
        SlotIndex::isSameInstr(AfterNewIdx->start, NewIdxDef)) {
      // There is an existing def at NewIdx. The def at OldIdx is coalesced into
      // that value.
      assert(AfterNewIdx->valno != OldIdxVNI && "Multiple defs of value?");
      LR.removeValNo(OldIdxVNI);
    } else {
      // There was no existing def at NewIdx. We need to create a dead def
      // at NewIdx. Shift segments over the old OldIdxOut segment, this frees
      // a new segment at the place where we want to construct the dead def.
      //    |- OldIdxOut -| |- X0 -| ... |- Xn -| |- AfterNewIdx -|
      // => |- X0/OldIdxOut -| ... |- Xn -| |- undef/NewS. -| |- AfterNewIdx -|
      assert(AfterNewIdx != OldIdxOut && "Inconsistent iterators");
      std::copy(std::next(OldIdxOut), AfterNewIdx, OldIdxOut);
      // We can reuse OldIdxVNI now.
      LiveRange::iterator NewSegment = std::prev(AfterNewIdx);
      VNInfo *NewSegmentVNI = OldIdxVNI;
      NewSegmentVNI->def = NewIdxDef;
      *NewSegment = LiveRange::Segment(NewIdxDef, NewIdxDef.getDeadSlot(),
                                       NewSegmentVNI);
    }
  }

  /// Update LR to reflect an instruction has been moved upwards from OldIdx
  /// to NewIdx (NewIdx < OldIdx).
  void handleMoveUp(LiveRange &LR, unsigned Reg, LaneBitmask LaneMask) {
    LiveRange::iterator E = LR.end();
    // Segment going into OldIdx.
    LiveRange::iterator OldIdxIn = LR.find(OldIdx.getBaseIndex());

    // No value live before or after OldIdx? Nothing to do.
    if (OldIdxIn == E || SlotIndex::isEarlierInstr(OldIdx, OldIdxIn->start))
      return;

    LiveRange::iterator OldIdxOut;
    // Do we have a value live-in to OldIdx?
    if (SlotIndex::isEarlierInstr(OldIdxIn->start, OldIdx)) {
      // If the live-in value isn't killed here, then we have no Def at
      // OldIdx, moreover the value must be live at NewIdx so there is nothing
      // to do.
      bool isKill = SlotIndex::isSameInstr(OldIdx, OldIdxIn->end);
      if (!isKill)
        return;

      // At this point we have to move OldIdxIn->end back to the nearest
      // previous use or (dead-)def but no further than NewIdx.
      SlotIndex DefBeforeOldIdx
        = std::max(OldIdxIn->start.getDeadSlot(),
                   NewIdx.getRegSlot(OldIdxIn->end.isEarlyClobber()));
      OldIdxIn->end = findLastUseBefore(DefBeforeOldIdx, Reg, LaneMask);

      // Did we have a Def at OldIdx? If not we are done now.
      OldIdxOut = std::next(OldIdxIn);
      if (OldIdxOut == E || !SlotIndex::isSameInstr(OldIdx, OldIdxOut->start))
        return;
    } else {
      OldIdxOut = OldIdxIn;
      OldIdxIn = OldIdxOut != LR.begin() ? std::prev(OldIdxOut) : E;
    }

    // If we are here then there is a Definition at OldIdx. OldIdxOut points
    // to the segment starting there.
    assert(OldIdxOut != E && SlotIndex::isSameInstr(OldIdx, OldIdxOut->start) &&
           "No def?");
    VNInfo *OldIdxVNI = OldIdxOut->valno;
    assert(OldIdxVNI->def == OldIdxOut->start && "Inconsistent def");
    bool OldIdxDefIsDead = OldIdxOut->end.isDead();

    // Is there an existing def at NewIdx?
    SlotIndex NewIdxDef = NewIdx.getRegSlot(OldIdxOut->start.isEarlyClobber());
    LiveRange::iterator NewIdxOut = LR.find(NewIdx.getRegSlot());
    if (SlotIndex::isSameInstr(NewIdxOut->start, NewIdx)) {
      assert(NewIdxOut->valno != OldIdxVNI &&
             "Same value defined more than once?");
      // If OldIdx was a dead def remove it.
      if (!OldIdxDefIsDead) {
        // Remove segment starting at NewIdx and move begin of OldIdxOut to
        // NewIdx so it can take its place.
        OldIdxVNI->def = NewIdxDef;
        OldIdxOut->start = NewIdxDef;
        LR.removeValNo(NewIdxOut->valno);
      } else {
        // Simply remove the dead def at OldIdx.
        LR.removeValNo(OldIdxVNI);
      }
    } else {
      // Previously nothing was live after NewIdx, so all we have to do now is
      // move the begin of OldIdxOut to NewIdx.
      if (!OldIdxDefIsDead) {
        // Do we have any intermediate Defs between OldIdx and NewIdx?
        if (OldIdxIn != E &&
            SlotIndex::isEarlierInstr(NewIdxDef, OldIdxIn->start)) {
          // OldIdx is not a dead def and NewIdx is before predecessor start.
          LiveRange::iterator NewIdxIn = NewIdxOut;
          assert(NewIdxIn == LR.find(NewIdx.getBaseIndex()));
          const SlotIndex SplitPos = NewIdxDef;

          // Merge the OldIdxIn and OldIdxOut segments into OldIdxOut.
          *OldIdxOut = LiveRange::Segment(OldIdxIn->start, OldIdxOut->end,
                                          OldIdxIn->valno);
          // OldIdxIn and OldIdxVNI are now undef and can be overridden.
          // We Slide [NewIdxIn, OldIdxIn) down one position.
          //    |- X0/NewIdxIn -| ... |- Xn-1 -||- Xn/OldIdxIn -||- OldIdxOut -|
          // => |- undef/NewIdxIn -| |- X0 -| ... |- Xn-1 -| |- Xn/OldIdxOut -|
          std::copy_backward(NewIdxIn, OldIdxIn, OldIdxOut);
          // NewIdxIn is now considered undef so we can reuse it for the moved
          // value.
          LiveRange::iterator NewSegment = NewIdxIn;
          LiveRange::iterator Next = std::next(NewSegment);
          if (SlotIndex::isEarlierInstr(Next->start, NewIdx)) {
            // There is no gap between NewSegment and its predecessor.
            *NewSegment = LiveRange::Segment(Next->start, SplitPos,
                                             Next->valno);
            *Next = LiveRange::Segment(SplitPos, Next->end, OldIdxVNI);
            Next->valno->def = SplitPos;
          } else {
            // There is a gap between NewSegment and its predecessor.
            // Value becomes live in.
            *NewSegment = LiveRange::Segment(SplitPos, Next->start, OldIdxVNI);
            NewSegment->valno->def = SplitPos;
          }
        } else {
          // Leave the end point of a live def.
          OldIdxOut->start = NewIdxDef;
          OldIdxVNI->def = NewIdxDef;
          if (OldIdxIn != E && SlotIndex::isEarlierInstr(NewIdx, OldIdxIn->end))
            OldIdxIn->end = NewIdx.getRegSlot();
        }
      } else {
        // OldIdxVNI is a dead def. It may have been moved across other values
        // in LR, so move OldIdxOut up to NewIdxOut. Slide [NewIdxOut;OldIdxOut)
        // down one position.
        //    |- X0/NewIdxOut -| ... |- Xn-1 -| |- Xn/OldIdxOut -| |- next -|
        // => |- undef/NewIdxOut -| |- X0 -| ... |- Xn-1 -| |- next -|
        std::copy_backward(NewIdxOut, OldIdxOut, std::next(OldIdxOut));
        // OldIdxVNI can be reused now to build a new dead def segment.
        LiveRange::iterator NewSegment = NewIdxOut;
        VNInfo *NewSegmentVNI = OldIdxVNI;
        *NewSegment = LiveRange::Segment(NewIdxDef, NewIdxDef.getDeadSlot(),
                                         NewSegmentVNI);
        NewSegmentVNI->def = NewIdxDef;
      }
    }
  }

  void updateRegMaskSlots() {
    SmallVectorImpl<SlotIndex>::iterator RI =
        std::lower_bound(LIS.RegMaskSlots.begin(), LIS.RegMaskSlots.end(),
                         OldIdx);
    assert(RI != LIS.RegMaskSlots.end() && *RI == OldIdx.getRegSlot() &&
           "No RegMask at OldIdx.");
    *RI = NewIdx.getRegSlot();
    assert((RI == LIS.RegMaskSlots.begin() ||
            SlotIndex::isEarlierInstr(*std::prev(RI), *RI)) &&
           "Cannot move regmask instruction above another call");
    assert((std::next(RI) == LIS.RegMaskSlots.end() ||
            SlotIndex::isEarlierInstr(*RI, *std::next(RI))) &&
           "Cannot move regmask instruction below another call");
  }

  // Return the last use of reg between NewIdx and OldIdx.
  SlotIndex findLastUseBefore(SlotIndex Before, unsigned Reg,
                              LaneBitmask LaneMask) {
    if (TargetRegisterInfo::isVirtualRegister(Reg)) {
      SlotIndex LastUse = Before;
      for (MachineOperand &MO : MRI.use_nodbg_operands(Reg)) {
        if (MO.isUndef())
          continue;
        unsigned SubReg = MO.getSubReg();
        if (SubReg != 0 && LaneMask != 0
            && (TRI.getSubRegIndexLaneMask(SubReg) & LaneMask) == 0)
          continue;

        const MachineInstr &MI = *MO.getParent();
        SlotIndex InstSlot = LIS.getSlotIndexes()->getInstructionIndex(MI);
        if (InstSlot > LastUse && InstSlot < OldIdx)
          LastUse = InstSlot.getRegSlot();
      }
      return LastUse;
    }

    // This is a regunit interval, so scanning the use list could be very
    // expensive. Scan upwards from OldIdx instead.
    assert(Before < OldIdx && "Expected upwards move");
    SlotIndexes *Indexes = LIS.getSlotIndexes();
    MachineBasicBlock *MBB = Indexes->getMBBFromIndex(Before);

    // OldIdx may not correspond to an instruction any longer, so set MII to
    // point to the next instruction after OldIdx, or MBB->end().
    MachineBasicBlock::iterator MII = MBB->end();
    if (MachineInstr *MI = Indexes->getInstructionFromIndex(
            Indexes->getNextNonNullIndex(OldIdx)))
      if (MI->getParent() == MBB)
        MII = MI;

    MachineBasicBlock::iterator Begin = MBB->begin();
    while (MII != Begin) {
      if ((--MII)->isDebugValue())
        continue;
      SlotIndex Idx = Indexes->getInstructionIndex(*MII);

      // Stop searching when Before is reached.
      if (!SlotIndex::isEarlierInstr(Before, Idx))
        return Before;

      // Check if MII uses Reg.
      for (MIBundleOperands MO(*MII); MO.isValid(); ++MO)
        if (MO->isReg() && !MO->isUndef() &&
            TargetRegisterInfo::isPhysicalRegister(MO->getReg()) &&
            TRI.hasRegUnit(MO->getReg(), Reg))
          return Idx.getRegSlot();
    }
    // Didn't reach Before. It must be the first instruction in the block.
    return Before;
  }
};

void LiveIntervals::handleMove(MachineInstr &MI, bool UpdateFlags) {
  assert(!MI.isBundled() && "Can't handle bundled instructions yet.");
  SlotIndex OldIndex = Indexes->getInstructionIndex(MI);
  Indexes->removeMachineInstrFromMaps(MI);
  SlotIndex NewIndex = Indexes->insertMachineInstrInMaps(MI);
  assert(getMBBStartIdx(MI.getParent()) <= OldIndex &&
         OldIndex < getMBBEndIdx(MI.getParent()) &&
         "Cannot handle moves across basic block boundaries.");

  HMEditor HME(*this, *MRI, *TRI, OldIndex, NewIndex, UpdateFlags);
  HME.updateAllRanges(&MI);
}

void LiveIntervals::handleMoveIntoBundle(MachineInstr &MI,
                                         MachineInstr &BundleStart,
                                         bool UpdateFlags) {
  SlotIndex OldIndex = Indexes->getInstructionIndex(MI);
  SlotIndex NewIndex = Indexes->getInstructionIndex(BundleStart);
  HMEditor HME(*this, *MRI, *TRI, OldIndex, NewIndex, UpdateFlags);
  HME.updateAllRanges(&MI);
}

void LiveIntervals::repairOldRegInRange(const MachineBasicBlock::iterator Begin,
                                        const MachineBasicBlock::iterator End,
                                        const SlotIndex endIdx,
                                        LiveRange &LR, const unsigned Reg,
                                        LaneBitmask LaneMask) {
  LiveInterval::iterator LII = LR.find(endIdx);
  SlotIndex lastUseIdx;
  if (LII != LR.end() && LII->start < endIdx)
    lastUseIdx = LII->end;
  else
    --LII;

  for (MachineBasicBlock::iterator I = End; I != Begin;) {
    --I;
    MachineInstr &MI = *I;
    if (MI.isDebugValue())
      continue;

    SlotIndex instrIdx = getInstructionIndex(MI);
    bool isStartValid = getInstructionFromIndex(LII->start);
    bool isEndValid = getInstructionFromIndex(LII->end);

    // FIXME: This doesn't currently handle early-clobber or multiple removed
    // defs inside of the region to repair.
    for (MachineInstr::mop_iterator OI = MI.operands_begin(),
         OE = MI.operands_end();
         OI != OE; ++OI) {
      const MachineOperand &MO = *OI;
      if (!MO.isReg() || MO.getReg() != Reg)
        continue;

      unsigned SubReg = MO.getSubReg();
      LaneBitmask Mask = TRI->getSubRegIndexLaneMask(SubReg);
      if ((Mask & LaneMask) == 0)
        continue;

      if (MO.isDef()) {
        if (!isStartValid) {
          if (LII->end.isDead()) {
            SlotIndex prevStart;
            if (LII != LR.begin())
              prevStart = std::prev(LII)->start;

            // FIXME: This could be more efficient if there was a
            // removeSegment method that returned an iterator.
            LR.removeSegment(*LII, true);
            if (prevStart.isValid())
              LII = LR.find(prevStart);
            else
              LII = LR.begin();
          } else {
            LII->start = instrIdx.getRegSlot();
            LII->valno->def = instrIdx.getRegSlot();
            if (MO.getSubReg() && !MO.isUndef())
              lastUseIdx = instrIdx.getRegSlot();
            else
              lastUseIdx = SlotIndex();
            continue;
          }
        }

        if (!lastUseIdx.isValid()) {
          VNInfo *VNI = LR.getNextValue(instrIdx.getRegSlot(), VNInfoAllocator);
          LiveRange::Segment S(instrIdx.getRegSlot(),
                               instrIdx.getDeadSlot(), VNI);
          LII = LR.addSegment(S);
        } else if (LII->start != instrIdx.getRegSlot()) {
          VNInfo *VNI = LR.getNextValue(instrIdx.getRegSlot(), VNInfoAllocator);
          LiveRange::Segment S(instrIdx.getRegSlot(), lastUseIdx, VNI);
          LII = LR.addSegment(S);
        }

        if (MO.getSubReg() && !MO.isUndef())
          lastUseIdx = instrIdx.getRegSlot();
        else
          lastUseIdx = SlotIndex();
      } else if (MO.isUse()) {
        // FIXME: This should probably be handled outside of this branch,
        // either as part of the def case (for defs inside of the region) or
        // after the loop over the region.
        if (!isEndValid && !LII->end.isBlock())
          LII->end = instrIdx.getRegSlot();
        if (!lastUseIdx.isValid())
          lastUseIdx = instrIdx.getRegSlot();
      }
    }
  }
}

void
LiveIntervals::repairIntervalsInRange(MachineBasicBlock *MBB,
                                      MachineBasicBlock::iterator Begin,
                                      MachineBasicBlock::iterator End,
                                      ArrayRef<unsigned> OrigRegs) {
  // Find anchor points, which are at the beginning/end of blocks or at
  // instructions that already have indexes.
  while (Begin != MBB->begin() && !Indexes->hasIndex(*Begin))
    --Begin;
  while (End != MBB->end() && !Indexes->hasIndex(*End))
    ++End;

  SlotIndex endIdx;
  if (End == MBB->end())
    endIdx = getMBBEndIdx(MBB).getPrevSlot();
  else
    endIdx = getInstructionIndex(*End);

  Indexes->repairIndexesInRange(MBB, Begin, End);

  for (MachineBasicBlock::iterator I = End; I != Begin;) {
    --I;
    MachineInstr &MI = *I;
    if (MI.isDebugValue())
      continue;
    for (MachineInstr::const_mop_iterator MOI = MI.operands_begin(),
         MOE = MI.operands_end();
         MOI != MOE; ++MOI) {
      if (MOI->isReg() &&
          TargetRegisterInfo::isVirtualRegister(MOI->getReg()) &&
          !hasInterval(MOI->getReg())) {
        createAndComputeVirtRegInterval(MOI->getReg());
      }
    }
  }

  for (unsigned i = 0, e = OrigRegs.size(); i != e; ++i) {
    unsigned Reg = OrigRegs[i];
    if (!TargetRegisterInfo::isVirtualRegister(Reg))
      continue;

    LiveInterval &LI = getInterval(Reg);
    // FIXME: Should we support undefs that gain defs?
    if (!LI.hasAtLeastOneValue())
      continue;

    for (LiveInterval::SubRange &S : LI.subranges()) {
      repairOldRegInRange(Begin, End, endIdx, S, Reg, S.LaneMask);
    }
    repairOldRegInRange(Begin, End, endIdx, LI, Reg);
  }
}

void LiveIntervals::removePhysRegDefAt(unsigned Reg, SlotIndex Pos) {
  for (MCRegUnitIterator Units(Reg, TRI); Units.isValid(); ++Units) {
    if (LiveRange *LR = getCachedRegUnit(*Units))
      if (VNInfo *VNI = LR->getVNInfoAt(Pos))
        LR->removeValNo(VNI);
  }
}

void LiveIntervals::removeVRegDefAt(LiveInterval &LI, SlotIndex Pos) {
  VNInfo *VNI = LI.getVNInfoAt(Pos);
  if (VNI == nullptr)
    return;
  LI.removeValNo(VNI);

  // Also remove the value in subranges.
  for (LiveInterval::SubRange &S : LI.subranges()) {
    if (VNInfo *SVNI = S.getVNInfoAt(Pos))
      S.removeValNo(SVNI);
  }
  LI.removeEmptySubRanges();
}

void LiveIntervals::splitSeparateComponents(LiveInterval &LI,
    SmallVectorImpl<LiveInterval*> &SplitLIs) {
  ConnectedVNInfoEqClasses ConEQ(*this);
  unsigned NumComp = ConEQ.Classify(LI);
  if (NumComp <= 1)
    return;
  DEBUG(dbgs() << "  Split " << NumComp << " components: " << LI << '\n');
  unsigned Reg = LI.reg;
  const TargetRegisterClass *RegClass = MRI->getRegClass(Reg);
  for (unsigned I = 1; I < NumComp; ++I) {
    unsigned NewVReg = MRI->createVirtualRegister(RegClass);
    LiveInterval &NewLI = createEmptyInterval(NewVReg);
    SplitLIs.push_back(&NewLI);
  }
  ConEQ.Distribute(LI, SplitLIs.data(), *MRI);
}

void LiveIntervals::constructMainRangeFromSubranges(LiveInterval &LI) {
  assert(LRCalc && "LRCalc not initialized.");
  LRCalc->reset(MF, getSlotIndexes(), DomTree, &getVNInfoAllocator());
  LRCalc->constructMainRangeFromSubranges(LI);
}