//===-------- InlineSpiller.cpp - Insert spills and restores inline -------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// The inline spiller modifies the machine function directly instead of
// inserting spills and restores in VirtRegMap.
//
//===----------------------------------------------------------------------===//

#include "Spiller.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/TinyPtrVector.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/LiveRangeEdit.h"
#include "llvm/CodeGen/LiveStackAnalysis.h"
#include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
#include "llvm/CodeGen/MachineBranchProbabilityInfo.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineInstrBundle.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/VirtRegMap.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"

using namespace llvm;

#define DEBUG_TYPE "regalloc"

STATISTIC(NumSpilledRanges, "Number of spilled live ranges");
STATISTIC(NumSnippets, "Number of spilled snippets");
STATISTIC(NumSpills, "Number of spills inserted");
STATISTIC(NumSpillsRemoved, "Number of spills removed");
STATISTIC(NumReloads, "Number of reloads inserted");
STATISTIC(NumReloadsRemoved, "Number of reloads removed");
STATISTIC(NumFolded, "Number of folded stack accesses");
STATISTIC(NumFoldedLoads, "Number of folded loads");
STATISTIC(NumRemats, "Number of rematerialized defs for spilling");
STATISTIC(NumOmitReloadSpill, "Number of omitted spills of reloads");
STATISTIC(NumHoists, "Number of hoisted spills");

static cl::opt<bool> DisableHoisting("disable-spill-hoist", cl::Hidden,
                                     cl::desc("Disable inline spill hoisting"));

namespace {
class InlineSpiller : public Spiller {
  MachineFunction &MF;
  LiveIntervals &LIS;
  LiveStacks &LSS;
  AliasAnalysis *AA;
  MachineDominatorTree &MDT;
  MachineLoopInfo &Loops;
  VirtRegMap &VRM;
  MachineFrameInfo &MFI;
  MachineRegisterInfo &MRI;
  const TargetInstrInfo &TII;
  const TargetRegisterInfo &TRI;
  const MachineBlockFrequencyInfo &MBFI;

  // Variables that are valid during spill(), but used by multiple methods.
  LiveRangeEdit *Edit;
  LiveInterval *StackInt;
  int StackSlot;
  unsigned Original;

  // All registers to spill to StackSlot, including the main register.
  SmallVector<unsigned, 8> RegsToSpill;

  // All COPY instructions to/from snippets.
  // They are ignored since both operands refer to the same stack slot.
  SmallPtrSet<MachineInstr*, 8> SnippetCopies;

  // Values that failed to remat at some point.
  SmallPtrSet<VNInfo*, 8> UsedValues;

public:
  // Information about a value that was defined by a copy from a sibling
  // register.
  struct SibValueInfo {
    // True when all reaching defs were reloads: No spill is necessary.
    bool AllDefsAreReloads;

    // True when value is defined by an original PHI not from splitting.
    bool DefByOrigPHI;

    // True when the COPY defining this value killed its source.
    bool KillsSource;

    // The preferred register to spill.
    unsigned SpillReg;

    // The value of SpillReg that should be spilled.
    VNInfo *SpillVNI;

    // The block where SpillVNI should be spilled. Currently, this must be the
    // block containing SpillVNI->def.
    MachineBasicBlock *SpillMBB;

    // A defining instruction that is not a sibling copy or a reload, or NULL.
    // This can be used as a template for rematerialization.
    MachineInstr *DefMI;

    // List of values that depend on this one. These values are actually the
    // same, but live range splitting has placed them in different registers,
    // or SSA update needed to insert PHI-defs to preserve SSA form. These are
    // copies of the current value and phi-kills. Usually only phi-kills cause
    // more than one dependent value.
    TinyPtrVector<VNInfo*> Deps;

    SibValueInfo(unsigned Reg, VNInfo *VNI)
      : AllDefsAreReloads(true), DefByOrigPHI(false), KillsSource(false),
        SpillReg(Reg), SpillVNI(VNI), SpillMBB(nullptr), DefMI(nullptr) {}

    // Returns true when a def has been found.
    bool hasDef() const { return DefByOrigPHI || DefMI; }
  };

private:
  // Values in RegsToSpill defined by sibling copies.
  typedef DenseMap<VNInfo*, SibValueInfo> SibValueMap;
  SibValueMap SibValues;

  // Dead defs generated during spilling.
  SmallVector<MachineInstr*, 8> DeadDefs;

  ~InlineSpiller() {}

public:
  InlineSpiller(MachineFunctionPass &pass,
                MachineFunction &mf,
                VirtRegMap &vrm)
    : MF(mf),
      LIS(pass.getAnalysis<LiveIntervals>()),
      LSS(pass.getAnalysis<LiveStacks>()),
      AA(&pass.getAnalysis<AliasAnalysis>()),
      MDT(pass.getAnalysis<MachineDominatorTree>()),
      Loops(pass.getAnalysis<MachineLoopInfo>()),
      VRM(vrm),
      MFI(*mf.getFrameInfo()),
      MRI(mf.getRegInfo()),
      TII(*mf.getTarget().getInstrInfo()),
      TRI(*mf.getTarget().getRegisterInfo()),
      MBFI(pass.getAnalysis<MachineBlockFrequencyInfo>()) {}

  void spill(LiveRangeEdit &) override;

private:
  bool isSnippet(const LiveInterval &SnipLI);
  void collectRegsToSpill();

  bool isRegToSpill(unsigned Reg) {
    return std::find(RegsToSpill.begin(),
                     RegsToSpill.end(), Reg) != RegsToSpill.end();
  }

  bool isSibling(unsigned Reg);
  MachineInstr *traceSiblingValue(unsigned, VNInfo*, VNInfo*);
  void propagateSiblingValue(SibValueMap::iterator, VNInfo *VNI = nullptr);
  void analyzeSiblingValues();

  bool hoistSpill(LiveInterval &SpillLI, MachineInstr *CopyMI);
  void eliminateRedundantSpills(LiveInterval &LI, VNInfo *VNI);

  void markValueUsed(LiveInterval*, VNInfo*);
  bool reMaterializeFor(LiveInterval&, MachineBasicBlock::iterator MI);
  void reMaterializeAll();

  bool coalesceStackAccess(MachineInstr *MI, unsigned Reg);
  bool foldMemoryOperand(ArrayRef<std::pair<MachineInstr*, unsigned> >,
                         MachineInstr *LoadMI = nullptr);
  void insertReload(unsigned VReg, SlotIndex, MachineBasicBlock::iterator MI);
  void insertSpill(unsigned VReg, bool isKill, MachineBasicBlock::iterator MI);

  void spillAroundUses(unsigned Reg);
  void spillAll();
};
}

namespace llvm {
Spiller *createInlineSpiller(MachineFunctionPass &pass,
                             MachineFunction &mf,
                             VirtRegMap &vrm) {
  return new InlineSpiller(pass, mf, vrm);
}
}

//===----------------------------------------------------------------------===//
// Snippets
//===----------------------------------------------------------------------===//

// When spilling a virtual register, we also spill any snippets it is connected
// to. The snippets are small live ranges that only have a single real use,
// leftovers from live range splitting. Spilling them enables memory operand
// folding or tightens the live range around the single use.
//
// This minimizes register pressure and maximizes the store-to-load distance for
// spill slots, which can be important in tight loops.

/// isFullCopyOf - If MI is a COPY to or from Reg, return the other register,
/// otherwise return 0.
static unsigned isFullCopyOf(const MachineInstr *MI, unsigned Reg) {
  if (!MI->isFullCopy())
    return 0;
  if (MI->getOperand(0).getReg() == Reg)
    return MI->getOperand(1).getReg();
  if (MI->getOperand(1).getReg() == Reg)
    return MI->getOperand(0).getReg();
  return 0;
}

/// isSnippet - Identify if a live interval is a snippet that should be spilled.
/// It is assumed that SnipLI is a virtual register with the same original as
/// Edit->getReg().
bool InlineSpiller::isSnippet(const LiveInterval &SnipLI) {
  unsigned Reg = Edit->getReg();

  // A snippet is a tiny live range with only a single instruction using it
  // besides copies to/from Reg or spills/fills. We accept:
  //
  //   %snip = COPY %Reg / FILL fi#
  //   %snip = USE %snip
  //   %Reg = COPY %snip / SPILL %snip, fi#
  //
  if (SnipLI.getNumValNums() > 2 || !LIS.intervalIsInOneMBB(SnipLI))
    return false;

  MachineInstr *UseMI = nullptr;

  // Check that all uses satisfy our criteria.
  for (MachineRegisterInfo::reg_instr_nodbg_iterator
       RI = MRI.reg_instr_nodbg_begin(SnipLI.reg),
       E = MRI.reg_instr_nodbg_end(); RI != E; ) {
    MachineInstr *MI = &*(RI++);

    // Allow copies to/from Reg.
    if (isFullCopyOf(MI, Reg))
      continue;

    // Allow stack slot loads.
    int FI;
    if (SnipLI.reg == TII.isLoadFromStackSlot(MI, FI) && FI == StackSlot)
      continue;

    // Allow stack slot stores.
    if (SnipLI.reg == TII.isStoreToStackSlot(MI, FI) && FI == StackSlot)
      continue;

    // Allow a single additional instruction.
    if (UseMI && MI != UseMI)
      return false;
    UseMI = MI;
  }
  return true;
}

/// collectRegsToSpill - Collect live range snippets that only have a single
/// real use.
void InlineSpiller::collectRegsToSpill() {
  unsigned Reg = Edit->getReg();

  // Main register always spills.
  RegsToSpill.assign(1, Reg);
  SnippetCopies.clear();

  // Snippets all have the same original, so there can't be any for an original
  // register.
  if (Original == Reg)
    return;

  for (MachineRegisterInfo::reg_instr_iterator
       RI = MRI.reg_instr_begin(Reg), E = MRI.reg_instr_end(); RI != E; ) {
    MachineInstr *MI = &*(RI++);
    unsigned SnipReg = isFullCopyOf(MI, Reg);
    if (!isSibling(SnipReg))
      continue;
    LiveInterval &SnipLI = LIS.getInterval(SnipReg);
    if (!isSnippet(SnipLI))
      continue;
    SnippetCopies.insert(MI);
    if (isRegToSpill(SnipReg))
      continue;
    RegsToSpill.push_back(SnipReg);
    DEBUG(dbgs() << "\talso spill snippet " << SnipLI << '\n');
    ++NumSnippets;
  }
}


//===----------------------------------------------------------------------===//
// Sibling Values
//===----------------------------------------------------------------------===//

// After live range splitting, some values to be spilled may be defined by
// copies from sibling registers. We trace the sibling copies back to the
// original value if it still exists. We need it for rematerialization.
//
// Even when the value can't be rematerialized, we still want to determine if
// the value has already been spilled, or we may want to hoist the spill from a
// loop.

bool InlineSpiller::isSibling(unsigned Reg) {
  return TargetRegisterInfo::isVirtualRegister(Reg) &&
         VRM.getOriginal(Reg) == Original;
}

#ifndef NDEBUG
static raw_ostream &operator<<(raw_ostream &OS,
                               const InlineSpiller::SibValueInfo &SVI) {
  OS << "spill " << PrintReg(SVI.SpillReg) << ':'
     << SVI.SpillVNI->id << '@' << SVI.SpillVNI->def;
  if (SVI.SpillMBB)
    OS << " in BB#" << SVI.SpillMBB->getNumber();
  if (SVI.AllDefsAreReloads)
    OS << " all-reloads";
  if (SVI.DefByOrigPHI)
    OS << " orig-phi";
  if (SVI.KillsSource)
    OS << " kill";
  OS << " deps[";
  for (unsigned i = 0, e = SVI.Deps.size(); i != e; ++i)
    OS << ' ' << SVI.Deps[i]->id << '@' << SVI.Deps[i]->def;
  OS << " ]";
  if (SVI.DefMI)
    OS << " def: " << *SVI.DefMI;
  else
    OS << '\n';
  return OS;
}
#endif

/// propagateSiblingValue - Propagate the value in SVI to dependents if it is
/// known. Otherwise remember the dependency for later.
///
/// @param SVIIter SibValues entry to propagate.
/// @param VNI Dependent value, or NULL to propagate to all saved dependents.
void InlineSpiller::propagateSiblingValue(SibValueMap::iterator SVIIter,
                                          VNInfo *VNI) {
  SibValueMap::value_type *SVI = &*SVIIter;

  // When VNI is non-NULL, add it to SVI's deps, and only propagate to that.
  TinyPtrVector<VNInfo*> FirstDeps;
  if (VNI) {
    FirstDeps.push_back(VNI);
    SVI->second.Deps.push_back(VNI);
  }

  // Has the value been completely determined yet? If not, defer propagation.
  if (!SVI->second.hasDef())
    return;

  // Work list of values to propagate.
  SmallSetVector<SibValueMap::value_type *, 8> WorkList;
  WorkList.insert(SVI);

  do {
    SVI = WorkList.pop_back_val();
    TinyPtrVector<VNInfo*> *Deps = VNI ? &FirstDeps : &SVI->second.Deps;
    VNI = nullptr;

    SibValueInfo &SV = SVI->second;
    if (!SV.SpillMBB)
      SV.SpillMBB = LIS.getMBBFromIndex(SV.SpillVNI->def);

    DEBUG(dbgs() << " prop to " << Deps->size() << ": "
                 << SVI->first->id << '@' << SVI->first->def << ":\t" << SV);

    assert(SV.hasDef() && "Propagating undefined value");

    // Should this value be propagated as a preferred spill candidate? We don't
    // propagate values of registers that are about to spill.
    bool PropSpill = !DisableHoisting && !isRegToSpill(SV.SpillReg);
    unsigned SpillDepth = ~0u;

    for (TinyPtrVector<VNInfo*>::iterator DepI = Deps->begin(),
         DepE = Deps->end(); DepI != DepE; ++DepI) {
      SibValueMap::iterator DepSVI = SibValues.find(*DepI);
      assert(DepSVI != SibValues.end() && "Dependent value not in SibValues");
      SibValueInfo &DepSV = DepSVI->second;
      if (!DepSV.SpillMBB)
        DepSV.SpillMBB = LIS.getMBBFromIndex(DepSV.SpillVNI->def);

      bool Changed = false;

      // Propagate defining instruction.
      if (!DepSV.hasDef()) {
        Changed = true;
        DepSV.DefMI = SV.DefMI;
        DepSV.DefByOrigPHI = SV.DefByOrigPHI;
      }

      // Propagate AllDefsAreReloads. For PHI values, this computes an AND of
      // all predecessors.
      if (!SV.AllDefsAreReloads && DepSV.AllDefsAreReloads) {
        Changed = true;
        DepSV.AllDefsAreReloads = false;
      }

      // Propagate best spill value.
      if (PropSpill && SV.SpillVNI != DepSV.SpillVNI) {
        if (SV.SpillMBB == DepSV.SpillMBB) {
          // DepSV is in the same block. Hoist when dominated.
          if (DepSV.KillsSource && SV.SpillVNI->def < DepSV.SpillVNI->def) {
            // This is an alternative def earlier in the same MBB.
            // Hoist the spill as far as possible in SpillMBB. This can ease
            // register pressure:
            //
            //   x = def
            //   y = use x
            //   s = copy x
            //
            // Hoisting the spill of s to immediately after the def removes the
            // interference between x and y:
            //
            //   x = def
            //   spill x
            //   y = use x<kill>
            //
            // This hoist only helps when the DepSV copy kills its source.
            Changed = true;
            DepSV.SpillReg = SV.SpillReg;
            DepSV.SpillVNI = SV.SpillVNI;
            DepSV.SpillMBB = SV.SpillMBB;
          }
        } else {
          // DepSV is in a different block.
          if (SpillDepth == ~0u)
            SpillDepth = Loops.getLoopDepth(SV.SpillMBB);

          // Also hoist spills to blocks with smaller loop depth, but make sure
          // that the new value dominates. Non-phi dependents are always
          // dominated, phis need checking.

          const BranchProbability MarginProb(4, 5); // 80%
          // Hoist a spill to outer loop if there are multiple dependents (it
          // can be beneficial if more than one dependent is hoisted) or
          // if DepSV (the hoisting source) is hotter than SV (the hoisting
          // destination) (we add an 80% margin to bias a little towards
          // loop depth).
          bool HoistCondition =
            (MBFI.getBlockFreq(DepSV.SpillMBB) >=
             (MBFI.getBlockFreq(SV.SpillMBB) * MarginProb)) ||
            Deps->size() > 1;

          if ((Loops.getLoopDepth(DepSV.SpillMBB) > SpillDepth) &&
              HoistCondition &&
              (!DepSVI->first->isPHIDef() ||
               MDT.dominates(SV.SpillMBB, DepSV.SpillMBB))) {
            Changed = true;
            DepSV.SpillReg = SV.SpillReg;
            DepSV.SpillVNI = SV.SpillVNI;
            DepSV.SpillMBB = SV.SpillMBB;
          }
        }
      }

      if (!Changed)
        continue;

      // Something changed in DepSVI. Propagate to dependents.
      WorkList.insert(&*DepSVI);

      DEBUG(dbgs() << " update " << DepSVI->first->id << '@'
                   << DepSVI->first->def << " to:\t" << DepSV);
    }
  } while (!WorkList.empty());
}

/// traceSiblingValue - Trace a value that is about to be spilled back to the
/// real defining instructions by looking through sibling copies. Always stay
/// within the range of OrigVNI so the registers are known to carry the same
/// value.
///
/// Determine if the value is defined by all reloads, so spilling isn't
/// necessary - the value is already in the stack slot.
///
/// Return a defining instruction that may be a candidate for rematerialization.
///
MachineInstr *InlineSpiller::traceSiblingValue(unsigned UseReg, VNInfo *UseVNI,
                                               VNInfo *OrigVNI) {
  // Check if a cached value already exists.
  SibValueMap::iterator SVI;
  bool Inserted;
  std::tie(SVI, Inserted) =
    SibValues.insert(std::make_pair(UseVNI, SibValueInfo(UseReg, UseVNI)));
  if (!Inserted) {
    DEBUG(dbgs() << "Cached value " << PrintReg(UseReg) << ':'
                 << UseVNI->id << '@' << UseVNI->def << ' ' << SVI->second);
    return SVI->second.DefMI;
  }

  DEBUG(dbgs() << "Tracing value " << PrintReg(UseReg) << ':'
               << UseVNI->id << '@' << UseVNI->def << '\n');

  // List of (Reg, VNI) that have been inserted into SibValues, but need to be
  // processed.
  SmallVector<std::pair<unsigned, VNInfo*>, 8> WorkList;
  WorkList.push_back(std::make_pair(UseReg, UseVNI));

  do {
    unsigned Reg;
    VNInfo *VNI;
    std::tie(Reg, VNI) = WorkList.pop_back_val();
    DEBUG(dbgs() << " " << PrintReg(Reg) << ':' << VNI->id << '@' << VNI->def
                 << ":\t");

    // First check if this value has already been computed.
    SVI = SibValues.find(VNI);
    assert(SVI != SibValues.end() && "Missing SibValues entry");

    // Trace through PHI-defs created by live range splitting.
    if (VNI->isPHIDef()) {
      // Stop at original PHIs. We don't know the value at the predecessors.
      if (VNI->def == OrigVNI->def) {
        DEBUG(dbgs() << "orig phi value\n");
        SVI->second.DefByOrigPHI = true;
        SVI->second.AllDefsAreReloads = false;
        propagateSiblingValue(SVI);
        continue;
      }

      // This is a PHI inserted by live range splitting. We could trace the
      // live-out value from predecessor blocks, but that search can be very
      // expensive if there are many predecessors and many more PHIs as
      // generated by tail-dup when it sees an indirectbr. Instead, look at
      // all the non-PHI defs that have the same value as OrigVNI. They must
      // jointly dominate VNI->def. This is not optimal since VNI may actually
      // be jointly dominated by a smaller subset of defs, so there is a chance
      // we will miss an AllDefsAreReloads optimization.

      // Separate all values dominated by OrigVNI into PHIs and non-PHIs.
      SmallVector<VNInfo*, 8> PHIs, NonPHIs;
      LiveInterval &LI = LIS.getInterval(Reg);
      LiveInterval &OrigLI = LIS.getInterval(Original);

      for (LiveInterval::vni_iterator VI = LI.vni_begin(), VE = LI.vni_end();
           VI != VE; ++VI) {
        VNInfo *VNI2 = *VI;
        if (VNI2->isUnused())
          continue;
        if (!OrigLI.containsOneValue() &&
            OrigLI.getVNInfoAt(VNI2->def) != OrigVNI)
          continue;
        if (VNI2->isPHIDef() && VNI2->def != OrigVNI->def)
          PHIs.push_back(VNI2);
        else
          NonPHIs.push_back(VNI2);
      }
      DEBUG(dbgs() << "split phi value, checking " << PHIs.size()
                   << " phi-defs, and " << NonPHIs.size()
                   << " non-phi/orig defs\n");

      // Create entries for all the PHIs. Don't add them to the worklist, we
      // are processing all of them in one go here.
      for (unsigned i = 0, e = PHIs.size(); i != e; ++i)
        SibValues.insert(std::make_pair(PHIs[i], SibValueInfo(Reg, PHIs[i])));

      // Add every PHI as a dependent of all the non-PHIs.
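      // As soon as any non-PHI dependency gets a def, propagateSiblingValue
      // will forward that value to every PHI entry created above.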
      for (unsigned i = 0, e = NonPHIs.size(); i != e; ++i) {
        VNInfo *NonPHI = NonPHIs[i];
        // Known value? Try an insertion.
        std::tie(SVI, Inserted) =
          SibValues.insert(std::make_pair(NonPHI, SibValueInfo(Reg, NonPHI)));
        // Add all the PHIs as dependents of NonPHI.
        for (unsigned pi = 0, pe = PHIs.size(); pi != pe; ++pi)
          SVI->second.Deps.push_back(PHIs[pi]);
        // This is the first time we see NonPHI, add it to the worklist.
        if (Inserted)
          WorkList.push_back(std::make_pair(Reg, NonPHI));
        else
          // Propagate to all inserted PHIs, not just VNI.
          propagateSiblingValue(SVI);
      }

      // Next work list item.
      continue;
    }

    MachineInstr *MI = LIS.getInstructionFromIndex(VNI->def);
    assert(MI && "Missing def");

    // Trace through sibling copies.
    if (unsigned SrcReg = isFullCopyOf(MI, Reg)) {
      if (isSibling(SrcReg)) {
        LiveInterval &SrcLI = LIS.getInterval(SrcReg);
        LiveQueryResult SrcQ = SrcLI.Query(VNI->def);
        assert(SrcQ.valueIn() && "Copy from non-existing value");
        // Check if this COPY kills its source.
        SVI->second.KillsSource = SrcQ.isKill();
        VNInfo *SrcVNI = SrcQ.valueIn();
        DEBUG(dbgs() << "copy of " << PrintReg(SrcReg) << ':'
                     << SrcVNI->id << '@' << SrcVNI->def
                     << " kill=" << unsigned(SVI->second.KillsSource) << '\n');
        // Known sibling source value? Try an insertion.
        std::tie(SVI, Inserted) = SibValues.insert(
            std::make_pair(SrcVNI, SibValueInfo(SrcReg, SrcVNI)));
        // This is the first time we see Src, add it to the worklist.
        if (Inserted)
          WorkList.push_back(std::make_pair(SrcReg, SrcVNI));
        propagateSiblingValue(SVI, VNI);
        // Next work list item.
        continue;
      }
    }

    // Track reachable reloads.
    SVI->second.DefMI = MI;
    SVI->second.SpillMBB = MI->getParent();
    int FI;
    if (Reg == TII.isLoadFromStackSlot(MI, FI) && FI == StackSlot) {
      DEBUG(dbgs() << "reload\n");
      propagateSiblingValue(SVI);
      // Next work list item.
      continue;
    }

    // Potential remat candidate.
    DEBUG(dbgs() << "def " << *MI);
    SVI->second.AllDefsAreReloads = false;
    propagateSiblingValue(SVI);
  } while (!WorkList.empty());

  // Look up the value we were looking for. We already did this lookup at the
  // top of the function, but SibValues may have been invalidated.
  SVI = SibValues.find(UseVNI);
  assert(SVI != SibValues.end() && "Didn't compute requested info");
  DEBUG(dbgs() << " traced to:\t" << SVI->second);
  return SVI->second.DefMI;
}

/// analyzeSiblingValues - Trace values defined by sibling copies back to
/// something that isn't a sibling copy.
///
/// Keep track of values that may be rematerializable.
void InlineSpiller::analyzeSiblingValues() {
  SibValues.clear();

  // No siblings at all?
  if (Edit->getReg() == Original)
    return;

  LiveInterval &OrigLI = LIS.getInterval(Original);
  for (unsigned i = 0, e = RegsToSpill.size(); i != e; ++i) {
    unsigned Reg = RegsToSpill[i];
    LiveInterval &LI = LIS.getInterval(Reg);
    for (LiveInterval::const_vni_iterator VI = LI.vni_begin(),
         VE = LI.vni_end(); VI != VE; ++VI) {
      VNInfo *VNI = *VI;
      if (VNI->isUnused())
        continue;
      MachineInstr *DefMI = nullptr;
      if (!VNI->isPHIDef()) {
        DefMI = LIS.getInstructionFromIndex(VNI->def);
        assert(DefMI && "No defining instruction");
      }
      // Check possible sibling copies.
      if (VNI->isPHIDef() || DefMI->isCopy()) {
        VNInfo *OrigVNI = OrigLI.getVNInfoAt(VNI->def);
        assert(OrigVNI && "Def outside original live range");
        if (OrigVNI->def != VNI->def)
          DefMI = traceSiblingValue(Reg, VNI, OrigVNI);
      }
      if (DefMI && Edit->checkRematerializable(VNI, DefMI, AA)) {
        DEBUG(dbgs() << "Value " << PrintReg(Reg) << ':' << VNI->id << '@'
                     << VNI->def << " may remat from " << *DefMI);
      }
    }
  }
}

/// hoistSpill - Given a sibling copy that defines a value to be spilled, insert
/// a spill at a better location.
bool InlineSpiller::hoistSpill(LiveInterval &SpillLI, MachineInstr *CopyMI) {
  SlotIndex Idx = LIS.getInstructionIndex(CopyMI);
  VNInfo *VNI = SpillLI.getVNInfoAt(Idx.getRegSlot());
  assert(VNI && VNI->def == Idx.getRegSlot() && "Not defined by copy");
  SibValueMap::iterator I = SibValues.find(VNI);
  if (I == SibValues.end())
    return false;

  const SibValueInfo &SVI = I->second;

  // Let the normal folding code deal with the boring case.
  if (!SVI.AllDefsAreReloads && SVI.SpillVNI == VNI)
    return false;

  // SpillReg may have been deleted by remat and DCE.
  if (!LIS.hasInterval(SVI.SpillReg)) {
    DEBUG(dbgs() << "Stale interval: " << PrintReg(SVI.SpillReg) << '\n');
    SibValues.erase(I);
    return false;
  }

  LiveInterval &SibLI = LIS.getInterval(SVI.SpillReg);
  if (!SibLI.containsValue(SVI.SpillVNI)) {
    DEBUG(dbgs() << "Stale value: " << PrintReg(SVI.SpillReg) << '\n');
    SibValues.erase(I);
    return false;
  }

  // Conservatively extend the stack slot range to the range of the original
  // value. We may be able to do better with stack slot coloring by being more
  // careful here.
  assert(StackInt && "No stack slot assigned yet.");
  LiveInterval &OrigLI = LIS.getInterval(Original);
  VNInfo *OrigVNI = OrigLI.getVNInfoAt(Idx);
  StackInt->MergeValueInAsValue(OrigLI, OrigVNI, StackInt->getValNumInfo(0));
  DEBUG(dbgs() << "\tmerged orig valno " << OrigVNI->id << ": "
               << *StackInt << '\n');

  // Already spilled everywhere.
  if (SVI.AllDefsAreReloads) {
    DEBUG(dbgs() << "\tno spill needed: " << SVI);
    ++NumOmitReloadSpill;
    return true;
  }
  // We are going to spill SVI.SpillVNI immediately after its def, so clear out
  // any later spills of the same value.
  eliminateRedundantSpills(SibLI, SVI.SpillVNI);

  MachineBasicBlock *MBB = LIS.getMBBFromIndex(SVI.SpillVNI->def);
  MachineBasicBlock::iterator MII;
  if (SVI.SpillVNI->isPHIDef())
    MII = MBB->SkipPHIsAndLabels(MBB->begin());
  else {
    MachineInstr *DefMI = LIS.getInstructionFromIndex(SVI.SpillVNI->def);
    assert(DefMI && "Defining instruction disappeared");
    MII = DefMI;
    ++MII;
  }
  // Insert spill without kill flag immediately after def.
  TII.storeRegToStackSlot(*MBB, MII, SVI.SpillReg, false, StackSlot,
                          MRI.getRegClass(SVI.SpillReg), &TRI);
  --MII; // Point to store instruction.
  LIS.InsertMachineInstrInMaps(MII);
  DEBUG(dbgs() << "\thoisted: " << SVI.SpillVNI->def << '\t' << *MII);

  ++NumSpills;
  ++NumHoists;
  return true;
}

/// eliminateRedundantSpills - SLI:VNI is known to be on the stack. Remove any
/// redundant spills of this value in SLI.reg and sibling copies.
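/// The search follows full copies into sibling registers, merges each visited
/// value's live range into the stack slot interval, and downgrades redundant
/// stores to KILL so a later eliminateDeadDefs call can delete them.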
void InlineSpiller::eliminateRedundantSpills(LiveInterval &SLI, VNInfo *VNI) {
  assert(VNI && "Missing value");
  SmallVector<std::pair<LiveInterval*, VNInfo*>, 8> WorkList;
  WorkList.push_back(std::make_pair(&SLI, VNI));
  assert(StackInt && "No stack slot assigned yet.");

  do {
    LiveInterval *LI;
    std::tie(LI, VNI) = WorkList.pop_back_val();
    unsigned Reg = LI->reg;
    DEBUG(dbgs() << "Checking redundant spills for "
                 << VNI->id << '@' << VNI->def << " in " << *LI << '\n');

    // Regs to spill are taken care of.
    if (isRegToSpill(Reg))
      continue;

    // Add all of VNI's live range to StackInt.
    StackInt->MergeValueInAsValue(*LI, VNI, StackInt->getValNumInfo(0));
    DEBUG(dbgs() << "Merged to stack int: " << *StackInt << '\n');

    // Find all spills and copies of VNI.
    for (MachineRegisterInfo::use_instr_nodbg_iterator
         UI = MRI.use_instr_nodbg_begin(Reg), E = MRI.use_instr_nodbg_end();
         UI != E; ) {
      MachineInstr *MI = &*(UI++);
      if (!MI->isCopy() && !MI->mayStore())
        continue;
      SlotIndex Idx = LIS.getInstructionIndex(MI);
      if (LI->getVNInfoAt(Idx) != VNI)
        continue;

      // Follow sibling copies down the dominator tree.
      if (unsigned DstReg = isFullCopyOf(MI, Reg)) {
        if (isSibling(DstReg)) {
          LiveInterval &DstLI = LIS.getInterval(DstReg);
          VNInfo *DstVNI = DstLI.getVNInfoAt(Idx.getRegSlot());
          assert(DstVNI && "Missing defined value");
          assert(DstVNI->def == Idx.getRegSlot() && "Wrong copy def slot");
          WorkList.push_back(std::make_pair(&DstLI, DstVNI));
        }
        continue;
      }

      // Erase spills.
      int FI;
      if (Reg == TII.isStoreToStackSlot(MI, FI) && FI == StackSlot) {
        DEBUG(dbgs() << "Redundant spill " << Idx << '\t' << *MI);
        // eliminateDeadDefs won't normally remove stores, so switch opcode.
        MI->setDesc(TII.get(TargetOpcode::KILL));
        DeadDefs.push_back(MI);
        ++NumSpillsRemoved;
        --NumSpills;
      }
    }
  } while (!WorkList.empty());
}


//===----------------------------------------------------------------------===//
// Rematerialization
//===----------------------------------------------------------------------===//

/// markValueUsed - Remember that VNI failed to rematerialize, so its defining
/// instruction cannot be eliminated. See through snippet copies.
void InlineSpiller::markValueUsed(LiveInterval *LI, VNInfo *VNI) {
  SmallVector<std::pair<LiveInterval*, VNInfo*>, 8> WorkList;
  WorkList.push_back(std::make_pair(LI, VNI));
  do {
    std::tie(LI, VNI) = WorkList.pop_back_val();
    if (!UsedValues.insert(VNI))
      continue;

    if (VNI->isPHIDef()) {
      MachineBasicBlock *MBB = LIS.getMBBFromIndex(VNI->def);
      for (MachineBasicBlock::pred_iterator PI = MBB->pred_begin(),
           PE = MBB->pred_end(); PI != PE; ++PI) {
        VNInfo *PVNI = LI->getVNInfoBefore(LIS.getMBBEndIdx(*PI));
        if (PVNI)
          WorkList.push_back(std::make_pair(LI, PVNI));
      }
      continue;
    }

    // Follow snippet copies.
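    // A value defined by a snippet copy can only be preserved if the snippet's
    // value at the copy is preserved as well, so mark that value used too.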
    MachineInstr *MI = LIS.getInstructionFromIndex(VNI->def);
    if (!SnippetCopies.count(MI))
      continue;
    LiveInterval &SnipLI = LIS.getInterval(MI->getOperand(1).getReg());
    assert(isRegToSpill(SnipLI.reg) && "Unexpected register in copy");
    VNInfo *SnipVNI = SnipLI.getVNInfoAt(VNI->def.getRegSlot(true));
    assert(SnipVNI && "Snippet undefined before copy");
    WorkList.push_back(std::make_pair(&SnipLI, SnipVNI));
  } while (!WorkList.empty());
}

/// reMaterializeFor - Attempt to rematerialize before MI instead of reloading.
bool InlineSpiller::reMaterializeFor(LiveInterval &VirtReg,
                                     MachineBasicBlock::iterator MI) {
  SlotIndex UseIdx = LIS.getInstructionIndex(MI).getRegSlot(true);
  VNInfo *ParentVNI = VirtReg.getVNInfoAt(UseIdx.getBaseIndex());

  if (!ParentVNI) {
    DEBUG(dbgs() << "\tadding <undef> flags: ");
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (MO.isReg() && MO.isUse() && MO.getReg() == VirtReg.reg)
        MO.setIsUndef();
    }
    DEBUG(dbgs() << UseIdx << '\t' << *MI);
    return true;
  }

  if (SnippetCopies.count(MI))
    return false;

  // Use an OrigVNI from traceSiblingValue when ParentVNI is a sibling copy.
  LiveRangeEdit::Remat RM(ParentVNI);
  SibValueMap::const_iterator SibI = SibValues.find(ParentVNI);
  if (SibI != SibValues.end())
    RM.OrigMI = SibI->second.DefMI;
  if (!Edit->canRematerializeAt(RM, UseIdx, false)) {
    markValueUsed(&VirtReg, ParentVNI);
    DEBUG(dbgs() << "\tcannot remat for " << UseIdx << '\t' << *MI);
    return false;
  }

  // If the instruction also writes VirtReg.reg, it had better not require the
  // same register for uses and defs.
  SmallVector<std::pair<MachineInstr*, unsigned>, 8> Ops;
  MIBundleOperands::VirtRegInfo RI =
    MIBundleOperands(MI).analyzeVirtReg(VirtReg.reg, &Ops);
  if (RI.Tied) {
    markValueUsed(&VirtReg, ParentVNI);
    DEBUG(dbgs() << "\tcannot remat tied reg: " << UseIdx << '\t' << *MI);
    return false;
  }

  // Before rematerializing into a register for a single instruction, try to
  // fold a load into the instruction. That avoids allocating a new register.
  if (RM.OrigMI->canFoldAsLoad() &&
      foldMemoryOperand(Ops, RM.OrigMI)) {
    Edit->markRematerialized(RM.ParentVNI);
    ++NumFoldedLoads;
    return true;
  }

  // Allocate a new register for the remat.
  unsigned NewVReg = Edit->createFrom(Original);

  // Finally we can rematerialize OrigMI before MI.
  SlotIndex DefIdx = Edit->rematerializeAt(*MI->getParent(), MI, NewVReg, RM,
                                           TRI);
  (void)DefIdx;
  DEBUG(dbgs() << "\tremat: " << DefIdx << '\t'
               << *LIS.getInstructionFromIndex(DefIdx));

  // Replace operands
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(Ops[i].second);
    if (MO.isReg() && MO.isUse() && MO.getReg() == VirtReg.reg) {
      MO.setReg(NewVReg);
      MO.setIsKill();
    }
  }
  DEBUG(dbgs() << "\t " << UseIdx << '\t' << *MI << '\n');

  ++NumRemats;
  return true;
}

/// reMaterializeAll - Try to rematerialize as many uses as possible,
/// and trim the live ranges after.
void InlineSpiller::reMaterializeAll() {
  // analyzeSiblingValues has already tested all relevant defining instructions.
  if (!Edit->anyRematerializable(AA))
    return;

  UsedValues.clear();

  // Try to remat before all uses of snippets.
  bool anyRemat = false;
  for (unsigned i = 0, e = RegsToSpill.size(); i != e; ++i) {
    unsigned Reg = RegsToSpill[i];
    LiveInterval &LI = LIS.getInterval(Reg);
    for (MachineRegisterInfo::use_bundle_nodbg_iterator
         RI = MRI.use_bundle_nodbg_begin(Reg), E = MRI.use_bundle_nodbg_end();
         RI != E; ) {
      MachineInstr *MI = &*(RI++);
      anyRemat |= reMaterializeFor(LI, MI);
    }
  }
  if (!anyRemat)
    return;

  // Remove any values that were completely rematted.
  for (unsigned i = 0, e = RegsToSpill.size(); i != e; ++i) {
    unsigned Reg = RegsToSpill[i];
    LiveInterval &LI = LIS.getInterval(Reg);
    for (LiveInterval::vni_iterator I = LI.vni_begin(), E = LI.vni_end();
         I != E; ++I) {
      VNInfo *VNI = *I;
      if (VNI->isUnused() || VNI->isPHIDef() || UsedValues.count(VNI))
        continue;
      MachineInstr *MI = LIS.getInstructionFromIndex(VNI->def);
      MI->addRegisterDead(Reg, &TRI);
      if (!MI->allDefsAreDead())
        continue;
      DEBUG(dbgs() << "All defs dead: " << *MI);
      DeadDefs.push_back(MI);
    }
  }

  // Eliminate dead code after remat. Note that some snippet copies may be
  // deleted here.
  if (DeadDefs.empty())
    return;
  DEBUG(dbgs() << "Remat created " << DeadDefs.size() << " dead defs.\n");
  Edit->eliminateDeadDefs(DeadDefs, RegsToSpill);

  // Get rid of deleted and empty intervals.
  unsigned ResultPos = 0;
  for (unsigned i = 0, e = RegsToSpill.size(); i != e; ++i) {
    unsigned Reg = RegsToSpill[i];
    if (!LIS.hasInterval(Reg))
      continue;

    LiveInterval &LI = LIS.getInterval(Reg);
    if (LI.empty()) {
      Edit->eraseVirtReg(Reg);
      continue;
    }

    RegsToSpill[ResultPos++] = Reg;
  }
  RegsToSpill.erase(RegsToSpill.begin() + ResultPos, RegsToSpill.end());
  DEBUG(dbgs() << RegsToSpill.size() << " registers to spill after remat.\n");
}


//===----------------------------------------------------------------------===//
// Spilling
//===----------------------------------------------------------------------===//

/// If MI is a load or store of StackSlot, it can be removed.
bool InlineSpiller::coalesceStackAccess(MachineInstr *MI, unsigned Reg) {
  int FI = 0;
  unsigned InstrReg = TII.isLoadFromStackSlot(MI, FI);
  bool IsLoad = InstrReg;
  if (!IsLoad)
    InstrReg = TII.isStoreToStackSlot(MI, FI);

  // We have a stack access. Is it the right register and slot?
  if (InstrReg != Reg || FI != StackSlot)
    return false;

  DEBUG(dbgs() << "Coalescing stack access: " << *MI);
  LIS.RemoveMachineInstrFromMaps(MI);
  MI->eraseFromParent();

  if (IsLoad) {
    ++NumReloadsRemoved;
    --NumReloads;
  } else {
    ++NumSpillsRemoved;
    --NumSpills;
  }

  return true;
}

#if !defined(NDEBUG)
// Dump the range of instructions from B to E with their slot indexes.
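// Only called from the DEBUG() statements in the folding, reload, and spill
// code below; VReg selects early-clobber slot printing for its defs.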
static void dumpMachineInstrRangeWithSlotIndex(MachineBasicBlock::iterator B,
                                               MachineBasicBlock::iterator E,
                                               LiveIntervals const &LIS,
                                               const char *const header,
                                               unsigned VReg = 0) {
  char NextLine = '\n';
  char SlotIndent = '\t';

  if (std::next(B) == E) {
    NextLine = ' ';
    SlotIndent = ' ';
  }

  dbgs() << '\t' << header << ": " << NextLine;

  for (MachineBasicBlock::iterator I = B; I != E; ++I) {
    SlotIndex Idx = LIS.getInstructionIndex(I).getRegSlot();

    // If a register was passed in and this instruction has it as a
    // destination that is marked as an early clobber, print the
    // early-clobber slot index.
    if (VReg) {
      MachineOperand *MO = I->findRegisterDefOperand(VReg);
      if (MO && MO->isEarlyClobber())
        Idx = Idx.getRegSlot(true);
    }

    dbgs() << SlotIndent << Idx << '\t' << *I;
  }
}
#endif

/// foldMemoryOperand - Try folding stack slot references in Ops into their
/// instructions.
///
/// @param Ops Operand indices from analyzeVirtReg().
/// @param LoadMI Load instruction to use instead of stack slot when non-null.
/// @return True on success.
bool InlineSpiller::
foldMemoryOperand(ArrayRef<std::pair<MachineInstr*, unsigned> > Ops,
                  MachineInstr *LoadMI) {
  if (Ops.empty())
    return false;
  // Don't attempt folding in bundles.
  MachineInstr *MI = Ops.front().first;
  if (Ops.back().first != MI || MI->isBundled())
    return false;

  bool WasCopy = MI->isCopy();
  unsigned ImpReg = 0;

  bool SpillSubRegs = (MI->getOpcode() == TargetOpcode::PATCHPOINT ||
                       MI->getOpcode() == TargetOpcode::STACKMAP);

  // TargetInstrInfo::foldMemoryOperand only expects explicit, non-tied
  // operands.
  SmallVector<unsigned, 8> FoldOps;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned Idx = Ops[i].second;
    MachineOperand &MO = MI->getOperand(Idx);
    if (MO.isImplicit()) {
      ImpReg = MO.getReg();
      continue;
    }
    // FIXME: Teach targets to deal with subregs.
    if (!SpillSubRegs && MO.getSubReg())
      return false;
    // We cannot fold a load instruction into a def.
    if (LoadMI && MO.isDef())
      return false;
    // Tied use operands should not be passed to foldMemoryOperand.
    if (!MI->isRegTiedToDefOperand(Idx))
      FoldOps.push_back(Idx);
  }

  MachineInstrSpan MIS(MI);

  MachineInstr *FoldMI =
      LoadMI ? TII.foldMemoryOperand(MI, FoldOps, LoadMI)
             : TII.foldMemoryOperand(MI, FoldOps, StackSlot);
  if (!FoldMI)
    return false;

  // Remove LIS for any dead defs in the original MI not in FoldMI.
  for (MIBundleOperands MO(MI); MO.isValid(); ++MO) {
    if (!MO->isReg())
      continue;
    unsigned Reg = MO->getReg();
    if (!Reg || TargetRegisterInfo::isVirtualRegister(Reg) ||
        MRI.isReserved(Reg)) {
      continue;
    }
    // Skip non-Defs, including undef uses and internal reads.
    if (MO->isUse())
      continue;
    MIBundleOperands::PhysRegInfo RI =
        MIBundleOperands(FoldMI).analyzePhysReg(Reg, &TRI);
    if (RI.Defines)
      continue;
    // FoldMI does not define this physreg. Remove the LI segment.
    assert(MO->isDead() && "Cannot fold physreg def");
    for (MCRegUnitIterator Units(Reg, &TRI); Units.isValid(); ++Units) {
      if (LiveRange *LR = LIS.getCachedRegUnit(*Units)) {
        SlotIndex Idx = LIS.getInstructionIndex(MI).getRegSlot();
        if (VNInfo *VNI = LR->getVNInfoAt(Idx))
          LR->removeValNo(VNI);
      }
    }
  }

  LIS.ReplaceMachineInstrInMaps(MI, FoldMI);
  MI->eraseFromParent();

  // Insert any new instructions other than FoldMI into the LIS maps.
  assert(!MIS.empty() && "Unexpected empty span of instructions!");
  for (MachineBasicBlock::iterator MII = MIS.begin(), End = MIS.end();
       MII != End; ++MII)
    if (&*MII != FoldMI)
      LIS.InsertMachineInstrInMaps(&*MII);

  // TII.foldMemoryOperand may have left some implicit operands on the
  // instruction. Strip them.
  if (ImpReg)
    for (unsigned i = FoldMI->getNumOperands(); i; --i) {
      MachineOperand &MO = FoldMI->getOperand(i - 1);
      if (!MO.isReg() || !MO.isImplicit())
        break;
      if (MO.getReg() == ImpReg)
        FoldMI->RemoveOperand(i - 1);
    }

  DEBUG(dumpMachineInstrRangeWithSlotIndex(MIS.begin(), MIS.end(), LIS,
                                           "folded"));

  if (!WasCopy)
    ++NumFolded;
  else if (Ops.front().second == 0)
    ++NumSpills;
  else
    ++NumReloads;
  return true;
}

void InlineSpiller::insertReload(unsigned NewVReg,
                                 SlotIndex Idx,
                                 MachineBasicBlock::iterator MI) {
  MachineBasicBlock &MBB = *MI->getParent();

  MachineInstrSpan MIS(MI);
  TII.loadRegFromStackSlot(MBB, MI, NewVReg, StackSlot,
                           MRI.getRegClass(NewVReg), &TRI);

  LIS.InsertMachineInstrRangeInMaps(MIS.begin(), MI);

  DEBUG(dumpMachineInstrRangeWithSlotIndex(MIS.begin(), MI, LIS, "reload",
                                           NewVReg));
  ++NumReloads;
}

/// insertSpill - Insert a spill of NewVReg after MI.
void InlineSpiller::insertSpill(unsigned NewVReg, bool isKill,
                                MachineBasicBlock::iterator MI) {
  MachineBasicBlock &MBB = *MI->getParent();

  MachineInstrSpan MIS(MI);
  TII.storeRegToStackSlot(MBB, std::next(MI), NewVReg, isKill, StackSlot,
                          MRI.getRegClass(NewVReg), &TRI);

  LIS.InsertMachineInstrRangeInMaps(std::next(MI), MIS.end());

  DEBUG(dumpMachineInstrRangeWithSlotIndex(std::next(MI), MIS.end(), LIS,
                                           "spill"));
  ++NumSpills;
}

/// spillAroundUses - insert spill code around each use of Reg.
void InlineSpiller::spillAroundUses(unsigned Reg) {
  DEBUG(dbgs() << "spillAroundUses " << PrintReg(Reg) << '\n');
  LiveInterval &OldLI = LIS.getInterval(Reg);

  // Iterate over instructions using Reg.
  for (MachineRegisterInfo::reg_bundle_iterator
       RegI = MRI.reg_bundle_begin(Reg), E = MRI.reg_bundle_end();
       RegI != E; ) {
    MachineInstr *MI = &*(RegI++);

    // Debug values are not allowed to affect codegen.
    if (MI->isDebugValue()) {
      // Modify DBG_VALUE now that the value is in a spill slot.
      bool IsIndirect = MI->isIndirectDebugValue();
      uint64_t Offset = IsIndirect ? MI->getOperand(1).getImm() : 0;
      const MDNode *MDPtr = MI->getOperand(2).getMetadata();
      DebugLoc DL = MI->getDebugLoc();
      DEBUG(dbgs() << "Modifying debug info due to spill:" << "\t" << *MI);
      MachineBasicBlock *MBB = MI->getParent();
      BuildMI(*MBB, MBB->erase(MI), DL, TII.get(TargetOpcode::DBG_VALUE))
          .addFrameIndex(StackSlot).addImm(Offset).addMetadata(MDPtr);
      continue;
    }

    // Ignore copies to/from snippets. We'll delete them.
    if (SnippetCopies.count(MI))
      continue;

    // Stack slot accesses may coalesce away.
    if (coalesceStackAccess(MI, Reg))
      continue;

    // Analyze instruction.
    SmallVector<std::pair<MachineInstr*, unsigned>, 8> Ops;
    MIBundleOperands::VirtRegInfo RI =
        MIBundleOperands(MI).analyzeVirtReg(Reg, &Ops);

    // Find the slot index where this instruction reads and writes OldLI.
    // This is usually the def slot, except for tied early clobbers.
    SlotIndex Idx = LIS.getInstructionIndex(MI).getRegSlot();
    if (VNInfo *VNI = OldLI.getVNInfoAt(Idx.getRegSlot(true)))
      if (SlotIndex::isSameInstr(Idx, VNI->def))
        Idx = VNI->def;

    // Check for a sibling copy.
    unsigned SibReg = isFullCopyOf(MI, Reg);
    if (SibReg && isSibling(SibReg)) {
      // This may actually be a copy between snippets.
      if (isRegToSpill(SibReg)) {
        DEBUG(dbgs() << "Found new snippet copy: " << *MI);
        SnippetCopies.insert(MI);
        continue;
      }
      if (RI.Writes) {
        // Hoist the spill of a sib-reg copy.
        if (hoistSpill(OldLI, MI)) {
          // This COPY is now dead, the value is already in the stack slot.
          MI->getOperand(0).setIsDead();
          DeadDefs.push_back(MI);
          continue;
        }
      } else {
        // This is a reload for a sib-reg copy. Drop spills downstream.
        LiveInterval &SibLI = LIS.getInterval(SibReg);
        eliminateRedundantSpills(SibLI, SibLI.getVNInfoAt(Idx));
        // The COPY will fold to a reload below.
      }
    }

    // Attempt to fold memory ops.
    if (foldMemoryOperand(Ops))
      continue;

    // Create a new virtual register for spill/fill.
    // FIXME: Infer regclass from instruction alone.
    unsigned NewVReg = Edit->createFrom(Reg);

    if (RI.Reads)
      insertReload(NewVReg, Idx, MI);

    // Rewrite instruction operands.
    bool hasLiveDef = false;
    for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
      MachineOperand &MO = Ops[i].first->getOperand(Ops[i].second);
      MO.setReg(NewVReg);
      if (MO.isUse()) {
        if (!Ops[i].first->isRegTiedToDefOperand(Ops[i].second))
          MO.setIsKill();
      } else {
        if (!MO.isDead())
          hasLiveDef = true;
      }
    }
    DEBUG(dbgs() << "\trewrite: " << Idx << '\t' << *MI << '\n');

    // FIXME: Use a second vreg if instruction has no tied ops.
    if (RI.Writes)
      if (hasLiveDef)
        insertSpill(NewVReg, true, MI);
  }
}

/// spillAll - Spill all registers remaining after rematerialization.
void InlineSpiller::spillAll() {
  // Update LiveStacks now that we are committed to spilling.
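  // All registers in RegsToSpill share the slot assigned to Original, so the
  // slot and its single-valued stack interval are created only once.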
  if (StackSlot == VirtRegMap::NO_STACK_SLOT) {
    StackSlot = VRM.assignVirt2StackSlot(Original);
    StackInt = &LSS.getOrCreateInterval(StackSlot, MRI.getRegClass(Original));
    StackInt->getNextValue(SlotIndex(), LSS.getVNInfoAllocator());
  } else
    StackInt = &LSS.getInterval(StackSlot);

  if (Original != Edit->getReg())
    VRM.assignVirt2StackSlot(Edit->getReg(), StackSlot);

  assert(StackInt->getNumValNums() == 1 && "Bad stack interval values");
  for (unsigned i = 0, e = RegsToSpill.size(); i != e; ++i)
    StackInt->MergeSegmentsInAsValue(LIS.getInterval(RegsToSpill[i]),
                                     StackInt->getValNumInfo(0));
  DEBUG(dbgs() << "Merged spilled regs: " << *StackInt << '\n');

  // Spill around uses of all RegsToSpill.
  for (unsigned i = 0, e = RegsToSpill.size(); i != e; ++i)
    spillAroundUses(RegsToSpill[i]);

  // Hoisted spills may cause dead code.
  if (!DeadDefs.empty()) {
    DEBUG(dbgs() << "Eliminating " << DeadDefs.size() << " dead defs\n");
    Edit->eliminateDeadDefs(DeadDefs, RegsToSpill);
  }

  // Finally delete the SnippetCopies.
  for (unsigned i = 0, e = RegsToSpill.size(); i != e; ++i) {
    for (MachineRegisterInfo::reg_instr_iterator
         RI = MRI.reg_instr_begin(RegsToSpill[i]), E = MRI.reg_instr_end();
         RI != E; ) {
      MachineInstr *MI = &*(RI++);
      assert(SnippetCopies.count(MI) && "Remaining use wasn't a snippet copy");
      // FIXME: Do this with a LiveRangeEdit callback.
      LIS.RemoveMachineInstrFromMaps(MI);
      MI->eraseFromParent();
    }
  }

  // Delete all spilled registers.
  for (unsigned i = 0, e = RegsToSpill.size(); i != e; ++i)
    Edit->eraseVirtReg(RegsToSpill[i]);
}

void InlineSpiller::spill(LiveRangeEdit &edit) {
  ++NumSpilledRanges;
  Edit = &edit;
  assert(!TargetRegisterInfo::isStackSlot(edit.getReg())
         && "Trying to spill a stack slot.");
  // Share a stack slot among all descendants of Original.
  Original = VRM.getOriginal(edit.getReg());
  StackSlot = VRM.getStackSlot(Original);
  StackInt = nullptr;

  DEBUG(dbgs() << "Inline spilling "
               << MRI.getRegClass(edit.getReg())->getName()
               << ':' << edit.getParent()
               << "\nFrom original " << PrintReg(Original) << '\n');
  assert(edit.getParent().isSpillable() &&
         "Attempting to spill already spilled value.");
  assert(DeadDefs.empty() && "Previous spill didn't remove dead defs");

  collectRegsToSpill();
  analyzeSiblingValues();
  reMaterializeAll();

  // Remat may handle everything.
  if (!RegsToSpill.empty())
    spillAll();

  Edit->calculateRegClassAndHint(MF, Loops, MBFI);
}