//===-------- InlineSpiller.cpp - Insert spills and restores inline -------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// The inline spiller modifies the machine function directly instead of
// inserting spills and restores in VirtRegMap.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "regalloc"
#include "Spiller.h"
#include "LiveRangeEdit.h"
#include "VirtRegMap.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/TinyPtrVector.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/LiveStackAnalysis.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

STATISTIC(NumSpilledRanges, "Number of spilled live ranges");
STATISTIC(NumSnippets, "Number of spilled snippets");
STATISTIC(NumSpills, "Number of spills inserted");
STATISTIC(NumSpillsRemoved, "Number of spills removed");
STATISTIC(NumReloads, "Number of reloads inserted");
STATISTIC(NumReloadsRemoved, "Number of reloads removed");
STATISTIC(NumFolded, "Number of folded stack accesses");
STATISTIC(NumFoldedLoads, "Number of folded loads");
STATISTIC(NumRemats, "Number of rematerialized defs for spilling");
STATISTIC(NumOmitReloadSpill, "Number of omitted spills of reloads");
STATISTIC(NumHoists, "Number of hoisted spills");

static cl::opt<bool>
DisableHoisting("disable-spill-hoist", cl::Hidden,
                cl::desc("Disable inline spill hoisting"));

namespace {
class InlineSpiller : public Spiller {
  MachineFunctionPass &Pass;
  MachineFunction &MF;
  LiveIntervals &LIS;
  LiveStacks &LSS;
  AliasAnalysis *AA;
  MachineDominatorTree &MDT;
  MachineLoopInfo &Loops;
  VirtRegMap &VRM;
  MachineFrameInfo &MFI;
  MachineRegisterInfo &MRI;
  const TargetInstrInfo &TII;
  const TargetRegisterInfo &TRI;

  // Variables that are valid during spill(), but used by multiple methods.
  LiveRangeEdit *Edit;
  LiveInterval *StackInt;
  int StackSlot;
  unsigned Original;

  // All registers to spill to StackSlot, including the main register.
  SmallVector<unsigned, 8> RegsToSpill;

  // All COPY instructions to/from snippets.
  // They are ignored since both operands refer to the same stack slot.
  SmallPtrSet<MachineInstr*, 8> SnippetCopies;

  // Values that failed to remat at some point.
  SmallPtrSet<VNInfo*, 8> UsedValues;

public:
  // Information about a value that was defined by a copy from a sibling
  // register.
  struct SibValueInfo {
    // True when all reaching defs were reloads: No spill is necessary.
    bool AllDefsAreReloads;

    // True when value is defined by an original PHI not from splitting.
    bool DefByOrigPHI;

    // True when the COPY defining this value killed its source.
    bool KillsSource;

    // The preferred register to spill.
    unsigned SpillReg;

    // The value of SpillReg that should be spilled.
    VNInfo *SpillVNI;

    // The block where SpillVNI should be spilled. Currently, this must be the
    // block containing SpillVNI->def.
    MachineBasicBlock *SpillMBB;

    // A defining instruction that is not a sibling copy or a reload, or NULL.
    // This can be used as a template for rematerialization.
    MachineInstr *DefMI;

    // List of values that depend on this one. These values are actually the
    // same, but live range splitting has placed them in different registers,
    // or the SSA updater has inserted PHI-defs to preserve SSA form. The
    // dependents are copies of the current value and phi-kills; usually only
    // phi-kills cause more than one dependent value.
    TinyPtrVector<VNInfo*> Deps;

    SibValueInfo(unsigned Reg, VNInfo *VNI)
      : AllDefsAreReloads(true), DefByOrigPHI(false), KillsSource(false),
        SpillReg(Reg), SpillVNI(VNI), SpillMBB(0), DefMI(0) {}

    // Returns true when a def has been found.
    bool hasDef() const { return DefByOrigPHI || DefMI; }
  };

private:
  // Values in RegsToSpill defined by sibling copies.
  typedef DenseMap<VNInfo*, SibValueInfo> SibValueMap;
  SibValueMap SibValues;

  // Dead defs generated during spilling.
  SmallVector<MachineInstr*, 8> DeadDefs;

  ~InlineSpiller() {}

public:
  InlineSpiller(MachineFunctionPass &pass,
                MachineFunction &mf,
                VirtRegMap &vrm)
    : Pass(pass),
      MF(mf),
      LIS(pass.getAnalysis<LiveIntervals>()),
      LSS(pass.getAnalysis<LiveStacks>()),
      AA(&pass.getAnalysis<AliasAnalysis>()),
      MDT(pass.getAnalysis<MachineDominatorTree>()),
      Loops(pass.getAnalysis<MachineLoopInfo>()),
      VRM(vrm),
      MFI(*mf.getFrameInfo()),
      MRI(mf.getRegInfo()),
      TII(*mf.getTarget().getInstrInfo()),
      TRI(*mf.getTarget().getRegisterInfo()) {}

  void spill(LiveRangeEdit &);

private:
  bool isSnippet(const LiveInterval &SnipLI);
  void collectRegsToSpill();

  bool isRegToSpill(unsigned Reg) {
    return std::find(RegsToSpill.begin(),
                     RegsToSpill.end(), Reg) != RegsToSpill.end();
  }

  bool isSibling(unsigned Reg);
  MachineInstr *traceSiblingValue(unsigned, VNInfo*, VNInfo*);
  void propagateSiblingValue(SibValueMap::iterator, VNInfo *VNI = 0);
  void analyzeSiblingValues();

  bool hoistSpill(LiveInterval &SpillLI, MachineInstr *CopyMI);
  void eliminateRedundantSpills(LiveInterval &LI, VNInfo *VNI);

  void markValueUsed(LiveInterval*, VNInfo*);
  bool reMaterializeFor(LiveInterval&, MachineBasicBlock::iterator MI);
  void reMaterializeAll();

  bool coalesceStackAccess(MachineInstr *MI, unsigned Reg);
  bool foldMemoryOperand(MachineBasicBlock::iterator MI,
                         const SmallVectorImpl<unsigned> &Ops,
                         MachineInstr *LoadMI = 0);
  void insertReload(LiveInterval &NewLI, SlotIndex,
                    MachineBasicBlock::iterator MI);
  void insertSpill(LiveInterval &NewLI, const LiveInterval &OldLI,
                   SlotIndex, MachineBasicBlock::iterator MI);

  void spillAroundUses(unsigned Reg);
  void spillAll();
};
}

namespace llvm {
Spiller *createInlineSpiller(MachineFunctionPass &pass,
                             MachineFunction &mf,
                             VirtRegMap &vrm) {
  return new InlineSpiller(pass, mf, vrm);
}
}

//===----------------------------------------------------------------------===//
//                                Snippets
//===----------------------------------------------------------------------===//
// When spilling a virtual register, we also spill any snippets it is connected
// to. The snippets are small live ranges that only have a single real use,
// leftovers from live range splitting. Spilling them enables memory operand
// folding or tightens the live range around the single use.
//
// This minimizes register pressure and maximizes the store-to-load distance
// for spill slots, which can be important in tight loops.

/// isFullCopyOf - If MI is a COPY to or from Reg, return the other register,
/// otherwise return 0.
static unsigned isFullCopyOf(const MachineInstr *MI, unsigned Reg) {
  if (!MI->isFullCopy())
    return 0;
  if (MI->getOperand(0).getReg() == Reg)
    return MI->getOperand(1).getReg();
  if (MI->getOperand(1).getReg() == Reg)
    return MI->getOperand(0).getReg();
  return 0;
}

/// isSnippet - Identify if a live interval is a snippet that should be spilled.
/// It is assumed that SnipLI is a virtual register with the same original as
/// Edit->getReg().
bool InlineSpiller::isSnippet(const LiveInterval &SnipLI) {
  unsigned Reg = Edit->getReg();

  // A snippet is a tiny live range with only a single instruction using it
  // besides copies to/from Reg or spills/fills. We accept:
  //
  //   %snip = COPY %Reg / FILL fi#
  //   %snip = USE %snip
  //   %Reg = COPY %snip / SPILL %snip, fi#
  //
  if (SnipLI.getNumValNums() > 2 || !LIS.intervalIsInOneMBB(SnipLI))
    return false;

  MachineInstr *UseMI = 0;

  // Check that all uses satisfy our criteria.
  for (MachineRegisterInfo::reg_nodbg_iterator
         RI = MRI.reg_nodbg_begin(SnipLI.reg);
       MachineInstr *MI = RI.skipInstruction();) {

    // Allow copies to/from Reg.
    if (isFullCopyOf(MI, Reg))
      continue;

    // Allow stack slot loads.
    int FI;
    if (SnipLI.reg == TII.isLoadFromStackSlot(MI, FI) && FI == StackSlot)
      continue;

    // Allow stack slot stores.
    if (SnipLI.reg == TII.isStoreToStackSlot(MI, FI) && FI == StackSlot)
      continue;

    // Allow a single additional instruction.
    if (UseMI && MI != UseMI)
      return false;
    UseMI = MI;
  }
  return true;
}

/// collectRegsToSpill - Collect live range snippets that only have a single
/// real use.
void InlineSpiller::collectRegsToSpill() {
  unsigned Reg = Edit->getReg();

  // Main register always spills.
  RegsToSpill.assign(1, Reg);
  SnippetCopies.clear();

  // Snippets all have the same original, so there can't be any for an original
  // register.
  if (Original == Reg)
    return;

  for (MachineRegisterInfo::reg_iterator RI = MRI.reg_begin(Reg);
       MachineInstr *MI = RI.skipInstruction();) {
    unsigned SnipReg = isFullCopyOf(MI, Reg);
    if (!isSibling(SnipReg))
      continue;
    LiveInterval &SnipLI = LIS.getInterval(SnipReg);
    if (!isSnippet(SnipLI))
      continue;
    SnippetCopies.insert(MI);
    if (isRegToSpill(SnipReg))
      continue;
    RegsToSpill.push_back(SnipReg);
    DEBUG(dbgs() << "\talso spill snippet " << SnipLI << '\n');
    ++NumSnippets;
  }
}


//===----------------------------------------------------------------------===//
//                            Sibling Values
//===----------------------------------------------------------------------===//

// After live range splitting, some values to be spilled may be defined by
// copies from sibling registers. We trace the sibling copies back to the
// original value if it still exists. We need it for rematerialization.
//
// Even when the value can't be rematerialized, we still want to determine if
// the value has already been spilled, or we may want to hoist the spill from a
// loop.
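//
// For example (the virtual register names here are illustrative only):
//
//   %orig = ...               ; original register, later split
//   %sib1 = COPY %orig        ; sibling created by live range splitting
//   %sib2 = COPY %sib1        ; sibling copy; all three share one original
//
// When %sib2 must be spilled, tracing back through the copies reaches the def
// of %orig, which may be a remat candidate, and also tells us whether the
// value is already available in the stack slot.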

bool InlineSpiller::isSibling(unsigned Reg) {
  return TargetRegisterInfo::isVirtualRegister(Reg) &&
         VRM.getOriginal(Reg) == Original;
}

#ifndef NDEBUG
static raw_ostream &operator<<(raw_ostream &OS,
                               const InlineSpiller::SibValueInfo &SVI) {
  OS << "spill " << PrintReg(SVI.SpillReg) << ':'
     << SVI.SpillVNI->id << '@' << SVI.SpillVNI->def;
  if (SVI.SpillMBB)
    OS << " in BB#" << SVI.SpillMBB->getNumber();
  if (SVI.AllDefsAreReloads)
    OS << " all-reloads";
  if (SVI.DefByOrigPHI)
    OS << " orig-phi";
  if (SVI.KillsSource)
    OS << " kill";
  OS << " deps[";
  for (unsigned i = 0, e = SVI.Deps.size(); i != e; ++i)
    OS << ' ' << SVI.Deps[i]->id << '@' << SVI.Deps[i]->def;
  OS << " ]";
  if (SVI.DefMI)
    OS << " def: " << *SVI.DefMI;
  else
    OS << '\n';
  return OS;
}
#endif

/// propagateSiblingValue - Propagate the value in SVI to dependents if it is
/// known. Otherwise remember the dependency for later.
///
/// @param SVI SibValues entry to propagate.
/// @param VNI Dependent value, or NULL to propagate to all saved dependents.
void InlineSpiller::propagateSiblingValue(SibValueMap::iterator SVI,
                                          VNInfo *VNI) {
  // When VNI is non-NULL, add it to SVI's deps, and only propagate to that.
  TinyPtrVector<VNInfo*> FirstDeps;
  if (VNI) {
    FirstDeps.push_back(VNI);
    SVI->second.Deps.push_back(VNI);
  }

  // Has the value been completely determined yet? If not, defer propagation.
  if (!SVI->second.hasDef())
    return;

  // Work list of values to propagate. It would be nice to use a SetVector
  // here, but then we would be forced to use a SmallSet.
  SmallVector<SibValueMap::iterator, 8> WorkList(1, SVI);
  SmallPtrSet<VNInfo*, 8> WorkSet;

  do {
    SVI = WorkList.pop_back_val();
    WorkSet.erase(SVI->first);
    TinyPtrVector<VNInfo*> *Deps = VNI ? &FirstDeps : &SVI->second.Deps;
    VNI = 0;

    SibValueInfo &SV = SVI->second;
    if (!SV.SpillMBB)
      SV.SpillMBB = LIS.getMBBFromIndex(SV.SpillVNI->def);

    DEBUG(dbgs() << " prop to " << Deps->size() << ": "
                 << SVI->first->id << '@' << SVI->first->def << ":\t" << SV);

    assert(SV.hasDef() && "Propagating undefined value");

    // Should this value be propagated as a preferred spill candidate? We don't
    // propagate values of registers that are about to spill.
    bool PropSpill = !DisableHoisting && !isRegToSpill(SV.SpillReg);
    unsigned SpillDepth = ~0u;

    for (TinyPtrVector<VNInfo*>::iterator DepI = Deps->begin(),
         DepE = Deps->end(); DepI != DepE; ++DepI) {
      SibValueMap::iterator DepSVI = SibValues.find(*DepI);
      assert(DepSVI != SibValues.end() && "Dependent value not in SibValues");
      SibValueInfo &DepSV = DepSVI->second;
      if (!DepSV.SpillMBB)
        DepSV.SpillMBB = LIS.getMBBFromIndex(DepSV.SpillVNI->def);

      bool Changed = false;

      // Propagate defining instruction.
      if (!DepSV.hasDef()) {
        Changed = true;
        DepSV.DefMI = SV.DefMI;
        DepSV.DefByOrigPHI = SV.DefByOrigPHI;
      }

      // Propagate AllDefsAreReloads. For PHI values, this computes an AND of
      // all predecessors.
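      // For example (illustrative only): when a split PHI merges one value
      // defined by a reload and one defined by a real instruction, the PHI
      // loses the flag here, since AllDefsAreReloads only survives when every
      // reaching def is a reload.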
      if (!SV.AllDefsAreReloads && DepSV.AllDefsAreReloads) {
        Changed = true;
        DepSV.AllDefsAreReloads = false;
      }

      // Propagate best spill value.
      if (PropSpill && SV.SpillVNI != DepSV.SpillVNI) {
        if (SV.SpillMBB == DepSV.SpillMBB) {
          // DepSV is in the same block. Hoist when dominated.
          if (DepSV.KillsSource && SV.SpillVNI->def < DepSV.SpillVNI->def) {
            // This is an alternative def earlier in the same MBB.
            // Hoist the spill as far as possible in SpillMBB. This can ease
            // register pressure:
            //
            //   x = def
            //   y = use x
            //   s = copy x
            //
            // Hoisting the spill of s to immediately after the def removes the
            // interference between x and y:
            //
            //   x = def
            //   spill x
            //   y = use x<kill>
            //
            // This hoist only helps when the DepSV copy kills its source.
            Changed = true;
            DepSV.SpillReg = SV.SpillReg;
            DepSV.SpillVNI = SV.SpillVNI;
            DepSV.SpillMBB = SV.SpillMBB;
          }
        } else {
          // DepSV is in a different block.
          if (SpillDepth == ~0u)
            SpillDepth = Loops.getLoopDepth(SV.SpillMBB);

          // Also hoist spills to blocks with smaller loop depth, but make sure
          // that the new value dominates. Non-phi dependents are always
          // dominated, phis need checking.
          if ((Loops.getLoopDepth(DepSV.SpillMBB) > SpillDepth) &&
              (!DepSVI->first->isPHIDef() ||
               MDT.dominates(SV.SpillMBB, DepSV.SpillMBB))) {
            Changed = true;
            DepSV.SpillReg = SV.SpillReg;
            DepSV.SpillVNI = SV.SpillVNI;
            DepSV.SpillMBB = SV.SpillMBB;
          }
        }
      }

      if (!Changed)
        continue;

      // Something changed in DepSVI. Propagate to dependents.
      if (WorkSet.insert(DepSVI->first))
        WorkList.push_back(DepSVI);

      DEBUG(dbgs() << " update " << DepSVI->first->id << '@'
                   << DepSVI->first->def << " to:\t" << DepSV);
    }
  } while (!WorkList.empty());
}

/// traceSiblingValue - Trace a value that is about to be spilled back to the
/// real defining instructions by looking through sibling copies. Always stay
/// within the range of OrigVNI so the registers are known to carry the same
/// value.
///
/// Determine if the value is defined by all reloads, so spilling isn't
/// necessary - the value is already in the stack slot.
///
/// Return a defining instruction that may be a candidate for rematerialization.
///
MachineInstr *InlineSpiller::traceSiblingValue(unsigned UseReg, VNInfo *UseVNI,
                                               VNInfo *OrigVNI) {
  // Check if a cached value already exists.
  SibValueMap::iterator SVI;
  bool Inserted;
  tie(SVI, Inserted) =
    SibValues.insert(std::make_pair(UseVNI, SibValueInfo(UseReg, UseVNI)));
  if (!Inserted) {
    DEBUG(dbgs() << "Cached value " << PrintReg(UseReg) << ':'
                 << UseVNI->id << '@' << UseVNI->def << ' ' << SVI->second);
    return SVI->second.DefMI;
  }

  DEBUG(dbgs() << "Tracing value " << PrintReg(UseReg) << ':'
               << UseVNI->id << '@' << UseVNI->def << '\n');

  // List of (Reg, VNI) that have been inserted into SibValues, but need to be
  // processed.
  SmallVector<std::pair<unsigned, VNInfo*>, 8> WorkList;
  WorkList.push_back(std::make_pair(UseReg, UseVNI));

  do {
    unsigned Reg;
    VNInfo *VNI;
    tie(Reg, VNI) = WorkList.pop_back_val();
    DEBUG(dbgs() << " " << PrintReg(Reg) << ':' << VNI->id << '@' << VNI->def
                 << ":\t");

    // First check if this value has already been computed.
    SVI = SibValues.find(VNI);
    assert(SVI != SibValues.end() && "Missing SibValues entry");

    // Trace through PHI-defs created by live range splitting.
    if (VNI->isPHIDef()) {
      // Stop at original PHIs. We don't know the value at the predecessors.
      if (VNI->def == OrigVNI->def) {
        DEBUG(dbgs() << "orig phi value\n");
        SVI->second.DefByOrigPHI = true;
        SVI->second.AllDefsAreReloads = false;
        propagateSiblingValue(SVI);
        continue;
      }

      // This is a PHI inserted by live range splitting. We could trace the
      // live-out value from predecessor blocks, but that search can be very
      // expensive if there are many predecessors and many more PHIs as
      // generated by tail-dup when it sees an indirectbr. Instead, look at
      // all the non-PHI defs that have the same value as OrigVNI. They must
      // jointly dominate VNI->def. This is not optimal since VNI may actually
      // be jointly dominated by a smaller subset of defs, so there is a chance
      // we will miss an AllDefsAreReloads optimization.

      // Separate all values dominated by OrigVNI into PHIs and non-PHIs.
      SmallVector<VNInfo*, 8> PHIs, NonPHIs;
      LiveInterval &LI = LIS.getInterval(Reg);
      LiveInterval &OrigLI = LIS.getInterval(Original);

      for (LiveInterval::vni_iterator VI = LI.vni_begin(), VE = LI.vni_end();
           VI != VE; ++VI) {
        VNInfo *VNI2 = *VI;
        if (VNI2->isUnused())
          continue;
        if (!OrigLI.containsOneValue() &&
            OrigLI.getVNInfoAt(VNI2->def) != OrigVNI)
          continue;
        if (VNI2->isPHIDef() && VNI2->def != OrigVNI->def)
          PHIs.push_back(VNI2);
        else
          NonPHIs.push_back(VNI2);
      }
      DEBUG(dbgs() << "split phi value, checking " << PHIs.size()
                   << " phi-defs, and " << NonPHIs.size()
                   << " non-phi/orig defs\n");

      // Create entries for all the PHIs. Don't add them to the worklist, we
      // are processing all of them in one go here.
      for (unsigned i = 0, e = PHIs.size(); i != e; ++i)
        SibValues.insert(std::make_pair(PHIs[i], SibValueInfo(Reg, PHIs[i])));

      // Add every PHI as a dependent of all the non-PHIs.
      for (unsigned i = 0, e = NonPHIs.size(); i != e; ++i) {
        VNInfo *NonPHI = NonPHIs[i];
        // Known value? Try an insertion.
        tie(SVI, Inserted) =
          SibValues.insert(std::make_pair(NonPHI, SibValueInfo(Reg, NonPHI)));
        // Add all the PHIs as dependents of NonPHI.
        for (unsigned pi = 0, pe = PHIs.size(); pi != pe; ++pi)
          SVI->second.Deps.push_back(PHIs[pi]);
        // If this is the first time we see NonPHI, add it to the worklist.
        if (Inserted)
          WorkList.push_back(std::make_pair(Reg, NonPHI));
        else
          // Propagate to all inserted PHIs, not just VNI.
          propagateSiblingValue(SVI);
      }

      // Next work list item.
      continue;
    }

    MachineInstr *MI = LIS.getInstructionFromIndex(VNI->def);
    assert(MI && "Missing def");

    // Trace through sibling copies.
    if (unsigned SrcReg = isFullCopyOf(MI, Reg)) {
      if (isSibling(SrcReg)) {
        LiveInterval &SrcLI = LIS.getInterval(SrcReg);
        LiveRange *SrcLR = SrcLI.getLiveRangeContaining(VNI->def.getUseIndex());
        assert(SrcLR && "Copy from non-existing value");
        // Check if this COPY kills its source.
        SVI->second.KillsSource = (SrcLR->end == VNI->def);
        VNInfo *SrcVNI = SrcLR->valno;
        DEBUG(dbgs() << "copy of " << PrintReg(SrcReg) << ':'
                     << SrcVNI->id << '@' << SrcVNI->def
                     << " kill=" << unsigned(SVI->second.KillsSource) << '\n');
        // Known sibling source value? Try an insertion.
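        // Note that insert() doubles as a lookup: if SrcVNI already has an
        // entry, Inserted is false and the existing entry is reused;
        // otherwise a fresh entry is created and queued below.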
        tie(SVI, Inserted) = SibValues.insert(std::make_pair(SrcVNI,
                                              SibValueInfo(SrcReg, SrcVNI)));
        // If this is the first time we see SrcVNI, add it to the worklist.
        if (Inserted)
          WorkList.push_back(std::make_pair(SrcReg, SrcVNI));
        propagateSiblingValue(SVI, VNI);
        // Next work list item.
        continue;
      }
    }

    // Track reachable reloads.
    SVI->second.DefMI = MI;
    SVI->second.SpillMBB = MI->getParent();
    int FI;
    if (Reg == TII.isLoadFromStackSlot(MI, FI) && FI == StackSlot) {
      DEBUG(dbgs() << "reload\n");
      propagateSiblingValue(SVI);
      // Next work list item.
      continue;
    }

    // Potential remat candidate.
    DEBUG(dbgs() << "def " << *MI);
    SVI->second.AllDefsAreReloads = false;
    propagateSiblingValue(SVI);
  } while (!WorkList.empty());

  // Look up the value we were looking for. We already did this lookup at the
  // top of the function, but SibValues may have been invalidated.
  SVI = SibValues.find(UseVNI);
  assert(SVI != SibValues.end() && "Didn't compute requested info");
  DEBUG(dbgs() << " traced to:\t" << SVI->second);
  return SVI->second.DefMI;
}

/// analyzeSiblingValues - Trace values defined by sibling copies back to
/// something that isn't a sibling copy.
///
/// Keep track of values that may be rematerializable.
void InlineSpiller::analyzeSiblingValues() {
  SibValues.clear();

  // No siblings at all?
  if (Edit->getReg() == Original)
    return;

  LiveInterval &OrigLI = LIS.getInterval(Original);
  for (unsigned i = 0, e = RegsToSpill.size(); i != e; ++i) {
    unsigned Reg = RegsToSpill[i];
    LiveInterval &LI = LIS.getInterval(Reg);
    for (LiveInterval::const_vni_iterator VI = LI.vni_begin(),
         VE = LI.vni_end(); VI != VE; ++VI) {
      VNInfo *VNI = *VI;
      if (VNI->isUnused())
        continue;
      MachineInstr *DefMI = 0;
      // Check possible sibling copies.
      if (VNI->isPHIDef() || VNI->getCopy()) {
        VNInfo *OrigVNI = OrigLI.getVNInfoAt(VNI->def);
        assert(OrigVNI && "Def outside original live range");
        if (OrigVNI->def != VNI->def)
          DefMI = traceSiblingValue(Reg, VNI, OrigVNI);
      }
      if (!DefMI && !VNI->isPHIDef())
        DefMI = LIS.getInstructionFromIndex(VNI->def);
      if (DefMI && Edit->checkRematerializable(VNI, DefMI, TII, AA)) {
        DEBUG(dbgs() << "Value " << PrintReg(Reg) << ':' << VNI->id << '@'
                     << VNI->def << " may remat from " << *DefMI);
      }
    }
  }
}

/// hoistSpill - Given a sibling copy that defines a value to be spilled, insert
/// a spill at a better location.
bool InlineSpiller::hoistSpill(LiveInterval &SpillLI, MachineInstr *CopyMI) {
  SlotIndex Idx = LIS.getInstructionIndex(CopyMI);
  VNInfo *VNI = SpillLI.getVNInfoAt(Idx.getDefIndex());
  assert(VNI && VNI->def == Idx.getDefIndex() && "Not defined by copy");
  SibValueMap::iterator I = SibValues.find(VNI);
  if (I == SibValues.end())
    return false;

  const SibValueInfo &SVI = I->second;

  // Let the normal folding code deal with the boring case.
  if (!SVI.AllDefsAreReloads && SVI.SpillVNI == VNI)
    return false;

  // SpillReg may have been deleted by remat and DCE.
  if (!LIS.hasInterval(SVI.SpillReg)) {
    DEBUG(dbgs() << "Stale interval: " << PrintReg(SVI.SpillReg) << '\n');
    SibValues.erase(I);
    return false;
  }

  LiveInterval &SibLI = LIS.getInterval(SVI.SpillReg);
  if (!SibLI.containsValue(SVI.SpillVNI)) {
    DEBUG(dbgs() << "Stale value: " << PrintReg(SVI.SpillReg) << '\n');
    SibValues.erase(I);
    return false;
  }

  // Conservatively extend the stack slot range to the range of the original
  // value. We may be able to do better with stack slot coloring by being more
  // careful here.
  assert(StackInt && "No stack slot assigned yet.");
  LiveInterval &OrigLI = LIS.getInterval(Original);
  VNInfo *OrigVNI = OrigLI.getVNInfoAt(Idx);
  StackInt->MergeValueInAsValue(OrigLI, OrigVNI, StackInt->getValNumInfo(0));
  DEBUG(dbgs() << "\tmerged orig valno " << OrigVNI->id << ": "
               << *StackInt << '\n');

  // Already spilled everywhere.
  if (SVI.AllDefsAreReloads) {
    DEBUG(dbgs() << "\tno spill needed: " << SVI);
    ++NumOmitReloadSpill;
    return true;
  }
  // We are going to spill SVI.SpillVNI immediately after its def, so clear out
  // any later spills of the same value.
  eliminateRedundantSpills(SibLI, SVI.SpillVNI);

  MachineBasicBlock *MBB = LIS.getMBBFromIndex(SVI.SpillVNI->def);
  MachineBasicBlock::iterator MII;
  if (SVI.SpillVNI->isPHIDef())
    MII = MBB->SkipPHIsAndLabels(MBB->begin());
  else {
    MachineInstr *DefMI = LIS.getInstructionFromIndex(SVI.SpillVNI->def);
    assert(DefMI && "Defining instruction disappeared");
    MII = DefMI;
    ++MII;
  }
  // Insert spill without kill flag immediately after def.
  TII.storeRegToStackSlot(*MBB, MII, SVI.SpillReg, false, StackSlot,
                          MRI.getRegClass(SVI.SpillReg), &TRI);
  --MII; // Point to store instruction.
  LIS.InsertMachineInstrInMaps(MII);
  VRM.addSpillSlotUse(StackSlot, MII);
  DEBUG(dbgs() << "\thoisted: " << SVI.SpillVNI->def << '\t' << *MII);

  ++NumSpills;
  ++NumHoists;
  return true;
}

/// eliminateRedundantSpills - SLI:VNI is known to be on the stack. Remove any
/// redundant spills of this value in SLI.reg and sibling copies.
void InlineSpiller::eliminateRedundantSpills(LiveInterval &SLI, VNInfo *VNI) {
  assert(VNI && "Missing value");
  SmallVector<std::pair<LiveInterval*, VNInfo*>, 8> WorkList;
  WorkList.push_back(std::make_pair(&SLI, VNI));
  assert(StackInt && "No stack slot assigned yet.");

  do {
    LiveInterval *LI;
    tie(LI, VNI) = WorkList.pop_back_val();
    unsigned Reg = LI->reg;
    DEBUG(dbgs() << "Checking redundant spills for "
                 << VNI->id << '@' << VNI->def << " in " << *LI << '\n');

    // Regs to spill are taken care of.
    if (isRegToSpill(Reg))
      continue;

    // Add all of VNI's live range to StackInt.
    StackInt->MergeValueInAsValue(*LI, VNI, StackInt->getValNumInfo(0));
    DEBUG(dbgs() << "Merged to stack int: " << *StackInt << '\n');

    // Find all spills and copies of VNI.
    for (MachineRegisterInfo::use_nodbg_iterator UI = MRI.use_nodbg_begin(Reg);
         MachineInstr *MI = UI.skipInstruction();) {
      if (!MI->isCopy() && !MI->getDesc().mayStore())
        continue;
      SlotIndex Idx = LIS.getInstructionIndex(MI);
      if (LI->getVNInfoAt(Idx) != VNI)
        continue;
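
      // MI is now a copy or store of VNI. For example (illustrative only), a
      // second 'SPILL %reg, fi#' of a value that is already on the stack is
      // redundant; such stores are rewritten to KILLs below so that
      // eliminateDeadDefs() can delete them.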
      // Follow sibling copies down the dominator tree.
      if (unsigned DstReg = isFullCopyOf(MI, Reg)) {
        if (isSibling(DstReg)) {
          LiveInterval &DstLI = LIS.getInterval(DstReg);
          VNInfo *DstVNI = DstLI.getVNInfoAt(Idx.getDefIndex());
          assert(DstVNI && "Missing defined value");
          assert(DstVNI->def == Idx.getDefIndex() && "Wrong copy def slot");
          WorkList.push_back(std::make_pair(&DstLI, DstVNI));
        }
        continue;
      }

      // Erase spills.
      int FI;
      if (Reg == TII.isStoreToStackSlot(MI, FI) && FI == StackSlot) {
        DEBUG(dbgs() << "Redundant spill " << Idx << '\t' << *MI);
        // eliminateDeadDefs won't normally remove stores, so switch opcode.
        MI->setDesc(TII.get(TargetOpcode::KILL));
        DeadDefs.push_back(MI);
        ++NumSpillsRemoved;
        --NumSpills;
      }
    }
  } while (!WorkList.empty());
}


//===----------------------------------------------------------------------===//
//                           Rematerialization
//===----------------------------------------------------------------------===//

/// markValueUsed - Remember that VNI failed to rematerialize, so its defining
/// instruction cannot be eliminated. See through snippet copies.
void InlineSpiller::markValueUsed(LiveInterval *LI, VNInfo *VNI) {
  SmallVector<std::pair<LiveInterval*, VNInfo*>, 8> WorkList;
  WorkList.push_back(std::make_pair(LI, VNI));
  do {
    tie(LI, VNI) = WorkList.pop_back_val();
    if (!UsedValues.insert(VNI))
      continue;

    if (VNI->isPHIDef()) {
      MachineBasicBlock *MBB = LIS.getMBBFromIndex(VNI->def);
      for (MachineBasicBlock::pred_iterator PI = MBB->pred_begin(),
           PE = MBB->pred_end(); PI != PE; ++PI) {
        VNInfo *PVNI = LI->getVNInfoAt(LIS.getMBBEndIdx(*PI).getPrevSlot());
        if (PVNI)
          WorkList.push_back(std::make_pair(LI, PVNI));
      }
      continue;
    }

    // Follow snippet copies.
    MachineInstr *MI = LIS.getInstructionFromIndex(VNI->def);
    if (!SnippetCopies.count(MI))
      continue;
    LiveInterval &SnipLI = LIS.getInterval(MI->getOperand(1).getReg());
    assert(isRegToSpill(SnipLI.reg) && "Unexpected register in copy");
    VNInfo *SnipVNI = SnipLI.getVNInfoAt(VNI->def.getUseIndex());
    assert(SnipVNI && "Snippet undefined before copy");
    WorkList.push_back(std::make_pair(&SnipLI, SnipVNI));
  } while (!WorkList.empty());
}

/// reMaterializeFor - Attempt to rematerialize before MI instead of reloading.
bool InlineSpiller::reMaterializeFor(LiveInterval &VirtReg,
                                     MachineBasicBlock::iterator MI) {
  SlotIndex UseIdx = LIS.getInstructionIndex(MI).getUseIndex();
  VNInfo *ParentVNI = VirtReg.getVNInfoAt(UseIdx.getBaseIndex());

  if (!ParentVNI) {
    DEBUG(dbgs() << "\tadding <undef> flags: ");
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (MO.isReg() && MO.isUse() && MO.getReg() == VirtReg.reg)
        MO.setIsUndef();
    }
    DEBUG(dbgs() << UseIdx << '\t' << *MI);
    return true;
  }

  if (SnippetCopies.count(MI))
    return false;

  // Use an OrigVNI from traceSiblingValue when ParentVNI is a sibling copy.
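  // For example (hypothetical x86 instruction): if ParentVNI was defined by
  // '%reg = MOV32ri 42', re-executing that def right before MI is cheaper
  // than reloading %reg from the stack slot.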
  LiveRangeEdit::Remat RM(ParentVNI);
  SibValueMap::const_iterator SibI = SibValues.find(ParentVNI);
  if (SibI != SibValues.end())
    RM.OrigMI = SibI->second.DefMI;
  if (!Edit->canRematerializeAt(RM, UseIdx, false, LIS)) {
    markValueUsed(&VirtReg, ParentVNI);
    DEBUG(dbgs() << "\tcannot remat for " << UseIdx << '\t' << *MI);
    return false;
  }

  // If the instruction also writes VirtReg.reg, it had better not require the
  // same register for uses and defs.
  bool Reads, Writes;
  SmallVector<unsigned, 8> Ops;
  tie(Reads, Writes) = MI->readsWritesVirtualRegister(VirtReg.reg, &Ops);
  if (Writes) {
    for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(Ops[i]);
      if (MO.isUse() ? MI->isRegTiedToDefOperand(Ops[i]) : MO.getSubReg()) {
        markValueUsed(&VirtReg, ParentVNI);
        DEBUG(dbgs() << "\tcannot remat tied reg: " << UseIdx << '\t' << *MI);
        return false;
      }
    }
  }

  // Before rematerializing into a register for a single instruction, try to
  // fold a load into the instruction. That avoids allocating a new register.
  if (RM.OrigMI->getDesc().canFoldAsLoad() &&
      foldMemoryOperand(MI, Ops, RM.OrigMI)) {
    Edit->markRematerialized(RM.ParentVNI);
    ++NumFoldedLoads;
    return true;
  }

  // Allocate a new register for the remat.
  LiveInterval &NewLI = Edit->createFrom(Original, LIS, VRM);
  NewLI.markNotSpillable();

  // Finally we can rematerialize OrigMI before MI.
  SlotIndex DefIdx = Edit->rematerializeAt(*MI->getParent(), MI, NewLI.reg, RM,
                                           LIS, TII, TRI);
  DEBUG(dbgs() << "\tremat: " << DefIdx << '\t'
               << *LIS.getInstructionFromIndex(DefIdx));

  // Replace operands with the new register.
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(Ops[i]);
    if (MO.isReg() && MO.isUse() && MO.getReg() == VirtReg.reg) {
      MO.setReg(NewLI.reg);
      MO.setIsKill();
    }
  }
  DEBUG(dbgs() << "\t " << UseIdx << '\t' << *MI);

  VNInfo *DefVNI = NewLI.getNextValue(DefIdx, 0, LIS.getVNInfoAllocator());
  NewLI.addRange(LiveRange(DefIdx, UseIdx.getDefIndex(), DefVNI));
  DEBUG(dbgs() << "\tinterval: " << NewLI << '\n');
  ++NumRemats;
  return true;
}

/// reMaterializeAll - Try to rematerialize as many uses as possible,
/// and trim the live ranges after.
void InlineSpiller::reMaterializeAll() {
  // analyzeSiblingValues has already tested all relevant defining instructions.
  if (!Edit->anyRematerializable(LIS, TII, AA))
    return;

  UsedValues.clear();

  // Try to remat before all uses of snippets.
  bool anyRemat = false;
  for (unsigned i = 0, e = RegsToSpill.size(); i != e; ++i) {
    unsigned Reg = RegsToSpill[i];
    LiveInterval &LI = LIS.getInterval(Reg);
    for (MachineRegisterInfo::use_nodbg_iterator
         RI = MRI.use_nodbg_begin(Reg);
         MachineInstr *MI = RI.skipInstruction();)
      anyRemat |= reMaterializeFor(LI, MI);
  }
  if (!anyRemat)
    return;

  // Remove any values that were completely rematted.
  for (unsigned i = 0, e = RegsToSpill.size(); i != e; ++i) {
    unsigned Reg = RegsToSpill[i];
    LiveInterval &LI = LIS.getInterval(Reg);
    for (LiveInterval::vni_iterator I = LI.vni_begin(), E = LI.vni_end();
         I != E; ++I) {
      VNInfo *VNI = *I;
      if (VNI->isUnused() || VNI->isPHIDef() || UsedValues.count(VNI))
        continue;
      MachineInstr *MI = LIS.getInstructionFromIndex(VNI->def);
      MI->addRegisterDead(Reg, &TRI);
      if (!MI->allDefsAreDead())
        continue;
      DEBUG(dbgs() << "All defs dead: " << *MI);
      DeadDefs.push_back(MI);
    }
  }

  // Eliminate dead code after remat. Note that some snippet copies may be
  // deleted here.
  if (DeadDefs.empty())
    return;
  DEBUG(dbgs() << "Remat created " << DeadDefs.size() << " dead defs.\n");
  Edit->eliminateDeadDefs(DeadDefs, LIS, VRM, TII);

  // Get rid of deleted and empty intervals.
  for (unsigned i = RegsToSpill.size(); i != 0; --i) {
    unsigned Reg = RegsToSpill[i-1];
    if (!LIS.hasInterval(Reg)) {
      RegsToSpill.erase(RegsToSpill.begin() + (i - 1));
      continue;
    }
    LiveInterval &LI = LIS.getInterval(Reg);
    if (!LI.empty())
      continue;
    Edit->eraseVirtReg(Reg, LIS);
    RegsToSpill.erase(RegsToSpill.begin() + (i - 1));
  }
  DEBUG(dbgs() << RegsToSpill.size() << " registers to spill after remat.\n");
}


//===----------------------------------------------------------------------===//
//                                Spilling
//===----------------------------------------------------------------------===//

/// If MI is a load or store of StackSlot, it can be removed.
bool InlineSpiller::coalesceStackAccess(MachineInstr *MI, unsigned Reg) {
  int FI = 0;
  unsigned InstrReg = TII.isLoadFromStackSlot(MI, FI);
  bool IsLoad = InstrReg;
  if (!IsLoad)
    InstrReg = TII.isStoreToStackSlot(MI, FI);

  // We have a stack access. Is it the right register and slot?
  if (InstrReg != Reg || FI != StackSlot)
    return false;

  DEBUG(dbgs() << "Coalescing stack access: " << *MI);
  LIS.RemoveMachineInstrFromMaps(MI);
  MI->eraseFromParent();

  if (IsLoad) {
    ++NumReloadsRemoved;
    --NumReloads;
  } else {
    ++NumSpillsRemoved;
    --NumSpills;
  }

  return true;
}

/// foldMemoryOperand - Try folding stack slot references in Ops into MI.
/// @param MI     Instruction using or defining the current register.
/// @param Ops    Operand indices from readsWritesVirtualRegister().
/// @param LoadMI Load instruction to use instead of stack slot when non-null.
/// @return       True on success, and MI will be erased.
bool InlineSpiller::foldMemoryOperand(MachineBasicBlock::iterator MI,
                                      const SmallVectorImpl<unsigned> &Ops,
                                      MachineInstr *LoadMI) {
  bool WasCopy = MI->isCopy();
  // TargetInstrInfo::foldMemoryOperand only expects explicit, non-tied
  // operands.
  SmallVector<unsigned, 8> FoldOps;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned Idx = Ops[i];
    MachineOperand &MO = MI->getOperand(Idx);
    if (MO.isImplicit())
      continue;
    // FIXME: Teach targets to deal with subregs.
    if (MO.getSubReg())
      return false;
    // We cannot fold a load instruction into a def.
    if (LoadMI && MO.isDef())
      return false;
    // Tied use operands should not be passed to foldMemoryOperand.
    if (!MI->isRegTiedToDefOperand(Idx))
      FoldOps.push_back(Idx);
  }
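
  // For example (illustrative only): on x86, '%r = ADD32rr %r, %x' plus a
  // reload of %x can fold into '%r = ADD32rm %r, fi#', eliminating the
  // separate load instruction.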
  MachineInstr *FoldMI = LoadMI ? TII.foldMemoryOperand(MI, FoldOps, LoadMI)
                                : TII.foldMemoryOperand(MI, FoldOps, StackSlot);
  if (!FoldMI)
    return false;
  LIS.ReplaceMachineInstrInMaps(MI, FoldMI);
  if (!LoadMI)
    VRM.addSpillSlotUse(StackSlot, FoldMI);
  MI->eraseFromParent();
  DEBUG(dbgs() << "\tfolded: " << *FoldMI);
  if (!WasCopy)
    ++NumFolded;
  else if (Ops.front() == 0)
    ++NumSpills;
  else
    ++NumReloads;
  return true;
}

/// insertReload - Insert a reload of NewLI.reg before MI.
void InlineSpiller::insertReload(LiveInterval &NewLI,
                                 SlotIndex Idx,
                                 MachineBasicBlock::iterator MI) {
  MachineBasicBlock &MBB = *MI->getParent();
  TII.loadRegFromStackSlot(MBB, MI, NewLI.reg, StackSlot,
                           MRI.getRegClass(NewLI.reg), &TRI);
  --MI; // Point to load instruction.
  SlotIndex LoadIdx = LIS.InsertMachineInstrInMaps(MI).getDefIndex();
  VRM.addSpillSlotUse(StackSlot, MI);
  DEBUG(dbgs() << "\treload: " << LoadIdx << '\t' << *MI);
  VNInfo *LoadVNI = NewLI.getNextValue(LoadIdx, 0,
                                       LIS.getVNInfoAllocator());
  NewLI.addRange(LiveRange(LoadIdx, Idx, LoadVNI));
  ++NumReloads;
}

/// insertSpill - Insert a spill of NewLI.reg after MI.
void InlineSpiller::insertSpill(LiveInterval &NewLI, const LiveInterval &OldLI,
                                SlotIndex Idx, MachineBasicBlock::iterator MI) {
  MachineBasicBlock &MBB = *MI->getParent();
  TII.storeRegToStackSlot(MBB, ++MI, NewLI.reg, true, StackSlot,
                          MRI.getRegClass(NewLI.reg), &TRI);
  --MI; // Point to store instruction.
  SlotIndex StoreIdx = LIS.InsertMachineInstrInMaps(MI).getDefIndex();
  VRM.addSpillSlotUse(StackSlot, MI);
  DEBUG(dbgs() << "\tspilled: " << StoreIdx << '\t' << *MI);
  VNInfo *StoreVNI = NewLI.getNextValue(Idx, 0, LIS.getVNInfoAllocator());
  NewLI.addRange(LiveRange(Idx, StoreIdx, StoreVNI));
  ++NumSpills;
}

/// spillAroundUses - Insert spill code around each use of Reg.
void InlineSpiller::spillAroundUses(unsigned Reg) {
  DEBUG(dbgs() << "spillAroundUses " << PrintReg(Reg) << '\n');
  LiveInterval &OldLI = LIS.getInterval(Reg);

  // Iterate over instructions using Reg.
  for (MachineRegisterInfo::reg_iterator RI = MRI.reg_begin(Reg);
       MachineInstr *MI = RI.skipInstruction();) {

    // Debug values are not allowed to affect codegen.
    if (MI->isDebugValue()) {
      // Modify DBG_VALUE now that the value is in a spill slot.
      uint64_t Offset = MI->getOperand(1).getImm();
      const MDNode *MDPtr = MI->getOperand(2).getMetadata();
      DebugLoc DL = MI->getDebugLoc();
      if (MachineInstr *NewDV = TII.emitFrameIndexDebugValue(MF, StackSlot,
                                                             Offset, MDPtr, DL)) {
        DEBUG(dbgs() << "Modifying debug info due to spill:" << "\t" << *MI);
        MachineBasicBlock *MBB = MI->getParent();
        MBB->insert(MBB->erase(MI), NewDV);
      } else {
        DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
        MI->eraseFromParent();
      }
      continue;
    }

    // Ignore copies to/from snippets. We'll delete them.
    if (SnippetCopies.count(MI))
      continue;

    // Stack slot accesses may coalesce away.
    if (coalesceStackAccess(MI, Reg))
      continue;

    // Analyze instruction.
    bool Reads, Writes;
    SmallVector<unsigned, 8> Ops;
    tie(Reads, Writes) = MI->readsWritesVirtualRegister(Reg, &Ops);

    // Find the slot index where this instruction reads and writes OldLI.
    // This is usually the def slot, except for tied early clobbers.
    SlotIndex Idx = LIS.getInstructionIndex(MI).getDefIndex();
    if (VNInfo *VNI = OldLI.getVNInfoAt(Idx.getUseIndex()))
      if (SlotIndex::isSameInstr(Idx, VNI->def))
        Idx = VNI->def;

    // Check for a sibling copy.
    unsigned SibReg = isFullCopyOf(MI, Reg);
    if (SibReg && isSibling(SibReg)) {
      // This may actually be a copy between snippets.
      if (isRegToSpill(SibReg)) {
        DEBUG(dbgs() << "Found new snippet copy: " << *MI);
        SnippetCopies.insert(MI);
        continue;
      }
      if (Writes) {
        // Hoist the spill of a sib-reg copy.
        if (hoistSpill(OldLI, MI)) {
          // This COPY is now dead, the value is already in the stack slot.
          MI->getOperand(0).setIsDead();
          DeadDefs.push_back(MI);
          continue;
        }
      } else {
        // This is a reload for a sib-reg copy. Drop spills downstream.
        LiveInterval &SibLI = LIS.getInterval(SibReg);
        eliminateRedundantSpills(SibLI, SibLI.getVNInfoAt(Idx));
        // The COPY will fold to a reload below.
      }
    }

    // Attempt to fold memory ops.
    if (foldMemoryOperand(MI, Ops))
      continue;

    // Allocate interval around instruction.
    // FIXME: Infer regclass from instruction alone.
    LiveInterval &NewLI = Edit->createFrom(Reg, LIS, VRM);
    NewLI.markNotSpillable();

    if (Reads)
      insertReload(NewLI, Idx, MI);

    // Rewrite instruction operands.
    bool hasLiveDef = false;
    for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(Ops[i]);
      MO.setReg(NewLI.reg);
      if (MO.isUse()) {
        if (!MI->isRegTiedToDefOperand(Ops[i]))
          MO.setIsKill();
      } else {
        if (!MO.isDead())
          hasLiveDef = true;
      }
    }
    DEBUG(dbgs() << "\trewrite: " << Idx << '\t' << *MI);

    // FIXME: Use a second vreg if instruction has no tied ops.
    if (Writes) {
      if (hasLiveDef)
        insertSpill(NewLI, OldLI, Idx, MI);
      else {
        // This instruction defines a dead value. We don't need to spill it,
        // but do create a live range for the dead value.
        VNInfo *VNI = NewLI.getNextValue(Idx, 0, LIS.getVNInfoAllocator());
        NewLI.addRange(LiveRange(Idx, Idx.getNextSlot(), VNI));
      }
    }

    DEBUG(dbgs() << "\tinterval: " << NewLI << '\n');
  }
}

/// spillAll - Spill all registers remaining after rematerialization.
void InlineSpiller::spillAll() {
  // Update LiveStacks now that we are committed to spilling.
  if (StackSlot == VirtRegMap::NO_STACK_SLOT) {
    StackSlot = VRM.assignVirt2StackSlot(Original);
    StackInt = &LSS.getOrCreateInterval(StackSlot, MRI.getRegClass(Original));
    StackInt->getNextValue(SlotIndex(), 0, LSS.getVNInfoAllocator());
  } else
    StackInt = &LSS.getInterval(StackSlot);

  if (Original != Edit->getReg())
    VRM.assignVirt2StackSlot(Edit->getReg(), StackSlot);

  assert(StackInt->getNumValNums() == 1 && "Bad stack interval values");
  for (unsigned i = 0, e = RegsToSpill.size(); i != e; ++i)
    StackInt->MergeRangesInAsValue(LIS.getInterval(RegsToSpill[i]),
                                   StackInt->getValNumInfo(0));
  DEBUG(dbgs() << "Merged spilled regs: " << *StackInt << '\n');

  // Spill around uses of all RegsToSpill.
  for (unsigned i = 0, e = RegsToSpill.size(); i != e; ++i)
    spillAroundUses(RegsToSpill[i]);

  // Hoisted spills may cause dead code.
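  // For example, a sibling COPY whose spill was hoisted by hoistSpill() was
  // marked dead in spillAroundUses(); eliminateDeadDefs() deletes such
  // instructions and shrinks the remaining live ranges.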
  if (!DeadDefs.empty()) {
    DEBUG(dbgs() << "Eliminating " << DeadDefs.size() << " dead defs\n");
    Edit->eliminateDeadDefs(DeadDefs, LIS, VRM, TII);
  }

  // Finally delete the SnippetCopies.
  for (unsigned i = 0, e = RegsToSpill.size(); i != e; ++i) {
    for (MachineRegisterInfo::reg_iterator RI = MRI.reg_begin(RegsToSpill[i]);
         MachineInstr *MI = RI.skipInstruction();) {
      assert(SnippetCopies.count(MI) && "Remaining use wasn't a snippet copy");
      // FIXME: Do this with a LiveRangeEdit callback.
      VRM.RemoveMachineInstrFromMaps(MI);
      LIS.RemoveMachineInstrFromMaps(MI);
      MI->eraseFromParent();
    }
  }

  // Delete all spilled registers.
  for (unsigned i = 0, e = RegsToSpill.size(); i != e; ++i)
    Edit->eraseVirtReg(RegsToSpill[i], LIS);
}

void InlineSpiller::spill(LiveRangeEdit &edit) {
  ++NumSpilledRanges;
  Edit = &edit;
  assert(!TargetRegisterInfo::isStackSlot(edit.getReg())
         && "Trying to spill a stack slot.");
  // Share a stack slot among all descendants of Original.
  Original = VRM.getOriginal(edit.getReg());
  StackSlot = VRM.getStackSlot(Original);
  StackInt = 0;

  DEBUG(dbgs() << "Inline spilling "
               << MRI.getRegClass(edit.getReg())->getName()
               << ':' << edit.getParent() << "\nFrom original "
               << LIS.getInterval(Original) << '\n');
  assert(edit.getParent().isSpillable() &&
         "Attempting to spill already spilled value.");
  assert(DeadDefs.empty() && "Previous spill didn't remove dead defs");

  collectRegsToSpill();
  analyzeSiblingValues();
  reMaterializeAll();

  // Remat may handle everything.
  if (!RegsToSpill.empty())
    spillAll();

  Edit->calculateRegClassAndHint(MF, LIS, Loops);
}