//===----- SchedulePostRAList.cpp - list scheduler ------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements a top-down list scheduler, using standard algorithms.
// The basic approach uses a priority queue of available nodes to schedule.
// One at a time, nodes are taken from the priority queue (thus in priority
// order), checked for legality to schedule, and emitted if legal.
//
// Nodes may not be legal to schedule either due to structural hazards (e.g.
// pipeline or resource constraints) or because an input to the instruction has
// not completed execution.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "post-RA-sched"
#include "llvm/CodeGen/Passes.h"
#include "AggressiveAntiDepBreaker.h"
#include "AntiDepBreaker.h"
#include "CriticalAntiDepBreaker.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LatencyPriorityQueue.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterClassInfo.h"
#include "llvm/CodeGen/ScheduleDAGInstrs.h"
#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
#include "llvm/CodeGen/SchedulerRegistry.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
using namespace llvm;

STATISTIC(NumNoops, "Number of noops inserted");
STATISTIC(NumStalls, "Number of pipeline stalls");
STATISTIC(NumFixedAnti, "Number of fixed anti-dependencies");

// Post-RA scheduling is enabled with
// TargetSubtargetInfo.enablePostRAScheduler(). This flag can be used to
// override the target.
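// (For example, passing -post-RA-scheduler to llc forces the pass on even
// when the subtarget hook declines it, and -post-RA-scheduler=false forces
// it off; the hook is consulted only when the flag is absent.)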
static cl::opt<bool>
EnablePostRAScheduler("post-RA-scheduler",
                      cl::desc("Enable scheduling after register allocation"),
                      cl::init(false), cl::Hidden);
static cl::opt<std::string>
EnableAntiDepBreaking("break-anti-dependencies",
                      cl::desc("Break post-RA scheduling anti-dependencies: "
                               "\"critical\", \"all\", or \"none\""),
                      cl::init("none"), cl::Hidden);

// If DebugDiv > 0 then only schedule MBB with (ID % DebugDiv) == DebugMod
static cl::opt<int>
DebugDiv("postra-sched-debugdiv",
         cl::desc("Debug control MBBs that are scheduled"),
         cl::init(0), cl::Hidden);
static cl::opt<int>
DebugMod("postra-sched-debugmod",
         cl::desc("Debug control MBBs that are scheduled"),
         cl::init(0), cl::Hidden);

AntiDepBreaker::~AntiDepBreaker() { }

namespace {
  class PostRAScheduler : public MachineFunctionPass {
    const TargetInstrInfo *TII;
    RegisterClassInfo RegClassInfo;

  public:
    static char ID;
    PostRAScheduler() : MachineFunctionPass(ID) {}

    void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.setPreservesCFG();
      AU.addRequired<AliasAnalysis>();
      AU.addRequired<TargetPassConfig>();
      AU.addRequired<MachineDominatorTree>();
      AU.addPreserved<MachineDominatorTree>();
      AU.addRequired<MachineLoopInfo>();
      AU.addPreserved<MachineLoopInfo>();
      MachineFunctionPass::getAnalysisUsage(AU);
    }

    bool runOnMachineFunction(MachineFunction &Fn);
  };
  char PostRAScheduler::ID = 0;

  class SchedulePostRATDList : public ScheduleDAGInstrs {
    /// AvailableQueue - The priority queue to use for the available SUnits.
    ///
    LatencyPriorityQueue AvailableQueue;

    /// PendingQueue - This contains all of the instructions whose operands
    /// have been issued, but their results are not ready yet (due to the
    /// latency of the operation). Once the operands become available, the
    /// instruction is added to the AvailableQueue.
    std::vector<SUnit*> PendingQueue;

    /// HazardRec - The hazard recognizer to use.
    ScheduleHazardRecognizer *HazardRec;

    /// AntiDepBreak - Anti-dependence breaking object, or NULL if none.
    AntiDepBreaker *AntiDepBreak;

    /// AA - AliasAnalysis for making memory reference queries.
    AliasAnalysis *AA;

    /// LiveRegs - true if the register is live.
    BitVector LiveRegs;

    /// The schedule. Null SUnit*'s represent noop instructions.
    std::vector<SUnit*> Sequence;

  public:
    SchedulePostRATDList(
        MachineFunction &MF, MachineLoopInfo &MLI, MachineDominatorTree &MDT,
        AliasAnalysis *AA, const RegisterClassInfo&,
        TargetSubtargetInfo::AntiDepBreakMode AntiDepMode,
        SmallVectorImpl<const TargetRegisterClass*> &CriticalPathRCs);

    ~SchedulePostRATDList();

    /// startBlock - Initialize register live-range state for scheduling in
    /// this block.
    ///
    void startBlock(MachineBasicBlock *BB);

    /// Initialize the scheduler state for the next scheduling region.
    virtual void enterRegion(MachineBasicBlock *bb,
                             MachineBasicBlock::iterator begin,
                             MachineBasicBlock::iterator end,
                             unsigned endcount);

    /// Notify that the scheduler has finished scheduling the current region.
    virtual void exitRegion();

    /// Schedule - Schedule the instruction range using list scheduling.
    ///
    void schedule();

    void EmitSchedule();

    /// Observe - Update liveness information to account for the current
    /// instruction, which will not be scheduled.
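    /// Called on region-boundary instructions (such as calls), which are
    /// kept in place but must still be seen by the anti-dependence breaker.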
    ///
    void Observe(MachineInstr *MI, unsigned Count);

    /// finishBlock - Clean up register live-range state.
    ///
    void finishBlock();

    /// FixupKills - Fix register kill flags that have been made
    /// invalid due to scheduling.
    ///
    void FixupKills(MachineBasicBlock *MBB);

  private:
    void ReleaseSucc(SUnit *SU, SDep *SuccEdge);
    void ReleaseSuccessors(SUnit *SU);
    void ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle);
    void ListScheduleTopDown();
    void StartBlockForKills(MachineBasicBlock *BB);

    // ToggleKillFlag - Toggle a register operand kill flag. Other
    // adjustments may be made to the instruction if necessary. Return
    // true if the operand has been deleted, false if not.
    bool ToggleKillFlag(MachineInstr *MI, MachineOperand &MO);

    void dumpSchedule() const;
  };
}

char &llvm::PostRASchedulerID = PostRAScheduler::ID;

INITIALIZE_PASS(PostRAScheduler, "post-RA-sched",
                "Post RA top-down list latency scheduler", false, false)

SchedulePostRATDList::SchedulePostRATDList(
    MachineFunction &MF, MachineLoopInfo &MLI, MachineDominatorTree &MDT,
    AliasAnalysis *AA, const RegisterClassInfo &RCI,
    TargetSubtargetInfo::AntiDepBreakMode AntiDepMode,
    SmallVectorImpl<const TargetRegisterClass*> &CriticalPathRCs)
  : ScheduleDAGInstrs(MF, MLI, MDT, /*IsPostRA=*/true), AA(AA),
    LiveRegs(TRI->getNumRegs())
{
  const TargetMachine &TM = MF.getTarget();
  const InstrItineraryData *InstrItins = TM.getInstrItineraryData();
  HazardRec =
    TM.getInstrInfo()->CreateTargetPostRAHazardRecognizer(InstrItins, this);

  assert((AntiDepMode == TargetSubtargetInfo::ANTIDEP_NONE ||
          MRI.tracksLiveness()) &&
         "Live-ins must be accurate for anti-dependency breaking");
  AntiDepBreak =
    ((AntiDepMode == TargetSubtargetInfo::ANTIDEP_ALL) ?
     (AntiDepBreaker *)new AggressiveAntiDepBreaker(MF, RCI, CriticalPathRCs) :
     ((AntiDepMode == TargetSubtargetInfo::ANTIDEP_CRITICAL) ?
      (AntiDepBreaker *)new CriticalAntiDepBreaker(MF, RCI) : NULL));
}

SchedulePostRATDList::~SchedulePostRATDList() {
  delete HazardRec;
  delete AntiDepBreak;
}

/// Initialize state associated with the next scheduling region.
void SchedulePostRATDList::enterRegion(MachineBasicBlock *bb,
                                       MachineBasicBlock::iterator begin,
                                       MachineBasicBlock::iterator end,
                                       unsigned endcount) {
  ScheduleDAGInstrs::enterRegion(bb, begin, end, endcount);
  Sequence.clear();
}

/// Print the schedule before exiting the region.
void SchedulePostRATDList::exitRegion() {
  DEBUG({
      dbgs() << "*** Final schedule ***\n";
      dumpSchedule();
      dbgs() << '\n';
    });
  ScheduleDAGInstrs::exitRegion();
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
/// dumpSchedule - dump the scheduled Sequence.
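/// Null entries in Sequence are printed as noops.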
void SchedulePostRATDList::dumpSchedule() const {
  for (unsigned i = 0, e = Sequence.size(); i != e; i++) {
    if (SUnit *SU = Sequence[i])
      SU->dump(this);
    else
      dbgs() << "**** NOOP ****\n";
  }
}
#endif

bool PostRAScheduler::runOnMachineFunction(MachineFunction &Fn) {
  TII = Fn.getTarget().getInstrInfo();
  MachineLoopInfo &MLI = getAnalysis<MachineLoopInfo>();
  MachineDominatorTree &MDT = getAnalysis<MachineDominatorTree>();
  AliasAnalysis *AA = &getAnalysis<AliasAnalysis>();
  TargetPassConfig *PassConfig = &getAnalysis<TargetPassConfig>();

  RegClassInfo.runOnMachineFunction(Fn);

  // Check for explicit enable/disable of post-ra scheduling.
  TargetSubtargetInfo::AntiDepBreakMode AntiDepMode =
    TargetSubtargetInfo::ANTIDEP_NONE;
  SmallVector<const TargetRegisterClass*, 4> CriticalPathRCs;
  if (EnablePostRAScheduler.getPosition() > 0) {
    if (!EnablePostRAScheduler)
      return false;
  } else {
    // Check that post-RA scheduling is enabled for this target.
    // This may upgrade the AntiDepMode.
    const TargetSubtargetInfo &ST =
      Fn.getTarget().getSubtarget<TargetSubtargetInfo>();
    if (!ST.enablePostRAScheduler(PassConfig->getOptLevel(), AntiDepMode,
                                  CriticalPathRCs))
      return false;
  }

  // Check for an anti-dep breaking override...
  if (EnableAntiDepBreaking.getPosition() > 0) {
    AntiDepMode = (EnableAntiDepBreaking == "all")
      ? TargetSubtargetInfo::ANTIDEP_ALL
      : ((EnableAntiDepBreaking == "critical")
         ? TargetSubtargetInfo::ANTIDEP_CRITICAL
         : TargetSubtargetInfo::ANTIDEP_NONE);
  }

  DEBUG(dbgs() << "PostRAScheduler\n");

  SchedulePostRATDList Scheduler(Fn, MLI, MDT, AA, RegClassInfo, AntiDepMode,
                                 CriticalPathRCs);

  // Loop over all of the basic blocks.
  for (MachineFunction::iterator MBB = Fn.begin(), MBBe = Fn.end();
       MBB != MBBe; ++MBB) {
#ifndef NDEBUG
    // If DebugDiv > 0 then only schedule MBB with (ID % DebugDiv) == DebugMod
    if (DebugDiv > 0) {
      static int bbcnt = 0;
      if (bbcnt++ % DebugDiv != DebugMod)
        continue;
      dbgs() << "*** DEBUG scheduling " << Fn.getName()
             << ":BB#" << MBB->getNumber() << " ***\n";
    }
#endif

    // Initialize register live-range state for scheduling in this block.
    Scheduler.startBlock(MBB);

    // Schedule each sequence of instructions not interrupted by a label
    // or anything else that effectively needs to shut down scheduling.
    MachineBasicBlock::iterator Current = MBB->end();
    unsigned Count = MBB->size(), CurrentCount = Count;
    for (MachineBasicBlock::iterator I = Current; I != MBB->begin(); ) {
      MachineInstr *MI = llvm::prior(I);
      // Calls are not scheduling boundaries before register allocation, but
      // post-ra we don't gain anything by scheduling across calls since we
      // don't need to worry about register pressure.
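      // On reaching a boundary, schedule the region that follows it (from I
      // up to Current), emit it, and then hand the boundary instruction
      // itself to Observe so the anti-dependence state stays accurate.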
      if (MI->isCall() || TII->isSchedulingBoundary(MI, MBB, Fn)) {
        Scheduler.enterRegion(MBB, I, Current, CurrentCount);
        Scheduler.schedule();
        Scheduler.exitRegion();
        Scheduler.EmitSchedule();
        Current = MI;
        CurrentCount = Count - 1;
        Scheduler.Observe(MI, CurrentCount);
      }
      I = MI;
      --Count;
      if (MI->isBundle())
        Count -= MI->getBundleSize();
    }
    assert(Count == 0 && "Instruction count mismatch!");
    assert((MBB->begin() == Current || CurrentCount != 0) &&
           "Instruction count mismatch!");
    Scheduler.enterRegion(MBB, MBB->begin(), Current, CurrentCount);
    Scheduler.schedule();
    Scheduler.exitRegion();
    Scheduler.EmitSchedule();

    // Clean up register live-range state.
    Scheduler.finishBlock();

    // Update register kills.
    Scheduler.FixupKills(MBB);
  }

  return true;
}

/// StartBlock - Initialize register live-range state for scheduling in
/// this block.
///
void SchedulePostRATDList::startBlock(MachineBasicBlock *BB) {
  // Call the superclass.
  ScheduleDAGInstrs::startBlock(BB);

  // Reset the hazard recognizer and anti-dep breaker.
  HazardRec->Reset();
  if (AntiDepBreak != NULL)
    AntiDepBreak->StartBlock(BB);
}

/// Schedule - Schedule the instruction range using list scheduling.
///
void SchedulePostRATDList::schedule() {
  // Build the scheduling graph.
  buildSchedGraph(AA);

  if (AntiDepBreak != NULL) {
    unsigned Broken =
      AntiDepBreak->BreakAntiDependencies(SUnits, RegionBegin, RegionEnd,
                                          EndIndex, DbgValues);

    if (Broken != 0) {
      // We made changes. Update the dependency graph.
      // Theoretically we could update the graph in place:
      // When a live range is changed to use a different register, remove
      // the def's anti-dependence *and* output-dependence edges due to
      // that register, and add new anti-dependence and output-dependence
      // edges based on the next live range of the register.
      ScheduleDAG::clearDAG();
      buildSchedGraph(AA);

      NumFixedAnti += Broken;
    }
  }

  DEBUG(dbgs() << "********** List Scheduling **********\n");
  DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
          SUnits[su].dumpAll(this));

  AvailableQueue.initNodes(SUnits);
  ListScheduleTopDown();
  AvailableQueue.releaseState();
}

/// Observe - Update liveness information to account for the current
/// instruction, which will not be scheduled.
///
void SchedulePostRATDList::Observe(MachineInstr *MI, unsigned Count) {
  if (AntiDepBreak != NULL)
    AntiDepBreak->Observe(MI, Count, EndIndex);
}

/// FinishBlock - Clean up register live-range state.
///
void SchedulePostRATDList::finishBlock() {
  if (AntiDepBreak != NULL)
    AntiDepBreak->FinishBlock();

  // Call the superclass.
  ScheduleDAGInstrs::finishBlock();
}

/// StartBlockForKills - Initialize register live-range state for updating
/// kills.
///
void SchedulePostRATDList::StartBlockForKills(MachineBasicBlock *BB) {
  // Start with no live registers.
  LiveRegs.reset();

  // Examine the live-in regs of all successors.
  for (MachineBasicBlock::succ_iterator SI = BB->succ_begin(),
         SE = BB->succ_end(); SI != SE; ++SI) {
    for (MachineBasicBlock::livein_iterator I = (*SI)->livein_begin(),
           E = (*SI)->livein_end(); I != E; ++I) {
      unsigned Reg = *I;
      // Repeat, for reg and all subregs.
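      // (On X86, for example, a live-in EAX also marks AX, AH, and AL live.)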
      for (MCSubRegIterator SubRegs(Reg, TRI, /*IncludeSelf=*/true);
           SubRegs.isValid(); ++SubRegs)
        LiveRegs.set(*SubRegs);
    }
  }
}

bool SchedulePostRATDList::ToggleKillFlag(MachineInstr *MI,
                                          MachineOperand &MO) {
  // Setting kill flag...
  if (!MO.isKill()) {
    MO.setIsKill(true);
    return false;
  }

  // If MO itself is live, clear the kill flag...
  if (LiveRegs.test(MO.getReg())) {
    MO.setIsKill(false);
    return false;
  }

  // If any subreg of MO is live, then create an imp-def for that
  // subreg and clear the kill flag on the super-register; if all
  // subregs are dead, the operand stays marked as killed.
  MO.setIsKill(false);
  bool AllDead = true;
  const unsigned SuperReg = MO.getReg();
  MachineInstrBuilder MIB(MF, MI);
  for (MCSubRegIterator SubRegs(SuperReg, TRI); SubRegs.isValid(); ++SubRegs) {
    if (LiveRegs.test(*SubRegs)) {
      MIB.addReg(*SubRegs, RegState::ImplicitDefine);
      AllDead = false;
    }
  }

  if (AllDead)
    MO.setIsKill(true);
  return false;
}

/// FixupKills - Fix the register kill flags; they may have been made
/// incorrect by instruction reordering.
///
void SchedulePostRATDList::FixupKills(MachineBasicBlock *MBB) {
  DEBUG(dbgs() << "Fixup kills for BB#" << MBB->getNumber() << '\n');

  BitVector killedRegs(TRI->getNumRegs());

  StartBlockForKills(MBB);

  // Examine block from end to start...
  unsigned Count = MBB->size();
  for (MachineBasicBlock::iterator I = MBB->end(), E = MBB->begin();
       I != E; --Count) {
    MachineInstr *MI = --I;
    if (MI->isDebugValue())
      continue;

    // Update liveness. Registers that are defined but not used in this
    // instruction are now dead. Mark the register and all its subregs,
    // since they are completely defined.
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (MO.isRegMask())
        LiveRegs.clearBitsNotInMask(MO.getRegMask());
      if (!MO.isReg()) continue;
      unsigned Reg = MO.getReg();
      if (Reg == 0) continue;
      if (!MO.isDef()) continue;
      // Ignore two-addr defs.
      if (MI->isRegTiedToUseOperand(i)) continue;

      // Repeat for reg and all subregs.
      for (MCSubRegIterator SubRegs(Reg, TRI, /*IncludeSelf=*/true);
           SubRegs.isValid(); ++SubRegs)
        LiveRegs.reset(*SubRegs);
    }

    // Examine all used registers and set/clear kill flag. When a
    // register is used multiple times we only set the kill flag on
    // the first use.
    killedRegs.reset();
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (!MO.isReg() || !MO.isUse()) continue;
      unsigned Reg = MO.getReg();
      if ((Reg == 0) || MRI.isReserved(Reg)) continue;

      bool kill = false;
      if (!killedRegs.test(Reg)) {
        kill = true;
        // A register is not killed if any subregs are live...
        for (MCSubRegIterator SubRegs(Reg, TRI); SubRegs.isValid(); ++SubRegs) {
          if (LiveRegs.test(*SubRegs)) {
            kill = false;
            break;
          }
        }

        // If no subreg is live, then the register is killed if it became
        // live in this instruction.
        if (kill)
          kill = !LiveRegs.test(Reg);
      }

      if (MO.isKill() != kill) {
        DEBUG(dbgs() << "Fixing " << MO << " in ");
        // Warning: ToggleKillFlag may invalidate MO.
        ToggleKillFlag(MI, MO);
        DEBUG(MI->dump());
      }

      killedRegs.set(Reg);
    }

    // Mark any used register (that is not using undef) and subregs as
    // now live...
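    // This must run after the kill-flag pass above: kill detection relies on
    // the liveness state from *before* this instruction's uses take effect.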
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (!MO.isReg() || !MO.isUse() || MO.isUndef()) continue;
      unsigned Reg = MO.getReg();
      if ((Reg == 0) || MRI.isReserved(Reg)) continue;

      for (MCSubRegIterator SubRegs(Reg, TRI, /*IncludeSelf=*/true);
           SubRegs.isValid(); ++SubRegs)
        LiveRegs.set(*SubRegs);
    }
  }
}

//===----------------------------------------------------------------------===//
//  Top-Down Scheduling
//===----------------------------------------------------------------------===//

/// ReleaseSucc - Decrement the NumPredsLeft count of a successor. Add it to
/// the PendingQueue if the count reaches zero.
void SchedulePostRATDList::ReleaseSucc(SUnit *SU, SDep *SuccEdge) {
  SUnit *SuccSU = SuccEdge->getSUnit();

  if (SuccEdge->isWeak()) {
    --SuccSU->WeakPredsLeft;
    return;
  }
#ifndef NDEBUG
  if (SuccSU->NumPredsLeft == 0) {
    dbgs() << "*** Scheduling failed! ***\n";
    SuccSU->dump(this);
    dbgs() << " has been released too many times!\n";
    llvm_unreachable(0);
  }
#endif
  --SuccSU->NumPredsLeft;

  // Standard scheduler algorithms will recompute the depth of the successor
  // here as such:
  //   SuccSU->setDepthToAtLeast(SU->getDepth() + SuccEdge->getLatency());
  //
  // However, we lazily compute node depth instead. Note that
  // ScheduleNodeTopDown has already updated the depth of this node, which
  // causes all descendants to be marked dirty. Setting the successor depth
  // explicitly here would cause depth to be recomputed for all its ancestors.
  // If the successor is not yet ready (because of a transitively redundant
  // edge) then this causes depth computation to be quadratic in the size of
  // the DAG.

  // If all the node's predecessors are scheduled, this node is ready
  // to be scheduled. Ignore the special ExitSU node.
  if (SuccSU->NumPredsLeft == 0 && SuccSU != &ExitSU)
    PendingQueue.push_back(SuccSU);
}

/// ReleaseSuccessors - Call ReleaseSucc on each of SU's successors.
void SchedulePostRATDList::ReleaseSuccessors(SUnit *SU) {
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    ReleaseSucc(SU, &*I);
  }
}

/// ScheduleNodeTopDown - Add the node to the schedule. Decrement the pending
/// count of its successors. If a successor pending count is zero, add it to
/// the Available queue.
void SchedulePostRATDList::ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle) {
  DEBUG(dbgs() << "*** Scheduling [" << CurCycle << "]: ");
  DEBUG(SU->dump(this));

  Sequence.push_back(SU);
  assert(CurCycle >= SU->getDepth() &&
         "Node scheduled above its depth!");
  SU->setDepthToAtLeast(CurCycle);

  ReleaseSuccessors(SU);
  SU->isScheduled = true;
  AvailableQueue.scheduledNode(SU);
}

/// ListScheduleTopDown - The main loop of list scheduling for top-down
/// schedulers.
void SchedulePostRATDList::ListScheduleTopDown() {
  unsigned CurCycle = 0;

  // We're scheduling top-down but we're visiting the regions in
  // bottom-up order, so we don't know the hazards at the start of a
  // region. So assume no hazards (this should usually be ok as most
  // blocks are a single region).
  HazardRec->Reset();

  // Release any successors of the special Entry node.
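  // EntrySU carries no instruction; this simply seeds the pending queue with
  // nodes whose only predecessor is the region entry.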
  ReleaseSuccessors(&EntrySU);

  // Add all leaves to Available queue.
  for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
    // It is available if it has no predecessors.
    if (!SUnits[i].NumPredsLeft && !SUnits[i].isAvailable) {
      AvailableQueue.push(&SUnits[i]);
      SUnits[i].isAvailable = true;
    }
  }

  // In any cycle where we can't schedule any instructions, we must
  // stall or emit a noop, depending on the target.
  bool CycleHasInsts = false;

  // While Available queue is not empty, grab the node with the highest
  // priority. If it is not ready, put it back. Schedule the node.
  std::vector<SUnit*> NotReady;
  Sequence.reserve(SUnits.size());
  while (!AvailableQueue.empty() || !PendingQueue.empty()) {
    // Check to see if any of the pending instructions are ready to issue. If
    // so, add them to the available queue.
    unsigned MinDepth = ~0u;
    for (unsigned i = 0, e = PendingQueue.size(); i != e; ++i) {
      if (PendingQueue[i]->getDepth() <= CurCycle) {
        AvailableQueue.push(PendingQueue[i]);
        PendingQueue[i]->isAvailable = true;
        PendingQueue[i] = PendingQueue.back();
        PendingQueue.pop_back();
        --i; --e;
      } else if (PendingQueue[i]->getDepth() < MinDepth)
        MinDepth = PendingQueue[i]->getDepth();
    }

    DEBUG(dbgs() << "\n*** Examining Available\n"; AvailableQueue.dump(this));

    SUnit *FoundSUnit = 0;
    bool HasNoopHazards = false;
    while (!AvailableQueue.empty()) {
      SUnit *CurSUnit = AvailableQueue.pop();

      ScheduleHazardRecognizer::HazardType HT =
        HazardRec->getHazardType(CurSUnit, 0/*no stalls*/);
      if (HT == ScheduleHazardRecognizer::NoHazard) {
        FoundSUnit = CurSUnit;
        break;
      }

      // Remember if this is a noop hazard.
      HasNoopHazards |= HT == ScheduleHazardRecognizer::NoopHazard;

      NotReady.push_back(CurSUnit);
    }

    // Add the nodes that aren't ready back onto the available list.
    if (!NotReady.empty()) {
      AvailableQueue.push_all(NotReady);
      NotReady.clear();
    }

    // If we found a node to schedule...
    if (FoundSUnit) {
      // ... schedule the node...
      ScheduleNodeTopDown(FoundSUnit, CurCycle);
      HazardRec->EmitInstruction(FoundSUnit);
      CycleHasInsts = true;
      if (HazardRec->atIssueLimit()) {
        DEBUG(dbgs() << "*** Max instructions per cycle " << CurCycle << '\n');
        HazardRec->AdvanceCycle();
        ++CurCycle;
        CycleHasInsts = false;
      }
    } else {
      if (CycleHasInsts) {
        DEBUG(dbgs() << "*** Finished cycle " << CurCycle << '\n');
        HazardRec->AdvanceCycle();
      } else if (!HasNoopHazards) {
        // Otherwise, we have a pipeline stall, but no other problem,
        // just advance the current cycle and try again.
        DEBUG(dbgs() << "*** Stall in cycle " << CurCycle << '\n');
        HazardRec->AdvanceCycle();
        ++NumStalls;
      } else {
        // Otherwise, we have no instructions to issue and we have instructions
        // that will fault if we don't do this right. This is the case for
        // processors without pipeline interlocks and other cases.
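        // (A classic example is the original MIPS pipeline, which had no
        // load interlocks: a noop must fill the load delay slot when nothing
        // else can.)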
        DEBUG(dbgs() << "*** Emitting noop in cycle " << CurCycle << '\n');
        HazardRec->EmitNoop();
        Sequence.push_back(0);   // NULL here means noop
        ++NumNoops;
      }

      ++CurCycle;
      CycleHasInsts = false;
    }
  }

#ifndef NDEBUG
  unsigned ScheduledNodes = VerifyScheduledDAG(/*isBottomUp=*/false);
  unsigned Noops = 0;
  for (unsigned i = 0, e = Sequence.size(); i != e; ++i)
    if (!Sequence[i])
      ++Noops;
  assert(Sequence.size() - Noops == ScheduledNodes &&
         "The number of nodes scheduled doesn't match the expected number!");
#endif // NDEBUG
}

// EmitSchedule - Emit the machine code in scheduled order.
void SchedulePostRATDList::EmitSchedule() {
  RegionBegin = RegionEnd;

  // If the first instruction was a DBG_VALUE then put it back.
  if (FirstDbgValue)
    BB->splice(RegionEnd, BB, FirstDbgValue);

  // Then re-insert them according to the given schedule.
  for (unsigned i = 0, e = Sequence.size(); i != e; i++) {
    if (SUnit *SU = Sequence[i])
      BB->splice(RegionEnd, BB, SU->getInstr());
    else
      // Null SUnit* is a noop.
      TII->insertNoop(*BB, RegionEnd);

    // Update the Begin iterator, as the first instruction in the block
    // may have been scheduled later.
    if (i == 0)
      RegionBegin = prior(RegionEnd);
  }

  // Reinsert any remaining debug_values.
  for (std::vector<std::pair<MachineInstr *, MachineInstr *> >::iterator
         DI = DbgValues.end(), DE = DbgValues.begin(); DI != DE; --DI) {
    std::pair<MachineInstr *, MachineInstr *> P = *prior(DI);
    MachineInstr *DbgValue = P.first;
    MachineBasicBlock::iterator OrigPrivMI = P.second;
    BB->splice(++OrigPrivMI, BB, DbgValue);
  }
  DbgValues.clear();
  FirstDbgValue = NULL;
}