//===- MachineScheduler.cpp - Machine Instruction Scheduler ---------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// MachineScheduler schedules machine instructions after phi elimination. It
// preserves LiveIntervals so it can be invoked before register allocation.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "misched"

#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/MachineScheduler.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/RegisterClassInfo.h"
#include "llvm/CodeGen/RegisterPressure.h"
#include "llvm/CodeGen/ScheduleDAGInstrs.h"
#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/MC/MCInstrItineraries.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/OwningPtr.h"
#include "llvm/ADT/PriorityQueue.h"

#include <queue>

using namespace llvm;

// Command-line overrides for the scheduling direction. The two flags are
// mutually exclusive (asserted in ConvergingScheduler::initialize).
static cl::opt<bool> ForceTopDown("misched-topdown", cl::Hidden,
                                  cl::desc("Force top-down list scheduling"));
static cl::opt<bool> ForceBottomUp("misched-bottomup", cl::Hidden,
                                  cl::desc("Force bottom-up list scheduling"));

#ifndef NDEBUG
static cl::opt<bool> ViewMISchedDAGs("view-misched-dags", cl::Hidden,
  cl::desc("Pop up a window to show MISched dags after they are processed"));

// Debug-only knob: stop scheduling after N instructions; ~0U means no limit.
static cl::opt<unsigned> MISchedCutoff("misched-cutoff", cl::Hidden,
  cl::desc("Stop scheduling after N instructions"), cl::init(~0U));
#else
static bool ViewMISchedDAGs = false;
#endif // NDEBUG

//===----------------------------------------------------------------------===//
// Machine Instruction Scheduling Pass and Registry
//===----------------------------------------------------------------------===//

// The context owns its RegisterClassInfo; the analysis pointers are filled in
// by MachineScheduler::runOnMachineFunction before each scheduler invocation.
MachineSchedContext::MachineSchedContext():
    MF(0), MLI(0), MDT(0), PassConfig(0), AA(0), LIS(0) {
  RegClassInfo = new RegisterClassInfo();
}

MachineSchedContext::~MachineSchedContext() {
  delete RegClassInfo;
}

namespace {
/// MachineScheduler runs after coalescing and before register allocation.
class MachineScheduler : public MachineSchedContext,
                         public MachineFunctionPass {
public:
  MachineScheduler();

  virtual void getAnalysisUsage(AnalysisUsage &AU) const;

  virtual void releaseMemory() {}

  virtual bool runOnMachineFunction(MachineFunction&);

  virtual void print(raw_ostream &O, const Module* = 0) const;

  static char ID; // Class identification, replacement for typeinfo
};
} // namespace

char MachineScheduler::ID = 0;

char &llvm::MachineSchedulerID = MachineScheduler::ID;

INITIALIZE_PASS_BEGIN(MachineScheduler, "misched",
                      "Machine Instruction Scheduler", false, false)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
INITIALIZE_PASS_END(MachineScheduler, "misched",
                    "Machine Instruction Scheduler", false, false)

MachineScheduler::MachineScheduler()
: MachineFunctionPass(ID) {
  initializeMachineSchedulerPass(*PassRegistry::getPassRegistry());
}

// The pass preserves the CFG and live interval analysis; it requires the
// analyses that seed the per-region DAG build and pressure tracking.
void MachineScheduler::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequiredID(MachineDominatorsID);
  AU.addRequired<MachineLoopInfo>();
  AU.addRequired<AliasAnalysis>();
  AU.addRequired<TargetPassConfig>();
  AU.addRequired<SlotIndexes>();
  AU.addPreserved<SlotIndexes>();
  AU.addRequired<LiveIntervals>();
  AU.addPreserved<LiveIntervals>();

  MachineFunctionPass::getAnalysisUsage(AU);
}

MachinePassRegistry MachineSchedRegistry::Registry;

/// A dummy default scheduler factory indicates whether the scheduler
/// is overridden on the command line.
static ScheduleDAGInstrs *useDefaultMachineSched(MachineSchedContext *C) {
  return 0;
}

/// MachineSchedOpt allows command line selection of the scheduler.
static cl::opt<MachineSchedRegistry::ScheduleDAGCtor, false,
               RegisterPassParser<MachineSchedRegistry> >
MachineSchedOpt("misched",
                cl::init(&useDefaultMachineSched), cl::Hidden,
                cl::desc("Machine instruction scheduler to use"));

static MachineSchedRegistry
DefaultSchedRegistry("default", "Use the target's default scheduler choice.",
                     useDefaultMachineSched);

/// Forward declare the standard machine scheduler. This will be used as the
/// default scheduler if the target does not set a default.
static ScheduleDAGInstrs *createConvergingSched(MachineSchedContext *C);


/// Decrement this iterator until reaching the top or a non-debug instr.
/// Precondition: I != Beg (asserted); Beg itself is returned even if it is a
/// debug value.
static MachineBasicBlock::iterator
priorNonDebug(MachineBasicBlock::iterator I, MachineBasicBlock::iterator Beg) {
  assert(I != Beg && "reached the top of the region, cannot decrement");
  while (--I != Beg) {
    if (!I->isDebugValue())
      break;
  }
  return I;
}

/// If this iterator is a debug value, increment until reaching the End or a
/// non-debug instruction.
static MachineBasicBlock::iterator
nextIfDebug(MachineBasicBlock::iterator I, MachineBasicBlock::iterator End) {
  for(; I != End; ++I) {
    if (!I->isDebugValue())
      break;
  }
  return I;
}

/// Top-level MachineScheduler pass driver.
///
/// Visit blocks in function order. Divide each block into scheduling regions
/// and visit them bottom-up.
Visiting regions bottom-up is not required, but is 165 /// consistent with the DAG builder, which traverses the interior of the 166 /// scheduling regions bottom-up. 167 /// 168 /// This design avoids exposing scheduling boundaries to the DAG builder, 169 /// simplifying the DAG builder's support for "special" target instructions. 170 /// At the same time the design allows target schedulers to operate across 171 /// scheduling boundaries, for example to bundle the boudary instructions 172 /// without reordering them. This creates complexity, because the target 173 /// scheduler must update the RegionBegin and RegionEnd positions cached by 174 /// ScheduleDAGInstrs whenever adding or removing instructions. A much simpler 175 /// design would be to split blocks at scheduling boundaries, but LLVM has a 176 /// general bias against block splitting purely for implementation simplicity. 177 bool MachineScheduler::runOnMachineFunction(MachineFunction &mf) { 178 DEBUG(dbgs() << "Before MISsched:\n"; mf.print(dbgs())); 179 180 // Initialize the context of the pass. 181 MF = &mf; 182 MLI = &getAnalysis<MachineLoopInfo>(); 183 MDT = &getAnalysis<MachineDominatorTree>(); 184 PassConfig = &getAnalysis<TargetPassConfig>(); 185 AA = &getAnalysis<AliasAnalysis>(); 186 187 LIS = &getAnalysis<LiveIntervals>(); 188 const TargetInstrInfo *TII = MF->getTarget().getInstrInfo(); 189 190 RegClassInfo->runOnMachineFunction(*MF); 191 192 // Select the scheduler, or set the default. 193 MachineSchedRegistry::ScheduleDAGCtor Ctor = MachineSchedOpt; 194 if (Ctor == useDefaultMachineSched) { 195 // Get the default scheduler set by the target. 196 Ctor = MachineSchedRegistry::getDefault(); 197 if (!Ctor) { 198 Ctor = createConvergingSched; 199 MachineSchedRegistry::setDefault(Ctor); 200 } 201 } 202 // Instantiate the selected scheduler. 203 OwningPtr<ScheduleDAGInstrs> Scheduler(Ctor(this)); 204 205 // Visit all machine basic blocks. 
  //
  // TODO: Visit blocks in global postorder or postorder within the bottom-up
  // loop tree. Then we can optionally compute global RegPressure.
  for (MachineFunction::iterator MBB = MF->begin(), MBBEnd = MF->end();
       MBB != MBBEnd; ++MBB) {

    Scheduler->startBlock(MBB);

    // Break the block into scheduling regions [I, RegionEnd), and schedule each
    // region as soon as it is discovered. RegionEnd points to the scheduling
    // boundary at the bottom of the region. The DAG does not include RegionEnd,
    // but the region does (i.e. the next RegionEnd is above the previous
    // RegionBegin). If the current block has no terminator then RegionEnd ==
    // MBB->end() for the bottom region.
    //
    // The Scheduler may insert instructions during either schedule() or
    // exitRegion(), even for empty regions. So the local iterators 'I' and
    // 'RegionEnd' are invalid across these calls.
    unsigned RemainingCount = MBB->size();
    for(MachineBasicBlock::iterator RegionEnd = MBB->end();
        RegionEnd != MBB->begin(); RegionEnd = Scheduler->begin()) {

      // Avoid decrementing RegionEnd for blocks with no terminator.
      if (RegionEnd != MBB->end()
          || TII->isSchedulingBoundary(llvm::prior(RegionEnd), MBB, *MF)) {
        --RegionEnd;
        // Count the boundary instruction.
        --RemainingCount;
      }

      // The next region starts above the previous region. Look backward in the
      // instruction stream until we find the nearest boundary.
      MachineBasicBlock::iterator I = RegionEnd;
      for(;I != MBB->begin(); --I, --RemainingCount) {
        if (TII->isSchedulingBoundary(llvm::prior(I), MBB, *MF))
          break;
      }
      // Notify the scheduler of the region, even if we may skip scheduling
      // it. Perhaps it still needs to be bundled.
      Scheduler->enterRegion(MBB, I, RegionEnd, RemainingCount);

      // Skip empty scheduling regions (0 or 1 schedulable instructions).
      if (I == RegionEnd || I == llvm::prior(RegionEnd)) {
        // Close the current region. Bundle the terminator if needed.
        // This invalidates 'RegionEnd' and 'I'.
        Scheduler->exitRegion();
        continue;
      }
      DEBUG(dbgs() << "********** MI Scheduling **********\n");
      DEBUG(dbgs() << MF->getName()
            << ":BB#" << MBB->getNumber() << "\n From: " << *I << " To: ";
            if (RegionEnd != MBB->end()) dbgs() << *RegionEnd;
            else dbgs() << "End";
            dbgs() << " Remaining: " << RemainingCount << "\n");

      // Schedule a region: possibly reorder instructions.
      // This invalidates 'RegionEnd' and 'I'.
      Scheduler->schedule();

      // Close the current region.
      Scheduler->exitRegion();

      // Scheduling has invalidated the current iterator 'I'. Ask the
      // scheduler for the top of its scheduled region.
      RegionEnd = Scheduler->begin();
    }
    assert(RemainingCount == 0 && "Instruction count mismatch!");
    Scheduler->finishBlock();
  }
  Scheduler->finalizeSchedule();
  DEBUG(LIS->print(dbgs()));
  return true;
}

void MachineScheduler::print(raw_ostream &O, const Module* m) const {
  // unimplemented
}

//===----------------------------------------------------------------------===//
// MachineSchedStrategy - Interface to a machine scheduling algorithm.
//===----------------------------------------------------------------------===//

namespace {
class ScheduleDAGMI;

/// MachineSchedStrategy - Interface used by ScheduleDAGMI to drive the selected
/// scheduling algorithm.
///
/// If this works well and targets wish to reuse ScheduleDAGMI, we may expose it
/// in ScheduleDAGInstrs.h
class MachineSchedStrategy {
public:
  virtual ~MachineSchedStrategy() {}

  /// Initialize the strategy after building the DAG for a new region.
  virtual void initialize(ScheduleDAGMI *DAG) = 0;

  /// Pick the next node to schedule, or return NULL.
  /// Set IsTopNode to true to
  /// schedule the node at the top of the unscheduled region. Otherwise it will
  /// be scheduled at the bottom.
  virtual SUnit *pickNode(bool &IsTopNode) = 0;

  /// Notify MachineSchedStrategy that ScheduleDAGMI has scheduled a node.
  virtual void schedNode(SUnit *SU, bool IsTopNode) = 0;

  /// When all predecessor dependencies have been resolved, free this node for
  /// top-down scheduling.
  virtual void releaseTopNode(SUnit *SU) = 0;
  /// When all successor dependencies have been resolved, free this node for
  /// bottom-up scheduling.
  virtual void releaseBottomNode(SUnit *SU) = 0;
};
} // namespace

//===----------------------------------------------------------------------===//
// ScheduleDAGMI - Base class for MachineInstr scheduling with LiveIntervals
// preservation.
//===----------------------------------------------------------------------===//

namespace {
/// ScheduleDAGMI is an implementation of ScheduleDAGInstrs that schedules
/// machine instructions while updating LiveIntervals.
class ScheduleDAGMI : public ScheduleDAGInstrs {
  AliasAnalysis *AA;
  RegisterClassInfo *RegClassInfo;
  MachineSchedStrategy *SchedImpl;

  /// End of the range with liveness to account for: set by enterRegion() to
  /// one past RegionEnd, or RegionEnd itself when it is the block end.
  MachineBasicBlock::iterator LiveRegionEnd;

  /// Register pressure in this region computed by buildSchedGraph.
  IntervalPressure RegPressure;
  RegPressureTracker RPTracker;

  /// List of pressure sets that exceed the target's pressure limit before
  /// scheduling, listed in increasing set ID order. Each pressure set is paired
  /// with its max pressure in the currently scheduled regions.
  std::vector<PressureElement> RegionCriticalPSets;

  /// The top of the unscheduled zone.
  MachineBasicBlock::iterator CurrentTop;
  IntervalPressure TopPressure;
  RegPressureTracker TopRPTracker;

  /// The bottom of the unscheduled zone.
  MachineBasicBlock::iterator CurrentBottom;
  IntervalPressure BotPressure;
  RegPressureTracker BotRPTracker;

#ifndef NDEBUG
  /// The number of instructions scheduled so far. Used to cut off the
  /// scheduler at the point determined by misched-cutoff.
  unsigned NumInstrsScheduled;
#endif
public:
  ScheduleDAGMI(MachineSchedContext *C, MachineSchedStrategy *S):
    ScheduleDAGInstrs(*C->MF, *C->MLI, *C->MDT, /*IsPostRA=*/false, C->LIS),
    AA(C->AA), RegClassInfo(C->RegClassInfo), SchedImpl(S),
    RPTracker(RegPressure), CurrentTop(), TopRPTracker(TopPressure),
    CurrentBottom(), BotRPTracker(BotPressure) {
#ifndef NDEBUG
    NumInstrsScheduled = 0;
#endif
  }

  // Takes ownership of the strategy passed to the constructor.
  ~ScheduleDAGMI() {
    delete SchedImpl;
  }

  MachineBasicBlock::iterator top() const { return CurrentTop; }
  MachineBasicBlock::iterator bottom() const { return CurrentBottom; }

  /// Implement the ScheduleDAGInstrs interface for handling the next scheduling
  /// region. This covers all instructions in a block, while schedule() may only
  /// cover a subset.
  void enterRegion(MachineBasicBlock *bb,
                   MachineBasicBlock::iterator begin,
                   MachineBasicBlock::iterator end,
                   unsigned endcount);

  /// Implement ScheduleDAGInstrs interface for scheduling a sequence of
  /// reorderable instructions.
  void schedule();

  /// Get current register pressure for the top scheduled instructions.
  const IntervalPressure &getTopPressure() const { return TopPressure; }
  const RegPressureTracker &getTopRPTracker() const { return TopRPTracker; }

  /// Get current register pressure for the bottom scheduled instructions.
  const IntervalPressure &getBotPressure() const { return BotPressure; }
  const RegPressureTracker &getBotRPTracker() const { return BotRPTracker; }

  /// Get register pressure for the entire scheduling region before scheduling.
  const IntervalPressure &getRegPressure() const { return RegPressure; }

  const std::vector<PressureElement> &getRegionCriticalPSets() const {
    return RegionCriticalPSets;
  }

  /// getIssueWidth - Return the max instructions per scheduling group.
  /// Falls back to 1 when no itinerary/scheduling model is available.
  unsigned getIssueWidth() const {
    return (InstrItins && InstrItins->SchedModel)
      ? InstrItins->SchedModel->IssueWidth : 1;
  }

  /// getNumMicroOps - Return the number of issue slots required for this MI.
  unsigned getNumMicroOps(MachineInstr *MI) const {
    if (!InstrItins) return 1;
    int UOps = InstrItins->getNumMicroOps(MI->getDesc().getSchedClass());
    return (UOps >= 0) ? UOps : TII->getNumMicroOps(InstrItins, MI);
  }

protected:
  void initRegPressure();

  // NOTE(review): NewMaxPressure is taken by value, copying the vector on
  // every scheduled node; a const reference would avoid that. Changing it
  // requires touching the out-of-line definition as well.
  void updateScheduledPressure(std::vector<unsigned> NewMaxPressure);

  void moveInstruction(MachineInstr *MI, MachineBasicBlock::iterator InsertPos);
  bool checkSchedLimit();

  void releaseRoots();

  void releaseSucc(SUnit *SU, SDep *SuccEdge);
  void releaseSuccessors(SUnit *SU);
  void releasePred(SUnit *SU, SDep *PredEdge);
  void releasePredecessors(SUnit *SU);

  void placeDebugValues();
};
} // namespace

/// ReleaseSucc - Decrement the NumPredsLeft count of a successor. When
/// NumPredsLeft reaches zero, release the successor node.
///
/// FIXME: Adjust SuccSU height based on MinLatency.
void ScheduleDAGMI::releaseSucc(SUnit *SU, SDep *SuccEdge) {
  SUnit *SuccSU = SuccEdge->getSUnit();

#ifndef NDEBUG
  // Releasing a node more times than it has predecessors indicates a broken
  // DAG; dump the offender before aborting.
  if (SuccSU->NumPredsLeft == 0) {
    dbgs() << "*** Scheduling failed! ***\n";
    SuccSU->dump(this);
    dbgs() << " has been released too many times!\n";
    llvm_unreachable(0);
  }
#endif
  --SuccSU->NumPredsLeft;
  if (SuccSU->NumPredsLeft == 0 && SuccSU != &ExitSU)
    SchedImpl->releaseTopNode(SuccSU);
}

/// releaseSuccessors - Call releaseSucc on each of SU's successors.
void ScheduleDAGMI::releaseSuccessors(SUnit *SU) {
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    releaseSucc(SU, &*I);
  }
}

/// ReleasePred - Decrement the NumSuccsLeft count of a predecessor. When
/// NumSuccsLeft reaches zero, release the predecessor node.
///
/// FIXME: Adjust PredSU height based on MinLatency.
void ScheduleDAGMI::releasePred(SUnit *SU, SDep *PredEdge) {
  SUnit *PredSU = PredEdge->getSUnit();

#ifndef NDEBUG
  // Mirror of releaseSucc's over-release check, for the bottom-up direction.
  if (PredSU->NumSuccsLeft == 0) {
    dbgs() << "*** Scheduling failed! ***\n";
    PredSU->dump(this);
    dbgs() << " has been released too many times!\n";
    llvm_unreachable(0);
  }
#endif
  --PredSU->NumSuccsLeft;
  if (PredSU->NumSuccsLeft == 0 && PredSU != &EntrySU)
    SchedImpl->releaseBottomNode(PredSU);
}

/// releasePredecessors - Call releasePred on each of SU's predecessors.
void ScheduleDAGMI::releasePredecessors(SUnit *SU) {
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    releasePred(SU, &*I);
  }
}

/// Splice MI to InsertPos within the current block, keeping RegionBegin and
/// LiveIntervals consistent with the move.
void ScheduleDAGMI::moveInstruction(MachineInstr *MI,
                                    MachineBasicBlock::iterator InsertPos) {
  // Advance RegionBegin if the first instruction moves down.
  if (&*RegionBegin == MI)
    ++RegionBegin;

  // Update the instruction stream.
  BB->splice(InsertPos, BB, MI);

  // Update LiveIntervals
  LIS->handleMove(MI);

  // Recede RegionBegin if an instruction moves above the first.
  if (RegionBegin == InsertPos)
    RegionBegin = MI;
}

/// Return true if scheduling may continue. In debug builds, returns false once
/// the misched-cutoff instruction limit is reached; in release builds this is
/// always true.
bool ScheduleDAGMI::checkSchedLimit() {
#ifndef NDEBUG
  if (NumInstrsScheduled == MISchedCutoff && MISchedCutoff != ~0U) {
    // Collapse the unscheduled zone so schedule()'s final assert still holds.
    CurrentTop = CurrentBottom;
    return false;
  }
  ++NumInstrsScheduled;
#endif
  return true;
}

/// enterRegion - Called back from MachineScheduler::runOnMachineFunction after
/// crossing a scheduling boundary. [begin, end) includes all instructions in
/// the region, including the boundary itself and single-instruction regions
/// that don't get scheduled.
void ScheduleDAGMI::enterRegion(MachineBasicBlock *bb,
                                MachineBasicBlock::iterator begin,
                                MachineBasicBlock::iterator end,
                                unsigned endcount)
{
  ScheduleDAGInstrs::enterRegion(bb, begin, end, endcount);

  // For convenience remember the end of the liveness region.
  LiveRegionEnd =
    (RegionEnd == bb->end()) ? RegionEnd : llvm::next(RegionEnd);
}

// Setup the register pressure trackers for the top and bottom scheduled
// regions.
void ScheduleDAGMI::initRegPressure() {
  TopRPTracker.init(&MF, RegClassInfo, LIS, BB, RegionBegin);
  BotRPTracker.init(&MF, RegClassInfo, LIS, BB, LiveRegionEnd);

  // Close the RPTracker to finalize live ins.
  RPTracker.closeRegion();

  DEBUG(RPTracker.getPressure().dump(TRI));

  // Initialize the live ins and live outs.
  TopRPTracker.addLiveRegs(RPTracker.getPressure().LiveInRegs);
  BotRPTracker.addLiveRegs(RPTracker.getPressure().LiveOutRegs);

  // Close one end of the tracker so we can call
  // getMaxUpward/DownwardPressureDelta before advancing across any
  // instructions. This converts currently live regs into live ins/outs.
  TopRPTracker.closeTop();
  BotRPTracker.closeBottom();

  // Account for liveness generated by the region boundary.
  if (LiveRegionEnd != RegionEnd)
    BotRPTracker.recede();

  assert(BotRPTracker.getPos() == RegionEnd && "Can't find the region bottom");

  // Cache the list of excess pressure sets in this region. This will also track
  // the max pressure in the scheduled code for these sets.
  RegionCriticalPSets.clear();
  std::vector<unsigned> RegionPressure = RPTracker.getPressure().MaxSetPressure;
  for (unsigned i = 0, e = RegionPressure.size(); i < e; ++i) {
    unsigned Limit = TRI->getRegPressureSetLimit(i);
    if (RegionPressure[i] > Limit)
      RegionCriticalPSets.push_back(PressureElement(i, 0));
  }
  DEBUG(dbgs() << "Excess PSets: ";
        for (unsigned i = 0, e = RegionCriticalPSets.size(); i != e; ++i)
          dbgs() << TRI->getRegPressureSetName(
            RegionCriticalPSets[i].PSetID) << " ";
        dbgs() << "\n");
}

// FIXME: When the pressure tracker deals in pressure differences then we won't
// iterate over all RegionCriticalPSets[i].
void ScheduleDAGMI::
updateScheduledPressure(std::vector<unsigned> NewMaxPressure) {
  // Record the new running max for every pressure set that was already
  // critical when the region was entered.
  for (unsigned i = 0, e = RegionCriticalPSets.size(); i < e; ++i) {
    unsigned ID = RegionCriticalPSets[i].PSetID;
    int &MaxUnits = RegionCriticalPSets[i].UnitIncrease;
    if ((int)NewMaxPressure[ID] > MaxUnits)
      MaxUnits = NewMaxPressure[ID];
  }
}

// Release all DAG roots for scheduling.
void ScheduleDAGMI::releaseRoots() {
  SmallVector<SUnit*, 16> BotRoots;

  for (std::vector<SUnit>::iterator
         I = SUnits.begin(), E = SUnits.end(); I != E; ++I) {
    // A SUnit is ready to top schedule if it has no predecessors.
    if (I->Preds.empty())
      SchedImpl->releaseTopNode(&(*I));
    // A SUnit is ready to bottom schedule if it has no successors.
    if (I->Succs.empty())
      BotRoots.push_back(&(*I));
  }
  // Release bottom roots in reverse order so the higher priority nodes appear
  // first. This is more natural and slightly more efficient.
  for (SmallVectorImpl<SUnit*>::const_reverse_iterator
         I = BotRoots.rbegin(), E = BotRoots.rend(); I != E; ++I)
    SchedImpl->releaseBottomNode(*I);
}

/// schedule - Called back from MachineScheduler::runOnMachineFunction
/// after setting up the current scheduling region. [RegionBegin, RegionEnd)
/// only includes instructions that have DAG nodes, not scheduling boundaries.
void ScheduleDAGMI::schedule() {
  // Initialize the register pressure tracker used by buildSchedGraph.
  RPTracker.init(&MF, RegClassInfo, LIS, BB, LiveRegionEnd);

  // Account for liveness generated by the region boundary.
  if (LiveRegionEnd != RegionEnd)
    RPTracker.recede();

  // Build the DAG, and compute current register pressure.
  buildSchedGraph(AA, &RPTracker);

  // Initialize top/bottom trackers after computing region pressure.
  initRegPressure();

  DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
          SUnits[su].dumpAll(this));

  if (ViewMISchedDAGs) viewGraph();

  SchedImpl->initialize(this);

  // Release edges from the special Entry node or to the special Exit node.
  releaseSuccessors(&EntrySU);
  releasePredecessors(&ExitSU);

  // Release all DAG roots for scheduling.
  releaseRoots();

  // Main loop: repeatedly ask the strategy for a node and shrink the
  // unscheduled zone [CurrentTop, CurrentBottom) from whichever end it chose.
  CurrentTop = nextIfDebug(RegionBegin, RegionEnd);
  CurrentBottom = RegionEnd;
  bool IsTopNode = false;
  while (SUnit *SU = SchedImpl->pickNode(IsTopNode)) {
    if (!checkSchedLimit())
      break;

    // Move the instruction to its new location in the instruction stream.
    MachineInstr *MI = SU->getInstr();

    if (IsTopNode) {
      assert(SU->isTopReady() && "node still has unscheduled dependencies");
      if (&*CurrentTop == MI)
        CurrentTop = nextIfDebug(++CurrentTop, CurrentBottom);
      else {
        moveInstruction(MI, CurrentTop);
        TopRPTracker.setPos(MI);
      }

      // Update top scheduled pressure.
      TopRPTracker.advance();
      assert(TopRPTracker.getPos() == CurrentTop && "out of sync");
      updateScheduledPressure(TopRPTracker.getPressure().MaxSetPressure);

      // Release dependent instructions for scheduling.
      releaseSuccessors(SU);
    }
    else {
      assert(SU->isBottomReady() && "node still has unscheduled dependencies");
      MachineBasicBlock::iterator priorII =
        priorNonDebug(CurrentBottom, CurrentTop);
      if (&*priorII == MI)
        CurrentBottom = priorII;
      else {
        // If the node headed for the bottom currently sits at the top of the
        // unscheduled zone, advance the top past it before splicing.
        if (&*CurrentTop == MI) {
          CurrentTop = nextIfDebug(++CurrentTop, priorII);
          TopRPTracker.setPos(CurrentTop);
        }
        moveInstruction(MI, CurrentBottom);
        CurrentBottom = MI;
      }
      // Update bottom scheduled pressure.
      BotRPTracker.recede();
      assert(BotRPTracker.getPos() == CurrentBottom && "out of sync");
      updateScheduledPressure(BotRPTracker.getPressure().MaxSetPressure);

      // Release dependent instructions for scheduling.
      releasePredecessors(SU);
    }
    SU->isScheduled = true;
    SchedImpl->schedNode(SU, IsTopNode);
  }
  assert(CurrentTop == CurrentBottom && "Nonempty unscheduled zone.");

  placeDebugValues();
}

/// Reinsert any remaining debug_values, just like the PostRA scheduler.
void ScheduleDAGMI::placeDebugValues() {
  // If first instruction was a DBG_VALUE then put it back.
  if (FirstDbgValue) {
    BB->splice(RegionBegin, BB, FirstDbgValue);
    RegionBegin = FirstDbgValue;
  }

  // Walk the saved (DBG_VALUE, preceding instruction) pairs in reverse and
  // splice each DBG_VALUE back in directly after the instruction it followed.
  for (std::vector<std::pair<MachineInstr *, MachineInstr *> >::iterator
         DI = DbgValues.end(), DE = DbgValues.begin(); DI != DE; --DI) {
    std::pair<MachineInstr *, MachineInstr *> P = *prior(DI);
    MachineInstr *DbgValue = P.first;
    MachineBasicBlock::iterator OrigPrevMI = P.second;
    BB->splice(++OrigPrevMI, BB, DbgValue);
    if (OrigPrevMI == llvm::prior(RegionEnd))
      RegionEnd = DbgValue;
  }
  DbgValues.clear();
  FirstDbgValue = NULL;
}

//===----------------------------------------------------------------------===//
// ConvergingScheduler - Implementation of the standard MachineSchedStrategy.
//===----------------------------------------------------------------------===//

namespace {
/// ReadyQueue encapsulates vector of "ready" SUnits with basic convenience
/// methods for pushing and removing nodes. ReadyQueue's are uniquely identified
/// by an ID. SUnit::NodeQueueId is a mask of the ReadyQueues the SUnit is in.
class ReadyQueue {
  unsigned ID;
  std::string Name;
  std::vector<SUnit*> Queue;

public:
  ReadyQueue(unsigned id, const Twine &name): ID(id), Name(name.str()) {}

  unsigned getID() const { return ID; }

  StringRef getName() const { return Name; }

  // SU is in this queue if its NodeQueueId is a superset of this ID.
  bool isInQueue(SUnit *SU) const { return (SU->NodeQueueId & ID); }

  bool empty() const { return Queue.empty(); }

  unsigned size() const { return Queue.size(); }

  typedef std::vector<SUnit*>::iterator iterator;

  iterator begin() { return Queue.begin(); }

  iterator end() { return Queue.end(); }

  iterator find(SUnit *SU) {
    return std::find(Queue.begin(), Queue.end(), SU);
  }

  void push(SUnit *SU) {
    Queue.push_back(SU);
    SU->NodeQueueId |= ID;
  }

  // Swap-with-back removal: O(1), but does not preserve queue order and
  // invalidates iterators at or after I.
  void remove(iterator I) {
    (*I)->NodeQueueId &= ~ID;
    *I = Queue.back();
    Queue.pop_back();
  }

#ifndef NDEBUG
  void dump() {
    dbgs() << Name << ": ";
    for (unsigned i = 0, e = Queue.size(); i < e; ++i)
      dbgs() << Queue[i]->NodeNum << " ";
    dbgs() << "\n";
  }
#endif
};

/// ConvergingScheduler shrinks the unscheduled zone using heuristics to balance
/// the schedule.
class ConvergingScheduler : public MachineSchedStrategy {

  /// Store the state used by ConvergingScheduler heuristics, required for the
  /// lifetime of one invocation of pickNode().
  struct SchedCandidate {
    // The best SUnit candidate.
    SUnit *SU;

    // Register pressure values for the best candidate.
    RegPressureDelta RPDelta;

    SchedCandidate(): SU(NULL) {}
  };
  /// Represent the type of SchedCandidate found within a single queue.
  enum CandResult {
    NoCand, NodeOrder, SingleExcess, SingleCritical, SingleMax, MultiPressure };

  /// Each Scheduling boundary is associated with ready queues. It tracks the
  /// current cycle in whichever direction it has moved, and maintains the state
  /// of "hazards" and other interlocks at the current cycle.
  struct SchedBoundary {
    ScheduleDAGMI *DAG;

    ReadyQueue Available;
    ReadyQueue Pending;
    bool CheckPending;

    // Created by ConvergingScheduler::initialize(); owned here (see dtor).
    ScheduleHazardRecognizer *HazardRec;

    unsigned CurrCycle;
    unsigned IssueCount;

    /// MinReadyCycle - Cycle of the soonest available instruction.
    unsigned MinReadyCycle;

    // Remember the greatest min operand latency.
    unsigned MaxMinLatency;

    /// Pending queues extend the ready queues with the same ID and the
    /// PendingFlag set.
    SchedBoundary(unsigned ID, const Twine &Name):
      DAG(0), Available(ID, Name+".A"),
      Pending(ID << ConvergingScheduler::LogMaxQID, Name+".P"),
      CheckPending(false), HazardRec(0), CurrCycle(0), IssueCount(0),
      MinReadyCycle(UINT_MAX), MaxMinLatency(0) {}

    ~SchedBoundary() { delete HazardRec; }

    bool isTop() const {
      return Available.getID() == ConvergingScheduler::TopQID;
    }

    bool checkHazard(SUnit *SU);

    void releaseNode(SUnit *SU, unsigned ReadyCycle);

    void bumpCycle();

    void bumpNode(SUnit *SU);

    void releasePending();

    void removeReady(SUnit *SU);

    SUnit *pickOnlyChoice();
  };

  ScheduleDAGMI *DAG;
  const TargetRegisterInfo *TRI;

  // State of the top and bottom scheduled instruction boundaries.
  SchedBoundary Top;
  SchedBoundary Bot;

public:
  /// SUnit::NodeQueueId: 0 (none), 1 (top), 2 (bot), 3 (both)
  enum {
    TopQID = 1,
    BotQID = 2,
    LogMaxQID = 2
  };

  ConvergingScheduler():
    DAG(0), TRI(0), Top(TopQID, "TopQ"), Bot(BotQID, "BotQ") {}

  virtual void initialize(ScheduleDAGMI *dag);

  virtual SUnit *pickNode(bool &IsTopNode);

  virtual void schedNode(SUnit *SU, bool IsTopNode);

  virtual void releaseTopNode(SUnit *SU);

  virtual void releaseBottomNode(SUnit *SU);

protected:
  // NOTE(review): "Bidrectional" is misspelled; renaming would also require
  // updating the out-of-view definition, so it is kept as-is here.
  SUnit *pickNodeBidrectional(bool &IsTopNode);

  CandResult pickNodeFromQueue(ReadyQueue &Q,
                               const RegPressureTracker &RPTracker,
                               SchedCandidate &Candidate);
#ifndef NDEBUG
  void traceCandidate(const char *Label, const ReadyQueue &Q, SUnit *SU,
                      PressureElement P = PressureElement());
#endif
};
} // namespace

void ConvergingScheduler::initialize(ScheduleDAGMI *dag) {
  DAG = dag;
  TRI = DAG->TRI;
  Top.DAG = dag;
  Bot.DAG = dag;

  // Initialize the HazardRecognizers.
894 const TargetMachine &TM = DAG->MF.getTarget(); 895 const InstrItineraryData *Itin = TM.getInstrItineraryData(); 896 Top.HazardRec = TM.getInstrInfo()->CreateTargetMIHazardRecognizer(Itin, DAG); 897 Bot.HazardRec = TM.getInstrInfo()->CreateTargetMIHazardRecognizer(Itin, DAG); 898 899 assert((!ForceTopDown || !ForceBottomUp) && 900 "-misched-topdown incompatible with -misched-bottomup"); 901 } 902 903 void ConvergingScheduler::releaseTopNode(SUnit *SU) { 904 if (SU->isScheduled) 905 return; 906 907 for (SUnit::succ_iterator I = SU->Preds.begin(), E = SU->Preds.end(); 908 I != E; ++I) { 909 unsigned PredReadyCycle = I->getSUnit()->TopReadyCycle; 910 unsigned MinLatency = I->getMinLatency(); 911 #ifndef NDEBUG 912 Top.MaxMinLatency = std::max(MinLatency, Top.MaxMinLatency); 913 #endif 914 if (SU->TopReadyCycle < PredReadyCycle + MinLatency) 915 SU->TopReadyCycle = PredReadyCycle + MinLatency; 916 } 917 Top.releaseNode(SU, SU->TopReadyCycle); 918 } 919 920 void ConvergingScheduler::releaseBottomNode(SUnit *SU) { 921 if (SU->isScheduled) 922 return; 923 924 assert(SU->getInstr() && "Scheduled SUnit must have instr"); 925 926 for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end(); 927 I != E; ++I) { 928 unsigned SuccReadyCycle = I->getSUnit()->BotReadyCycle; 929 unsigned MinLatency = I->getMinLatency(); 930 #ifndef NDEBUG 931 Bot.MaxMinLatency = std::max(MinLatency, Bot.MaxMinLatency); 932 #endif 933 if (SU->BotReadyCycle < SuccReadyCycle + MinLatency) 934 SU->BotReadyCycle = SuccReadyCycle + MinLatency; 935 } 936 Bot.releaseNode(SU, SU->BotReadyCycle); 937 } 938 939 /// Does this SU have a hazard within the current instruction group. 940 /// 941 /// The scheduler supports two modes of hazard recognition. The first is the 942 /// ScheduleHazardRecognizer API. It is a fully general hazard recognizer that 943 /// supports highly complicated in-order reservation tables 944 /// (ScoreboardHazardRecognizer) and arbitraty target-specific logic. 
945 /// 946 /// The second is a streamlined mechanism that checks for hazards based on 947 /// simple counters that the scheduler itself maintains. It explicitly checks 948 /// for instruction dispatch limitations, including the number of micro-ops that 949 /// can dispatch per cycle. 950 /// 951 /// TODO: Also check whether the SU must start a new group. 952 bool ConvergingScheduler::SchedBoundary::checkHazard(SUnit *SU) { 953 if (HazardRec->isEnabled()) 954 return HazardRec->getHazardType(SU) != ScheduleHazardRecognizer::NoHazard; 955 956 if (IssueCount + DAG->getNumMicroOps(SU->getInstr()) > DAG->getIssueWidth()) 957 return true; 958 959 return false; 960 } 961 962 void ConvergingScheduler::SchedBoundary::releaseNode(SUnit *SU, 963 unsigned ReadyCycle) { 964 if (ReadyCycle < MinReadyCycle) 965 MinReadyCycle = ReadyCycle; 966 967 // Check for interlocks first. For the purpose of other heuristics, an 968 // instruction that cannot issue appears as if it's not in the ReadyQueue. 969 if (ReadyCycle > CurrCycle || checkHazard(SU)) 970 Pending.push(SU); 971 else 972 Available.push(SU); 973 } 974 975 /// Move the boundary of scheduled code by one cycle. 976 void ConvergingScheduler::SchedBoundary::bumpCycle() { 977 unsigned Width = DAG->getIssueWidth(); 978 IssueCount = (IssueCount <= Width) ? 0 : IssueCount - Width; 979 980 assert(MinReadyCycle < UINT_MAX && "MinReadyCycle uninitialized"); 981 unsigned NextCycle = std::max(CurrCycle + 1, MinReadyCycle); 982 983 if (!HazardRec->isEnabled()) { 984 // Bypass HazardRec virtual calls. 985 CurrCycle = NextCycle; 986 } 987 else { 988 // Bypass getHazardType calls in case of long latency. 989 for (; CurrCycle != NextCycle; ++CurrCycle) { 990 if (isTop()) 991 HazardRec->AdvanceCycle(); 992 else 993 HazardRec->RecedeCycle(); 994 } 995 } 996 CheckPending = true; 997 998 DEBUG(dbgs() << "*** " << Available.getName() << " cycle " 999 << CurrCycle << '\n'); 1000 } 1001 1002 /// Move the boundary of scheduled code by one SUnit. 
void ConvergingScheduler::SchedBoundary::bumpNode(SUnit *SU) {
  // Update the reservation table.
  if (HazardRec->isEnabled()) {
    if (!isTop() && SU->isCall) {
      // Calls are scheduled with their preceding instructions. For bottom-up
      // scheduling, clear the pipeline state before emitting.
      HazardRec->Reset();
    }
    HazardRec->EmitInstruction(SU);
  }
  // Check the instruction group dispatch limit.
  // TODO: Check if this SU must end a dispatch group.
  IssueCount += DAG->getNumMicroOps(SU->getInstr());
  if (IssueCount >= DAG->getIssueWidth()) {
    DEBUG(dbgs() << "*** Max instrs at cycle " << CurrCycle << '\n');
    bumpCycle();
  }
}

/// Release pending ready nodes into the available queue. This makes them
/// visible to heuristics.
void ConvergingScheduler::SchedBoundary::releasePending() {
  // If the available queue is empty, it is safe to reset MinReadyCycle.
  if (Available.empty())
    MinReadyCycle = UINT_MAX;

  // Check to see if any of the pending instructions are ready to issue. If
  // so, add them to the available queue.
  for (unsigned i = 0, e = Pending.size(); i != e; ++i) {
    SUnit *SU = *(Pending.begin()+i);
    unsigned ReadyCycle = isTop() ? SU->TopReadyCycle : SU->BotReadyCycle;

    // Pending nodes also contribute to MinReadyCycle.
    if (ReadyCycle < MinReadyCycle)
      MinReadyCycle = ReadyCycle;

    // Not yet ready in this cycle.
    if (ReadyCycle > CurrCycle)
      continue;

    // Ready, but would interlock; keep it pending.
    if (checkHazard(SU))
      continue;

    Available.push(SU);
    Pending.remove(Pending.begin()+i);
    // remove() presumably replaces index i with the last element — revisit
    // the same index and shrink the bound. TODO(review): confirm against
    // ReadyQueue::remove.
    --i; --e;
  }
  CheckPending = false;
}

/// Remove SU from the ready set for this boundary.
void ConvergingScheduler::SchedBoundary::removeReady(SUnit *SU) {
  if (Available.isInQueue(SU))
    Available.remove(Available.find(SU));
  else {
    assert(Pending.isInQueue(SU) && "bad ready count");
    Pending.remove(Pending.find(SU));
  }
}

/// If this queue only has one ready candidate, return it. As a side effect,
/// advance the cycle until at least one node is ready. If multiple instructions
/// are ready, return NULL.
SUnit *ConvergingScheduler::SchedBoundary::pickOnlyChoice() {
  if (CheckPending)
    releasePending();

  // Advance cycles until something becomes available. Something must become
  // ready within the hazard lookahead plus the largest min-latency seen, or
  // the DAG contains a permanent hazard.
  for (unsigned i = 0; Available.empty(); ++i) {
    assert(i <= (HazardRec->getMaxLookAhead() + MaxMinLatency) &&
           "permanent hazard"); (void)i;
    bumpCycle();
    releasePending();
  }
  if (Available.size() == 1)
    return *Available.begin();
  return NULL;
}

#ifndef NDEBUG
/// Dump a candidate along with the pressure component that motivated its
/// selection (if any).
void ConvergingScheduler::traceCandidate(const char *Label, const ReadyQueue &Q,
                                         SUnit *SU, PressureElement P) {
  dbgs() << Label << " " << Q.getName() << " ";
  if (P.isValid())
    dbgs() << TRI->getRegPressureSetName(P.PSetID) << ":" << P.UnitIncrease
           << " ";
  else
    dbgs() << " ";
  SU->dump(DAG);
}
#endif

/// pickNodeFromQueue helper that returns true if the LHS reg pressure effect is
/// more desirable than RHS from scheduling standpoint.
static bool compareRPDelta(const RegPressureDelta &LHS,
                           const RegPressureDelta &RHS) {
  // Compare each component of pressure in decreasing order of importance
  // without checking if any are valid. Invalid PressureElements are assumed to
  // have UnitIncrease==0, so are neutral.

  // Avoid exceeding the target's limit.
  if (LHS.Excess.UnitIncrease != RHS.Excess.UnitIncrease)
    return LHS.Excess.UnitIncrease < RHS.Excess.UnitIncrease;

  // Avoid increasing the max critical pressure in the scheduled region.
  if (LHS.CriticalMax.UnitIncrease != RHS.CriticalMax.UnitIncrease)
    return LHS.CriticalMax.UnitIncrease < RHS.CriticalMax.UnitIncrease;

  // Avoid increasing the max pressure of the entire region.
  if (LHS.CurrentMax.UnitIncrease != RHS.CurrentMax.UnitIncrease)
    return LHS.CurrentMax.UnitIncrease < RHS.CurrentMax.UnitIncrease;

  return false;
}

/// Pick the best candidate from Q (used for both the top and bottom queues).
///
/// TODO: getMaxPressureDelta results can be mostly cached for each SUnit during
/// DAG building. To adjust for the current scheduling location we need to
/// maintain the number of vreg uses remaining to be top-scheduled.
ConvergingScheduler::CandResult ConvergingScheduler::
pickNodeFromQueue(ReadyQueue &Q, const RegPressureTracker &RPTracker,
                  SchedCandidate &Candidate) {
  DEBUG(Q.dump());

  // getMaxPressureDelta temporarily modifies the tracker.
  RegPressureTracker &TempTracker = const_cast<RegPressureTracker&>(RPTracker);

  // BestSU remains NULL if no top candidates beat the best existing candidate.
  CandResult FoundCandidate = NoCand;
  for (ReadyQueue::iterator I = Q.begin(), E = Q.end(); I != E; ++I) {
    RegPressureDelta RPDelta;
    TempTracker.getMaxPressureDelta((*I)->getInstr(), RPDelta,
                                    DAG->getRegionCriticalPSets(),
                                    DAG->getRegPressure().MaxSetPressure);

    // Initialize the candidate if needed.
    if (!Candidate.SU) {
      Candidate.SU = *I;
      Candidate.RPDelta = RPDelta;
      FoundCandidate = NodeOrder;
      continue;
    }
    // Avoid exceeding the target's limit.
    if (RPDelta.Excess.UnitIncrease < Candidate.RPDelta.Excess.UnitIncrease) {
      DEBUG(traceCandidate("ECAND", Q, *I, RPDelta.Excess));
      Candidate.SU = *I;
      Candidate.RPDelta = RPDelta;
      FoundCandidate = SingleExcess;
      continue;
    }
    if (RPDelta.Excess.UnitIncrease > Candidate.RPDelta.Excess.UnitIncrease)
      continue;
    // A tie on the highest-priority criterion demotes a prior "single" win.
    if (FoundCandidate == SingleExcess)
      FoundCandidate = MultiPressure;

    // Avoid increasing the max critical pressure in the scheduled region.
    if (RPDelta.CriticalMax.UnitIncrease
        < Candidate.RPDelta.CriticalMax.UnitIncrease) {
      DEBUG(traceCandidate("PCAND", Q, *I, RPDelta.CriticalMax));
      Candidate.SU = *I;
      Candidate.RPDelta = RPDelta;
      FoundCandidate = SingleCritical;
      continue;
    }
    if (RPDelta.CriticalMax.UnitIncrease
        > Candidate.RPDelta.CriticalMax.UnitIncrease)
      continue;
    if (FoundCandidate == SingleCritical)
      FoundCandidate = MultiPressure;

    // Avoid increasing the max pressure of the entire region.
    if (RPDelta.CurrentMax.UnitIncrease
        < Candidate.RPDelta.CurrentMax.UnitIncrease) {
      DEBUG(traceCandidate("MCAND", Q, *I, RPDelta.CurrentMax));
      Candidate.SU = *I;
      Candidate.RPDelta = RPDelta;
      FoundCandidate = SingleMax;
      continue;
    }
    if (RPDelta.CurrentMax.UnitIncrease
        > Candidate.RPDelta.CurrentMax.UnitIncrease)
      continue;
    if (FoundCandidate == SingleMax)
      FoundCandidate = MultiPressure;

    // Fall through to original instruction order.
    // Only consider node order if Candidate was chosen from this Q.
    if (FoundCandidate == NoCand)
      continue;

    // Prefer lower node numbers top-down and higher node numbers bottom-up,
    // i.e. the original instruction order in each direction.
    if ((Q.getID() == TopQID && (*I)->NodeNum < Candidate.SU->NodeNum)
        || (Q.getID() == BotQID && (*I)->NodeNum > Candidate.SU->NodeNum)) {
      DEBUG(traceCandidate("NCAND", Q, *I));
      Candidate.SU = *I;
      Candidate.RPDelta = RPDelta;
      FoundCandidate = NodeOrder;
    }
  }
  return FoundCandidate;
}

/// Pick the best candidate node from either the top or bottom queue.
// NOTE(review): "Bidrectional" is a typo for "Bidirectional"; fixing it
// requires updating the class declaration and the call in pickNode() too.
SUnit *ConvergingScheduler::pickNodeBidrectional(bool &IsTopNode) {
  // Schedule as far as possible in the direction of no choice. This is most
  // efficient, but also provides the best heuristics for CriticalPSets.
  if (SUnit *SU = Bot.pickOnlyChoice()) {
    IsTopNode = false;
    return SU;
  }
  if (SUnit *SU = Top.pickOnlyChoice()) {
    IsTopNode = true;
    return SU;
  }
  SchedCandidate BotCand;
  // Prefer bottom scheduling when heuristics are silent.
  CandResult BotResult = pickNodeFromQueue(Bot.Available,
                                           DAG->getBotRPTracker(), BotCand);
  assert(BotResult != NoCand && "failed to find the first candidate");

  // If either Q has a single candidate that provides the least increase in
  // Excess pressure, we can immediately schedule from that Q.
  //
  // RegionCriticalPSets summarizes the pressure within the scheduled region and
  // affects picking from either Q. If scheduling in one direction must
  // increase pressure for one of the excess PSets, then schedule in that
  // direction first to provide more freedom in the other direction.
  if (BotResult == SingleExcess || BotResult == SingleCritical) {
    IsTopNode = false;
    return BotCand.SU;
  }
  // Check if the top Q has a better candidate.
  SchedCandidate TopCand;
  CandResult TopResult = pickNodeFromQueue(Top.Available,
                                           DAG->getTopRPTracker(), TopCand);
  assert(TopResult != NoCand && "failed to find the first candidate");

  if (TopResult == SingleExcess || TopResult == SingleCritical) {
    IsTopNode = true;
    return TopCand.SU;
  }
  // If either Q has a single candidate that minimizes pressure above the
  // original region's pressure pick it.
  if (BotResult == SingleMax) {
    IsTopNode = false;
    return BotCand.SU;
  }
  if (TopResult == SingleMax) {
    IsTopNode = true;
    return TopCand.SU;
  }
  // Check for a salient pressure difference and pick the best from either side.
  if (compareRPDelta(TopCand.RPDelta, BotCand.RPDelta)) {
    IsTopNode = true;
    return TopCand.SU;
  }
  // Otherwise prefer the bottom candidate in node order.
  IsTopNode = false;
  return BotCand.SU;
}

/// Pick the best node to balance the schedule. Implements MachineSchedStrategy.
SUnit *ConvergingScheduler::pickNode(bool &IsTopNode) {
  // If the region is fully scheduled, both boundaries' queues must be empty.
  if (DAG->top() == DAG->bottom()) {
    assert(Top.Available.empty() && Top.Pending.empty() &&
           Bot.Available.empty() && Bot.Pending.empty() && "ReadyQ garbage");
    return NULL;
  }
  SUnit *SU;
  if (ForceTopDown) {
    // -misched-topdown: always schedule from the top boundary.
    SU = Top.pickOnlyChoice();
    if (!SU) {
      SchedCandidate TopCand;
      CandResult TopResult =
        pickNodeFromQueue(Top.Available, DAG->getTopRPTracker(), TopCand);
      assert(TopResult != NoCand && "failed to find the first candidate");
      (void)TopResult;
      SU = TopCand.SU;
    }
    IsTopNode = true;
  }
  else if (ForceBottomUp) {
    // -misched-bottomup: always schedule from the bottom boundary.
    SU = Bot.pickOnlyChoice();
    if (!SU) {
      SchedCandidate BotCand;
      CandResult BotResult =
        pickNodeFromQueue(Bot.Available, DAG->getBotRPTracker(), BotCand);
      assert(BotResult != NoCand && "failed to find the first candidate");
      (void)BotResult;
      SU = BotCand.SU;
    }
    IsTopNode = false;
  }
  else {
    SU = pickNodeBidrectional(IsTopNode);
  }
  // A node may be in both boundaries' ready sets; remove it from each.
  if (SU->isTopReady())
    Top.removeReady(SU);
  if (SU->isBottomReady())
    Bot.removeReady(SU);

  DEBUG(dbgs() << "*** " << (IsTopNode ? "Top" : "Bottom")
        << " Scheduling Instruction in cycle "
        << (IsTopNode ? Top.CurrCycle : Bot.CurrCycle) << '\n';
        SU->dump(DAG));
  return SU;
}

/// Update the scheduler's state after scheduling a node. This is the same node
/// that was just returned by pickNode(). However, ScheduleDAGMI needs to update
/// its state based on the current cycle before MachineSchedStrategy does.
void ConvergingScheduler::schedNode(SUnit *SU, bool IsTopNode) {
  if (IsTopNode) {
    SU->TopReadyCycle = Top.CurrCycle;
    Top.bumpNode(SU);
  }
  else {
    SU->BotReadyCycle = Bot.CurrCycle;
    Bot.bumpNode(SU);
  }
}

/// Create the standard converging machine scheduler. This will be used as the
/// default scheduler if the target does not set a default.
static ScheduleDAGInstrs *createConvergingSched(MachineSchedContext *C) {
  assert((!ForceTopDown || !ForceBottomUp) &&
         "-misched-topdown incompatible with -misched-bottomup");
  return new ScheduleDAGMI(C, new ConvergingScheduler());
}
static MachineSchedRegistry
ConvergingSchedRegistry("converge", "Standard converging scheduler.",
                        createConvergingSched);

//===----------------------------------------------------------------------===//
// Machine Instruction Shuffler for Correctness Testing
//===----------------------------------------------------------------------===//

#ifndef NDEBUG
namespace {
/// Apply a less-than relation on the node order, which corresponds to the
/// instruction order prior to scheduling. IsReverse implements greater-than.
template<bool IsReverse>
struct SUnitOrder {
  bool operator()(SUnit *A, SUnit *B) const {
    if (IsReverse)
      return A->NodeNum > B->NodeNum;
    else
      return A->NodeNum < B->NodeNum;
  }
};

/// Reorder instructions as much as possible.
class InstructionShuffler : public MachineSchedStrategy {
  // Alternate the scheduling direction after every pick when true.
  bool IsAlternating;
  // Current direction: true = schedule from the top queue next.
  bool IsTopDown;

  // Using a less-than relation (SUnitOrder<false>) for the TopQ priority
  // gives nodes with a higher number higher priority causing the latest
  // instructions to be scheduled first.
  PriorityQueue<SUnit*, std::vector<SUnit*>, SUnitOrder<false> >
    TopQ;
  // When scheduling bottom-up, use greater-than as the queue priority.
  PriorityQueue<SUnit*, std::vector<SUnit*>, SUnitOrder<true> >
    BottomQ;
public:
  InstructionShuffler(bool alternate, bool topdown)
    : IsAlternating(alternate), IsTopDown(topdown) {}

  virtual void initialize(ScheduleDAGMI *) {
    TopQ.clear();
    BottomQ.clear();
  }

  /// Implement MachineSchedStrategy interface.
  /// -----------------------------------------

  virtual SUnit *pickNode(bool &IsTopNode) {
    SUnit *SU;
    if (IsTopDown) {
      // Skip nodes already scheduled from the other end.
      do {
        if (TopQ.empty()) return NULL;
        SU = TopQ.top();
        TopQ.pop();
      } while (SU->isScheduled);
      IsTopNode = true;
    }
    else {
      do {
        if (BottomQ.empty()) return NULL;
        SU = BottomQ.top();
        BottomQ.pop();
      } while (SU->isScheduled);
      IsTopNode = false;
    }
    if (IsAlternating)
      IsTopDown = !IsTopDown;
    return SU;
  }

  virtual void schedNode(SUnit *SU, bool IsTopNode) {}

  virtual void releaseTopNode(SUnit *SU) {
    TopQ.push(SU);
  }
  virtual void releaseBottomNode(SUnit *SU) {
    BottomQ.push(SU);
  }
};
} // namespace

/// Factory for the debug-only "shuffle" scheduler registered below.
static ScheduleDAGInstrs *createInstructionShuffler(MachineSchedContext *C) {
  bool Alternate = !ForceTopDown && !ForceBottomUp;
  bool TopDown = !ForceBottomUp;
  assert((TopDown || !ForceTopDown) &&
         "-misched-topdown incompatible with -misched-bottomup");
  return new ScheduleDAGMI(C, new InstructionShuffler(Alternate, TopDown));
}
static MachineSchedRegistry ShufflerRegistry(
  "shuffle", "Shuffle machine instructions alternating directions",
  createInstructionShuffler);
#endif // !NDEBUG