//===- MachineScheduler.cpp - Machine Instruction Scheduler ---------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// MachineScheduler schedules machine instructions after phi elimination. It
// preserves LiveIntervals so it can be invoked before register allocation.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "misched"

#include "llvm/CodeGen/MachineScheduler.h"
#include "llvm/ADT/OwningPtr.h"
#include "llvm/ADT/PriorityQueue.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/RegisterClassInfo.h"
#include "llvm/CodeGen/ScheduleDFS.h"
#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GraphWriter.h"
#include "llvm/Support/raw_ostream.h"
#include <queue>

using namespace llvm;

namespace llvm {
cl::opt<bool> ForceTopDown("misched-topdown", cl::Hidden,
                           cl::desc("Force top-down list scheduling"));
cl::opt<bool> ForceBottomUp("misched-bottomup", cl::Hidden,
                            cl::desc("Force bottom-up list scheduling"));
}

#ifndef NDEBUG
static cl::opt<bool> ViewMISchedDAGs("view-misched-dags", cl::Hidden,
  cl::desc("Pop up a window to show MISched dags after they are processed"));

static cl::opt<unsigned> MISchedCutoff("misched-cutoff", cl::Hidden,
  cl::desc("Stop scheduling after N instructions"), cl::init(~0U));
#else
static bool ViewMISchedDAGs = false;
#endif // NDEBUG

// Experimental heuristics
static cl::opt<bool> EnableLoadCluster("misched-cluster", cl::Hidden,
  cl::desc("Enable load clustering."), cl::init(true));

// Experimental heuristics
static cl::opt<bool> EnableMacroFusion("misched-fusion", cl::Hidden,
  cl::desc("Enable scheduling for macro fusion."), cl::init(true));

static cl::opt<bool> VerifyScheduling("verify-misched", cl::Hidden,
  cl::desc("Verify machine instrs before and after machine scheduling"));

// DAG subtrees must have at least this many nodes.
static const unsigned MinSubtreeSize = 8;

//===----------------------------------------------------------------------===//
// Machine Instruction Scheduling Pass and Registry
//===----------------------------------------------------------------------===//

MachineSchedContext::MachineSchedContext():
  MF(0), MLI(0), MDT(0), PassConfig(0), AA(0), LIS(0) {
  RegClassInfo = new RegisterClassInfo();
}

MachineSchedContext::~MachineSchedContext() {
  delete RegClassInfo;
}

namespace {
/// MachineScheduler runs after coalescing and before register allocation.
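/// It is a thin MachineFunctionPass wrapper: the real work is done by a
/// ScheduleDAGInstrs instance produced by the factory that the registry below
/// selects (the target default, or a -misched= command line override).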
class MachineScheduler : public MachineSchedContext,
                         public MachineFunctionPass {
public:
  MachineScheduler();

  virtual void getAnalysisUsage(AnalysisUsage &AU) const;

  virtual void releaseMemory() {}

  virtual bool runOnMachineFunction(MachineFunction&);

  virtual void print(raw_ostream &O, const Module* = 0) const;

  static char ID; // Class identification, replacement for typeinfo
};
} // namespace

char MachineScheduler::ID = 0;

char &llvm::MachineSchedulerID = MachineScheduler::ID;

INITIALIZE_PASS_BEGIN(MachineScheduler, "misched",
                      "Machine Instruction Scheduler", false, false)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
INITIALIZE_PASS_END(MachineScheduler, "misched",
                    "Machine Instruction Scheduler", false, false)

MachineScheduler::MachineScheduler()
  : MachineFunctionPass(ID) {
  initializeMachineSchedulerPass(*PassRegistry::getPassRegistry());
}

void MachineScheduler::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequiredID(MachineDominatorsID);
  AU.addRequired<MachineLoopInfo>();
  AU.addRequired<AliasAnalysis>();
  AU.addRequired<TargetPassConfig>();
  AU.addRequired<SlotIndexes>();
  AU.addPreserved<SlotIndexes>();
  AU.addRequired<LiveIntervals>();
  AU.addPreserved<LiveIntervals>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

MachinePassRegistry MachineSchedRegistry::Registry;

/// A dummy default scheduler factory indicates whether the scheduler
/// is overridden on the command line.
static ScheduleDAGInstrs *useDefaultMachineSched(MachineSchedContext *C) {
  return 0;
}

/// MachineSchedOpt allows command line selection of the scheduler.
static cl::opt<MachineSchedRegistry::ScheduleDAGCtor, false,
               RegisterPassParser<MachineSchedRegistry> >
MachineSchedOpt("misched",
                cl::init(&useDefaultMachineSched), cl::Hidden,
                cl::desc("Machine instruction scheduler to use"));

static MachineSchedRegistry
DefaultSchedRegistry("default", "Use the target's default scheduler choice.",
                     useDefaultMachineSched);

/// Forward declare the standard machine scheduler. This will be used as the
/// default scheduler if the target does not set a default.
static ScheduleDAGInstrs *createConvergingSched(MachineSchedContext *C);


/// Decrement this iterator until reaching the top or a non-debug instr.
static MachineBasicBlock::iterator
priorNonDebug(MachineBasicBlock::iterator I, MachineBasicBlock::iterator Beg) {
  assert(I != Beg && "reached the top of the region, cannot decrement");
  while (--I != Beg) {
    if (!I->isDebugValue())
      break;
  }
  return I;
}

/// If this iterator is a debug value, increment until reaching the End or a
/// non-debug instruction.
static MachineBasicBlock::iterator
nextIfDebug(MachineBasicBlock::iterator I, MachineBasicBlock::iterator End) {
  for(; I != End; ++I) {
    if (!I->isDebugValue())
      break;
  }
  return I;
}

/// Top-level MachineScheduler pass driver.
///
/// Visit blocks in function order. Divide each block into scheduling regions
/// and visit them bottom-up. Visiting regions bottom-up is not required, but is
/// consistent with the DAG builder, which traverses the interior of the
/// scheduling regions bottom-up.
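/// (A scheduling region is a maximal run of instructions that contains no
/// scheduling boundary, as reported by TargetInstrInfo::isSchedulingBoundary.)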
///
/// This design avoids exposing scheduling boundaries to the DAG builder,
/// simplifying the DAG builder's support for "special" target instructions.
/// At the same time the design allows target schedulers to operate across
/// scheduling boundaries, for example to bundle the boundary instructions
/// without reordering them. This creates complexity, because the target
/// scheduler must update the RegionBegin and RegionEnd positions cached by
/// ScheduleDAGInstrs whenever adding or removing instructions. A much simpler
/// design would be to split blocks at scheduling boundaries, but LLVM has a
/// general bias against block splitting purely for implementation simplicity.
bool MachineScheduler::runOnMachineFunction(MachineFunction &mf) {
  DEBUG(dbgs() << "Before MISched:\n"; mf.print(dbgs()));

  // Initialize the context of the pass.
  MF = &mf;
  MLI = &getAnalysis<MachineLoopInfo>();
  MDT = &getAnalysis<MachineDominatorTree>();
  PassConfig = &getAnalysis<TargetPassConfig>();
  AA = &getAnalysis<AliasAnalysis>();

  LIS = &getAnalysis<LiveIntervals>();
  const TargetInstrInfo *TII = MF->getTarget().getInstrInfo();

  if (VerifyScheduling) {
    DEBUG(LIS->print(dbgs()));
    MF->verify(this, "Before machine scheduling.");
  }
  RegClassInfo->runOnMachineFunction(*MF);

  // Select the scheduler, or set the default.
  MachineSchedRegistry::ScheduleDAGCtor Ctor = MachineSchedOpt;
  if (Ctor == useDefaultMachineSched) {
    // Get the default scheduler set by the target.
    Ctor = MachineSchedRegistry::getDefault();
    if (!Ctor) {
      Ctor = createConvergingSched;
      MachineSchedRegistry::setDefault(Ctor);
    }
  }
  // Instantiate the selected scheduler.
  OwningPtr<ScheduleDAGInstrs> Scheduler(Ctor(this));

  // Visit all machine basic blocks.
  //
  // TODO: Visit blocks in global postorder or postorder within the bottom-up
  // loop tree. Then we can optionally compute global RegPressure.
  for (MachineFunction::iterator MBB = MF->begin(), MBBEnd = MF->end();
       MBB != MBBEnd; ++MBB) {

    Scheduler->startBlock(MBB);

    // Break the block into scheduling regions [I, RegionEnd), and schedule each
    // region as soon as it is discovered. RegionEnd points to the scheduling
    // boundary at the bottom of the region. The DAG does not include RegionEnd,
    // but the region does (i.e. the next RegionEnd is above the previous
    // RegionBegin). If the current block has no terminator then RegionEnd ==
    // MBB->end() for the bottom region.
    //
    // The Scheduler may insert instructions during either schedule() or
    // exitRegion(), even for empty regions. So the local iterators 'I' and
    // 'RegionEnd' are invalid across these calls.
    unsigned RemainingInstrs = MBB->size();
    for(MachineBasicBlock::iterator RegionEnd = MBB->end();
        RegionEnd != MBB->begin(); RegionEnd = Scheduler->begin()) {

      // Avoid decrementing RegionEnd for blocks with no terminator.
      if (RegionEnd != MBB->end()
          || TII->isSchedulingBoundary(llvm::prior(RegionEnd), MBB, *MF)) {
        --RegionEnd;
        // Count the boundary instruction.
        --RemainingInstrs;
      }

      // The next region starts above the previous region. Look backward in the
      // instruction stream until we find the nearest boundary.
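      // E.g. on a target where calls are scheduling boundaries, a block of the
      // form (A; B; call; C; D) yields two regions: the one below the call is
      // scheduled first, then the one above it; the call itself is never
      // reordered.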
      MachineBasicBlock::iterator I = RegionEnd;
      for(;I != MBB->begin(); --I, --RemainingInstrs) {
        if (TII->isSchedulingBoundary(llvm::prior(I), MBB, *MF))
          break;
      }
      // Notify the scheduler of the region, even if we may skip scheduling
      // it. Perhaps it still needs to be bundled.
      Scheduler->enterRegion(MBB, I, RegionEnd, RemainingInstrs);

      // Skip empty scheduling regions (0 or 1 schedulable instructions).
      if (I == RegionEnd || I == llvm::prior(RegionEnd)) {
        // Close the current region. Bundle the terminator if needed.
        // This invalidates 'RegionEnd' and 'I'.
        Scheduler->exitRegion();
        continue;
      }
      DEBUG(dbgs() << "********** MI Scheduling **********\n");
      DEBUG(dbgs() << MF->getName()
            << ":BB#" << MBB->getNumber() << " " << MBB->getName()
            << "\n From: " << *I << " To: ";
            if (RegionEnd != MBB->end()) dbgs() << *RegionEnd;
            else dbgs() << "End";
            dbgs() << " Remaining: " << RemainingInstrs << "\n");

      // Schedule a region: possibly reorder instructions.
      // This invalidates 'RegionEnd' and 'I'.
      Scheduler->schedule();

      // Close the current region.
      Scheduler->exitRegion();

      // Scheduling has invalidated the current iterator 'I'. Ask the
      // scheduler for the top of its scheduled region.
      RegionEnd = Scheduler->begin();
    }
    assert(RemainingInstrs == 0 && "Instruction count mismatch!");
    Scheduler->finishBlock();
  }
  Scheduler->finalizeSchedule();
  DEBUG(LIS->print(dbgs()));
  if (VerifyScheduling)
    MF->verify(this, "After machine scheduling.");
  return true;
}

void MachineScheduler::print(raw_ostream &O, const Module* m) const {
  // unimplemented
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void ReadyQueue::dump() {
  dbgs() << Name << ": ";
  for (unsigned i = 0, e = Queue.size(); i < e; ++i)
    dbgs() << Queue[i]->NodeNum << " ";
  dbgs() << "\n";
}
#endif

//===----------------------------------------------------------------------===//
// ScheduleDAGMI - Base class for MachineInstr scheduling with LiveIntervals
// preservation.
//===----------------------------------------------------------------------===//

ScheduleDAGMI::~ScheduleDAGMI() {
  delete DFSResult;
  DeleteContainerPointers(Mutations);
  delete SchedImpl;
}

bool ScheduleDAGMI::addEdge(SUnit *SuccSU, const SDep &PredDep) {
  if (SuccSU != &ExitSU) {
    // Do not use WillCreateCycle, it assumes SD scheduling.
    // If Pred is reachable from Succ, then the edge creates a cycle.
    if (Topo.IsReachable(PredDep.getSUnit(), SuccSU))
      return false;
    Topo.AddPred(SuccSU, PredDep.getSUnit());
  }
  SuccSU->addPred(PredDep, /*Required=*/!PredDep.isArtificial());
  // Return true regardless of whether a new edge needed to be inserted.
  return true;
}

/// ReleaseSucc - Decrement the NumPredsLeft count of a successor. When
/// NumPredsLeft reaches zero, release the successor node.
///
/// FIXME: Adjust SuccSU height based on MinLatency.
void ScheduleDAGMI::releaseSucc(SUnit *SU, SDep *SuccEdge) {
  SUnit *SuccSU = SuccEdge->getSUnit();

  if (SuccEdge->isWeak()) {
    --SuccSU->WeakPredsLeft;
    if (SuccEdge->isCluster())
      NextClusterSucc = SuccSU;
    return;
  }
#ifndef NDEBUG
  if (SuccSU->NumPredsLeft == 0) {
    dbgs() << "*** Scheduling failed! ***\n";
    SuccSU->dump(this);
    dbgs() << " has been released too many times!\n";
    llvm_unreachable(0);
  }
#endif
  --SuccSU->NumPredsLeft;
  if (SuccSU->NumPredsLeft == 0 && SuccSU != &ExitSU)
    SchedImpl->releaseTopNode(SuccSU);
}

/// releaseSuccessors - Call releaseSucc on each of SU's successors.
void ScheduleDAGMI::releaseSuccessors(SUnit *SU) {
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    releaseSucc(SU, &*I);
  }
}

/// ReleasePred - Decrement the NumSuccsLeft count of a predecessor. When
/// NumSuccsLeft reaches zero, release the predecessor node.
///
/// FIXME: Adjust PredSU height based on MinLatency.
void ScheduleDAGMI::releasePred(SUnit *SU, SDep *PredEdge) {
  SUnit *PredSU = PredEdge->getSUnit();

  if (PredEdge->isWeak()) {
    --PredSU->WeakSuccsLeft;
    if (PredEdge->isCluster())
      NextClusterPred = PredSU;
    return;
  }
#ifndef NDEBUG
  if (PredSU->NumSuccsLeft == 0) {
    dbgs() << "*** Scheduling failed! ***\n";
    PredSU->dump(this);
    dbgs() << " has been released too many times!\n";
    llvm_unreachable(0);
  }
#endif
  --PredSU->NumSuccsLeft;
  if (PredSU->NumSuccsLeft == 0 && PredSU != &EntrySU)
    SchedImpl->releaseBottomNode(PredSU);
}

/// releasePredecessors - Call releasePred on each of SU's predecessors.
void ScheduleDAGMI::releasePredecessors(SUnit *SU) {
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    releasePred(SU, &*I);
  }
}

void ScheduleDAGMI::moveInstruction(MachineInstr *MI,
                                    MachineBasicBlock::iterator InsertPos) {
  // Advance RegionBegin if the first instruction moves down.
  if (&*RegionBegin == MI)
    ++RegionBegin;

  // Update the instruction stream.
  BB->splice(InsertPos, BB, MI);

  // Update LiveIntervals
  LIS->handleMove(MI, /*UpdateFlags=*/true);

  // Recede RegionBegin if an instruction moves above the first.
  if (RegionBegin == InsertPos)
    RegionBegin = MI;
}

bool ScheduleDAGMI::checkSchedLimit() {
#ifndef NDEBUG
  if (NumInstrsScheduled == MISchedCutoff && MISchedCutoff != ~0U) {
    CurrentTop = CurrentBottom;
    return false;
  }
  ++NumInstrsScheduled;
#endif
  return true;
}

/// enterRegion - Called back from MachineScheduler::runOnMachineFunction after
/// crossing a scheduling boundary. [begin, end) includes all instructions in
/// the region, including the boundary itself and single-instruction regions
/// that don't get scheduled.
void ScheduleDAGMI::enterRegion(MachineBasicBlock *bb,
                                MachineBasicBlock::iterator begin,
                                MachineBasicBlock::iterator end,
                                unsigned endcount)
{
  ScheduleDAGInstrs::enterRegion(bb, begin, end, endcount);

  // For convenience remember the end of the liveness region.
  LiveRegionEnd =
    (RegionEnd == bb->end()) ? RegionEnd : llvm::next(RegionEnd);
}

// Set up the register pressure trackers for the top and bottom scheduled
// regions.
void ScheduleDAGMI::initRegPressure() {
  TopRPTracker.init(&MF, RegClassInfo, LIS, BB, RegionBegin);
  BotRPTracker.init(&MF, RegClassInfo, LIS, BB, LiveRegionEnd);

  // Close the RPTracker to finalize live ins.
  RPTracker.closeRegion();

  DEBUG(RPTracker.getPressure().dump(TRI));

  // Initialize the live ins and live outs.
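  // The top tracker is seeded with the region's live-in registers, the bottom
  // tracker with its live-outs, both taken from the RPTracker closed above.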
  TopRPTracker.addLiveRegs(RPTracker.getPressure().LiveInRegs);
  BotRPTracker.addLiveRegs(RPTracker.getPressure().LiveOutRegs);

  // Close one end of the tracker so we can call
  // getMaxUpward/DownwardPressureDelta before advancing across any
  // instructions. This converts currently live regs into live ins/outs.
  TopRPTracker.closeTop();
  BotRPTracker.closeBottom();

  // Account for liveness generated by the region boundary.
  if (LiveRegionEnd != RegionEnd)
    BotRPTracker.recede();

  assert(BotRPTracker.getPos() == RegionEnd && "Can't find the region bottom");

  // Cache the list of excess pressure sets in this region. This will also track
  // the max pressure in the scheduled code for these sets.
  RegionCriticalPSets.clear();
  const std::vector<unsigned> &RegionPressure =
    RPTracker.getPressure().MaxSetPressure;
  for (unsigned i = 0, e = RegionPressure.size(); i < e; ++i) {
    unsigned Limit = TRI->getRegPressureSetLimit(i);
    DEBUG(dbgs() << TRI->getRegPressureSetName(i)
          << " Limit " << Limit
          << " Actual " << RegionPressure[i] << "\n");
    if (RegionPressure[i] > Limit)
      RegionCriticalPSets.push_back(PressureElement(i, 0));
  }
  DEBUG(dbgs() << "Excess PSets: ";
        for (unsigned i = 0, e = RegionCriticalPSets.size(); i != e; ++i)
          dbgs() << TRI->getRegPressureSetName(
            RegionCriticalPSets[i].PSetID) << " ";
        dbgs() << "\n");
}

// FIXME: When the pressure tracker deals in pressure differences then we won't
// iterate over all RegionCriticalPSets[i].
void ScheduleDAGMI::
updateScheduledPressure(const std::vector<unsigned> &NewMaxPressure) {
  for (unsigned i = 0, e = RegionCriticalPSets.size(); i < e; ++i) {
    unsigned ID = RegionCriticalPSets[i].PSetID;
    int &MaxUnits = RegionCriticalPSets[i].UnitIncrease;
    if ((int)NewMaxPressure[ID] > MaxUnits)
      MaxUnits = NewMaxPressure[ID];
  }
}

/// schedule - Called back from MachineScheduler::runOnMachineFunction
/// after setting up the current scheduling region. [RegionBegin, RegionEnd)
/// only includes instructions that have DAG nodes, not scheduling boundaries.
///
/// This is a skeletal driver, with all the functionality pushed into helpers,
/// so that it can be easily extended by experimental schedulers. Generally,
/// implementing MachineSchedStrategy should be sufficient to implement a new
/// scheduling algorithm. However, if a scheduler further subclasses
/// ScheduleDAGMI then it will want to override this virtual method in order to
/// update any specialized state.
void ScheduleDAGMI::schedule() {
  buildDAGWithRegPressure();

  Topo.InitDAGTopologicalSorting();

  postprocessDAG();

  SmallVector<SUnit*, 8> TopRoots, BotRoots;
  findRootsAndBiasEdges(TopRoots, BotRoots);

  // Initialize the strategy before modifying the DAG.
  // This may initialize a DFSResult to be used for queue priority.
  SchedImpl->initialize(this);

  DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
          SUnits[su].dumpAll(this));
  if (ViewMISchedDAGs) viewGraph();

  // Initialize ready queues now that the DAG and priority data are finalized.
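  // From here the driver loop below alternates pickNode, scheduleMI and
  // updateQueues until the unscheduled zone between CurrentTop and
  // CurrentBottom is empty.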
  initQueues(TopRoots, BotRoots);

  bool IsTopNode = false;
  while (SUnit *SU = SchedImpl->pickNode(IsTopNode)) {
    assert(!SU->isScheduled && "Node already scheduled");
    if (!checkSchedLimit())
      break;

    scheduleMI(SU, IsTopNode);

    updateQueues(SU, IsTopNode);
  }
  assert(CurrentTop == CurrentBottom && "Nonempty unscheduled zone.");

  placeDebugValues();

  DEBUG({
      unsigned BBNum = begin()->getParent()->getNumber();
      dbgs() << "*** Final schedule for BB#" << BBNum << " ***\n";
      dumpSchedule();
      dbgs() << '\n';
    });
}

/// Build the DAG and set up three register pressure trackers.
void ScheduleDAGMI::buildDAGWithRegPressure() {
  // Initialize the register pressure tracker used by buildSchedGraph.
  RPTracker.init(&MF, RegClassInfo, LIS, BB, LiveRegionEnd);

  // Account for liveness generated by the region boundary.
  if (LiveRegionEnd != RegionEnd)
    RPTracker.recede();

  // Build the DAG, and compute current register pressure.
  buildSchedGraph(AA, &RPTracker);

  // Initialize top/bottom trackers after computing region pressure.
  initRegPressure();
}

/// Apply each ScheduleDAGMutation step in order.
void ScheduleDAGMI::postprocessDAG() {
  for (unsigned i = 0, e = Mutations.size(); i < e; ++i) {
    Mutations[i]->apply(this);
  }
}

void ScheduleDAGMI::computeDFSResult() {
  if (!DFSResult)
    DFSResult = new SchedDFSResult(/*BottomU*/true, MinSubtreeSize);
  DFSResult->clear();
  ScheduledTrees.clear();
  DFSResult->resize(SUnits.size());
  DFSResult->compute(SUnits);
  ScheduledTrees.resize(DFSResult->getNumSubtrees());
}

void ScheduleDAGMI::findRootsAndBiasEdges(SmallVectorImpl<SUnit*> &TopRoots,
                                          SmallVectorImpl<SUnit*> &BotRoots) {
  for (std::vector<SUnit>::iterator
         I = SUnits.begin(), E = SUnits.end(); I != E; ++I) {
    SUnit *SU = &(*I);
    assert(!SU->isBoundaryNode() && "Boundary node should not be in SUnits");

    // Order predecessors so DFSResult follows the critical path.
    SU->biasCriticalPath();

    // A SUnit is ready to top schedule if it has no predecessors.
    if (!I->NumPredsLeft)
      TopRoots.push_back(SU);
    // A SUnit is ready to bottom schedule if it has no successors.
    if (!I->NumSuccsLeft)
      BotRoots.push_back(SU);
  }
  ExitSU.biasCriticalPath();
}

/// Identify DAG roots and set up scheduler queues.
void ScheduleDAGMI::initQueues(ArrayRef<SUnit*> TopRoots,
                               ArrayRef<SUnit*> BotRoots) {
  NextClusterSucc = NULL;
  NextClusterPred = NULL;

  // Release all DAG roots for scheduling, not including EntrySU/ExitSU.
  //
  // Nodes with unreleased weak edges can still be roots.
  // Release top roots in forward order.
  for (SmallVectorImpl<SUnit*>::const_iterator
         I = TopRoots.begin(), E = TopRoots.end(); I != E; ++I) {
    SchedImpl->releaseTopNode(*I);
  }
  // Release bottom roots in reverse order so the higher priority nodes appear
  // first. This is more natural and slightly more efficient.
  for (SmallVectorImpl<SUnit*>::const_reverse_iterator
         I = BotRoots.rbegin(), E = BotRoots.rend(); I != E; ++I) {
    SchedImpl->releaseBottomNode(*I);
  }

  releaseSuccessors(&EntrySU);
  releasePredecessors(&ExitSU);

  SchedImpl->registerRoots();

  // Advance past initial DebugValues.
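  // CurrentTop must point at the first schedulable instruction so the top
  // pressure tracker and the scheduler agree on the zone's starting position.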
  assert(TopRPTracker.getPos() == RegionBegin && "bad initial Top tracker");
  CurrentTop = nextIfDebug(RegionBegin, RegionEnd);
  TopRPTracker.setPos(CurrentTop);

  CurrentBottom = RegionEnd;
}

/// Move an instruction and update register pressure.
void ScheduleDAGMI::scheduleMI(SUnit *SU, bool IsTopNode) {
  // Move the instruction to its new location in the instruction stream.
  MachineInstr *MI = SU->getInstr();

  if (IsTopNode) {
    assert(SU->isTopReady() && "node still has unscheduled dependencies");
    if (&*CurrentTop == MI)
      CurrentTop = nextIfDebug(++CurrentTop, CurrentBottom);
    else {
      moveInstruction(MI, CurrentTop);
      TopRPTracker.setPos(MI);
    }

    // Update top scheduled pressure.
    TopRPTracker.advance();
    assert(TopRPTracker.getPos() == CurrentTop && "out of sync");
    updateScheduledPressure(TopRPTracker.getPressure().MaxSetPressure);
  }
  else {
    assert(SU->isBottomReady() && "node still has unscheduled dependencies");
    MachineBasicBlock::iterator priorII =
      priorNonDebug(CurrentBottom, CurrentTop);
    if (&*priorII == MI)
      CurrentBottom = priorII;
    else {
      if (&*CurrentTop == MI) {
        CurrentTop = nextIfDebug(++CurrentTop, priorII);
        TopRPTracker.setPos(CurrentTop);
      }
      moveInstruction(MI, CurrentBottom);
      CurrentBottom = MI;
    }
    // Update bottom scheduled pressure.
    BotRPTracker.recede();
    assert(BotRPTracker.getPos() == CurrentBottom && "out of sync");
    updateScheduledPressure(BotRPTracker.getPressure().MaxSetPressure);
  }
}

/// Update scheduler queues after scheduling an instruction.
void ScheduleDAGMI::updateQueues(SUnit *SU, bool IsTopNode) {
  // Release dependent instructions for scheduling.
  if (IsTopNode)
    releaseSuccessors(SU);
  else
    releasePredecessors(SU);

  SU->isScheduled = true;

  if (DFSResult) {
    unsigned SubtreeID = DFSResult->getSubtreeID(SU);
    if (!ScheduledTrees.test(SubtreeID)) {
      ScheduledTrees.set(SubtreeID);
      DFSResult->scheduleTree(SubtreeID);
      SchedImpl->scheduleTree(SubtreeID);
    }
  }

  // Notify the scheduling strategy after updating the DAG.
  SchedImpl->schedNode(SU, IsTopNode);
}

/// Reinsert any remaining debug_values, just like the PostRA scheduler.
void ScheduleDAGMI::placeDebugValues() {
  // If first instruction was a DBG_VALUE then put it back.
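  // Then walk DbgValues in reverse, splicing each remaining DBG_VALUE back in
  // immediately after the instruction it originally followed.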
  if (FirstDbgValue) {
    BB->splice(RegionBegin, BB, FirstDbgValue);
    RegionBegin = FirstDbgValue;
  }

  for (std::vector<std::pair<MachineInstr *, MachineInstr *> >::iterator
         DI = DbgValues.end(), DE = DbgValues.begin(); DI != DE; --DI) {
    std::pair<MachineInstr *, MachineInstr *> P = *prior(DI);
    MachineInstr *DbgValue = P.first;
    MachineBasicBlock::iterator OrigPrevMI = P.second;
    if (&*RegionBegin == DbgValue)
      ++RegionBegin;
    BB->splice(++OrigPrevMI, BB, DbgValue);
    if (OrigPrevMI == llvm::prior(RegionEnd))
      RegionEnd = DbgValue;
  }
  DbgValues.clear();
  FirstDbgValue = NULL;
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void ScheduleDAGMI::dumpSchedule() const {
  for (MachineBasicBlock::iterator MI = begin(), ME = end(); MI != ME; ++MI) {
    if (SUnit *SU = getSUnit(&(*MI)))
      SU->dump(this);
    else
      dbgs() << "Missing SUnit\n";
  }
}
#endif

//===----------------------------------------------------------------------===//
// LoadClusterMutation - DAG post-processing to cluster loads.
//===----------------------------------------------------------------------===//

namespace {
/// \brief Post-process the DAG to create cluster edges between neighboring
/// loads.
class LoadClusterMutation : public ScheduleDAGMutation {
  struct LoadInfo {
    SUnit *SU;
    unsigned BaseReg;
    unsigned Offset;
    LoadInfo(SUnit *su, unsigned reg, unsigned ofs)
      : SU(su), BaseReg(reg), Offset(ofs) {}
  };
  static bool LoadInfoLess(const LoadClusterMutation::LoadInfo &LHS,
                           const LoadClusterMutation::LoadInfo &RHS);

  const TargetInstrInfo *TII;
  const TargetRegisterInfo *TRI;
public:
  LoadClusterMutation(const TargetInstrInfo *tii,
                      const TargetRegisterInfo *tri)
    : TII(tii), TRI(tri) {}

  virtual void apply(ScheduleDAGMI *DAG);
protected:
  void clusterNeighboringLoads(ArrayRef<SUnit*> Loads, ScheduleDAGMI *DAG);
};
} // anonymous

bool LoadClusterMutation::LoadInfoLess(
  const LoadClusterMutation::LoadInfo &LHS,
  const LoadClusterMutation::LoadInfo &RHS) {
  if (LHS.BaseReg != RHS.BaseReg)
    return LHS.BaseReg < RHS.BaseReg;
  return LHS.Offset < RHS.Offset;
}

void LoadClusterMutation::clusterNeighboringLoads(ArrayRef<SUnit*> Loads,
                                                  ScheduleDAGMI *DAG) {
  SmallVector<LoadClusterMutation::LoadInfo,32> LoadRecords;
  for (unsigned Idx = 0, End = Loads.size(); Idx != End; ++Idx) {
    SUnit *SU = Loads[Idx];
    unsigned BaseReg;
    unsigned Offset;
    if (TII->getLdStBaseRegImmOfs(SU->getInstr(), BaseReg, Offset, TRI))
      LoadRecords.push_back(LoadInfo(SU, BaseReg, Offset));
  }
  if (LoadRecords.size() < 2)
    return;
  std::sort(LoadRecords.begin(), LoadRecords.end(), LoadInfoLess);
  unsigned ClusterLength = 1;
  for (unsigned Idx = 0, End = LoadRecords.size(); Idx < (End - 1); ++Idx) {
    if (LoadRecords[Idx].BaseReg != LoadRecords[Idx+1].BaseReg) {
      ClusterLength = 1;
      continue;
    }

    SUnit *SUa = LoadRecords[Idx].SU;
    SUnit *SUb = LoadRecords[Idx+1].SU;
    if (TII->shouldClusterLoads(SUa->getInstr(), SUb->getInstr(), ClusterLength)
        && DAG->addEdge(SUb, SDep(SUa, SDep::Cluster))) {

      DEBUG(dbgs() << "Cluster loads SU(" << SUa->NodeNum << ") - SU("
            << SUb->NodeNum << ")\n");
      // Copy successor edges from SUa to SUb. Interleaving computation
      // dependent on SUa can prevent load combining due to register reuse.
      // Predecessor edges do not need to be copied from SUb to SUa since
      // nearby loads should have effectively the same inputs.
      for (SUnit::const_succ_iterator
             SI = SUa->Succs.begin(), SE = SUa->Succs.end(); SI != SE; ++SI) {
        if (SI->getSUnit() == SUb)
          continue;
        DEBUG(dbgs() << "  Copy Succ SU(" << SI->getSUnit()->NodeNum << ")\n");
        DAG->addEdge(SI->getSUnit(), SDep(SUb, SDep::Artificial));
      }
      ++ClusterLength;
    }
    else
      ClusterLength = 1;
  }
}

/// \brief Callback from DAG postProcessing to create cluster edges for loads.
void LoadClusterMutation::apply(ScheduleDAGMI *DAG) {
  // Map DAG NodeNum to store chain ID.
  DenseMap<unsigned, unsigned> StoreChainIDs;
  // Map each store chain to a set of dependent loads.
  SmallVector<SmallVector<SUnit*,4>, 32> StoreChainDependents;
  for (unsigned Idx = 0, End = DAG->SUnits.size(); Idx != End; ++Idx) {
    SUnit *SU = &DAG->SUnits[Idx];
    if (!SU->getInstr()->mayLoad())
      continue;
    unsigned ChainPredID = DAG->SUnits.size();
    for (SUnit::const_pred_iterator
           PI = SU->Preds.begin(), PE = SU->Preds.end(); PI != PE; ++PI) {
      if (PI->isCtrl()) {
        ChainPredID = PI->getSUnit()->NodeNum;
        break;
      }
    }
    // Check if this chain-like pred has been seen
    // before. ChainPredID==MaxNodeID for loads at the top of the schedule.
    unsigned NumChains = StoreChainDependents.size();
    std::pair<DenseMap<unsigned, unsigned>::iterator, bool> Result =
      StoreChainIDs.insert(std::make_pair(ChainPredID, NumChains));
    if (Result.second)
      StoreChainDependents.resize(NumChains + 1);
    StoreChainDependents[Result.first->second].push_back(SU);
  }
  // Iterate over the store chains.
  for (unsigned Idx = 0, End = StoreChainDependents.size(); Idx != End; ++Idx)
    clusterNeighboringLoads(StoreChainDependents[Idx], DAG);
}

//===----------------------------------------------------------------------===//
// MacroFusion - DAG post-processing to encourage fusion of macro ops.
//===----------------------------------------------------------------------===//

namespace {
/// \brief Post-process the DAG to create cluster edges between instructions
/// that may be fused by the processor into a single operation.
class MacroFusion : public ScheduleDAGMutation {
  const TargetInstrInfo *TII;
public:
  MacroFusion(const TargetInstrInfo *tii): TII(tii) {}

  virtual void apply(ScheduleDAGMI *DAG);
};
} // anonymous

/// \brief Callback from DAG postProcessing to create cluster edges to
/// encourage fused operations.
void MacroFusion::apply(ScheduleDAGMI *DAG) {
  // For now, assume targets can only fuse with the branch.
  MachineInstr *Branch = DAG->ExitSU.getInstr();
  if (!Branch)
    return;

  for (unsigned Idx = DAG->SUnits.size(); Idx > 0;) {
    SUnit *SU = &DAG->SUnits[--Idx];
    if (!TII->shouldScheduleAdjacent(SU->getInstr(), Branch))
      continue;

    // Create a single weak edge from SU to ExitSU. The only effect is to cause
    // bottom-up scheduling to heavily prioritize the clustered SU. There is no
    // need to copy predecessor edges from ExitSU to SU, since top-down
    // scheduling cannot prioritize ExitSU anyway. To defer top-down scheduling
    // of SU, we could create an artificial edge from the deepest root, but it
    // hasn't been needed yet.
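    // The edge addition below cannot fail: ExitSU has no successors, so no
    // node is reachable from it and addEdge can never create a cycle.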
    bool Success = DAG->addEdge(&DAG->ExitSU, SDep(SU, SDep::Cluster));
    (void)Success;
    assert(Success && "No DAG nodes should be reachable from ExitSU");

    DEBUG(dbgs() << "Macro Fuse SU(" << SU->NodeNum << ")\n");
    break;
  }
}

//===----------------------------------------------------------------------===//
// ConvergingScheduler - Implementation of the standard MachineSchedStrategy.
//===----------------------------------------------------------------------===//

namespace {
/// ConvergingScheduler shrinks the unscheduled zone using heuristics to balance
/// the schedule.
class ConvergingScheduler : public MachineSchedStrategy {
public:
  /// Represent the type of SchedCandidate found within a single queue.
  /// pickNodeBidirectional depends on these listed by decreasing priority.
  enum CandReason {
    NoCand, SingleExcess, SingleCritical, Cluster,
    ResourceReduce, ResourceDemand, BotHeightReduce, BotPathReduce,
    TopDepthReduce, TopPathReduce, SingleMax, MultiPressure, NextDefUse,
    NodeOrder};

#ifndef NDEBUG
  static const char *getReasonStr(ConvergingScheduler::CandReason Reason);
#endif

  /// Policy for scheduling the next instruction in the candidate's zone.
  struct CandPolicy {
    bool ReduceLatency;
    unsigned ReduceResIdx;
    unsigned DemandResIdx;

    CandPolicy(): ReduceLatency(false), ReduceResIdx(0), DemandResIdx(0) {}
  };

  /// Status of an instruction's critical resource consumption.
  struct SchedResourceDelta {
    // Count critical resources in the scheduled region required by SU.
    unsigned CritResources;

    // Count critical resources from another region consumed by SU.
    unsigned DemandedResources;

    SchedResourceDelta(): CritResources(0), DemandedResources(0) {}

    bool operator==(const SchedResourceDelta &RHS) const {
      return CritResources == RHS.CritResources
        && DemandedResources == RHS.DemandedResources;
    }
    bool operator!=(const SchedResourceDelta &RHS) const {
      return !operator==(RHS);
    }
  };

  /// Store the state used by ConvergingScheduler heuristics, required for the
  /// lifetime of one invocation of pickNode().
  struct SchedCandidate {
    CandPolicy Policy;

    // The best SUnit candidate.
    SUnit *SU;

    // The reason for this candidate.
    CandReason Reason;

    // Register pressure values for the best candidate.
    RegPressureDelta RPDelta;

    // Critical resource consumption of the best candidate.
    SchedResourceDelta ResDelta;

    SchedCandidate(const CandPolicy &policy)
      : Policy(policy), SU(NULL), Reason(NoCand) {}

    bool isValid() const { return SU; }

    // Copy the status of another candidate without changing policy.
    void setBest(SchedCandidate &Best) {
      assert(Best.Reason != NoCand && "uninitialized Sched candidate");
      SU = Best.SU;
      Reason = Best.Reason;
      RPDelta = Best.RPDelta;
      ResDelta = Best.ResDelta;
    }

    void initResourceDelta(const ScheduleDAGMI *DAG,
                           const TargetSchedModel *SchedModel);
  };

  /// Summarize the unscheduled region.
  struct SchedRemainder {
    // Critical path through the DAG in expected latency.
    unsigned CriticalPath;

    // Unscheduled resources
    SmallVector<unsigned, 16> RemainingCounts;
    // Critical resource for the unscheduled zone.
    unsigned CritResIdx;
    // Number of micro-ops left to schedule.
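    // Compared against RemainingCounts after scaling by the micro-op factor
    // (see getMaxRemainingCount below).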
    unsigned RemainingMicroOps;

    void reset() {
      CriticalPath = 0;
      RemainingCounts.clear();
      CritResIdx = 0;
      RemainingMicroOps = 0;
    }

    SchedRemainder() { reset(); }

    void init(ScheduleDAGMI *DAG, const TargetSchedModel *SchedModel);

    unsigned getMaxRemainingCount(const TargetSchedModel *SchedModel) const {
      if (!SchedModel->hasInstrSchedModel())
        return 0;

      return std::max(
        RemainingMicroOps * SchedModel->getMicroOpFactor(),
        RemainingCounts[CritResIdx]);
    }
  };

  /// Each scheduling boundary is associated with ready queues. It tracks the
  /// current cycle in the direction of movement, and maintains the state
  /// of "hazards" and other interlocks at the current cycle.
  struct SchedBoundary {
    ScheduleDAGMI *DAG;
    const TargetSchedModel *SchedModel;
    SchedRemainder *Rem;

    ReadyQueue Available;
    ReadyQueue Pending;
    bool CheckPending;

    // For heuristics, keep a list of the nodes that immediately depend on the
    // most recently scheduled node.
    SmallPtrSet<const SUnit*, 8> NextSUs;

    ScheduleHazardRecognizer *HazardRec;

    unsigned CurrCycle;
    unsigned IssueCount;

    /// MinReadyCycle - Cycle of the soonest available instruction.
    unsigned MinReadyCycle;

    // The expected latency of the critical path in this scheduled zone.
    unsigned ExpectedLatency;

    // Resources used in the scheduled zone beyond this boundary.
    SmallVector<unsigned, 16> ResourceCounts;

    // Cache the critical resources ID in this scheduled zone.
    unsigned CritResIdx;

    // Is the scheduled region resource limited vs. latency limited.
    bool IsResourceLimited;

    unsigned ExpectedCount;

#ifndef NDEBUG
    // Remember the greatest min operand latency.
    unsigned MaxMinLatency;
#endif

    void reset() {
      // A new HazardRec is created for each DAG and owned by SchedBoundary.
      delete HazardRec;

      Available.clear();
      Pending.clear();
      CheckPending = false;
      NextSUs.clear();
      HazardRec = 0;
      CurrCycle = 0;
      IssueCount = 0;
      MinReadyCycle = UINT_MAX;
      ExpectedLatency = 0;
      // Reserve a zero-count for invalid CritResIdx.
      ResourceCounts.resize(1);
      assert(!ResourceCounts[0] && "nonzero count for bad resource");
      CritResIdx = 0;
      IsResourceLimited = false;
      ExpectedCount = 0;
#ifndef NDEBUG
      MaxMinLatency = 0;
#endif
    }

    /// Pending queues extend the ready queues with the same ID and the
    /// PendingFlag set.
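    /// For example, with TopQID == 1, BotQID == 2 and LogMaxQID == 2, the two
    /// pending queues get IDs (1 << 2) == 4 and (2 << 2) == 8, keeping them
    /// disjoint from the ready queue IDs recorded in SUnit::NodeQueueId.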
    SchedBoundary(unsigned ID, const Twine &Name):
      DAG(0), SchedModel(0), Rem(0), Available(ID, Name+".A"),
      Pending(ID << ConvergingScheduler::LogMaxQID, Name+".P"),
      HazardRec(0) {
      reset();
    }

    ~SchedBoundary() { delete HazardRec; }

    void init(ScheduleDAGMI *dag, const TargetSchedModel *smodel,
              SchedRemainder *rem);

    bool isTop() const {
      return Available.getID() == ConvergingScheduler::TopQID;
    }

    unsigned getUnscheduledLatency(SUnit *SU) const {
      if (isTop())
        return SU->getHeight();
      return SU->getDepth() + SU->Latency;
    }

    unsigned getCriticalCount() const {
      return ResourceCounts[CritResIdx];
    }

    bool checkHazard(SUnit *SU);

    void setLatencyPolicy(CandPolicy &Policy);

    void releaseNode(SUnit *SU, unsigned ReadyCycle);

    void bumpCycle();

    void countResource(unsigned PIdx, unsigned Cycles);

    void bumpNode(SUnit *SU);

    void releasePending();

    void removeReady(SUnit *SU);

    SUnit *pickOnlyChoice();
  };

private:
  ScheduleDAGMI *DAG;
  const TargetSchedModel *SchedModel;
  const TargetRegisterInfo *TRI;

  // State of the top and bottom scheduled instruction boundaries.
  SchedRemainder Rem;
  SchedBoundary Top;
  SchedBoundary Bot;

public:
  /// SUnit::NodeQueueId: 0 (none), 1 (top), 2 (bot), 3 (both)
  enum {
    TopQID = 1,
    BotQID = 2,
    LogMaxQID = 2
  };

  ConvergingScheduler():
    DAG(0), SchedModel(0), TRI(0), Top(TopQID, "TopQ"), Bot(BotQID, "BotQ") {}

  virtual void initialize(ScheduleDAGMI *dag);

  virtual SUnit *pickNode(bool &IsTopNode);

  virtual void schedNode(SUnit *SU, bool IsTopNode);

  virtual void releaseTopNode(SUnit *SU);

  virtual void releaseBottomNode(SUnit *SU);

  virtual void registerRoots();

protected:
  void balanceZones(
    ConvergingScheduler::SchedBoundary &CriticalZone,
    ConvergingScheduler::SchedCandidate &CriticalCand,
    ConvergingScheduler::SchedBoundary &OppositeZone,
    ConvergingScheduler::SchedCandidate &OppositeCand);

  void checkResourceLimits(ConvergingScheduler::SchedCandidate &TopCand,
                           ConvergingScheduler::SchedCandidate &BotCand);

  void tryCandidate(SchedCandidate &Cand,
                    SchedCandidate &TryCand,
                    SchedBoundary &Zone,
                    const RegPressureTracker &RPTracker,
                    RegPressureTracker &TempTracker);

  SUnit *pickNodeBidirectional(bool &IsTopNode);

  void pickNodeFromQueue(SchedBoundary &Zone,
                         const RegPressureTracker &RPTracker,
                         SchedCandidate &Candidate);

#ifndef NDEBUG
  void traceCandidate(const SchedCandidate &Cand, const SchedBoundary &Zone);
#endif
};
} // namespace

void ConvergingScheduler::SchedRemainder::
init(ScheduleDAGMI *DAG, const TargetSchedModel *SchedModel) {
  reset();
  if (!SchedModel->hasInstrSchedModel())
    return;
  RemainingCounts.resize(SchedModel->getNumProcResourceKinds());
  for (std::vector<SUnit>::iterator
         I = DAG->SUnits.begin(), E = DAG->SUnits.end(); I != E; ++I) {
    const MCSchedClassDesc *SC = DAG->getSchedClass(&*I);
    RemainingMicroOps += SchedModel->getNumMicroOps(I->getInstr(), SC);
    for (TargetSchedModel::ProcResIter
           PI = SchedModel->getWriteProcResBegin(SC),
           PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
      unsigned PIdx = PI->ProcResourceIdx;
      unsigned Factor = SchedModel->getResourceFactor(PIdx);
      RemainingCounts[PIdx] += (Factor * PI->Cycles);
    }
  }
  for (unsigned PIdx = 0, PEnd = SchedModel->getNumProcResourceKinds();
       PIdx != PEnd; ++PIdx) {
    if ((int)(RemainingCounts[PIdx] - RemainingCounts[CritResIdx])
        >= (int)SchedModel->getLatencyFactor()) {
      CritResIdx = PIdx;
    }
  }
}

void ConvergingScheduler::SchedBoundary::
init(ScheduleDAGMI *dag, const TargetSchedModel *smodel, SchedRemainder *rem) {
  reset();
  DAG = dag;
  SchedModel = smodel;
  Rem = rem;
  if (SchedModel->hasInstrSchedModel())
    ResourceCounts.resize(SchedModel->getNumProcResourceKinds());
}

void ConvergingScheduler::initialize(ScheduleDAGMI *dag) {
  DAG = dag;
  SchedModel = DAG->getSchedModel();
  TRI = DAG->TRI;

  Rem.init(DAG, SchedModel);
  Top.init(DAG, SchedModel, &Rem);
  Bot.init(DAG, SchedModel, &Rem);

  DAG->computeDFSResult();

  // Initialize resource counts.

  // Initialize the HazardRecognizers. If itineraries don't exist, are empty, or
  // are disabled, then these HazardRecs will be disabled.
  const InstrItineraryData *Itin = SchedModel->getInstrItineraries();
  const TargetMachine &TM = DAG->MF.getTarget();
  Top.HazardRec = TM.getInstrInfo()->CreateTargetMIHazardRecognizer(Itin, DAG);
  Bot.HazardRec = TM.getInstrInfo()->CreateTargetMIHazardRecognizer(Itin, DAG);

  assert((!ForceTopDown || !ForceBottomUp) &&
         "-misched-topdown incompatible with -misched-bottomup");
}

void ConvergingScheduler::releaseTopNode(SUnit *SU) {
  if (SU->isScheduled)
    return;

  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    unsigned PredReadyCycle = I->getSUnit()->TopReadyCycle;
    unsigned MinLatency = I->getMinLatency();
#ifndef NDEBUG
    Top.MaxMinLatency = std::max(MinLatency, Top.MaxMinLatency);
#endif
    if (SU->TopReadyCycle < PredReadyCycle + MinLatency)
      SU->TopReadyCycle = PredReadyCycle + MinLatency;
  }
  Top.releaseNode(SU, SU->TopReadyCycle);
}

void ConvergingScheduler::releaseBottomNode(SUnit *SU) {
  if (SU->isScheduled)
    return;

  assert(SU->getInstr() && "Scheduled SUnit must have instr");

  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->isWeak())
      continue;
    unsigned SuccReadyCycle = I->getSUnit()->BotReadyCycle;
    unsigned MinLatency = I->getMinLatency();
#ifndef NDEBUG
    Bot.MaxMinLatency = std::max(MinLatency, Bot.MaxMinLatency);
#endif
    if (SU->BotReadyCycle < SuccReadyCycle + MinLatency)
      SU->BotReadyCycle = SuccReadyCycle + MinLatency;
  }
  Bot.releaseNode(SU, SU->BotReadyCycle);
}

void ConvergingScheduler::registerRoots() {
  Rem.CriticalPath = DAG->ExitSU.getDepth();
  // Some roots may not feed into ExitSU. Check all of them in case.
  for (std::vector<SUnit*>::const_iterator
         I = Bot.Available.begin(), E = Bot.Available.end(); I != E; ++I) {
    if ((*I)->getDepth() > Rem.CriticalPath)
      Rem.CriticalPath = (*I)->getDepth();
  }
  DEBUG(dbgs() << "Critical Path: " << Rem.CriticalPath << '\n');
}

/// Does this SU have a hazard within the current instruction group.
///
/// The scheduler supports two modes of hazard recognition.
/// The first is the
/// ScheduleHazardRecognizer API. It is a fully general hazard recognizer that
/// supports highly complicated in-order reservation tables
/// (ScoreboardHazardRecognizer) and arbitrary target-specific logic.
///
/// The second is a streamlined mechanism that checks for hazards based on
/// simple counters that the scheduler itself maintains. It explicitly checks
/// for instruction dispatch limitations, including the number of micro-ops that
/// can dispatch per cycle.
///
/// TODO: Also check whether the SU must start a new group.
bool ConvergingScheduler::SchedBoundary::checkHazard(SUnit *SU) {
  if (HazardRec->isEnabled())
    return HazardRec->getHazardType(SU) != ScheduleHazardRecognizer::NoHazard;

  unsigned uops = SchedModel->getNumMicroOps(SU->getInstr());
  if ((IssueCount > 0) && (IssueCount + uops > SchedModel->getIssueWidth())) {
    DEBUG(dbgs() << "  SU(" << SU->NodeNum << ") uops="
          << SchedModel->getNumMicroOps(SU->getInstr()) << '\n');
    return true;
  }
  return false;
}

/// Compute the remaining latency to determine whether ILP should be increased.
void ConvergingScheduler::SchedBoundary::setLatencyPolicy(CandPolicy &Policy) {
  // FIXME: compile time. In all, we visit four queues here; we should only
  // need to visit the one that was last popped if we cache the result.
  unsigned RemLatency = 0;
  for (ReadyQueue::iterator I = Available.begin(), E = Available.end();
       I != E; ++I) {
    unsigned L = getUnscheduledLatency(*I);
    if (L > RemLatency)
      RemLatency = L;
  }
  for (ReadyQueue::iterator I = Pending.begin(), E = Pending.end();
       I != E; ++I) {
    unsigned L = getUnscheduledLatency(*I);
    if (L > RemLatency)
      RemLatency = L;
  }
  unsigned CriticalPathLimit = Rem->CriticalPath + SchedModel->getILPWindow();
  if (RemLatency + ExpectedLatency >= CriticalPathLimit
      && RemLatency > Rem->getMaxRemainingCount(SchedModel)) {
    Policy.ReduceLatency = true;
    DEBUG(dbgs() << "Increase ILP: " << Available.getName() << '\n');
  }
}

void ConvergingScheduler::SchedBoundary::releaseNode(SUnit *SU,
                                                     unsigned ReadyCycle) {

  if (ReadyCycle < MinReadyCycle)
    MinReadyCycle = ReadyCycle;

  // Check for interlocks first. For the purpose of other heuristics, an
  // instruction that cannot issue appears as if it's not in the ReadyQueue.
  if (ReadyCycle > CurrCycle || checkHazard(SU))
    Pending.push(SU);
  else
    Available.push(SU);

  // Record this node as an immediate dependent of the scheduled node.
  NextSUs.insert(SU);
}

/// Move the boundary of scheduled code by one cycle.
void ConvergingScheduler::SchedBoundary::bumpCycle() {
  unsigned Width = SchedModel->getIssueWidth();
  IssueCount = (IssueCount <= Width) ? 0 : IssueCount - Width;

  unsigned NextCycle = CurrCycle + 1;
  assert(MinReadyCycle < UINT_MAX && "MinReadyCycle uninitialized");
  if (MinReadyCycle > NextCycle) {
    IssueCount = 0;
    NextCycle = MinReadyCycle;
  }

  if (!HazardRec->isEnabled()) {
    // Bypass HazardRec virtual calls.
    CurrCycle = NextCycle;
  }
  else {
    // Bypass getHazardType calls in case of long latency.
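    // Each intermediate cycle is stepped through the recognizer so its
    // internal reservation state stays in sync with CurrCycle.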
    for (; CurrCycle != NextCycle; ++CurrCycle) {
      if (isTop())
        HazardRec->AdvanceCycle();
      else
        HazardRec->RecedeCycle();
    }
  }
  CheckPending = true;
  IsResourceLimited = getCriticalCount() > std::max(ExpectedLatency, CurrCycle);

  DEBUG(dbgs() << "  *** " << Available.getName() << " cycle "
        << CurrCycle << '\n');
}

/// Add the given processor resource to this scheduled zone.
void ConvergingScheduler::SchedBoundary::countResource(unsigned PIdx,
                                                       unsigned Cycles) {
  unsigned Factor = SchedModel->getResourceFactor(PIdx);
  DEBUG(dbgs() << "  " << SchedModel->getProcResource(PIdx)->Name
        << " +(" << Cycles << "x" << Factor
        << ") / " << SchedModel->getLatencyFactor() << '\n');

  unsigned Count = Factor * Cycles;
  ResourceCounts[PIdx] += Count;
  assert(Rem->RemainingCounts[PIdx] >= Count && "resource double counted");
  Rem->RemainingCounts[PIdx] -= Count;

  // Check if this resource exceeds the current critical resource by a full
  // cycle. If so, it becomes the critical resource.
  if ((int)(ResourceCounts[PIdx] - ResourceCounts[CritResIdx])
      >= (int)SchedModel->getLatencyFactor()) {
    CritResIdx = PIdx;
    DEBUG(dbgs() << "  *** Critical resource "
          << SchedModel->getProcResource(PIdx)->Name << " x"
          << ResourceCounts[PIdx] << '\n');
  }
}

/// Move the boundary of scheduled code by one SUnit.
void ConvergingScheduler::SchedBoundary::bumpNode(SUnit *SU) {
  // Update the reservation table.
  if (HazardRec->isEnabled()) {
    if (!isTop() && SU->isCall) {
      // Calls are scheduled with their preceding instructions. For bottom-up
      // scheduling, clear the pipeline state before emitting.
      HazardRec->Reset();
    }
    HazardRec->EmitInstruction(SU);
  }
  // Update resource counts and critical resource.
  if (SchedModel->hasInstrSchedModel()) {
    const MCSchedClassDesc *SC = DAG->getSchedClass(SU);
    Rem->RemainingMicroOps -= SchedModel->getNumMicroOps(SU->getInstr(), SC);
    for (TargetSchedModel::ProcResIter
           PI = SchedModel->getWriteProcResBegin(SC),
           PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
      countResource(PI->ProcResourceIdx, PI->Cycles);
    }
  }
  if (isTop()) {
    if (SU->getDepth() > ExpectedLatency)
      ExpectedLatency = SU->getDepth();
  }
  else {
    if (SU->getHeight() > ExpectedLatency)
      ExpectedLatency = SU->getHeight();
  }

  IsResourceLimited = getCriticalCount() > std::max(ExpectedLatency, CurrCycle);

  // Check the instruction group dispatch limit.
  // TODO: Check if this SU must end a dispatch group.
  IssueCount += SchedModel->getNumMicroOps(SU->getInstr());

  // checkHazard prevents scheduling multiple instructions per cycle that exceed
  // the issue width. However, we commonly reach the maximum. In this case
  // opportunistically bump the cycle to avoid uselessly checking everything in
  // the readyQ. Furthermore, a single instruction may produce more than one
  // cycle's worth of micro-ops.
  if (IssueCount >= SchedModel->getIssueWidth()) {
    DEBUG(dbgs() << "  *** Max instrs at cycle " << CurrCycle << '\n');
    bumpCycle();
  }
}

/// Release pending ready nodes into the available queue. This makes them
/// visible to heuristics.
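/// CheckPending is set by bumpCycle and cleared here once the scan completes.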
void ConvergingScheduler::SchedBoundary::releasePending() {
  // If the available queue is empty, it is safe to reset MinReadyCycle.
  if (Available.empty())
    MinReadyCycle = UINT_MAX;

  // Check to see if any of the pending instructions are ready to issue. If
  // so, add them to the available queue.
  for (unsigned i = 0, e = Pending.size(); i != e; ++i) {
    SUnit *SU = *(Pending.begin()+i);
    unsigned ReadyCycle = isTop() ? SU->TopReadyCycle : SU->BotReadyCycle;

    if (ReadyCycle < MinReadyCycle)
      MinReadyCycle = ReadyCycle;

    if (ReadyCycle > CurrCycle)
      continue;

    if (checkHazard(SU))
      continue;

    Available.push(SU);
    Pending.remove(Pending.begin()+i);
    --i; --e;
  }
  DEBUG(if (!Pending.empty()) Pending.dump());
  CheckPending = false;
}

/// Remove SU from the ready set for this boundary.
void ConvergingScheduler::SchedBoundary::removeReady(SUnit *SU) {
  if (Available.isInQueue(SU))
    Available.remove(Available.find(SU));
  else {
    assert(Pending.isInQueue(SU) && "bad ready count");
    Pending.remove(Pending.find(SU));
  }
}

/// If this queue only has one ready candidate, return it. As a side effect,
/// defer any nodes that now hit a hazard, and advance the cycle until at least
/// one node is ready. If multiple instructions are ready, return NULL.
SUnit *ConvergingScheduler::SchedBoundary::pickOnlyChoice() {
  if (CheckPending)
    releasePending();

  if (IssueCount > 0) {
    // Defer any ready instrs that now have a hazard.
    for (ReadyQueue::iterator I = Available.begin(); I != Available.end();) {
      if (checkHazard(*I)) {
        Pending.push(*I);
        I = Available.remove(I);
        continue;
      }
      ++I;
    }
  }
  for (unsigned i = 0; Available.empty(); ++i) {
    assert(i <= (HazardRec->getMaxLookAhead() + MaxMinLatency) &&
           "permanent hazard"); (void)i;
    bumpCycle();
    releasePending();
  }
  if (Available.size() == 1)
    return *Available.begin();
  return NULL;
}

/// Record the candidate policy for opposite zones with different critical
/// resources.
///
/// If the CriticalZone is latency limited, don't force a policy for the
/// candidates here. Instead, setLatencyPolicy sets ReduceLatency if needed.
void ConvergingScheduler::balanceZones(
  ConvergingScheduler::SchedBoundary &CriticalZone,
  ConvergingScheduler::SchedCandidate &CriticalCand,
  ConvergingScheduler::SchedBoundary &OppositeZone,
  ConvergingScheduler::SchedCandidate &OppositeCand) {

  if (!CriticalZone.IsResourceLimited)
    return;
  assert(SchedModel->hasInstrSchedModel() && "required schedmodel");

  SchedRemainder *Rem = CriticalZone.Rem;

  // If the critical zone is overconsuming a resource relative to the
  // remainder, try to reduce it.
  unsigned RemainingCritCount =
    Rem->RemainingCounts[CriticalZone.CritResIdx];
  if ((int)(Rem->getMaxRemainingCount(SchedModel) - RemainingCritCount)
      > (int)SchedModel->getLatencyFactor()) {
    CriticalCand.Policy.ReduceResIdx = CriticalZone.CritResIdx;
    DEBUG(dbgs() << "Balance " << CriticalZone.Available.getName() << " reduce "
          << SchedModel->getProcResource(CriticalZone.CritResIdx)->Name
          << '\n');
  }
  // If the other zone is underconsuming a resource relative to the full zone,
  // try to increase it.
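  // "Underconsuming" is measured in normalized units: the opposite zone's
  // expected count minus its count of the critical resource, compared against
  // one latency factor's worth of slack.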
  unsigned OppositeCount =
    OppositeZone.ResourceCounts[CriticalZone.CritResIdx];
  if ((int)(OppositeZone.ExpectedCount - OppositeCount)
      > (int)SchedModel->getLatencyFactor()) {
    OppositeCand.Policy.DemandResIdx = CriticalZone.CritResIdx;
    DEBUG(dbgs() << "Balance " << OppositeZone.Available.getName() << " demand "
          << SchedModel->getProcResource(OppositeZone.CritResIdx)->Name
          << '\n');
  }
}

/// Determine if the scheduled zones exceed resource limits or the critical
/// path and set each candidate's ReduceLatency and ReduceResIdx policy
/// accordingly.
void ConvergingScheduler::checkResourceLimits(
  ConvergingScheduler::SchedCandidate &TopCand,
  ConvergingScheduler::SchedCandidate &BotCand) {

  // Set ReduceLatency to true if needed.
  Bot.setLatencyPolicy(BotCand.Policy);
  Top.setLatencyPolicy(TopCand.Policy);

  // Handle resource-limited regions.
  if (Top.IsResourceLimited && Bot.IsResourceLimited
      && Top.CritResIdx == Bot.CritResIdx) {
    // If the scheduled critical resource in both zones is no longer the
    // critical remaining resource, attempt to reduce resource height both ways.
    if (Top.CritResIdx != Rem.CritResIdx) {
      TopCand.Policy.ReduceResIdx = Top.CritResIdx;
      BotCand.Policy.ReduceResIdx = Bot.CritResIdx;
      DEBUG(dbgs() << "Reduce scheduled "
            << SchedModel->getProcResource(Top.CritResIdx)->Name << '\n');
    }
    return;
  }
  // Handle latency-limited regions.
  if (!Top.IsResourceLimited && !Bot.IsResourceLimited) {
    // If the total scheduled expected latency exceeds the region's critical
    // path then reduce latency both ways.
    //
    // Just because a zone is not resource limited does not mean it is latency
    // limited. Unbuffered resources, such as the max micro-op count, may cause
    // CurrCycle to exceed expected latency.
    if ((Top.ExpectedLatency + Bot.ExpectedLatency >= Rem.CriticalPath)
        && (Rem.CriticalPath > Top.CurrCycle + Bot.CurrCycle)) {
      TopCand.Policy.ReduceLatency = true;
      BotCand.Policy.ReduceLatency = true;
      DEBUG(dbgs() << "Reduce scheduled latency " << Top.ExpectedLatency
            << " + " << Bot.ExpectedLatency << '\n');
    }
    return;
  }
  // The critical resource is different in each zone, so request balancing.

  // Compute the cost of each zone.
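  // ExpectedCount = max(CriticalResourceCount,
  //                     max(ExpectedLatency, CurrCycle) * LatencyFactor),
  // i.e. each zone's cost in normalized resource units, whichever of latency
  // or its critical resource dominates.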
1634   Top.ExpectedCount = std::max(Top.ExpectedLatency, Top.CurrCycle);
1635   Top.ExpectedCount = std::max(
1636     Top.getCriticalCount(),
1637     Top.ExpectedCount * SchedModel->getLatencyFactor());
1638   Bot.ExpectedCount = std::max(Bot.ExpectedLatency, Bot.CurrCycle);
1639   Bot.ExpectedCount = std::max(
1640     Bot.getCriticalCount(),
1641     Bot.ExpectedCount * SchedModel->getLatencyFactor());
1642
1643   balanceZones(Top, TopCand, Bot, BotCand);
1644   balanceZones(Bot, BotCand, Top, TopCand);
1645 }
1646
1647 void ConvergingScheduler::SchedCandidate::
1648 initResourceDelta(const ScheduleDAGMI *DAG,
1649                   const TargetSchedModel *SchedModel) {
1650   if (!Policy.ReduceResIdx && !Policy.DemandResIdx)
1651     return;
1652
1653   const MCSchedClassDesc *SC = DAG->getSchedClass(SU);
1654   for (TargetSchedModel::ProcResIter
1655          PI = SchedModel->getWriteProcResBegin(SC),
1656          PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
1657     if (PI->ProcResourceIdx == Policy.ReduceResIdx)
1658       ResDelta.CritResources += PI->Cycles;
1659     if (PI->ProcResourceIdx == Policy.DemandResIdx)
1660       ResDelta.DemandedResources += PI->Cycles;
1661   }
1662 }
1663
1664 /// Return true if this heuristic determines order.
1665 static bool tryLess(unsigned TryVal, unsigned CandVal,
1666                     ConvergingScheduler::SchedCandidate &TryCand,
1667                     ConvergingScheduler::SchedCandidate &Cand,
1668                     ConvergingScheduler::CandReason Reason) {
1669   if (TryVal < CandVal) {
1670     TryCand.Reason = Reason;
1671     return true;
1672   }
1673   if (TryVal > CandVal) {
1674     if (Cand.Reason > Reason)
1675       Cand.Reason = Reason;
1676     return true;
1677   }
1678   return false;
1679 }
1680
1681 static bool tryGreater(unsigned TryVal, unsigned CandVal,
1682                        ConvergingScheduler::SchedCandidate &TryCand,
1683                        ConvergingScheduler::SchedCandidate &Cand,
1684                        ConvergingScheduler::CandReason Reason) {
1685   if (TryVal > CandVal) {
1686     TryCand.Reason = Reason;
1687     return true;
1688   }
1689   if (TryVal < CandVal) {
1690     if (Cand.Reason > Reason)
1691       Cand.Reason = Reason;
1692     return true;
1693   }
1694   return false;
1695 }
1696
1697 static unsigned getWeakLeft(const SUnit *SU, bool isTop) {
1698   return (isTop) ? SU->WeakPredsLeft : SU->WeakSuccsLeft;
1699 }
1700
1701 /// Apply a set of heuristics to a new candidate. Heuristics are currently
1702 /// hierarchical. This may be more efficient than a graduated cost model because
1703 /// we don't need to evaluate all aspects of the model for each node in the
1704 /// queue. But it's really done to make the heuristics easier to debug and
1705 /// statistically analyze.
1706 ///
1707 /// \param Cand provides the policy and current best candidate.
1708 /// \param TryCand refers to the next SUnit candidate, otherwise uninitialized.
1709 /// \param Zone describes the scheduled zone that we are extending.
1710 /// \param RPTracker describes reg pressure within the scheduled zone.
1711 /// \param TempTracker is a scratch pressure tracker to reuse in queries.
1712 void ConvergingScheduler::tryCandidate(SchedCandidate &Cand,
1713                                        SchedCandidate &TryCand,
1714                                        SchedBoundary &Zone,
1715                                        const RegPressureTracker &RPTracker,
1716                                        RegPressureTracker &TempTracker) {
1717
1718   // Always initialize TryCand's RPDelta.
1719   TempTracker.getMaxPressureDelta(TryCand.SU->getInstr(), TryCand.RPDelta,
1720                                   DAG->getRegionCriticalPSets(),
1721                                   DAG->getRegPressure().MaxSetPressure);
1722
1723   // Initialize the candidate if needed.
1724   if (!Cand.isValid()) {
1725     TryCand.Reason = NodeOrder;
1726     return;
1727   }
1728   // Avoid exceeding the target's limit.
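  //
  // Note that tryLess/tryGreater decide as soon as the two values differ. For
  // example (hypothetical values), an Excess.UnitIncrease of 0 vs. 2 makes
  // TryCand the winner with reason SingleExcess, while 2 vs. 0 keeps Cand and
  // strengthens Cand.Reason to at most SingleExcess.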
1729 if (tryLess(TryCand.RPDelta.Excess.UnitIncrease, 1730 Cand.RPDelta.Excess.UnitIncrease, TryCand, Cand, SingleExcess)) 1731 return; 1732 if (Cand.Reason == SingleExcess) 1733 Cand.Reason = MultiPressure; 1734 1735 // Avoid increasing the max critical pressure in the scheduled region. 1736 if (tryLess(TryCand.RPDelta.CriticalMax.UnitIncrease, 1737 Cand.RPDelta.CriticalMax.UnitIncrease, 1738 TryCand, Cand, SingleCritical)) 1739 return; 1740 if (Cand.Reason == SingleCritical) 1741 Cand.Reason = MultiPressure; 1742 1743 // Keep clustered nodes together to encourage downstream peephole 1744 // optimizations which may reduce resource requirements. 1745 // 1746 // This is a best effort to set things up for a post-RA pass. Optimizations 1747 // like generating loads of multiple registers should ideally be done within 1748 // the scheduler pass by combining the loads during DAG postprocessing. 1749 const SUnit *NextClusterSU = 1750 Zone.isTop() ? DAG->getNextClusterSucc() : DAG->getNextClusterPred(); 1751 if (tryGreater(TryCand.SU == NextClusterSU, Cand.SU == NextClusterSU, 1752 TryCand, Cand, Cluster)) 1753 return; 1754 // Currently, weak edges are for clustering, so we hard-code that reason. 1755 // However, deferring the current TryCand will not change Cand's reason. 1756 CandReason OrigReason = Cand.Reason; 1757 if (tryLess(getWeakLeft(TryCand.SU, Zone.isTop()), 1758 getWeakLeft(Cand.SU, Zone.isTop()), 1759 TryCand, Cand, Cluster)) { 1760 Cand.Reason = OrigReason; 1761 return; 1762 } 1763 // Avoid critical resource consumption and balance the schedule. 1764 TryCand.initResourceDelta(DAG, SchedModel); 1765 if (tryLess(TryCand.ResDelta.CritResources, Cand.ResDelta.CritResources, 1766 TryCand, Cand, ResourceReduce)) 1767 return; 1768 if (tryGreater(TryCand.ResDelta.DemandedResources, 1769 Cand.ResDelta.DemandedResources, 1770 TryCand, Cand, ResourceDemand)) 1771 return; 1772 1773 // Avoid serializing long latency dependence chains. 1774 if (Cand.Policy.ReduceLatency) { 1775 if (Zone.isTop()) { 1776 if (Cand.SU->getDepth() * SchedModel->getLatencyFactor() 1777 > Zone.ExpectedCount) { 1778 if (tryLess(TryCand.SU->getDepth(), Cand.SU->getDepth(), 1779 TryCand, Cand, TopDepthReduce)) 1780 return; 1781 } 1782 if (tryGreater(TryCand.SU->getHeight(), Cand.SU->getHeight(), 1783 TryCand, Cand, TopPathReduce)) 1784 return; 1785 } 1786 else { 1787 if (Cand.SU->getHeight() * SchedModel->getLatencyFactor() 1788 > Zone.ExpectedCount) { 1789 if (tryLess(TryCand.SU->getHeight(), Cand.SU->getHeight(), 1790 TryCand, Cand, BotHeightReduce)) 1791 return; 1792 } 1793 if (tryGreater(TryCand.SU->getDepth(), Cand.SU->getDepth(), 1794 TryCand, Cand, BotPathReduce)) 1795 return; 1796 } 1797 } 1798 1799 // Avoid increasing the max pressure of the entire region. 1800 if (tryLess(TryCand.RPDelta.CurrentMax.UnitIncrease, 1801 Cand.RPDelta.CurrentMax.UnitIncrease, TryCand, Cand, SingleMax)) 1802 return; 1803 if (Cand.Reason == SingleMax) 1804 Cand.Reason = MultiPressure; 1805 1806 // Prefer immediate defs/users of the last scheduled instruction. This is a 1807 // nice pressure avoidance strategy that also conserves the processor's 1808 // register renaming resources and keeps the machine code readable. 1809 if (tryGreater(Zone.NextSUs.count(TryCand.SU), Zone.NextSUs.count(Cand.SU), 1810 TryCand, Cand, NextDefUse)) 1811 return; 1812 1813 // Fall through to original instruction order. 
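  //
  // For example, when extending the top zone SU(3) is preferred over SU(7),
  // while in the bottom zone SU(7) is preferred over SU(3); each zone thus
  // consumes the original order from its own end of the region.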
1814   if ((Zone.isTop() && TryCand.SU->NodeNum < Cand.SU->NodeNum)
1815       || (!Zone.isTop() && TryCand.SU->NodeNum > Cand.SU->NodeNum)) {
1816     TryCand.Reason = NodeOrder;
1817   }
1818 }
1819
1820 /// pickNodeFromQueue helper that returns true if the LHS reg pressure effect
1821 /// is more desirable than RHS from a scheduling standpoint.
1822 static bool compareRPDelta(const RegPressureDelta &LHS,
1823                            const RegPressureDelta &RHS) {
1824   // Compare each component of pressure in decreasing order of importance
1825   // without checking if any are valid. Invalid PressureElements are assumed to
1826   // have UnitIncrease==0, so are neutral.
1827
1828   // Avoid exceeding the target's limit.
1829   if (LHS.Excess.UnitIncrease != RHS.Excess.UnitIncrease) {
1830     DEBUG(dbgs() << "RP excess top - bot: "
1831           << (LHS.Excess.UnitIncrease - RHS.Excess.UnitIncrease) << '\n');
1832     return LHS.Excess.UnitIncrease < RHS.Excess.UnitIncrease;
1833   }
1834   // Avoid increasing the max critical pressure in the scheduled region.
1835   if (LHS.CriticalMax.UnitIncrease != RHS.CriticalMax.UnitIncrease) {
1836     DEBUG(dbgs() << "RP critical top - bot: "
1837           << (LHS.CriticalMax.UnitIncrease - RHS.CriticalMax.UnitIncrease)
1838           << '\n');
1839     return LHS.CriticalMax.UnitIncrease < RHS.CriticalMax.UnitIncrease;
1840   }
1841   // Avoid increasing the max pressure of the entire region.
1842   if (LHS.CurrentMax.UnitIncrease != RHS.CurrentMax.UnitIncrease) {
1843     DEBUG(dbgs() << "RP current top - bot: "
1844           << (LHS.CurrentMax.UnitIncrease - RHS.CurrentMax.UnitIncrease)
1845           << '\n');
1846     return LHS.CurrentMax.UnitIncrease < RHS.CurrentMax.UnitIncrease;
1847   }
1848   return false;
1849 }
1850
1851 #ifndef NDEBUG
1852 const char *ConvergingScheduler::getReasonStr(
1853   ConvergingScheduler::CandReason Reason) {
1854   switch (Reason) {
1855   case NoCand:         return "NOCAND    ";
1856   case SingleExcess:   return "REG-EXCESS";
1857   case SingleCritical: return "REG-CRIT  ";
1858   case Cluster:        return "CLUSTER   ";
1859   case SingleMax:      return "REG-MAX   ";
1860   case MultiPressure:  return "REG-MULTI ";
1861   case ResourceReduce: return "RES-REDUCE";
1862   case ResourceDemand: return "RES-DEMAND";
1863   case TopDepthReduce: return "TOP-DEPTH ";
1864   case TopPathReduce:  return "TOP-PATH  ";
1865   case BotHeightReduce:return "BOT-HEIGHT";
1866   case BotPathReduce:  return "BOT-PATH  ";
1867   case NextDefUse:     return "DEF-USE   ";
1868   case NodeOrder:      return "ORDER     ";
1869   }
1870   llvm_unreachable("Unknown reason!");
1871 }
1872
1873 void ConvergingScheduler::traceCandidate(const SchedCandidate &Cand,
1874                                          const SchedBoundary &Zone) {
1875   const char *Label = getReasonStr(Cand.Reason);
1876   PressureElement P;
1877   unsigned ResIdx = 0;
1878   unsigned Latency = 0;
1879   switch (Cand.Reason) {
1880   default:
1881     break;
1882   case SingleExcess:
1883     P = Cand.RPDelta.Excess;
1884     break;
1885   case SingleCritical:
1886     P = Cand.RPDelta.CriticalMax;
1887     break;
1888   case SingleMax:
1889     P = Cand.RPDelta.CurrentMax;
1890     break;
1891   case ResourceReduce:
1892     ResIdx = Cand.Policy.ReduceResIdx;
1893     break;
1894   case ResourceDemand:
1895     ResIdx = Cand.Policy.DemandResIdx;
1896     break;
1897   case TopDepthReduce:
1898     Latency = Cand.SU->getDepth();
1899     break;
1900   case TopPathReduce:
1901     Latency = Cand.SU->getHeight();
1902     break;
1903   case BotHeightReduce:
1904     Latency = Cand.SU->getHeight();
1905     break;
1906   case BotPathReduce:
1907     Latency = Cand.SU->getDepth();
1908     break;
1909   }
1910   dbgs() << Label << " " << Zone.Available.getName() << " ";
1911   if (P.isValid())
1912 dbgs() << TRI->getRegPressureSetName(P.PSetID) << ":" << P.UnitIncrease 1913 << " "; 1914 else 1915 dbgs() << " "; 1916 if (ResIdx) 1917 dbgs() << SchedModel->getProcResource(ResIdx)->Name << " "; 1918 else 1919 dbgs() << " "; 1920 if (Latency) 1921 dbgs() << Latency << " cycles "; 1922 else 1923 dbgs() << " "; 1924 Cand.SU->dump(DAG); 1925 } 1926 #endif 1927 1928 /// Pick the best candidate from the top queue. 1929 /// 1930 /// TODO: getMaxPressureDelta results can be mostly cached for each SUnit during 1931 /// DAG building. To adjust for the current scheduling location we need to 1932 /// maintain the number of vreg uses remaining to be top-scheduled. 1933 void ConvergingScheduler::pickNodeFromQueue(SchedBoundary &Zone, 1934 const RegPressureTracker &RPTracker, 1935 SchedCandidate &Cand) { 1936 ReadyQueue &Q = Zone.Available; 1937 1938 DEBUG(Q.dump()); 1939 1940 // getMaxPressureDelta temporarily modifies the tracker. 1941 RegPressureTracker &TempTracker = const_cast<RegPressureTracker&>(RPTracker); 1942 1943 for (ReadyQueue::iterator I = Q.begin(), E = Q.end(); I != E; ++I) { 1944 1945 SchedCandidate TryCand(Cand.Policy); 1946 TryCand.SU = *I; 1947 tryCandidate(Cand, TryCand, Zone, RPTracker, TempTracker); 1948 if (TryCand.Reason != NoCand) { 1949 // Initialize resource delta if needed in case future heuristics query it. 1950 if (TryCand.ResDelta == SchedResourceDelta()) 1951 TryCand.initResourceDelta(DAG, SchedModel); 1952 Cand.setBest(TryCand); 1953 DEBUG(traceCandidate(Cand, Zone)); 1954 } 1955 } 1956 } 1957 1958 static void tracePick(const ConvergingScheduler::SchedCandidate &Cand, 1959 bool IsTop) { 1960 DEBUG(dbgs() << "Pick " << (IsTop ? "top" : "bot") 1961 << " SU(" << Cand.SU->NodeNum << ") " 1962 << ConvergingScheduler::getReasonStr(Cand.Reason) << '\n'); 1963 } 1964 1965 /// Pick the best candidate node from either the top or bottom queue. 1966 SUnit *ConvergingScheduler::pickNodeBidirectional(bool &IsTopNode) { 1967 // Schedule as far as possible in the direction of no choice. This is most 1968 // efficient, but also provides the best heuristics for CriticalPSets. 1969 if (SUnit *SU = Bot.pickOnlyChoice()) { 1970 IsTopNode = false; 1971 return SU; 1972 } 1973 if (SUnit *SU = Top.pickOnlyChoice()) { 1974 IsTopNode = true; 1975 return SU; 1976 } 1977 CandPolicy NoPolicy; 1978 SchedCandidate BotCand(NoPolicy); 1979 SchedCandidate TopCand(NoPolicy); 1980 checkResourceLimits(TopCand, BotCand); 1981 1982 // Prefer bottom scheduling when heuristics are silent. 1983 pickNodeFromQueue(Bot, DAG->getBotRPTracker(), BotCand); 1984 assert(BotCand.Reason != NoCand && "failed to find the first candidate"); 1985 1986 // If either Q has a single candidate that provides the least increase in 1987 // Excess pressure, we can immediately schedule from that Q. 1988 // 1989 // RegionCriticalPSets summarizes the pressure within the scheduled region and 1990 // affects picking from either Q. If scheduling in one direction must 1991 // increase pressure for one of the excess PSets, then schedule in that 1992 // direction first to provide more freedom in the other direction. 1993 if (BotCand.Reason == SingleExcess || BotCand.Reason == SingleCritical) { 1994 IsTopNode = false; 1995 tracePick(BotCand, IsTopNode); 1996 return BotCand.SU; 1997 } 1998 // Check if the top Q has a better candidate. 
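  //
  // CandReason values are ordered by priority, so a numerically smaller reason
  // is stronger; the comparisons below use this to pick the side whose best
  // candidate won on an earlier (higher priority) heuristic.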
1999   pickNodeFromQueue(Top, DAG->getTopRPTracker(), TopCand);
2000   assert(TopCand.Reason != NoCand && "failed to find the first candidate");
2001
2002   // If either Q has a single candidate that minimizes pressure above the
2003   // original region's pressure, pick it.
2004   if (TopCand.Reason <= SingleMax || BotCand.Reason <= SingleMax) {
2005     if (TopCand.Reason < BotCand.Reason) {
2006       IsTopNode = true;
2007       tracePick(TopCand, IsTopNode);
2008       return TopCand.SU;
2009     }
2010     IsTopNode = false;
2011     tracePick(BotCand, IsTopNode);
2012     return BotCand.SU;
2013   }
2014   // Check for a salient pressure difference and pick the best from either side.
2015   if (compareRPDelta(TopCand.RPDelta, BotCand.RPDelta)) {
2016     IsTopNode = true;
2017     tracePick(TopCand, IsTopNode);
2018     return TopCand.SU;
2019   }
2020   // Otherwise prefer the bottom candidate, in node order if all else failed.
2021   if (TopCand.Reason < BotCand.Reason) {
2022     IsTopNode = true;
2023     tracePick(TopCand, IsTopNode);
2024     return TopCand.SU;
2025   }
2026   IsTopNode = false;
2027   tracePick(BotCand, IsTopNode);
2028   return BotCand.SU;
2029 }
2030
2031 /// Pick the best node to balance the schedule. Implements MachineSchedStrategy.
2032 SUnit *ConvergingScheduler::pickNode(bool &IsTopNode) {
2033   if (DAG->top() == DAG->bottom()) {
2034     assert(Top.Available.empty() && Top.Pending.empty() &&
2035            Bot.Available.empty() && Bot.Pending.empty() && "ReadyQ garbage");
2036     return NULL;
2037   }
2038   SUnit *SU;
2039   do {
2040     if (ForceTopDown) {
2041       SU = Top.pickOnlyChoice();
2042       if (!SU) {
2043         CandPolicy NoPolicy;
2044         SchedCandidate TopCand(NoPolicy);
2045         pickNodeFromQueue(Top, DAG->getTopRPTracker(), TopCand);
2046         assert(TopCand.Reason != NoCand && "failed to find the first candidate");
2047         SU = TopCand.SU;
2048       }
2049       IsTopNode = true;
2050     }
2051     else if (ForceBottomUp) {
2052       SU = Bot.pickOnlyChoice();
2053       if (!SU) {
2054         CandPolicy NoPolicy;
2055         SchedCandidate BotCand(NoPolicy);
2056         pickNodeFromQueue(Bot, DAG->getBotRPTracker(), BotCand);
2057         assert(BotCand.Reason != NoCand && "failed to find the first candidate");
2058         SU = BotCand.SU;
2059       }
2060       IsTopNode = false;
2061     }
2062     else {
2063       SU = pickNodeBidirectional(IsTopNode);
2064     }
2065   } while (SU->isScheduled);
2066
2067   if (SU->isTopReady())
2068     Top.removeReady(SU);
2069   if (SU->isBottomReady())
2070     Bot.removeReady(SU);
2071
2072   DEBUG(dbgs() << "*** " << (IsTopNode ? "Top" : "Bottom")
2073         << " Scheduling Instruction in cycle "
2074         << (IsTopNode ? Top.CurrCycle : Bot.CurrCycle) << '\n';
2075         SU->dump(DAG));
2076   return SU;
2077 }
2078
2079 /// Update the scheduler's state after scheduling a node. This is the same node
2080 /// that was just returned by pickNode(). However, ScheduleDAGMI needs to update
2081 /// its state based on the current cycle before MachineSchedStrategy does.
2082 void ConvergingScheduler::schedNode(SUnit *SU, bool IsTopNode) {
2083   if (IsTopNode) {
2084     SU->TopReadyCycle = Top.CurrCycle;
2085     Top.bumpNode(SU);
2086   }
2087   else {
2088     SU->BotReadyCycle = Bot.CurrCycle;
2089     Bot.bumpNode(SU);
2090   }
2091 }
2092
2093 /// Create the standard converging machine scheduler. This will be used as the
2094 /// default scheduler if the target does not set a default.
2095 static ScheduleDAGInstrs *createConvergingSched(MachineSchedContext *C) {
2096   assert((!ForceTopDown || !ForceBottomUp) &&
2097          "-misched-topdown incompatible with -misched-bottomup");
2098   ScheduleDAGMI *DAG = new ScheduleDAGMI(C, new ConvergingScheduler());
2099   // Register DAG post-processors.
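  // Mutations run in registration order after the DAG is built but before any
  // node is scheduled (so here load clustering runs before macro fusion).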
2100   if (EnableLoadCluster)
2101     DAG->addMutation(new LoadClusterMutation(DAG->TII, DAG->TRI));
2102   if (EnableMacroFusion)
2103     DAG->addMutation(new MacroFusion(DAG->TII));
2104   return DAG;
2105 }
2106 static MachineSchedRegistry
2107 ConvergingSchedRegistry("converge", "Standard converging scheduler.",
2108                         createConvergingSched);
2109
2110 //===----------------------------------------------------------------------===//
2111 // ILP Scheduler. Currently for experimental analysis of heuristics.
2112 //===----------------------------------------------------------------------===//
2113
2114 namespace {
2115 /// \brief Order nodes by the ILP metric.
2116 struct ILPOrder {
2117   const SchedDFSResult *DFSResult;
2118   const BitVector *ScheduledTrees;
2119   bool MaximizeILP;
2120
2121   ILPOrder(bool MaxILP): DFSResult(0), ScheduledTrees(0), MaximizeILP(MaxILP) {}
2122
2123   /// \brief Apply a less-than relation on node priority.
2124   ///
2125   /// (Return true if A comes after B in the Q.)
2126   bool operator()(const SUnit *A, const SUnit *B) const {
2127     unsigned SchedTreeA = DFSResult->getSubtreeID(A);
2128     unsigned SchedTreeB = DFSResult->getSubtreeID(B);
2129     if (SchedTreeA != SchedTreeB) {
2130       // Unscheduled trees have lower priority.
2131       if (ScheduledTrees->test(SchedTreeA) != ScheduledTrees->test(SchedTreeB))
2132         return ScheduledTrees->test(SchedTreeB);
2133
2134       // Trees with shallower connections have lower priority.
2135       if (DFSResult->getSubtreeLevel(SchedTreeA)
2136           != DFSResult->getSubtreeLevel(SchedTreeB)) {
2137         return DFSResult->getSubtreeLevel(SchedTreeA)
2138           < DFSResult->getSubtreeLevel(SchedTreeB);
2139       }
2140     }
2141     if (MaximizeILP)
2142       return DFSResult->getILP(A) < DFSResult->getILP(B);
2143     else
2144       return DFSResult->getILP(A) > DFSResult->getILP(B);
2145   }
2146 };
2147
2148 /// \brief Schedule based on the ILP metric.
2149 class ILPScheduler : public MachineSchedStrategy {
2150   /// In case all subtrees are eventually connected to a common root through
2151   /// data dependence (e.g. reduction), place an upper limit on their size.
2152   ///
2153   /// FIXME: A subtree limit is generally good, but in the situation commented
2154   /// above, where multiple similar subtrees feed a common root, we should
2155   /// only split at a point where the resulting subtrees will be balanced.
2156   /// (a motivating test case must be found).
2157   static const unsigned SubtreeLimit = 16;
2158
2159   ScheduleDAGMI *DAG;
2160   ILPOrder Cmp;
2161
2162   std::vector<SUnit*> ReadyQ;
2163 public:
2164   ILPScheduler(bool MaximizeILP): DAG(0), Cmp(MaximizeILP) {}
2165
2166   virtual void initialize(ScheduleDAGMI *dag) {
2167     DAG = dag;
2168     DAG->computeDFSResult();
2169     Cmp.DFSResult = DAG->getDFSResult();
2170     Cmp.ScheduledTrees = &DAG->getScheduledTrees();
2171     ReadyQ.clear();
2172   }
2173
2174   virtual void registerRoots() {
2175     // Restore the heap in ReadyQ with the updated DFS results.
2176     std::make_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
2177   }
2178
2179   /// Implement MachineSchedStrategy interface.
2180   /// -----------------------------------------
2181
2182   /// Callback to select the highest priority node from the ready Q.
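  ///
  /// ReadyQ is maintained as a std::heap ordered by ILPOrder, so the highest
  /// priority node is at the front; pop_heap below moves it to the back where
  /// it can be removed in constant time.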
2183   virtual SUnit *pickNode(bool &IsTopNode) {
2184     if (ReadyQ.empty()) return NULL;
2185     std::pop_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
2186     SUnit *SU = ReadyQ.back();
2187     ReadyQ.pop_back();
2188     IsTopNode = false;
2189     DEBUG(dbgs() << "*** Scheduling " << "SU(" << SU->NodeNum << "): "
2190           << *SU->getInstr()
2191           << " ILP: " << DAG->getDFSResult()->getILP(SU)
2192           << " Tree: " << DAG->getDFSResult()->getSubtreeID(SU) << " @"
2193           << DAG->getDFSResult()->getSubtreeLevel(
2194             DAG->getDFSResult()->getSubtreeID(SU)) << '\n');
2195     return SU;
2196   }
2197
2198   /// \brief Scheduler callback to notify that a new subtree is scheduled.
2199   virtual void scheduleTree(unsigned SubtreeID) {
2200     std::make_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
2201   }
2202
2203   /// Callback after a node is scheduled. Marking newly scheduled trees and
2204   /// resorting the priority Q are handled by the scheduleTree() callback.
2205   virtual void schedNode(SUnit *SU, bool IsTopNode) {
2206     assert(!IsTopNode && "SchedDFSResult needs bottom-up");
2207   }
2208
2209   virtual void releaseTopNode(SUnit *) { /*only called for top roots*/ }
2210
2211   virtual void releaseBottomNode(SUnit *SU) {
2212     ReadyQ.push_back(SU);
2213     std::push_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
2214   }
2215 };
2216 } // namespace
2217
2218 static ScheduleDAGInstrs *createILPMaxScheduler(MachineSchedContext *C) {
2219   return new ScheduleDAGMI(C, new ILPScheduler(true));
2220 }
2221 static ScheduleDAGInstrs *createILPMinScheduler(MachineSchedContext *C) {
2222   return new ScheduleDAGMI(C, new ILPScheduler(false));
2223 }
2224 static MachineSchedRegistry ILPMaxRegistry(
2225   "ilpmax", "Schedule bottom-up for max ILP", createILPMaxScheduler);
2226 static MachineSchedRegistry ILPMinRegistry(
2227   "ilpmin", "Schedule bottom-up for min ILP", createILPMinScheduler);
2228
2229 //===----------------------------------------------------------------------===//
2230 // Machine Instruction Shuffler for Correctness Testing
2231 //===----------------------------------------------------------------------===//
2232
2233 #ifndef NDEBUG
2234 namespace {
2235 /// Apply a less-than relation on the node order, which corresponds to the
2236 /// instruction order prior to scheduling. IsReverse implements greater-than.
2237 template<bool IsReverse>
2238 struct SUnitOrder {
2239   bool operator()(SUnit *A, SUnit *B) const {
2240     if (IsReverse)
2241       return A->NodeNum > B->NodeNum;
2242     else
2243       return A->NodeNum < B->NodeNum;
2244   }
2245 };
2246
2247 /// Reorder instructions as much as possible.
2248 class InstructionShuffler : public MachineSchedStrategy {
2249   bool IsAlternating;
2250   bool IsTopDown;
2251
2252   // Using a less-than relation (SUnitOrder<false>) for the TopQ priority
2253   // gives nodes with a higher number higher priority, causing the latest
2254   // instructions to be scheduled first.
2255   PriorityQueue<SUnit*, std::vector<SUnit*>, SUnitOrder<false> >
2256     TopQ;
2257   // When scheduling bottom-up, use greater-than as the queue priority.
2258   PriorityQueue<SUnit*, std::vector<SUnit*>, SUnitOrder<true> >
2259     BottomQ;
2260 public:
2261   InstructionShuffler(bool alternate, bool topdown)
2262     : IsAlternating(alternate), IsTopDown(topdown) {}
2263
2264   virtual void initialize(ScheduleDAGMI *) {
2265     TopQ.clear();
2266     BottomQ.clear();
2267   }
2268
2269   /// Implement MachineSchedStrategy interface.
2270 /// ----------------------------------------- 2271 2272 virtual SUnit *pickNode(bool &IsTopNode) { 2273 SUnit *SU; 2274 if (IsTopDown) { 2275 do { 2276 if (TopQ.empty()) return NULL; 2277 SU = TopQ.top(); 2278 TopQ.pop(); 2279 } while (SU->isScheduled); 2280 IsTopNode = true; 2281 } 2282 else { 2283 do { 2284 if (BottomQ.empty()) return NULL; 2285 SU = BottomQ.top(); 2286 BottomQ.pop(); 2287 } while (SU->isScheduled); 2288 IsTopNode = false; 2289 } 2290 if (IsAlternating) 2291 IsTopDown = !IsTopDown; 2292 return SU; 2293 } 2294 2295 virtual void schedNode(SUnit *SU, bool IsTopNode) {} 2296 2297 virtual void releaseTopNode(SUnit *SU) { 2298 TopQ.push(SU); 2299 } 2300 virtual void releaseBottomNode(SUnit *SU) { 2301 BottomQ.push(SU); 2302 } 2303 }; 2304 } // namespace 2305 2306 static ScheduleDAGInstrs *createInstructionShuffler(MachineSchedContext *C) { 2307 bool Alternate = !ForceTopDown && !ForceBottomUp; 2308 bool TopDown = !ForceBottomUp; 2309 assert((TopDown || !ForceTopDown) && 2310 "-misched-topdown incompatible with -misched-bottomup"); 2311 return new ScheduleDAGMI(C, new InstructionShuffler(Alternate, TopDown)); 2312 } 2313 static MachineSchedRegistry ShufflerRegistry( 2314 "shuffle", "Shuffle machine instructions alternating directions", 2315 createInstructionShuffler); 2316 #endif // !NDEBUG 2317 2318 //===----------------------------------------------------------------------===// 2319 // GraphWriter support for ScheduleDAGMI. 2320 //===----------------------------------------------------------------------===// 2321 2322 #ifndef NDEBUG 2323 namespace llvm { 2324 2325 template<> struct GraphTraits< 2326 ScheduleDAGMI*> : public GraphTraits<ScheduleDAG*> {}; 2327 2328 template<> 2329 struct DOTGraphTraits<ScheduleDAGMI*> : public DefaultDOTGraphTraits { 2330 2331 DOTGraphTraits (bool isSimple=false) : DefaultDOTGraphTraits(isSimple) {} 2332 2333 static std::string getGraphName(const ScheduleDAG *G) { 2334 return G->MF.getName(); 2335 } 2336 2337 static bool renderGraphFromBottomUp() { 2338 return true; 2339 } 2340 2341 static bool isNodeHidden(const SUnit *Node) { 2342 return (Node->NumPreds > 10 || Node->NumSuccs > 10); 2343 } 2344 2345 static bool hasNodeAddressLabel(const SUnit *Node, 2346 const ScheduleDAG *Graph) { 2347 return false; 2348 } 2349 2350 /// If you want to override the dot attributes printed for a particular 2351 /// edge, override this method. 
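  ///
  /// The string returned below is a raw Graphviz (dot) attribute list; for
  /// example, "color=blue,style=dashed" renders control dependences as dashed
  /// blue edges.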
2352 static std::string getEdgeAttributes(const SUnit *Node, 2353 SUnitIterator EI, 2354 const ScheduleDAG *Graph) { 2355 if (EI.isArtificialDep()) 2356 return "color=cyan,style=dashed"; 2357 if (EI.isCtrlDep()) 2358 return "color=blue,style=dashed"; 2359 return ""; 2360 } 2361 2362 static std::string getNodeLabel(const SUnit *SU, const ScheduleDAG *G) { 2363 std::string Str; 2364 raw_string_ostream SS(Str); 2365 SS << "SU(" << SU->NodeNum << ')'; 2366 return SS.str(); 2367 } 2368 static std::string getNodeDescription(const SUnit *SU, const ScheduleDAG *G) { 2369 return G->getGraphNodeLabel(SU); 2370 } 2371 2372 static std::string getNodeAttributes(const SUnit *N, 2373 const ScheduleDAG *Graph) { 2374 std::string Str("shape=Mrecord"); 2375 const SchedDFSResult *DFS = 2376 static_cast<const ScheduleDAGMI*>(Graph)->getDFSResult(); 2377 if (DFS) { 2378 Str += ",style=filled,fillcolor=\"#"; 2379 Str += DOT::getColorString(DFS->getSubtreeID(N)); 2380 Str += '"'; 2381 } 2382 return Str; 2383 } 2384 }; 2385 } // namespace llvm 2386 #endif // NDEBUG 2387 2388 /// viewGraph - Pop up a ghostview window with the reachable parts of the DAG 2389 /// rendered using 'dot'. 2390 /// 2391 void ScheduleDAGMI::viewGraph(const Twine &Name, const Twine &Title) { 2392 #ifndef NDEBUG 2393 ViewGraph(this, Name, false, Title); 2394 #else 2395 errs() << "ScheduleDAGMI::viewGraph is only available in debug builds on " 2396 << "systems with Graphviz or gv!\n"; 2397 #endif // NDEBUG 2398 } 2399 2400 /// Out-of-line implementation with no arguments is handy for gdb. 2401 void ScheduleDAGMI::viewGraph() { 2402 viewGraph(getDAGName(), "Scheduling-Units Graph for " + getDAGName()); 2403 } 2404
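// Usage sketch: the schedulers registered in this file can be selected on the
// llc command line via the -misched option, e.g. "-misched=converge" (the
// standard converging scheduler), "-misched=ilpmax", "-misched=ilpmin", or,
// in debug builds, "-misched=shuffle". The -misched-topdown and
// -misched-bottomup flags force a single scheduling direction.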