//===---- ScheduleDAG.cpp - Implement the ScheduleDAG class ---------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements the ScheduleDAG class, which is a base class used by
// scheduling implementation classes.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/ScheduleDAG.h"
#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include <climits>
using namespace llvm;

#define DEBUG_TYPE "pre-RA-sched"

#ifndef NDEBUG
static cl::opt<bool> StressSchedOpt(
  "stress-sched", cl::Hidden, cl::init(false),
  cl::desc("Stress test instruction scheduling"));
#endif

void SchedulingPriorityQueue::anchor() { }

ScheduleDAG::ScheduleDAG(MachineFunction &mf)
    : TM(mf.getTarget()), TII(mf.getSubtarget().getInstrInfo()),
      TRI(mf.getSubtarget().getRegisterInfo()), MF(mf),
      MRI(mf.getRegInfo()), EntrySU(), ExitSU() {
#ifndef NDEBUG
  StressSched = StressSchedOpt;
#endif
}

ScheduleDAG::~ScheduleDAG() {}

/// Clear the DAG state (e.g. between scheduling regions).
void ScheduleDAG::clearDAG() {
  SUnits.clear();
  EntrySU = SUnit();
  ExitSU = SUnit();
}

/// getInstrDesc helper to handle SDNodes.
const MCInstrDesc *ScheduleDAG::getNodeDesc(const SDNode *Node) const {
  if (!Node || !Node->isMachineOpcode()) return nullptr;
  return &TII->get(Node->getMachineOpcode());
}

/// addPred - This adds the specified edge as a pred of the current node if it
/// is not already present.  It also adds the current node as a successor of
/// the specified node.
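/// Bookkeeping maintained here (and mirrored in removePred): NumPreds and
/// NumSuccs count data edges only, NumPredsLeft/NumSuccsLeft count strong
/// (non-weak) edges to unscheduled neighbors, and WeakPredsLeft/WeakSuccsLeft
/// count the weak ones.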
bool SUnit::addPred(const SDep &D, bool Required) {
  // If this node already has this dependence, don't add a redundant one.
  for (SmallVectorImpl<SDep>::iterator I = Preds.begin(), E = Preds.end();
         I != E; ++I) {
    // Zero-latency weak edges may be added purely for heuristic ordering. Don't
    // add them if another kind of edge already exists.
    if (!Required && I->getSUnit() == D.getSUnit())
      return false;
    if (I->overlaps(D)) {
      // Extend the latency if needed. Equivalent to removePred(I) + addPred(D).
      if (I->getLatency() < D.getLatency()) {
        SUnit *PredSU = I->getSUnit();
        // Find the corresponding successor in N.
        SDep ForwardD = *I;
        ForwardD.setSUnit(this);
        for (SmallVectorImpl<SDep>::iterator II = PredSU->Succs.begin(),
               EE = PredSU->Succs.end(); II != EE; ++II) {
          if (*II == ForwardD) {
            II->setLatency(D.getLatency());
            break;
          }
        }
        I->setLatency(D.getLatency());
      }
      return false;
    }
  }
  // Now add a corresponding succ to N.
  SDep P = D;
  P.setSUnit(this);
  SUnit *N = D.getSUnit();
  // Update the bookkeeping.
  if (D.getKind() == SDep::Data) {
    assert(NumPreds < UINT_MAX && "NumPreds will overflow!");
    assert(N->NumSuccs < UINT_MAX && "NumSuccs will overflow!");
    ++NumPreds;
    ++N->NumSuccs;
  }
  if (!N->isScheduled) {
    if (D.isWeak()) {
      ++WeakPredsLeft;
    }
    else {
      assert(NumPredsLeft < UINT_MAX && "NumPredsLeft will overflow!");
      ++NumPredsLeft;
    }
  }
  if (!isScheduled) {
    if (D.isWeak()) {
      ++N->WeakSuccsLeft;
    }
    else {
      assert(N->NumSuccsLeft < UINT_MAX && "NumSuccsLeft will overflow!");
      ++N->NumSuccsLeft;
    }
  }
  Preds.push_back(D);
  N->Succs.push_back(P);
  if (P.getLatency() != 0) {
    this->setDepthDirty();
    N->setHeightDirty();
  }
  return true;
}

/// removePred - This removes the specified edge as a pred of the current
/// node if it exists.  It also removes the current node as a successor of
/// the specified node.
void SUnit::removePred(const SDep &D) {
  // Find the matching predecessor.
  for (SmallVectorImpl<SDep>::iterator I = Preds.begin(), E = Preds.end();
         I != E; ++I)
    if (*I == D) {
      // Find the corresponding successor in N.
      SDep P = D;
      P.setSUnit(this);
      SUnit *N = D.getSUnit();
      SmallVectorImpl<SDep>::iterator Succ = std::find(N->Succs.begin(),
                                                       N->Succs.end(), P);
      assert(Succ != N->Succs.end() && "Mismatching preds / succs lists!");
      N->Succs.erase(Succ);
      Preds.erase(I);
      // Update the bookkeeping.
      if (P.getKind() == SDep::Data) {
        assert(NumPreds > 0 && "NumPreds will underflow!");
        assert(N->NumSuccs > 0 && "NumSuccs will underflow!");
        --NumPreds;
        --N->NumSuccs;
      }
      if (!N->isScheduled) {
        if (D.isWeak())
          --WeakPredsLeft;
        else {
          assert(NumPredsLeft > 0 && "NumPredsLeft will underflow!");
          --NumPredsLeft;
        }
      }
      if (!isScheduled) {
        if (D.isWeak())
          --N->WeakSuccsLeft;
        else {
          assert(N->NumSuccsLeft > 0 && "NumSuccsLeft will underflow!");
          --N->NumSuccsLeft;
        }
      }
      if (P.getLatency() != 0) {
        this->setDepthDirty();
        N->setHeightDirty();
      }
      return;
    }
}

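/// setDepthDirty - Invalidate this node's cached depth and, transitively, the
/// cached depth of every successor that still considers its own depth current.
/// setHeightDirty below is the mirror image, walking predecessor edges.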
void SUnit::setDepthDirty() {
  if (!isDepthCurrent) return;
  SmallVector<SUnit*, 8> WorkList;
  WorkList.push_back(this);
  do {
    SUnit *SU = WorkList.pop_back_val();
    SU->isDepthCurrent = false;
    for (SUnit::const_succ_iterator I = SU->Succs.begin(),
         E = SU->Succs.end(); I != E; ++I) {
      SUnit *SuccSU = I->getSUnit();
      if (SuccSU->isDepthCurrent)
        WorkList.push_back(SuccSU);
    }
  } while (!WorkList.empty());
}

void SUnit::setHeightDirty() {
  if (!isHeightCurrent) return;
  SmallVector<SUnit*, 8> WorkList;
  WorkList.push_back(this);
  do {
    SUnit *SU = WorkList.pop_back_val();
    SU->isHeightCurrent = false;
    for (SUnit::const_pred_iterator I = SU->Preds.begin(),
         E = SU->Preds.end(); I != E; ++I) {
      SUnit *PredSU = I->getSUnit();
      if (PredSU->isHeightCurrent)
        WorkList.push_back(PredSU);
    }
  } while (!WorkList.empty());
}

/// setDepthToAtLeast - If NewDepth is greater than this node's current depth,
/// record the new value and mark the cached depths of this node's successors
/// dirty so that they are recomputed on demand.
///
void SUnit::setDepthToAtLeast(unsigned NewDepth) {
  if (NewDepth <= getDepth())
    return;
  setDepthDirty();
  Depth = NewDepth;
  isDepthCurrent = true;
}

/// setHeightToAtLeast - If NewHeight is greater than this node's current
/// height, record the new value and mark the cached heights of this node's
/// predecessors dirty so that they are recomputed on demand.
///
void SUnit::setHeightToAtLeast(unsigned NewHeight) {
  if (NewHeight <= getHeight())
    return;
  setHeightDirty();
  Height = NewHeight;
  isHeightCurrent = true;
}

/// ComputeDepth - Calculate the maximal latency-weighted path from the entry
/// to this node by walking predecessor edges.
///
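/// Implemented with an explicit work list: a node is popped only once all of
/// its predecessors have a current depth, and its depth is then the maximum
/// of (predecessor depth + edge latency) over its predecessor edges.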
void SUnit::ComputeDepth() {
  SmallVector<SUnit*, 8> WorkList;
  WorkList.push_back(this);
  do {
    SUnit *Cur = WorkList.back();

    bool Done = true;
    unsigned MaxPredDepth = 0;
    for (SUnit::const_pred_iterator I = Cur->Preds.begin(),
         E = Cur->Preds.end(); I != E; ++I) {
      SUnit *PredSU = I->getSUnit();
      if (PredSU->isDepthCurrent)
        MaxPredDepth = std::max(MaxPredDepth,
                                PredSU->Depth + I->getLatency());
      else {
        Done = false;
        WorkList.push_back(PredSU);
      }
    }

    if (Done) {
      WorkList.pop_back();
      if (MaxPredDepth != Cur->Depth) {
        Cur->setDepthDirty();
        Cur->Depth = MaxPredDepth;
      }
      Cur->isDepthCurrent = true;
    }
  } while (!WorkList.empty());
}

/// ComputeHeight - Calculate the maximal latency-weighted path from this node
/// to the exit by walking successor edges.
///
void SUnit::ComputeHeight() {
  SmallVector<SUnit*, 8> WorkList;
  WorkList.push_back(this);
  do {
    SUnit *Cur = WorkList.back();

    bool Done = true;
    unsigned MaxSuccHeight = 0;
    for (SUnit::const_succ_iterator I = Cur->Succs.begin(),
         E = Cur->Succs.end(); I != E; ++I) {
      SUnit *SuccSU = I->getSUnit();
      if (SuccSU->isHeightCurrent)
        MaxSuccHeight = std::max(MaxSuccHeight,
                                 SuccSU->Height + I->getLatency());
      else {
        Done = false;
        WorkList.push_back(SuccSU);
      }
    }

    if (Done) {
      WorkList.pop_back();
      if (MaxSuccHeight != Cur->Height) {
        Cur->setHeightDirty();
        Cur->Height = MaxSuccHeight;
      }
      Cur->isHeightCurrent = true;
    }
  } while (!WorkList.empty());
}

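/// biasCriticalPath - Order this node's predecessor edges so that the data
/// edge from the deepest (critical-path) predecessor comes first.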
void SUnit::biasCriticalPath() {
  if (NumPreds < 2)
    return;

  SUnit::pred_iterator BestI = Preds.begin();
  unsigned MaxDepth = BestI->getSUnit()->getDepth();
  for (SUnit::pred_iterator I = std::next(BestI), E = Preds.end(); I != E;
       ++I) {
    if (I->getKind() == SDep::Data && I->getSUnit()->getDepth() > MaxDepth) {
      // Track the running maximum so a shallower data pred seen later cannot
      // displace a deeper one.
      MaxDepth = I->getSUnit()->getDepth();
      BestI = I;
    }
  }
  if (BestI != Preds.begin())
    std::swap(*Preds.begin(), *BestI);
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
/// SUnit - Scheduling unit. It's a wrapper around either a single SDNode or
/// a group of nodes flagged together.
void SUnit::dump(const ScheduleDAG *G) const {
  dbgs() << "SU(" << NodeNum << "): ";
  G->dumpNode(this);
}

void SUnit::dumpAll(const ScheduleDAG *G) const {
  dump(G);

  dbgs() << "  # preds left       : " << NumPredsLeft << "\n";
  dbgs() << "  # succs left       : " << NumSuccsLeft << "\n";
  if (WeakPredsLeft)
    dbgs() << "  # weak preds left  : " << WeakPredsLeft << "\n";
  if (WeakSuccsLeft)
    dbgs() << "  # weak succs left  : " << WeakSuccsLeft << "\n";
  dbgs() << "  # rdefs left       : " << NumRegDefsLeft << "\n";
  dbgs() << "  Latency            : " << Latency << "\n";
  dbgs() << "  Depth              : " << getDepth() << "\n";
  dbgs() << "  Height             : " << getHeight() << "\n";

  if (Preds.size() != 0) {
    dbgs() << "  Predecessors:\n";
    for (SUnit::const_pred_iterator I = Preds.begin(), E = Preds.end();
         I != E; ++I) {
      dbgs() << "   ";
      switch (I->getKind()) {
      case SDep::Data:        dbgs() << "val "; break;
      case SDep::Anti:        dbgs() << "anti"; break;
      case SDep::Output:      dbgs() << "out "; break;
      case SDep::Order:       dbgs() << "ch  "; break;
      }
      dbgs() << "SU(" << I->getSUnit()->NodeNum << ")";
      if (I->isArtificial())
        dbgs() << " *";
      dbgs() << ": Latency=" << I->getLatency();
      if (I->isAssignedRegDep())
        dbgs() << " Reg=" << PrintReg(I->getReg(), G->TRI);
      dbgs() << "\n";
    }
  }
  if (Succs.size() != 0) {
    dbgs() << "  Successors:\n";
    for (SUnit::const_succ_iterator I = Succs.begin(), E = Succs.end();
         I != E; ++I) {
      dbgs() << "   ";
      switch (I->getKind()) {
      case SDep::Data:        dbgs() << "val "; break;
      case SDep::Anti:        dbgs() << "anti"; break;
      case SDep::Output:      dbgs() << "out "; break;
      case SDep::Order:       dbgs() << "ch  "; break;
      }
      dbgs() << "SU(" << I->getSUnit()->NodeNum << ")";
      if (I->isArtificial())
        dbgs() << " *";
      dbgs() << ": Latency=" << I->getLatency();
      if (I->isAssignedRegDep())
        dbgs() << " Reg=" << PrintReg(I->getReg(), G->TRI);
      dbgs() << "\n";
    }
  }
}
#endif

#ifndef NDEBUG
/// VerifyScheduledDAG - Verify that all SUnits were scheduled and that
/// their state is consistent. Return the number of scheduled nodes.
///
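/// Nodes with neither predecessors nor successors are counted as dead nodes;
/// they may legitimately remain unscheduled and are excluded from the result.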
unsigned ScheduleDAG::VerifyScheduledDAG(bool isBottomUp) {
  bool AnyNotSched = false;
  unsigned DeadNodes = 0;
  for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
    if (!SUnits[i].isScheduled) {
      if (SUnits[i].NumPreds == 0 && SUnits[i].NumSuccs == 0) {
        ++DeadNodes;
        continue;
      }
      if (!AnyNotSched)
        dbgs() << "*** Scheduling failed! ***\n";
      SUnits[i].dump(this);
      dbgs() << "has not been scheduled!\n";
      AnyNotSched = true;
    }
    if (SUnits[i].isScheduled &&
        (isBottomUp ? SUnits[i].getHeight() : SUnits[i].getDepth()) >
          unsigned(INT_MAX)) {
      if (!AnyNotSched)
        dbgs() << "*** Scheduling failed! ***\n";
      SUnits[i].dump(this);
      dbgs() << "has an unexpected "
           << (isBottomUp ? "Height" : "Depth") << " value!\n";
      AnyNotSched = true;
    }
    if (isBottomUp) {
      if (SUnits[i].NumSuccsLeft != 0) {
        if (!AnyNotSched)
          dbgs() << "*** Scheduling failed! ***\n";
        SUnits[i].dump(this);
        dbgs() << "has successors left!\n";
        AnyNotSched = true;
      }
    } else {
      if (SUnits[i].NumPredsLeft != 0) {
        if (!AnyNotSched)
          dbgs() << "*** Scheduling failed! ***\n";
        SUnits[i].dump(this);
        dbgs() << "has predecessors left!\n";
        AnyNotSched = true;
      }
    }
  }
  assert(!AnyNotSched);
  return SUnits.size() - DeadNodes;
}
#endif

/// InitDAGTopologicalSorting - create the initial topological
/// ordering from the DAG to be scheduled.
///
/// The idea of the algorithm is taken from
/// "Online algorithms for managing the topological order of
/// a directed acyclic graph" by David J. Pearce and Paul H.J. Kelly.
/// This is the MNR algorithm, which was first introduced by
/// A. Marchetti-Spaccamela, U. Nanni and H. Rohnert in
/// "Maintaining a topological order under edge insertions".
///
/// Short description of the algorithm:
///
/// Topological ordering, ord, of a DAG maps each node to a topological
/// index so that for all edges X->Y it is the case that ord(X) < ord(Y).
///
/// This means that if there is a path from the node X to the node Z,
/// then ord(X) < ord(Z).
///
/// This property can be used to check for reachability of nodes:
/// if Z is reachable from X, then an insertion of the edge Z->X would
/// create a cycle.
///
/// The algorithm first computes a topological ordering for the DAG by
/// initializing the Index2Node and Node2Index arrays and then tries to keep
/// the ordering up-to-date after edge insertions by reordering the DAG.
///
/// On insertion of the edge X->Y, the algorithm first marks the nodes
/// reachable from Y by calling DFS, and then uses Shift to move them so that
/// they lie immediately after X in Index2Node.
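///
/// Illustrative example (not drawn from any particular DAG): with edges
/// A->B and B->C, one valid ordering is ord(A)=0, ord(B)=1, ord(C)=2.
/// Adding the edge C->A creates a cycle exactly when C is already reachable
/// from A; since ord(A) < ord(C), the bounded DFS started at A visits B and
/// then hits C at index ord(C), which is how the cycle is reported.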
void ScheduleDAGTopologicalSort::InitDAGTopologicalSorting() {
  unsigned DAGSize = SUnits.size();
  std::vector<SUnit*> WorkList;
  WorkList.reserve(DAGSize);

  Index2Node.resize(DAGSize);
  Node2Index.resize(DAGSize);

  // Initialize the data structures.
  if (ExitSU)
    WorkList.push_back(ExitSU);
  for (unsigned i = 0, e = DAGSize; i != e; ++i) {
    SUnit *SU = &SUnits[i];
    int NodeNum = SU->NodeNum;
    unsigned Degree = SU->Succs.size();
    // Temporarily use the Node2Index array as scratch space for degree counts.
    Node2Index[NodeNum] = Degree;

    // Is it a node without dependencies?
    if (Degree == 0) {
      assert(SU->Succs.empty() && "SUnit should have no successors");
      // Collect leaf nodes.
      WorkList.push_back(SU);
    }
  }

  int Id = DAGSize;
  while (!WorkList.empty()) {
    SUnit *SU = WorkList.back();
    WorkList.pop_back();
    if (SU->NodeNum < DAGSize)
      Allocate(SU->NodeNum, --Id);
    for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
         I != E; ++I) {
      SUnit *PredSU = I->getSUnit();
      if (PredSU->NodeNum < DAGSize && !--Node2Index[PredSU->NodeNum])
        // If all dependencies of the node are processed already,
        // then the node can be computed now.
        WorkList.push_back(PredSU);
    }
  }

  Visited.resize(DAGSize);

#ifndef NDEBUG
  // Check correctness of the ordering
  for (unsigned i = 0, e = DAGSize; i != e; ++i) {
    SUnit *SU = &SUnits[i];
    for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
         I != E; ++I) {
      assert(Node2Index[SU->NodeNum] > Node2Index[I->getSUnit()->NodeNum] &&
      "Wrong topological sorting");
    }
  }
#endif
}

/// AddPred - Updates the topological ordering to accommodate an edge
/// to be added from SUnit X to SUnit Y.
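/// If ord(X) < ord(Y) already holds, the existing order remains valid and
/// nothing is changed; otherwise the nodes reachable from Y within the
/// affected window are shifted so that they follow X.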
void ScheduleDAGTopologicalSort::AddPred(SUnit *Y, SUnit *X) {
  int UpperBound, LowerBound;
  LowerBound = Node2Index[Y->NodeNum];
  UpperBound = Node2Index[X->NodeNum];
  bool HasLoop = false;
  // Is Ord(Y) < Ord(X)? If so, the new edge X->Y violates the current
  // ordering and the affected region needs to be reordered.
  if (LowerBound < UpperBound) {
    // Update the topological order.
    Visited.reset();
    DFS(Y, UpperBound, HasLoop);
    assert(!HasLoop && "Inserted edge creates a loop!");
    // Recompute topological indexes.
    Shift(Visited, LowerBound, UpperBound);
  }
}

/// RemovePred - Updates the topological ordering to accommodate an edge
/// being removed from the specified node N to the current node M, i.e. N is
/// removed from the predecessors of M.
void ScheduleDAGTopologicalSort::RemovePred(SUnit *M, SUnit *N) {
  // InitDAGTopologicalSorting();
}

/// DFS - Make a DFS traversal to mark all nodes reachable from SU and mark
/// all nodes affected by the edge insertion. These nodes will later get new
/// topological indexes by means of the Shift method.
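/// Only nodes whose current topological index is below UpperBound are
/// explored; encountering a successor whose index equals UpperBound means the
/// prospective edge would close a cycle, and HasLoop is set.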
void ScheduleDAGTopologicalSort::DFS(const SUnit *SU, int UpperBound,
                                     bool &HasLoop) {
  std::vector<const SUnit*> WorkList;
  WorkList.reserve(SUnits.size());

  WorkList.push_back(SU);
  do {
    SU = WorkList.back();
    WorkList.pop_back();
    Visited.set(SU->NodeNum);
    for (int I = SU->Succs.size()-1; I >= 0; --I) {
      unsigned s = SU->Succs[I].getSUnit()->NodeNum;
      // Edges to non-SUnits are allowed but ignored (e.g. ExitSU).
      if (s >= Node2Index.size())
        continue;
      if (Node2Index[s] == UpperBound) {
        HasLoop = true;
        return;
      }
      // Visit successors if not already and in affected region.
      if (!Visited.test(s) && Node2Index[s] < UpperBound) {
        WorkList.push_back(SU->Succs[I].getSUnit());
      }
    }
  } while (!WorkList.empty());
}

/// Shift - Renumber the nodes so that the topological ordering is
/// preserved.
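/// Within the window [LowerBound, UpperBound], nodes not marked by the
/// preceding DFS keep their relative order but slide toward LowerBound by the
/// number of marked nodes seen so far; the marked nodes are then re-allocated
/// at the upper end of the window, restoring a valid topological order.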
void ScheduleDAGTopologicalSort::Shift(BitVector& Visited, int LowerBound,
                                       int UpperBound) {
  std::vector<int> L;
  int shift = 0;
  int i;

  for (i = LowerBound; i <= UpperBound; ++i) {
    // w is node at topological index i.
    int w = Index2Node[i];
    if (Visited.test(w)) {
      // Unmark.
      Visited.reset(w);
      L.push_back(w);
      shift = shift + 1;
    } else {
      Allocate(w, i - shift);
    }
  }

  for (unsigned j = 0; j < L.size(); ++j) {
    Allocate(L[j], i - shift);
    i = i + 1;
  }
}

/// WillCreateCycle - Returns true if adding an edge to TargetSU from SU will
/// create a cycle. If so, it is not safe to call AddPred(TargetSU, SU).
bool ScheduleDAGTopologicalSort::WillCreateCycle(SUnit *TargetSU, SUnit *SU) {
  // Is SU reachable from TargetSU via successor edges?
  if (IsReachable(SU, TargetSU))
    return true;
  for (SUnit::pred_iterator
         I = TargetSU->Preds.begin(), E = TargetSU->Preds.end(); I != E; ++I)
    if (I->isAssignedRegDep() &&
        IsReachable(SU, I->getSUnit()))
      return true;
  return false;
}

/// IsReachable - Checks if SU is reachable from TargetSU.
bool ScheduleDAGTopologicalSort::IsReachable(const SUnit *SU,
                                             const SUnit *TargetSU) {
  // If insertion of the edge SU->TargetSU would create a cycle
  // then there is a path from TargetSU to SU.
  int UpperBound, LowerBound;
  LowerBound = Node2Index[TargetSU->NodeNum];
  UpperBound = Node2Index[SU->NodeNum];
  bool HasLoop = false;
  // Is Ord(TargetSU) < Ord(SU) ?
  if (LowerBound < UpperBound) {
    Visited.reset();
    // There may be a path from TargetSU to SU. Check for it.
    DFS(TargetSU, UpperBound, HasLoop);
  }
  return HasLoop;
}

/// Allocate - assign the topological index to the node n.
void ScheduleDAGTopologicalSort::Allocate(int n, int index) {
  Node2Index[n] = index;
  Index2Node[index] = n;
}

ScheduleDAGTopologicalSort::
ScheduleDAGTopologicalSort(std::vector<SUnit> &sunits, SUnit *exitsu)
  : SUnits(sunits), ExitSU(exitsu) {}

ScheduleHazardRecognizer::~ScheduleHazardRecognizer() {}