//===------- llvm/CodeGen/ScheduleDAG.h - Common Base Class ----*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the ScheduleDAG class, which is used as the common
// base class for instruction schedulers. This encapsulates the scheduling DAG,
// which is shared between SelectionDAG and MachineInstr scheduling.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_SCHEDULEDAG_H
#define LLVM_CODEGEN_SCHEDULEDAG_H

#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/GraphTraits.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/Target/TargetLowering.h"

namespace llvm {
  class AliasAnalysis;
  class SUnit;
  class MachineConstantPool;
  class MachineFunction;
  class MachineRegisterInfo;
  class MachineInstr;
  struct MCSchedClassDesc;
  class TargetRegisterInfo;
  class ScheduleDAG;
  class SDNode;
  class TargetInstrInfo;
  class MCInstrDesc;
  class TargetMachine;
  class TargetRegisterClass;
  template<class Graph> class GraphWriter;

  /// SDep - Scheduling dependency. This represents one direction of an
  /// edge in the scheduling DAG.
  class SDep {
  public:
    /// Kind - These are the different kinds of scheduling dependencies.
    enum Kind {
      Data,        ///< Regular data dependence (aka true-dependence).
      Anti,        ///< A register anti-dependence (aka WAR).
      Output,      ///< A register output-dependence (aka WAW).
      Order        ///< Any other ordering dependency.
    };

    // Strong dependencies must be respected by the scheduler. Artificial
    // dependencies may be removed only if they are redundant with another
    // strong dependence.
    //
    // Weak dependencies may be violated by the scheduling strategy, but only
    // if the strategy can prove it is correct to do so.
    //
    // Strong OrderKinds must occur before "Weak".
    // Weak OrderKinds must occur at or after "Weak".
    enum OrderKind {
      Barrier,      ///< An unknown scheduling barrier.
      MayAliasMem,  ///< Nonvolatile load/store instructions that may alias.
      MustAliasMem, ///< Nonvolatile load/store instructions that must alias.
      Artificial,   ///< Arbitrary strong DAG edge (no real dependence).
      Weak,         ///< Arbitrary weak DAG edge.
      Cluster       ///< Weak DAG edge linking a chain of clustered instrs.
    };
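
    // Illustrative sketch only (PredSU is a hypothetical SUnit pointer): a
    // Barrier edge is a strong ordering constraint that the scheduler must
    // honor, while a Cluster edge is weak and merely asks for adjacency:
    //
    //   SDep Fence(PredSU, SDep::Barrier);  // may not be violated
    //   SDep Near(PredSU, SDep::Cluster);   // may be dropped by the strategy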

  private:
    /// Dep - A pointer to the depending/depended-on SUnit, and an enum
    /// indicating the kind of the dependency.
    PointerIntPair<SUnit *, 2, Kind> Dep;

    /// Contents - A union discriminated by the dependence kind.
    union {
      /// Reg - For Data, Anti, and Output dependencies, the associated
      /// register. For Data dependencies that don't currently have a register
      /// assigned, this is set to zero.
      unsigned Reg;

      /// Order - Additional information about Order dependencies.
      unsigned OrdKind; // enum OrderKind
    } Contents;

    /// Latency - The time associated with this edge. Often this is just
    /// the value of the Latency field of the predecessor, however advanced
    /// models may provide additional information about specific edges.
    unsigned Latency;

  public:
    /// SDep - Construct a null SDep. This is only for use by container
    /// classes which require default constructors. SUnits may not
    /// have null SDep edges.
    SDep() : Dep(0, Data) {}

    /// SDep - Construct an SDep with the specified values.
    SDep(SUnit *S, Kind kind, unsigned Reg)
      : Dep(S, kind), Contents() {
      switch (kind) {
      default:
        llvm_unreachable("Reg given for non-register dependence!");
      case Anti:
      case Output:
        assert(Reg != 0 &&
               "SDep::Anti and SDep::Output must use a non-zero Reg!");
        Contents.Reg = Reg;
        Latency = 0;
        break;
      case Data:
        Contents.Reg = Reg;
        Latency = 1;
        break;
      }
    }
    SDep(SUnit *S, OrderKind kind)
      : Dep(S, Order), Contents(), Latency(0) {
      Contents.OrdKind = kind;
    }

    /// Return true if the specified SDep is equivalent except for latency.
    bool overlaps(const SDep &Other) const {
      if (Dep != Other.Dep) return false;
      switch (Dep.getInt()) {
      case Data:
      case Anti:
      case Output:
        return Contents.Reg == Other.Contents.Reg;
      case Order:
        return Contents.OrdKind == Other.Contents.OrdKind;
      }
      llvm_unreachable("Invalid dependency kind!");
    }

    bool operator==(const SDep &Other) const {
      return overlaps(Other) && Latency == Other.Latency;
    }

    bool operator!=(const SDep &Other) const {
      return !operator==(Other);
    }

    /// getLatency - Return the latency value for this edge, which roughly
    /// means the minimum number of cycles that must elapse between the
    /// predecessor and the successor, given that they have this edge
    /// between them.
    unsigned getLatency() const {
      return Latency;
    }

    /// setLatency - Set the latency for this edge.
    void setLatency(unsigned Lat) {
      Latency = Lat;
    }

    /// getSUnit - Return the SUnit to which this edge points.
    SUnit *getSUnit() const {
      return Dep.getPointer();
    }

    /// setSUnit - Assign the SUnit to which this edge points.
    void setSUnit(SUnit *SU) {
      Dep.setPointer(SU);
    }

    /// getKind - Return an enum value representing the kind of the dependence.
    Kind getKind() const {
      return Dep.getInt();
    }

    /// isCtrl - Shorthand for getKind() != SDep::Data.
    bool isCtrl() const {
      return getKind() != Data;
    }

    /// isNormalMemory - Test if this is an Order dependence between two
    /// memory accesses where both sides of the dependence access memory
    /// in non-volatile and fully modeled ways.
    bool isNormalMemory() const {
      return getKind() == Order && (Contents.OrdKind == MayAliasMem
                                    || Contents.OrdKind == MustAliasMem);
    }

    /// isMustAlias - Test if this is an Order dependence that is marked
    /// as "must alias", meaning that the SUnits at either end of the edge
    /// have a memory dependence on a known memory location.
    bool isMustAlias() const {
      return getKind() == Order && Contents.OrdKind == MustAliasMem;
    }

    /// isWeak - Test if this is a weak dependence. Weak dependencies are
    /// considered DAG edges for height computation and other heuristics, but
    /// do not force ordering. Breaking a weak edge may require the scheduler
    /// to compensate, for example by inserting a copy.
    bool isWeak() const {
      return getKind() == Order && Contents.OrdKind >= Weak;
    }
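
    // Illustrative sketch only (Edge is a hypothetical SDep taken from some
    // SUnit's Preds list): strong non-data edges must be honored, while weak
    // edges may be broken when the strategy can justify it:
    //
    //   if (Edge.isCtrl() && !Edge.isWeak())
    //     return false; // reordering across this edge is not allowed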

    /// isArtificial - Test if this is an Order dependence that is marked
    /// as "artificial", meaning it isn't necessary for correctness.
    bool isArtificial() const {
      return getKind() == Order && Contents.OrdKind == Artificial;
    }

    /// isCluster - Test if this is an Order dependence that is marked
    /// as "cluster", meaning it is artificial and wants to be adjacent.
    bool isCluster() const {
      return getKind() == Order && Contents.OrdKind == Cluster;
    }

    /// isAssignedRegDep - Test if this is a Data dependence that is
    /// associated with a register.
    bool isAssignedRegDep() const {
      return getKind() == Data && Contents.Reg != 0;
    }

    /// getReg - Return the register associated with this edge. This is
    /// only valid on Data, Anti, and Output edges. On Data edges, this
    /// value may be zero, meaning there is no associated register.
    unsigned getReg() const {
      assert((getKind() == Data || getKind() == Anti || getKind() == Output) &&
             "getReg called on non-register dependence edge!");
      return Contents.Reg;
    }

    /// setReg - Assign the associated register for this edge. This is
    /// only valid on Data, Anti, and Output edges. On Anti and Output
    /// edges, this value must not be zero. On Data edges, the value may
    /// be zero, which would mean that no specific register is associated
    /// with this edge.
    void setReg(unsigned Reg) {
      assert((getKind() == Data || getKind() == Anti || getKind() == Output) &&
             "setReg called on non-register dependence edge!");
      assert((getKind() != Anti || Reg != 0) &&
             "SDep::Anti edge cannot use the zero register!");
      assert((getKind() != Output || Reg != 0) &&
             "SDep::Output edge cannot use the zero register!");
      Contents.Reg = Reg;
    }
  };

  template <>
  struct isPodLike<SDep> { static const bool value = true; };
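
  // Illustrative sketch only (DefSU is a hypothetical SUnit pointer; the
  // latency value is made up): building a data edge and a memory ordering
  // edge before attaching them to a successor's predecessor list:
  //
  //   SDep DataEdge(DefSU, SDep::Data, /*Reg=*/0); // latency defaults to 1
  //   DataEdge.setLatency(3);                      // refine from a machine model
  //   SDep MemEdge(DefSU, SDep::MayAliasMem);      // strong memory ordering edge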

  /// SUnit - Scheduling unit. This is a node in the scheduling DAG.
  class SUnit {
  private:
    enum { BoundaryID = ~0u };

    SDNode *Node;                       // Representative node.
    MachineInstr *Instr;                // Alternatively, a MachineInstr.
  public:
    SUnit *OrigNode;                    // If not this, the node from which
                                        // this node was cloned.
                                        // (SD scheduling only)

    const MCSchedClassDesc *SchedClass; // NULL or resolved SchedClass.

    // Preds/Succs - The SUnits before/after us in the graph.
    SmallVector<SDep, 4> Preds;  // All sunit predecessors.
    SmallVector<SDep, 4> Succs;  // All sunit successors.

    typedef SmallVectorImpl<SDep>::iterator pred_iterator;
    typedef SmallVectorImpl<SDep>::iterator succ_iterator;
    typedef SmallVectorImpl<SDep>::const_iterator const_pred_iterator;
    typedef SmallVectorImpl<SDep>::const_iterator const_succ_iterator;

    unsigned NodeNum;                   // Entry # of node in the node vector.
    unsigned NodeQueueId;               // Queue id of node.
    unsigned NumPreds;                  // # of SDep::Data preds.
    unsigned NumSuccs;                  // # of SDep::Data succs.
    unsigned NumPredsLeft;              // # of preds not scheduled.
    unsigned NumSuccsLeft;              // # of succs not scheduled.
    unsigned WeakPredsLeft;             // # of weak preds not scheduled.
    unsigned WeakSuccsLeft;             // # of weak succs not scheduled.
    unsigned short NumRegDefsLeft;      // # of reg defs with no scheduled use.
    unsigned short Latency;             // Node latency.
    bool isVRegCycle       : 1;         // May use and def the same vreg.
    bool isCall            : 1;         // Is a function call.
    bool isCallOp          : 1;         // Is a function call operand.
    bool isTwoAddress      : 1;         // Is a two-address instruction.
    bool isCommutable      : 1;         // Is a commutable instruction.
    bool hasPhysRegUses    : 1;         // Has physreg uses.
    bool hasPhysRegDefs    : 1;         // Has physreg defs that are being used.
    bool hasPhysRegClobbers : 1;        // Has any physreg defs, used or not.
    bool isPending         : 1;         // True once pending.
    bool isAvailable       : 1;         // True once available.
    bool isScheduled       : 1;         // True once scheduled.
    bool isScheduleHigh    : 1;         // True if preferable to schedule high.
    bool isScheduleLow     : 1;         // True if preferable to schedule low.
    bool isCloned          : 1;         // True if this node has been cloned.
    Sched::Preference SchedulingPref;   // Scheduling preference.

  private:
    bool isDepthCurrent    : 1;         // True if Depth is current.
    bool isHeightCurrent   : 1;         // True if Height is current.
    unsigned Depth;                     // Node depth.
    unsigned Height;                    // Node height.
  public:
    unsigned TopReadyCycle; // Cycle relative to start when node is ready.
    unsigned BotReadyCycle; // Cycle relative to end when node is ready.

    const TargetRegisterClass *CopyDstRC; // Is a special copy node if not null.
    const TargetRegisterClass *CopySrcRC;

    /// SUnit - Construct an SUnit for pre-regalloc scheduling to represent
    /// an SDNode and any nodes flagged to it.
    SUnit(SDNode *node, unsigned nodenum)
      : Node(node), Instr(0), OrigNode(0), SchedClass(0), NodeNum(nodenum),
        NodeQueueId(0), NumPreds(0), NumSuccs(0), NumPredsLeft(0),
        NumSuccsLeft(0), WeakPredsLeft(0), WeakSuccsLeft(0), NumRegDefsLeft(0),
        Latency(0), isVRegCycle(false), isCall(false), isCallOp(false),
        isTwoAddress(false), isCommutable(false), hasPhysRegUses(false),
        hasPhysRegDefs(false), hasPhysRegClobbers(false), isPending(false),
        isAvailable(false), isScheduled(false), isScheduleHigh(false),
        isScheduleLow(false), isCloned(false), SchedulingPref(Sched::None),
        isDepthCurrent(false), isHeightCurrent(false), Depth(0), Height(0),
        TopReadyCycle(0), BotReadyCycle(0), CopyDstRC(NULL), CopySrcRC(NULL) {}

    /// SUnit - Construct an SUnit for post-regalloc scheduling to represent
    /// a MachineInstr.
    SUnit(MachineInstr *instr, unsigned nodenum)
      : Node(0), Instr(instr), OrigNode(0), SchedClass(0), NodeNum(nodenum),
        NodeQueueId(0), NumPreds(0), NumSuccs(0), NumPredsLeft(0),
        NumSuccsLeft(0), WeakPredsLeft(0), WeakSuccsLeft(0), NumRegDefsLeft(0),
        Latency(0), isVRegCycle(false), isCall(false), isCallOp(false),
        isTwoAddress(false), isCommutable(false), hasPhysRegUses(false),
        hasPhysRegDefs(false), hasPhysRegClobbers(false), isPending(false),
        isAvailable(false), isScheduled(false), isScheduleHigh(false),
        isScheduleLow(false), isCloned(false), SchedulingPref(Sched::None),
        isDepthCurrent(false), isHeightCurrent(false), Depth(0), Height(0),
        TopReadyCycle(0), BotReadyCycle(0), CopyDstRC(NULL), CopySrcRC(NULL) {}

    /// SUnit - Construct a placeholder SUnit.
    SUnit()
      : Node(0), Instr(0), OrigNode(0), SchedClass(0), NodeNum(BoundaryID),
        NodeQueueId(0), NumPreds(0), NumSuccs(0), NumPredsLeft(0),
        NumSuccsLeft(0), WeakPredsLeft(0), WeakSuccsLeft(0), NumRegDefsLeft(0),
        Latency(0), isVRegCycle(false), isCall(false), isCallOp(false),
        isTwoAddress(false), isCommutable(false), hasPhysRegUses(false),
        hasPhysRegDefs(false), hasPhysRegClobbers(false), isPending(false),
        isAvailable(false), isScheduled(false), isScheduleHigh(false),
        isScheduleLow(false), isCloned(false), SchedulingPref(Sched::None),
        isDepthCurrent(false), isHeightCurrent(false), Depth(0), Height(0),
        TopReadyCycle(0), BotReadyCycle(0), CopyDstRC(NULL), CopySrcRC(NULL) {}

    /// \brief Boundary nodes are placeholders for the boundary of the
    /// scheduling region.
    ///
    /// BoundaryNodes can have DAG edges, including Data edges, but they do not
    /// correspond to schedulable entities (e.g. instructions) and do not have
    /// a valid ID. Consequently, always check for boundary nodes before
    /// accessing an associative data structure keyed on node ID.
    bool isBoundaryNode() const { return NodeNum == BoundaryID; }

    /// setNode - Assign the representative SDNode for this SUnit.
    /// This may be used during pre-regalloc scheduling.
    void setNode(SDNode *N) {
      assert(!Instr && "Setting SDNode of SUnit with MachineInstr!");
      Node = N;
    }

    /// getNode - Return the representative SDNode for this SUnit.
    /// This may be used during pre-regalloc scheduling.
    SDNode *getNode() const {
      assert(!Instr && "Reading SDNode of SUnit with MachineInstr!");
      return Node;
    }

    /// isInstr - Return true if this SUnit refers to a machine instruction as
    /// opposed to an SDNode.
    bool isInstr() const { return Instr; }

    /// setInstr - Assign the instruction for the SUnit.
    /// This may be used during post-regalloc scheduling.
    void setInstr(MachineInstr *MI) {
      assert(!Node && "Setting MachineInstr of SUnit with SDNode!");
      Instr = MI;
    }

    /// getInstr - Return the representative MachineInstr for this SUnit.
    /// This may be used during post-regalloc scheduling.
    MachineInstr *getInstr() const {
      assert(!Node && "Reading MachineInstr of SUnit with SDNode!");
      return Instr;
    }

    /// addPred - This adds the specified edge as a pred of the current node if
    /// it is not already present. It also adds the current node as a successor
    /// of the specified node.
    bool addPred(const SDep &D, bool Required = true);

    /// removePred - This removes the specified edge as a pred of the current
    /// node if it exists. It also removes the current node as a successor of
    /// the specified node.
    void removePred(const SDep &D);

    /// getDepth - Return the depth of this node, which is the length of the
    /// maximum path up to any node which has no predecessors.
    unsigned getDepth() const {
      if (!isDepthCurrent)
        const_cast<SUnit *>(this)->ComputeDepth();
      return Depth;
    }

    /// getHeight - Return the height of this node, which is the length of the
    /// maximum path down to any node which has no successors.
    unsigned getHeight() const {
      if (!isHeightCurrent)
        const_cast<SUnit *>(this)->ComputeHeight();
      return Height;
    }
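
    // Illustrative sketch only (SU comes from a built ScheduleDAG; NodeData is
    // a hypothetical per-node table indexed by NodeNum): guard associative
    // lookups against boundary nodes, and combine depth and height for a rough
    // estimate of the critical path through this node:
    //
    //   if (!SU->isBoundaryNode())
    //     NodeData[SU->NodeNum] = SU->getDepth() + SU->getHeight();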

    /// setDepthToAtLeast - If NewDepth is greater than this node's
    /// depth value, set it to be the new depth value. This also
    /// recursively marks successor nodes dirty.
    void setDepthToAtLeast(unsigned NewDepth);

    /// setHeightToAtLeast - If NewHeight is greater than this node's
    /// height value, set it to be the new height value. This also
    /// recursively marks predecessor nodes dirty.
    void setHeightToAtLeast(unsigned NewHeight);

    /// setDepthDirty - Set a flag in this node to indicate that its
    /// stored Depth value will require recomputation the next time
    /// getDepth() is called.
    void setDepthDirty();

    /// setHeightDirty - Set a flag in this node to indicate that its
    /// stored Height value will require recomputation the next time
    /// getHeight() is called.
    void setHeightDirty();

    /// isPred - Test if node N is a predecessor of this node.
    bool isPred(SUnit *N) {
      for (unsigned i = 0, e = (unsigned)Preds.size(); i != e; ++i)
        if (Preds[i].getSUnit() == N)
          return true;
      return false;
    }

    /// isSucc - Test if node N is a successor of this node.
    bool isSucc(SUnit *N) {
      for (unsigned i = 0, e = (unsigned)Succs.size(); i != e; ++i)
        if (Succs[i].getSUnit() == N)
          return true;
      return false;
    }

    bool isTopReady() const {
      return NumPredsLeft == 0;
    }
    bool isBottomReady() const {
      return NumSuccsLeft == 0;
    }

    /// \brief Order this node's predecessor edges such that the critical path
    /// edge occurs first.
    void biasCriticalPath();

    void dump(const ScheduleDAG *G) const;
    void dumpAll(const ScheduleDAG *G) const;
    void print(raw_ostream &O, const ScheduleDAG *G) const;

  private:
    void ComputeDepth();
    void ComputeHeight();
  };

  //===--------------------------------------------------------------------===//
  /// SchedulingPriorityQueue - This interface is used to plug different
  /// priority computation algorithms into the list scheduler. It implements
  /// the interface of a standard priority queue, where nodes are inserted in
  /// arbitrary order and returned in priority order. The computation of the
  /// priority and the representation of the queue are totally up to the
  /// implementation to decide.
  ///
  class SchedulingPriorityQueue {
    virtual void anchor();
    unsigned CurCycle;
    bool HasReadyFilter;
  public:
    SchedulingPriorityQueue(bool rf = false):
      CurCycle(0), HasReadyFilter(rf) {}
    virtual ~SchedulingPriorityQueue() {}

    virtual bool isBottomUp() const = 0;

    virtual void initNodes(std::vector<SUnit> &SUnits) = 0;
    virtual void addNode(const SUnit *SU) = 0;
    virtual void updateNode(const SUnit *SU) = 0;
    virtual void releaseState() = 0;

    virtual bool empty() const = 0;

    bool hasReadyFilter() const { return HasReadyFilter; }

    virtual bool tracksRegPressure() const { return false; }

    virtual bool isReady(SUnit *) const {
      assert(!HasReadyFilter && "The ready filter must override isReady()");
      return true;
    }
    virtual void push(SUnit *U) = 0;

    void push_all(const std::vector<SUnit *> &Nodes) {
      for (std::vector<SUnit *>::const_iterator I = Nodes.begin(),
           E = Nodes.end(); I != E; ++I)
        push(*I);
    }

    virtual SUnit *pop() = 0;

    virtual void remove(SUnit *SU) = 0;

    virtual void dump(ScheduleDAG *) const {}

    /// scheduledNode - As each node is scheduled, this method is invoked. This
    /// allows the priority function to adjust the priority of related
    /// unscheduled nodes, for example.
    ///
    virtual void scheduledNode(SUnit *) {}

    virtual void unscheduledNode(SUnit *) {}

    void setCurCycle(unsigned Cycle) {
      CurCycle = Cycle;
    }

    unsigned getCurCycle() const {
      return CurCycle;
    }
  };
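
  // Illustrative sketch only (hypothetical, not part of LLVM): the smallest
  // possible implementation of the interface above, a FIFO with no priority
  // function. Real queues typically keep a heap or sorted vector and compute
  // priorities when popping. Assumes <algorithm> for std::find.
  //
  //   struct FIFOQueue : public SchedulingPriorityQueue {
  //     std::vector<SUnit*> Queue;
  //     virtual bool isBottomUp() const { return false; }
  //     virtual void initNodes(std::vector<SUnit> &) {}
  //     virtual void addNode(const SUnit *) {}
  //     virtual void updateNode(const SUnit *) {}
  //     virtual void releaseState() { Queue.clear(); }
  //     virtual bool empty() const { return Queue.empty(); }
  //     virtual void push(SUnit *SU) { Queue.push_back(SU); }
  //     virtual SUnit *pop() {
  //       SUnit *Front = Queue.front();
  //       Queue.erase(Queue.begin());
  //       return Front;
  //     }
  //     virtual void remove(SUnit *SU) {
  //       Queue.erase(std::find(Queue.begin(), Queue.end(), SU));
  //     }
  //   };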

  class ScheduleDAG {
  public:
    const TargetMachine &TM;            // Target processor
    const TargetInstrInfo *TII;         // Target instruction information
    const TargetRegisterInfo *TRI;      // Target processor register info
    MachineFunction &MF;                // Machine function
    MachineRegisterInfo &MRI;           // Virtual/real register map
    std::vector<SUnit> SUnits;          // The scheduling units.
    SUnit EntrySU;                      // Special node for the region entry.
    SUnit ExitSU;                       // Special node for the region exit.

#ifdef NDEBUG
    static const bool StressSched = false;
#else
    bool StressSched;
#endif

    explicit ScheduleDAG(MachineFunction &mf);

    virtual ~ScheduleDAG();

    /// clearDAG - clear the DAG state (between regions).
    void clearDAG();

    /// getInstrDesc - Return the MCInstrDesc of this SUnit.
    /// Return NULL for SDNodes without a machine opcode.
    const MCInstrDesc *getInstrDesc(const SUnit *SU) const {
      if (SU->isInstr()) return &SU->getInstr()->getDesc();
      return getNodeDesc(SU->getNode());
    }

    /// viewGraph - Pop up a GraphViz/gv window with the ScheduleDAG rendered
    /// using 'dot'.
    ///
    virtual void viewGraph(const Twine &Name, const Twine &Title);
    virtual void viewGraph();

    virtual void dumpNode(const SUnit *SU) const = 0;

    /// getGraphNodeLabel - Return a label for an SUnit node in a visualization
    /// of the ScheduleDAG.
    virtual std::string getGraphNodeLabel(const SUnit *SU) const = 0;

    /// getDAGName - Return a label for the region of code covered by the DAG.
    virtual std::string getDAGName() const = 0;

    /// addCustomGraphFeatures - Add custom features for a visualization of
    /// the ScheduleDAG.
    virtual void addCustomGraphFeatures(GraphWriter<ScheduleDAG*> &) const {}

#ifndef NDEBUG
    /// VerifyScheduledDAG - Verify that all SUnits were scheduled and that
    /// their state is consistent. Return the number of scheduled SUnits.
    unsigned VerifyScheduledDAG(bool isBottomUp);
#endif

  private:
    // Return the MCInstrDesc of this SDNode or NULL.
    const MCInstrDesc *getNodeDesc(const SDNode *Node) const;
  };
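
  // Illustrative sketch only (DAG is a hypothetical pointer to a concrete
  // subclass whose SUnits were built for the current region; NumLoads is a
  // hypothetical counter): visiting every unit and querying static properties
  // of its instruction through getInstrDesc():
  //
  //   for (unsigned i = 0, e = DAG->SUnits.size(); i != e; ++i)
  //     if (const MCInstrDesc *MCID = DAG->getInstrDesc(&DAG->SUnits[i]))
  //       if (MCID->mayLoad())
  //         ++NumLoads;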

  class SUnitIterator : public std::iterator<std::forward_iterator_tag,
                                             SUnit, ptrdiff_t> {
    SUnit *Node;
    unsigned Operand;

    SUnitIterator(SUnit *N, unsigned Op) : Node(N), Operand(Op) {}
  public:
    bool operator==(const SUnitIterator& x) const {
      return Operand == x.Operand;
    }
    bool operator!=(const SUnitIterator& x) const { return !operator==(x); }

    const SUnitIterator &operator=(const SUnitIterator &I) {
      assert(I.Node==Node && "Cannot assign iterators to two different nodes!");
      Operand = I.Operand;
      return *this;
    }

    pointer operator*() const {
      return Node->Preds[Operand].getSUnit();
    }
    pointer operator->() const { return operator*(); }

    SUnitIterator& operator++() {                // Preincrement
      ++Operand;
      return *this;
    }
    SUnitIterator operator++(int) { // Postincrement
      SUnitIterator tmp = *this; ++*this; return tmp;
    }

    static SUnitIterator begin(SUnit *N) { return SUnitIterator(N, 0); }
    static SUnitIterator end  (SUnit *N) {
      return SUnitIterator(N, (unsigned)N->Preds.size());
    }

    unsigned getOperand() const { return Operand; }
    const SUnit *getNode() const { return Node; }
    /// isCtrlDep - Test if this is not an SDep::Data dependence.
    bool isCtrlDep() const {
      return getSDep().isCtrl();
    }
    bool isArtificialDep() const {
      return getSDep().isArtificial();
    }
    const SDep &getSDep() const {
      return Node->Preds[Operand];
    }
  };

  template <> struct GraphTraits<SUnit*> {
    typedef SUnit NodeType;
    typedef SUnitIterator ChildIteratorType;
    static inline NodeType *getEntryNode(SUnit *N) { return N; }
    static inline ChildIteratorType child_begin(NodeType *N) {
      return SUnitIterator::begin(N);
    }
    static inline ChildIteratorType child_end(NodeType *N) {
      return SUnitIterator::end(N);
    }
  };

  template <> struct GraphTraits<ScheduleDAG*> : public GraphTraits<SUnit*> {
    typedef std::vector<SUnit>::iterator nodes_iterator;
    static nodes_iterator nodes_begin(ScheduleDAG *G) {
      return G->SUnits.begin();
    }
    static nodes_iterator nodes_end(ScheduleDAG *G) {
      return G->SUnits.end();
    }
  };
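
  // Illustrative sketch only (SU is a node in a built DAG; visit() is a
  // hypothetical callback): in these GraphTraits the "children" of an SUnit
  // are its predecessor edges, so walking them looks like:
  //
  //   for (SUnitIterator I = SUnitIterator::begin(SU),
  //        E = SUnitIterator::end(SU); I != E; ++I)
  //     if (!I.isCtrlDep())
  //       visit(*I);  // *I is the predecessor SUnit of a data edge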

  /// ScheduleDAGTopologicalSort is a class that computes a topological
  /// ordering for SUnits and provides methods for dynamically updating
  /// the ordering as new edges are added.
  ///
  /// This allows a very fast implementation of IsReachable, for example.
  ///
  class ScheduleDAGTopologicalSort {
    /// SUnits - A reference to the ScheduleDAG's SUnits.
    std::vector<SUnit> &SUnits;
    SUnit *ExitSU;

    /// Index2Node - Maps topological index to the node number.
    std::vector<int> Index2Node;
    /// Node2Index - Maps the node number to its topological index.
    std::vector<int> Node2Index;
    /// Visited - a set of nodes visited during a DFS traversal.
    BitVector Visited;

    /// DFS - make a DFS traversal and mark all nodes affected by the
    /// edge insertion. These nodes will later get new topological indexes
    /// by means of the Shift method.
    void DFS(const SUnit *SU, int UpperBound, bool& HasLoop);

    /// Shift - reassign topological indexes for the nodes in the DAG
    /// to preserve the topological ordering.
    void Shift(BitVector& Visited, int LowerBound, int UpperBound);

    /// Allocate - assign the topological index to the node n.
    void Allocate(int n, int index);

  public:
    ScheduleDAGTopologicalSort(std::vector<SUnit> &SUnits, SUnit *ExitSU);

    /// InitDAGTopologicalSorting - create the initial topological
    /// ordering from the DAG to be scheduled.
    void InitDAGTopologicalSorting();

    /// IsReachable - Checks if SU is reachable from TargetSU.
    bool IsReachable(const SUnit *SU, const SUnit *TargetSU);

    /// WillCreateCycle - Return true if addPred(TargetSU, SU) creates a cycle.
    bool WillCreateCycle(SUnit *TargetSU, SUnit *SU);

    /// AddPred - Updates the topological ordering to accommodate an edge
    /// to be added from SUnit X to SUnit Y.
    void AddPred(SUnit *Y, SUnit *X);

    /// RemovePred - Updates the topological ordering to accommodate an
    /// edge to be removed from the specified node N, a predecessor of
    /// the current node M.
    void RemovePred(SUnit *M, SUnit *N);

    typedef std::vector<int>::iterator iterator;
    typedef std::vector<int>::const_iterator const_iterator;
    iterator begin() { return Index2Node.begin(); }
    const_iterator begin() const { return Index2Node.begin(); }
    iterator end() { return Index2Node.end(); }
    const_iterator end() const { return Index2Node.end(); }

    typedef std::vector<int>::reverse_iterator reverse_iterator;
    typedef std::vector<int>::const_reverse_iterator const_reverse_iterator;
    reverse_iterator rbegin() { return Index2Node.rbegin(); }
    const_reverse_iterator rbegin() const { return Index2Node.rbegin(); }
    reverse_iterator rend() { return Index2Node.rend(); }
    const_reverse_iterator rend() const { return Index2Node.rend(); }
  };
}

#endif