//===-- InterferenceCache.cpp - Caching per-block interference -*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// InterferenceCache remembers per-block interference in LiveIntervalUnions.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "regalloc"
#include "InterferenceCache.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Support/ErrorHandling.h"

using namespace llvm;

void InterferenceCache::init(MachineFunction *mf,
                             LiveIntervalUnion *liuarray,
                             SlotIndexes *indexes,
                             const TargetRegisterInfo *tri) {
  MF = mf;
  LIUArray = liuarray;
  TRI = tri;
  PhysRegEntries.assign(TRI->getNumRegs(), 0);
  for (unsigned i = 0; i != CacheEntries; ++i)
    Entries[i].clear(mf, indexes);
}

InterferenceCache::Entry *InterferenceCache::get(unsigned PhysReg) {
  unsigned E = PhysRegEntries[PhysReg];
  if (E < CacheEntries && Entries[E].getPhysReg() == PhysReg) {
    if (!Entries[E].valid(LIUArray, TRI))
      Entries[E].revalidate();
    return &Entries[E];
  }
  // No valid entry exists, pick the next round-robin entry.
  E = RoundRobin;
  if (++RoundRobin == CacheEntries)
    RoundRobin = 0;
  for (unsigned i = 0; i != CacheEntries; ++i) {
    // Skip entries that are in use.
    if (Entries[E].hasRefs()) {
      if (++E == CacheEntries)
        E = 0;
      continue;
    }
    Entries[E].reset(PhysReg, LIUArray, TRI, MF);
    PhysRegEntries[PhysReg] = E;
    return &Entries[E];
  }
  llvm_unreachable("Ran out of interference cache entries.");
}

/// revalidate - LIU contents have changed, update tags.
void InterferenceCache::Entry::revalidate() {
  // Invalidate all block entries.
  ++Tag;
  // Invalidate all iterators.
  PrevPos = SlotIndex();
  for (unsigned i = 0, e = Aliases.size(); i != e; ++i)
    Aliases[i].second = Aliases[i].first->getTag();
}

void InterferenceCache::Entry::reset(unsigned physReg,
                                     LiveIntervalUnion *LIUArray,
                                     const TargetRegisterInfo *TRI,
                                     const MachineFunction *MF) {
  assert(!hasRefs() && "Cannot reset cache entry with references");
  // The LIUs changed, invalidate the cache.
  ++Tag;
  PhysReg = physReg;
  Blocks.resize(MF->getNumBlockIDs());
  Aliases.clear();
  for (const unsigned *AS = TRI->getOverlaps(PhysReg); *AS; ++AS) {
    LiveIntervalUnion *LIU = LIUArray + *AS;
    Aliases.push_back(std::make_pair(LIU, LIU->getTag()));
  }

  // Reset iterators.
  PrevPos = SlotIndex();
  unsigned e = Aliases.size();
  Iters.resize(e);
  for (unsigned i = 0; i != e; ++i)
    Iters[i].setMap(Aliases[i].first->getMap());
}

bool InterferenceCache::Entry::valid(LiveIntervalUnion *LIUArray,
                                     const TargetRegisterInfo *TRI) {
  unsigned i = 0, e = Aliases.size();
  for (const unsigned *AS = TRI->getOverlaps(PhysReg); *AS; ++AS, ++i) {
    LiveIntervalUnion *LIU = LIUArray + *AS;
    if (i == e || Aliases[i].first != LIU)
      return false;
    if (LIU->changedSince(Aliases[i].second))
      return false;
  }
  return i == e;
}

void InterferenceCache::Entry::update(unsigned MBBNum) {
  SlotIndex Start, Stop;
  tie(Start, Stop) = Indexes->getMBBRange(MBBNum);

  // Use advanceTo only when possible. advanceTo can only move forward, so a
  // full find is needed when scanning backwards from PrevPos.
  if (PrevPos != Start) {
    if (!PrevPos.isValid() || Start < PrevPos)
      for (unsigned i = 0, e = Iters.size(); i != e; ++i)
        Iters[i].find(Start);
    else
      for (unsigned i = 0, e = Iters.size(); i != e; ++i)
        Iters[i].advanceTo(Start);
    PrevPos = Start;
  }

  MachineFunction::const_iterator MFI = MF->getBlockNumbered(MBBNum);
  BlockInterference *BI = &Blocks[MBBNum];
  for (;;) {
    BI->Tag = Tag;
    BI->First = BI->Last = SlotIndex();

    // Check for first interference.
    for (unsigned i = 0, e = Iters.size(); i != e; ++i) {
      Iter &I = Iters[i];
      if (!I.valid())
        continue;
      SlotIndex StartI = I.start();
      if (StartI >= Stop)
        continue;
      if (!BI->First.isValid() || StartI < BI->First)
        BI->First = StartI;
    }

    PrevPos = Stop;
    if (BI->First.isValid())
      break;

    // No interference in this block? Go ahead and precompute the next block.
    if (++MFI == MF->end())
      return;
    MBBNum = MFI->getNumber();
    BI = &Blocks[MBBNum];
    if (BI->Tag == Tag)
      return;
    tie(Start, Stop) = Indexes->getMBBRange(MBBNum);
  }

  // Check for last interference in block.
  for (unsigned i = 0, e = Iters.size(); i != e; ++i) {
    Iter &I = Iters[i];
    if (!I.valid() || I.start() >= Stop)
      continue;
    I.advanceTo(Stop);
    // If advanceTo moved past the block, back up to the last segment that
    // overlaps it, then restore the iterator afterwards.
    bool Backup = !I.valid() || I.start() >= Stop;
    if (Backup)
      --I;
    SlotIndex StopI = I.stop();
    if (!BI->Last.isValid() || StopI > BI->Last)
      BI->Last = StopI;
    if (Backup)
      ++I;
  }
}