//===-- SpillPlacement.cpp - Optimal Spill Code Placement -----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the spill code placement analysis.
//
// Each edge bundle corresponds to a node in a Hopfield network. Constraints on
// basic blocks are weighted by the block frequency and added to become the node
// bias.
//
// Transparent basic blocks have the variable live through, but don't care if it
// is spilled or in a register. These blocks become connections in the Hopfield
// network, again weighted by block frequency.
//
// The Hopfield network minimizes (possibly locally) its energy function:
//
//   E = -sum_n V_n * ( B_n + sum_{n, m linked by b} V_m * F_b )
//
// The energy function represents the expected spill code execution frequency,
// or the cost of spilling. This is a Lyapunov function which never increases
// when a node is updated. It is guaranteed to converge to a local minimum.
//
//===----------------------------------------------------------------------===//

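// A rough sketch of how a client such as the greedy register allocator might
// drive this analysis. The variable names below are hypothetical and the
// actual call sequence in callers may differ:
//
//   SpillPlacement &SP = getAnalysis<SpillPlacement>();
//   BitVector RegBundles;
//   SP.prepare(RegBundles);          // RegBundles doubles as the active set.
//   SP.addConstraints(LiveBlocks);   // Per-block PrefReg/PrefSpill/MustSpill.
//   SP.addLinks(TransparentBlocks);  // Live-through blocks become links.
//   SP.scanActiveBundles();          // Compute initial node values.
//   SP.iterate();                    // Relax the Hopfield network.
//   bool Perfect = SP.finish();      // RegBundles now holds the preferences.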
#include "SpillPlacement.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/CodeGen/EdgeBundles.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ManagedStatic.h"

using namespace llvm;

#define DEBUG_TYPE "spillplacement"

char SpillPlacement::ID = 0;
INITIALIZE_PASS_BEGIN(SpillPlacement, "spill-code-placement",
                      "Spill Code Placement Analysis", true, true)
INITIALIZE_PASS_DEPENDENCY(EdgeBundles)
INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
INITIALIZE_PASS_END(SpillPlacement, "spill-code-placement",
                    "Spill Code Placement Analysis", true, true)

char &llvm::SpillPlacementID = SpillPlacement::ID;

void SpillPlacement::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  AU.addRequired<MachineBlockFrequencyInfo>();
  AU.addRequiredTransitive<EdgeBundles>();
  AU.addRequiredTransitive<MachineLoopInfo>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

/// Node - Each edge bundle corresponds to a Hopfield node.
///
/// The node contains precomputed frequency data that only depends on the CFG,
/// but Bias and Links are recomputed for each placement query.
///
/// The node Value is positive when the variable should be in a register. The
/// value can change when linked nodes change, but convergence is very fast
/// because all weights are positive.
///
struct SpillPlacement::Node {
  /// BiasN - Sum of blocks that prefer a spill.
  BlockFrequency BiasN;
  /// BiasP - Sum of blocks that prefer a register.
  BlockFrequency BiasP;

  /// Value - Output value of this node computed from the Bias and links.
  /// This is always one of the values {-1, 0, 1}. A positive number means the
  /// variable should go in a register through this bundle.
  int Value;

  typedef SmallVector<std::pair<BlockFrequency, unsigned>, 4> LinkVector;

  /// Links - (Weight, BundleNo) for all transparent blocks connecting to other
  /// bundles. The weights are all positive block frequencies.
  LinkVector Links;

  /// SumLinkWeights - Cached sum of the weights of all links + Threshold.
  BlockFrequency SumLinkWeights;

  /// preferReg - Return true when this node prefers to be in a register.
  bool preferReg() const {
    // Undecided nodes (Value==0) go on the stack.
    return Value > 0;
  }

  /// mustSpill - Return true if this node is so biased that it must spill.
  bool mustSpill() const {
    // We must spill if Bias < -sum(weights) or the MustSpill flag was set.
    // BiasN is saturated when MustSpill is set; make sure this still returns
    // true when the RHS saturates. Note that SumLinkWeights includes Threshold.
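    // For example, once MustSpill has saturated BiasN to the maximum block
    // frequency, the sum on the RHS may saturate to the same maximum, and the
    // '>=' comparison (rather than '>') still yields true in that case.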
    return BiasN >= BiasP + SumLinkWeights;
  }

  /// clear - Reset per-query data, but preserve frequencies that only depend on
  /// the CFG.
  void clear(const BlockFrequency &Threshold) {
    BiasN = BiasP = Value = 0;
    SumLinkWeights = Threshold;
    Links.clear();
  }

  /// addLink - Add a link to bundle b with weight w.
  void addLink(unsigned b, BlockFrequency w) {
    // Update cached sum.
    SumLinkWeights += w;

    // There can be multiple links to the same bundle, add them up.
    for (LinkVector::iterator I = Links.begin(), E = Links.end(); I != E; ++I)
      if (I->second == b) {
        I->first += w;
        return;
      }
    // This must be the first link to b.
    Links.push_back(std::make_pair(w, b));
  }

  /// addBias - Bias this node.
  void addBias(BlockFrequency freq, BorderConstraint direction) {
    switch (direction) {
    default:
      break;
    case PrefReg:
      BiasP += freq;
      break;
    case PrefSpill:
      BiasN += freq;
      break;
    case MustSpill:
      BiasN = BlockFrequency::getMaxFrequency();
      break;
    }
  }

  /// update - Recompute Value from Bias and Links. Return true when node
  /// preference changes.
  bool update(const Node nodes[], const BlockFrequency &Threshold) {
    // Compute the weighted sum of inputs.
    BlockFrequency SumN = BiasN;
    BlockFrequency SumP = BiasP;
    for (LinkVector::iterator I = Links.begin(), E = Links.end(); I != E; ++I) {
      if (nodes[I->second].Value == -1)
        SumN += I->first;
      else if (nodes[I->second].Value == 1)
        SumP += I->first;
    }

    // Each weighted sum is going to be less than the total frequency of the
    // bundle. Ideally, we should simply set Value = sign(SumP - SumN), but we
    // will add a dead zone around 0 for two reasons:
    //
    //  1. It avoids arbitrary bias when all links are 0 as is possible during
    //     initial iterations.
    //  2. It helps tame rounding errors when the links nominally sum to 0.
    //
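    // For example, with Threshold == 2, SumP == 10, and SumN == 9, the node
    // stays undecided (Value == 0) instead of flipping positive on a one-unit
    // difference.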
    bool Before = preferReg();
    if (SumN >= SumP + Threshold)
      Value = -1;
    else if (SumP >= SumN + Threshold)
      Value = 1;
    else
      Value = 0;
    return Before != preferReg();
  }

  void getDissentingNeighbors(SparseSet<unsigned> &List,
                              const Node nodes[]) const {
    for (const auto &Elt : Links) {
      unsigned n = Elt.second;
      // Neighbors that already have the same value are not going to
      // change because of this node changing.
      if (Value != nodes[n].Value)
        List.insert(n);
    }
  }
};

bool SpillPlacement::runOnMachineFunction(MachineFunction &mf) {
  MF = &mf;
  bundles = &getAnalysis<EdgeBundles>();
  loops = &getAnalysis<MachineLoopInfo>();

  assert(!nodes && "Leaking node array");
  nodes = new Node[bundles->getNumBundles()];
  TodoList.clear();
  TodoList.setUniverse(bundles->getNumBundles());

  // Compute total ingoing and outgoing block frequencies for all bundles.
  BlockFrequencies.resize(mf.getNumBlockIDs());
  MBFI = &getAnalysis<MachineBlockFrequencyInfo>();
  setThreshold(MBFI->getEntryFreq());
  for (auto &I : mf) {
    unsigned Num = I.getNumber();
    BlockFrequencies[Num] = MBFI->getBlockFreq(&I);
  }

  // We never change the function.
  return false;
}

void SpillPlacement::releaseMemory() {
  delete[] nodes;
  nodes = nullptr;
  TodoList.clear();
}

/// activate - mark node n as active if it wasn't already.
void SpillPlacement::activate(unsigned n) {
  TodoList.insert(n);
  if (ActiveNodes->test(n))
    return;
  ActiveNodes->set(n);
  nodes[n].clear(Threshold);

  // Very large bundles usually come from big switches, indirect branches,
  // landing pads, or loops with many 'continue' statements. It is difficult to
  // allocate registers when so many different blocks are involved.
  //
  // Give a small negative bias to large bundles such that a substantial
  // fraction of the connected blocks need to be interested before we consider
  // expanding the region through the bundle. This helps compile time by
  // limiting the number of blocks visited and the number of links in the
  // Hopfield network.
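  //
  // For example, assuming an entry frequency of 2^14, the bias below is 2^10,
  // so blocks with a combined frequency of roughly 1/16 of the entry block
  // must prefer a register before this bundle turns positive.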
  if (bundles->getBlocks(n).size() > 100) {
    nodes[n].BiasP = 0;
    nodes[n].BiasN = (MBFI->getEntryFreq() / 16);
  }
}

/// \brief Set the threshold for a given entry frequency.
///
/// Set the threshold relative to \c Entry.  Since the threshold is used as a
/// bound on the open interval (-Threshold;Threshold), 1 is the minimum
/// threshold.
void SpillPlacement::setThreshold(const BlockFrequency &Entry) {
  // Apparently 2 is a good threshold when Entry==2^14, but we need to scale
  // it.  Divide by 2^13, rounding as appropriate.
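  // For example, an entry frequency of 2^14 gives Freq >> 13 == 2 with bit 12
  // clear, so Threshold becomes 2; 2^13 + 2^12 would round up to 2 as well.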
  uint64_t Freq = Entry.getFrequency();
  uint64_t Scaled = (Freq >> 13) + bool(Freq & (1 << 12));
  Threshold = std::max(UINT64_C(1), Scaled);
}

/// addConstraints - Compute node biases and weights from a set of constraints.
/// Set a bit in ActiveNodes for each active node.
void SpillPlacement::addConstraints(ArrayRef<BlockConstraint> LiveBlocks) {
  for (ArrayRef<BlockConstraint>::iterator I = LiveBlocks.begin(),
       E = LiveBlocks.end(); I != E; ++I) {
    BlockFrequency Freq = BlockFrequencies[I->Number];

    // Live-in to block?
    if (I->Entry != DontCare) {
      unsigned ib = bundles->getBundle(I->Number, 0);
      activate(ib);
      nodes[ib].addBias(Freq, I->Entry);
    }

    // Live-out from block?
    if (I->Exit != DontCare) {
      unsigned ob = bundles->getBundle(I->Number, 1);
      activate(ob);
      nodes[ob].addBias(Freq, I->Exit);
    }
  }
}

/// addPrefSpill - Same as addConstraints(PrefSpill)
void SpillPlacement::addPrefSpill(ArrayRef<unsigned> Blocks, bool Strong) {
  for (ArrayRef<unsigned>::iterator I = Blocks.begin(), E = Blocks.end();
       I != E; ++I) {
    BlockFrequency Freq = BlockFrequencies[*I];
    if (Strong)
      Freq += Freq;
    unsigned ib = bundles->getBundle(*I, 0);
    unsigned ob = bundles->getBundle(*I, 1);
    activate(ib);
    activate(ob);
    nodes[ib].addBias(Freq, PrefSpill);
    nodes[ob].addBias(Freq, PrefSpill);
  }
}

void SpillPlacement::addLinks(ArrayRef<unsigned> Links) {
  for (ArrayRef<unsigned>::iterator I = Links.begin(), E = Links.end(); I != E;
       ++I) {
    unsigned Number = *I;
    unsigned ib = bundles->getBundle(Number, 0);
    unsigned ob = bundles->getBundle(Number, 1);

    // Ignore self-loops.
    if (ib == ob)
      continue;
    activate(ib);
    activate(ob);
    BlockFrequency Freq = BlockFrequencies[Number];
    nodes[ib].addLink(ob, Freq);
    nodes[ob].addLink(ib, Freq);
  }
}

bool SpillPlacement::scanActiveBundles() {
  RecentPositive.clear();
  for (int n = ActiveNodes->find_first(); n>=0; n = ActiveNodes->find_next(n)) {
    update(n);
    // A node that must spill, or a node without any links, is not going to
    // change its value ever again, so exclude it from iterations.
    if (nodes[n].mustSpill())
      continue;
    if (nodes[n].preferReg())
      RecentPositive.push_back(n);
  }
  return !RecentPositive.empty();
}

bool SpillPlacement::update(unsigned n) {
  if (!nodes[n].update(nodes, Threshold))
    return false;
  nodes[n].getDissentingNeighbors(TodoList, nodes);
  return true;
}

/// iterate - Repeatedly update the Hopfield nodes until stability or the
/// maximum number of iterations is reached.
void SpillPlacement::iterate() {
  // The nodes in RecentPositive do not need to be pushed onto the TodoList;
  // they have already been processed as part of the previous iteration.
  RecentPositive.clear();

  // Since the last iteration, the TodoList has been augmented by calls
  // to addConstraints, addLinks, and friends.
  // Update the network energy starting at this new frontier.
  // The call to ::update will add the nodes that changed into the TodoList.
  unsigned Limit = bundles->getNumBundles() * 10;
  while (Limit-- > 0 && !TodoList.empty()) {
    unsigned n = TodoList.pop_back_val();
    if (!update(n))
      continue;
    if (nodes[n].preferReg())
      RecentPositive.push_back(n);
  }
}

void SpillPlacement::prepare(BitVector &RegBundles) {
  RecentPositive.clear();
  TodoList.clear();
  // Reuse RegBundles as our ActiveNodes vector.
  ActiveNodes = &RegBundles;
  ActiveNodes->clear();
  ActiveNodes->resize(bundles->getNumBundles());
}

bool
SpillPlacement::finish() {
  assert(ActiveNodes && "Call prepare() first");

  // Write preferences back to ActiveNodes.
  bool Perfect = true;
  for (int n = ActiveNodes->find_first(); n>=0; n = ActiveNodes->find_next(n))
    if (!nodes[n].preferReg()) {
      ActiveNodes->reset(n);
      Perfect = false;
    }
  ActiveNodes = nullptr;
  return Perfect;
}
    376