//===----- CriticalAntiDepBreaker.cpp - Anti-dep breaker ------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the CriticalAntiDepBreaker class, which
// implements register anti-dependence breaking along a block's
// critical path during post-RA scheduling.
//
//===----------------------------------------------------------------------===//

#include "CriticalAntiDepBreaker.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"

using namespace llvm;

#define DEBUG_TYPE "post-RA-sched"

CriticalAntiDepBreaker::
CriticalAntiDepBreaker(MachineFunction& MFi, const RegisterClassInfo &RCI) :
  AntiDepBreaker(), MF(MFi),
  MRI(MF.getRegInfo()),
  TII(MF.getTarget().getInstrInfo()),
  TRI(MF.getTarget().getRegisterInfo()),
  RegClassInfo(RCI),
  Classes(TRI->getNumRegs(), nullptr),
  KillIndices(TRI->getNumRegs(), 0),
  DefIndices(TRI->getNumRegs(), 0),
  KeepRegs(TRI->getNumRegs(), false) {}

CriticalAntiDepBreaker::~CriticalAntiDepBreaker() {
}

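// StartBlock - Reset the per-block liveness and renaming state. In Classes,
// a null entry means no register class has been recorded for that register
// yet, a concrete class means the register is a renaming candidate of that
// class, and (TargetRegisterClass *)-1 marks it as unrenamable. A KillIndices
// entry of ~0u means the register is not currently live. Registers live into
// successor blocks and live-out callee-saved registers are conservatively
// marked live and unrenamable.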
void CriticalAntiDepBreaker::StartBlock(MachineBasicBlock *BB) {
  const unsigned BBSize = BB->size();
  for (unsigned i = 0, e = TRI->getNumRegs(); i != e; ++i) {
    // Clear out the register class data.
    Classes[i] = nullptr;

    // Initialize the indices to indicate that no registers are live.
    KillIndices[i] = ~0u;
    DefIndices[i] = BBSize;
  }

  // Clear "do not change" set.
  KeepRegs.reset();

  bool IsReturnBlock = (BBSize != 0 && BB->back().isReturn());

  // Examine the live-in regs of all successors.
  for (MachineBasicBlock::succ_iterator SI = BB->succ_begin(),
         SE = BB->succ_end(); SI != SE; ++SI)
    for (MachineBasicBlock::livein_iterator I = (*SI)->livein_begin(),
           E = (*SI)->livein_end(); I != E; ++I) {
      for (MCRegAliasIterator AI(*I, TRI, true); AI.isValid(); ++AI) {
        unsigned Reg = *AI;
        Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
        KillIndices[Reg] = BBSize;
        DefIndices[Reg] = ~0u;
      }
    }

  // Mark live-out callee-saved registers. In a return block this is
  // all callee-saved registers. In a non-return block this is any
  // callee-saved register that is not saved in the prolog.
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  BitVector Pristine = MFI->getPristineRegs(BB);
  for (const MCPhysReg *I = TRI->getCalleeSavedRegs(&MF); *I; ++I) {
    if (!IsReturnBlock && !Pristine.test(*I)) continue;
    for (MCRegAliasIterator AI(*I, TRI, true); AI.isValid(); ++AI) {
      unsigned Reg = *AI;
      Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
      KillIndices[Reg] = BBSize;
      DefIndices[Reg] = ~0u;
    }
  }
}

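// FinishBlock - Clear the per-block state once scheduling of the block has
// finished.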
void CriticalAntiDepBreaker::FinishBlock() {
  RegRefs.clear();
  KeepRegs.reset();
}

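// Observe - Update liveness information to account for the current
// instruction, which will not be scheduled.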
void CriticalAntiDepBreaker::Observe(MachineInstr *MI, unsigned Count,
                                     unsigned InsertPosIndex) {
  // Kill instructions can define registers but are really nops, and there might
  // be a real definition earlier that needs to be paired with uses dominated by
  // this kill.

  // FIXME: It may be possible to remove the isKill() restriction once PR18663
  // has been properly fixed. There can be value in processing kills as seen in
  // the AggressiveAntiDepBreaker class.
  if (MI->isDebugValue() || MI->isKill())
    return;
  assert(Count < InsertPosIndex && "Instruction index out of expected range!");

  for (unsigned Reg = 0; Reg != TRI->getNumRegs(); ++Reg) {
    if (KillIndices[Reg] != ~0u) {
      // If Reg is currently live, then mark that it can't be renamed as
      // we don't know the extent of its live-range anymore (now that it
      // has been scheduled).
      Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
      KillIndices[Reg] = Count;
    } else if (DefIndices[Reg] < InsertPosIndex && DefIndices[Reg] >= Count) {
      // Any register which was defined within the previous scheduling region
      // may have been rescheduled and its lifetime may overlap with registers
      // in ways not reflected in our current liveness state. For each such
      // register, adjust the liveness state to be conservatively correct.
      Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);

      // Move the def index to the end of the previous region, to reflect
      // that the def could theoretically have been scheduled at the end.
      DefIndices[Reg] = InsertPosIndex;
    }
  }

  PrescanInstruction(MI);
  ScanInstruction(MI, Count);
}

/// CriticalPathStep - Return the next SUnit after SU on the bottom-up
/// critical path.
static const SDep *CriticalPathStep(const SUnit *SU) {
  const SDep *Next = nullptr;
  unsigned NextDepth = 0;
  // Find the predecessor edge with the greatest depth.
  for (SUnit::const_pred_iterator P = SU->Preds.begin(), PE = SU->Preds.end();
       P != PE; ++P) {
    const SUnit *PredSU = P->getSUnit();
    unsigned PredLatency = P->getLatency();
    unsigned PredTotalLatency = PredSU->getDepth() + PredLatency;
    // In the case of a latency tie, prefer an anti-dependency edge over
    // other types of edges.
    if (NextDepth < PredTotalLatency ||
        (NextDepth == PredTotalLatency && P->getKind() == SDep::Anti)) {
      NextDepth = PredTotalLatency;
      Next = &*P;
    }
  }
  return Next;
}

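// PrescanInstruction - Examine MI's register operands and update Classes
// (renaming candidacy per register), RegRefs (the operands that reference
// each candidate register), and KeepRegs (registers that must not be renamed
// at all).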
void CriticalAntiDepBreaker::PrescanInstruction(MachineInstr *MI) {
  // It's not safe to change register allocation for source operands of
  // instructions that have special allocation requirements. Also assume all
  // registers used in a call must not be changed (ABI).
  // FIXME: The issue with predicated instructions is more complex. We are
  // being conservative here because the kill markers cannot be trusted after
  // if-conversion:
  // %R6<def> = LDR %SP, %reg0, 92, pred:14, pred:%reg0; mem:LD4[FixedStack14]
  // ...
  // STR %R0, %R6<kill>, %reg0, 0, pred:0, pred:%CPSR; mem:ST4[%395]
  // %R6<def> = LDR %SP, %reg0, 100, pred:0, pred:%CPSR; mem:LD4[FixedStack12]
  // STR %R0, %R6<kill>, %reg0, 0, pred:14, pred:%reg0; mem:ST4[%396](align=8)
  //
  // The first R6 kill is not really a kill since it's killed by a predicated
  // instruction which may not be executed. The second R6 def may or may not
  // re-define R6 so it's not safe to change it since the last R6 use cannot be
  // changed.
  bool Special = MI->isCall() ||
    MI->hasExtraSrcRegAllocReq() ||
    TII->isPredicated(MI);

  // Scan the register operands for this instruction and update
  // Classes and RegRefs.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg()) continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0) continue;
    const TargetRegisterClass *NewRC = nullptr;

    if (i < MI->getDesc().getNumOperands())
      NewRC = TII->getRegClass(MI->getDesc(), i, TRI, MF);

    // For now, only allow the register to be changed if its register
    // class is consistent across all uses.
    if (!Classes[Reg] && NewRC)
      Classes[Reg] = NewRC;
    else if (!NewRC || Classes[Reg] != NewRC)
      Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);

    // Now check for aliases.
    for (MCRegAliasIterator AI(Reg, TRI, false); AI.isValid(); ++AI) {
      // If an alias of the reg is used during the live range, give up.
      // Note that this allows us to skip checking if AntiDepReg
      // overlaps with any of the aliases, among other things.
      unsigned AliasReg = *AI;
      if (Classes[AliasReg]) {
        Classes[AliasReg] = reinterpret_cast<TargetRegisterClass *>(-1);
        Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
      }
    }

    // If we're still willing to consider this register, note the reference.
    if (Classes[Reg] != reinterpret_cast<TargetRegisterClass *>(-1))
      RegRefs.insert(std::make_pair(Reg, &MO));

    // If this reg is tied and live (Classes[Reg] is set to -1), we can't change
    // it or any of its sub or super regs. We need to use KeepRegs to mark the
    // reg because not all uses of the same reg within an instruction are
    // necessarily tagged as tied.
    // Example: an x86 "xor %eax, %eax" will have one source operand tied to the
    // def register but not the second (see PR20020 for details).
    // FIXME: can this check be relaxed to account for undef uses
    // of a register? In the above 'xor' example, the uses of %eax are undef, so
    // earlier instructions could still replace %eax even though the 'xor'
    // itself can't be changed.
    if (MI->isRegTiedToUseOperand(i) &&
        Classes[Reg] == reinterpret_cast<TargetRegisterClass *>(-1)) {
      for (MCSubRegIterator SubRegs(Reg, TRI, /*IncludeSelf=*/true);
           SubRegs.isValid(); ++SubRegs) {
        KeepRegs.set(*SubRegs);
      }
      for (MCSuperRegIterator SuperRegs(Reg, TRI);
           SuperRegs.isValid(); ++SuperRegs) {
        KeepRegs.set(*SuperRegs);
      }
    }

    if (MO.isUse() && Special) {
      if (!KeepRegs.test(Reg)) {
        for (MCSubRegIterator SubRegs(Reg, TRI, /*IncludeSelf=*/true);
             SubRegs.isValid(); ++SubRegs)
          KeepRegs.set(*SubRegs);
      }
    }
  }
}

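// ScanInstruction - Update the liveness state (KillIndices, DefIndices,
// Classes, RegRefs, KeepRegs) to account for MI's defs and uses. Count is
// MI's index in the bottom-up walk of the scheduling region.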
void CriticalAntiDepBreaker::ScanInstruction(MachineInstr *MI,
                                             unsigned Count) {
  // Update liveness.
  // Proceeding upwards, registers that are defined but not used in this
  // instruction are now dead.
  assert(!MI->isKill() && "Attempting to scan a kill instruction");

  if (!TII->isPredicated(MI)) {
    // Predicated defs are modeled as read + write, i.e. similar to
    // two-address updates.
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);

      if (MO.isRegMask())
        for (unsigned i = 0, e = TRI->getNumRegs(); i != e; ++i)
          if (MO.clobbersPhysReg(i)) {
            DefIndices[i] = Count;
            KillIndices[i] = ~0u;
            KeepRegs.reset(i);
            Classes[i] = nullptr;
            RegRefs.erase(i);
          }

      if (!MO.isReg()) continue;
      unsigned Reg = MO.getReg();
      if (Reg == 0) continue;
      if (!MO.isDef()) continue;

      // If we've already marked this reg as unchangeable, carry on.
      if (KeepRegs.test(Reg)) continue;

      // Ignore two-addr defs.
      if (MI->isRegTiedToUseOperand(i)) continue;

      // FIXME: we should use a SubRegIterator that includes self (as above), so
      // we don't have to repeat all this code for the reg itself.
      DefIndices[Reg] = Count;
      KillIndices[Reg] = ~0u;
      assert(((KillIndices[Reg] == ~0u) !=
              (DefIndices[Reg] == ~0u)) &&
             "Kill and Def maps aren't consistent for Reg!");
      KeepRegs.reset(Reg);
      Classes[Reg] = nullptr;
      RegRefs.erase(Reg);
      // Repeat, for all subregs.
      for (MCSubRegIterator SubRegs(Reg, TRI); SubRegs.isValid(); ++SubRegs) {
        unsigned SubregReg = *SubRegs;
        DefIndices[SubregReg] = Count;
        KillIndices[SubregReg] = ~0u;
        KeepRegs.reset(SubregReg);
        Classes[SubregReg] = nullptr;
        RegRefs.erase(SubregReg);
      }
      // Conservatively mark super-registers as unusable.
      for (MCSuperRegIterator SR(Reg, TRI); SR.isValid(); ++SR)
        Classes[*SR] = reinterpret_cast<TargetRegisterClass *>(-1);
    }
  }
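  // Scan the uses. Walking bottom-up, a use of a register that is not
  // currently live becomes that register's kill.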
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg()) continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0) continue;
    if (!MO.isUse()) continue;

    const TargetRegisterClass *NewRC = nullptr;
    if (i < MI->getDesc().getNumOperands())
      NewRC = TII->getRegClass(MI->getDesc(), i, TRI, MF);

    // For now, only allow the register to be changed if its register
    // class is consistent across all uses.
    if (!Classes[Reg] && NewRC)
      Classes[Reg] = NewRC;
    else if (!NewRC || Classes[Reg] != NewRC)
      Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);

    RegRefs.insert(std::make_pair(Reg, &MO));

    // FIXME: we should use an MCRegAliasIterator that includes self so we don't
    // have to repeat all this code for the reg itself.

    // It wasn't previously live but now it is; this is a kill.
    if (KillIndices[Reg] == ~0u) {
      KillIndices[Reg] = Count;
      DefIndices[Reg] = ~0u;
      assert(((KillIndices[Reg] == ~0u) !=
              (DefIndices[Reg] == ~0u)) &&
             "Kill and Def maps aren't consistent for Reg!");
    }
    // Repeat, for all aliases.
    for (MCRegAliasIterator AI(Reg, TRI, false); AI.isValid(); ++AI) {
      unsigned AliasReg = *AI;
      if (KillIndices[AliasReg] == ~0u) {
        KillIndices[AliasReg] = Count;
        DefIndices[AliasReg] = ~0u;
      }
    }
  }
}

// Check all machine operands that reference the anti-dependent register and
// must be replaced by NewReg. Return true if any of their parent instructions
// may clobber the new register.
//
// Note: AntiDepReg may be referenced by a two-address instruction such that
// its use operand is tied to a def operand. We guard against the case in which
// the two-address instruction also defines NewReg, as may happen with
// pre/postincrement loads. In this case, both the use and def operands are in
// RegRefs because the def is inserted by PrescanInstruction and not erased
// during ScanInstruction. So checking for an instruction with definitions of
// both NewReg and AntiDepReg covers it.
bool
CriticalAntiDepBreaker::isNewRegClobberedByRefs(RegRefIter RegRefBegin,
                                                RegRefIter RegRefEnd,
                                                unsigned NewReg)
{
  for (RegRefIter I = RegRefBegin; I != RegRefEnd; ++I) {
    MachineOperand *RefOper = I->second;

    // Don't allow the instruction defining AntiDepReg to earlyclobber its
    // operands, in case they may be assigned to NewReg. In this case antidep
    // breaking must fail, but it's too rare to bother optimizing.
    if (RefOper->isDef() && RefOper->isEarlyClobber())
      return true;

    // Handle cases in which this instruction defines NewReg.
    MachineInstr *MI = RefOper->getParent();
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      const MachineOperand &CheckOper = MI->getOperand(i);

      if (CheckOper.isRegMask() && CheckOper.clobbersPhysReg(NewReg))
        return true;

      if (!CheckOper.isReg() || !CheckOper.isDef() ||
          CheckOper.getReg() != NewReg)
        continue;

      // Don't allow the instruction to define NewReg and AntiDepReg.
      // When AntiDepReg is renamed it will be an illegal op.
      if (RefOper->isDef())
        return true;

      // Don't allow an instruction using AntiDepReg to be earlyclobbered by
      // NewReg.
      if (CheckOper.isEarlyClobber())
        return true;

      // Don't allow inline asm to define NewReg at all. Who knows what it's
      // doing with it.
      if (MI->isInlineAsm())
        return true;
    }
  }
  return false;
}

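// findSuitableFreeRegister - Choose a register from RC's allocation order
// that is free over AntiDepReg's live range and safe to use as a rename
// target, or return 0 if no such register exists.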
unsigned CriticalAntiDepBreaker::
findSuitableFreeRegister(RegRefIter RegRefBegin,
                         RegRefIter RegRefEnd,
                         unsigned AntiDepReg,
                         unsigned LastNewReg,
                         const TargetRegisterClass *RC,
                         SmallVectorImpl<unsigned> &Forbid)
{
  ArrayRef<MCPhysReg> Order = RegClassInfo.getOrder(RC);
  for (unsigned i = 0; i != Order.size(); ++i) {
    unsigned NewReg = Order[i];
    // Don't replace a register with itself.
    if (NewReg == AntiDepReg) continue;
    // Don't replace a register with one that was recently used to repair
    // an anti-dependence with this AntiDepReg, because that would
    // re-introduce that anti-dependence.
    if (NewReg == LastNewReg) continue;
    // If any instructions that define AntiDepReg also define NewReg, it's
    // not suitable.  For example, instructions with multiple definitions can
    // result in this condition.
    if (isNewRegClobberedByRefs(RegRefBegin, RegRefEnd, NewReg)) continue;
    // If NewReg is dead and NewReg's most recent def is not before
    // AntiDepReg's kill, it's safe to replace AntiDepReg with NewReg.
    assert(((KillIndices[AntiDepReg] == ~0u) != (DefIndices[AntiDepReg] == ~0u))
           && "Kill and Def maps aren't consistent for AntiDepReg!");
    assert(((KillIndices[NewReg] == ~0u) != (DefIndices[NewReg] == ~0u))
           && "Kill and Def maps aren't consistent for NewReg!");
    if (KillIndices[NewReg] != ~0u ||
        Classes[NewReg] == reinterpret_cast<TargetRegisterClass *>(-1) ||
        KillIndices[AntiDepReg] > DefIndices[NewReg])
      continue;
    // If NewReg overlaps any of the forbidden registers, we can't use it.
    bool Forbidden = false;
    for (SmallVectorImpl<unsigned>::iterator it = Forbid.begin(),
           ite = Forbid.end(); it != ite; ++it)
      if (TRI->regsOverlap(NewReg, *it)) {
        Forbidden = true;
        break;
      }
    if (Forbidden) continue;
    return NewReg;
  }

  // No registers are free and available!
  return 0;
}

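// BreakAntiDependencies - Walk the scheduling region bottom-up, identify
// anti-dependence edges on the critical path, and try to break them by
// renaming the anti-dependent register. Returns the number of
// anti-dependencies broken.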
unsigned CriticalAntiDepBreaker::
BreakAntiDependencies(const std::vector<SUnit>& SUnits,
                      MachineBasicBlock::iterator Begin,
                      MachineBasicBlock::iterator End,
                      unsigned InsertPosIndex,
                      DbgValueVector &DbgValues) {
  // The code below assumes that there is at least one instruction,
  // so just duck out immediately if the block is empty.
  if (SUnits.empty()) return 0;

  // Keep a map of the MachineInstr*'s back to the SUnit representing them.
  // This is used for updating debug information.
  //
  // FIXME: Replace this with the existing map in ScheduleDAGInstrs::MISUnitMap
  DenseMap<MachineInstr*,const SUnit*> MISUnitMap;

  // Find the node at the bottom of the critical path.
  const SUnit *Max = nullptr;
  for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
    const SUnit *SU = &SUnits[i];
    MISUnitMap[SU->getInstr()] = SU;
    if (!Max || SU->getDepth() + SU->Latency > Max->getDepth() + Max->Latency)
      Max = SU;
  }

#ifndef NDEBUG
  {
    DEBUG(dbgs() << "Critical path has total latency "
          << (Max->getDepth() + Max->Latency) << "\n");
    DEBUG(dbgs() << "Available regs:");
    for (unsigned Reg = 0; Reg < TRI->getNumRegs(); ++Reg) {
      if (KillIndices[Reg] == ~0u)
        DEBUG(dbgs() << " " << TRI->getName(Reg));
    }
    DEBUG(dbgs() << '\n');
  }
#endif

  // Track progress along the critical path through the SUnit graph as we walk
  // the instructions.
  const SUnit *CriticalPathSU = Max;
  MachineInstr *CriticalPathMI = CriticalPathSU->getInstr();

  // Consider this pattern:
  //   A = ...
  //   ... = A
  //   A = ...
  //   ... = A
  //   A = ...
  //   ... = A
  //   A = ...
  //   ... = A
  // There are three anti-dependencies here, and without special care,
  // we'd break all of them using the same register:
  //   A = ...
  //   ... = A
  //   B = ...
  //   ... = B
  //   B = ...
  //   ... = B
  //   B = ...
  //   ... = B
  // because at each anti-dependence, B is the first register that
  // isn't A which is free.  This re-introduces anti-dependencies
  // at all but one of the original anti-dependencies that we were
  // trying to break.  To avoid this, keep track of the most recent
  // register that each register was replaced with, and avoid
  // using it to repair an anti-dependence on the same register.
  // This lets us produce this:
  //   A = ...
  //   ... = A
  //   B = ...
  //   ... = B
  //   C = ...
  //   ... = C
  //   B = ...
  //   ... = B
  // This still has an anti-dependence on B, but at least it isn't on the
  // original critical path.
  //
  // TODO: If we tracked more than one register here, we could potentially
  // fix that remaining critical edge too. This is a little more involved,
  // because unlike the most recent register, less recent registers should
  // still be considered, though only if no other registers are available.
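  // LastNewReg[Reg] is the register most recently chosen to replace Reg, or
  // zero if Reg has not been renamed yet.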
  std::vector<unsigned> LastNewReg(TRI->getNumRegs(), 0);

  // Attempt to break anti-dependence edges on the critical path. Walk the
  // instructions from the bottom up, tracking information about liveness
  // as we go to help determine which registers are available.
  unsigned Broken = 0;
  unsigned Count = InsertPosIndex - 1;
  for (MachineBasicBlock::iterator I = End, E = Begin; I != E; --Count) {
    MachineInstr *MI = --I;
    // Kill instructions can define registers but are really nops, and there
    // might be a real definition earlier that needs to be paired with uses
    // dominated by this kill.

    // FIXME: It may be possible to remove the isKill() restriction once PR18663
    // has been properly fixed. There can be value in processing kills as seen
    // in the AggressiveAntiDepBreaker class.
    if (MI->isDebugValue() || MI->isKill())
      continue;

    // Check if this instruction has a dependence on the critical path that
    // is an anti-dependence that we may be able to break. If it is, set
    // AntiDepReg to the non-zero register associated with the anti-dependence.
    //
    // We limit our attention to the critical path as a heuristic to avoid
    // breaking anti-dependence edges that aren't going to significantly
    // impact the overall schedule. There are a limited number of registers
    // and we want to save them for the important edges.
    //
    // TODO: Instructions with multiple defs could have multiple
    // anti-dependencies. The current code here only knows how to break one
    // edge per instruction. Note that we'd have to be able to break all of
    // the anti-dependencies in an instruction in order to be effective.
    unsigned AntiDepReg = 0;
    if (MI == CriticalPathMI) {
      if (const SDep *Edge = CriticalPathStep(CriticalPathSU)) {
        const SUnit *NextSU = Edge->getSUnit();

        // Only consider anti-dependence edges.
        if (Edge->getKind() == SDep::Anti) {
          AntiDepReg = Edge->getReg();
          assert(AntiDepReg != 0 && "Anti-dependence on reg0?");
          if (!MRI.isAllocatable(AntiDepReg))
            // Don't break anti-dependencies on non-allocatable registers.
            AntiDepReg = 0;
          else if (KeepRegs.test(AntiDepReg))
            // Don't break anti-dependencies if a use down below requires
            // this exact register.
            AntiDepReg = 0;
          else {
            // If the SUnit has other dependencies on the SUnit that it
            // anti-depends on, don't bother breaking the anti-dependency
            // since those edges would prevent such units from being
            // scheduled past each other regardless.
            //
            // Also, if there are dependencies on other SUnits with the
            // same register as the anti-dependency, don't attempt to
            // break it.
            for (SUnit::const_pred_iterator P = CriticalPathSU->Preds.begin(),
                 PE = CriticalPathSU->Preds.end(); P != PE; ++P)
              if (P->getSUnit() == NextSU ?
                    (P->getKind() != SDep::Anti || P->getReg() != AntiDepReg) :
                    (P->getKind() == SDep::Data && P->getReg() == AntiDepReg)) {
                AntiDepReg = 0;
                break;
              }
          }
        }
        CriticalPathSU = NextSU;
        CriticalPathMI = CriticalPathSU->getInstr();
      } else {
        // We've reached the end of the critical path.
        CriticalPathSU = nullptr;
        CriticalPathMI = nullptr;
      }
    }

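    // Update Classes, RegRefs, and KeepRegs for this instruction's operands
    // before deciding whether the chosen anti-dependence can be broken.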
    PrescanInstruction(MI);

    SmallVector<unsigned, 2> ForbidRegs;

    // If MI's defs have a special allocation requirement, don't allow
    // any def registers to be changed. Also assume all registers
    // defined in a call must not be changed (ABI).
    if (MI->isCall() || MI->hasExtraDefRegAllocReq() || TII->isPredicated(MI))
      // If this instruction's defs have a special allocation requirement,
      // don't break this anti-dependency.
      AntiDepReg = 0;
    else if (AntiDepReg) {
      // If this instruction has a use of AntiDepReg, breaking it
      // is invalid.  If the instruction defines other registers,
      // save a list of them so that we don't pick a new register
      // that overlaps any of them.
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand &MO = MI->getOperand(i);
        if (!MO.isReg()) continue;
        unsigned Reg = MO.getReg();
        if (Reg == 0) continue;
        if (MO.isUse() && TRI->regsOverlap(AntiDepReg, Reg)) {
          AntiDepReg = 0;
          break;
        }
        if (MO.isDef() && Reg != AntiDepReg)
          ForbidRegs.push_back(Reg);
      }
    }

    // Determine AntiDepReg's register class, if it is live and is
    // consistently used within a single class.
    const TargetRegisterClass *RC = AntiDepReg != 0 ? Classes[AntiDepReg]
                                                    : nullptr;
    assert((AntiDepReg == 0 || RC != nullptr) &&
           "Register should be live if it's causing an anti-dependence!");
    if (RC == reinterpret_cast<TargetRegisterClass *>(-1))
      AntiDepReg = 0;

    // Look for a suitable register to use to break the anti-dependence.
    //
    // TODO: Instead of picking the first free register, consider which might
    // be the best.
    if (AntiDepReg != 0) {
      std::pair<std::multimap<unsigned, MachineOperand *>::iterator,
                std::multimap<unsigned, MachineOperand *>::iterator>
        Range = RegRefs.equal_range(AntiDepReg);
      if (unsigned NewReg = findSuitableFreeRegister(Range.first, Range.second,
                                                     AntiDepReg,
                                                     LastNewReg[AntiDepReg],
                                                     RC, ForbidRegs)) {
        DEBUG(dbgs() << "Breaking anti-dependence edge on "
              << TRI->getName(AntiDepReg)
              << " with " << RegRefs.count(AntiDepReg) << " references"
              << " using " << TRI->getName(NewReg) << "!\n");

        // Update the references to the old register to refer to the new
        // register.
        for (std::multimap<unsigned, MachineOperand *>::iterator
             Q = Range.first, QE = Range.second; Q != QE; ++Q) {
          Q->second->setReg(NewReg);
          // If the SU for the instruction being updated has debug information
          // related to the anti-dependency register, make sure to update that
          // as well.
          const SUnit *SU = MISUnitMap[Q->second->getParent()];
          if (!SU) continue;
          for (DbgValueVector::iterator DVI = DbgValues.begin(),
                 DVE = DbgValues.end(); DVI != DVE; ++DVI)
            if (DVI->second == Q->second->getParent())
              UpdateDbgValue(DVI->first, AntiDepReg, NewReg);
        }

        // We just went back in time and modified history; the
        // liveness information for the anti-dependence reg is now
        // inconsistent. Set the state as if it were dead.
        Classes[NewReg] = Classes[AntiDepReg];
        DefIndices[NewReg] = DefIndices[AntiDepReg];
        KillIndices[NewReg] = KillIndices[AntiDepReg];
        assert(((KillIndices[NewReg] == ~0u) !=
                (DefIndices[NewReg] == ~0u)) &&
               "Kill and Def maps aren't consistent for NewReg!");

        Classes[AntiDepReg] = nullptr;
        DefIndices[AntiDepReg] = KillIndices[AntiDepReg];
        KillIndices[AntiDepReg] = ~0u;
        assert(((KillIndices[AntiDepReg] == ~0u) !=
                (DefIndices[AntiDepReg] == ~0u)) &&
               "Kill and Def maps aren't consistent for AntiDepReg!");

        RegRefs.erase(AntiDepReg);
        LastNewReg[AntiDepReg] = NewReg;
        ++Broken;
      }
    }

    ScanInstruction(MI, Count);
  }

  return Broken;
}