      1 //===-- X86FloatingPoint.cpp - Floating point Reg -> Stack converter ------===//
      2 //
      3 //                     The LLVM Compiler Infrastructure
      4 //
      5 // This file is distributed under the University of Illinois Open Source
      6 // License. See LICENSE.TXT for details.
      7 //
      8 //===----------------------------------------------------------------------===//
      9 //
     10 // This file defines the pass which converts floating point instructions from
     11 // pseudo registers into register stack instructions.  This pass uses live
     12 // variable information to indicate where the FPn registers are used and their
     13 // lifetimes.
     14 //
     15 // The x87 hardware tracks liveness of the stack registers, so it is necessary
     16 // to implement exact liveness tracking between basic blocks. The CFG edges are
     17 // partitioned into bundles where the same FP registers must be live in
     18 // identical stack positions. Instructions are inserted at the end of each basic
     19 // block to rearrange the live registers to match the outgoing bundle.
     20 //
     21 // This approach avoids splitting critical edges at the potential cost of more
     22 // live register shuffling instructions when critical edges are present.
     23 //
     24 //===----------------------------------------------------------------------===//
     25 
     26 #include "X86.h"
     27 #include "X86InstrInfo.h"
     28 #include "llvm/ADT/DepthFirstIterator.h"
     29 #include "llvm/ADT/STLExtras.h"
     30 #include "llvm/ADT/SmallPtrSet.h"
     31 #include "llvm/ADT/SmallVector.h"
     32 #include "llvm/ADT/Statistic.h"
     33 #include "llvm/CodeGen/EdgeBundles.h"
     34 #include "llvm/CodeGen/MachineFunctionPass.h"
     35 #include "llvm/CodeGen/MachineInstrBuilder.h"
     36 #include "llvm/CodeGen/MachineRegisterInfo.h"
     37 #include "llvm/CodeGen/Passes.h"
     38 #include "llvm/IR/InlineAsm.h"
     39 #include "llvm/Support/Debug.h"
     40 #include "llvm/Support/ErrorHandling.h"
     41 #include "llvm/Support/raw_ostream.h"
     42 #include "llvm/Target/TargetInstrInfo.h"
     43 #include "llvm/Target/TargetMachine.h"
     44 #include <algorithm>
     45 using namespace llvm;
     46 
     47 #define DEBUG_TYPE "x86-codegen"
     48 
     49 STATISTIC(NumFXCH, "Number of fxch instructions inserted");
     50 STATISTIC(NumFP  , "Number of floating point instructions");
     51 
     52 namespace {
     53   struct FPS : public MachineFunctionPass {
     54     static char ID;
     55     FPS() : MachineFunctionPass(ID) {
     56       initializeEdgeBundlesPass(*PassRegistry::getPassRegistry());
     57       // This is really only to keep valgrind quiet.
     58       // The logic in isLive() is too much for it.
     59       memset(Stack, 0, sizeof(Stack));
     60       memset(RegMap, 0, sizeof(RegMap));
     61     }
     62 
     63     void getAnalysisUsage(AnalysisUsage &AU) const override {
     64       AU.setPreservesCFG();
     65       AU.addRequired<EdgeBundles>();
     66       AU.addPreservedID(MachineLoopInfoID);
     67       AU.addPreservedID(MachineDominatorsID);
     68       MachineFunctionPass::getAnalysisUsage(AU);
     69     }
     70 
     71     bool runOnMachineFunction(MachineFunction &MF) override;
     72 
     73     const char *getPassName() const override { return "X86 FP Stackifier"; }
     74 
     75   private:
     76     const TargetInstrInfo *TII; // Machine instruction info.
     77 
     78     // Two CFG edges are related if they leave the same block, or enter the same
     79     // block. The transitive closure of an edge under this relation is a
     80     // LiveBundle. It represents a set of CFG edges where the live FP stack
     81     // registers must be allocated identically in the x87 stack.
     82     //
     83     // A LiveBundle is usually all the edges leaving a block, or all the edges
     84     // entering a block, but it can contain more edges if critical edges are
     85     // present.
     86     //
     87     // The set of live FP registers in a LiveBundle is calculated by bundleCFG,
     88     // but the exact mapping of FP registers to stack slots is fixed later.
     89     struct LiveBundle {
     90       // Bit mask of live FP registers. Bit 0 = FP0, bit 1 = FP1, &c.
     91       unsigned Mask;
     92 
     93       // Number of pre-assigned live registers in FixStack. This is 0 when the
     94       // stack order has not yet been fixed.
     95       unsigned FixCount;
     96 
     97       // Assigned stack order for live-in registers.
     98       // FixStack[i] == getStackEntry(i) for all i < FixCount.
     99       unsigned char FixStack[8];
    100 
    101       LiveBundle() : Mask(0), FixCount(0) {}
    102 
    103       // Have the live registers been assigned a stack order yet?
    104       bool isFixed() const { return !Mask || FixCount; }
    105     };
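    // Illustrative example (not from the original source): if FP0 and FP2 are
    // live across every edge in a bundle, then Mask == 0x5. After the first
    // block feeding the bundle has been processed and has fixed the order,
    // FixCount == 2 and FixStack might be { 2, 0 }, meaning FP2 is expected in
    // st(0) and FP0 in st(1) on every edge in the bundle.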
    106 
    107     // Numbered LiveBundle structs. LiveBundles[0] is used for all CFG edges
    108     // with no live FP registers.
    109     SmallVector<LiveBundle, 8> LiveBundles;
    110 
    111     // The edge bundle analysis provides indices into the LiveBundles vector.
    112     EdgeBundles *Bundles;
    113 
    114     // Return a bitmask of FP registers in block's live-in list.
    115     static unsigned calcLiveInMask(MachineBasicBlock *MBB) {
    116       unsigned Mask = 0;
    117       for (MachineBasicBlock::livein_iterator I = MBB->livein_begin(),
    118            E = MBB->livein_end(); I != E; ++I) {
    119         unsigned Reg = *I;
    120         if (Reg < X86::FP0 || Reg > X86::FP6)
    121           continue;
    122         Mask |= 1 << (Reg - X86::FP0);
    123       }
    124       return Mask;
    125     }
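    // Illustrative example (not from the original source): a block whose
    // live-in list is { %EAX, %FP0, %FP2 } yields Mask == (1 << 0) | (1 << 2)
    // == 0x5; non-FP registers such as %EAX are ignored.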
    126 
    127     // Partition all the CFG edges into LiveBundles.
    128     void bundleCFG(MachineFunction &MF);
    129 
    130     MachineBasicBlock *MBB;     // Current basic block
    131 
    132     // The hardware keeps track of how many FP registers are live, so we have
    133     // to model that exactly. Usually, each live register corresponds to an
    134     // FP<n> register, but when dealing with calls, returns, and inline
    135     // assembly, it is sometimes necessary to have live scratch registers.
    136     unsigned Stack[8];          // FP<n> Registers in each stack slot...
    137     unsigned StackTop;          // The current top of the FP stack.
    138 
    139     enum {
    140       NumFPRegs = 16            // Including scratch pseudo-registers.
    141     };
    142 
    143     // For each live FP<n> register, point to its Stack[] entry.
    144     // The first entries correspond to FP0-FP6, the rest are scratch registers
    145     // used when we need slightly different live registers than what the
    146     // register allocator thinks.
    147     unsigned RegMap[NumFPRegs];
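    // Illustrative example (not from the original source): with StackTop == 3
    // and Stack == { 4, 1, 6 }, FP6 is modeled in st(0), FP1 in st(1) and FP4
    // in st(2); correspondingly RegMap[6] == 2, RegMap[1] == 1 and
    // RegMap[4] == 0, so getSTReg(1) below yields X86::ST1
    // (StackTop - 1 - RegMap[1] == 1).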
    148 
    149     // Pending fixed registers - Inline assembly needs FP registers to appear
    150     // in fixed stack slot positions. This is handled by copying FP registers
    151     // to ST registers before the instruction, and copying back after the
    152     // instruction.
    153     //
    154     // This is modeled with pending ST registers. NumPendingSTs is the number
    155     // of ST registers (ST0-STn) we are tracking. PendingST[n] points to an FP
    156     // register that holds the ST value. The ST registers are not moved into
    157     // place until immediately before the instruction that needs them.
    158     //
    159     // It can happen that we need an ST register to be live when no FP register
    160     // holds the value:
    161     //
    162     //   %ST0 = COPY %FP4<kill>
    163     //
    164     // When that happens, we allocate a scratch FP register to hold the ST
    165     // value. That means every register in PendingST must be live.
    166 
    167     unsigned NumPendingSTs;
    168     unsigned char PendingST[8];
    169 
    170     // Set up our stack model to match the incoming registers to MBB.
    171     void setupBlockStack();
    172 
    173     // Shuffle live registers to match the expectations of successor blocks.
    174     void finishBlockStack();
    175 
    176 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
    177     void dumpStack() const {
    178       dbgs() << "Stack contents:";
    179       for (unsigned i = 0; i != StackTop; ++i) {
    180         dbgs() << " FP" << Stack[i];
    181         assert(RegMap[Stack[i]] == i && "Stack[] doesn't match RegMap[]!");
    182       }
    183       for (unsigned i = 0; i != NumPendingSTs; ++i)
    184         dbgs() << ", ST" << i << " in FP" << unsigned(PendingST[i]);
    185       dbgs() << "\n";
    186     }
    187 #endif
    188 
    189     /// getSlot - Return the stack slot number a particular register number is
    190     /// in.
    191     unsigned getSlot(unsigned RegNo) const {
    192       assert(RegNo < NumFPRegs && "Regno out of range!");
    193       return RegMap[RegNo];
    194     }
    195 
    196     /// isLive - Is RegNo currently live in the stack?
    197     bool isLive(unsigned RegNo) const {
    198       unsigned Slot = getSlot(RegNo);
    199       return Slot < StackTop && Stack[Slot] == RegNo;
    200     }
    201 
    202     /// getScratchReg - Return an FP register that is not currently in use.
    203     unsigned getScratchReg() const {
    204       for (int i = NumFPRegs - 1; i >= 8; --i)
    205         if (!isLive(i))
    206           return i;
    207       llvm_unreachable("Ran out of scratch FP registers");
    208     }
    209 
     210     /// isScratchReg - Returns true if RegNo is a scratch FP register.
    211     static bool isScratchReg(unsigned RegNo) {
     212       return RegNo >= 8 && RegNo < NumFPRegs;
    213     }
    214 
    215     /// getStackEntry - Return the X86::FP<n> register in register ST(i).
    216     unsigned getStackEntry(unsigned STi) const {
    217       if (STi >= StackTop)
    218         report_fatal_error("Access past stack top!");
    219       return Stack[StackTop-1-STi];
    220     }
    221 
    222     /// getSTReg - Return the X86::ST(i) register which contains the specified
    223     /// FP<RegNo> register.
    224     unsigned getSTReg(unsigned RegNo) const {
    225       return StackTop - 1 - getSlot(RegNo) + X86::ST0;
    226     }
    227 
    228     // pushReg - Push the specified FP<n> register onto the stack.
    229     void pushReg(unsigned Reg) {
    230       assert(Reg < NumFPRegs && "Register number out of range!");
    231       if (StackTop >= 8)
    232         report_fatal_error("Stack overflow!");
    233       Stack[StackTop] = Reg;
    234       RegMap[Reg] = StackTop++;
    235     }
    236 
    237     bool isAtTop(unsigned RegNo) const { return getSlot(RegNo) == StackTop-1; }
    238     void moveToTop(unsigned RegNo, MachineBasicBlock::iterator I) {
    239       DebugLoc dl = I == MBB->end() ? DebugLoc() : I->getDebugLoc();
    240       if (isAtTop(RegNo)) return;
    241 
    242       unsigned STReg = getSTReg(RegNo);
    243       unsigned RegOnTop = getStackEntry(0);
    244 
    245       // Swap the slots the regs are in.
    246       std::swap(RegMap[RegNo], RegMap[RegOnTop]);
    247 
    248       // Swap stack slot contents.
    249       if (RegMap[RegOnTop] >= StackTop)
    250         report_fatal_error("Access past stack top!");
    251       std::swap(Stack[RegMap[RegOnTop]], Stack[StackTop-1]);
    252 
     253       // Emit an fxch to update the runtime processor's version of the state.
    254       BuildMI(*MBB, I, dl, TII->get(X86::XCH_F)).addReg(STReg);
    255       ++NumFXCH;
    256     }
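    // Illustrative example (not from the original source): with Stack ==
    // { 4, 1, 6 } (FP6 on top), moveToTop(1, I) emits XCH_F %ST1, i.e.
    // "fxch %st(1)", and updates the model to Stack == { 4, 6, 1 } so that
    // FP1 is now modeled in st(0) and FP6 in st(1).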
    257 
    258     void duplicateToTop(unsigned RegNo, unsigned AsReg, MachineInstr *I) {
    259       DebugLoc dl = I == MBB->end() ? DebugLoc() : I->getDebugLoc();
    260       unsigned STReg = getSTReg(RegNo);
    261       pushReg(AsReg);   // New register on top of stack
    262 
    263       BuildMI(*MBB, I, dl, TII->get(X86::LD_Frr)).addReg(STReg);
    264     }
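    // Illustrative example (not from the original source): if FP3 is modeled
    // in st(2), duplicateToTop(3, SR, I) -- with SR a hypothetical unused
    // scratch FP register -- emits LD_Frr %ST2, i.e. "fld %st(2)", pushing a
    // copy of FP3's value; the new st(0) is tracked as SR in the stack model.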
    265 
    266     /// duplicatePendingSTBeforeKill - The instruction at I is about to kill
    267     /// RegNo. If any PendingST registers still need the RegNo value, duplicate
    268     /// them to new scratch registers.
    269     void duplicatePendingSTBeforeKill(unsigned RegNo, MachineInstr *I) {
    270       for (unsigned i = 0; i != NumPendingSTs; ++i) {
    271         if (PendingST[i] != RegNo)
    272           continue;
    273         unsigned SR = getScratchReg();
    274         DEBUG(dbgs() << "Duplicating pending ST" << i
    275                      << " in FP" << RegNo << " to FP" << SR << '\n');
    276         duplicateToTop(RegNo, SR, I);
    277         PendingST[i] = SR;
    278       }
    279     }
    280 
    281     /// popStackAfter - Pop the current value off of the top of the FP stack
    282     /// after the specified instruction.
    283     void popStackAfter(MachineBasicBlock::iterator &I);
    284 
    285     /// freeStackSlotAfter - Free the specified register from the register
    286     /// stack, so that it is no longer in a register.  If the register is
     287     /// currently at the top of the stack, we just pop the value after the current instruction;
    288     /// otherwise we store the current top-of-stack into the specified slot,
    289     /// then pop the top of stack.
    290     void freeStackSlotAfter(MachineBasicBlock::iterator &I, unsigned Reg);
    291 
    292     /// freeStackSlotBefore - Just the pop, no folding. Return the inserted
    293     /// instruction.
    294     MachineBasicBlock::iterator
    295     freeStackSlotBefore(MachineBasicBlock::iterator I, unsigned FPRegNo);
    296 
    297     /// Adjust the live registers to be the set in Mask.
    298     void adjustLiveRegs(unsigned Mask, MachineBasicBlock::iterator I);
    299 
    300     /// Shuffle the top FixCount stack entries such that FP reg FixStack[0] is
    301     /// st(0), FP reg FixStack[1] is st(1) etc.
    302     void shuffleStackTop(const unsigned char *FixStack, unsigned FixCount,
    303                          MachineBasicBlock::iterator I);
    304 
    305     bool processBasicBlock(MachineFunction &MF, MachineBasicBlock &MBB);
    306 
    307     void handleZeroArgFP(MachineBasicBlock::iterator &I);
    308     void handleOneArgFP(MachineBasicBlock::iterator &I);
    309     void handleOneArgFPRW(MachineBasicBlock::iterator &I);
    310     void handleTwoArgFP(MachineBasicBlock::iterator &I);
    311     void handleCompareFP(MachineBasicBlock::iterator &I);
    312     void handleCondMovFP(MachineBasicBlock::iterator &I);
    313     void handleSpecialFP(MachineBasicBlock::iterator &I);
    314 
    315     // Check if a COPY instruction is using FP registers.
    316     static bool isFPCopy(MachineInstr *MI) {
    317       unsigned DstReg = MI->getOperand(0).getReg();
    318       unsigned SrcReg = MI->getOperand(1).getReg();
    319 
    320       return X86::RFP80RegClass.contains(DstReg) ||
    321         X86::RFP80RegClass.contains(SrcReg);
    322     }
    323   };
    324   char FPS::ID = 0;
    325 }
    326 
    327 FunctionPass *llvm::createX86FloatingPointStackifierPass() { return new FPS(); }
    328 
    329 /// getFPReg - Return the X86::FPx register number for the specified operand.
    330 /// For example, this returns 3 for X86::FP3.
    331 static unsigned getFPReg(const MachineOperand &MO) {
    332   assert(MO.isReg() && "Expected an FP register!");
    333   unsigned Reg = MO.getReg();
    334   assert(Reg >= X86::FP0 && Reg <= X86::FP6 && "Expected FP register!");
    335   return Reg - X86::FP0;
    336 }
    337 
    338 /// runOnMachineFunction - Loop over all of the basic blocks, transforming FP
    339 /// register references into FP stack references.
    340 ///
    341 bool FPS::runOnMachineFunction(MachineFunction &MF) {
    342   // We only need to run this pass if there are any FP registers used in this
    343   // function.  If it is all integer, there is nothing for us to do!
    344   bool FPIsUsed = false;
    345 
    346   assert(X86::FP6 == X86::FP0+6 && "Register enums aren't sorted right!");
    347   for (unsigned i = 0; i <= 6; ++i)
    348     if (MF.getRegInfo().isPhysRegUsed(X86::FP0+i)) {
    349       FPIsUsed = true;
    350       break;
    351     }
    352 
    353   // Early exit.
    354   if (!FPIsUsed) return false;
    355 
    356   Bundles = &getAnalysis<EdgeBundles>();
    357   TII = MF.getTarget().getInstrInfo();
    358 
    359   // Prepare cross-MBB liveness.
    360   bundleCFG(MF);
    361 
    362   StackTop = 0;
    363 
    364   // Process the function in depth first order so that we process at least one
    365   // of the predecessors for every reachable block in the function.
    366   SmallPtrSet<MachineBasicBlock*, 8> Processed;
    367   MachineBasicBlock *Entry = MF.begin();
    368 
    369   bool Changed = false;
    370   for (df_ext_iterator<MachineBasicBlock*, SmallPtrSet<MachineBasicBlock*, 8> >
    371          I = df_ext_begin(Entry, Processed), E = df_ext_end(Entry, Processed);
    372        I != E; ++I)
    373     Changed |= processBasicBlock(MF, **I);
    374 
    375   // Process any unreachable blocks in arbitrary order now.
    376   if (MF.size() != Processed.size())
    377     for (MachineFunction::iterator BB = MF.begin(), E = MF.end(); BB != E; ++BB)
    378       if (Processed.insert(BB))
    379         Changed |= processBasicBlock(MF, *BB);
    380 
    381   LiveBundles.clear();
    382 
    383   return Changed;
    384 }
    385 
    386 /// bundleCFG - Scan all the basic blocks to determine consistent live-in and
    387 /// live-out sets for the FP registers. Consistent means that the set of
    388 /// registers live-out from a block is identical to the live-in set of all
    389 /// successors. This is not enforced by the normal live-in lists since
    390 /// registers may be implicitly defined, or not used by all successors.
    391 void FPS::bundleCFG(MachineFunction &MF) {
    392   assert(LiveBundles.empty() && "Stale data in LiveBundles");
    393   LiveBundles.resize(Bundles->getNumBundles());
    394 
    395   // Gather the actual live-in masks for all MBBs.
    396   for (MachineFunction::iterator I = MF.begin(), E = MF.end(); I != E; ++I) {
    397     MachineBasicBlock *MBB = I;
    398     const unsigned Mask = calcLiveInMask(MBB);
    399     if (!Mask)
    400       continue;
    401     // Update MBB ingoing bundle mask.
    402     LiveBundles[Bundles->getBundle(MBB->getNumber(), false)].Mask |= Mask;
    403   }
    404 }
    405 
    406 /// processBasicBlock - Loop over all of the instructions in the basic block,
    407 /// transforming FP instructions into their stack form.
    408 ///
    409 bool FPS::processBasicBlock(MachineFunction &MF, MachineBasicBlock &BB) {
    410   bool Changed = false;
    411   MBB = &BB;
    412   NumPendingSTs = 0;
    413 
    414   setupBlockStack();
    415 
    416   for (MachineBasicBlock::iterator I = BB.begin(); I != BB.end(); ++I) {
    417     MachineInstr *MI = I;
    418     uint64_t Flags = MI->getDesc().TSFlags;
    419 
    420     unsigned FPInstClass = Flags & X86II::FPTypeMask;
    421     if (MI->isInlineAsm())
    422       FPInstClass = X86II::SpecialFP;
    423 
    424     if (MI->isCopy() && isFPCopy(MI))
    425       FPInstClass = X86II::SpecialFP;
    426 
    427     if (MI->isImplicitDef() &&
    428         X86::RFP80RegClass.contains(MI->getOperand(0).getReg()))
    429       FPInstClass = X86II::SpecialFP;
    430 
    431     if (FPInstClass == X86II::NotFP)
    432       continue;  // Efficiently ignore non-fp insts!
    433 
    434     MachineInstr *PrevMI = nullptr;
    435     if (I != BB.begin())
    436       PrevMI = std::prev(I);
    437 
    438     ++NumFP;  // Keep track of # of pseudo instrs
    439     DEBUG(dbgs() << "\nFPInst:\t" << *MI);
    440 
    441     // Get dead variables list now because the MI pointer may be deleted as part
    442     // of processing!
    443     SmallVector<unsigned, 8> DeadRegs;
    444     for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    445       const MachineOperand &MO = MI->getOperand(i);
    446       if (MO.isReg() && MO.isDead())
    447         DeadRegs.push_back(MO.getReg());
    448     }
    449 
    450     switch (FPInstClass) {
    451     case X86II::ZeroArgFP:  handleZeroArgFP(I); break;
    452     case X86II::OneArgFP:   handleOneArgFP(I);  break;  // fstp ST(0)
    453     case X86II::OneArgFPRW: handleOneArgFPRW(I); break; // ST(0) = fsqrt(ST(0))
    454     case X86II::TwoArgFP:   handleTwoArgFP(I);  break;
    455     case X86II::CompareFP:  handleCompareFP(I); break;
    456     case X86II::CondMovFP:  handleCondMovFP(I); break;
    457     case X86II::SpecialFP:  handleSpecialFP(I); break;
    458     default: llvm_unreachable("Unknown FP Type!");
    459     }
    460 
    461     // Check to see if any of the values defined by this instruction are dead
    462     // after definition.  If so, pop them.
    463     for (unsigned i = 0, e = DeadRegs.size(); i != e; ++i) {
    464       unsigned Reg = DeadRegs[i];
    465       if (Reg >= X86::FP0 && Reg <= X86::FP6) {
    466         DEBUG(dbgs() << "Register FP#" << Reg-X86::FP0 << " is dead!\n");
    467         freeStackSlotAfter(I, Reg-X86::FP0);
    468       }
    469     }
    470 
    471     // Print out all of the instructions expanded to if -debug
    472     DEBUG(
    473       MachineBasicBlock::iterator PrevI(PrevMI);
    474       if (I == PrevI) {
    475         dbgs() << "Just deleted pseudo instruction\n";
    476       } else {
    477         MachineBasicBlock::iterator Start = I;
    478         // Rewind to first instruction newly inserted.
    479         while (Start != BB.begin() && std::prev(Start) != PrevI) --Start;
    480         dbgs() << "Inserted instructions:\n\t";
    481         Start->print(dbgs(), &MF.getTarget());
    482         while (++Start != std::next(I)) {}
    483       }
    484       dumpStack();
    485     );
    486     (void)PrevMI;
    487 
    488     Changed = true;
    489   }
    490 
    491   finishBlockStack();
    492 
    493   return Changed;
    494 }
    495 
    496 /// setupBlockStack - Use the live bundles to set up our model of the stack
    497 /// to match predecessors' live out stack.
    498 void FPS::setupBlockStack() {
    499   DEBUG(dbgs() << "\nSetting up live-ins for BB#" << MBB->getNumber()
    500                << " derived from " << MBB->getName() << ".\n");
    501   StackTop = 0;
    502   // Get the live-in bundle for MBB.
    503   const LiveBundle &Bundle =
    504     LiveBundles[Bundles->getBundle(MBB->getNumber(), false)];
    505 
    506   if (!Bundle.Mask) {
    507     DEBUG(dbgs() << "Block has no FP live-ins.\n");
    508     return;
    509   }
    510 
    511   // Depth-first iteration should ensure that we always have an assigned stack.
    512   assert(Bundle.isFixed() && "Reached block before any predecessors");
    513 
    514   // Push the fixed live-in registers.
    515   for (unsigned i = Bundle.FixCount; i > 0; --i) {
    516     MBB->addLiveIn(X86::ST0+i-1);
    517     DEBUG(dbgs() << "Live-in st(" << (i-1) << "): %FP"
    518                  << unsigned(Bundle.FixStack[i-1]) << '\n');
    519     pushReg(Bundle.FixStack[i-1]);
    520   }
    521 
    522   // Kill off unwanted live-ins. This can happen with a critical edge.
    523   // FIXME: We could keep these live registers around as zombies. They may need
    524   // to be revived at the end of a short block. It might save a few instrs.
    525   adjustLiveRegs(calcLiveInMask(MBB), MBB->begin());
    526   DEBUG(MBB->dump());
    527 }
    528 
    529 /// finishBlockStack - Revive live-outs that are implicitly defined out of
    530 /// MBB. Shuffle live registers to match the expected fixed stack of any
    531 /// predecessors, and ensure that all predecessors are expecting the same
    532 /// stack.
    533 void FPS::finishBlockStack() {
    534   // The RET handling below takes care of return blocks for us.
    535   if (MBB->succ_empty())
    536     return;
    537 
    538   DEBUG(dbgs() << "Setting up live-outs for BB#" << MBB->getNumber()
    539                << " derived from " << MBB->getName() << ".\n");
    540 
    541   // Get MBB's live-out bundle.
    542   unsigned BundleIdx = Bundles->getBundle(MBB->getNumber(), true);
    543   LiveBundle &Bundle = LiveBundles[BundleIdx];
    544 
    545   // We may need to kill and define some registers to match successors.
    546   // FIXME: This can probably be combined with the shuffle below.
    547   MachineBasicBlock::iterator Term = MBB->getFirstTerminator();
    548   adjustLiveRegs(Bundle.Mask, Term);
    549 
    550   if (!Bundle.Mask) {
    551     DEBUG(dbgs() << "No live-outs.\n");
    552     return;
    553   }
    554 
    555   // Has the stack order been fixed yet?
    556   DEBUG(dbgs() << "LB#" << BundleIdx << ": ");
    557   if (Bundle.isFixed()) {
    558     DEBUG(dbgs() << "Shuffling stack to match.\n");
    559     shuffleStackTop(Bundle.FixStack, Bundle.FixCount, Term);
    560   } else {
    561     // Not fixed yet, we get to choose.
    562     DEBUG(dbgs() << "Fixing stack order now.\n");
    563     Bundle.FixCount = StackTop;
    564     for (unsigned i = 0; i < StackTop; ++i)
    565       Bundle.FixStack[i] = getStackEntry(i);
    566   }
    567 }
    568 
    569 
    570 //===----------------------------------------------------------------------===//
    571 // Efficient Lookup Table Support
    572 //===----------------------------------------------------------------------===//
    573 
    574 namespace {
    575   struct TableEntry {
    576     uint16_t from;
    577     uint16_t to;
    578     bool operator<(const TableEntry &TE) const { return from < TE.from; }
    579     friend bool operator<(const TableEntry &TE, unsigned V) {
    580       return TE.from < V;
    581     }
    582     friend bool LLVM_ATTRIBUTE_UNUSED operator<(unsigned V,
    583                                                 const TableEntry &TE) {
    584       return V < TE.from;
    585     }
    586   };
    587 }
    588 
    589 #ifndef NDEBUG
    590 static bool TableIsSorted(const TableEntry *Table, unsigned NumEntries) {
    591   for (unsigned i = 0; i != NumEntries-1; ++i)
    592     if (!(Table[i] < Table[i+1])) return false;
    593   return true;
    594 }
    595 #endif
    596 
    597 static int Lookup(const TableEntry *Table, unsigned N, unsigned Opcode) {
    598   const TableEntry *I = std::lower_bound(Table, Table+N, Opcode);
    599   if (I != Table+N && I->from == Opcode)
    600     return I->to;
    601   return -1;
    602 }
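// Illustrative example (not from the original source): with the OpcodeTable
// defined below, Lookup(OpcodeTable, array_lengthof(OpcodeTable),
// X86::ABS_Fp32) returns X86::ABS_F, while an opcode with no stack form
// returns -1.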
    603 
    604 #ifdef NDEBUG
    605 #define ASSERT_SORTED(TABLE)
    606 #else
    607 #define ASSERT_SORTED(TABLE)                                              \
    608   { static bool TABLE##Checked = false;                                   \
    609     if (!TABLE##Checked) {                                                \
    610        assert(TableIsSorted(TABLE, array_lengthof(TABLE)) &&              \
    611               "All lookup tables must be sorted for efficient access!");  \
    612        TABLE##Checked = true;                                             \
    613     }                                                                     \
    614   }
    615 #endif
    616 
    617 //===----------------------------------------------------------------------===//
    618 // Register File -> Register Stack Mapping Methods
    619 //===----------------------------------------------------------------------===//
    620 
    621 // OpcodeTable - Sorted map of register instructions to their stack version.
     622 // The first element is a register file pseudo instruction, the second is the
    623 // concrete X86 instruction which uses the register stack.
    624 //
    625 static const TableEntry OpcodeTable[] = {
    626   { X86::ABS_Fp32     , X86::ABS_F     },
    627   { X86::ABS_Fp64     , X86::ABS_F     },
    628   { X86::ABS_Fp80     , X86::ABS_F     },
    629   { X86::ADD_Fp32m    , X86::ADD_F32m  },
    630   { X86::ADD_Fp64m    , X86::ADD_F64m  },
    631   { X86::ADD_Fp64m32  , X86::ADD_F32m  },
    632   { X86::ADD_Fp80m32  , X86::ADD_F32m  },
    633   { X86::ADD_Fp80m64  , X86::ADD_F64m  },
    634   { X86::ADD_FpI16m32 , X86::ADD_FI16m },
    635   { X86::ADD_FpI16m64 , X86::ADD_FI16m },
    636   { X86::ADD_FpI16m80 , X86::ADD_FI16m },
    637   { X86::ADD_FpI32m32 , X86::ADD_FI32m },
    638   { X86::ADD_FpI32m64 , X86::ADD_FI32m },
    639   { X86::ADD_FpI32m80 , X86::ADD_FI32m },
    640   { X86::CHS_Fp32     , X86::CHS_F     },
    641   { X86::CHS_Fp64     , X86::CHS_F     },
    642   { X86::CHS_Fp80     , X86::CHS_F     },
    643   { X86::CMOVBE_Fp32  , X86::CMOVBE_F  },
    644   { X86::CMOVBE_Fp64  , X86::CMOVBE_F  },
    645   { X86::CMOVBE_Fp80  , X86::CMOVBE_F  },
    646   { X86::CMOVB_Fp32   , X86::CMOVB_F   },
    647   { X86::CMOVB_Fp64   , X86::CMOVB_F  },
    648   { X86::CMOVB_Fp80   , X86::CMOVB_F  },
    649   { X86::CMOVE_Fp32   , X86::CMOVE_F  },
    650   { X86::CMOVE_Fp64   , X86::CMOVE_F   },
    651   { X86::CMOVE_Fp80   , X86::CMOVE_F   },
    652   { X86::CMOVNBE_Fp32 , X86::CMOVNBE_F },
    653   { X86::CMOVNBE_Fp64 , X86::CMOVNBE_F },
    654   { X86::CMOVNBE_Fp80 , X86::CMOVNBE_F },
    655   { X86::CMOVNB_Fp32  , X86::CMOVNB_F  },
    656   { X86::CMOVNB_Fp64  , X86::CMOVNB_F  },
    657   { X86::CMOVNB_Fp80  , X86::CMOVNB_F  },
    658   { X86::CMOVNE_Fp32  , X86::CMOVNE_F  },
    659   { X86::CMOVNE_Fp64  , X86::CMOVNE_F  },
    660   { X86::CMOVNE_Fp80  , X86::CMOVNE_F  },
    661   { X86::CMOVNP_Fp32  , X86::CMOVNP_F  },
    662   { X86::CMOVNP_Fp64  , X86::CMOVNP_F  },
    663   { X86::CMOVNP_Fp80  , X86::CMOVNP_F  },
    664   { X86::CMOVP_Fp32   , X86::CMOVP_F   },
    665   { X86::CMOVP_Fp64   , X86::CMOVP_F   },
    666   { X86::CMOVP_Fp80   , X86::CMOVP_F   },
    667   { X86::COS_Fp32     , X86::COS_F     },
    668   { X86::COS_Fp64     , X86::COS_F     },
    669   { X86::COS_Fp80     , X86::COS_F     },
    670   { X86::DIVR_Fp32m   , X86::DIVR_F32m },
    671   { X86::DIVR_Fp64m   , X86::DIVR_F64m },
    672   { X86::DIVR_Fp64m32 , X86::DIVR_F32m },
    673   { X86::DIVR_Fp80m32 , X86::DIVR_F32m },
    674   { X86::DIVR_Fp80m64 , X86::DIVR_F64m },
    675   { X86::DIVR_FpI16m32, X86::DIVR_FI16m},
    676   { X86::DIVR_FpI16m64, X86::DIVR_FI16m},
    677   { X86::DIVR_FpI16m80, X86::DIVR_FI16m},
    678   { X86::DIVR_FpI32m32, X86::DIVR_FI32m},
    679   { X86::DIVR_FpI32m64, X86::DIVR_FI32m},
    680   { X86::DIVR_FpI32m80, X86::DIVR_FI32m},
    681   { X86::DIV_Fp32m    , X86::DIV_F32m  },
    682   { X86::DIV_Fp64m    , X86::DIV_F64m  },
    683   { X86::DIV_Fp64m32  , X86::DIV_F32m  },
    684   { X86::DIV_Fp80m32  , X86::DIV_F32m  },
    685   { X86::DIV_Fp80m64  , X86::DIV_F64m  },
    686   { X86::DIV_FpI16m32 , X86::DIV_FI16m },
    687   { X86::DIV_FpI16m64 , X86::DIV_FI16m },
    688   { X86::DIV_FpI16m80 , X86::DIV_FI16m },
    689   { X86::DIV_FpI32m32 , X86::DIV_FI32m },
    690   { X86::DIV_FpI32m64 , X86::DIV_FI32m },
    691   { X86::DIV_FpI32m80 , X86::DIV_FI32m },
    692   { X86::ILD_Fp16m32  , X86::ILD_F16m  },
    693   { X86::ILD_Fp16m64  , X86::ILD_F16m  },
    694   { X86::ILD_Fp16m80  , X86::ILD_F16m  },
    695   { X86::ILD_Fp32m32  , X86::ILD_F32m  },
    696   { X86::ILD_Fp32m64  , X86::ILD_F32m  },
    697   { X86::ILD_Fp32m80  , X86::ILD_F32m  },
    698   { X86::ILD_Fp64m32  , X86::ILD_F64m  },
    699   { X86::ILD_Fp64m64  , X86::ILD_F64m  },
    700   { X86::ILD_Fp64m80  , X86::ILD_F64m  },
    701   { X86::ISTT_Fp16m32 , X86::ISTT_FP16m},
    702   { X86::ISTT_Fp16m64 , X86::ISTT_FP16m},
    703   { X86::ISTT_Fp16m80 , X86::ISTT_FP16m},
    704   { X86::ISTT_Fp32m32 , X86::ISTT_FP32m},
    705   { X86::ISTT_Fp32m64 , X86::ISTT_FP32m},
    706   { X86::ISTT_Fp32m80 , X86::ISTT_FP32m},
    707   { X86::ISTT_Fp64m32 , X86::ISTT_FP64m},
    708   { X86::ISTT_Fp64m64 , X86::ISTT_FP64m},
    709   { X86::ISTT_Fp64m80 , X86::ISTT_FP64m},
    710   { X86::IST_Fp16m32  , X86::IST_F16m  },
    711   { X86::IST_Fp16m64  , X86::IST_F16m  },
    712   { X86::IST_Fp16m80  , X86::IST_F16m  },
    713   { X86::IST_Fp32m32  , X86::IST_F32m  },
    714   { X86::IST_Fp32m64  , X86::IST_F32m  },
    715   { X86::IST_Fp32m80  , X86::IST_F32m  },
    716   { X86::IST_Fp64m32  , X86::IST_FP64m },
    717   { X86::IST_Fp64m64  , X86::IST_FP64m },
    718   { X86::IST_Fp64m80  , X86::IST_FP64m },
    719   { X86::LD_Fp032     , X86::LD_F0     },
    720   { X86::LD_Fp064     , X86::LD_F0     },
    721   { X86::LD_Fp080     , X86::LD_F0     },
    722   { X86::LD_Fp132     , X86::LD_F1     },
    723   { X86::LD_Fp164     , X86::LD_F1     },
    724   { X86::LD_Fp180     , X86::LD_F1     },
    725   { X86::LD_Fp32m     , X86::LD_F32m   },
    726   { X86::LD_Fp32m64   , X86::LD_F32m   },
    727   { X86::LD_Fp32m80   , X86::LD_F32m   },
    728   { X86::LD_Fp64m     , X86::LD_F64m   },
    729   { X86::LD_Fp64m80   , X86::LD_F64m   },
    730   { X86::LD_Fp80m     , X86::LD_F80m   },
    731   { X86::MUL_Fp32m    , X86::MUL_F32m  },
    732   { X86::MUL_Fp64m    , X86::MUL_F64m  },
    733   { X86::MUL_Fp64m32  , X86::MUL_F32m  },
    734   { X86::MUL_Fp80m32  , X86::MUL_F32m  },
    735   { X86::MUL_Fp80m64  , X86::MUL_F64m  },
    736   { X86::MUL_FpI16m32 , X86::MUL_FI16m },
    737   { X86::MUL_FpI16m64 , X86::MUL_FI16m },
    738   { X86::MUL_FpI16m80 , X86::MUL_FI16m },
    739   { X86::MUL_FpI32m32 , X86::MUL_FI32m },
    740   { X86::MUL_FpI32m64 , X86::MUL_FI32m },
    741   { X86::MUL_FpI32m80 , X86::MUL_FI32m },
    742   { X86::SIN_Fp32     , X86::SIN_F     },
    743   { X86::SIN_Fp64     , X86::SIN_F     },
    744   { X86::SIN_Fp80     , X86::SIN_F     },
    745   { X86::SQRT_Fp32    , X86::SQRT_F    },
    746   { X86::SQRT_Fp64    , X86::SQRT_F    },
    747   { X86::SQRT_Fp80    , X86::SQRT_F    },
    748   { X86::ST_Fp32m     , X86::ST_F32m   },
    749   { X86::ST_Fp64m     , X86::ST_F64m   },
    750   { X86::ST_Fp64m32   , X86::ST_F32m   },
    751   { X86::ST_Fp80m32   , X86::ST_F32m   },
    752   { X86::ST_Fp80m64   , X86::ST_F64m   },
    753   { X86::ST_FpP80m    , X86::ST_FP80m  },
    754   { X86::SUBR_Fp32m   , X86::SUBR_F32m },
    755   { X86::SUBR_Fp64m   , X86::SUBR_F64m },
    756   { X86::SUBR_Fp64m32 , X86::SUBR_F32m },
    757   { X86::SUBR_Fp80m32 , X86::SUBR_F32m },
    758   { X86::SUBR_Fp80m64 , X86::SUBR_F64m },
    759   { X86::SUBR_FpI16m32, X86::SUBR_FI16m},
    760   { X86::SUBR_FpI16m64, X86::SUBR_FI16m},
    761   { X86::SUBR_FpI16m80, X86::SUBR_FI16m},
    762   { X86::SUBR_FpI32m32, X86::SUBR_FI32m},
    763   { X86::SUBR_FpI32m64, X86::SUBR_FI32m},
    764   { X86::SUBR_FpI32m80, X86::SUBR_FI32m},
    765   { X86::SUB_Fp32m    , X86::SUB_F32m  },
    766   { X86::SUB_Fp64m    , X86::SUB_F64m  },
    767   { X86::SUB_Fp64m32  , X86::SUB_F32m  },
    768   { X86::SUB_Fp80m32  , X86::SUB_F32m  },
    769   { X86::SUB_Fp80m64  , X86::SUB_F64m  },
    770   { X86::SUB_FpI16m32 , X86::SUB_FI16m },
    771   { X86::SUB_FpI16m64 , X86::SUB_FI16m },
    772   { X86::SUB_FpI16m80 , X86::SUB_FI16m },
    773   { X86::SUB_FpI32m32 , X86::SUB_FI32m },
    774   { X86::SUB_FpI32m64 , X86::SUB_FI32m },
    775   { X86::SUB_FpI32m80 , X86::SUB_FI32m },
    776   { X86::TST_Fp32     , X86::TST_F     },
    777   { X86::TST_Fp64     , X86::TST_F     },
    778   { X86::TST_Fp80     , X86::TST_F     },
    779   { X86::UCOM_FpIr32  , X86::UCOM_FIr  },
    780   { X86::UCOM_FpIr64  , X86::UCOM_FIr  },
    781   { X86::UCOM_FpIr80  , X86::UCOM_FIr  },
    782   { X86::UCOM_Fpr32   , X86::UCOM_Fr   },
    783   { X86::UCOM_Fpr64   , X86::UCOM_Fr   },
    784   { X86::UCOM_Fpr80   , X86::UCOM_Fr   },
    785 };
    786 
    787 static unsigned getConcreteOpcode(unsigned Opcode) {
    788   ASSERT_SORTED(OpcodeTable);
    789   int Opc = Lookup(OpcodeTable, array_lengthof(OpcodeTable), Opcode);
    790   assert(Opc != -1 && "FP Stack instruction not in OpcodeTable!");
    791   return Opc;
    792 }
    793 
    794 //===----------------------------------------------------------------------===//
    795 // Helper Methods
    796 //===----------------------------------------------------------------------===//
    797 
    798 // PopTable - Sorted map of instructions to their popping version.  The first
    799 // element is an instruction, the second is the version which pops.
    800 //
    801 static const TableEntry PopTable[] = {
    802   { X86::ADD_FrST0 , X86::ADD_FPrST0  },
    803 
    804   { X86::DIVR_FrST0, X86::DIVR_FPrST0 },
    805   { X86::DIV_FrST0 , X86::DIV_FPrST0  },
    806 
    807   { X86::IST_F16m  , X86::IST_FP16m   },
    808   { X86::IST_F32m  , X86::IST_FP32m   },
    809 
    810   { X86::MUL_FrST0 , X86::MUL_FPrST0  },
    811 
    812   { X86::ST_F32m   , X86::ST_FP32m    },
    813   { X86::ST_F64m   , X86::ST_FP64m    },
    814   { X86::ST_Frr    , X86::ST_FPrr     },
    815 
    816   { X86::SUBR_FrST0, X86::SUBR_FPrST0 },
    817   { X86::SUB_FrST0 , X86::SUB_FPrST0  },
    818 
    819   { X86::UCOM_FIr  , X86::UCOM_FIPr   },
    820 
    821   { X86::UCOM_FPr  , X86::UCOM_FPPr   },
    822   { X86::UCOM_Fr   , X86::UCOM_FPr    },
    823 };
    824 
    825 /// popStackAfter - Pop the current value off of the top of the FP stack after
    826 /// the specified instruction.  This attempts to be sneaky and combine the pop
    827 /// into the instruction itself if possible.  The iterator is left pointing to
    828 /// the last instruction, be it a new pop instruction inserted, or the old
    829 /// instruction if it was modified in place.
    830 ///
    831 void FPS::popStackAfter(MachineBasicBlock::iterator &I) {
    832   MachineInstr* MI = I;
    833   DebugLoc dl = MI->getDebugLoc();
    834   ASSERT_SORTED(PopTable);
    835   if (StackTop == 0)
    836     report_fatal_error("Cannot pop empty stack!");
    837   RegMap[Stack[--StackTop]] = ~0;     // Update state
    838 
    839   // Check to see if there is a popping version of this instruction...
    840   int Opcode = Lookup(PopTable, array_lengthof(PopTable), I->getOpcode());
    841   if (Opcode != -1) {
    842     I->setDesc(TII->get(Opcode));
    843     if (Opcode == X86::UCOM_FPPr)
    844       I->RemoveOperand(0);
    845   } else {    // Insert an explicit pop
    846     I = BuildMI(*MBB, ++I, dl, TII->get(X86::ST_FPrr)).addReg(X86::ST0);
    847   }
    848 }
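// Illustrative example (not from the original source): if I is a UCOM_FIr,
// popStackAfter rewrites it in place to the popping UCOM_FIPr; for an
// instruction with no popping form, an explicit ST_FPrr %ST0, i.e.
// "fstp %st(0)", is inserted after it instead.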
    849 
    850 /// freeStackSlotAfter - Free the specified register from the register stack, so
    851 /// that it is no longer in a register.  If the register is currently at the top
     852 /// of the stack, we just pop the value after the current instruction; otherwise we store the
    853 /// current top-of-stack into the specified slot, then pop the top of stack.
    854 void FPS::freeStackSlotAfter(MachineBasicBlock::iterator &I, unsigned FPRegNo) {
    855   if (getStackEntry(0) == FPRegNo) {  // already at the top of stack? easy.
    856     popStackAfter(I);
    857     return;
    858   }
    859 
    860   // Otherwise, store the top of stack into the dead slot, killing the operand
    861   // without having to add in an explicit xchg then pop.
    862   //
    863   I = freeStackSlotBefore(++I, FPRegNo);
    864 }
    865 
    866 /// freeStackSlotBefore - Free the specified register without trying any
    867 /// folding.
    868 MachineBasicBlock::iterator
    869 FPS::freeStackSlotBefore(MachineBasicBlock::iterator I, unsigned FPRegNo) {
    870   unsigned STReg    = getSTReg(FPRegNo);
    871   unsigned OldSlot  = getSlot(FPRegNo);
    872   unsigned TopReg   = Stack[StackTop-1];
    873   Stack[OldSlot]    = TopReg;
    874   RegMap[TopReg]    = OldSlot;
    875   RegMap[FPRegNo]   = ~0;
    876   Stack[--StackTop] = ~0;
    877   return BuildMI(*MBB, I, DebugLoc(), TII->get(X86::ST_FPrr)).addReg(STReg);
    878 }
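// Illustrative example (not from the original source): with Stack == { 4, 1, 6 }
// (FP6 on top), freeStackSlotBefore(I, 4) emits ST_FPrr %ST2, i.e.
// "fstp %st(2)", which overwrites FP4's slot with the top value and pops,
// leaving Stack == { 6, 1 } with FP1 now on top.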
    879 
    880 /// adjustLiveRegs - Kill and revive registers such that exactly the FP
    881 /// registers with a bit in Mask are live.
    882 void FPS::adjustLiveRegs(unsigned Mask, MachineBasicBlock::iterator I) {
    883   unsigned Defs = Mask;
    884   unsigned Kills = 0;
    885   for (unsigned i = 0; i < StackTop; ++i) {
    886     unsigned RegNo = Stack[i];
    887     if (!(Defs & (1 << RegNo)))
    888       // This register is live, but we don't want it.
    889       Kills |= (1 << RegNo);
    890     else
    891       // We don't need to imp-def this live register.
    892       Defs &= ~(1 << RegNo);
    893   }
    894   assert((Kills & Defs) == 0 && "Register needs killing and def'ing?");
    895 
    896   // Produce implicit-defs for free by using killed registers.
    897   while (Kills && Defs) {
    898     unsigned KReg = countTrailingZeros(Kills);
    899     unsigned DReg = countTrailingZeros(Defs);
    900     DEBUG(dbgs() << "Renaming %FP" << KReg << " as imp %FP" << DReg << "\n");
    901     std::swap(Stack[getSlot(KReg)], Stack[getSlot(DReg)]);
    902     std::swap(RegMap[KReg], RegMap[DReg]);
    903     Kills &= ~(1 << KReg);
    904     Defs &= ~(1 << DReg);
    905   }
    906 
    907   // Kill registers by popping.
    908   if (Kills && I != MBB->begin()) {
    909     MachineBasicBlock::iterator I2 = std::prev(I);
    910     while (StackTop) {
    911       unsigned KReg = getStackEntry(0);
    912       if (!(Kills & (1 << KReg)))
    913         break;
    914       DEBUG(dbgs() << "Popping %FP" << KReg << "\n");
    915       popStackAfter(I2);
    916       Kills &= ~(1 << KReg);
    917     }
    918   }
    919 
    920   // Manually kill the rest.
    921   while (Kills) {
    922     unsigned KReg = countTrailingZeros(Kills);
    923     DEBUG(dbgs() << "Killing %FP" << KReg << "\n");
    924     freeStackSlotBefore(I, KReg);
    925     Kills &= ~(1 << KReg);
    926   }
    927 
    928   // Load zeros for all the imp-defs.
    929   while(Defs) {
    930     unsigned DReg = countTrailingZeros(Defs);
    931     DEBUG(dbgs() << "Defining %FP" << DReg << " as 0\n");
    932     BuildMI(*MBB, I, DebugLoc(), TII->get(X86::LD_F0));
    933     pushReg(DReg);
    934     Defs &= ~(1 << DReg);
    935   }
    936 
    937   // Now we should have the correct registers live.
    938   DEBUG(dumpStack());
    939   assert(StackTop == CountPopulation_32(Mask) && "Live count mismatch");
    940 }
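// Illustrative example (not from the original source): if Mask requests
// { FP0, FP1 } while the stack currently holds FP1 and FP2, then FP2 needs
// killing and FP0 needs defining; the renaming loop above relabels FP2's slot
// as FP0 without emitting any instruction, instead of popping FP2 and then
// loading a zero for FP0.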
    941 
    942 /// shuffleStackTop - emit fxch instructions before I to shuffle the top
    943 /// FixCount entries into the order given by FixStack.
    944 /// FIXME: Is there a better algorithm than insertion sort?
    945 void FPS::shuffleStackTop(const unsigned char *FixStack,
    946                           unsigned FixCount,
    947                           MachineBasicBlock::iterator I) {
    948   // Move items into place, starting from the desired stack bottom.
    949   while (FixCount--) {
    950     // Old register at position FixCount.
    951     unsigned OldReg = getStackEntry(FixCount);
    952     // Desired register at position FixCount.
    953     unsigned Reg = FixStack[FixCount];
    954     if (Reg == OldReg)
    955       continue;
    956     // (Reg st0) (OldReg st0) = (Reg OldReg st0)
    957     moveToTop(Reg, I);
    958     if (FixCount > 0)
    959       moveToTop(OldReg, I);
    960   }
    961   DEBUG(dumpStack());
    962 }
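// Illustrative example (not from the original source): if the current stack is
// st(0) = FP1, st(1) = FP0 but FixStack requests st(0) = FP0, st(1) = FP1, the
// loop above emits a single "fxch %st(1)" via moveToTop to swap the two
// entries.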
    963 
    964 
    965 //===----------------------------------------------------------------------===//
    966 // Instruction transformation implementation
    967 //===----------------------------------------------------------------------===//
    968 
    969 /// handleZeroArgFP - ST(0) = fld0    ST(0) = flds <mem>
    970 ///
    971 void FPS::handleZeroArgFP(MachineBasicBlock::iterator &I) {
    972   MachineInstr *MI = I;
    973   unsigned DestReg = getFPReg(MI->getOperand(0));
    974 
    975   // Change from the pseudo instruction to the concrete instruction.
    976   MI->RemoveOperand(0);   // Remove the explicit ST(0) operand
    977   MI->setDesc(TII->get(getConcreteOpcode(MI->getOpcode())));
    978 
    979   // Result gets pushed on the stack.
    980   pushReg(DestReg);
    981 }
    982 
    983 /// handleOneArgFP - fst <mem>, ST(0)
    984 ///
    985 void FPS::handleOneArgFP(MachineBasicBlock::iterator &I) {
    986   MachineInstr *MI = I;
    987   unsigned NumOps = MI->getDesc().getNumOperands();
    988   assert((NumOps == X86::AddrNumOperands + 1 || NumOps == 1) &&
    989          "Can only handle fst* & ftst instructions!");
    990 
    991   // Is this the last use of the source register?
    992   unsigned Reg = getFPReg(MI->getOperand(NumOps-1));
    993   bool KillsSrc = MI->killsRegister(X86::FP0+Reg);
    994 
    995   if (KillsSrc)
    996     duplicatePendingSTBeforeKill(Reg, I);
    997 
     998   // FISTP64m is strange because there isn't a non-popping version.
    999   // If we have one _and_ we don't want to pop the operand, duplicate the value
    1000   // on the stack instead of moving it.  This ensures that popping the value is
   1001   // always ok.
   1002   // Ditto FISTTP16m, FISTTP32m, FISTTP64m, ST_FpP80m.
   1003   //
   1004   if (!KillsSrc &&
   1005       (MI->getOpcode() == X86::IST_Fp64m32 ||
   1006        MI->getOpcode() == X86::ISTT_Fp16m32 ||
   1007        MI->getOpcode() == X86::ISTT_Fp32m32 ||
   1008        MI->getOpcode() == X86::ISTT_Fp64m32 ||
   1009        MI->getOpcode() == X86::IST_Fp64m64 ||
   1010        MI->getOpcode() == X86::ISTT_Fp16m64 ||
   1011        MI->getOpcode() == X86::ISTT_Fp32m64 ||
   1012        MI->getOpcode() == X86::ISTT_Fp64m64 ||
   1013        MI->getOpcode() == X86::IST_Fp64m80 ||
   1014        MI->getOpcode() == X86::ISTT_Fp16m80 ||
   1015        MI->getOpcode() == X86::ISTT_Fp32m80 ||
   1016        MI->getOpcode() == X86::ISTT_Fp64m80 ||
   1017        MI->getOpcode() == X86::ST_FpP80m)) {
   1018     duplicateToTop(Reg, getScratchReg(), I);
   1019   } else {
   1020     moveToTop(Reg, I);            // Move to the top of the stack...
   1021   }
   1022 
   1023   // Convert from the pseudo instruction to the concrete instruction.
   1024   MI->RemoveOperand(NumOps-1);    // Remove explicit ST(0) operand
   1025   MI->setDesc(TII->get(getConcreteOpcode(MI->getOpcode())));
   1026 
   1027   if (MI->getOpcode() == X86::IST_FP64m ||
   1028       MI->getOpcode() == X86::ISTT_FP16m ||
   1029       MI->getOpcode() == X86::ISTT_FP32m ||
   1030       MI->getOpcode() == X86::ISTT_FP64m ||
   1031       MI->getOpcode() == X86::ST_FP80m) {
   1032     if (StackTop == 0)
   1033       report_fatal_error("Stack empty??");
   1034     --StackTop;
   1035   } else if (KillsSrc) { // Last use of operand?
   1036     popStackAfter(I);
   1037   }
   1038 }
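// Illustrative example (not from the original source): for "ST_Fp64m <mem>, %FP3"
// where this is the last use of FP3, the code above moves FP3 to st(0)
// (emitting an fxch if needed), rewrites the pseudo to ST_F64m, and
// popStackAfter then folds the pop, giving the popping store ST_FP64m.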
   1039 
   1040 
   1041 /// handleOneArgFPRW: Handle instructions that read from the top of stack and
   1042 /// replace the value with a newly computed value.  These instructions may have
   1043 /// non-fp operands after their FP operands.
   1044 ///
   1045 ///  Examples:
   1046 ///     R1 = fchs R2
   1047 ///     R1 = fadd R2, [mem]
   1048 ///
   1049 void FPS::handleOneArgFPRW(MachineBasicBlock::iterator &I) {
   1050   MachineInstr *MI = I;
   1051 #ifndef NDEBUG
   1052   unsigned NumOps = MI->getDesc().getNumOperands();
   1053   assert(NumOps >= 2 && "FPRW instructions must have 2 ops!!");
   1054 #endif
   1055 
   1056   // Is this the last use of the source register?
   1057   unsigned Reg = getFPReg(MI->getOperand(1));
   1058   bool KillsSrc = MI->killsRegister(X86::FP0+Reg);
   1059 
   1060   if (KillsSrc) {
   1061     duplicatePendingSTBeforeKill(Reg, I);
   1062     // If this is the last use of the source register, just make sure it's on
   1063     // the top of the stack.
   1064     moveToTop(Reg, I);
   1065     if (StackTop == 0)
   1066       report_fatal_error("Stack cannot be empty!");
   1067     --StackTop;
   1068     pushReg(getFPReg(MI->getOperand(0)));
   1069   } else {
   1070     // If this is not the last use of the source register, _copy_ it to the top
   1071     // of the stack.
   1072     duplicateToTop(Reg, getFPReg(MI->getOperand(0)), I);
   1073   }
   1074 
   1075   // Change from the pseudo instruction to the concrete instruction.
   1076   MI->RemoveOperand(1);   // Drop the source operand.
   1077   MI->RemoveOperand(0);   // Drop the destination operand.
   1078   MI->setDesc(TII->get(getConcreteOpcode(MI->getOpcode())));
   1079 }
   1080 
   1081 
   1082 //===----------------------------------------------------------------------===//
   1083 // Define tables of various ways to map pseudo instructions
   1084 //
   1085 
   1086 // ForwardST0Table - Map: A = B op C  into: ST(0) = ST(0) op ST(i)
   1087 static const TableEntry ForwardST0Table[] = {
   1088   { X86::ADD_Fp32  , X86::ADD_FST0r },
   1089   { X86::ADD_Fp64  , X86::ADD_FST0r },
   1090   { X86::ADD_Fp80  , X86::ADD_FST0r },
   1091   { X86::DIV_Fp32  , X86::DIV_FST0r },
   1092   { X86::DIV_Fp64  , X86::DIV_FST0r },
   1093   { X86::DIV_Fp80  , X86::DIV_FST0r },
   1094   { X86::MUL_Fp32  , X86::MUL_FST0r },
   1095   { X86::MUL_Fp64  , X86::MUL_FST0r },
   1096   { X86::MUL_Fp80  , X86::MUL_FST0r },
   1097   { X86::SUB_Fp32  , X86::SUB_FST0r },
   1098   { X86::SUB_Fp64  , X86::SUB_FST0r },
   1099   { X86::SUB_Fp80  , X86::SUB_FST0r },
   1100 };
   1101 
   1102 // ReverseST0Table - Map: A = B op C  into: ST(0) = ST(i) op ST(0)
   1103 static const TableEntry ReverseST0Table[] = {
   1104   { X86::ADD_Fp32  , X86::ADD_FST0r  },   // commutative
   1105   { X86::ADD_Fp64  , X86::ADD_FST0r  },   // commutative
   1106   { X86::ADD_Fp80  , X86::ADD_FST0r  },   // commutative
   1107   { X86::DIV_Fp32  , X86::DIVR_FST0r },
   1108   { X86::DIV_Fp64  , X86::DIVR_FST0r },
   1109   { X86::DIV_Fp80  , X86::DIVR_FST0r },
   1110   { X86::MUL_Fp32  , X86::MUL_FST0r  },   // commutative
   1111   { X86::MUL_Fp64  , X86::MUL_FST0r  },   // commutative
   1112   { X86::MUL_Fp80  , X86::MUL_FST0r  },   // commutative
   1113   { X86::SUB_Fp32  , X86::SUBR_FST0r },
   1114   { X86::SUB_Fp64  , X86::SUBR_FST0r },
   1115   { X86::SUB_Fp80  , X86::SUBR_FST0r },
   1116 };
   1117 
   1118 // ForwardSTiTable - Map: A = B op C  into: ST(i) = ST(0) op ST(i)
   1119 static const TableEntry ForwardSTiTable[] = {
   1120   { X86::ADD_Fp32  , X86::ADD_FrST0  },   // commutative
   1121   { X86::ADD_Fp64  , X86::ADD_FrST0  },   // commutative
   1122   { X86::ADD_Fp80  , X86::ADD_FrST0  },   // commutative
   1123   { X86::DIV_Fp32  , X86::DIVR_FrST0 },
   1124   { X86::DIV_Fp64  , X86::DIVR_FrST0 },
   1125   { X86::DIV_Fp80  , X86::DIVR_FrST0 },
   1126   { X86::MUL_Fp32  , X86::MUL_FrST0  },   // commutative
   1127   { X86::MUL_Fp64  , X86::MUL_FrST0  },   // commutative
   1128   { X86::MUL_Fp80  , X86::MUL_FrST0  },   // commutative
   1129   { X86::SUB_Fp32  , X86::SUBR_FrST0 },
   1130   { X86::SUB_Fp64  , X86::SUBR_FrST0 },
   1131   { X86::SUB_Fp80  , X86::SUBR_FrST0 },
   1132 };
   1133 
   1134 // ReverseSTiTable - Map: A = B op C  into: ST(i) = ST(i) op ST(0)
   1135 static const TableEntry ReverseSTiTable[] = {
   1136   { X86::ADD_Fp32  , X86::ADD_FrST0 },
   1137   { X86::ADD_Fp64  , X86::ADD_FrST0 },
   1138   { X86::ADD_Fp80  , X86::ADD_FrST0 },
   1139   { X86::DIV_Fp32  , X86::DIV_FrST0 },
   1140   { X86::DIV_Fp64  , X86::DIV_FrST0 },
   1141   { X86::DIV_Fp80  , X86::DIV_FrST0 },
   1142   { X86::MUL_Fp32  , X86::MUL_FrST0 },
   1143   { X86::MUL_Fp64  , X86::MUL_FrST0 },
   1144   { X86::MUL_Fp80  , X86::MUL_FrST0 },
   1145   { X86::SUB_Fp32  , X86::SUB_FrST0 },
   1146   { X86::SUB_Fp64  , X86::SUB_FrST0 },
   1147   { X86::SUB_Fp80  , X86::SUB_FrST0 },
   1148 };
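// Illustrative example (not from the original source): for
// "FP2 = ADD_Fp80 FP0, FP1" where FP0 is already in st(0), FP0 is killed and
// FP1 stays live, handleTwoArgFP below selects ForwardST0Table and emits
// ADD_FST0r, i.e. "fadd %st(i)" with st(i) holding FP1; st(0)'s slot is then
// relabeled as FP2.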
   1149 
   1150 
    1151 /// handleTwoArgFP - Handle instructions like FADD and friends, which are virtual
    1152 /// instructions that need to be simplified and possibly transformed.
   1153 ///
   1154 /// Result: ST(0) = fsub  ST(0), ST(i)
   1155 ///         ST(i) = fsub  ST(0), ST(i)
   1156 ///         ST(0) = fsubr ST(0), ST(i)
   1157 ///         ST(i) = fsubr ST(0), ST(i)
   1158 ///
   1159 void FPS::handleTwoArgFP(MachineBasicBlock::iterator &I) {
   1160   ASSERT_SORTED(ForwardST0Table); ASSERT_SORTED(ReverseST0Table);
   1161   ASSERT_SORTED(ForwardSTiTable); ASSERT_SORTED(ReverseSTiTable);
   1162   MachineInstr *MI = I;
   1163 
   1164   unsigned NumOperands = MI->getDesc().getNumOperands();
   1165   assert(NumOperands == 3 && "Illegal TwoArgFP instruction!");
   1166   unsigned Dest = getFPReg(MI->getOperand(0));
   1167   unsigned Op0 = getFPReg(MI->getOperand(NumOperands-2));
   1168   unsigned Op1 = getFPReg(MI->getOperand(NumOperands-1));
   1169   bool KillsOp0 = MI->killsRegister(X86::FP0+Op0);
   1170   bool KillsOp1 = MI->killsRegister(X86::FP0+Op1);
   1171   DebugLoc dl = MI->getDebugLoc();
   1172 
   1173   unsigned TOS = getStackEntry(0);
   1174 
   1175   // One of our operands must be on the top of the stack.  If neither is yet, we
   1176   // need to move one.
   1177   if (Op0 != TOS && Op1 != TOS) {   // No operand at TOS?
   1178     // We can choose to move either operand to the top of the stack.  If one of
   1179     // the operands is killed by this instruction, we want that one so that we
   1180     // can update right on top of the old version.
   1181     if (KillsOp0) {
   1182       moveToTop(Op0, I);         // Move dead operand to TOS.
   1183       TOS = Op0;
   1184     } else if (KillsOp1) {
   1185       moveToTop(Op1, I);
   1186       TOS = Op1;
   1187     } else {
   1188       // All of the operands are live after this instruction executes, so we
   1189       // cannot update on top of any operand.  Because of this, we must
   1190       // duplicate one of the stack elements to the top.  It doesn't matter
   1191       // which one we pick.
   1192       //
   1193       duplicateToTop(Op0, Dest, I);
   1194       Op0 = TOS = Dest;
   1195       KillsOp0 = true;
   1196     }
   1197   } else if (!KillsOp0 && !KillsOp1) {
   1198     // If we DO have one of our operands at the top of the stack, but we don't
   1199     // have a dead operand, we must duplicate one of the operands to a new slot
   1200     // on the stack.
   1201     duplicateToTop(Op0, Dest, I);
   1202     Op0 = TOS = Dest;
   1203     KillsOp0 = true;
   1204   }
   1205 
   1206   // Now we know that one of our operands is on the top of the stack, and at
   1207   // least one of our operands is killed by this instruction.
   1208   assert((TOS == Op0 || TOS == Op1) && (KillsOp0 || KillsOp1) &&
   1209          "Stack conditions not set up right!");
   1210 
   1211   // We decide which form to use based on what is on the top of the stack, and
   1212   // which operand is killed by this instruction.
   1213   const TableEntry *InstTable;
   1214   bool isForward = TOS == Op0;
   1215   bool updateST0 = (TOS == Op0 && !KillsOp1) || (TOS == Op1 && !KillsOp0);
   1216   if (updateST0) {
   1217     if (isForward)
   1218       InstTable = ForwardST0Table;
   1219     else
   1220       InstTable = ReverseST0Table;
   1221   } else {
   1222     if (isForward)
   1223       InstTable = ForwardSTiTable;
   1224     else
   1225       InstTable = ReverseSTiTable;
   1226   }
   1227 
   1228   int Opcode = Lookup(InstTable, array_lengthof(ForwardST0Table),
   1229                       MI->getOpcode());
   1230   assert(Opcode != -1 && "Unknown TwoArgFP pseudo instruction!");
   1231 
   1232   // NotTOS - The register which is not on the top of stack...
   1233   unsigned NotTOS = (TOS == Op0) ? Op1 : Op0;
   1234 
   1235   // Replace the old instruction with a new instruction
   1236   MBB->remove(I++);
   1237   I = BuildMI(*MBB, I, dl, TII->get(Opcode)).addReg(getSTReg(NotTOS));
   1238 
   1239   // If both operands are killed, pop one off of the stack in addition to
   1240   // overwriting the other one.
   1241   if (KillsOp0 && KillsOp1 && Op0 != Op1) {
   1242     assert(!updateST0 && "Should have updated other operand!");
   1243     popStackAfter(I);   // Pop the top of stack
   1244   }
   1245 
   1246   // Update stack information so that we know the destination register is now on
   1247   // the stack.
   1248   unsigned UpdatedSlot = getSlot(updateST0 ? TOS : NotTOS);
   1249   assert(UpdatedSlot < StackTop && Dest < 7);
   1250   Stack[UpdatedSlot]   = Dest;
   1251   RegMap[Dest]         = UpdatedSlot;
   1252   MBB->getParent()->DeleteMachineInstr(MI); // Remove the old instruction
   1253 }
   1254 
   1255 /// handleCompareFP - Handle FUCOM and FUCOMI instructions, which have two FP
   1256 /// register arguments and no explicit destinations.
   1257 ///
   1258 void FPS::handleCompareFP(MachineBasicBlock::iterator &I) {
   1259   ASSERT_SORTED(ForwardST0Table); ASSERT_SORTED(ReverseST0Table);
   1260   ASSERT_SORTED(ForwardSTiTable); ASSERT_SORTED(ReverseSTiTable);
   1261   MachineInstr *MI = I;
   1262 
   1263   unsigned NumOperands = MI->getDesc().getNumOperands();
   1264   assert(NumOperands == 2 && "Illegal FUCOM* instruction!");
   1265   unsigned Op0 = getFPReg(MI->getOperand(NumOperands-2));
   1266   unsigned Op1 = getFPReg(MI->getOperand(NumOperands-1));
   1267   bool KillsOp0 = MI->killsRegister(X86::FP0+Op0);
   1268   bool KillsOp1 = MI->killsRegister(X86::FP0+Op1);
   1269 
    1270   // Make sure the first operand is on the top of the stack; the other one can
    1271   // be anywhere.
   1272   moveToTop(Op0, I);
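           // After moveToTop, Op0 sits in ST(0), the implicit first operand of the
           // concrete FUCOM/FUCOMI, so only the second operand remains explicit.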
   1273 
   1274   // Change from the pseudo instruction to the concrete instruction.
   1275   MI->getOperand(0).setReg(getSTReg(Op1));
   1276   MI->RemoveOperand(1);
   1277   MI->setDesc(TII->get(getConcreteOpcode(MI->getOpcode())));
   1278 
   1279   // If any of the operands are killed by this instruction, free them.
   1280   if (KillsOp0) freeStackSlotAfter(I, Op0);
   1281   if (KillsOp1 && Op0 != Op1) freeStackSlotAfter(I, Op1);
   1282 }
   1283 
   1284 /// handleCondMovFP - Handle two address conditional move instructions.  These
   1285 /// instructions move a st(i) register to st(0) iff a condition is true.  These
   1286 /// instructions require that the first operand is at the top of the stack, but
   1287 /// otherwise don't modify the stack at all.
   1288 void FPS::handleCondMovFP(MachineBasicBlock::iterator &I) {
   1289   MachineInstr *MI = I;
   1290 
   1291   unsigned Op0 = getFPReg(MI->getOperand(0));
   1292   unsigned Op1 = getFPReg(MI->getOperand(2));
   1293   bool KillsOp1 = MI->killsRegister(X86::FP0+Op1);
   1294 
   1295   // The first operand *must* be on the top of the stack.
   1296   moveToTop(Op0, I);
   1297 
    1298   // Rewrite the source operand to the ST register that Op1 currently occupies,
    1299   // and change from the pseudo instruction to the concrete instruction.
   1300   MI->RemoveOperand(0);
   1301   MI->RemoveOperand(1);
   1302   MI->getOperand(0).setReg(getSTReg(Op1));
   1303   MI->setDesc(TII->get(getConcreteOpcode(MI->getOpcode())));
   1304 
   1305   // If we kill the second operand, make sure to pop it from the stack.
   1306   if (Op0 != Op1 && KillsOp1) {
   1307     // Get this value off of the register stack.
   1308     freeStackSlotAfter(I, Op1);
   1309   }
   1310 }
   1311 
   1312 
   1313 /// handleSpecialFP - Handle special instructions which behave unlike other
   1314 /// floating point instructions.  This is primarily intended for use by pseudo
   1315 /// instructions.
   1316 ///
   1317 void FPS::handleSpecialFP(MachineBasicBlock::iterator &I) {
   1318   MachineInstr *MI = I;
   1319   switch (MI->getOpcode()) {
   1320   default: llvm_unreachable("Unknown SpecialFP instruction!");
   1321   case TargetOpcode::COPY: {
   1322     // We handle three kinds of copies: FP <- FP, FP <- ST, and ST <- FP.
   1323     const MachineOperand &MO1 = MI->getOperand(1);
   1324     const MachineOperand &MO0 = MI->getOperand(0);
   1325     unsigned DstST = MO0.getReg() - X86::ST0;
   1326     unsigned SrcST = MO1.getReg() - X86::ST0;
   1327     bool KillsSrc = MI->killsRegister(MO1.getReg());
   1328 
   1329     // ST = COPY FP. Set up a pending ST register.
   1330     if (DstST < 8) {
   1331       unsigned SrcFP = getFPReg(MO1);
   1332       assert(isLive(SrcFP) && "Cannot copy dead register");
   1333       assert(!MO0.isDead() && "Cannot copy to dead ST register");
   1334 
    1335       // Unallocated STs are marked with the out-of-range value NumFPRegs.
   1336       while (NumPendingSTs <= DstST)
   1337         PendingST[NumPendingSTs++] = NumFPRegs;
   1338 
   1339       // STi could still be live from a previous inline asm.
   1340       if (isScratchReg(PendingST[DstST])) {
   1341         DEBUG(dbgs() << "Clobbering old ST in FP" << unsigned(PendingST[DstST])
   1342                      << '\n');
   1343         freeStackSlotBefore(MI, PendingST[DstST]);
   1344       }
   1345 
   1346       // When the source is killed, allocate a scratch FP register.
   1347       if (KillsSrc) {
   1348         duplicatePendingSTBeforeKill(SrcFP, I);
   1349         unsigned Slot = getSlot(SrcFP);
   1350         unsigned SR = getScratchReg();
   1351         PendingST[DstST] = SR;
   1352         Stack[Slot] = SR;
   1353         RegMap[SR] = Slot;
   1354       } else
   1355         PendingST[DstST] = SrcFP;
   1356       break;
   1357     }
   1358 
   1359     // FP = COPY ST. Extract fixed stack value.
   1360     // Any instruction defining ST registers must have assigned them to a
   1361     // scratch register.
   1362     if (SrcST < 8) {
   1363       unsigned DstFP = getFPReg(MO0);
   1364       assert(!isLive(DstFP) && "Cannot copy ST to live FP register");
   1365       assert(NumPendingSTs > SrcST && "Cannot copy from dead ST register");
   1366       unsigned SrcFP = PendingST[SrcST];
   1367       assert(isScratchReg(SrcFP) && "Expected ST in a scratch register");
   1368       assert(isLive(SrcFP) && "Scratch holding ST is dead");
   1369 
   1370       // DstFP steals the stack slot from SrcFP.
   1371       unsigned Slot = getSlot(SrcFP);
   1372       Stack[Slot] = DstFP;
   1373       RegMap[DstFP] = Slot;
   1374 
   1375       // Always treat the ST as killed.
   1376       PendingST[SrcST] = NumFPRegs;
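               // Trim trailing dead markers so NumPendingSTs indexes just past the
               // last live pending ST register.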
   1377       while (NumPendingSTs && PendingST[NumPendingSTs - 1] == NumFPRegs)
   1378         --NumPendingSTs;
   1379       break;
   1380     }
   1381 
   1382     // FP <- FP copy.
   1383     unsigned DstFP = getFPReg(MO0);
   1384     unsigned SrcFP = getFPReg(MO1);
   1385     assert(isLive(SrcFP) && "Cannot copy dead register");
   1386     if (KillsSrc) {
   1387       // If the input operand is killed, we can just change the owner of the
   1388       // incoming stack slot into the result.
   1389       unsigned Slot = getSlot(SrcFP);
   1390       Stack[Slot] = DstFP;
   1391       RegMap[DstFP] = Slot;
   1392     } else {
   1393       // For COPY we just duplicate the specified value to a new stack slot.
   1394       // This could be made better, but would require substantial changes.
   1395       duplicateToTop(SrcFP, DstFP, I);
   1396     }
   1397     break;
   1398   }
   1399 
   1400   case TargetOpcode::IMPLICIT_DEF: {
   1401     // All FP registers must be explicitly defined, so load a 0 instead.
   1402     unsigned Reg = MI->getOperand(0).getReg() - X86::FP0;
   1403     DEBUG(dbgs() << "Emitting LD_F0 for implicit FP" << Reg << '\n');
   1404     BuildMI(*MBB, I, MI->getDebugLoc(), TII->get(X86::LD_F0));
   1405     pushReg(Reg);
   1406     break;
   1407   }
   1408 
   1409   case X86::FpPOP_RETVAL: {
   1410     // The FpPOP_RETVAL instruction is used after calls that return a value on
   1411     // the floating point stack. We cannot model this with ST defs since CALL
   1412     // instructions have fixed clobber lists. This instruction is interpreted
   1413     // to mean that there is one more live register on the stack than we
   1414     // thought.
   1415     //
   1416     // This means that StackTop does not match the hardware stack between a
    1417     // call and the FpPOP_RETVAL instruction.  We do tolerate FP instructions
   1418     // between CALL and FpPOP_RETVAL as long as they don't overflow the
   1419     // hardware stack.
   1420     unsigned DstFP = getFPReg(MI->getOperand(0));
   1421 
   1422     // Move existing stack elements up to reflect reality.
   1423     assert(StackTop < 8 && "Stack overflowed before FpPOP_RETVAL");
   1424     if (StackTop) {
   1425       std::copy_backward(Stack, Stack + StackTop, Stack + StackTop + 1);
   1426       for (unsigned i = 0; i != NumFPRegs; ++i)
   1427         ++RegMap[i];
   1428     }
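             // E.g. if FP3 was pushed after the call, the tracked stack [FP3] becomes
             // [DstFP, FP3]: existing elements shift up one slot and the return value,
             // which sits below them on the hardware stack, takes slot 0.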
   1429     ++StackTop;
   1430 
   1431     // DstFP is the new bottom of the stack.
   1432     Stack[0] = DstFP;
   1433     RegMap[DstFP] = 0;
   1434 
   1435     // DstFP will be killed by processBasicBlock if this was a dead def.
   1436     break;
   1437   }
   1438 
   1439   case TargetOpcode::INLINEASM: {
   1440     // The inline asm MachineInstr currently only *uses* FP registers for the
   1441     // 'f' constraint.  These should be turned into the current ST(x) register
   1442     // in the machine instr.
   1443     //
   1444     // There are special rules for x87 inline assembly. The compiler must know
   1445     // exactly how many registers are popped and pushed implicitly by the asm.
   1446     // Otherwise it is not possible to restore the stack state after the inline
   1447     // asm.
   1448     //
   1449     // There are 3 kinds of input operands:
   1450     //
   1451     // 1. Popped inputs. These must appear at the stack top in ST0-STn. A
   1452     //    popped input operand must be in a fixed stack slot, and it is either
   1453     //    tied to an output operand, or in the clobber list. The MI has ST use
   1454     //    and def operands for these inputs.
   1455     //
   1456     // 2. Fixed inputs. These inputs appear in fixed stack slots, but are
   1457     //    preserved by the inline asm. The fixed stack slots must be STn-STm
   1458     //    following the popped inputs. A fixed input operand cannot be tied to
   1459     //    an output or appear in the clobber list. The MI has ST use operands
   1460     //    and no defs for these inputs.
   1461     //
   1462     // 3. Preserved inputs. These inputs use the "f" constraint which is
   1463     //    represented as an FP register. The inline asm won't change these
   1464     //    stack slots.
   1465     //
   1466     // Outputs must be in ST registers, FP outputs are not allowed. Clobbered
   1467     // registers do not count as output operands. The inline asm changes the
   1468     // stack as if it popped all the popped inputs and then pushed all the
   1469     // output operands.
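             //
             // As an illustrative sketch (not taken from this file), GCC-style asm like
             //   asm("fsincos" : "=t"(c), "=u"(s) : "0"(x));
             // has one popped input (x, tied to the ST0 output) and two ST defs (ST0
             // and ST1), so the asm nets one extra value pushed onto the stack.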
   1470 
   1471     // Scan the assembly for ST registers used, defined and clobbered. We can
   1472     // only tell clobbers from defs by looking at the asm descriptor.
   1473     unsigned STUses = 0, STDefs = 0, STClobbers = 0, STDeadDefs = 0;
   1474     unsigned NumOps = 0;
   1475     for (unsigned i = InlineAsm::MIOp_FirstOperand, e = MI->getNumOperands();
   1476          i != e && MI->getOperand(i).isImm(); i += 1 + NumOps) {
   1477       unsigned Flags = MI->getOperand(i).getImm();
   1478       NumOps = InlineAsm::getNumOperandRegisters(Flags);
   1479       if (NumOps != 1)
   1480         continue;
   1481       const MachineOperand &MO = MI->getOperand(i + 1);
   1482       if (!MO.isReg())
   1483         continue;
   1484       unsigned STReg = MO.getReg() - X86::ST0;
   1485       if (STReg >= 8)
   1486         continue;
   1487 
   1488       switch (InlineAsm::getKind(Flags)) {
   1489       case InlineAsm::Kind_RegUse:
   1490         STUses |= (1u << STReg);
   1491         break;
   1492       case InlineAsm::Kind_RegDef:
   1493       case InlineAsm::Kind_RegDefEarlyClobber:
   1494         STDefs |= (1u << STReg);
   1495         if (MO.isDead())
   1496           STDeadDefs |= (1u << STReg);
   1497         break;
   1498       case InlineAsm::Kind_Clobber:
   1499         STClobbers |= (1u << STReg);
   1500         break;
   1501       default:
   1502         break;
   1503       }
   1504     }
   1505 
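             // For the illustrative fsincos asm above, this scan would produce
             // STUses == 0x1 and STDefs == 0x3: one popped input, two output regs.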
   1506     if (STUses && !isMask_32(STUses))
   1507       MI->emitError("fixed input regs must be last on the x87 stack");
   1508     unsigned NumSTUses = CountTrailingOnes_32(STUses);
   1509 
   1510     // Defs must be contiguous from the stack top. ST0-STn.
   1511     if (STDefs && !isMask_32(STDefs)) {
   1512       MI->emitError("output regs must be last on the x87 stack");
   1513       STDefs = NextPowerOf2(STDefs) - 1;
   1514     }
   1515     unsigned NumSTDefs = CountTrailingOnes_32(STDefs);
   1516 
   1517     // So must the clobbered stack slots. ST0-STm, m >= n.
   1518     if (STClobbers && !isMask_32(STDefs | STClobbers))
   1519       MI->emitError("clobbers must be last on the x87 stack");
   1520 
   1521     // Popped inputs are the ones that are also clobbered or defined.
   1522     unsigned STPopped = STUses & (STDefs | STClobbers);
   1523     if (STPopped && !isMask_32(STPopped))
   1524       MI->emitError("implicitly popped regs must be last on the x87 stack");
   1525     unsigned NumSTPopped = CountTrailingOnes_32(STPopped);
   1526 
   1527     DEBUG(dbgs() << "Asm uses " << NumSTUses << " fixed regs, pops "
   1528                  << NumSTPopped << ", and defines " << NumSTDefs << " regs.\n");
   1529 
   1530     // Scan the instruction for FP uses corresponding to "f" constraints.
    1531     // Collect FP registers to kill after the instruction.
   1532     // Always kill all the scratch regs.
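             // (Bits 0-7 cover the real FP registers; everything above them is one of
             // this pass's internal scratch registers, which must not outlive the asm.)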
   1533     unsigned FPKills = ((1u << NumFPRegs) - 1) & ~0xff;
   1534     unsigned FPUsed = 0;
   1535     for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
   1536       MachineOperand &Op = MI->getOperand(i);
   1537       if (!Op.isReg() || Op.getReg() < X86::FP0 || Op.getReg() > X86::FP6)
   1538         continue;
   1539       if (!Op.isUse())
   1540         MI->emitError("illegal \"f\" output constraint");
   1541       unsigned FPReg = getFPReg(Op);
   1542       FPUsed |= 1U << FPReg;
   1543 
   1544       // If we kill this operand, make sure to pop it from the stack after the
   1545       // asm.  We just remember it for now, and pop them all off at the end in
   1546       // a batch.
   1547       if (Op.isKill())
   1548         FPKills |= 1U << FPReg;
   1549     }
   1550 
   1551     // The popped inputs will be killed by the instruction, so duplicate them
   1552     // if the FP register needs to be live after the instruction, or if it is
   1553     // used in the instruction itself. We effectively treat the popped inputs
   1554     // as early clobbers.
   1555     for (unsigned i = 0; i < NumSTPopped; ++i) {
   1556       if ((FPKills & ~FPUsed) & (1u << PendingST[i]))
   1557         continue;
   1558       unsigned SR = getScratchReg();
   1559       duplicateToTop(PendingST[i], SR, I);
   1560       DEBUG(dbgs() << "Duplicating ST" << i << " in FP"
   1561                    << unsigned(PendingST[i]) << " to avoid clobbering it.\n");
   1562       PendingST[i] = SR;
   1563     }
   1564 
   1565     // Make sure we have a unique live register for every fixed use. Some of
   1566     // them could be undef uses, and we need to emit LD_F0 instructions.
   1567     for (unsigned i = 0; i < NumSTUses; ++i) {
   1568       if (i < NumPendingSTs && PendingST[i] < NumFPRegs) {
   1569         // Check for shared assignments.
   1570         for (unsigned j = 0; j < i; ++j) {
   1571           if (PendingST[j] != PendingST[i])
   1572             continue;
    1573           // STi and STj are in the same register; create a copy.
   1574           unsigned SR = getScratchReg();
   1575           duplicateToTop(PendingST[i], SR, I);
   1576           DEBUG(dbgs() << "Duplicating ST" << i << " in FP"
   1577                        << unsigned(PendingST[i])
   1578                        << " to avoid collision with ST" << j << '\n');
   1579           PendingST[i] = SR;
   1580         }
   1581         continue;
   1582       }
   1583       unsigned SR = getScratchReg();
   1584       DEBUG(dbgs() << "Emitting LD_F0 for ST" << i << " in FP" << SR << '\n');
   1585       BuildMI(*MBB, I, MI->getDebugLoc(), TII->get(X86::LD_F0));
   1586       pushReg(SR);
   1587       PendingST[i] = SR;
   1588       if (NumPendingSTs == i)
   1589         ++NumPendingSTs;
   1590     }
   1591     assert(NumPendingSTs >= NumSTUses && "Fixed registers should be assigned");
   1592 
   1593     // Now we can rearrange the live registers to match what was requested.
   1594     shuffleStackTop(PendingST, NumPendingSTs, I);
   1595     DEBUG({dbgs() << "Before asm: "; dumpStack();});
   1596 
   1597     // With the stack layout fixed, rewrite the FP registers.
   1598     for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
   1599       MachineOperand &Op = MI->getOperand(i);
   1600       if (!Op.isReg() || Op.getReg() < X86::FP0 || Op.getReg() > X86::FP6)
   1601         continue;
   1602       unsigned FPReg = getFPReg(Op);
   1603       Op.setReg(getSTReg(FPReg));
   1604     }
   1605 
   1606     // Simulate the inline asm popping its inputs and pushing its outputs.
   1607     StackTop -= NumSTPopped;
   1608 
   1609     // Hold the fixed output registers in scratch FP registers. They will be
   1610     // transferred to real FP registers by copies.
   1611     NumPendingSTs = 0;
   1612     for (unsigned i = 0; i < NumSTDefs; ++i) {
   1613       unsigned SR = getScratchReg();
   1614       pushReg(SR);
   1615       FPKills &= ~(1u << SR);
   1616     }
   1617     for (unsigned i = 0; i < NumSTDefs; ++i)
   1618       PendingST[NumPendingSTs++] = getStackEntry(i);
   1619     DEBUG({dbgs() << "After asm: "; dumpStack();});
   1620 
   1621     // If any of the ST defs were dead, pop them immediately. Our caller only
   1622     // handles dead FP defs.
   1623     MachineBasicBlock::iterator InsertPt = MI;
   1624     for (unsigned i = 0; STDefs & (1u << i); ++i) {
   1625       if (!(STDeadDefs & (1u << i)))
   1626         continue;
   1627       freeStackSlotAfter(InsertPt, PendingST[i]);
   1628       PendingST[i] = NumFPRegs;
   1629     }
   1630     while (NumPendingSTs && PendingST[NumPendingSTs - 1] == NumFPRegs)
   1631       --NumPendingSTs;
   1632 
   1633     // If this asm kills any FP registers (is the last use of them) we must
   1634     // explicitly emit pop instructions for them.  Do this now after the asm has
   1635     // executed so that the ST(x) numbers are not off (which would happen if we
   1636     // did this inline with operand rewriting).
   1637     //
   1638     // Note: this might be a non-optimal pop sequence.  We might be able to do
   1639     // better by trying to pop in stack order or something.
   1640     while (FPKills) {
   1641       unsigned FPReg = countTrailingZeros(FPKills);
   1642       if (isLive(FPReg))
   1643         freeStackSlotAfter(InsertPt, FPReg);
   1644       FPKills &= ~(1U << FPReg);
   1645     }
   1646     // Don't delete the inline asm!
   1647     return;
   1648   }
   1649 
   1650   case X86::WIN_FTOL_32:
   1651   case X86::WIN_FTOL_64: {
   1652     // Push the operand into ST0.
   1653     MachineOperand &Op = MI->getOperand(0);
   1654     assert(Op.isUse() && Op.isReg() &&
   1655       Op.getReg() >= X86::FP0 && Op.getReg() <= X86::FP6);
   1656     unsigned FPReg = getFPReg(Op);
   1657     if (Op.isKill())
   1658       moveToTop(FPReg, I);
   1659     else
   1660       duplicateToTop(FPReg, FPReg, I);
   1661 
   1662     // Emit the call. This will pop the operand.
   1663     BuildMI(*MBB, I, MI->getDebugLoc(), TII->get(X86::CALLpcrel32))
   1664       .addExternalSymbol("_ftol2")
   1665       .addReg(X86::ST0, RegState::ImplicitKill)
   1666       .addReg(X86::ECX, RegState::ImplicitDefine)
   1667       .addReg(X86::EAX, RegState::Define | RegState::Implicit)
   1668       .addReg(X86::EDX, RegState::Define | RegState::Implicit)
   1669       .addReg(X86::EFLAGS, RegState::Define | RegState::Implicit);
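             // The callee pops its ST(0) argument, so drop one tracked stack entry to
             // keep our bookkeeping in sync with the hardware stack.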
   1670     --StackTop;
   1671 
   1672     break;
   1673   }
   1674 
   1675   case X86::RETQ:
   1676   case X86::RETL:
   1677   case X86::RETIL:
   1678   case X86::RETIQ:
   1679     // If RET has an FP register use operand, pass the first one in ST(0) and
   1680     // the second one in ST(1).
   1681 
   1682     // Find the register operands.
   1683     unsigned FirstFPRegOp = ~0U, SecondFPRegOp = ~0U;
   1684     unsigned LiveMask = 0;
   1685 
   1686     for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
   1687       MachineOperand &Op = MI->getOperand(i);
   1688       if (!Op.isReg() || Op.getReg() < X86::FP0 || Op.getReg() > X86::FP6)
   1689         continue;
   1690       // FP Register uses must be kills unless there are two uses of the same
   1691       // register, in which case only one will be a kill.
   1692       assert(Op.isUse() &&
   1693              (Op.isKill() ||                        // Marked kill.
   1694               getFPReg(Op) == FirstFPRegOp ||       // Second instance.
   1695               MI->killsRegister(Op.getReg())) &&    // Later use is marked kill.
   1696              "Ret only defs operands, and values aren't live beyond it");
   1697 
   1698       if (FirstFPRegOp == ~0U)
   1699         FirstFPRegOp = getFPReg(Op);
   1700       else {
   1701         assert(SecondFPRegOp == ~0U && "More than two fp operands!");
   1702         SecondFPRegOp = getFPReg(Op);
   1703       }
   1704       LiveMask |= (1 << getFPReg(Op));
   1705 
   1706       // Remove the operand so that later passes don't see it.
   1707       MI->RemoveOperand(i);
   1708       --i, --e;
   1709     }
   1710 
   1711     // We may have been carrying spurious live-ins, so make sure only the returned
   1712     // registers are left live.
   1713     adjustLiveRegs(LiveMask, MI);
   1714     if (!LiveMask) return;  // Quick check to see if any are possible.
   1715 
   1716     // There are only four possibilities here:
   1717     // 1) we are returning a single FP value.  In this case, it has to be in
   1718     //    ST(0) already, so just declare success by removing the value from the
   1719     //    FP Stack.
   1720     if (SecondFPRegOp == ~0U) {
   1721       // Assert that the top of stack contains the right FP register.
   1722       assert(StackTop == 1 && FirstFPRegOp == getStackEntry(0) &&
   1723              "Top of stack not the right register for RET!");
   1724 
   1725       // Ok, everything is good, mark the value as not being on the stack
   1726       // anymore so that our assertion about the stack being empty at end of
   1727       // block doesn't fire.
   1728       StackTop = 0;
   1729       return;
   1730     }
   1731 
   1732     // Otherwise, we are returning two values:
   1733     // 2) If returning the same value for both, we only have one thing in the FP
   1734     //    stack.  Consider:  RET FP1, FP1
   1735     if (StackTop == 1) {
   1736       assert(FirstFPRegOp == SecondFPRegOp && FirstFPRegOp == getStackEntry(0)&&
   1737              "Stack misconfiguration for RET!");
   1738 
   1739       // Duplicate the TOS so that we return it twice.  Just pick some other FPx
   1740       // register to hold it.
   1741       unsigned NewReg = getScratchReg();
   1742       duplicateToTop(FirstFPRegOp, NewReg, MI);
   1743       FirstFPRegOp = NewReg;
   1744     }
   1745 
    1746     /// Okay, we now know we have two different FPx operands:
   1747     assert(StackTop == 2 && "Must have two values live!");
   1748 
    1749     /// 3) If SecondFPRegOp is currently in ST(0) and FirstFPRegOp is currently
    1750     ///    in ST(1), emit an fxch to swap them into the required order.
   1751     if (getStackEntry(0) == SecondFPRegOp) {
   1752       assert(getStackEntry(1) == FirstFPRegOp && "Unknown regs live");
   1753       moveToTop(FirstFPRegOp, MI);
   1754     }
   1755 
   1756     /// 4) Finally, FirstFPRegOp must be in ST(0) and SecondFPRegOp must be in
   1757     /// ST(1).  Just remove both from our understanding of the stack and return.
   1758     assert(getStackEntry(0) == FirstFPRegOp && "Unknown regs live");
   1759     assert(getStackEntry(1) == SecondFPRegOp && "Unknown regs live");
   1760     StackTop = 0;
   1761     return;
   1762   }
   1763 
   1764   I = MBB->erase(I);  // Remove the pseudo instruction
   1765 
   1766   // We want to leave I pointing to the previous instruction, but what if we
   1767   // just erased the first instruction?
   1768   if (I == MBB->begin()) {
   1769     DEBUG(dbgs() << "Inserting dummy KILL\n");
   1770     I = BuildMI(*MBB, I, DebugLoc(), TII->get(TargetOpcode::KILL));
   1771   } else
   1772     --I;
   1773 }
   1774