//===-- SIFoldOperands.cpp - Fold operands ---------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
/// \file
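/// This pass folds the source operands of mov and copy instructions into
/// their uses: immediates are propagated directly and register sources are
/// forwarded, leaving the original movs and copies dead so that later passes
/// can remove them.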
//===----------------------------------------------------------------------===//
//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"

#define DEBUG_TYPE "si-fold-operands"
using namespace llvm;

namespace {

class SIFoldOperands : public MachineFunctionPass {
public:
  static char ID;

  SIFoldOperands() : MachineFunctionPass(ID) {
    initializeSIFoldOperandsPass(*PassRegistry::getPassRegistry());
  }

  bool runOnMachineFunction(MachineFunction &MF) override;

  const char *getPassName() const override {
    return "SI Fold Operands";
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

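// Holds one pending fold: the use instruction and operand index to rewrite,
// plus either the register operand to substitute or the raw immediate value.
// OpToFold is null exactly when an immediate is being folded.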
struct FoldCandidate {
  MachineInstr *UseMI;
  unsigned UseOpNo;
  MachineOperand *OpToFold;
  uint64_t ImmToFold;

  FoldCandidate(MachineInstr *MI, unsigned OpNo, MachineOperand *FoldOp) :
                UseMI(MI), UseOpNo(OpNo) {
    if (FoldOp->isImm()) {
      OpToFold = nullptr;
      ImmToFold = FoldOp->getImm();
    } else {
      assert(FoldOp->isReg());
      OpToFold = FoldOp;
    }
  }

  bool isImm() const {
    return !OpToFold;
  }
};

} // End anonymous namespace.

INITIALIZE_PASS(SIFoldOperands, DEBUG_TYPE,
                "SI Fold Operands", false, false)

char SIFoldOperands::ID = 0;

char &llvm::SIFoldOperandsID = SIFoldOperands::ID;

FunctionPass *llvm::createSIFoldOperandsPass() {
  return new SIFoldOperands();
}

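// Returns true for the mov and copy opcodes whose source operand this pass
// can safely fold into its users.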
static bool isSafeToFold(unsigned Opcode) {
  switch (Opcode) {
  case AMDGPU::V_MOV_B32_e32:
  case AMDGPU::V_MOV_B32_e64:
  case AMDGPU::V_MOV_B64_PSEUDO:
  case AMDGPU::S_MOV_B32:
  case AMDGPU::S_MOV_B64:
  case AMDGPU::COPY:
    return true;
  default:
    return false;
  }
}

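// Applies a collected fold by rewriting the use operand in place, either
// morphing it into an immediate or substituting the folded virtual register.
// Returns false for cases that are not handled, such as physical registers.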
static bool updateOperand(FoldCandidate &Fold,
                          const TargetRegisterInfo &TRI) {
  MachineInstr *MI = Fold.UseMI;
  MachineOperand &Old = MI->getOperand(Fold.UseOpNo);
  assert(Old.isReg());

  if (Fold.isImm()) {
    Old.ChangeToImmediate(Fold.ImmToFold);
    return true;
  }

  MachineOperand *New = Fold.OpToFold;
  if (TargetRegisterInfo::isVirtualRegister(Old.getReg()) &&
      TargetRegisterInfo::isVirtualRegister(New->getReg())) {
    Old.substVirtReg(New->getReg(), New->getSubReg(), TRI);
    return true;
  }

  // FIXME: Handle physical registers.

  return false;
}

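// Returns true if FoldList already contains a candidate targeting MI.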
static bool isUseMIInFoldList(const std::vector<FoldCandidate> &FoldList,
                              const MachineInstr *MI) {
  for (const FoldCandidate &Candidate : FoldList) {
    if (Candidate.UseMI == MI)
      return true;
  }
  return false;
}

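// Records a FoldCandidate for operand OpNo of MI if OpToFold is (or can be
// made) legal there. Two fallbacks are attempted when the operand is not
// directly legal: converting v_mac_f32_e64 to v_mad_f32 to free up src2, and
// commuting MI so the fold targets a position where it is legal.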
static bool tryAddToFoldList(std::vector<FoldCandidate> &FoldList,
                             MachineInstr *MI, unsigned OpNo,
                             MachineOperand *OpToFold,
                             const SIInstrInfo *TII) {
  if (!TII->isOperandLegal(*MI, OpNo, OpToFold)) {

    // Special case for v_mac_f32_e64 if we are trying to fold into src2.
    unsigned Opc = MI->getOpcode();
    if (Opc == AMDGPU::V_MAC_F32_e64 &&
        (int)OpNo == AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2)) {
      // Check if changing this to a v_mad_f32 instruction will allow us to
      // fold the operand.
      MI->setDesc(TII->get(AMDGPU::V_MAD_F32));
      bool FoldAsMAD = tryAddToFoldList(FoldList, MI, OpNo, OpToFold, TII);
      if (FoldAsMAD) {
        MI->untieRegOperand(OpNo);
        return true;
      }
      MI->setDesc(TII->get(Opc));
    }

    // If we are already folding into another operand of MI, then
    // we can't commute the instruction, otherwise we risk making the
    // other fold illegal.
    if (isUseMIInFoldList(FoldList, MI))
      return false;

    // Operand is not legal, so try to commute the instruction to
    // see if this makes it possible to fold.
    unsigned CommuteIdx0 = TargetInstrInfo::CommuteAnyOperandIndex;
    unsigned CommuteIdx1 = TargetInstrInfo::CommuteAnyOperandIndex;
    bool CanCommute = TII->findCommutedOpIndices(*MI, CommuteIdx0, CommuteIdx1);

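    // If OpNo is one of the commutable operands, retarget the fold at the
    // position it will occupy after the commute.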
    if (CanCommute) {
      if (CommuteIdx0 == OpNo)
        OpNo = CommuteIdx1;
      else if (CommuteIdx1 == OpNo)
        OpNo = CommuteIdx0;
    }

    // One of the operands might be an immediate, and OpNo may refer to it
    // after the call to commuteInstruction() below. Such situations are
    // avoided here explicitly, as OpNo must be a register operand to be a
    // candidate for folding.
    if (CanCommute && (!MI->getOperand(CommuteIdx0).isReg() ||
                       !MI->getOperand(CommuteIdx1).isReg()))
      return false;

    if (!CanCommute ||
        !TII->commuteInstruction(*MI, false, CommuteIdx0, CommuteIdx1))
      return false;

    if (!TII->isOperandLegal(*MI, OpNo, OpToFold))
      return false;
  }

  FoldList.push_back(FoldCandidate(MI, OpNo, OpToFold));
  return true;
}

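// Attempts to fold OpToFold into operand UseOpIdx of UseMI, appending any
// legal candidates to FoldList. Splits 64-bit immediates when the use is a
// 32-bit subregister, rewrites COPYs into movs so they can accept immediates
// (recording them in CopiesToReplace), and forwards folds through
// REG_SEQUENCE into its users, since literals cannot be folded into
// REG_SEQUENCE itself.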
static void foldOperand(MachineOperand &OpToFold, MachineInstr *UseMI,
                        unsigned UseOpIdx,
                        std::vector<FoldCandidate> &FoldList,
                        SmallVectorImpl<MachineInstr *> &CopiesToReplace,
                        const SIInstrInfo *TII, const SIRegisterInfo &TRI,
                        MachineRegisterInfo &MRI) {
  const MachineOperand &UseOp = UseMI->getOperand(UseOpIdx);

  // FIXME: Fold operands with subregs.
  if (UseOp.isReg() && ((UseOp.getSubReg() && OpToFold.isReg()) ||
      UseOp.isImplicit())) {
    return;
  }

  bool FoldingImm = OpToFold.isImm();
  APInt Imm;

  if (FoldingImm) {
    unsigned UseReg = UseOp.getReg();
    const TargetRegisterClass *UseRC
      = TargetRegisterInfo::isVirtualRegister(UseReg) ?
      MRI.getRegClass(UseReg) :
      TRI.getPhysRegClass(UseReg);

    Imm = APInt(64, OpToFold.getImm());

    const MCInstrDesc &FoldDesc = TII->get(OpToFold.getParent()->getOpcode());
    const TargetRegisterClass *FoldRC =
        TRI.getRegClass(FoldDesc.OpInfo[0].RegClass);

    // Split 64-bit constants into 32-bit halves for folding.
    if (FoldRC->getSize() == 8 && UseOp.getSubReg()) {
      if (UseRC->getSize() != 8)
        return;

      if (UseOp.getSubReg() == AMDGPU::sub0) {
        Imm = Imm.getLoBits(32);
      } else {
        assert(UseOp.getSubReg() == AMDGPU::sub1);
        Imm = Imm.getHiBits(32);
      }
    }

    // In order to fold immediates into copies, we need to change the
    // copy to a MOV.
    if (UseMI->getOpcode() == AMDGPU::COPY) {
      unsigned DestReg = UseMI->getOperand(0).getReg();
      const TargetRegisterClass *DestRC
        = TargetRegisterInfo::isVirtualRegister(DestReg) ?
        MRI.getRegClass(DestReg) :
        TRI.getPhysRegClass(DestReg);

      unsigned MovOp = TII->getMovOpcode(DestRC);
      if (MovOp == AMDGPU::COPY)
        return;

      UseMI->setDesc(TII->get(MovOp));
      CopiesToReplace.push_back(UseMI);
    }
  }

  // Special case for REG_SEQUENCE: We can't fold literals into
  // REG_SEQUENCE instructions, so we have to fold them into the
  // uses of REG_SEQUENCE.
  if (UseMI->getOpcode() == AMDGPU::REG_SEQUENCE) {
    unsigned RegSeqDstReg = UseMI->getOperand(0).getReg();
    unsigned RegSeqDstSubReg = UseMI->getOperand(UseOpIdx + 1).getImm();

    for (MachineRegisterInfo::use_iterator
         RSUse = MRI.use_begin(RegSeqDstReg),
         RSE = MRI.use_end(); RSUse != RSE; ++RSUse) {

      MachineInstr *RSUseMI = RSUse->getParent();
      if (RSUse->getSubReg() != RegSeqDstSubReg)
        continue;

      foldOperand(OpToFold, RSUseMI, RSUse.getOperandNo(), FoldList,
                  CopiesToReplace, TII, TRI, MRI);
    }
    return;
  }

  const MCInstrDesc &UseDesc = UseMI->getDesc();

  // Don't fold into target-independent nodes. Target-independent opcodes
  // don't have defined register classes.
  if (UseDesc.isVariadic() ||
      UseDesc.OpInfo[UseOpIdx].RegClass == -1)
    return;

  if (FoldingImm) {
    MachineOperand ImmOp = MachineOperand::CreateImm(Imm.getSExtValue());
    tryAddToFoldList(FoldList, UseMI, UseOpIdx, &ImmOp, TII);
    return;
  }

  tryAddToFoldList(FoldList, UseMI, UseOpIdx, &OpToFold, TII);

  // FIXME: We could try to change the instruction from 64-bit to 32-bit
  // to enable more folding opportunities.  The shrink operands pass
  // already does this.
}

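// For each foldable mov or copy in the function, tries to fold its source
// into every use of its destination register, then applies the collected
// fold candidates and patches up implicit operands on any COPYs that were
// converted to movs.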
bool SIFoldOperands::runOnMachineFunction(MachineFunction &MF) {
  if (skipFunction(*MF.getFunction()))
    return false;

  const SISubtarget &ST = MF.getSubtarget<SISubtarget>();

  MachineRegisterInfo &MRI = MF.getRegInfo();
  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo &TRI = TII->getRegisterInfo();

  for (MachineBasicBlock &MBB : MF) {
    MachineBasicBlock::iterator I, Next;
    for (I = MBB.begin(); I != MBB.end(); I = Next) {
      Next = std::next(I);
      MachineInstr &MI = *I;

      if (!isSafeToFold(MI.getOpcode()))
        continue;

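      // Every opcode accepted by isSafeToFold has its source in operand 1;
      // the operand size decides whether an immediate can be encoded as an
      // inline constant.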
      unsigned OpSize = TII->getOpSize(MI, 1);
      MachineOperand &OpToFold = MI.getOperand(1);
      bool FoldingImm = OpToFold.isImm();

      // FIXME: We could also be folding things like FrameIndexes and
      // TargetIndexes.
      if (!FoldingImm && !OpToFold.isReg())
        continue;

      // Folding immediates with more than one use will increase program size.
      // FIXME: This will also reduce register usage, which may be better
      // in some cases.  A better heuristic is needed.
      if (FoldingImm && !TII->isInlineConstant(OpToFold, OpSize) &&
          !MRI.hasOneUse(MI.getOperand(0).getReg()))
        continue;

      if (OpToFold.isReg() &&
          !TargetRegisterInfo::isVirtualRegister(OpToFold.getReg()))
        continue;

      // Prevent folding operands backwards in the function. For example,
      // the COPY opcode must not be replaced by 1 in this example:
      //
      //    %vreg3<def> = COPY %VGPR0; VGPR_32:%vreg3
      //    ...
      //    %VGPR0<def> = V_MOV_B32_e32 1, %EXEC<imp-use>
      MachineOperand &Dst = MI.getOperand(0);
      if (Dst.isReg() &&
          !TargetRegisterInfo::isVirtualRegister(Dst.getReg()))
        continue;

      // We need to mutate the operands of new mov instructions to add
      // implicit uses of EXEC, but adding them invalidates the use_iterator,
      // so defer this.
      SmallVector<MachineInstr *, 4> CopiesToReplace;

      std::vector<FoldCandidate> FoldList;
      for (MachineRegisterInfo::use_iterator
           Use = MRI.use_begin(MI.getOperand(0).getReg()), E = MRI.use_end();
           Use != E; ++Use) {

        MachineInstr *UseMI = Use->getParent();

        foldOperand(OpToFold, UseMI, Use.getOperandNo(), FoldList,
                    CopiesToReplace, TII, TRI, MRI);
      }

      // Make sure we add EXEC uses to any new v_mov instructions created.
      for (MachineInstr *Copy : CopiesToReplace)
        Copy->addImplicitDefUseOperands(MF);

      for (FoldCandidate &Fold : FoldList) {
        if (updateOperand(Fold, TRI)) {
          // Clear kill flags.
          if (!Fold.isImm()) {
            assert(Fold.OpToFold && Fold.OpToFold->isReg());
            // FIXME: Probably shouldn't bother trying to fold if not an
            // SGPR. PeepholeOptimizer can eliminate redundant VGPR->VGPR
            // copies.
            MRI.clearKillFlags(Fold.OpToFold->getReg());
          }
          DEBUG(dbgs() << "Folded source from " << MI << " into OpNo " <<
                Fold.UseOpNo << " of " << *Fold.UseMI << '\n');
        }
      }
    }
  }
  return false;
}