//===-- SILoadStoreOptimizer.cpp ------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass tries to fuse DS instructions with nearby immediate offsets.
// This will fuse operations such as
//  ds_read_b32 v0, v2 offset:16
//  ds_read_b32 v1, v2 offset:32
// ==>
//  ds_read2_b32 v[0:1], v2, offset0:4 offset1:8
//
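// Offsets that are multiples of 64 elements select the stride-64 (st64) forms
// instead, whose offsets are encoded in units of 64 elements, e.g.
//  ds_read_b32 v0, v2 offset:256
//  ds_read_b32 v1, v2 offset:512
// ==>
//  ds_read2st64_b32 v[0:1], v2, offset0:1 offset1:2
//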
// Future improvements:
//
// - This currently relies on the scheduler to place loads and stores next to
//   each other, and then only merges adjacent pairs of instructions. It would
//   be good to be more flexible with interleaved instructions, and possibly to
//   run before scheduling. It currently misses stores of constants because the
//   load of the constant into the data register is placed between the stores,
//   although this is arguably a scheduling problem.
//
// - Recomputing live intervals seems inefficient. This currently matches one
//   pair at a time, recomputes the live intervals, and moves on to the next
//   pair. It would be better to compute a list of all merges that need to
//   occur.
//
// - With a list of instructions to process, we can also merge more. If a
//   cluster of loads has offsets that are too large to fit in the 8-bit offset
//   fields, but that are close enough together, we can add a common value to
//   the base pointer so that the reduced offsets fit.
//
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "SIRegisterInfo.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"

using namespace llvm;

#define DEBUG_TYPE "si-load-store-opt"

namespace {

class SILoadStoreOptimizer : public MachineFunctionPass {
private:
  const SIInstrInfo *TII;
  const SIRegisterInfo *TRI;
  MachineRegisterInfo *MRI;
  LiveIntervals *LIS;

  static bool offsetsCanBeCombined(unsigned Offset0,
                                   unsigned Offset1,
                                   unsigned EltSize);

  MachineBasicBlock::iterator findMatchingDSInst(MachineBasicBlock::iterator I,
                                                 unsigned EltSize);

  MachineBasicBlock::iterator mergeRead2Pair(
    MachineBasicBlock::iterator I,
    MachineBasicBlock::iterator Paired,
    unsigned EltSize);

  MachineBasicBlock::iterator mergeWrite2Pair(
    MachineBasicBlock::iterator I,
    MachineBasicBlock::iterator Paired,
    unsigned EltSize);

public:
  static char ID;

  SILoadStoreOptimizer()
      : MachineFunctionPass(ID), TII(nullptr), TRI(nullptr), MRI(nullptr),
        LIS(nullptr) {}

  SILoadStoreOptimizer(const TargetMachine &TM_)
      : MachineFunctionPass(ID), TII(nullptr), TRI(nullptr), MRI(nullptr),
        LIS(nullptr) {
    initializeSILoadStoreOptimizerPass(*PassRegistry::getPassRegistry());
  }

  bool optimizeBlock(MachineBasicBlock &MBB);

  bool runOnMachineFunction(MachineFunction &MF) override;

  const char *getPassName() const override {
    return "SI Load / Store Optimizer";
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    AU.addPreserved<SlotIndexes>();
    AU.addPreserved<LiveIntervals>();
    AU.addPreserved<LiveVariables>();
    AU.addRequired<LiveIntervals>();

    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

} // End anonymous namespace.

INITIALIZE_PASS_BEGIN(SILoadStoreOptimizer, DEBUG_TYPE,
                      "SI Load / Store Optimizer", false, false)
INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
INITIALIZE_PASS_DEPENDENCY(LiveVariables)
INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
INITIALIZE_PASS_END(SILoadStoreOptimizer, DEBUG_TYPE,
                    "SI Load / Store Optimizer", false, false)

char SILoadStoreOptimizer::ID = 0;

char &llvm::SILoadStoreOptimizerID = SILoadStoreOptimizer::ID;

FunctionPass *llvm::createSILoadStoreOptimizerPass(TargetMachine &TM) {
  return new SILoadStoreOptimizer(TM);
}

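// Check whether two DS byte offsets can be encoded in the offset0/offset1
// fields of a single read2/write2 (or read2st64/write2st64) instruction for
// the given element size.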
bool SILoadStoreOptimizer::offsetsCanBeCombined(unsigned Offset0,
                                                unsigned Offset1,
                                                unsigned EltSize) {
  // XXX - Would the same offset be OK? Is there any reason this would happen or
  // be useful?
  if (Offset0 == Offset1)
    return false;

  // This won't be valid if the offsets aren't aligned to the element size.
  if ((Offset0 % EltSize != 0) || (Offset1 % EltSize != 0))
    return false;

  unsigned EltOffset0 = Offset0 / EltSize;
  unsigned EltOffset1 = Offset1 / EltSize;

  // Check if the new offsets fit in the reduced 8-bit range.
  if (isUInt<8>(EltOffset0) && isUInt<8>(EltOffset1))
    return true;

  // If the offsets in elements don't fit in 8 bits, we might be able to use
  // the stride-64 versions.
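  // For example, with EltSize == 4, byte offsets 256 and 512 give element
  // offsets 64 and 128, which encode as 1 and 2 in the st64 forms.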
  if ((EltOffset0 % 64 != 0) || (EltOffset1 % 64 != 0))
    return false;

  return isUInt<8>(EltOffset0 / 64) && isUInt<8>(EltOffset1 / 64);
}

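// Look at the instruction immediately following I. If it is a DS instruction
// with the same opcode, the same base address, and combinable offsets, and is
// safe to merge with, return it; otherwise return the end of the block.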
MachineBasicBlock::iterator
SILoadStoreOptimizer::findMatchingDSInst(MachineBasicBlock::iterator I,
                                         unsigned EltSize) {
  MachineBasicBlock::iterator E = I->getParent()->end();
  MachineBasicBlock::iterator MBBI = I;
  ++MBBI;

  // Nothing to pair with if I is the last instruction in the block.
  if (MBBI == E)
    return E;

  if (MBBI->getOpcode() != I->getOpcode())
    return E;

  // Don't merge volatile or otherwise ordered memory accesses.
  if (MBBI->hasOrderedMemoryRef())
    return E;

  int AddrIdx = AMDGPU::getNamedOperandIdx(I->getOpcode(), AMDGPU::OpName::addr);
  const MachineOperand &AddrReg0 = I->getOperand(AddrIdx);
  const MachineOperand &AddrReg1 = MBBI->getOperand(AddrIdx);

  // Check for the same base pointer. Be careful of subregisters, which can
  // occur with vectors of pointers.
  if (AddrReg0.getReg() == AddrReg1.getReg() &&
      AddrReg0.getSubReg() == AddrReg1.getSubReg()) {
    int OffsetIdx = AMDGPU::getNamedOperandIdx(I->getOpcode(),
                                               AMDGPU::OpName::offset);
    unsigned Offset0 = I->getOperand(OffsetIdx).getImm() & 0xffff;
    unsigned Offset1 = MBBI->getOperand(OffsetIdx).getImm() & 0xffff;

    // Check that both offsets fit in the reduced range.
    if (offsetsCanBeCombined(Offset0, Offset1, EltSize))
      return MBBI;
  }

  return E;
}

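// Merge two DS reads into a single read2/read2st64 loading into a new
// super-register, then copy the two halves back to the original destination
// registers and update the live intervals.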
MachineBasicBlock::iterator SILoadStoreOptimizer::mergeRead2Pair(
  MachineBasicBlock::iterator I,
  MachineBasicBlock::iterator Paired,
  unsigned EltSize) {
  MachineBasicBlock *MBB = I->getParent();

  // Be careful, since the addresses could be subregisters themselves in weird
  // cases, like vectors of pointers.
  const MachineOperand *AddrReg = TII->getNamedOperand(*I, AMDGPU::OpName::addr);

  const MachineOperand *Dest0 = TII->getNamedOperand(*I, AMDGPU::OpName::vdst);
  const MachineOperand *Dest1 = TII->getNamedOperand(*Paired, AMDGPU::OpName::vdst);

  unsigned Offset0
    = TII->getNamedOperand(*I, AMDGPU::OpName::offset)->getImm() & 0xffff;
  unsigned Offset1
    = TII->getNamedOperand(*Paired, AMDGPU::OpName::offset)->getImm() & 0xffff;

  unsigned NewOffset0 = Offset0 / EltSize;
  unsigned NewOffset1 = Offset1 / EltSize;
  unsigned Opc = (EltSize == 4) ? AMDGPU::DS_READ2_B32 : AMDGPU::DS_READ2_B64;

  // Prefer the st64 form if we can use it, even if we can fit the offset in
  // the non-st64 version. I'm not sure if there's any real reason to do this.
  bool UseST64 = (NewOffset0 % 64 == 0) && (NewOffset1 % 64 == 0);
  if (UseST64) {
    NewOffset0 /= 64;
    NewOffset1 /= 64;
    Opc = (EltSize == 4) ? AMDGPU::DS_READ2ST64_B32 : AMDGPU::DS_READ2ST64_B64;
  }

  assert((isUInt<8>(NewOffset0) && isUInt<8>(NewOffset1)) &&
         (NewOffset0 != NewOffset1) &&
         "Computed offset doesn't fit");

  const MCInstrDesc &Read2Desc = TII->get(Opc);

  const TargetRegisterClass *SuperRC
    = (EltSize == 4) ? &AMDGPU::VReg_64RegClass : &AMDGPU::VReg_128RegClass;
  unsigned DestReg = MRI->createVirtualRegister(SuperRC);

  DebugLoc DL = I->getDebugLoc();
  MachineInstrBuilder Read2
    = BuildMI(*MBB, I, DL, Read2Desc, DestReg)
    .addOperand(*AddrReg) // addr
    .addImm(NewOffset0) // offset0
    .addImm(NewOffset1) // offset1
    .addImm(0) // gds
    .addMemOperand(*I->memoperands_begin())
    .addMemOperand(*Paired->memoperands_begin());

  unsigned SubRegIdx0 = (EltSize == 4) ? AMDGPU::sub0 : AMDGPU::sub0_sub1;
  unsigned SubRegIdx1 = (EltSize == 4) ? AMDGPU::sub1 : AMDGPU::sub2_sub3;
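  // The two halves of the merged load are extracted with the subregister
  // copies below: sub0/sub1 for 32-bit elements, sub0_sub1/sub2_sub3 for
  // 64-bit elements.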

  const MCInstrDesc &CopyDesc = TII->get(TargetOpcode::COPY);

  // Copy to the old destination registers.
  MachineInstr *Copy0 = BuildMI(*MBB, I, DL, CopyDesc)
    .addOperand(*Dest0) // Copy to same destination including flags and sub reg.
    .addReg(DestReg, 0, SubRegIdx0);
  MachineInstr *Copy1 = BuildMI(*MBB, I, DL, CopyDesc)
    .addOperand(*Dest1)
    .addReg(DestReg, RegState::Kill, SubRegIdx1);

  LIS->InsertMachineInstrInMaps(*Read2);

  // repairIntervalsInRange() doesn't handle physical registers, so we have to
  // update the M0 range manually.
  SlotIndex PairedIndex = LIS->getInstructionIndex(*Paired);
  LiveRange &M0Range = LIS->getRegUnit(*MCRegUnitIterator(AMDGPU::M0, TRI));
  LiveRange::Segment *M0Segment = M0Range.getSegmentContaining(PairedIndex);
  bool UpdateM0Range = M0Segment->end == PairedIndex.getRegSlot();
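  // If M0's live segment ends exactly at Paired, its last use was Paired, and
  // after the merge the last use becomes the read2; the segment end is moved
  // below once the read2 has been given a slot index.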

  // The new write to the original destination register is now the copy. Steal
  // the old SlotIndex.
  LIS->ReplaceMachineInstrInMaps(*I, *Copy0);
  LIS->ReplaceMachineInstrInMaps(*Paired, *Copy1);

  I->eraseFromParent();
  Paired->eraseFromParent();

  LiveInterval &AddrRegLI = LIS->getInterval(AddrReg->getReg());
  LIS->shrinkToUses(&AddrRegLI);

  LIS->createAndComputeVirtRegInterval(DestReg);

  if (UpdateM0Range) {
    SlotIndex Read2Index = LIS->getInstructionIndex(*Read2);
    M0Segment->end = Read2Index.getRegSlot();
  }

  DEBUG(dbgs() << "Inserted read2: " << *Read2 << '\n');
  return Read2.getInstr();
}

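// Merge two DS writes into a single write2/write2st64 storing both data
// operands, then update the live intervals.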
MachineBasicBlock::iterator SILoadStoreOptimizer::mergeWrite2Pair(
  MachineBasicBlock::iterator I,
  MachineBasicBlock::iterator Paired,
  unsigned EltSize) {
  MachineBasicBlock *MBB = I->getParent();

  // Be sure to use .addOperand(), and not .addReg() with these. We want to be
  // sure we preserve the subregister index and any register flags set on them.
  const MachineOperand *Addr = TII->getNamedOperand(*I, AMDGPU::OpName::addr);
  const MachineOperand *Data0 = TII->getNamedOperand(*I, AMDGPU::OpName::data0);
  const MachineOperand *Data1
    = TII->getNamedOperand(*Paired, AMDGPU::OpName::data0);

  unsigned Offset0
    = TII->getNamedOperand(*I, AMDGPU::OpName::offset)->getImm() & 0xffff;
  unsigned Offset1
    = TII->getNamedOperand(*Paired, AMDGPU::OpName::offset)->getImm() & 0xffff;

  unsigned NewOffset0 = Offset0 / EltSize;
  unsigned NewOffset1 = Offset1 / EltSize;
  unsigned Opc = (EltSize == 4) ? AMDGPU::DS_WRITE2_B32 : AMDGPU::DS_WRITE2_B64;

  // Prefer the st64 form if we can use it, even if we can fit the offset in
  // the non-st64 version. I'm not sure if there's any real reason to do this.
  bool UseST64 = (NewOffset0 % 64 == 0) && (NewOffset1 % 64 == 0);
  if (UseST64) {
    NewOffset0 /= 64;
    NewOffset1 /= 64;
    Opc = (EltSize == 4) ? AMDGPU::DS_WRITE2ST64_B32 : AMDGPU::DS_WRITE2ST64_B64;
  }

  assert((isUInt<8>(NewOffset0) && isUInt<8>(NewOffset1)) &&
         (NewOffset0 != NewOffset1) &&
         "Computed offset doesn't fit");

  const MCInstrDesc &Write2Desc = TII->get(Opc);
  DebugLoc DL = I->getDebugLoc();

  // repairIntervalsInRange() doesn't handle physical registers, so we have to
  // update the M0 range manually.
  SlotIndex PairedIndex = LIS->getInstructionIndex(*Paired);
  LiveRange &M0Range = LIS->getRegUnit(*MCRegUnitIterator(AMDGPU::M0, TRI));
  LiveRange::Segment *M0Segment = M0Range.getSegmentContaining(PairedIndex);
  bool UpdateM0Range = M0Segment->end == PairedIndex.getRegSlot();
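  // As in mergeRead2Pair: if M0's segment ends exactly at Paired, its end is
  // retargeted to the new write2 below.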

  MachineInstrBuilder Write2
    = BuildMI(*MBB, I, DL, Write2Desc)
    .addOperand(*Addr) // addr
    .addOperand(*Data0) // data0
    .addOperand(*Data1) // data1
    .addImm(NewOffset0) // offset0
    .addImm(NewOffset1) // offset1
    .addImm(0) // gds
    .addMemOperand(*I->memoperands_begin())
    .addMemOperand(*Paired->memoperands_begin());

  // XXX - How do we express subregisters here?
  unsigned OrigRegs[] = { Data0->getReg(), Data1->getReg(), Addr->getReg() };

  LIS->RemoveMachineInstrFromMaps(*I);
  LIS->RemoveMachineInstrFromMaps(*Paired);
  I->eraseFromParent();
  Paired->eraseFromParent();

  // This doesn't handle physical registers like M0.
  LIS->repairIntervalsInRange(MBB, Write2, Write2, OrigRegs);

  if (UpdateM0Range) {
    SlotIndex Write2Index = LIS->getInstructionIndex(*Write2);
    M0Segment->end = Write2Index.getRegSlot();
  }

  DEBUG(dbgs() << "Inserted write2 inst: " << *Write2 << '\n');
  return Write2.getInstr();
}

// Scan through looking for adjacent LDS operations with constant offsets from
// the same base register. We rely on the scheduler to do the hard work of
// clustering nearby loads, and assume these are all adjacent.
bool SILoadStoreOptimizer::optimizeBlock(MachineBasicBlock &MBB) {
  bool Modified = false;

  for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end(); I != E;) {
    MachineInstr &MI = *I;

    // Don't combine volatile or otherwise ordered memory accesses.
    if (MI.hasOrderedMemoryRef()) {
      ++I;
      continue;
    }

    unsigned Opc = MI.getOpcode();
    if (Opc == AMDGPU::DS_READ_B32 || Opc == AMDGPU::DS_READ_B64) {
      unsigned Size = (Opc == AMDGPU::DS_READ_B64) ? 8 : 4;
      MachineBasicBlock::iterator Match = findMatchingDSInst(I, Size);
      if (Match != E) {
        Modified = true;
        I = mergeRead2Pair(I, Match, Size);
      } else {
        ++I;
      }

      continue;
    } else if (Opc == AMDGPU::DS_WRITE_B32 || Opc == AMDGPU::DS_WRITE_B64) {
      unsigned Size = (Opc == AMDGPU::DS_WRITE_B64) ? 8 : 4;
      MachineBasicBlock::iterator Match = findMatchingDSInst(I, Size);
      if (Match != E) {
        Modified = true;
        I = mergeWrite2Pair(I, Match, Size);
      } else {
        ++I;
      }

      continue;
    }

    ++I;
  }

  return Modified;
}

bool SILoadStoreOptimizer::runOnMachineFunction(MachineFunction &MF) {
  if (skipFunction(*MF.getFunction()))
    return false;

  const SISubtarget &STM = MF.getSubtarget<SISubtarget>();
  if (!STM.loadStoreOptEnabled())
    return false;

  TII = STM.getInstrInfo();
  TRI = &TII->getRegisterInfo();

  MRI = &MF.getRegInfo();

  LIS = &getAnalysis<LiveIntervals>();

  DEBUG(dbgs() << "Running SILoadStoreOptimizer\n");

  assert(!MRI->isSSA());

  bool Modified = false;

  for (MachineBasicBlock &MBB : MF)
    Modified |= optimizeBlock(MBB);

  return Modified;
}
    436