//===- AMDGPUPerfHintAnalysis.cpp - analysis of functions memory traffic --===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief Analyzes if a function is potentially memory bound and if a kernel
/// may benefit from limiting the number of waves to reduce cache thrashing.
///
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUPerfHintAnalysis.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/ValueMap.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"

using namespace llvm;

#define DEBUG_TYPE "amdgpu-perf-hint"

static cl::opt<unsigned>
    MemBoundThresh("amdgpu-membound-threshold", cl::init(50), cl::Hidden,
                   cl::desc("Function mem bound threshold in %"));

static cl::opt<unsigned>
    LimitWaveThresh("amdgpu-limit-wave-threshold", cl::init(50), cl::Hidden,
                    cl::desc("Kernel limit wave threshold in %"));

static cl::opt<unsigned>
    IAWeight("amdgpu-indirect-access-weight", cl::init(1000), cl::Hidden,
             cl::desc("Indirect access memory instruction weight"));

static cl::opt<unsigned>
    LSWeight("amdgpu-large-stride-weight", cl::init(1000), cl::Hidden,
             cl::desc("Large stride memory access weight"));

static cl::opt<unsigned>
    LargeStrideThresh("amdgpu-large-stride-threshold", cl::init(64), cl::Hidden,
                      cl::desc("Large stride memory access threshold"));

STATISTIC(NumMemBound, "Number of functions marked as memory bound");
STATISTIC(NumLimitWave, "Number of functions marked as needing limit wave");

char llvm::AMDGPUPerfHintAnalysis::ID = 0;
char &llvm::AMDGPUPerfHintAnalysisID = AMDGPUPerfHintAnalysis::ID;

INITIALIZE_PASS(AMDGPUPerfHintAnalysis, DEBUG_TYPE,
                "Analysis if a function is memory bound", true, true)

namespace {

struct AMDGPUPerfHint {
  friend AMDGPUPerfHintAnalysis;

public:
  AMDGPUPerfHint(AMDGPUPerfHintAnalysis::FuncInfoMap &FIM_,
                 const TargetLowering *TLI_)
      : FIM(FIM_), DL(nullptr), TLI(TLI_) {}

  void runOnFunction(Function &F);

private:
  struct MemAccessInfo {
    const Value *V;
    const Value *Base;
    int64_t Offset;
    MemAccessInfo() : V(nullptr), Base(nullptr), Offset(0) {}
    bool isLargeStride(MemAccessInfo &Reference) const;
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
    Printable print() const {
      return Printable([this](raw_ostream &OS) {
        OS << "Value: " << *V << '\n'
           << "Base: " << *Base << " Offset: " << Offset << '\n';
      });
    }
#endif
  };

  MemAccessInfo makeMemAccessInfo(Instruction *) const;

  MemAccessInfo LastAccess; // Last memory access info

  AMDGPUPerfHintAnalysis::FuncInfoMap &FIM;

  const DataLayout *DL;

  AMDGPUAS AS;

  const TargetLowering *TLI;

  void visit(const Function &F);
  static bool isMemBound(const AMDGPUPerfHintAnalysis::FuncInfo &F);
  static bool needLimitWave(const AMDGPUPerfHintAnalysis::FuncInfo &F);

  bool isIndirectAccess(const Instruction *Inst) const;

  /// Check if the instruction is a large-stride memory access.
  /// The purpose is to identify memory access patterns like:
  /// x = a[i];
  /// y = a[i+1000];
  /// z = a[i+2000];
  /// In the above example, the second and third memory accesses will be
  /// marked as large-stride accesses.
  bool isLargeStride(const Instruction *Inst);

  bool isGlobalAddr(const Value *V) const;
  bool isLocalAddr(const Value *V) const;
  bool isConstantAddr(const Value *V) const;
};

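// Return the pointer operand of a load, store, atomic, or memory intrinsic,
// or nullptr if Inst does not access memory.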
static const Value *getMemoryInstrPtr(const Instruction *Inst) {
  if (auto LI = dyn_cast<LoadInst>(Inst)) {
    return LI->getPointerOperand();
  }
  if (auto SI = dyn_cast<StoreInst>(Inst)) {
    return SI->getPointerOperand();
  }
  if (auto AI = dyn_cast<AtomicCmpXchgInst>(Inst)) {
    return AI->getPointerOperand();
  }
  if (auto AI = dyn_cast<AtomicRMWInst>(Inst)) {
    return AI->getPointerOperand();
  }
  if (auto MI = dyn_cast<AnyMemIntrinsic>(Inst)) {
    return MI->getRawDest();
  }

  return nullptr;
}

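// A global-memory access is "indirect" if its address is itself loaded from
// memory, a pointer-chasing pattern that tends to defeat latency hiding.
// Starting from the access's pointer operand, walk backward through GEPs,
// casts, binary operators, selects, and extractelements; if any address input
// is produced by a load from global, local, or constant memory, the access is
// indirect. For example (illustrative IR, not taken from this file):
//
//   %idx = load i32, i32 addrspace(1)* %idxp       ; index loaded from memory
//   %gep = getelementptr float, float addrspace(1)* %a, i32 %idx
//   %v   = load float, float addrspace(1)* %gep    ; -> indirect access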
bool AMDGPUPerfHint::isIndirectAccess(const Instruction *Inst) const {
  LLVM_DEBUG(dbgs() << "[isIndirectAccess] " << *Inst << '\n');
  SmallSet<const Value *, 32> WorkSet;
  SmallSet<const Value *, 32> Visited;
  if (const Value *MO = getMemoryInstrPtr(Inst)) {
    if (isGlobalAddr(MO))
      WorkSet.insert(MO);
  }

  while (!WorkSet.empty()) {
    const Value *V = *WorkSet.begin();
    WorkSet.erase(*WorkSet.begin());
    if (!Visited.insert(V).second)
      continue;
    LLVM_DEBUG(dbgs() << "  check: " << *V << '\n');

    if (auto LD = dyn_cast<LoadInst>(V)) {
      auto M = LD->getPointerOperand();
      if (isGlobalAddr(M) || isLocalAddr(M) || isConstantAddr(M)) {
        LLVM_DEBUG(dbgs() << "    is IA\n");
        return true;
      }
      continue;
    }

    if (auto GEP = dyn_cast<GetElementPtrInst>(V)) {
      auto P = GEP->getPointerOperand();
      WorkSet.insert(P);
      for (unsigned I = 1, E = GEP->getNumIndices() + 1; I != E; ++I)
        WorkSet.insert(GEP->getOperand(I));
      continue;
    }

    if (auto U = dyn_cast<UnaryInstruction>(V)) {
      WorkSet.insert(U->getOperand(0));
      continue;
    }

    if (auto BO = dyn_cast<BinaryOperator>(V)) {
      WorkSet.insert(BO->getOperand(0));
      WorkSet.insert(BO->getOperand(1));
      continue;
    }

    if (auto S = dyn_cast<SelectInst>(V)) {
      WorkSet.insert(S->getFalseValue());
      WorkSet.insert(S->getTrueValue());
      continue;
    }

    if (auto E = dyn_cast<ExtractElementInst>(V)) {
      WorkSet.insert(E->getVectorOperand());
      continue;
    }

    LLVM_DEBUG(dbgs() << "    dropped\n");
  }

  LLVM_DEBUG(dbgs() << "  is not IA\n");
  return false;
}

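// Collect per-function statistics: total instructions, memory instructions,
// and the indirect-access and large-stride subsets. Counts of defined callees
// are computed recursively and folded into the caller, so the FuncInfo for F
// approximates the traffic of F plus everything it calls. Results are
// memoized in FIM; a function already present is not visited again.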
void AMDGPUPerfHint::visit(const Function &F) {
  auto FIP = FIM.insert(std::make_pair(&F, AMDGPUPerfHintAnalysis::FuncInfo()));
  if (!FIP.second)
    return;

  AMDGPUPerfHintAnalysis::FuncInfo &FI = FIP.first->second;

  LLVM_DEBUG(dbgs() << "[AMDGPUPerfHint] process " << F.getName() << '\n');

  for (auto &B : F) {
    LastAccess = MemAccessInfo();
    for (auto &I : B) {
      if (getMemoryInstrPtr(&I)) {
        if (isIndirectAccess(&I))
          ++FI.IAMInstCount;
        if (isLargeStride(&I))
          ++FI.LSMInstCount;
        ++FI.MemInstCount;
        ++FI.InstCount;
        continue;
      }
      CallSite CS(const_cast<Instruction *>(&I));
      if (CS) {
        Function *Callee = CS.getCalledFunction();
        if (!Callee || Callee->isDeclaration()) {
          ++FI.InstCount;
          continue;
        }
        if (&F == Callee) // Handle immediate recursion
          continue;

        visit(*Callee);
        auto Loc = FIM.find(Callee);

        assert(Loc != FIM.end() && "No func info");
        FI.MemInstCount += Loc->second.MemInstCount;
        FI.InstCount += Loc->second.InstCount;
        FI.IAMInstCount += Loc->second.IAMInstCount;
        FI.LSMInstCount += Loc->second.LSMInstCount;
      } else if (auto *GEP = dyn_cast<GetElementPtrInst>(&I)) {
        TargetLoweringBase::AddrMode AM;
        auto *Ptr = GetPointerBaseWithConstantOffset(GEP, AM.BaseOffs, *DL);
        AM.BaseGV = dyn_cast_or_null<GlobalValue>(const_cast<Value *>(Ptr));
        AM.HasBaseReg = !AM.BaseGV;
        if (TLI->isLegalAddressingMode(*DL, AM, GEP->getResultElementType(),
                                       GEP->getPointerAddressSpace()))
          // Offset will likely be folded into load or store
          continue;
        ++FI.InstCount;
      } else {
        ++FI.InstCount;
      }
    }
  }
}

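// Entry point for one function: cache the Module's DataLayout and AMDGPU
// address-space mapping, run the recursive visit, then decide whether the
// function is memory bound and, for kernels (entry functions), whether it
// should limit the number of waves.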
void AMDGPUPerfHint::runOnFunction(Function &F) {
  if (FIM.find(&F) != FIM.end())
    return;

  const Module &M = *F.getParent();
  DL = &M.getDataLayout();
  AS = AMDGPU::getAMDGPUAS(M);

  visit(F);
  auto Loc = FIM.find(&F);

  assert(Loc != FIM.end() && "No func info");
  LLVM_DEBUG(dbgs() << F.getName() << " MemInst: " << Loc->second.MemInstCount
                    << '\n'
                    << " IAMInst: " << Loc->second.IAMInstCount << '\n'
                    << " LSMInst: " << Loc->second.LSMInstCount << '\n'
                    << " TotalInst: " << Loc->second.InstCount << '\n');

  auto &FI = Loc->second;

  if (isMemBound(FI)) {
    LLVM_DEBUG(dbgs() << F.getName() << " is memory bound\n");
    NumMemBound++;
  }

  if (AMDGPU::isEntryFunctionCC(F.getCallingConv()) && needLimitWave(FI)) {
    LLVM_DEBUG(dbgs() << F.getName() << " needs limit wave\n");
    NumLimitWave++;
  }
}

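// A function is memory bound if more than MemBoundThresh percent (50% by
// default) of its counted instructions access memory.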
bool AMDGPUPerfHint::isMemBound(const AMDGPUPerfHintAnalysis::FuncInfo &FI) {
  return FI.MemInstCount * 100 / FI.InstCount > MemBoundThresh;
}

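// Weighted occupancy heuristic: indirect and large-stride accesses count
// IAWeight/LSWeight extra times (1000x by default) on top of the plain
// memory-instruction count. With the defaults, a function with 100
// instructions, 10 of them memory accesses and 1 of those indirect, scores
// (10 + 1 * 1000) * 100 / 100 = 1010 > 50, so its waves get limited; without
// the indirect access it scores 10 and does not.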
bool AMDGPUPerfHint::needLimitWave(const AMDGPUPerfHintAnalysis::FuncInfo &FI) {
  return ((FI.MemInstCount + FI.IAMInstCount * IAWeight +
           FI.LSMInstCount * LSWeight) *
          100 / FI.InstCount) > LimitWaveThresh;
}

bool AMDGPUPerfHint::isGlobalAddr(const Value *V) const {
  if (auto PT = dyn_cast<PointerType>(V->getType())) {
    unsigned As = PT->getAddressSpace();
    // Flat likely points to global too.
    return As == AS.GLOBAL_ADDRESS || As == AS.FLAT_ADDRESS;
  }
  return false;
}

bool AMDGPUPerfHint::isLocalAddr(const Value *V) const {
  if (auto PT = dyn_cast<PointerType>(V->getType()))
    return PT->getAddressSpace() == AS.LOCAL_ADDRESS;
  return false;
}

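// Compare this access against the previous memory access in the same basic
// block (LastAccess is reset at every block boundary in visit()), then record
// it as the new reference point for the next comparison.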
bool AMDGPUPerfHint::isLargeStride(const Instruction *Inst) {
  LLVM_DEBUG(dbgs() << "[isLargeStride] " << *Inst << '\n');

  MemAccessInfo MAI = makeMemAccessInfo(const_cast<Instruction *>(Inst));
  bool IsLargeStride = MAI.isLargeStride(LastAccess);
  if (MAI.Base)
    LastAccess = std::move(MAI);

  return IsLargeStride;
}

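// Decompose the access's pointer into (base, constant offset) via
// GetPointerBaseWithConstantOffset. Local (LDS) accesses are left with a null
// Base so they never participate in large-stride detection.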
AMDGPUPerfHint::MemAccessInfo
AMDGPUPerfHint::makeMemAccessInfo(Instruction *Inst) const {
  MemAccessInfo MAI;
  const Value *MO = getMemoryInstrPtr(Inst);

  LLVM_DEBUG(dbgs() << "[isLargeStride] MO: " << *MO << '\n');
  // Do not treat local-addr memory access as large stride.
  if (isLocalAddr(MO))
    return MAI;

  MAI.V = MO;
  MAI.Base = GetPointerBaseWithConstantOffset(MO, MAI.Offset, *DL);
  return MAI;
}

bool AMDGPUPerfHint::isConstantAddr(const Value *V) const {
  if (auto PT = dyn_cast<PointerType>(V->getType())) {
    unsigned As = PT->getAddressSpace();
    return As == AS.CONSTANT_ADDRESS || As == AS.CONSTANT_ADDRESS_32BIT;
  }
  return false;
}

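// Two accesses form a large stride if they share the same base pointer and
// their constant offsets differ by more than LargeStrideThresh bytes (64 by
// default). For the 4-byte elements in the header example, a[i] and
// a[i+1000] differ by 4000 bytes and qualify.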
bool AMDGPUPerfHint::MemAccessInfo::isLargeStride(
    MemAccessInfo &Reference) const {

  if (!Base || !Reference.Base || Base != Reference.Base)
    return false;

  uint64_t Diff = Offset > Reference.Offset ? Offset - Reference.Offset
                                            : Reference.Offset - Offset;
  bool Result = Diff > LargeStrideThresh;
  LLVM_DEBUG(dbgs() << "[isLargeStride compare]\n"
                    << print() << "<=>\n"
                    << Reference.print() << "Result:" << Result << '\n');
  return Result;
}
} // namespace

bool AMDGPUPerfHintAnalysis::runOnFunction(Function &F) {
  auto *TPC = getAnalysisIfAvailable<TargetPassConfig>();
  if (!TPC)
    return false;

  const TargetMachine &TM = TPC->getTM<TargetMachine>();
  const TargetSubtargetInfo *ST = TM.getSubtargetImpl(F);

  AMDGPUPerfHint Analyzer(FIM, ST->getTargetLowering());
  Analyzer.runOnFunction(F);
  return false;
}

bool AMDGPUPerfHintAnalysis::isMemoryBound(const Function *F) const {
  auto FI = FIM.find(F);
  if (FI == FIM.end())
    return false;

  return AMDGPUPerfHint::isMemBound(FI->second);
}

bool AMDGPUPerfHintAnalysis::needsWaveLimiter(const Function *F) const {
  auto FI = FIM.find(F);
  if (FI == FIM.end())
    return false;

  return AMDGPUPerfHint::needLimitWave(FI->second);
}
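
// A minimal usage sketch for downstream passes (hypothetical client code;
// only isMemoryBound/needsWaveLimiter are defined in this file, the rest is
// the standard legacy pass-manager idiom):
//
//   // In a later pass that declared AMDGPUPerfHintAnalysis as required:
//   auto &PHA = getAnalysis<AMDGPUPerfHintAnalysis>();
//   if (PHA.isMemoryBound(&F))
//     ; // e.g. bias scheduling toward hiding memory latency
//   if (PHA.needsWaveLimiter(&F))
//     ; // e.g. cap occupancy to reduce cache thrashing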
    398