//===-- AMDGPUTargetTransformInfo.cpp - AMDGPU specific TTI pass ---------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// \file
// This file implements a TargetTransformInfo analysis pass specific to the
// AMDGPU target machine. It uses the target's detailed information to provide
// more precise answers to certain TTI queries, while letting the target
// independent and default TTI implementations handle the rest.
//
//===----------------------------------------------------------------------===//

#include "AMDGPUTargetTransformInfo.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/CostTable.h"
#include "llvm/Target/TargetLowering.h"
using namespace llvm;

#define DEBUG_TYPE "AMDGPUtti"

void AMDGPUTTIImpl::getUnrollingPreferences(Loop *L,
                                            TTI::UnrollingPreferences &UP) {
  UP.Threshold = 300; // Twice the default.
  UP.MaxCount = UINT_MAX;
  UP.Partial = true;

  // TODO: Do we want runtime unrolling?

  // The DataLayout is per-module, so hoist it out of the block loop.
  const DataLayout &DL = L->getHeader()->getModule()->getDataLayout();
  for (const BasicBlock *BB : L->getBlocks()) {
    for (const Instruction &I : *BB) {
      const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(&I);
      if (!GEP || GEP->getAddressSpace() != AMDGPUAS::PRIVATE_ADDRESS)
        continue;

      const Value *Ptr = GEP->getPointerOperand();
      const AllocaInst *Alloca =
          dyn_cast<AllocaInst>(GetUnderlyingObject(Ptr, DL));
      if (Alloca) {
        // We want to do whatever we can to limit the number of alloca
        // instructions that make it through to the code generator. Allocas
        // require us to use indirect addressing, which is slow and prone to
        // compiler bugs. If this loop does an address calculation on an
        // alloca ptr, then we want to use a higher than normal loop unroll
        // threshold. This will give SROA a better chance to eliminate these
        // allocas.
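        //
        // For example (an illustrative fragment, not from this file):
        //
        //   float Tmp[4];
        //   for (int I = 0; I < 4; ++I)
        //     Tmp[I] = Foo(I);
        //
        // Fully unrolling this loop turns every GEP index into a constant,
        // letting SROA promote Tmp to SSA values and delete the alloca.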
        //
        // Don't use the maximum allowed value here as it will make some
        // programs way too big.
        UP.Threshold = 800;
      }
    }
  }
}

unsigned AMDGPUTTIImpl::getNumberOfRegisters(bool Vec) {
  if (Vec)
    return 0;

  // Number of VGPRs on SI.
  if (ST->getGeneration() >= AMDGPUSubtarget::SOUTHERN_ISLANDS)
    return 256;

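  // Pre-GCN (R600-family) targets have a file of 128 vector registers, each
  // with four 32-bit channels, hence the 4 * 128 below.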
  return 4 * 128; // XXX - 4 channels. Should these count as vector instead?
}

unsigned AMDGPUTTIImpl::getRegisterBitWidth(bool) { return 32; }

unsigned AMDGPUTTIImpl::getMaxInterleaveFactor(unsigned VF) {
  // Semi-arbitrary large amount.
  return 64;
}

unsigned AMDGPUTTIImpl::getCFInstrCost(unsigned Opcode) {
  // XXX - For some reason this isn't called for switch.
  switch (Opcode) {
  case Instruction::Br:
  case Instruction::Ret:
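    // Branches and returns get a high fixed cost here: on a GPU, divergent
    // control flow involves exec-mask manipulation and can serialize
    // execution across the wave.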
    return 10;
  default:
    return BaseT::getCFInstrCost(Opcode);
  }
}

int AMDGPUTTIImpl::getVectorInstrCost(unsigned Opcode, Type *ValTy,
                                      unsigned Index) {
  switch (Opcode) {
  case Instruction::ExtractElement:
    // Extracts with a known, constant index are free. Dynamic indexing
    // (callers pass ~0u when the index is unknown) isn't free and is best
    // avoided.
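    //
    // For example (IR sketch, not from this file):
    //   %a = extractelement <4 x i32> %v, i32 1   ; constant index: free
    //   %b = extractelement <4 x i32> %v, i32 %i  ; unknown index: cost 2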
    return Index == ~0u ? 2 : 0;
  default:
    return BaseT::getVectorInstrCost(Opcode, ValTy, Index);
  }
}

static bool isIntrinsicSourceOfDivergence(const TargetIntrinsicInfo *TII,
                                          const IntrinsicInst *I) {
  switch (I->getIntrinsicID()) {
  default:
    return false;
  case Intrinsic::not_intrinsic:
    // This means we have an intrinsic that isn't defined in
    // IntrinsicsAMDGPU.td, so break out to the name-based lookup below.
    break;

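  // Each of these reads or computes a per-lane quantity (a workitem id, a
  // lane count derived from the exec mask, or a per-pixel interpolated
  // value), so the result can differ between lanes of a wave.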
  case Intrinsic::amdgcn_interp_p1:
  case Intrinsic::amdgcn_interp_p2:
  case Intrinsic::amdgcn_mbcnt_hi:
  case Intrinsic::amdgcn_mbcnt_lo:
  case Intrinsic::r600_read_tidig_x:
  case Intrinsic::r600_read_tidig_y:
  case Intrinsic::r600_read_tidig_z:
    return true;
  }

  StringRef Name = I->getCalledFunction()->getName();
  switch (TII->lookupName((const char *)Name.bytes_begin(), Name.size())) {
  default:
    return false;
  case AMDGPUIntrinsic::SI_tid:
  case AMDGPUIntrinsic::SI_fs_interp:
    return true;
  }
}

static bool isArgPassedInSGPR(const Argument *A) {
  const Function *F = A->getParent();
  unsigned ShaderType = AMDGPU::getShaderType(*F);

  // Arguments to compute shaders are never a source of divergence.
  if (ShaderType == ShaderType::COMPUTE)
    return true;

  // For non-compute shaders, SGPR inputs are marked with either inreg or byval.
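  // (Attribute indices are biased by one: index 0 refers to the return
  // value, so argument N is queried at index N + 1.)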
  if (F->getAttributes().hasAttribute(A->getArgNo() + 1, Attribute::InReg) ||
      F->getAttributes().hasAttribute(A->getArgNo() + 1, Attribute::ByVal))
    return true;

  // Everything else is in VGPRs.
  return false;
}

/// \returns true if the result of the value \p V could potentially be
/// different across workitems in a wavefront.
bool AMDGPUTTIImpl::isSourceOfDivergence(const Value *V) const {
  if (const Argument *A = dyn_cast<Argument>(V))
    return !isArgPassedInSGPR(A);

  // Loads from the private address space are divergent, because threads
  // can execute the load instruction with the same inputs and get different
  // results.
  //
  // All other loads are not divergent, because if threads issue loads with the
  // same arguments, they will always get the same result.
  if (const LoadInst *Load = dyn_cast<LoadInst>(V))
    return Load->getPointerAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS;

  if (const IntrinsicInst *Intrinsic = dyn_cast<IntrinsicInst>(V)) {
    const TargetMachine &TM = getTLI()->getTargetMachine();
    return isIntrinsicSourceOfDivergence(TM.getIntrinsicInfo(), Intrinsic);
  }

  // Assume all function calls are a source of divergence.
  if (isa<CallInst>(V) || isa<InvokeInst>(V))
    return true;

  return false;
}
    181