//===-- AMDGPUTargetMachine.cpp - TargetMachine for hw codegen targets-----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief The AMDGPU target machine contains all of the hardware-specific
/// information needed to emit code for R600 and SI GPUs.
//
//===----------------------------------------------------------------------===//

#include "AMDGPUTargetMachine.h"
#include "AMDGPU.h"
#include "R600ISelLowering.h"
#include "R600InstrInfo.h"
#include "R600MachineScheduler.h"
#include "SIISelLowering.h"
#include "SIInstrInfo.h"
#include "llvm/Analysis/Passes.h"
#include "llvm/Analysis/Verifier.h"
#include "llvm/CodeGen/MachineFunctionAnalysis.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/PassManager.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_os_ostream.h"
#include "llvm/Transforms/IPO.h"
#include "llvm/Transforms/Scalar.h"

using namespace llvm;

extern "C" void LLVMInitializeR600Target() {
  // Register the target.
  RegisterTargetMachine<AMDGPUTargetMachine> X(TheAMDGPUTarget);
}

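// Factory for the custom R600 machine scheduler: wrap the R600 scheduling
// strategy in the generic ScheduleDAGMI driver.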
static ScheduleDAGInstrs *createR600MachineScheduler(MachineSchedContext *C) {
  return new ScheduleDAGMI(C, new R600SchedStrategy());
}

static MachineSchedRegistry
SchedCustomRegistry("r600", "Run R600's custom scheduler",
                    createR600MachineScheduler);

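// One target machine serves both the R600 family and Southern Islands (SI);
// the subtarget's generation selects the matching instruction info and
// target lowering below.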
AMDGPUTargetMachine::AMDGPUTargetMachine(const Target &T, StringRef TT,
                                         StringRef CPU, StringRef FS,
                                         TargetOptions Options, Reloc::Model RM,
                                         CodeModel::Model CM,
                                         CodeGenOpt::Level OptLevel)
  : LLVMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OptLevel),
    Subtarget(TT, CPU, FS),
    Layout(Subtarget.getDataLayout()),
    FrameLowering(TargetFrameLowering::StackGrowsUp,
                  16, // Stack alignment
                  0),
    IntrinsicInfo(this),
    InstrItins(&Subtarget.getInstrItineraryData()) {
  // TLInfo uses InstrInfo, so it must be initialized after it.
  if (Subtarget.getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS) {
    InstrInfo.reset(new R600InstrInfo(*this));
    TLInfo.reset(new R600TargetLowering(*this));
  } else {
    InstrInfo.reset(new SIInstrInfo(*this));
    TLInfo.reset(new SITargetLowering(*this));
  }
  initAsmInfo();
}

AMDGPUTargetMachine::~AMDGPUTargetMachine() {
}

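// Pass configuration: selects between the R600 and SI pass pipelines based on
// the subtarget generation, and enables the custom R600 machine scheduler on
// pre-SI hardware.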
namespace {
class AMDGPUPassConfig : public TargetPassConfig {
public:
  AMDGPUPassConfig(AMDGPUTargetMachine *TM, PassManagerBase &PM)
    : TargetPassConfig(TM, PM) {
    const AMDGPUSubtarget &ST = TM->getSubtarget<AMDGPUSubtarget>();
    if (ST.getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS) {
      enablePass(&MachineSchedulerID);
      MachineSchedRegistry::setDefault(createR600MachineScheduler);
    }
  }

  AMDGPUTargetMachine &getAMDGPUTargetMachine() const {
    return getTM<AMDGPUTargetMachine>();
  }

  virtual bool addPreISel();
  virtual bool addInstSelector();
  virtual bool addPreRegAlloc();
  virtual bool addPostRegAlloc();
  virtual bool addPreSched2();
  virtual bool addPreEmitPass();
};
} // End of anonymous namespace

TargetPassConfig *AMDGPUTargetMachine::createPassConfig(PassManagerBase &PM) {
  return new AMDGPUPassConfig(this, PM);
}

//===----------------------------------------------------------------------===//
// AMDGPU Analysis Pass Setup
//===----------------------------------------------------------------------===//

void AMDGPUTargetMachine::addAnalysisPasses(PassManagerBase &PM) {
  // Add the target-independent BasicTTI pass first, then our AMDGPU pass. This
  // allows the AMDGPU pass to delegate to the target-independent layer when
  // appropriate.
  PM.add(createBasicTargetTransformInfoPass(this));
  PM.add(createAMDGPUTargetTransformInfoPass(this));
}

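// IR passes run just before instruction selection: flatten the CFG for all
// targets, then either structurize and annotate control flow for SI or replace
// texture intrinsics for R600.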
bool AMDGPUPassConfig::addPreISel() {
  const AMDGPUSubtarget &ST = TM->getSubtarget<AMDGPUSubtarget>();
  addPass(createFlattenCFGPass());
  if (ST.getGeneration() > AMDGPUSubtarget::NORTHERN_ISLANDS) {
    addPass(createStructurizeCFGPass());
    addPass(createSIAnnotateControlFlowPass());
  } else {
    addPass(createR600TextureIntrinsicsReplacer());
  }
  return false;
}

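// Run the AMDGPU DAG instruction selector; indirect addressing is lowered
// afterwards on R600-family targets only.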
bool AMDGPUPassConfig::addInstSelector() {
  addPass(createAMDGPUISelDag(getAMDGPUTargetMachine()));

  const AMDGPUSubtarget &ST = TM->getSubtarget<AMDGPUSubtarget>();
  if (ST.getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS) {
    // The callbacks this pass uses are not implemented yet on SI.
    addPass(createAMDGPUIndirectAddressingPass(*TM));
  }
  return false;
}

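// Before register allocation: convert to ISA form, then merge vector registers
// on R600 or fix SGPR copies on SI.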
bool AMDGPUPassConfig::addPreRegAlloc() {
  addPass(createAMDGPUConvertToISAPass(*TM));
  const AMDGPUSubtarget &ST = TM->getSubtarget<AMDGPUSubtarget>();

  if (ST.getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS) {
    addPass(createR600VectorRegMerger(*TM));
  } else {
    addPass(createSIFixSGPRCopiesPass(*TM));
  }
  return false;
}

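// After register allocation, SI still needs its wait instructions inserted.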
bool AMDGPUPassConfig::addPostRegAlloc() {
  const AMDGPUSubtarget &ST = TM->getSubtarget<AMDGPUSubtarget>();

  if (ST.getGeneration() > AMDGPUSubtarget::NORTHERN_ISLANDS) {
    addPass(createSIInsertWaits(*TM));
  }
  return false;
}

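// Before the second scheduling pass: emit clause markers on R600, then run the
// if-converter for both families.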
bool AMDGPUPassConfig::addPreSched2() {
  const AMDGPUSubtarget &ST = TM->getSubtarget<AMDGPUSubtarget>();

  if (ST.getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS) {
    addPass(createR600EmitClauseMarkers(*TM));
  }
  addPass(&IfConverterID);
  return false;
}

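// Final lowering before emission: on R600, structurize the CFG, expand special
// instructions, finalize bundles, packetize, and finalize control flow; on SI,
// lower control flow.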
bool AMDGPUPassConfig::addPreEmitPass() {
  const AMDGPUSubtarget &ST = TM->getSubtarget<AMDGPUSubtarget>();
  if (ST.getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS) {
    addPass(createAMDGPUCFGStructurizerPass(*TM));
    addPass(createR600ExpandSpecialInstrsPass(*TM));
    addPass(&FinalizeMachineBundlesID);
    addPass(createR600Packetizer(*TM));
    addPass(createR600ControlFlowFinalizer(*TM));
  } else {
    addPass(createSILowerControlFlowPass(*TM));
  }

  return false;
}