//===-- R600MachineScheduler.cpp - R600 Scheduler Interface -*- C++ -*-----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief R600 Machine Scheduler interface
// TODO: Scheduling is optimised for VLIW4 arch, modify it to support TRANS slot
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "misched"

#include "R600MachineScheduler.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Pass.h"
#include "llvm/PassManager.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

void R600SchedStrategy::initialize(ScheduleDAGMI *dag) {

  DAG = dag;
  TII = static_cast<const R600InstrInfo*>(DAG->TII);
  TRI = static_cast<const R600RegisterInfo*>(DAG->TRI);
  MRI = &DAG->MRI;
  CurInstKind = IDOther;
  CurEmitted = 0;
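  // Slot occupancy mask for the current ALU instruction group: bits 0-3 track
  // the X/Y/Z/W vector slots and bit 4 the trans slot (see pickAlu below).
  // Starting at 31 marks every slot as used, so the first ALU pick begins by
  // opening a fresh group.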
  OccupedSlotsMask = 31;
  InstKindLimit[IDAlu] = TII->getMaxAlusPerClause();
  InstKindLimit[IDOther] = 32;

  const AMDGPUSubtarget &ST = DAG->TM.getSubtarget<AMDGPUSubtarget>();
  InstKindLimit[IDFetch] = ST.getTexVTXClauseSize();
  AluInstCount = 0;
  FetchInstCount = 0;
}

void R600SchedStrategy::MoveUnits(std::vector<SUnit *> &QSrc,
                                  std::vector<SUnit *> &QDst)
{
  QDst.insert(QDst.end(), QSrc.begin(), QSrc.end());
  QSrc.clear();
}

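// Rough occupancy bound, assuming about 248 GPRs are shared by the wavefronts
// in flight (the constant used below): a kernel needing GPRCount registers per
// wavefront can keep at most 248 / GPRCount wavefronts resident.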
static
unsigned getWFCountLimitedByGPR(unsigned GPRCount) {
  assert (GPRCount && "GPRCount cannot be 0");
  return 248 / GPRCount;
}

SUnit* R600SchedStrategy::pickNode(bool &IsTopNode) {
  SUnit *SU = 0;
  NextInstKind = IDOther;

  IsTopNode = false;

  // Check if we might want to switch the current clause type.
  bool AllowSwitchToAlu = (CurEmitted >= InstKindLimit[CurInstKind]) ||
      (Available[CurInstKind].empty());
  bool AllowSwitchFromAlu = (CurEmitted >= InstKindLimit[CurInstKind]) &&
      (!Available[IDFetch].empty() || !Available[IDOther].empty());

  if (CurInstKind == IDAlu && !Available[IDFetch].empty()) {
    // We use the heuristic provided by the AMD Accelerated Parallel Processing
    // OpenCL Programming Guide:
    // The approx. number of WFs that allows ALU instructions to hide the TEX
    // latency is:
    // 500 (cycles for TEX) / (AluFetchRatio * 8 (cycles for ALU))
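    // For example, with an estimated ALU:fetch ratio of 4 this gives
    // 500 / (4 * 8) = 62.5 / 4 ~= 15 wavefronts to hide the TEX latency.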
    float ALUFetchRatioEstimate =
        (AluInstCount + AvailablesAluCount() + Pending[IDAlu].size()) /
        (FetchInstCount + Available[IDFetch].size());
    unsigned NeededWF = 62.5f / ALUFetchRatioEstimate;
    DEBUG( dbgs() << NeededWF << " approx. Wavefronts Required\n" );
    // We assume the local GPR requirements to be "dominated" by the requirement
    // of the TEX clause (which consumes 128-bit regs); ALU instructions before
    // and after TEX are indeed likely to consume or generate values from/for
    // the TEX clause.
    // Available[IDFetch].size() * 2: GPRs required in the Fetch clause.
    // We assume that fetch instructions are either TnXYZW = TEX TnXYZW (needs
    // one GPR) or TmXYZW = TnXYZW (needs 2 GPRs).
    // (TODO: use RegisterPressure)
    // If we are going to use too many GPRs, we flush Fetch instructions to
    // lower register pressure on 128-bit regs.
    unsigned NearRegisterRequirement = 2 * Available[IDFetch].size();
    if (NeededWF > getWFCountLimitedByGPR(NearRegisterRequirement))
      AllowSwitchFromAlu = true;
  }

  // We want to schedule AR defs as soon as possible to make sure they aren't
  // put in a different ALU clause from their uses.
  if (!SU && !UnscheduledARDefs.empty()) {
    SU = UnscheduledARDefs[0];
    UnscheduledARDefs.erase(UnscheduledARDefs.begin());
    NextInstKind = IDAlu;
  }

  if (!SU && ((AllowSwitchToAlu && CurInstKind != IDAlu) ||
      (!AllowSwitchFromAlu && CurInstKind == IDAlu))) {
    // try to pick ALU
    SU = pickAlu();
    if (!SU && !PhysicalRegCopy.empty()) {
      SU = PhysicalRegCopy.front();
      PhysicalRegCopy.erase(PhysicalRegCopy.begin());
    }
    if (SU) {
      if (CurEmitted >= InstKindLimit[IDAlu])
        CurEmitted = 0;
      NextInstKind = IDAlu;
    }
  }

  if (!SU) {
    // try to pick FETCH
    SU = pickOther(IDFetch);
    if (SU)
      NextInstKind = IDFetch;
  }

  // try to pick other
  if (!SU) {
    SU = pickOther(IDOther);
    if (SU)
      NextInstKind = IDOther;
  }

  // We want to schedule the AR uses as late as possible to make sure that
  // the AR defs have been released.
  if (!SU && !UnscheduledARUses.empty()) {
    SU = UnscheduledARUses[0];
    UnscheduledARUses.erase(UnscheduledARUses.begin());
    NextInstKind = IDAlu;
  }

  DEBUG(
      if (SU) {
        dbgs() << " ** Pick node **\n";
        SU->dump(DAG);
      } else {
        dbgs() << "NO NODE \n";
        for (unsigned i = 0; i < DAG->SUnits.size(); i++) {
          const SUnit &S = DAG->SUnits[i];
          if (!S.isScheduled)
            S.dump(DAG);
        }
      }
  );

  return SU;
}

void R600SchedStrategy::schedNode(SUnit *SU, bool IsTopNode) {
  if (NextInstKind != CurInstKind) {
    DEBUG(dbgs() << "Instruction Type Switch\n");
    if (NextInstKind != IDAlu)
      OccupedSlotsMask |= 31;
    CurEmitted = 0;
    CurInstKind = NextInstKind;
  }

  if (CurInstKind == IDAlu) {
    AluInstCount ++;
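    // CurEmitted approximates how full the current ALU clause is: a whole
    // instruction group counts as 4 slots, and each ALU_LITERAL_X operand is
    // counted as well (literal constants take extra room in the clause).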
    switch (getAluKind(SU)) {
    case AluT_XYZW:
      CurEmitted += 4;
      break;
    case AluDiscarded:
      break;
    default: {
      ++CurEmitted;
      for (MachineInstr::mop_iterator It = SU->getInstr()->operands_begin(),
          E = SU->getInstr()->operands_end(); It != E; ++It) {
        MachineOperand &MO = *It;
        if (MO.isReg() && MO.getReg() == AMDGPU::ALU_LITERAL_X)
          ++CurEmitted;
      }
    }
    }
  } else {
    ++CurEmitted;
  }

  DEBUG(dbgs() << CurEmitted << " Instructions Emitted in this clause\n");

  if (CurInstKind != IDFetch) {
    MoveUnits(Pending[IDFetch], Available[IDFetch]);
  } else
    FetchInstCount++;
}

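// COPYs whose source is a physical register are kept in their own queue
// (PhysicalRegCopy) and are only emitted by pickNode when no other ALU
// instruction is available.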
static bool
isPhysicalRegCopy(MachineInstr *MI) {
  if (MI->getOpcode() != AMDGPU::COPY)
    return false;

  return !TargetRegisterInfo::isVirtualRegister(MI->getOperand(1).getReg());
}

void R600SchedStrategy::releaseTopNode(SUnit *SU) {
  DEBUG(dbgs() << "Top Releasing ";SU->dump(DAG););
}

void R600SchedStrategy::releaseBottomNode(SUnit *SU) {
  DEBUG(dbgs() << "Bottom Releasing ";SU->dump(DAG););
  if (isPhysicalRegCopy(SU->getInstr())) {
    PhysicalRegCopy.push_back(SU);
    return;
  }

  int IK = getInstKind(SU);

  // Check for AR register defines
  for (MachineInstr::const_mop_iterator I = SU->getInstr()->operands_begin(),
                                        E = SU->getInstr()->operands_end();
                                        I != E; ++I) {
    if (I->isReg() && I->getReg() == AMDGPU::AR_X) {
      if (I->isDef()) {
        UnscheduledARDefs.push_back(SU);
      } else {
        UnscheduledARUses.push_back(SU);
      }
      return;
    }
  }

  // There is no export clause, so we can schedule one as soon as it's ready.
  if (IK == IDOther)
    Available[IDOther].push_back(SU);
  else
    Pending[IK].push_back(SU);
}

bool R600SchedStrategy::regBelongsToClass(unsigned Reg,
                                          const TargetRegisterClass *RC) const {
  if (!TargetRegisterInfo::isVirtualRegister(Reg)) {
    return RC->contains(Reg);
  } else {
    return MRI->getRegClass(Reg) == RC;
  }
}

R600SchedStrategy::AluKind R600SchedStrategy::getAluKind(SUnit *SU) const {
  MachineInstr *MI = SU->getInstr();

  if (TII->isTransOnly(MI))
    return AluTrans;

  switch (MI->getOpcode()) {
  case AMDGPU::PRED_X:
    return AluPredX;
  case AMDGPU::INTERP_PAIR_XY:
  case AMDGPU::INTERP_PAIR_ZW:
  case AMDGPU::INTERP_VEC_LOAD:
  case AMDGPU::DOT_4:
    return AluT_XYZW;
  case AMDGPU::COPY:
    if (MI->getOperand(1).isUndef()) {
      // MI will become a KILL, don't consider it in scheduling
      return AluDiscarded;
    }
  default:
    break;
  }

  // Does the instruction take a whole IG?
  // XXX: Is it possible to add a helper function in R600InstrInfo that can
  // be used here and in R600PacketizerList::isSoloInstruction()?
  if (TII->isVector(*MI) ||
      TII->isCubeOp(MI->getOpcode()) ||
      TII->isReductionOp(MI->getOpcode()) ||
      MI->getOpcode() == AMDGPU::GROUP_BARRIER) {
    return AluT_XYZW;
  }

  if (TII->isLDSInstr(MI->getOpcode())) {
    return AluT_X;
  }

  // Is the result already assigned to a channel?
  unsigned DestSubReg = MI->getOperand(0).getSubReg();
  switch (DestSubReg) {
  case AMDGPU::sub0:
    return AluT_X;
  case AMDGPU::sub1:
    return AluT_Y;
  case AMDGPU::sub2:
    return AluT_Z;
  case AMDGPU::sub3:
    return AluT_W;
  default:
    break;
  }

  // Is the result already a member of an X/Y/Z/W class?
  unsigned DestReg = MI->getOperand(0).getReg();
  if (regBelongsToClass(DestReg, &AMDGPU::R600_TReg32_XRegClass) ||
      regBelongsToClass(DestReg, &AMDGPU::R600_AddrRegClass))
    return AluT_X;
  if (regBelongsToClass(DestReg, &AMDGPU::R600_TReg32_YRegClass))
    return AluT_Y;
  if (regBelongsToClass(DestReg, &AMDGPU::R600_TReg32_ZRegClass))
    return AluT_Z;
  if (regBelongsToClass(DestReg, &AMDGPU::R600_TReg32_WRegClass))
    return AluT_W;
  if (regBelongsToClass(DestReg, &AMDGPU::R600_Reg128RegClass))
    return AluT_XYZW;

  return AluAny;
}

int R600SchedStrategy::getInstKind(SUnit* SU) {
  int Opcode = SU->getInstr()->getOpcode();

  if (TII->usesTextureCache(Opcode) || TII->usesVertexCache(Opcode))
    return IDFetch;

  if (TII->isALUInstr(Opcode)) {
    return IDAlu;
  }

  switch (Opcode) {
  case AMDGPU::PRED_X:
  case AMDGPU::COPY:
  case AMDGPU::CONST_COPY:
  case AMDGPU::INTERP_PAIR_XY:
  case AMDGPU::INTERP_PAIR_ZW:
  case AMDGPU::INTERP_VEC_LOAD:
  case AMDGPU::DOT_4:
    return IDAlu;
  default:
    return IDOther;
  }
}

SUnit *R600SchedStrategy::PopInst(std::vector<SUnit *> &Q) {
  if (Q.empty())
    return NULL;
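  // Scan the queue from the back, tentatively adding each candidate to the
  // current instruction group; only pop and return it if the group still
  // satisfies the target's constant-read limitations.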
  for (std::vector<SUnit *>::reverse_iterator It = Q.rbegin(), E = Q.rend();
      It != E; ++It) {
    SUnit *SU = *It;
    InstructionsGroupCandidate.push_back(SU->getInstr());
    if (TII->fitsConstReadLimitations(InstructionsGroupCandidate)) {
      InstructionsGroupCandidate.pop_back();
      Q.erase((It + 1).base());
      return SU;
    } else {
      InstructionsGroupCandidate.pop_back();
    }
  }
  return NULL;
}

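// Move everything pending for the ALU clause into the per-kind buckets
// (AvailableAlus) that pickAlu draws from.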
void R600SchedStrategy::LoadAlu() {
  std::vector<SUnit *> &QSrc = Pending[IDAlu];
  for (unsigned i = 0, e = QSrc.size(); i < e; ++i) {
    AluKind AK = getAluKind(QSrc[i]);
    AvailableAlus[AK].push_back(QSrc[i]);
  }
  QSrc.clear();
}

void R600SchedStrategy::PrepareNextSlot() {
  DEBUG(dbgs() << "New Slot\n");
  assert (OccupedSlotsMask && "Slot wasn't filled");
  OccupedSlotsMask = 0;
  InstructionsGroupCandidate.clear();
  LoadAlu();
}

void R600SchedStrategy::AssignSlot(MachineInstr* MI, unsigned Slot) {
  int DstIndex = TII->getOperandIdx(MI->getOpcode(), AMDGPU::OpName::dst);
  if (DstIndex == -1) {
    return;
  }
  unsigned DestReg = MI->getOperand(DstIndex).getReg();
  // PressureRegister crashes if an operand is def'd and used in the same
  // instruction and we try to constrain its regclass.
  for (MachineInstr::mop_iterator It = MI->operands_begin(),
      E = MI->operands_end(); It != E; ++It) {
    MachineOperand &MO = *It;
    if (MO.isReg() && !MO.isDef() &&
        MO.getReg() == DestReg)
      return;
  }
  // Constrain the regclass of DestReg to assign it to Slot.
  switch (Slot) {
  case 0:
    MRI->constrainRegClass(DestReg, &AMDGPU::R600_TReg32_XRegClass);
    break;
  case 1:
    MRI->constrainRegClass(DestReg, &AMDGPU::R600_TReg32_YRegClass);
    break;
  case 2:
    MRI->constrainRegClass(DestReg, &AMDGPU::R600_TReg32_ZRegClass);
    break;
  case 3:
    MRI->constrainRegClass(DestReg, &AMDGPU::R600_TReg32_WRegClass);
    break;
  }
}

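// Try to fill vector slot Slot: prefer an instruction already bound to that
// channel, otherwise take an unconstrained one and pin its destination
// register class to the slot via AssignSlot.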
SUnit *R600SchedStrategy::AttemptFillSlot(unsigned Slot) {
  static const AluKind IndexToID[] = {AluT_X, AluT_Y, AluT_Z, AluT_W};
  SUnit *SlotedSU = PopInst(AvailableAlus[IndexToID[Slot]]);
  if (SlotedSU)
    return SlotedSU;
  SUnit *UnslotedSU = PopInst(AvailableAlus[AluAny]);
  if (UnslotedSU)
    AssignSlot(UnslotedSU->getInstr(), Slot);
  return UnslotedSU;
}

unsigned R600SchedStrategy::AvailablesAluCount() const {
  return AvailableAlus[AluAny].size() + AvailableAlus[AluT_XYZW].size() +
      AvailableAlus[AluT_X].size() + AvailableAlus[AluT_Y].size() +
      AvailableAlus[AluT_Z].size() + AvailableAlus[AluT_W].size() +
      AvailableAlus[AluTrans].size() + AvailableAlus[AluDiscarded].size() +
      AvailableAlus[AluPredX].size();
}

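// Pick the next ALU instruction for the current instruction group, bottom-up:
// PRED_X and to-be-discarded copies take a whole group, a T_XYZW instruction
// claims all four vector slots, then the trans slot is filled, then the
// remaining vector slots from W down to X. When no slot can be filled, a new
// group is started.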
SUnit* R600SchedStrategy::pickAlu() {
  while (AvailablesAluCount() || !Pending[IDAlu].empty()) {
    if (!OccupedSlotsMask) {
      // Bottom-up scheduling: predX must come first.
      if (!AvailableAlus[AluPredX].empty()) {
        OccupedSlotsMask |= 31;
        return PopInst(AvailableAlus[AluPredX]);
      }
      // Flush the copies that will be discarded (they become KILLs and are
      // dropped later).
      if (!AvailableAlus[AluDiscarded].empty()) {
        OccupedSlotsMask |= 31;
        return PopInst(AvailableAlus[AluDiscarded]);
      }
      // If there is a T_XYZW alu available, use it.
      if (!AvailableAlus[AluT_XYZW].empty()) {
        OccupedSlotsMask |= 15;
        return PopInst(AvailableAlus[AluT_XYZW]);
      }
    }
    bool TransSlotOccupied = OccupedSlotsMask & 16;
    if (!TransSlotOccupied) {
      if (!AvailableAlus[AluTrans].empty()) {
        OccupedSlotsMask |= 16;
        return PopInst(AvailableAlus[AluTrans]);
      }
    }
    for (int Chan = 3; Chan > -1; --Chan) {
      bool isOccupied = OccupedSlotsMask & (1 << Chan);
      if (!isOccupied) {
        SUnit *SU = AttemptFillSlot(Chan);
        if (SU) {
          OccupedSlotsMask |= (1 << Chan);
          InstructionsGroupCandidate.push_back(SU->getInstr());
          return SU;
        }
      }
    }
    PrepareNextSlot();
  }
  return NULL;
}

SUnit* R600SchedStrategy::pickOther(int QID) {
  SUnit *SU = 0;
  std::vector<SUnit *> &AQ = Available[QID];

  if (AQ.empty()) {
    MoveUnits(Pending[QID], AQ);
  }
  if (!AQ.empty()) {
    SU = AQ.back();
    AQ.resize(AQ.size() - 1);
  }
  return SU;
}
    487