//===-- R600ExpandSpecialInstrs.cpp - Expand special instructions ---------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// Vector, Reduction, and Cube instructions need to fill the entire instruction
/// group to work correctly.  This pass expands these individual instructions
/// into several instructions that will completely fill the instruction group.
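///
/// An R600 ALU instruction group is a VLIW bundle of co-issued slots (one per
/// channel, plus a transcendental slot on most variants); the group is closed
/// by the instruction that has its "last" bit set.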
//
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "R600Defines.h"
#include "R600InstrInfo.h"
#include "R600MachineFunctionInfo.h"
#include "R600RegisterInfo.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"

using namespace llvm;

namespace {

class R600ExpandSpecialInstrsPass : public MachineFunctionPass {

private:
  static char ID;
  const R600InstrInfo *TII;

  bool ExpandInputPerspective(MachineInstr& MI);
  bool ExpandInputConstant(MachineInstr& MI);

public:
  R600ExpandSpecialInstrsPass(TargetMachine &tm) : MachineFunctionPass(ID),
    TII(0) { }

  virtual bool runOnMachineFunction(MachineFunction &MF);

  virtual const char *getPassName() const {
    return "R600 Expand special instructions pass";
  }
};

} // End anonymous namespace

char R600ExpandSpecialInstrsPass::ID = 0;

FunctionPass *llvm::createR600ExpandSpecialInstrsPass(TargetMachine &TM) {
  return new R600ExpandSpecialInstrsPass(TM);
}

bool R600ExpandSpecialInstrsPass::runOnMachineFunction(MachineFunction &MF) {
  TII = static_cast<const R600InstrInfo *>(MF.getTarget().getInstrInfo());

  const R600RegisterInfo &TRI = TII->getRegisterInfo();

  for (MachineFunction::iterator BB = MF.begin(), BB_E = MF.end();
                                                  BB != BB_E; ++BB) {
    MachineBasicBlock &MBB = *BB;
    MachineBasicBlock::iterator I = MBB.begin();
    while (I != MBB.end()) {
      MachineInstr &MI = *I;
      I = llvm::next(I);

      switch (MI.getOpcode()) {
      default: break;
      // Expand PRED_X to one of the PRED_SET instructions.
      case AMDGPU::PRED_X: {
        uint64_t Flags = MI.getOperand(3).getImm();
        // The native opcode used by PRED_X is stored as an immediate in the
        // third operand.
        MachineInstr *PredSet = TII->buildDefaultInstruction(MBB, I,
                                            MI.getOperand(2).getImm(), // opcode
                                            MI.getOperand(0).getReg(), // dst
                                            MI.getOperand(1).getReg(), // src0
                                            AMDGPU::ZERO);             // src1
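        // The PRED_SET result is only consumed as a predicate here, so mask
        // the destination register write.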
        TII->addFlag(PredSet, 0, MO_FLAG_MASK);
        if (Flags & MO_FLAG_PUSH) {
          TII->setImmOperand(PredSet, AMDGPU::OpName::update_exec_mask, 1);
        } else {
          TII->setImmOperand(PredSet, AMDGPU::OpName::update_pred, 1);
        }
        MI.eraseFromParent();
        continue;
      }

      case AMDGPU::INTERP_PAIR_XY: {
        MachineInstr *BMI;
        unsigned PReg = AMDGPU::R600_ArrayBaseRegClass.getRegister(
                MI.getOperand(2).getImm());

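        // Channels 0 and 1 produce the pair's real results; channels 2 and 3
        // exist only to fill out the instruction group, so they target T0 and
        // are write-masked below.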
        for (unsigned Chan = 0; Chan < 4; ++Chan) {
          unsigned DstReg;

          if (Chan < 2)
            DstReg = MI.getOperand(Chan).getReg();
          else
            DstReg = Chan == 2 ? AMDGPU::T0_Z : AMDGPU::T0_W;

          BMI = TII->buildDefaultInstruction(MBB, I, AMDGPU::INTERP_XY,
              DstReg, MI.getOperand(3 + (Chan % 2)).getReg(), PReg);

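          // Glue each new instruction to its predecessor, so that later
          // passes see the four channels as a single bundle.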
          if (Chan > 0) {
            BMI->bundleWithPred();
          }
          if (Chan >= 2)
            TII->addFlag(BMI, 0, MO_FLAG_MASK);
          if (Chan != 3)
            TII->addFlag(BMI, 0, MO_FLAG_NOT_LAST);
        }

        MI.eraseFromParent();
        continue;
      }

      case AMDGPU::INTERP_PAIR_ZW: {
        MachineInstr *BMI;
        unsigned PReg = AMDGPU::R600_ArrayBaseRegClass.getRegister(
                MI.getOperand(2).getImm());

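        // Mirror of INTERP_PAIR_XY: channels 2 and 3 produce the real results
        // here, while channels 0 and 1 fill the group with masked writes to T0.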
        for (unsigned Chan = 0; Chan < 4; ++Chan) {
          unsigned DstReg;

          if (Chan < 2)
            DstReg = Chan == 0 ? AMDGPU::T0_X : AMDGPU::T0_Y;
          else
            DstReg = MI.getOperand(Chan-2).getReg();

          BMI = TII->buildDefaultInstruction(MBB, I, AMDGPU::INTERP_ZW,
              DstReg, MI.getOperand(3 + (Chan % 2)).getReg(), PReg);

          if (Chan > 0) {
            BMI->bundleWithPred();
          }
          if (Chan < 2)
            TII->addFlag(BMI, 0, MO_FLAG_MASK);
          if (Chan != 3)
            TII->addFlag(BMI, 0, MO_FLAG_NOT_LAST);
        }

        MI.eraseFromParent();
        continue;
      }

      case AMDGPU::INTERP_VEC_LOAD: {
        const R600RegisterInfo &TRI = TII->getRegisterInfo();
        MachineInstr *BMI;
        unsigned PReg = AMDGPU::R600_ArrayBaseRegClass.getRegister(
                MI.getOperand(1).getImm());
        unsigned DstReg = MI.getOperand(0).getReg();

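        // Emit one INTERP_LOAD_P0 per channel, each writing one subregister
        // of the vector destination; only the final channel ends the group.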
        for (unsigned Chan = 0; Chan < 4; ++Chan) {
          BMI = TII->buildDefaultInstruction(MBB, I, AMDGPU::INTERP_LOAD_P0,
              TRI.getSubReg(DstReg, TRI.getSubRegFromChannel(Chan)), PReg);
          if (Chan > 0) {
            BMI->bundleWithPred();
          }
          if (Chan != 3)
            TII->addFlag(BMI, 0, MO_FLAG_NOT_LAST);
        }

        MI.eraseFromParent();
        continue;
      }
      case AMDGPU::DOT_4: {

        const R600RegisterInfo &TRI = TII->getRegisterInfo();

        unsigned DstReg = MI.getOperand(0).getReg();
        unsigned DstBase = TRI.getEncodingValue(DstReg) & HW_REG_MASK;

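        // DOT_4 produces a single scalar, but must still occupy all four
        // slots; every channel except the one named by the original
        // destination register gets a masked write.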
        for (unsigned Chan = 0; Chan < 4; ++Chan) {
          bool Mask = (Chan != TRI.getHWRegChan(DstReg));
          unsigned SubDstReg =
              AMDGPU::R600_TReg32RegClass.getRegister((DstBase * 4) + Chan);
          MachineInstr *BMI =
              TII->buildSlotOfVectorInstruction(MBB, &MI, Chan, SubDstReg);
          if (Chan > 0) {
            BMI->bundleWithPred();
          }
          if (Mask) {
            TII->addFlag(BMI, 0, MO_FLAG_MASK);
          }
          if (Chan != 3)
            TII->addFlag(BMI, 0, MO_FLAG_NOT_LAST);
          unsigned Opcode = BMI->getOpcode();
          // While not strictly necessary from a hardware point of view, we
          // force all src operands of a dot4 inst to belong to the same slot.
          unsigned Src0 = BMI->getOperand(
              TII->getOperandIdx(Opcode, AMDGPU::OpName::src0))
              .getReg();
          unsigned Src1 = BMI->getOperand(
              TII->getOperandIdx(Opcode, AMDGPU::OpName::src1))
              .getReg();
          (void) Src0;
          (void) Src1;
          if ((TRI.getEncodingValue(Src0) & 0xff) < 127 &&
              (TRI.getEncodingValue(Src1) & 0xff) < 127)
            assert(TRI.getHWRegChan(Src0) == TRI.getHWRegChan(Src1));
        }
        MI.eraseFromParent();
        continue;
      }
      }

      bool IsReduction = TII->isReductionOp(MI.getOpcode());
      bool IsVector = TII->isVector(MI);
      bool IsCube = TII->isCubeOp(MI.getOpcode());
      if (!IsReduction && !IsVector && !IsCube) {
        continue;
      }

      // Expand the instruction
      //
      // Reduction instructions:
      // T0_X = DP4 T1_XYZW, T2_XYZW
      // becomes:
      // T0_X = DP4 T1_X, T2_X
      // T0_Y (write masked) = DP4 T1_Y, T2_Y
      // T0_Z (write masked) = DP4 T1_Z, T2_Z
      // T0_W (write masked) = DP4 T1_W, T2_W
      //
      // Vector instructions:
      // T0_X = MULLO_INT T1_X, T2_X
      // becomes:
      // T0_X = MULLO_INT T1_X, T2_X
      // T0_Y (write masked) = MULLO_INT T1_X, T2_X
      // T0_Z (write masked) = MULLO_INT T1_X, T2_X
      // T0_W (write masked) = MULLO_INT T1_X, T2_X
      //
      // Cube instructions:
      // T0_XYZW = CUBE T1_XYZW
      // becomes:
      // T0_X = CUBE T1_Z, T1_Y
      // T0_Y = CUBE T1_Z, T1_X
      // T0_Z = CUBE T1_X, T1_Z
      // T0_W = CUBE T1_Y, T1_Z
      for (unsigned Chan = 0; Chan < 4; Chan++) {
        unsigned DstReg = MI.getOperand(
                            TII->getOperandIdx(MI, AMDGPU::OpName::dst)).getReg();
        unsigned Src0 = MI.getOperand(
                           TII->getOperandIdx(MI, AMDGPU::OpName::src0)).getReg();
        unsigned Src1 = 0;

        // Determine the correct source registers
        if (!IsCube) {
          int Src1Idx = TII->getOperandIdx(MI, AMDGPU::OpName::src1);
          if (Src1Idx != -1) {
            Src1 = MI.getOperand(Src1Idx).getReg();
          }
        }
        if (IsReduction) {
          unsigned SubRegIndex = TRI.getSubRegFromChannel(Chan);
          Src0 = TRI.getSubReg(Src0, SubRegIndex);
          Src1 = TRI.getSubReg(Src1, SubRegIndex);
        } else if (IsCube) {
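          // Per-channel source component selection for CUBE: src0 takes
          // component CubeSrcSwz[Chan] and src1 takes CubeSrcSwz[3 - Chan],
          // yielding the operand pattern shown in the expansion above.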
          static const int CubeSrcSwz[] = {2, 2, 0, 1};
          unsigned SubRegIndex0 = TRI.getSubRegFromChannel(CubeSrcSwz[Chan]);
          unsigned SubRegIndex1 = TRI.getSubRegFromChannel(CubeSrcSwz[3 - Chan]);
          Src1 = TRI.getSubReg(Src0, SubRegIndex1);
          Src0 = TRI.getSubReg(Src0, SubRegIndex0);
        }

        // Determine the correct destination register
        bool Mask = false;
        bool NotLast = true;
        if (IsCube) {
          unsigned SubRegIndex = TRI.getSubRegFromChannel(Chan);
          DstReg = TRI.getSubReg(DstReg, SubRegIndex);
        } else {
          // Mask the write if the original instruction does not write to
          // the current channel.
          Mask = (Chan != TRI.getHWRegChan(DstReg));
          unsigned DstBase = TRI.getEncodingValue(DstReg) & HW_REG_MASK;
          DstReg = AMDGPU::R600_TReg32RegClass.getRegister((DstBase * 4) + Chan);
        }

        // All channels except the last are marked NOT_LAST, which keeps the
        // instruction group open
        NotLast = (Chan != 3);

        // Add the new instruction
        unsigned Opcode = MI.getOpcode();
        switch (Opcode) {
        case AMDGPU::CUBE_r600_pseudo:
          Opcode = AMDGPU::CUBE_r600_real;
          break;
        case AMDGPU::CUBE_eg_pseudo:
          Opcode = AMDGPU::CUBE_eg_real;
          break;
        default:
          break;
        }

        MachineInstr *NewMI =
          TII->buildDefaultInstruction(MBB, I, Opcode, DstReg, Src0, Src1);

        if (Chan != 0)
          NewMI->bundleWithPred();
        if (Mask) {
          TII->addFlag(NewMI, 0, MO_FLAG_MASK);
        }
        if (NotLast) {
          TII->addFlag(NewMI, 0, MO_FLAG_NOT_LAST);
        }
      }
      MI.eraseFromParent();
    }
  }
  return false;
}
    317