/external/llvm/lib/Target/WebAssembly/ |
WebAssemblyLowerBrUnless.cpp |
    60   const auto &TII = *MF.getSubtarget<WebAssemblySubtarget>().getInstrInfo();
    78   case EQ_I32: Def->setDesc(TII.get(NE_I32)); Inverted = true; break;
    79   case NE_I32: Def->setDesc(TII.get(EQ_I32)); Inverted = true; break;
    80   case GT_S_I32: Def->setDesc(TII.get(LE_S_I32)); Inverted = true; break;
    81   case GE_S_I32: Def->setDesc(TII.get(LT_S_I32)); Inverted = true; break;
    82   case LT_S_I32: Def->setDesc(TII.get(GE_S_I32)); Inverted = true; break;
    83   case LE_S_I32: Def->setDesc(TII.get(GT_S_I32)); Inverted = true; break;
    84   case GT_U_I32: Def->setDesc(TII.get(LE_U_I32)); Inverted = true; break;
    85   case GE_U_I32: Def->setDesc(TII.get(LT_U_I32)); Inverted = true; break;
    86   case LT_U_I32: Def->setDesc(TII.get(GE_U_I32)); Inverted = true; break [all...] |
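
The matches above are the heart of the BR_UNLESS lowering: the pass rewrites the comparison that defines the branch condition into its logical inverse via setDesc, so the BR_UNLESS can then become a plain conditional branch. A minimal self-contained sketch of the inversion table, using an invented Opcode enum in place of LLVM's generated opcode constants:

    // Hypothetical stand-ins for the WebAssembly i32 comparison opcodes;
    // the real pass swaps MCInstrDescs via TII.get() instead.
    enum Opcode { EQ_I32, NE_I32, GT_S_I32, GE_S_I32, LT_S_I32, LE_S_I32,
                  GT_U_I32, GE_U_I32, LT_U_I32, LE_U_I32 };

    // Return the comparison computing the logical negation of Op.
    Opcode invertCondition(Opcode Op) {
      switch (Op) {
      case EQ_I32:   return NE_I32;
      case NE_I32:   return EQ_I32;
      case GT_S_I32: return LE_S_I32;
      case GE_S_I32: return LT_S_I32;
      case LT_S_I32: return GE_S_I32;
      case LE_S_I32: return GT_S_I32;
      case GT_U_I32: return LE_U_I32;
      case GE_U_I32: return LT_U_I32;
      case LT_U_I32: return GE_U_I32;
      case LE_U_I32: return GT_U_I32;
      }
      return Op; // unreachable: every case is covered above
    }

Note that signed and unsigned orderings invert independently, which is why the table carries both families.
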
WebAssemblyFrameLowering.cpp |
    70   const TargetInstrInfo* TII,
    76   BuildMI(MBB, InsertPt, DL, TII->get(WebAssembly::CONST_I32), SPReg)
    84   BuildMI(MBB, InsertPt, DL, TII->get(WebAssembly::LOAD_I32), SPReg)
    90   BuildMI(MBB, InsertPt, DL, TII->get(WebAssembly::CONST_I32), OffsetReg)
    93   TII->get(AdjustUp ? WebAssembly::ADD_I32 : WebAssembly::SUB_I32),
    98   BuildMI(MBB, InsertPt, DL, TII->get(WebAssembly::CONST_I32), OffsetReg)
    102  BuildMI(MBB, InsertPt, DL, TII->get(WebAssembly::STORE_I32), WebAssembly::SP32)
    112  const auto *TII =
    116  bool IsDestroy = Opc == TII->getCallFrameDestroyOpcode();
    120  TII, I, DL) [all...] |
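
These matches show the stack-pointer adjustment helper: the offset is materialized with CONST_I32, added to or subtracted from SP32 depending on AdjustUp, and the result written back; the LOAD_I32/STORE_I32 around the arithmetic suggest that at this vintage the backend kept the stack pointer in linear memory. A schematic emitter producing the same shape as mnemonic text, with invented operand syntax:

    #include <cstdint>
    #include <string>
    #include <vector>

    // Emit a schematic instruction sequence adjusting the stack pointer
    // by Amount bytes; mnemonics mirror the excerpt, operands are invented.
    std::vector<std::string> adjustStackPointer(uint32_t Amount, bool AdjustUp) {
      std::vector<std::string> Seq;
      Seq.push_back("LOAD_I32  $sp, __stack_pointer");   // read current SP
      Seq.push_back("CONST_I32 $off, " + std::to_string(Amount));
      Seq.push_back(std::string(AdjustUp ? "ADD_I32" : "SUB_I32") +
                    "   $sp, $sp, $off");
      Seq.push_back("STORE_I32 __stack_pointer, $sp");   // publish the new SP
      return Seq;
    }
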
/external/llvm/lib/Target/AMDGPU/ |
R600ExpandSpecialInstrs.cpp |
    35   const R600InstrInfo *TII;
    42   TII(nullptr) { }
    61   int OpIdx = TII->getOperandIdx(*OldMI, Op);
    64   TII->setImmOperand(NewMI, Op, Val);
    69   TII = static_cast<const R600InstrInfo *>(MF.getSubtarget().getInstrInfo());
    71   const R600RegisterInfo &TRI = TII->getRegisterInfo();
    82   if (TII->isLDSRetInstr(MI.getOpcode())) {
    83   int DstIdx = TII->getOperandIdx(MI.getOpcode(), AMDGPU::OpName::dst);
    86   MachineInstr *Mov = TII->buildMovInstr(&MBB, I,
    89   int LDSPredSelIdx = TII->getOperandIdx(MI.getOpcode() [all...] |
SIShrinkInstructions.cpp |
    86   static bool canShrink(MachineInstr &MI, const SIInstrInfo *TII,
    90   const MachineOperand *Src2 = TII->getNamedOperand(MI, AMDGPU::OpName::src2);
    103  TII->hasModifiersSet(MI, AMDGPU::OpName::src2_modifiers))
    112  const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
    114  TII->getNamedOperand(MI, AMDGPU::OpName::src1_modifiers);
    121  if (TII->hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers))
    125  if (TII->hasModifiersSet(MI, AMDGPU::OpName::omod))
    128  if (TII->hasModifiersSet(MI, AMDGPU::OpName::clamp))
    138  static void foldImmediates(MachineInstr &MI, const SIInstrInfo *TII,
    144  assert(TII->isVOP1(MI) || TII->isVOP2(MI) || TII->isVOPC(MI)) [all...] |
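
canShrink gates the rewrite of a 64-bit VOP3 encoding down to the 32-bit VOP1/VOP2/VOPC form: features only the VOP3 encoding can express (source modifiers, output modifiers, clamp, and in most cases a third source) force the instruction to stay wide. A deliberately simplified stand-alone model of that decision; the real pass has more nuance, for example around src2 on v_mac-style instructions:

    // Invented stand-in for the per-instruction state the real pass
    // queries via TII->getNamedOperand / TII->hasModifiersSet.
    struct InstInfo {
      bool HasSrc2;            // a third source operand exists
      bool Src0Mods, Src1Mods; // neg/abs source modifiers
      bool OutputMods;         // omod
      bool Clamp;              // clamp bit
    };

    // VOP3-only features block the shrink to the 32-bit encoding.
    bool canShrink(const InstInfo &I) {
      if (I.HasSrc2)    return false;
      if (I.Src0Mods)   return false;
      if (I.Src1Mods)   return false;
      if (I.OutputMods) return false;
      if (I.Clamp)      return false;
      return true;
    }
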
R600EmitClauseMarkers.cpp |
    38   const R600InstrInfo *TII;
    56   if (TII->isLDSRetInstr(MI->getOpcode()))
    59   if(TII->isVector(*MI) ||
    60   TII->isCubeOp(MI->getOpcode()) ||
    61   TII->isReductionOp(MI->getOpcode()))
    75   if (TII->isALUInstr(MI->getOpcode()))
    77   if (TII->isVector(*MI) || TII->isCubeOp(MI->getOpcode()))
    122  if (!TII->isALUInstr(MI->getOpcode()) && MI->getOpcode() != AMDGPU::DOT_4)
    126  TII->getSrcs(MI) [all...] |
R600Packetizer.cpp |
    61   const R600InstrInfo *TII;
    76   if (!TII->isALUInstr(I->getOpcode()) && !I->isBundle())
    88   if (TII->isPredicated(&*BI))
    90   int OperandIdx = TII->getOperandIdx(BI->getOpcode(), AMDGPU::OpName::write);
    93   int DstIdx = TII->getOperandIdx(BI->getOpcode(), AMDGPU::OpName::dst);
    98   if (isTrans || TII->isTransOnly(&*BI)) {
    140  int OperandIdx = TII->getOperandIdx(MI->getOpcode(), Ops[i]);
    153  TII(static_cast<const R600InstrInfo *>(
    155  TRI(TII->getRegisterInfo()) {
    173  if (TII->isVector(*MI) [all...] |
R600ClauseMergePass.cpp |
    48   const R600InstrInfo *TII;
    77   TII->getOperandIdx(MI->getOpcode(), AMDGPU::OpName::COUNT)).getImm();
    83   TII->getOperandIdx(MI->getOpcode(), AMDGPU::OpName::Enabled)).getImm();
    88   int CntIdx = TII->getOperandIdx(AMDGPU::CF_ALU, AMDGPU::OpName::COUNT);
    107  int CntIdx = TII->getOperandIdx(AMDGPU::CF_ALU, AMDGPU::OpName::COUNT);
    111  if (CumuledInsts >= TII->getMaxAlusPerClause()) {
    119  TII->getOperandIdx(AMDGPU::CF_ALU, AMDGPU::OpName::KCACHE_MODE0);
    121  TII->getOperandIdx(AMDGPU::CF_ALU, AMDGPU::OpName::KCACHE_BANK0);
    123  TII->getOperandIdx(AMDGPU::CF_ALU, AMDGPU::OpName::KCACHE_ADDR0);
    135  TII->getOperandIdx(AMDGPU::CF_ALU, AMDGPU::OpName::KCACHE_MODE1) [all...] |
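
The merge pass fuses an adjacent pair of CF_ALU clauses by summing their COUNT operands, giving up when the combined size would reach the per-clause ALU limit (line 111) or when the constant-cache (KCACHE) configurations of the two clauses conflict. A toy version of the size-and-compatibility check; the limit value and the single-bank comparison are simplifications for illustration:

    // Invented clause summary; the real pass reads these values out of
    // the CF_ALU MachineInstr via TII->getOperandIdx(...).
    struct Clause {
      unsigned Count;       // instructions in the clause (OpName::COUNT)
      unsigned KCacheBank0; // constant-cache configuration (simplified)
      unsigned KCacheMode0;
    };

    const unsigned MaxAlusPerClause = 128; // assumed value; see getMaxAlusPerClause()

    // Merge B into A when the result still fits in one clause and the
    // constant-cache setup is compatible.
    bool tryMergeClauses(Clause &A, const Clause &B) {
      if (A.Count + B.Count >= MaxAlusPerClause)
        return false;
      if (A.KCacheBank0 != B.KCacheBank0 || A.KCacheMode0 != B.KCacheMode0)
        return false; // simplified: the pass also checks MODE1/BANK1/ADDR0/ADDR1
      A.Count += B.Count;
      return true;
    }
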
SIFoldOperands.cpp |
    142  const SIInstrInfo *TII) {
    143  if (!TII->isOperandLegal(MI, OpNo, OpToFold)) {
    151  MI->setDesc(TII->get(AMDGPU::V_MAD_F32));
    152  bool FoldAsMAD = tryAddToFoldList(FoldList, MI, OpNo, OpToFold, TII);
    157  MI->setDesc(TII->get(Opc));
    170  bool CanCommute = TII->findCommutedOpIndices(MI, CommuteIdx0, CommuteIdx1);
    188  !TII->commuteInstruction(MI, false, CommuteIdx0, CommuteIdx1))
    191  if (!TII->isOperandLegal(MI, OpNo, OpToFold))
    203  const SIInstrInfo *TII, const SIRegisterInfo &TRI,
    225  const MCInstrDesc &FoldDesc = TII->get(OpToFold.getParent()->getOpcode()) [all...] |
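
tryAddToFoldList first tests whether the operand is legal where it stands; if not, it looks for commutable source indices and retries after commuting, undoing the commute when the fold still fails (and, per lines 151-157, it can even mutate v_mac_f32 to V_MAD_F32 to expose a foldable slot). A self-contained sketch of the fold-else-commute control flow over a toy two-source instruction; the legality rule is invented:

    #include <utility>

    // Toy model: a two-source instruction where only source 1 can encode
    // an inline immediate, standing in for TII->isOperandLegal's rules.
    struct Inst {
      long Src0, Src1;
      bool Commutable;
    };

    constexpr unsigned ImmLegalOpNo = 1;

    bool isOperandLegal(const Inst &, unsigned OpNo) {
      return OpNo == ImmLegalOpNo;
    }

    // Try to place an immediate at OpNo, commuting as a fallback, the
    // way tryAddToFoldList retries after TII->commuteInstruction.
    bool tryFoldAt(Inst &MI, unsigned OpNo) {
      if (isOperandLegal(MI, OpNo))
        return true;                          // fold directly
      if (!MI.Commutable)
        return false;                         // no other position to try
      std::swap(MI.Src0, MI.Src1);            // commute the two sources
      if (isOperandLegal(MI, OpNo == 0 ? 1u : 0u))
        return true;                          // operand moved to a legal slot
      std::swap(MI.Src0, MI.Src1);            // undo: fold still impossible
      return false;
    }
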
SILowerControlFlow.cpp |
    73   const SIInstrInfo *TII;
    98   MachineFunctionPass(ID), TRI(nullptr), TII(nullptr) { }
    146  BuildMI(*From.getParent(), &From, DL, TII->get(AMDGPU::S_CBRANCH_EXECZ))
    164  BuildMI(MBB, Insert, DL, TII->get(AMDGPU::S_CBRANCH_EXECNZ))
    168  BuildMI(MBB, Insert, DL, TII->get(AMDGPU::EXP))
    180  BuildMI(MBB, Insert, DL, TII->get(AMDGPU::S_ENDPGM));
    189  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_AND_SAVEEXEC_B64), Reg)
    192  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_XOR_B64), Reg)
    208  TII->get(AMDGPU::S_OR_SAVEEXEC_B64), Dst)
    211  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_XOR_B64), AMDGPU::EXEC [all...] |
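
SILowerControlFlow turns structured control flow into EXEC-mask arithmetic. For an "if" (lines 189-192), S_AND_SAVEEXEC_B64 saves the incoming mask while restricting EXEC to the lanes taking the "then" side, and the following S_XOR_B64 leaves the register holding the lanes deferred to the "else" side. A toy lane-mask model of that pair, under the assumption that this is the SI_IF lowering:

    #include <cstdint>

    // 64 lanes, one bit per lane; a stand-in for the EXEC register.
    struct ExecState { uint64_t Exec; };

    // Model of S_AND_SAVEEXEC_B64 + S_XOR_B64 for an "if":
    // returns the mask of lanes that must run the else side later.
    uint64_t emitIf(ExecState &ST, uint64_t Cond) {
      uint64_t Saved = ST.Exec;  // S_AND_SAVEEXEC_B64: Reg = EXEC ...
      ST.Exec &= Cond;           //   ... and EXEC = EXEC & Cond
      return Saved ^ ST.Exec;    // S_XOR_B64: previously active lanes failing Cond
    }

The S_CBRANCH_EXECZ at line 146 is the companion piece: once EXEC is narrowed, a whole region can be skipped when no lane remains active.
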
/external/llvm/lib/Target/AArch64/ |
AArch64A53Fix835769.cpp |
    82   const TargetInstrInfo *TII;
    112  TII = F.getSubtarget().getInstrInfo();
    123  const TargetInstrInfo *TII) {
    136  if (S == PrevBB && !TII->AnalyzeBranch(*PrevBB, TBB, FBB, Cond) &&
    148  const TargetInstrInfo *TII) {
    153  while ((FMBB = getBBFallenThrough(FMBB, TII))) {
    164  const TargetInstrInfo *TII) {
    168  MachineInstr *I = getLastNonPseudo(MBB, TII);
    171  BuildMI(I->getParent(), DL, TII->get(AArch64::HINT)).addImm(0);
    175  BuildMI(MBB, MI, DL, TII->get(AArch64::HINT)).addImm(0) [all...] |
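
This pass works around Cortex-A53 erratum 835769 by placing a HINT #0 (a NOP) between the offending instruction pair, a memory operation immediately followed by a 64-bit multiply-accumulate, including when the pair straddles a fallthrough block boundary (getBBFallenThrough). A schematic scan over a flat opcode stream; the instruction-class predicates are reduced to an invented enum:

    #include <cstddef>
    #include <vector>

    enum class Kind { MemOp, MulAcc, Other, Nop };

    // Insert a NOP between a load/store and an immediately following
    // 64-bit multiply-accumulate, the pairing the erratum concerns
    // (simplified: the real pass also walks across fallthrough edges).
    void fixErratum(std::vector<Kind> &Insts) {
      for (std::size_t I = 1; I < Insts.size(); ++I) {
        if (Insts[I] == Kind::MulAcc && Insts[I - 1] == Kind::MemOp) {
          Insts.insert(Insts.begin() + I, Kind::Nop); // HINT #0
          ++I; // skip past the pair we just fixed
        }
      }
    }
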
AArch64StorePairSuppress.cpp |
    30   const AArch64InstrInfo *TII;
    81   unsigned SCIdx = TII->get(AArch64::STPDi).getSchedClass();
    119  TII = static_cast<const AArch64InstrInfo *>(ST.getInstrInfo());
    122  SchedModel.init(ST.getSchedModel(), &ST, TII);
    145  if (TII->getMemOpBaseRegImmOfs(&MI, BaseReg, Offset, TRI)) {
    153  TII->suppressLdStPair(&MI); |
/external/mesa3d/src/gallium/drivers/radeon/ |
R600ExpandSpecialInstrs.cpp |
    30   const R600InstrInfo *TII;
    34   TII (static_cast<const R600InstrInfo *>(tm.getInstrInfo())) { }
    53   const R600RegisterInfo &TRI = TII->getRegisterInfo();
    63   bool IsReduction = TII->isReductionOp(MI.getOpcode());
    64   bool IsVector = TII->isVector(MI);
    65   bool IsCube = TII->isCubeOp(MI.getOpcode());
    151  BuildMI(MBB, I, MBB.findDebugLoc(I), TII->get(Opcode), DstReg)
    157  TII->addFlag(NewMI, 0, Flags); |
SIRegisterInfo.h |
    28   const TargetInstrInfo &TII;
    30   SIRegisterInfo(AMDGPUTargetMachine &tm, const TargetInstrInfo &tii); |
AMDGPUConvertToISA.cpp |
    49   const AMDGPUInstrInfo * TII =
    58   TII->convertToISA(MI, MF, MBB.findDebugLoc(I)); |
AMDGPURegisterInfo.cpp |
    20   const TargetInstrInfo &tii)
    23   TII(tii) |
R600RegisterInfo.h |
    28   const TargetInstrInfo &TII;
    30   R600RegisterInfo(AMDGPUTargetMachine &tm, const TargetInstrInfo &tii); |
/external/llvm/lib/Target/Hexagon/ |
HexagonExpandPredSpillCode.cpp |
    72   const HexagonInstrInfo *TII = QST.getInstrInfo();
    107  BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Hexagon::A2_tfrrcr),
    111  TII->get(Opcode));
    150  BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Hexagon::A2_tfrrcr),
    154  TII->get(Opcode));
    191  BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Hexagon::A2_tfrrcr),
    195  TII->get(Opcode));
    228  BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Hexagon::A2_tfrrcr),
    232  TII->get(Opcode));
    252  if (!TII->isValidOffset(Hexagon::S2_storeri_io, Offset)) [all...] |
/external/llvm/lib/Target/Mips/ |
Mips16FrameLowering.cpp |
    39   const Mips16InstrInfo &TII =
    57   TII.makeFrame(Mips::SP, StackSize, MBB, MBBI);
    62   BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
    77   BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
    82   BuildMI(MBB, MBBI, dl, TII.get(Mips::MoveR3216), Mips::S0)
    91   const Mips16InstrInfo &TII =
    100  BuildMI(MBB, MBBI, dl, TII.get(Mips::Move32R16), Mips::SP)
    105  TII.restoreFrame(Mips::SP, StackSize, MBB, MBBI);
    163  const Mips16InstrInfo &TII =
    165  const MipsRegisterInfo &RI = TII.getRegisterInfo() [all...] |
MipsLongBranch.cpp |
    170  const MipsInstrInfo *TII =
    178  MBBInfos[I].Size += TII->GetInstSizeInBytes(&*MI);
    217  const MipsInstrInfo *TII = static_cast<const MipsInstrInfo *>(
    219  unsigned NewOpc = TII->getOppositeBranchOpc(Br->getOpcode());
    220  const MCInstrDesc &NewDesc = TII->get(NewOpc);
    261  const MipsInstrInfo *TII =
    295  BuildMI(*LongBrMBB, Pos, DL, TII->get(Mips::ADDiu), Mips::SP)
    297  BuildMI(*LongBrMBB, Pos, DL, TII->get(Mips::SW)).addReg(Mips::RA)
    316  BuildMI(*LongBrMBB, Pos, DL, TII->get(Mips::LONG_BRANCH_LUi), Mips::AT)
    319  .append(BuildMI(*MF, DL, TII->get(BalOp)).addMBB(BalTgtMBB) [all...] |
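
When a branch target is out of range, the pass first flips the branch with getOppositeBranchOpc so the short form can skip over an expansion block, then builds the long sequence: save RA on the stack (ADDiu/SW), compute the target address around a BAL using LONG_BRANCH_LUi, and jump indirectly. A sketch of the range check and the expansion shape, assuming the classic MIPS encoding of a signed 16-bit word offset (an 18-bit byte displacement):

    #include <cstdint>

    // Signed-range check for a MIPS I-type branch: 16-bit immediate in
    // words, i.e. an 18-bit byte displacement (assumed here).
    bool fitsInShortBranch(int64_t ByteOffset) {
      return ByteOffset >= -(1 << 17) && ByteOffset < (1 << 17);
    }

    // Expansion shape (schematic, not the pass's exact sequence):
    //   beq  $a, $b, $far        becomes     bne  $a, $b, $fallthrough
    //                                        # long-branch block:
    //                                        #   save RA, form $far in $at
    //                                        #   via LUi/ADDiu around a bal,
    //                                        #   jr $at, restore RA
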
/external/llvm/lib/Target/ARM/ |
ARMHazardRecognizer.cpp |
    48   const ARMBaseInstrInfo &TII = *static_cast<const ARMBaseInstrInfo *>(
    54   !(TII.getSubtarget().isLikeA9() && LastMI->mayLoadOrStore()) &&
    63   if (TII.isFpMLxInstruction(DefMI->getOpcode()) &&
    64   (TII.canCauseFpMLxStall(MI->getOpcode()) ||
    65   hasRAWHazard(DefMI, MI, TII.getRegisterInfo()))) { |
/external/llvm/lib/Target/MSP430/ |
MSP430FrameLowering.cpp |
    47   const MSP430InstrInfo &TII =
    68   BuildMI(MBB, MBBI, DL, TII.get(MSP430::PUSH16r))
    72   BuildMI(MBB, MBBI, DL, TII.get(MSP430::MOV16rr), MSP430::FP)
    100  BuildMI(MBB, MBBI, DL, TII.get(MSP430::SUB16ri), MSP430::SP)
    112  const MSP430InstrInfo &TII =
    137  BuildMI(MBB, MBBI, DL, TII.get(MSP430::POP16r), MSP430::FP);
    159  TII.get(MSP430::MOV16rr), MSP430::SP).addReg(MSP430::FP);
    163  TII.get(MSP430::SUB16ri), MSP430::SP)
    172  BuildMI(MBB, MBBI, DL, TII.get(MSP430::ADD16ri), MSP430::SP)
    193  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo() [all...] |
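
This is the textbook frame-pointer prologue: push the caller's FP, copy SP into FP, then drop SP by the frame size, with the epilogue (POP16r/MOV16rr/ADD16ri) undoing it in reverse. A schematic emitter producing the same three-instruction shape as mnemonic text; the operand syntax is illustrative only:

    #include <cstdint>
    #include <string>
    #include <vector>

    // Emit the MSP430-style prologue from the excerpt as mnemonic text.
    std::vector<std::string> emitPrologue(uint16_t FrameSize, bool HasFP) {
      std::vector<std::string> Seq;
      if (HasFP) {
        Seq.push_back("PUSH16r  FP");       // save caller's frame pointer
        Seq.push_back("MOV16rr  FP, SP");   // establish the new frame
      }
      if (FrameSize)
        Seq.push_back("SUB16ri  SP, " + std::to_string(FrameSize));
      return Seq;
    }
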
/external/llvm/lib/Target/SystemZ/ |
SystemZRegisterInfo.cpp |
    65   auto *TII =
    86   unsigned OpcodeForOffset = TII->getOpcodeForOffset(Opcode, Offset);
    96   OpcodeForOffset = TII->getOpcodeForOffset(Opcode, Offset);
    109  TII->loadImmediate(MBB, MI, ScratchReg, HighOffset);
    115  unsigned LAOpcode = TII->getOpcodeForOffset(SystemZ::LA, HighOffset);
    117  BuildMI(MBB, MI, DL, TII->get(LAOpcode), ScratchReg)
    122  TII->loadImmediate(MBB, MI, ScratchReg, HighOffset);
    123  BuildMI(MBB, MI, DL, TII->get(SystemZ::AGR), ScratchReg)
    132  MI->setDesc(TII->get(OpcodeForOffset)); |
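
eliminateFrameIndex here legalizes out-of-range displacements: getOpcodeForOffset picks an instruction form whose displacement field can encode the offset, and when none can, the offset is split so a high part goes into a scratch register (loaded directly, or formed with LA and added back with AGR) while only a small low part stays in the instruction. A sketch of one plausible split; the 12-bit mask is an assumption for illustration, matching the unsigned displacement of the short forms:

    #include <cstdint>

    struct Split { int64_t High; int64_t Low; };

    // Split Offset so Low fits an unsigned 12-bit displacement and High
    // goes into a scratch register. The 0xfff mask is assumed here.
    Split splitOffset(int64_t Offset) {
      int64_t Low = Offset & 0xfff;
      return {Offset - Low, Low};
    }
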
/external/llvm/lib/Target/XCore/ |
XCoreRegisterInfo.cpp |
    63   const XCoreInstrInfo &TII,
    71   BuildMI(MBB, II, dl, TII.get(XCore::LDW_2rus), Reg)
    77   BuildMI(MBB, II, dl, TII.get(XCore::STW_2rus))
    84   BuildMI(MBB, II, dl, TII.get(XCore::LDAWF_l2rus), Reg)
    94   const XCoreInstrInfo &TII,
    103  TII.loadImmediate(MBB, II, ScratchOffset, Offset);
    107  BuildMI(MBB, II, dl, TII.get(XCore::LDW_3r), Reg)
    113  BuildMI(MBB, II, dl, TII.get(XCore::STW_l3r))
    120  BuildMI(MBB, II, dl, TII.get(XCore::LDAWF_l3r), Reg)
    130  const XCoreInstrInfo &TII, [all...] |
XCoreFrameToArgsOffsetElim.cpp |
    46   const XCoreInstrInfo &TII =
    57   MBBI = TII.loadImmediate(MBB, MBBI, Reg, StackSize); |
/external/llvm/lib/Target/PowerPC/ |
PPCBranchSelector.cpp |
    67   const PPCInstrInfo *TII =
    74   [TII](MachineBasicBlock &MBB, unsigned Offset) -> unsigned {
    107  BlockSize += TII->GetInstSizeInBytes(MBBI);
    154  MBBStartOffset += TII->GetInstSizeInBytes(I);
    197  BuildMI(MBB, I, dl, TII->get(PPC::BCC))
    201  BuildMI(MBB, I, dl, TII->get(PPC::BCn)).addReg(CRBit).addImm(2);
    204  BuildMI(MBB, I, dl, TII->get(PPC::BC)).addReg(CRBit).addImm(2);
    206  BuildMI(MBB, I, dl, TII->get(PPC::BDZ)).addImm(2);
    208  BuildMI(MBB, I, dl, TII->get(PPC::BDZ8)).addImm(2);
    210  BuildMI(MBB, I, dl, TII->get(PPC::BDNZ)).addImm(2) [all...] |
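
PPCBranchSelector measures every block with GetInstSizeInBytes and relaxes any conditional branch whose displacement no longer fits its immediate field into an inverted short branch over an unconditional one (the .addImm(2) hops over that added branch). Because relaxation grows blocks, the process repeats until no sizes change. A self-contained sketch of that fixed-point loop over toy blocks, assuming a ±32768-byte reach for the short form:

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    struct Block {
      unsigned Size;    // bytes, recomputed as branches grow
      int BranchTarget; // index of target block, -1 if none
      bool Relaxed;     // already expanded to the long form
    };

    // Assumed short-branch reach; a relaxed branch pays 4 extra bytes
    // for the added unconditional branch.
    bool displacementFits(int64_t D) { return D >= -32768 && D < 32768; }

    void selectBranches(std::vector<Block> &Blocks) {
      bool Changed = true;
      while (Changed) {                      // iterate to a fixed point
        Changed = false;
        std::vector<int64_t> Offset(Blocks.size() + 1, 0);
        for (std::size_t I = 0; I < Blocks.size(); ++I)
          Offset[I + 1] = Offset[I] + Blocks[I].Size;
        for (std::size_t I = 0; I < Blocks.size(); ++I) {
          Block &B = Blocks[I];
          if (B.BranchTarget < 0 || B.Relaxed)
            continue;
          int64_t Disp = Offset[B.BranchTarget] - Offset[I];
          if (!displacementFits(Disp)) {
            B.Relaxed = true;                // invert, branch over a plain B
            B.Size += 4;
            Changed = true;
          }
        }
      }
    }
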