Home | Sort by relevance | Sort by last modified time
    Searched defs:Src0 (Results 1 - 25 of 41) sorted by relevance

1 2

  /external/llvm/lib/Target/AMDGPU/
R600ExpandSpecialInstrs.cpp 108 MI.getOperand(1).getReg(), // src0
222 unsigned Src0 = BMI->getOperand(
223 TII->getOperandIdx(Opcode, AMDGPU::OpName::src0))
228 (void) Src0;
230 if ((TRI.getEncodingValue(Src0) & 0xff) < 127 &&
232 assert(TRI.getHWRegChan(Src0) == TRI.getHWRegChan(Src1));
274 unsigned Src0 = MI.getOperand(
275 TII->getOperandIdx(MI, AMDGPU::OpName::src0)).getReg();
287 Src0 = TRI.getSubReg(Src0, SubRegIndex)
    [all...]
SIShrinkInstructions.cpp 113 // We don't need to check src0, all input types are legal, so just make sure
114 // src0 isn't using any modifiers.
138 int Src0Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::src0);
139 MachineOperand &Src0 = MI.getOperand(Src0Idx);
141 // Only one literal constant is allowed per instruction, so if src0 is a
143 if (Src0.isImm() &&
144 TII->isLiteralConstant(Src0, TII->getOpSize(MI, Src0Idx)))
147 // Literal constants and SGPRs can only be used in Src0, so if Src0 is an
150 if (Src0.isReg() && !isVGPR(&Src0, TRI, MRI)
    [all...]
AMDGPUPromoteAlloca.cpp 729 Value *Src0 = CI->getOperand(0);
730 Type *EltTy = Src0->getType()->getPointerElementType();
SIInstrInfo.cpp 889 unsigned Src0 = MI.getOperand(1).getReg();
894 .addReg(RI.getSubReg(Src0, AMDGPU::sub0))
899 .addReg(RI.getSubReg(Src0, AMDGPU::sub1))
953 AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::src0);
954 MachineOperand &Src0 = MI.getOperand(Src0Idx);
955 if (!Src0.isReg())
    [all...]
  /external/swiftshader/third_party/llvm-7.0/llvm/lib/Target/AMDGPU/
SIOptimizeExecMasking.cpp 328 MachineOperand &Src0 = SaveExecInst->getOperand(1);
333 if (Src0.isReg() && Src0.getReg() == CopyFromExec) {
339 OtherOp = &Src0;
R600ExpandSpecialInstrs.cpp 124 MI.getOperand(1).getReg(), // src0
159 unsigned Src0 = BMI->getOperand(
160 TII->getOperandIdx(Opcode, R600::OpName::src0))
165 (void) Src0;
167 if ((TRI.getEncodingValue(Src0) & 0xff) < 127 &&
169 assert(TRI.getHWRegChan(Src0) == TRI.getHWRegChan(Src1));
211 unsigned Src0 = MI.getOperand(
212 TII->getOperandIdx(MI, R600::OpName::src0)).getReg();
224 Src0 = TRI.getSubReg(Src0, SubRegIndex)
    [all...]
SIShrinkInstructions.cpp 110 // We don't need to check src0, all input types are legal, so just make sure
111 // src0 isn't using any modifiers.
127 int Src0Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::src0);
129 // Try to fold Src0
130 MachineOperand &Src0 = MI.getOperand(Src0Idx);
131 if (Src0.isReg()) {
132 unsigned Reg = Src0.getReg();
143 Src0.setSubReg(0);
144 Src0.ChangeToImmediate(MovSrc.getImm());
147 Src0.setSubReg(0)
    [all...]
AMDGPUPromoteAlloca.cpp 809 Value *Src0 = CI->getOperand(0);
810 Type *EltTy = Src0->getType()->getPointerElementType();
SIFoldOperands.cpp 169 if (OpNo == AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0))
546 int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0);
547 MachineOperand *Src0 = getImmOrMaterializedImm(MRI, MI->getOperand(Src0Idx));
550 if (!Src0->isImm() && !Src1->isImm())
556 if (Src0->isImm() && Src1->isImm()) {
558 if (!evalBinaryInstruction(Opc, NewImm, Src0->getImm(), Src1->getImm()))
564 // Be careful to change the right operand, src0 may belong to a different
575 if (Src0->isImm() && !Src1->isImm()) {
576 std::swap(Src0, Src1);
638 const MachineOperand *Src0 = TII->getNamedOperand(*MI, AMDGPU::OpName::src0)
    [all...]
SILoadStoreOptimizer.cpp 796 const auto *Src0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::vdata);
800 .add(*Src0)
    [all...]
SIPeepholeSDWA.cpp 337 if (TII->getNamedOperand(*MI, AMDGPU::OpName::src0) == SrcOp) {
372 MachineOperand *Src = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
378 // If this is not src0 then it could be src1
562 MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
563 auto Imm = foldToImm(*Src0);
603 MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
604 auto Imm = foldToImm(*Src0);
672 MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0)
    [all...]
R600InstrInfo.cpp 91 NewMI->getOperand(getOperandIdx(*NewMI, R600::OpName::src0))
258 {R600::OpName::src0, R600::OpName::src0_sel},
311 {R600::OpName::src0, R600::OpName::src0_sel},
541 //Todo : support shared src0 - src1 operand
    [all...]
AMDGPUISelDAGToDAG.cpp 770 // src0_modifiers, src0, src1_modifiers, src1, src2_modifiers, src2, clamp, omod
784 // src0_modifiers, src0, src1_modifiers, src1, clamp, omod
    [all...]
  /external/llvm/lib/Target/AArch64/
AArch64AdvSIMDScalarPass.cpp 306 unsigned Src0 = 0, SubReg0;
317 Src0 = MOSrc0->getReg();
319 // Src0 is going to be reused, thus, it cannot be killed anymore.
338 // Src0 is going to be reused, thus, it cannot be killed anymore.
349 if (!Src0) {
351 Src0 = MRI->createVirtualRegister(&AArch64::FPR64RegClass);
352 insertCopy(TII, MI, Src0, OrigSrc0, KillSrc0);
371 .addReg(Src0, getKillRegState(KillSrc0), SubReg0)
AArch64FastISel.cpp     [all...]
  /external/swiftshader/third_party/llvm-7.0/llvm/lib/CodeGen/
ScalarizeMaskedMemIntrin.cpp 119 Value *Src0 = CI->getArgOperand(3);
170 Value *NewI = Builder.CreateSelect(Mask, VResult, Src0);
228 Value *NewI = Builder.CreateSelect(Mask, Phi, Src0);
384 Value *Src0 = CI->getArgOperand(3);
420 Value *NewI = Builder.CreateSelect(Mask, VResult, Src0);
479 Value *NewI = Builder.CreateSelect(Mask, Phi, Src0);
  /external/swiftshader/third_party/llvm-7.0/llvm/lib/Target/AArch64/
AArch64AdvSIMDScalarPass.cpp 299 unsigned Src0 = 0, SubReg0;
310 Src0 = MOSrc0->getReg();
312 // Src0 is going to be reused, thus, it cannot be killed anymore.
331 // Src0 is going to be reused, thus, it cannot be killed anymore.
342 if (!Src0) {
344 Src0 = MRI->createVirtualRegister(&AArch64::FPR64RegClass);
345 insertCopy(TII, MI, Src0, OrigSrc0, KillSrc0);
364 .addReg(Src0, getKillRegState(KillSrc0), SubReg0)
  /external/swiftshader/third_party/subzero/src/
IceInstMIPS32.cpp 165 auto *Src0 = llvm::cast<Constant>(getSrc(0));
166 if (auto *CR = llvm::dyn_cast<ConstantRelocatable>(Src0)) {
172 Src0->emit(Func);
183 const CfgNode *TargetFalse, Operand *Src0,
187 addSource(Src0);
191 const CfgNode *TargetFalse, Operand *Src0,
196 addSource(Src0);
    [all...]
IceConverter.cpp 346 Ice::Operand *Src0 = convertOperand(Instr, 0);
349 return Ice::InstArithmetic::create(Func.get(), Opcode, Dest, Src0, Src1);
406 Ice::Operand *Src0 = convertOperand(Instr, 0);
446 return Ice::InstIcmp::create(Func.get(), Cond, Dest, Src0, Src1);
450 Ice::Operand *Src0 = convertOperand(Instr, 0);
510 return Ice::InstFcmp::create(Func.get(), Cond, Dest, Src0, Src1);
    [all...]
IceInstX86BaseImpl.h 253 InstImpl<TraitsType>::InstX86Icmp::InstX86Icmp(Cfg *Func, Operand *Src0,
256 this->addSource(Src0);
261 InstImpl<TraitsType>::InstX86Ucomiss::InstX86Ucomiss(Cfg *Func, Operand *Src0,
264 this->addSource(Src0);
    [all...]
IceTargetLowering.h 504 Operand *Src0, Operand *Src1);
509 /// (Variable *Dest, Variable *Src0, Variable *Src1) -> Instr *.
567 auto *Src0 = thunk0();
568 return insertScalarInstruction(Res, Src0);
576 auto *Src0 = thunk0();
578 return insertScalarInstruction(Res, Src0, Src1);
586 auto *Src0 = thunk0();
589 return insertScalarInstruction(Res, Src0, Src1, Src2);
IceCfg.cpp     [all...]
IceInstARM32.cpp     [all...]
  /external/swiftshader/third_party/llvm-7.0/llvm/unittests/CodeGen/GlobalISel/
PatternMatchTest.cpp 164 unsigned Src0, Src1, Src2;
166 m_GAdd(m_Reg(Src0), m_Reg(Src1)));
168 ASSERT_EQ(Src0, Copies[0]);
176 m_GMul(m_Reg(Src0), m_Reg(Src1)));
178 ASSERT_EQ(Src0, MIBAdd->getOperand(0).getReg());
183 m_GMul(m_GAdd(m_Reg(Src0), m_Reg(Src1)), m_Reg(Src2)));
185 ASSERT_EQ(Src0, Copies[0]);
195 m_GMul(m_ICst(Cst), m_Reg(Src0)));
198 ASSERT_EQ(Src0, Copies[0]);
203 m_GSub(m_ICst(Cst), m_Reg(Src0)));
    [all...]
  /external/swiftshader/third_party/llvm-7.0/llvm/lib/Transforms/Scalar/
InferAddressSpaces.cpp 527 Constant *Src0 = CE->getOperand(1);
529 if (Src0->getType()->getPointerAddressSpace() ==
533 CE->getOperand(0), ConstantExpr::getAddrSpaceCast(Src0, TargetType),
697 Value *Src0 = Op.getOperand(1);
700 auto I = InferredAddrSpace.find(Src0);
702 I->second : Src0->getType()->getPointerAddressSpace();
708 auto *C0 = dyn_cast<Constant>(Src0);
    [all...]

Completed in 3665 milliseconds

1 2