/prebuilts/go/darwin-x86/src/cmd/compile/internal/ssa/

phiopt.go
    61: if v.Op != OpPhi {
    78: if v.Args[0].Op == OpConstBool && v.Args[1].Op == OpConstBool {
    80: ops := [2]Op{OpNot, OpCopy}
    84: f.Config.Warnl(b.Line, "converted OpPhi to %v", v.Op)
    95: if v.Args[reverse].Op == OpConstBool && v.Args[reverse].AuxInt == 1 {
    100: f.Config.Warnl(b.Line, "converted OpPhi to %v", v.Op)
    111: if v.Args[1-reverse].Op == OpConstBool && v.Args[1-reverse].AuxInt == 0 {
    116: f.Config.Warnl(b.Line, "converted OpPhi to %v", v.Op)
    128: if a0.Op != a1.Op [all...]

writebarrier.go
    37: switch v.Op {
    40: switch v.Op {
    42: v.Op = OpStore
    44: v.Op = OpMove
    47: v.Op = OpZero
    58: if v.Op == OpSB {
    61: if v.Op == OpSP {
    88: if w.Op == OpStoreWB || w.Op == OpMoveWB || w.Op == OpMoveWBVolatile || w.Op == OpZeroWB [all...]

zcse.go
    19: if opcodeTable[v.Op].argLen == 0 {
    20: key := vkey{v.Op, keyFor(v), v.Aux, v.Type}
    48: if opcodeTable[a.Op].argLen == 0 {
    49: key := vkey{a.Op, keyFor(a), a.Aux, a.Type}
    61: op Op
    70: switch v.Op {

rewriteARM.go
    10: switch v.Op {
    744: if v_0.Op != OpARMMOVWconst {
    762: if v_1.Op != OpARMMOVWconst {
    779: if v_1.Op != OpARMSLLconst {
    797: if v_0.Op != OpARMSLLconst {
    817: if v_1.Op != OpARMSRLconst {
    835: if v_0.Op != OpARMSRLconst {
    855: if v_1.Op != OpARMSRAconst {
    873: if v_0.Op != OpARMSRAconst {
    893: if v_1.Op != OpARMSLL [all...]

/prebuilts/go/linux-x86/src/cmd/compile/internal/ssa/

lower.go
    20: if !opcodeTable[v.Op].generic {
    23: switch v.Op {
    32: s := "not lowered: " + v.String() + ", " + v.Op.String() + " " + v.Type.SimpleString()

phiopt.go
    61: if v.Op != OpPhi {
    78: if v.Args[0].Op == OpConstBool && v.Args[1].Op == OpConstBool {
    80: ops := [2]Op{OpNot, OpCopy}
    84: f.Config.Warnl(b.Line, "converted OpPhi to %v", v.Op)
    95: if v.Args[reverse].Op == OpConstBool && v.Args[reverse].AuxInt == 1 {
    100: f.Config.Warnl(b.Line, "converted OpPhi to %v", v.Op)
    111: if v.Args[1-reverse].Op == OpConstBool && v.Args[1-reverse].AuxInt == 0 {
    116: f.Config.Warnl(b.Line, "converted OpPhi to %v", v.Op)
    128: if a0.Op != a1.Op [all...]

writebarrier.go
    37: switch v.Op {
    40: switch v.Op {
    42: v.Op = OpStore
    44: v.Op = OpMove
    47: v.Op = OpZero
    58: if v.Op == OpSB {
    61: if v.Op == OpSP {
    88: if w.Op == OpStoreWB || w.Op == OpMoveWB || w.Op == OpMoveWBVolatile || w.Op == OpZeroWB [all...]

zcse.go
    19: if opcodeTable[v.Op].argLen == 0 {
    20: key := vkey{v.Op, keyFor(v), v.Aux, v.Type}
    48: if opcodeTable[a.Op].argLen == 0 {
    49: key := vkey{a.Op, keyFor(a), a.Aux, a.Type}
    61: op Op
    70: switch v.Op {

rewriteARM.go
    10: switch v.Op {
    744: if v_0.Op != OpARMMOVWconst {
    762: if v_1.Op != OpARMMOVWconst {
    779: if v_1.Op != OpARMSLLconst {
    797: if v_0.Op != OpARMSLLconst {
    817: if v_1.Op != OpARMSRLconst {
    835: if v_0.Op != OpARMSRLconst {
    855: if v_1.Op != OpARMSRAconst {
    873: if v_0.Op != OpARMSRAconst {
    893: if v_1.Op != OpARMSLL [all...]

/external/llvm/lib/Target/AMDGPU/

AMDGPUISelLowering.cpp
    687: SDValue AMDGPUTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
    692: SDLoc(Op).getDebugLoc());
    694: auto Ops = {DAG.getConstant(0, SDLoc(), Op.getValueType()), Op.getOperand(0)};
    698: SDValue AMDGPUTargetLowering::LowerOperation(SDValue Op,
    700: switch (Op.getOpcode()) {
    702: Op->dump(&DAG);
    706: case ISD::SIGN_EXTEND_INREG: return LowerSIGN_EXTEND_INREG(Op, DAG);
    707: case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG);
    708: case ISD::EXTRACT_SUBVECTOR: return LowerEXTRACT_SUBVECTOR(Op, DAG) [all...]

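Both this file and the MSP430 listing below hit the same idiom: LowerOperation is a single switch over the node's opcode that forwards each custom-lowered operation to its own Lower* routine. A minimal sketch of that dispatch shape, with hypothetical Opcode and Node types standing in for the SelectionDAG classes:

    #include <stdexcept>

    // Hypothetical stand-ins for the SelectionDAG types.
    enum class Opcode { SetCC, SignExtend, ConcatVectors };
    struct Node { Opcode Opc; };

    static Node lowerSetCC(Node N) { /* rewrite into target nodes */ return N; }
    static Node lowerSignExtend(Node N) { return N; }

    // One switch, one handler per opcode the target marked as custom.
    Node lowerOperation(Node N) {
      switch (N.Opc) {
      case Opcode::SetCC:      return lowerSetCC(N);
      case Opcode::SignExtend: return lowerSignExtend(N);
      default:
        throw std::runtime_error("unexpected opcode to lower");
      }
    }
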
/external/swiftshader/third_party/LLVM/lib/Target/MSP430/

MSP430ISelLowering.cpp
    179: SDValue MSP430TargetLowering::LowerOperation(SDValue Op,
    181: switch (Op.getOpcode()) {
    184: case ISD::SRA: return LowerShifts(Op, DAG);
    185: case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG);
    186: case ISD::BlockAddress: return LowerBlockAddress(Op, DAG);
    187: case ISD::ExternalSymbol: return LowerExternalSymbol(Op, DAG);
    188: case ISD::SETCC: return LowerSETCC(Op, DAG);
    189: case ISD::BR_CC: return LowerBR_CC(Op, DAG);
    190: case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG);
    191: case ISD::SIGN_EXTEND: return LowerSIGN_EXTEND(Op, DAG) [all...]

/external/clang/test/CodeGenCXX/

visibility-inlines-hidden.cpp
    141: template <int> inline void Op();
    144: Op<0>();
    147: template <int Idx_nocapture> void Op() {

/external/eigen/unsupported/Eigen/CXX11/src/TensorSymmetry/

StaticSymmetry.h
    142: template<typename Op, typename RV, std::size_t SGNumIndices, typename Index, std::size_t NumIndices, typename... Args>
    147: initial = Op::run(tensor_static_symgroup_index_permute(idx, typename first::indices(), remaining_indices()), first::flags, initial, std::forward<Args>(args)...);
    148: return tensor_static_symgroup_do_apply<internal::type_list<next...>>::template run<Op, RV, SGNumIndices>(idx, initial, args...);
    151: template<typename Op, typename RV, std::size_t SGNumIndices, typename Index, typename... Args>
    155: initial = Op::run(tensor_static_symgroup_index_permute(idx, typename first::indices()), first::flags, initial, std::forward<Args>(args)...);
    156: return tensor_static_symgroup_do_apply<internal::type_list<next...>>::template run<Op, RV, SGNumIndices>(idx, initial, args...);
    163: template<typename Op, typename RV, std::size_t SGNumIndices, typename Index, std::size_t NumIndices, typename... Args>
    170: template<typename Op, typename RV, std::size_t SGNumIndices, typename Index, typename... Args>
    196: template<typename Op, typename RV, typename Index, std::size_t N, typename... Args>
    199: return internal::tensor_static_symgroup_do_apply<ge>::template run<Op, RV, NumIndices>(idx, initial, args...) [all...]

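The tensor_static_symgroup_do_apply hits are a compile-time fold over a type list: apply Op::run to the first element, then recurse on the remaining elements, with an empty-list specialization as the base case. A stripped-down sketch of that structure; type_list, the E1/E2 elements, and the Sum op here are illustrative stand-ins, not Eigen's definitions:

    // Hypothetical minimal type list plus a head/tail fold, mirroring the
    // peel-and-recurse structure of tensor_static_symgroup_do_apply.
    template <typename... Ts> struct type_list {};

    template <typename List> struct do_apply;

    template <typename First, typename... Rest>
    struct do_apply<type_list<First, Rest...>> {
      template <typename Op, typename RV>
      static RV run(RV initial) {
        initial = Op::run(First::value, initial);                       // fold in the head
        return do_apply<type_list<Rest...>>::template run<Op>(initial); // recurse on the tail
      }
    };

    template <> struct do_apply<type_list<>> {
      template <typename Op, typename RV>
      static RV run(RV initial) { return initial; }                     // base case: empty list
    };

    struct E1 { static constexpr int value = 2; };
    struct E2 { static constexpr int value = 5; };
    struct Sum { static int run(int v, int acc) { return acc + v; } };

    int total = do_apply<type_list<E1, E2>>::run<Sum>(0);               // total == 7

The whole recursion unfolds at compile time, so the generated code is just the chain of Op::run calls with no runtime list traversal.
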
/external/llvm/lib/CodeGen/AsmPrinter/

DwarfExpression.h
    41: virtual void EmitOp(uint8_t Op, const char *Comment = nullptr) = 0;
    129: void EmitOp(uint8_t Op, const char *Comment = nullptr) override;
    144: void EmitOp(uint8_t Op, const char *Comment = nullptr) override;

/prebuilts/go/darwin-x86/src/cmd/vendor/golang.org/x/arch/x86/x86asm/

intel.go
    22: switch inst.Op {
    24: if inst.Op == MOV && (inst.Opcode>>16)&0xFFFC != 0x0F20 {
    34: switch inst.Op {
    56: switch inst.Op {
    82: if inst.Op != 0 {
    94: if isLoop[inst.Op] || inst.Op == JCXZ || inst.Op == JECXZ || inst.Op == JRCXZ {
    102: switch inst.Op { [all...]

/prebuilts/go/linux-x86/src/cmd/vendor/golang.org/x/arch/x86/x86asm/

intel.go
    22: switch inst.Op {
    24: if inst.Op == MOV && (inst.Opcode>>16)&0xFFFC != 0x0F20 {
    34: switch inst.Op {
    56: switch inst.Op {
    82: if inst.Op != 0 {
    94: if isLoop[inst.Op] || inst.Op == JCXZ || inst.Op == JECXZ || inst.Op == JRCXZ {
    102: switch inst.Op { [all...]

/external/llvm/include/llvm/IR/

OperandTraits.h
    117: template <int> inline Use &Op(); \
    118: template <int> inline const Use &Op() const; \
    150: template <int Idx_nocapture> Use &CLASS::Op() { \
    153: template <int Idx_nocapture> const Use &CLASS::Op() const { \

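These macro fragments declare and define the per-class Op<N>() operand accessors (the trailing backslashes are the macro's line continuations). A hedged approximation of what the expansion gives a class with two operands; Use and the fixed operand count here are stand-ins for LLVM's types, not its real definitions:

    // Hypothetical approximation of the transparent operand accessor pattern.
    struct Use { void *Val = nullptr; };     // stand-in for llvm::Use

    class BinaryNode {
      Use Operands[2];                       // operand slots owned by the node
    public:
      template <int Idx> Use &Op() {
        static_assert(Idx >= 0 && Idx < 2, "operand index out of range");
        return Operands[Idx];
      }
      template <int Idx> const Use &Op() const {
        static_assert(Idx >= 0 && Idx < 2, "operand index out of range");
        return Operands[Idx];
      }
    };

    // Usage: node.Op<0>() and node.Op<1>() name operands at compile time,
    // so a bad index fails to compile instead of failing at runtime.
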
/external/llvm/lib/Target/NVPTX/

NVPTXPeephole.cpp
    82: auto &Op = Root.getOperand(1);
    85: if (Op.isReg() && TargetRegisterInfo::isVirtualRegister(Op.getReg())) {
    86: GenericAddrDef = MRI.getUniqueVRegDef(Op.getReg());

/external/llvm/lib/Target/X86/

X86InstrInfo.h
    121: inline static bool isLeaMem(const MachineInstr &MI, unsigned Op) {
    122: if (MI.getOperand(Op).isFI())
    124: return Op + X86::AddrSegmentReg <= MI.getNumOperands() &&
    125: MI.getOperand(Op + X86::AddrBaseReg).isReg() &&
    126: isScale(MI.getOperand(Op + X86::AddrScaleAmt)) &&
    127: MI.getOperand(Op + X86::AddrIndexReg).isReg() &&
    128: (MI.getOperand(Op + X86::AddrDisp).isImm() ||
    129: MI.getOperand(Op + X86::AddrDisp).isGlobal() ||
    130: MI.getOperand(Op + X86::AddrDisp).isCPI() ||
    131: MI.getOperand(Op + X86::AddrDisp).isJTI()) [all...]

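isLeaMem validates the fixed five-slot x86 memory reference that starts at operand index Op: base register, scale immediate, index register, displacement, segment. A self-contained sketch of the same shape check over a mock operand list; the Addr* offsets mirror LLVM's X86 layout, while the Operand type is hypothetical:

    #include <vector>

    // Hypothetical operand model; the real code uses MachineOperand.
    enum class Kind { Reg, Imm };
    struct Operand { Kind K; long long Val; };

    // Offsets of the five memory-reference slots, as in LLVM's X86::Addr*.
    enum { AddrBaseReg = 0, AddrScaleAmt = 1, AddrIndexReg = 2,
           AddrDisp = 3, AddrSegmentReg = 4 };

    static bool isScale(const Operand &O) {
      return O.K == Kind::Imm &&
             (O.Val == 1 || O.Val == 2 || O.Val == 4 || O.Val == 8);
    }

    // True if Ops[Op .. Op+3] look like a well-formed LEA memory reference.
    bool looksLikeLeaMem(const std::vector<Operand> &Ops, unsigned Op) {
      // LEA-style references carry no segment operand, so only the four
      // slots before AddrSegmentReg need to exist.
      return Op + AddrSegmentReg <= Ops.size() &&
             Ops[Op + AddrBaseReg].K == Kind::Reg &&
             isScale(Ops[Op + AddrScaleAmt]) &&
             Ops[Op + AddrIndexReg].K == Kind::Reg &&
             Ops[Op + AddrDisp].K == Kind::Imm;
    }
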
/external/skia/include/pathops/

SkPathOps.h
    23: kDifference_SkPathOp, //!< subtract the op path from the first path
    27: kReverseDifference_SkPathOp, //!< subtract the first path from the op path
    30: /** Set this path to the result of applying the Op to this path and the
    31: specified path: this = (this op operand).
    41: @param op The operator to apply.
    46: bool SK_API Op(const SkPath& one, const SkPath& two, SkPathOp op, SkPath* result);
    75: path is added, so the result of a single add is (emptyPath OP path).

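The declaration at line 46 is the one-shot boolean path operation. A short usage sketch, assuming a standard Skia checkout (exact include paths vary between releases):

    #include "SkPath.h"
    #include "SkPathOps.h"

    bool subtractCircleFromRect(SkPath* result) {
        SkPath rect, circle;
        rect.addRect(SkRect::MakeLTRB(0, 0, 100, 100));
        circle.addCircle(100, 100, 50);
        // result = rect minus the area covered by circle; Op() returns
        // false if the operation could not be computed.
        return Op(rect, circle, kDifference_SkPathOp, result);
    }
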
/external/skia/src/core/

SkAAClip.h
    43: bool op(const SkAAClip&, const SkAAClip&, SkRegion::Op);
    45: // Helpers for op()
    46: bool op(const SkIRect&, SkRegion::Op);
    47: bool op(const SkRect&, SkRegion::Op, bool doAA);
    48: bool op(const SkAAClip&, SkRegion::Op);

/external/swiftshader/third_party/LLVM/include/llvm/

OperandTraits.h
    117: template <int> inline Use &Op(); \
    118: template <int> inline const Use &Op() const; \
    150: template <int Idx_nocapture> Use &CLASS::Op() { \
    153: template <int Idx_nocapture> const Use &CLASS::Op() const { \

/external/swiftshader/third_party/LLVM/lib/VMCore/

Metadata.cpp
    82: static MDNodeOperand *getOperandPtr(MDNode *N, unsigned Op) {
    84: assert(Op <= N->getNumOperands() && "Invalid operand number");
    85: return reinterpret_cast<MDNodeOperand*>(N+1)+Op;
    97: for (MDNodeOperand *Op = getOperandPtr(this, 0), *E = Op+NumOperands;
    98: Op != E; ++Op, ++i)
    99: new (Op) MDNodeOperand(Vals[i], this);
    115: for (MDNodeOperand *Op = getOperandPtr(this, 0), *E = Op+NumOperands [all...]

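getOperandPtr returns reinterpret_cast<MDNodeOperand*>(N+1)+Op because the operands are co-allocated immediately after the MDNode object and constructed with placement new, as the loop at lines 97-99 shows. A minimal sketch of that co-allocation trick with hypothetical Node and Operand types:

    #include <new>
    #include <cstdlib>

    struct Operand {
      int Val;
      explicit Operand(int V) : Val(V) {}
    };

    struct Node {
      unsigned NumOperands;
      // The operand array lives immediately after the Node object itself.
      Operand *op_begin() { return reinterpret_cast<Operand *>(this + 1); }
    };

    Node *createNode(const int *Vals, unsigned N) {
      // One allocation holds the node followed by its operand array.
      void *Mem = std::malloc(sizeof(Node) + N * sizeof(Operand));
      Node *NodePtr = new (Mem) Node{N};
      Operand *Op = NodePtr->op_begin();
      for (unsigned i = 0; i != N; ++i)
        new (Op + i) Operand(Vals[i]);   // placement-new each operand in place
      // Note: tearing this down requires manual destructor calls plus free().
      return NodePtr;
    }

One allocation instead of two keeps the node and its operands adjacent in memory, which is the point of the pattern.
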
/frameworks/base/services/core/java/com/android/server/wm/

StrictModeFlash.java
    80: c.clipRect(new Rect(0, 0, dw, mThickness), Region.Op.REPLACE);
    83: c.clipRect(new Rect(0, 0, mThickness, dh), Region.Op.REPLACE);
    86: c.clipRect(new Rect(dw - mThickness, 0, dw, dh), Region.Op.REPLACE);
    89: c.clipRect(new Rect(0, dh - mThickness, dw, dh), Region.Op.REPLACE);

/external/llvm/include/llvm/Transforms/Utils/

Local.h
    205: Value *Op = *i;
    207: if (Constant *OpC = dyn_cast<Constant>(Op)) {
    233: if (Op->getType() != IntPtrTy)
    234: Op = Builder->CreateIntCast(Op, IntPtrTy, true, Op->getName()+".c");
    237: Op = Builder->CreateMul(Op, ConstantInt::get(IntPtrTy, Size),
    242: Result = Builder->CreateAdd(Op, Result, GEP->getName()+".offs");

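These Local.h lines are the variable-index arm of GEP offset emission: cast each index to pointer width, multiply it by the size of the element it steps over, and add it into a running total. The same arithmetic in a plain, self-contained form; the function name and the pair-of-ints input are illustrative only:

    #include <cstdint>
    #include <utility>
    #include <vector>

    // Each pair is (index value, byte size of the element that index steps over).
    int64_t gepByteOffset(const std::vector<std::pair<int64_t, int64_t>> &IdxAndSize) {
      int64_t Result = 0;
      for (const auto &P : IdxAndSize) {
        int64_t Op = P.first;     // index value, already pointer-width
        int64_t Size = P.second;  // element size in bytes
        Op = Op * Size;           // mirrors Builder->CreateMul(Op, Size)
        Result = Op + Result;     // mirrors Builder->CreateAdd(Op, Result)
      }
      return Result;
    }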