/external/eigen/unsupported/Eigen/CXX11/src/Tensor/
  TensorSyclExtractFunctors.h:
    47  template <template <class, class> class UnaryCategory, typename OP, typename RHSExpr, typename Dev>
    48  struct FunctorExtractor<TensorEvaluator<const UnaryCategory<OP, RHSExpr>, Dev> > {
    50  OP func;
    51  FunctorExtractor(const TensorEvaluator<const UnaryCategory<OP, RHSExpr>, Dev>& expr)
    56  template <template <class, class> class UnaryCategory, typename OP, typename RHSExpr, typename Dev>
    57  struct FunctorExtractor<TensorEvaluator<UnaryCategory<OP, RHSExpr>, Dev> >
    58  : FunctorExtractor<TensorEvaluator<const UnaryCategory<OP, RHSExpr>, Dev> >{};
    62  template <template<class, class, class> class BinaryCategory, typename OP, typename LHSExpr, typename RHSExpr, typename Dev>
    63  struct FunctorExtractor<TensorEvaluator<const BinaryCategory<OP, LHSExpr, RHSExpr>, Dev> > {
    66  OP func [all...]
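The Eigen hits above show the SYCL functor-extraction idiom: the partial specialization for the const evaluator type is the one that actually stores the functor (OP func), while the non-const specialization simply inherits from it. Below is a rough, self-contained sketch of that const/non-const forwarding pattern; Evaluator, UnaryOp, Negate and Cpu are toy stand-ins, not Eigen's types, and the inherited constructor is added only so the sketch compiles on its own.

    #include <iostream>

    struct Negate { int operator()(int v) const { return -v; } };
    struct Cpu {};

    // Toy stand-ins for Eigen's expression and evaluator templates.
    template <typename OP, typename RHS> struct UnaryOp {};
    template <typename Expr, typename Dev> struct Evaluator;

    template <typename OP, typename RHS, typename Dev>
    struct Evaluator<const UnaryOp<OP, RHS>, Dev> {
      OP functor() const { return OP(); }
    };
    template <typename OP, typename RHS, typename Dev>
    struct Evaluator<UnaryOp<OP, RHS>, Dev> : Evaluator<const UnaryOp<OP, RHS>, Dev> {};

    // Primary template is only declared: unsupported evaluators fail to extract.
    template <typename Ev> struct FunctorExtractor;

    // The const specialization does the real work: it copies the functor out.
    template <template <class, class> class UnaryCategory, typename OP, typename RHS, typename Dev>
    struct FunctorExtractor<Evaluator<const UnaryCategory<OP, RHS>, Dev> > {
      OP func;
      FunctorExtractor(const Evaluator<const UnaryCategory<OP, RHS>, Dev>& e) : func(e.functor()) {}
    };

    // The non-const specialization just forwards to the const one, as in the hit above.
    template <template <class, class> class UnaryCategory, typename OP, typename RHS, typename Dev>
    struct FunctorExtractor<Evaluator<UnaryCategory<OP, RHS>, Dev> >
        : FunctorExtractor<Evaluator<const UnaryCategory<OP, RHS>, Dev> > {
      using FunctorExtractor<Evaluator<const UnaryCategory<OP, RHS>, Dev> >::FunctorExtractor;
    };

    int main() {
      Evaluator<UnaryOp<Negate, int>, Cpu> ev;
      FunctorExtractor<Evaluator<UnaryOp<Negate, int>, Cpu> > fe(ev);
      std::cout << fe.func(7) << "\n";  // prints -7
    }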
/external/llvm/lib/Target/AMDGPU/MCTargetDesc/
  SIMCCodeEmitter.cpp:
    206  // Check for additional literals in SRC0/1/2 (Op 1/2/3)
    217  const MCOperand &Op = MI.getOperand(i);
    218  if (getLitEncoding(Op, RC.getSize()) != 255)
    224  if (Op.isImm())
    225  Imm = Op.getImm();
    226  else if (Op.isExpr()) {
    227  if (const MCConstantExpr *C = dyn_cast<MCConstantExpr>(Op.getExpr()))
    230  } else if (!Op.isExpr()) // Exprs will be replaced with a fixup value.
/external/llvm/lib/Target/Mips/InstPrinter/
  MipsInstPrinter.cpp:
    127  const MCOperand &Op = MI->getOperand(OpNo);
    128  if (Op.isReg()) {
    129  printRegName(O, Op.getReg());
    133  if (Op.isImm()) {
    134  O << formatImm(Op.getImm());
    138  assert(Op.isExpr() && "unknown operand kind in printOperand");
    139  Op.getExpr()->print(O, &MAI, true);
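Both MipsInstPrinter hits (here and the llvm-7.0 copy further down) show the usual operand-printing dispatch: register, then immediate, then assert that whatever remains is an expression. A minimal, self-contained sketch of that three-way dispatch follows; Operand is a toy type, not the real MCOperand.

    #include <cassert>
    #include <iostream>
    #include <string>

    // Toy operand: register, immediate, or symbolic expression.
    struct Operand {
      enum Kind { Reg, Imm, Expr } kind;
      int reg = 0;
      long imm = 0;
      std::string expr;
    };

    void printOperand(std::ostream& os, const Operand& op) {
      if (op.kind == Operand::Reg) { os << "$r" << op.reg; return; }
      if (op.kind == Operand::Imm) { os << op.imm; return; }
      assert(op.kind == Operand::Expr && "unknown operand kind in printOperand");
      os << op.expr;
    }

    int main() {
      printOperand(std::cout, Operand{Operand::Reg, 4, 0, ""});
      std::cout << ", ";
      printOperand(std::cout, Operand{Operand::Imm, 0, 16, ""});
      std::cout << ", ";
      printOperand(std::cout, Operand{Operand::Expr, 0, 0, "label+8"});
      std::cout << "\n";  // $r4, 16, label+8
    }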
/external/skia/src/gpu/
  GrAuditTrail.cpp:
    14  void GrAuditTrail::addOp(const GrOp* op, GrRenderTargetProxy::UniqueID proxyID) {
    16  Op* auditOp = new Op;
    18  auditOp->fName = op->name();
    19  auditOp->fBounds = op->bounds();
    46  // We use the op pointer as a key to find the OpNode we are 'glomming' ops onto
    47  fIDLookup.set(op->uniqueID(), auditOp->fOpListID);
    49  opNode->fBounds = op->bounds();
    55  // Look up the op we are going to glom onto
    62  // Look up the op which will be glomme [all...]
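In the GrAuditTrail hit, the op's unique ID is used as a hash-map key so that later ops can be "glommed" (batched) onto an already-recorded node, whose bounds then grow to cover both. The sketch below mimics that keyed-grouping idea with standard containers; Rect, OpNode and AuditTrail are simplified stand-ins, not Skia's types.

    #include <algorithm>
    #include <cstdint>
    #include <iostream>
    #include <string>
    #include <unordered_map>
    #include <vector>

    struct Rect { float l, t, r, b; };
    Rect join(const Rect& a, const Rect& b) {
      return {std::min(a.l, b.l), std::min(a.t, b.t), std::max(a.r, b.r), std::max(a.b, b.b)};
    }

    struct OpNode {                     // one node per batch of merged ops
      std::vector<std::string> names;
      Rect bounds{0, 0, 0, 0};
    };

    struct AuditTrail {
      std::vector<OpNode> nodes;
      std::unordered_map<uint32_t, size_t> idLookup;  // op unique ID -> node index

      void addOp(uint32_t id, const std::string& name, const Rect& bounds) {
        nodes.push_back({{name}, bounds});
        idLookup[id] = nodes.size() - 1;
      }
      // Merge ("glom") op `child` onto the node that already holds op `target`.
      void batchOps(uint32_t target, uint32_t child) {
        OpNode& dst = nodes[idLookup[target]];
        OpNode& src = nodes[idLookup[child]];
        dst.names.insert(dst.names.end(), src.names.begin(), src.names.end());
        src.names.clear();
        dst.bounds = join(dst.bounds, src.bounds);
        idLookup[child] = idLookup[target];
      }
    };

    int main() {
      AuditTrail t;
      t.addOp(1, "FillRectOp", {0, 0, 10, 10});
      t.addOp(2, "FillRectOp", {5, 5, 20, 20});
      t.batchOps(1, 2);
      std::cout << t.nodes[0].names.size() << " ops, right edge " << t.nodes[0].bounds.r << "\n";
    }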
/external/skqp/src/gpu/
  GrAuditTrail.cpp:
    14  void GrAuditTrail::addOp(const GrOp* op, GrRenderTargetProxy::UniqueID proxyID) {
    16  Op* auditOp = new Op;
    18  auditOp->fName = op->name();
    19  auditOp->fBounds = op->bounds();
    46  // We use the op pointer as a key to find the OpNode we are 'glomming' ops onto
    47  fIDLookup.set(op->uniqueID(), auditOp->fOpListID);
    49  opNode->fBounds = op->bounds();
    55  // Look up the op we are going to glom onto
    62  // Look up the op which will be glomme [all...]
/external/spirv-llvm/lib/SPIRV/libSPIRV/
  SPIRVInstruction.cpp:
    49  SPIRVInstruction::SPIRVInstruction(unsigned TheWordCount, Op TheOC,
    56  SPIRVInstruction::SPIRVInstruction(unsigned TheWordCount, Op TheOC,
    63  SPIRVInstruction::SPIRVInstruction(unsigned TheWordCount, Op TheOC,
    69  SPIRVInstruction::SPIRVInstruction(unsigned TheWordCount, Op TheOC,
    75  SPIRVInstruction::SPIRVInstruction(unsigned TheWordCount, Op TheOC,
    138  isSpecConstantOpAllowedOp(Op OC) {
    209  "Op code not allowed for OpSpecConstantOp");
    222  auto OC = static_cast<Op>(Ops[0]);
    224  "Op code not allowed for OpSpecConstantOp");
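The SPIRVInstruction.cpp hits revolve around validating the opcode wrapped by OpSpecConstantOp against an allow-list before decoding it (isSpecConstantOpAllowedOp, plus the static_cast of Ops[0]). A minimal sketch of that kind of check is below; the opcode values are invented for illustration and are not real SPIR-V numbers.

    #include <cassert>
    #include <cstdint>
    #include <vector>

    enum Op : uint16_t { OpIAdd = 1, OpISub = 2, OpUndef = 3, OpSpecConstantOp = 4 };

    // Allow-list check, analogous to isSpecConstantOpAllowedOp().
    bool isSpecConstantOpAllowedOp(Op oc) {
      static const std::vector<Op> allowed = {OpIAdd, OpISub};
      for (Op a : allowed)
        if (a == oc)
          return true;
      return false;
    }

    // The first word of the wrapped operation is its opcode, as in the hit above.
    Op wrappedOpcode(const std::vector<uint32_t>& ops) {
      Op oc = static_cast<Op>(ops.at(0));
      assert(isSpecConstantOpAllowedOp(oc) && "Op code not allowed for OpSpecConstantOp");
      return oc;
    }

    int main() {
      std::vector<uint32_t> words = {OpIAdd, 7, 8};
      return wrappedOpcode(words) == OpIAdd ? 0 : 1;
    }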
/external/swiftshader/third_party/llvm-7.0/llvm/lib/Analysis/
  AssumptionCache.cpp:
    69  Value *Op;
    70  if (match(I, m_BitCast(m_Value(Op))) ||
    71  match(I, m_PtrToInt(m_Value(Op))) ||
    72  match(I, m_Not(m_Value(Op)))) {
    73  if (isa<Instruction>(Op) || isa<Argument>(Op))
    74  Affected.push_back(Op);
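The AssumptionCache hit peels simple single-operand wrappers (bitcast, ptrtoint, not) off a value and records the underlying operand as "affected". A standalone sketch of that peel-and-record shape follows, using a toy Node type rather than LLVM's PatternMatch helpers.

    #include <iostream>
    #include <string>
    #include <vector>

    struct Node {
      enum Kind { BitCast, PtrToInt, Not, Leaf } kind;
      const Node* operand = nullptr;   // single operand for the wrapper kinds
      std::string name;                // only meaningful for leaves
    };

    // Record the value underneath a transparent single-operand wrapper.
    void findAffected(const Node* n, std::vector<const Node*>& affected) {
      if (n->kind == Node::BitCast || n->kind == Node::PtrToInt || n->kind == Node::Not) {
        if (n->operand)                // the real code also checks the operand's kind
          affected.push_back(n->operand);
      }
    }

    int main() {
      Node leaf{Node::Leaf, nullptr, "%x"};
      Node cast{Node::BitCast, &leaf, ""};
      std::vector<const Node*> affected;
      findAffected(&cast, affected);
      std::cout << affected.size() << " affected: " << affected[0]->name << "\n";
    }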
/external/swiftshader/third_party/llvm-7.0/llvm/lib/Target/Mips/InstPrinter/
  MipsInstPrinter.cpp:
    127  const MCOperand &Op = MI->getOperand(OpNo);
    128  if (Op.isReg()) {
    129  printRegName(O, Op.getReg());
    133  if (Op.isImm()) {
    134  O << formatImm(Op.getImm());
    138  assert(Op.isExpr() && "unknown operand kind in printOperand");
    139  Op.getExpr()->print(O, &MAI, true);
/external/tensorflow/tensorflow/go/
  operation_test.go:
    37  return output.Op, nil
    43  op, err := createGraphAndOp()
    48  if got, want := op.Name(), "my_placeholder"; got != want {
    51  if got, want := op.Type(), "Placeholder"; got != want {
    66  // The ShapeN op takes a list of tensors as input and a list as output.
    67  op, err := graph.AddOperation(OpSpec{
    74  n, err := op.OutputListSize("output")
    81  if got, want := op.NumOutputs(), 2; got != want {
    183  addOp := add.Op
    205  consumers := []*Operation{a.Op, b.Op [all...]
/external/llvm/lib/Target/ARM/MCTargetDesc/
  ARMMCCodeEmitter.cpp:
    264  unsigned getCCOutOpValue(const MCInst &MI, unsigned Op,
    269  return MI.getOperand(Op).getReg() == ARM::CPSR;
    273  unsigned getSOImmOpValue(const MCInst &MI, unsigned Op,
    277  const MCOperand &MO = MI.getOperand(Op);
    307  unsigned getModImmOpValue(const MCInst &MI, unsigned Op,
    310  const MCOperand &MO = MI.getOperand(Op);
    326  unsigned getT2SOImmOpValue(const MCInst &MI, unsigned Op,
    329  unsigned SoImm = MI.getOperand(Op).getImm();
    346  unsigned getSORegRegOpValue(const MCInst &MI, unsigned Op,
    349  unsigned getSORegImmOpValue(const MCInst &MI, unsigned Op, [all...]
/external/clang/lib/ASTMatchers/Dynamic/
  VariantValue.cpp:
    61  DynTypedMatcher::VariadicOperator Op,
    75  return DynTypedMatcher::constructVariadic(Op, NodeKind, DynMatchers);
    179  VariadicOpPayload(DynTypedMatcher::VariadicOperator Op,
    181  : Op(Op), Args(std::move(Args)) {}
    199  return Ops.constructVariadicOperator(Op, Args);
    212  const DynTypedMatcher::VariadicOperator Op;
    228  DynTypedMatcher::VariadicOperator Op,
    230  return VariantMatcher(new VariadicOpPayload(Op, std::move(Args)));
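VariantValue.cpp stores the variadic operator together with its argument matchers (VariadicOpPayload) and only builds the combined matcher when it is requested. The sketch below reproduces that "operator plus args, construct later" shape with plain predicates; it is not the clang AST-matcher API.

    #include <functional>
    #include <iostream>
    #include <vector>

    enum class VariadicOperator { AllOf, AnyOf };

    using Pred = std::function<bool(int)>;

    // Payload: remember the operator and the argument predicates...
    struct VariadicOpPayload {
      VariadicOperator Op;
      std::vector<Pred> Args;
    };

    // ...and construct the combined predicate only when it is actually needed.
    Pred constructVariadic(const VariadicOpPayload& p) {
      return [p](int v) {
        bool all = true, any = false;
        for (const Pred& a : p.Args) { bool r = a(v); all = all && r; any = any || r; }
        return p.Op == VariadicOperator::AllOf ? all : any;
      };
    }

    int main() {
      VariadicOpPayload payload{VariadicOperator::AnyOf,
                                {[](int v) { return v > 10; }, [](int v) { return v % 2 == 0; }}};
      Pred m = constructVariadic(payload);
      std::cout << m(4) << " " << m(3) << "\n";  // 1 0
    }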
/external/llvm/lib/IR/
  Instructions.cpp:
    115  Op<-1>().set(nullptr);
    134  if (NumOps < 2) NumOps = 2; // 2 op PHI nodes are VERY common.
    253  Op<-1>() = Func;
    279  Op<-1>() = Func;
    643  Op<-3>() = Fn;
    644  Op<-2>() = IfNormal;
    645  Op<-1>() = IfException;
    812  Op<0>() = RI.Op<0>();
    821  Op<0>() = retVal [all...]
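The Instructions.cpp hits use LLVM's Op<N>() accessors with negative indices; operands there are laid out so that negative indices count back from the end of the operand list, which is why Op<-1>() names the last operand (for example the callee of a call). A toy sketch of end-relative operand access under that assumption follows; it does not model LLVM's actual Use machinery.

    #include <cassert>
    #include <iostream>
    #include <vector>

    struct Value { const char* name; };

    struct User {
      std::vector<Value*> operands;  // stand-in for the co-allocated operand array

      // A negative index counts from the end, mirroring Op<-1>() in the hits above.
      Value*& op(int i) {
        size_t idx = i >= 0 ? static_cast<size_t>(i)
                            : operands.size() - static_cast<size_t>(-i);
        assert(idx < operands.size() && "operand index out of range");
        return operands[idx];
      }
    };

    int main() {
      Value a{"%arg"}, f{"@callee"};
      User call{{&a, &f}};
      std::cout << call.op(-1)->name << "\n";   // last operand: the callee
      call.op(-1) = nullptr;                    // e.g. Op<-1>().set(nullptr)
    }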
/external/llvm/utils/TableGen/
  AsmMatcherEmitter.cpp:
    560  [&](const AsmOperand &Op) {
    561  return Op.SrcOpName == N && Op.SubOpIdx == SubOpIdx;
    570  [&](const AsmOperand &Op) {
    571  return Op.SrcOpName == N;
    750  MatchableInfo::AsmOperand &Op);
    789  const AsmOperand &Op = AsmOperands[i];
    790  errs() << " op[" << i << "] = " << Op.Class->ClassName << " - ";
    791  errs() << '\"' << Op.Token << "\"\n" [all...]
/external/clang/lib/StaticAnalyzer/Checkers/
  IdenticalExprChecker.cpp:
    183  BinaryOperator::Opcode Op = B->getOpcode();
    185  if (BinaryOperator::isBitwiseOp(Op))
    188  if (BinaryOperator::isLogicalOp(Op))
    191  if (BinaryOperator::isComparisonOp(Op))
    201  BinaryOperator::Opcode Op = B->getOpcode();
    234  if ((Op == BO_EQ) || (Op == BO_NE)) {
    241  if ((Op == BO_EQ) || (Op == BO_NE)) {
    258  if (((Op == BO_EQ) || (Op == BO_LE) || (Op == BO_GE)) [all...]
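IdenticalExprChecker first buckets the binary opcode (bitwise, logical, comparison) and then special-cases the equality and ordering operators when both operands are identical. A compact sketch of that kind of opcode classification is below, with a reduced opcode set rather than Clang's full BinaryOperatorKind.

    #include <iostream>

    enum Opcode { BO_And, BO_Or, BO_LAnd, BO_LOr, BO_EQ, BO_NE, BO_LE, BO_GE, BO_Add };

    bool isBitwiseOp(Opcode op)    { return op == BO_And || op == BO_Or; }
    bool isLogicalOp(Opcode op)    { return op == BO_LAnd || op == BO_LOr; }
    bool isComparisonOp(Opcode op) { return op >= BO_EQ && op <= BO_GE; }

    // The checker singles out == / != and the ==, <=, >= group when both
    // operands of the comparison are textually identical.
    const char* identicalOperandNote(Opcode op) {
      if (op == BO_EQ || op == BO_NE) return "equality on identical operands";
      if (op == BO_LE || op == BO_GE) return "ordering on identical operands";
      return "no special case";
    }

    int main() {
      std::cout << isComparisonOp(BO_NE) << " " << identicalOperandNote(BO_EQ) << "\n";
    }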
/external/llvm/include/llvm/MC/
  MCExpr.h:
    340  Opcode Op;
    343  MCUnaryExpr(Opcode Op, const MCExpr *Expr)
    344  : MCExpr(MCExpr::Unary), Op(Op), Expr(Expr) {}
    350  static const MCUnaryExpr *create(Opcode Op, const MCExpr *Expr,
    370  Opcode getOpcode() const { return Op; }
    412  Opcode Op;
    415  MCBinaryExpr(Opcode Op, const MCExpr *LHS, const MCExpr *RHS)
    416  : MCExpr(MCExpr::Binary), Op(Op), LHS(LHS), RHS(RHS) { [all...]
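In MCExpr.h the opcode (Op) lives inside the unary and binary expression nodes next to their children. Below is a stripped-down sketch of that node layout, with a small evaluator added so it runs; these are toy classes, not the MC layer.

    #include <iostream>
    #include <memory>
    #include <utility>

    struct Expr {
      virtual ~Expr() = default;
      virtual long evaluate() const = 0;
    };

    struct Constant : Expr {
      long value;
      explicit Constant(long v) : value(v) {}
      long evaluate() const override { return value; }
    };

    struct UnaryExpr : Expr {
      enum Opcode { Minus, Not } Op;
      std::unique_ptr<Expr> Sub;
      UnaryExpr(Opcode op, std::unique_ptr<Expr> sub) : Op(op), Sub(std::move(sub)) {}
      long evaluate() const override {
        long v = Sub->evaluate();
        return Op == Minus ? -v : ~v;
      }
    };

    struct BinaryExpr : Expr {
      enum Opcode { Add, Sub } Op;
      std::unique_ptr<Expr> LHS, RHS;
      BinaryExpr(Opcode op, std::unique_ptr<Expr> l, std::unique_ptr<Expr> r)
          : Op(op), LHS(std::move(l)), RHS(std::move(r)) {}
      long evaluate() const override {
        long a = LHS->evaluate(), b = RHS->evaluate();
        return Op == Add ? a + b : a - b;
      }
    };

    int main() {
      BinaryExpr e(BinaryExpr::Add,
                   std::make_unique<Constant>(40),
                   std::make_unique<UnaryExpr>(UnaryExpr::Minus, std::make_unique<Constant>(-2)));
      std::cout << e.evaluate() << "\n";  // 42
    }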
/external/llvm/lib/CodeGen/SelectionDAG/
  InstrEmitter.h:
    59  unsigned getVR(SDValue Op,
    66  SDValue Op,
    77  SDValue Op,
/external/llvm/lib/Target/NVPTX/
  NVPTXISelLowering.h:
    443  SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;
    445  SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
    499  void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
    523  SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const;
    525  SDValue LowerLOAD(SDValue Op, SelectionDAG &DAG) const;
    526  SDValue LowerLOADi1(SDValue Op, SelectionDAG &DAG) const;
    528  SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG) const;
    529  SDValue LowerSTOREi1(SDValue Op, SelectionDAG &DAG) const;
    530  SDValue LowerSTOREVector(SDValue Op, SelectionDAG &DAG) const;
    532  SDValue LowerShiftRightParts(SDValue Op, SelectionDAG &DAG) const [all...]
/external/spirv-llvm/lib/SPIRV/
  SPIRVMDWalker.h:
    111  Metadata* Op = M->getOperand(I++);
    112  if (!Op)
    114  else if (auto Str = dyn_cast<MDString>(Op))
/external/swiftshader/third_party/LLVM/lib/CodeGen/SelectionDAG/
  InstrEmitter.h:
    58  unsigned getVR(SDValue Op,
    64  void AddRegisterOperand(MachineInstr *MI, SDValue Op,
    74  void AddOperand(MachineInstr *MI, SDValue Op,
/external/swiftshader/third_party/LLVM/lib/Target/Alpha/
  AlphaISelLowering.cpp:
    194  static SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) {
    195  EVT PtrVT = Op.getValueType();
    196  JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
    199  DebugLoc dl = Op.getDebugLoc();
    573  SDValue AlphaTargetLowering::LowerOperation(SDValue Op,
    575  DebugLoc dl = Op.getDebugLoc();
    576  switch (Op.getOpcode()) {
    578  case ISD::JumpTable: return LowerJumpTable(Op, DAG);
    581  unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
    586  Op.getOperand(1), Op.getOperand(2)) [all...]
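AlphaISelLowering's LowerOperation is the usual custom-lowering hook: switch on the node's opcode and hand the node to a per-opcode helper such as LowerJumpTable. A schematic sketch of that dispatch follows; the opcode enum, the SDValue stand-in and the helper bodies are invented for illustration and bear no relation to the real SelectionDAG types.

    #include <iostream>
    #include <stdexcept>

    enum class ISD { JumpTable, GlobalAddress, ConstantPool };

    struct SDValue {            // toy stand-in for a DAG node reference
      ISD opcode;
      int index;
    };

    SDValue LowerJumpTable(SDValue op)     { return {ISD::ConstantPool, op.index}; }
    SDValue LowerGlobalAddress(SDValue op) { return {ISD::ConstantPool, op.index + 100}; }

    // Custom-lowering hook: route each opcode marked "Custom" to its helper.
    SDValue LowerOperation(SDValue op) {
      switch (op.opcode) {
      case ISD::JumpTable:     return LowerJumpTable(op);
      case ISD::GlobalAddress: return LowerGlobalAddress(op);
      default: throw std::logic_error("unexpected custom lowering opcode");
      }
    }

    int main() {
      SDValue lowered = LowerOperation({ISD::JumpTable, 3});
      std::cout << lowered.index << "\n";
    }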
/external/swiftshader/third_party/LLVM/lib/Target/MBlaze/MCTargetDesc/
  MBlazeAsmBackend.cpp:
    70  static unsigned getRelaxedOpcode(unsigned Op) {
    71  switch (Op) {
    72  default: return Op;
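MBlazeAsmBackend's getRelaxedOpcode maps an instruction to its longer-range ("relaxed") form and returns the opcode unchanged when there is nothing to relax. A minimal sketch of that relaxation table as a switch is below; the opcode names are invented placeholders, not real MBlaze opcodes.

    #include <iostream>

    enum Opcode { ADDIK, ADDIK32, BRI, BRLID, ORI };

    // Return the wider form of an instruction, or the opcode itself if it
    // cannot (or need not) be relaxed, as in getRelaxedOpcode() above.
    unsigned getRelaxedOpcode(unsigned op) {
      switch (op) {
      default:    return op;       // no relaxed form: leave unchanged
      case ADDIK: return ADDIK32;
      case BRI:   return BRLID;
      }
    }

    int main() {
      std::cout << getRelaxedOpcode(ADDIK) << " " << getRelaxedOpcode(ORI) << "\n";
    }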
/external/swiftshader/third_party/llvm-7.0/llvm/include/llvm/MC/
  MCExpr.h:
    368  Opcode Op;
    371  MCUnaryExpr(Opcode Op, const MCExpr *Expr, SMLoc Loc)
    372  : MCExpr(MCExpr::Unary, Loc), Op(Op), Expr(Expr) {}
    378  static const MCUnaryExpr *create(Opcode Op, const MCExpr *Expr,
    402  Opcode getOpcode() const { return Op; }
    444  Opcode Op;
    447  MCBinaryExpr(Opcode Op, const MCExpr *LHS, const MCExpr *RHS,
    449  : MCExpr(MCExpr::Binary, Loc), Op(Op), LHS(LHS), RHS(RHS) { [all...]
/external/swiftshader/third_party/llvm-7.0/llvm/lib/CodeGen/SelectionDAG/
  InstrEmitter.h:
    59  unsigned getVR(SDValue Op,
    66  SDValue Op,
    77  SDValue Op,
/external/swiftshader/third_party/llvm-7.0/llvm/lib/Target/AMDGPU/
  AMDGPUUnifyMetadata.cpp:
    99  for (const auto &Op : MD->operands())
    100  if (std::find(All.begin(), All.end(), Op.get()) == All.end())
    101  All.push_back(Op.get());
  SIFixWWMLiveness.cpp:
    120  for (const MachineOperand &Op : MI.defs()) {
    121  if (Op.isReg()) {
    122  unsigned Reg = Op.getReg();
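The AMDGPUUnifyMetadata.cpp hit above appends each metadata operand to the combined list only if std::find does not already see it there. The same dedup-append idiom is sketched below on plain strings instead of LLVM Metadata nodes.

    #include <algorithm>
    #include <iostream>
    #include <string>
    #include <vector>

    // Append each operand of `md` to `all` unless an identical entry is already there.
    void unifyVersionMD(const std::vector<std::string>& md, std::vector<std::string>& all) {
      for (const std::string& op : md)
        if (std::find(all.begin(), all.end(), op) == all.end())
          all.push_back(op);
    }

    int main() {
      std::vector<std::string> all = {"1,2"};
      unifyVersionMD({"1,2", "2,0"}, all);
      for (const std::string& s : all) std::cout << s << " ";   // 1,2 2,0
      std::cout << "\n";
    }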