/external/llvm/lib/Target/AArch64/
  AArch64InstrInfo.td
    [all...]
  AArch64SchedA57.td
    345: // Q form - v16i8, v8i16, v4i32, v2i64
    389: def : InstRW<[A57Write_3cyc_1W], (instregex "^PMULL(v1i64|v2i64)")>;
    404: def : InstRW<[A57Write_4cyc_2X], (instregex "^[SU]SHL(v16i8|v8i16|v4i32|v2i64)")>;
    410: def : InstRW<[A57Write_5cyc_2X], (instregex "^[SU][QR]{1,2}SHL(v16i8|v8i16|v4i32|v2i64)")>;
    422: // Q form - v4i32, v2i64
    432: def : InstRW<[A57Write_9cyc_3V], (instregex "^FADDP(v4f32|v2f64|v2i64)")>;
    437: def : InstRW<[A57Write_5cyc_2V], (instregex "^(FACGE|FACGT|FCMEQ|FCMGE|FCMGT|FCMLE|FCMLT)(v4f32|v2f64|v4i32|v2i64)")>;
    444: def : InstRW<[A57Write_5cyc_2V], (instregex "^[FVSU]CVT([AMNPZ][SU])?(_Int)?(v4f32|v2f64|v4i32|v2i64)")>;
    468: def : InstRW<[A57Write_9cyc_3V], (instregex "^(FMAX|FMIN)(NM)?P(v4f32|v2f64|v2i64)")>;
    475: def : InstRW<[A57Write_5cyc_2V], (instregex "^FMULX?(v4f32|v2f64|v4i32|v2i64)")>;
    [all...]
/external/llvm/test/CodeGen/AArch64/
  arm64-vmul.ll
    27: %tmp3 = call <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp2)
    33: declare <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32>, <2 x i32>) nounwind readnone
    58: %tmp3 = call <2 x i64> @llvm.aarch64.neon.umull.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp2)
    64: declare <2 x i64> @llvm.aarch64.neon.umull.v2i64(<2 x i32>, <2 x i32>) nounwind readnone
    80: %tmp3 = call <2 x i64> @llvm.aarch64.neon.sqdmull.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp2)
    102: %tmp3 = call <2 x i64> @llvm.aarch64.neon.sqdmull.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp2)
    108: declare <2 x i64> @llvm.aarch64.neon.sqdmull.v2i64(<2 x i32>, <2 x i32>) nounwind readnone
    271: %tmp4 = call <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp2)
    293: %tmp4 = call <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp2)
    299: declare <2 x i64> @llvm.aarch64.neon.sqadd.v2i64(<2 x i64>, <2 x i64> [all...]
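The smull/umull/sqdmull calls matched above are widening multiplies: two <2 x i32> inputs produce a <2 x i64> result. A minimal standalone sketch of that pattern, assuming only the intrinsic signature shown in the declarations above (the function name is illustrative, not from the test file):

    declare <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32>, <2 x i32>) nounwind readnone

    ; Widening multiply: each pair of i32 lanes yields one i64 lane.
    define <2 x i64> @example_smull(<2 x i32> %a, <2 x i32> %b) {
      %prod = call <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32> %a, <2 x i32> %b)
      ret <2 x i64> %prod
    }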
  arm64-neon-add-pairwise.ll
    59: declare <2 x i64> @llvm.aarch64.neon.addp.v2i64(<2 x i64>, <2 x i64>)
    63: %val = call <2 x i64> @llvm.aarch64.neon.addp.v2i64(<2 x i64> %lhs, <2 x i64> %rhs)
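addp is a pairwise rather than element-wise add: the two operands are treated as one concatenated sequence and adjacent elements are summed. A hedged sketch reusing the declaration shown above (function name illustrative):

    declare <2 x i64> @llvm.aarch64.neon.addp.v2i64(<2 x i64>, <2 x i64>)

    ; Lane 0 of the result is %lhs[0] + %lhs[1]; lane 1 is %rhs[0] + %rhs[1].
    define <2 x i64> @example_addp(<2 x i64> %lhs, <2 x i64> %rhs) {
      %val = call <2 x i64> @llvm.aarch64.neon.addp.v2i64(<2 x i64> %lhs, <2 x i64> %rhs)
      ret <2 x i64> %val
    }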
  arm64-vqadd.ll
    89: %tmp3 = call <2 x i64> @llvm.aarch64.neon.sqadd.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
    125: %tmp3 = call <2 x i64> @llvm.aarch64.neon.uqadd.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
    142: declare <2 x i64> @llvm.aarch64.neon.sqadd.v2i64(<2 x i64>, <2 x i64>) nounwind readnone
    147: declare <2 x i64> @llvm.aarch64.neon.uqadd.v2i64(<2 x i64>, <2 x i64>) nounwind readnone
    208: %tmp3 = call <2 x i64> @llvm.aarch64.neon.usqadd.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
    236: declare <2 x i64> @llvm.aarch64.neon.usqadd.v2i64(<2 x i64>, <2 x i64>) nounwind readnone
    297: %tmp3 = call <2 x i64> @llvm.aarch64.neon.suqadd.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
    332: declare <2 x i64> @llvm.aarch64.neon.suqadd.v2i64(<2 x i64>, <2 x i64>) nounwind readnone
  arm64-neon-2velem.ll
    25: declare <2 x i64> @llvm.aarch64.neon.sqdmull.v2i64(<2 x i32>, <2 x i32>)
    29: declare <2 x i64> @llvm.aarch64.neon.sqsub.v2i64(<2 x i64>, <2 x i64>)
    33: declare <2 x i64> @llvm.aarch64.neon.sqadd.v2i64(<2 x i64>, <2 x i64>)
    37: declare <2 x i64> @llvm.aarch64.neon.umull.v2i64(<2 x i32>, <2 x i32>)
    41: declare <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32>, <2 x i32>)
    632: %vmull2.i = tail call <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32> %b, <2 x i32> %shuffle)
    654: %vmull2.i = tail call <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32> %b, <2 x i32> %shuffle)
    678: %vmull2.i = tail call <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32> %shuffle.i, <2 x i32> %shuffle)
    702: %vmull2.i = tail call <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32> %shuffle.i, <2 x i32> %shuffle)
    724: %vmull2.i = tail call <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32> %b, <2 x i32> %shuffle [all...]
  arm64-st1.ll
    299: call void @llvm.aarch64.neon.st2lane.v2i64.p0i64(<2 x i64> %A, <2 x i64> %B, i64 1, i64* %D)
    306: declare void @llvm.aarch64.neon.st2lane.v2i64.p0i64(<2 x i64>, <2 x i64>, i64, i64*) nounwind readnone
    332: call void @llvm.aarch64.neon.st3lane.v2i64.p0i64(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, i64 1, i64* %D)
    339: declare void @llvm.aarch64.neon.st3lane.v2i64.p0i64(<2 x i64>, <2 x i64>, <2 x i64>, i64, i64*) nounwind readnone
    365: call void @llvm.aarch64.neon.st4lane.v2i64.p0i64(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64> %D, i64 1, i64* %E)
    372: declare void @llvm.aarch64.neon.st4lane.v2i64.p0i64(<2 x i64>, <2 x i64>, <2 x i64>, <2 x i64>, i64, i64*) nounwind readnone
    554: call void @llvm.aarch64.neon.st2.v2i64.p0i64(<2 x i64> %A, <2 x i64> %B, i64* %P)
    561: call void @llvm.aarch64.neon.st3.v2i64.p0i64(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, i64* %P)
    568: call void @llvm.aarch64.neon.st4.v2i64.p0i64(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64> %D, i64* %P)
    572: declare void @llvm.aarch64.neon.st2.v2i64.p0i64(<2 x i64>, <2 x i64>, i64*) nounwind readonl [all...]
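In the stNlane calls above, the trailing i64 immediately before the pointer is the lane index: only that lane of each source vector is stored, interleaved, at the given address. A minimal sketch using the st2lane signature from the declaration above (function name illustrative):

    declare void @llvm.aarch64.neon.st2lane.v2i64.p0i64(<2 x i64>, <2 x i64>, i64, i64*) nounwind readnone

    ; Stores lane 1 of %A followed by lane 1 of %B at %P.
    define void @example_st2lane(<2 x i64> %A, <2 x i64> %B, i64* %P) {
      call void @llvm.aarch64.neon.st2lane.v2i64.p0i64(<2 x i64> %A, <2 x i64> %B, i64 1, i64* %P)
      ret void
    }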
  arm64-vaddv.ll
    77: %vaddv.i = tail call i64 @llvm.aarch64.neon.saddv.i64.v2i64(<2 x i64> %a1)
    87: %0 = tail call i64 @llvm.aarch64.neon.saddv.i64.v2i64(<2 x i64> %a2)
    216: %vaddv.i = tail call i64 @llvm.aarch64.neon.uaddv.i64.v2i64(<2 x i64> %a1)
    226: %0 = tail call i64 @llvm.aarch64.neon.uaddv.i64.v2i64(<2 x i64> %a2)
    238: %vaddv.i = tail call i64 @llvm.aarch64.neon.uaddv.i64.v2i64(<2 x i64> %a1)
    389: declare i64 @llvm.aarch64.neon.uaddv.i64.v2i64(<2 x i64>)
    399: declare i64 @llvm.aarch64.neon.saddv.i64.v2i64(<2 x i64>)
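Unlike the element-wise intrinsics above, the [su]addv intrinsics reduce a whole vector to a scalar. A hedged sketch built from the declarations shown (function name illustrative):

    declare i64 @llvm.aarch64.neon.uaddv.i64.v2i64(<2 x i64>)

    ; Horizontal add across the vector: returns %v[0] + %v[1] as a scalar.
    define i64 @example_uaddv(<2 x i64> %v) {
      %sum = tail call i64 @llvm.aarch64.neon.uaddv.i64.v2i64(<2 x i64> %v)
      ret i64 %sum
    }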
/external/llvm/lib/Target/X86/
  X86InstrXOP.td
    113: defm VPROTQ : xop3op<0x93, "vprotq", X86vprot, v2i64>;
    117: defm VPSHAQ : xop3op<0x9B, "vpshaq", X86vpsha, v2i64>;
    121: defm VPSHLQ : xop3op<0x97, "vpshlq", X86vpshl, v2i64>;
    142: defm VPROTQ : xop3opimm<0xC3, "vprotq", X86vproti, v2i64>;
    218: defm VPCOMQ : xopvpcom<0xCF, "q", X86vpcom, v2i64>;
    222: defm VPCOMUQ : xopvpcom<0xEF, "uq", X86vpcomu, v2i64>;
    336: def : Pat<(v2i64 (or (and VR128:$src3, VR128:$src1),
    415: v2i64, v4i64, loadv2f64, loadv4f64>;
/external/llvm/test/Analysis/CostModel/X86/
  bswap.ll
    10: declare <2 x i64> @llvm.bswap.v2i64(<2 x i64>)
    24: %bswap = call <2 x i64> @llvm.bswap.v2i64(<2 x i64> %a)
  ctbits-cost.ll
    10: declare <2 x i64> @llvm.ctpop.v2i64(<2 x i64>)
    25: %ctpop = call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %a)
    94: declare <2 x i64> @llvm.ctlz.v2i64(<2 x i64>, i1)
    109: %ctlz = call <2 x i64> @llvm.ctlz.v2i64(<2 x i64> %a, i1 0)
    118: %ctlz = call <2 x i64> @llvm.ctlz.v2i64(<2 x i64> %a, i1 1)
    250: declare <2 x i64> @llvm.cttz.v2i64(<2 x i64>, i1)
    265: %cttz = call <2 x i64> @llvm.cttz.v2i64(<2 x i64> %a, i1 0)
    274: %cttz = call <2 x i64> @llvm.cttz.v2i64(<2 x i64> %a, i1 1)
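The trailing i1 on ctlz/cttz is the is-zero-undef flag: false asks for a fully defined result (64 for an all-zero element at this width), while true tells the optimizer that a zero input gives an undefined result, which can allow cheaper lowering. A minimal sketch of both forms (function names illustrative):

    declare <2 x i64> @llvm.ctlz.v2i64(<2 x i64>, i1)

    define <2 x i64> @example_ctlz_zero_defined(<2 x i64> %a) {
      ; i1 false: a zero element produces 64.
      %r = call <2 x i64> @llvm.ctlz.v2i64(<2 x i64> %a, i1 false)
      ret <2 x i64> %r
    }

    define <2 x i64> @example_ctlz_zero_undef(<2 x i64> %a) {
      ; i1 true: the result for a zero element is undefined.
      %r = call <2 x i64> @llvm.ctlz.v2i64(<2 x i64> %a, i1 true)
      ret <2 x i64> %r
    }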
/external/llvm/test/CodeGen/AMDGPU/
  bswap.ll
    9: declare <2 x i64> @llvm.bswap.v2i64(<2 x i64>) nounwind readnone
    105: %bswap = call <2 x i64> @llvm.bswap.v2i64(<2 x i64> %val) nounwind readnone
  ctpop64.ll
    5: declare <2 x i64> @llvm.ctpop.v2i64(<2 x i64>) nounwind readnone
    65: %ctpop = call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %val) nounwind readnone
    92: %ctpop = call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %val) nounwind readnone
/external/llvm/test/CodeGen/SystemZ/
  vec-move-10.ll
    197: ; Test v2i64 extraction from the first element.
    207: ; Test v2i64 extraction from the last element.
    217: ; Test v2i64 extraction of an invalid element. This must compile,
    228: ; Test v2i64 extraction with the highest in-range offset.
    239: ; Test v2i64 extraction with the first ouf-of-range offset.
    251: ; Test v2i64 extraction from a variable element.
    421: ; Test a v2i64 scatter of the first element.
    434: ; Test a v2i64 scatter of the last element.
  vec-move-08.ll
    164: ; Test v2i64 insertion into the first element.
    174: ; Test v2i64 insertion into the last element.
    184: ; Test v2i64 insertion with the highest in-range offset.
    195: ; Test v2i64 insertion with the first ouf-of-range offset.
    207: ; Test v2i64 insertion into a variable element.
    366: ; Test a v2i64 gather of the first element.
    379: ; Test a v2i64 gather of the last element.
  vec-perm-03.ll
    122: ; Test a v2i64 replicating load with no offset.
    134: ; Test a v2i64 replicating load with the maximum in-range offset.
    147: ; Test a v2i64 replicating load with the first out-of-range offset.
/external/webp/src/dsp/
  lossless_enc_msa.c
    82: const uint64_t pix_d = __msa_copy_s_d((v2i64)dst0, 0);
    87: const uint64_t pix_d = __msa_copy_s_d((v2i64)dst0, 0);
    (Here v2i64 is the MIPS MSA 128-bit vector type, a compiler vector-extension typedef, not the LLVM machine value type matched elsewhere in this listing.)
/external/llvm/test/CodeGen/ARM/
  vshl.ll
    107: %tmp3 = call <2 x i64> @llvm.arm.neon.vshifts.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
    143: %tmp3 = call <2 x i64> @llvm.arm.neon.vshiftu.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
    210: %tmp2 = call <2 x i64> @llvm.arm.neon.vshiftu.v2i64(<2 x i64> %tmp1, <2 x i64> < i64 63, i64 63 >)
    308: %tmp2 = call <2 x i64> @llvm.arm.neon.vshifts.v2i64(<2 x i64> %tmp1, <2 x i64> < i64 -64, i64 -64 >)
    340: %tmp2 = call <2 x i64> @llvm.arm.neon.vshiftu.v2i64(<2 x i64> %tmp1, <2 x i64> < i64 -64, i64 -64 >)
    357: declare <2 x i64> @llvm.arm.neon.vshifts.v2i64(<2 x i64>, <2 x i64>) nounwind readnone
    362: declare <2 x i64> @llvm.arm.neon.vshiftu.v2i64(<2 x i64>, <2 x i64>) nounwind readnone
    468: %tmp3 = call <2 x i64> @llvm.arm.neon.vrshifts.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
    504: %tmp3 = call <2 x i64> @llvm.arm.neon.vrshiftu.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
    600: %tmp2 = call <2 x i64> @llvm.arm.neon.vrshifts.v2i64(<2 x i64> %tmp1, <2 x i64> < i64 -64, i64 -64 > [all...]
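The second operand of the ARM NEON shift intrinsics above is a per-element shift amount; the constant forms with negative amounts (e.g. < i64 -64, i64 -64 >) are how these tests express right shifts, which is why such calls select VSHR rather than VSHL. A hedged sketch of the variable-shift form, reusing the declaration shown above (function name illustrative):

    declare <2 x i64> @llvm.arm.neon.vshifts.v2i64(<2 x i64>, <2 x i64>) nounwind readnone

    ; Signed shift: positive amounts shift left, negative amounts shift right.
    define <2 x i64> @example_vshifts(<2 x i64> %val, <2 x i64> %amt) {
      %r = call <2 x i64> @llvm.arm.neon.vshifts.v2i64(<2 x i64> %val, <2 x i64> %amt)
      ret <2 x i64> %r
    }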
  vqshl.ll
    107: %tmp3 = call <2 x i64> @llvm.arm.neon.vqshifts.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
    143: %tmp3 = call <2 x i64> @llvm.arm.neon.vqshiftu.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
    271: %tmp2 = call <2 x i64> @llvm.arm.neon.vqshifts.v2i64(<2 x i64> %tmp1, <2 x i64> < i64 63, i64 63 >)
    303: %tmp2 = call <2 x i64> @llvm.arm.neon.vqshiftu.v2i64(<2 x i64> %tmp1, <2 x i64> < i64 63, i64 63 >)
    335: %tmp2 = call <2 x i64> @llvm.arm.neon.vqshiftsu.v2i64(<2 x i64> %tmp1, <2 x i64> < i64 63, i64 63 >)
    357: declare <2 x i64> @llvm.arm.neon.vqshifts.v2i64(<2 x i64>, <2 x i64>) nounwind readnone
    362: declare <2 x i64> @llvm.arm.neon.vqshiftu.v2i64(<2 x i64>, <2 x i64>) nounwind readnone
    367: declare <2 x i64> @llvm.arm.neon.vqshiftsu.v2i64(<2 x i64>, <2 x i64>) nounwind readnone
    473: %tmp3 = call <2 x i64> @llvm.arm.neon.vqrshifts.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
    509: %tmp3 = call <2 x i64> @llvm.arm.neon.vqrshiftu.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2 [all...]
/external/swiftshader/third_party/LLVM/test/CodeGen/ARM/
  vshl.ll
    107: %tmp3 = call <2 x i64> @llvm.arm.neon.vshifts.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
    143: %tmp3 = call <2 x i64> @llvm.arm.neon.vshiftu.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
    210: %tmp2 = call <2 x i64> @llvm.arm.neon.vshiftu.v2i64(<2 x i64> %tmp1, <2 x i64> < i64 63, i64 63 >)
    308: %tmp2 = call <2 x i64> @llvm.arm.neon.vshifts.v2i64(<2 x i64> %tmp1, <2 x i64> < i64 -64, i64 -64 >)
    340: %tmp2 = call <2 x i64> @llvm.arm.neon.vshiftu.v2i64(<2 x i64> %tmp1, <2 x i64> < i64 -64, i64 -64 >)
    357: declare <2 x i64> @llvm.arm.neon.vshifts.v2i64(<2 x i64>, <2 x i64>) nounwind readnone
    362: declare <2 x i64> @llvm.arm.neon.vshiftu.v2i64(<2 x i64>, <2 x i64>) nounwind readnone
    468: %tmp3 = call <2 x i64> @llvm.arm.neon.vrshifts.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
    504: %tmp3 = call <2 x i64> @llvm.arm.neon.vrshiftu.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
    600: %tmp2 = call <2 x i64> @llvm.arm.neon.vrshifts.v2i64(<2 x i64> %tmp1, <2 x i64> < i64 -64, i64 -64 > [all...]
/external/swiftshader/third_party/LLVM/lib/Target/X86/
  X86GenDAGISel.inc
    95: /*172*/ OPC_CheckChild1Type, MVT::v2i64,
    104: // Src: (st VR128:v2i64:$src, addr:iPTR:$dst)<<P:Predicate_alignednontemporalstore>> - Complexity = 422
    105: // Dst: (VMOVNTDQmr addr:iPTR:$dst, VR128:v2i64:$src)
    112: // Src: (st VR128:v2i64:$src, addr:iPTR:$dst)<<P:Predicate_alignednontemporalstore>> - Complexity = 422
    113: // Dst: (MOVNTDQmr addr:iPTR:$dst, VR128:v2i64:$src)
    [all...]
  X86InstrFragmentsSIMD.td
    212: def loadv2i64 : PatFrag<(ops node:$ptr), (v2i64 (load node:$ptr))>;
    255: (v2i64 (alignedload node:$ptr))>;
    285: def memopv2i64 : PatFrag<(ops node:$ptr), (v2i64 (memop node:$ptr))>;
    337: def bc_v2i64 : PatFrag<(ops node:$in), (v2i64 (bitconvert node:$in))>;
    344: (bitconvert (v2i64 (X86vzmovl
    345: (v2i64 (scalar_to_vector (loadi64 node:$src))))))>;
    351: (bitconvert (v2i64 (X86vzload node:$src)))>;
/external/llvm/lib/Target/ARM/
  ARMInstrNEON.td
    [all...]
/external/swiftshader/third_party/LLVM/lib/Target/CellSPU/
  SPUISelDAGToDAG.cpp
    192: ((vecVT == MVT::v2i64) &&
    602: case MVT::v2i64:
    716: } else if (Opc == ISD::ADD && (OpVT == MVT::i64 || OpVT == MVT::v2i64)) {
    728: } else if (Opc == ISD::SUB && (OpVT == MVT::i64 || OpVT == MVT::v2i64)) {
    740: } else if (Opc == ISD::MUL && (OpVT == MVT::i64 || OpVT == MVT::v2i64)) {
    824: MVT::v2i64,
    837: SDValue absVec = CurDAG->getNode(ISD::BUILD_VECTOR, dl, MVT::v2i64,
    905: * \note This code could also be used to implement v2i64 shl.
    [all...]
/external/swiftshader/third_party/LLVM/lib/VMCore/
  ValueTypes.cpp
    133: case MVT::v2i64: return "v2i64";
    180: case MVT::v2i64: return VectorType::get(Type::getInt64Ty(Context), 2);