/external/llvm/lib/Target/SystemZ/ |
SystemZISelLowering.h |
  192 // Take one element of the first v2i64 operand and one element of
  193 // the second v2i64 operand and concatenate them to form a v2i64 result.
  384 // type (v2i64).
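The merge these comments describe maps, in target-independent IR, onto a shufflevector; a minimal sketch, assuming the low element is taken from each operand (function name hypothetical):

    ; Concatenate one i64 lane from each v2i64 operand into a v2i64
    ; result; the mask <0, 2> selects element 0 of %a and element 0 of %b.
    define <2 x i64> @merge_sketch(<2 x i64> %a, <2 x i64> %b) {
      %r = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 0, i32 2>
      ret <2 x i64> %r
    }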
|
/external/llvm/test/CodeGen/AArch64/ |
fp16-vector-nvcast.ll | 79 ; Test pattern (v8f16 (AArch64NvCast (v2i64 FPR128:$src)))
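AArch64NvCast is a no-op cast between same-sized vector register types, so the pattern above corresponds to a plain IR bitcast; a hedged sketch (function name hypothetical):

    ; <2 x i64> and <8 x half> are both 128 bits wide, so the cast
    ; changes only the type, not the register contents.
    define <8 x half> @nvcast_sketch(<2 x i64> %v) {
      %r = bitcast <2 x i64> %v to <8 x half>
      ret <8 x half> %r
    }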
|
arm64-neon-3vdiff.ll |
  5 declare <2 x i64> @llvm.aarch64.neon.sqdmull.v2i64(<2 x i32>, <2 x i32>)
  7 declare <2 x i64> @llvm.aarch64.neon.sqsub.v2i64(<2 x i64>, <2 x i64>)
  13 declare <2 x i64> @llvm.aarch64.neon.sqadd.v2i64(<2 x i64>, <2 x i64>)
  17 declare <2 x i64> @llvm.aarch64.neon.umull.v2i64(<2 x i32>, <2 x i32>)
  23 declare <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32>, <2 x i32>)
  [all...] |
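These declarations are the building blocks of the widening ("long") arithmetic arm64-neon-3vdiff.ll exercises; a self-contained sketch of how they compose into a saturating multiply-accumulate (function name hypothetical):

    ; sqdmull widens two <2 x i32> operands to a <2 x i64> product;
    ; sqadd then accumulates with saturation (an SQDMLAL-style pattern).
    define <2 x i64> @sqdmlal_sketch(<2 x i64> %acc, <2 x i32> %a, <2 x i32> %b) {
      %mul = call <2 x i64> @llvm.aarch64.neon.sqdmull.v2i64(<2 x i32> %a, <2 x i32> %b)
      %r = call <2 x i64> @llvm.aarch64.neon.sqadd.v2i64(<2 x i64> %acc, <2 x i64> %mul)
      ret <2 x i64> %r
    }
    declare <2 x i64> @llvm.aarch64.neon.sqdmull.v2i64(<2 x i32>, <2 x i32>)
    declare <2 x i64> @llvm.aarch64.neon.sqadd.v2i64(<2 x i64>, <2 x i64>)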
arm64-indexed-vector-ldst.ll |
  744 %ld2 = tail call { <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld2.v2i64.p0i64(i64* %A)
  753 %ld2 = tail call { <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld2.v2i64.p0i64(i64* %A)
  759 declare { <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld2.v2i64.p0i64(i64*)
  [all...] |
arm64-vadd.ll |
  471 %tmp3 = call <2 x i64> @llvm.aarch64.neon.saddlp.v2i64.v4i32(<4 x i32> %tmp1)
  481 declare <2 x i64> @llvm.aarch64.neon.saddlp.v2i64.v4i32(<4 x i32>) nounwind readnone
  527 %tmp3 = call <2 x i64> @llvm.aarch64.neon.uaddlp.v2i64.v4i32(<4 x i32> %tmp1)
  537 declare <2 x i64> @llvm.aarch64.neon.uaddlp.v2i64.v4i32(<4 x i32>) nounwind readnone
  583 %tmp3 = call <2 x i64> @llvm.aarch64.neon.saddlp.v2i64.v4i32(<4 x i32> %tmp1)
  633 %tmp3 = call <2 x i64> @llvm.aarch64.neon.uaddlp.v2i64.v4i32(<4 x i32> %tmp1)
  698 %tmp3 = call <2 x i64> @llvm.aarch64.neon.addp.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
  708 declare <2 x i64> @llvm.aarch64.neon.addp.v2i64(<2 x i64>, <2 x i64>) nounwind readnone
  [all...] |
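The %tmp values in these excerpts are defined elsewhere in the test; a self-contained sketch of the pairwise widening add (function name hypothetical):

    ; saddlp adds adjacent signed i32 lane pairs and widens the sums,
    ; turning a <4 x i32> into a <2 x i64>.
    define <2 x i64> @saddlp_sketch(<4 x i32> %v) {
      %r = call <2 x i64> @llvm.aarch64.neon.saddlp.v2i64.v4i32(<4 x i32> %v)
      ret <2 x i64> %r
    }
    declare <2 x i64> @llvm.aarch64.neon.saddlp.v2i64.v4i32(<4 x i32>) nounwind readnone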
/external/llvm/test/CodeGen/ARM/ |
vsra.ll |
  278 %tmp3 = call <2 x i64> @llvm.arm.neon.vrshifts.v2i64(<2 x i64> %tmp2, <2 x i64> < i64 -64, i64 -64 >)
  318 %tmp3 = call <2 x i64> @llvm.arm.neon.vrshiftu.v2i64(<2 x i64> %tmp2, <2 x i64> < i64 -64, i64 -64 >)
  336 declare <2 x i64> @llvm.arm.neon.vrshifts.v2i64(<2 x i64>, <2 x i64>) nounwind readnone
  341 declare <2 x i64> @llvm.arm.neon.vrshiftu.v2i64(<2 x i64>, <2 x i64>) nounwind readnone
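In the ARM NEON shift intrinsics the sign of the per-lane count selects the direction, so the <i64 -64, i64 -64> operand above requests a rounding right shift by 64; a self-contained sketch (function name hypothetical):

    ; A negative count in vrshifts means a rounding shift right; here
    ; every lane is shifted right by 64 with rounding.
    define <2 x i64> @vrshr_sketch(<2 x i64> %v) {
      %r = call <2 x i64> @llvm.arm.neon.vrshifts.v2i64(<2 x i64> %v, <2 x i64> <i64 -64, i64 -64>)
      ret <2 x i64> %r
    }
    declare <2 x i64> @llvm.arm.neon.vrshifts.v2i64(<2 x i64>, <2 x i64>) nounwind readnone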
|
vmul.ll |
  206 %tmp3 = call <2 x i64> @llvm.arm.neon.vmulls.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp2)
  266 %tmp3 = call <2 x i64> @llvm.arm.neon.vmullu.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp2)
  315 %1 = tail call <2 x i64> @llvm.arm.neon.vmulls.v2i64(<2 x i32> %arg0_int32x2_t, <2 x i32> %0) ; <<2 x i64>> [#uses=1]
  355 %1 = tail call <2 x i64> @llvm.arm.neon.vmullu.v2i64(<2 x i32> %arg0_uint32x2_t, <2 x i32> %0) ; <<2 x i64>> [#uses=1]
  361 declare <2 x i64> @llvm.arm.neon.vmulls.v2i64(<2 x i32>, <2 x i32>) nounwind readnone
  365 declare <2 x i64> @llvm.arm.neon.vmullu.v2i64(<2 x i32>, <2 x i32>) nounwind readnone
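For reference, the widening multiply in isolation; a minimal sketch (function name hypothetical):

    ; vmulls computes full 64-bit signed products of two <2 x i32>
    ; inputs (VMULL.S32), so no product bits are discarded.
    define <2 x i64> @vmull_sketch(<2 x i32> %a, <2 x i32> %b) {
      %r = call <2 x i64> @llvm.arm.neon.vmulls.v2i64(<2 x i32> %a, <2 x i32> %b)
      ret <2 x i64> %r
    }
    declare <2 x i64> @llvm.arm.neon.vmulls.v2i64(<2 x i32>, <2 x i32>) nounwind readnone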
|
popcnt.ll |
  84 %tmp2 = call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %tmp1)
  95 declare <2 x i64> @llvm.ctpop.v2i64(<2 x i64>) nounwind readnone
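llvm.ctpop is the generic population-count intrinsic; a self-contained sketch of the v2i64 form (function name hypothetical; on ARM this typically lowers to a byte popcount followed by widening pairwise adds):

    ; Counts the set bits in each i64 lane independently.
    define <2 x i64> @popcnt_sketch(<2 x i64> %v) {
      %r = call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %v)
      ret <2 x i64> %r
    }
    declare <2 x i64> @llvm.ctpop.v2i64(<2 x i64>) nounwind readnone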
|
/external/llvm/test/CodeGen/SystemZ/ |
vec-and-02.ll | 73 ; Test a v2i64 AND-NOT.
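IR has no dedicated AND-NOT instruction, so tests like this typically match an and whose operand is complemented by an all-ones xor; a hedged sketch (function name hypothetical):

    ; The xor complements %b; the combined pattern can be selected as a
    ; single and-with-complement instruction (VNC on SystemZ).
    define <2 x i64> @andnot_sketch(<2 x i64> %a, <2 x i64> %b) {
      %not = xor <2 x i64> %b, <i64 -1, i64 -1>
      %r = and <2 x i64> %a, %not
      ret <2 x i64> %r
    }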
|
vec-const-10.ll | 1 ; Test vector replicates, v2i64 version.
|
vec-div-01.ll | 55 ; Test a v2i64 division.
|
vec-max-04.ll | 1 ; Test v2i64 maximum.
|
vec-min-04.ll | 1 ; Test v2i64 minimum.
|
vec-move-11.ll | 86 ; Test v2i64 insertion into an undef.
|
vec-move-12.ll | 95 ; Test v2i64 insertion into an undef.
|
vec-or-02.ll | 85 ; Test v2i64.
|
/external/llvm/test/CodeGen/X86/ |
widen_conv-1.ll | 5 ; truncate v2i64 to v2i32
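The conversion named by the test is a single IR instruction; a minimal sketch (function name hypothetical):

    ; trunc keeps the low 32 bits of each i64 lane; legalizing the
    ; narrow <2 x i32> result is what the 'widen' tests exercise.
    define <2 x i32> @trunc_sketch(<2 x i64> %v) {
      %r = trunc <2 x i64> %v to <2 x i32>
      ret <2 x i32> %r
    }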
|
/external/swiftshader/third_party/LLVM/include/llvm/CodeGen/ |
ValueTypes.td | 48 def v2i64 : ValueType<128, 25>; // 2 x i64 vector value
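The first ValueType parameter is the size in bits, so v2i64 is a 128-bit type; one consequence visible at the IR level is that it bitcasts freely to any other 128-bit vector (sketch, function name hypothetical):

    ; bitcast requires equal bit widths, so this type-checks only
    ; because <2 x i64> and <16 x i8> are both 128 bits wide.
    define <16 x i8> @as_bytes_sketch(<2 x i64> %v) {
      %r = bitcast <2 x i64> %v to <16 x i8>
      ret <16 x i8> %r
    }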
|
/external/swiftshader/third_party/LLVM/lib/Target/CellSPU/ |
SPUISelLowering.h | 79 //! Simplify an EVT::v2i64 constant splat to CellSPU-ready form
|
/external/swiftshader/third_party/LLVM/test/CodeGen/ARM/ |
vsra.ll |
  278 %tmp3 = call <2 x i64> @llvm.arm.neon.vrshifts.v2i64(<2 x i64> %tmp2, <2 x i64> < i64 -64, i64 -64 >)
  318 %tmp3 = call <2 x i64> @llvm.arm.neon.vrshiftu.v2i64(<2 x i64> %tmp2, <2 x i64> < i64 -64, i64 -64 >)
  336 declare <2 x i64> @llvm.arm.neon.vrshifts.v2i64(<2 x i64>, <2 x i64>) nounwind readnone
  341 declare <2 x i64> @llvm.arm.neon.vrshiftu.v2i64(<2 x i64>, <2 x i64>) nounwind readnone
|
/external/llvm/lib/Target/X86/ |
X86RegisterInfo.td |
  469 def VR128 : RegisterClass<"X86", [v4f32, v2f64, v16i8, v8i16, v4i32, v2i64],
  476 def VR128L : RegisterClass<"X86", [v4f32, v2f64, v16i8, v8i16, v4i32, v2i64],
  478 def VR128H : RegisterClass<"X86", [v4f32, v2f64, v16i8, v8i16, v4i32, v2i64],
  505 def VR128X : RegisterClass<"X86", [v4f32, v2f64, v16i8, v8i16, v4i32, v2i64],
  528 def BNDR : RegisterClass<"X86", [v2i64], 128, (sequence "BND%u", 0, 3)>;
|
/external/libvpx/libvpx/vp8/common/mips/msa/ |
sixtap_filter_msa.c | 789 src2110 = (v16i8)__msa_ilvr_d((v2i64)src21_r, (v2i64)src10_r) [all...] |
/external/libvpx/libvpx/vpx_dsp/mips/ |
variance_msa.c |
  309 v2i64 sq_src_l = { 0 };
  310 v2i64 sq_src_r = { 0 };
|
/external/llvm/lib/Target/AMDGPU/ |
CIInstructions.td |
  200 atomic_cmp_swap_flat, v2i64, VReg_128
  346 def : FlatAtomicPat <FLAT_ATOMIC_CMPSWAP_X2_RTN, atomic_cmp_swap_global, i64, v2i64>;
|
/external/swiftshader/third_party/LLVM/lib/CodeGen/SelectionDAG/ |
LegalizeVectorOps.cpp |
  15 // ISD::SDIV of type v2i64 on x86-32. The type is legal (for example, addition
  16 // on a v2i64 is legal), but ISD::SDIV isn't legal, so we have to unroll the
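A concrete instance of the situation that comment describes (function name hypothetical): the IR below uses a legal type, but no x86 instruction divides two i64 lanes at once, so the legalizer unrolls the operation into scalar divisions.

    ; The single vector sdiv is expanded into per-lane extracts, two
    ; scalar i64 divisions, and inserts to rebuild the vector.
    define <2 x i64> @sdiv_sketch(<2 x i64> %a, <2 x i64> %b) {
      %r = sdiv <2 x i64> %a, %b
      ret <2 x i64> %r
    }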
|