/external/llvm/test/CodeGen/SystemZ/ |
vec-shift-01.ll | 32 ; Test a v2i64 shift.
|
vec-shift-02.ll | 32 ; Test a v2i64 shift.
|
vec-shift-03.ll | 32 ; Test a v2i64 shift.
|
vec-xor-01.ll | 32 ; Test a v2i64 XOR.
|
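
For orientation: a minimal sketch (hypothetical function, not copied from the files above) of the IR shape these SystemZ tests exercise, an element-wise v2i64 shift followed by an XOR:

  define <2 x i64> @shift_then_xor(<2 x i64> %val1, <2 x i64> %val2) {
    ; Per-lane shift amount comes from the second operand.
    %shift = shl <2 x i64> %val1, %val2
    ; Bitwise XOR operates on the full 128-bit register.
    %ret = xor <2 x i64> %shift, %val2
    ret <2 x i64> %ret
  }
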
vec-move-09.ll | 176 ; Test v2i64 insertion into the first element.
185 ; Test v2i64 insertion into the last element.
194 ; Test v2i64 insertion with the maximum value allowed by VLEIG.
203 ; Test v2i64 insertion with the next value up.
212 ; Test v2i64 insertion with the minimum value allowed by VLEIG.
221 ; Test v2i64 insertion with the next value down.
230 ; Test v2i64 insertion into a variable element.
|
vec-move-04.ll | 86 ; Test v2i64 insertion into the first element.
95 ; Test v2i64 insertion into the last element.
104 ; Test v2i64 insertion into a variable element.
|
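
These insertion tests revolve around the insertelement instruction; a hedged sketch (function names are illustrative) covering a fixed and a variable index. The VLEIG tests above probe an immediate range, presumably the signed 16-bit immediate field of that instruction:

  define <2 x i64> @insert_first(<2 x i64> %val, i64 %element) {
    ; Insert into the first element (constant index 0).
    %ret = insertelement <2 x i64> %val, i64 %element, i32 0
    ret <2 x i64> %ret
  }

  define <2 x i64> @insert_var(<2 x i64> %val, i64 %element, i32 %index) {
    ; Insert into a variable element (index only known at run time).
    %ret = insertelement <2 x i64> %val, i64 %element, i32 %index
    ret <2 x i64> %ret
  }
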
/external/swiftshader/third_party/LLVM/test/CodeGen/ARM/ |
vpadal.ll | 80 %tmp3 = call <2 x i64> @llvm.arm.neon.vpadals.v2i64.v4i32(<2 x i64> %tmp1, <4 x i32> %tmp2)
107 %tmp3 = call <2 x i64> @llvm.arm.neon.vpadalu.v2i64.v4i32(<2 x i64> %tmp1, <4 x i32> %tmp2)
121 declare <2 x i64> @llvm.arm.neon.vpadals.v2i64.v4i32(<2 x i64>, <4 x i32>) nounwind readnone
125 declare <2 x i64> @llvm.arm.neon.vpadalu.v2i64.v4i32(<2 x i64>, <4 x i32>) nounwind readnone
|
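
These are the NEON signed/unsigned pairwise add-and-accumulate intrinsics. A minimal sketch of the signed variant (the declaration is taken verbatim from line 121 above; the wrapper function is hypothetical):

  declare <2 x i64> @llvm.arm.neon.vpadals.v2i64.v4i32(<2 x i64>, <4 x i32>) nounwind readnone

  define <2 x i64> @pairwise_accumulate(<2 x i64> %acc, <4 x i32> %in) {
    ; Adjacent i32 lanes of %in are sign-extended, added pairwise, and
    ; accumulated into the two i64 lanes of %acc (VPADAL.S32).
    %res = call <2 x i64> @llvm.arm.neon.vpadals.v2i64.v4i32(<2 x i64> %acc, <4 x i32> %in)
    ret <2 x i64> %res
  }
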
/external/llvm/test/CodeGen/AMDGPU/ |
bitreverse.ll | 11 declare <2 x i64> @llvm.bitreverse.v2i64(<2 x i64>) #1
93 %brev = call <2 x i64> @llvm.bitreverse.v2i64(<2 x i64> %val) #1
101 %brev = call <2 x i64> @llvm.bitreverse.v2i64(<2 x i64> %val) #1
|
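
A sketch of the generic bit-reversal intrinsic at this type (declaration as on line 11 above; the wrapper function is illustrative):

  declare <2 x i64> @llvm.bitreverse.v2i64(<2 x i64>)

  define <2 x i64> @reverse_bits(<2 x i64> %val) {
    ; Reverses the bit order within each i64 lane independently.
    %brev = call <2 x i64> @llvm.bitreverse.v2i64(<2 x i64> %val)
    ret <2 x i64> %brev
  }
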
/external/swiftshader/third_party/LLVM/lib/Target/CellSPU/ |
SPUCallingConv.td | 24 CCIfType<[i8,i16,i32,i64,i128,f32,f64,v16i8,v8i16,v4i32,v2i64,v4f32,v2f64],
41 v16i8, v8i16, v4i32, v4f32, v2i64, v2f64],
55 CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
|
SPUInstrInfo.td | 63 def v2i64: LoadDFormVec<v2i64>;
95 def v2i64: LoadAFormVec<v2i64>;
127 def v2i64: LoadXFormVec<v2i64>;
175 def v2i64: StoreDFormVec<v2i64>;
205 def v2i64: StoreAFormVec<v2i64>; [all...] |
/external/swiftshader/third_party/LLVM/lib/Target/X86/ |
X86InstrSSE.td | 135 def : Pat<(v2i64 (extract_subvector (v4i64 VR256:$src), (i32 0))),
136 (v2i64 (EXTRACT_SUBREG (v4i64 VR256:$src), sub_xmm))>;
147 def : Pat<(insert_subvector undef, (v2i64 VR128:$src), (i32 0)),
174 def : Pat<(v2i64 (bitconvert (v4i32 VR128:$src))), (v2i64 VR128:$src)>;
175 def : Pat<(v2i64 (bitconvert (v8i16 VR128:$src))), (v2i64 VR128:$src)>;
176 def : Pat<(v2i64 (bitconvert (v16i8 VR128:$src))), (v2i64 VR128:$src)>;
177 def : Pat<(v2i64 (bitconvert (v2f64 VR128:$src))), (v2i64 VR128:$src)>; [all...] |
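
The bitconvert patterns above make same-size vector reinterpretation free. In IR such a reinterpretation is a bitcast, e.g. (a sketch; the function name is illustrative):

  define <2 x i64> @reinterpret(<4 x i32> %v) {
    ; No instruction is emitted: the v4i32 bits are reread as v2i64
    ; in the same XMM register.
    %r = bitcast <4 x i32> %v to <2 x i64>
    ret <2 x i64> %r
  }
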
X86InstrMMX.td | 195 (i64 (vector_extract (v2i64 VR128:$src),
201 (v2i64 (scalar_to_vector
425 [SDTCisVT<0, v2i64>, SDTCisVT<1, x86mmx>]>>;
427 def : Pat<(v2i64 (MMX_X86movq2dq VR64:$src)),
428 (v2i64 (MMX_MOVQ2DQrr VR64:$src))>;
430 def : Pat<(v2i64 (MMX_X86movq2dq (load_mmx addr:$src))),
431 (v2i64 (MOVQI2PQIrm addr:$src))>;
433 def : Pat<(v2i64 (MMX_X86movq2dq
435 (v2i64 (MOVDI2PDIrm addr:$src))>;
439 [SDTCisVT<0, x86mmx>, SDTCisVT<1, v2i64>]>>;
[all...] |
/external/llvm/lib/Target/AArch64/ |
AArch64TargetTransformInfo.cpp | 217 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i64, 1 },
220 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 1 },
225 { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i64, 2 },
228 { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i64, 2 },
258 { ISD::FP_TO_SINT, MVT::v2i64, MVT::v2f64, 1 },
261 { ISD::FP_TO_UINT, MVT::v2i64, MVT::v2f64, 1 },
263 // Complex, from v2f32: legal type is v2i32 (no cost) or v2i64 (1 ext).
264 { ISD::FP_TO_SINT, MVT::v2i64, MVT::v2f32, 2 },
267 { ISD::FP_TO_UINT, MVT::v2i64, MVT::v2f32, 2 },
473 // unaligned v2i64 stores because the negative impact that has shown i [all...] |
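
Each row prices one conversion; the first row, for instance, says a signed v2i64-to-v2f64 conversion costs a single instruction. The IR being costed looks like this (a sketch; the function name is illustrative):

  define <2 x double> @to_double(<2 x i64> %v) {
    ; Priced at 1 by the { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i64, 1 } entry.
    %r = sitofp <2 x i64> %v to <2 x double>
    ret <2 x double> %r
  }
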
/external/llvm/lib/Target/ARM/ |
ARMTargetTransformInfo.cpp | 107 { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i32, 1 },
108 { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i32, 1 },
368 {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},
390 {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},
443 { ISD::SDIV, MVT::v2i64, 2 * FunctionCallDivCost},
444 { ISD::UDIV, MVT::v2i64, 2 * FunctionCallDivCost},
445 { ISD::SREM, MVT::v2i64, 2 * FunctionCallDivCost},
446 { ISD::UREM, MVT::v2i64, 2 * FunctionCallDivCost},
472 // the vectorized code. Because we have support for v2i64 but not i64 those
474 // To work around this we increase the cost of v2i64 operations to make the [all...] |
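
NEON has no vector integer divide, so a v2i64 division is costed as two scalarized calls (2 * FunctionCallDivCost, one per lane). The IR in question is simply (a sketch; the function name is illustrative):

  define <2 x i64> @divide(<2 x i64> %a, <2 x i64> %b) {
    ; Each i64 lane is split out and divided via a library call on ARM
    ; (e.g. __aeabi_ldivmod for the signed case).
    %r = sdiv <2 x i64> %a, %b
    ret <2 x i64> %r
  }
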
/external/libvpx/libvpx/vp8/common/mips/msa/ |
idct_msa.c | 24 out1 = (v8i16)__msa_ilvl_d((v2i64)s6_m, (v2i64)s4_m); \
25 out3 = (v8i16)__msa_ilvl_d((v2i64)s7_m, (v2i64)s5_m); \
184 v2i64 zero = { 0 };
281 vec = (v8i16)__msa_pckev_d((v2i64)input_dc1, (v2i64)input_dc0);
|
/external/llvm/lib/Target/PowerPC/ |
PPCCallingConv.td | 68 CCIfType<[v16i8, v8i16, v4i32, v2i64, v1i128, v4f32],
71 CCIfType<[v2f64, v2i64], CCIfSubtarget<"hasVSX()",
121 CCIfType<[v16i8, v8i16, v4i32, v2i64, v1i128, v4f32],
124 CCIfType<[v2f64, v2i64], CCIfSubtarget<"hasVSX()",
172 CCIfType<[v16i8, v8i16, v4i32, v4f32, v2f64, v2i64], CCAssignToStack<16, 16>>
190 CCIfType<[v16i8, v8i16, v4i32, v2i64, v1i128, v4f32],
193 CCIfType<[v2f64, v2i64], CCIfSubtarget<"hasVSX()",
|
PPCInstrVSX.td | 468 int_ppc_vsx_xvcmpeqdp, v2i64, v2f64>;
474 int_ppc_vsx_xvcmpgedp, v2i64, v2f64>;
480 int_ppc_vsx_xvcmpgtdp, v2i64, v2f64>;
578 [(set v2i64:$XT, (fp_to_sint v2f64:$XB))]>;
585 [(set v2i64:$XT, (fp_to_uint v2f64:$XB))]>;
608 [(set v2f64:$XT, (sint_to_fp v2i64:$XB))]>;
622 [(set v2f64:$XT, (uint_to_fp v2i64:$XB))]>; [all...] |
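
The compare rows map a VSX intrinsic taking two v2f64 operands to a v2i64 lane-mask result. A sketch of the equality variant, assuming the IR-level name follows the usual int_ppc_vsx_ naming convention (i.e. llvm.ppc.vsx.xvcmpeqdp); the wrapper function is illustrative:

  declare <2 x i64> @llvm.ppc.vsx.xvcmpeqdp(<2 x double>, <2 x double>)

  define <2 x i64> @cmp_eq_mask(<2 x double> %a, <2 x double> %b) {
    ; Each i64 lane becomes all-ones if the corresponding doubles
    ; compare equal, all-zeros otherwise.
    %m = call <2 x i64> @llvm.ppc.vsx.xvcmpeqdp(<2 x double> %a, <2 x double> %b)
    ret <2 x i64> %m
  }
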
/external/llvm/lib/Target/X86/ |
X86CallingConv.td | 46 CCIfType<[v2i1], CCPromoteToType<v2i64>>,
56 CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
113 CCIfType<[f32, f64, v4i32, v2i64, v4f32, v2f64],
141 CCIfType<[f32, f64, v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
318 CCPromoteToType<v2i64>>>>,
323 CCIfType<[v2i1], CCPromoteToType<v2i64>>,
331 CCIfType<[f32, f64, f128, v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
359 CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64], CCAssignToStack<16, 16>>,
399 CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64], CCPassIndirect<i64>>,
425 CCIfType<[f32, f64, v16i8, v8i16, v4i32, v2i64, v4f32, v2f64] [all...] |
X86TargetTransformInfo.cpp | 157 { ISD::SHL, MVT::v2i64, 1 },
158 { ISD::SRL, MVT::v2i64, 1 },
187 { ISD::SHL, MVT::v2i64, 1 },
188 { ISD::SRL, MVT::v2i64, 2 },
189 { ISD::SRA, MVT::v2i64, 2 },
220 { ISD::SRA, MVT::v2i64, 4 }, // srl/xor/sub sequence.
252 { ISD::SHL, MVT::v2i64, 1 }, // psllq.
261 { ISD::SRL, MVT::v2i64, 1 }, // psrlq.
270 { ISD::SRA, MVT::v2i64, 4 }, // 2 x psrad + shuffle.
327 { ISD::SHL, MVT::v2i64, 4 }, // splat+shuffle sequence [all...] |
X86InstrSSE.td | 345 def : Pat<(v2i64 (extract_subvector (v4i64 VR256:$src), (iPTR 0))),
346 (v2i64 (EXTRACT_SUBREG (v4i64 VR256:$src), sub_xmm))>;
358 def : Pat<(insert_subvector undef, (v2i64 VR128:$src), (iPTR 0)),
385 def : Pat<(v2i64 (bitconvert (v4i32 VR128:$src))), (v2i64 VR128:$src)>;
386 def : Pat<(v2i64 (bitconvert (v8i16 VR128:$src))), (v2i64 VR128:$src)>;
387 def : Pat<(v2i64 (bitconvert (v16i8 VR128:$src))), (v2i64 VR128:$src)>;
388 def : Pat<(v2i64 (bitconvert (v2f64 VR128:$src))), (v2i64 VR128:$src)> [all...] |
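
The extract_subvector patterns turn taking the low 128-bit half of a 256-bit vector into a plain subregister copy. In IR the low half is expressed as a shufflevector (a sketch; the function name is illustrative):

  define <2 x i64> @low_half(<4 x i64> %v) {
    ; Lanes 0 and 1 are the low XMM half of the YMM register, so this
    ; selects to EXTRACT_SUBREG rather than an actual shuffle.
    %r = shufflevector <4 x i64> %v, <4 x i64> undef, <2 x i32> <i32 0, i32 1>
    ret <2 x i64> %r
  }
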
/external/llvm/test/CodeGen/AArch64/ |
arm64-neon-2velem-high.ll | 39 %vmull9.i.i = call <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32> %shuffle.i.i, <2 x i32> %vecinit1.i.i)
50 %vmull9.i.i = call <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32> %shuffle.i.i, <2 x i32> <i32 511, i32 511>)
89 %vmull9.i.i = call <2 x i64> @llvm.aarch64.neon.umull.v2i64(<2 x i32> %shuffle.i.i, <2 x i32> %vecinit1.i.i)
100 %vmull9.i.i = call <2 x i64> @llvm.aarch64.neon.umull.v2i64(<2 x i32> %shuffle.i.i, <2 x i32> <i32 4294966784, i32 4294966784>)
139 %vqdmull9.i.i = call <2 x i64> @llvm.aarch64.neon.sqdmull.v2i64(<2 x i32> %shuffle.i.i, <2 x i32> %vecinit1.i.i)
150 %vqdmull9.i.i = call <2 x i64> @llvm.aarch64.neon.sqdmull.v2i64(<2 x i32> %shuffle.i.i, <2 x i32> <i32 29, i32 29>)
191 %vmull2.i.i.i = call <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32> %shuffle.i.i, <2 x i32> %vecinit1.i.i)
203 %vmull2.i.i.i = call <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32> %shuffle.i.i, <2 x i32> <i32 29, i32 29>)
245 %vmull2.i.i.i = call <2 x i64> @llvm.aarch64.neon.umull.v2i64(<2 x i32> %shuffle.i.i, <2 x i32> %vecinit1.i.i)
257 %vmull2.i.i.i = call <2 x i64> @llvm.aarch64.neon.umull.v2i64(<2 x i32> %shuffle.i.i, <2 x i32> <i32 29, i32 29> [all...] |
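
These widening multiplies take two <2 x i32> inputs and return a <2 x i64> result. A minimal sketch of the signed variant (intrinsic signature as used in the calls above; the wrapper function is illustrative):

  declare <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32>, <2 x i32>)

  define <2 x i64> @widening_mul(<2 x i32> %a, <2 x i32> %b) {
    ; SMULL: sign-extend each i32 lane and multiply into an i64 lane.
    %r = call <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32> %a, <2 x i32> %b)
    ret <2 x i64> %r
  }
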
arm64-vclz.ll | 113 %vclz1.i = tail call <2 x i64> @llvm.ctlz.v2i64(<2 x i64> %a, i1 false) nounwind
119 %vclz1.i = tail call <2 x i64> @llvm.ctlz.v2i64(<2 x i64> %a, i1 false) nounwind
123 declare <2 x i64> @llvm.ctlz.v2i64(<2 x i64>, i1) nounwind readnone
|
/external/llvm/lib/Target/Mips/ |
MipsMSAInstrInfo.td | 101 (v2i64 (vector_insert node:$vec, node:$val, node:$idx))>;
110 (v2i64 (MipsINSVE node:$v1, node:$i1, node:$v2, node:$i2))>;
118 def vfsetoeq_v2f64 : vfsetcc_type<v2i64, v2f64, SETOEQ>;
120 def vfsetoge_v2f64 : vfsetcc_type<v2i64, v2f64, SETOGE>;
122 def vfsetogt_v2f64 : vfsetcc_type<v2i64, v2f64, SETOGT>;
124 def vfsetole_v2f64 : vfsetcc_type<v2i64, v2f64, SETOLE>;
126 def vfsetolt_v2f64 : vfsetcc_type<v2i64, v2f64, SETOLT>;
128 def vfsetone_v2f64 : vfsetcc_type<v2i64, v2f64, SETONE>;
130 def vfsetord_v2f64 : vfsetcc_type<v2i64, v2f64, SETO>;
132 def vfsetun_v2f64 : vfsetcc_type<v2i64, v2f64, SETUO> [all...] |
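
The vfsetcc_type rows pair each floating-point predicate with a v2i64 result because MSA compares report per-lane truth as all-ones/all-zeros i64 masks. The IR shape being matched is roughly (a sketch; the function name is illustrative):

  define <2 x i64> @ordered_eq_mask(<2 x double> %a, <2 x double> %b) {
    ; Ordered-equal compare per lane, widened to a full i64 mask lane.
    %cmp = fcmp oeq <2 x double> %a, %b
    %mask = sext <2 x i1> %cmp to <2 x i64>
    ret <2 x i64> %mask
  }
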
/external/llvm/lib/Target/SystemZ/ |
SystemZInstrVector.td | 178 defm : ReplicatePeephole<VLREPG, v2i64, load, i64>;
336 defm : GenericVectorOps<v2i64, v2i64>;
338 defm : GenericVectorOps<v2f64, v2i64>;
664 defm : BitwiseVectorOps<v2i64>; [all...] |
/external/clang/test/CodeGen/ |
avx512vlcd-builtins.c | 146 // CHECK: call <2 x i64> @llvm.ctlz.v2i64(<2 x i64> %{{.*}}, i1 false)
152 // CHECK: call <2 x i64> @llvm.ctlz.v2i64(<2 x i64> %{{.*}}, i1 false)
159 // CHECK: call <2 x i64> @llvm.ctlz.v2i64(<2 x i64> %{{.*}}, i1 false)
|