/external/llvm/test/CodeGen/SystemZ/
vec-move-16.ll
    63: ; Test a v2i1->v2i64 extension.
    71: ; Test a v2i8->v2i64 extension.
    84: ; Test a v2i16->v2i64 extension.
    96: ; Test a v2i32->v2i64 extension.

vec-move-17.ll
    63: ; Test a v2i64->v2i1 truncation.
    71: ; Test a v2i64->v2i8 truncation. At the moment we use a VPERM rather than
    83: ; Test a v2i64->v2i16 truncation. At the moment we use a VPERM rather than
    95: ; Test a v2i64->v2i32 truncation.

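The two vec-move tests above cover element-wise widening and narrowing between v2i64 and narrower two-lane types. A minimal C sketch of the same conversions, assuming Clang (or GCC 9+) vector extensions; the typedef and function names below are illustrative, not taken from the tests:

    #include <stdint.h>

    typedef int64_t v2i64_t __attribute__((vector_size(16)));
    typedef int32_t v2i32_t __attribute__((vector_size(8)));

    /* v2i64 -> v2i32: element-wise truncation, as in vec-move-17.ll */
    static v2i32_t trunc_v2i64(v2i64_t v) {
      return __builtin_convertvector(v, v2i32_t);
    }

    /* v2i32 -> v2i64: element-wise sign extension, as in vec-move-16.ll */
    static v2i64_t sext_v2i32(v2i32_t v) {
      return __builtin_convertvector(v, v2i64_t);
    }
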
vec-and-01.ll
    32: ; Test a v2i64 AND.

vec-or-01.ll
    32: ; Test a v2i64 OR.

/external/llvm/test/CodeGen/X86/
dagcombine-buildvector.ll
    4: ; with v2i64 build_vector i32, i32.

/external/swiftshader/third_party/LLVM/test/CodeGen/X86/
dagcombine-buildvector.ll
    4: ; with v2i64 build_vector i32, i32.

vec_i64.ll
    4: ; Used movq to load i64 into a v2i64 when the top i64 is 0.

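The comment above describes the MOVQ zero-extending load. The SSE2 intrinsic that compiles to that pattern is _mm_loadl_epi64, which loads one i64 into the low lane of a v2i64 and zeroes the high lane; a minimal sketch (the function name is illustrative):

    #include <emmintrin.h>
    #include <stdint.h>

    /* MOVQ: low lane = *p, high lane = 0 */
    static __m128i load_low_qword(const int64_t *p) {
      return _mm_loadl_epi64((const __m128i *)p);
    }
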
/external/webp/src/dsp/
dec_msa.c
    499: p2_d = __msa_copy_s_d((v2i64)p2, 0);
    500: p1_d = __msa_copy_s_d((v2i64)p1, 0);
    501: p0_d = __msa_copy_s_d((v2i64)p0, 0);
    502: q0_d = __msa_copy_s_d((v2i64)q0, 0);
    503: q1_d = __msa_copy_s_d((v2i64)q1, 0);
    504: q2_d = __msa_copy_s_d((v2i64)q2, 0);
    511: p2_d = __msa_copy_s_d((v2i64)p2, 1);
    512: p1_d = __msa_copy_s_d((v2i64)p1, 1);
    513: p0_d = __msa_copy_s_d((v2i64)p0, 1);
    514: q0_d = __msa_copy_s_d((v2i64)q0, 1);
    [all...]

msa_macro.h
    589: const v2i64 res0_m = __msa_hadd_s_d((v4i32)in, (v4i32)in);
    590: const v2i64 res1_m = __msa_splati_d(res0_m, 1);
    591: const v2i64 out = res0_m + res1_m;
    606: const v2i64 res0 = __msa_hadd_s_d(res, res);
    607: const v2i64 res1 = __msa_splati_d(res0, 1);
    608: const v2i64 res2 = res0 + res1;
    625: v2u64 res1_m = (v2u64)__msa_splati_d((v2i64)res0_m, 1);
    699: out = (RTYPE)__msa_insert_d((v2i64)out, 0, in0); \
    700: out = (RTYPE)__msa_insert_d((v2i64)out, 1, in1); \
    720: #define ILVEV_B2_SD(...) ILVEV_B2(v2i64, __VA_ARGS__)
    [all...]

enc_msa.c
    127: out0 = __msa_copy_s_d((v2i64)t0, 0);
    128: out1 = __msa_copy_s_d((v2i64)t0, 1);
    129: out2 = __msa_copy_s_d((v2i64)t1, 0);
    130: out3 = __msa_copy_s_d((v2i64)t1, 1);
    265: const v16u8 A = (v16u8)__msa_insert_d((v2i64)A1, 0, val_m);
    299: const v16u8 A1 = (v16u8)__msa_insert_d((v2i64)A2, 0, val_m);
    319: const v16u8 A = (v16u8)__msa_insert_d((v2i64)A1, 0, val_m);
    564: const v2i64 temp2 = (v2i64)__msa_hadd_u_d(temp1, temp1); \
    565: const v2i64 temp3 = __msa_splati_d(temp2, 1);
    [all...]

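The msa_macro.h hits at 589-591 and 606-608 are a horizontal-sum idiom: a widening pairwise add into v2i64, a splat of lane 1, and a final add leave the total in lane 0, which __msa_copy_s_d then extracts, as in dec_msa.c and enc_msa.c above. A standalone sketch with the same MIPS MSA intrinsics, assuming an -mmsa toolchain; the function name is illustrative:

    #include <msa.h>

    static long long horizontal_sum_w(v4i32 in) {
      const v2i64 sum  = __msa_hadd_s_d(in, in);  /* adjacent i32 pairs -> two i64 */
      const v2i64 high = __msa_splati_d(sum, 1);  /* broadcast lane 1 */
      const v2i64 tot  = sum + high;              /* lane 0 now holds the total */
      return __msa_copy_s_d(tot, 0);              /* extract lane 0 as a scalar */
    }
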
/external/llvm/lib/Target/PowerPC/ |
PPCInstrAltivec.td
    [all...]

/external/swiftshader/third_party/LLVM/lib/Target/ARM/ |
ARMCallingConv.td
    32: CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
    50: CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
    64: CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
    76: CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
    92: CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
    139: CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
    149: CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
    164: CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
    176: CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,

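Each CCIfType rule above bit-converts every 128-bit vector type, v2i64 included, to v2f64 before register assignment, so under AAPCS-VFP they all travel in NEON Q registers. A hedged C-level illustration (the function is hypothetical; both arguments are passed the same way despite different lane types):

    #include <arm_neon.h>

    /* a and b each arrive in one Q register regardless of lane type */
    uint64x2_t xor_mixed(uint64x2_t a, uint32x4_t b) {
      return veorq_u64(a, vreinterpretq_u64_u32(b));
    }
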
/external/llvm/test/CodeGen/AArch64/ |
arm64-vcvt.ll
    26: %tmp3 = call <2 x i64> @llvm.aarch64.neon.fcvtas.v2i64.v2f64(<2 x double> %A)
    32: declare <2 x i64> @llvm.aarch64.neon.fcvtas.v2i64.v2f64(<2 x double>) nounwind readnone
    57: %tmp3 = call <2 x i64> @llvm.aarch64.neon.fcvtau.v2i64.v2f64(<2 x double> %A)
    63: declare <2 x i64> @llvm.aarch64.neon.fcvtau.v2i64.v2f64(<2 x double>) nounwind readnone
    88: %tmp3 = call <2 x i64> @llvm.aarch64.neon.fcvtms.v2i64.v2f64(<2 x double> %A)
    94: declare <2 x i64> @llvm.aarch64.neon.fcvtms.v2i64.v2f64(<2 x double>) nounwind readnone
    119: %tmp3 = call <2 x i64> @llvm.aarch64.neon.fcvtmu.v2i64.v2f64(<2 x double> %A)
    125: declare <2 x i64> @llvm.aarch64.neon.fcvtmu.v2i64.v2f64(<2 x double>) nounwind readnone
    150: %tmp3 = call <2 x i64> @llvm.aarch64.neon.fcvtps.v2i64.v2f64(<2 x double> %A)
    156: declare <2 x i64> @llvm.aarch64.neon.fcvtps.v2i64.v2f64(<2 x double>) nounwind readnone
    [all...]

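The fcvtas/fcvtau/fcvtms/fcvtmu/fcvtps intrinsics above are the rounding-mode-specific v2f64->v2i64 conversions. The same operations are reachable from C through the AArch64 ACLE intrinsics; a sketch of three of them (AArch64 only, function names illustrative):

    #include <arm_neon.h>

    int64x2_t cvt_ties_away(float64x2_t v)      { return vcvtaq_s64_f64(v); } /* FCVTAS */
    int64x2_t cvt_toward_neg_inf(float64x2_t v) { return vcvtmq_s64_f64(v); } /* FCVTMS */
    int64x2_t cvt_toward_pos_inf(float64x2_t v) { return vcvtpq_s64_f64(v); } /* FCVTPS */
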
arm64-vpopcnt.ll
    64: %cnt = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %x)
    68: declare <2 x i64> @llvm.ctpop.v2i64(<2 x i64>) nounwind readnone

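NEON's CNT instruction only counts bits per byte, so @llvm.ctpop.v2i64 is lowered as a byte-wise popcount followed by widening pairwise adds. A sketch of the equivalent intrinsic sequence (the function name is illustrative):

    #include <arm_neon.h>

    static uint64x2_t popcount_u64x2(uint64x2_t x) {
      const uint8x16_t per_byte = vcntq_u8(vreinterpretq_u8_u64(x)); /* CNT */
      const uint16x8_t s16 = vpaddlq_u8(per_byte); /* pairwise widen u8  -> u16 */
      const uint32x4_t s32 = vpaddlq_u16(s16);     /* pairwise widen u16 -> u32 */
      return vpaddlq_u32(s32);                     /* pairwise widen u32 -> u64 */
    }
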
neon-truncStore-extLoad.ll
    5: define void @truncStore.v2i64(<2 x i64> %a, <2 x i32>* %result) {
    6: ; CHECK-LABEL: truncStore.v2i64:

machine-copy-prop.ll
    21: %vmull = tail call <2 x i64> @llvm.aarch64.neon.umull.v2i64(<2 x i32> <i32 -1, i32 -1>, <2 x i32> %shuffle251)
    40: call void @llvm.aarch64.neon.st2lane.v2i64.p0i8(<2 x i64> zeroinitializer, <2 x i64> zeroinitializer, i64 1, i8* %t1)
    87: declare void @llvm.aarch64.neon.st2lane.v2i64.p0i8(<2 x i64>, <2 x i64>, i64, i8* nocapture)
    101: declare <2 x i64> @llvm.aarch64.neon.umull.v2i64(<2 x i32>, <2 x i32>)

/external/llvm/test/CodeGen/ARM/ |
vqdmul.ll
    175: %tmp3 = call <2 x i64> @llvm.arm.neon.vqdmull.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp2)
    193: %1 = tail call <2 x i64> @llvm.arm.neon.vqdmull.v2i64(<2 x i32> %arg0_int32x2_t, <2 x i32> %0) ; <<2 x i64>> [#uses=1]
    198: declare <2 x i64> @llvm.arm.neon.vqdmull.v2i64(<2 x i32>, <2 x i32>) nounwind readnone
    217: %tmp4 = call <2 x i64> @llvm.arm.neon.vqdmull.v2i64(<2 x i32> %tmp2, <2 x i32> %tmp3)
    218: %tmp5 = call <2 x i64> @llvm.arm.neon.vqadds.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp4)
    237: %1 = tail call <2 x i64> @llvm.arm.neon.vqdmull.v2i64(<2 x i32> %arg1_int32x2_t, <2 x i32> %0)
    238: %2 = call <2 x i64> @llvm.arm.neon.vqadds.v2i64(<2 x i64> %arg0_int64x2_t, <2 x i64> %1)
    243: declare <2 x i64> @llvm.arm.neon.vqadds.v2i64(<2 x i64>, <2 x i64>) nounwind readnone
    262: %tmp4 = call <2 x i64> @llvm.arm.neon.vqdmull.v2i64(<2 x i32> %tmp2, <2 x i32> %tmp3)
    263: %tmp5 = call <2 x i64> @llvm.arm.neon.vqsubs.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp4)
    [all...]

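The vqdmul.ll hits pair vqdmull with the saturating v2i64 add/sub intrinsics (vqadds/vqsubs), the combination that VQDMLAL/VQDMLSL fuse into one instruction. A sketch of the same operations via the standard ACLE intrinsics (function names illustrative):

    #include <arm_neon.h>

    /* VQDMULL.S32: saturating doubling long multiply */
    int64x2_t qdmull(int32x2_t a, int32x2_t b) {
      return vqdmull_s32(a, b);
    }

    /* multiply then saturating accumulate, as in the vqdmlal tests;
       the fused form is available directly as vqdmlal_s32 */
    int64x2_t qdmlal(int64x2_t acc, int32x2_t a, int32x2_t b) {
      return vqaddq_s64(acc, vqdmull_s32(a, b));
    }
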
/external/llvm/test/CodeGen/Mips/msa/ |
endian.ll
    7: @v2i64 = global <2 x i64> <i64 0, i64 0>
    104: store volatile <2 x i64> <i64 1, i64 2>, <2 x i64>*@v2i64

/external/llvm/test/TableGen/ |
MultiPat.td
    9: def v2i64 : ValueType<128, 22>; // 2 x i64 vector value
    67: def VR128 : RegisterClass<[v2i64, v2f64],

TargetInstrSpec.td
    15: def v2i64 : ValueType<128, 22>; // 2 x i64 vector value
    64: def VR128 : RegisterClass<[v2i64, v2f64],

/external/swiftshader/third_party/LLVM/test/TableGen/ |
MultiPat.td
    9: def v2i64 : ValueType<128, 22>; // 2 x i64 vector value
    67: def VR128 : RegisterClass<[v2i64, v2f64],

/external/valgrind/coregrind/m_gdbserver/ |
64bit-sse-valgrind-s1.xml
    15: <vector id="v2i64" type="int64" count="2"/>
    22: <field name="v2_int64" type="v2i64"/>

64bit-sse-valgrind-s2.xml
    15: <vector id="v2i64" type="int64" count="2"/>
    22: <field name="v2_int64" type="v2i64"/>

64bit-sse.xml
    15: <vector id="v2i64" type="int64" count="2"/>
    22: <field name="v2_int64" type="v2i64"/>

/external/libvpx/libvpx/vpx_dsp/mips/ |
vpx_convolve_avg_msa.c
    72: out0 = __msa_copy_u_d((v2i64)dst0, 0);
    73: out1 = __msa_copy_u_d((v2i64)dst1, 0);
    74: out2 = __msa_copy_u_d((v2i64)dst2, 0);
    75: out3 = __msa_copy_u_d((v2i64)dst3, 0);