/external/llvm/test/CodeGen/SystemZ/

vec-shift-04.ll
    105  ; Test a v2i64 shift by a variable.
    118  ; Test a v2i64 shift by the lowest useful constant.
    127  ; Test a v2i64 shift by the highest useful constant.

vec-shift-05.ll
    105  ; Test a v2i64 shift by a variable.
    118  ; Test a v2i64 shift by the lowest useful constant.
    127  ; Test a v2i64 shift by the highest useful constant.

vec-shift-06.ll
    105  ; Test a v2i64 shift by a variable.
    118  ; Test a v2i64 shift by the lowest useful constant.
    127  ; Test a v2i64 shift by the highest useful constant.

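The three shift files share one layout; for 64-bit lanes the lowest and highest useful shift amounts are 1 and 63, since a shift by 0 is a no-op and a shift by 64 or more is undefined. A minimal sketch of the "shift by a variable" case, assuming the usual splat-the-scalar pattern (the function name and the choice of shl are illustrative, not copied from the tests):

    define <2 x i64> @shift_by_variable(<2 x i64> %val, i64 %amt) {
      ; Splat the scalar amount into both lanes, then do a plain IR shift.
      %ins = insertelement <2 x i64> undef, i64 %amt, i32 0
      %splat = shufflevector <2 x i64> %ins, <2 x i64> undef, <2 x i32> zeroinitializer
      %ret = shl <2 x i64> %val, %splat
      ret <2 x i64> %ret
    }
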
vec-add-01.ll
    32   ; Test a v2i64 addition.

vec-move-05.ll
    116  ; Test v2i64 extraction of the first element.
    125  ; Test v2i64 extraction of the last element.
    134  ; Test v2i64 extractions of an absurd element number. This must compile
    144  ; Test v2i64 extraction of a variable element.

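The "absurd element number" case must compile because extractelement with an out-of-range constant index is still valid IR; only its result is undefined. A sketch of the constant- and variable-index shapes (function names are illustrative):

    define i64 @extract_first(<2 x i64> %val) {
      %ret = extractelement <2 x i64> %val, i32 0
      ret i64 %ret
    }

    define i64 @extract_variable(<2 x i64> %val, i32 %index) {
      ; A non-constant index typically forces the value through memory
      ; or a variable-lane move, which is the interesting case to test.
      %ret = extractelement <2 x i64> %val, i32 %index
      ret i64 %ret
    }
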
vec-move-07.ll
    32   ; Test v2i64. Here we load %val into both halves.

vec-mul-01.ll
    32   ; Test a v2i64 multiplication. There's no vector equivalent.

vec-neg-01.ll
    32   ; Test a v2i64 negation.

/external/swiftshader/third_party/LLVM/test/CodeGen/ARM/

vqshl.ll
    107  %tmp3 = call <2 x i64> @llvm.arm.neon.vqshifts.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
    143  %tmp3 = call <2 x i64> @llvm.arm.neon.vqshiftu.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
    271  %tmp2 = call <2 x i64> @llvm.arm.neon.vqshifts.v2i64(<2 x i64> %tmp1, <2 x i64> < i64 63, i64 63 >)
    303  %tmp2 = call <2 x i64> @llvm.arm.neon.vqshiftu.v2i64(<2 x i64> %tmp1, <2 x i64> < i64 63, i64 63 >)
    335  %tmp2 = call <2 x i64> @llvm.arm.neon.vqshiftsu.v2i64(<2 x i64> %tmp1, <2 x i64> < i64 63, i64 63 >)
    357  declare <2 x i64> @llvm.arm.neon.vqshifts.v2i64(<2 x i64>, <2 x i64>) nounwind readnone
    362  declare <2 x i64> @llvm.arm.neon.vqshiftu.v2i64(<2 x i64>, <2 x i64>) nounwind readnone
    367  declare <2 x i64> @llvm.arm.neon.vqshiftsu.v2i64(<2 x i64>, <2 x i64>) nounwind readnone
    473  %tmp3 = call <2 x i64> @llvm.arm.neon.vqrshifts.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
    509  %tmp3 = call <2 x i64> @llvm.arm.neon.vqrshiftu.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2 [all...]

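Every match follows the same test shape: load the operands, call the saturating-shift intrinsic, return the result. A self-contained sketch built around the vqshifts declaration listed above (the function name is illustrative, and the two-operand load is modern IR syntax; this vendored LLVM copy may predate it):

    declare <2 x i64> @llvm.arm.neon.vqshifts.v2i64(<2 x i64>, <2 x i64>) nounwind readnone

    define <2 x i64> @saturating_shift(<2 x i64>* %A, <2 x i64>* %B) {
      %tmp1 = load <2 x i64>, <2 x i64>* %A   ; values to shift
      %tmp2 = load <2 x i64>, <2 x i64>* %B   ; per-lane signed shift amounts
      %tmp3 = call <2 x i64> @llvm.arm.neon.vqshifts.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
      ret <2 x i64> %tmp3
    }
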
vpadd.ll
    113  %tmp2 = call <2 x i64> @llvm.arm.neon.vpaddls.v2i64.v4i32(<4 x i32> %tmp1)
    137  %tmp2 = call <2 x i64> @llvm.arm.neon.vpaddlu.v2i64.v4i32(<4 x i32> %tmp1)
    165  declare <2 x i64> @llvm.arm.neon.vpaddls.v2i64.v4i32(<4 x i32>) nounwind readnone
    169  declare <2 x i64> @llvm.arm.neon.vpaddlu.v2i64.v4i32(<4 x i32>) nounwind readnone

vqadd.ll
    107  %tmp3 = call <2 x i64> @llvm.arm.neon.vqadds.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
    143  %tmp3 = call <2 x i64> @llvm.arm.neon.vqaddu.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
    160  declare <2 x i64> @llvm.arm.neon.vqadds.v2i64(<2 x i64>, <2 x i64>) nounwind readnone
    165  declare <2 x i64> @llvm.arm.neon.vqaddu.v2i64(<2 x i64>, <2 x i64>) nounwind readnone

vqdmul.ll
    175  %tmp3 = call <2 x i64> @llvm.arm.neon.vqdmull.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp2)
    193  %1 = tail call <2 x i64> @llvm.arm.neon.vqdmull.v2i64(<2 x i32> %arg0_int32x2_t, <2 x i32> %0) ; <<2 x i64>> [#uses=1]
    198  declare <2 x i64> @llvm.arm.neon.vqdmull.v2i64(<2 x i32>, <2 x i32>) nounwind readnone
    216  %tmp4 = call <2 x i64> @llvm.arm.neon.vqdmlal.v2i64(<2 x i64> %tmp1, <2 x i32> %tmp2, <2 x i32> %tmp3)
    234  %1 = tail call <2 x i64> @llvm.arm.neon.vqdmlal.v2i64(<2 x i64> %arg0_int64x2_t, <2 x i32> %arg1_int32x2_t, <2 x i32> %0) ; <<2 x i64>> [#uses=1]
    239  declare <2 x i64> @llvm.arm.neon.vqdmlal.v2i64(<2 x i64>, <2 x i32>, <2 x i32>) nounwind readnone
    257  %tmp4 = call <2 x i64> @llvm.arm.neon.vqdmlsl.v2i64(<2 x i64> %tmp1, <2 x i32> %tmp2, <2 x i32> %tmp3)
    275  %1 = tail call <2 x i64> @llvm.arm.neon.vqdmlsl.v2i64(<2 x i64> %arg0_int64x2_t, <2 x i32> %arg1_int32x2_t, <2 x i32> %0) ; <<2 x i64>> [#uses=1]
    280  declare <2 x i64> @llvm.arm.neon.vqdmlsl.v2i64(<2 x i64>, <2 x i32>, <2 x i32>) nounwind readnone

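vqdmull, vqdmlal, and vqdmlsl are the widening ops in this family: i32 lanes in, i64 lanes out. A sketch of the multiply-accumulate form, reusing the vqdmlal declaration listed above (the function name is illustrative):

    declare <2 x i64> @llvm.arm.neon.vqdmlal.v2i64(<2 x i64>, <2 x i32>, <2 x i32>) nounwind readnone

    define <2 x i64> @vqdmlal_sketch(<2 x i64> %acc, <2 x i32> %a, <2 x i32> %b) {
      ; Each i32 lane pair is multiplied, doubled, saturated, and added
      ; into the corresponding i64 accumulator lane.
      %res = call <2 x i64> @llvm.arm.neon.vqdmlal.v2i64(<2 x i64> %acc, <2 x i32> %a, <2 x i32> %b)
      ret <2 x i64> %res
    }
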
vqsub.ll
    107  %tmp3 = call <2 x i64> @llvm.arm.neon.vqsubs.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
    143  %tmp3 = call <2 x i64> @llvm.arm.neon.vqsubu.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
    160  declare <2 x i64> @llvm.arm.neon.vqsubs.v2i64(<2 x i64>, <2 x i64>) nounwind readnone
    165  declare <2 x i64> @llvm.arm.neon.vqsubu.v2i64(<2 x i64>, <2 x i64>) nounwind readnone

/external/libpng/mips/

filter_msa_intrinsics.c
    489  dst0 = (v16u8) __msa_pckev_d((v2i64) dst1, (v2i64) dst0);
    531  out0 = __msa_copy_s_d((v2i64) dst0, 0);
    585  dst0 = (v16u8) __msa_pckev_d((v2i64) dst1, (v2i64) dst0);
    643  out0 = __msa_copy_s_d((v2i64) dst0, 0);
    717  dst0 = (v16u8) __msa_pckev_d((v2i64) dst1, (v2i64) dst0);
    795  out0 = __msa_copy_s_d((v2i64) dst0, 0);

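Note that v2i64 here is the MSA vector type from the C intrinsics header (msa.h), not an LLVM machine value type: the casts reinterpret v16u8 data so __msa_pckev_d can pack even-indexed doublewords and __msa_copy_s_d can move doubleword 0 into a scalar. A rough LLVM IR rendering, assuming these builtins map to llvm.mips.pckev.d and llvm.mips.copy.s.d with the signatures shown (an assumption, not taken from the file):

    declare <2 x i64> @llvm.mips.pckev.d(<2 x i64>, <2 x i64>)
    declare i64 @llvm.mips.copy.s.d(<2 x i64>, i32)

    define i64 @pack_and_copy(<2 x i64> %dst1, <2 x i64> %dst0) {
      ; Pack even doublewords of both sources, then copy element 0 to a GPR,
      ; mirroring the pckev_d/copy_s_d pairing in the matches above.
      %packed = call <2 x i64> @llvm.mips.pckev.d(<2 x i64> %dst1, <2 x i64> %dst0)
      %out = call i64 @llvm.mips.copy.s.d(<2 x i64> %packed, i32 0)
      ret i64 %out
    }
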
/external/llvm/lib/Target/ARM/

ARMCallingConv.td
    34   CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
    59   CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
    73   CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
    92   CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
    108  CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
    164  CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
    180  CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
    204  CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
    225  CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,

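Each of these rules canonicalizes the 128-bit NEON types to v2f64 before register assignment, so a <2 x i64> argument or return value is passed exactly like a v2f64: one Q register, which is the same storage as a D-register pair. In IR terms the rule fires for a signature as plain as this (illustrative, not from the file):

    define <2 x i64> @passthrough(<2 x i64> %v) {
      ; %v arrives bitconverted through the v2f64 path described above.
      ret <2 x i64> %v
    }
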
/external/llvm/lib/Target/SystemZ/

SystemZCallingConv.td
    57   CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
    96   CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
    103  CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
    108  CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],

/external/llvm/test/CodeGen/AArch64/

arm64-vqsub.ll
    89   %tmp3 = call <2 x i64> @llvm.aarch64.neon.sqsub.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
    125  %tmp3 = call <2 x i64> @llvm.aarch64.neon.uqsub.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
    142  declare <2 x i64> @llvm.aarch64.neon.sqsub.v2i64(<2 x i64>, <2 x i64>) nounwind readnone
    147  declare <2 x i64> @llvm.aarch64.neon.uqsub.v2i64(<2 x i64>, <2 x i64>) nounwind readnone

/external/llvm/test/CodeGen/ARM/

vpadal.ll
    80   %tmp3 = call <2 x i64> @llvm.arm.neon.vpadals.v2i64.v4i32(<2 x i64> %tmp1, <4 x i32> %tmp2)
    107  %tmp3 = call <2 x i64> @llvm.arm.neon.vpadalu.v2i64.v4i32(<2 x i64> %tmp1, <4 x i32> %tmp2)
    121  declare <2 x i64> @llvm.arm.neon.vpadals.v2i64.v4i32(<2 x i64>, <4 x i32>) nounwind readnone
    125  declare <2 x i64> @llvm.arm.neon.vpadalu.v2i64.v4i32(<2 x i64>, <4 x i32>) nounwind readnone

vpadd.ll
    113  %tmp2 = call <2 x i64> @llvm.arm.neon.vpaddls.v2i64.v4i32(<4 x i32> %tmp1)
    137  %tmp2 = call <2 x i64> @llvm.arm.neon.vpaddlu.v2i64.v4i32(<4 x i32> %tmp1)
    176  declare <2 x i64> @llvm.arm.neon.vpaddls.v2i64.v4i32(<4 x i32>) nounwind readnone
    180  declare <2 x i64> @llvm.arm.neon.vpaddlu.v2i64.v4i32(<4 x i32>) nounwind readnone

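vpaddl is the plain widening pairwise add (4 x i32 in, 2 x i64 out) and vpadal is its accumulating variant, which additionally takes the running <2 x i64> total. A sketch of the signed non-accumulating form, using the declaration listed above (the function name is illustrative):

    declare <2 x i64> @llvm.arm.neon.vpaddls.v2i64.v4i32(<4 x i32>) nounwind readnone

    define <2 x i64> @pairwise_widen(<4 x i32> %v) {
      ; Adjacent i32 lane pairs are summed into i64 lanes.
      %sum = call <2 x i64> @llvm.arm.neon.vpaddls.v2i64.v4i32(<4 x i32> %v)
      ret <2 x i64> %sum
    }
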
vqadd.ll
    107  %tmp3 = call <2 x i64> @llvm.arm.neon.vqadds.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
    143  %tmp3 = call <2 x i64> @llvm.arm.neon.vqaddu.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
    160  declare <2 x i64> @llvm.arm.neon.vqadds.v2i64(<2 x i64>, <2 x i64>) nounwind readnone
    165  declare <2 x i64> @llvm.arm.neon.vqaddu.v2i64(<2 x i64>, <2 x i64>) nounwind readnone

vqsub.ll
    107  %tmp3 = call <2 x i64> @llvm.arm.neon.vqsubs.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
    143  %tmp3 = call <2 x i64> @llvm.arm.neon.vqsubu.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
    160  declare <2 x i64> @llvm.arm.neon.vqsubs.v2i64(<2 x i64>, <2 x i64>) nounwind readnone
    165  declare <2 x i64> @llvm.arm.neon.vqsubu.v2i64(<2 x i64>, <2 x i64>) nounwind readnone

/external/llvm/test/CodeGen/X86/

vec_i64.ll
    5    ; Used movq to load i64 into a v2i64 when the top i64 is 0.

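A reconstruction of the pattern that comment describes (the function name is illustrative, and the two-operand load is modern IR syntax): a scalar i64 loaded into lane 0 with lane 1 known to be zero can be lowered to a single movq, since movq itself zeroes bits 127:64 of the XMM register.

    define <2 x i64> @load_movq(i64* %p) {
      %val = load i64, i64* %p
      ; Lane 1 stays zero, so the whole construct is one zero-extending load.
      %vec = insertelement <2 x i64> zeroinitializer, i64 %val, i32 0
      ret <2 x i64> %vec
    }
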
/external/swiftshader/third_party/LLVM/lib/Target/X86/

X86CallingConv.td
    40   CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
    154  CCPromoteToType<v2i64>>>>,
    157  CCIfType<[f32, f64, v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
    175  CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64], CCAssignToStack<16, 16>>,
    194  CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64], CCPassIndirect<i64>>,
    213  CCIfType<[f32, f64, v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
    235  CCIfType<[f32, f64, v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
    273  CCIfNotVarArg<CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
    282  CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64], CCAssignToStack<16, 16>>,

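These X86 rules show the three ways a v2i64 can travel depending on the convention: in an XMM register, in a 16-byte-aligned stack slot (CCAssignToStack<16, 16>), or by hidden pointer (CCPassIndirect<i64>, presumably the Win64 treatment of vector arguments). CCPromoteToType<v2i64> at line 154 widens a smaller type to v2i64 first, and the CCIfNotVarArg guard at line 273 suggests vector registers are only used for fixed, not variadic, arguments there.
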
/external/llvm/test/CodeGen/Mips/msa/

basic_operations.ll
    24   @v2i64 = global <2 x i64> <i64 0, i64 0>
    142  store volatile <2 x i64> <i64 0, i64 0>, <2 x i64>*@v2i64
    145  store volatile <2 x i64> <i64 72340172838076673, i64 72340172838076673>, <2 x i64>*@v2i64
    148  store volatile <2 x i64> <i64 281479271743489, i64 281479271743489>, <2 x i64>*@v2i64
    151  store volatile <2 x i64> <i64 4294967297, i64 4294967297>, <2 x i64>*@v2i64
    154  store volatile <2 x i64> <i64 1, i64 1>, <2 x i64>*@v2i64
    157  store volatile <2 x i64> <i64 1, i64 31>, <2 x i64>*@v2i64
    164  store volatile <2 x i64> <i64 3, i64 4>, <2 x i64>*@v2i64
    285  store volatile <2 x i64> %2, <2 x i64>*@v2i64
    344  %1 = load <2 x i64>, <2 x i64>* @v2i64
    [all...]

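The large decimal constants are lane-splat patterns in disguise: 72340172838076673 is 0x0101010101010101 (every byte 1), 281479271743489 is 0x0001000100010001 (every halfword 1), and 4294967297 is 0x0000000100000001 (every word 1). The test presumably checks that MSA materializes each with a cheap ldi.b/ldi.h/ldi.w splat immediate rather than building a 64-bit constant in a GPR.
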
/external/llvm/lib/Target/AArch64/ |
AArch64SchedKryoDetails.td
    27   (instregex "(S|U)R?SRA(v2i64|v4i32|v8i16|v16i8)_shift")>;
    52   (instregex "(S|U)(ABD|QSUB|RHADD)(v16i8|v8i16|v4i32|v2i64)")>;
    94   (instregex "((S|U)ADDLP|ABS)(v2i64|v4i32|v8i16|v16i8)(_v.*)?")>;
    154  (instregex "(S|U)CVTF(v2i64|v4i32|v2f64|v4f32)(_shift)?")>;
    208  (instregex "^(S|U)QADD(v16i8|v8i16|v4i32|v2i64)")>;
    226  (instregex "(S|U)(QSHLU?|RSHR)(v16i8|v8i16|v4i32|v2i64)_shift$")>;
    238  (instregex "(S|U)(QSHL|RSHL|QRSHL)(v16i8|v8i16|v4i32|v2i64)$")>;
    292  (instregex "(S|U)SHL(v16i8|v8i16|v4i32|v2i64)$")>;
    310  (instregex "((S|U)SHR|SHL)(v16i8|v8i16|v4i32|v2i64)_shift$")>;
    382  (instregex "(ADD|SUB)(v16i8|v8i16|v4i32|v2i64)")>;
    [all...]

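Unlike the other hits, v2i64 here is an instruction-name suffix rather than a value type: each instregex matches the 128-bit variants of a SIMD instruction (ADDv2i64, SQADDv2i64, and so on) so that the enclosing scheduling definitions can attach Kryo latency and functional-unit data to them.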