/prebuilts/gcc/darwin-x86/mips/mips64el-linux-android-4.9/lib/gcc/mips64el-linux-android/4.9.x/include/ |
msa.h | 43 typedef long long v2i64 __attribute__((vector_size(16), aligned(16))); typedef
    56 extern v2i64 __builtin_msa_sll_d(v2i64, v2i64);
    60 extern v2i64 __builtin_msa_slli_d(v2i64, unsigned char);
    64 extern v2i64 __builtin_msa_sra_d(v2i64, v2i64);
    68 extern v2i64 __builtin_msa_srai_d(v2i64, unsigned char) [all...] |
/prebuilts/gcc/linux-x86/mips/mips64el-linux-android-4.9/lib/gcc/mips64el-linux-android/4.9.x/include/ |
msa.h | 43 typedef long long v2i64 __attribute__((vector_size(16), aligned(16))); typedef
    56 extern v2i64 __builtin_msa_sll_d(v2i64, v2i64);
    60 extern v2i64 __builtin_msa_slli_d(v2i64, unsigned char);
    64 extern v2i64 __builtin_msa_sra_d(v2i64, v2i64);
    68 extern v2i64 __builtin_msa_srai_d(v2i64, unsigned char) [all...] |
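The msa.h hits above show the MSA vector typedef and the 64-bit-lane shift builtins. A minimal usage sketch, assuming a mips64el toolchain with MSA enabled (-mmsa); the function and variable names are illustrative:

    #include <msa.h>

    /* v2i64 packs two 64-bit lanes into one 128-bit MSA register. */
    v2i64 shift_demo(v2i64 v, v2i64 amounts) {
      v2i64 a = __builtin_msa_slli_d(v, 3);      /* logical left by immediate 3 */
      v2i64 b = __builtin_msa_sra_d(v, amounts); /* arithmetic right, per-lane amounts */
      return __builtin_msa_sll_d(a, b);          /* logical left, per-lane amounts */
    }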
/external/swiftshader/third_party/LLVM/test/CodeGen/X86/ |
vec_ctbits.ll | 3 declare <2 x i64> @llvm.cttz.v2i64(<2 x i64>)
    4 declare <2 x i64> @llvm.ctlz.v2i64(<2 x i64>)
    5 declare <2 x i64> @llvm.ctpop.v2i64(<2 x i64>)
    8 %c = call <2 x i64> @llvm.cttz.v2i64(<2 x i64> %a)
    12 %c = call <2 x i64> @llvm.ctlz.v2i64(<2 x i64> %a)
    16 %c = call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %a)
|
widen_conv-1.ll | 5 ; truncate v2i64 to v2i32
|
legalizedag_vec.ll | 6 ; v2i64 is a legal type but with mmx disabled, i64 is an illegal type. When
|
/external/llvm/test/Analysis/CostModel/X86/ |
scalarize.ll | 14 declare %i8 @llvm.bswap.v2i64(%i8)
    17 declare %i8 @llvm.cttz.v2i64(%i8)
    27 ; CHECK32: cost of 1 {{.*}}bswap.v2i64
    28 ; CHECK64: cost of 1 {{.*}}bswap.v2i64
    29 %r3 = call %i8 @llvm.bswap.v2i64(%i8 undef)
    34 ; CHECK32: cost of 10 {{.*}}cttz.v2i64
    35 ; CHECK64: cost of 6 {{.*}}cttz.v2i64
    36 %r5 = call %i8 @llvm.cttz.v2i64(%i8 undef)
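The cost table above reflects how x86 legalizes these intrinsics: bswap.v2i64 is charged 1, consistent with a single vector shuffle, while cttz.v2i64 has no SSE equivalent and is scalarized per lane (cost 6 on x86-64, 10 on x86-32, where each 64-bit lane is split again). A rough C analogue of the scalarized form, not the backend's literal output:

    #include <stdint.h>

    typedef int64_t v2i64_t __attribute__((vector_size(16)));

    /* Extract each lane, run the scalar count, reinsert. Assumes nonzero
       lanes, since __builtin_ctzll is undefined for 0. */
    v2i64_t cttz_v2i64(v2i64_t v) {
      v2i64_t r;
      r[0] = __builtin_ctzll((uint64_t)v[0]);
      r[1] = __builtin_ctzll((uint64_t)v[1]);
      return r;
    }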
|
/external/libvpx/libvpx/vpx_dsp/mips/ |
vpx_convolve_copy_msa.c | 26 out0 = __msa_copy_u_d((v2i64)src0, 0);
    27 out1 = __msa_copy_u_d((v2i64)src1, 0);
    28 out2 = __msa_copy_u_d((v2i64)src2, 0);
    29 out3 = __msa_copy_u_d((v2i64)src3, 0);
    30 out4 = __msa_copy_u_d((v2i64)src4, 0);
    31 out5 = __msa_copy_u_d((v2i64)src5, 0);
    32 out6 = __msa_copy_u_d((v2i64)src6, 0);
    33 out7 = __msa_copy_u_d((v2i64)src7, 0);
    43 out0 = __msa_copy_u_d((v2i64)src0, 0);
    44 out1 = __msa_copy_u_d((v2i64)src1, 0) [all...] |
deblock_msa.c | 41 out9 = (v16u8)__msa_ilvl_d((v2i64)out8, (v2i64)out8); \
    42 out11 = (v16u8)__msa_ilvl_d((v2i64)out10, (v2i64)out10); \
    43 out13 = (v16u8)__msa_ilvl_d((v2i64)out12, (v2i64)out12); \
    44 out15 = (v16u8)__msa_ilvl_d((v2i64)out14, (v2i64)out14); \
    45 out1 = (v16u8)__msa_ilvl_d((v2i64)out0, (v2i64)out0); [all...] |
loopfilter_4_msa.c | 33 p1_d = __msa_copy_u_d((v2i64)p1_out, 0);
    34 p0_d = __msa_copy_u_d((v2i64)p0_out, 0);
    35 q0_d = __msa_copy_u_d((v2i64)q0_out, 0);
    36 q1_d = __msa_copy_u_d((v2i64)q1_out, 0);
    55 thresh0 = (v16u8)__msa_ilvr_d((v2i64)thresh1, (v2i64)thresh0);
    59 b_limit0 = (v16u8)__msa_ilvr_d((v2i64)b_limit1, (v2i64)b_limit0);
    63 limit0 = (v16u8)__msa_ilvr_d((v2i64)limit1, (v2i64)limit0) [all...] |
loopfilter_8_msa.c | 38 flat = (v16u8)__msa_ilvr_d((v2i64)zero, (v2i64)flat);
    41 p1_d = __msa_copy_u_d((v2i64)p1_out, 0);
    42 p0_d = __msa_copy_u_d((v2i64)p0_out, 0);
    43 q0_d = __msa_copy_u_d((v2i64)q0_out, 0);
    44 q1_d = __msa_copy_u_d((v2i64)q1_out, 0);
    65 p2_d = __msa_copy_u_d((v2i64)p2_out, 0);
    66 p1_d = __msa_copy_u_d((v2i64)p1_out, 0);
    67 p0_d = __msa_copy_u_d((v2i64)p0_out, 0);
    68 q0_d = __msa_copy_u_d((v2i64)q0_out, 0) [all...] |
loopfilter_16_msa.c | 457 flat = (v16u8)__msa_ilvr_d((v2i64)zero, (v2i64)flat);
    460 p1_d = __msa_copy_u_d((v2i64)p1_out, 0);
    461 p0_d = __msa_copy_u_d((v2i64)p0_out, 0);
    462 q0_d = __msa_copy_u_d((v2i64)q0_out, 0);
    463 q1_d = __msa_copy_u_d((v2i64)q1_out, 0);
    493 p2_d = __msa_copy_u_d((v2i64)p2_out, 0);
    494 p1_d = __msa_copy_u_d((v2i64)p1_out, 0);
    495 p0_d = __msa_copy_u_d((v2i64)p0_out, 0);
    496 q0_d = __msa_copy_u_d((v2i64)q0_out, 0) [all...] |
macros_msa.h | 305 out1 = (v8i16)__msa_ilvl_d((v2i64)out0, (v2i64)out0); \
    306 out3 = (v8i16)__msa_ilvl_d((v2i64)out2, (v2i64)out2); \
    425 out0_m = __msa_copy_u_d((v2i64)in, 0); \
    441 out0_m = __msa_copy_u_d((v2i64)in, 0); \
    442 out1_m = __msa_copy_u_d((v2i64)in, 1); \
    465 out0_m = __msa_copy_u_d((v2i64)in0, 0); \
    466 out1_m = __msa_copy_u_d((v2i64)in0, 1); \
    467 out2_m = __msa_copy_u_d((v2i64)in1, 0); [all...] |
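Most of these libvpx hits are the same two idioms: __msa_copy_u_d extracts one 64-bit lane of a 128-bit vector into a general register so an 8-byte row can be written with a plain 64-bit store, and __msa_ilvr_d packs the low halves of two vectors into one register. A minimal sketch of the store idiom (function and variable names are illustrative):

    #include <msa.h>
    #include <stdint.h>
    #include <string.h>

    static void store_8byte_rows(v16u8 pix, uint8_t *dst, int stride) {
      uint64_t row0 = __msa_copy_u_d((v2i64)pix, 0); /* low 8 bytes */
      uint64_t row1 = __msa_copy_u_d((v2i64)pix, 1); /* high 8 bytes */
      memcpy(dst, &row0, sizeof(row0));
      memcpy(dst + stride, &row1, sizeof(row1));
    }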
/external/gemmlowp/internal/ |
pack_msa.h | 90 v2i64 tmp = __builtin_msa_ilvr_d(
    91 reinterpret_cast<v2i64>(
    93 reinterpret_cast<v2i64>(
    99 v2i64 tmp = __builtin_msa_ilvl_d(
    100 reinterpret_cast<v2i64>(
    102 reinterpret_cast<v2i64>(
    110 v2i64 tmp = __builtin_msa_ilvr_d(
    111 reinterpret_cast<v2i64>(
    113 reinterpret_cast<v2i64>(
    119 v2i64 tmp = reinterpret_cast<v2i64> [all...] |
output_msa.h | 315 reinterpret_cast<v2i64>(tmp1), reinterpret_cast<v2i64>(tmp0));
    317 reinterpret_cast<v2i64>(tmp1), reinterpret_cast<v2i64>(tmp0));
    321 reinterpret_cast<v2i64>(tmp1), reinterpret_cast<v2i64>(tmp0));
    323 reinterpret_cast<v2i64>(tmp1), reinterpret_cast<v2i64>(tmp0));
    481 v2i64 u0 = reinterpret_cast<v2i64>(__builtin_msa_ilvr_w(t1, t0)) [all...] |
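Here the same interleave builtins serve gemmlowp's packing and output code, which, being C++, goes through reinterpret_cast rather than C casts. The pair splits along the 64-bit lanes: ilvr_d(b, a) yields { a[0], b[0] } (the low, "right" lanes) and ilvl_d(b, a) yields { a[1], b[1] } (the high, "left" lanes). An illustrative C rendering:

    #include <msa.h>

    static void interleave_d(v16i8 a, v16i8 b, v16i8 *lo, v16i8 *hi) {
      *lo = (v16i8)__msa_ilvr_d((v2i64)b, (v2i64)a); /* { a[0], b[0] } */
      *hi = (v16i8)__msa_ilvl_d((v2i64)b, (v2i64)a); /* { a[1], b[1] } */
    }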
/external/swiftshader/third_party/LLVM/lib/Target/CellSPU/ |
SPU64InstrInfo.td | 21 // 4. v2i64 setcc results are v4i32, which can be converted to a FSM mask (TODO)
    24 // 5. The code sequences for r64 and v2i64 are probably overly conservative,
    67 // v2i64 seteq (equality): the setcc result is v4i32
    71 def v2i64: CodeFrag<(i32 (COPY_TO_REGCLASS CEQv2i64compare.Fragment, R32C))>;
    83 def : Pat<(seteq (v2i64 VECREG:$rA), (v2i64 VECREG:$rB)), I64EQv2i64.Fragment>;
    120 def v2i64: CodeFrag<CLGTv2i64compare.Fragment>;
    132 //def : Pat<(setugt (v2i64 VECREG:$rA), (v2i64 VECREG:$rB)),
    154 def v2i64: CodeFrag<CLGEv2i64compare.Fragment> [all...] |
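The comments in this since-removed CellSPU backend record that SPU had no native 64-bit compare, so a v2i64 seteq was assembled from a v4i32 compare whose per-word results were then combined. The scalar idea behind that, as an illustrative sketch rather than the .td instruction sequence:

    #include <stdint.h>

    /* Two 64-bit values are equal iff both of their 32-bit halves are. */
    static int eq64_from_eq32(uint64_t a, uint64_t b) {
      int eq_lo = ((uint32_t)a == (uint32_t)b);
      int eq_hi = ((uint32_t)(a >> 32) == (uint32_t)(b >> 32));
      return eq_lo & eq_hi;
    }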
/external/llvm/test/CodeGen/PowerPC/ |
vec_popcnt.ll | 2 ; In addition, check the conversions to/from the v2i64 VMX register that was also added in P8.
    9 declare <2 x i64> @llvm.ctpop.v2i64(<2 x i64>) nounwind readnone
    39 %vcnt = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %x)
    48 %vcnt = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %tmp)
    58 %vcnt = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %tmp)
    67 %vcnt = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %tmp)
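POWER8 added vpopcntd, so llvm.ctpop.v2i64 lowers to a single instruction there. The intrinsic's semantics written out in portable C (a compiler targeting -mcpu=power8 may map this to vpopcntd):

    #include <stdint.h>

    typedef uint64_t v2u64_t __attribute__((vector_size(16)));

    /* Per-lane population count, matching llvm.ctpop.v2i64. */
    static v2u64_t popcnt_v2i64(v2u64_t v) {
      v2u64_t r;
      r[0] = (uint64_t)__builtin_popcountll(v[0]);
      r[1] = (uint64_t)__builtin_popcountll(v[1]);
      return r;
    }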
|
vec_clz.ll | 8 declare <2 x i64> @llvm.ctlz.v2i64(<2 x i64>) nounwind readnone
    35 %vcnt = tail call <2 x i64> @llvm.ctlz.v2i64(<2 x i64> %x)
|
/external/llvm/test/CodeGen/SystemZ/ |
vec-move-06.ll | 5 ; Test the basic v2i64 usage.
|
/external/libvpx/libvpx/vp8/common/mips/msa/ |
loopfilter_filters_msa.c | 223 thresh0 = (v16u8)__msa_ilvr_d((v2i64)thresh1, (v2i64)thresh0);
    227 b_limit0 = (v16u8)__msa_ilvr_d((v2i64)b_limit1, (v2i64)b_limit0);
    231 limit0 = (v16u8)__msa_ilvr_d((v2i64)limit1, (v2i64)limit0);
    263 thresh0 = (v16u8)__msa_ilvr_d((v2i64)thresh1, (v2i64)thresh0);
    267 b_limit0 = (v16u8)__msa_ilvr_d((v2i64)b_limit1, (v2i64)b_limit0) [all...] |
/external/llvm/test/CodeGen/AArch64/ |
arm64-vcvt_n.ll | 35 %vcvt_n1 = tail call <2 x double> @llvm.aarch64.neon.vcvtfxu2fp.v2f64.v2i64(<2 x i64> %a, i32 12)
    40 %vcvt_n1 = tail call <2 x double> @llvm.aarch64.neon.vcvtfxs2fp.v2f64.v2i64(<2 x i64> %a, i32 9)
    48 declare <2 x double> @llvm.aarch64.neon.vcvtfxu2fp.v2f64.v2i64(<2 x i64>, i32) nounwind readnone
    49 declare <2 x double> @llvm.aarch64.neon.vcvtfxs2fp.v2f64.v2i64(<2 x i64>, i32) nounwind readnone
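These are the fixed-point-to-float converts behind AArch64's vcvtq_n_* intrinsics; the trailing i32 operand is the number of fraction bits. A hedged sketch of the C source such IR typically comes from:

    #include <arm_neon.h>

    /* Convert fixed-point lanes to double, with 12 and 9 fraction bits
       matching the i32 12 / i32 9 operands in the IR above. */
    float64x2_t cvt_demo(uint64x2_t u, int64x2_t s) {
      float64x2_t a = vcvtq_n_f64_u64(u, 12); /* unsigned fixed-point */
      float64x2_t b = vcvtq_n_f64_s64(s, 9);  /* signed fixed-point */
      return vaddq_f64(a, b);
    }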
|
arm64-detect-vec-redux.ll | 20 %vpaddq_v2.i = tail call <2 x i64> @llvm.aarch64.neon.addp.v2i64(<2 x i64> undef, <2 x i64> %1) #2
    21 %vqdmlal2.i = tail call <2 x i64> @llvm.aarch64.neon.sqdmull.v2i64(<2 x i32> undef, <2 x i32> undef) #2
    22 %vqdmlal_v3.i = tail call <2 x i64> @llvm.aarch64.neon.sqadd.v2i64(<2 x i64> %vpaddq_v2.i, <2 x i64> %vqdmlal2.i) #2
    39 declare <2 x i64> @llvm.aarch64.neon.sqdmull.v2i64(<2 x i32>, <2 x i32>) #1
    42 declare <2 x i64> @llvm.aarch64.neon.sqadd.v2i64(<2 x i64>, <2 x i64>) #1
    45 declare <2 x i64> @llvm.aarch64.neon.addp.v2i64(<2 x i64>, <2 x i64>) #1
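This test's IR corresponds to a chain of AArch64 NEON intrinsics on v2i64: a pairwise add, a saturating doubling multiply-long, and a saturating add. A plausible C original (the operands stand in for the undefs in the test):

    #include <arm_neon.h>

    int64x2_t redux_demo(int64x2_t acc, int64x2_t x, int32x2_t a, int32x2_t b) {
      int64x2_t sum  = vpaddq_s64(acc, x); /* llvm.aarch64.neon.addp.v2i64 */
      int64x2_t prod = vqdmull_s32(a, b);  /* llvm.aarch64.neon.sqdmull.v2i64 */
      return vqaddq_s64(sum, prod);        /* llvm.aarch64.neon.sqadd.v2i64 */
    }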
|
/external/llvm/test/CodeGen/X86/ |
legalizedag_vec.ll | 6 ; v2i64 is a legal type but with mmx disabled, i64 is an illegal type. When
|
/external/clang/test/CodeGen/ |
arm-neon-misc.c | 17 // CHECK: call <2 x i64> @llvm.arm.neon.vld1.v2i64.p0i8
    27 // CHECK: call <2 x i64> @llvm.arm.neon.vld1.v2i64.p0i8
|
/external/llvm/test/CodeGen/ARM/ |
2010-06-29-PartialRedefFastAlloc.ll | 19 %0 = call <2 x i64> @llvm.arm.neon.vld1.v2i64.p0i8(i8* %arg, i32 1)
    25 declare <2 x i64> @llvm.arm.neon.vld1.v2i64.p0i8(i8*, i32) nounwind readonly
|
/external/swiftshader/third_party/LLVM/test/CodeGen/ARM/ |
2010-06-29-PartialRedefFastAlloc.ll | 19 %0 = call <2 x i64> @llvm.arm.neon.vld1.v2i64(i8* %arg, i32 1)
    25 declare <2 x i64> @llvm.arm.neon.vld1.v2i64(i8*, i32) nounwind readonly
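Both copies of this test (and the clang arm-neon-misc.c hits above) exercise llvm.arm.neon.vld1.v2i64, the intrinsic behind a vld1q load of two 64-bit lanes; the trailing i32 1 is the alignment. Roughly the C-level equivalent:

    #include <arm_neon.h>
    #include <stdint.h>

    /* Load two 64-bit lanes; with no alignment hint this matches the
       align-1 vld1.v2i64 calls in the tests. */
    uint64x2_t load_v2i64(const uint64_t *p) {
      return vld1q_u64(p);
    }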
|