/toolchain/binutils/binutils-2.25/gas/testsuite/gas/arm/

    half-prec-neon.s
          4: vcvt.f32.f16 q5, d6

    neon-omit.s
         10: vhsub.s32 q5,q7
         12: vqshl.u32 q5,q6
         15: vceq.i16 q5,#0
         16: vceq.i16 q5,q5
         17: vclt.s16 q5,#0
         18: vabs.s16 q5,q6
         36: vacle.f q5,q6
         58: vrhadd.s32 q1,q5,q2
         59: vhsub.s32 q5,q8,q [all...]

    half-prec-neon.d
          9: 0+4 <[^>]*> f3b6a706 vcvt\.f32\.f16 q5, d6

    half-prec-psyntax.s
          3: vcvt q5.f32, d6.f16

    neon-omit.d
         11: 0[0-9a-f]+ <[^>]+> f22aa24e vhsub\.s32 q5, q5, q7
         13: 0[0-9a-f]+ <[^>]+> f32ca45a vqshl\.u32 q5, q5, q6
         16: 0[0-9a-f]+ <[^>]+> f3b5a14a vceq\.i16 q5, q5, #0
         17: 0[0-9a-f]+ <[^>]+> f31aa85a vceq\.i16 q5, q5, q5
         18: 0[0-9a-f]+ <[^>]+> f3b5a24a vclt\.s16 q5, q5, # [all...]
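As a rough illustration of what the half-precision test lines above exercise (vcvt.f32.f16 and its per-register psyntax spelling), here is a minimal C intrinsics sketch. The function name is invented, and it assumes a toolchain with NEON FP16 support (e.g. -mfpu=neon-fp16 on ARMv7); it is not code from the tree above.

    #include <arm_neon.h>

    /* Widen four half-precision lanes (one D register) to four
     * single-precision lanes (one Q register), which is what
     * "vcvt.f32.f16 q5, d6" does in a single instruction. */
    float32x4_t widen_f16_to_f32(float16x4_t h)
    {
        return vcvt_f32_f16(h);
    }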
/frameworks/rs/cpu_ref/

    rsCpuIntrinsics_neon_Convolve.S
         58: vmovl.u8 q5, d29
        176: vmull.s16 q5, d19, d0[0]
        177: vmlal.s16 q5, d20, d0[1]
        178: vmlal.s16 q5, d21, d0[2]
        179: vmlal.s16 q5, d22, d0[3]
        180: vmlal.s16 q5, d23, d1[0]
        182: vmlal.s16 q5, d25, d1[1]
        183: vmlal.s16 q5, d26, d1[2]
        184: vmlal.s16 q5, d27, d1[3]
        185: vmlal.s16 q5, d28, d2[0 [all...]
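The convolve lines above follow the usual widening multiply-accumulate pattern: widen 8-bit samples to 16 bits (vmovl.u8), multiply each 16-bit vector by one coefficient lane, and sum the products in a 32-bit accumulator. A hedged C intrinsics sketch of that pattern (hypothetical helper, not the RenderScript source):

    #include <arm_neon.h>

    /* acc = s0*coeffs[0] + s1*coeffs[1] + s2*coeffs[2],
     * with every product widened to 32 bits before summing. */
    int32x4_t mac_three_taps(int16x4_t s0, int16x4_t s1, int16x4_t s2,
                             int16x4_t coeffs)
    {
        int32x4_t acc = vmull_lane_s16(s0, coeffs, 0); /* vmull.s16 q, d, d[0] */
        acc = vmlal_lane_s16(acc, s1, coeffs, 1);      /* vmlal.s16 q, d, d[1] */
        acc = vmlal_lane_s16(acc, s2, coeffs, 2);      /* vmlal.s16 q, d, d[2] */
        return acc;
    }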
/external/libvpx/libvpx/vp8/common/arm/neon/

    idct_dequant_full_2x_neon.c
         21: int16x8_t q0, q1, q2, q3, q4, q5, q6, q7, q8, q9, q10, q11;  [local]
         44: q5 = vld1q_s16(q);
         69: q5 = vmulq_s16(q5, q1);
         81: dLow1 = vget_low_s16(q5);
         82: dHigh1 = vget_high_s16(q5);
         84: q5 = vcombine_s16(dHigh0, dHigh1);
         87: q7 = vqdmulhq_n_s16(q5, sinpi8sqrt2);
         89: q9 = vqdmulhq_n_s16(q5, cospi8sqrt2minus1);
         98: q5 = vqaddq_s16(q5, q9) [all...]

    mbloopfilter_neon.c
         19: uint8x16_t q5,  // p1
         39: q12u8 = vabdq_u8(q4, q5);
         40: q13u8 = vabdq_u8(q5, q6);
         59: q1u8 = vabdq_u8(q5, q8);
         69: q5 = veorq_u8(q5, q0u8);
         83: q1s8 = vqsubq_s8(vreinterpretq_s8_u8(q5), vreinterpretq_s8_u8(q8));
        139: q12s8 = vqaddq_s8(vreinterpretq_s8_u8(q5), q12s8);
        158: uint8x16_t q5, q6, q7, q8, q9, q10;  [local]
        170: q5 = vld1q_u8(src)
        206: uint8x16_t q5, q6, q7, q8, q9, q10;  [local]
        293: uint8x16_t q5, q6, q7, q8, q9, q10;  [local]
        457: uint8x16_t q5, q6, q7, q8, q9, q10;  [local] [all...]

    vp8_loopfilter_neon.c
         20: uint8x16_t q5,  // p1
         37: q12u8 = vabdq_u8(q4, q5);
         38: q13u8 = vabdq_u8(q5, q6);
         55: q2u8 = vabdq_u8(q5, q8);
         66: q5 = veorq_u8(q5, q10);
         80: q1s8 = vqsubq_s8(vreinterpretq_s8_u8(q5), vreinterpretq_s8_u8(q8));
        114: q13s8 = vqaddq_s8(vreinterpretq_s8_u8(q5), q1s8);
        130: uint8x16_t q5, q6, q7, q8, q9, q10;  [local]
        141: q5 = vld1q_u8(src)
        173: uint8x16_t q5, q6, q7, q8, q9, q10;  [local]
        321: uint8x16_t q5, q6, q7, q8, q9, q10;  [local]
        433: uint8x16_t q5, q6, q7, q8, q9, q10;  [local] [all...]
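The loop-filter hits above build their filter mask from absolute pixel differences (vabdq_u8) compared against a limit. A minimal sketch of that idiom in C intrinsics (hypothetical helper, not the libvpx code itself):

    #include <arm_neon.h>

    /* Per-byte mask: 0xff where |p1 - p0| <= limit, 0x00 elsewhere. */
    uint8x16_t filter_mask_p1_p0(uint8x16_t p1, uint8x16_t p0, uint8x16_t limit)
    {
        uint8x16_t diff = vabdq_u8(p1, p0);  /* |p1 - p0| per byte */
        return vcleq_u8(diff, limit);        /* compare against the limit */
    }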
/external/llvm/test/MC/ARM/

    diagnostics-noneon.s
          5: vmov q4, q5

    neon-shiftaccum-encoding.s
          9: vsra.s32 q9, q5, #32
         18: vsra.u64 q4, q5, #25
         27: vsra.s32 q5, #32
         36: vsra.u64 q5, #25
         44: @ CHECK: vsra.s32 q9, q5, #32 @ encoding: [0x5a,0x21,0xe0,0xf2]
         53: @ CHECK: vsra.u64 q4, q5, #25 @ encoding: [0xda,0x81,0xa7,0xf3]
         61: @ CHECK: vsra.s32 q5, q5, #32 @ encoding: [0x5a,0xa1,0xa0,0xf2]
         70: @ CHECK: vsra.u64 q5, q5, #25 @ encoding: [0xda,0xa1,0xa7,0xf3 [all...]

    neont2-shiftaccum-encoding.s
         11: vsra.s32 q9, q5, #32
         20: vsra.u64 q4, q5, #25
         29: vsra.s32 q5, #32
         38: vsra.u64 q5, #25
         46: @ CHECK: vsra.s32 q9, q5, #32 @ encoding: [0xe0,0xef,0x5a,0x21]
         55: @ CHECK: vsra.u64 q4, q5, #25 @ encoding: [0xa7,0xff,0xda,0x81]
         63: @ CHECK: vsra.s32 q5, q5, #32 @ encoding: [0xa0,0xef,0x5a,0xa1]
         72: @ CHECK: vsra.u64 q5, q5, #25 @ encoding: [0xa7,0xff,0xda,0xa1 [all...]

    neon-minmax-encoding.s
         20: vmax.s16 q4, q5, q6
         25: vmax.f32 q9, q5, q1
         28: vmax.s16 q5, q6
         31: vmax.u16 q4, q5
         50: @ CHECK: vmax.s16 q4, q5, q6 @ encoding: [0x4c,0x86,0x1a,0xf2]
         55: @ CHECK: vmax.f32 q9, q5, q1 @ encoding: [0x42,0x2f,0x4a,0xf2]
         57: @ CHECK: vmax.s16 q5, q5, q6 @ encoding: [0x4c,0xa6,0x1a,0xf2]
         60: @ CHECK: vmax.u16 q4, q4, q5 @ encoding: [0x4a,0x86,0x18,0xf3]
         82: vmin.s16 q4, q5, q [all...]

    neont2-minmax-encoding.s
         22: vmax.s16 q4, q5, q6
         27: vmax.f32 q9, q5, q1
         30: vmax.s16 q5, q6
         33: vmax.u16 q4, q5
         52: @ CHECK: vmax.s16 q4, q5, q6 @ encoding: [0x1a,0xef,0x4c,0x86]
         57: @ CHECK: vmax.f32 q9, q5, q1 @ encoding: [0x4a,0xef,0x42,0x2f]
         59: @ CHECK: vmax.s16 q5, q5, q6 @ encoding: [0x1a,0xef,0x4c,0xa6]
         62: @ CHECK: vmax.u16 q4, q4, q5 @ encoding: [0x18,0xff,0x4a,0x86]
         84: vmin.s16 q4, q5, q [all...]

    neon-shift-encoding.s
        116: vsra.s64 q4, q5, #63
        123: vsra.s16 q5, #15
        134: @ CHECK: vsra.s64 q4, q5, #63 @ encoding: [0xda,0x81,0x81,0xf2]
        140: @ CHECK: vsra.s16 q5, q5, #15 @ encoding: [0x5a,0xa1,0x91,0xf2]
        152: vsra.u64 q4, q5, #63
        159: vsra.u16 q5, #15
        170: @ CHECK: vsra.u64 q4, q5, #63 @ encoding: [0xda,0x81,0x81,0xf3]
        176: @ CHECK: vsra.u16 q5, q5, #15 @ encoding: [0x5a,0xa1,0x91,0xf3 [all...]

    basic-arm-instructions-v8.1a.s
         15: vqrdmlsh.f32 q3, q4, q5
         25: //CHECK-ERROR: vqrdmlsh.f32 q3, q4, q5
         37: //CHECK-V8: vqrdmlsh.f32 q3, q4, q5
         93: vqrdmlsh.s32 q3, q4, q5
         94: //CHECK-V81aARM: vqrdmlsh.s32 q3, q4, q5 @ encoding: [0x5a,0x6c,0x28,0xf3]
         95: //CHECK-V81aTHUMB: vqrdmlsh.s32 q3, q4, q5 @ encoding: [0x28,0xff,0x5a,0x6c]
         97: //CHECK-V8: vqrdmlsh.s32 q3, q4, q5
/external/valgrind/none/tests/arm/

    neon128.c
        358: TESTINSN_imm("vmov.i32 q5", q5, 0x700);
        372: TESTINSN_imm("vmvn.i32 q5", q5, 0x700);
        391: TESTINSN_imm("vbic.i32 q5", q5, 0x700);
        439: TESTINSN_bin("vand q4, q6, q5", q4, q6, i8, 0xff, q5, i16, 0x57);
        445: TESTINSN_bin("vbic q4, q6, q5", q4, q6, i8, 0xff, q5, i16, 0x57) [all...]
/external/libavc/common/arm/

    ih264_inter_pred_luma_horz_qpel_vert_qpel_a9q.s
        146: vld1.32 {q5}, [r7], r2 @ Vector load from src[5_0]
        248: vaddl.u8 q5, d0, d5
        249: vmlal.u8 q5, d2, d30
        250: vmlal.u8 q5, d3, d30
        251: vmlsl.u8 q5, d1, d31
        252: vmlsl.u8 q5, d4, d31
        259: vqrshrun.s16 d26, q5, #5
        261: vaddl.u8 q5, d12, d17
        262: vmlal.u8 q5, d14, d30
        263: vmlal.u8 q5, d15, d3 [all...]

    ih264_inter_pred_chroma_a9q.s
        148: vmull.u8 q5, d0, d28
        149: vmlal.u8 q5, d5, d30
        150: vmlal.u8 q5, d3, d29
        151: vmlal.u8 q5, d8, d31
        163: vqrshrun.s16 d14, q5, #6
        175: vmull.u8 q5, d0, d28
        176: vmlal.u8 q5, d5, d30
        177: vmlal.u8 q5, d3, d29
        178: vmlal.u8 q5, d8, d31
        186: vqrshrun.s16 d14, q5, # [all...]
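The chroma prediction hits above show the weighted 2x2 interpolation pattern: four u8*u8 widening multiply-accumulates into a 16-bit accumulator (vmull.u8/vmlal.u8), followed by a rounding narrow with saturation back to u8 (vqrshrun.s16 #6). A hedged C intrinsics sketch of that pattern; the function and parameter names are made up, not taken from libavc:

    #include <arm_neon.h>

    /* acc = a*wa + b*wb + c*wc + d*wd, then (acc + 32) >> 6 saturated to u8. */
    uint8x8_t chroma_bilinear(uint8x8_t a, uint8x8_t b, uint8x8_t c, uint8x8_t d,
                              uint8x8_t wa, uint8x8_t wb, uint8x8_t wc, uint8x8_t wd)
    {
        uint16x8_t acc = vmull_u8(a, wa);   /* vmull.u8 */
        acc = vmlal_u8(acc, b, wb);         /* vmlal.u8 */
        acc = vmlal_u8(acc, c, wc);         /* vmlal.u8 */
        acc = vmlal_u8(acc, d, wd);         /* vmlal.u8 */
        /* matches vqrshrun.s16 d, q, #6 */
        return vqrshrun_n_s16(vreinterpretq_s16_u16(acc), 6);
    }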
/external/libhevc/common/arm/

    ihevc_inter_pred_luma_vert_w16inp_w16out.s
        174: vmull.s16 q5,d2,d23 @mul_res2 = vmull_u8(src_tmp3, coeffabs_1)@
        176: vmlal.s16 q5,d1,d22 @mul_res2 = vmlal_u8(mul_res2, src_tmp2, coeffabs_0)@
        178: vmlal.s16 q5,d3,d24 @mul_res2 = vmlal_u8(mul_res2, src_tmp4, coeffabs_2)@
        180: vmlal.s16 q5,d4,d25 @mul_res2 = vmlal_u8(mul_res2, src_tmp1, coeffabs_3)@
        182: vmlal.s16 q5,d5,d26 @mul_res2 = vmlal_u8(mul_res2, src_tmp2, coeffabs_4)@
        184: vmlal.s16 q5,d6,d27 @mul_res2 = vmlal_u8(mul_res2, src_tmp3, coeffabs_5)@
        185: vmlal.s16 q5,d7,d28 @mul_res2 = vmlal_u8(mul_res2, src_tmp4, coeffabs_6)@
        186: vmlal.s16 q5,d16,d29 @mul_res2 = vmlal_u8(mul_res2, src_tmp1, coeffabs_7)@
        201: vsub.s32 q5, q5, q1 [all...]

    ihevc_itrans_recon_4x4.s
        158: vaddl.s16 q5,d0,d2 @pi2_src[0] + pi2_src[2]
        160: vshl.s32 q5,q5,#6 @e[0] = 64*(pi2_src[0] + pi2_src[2])
        163: vadd.s32 q7,q5,q3 @((e[0] + o[0] )
        166: vsub.s32 q10,q5,q3 @((e[0] - o[0])
        188: vaddl.s16 q5,d0,d2 @pi2_src[0] + pi2_src[2]
        190: vshl.s32 q5,q5,#6 @e[0] = 64*(pi2_src[0] + pi2_src[2])
        194: vadd.s32 q7,q5,q3 @((e[0] + o[0] )
        197: vsub.s32 q10,q5,q3 @((e[0] - o[0] [all...]

    ihevc_inter_pred_filters_luma_vert_w16inp.s
        164: vmull.s16 q5,d2,d23 @mul_res2 = vmull_u8(src_tmp3, coeffabs_1)@
        166: vmlal.s16 q5,d1,d22 @mul_res2 = vmlal_u8(mul_res2, src_tmp2, coeffabs_0)@
        168: vmlal.s16 q5,d3,d24 @mul_res2 = vmlal_u8(mul_res2, src_tmp4, coeffabs_2)@
        170: vmlal.s16 q5,d4,d25 @mul_res2 = vmlal_u8(mul_res2, src_tmp1, coeffabs_3)@
        172: vmlal.s16 q5,d5,d26 @mul_res2 = vmlal_u8(mul_res2, src_tmp2, coeffabs_4)@
        174: vmlal.s16 q5,d6,d27 @mul_res2 = vmlal_u8(mul_res2, src_tmp3, coeffabs_5)@
        175: vmlal.s16 q5,d7,d28 @mul_res2 = vmlal_u8(mul_res2, src_tmp4, coeffabs_6)@
        176: vmlal.s16 q5,d16,d29 @mul_res2 = vmlal_u8(mul_res2, src_tmp1, coeffabs_7)@
        191: vqshrn.s32 d10, q5, #6
        210: vqrshrun.s16 d10,q5,#6 @sto_res = vqmovun_s16(sto_res_tmp) [all...]
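The ihevc_itrans_recon_4x4.s hits above contain the even-part butterfly of the 4x4 inverse transform: widen, scale by 64, then form (e[0] + o[0]) and (e[0] - o[0]). A hedged C intrinsics sketch of just that step (hypothetical helper, not the libhevc source):

    #include <arm_neon.h>

    /* e0 = 64 * (src0 + src2); outputs e0 + o0 and e0 - o0. */
    void itrans_even_butterfly(int16x4_t src0, int16x4_t src2, int32x4_t o0,
                               int32x4_t *sum, int32x4_t *diff)
    {
        int32x4_t e0 = vaddl_s16(src0, src2); /* vaddl.s16: pi2_src[0] + pi2_src[2] */
        e0 = vshlq_n_s32(e0, 6);              /* vshl.s32 #6: 64 * (...)            */
        *sum  = vaddq_s32(e0, o0);            /* e[0] + o[0]                        */
        *diff = vsubq_s32(e0, o0);            /* e[0] - o[0]                        */
    }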
/external/boringssl/src/crypto/curve25519/asm/

    x25519-asm-arm.S
         31: vpush {q4,q5,q6,q7}
        100: vshr.u64 q5,q5,#26
        113: vand q5,q5,q3
        123: vadd.i64 q5,q5,q12
        125: vadd.i64 q14,q5,q0
        137: vsub.i64 q5,q5,q1 [all...]
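The vshr.u64 / vand / vadd.i64 lines above belong to the carry-propagation step of a reduced-radix field representation (limbs of roughly 26 bits held in 64-bit lanes). A very loose C intrinsics sketch of one such carry step, simplified and not the curve25519 code itself:

    #include <arm_neon.h>

    /* carry = lo >> 26; lo &= 2^26 - 1; hi += carry; two limb pairs per call. */
    void carry_26(uint64x2_t *lo, uint64x2_t *hi)
    {
        const uint64x2_t mask = vdupq_n_u64((1ULL << 26) - 1);
        uint64x2_t carry = vshrq_n_u64(*lo, 26); /* vshr.u64 #26: bits above the limb   */
        *lo = vandq_u64(*lo, mask);              /* vand: keep the low 26 bits          */
        *hi = vaddq_u64(*hi, carry);             /* vadd.i64: fold carry into next limb */
    }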
/external/boringssl/linux-arm/crypto/aes/

    bsaes-armv7.S
        107: veor q15, q5, q9
        123: veor q11, q11, q5
        128: veor q5, q5, q11
        145: vshr.u64 q10, q5, #2
        155: veor q5, q5, q10
        183: veor q10, q10, q5
        187: veor q5, q5, q1 [all...]
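The shift/XOR pairs above match the well-known "swapmove" idiom that bit-sliced AES implementations use to shuffle bits between two registers. A generic C intrinsics sketch of that idiom with the shift and mask fixed for illustration; it is not the BoringSSL code itself:

    #include <arm_neon.h>

    /* t = ((a >> 2) ^ b) & mask;  b ^= t;  a ^= t << 2; */
    void swapmove_by2(uint64x2_t *a, uint64x2_t *b)
    {
        const uint64x2_t mask = vdupq_n_u64(0x3333333333333333ULL);
        uint64x2_t t = vandq_u64(veorq_u64(vshrq_n_u64(*a, 2), *b), mask);
        *b = veorq_u64(*b, t);
        *a = veorq_u64(*a, vshlq_n_u64(t, 2));
    }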
/external/libavc/encoder/arm/

    ih264e_evaluate_intra4x4_modes_a9q.s
        237: vext.8 q5, q0, q0, #2
        315: vext.8 q15, q5, q5, #4
        317: vext.8 q15, q5, q5, #3
        351: vmov.8 q1, q5
        363: vext.8 q15, q5, q5, #3
        390: vext.8 q15, q5, q5, # [all...]
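The vext.8 hits above extract a 16-byte vector starting a fixed number of bytes into the concatenation of two source registers; with the same register used twice, this rotates the register by whole bytes. A tiny illustrative C intrinsics sketch (the function name is hypothetical):

    #include <arm_neon.h>

    /* vext.8 q5, q0, q0, #2 : bytes 2..15 of v followed by bytes 0..1. */
    uint8x16_t rotate_left_two_bytes(uint8x16_t v)
    {
        return vextq_u8(v, v, 2);
    }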