    Searched refs:q5 (Results 1 - 25 of 198)


  /external/capstone/suite/MC/ARM/
neon-bitwise-encoding.s.cs 92 0x5a,0xc1,0x0c,0xf2 = vand q6, q6, q5
93 0x5a,0xc1,0x0c,0xf2 = vand q6, q6, q5
97 0x5a,0xc1,0x0c,0xf3 = veor q6, q6, q5
98 0x5a,0xc1,0x0c,0xf3 = veor q6, q6, q5
102 0x5a,0xc1,0x0c,0xf3 = veor q6, q6, q5
103 0x5a,0xc1,0x0c,0xf3 = veor q6, q6, q5
107 0x4a,0xa2,0xb5,0xf3 = vclt.s16 q5, q5, #0
109 0x56,0xa8,0x1a,0xf3 = vceq.i16 q5, q5, q
111 0x46,0xa3,0x1a,0xf2 = vcgt.s16 q5, q5, q3
113 0x56,0xa3,0x1a,0xf2 = vcge.s16 q5, q5, q3
115 0x4a,0xa0,0xb5,0xf3 = vcgt.s16 q5, q5, #0
117 0xca,0xa0,0xb5,0xf3 = vcge.s16 q5, q5, #0
119 0x4a,0xa1,0xb5,0xf3 = vceq.i16 q5, q5, #0
121 0xca,0xa1,0xb5,0xf3 = vcle.s16 q5, q5, #0
124 0x56,0xae,0x0a,0xf3 = vacge.f32 q5, q5, q3
126 0x56,0xae,0x2a,0xf3 = vacgt.f32 q5, q5, q3
    [all...]
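
In the capstone tests above, each row pairs an instruction's encoding bytes with its expected disassembly; note that the vand and veor rows differ only in the final listed byte (0xf2 vs 0xf3). For readers more used to intrinsics than encodings, the same full-width bitwise operations look like this (a minimal sketch; the function name is illustrative, not from the test suite):

    #include <arm_neon.h>

    /* Full 128-bit bitwise ops on q registers, matching the
       vand/veor q, q, q forms listed above. */
    uint16x8_t and_then_xor(uint16x8_t a, uint16x8_t b, uint16x8_t c)
    {
        uint16x8_t t = vandq_u16(a, b);   /* vand q, q, q */
        return veorq_u16(t, c);           /* veor q, q, q */
    }
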
neon-minmax-encoding.s.cs 17 0x4c,0x86,0x1a,0xf2 = vmax.s16 q4, q5, q6
22 0x42,0x2f,0x4a,0xf2 = vmax.f32 q9, q5, q1
24 0x4c,0xa6,0x1a,0xf2 = vmax.s16 q5, q5, q6
27 0x4a,0x86,0x18,0xf3 = vmax.u16 q4, q4, q5
45 0x5c,0x86,0x1a,0xf2 = vmin.s16 q4, q5, q6
50 0x42,0x2f,0x6a,0xf2 = vmin.f32 q9, q5, q1
52 0x5c,0xa6,0x1a,0xf2 = vmin.s16 q5, q5, q6
55 0x5a,0x86,0x18,0xf3 = vmin.u16 q4, q4, q5
    [all...]
neont2-minmax-encoding.s.cs 17 0x1a,0xef,0x4c,0x86 = vmax.s16 q4, q5, q6
22 0x4a,0xef,0x42,0x2f = vmax.f32 q9, q5, q1
24 0x1a,0xef,0x4c,0xa6 = vmax.s16 q5, q5, q6
27 0x18,0xff,0x4a,0x86 = vmax.u16 q4, q4, q5
45 0x1a,0xef,0x5c,0x86 = vmin.s16 q4, q5, q6
50 0x6a,0xef,0x42,0x2f = vmin.f32 q9, q5, q1
52 0x1a,0xef,0x5c,0xa6 = vmin.s16 q5, q5, q6
55 0x18,0xff,0x5a,0x86 = vmin.u16 q4, q4, q5
    [all...]
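
The min/max tests cover signed, unsigned, and float element types, and the neon-/neont2- file pair lists the same instructions with their ARM and Thumb-2 encodings. As intrinsics, the lane-wise selection composes into the usual clamp idiom (a sketch; the helper name is illustrative):

    #include <arm_neon.h>

    /* Lane-wise clamp of x into [lo, hi] via vmax.u16 then vmin.u16. */
    uint16x8_t clamp_u16(uint16x8_t x, uint16x8_t lo, uint16x8_t hi)
    {
        return vminq_u16(vmaxq_u16(x, lo), hi);
    }
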
  /external/llvm/test/MC/ARM/
diagnostics-noneon.s 5 vmov q4, q5
neon-shiftaccum-encoding.s 9 vsra.s32 q9, q5, #32
18 vsra.u64 q4, q5, #25
27 vsra.s32 q5, #32
36 vsra.u64 q5, #25
44 @ CHECK: vsra.s32 q9, q5, #32 @ encoding: [0x5a,0x21,0xe0,0xf2]
53 @ CHECK: vsra.u64 q4, q5, #25 @ encoding: [0xda,0x81,0xa7,0xf3]
61 @ CHECK: vsra.s32 q5, q5, #32 @ encoding: [0x5a,0xa1,0xa0,0xf2]
70 @ CHECK: vsra.u64 q5, q5, #25 @ encoding: [0xda,0xa1,0xa7,0xf3]
    [all...]
neont2-shiftaccum-encoding.s 11 vsra.s32 q9, q5, #32
20 vsra.u64 q4, q5, #25
29 vsra.s32 q5, #32
38 vsra.u64 q5, #25
46 @ CHECK: vsra.s32 q9, q5, #32 @ encoding: [0xe0,0xef,0x5a,0x21]
55 @ CHECK: vsra.u64 q4, q5, #25 @ encoding: [0xa7,0xff,0xda,0x81]
63 @ CHECK: vsra.s32 q5, q5, #32 @ encoding: [0xa0,0xef,0x5a,0xa1]
72 @ CHECK: vsra.u64 q5, q5, #25 @ encoding: [0xa7,0xff,0xda,0xa1]
    [all...]
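
vsra ("shift right and accumulate") adds each right-shifted source lane into the destination, which is why the two-operand forms in the source (lines 27 and 36: vsra.s32 q5, #32 and vsra.u64 q5, #25) assemble to the three-operand q5, q5 forms in the CHECK lines. An intrinsics equivalent (a minimal sketch):

    #include <arm_neon.h>

    /* vsra.s32 acc, src, #16 : acc += (src >> 16), lane-wise. */
    int32x4_t shift_accumulate(int32x4_t acc, int32x4_t src)
    {
        return vsraq_n_s32(acc, src, 16);
    }
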
neon-minmax-encoding.s 20 vmax.s16 q4, q5, q6
25 vmax.f32 q9, q5, q1
28 vmax.s16 q5, q6
31 vmax.u16 q4, q5
50 @ CHECK: vmax.s16 q4, q5, q6 @ encoding: [0x4c,0x86,0x1a,0xf2]
55 @ CHECK: vmax.f32 q9, q5, q1 @ encoding: [0x42,0x2f,0x4a,0xf2]
57 @ CHECK: vmax.s16 q5, q5, q6 @ encoding: [0x4c,0xa6,0x1a,0xf2]
60 @ CHECK: vmax.u16 q4, q4, q5 @ encoding: [0x4a,0x86,0x18,0xf3]
82 vmin.s16 q4, q5, q
    [all...]
neont2-minmax-encoding.s 22 vmax.s16 q4, q5, q6
27 vmax.f32 q9, q5, q1
30 vmax.s16 q5, q6
33 vmax.u16 q4, q5
52 @ CHECK: vmax.s16 q4, q5, q6 @ encoding: [0x1a,0xef,0x4c,0x86]
57 @ CHECK: vmax.f32 q9, q5, q1 @ encoding: [0x4a,0xef,0x42,0x2f]
59 @ CHECK: vmax.s16 q5, q5, q6 @ encoding: [0x1a,0xef,0x4c,0xa6]
62 @ CHECK: vmax.u16 q4, q4, q5 @ encoding: [0x18,0xff,0x4a,0x86]
84 vmin.s16 q4, q5, q
    [all...]
  /external/swiftshader/third_party/llvm-7.0/llvm/test/MC/ARM/
diagnostics-noneon.s 5 vmov q4, q5
neon-shiftaccum-encoding.s 9 vsra.s32 q9, q5, #32
18 vsra.u64 q4, q5, #25
27 vsra.s32 q5, #32
36 vsra.u64 q5, #25
44 @ CHECK: vsra.s32 q9, q5, #32 @ encoding: [0x5a,0x21,0xe0,0xf2]
53 @ CHECK: vsra.u64 q4, q5, #25 @ encoding: [0xda,0x81,0xa7,0xf3]
61 @ CHECK: vsra.s32 q5, q5, #32 @ encoding: [0x5a,0xa1,0xa0,0xf2]
70 @ CHECK: vsra.u64 q5, q5, #25 @ encoding: [0xda,0xa1,0xa7,0xf3]
    [all...]
neont2-shiftaccum-encoding.s 11 vsra.s32 q9, q5, #32
20 vsra.u64 q4, q5, #25
29 vsra.s32 q5, #32
38 vsra.u64 q5, #25
46 @ CHECK: vsra.s32 q9, q5, #32 @ encoding: [0xe0,0xef,0x5a,0x21]
55 @ CHECK: vsra.u64 q4, q5, #25 @ encoding: [0xa7,0xff,0xda,0x81]
63 @ CHECK: vsra.s32 q5, q5, #32 @ encoding: [0xa0,0xef,0x5a,0xa1]
72 @ CHECK: vsra.u64 q5, q5, #25 @ encoding: [0xa7,0xff,0xda,0xa1]
    [all...]
neon-minmax-encoding.s 20 vmax.s16 q4, q5, q6
25 vmax.f32 q9, q5, q1
28 vmax.s16 q5, q6
31 vmax.u16 q4, q5
50 @ CHECK: vmax.s16 q4, q5, q6 @ encoding: [0x4c,0x86,0x1a,0xf2]
55 @ CHECK: vmax.f32 q9, q5, q1 @ encoding: [0x42,0x2f,0x4a,0xf2]
57 @ CHECK: vmax.s16 q5, q5, q6 @ encoding: [0x4c,0xa6,0x1a,0xf2]
60 @ CHECK: vmax.u16 q4, q4, q5 @ encoding: [0x4a,0x86,0x18,0xf3]
82 vmin.s16 q4, q5, q
    [all...]
neont2-minmax-encoding.s 22 vmax.s16 q4, q5, q6
27 vmax.f32 q9, q5, q1
30 vmax.s16 q5, q6
33 vmax.u16 q4, q5
52 @ CHECK: vmax.s16 q4, q5, q6 @ encoding: [0x1a,0xef,0x4c,0x86]
57 @ CHECK: vmax.f32 q9, q5, q1 @ encoding: [0x4a,0xef,0x42,0x2f]
59 @ CHECK: vmax.s16 q5, q5, q6 @ encoding: [0x1a,0xef,0x4c,0xa6]
62 @ CHECK: vmax.u16 q4, q4, q5 @ encoding: [0x18,0xff,0x4a,0x86]
84 vmin.s16 q4, q5, q
    [all...]
  /external/libvpx/libvpx/vp8/common/arm/neon/
idct_blk_neon.c 71 int16x8_t q0, q1, q2, q3, q4, q5, q6, q7, q8, q9, q10, q11;
94 q5 = vld1q_s16(q);
119 q5 = vmulq_s16(q5, q1);
131 dLow1 = vget_low_s16(q5);
132 dHigh1 = vget_high_s16(q5);
134 q5 = vcombine_s16(dHigh0, dHigh1);
137 q7 = vqdmulhq_n_s16(q5, sinpi8sqrt2);
139 q9 = vqdmulhq_n_s16(q5, cospi8sqrt2minus1);
148 q5 = vqaddq_s16(q5, q9)
    [all...]
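
In idct_blk_neon.c, q5 holds a row of dequantized coefficients: it is loaded with vld1q_s16, scaled by the quantizer row with vmulq_s16, and then multiplied by the VP8 inverse-DCT constants via vqdmulhq_n_s16, which returns the saturated high half of the doubled product. A hedged sketch of that last step (the constant is left symbolic here, not the actual VP8 value):

    #include <arm_neon.h>

    /* Fixed-point scaling as in the IDCT: each lane becomes
       saturate((2 * row[i] * c) >> 16). */
    int16x8_t scale_row(int16x8_t row, int16_t c)
    {
        return vqdmulhq_n_s16(row, c);
    }
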
mbloopfilter_neon.c 19 uint8x16_t q5, // p1
39 q12u8 = vabdq_u8(q4, q5);
40 q13u8 = vabdq_u8(q5, q6);
59 q1u8 = vabdq_u8(q5, q8);
69 q5 = veorq_u8(q5, q0u8);
83 q1s8 = vqsubq_s8(vreinterpretq_s8_u8(q5), vreinterpretq_s8_u8(q8));
139 q12s8 = vqaddq_s8(vreinterpretq_s8_u8(q5), q12s8);
158 uint8x16_t q5, q6, q7, q8, q9, q10;
170 q5 = vld1q_u8(src)
206 uint8x16_t q5, q6, q7, q8, q9, q10;
293 uint8x16_t q5, q6, q7, q8, q9, q10;
457 uint8x16_t q5, q6, q7, q8, q9, q10;
    [all...]
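
In mbloopfilter_neon.c, q5 is the p1 pixel row; the vabdq_u8 calls compute absolute neighbour differences for the filter mask, and veorq_u8 with 0x80 toggles rows between unsigned pixels and the signed arithmetic used by vqsubq_s8/vqaddq_s8. A minimal sketch of one mask term (the threshold name is illustrative):

    #include <arm_neon.h>

    /* One loop-filter mask term: lanes where |p1 - p0| <= limit
       become 0xFF, others 0x00. */
    uint8x16_t edge_mask(uint8x16_t p1, uint8x16_t p0, uint8x16_t limit)
    {
        uint8x16_t diff = vabdq_u8(p1, p0);  /* vabd.u8 */
        return vcleq_u8(diff, limit);        /* lane-wise compare */
    }
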
vp8_loopfilter_neon.c 20 uint8x16_t q5, // p1
37 q12u8 = vabdq_u8(q4, q5);
38 q13u8 = vabdq_u8(q5, q6);
55 q2u8 = vabdq_u8(q5, q8);
66 q5 = veorq_u8(q5, q10);
80 q1s8 = vqsubq_s8(vreinterpretq_s8_u8(q5), vreinterpretq_s8_u8(q8));
114 q13s8 = vqaddq_s8(vreinterpretq_s8_u8(q5), q1s8);
130 uint8x16_t q5, q6, q7, q8, q9, q10;
141 q5 = vld1q_u8(src)
173 uint8x16_t q5, q6, q7, q8, q9, q10;
321 uint8x16_t q5, q6, q7, q8, q9, q10;
433 uint8x16_t q5, q6, q7, q8, q9, q10;
    [all...]
  /external/libavc/common/arm/
ih264_inter_pred_luma_horz_qpel_vert_qpel_a9q.s 146 vld1.32 {q5}, [r7], r2 @ Vector load from src[5_0]
248 vaddl.u8 q5, d0, d5
249 vmlal.u8 q5, d2, d30
250 vmlal.u8 q5, d3, d30
251 vmlsl.u8 q5, d1, d31
252 vmlsl.u8 q5, d4, d31
259 vqrshrun.s16 d26, q5, #5
261 vaddl.u8 q5, d12, d17
262 vmlal.u8 q5, d14, d30
263 vmlal.u8 q5, d15, d3
    [all...]
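
The vaddl/vmlal/vmlsl run at lines 248-252 is the H.264 6-tap half-pel filter with coefficients (1, -5, 20, 20, -5, 1): widen-add the outer taps, accumulate the two centre taps times 20 (d30), subtract the remaining pair times 5 (d31), then vqrshrun.s16 #5 rounds and narrows back to pixels. The same pattern as intrinsics (a sketch; names are illustrative):

    #include <arm_neon.h>

    /* H.264 6-tap: clip(((a+f) + 20*(c+d) - 5*(b+e) + 16) >> 5). */
    uint8x8_t six_tap(uint8x8_t a, uint8x8_t b, uint8x8_t c,
                      uint8x8_t d, uint8x8_t e, uint8x8_t f)
    {
        uint16x8_t acc = vaddl_u8(a, f);        /* vaddl.u8 */
        acc = vmlal_u8(acc, c, vdup_n_u8(20));  /* vmlal.u8 */
        acc = vmlal_u8(acc, d, vdup_n_u8(20));
        acc = vmlsl_u8(acc, b, vdup_n_u8(5));   /* vmlsl.u8 */
        acc = vmlsl_u8(acc, e, vdup_n_u8(5));
        return vqrshrun_n_s16(vreinterpretq_s16_u16(acc), 5);
    }
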
ih264_inter_pred_chroma_a9q.s 148 vmull.u8 q5, d0, d28
149 vmlal.u8 q5, d5, d30
150 vmlal.u8 q5, d3, d29
151 vmlal.u8 q5, d8, d31
163 vqrshrun.s16 d14, q5, #6
175 vmull.u8 q5, d0, d28
176 vmlal.u8 q5, d5, d30
177 vmlal.u8 q5, d3, d29
178 vmlal.u8 q5, d8, d31
186 vqrshrun.s16 d14, q5, #
    [all...]
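
ih264_inter_pred_chroma_a9q.s applies the standard 2x2 bilinear chroma filter: four widening products against the weights preloaded in d28-d31, then vqrshrun.s16 #6 (the four weights sum to 64). As intrinsics (a hedged sketch):

    #include <arm_neon.h>

    /* Chroma bilinear: (wA*A + wB*B + wC*C + wD*D + 32) >> 6,
       saturated back to unsigned 8-bit. Weights sum to 64. */
    uint8x8_t chroma_bilinear(uint8x8_t A, uint8x8_t wA, uint8x8_t B, uint8x8_t wB,
                              uint8x8_t C, uint8x8_t wC, uint8x8_t D, uint8x8_t wD)
    {
        uint16x8_t acc = vmull_u8(A, wA);   /* vmull.u8 */
        acc = vmlal_u8(acc, B, wB);         /* vmlal.u8 x3 */
        acc = vmlal_u8(acc, C, wC);
        acc = vmlal_u8(acc, D, wD);
        return vqrshrun_n_s16(vreinterpretq_s16_u16(acc), 6);
    }
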
  /external/libhevc/common/arm/
ihevc_inter_pred_luma_vert_w16inp_w16out.s 179 vmull.s16 q5,d2,d23 @mul_res2 = vmull_u8(src_tmp3, coeffabs_1)@
181 vmlal.s16 q5,d1,d22 @mul_res2 = vmlal_u8(mul_res2, src_tmp2, coeffabs_0)@
183 vmlal.s16 q5,d3,d24 @mul_res2 = vmlal_u8(mul_res2, src_tmp4, coeffabs_2)@
185 vmlal.s16 q5,d4,d25 @mul_res2 = vmlal_u8(mul_res2, src_tmp1, coeffabs_3)@
187 vmlal.s16 q5,d5,d26 @mul_res2 = vmlal_u8(mul_res2, src_tmp2, coeffabs_4)@
189 vmlal.s16 q5,d6,d27 @mul_res2 = vmlal_u8(mul_res2, src_tmp3, coeffabs_5)@
190 vmlal.s16 q5,d7,d28 @mul_res2 = vmlal_u8(mul_res2, src_tmp4, coeffabs_6)@
191 vmlal.s16 q5,d16,d29 @mul_res2 = vmlal_u8(mul_res2, src_tmp1, coeffabs_7)@
206 vsub.s32 q5, q5, q1
    [all...]
ihevc_itrans_recon_4x4.s 163 vaddl.s16 q5,d0,d2 @pi2_src[0] + pi2_src[2]
165 vshl.s32 q5,q5,#6 @e[0] = 64*(pi2_src[0] + pi2_src[2])
168 vadd.s32 q7,q5,q3 @((e[0] + o[0] )
171 vsub.s32 q10,q5,q3 @((e[0] - o[0])
193 vaddl.s16 q5,d0,d2 @pi2_src[0] + pi2_src[2]
195 vshl.s32 q5,q5,#6 @e[0] = 64*(pi2_src[0] + pi2_src[2])
199 vadd.s32 q7,q5,q3 @((e[0] + o[0] )
202 vsub.s32 q10,q5,q3 @((e[0] - o[0])
    [all...]
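
The ihevc_itrans_recon_4x4.s lines build the even half of the 4x4 inverse transform: e[0] = 64*(pi2_src[0] + pi2_src[2]) via a widening add plus a shift, after which the odd term is added and subtracted to produce the two butterfly outputs. A sketch in intrinsics (names follow the comments in the listing):

    #include <arm_neon.h>

    /* e0 = 64*(s0 + s2); out0 = e0 + o0; out3 = e0 - o0. */
    void even_butterfly(int16x4_t s0, int16x4_t s2, int32x4_t o0,
                        int32x4_t *out0, int32x4_t *out3)
    {
        int32x4_t e0 = vshlq_n_s32(vaddl_s16(s0, s2), 6);  /* vaddl.s16 + vshl #6 */
        *out0 = vaddq_s32(e0, o0);
        *out3 = vsubq_s32(e0, o0);
    }
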
ihevc_inter_pred_filters_luma_vert_w16inp.s 169 vmull.s16 q5,d2,d23 @mul_res2 = vmull_u8(src_tmp3, coeffabs_1)@
171 vmlal.s16 q5,d1,d22 @mul_res2 = vmlal_u8(mul_res2, src_tmp2, coeffabs_0)@
173 vmlal.s16 q5,d3,d24 @mul_res2 = vmlal_u8(mul_res2, src_tmp4, coeffabs_2)@
175 vmlal.s16 q5,d4,d25 @mul_res2 = vmlal_u8(mul_res2, src_tmp1, coeffabs_3)@
177 vmlal.s16 q5,d5,d26 @mul_res2 = vmlal_u8(mul_res2, src_tmp2, coeffabs_4)@
179 vmlal.s16 q5,d6,d27 @mul_res2 = vmlal_u8(mul_res2, src_tmp3, coeffabs_5)@
180 vmlal.s16 q5,d7,d28 @mul_res2 = vmlal_u8(mul_res2, src_tmp4, coeffabs_6)@
181 vmlal.s16 q5,d16,d29 @mul_res2 = vmlal_u8(mul_res2, src_tmp1, coeffabs_7)@
196 vqshrn.s32 d10, q5, #6
215 vqrshrun.s16 d10,q5,#6 @sto_res = vqmovun_s16(sto_res_tmp)
    [all...]
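
Both luma vertical filters accumulate one vmull.s16/vmlal.s16 product per tap into a 32-bit q register, then narrow with vqshrn.s32 #6 (and, for the pixel-output variant, vqrshrun.s16 #6); the w16inp names mean the input samples are already 16-bit intermediates rather than pixels. A condensed sketch of the accumulation, with the tap count trimmed for brevity:

    #include <arm_neon.h>

    /* Vertical filter over 16-bit input, 32-bit accumulation,
       saturating narrow back to 16 bits (three of eight taps shown). */
    int16x4_t vert_filter(int16x4_t r0, int16x4_t r1, int16x4_t r2,
                          int16x4_t c0, int16x4_t c1, int16x4_t c2)
    {
        int32x4_t acc = vmull_s16(r0, c0);   /* vmull.s16 */
        acc = vmlal_s16(acc, r1, c1);        /* vmlal.s16 */
        acc = vmlal_s16(acc, r2, c2);        /* five more taps in the real code */
        return vqshrn_n_s32(acc, 6);         /* vqshrn.s32 #6 */
    }
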
  /external/libxaac/decoder/armv7/
ixheaacd_imdct_using_fft.s 156 VADD.I32 q0, q1, q5
160 VSUB.I32 q4, q1, q5
178 VSUB.I32 q5, q2, q6
195 VSUB.S32 q6, q4, q5
198 VADD.S32 q9, q4, q5
204 VSUB.S32 q5, q8, q1
285 VADD.S32 q8, q5, q15
286 VSUB.S32 q7, q5, q15
287 VSUB.S32 q5, q0, q2
318 VPUSH {q5}
    [all...]
ixheaacd_sbr_imdct_using_fft.s 160 VADD.I32 q0, q1, q5
164 VSUB.I32 q4, q1, q5
182 VSUB.I32 q5, q2, q6
199 VSUB.S32 q6, q4, q5
202 VADD.S32 q9, q4, q5
208 VSUB.S32 q5, q8, q1
289 VADD.S32 q8, q5, q15
290 VSUB.S32 q7, q5, q15
291 VSUB.S32 q5, q0, q2
322 VPUSH {q5}
    [all...]
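
The FFT stages in both imdct files are plain add/subtract butterflies over q registers. Note also VPUSH {q5}: q4-q7 (d8-d15) are callee-saved under the AAPCS, so hand-written routines that clobber q5 must save and restore it. The butterfly itself, as intrinsics (a trivial sketch):

    #include <arm_neon.h>

    /* Radix-2 butterfly: (a, b) -> (a + b, a - b), four lanes at once. */
    void butterfly(int32x4_t *a, int32x4_t *b)
    {
        int32x4_t sum  = vaddq_s32(*a, *b);  /* VADD.S32 */
        int32x4_t diff = vsubq_s32(*a, *b);  /* VSUB.S32 */
        *a = sum;
        *b = diff;
    }
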
  /external/boringssl/src/crypto/curve25519/asm/
x25519-asm-arm.S 40 vpush {q4,q5,q6,q7}
109 vshr.u64 q5,q5,#26
122 vand q5,q5,q3
132 vadd.i64 q5,q5,q12
134 vadd.i64 q14,q5,q0
146 vsub.i64 q5,q5,q1
    [all...]
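
x25519-asm-arm.S keeps field elements in ten limbs of 25 or 26 bits, and the vshr.u64 #26 / vand pattern above is the carry step: the bits above a limb's width are shifted out, added into the next limb, and the limb is masked back down (q3 holds the mask). A scalar sketch of one such step (hedged: the real limb widths alternate 26/25 and the register allocation differs):

    #include <stdint.h>

    /* Propagate the overflow of a 26-bit limb into its neighbour. */
    void carry_step(uint64_t limb[], int i)
    {
        uint64_t carry = limb[i] >> 26;   /* vshr.u64 #26 */
        limb[i] &= (1ULL << 26) - 1;      /* vand with 0x3ffffff */
        limb[i + 1] += carry;             /* vadd.i64 */
    }
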
  /external/libvpx/libvpx/vpx_dsp/arm/
vpx_convolve8_avg_horiz_filter_type1_neon.asm 102 vdup.16 q5, r7
132 vmlal.u8 q5, d15, d27 ;mul_res = vmull_u8(src[0_3],
135 vmlal.u8 q5, d14, d26 ;mul_res = vmlsl_u8(src[0_2],
139 vmlal.u8 q5, d16, d28 ;mul_res = vmlal_u8(src[0_4],
142 vmlal.u8 q5, d17, d29 ;mul_res = vmlsl_u8(src[0_5],
147 vmlsl.u8 q5, d18, d30 ;mul_res = vmlal_u8(src[0_6],
149 vmlsl.u8 q5, d19, d31 ;mul_res = vmlsl_u8(src[0_7],
153 vmlsl.u8 q5, d12, d24 ;mul_res = vmlsl_u8(src[0_0],
155 vmlsl.u8 q5, d13, d25 ;mul_res = vmlal_u8(src[0_1],
158 vhadd.s16 q5, q5, q1
    [all...]
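
The convolve8 horizontal filter is the same multi-tap scheme, with two details visible in the listing: the accumulator q5 is seeded by broadcasting a scalar from a core register (vdup.16 q5, r7), and the taps split between vmlal.u8 for positive coefficients and vmlsl.u8 for negative ones. A sketch of that shape (names illustrative, two taps shown):

    #include <arm_neon.h>

    /* Seed the accumulator with a scalar offset, then fold in taps of
       either sign with widening multiply-add/subtract. */
    uint16x8_t seeded_taps(int16_t offset, uint8x8_t s0, uint8x8_t c0,
                           uint8x8_t s1, uint8x8_t c1)
    {
        uint16x8_t acc = vreinterpretq_u16_s16(vdupq_n_s16(offset)); /* vdup.16 q, r */
        acc = vmlal_u8(acc, s0, c0);  /* positive tap */
        acc = vmlsl_u8(acc, s1, c1);  /* negative tap */
        return acc;
    }
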

Completed in 2397 milliseconds
