Home | Sort by relevance | Sort by last modified time
    Searched refs:q8 (Results 176 - 200 of 283) sorted by null

1 2 3 4 5 6 7 8 9 10 11 >>

  /frameworks/rs/cpu_ref/
rsCpuIntrinsics_neon_YuvToRGB.S 25 * variants for different data layouts. Y data starts in q8, but with the even
41 vmull.u8 q8, d20, d14 // g2 = u * 50 + v * 104
42 vmlal.u8 q8, d21, d15
66 vqsub.u16 q1, q8 // g0 = satu16(g0 - g2)
67 vqsub.u16 q5, q8 // g1 = satu16(g1 - g2)
137 vmov.i8 q8, #0
rsCpuIntrinsics_neon_Blur.S 289 107: vext.u16 q12, q8, q9, #1
295 106: vext.u16 q12, q8, q9, #2
301 105: vext.u16 q12, q8, q9, #3
307 104: //vext.u16 q12, q8, q9, #4
313 103: vext.u16 q12, q8, q9, #5
319 102: vext.u16 q12, q8, q9, #6
325 101: vext.u16 q12, q8, q9, #7
336 vmov q8, q9
412 108: //vext.u16 q12, q7, q8, #0
418 107: vext.u16 q12, q7, q8, #
    [all...]
  /external/libavc/common/arm/
ih264_inter_pred_luma_horz_hpel_vert_qpel_a9q.s 210 vaddl.u8 q8, d2, d3
215 vmla.u16 q7, q8, q11
217 vaddl.u8 q8, d1, d4
219 vmls.u16 q7, q8, q12
226 vaddl.u8 q8, d0, d5
231 vmla.u16 q8, q9, q11
236 vmls.u16 q8, q9, q12
243 vst1.32 {q8}, [r9], r6 @ store temp buffer r5
263 vadd.s16 q14, q5, q8
304 vadd.s16 q15, q7, q8
    [all...]
ih264_iquant_itrans_recon_dc_a9.s 259 vdup.s16 q8, r6 @copy transform output to Q0
266 vaddw.u8 q0, q8, d24
268 vaddw.u8 q1, q8, d25
270 vaddw.u8 q2, q8, d26
272 vaddw.u8 q3, q8, d27
274 vaddw.u8 q4, q8, d28
281 vaddw.u8 q5, q8, d29
283 vaddw.u8 q6, q8, d30
286 vaddw.u8 q7, q8, d31
ih264_inter_pred_filters_luma_horz_a9q.s 133 vaddl.u8 q8, d27, d6 @// a0 + a5 (column2,row1)
141 vmlal.u8 q8, d27, d1 @// a0 + a5 + 20a2 (column2,row1)
149 vmlal.u8 q8, d27, d1 @// a0 + a5 + 20a2 + 20a3 (column2,row1)
157 vmlsl.u8 q8, d27, d0 @// a0 + a5 + 20a2 + 20a3 - 5a1 (column2,row1)
164 vmlsl.u8 q8, d27, d0 @// a0 + a5 + 20a2 + 20a3 - 5a1 - 5a4 (column2,row1)
171 vqrshrun.s16 d24, q8, #5 @// (a0 + a5 + 20a2 + 20a3 - 5a1 - 5a4 + 16) >> 5 (column2,row1)
ih264_inter_pred_luma_horz_qpel_a9q.s 140 vaddl.u8 q8, d27, d6 @// a0 + a5 (column2,row1)
148 vmlal.u8 q8, d27, d1 @// a0 + a5 + 20a2 (column2,row1)
156 vmlal.u8 q8, d27, d1 @// a0 + a5 + 20a2 + 20a3 (column2,row1)
164 vmlsl.u8 q8, d27, d0 @// a0 + a5 + 20a2 + 20a3 - 5a1 (column2,row1)
171 vmlsl.u8 q8, d27, d0 @// a0 + a5 + 20a2 + 20a3 - 5a1 - 5a4 (column2,row1)
180 vqrshrun.s16 d19, q8, #5 @// (a0 + a5 + 20a2 + 20a3 - 5a1 - 5a4 + 16) >> 5 (column2,row1)
ih264_intra_pred_luma_8x8_a9q.s 127 vaddl.u8 q8, d6, d10
130 vadd.u16 q6, q8, q9
482 vext.8 q8, q9, q9, #1
484 vext.8 q15, q8, q8, #1
574 vext.8 q8, q9, q9, #15
576 vext.8 q15, q8, q8, #15
670 vext.8 q8, q2, q2, #1
673 vmov.8 q15, q8
    [all...]
ih264_intra_pred_chroma_a9q.s 407 vmul.s16 q8, q6, q4
408 vuzp.16 q7, q8
464 vmul.s16 q8, q2, q5
466 vadd.s16 q8, q0, q8
476 vadd.s16 q0, q8, q9
479 vadd.s16 q13, q8, q4
489 vadd.s16 q0, q8, q9
497 vadd.s16 q13, q8, q4
505 vadd.s16 q0, q8, q
    [all...]
ih264_weighted_pred_a9q.s 215 vmovl.u8 q8, d6 @converting row 2L to 16-bit
220 vmul.s16 q8, q8, d2[0] @weight mult. for row 2L
233 vrshl.s16 q8, q8, q0 @rounds off the weighted samples from row 2L
239 vaddw.s8 q8, q8, d3 @adding offset for row 2L
243 vqmovun.s16 d6, q8 @saturating row 2L to unsigned 8-bit
421 vmovl.u8 q8, d7 @converting row 1H to 16-bit
426 vmul.s16 q8, q8, q1 @weight mult. for row 1
    [all...]
  /external/libxaac/decoder/armv7/
ixheaacd_fft32x32_ld.s 102 @ b_data0_r=q8
152 @VHADD.S32 q8, q0, q4 @b_data0_r=vhaddq_s32(a_data0_r_i.val[0],a_data4_r_i.val[0])@
153 VADD.I32 q8, q0, q4 @b_data0_r=vhaddq_s32(a_data0_r_i.val[0],a_data4_r_i.val[0])@
209 VADD.S32 q4, q8, q1 @c_data0_r=vaddq_s32(b_data0_r,b_data2_r)@
212 VSUB.S32 q5, q8, q1 @c_data2_r=vsubq_s32(b_data0_r,b_data2_r)@
215 VADD.S32 q8, q0, q2 @c_data0_i=vaddq_s32(b_data0_i,b_data2_i)@
287 VADD.S32 q4, q8, q7 @b_data0_i=vaddq_s32(c_data0_i,c_data1_i)@
291 VSUB.S32 q11, q8, q7 @b_data1_i=vsubq_s32(c_data0_i,c_data1_i)@
293 VADD.S32 q8, q5, q15 @b_data2_r=vaddq_s32(c_data2_r,c_data3_i)@
307 @ b_data5_r = q1 free regs = q3,q4,q5,q7,q8,q10,q1
    [all...]
  /external/boringssl/ios-arm/crypto/fipsmodule/
armv4-mont.S 230 vmull.u32 q8,d28,d1[0]
248 vmlal.u32 q8,d29,d5[0]
256 vmov q7,q8
258 vmov q8,q9
279 vmlal.u32 q8,d28,d1[0]
296 vmlal.u32 q8,d29,d5[0]
304 vmov q7,q8
306 vmov q8,q9
335 veor q8,q8,q
    [all...]
  /external/boringssl/linux-arm/crypto/fipsmodule/
armv4-mont.S 227 vmull.u32 q8,d28,d1[0]
245 vmlal.u32 q8,d29,d5[0]
253 vmov q7,q8
255 vmov q8,q9
276 vmlal.u32 q8,d28,d1[0]
293 vmlal.u32 q8,d29,d5[0]
301 vmov q7,q8
303 vmov q8,q9
332 veor q8,q8,q
    [all...]
  /external/flac/libFLAC/
lpc_intrin_avx2.c 64 __m256i q0, q1, q2, q3, q4, q5, q6, q7, q8, q9, q10, q11; local
73 q8 = _mm256_set1_epi32(0xffff & qlp_coeff[8 ]);
83 mull = _mm256_madd_epi16(q8, _mm256_loadu_si256((const __m256i*)(data+i-9 ))); summ = _mm256_add_epi32(summ, mull);
97 __m256i q0, q1, q2, q3, q4, q5, q6, q7, q8, q9, q10; local
106 q8 = _mm256_set1_epi32(0xffff & qlp_coeff[8 ]);
114 mull = _mm256_madd_epi16(q8, _mm256_loadu_si256((const __m256i*)(data+i-9 ))); summ = _mm256_add_epi32(summ, mull);
130 __m256i q0, q1, q2, q3, q4, q5, q6, q7, q8, q9; local
139 q8 = _mm256_set1_epi32(0xffff & qlp_coeff[8 ]);
145 mull = _mm256_madd_epi16(q8, _mm256_loadu_si256((const __m256i*)(data+i-9 ))); summ = _mm256_add_epi32(summ, mull);
159 __m256i q0, q1, q2, q3, q4, q5, q6, q7, q8; local
419 __m256i q0, q1, q2, q3, q4, q5, q6, q7, q8, q9, q10, q11; local
452 __m256i q0, q1, q2, q3, q4, q5, q6, q7, q8, q9, q10; local
485 __m256i q0, q1, q2, q3, q4, q5, q6, q7, q8, q9; local
514 __m256i q0, q1, q2, q3, q4, q5, q6, q7, q8; local
778 __m256i q0, q1, q2, q3, q4, q5, q6, q7, q8, q9, q10, q11; local
811 __m256i q0, q1, q2, q3, q4, q5, q6, q7, q8, q9, q10; local
844 __m256i q0, q1, q2, q3, q4, q5, q6, q7, q8, q9; local
873 __m256i q0, q1, q2, q3, q4, q5, q6, q7, q8; local
    [all...]
  /external/valgrind/none/tests/arm/
neon128.c 383 TESTINSN_imm("vorr.i32 q8", q8, 0x700);
392 TESTINSN_imm("vbic.i16 q8", q8, 0x700);
    [all...]
  /external/libhevc/common/arm/
ihevc_inter_pred_luma_copy_w16out.s 158 vmovl.u8 q8,d8 @vmovl_u8(vld1_u8(pu1_src_tmp))
163 vshl.i16 q0,q8,#6 @vshlq_n_s16(tmp, 6)
188 vmovl.u8 q8,d8 @vmovl_u8(vld1_u8(pu1_src_tmp))
204 vshl.i16 q0,q8,#6 @vshlq_n_s16(tmp, 6)
227 vmovl.u8 q8,d8 @vmovl_u8(vld1_u8(pu1_src_tmp))
238 vshl.i16 q0,q8,#6 @vshlq_n_s16(tmp, 6)
ihevc_inter_pred_chroma_copy_w16out.s 220 vmovl.u8 q8,d8 @vmovl_u8(vld1_u8(pu1_src_tmp))
225 vshl.i16 q0,q8,#6 @vshlq_n_s16(tmp, 6)
250 vmovl.u8 q8,d8 @vmovl_u8(vld1_u8(pu1_src_tmp))
266 vshl.i16 q0,q8,#6 @vshlq_n_s16(tmp, 6)
289 vmovl.u8 q8,d8 @vmovl_u8(vld1_u8(pu1_src_tmp))
300 vshl.i16 q0,q8,#6 @vshlq_n_s16(tmp, 6)
318 vmovl.u8 q8,d8 @vmovl_u8(vld1_u8(pu1_src_tmp))
321 vshl.i16 q0,q8,#6 @vshlq_n_s16(tmp, 6)
ihevc_itrans_recon_4x4.s 169 vadd.s32 q8,q6,q4 @((e[1] + o[1])
174 vqrshrn.s32 d1,q8,#shift_stage1_idct @pi2_out[1] = clip_s16((e[1] + o[1] + add)>>shift) )
200 vadd.s32 q8,q6,q4 @((e[1] + o[1])
205 vqrshrn.s32 d1,q8,#shift_stage2_idct @pi2_out[1] = clip_s16((e[1] + o[1] + add)>>shift) )
  /external/capstone/suite/MC/ARM/
neon-crypto.s.cs 16 0xa1,0x0e,0xe0,0xf2 = vmull.p64 q8, d16, d17
thumb-neon-crypto.s.cs 16 0xe0,0xef,0xa1,0x0e = vmull.p64 q8, d16, d17
  /external/llvm/test/MC/ARM/
neont2-pairwise-encoding.s 25 vpaddl.u16 q8, q3
38 @ CHECK: vpaddl.u16 q8, q3 @ encoding: [0xf4,0xff,0xc6,0x02]
52 vpadal.u16 q8, q14
65 @ CHECK: vpadal.u16 q8, q14 @ encoding: [0xf4,0xff,0xec,0x06]
neon-crypto.s 49 vmull.p64 q8, d16, d17
50 @ CHECK: vmull.p64 q8, d16, d17 @ encoding: [0xa1,0x0e,0xe0,0xf2]
  /external/swiftshader/third_party/LLVM/test/MC/ARM/
neont2-pairwise-encoding.s 25 vpaddl.u16 q8, q3
38 @ CHECK: vpaddl.u16 q8, q3 @ encoding: [0xf4,0xff,0xc6,0x02]
52 vpadal.u16 q8, q14
65 @ CHECK: vpadal.u16 q8, q14 @ encoding: [0xf4,0xff,0xec,0x06]
arm_instructions.s 11 @ CHECK: vqdmull.s32 q8, d17, d16
13 vqdmull.s32 q8, d17, d16
  /external/boringssl/src/crypto/poly1305/
poly1305_arm_asm.S 980 # asm 2: vshr.u64 >t2=q8,<r1=q3,#26
981 vshr.u64 q8,q3,#26
995 # asm 2: vadd.i64 >r2=q8,<r2=q13,<t2=q8
996 vadd.i64 q8,q13,q8
1025 # asm 2: vshr.u64 >t3=q13,<r2=q8,#26
1026 vshr.u64 q13,q8,#26
1040 # asm 2: vand >x23=q9,<r2=q8,<mask=q6
1041 vand q9,q8,q
    [all...]
  /external/libavc/encoder/arm/
ih264e_evaluate_intra_chroma_modes_a9q.s 169 vabdl.u8 q8, d0, d10
185 vabal.u8 q8, d2, d10
204 vabal.u8 q8, d0, d10
221 vabal.u8 q8, d2, d10
237 vadd.i16 q9, q9, q8 @/VERT

Completed in 2812 milliseconds

1 2 3 4 5 6 7 8 9 10 11 >>