HomeSort by relevance Sort by last modified time
    Searched refs:q5 (Results 26 - 50 of 88) sorted by null

1 2 3 4

  /external/libvpx/libvpx/vp8/common/arm/neon/
sixtappredict16x16_neon.asm 169 vmull.u8 q5, d29, d3
173 vqadd.s16 q10, q5
203 vld1.s32 {q5, q6}, [r3] ;load second_pass filter
206 vabs.s32 q7, q5
234 vmull.u8 q5, d20, d0
239 vmlsl.u8 q5, d21, d1
244 vmlsl.u8 q5, d24, d4
249 vmlal.u8 q5, d22, d2
254 vmlal.u8 q5, d25, d5
266 vqadd.s16 q9, q5
    [all...]
vp8_subpixelvariance8x8_neon.asm 74 vld1.u8 {q5}, [r0], r1
117 vmull.u8 q5, d26, d0
126 vmlal.u8 q5, d27, d1
135 vqrshrn.u16 d26, q5, #7
173 vsubl.u8 q5, d23, d1
182 vpadal.s16 q8, q5
207 vmull.s32 q5, d0, d0
dequant_idct_neon.asm 27 vld1.16 {q5, q6}, [r1]
39 vmul.i16 q1, q3, q5 ;input for short_idct4x4llm_neon
bilinearpredict8x8_neon.asm 71 vld1.u8 {q5}, [r0], r1
115 vmull.u8 q5, d26, d0
124 vmlal.u8 q5, d27, d1
133 vqrshrn.u16 d6, q5, #7
variance_neon.asm 84 vmull.s32 q5, d0, d0
146 vmull.s32 q5, d0, d0
201 vmull.s32 q5, d0, d0
266 vmull.s32 q5, d0, d0
vp8_subpixelvariance16x16_neon.asm 204 vmull.u8 q5, d26, d0
213 vmlal.u8 q5, d28, d1
224 vqrshrn.u16 d6, q5, #7
331 vmull.u8 q5, d26, d0
340 vmlal.u8 q5, d28, d1
349 vqrshrn.u16 d6, q5, #7
411 vmull.s32 q5, d0, d0
bilinearpredict16x16_neon.asm 193 vmull.u8 q5, d26, d0
202 vmlal.u8 q5, d28, d1
213 vqrshrn.u16 d6, q5, #7
317 vmull.u8 q5, d26, d0
326 vmlal.u8 q5, d28, d1
335 vqrshrn.u16 d6, q5, #7
  /frameworks/av/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/
armVCM4P10_InterpolateLuma_HalfDiagHorVer4x4_unsafe_s.S 27 VADDL.U8 q5,d0,d1
128 VADDL.S16 q5,d10,d20
132 VMLA.I32 q5,q1,q15
138 VSUB.I32 q5,q5,q4
152 VQRSHRUN.S32 d0,q5,#10
omxVCM4P10_PredictIntraChroma_8x8_s.S 173 VMUL.I16 q2,q2,q5
174 VMUL.I16 q3,q3,q5
179 VDUP.16 q5,d6[3]
187 VADD.I16 q5,q2,q5
195 VQRSHRUN.S16 d3,q5,#5
  /external/jpeg/
jsimd_arm_neon.S 203 vmul.s16 q5, q5, q13
211 idct_helper q2, q3, q4, q5, q6, q7, q8, q9, q10, q11, q12, q13, q14
214 vtrn.16 q4, q5
216 vtrn.32 q3, q5
229 idct_helper q2, q3, q4, q5, q6, q7, q8, q9, q10, q11, q12, q13, q14
233 vtrn.16 q4, q5
235 vtrn.32 q3, q5
252 vqadd.s16 q5, q5, q1
    [all...]
  /external/clang/test/SemaCXX/
addr-of-overloaded-function.cpp 83 void q5(); // expected-note{{possible target for call}}
97 q5; // expected-error{{reference to non-static member function must be called; did you mean to call it with no arguments?}}
  /external/llvm/test/MC/ARM/
neon-shuffle-encoding.s 16 vext.32 q5, q8, #3
17 vext.64 q5, q8, #1
33 @ CHECK: vext.32 q5, q5, q8, #3 @ encoding: [0x60,0xac,0xba,0xf2]
34 @ CHECK: vext.64 q5, q5, q8, #1 @ encoding: [0x60,0xa8,0xba,0xf2]
neon-mul-encoding.s 119 vmul.u32 q5, d4[0]
134 vmul.s32 q5, q4, d3[1]
135 vmul.u32 q4, q5, d4[0]
151 @ CHECK: vmul.i32 q5, q5, d4[0] @ encoding: [0x44,0xa8,0xaa,0xf3]
166 @ CHECK: vmul.i32 q5, q4, d3[1] @ encoding: [0x63,0xa8,0xa8,0xf3]
167 @ CHECK: vmul.i32 q4, q5, d4[0] @ encoding: [0x44,0x88,0xaa,0xf3]
neon-sub-encoding.s 23 vsub.f32 q5, q6
45 @ CHECK: vsub.f32 q5, q5, q6 @ encoding: [0x4c,0xad,0x2a,0xf2]
146 vhsub.u16 q5, q8
159 @ CHECK: vhsub.u16 q5, q5, q8 @ encoding: [0x60,0xa2,0x1a,0xf3]
  /external/libvpx/libvpx/vp9/common/mips/dspr2/
vp9_loopfilter_filters_dspr2.h 533 const uint32_t q4 = *oq4, q5 = *oq5, q6 = *oq6, q7 = *oq7; local
541 /* addition of p6,p5,p4,p3,p2,p1,p0,q0,q1,q2,q3,q4,q5,q6
554 "addu.ph %[add_p6toq6], %[add_p6toq6], %[q5] \n\t"
562 [q4] "r" (q4), [q5] "r" (q5), [q6] "r" (q6),
577 "subu.ph %[res_op6], %[res_op6], %[q5] \n\t"
591 "subu.ph %[res_op5], %[res_op5], %[q5] \n\t"
603 "subu.ph %[res_op4], %[res_op4], %[q5] \n\t"
613 "subu.ph %[res_op3], %[res_op3], %[q5] \n\t"
623 "subu.ph %[res_op2], %[res_op2], %[q5] \n\t"
    [all...]
vp9_mblpf_vert_loopfilter_dspr2.c 34 uint32_t p7, p6, p5, p4, p3, p2, p1, p0, q0, q1, q2, q3, q4, q5, q6, q7; local
90 "lw %[q5], +4(%[s3]) \n\t"
95 [q5] "=&r" (q5), [q4] "=&r" (q4)
213 /* transpose q4, q5, q6, q7
218 q5 q5_0 q5_1 q5_2 q5_3
225 q5 q4_1 q5_1 q6_1 q7_1
231 "precrq.qb.ph %[prim3], %[q5], %[q4] \n\t"
232 "precr.qb.ph %[prim4], %[q5], %[q4] \n\t"
240 "precrq.ph.w %[q5], %[q4], %[sec4] \n\t"
    [all...]
  /external/libvpx/libvpx/vpx_scale/arm/neon/
vp8_vpxyv12_copy_y_neon.asm 51 vld1.8 {q4, q5}, [r8]!
62 vst1.8 {q4, q5}, [r9]!
vp8_vpxyv12_copysrcframe_func_neon.asm 54 vld1.8 {q4, q5}, [r10]!
64 vst1.8 {q4, q5}, [r11]!
168 vld1.8 {q4, q5}, [r10]!
174 vst1.8 {q4, q5}, [r11]!
vp8_vpxyv12_extendframeborders_neon.asm 53 vmov q5, q4
64 vst1.8 {q4, q5}, [r5], lr
91 vld1.8 {q4, q5}, [r1]!
105 vst1.8 {q4, q5}, [r5]!
172 vst1.8 {q5}, [r6], lr
vp8_vpxyv12_copyframe_func_neon.asm 64 vld1.8 {q4, q5}, [r8]!
75 vst1.8 {q4, q5}, [r9]!
  /external/chromium_org/third_party/openssl/openssl/crypto/poly1305/
poly1305_arm_asm.S 157 vpush {q4,q5,q6,q7}
278 # asm 2: vshl.i32 >5z34=q14,<z34=q5,#2
279 vshl.i32 q14,q5,#2
288 # asm 2: vadd.i32 >5z34=q14,<5z34=q14,<z34=q5
289 vadd.i32 q14,q14,q5
947 # asm 2: vand >r0=q5,<r0=q7,<mask=q6
948 vand q5,q7,q6
1002 # asm 2: vadd.i64 >x01=q5,<r0=q5,<t0=q9
1003 vadd.i64 q5,q5,q9
    [all...]
  /frameworks/rs/cpu_ref/
rsCpuIntrinsics_neon.S 60 vmovl.u8 q5, d29
282 vsubl.u8 Q5, d10, d6 @ Y to 16 bit - 16 (in 16bit) (n to n+7)
286 vsubl.u8 Q5, d12, d8 @ V to 16 bit - 128 = Q5 // V(n, n+1, n+2,n+3)
290 vzip.u16 d10, d11 @ Q5 = V (n,n n+1, n+1) V(n+2, n+2, n+3, n+3)
291 vzip.u16 d12, d13 @ Q5 = U (n,n n+1, n+1) U(n+2, n+2, n+3, n+3)
373 vsubl.u8 Q5, d10, d6 @ Y to 16 bit - 16 (in 16bit) (n to n+7)
377 vsubl.u8 Q5, d14, d8 @ V to 16 bit - 128 = Q5 // V(n, n+1, n+2,n+3)
381 vzip.u16 d10, d11 @ Q5 = V (n,n n+1, n+1) V(n+2, n+2, n+3, n+3)
    [all...]
  /hardware/samsung_slsi/exynos5/libswconverter/
csc_tiled_to_linear_uv_neon.s 90 vld1.8 {q4, q5}, [r8]!
99 vst1.8 {q5}, [r7], r2
  /external/libvpx/libvpx/vp9/common/x86/
vp9_loopfilter_intrin_avx2.c 403 __m128i q5, q6, q7; local
538 q5 = _mm_loadu_si128((__m128i *) (s + 5 * p));
541 _mm_or_si128(_mm_subs_epu8(q5, q0), _mm_subs_epu8(q0, q5)));
589 q256_5 = _mm256_cvtepu8_epi16(q5);
    [all...]
  /external/libvpx/libvpx/vp9/common/arm/neon/
vp9_mb_lpf_neon.asm 53 vld1.u8 {d13}, [r8@64], r1 ; q5
162 vtrn.32 q5, q7
166 vtrn.16 q4, q5
305 ; d13 q5
365 ; flatmask5(1, p7, p6, p5, p4, p0, q0, q4, q5, q6, q7)
369 vabd.u8 d25, d8, d13 ; abs(q0 - q5)
377 vmax.u8 d23, d24, d25 ; max(abs(p0 - p5), abs(q0 - q5))
537 vaddw.u8 q15, d13 ; op1 += q5
597 vbif d2, d13, d17 ; oq5 |= q5 & ~(f2 & f & m)

Completed in 1381 milliseconds

1 2 3 4