    Searched refs: q3 (Results 126 - 150 of 305)

  /external/chromium_org/third_party/libvpx/source/libvpx/vp9/common/x86/
vp9_loopfilter_intrin_sse2.c 396 __m128i p4, p3, p2, p1, p0, q0, q1, q2, q3, q4; local
414 q3 = _mm_loadu_si128((__m128i *)(s + 3 * p));
440 work = _mm_max_epu8(abs_diff(q2, q1), abs_diff(q3, q2));
450 work = _mm_max_epu8(abs_diff(p3, p0), abs_diff(q3, q0));
540 const __m128i q3_lo = _mm_unpacklo_epi8(q3, zero);
549 const __m128i q3_hi = _mm_unpackhi_epi8(q3, zero);
602 const __m128i q3_lo = _mm_unpacklo_epi8(q3, zero);
619 const __m128i q3_hi = _mm_unpackhi_epi8(q3, zero);
694 q3 = filter16_mask(&flat2, &q3, &f_lo, &f_hi)
743 __m128i p3, p2, p1, p0, q0, q1, q2, q3; local
969 __m128i p3, p2, p1, p0, q0, q1, q2, q3; local
1208 __m128i p3, p2, p1, p0, q0, q1, q2, q3; local
    [all...]
vp9_loopfilter_intrin_avx2.c 402 __m128i p4, p3, p2, p1, p0, q0, q1, q2, q3, q4; local
420 q3 = _mm_loadu_si128((__m128i *) (s + 3 * p));
453 _mm_or_si128(_mm_subs_epu8(q3, q2), _mm_subs_epu8(q2, q3)));
528 _mm_or_si128(_mm_subs_epu8(q3, q0), _mm_subs_epu8(q0, q3)));
587 q256_3 = _mm256_cvtepu8_epi16(q3);
    [all...]
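  Aside: the abs_diff(q3, q2) calls in the SSE2 file and the paired _mm_subs_epu8/_mm_or_si128
  lines in the AVX2 file above are two spellings of the same trick: for unsigned bytes,
  |a - b| equals the OR of the two saturating differences. A minimal sketch assuming only
  SSE2 (the helper name is illustrative, not the libvpx one):

      #include <emmintrin.h>  /* SSE2 */

      /* max(a-b, 0) | max(b-a, 0) == |a-b| for unsigned bytes: one of the two
       * saturating subtractions is all zeros, the other is the element-wise
       * absolute difference. */
      static __m128i abs_diff_u8(__m128i a, __m128i b) {
        return _mm_or_si128(_mm_subs_epu8(a, b), _mm_subs_epu8(b, a));
      }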
  /external/libvpx/libvpx/vp8/common/arm/neon/
sixtappredict16x16_neon.asm 178 vmull.u8 q3, d6, d3
184 vqadd.s16 q13, q3
232 vmull.u8 q3, d18, d0 ;(src_ptr[-2] * vp8_filter[0])
237 vmlsl.u8 q3, d19, d1 ;-(src_ptr[-1] * vp8_filter[1])
242 vmlsl.u8 q3, d22, d4 ;-(src_ptr[2] * vp8_filter[4])
247 vmlal.u8 q3, d20, d2 ;(src_ptr[0] * vp8_filter[2])
252 vmlal.u8 q3, d23, d5 ;(src_ptr[3] * vp8_filter[5])
264 vqadd.s16 q7, q3 ;sum of all (src_data*filter_parameters)
382 vst1.u8 {q3}, [r4], r5 ;store result
422 vmull.u8 q3, d18, d0 ;(src_ptr[-2] * vp8_filter[0]
    [all...]
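  Aside: the per-instruction comments above spell out VP8's six-tap filter: taps 0, 2, 3
  and 5 are accumulated (vmull/vmlal) while taps 1 and 4 are stored as magnitudes and
  subtracted (vmlsl). A hedged scalar sketch of that arithmetic; the rounding shift of 7
  matches the vqrshrn.u16 #7 seen in the neighboring files, but the rounding and clamping
  details here are assumptions, not copied from the asm:

      #include <stdint.h>

      static uint8_t sixtap_pixel(const uint8_t *src_ptr,
                                  const int16_t *vp8_filter) {
        int sum = src_ptr[-2] * vp8_filter[0]
                - src_ptr[-1] * vp8_filter[1]
                + src_ptr[ 0] * vp8_filter[2]
                + src_ptr[ 1] * vp8_filter[3]
                - src_ptr[ 2] * vp8_filter[4]
                + src_ptr[ 3] * vp8_filter[5];
        sum = (sum + 64) >> 7;         /* assumed round-to-nearest with Q7 taps */
        if (sum < 0) sum = 0;          /* assumed saturation to 8 bits */
        if (sum > 255) sum = 255;
        return (uint8_t)sum;
      }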
vp8_subpixelvariance16x16_neon.asm 148 vmull.u8 q3, d14, d0
155 vmlal.u8 q3, d14, d1
171 vqrshrn.u16 d18, q3, #7
200 vmull.u8 q3, d24, d0
211 vmlal.u8 q3, d26, d1
222 vqrshrn.u16 d4, q3, #7
327 vmull.u8 q3, d24, d0
338 vmlal.u8 q3, d26, d1
347 vqrshrn.u16 d4, q3, #7
379 vld1.8 {q3}, [r4], r
    [all...]
vp8_subpixelvariance8x8_neon.asm 45 vld1.u8 {q3}, [r0], r1
68 vld1.u8 {q3}, [r0], r1
115 vmull.u8 q3, d24, d0
124 vmlal.u8 q3, d25, d1
133 vqrshrn.u16 d24, q3, #7
  /hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp8/common/arm/neon/
sixtappredict16x16_neon.asm 178 vmull.u8 q3, d6, d3
184 vqadd.s16 q13, q3
232 vmull.u8 q3, d18, d0 ;(src_ptr[-2] * vp8_filter[0])
237 vmlsl.u8 q3, d19, d1 ;-(src_ptr[-1] * vp8_filter[1])
242 vmlsl.u8 q3, d22, d4 ;-(src_ptr[2] * vp8_filter[4])
247 vmlal.u8 q3, d20, d2 ;(src_ptr[0] * vp8_filter[2])
252 vmlal.u8 q3, d23, d5 ;(src_ptr[3] * vp8_filter[5])
264 vqadd.s16 q7, q3 ;sum of all (src_data*filter_parameters)
382 vst1.u8 {q3}, [r4], r5 ;store result
422 vmull.u8 q3, d18, d0 ;(src_ptr[-2] * vp8_filter[0]
    [all...]
vp8_subpixelvariance16x16_neon.asm 148 vmull.u8 q3, d14, d0
155 vmlal.u8 q3, d14, d1
171 vqrshrn.u16 d18, q3, #7
200 vmull.u8 q3, d24, d0
211 vmlal.u8 q3, d26, d1
222 vqrshrn.u16 d4, q3, #7
327 vmull.u8 q3, d24, d0
338 vmlal.u8 q3, d26, d1
347 vqrshrn.u16 d4, q3, #7
379 vld1.8 {q3}, [r4], r
    [all...]
vp8_subpixelvariance8x8_neon.asm 45 vld1.u8 {q3}, [r0], r1
68 vld1.u8 {q3}, [r0], r1
115 vmull.u8 q3, d24, d0
124 vmlal.u8 q3, d25, d1
133 vqrshrn.u16 d24, q3, #7
  /frameworks/av/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/
armVCM4P10_InterpolateLuma_HalfDiagHorVer4x4_unsafe_s.S 161 VADDL.S16 q3,d16,d26
164 VMLA.I32 q3,q1,q15
166 VMLS.I32 q3,q0,q14
170 VQRSHRUN.S32 d6,q3,#10
174 VQMOVN.U16 d6,q3
  /external/pixman/pixman/
pixman-arm-neon-asm.S 170 vrshr.u16 q3, q11, #8
173 vraddhn.u16 d23, q11, q3
281 vrshr.u16 q3, q11, #8
287 vraddhn.u16 d23, q11, q3
368 vrshr.u16 q3, q11, #8
371 vraddhn.u16 d23, q11, q3
503 vqadd.u8 q15, q1, q3
525 vqadd.u8 q15, q1, q3
557 vqadd.u8 q15, q1, q3
724 vrshr.u16 q3, q11, #
    [all...]
pixman-android-neon.S 128 bilinear_store_&dst_fmt 1, q2, q3
145 bilinear_store_&dst_fmt 2, q2, q3
151 q3, q9, d4, d5, d16, d17, d18, d19
177 bilinear_store_&dst_fmt 4, q2, q3
  /external/libvpx/libvpx/vpx_scale/arm/neon/
vp8_vpxyv12_extendframeborders_neon.asm 52 vmov q3, q2
63 vst1.8 {q2, q3}, [r6], lr
89 vld1.8 {q2, q3}, [r1]!
103 vst1.8 {q2, q3}, [r5]!
170 vst1.8 {q3}, [r6], lr
200 vld1.8 {q2, q3}, [r1]!
210 vst1.8 {q2, q3}, [r5]!
  /external/llvm/test/MC/ARM/
neon-mul-encoding.s 117 vmul.i32 q3, d2[0]
133 vmul.i32 q6, q3, d2[0]
136 vmul.f32 q3, q6, d5[1]
149 @ CHECK: vmul.i32 q3, q3, d2[0] @ encoding: [0x42,0x68,0xa6,0xf3]
165 @ CHECK: vmul.i32 q6, q3, d2[0] @ encoding: [0x42,0xc8,0xa6,0xf3]
168 @ CHECK: vmul.f32 q3, q6, d5[1] @ encoding: [0x65,0x69,0xac,0xf3]
neon-shift-encoding.s 115 vsra.s32 q3, q6, #31
133 @ CHECK: vsra.s32 q3, q6, #31 @ encoding: [0x5c,0x61,0xa1,0xf2]
151 vsra.u32 q3, q6, #31
169 @ CHECK: vsra.u32 q3, q6, #31 @ encoding: [0x5c,0x61,0xa1,0xf3]
187 vsri.32 q3, q6, #31
205 @ CHECK: vsri.32 q3, q6, #31 @ encoding: [0x5c,0x64,0xa1,0xf3]
223 vsli.32 q3, q6, #31
241 @ CHECK: vsli.32 q3, q6, #31 @ encoding: [0x5c,0x65,0xbf,0xf3]
443 vrshl.s32 q3, q14
460 @ CHECK: vrshl.s32 q3, q3, q14 @ encoding: [0xc6,0x65,0x2c,0xf2
    [all...]
neon-sub-encoding.s 21 vsub.i32 q3, q8
43 @ CHECK: vsub.i32 q3, q3, q8 @ encoding: [0x60,0x68,0x26,0xf3]
144 vhsub.s32 q3, q10
157 @ CHECK: vhsub.s32 q3, q3, q10 @ encoding: [0x64,0x62,0x26,0xf2]
  /hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vpx_scale/arm/neon/
vp8_vpxyv12_extendframeborders_neon.asm 52 vmov q3, q2
63 vst1.8 {q2, q3}, [r6], lr
89 vld1.8 {q2, q3}, [r1]!
103 vst1.8 {q2, q3}, [r5]!
170 vst1.8 {q3}, [r6], lr
200 vld1.8 {q2, q3}, [r1]!
210 vst1.8 {q2, q3}, [r5]!
  /external/chromium_org/third_party/boringssl/linux-arm/crypto/modes/
ghash-armv4.S 330 vshl.i64 q3,q3,#1
333 veor q3,q3,q8 @ twisted H
334 vstmia r0,{q3}
350 vrev64.8 q3,q3
378 vrev64.8 q3,q3
380 veor q3,q0 @ inp^=X
    [all...]
  /external/openssl/crypto/modes/asm/
ghash-armv4.S 324 vshl.i64 q3,q3,#1
327 veor q3,q3,q8 @ twisted H
328 vstmia r0,{q3}
343 vrev64.8 q3,q3
370 vrev64.8 q3,q3
372 veor q3,q0 @ inp^=X
    [all...]
  /external/chromium_org/third_party/libvpx/source/libvpx/vp9/common/mips/dspr2/
vp9_mblpf_vert_loopfilter_dspr2.c 34 uint32_t p7, p6, p5, p4, p3, p2, p1, p0, q0, q1, q2, q3, q4, q5, q6, q7; local
84 "lw %[q3], (%[s1]) \n\t"
93 : [q3] "=&r" (q3), [q2] "=&r" (q2), [q1] "=&r" (q1),
137 /* transpose q0, q1, q2, q3
140 q3 q3_0 q3_1 q3_2 q3_3
147 q3 q0_3 q1_3 q2_3 q3_3
153 "precrq.qb.ph %[prim1], %[q3], %[q2] \n\t"
154 "precr.qb.ph %[prim2], %[q3], %[q2] \n\t"
163 "precrq.ph.w %[q3], %[q2], %[sec3] \n\t
    [all...]
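  Aside: the transpose comment above shows the 4x4 byte transpose the code performs:
  before it, register q3 holds q3_0 q3_1 q3_2 q3_3; afterwards it holds q0_3 q1_3 q2_3
  q3_3, i.e. the two indices swap. The dspr2 code does this with precrq/precr pack
  instructions on packed words; a plain-C sketch of the same 4x4 byte transpose:

      #include <stdint.h>

      /* in[r] holds row r as {col0..col3}; out[c] holds column c as {row0..row3}. */
      static void transpose_4x4_u8(const uint8_t in[4][4], uint8_t out[4][4]) {
        for (int r = 0; r < 4; ++r)
          for (int c = 0; c < 4; ++c)
            out[c][r] = in[r][c];
      }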
  /external/libvpx/libvpx/vp9/common/mips/dspr2/
vp9_mblpf_vert_loopfilter_dspr2.c 34 uint32_t p7, p6, p5, p4, p3, p2, p1, p0, q0, q1, q2, q3, q4, q5, q6, q7; local
84 "lw %[q3], (%[s1]) \n\t"
93 : [q3] "=&r" (q3), [q2] "=&r" (q2), [q1] "=&r" (q1),
137 /* transpose q0, q1, q2, q3
140 q3 q3_0 q3_1 q3_2 q3_3
147 q3 q0_3 q1_3 q2_3 q3_3
153 "precrq.qb.ph %[prim1], %[q3], %[q2] \n\t"
154 "precr.qb.ph %[prim2], %[q3], %[q2] \n\t"
163 "precrq.ph.w %[q3], %[q2], %[sec3] \n\t
    [all...]
  /hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp9/common/mips/dspr2/
vp9_mblpf_vert_loopfilter_dspr2.c 34 uint32_t p7, p6, p5, p4, p3, p2, p1, p0, q0, q1, q2, q3, q4, q5, q6, q7; local
84 "lw %[q3], (%[s1]) \n\t"
93 : [q3] "=&r" (q3), [q2] "=&r" (q2), [q1] "=&r" (q1),
137 /* transpose q0, q1, q2, q3
140 q3 q3_0 q3_1 q3_2 q3_3
147 q3 q0_3 q1_3 q2_3 q3_3
153 "precrq.qb.ph %[prim1], %[q3], %[q2] \n\t"
154 "precr.qb.ph %[prim2], %[q3], %[q2] \n\t"
163 "precrq.ph.w %[q3], %[q2], %[sec3] \n\t
    [all...]
  /external/libvpx/libvpx/vp9/common/x86/
vp9_loopfilter_intrin_avx2.c 402 __m128i p4, p3, p2, p1, p0, q0, q1, q2, q3, q4; local
420 q3 = _mm_loadu_si128((__m128i *) (s + 3 * p));
453 _mm_or_si128(_mm_subs_epu8(q3, q2), _mm_subs_epu8(q2, q3)));
528 _mm_or_si128(_mm_subs_epu8(q3, q0), _mm_subs_epu8(q0, q3)));
587 q256_3 = _mm256_cvtepu8_epi16(q3);
    [all...]
  /external/openssl/crypto/
armv4cpuid.S 139 .byte 0x56,0x61,0x06,0xf3 @ veor q3, q3, q3
  /frameworks/rs/cpu_ref/
rsCpuIntrinsics_neon_YuvToRGB.S 27 * and d21. Working constants are pre-loaded into q13-q15, and q3 is
96 vmov.i8 q3, #0xff
197 vzip.8 q1, q3
199 vzip.8 q2, q3
  /hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp9/common/x86/
vp9_loopfilter_intrin_avx2.c 402 __m128i p4, p3, p2, p1, p0, q0, q1, q2, q3, q4; local
420 q3 = _mm_loadu_si128((__m128i *) (s + 3 * p));
453 _mm_or_si128(_mm_subs_epu8(q3, q2), _mm_subs_epu8(q2, q3)));
528 _mm_or_si128(_mm_subs_epu8(q3, q0), _mm_subs_epu8(q0, q3)));
587 q256_3 = _mm256_cvtepu8_epi16(q3);
    [all...]
