    Searched refs:q0 (Results 101 - 125 of 323)


  /external/chromium_org/third_party/webrtc/common_audio/signal_processing/
vector_scaling_operations_neon.S 46 vmull.s16 q0, d28, d26
50 vadd.s32 q0, q2
52 vrshl.s32 q0, q12 @ Round shift right by right_shifts.
54 vmovn.i32 d0, q0 @ Cast to 16 bit values.
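  Note: the four lines above (vmull.s16, vadd.s32, vrshl.s32, vmovn.i32) widen, scale, round-shift and narrow each lane. A minimal scalar C sketch of what the sequence appears to compute, with illustrative names and parameters (not the library's exact signature):

    #include <stdint.h>
    #include <stddef.h>

    /* Sketch: scale two int16 inputs, sum the 32-bit products, add the
     * rounding term that vrshl supplies, shift right and keep the low
     * 16 bits (vmovn narrows without saturation). */
    static void scale_vectors_with_round(const int16_t *in1, int16_t gain1,
                                         const int16_t *in2, int16_t gain2,
                                         int right_shifts, size_t n,
                                         int16_t *out) {
        const int32_t round = right_shifts > 0 ? 1 << (right_shifts - 1) : 0;
        for (size_t i = 0; i < n; i++) {
            int32_t acc = (int32_t)in1[i] * gain1 + (int32_t)in2[i] * gain2;
            out[i] = (int16_t)((acc + round) >> right_shifts);
        }
    }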
  /external/llvm/test/MC/ARM/
vfp4.s 24 @ ARM: vfma.f32 q2, q4, q0 @ encoding: [0x50,0x4c,0x08,0xf2]
25 @ THUMB: vfma.f32 q2, q4, q0 @ encoding: [0x08,0xef,0x50,0x4c]
27 @ THUMB_V7EM-ERRORS-NEXT: vfma.f32 q2, q4, q0
28 vfma.f32 q2, q4, q0
58 @ ARM: vfms.f32 q2, q4, q0 @ encoding: [0x50,0x4c,0x28,0xf2]
59 @ THUMB: vfms.f32 q2, q4, q0 @ encoding: [0x28,0xef,0x50,0x4c]
61 @ THUMB_V7EM-ERRORS-NEXT: vfms.f32 q2, q4, q0
62 vfms.f32 q2, q4, q0
  /bionic/libc/arch-arm/krait/bionic/
memset.S 75 vdup.8 q0, r1
80 vmov q1, q0
  /external/libvpx/libvpx/vpx_scale/arm/neon/
vp8_vpxyv12_extendframeborders_neon.asm 51 vmov q1, q0
62 vst1.8 {q0, q1}, [r5], lr
87 vld1.8 {q0, q1}, [r1]!
101 vst1.8 {q0, q1}, [r5]!
167 vst1.8 {q0}, [r5], lr
198 vld1.8 {q0, q1}, [r1]!
208 vst1.8 {q0, q1}, [r5]!
240 vld1.8 {q0}, [r1]!
248 vst1.8 {q0}, [r5], lr
250 vst1.8 {q0}, [r5], l
    [all...]
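  Note: the vld1/vst1 pairs above copy 32 bytes (q0,q1) of replicated edge pixels at a time. A rough scalar C equivalent of the border extension, assuming a single plane with `border` extra pixels on each side (names are illustrative):

    #include <string.h>
    #include <stdint.h>

    static void extend_borders(uint8_t *plane, int width, int height,
                               int stride, int border) {
        /* Replicate the left and right edge pixel across each row's border. */
        for (int y = 0; y < height; y++) {
            uint8_t *row = plane + y * stride;
            memset(row - border, row[0], border);
            memset(row + width, row[width - 1], border);
        }
        /* Copy the widened top and bottom rows into the vertical border. */
        uint8_t *top = plane - border;
        uint8_t *bot = plane + (height - 1) * stride - border;
        for (int y = 1; y <= border; y++) {
            memcpy(top - y * stride, top, width + 2 * border);
            memcpy(bot + y * stride, bot, width + 2 * border);
        }
    }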
  /hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vpx_scale/arm/neon/
vp8_vpxyv12_extendframeborders_neon.asm 51 vmov q1, q0
62 vst1.8 {q0, q1}, [r5], lr
87 vld1.8 {q0, q1}, [r1]!
101 vst1.8 {q0, q1}, [r5]!
167 vst1.8 {q0}, [r5], lr
198 vld1.8 {q0, q1}, [r1]!
208 vst1.8 {q0, q1}, [r5]!
240 vld1.8 {q0}, [r1]!
248 vst1.8 {q0}, [r5], lr
250 vst1.8 {q0}, [r5], l
    [all...]
  /external/chromium_org/third_party/libvpx/source/libvpx/vp8/common/x86/
loopfilter_sse2.asm 33 movdqa xmm5, [rsi] ; q0
39 movlps xmm5, [rsi + rax] ; q0
66 movdqa xmm0, xmm5 ; q0
69 psubusb xmm5, xmm3 ; q0-=q1
70 psubusb xmm3, xmm0 ; q1-=q0
72 por xmm5, xmm3 ; abs(q0-q1)
141 movdqa xmm3, xmm0 ; q0
147 psubusb xmm5, xmm3 ; p0-=q0
148 psubusb xmm3, xmm6 ; q0-=p0
149 por xmm5, xmm3 ; abs(p0 - q0)
    [all...]
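  Note: the psubusb/por pairs above are the standard SSE2 idiom for per-byte |a - b|: one of the two saturating differences is always zero, so OR-ing them yields the absolute difference. The same idiom as a C intrinsic sketch:

    #include <emmintrin.h>  /* SSE2 */

    /* Per-byte absolute difference, matching the psubusb/por pattern. */
    static inline __m128i abs_diff_u8(__m128i a, __m128i b) {
        return _mm_or_si128(_mm_subs_epu8(a, b), _mm_subs_epu8(b, a));
    }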
  /external/libvpx/libvpx/vp8/common/x86/
loopfilter_sse2.asm 33 movdqa xmm5, [rsi] ; q0
39 movlps xmm5, [rsi + rax] ; q0
66 movdqa xmm0, xmm5 ; q0
69 psubusb xmm5, xmm3 ; q0-=q1
70 psubusb xmm3, xmm0 ; q1-=q0
72 por xmm5, xmm3 ; abs(q0-q1)
141 movdqa xmm3, xmm0 ; q0
147 psubusb xmm5, xmm3 ; p0-=q0
148 psubusb xmm3, xmm6 ; q0-=p0
149 por xmm5, xmm3 ; abs(p0 - q0)
    [all...]
  /hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp8/common/x86/
loopfilter_sse2.asm 33 movdqa xmm5, [rsi] ; q0
39 movlps xmm5, [rsi + rax] ; q0
66 movdqa xmm0, xmm5 ; q0
69 psubusb xmm5, xmm3 ; q0-=q1
70 psubusb xmm3, xmm0 ; q1-=q0
72 por xmm5, xmm3 ; abs(q0-q1)
141 movdqa xmm3, xmm0 ; q0
147 psubusb xmm5, xmm3 ; p0-=q0
148 psubusb xmm3, xmm6 ; q0-=p0
149 por xmm5, xmm3 ; abs(p0 - q0)
    [all...]
  /external/pixman/pixman/
pixman-arm-neon-asm.S 180 vqadd.u8 q9, q0, q11
256 vqadd.u8 q9, q0, q11
378 vqadd.u8 q9, q0, q11
467 vshrn.u16 d30, q0, #8
468 vshrn.u16 d29, q0, #3
469 vsli.u16 q0, q0, #5
473 vshrn.u16 d28, q0, #2
502 vqadd.u8 q14, q0, q2
522 vqadd.u8 q14, q0, q
    [all...]
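  Note: the vqadd.u8 lines above are saturating per-byte adds used in the compositing fast paths, and the vshrn/vsli group is pixman's in-register r5g6b5 conversion. A scalar sketch of the same conversion idea (field extraction plus bit replication), not a bit-for-bit transcription of the NEON shifts:

    #include <stdint.h>

    static inline uint32_t convert_0565_to_8888(uint16_t p) {
        uint32_t r = (p >> 11) & 0x1f, g = (p >> 5) & 0x3f, b = p & 0x1f;
        r = (r << 3) | (r >> 2);   /* replicate top bits into the low bits */
        g = (g << 2) | (g >> 4);
        b = (b << 3) | (b >> 2);
        return 0xff000000u | (r << 16) | (g << 8) | b;
    }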
  /external/chromium_org/third_party/libvpx/source/libvpx/vp9/common/arm/neon/
vp9_mb_lpf_neon.asm 48 vld1.u8 {d8}, [r8@64], r1 ; q0
159 vtrn.32 q0, q2
164 vtrn.16 q0, q1
300 ; d8 q0
315 vabd.u8 d22, d9, d8 ; abs(q1 - q0)
321 vmax.u8 d20, d21, d22 ; max(abs(p1 - p0), abs(q1 - q0))
325 vabd.u8 d24, d7, d8 ; abs(p0 - q0)
330 vqadd.u8 d24, d24, d24 ; b = abs(p0 - q0) * 2
337 vabd.u8 d26, d8, d10 ; abs(q0 - q2)
339 vabd.u8 d28, d11, d8 ; abs(q3 - q0)
    [all...]
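  Note: the vabd/vmax/vqadd lines above build the loop-filter mask described in their comments. A scalar C form of that mask, following the comments here and the C reference in loopfilter_filters.c (a sketch, not this file's exact flat-filter variant):

    #include <stdlib.h>

    /* Every neighbouring-pixel difference must stay within `limit`, and the
     * edge difference abs(p0-q0)*2 + abs(p1-q1)/2 within `blimit`. */
    static int filter_mask(int limit, int blimit,
                           int p3, int p2, int p1, int p0,
                           int q0, int q1, int q2, int q3) {
        int mask = 1;
        mask &= abs(p3 - p2) <= limit;
        mask &= abs(p2 - p1) <= limit;
        mask &= abs(p1 - p0) <= limit;
        mask &= abs(q1 - q0) <= limit;
        mask &= abs(q2 - q1) <= limit;
        mask &= abs(q3 - q2) <= limit;
        mask &= abs(p0 - q0) * 2 + abs(p1 - q1) / 2 <= blimit;
        return mask;   /* 1 = the edge may be filtered */
    }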
vp9_idct16x16_add_neon.asm 47 ; will be stored back into q8-q15 registers. This function will touch q0-q7
155 vmull.s16 q0, d24, d30
166 vadd.s32 q3, q2, q0
170 vsub.s32 q13, q2, q0
183 vmull.s16 q0, d20, d31
191 vmlal.s16 q0, d28, d30
199 vqrshrn.s32 d22, q0, #14 ; >> 14
216 vadd.s16 q0, q8, q11 ; step1[0] = step2[0] + step2[3];
248 vadd.s16 q8, q0, q15 ; step2[0] = step1[0] + step1[7];
255 vsub.s16 q15, q0, q15 ; step2[7] = step1[0] - step1[7]
    [all...]
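  Note: each idct stage above multiplies 16-bit inputs by cosine constants into 32-bit accumulators (vmull/vmlal), round-shifts back to 16 bits (vqrshrn.s32 #14), then combines pairs with add/sub butterflies. A scalar sketch of one such step, with saturation omitted:

    #include <stdint.h>

    #define ROUND_POWER_OF_TWO(x, n) (((x) + (1 << ((n) - 1))) >> (n))

    /* Mirrors vmull + vmlal into a 32-bit accumulator, then vqrshrn #14. */
    static int16_t dct_mul(int16_t a, int16_t c1, int16_t b, int16_t c2) {
        int32_t acc = (int32_t)a * c1 + (int32_t)b * c2;
        return (int16_t)ROUND_POWER_OF_TWO(acc, 14);
    }

    /* Butterfly used by the vadd.s16/vsub.s16 pairs, e.g.
     * step2[0] = step1[0] + step1[7]; step2[7] = step1[0] - step1[7]. */
    static void butterfly(int16_t a, int16_t b, int16_t *sum, int16_t *diff) {
        *sum  = (int16_t)(a + b);
        *diff = (int16_t)(a - b);
    }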
  /external/libvpx/libvpx/vp9/common/arm/neon/
vp9_mb_lpf_neon.asm 48 vld1.u8 {d8}, [r8@64], r1 ; q0
159 vtrn.32 q0, q2
164 vtrn.16 q0, q1
300 ; d8 q0
315 vabd.u8 d22, d9, d8 ; abs(q1 - q0)
321 vmax.u8 d20, d21, d22 ; max(abs(p1 - p0), abs(q1 - q0))
325 vabd.u8 d24, d7, d8 ; abs(p0 - q0)
330 vqadd.u8 d24, d24, d24 ; b = abs(p0 - q0) * 2
337 vabd.u8 d26, d8, d10 ; abs(q0 - q2)
339 vabd.u8 d28, d11, d8 ; abs(q3 - q0)
    [all...]
vp9_idct16x16_add_neon.asm 47 ; will be stored back into q8-q15 registers. This function will touch q0-q7
155 vmull.s16 q0, d24, d30
166 vadd.s32 q3, q2, q0
170 vsub.s32 q13, q2, q0
183 vmull.s16 q0, d20, d31
191 vmlal.s16 q0, d28, d30
199 vqrshrn.s32 d22, q0, #14 ; >> 14
216 vadd.s16 q0, q8, q11 ; step1[0] = step2[0] + step2[3];
248 vadd.s16 q8, q0, q15 ; step2[0] = step1[0] + step1[7];
255 vsub.s16 q15, q0, q15 ; step2[7] = step1[0] - step1[7]
    [all...]
  /hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp9/common/arm/neon/
vp9_mb_lpf_neon.asm 48 vld1.u8 {d8}, [r8@64], r1 ; q0
159 vtrn.32 q0, q2
164 vtrn.16 q0, q1
300 ; d8 q0
315 vabd.u8 d22, d9, d8 ; abs(q1 - q0)
321 vmax.u8 d20, d21, d22 ; max(abs(p1 - p0), abs(q1 - q0))
325 vabd.u8 d24, d7, d8 ; abs(p0 - q0)
330 vqadd.u8 d24, d24, d24 ; b = abs(p0 - q0) * 2
337 vabd.u8 d26, d8, d10 ; abs(q0 - q2)
339 vabd.u8 d28, d11, d8 ; abs(q3 - q0)
    [all...]
vp9_idct16x16_add_neon.asm 47 ; will be stored back into q8-q15 registers. This function will touch q0-q7
155 vmull.s16 q0, d24, d30
166 vadd.s32 q3, q2, q0
170 vsub.s32 q13, q2, q0
183 vmull.s16 q0, d20, d31
191 vmlal.s16 q0, d28, d30
199 vqrshrn.s32 d22, q0, #14 ; >> 14
216 vadd.s16 q0, q8, q11 ; step1[0] = step2[0] + step2[3];
248 vadd.s16 q8, q0, q15 ; step2[0] = step1[0] + step1[7];
255 vsub.s16 q15, q0, q15 ; step2[7] = step1[0] - step1[7]
    [all...]
  /external/chromium_org/third_party/libvpx/source/libvpx/vp8/common/
loopfilter_filters.c 29 uc q0, uc q1, uc q2, uc q3)
35 mask |= (abs(q1 - q0) > limit);
38 mask |= (abs(p0 - q0) * 2 + abs(p1 - q1) / 2 > blimit);
43 static signed char vp8_hevmask(uc thresh, uc p1, uc p0, uc q0, uc q1)
47 hev |= (abs(q1 - q0) > thresh) * -1;
282 static signed char vp8_simple_filter_mask(uc blimit, uc p1, uc p0, uc q0, uc q1)
288 signed char mask = (abs(p0 - q0) * 2 + abs(p1 - q1) / 2 <= blimit) * -1;
297 signed char q0 = (signed char) * oq0 ^ 0x80; local
302 filter_value = vp8_signed_char_clamp(filter_value + 3 * (q0 - p0));
308 u = vp8_signed_char_clamp(q0 - Filter1)
    [all...]
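  Note: the C snippet above re-centres pixels as signed values (XOR 0x80), clamps a correction term built from 3*(q0-p0), and writes back. A self-contained sketch of the clamp and the simple-filter body around line 297 (the full filter also folds in the p1/q1 term and the mask):

    #include <stdint.h>

    typedef unsigned char uc;

    /* Saturate an int to the signed 8-bit range, as vp8_signed_char_clamp does. */
    static signed char signed_char_clamp(int t) {
        return (signed char)(t < -128 ? -128 : (t > 127 ? 127 : t));
    }

    static void simple_filter(uc *op0, uc *oq0, int filter_value) {
        signed char p0 = (signed char)(*op0 ^ 0x80);   /* bias to signed */
        signed char q0 = (signed char)(*oq0 ^ 0x80);
        signed char f  = signed_char_clamp(filter_value + 3 * (q0 - p0));
        signed char Filter1 = signed_char_clamp(f + 4) >> 3;
        signed char Filter2 = signed_char_clamp(f + 3) >> 3;
        *oq0 = (uc)(signed_char_clamp(q0 - Filter1) ^ 0x80);
        *op0 = (uc)(signed_char_clamp(p0 + Filter2) ^ 0x80);
    }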
  /external/libvpx/libvpx/vp8/common/
loopfilter_filters.c 29 uc q0, uc q1, uc q2, uc q3)
35 mask |= (abs(q1 - q0) > limit);
38 mask |= (abs(p0 - q0) * 2 + abs(p1 - q1) / 2 > blimit);
43 static signed char vp8_hevmask(uc thresh, uc p1, uc p0, uc q0, uc q1)
47 hev |= (abs(q1 - q0) > thresh) * -1;
282 static signed char vp8_simple_filter_mask(uc blimit, uc p1, uc p0, uc q0, uc q1)
288 signed char mask = (abs(p0 - q0) * 2 + abs(p1 - q1) / 2 <= blimit) * -1;
297 signed char q0 = (signed char) * oq0 ^ 0x80; local
302 filter_value = vp8_signed_char_clamp(filter_value + 3 * (q0 - p0));
308 u = vp8_signed_char_clamp(q0 - Filter1)
    [all...]
  /external/openssl/crypto/bn/asm/
armv4-gf2m.S 108 vmull.p8 q0, d26, d0 @ E = A*B1
114 veor q1, q1, q0 @ L = E + F
118 vmull.p8 q0, d26, d0 @ I = A*B3
125 veor q3, q3, q0 @ N = I + J
135 vmull.p8 q0, d26, d27 @ D = A*B
140 veor q0, q0, q1
141 veor q0, q0, q3
143 vst1.32 {q0}, [r0
    [all...]
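  Note: the vmull.p8 sequence above builds a polynomial (carry-less) product from 8-bit partial products, XOR-combining the shifted pieces named E, F, I, J, D in the comments. The same operation written as a plain one-bit-at-a-time shift-and-XOR loop:

    #include <stdint.h>

    /* Carry-less 32x32 -> 64-bit multiply; addition in GF(2) is XOR. */
    static uint64_t clmul32(uint32_t a, uint32_t b) {
        uint64_t r = 0;
        for (int i = 0; i < 32; i++) {
            if ((b >> i) & 1)
                r ^= (uint64_t)a << i;
        }
        return r;
    }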
  /hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp8/common/
loopfilter_filters.c 29 uc q0, uc q1, uc q2, uc q3)
35 mask |= (abs(q1 - q0) > limit);
38 mask |= (abs(p0 - q0) * 2 + abs(p1 - q1) / 2 > blimit);
43 static signed char vp8_hevmask(uc thresh, uc p1, uc p0, uc q0, uc q1)
47 hev |= (abs(q1 - q0) > thresh) * -1;
282 static signed char vp8_simple_filter_mask(uc blimit, uc p1, uc p0, uc q0, uc q1)
288 signed char mask = (abs(p0 - q0) * 2 + abs(p1 - q1) / 2 <= blimit) * -1;
297 signed char q0 = (signed char) * oq0 ^ 0x80; local
302 filter_value = vp8_signed_char_clamp(filter_value + 3 * (q0 - p0));
308 u = vp8_signed_char_clamp(q0 - Filter1)
    [all...]
  /external/libhevc/common/arm/
ihevc_deblk_luma_vert.s 199 vaddl.u8 q0,d5,d4
203 vaddw.u8 q0,q0,d2
214 vadd.i16 q10,q10,q0
225 vmla.i16 q10,q0,q8
227 vaddw.u8 q0,q0,d7
231 vrshrn.i16 d0,q0,#2
265 vaddl.u8 q0,d2,d3
268 vaddw.u8 q0,q0,d
    [all...]
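  Note: the vaddl/vaddw/vrshrn #2 groups above accumulate neighbouring pixels and round-shift, which matches the HEVC strong-filter taps. A hedged C sketch of those taps on the p side (the full routine also clips each output to a tc range, omitted here):

    #include <stdint.h>

    #define RND2(x) (((x) + 2) >> 2)
    #define RND3(x) (((x) + 4) >> 3)

    static void hevc_strong_filter_p(const uint8_t *p /* p3..p0 */,
                                     const uint8_t *q /* q0..q3 */,
                                     uint8_t *out /* p2', p1', p0' */) {
        int p3 = p[0], p2 = p[1], p1 = p[2], p0 = p[3];
        int q0 = q[0], q1 = q[1];
        out[0] = (uint8_t)RND3(2 * p3 + 3 * p2 + p1 + p0 + q0);     /* p2' */
        out[1] = (uint8_t)RND2(p2 + p1 + p0 + q0);                  /* p1' */
        out[2] = (uint8_t)RND3(p2 + 2 * p1 + 2 * p0 + 2 * q0 + q1); /* p0' */
    }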
  /external/chromium_org/third_party/libvpx/source/libvpx/vp9/common/x86/
vp9_loopfilter_intrin_sse2.c 67 // mask |= (abs(p0 - q0) * 2 + abs(p1 - q1) / 2 > blimit) * -1;
70 // mask |= (abs(q1 - q0) > limit) * -1;
396 __m128i p4, p3, p2, p1, p0, q0, q1, q2, q3, q4; local
411 q0 = _mm_loadu_si128((__m128i *)(s - 0 * p));
422 const __m128i abs_q1q0 = abs_diff(q1, q0);
425 __m128i abs_p0q0 = abs_diff(p0, q0);
434 // mask |= (abs(p0 - q0) * 2 + abs(p1 - q1) / 2 > blimit) * -1;
437 // mask |= (abs(q1 - q0) > limit) * -1;
448 work = _mm_max_epu8(abs_diff(p2, p0), abs_diff(q2, q0));
450 work = _mm_max_epu8(abs_diff(p3, p0), abs_diff(q3, q0));
743 __m128i p3, p2, p1, p0, q0, q1, q2, q3; local
969 __m128i p3, p2, p1, p0, q0, q1, q2, q3; local
1208 __m128i p3, p2, p1, p0, q0, q1, q2, q3; local
    [all...]
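  Note: the commented condition "mask |= (abs(p0 - q0) * 2 + abs(p1 - q1) / 2 > blimit) * -1" is evaluated branchlessly on 16 pixels at once. A sketch of the usual SSE2 idiom for it (not a transcription of this exact file):

    #include <emmintrin.h>

    /* Returns 0xff in each byte where abs(p0-q0)*2 + abs(p1-q1)/2 <= blimit,
     * i.e. the complement of the comment's condition. */
    static __m128i edge_mask(__m128i abs_p0q0, __m128i abs_p1q1, __m128i blimit) {
        const __m128i zero = _mm_setzero_si128();
        const __m128i fe   = _mm_set1_epi8((char)0xfe);
        __m128i twice = _mm_adds_epu8(abs_p0q0, abs_p0q0);              /* *2, saturating   */
        __m128i half  = _mm_srli_epi16(_mm_and_si128(abs_p1q1, fe), 1); /* per-byte /2      */
        __m128i sum   = _mm_adds_epu8(twice, half);
        /* sum <= blimit  <=>  saturating(sum - blimit) == 0 */
        return _mm_cmpeq_epi8(_mm_subs_epu8(sum, blimit), zero);
    }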
vp9_loopfilter_intrin_avx2.c 66 // mask |= (abs(p0 - q0) * 2 + abs(p1 - q1) / 2 > blimit) * -1;
69 // mask |= (abs(q1 - q0) > limit) * -1;
402 __m128i p4, p3, p2, p1, p0, q0, q1, q2, q3, q4; local
417 q0 = _mm_loadu_si128((__m128i *) (s - 0 * p));
426 const __m128i abs_q1q0 = _mm_or_si128(_mm_subs_epu8(q1, q0),
427 _mm_subs_epu8(q0, q1));
430 __m128i abs_p0q0 = _mm_or_si128(_mm_subs_epu8(p0, q0),
431 _mm_subs_epu8(q0, p0));
443 // mask |= (abs(p0 - q0) * 2 + abs(p1 - q1) / 2 > blimit) * -1;
446 // mask |= (abs(q1 - q0) > limit) * -1
    [all...]
  /external/libvpx/libvpx/vp9/common/x86/
vp9_loopfilter_intrin_avx2.c 66 // mask |= (abs(p0 - q0) * 2 + abs(p1 - q1) / 2 > blimit) * -1;
69 // mask |= (abs(q1 - q0) > limit) * -1;
402 __m128i p4, p3, p2, p1, p0, q0, q1, q2, q3, q4; local
417 q0 = _mm_loadu_si128((__m128i *) (s - 0 * p));
426 const __m128i abs_q1q0 = _mm_or_si128(_mm_subs_epu8(q1, q0),
427 _mm_subs_epu8(q0, q1));
430 __m128i abs_p0q0 = _mm_or_si128(_mm_subs_epu8(p0, q0),
431 _mm_subs_epu8(q0, p0));
443 // mask |= (abs(p0 - q0) * 2 + abs(p1 - q1) / 2 > blimit) * -1;
446 // mask |= (abs(q1 - q0) > limit) * -1
    [all...]
  /frameworks/rs/cpu_ref/
rsCpuIntrinsics_neon_3DLUT.S 142 /* q0,q1,q2,q5 source data
147 vmovl.u8 q0, d0
150 vmul.u16 q0, q0, d6[0]
159 vsra.u16 q0, q0, #8
163 vshr.u16 q12, q0, #8
167 vbic.u16 q0, #0xff00
171 /* q0,d2,q2 fractional offset
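  Note: the vshr #8 / vbic #0xff00 pair above splits a scaled coordinate into an integer LUT cell index and an 8-bit fraction used for trilinear interpolation. A scalar sketch of the split and of one lerp step (names are illustrative):

    #include <stdint.h>

    static void split_coord(uint16_t scaled, uint16_t *index, uint16_t *frac) {
        *index = scaled >> 8;       /* which LUT cell           */
        *frac  = scaled & 0x00ff;   /* position inside the cell */
    }

    static uint8_t lerp_u8(uint8_t a, uint8_t b, uint16_t frac /* 0..255 */) {
        return (uint8_t)(a + (((b - a) * frac) >> 8));
    }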
rsCpuIntrinsics_neon_YuvToRGB.S 45 vaddw.u8 q0, q1, d14 // r0 = y0 * 149 + (v >> 1)
57 vhadd.u16 q0, q11 // r0 = (r0 + r2) >> 1
64 vqsub.u16 q0, q13 // r0 = satu16(r0 - (16 * 149 + (128 >> 1) + 128 * 204) >> 1)
71 vqrshrn.u16 d0, q0, #6
78 vzip.u8 q0, q1
196 vzip.8 q0, q2
198 vzip.8 q0, q1
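  Note: the YuvToRGB path above works in a halved fixed-point form (y*149, the >>1 terms, vqrshrn #6). Numerically it is the usual BT.601 limited-range conversion; a scalar sketch in the common 8-bit fixed-point form, whose constants are scaled differently from the NEON path:

    #include <stdint.h>

    static uint8_t clamp_u8(int v) { return (uint8_t)(v < 0 ? 0 : v > 255 ? 255 : v); }

    static void yuv_to_rgb(uint8_t y, uint8_t u, uint8_t v,
                           uint8_t *r, uint8_t *g, uint8_t *b) {
        int c = 298 * (y - 16);   /* 298 ~= 1.164 * 256, cf. 149 ~= 1.164 * 128 */
        int d = u - 128, e = v - 128;
        *r = clamp_u8((c + 409 * e + 128) >> 8);
        *g = clamp_u8((c - 100 * d - 208 * e + 128) >> 8);
        *b = clamp_u8((c + 516 * d + 128) >> 8);
    }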

