    Searched refs: vp8_filter (Results 1 - 25 of 25)

  /external/libvpx/vp8/common/arm/
bilinearfilter_arm.h 22 const short *vp8_filter
32 const short *vp8_filter
filter_arm.c 25 const short *vp8_filter
34 const short *vp8_filter
43 const short *vp8_filter
53 const short *vp8_filter
64 const short *vp8_filter
  /external/libvpx/vp8/common/
loopfilter_filters.c 51 static __inline void vp8_filter(signed char mask, signed char hev, uc *op1, uc *op0, uc *oq0, uc *oq1) function
56 signed char vp8_filter, Filter1, Filter2; local
65 vp8_filter = vp8_signed_char_clamp(ps1 - qs1);
66 vp8_filter &= hev;
69 vp8_filter = vp8_signed_char_clamp(vp8_filter + 3 * (qs0 - ps0));
70 vp8_filter &= mask;
76 Filter1 = vp8_signed_char_clamp(vp8_filter + 4);
77 Filter2 = vp8_signed_char_clamp(vp8_filter + 3);
84 vp8_filter = Filter1;
164 signed char vp8_filter, Filter1, Filter2; local
293 signed char vp8_filter, Filter1, Filter2; local
    [all...]
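
Note: the loopfilter_filters.c matches above trace the core vp8_filter() edge kernel that the NEON, MMX/SSE2 and ARMv6 results further down re-implement. A minimal C sketch of that kernel, reconstructed from the matched lines, follows; the ^0x80 signed-range bias and the write-back steps are not visible in the snippets and are filled in here from the usual VP8 scheme, so treat them as assumptions rather than the file's exact text.

    typedef unsigned char uc;

    static signed char vp8_signed_char_clamp(int t)
    {
        t = (t < -128) ? -128 : t;
        t = (t > 127) ? 127 : t;
        return (signed char)t;
    }

    /* Sketch of the kernel: op1/op0/oq0/oq1 point at the two pixels on
       each side of the edge; values are biased into signed range via ^0x80. */
    static void vp8_filter_sketch(signed char mask, signed char hev,
                                  uc *op1, uc *op0, uc *oq0, uc *oq1)
    {
        signed char ps1 = (signed char)(*op1 ^ 0x80);
        signed char ps0 = (signed char)(*op0 ^ 0x80);
        signed char qs0 = (signed char)(*oq0 ^ 0x80);
        signed char qs1 = (signed char)(*oq1 ^ 0x80);
        signed char vp8_filter, Filter1, Filter2, u;

        vp8_filter = vp8_signed_char_clamp(ps1 - qs1);
        vp8_filter &= hev;                  /* outer-tap delta only on high edge variance */
        vp8_filter = vp8_signed_char_clamp(vp8_filter + 3 * (qs0 - ps0));
        vp8_filter &= mask;                 /* apply only where the edge qualifies */

        /* inner taps */
        Filter1 = vp8_signed_char_clamp(vp8_filter + 4);
        Filter2 = vp8_signed_char_clamp(vp8_filter + 3);
        Filter1 >>= 3;
        Filter2 >>= 3;
        u = vp8_signed_char_clamp(qs0 - Filter1);
        *oq0 = (uc)(u ^ 0x80);
        u = vp8_signed_char_clamp(ps0 + Filter2);
        *op0 = (uc)(u ^ 0x80);

        /* outer taps: (Filter1 + 1) >> 1, suppressed where hev was set */
        vp8_filter = Filter1;
        vp8_filter += 1;
        vp8_filter >>= 1;
        vp8_filter &= ~hev;
        u = vp8_signed_char_clamp(qs1 - vp8_filter);
        *oq1 = (uc)(u ^ 0x80);
        u = vp8_signed_char_clamp(ps1 + vp8_filter);
        *op1 = (uc)(u ^ 0x80);
    }
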
filter.c 49 const short *vp8_filter
59 Temp = ((int)src_ptr[-2 * (int)pixel_step] * vp8_filter[0]) +
60 ((int)src_ptr[-1 * (int)pixel_step] * vp8_filter[1]) +
61 ((int)src_ptr[0] * vp8_filter[2]) +
62 ((int)src_ptr[pixel_step] * vp8_filter[3]) +
63 ((int)src_ptr[2*pixel_step] * vp8_filter[4]) +
64 ((int)src_ptr[3*pixel_step] * vp8_filter[5]) +
94 const short *vp8_filter
105 Temp = ((int)src_ptr[-2 * (int)pixel_step] * vp8_filter[0]) +
106 ((int)src_ptr[-1 * (int)pixel_step] * vp8_filter[1]) +
    [all...]
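
For context, the per-pixel work in filter.c's six-tap pass can be sketched as below. VP8_FILTER_SHIFT = 7 and the half-weight rounding constant are inferred from the tap layout in the matches, not taken from the snippet, so they are assumptions.

    #define VP8_FILTER_SHIFT 7                    /* assumed: six taps sum to 128 */

    /* One output pixel of the six-tap FIR; pixel_step is 1 for the
       horizontal pass and the source pitch for the vertical pass. */
    static unsigned char sixtap_pixel(const unsigned char *src_ptr,
                                      int pixel_step,
                                      const short *vp8_filter)
    {
        int Temp = ((int)src_ptr[-2 * pixel_step] * vp8_filter[0]) +
                   ((int)src_ptr[-1 * pixel_step] * vp8_filter[1]) +
                   ((int)src_ptr[0]               * vp8_filter[2]) +
                   ((int)src_ptr[pixel_step]      * vp8_filter[3]) +
                   ((int)src_ptr[2 * pixel_step]  * vp8_filter[4]) +
                   ((int)src_ptr[3 * pixel_step]  * vp8_filter[5]) +
                   (1 << (VP8_FILTER_SHIFT - 1)); /* round to nearest (assumed) */

        Temp >>= VP8_FILTER_SHIFT;
        if (Temp < 0) Temp = 0;                   /* negative taps can undershoot */
        else if (Temp > 255) Temp = 255;          /* ...or overshoot 8 bits */
        return (unsigned char)Temp;
    }
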
  /external/libvpx/vp8/common/arm/neon/
sixtappredict4x4_neon.asm 74 vmull.u8 q7, d18, d5 ;(src_ptr[3] * vp8_filter[5])
84 vmlal.u8 q7, d6, d0 ;+(src_ptr[-2] * vp8_filter[0])
91 vmlsl.u8 q7, d18, d1 ;-(src_ptr[-1] * vp8_filter[1])
98 vmlsl.u8 q7, d6, d4 ;-(src_ptr[2] * vp8_filter[4])
105 vmlal.u8 q7, d18, d2 ;(src_ptr[0] * vp8_filter[2])
110 vmull.u8 q9, d6, d3 ;(src_ptr[1] * vp8_filter[3])
139 vmull.u8 q7, d18, d5 ;(src_ptr[3] * vp8_filter[5])
141 vmull.u8 q12, d31, d5 ;(src_ptr[3] * vp8_filter[5])
151 vmlal.u8 q7, d6, d0 ;+(src_ptr[-2] * vp8_filter[0])
153 vmlal.u8 q12, d22, d0 ;(src_ptr[-2] * vp8_filter[0])
    [all...]
loopfiltersimplehorizontaledge_neon.asm 48 ;vp8_filter() function
63 vqsub.s8 q4, q5, q8 ; q4: vp8_filter = vp8_signed_char_clamp(ps1-qs1)
74 vaddw.s8 q2, q2, d8 ; vp8_filter + 3 * ( qs0 - ps0)
77 ;vqadd.s8 q4, q4, q2 ; vp8_filter = vp8_signed_char_clamp(vp8_filter + 3 * ( qs0 - ps0))
78 vqmovn.s16 d8, q2 ; vp8_filter = vp8_signed_char_clamp(vp8_filter + 3 * ( qs0 - ps0))
82 vand q4, q4, q15 ; vp8_filter &= mask
84 vqadd.s8 q2, q4, q10 ; Filter2 = vp8_signed_char_clamp(vp8_filter+3)
85 vqadd.s8 q4, q4, q9 ; Filter1 = vp8_signed_char_clamp(vp8_filter+4)
    [all...]
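
A scalar model of the simple-filter value this fragment computes is below; the helper name s8_clamp is ours. The key detail visible in the matches is that 3*(qs0 - ps0) is formed at 16-bit precision and narrowed with saturation (the vaddw.s8 / vqmovn.s16 pair), rather than saturating per 8-bit add.

    static signed char s8_clamp(int t)
    {
        return (signed char)(t < -128 ? -128 : (t > 127 ? 127 : t));
    }

    static signed char simple_filter_value(signed char ps1, signed char ps0,
                                           signed char qs0, signed char qs1,
                                           signed char mask)
    {
        int f = s8_clamp(ps1 - qs1);              /* vqsub.s8 */
        f = s8_clamp(f + 3 * (qs0 - ps0));        /* vaddw.s8 + vqmovn.s16 */
        return (signed char)(f & mask);           /* vand: vp8_filter &= mask */
    }

From there, Filter1 = s8_clamp(f + 4) and Filter2 = s8_clamp(f + 3) (the q9/q10 adds) are applied exactly as in the loopfilter_filters.c sketch above, just without the hev term.
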
sixtappredict16x16_neon.asm 78 vmull.u8 q8, d6, d0 ;(src_ptr[-2] * vp8_filter[0])
89 vmlsl.u8 q8, d28, d1 ;-(src_ptr[-1] * vp8_filter[1])
97 vmlsl.u8 q9, d28, d1 ;-(src_ptr[-1] * vp8_filter[1])
105 vmlsl.u8 q8, d28, d4 ;-(src_ptr[2] * vp8_filter[4])
113 vmlsl.u8 q9, d28, d4 ;-(src_ptr[2] * vp8_filter[4])
121 vmlal.u8 q8, d28, d5 ;(src_ptr[3] * vp8_filter[5])
129 vmlal.u8 q9, d28, d5 ;(src_ptr[3] * vp8_filter[5])
137 vmlal.u8 q8, d28, d2 ;(src_ptr[0] * vp8_filter[2])
145 vmlal.u8 q9, d28, d2 ;(src_ptr[0] * vp8_filter[2])
157 vmull.u8 q4, d28, d3 ;(src_ptr[1] * vp8_filter[3])
    [all...]
sixtappredict8x4_neon.asm 68 vmull.u8 q7, d6, d0 ;(src_ptr[-2] * vp8_filter[0])
78 vmlsl.u8 q7, d28, d1 ;-(src_ptr[-1] * vp8_filter[1])
88 vmlsl.u8 q7, d28, d4 ;-(src_ptr[2] * vp8_filter[4])
98 vmlal.u8 q7, d28, d2 ;(src_ptr[0] * vp8_filter[2])
108 vmlal.u8 q7, d28, d5 ;(src_ptr[3] * vp8_filter[5])
118 vmull.u8 q3, d28, d3 ;(src_ptr[1] * vp8_filter[3])
145 vmull.u8 q8, d6, d0 ;(src_ptr[-2] * vp8_filter[0])
157 vmlsl.u8 q8, d27, d1 ;-(src_ptr[-1] * vp8_filter[1])
169 vmlsl.u8 q8, d27, d4 ;-(src_ptr[2] * vp8_filter[4])
181 vmlal.u8 q8, d27, d2 ;(src_ptr[0] * vp8_filter[2])
    [all...]
sixtappredict8x8_neon.asm 72 vmull.u8 q7, d6, d0 ;(src_ptr[-2] * vp8_filter[0])
82 vmlsl.u8 q7, d28, d1 ;-(src_ptr[-1] * vp8_filter[1])
92 vmlsl.u8 q7, d28, d4 ;-(src_ptr[2] * vp8_filter[4])
102 vmlal.u8 q7, d28, d2 ;(src_ptr[0] * vp8_filter[2])
112 vmlal.u8 q7, d28, d5 ;(src_ptr[3] * vp8_filter[5])
122 vmull.u8 q3, d28, d3 ;(src_ptr[1] * vp8_filter[3])
158 vmull.u8 q8, d6, d0 ;(src_ptr[-2] * vp8_filter[0])
170 vmlsl.u8 q8, d27, d1 ;-(src_ptr[-1] * vp8_filter[1])
182 vmlsl.u8 q8, d27, d4 ;-(src_ptr[2] * vp8_filter[4])
194 vmlal.u8 q8, d27, d2 ;(src_ptr[0] * vp8_filter[2])
    [all...]
loopfiltersimpleverticaledge_neon.asm 77 ;vp8_filter() function
83 vqsub.s8 q1, q3, q6 ; vp8_filter = vp8_signed_char_clamp(ps1-qs1)
85 ;vmul.i8 q2, q2, q11 ; vp8_filter = vp8_signed_char_clamp(vp8_filter + 3 * ( qs0 - ps0))
92 vaddw.s8 q2, q2, d2 ; vp8_filter + 3 * ( qs0 - ps0)
95 vqmovn.s16 d2, q2 ; vp8_filter = vp8_signed_char_clamp(vp8_filter + 3 * ( qs0 - ps0))
102 vand q1, q1, q15 ; vp8_filter &= mask
104 vqadd.s8 q2, q1, q11 ; Filter2 = vp8_signed_char_clamp(vp8_filter+3)
105 vqadd.s8 q1, q1, q12 ; Filter1 = vp8_signed_char_clamp(vp8_filter+4)
    [all...]
bilinearpredict16x16_neon.asm 62 vmull.u8 q7, d2, d0 ;(src_ptr[0] * vp8_filter[0])
76 vmlal.u8 q7, d2, d1 ;(src_ptr[0] * vp8_filter[1])
86 vmlal.u8 q8, d3, d1 ;(src_ptr[0] * vp8_filter[1])
115 vmull.u8 q9, d2, d0 ;(src_ptr[0] * vp8_filter[0])
126 vmlal.u8 q9, d2, d1 ;(src_ptr[0] * vp8_filter[1])
134 vmlal.u8 q10, d3, d1 ;(src_ptr[0] * vp8_filter[1])
146 vmlal.u8 q1, d11, d1 ;(src_ptr[0] * vp8_filter[1])
152 vmlal.u8 q2, d12, d1 ;(src_ptr[0] * vp8_filter[1])
185 vmull.u8 q1, d22, d0 ;(src_ptr[0] * vp8_filter[0])
198 vmlal.u8 q1, d24, d1 ;(src_ptr[pixel_step] * vp8_filter[1])
    [all...]
loopfilter_neon.asm 344 ; vp8_filter() function
358 vqsub.s8 q1, q5, q8 ; vp8_filter = clamp(ps1-qs1)
364 vand q1, q1, q14 ; vp8_filter &= hev
372 ; vp8_filter = clamp(vp8_filter + 3 * ( qs0 - ps0))
375 vand q1, q1, q15 ; vp8_filter &= mask
377 vqadd.s8 q2, q1, q10 ; Filter2 = clamp(vp8_filter+3)
378 vqadd.s8 q1, q1, q9 ; Filter1 = clamp(vp8_filter+4)
385 ; outer tap adjustments: ++vp8_filter >> 1
387 vbic q1, q1, q14 ; vp8_filter &= ~hev
    [all...]
bilinearpredict8x4_neon.asm 46 vmull.u8 q6, d2, d0 ;(src_ptr[0] * vp8_filter[0])
59 vmlal.u8 q6, d3, d1 ;(src_ptr[1] * vp8_filter[1])
85 vmull.u8 q1, d22, d0 ;(src_ptr[0] * vp8_filter[0])
90 vmlal.u8 q1, d23, d1 ;(src_ptr[pixel_step] * vp8_filter[1])
bilinearpredict8x8_neon.asm 46 vmull.u8 q6, d2, d0 ;(src_ptr[0] * vp8_filter[0])
56 vmlal.u8 q6, d3, d1 ;(src_ptr[1] * vp8_filter[1])
73 vmull.u8 q6, d2, d0 ;(src_ptr[0] * vp8_filter[0])
85 vmlal.u8 q6, d3, d1 ;(src_ptr[1] * vp8_filter[1])
111 vmull.u8 q1, d22, d0 ;(src_ptr[0] * vp8_filter[0])
120 vmlal.u8 q1, d23, d1 ;(src_ptr[pixel_step] * vp8_filter[1])
bilinearpredict4x4_neon.asm 57 vmull.u8 q7, d2, d0 ;(src_ptr[0] * vp8_filter[0])
61 vmlal.u8 q7, d8, d1 ;(src_ptr[1] * vp8_filter[1])
mbloopfilter_neon.asm 411 ; vp8_filter
425 vqsub.s8 q1, q5, q8 ; vp8_filter = clamp(ps1-qs1)
436 vaddw.s8 q2, q2, d2 ; vp8_filter + 3 * ( qs0 - ps0)
441 ; vp8_filter = clamp(vp8_filter + 3 * ( qs0 - ps0))
445 vand q1, q1, q15 ; vp8_filter &= mask
470 vbic q1, q1, q14 ; vp8_filter &= ~hev
  /external/libvpx/vp8/common/x86/
vp8_asm_stubs.c 27 const short *vp8_filter
38 const short *vp8_filter
48 const short *vp8_filter
58 const short *vp8_filter
69 const short *vp8_filter
80 const short *vp8_filter
97 const short *vp8_filter
106 const short *vp8_filter
115 const short *vp8_filter
subpixel_sse2.asm 33 ; short *vp8_filter
46 mov rdx, arg(6) ;vp8_filter
147 ; short *vp8_filter
166 mov rdx, arg(6) ;vp8_filter
325 ; short * vp8_filter
342 mov rax, arg(7) ;vp8_filter
420 ; const short *vp8_filter
437 mov rax, arg(7) ;vp8_filter
533 ; const short *vp8_filter
547 mov rdx, arg(5) ;vp8_filter
    [all...]
loopfilter_mmx.asm 770 ; mm1 = vp8_filter, mm4=hev, mm6=ps0, mm3=qs0
771 movq mm2, mm1 ; vp8_filter
772 pand mm2, mm4; ; Filter2 = vp8_filter & hev
798 ; mm0= filter2 mm1 = vp8_filter, mm3 =qs0 mm5=s mm4 =hev mm6=ps0
802 ; mm1=vp8_filter, mm3=qs0, mm4 =hev mm6=ps0
803 ; vp8_filter &= ~hev;
804 ; Filter2 = vp8_filter;
805 pandn mm4, mm1 ; vp8_filter&=~hev
    [all...]
subpixel_mmx.asm 28 ; short * vp8_filter
40 mov rdx, arg(6) ;vp8_filter
125 ; short * vp8_filter
139 mov rbx, arg(7) ;vp8_filter
loopfilter_sse2.asm 422 movdqa xmm2, xmm1 ; vp8_filter
424 pand xmm2, xmm4 ; Filter2 = vp8_filter & hev
427 pandn xmm4, xmm1 ; vp8_filter&=~hev
    [all...]
  /external/libvpx/vp8/encoder/
variance_c.c 206 * INT32 *vp8_filter : Array of 2 bi-linear filter taps.
233 const short *vp8_filter
243 output_ptr[j] = (((int)src_ptr[0] * vp8_filter[0]) +
244 ((int)src_ptr[pixel_step] * vp8_filter[1]) +
264 * INT32 *vp8_filter : Array of 2 bi-linear filter taps.
291 const short *vp8_filter
302 Temp = ((int)src_ptr[0] * vp8_filter[0]) +
303 ((int)src_ptr[pixel_step] * vp8_filter[1]) +
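
The encoder-side bilinear pass matched above can be sketched as follows. The signature is inferred from the comment header ("Array of 2 bi-linear filter taps") and the surrounding matches; the rounding constant is an assumption, as is reuse of VP8_FILTER_SHIFT = 7.

    #define VP8_FILTER_SHIFT 7   /* assumed, as in filter.c */

    /* Two-tap bilinear pass used by the sub-pixel variance code:
       blends each pixel with its neighbour pixel_step away. */
    static void bilinear_pass(const unsigned char *src_ptr,
                              unsigned short *output_ptr,
                              unsigned int src_pixels_per_line,
                              int pixel_step,
                              unsigned int output_height,
                              unsigned int output_width,
                              const short *vp8_filter)
    {
        unsigned int i, j;
        for (i = 0; i < output_height; i++) {
            for (j = 0; j < output_width; j++) {
                output_ptr[j] = (unsigned short)
                    ((((int)src_ptr[0]          * vp8_filter[0]) +
                      ((int)src_ptr[pixel_step] * vp8_filter[1]) +
                      (1 << (VP8_FILTER_SHIFT - 1)))   /* round (assumed) */
                     >> VP8_FILTER_SHIFT);
                src_ptr++;
            }
            src_ptr += src_pixels_per_line - output_width;
            output_ptr += output_width;
        }
    }

The two passes of the separable 2-D interpolation differ only in pixel_step (1 for horizontal, the source pitch for vertical).
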
  /external/libvpx/vp8/common/arm/armv6/
simpleloopfilter_v6.asm 104 qsub8 r3, r3, r6 ; vp8_filter = p1 - q1
110 qadd8 r3, r3, r6 ; vp8_filter = p1-q1 + 3*(q0-p0))
112 and r3, r3, r10 ; vp8_filter &= mask
114 qadd8 r7 , r3 , r7 ; Filter1 = vp8_filter + 4
115 qadd8 r8 , r3 , r8 ; Filter2 = vp8_filter + 3
208 qsub8 r3, r3, r6 ; vp8_filter = p1 - q1
211 qadd8 r3, r3, r6 ; vp8_filter += q0 - p0
214 qadd8 r3, r3, r6 ; vp8_filter += q0 - p0
217 qadd8 r3, r3, r6 ; vp8_filter = p1-q1 + 3*(q0-p0))
219 and r3, r3, lr ; vp8_filter &= mask
    [all...]
loopfilter_v6.asm 165 ;vp8_filter() function
182 qsub8 r8, r9, r8 ; vp8_signed_char_clamp(vp8_filter + 3 * ( qs0 - ps0))
184 and r7, r7, r6 ; vp8_filter (r7) &= hev
193 and r7, r7, lr ; vp8_filter &= mask;
195 ;modify code for vp8 -- Filter1 = vp8_filter (r7)
196 qadd8 r8 , r7 , r9 ; Filter2 (r8) = vp8_signed_char_clamp(vp8_filter+3)
197 qadd8 r7 , r7 , r10 ; vp8_filter = vp8_signed_char_clamp(vp8_filter+4)
201 shadd8 r7 , r7 , r9 ; vp8_filter >>= 3
214 ;qadd8 lr, r8, r7 ; u = vp8_signed_char_clamp(s + vp8_filter)
    [all...]
sixtappredict8x4_v6.asm 65 smuad r11, r6, r3 ; vp8_filter[0], vp8_filter[1]
70 smlad r11, r8, r4, r11 ; vp8_filter[2], vp8_filter[3]
76 smlad r11, r10, r5, r11 ; vp8_filter[4], vp8_filter[5]
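
The ARMv6 build packs two 16-bit taps per register so each SMUAD/SMLAD retires two multiply-accumulates per instruction. A C model of that primitive (a sketch of the instruction's semantics, not the library's code):

    #include <stdint.h>

    /* Model of ARMv6 SMLAD (SMUAD is the acc == 0 case): multiply the
       signed halfword pairs of a and b and add both products to acc. */
    static int32_t smlad(int32_t acc, int32_t a, int32_t b)
    {
        return acc + (int16_t)(a & 0xffff) * (int16_t)(b & 0xffff)
                   + (int16_t)(a >> 16)    * (int16_t)(b >> 16);
    }

So the three instructions in the fragment accumulate tap pairs 0-1, 2-3 and 4-5 of the same six-tap kernel shown in filter.c.
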
