    Searched refs: pmullw (Results 1 - 25 of 46)
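For reference while reading the hits below: pmullw is the MMX/SSE2 packed multiply, which multiplies each signed 16-bit lane of its two operands and keeps only the low 16 bits of each 32-bit product. A minimal scalar model of that per-lane behaviour in C (a reference sketch, not the instruction itself):

    #include <stdint.h>

    /* per-lane model of pmullw: dst[i] = low 16 bits of a[i] * b[i] */
    static void pmullw_ref(int16_t *dst, const int16_t *a, const int16_t *b, int lanes)
    {
        for (int i = 0; i < lanes; i++)        /* 4 lanes (MMX) or 8 lanes (SSE2) */
            dst[i] = (int16_t)((int32_t)a[i] * (int32_t)b[i]);
    }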


  /external/libvpx/libvpx/vp8/common/x86/
subpixel_sse2.asm 77 pmullw xmm3, XMMWORD PTR [rdx] ; x[-2] * H[-2]; Tap 1
81 pmullw xmm4, XMMWORD PTR [rdx+16] ; x[-1] * H[-1]; Tap 2
87 pmullw xmm5, [rdx+32] ; x[ 0] * H[ 0]; Tap 3
92 pmullw xmm6, [rdx+48] ; x[ 1] * h[ 1] ; Tap 4
98 pmullw xmm7, [rdx+64] ; x[ 2] * h[ 2] ; Tap 5
101 pmullw xmm1, [rdx+80] ; x[ 3] * h[ 3] ; Tap 6
202 pmullw xmm3, XMMWORD PTR [rdx] ; x[-2] * H[-2]; Tap 1
206 pmullw xmm4, XMMWORD PTR [rdx+16] ; x[-1] * H[-1]; Tap 2
212 pmullw xmm5, [rdx+32] ; x[ 0] * H[ 0]; Tap 3
217 pmullw xmm6, [rdx+48] ; x[ 1] * h[ 1] ; Tap
    [all...]
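The comments on these hits name the six taps of the sub-pixel interpolation filter, x[-2]..x[3] times H[-2]..H[3]. A scalar sketch of one filtered pixel under those comments; the tap table, the rounding constant, and the final shift are not visible in the hits and are assumptions here:

    #include <stdint.h>

    /* hypothetical 6-tap filter; 'taps' stands in for the H[-2..3] table above */
    static uint8_t filter6(const uint8_t *x, const int16_t taps[6])
    {
        int32_t sum = 0;
        for (int k = -2; k <= 3; k++)        /* x[-2]*H[-2] ... x[3]*H[3] */
            sum += x[k] * taps[k + 2];
        sum = (sum + 64) >> 7;               /* assumed round-and-shift back to 8 bits */
        return (uint8_t)(sum < 0 ? 0 : sum > 255 ? 255 : sum);
    }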
subpixel_mmx.asm 59 pmullw mm3, mm1 ; mm3 *= kernel 1 modifiers.
63 pmullw mm4, mm7 ; mm4 *= kernel 4 modifiers
69 pmullw mm5, mm2 ; mm5 *= kernel 2 modifiers
75 pmullw mm4, mm6 ; mm4 *= kernel 3 modifiers
81 pmullw mm4, [rdx+80] ; mm4 *= kernel 0 modifiers
85 pmullw mm5, [rdx] ; mm5 *= kernel 5 modifiers
158 pmullw mm3, mm1 ; mm3 *= kernel 1 modifiers.
162 pmullw mm4, mm7 ; mm4 *= kernel 4 modifiers.
166 pmullw mm4, mm2 ; mm4 *= kernel 2 modifiers.
170 pmullw mm4, [rbx] ; mm4 *= kernel 0 modifiers
    [all...]
dequantize_mmx.asm 30 pmullw mm1, [rax+0] ; mm4 *= kernel 0 modifiers.
34 pmullw mm1, [rax+8] ; mm4 *= kernel 0 modifiers.
38 pmullw mm1, [rax+16] ; mm4 *= kernel 0 modifiers.
42 pmullw mm1, [rax+24] ; mm4 *= kernel 0 modifiers.
72 pmullw mm0, [rdx]
75 pmullw mm1, [rdx +8]
78 pmullw mm2, [rdx+16]
81 pmullw mm3, [rdx+24]
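These dequantize hits (and the quantize files further down, whose comments read "dqc[i] = qc[i] * q") are a plain element-wise multiply of quantized coefficients by their dequantization factors. A minimal scalar sketch, assuming a hypothetical block of n coefficients:

    #include <stdint.h>

    /* dequantize: dqcoeff[i] = qcoeff[i] * dequant[i]; pmullw keeps the low 16 bits */
    static void dequantize_block(int16_t *dqcoeff, const int16_t *qcoeff,
                                 const int16_t *dequant, int n)
    {
        for (int i = 0; i < n; i++)
            dqcoeff[i] = (int16_t)(qcoeff[i] * dequant[i]);
    }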
mfqe_sse2.asm 57 pmullw xmm2, xmm0
58 pmullw xmm3, xmm0
64 pmullw xmm4, xmm1
65 pmullw xmm5, xmm1
132 pmullw xmm2, xmm0
136 pmullw xmm3, xmm1
idctllm_sse2.asm 39 pmullw xmm4, xmm5
140 pmullw xmm0, [rdx]
141 pmullw xmm2, [rdx+16]
142 pmullw xmm1, [rdx]
143 pmullw xmm3, [rdx+16]
472 pmullw xmm0, [rdx]
473 pmullw xmm2, [rdx+16]
474 pmullw xmm1, [rdx]
475 pmullw xmm3, [rdx+16]
variance_impl_mmx.asm 553 pmullw mm1, [rax] ;
556 pmullw mm3, [rax+8] ;
577 pmullw mm1, [rax] ;
580 pmullw mm3, [rax+8] ;
589 pmullw mm3, [rdx] ;
591 pmullw mm1, [rdx+8] ;
701 pmullw mm1, [rax] ;
703 pmullw mm2, [rax] ;
707 pmullw mm3, [rax+8] ;
709 pmullw mm4, [rax+8]
    [all...]
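In these sub-pixel variance paths the pairs of pmullw operands ([rax]/[rax+8] for the first pass, [rdx]/[rdx+8] for the second) look like the two taps of a horizontal and then a vertical bilinear filter. A scalar sketch of one two-tap step; the convention that the taps sum to 128 and the round-and-shift by 7 are assumptions, not something visible in the hits:

    #include <stdint.h>

    /* one bilinear step; tap[0] + tap[1] is assumed to be 128, e.g. {128 - frac, frac} */
    static uint16_t bilinear2(uint8_t a, uint8_t b, const int16_t tap[2])
    {
        return (uint16_t)((a * tap[0] + b * tap[1] + 64) >> 7);
    }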
postproc_mmx.asm 100 pmullw mm1, mm1 ;
125 pmullw mm2, mm2
134 pmullw mm1, mm1
151 pmullw mm1, mm1
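Here pmullw multiplies a register by itself, i.e. the post-processing code is accumulating squared pixel values alongside a running sum so it can apply a local variance threshold. A scalar sketch of just that accumulation (the threshold test is not shown in the hits and is omitted):

    #include <stdint.h>

    /* running sum and sum-of-squares over a small window of pixels */
    static void sum_and_sumsq(const uint8_t *p, int n, int *sum, int *sumsq)
    {
        *sum = *sumsq = 0;
        for (int i = 0; i < n; i++) {
            *sum   += p[i];
            *sumsq += p[i] * p[i];   /* the pmullw mm1, mm1 / xmm1, xmm1 step */
        }
    }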
variance_impl_sse2.asm 460 pmullw xmm1, [rax] ;
462 pmullw xmm3, [rax+16] ;
480 pmullw xmm1, [rax] ;
482 pmullw xmm3, [rax+16] ;
491 pmullw xmm3, [rdx] ;
492 pmullw xmm1, [rdx+16] ;
544 pmullw xmm1, [rdx] ;
545 pmullw xmm3, [rdx+16] ;
611 pmullw xmm1, [rax] ;
613 pmullw xmm3, [rax+16]
    [all...]
postproc_sse2.asm 319 pmullw xmm1, xmm1 ;
344 pmullw xmm2, xmm2
353 pmullw xmm1, xmm1
370 pmullw xmm1, xmm1
  /external/libvpx/libvpx/vpx_scale/win32/
scaleopt.c 114 pmullw mm1, mm5
116 pmullw mm0, mm6
166 pmullw mm1, three_fourths
168 pmullw mm2, one_fourths
171 pmullw mm3, two_fourths
175 pmullw mm4, two_fourths
180 pmullw mm5, one_fourths
189 pmullw mm6, three_fourths
265 pmullw mm0, mm6
267 pmullw mm1, mm
    [all...]
  /external/libvpx/libvpx/vp9/encoder/x86/
vp9_subpel_variance.asm 420 ; slightly faster because of pmullw latency. It would also cut our rodata
422 pmullw m2, filter_y_a
423 pmullw m3, filter_y_b
425 pmullw m0, filter_y_a
426 pmullw m4, filter_y_b
464 pmullw m0, filter_y_a
465 pmullw m1, m2, filter_y_b
468 pmullw m2, filter_y_a
469 pmullw m4, filter_y_b
719 pmullw m2, filter_y_
    [all...]
vp9_temporal_filter_apply_sse2.asm 104 pmullw xmm0, xmm0 ; modifer[ 0- 7]^2
105 pmullw xmm1, xmm1 ; modifer[ 8-15]^2
108 pmullw xmm0, [GLOBAL(_const_3w)]
109 pmullw xmm1, [GLOBAL(_const_3w)]
127 pmullw xmm2, [rsp + filter_weight]
128 pmullw xmm3, [rsp + filter_weight]
150 pmullw xmm0, xmm2
151 pmullw xmm1, xmm3
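The comments spell out the start of the temporal-filter weight: square the per-pixel difference, scale it by 3, and later multiply by the block's filter weight. A rough scalar sketch of that weight; the strength shift and the clamp to 16 are not visible in these hits and are assumptions about the surrounding code:

    /* assumed per-pixel temporal-filter weight */
    static int tf_weight(int diff, int strength, int filter_weight)
    {
        int m = diff * diff * 3;      /* modifier^2, times the constant 3 */
        m >>= strength;               /* assumed attenuation by filter strength */
        if (m > 16) m = 16;           /* assumed clamp */
        return (16 - m) * filter_weight;
    }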
vp9_quantize_ssse3.asm 94 pmullw m8, m3 ; dqc[i] = qc[i] * q
96 pmullw m13, m3 ; dqc[i] = qc[i] * q
148 pmullw m14, m3 ; dqc[i] = qc[i] * q
149 pmullw m13, m3 ; dqc[i] = qc[i] * q
  /external/libvpx/libvpx/vp8/encoder/x86/
temporal_filter_apply_sse2.asm 104 pmullw xmm0, xmm0 ; modifer[ 0- 7]^2
105 pmullw xmm1, xmm1 ; modifer[ 8-15]^2
108 pmullw xmm0, [GLOBAL(_const_3w)]
109 pmullw xmm1, [GLOBAL(_const_3w)]
127 pmullw xmm2, [rsp + filter_weight]
128 pmullw xmm3, [rsp + filter_weight]
150 pmullw xmm0, xmm2
151 pmullw xmm1, xmm3
quantize_mmx.asm 66 pmullw mm3, mm2
106 pmullw mm7, mm6
147 pmullw mm7, mm6
188 pmullw mm7, mm6
quantize_ssse3.asm 97 pmullw xmm0, xmm1
98 pmullw xmm4, xmm5
quantize_sse4.asm 200 pmullw xmm0, xmm4
201 pmullw xmm1, xmm5
  /external/qemu/distrib/sdl-1.2.15/src/audio/
SDL_mixer_MMX_VC.c 77 pmullw mm1, mm0 //%%mm0,%%mm1\n" // mm1 = l(a*v)|l(b*v)|l(c*v)|l(d*v)
80 pmullw mm4, mm0 //%%mm0,%%mm4\n" // mm4 = l(e*v)|l(f*v)|l(g*v)|l(h*v)
161 pmullw mm1, mm0 //%%mm0,%%mm1 // mm1 = v*a|v*b|v*c|v*d
163 pmullw mm3, mm0 //%%mm0,%%mm3 // mm3 = v*e|v*f|v*g|v*h
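In the SDL mixer these pmullw instructions scale four 16-bit samples at a time by the volume before they are added into the output. A scalar sketch of the same mix step, assuming SDL's 0..128 volume range (so the scale is a multiply followed by a shift right by 7) and 16-bit saturation:

    #include <stdint.h>

    static void mix_s16(int16_t *dst, const int16_t *src, int n, int volume /* 0..128 */)
    {
        for (int i = 0; i < n; i++) {
            int32_t s = dst[i] + (((int32_t)src[i] * volume) >> 7);  /* scale by volume/128 */
            if (s >  32767) s =  32767;                              /* saturate to 16 bits */
            if (s < -32768) s = -32768;
            dst[i] = (int16_t)s;
        }
    }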
  /external/libvpx/libvpx/vp9/common/x86/
vp9_postproc_mmx.asm 71 pmullw mm3, mm6 ; mm3 *= kernel 2 modifiers
76 pmullw mm6, mm5 ; mm6 *= p0..p3 * kernel 3 modifiers
89 pmullw mm6, mm5 ; mm6 *= kernel 4 modifiers
105 pmullw mm6, mm5 ; mm6 *= kernel 0 modifiers
119 pmullw mm6, mm4 ; mm6 *= kernel 1 modifiers.
166 pmullw mm3, mm6 ; mm3 *= kernel 2 modifiers
172 pmullw mm6, mm5 ; mm6 *= p1..p4 * kernel 3 modifiers
186 pmullw mm6, mm5 ; mm6 *= kernel 4 modifiers
202 pmullw mm6, mm5 ; mm6 *= kernel 0 modifiers
216 pmullw mm6, mm4 ; mm6 *= kernel 1 modifiers
    [all...]
vp9_subpixel_8t_sse2.asm 61 pmullw xmm0, k0k1 ;multiply the filter factors
62 pmullw xmm6, k6k7
63 pmullw xmm2, k2k3
64 pmullw xmm5, k5k4
152 pmullw xmm0, k0
153 pmullw xmm1, k1
154 pmullw xmm6, k6
155 pmullw xmm7, k7
156 pmullw xmm2, k2
157 pmullw xmm5, k
    [all...]
  /external/chromium_org/third_party/mesa/src/src/mesa/x86/
read_rgba_span_x86.S 561 pmullw %mm6, %mm0
562 pmullw %mm6, %mm2
596 pmullw %mm6, %mm0
597 pmullw %mm6, %mm2
634 pmullw %mm6, %mm0
635 pmullw %mm6, %mm2
665 pmullw %mm6, %mm0
  /external/mesa3d/src/mesa/x86/
read_rgba_span_x86.S 561 pmullw %mm6, %mm0
562 pmullw %mm6, %mm2
596 pmullw %mm6, %mm0
597 pmullw %mm6, %mm2
634 pmullw %mm6, %mm0
635 pmullw %mm6, %mm2
665 pmullw %mm6, %mm0
  /external/valgrind/main/VEX/test/
mmxtest.c 286 #define pmullw_m2r(var, reg) mmx_m2r(pmullw, var, reg)
287 #define pmullw_r2r(regs, regd) mmx_r2r(pmullw, regs, regd)
288 #define pmullw(vars, vard) mmx_m2m(pmullw, vars, vard) macro
299 (muls like pmullw, then adds adjacent 16-bit fields
555 do_test("pmullw", pmullw(ma,mb));
  /external/qemu/distrib/sdl-1.2.15/src/video/
mmx.h 479 #define pmullw_m2r(var, reg) mmx_m2r(pmullw, var, reg)
480 #define pmullw_r2r(regs, regd) mmx_r2r(pmullw, regs, regd)
481 #define pmullw(vars, vard) mmx_m2m(pmullw, vars, vard) macro
492 (muls like pmullw, then adds adjacent 16-bit fields
  /external/valgrind/main/none/tests/amd64/
insn_mmx.def 68 pmullw mm.sw[1111,2222,-1111,-2222] mm.sw[3333,-4444,3333,-4444] => 1.uw[0x80b3,0x5378,0x7f4d,0xac88]
69 pmullw m64.sw[1111,2222,-1111,-2222] mm.sw[3333,-4444,3333,-4444] => 1.uw[0x80b3,0x5378,0x7f4d,0xac88]
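The expected outputs in these two test vectors are just the low 16 bits of each signed 16x16 product; they can be reproduced with a few lines of C:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        const int16_t a[4] = { 1111, 2222, -1111, -2222 };
        const int16_t b[4] = { 3333, -4444, 3333, -4444 };
        for (int i = 0; i < 4; i++)
            printf("0x%04x\n", (unsigned)(uint16_t)((int32_t)a[i] * b[i]));
        /* prints 0x80b3, 0x5378, 0x7f4d, 0xac88, matching the .uw expectations above */
        return 0;
    }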
