    Searched full:pmullw (Results 1 - 25 of 54)


  /external/libvpx/libvpx/vp8/common/x86/
subpixel_mmx.asm 59 pmullw mm3, mm1 ; mm3 *= kernel 1 modifiers.
63 pmullw mm4, mm7 ; mm4 *= kernel 4 modifiers
69 pmullw mm5, mm2 ; mm5 *= kernel 2 modifiers
75 pmullw mm4, mm6 ; mm4 *= kernel 3 modifiers
81 pmullw mm4, [rdx+80] ; mm4 *= kernel 0 modifiers
85 pmullw mm5, [rdx] ; mm5 *= kernel 5 modifiers
158 pmullw mm3, mm1 ; mm3 *= kernel 1 modifiers.
162 pmullw mm4, mm7 ; mm4 *= kernel 4 modifiers.
166 pmullw mm4, mm2 ; mm4 *= kernel 2 modifiers.
170 pmullw mm4, [rbx] ; mm4 *= kernel 0 modifiers
    [all...]
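
The subpixel_mmx.asm matches above are the core of vp8's six-tap sub-pixel filter: each row of pixels is multiplied by one per-tap "kernel modifier" and the products are accumulated. A minimal scalar sketch of that pattern, with illustrative names and the rounding/shift step reduced to a comment:

    #include <stdint.h>

    /* One output sample of a six-tap filter: src points at the centre
     * pixel, kernel[] plays the role of the kernel modifiers loaded from
     * [rdx+...] in the asm.  Each product is the work of one pmullw. */
    static int32_t six_tap(const uint8_t *src, const int16_t kernel[6])
    {
        int32_t acc = 0;
        for (int tap = 0; tap < 6; ++tap)
            acc += (int32_t)src[tap - 2] * kernel[tap];
        /* the real routine adds a rounding constant, shifts the sum back
         * down and clamps it to pixel range */
        return acc;
    }
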
subpixel_sse2.asm 77 pmullw xmm3, XMMWORD PTR [rdx] ; x[-2] * H[-2]; Tap 1
81 pmullw xmm4, XMMWORD PTR [rdx+16] ; x[-1] * H[-1]; Tap 2
87 pmullw xmm5, [rdx+32] ; x[ 0] * H[ 0]; Tap 3
92 pmullw xmm6, [rdx+48] ; x[ 1] * h[ 1] ; Tap 4
98 pmullw xmm7, [rdx+64] ; x[ 2] * h[ 2] ; Tap 5
101 pmullw xmm1, [rdx+80] ; x[ 3] * h[ 3] ; Tap 6
202 pmullw xmm3, XMMWORD PTR [rdx] ; x[-2] * H[-2]; Tap 1
206 pmullw xmm4, XMMWORD PTR [rdx+16] ; x[-1] * H[-1]; Tap 2
212 pmullw xmm5, [rdx+32] ; x[ 0] * H[ 0]; Tap 3
217 pmullw xmm6, [rdx+48] ; x[ 1] * h[ 1] ; Tap 4
    [all...]
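
subpixel_sse2.asm performs the same six tap multiplies on eight pixels at a time; each "pmullw xmm, [rdx+16*tap]" corresponds to _mm_mullo_epi16 against a vector holding that tap's coefficient. A small intrinsics sketch (not the library's code):

    #include <emmintrin.h>   /* SSE2 */

    /* Multiply eight 16-bit pixel values by one tap coefficient and add
     * them into the accumulator: pmullw followed by paddw. */
    static __m128i tap_mac(__m128i acc, __m128i pixels, __m128i tap)
    {
        return _mm_add_epi16(acc, _mm_mullo_epi16(pixels, tap));
    }
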
dequantize_mmx.asm 30 pmullw mm1, [rax+0] ; mm1 *= dequant values 0..3
34 pmullw mm1, [rax+8] ; mm1 *= dequant values 4..7
38 pmullw mm1, [rax+16] ; mm1 *= dequant values 8..11
42 pmullw mm1, [rax+24] ; mm1 *= dequant values 12..15
72 pmullw mm0, [rdx]
75 pmullw mm1, [rdx +8]
78 pmullw mm2, [rdx+16]
81 pmullw mm3, [rdx+24]
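
dequantize_mmx.asm is simpler: every quantized coefficient is multiplied element-wise by its dequantization factor, four 16-bit values per pmullw. A scalar equivalent, with assumed q/dq naming:

    #include <stdint.h>

    /* out[i] = q[i] * dq[i]; pmullw keeps the low 16 bits of each
     * product, which is all the valid coefficient range needs. */
    static void dequant_block(const int16_t *q, const int16_t *dq,
                              int16_t *out, int n)
    {
        for (int i = 0; i < n; ++i)
            out[i] = (int16_t)(q[i] * dq[i]);
    }
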
mfqe_sse2.asm 57 pmullw xmm2, xmm0
58 pmullw xmm3, xmm0
64 pmullw xmm4, xmm1
65 pmullw xmm5, xmm1
132 pmullw xmm2, xmm0
136 pmullw xmm3, xmm1
variance_impl_mmx.asm 553 pmullw mm1, [rax] ;
556 pmullw mm3, [rax+8] ;
577 pmullw mm1, [rax] ;
580 pmullw mm3, [rax+8] ;
589 pmullw mm3, [rdx] ;
591 pmullw mm1, [rdx+8] ;
701 pmullw mm1, [rax] ;
703 pmullw mm2, [rax] ;
707 pmullw mm3, [rax+8] ;
709 pmullw mm4, [rax+8]
    [all...]
postproc_mmx.asm 100 pmullw mm1, mm1 ;
125 pmullw mm2, mm2
134 pmullw mm1, mm1
151 pmullw mm1, mm1
variance_impl_sse2.asm 460 pmullw xmm1, [rax] ;
462 pmullw xmm3, [rax+16] ;
480 pmullw xmm1, [rax] ;
482 pmullw xmm3, [rax+16] ;
491 pmullw xmm3, [rdx] ;
492 pmullw xmm1, [rdx+16] ;
544 pmullw xmm1, [rdx] ;
545 pmullw xmm3, [rdx+16] ;
611 pmullw xmm1, [rax] ;
613 pmullw xmm3, [rax+16]
    [all...]
idctllm_sse2.asm 39 pmullw xmm4, xmm5
140 pmullw xmm0, [rdx]
141 pmullw xmm2, [rdx+16]
142 pmullw xmm1, [rdx]
143 pmullw xmm3, [rdx+16]
472 pmullw xmm0, [rdx]
473 pmullw xmm2, [rdx+16]
474 pmullw xmm1, [rdx]
475 pmullw xmm3, [rdx+16]
postproc_sse2.asm 319 pmullw xmm1, xmm1 ;
344 pmullw xmm2, xmm2
353 pmullw xmm1, xmm1
370 pmullw xmm1, xmm1
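
Both postproc files square a register against itself (pmullw xmm1, xmm1) while walking a row, accumulating a sum and a sum of squares for a mean/variance test. The scalar shape of that accumulation, with assumed names:

    #include <stdint.h>

    /* Running sum and sum-of-squares over 16-bit differences; the
     * self-multiplying pmullw lines above supply the d[i]*d[i] term. */
    static void accumulate(const int16_t *d, int n,
                           int32_t *sum, int32_t *sum_sq)
    {
        for (int i = 0; i < n; ++i) {
            *sum    += d[i];
            *sum_sq += (int32_t)d[i] * d[i];
        }
    }
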
  /external/libvpx/libvpx/vpx_scale/win32/
scaleopt.c 108 pmullw mm1, mm6 //
111 pmullw mm0, mm5 //
153 pmullw mm1, mm6 //
156 pmullw mm0, mm5 //
228 pmullw mm0, mm5 // 00* 51 01*102 02*154 03*205
230 pmullw mm1, mm6 // 01*205 02*154 03*102 04* 51
234 pmullw mm2, mm5 // 04* 51 05*102 06*154 07*205
237 pmullw mm3, mm6 // 05*205 06*154 07*102 08* 51
279 pmullw mm0, mm5 // 00* 51 01*102 02*154 03*205
281 pmullw mm1, mm6 // 01*205 02*154 03*102 04* 51
    [all...]
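
The scaleopt.c weights in the comments (51, 102, 154, 205) pair up to 256, so each pmullw pair is one leg of a fixed-point blend between two neighbouring source pixels. A scalar reading of that blend (rounding constant assumed):

    #include <stdint.h>

    /* Blend two pixels with complementary 8-bit weights, e.g. 51/205 or
     * 102/154 as in the comments above; >>8 undoes the scaling. */
    static uint8_t blend(uint8_t a, uint8_t b, unsigned w)
    {
        return (uint8_t)((a * w + b * (256u - w) + 128u) >> 8);
    }
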
  /external/libvpx/libvpx/vp8/encoder/x86/
temporal_filter_apply_sse2.asm 104 pmullw xmm0, xmm0 ; modifier[ 0- 7]^2
105 pmullw xmm1, xmm1 ; modifier[ 8-15]^2
108 pmullw xmm0, [GLOBAL(_const_3w)]
109 pmullw xmm1, [GLOBAL(_const_3w)]
127 pmullw xmm2, [rsp + filter_weight]
128 pmullw xmm3, [rsp + filter_weight]
150 pmullw xmm0, xmm2
151 pmullw xmm1, xmm3
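
temporal_filter_apply_sse2.asm uses pmullw in three stages: squaring the per-pixel modifier, scaling it by the constant 3, and finally weighting it by the frame's filter weight. A rough scalar rendering, with the rounding, strength shift and clamping that surround these multiplies reduced to a comment:

    #include <stdint.h>

    /* modifier^2 * 3 * filter_weight -- the three pmullw stages above.
     * The real routine also rounds, shifts by the filter strength and
     * clamps before applying the weight; modifier is a small pixel
     * difference in practice, so the values stay in range. */
    static int32_t temporal_weight(int16_t modifier, int16_t filter_weight)
    {
        int64_t m = (int64_t)modifier * modifier;  /* modifier^2         */
        m *= 3;                                    /* pmullw by const_3w */
        return (int32_t)(m * filter_weight);       /* pmullw by weight   */
    }
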
quantize_ssse3.asm 97 pmullw xmm0, xmm1
98 pmullw xmm4, xmm5
quantize_mmx.asm 66 pmullw mm3, mm2
106 pmullw mm7, mm6
147 pmullw mm7, mm6
188 pmullw mm7, mm6
quantize_sse2.asm 189 pmullw xmm0, xmm2
190 pmullw xmm1, xmm3
323 pmullw xmm2, [rcx]
324 pmullw xmm3, [rcx + 16]
quantize_sse4.asm 200 pmullw xmm0, xmm4
201 pmullw xmm1, xmm5
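
Several of the quantize kernels use pmullw for the reconstruction half of the job: once qcoeff has been produced, it is multiplied back by the dequantizer to give dqcoeff, several coefficients per instruction. In scalar form (my reading of the asm, names assumed):

    #include <stdint.h>

    /* dqcoeff = qcoeff * dequant; the pmullw pairs quoted above perform
     * this lane-wise. */
    static void reconstruct(const int16_t *qcoeff, const int16_t *dequant,
                            int16_t *dqcoeff, int n)
    {
        for (int i = 0; i < n; ++i)
            dqcoeff[i] = (int16_t)(qcoeff[i] * dequant[i]);
    }
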
  /external/llvm/test/CodeGen/X86/
2008-02-26-AsmDirectMemOp.ll 15 tail call void asm sideeffect "movd $1, %mm6 \0A\09packssdw %mm6, %mm6 \0A\09packssdw %mm6, %mm6 \0A\09movd $2, %mm5 \0A\09pxor %mm7, %mm7 \0A\09packssdw %mm5, %mm5 \0A\09packssdw %mm5, %mm5 \0A\09psubw %mm5, %mm7 \0A\09pxor %mm4, %mm4 \0A\09.align 1<<4\0A\091: \0A\09movq ($0, $3), %mm0 \0A\09movq 8($0, $3), %mm1 \0A\09pmullw %mm6, %mm0 \0A\09pmullw %mm6, %mm1 \0A\09movq ($0, $3), %mm2 \0A\09movq 8($0, $3), %mm3 \0A\09pcmpgtw %mm4, %mm2 \0A\09pcmpgtw %mm4, %mm3 \0A\09pxor %mm2, %mm0 \0A\09pxor %mm3, %mm1 \0A\09paddw %mm7, %mm0 \0A\09paddw %mm7, %mm1 \0A\09pxor %mm0, %mm2 \0A\09pxor %mm1, %mm3 \0A\09pcmpeqw %mm7, %mm0 \0A\09pcmpeqw %mm7, %mm1 \0A\09pandn %mm2, %mm0 \0A\09pandn %mm3, %mm1 \0A\09movq %mm0, ($0, $3) \0A\09movq %mm1, 8($0, $3) \0A\09add $$16, $3 \0A\09jng 1b \0A\09", "r,imr,imr,r,~{dirflag},~{fpsr},~{flags},~{memory}"( i16* null, i32 %tmp1, i32 0, i32 0 ) nounwind
widen_arith-4.ll 3 ; CHECK-NEXT: pmullw
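
The two LLVM tests check code generation rather than a codec: the inline-asm test feeds pmullw through directly, and widen_arith-4.ll verifies that ordinary 16-bit vector arithmetic is lowered to pmullw. A C loop of this shape is the kind of source that ends up as pmullw once vectorized (illustrative, not the test's IR):

    #include <stdint.h>

    /* Independent 16-bit multiplies keeping the low halves: the exact
     * semantics of pmullw, so vectorizers select that instruction. */
    void mul16(int16_t *dst, const int16_t *a, const int16_t *b, int n)
    {
        for (int i = 0; i < n; ++i)
            dst[i] = (int16_t)(a[i] * b[i]);
    }
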
  /external/qemu/distrib/sdl-1.2.15/src/audio/
SDL_mixer_MMX.c 76 " pmullw %%mm0,%%mm1\n" /* mm1 = l(a*v)|l(b*v)|l(c*v)|l(d*v) */
81 " pmullw %%mm0,%%mm4\n" /* mm4 = l(e*v)|l(f*v)|l(g*v)|l(h*v) */
179 " pmullw %%mm0,%%mm1\n" /* mm1 = v*a|v*b|v*c|v*d */
182 " pmullw %%mm0,%%mm3\n" /* mm3 = v*e|v*f|v*g|v*h */
SDL_mixer_MMX_VC.c 77 pmullw mm1, mm0 //%%mm0,%%mm1\n" // mm1 = l(a*v)|l(b*v)|l(c*v)|l(d*v)
80 pmullw mm4, mm0 //%%mm0,%%mm4\n" // mm4 = l(e*v)|l(f*v)|l(g*v)|l(h*v)
161 pmullw mm1, mm0 //%%mm0,%%mm1 // mm1 = v*a|v*b|v*c|v*d
163 pmullw mm3, mm0 //%%mm0,%%mm3 // mm3 = v*e|v*f|v*g|v*h
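
Both SDL mixer variants (GNU inline asm and MSVC syntax) scale blocks of signed 16-bit samples by the playback volume and keep the low words of the products ("l(a*v)" in the comments) before adding them into the output. Conceptually, per sample (saturation and the exact shift are handled by surrounding code not shown above):

    #include <stdint.h>

    /* Mix one S16 sample at a given volume (SDL's maximum is 128);
     * the MMX code performs the multiply lane-wise with pmullw. */
    static int16_t mix_sample(int16_t dst, int16_t src, int volume)
    {
        int32_t v = dst + ((int32_t)src * volume) / 128;
        if (v >  32767) v =  32767;
        if (v < -32768) v = -32768;
        return (int16_t)v;
    }
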
  /external/qemu/distrib/sdl-1.2.15/src/video/
SDL_yuv_mmx.c 119 "pmullw %10,%%mm0\n" // red*-46dec=0.7136*64
120 "pmullw %11,%%mm1\n" // red*89dec=1.4013*64
150 "pmullw %14,%%mm5\n" // blue*-109dec=1.7129*64
151 "pmullw %15,%%mm1\n" // blue*114dec=1.78125*64
279 "pmullw %10, %%mm2\n" // Cb2green 0 R3 0 R2 0 R1 0 R0
281 "pmullw %11, %%mm0\n" // Cb2blue
283 "pmullw %13, %%mm3\n" // Cr2green
285 "pmullw %14, %%mm1\n" // Cr2red
287 "pmullw %15, %%mm6\n" // lum1
289 "pmullw %15, %%mm7\n" // lum
    [all...]
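
SDL_yuv_mmx.c applies the YUV-to-RGB matrix in 6-bit fixed point: each coefficient is pre-scaled by 64 (as the comments spell out), multiplied in with pmullw, and the sums are later shifted back down. One channel's multiply, in scalar form:

    #include <stdint.h>

    /* Fixed-point multiply by a coefficient pre-scaled by 64; the >>6
     * undoes the scaling once the contributions have been summed. */
    static int16_t fixmul(int16_t component, int16_t coeff_x64)
    {
        return (int16_t)(((int32_t)component * coeff_x64) >> 6);
    }
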
mmx.h 479 #define pmullw_m2r(var, reg) mmx_m2r(pmullw, var, reg)
480 #define pmullw_r2r(regs, regd) mmx_r2r(pmullw, regs, regd)
481 #define pmullw(vars, vard) mmx_m2m(pmullw, vars, vard) macro
492 (muls like pmullw, then adds adjacent 16-bit fields
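
mmx.h wraps the instruction as memory-to-register, register-to-register and memory-to-memory macros, and its comment contrasts pmullw with pmaddwd, which "muls like pmullw, then adds adjacent 16-bit fields". In scalar terms the difference is (a sketch of the architectural behaviour, not the header's code):

    #include <stdint.h>

    /* pmullw keeps the low 16 bits of each lane's product; pmaddwd keeps
     * the full 32-bit products of two adjacent lanes and adds them. */
    static int32_t pmaddwd_pair(int16_t a0, int16_t b0, int16_t a1, int16_t b1)
    {
        return (int32_t)a0 * b0 + (int32_t)a1 * b1;
    }
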
  /external/valgrind/main/VEX/test/
mmxtest.c 286 #define pmullw_m2r(var, reg) mmx_m2r(pmullw, var, reg)
287 #define pmullw_r2r(regs, regd) mmx_r2r(pmullw, regs, regd)
288 #define pmullw(vars, vard) mmx_m2m(pmullw, vars, vard) macro
299 (muls like pmullw, then adds adjacent 16-bit fields
555 do_test("pmullw", pmullw(ma,mb));
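
The valgrind test drives the same macro (do_test("pmullw", pmullw(ma,mb))) and expects the architectural result: four independent 16-bit multiplies with only the low half of each product retained. A scalar reference model one could check it against (not the test's own code):

    #include <stdint.h>

    /* Reference for pmullw on a 64-bit MMX operand: per-lane 16x16
     * multiply, low 16 bits kept (sign does not affect the low half). */
    static uint64_t ref_pmullw(uint64_t a, uint64_t b)
    {
        uint64_t r = 0;
        for (int lane = 0; lane < 4; ++lane) {
            uint16_t x = (uint16_t)(a >> (16 * lane));
            uint16_t y = (uint16_t)(b >> (16 * lane));
            r |= (uint64_t)(uint16_t)((uint32_t)x * y) << (16 * lane);
        }
        return r;
    }
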
  /external/clang/test/CodeGen/
mmx-builtins.c 261 // CHECK: pmullw
266 // CHECK: pmullw
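
The clang test checks that the MMX multiply builtins still emit pmullw. From C the usual spelling is the mmintrin.h intrinsic _mm_mullo_pi16 (a small usage example, not the test itself):

    #include <mmintrin.h>

    /* Four packed 16-bit multiplies, low halves kept; compiles to pmullw.
     * Callers should issue _mm_empty() once they are done with MMX. */
    static __m64 mul4x16(__m64 a, __m64 b)
    {
        return _mm_mullo_pi16(a, b);
    }
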
  /external/valgrind/main/memcheck/tests/amd64/
sse_memory.stdout.exp     [all...]
  /external/valgrind/main/memcheck/tests/x86/
sse2_memory.stdout.exp     [all...]

