    Searched full:pmullw (Results 1 - 25 of 89)
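For context: pmullw is the x86 MMX/SSE2 instruction that multiplies packed 16-bit integers lane by lane and keeps the low 16 bits of each product. Codecs lean on it because 8-bit pixels widened to 16 bits, multiplied by small filter taps, stay within range. In C, the SSE2 intrinsic _mm_mullo_epi16 (emmintrin.h) compiles to exactly this instruction; a minimal sketch:

    #include <emmintrin.h>
    #include <stdio.h>

    int main(void) {
        /* two vectors of eight 16-bit words */
        __m128i a = _mm_set_epi16(8, 7, 6, 5, 4, 3, 2, 1);
        __m128i b = _mm_set1_epi16(300);

        /* per-lane 16-bit multiply, low 16 bits of each product: pmullw */
        __m128i lo = _mm_mullo_epi16(a, b);

        short out[8];
        _mm_storeu_si128((__m128i *)out, lo);
        for (int i = 0; i < 8; i++)
            printf("%d ", out[i]);   /* 300 600 ... 2400, all fit in 16 bits */
        printf("\n");
        return 0;
    }

(When the full 32-bit product is needed, pmulhw / _mm_mulhi_epi16 supplies the high halves; the hits below all work in ranges where the low halves suffice.)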


  /external/libvpx/libvpx/vp8/common/x86/
subpixel_mmx.asm 59 pmullw mm3, mm1 ; mm3 *= kernel 1 modifiers.
63 pmullw mm4, mm7 ; mm5 *= kernel 4 modifiers
69 pmullw mm5, mm2 ; mm5 *= kernel 2 modifiers
75 pmullw mm4, mm6 ; mm5 *= kernel 3 modifiers
81 pmullw mm4, [rdx+80] ; mm5 *= kernel 0 modifiers
85 pmullw mm5, [rdx] ; mm5 *= kernel 5 modifiers
158 pmullw mm3, mm1 ; mm3 *= kernel 1 modifiers.
162 pmullw mm4, mm7 ; mm4 *= kernel 4 modifiers.
166 pmullw mm4, mm2 ; mm4 *= kernel 2 modifiers.
170 pmullw mm4, [rbx] ; mm4 *= kernel 0 modifiers
    [all...]
subpixel_sse2.asm 77 pmullw xmm3, XMMWORD PTR [rdx] ; x[-2] * H[-2]; Tap 1
81 pmullw xmm4, XMMWORD PTR [rdx+16] ; x[-1] * H[-1]; Tap 2
87 pmullw xmm5, [rdx+32] ; x[ 0] * H[ 0]; Tap 3
92 pmullw xmm6, [rdx+48] ; x[ 1] * h[ 1] ; Tap 4
98 pmullw xmm7, [rdx+64] ; x[ 2] * h[ 2] ; Tap 5
101 pmullw xmm1, [rdx+80] ; x[ 3] * h[ 3] ; Tap 6
202 pmullw xmm3, XMMWORD PTR [rdx] ; x[-2] * H[-2]; Tap 1
206 pmullw xmm4, XMMWORD PTR [rdx+16] ; x[-1] * H[-1]; Tap 2
212 pmullw xmm5, [rdx+32] ; x[ 0] * H[ 0]; Tap 3
217 pmullw xmm6, [rdx+48] ; x[ 1] * h[ 1] ; Tap
    [all...]
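The tap comments above (x[-2] * H[-2] through x[3] * h[3]) describe a 6-tap subpixel interpolation filter: one pmullw per tap, products summed, then rounded and shifted. A scalar model of what each output pixel computes, assuming the usual libvpx 7-bit kernel (taps sum to 128) -- the asm produces eight such outputs per pass:

    /* scalar model of the 6-tap filter in subpixel_sse2.asm:
       one output pixel from inputs x[-2..3] and kernel h[0..5].
       h[] is assumed to sum to 128 (7-bit scale), rounded via +64
       and >> 7, then clamped to 8 bits. */
    static unsigned char sixtap(const unsigned char *x, const short h[6]) {
        int sum = 64;                 /* rounding term for >> 7 */
        for (int k = 0; k < 6; k++)
            sum += x[k - 2] * h[k];   /* the per-tap pmullw + add */
        sum >>= 7;
        if (sum < 0) sum = 0;
        if (sum > 255) sum = 255;
        return (unsigned char)sum;
    }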
dequantize_mmx.asm 30 pmullw mm1, [rax+0] ; mm4 *= kernel 0 modifiers.
34 pmullw mm1, [rax+8] ; mm4 *= kernel 0 modifiers.
38 pmullw mm1, [rax+16] ; mm4 *= kernel 0 modifiers.
42 pmullw mm1, [rax+24] ; mm4 *= kernel 0 modifiers.
72 pmullw mm0, [rdx]
75 pmullw mm1, [rdx +8]
78 pmullw mm2, [rdx+16]
81 pmullw mm3, [rdx+24]
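dequantize_mmx.asm is the simplest use of the instruction in this list: each quantized coefficient is multiplied elementwise by its dequantization factor, four 16-bit words per pmullw ([rax+0], [rax+8], ...). A scalar sketch, assuming a 4x4 block of 16 coefficients as in VP8:

    /* scalar model of dequantize_mmx.asm: elementwise multiply of
       quantized coefficients q[] by dequant factors dq[]. */
    static void dequantize(const short *q, const short *dq, short *out) {
        for (int i = 0; i < 16; i++)
            out[i] = (short)(q[i] * dq[i]);
    }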
mfqe_sse2.asm 57 pmullw xmm2, xmm0
58 pmullw xmm3, xmm0
64 pmullw xmm4, xmm1
65 pmullw xmm5, xmm1
132 pmullw xmm2, xmm0
136 pmullw xmm3, xmm1
variance_impl_mmx.asm 553 pmullw mm1, [rax] ;
556 pmullw mm3, [rax+8] ;
577 pmullw mm1, [rax] ;
580 pmullw mm3, [rax+8] ;
589 pmullw mm3, [rdx] ;
591 pmullw mm1, [rdx+8] ;
701 pmullw mm1, [rax] ;
703 pmullw mm2, [rax] ;
707 pmullw mm3, [rax+8] ;
709 pmullw mm4, [rax+8]
    [all...]
  /hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp8/common/x86/
subpixel_mmx.asm 59 pmullw mm3, mm1 ; mm3 *= kernel 1 modifiers.
63 pmullw mm4, mm7 ; mm5 *= kernel 4 modifiers
69 pmullw mm5, mm2 ; mm5 *= kernel 2 modifiers
75 pmullw mm4, mm6 ; mm5 *= kernel 3 modifiers
81 pmullw mm4, [rdx+80] ; mm5 *= kernel 0 modifiers
85 pmullw mm5, [rdx] ; mm5 *= kernel 5 modifiers
158 pmullw mm3, mm1 ; mm3 *= kernel 1 modifiers.
162 pmullw mm4, mm7 ; mm4 *= kernel 4 modifiers.
166 pmullw mm4, mm2 ; mm4 *= kernel 2 modifiers.
170 pmullw mm4, [rbx] ; mm4 *= kernel 0 modifiers
    [all...]
subpixel_sse2.asm 77 pmullw xmm3, XMMWORD PTR [rdx] ; x[-2] * H[-2]; Tap 1
81 pmullw xmm4, XMMWORD PTR [rdx+16] ; x[-1] * H[-1]; Tap 2
87 pmullw xmm5, [rdx+32] ; x[ 0] * H[ 0]; Tap 3
92 pmullw xmm6, [rdx+48] ; x[ 1] * h[ 1] ; Tap 4
98 pmullw xmm7, [rdx+64] ; x[ 2] * h[ 2] ; Tap 5
101 pmullw xmm1, [rdx+80] ; x[ 3] * h[ 3] ; Tap 6
202 pmullw xmm3, XMMWORD PTR [rdx] ; x[-2] * H[-2]; Tap 1
206 pmullw xmm4, XMMWORD PTR [rdx+16] ; x[-1] * H[-1]; Tap 2
212 pmullw xmm5, [rdx+32] ; x[ 0] * H[ 0]; Tap 3
217 pmullw xmm6, [rdx+48] ; x[ 1] * h[ 1] ; Tap
    [all...]
dequantize_mmx.asm 30 pmullw mm1, [rax+0] ; mm4 *= kernel 0 modifiers.
34 pmullw mm1, [rax+8] ; mm4 *= kernel 0 modifiers.
38 pmullw mm1, [rax+16] ; mm4 *= kernel 0 modifiers.
42 pmullw mm1, [rax+24] ; mm4 *= kernel 0 modifiers.
72 pmullw mm0, [rdx]
75 pmullw mm1, [rdx +8]
78 pmullw mm2, [rdx+16]
81 pmullw mm3, [rdx+24]
mfqe_sse2.asm 57 pmullw xmm2, xmm0
58 pmullw xmm3, xmm0
64 pmullw xmm4, xmm1
65 pmullw xmm5, xmm1
132 pmullw xmm2, xmm0
136 pmullw xmm3, xmm1
variance_impl_mmx.asm 553 pmullw mm1, [rax] ;
556 pmullw mm3, [rax+8] ;
577 pmullw mm1, [rax] ;
580 pmullw mm3, [rax+8] ;
589 pmullw mm3, [rdx] ;
591 pmullw mm1, [rdx+8] ;
701 pmullw mm1, [rax] ;
703 pmullw mm2, [rax] ;
707 pmullw mm3, [rax+8] ;
709 pmullw mm4, [rax+8]
    [all...]
  /external/llvm/test/CodeGen/X86/
vec_shift6.ll 8 ; Check that we produce a SSE2 packed integer multiply (pmullw) instead.
15 ; CHECK: pmullw
24 ; CHECK: pmullw
55 ; into two pmullw instructions. With AVX2, the test case below would produce
63 ; SSE: pmullw
64 ; SSE-NEXT: pmullw
87 ; parts and then we convert each part into a pmullw.
94 ; SSE: pmullw
95 ; SSE-NEXT: pmullw
96 ; SSE-NEXT: pmullw
    [all...]
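vec_shift6.ll tests the lowering those CHECK lines describe: shifting a 16-bit lane left by k is the same as multiplying by 1 << k, and while SSE2 has no per-lane variable shift, it does have pmullw. A sketch of the equivalent source pattern, written directly with intrinsics:

    #include <emmintrin.h>

    /* shift each 16-bit lane left by a different constant amount;
       since x << k == x * (1 << k), SSE2 expresses this as a single
       pmullw against a constant vector of powers of two -- the
       lowering vec_shift6.ll checks for. */
    static __m128i shl_per_lane(__m128i v) {
        const __m128i pow2 = _mm_set_epi16(1 << 5, 1 << 4, 1 << 3, 1 << 2,
                                           1 << 5, 1 << 4, 1 << 3, 1 << 2);
        return _mm_mullo_epi16(v, pow2);
    }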
2008-02-26-AsmDirectMemOp.ll 15 tail call void asm sideeffect "movd $1, %mm6 \0A\09packssdw %mm6, %mm6 \0A\09packssdw %mm6, %mm6 \0A\09movd $2, %mm5 \0A\09pxor %mm7, %mm7 \0A\09packssdw %mm5, %mm5 \0A\09packssdw %mm5, %mm5 \0A\09psubw %mm5, %mm7 \0A\09pxor %mm4, %mm4 \0A\09.align 1<<4\0A\091: \0A\09movq ($0, $3), %mm0 \0A\09movq 8($0, $3), %mm1 \0A\09pmullw %mm6, %mm0 \0A\09pmullw %mm6, %mm1 \0A\09movq ($0, $3), %mm2 \0A\09movq 8($0, $3), %mm3 \0A\09pcmpgtw %mm4, %mm2 \0A\09pcmpgtw %mm4, %mm3 \0A\09pxor %mm2, %mm0 \0A\09pxor %mm3, %mm1 \0A\09paddw %mm7, %mm0 \0A\09paddw %mm7, %mm1 \0A\09pxor %mm0, %mm2 \0A\09pxor %mm1, %mm3 \0A\09pcmpeqw %mm7, %mm0 \0A\09pcmpeqw %mm7, %mm1 \0A\09pandn %mm2, %mm0 \0A\09pandn %mm3, %mm1 \0A\09movq %mm0, ($0, $3) \0A\09movq %mm1, 8($0, $3) \0A\09add $$16, $3 \0A\09jng 1b \0A\09", "r,imr,imr,r,~{dirflag},~{fpsr},~{flags},~{memory}"( i16* null, i32 %tmp1, i32 0, i32 0 ) nounwind
  /external/libvpx/libvpx/vpx_scale/win32/
scaleopt.c 114 pmullw mm1, mm5
116 pmullw mm0, mm6
166 pmullw mm1, three_fourths
168 pmullw mm2, one_fourths
171 pmullw mm3, two_fourths
175 pmullw mm4, two_fourths
180 pmullw mm5, one_fourths
189 pmullw mm6, three_fourths
265 pmullw mm0, mm6
267 pmullw mm1, mm
    [all...]
  /hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vpx_scale/win32/
scaleopt.c 114 pmullw mm1, mm5
116 pmullw mm0, mm6
166 pmullw mm1, three_fourths
168 pmullw mm2, one_fourths
171 pmullw mm3, two_fourths
175 pmullw mm4, two_fourths
180 pmullw mm5, one_fourths
189 pmullw mm6, three_fourths
265 pmullw mm0, mm6
267 pmullw mm1, mm
    [all...]
  /external/libvpx/libvpx/vp9/encoder/x86/
vp9_subpel_variance.asm 420 ; slightly faster because of pmullw latency. It would also cut our rodata
422 pmullw m2, filter_y_a
423 pmullw m3, filter_y_b
425 pmullw m0, filter_y_a
426 pmullw m4, filter_y_b
464 pmullw m0, filter_y_a
465 pmullw m1, m2, filter_y_b
468 pmullw m2, filter_y_a
469 pmullw m4, filter_y_b
719 pmullw m2, filter_y_
    [all...]
vp9_temporal_filter_apply_sse2.asm 109 pmullw xmm0, xmm0 ; modifer[ 0- 7]^2
110 pmullw xmm1, xmm1 ; modifer[ 8-15]^2
113 pmullw xmm0, [GLOBAL(_const_3w)]
114 pmullw xmm1, [GLOBAL(_const_3w)]
132 pmullw xmm2, [rsp + filter_weight]
133 pmullw xmm3, [rsp + filter_weight]
155 pmullw xmm0, xmm2
156 pmullw xmm1, xmm3
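The matched lines in the temporal-filter kernels show a three-multiply chain: square a per-pixel "modifier" (the asm's misspelled "modifer" comments are quoted verbatim above), scale by the constant 3, then by a per-block filter weight loaded from the stack. A sketch of just that chain; the intermediate shift/clamp steps between these multiplies fall outside the matched lines and are omitted here:

    #include <emmintrin.h>

    /* the pmullw chain visible above: modifier^2, * 3, * filter_weight.
       "weight" stands in for the value the asm loads from
       [rsp + filter_weight]. */
    static __m128i modifier_chain(__m128i mod, short weight) {
        __m128i m = _mm_mullo_epi16(mod, mod);              /* modifier^2 */
        m = _mm_mullo_epi16(m, _mm_set1_epi16(3));          /* * 3 */
        return _mm_mullo_epi16(m, _mm_set1_epi16(weight));  /* * weight */
    }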
  /hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp9/encoder/x86/
vp9_subpel_variance.asm 420 ; slightly faster because of pmullw latency. It would also cut our rodata
422 pmullw m2, filter_y_a
423 pmullw m3, filter_y_b
425 pmullw m0, filter_y_a
426 pmullw m4, filter_y_b
464 pmullw m0, filter_y_a
465 pmullw m1, m2, filter_y_b
468 pmullw m2, filter_y_a
469 pmullw m4, filter_y_b
719 pmullw m2, filter_y_
    [all...]
vp9_temporal_filter_apply_sse2.asm 104 pmullw xmm0, xmm0 ; modifer[ 0- 7]^2
105 pmullw xmm1, xmm1 ; modifer[ 8-15]^2
108 pmullw xmm0, [GLOBAL(_const_3w)]
109 pmullw xmm1, [GLOBAL(_const_3w)]
127 pmullw xmm2, [rsp + filter_weight]
128 pmullw xmm3, [rsp + filter_weight]
150 pmullw xmm0, xmm2
151 pmullw xmm1, xmm3
  /external/libvpx/libvpx/vp8/encoder/x86/
temporal_filter_apply_sse2.asm 104 pmullw xmm0, xmm0 ; modifer[ 0- 7]^2
105 pmullw xmm1, xmm1 ; modifer[ 8-15]^2
108 pmullw xmm0, [GLOBAL(_const_3w)]
109 pmullw xmm1, [GLOBAL(_const_3w)]
127 pmullw xmm2, [rsp + filter_weight]
128 pmullw xmm3, [rsp + filter_weight]
150 pmullw xmm0, xmm2
151 pmullw xmm1, xmm3
  /hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp8/encoder/x86/
temporal_filter_apply_sse2.asm 104 pmullw xmm0, xmm0 ; modifer[ 0- 7]^2
105 pmullw xmm1, xmm1 ; modifer[ 8-15]^2
108 pmullw xmm0, [GLOBAL(_const_3w)]
109 pmullw xmm1, [GLOBAL(_const_3w)]
127 pmullw xmm2, [rsp + filter_weight]
128 pmullw xmm3, [rsp + filter_weight]
150 pmullw xmm0, xmm2
151 pmullw xmm1, xmm3
  /external/libvpx/libvpx/vp9/common/x86/
vp9_postproc_mmx.asm 71 pmullw mm3, mm6 ; mm3 *= kernel 2 modifiers
76 pmullw mm6, mm5 ; mm6 *= p0..p3 * kernel 3 modifiers
89 pmullw mm6, mm5 ; mm5 *= kernel 4 modifiers
105 pmullw mm6, mm5 ; mm5 *= kernel 0 modifiers
119 pmullw mm6, mm4 ; mm4 *= kernel 1 modifiers.
166 pmullw mm3, mm6 ; mm3 *= kernel 2 modifiers
172 pmullw mm6, mm5 ; mm6 *= p1..p4 * kernel 3 modifiers
186 pmullw mm6, mm5 ; mm5 *= kernel 4 modifiers
202 pmullw mm6, mm5 ; mm5 *= kernel 0 modifiers
216 pmullw mm6, mm4 ; mm4 *= kernel 1 modifiers
    [all...]
vp9_subpixel_bilinear_sse2.asm 39 pmullw xmm0, xmm4 ;multiply the filter factors
87 pmullw xmm0, xmm6
88 pmullw xmm1, xmm7
110 pmullw xmm0, xmm6
111 pmullw xmm1, xmm7
112 pmullw xmm2, xmm6
113 pmullw xmm3, xmm7
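vp9_subpixel_bilinear_sse2.asm applies the same pmullw-per-tap idea with only two taps ("multiply the filter factors"). A scalar model, assuming the usual libvpx 7-bit filter pair (f0 + f1 == 128) with round-to-nearest -- the asm multiplies whole rows of pixels per pmullw:

    /* scalar model of a two-tap (bilinear) subpel filter:
       out = (a*f0 + b*f1 + 64) >> 7, assuming f0 + f1 == 128. */
    static unsigned char bilinear(unsigned char a, unsigned char b,
                                  int f0, int f1) {
        return (unsigned char)((a * f0 + b * f1 + 64) >> 7);
    }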
  /hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp9/common/x86/
vp9_postproc_mmx.asm 71 pmullw mm3, mm6 ; mm3 *= kernel 2 modifiers
76 pmullw mm6, mm5 ; mm6 *= p0..p3 * kernel 3 modifiers
89 pmullw mm6, mm5 ; mm5 *= kernel 4 modifiers
105 pmullw mm6, mm5 ; mm5 *= kernel 0 modifiers
119 pmullw mm6, mm4 ; mm4 *= kernel 1 modifiers.
166 pmullw mm3, mm6 ; mm3 *= kernel 2 modifiers
172 pmullw mm6, mm5 ; mm6 *= p1..p4 * kernel 3 modifiers
186 pmullw mm6, mm5 ; mm5 *= kernel 4 modifiers
202 pmullw mm6, mm5 ; mm5 *= kernel 0 modifiers
216 pmullw mm6, mm4 ; mm4 *= kernel 1 modifiers
    [all...]
vp9_subpixel_bilinear_sse2.asm 39 pmullw xmm0, xmm4 ;multiply the filter factors
87 pmullw xmm0, xmm6
88 pmullw xmm1, xmm7
110 pmullw xmm0, xmm6
111 pmullw xmm1, xmm7
112 pmullw xmm2, xmm6
113 pmullw xmm3, xmm7
  /external/mesa3d/src/mesa/x86/
mmx_blend.S 27 PMULLW ( MP1, MA1 ) /* t1 = p1*a1 */ ;\
30 TWO(PMULLW ( MP2, MA2 )) /* t2 = p2*a2 */ ;\
50 PMULLW ( MP1, MA1 ) /* t1 = p1*a1 */ ;\
51 TWO(PMULLW ( MP2, MA2 )) /* t2 = p2*a2 */ ;\
78 PMULLW ( MP1, MA1 ) /* t1 = p1*a1 */ ;\
81 TWO(PMULLW ( MP2, MA2 )) /* t2 = p2*a2 */ ;\
102 PMULLW ( MP1, MA1 ) /* t1 = (q1 - p1)*pa1 */ ;\
106 TWO(PMULLW ( MP2, MA2 )) /* t2 = (q2 - p2)*pa2 */ ;\
133 PMULLW ( MP1, MA1 ) /* t1 = (q1 - p1)*pa1 */ ;\
137 TWO(PMULLW ( MP2, MA2 )) /* t2 = (q2 - p2)*pa2 */ ;
    [all...]
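In mmx_blend.S the t1 = p1*a1 comments are the multiply step of alpha blending. Pixels are 8-bit and alpha-weighted products need 16 bits, so channels are unpacked to words before the PMULLW. A sketch of one such step using the SSE2 forms (the original uses the MMX equivalents), with /255 approximated by >> 8 -- a common simplification; the real macros handle the scaling differently:

    #include <emmintrin.h>

    /* one alpha-blend multiply step in the spirit of t1 = p1*a1:
       zero-extend 8-bit channels to 16-bit words, pmullw against
       the alpha weights, then scale back down. */
    static __m128i blend_mul(__m128i p8, __m128i a16) {
        __m128i p16 = _mm_unpacklo_epi8(p8, _mm_setzero_si128());
        __m128i t = _mm_mullo_epi16(p16, a16);   /* t = p * a */
        return _mm_srli_epi16(t, 8);             /* ~ t / 255 */
    }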
