    Searched full:xmm3 (Results 1 - 25 of 367)


  /external/libvpx/libvpx/vp8/common/x86/
variance_impl_sse2.asm 40 movdqa xmm3, [rax+48]
44 pmaddwd xmm3, xmm3
47 paddd xmm2, xmm3
55 movdqa xmm3,xmm4
57 paddd xmm4,xmm3
58 movdqa xmm3,xmm4
60 paddd xmm4,xmm3
136 movdqa xmm3, xmm1
141 punpckhbw xmm3, xmm
    [all...]
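
The variance_impl_sse2.asm hits above show the core sum-of-squares accumulation: pmaddwd multiplies each 16-bit difference by itself and adds adjacent pairs into 32-bit lanes, and paddd folds the partial sums. A minimal C intrinsics sketch of that idiom (function and buffer names are mine, not libvpx's; assumes n is a multiple of 8 and block-sized so the 32-bit lanes cannot overflow):

    #include <emmintrin.h>
    #include <stdint.h>

    static int32_t sum_of_squares_sse2(const int16_t *diff, int n) {
        __m128i acc = _mm_setzero_si128();            /* four 32-bit partial sums */
        for (int i = 0; i < n; i += 8) {
            __m128i d = _mm_loadu_si128((const __m128i *)(diff + i));
            /* pmaddwd d, d: d0*d0+d1*d1 | d2*d2+d3*d3 | ... as 32-bit lanes */
            acc = _mm_add_epi32(acc, _mm_madd_epi16(d, d));   /* paddd */
        }
        /* Horizontal fold of the four lanes; the asm does a similar fold with
           the movdqa/paddd pairs visible around lines 55-60. */
        acc = _mm_add_epi32(acc, _mm_srli_si128(acc, 8));
        acc = _mm_add_epi32(acc, _mm_srli_si128(acc, 4));
        return _mm_cvtsi128_si32(acc);
    }
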
variance_impl_ssse3.asm 94 movdqa xmm3, xmm1
97 punpckhbw xmm3, xmm2
99 pmaddubsw xmm3, [rax]
102 paddw xmm3, [GLOBAL(xmm_bi_rd)]
104 psraw xmm3, xmm_filter_shift
105 packuswb xmm1, xmm3
109 movdqa xmm3, xmm2
112 punpckhbw xmm3, xmm1
114 pmaddubsw xmm3, [rdx]
117 paddw xmm3, [GLOBAL(xmm_bi_rd)
    [all...]
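
In the variance_impl_ssse3.asm hits, pmaddubsw applies both bilinear taps at once to interleaved pixel pairs, paddw adds the rounding constant xmm_bi_rd, psraw shifts, and packuswb repacks to bytes. A hedged intrinsics sketch of one row, assuming VP8's 7-bit bilinear filters with both taps below 128 (the full-weight case is handled by a separate copy path in the real code):

    #include <tmmintrin.h>
    #include <stdint.h>

    #define BIL_SHIFT 7                         /* assumed VP8_FILTER_SHIFT */
    #define BIL_ROUND (1 << (BIL_SHIFT - 1))    /* the xmm_bi_rd constant   */

    /* dst[x] = (f0*src[x] + f1*src[x+1] + round) >> shift, 8 pixels per step.
       Requires w % 8 == 0 and w + 1 readable source bytes. Illustrative only. */
    static void bilinear_row_ssse3(const uint8_t *src, uint8_t *dst,
                                   int w, int f0, int f1) {
        const __m128i taps  = _mm_set1_epi16((int16_t)((f1 << 8) | f0));
        const __m128i round = _mm_set1_epi16(BIL_ROUND);
        for (int x = 0; x < w; x += 8) {
            __m128i a = _mm_loadl_epi64((const __m128i *)(src + x));
            __m128i b = _mm_loadl_epi64((const __m128i *)(src + x + 1));
            __m128i pairs = _mm_unpacklo_epi8(a, b);          /* punpcklbw   */
            __m128i sum   = _mm_maddubs_epi16(pairs, taps);   /* pmaddubsw   */
            sum = _mm_add_epi16(sum, round);                  /* paddw       */
            sum = _mm_srai_epi16(sum, BIL_SHIFT);             /* psraw       */
            _mm_storel_epi64((__m128i *)(dst + x),
                             _mm_packus_epi16(sum, sum));     /* packuswb    */
        }
    }
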
sad_sse4.asm 18 movq xmm3, MMWORD PTR [rdi+8]
20 punpcklqdq xmm1, xmm3
21 punpcklqdq xmm3, xmm2
29 movdqa xmm4, xmm3
30 mpsadbw xmm3, xmm0, 0x0
34 paddw xmm1, xmm3
39 movq xmm3, MMWORD PTR [rdi+8]
41 punpcklqdq xmm5, xmm3
42 punpcklqdq xmm3, xmm2
50 movdqa xmm4, xmm3
    [all...]
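
sad_sse4.asm builds its eight-candidate SADs on mpsadbw, which compares sliding 4-byte windows of one operand against a 4-byte block of the other selected by the immediate. A minimal SSE4.1 intrinsics sketch of the instruction's effect (parameter names are illustrative):

    #include <smmintrin.h>
    #include <stdint.h>

    /* Eight 16-bit results: out[i] = SAD(wide[i..i+3], blk[0..3]) for i = 0..7,
       matching `mpsadbw xmm_wide, xmm_blk, 0x0`. Both pointers must have at
       least 16 readable bytes. */
    static __m128i sad8x4_sse4(const uint8_t *wide, const uint8_t *blk) {
        __m128i w = _mm_loadu_si128((const __m128i *)wide);
        __m128i b = _mm_loadu_si128((const __m128i *)blk);
        return _mm_mpsadbw_epu8(w, b, 0x0);
    }
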
loopfilter_block_sse2_x86_64.asm 202 movdqa xmm3, i3
207 LF_FILTER_HEV_MASK xmm0, xmm1, xmm2, xmm3, xmm4, xmm8, xmm9, xmm10
211 movdqa xmm3, i4
213 LF_FILTER xmm1, xmm2, xmm3, xmm8, xmm0, xmm4
218 movdqa i4, xmm3
227 LF_FILTER_HEV_MASK xmm3, xmm8, xmm0, xmm1, xmm2, xmm4, xmm10, xmm11, xmm9
233 LF_FILTER xmm0, xmm1, xmm4, xmm8, xmm3, xmm2
244 movdqa xmm3, i13
247 LF_FILTER_HEV_MASK xmm4, xmm8, xmm0, xmm1, xmm2, xmm3, xmm9, xmm11, xmm10
251 movdqa xmm3, i1
    [all...]
iwalsh_sse2.asm 31 movdqa xmm3, xmm0 ;ip[4] ip[0]
34 psubw xmm3, xmm2 ;ip[4]-ip[8] ip[0]-ip[12] aka c1 d1
37 punpcklqdq xmm0, xmm3 ;d1 a1
38 punpckhqdq xmm4, xmm3 ;c1 b1
49 movdqa xmm3, xmm4 ; 13 12 11 10 03 02 01 00
51 punpckhwd xmm3, xmm0 ; 33 13 32 12 31 11 30 10
53 punpcklwd xmm4, xmm3 ; 31 21 11 01 30 20 10 00
54 punpckhwd xmm1, xmm3 ; 33 23 13 03 32 22 12 02
58 movdqa xmm3, xmm4 ;ip[4] ip[0]
63 psubw xmm3, xmm2 ;ip[4]-ip[8] ip[0]-ip[12] aka c1 d
    [all...]
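
The iwalsh_sse2.asm comments spell out the butterfly being vectorised: c1 = ip[4]-ip[8], d1 = ip[0]-ip[12], and so on. For orientation, the column pass of the inverse 4x4 Walsh-Hadamard transform in scalar form; this is a sketch following those comments, not the libvpx C code verbatim, and the row pass with its final (x+3)>>3 rounding is omitted:

    #include <stdint.h>

    static void iwalsh_columns(const int16_t ip[16], int16_t op[16]) {
        for (int i = 0; i < 4; i++) {               /* one butterfly per column */
            int a1 = ip[i + 0] + ip[i + 12];
            int b1 = ip[i + 4] + ip[i + 8];
            int c1 = ip[i + 4] - ip[i + 8];         /* "aka c1" above */
            int d1 = ip[i + 0] - ip[i + 12];        /* "aka d1" above */
            op[i + 0]  = (int16_t)(a1 + b1);
            op[i + 4]  = (int16_t)(c1 + d1);
            op[i + 8]  = (int16_t)(a1 - b1);
            op[i + 12] = (int16_t)(d1 - c1);
        }
    }
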
subpixel_sse2.asm 60 movq xmm3, MMWORD PTR [rsi - 2]
66 por xmm1, xmm3
74 punpcklbw xmm3, xmm0 ; xx05 xx04 xx03 xx02 xx01 xx01 xx-1 xx-2
77 pmullw xmm3, XMMWORD PTR [rdx] ; x[-2] * H[-2]; Tap 1
107 paddsw xmm4, xmm3
181 movq xmm3, MMWORD PTR [rsi - 2]
191 por xmm1, xmm3
199 punpcklbw xmm3, xmm0 ; xx05 xx04 xx03 xx02 xx01 xx01 xx-1 xx-2
202 pmullw xmm3, XMMWORD PTR [rdx] ; x[-2] * H[-2]; Tap 1
231 paddsw xmm4, xmm3
    [all...]
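
subpixel_sse2.asm widens pixels with punpcklbw, multiplies each tap with pmullw (the "x[-2] * H[-2]; Tap 1" comments), and accumulates with paddsw. The scalar shape of that 6-tap filter, with the rounding, shift, and clamp assumed to follow VP8's 7-bit filter convention (tap table H is a placeholder):

    #include <stdint.h>

    #define FILTER_SHIFT 7                       /* assumed VP8_FILTER_SHIFT */
    #define FILTER_ROUND (1 << (FILTER_SHIFT - 1))

    static uint8_t clamp_u8(int v) { return v < 0 ? 0 : v > 255 ? 255 : (uint8_t)v; }

    /* dst[x] = clamp((src[x-2..x+3] . H[0..5] + round) >> shift); the source
       must be readable 2 bytes before and 3 bytes past the row. */
    static void sixtap_h(const uint8_t *src, uint8_t *dst, int w,
                         const int16_t H[6]) {
        for (int x = 0; x < w; x++) {
            int sum = FILTER_ROUND;
            for (int t = 0; t < 6; t++)
                sum += src[x + t - 2] * H[t];    /* tap 1 is x[-2] * H[-2] */
            dst[x] = clamp_u8(sum >> FILTER_SHIFT);
        }
    }
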
subpixel_ssse3.asm 72 ;xmm3 free
120 movdqa xmm3, XMMWORD PTR [GLOBAL(shuf2bfrom1)]
140 pshufb xmm0, xmm3
213 movq xmm3, MMWORD PTR [rsi + 3] ; 3 4 5 6 7 8 9 10
215 punpcklbw xmm0, xmm3 ; -2 3 -1 4 0 5 1 6 2 7 3 8 4 9 5 10
224 movq xmm3, MMWORD PTR [rsi + 6]
230 punpcklbw xmm3, xmm7
233 movdqa xmm1, xmm3
235 pmaddubsw xmm3, xmm4
251 paddsw xmm3, xmm
    [all...]
  /hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp8/common/x86/
variance_impl_sse2.asm 40 movdqa xmm3, [rax+48]
44 pmaddwd xmm3, xmm3
47 paddd xmm2, xmm3
55 movdqa xmm3,xmm4
57 paddd xmm4,xmm3
58 movdqa xmm3,xmm4
60 paddd xmm4,xmm3
136 movdqa xmm3, xmm1
141 punpckhbw xmm3, xmm
    [all...]
variance_impl_ssse3.asm 94 movdqa xmm3, xmm1
97 punpckhbw xmm3, xmm2
99 pmaddubsw xmm3, [rax]
102 paddw xmm3, [GLOBAL(xmm_bi_rd)]
104 psraw xmm3, xmm_filter_shift
105 packuswb xmm1, xmm3
109 movdqa xmm3, xmm2
112 punpckhbw xmm3, xmm1
114 pmaddubsw xmm3, [rdx]
117 paddw xmm3, [GLOBAL(xmm_bi_rd)
    [all...]
sad_sse4.asm 18 movq xmm3, MMWORD PTR [rdi+8]
20 punpcklqdq xmm1, xmm3
21 punpcklqdq xmm3, xmm2
29 movdqa xmm4, xmm3
30 mpsadbw xmm3, xmm0, 0x0
34 paddw xmm1, xmm3
39 movq xmm3, MMWORD PTR [rdi+8]
41 punpcklqdq xmm5, xmm3
42 punpcklqdq xmm3, xmm2
50 movdqa xmm4, xmm3
    [all...]
loopfilter_block_sse2.asm 202 movdqa xmm3, i3
207 LF_FILTER_HEV_MASK xmm0, xmm1, xmm2, xmm3, xmm4, xmm8, xmm9, xmm10
211 movdqa xmm3, i4
213 LF_FILTER xmm1, xmm2, xmm3, xmm8, xmm0, xmm4
218 movdqa i4, xmm3
227 LF_FILTER_HEV_MASK xmm3, xmm8, xmm0, xmm1, xmm2, xmm4, xmm10, xmm11, xmm9
233 LF_FILTER xmm0, xmm1, xmm4, xmm8, xmm3, xmm2
244 movdqa xmm3, i13
247 LF_FILTER_HEV_MASK xmm4, xmm8, xmm0, xmm1, xmm2, xmm3, xmm9, xmm11, xmm10
251 movdqa xmm3, i1
    [all...]
iwalsh_sse2.asm 31 movdqa xmm3, xmm0 ;ip[4] ip[0]
34 psubw xmm3, xmm2 ;ip[4]-ip[8] ip[0]-ip[12] aka c1 d1
37 punpcklqdq xmm0, xmm3 ;d1 a1
38 punpckhqdq xmm4, xmm3 ;c1 b1
49 movdqa xmm3, xmm4 ; 13 12 11 10 03 02 01 00
51 punpckhwd xmm3, xmm0 ; 33 13 32 12 31 11 30 10
53 punpcklwd xmm4, xmm3 ; 31 21 11 01 30 20 10 00
54 punpckhwd xmm1, xmm3 ; 33 23 13 03 32 22 12 02
58 movdqa xmm3, xmm4 ;ip[4] ip[0]
63 psubw xmm3, xmm2 ;ip[4]-ip[8] ip[0]-ip[12] aka c1 d
    [all...]
subpixel_sse2.asm 60 movq xmm3, MMWORD PTR [rsi - 2]
66 por xmm1, xmm3
74 punpcklbw xmm3, xmm0 ; xx05 xx04 xx03 xx02 xx01 xx01 xx-1 xx-2
77 pmullw xmm3, XMMWORD PTR [rdx] ; x[-2] * H[-2]; Tap 1
107 paddsw xmm4, xmm3
181 movq xmm3, MMWORD PTR [rsi - 2]
191 por xmm1, xmm3
199 punpcklbw xmm3, xmm0 ; xx05 xx04 xx03 xx02 xx01 xx01 xx-1 xx-2
202 pmullw xmm3, XMMWORD PTR [rdx] ; x[-2] * H[-2]; Tap 1
231 paddsw xmm4, xmm3
    [all...]
  /external/libvpx/libvpx/vp9/encoder/x86/
vp9_sad_sse4.asm 18 movq xmm3, MMWORD PTR [rdi+8]
20 punpcklqdq xmm1, xmm3
21 punpcklqdq xmm3, xmm2
29 movdqa xmm4, xmm3
30 mpsadbw xmm3, xmm0, 0x0
34 paddw xmm1, xmm3
39 movq xmm3, MMWORD PTR [rdi+8]
41 punpcklqdq xmm5, xmm3
42 punpcklqdq xmm3, xmm2
50 movdqa xmm4, xmm3
    [all...]
vp9_variance_impl_sse2.asm 38 movdqa xmm3, [rax+48]
42 pmaddwd xmm3, xmm3
45 paddd xmm2, xmm3
53 movdqa xmm3,xmm4
55 paddd xmm4,xmm3
56 movdqa xmm3,xmm4
58 paddd xmm4,xmm3
134 movdqa xmm3, xmm1
139 punpckhbw xmm3, xmm
    [all...]
  /hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp9/encoder/x86/
vp9_sad_sse4.asm 18 movq xmm3, MMWORD PTR [rdi+8]
20 punpcklqdq xmm1, xmm3
21 punpcklqdq xmm3, xmm2
29 movdqa xmm4, xmm3
30 mpsadbw xmm3, xmm0, 0x0
34 paddw xmm1, xmm3
39 movq xmm3, MMWORD PTR [rdi+8]
41 punpcklqdq xmm5, xmm3
42 punpcklqdq xmm3, xmm2
50 movdqa xmm4, xmm3
    [all...]
vp9_variance_impl_sse2.asm 38 movdqa xmm3, [rax+48]
42 pmaddwd xmm3, xmm3
45 paddd xmm2, xmm3
53 movdqa xmm3,xmm4
55 paddd xmm4,xmm3
56 movdqa xmm3,xmm4
58 paddd xmm4,xmm3
134 movdqa xmm3, xmm1
139 punpckhbw xmm3, xmm
    [all...]
vp9_subpel_variance_impl_sse2.asm 46 movdqu xmm3, XMMWORD PTR [rsi+1]
47 pavgb xmm5, xmm3 ; xmm5 = avg(xmm1,xmm3) horizontal line 1
54 pavgb xmm1, xmm2 ; xmm1 = avg(xmm1,xmm3) horizontal line i+1
62 movq xmm3, QWORD PTR [rdi] ; xmm3 = d0,d1,d2..d7
63 punpcklbw xmm3, xmm0 ; xmm3 = words of above
64 psubw xmm5, xmm3 ; xmm5 -= xmm3
    [all...]
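
vp9_subpel_variance_impl_sse2.asm gets its half-pel prediction from pavgb (rounding average of each pixel with its right neighbour), then widens the 8-bit reference with punpcklbw against zero and subtracts with psubw. A brief sketch of one 8-pixel slice (names are illustrative):

    #include <emmintrin.h>
    #include <stdint.h>

    /* diff[0..7] = halfpel_pred - ref for one row slice. */
    static void halfpel_diff8(const uint8_t *src, const uint8_t *ref,
                              int16_t diff[8]) {
        __m128i s0   = _mm_loadu_si128((const __m128i *)src);        /* p[x]   */
        __m128i s1   = _mm_loadu_si128((const __m128i *)(src + 1));  /* p[x+1] */
        __m128i pred = _mm_avg_epu8(s0, s1);              /* pavgb: (a+b+1)>>1 */
        __m128i zero = _mm_setzero_si128();
        __m128i p16  = _mm_unpacklo_epi8(pred, zero);     /* bytes -> words    */
        __m128i r16  = _mm_unpacklo_epi8(
            _mm_loadl_epi64((const __m128i *)ref), zero); /* punpcklbw vs zero */
        _mm_storeu_si128((__m128i *)diff, _mm_sub_epi16(p16, r16));  /* psubw  */
    }
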
  /external/libvpx/libvpx/vp8/encoder/x86/
dct_sse2.asm 73 movq xmm3, MMWORD PTR[input+ pitch] ;33 32 31 30
76 punpcklqdq xmm1, xmm3 ;33 32 31 30 23 22 21 20
87 movdqa xmm3, xmm0
89 psubw xmm3, xmm1 ;c1 d1 c1 d1 c1 d1 c1 d1
91 psllw xmm3, 3 ;c1 <<= 3 d1 <<= 3
96 movdqa xmm4, xmm3
97 pmaddwd xmm3, XMMWORD PTR[GLOBAL(_5352_2217)] ;c1*2217 + d1*5352
100 paddd xmm3, XMMWORD PTR[GLOBAL(_14500)]
102 psrad xmm3, 12 ;(c1 * 2217 + d1 * 5352 + 14500)>>12
106 packssdw xmm3, xmm4 ;op[3] op[1
    [all...]
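
The dct_sse2.asm comments give the row arithmetic of VP8's forward 4x4 DCT: inputs pre-scaled by <<3, then pmaddwd against the packed (5352, 2217) constants, a +14500 bias, and >>12. Scalar form of that row pass for orientation (the 7500 bias on the companion output comes from the reference fdct and is not visible in this snippet):

    #include <stdint.h>

    /* One row: ip[0..3] -> op[0..3], first pass of the VP8 forward 4x4 DCT. */
    static void fdct4_row(const int16_t ip[4], int16_t op[4]) {
        int a1 = (ip[0] + ip[3]) << 3;
        int b1 = (ip[1] + ip[2]) << 3;
        int c1 = (ip[1] - ip[2]) << 3;              /* psllw xmm3, 3 */
        int d1 = (ip[0] - ip[3]) << 3;
        op[0] = (int16_t)(a1 + b1);
        op[2] = (int16_t)(a1 - b1);
        op[1] = (int16_t)((c1 * 2217 + d1 * 5352 + 14500) >> 12);
        op[3] = (int16_t)((d1 * 2217 - c1 * 5352 + 7500) >> 12);
    }
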
  /hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp8/encoder/x86/
dct_sse2.asm 73 movq xmm3, MMWORD PTR[input+ pitch] ;33 32 31 30
76 punpcklqdq xmm1, xmm3 ;33 32 31 30 23 22 21 20
87 movdqa xmm3, xmm0
89 psubw xmm3, xmm1 ;c1 d1 c1 d1 c1 d1 c1 d1
91 psllw xmm3, 3 ;c1 <<= 3 d1 <<= 3
96 movdqa xmm4, xmm3
97 pmaddwd xmm3, XMMWORD PTR[GLOBAL(_5352_2217)] ;c1*2217 + d1*5352
100 paddd xmm3, XMMWORD PTR[GLOBAL(_14500)]
102 psrad xmm3, 12 ;(c1 * 2217 + d1 * 5352 + 14500)>>12
106 packssdw xmm3, xmm4 ;op[3] op[1
    [all...]
  /external/llvm/test/MC/X86/
x86-32-avx.s 330 // CHECK: vshufps $8, %xmm1, %xmm2, %xmm3
332 vshufps $8, %xmm1, %xmm2, %xmm3
334 // CHECK: vshufps $8, -4(%ebx,%ecx,8), %xmm2, %xmm3
336 vshufps $8, -4(%ebx,%ecx,8), %xmm2, %xmm3
338 // CHECK: vshufpd $8, %xmm1, %xmm2, %xmm3
340 vshufpd $8, %xmm1, %xmm2, %xmm3
342 // CHECK: vshufpd $8, -4(%ebx,%ecx,8), %xmm2, %xmm3
344 vshufpd $8, -4(%ebx,%ecx,8), %xmm2, %xmm3
346 // CHECK: vcmpeqps %xmm1, %xmm2, %xmm3
348 vcmpeqps %xmm1, %xmm2, %xmm3
    [all...]
x86_64-xop-encoding.s 43 // CHECK: vphaddwd %xmm3, %xmm4
45 vphaddwd %xmm3, %xmm4
72 // CHECK: vphaddubw (%rcx), %xmm3
74 vphaddubw (%rcx), %xmm3
123 // CHECK: vphaddbd %xmm1, %xmm3
125 vphaddbd %xmm1, %xmm3
144 // CHECK: vfrczps 4(%rax), %xmm3
146 vfrczps 4(%rax), %xmm3
200 // CHECK: vpshld %xmm7, %xmm5, %xmm3
202 vpshld %xmm7, %xmm5, %xmm3
    [all...]
  /art/runtime/arch/x86/
memcmp16_x86.S 74 movdqu (%eax), %xmm3
78 pcmpeqb %xmm0, %xmm3
79 pmovmskb %xmm3, %edx
194 movdqa 32(%esi), %xmm3
195 palignr $2,%xmm2, %xmm3
196 pcmpeqb 16(%edi), %xmm3
198 pand %xmm1, %xmm3
199 pmovmskb %xmm3, %edx
219 movdqa 32(%esi), %xmm3
220 palignr $2,16(%esi), %xmm3
    [all...]
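
The memcmp16_x86.S hits use the classic compare-and-mask idiom: pcmpeqb produces 0xFF for each equal byte, pmovmskb packs those into a 16-bit mask, and any mask other than 0xFFFF pinpoints the first mismatch. A simplified one-block sketch (the real routine also handles misaligned inputs with palignr; __builtin_ctz is a GCC/Clang builtin):

    #include <emmintrin.h>
    #include <stdint.h>

    /* Byte index of the first difference in one 16-byte block, or -1 if equal. */
    static int first_diff_16(const uint8_t *a, const uint8_t *b) {
        __m128i va = _mm_loadu_si128((const __m128i *)a);
        __m128i vb = _mm_loadu_si128((const __m128i *)b);
        int mask = _mm_movemask_epi8(_mm_cmpeq_epi8(va, vb)); /* pcmpeqb+pmovmskb */
        if (mask == 0xFFFF)
            return -1;                            /* all 16 bytes equal */
        return __builtin_ctz(~mask & 0xFFFF);     /* lowest zero bit = mismatch */
    }
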
  /external/boringssl/win-x86/crypto/aes/
aesni-x86.asm 74 pxor xmm3,xmm0
99 pxor xmm3,xmm0
124 pxor xmm3,xmm0
154 pxor xmm3,xmm0
184 pxor xmm3,xmm0
220 pxor xmm3,xmm0
256 pxor xmm3,xmm0
308 pxor xmm3,xmm0
377 movdqu xmm3,[16+esi]
389 movups [16+edi],xmm3
    [all...]
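
The aesni-x86.asm hits appear to be the key-whitening step of the interleaved multi-block helpers: xmm0 holds round key 0 and `pxor xmm3, xmm0` XORs it into one of the blocks before the aesenc rounds. The single-block shape of that pattern, as a sketch with an assumed precomputed AES-128 key schedule (compile with -maes):

    #include <wmmintrin.h>

    static __m128i aes128_encrypt_block(__m128i block, const __m128i rk[11]) {
        block = _mm_xor_si128(block, rk[0]);         /* pxor with round key 0 */
        for (int r = 1; r < 10; r++)
            block = _mm_aesenc_si128(block, rk[r]);  /* aesenc                */
        return _mm_aesenclast_si128(block, rk[10]);  /* aesenclast            */
    }
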
  /external/llvm/test/CodeGen/X86/
bswap-vector.ll 122 ; CHECK-NOSSSE3-NEXT: movdqa %xmm0, %xmm3
123 ; CHECK-NOSSSE3-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm2[8],xmm3[9],xmm2[9],xmm3[10],xmm2[10],xmm3[11],xmm2[11],xmm3[12],xmm2[12],xmm3[13],xmm2[13],xmm3[14],xmm2[14],xmm3[15],xmm2[15
    [all...]

