    Searched full:xmm4 (Results 1 - 25 of 131)


  /external/libvpx/vp8/common/x86/
subpixel_sse2.asm 67 movdqa xmm4, xmm1
74 psrldq xmm4, 1 ; xx 0d 0c 0b 0a 09 08 07 06 05 04 03 02 01 00 -1
77 punpcklbw xmm4, xmm0 ; xx06 xx05 xx04 xx03 xx02 xx01 xx00 xx-1
80 pmullw xmm4, XMMWORD PTR [rdx+16] ; x[-1] * H[-1]; Tap 2
103 paddsw xmm4, xmm7
104 paddsw xmm4, xmm5
106 paddsw xmm4, xmm3
107 paddsw xmm4, xmm6
109 paddsw xmm4, xmm1
110 paddsw xmm4, [GLOBAL(rd)]
    [all...]
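
The subpixel_sse2.asm hit shows one tap of the 6-tap filter: shift the pixels with psrldq, widen them against zero with punpcklbw, multiply by the tap with pmullw, then accumulate all taps with saturating paddsw before adding the rounding constant rd. A minimal C-intrinsics sketch of the per-tap step (names are illustrative, not libvpx's API):

    #include <emmintrin.h>

    /* One filter tap: multiply widened 16-bit pixels by a tap value and
       accumulate with signed saturation (pmullw + paddsw). */
    static __m128i tap_accumulate(__m128i acc, __m128i pixels_i16,
                                  __m128i tap_i16)
    {
        return _mm_adds_epi16(acc, _mm_mullo_epi16(pixels_i16, tap_i16));
    }
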
iwalsh_sse2.asm 41 movdqa xmm4, xmm0
43 punpckhqdq xmm4, xmm3 ;c1 b1
46 movdqa xmm1, xmm4 ;c1 b1
47 paddw xmm4, xmm0 ;dl+cl a1+b1 aka op[4] op[0]
51 ;; movdqu [rdi + 0], xmm4
59 movdqa xmm3, xmm4 ; 13 12 11 10 03 02 01 00
60 punpcklwd xmm4, xmm0 ; 23 03 22 02 21 01 20 00
62 movdqa xmm1, xmm4 ; 23 03 22 02 21 01 20 00
63 punpcklwd xmm4, xmm3 ; 31 21 11 01 30 20 10 00
67 movdqa xmm3, xmm4 ;ip[4] ip[0]
    [all...]
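
The iwalsh_sse2.asm hit interleaves rows with punpcklwd/punpckhqdq to transpose the 4x4 coefficient block in registers. A sketch of that transpose idiom, assuming (as the snippet's layout comments suggest) two 4-coefficient rows per register:

    #include <emmintrin.h>

    /* Transpose a 4x4 block of int16: on entry *ab = row1:row0 and
       *cd = row3:row2; on exit *ab = col1:col0 and *cd = col3:col2.
       Two rounds of word unpacking, as in the snippet's comments. */
    static void transpose_4x4_i16(__m128i *ab, __m128i *cd)
    {
        __m128i t0 = _mm_unpacklo_epi16(*ab, *cd); /* 23 03 22 02 21 01 20 00 */
        __m128i t1 = _mm_unpackhi_epi16(*ab, *cd); /* 33 13 32 12 31 11 30 10 */
        *ab = _mm_unpacklo_epi16(t0, t1);          /* 31 21 11 01 30 20 10 00 */
        *cd = _mm_unpackhi_epi16(t0, t1);          /* 33 23 13 03 32 22 12 02 */
    }
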
idctllm_sse2.asm 38 movd xmm4, [rax]
41 pinsrw xmm4, [rax+32], 4
44 pmullw xmm4, xmm5
50 pshuflw xmm4, xmm4, 00000000b
51 pshufhw xmm4, xmm4, 00000000b
54 paddw xmm4, [GLOBAL(fours)]
57 psraw xmm4, 3
74 paddw xmm0, xmm4
    [all...]
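
The idctllm_sse2.asm hit handles the DC-only path: dequantize with pmullw, broadcast the DC words with pshuflw/pshufhw, add the rounding constant fours, and shift right by 3. A sketch of that sequence (the function name is illustrative; per the pinsrw at word 4, the register appears to carry two blocks' DC values):

    #include <emmintrin.h>

    /* DC path: dequantize, broadcast word 0 across the low half and
       word 4 across the high half (two blocks at once), then the
       rounded shift (dc + 4) >> 3. */
    static __m128i dc_values(__m128i coeffs, __m128i dequant)
    {
        __m128i d = _mm_mullo_epi16(coeffs, dequant);  /* pmullw      */
        d = _mm_shufflelo_epi16(d, 0x00);              /* pshuflw 0   */
        d = _mm_shufflehi_epi16(d, 0x00);              /* pshufhw 0   */
        d = _mm_add_epi16(d, _mm_set1_epi16(4));       /* paddw fours */
        return _mm_srai_epi16(d, 3);                   /* psraw 3     */
    }
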
recon_sse2.asm 50 movq xmm4, MMWORD PTR [rsi+24]
51 punpcklbw xmm4, xmm0
52 paddsw xmm4, XMMWORD PTR [rdx+48]
53 packuswb xmm4, xmm0 ; pack and unpack to saturate
54 movq MMWORD PTR [rdi+rax*2], xmm4
111 movdqa xmm4, XMMWORD PTR [rsi+48]
112 movdqa xmm5, xmm4
113 punpcklbw xmm4, xmm0
115 paddsw xmm4, XMMWORD PTR [rdx+96]
117 packuswb xmm4, xmm5 ; pack and unpack to saturate
    [all...]
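
The recon_sse2.asm hit is the reconstruction pattern: widen 8 predicted bytes against zero, add the signed 16-bit residual with saturation, then let packuswb clamp the sums back to 0..255 ("pack and unpack to saturate"). A sketch with hypothetical names:

    #include <emmintrin.h>

    /* Reconstruct 8 pixels: pred (unsigned bytes) + diff (signed words),
       clamped to the unsigned byte range by packuswb. */
    static __m128i recon8(__m128i pred_u8, __m128i diff_i16)
    {
        __m128i zero = _mm_setzero_si128();
        __m128i p = _mm_unpacklo_epi8(pred_u8, zero); /* punpcklbw */
        __m128i s = _mm_adds_epi16(p, diff_i16);      /* paddsw    */
        return _mm_packus_epi16(s, zero);             /* packuswb  */
    }
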
subpixel_ssse3.asm 60 movdqa xmm4, XMMWORD PTR [rax] ;k0_k5
80 pmaddubsw xmm0, xmm4
119 movdqa xmm4, XMMWORD PTR [GLOBAL(shuf3bfrom1)]
140 pshufb xmm2, xmm4
203 movdqa xmm4, XMMWORD PTR [rax] ;k0_k5
219 pmaddubsw xmm0, xmm4
236 pmaddubsw xmm3, xmm4
363 movdqa xmm4, XMMWORD PTR [rax] ;k0_k5
383 pmaddubsw xmm0, xmm4
509 movq xmm4, MMWORD PTR [rax + rdx * 2] ;
    [all...]
loopfilter_sse2.asm 21 movdqa xmm4, [rsi+rax] ; q1
27 movlps xmm4, [rsi] ; q1
32 movhps xmm4, [rdi]
39 movdqa XMMWORD PTR [rsp + 16], xmm4 ; store q1
43 movdqa xmm3, xmm4 ; q1
48 psubusb xmm4, xmm6 ; q1-=q2
51 por xmm4, xmm6 ; abs(q2-q1)
55 pmaxub xmm1, xmm4
67 movdqa xmm4, [rdi+4*rax] ; p2
71 movlps xmm4, [rsi] ; p2
    [all...]
postproc_sse2.asm 106 movq xmm4, QWORD PTR [rsi+rax] ; mm4 = r-1 p0..p7
107 punpcklbw xmm4, xmm0 ; mm4 = r-1 p0..p3
108 paddusw xmm3, xmm4 ; mm3 += mm5
112 psubusw xmm6, xmm4 ; mm6 = p0..p3 - r-2 p0..p3
113 psubusw xmm4, xmm1 ; mm5 = r-1 p0..p3 - p0..p3
114 paddusw xmm6, xmm4 ; mm6 = abs(r0 p0..p3 - r-1 p0..p3)
147 movd xmm4, DWORD PTR [rdi +rdx +6]
149 pslldq xmm4, 8
150 por xmm4, xmm7
152 movdqa xmm3, xmm4
    [all...]
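
Lines 112-114 of the postproc_sse2.asm hit are the unsigned absolute-difference idiom: with saturating subtraction, one of a-b and b-a is zero, so their sum is |a-b|. A one-line sketch of that trick:

    #include <emmintrin.h>

    /* |a - b| per unsigned 16-bit lane via two saturating subtractions
       (psubusw) and a saturating add (paddusw). */
    static __m128i absdiff_epu16(__m128i a, __m128i b)
    {
        return _mm_adds_epu16(_mm_subs_epu16(a, b), _mm_subs_epu16(b, a));
    }
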
postproc_mmx.c 512 movq xmm4, QWORD PTR [esi+eax] ;
514 punpcklbw xmm4, xmm0 ;
516 paddusw xmm3, xmm4 ;
523 psubusw xmm6, xmm4 ;
525 psubusw xmm4, xmm1 ;
527 paddusw xmm6, xmm4 ;
569 movd xmm4, DWORD PTR [edi +edx +6]
571 pslldq xmm4, 8
572 por xmm4, xmm7
574 movdqa xmm3, xmm4
624 movdqa xmm5, xmm4 ;
645 psrldq xmm4, 1 ;
943 movdqa xmm4, xmm2
946 punpckhwd xmm4, xmm0
967 movdqa xmm4, xmm5
970 pmulhw xmm4, xmm4
976 movdqa xmm4, xmm7
977 pslld xmm4, 4
979 psubd xmm4, xmm7
982 psubd xmm4, xmm2
985 psubd xmm4, flimit4
988 psrad xmm4, 31
1002 movdqu xmm4, vp8_rv[ecx*2]
1224 pshufd xmm4, xmm2, 3 // 0000 8--7 8--7 8--7 squared
1230 pshufd xmm4, xmm2, 01011111b // 0000 0000 9--6 9--6 squared
1236 pshufd xmm4, xmm2, 10111111b // 0000 0000 8--7 8--7 squared
    [all...]
  /external/libvpx/vp8/encoder/x86/
dct_sse2.asm 94 movdqa xmm4, xmm3
96 pmaddwd xmm4, XMMWORD PTR[GLOBAL(_2217_neg5352)];d1*2217 - c1*5352
99 paddd xmm4, XMMWORD PTR[GLOBAL(_7500)]
101 psrad xmm4, 12 ;(d1 * 2217 - c1 * 5352 + 7500)>>12
104 packssdw xmm3, xmm4 ;op[3] op[1]
137 pxor xmm4, xmm4 ;zero out for compare
140 pcmpeqw xmm2, xmm4
146 movdqa xmm4, xmm3
148 pmaddwd xmm4, XMMWORD PTR[GLOBAL(_2217_neg5352)] ;d1*2217 - c1*5352
    [all...]
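
The dct_sse2.asm hit is the classic pmaddwd fixed-point step: with d1 and c1 interleaved per dword and the constant pair (2217, -5352), pmaddwd yields d1*2217 - c1*5352; adding 7500 and shifting right by 12 rounds and rescales. A sketch using the snippet's constants (the helper name is illustrative):

    #include <emmintrin.h>

    /* (d1*2217 - c1*5352 + 7500) >> 12 for four (d1, c1) word pairs. */
    static __m128i mul_round_shift(__m128i dc_pairs /* d,c interleaved */)
    {
        const __m128i k = _mm_setr_epi16(2217, -5352, 2217, -5352,
                                         2217, -5352, 2217, -5352);
        __m128i v = _mm_madd_epi16(dc_pairs, k);     /* pmaddwd       */
        v = _mm_add_epi32(v, _mm_set1_epi32(7500));  /* rounding bias */
        return _mm_srai_epi32(v, 12);                /* psrad 12      */
    }
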
quantize_ssse3.asm 34 movdqa xmm4, [rdx + 16]
40 movdqa xmm5, xmm4
43 psraw xmm4, 15 ;sign of z (aka sz)
59 pxor xmm5, xmm4
61 psubw xmm5, xmm4
69 pxor xmm4, xmm4
73 pcmpeqw xmm1, xmm4 ;non zero mask
74 pcmpeqw xmm5, xmm4 ;non zero mask
quantize_sse2.asm 63 movdqa xmm4, [rdx + 16]
70 movdqa xmm5, xmm4
74 psraw xmm4, 15
78 pxor xmm5, xmm4
82 psubw xmm5, xmm4
184 pxor xmm3, xmm4
187 psubw xmm3, xmm4
265 movdqa xmm4, XMMWORD PTR[rdx + 16]
271 movdqa xmm5, xmm4
274 psraw xmm4, 15 ;sign of z (aka sz)
    [all...]
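
Both quantize hits build |z| and its sign the same way: psraw by 15 broadcasts the sign bit into a mask (sz), and (z ^ sz) - sz negates exactly the negative lanes; the same xor/sub pair restores the sign after quantization. A sketch:

    #include <emmintrin.h>

    /* Split z into |z|, returning the per-lane sign mask in *sz
       (0x0000 for non-negative lanes, 0xffff for negative ones). */
    static __m128i abs_and_sign_epi16(__m128i z, __m128i *sz)
    {
        *sz = _mm_srai_epi16(z, 15);                      /* psraw z, 15 */
        return _mm_sub_epi16(_mm_xor_si128(z, *sz), *sz); /* pxor; psubw */
    }
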
fwalsh_sse2.asm 84 pshufd xmm4, xmm0, 0xd8 ; d11 d10 a11 a10
89 movdqa xmm0, xmm4
91 punpckhqdq xmm4, xmm5 ; c11 c10 d11 d10
97 paddd xmm0, xmm4 ; b21 b20 a21 a20
98 psubd xmm2, xmm4 ; c21 c20 d21 d20
103 pxor xmm4, xmm4
104 movdqa xmm5, xmm4
105 pcmpgtd xmm4, xmm0
107 pand xmm4, [GLOBAL(cd1)]
    [all...]
variance_impl_ssse3.asm 122 pxor xmm4, xmm4
123 punpcklbw xmm1, xmm4
125 punpcklbw xmm5, xmm4
188 pxor xmm4, xmm4
189 punpcklbw xmm3, xmm4
191 punpcklbw xmm5, xmm4
232 movq xmm4, QWORD PTR [rdi+8]
233 punpcklbw xmm4, xmm
    [all...]
sad_ssse3.asm 58 movdqa xmm4, XMMWORD PTR [rdi]
62 palignr xmm5, xmm4, %2
65 palignr xmm6, xmm4, (%2+1)
67 palignr xmm7, xmm4, (%2+2)
74 movdqa xmm4, XMMWORD PTR [rdi]
78 palignr xmm1, xmm4, %2
81 palignr xmm2, xmm4, (%2+1)
83 palignr xmm3, xmm4, (%2+2)
94 movdqa xmm4, XMMWORD PTR [rdi+rdx]
98 palignr xmm1, xmm4, %2
    [all...]
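
The sad_ssse3.asm hit slides a byte window across two adjacent 16-byte loads with palignr, avoiding unaligned reloads; the shift is the macro argument %2 and must be an immediate. A sketch of one window (the constant 3 is only an example):

    #include <tmmintrin.h>

    /* Bytes lo[3..15] followed by hi[0..2]: a 16-byte window at byte
       offset 3 into the 32-byte pair (hi:lo). The offset must be a
       compile-time constant, as with %2 in the macro above. */
    static __m128i window3(__m128i hi, __m128i lo)
    {
        return _mm_alignr_epi8(hi, lo, 3); /* palignr */
    }
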
ssim_opt.asm 16 paddusw xmm14, xmm4 ; sum_r
20 movdqa xmm2, xmm4
23 pmaddwd xmm3, xmm4
92 movdqa xmm4, xmm6
94 punpckhbw xmm4, xmm0 ; high_r
99 movdqa xmm4, xmm6
101 punpcklbw xmm4, xmm0 ; low_r
181 movdqa xmm4, xmm6
183 punpcklbw xmm4, xmm0 ; low_r
temporal_filter_apply_sse2.asm 53 movd xmm4, rdx ; can't use rdx w/ shift
55 psrlw xmm5, xmm4
131 movdqa xmm4, [rax]
134 paddw xmm4, xmm2
137 movdqa [rax], xmm4
162 movdqa xmm4, [rdi]
167 paddw xmm4, xmm0
172 movdqa [rdi], xmm4
variance_impl_sse2.asm 34 pxor xmm4, xmm4
48 paddd xmm4, xmm0
49 paddd xmm4, xmm2
55 movdqa xmm3,xmm4
56 psrldq xmm4,8
57 paddd xmm4,xmm3
58 movdqa xmm3,xmm4
59 psrldq xmm4,4
60 paddd xmm4,xmm3
    [all...]
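
Lines 55-60 of the variance_impl_sse2.asm hit reduce four 32-bit partial sums to one scalar by repeatedly shifting the register right and adding (psrldq + paddd). A sketch:

    #include <emmintrin.h>

    /* Horizontal sum of the four 32-bit lanes of v. */
    static int hsum_epi32(__m128i v)
    {
        v = _mm_add_epi32(v, _mm_srli_si128(v, 8)); /* psrldq 8; paddd */
        v = _mm_add_epi32(v, _mm_srli_si128(v, 4)); /* psrldq 4; paddd */
        return _mm_cvtsi128_si32(v);                /* movd            */
    }
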
sad_sse4.asm 29 movdqa xmm4, xmm3
31 mpsadbw xmm4, xmm0, 0x5
35 paddw xmm1, xmm4
50 movdqa xmm4, xmm3
52 mpsadbw xmm4, xmm0, 0x5
56 paddw xmm5, xmm4
75 movdqa xmm4, xmm3
77 mpsadbw xmm4, xmm0, 0x5
81 paddw xmm5, xmm4
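
The sad_sse4.asm hit leans on mpsadbw: one instruction produces eight 16-bit SADs between a 4-byte block of one operand and eight overlapping 4-byte windows of the other. In the immediate 0x5, bits 1:0 select the second dword of the source block and bit 2 starts the reference windows at byte 4, matching the snippet. A sketch:

    #include <smmintrin.h>

    /* Eight SADs of src's dword 1 against ref's eight 4-byte windows
       starting at byte offsets 4..11. */
    static __m128i sad_windows(__m128i ref, __m128i src)
    {
        return _mm_mpsadbw_epu8(ref, src, 0x5); /* mpsadbw ref, src, 0x5 */
    }
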
  /external/llvm/test/MC/X86/
x86_64-xop-encoding.s 32 // CHECK: vphaddwq (%rcx), %xmm4
34 vphaddwq (%rcx), %xmm4
43 // CHECK: vphaddwd %xmm3, %xmm4
45 vphaddwd %xmm3, %xmm4
64 // CHECK: vphaddudq 8(%rcx,%rax), %xmm4
66 vphaddudq 8(%rcx,%rax), %xmm4
80 // CHECK: vphaddubq (%rcx), %xmm4
82 vphaddubq (%rcx), %xmm4
96 // CHECK: vphadddq (%rdx), %xmm4
98 vphadddq (%rdx), %xmm4
    [all...]
  /external/openssl/crypto/aes/asm/
vpaes-x86.pl 201 &movdqa ("xmm4",&QWP($k_sb1,$const)); # 4 : sb1u
202 &pshufb ("xmm4","xmm2"); # 4 = sb1u
203 &pxor ("xmm4","xmm5"); # 4 = sb1u + k
206 &pxor ("xmm0","xmm4"); # 0 = A
213 &movdqa ("xmm4",&QWP(0,$base,$magic)); # .Lk_mc_backward[]
218 &pshufb ("xmm3","xmm4"); # 3 = D
238 &movdqa ("xmm4","xmm7"); # 4 : 1/j
239 &pshufb ("xmm4","xmm0"); # 4 = 1/j
240 &pxor ("xmm4","xmm5"); # 4 = jak = 1/j + a/k
246 &pshufb ("xmm3","xmm4"); # 3 = 1/ja
    [all...]
vpaes-x86_64.pl 105 movdqa %xmm13, %xmm4 # 4 : sb1u
106 pshufb %xmm2, %xmm4 # 4 = sb1u
107 pxor %xmm5, %xmm4 # 4 = sb1u + k
110 pxor %xmm4, %xmm0 # 0 = A
117 movdqa (%r11,%r10), %xmm4 # .Lk_mc_backward[]
122 pshufb %xmm4, %xmm3 # 3 = D
142 movdqa %xmm10, %xmm4 # 4 : 1/j
143 pshufb %xmm0, %xmm4 # 4 = 1/j
144 pxor %xmm5, %xmm4 # 4 = jak = 1/j + a/k
150 pshufb %xmm4, %xmm3 # 3 = 1/ja
    [all...]
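
Both vpaes hits revolve around pshufb used as a 16-entry table lookup over nibbles (the sb1u, 1/j, and mc_backward steps). A minimal sketch of that primitive:

    #include <tmmintrin.h>

    /* pshufb as a table: each result byte is table[idx & 0x0f], or zero
       when the index byte's high bit is set. Splitting the input into
       high and low nibbles and combining two such lookups gives the
       S-box-style transforms in the snippets above. */
    static __m128i nibble_lookup(__m128i table, __m128i idx)
    {
        return _mm_shuffle_epi8(table, idx);
    }
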
  /dalvik/vm/mterp/x86-atom/
OP_SHR_LONG.S 47 movq .L64bits, %xmm4 # %xmm4<- lower 64 bits set
48 psllq %xmm3, %xmm4 # %xmm4<- correct mask for sign bits
49 por %xmm4, %xmm1 # %xmm1<- signed and shifted vBB
OP_SHR_LONG_2ADDR.S 48 movq .L64bits, %xmm4 # %xmm4<- lower 64 bits set
49 psllq %xmm3, %xmm4 # %xmm4<- correct mask for sign bits
50 por %xmm4, %xmm1 # %xmm1<- signed and shifted vBB
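
Both OP_SHR_LONG hits patch up a 64-bit arithmetic right shift, which SSE2 lacks: the value is shifted logically, then the all-ones constant .L64bits, shifted left so that only the vacated high bits remain set, is ORed in for negative inputs. A standalone sketch that also performs the sign test the interpreter handles with a branch:

    #include <emmintrin.h>

    /* Arithmetic right shift of two packed int64 by n (0 < n < 64). */
    static __m128i sra_epi64_sse2(__m128i v, int n)
    {
        __m128i lsr  = _mm_srl_epi64(v, _mm_cvtsi32_si128(n));      /* psrlq */
        __m128i ones = _mm_set1_epi32(-1);                          /* .L64bits */
        __m128i top  = _mm_sll_epi64(ones, _mm_cvtsi32_si128(64 - n));
        /* replicate each lane's sign bit across its 32-bit halves */
        __m128i neg  = _mm_srai_epi32(
            _mm_shuffle_epi32(v, _MM_SHUFFLE(3, 3, 1, 1)), 31);
        return _mm_or_si128(lsr, _mm_and_si128(top, neg));          /* por */
    }
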
  /external/libyuv/files/source/
rotate.cc 97 movq xmm4, qword ptr [eax]
100 punpcklbw xmm4, xmm5
102 movdqa xmm5, xmm4
117 punpcklwd xmm4, xmm6
119 movdqa xmm6, xmm4
125 punpckldq xmm0, xmm4
127 movdqa xmm4, xmm0
128 palignr xmm4, xmm4, 8
129 movq qword ptr [edx + esi], xmm4
198 movdqa xmm4, [eax]
202 punpcklbw xmm4, xmm5
226 punpcklwd xmm4, xmm6
240 movdqa xmm4, xmm6
    [all...]
  /external/llvm/test/CodeGen/X86/
fold-xmm-zero.ll 12 %0 = tail call %0 asm sideeffect "foo", "={xmm0},={xmm1},={xmm2},={xmm3},={xmm4},={xmm5},={xmm6},={xmm7},0,1,2,3,4,5,6,7,~{dirflag},~{fpsr},~{flags}"(float 1.000000e+00, float 2.000000e+00, float 3.000000e+00, float 4.000000e+00, float 5.000000e+00, float 6.000000e+00, float 7.000000e+00, float 8.000000e+00) nounwind
22 %1 = tail call %0 asm sideeffect "bar", "={xmm0},={xmm1},={xmm2},={xmm3},={xmm4},={xmm5},={xmm6},={xmm7},0,1,2,3,4,5,6,7,~{dirflag},~{fpsr},~{flags}"(float %div, float %asmresult8, float %asmresult9, float %asmresult10, float %asmresult11, float %asmresult12, float %asmresult13, float %asmresult14) nounwind
32 %2 = tail call %0 asm sideeffect "baz", "={xmm0},={xmm1},={xmm2},={xmm3},={xmm4},={xmm5},={xmm6},={xmm7},0,1,2,3,4,5,6,7,~{dirflag},~{fpsr},~{flags}"(float %div33, float %asmresult25, float %asmresult26, float %asmresult27, float %asmresult28, float %asmresult29, float %asmresult30, float %asmresult31) nounwind
