    Searched full:xmm1 (Results 26 - 50 of 242)


  /external/libyuv/files/source/
compare.cc 71 // 83: 66 0F 38 40 CD pmulld xmm1,xmm5
87 movdqu xmm1, [eax] // src[0-15] local
91 movdqa xmm2, xmm1
101 punpckhbw xmm1, xmm7 // src[8-15] local
102 movdqa xmm2, xmm1
106 punpckhwd xmm1, xmm7 // src[12-15] local
107 pmulld(0xcd) // pmulld xmm1, xmm5
109 paddd xmm1, xmm2 local
111 paddd xmm1, xmm3 local
113 pshufd xmm2, xmm1, 14 // upper 2 dword local
114 paddd xmm1, xmm2 local
115 pshufd xmm2, xmm1, 1 local
116 paddd xmm1, xmm2 local
267 movdqa xmm1, [eax] local
272 psubusb xmm1, xmm2 local
274 por xmm1, xmm2 local
276 punpcklbw xmm1, xmm5 local
278 pmaddwd xmm1, xmm1 local
284 pshufd xmm1, xmm0, 0EEh local
286 pshufd xmm1, xmm0, 01h local
    [all...]
row_posix.cc 130 "movdqa %%xmm0,%%xmm1 \n"
132 "punpckhwd %%xmm1,%%xmm1 \n"
134 "por %%xmm5,%%xmm1 \n"
136 "movdqa %%xmm1,0x10(%1) \n"
146 , "xmm0", "xmm1", "xmm5"
254 "movdqu 0x10(%0),%%xmm1 \n"
258 "palignr $0x8,%%xmm1,%%xmm2 \n"
261 "palignr $0xc,%%xmm0,%%xmm1 \n"
265 "pshufb %%xmm4,%%xmm1 \n
    [all...]
scale.cc 215 movdqa xmm1, [eax + 16] local
218 pand xmm1, xmm5 local
219 packuswb xmm0, xmm1
245 movdqa xmm1, [eax + 16] local
250 pavgb xmm1, xmm3 local
254 movdqa xmm3, xmm1
255 psrlw xmm1, 8 local
259 pavgw xmm1, xmm3 local
260 packuswb xmm0, xmm1
289 movdqu xmm1, [eax + 16] local
292 pand xmm1, xmm5 local
320 movdqu xmm1, [eax + 16] local
325 pavgb xmm1, xmm3 local
330 psrlw xmm1, 8 local
334 pavgw xmm1, xmm3 local
364 movdqa xmm1, [eax + 16] local
367 pand xmm1, xmm5 local
398 movdqa xmm1, [eax + 16] local
402 pavgb xmm1, xmm3 local
411 pavgb xmm1, xmm3 local
416 psrlw xmm1, 8 local
420 pavgw xmm1, xmm3 local
457 movdqa xmm1, [eax + 16] local
460 pand xmm1, xmm5 local
492 movdqa xmm1, [eax + 16] local
496 pavgb xmm1, xmm3 local
506 pavgb xmm1, xmm3 local
523 pavgb xmm1, xmm3 local
526 psadbw xmm1, xmm7 local
528 pshufd xmm1, xmm1, 0x8d // x3x2 -> 32xx local
568 movdqa xmm1, [eax + 16] local
571 palignr xmm1, xmm0, 8 local
573 pshufb xmm1, xmm4 local
622 movdqa xmm1, [eax + esi] local
631 movdqu xmm1, [eax + esi + 8] local
640 movdqa xmm1, [eax + esi + 16] local
644 movdqa xmm1, kMadd21 local
681 movdqa xmm1, [eax + esi] local
682 pavgb xmm1, xmm0 local
691 movdqu xmm1, [eax + esi + 8] local
692 pavgb xmm1, xmm0 local
701 movdqa xmm1, [eax + esi + 16] local
703 pavgb xmm1, xmm0 local
706 movdqa xmm1, kMadd21 local
739 movdqa xmm1, [eax + 16] // 16 pixels -> 6,7,8,9,10,11 local
742 pshufb xmm1, xmm5 local
747 movhlps xmm1, xmm0 local
776 movhlps xmm1, xmm0 local
779 punpcklbw xmm1, xmm5 local
783 paddusw xmm1, xmm7 local
790 paddusw xmm1, xmm7 local
800 psrldq xmm1, 2 local
802 psrldq xmm1, 2 local
844 movdqa xmm1, xmm0 // 16 pixels -> 0,1,2,3,4,5 of xmm1 local
845 pshufb xmm1, xmm2 local
848 paddusw xmm1, xmm6 local
850 paddusw xmm1, xmm0 local
852 pmulhuw xmm1, xmm5 // divide by 3,3,2, 3,3,2 local
853 packuswb xmm1, xmm1 local
857 psrlq xmm1, 16 local
892 movdqa xmm1, xmm0 local
894 punpckhbw xmm1, xmm4 local
909 paddusw xmm1, xmm3 local
963 movdqa xmm1, xmm0 local
968 punpckhbw xmm1, xmm4 local
974 paddw xmm1, xmm3 local
1056 movdqa xmm1, xmm0 local
1058 punpckhbw xmm1, xmm2 local
1060 pmaddubsw xmm1, xmm5 local
1062 psrlw xmm1, 7 local
    [all...]
  /external/valgrind/main/none/tests/amd64/
aes.c 69 ; xmm1 and xmm2 hold two 128-bit inputs (xmm1 = State; xmm2 = Round key).
70 ; The result is delivered in xmm1.
80 "movdqu %1, %%xmm1" "\n\t"
82 "aesdec %%xmm2, %%xmm1" "\n\t"
83 "movdqu %%xmm1, %0" "\n\t"
87 : /*trash*/ "xmm1", "xmm2"
101 ; xmm1 and xmm2 hold two 128-bit inputs (xmm1 = State; xmm2 = Round key).
102 ; The result is delivered in xmm1
    [all...]
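The aes.c comments above document the register convention for a single AES decryption round: xmm1 holds the State, xmm2 the Round key, and the result comes back in xmm1. A minimal C intrinsics sketch of that same round, using _mm_aesdec_si128 (which compiles to the aesdec instruction shown); the wrapper name is illustrative, not part of aes.c:

    #include <wmmintrin.h>   /* AES-NI intrinsics; build with -maes */

    /* One AESDEC round: the updated state replaces the input state,
     * mirroring "The result is delivered in xmm1" in the snippet above. */
    static __m128i aesdec_round_sketch(__m128i state, __m128i round_key) {
        return _mm_aesdec_si128(state, round_key);
    }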
  /external/llvm/test/MC/X86/
x86_64-xop-encoding.s 8 // CHECK: vphsubwd (%rcx,%rax), %xmm1
10 vphsubwd (%rcx,%rax), %xmm1
11 // CHECK: vphsubwd %xmm0, %xmm1
13 vphsubwd %xmm0, %xmm1
16 // CHECK: vphsubdq (%rcx,%rax), %xmm1
18 vphsubdq (%rcx,%rax), %xmm1
19 // CHECK: vphsubdq %xmm0, %xmm1
21 vphsubdq %xmm0, %xmm1
24 // CHECK: vphsubbw (%rax), %xmm1
26 vphsubbw (%rax), %xmm1
    [all...]
x86-32-avx.s 274 // CHECK: vunpckhps %xmm1, %xmm2, %xmm4
276 vunpckhps %xmm1, %xmm2, %xmm4
278 // CHECK: vunpckhpd %xmm1, %xmm2, %xmm4
280 vunpckhpd %xmm1, %xmm2, %xmm4
282 // CHECK: vunpcklps %xmm1, %xmm2, %xmm4
284 vunpcklps %xmm1, %xmm2, %xmm4
286 // CHECK: vunpcklpd %xmm1, %xmm2, %xmm4
288 vunpcklpd %xmm1, %xmm2, %xmm4
306 // CHECK: vcmpps $0, %xmm0, %xmm6, %xmm1
308 vcmpps $0, %xmm0, %xmm6, %xmm1
    [all...]
  /external/libvpx/libvpx/vp8/common/x86/
sad_sse4.asm 17 movq xmm1, MMWORD PTR [rdi]
20 punpcklqdq xmm1, xmm3
23 movdqa xmm2, xmm1
24 mpsadbw xmm1, xmm0, 0x0
33 paddw xmm1, xmm2
34 paddw xmm1, xmm3
35 paddw xmm1, xmm4
58 paddw xmm1, xmm5
83 paddw xmm1, xmm5
89 movq xmm1, MMWORD PTR [rdi
    [all...]
subpixel_ssse3.asm 80 movdqa xmm1, xmm0
83 movdqa xmm2, xmm1
84 pshufb xmm1, [GLOBAL(shuf2bfrom1)]
87 pmaddubsw xmm1, xmm5
95 paddsw xmm0, xmm1
135 movq xmm1, MMWORD PTR [rsi + 3] ; 3 4 5 6 7 8 9 10
137 punpcklbw xmm0, xmm1 ; -2 3 -1 4 0 5 1 6 2 7 3 8 4 9 5 10
217 movdqa xmm1, xmm0
220 movdqa xmm2, xmm1
221 pshufb xmm1, [GLOBAL(shuf2bfrom1)
    [all...]
idctllm_sse2.asm 60 movq xmm1, [rax+rdx]
65 punpcklbw xmm1, xmm5
72 paddw xmm1, xmm4
78 packuswb xmm1, xmm5
84 movq [rax + rdx], xmm1
126 ; note the transpose of xmm1 and xmm2, necessary for shuffle
130 movdqa xmm1, [rax+32]
142 pmullw xmm1, [rdx]
148 punpckldq xmm0, xmm1
149 punpckhdq xmm4, xmm1
    [all...]
loopfilter_block_sse2.asm 199 movdqa xmm1, i1
206 LF_FILTER_HEV_MASK xmm0, xmm1, xmm2, xmm3, xmm4, xmm8, xmm9, xmm10
208 movdqa xmm1, i2
212 LF_FILTER xmm1, xmm2, xmm3, xmm8, xmm0, xmm4
213 movdqa i2, xmm1
221 movdqa xmm1, i7
226 LF_FILTER_HEV_MASK xmm3, xmm8, xmm0, xmm1, xmm2, xmm4, xmm10, xmm11, xmm9
229 movdqa xmm1, i7
232 LF_FILTER xmm0, xmm1, xmm4, xmm8, xmm3, xmm2
234 movdqa i7, xmm1
    [all...]
loopfilter_sse2.asm 31 movdqa xmm1, [rsi+2*rax] ; q2
37 movlps xmm1, [rsi + rcx] ; q2
42 movhps xmm1, [rdi + rcx]
49 movdqa [rsp+_q2], xmm1 ; store q2
54 movdqa xmm6, xmm1 ; q2
57 psubusb xmm1, xmm2 ; q2-=q3
64 por xmm1, xmm2 ; abs(q3-q2)
67 pmaxub xmm1, xmm4
75 pmaxub xmm1, xmm5
101 pmaxub xmm1, xmm4 ; abs(p3 - p2
    [all...]
iwalsh_sse2.asm 27 movdqa xmm1, [rcx + 16] ;ip[12] ip[8]
30 pshufd xmm2, xmm1, 4eh ;ip[8] ip[12]
40 movdqa xmm1, xmm4 ;c1 b1
42 psubw xmm0, xmm1 ;d1-c1 a1-b1 aka op[12] op[8]
52 movdqa xmm1, xmm4 ; 23 03 22 02 21 01 20 00
54 punpckhwd xmm1, xmm3 ; 33 23 13 03 32 22 12 02
57 pshufd xmm2, xmm1, 4eh ;ip[8] ip[12]
69 movdqa xmm1, xmm5 ;c1 b1
71 psubw xmm4, xmm1 ;d1-c1 a1-b1 aka op[12] op[8]
subpixel_sse2.asm 61 movq xmm1, MMWORD PTR [rsi + 6]
65 pslldq xmm1, 8
66 por xmm1, xmm3
68 movdqa xmm4, xmm1
69 movdqa xmm5, xmm1
71 movdqa xmm6, xmm1
72 movdqa xmm7, xmm1
95 psrldq xmm1, 5 ; xx xx xx xx xx 0d 0c 0b 0a 09 08 07 06 05 04 03
100 punpcklbw xmm1, xmm0 ; xx0a xx09 xx08 xx07 xx06 xx05 xx04 xx03
101 pmullw xmm1, [rdx+80] ; x[ 3] * h[ 3] ; Tap
    [all...]
  /external/libvpx/libvpx/vp8/encoder/x86/
temporal_filter_apply_sse2.asm 81 movq xmm1, [rsi] ; second row
83 punpcklbw xmm1, xmm7 ; src[ 8-15]
89 movdqa xmm1, xmm0
91 punpckhbw xmm1, xmm7 ; src[ 8-15]
101 psubw xmm1, xmm3 ; src - pred[ 8-15]
105 pmullw xmm1, xmm1 ; modifer[ 8-15]^2
109 pmullw xmm1, [GLOBAL(_const_3w)]
113 paddw xmm1, [rsp + rounding_bit]
117 psrlw xmm1, [rsp + strength
    [all...]
  /external/compiler-rt/lib/i386/
floatdidf.S 22 cvtsi2sd 8(%esp), %xmm1
26 mulsd REL_ADDR(twop32), %xmm1 // a_hi as a double (without rounding)
28 subsd %xmm2, %xmm1 // a_hi - 0x1p52 (no rounding occurs)
30 addsd %xmm1, %xmm0 // a_hi + a_lo (round happens here)
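The floatdidf.S comments above spell out the conversion strategy: split the 64-bit input into a signed high word and an unsigned low word, produce each as an exact double, and let the single final add do the only rounding. A hedged C sketch of that idea (the asm builds the low-word double by ORing it into the mantissa of 0x1p52 and subtracting 0x1p52; the plain cast below is the portable equivalent, and the function name is illustrative, not compiler-rt's):

    #include <stdint.h>

    static double floatdidf_sketch(int64_t a) {
        double hi = (double)(int32_t)(a >> 32) * 0x1p32;  /* a_hi as a double, exact */
        double lo = (double)(uint32_t)a;                  /* a_lo is exact: lo < 2^32 */
        return hi + lo;                                   /* rounding happens here only */
    }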
floatundisf.S 33 movd 8(%esp), %xmm1
35 punpckldq %xmm1, %xmm0
73 movd 8(%esp), %xmm1
75 punpckldq %xmm1, %xmm0
82 movsd STICKY, %xmm1 // (big input) ? 0xfff : 0
86 andpd %xmm0, %xmm1 // (big input) ? input & 0xfff : 0
89 orpd %xmm2, %xmm1 // 0x1.0p52 + ((big input) ? input & 0xfff : input)
90 orpd %xmm1, %xmm0 // 0x1.0p52 + ((big input) ? (input >> 12 | input & 0xfff) : input)
floatundixf.S 27 movss 4(%esp), %xmm1 // lo 32 bits of input
29 orpd REL_ADDR(twop52), %xmm1 // 2^52 + lo (as a double)
31 movsd %xmm1, 4(%esp)
  /external/openssl/crypto/aes/asm/
aesni-x86.S 14 movups 16(%edx),%xmm1
20 movups (%edx),%xmm1
38 movups 16(%edx),%xmm1
44 movups (%edx),%xmm1
56 movups 16(%edx),%xmm1
67 movups 16(%edx),%xmm1
87 movups 16(%edx),%xmm1
98 movups 16(%edx),%xmm1
117 movups 16(%edx),%xmm1
131 movups 16(%edx),%xmm1
    [all...]
vpaes-x86.pl 177 ## Clobbers %xmm1-%xmm5, %eax, %ebx, %ecx, %edx
183 &movdqa ("xmm1","xmm6")
185 &pandn ("xmm1","xmm0");
187 &psrld ("xmm1",4);
191 &pshufb ("xmm0","xmm1");
209 &movdqa ("xmm1",&QWP(-0x40,$base,$magic));# .Lk_mc_forward[]
215 &pshufb ("xmm0","xmm1"); # 0 = B
221 &pshufb ("xmm0","xmm1"); # 0 = 2B+C
228 &movdqa ("xmm1","xmm6"); # 1 : i
229 &pandn ("xmm1","xmm0"); # 1 = i<<
    [all...]
vpaes-x86.S 73 movdqa %xmm6,%xmm1
75 pandn %xmm0,%xmm1
77 psrld $4,%xmm1
97 movdqa -64(%ebx,%ecx,1),%xmm1
114 movdqa %xmm6,%xmm1
115 pandn %xmm0,%xmm1
116 psrld $4,%xmm1
120 pxor %xmm1,%xmm0
133 pxor %xmm1,%xmm3
140 movdqa 64(%ebx,%ecx,1),%xmm1
    [all...]
  /external/llvm/test/CodeGen/X86/
illegal-vector-args-return.ll 1 ; RUN: llc < %s -march=x86 -mattr=+sse2 -mcpu=nehalem | grep "mulpd %xmm3, %xmm1"
3 ; RUN: llc < %s -march=x86 -mattr=+sse2 -mcpu=nehalem | grep "addps %xmm3, %xmm1"
fma_patterns.ll 6 ; CHECK: vfmadd213ps %xmm2, %xmm1, %xmm0
9 ; CHECK_FMA4: vfmaddps %xmm2, %xmm1, %xmm0, %xmm0
18 ; CHECK: fmsub213ps %xmm2, %xmm1, %xmm0
21 ; CHECK_FMA4: vfmsubps %xmm2, %xmm1, %xmm0, %xmm0
30 ; CHECK: fnmadd213ps %xmm2, %xmm1, %xmm0
33 ; CHECK_FMA4: vfnmaddps %xmm2, %xmm1, %xmm0, %xmm0
42 ; CHECK: fnmsub213ps %xmm2, %xmm1, %xmm0
45 ; CHECK_FMA4: fnmsubps %xmm2, %xmm1, %xmm0, %xmm0
125 ; CHECK: vfmsub213pd %xmm2, %xmm1, %xmm0
128 ; CHECK_FMA4: vfmsubpd %xmm2, %xmm1, %xmm0, %xmm
    [all...]
  /external/openssl/crypto/bn/asm/
x86_64-gf2m.S 83 movq (%rsp,%rsi,8),%xmm1
86 pslldq $1,%xmm1
92 pxor %xmm1,%xmm0
98 movq (%rsp,%rsi,8),%xmm1
101 pslldq $2,%xmm1
107 pxor %xmm1,%xmm0
113 movq (%rsp,%rsi,8),%xmm1
116 pslldq $3,%xmm1
122 pxor %xmm1,%xmm0
128 movq (%rsp,%rsi,8),%xmm1
    [all...]
  /external/llvm/test/MC/Disassembler/X86/
marked-up.txt 5 # CHECK: xorps <reg:%xmm1>, <reg:%xmm2>
x86-32.txt 156 # CHECK: vandps (%edx), %xmm1, %xmm7
168 # CHECK: vmaskmovpd %xmm0, %xmm1, (%eax)
175 # CHECK: blendps $129, %xmm2, %xmm1
178 # CHECK: blendpd $129, %xmm2, %xmm1
181 # CHECK: pblendw $129, %xmm2, %xmm1
184 # CHECK: mpsadbw $129, %xmm2, %xmm1
187 # CHECK: dpps $129, %xmm2, %xmm1
190 # CHECK: dppd $129, %xmm2, %xmm1
193 # CHECK: insertps $129, %xmm2, %xmm1
208 # CHECK: vpblendw $129, %xmm2, %xmm5, %xmm1
    [all...]
