/toolchain/binutils/binutils-2.25/gas/testsuite/gas/i386/ |
katmai.d |
  14 12: 0f 55 ee [ ]*andnps %xmm6,%xmm5
  21 2e: 0f c2 e5 06 [ ]*cmpnleps %xmm5,%xmm4
  22 32: 0f c2 2e 07 [ ]*cmpordps \(%esi\),%xmm5
  29 53: 0f c2 e5 01 [ ]*cmpltps %xmm5,%xmm4
  30 57: 0f c2 2e 01 [ ]*cmpltps \(%esi\),%xmm5
  38 7c: 0f c2 ee 03 [ ]*cmpunordps %xmm6,%xmm5
  45 9d: 0f c2 e5 05 [ ]*cmpnltps %xmm5,%xmm4
  46 a1: 0f c2 2e 05 [ ]*cmpnltps \(%esi\),%xmm5
  54 c6: 0f c2 ee 07 [ ]*cmpordps %xmm6,%xmm5
  62 e5: f3 0f 2a 2e [ ]*cvtsi2ssl \(%esi\),%xmm5
  [all...]
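The cmpXXps mnemonics in the katmai.d hits above are aliases of cmpps with a predicate immediate, visible as the last opcode byte (cmpltps = 01, cmpunordps = 03, cmpnltps = 05, cmpnleps = 06, cmpordps = 07). A minimal C sketch of the same predicates through the standard SSE intrinsics, for illustration only (the function name is mine, not testsuite code):

    #include <xmmintrin.h>

    /* Each lane becomes all-ones where the predicate holds, else all-zeros;
     * compilers emit these intrinsics as cmpps with the immediates seen in
     * the dump. */
    __m128 compare_masks(__m128 a, __m128 b) {
        __m128 lt    = _mm_cmplt_ps(a, b);      /* cmpltps    */
        __m128 nle   = _mm_cmpnle_ps(a, b);     /* cmpnleps   */
        __m128 unord = _mm_cmpunord_ps(a, b);   /* cmpunordps */
        __m128 ord   = _mm_cmpord_ps(a, b);     /* cmpordps   */
        return _mm_or_ps(_mm_and_ps(lt, ord), _mm_and_ps(nle, unord));
    }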
avx-gather-intel.d |
  28 [ ]*[a-f0-9]+: c4 e2 51 92 34 25 08 00 00 00 vgatherdps xmm6,DWORD PTR \[xmm4\*1\+0x8\],xmm5
  29 [ ]*[a-f0-9]+: c4 e2 51 92 34 25 f8 ff ff ff vgatherdps xmm6,DWORD PTR \[xmm4\*1-0x8\],xmm5
  30 [ ]*[a-f0-9]+: c4 e2 51 92 34 25 00 00 00 00 vgatherdps xmm6,DWORD PTR \[xmm4\*1\+0x0\],xmm5
  31 [ ]*[a-f0-9]+: c4 e2 51 92 34 25 98 02 00 00 vgatherdps xmm6,DWORD PTR \[xmm4\*1\+0x298\],xmm5
  32 [ ]*[a-f0-9]+: c4 e2 51 92 34 e5 08 00 00 00 vgatherdps xmm6,DWORD PTR \[xmm4\*8\+0x8\],xmm5
  33 [ ]*[a-f0-9]+: c4 e2 51 92 34 e5 f8 ff ff ff vgatherdps xmm6,DWORD PTR \[xmm4\*8-0x8\],xmm5
  34 [ ]*[a-f0-9]+: c4 e2 51 92 34 e5 00 00 00 00 vgatherdps xmm6,DWORD PTR \[xmm4\*8\+0x0\],xmm5
  35 [ ]*[a-f0-9]+: c4 e2 51 92 34 e5 98 02 00 00 vgatherdps xmm6,DWORD PTR \[xmm4\*8\+0x298\],xmm5
  40 [ ]*[a-f0-9]+: c4 e2 51 90 34 25 08 00 00 00 vpgatherdd xmm6,DWORD PTR \[xmm4\*1\+0x8\],xmm5
  41 [ ]*[a-f0-9]+: c4 e2 51 90 34 25 f8 ff ff ff vpgatherdd xmm6,DWORD PTR \[xmm4\*1-0x8\],xmm5
  [all...]
avx-gather.d |
  27 [ ]*[a-f0-9]+: c4 e2 51 92 34 25 08 00 00 00 vgatherdps %xmm5,0x8\(,%xmm4,1\),%xmm6
  28 [ ]*[a-f0-9]+: c4 e2 51 92 34 25 f8 ff ff ff vgatherdps %xmm5,-0x8\(,%xmm4,1\),%xmm6
  29 [ ]*[a-f0-9]+: c4 e2 51 92 34 25 00 00 00 00 vgatherdps %xmm5,0x0\(,%xmm4,1\),%xmm6
  30 [ ]*[a-f0-9]+: c4 e2 51 92 34 25 98 02 00 00 vgatherdps %xmm5,0x298\(,%xmm4,1\),%xmm6
  31 [ ]*[a-f0-9]+: c4 e2 51 92 34 e5 08 00 00 00 vgatherdps %xmm5,0x8\(,%xmm4,8\),%xmm6
  32 [ ]*[a-f0-9]+: c4 e2 51 92 34 e5 f8 ff ff ff vgatherdps %xmm5,-0x8\(,%xmm4,8\),%xmm6
  33 [ ]*[a-f0-9]+: c4 e2 51 92 34 e5 00 00 00 00 vgatherdps %xmm5,0x0\(,%xmm4,8\),%xmm6
  34 [ ]*[a-f0-9]+: c4 e2 51 92 34 e5 98 02 00 00 vgatherdps %xmm5,0x298\(,%xmm4,8\),%xmm6
  39 [ ]*[a-f0-9]+: c4 e2 51 90 34 25 08 00 00 00 vpgatherdd %xmm5,0x8\(,%xmm4,1\),%xmm6
  40 [ ]*[a-f0-9]+: c4 e2 51 90 34 25 f8 ff ff ff vpgatherdd %xmm5,-0x8\(,%xmm4,1\),%xmm
  [all...]
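The two gather listings above decode the same bytes; avx-gather-intel.d prints Intel syntax (destination first) and avx-gather.d prints AT&T syntax (destination last), with xmm5 acting as the per-lane write mask. A hedged sketch of the matching AVX2 intrinsic, assuming a caller-supplied base pointer and index vector (not code from the tests):

    #include <immintrin.h>

    /* vgatherdps (AVX2): load base[idx[i]] into each lane whose mask sign
     * bit is set; lanes with a clear mask bit keep the value from src, and
     * the mask register is zeroed afterwards. Scale 4 here because the
     * elements are floats. */
    __m128 gather4(const float *base, __m128i idx, __m128 mask, __m128 src) {
        return _mm_mask_i32gather_ps(src, base, idx, mask, 4);
    }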
avx512f-nondef.d |
  11 [ ]*[a-f0-9]+: 62 f3 d5 1f 0b f4 7b vrndscalesd \$0x7b,\{sae\},%xmm4,%xmm5,%xmm6\{%k7\}
  12 [ ]*[a-f0-9]+: 62 f3 d5 5f 0b f4 7b vrndscalesd \$0x7b,\{sae\},%xmm4,%xmm5,%xmm6\{%k7\}
|
avx512f-nondef.s |
  4 # vrndscalesd {sae}, $123, %xmm4, %xmm5, %xmm6{%k7} # with null RC
  6 # vrndscalesd {sae}, $123, %xmm4, %xmm5, %xmm6{%k7} # with not-null RC
|
x86-64-avx512f-nondef.d |
  11 [ ]*[a-f0-9]+: 62 f3 d5 1f 0b f4 7b vrndscalesd \$0x7b,\{sae\},%xmm4,%xmm5,%xmm6\{%k7\}
  12 [ ]*[a-f0-9]+: 62 f3 d5 5f 0b f4 7b vrndscalesd \$0x7b,\{sae\},%xmm4,%xmm5,%xmm6\{%k7\}
|
x86-64-avx512f-nondef.s |
  4 # vrndscalesd {sae}, $123, %xmm4, %xmm5, %xmm6{%k7} # with null RC
  6 # vrndscalesd {sae}, $123, %xmm4, %xmm5, %xmm6{%k7} # with not-null RC
|
ssemmx2.s |
  14 pminsw %xmm5,%xmm4
  15 pminsw (%esi),%xmm5
  18 pmovmskb %xmm5,%eax
  19 pmulhuw %xmm5,%xmm4
  20 pmulhuw (%esi),%xmm5
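ssemmx2.s exercises the SSE integer forms of pminsw, pmovmskb and pmulhuw on xmm registers. A rough illustration of the same operations through SSE2 intrinsics (function name is mine):

    #include <emmintrin.h>

    int sse2_int_ops(__m128i a, __m128i b) {
        __m128i mn = _mm_min_epi16(a, b);     /* pminsw: signed 16-bit minimum      */
        __m128i hi = _mm_mulhi_epu16(a, b);   /* pmulhuw: high half of u16 product  */
        /* pmovmskb packs the top bit of each byte into a 16-bit integer mask */
        return _mm_movemask_epi8(_mm_or_si128(mn, hi));
    }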
|
/external/libvpx/libvpx/vp8/encoder/x86/ |
encodeopt.asm |
  41 pxor xmm5, xmm5
  44 punpckldq xmm0, xmm5
  45 punpckhdq xmm1, xmm5
  223 movd xmm5, dword ptr arg(2) ;dc
  224 por xmm5, xmm4
  226 pcmpeqw xmm5, xmm6
  241 pand xmm0, xmm5
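The pxor/punpckldq/punpckhdq hits above are the usual way to widen 32-bit lanes to 64-bit by interleaving with a zeroed register. A small intrinsics sketch of that idiom (names are mine, not from encodeopt.asm):

    #include <emmintrin.h>

    /* Zero-extend four 32-bit lanes of v into two vectors of 64-bit lanes. */
    void widen_u32_to_u64(__m128i v, __m128i *lo, __m128i *hi) {
        __m128i zero = _mm_setzero_si128();   /* pxor xmm5, xmm5 */
        *lo = _mm_unpacklo_epi32(v, zero);    /* punpckldq       */
        *hi = _mm_unpackhi_epi32(v, zero);    /* punpckhdq       */
    }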
|
/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp8/encoder/x86/ |
encodeopt.asm |
  41 pxor xmm5, xmm5
  44 punpckldq xmm0, xmm5
  45 punpckhdq xmm1, xmm5
  223 movd xmm5, dword ptr arg(2) ;dc
  224 por xmm5, xmm4
  226 pcmpeqw xmm5, xmm6
  241 pand xmm0, xmm5
|
/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/third_party/libyuv/source/ |
scale.c |
  648 pcmpeqb xmm5, xmm5 // generate mask 0x00ff00ff
  649 psrlw xmm5, 8
  655 pand xmm0, xmm5
  656 pand xmm1, xmm5
  677 pcmpeqb xmm5, xmm5 // generate mask 0x00ff00ff
  678 psrlw xmm5, 8
  693 pand xmm2, xmm5
  694 pand xmm3, xmm5
  721 pcmpeqb xmm5, xmm5 // generate mask 0x000000ff
  722 psrld xmm5, 24
  767 movdqa xmm5, [esi + edx + 16]
  812 pcmpeqb xmm5, xmm5 // generate mask isolating 1 src 8 bytes
  813 psrlq xmm5, 56
  858 movdqa xmm5, [esi + edx + 16]
  869 movdqa xmm5, [ebp + ebx + 16]
  873 movdqa xmm5, [ebp + ebx * 2 + 16]
  877 pavgb xmm5, xmm6
  920 movdqa xmm5, _shuf2
  971 movdqa xmm5, _madd01
  1028 movdqa xmm5, _madd01
  1088 movdqa xmm5, _shuf38b
  1121 movdqa xmm5, _shufac3
  1185 movdqa xmm5, _shufab1
  1231 pxor xmm5, xmm5
  1292 movd xmm5, eax
  1293 punpcklwd xmm5, xmm5
  1294 pshufd xmm5, xmm5, 0
  1382 movd xmm5, eax
  1383 punpcklwd xmm5, xmm5
  1384 pshufd xmm5, xmm5, 0
  1455 movdqa xmm5, _madd01
  [all...]
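The pcmpeqb/psrl pairs in scale.c build constant masks without a memory load: comparing a register with itself sets every bit, and the shift then clears the bytes that should be zero (psrlw 8 leaves 0x00ff per word, psrld 24 leaves 0x000000ff per dword). A minimal sketch of the idiom with intrinsics, not libyuv code:

    #include <emmintrin.h>

    /* 0x00ff in every 16-bit lane, built the same way as the asm above. */
    __m128i even_byte_mask(void) {
        __m128i z = _mm_setzero_si128();
        __m128i all_ones = _mm_cmpeq_epi8(z, z);   /* pcmpeqb x, x */
        return _mm_srli_epi16(all_ones, 8);        /* psrlw x, 8   */
    }

    /* pand against the mask keeps the even-numbered byte of each pixel pair. */
    __m128i keep_even_bytes(__m128i px) {
        return _mm_and_si128(px, even_byte_mask());
    }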
/external/boringssl/linux-x86/crypto/aes/ |
aesni-x86.S |
  192 pxor %xmm0,%xmm5
  231 pxor %xmm0,%xmm5
  271 pxor %xmm0,%xmm5
  326 pxor %xmm0,%xmm5
  397 movdqu 48(%esi),%xmm5
  411 movups %xmm5,48(%edi)
  412 movdqu 48(%esi),%xmm5
  428 movups %xmm5,48(%edi)
  443 movups 48(%esi),%xmm5
  451 movups %xmm5,48(%edi
  [all...]
/external/boringssl/mac-x86/crypto/aes/ |
aesni-x86.S |
  179 pxor %xmm0,%xmm5
  216 pxor %xmm0,%xmm5
  254 pxor %xmm0,%xmm5
  307 pxor %xmm0,%xmm5
  376 movdqu 48(%esi),%xmm5
  390 movups %xmm5,48(%edi)
  391 movdqu 48(%esi),%xmm5
  407 movups %xmm5,48(%edi)
  422 movups 48(%esi),%xmm5
  430 movups %xmm5,48(%edi
  [all...]
/external/llvm/test/CodeGen/X86/ |
vec_minmax_sint.ll |
  23 ; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
  26 ; SSE2-NEXT: pand %xmm5, %xmm2
  43 ; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
  46 ; SSE41-NEXT: pand %xmm5, %xmm3
  75 ; SSE2-NEXT: movdqa %xmm3, %xmm5
  76 ; SSE2-NEXT: pxor %xmm4, %xmm5
  80 ; SSE2-NEXT: pcmpgtd %xmm5, %xmm7
  82 ; SSE2-NEXT: pcmpeqd %xmm5, %xmm6
  83 ; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm6[1,1,3,3]
  84 ; SSE2-NEXT: pand %xmm8, %xmm5
  [all...]
stack-folding-int-avx1.ll |
  14 %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
  23 %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
  32 %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
  41 %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
  50 %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
  59 %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
  88 %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
  108 %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
  117 %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
  126 %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"(
  [all...]
stack-folding-int-sse42.ll |
  14 %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
  23 %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
  32 %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
  41 %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
  50 %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
  59 %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
  115 %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
  135 %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
  144 %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
  153 %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"(
  [all...]
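Both stack-folding tests above wrap the instruction under test in an inline-asm nop whose clobber list names xmm1/xmm2 through xmm15 plus the flags, so the operand cannot stay in a register and must be folded from a stack slot. A rough C analogue of that trick, assuming x86-64 and GCC/Clang extended asm (not part of the LLVM tests themselves):

    /* Declaring the registers clobbered forces the compiler to spill any
     * live xmm values around the nop, which is the register pressure the
     * stack-folding tests rely on. */
    static inline void clobber_xmm_regs(void) {
        __asm__ volatile ("nop" : : :
            "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7",
            "xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13",
            "xmm14", "xmm15", "cc");
    }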
/external/libvpx/libvpx/vpx_dsp/x86/ |
vpx_high_subpixel_8t_sse2.asm |
  28 pshuflw xmm5, xmm7, 01010101b ;k5
  33 punpcklwd xmm2, xmm5
  64 punpcklwd xmm2, xmm5
  103 pshufhw xmm5, xmm7, 01010101b ;k5
  110 punpckhwd xmm2, xmm5
  145 movdqu xmm5, [rsi + rax * 4 + %1] ;5
  157 punpcklwd xmm2, xmm5
  158 punpckhwd xmm7, xmm5
  160 movdqu xmm5, temp
  163 punpcklwd xmm3, xmm5
  [all...]
sad_sse3.asm |
  85 lddqu xmm5, XMMWORD PTR [%3]
  89 psadbw xmm5, xmm0
  102 paddw xmm5, xmm1
  120 paddw xmm5, xmm1
  190 movq xmm0, xmm5
  191 psrldq xmm5, 8
  193 paddw xmm0, xmm5
  228 movq xmm0, xmm5
  229 psrldq xmm5, 8
  231 paddw xmm0, xmm5
  [all...]
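In sad_sse3.asm, psadbw produces two partial sums of absolute byte differences (one per 64-bit half of xmm5) that are accumulated with paddw and finally folded with movq/psrldq/paddw. A hedged intrinsics sketch of one 16-byte row plus that final fold (names are mine):

    #include <emmintrin.h>

    unsigned sad16(const unsigned char *src, const unsigned char *ref) {
        __m128i s   = _mm_loadu_si128((const __m128i *)src);  /* lddqu-style load */
        __m128i r   = _mm_loadu_si128((const __m128i *)ref);
        __m128i sad = _mm_sad_epu8(s, r);                     /* psadbw           */
        __m128i hi  = _mm_srli_si128(sad, 8);                 /* psrldq 8         */
        /* the two partial sums fit in 16 bits, so the add width is not critical */
        return (unsigned)_mm_cvtsi128_si32(_mm_add_epi32(sad, hi));
    }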
/external/libvpx/libvpx/third_party/libyuv/source/ |
scale_gcc.cc |
  124 "pcmpeqb %%xmm5,%%xmm5 \n"
  125 "psrlw $0x8,%%xmm5 \n"
  136 "pand %%xmm5,%%xmm2 \n"
  137 "pand %%xmm5,%%xmm3 \n"
  148 :: "memory", "cc", "xmm0", "xmm1", "xmm5"
  155 "pcmpeqb %%xmm5,%%xmm5 \n"
  156 "psrlw $0x8,%%xmm5 \n"
  171 "pand %%xmm5,%%xmm2 \n
  [all...]
/external/fec/ |
sse2bfly29.s |
  48 movd %eax,%xmm5 # xmm5[0] = second symbol
  53 punpcklbw %xmm5,%xmm5
  56 pshuflw $0,%xmm5,%xmm5
  58 punpcklqdq %xmm5,%xmm5
  59 # xmm6 now contains first symbol in each byte, xmm5 the second
  69 pxor %xmm5,%xmm
  [all...]
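The movd/punpcklbw/pshuflw/punpcklqdq hits above broadcast one symbol byte to all 16 lanes: duplicate the byte into a word, splat that word across the low half, then duplicate the low quadword. The same sequence with intrinsics, as an illustration only (a plain _mm_set1_epi8 compiles to a comparable pattern before SSSE3):

    #include <emmintrin.h>

    __m128i broadcast_byte(int sym) {
        __m128i v = _mm_cvtsi32_si128(sym & 0xff);   /* movd        */
        v = _mm_unpacklo_epi8(v, v);                 /* punpcklbw   */
        v = _mm_shufflelo_epi16(v, 0);               /* pshuflw $0  */
        return _mm_unpacklo_epi64(v, v);             /* punpcklqdq  */
    }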
/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp9/encoder/x86/ |
vp9_sad_sse3.asm |
  89 lddqu xmm5, XMMWORD PTR [%3]
  93 psadbw xmm5, xmm0
  106 paddw xmm5, xmm1
  124 paddw xmm5, xmm1
  194 movq xmm0, xmm5
  195 psrldq xmm5, 8
  197 paddw xmm0, xmm5
  232 movq xmm0, xmm5
  233 psrldq xmm5, 8
  235 paddw xmm0, xmm5
  [all...]
/external/boringssl/win-x86_64/crypto/ec/ |
p256-x86_64-asm.asm |
  724 pxor xmm5,xmm5
  754 por xmm5,xmm12
  765 movdqu XMMWORD[48+rcx],xmm5
  808 pxor xmm5,xmm5
  832 por xmm5,xmm12
  840 movdqu XMMWORD[48+rcx],xmm5
  1229 movdqu xmm5,XMMWORD[80+rsi]
  1239 movdqa XMMWORD[(448+16)+rsp],xmm5
  [all...]
/external/boringssl/src/crypto/aes/asm/ |
vpaes-x86.pl |
  178 ## Clobbers %xmm1-%xmm5, %eax, %ebx, %ecx, %edx
  188 &movdqu ("xmm5",&QWP(0,$key));
  191 &pxor ("xmm2","xmm5");
  206 &pxor ("xmm4","xmm5"); # 4 = sb1u + k
  207 &movdqa ("xmm5",&QWP($k_sb2,$const)); # 4 : sb2u
  210 &pshufb ("xmm5","xmm2"); # 4 = sb2u
  215 &pxor ("xmm2","xmm5"); # 2 = 2A
  230 &movdqa ("xmm5",&QWP($k_inv+16,$const));# 2 : a/k
  234 &pshufb ("xmm5","xmm0"); # 2 = a/k
  239 &pxor ("xmm3","xmm5"); # 3 = iak = 1/i + a/
  [all...]
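The &pshufb calls emitted by vpaes-x86.pl perform 16-entry table lookups: each byte of the index operand selects one byte of the table register, and an index byte with its top bit set yields zero. A small sketch of that general idiom with a made-up table, not one of the vpaes constants:

    #include <tmmintrin.h>

    __m128i lut16(__m128i nibbles) {   /* each index byte assumed in 0..15 */
        const __m128i table = _mm_setr_epi8(0, 9, 2, 11, 4, 13, 6, 15,
                                            8, 1, 10, 3, 12, 5, 14, 7);
        return _mm_shuffle_epi8(table, nibbles);   /* pshufb */
    }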
/external/mesa3d/src/mesa/x86/ |
sse_xform3.S |
  87 MOVSS ( REGOFF(4, ESI), XMM5 ) /* | | | oy */
  88 SHUFPS ( CONST(0x0), XMM5, XMM5 ) /* oy | oy | oy | oy */
  93 MULPS ( XMM1, XMM5 ) /* m7*oy | m6*oy | m5*oy | m4*oy */
  96 ADDPS ( XMM5, XMM4 )
  281 MOVSS ( S(2), XMM5 ) /* oz */
  282 SHUFPS ( CONST(0x0), XMM5, XMM5 ) /* oz | oz */
  283 MULPS ( XMM2, XMM5 ) /* oz*m9 | oz*m8 */
  284 ADDPS ( XMM5, XMM0 ) /* +oy*m5 | +ox*m0 *
  [all...]
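sse_xform3.S transforms a point one component at a time: MOVSS loads the component, SHUFPS 0 broadcasts it across the register, and MULPS/ADDPS accumulate component times matrix row. One such step with intrinsics (names are mine, not the file's):

    #include <xmmintrin.h>

    /* acc += v[i] * row, where row is one row of the 4x4 matrix. */
    __m128 xform_step(__m128 acc, const float *v, int i, __m128 row) {
        __m128 c = _mm_load_ss(&v[i]);          /* movss             */
        c = _mm_shuffle_ps(c, c, 0x00);         /* shufps $0: splat  */
        return _mm_add_ps(acc, _mm_mul_ps(c, row));  /* mulps, addps */
    }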
/external/libjpeg-turbo/simd/ |
jquanti-sse2-64.asm |
  129 movdqa xmm5, XMMWORD [XMMBLOCK(1,0,rsi,SIZEOF_DCTELEM)]
  133 movdqa xmm1,xmm5
  137 psraw xmm5,(WORD_BIT-1)
  141 pxor xmm1,xmm5
  145 psubw xmm1,xmm5 ; if (xmm1 < 0) xmm1 = -xmm1;
  163 pxor xmm1,xmm5
  167 psubw xmm1,xmm5
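The psraw/pxor/psubw triple in jquanti-sse2-64.asm is a branchless 16-bit absolute value: the arithmetic shift yields 0x0000 or 0xffff per lane, and (x ^ m) - m negates only the negative lanes; the later pxor/psubw pair re-applies the saved sign after quantization. A minimal sketch of the forward step (function name is mine):

    #include <emmintrin.h>

    __m128i abs_epi16(__m128i x) {
        __m128i m = _mm_srai_epi16(x, 15);              /* psraw $15    */
        return _mm_sub_epi16(_mm_xor_si128(x, m), m);   /* pxor, psubw  */
    }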
|