/external/libvpx/libvpx/third_party/libyuv/source/

rotate_gcc.cc
  123: "movdqa %%xmm0,%%xmm8 \n"
  125: "punpckhbw %%xmm1,%%xmm8 \n"
  128: "movdqa %%xmm8,%%xmm9 \n"
  176: "punpcklwd %%xmm10,%%xmm8 \n"
  178: "movdqa %%xmm8,%%xmm10 \n"
  214: "punpckldq %%xmm12,%%xmm8 \n"
  215: "movq %%xmm8,(%1) \n"
  216: "movdqa %%xmm8,%%xmm12 \n"
  247: "xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14", "xmm15"
  265: "movdqa %%xmm0,%%xmm8 \n [all...]
/external/libyuv/files/source/

rotate_gcc.cc
  128: "movdqa %%xmm0,%%xmm8 \n"
  130: "punpckhbw %%xmm1,%%xmm8 \n"
  133: "movdqa %%xmm8,%%xmm9 \n"
  181: "punpcklwd %%xmm10,%%xmm8 \n"
  183: "movdqa %%xmm8,%%xmm10 \n"
  219: "punpckldq %%xmm12,%%xmm8 \n"
  220: "movq %%xmm8,(%1) \n"
  221: "movdqa %%xmm8,%%xmm12 \n"
  251: "xmm7", "xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14",
  273: "movdqa %%xmm0,%%xmm8 \n [all...]
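Both hits are the same libyuv source, vendored twice: the transpose in rotate_gcc.cc is an interleave ladder of punpcklbw/punpckhbw, punpcklwd, and punpckldq that turns rows into columns, with xmm8-xmm15 as scratch (hence the clobber lists at lines 247/251). A minimal C intrinsics sketch of the same ladder for a single 8x8 byte tile, assuming 8-byte rows; the function name and signature are illustrative, not libyuv's API:

    #include <emmintrin.h>
    #include <stdint.h>

    /* Illustrative 8x8 byte transpose using the same unpack ladder
       as rotate_gcc.cc (name and signature are hypothetical). */
    static void transpose8x8_sse2(const uint8_t* src, int src_stride,
                                  uint8_t* dst, int dst_stride) {
      __m128i r[8], t[4];
      for (int i = 0; i < 8; ++i)  /* each row: 8 bytes in the low half */
        r[i] = _mm_loadl_epi64((const __m128i*)(src + i * src_stride));

      /* Stage 1: interleave bytes of adjacent rows (punpcklbw). */
      for (int i = 0; i < 4; ++i)
        t[i] = _mm_unpacklo_epi8(r[2 * i], r[2 * i + 1]);

      /* Stage 2: interleave 16-bit pairs (punpcklwd/punpckhwd). */
      __m128i u0 = _mm_unpacklo_epi16(t[0], t[1]);
      __m128i u1 = _mm_unpackhi_epi16(t[0], t[1]);
      __m128i u2 = _mm_unpacklo_epi16(t[2], t[3]);
      __m128i u3 = _mm_unpackhi_epi16(t[2], t[3]);

      /* Stage 3: interleave 32-bit quads (punpckldq/punpckhdq);
         each result now holds two transposed output rows. */
      __m128i v[4] = {
        _mm_unpacklo_epi32(u0, u2), _mm_unpackhi_epi32(u0, u2),
        _mm_unpacklo_epi32(u1, u3), _mm_unpackhi_epi32(u1, u3),
      };
      for (int i = 0; i < 4; ++i) {
        _mm_storel_epi64((__m128i*)(dst + (2 * i) * dst_stride), v[i]);
        _mm_storel_epi64((__m128i*)(dst + (2 * i + 1) * dst_stride),
                         _mm_srli_si128(v[i], 8));
      }
    }

The asm appears to process a wider tile per pass (several tiles' rows live at once), which is why it spills into xmm8-xmm15 at all.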
/external/boringssl/win-x86_64/crypto/fipsmodule/

sha512-x86_64.asm
  1845: movaps XMMWORD[(128+64)+rsp],xmm8
  1879: vpaddq xmm8,xmm0,XMMWORD[((-128))+rbp]
  1885: vmovdqa XMMWORD[rsp],xmm8
  1886: vpaddq xmm8,xmm4,XMMWORD[rbp]
  1893: vmovdqa XMMWORD[64+rsp],xmm8
  1906: vpalignr xmm8,xmm1,xmm0,8
  1915: vpsrlq xmm8,xmm8,7
  1926: vpxor xmm8,xmm8,xmm [all...]
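These hits are the AVX SHA-512 message schedule: the vpaddq/vmovdqa pairs pre-add round constants from the table at rbp and park them on the stack, vpalignr stitches adjacent message words together, and the vpsrlq/vpxor run builds the small sigma functions (line 1845 is just the Win64 prologue saving the nonvolatile xmm8). A sketch of sigma0 on two 64-bit lanes, assuming the FIPS 180-4 definition sigma0(x) = ROTR(x,1) ^ ROTR(x,8) ^ SHR(x,7); the helper name is mine:

    #include <emmintrin.h>

    /* SHA-512 small sigma0 on two lanes; SSE/AVX has no 64-bit rotate,
       so each ROTR is an OR of two shifts (the asm does the same). */
    static __m128i sha512_sigma0(__m128i x) {
      __m128i r1 = _mm_or_si128(_mm_srli_epi64(x, 1), _mm_slli_epi64(x, 63));
      __m128i r8 = _mm_or_si128(_mm_srli_epi64(x, 8), _mm_slli_epi64(x, 56));
      __m128i s7 = _mm_srli_epi64(x, 7);  /* the vpsrlq 7 above */
      return _mm_xor_si128(_mm_xor_si128(r1, r8), s7);
    }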
ghash-x86_64.asm
  974: movdqu xmm8,XMMWORD[r8]
  979: pxor xmm0,xmm8
  983: pshufd xmm8,xmm0,78
  984: pxor xmm8,xmm0
  1011: xorps xmm8,xmm4
  1015: pxor xmm8,xmm0
  1017: pxor xmm8,xmm1
  1019: movdqa xmm9,xmm8
  1021: pslldq xmm8,8
  1023: pxor xmm0,xmm8 [all...]
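The pshufd with immediate 78 swaps the two quadwords of a register, so the following pxor leaves hi^lo: the folded operand for the Karatsuba form of GHASH's carryless multiply. The later xorps/pxor/pslldq run recombines the three partial products into a 256-bit result. A hedged intrinsics sketch of that multiply, assuming PCLMULQDQ (compile with -mpclmul); the helper names are mine, not BoringSSL's:

    #include <wmmintrin.h>

    /* 128x128 -> 256-bit carryless multiply via Karatsuba:
       three PCLMULs instead of four. hi:lo is the product. */
    static void clmul_karatsuba(__m128i a, __m128i b,
                                __m128i* hi, __m128i* lo) {
      __m128i af = _mm_xor_si128(a, _mm_shuffle_epi32(a, 78)); /* a.hi^a.lo */
      __m128i bf = _mm_xor_si128(b, _mm_shuffle_epi32(b, 78)); /* b.hi^b.lo */

      __m128i p_lo  = _mm_clmulepi64_si128(a, b, 0x00);  /* a.lo * b.lo */
      __m128i p_hi  = _mm_clmulepi64_si128(a, b, 0x11);  /* a.hi * b.hi */
      __m128i p_mid = _mm_clmulepi64_si128(af, bf, 0x00);
      p_mid = _mm_xor_si128(p_mid, _mm_xor_si128(p_lo, p_hi));

      /* Splice the middle product across the halves (pslldq/psrldq). */
      *lo = _mm_xor_si128(p_lo, _mm_slli_si128(p_mid, 8));
      *hi = _mm_xor_si128(p_hi, _mm_srli_si128(p_mid, 8));
    }

The real code follows this with reduction modulo the GHASH polynomial, which is what the movdqa xmm9,xmm8 / pslldq 8 lines above are setting up.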
sha1-x86_64.asm
  1276: movaps XMMWORD[(-40-64)+r11],xmm8
  1326: movdqa xmm8,xmm3
  1334: psrldq xmm8,4
  1340: pxor xmm8,xmm2
  1344: pxor xmm4,xmm8
  1354: movdqa xmm8,xmm4
  1360: psrld xmm8,31
  1370: por xmm4,xmm8
  1411: movdqa xmm8,xmm5
  1417: pslldq xmm8,1 [all...]
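Here xmm8 is scratch for the SSE SHA-1 message expansion: the psrldq/pxor chain gathers the W[t-3]/W[t-8]/W[t-14]/W[t-16] terms four lanes at a time, and psrld 31 plus por is a rotate-left-by-one built from shifts (SSE has no rotate). The scalar schedule being vectorized is, per FIPS 180-4:

    #include <stdint.h>

    static inline uint32_t rol32(uint32_t x, int n) {
      return (x << n) | (x >> (32 - n));
    }

    /* SHA-1 message expansion; the asm computes four W values per
       iteration, with the psrld 31 / por pair doing this rol by 1. */
    static void sha1_expand(uint32_t W[80]) {
      for (int t = 16; t < 80; ++t)
        W[t] = rol32(W[t - 3] ^ W[t - 8] ^ W[t - 14] ^ W[t - 16], 1);
    }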
/external/boringssl/linux-x86_64/crypto/fipsmodule/

bsaes-x86_64.S
  14: movdqa (%rax),%xmm8
  17: pxor %xmm8,%xmm15
  18: pxor %xmm8,%xmm0
  19: pxor %xmm8,%xmm1
  20: pxor %xmm8,%xmm2
  23: pxor %xmm8,%xmm3
  24: pxor %xmm8,%xmm4
  27: pxor %xmm8,%xmm5
  28: pxor %xmm8,%xmm6
  35: movdqa 16(%r11),%xmm8 [all...]
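This is the head of the bitsliced AES eight-block path: one round-key block is loaded into xmm8 and XORed into all eight 128-bit blocks (xmm15, xmm0-xmm6) before the state is bitsliced; the second movdqa then reloads xmm8 from a constants table at 16(%r11). A sketch of the XOR step under that reading of the snippet; the helper is hypothetical:

    #include <emmintrin.h>

    /* XOR the same round key into eight AES blocks, as the
       movdqa (%rax),%xmm8 plus eight pxor instructions do above. */
    static void add_round_key_8blocks(__m128i blk[8], const __m128i* rk) {
      __m128i k = _mm_load_si128(rk);
      for (int i = 0; i < 8; ++i)
        blk[i] = _mm_xor_si128(blk[i], k);
    }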
sha512-x86_64.S
  1847: vpaddq -128(%rbp),%xmm0,%xmm8
  1853: vmovdqa %xmm8,0(%rsp)
  1854: vpaddq 0(%rbp),%xmm4,%xmm8
  1861: vmovdqa %xmm8,64(%rsp)
  1874: vpalignr $8,%xmm0,%xmm1,%xmm8
  1883: vpsrlq $7,%xmm8,%xmm8
  1894: vpxor %xmm9,%xmm8,%xmm8
  1902: vpxor %xmm10,%xmm8,%xmm [all...]
/external/boringssl/mac-x86_64/crypto/fipsmodule/

bsaes-x86_64.S
  12: movdqa (%rax),%xmm8
  15: pxor %xmm8,%xmm15
  16: pxor %xmm8,%xmm0
  17: pxor %xmm8,%xmm1
  18: pxor %xmm8,%xmm2
  21: pxor %xmm8,%xmm3
  22: pxor %xmm8,%xmm4
  25: pxor %xmm8,%xmm5
  26: pxor %xmm8,%xmm6
  33: movdqa 16(%r11),%xmm8 [all...]

sha512-x86_64.S
  1846: vpaddq -128(%rbp),%xmm0,%xmm8
  1852: vmovdqa %xmm8,0(%rsp)
  1853: vpaddq 0(%rbp),%xmm4,%xmm8
  1860: vmovdqa %xmm8,64(%rsp)
  1873: vpalignr $8,%xmm0,%xmm1,%xmm8
  1882: vpsrlq $7,%xmm8,%xmm8
  1893: vpxor %xmm9,%xmm8,%xmm8
  1901: vpxor %xmm10,%xmm8,%xmm [all...]
/external/llvm/test/MC/AsmParser/

directive_seh.s
  17: movups %xmm8, (%rsp)
  18: .seh_savexmm %xmm8, 0
/external/llvm/test/tools/llvm-objdump/Inputs/

win64-unwind.exe.coff-x86_64.asm
  11: movups %xmm8, (%rsp)
  12: .seh_savexmm %xmm8, 0
/external/swiftshader/third_party/LLVM/test/MC/AsmParser/

directive_seh.s
  30: movups %xmm8, (%rsp)
  31: .seh_savexmm %xmm8, 0
/toolchain/binutils/binutils-2.27/gas/testsuite/gas/i386/ilp32/

x86-64-opcode.d
  80: [ ]*[a-f0-9]+: 66 45 0f 58 00 addpd \(%r8\),%xmm8
  81: [ ]*[a-f0-9]+: 66 44 0f 58 00 addpd \(%rax\),%xmm8
  86: [ ]*[a-f0-9]+: 66 45 0f 58 c7 addpd %xmm15,%xmm8
  94: [ ]*[a-f0-9]+: f2 4d 0f 2d c0 cvtsd2si %xmm8,%r8
  95: [ ]*[a-f0-9]+: f2 49 0f 2d c0 cvtsd2si %xmm8,%rax
  106: [ ]*[a-f0-9]+: f2 4d 0f 2c c0 cvttsd2si %xmm8,%r8
  107: [ ]*[a-f0-9]+: f2 49 0f 2c c0 cvttsd2si %xmm8,%rax
  118: [ ]*[a-f0-9]+: f3 4d 0f 2d c0 cvtss2si %xmm8,%r8
  119: [ ]*[a-f0-9]+: f3 49 0f 2d c0 cvtss2si %xmm8,%rax
  130: [ ]*[a-f0-9]+: f3 4d 0f 2c c0 cvttss2si %xmm8,%r [all...]
/toolchain/binutils/binutils-2.27/gas/testsuite/gas/i386/

x86-64-opcode.d
  79: [ ]*[a-f0-9]+: 66 45 0f 58 00 addpd \(%r8\),%xmm8
  80: [ ]*[a-f0-9]+: 66 44 0f 58 00 addpd \(%rax\),%xmm8
  85: [ ]*[a-f0-9]+: 66 45 0f 58 c7 addpd %xmm15,%xmm8
  93: [ ]*[a-f0-9]+: f2 4d 0f 2d c0 cvtsd2si %xmm8,%r8
  94: [ ]*[a-f0-9]+: f2 49 0f 2d c0 cvtsd2si %xmm8,%rax
  105: [ ]*[a-f0-9]+: f2 4d 0f 2c c0 cvttsd2si %xmm8,%r8
  106: [ ]*[a-f0-9]+: f2 49 0f 2c c0 cvttsd2si %xmm8,%rax
  117: [ ]*[a-f0-9]+: f3 4d 0f 2d c0 cvtss2si %xmm8,%r8
  118: [ ]*[a-f0-9]+: f3 49 0f 2d c0 cvtss2si %xmm8,%rax
  129: [ ]*[a-f0-9]+: f3 4d 0f 2c c0 cvttss2si %xmm8,%r [all...]
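Both copies of this expected disassembly encode one fact: reaching xmm8-xmm15 (or r8-r15) requires a REX prefix, with REX.R extending the ModRM reg field and REX.B the base/rm field. That is the entire difference between the 45/44 and 4d/49 prefix bytes above. A small sketch of how the byte is assembled (the helper is mine; register arguments are hardware register numbers 0-15):

    #include <stdint.h>

    /* REX = 0100WRXB. W: 64-bit operand size, R: extends ModRM.reg,
       X: extends SIB.index, B: extends ModRM.rm / SIB.base. */
    static uint8_t rex_prefix(int w, int reg, int index, int base) {
      return (uint8_t)(0x40 | (w << 3) | ((reg >> 3) << 2) |
                       ((index >> 3) << 1) | (base >> 3));
    }

    /* rex_prefix(0, 8, 0, 8) == 0x45  ->  66 45 0f 58 00  addpd (%r8),%xmm8
       rex_prefix(0, 8, 0, 0) == 0x44  ->  66 44 0f 58 00  addpd (%rax),%xmm8
       rex_prefix(1, 8, 0, 8) == 0x4d  ->  f2 4d 0f 2d c0  cvtsd2si %xmm8,%r8 */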
x86-64-sha.s
  11: sha1nexte (%rax), %xmm8
  15: sha1msg1 (%rax), %xmm8
  19: sha1msg2 (%rax), %xmm8
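These gas tests only check that the SHA-NI instructions assemble with xmm8 operands. For context, their intrinsic counterparts live in immintrin.h (compile with -msha); the helper combining them into one message-schedule step follows Intel's published pattern but is illustrative:

    #include <immintrin.h>

    /* One SHA-1 schedule step with SHA-NI: w0..w3 hold W[t-16..t-1]
       as four dwords each; the result is W[t..t+3]. */
    static __m128i sha1_schedule_step(__m128i w0, __m128i w1,
                                      __m128i w2, __m128i w3) {
      __m128i t = _mm_sha1msg1_epu32(w0, w1);  /* sha1msg1 */
      t = _mm_xor_si128(t, w2);
      return _mm_sha1msg2_epu32(t, w3);        /* sha1msg2 */
    }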
/external/llvm/test/CodeGen/X86/

stack-folding-int-avx1.ll
  14: %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
  23: %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
  32: %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
  41: %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
  50: %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
  59: %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
  88: %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
  108: %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
  117: %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
  126: %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"( [all...]

stack-folding-int-sse42.ll
  14: %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
  23: %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
  32: %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
  41: %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
  50: %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
  59: %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
  115: %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
  135: %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
  144: %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
  153: %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"( [all...]

stack-folding-fp-sse42.ll
  14: %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
  22: %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
  30: %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
  38: %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
  47: %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
  55: %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
  64: %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
  73: %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
  82: %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
  96: %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"( [all...]

stack-folding-fp-avx1.ll
  14: %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
  22: %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
  30: %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
  38: %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
  46: %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
  54: %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
  63: %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
  71: %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
  80: %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
  89: %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"( [all...]
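All four stack-folding tests use the same forcing trick: an asm sideeffect "nop" whose constraint string clobbers every xmm register except the one or two under test, so any live vector value must be spilled to a stack slot and the instruction being tested has to fold its reload as a memory operand. A rough C analogue of the trick (illustrative, not the tests themselves):

    #include <emmintrin.h>

    /* Clobbering xmm2-xmm15 forces v to survive the asm via the
       stack (or xmm0/xmm1), mirroring the tests' constraint string. */
    static __m128i force_spill(__m128i v) {
      __asm__ volatile("nop"
                       : /* no outputs */
                       : /* no inputs */
                       : "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7",
                         "xmm8", "xmm9", "xmm10", "xmm11", "xmm12",
                         "xmm13", "xmm14", "xmm15", "cc");
      return v;
    }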
/external/libjpeg-turbo/simd/

jchuff-sse2-64.asm
  97: pxor xmm8, xmm8 ; __m128i neg = _mm_setzero_si128();
  137: pcmpgtw xmm8, %34 ; neg = _mm_cmpgt_epi16(neg, x1);
  141: paddw %34, xmm8 ; x1 = _mm_add_epi16(x1, neg);
  145: pxor %34, xmm8 ; x1 = _mm_xor_si128(x1, neg);
  149: pxor xmm8, %34 ; neg = _mm_xor_si128(neg, x1);
  157: movdqa XMMWORD [t2 + %1 * SIZEOF_WORD], xmm8 ; _mm_storeu_si128((__m128i *)(t2 + ko), neg);
  198: movaps XMMWORD [rsp-1*SIZEOF_XMMWORD], xmm8
  260: pxor xmm8, xmm8 [all...]
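The file's own comments already give the intrinsics translation; assembled into one function, the pcmpgtw/paddw/pxor/pxor run is the branch-free absolute-value-and-code-bits trick used by the Huffman encoder (the function wrapper below is mine):

    #include <emmintrin.h>

    /* From jchuff-sse2-64.asm's comments: compute |x| and the JPEG
       bit pattern for each coefficient without branches. */
    static void huff_abs_and_bits(__m128i x1, __m128i* abs_out,
                                  __m128i* bits_out) {
      __m128i neg = _mm_setzero_si128();
      neg = _mm_cmpgt_epi16(neg, x1);  /* 0xFFFF where x1 < 0 */
      x1  = _mm_add_epi16(x1, neg);    /* x - 1 for negatives */
      x1  = _mm_xor_si128(x1, neg);    /* ~(x - 1) == -x: now |x| */
      neg = _mm_xor_si128(neg, x1);    /* x for x >= 0, x - 1 for x < 0 */
      *abs_out  = x1;
      *bits_out = neg;
    }

For x < 0 the mask is all-ones, so (x + (-1)) ^ (-1) = ~(x - 1) = -x, and the final xor yields ~|x| = x - 1: exactly the bit pattern JPEG stores for a negative coefficient.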
/external/libvpx/libvpx/vpx_dsp/x86/

vpx_high_subpixel_bilinear_sse2.asm
  86: movq xmm8, rdx
  88: pshufd xmm8, xmm8, 0b
  89: movdqa xmm1, xmm8
  90: psllw xmm8, xmm5
  91: psubw xmm8, xmm1 ;max value (for clamping)
  113: pminsw xmm0, xmm8
  154: pminsw xmm0, xmm8
  156: pminsw xmm2, xmm8
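The prologue broadcasts a word, shifts it left by the bit-depth count held in xmm5, and subtracts the unshifted copy, leaving (v << bd) - v in every lane; the ";max value (for clamping)" comment suggests v is 1, giving the usual high-bit-depth pixel maximum (1 << bd) - 1, which pminsw then applies to the filter output. A sketch under that assumption (helper name mine):

    #include <emmintrin.h>

    /* Build the per-lane pixel maximum (1 << bd) - 1 for bd = 8/10/12,
       the same way the movq/pshufd/psllw/psubw prologue does. */
    static __m128i pixel_max(int bd) {
      __m128i one = _mm_set1_epi16(1);
      __m128i m = _mm_sll_epi16(one, _mm_cvtsi32_si128(bd)); /* 1 << bd */
      return _mm_sub_epi16(m, one);                          /* (1<<bd)-1 */
    }

    /* usage, as at lines 113/154/156:
       res = _mm_min_epi16(res, pixel_max(10));  (pminsw) */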
/external/llvm/test/TableGen/

cast.td
  54: def XMM8: Register<"xmm8">;
  65: XMM8, XMM9, XMM10, XMM11,
/external/swiftshader/third_party/LLVM/test/TableGen/

Slice.td
  49: def XMM8: Register<"xmm8">;
  60: XMM8, XMM9, XMM10, XMM11,

TargetInstrSpec.td
  50: def XMM8: Register<"xmm8">;
  61: XMM8, XMM9, XMM10, XMM11,

cast.td
  49: def XMM8: Register<"xmm8">;
  60: XMM8, XMM9, XMM10, XMM11,