/external/llvm/test/CodeGen/X86/

stack-folding-fp-sse42.ll
    14  %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
    22  %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
    30  %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
    38  %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
    47  %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
    55  %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
    64  %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
    73  %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
    82  %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
    96  %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"( [all...]

fold-xmm-zero.ll
    12  %0 = tail call %0 asm sideeffect "foo", "={xmm0},={xmm1},={xmm2},={xmm3},={xmm4},={xmm5},={xmm6},={xmm7},0,1,2,3,4,5,6,7,~{dirflag},~{fpsr},~{flags}"(float 1.000000e+00, float 2.000000e+00, float 3.000000e+00, float 4.000000e+00, float 5.000000e+00, float 6.000000e+00, float 7.000000e+00, float 8.000000e+00) nounwind
    22  %1 = tail call %0 asm sideeffect "bar", "={xmm0},={xmm1},={xmm2},={xmm3},={xmm4},={xmm5},={xmm6},={xmm7},0,1,2,3,4,5,6,7,~{dirflag},~{fpsr},~{flags}"(float %div, float %asmresult8, float %asmresult9, float %asmresult10, float %asmresult11, float %asmresult12, float %asmresult13, float %asmresult14) nounwind
    32  %2 = tail call %0 asm sideeffect "baz", "={xmm0},={xmm1},={xmm2},={xmm3},={xmm4},={xmm5},={xmm6},={xmm7},0,1,2,3,4,5,6,7,~{dirflag},~{fpsr},~{flags}"(float %div33, float %asmresult25, float %asmresult26, float %asmresult27, float %asmresult28, float %asmresult29, float %asmresult30, float %asmresult31) nounwind

stack-folding-fp-avx1.ll
    14  %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
    22  %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
    30  %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
    38  %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
    46  %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
    54  %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
    63  %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
    71  %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
    80  %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
    89  %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"( [all...]

stack-folding-int-avx2.ll
    14   %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
    25   %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
    36   %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
    50   %3 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
    57   %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
    67   %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
    76   %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
    85   %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
    94   %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
    103  %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"( [all...]

vector-tzcnt-256.ll
    18   ; AVX1-NEXT: vpand %xmm4, %xmm1, %xmm5
    20   ; AVX1-NEXT: vpshufb %xmm5, %xmm6, %xmm5
    24   ; AVX1-NEXT: vpaddb %xmm5, %xmm1, %xmm1
    71   ; AVX1-NEXT: vpand %xmm4, %xmm1, %xmm5
    73   ; AVX1-NEXT: vpshufb %xmm5, %xmm6, %xmm5
    77   ; AVX1-NEXT: vpaddb %xmm5, %xmm1, %xmm1
    124  ; AVX1-NEXT: vpand %xmm4, %xmm2, %xmm5
    126  ; AVX1-NEXT: vpshufb %xmm5, %xmm6, %xmm [all...]

/external/llvm/test/TableGen/

cast.td
    51  def XMM5: Register<"xmm5">;
    64  [XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,

/external/boringssl/win-x86/crypto/sha/

sha1-586.asm
    1534  pshufd xmm5,xmm1,238
    1540  punpcklqdq xmm5,xmm2
    1551  pxor xmm5,xmm1
    1558  pxor xmm5,xmm7
    1564  movdqa xmm1,xmm5
    1568  movdqa xmm7,xmm5
    1571  paddd xmm5,xmm5
    1584  por xmm5,xmm7
    1591  pxor xmm5,xmm [all...]

sha512-586.asm
    408  movdqa xmm5,[32+ebp]
    411  paddq xmm5,xmm2
    413  movdqa [edx-96],xmm5
    415  movdqa xmm5,xmm4
    421  movdqa xmm6,xmm5
    422  movdqu xmm5,[80+edi]
    430  paddq xmm0,xmm5
    467  movdqa xmm3,xmm5
    597  movdqa [16+edx],xmm5
    599  movdqa xmm5,xmm [all...]

/external/libvpx/libvpx/third_party/libyuv/source/

scale_win.cc
    132   pcmpeqb xmm5, xmm5  // generate mask 0x00ff00ff
    133   psrlw xmm5, 8
    144   pand xmm2, xmm5
    145   pand xmm3, xmm5
    169   pcmpeqb xmm5, xmm5  // generate mask 0x00ff00ff
    170   psrlw xmm5, 8
    185   pand xmm2, xmm5
    186   pand xmm3, xmm5
    318   pcmpeqb xmm5, xmm5  // generate mask 0x00ff0000
    319   psrld xmm5, 24
    320   pslld xmm5, 16
    365   movdqu xmm5, [eax + edi + 16]
    504   movdqa xmm5, kShuf2
    554   movdqa xmm5, kMadd01
    611   movdqa xmm5, kMadd01
    669   movdqa xmm5, kShuf38b
    704   pxor xmm5, xmm5
    769   movdqa xmm5, kScaleAb2
    807   pxor xmm5, xmm5
    874   movd xmm5, eax
    1237  movdqa xmm5, kShuffleFractions
    [all...]

/external/libjpeg-turbo/simd/

jcolsamp.inc
    31  %define xmmB xmm5
    53  %define xmmD xmm5
    75  %define xmmF xmm5
    97  %define xmmH xmm5

/external/valgrind/memcheck/tests/amd64-solaris/

context_sse.c
    64  "movups %[d0], %%xmm5\n"
    75  "movups %%xmm5, 0x50 + %[out]\n"
    83  printf(" xmm1=%Lf, xmm2=%Lf, xmm5=%Lf, xmm6=%Lf\n",
    91  printf(" xmm0=%Lf, xmm2=%Lf, xmm5=%Lf, xmm6=%Lf\n",

/external/valgrind/memcheck/tests/x86-solaris/

context_sse.c
    62  "movups %[d0], %%xmm5\n"
    80  "movups %%xmm5, 0x50 + %[out]\n"
    89  printf(" xmm1=%Lf, xmm2=%Lf, xmm5=%Lf, xmm6=%Lf\n",
    97  printf(" xmm0=%Lf, xmm2=%Lf, xmm5=%Lf, xmm6=%Lf\n",

/external/valgrind/none/tests/amd64-solaris/

coredump_single_thread_sse.post.exp
    13  %xmm5 0x43658709badcfe2189674523cafe6794

/toolchain/binutils/binutils-2.25/gas/testsuite/gas/i386/

avx512cd_vl.s
    6    vpconflictd %xmm5, %xmm6{%k7} # AVX512{CD,VL}
    7    vpconflictd %xmm5, %xmm6{%k7}{z} # AVX512{CD,VL}
    32   vpconflictq %xmm5, %xmm6{%k7} # AVX512{CD,VL}
    33   vpconflictq %xmm5, %xmm6{%k7}{z} # AVX512{CD,VL}
    58   vplzcntd %xmm5, %xmm6{%k7} # AVX512{CD,VL}
    59   vplzcntd %xmm5, %xmm6{%k7}{z} # AVX512{CD,VL}
    84   vplzcntq %xmm5, %xmm6{%k7} # AVX512{CD,VL}
    85   vplzcntq %xmm5, %xmm6{%k7}{z} # AVX512{CD,VL}
    116  vpconflictd xmm6{k7}, xmm5 # AVX512{CD,VL}
    117  vpconflictd xmm6{k7}{z}, xmm5 # AVX512{CD,VL [all...]

avx512f-opts-intel.d
    48   [ ]*[a-f0-9]+: 62 f1 d7 0f 11 e6 vmovsd.s xmm6\{k7\},xmm5,xmm4
    49   [ ]*[a-f0-9]+: 62 f1 d7 0f 10 f4 vmovsd xmm6\{k7\},xmm5,xmm4
    50   [ ]*[a-f0-9]+: 62 f1 d7 8f 11 e6 vmovsd.s xmm6\{k7\}\{z\},xmm5,xmm4
    51   [ ]*[a-f0-9]+: 62 f1 d7 8f 10 f4 vmovsd xmm6\{k7\}\{z\},xmm5,xmm4
    52   [ ]*[a-f0-9]+: 62 f1 56 0f 11 e6 vmovss.s xmm6\{k7\},xmm5,xmm4
    53   [ ]*[a-f0-9]+: 62 f1 56 0f 10 f4 vmovss xmm6\{k7\},xmm5,xmm4
    54   [ ]*[a-f0-9]+: 62 f1 56 8f 11 e6 vmovss.s xmm6\{k7\}\{z\},xmm5,xmm4
    55   [ ]*[a-f0-9]+: 62 f1 56 8f 10 f4 vmovss xmm6\{k7\}\{z\},xmm5,xmm4
    104  [ ]*[a-f0-9]+: 62 f1 d7 0f 11 e6 vmovsd.s xmm6\{k7\},xmm5,xmm4
    105  [ ]*[a-f0-9]+: 62 f1 d7 0f 10 f4 vmovsd xmm6\{k7\},xmm5,xmm [all...]

avx512f-opts.d
    47   [ ]*[a-f0-9]+: 62 f1 d7 0f 11 e6 vmovsd.s %xmm4,%xmm5,%xmm6\{%k7\}
    48   [ ]*[a-f0-9]+: 62 f1 d7 0f 10 f4 vmovsd %xmm4,%xmm5,%xmm6\{%k7\}
    49   [ ]*[a-f0-9]+: 62 f1 d7 8f 11 e6 vmovsd.s %xmm4,%xmm5,%xmm6\{%k7\}\{z\}
    50   [ ]*[a-f0-9]+: 62 f1 d7 8f 10 f4 vmovsd %xmm4,%xmm5,%xmm6\{%k7\}\{z\}
    51   [ ]*[a-f0-9]+: 62 f1 56 0f 11 e6 vmovss.s %xmm4,%xmm5,%xmm6\{%k7\}
    52   [ ]*[a-f0-9]+: 62 f1 56 0f 10 f4 vmovss %xmm4,%xmm5,%xmm6\{%k7\}
    53   [ ]*[a-f0-9]+: 62 f1 56 8f 11 e6 vmovss.s %xmm4,%xmm5,%xmm6\{%k7\}\{z\}
    54   [ ]*[a-f0-9]+: 62 f1 56 8f 10 f4 vmovss %xmm4,%xmm5,%xmm6\{%k7\}\{z\}
    103  [ ]*[a-f0-9]+: 62 f1 d7 0f 11 e6 vmovsd.s %xmm4,%xmm5,%xmm6\{%k7\}
    104  [ ]*[a-f0-9]+: 62 f1 d7 0f 10 f4 vmovsd %xmm4,%xmm5,%xmm6\{%k7\ [all...]

inval-16.l
    15  [ ]*4[ ]+vaddsd %xmm4, %xmm5, %xmm6\{%k7\}

/bionic/libc/arch-x86/atom/string/

ssse3-strcpy-atom.S
    358  movaps 16(%ecx), %xmm5
    361  pminub %xmm5, %xmm2
    375  movaps %xmm5, -48(%edx)
    389  pcmpeqb %xmm5, %xmm0
    404  movaps %xmm5, -48(%edx)
    503  movaps 63(%ecx), %xmm5
    505  pminub %xmm5, %xmm7
    509  movaps %xmm5, %xmm7
    510  palignr $1, %xmm4, %xmm5
    522  movaps %xmm5, 48(%edx [all...]

/external/libvpx/libvpx/vpx_dsp/x86/

vpx_subpixel_bilinear_sse2.asm
    76   pxor xmm5, xmm5
    84   punpcklbw xmm0, xmm5
    85   punpcklbw xmm1, xmm5
    105  punpcklbw xmm0, xmm5
    106  punpcklbw xmm1, xmm5
    107  punpckhbw xmm2, xmm5
    108  punpckhbw xmm3, xmm5

/external/mesa3d/src/mesa/x86/

sse_normal.S
    168  MOVSS ( S(2), XMM5 ) /* uz */
    169  SHUFPS ( CONST(0x0), XMM5, XMM5 ) /* uz | uz */
    170  MULPS ( XMM2, XMM5 ) /* uz*m6 | uz*m2 */
    173  ADDPS ( XMM5, XMM3 )
    181  MOVSS ( S(0), XMM5 ) /* ux */
    182  MULSS ( XMM6, XMM5 ) /* ux*m8*scale */
    185  ADDSS ( XMM5, XMM3 )

/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp9/common/x86/

vp9_subpixel_bilinear_sse2.asm
    76   pxor xmm5, xmm5
    84   punpcklbw xmm0, xmm5
    85   punpcklbw xmm1, xmm5
    105  punpcklbw xmm0, xmm5
    106  punpcklbw xmm1, xmm5
    107  punpckhbw xmm2, xmm5
    108  punpckhbw xmm3, xmm5

/external/gemmlowp/internal/

kernel_SSE.h
    64   // |xmm0 | | xmm4 | xmm5 | xmm6 | xmm7 |
    65   // |xmm0 | (Iter1) | xmm4 | xmm5 | xmm6 | xmm7 |
    66   // |xmm0 | | xmm4 | xmm5 | xmm6 | xmm7 |
    67   // |xmm0 | | xmm4 | xmm5 | xmm6 | xmm7 |
    76   "pxor %%xmm5 , %%xmm5 \n\t"
    98   "paddd %%xmm3, %%xmm5 \n\t"
    122  "paddd %%xmm3, %%xmm5 \n\t"
    154  "paddd %%xmm3, %%xmm5 \n\t"
    180  "paddd 0x00(%[dst_ptr], %%eax, 1) , %%xmm5 \n\t [all...]

/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp8/common/x86/

recon_sse2.asm
    53   movdqu xmm5, [rsi+rax*2]
    60   movdqa [rdi+rcx*2],xmm5
    83   movdqu xmm5, [rsi+rax*2]
    91   movdqa [rdi+rcx*2],xmm5
    399  movd xmm5, [rsi+rax]
    402  punpcklbw xmm5, xmm0
    404  pshuflw xmm5, xmm5, 0x0
    406  punpcklqdq xmm5, xmm5 [all...]

sad_sse3.asm
    170  lddqu xmm5, XMMWORD PTR [%3]
    174  psadbw xmm5, xmm0
    187  paddw xmm5, xmm1
    205  paddw xmm5, xmm1
    265  lddqu xmm5, XMMWORD PTR [%4]
    270  psadbw xmm5, xmm0
    285  paddw xmm5, xmm2
    302  paddw xmm5, xmm2
    399  movq xmm0, xmm5
    400  psrldq xmm5, [all...]

/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp8/encoder/x86/

subtract_sse2.asm
    111  movdqa xmm5, [rax + rbx]
    120  psubb xmm3, xmm5
    122  pxor xmm5, xmm4 ; convert to signed values
    124  pcmpgtb xmm5, xmm1 ; obtain sign information
    127  punpcklbw xmm3, xmm5 ; put sign back to subtraction
    128  punpckhbw xmm1, xmm5 ; put sign back to subtraction