/external/llvm/test/CodeGen/X86/

avx2-arith.ll
    84   ; CHECK-NEXT: vextracti128 $1, %ymm0, %xmm3
    85   ; CHECK-NEXT: vpmovsxbw %xmm3, %ymm3
    87   ; CHECK-NEXT: vextracti128 $1, %ymm2, %xmm3
    89   ; CHECK-NEXT: vpshufb %xmm4, %xmm3, %xmm3
    91   ; CHECK-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
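
These CHECK lines match the AVX2 lowering of a byte-vector multiply: x86 has no byte multiply, so each 128-bit half is sign-extended to words (vpmovsxbw), multiplied as words, and repacked (the vpshufb/vpunpcklqdq lines). A minimal intrinsics sketch of the widening step for one half, assuming AVX2; mul_epi8_widened is a hypothetical helper name, not part of the test:

    #include <immintrin.h>

    /* Sketch: sign-extend 16 x i8 to 16 x i16 (vpmovsxbw), multiply there.
     * The test's shuffle lines then truncate the products back to bytes. */
    static __m256i mul_epi8_widened(__m128i a, __m128i b) {
        __m256i wa = _mm256_cvtepi8_epi16(a);   /* vpmovsxbw */
        __m256i wb = _mm256_cvtepi8_epi16(b);
        return _mm256_mullo_epi16(wa, wb);      /* low 16 bits of each product */
    }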

vectorcall.ll
    67   ; CHECK: xorps %xmm3
    80   ; CHECK: xorps %xmm3

asm-reg-type-mismatch.ll
    15   call void asm sideeffect "", "{xmm0},{xmm1},{xmm2},{xmm3},{xmm4},{xmm5},{xmm6},{xmm7},~{dirflag},~{fpsr},~{flags}"( double undef, double undef, double undef, double 1.0, double undef, double 0.0, double undef, double 0.0 ) nounwind
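
The {xmm3} tokens in this IR pin each inline-asm operand to a specific SSE register. A rough C-level analogue, assuming GCC/Clang explicit register variables on x86-64; touch_xmm3 and d3 are illustrative names, not from the test:

    /* Sketch only: pin a double to %xmm3 via a local register variable. */
    static void touch_xmm3(void) {
        register double d3 __asm__("xmm3") = 1.0;  /* lives in %xmm3 */
        __asm__ __volatile__("" : : "x"(d3));      /* "x" = any SSE register;
                                                      the asm("xmm3") forces
                                                      which one is used */
    }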

sqrt-fastmath.ll
    38   ; ESTIMATE-NEXT: vmulss %xmm0, %xmm1, %xmm3
    39   ; ESTIMATE-NEXT: vmulss %xmm3, %xmm1, %xmm1
    118  ; NORECIP-NEXT: sqrtps %xmm0, %xmm3
    121  ; NORECIP-NEXT: divps %xmm3, %xmm0
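
The ESTIMATE prefix checks the fast-math path that replaces a real square root with rsqrtss plus one Newton-Raphson refinement step, while NORECIP checks the plain sqrtps/divps form. A minimal sketch of one common estimate-and-refine formulation, assuming SSE intrinsics; fast_sqrtf is a hypothetical name, and unlike the real lowering this sketch does not patch up x == 0 (where 0 * inf yields NaN):

    #include <xmmintrin.h>

    static float fast_sqrtf(float x) {
        __m128 vx = _mm_set_ss(x);
        __m128 y  = _mm_rsqrt_ss(vx);                  /* y0 ~= 1/sqrt(x), ~12 bits */
        __m128 hx = _mm_mul_ss(_mm_set_ss(0.5f), vx);
        __m128 y2 = _mm_mul_ss(y, y);
        /* one NR step: y1 = y0 * (1.5 - 0.5*x*y0^2), ~22 bits */
        y = _mm_mul_ss(y, _mm_sub_ss(_mm_set_ss(1.5f), _mm_mul_ss(hx, y2)));
        return _mm_cvtss_f32(_mm_mul_ss(vx, y));       /* sqrt(x) ~= x * y1 */
    }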

sse3-avx-addsub-2.ll
    154  ; SSE-NEXT: addsubpd %xmm3, %xmm1
    205  ; SSE-NEXT: addsubps %xmm3, %xmm1
    372  ; SSE-NEXT: movshdup {{.*#+}} xmm3 = xmm0[1,1,3,3]
    374  ; SSE-NEXT: addss %xmm3, %xmm2
    386  ; AVX-NEXT: vmovshdup {{.*#+}} xmm3 = xmm1[1,1,3,3]
    387  ; AVX-NEXT: vaddss %xmm3, %xmm2, %xmm2
    410  ; SSE-NEXT: movaps %xmm0, %xmm3
    411  ; SSE-NEXT: shufpd {{.*#+}} xmm3 = xmm3[1,0]
    414  ; SSE-NEXT: subss %xmm4, %xmm3
    [all...]
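
addsubpd/addsubps subtract in even lanes and add in odd lanes; their classic use, which this test exercises, is fusing the +/- halves of a complex multiply. A minimal SSE3 sketch; complex_mul is a hypothetical helper operating on interleaved {re, im} pairs:

    #include <pmmintrin.h>

    /* {a0,b0,a1,b1} * {c0,d0,c1,d1} as complex numbers (a+bi)(c+di) */
    static __m128 complex_mul(__m128 ab, __m128 cd) {
        __m128 re = _mm_moveldup_ps(ab);                 /* {a0,a0,a1,a1} */
        __m128 im = _mm_movehdup_ps(ab);                 /* {b0,b0,b1,b1} */
        __m128 sw = _mm_shuffle_ps(cd, cd, _MM_SHUFFLE(2,3,0,1)); /* {d0,c0,d1,c1} */
        /* even lanes: a*c - b*d (real); odd lanes: a*d + b*c (imag) */
        return _mm_addsub_ps(_mm_mul_ps(re, cd), _mm_mul_ps(im, sw));
    }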

vec_cmp_sint-128.ll
    236  ; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
    239  ; SSE2-NEXT: pand %xmm3, %xmm0
    253  ; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
    256  ; SSE41-NEXT: pand %xmm3, %xmm0
    370  ; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
    373  ; SSE2-NEXT: pand %xmm3, %xmm1
    385  ; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
    388  ; SSE41-NEXT: pand %xmm3, %xmm1
    484  ; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
    487  ; SSE2-NEXT: pand %xmm3, %xmm
    [all...]
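
The repeated pshufd/pand pattern is how a 64-bit signed compare is emulated before SSE4.2's pcmpgtq: compare the 32-bit halves, then combine "high halves greater" with "high halves equal AND low halves unsigned-greater". A sketch under those assumptions; cmpgt_epi64_sse2 is a hypothetical name, not the exact instruction order the test checks:

    #include <emmintrin.h>

    /* per-lane signed a > b for two 64-bit lanes, SSE2 only */
    static __m128i cmpgt_epi64_sse2(__m128i a, __m128i b) {
        const __m128i bias = _mm_set1_epi32((int)0x80000000);
        __m128i gt  = _mm_cmpgt_epi32(a, b);             /* signed >, per dword */
        __m128i eq  = _mm_cmpeq_epi32(a, b);
        /* unsigned > for the low dwords: bias both, then signed compare */
        __m128i ugt = _mm_cmpgt_epi32(_mm_xor_si128(a, bias),
                                      _mm_xor_si128(b, bias));
        /* broadcast each 64-bit lane's high-dword verdicts, like the
           pshufd [1,1,3,3] / [0,0,2,2] shuffles in the checks above */
        __m128i hi_gt  = _mm_shuffle_epi32(gt,  _MM_SHUFFLE(3,3,1,1));
        __m128i hi_eq  = _mm_shuffle_epi32(eq,  _MM_SHUFFLE(3,3,1,1));
        __m128i lo_ugt = _mm_shuffle_epi32(ugt, _MM_SHUFFLE(2,2,0,0));
        return _mm_or_si128(hi_gt, _mm_and_si128(hi_eq, lo_ugt));
    }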

win32-spill-xmm.ll
    7    ; CHECK: movaps %xmm3, (%esp)

/toolchain/binutils/binutils-2.25/gas/testsuite/gas/i386/

ssemmx2.d
    12   [ ]+8: 66 0f e3 d3[ ]+pavgw[ ]+%xmm3,%xmm2
    13   [ ]+c: 66 0f e3 1c 24[ ]+pavgw[ ]+\(%esp\),%xmm3
    20   [ ]+2c: 66 0f de 1c 24[ ]+pmaxub \(%esp\),%xmm3
    30   [ ]+55: 66 0f 70 da 01[ ]+pshufd \$0x1,%xmm2,%xmm3
    32   [ ]+60: f3 0f 70 da 01[ ]+pshufhw \$0x1,%xmm2,%xmm3
    34   [ ]+6b: f2 0f 70 da 01[ ]+pshuflw \$0x1,%xmm2,%xmm3

x86-64-fma4.s
    66   vfmaddpd (%r13,%rcx),%xmm11,%xmm3,%xmm4
    68   vfmsubpd (%r13,%rcx),%xmm11,%xmm3,%xmm4

sse-noavx.s
    6    cvtpd2pi %xmm3,%mm2

x86-64-sse-noavx.s
    7    cvtpd2pi %xmm3,%mm2

/external/boringssl/src/crypto/aes/asm/

aesni-x86.pl
    85   $inout1="xmm3";
    631  &pxor ("xmm3","xmm3");
    730  &pxor ("xmm3","xmm3");
    839  &pxor ("xmm3","xmm3");
    [all...]

/external/boringssl/linux-x86_64/crypto/sha/

sha1-x86_64.S
    1276  movdqu 48(%r9),%xmm3
    1297  movdqa %xmm3,%xmm8
    1298  paddd %xmm3,%xmm9
    1372  pxor %xmm3,%xmm9
    1423  punpcklqdq %xmm3,%xmm6
    1478  pshufd $238,%xmm3,%xmm7
    1491  pxor %xmm3,%xmm7
    1639  pxor %xmm3,%xmm2
    1677  pxor %xmm7,%xmm3
    1683  pxor %xmm4,%xmm3
    [all...]

/external/boringssl/mac-x86_64/crypto/sha/

sha1-x86_64.S
    1275  movdqu 48(%r9),%xmm3
    1296  movdqa %xmm3,%xmm8
    1297  paddd %xmm3,%xmm9
    1371  pxor %xmm3,%xmm9
    1422  punpcklqdq %xmm3,%xmm6
    1477  pshufd $238,%xmm3,%xmm7
    1490  pxor %xmm3,%xmm7
    1638  pxor %xmm3,%xmm2
    1676  pxor %xmm7,%xmm3
    1682  pxor %xmm4,%xmm3
    [all...]

/bionic/libm/x86/

s_atan.S
    87   movsd 2640(%ebx), %xmm3
    92   andpd %xmm0, %xmm3
    94   orpd %xmm4, %xmm3
    100  mulsd %xmm3, %xmm1
    101  subsd %xmm3, %xmm0
    118  pshufd $68, %xmm1, %xmm3
    125  mulsd %xmm3, %xmm0
    126  addsd %xmm3, %xmm6
    192  pshufd $68, %xmm0, %xmm3
    199  mulsd %xmm3, %xmm
    [all...]

/bionic/libm/x86_64/

s_atan.S
    70   movd %r8, %xmm3
    75   andpd %xmm0, %xmm3
    77   orpd %xmm4, %xmm3
    83   mulsd %xmm3, %xmm1
    84   subsd %xmm3, %xmm0
    102  movddup %xmm1, %xmm3
    109  mulsd %xmm3, %xmm0
    110  addsd %xmm3, %xmm6
    168  movddup %xmm0, %xmm3
    175  mulsd %xmm3, %xmm
    [all...]

/external/libvpx/libvpx/vp8/common/x86/

copy_sse3.asm
    104  movdqu xmm3, XMMWORD PTR [src_ptr + src_stride + 16]
    117  movdqa XMMWORD PTR [ref_ptr + ref_stride + 16], xmm3

/external/valgrind/VEX/test/

fxsave.c
    40   asm __volatile__("movups vecZ, %xmm3");
    64   asm __volatile__("movaps %xmm2, %xmm3");

/external/valgrind/memcheck/tests/amd64-solaris/

context_sse.c
    62   "movups %[y0], %%xmm3\n"
    73   "movups %%xmm3, 0x30 + %[out]\n"

/external/valgrind/memcheck/tests/x86/

fxsave.c
    41   asm __volatile__("movups " VG_SYM(vecZ) ", %xmm3");
    65   asm __volatile__("movaps %xmm2, %xmm3");

/external/valgrind/memcheck/tests/x86-solaris/

context_sse.c
    60   "movups %[y0], %%xmm3\n"
    78   "movups %%xmm3, 0x30 + %[out]\n"

/external/valgrind/none/tests/amd64-solaris/

coredump_single_thread_sse.c
    33   "movupd 48(%[input]), %%xmm3\n"
    50   : "memory", "%xmm0", "%xmm1", "%xmm2", "%xmm3", "%xmm4", "%xmm5", "%xmm6",
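
These tests load known data into xmm registers with extended asm and list every register they write as a clobber, so the compiler does not cache values there across the asm. A minimal sketch of the same pattern; load_xmm3 is an illustrative name, not the test's code:

    /* Sketch: fill %xmm3 with input[6..7] and declare the clobber. */
    static void load_xmm3(const double *input) {
        __asm__ __volatile__(
            "movupd 48(%[in]), %%xmm3\n\t"
            :
            : [in] "r"(input)
            : "memory", "%xmm3");
    }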

/art/runtime/arch/x86_64/

registers_x86_64.h
    56   XMM3 = 3,

/bionic/libc/arch-x86/atom/string/

sse2-wcscmp-atom.S
    526  movdqa 16(%edi), %xmm3
    527  pcmpeqd %xmm3, %xmm0            /* Any null double_word? */
    528  pcmpeqd 16(%esi), %xmm3         /* compare first 4 double_words for equality */
    529  psubb %xmm0, %xmm3              /* packed sub of comparison results */
    530  pmovmskb %xmm3, %edx
    763  movdqu 16(%edi), %xmm3
    765  pcmpeqd %xmm3, %xmm0            /* Any null double_word? */
    766  pcmpeqd %xmm4, %xmm3            /* compare first 4 double_words for equality */
    767  psubb %xmm0, %xmm3              /* packed sub of comparison results */
    768  pmovmskb %xmm3, %ed
    [all...]
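
The idiom here checks four wide characters at once: one pcmpeqd flags NUL dwords, another flags equality with the other string, and the psubb leaves all-ones bytes only in lanes that matched and were not NUL, so a pmovmskb result of 0xffff means "keep scanning". A hedged intrinsics sketch of that idiom; chunk_all_equal_no_nul is a hypothetical name:

    #include <emmintrin.h>
    #include <stdint.h>

    /* returns 1 if all four dwords of a match b and none is NUL */
    static int chunk_all_equal_no_nul(const uint32_t *a, const uint32_t *b) {
        __m128i va  = _mm_loadu_si128((const __m128i *)a);
        __m128i vb  = _mm_loadu_si128((const __m128i *)b);
        __m128i nul = _mm_cmpeq_epi32(va, _mm_setzero_si128()); /* NUL lanes   */
        __m128i eq  = _mm_cmpeq_epi32(va, vb);                  /* equal lanes */
        /* eq - nul stays 0xFF per byte only where equal AND non-NUL */
        __m128i both = _mm_sub_epi8(eq, nul);
        return _mm_movemask_epi8(both) == 0xffff;
    }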

/external/elfutils/tests/

testfile44.expect.bz2