/external/llvm/test/CodeGen/X86/ |
vector-compare-results.ll |
  782 ; SSE2-NEXT: movdqa {{.*#+}} xmm8 = [2147483648,0,2147483648,0]
  783 ; SSE2-NEXT: pxor %xmm8, %xmm7
  784 ; SSE2-NEXT: pxor %xmm8, %xmm3
  793 ; SSE2-NEXT: pxor %xmm8, %xmm5
  794 ; SSE2-NEXT: pxor %xmm8, %xmm1
  805 ; SSE2-NEXT: pxor %xmm8, %xmm6
  806 ; SSE2-NEXT: pxor %xmm8, %xmm2
  815 ; SSE2-NEXT: pxor %xmm8, %xmm4
  816 ; SSE2-NEXT: pxor %xmm8, %xmm0
  [all...]
vec_minmax_uint.ll |
  88 ; SSE2-NEXT: pshufd {{.*#+}} xmm8 = xmm7[0,0,2,2]
  91 ; SSE2-NEXT: pand %xmm8, %xmm5
  99 ; SSE2-NEXT: pshufd {{.*#+}} xmm8 = xmm7[0,0,2,2]
  102 ; SSE2-NEXT: pand %xmm8, %xmm4
  115 ; SSE41-NEXT: movdqa %xmm0, %xmm8
  131 ; SSE41-NEXT: pxor %xmm8, %xmm0
  140 ; SSE41-NEXT: blendvpd %xmm8, %xmm2
  513 ; SSE2-NEXT: pshufd {{.*#+}} xmm8 = xmm6[0,0,2,2]
  516 ; SSE2-NEXT: pand %xmm8, %xmm4
  517 ; SSE2-NEXT: pshufd {{.*#+}} xmm8 = xmm6[1,1,3,3
  [all...]
vector-shuffle-variable-128.ll |
  383 ; SSE2-NEXT: movd %eax, %xmm8
  426 ; SSE2-NEXT: punpcklbw {{.*#+}} xmm15 = xmm15[0],xmm8[0],xmm15[1],xmm8[1],xmm15[2],xmm8[2],xmm15[3],xmm8[3],xmm15[4],xmm8[4],xmm15[5],xmm8[5],xmm15[6],xmm8[6],xmm15[7],xmm8[7]
  458 ; SSSE3-NEXT: movd %eax, %xmm8
  [all...]
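The movd/punpcklbw sequence above is how a vector is rebuilt from variable scalar indices: each byte is inserted into its own register, then pairs of registers are zipped together. A hedged intrinsics sketch of one such step (the function name is mine, not the test's):

    #include <emmintrin.h>

    /* One step of the pattern above: movd inserts a scalar into lane 0 of
     * a fresh register, and punpcklbw zips the low bytes of two registers
     * as acc[0],s[0],acc[1],s[1],... -- the shape the xmm15/xmm8 check
     * line spells out. */
    static __m128i insert_and_zip(__m128i acc, int scalar)
    {
        __m128i s = _mm_cvtsi32_si128(scalar);  /* movd %eax, %xmm8 */
        return _mm_unpacklo_epi8(acc, s);       /* punpcklbw */
    }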
vec_minmax_sint.ll |
  81 ; SSE2-NEXT: pshufd {{.*#+}} xmm8 = xmm7[0,0,2,2]
  84 ; SSE2-NEXT: pand %xmm8, %xmm5
  92 ; SSE2-NEXT: pshufd {{.*#+}} xmm8 = xmm7[0,0,2,2]
  95 ; SSE2-NEXT: pand %xmm8, %xmm4
  108 ; SSE41-NEXT: movdqa %xmm0, %xmm8
  124 ; SSE41-NEXT: pxor %xmm8, %xmm0
  133 ; SSE41-NEXT: blendvpd %xmm8, %xmm2
  469 ; SSE2-NEXT: pshufd {{.*#+}} xmm8 = xmm6[0,0,2,2]
  472 ; SSE2-NEXT: pand %xmm8, %xmm4
  473 ; SSE2-NEXT: pshufd {{.*#+}} xmm8 = xmm6[1,1,3,3
  [all...]
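The SSE2 lines in vector-compare-results.ll, vec_minmax_uint.ll, and vec_minmax_sint.ll all pin down the same lowering: SSE2 has no 64-bit vector compare (and no unsigned compare at any width), so the backend biases operands with the sign bit via pxor and synthesizes the result from pcmpgtd, pshufd, and pand. A minimal sketch of the core bias idea, using the simpler 32-bit unsigned case rather than the 64-bit one these tests check:

    #include <emmintrin.h>

    /* Bias-then-signed-compare: pcmpgtd is signed only, so XORing both
     * operands with 0x80000000 (the constant the movdqa above loads into
     * xmm8) maps unsigned order onto signed order. */
    static __m128i cmpgt_epu32_sse2(__m128i a, __m128i b)
    {
        const __m128i bias = _mm_set1_epi32((int)0x80000000u);
        a = _mm_xor_si128(a, bias);   /* pxor %xmm8, ... */
        b = _mm_xor_si128(b, bias);
        return _mm_cmpgt_epi32(a, b); /* signed compare, unsigned result */
    }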
/external/flac/libFLAC/ |
lpc_intrin_sse.c |
  390 __m128 xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm9; local
  400 xmm8 = _mm_setzero_ps();
  429 /* xmm9|xmm8|xmm7|xmm6 += xmm0|xmm0|xmm0|xmm0 * xmm5|xmm4|xmm3|xmm2 */
  435 xmm8 = _mm_add_ps(xmm8, xmm1);
  447 _mm_storeu_ps(autoc+8, xmm8);
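The comment on line 429 gives the whole scheme away: xmm6 through xmm9 each accumulate four autocorrelation lags, and every sample is broadcast (xmm0) and multiplied by a window of earlier samples. A hedged sketch of the register stored at autoc+8 (lags 8..11); it omits the warm-up samples the real code handles, and the names are mine, not FLAC's:

    #include <xmmintrin.h>

    /* Accumulate lags 8..11 in one register, the role xmm8 plays above. */
    static void autoc_lags_8_to_11(const float *data, unsigned len, float *autoc)
    {
        __m128 acc = _mm_setzero_ps();            /* xmm8 = _mm_setzero_ps() */
        for (unsigned i = 11; i < len; i++) {
            __m128 x = _mm_set1_ps(data[i]);      /* broadcast, like xmm0 */
            __m128 win = _mm_set_ps(data[i-11], data[i-10],
                                    data[i-9],  data[i-8]);
            acc = _mm_add_ps(acc, _mm_mul_ps(x, win));
        }
        _mm_storeu_ps(autoc + 8, acc);            /* autoc[8..11] */
    }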
|
/external/valgrind/memcheck/tests/amd64/ |
fxsave-amd64.c |
  65 asm __volatile__("movups " VG_SYM(vecZ) ", %xmm8");
  82 asm __volatile__("movups " VG_SYM(vecZ) "(%rip), %xmm8");
  116 asm __volatile__("movaps %xmm1, %xmm8");
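The three statements differ only in addressing: line 65 references vecZ absolutely, line 82 uses the RIP-relative form that position-independent builds require, and line 116 is a register-to-register copy. A hedged sketch of the surrounding idea, loading %xmm8 and dumping it with fxsave so its state can be inspected (buffer and symbol names are mine; the test's VG_SYM macro only adds platform name mangling):

    #include <stdint.h>

    /* fxsave writes a 512-byte image of the FPU/SSE state; each xmm
     * register occupies 16 bytes starting at offset 160, so xmm8 lands
     * at byte 288. The instruction requires 16-byte alignment. */
    uint8_t fxbuf[512] __attribute__((aligned(16)));
    uint64_t vecZ[2] = { 0, 0 };

    void dump_xmm8(void)
    {
        __asm__ __volatile__(
            "movups vecZ(%%rip), %%xmm8\n\t"  /* the PIE-safe form from line 82 */
            "fxsave fxbuf(%%rip)"
            : : : "xmm8", "memory");
    }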
|
/toolchain/binutils/binutils-2.27/gas/testsuite/gas/i386/ |
x86-64-sha.d |
  14 [ ]*[a-f0-9]+: 44 0f 38 c8 00 sha1nexte \(%rax\),%xmm8
  18 [ ]*[a-f0-9]+: 44 0f 38 c9 00 sha1msg1 \(%rax\),%xmm8
  22 [ ]*[a-f0-9]+: 44 0f 38 ca 00 sha1msg2 \(%rax\),%xmm8
|
x86-64-f16c-intel.d | 29 [ ]*[a-f0-9]+: c4 43 79 1d 00 02 vcvtps2ph QWORD PTR \[r8\],xmm8,0x2
|
x86-64-f16c.d | 28 [ ]*[a-f0-9]+: c4 43 79 1d 00 02 vcvtps2ph \$0x2,%xmm8,\(%r8\)
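These .d files are expected-disassembly patterns, and their hex dumps also document how xmm8 is reached in each encoding: in the SHA lines the leading 44 is a REX prefix with REX.R set, which adds 8 to the ModRM reg field, while the F16C lines use VEX (c4 43 ...), where the same bit is stored inverted in the byte after c4. A small decoder sketch for the legacy case, with the bytes copied from the sha1nexte line:

    #include <stdio.h>

    /* Decode the register operand of "44 0f 38 c8 00": REX is 0100WRXB,
     * so bit 2 of 0x44 is REX.R, and ModRM.reg (bits 5:3 of the last
     * byte) is extended by it to reach xmm8-xmm15. */
    int main(void)
    {
        const unsigned char insn[] = { 0x44, 0x0f, 0x38, 0xc8, 0x00 };
        unsigned rex_r = (insn[0] >> 2) & 1;            /* R bit of 0100WRXB */
        unsigned reg   = ((insn[4] >> 3) & 7) | (rex_r << 3);
        printf("xmm%u\n", reg);                         /* prints: xmm8 */
        return 0;
    }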
|
/device/linaro/bootloader/edk2/EdkCompatibilityPkg/Foundation/Library/EdkIIGlueLib/Library/BaseLib/X64/ |
SetJump.S | 45 movdqu %xmm8, 0x78(%rcx)
|
LongJump.S | 45 movdqu 0x78(%rcx), %xmm8
|
LongJump.asm | 44 movdqu xmm8, [rcx + 78h]
|
SetJump.asm | 52 movdqu [rcx + 78h], xmm8
|
/device/linaro/bootloader/edk2/MdePkg/Library/BaseLib/X64/ |
SetJump.S | 44 movdqu %xmm8, 0x78(%rcx)
|
LongJump.S | 45 movdqu 0x78(%rcx), %xmm8
|
LongJump.asm | 46 movdqu xmm8, [rcx + 78h]
|
LongJump.nasm | 48 movdqu xmm8, [rcx + 0x78]
|
SetJump.asm | 54 movdqu [rcx + 78h], xmm8
|
SetJump.nasm | 56 movdqu [rcx + 0x78], xmm8
|
/device/linaro/bootloader/edk2/EdkCompatibilityPkg/Foundation/Library/Pei/PeiLib/X64/ |
ProcessorAsms.Asm |
  126 movdqu [rax + 20h], xmm8
  157 movdqu xmm8, [rax + 20h]
|
ProcessorAsms.S |
  107 movdqu %xmm8, 0x20(%rax)
  137 movdqu 0x20(%rax), %xmm8
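All of these SetJump/LongJump variants save and restore xmm8 for the same reason: the Microsoft x64 calling convention makes xmm6-xmm15 nonvolatile, so a setjmp-style buffer must capture them alongside the GP registers, and movdqu is used because the buffer is only guaranteed 8-byte alignment. A hedged sketch of such a buffer layout; the field names are illustrative, but ten 8-byte GPR/RIP slots plus a control-word slot do put Xmm8 at the 0x78 offset the BaseLib listings show (the PeiLib ProcessorAsms buffer uses its own layout, with xmm8 at 0x20):

    #include <stdint.h>

    typedef struct {
        uint64_t Rbx, Rsp, Rbp, Rdi, Rsi;
        uint64_t R12, R13, R14, R15, Rip;
        uint64_t MxCsr;                      /* 0x50 */
        uint8_t  Xmm6[16], Xmm7[16];         /* 0x58, 0x68 */
        uint8_t  Xmm8[16];                   /* 0x78: movdqu [rcx + 78h], xmm8 */
        uint8_t  Xmm9[16], Xmm10[16], Xmm11[16], Xmm12[16];
        uint8_t  Xmm13[16], Xmm14[16], Xmm15[16];
    } JUMP_BUFFER_SKETCH;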
|
/external/llvm/test/tools/llvm-objdump/ |
win64-unwind-data.test |
  21 OBJ-NEXT: 0x0e: UOP_SaveXMM128 XMM8 [0x0000]
  70 EXE-NEXT: 0x0e: UOP_SaveXMM128 XMM8 [0x0000]
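The test checks the same unwind code twice, once decoded from the object file and once from the linked executable. A sketch of the slot being decoded: per the documented Win64 unwind format, UWOP_SAVE_XMM128 (opcode 8) records a spilled nonvolatile XMM register, with OpInfo naming the register and the following slot holding the frame offset divided by 16; llvm-objdump prints it as UOP_SaveXMM128. The decoding below is illustrative:

    #include <stdint.h>
    #include <stdio.h>

    typedef union {
        struct {
            uint8_t CodeOffset;      /* prologue offset of the save */
            uint8_t UnwindOp : 4;    /* 8 == UWOP_SAVE_XMM128 */
            uint8_t OpInfo   : 4;    /* register number: 8 == XMM8 */
        };
        uint16_t FrameOffset;        /* next slot: offset / 16 */
    } UNWIND_CODE;

    int main(void)
    {
        UNWIND_CODE slot[2] = { { .CodeOffset = 0x0e, .UnwindOp = 8, .OpInfo = 8 },
                                { .FrameOffset = 0 } };
        printf("0x%02x: UOP_SaveXMM128 XMM%u [0x%04x]\n",
               slot[0].CodeOffset, slot[0].OpInfo,
               (unsigned)slot[1].FrameOffset * 16);
        return 0;
    }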
|
/art/runtime/arch/x86_64/ |
context_x86_64.cc | 83 fprs_[XMM8] = nullptr;
|
quick_method_frame_info_x86_64.h | 55 (1 << art::x86_64::XMM8) | (1 << art::x86_64::XMM9) |
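context_x86_64.cc clears the saved-register slot for XMM8, and quick_method_frame_info_x86_64.h includes it in a callee-save bitmask. A hedged sketch of that bitmask idiom; the enum values mirror the register numbers, but ART's real constants and frame math differ in detail:

    #include <stdint.h>

    enum { XMM8 = 8, XMM9 = 9, XMM10 = 10, XMM11 = 11,
           XMM12 = 12, XMM13 = 13, XMM14 = 14, XMM15 = 15 };

    /* Each callee-save FP register contributes one bit, as in the
     * (1 << art::x86_64::XMM8) | ... expression above. */
    static const uint32_t kFpCalleeSaves =
        (1u << XMM8)  | (1u << XMM9)  | (1u << XMM10) | (1u << XMM11) |
        (1u << XMM12) | (1u << XMM13) | (1u << XMM14) | (1u << XMM15);

    /* Population count of the mask feeds the frame-size calculation. */
    static unsigned NumFpSpills(void)
    {
        return (unsigned)__builtin_popcount(kFpCalleeSaves); /* 8 here */
    }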
|
/external/valgrind/coregrind/m_gdbserver/ |
64bit-sse.xml | 50 <reg name="xmm8" bitsize="128" type="vec128"/>
|