/external/boringssl/src/crypto/rc4/asm/ |
rc4-md5-x86_64.pl |
  221 $code.=" movdqu ($in0),%xmm2\n" if ($rc4 && $j==15);
  251 pxor %xmm0,%xmm2
  252 pxor %xmm1,%xmm2
  388 #rc4# movdqu %xmm2,($out,$in0) # write RC4 output
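The rc4-md5-x86_64.pl hits above (and the generated rc4-md5-x86_64.S listings further down) show the output step of the stitched RC4+MD5 routine: a 16-byte input block is loaded into %xmm2, XORed with the RC4 keystream held in %xmm0/%xmm1, and stored as ciphertext. A minimal C sketch of that step, assuming the keystream has already been gathered into a hypothetical 16-byte buffer (the real code keeps it in registers):

    #include <stdint.h>
    #include <stddef.h>

    /* Hedged sketch: XOR one 16-byte keystream block into the input and write
     * the result, mirroring the movdqu/pxor/pxor/movdqu sequence in the hits.
     * `keystream` is a hypothetical buffer standing in for %xmm0/%xmm1. */
    static void rc4_xor_block16(uint8_t *out, const uint8_t *in,
                                const uint8_t keystream[16]) {
        for (size_t i = 0; i < 16; i++)
            out[i] = in[i] ^ keystream[i];
    }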
|
/external/google-breakpad/src/google_breakpad/common/ |
minidump_cpu_amd64.h | 174 uint128_struct xmm2; member in struct:__anon9467::__anon9468::__anon9469
|
/external/llvm/test/CodeGen/X86/ |
sha.ll | 88 ; CHECK: movaps %xmm2, %xmm0
|
vselect-avx.ll | 18 ; CHECK: vmovdqa {{.*#+}} xmm2 = [65533,124,125,14807]
|
sse-scalar-fp-arith.ll |
  314 ; SSE-NEXT: movaps %xmm0, %xmm2
  315 ; SSE-NEXT: subss %xmm1, %xmm2
  316 ; SSE-NEXT: subss %xmm2, %xmm0
  355 ; SSE-NEXT: movaps %xmm0, %xmm2
  356 ; SSE-NEXT: divss %xmm1, %xmm2
  357 ; SSE-NEXT: divss %xmm2, %xmm0
|
avx512er-intrinsics.ll | 87 ; CHECK: vrsqrt28ss %xmm1, %xmm0, %xmm2 {%k1}{sae} # encoding: [0x62,0xf2,0x7d,0x19,0xcd,0xd1]
|
sink-hoist.ll | 92 ; CHECK: LCPI3_2(%rip), %xmm2
|
stack-folding-mmx.ll |
  6 %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
  33 %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
  42 %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
  51 %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
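The stack-folding-mmx.ll hits rely on an inline-asm nop whose constraint string claims an SSE output ("=x") and clobbers xmm1 through xmm15, leaving only xmm0 free; any other vector value live across the asm is therefore pushed to the stack, which lets the test check that the instruction under test folds its reloaded memory operand. A hedged GNU C equivalent of the same register-pressure trick:

    #include <emmintrin.h>

    /* Hedged sketch: the "=x" output plus clobbers on xmm1-xmm15 leave only
     * xmm0 available at the asm, so `v`, which is live across it, typically
     * has to be spilled to the stack and reloaded afterwards. */
    __m128i force_spill(__m128i v) {
        __m128i t;
        __asm__ volatile("nop"
                         : "=x"(t)
                         :
                         : "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7",
                           "xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13",
                           "xmm14", "xmm15");
        (void)t;
        return v;
    }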
|
/external/llvm/lib/Target/X86/ |
README.txt |
  514 cvtss2sd LCPI1_0(%rip), %xmm2
  519 movapd %xmm3, %xmm2
  521 movapd %xmm2, %xmm0
  881 xorpd LCPI1_0, %xmm2
  883 However, if xmm2 gets spilled, we end up with really ugly code like this:
  1016 movsd 176(%esp), %xmm2
  1017 mulsd %xmm0, %xmm2
  1018 movapd %xmm2, %xmm3
  1033 mulsd %xmm2, %xmm4
  1120 movaps %xmm2, 80(%rsp
  [all...]
|
X86RegisterInfo.td | 174 def XMM2: X86Reg<"xmm2", 2>, DwarfRegNum<[19, 23, 23]>;
|
/external/mesa3d/src/gallium/auxiliary/gallivm/ |
lp_bld_arit.c |
  2133 LLVMValueRef xmm2 = LLVMBuildFMul(b, y_2, DP2, "xmm2"); local
  2351 LLVMValueRef xmm2 = LLVMBuildFMul(b, y_2, DP2, "xmm2"); local
  [all...]
|
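The lp_bld_arit.c hits come from gallivm's vectorized sine/cosine, where the local named xmm2 holds y*DP2, one product in a Cephes/Cody-Waite style extended-precision argument reduction (DP1, DP2, DP3 split the reduction constant across decreasing magnitudes so the subtraction stays accurate in single precision). A hedged scalar sketch, with Cephes-style constant values given only as illustration:

    /* Hedged sketch: reduce x by y * pi/4 in three steps of decreasing
     * magnitude; the y*DP2 product corresponds to the "xmm2" local above. */
    static float reduce_arg(float x, float y /* quadrant count, e.g. round(x*4/pi) */) {
        const float DP1 = -0.78515625f;               /* illustrative split of -pi/4 */
        const float DP2 = -2.4187564849853515625e-4f;
        const float DP3 = -3.77489497744594108e-8f;
        x += y * DP1;
        x += y * DP2;
        x += y * DP3;
        return x;
    }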
/external/boringssl/linux-x86/crypto/sha/ |
sha256-586.S | [all...] |
/external/boringssl/mac-x86/crypto/sha/ |
sha256-586.S | [all...] |
/external/boringssl/linux-x86_64/crypto/sha/ |
sha256-x86_64.S |
  1786 movdqu 32(%rsi),%xmm2
  1799 paddd %xmm2,%xmm6
  1959 movdqa %xmm2,%xmm4
  2130 paddd %xmm7,%xmm2
  [all...]
/external/boringssl/mac-x86_64/crypto/sha/ |
sha256-x86_64.S |
  1785 movdqu 32(%rsi),%xmm2
  1798 paddd %xmm2,%xmm6
  1958 movdqa %xmm2,%xmm4
  2129 paddd %xmm7,%xmm2
  [all...]
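The sha256-x86_64.S hits (identical apart from line offsets in the linux and mac builds) load 16-byte chunks of the message block (movdqu 32(%rsi),%xmm2), add round constants with paddd, and run the SSSE3 message schedule four words at a time. A hedged scalar C sketch of the schedule those paddd/movdqa chains compute:

    #include <stdint.h>

    static uint32_t rotr32(uint32_t x, unsigned n) {
        return (x >> n) | (x << (32 - n));
    }

    /* Hedged sketch: the standard SHA-256 message expansion; the vector code
     * in the listing produces W[t] for four values of t per iteration. */
    static void sha256_expand(uint32_t W[64]) {  /* W[0..15] hold the block */
        for (int t = 16; t < 64; t++) {
            uint32_t s0 = rotr32(W[t-15], 7) ^ rotr32(W[t-15], 18) ^ (W[t-15] >> 3);
            uint32_t s1 = rotr32(W[t-2], 17) ^ rotr32(W[t-2], 19) ^ (W[t-2] >> 10);
            W[t] = W[t-16] + s0 + W[t-7] + s1;
        }
    }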
/external/boringssl/linux-x86_64/crypto/bn/ |
modexp512-x86_64.S |
  1494 movdqu 32(%rsi),%xmm2
  1502 movdqa %xmm2,576(%rsp)
  1508 movdqu 32(%rdx),%xmm2
  1589 movdqa %xmm2,96(%rsp)
  1701 movdqu 32(%rdx),%xmm2
  1709 movdqa %xmm2,544(%rsp)
|
/external/boringssl/src/crypto/bn/asm/ |
x86_64-mont.pl |
  692 movdqu (%rsp,$i),%xmm2
  696 pxor %xmm1,%xmm2 # conditional select
  698 pand %xmm0,%xmm2
  700 pxor %xmm1,%xmm2
  702 movdqu %xmm2,($rp,$i)
  802 movq %r11, %xmm2 # save pointer to modulus copy
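The x86_64-mont.pl hits show the branch-free select used for Montgomery multiplication's final conditional subtraction: with %xmm0 holding an all-ones or all-zeros mask, the pxor/pand/pxor triple picks one of two candidate results without a data-dependent branch, keeping the routine constant-time. A hedged word-at-a-time C version of the same idiom:

    #include <stdint.h>

    /* Hedged sketch: constant-time select.  With mask all-ones the result is
     * a, with mask all-zeros it is b; ((a ^ b) & mask) ^ b mirrors the
     * pxor %xmm1 / pand %xmm0 / pxor %xmm1 sequence in the hits. */
    static uint64_t ct_select(uint64_t mask, uint64_t a, uint64_t b) {
        return ((a ^ b) & mask) ^ b;
    }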
|
/external/lldb/tools/debugserver/source/MacOSX/i386/ |
DNBArchImplI386.cpp | [all...] |
/external/boringssl/linux-x86_64/crypto/rc4/ |
rc4-md5-x86_64.S |
  326 movdqu (%r13),%xmm2
  346 pxor %xmm0,%xmm2
  347 pxor %xmm1,%xmm2
  1232 movdqu %xmm2,(%r14,%r13,1)
|
/external/boringssl/mac-x86_64/crypto/rc4/ |
rc4-md5-x86_64.S |
  326 movdqu (%r13),%xmm2
  346 pxor %xmm0,%xmm2
  347 pxor %xmm1,%xmm2
  1232 movdqu %xmm2,(%r14,%r13,1)
|
/external/boringssl/win-x86_64/crypto/rc4/ |
rc4-md5-x86_64.asm |
  341 movdqu xmm2,XMMWORD[r13]
  361 pxor xmm2,xmm0
  362 pxor xmm2,xmm1
  1247 movdqu XMMWORD[r13*1+r14],xmm2
|
/external/valgrind/coregrind/m_gdbserver/ |
valgrind-low-x86.c | 79 { "xmm2", 1664, 128 },
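The valgrind-low-x86.c hit is one row of a gdbserver-style register description table: each entry names a register and gives its offset and width within the flat register buffer, both in bits (1664 bits = byte offset 208, and 128 bits = 16 bytes for xmm2). A hedged sketch of the table shape, assuming the usual struct reg layout from gdbserver's regdef.h:

    /* Hedged sketch: offsets and sizes are in bits within the register buffer. */
    struct reg {
        const char *name;
        int offset;   /* bit offset: 1664 -> byte 208 */
        int size;     /* bit width:  128  -> 16 bytes */
    };

    static const struct reg regs_sketch[] = {
        { "xmm2", 1664, 128 },
    };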
|
/external/llvm/test/MC/X86/ |
intel-syntax.s |
  72 // CHECK: vshufpd $1, %xmm2, %xmm1, %xmm0
  73 vshufpd XMM0, XMM1, XMM2, 1
|
/art/compiler/dex/quick/x86/ |
x86_lir.h |
  63 * XMM2: caller | caller, arg3 | caller, arg3, scratch | caller, arg3, scratch
  589 Binary0fOpCode(kX86Movups), // load unaligned packed single FP values from xmm2/m128 to xmm1
  591 Binary0fOpCode(kX86Movaps), // load aligned packed single FP values from xmm2/m128 to xmm1
  [all...]
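The x86_lir.h hits document the two packed single-precision load forms: kX86Movups loads from a possibly unaligned xmm2/m128 source, while kX86Movaps requires 16-byte alignment and faults otherwise. A hedged intrinsics sketch of the distinction, using the standard <xmmintrin.h> names:

    #include <xmmintrin.h>

    /* _mm_loadu_ps lowers to movups and accepts any alignment; _mm_load_ps
     * lowers to movaps and requires p to be 16-byte aligned. */
    __m128 load_any(const float *p)     { return _mm_loadu_ps(p); }  /* movups */
    __m128 load_aligned(const float *p) { return _mm_load_ps(p); }   /* movaps */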
/external/llvm/docs/TableGen/ |
index.rst | 69 XMM0, XMM1, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15, XMM2, XMM3, XMM4, XMM5,
|