HomeSort by relevance Sort by last modified time
    Searched full:vaddss (Results 1 - 25 of 67) sorted by null

1 2 3

  /toolchain/binutils/binutils-2.25/gas/testsuite/gas/i386/
x86-64-equ.s 5 vaddss %xmm0,%xmm1,ACC
10 vaddss xmm0,xmm1,ACC
x86-64-equ.d 9 [ ]*[a-f0-9]+: 62 e1 76 08 58 c8 vaddss %xmm0,%xmm1,%xmm17
10 [ ]*[a-f0-9]+: 62 b1 76 08 58 c1 vaddss %xmm17,%xmm1,%xmm0
avx-scalar.s 119 vaddss %xmm4,%xmm6,%xmm2
120 vaddss (%ecx),%xmm6,%xmm2
436 vaddss xmm2,xmm6,xmm4
437 vaddss xmm2,xmm6,DWORD PTR [ecx]
438 vaddss xmm2,xmm6,[ecx]
x86-64-avx-scalar.s 135 vaddss %xmm4,%xmm6,%xmm2
136 vaddss (%rcx),%xmm6,%xmm2
497 vaddss xmm2,xmm6,xmm4
498 vaddss xmm2,xmm6,DWORD PTR [rcx]
499 vaddss xmm2,xmm6,[rcx]
evex-lig.s 20 vaddss %xmm4, %xmm5, %xmm6{%k7} # AVX512
21 vaddss %xmm4, %xmm5, %xmm6{%k7}{z} # AVX512
22 vaddss {rn-sae}, %xmm4, %xmm5, %xmm6{%k7} # AVX512
23 vaddss {ru-sae}, %xmm4, %xmm5, %xmm6{%k7} # AVX512
24 vaddss {rd-sae}, %xmm4, %xmm5, %xmm6{%k7} # AVX512
25 vaddss {rz-sae}, %xmm4, %xmm5, %xmm6{%k7} # AVX512
26 vaddss (%ecx), %xmm5, %xmm6{%k7} # AVX512
27 vaddss -123456(%esp,%esi,8), %xmm5, %xmm6{%k7} # AVX512
28 vaddss 508(%edx), %xmm5, %xmm6{%k7} # AVX512 Disp8
29 vaddss 512(%edx), %xmm5, %xmm6{%k7} # AVX512
    [all...]
x86-64-evex-lig.s 20 vaddss %xmm28, %xmm29, %xmm30{%k7} # AVX512
21 vaddss %xmm28, %xmm29, %xmm30{%k7}{z} # AVX512
22 vaddss {rn-sae}, %xmm28, %xmm29, %xmm30{%k7} # AVX512
23 vaddss {ru-sae}, %xmm28, %xmm29, %xmm30{%k7} # AVX512
24 vaddss {rd-sae}, %xmm28, %xmm29, %xmm30{%k7} # AVX512
25 vaddss {rz-sae}, %xmm28, %xmm29, %xmm30{%k7} # AVX512
26 vaddss (%rcx), %xmm29, %xmm30{%k7} # AVX512
27 vaddss 0x123(%rax,%r14,8), %xmm29, %xmm30{%k7} # AVX512
28 vaddss 508(%rdx), %xmm29, %xmm30{%k7} # AVX512 Disp8
29 vaddss 512(%rdx), %xmm29, %xmm30{%k7} # AVX512
    [all...]
avx-scalar-intel.d 108 [ ]*[a-f0-9]+: c5 ce 58 d4 vaddss xmm2,xmm6,xmm4
109 [ ]*[a-f0-9]+: c5 ce 58 11 vaddss xmm2,xmm6,DWORD PTR \[ecx\]
385 [ ]*[a-f0-9]+: c5 ce 58 d4 vaddss xmm2,xmm6,xmm4
386 [ ]*[a-f0-9]+: c5 ce 58 11 vaddss xmm2,xmm6,DWORD PTR \[ecx\]
387 [ ]*[a-f0-9]+: c5 ce 58 11 vaddss xmm2,xmm6,DWORD PTR \[ecx\]
    [all...]
evex-lig256-intel.d 24 [ ]*[a-f0-9]+: 62 f1 56 2f 58 f4 vaddss xmm6\{k7\},xmm5,xmm4
25 [ ]*[a-f0-9]+: 62 f1 56 af 58 f4 vaddss xmm6\{k7\}\{z\},xmm5,xmm4
26 [ ]*[a-f0-9]+: 62 f1 56 1f 58 f4 vaddss xmm6\{k7\},xmm5,xmm4,\{rn-sae\}
27 [ ]*[a-f0-9]+: 62 f1 56 5f 58 f4 vaddss xmm6\{k7\},xmm5,xmm4,\{ru-sae\}
28 [ ]*[a-f0-9]+: 62 f1 56 3f 58 f4 vaddss xmm6\{k7\},xmm5,xmm4,\{rd-sae\}
29 [ ]*[a-f0-9]+: 62 f1 56 7f 58 f4 vaddss xmm6\{k7\},xmm5,xmm4,\{rz-sae\}
30 [ ]*[a-f0-9]+: 62 f1 56 2f 58 31 vaddss xmm6\{k7\},xmm5,DWORD PTR \[ecx\]
31 [ ]*[a-f0-9]+: 62 f1 56 2f 58 b4 f4 c0 1d fe ff vaddss xmm6\{k7\},xmm5,DWORD PTR \[esp\+esi\*8-0x1e240\]
32 [ ]*[a-f0-9]+: 62 f1 56 2f 58 72 7f vaddss xmm6\{k7\},xmm5,DWORD PTR \[edx\+0x1fc\]
33 [ ]*[a-f0-9]+: 62 f1 56 2f 58 b2 00 02 00 00 vaddss xmm6\{k7\},xmm5,DWORD PTR \[edx\+0x200\]
    [all...]
evex-lig256.d 24 [ ]*[a-f0-9]+: 62 f1 56 2f 58 f4 vaddss %xmm4,%xmm5,%xmm6\{%k7\}
25 [ ]*[a-f0-9]+: 62 f1 56 af 58 f4 vaddss %xmm4,%xmm5,%xmm6\{%k7\}\{z\}
26 [ ]*[a-f0-9]+: 62 f1 56 1f 58 f4 vaddss \{rn-sae\},%xmm4,%xmm5,%xmm6\{%k7\}
27 [ ]*[a-f0-9]+: 62 f1 56 5f 58 f4 vaddss \{ru-sae\},%xmm4,%xmm5,%xmm6\{%k7\}
28 [ ]*[a-f0-9]+: 62 f1 56 3f 58 f4 vaddss \{rd-sae\},%xmm4,%xmm5,%xmm6\{%k7\}
29 [ ]*[a-f0-9]+: 62 f1 56 7f 58 f4 vaddss \{rz-sae\},%xmm4,%xmm5,%xmm6\{%k7\}
30 [ ]*[a-f0-9]+: 62 f1 56 2f 58 31 vaddss \(%ecx\),%xmm5,%xmm6\{%k7\}
31 [ ]*[a-f0-9]+: 62 f1 56 2f 58 b4 f4 c0 1d fe ff vaddss -0x1e240\(%esp,%esi,8\),%xmm5,%xmm6\{%k7\}
32 [ ]*[a-f0-9]+: 62 f1 56 2f 58 72 7f vaddss 0x1fc\(%edx\),%xmm5,%xmm6\{%k7\}
33 [ ]*[a-f0-9]+: 62 f1 56 2f 58 b2 00 02 00 00 vaddss 0x200\(%edx\),%xmm5,%xmm6\{%k7\}
    [all...]
evex-lig512-intel.d 24 [ ]*[a-f0-9]+: 62 f1 56 4f 58 f4 vaddss xmm6\{k7\},xmm5,xmm4
25 [ ]*[a-f0-9]+: 62 f1 56 cf 58 f4 vaddss xmm6\{k7\}\{z\},xmm5,xmm4
26 [ ]*[a-f0-9]+: 62 f1 56 1f 58 f4 vaddss xmm6\{k7\},xmm5,xmm4,\{rn-sae\}
27 [ ]*[a-f0-9]+: 62 f1 56 5f 58 f4 vaddss xmm6\{k7\},xmm5,xmm4,\{ru-sae\}
28 [ ]*[a-f0-9]+: 62 f1 56 3f 58 f4 vaddss xmm6\{k7\},xmm5,xmm4,\{rd-sae\}
29 [ ]*[a-f0-9]+: 62 f1 56 7f 58 f4 vaddss xmm6\{k7\},xmm5,xmm4,\{rz-sae\}
30 [ ]*[a-f0-9]+: 62 f1 56 4f 58 31 vaddss xmm6\{k7\},xmm5,DWORD PTR \[ecx\]
31 [ ]*[a-f0-9]+: 62 f1 56 4f 58 b4 f4 c0 1d fe ff vaddss xmm6\{k7\},xmm5,DWORD PTR \[esp\+esi\*8-0x1e240\]
32 [ ]*[a-f0-9]+: 62 f1 56 4f 58 72 7f vaddss xmm6\{k7\},xmm5,DWORD PTR \[edx\+0x1fc\]
33 [ ]*[a-f0-9]+: 62 f1 56 4f 58 b2 00 02 00 00 vaddss xmm6\{k7\},xmm5,DWORD PTR \[edx\+0x200\]
    [all...]
evex-lig512.d 24 [ ]*[a-f0-9]+: 62 f1 56 4f 58 f4 vaddss %xmm4,%xmm5,%xmm6\{%k7\}
25 [ ]*[a-f0-9]+: 62 f1 56 cf 58 f4 vaddss %xmm4,%xmm5,%xmm6\{%k7\}\{z\}
26 [ ]*[a-f0-9]+: 62 f1 56 1f 58 f4 vaddss \{rn-sae\},%xmm4,%xmm5,%xmm6\{%k7\}
27 [ ]*[a-f0-9]+: 62 f1 56 5f 58 f4 vaddss \{ru-sae\},%xmm4,%xmm5,%xmm6\{%k7\}
28 [ ]*[a-f0-9]+: 62 f1 56 3f 58 f4 vaddss \{rd-sae\},%xmm4,%xmm5,%xmm6\{%k7\}
29 [ ]*[a-f0-9]+: 62 f1 56 7f 58 f4 vaddss \{rz-sae\},%xmm4,%xmm5,%xmm6\{%k7\}
30 [ ]*[a-f0-9]+: 62 f1 56 4f 58 31 vaddss \(%ecx\),%xmm5,%xmm6\{%k7\}
31 [ ]*[a-f0-9]+: 62 f1 56 4f 58 b4 f4 c0 1d fe ff vaddss -0x1e240\(%esp,%esi,8\),%xmm5,%xmm6\{%k7\}
32 [ ]*[a-f0-9]+: 62 f1 56 4f 58 72 7f vaddss 0x1fc\(%edx\),%xmm5,%xmm6\{%k7\}
33 [ ]*[a-f0-9]+: 62 f1 56 4f 58 b2 00 02 00 00 vaddss 0x200\(%edx\),%xmm5,%xmm6\{%k7\}
    [all...]
  /external/llvm/test/CodeGen/X86/
recip-fastmath.ll 28 ; RECIP: vaddss
36 ; REFINE: vaddss
40 ; REFINE: vaddss
dagcombine-unsafe-math.ll 12 ; CHECK: vaddss
41 ; CHECK: vaddss
dag-fmf-cse.ll 13 ; CHECK-NEXT: vaddss %xmm0, %xmm0, %xmm0
machine-combiner.ll 17 ; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0
18 ; AVX-NEXT: vaddss %xmm3, %xmm2, %xmm1
19 ; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0
37 ; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0
38 ; AVX-NEXT: vaddss %xmm3, %xmm2, %xmm1
39 ; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0
57 ; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0
58 ; AVX-NEXT: vaddss %xmm3, %xmm2, %xmm1
59 ; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0
77 ; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm
    [all...]
load-slice.ll 23 ; STRESS-NEXT: vaddss ([[BASE]]), [[OUT_Real]], [[RES_Real:%xmm[0-9]+]]
27 ; STRESS-NEXT: vaddss 4([[BASE]]), [[OUT_Imm]], [[RES_Imm:%xmm[0-9]+]]
38 ; REGULAR-NEXT: vaddss ([[BASE]]), [[OUT_Real]], [[RES_Real:%xmm[0-9]+]]
42 ; REGULAR-NEXT: vaddss 4([[BASE]]), [[OUT_Imm]], [[RES_Imm:%xmm[0-9]+]]
sqrt-fastmath.ll 40 ; ESTIMATE-NEXT: vaddss {{.*}}(%rip), %xmm1, %xmm1
84 ; ESTIMATE-NEXT: vaddss {{.*}}(%rip), %xmm0, %xmm0
sse3-avx-addsub-2.ll 303 ; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0
327 ; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0
387 ; AVX-NEXT: vaddss %xmm3, %xmm2, %xmm2
390 ; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0
433 ; AVX-NEXT: vaddss %xmm0, %xmm4, %xmm4
436 ; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0
fold-load-binops.ll 17 ; AVX-NEXT: vaddss (%rdi), %xmm0, %xmm0
sse-scalar-fp-arith.ll 19 ; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0
200 ; AVX-NEXT: vaddss %xmm0, %xmm1, %xmm0
344 ; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm1
345 ; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0
428 ; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0
575 ; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0
696 ; AVX-NEXT: vaddss %xmm0, %xmm1, %xmm0
823 ; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0
944 ; AVX-NEXT: vaddss %xmm0, %xmm1, %xmm0
vec_int_to_fp.ll     [all...]
half.ll 173 ; CHECK-F16C-NEXT: vaddss [[REG3]], [[REG3]], [[REG1:[%a-z0-9]+]]
  /external/llvm/test/MC/Disassembler/X86/
avx-512.txt 87 # CHECK: vaddss 256(%rdx), %xmm0, %xmm16
107 # CHECK: vaddss 255(%rdx), %xmm0, %xmm16
110 # CHECK: vaddss 1024(%rdx), %xmm0, %xmm16
  /external/v8/test/cctest/
test-disasm-ia32.cc 517 __ vaddss(xmm0, xmm1, xmm2);
518 __ vaddss(xmm0, xmm1, Operand(ebx, ecx, times_4, 10000));
test-disasm-x64.cc 503 __ vaddss(xmm0, xmm1, xmm2);
504 __ vaddss(xmm0, xmm1, Operand(rbx, rcx, times_4, 10000));

Completed in 362 milliseconds

1 2 3