    Searched full:xmm5 (Results 1 - 25 of 503)


  /toolchain/binutils/binutils-2.25/gas/testsuite/gas/i386/
evex-lig.s 7 vaddsd %xmm4, %xmm5, %xmm6{%k7} # AVX512
8 vaddsd %xmm4, %xmm5, %xmm6{%k7}{z} # AVX512
9 vaddsd {rn-sae}, %xmm4, %xmm5, %xmm6{%k7} # AVX512
10 vaddsd {ru-sae}, %xmm4, %xmm5, %xmm6{%k7} # AVX512
11 vaddsd {rd-sae}, %xmm4, %xmm5, %xmm6{%k7} # AVX512
12 vaddsd {rz-sae}, %xmm4, %xmm5, %xmm6{%k7} # AVX512
13 vaddsd (%ecx), %xmm5, %xmm6{%k7} # AVX512
14 vaddsd -123456(%esp,%esi,8), %xmm5, %xmm6{%k7} # AVX512
15 vaddsd 1016(%edx), %xmm5, %xmm6{%k7} # AVX512 Disp8
16 vaddsd 1024(%edx), %xmm5, %xmm6{%k7} # AVX512
    [all...]
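
These gas fixtures exercise the EVEX operand decorations: {%k7} requests merge-masking under opmask k7, {%k7}{z} zero-masking, and {rn-sae}/{ru-sae}/{rd-sae}/{rz-sae} a static per-instruction rounding mode with exceptions suppressed. A minimal C sketch of the same three behaviors via the AVX512F intrinsics (compile with -mavx512f; the operand values are made up for illustration):

    #include <immintrin.h>
    #include <stdio.h>

    int main(void) {
        __m128d a = _mm_set_sd(1.0), b = _mm_set_sd(2.0);
        __m128d src = _mm_set_sd(-9.0);
        __mmask8 k = 0x0;                      /* mask bit 0 clear */

        /* vaddsd %xmm4,%xmm5,%xmm6{%k7}: merge-masking keeps src
           in lanes whose mask bit is clear */
        __m128d merge = _mm_mask_add_sd(src, k, a, b);

        /* vaddsd ...{%k7}{z}: zero-masking writes 0.0 instead */
        __m128d zero = _mm_maskz_add_sd(k, a, b);

        /* vaddsd {rn-sae},...: static round-to-nearest, no exceptions */
        __m128d rn = _mm_add_round_sd(a, b,
                         _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);

        printf("%f %f %f\n", _mm_cvtsd_f64(merge), _mm_cvtsd_f64(zero),
               _mm_cvtsd_f64(rn));             /* -9.0 0.0 3.0 */
        return 0;
    }
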
avx512er-rcig.s 10 vrcp28ss {sae}, %xmm4, %xmm5, %xmm6{%k7} # AVX512ER
11 vrcp28sd {sae}, %xmm4, %xmm5, %xmm6{%k7} # AVX512ER
14 vrsqrt28ss {sae}, %xmm4, %xmm5, %xmm6{%k7} # AVX512ER
15 vrsqrt28sd {sae}, %xmm4, %xmm5, %xmm6{%k7} # AVX512ER
22 vrcp28ss xmm6{k7}, xmm5, xmm4, {sae} # AVX512ER
23 vrcp28sd xmm6{k7}, xmm5, xmm4, {sae} # AVX512ER
26 vrsqrt28ss xmm6{k7}, xmm5, xmm4, {sae} # AVX512ER
27 vrsqrt28sd xmm6{k7}, xmm5, xmm4, {sae} # AVX512ER
avx512f_vl-opts.s 6 vmovapd %xmm5, %xmm6{%k7} # AVX512{F,VL}
7 vmovapd.s %xmm5, %xmm6{%k7} # AVX512{F,VL}
8 vmovapd %xmm5, %xmm6{%k7}{z} # AVX512{F,VL}
9 vmovapd.s %xmm5, %xmm6{%k7}{z} # AVX512{F,VL}
10 vmovapd %xmm5, %xmm6{%k7} # AVX512{F,VL}
11 vmovapd.s %xmm5, %xmm6{%k7} # AVX512{F,VL}
12 vmovapd %xmm5, %xmm6{%k7}{z} # AVX512{F,VL}
13 vmovapd.s %xmm5, %xmm6{%k7}{z} # AVX512{F,VL}
22 vmovaps %xmm5, %xmm6{%k7} # AVX512{F,VL}
23 vmovaps.s %xmm5, %xmm6{%k7} # AVX512{F,VL}
    [all...]
avx512f-rcig.s 10 vcmpsd $0xab, {sae}, %xmm4, %xmm5, %k5{%k7} # AVX512F
11 vcmpsd $123, {sae}, %xmm4, %xmm5, %k5{%k7} # AVX512F
12 vcmpss $0xab, {sae}, %xmm4, %xmm5, %k5{%k7} # AVX512F
13 vcmpss $123, {sae}, %xmm4, %xmm5, %k5{%k7} # AVX512F
14 vcomisd {sae}, %xmm5, %xmm6 # AVX512F
15 vcomiss {sae}, %xmm5, %xmm6 # AVX512F
20 vcvtss2sd {sae}, %xmm4, %xmm5, %xmm6{%k7} # AVX512F
29 vgetexpsd {sae}, %xmm4, %xmm5, %xmm6{%k7} # AVX512F
30 vgetexpss {sae}, %xmm4, %xmm5, %xmm6{%k7} # AVX512F
35 vgetmantsd $0xab, {sae}, %xmm4, %xmm5, %xmm6{%k7} # AVX512F
    [all...]
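
Unlike the arithmetic fixtures above, these comparisons write an opmask register (k5) rather than a vector destination, and here {sae} only suppresses exceptions, since comparisons do not round. A small sketch with the corresponding AVX512F intrinsics (compile with -mavx512f):

    #include <immintrin.h>
    #include <stdio.h>

    int main(void) {
        __m128d a = _mm_set_sd(1.0), b = _mm_set_sd(2.0);

        /* vcmpsd $imm,{sae},%xmm4,%xmm5,%k5{%k7}: the immediate picks
           the predicate; the result is a one-bit mask */
        __mmask8 k = _mm_cmp_round_sd_mask(a, b, _CMP_LT_OQ,
                                           _MM_FROUND_NO_EXC);

        /* vcomisd {sae},%xmm5,%xmm6: same idea, result in EFLAGS */
        int le = _mm_comi_round_sd(a, b, _CMP_LE_OQ, _MM_FROUND_NO_EXC);

        printf("mask=%u comi=%d\n", (unsigned)k, le);  /* mask=1 comi=1 */
        return 0;
    }
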
avx512dq-rcig.s 10 vrangesd $0xab, {sae}, %xmm4, %xmm5, %xmm6{%k7} # AVX512DQ
11 vrangesd $123, {sae}, %xmm4, %xmm5, %xmm6{%k7} # AVX512DQ
12 vrangess $0xab, {sae}, %xmm4, %xmm5, %xmm6{%k7} # AVX512DQ
13 vrangess $123, {sae}, %xmm4, %xmm5, %xmm6{%k7} # AVX512DQ
18 vreducesd $0xab, {sae}, %xmm4, %xmm5, %xmm6{%k7} # AVX512DQ
19 vreducesd $123, {sae}, %xmm4, %xmm5, %xmm6{%k7} # AVX512DQ
20 vreducess $0xab, {sae}, %xmm4, %xmm5, %xmm6{%k7} # AVX512DQ
21 vreducess $123, {sae}, %xmm4, %xmm5, %xmm6{%k7} # AVX512DQ
32 vrangesd xmm6{k7}, xmm5, xmm4, {sae}, 0xab # AVX512DQ
33 vrangesd xmm6{k7}, xmm5, xmm4, {sae}, 123 # AVX512DQ
    [all...]
evex-lig256-intel.d 12 [ ]*[a-f0-9]+: 62 f1 d7 2f 58 f4 vaddsd xmm6\{k7\},xmm5,xmm4
13 [ ]*[a-f0-9]+: 62 f1 d7 af 58 f4 vaddsd xmm6\{k7\}\{z\},xmm5,xmm4
14 [ ]*[a-f0-9]+: 62 f1 d7 1f 58 f4 vaddsd xmm6\{k7\},xmm5,xmm4,\{rn-sae\}
15 [ ]*[a-f0-9]+: 62 f1 d7 5f 58 f4 vaddsd xmm6\{k7\},xmm5,xmm4,\{ru-sae\}
16 [ ]*[a-f0-9]+: 62 f1 d7 3f 58 f4 vaddsd xmm6\{k7\},xmm5,xmm4,\{rd-sae\}
17 [ ]*[a-f0-9]+: 62 f1 d7 7f 58 f4 vaddsd xmm6\{k7\},xmm5,xmm4,\{rz-sae\}
18 [ ]*[a-f0-9]+: 62 f1 d7 2f 58 31 vaddsd xmm6\{k7\},xmm5,QWORD PTR \[ecx\]
19 [ ]*[a-f0-9]+: 62 f1 d7 2f 58 b4 f4 c0 1d fe ff vaddsd xmm6\{k7\},xmm5,QWORD PTR \[esp\+esi\*8-0x1e240\]
20 [ ]*[a-f0-9]+: 62 f1 d7 2f 58 72 7f vaddsd xmm6\{k7\},xmm5,QWORD PTR \[edx\+0x3f8\]
21 [ ]*[a-f0-9]+: 62 f1 d7 2f 58 b2 00 04 00 00 vaddsd xmm6\{k7\},xmm5,QWORD PTR \[edx\+0x400\]
    [all...]
evex-lig512-intel.d 12 [ ]*[a-f0-9]+: 62 f1 d7 4f 58 f4 vaddsd xmm6\{k7\},xmm5,xmm4
13 [ ]*[a-f0-9]+: 62 f1 d7 cf 58 f4 vaddsd xmm6\{k7\}\{z\},xmm5,xmm4
14 [ ]*[a-f0-9]+: 62 f1 d7 1f 58 f4 vaddsd xmm6\{k7\},xmm5,xmm4,\{rn-sae\}
15 [ ]*[a-f0-9]+: 62 f1 d7 5f 58 f4 vaddsd xmm6\{k7\},xmm5,xmm4,\{ru-sae\}
16 [ ]*[a-f0-9]+: 62 f1 d7 3f 58 f4 vaddsd xmm6\{k7\},xmm5,xmm4,\{rd-sae\}
17 [ ]*[a-f0-9]+: 62 f1 d7 7f 58 f4 vaddsd xmm6\{k7\},xmm5,xmm4,\{rz-sae\}
18 [ ]*[a-f0-9]+: 62 f1 d7 4f 58 31 vaddsd xmm6\{k7\},xmm5,QWORD PTR \[ecx\]
19 [ ]*[a-f0-9]+: 62 f1 d7 4f 58 b4 f4 c0 1d fe ff vaddsd xmm6\{k7\},xmm5,QWORD PTR \[esp\+esi\*8-0x1e240\]
20 [ ]*[a-f0-9]+: 62 f1 d7 4f 58 72 7f vaddsd xmm6\{k7\},xmm5,QWORD PTR \[edx\+0x3f8\]
21 [ ]*[a-f0-9]+: 62 f1 d7 4f 58 b2 00 04 00 00 vaddsd xmm6\{k7\},xmm5,QWORD PTR \[edx\+0x400\]
    [all...]
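
The two expected-disassembly fixtures above differ only in EVEX byte P2 (0x2f in evex-lig256 vs 0x4f in evex-lig512): bits 6:5 of P2 carry the vector length L'L, which length-ignoring (LIG) scalar instructions such as vaddsd must disregard, so both encodings have to disassemble identically. A sketch of the relevant P2 fields, following the field layout in the Intel SDM:

    #include <stdio.h>

    /* Decode the masking/length fields of EVEX prefix byte P2. */
    static void decode_evex_p2(unsigned char p2) {
        unsigned aaa = p2 & 0x07;        /* opmask register k0..k7    */
        unsigned b   = (p2 >> 4) & 1;    /* broadcast/RC/SAE bit      */
        unsigned ll  = (p2 >> 5) & 0x03; /* L'L: length, or RC if b=1 */
        unsigned z   = (p2 >> 7) & 1;    /* zero- vs merge-masking    */
        printf("P2=0x%02x: k%u z=%u L'L=%u b=%u\n", p2, aaa, z, ll, b);
    }

    int main(void) {
        decode_evex_p2(0x2f); /* 256-bit L'L, {%k7}: evex-lig256 rows */
        decode_evex_p2(0x4f); /* 512-bit L'L, {%k7}: evex-lig512 rows */
        decode_evex_p2(0x1f); /* b=1, L'L=00: the {rn-sae} rows       */
        return 0;
    }
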
avx512bw_vl-wig.s 6 vpabsb %xmm5, %xmm6{%k7} # AVX512{BW,VL}
7 vpabsb %xmm5, %xmm6{%k7}{z} # AVX512{BW,VL}
22 vpabsw %xmm5, %xmm6{%k7} # AVX512{BW,VL}
23 vpabsw %xmm5, %xmm6{%k7}{z} # AVX512{BW,VL}
38 vpacksswb %xmm4, %xmm5, %xmm6{%k7} # AVX512{BW,VL}
39 vpacksswb %xmm4, %xmm5, %xmm6{%k7}{z} # AVX512{BW,VL}
40 vpacksswb (%ecx), %xmm5, %xmm6{%k7} # AVX512{BW,VL}
41 vpacksswb -123456(%esp,%esi,8), %xmm5, %xmm6{%k7} # AVX512{BW,VL}
42 vpacksswb 2032(%edx), %xmm5, %xmm6{%k7} # AVX512{BW,VL} Disp8
43 vpacksswb 2048(%edx), %xmm5, %xmm6{%k7} # AVX512{BW,VL}
    [all...]
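
The Disp8 annotations mark EVEX compressed displacements: with a 16-byte memory operand the one-byte displacement field is scaled by N=16, so 2032 = 127*16 still fits while 2048 = 128*16 does not and falls back to a 32-bit displacement. A quick sketch of that encoder decision, assuming a fixed scale N:

    #include <stdio.h>

    /* EVEX Disp8*N: a displacement is encodable in one byte iff it is
       a multiple of N and disp/N fits in a signed byte. */
    static int fits_disp8(int disp, int n) {
        return disp % n == 0 && disp / n >= -128 && disp / n <= 127;
    }

    int main(void) {
        printf("2032:  %s\n", fits_disp8(2032, 16)  ? "Disp8" : "Disp32");
        printf("2048:  %s\n", fits_disp8(2048, 16)  ? "Disp8" : "Disp32");
        printf("-2048: %s\n", fits_disp8(-2048, 16) ? "Disp8" : "Disp32");
        printf("-2064: %s\n", fits_disp8(-2064, 16) ? "Disp8" : "Disp32");
        return 0;
    }
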
avx-gather.s 24 vgatherdps %xmm5,0x8(,%xmm4,1),%xmm6
25 vgatherdps %xmm5,-0x8(,%xmm4,1),%xmm6
26 vgatherdps %xmm5,(,%xmm4,1),%xmm6
27 vgatherdps %xmm5,0x298(,%xmm4,1),%xmm6
28 vgatherdps %xmm5,0x8(,%xmm4,8),%xmm6
29 vgatherdps %xmm5,-0x8(,%xmm4,8),%xmm6
30 vgatherdps %xmm5,(,%xmm4,8),%xmm6
31 vgatherdps %xmm5,0x298(,%xmm4,8),%xmm6
38 vpgatherdd %xmm5,0x8(,%xmm4,1),%xmm6
39 vpgatherdd %xmm5,-0x8(,%xmm4,1),%xmm6
    [all...]
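
In these VEX gather tests the first AT&T operand is the write-mask vector, the memory operand supplies the per-element index and scale, and the destination comes last. Roughly the intrinsic equivalent (AVX2, compile with -mavx2); the table, indices, and scale here are made up for illustration:

    #include <immintrin.h>
    #include <stdio.h>

    int main(void) {
        float table[32];
        for (int i = 0; i < 32; i++) table[i] = (float)i;

        __m128i idx  = _mm_setr_epi32(0, 4, 8, 12);    /* lane indices   */
        __m128  src  = _mm_setzero_ps();               /* kept if masked */
        __m128  mask = _mm_castsi128_ps(_mm_set1_epi32(-1)); /* all on   */

        /* vgatherdps %xmm5,(base,%xmm4,4),%xmm6: loads element i from
           base + idx[i]*scale; the mask register is cleared as lanes
           complete, which is why it is an explicit operand. */
        __m128 g = _mm_mask_i32gather_ps(src, table, idx, mask, 4);

        float out[4];
        _mm_storeu_ps(out, g);
        printf("%g %g %g %g\n", out[0], out[1], out[2], out[3]);
        return 0;
    }
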
avx512bw_vl-opts.s 6 vmovdqu8 %xmm5, %xmm6{%k7} # AVX512{BW,VL}
7 vmovdqu8.s %xmm5, %xmm6{%k7} # AVX512{BW,VL}
8 vmovdqu8 %xmm5, %xmm6{%k7}{z} # AVX512{BW,VL}
9 vmovdqu8.s %xmm5, %xmm6{%k7}{z} # AVX512{BW,VL}
10 vmovdqu8 %xmm5, %xmm6{%k7} # AVX512{BW,VL}
11 vmovdqu8.s %xmm5, %xmm6{%k7} # AVX512{BW,VL}
12 vmovdqu8 %xmm5, %xmm6{%k7}{z} # AVX512{BW,VL}
13 vmovdqu8.s %xmm5, %xmm6{%k7}{z} # AVX512{BW,VL}
22 vmovdqu16 %xmm5, %xmm6{%k7} # AVX512{BW,VL}
23 vmovdqu16.s %xmm5, %xmm6{%k7} # AVX512{BW,VL}
    [all...]
evex-lig256.d 12 [ ]*[a-f0-9]+: 62 f1 d7 2f 58 f4 vaddsd %xmm4,%xmm5,%xmm6\{%k7\}
13 [ ]*[a-f0-9]+: 62 f1 d7 af 58 f4 vaddsd %xmm4,%xmm5,%xmm6\{%k7\}\{z\}
14 [ ]*[a-f0-9]+: 62 f1 d7 1f 58 f4 vaddsd \{rn-sae\},%xmm4,%xmm5,%xmm6\{%k7\}
15 [ ]*[a-f0-9]+: 62 f1 d7 5f 58 f4 vaddsd \{ru-sae\},%xmm4,%xmm5,%xmm6\{%k7\}
16 [ ]*[a-f0-9]+: 62 f1 d7 3f 58 f4 vaddsd \{rd-sae\},%xmm4,%xmm5,%xmm6\{%k7\}
17 [ ]*[a-f0-9]+: 62 f1 d7 7f 58 f4 vaddsd \{rz-sae\},%xmm4,%xmm5,%xmm6\{%k7\}
18 [ ]*[a-f0-9]+: 62 f1 d7 2f 58 31 vaddsd \(%ecx\),%xmm5,%xmm6\{%k7\}
19 [ ]*[a-f0-9]+: 62 f1 d7 2f 58 b4 f4 c0 1d fe ff vaddsd -0x1e240\(%esp,%esi,8\),%xmm5,%xmm6\{%k7\}
20 [ ]*[a-f0-9]+: 62 f1 d7 2f 58 72 7f vaddsd 0x3f8\(%edx\),%xmm5,%xmm6\{%k7\}
21 [ ]*[a-f0-9]+: 62 f1 d7 2f 58 b2 00 04 00 00 vaddsd 0x400\(%edx\),%xmm5,%xmm6\{%k7\}
    [all...]
evex-lig512.d 12 [ ]*[a-f0-9]+: 62 f1 d7 4f 58 f4 vaddsd %xmm4,%xmm5,%xmm6\{%k7\}
13 [ ]*[a-f0-9]+: 62 f1 d7 cf 58 f4 vaddsd %xmm4,%xmm5,%xmm6\{%k7\}\{z\}
14 [ ]*[a-f0-9]+: 62 f1 d7 1f 58 f4 vaddsd \{rn-sae\},%xmm4,%xmm5,%xmm6\{%k7\}
15 [ ]*[a-f0-9]+: 62 f1 d7 5f 58 f4 vaddsd \{ru-sae\},%xmm4,%xmm5,%xmm6\{%k7\}
16 [ ]*[a-f0-9]+: 62 f1 d7 3f 58 f4 vaddsd \{rd-sae\},%xmm4,%xmm5,%xmm6\{%k7\}
17 [ ]*[a-f0-9]+: 62 f1 d7 7f 58 f4 vaddsd \{rz-sae\},%xmm4,%xmm5,%xmm6\{%k7\}
18 [ ]*[a-f0-9]+: 62 f1 d7 4f 58 31 vaddsd \(%ecx\),%xmm5,%xmm6\{%k7\}
19 [ ]*[a-f0-9]+: 62 f1 d7 4f 58 b4 f4 c0 1d fe ff vaddsd -0x1e240\(%esp,%esi,8\),%xmm5,%xmm6\{%k7\}
20 [ ]*[a-f0-9]+: 62 f1 d7 4f 58 72 7f vaddsd 0x3f8\(%edx\),%xmm5,%xmm6\{%k7\}
21 [ ]*[a-f0-9]+: 62 f1 d7 4f 58 b2 00 04 00 00 vaddsd 0x400\(%edx\),%xmm5,%xmm6\{%k7\}
    [all...]
avx512ifma_vl.s 6 vpmadd52luq %xmm4, %xmm5, %xmm6{%k7} # AVX512{IFMA,VL}
7 vpmadd52luq %xmm4, %xmm5, %xmm6{%k7}{z} # AVX512{IFMA,VL}
8 vpmadd52luq (%ecx), %xmm5, %xmm6{%k7} # AVX512{IFMA,VL}
9 vpmadd52luq -123456(%esp,%esi,8), %xmm5, %xmm6{%k7} # AVX512{IFMA,VL}
10 vpmadd52luq (%eax){1to2}, %xmm5, %xmm6{%k7} # AVX512{IFMA,VL}
11 vpmadd52luq 2032(%edx), %xmm5, %xmm6{%k7} # AVX512{IFMA,VL} Disp8
12 vpmadd52luq 2048(%edx), %xmm5, %xmm6{%k7} # AVX512{IFMA,VL}
13 vpmadd52luq -2048(%edx), %xmm5, %xmm6{%k7} # AVX512{IFMA,VL} Disp8
14 vpmadd52luq -2064(%edx), %xmm5, %xmm6{%k7} # AVX512{IFMA,VL}
15 vpmadd52luq 1016(%edx){1to2}, %xmm5, %xmm6{%k7} # AVX512{IFMA,VL} Disp8
    [all...]
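
vpmadd52luq accumulates the low 52 bits of a 52x52-bit unsigned product into 64-bit lanes, and the (%eax){1to2} forms broadcast a single quadword from memory to both lanes first. A sketch with the AVX512IFMA+VL intrinsic (compile with -mavx512ifma -mavx512vl); the operand values are illustrative:

    #include <immintrin.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
        __m128i acc = _mm_set1_epi64x(1);          /* accumulator */
        __m128i b   = _mm_set1_epi64x(3);
        __m128i c   = _mm_set1_epi64x(5);

        /* vpmadd52luq: acc + low52(b*c) per 64-bit lane; a {1to2}
           memory operand would be _mm_set1_epi64x of one load. */
        __m128i lo = _mm_madd52lo_epu64(acc, b, c);

        uint64_t out[2];
        _mm_storeu_si128((__m128i *)out, lo);
        printf("%llu %llu\n", (unsigned long long)out[0],
               (unsigned long long)out[1]);        /* 16 16 */
        return 0;
    }
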
avx512vbmi_vl.s 6 vpermb %xmm4, %xmm5, %xmm6{%k7} # AVX512{VBMI,VL}
7 vpermb %xmm4, %xmm5, %xmm6{%k7}{z} # AVX512{VBMI,VL}
8 vpermb (%ecx), %xmm5, %xmm6{%k7} # AVX512{VBMI,VL}
9 vpermb -123456(%esp,%esi,8), %xmm5, %xmm6{%k7} # AVX512{VBMI,VL}
10 vpermb 2032(%edx), %xmm5, %xmm6{%k7} # AVX512{VBMI,VL} Disp8
11 vpermb 2048(%edx), %xmm5, %xmm6{%k7} # AVX512{VBMI,VL}
12 vpermb -2048(%edx), %xmm5, %xmm6{%k7} # AVX512{VBMI,VL} Disp8
13 vpermb -2064(%edx), %xmm5, %xmm6{%k7} # AVX512{VBMI,VL}
22 vpermi2b %xmm4, %xmm5, %xmm6{%k7} # AVX512{VBMI,VL}
23 vpermi2b %xmm4, %xmm5, %xmm6{%k7}{z} # AVX512{VBMI,VL}
    [all...]
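
vpermb is a full byte-granularity shuffle: every destination byte selects any source byte by index (at 128-bit width only the low four index bits matter). A sketch with the AVX512VBMI+VL intrinsic (compile with -mavx512vbmi -mavx512vl):

    #include <immintrin.h>
    #include <stdio.h>

    int main(void) {
        __m128i src = _mm_setr_epi8('a','b','c','d','e','f','g','h',
                                    'i','j','k','l','m','n','o','p');
        /* Index i selects source byte 15-i: a byte-reverse shuffle. */
        __m128i idx = _mm_setr_epi8(15,14,13,12,11,10,9,8,
                                    7,6,5,4,3,2,1,0);

        /* vpermb %xmm4,%xmm5,%xmm6: one source holds indices, the
           other data; EVEX masking composes as usual. */
        __m128i r = _mm_permutexvar_epi8(idx, src);

        char out[17] = {0};
        _mm_storeu_si128((__m128i *)out, r);
        printf("%s\n", out);                 /* ponmlkjihgfedcba */
        return 0;
    }
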
  /external/valgrind/memcheck/tests/amd64-solaris/
context_sse.stdout.exp 2 xmm1=0.000000, xmm2=0.000000, xmm5=0.000000, xmm6=0.000000
4 xmm0=0.000000, xmm2=0.000000, xmm5=0.000000, xmm6=0.000000
  /external/valgrind/memcheck/tests/x86-solaris/
context_sse.stdout.exp 2 xmm1=0.000000, xmm2=0.000000, xmm5=0.000000, xmm6=0.000000
4 xmm0=0.000000, xmm2=0.000000, xmm5=0.000000, xmm6=0.000000
  /external/libvpx/libvpx/vpx_dsp/x86/
halfpix_variance_impl_sse2.asm 42 movdqu xmm5, XMMWORD PTR [rsi]
44 pavgb xmm5, xmm3 ; xmm5 = avg(xmm1,xmm3) horizontal line 1
53 pavgb xmm5, xmm1 ; xmm5 = vertical average of the above
55 movdqa xmm4, xmm5
56 punpcklbw xmm5, xmm0 ; xmm5 = words of above
61 psubw xmm5, xmm3 ; xmm5 -= xmm3
67 paddw xmm6, xmm5 ; xmm6 += accumulated column difference
    [all...]
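
The half-pixel variance kernel builds its sub-pixel prediction with pavgb, the rounded unsigned byte average (a + b + 1) >> 1; averaging a row with its right neighbor gives the horizontal half-pel, and averaging with the row below gives the vertical one, exactly the two pavgb steps above. A reference sketch (SSE2, compile with -msse2):

    #include <emmintrin.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
        uint8_t row0[16], row1[16], out[16];
        for (int i = 0; i < 16; i++) { row0[i] = (uint8_t)i; row1[i] = 255; }

        __m128i a = _mm_loadu_si128((const __m128i *)row0);
        __m128i b = _mm_loadu_si128((const __m128i *)row1);

        /* pavgb: (a + b + 1) >> 1 per unsigned byte, the half-pixel
           interpolation the variance loop accumulates against. */
        __m128i avg = _mm_avg_epu8(a, b);
        _mm_storeu_si128((__m128i *)out, avg);

        printf("%u %u\n", out[0], out[1]);  /* (0+255+1)>>1 = 128, 128 */
        return 0;
    }
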
sad_sse4.asm 38 movq xmm5, MMWORD PTR [rdi]
41 punpcklqdq xmm5, xmm3
44 movdqa xmm2, xmm5
45 mpsadbw xmm5, xmm0, 0x0
54 paddw xmm5, xmm2
55 paddw xmm5, xmm3
56 paddw xmm5, xmm4
58 paddw xmm1, xmm5
61 movq xmm5, MMWORD PTR [rdi+ rdx]
64 punpcklqdq xmm5, xmm
    [all...]
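
sad_sse4.asm is built around mpsadbw, which produces eight overlapping 4-byte sums of absolute differences per instruction; the immediate selects the 4-byte block of one operand and the starting offset in the other, and the kernel then folds the partial SADs together with paddw. A sketch (SSE4.1, compile with -msse4.1); the reference and source bytes are made up:

    #include <smmintrin.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
        uint8_t ref[16], src[16];
        for (int i = 0; i < 16; i++) { ref[i] = (uint8_t)i; src[i] = 0; }

        __m128i r = _mm_loadu_si128((const __m128i *)ref);
        __m128i s = _mm_loadu_si128((const __m128i *)src);

        /* mpsadbw r, s, 0x0: SAD of s[0..3] against the sliding
           windows r[0..3], r[1..4], ..., r[7..10], one word each. */
        __m128i sad = _mm_mpsadbw_epu8(r, s, 0x0);

        uint16_t out[8];
        _mm_storeu_si128((__m128i *)out, sad);
        printf("%u %u\n", out[0], out[1]);  /* 0+1+2+3=6, 1+2+3+4=10 */
        return 0;
    }
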
highbd_variance_impl_sse2.asm 84 pxor xmm5, xmm5
88 paddw xmm5, xmm1
95 paddw xmm5, xmm3
102 paddw xmm5, xmm1
108 paddw xmm5, xmm3
112 movdqa xmm1, xmm5
113 movdqa xmm2, xmm5
118 movdqa xmm2, xmm5
119 punpcklwd xmm5, xmm
    [all...]
  /hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp9/encoder/x86/
vp9_subpel_variance_impl_sse2.asm 45 movdqu xmm5, XMMWORD PTR [rsi]
47 pavgb xmm5, xmm3 ; xmm5 = avg(xmm1,xmm3) horizontal line 1
56 pavgb xmm5, xmm1 ; xmm5 = vertical average of the above
58 movdqa xmm4, xmm5
59 punpcklbw xmm5, xmm0 ; xmm5 = words of above
64 psubw xmm5, xmm3 ; xmm5 -= xmm3
70 paddw xmm6, xmm5 ; xmm6 += accumulated column difference
    [all...]
vp9_sad_sse4.asm 38 movq xmm5, MMWORD PTR [rdi]
41 punpcklqdq xmm5, xmm3
44 movdqa xmm2, xmm5
45 mpsadbw xmm5, xmm0, 0x0
54 paddw xmm5, xmm2
55 paddw xmm5, xmm3
56 paddw xmm5, xmm4
58 paddw xmm1, xmm5
61 movq xmm5, MMWORD PTR [rdi+ rdx]
64 punpcklqdq xmm5, xmm
    [all...]
  /external/llvm/test/MC/X86/
x86-32-fma3.s 3 // CHECK: vfmadd132pd %xmm2, %xmm5, %xmm1
5 vfmadd132pd %xmm2, %xmm5, %xmm1
7 // CHECK: vfmadd132pd (%eax), %xmm5, %xmm1
9 vfmadd132pd (%eax), %xmm5, %xmm1
11 // CHECK: vfmadd132ps %xmm2, %xmm5, %xmm1
13 vfmadd132ps %xmm2, %xmm5, %xmm1
15 // CHECK: vfmadd132ps (%eax), %xmm5, %xmm1
17 vfmadd132ps (%eax), %xmm5, %xmm1
19 // CHECK: vfmadd213pd %xmm2, %xmm5, %xmm1
21 vfmadd213pd %xmm2, %xmm5, %xmm1
    [all...]
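
These MC tests enumerate the FMA3 forms; the 132/213/231 digits say which operands feed the multiply and which the add, with the first-listed (Intel-order) operand overwritten, e.g. vfmadd213pd computes dst = src2*dst + src3. The intrinsic hides that choice and lets the compiler pick an encoding by register allocation (compile with -mfma):

    #include <immintrin.h>
    #include <stdio.h>

    int main(void) {
        __m128d a = _mm_set1_pd(2.0);
        __m128d b = _mm_set1_pd(3.0);
        __m128d c = _mm_set1_pd(4.0);

        /* One fused multiply-add, a*b + c, with a single rounding;
           any of vfmadd132/213/231pd can implement it. */
        __m128d r = _mm_fmadd_pd(a, b, c);

        printf("%f\n", _mm_cvtsd_f64(r));   /* 2*3 + 4 = 10 */
        return 0;
    }
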
x86-32-coverage.s     [all...]
  /hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp8/common/x86/
variance_impl_sse2.asm 169 pxor xmm5, xmm5
172 punpckhwd xmm5, xmm7
173 psrad xmm5, 16
176 paddd xmm6, xmm5
467 movdqa xmm5, xmm1
488 movdqa xmm3, xmm5 ;
489 movdqa xmm5, xmm1 ;
542 movdqa xmm5, xmm3
559 movdqa xmm1, xmm5 ;
    [all...]
  /external/libvpx/libvpx/third_party/libyuv/source/
rotate_win.cc 54 movq xmm5, qword ptr [eax + edi]
55 punpcklbw xmm4, xmm5
57 movdqa xmm5, xmm4
59 palignr xmm5, xmm5, 8
73 punpcklwd xmm5, xmm7
75 movdqa xmm7, xmm5
90 punpckldq xmm1, xmm5
93 movdqa xmm5, xmm1
95 palignr xmm5, xmm5,
155 movdqu xmm5, [eax + edi]
160 movdqa xmm5, xmm7
166 movdqa xmm5, xmm6 // use xmm5 as temp register.
168 punpckhbw xmm5, xmm7
173 movdqa xmm5, xmm0
175 punpckhwd xmm5, xmm2
177 movdqa xmm5, xmm1
179 punpckhwd xmm5, xmm3
181 movdqa xmm5, xmm4
183 punpckhwd xmm5, xmm6
185 movdqu xmm5, [esp] // restore xmm5
188 punpcklwd xmm5, xmm7
    [all...]
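
rotate_win.cc is libyuv's MSVC inline-assembly transpose: it builds an 8x8 byte transpose from punpcklbw/punpcklwd/punpckldq interleave stages and spills xmm5 to the stack when it runs out of registers (the movdqu [esp] restore above). The first interleave stage, roughly, as SSE2 intrinsics with made-up row data:

    #include <emmintrin.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
        uint8_t row0[16] = {0, 1, 2, 3, 4, 5, 6, 7};
        uint8_t row1[16] = {10, 11, 12, 13, 14, 15, 16, 17};

        __m128i r0 = _mm_loadu_si128((const __m128i *)row0);
        __m128i r1 = _mm_loadu_si128((const __m128i *)row1);

        /* punpcklbw: interleave the low 8 bytes of two rows into
           (row0[i], row1[i]) pairs -- column pairs of the transpose;
           punpcklwd/punpckldq widen the same trick to 2 and 4 rows. */
        __m128i pairs = _mm_unpacklo_epi8(r0, r1);

        uint8_t out[16];
        _mm_storeu_si128((__m128i *)out, pairs);
        printf("%u %u %u %u\n", out[0], out[1], out[2], out[3]); /* 0 10 1 11 */
        return 0;
    }
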
