HomeSort by relevance Sort by last modified time
    Searched refs:xmm5 (Results 1 - 25 of 352) sorted by null

1 2 3 4 5 6 7 8 9 10 11 >>

  /external/llvm/test/MC/X86/
x86-32-coverage.s     [all...]
x86-32-fma3.s 3 // CHECK: vfmadd132pd %xmm2, %xmm5, %xmm1
5 vfmadd132pd %xmm2, %xmm5, %xmm1
7 // CHECK: vfmadd132pd (%eax), %xmm5, %xmm1
9 vfmadd132pd (%eax), %xmm5, %xmm1
11 // CHECK: vfmadd132ps %xmm2, %xmm5, %xmm1
13 vfmadd132ps %xmm2, %xmm5, %xmm1
15 // CHECK: vfmadd132ps (%eax), %xmm5, %xmm1
17 vfmadd132ps (%eax), %xmm5, %xmm1
19 // CHECK: vfmadd213pd %xmm2, %xmm5, %xmm1
21 vfmadd213pd %xmm2, %xmm5, %xmm1
    [all...]
  /toolchain/binutils/binutils-2.25/gas/testsuite/gas/i386/
evex-lig.s 7 vaddsd %xmm4, %xmm5, %xmm6{%k7} # AVX512
8 vaddsd %xmm4, %xmm5, %xmm6{%k7}{z} # AVX512
9 vaddsd {rn-sae}, %xmm4, %xmm5, %xmm6{%k7} # AVX512
10 vaddsd {ru-sae}, %xmm4, %xmm5, %xmm6{%k7} # AVX512
11 vaddsd {rd-sae}, %xmm4, %xmm5, %xmm6{%k7} # AVX512
12 vaddsd {rz-sae}, %xmm4, %xmm5, %xmm6{%k7} # AVX512
13 vaddsd (%ecx), %xmm5, %xmm6{%k7} # AVX512
14 vaddsd -123456(%esp,%esi,8), %xmm5, %xmm6{%k7} # AVX512
15 vaddsd 1016(%edx), %xmm5, %xmm6{%k7} # AVX512 Disp8
16 vaddsd 1024(%edx), %xmm5, %xmm6{%k7} # AVX512
    [all...]
evex-lig256.d 12 [ ]*[a-f0-9]+: 62 f1 d7 2f 58 f4 vaddsd %xmm4,%xmm5,%xmm6\{%k7\}
13 [ ]*[a-f0-9]+: 62 f1 d7 af 58 f4 vaddsd %xmm4,%xmm5,%xmm6\{%k7\}\{z\}
14 [ ]*[a-f0-9]+: 62 f1 d7 1f 58 f4 vaddsd \{rn-sae\},%xmm4,%xmm5,%xmm6\{%k7\}
15 [ ]*[a-f0-9]+: 62 f1 d7 5f 58 f4 vaddsd \{ru-sae\},%xmm4,%xmm5,%xmm6\{%k7\}
16 [ ]*[a-f0-9]+: 62 f1 d7 3f 58 f4 vaddsd \{rd-sae\},%xmm4,%xmm5,%xmm6\{%k7\}
17 [ ]*[a-f0-9]+: 62 f1 d7 7f 58 f4 vaddsd \{rz-sae\},%xmm4,%xmm5,%xmm6\{%k7\}
18 [ ]*[a-f0-9]+: 62 f1 d7 2f 58 31 vaddsd \(%ecx\),%xmm5,%xmm6\{%k7\}
19 [ ]*[a-f0-9]+: 62 f1 d7 2f 58 b4 f4 c0 1d fe ff vaddsd -0x1e240\(%esp,%esi,8\),%xmm5,%xmm6\{%k7\}
20 [ ]*[a-f0-9]+: 62 f1 d7 2f 58 72 7f vaddsd 0x3f8\(%edx\),%xmm5,%xmm6\{%k7\}
21 [ ]*[a-f0-9]+: 62 f1 d7 2f 58 b2 00 04 00 00 vaddsd 0x400\(%edx\),%xmm5,%xmm6\{%k7\}
    [all...]
evex-lig512.d 12 [ ]*[a-f0-9]+: 62 f1 d7 4f 58 f4 vaddsd %xmm4,%xmm5,%xmm6\{%k7\}
13 [ ]*[a-f0-9]+: 62 f1 d7 cf 58 f4 vaddsd %xmm4,%xmm5,%xmm6\{%k7\}\{z\}
14 [ ]*[a-f0-9]+: 62 f1 d7 1f 58 f4 vaddsd \{rn-sae\},%xmm4,%xmm5,%xmm6\{%k7\}
15 [ ]*[a-f0-9]+: 62 f1 d7 5f 58 f4 vaddsd \{ru-sae\},%xmm4,%xmm5,%xmm6\{%k7\}
16 [ ]*[a-f0-9]+: 62 f1 d7 3f 58 f4 vaddsd \{rd-sae\},%xmm4,%xmm5,%xmm6\{%k7\}
17 [ ]*[a-f0-9]+: 62 f1 d7 7f 58 f4 vaddsd \{rz-sae\},%xmm4,%xmm5,%xmm6\{%k7\}
18 [ ]*[a-f0-9]+: 62 f1 d7 4f 58 31 vaddsd \(%ecx\),%xmm5,%xmm6\{%k7\}
19 [ ]*[a-f0-9]+: 62 f1 d7 4f 58 b4 f4 c0 1d fe ff vaddsd -0x1e240\(%esp,%esi,8\),%xmm5,%xmm6\{%k7\}
20 [ ]*[a-f0-9]+: 62 f1 d7 4f 58 72 7f vaddsd 0x3f8\(%edx\),%xmm5,%xmm6\{%k7\}
21 [ ]*[a-f0-9]+: 62 f1 d7 4f 58 b2 00 04 00 00 vaddsd 0x400\(%edx\),%xmm5,%xmm6\{%k7\}
    [all...]
avx512er-rcig.s 10 vrcp28ss {sae}, %xmm4, %xmm5, %xmm6{%k7} # AVX512ER
11 vrcp28sd {sae}, %xmm4, %xmm5, %xmm6{%k7} # AVX512ER
14 vrsqrt28ss {sae}, %xmm4, %xmm5, %xmm6{%k7} # AVX512ER
15 vrsqrt28sd {sae}, %xmm4, %xmm5, %xmm6{%k7} # AVX512ER
22 vrcp28ss xmm6{k7}, xmm5, xmm4, {sae} # AVX512ER
23 vrcp28sd xmm6{k7}, xmm5, xmm4, {sae} # AVX512ER
26 vrsqrt28ss xmm6{k7}, xmm5, xmm4, {sae} # AVX512ER
27 vrsqrt28sd xmm6{k7}, xmm5, xmm4, {sae} # AVX512ER
avx512f-rcig.s 10 vcmpsd $0xab, {sae}, %xmm4, %xmm5, %k5{%k7} # AVX512F
11 vcmpsd $123, {sae}, %xmm4, %xmm5, %k5{%k7} # AVX512F
12 vcmpss $0xab, {sae}, %xmm4, %xmm5, %k5{%k7} # AVX512F
13 vcmpss $123, {sae}, %xmm4, %xmm5, %k5{%k7} # AVX512F
14 vcomisd {sae}, %xmm5, %xmm6 # AVX512F
15 vcomiss {sae}, %xmm5, %xmm6 # AVX512F
20 vcvtss2sd {sae}, %xmm4, %xmm5, %xmm6{%k7} # AVX512F
29 vgetexpsd {sae}, %xmm4, %xmm5, %xmm6{%k7} # AVX512F
30 vgetexpss {sae}, %xmm4, %xmm5, %xmm6{%k7} # AVX512F
35 vgetmantsd $0xab, {sae}, %xmm4, %xmm5, %xmm6{%k7} # AVX512
    [all...]
avx512dq-rcig.s 10 vrangesd $0xab, {sae}, %xmm4, %xmm5, %xmm6{%k7} # AVX512DQ
11 vrangesd $123, {sae}, %xmm4, %xmm5, %xmm6{%k7} # AVX512DQ
12 vrangess $0xab, {sae}, %xmm4, %xmm5, %xmm6{%k7} # AVX512DQ
13 vrangess $123, {sae}, %xmm4, %xmm5, %xmm6{%k7} # AVX512DQ
18 vreducesd $0xab, {sae}, %xmm4, %xmm5, %xmm6{%k7} # AVX512DQ
19 vreducesd $123, {sae}, %xmm4, %xmm5, %xmm6{%k7} # AVX512DQ
20 vreducess $0xab, {sae}, %xmm4, %xmm5, %xmm6{%k7} # AVX512DQ
21 vreducess $123, {sae}, %xmm4, %xmm5, %xmm6{%k7} # AVX512DQ
32 vrangesd xmm6{k7}, xmm5, xmm4, {sae}, 0xab # AVX512DQ
33 vrangesd xmm6{k7}, xmm5, xmm4, {sae}, 123 # AVX512D
    [all...]
x86-64-avx-gather.s 43 vgatherdps %xmm5,0x8(,%xmm4,1),%xmm6
44 vgatherdps %xmm5,-0x8(,%xmm4,1),%xmm6
45 vgatherdps %xmm5,(,%xmm4,1),%xmm6
46 vgatherdps %xmm5,0x298(,%xmm4,1),%xmm6
47 vgatherdps %xmm5,0x8(,%xmm4,8),%xmm6
48 vgatherdps %xmm5,-0x8(,%xmm4,8),%xmm6
49 vgatherdps %xmm5,(,%xmm4,8),%xmm6
50 vgatherdps %xmm5,0x298(,%xmm4,8),%xmm6
52 vgatherdps %xmm5,0x8(,%xmm14,1),%xmm6
53 vgatherdps %xmm5,-0x8(,%xmm14,1),%xmm
    [all...]
avx-gather.s 24 vgatherdps %xmm5,0x8(,%xmm4,1),%xmm6
25 vgatherdps %xmm5,-0x8(,%xmm4,1),%xmm6
26 vgatherdps %xmm5,(,%xmm4,1),%xmm6
27 vgatherdps %xmm5,0x298(,%xmm4,1),%xmm6
28 vgatherdps %xmm5,0x8(,%xmm4,8),%xmm6
29 vgatherdps %xmm5,-0x8(,%xmm4,8),%xmm6
30 vgatherdps %xmm5,(,%xmm4,8),%xmm6
31 vgatherdps %xmm5,0x298(,%xmm4,8),%xmm6
38 vpgatherdd %xmm5,0x8(,%xmm4,1),%xmm6
39 vpgatherdd %xmm5,-0x8(,%xmm4,1),%xmm
    [all...]
avx512bw_vl-opts.s 6 vmovdqu8 %xmm5, %xmm6{%k7} # AVX512{BW,VL}
7 vmovdqu8.s %xmm5, %xmm6{%k7} # AVX512{BW,VL}
8 vmovdqu8 %xmm5, %xmm6{%k7}{z} # AVX512{BW,VL}
9 vmovdqu8.s %xmm5, %xmm6{%k7}{z} # AVX512{BW,VL}
10 vmovdqu8 %xmm5, %xmm6{%k7} # AVX512{BW,VL}
11 vmovdqu8.s %xmm5, %xmm6{%k7} # AVX512{BW,VL}
12 vmovdqu8 %xmm5, %xmm6{%k7}{z} # AVX512{BW,VL}
13 vmovdqu8.s %xmm5, %xmm6{%k7}{z} # AVX512{BW,VL}
22 vmovdqu16 %xmm5, %xmm6{%k7} # AVX512{BW,VL}
23 vmovdqu16.s %xmm5, %xmm6{%k7} # AVX512{BW,VL}
    [all...]
avx512f_vl-opts.s 6 vmovapd %xmm5, %xmm6{%k7} # AVX512{F,VL}
7 vmovapd.s %xmm5, %xmm6{%k7} # AVX512{F,VL}
8 vmovapd %xmm5, %xmm6{%k7}{z} # AVX512{F,VL}
9 vmovapd.s %xmm5, %xmm6{%k7}{z} # AVX512{F,VL}
10 vmovapd %xmm5, %xmm6{%k7} # AVX512{F,VL}
11 vmovapd.s %xmm5, %xmm6{%k7} # AVX512{F,VL}
12 vmovapd %xmm5, %xmm6{%k7}{z} # AVX512{F,VL}
13 vmovapd.s %xmm5, %xmm6{%k7}{z} # AVX512{F,VL}
22 vmovaps %xmm5, %xmm6{%k7} # AVX512{F,VL}
23 vmovaps.s %xmm5, %xmm6{%k7} # AVX512{F,VL}
    [all...]
evex-lig256-intel.d 12 [ ]*[a-f0-9]+: 62 f1 d7 2f 58 f4 vaddsd xmm6\{k7\},xmm5,xmm4
13 [ ]*[a-f0-9]+: 62 f1 d7 af 58 f4 vaddsd xmm6\{k7\}\{z\},xmm5,xmm4
14 [ ]*[a-f0-9]+: 62 f1 d7 1f 58 f4 vaddsd xmm6\{k7\},xmm5,xmm4,\{rn-sae\}
15 [ ]*[a-f0-9]+: 62 f1 d7 5f 58 f4 vaddsd xmm6\{k7\},xmm5,xmm4,\{ru-sae\}
16 [ ]*[a-f0-9]+: 62 f1 d7 3f 58 f4 vaddsd xmm6\{k7\},xmm5,xmm4,\{rd-sae\}
17 [ ]*[a-f0-9]+: 62 f1 d7 7f 58 f4 vaddsd xmm6\{k7\},xmm5,xmm4,\{rz-sae\}
18 [ ]*[a-f0-9]+: 62 f1 d7 2f 58 31 vaddsd xmm6\{k7\},xmm5,QWORD PTR \[ecx\]
19 [ ]*[a-f0-9]+: 62 f1 d7 2f 58 b4 f4 c0 1d fe ff vaddsd xmm6\{k7\},xmm5,QWORD PTR \[esp\+esi\*8-0x1e240\]
20 [ ]*[a-f0-9]+: 62 f1 d7 2f 58 72 7f vaddsd xmm6\{k7\},xmm5,QWORD PTR \[edx\+0x3f8\]
21 [ ]*[a-f0-9]+: 62 f1 d7 2f 58 b2 00 04 00 00 vaddsd xmm6\{k7\},xmm5,QWORD PTR \[edx\+0x400\
    [all...]
evex-lig512-intel.d 12 [ ]*[a-f0-9]+: 62 f1 d7 4f 58 f4 vaddsd xmm6\{k7\},xmm5,xmm4
13 [ ]*[a-f0-9]+: 62 f1 d7 cf 58 f4 vaddsd xmm6\{k7\}\{z\},xmm5,xmm4
14 [ ]*[a-f0-9]+: 62 f1 d7 1f 58 f4 vaddsd xmm6\{k7\},xmm5,xmm4,\{rn-sae\}
15 [ ]*[a-f0-9]+: 62 f1 d7 5f 58 f4 vaddsd xmm6\{k7\},xmm5,xmm4,\{ru-sae\}
16 [ ]*[a-f0-9]+: 62 f1 d7 3f 58 f4 vaddsd xmm6\{k7\},xmm5,xmm4,\{rd-sae\}
17 [ ]*[a-f0-9]+: 62 f1 d7 7f 58 f4 vaddsd xmm6\{k7\},xmm5,xmm4,\{rz-sae\}
18 [ ]*[a-f0-9]+: 62 f1 d7 4f 58 31 vaddsd xmm6\{k7\},xmm5,QWORD PTR \[ecx\]
19 [ ]*[a-f0-9]+: 62 f1 d7 4f 58 b4 f4 c0 1d fe ff vaddsd xmm6\{k7\},xmm5,QWORD PTR \[esp\+esi\*8-0x1e240\]
20 [ ]*[a-f0-9]+: 62 f1 d7 4f 58 72 7f vaddsd xmm6\{k7\},xmm5,QWORD PTR \[edx\+0x3f8\]
21 [ ]*[a-f0-9]+: 62 f1 d7 4f 58 b2 00 04 00 00 vaddsd xmm6\{k7\},xmm5,QWORD PTR \[edx\+0x400\
    [all...]
katmai.s 10 andnps %xmm6,%xmm5
17 cmpps $0x6,%xmm5,%xmm4
18 cmpps $0x7,(%esi),%xmm5
25 cmpltps %xmm5,%xmm4
26 cmpltps (%esi),%xmm5
34 cmpunordps %xmm6,%xmm5
41 cmpnltps %xmm5,%xmm4
42 cmpnltps (%esi),%xmm5
50 cmpordps %xmm6,%xmm5
58 cvtsi2ss (%esi),%xmm5
    [all...]
avx512ifma_vl.s 6 vpmadd52luq %xmm4, %xmm5, %xmm6{%k7} # AVX512{IFMA,VL}
7 vpmadd52luq %xmm4, %xmm5, %xmm6{%k7}{z} # AVX512{IFMA,VL}
8 vpmadd52luq (%ecx), %xmm5, %xmm6{%k7} # AVX512{IFMA,VL}
9 vpmadd52luq -123456(%esp,%esi,8), %xmm5, %xmm6{%k7} # AVX512{IFMA,VL}
10 vpmadd52luq (%eax){1to2}, %xmm5, %xmm6{%k7} # AVX512{IFMA,VL}
11 vpmadd52luq 2032(%edx), %xmm5, %xmm6{%k7} # AVX512{IFMA,VL} Disp8
12 vpmadd52luq 2048(%edx), %xmm5, %xmm6{%k7} # AVX512{IFMA,VL}
13 vpmadd52luq -2048(%edx), %xmm5, %xmm6{%k7} # AVX512{IFMA,VL} Disp8
14 vpmadd52luq -2064(%edx), %xmm5, %xmm6{%k7} # AVX512{IFMA,VL}
15 vpmadd52luq 1016(%edx){1to2}, %xmm5, %xmm6{%k7} # AVX512{IFMA,VL} Disp8
    [all...]
  /external/libvpx/libvpx/vpx_dsp/x86/
halfpix_variance_impl_sse2.asm 42 movdqu xmm5, XMMWORD PTR [rsi]
44 pavgb xmm5, xmm3 ; xmm5 = avg(xmm1,xmm3) horizontal line 1
53 pavgb xmm5, xmm1 ; xmm = vertical average of the above
55 movdqa xmm4, xmm5
56 punpcklbw xmm5, xmm0 ; xmm5 = words of above
61 psubw xmm5, xmm3 ; xmm5 -= xmm3
67 paddw xmm6, xmm5 ; xmm6 += accumulated column difference
    [all...]
  /hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp9/encoder/x86/
vp9_subpel_variance_impl_sse2.asm 45 movdqu xmm5, XMMWORD PTR [rsi]
47 pavgb xmm5, xmm3 ; xmm5 = avg(xmm1,xmm3) horizontal line 1
56 pavgb xmm5, xmm1 ; xmm = vertical average of the above
58 movdqa xmm4, xmm5
59 punpcklbw xmm5, xmm0 ; xmm5 = words of above
64 psubw xmm5, xmm3 ; xmm5 -= xmm3
70 paddw xmm6, xmm5 ; xmm6 += accumulated column difference
    [all...]
  /external/libvpx/libvpx/vp8/common/x86/
idctllm_sse2.asm 34 movd xmm5, [rdx]
37 pinsrw xmm5, [rdx], 4
39 pmullw xmm4, xmm5
41 ; Zero out xmm5, for use unpacking
42 pxor xmm5, xmm5
45 movd [rax], xmm5
46 movd [rax+32], xmm5
64 punpcklbw xmm0, xmm5
65 punpcklbw xmm1, xmm5
    [all...]
  /hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp8/common/x86/
idctllm_sse2.asm 34 movd xmm5, [rdx]
37 pinsrw xmm5, [rdx], 4
39 pmullw xmm4, xmm5
41 ; Zero out xmm5, for use unpacking
42 pxor xmm5, xmm5
45 movd [rax], xmm5
46 movd [rax+32], xmm5
64 punpcklbw xmm0, xmm5
65 punpcklbw xmm1, xmm5
    [all...]
  /external/libjpeg-turbo/simd/
jidctfst-sse2-64.asm 143 pshufd xmm5,xmm0,0xAA ; xmm5=col2=(02 02 02 02 02 02 02 02)
168 movdqa xmm5,xmm1
172 paddw xmm5,xmm3 ; xmm5=tmp13
176 psubw xmm1,xmm5 ; xmm1=tmp12
180 psubw xmm4,xmm5 ; xmm4=tmp3
182 paddw xmm6,xmm5 ; xmm6=tmp0
194 movdqa xmm5, XMMWORD [XMMBLOCK(5,0,rsi,SIZEOF_JCOEF)]
196 pmullw xmm5, XMMWORD [XMMBLOCK(5,0,rdx,SIZEOF_IFAST_MULT_TYPE)
    [all...]
jidctfst-sse2.asm 149 pshufd xmm5,xmm0,0xAA ; xmm5=col2=(02 02 02 02 02 02 02 02)
175 movdqa xmm5,xmm1
179 paddw xmm5,xmm3 ; xmm5=tmp13
183 psubw xmm1,xmm5 ; xmm1=tmp12
187 psubw xmm4,xmm5 ; xmm4=tmp3
189 paddw xmm6,xmm5 ; xmm6=tmp0
201 movdqa xmm5, XMMWORD [XMMBLOCK(5,0,esi,SIZEOF_JCOEF)]
203 pmullw xmm5, XMMWORD [XMMBLOCK(5,0,edx,SIZEOF_IFAST_MULT_TYPE)
    [all...]
jfdctfst-sse2-64.asm 110 movdqa xmm5,xmm2 ; transpose coefficients(phase 1)
112 punpckhwd xmm5,xmm3 ; xmm5=(24 34 25 35 26 36 27 37)
123 movdqa XMMWORD [wk(1)], xmm5 ; wk(1)=(24 34 25 35 26 36 27 37)
128 movdqa xmm5,xmm1 ; transpose coefficients(phase 1)
130 punpckhwd xmm5,xmm3 ; xmm5=(64 74 65 75 66 76 67 77)
136 punpckldq xmm2,xmm5 ; xmm2=(44 54 64 74 45 55 65 75)
137 punpckhdq xmm3,xmm5 ; xmm3=(46 56 66 76 47 57 67 77)
140 movdqa xmm5, XMMWORD [wk(1)] ; xmm5=(24 34 25 35 26 36 27 37
    [all...]
jfdctfst-sse2.asm 116 movdqa xmm5,xmm2 ; transpose coefficients(phase 1)
118 punpckhwd xmm5,xmm3 ; xmm5=(24 34 25 35 26 36 27 37)
129 movdqa XMMWORD [wk(1)], xmm5 ; wk(1)=(24 34 25 35 26 36 27 37)
134 movdqa xmm5,xmm1 ; transpose coefficients(phase 1)
136 punpckhwd xmm5,xmm3 ; xmm5=(64 74 65 75 66 76 67 77)
142 punpckldq xmm2,xmm5 ; xmm2=(44 54 64 74 45 55 65 75)
143 punpckhdq xmm3,xmm5 ; xmm3=(46 56 66 76 47 57 67 77)
146 movdqa xmm5, XMMWORD [wk(1)] ; xmm5=(24 34 25 35 26 36 27 37
    [all...]
jidctflt-sse2-64.asm 106 movq xmm5, XMM_MMWORD [MMBLOCK(5,0,rsi,SIZEOF_JCOEF)]
111 por xmm5,xmm6
113 por xmm5,xmm7
114 por xmm1,xmm5
178 movaps xmm5,xmm1
182 addps xmm5,xmm3 ; xmm5=tmp13
185 subps xmm1,xmm5 ; xmm1=tmp12
189 subps xmm4,xmm5 ; xmm4=tmp3
191 addps xmm6,xmm5 ; xmm6=tmp
    [all...]

Completed in 942 milliseconds

1 2 3 4 5 6 7 8 9 10 11 >>