Searched full:xmm0 (Results 1 - 25 of 668)

  /external/valgrind/none/tests/amd64/
pcmpxstrx64.stdout.exp 3 istri $0x4A: xmm0 55555555555555555555555555555555 rcx 5555555555550006 flags 00000881
4 istri $0x0A: xmm0 55555555555555555555555555555555 rcx 5555555555550000 flags 00000881
5 istrm $0x4A: xmm0 000000000000000000ffffffffffffff rcx 5555555555555555 flags 00000881
6 istrm $0x0A: xmm0 0000000000000000000000000000007f rcx 5555555555555555 flags 00000881
7 estri $0x4A: xmm0 55555555555555555555555555555555 rcx 555555555555000f flags 000008c1
8 estri $0x0A: xmm0 55555555555555555555555555555555 rcx 5555555555550000 flags 000008c1
9 estrm $0x4A: xmm0 ffffffffffffffffffffffffffffffff rcx 5555555555555555 flags 000008c1
10 estrm $0x0A: xmm0 0000000000000000000000000000ffff rcx 5555555555555555 flags 000008c1
13 istri $0x4A: xmm0 55555555555555555555555555555555 rcx 555555555555000f flags 000000c1
14 istri $0x0A: xmm0 55555555555555555555555555555555 rcx 5555555555550007 flags 000000c
    [all...]
pcmpxstrx64w.stdout.exp 3 istri $0x4B: xmm0 55555555555555555555555555555555 rcx 5555555555550002 flags 00000881
4 istri $0x0B: xmm0 55555555555555555555555555555555 rcx 5555555555550000 flags 00000881
5 istrm $0x4B: xmm0 00000000000000000000ffffffffffff rcx 5555555555555555 flags 00000881
6 istrm $0x0B: xmm0 00000000000000000000000000000007 rcx 5555555555555555 flags 00000881
7 estri $0x4B: xmm0 55555555555555555555555555555555 rcx 5555555555550007 flags 000008c1
8 estri $0x0B: xmm0 55555555555555555555555555555555 rcx 5555555555550000 flags 000008c1
9 estrm $0x4B: xmm0 ffffffffffffffffffffffffffffffff rcx 5555555555555555 flags 000008c1
10 estrm $0x0B: xmm0 000000000000000000000000000000ff rcx 5555555555555555 flags 000008c1
13 istri $0x4B: xmm0 55555555555555555555555555555555 rcx 5555555555550007 flags 000000c1
14 istri $0x0B: xmm0 55555555555555555555555555555555 rcx 5555555555550003 flags 000000c
    [all...]
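
The two expected-output files above exercise the SSE4.2 string-compare instructions (pcmpistri/pcmpistrm/pcmpestri/pcmpestrm) with byte and word element sizes and several imm8 control modes. A minimal sketch of driving the same instructions from C through the compiler intrinsics, using a single illustrative control mode (not the modes the valgrind test encodes) and illustrative names; build with -msse4.2:

    #include <stdio.h>
    #include <nmmintrin.h>

    int main(void) {
        __m128i a = _mm_set1_epi8('x');
        __m128i b = _mm_setr_epi8('a','b','x','d', 'e','f','g','h',
                                  'i','j','k','l', 'm','n','o','p');
        /* byte elements, compare-equal-each: index of the first position
           where the two 16-byte strings agree (16 if they never do) */
        int idx = _mm_cmpistri(a, b, _SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_EACH);
        printf("first matching byte at index %d\n", idx);
        return 0;
    }
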
  /external/llvm/test/CodeGen/X86/
vec_cast2.ll 7 ; CHECK-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm0[4,4,5,5,6,6,7,7]
8 ; CHECK-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
9 ; CHECK-NEXT: vpslld $24, %xmm0, %xmm0
10 ; CHECK-NEXT: vpsrad $24, %xmm0, %xmm0
    [all...]
vector-zext.ll 10 ; SSE2-NEXT: movdqa %xmm0, %xmm1
13 ; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
20 ; SSSE3-NEXT: movdqa %xmm0, %xmm1
23 ; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3
    [all...]
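
The vector-zext.ll checks above look for zero extension done with an interleave: the low 16-bit lanes are unpacked against a register assumed to hold zero (the xmm2 operand), leaving each value in the low half of a 32-bit lane. A minimal intrinsics sketch of the same pattern, with illustrative names:

    #include <emmintrin.h>

    /* widen the low four unsigned 16-bit lanes of v to 32 bits */
    static __m128i zext_u16_to_u32_lo(__m128i v) {
        __m128i zero = _mm_setzero_si128();
        return _mm_unpacklo_epi16(v, zero);   /* punpcklwd v, zero */
    }
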
vec_setcc.ll 10 ; SSE2: pmaxub %xmm0, %xmm1
11 ; SSE2: pcmpeqb %xmm1, %xmm0
14 ; SSE41: pmaxub %xmm0, %xmm1
15 ; SSE41: pcmpeqb %xmm1, %xmm0
18 ; AVX: vpmaxub %xmm1, %xmm0, %xmm1
19 ; AVX: vpcmpeqb %xmm1, %xmm0, %xmm0
27 ; SSE2: pminub %xmm0, %xmm1
28 ; SSE2: pcmpeqb %xmm1, %xmm0
31 ; SSE41: pminub %xmm0, %xmm
    [all...]
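
vec_setcc.ll checks the standard SSE lowering of unsigned byte comparisons: there is no unsigned compare instruction, so "a >= b" is computed as max(a, b) == a with pmaxub + pcmpeqb (and the pminub variant handles "a <= b" the same way). A minimal sketch with illustrative names:

    #include <emmintrin.h>

    /* 0xFF in every byte lane where a >= b (unsigned), 0x00 elsewhere */
    static __m128i cmpge_epu8(__m128i a, __m128i b) {
        return _mm_cmpeq_epi8(_mm_max_epu8(a, b), a);   /* pmaxub + pcmpeqb */
    }
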
vector-shuffle-128-v8.ll 13 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
18 ; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
26 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,2,1,0]
31 ; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[3,2,1,0]
39 ; SSE2-NEXT: shufpd {{.*#+}} xmm0 = xmm0[1],xmm1[0
    [all...]
uint_to_fp-2.ll 8 ; CHECK-NEXT: movsd .LCPI0_0, %xmm0
10 ; CHECK-NEXT: orpd %xmm0, %xmm1
11 ; CHECK-NEXT: subsd %xmm0, %xmm1
12 ; CHECK-NEXT: xorps %xmm0, %xmm0
13 ; CHECK-NEXT: cvtsd2ss %xmm1, %xmm0
14 ; CHECK-NEXT: movss %xmm0, (%esp)
29 ; CHECK-NEXT: movss %xmm0, %xmm1
30 ; CHECK-NEXT: movsd .LCPI1_0, %xmm0
31 ; CHECK-NEXT: orps %xmm0, %xmm
    [all...]
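
uint_to_fp-2.ll matches the classic bias trick for converting an unsigned 32-bit integer to float without an unsigned convert: OR the value into the mantissa of a large power-of-two double loaded from the constant pool, subtract that constant, then narrow with cvtsd2ss. A minimal sketch, assuming the pool constant is 2^52 (names are illustrative):

    #include <stdint.h>
    #include <string.h>

    static float u32_to_float_via_double(uint32_t x) {
        uint64_t bits = 0x4330000000000000ULL | x;  /* bit pattern of 2^52 + x */
        double d;
        memcpy(&d, &bits, sizeof d);                /* the orpd step */
        return (float)(d - 4503599627370496.0);     /* subsd 2^52, then cvtsd2ss */
    }
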
vector-shuffle-128-v16.ll 14 ; FIXME-NEXT: punpcklbw %xmm0, %xmm0
15 ; FIXME-NEXT: pshuflw {{.*}} # xmm0 = xmm0[0,0,0,0,4,5,6,7]
16 ; FIXME-NEXT: pshufd {{.*}} # xmm0 = xmm0[0,1,0,1]
21 ; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
22 ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,3
    [all...]
vector-shuffle-128-v4.ll 14 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,1]
19 ; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,1]
27 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,0]
32 ; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,2,0]
40 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,1,2
    [all...]
fast-isel-select-sse.ll 10 ; CHECK: cmpeqss %xmm1, %xmm0
11 ; CHECK-NEXT: andps %xmm0, %xmm2
12 ; CHECK-NEXT: andnps %xmm3, %xmm0
13 ; CHECK-NEXT: orps %xmm2, %xmm0
15 ; AVX: vcmpeqss %xmm1, %xmm0, %xmm0
16 ; AVX-NEXT: vblendvps %xmm0, %xmm2, %xmm3, %xmm0
24 ; CHECK: cmpeqsd %xmm1, %xmm0
25 ; CHECK-NEXT: andpd %xmm0, %xmm
    [all...]
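
fast-isel-select-sse.ll expects the branchless select pattern on plain SSE: a compare builds an all-ones/all-zeros mask, and the result is (mask & t) | (~mask & f) via andps/andnps/orps; AVX folds the three logic ops into a single vblendvps. A minimal sketch for the float case, with illustrative names:

    #include <xmmintrin.h>

    /* lane 0: (a == b) ? t : f, built without a branch */
    static __m128 select_eq_ss(__m128 a, __m128 b, __m128 t, __m128 f) {
        __m128 mask = _mm_cmpeq_ss(a, b);          /* cmpeqss */
        return _mm_or_ps(_mm_and_ps(mask, t),      /* andps */
                         _mm_andnot_ps(mask, f));  /* andnps + orps */
    }
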
vector-shuffle-combining.ll 57 ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,6,5,4]
62 ; AVX-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,6,5,4]
76 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,2,1,0,4,5,6,7]
81 ; AVX-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[3,2,1,0,4,5,6,7]
95 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0
    [all...]
splat-const.ll 10 ; SSE: xorps %xmm0, %xmm0
13 ; AVX: vxorps %xmm0, %xmm0, %xmm0
16 ; AVX2: vxorps %xmm0, %xmm0, %xmm0
29 ; SSE: movaps {{.*}}, %xmm0 # xmm0 = [42,42,42,42
    [all...]
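
splat-const.ll distinguishes the two splat cases shown above: an all-zero splat needs no load at all (xorps of a register with itself), while a non-zero constant splat such as [42,42,42,42] is materialized from the constant pool. A small sketch with illustrative names:

    #include <emmintrin.h>

    static __m128i splat_zero(void) { return _mm_setzero_si128(); }  /* xorps %xmm0, %xmm0 */
    static __m128i splat_42(void)   { return _mm_set1_epi32(42); }   /* movaps from the pool */
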
vector-sext.ll 13 ; SSE2-NEXT: movdqa %xmm0, %xmm1
14 ; SSE2-NEXT: # kill: XMM0<def> XMM1<kill>
15 ; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
16 ; SSE2-NEXT: pslld $16, %xmm0
17 ; SSE2-NEXT: psrad $16, %xmm0
25 ; SSSE3-NEXT: movdqa %xmm0, %xmm1
26 ; SSSE3-NEXT: # kill: XMM0<def> XMM1<kill>
27 ; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3
    [all...]
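
vector-sext.ll (and vec_cast2.ll earlier, with a 24-bit shift for bytes) checks the pre-SSE4.1 sign-extension idiom: unpack to widen the lanes, then shift left and arithmetic-shift right so the sign bit is replicated across the upper half. A minimal sketch for 16-bit to 32-bit lanes, with illustrative names:

    #include <emmintrin.h>

    static __m128i sext_i16_to_i32_lo(__m128i v) {
        __m128i w = _mm_unpacklo_epi16(v, v);   /* punpcklwd: v0,v0,v1,v1,... */
        w = _mm_slli_epi32(w, 16);              /* pslld $16 */
        return _mm_srai_epi32(w, 16);           /* psrad $16: replicate the sign bit */
    }
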
fast-isel-int-float-conversion.ll 7 ; SSE2: cvtsi2sdl %edi, %xmm0
8 ; AVX: vcvtsi2sdl %edi, %xmm0, %xmm0
17 ; SSE2: cvtsi2sdl (%rdi), %xmm0
18 ; AVX: vcvtsi2sdl (%rdi), %xmm0, %xmm0
28 ; SSE2: cvtsi2ssl %edi, %xmm0
29 ; AVX: vcvtsi2ssl %edi, %xmm0, %xmm0
38 ; SSE2: cvtsi2ssl (%rdi), %xmm0
    [all...]
pmul.ll 8 ; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
9 ; SSE2-NEXT: pmuludq %xmm1, %xmm0
10 ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
13 ; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
18 ; SSE41-NEXT: pmulld {{.*}}(%rip), %xmm0
29 ; ALL-NEXT: movdqa %xmm0, %xmm2
32 ; ALL-NEXT: pmuludq %xmm0, %xmm
    [all...]
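
pmul.ll covers the SSE2 fallback for a 32-bit lane-wise multiply: pmulld only exists from SSE4.1, so the even and odd lanes are multiplied separately with pmuludq and the low halves of the 64-bit products are shuffled back together, which is what the pshufd/pmuludq/punpckldq sequence above does. A minimal sketch with illustrative names:

    #include <emmintrin.h>

    static __m128i mullo_epi32_sse2(__m128i a, __m128i b) {
        __m128i even = _mm_mul_epu32(a, b);      /* 64-bit products of lanes 0 and 2 */
        __m128i odd  = _mm_mul_epu32(_mm_shuffle_epi32(a, _MM_SHUFFLE(3, 3, 1, 1)),
                                     _mm_shuffle_epi32(b, _MM_SHUFFLE(3, 3, 1, 1)));
        even = _mm_shuffle_epi32(even, _MM_SHUFFLE(0, 0, 2, 0));  /* keep low 32 bits */
        odd  = _mm_shuffle_epi32(odd,  _MM_SHUFFLE(0, 0, 2, 0));
        return _mm_unpacklo_epi32(even, odd);    /* punpckldq: lanes 0..3 restored */
    }
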
fast-isel-fptrunc-fpext.ll 26 ; AVX: vcvtss2sd %xmm0, %xmm0, %xmm0
36 ; AVX: vcvtsd2ss %xmm0, %xmm0, %xmm0
45 ; SSE: cvtss2sd (%rdi), %xmm0
46 ; AVX: vmovss (%rdi), %xmm0
47 ; AVX-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm
    [all...]
  /external/llvm/test/MC/X86/
x86_64-fma4-encoding.s 4 // CHECK: vfmaddss (%rcx), %xmm1, %xmm0, %xmm0
6 vfmaddss (%rcx), %xmm1, %xmm0, %xmm0
8 // CHECK: vfmaddss %xmm1, (%rcx), %xmm0, %xmm0
10 vfmaddss %xmm1, (%rcx),%xmm0, %xmm0
12 // CHECK: vfmaddss %xmm2, %xmm1, %xmm0, %xmm0
    [all...]
x86_64-sse4a.s 3 extrq $2, $3, %xmm0
4 # CHECK: extrq $2, $3, %xmm0
7 extrq %xmm1, %xmm0
8 # CHECK: extrq %xmm1, %xmm0
11 insertq $6, $5, %xmm1, %xmm0
12 # CHECK: insertq $6, $5, %xmm1, %xmm0
15 insertq %xmm1, %xmm0
16 # CHECK: insertq %xmm1, %xmm0
19 movntsd %xmm0, (%rdi)
20 # CHECK: movntsd %xmm0, (%rdi
    [all...]
  /bionic/libc/arch-x86/silvermont/string/
sse2-memset-slm.S 199 pxor %xmm0, %xmm0
201 movd %eax, %xmm0
202 pshufd $0, %xmm0, %xmm0
207 movdqu %xmm0, (%edx)
208 movdqu %xmm0, -16(%edx, %ecx)
211 movdqu %xmm0, 16(%edx)
212 movdqu %xmm0, -32(%edx, %ecx)
221 movdqu %xmm0, (%edx
    [all...]
  /system/core/libcutils/arch-x86_64/
android_memset32.S 153 /* Fill xmm0 with the pattern. */
154 movd %ecx, %xmm0
155 pshufd $0, %xmm0, %xmm0
160 movdqu %xmm0, (%rdi)
184 movdqa %xmm0, (%rdi)
185 movdqa %xmm0, 0x10(%rdi)
186 movdqa %xmm0, 0x20(%rdi)
187 movdqa %xmm0, 0x30(%rdi)
188 movdqa %xmm0, 0x40(%rdi
    [all...]
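
Both fill routines above use the same setup: broadcast the fill value into one XMM register (pxor for zero, otherwise movd plus pshufd $0) and then store it 16 bytes at a time; the real code also handles alignment, overlapping first/last stores, and small tails. A minimal sketch of just the broadcast-and-store core, with illustrative names:

    #include <stddef.h>
    #include <emmintrin.h>

    /* fill n bytes (assumed a multiple of 16 here) with a 32-bit pattern */
    static void fill_pattern32(unsigned char *dst, size_t n, int pattern) {
        __m128i v = _mm_cvtsi32_si128(pattern);   /* movd pattern, xmm0 */
        v = _mm_shuffle_epi32(v, 0);              /* pshufd $0: broadcast the dword */
        for (size_t i = 0; i + 16 <= n; i += 16)
            _mm_storeu_si128((__m128i *)(dst + i), v);   /* movdqu store */
    }
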
  /external/libvpx/libvpx/vp9/encoder/x86/
vp9_sad_sse4.asm 16 movdqa xmm0, XMMWORD PTR [rsi]
24 mpsadbw xmm1, xmm0, 0x0
25 mpsadbw xmm2, xmm0, 0x5
27 psrldq xmm0, 8
30 mpsadbw xmm3, xmm0, 0x0
31 mpsadbw xmm4, xmm0, 0x5
37 movdqa xmm0, XMMWORD PTR [rsi]
45 mpsadbw xmm5, xmm0, 0x0
46 mpsadbw xmm2, xmm0, 0x5
48 psrldq xmm0,
    [all...]
  /hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp9/encoder/x86/
vp9_sad_sse4.asm 16 movdqa xmm0, XMMWORD PTR [rsi]
24 mpsadbw xmm1, xmm0, 0x0
25 mpsadbw xmm2, xmm0, 0x5
27 psrldq xmm0, 8
30 mpsadbw xmm3, xmm0, 0x0
31 mpsadbw xmm4, xmm0, 0x5
37 movdqa xmm0, XMMWORD PTR [rsi]
45 mpsadbw xmm5, xmm0, 0x0
46 mpsadbw xmm2, xmm0, 0x5
48 psrldq xmm0,
    [all...]
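
Both copies of vp9_sad_sse4.asm use mpsadbw for block matching: each mpsadbw takes one 4-byte group of the reference row (chosen by the immediate) and produces eight 16-bit sums of absolute differences against eight sliding 4-byte windows of the source; the 0x0/0x5 immediates plus the psrldq by 8 walk through all four reference dwords. A minimal sketch of one such step, with illustrative names (requires SSE4.1):

    #include <smmintrin.h>

    /* eight SADs of ref dword 0 against src windows [0..3], [1..4], ..., [7..10] */
    static __m128i sad8_windows(__m128i src, __m128i ref) {
        return _mm_mpsadbw_epu8(src, ref, 0x0);   /* mpsadbw src, ref, 0x0 */
    }
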
  /external/libvpx/libvpx/vp9/common/x86/
vp9_subpixel_bilinear_sse2.asm 37 punpckldq xmm0, xmm1 ;two row in one register
38 punpcklbw xmm0, xmm2 ;unpack to word
39 pmullw xmm0, xmm4 ;multiply the filter factors
41 movdqa xmm1, xmm0
43 paddsw xmm0, xmm1
45 paddsw xmm0, xmm3 ;rounding
46 psraw xmm0, 7 ;shift
47 packuswb xmm0, xmm0 ;pack to byte
51 pavgb xmm0, xmm
    [all...]
  /hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp9/common/x86/
vp9_subpixel_bilinear_sse2.asm 37 punpckldq xmm0, xmm1 ;two row in one register
38 punpcklbw xmm0, xmm2 ;unpack to word
39 pmullw xmm0, xmm4 ;multiply the filter factors
41 movdqa xmm1, xmm0
43 paddsw xmm0, xmm1
45 paddsw xmm0, xmm3 ;rounding
46 psraw xmm0, 7 ;shift
47 packuswb xmm0, xmm0 ;pack to byte
51 pavgb xmm0, xmm
    [all...]
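
The bilinear kernel (both copies above) spells out its steps in the comments: unpack the pixels to 16-bit words, multiply by the filter taps, add the two taps plus a rounding constant, shift right by 7, and pack back to bytes with saturation. A minimal scalar sketch of the same per-pixel arithmetic, assuming 7-bit taps that sum to 128 and a rounding term of 64 (names are illustrative):

    #include <stdint.h>

    static uint8_t bilinear_pixel(uint8_t a, uint8_t b, int f0, int f1) {
        int sum = a * f0 + b * f1 + 64;   /* pmullw + paddsw, plus rounding */
        sum >>= 7;                        /* psraw $7 */
        if (sum < 0)   sum = 0;           /* packuswb saturates to [0, 255] */
        if (sum > 255) sum = 255;
        return (uint8_t)sum;
    }
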
  /external/libvpx/libvpx/vp8/common/x86/
mfqe_sse2.asm 33 movd xmm0, arg(4) ; src_weight
34 pshuflw xmm0, xmm0, 0x0 ; replicate to all low words
35 punpcklqdq xmm0, xmm0 ; replicate to all hi words
38 psubw xmm1, xmm0 ; dst_weight
57 pmullw xmm2, xmm0
58 pmullw xmm3, xmm0
110 movd xmm0, arg(4) ; src_weight
111 pshuflw xmm0, xmm0, 0x0 ; replicate to all low word
    [all...]
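
mfqe_sse2.asm sets up a weighted blend: the source weight is broadcast to every 16-bit lane (pshuflw + punpcklqdq), the destination weight is derived as its complement (the psubw), and each pixel then becomes a fixed-point weighted average of the source and destination rows. A minimal scalar sketch, assuming weights out of 256 and ignoring rounding (names are illustrative):

    #include <stdint.h>

    static uint8_t blend_pixel(uint8_t src, uint8_t dst, int src_weight) {
        int dst_weight = 256 - src_weight;   /* the psubw complement */
        return (uint8_t)((src * src_weight + dst * dst_weight) >> 8);
    }
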
