    Searched full:xmm3 (Results 201 - 225 of 523)


  /toolchain/binutils/binutils-2.25/gas/testsuite/gas/i386/
x86-64-arch-2-2.l 41 [ ]*12[ ]+\?\?\?\? F30F58DC addss %xmm4,%xmm3
43 [ ]*14[ ]+\?\?\?\? F20F58DC addsd %xmm4,%xmm3
45 [ ]*16[ ]+addsubpd %xmm4,%xmm3
47 [ ]*18[ ]+phaddw %xmm4,%xmm3
49 [ ]*20[ ]+phminposuw %xmm1,%xmm3
x86-64-arch-2-bdver1.d 15 [ ]*[a-f0-9]+: f3 0f 58 dc addss %xmm4,%xmm3
16 [ ]*[a-f0-9]+: f2 0f 58 dc addsd %xmm4,%xmm3
17 [ ]*[a-f0-9]+: 66 0f d0 dc addsubpd %xmm4,%xmm3
18 [ ]*[a-f0-9]+: 66 0f 38 01 dc phaddw %xmm4,%xmm3
19 [ ]*[a-f0-9]+: 66 0f 38 41 d9 phminposuw %xmm1,%xmm3
x86-64-arch-2-bdver2.d 15 [ ]*[a-f0-9]+: f3 0f 58 dc addss %xmm4,%xmm3
16 [ ]*[a-f0-9]+: f2 0f 58 dc addsd %xmm4,%xmm3
17 [ ]*[a-f0-9]+: 66 0f d0 dc addsubpd %xmm4,%xmm3
18 [ ]*[a-f0-9]+: 66 0f 38 01 dc phaddw %xmm4,%xmm3
19 [ ]*[a-f0-9]+: 66 0f 38 41 d9 phminposuw %xmm1,%xmm3
x86-64-arch-2-bdver3.d 15 [ ]*[a-f0-9]+: f3 0f 58 dc addss %xmm4,%xmm3
16 [ ]*[a-f0-9]+: f2 0f 58 dc addsd %xmm4,%xmm3
17 [ ]*[a-f0-9]+: 66 0f d0 dc addsubpd %xmm4,%xmm3
18 [ ]*[a-f0-9]+: 66 0f 38 01 dc phaddw %xmm4,%xmm3
19 [ ]*[a-f0-9]+: 66 0f 38 41 d9 phminposuw %xmm1,%xmm3
x86-64-arch-2-bdver4.d 15 [ ]*[a-f0-9]+: f3 0f 58 dc addss %xmm4,%xmm3
16 [ ]*[a-f0-9]+: f2 0f 58 dc addsd %xmm4,%xmm3
17 [ ]*[a-f0-9]+: 66 0f d0 dc addsubpd %xmm4,%xmm3
18 [ ]*[a-f0-9]+: 66 0f 38 01 dc phaddw %xmm4,%xmm3
19 [ ]*[a-f0-9]+: 66 0f 38 41 d9 phminposuw %xmm1,%xmm3
x86-64-arch-2-btver1.d 15 [ ]*[a-f0-9]+: f3 0f 58 dc addss %xmm4,%xmm3
16 [ ]*[a-f0-9]+: f2 0f 58 dc addsd %xmm4,%xmm3
17 [ ]*[a-f0-9]+: 66 0f d0 dc addsubpd %xmm4,%xmm3
18 [ ]*[a-f0-9]+: 66 0f 38 01 dc phaddw %xmm4,%xmm3
19 [ ]*[a-f0-9]+: 66 0f 38 41 d9 phminposuw %xmm1,%xmm3
x86-64-arch-2-btver2.d 15 [ ]*[a-f0-9]+: f3 0f 58 dc addss %xmm4,%xmm3
16 [ ]*[a-f0-9]+: f2 0f 58 dc addsd %xmm4,%xmm3
17 [ ]*[a-f0-9]+: 66 0f d0 dc addsubpd %xmm4,%xmm3
18 [ ]*[a-f0-9]+: 66 0f 38 01 dc phaddw %xmm4,%xmm3
19 [ ]*[a-f0-9]+: 66 0f 38 41 d9 phminposuw %xmm1,%xmm3
x86-64-arch-2-lzcnt.d 15 [ ]*[a-f0-9]+: f3 0f 58 dc addss %xmm4,%xmm3
16 [ ]*[a-f0-9]+: f2 0f 58 dc addsd %xmm4,%xmm3
17 [ ]*[a-f0-9]+: 66 0f d0 dc addsubpd %xmm4,%xmm3
18 [ ]*[a-f0-9]+: 66 0f 38 01 dc phaddw %xmm4,%xmm3
19 [ ]*[a-f0-9]+: 66 0f 38 41 d9 phminposuw %xmm1,%xmm3
x86-64-arch-2-prefetchw.d 15 [ ]*[a-f0-9]+: f3 0f 58 dc addss %xmm4,%xmm3
16 [ ]*[a-f0-9]+: f2 0f 58 dc addsd %xmm4,%xmm3
17 [ ]*[a-f0-9]+: 66 0f d0 dc addsubpd %xmm4,%xmm3
18 [ ]*[a-f0-9]+: 66 0f 38 01 dc phaddw %xmm4,%xmm3
19 [ ]*[a-f0-9]+: 66 0f 38 41 d9 phminposuw %xmm1,%xmm3
x86-64-arch-2.d 14 [ ]*[a-f0-9]+: f3 0f 58 dc addss %xmm4,%xmm3
15 [ ]*[a-f0-9]+: f2 0f 58 dc addsd %xmm4,%xmm3
16 [ ]*[a-f0-9]+: 66 0f d0 dc addsubpd %xmm4,%xmm3
17 [ ]*[a-f0-9]+: 66 0f 38 01 dc phaddw %xmm4,%xmm3
18 [ ]*[a-f0-9]+: 66 0f 38 41 d9 phminposuw %xmm1,%xmm3
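
The instructions these listings and dumps check (addss, addsd, addsubpd, phaddw, phminposuw) span SSE, SSE2, SSE3, SSSE3 and SSE4.1. As a hedged illustration only, not part of the testsuite, the same operations exist as C intrinsics; the wrapper names below are hypothetical, and the sketch assumes a compiler invoked with something like -msse4.1:

    /* Intrinsic counterparts of the instructions in the x86-64-arch-2 tests. */
    #include <smmintrin.h>   /* SSE4.1 header; also provides SSE/SSE2/SSE3/SSSE3 */

    __m128  add_scalar_ps(__m128 a, __m128 b)   { return _mm_add_ss(a, b); }     /* addss              */
    __m128d add_scalar_pd(__m128d a, __m128d b) { return _mm_add_sd(a, b); }     /* addsd              */
    __m128d addsub_pd(__m128d a, __m128d b)     { return _mm_addsub_pd(a, b); }  /* addsubpd, SSE3     */
    __m128i hadd_w(__m128i a, __m128i b)        { return _mm_hadd_epi16(a, b); } /* phaddw, SSSE3      */
    __m128i minpos_uw(__m128i a)                { return _mm_minpos_epu16(a); }  /* phminposuw, SSE4.1 */
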
  /external/boringssl/win-x86/crypto/sha/
sha1-586.asm 1453 movdqu xmm3,[ebp-16]
1479 movdqa xmm6,xmm3
1482 paddd xmm7,xmm3
1554 pxor xmm7,xmm3
1602 punpcklqdq xmm6,xmm3
1659 pshufd xmm7,xmm3,238
1670 movdqa [64+esp],xmm3
1676 pxor xmm7,xmm3
1689 movdqa xmm3,xmm7
1695 pslldq xmm3,1
    [all...]
  /external/fec/
sse2bfly29.s 67 movdqa Branchtab29_sse2+128+(16*\GROUP),%xmm3
69 pxor %xmm5,%xmm3
70 pavgb %xmm3,%xmm4
76 movdqa ((16*\GROUP)+128)(%esi),%xmm3 # Incoming path metric, high bit = 1
78 movdqa %xmm3,%xmm1
80 paddusb %xmm4,%xmm3
90 pminub %xmm3,%xmm2
93 pcmpeqb %xmm2,%xmm3
104 punpckhbw %xmm3,%xmm1
105 punpcklbw %xmm3,%xmm
    [all...]
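
The sse2bfly29.s hits are an SSE2 Viterbi butterfly: branch metrics come from pxor/pavgb against Branchtab29_sse2, are added to the incoming path metrics with saturating paddusb, and pminub/pcmpeqb pick the survivor and its decision bit before punpcklbw/punpckhbw interleave the results. A minimal sketch of that add-compare-select step, assuming 16 one-byte path metrics per vector (the type and function names are hypothetical, not the fec library's API):

    /* Hedged sketch of the paddusb / pminub / pcmpeqb pattern above. */
    #include <emmintrin.h>

    typedef struct {
        __m128i survivor;  /* new path metrics, one unsigned byte per state  */
        __m128i decision;  /* 0xFF where the second candidate won, else 0x00 */
    } acs16;

    static acs16 add_compare_select(__m128i old0, __m128i old1,
                                    __m128i bm0, __m128i bm1)
    {
        __m128i m0 = _mm_adds_epu8(old0, bm0);        /* paddusb: candidate 0   */
        __m128i m1 = _mm_adds_epu8(old1, bm1);        /* paddusb: candidate 1   */
        acs16 r;
        r.survivor = _mm_min_epu8(m0, m1);            /* pminub: smaller metric */
        r.decision = _mm_cmpeq_epi8(r.survivor, m1);  /* pcmpeqb: who won       */
        return r;
    }
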
  /external/libjpeg-turbo/simd/
jcsample-sse2-64.asm 128 movdqa xmm3,xmm1
133 psrlw xmm3,BYTE_BIT
136 paddw xmm1,xmm3
265 pxor xmm3,xmm3
273 movdqa xmm3, XMMWORD [rsi+1*SIZEOF_XMMWORD]
286 movdqa xmm5,xmm3
289 pand xmm3,xmm6
292 paddw xmm3,xmm5
295 paddw xmm2,xmm3
    [all...]
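
The jcsample hits (psrlw by BYTE_BIT, pand with a 0x00FF mask, paddw) split each 16-bit lane into its even and odd bytes and sum adjacent pixel pairs, the core of the 2:1 horizontal downsample. A hedged sketch of just that pair-summing step; the real routine's bias and rounding are omitted:

    /* Illustrative pairwise byte sum mirroring the psrlw/pand/paddw hits. */
    #include <emmintrin.h>

    /* Returns eight 16-bit sums: in[0]+in[1], in[2]+in[3], ..., in[14]+in[15]. */
    static __m128i sum_adjacent_bytes(__m128i in)
    {
        __m128i even = _mm_and_si128(in, _mm_set1_epi16(0x00FF)); /* pand           */
        __m128i odd  = _mm_srli_epi16(in, 8);                     /* psrlw BYTE_BIT */
        return _mm_add_epi16(even, odd);                          /* paddw          */
    }
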
  /external/libyuv/files/source/
row_gcc.cc 226 "movdqu " MEMACCESS2(0x20,0) ",%%xmm3 \n"
228 "movdqa %%xmm3,%%xmm2 \n"
239 "palignr $0x4,%%xmm3,%%xmm3 \n"
240 "pshufb %%xmm4,%%xmm3 \n"
242 "por %%xmm5,%%xmm3 \n"
243 "movdqu %%xmm3," MEMACCESS2(0x30,1) " \n"
251 : "memory", "cc" , "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5"
264 "movdqu " MEMACCESS2(0x20,0) ",%%xmm3 \n"
266 "movdqa %%xmm3,%%xmm2 \n
    [all...]
  /external/llvm/test/CodeGen/X86/
stack-folding-int-avx1.ll 14 %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
23 %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
32 %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
41 %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
50 %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
59 %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
88 %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
108 %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
117 %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
126 %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xm (…)
    [all...]
pr3154.ll 40 %asmtmp = call { i32, i32 } asm sideeffect "1: \0A\09movapd %xmm7, %xmm1 \0A\09mulpd %xmm1, %xmm1 \0A\09movapd %xmm6, %xmm0 \0A\09subpd %xmm1, %xmm0 \0A\09pshufd $$0x4e, %xmm0, %xmm1 \0A\09cvtpi2pd ($3,$0), %xmm2 \0A\09cvtpi2pd -1*4($3,$1), %xmm3 \0A\09mulpd %xmm0, %xmm2 \0A\09mulpd %xmm1, %xmm3 \0A\09movapd %xmm2, ($2,$0,2) \0A\09movupd %xmm3, -1*8($2,$1,2) \0A\09subpd %xmm5, %xmm7 \0A\09sub $$8, $1 \0A\09add $$8, $0 \0A\09jl 1b \0A\09", "=&r,=&r,r,r,0,1,~{dirflag},~{fpsr},~{flags}"(double* %16, i32* %17, i32 %12, i32 %14) nounwind ; <{ i32, i32 }> [#uses=0]
44 %asmtmp23 = call { i32, i32 } asm sideeffect "1: \0A\09movapd %xmm7, %xmm1 \0A\09mulpd %xmm1, %xmm1 \0A\09movapd %xmm6, %xmm0 \0A\09subpd %xmm1, %xmm0 \0A\09pshufd $$0x4e, %xmm0, %xmm1 \0A\09cvtpi2pd ($3,$0), %xmm2 \0A\09cvtpi2pd -2*4($3,$1), %xmm3 \0A\09mulpd %xmm0, %xmm2 \0A\09mulpd %xmm1, %xmm3 \0A\09movapd %xmm2, ($2,$0,2) \0A\09movapd %xmm3, -2*8($2,$1,2) \0A\09subpd %xmm5, %xmm7 \0A\09sub $$8, $1 \0A\09add $$8, $0 \0A\09jl 1b \0A\09", "=&r,=&r,r,r,0,1,~{dirflag},~{fpsr},~{flags}"(double* %16, i32* %17, i32 %12, i32 %14) nounwind ; <{ i32, i32 }> [#uses=0]
84 %asmtmp32 = call i32 asm sideeffect "movsd ff_pd_1, %xmm0 \0A\09movsd ff_pd_1, %xmm1 \0A\09movsd ff_pd_1, %xmm2 \0A\091: \0A\09movapd ($4,$0), %xmm3 \0A\09movupd -8($5,$0), %xmm4 \0A\09movapd ($5,$0), %xmm5 \0A\09mulpd %xmm3, %xmm4 \0A\09mulpd %xmm3, %xmm5 \0A\09mulpd -16($5,$0), %xmm3 \0A\09addpd %xmm4, %xmm1 \0A\09addpd %xmm5, %xmm0 \0A\09addp (…)
    [all...]
stack-folding-int-sse42.ll 14 %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
23 %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
32 %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
41 %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
50 %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
59 %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
115 %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
135 %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
144 %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
153 %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xm (…)
    [all...]
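
These stack-folding tests emit an inline-asm nop that clobbers nearly every XMM register, so the value under test must be spilled to a stack slot and the following operation folded from memory. The same register-pressure trick can be reproduced from C; a hedged sketch assuming GCC/Clang extended asm on x86-64 (the function is hypothetical, and it simply clobbers all of xmm0-xmm15):

    /* Clobbering all vector registers forces v into a stack slot across the nop,
     * so the add below is typically folded from memory, which is the behaviour
     * the stack-folding tests verify. */
    #include <emmintrin.h>

    __m128i spill_then_add(__m128i v)
    {
        __asm__ volatile ("nop"
                          : /* no outputs */
                          : /* no inputs  */
                          : "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5",
                            "xmm6", "xmm7", "xmm8", "xmm9", "xmm10", "xmm11",
                            "xmm12", "xmm13", "xmm14", "xmm15");
        return _mm_add_epi32(v, v);
    }
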
  /bionic/libc/arch-x86/atom/string/
sse2-strlen-atom.S 190 pxor %xmm3, %xmm3
195 pcmpeqb (%eax), %xmm3
196 pmovmskb %xmm3, %edx
224 pcmpeqb (%eax), %xmm3
225 pmovmskb %xmm3, %edx
253 pcmpeqb (%eax), %xmm3
254 pmovmskb %xmm3, %edx
282 pcmpeqb (%eax), %xmm3
283 pmovmskb %xmm3, %ed
    [all...]
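
The strlen hits follow the classic SSE2 scan: pxor zeroes a register, pcmpeqb compares 16 bytes against zero, and pmovmskb turns the byte-wise result into a 16-bit mask whose first set bit marks the terminator. A hedged C sketch of that probe loop; the careful alignment handling of the real routine is ignored, and 16-byte alignment of p is simply assumed:

    /* Illustrative SSE2 zero-byte scan (pxor / pcmpeqb / pmovmskb pattern). */
    #include <emmintrin.h>
    #include <stddef.h>

    static size_t strlen_sse2_sketch(const char *p)   /* p assumed 16-byte aligned */
    {
        const __m128i zero = _mm_setzero_si128();     /* pxor %xmm3, %xmm3 */
        const char *s = p;
        for (;;) {
            __m128i chunk = _mm_load_si128((const __m128i *)s);
            int mask = _mm_movemask_epi8(_mm_cmpeq_epi8(chunk, zero)); /* pcmpeqb + pmovmskb */
            if (mask)
                return (size_t)(s - p) + (size_t)__builtin_ctz(mask);  /* first zero byte */
            s += 16;
        }
    }
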
  /external/libvpx/libvpx/vpx_dsp/x86/
vpx_subpixel_8t_sse2.asm 25 pshuflw xmm3, xmm7, 11111111b ;k3
33 punpcklqdq xmm2, xmm3
53 punpckldq xmm2, xmm3
98 pshuflw xmm3, xmm7, 11111111b ;k3
107 punpcklwd xmm3, xmm3
116 movdqa k3, xmm3
137 movq xmm3, [rsi + rax * 2 + %1] ;3
149 punpcklbw xmm3, zero
158 pmullw xmm3, k
    [all...]
vpx_high_subpixel_8t_sse2.asm 25 pshuflw xmm3, xmm7, 11111111b ;k3
34 punpcklwd xmm3, xmm4
39 movdqa k3k4, xmm3
65 punpcklwd xmm3, xmm4
70 pmaddwd xmm3, k3k4
74 paddd xmm0, xmm3
101 pshuflw xmm3, xmm7, 11111111b ;k3
107 punpcklqdq xmm3, xmm3
111 punpckhwd xmm3, xmm
    [all...]
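
In these 8-tap subpixel filters, pshuflw and punpcklwd broadcast a pair of taps (e.g. k3 and k4) into every 32-bit lane, and pmaddwd then computes tap3*pixel3 + tap4*pixel4 per lane in a single instruction, with paddd accumulating the four tap pairs. A hedged sketch of one tap-pair step; the function name and data layout are illustrative, not the libvpx code:

    /* Illustrative pmaddwd tap-pair multiply-accumulate, as in the k3k4 hits. */
    #include <emmintrin.h>

    /* pix3/pix4: eight 16-bit pixels each; k3/k4: one pair of filter taps.
     * Returns four 32-bit partial sums k3*p3 + k4*p4 for the low four pixels. */
    static __m128i filter_tap_pair(__m128i pix3, __m128i pix4, short k3, short k4)
    {
        __m128i taps   = _mm_setr_epi16(k3, k4, k3, k4, k3, k4, k3, k4); /* k3k4      */
        __m128i pixels = _mm_unpacklo_epi16(pix3, pix4);                 /* punpcklwd */
        return _mm_madd_epi16(pixels, taps);                             /* pmaddwd   */
    }
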
  /hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp9/common/x86/
vp9_subpixel_8t_sse2.asm 25 pshuflw xmm3, xmm7, 11111111b ;k3
33 punpcklqdq xmm2, xmm3
53 punpckldq xmm2, xmm3
98 pshuflw xmm3, xmm7, 11111111b ;k3
107 punpcklwd xmm3, xmm3
116 movdqa k3, xmm3
137 movq xmm3, [rsi + rax * 2 + %1] ;3
149 punpcklbw xmm3, zero
158 pmullw xmm3, k
    [all...]
  /bionic/libm/x86/
e_pow.S 123 movapd %xmm0, %xmm3
140 psllq $12, %xmm3
143 psrlq $12, %xmm3
155 orpd %xmm1, %xmm3
163 andpd %xmm3, %xmm5
166 subsd %xmm5, %xmm3
172 mulsd %xmm0, %xmm3
182 unpcklpd %xmm3, %xmm5
184 addsd %xmm5, %xmm3
189 pshufd $68, %xmm3, %xmm
    [all...]
  /bionic/libm/x86_64/
e_pow.S 104 movq %xmm0, %xmm3
120 psllq $12, %xmm3
123 psrlq $12, %xmm3
135 orpd %xmm1, %xmm3
140 andpd %xmm3, %xmm5
144 subsd %xmm5, %xmm3
151 mulsd %xmm0, %xmm3
162 unpcklpd %xmm3, %xmm5
164 addsd %xmm5, %xmm3
169 pshufd $68, %xmm3, %xmm
    [all...]
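
In both e_pow.S variants, psllq $12 followed by psrlq $12 clears the sign bit and the 11 exponent bits of the double held in xmm3, isolating the 52-bit mantissa before orpd splices in a fixed exponent. A hedged scalar view of what that shift pair does:

    /* Scalar equivalent of the psllq $12 / psrlq $12 idiom above. */
    #include <stdint.h>
    #include <string.h>

    /* Splits x into mantissa bits and biased exponent; sign handling omitted. */
    static void split_double(double x, uint64_t *mantissa, int *biased_exp)
    {
        uint64_t bits;
        memcpy(&bits, &x, sizeof bits);     /* reinterpret the double's bits  */
        *mantissa   = (bits << 12) >> 12;   /* drop 1 sign + 11 exponent bits */
        *biased_exp = (int)((bits >> 52) & 0x7FF);
    }
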
  /external/libvpx/libvpx/vp8/common/x86/
subpixel_mmx.asm 276 paddw mm3, [GLOBAL(rd)] ; xmm3 += round value
277 psraw mm3, VP8_FILTER_SHIFT ; xmm3 /= 128
317 paddw mm3, [GLOBAL(rd)] ; xmm3 += round value
318 psraw mm3, VP8_FILTER_SHIFT ; xmm3 /= 128
334 paddw mm3, [GLOBAL(rd)] ; xmm3 += round value
335 psraw mm3, VP8_FILTER_SHIFT ; xmm3 /= 128
431 paddw mm3, [GLOBAL(rd)] ; xmm3 += round value
432 psraw mm3, VP8_FILTER_SHIFT ; xmm3 /= 128
472 paddw mm3, [GLOBAL(rd)] ; xmm3 += round value
473 psraw mm3, VP8_FILTER_SHIFT ; xmm3 /= 12
    [all...]
  /hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp8/common/x86/
subpixel_mmx.asm 276 paddw mm3, [GLOBAL(rd)] ; xmm3 += round value
277 psraw mm3, VP8_FILTER_SHIFT ; xmm3 /= 128
317 paddw mm3, [GLOBAL(rd)] ; xmm3 += round value
318 psraw mm3, VP8_FILTER_SHIFT ; xmm3 /= 128
334 paddw mm3, [GLOBAL(rd)] ; xmm3 += round value
335 psraw mm3, VP8_FILTER_SHIFT ; xmm3 /= 128
431 paddw mm3, [GLOBAL(rd)] ; xmm3 += round value
432 psraw mm3, VP8_FILTER_SHIFT ; xmm3 /= 128
472 paddw mm3, [GLOBAL(rd)] ; xmm3 += round value
473 psraw mm3, VP8_FILTER_SHIFT ; xmm3 /= 12
    [all...]
