HomeSort by relevance Sort by last modified time
    Searched refs:xmm2 (Results 1 - 25 of 88) sorted by null

1 2 3 4

  /external/llvm/test/MC/X86/
x86-32-avx.s 3 // CHECK: vaddss %xmm4, %xmm6, %xmm2
5 vaddss %xmm4, %xmm6, %xmm2
7 // CHECK: vmulss %xmm4, %xmm6, %xmm2
9 vmulss %xmm4, %xmm6, %xmm2
11 // CHECK: vsubss %xmm4, %xmm6, %xmm2
13 vsubss %xmm4, %xmm6, %xmm2
15 // CHECK: vdivss %xmm4, %xmm6, %xmm2
17 vdivss %xmm4, %xmm6, %xmm2
19 // CHECK: vaddsd %xmm4, %xmm6, %xmm2
21 vaddsd %xmm4, %xmm6, %xmm2
    [all...]
x86_64-xop-encoding.s 27 // CHECK: vphsubbw %xmm2, %xmm1
29 vphsubbw %xmm2, %xmm1
35 // CHECK: vphaddwq %xmm6, %xmm2
37 vphaddwq %xmm6, %xmm2
59 // CHECK: vphadduwd %xmm2, %xmm1
61 vphadduwd %xmm2, %xmm1
67 // CHECK: vphaddudq %xmm6, %xmm2
69 vphaddudq %xmm6, %xmm2
83 // CHECK: vphaddubq %xmm2, %xmm2
    [all...]
x86-32-fma3.s 3 // CHECK: vfmadd132pd %xmm2, %xmm5, %xmm1
5 vfmadd132pd %xmm2, %xmm5, %xmm1
11 // CHECK: vfmadd132ps %xmm2, %xmm5, %xmm1
13 vfmadd132ps %xmm2, %xmm5, %xmm1
19 // CHECK: vfmadd213pd %xmm2, %xmm5, %xmm1
21 vfmadd213pd %xmm2, %xmm5, %xmm1
27 // CHECK: vfmadd213ps %xmm2, %xmm5, %xmm1
29 vfmadd213ps %xmm2, %xmm5, %xmm1
35 // CHECK: vfmadd231pd %xmm2, %xmm5, %xmm1
37 vfmadd231pd %xmm2, %xmm5, %xmm
    [all...]
x86_64-fma4-encoding.s 12 // CHECK: vfmaddss %xmm2, %xmm1, %xmm0, %xmm0
14 vfmaddss %xmm2, %xmm1, %xmm0, %xmm0
24 // CHECK: vfmaddsd %xmm2, %xmm1, %xmm0, %xmm0
26 vfmaddsd %xmm2, %xmm1, %xmm0, %xmm0
36 // CHECK: vfmaddps %xmm2, %xmm1, %xmm0, %xmm0
38 vfmaddps %xmm2, %xmm1, %xmm0, %xmm0
48 // CHECK: vfmaddpd %xmm2, %xmm1, %xmm0, %xmm0
50 vfmaddpd %xmm2, %xmm1, %xmm0, %xmm0
85 // CHECK: vfmsubss %xmm2, %xmm1, %xmm0, %xmm0
87 vfmsubss %xmm2, %xmm1, %xmm0, %xmm
    [all...]
  /external/compiler-rt/lib/i386/
floatdidf.S 27 movsd REL_ADDR(twop52), %xmm2 // 0x1.0p52
28 subsd %xmm2, %xmm1 // a_hi - 0x1p52 (no rounding occurs)
29 orpd %xmm2, %xmm0 // 0x1p52 + a_lo (no rounding occurs)
ashrdi3.S 14 movd 12(%esp), %xmm2 // Load count
24 psrlq %xmm2, %xmm0 // unsigned shift input by count
33 pandn %xmm1, %xmm2 // 63 - count
35 psubq %xmm1, %xmm2 // 64 - count
36 psllq %xmm2, %xmm1 // -1 << (64 - count) = leading sign bits
ashldi3.S 21 movd 12(%esp), %xmm2 // Load count
29 psllq %xmm2, %xmm0 // shift input by count
lshrdi3.S 21 movd 12(%esp), %xmm2 // Load count
29 psrlq %xmm2, %xmm0 // shift input by count
  /frameworks/compile/libbcc/runtime/lib/i386/
floatdidf.S 27 movsd REL_ADDR(twop52), %xmm2 // 0x1.0p52
28 subsd %xmm2, %xmm1 // a_hi - 0x1p52 (no rounding occurs)
29 orpd %xmm2, %xmm0 // 0x1p52 + a_lo (no rounding occurs)
ashrdi3.S 14 movd 12(%esp), %xmm2 // Load count
24 psrlq %xmm2, %xmm0 // unsigned shift input by count
33 pandn %xmm1, %xmm2 // 63 - count
35 psubq %xmm1, %xmm2 // 64 - count
36 psllq %xmm2, %xmm1 // -1 << (64 - count) = leading sign bits
ashldi3.S 21 movd 12(%esp), %xmm2 // Load count
29 psllq %xmm2, %xmm0 // shift input by count
  /external/libvpx/vp8/encoder/x86/
subtract_sse2.asm 98 movdqa xmm2, xmm0
102 pxor xmm2, [GLOBAL(t80)]
103 pcmpgtb xmm1, xmm2 ; obtain sign information
105 movdqa xmm2, xmm0
108 punpckhbw xmm2, xmm3 ; put sign back to subtraction
111 movdqa XMMWORD PTR [rdi +16], xmm2
170 movq xmm2, MMWORD PTR [rsi+rdx]
172 punpcklqdq xmm0, xmm2
174 movdqa xmm2, xmm0
178 pxor xmm2, [GLOBAL(t80)
    [all...]
variance_impl_ssse3.asm 69 movdqa xmm2, xmm0
72 punpckhbw xmm2, xmm1
74 pmaddubsw xmm2, [rax]
77 paddw xmm2, [GLOBAL(xmm_bi_rd)]
79 psraw xmm2, xmm_filter_shift
81 packuswb xmm0, xmm2
93 movdqu xmm2, XMMWORD PTR [rsi+1]
96 punpcklbw xmm1, xmm2
97 punpckhbw xmm3, xmm2
107 movdqa xmm2, xmm
    [all...]
fwalsh_sse2.asm 34 movq xmm2, MMWORD PTR [rsi]
38 punpcklwd xmm2, xmm3
41 punpckldq xmm0, xmm2 ; ip[1] ip[0]
42 punpckhdq xmm1, xmm2 ; ip[3] ip[2]
44 movdqa xmm2, xmm0
46 psubw xmm2, xmm1
49 psllw xmm2, 2 ; c1 b1
52 punpcklqdq xmm0, xmm2 ; b1 a1
53 punpckhqdq xmm1, xmm2 ; c1 d1
61 movdqa xmm2, xmm
    [all...]
dct_sse2.asm 68 movq xmm2, MMWORD PTR[input+ pitch] ;13 12 11 10
73 punpcklqdq xmm0, xmm2 ;13 12 11 10 03 02 01 00
76 movdqa xmm2, xmm0
78 punpckhdq xmm2, xmm1 ;33 32 13 12 31 30 11 10
80 punpckldq xmm0, xmm2 ;31 21 30 20 11 10 01 00
82 pshufhw xmm2, xmm2, 0b1h ;32 33 12 13 xx xx xx xx
84 punpckhdq xmm1, xmm2 ;32 33 22 23 12 13 02 03
109 movdqa xmm2, xmm0
111 punpckhqdq xmm2, xmm3 ;23 22 21 20 33 32 31 3
    [all...]
quantize_sse2.asm 84 movdqa xmm2, [rcx]
89 paddw xmm2, xmm7
93 psubw xmm1, xmm2
99 paddw xmm1, xmm2
102 movdqa xmm2, [rdx]
109 paddw xmm1, xmm2
170 movdqa xmm2, [rsp + qcoeff]
183 pxor xmm2, xmm0
186 psubw xmm2, xmm0
195 pmullw xmm0, xmm2
    [all...]
  /external/libvpx/vp8/common/x86/
idctllm_sse2.asm 61 movq xmm2, [rax+2*rcx]
67 punpcklbw xmm2, xmm7
76 paddw xmm2, xmm4
82 packuswb xmm2, xmm7
91 movq [rax], xmm2
122 ; note the transpose of xmm1 and xmm2, necessary for shuffle
125 movdqa xmm2, [rax+16]
137 pmullw xmm2, [rdx+16]
149 movdqa xmm4, xmm2
150 punpckldq xmm2, xmm
    [all...]
subpixel_ssse3.asm 75 movq xmm2, MMWORD PTR [rsi + 3] ; 3 4 5 6 7 8 9 10
77 punpcklbw xmm0, xmm2 ; -2 3 -1 4 0 5 1 6 2 7 3 8 4 9 5 10
82 movdqa xmm2, xmm1
85 pshufb xmm2, [GLOBAL(shuf3bfrom1)]
89 pmaddubsw xmm2, xmm6
95 paddsw xmm2, xmm7
97 paddsw xmm0, xmm2
137 movdqa xmm2, xmm0
140 pshufb xmm2, xmm4
144 pmaddubsw xmm2, xmm
    [all...]
  /dalvik/vm/mterp/x86-atom/
OP_SHL_LONG.S 34 movq .LshiftMask, %xmm2 # %xmm2<- mask for the shift bits
36 pand %xmm2, %xmm0 # %xmm0<- masked shift bits
OP_SHL_LONG_2ADDR.S 37 movq .LshiftMask, %xmm2 # %xmm2<- mask for the shift bits
38 pand %xmm2, %xmm0 # %xmm0<- masked shift bits
OP_USHR_LONG.S 33 movsd .LshiftMask, %xmm2 # %xmm2<- mask for the shift bits
35 pand %xmm2, %xmm0 # %xmm0<- masked shift bits
OP_USHR_LONG_2ADDR.S 35 movq .LshiftMask, %xmm2 # %xmm2<- mask for the shift bits
38 pand %xmm2, %xmm0 # %xmm0<- masked shift bits
  /bionic/libc/arch-x86/string/
ssse3-strcmp-latest.S 190 movlpd (%edx), %xmm2
192 movhpd 8(%edx), %xmm2
194 pcmpeqb %xmm2, %xmm1
291 movdqa (%edx, %ecx), %xmm2
294 pcmpeqb %xmm2, %xmm1
316 movdqa (%edx), %xmm2
319 pslldq $15, %xmm2
320 pcmpeqb %xmm1, %xmm2
321 psubb %xmm0, %xmm2
322 pmovmskb %xmm2, %ed
    [all...]
ssse3-memcpy5.S 305 movdqa 0x20(%eax), %xmm2
315 movdqa %xmm2, 0x20(%edx)
374 movdqa 0x20(%eax), %xmm2
384 movdqa %xmm2, 0x20(%edx)
447 movdqa 16(%eax, %edi), %xmm2
451 palignr $1, %xmm2, %xmm3
452 palignr $1, %xmm1, %xmm2
454 movdqa %xmm2, -32(%edx, %edi)
459 movdqa 16(%eax, %edi), %xmm2
463 palignr $1, %xmm2, %xmm
    [all...]
  /external/libyuv/files/source/
row_win.cc 90 movdqa xmm2, [eax + 32] local
94 pmaddubsw xmm2, xmm7 local
98 phaddw xmm2, xmm3 local
100 psrlw xmm2, 7 local
101 packuswb xmm0, xmm2
123 movdqa xmm2, [eax + 32] local
127 pmaddubsw xmm2, xmm7 local
131 phaddw xmm2, xmm3 local
133 psrlw xmm2, 7 local
134 packuswb xmm0, xmm2
156 movdqa xmm2, [eax + 32] local
160 pmaddubsw xmm2, xmm7 local
164 phaddw xmm2, xmm3 local
166 psrlw xmm2, 7 local
197 movdqa xmm2, [eax + 32] local
201 pavgb xmm2, [eax + esi + 32] local
209 shufps xmm2, xmm3, 0x88 local
211 pavgb xmm2, xmm4 local
219 pmaddubsw xmm2, xmm7 local
261 movdqa xmm2, [eax + 32] local
265 pavgb xmm2, [eax + esi + 32] local
273 shufps xmm2, xmm3, 0x88 local
275 pavgb xmm2, xmm4 local
283 pmaddubsw xmm2, xmm7 local
325 movdqa xmm2, [eax + 32] local
329 pavgb xmm2, [eax + esi + 32] local
337 shufps xmm2, xmm3, 0x88 local
339 pavgb xmm2, xmm4 local
347 pmaddubsw xmm2, xmm7 local
384 movdqa xmm2, xmm3 local
385 palignr xmm2, xmm1, 8 // xmm2 = { xmm3[0:3] xmm1[8:15]} local
386 pshufb xmm2, xmm6 local
387 por xmm2, xmm7 local
423 movdqa xmm2, xmm3 local
424 palignr xmm2, xmm1, 8 // xmm2 = { xmm3[0:3] xmm1[8:15]} local
425 pshufb xmm2, xmm6 local
426 por xmm2, xmm7 local
    [all...]

Completed in 533 milliseconds

1 2 3 4