    Searched full:xmm2 (Results 226 - 250 of 435)


  /external/boringssl/linux-x86/crypto/sha/
sha512-586.S 385 movdqa %xmm1,%xmm2
391 movdqa %xmm2,%xmm3
392 movdqu 32(%edi),%xmm2
399 paddq %xmm2,%xmm5
428 movdqa %xmm2,32(%edx)
429 movdqa 112(%ebp),%xmm2
431 paddq %xmm7,%xmm2
432 movdqa %xmm2,-16(%edx)
436 movdqa 16(%edx),%xmm2
456 movdqa %xmm2,%xmm
    [all...]
  /external/boringssl/mac-x86/crypto/sha/
sha512-586.S 384 movdqa %xmm1,%xmm2
390 movdqa %xmm2,%xmm3
391 movdqu 32(%edi),%xmm2
398 paddq %xmm2,%xmm5
427 movdqa %xmm2,32(%edx)
428 movdqa 112(%ebp),%xmm2
430 paddq %xmm7,%xmm2
431 movdqa %xmm2,-16(%edx)
435 movdqa 16(%edx),%xmm2
455 movdqa %xmm2,%xmm
    [all...]
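
Both hit groups above are the generated SSE2 path of BoringSSL's sha512-586 assembly: message words are loaded 16 bytes at a time (movdqu), round constants from the K table are added lane-wise (paddq), and the results are spilled back to the schedule area (movdqa). A minimal C sketch of the paddq step using SSE2 intrinsics; the helper name and layout are illustrative, not BoringSSL's code:

    #include <emmintrin.h>
    #include <stdint.h>

    /* Hypothetical helper: add two 64-bit SHA-512 round constants (k)
     * to two message-schedule words (w) in one shot, as the paddq
     * instructions in the hits above do. */
    static void add_round_constants(uint64_t w[2], const uint64_t k[2])
    {
        __m128i W = _mm_loadu_si128((const __m128i *)w);  /* movdqu */
        __m128i K = _mm_loadu_si128((const __m128i *)k);
        W = _mm_add_epi64(W, K);                          /* paddq  */
        _mm_storeu_si128((__m128i *)w, W);
    }
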
  /external/llvm/test/CodeGen/X86/
stack-folding-xop.ll 14 %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
23 %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
32 %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
41 %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
50 %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
59 %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
100 %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
109 %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
118 %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
127 %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm (…)
    [all...]
pr3154.ll 40 %asmtmp = call { i32, i32 } asm sideeffect "1: \0A\09movapd %xmm7, %xmm1 \0A\09mulpd %xmm1, %xmm1 \0A\09movapd %xmm6, %xmm0 \0A\09subpd %xmm1, %xmm0 \0A\09pshufd $$0x4e, %xmm0, %xmm1 \0A\09cvtpi2pd ($3,$0), %xmm2 \0A\09cvtpi2pd -1*4($3,$1), %xmm3 \0A\09mulpd %xmm0, %xmm2 \0A\09mulpd %xmm1, %xmm3 \0A\09movapd %xmm2, ($2,$0,2) \0A\09movupd %xmm3, -1*8($2,$1,2) \0A\09subpd %xmm5, %xmm7 \0A\09sub $$8, $1 \0A\09add $$8, $0 \0A\09jl 1b \0A\09", "=&r,=&r,r,r,0,1,~{dirflag},~{fpsr},~{flags}"(double* %16, i32* %17, i32 %12, i32 %14) nounwind ; <{ i32, i32 }> [#uses=0]
44 %asmtmp23 = call { i32, i32 } asm sideeffect "1: \0A\09movapd %xmm7, %xmm1 \0A\09mulpd %xmm1, %xmm1 \0A\09movapd %xmm6, %xmm0 \0A\09subpd %xmm1, %xmm0 \0A\09pshufd $$0x4e, %xmm0, %xmm1 \0A\09cvtpi2pd ($3,$0), %xmm2 \0A\09cvtpi2pd -2*4($3,$1), %xmm3 \0A\09mulpd %xmm0, %xmm2 \0A\09mulpd %xmm1, %xmm3 \0A\09movapd %xmm2, ($2,$0,2) \0A\09movapd %xmm3, -2*8($2,$1,2) \0A\09subpd %xmm5, %xmm7 \0A\09sub $$8, $1 \0A\09add $$8, $0 \0A\09jl 1b \0A\09", "=&r,=&r,r,r,0,1,~{dirflag},~{fpsr},~{flags}"(double* %16, i32* %17, i32 %12, i32 %14) nounwind ; <{ i32, i32 }> [#uses=0]
84 %asmtmp32 = call i32 asm sideeffect "movsd ff_pd_1, %xmm0 \0A\09movsd ff_pd_1, %xmm1 \0A\09movsd ff_pd_1, %xmm2 \0A\091: \0A\09movapd ($4,$0), %xmm3 \0A\09movupd -8($5,$0), %xmm4 \0A\09movapd ($5,$0), %xmm5 \0A\09mulpd %xmm3, %xmm4 \0A\09mulpd %xmm3, %xmm5 \0A\09mulpd -16($5,$0), %xmm3 \0A\09addpd %xmm4, %xmm1 \0A\09addpd %xmm5, %xmm0 \0A\09addpd %xmm3, %xmm2 \0A\09add $$16, $0 \0A\09jl 1b \0A\09movhlps %xmm0, %xmm3 \0A\09movhlps %xmm1, %xmm4 \0A\09movhlps %xmm2, %xmm5 \0A\09addsd %xmm3, %xmm0 \0A\09addsd %xmm4, %xmm1 \0A\09addsd %xmm5, %xmm2 \0A\09movsd %xmm0, $1 \0A\09movsd %xmm1, $2 \0A\09mo (…)
    [all...]
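
stack-folding-xop.ll and pr3154.ll both exercise inline asm with explicit clobber lists (~{xmm2}, ~{flags}, ...), which force the register allocator to spill any value live in those registers around the asm. A minimal sketch of the same idiom in C, assuming GCC/Clang with SSE2:

    /* Illustrative only: declare that the asm overwrites xmm2 and the
     * condition flags, so the compiler must not keep values there. */
    static inline void clobber_xmm2(void)
    {
    #if defined(__GNUC__) && defined(__SSE2__)
        __asm__ volatile("pxor %%xmm2, %%xmm2" ::: "xmm2", "cc");
    #endif
    }
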
fdiv-combine.ll 19 ; CHECK-NEXT: divss %xmm2, %xmm3
preserve_allcc64.ll 34 ;SSE-NEXT: movaps %xmm2
68 call void asm sideeffect "", "~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15},~{rbp},~{xmm0},~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15}"()
87 %a10 = call <2 x double> asm sideeffect "", "={xmm2}"() nounwind
102 call void asm sideeffect "", "{rax},{rcx},{rdx},{r8},{r9},{r10},{r11},{xmm2},{xmm3},{xmm4},{xmm5},{xmm6},{xmm7},{xmm8},{xmm9},{xmm10},{xmm11},{xmm12},{xmm13},{xmm14},{xmm15}"(i64 %a0, i64 %a1, i64 %a2, i64 %a3, i64 %a4, i64 %a5, i64 %a6, <2 x double> %a10, <2 x double> %a11, <2 x double> %a12, <2 x double> %a13, <2 x double> %a14, <2 x double> %a15, <2 x double> %a16, <2 x double> %a17, <2 x double> %a18, <2 x double> %a19, <2 x double> %a20, <2 x double> %a21, <2 x double> %a22, <2 x double> %a23)
preserve_mostcc64.ll 37 call void asm sideeffect "", "~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15},~{rbp},~{xmm0},~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15}"()
47 ;SSE: movaps %xmm2
69 %a10 = call <2 x double> asm sideeffect "", "={xmm2}"() nounwind
84 call void asm sideeffect "", "{rax},{rcx},{rdx},{r8},{r9},{r10},{r11},{xmm2},{xmm3},{xmm4},{xmm5},{xmm6},{xmm7},{xmm8},{xmm9},{xmm10},{xmm11},{xmm12},{xmm13},{xmm14},{xmm15}"(i64 %a0, i64 %a1, i64 %a2, i64 %a3, i64 %a4, i64 %a5, i64 %a6, <2 x double> %a10, <2 x double> %a11, <2 x double> %a12, <2 x double> %a13, <2 x double> %a14, <2 x double> %a15, <2 x double> %a16, <2 x double> %a17, <2 x double> %a18, <2 x double> %a19, <2 x double> %a20, <2 x double> %a21, <2 x double> %a22, <2 x double> %a23)
vector-shuffle-combining.ll 280 ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,3,2,3]
288 ; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,3,2,3]
295 ; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
301 ; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
307 ; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3]
320 ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,3,2,3]
328 ; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,3,2,3
    [all...]
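
The SSE2/SSSE3/SSE4.1/AVX lines above are alternative lowerings of one and the same lane-blend shuffle; the pblendw pattern xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7] corresponds to immediate 0xCC. A sketch of the SSE4.1 form with intrinsics (function name invented; compile with -msse4.1):

    #include <smmintrin.h>

    /* Take 16-bit lanes 2,3,6,7 from b and lanes 0,1,4,5 from a,
     * matching the pblendw CHECK lines above. */
    static __m128i blend_cc(__m128i a, __m128i b)
    {
        return _mm_blend_epi16(a, b, 0xCC);
    }
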
  /bionic/libm/x86/
e_exp.S 96 movapd 80(%ebx), %xmm2
110 mulpd %xmm1, %xmm2
114 subpd %xmm2, %xmm0
127 movapd 160(%ebx,%ecx), %xmm2
136 addsd %xmm2, %xmm1
137 unpckhpd %xmm2, %xmm2
140 orpd %xmm7, %xmm2
147 mulsd %xmm2, %xmm0
148 addsd %xmm2, %xmm
    [all...]
e_asin.S 108 movsd 6000(%ebx), %xmm2
128 andpd %xmm7, %xmm2
132 orpd %xmm5, %xmm2
135 addsd %xmm2, %xmm7
136 subsd %xmm2, %xmm0
139 mulsd %xmm2, %xmm3
146 andpd 6064(%ebx), %xmm2
153 xorpd %xmm2, %xmm4
160 orpd %xmm2, %xmm4
195 andpd %xmm3, %xmm2
    [all...]
s_atan.S 105 movsd 2672(%ebx), %xmm2
120 mulsd %xmm0, %xmm2
124 addsd %xmm7, %xmm2
127 mulsd %xmm2, %xmm0
144 movsd 2680(%ebx), %xmm2
150 addsd %xmm4, %xmm2
152 mulsd %xmm1, %xmm2
154 mulsd %xmm7, %xmm2
155 addsd %xmm2, %xmm0
179 movsd 2672(%ebx), %xmm2
    [all...]
  /external/libvpx/libvpx/vp8/common/x86/
sad_sse3.asm 180 lddqu xmm2, XMMWORD PTR [%3+1]
184 psadbw xmm2, xmm0
188 paddw xmm6, xmm2
193 lddqu xmm2, XMMWORD PTR [%3+%5+1]
202 psadbw xmm2, xmm0
206 paddw xmm6, xmm2
276 lddqu xmm2, XMMWORD PTR [%4]
280 psadbw xmm2, xmm0
285 paddw xmm5, xmm2
293 lddqu xmm2, XMMWORD PTR [%4+%8
    [all...]
  /hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp8/common/x86/
sad_sse3.asm 180 lddqu xmm2, XMMWORD PTR [%3+1]
184 psadbw xmm2, xmm0
188 paddw xmm6, xmm2
193 lddqu xmm2, XMMWORD PTR [%3+%5+1]
202 psadbw xmm2, xmm0
206 paddw xmm6, xmm2
276 lddqu xmm2, XMMWORD PTR [%4]
280 psadbw xmm2, xmm0
285 paddw xmm5, xmm2
293 lddqu xmm2, XMMWORD PTR [%4+%8
    [all...]
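
Both copies of sad_sse3.asm (upstream libvpx and the Intel OMX import of it) use the same kernel: lddqu an unaligned 16-byte reference block, psadbw it against the source to get two 64-bit partial sums of absolute byte differences, then accumulate the partials. A rough per-row sketch with SSE2 intrinsics (not the libvpx code; _mm_loadu_si128 stands in for lddqu):

    #include <emmintrin.h>
    #include <stdint.h>

    static uint32_t sad16x1(const uint8_t *src, const uint8_t *ref)
    {
        __m128i s = _mm_loadu_si128((const __m128i *)src);
        __m128i r = _mm_loadu_si128((const __m128i *)ref);
        __m128i d = _mm_sad_epu8(s, r);              /* psadbw */
        d = _mm_add_epi64(d, _mm_srli_si128(d, 8));  /* fold hi+lo sums */
        return (uint32_t)_mm_cvtsi128_si32(d);
    }
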
  /bionic/libm/x86_64/
e_exp.S 78 movapd 16+cv(%rip), %xmm2
92 mulpd %xmm1, %xmm2
96 subpd %xmm2, %xmm0
110 movapd (%rcx,%r8), %xmm2
119 addsd %xmm2, %xmm1
120 unpckhpd %xmm2, %xmm2
123 orpd %xmm7, %xmm2
130 mulsd %xmm2, %xmm0
131 addsd %xmm2, %xmm
    [all...]
s_atan.S 88 movq a2(%rip), %xmm2
104 mulsd %xmm0, %xmm2
108 addsd %xmm7, %xmm2
111 mulsd %xmm2, %xmm0
126 movq 8+a2(%rip), %xmm2
132 addsd %xmm4, %xmm2
134 mulsd %xmm1, %xmm2
136 mulsd %xmm7, %xmm2
137 addsd %xmm2, %xmm0
155 movq a2(%rip), %xmm2
    [all...]
s_log1p.S 67 movd %rax, %xmm2
74 pshufd $68, %xmm2, %xmm6
76 addsd %xmm2, %xmm0
80 orpd %xmm2, %xmm0
104 orpd %xmm2, %xmm1
119 subsd %xmm2, %xmm5
123 movapd 16+coeff(%rip), %xmm2
134 mulpd %xmm5, %xmm2
138 addpd 32+coeff(%rip), %xmm2
141 mulsd %xmm1, %xmm2
    [all...]
e_asin.S 90 movsd TMASK(%rip), %xmm2
109 andpd %xmm7, %xmm2
114 orpd %xmm5, %xmm2
118 addsd %xmm2, %xmm7
119 subsd %xmm2, %xmm0
122 mulsd %xmm2, %xmm3
129 andpd SIGNMASK(%rip), %xmm2
136 xorpd %xmm2, %xmm4
143 orpd %xmm2, %xmm4
176 andpd %xmm3, %xmm2
    [all...]
s_sin.S 189 movq SHIFTER(%rip), %xmm2
221 movapd (%rax), %xmm2
232 addsd %xmm3, %xmm2
233 subsd %xmm2, %xmm7
234 mulsd %xmm4, %xmm2
237 mulpd %xmm0, %xmm2
253 mulpd %xmm2, %xmm6
416 movq PI_4(%rip), %xmm2
426 mulsd %xmm2, %xmm0
429 mulsd %xmm2, %xmm
    [all...]
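
The e_exp, e_asin, s_atan, s_log1p, and s_sin hits (in both the x86 and x86_64 trees) are polynomial kernels: the long mulsd/addsd chains on xmm2 evaluate polynomial approximations Horner-style against coefficient tables (cv, a2, coeff, ...). A generic sketch of that shape; the coefficients below are placeholders, not libm's tables:

    /* Horner evaluation c0 + x*(c1 + x*c2), the pattern behind the
     * mulsd/addsd chains above. Coefficient values are made up. */
    static double horner3(double x)
    {
        const double c0 = 1.0, c1 = 0.5, c2 = 0.25;  /* hypothetical */
        return c0 + x * (c1 + x * c2);
    }
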
  /external/mesa3d/src/mesa/x86-64/
calling_convention.txt 31 xmm2-7 used to pass floating point arguments
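
That line documents the SysV AMD64 convention Mesa's generated dispatch relies on: the first eight floating-point arguments travel in xmm0 through xmm7. In the illustrative function below (not from Mesa), c therefore arrives in xmm2:

    /* a in xmm0, b in xmm1, c in xmm2 under the SysV AMD64 ABI. */
    double fmadd_args(double a, double b, double c)
    {
        return a * b + c;
    }
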
  /external/flac/libFLAC/ia32/
lpc_asm.nasm 460 movaps xmm2, xmm0 ; xmm2 = 0,0,0,data[0]
462 .warmup: ; xmm2 == data[sample-3],data[sample-2],data[sample-1],data[sample]
463 mulps xmm0, xmm2 ; xmm0 = xmm0 * xmm2
464 addps xmm5, xmm0 ; xmm5 += xmm0 * xmm2
473 shufps xmm2, xmm2, 93h ; 93h=2-1-0-3 => xmm2 gets rotated left by one float
474 movss xmm2, xmm
    [all...]
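
The FLAC LPC warm-up loop keeps a four-sample window in xmm2, multiply-accumulates it against the coefficients, then (per the source comment) uses shufps with 93h to rotate the window one lane so movss can insert the next sample at the bottom. A sketch of the rotate-and-insert step (helper name invented):

    #include <xmmintrin.h>

    static __m128 rotate_insert(__m128 window, float sample)
    {
        /* shufps $0x93: dst = (w3, w0, w1, w2), a one-lane rotation */
        window = _mm_shuffle_ps(window, window, 0x93);
        /* movss: overwrite the low lane with the incoming sample */
        return _mm_move_ss(window, _mm_set_ss(sample));
    }
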
  /bionic/libc/arch-x86_64/string/
sse2-strlcpy-slm.S 223 movdqu 16(%rsi), %xmm2
224 pcmpeqb %xmm2, %xmm0
655 movdqu 13(%rsi), %xmm2
657 movdqu %xmm2, 13(%rdi)
664 movdqu 14(%rsi), %xmm2
666 movdqu %xmm2, 14(%rdi)
673 movdqu 15(%rsi), %xmm2
675 movdqu %xmm2, 15(%rdi)
906 movdqu 13(%rsi), %xmm2
908 movdqu %xmm2, 13(%rdi
    [all...]
sse2-strcpy-slm.S 139 movaps 16(%rsi, %rcx), %xmm2
141 pcmpeqb %xmm2, %xmm0
156 movdqu %xmm2, (%rdi, %rcx)
203 movaps 16(%rsi, %rcx), %xmm2
205 pcmpeqb %xmm2, %xmm0
220 movdqu %xmm2, (%rdi, %rcx)
245 movaps (%rsi), %xmm2
246 movaps %xmm2, %xmm4
251 pminub %xmm5, %xmm2
253 pminub %xmm2, %xmm
    [all...]
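
The strlcpy/strcpy kernels above scan for the terminating NUL by pcmpeqb-ing 16 source bytes against zero, then turning the byte mask into a bit mask to locate the terminator. A compact sketch of that scan, assuming GCC/Clang for __builtin_ctz:

    #include <emmintrin.h>
    #include <stdint.h>

    /* Return the index of the first NUL in the 16 bytes at p, or -1. */
    static int find_nul16(const uint8_t *p)
    {
        __m128i v = _mm_loadu_si128((const __m128i *)p);
        __m128i z = _mm_cmpeq_epi8(v, _mm_setzero_si128()); /* pcmpeqb  */
        int mask = _mm_movemask_epi8(z);                    /* pmovmskb */
        return mask ? __builtin_ctz(mask) : -1;
    }
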
  /external/libyuv/files/source/
convert_from.cc 250 movq xmm2, qword ptr [esi] // U
253 punpcklbw xmm2, xmm3 // UV
257 punpcklbw xmm0, xmm2 // YUYV
258 punpckhbw xmm1, xmm2
289 movq xmm2, qword ptr [esi] // U
292 punpcklbw xmm2, xmm3 // UV
294 movdqa xmm1, xmm2
297 punpckhbw xmm2, xmm0
299 movdqa [edi + 16], xmm2
319 "movq (%1),%%xmm2 \n
    [all...]
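
The convert_from.cc hits pack planar I420 into YUY2: punpcklbw interleaves U with V into a UV vector, then interleaves Y with UV so the output reads Y0 U0 Y1 V0 Y2 U1 ... A sketch of the packing for eight Y samples (illustrative, not libyuv's code):

    #include <emmintrin.h>
    #include <stdint.h>
    #include <string.h>

    static void pack_yuy2_8(const uint8_t y[8], const uint8_t u[4],
                            const uint8_t v[4], uint8_t out[16])
    {
        int32_t ub, vb;
        memcpy(&ub, u, 4);
        memcpy(&vb, v, 4);
        __m128i Y  = _mm_loadl_epi64((const __m128i *)y);
        __m128i UV = _mm_unpacklo_epi8(_mm_cvtsi32_si128(ub),
                                       _mm_cvtsi32_si128(vb)); /* punpcklbw */
        __m128i P  = _mm_unpacklo_epi8(Y, UV); /* Y0 U0 Y1 V0 ... */
        _mm_storeu_si128((__m128i *)out, P);
    }
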
  /external/llvm/lib/Target/X86/
X86CallingConv.td 42 // Vector types are returned in XMM0 and XMM1, when they fit. XMM2 and XMM3
46 CCAssignToReg<[XMM0,XMM1,XMM2,XMM3]>>,
76 CCIfType<[f32, f64], CCAssignToReg<[XMM0,XMM1,XMM2]>>>>,
87 CCIfType<[f32], CCIfSubtarget<"hasSSE2()", CCAssignToReg<[XMM0,XMM1,XMM2]>>>,
88 CCIfType<[f64], CCIfSubtarget<"hasSSE2()", CCAssignToReg<[XMM0,XMM1,XMM2]>>>,
103 CCAssignToReg<[XMM0,XMM1,XMM2,XMM3]>>,
131 CCAssignToReg<[XMM0,XMM1,XMM2,XMM3]>>,
264 CCAssignToReg<[XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7]>>>,
327 [XMM0, XMM1, XMM2, XMM3]>>,
333 [XMM1, XMM2, XMM3]>>>>
    [all...]
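
X86CallingConv.td encodes each convention as ordered register queues: CCAssignToReg<[XMM0,XMM1,XMM2,XMM3]> means values of the matching types claim those registers in order until the list runs out. Under SysV, for example, the third vector argument of the illustrative function below lands in xmm2:

    #include <xmmintrin.h>

    /* a -> xmm0, b -> xmm1, c -> xmm2 on x86-64 SysV. */
    __m128 add3(__m128 a, __m128 b, __m128 c)
    {
        return _mm_add_ps(_mm_add_ps(a, b), c);
    }
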
  /external/llvm/test/MC/Disassembler/X86/
x86-32.txt 171 # CHECK: vmovapd %xmm0, %xmm2
175 # CHECK: blendps $129, %xmm2, %xmm1
178 # CHECK: blendpd $129, %xmm2, %xmm1
181 # CHECK: pblendw $129, %xmm2, %xmm1
184 # CHECK: mpsadbw $129, %xmm2, %xmm1
187 # CHECK: dpps $129, %xmm2, %xmm1
190 # CHECK: dppd $129, %xmm2, %xmm1
193 # CHECK: insertps $129, %xmm2, %xmm1
208 # CHECK: vpblendw $129, %xmm2, %xmm5, %xmm1
211 # CHECK: vmpsadbw $129, %xmm2, %xmm5, %xmm
    [all...]
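
These x86-32.txt entries check that the disassembler prints the 8-bit immediate of SSE4.1 instructions that take one (blendps, pblendw, dpps, insertps, ...); for the blends, the immediate selects per lane whether the result comes from the second source. A sketch of blendps semantics with intrinsics (compile with -msse4.1):

    #include <smmintrin.h>

    /* imm 0x5 = 0b0101: lanes 0 and 2 from b, lanes 1 and 3 from a. */
    static __m128 blend_0101(__m128 a, __m128 b)
    {
        return _mm_blend_ps(a, b, 0x5);
    }
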

Completed in 565 milliseconds
