/external/llvm/test/CodeGen/X86/
extractelement-legalization-store-ordering.ll | 18 ; CHECK-NEXT: movdqa %xmm0, (%edx)
|
fast-isel-store.ll | 29 ; CHECK: movdqa
|
insertps-combine.ll |
    157 ; SSE-NEXT: movdqa (%rdi), %xmm1
    160 ; SSE-NEXT: movdqa %xmm1, (%rdi)
    181 ; SSE-NEXT: movdqa (%rdi), %xmm1
    184 ; SSE-NEXT: movdqa %xmm1, (%rdi)
|
vec_set.ll | 23 ; CHECK-NEXT: movdqa %xmm3, (%eax)
|
vec_shift5.ll |
    96 ; X32-NEXT: movdqa {{.*#+}} xmm0 = [1,0,2,0]
    111 ; X32-NEXT: movdqa {{.*#+}} xmm0 = [8,0,16,0]
    154 ; X32-NEXT: movdqa {{.*#+}} xmm0 = <u,u,31,0>
    225 ; X32-NEXT: movdqa {{.*#+}} xmm0 = <u,u,31,0>
|
widen_arith-5.ll | 2 ; CHECK: movdqa
|
widen_conv-4.ll |
    14 ; X86-SSE2-NEXT: movdqa %xmm0, %xmm2
    45 ; X64-SSE2-NEXT: movdqa %xmm0, %xmm2
    89 ; X86-SSE2-NEXT: movdqa %xmm0, (%esp)
    138 ; X64-SSE2-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp)
|
vector-rem.ll |
    36 ; CHECK-NEXT: movdqa %xmm2, %xmm0
    75 ; CHECK-NEXT: movdqa %xmm2, %xmm0
|
widen_shuffle-1.ll |
    72 ; CHECK-NEXT: movdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
    86 ; CHECK-NEXT: movdqa {{.*#+}} xmm0 = [33,33,33,33,33,33,33,33]
|
clear_upper_vector_element_bits.ll |
    367 ; SSE-NEXT: movdqa {{.*#+}} xmm2 = [255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
    371 ; SSE-NEXT: movdqa %xmm1, %xmm3
    375 ; SSE-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255]
    377 ; SSE-NEXT: movdqa %xmm1, %xmm3
    381 ; SSE-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255]
    383 ; SSE-NEXT: movdqa %xmm1, %xmm3
    387 ; SSE-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,255]
    389 ; SSE-NEXT: movdqa %xmm1, %xmm3
    393 ; SSE-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255,255,0,255,255,255,255,255,255]
    395 ; SSE-NEXT: movdqa %xmm1, %xmm [all...]
|
vector-blend.ll |
    275 ; SSE41-NEXT: movdqa %xmm0, %xmm2
    278 ; SSE41-NEXT: movdqa %xmm1, %xmm0
    642 ; SSSE3-NEXT: movdqa {{.*#+}} xmm4 = [128,128,2,128,4,5,6,128,128,128,10,128,12,13,14,128]
    644 ; SSSE3-NEXT: movdqa {{.*#+}} xmm5 = [0,1,128,3,128,128,128,7,8,9,128,11,128,128,128,15]
    654 ; SSE41-NEXT: movdqa %xmm0, %xmm4
    658 ; SSE41-NEXT: movdqa %xmm2, %xmm0
    659 ; SSE41-NEXT: movdqa %xmm3, %xmm1
    826 ; SSE41-NEXT: movdqa %xmm2, %xmm0
    874 ; SSE41-NEXT: movdqa %xmm1, %xmm0
    876 ; SSE41-NEXT: movdqa %xmm4, %xmm [all...]
|
vector-shuffle-128-v2.ll |
    303 ; SSE-NEXT: movdqa %xmm1, %xmm0
    376 ; SSE41-NEXT: movdqa %xmm1, %xmm0
    410 ; SSSE3-NEXT: movdqa %xmm1, %xmm0
    416 ; SSE41-NEXT: movdqa %xmm1, %xmm0
    442 ; SSSE3-NEXT: movdqa %xmm2, %xmm0
    448 ; SSE41-NEXT: movdqa %xmm2, %xmm0
    475 ; SSE-NEXT: movdqa %xmm1, %xmm0
    489 ; SSE-NEXT: movdqa %xmm1, %xmm0
    503 ; SSE-NEXT: movdqa %xmm2, %xmm0
    573 ; SSE41-NEXT: movdqa %xmm1, %xmm [all...]
|
vector-sext.ll |
    52 ; SSE2-NEXT: movdqa %xmm2, %xmm0
    61 ; SSSE3-NEXT: movdqa %xmm2, %xmm0
    69 ; SSE41-NEXT: movdqa %xmm2, %xmm0
    95 ; X32-SSE41-NEXT: movdqa %xmm2, %xmm0
    147 ; SSE2-NEXT: movdqa %xmm2, %xmm0
    152 ; SSSE3-NEXT: movdqa %xmm0, %xmm1
    165 ; SSE41-NEXT: movdqa %xmm2, %xmm0
    191 ; X32-SSE41-NEXT: movdqa %xmm2, %xmm0
    204 ; SSE2-NEXT: movdqa %xmm0, %xmm1
    214 ; SSSE3-NEXT: movdqa %xmm0, %xmm [all...]
|
vec_int_to_fp.ll |
    344 ; SSE-NEXT: movdqa {{.*#+}} xmm1 = [1127219200,1160773632,0,0]
    380 ; SSE-NEXT: movdqa {{.*#+}} xmm1 = [1127219200,1160773632,0,0]
    418 ; SSE-NEXT: movdqa {{.*#+}} xmm1 = [1127219200,1160773632,0,0]
    560 ; SSE-NEXT: movdqa {{.*#+}} xmm2 = [1127219200,1160773632,0,0]
    638 ; SSE-NEXT: movdqa %xmm0, %xmm2
    641 ; SSE-NEXT: movdqa {{.*#+}} xmm3 = [1127219200,1160773632,0,0] [all...]
|
vector-shuffle-128-v16.ll |
    227 ; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
    233 ; SSE2-NEXT: movdqa %xmm2, %xmm0
    241 ; SSSE3-NEXT: movdqa %xmm1, %xmm0
    249 ; SSE41-NEXT: movdqa %xmm1, %xmm0
    272 ; SSE2-NEXT: movdqa %xmm0, %xmm2
    341 ; SSE2-NEXT: movdqa %xmm0, %xmm2
    397 ; SSE41-NEXT: movdqa %xmm0, %xmm2
    400 ; SSE41-NEXT: movdqa %xmm1, %xmm0
    430 ; SSE41-NEXT: movdqa %xmm0, %xmm2
    433 ; SSE41-NEXT: movdqa %xmm1, %xmm [all...]
|
vector-trunc.ll |
    162 ; SSE-NEXT: movdqa {{.*#+}} xmm4 = [255,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0]
    227 ; SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
    235 ; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
    273 ; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
    283 ; SSSE3-NEXT: movdqa {{.*#+}} xmm2 = <0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u>
    292 ; SSE41-NEXT: movdqa {{.*#+}} xmm2 = <0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u>
    335 ; SSE-NEXT: movdqa {{.*#+}} xmm4 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
    647 ; SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
    655 ; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
    725 ; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255 [all...]
|
statepoint-vector.ll |
    28 ; CHECK: movdqa %xmm0, 16(%rsp)
    29 ; CHECK: movdqa %xmm1, (%rsp)
|
/external/swiftshader/third_party/LLVM/test/CodeGen/X86/ |
widen_arith-5.ll | 2 ; CHECK: movdqa
|
sse3.ll |
    23 ; X64: movdqa %xmm0, (%rdi)
    35 ; X64: movdqa (%rdi), %xmm0
    124 ; X64: movdqa %xmm0, (%rdi)
|
/art/compiler/utils/x86/ |
assembler_x86_test.cc |
    511 TEST_F(AssemblerX86Test, Movdqa) {
    512 DriverStr(RepeatFF(&x86::X86Assembler::movdqa, "movdqa %{reg2}, %{reg1}"), "movdqa");
    516 DriverStr(RepeatFA(&x86::X86Assembler::movdqa, "movdqa {mem}, %{reg}"), "movdqa_load");
    520 DriverStr(RepeatAF(&x86::X86Assembler::movdqa, "movdqa %{reg}, {mem}"), "movdqa_store");
|
/bionic/libm/x86/ |
e_hypot.S |
    162 movdqa %xmm0, %xmm2
    163 movdqa 16(%ebx), %xmm3
|
e_exp.S |
    121 movdqa 16(%ebx), %xmm6
    123 movdqa 32(%ebx), %xmm6
    159 movdqa (%ebx), %xmm6
|
/bionic/libm/x86_64/ |
e_hypot.S |
    140 movdqa %xmm0, %xmm2
    141 movdqa 16+static_const_table(%rip), %xmm3
|
/external/boringssl/win-x86/crypto/fipsmodule/ |
sha256-586.asm | [all...] |
/external/boringssl/src/crypto/fipsmodule/bn/asm/ |
x86_64-mont.pl |
    757 movdqa %xmm0,(%rsp)
    764 movdqa %xmm0,16(%rsp,$i)
    766 movdqa %xmm0,32(%rsp,$i)
    773 movdqa %xmm0,16(%rsp,$i)
    973 movdqa 16*0(%rbx),%xmm2
    974 movdqa 16*1(%rbx),%xmm3
    979 movdqa %xmm0,-16*2(%rbx) # zero tp
    980 movdqa %xmm0,-16*1(%rbx)
    981 movdqa %xmm0,-16*2(%rbx,%rdx)
    982 movdqa %xmm0,-16*1(%rbx,%rdx [all...]