/external/chromium_org/third_party/openssl/openssl/crypto/aes/asm/ |
vpaes-x86.pl | 161 &movdqa ("xmm7",&QWP($k_inv,$const)); 162 &movdqa ("xmm6",&QWP($k_s0F,$const)); 183 &movdqa ("xmm1","xmm6") 184 &movdqa ("xmm2",&QWP($k_ipt,$const)); 190 &movdqa ("xmm0",&QWP($k_ipt+16,$const)); 201 &movdqa ("xmm4",&QWP($k_sb1,$const)); # 4 : sb1u 204 &movdqa ("xmm0",&QWP($k_sb1+16,$const));# 0 : sb1t 207 &movdqa ("xmm5",&QWP($k_sb2,$const)); # 4 : sb2u 209 &movdqa ("xmm1",&QWP(-0x40,$base,$magic));# .Lk_mc_forward[] 210 &movdqa ("xmm2",&QWP($k_sb2+16,$const));# 2 : sb2 [all...] |
vpaes-x86_64.pl | 88 movdqa %xmm9, %xmm1 89 movdqa .Lk_ipt(%rip), %xmm2 # iptlo 95 movdqa .Lk_ipt+16(%rip), %xmm0 # ipthi 106 movdqa %xmm13, %xmm4 # 4 : sb1u 109 movdqa %xmm12, %xmm0 # 0 : sb1t 112 movdqa %xmm15, %xmm5 # 4 : sb2u 114 movdqa -0x40(%r11,%r10), %xmm1 # .Lk_mc_forward[] 115 movdqa %xmm14, %xmm2 # 2 : sb2t 118 movdqa (%r11,%r10), %xmm4 # .Lk_mc_backward[] 119 movdqa %xmm0, %xmm3 # 3 = [all...] |
/external/openssl/crypto/aes/asm/ |
vpaes-x86.pl | 161 &movdqa ("xmm7",&QWP($k_inv,$const)); 162 &movdqa ("xmm6",&QWP($k_s0F,$const)); 183 &movdqa ("xmm1","xmm6") 184 &movdqa ("xmm2",&QWP($k_ipt,$const)); 190 &movdqa ("xmm0",&QWP($k_ipt+16,$const)); 201 &movdqa ("xmm4",&QWP($k_sb1,$const)); # 4 : sb1u 204 &movdqa ("xmm0",&QWP($k_sb1+16,$const));# 0 : sb1t 207 &movdqa ("xmm5",&QWP($k_sb2,$const)); # 4 : sb2u 209 &movdqa ("xmm1",&QWP(-0x40,$base,$magic));# .Lk_mc_forward[] 210 &movdqa ("xmm2",&QWP($k_sb2+16,$const));# 2 : sb2 [all...] |
vpaes-x86_64.pl | 88 movdqa %xmm9, %xmm1 89 movdqa .Lk_ipt(%rip), %xmm2 # iptlo 95 movdqa .Lk_ipt+16(%rip), %xmm0 # ipthi 106 movdqa %xmm13, %xmm4 # 4 : sb1u 109 movdqa %xmm12, %xmm0 # 0 : sb1t 112 movdqa %xmm15, %xmm5 # 4 : sb2u 114 movdqa -0x40(%r11,%r10), %xmm1 # .Lk_mc_forward[] 115 movdqa %xmm14, %xmm2 # 2 : sb2t 118 movdqa (%r11,%r10), %xmm4 # .Lk_mc_backward[] 119 movdqa %xmm0, %xmm3 # 3 = [all...] |
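
In the vpaes matches above, movdqa is used to pull the 16-byte-aligned lookup-table constants (.Lk_inv, .Lk_ipt, .Lk_sb1, ...) into registers and to copy registers before masking. A minimal C sketch of that aligned-load-plus-mask pattern, assuming SSE2 intrinsics, GCC/Clang alignment attributes, and a hypothetical constant table (not the actual vpaes tables):

    #include <emmintrin.h>   /* SSE2 intrinsics */
    #include <stdint.h>

    /* Hypothetical 16-byte-aligned constant standing in for a .Lk_* table;
       _mm_load_si128 on it compiles to an aligned movdqa load. */
    static const uint8_t k_s0F[16] __attribute__((aligned(16))) = {
        0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f,
        0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f
    };

    /* Split each input byte into low and high nibbles, the first step of the
       pshufb-based table lookups that the vpaes code performs. */
    void split_nibbles(const uint8_t in[16], uint8_t lo[16], uint8_t hi[16])
    {
        __m128i mask  = _mm_load_si128((const __m128i *)k_s0F);        /* movdqa load */
        __m128i state = _mm_loadu_si128((const __m128i *)in);
        __m128i low   = _mm_and_si128(state, mask);                    /* low nibbles */
        __m128i high  = _mm_and_si128(_mm_srli_epi16(state, 4), mask); /* high nibbles */
        _mm_storeu_si128((__m128i *)lo, low);
        _mm_storeu_si128((__m128i *)hi, high);
    }
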
/external/libvpx/libvpx/vp8/common/x86/ |
idctllm_sse2.asm | 128 movdqa xmm0, [rax] 129 movdqa xmm2, [rax+16] 130 movdqa xmm1, [rax+32] 131 movdqa xmm3, [rax+48] 134 movdqa [rax], xmm7 135 movdqa [rax+16], xmm7 136 movdqa [rax+32], xmm7 137 movdqa [rax+48], xmm7 147 movdqa xmm4, xmm0 154 movdqa xmm4, xmm [all...] |
subpixel_ssse3.asm | 52 movdqa xmm7, [GLOBAL(rd)] 61 movdqa xmm4, XMMWORD PTR [rax] ;k0_k5 62 movdqa xmm5, XMMWORD PTR [rax+256] ;k2_k4 63 movdqa xmm6, XMMWORD PTR [rax+128] ;k1_k3 80 movdqa xmm1, xmm0 83 movdqa xmm2, xmm1 117 movdqa xmm5, XMMWORD PTR [rax+256] ;k2_k4 118 movdqa xmm6, XMMWORD PTR [rax+128] ;k1_k3 120 movdqa xmm3, XMMWORD PTR [GLOBAL(shuf2bfrom1)] 121 movdqa xmm4, XMMWORD PTR [GLOBAL(shuf3bfrom1) [all...] |
iwalsh_sse2.asm | 26 movdqa xmm0, [rcx + 0] ;ip[4] ip[0] 27 movdqa xmm1, [rcx + 16] ;ip[12] ip[8] 31 movdqa xmm3, xmm0 ;ip[4] ip[0] 36 movdqa xmm4, xmm0 40 movdqa xmm1, xmm4 ;c1 b1 49 movdqa xmm3, xmm4 ; 13 12 11 10 03 02 01 00 52 movdqa xmm1, xmm4 ; 23 03 22 02 21 01 20 00 58 movdqa xmm3, xmm4 ;ip[4] ip[0] 65 movdqa xmm5, xmm4 69 movdqa xmm1, xmm5 ;c1 b [all...] |
mfqe_sse2.asm | 37 movdqa xmm1, [GLOBAL(tMFQE)] 49 movdqa xmm2, [rax] 50 movdqa xmm4, [rdx] 54 movdqa xmm3, xmm2 61 movdqa xmm5, xmm4 76 movdqa [rdx], xmm2 114 movdqa xmm1, [GLOBAL(tMFQE)] 193 movdqa xmm0, [rax] ; src1 194 movdqa xmm1, [rdx] ; src2 211 movdqa xmm0, xmm [all...] |
sad_ssse3.asm | 16 movdqa xmm0, XMMWORD PTR [rsi] 25 movdqa xmm0, XMMWORD PTR [rsi] 38 movdqa xmm0, XMMWORD PTR [rsi+rax] 57 movdqa xmm0, XMMWORD PTR [rsi] 58 movdqa xmm4, XMMWORD PTR [rdi] 59 movdqa xmm7, XMMWORD PTR [rdi+16] 61 movdqa xmm5, xmm7 64 movdqa xmm6, xmm7 73 movdqa xmm0, XMMWORD PTR [rsi] 74 movdqa xmm4, XMMWORD PTR [rdi [all...] |
subpixel_sse2.asm | 68 movdqa xmm4, xmm1 69 movdqa xmm5, xmm1 71 movdqa xmm6, xmm1 72 movdqa xmm7, xmm1 118 movdqa XMMWORD Ptr [rdi], xmm4 193 movdqa xmm4, xmm1 194 movdqa xmm5, xmm1 196 movdqa xmm6, xmm1 197 movdqa xmm7, xmm1 242 movdqa XMMWORD Ptr [rdi], xmm [all...] |
sad_sse4.asm | 16 movdqa xmm0, XMMWORD PTR [rsi] 23 movdqa xmm2, xmm1 29 movdqa xmm4, xmm3 37 movdqa xmm0, XMMWORD PTR [rsi] 44 movdqa xmm2, xmm5 50 movdqa xmm4, xmm3 60 movdqa xmm0, XMMWORD PTR [rsi + rax] 70 movdqa xmm2, xmm5 75 movdqa xmm4, xmm3 93 movdqa xmm2, xmm [all...] |
variance_impl_ssse3.asm | 69 movdqa xmm2, xmm0 94 movdqa xmm3, xmm1 107 movdqa xmm2, xmm0 108 movdqa xmm0, xmm1 109 movdqa xmm3, xmm2 164 movdqa xmm0, xmm1 174 movdqa xmm2, xmm1 175 movdqa xmm0, xmm3 202 movdqa xmm1, xmm0 266 movdqa xmm3, xmm [all...] |
variance_impl_sse2.asm | 37 movdqa xmm0, [rax] 38 movdqa xmm1, [rax+16] 39 movdqa xmm2, [rax+32] 40 movdqa xmm3, [rax+48] 55 movdqa xmm3,xmm4 58 movdqa xmm3,xmm4 136 movdqa xmm3, xmm1 137 movdqa xmm4, xmm2 166 movdqa xmm1, xmm6 178 movdqa xmm2, xmm [all...] |
/external/libvpx/libvpx/vp8/encoder/x86/ |
encodeopt.asm | 27 movdqa xmm0, [rsi] 28 movdqa xmm1, [rdi] 30 movdqa xmm2, [rsi+16] 31 movdqa xmm3, [rdi+16] 42 movdqa xmm1, xmm0 48 movdqa xmm1, xmm0 230 movdqa xmm0, [rsi] 231 movdqa xmm1, [rdi] 233 movdqa xmm2, [rsi+16] 234 movdqa xmm3, [rdi+16 [all...] |
fwalsh_sse2.asm | 40 movdqa xmm1, xmm0 44 movdqa xmm2, xmm0 51 movdqa xmm1, xmm0 61 movdqa xmm2, xmm0 77 movdqa xmm2, xmm0 80 movdqa xmm3, xmm1 89 movdqa xmm0, xmm4 92 movdqa xmm1, xmm6 96 movdqa xmm2, xmm0 99 movdqa xmm3, xmm [all...] |
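
The encodeopt.asm matches above load pairs of 16-bit coefficient blocks with movdqa and accumulate a squared-error sum. A minimal sketch of that block-error reduction with SSE2 intrinsics, assuming a hypothetical 32-coefficient block (the real routines are specialized per block type) and coefficient differences small enough not to overflow the 32-bit accumulator:

    #include <emmintrin.h>
    #include <stdint.h>

    /* Hypothetical block error: sum of squared differences between two
       blocks of 32 int16 coefficients, in the spirit of encodeopt.asm. */
    int32_t block_error_sse2(const int16_t *a, const int16_t *b)
    {
        __m128i acc = _mm_setzero_si128();
        for (int i = 0; i < 32; i += 8) {
            __m128i va = _mm_loadu_si128((const __m128i *)(a + i));
            __m128i vb = _mm_loadu_si128((const __m128i *)(b + i));
            __m128i d  = _mm_sub_epi16(va, vb);
            /* pmaddwd: square each 16-bit lane and add adjacent pairs into 32 bits */
            acc = _mm_add_epi32(acc, _mm_madd_epi16(d, d));
        }
        /* horizontal add of the four 32-bit lanes */
        acc = _mm_add_epi32(acc, _mm_srli_si128(acc, 8));
        acc = _mm_add_epi32(acc, _mm_srli_si128(acc, 4));
        return _mm_cvtsi128_si32(acc);
    }
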
/external/libvpx/libvpx/third_party/libyuv/source/ |
scale.c | 652 movdqa xmm0, [eax] 653 movdqa xmm1, [eax + 16] 658 movdqa [edx], xmm0 681 movdqa xmm0, [eax] 682 movdqa xmm1, [eax + 16] 683 movdqa xmm2, [eax + esi] 684 movdqa xmm3, [eax + esi + 16] 689 movdqa xmm2, xmm0 // average columns (32 to 16 pixels) 691 movdqa xmm3, xmm1 699 movdqa [edx], xmm 1255 movdqa [edi], xmm2 1256 movdqa [edi + 16], xmm3 1316 movdqa [edi], xmm0 1330 movdqa [edi], xmm0 1346 movdqa [edi], xmm0 1398 movdqa [edi], xmm0 1412 movdqa [edi], xmm0 1428 movdqa [edi], xmm0 [all...] |
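
The scale.c matches above are the row-downscaling loops: aligned movdqa loads of two adjacent rows, column averaging, then an aligned movdqa store. A minimal sketch of just the vertical averaging step, assuming SSE2 intrinsics and unaligned loads/stores for simplicity (the asm uses movdqa because it can guarantee 16-byte alignment):

    #include <emmintrin.h>
    #include <stdint.h>

    /* Hypothetical vertical 2:1 averaging of two rows of 'width' pixels
       (width a multiple of 16), the first step of a box-filter downscaler. */
    void average_rows(const uint8_t *row0, const uint8_t *row1,
                      uint8_t *dst, int width)
    {
        for (int i = 0; i < width; i += 16) {
            __m128i a = _mm_loadu_si128((const __m128i *)(row0 + i));
            __m128i b = _mm_loadu_si128((const __m128i *)(row1 + i));
            /* pavgb: per-byte (a + b + 1) >> 1 */
            _mm_storeu_si128((__m128i *)(dst + i), _mm_avg_epu8(a, b));
        }
    }
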
/external/llvm/test/CodeGen/X86/ |
widen_load-2.ll | 9 ; CHECK: movdqa 39 ; CHECK: movdqa 40 ; CHECK: movdqa 45 ; CHECK: movdqa 56 ; CHECK: movdqa 57 ; CHECK: movdqa 58 ; CHECK: movdqa 62 ; CHECK: movdqa 63 ; CHECK: movdqa 64 ; CHECK: movdqa [all...] |
/external/libvpx/libvpx/vp9/encoder/x86/ |
vp9_sad_sse4.asm | 16 movdqa xmm0, XMMWORD PTR [rsi] 23 movdqa xmm2, xmm1 29 movdqa xmm4, xmm3 37 movdqa xmm0, XMMWORD PTR [rsi] 44 movdqa xmm2, xmm5 50 movdqa xmm4, xmm3 60 movdqa xmm0, XMMWORD PTR [rsi + rax] 70 movdqa xmm2, xmm5 75 movdqa xmm4, xmm3 93 movdqa xmm2, xmm [all...] |
vp9_sad_ssse3.asm | 16 movdqa xmm0, XMMWORD PTR [rsi] 25 movdqa xmm0, XMMWORD PTR [rsi] 38 movdqa xmm0, XMMWORD PTR [rsi+rax] 57 movdqa xmm0, XMMWORD PTR [rsi] 58 movdqa xmm4, XMMWORD PTR [rdi] 59 movdqa xmm7, XMMWORD PTR [rdi+16] 61 movdqa xmm5, xmm7 64 movdqa xmm6, xmm7 73 movdqa xmm0, XMMWORD PTR [rsi] 74 movdqa xmm4, XMMWORD PTR [rdi [all...] |
vp9_subpel_variance_impl_sse2.asm | 58 movdqa xmm4, xmm5 77 movdqa xmm5, xmm1 ; save xmm1 for use on the next row 93 movdqa xmm1, xmm0 95 movdqa xmm6, xmm7 104 movdqa xmm7, xmm6 105 movdqa xmm1, xmm0 166 movdqa xmm4, xmm5 184 movdqa xmm5, xmm3 200 movdqa xmm1, xmm0 202 movdqa xmm6, xmm [all...] |
/external/libyuv/files/source/ |
scale.cc | 214 movdqa xmm0, [eax] 215 movdqa xmm1, [eax + 16] 221 movdqa [edx], xmm0 244 movdqa xmm0, [eax] 245 movdqa xmm1, [eax + 16] 246 movdqa xmm2, [eax + esi] 247 movdqa xmm3, [eax + esi + 16] 252 movdqa xmm2, xmm0 // average columns (32 to 16 pixels) 254 movdqa xmm3, xmm1 263 movdqa [edx], xmm 914 movdqa [edi + 16], xmm1 977 movdqa [esi + edi], xmm0 984 movdqa [esi + edi], xmm0 993 movdqa [esi + edi], xmm0 1000 movdqa [esi + edi], xmm0 1010 movdqa [esi + edi], xmm0 1017 movdqa [esi + edi], xmm0 1065 movdqa [esi + edi], xmm0 1072 movdqa [esi + edi], xmm0 1082 movdqa [esi + edi], xmm0 1089 movdqa [esi + edi], xmm0 1099 movdqa [esi + edi], xmm0 1106 movdqa [esi + edi], xmm0 [all...] |
scale_argb.cc | 51 movdqa xmm0, [eax] 52 movdqa xmm1, [eax + 16] 56 movdqa [edx], xmm0 79 movdqa xmm0, [eax] 80 movdqa xmm1, [eax + 16] 81 movdqa xmm2, [eax + esi] 82 movdqa xmm3, [eax + esi + 16] 86 movdqa xmm2, xmm0 // average columns (8 to 4 pixels) 91 movdqa [edx], xmm0 129 movdqa [edx], xmm 178 movdqa [edx], xmm0 236 movdqa [esi + edi], xmm0 241 movdqa [esi + edi], xmm0 // duplicate last pixel for filtering 250 movdqa [esi + edi], xmm0 255 movdqa [esi + edi], xmm0 265 movdqa [esi + edi], xmm0 270 movdqa [esi + edi], xmm0 319 movdqa [esi + edi], xmm0 324 movdqa [esi + edi], xmm0 // duplicate last pixel for filtering 333 movdqa [esi + edi], xmm0 338 movdqa [esi + edi], xmm0 348 movdqa [esi + edi], xmm0 353 movdqa [esi + edi], xmm0 [all...] |
row_posix.cc | 130 "movdqa %%xmm0,%%xmm1 \n" 135 "movdqa %%xmm0,(%1) \n" 136 "movdqa %%xmm1,0x10(%1) \n" 153 "movdqa %3,%%xmm5 \n" 157 "movdqa (%0),%%xmm0 \n" 160 "movdqa %%xmm0,(%0,%1,1) \n" 177 "movdqa %3,%%xmm5 \n" 181 "movdqa (%0),%%xmm0 \n" 184 "movdqa %%xmm0,(%0,%1,1) \n" 200 "movdqa %3,%%xmm5 \n [all...] |
compare.cc | 83 movdqa xmm6, kHash16x33 90 movdqa xmm5, kHashMul0 91 movdqa xmm2, xmm1 93 movdqa xmm3, xmm2 96 movdqa xmm5, kHashMul1 97 movdqa xmm4, xmm2 100 movdqa xmm5, kHashMul2 102 movdqa xmm2, xmm1 105 movdqa xmm5, kHashMul3 165 "movdqa %4,%%xmm6 \n [all...] |
/external/libvpx/libvpx/vp9/common/x86/ |
vp9_subpixel_8t_ssse3.asm | 31 movdqa xmm4, [rdx] ;load filters 44 movdqa k0k1, xmm0 45 movdqa k2k3, xmm1 47 movdqa k4k5, xmm2 48 movdqa k6k7, xmm3 49 movdqa krd, xmm5 115 movdqa xmm4, [rdx] ;load filters 128 movdqa k0k1, xmm0 129 movdqa k2k3, xmm1 131 movdqa k4k5, xmm [all...] |
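
The vp9_subpixel_8t_ssse3.asm matches above stage the 8 filter taps into the k0k1 ... k6k7 and krd temporaries with movdqa before the convolution loop. A minimal sketch of that tap setup in intrinsics, assuming 16-bit taps kept as replicated pairs and the usual 7-bit rounding constant of 64; the register layout here is illustrative, not the exact byte packing the asm uses:

    #include <emmintrin.h>   /* SSE2 is enough for the tap setup itself */
    #include <stdint.h>

    typedef struct {
        __m128i k0k1, k2k3, k4k5, k6k7; /* tap pairs replicated across lanes */
        __m128i krd;                    /* rounding constant for (sum + 64) >> 7 */
    } subpel_taps;

    /* Hypothetical setup: replicate each adjacent pair of 16-bit taps across a
       register so the inner loop can multiply-accumulate pixel pairs directly. */
    static subpel_taps load_taps(const int16_t taps[8])
    {
        subpel_taps t;
        __m128i f = _mm_loadu_si128((const __m128i *)taps); /* t0 .. t7 */
        t.k0k1 = _mm_shuffle_epi32(f, 0x00);  /* t0,t1 repeated four times */
        t.k2k3 = _mm_shuffle_epi32(f, 0x55);  /* t2,t3 */
        t.k4k5 = _mm_shuffle_epi32(f, 0xaa);  /* t4,t5 */
        t.k6k7 = _mm_shuffle_epi32(f, 0xff);  /* t6,t7 */
        t.krd  = _mm_set1_epi16(64);          /* FILTER_BITS == 7 -> round by 64 */
        return t;
    }
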