/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp8/common/x86/
  sad_sse3.asm
    170  lddqu xmm5, XMMWORD PTR [%3]
    174  psadbw xmm5, xmm0
    187  paddw xmm5, xmm1
    205  paddw xmm5, xmm1
    265  lddqu xmm5, XMMWORD PTR [%4]
    270  psadbw xmm5, xmm0
    285  paddw xmm5, xmm2
    302  paddw xmm5, xmm2
    399  movq xmm0, xmm5
    400  psrldq xmm5, [all...]
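These matches are the skeleton of a SAD (sum of absolute differences) kernel: psadbw folds sixteen absolute byte differences into two 16-bit partial sums, paddw accumulates them across rows, and the closing movq/psrldq pair combines the two 64-bit halves. A minimal intrinsics sketch of the same idea; the function name and stride parameters are illustrative, not taken from sad_sse3.asm, and an ordinary unaligned load stands in for lddqu:

    #include <emmintrin.h>
    #include <stdint.h>

    static unsigned sad16(const uint8_t *src, int src_stride,
                          const uint8_t *ref, int ref_stride, int rows) {
        __m128i acc = _mm_setzero_si128();
        for (int i = 0; i < rows; i++) {
            __m128i s = _mm_loadu_si128((const __m128i *)(src + i * src_stride));
            __m128i r = _mm_loadu_si128((const __m128i *)(ref + i * ref_stride));
            acc = _mm_add_epi16(acc, _mm_sad_epu8(s, r)); /* psadbw + paddw */
        }
        /* fold the low and high 64-bit partial sums (the movq/psrldq step) */
        acc = _mm_add_epi16(acc, _mm_srli_si128(acc, 8));
        return (unsigned)_mm_cvtsi128_si32(acc);
    }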
/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp8/encoder/x86/
  subtract_sse2.asm
    111  movdqa xmm5, [rax + rbx]
    120  psubb xmm3, xmm5
    122  pxor xmm5, xmm4        ; convert to signed values
    124  pcmpgtb xmm5, xmm1     ; obtain sign information
    127  punpcklbw xmm3, xmm5   ; put sign back to subtraction
    128  punpckhbw xmm1, xmm5   ; put sign back to subtraction
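The comments describe a widening trick: psubb subtracts raw bytes, the pxor re-biases operands so pcmpgtb can recover the sign of each true difference, and punpcklbw/punpckhbw interleave those sign bytes in as the high halves, yielding sign-extended 16-bit results. The scalar computation all of this emulates is simply the following (names are illustrative):

    #include <stdint.h>

    /* diff[i] gets the full-precision signed difference that the
       psubb/pcmpgtb/punpck sequence reconstructs 16 bytes at a time */
    static void subtract_row(const uint8_t *src, const uint8_t *pred,
                             int16_t *diff, int n) {
        for (int i = 0; i < n; i++)
            diff[i] = (int16_t)src[i] - (int16_t)pred[i];
    }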
/toolchain/binutils/binutils-2.25/gas/testsuite/gas/i386/
  evex-wig1-intel.d
    12  [ ]*[a-f0-9]+: 62 f2 fd 4f 21 f5  vpmovsxbd zmm6\{k7\},xmm5
    13  [ ]*[a-f0-9]+: 62 f2 fd cf 21 f5  vpmovsxbd zmm6\{k7\}\{z\},xmm5
    20  [ ]*[a-f0-9]+: 62 f2 fd 4f 22 f5  vpmovsxbq zmm6\{k7\},xmm5
    21  [ ]*[a-f0-9]+: 62 f2 fd cf 22 f5  vpmovsxbq zmm6\{k7\}\{z\},xmm5
    36  [ ]*[a-f0-9]+: 62 f2 fd 4f 24 f5  vpmovsxwq zmm6\{k7\},xmm5
    37  [ ]*[a-f0-9]+: 62 f2 fd cf 24 f5  vpmovsxwq zmm6\{k7\}\{z\},xmm5
    44  [ ]*[a-f0-9]+: 62 f2 fd 4f 31 f5  vpmovzxbd zmm6\{k7\},xmm5
    45  [ ]*[a-f0-9]+: 62 f2 fd cf 31 f5  vpmovzxbd zmm6\{k7\}\{z\},xmm5
    52  [ ]*[a-f0-9]+: 62 f2 fd 4f 32 f5  vpmovzxbq zmm6\{k7\},xmm5
    53  [ ]*[a-f0-9]+: 62 f2 fd cf 32 f5  vpmovzxbq zmm6\{k7\}\{z\},xmm5
    [all...]
  evex-wig1.d
    12  [ ]*[a-f0-9]+: 62 f2 fd 4f 21 f5  vpmovsxbd %xmm5,%zmm6\{%k7\}
    13  [ ]*[a-f0-9]+: 62 f2 fd cf 21 f5  vpmovsxbd %xmm5,%zmm6\{%k7\}\{z\}
    20  [ ]*[a-f0-9]+: 62 f2 fd 4f 22 f5  vpmovsxbq %xmm5,%zmm6\{%k7\}
    21  [ ]*[a-f0-9]+: 62 f2 fd cf 22 f5  vpmovsxbq %xmm5,%zmm6\{%k7\}\{z\}
    36  [ ]*[a-f0-9]+: 62 f2 fd 4f 24 f5  vpmovsxwq %xmm5,%zmm6\{%k7\}
    37  [ ]*[a-f0-9]+: 62 f2 fd cf 24 f5  vpmovsxwq %xmm5,%zmm6\{%k7\}\{z\}
    44  [ ]*[a-f0-9]+: 62 f2 fd 4f 31 f5  vpmovzxbd %xmm5,%zmm6\{%k7\}
    45  [ ]*[a-f0-9]+: 62 f2 fd cf 31 f5  vpmovzxbd %xmm5,%zmm6\{%k7\}\{z\}
    52  [ ]*[a-f0-9]+: 62 f2 fd 4f 32 f5  vpmovzxbq %xmm5,%zmm6\{%k7\}
    53  [ ]*[a-f0-9]+: 62 f2 fd cf 32 f5  vpmovzxbq %xmm5,%zmm6\{%k7\}\{z\ [all...]
  avx512dq_vl-intel.d
    34   [ ]*[a-f0-9]+:[ ]*62 f1 fd 0f 7b f5[ ]*vcvtpd2qq xmm6\{k7\},xmm5
    35   [ ]*[a-f0-9]+:[ ]*62 f1 fd 8f 7b f5[ ]*vcvtpd2qq xmm6\{k7\}\{z\},xmm5
    60   [ ]*[a-f0-9]+:[ ]*62 f1 fd 0f 79 f5[ ]*vcvtpd2uqq xmm6\{k7\},xmm5
    61   [ ]*[a-f0-9]+:[ ]*62 f1 fd 8f 79 f5[ ]*vcvtpd2uqq xmm6\{k7\}\{z\},xmm5
    86   [ ]*[a-f0-9]+:[ ]*62 f1 7d 0f 7b f5[ ]*vcvtps2qq xmm6\{k7\},xmm5
    87   [ ]*[a-f0-9]+:[ ]*62 f1 7d 8f 7b f5[ ]*vcvtps2qq xmm6\{k7\}\{z\},xmm5
    99   [ ]*[a-f0-9]+:[ ]*62 f1 7d 2f 7b f5[ ]*vcvtps2qq ymm6\{k7\},xmm5
    100  [ ]*[a-f0-9]+:[ ]*62 f1 7d af 7b f5[ ]*vcvtps2qq ymm6\{k7\}\{z\},xmm5
    112  [ ]*[a-f0-9]+:[ ]*62 f1 7d 0f 79 f5[ ]*vcvtps2uqq xmm6\{k7\},xmm5
    113  [ ]*[a-f0-9]+:[ ]*62 f1 7d 8f 79 f5[ ]*vcvtps2uqq xmm6\{k7\}\{z\},xmm5
    [all...]
  avx512dq_vl.d
    34   [ ]*[a-f0-9]+:[ ]*62 f1 fd 0f 7b f5[ ]*vcvtpd2qq %xmm5,%xmm6\{%k7\}
    35   [ ]*[a-f0-9]+:[ ]*62 f1 fd 8f 7b f5[ ]*vcvtpd2qq %xmm5,%xmm6\{%k7\}\{z\}
    60   [ ]*[a-f0-9]+:[ ]*62 f1 fd 0f 79 f5[ ]*vcvtpd2uqq %xmm5,%xmm6\{%k7\}
    61   [ ]*[a-f0-9]+:[ ]*62 f1 fd 8f 79 f5[ ]*vcvtpd2uqq %xmm5,%xmm6\{%k7\}\{z\}
    86   [ ]*[a-f0-9]+:[ ]*62 f1 7d 0f 7b f5[ ]*vcvtps2qq %xmm5,%xmm6\{%k7\}
    87   [ ]*[a-f0-9]+:[ ]*62 f1 7d 8f 7b f5[ ]*vcvtps2qq %xmm5,%xmm6\{%k7\}\{z\}
    99   [ ]*[a-f0-9]+:[ ]*62 f1 7d 2f 7b f5[ ]*vcvtps2qq %xmm5,%ymm6\{%k7\}
    100  [ ]*[a-f0-9]+:[ ]*62 f1 7d af 7b f5[ ]*vcvtps2qq %xmm5,%ymm6\{%k7\}\{z\}
    112  [ ]*[a-f0-9]+:[ ]*62 f1 7d 0f 79 f5[ ]*vcvtps2uqq %xmm5,%xmm6\{%k7\}
    113  [ ]*[a-f0-9]+:[ ]*62 f1 7d 8f 79 f5[ ]*vcvtps2uqq %xmm5,%xmm6\{%k7\}\{z\ [all...]
  avx512dq.s
    336  vrangesd $0xab, %xmm4, %xmm5, %xmm6{%k7}                # AVX512DQ
    337  vrangesd $0xab, %xmm4, %xmm5, %xmm6{%k7}{z}             # AVX512DQ
    338  vrangesd $0xab, {sae}, %xmm4, %xmm5, %xmm6{%k7}         # AVX512DQ
    339  vrangesd $123, %xmm4, %xmm5, %xmm6{%k7}                 # AVX512DQ
    340  vrangesd $123, {sae}, %xmm4, %xmm5, %xmm6{%k7}          # AVX512DQ
    341  vrangesd $123, (%ecx), %xmm5, %xmm6{%k7}                # AVX512DQ
    342  vrangesd $123, -123456(%esp,%esi,8), %xmm5, %xmm6{%k7}  # AVX512DQ
    343  vrangesd $123, 1016(%edx), %xmm5, %xmm6{%k7}            # AVX512DQ Disp8
    344  vrangesd $123, 1024(%edx), %xmm5, %xmm6{%k7}            # AVX512DQ
    345  vrangesd $123, -1024(%edx), %xmm5, %xmm6{%k7}           # AVX512DQ Disp [all...]
/bionic/libm/x86/
  e_pow.S
    154  movd %edx, %xmm5
    160  psllq $32, %xmm5
    163  andpd %xmm3, %xmm5
    166  subsd %xmm5, %xmm3
    170  mulpd %xmm0, %xmm5
    174  subsd %xmm2, %xmm5
    182  unpcklpd %xmm3, %xmm5
    184  addsd %xmm5, %xmm3
    187  addpd -3616(%ebx,%edx), %xmm5
    193  addsd %xmm7, %xmm5
    [all...]
  s_log1p.S
    90   movsd 2128(%ebx), %xmm5
    107  pshufd $228, %xmm5, %xmm6
    125  andpd %xmm1, %xmm5
    127  subsd %xmm5, %xmm1
    129  mulsd %xmm0, %xmm5
    137  subsd %xmm2, %xmm5
    142  addsd %xmm5, %xmm1
    143  movapd %xmm1, %xmm5
    145  subsd %xmm1, %xmm5
    146  addsd %xmm5, %xmm [all...]
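Both kernels lean on the same exact-split idiom: andpd with a constant mask truncates the argument's low mantissa bits to produce an exact "high" part (which also indexes the coefficient tables behind -3616(%ebx,%edx) and 2128(%ebx)), and subsd recovers the exact remainder so the two pieces sum back to the original value. A minimal sketch of that split, assuming a 27-bit truncation; the mask width actually used in e_pow.S/s_log1p.S may differ:

    #include <stdint.h>
    #include <string.h>

    /* Split x into hi + lo exactly: hi keeps only the top 25 mantissa
       bits (the andpd), lo is the remainder (the subsd). */
    static void split_double(double x, double *hi, double *lo) {
        uint64_t bits;
        memcpy(&bits, &x, sizeof bits);
        bits &= 0xFFFFFFFFF8000000ULL;  /* clear the low 27 mantissa bits */
        memcpy(hi, &bits, sizeof *hi);
        *lo = x - *hi;                  /* exact: no rounding occurs here */
    }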
/bionic/libm/x86_64/
  e_pow.S
    134  movd %rdx, %xmm5
    140  andpd %xmm3, %xmm5
    144  subsd %xmm5, %xmm3
    149  mulpd %xmm0, %xmm5
    154  subsd %xmm2, %xmm5
    162  unpcklpd %xmm3, %xmm5
    164  addsd %xmm5, %xmm3
    167  addpd -3648(%r11,%rdx), %xmm5
    173  addsd %xmm7, %xmm5
    180  pshufd $238, %xmm5, %xmm [all...]
  s_log1p.S
    72   movd %r8, %xmm5
    90   pshufd $228, %xmm5, %xmm6
    107  andpd %xmm1, %xmm5
    109  subsd %xmm5, %xmm1
    111  mulsd %xmm0, %xmm5
    119  subsd %xmm2, %xmm5
    124  addsd %xmm5, %xmm1
    125  movq %xmm1, %xmm5
    127  subsd %xmm1, %xmm5
    128  addsd %xmm5, %xmm [all...]
  s_cos.S
    193  movapd ONEHALF(%rip), %xmm5
    196  orps %xmm4, %xmm5
    197  addpd %xmm5, %xmm1
    207  movapd SC_4(%rip), %xmm5
    219  mulpd %xmm0, %xmm5
    224  mulpd %xmm0, %xmm5
    237  addpd SC_3(%rip), %xmm5
    240  mulpd %xmm0, %xmm5
    246  addpd %xmm5, %xmm6
    247  movq 8(%rax), %xmm5
    [all...]
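The x86_64 pow and log1p kernels repeat the split idiom above; in s_cos.S, the SC_4/SC_3 loads alternating with mulpd/addpd are a Horner evaluation of the polynomial approximation in the squared reduced argument. The shape of that chain in scalar form (the coefficients below are the classic fdlibm cosine values, shown for illustration; bionic's SC_* constants may differ):

    /* cos(r) ~= 1 + z*(c1 + z*(c2 + z*(c3 + z*c4))) with z = r*r,
       folded one coefficient per mulpd/addpd pair as in s_cos.S */
    static double cos_poly(double r) {
        const double c1 = -5.00000000000000000000e-01;
        const double c2 =  4.16666666666666019037e-02;
        const double c3 = -1.38888888888741095749e-03;
        const double c4 =  2.48015872894767294178e-05;
        double z = r * r;
        return 1.0 + z * (c1 + z * (c2 + z * (c3 + z * c4)));
    }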
/external/llvm/test/CodeGen/X86/
  vector-rotate-256.ll
    18  ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5
    19  ; AVX1-NEXT: vpsllq %xmm4, %xmm5, %xmm6
    21  ; AVX1-NEXT: vpsllq %xmm4, %xmm5, %xmm4
    28  ; AVX1-NEXT: vpsrlq %xmm2, %xmm5, %xmm4
    30  ; AVX1-NEXT: vpsrlq %xmm2, %xmm5, %xmm2
    81  ; AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [1065353216,1065353216,1065353216,1065353216]
    82  ; AVX1-NEXT: vpaddd %xmm5, %xmm4, %xmm4
    87  ; AVX1-NEXT: vpaddd %xmm5, %xmm1, %xmm1
    93  ; AVX1-NEXT: vpsrlq $32, %xmm3, %xmm5
    94  ; AVX1-NEXT: vpsrld %xmm5, %xmm6, %xmm [all...]
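AVX1 has no vector rotate instruction, so the checked lowering splits the 256-bit vector (vextractf128) and synthesizes each rotate from a left shift, a right shift by the complementary amount, and an OR. Per element, that is the standard branch-free rotate:

    #include <stdint.h>

    /* the scalar operation each vpsllq/vpsrlq/vpor triple implements */
    static uint64_t rotl64(uint64_t x, unsigned n) {
        n &= 63;                                   /* rotate counts are mod 64 */
        return (x << n) | (x >> ((64 - n) & 63));  /* masking avoids a shift by 64 */
    }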
  stack-folding-xop.ll
    14  %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
    23  %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
    32  %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
    41  %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
    50  %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
    59  %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
    68  %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
    75  %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
    84  %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
    91  %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"( [all...]
  vector-idiv.ll
    66  ; SSE41-NEXT: movdqa %xmm0, %xmm5
    67  ; SSE41-NEXT: pmuludq %xmm2, %xmm5
    68  ; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm5[1,1,3,3]
    69  ; SSE41-NEXT: pblendw {{.*#+}} xmm5 = xmm5[0,1],xmm4[2,3],xmm5[4,5],xmm4[6,7]
    70  ; SSE41-NEXT: psubd %xmm5, %xmm0
    72  ; SSE41-NEXT: paddd %xmm5, %xmm0
    92  ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm0[1,1,3,3 [all...]
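The pmuludq/psubd/paddd sequence is the magic-reciprocal lowering of unsigned division by a constant (vector-idiv.ll exercises division by 7): take the high 32 bits of a widening multiply, then apply a subtract/shift/add fixup because the true reciprocal needs 33 bits. Scalar sketch, assuming the usual divide-by-7 magic value 0x24924925:

    #include <stdint.h>

    static uint32_t udiv7(uint32_t x) {
        uint32_t hi = (uint32_t)(((uint64_t)x * 0x24924925u) >> 32); /* pmuludq + pshufd */
        uint32_t t  = (x - hi) >> 1;   /* psubd, psrld $1 */
        return (t + hi) >> 2;          /* paddd, psrld $2 */
    }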
/external/libjpeg-turbo/simd/
  jquanti-sse2.asm
    138  movdqa xmm5, XMMWORD [XMMBLOCK(1,0,esi,SIZEOF_DCTELEM)]
    142  movdqa xmm1,xmm5
    146  psraw xmm5,(WORD_BIT-1)
    150  pxor xmm1,xmm5
    154  psubw xmm1,xmm5    ; if (xmm1 < 0) xmm1 = -xmm1;
    172  pxor xmm1,xmm5
    176  psubw xmm1,xmm5
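psraw by WORD_BIT-1 smears each coefficient's sign bit across the whole word, and the following pxor/psubw pair is the branch-free absolute value the comment describes; the second pxor/psubw reapplies the saved sign after quantization. Scalar equivalent (the trick relies on arithmetic right shift of negative values, which x86 provides):

    #include <stdint.h>

    static int16_t abs16(int16_t v) {
        int16_t sign = v >> 15;               /* psraw: 0 or -1 */
        return (int16_t)((v ^ sign) - sign);  /* pxor + psubw */
    }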
  jdmrgext-sse2-64.asm
    105  movdqa xmm5,xmm6    ; xmm5=CbH
    128  paddw xmm6,xmm5
    130  paddw xmm6,xmm5     ; xmm6=(CbH * FIX(1.77200))=(B-Y)H
    138  movdqa xmm6,xmm5
    140  punpcklwd xmm5,xmm1
    142  pmaddwd xmm5,[rel PW_MF0344_F0285]
    149  paddd xmm5,[rel PD_ONEHALF]
    151  psrad xmm5,SCALEBITS
    158  packssdw xmm5,xmm6  ; xmm5=CbH*-FIX(0.344)+CrH*FIX(0.285 [all...]
  jdmrgext-sse2.asm
    116  movdqa xmm5,xmm6    ; xmm5=CbH
    139  paddw xmm6,xmm5
    141  paddw xmm6,xmm5     ; xmm6=(CbH * FIX(1.77200))=(B-Y)H
    149  movdqa xmm6,xmm5
    151  punpcklwd xmm5,xmm1
    153  pmaddwd xmm5,[GOTOFF(eax,PW_MF0344_F0285)]
    160  paddd xmm5,[GOTOFF(eax,PD_ONEHALF)]
    162  psrad xmm5,SCALEBITS
    169  packssdw xmm5,xmm6  ; xmm5=CbH*-FIX(0.344)+CrH*FIX(0.285 [all...]
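These are the 64-bit and 32-bit builds of the same upsample-and-color-convert kernel. The YCbCr-to-RGB math is fixed point: constants pre-scaled by 2^SCALEBITS, a paired Cb/Cr dot product via punpcklwd + pmaddwd, rounding with PD_ONEHALF, then psrad to scale back down. A scalar sketch of the conversion (SCALEBITS = 16 follows libjpeg's convention; the green term is written in textbook form rather than the asm's Cb*-0.344 + Cr*0.285-then-subtract-Cr factoring):

    #include <stdint.h>

    #define SCALEBITS 16
    #define ONE_HALF  (1 << (SCALEBITS - 1))
    #define FIX(x)    ((int32_t)((x) * (1 << SCALEBITS) + 0.5))

    static void ycc_to_rgb(int y, int cb, int cr, int *r, int *g, int *b) {
        cb -= 128; cr -= 128;   /* center chroma around zero */
        *r = y + ((FIX(1.40200) * cr + ONE_HALF) >> SCALEBITS);
        *g = y - ((FIX(0.34414) * cb + FIX(0.71414) * cr + ONE_HALF) >> SCALEBITS);
        *b = y + ((FIX(1.77200) * cb + ONE_HALF) >> SCALEBITS);
    }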
/external/mesa3d/src/mesa/x86/
  sse_xform4.S
    71   MOVAPS( MAT(4), XMM5 )            /* m7 | m6 | m5 | m4 */
    84   MULPS( XMM5, XMM1 )               /* oy*m7 | oy*m6 | oy*m5 | oy*m4 */
    154  MOVSS( SRC(1), XMM5 )             /* oy */
    155  SHUFPS( CONST(0x0), XMM5, XMM5 )  /* oy | oy | oy | oy */
    156  MULPS( XMM1, XMM5 )               /* oy*m7 | oy*m6 | oy*m5 | oy*m4 */
    166  ADDPS( XMM5, XMM4 )               /* ox*m3+oy*m7 | ... */
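The comments trace the standard SSE point transform: broadcast one source component across a register (SHUFPS with selector 0x0), multiply it against one matrix column, and accumulate. An intrinsics rendering of the full 4-component transform; the function name and unaligned loads are assumptions, while the column-major layout matches the MAT(0)/MAT(4)/... indexing in the comments:

    #include <xmmintrin.h>

    /* result = M * v for a column-major 4x4 matrix m[16] */
    static __m128 xform4(const float m[16], __m128 v) {
        __m128 x = _mm_shuffle_ps(v, v, 0x00);  /* ox | ox | ox | ox */
        __m128 y = _mm_shuffle_ps(v, v, 0x55);  /* oy | oy | oy | oy */
        __m128 z = _mm_shuffle_ps(v, v, 0xAA);
        __m128 w = _mm_shuffle_ps(v, v, 0xFF);
        __m128 r = _mm_mul_ps(x, _mm_loadu_ps(m + 0));
        r = _mm_add_ps(r, _mm_mul_ps(y, _mm_loadu_ps(m + 4)));
        r = _mm_add_ps(r, _mm_mul_ps(z, _mm_loadu_ps(m + 8)));
        r = _mm_add_ps(r, _mm_mul_ps(w, _mm_loadu_ps(m + 12)));
        return r;
    }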
/external/boringssl/linux-x86_64/crypto/aes/
  aesni-x86_64.S
    189  xorps %xmm0,%xmm5
    229  xorps %xmm0,%xmm5
    273  pxor %xmm0,%xmm5
    327  pxor %xmm0,%xmm5
    377  pxor %xmm0,%xmm5
    441  pxor %xmm0,%xmm5
    517  movdqu 48(%rdi),%xmm5
    535  movups %xmm5,48(%rsi)
    536  movdqu 48(%rdi),%xmm5
    559  movups %xmm5,48(%rsi [all...]
/external/boringssl/mac-x86_64/crypto/aes/
  aesni-x86_64.S
    188  xorps %xmm0,%xmm5
    228  xorps %xmm0,%xmm5
    272  pxor %xmm0,%xmm5
    326  pxor %xmm0,%xmm5
    376  pxor %xmm0,%xmm5
    440  pxor %xmm0,%xmm5
    516  movdqu 48(%rdi),%xmm5
    534  movups %xmm5,48(%rsi)
    535  movdqu 48(%rdi),%xmm5
    558  movups %xmm5,48(%rsi [all...]
/external/boringssl/win-x86_64/crypto/aes/
  aesni-x86_64.asm
    190  xorps xmm5,xmm0
    230  xorps xmm5,xmm0
    274  pxor xmm5,xmm0
    328  pxor xmm5,xmm0
    378  pxor xmm5,xmm0
    442  pxor xmm5,xmm0
    534  movdqu xmm5,XMMWORD[48+rdi]
    552  movups XMMWORD[48+rsi],xmm5
    553  movdqu xmm5,XMMWORD[48+rdi]
    576  movups XMMWORD[48+rsi],xmm5
    [all...]
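The three aesni-x86_64 files are the same perlasm source emitted for the Linux, macOS, and Windows ABIs. The repeated xorps/pxor of xmm0 into xmm5 is the initial AddRoundKey applied to one of several blocks kept in flight, and the movdqu/movups pairs stream blocks in and out at offset 48 (block 3 of a batch). A single-block sketch of the round structure with intrinsics, assuming an already expanded AES-128 schedule; BoringSSL's real code interleaves multiple blocks and uses its own key-schedule layout:

    #include <wmmintrin.h>  /* AES-NI intrinsics */

    static __m128i aes128_encrypt_block(__m128i block, const __m128i rk[11]) {
        block = _mm_xor_si128(block, rk[0]);        /* the xorps/pxor whitening */
        for (int i = 1; i < 10; i++)
            block = _mm_aesenc_si128(block, rk[i]); /* one full AES round each */
        return _mm_aesenclast_si128(block, rk[10]); /* last round, no MixColumns */
    }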
/external/boringssl/linux-x86/crypto/aes/
  vpaes-x86.S
    80   movdqu (%edx),%xmm5
    83   pxor %xmm5,%xmm2
    96   pxor %xmm5,%xmm4
    97   movdqa 64(%ebp),%xmm5
    105  pxor %xmm5,%xmm2
    118  movdqa -32(%ebp),%xmm5
    127  pxor %xmm5,%xmm3
    130  pxor %xmm5,%xmm4
    135  movdqu (%edx),%xmm5
    141  pxor %xmm5,%xmm [all...]
/external/boringssl/mac-x86/crypto/aes/
  vpaes-x86.S
    77   movdqu (%edx),%xmm5
    80   pxor %xmm5,%xmm2
    93   pxor %xmm5,%xmm4
    94   movdqa 64(%ebp),%xmm5
    102  pxor %xmm5,%xmm2
    115  movdqa -32(%ebp),%xmm5
    124  pxor %xmm5,%xmm3
    127  pxor %xmm5,%xmm4
    132  movdqu (%edx),%xmm5
    138  pxor %xmm5,%xmm [all...]
/external/boringssl/win-x86/crypto/aes/
  vpaes-x86.asm
    88   movdqu xmm5,[edx]
    91   pxor xmm2,xmm5
    104  pxor xmm4,xmm5
    105  movdqa xmm5,[64+ebp]
    113  pxor xmm2,xmm5
    126  movdqa xmm5,[ebp-32]
    135  pxor xmm3,xmm5
    138  pxor xmm4,xmm5
    143  movdqu xmm5,[edx]
    149  pxor xmm4,xmm5
    [all...]
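vpaes ("vector permute AES") avoids the cache-timing leaks of table-based AES by performing every S-box step with pshufb against 16-entry tables held in registers; the movdqa loads from fixed (%ebp) offsets fetch those tables, and the pxor chains combine the partial lookups. The core primitive is a nibble-indexed in-register shuffle (a sketch of the building block only, not the full round; names are illustrative):

    #include <tmmintrin.h>  /* SSSE3: _mm_shuffle_epi8 is pshufb */

    /* Look up the low nibble of every byte of x in a 16-entry table kept
       in a register -- constant time, since no load address depends on x. */
    static __m128i nibble_lookup(__m128i table, __m128i x) {
        const __m128i lo_mask = _mm_set1_epi8(0x0F);
        return _mm_shuffle_epi8(table, _mm_and_si128(x, lo_mask));
    }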