/external/libvpx/libvpx/vp9/encoder/x86/ |
vp9_sad4d_sse2.asm | 70 movhps m0, [srcq +%4] 71 movhps m4, [ref1q+%5] 72 movhps m5, [ref2q+%5] 73 movhps m6, [ref3q+%5] 74 movhps m7, [ref4q+%5] 83 movhps m0, [srcq +%4] 84 movhps m1, [ref1q+%5] 85 movhps m2, [ref2q+%5] 86 movhps m3, [ref3q+%5] 92 movhps m1, [ref4q+%5 [all...] |
vp9_sad_sse2.asm | 190 movhps m1, [refq+ref_strideq] 192 movhps m2, [refq+ref_stride3q] 199 movhps m3, [srcq+src_strideq] 201 movhps m4, [srcq+src_stride3q]
|
vp9_subpel_variance.asm | 263 movhps m0, [srcq+src_strideq] 323 movhps m2, [srcq+src_strideq*2] 530 movhps m0, [srcq+src_strideq] 531 movhps m4, [srcq+src_strideq+1] 610 movhps m2, [srcq+src_strideq] 611 movhps m3, [srcq+src_strideq+1] [all...] |
/external/libvpx/libvpx/vp9/common/x86/ |
vp9_intrapred_ssse3.asm | 197 movhps [dst8q ], m0 200 movhps [dst8q+strideq ], m0 203 movhps [dst8q+strideq*2 ], m0 206 movhps [dst8q+stride3q ], m0 214 movhps [dstq +8], m0 215 movhps [dstq+strideq +8], m0 216 movhps [dstq+strideq*2+8], m0 217 movhps [dstq+stride3q +8], m0 219 movhps [dstq +8], m0 220 movhps [dstq+strideq +8], m [all...] |
vp9_intrapred_sse2.asm | 264 movhps [dstq+strideq], m2
|
/external/libvpx/libvpx/vp8/common/x86/ |
loopfilter_sse2.asm | 41 movhps xmm2, [rdi + rcx*2] 42 movhps xmm1, [rdi + rcx] 43 movhps xmm4, [rdi] 44 movhps xmm5, [rdi + rax] 86 movhps xmm2, [rdi + rax] 87 movhps xmm4, [rdi] 88 movhps xmm6, [rdi + rcx] 115 movhps xmm4, [rdi + rcx*2] 263 movhps [rdi], xmm6 265 movhps [rdi + rax], xmm [all...] |
recon_sse2.asm | 415 movhps [rdi+rcx], xmm3 537 movhps [rdi+rcx], xmm0 539 movhps [rdi+rdx], xmm1 551 movhps [rdi+rcx], xmm0 553 movhps [rdi+rdx], xmm1
|
/external/llvm/test/CodeGen/X86/ |
vec_shuffle-26.ll | 43 ; movhps should happen before extractps to assure it gets the correct value. 45 ; CHECK: movhps ([[BASEREG:%[a-z]+]]), 49 ; ATOM: movhps ([[BASEREG:%[a-z]+]]),
|
vec_shuffle-38.ll | 34 ; CHECK: movhps ( 62 ; CHECK: movhps (
|
sse3.ll | 144 ; X64: movhps (%rsi), %xmm0
|
/external/valgrind/main/none/tests/amd64/ |
redundantRexW.c | 327 /* movhps mem, reg 48 0f 16 36 rex.W movhps (%rsi),%xmm6 */ 341 after_test( "rex.W movhps (%rsi),%xmm6", regs, mem ); 343 /* movhps reg, mem 49 0f 17 03 rex.WB movhps %xmm0,(%r11) */ 351 "\t.byte 0x49,0x0F,0x17,0x03\n" /* rex.WB movhps %xmm0,(%r11) */ 357 after_test( "rex.WB movhps %xmm0,(%r11)", regs, mem );
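These tests verify that a REX.W prefix is redundant on movhps: the instruction always moves 64 bits regardless of the prefix, so `48 0f 16 36` must behave exactly like `0f 16 36`. A minimal standalone sketch of the same check (modelled loosely on the harness above, not using its before_test/after_test machinery; GCC inline asm on x86-64 assumed):

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        float mem[2] = { 12.34f, 56.78f };
        float plain[4], rexw[4];

        /* movhps (%rsi),%xmm6 -- unprefixed encoding 0f 16 36 */
        __asm__ volatile(
            "xorps %%xmm6, %%xmm6      \n\t"
            ".byte 0x0F,0x16,0x36      \n\t"
            "movups %%xmm6, (%0)       \n\t"
            : : "r"(plain), "S"(mem) : "xmm6", "memory");

        /* rex.W movhps (%rsi),%xmm6 -- encoding 48 0f 16 36; the W bit
           must be ignored, so the result has to match the run above */
        __asm__ volatile(
            "xorps %%xmm6, %%xmm6      \n\t"
            ".byte 0x48,0x0F,0x16,0x36 \n\t"
            "movups %%xmm6, (%0)       \n\t"
            : : "r"(rexw), "S"(mem) : "xmm6", "memory");

        printf("%s\n", memcmp(plain, rexw, sizeof plain) ? "DIFFER" : "identical");
        return 0;
    }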
|
insn_sse.def | 74 movhps m64.ps[12.34,56.78] xmm.ps[11.11,22.22,33.33,44.44] => 1.ps[11.11,22.22,12.34,56.78] 75 movhps xmm.ps[12.34,56.78,43.21,87.65] m64.ps[11.11,22.22] => 1.ps[43.21,87.65]
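These two entries pin down movhps semantics: the load form overwrites only the upper two single-precision lanes of the xmm register with the 64-bit memory operand, and the store form writes those upper two lanes back to memory. A minimal sketch of the same behaviour in C, using the xmmintrin.h intrinsics that compile to movhps (_mm_loadh_pi for the load form, _mm_storeh_pi for the store form) and the test vectors from the entries above:

    #include <xmmintrin.h>
    #include <stdio.h>

    int main(void)
    {
        /* Load form: movhps m64 -> xmm replaces the upper two lanes,
           matching [11.11,22.22,33.33,44.44] => [11.11,22.22,12.34,56.78]. */
        float mem[2] = { 12.34f, 56.78f };
        __m128 v = _mm_set_ps(44.44f, 33.33f, 22.22f, 11.11f);
        v = _mm_loadh_pi(v, (const __m64 *)mem);    /* movhps (mem), %xmm */

        float out[4];
        _mm_storeu_ps(out, v);
        printf("load:  %g %g %g %g\n", out[0], out[1], out[2], out[3]);

        /* Store form: movhps xmm -> m64 writes the upper two lanes,
           matching [12.34,56.78,43.21,87.65] => [43.21,87.65]. */
        __m128 w = _mm_set_ps(87.65f, 43.21f, 56.78f, 12.34f);
        float hi[2];
        _mm_storeh_pi((__m64 *)hi, w);              /* movhps %xmm, (mem) */
        printf("store: %g %g\n", hi[0], hi[1]);
        return 0;
    }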
|
gen_insn_test.pl | 698 print qq| \"movhps %$arg->{argnumh}, %%$arg->{register}\\n\"\n|; 812 print qq| \"movhps %%$result->{register}, %$result->{argnumh}\\n\"\n|;
|
/external/libyuv/files/source/ |
scale_argb.cc | 162 movhps xmm0, qword ptr [eax + ebx] 164 movhps xmm1, qword ptr [eax + edi] 167 movhps xmm2, qword ptr [esi + ebx] 169 movhps xmm3, qword ptr [esi + edi] 476 "movhps (%0,%1,1),%%xmm0 \n" 478 "movhps (%0,%4,1),%%xmm1 \n" 481 "movhps (%5,%1,1),%%xmm2 \n" 483 "movhps (%5,%4,1),%%xmm3 \n" [all...] |
row_win.cc | 985 movhps qword ptr [edx + edi], xmm0 // V 1055 movhps qword ptr [edx + edi], xmm0 // V [all...] |
row_posix.cc | 794 "movhps %%xmm0,(%1,%2,1) \n" [all...] |
/external/chromium_org/third_party/yasm/source/patched-yasm/modules/arch/x86/tests/ |
ssewidth.asm | 229 movhps xmm1, qword [rbx] label 230 movhps qword [rbx], xmm2 label
|
avx.asm | 869 movhps xmm1, [rax] label 870 movhps xmm1, qword [rax] label 875 movhps [rax], xmm2 label 876 movhps qword [rax], xmm2 label [all...] |
/external/valgrind/main/memcheck/tests/amd64/ |
sse_memory.c | 215 //TEST_INSN( &AllMask, 0,movhps) 446 //TEST_INSN( &AllMask, 0,movhps)
|
/external/valgrind/main/none/tests/x86/ |
insn_sse.def | 74 movhps m64.ps[12.34,56.78] xmm.ps[11.11,22.22,33.33,44.44] => 1.ps[11.11,22.22,12.34,56.78] 75 movhps xmm.ps[12.34,56.78,43.21,87.65] m64.ps[11.11,22.22] => 1.ps[43.21,87.65]
|
gen_insn_test.pl | 667 print qq| \"movhps %$arg->{argnumh}, %%$arg->{register}\\n\"\n|; 776 print qq| \"movhps %%$result->{register}, %$result->{argnumh}\\n\"\n|;
|
/external/llvm/test/MC/X86/ |
x86-32-coverage.s | 956 // CHECK: movhps %xmm5, 3735928559(%ebx,%ecx,8) 957 movhps %xmm5,0xdeadbeef(%ebx,%ecx,8) [all...] |
/external/elfutils/tests/ |
testfile44.expect.bz2 | |
/external/elfutils/libcpu/defs/ |
i386 | 641 00001111,00010110,{Mod}{xmmreg}{R_m}:movhps {Mod}{R_m},{xmmreg} 645 00001111,00010111,{Mod}{xmmreg}{R_m}:movhps {xmmreg},{Mod}{R_m} [all...] |
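The two bit patterns are opcodes 0F 16 (memory-to-register) and 0F 17 (register-to-memory); both forms require a memory operand, since 0F 16 with ModRM.mod == 3 decodes as movlhps instead. A minimal classifier sketch over raw bytes (the helper name classify_movhps is hypothetical):

    #include <stdint.h>
    #include <stdio.h>

    /* Matches the two i386 table entries above:
       0F 16 /r  movhps {Mod}{R_m},{xmmreg}   (load form)
       0F 17 /r  movhps {xmmreg},{Mod}{R_m}   (store form) */
    static const char *classify_movhps(const uint8_t *p)
    {
        if (p[0] != 0x0F)
            return NULL;
        uint8_t mod = p[2] >> 6;      /* ModRM.mod: 3 = register operand */
        if (mod == 3)
            return NULL;              /* movhps requires a memory operand */
        if (p[1] == 0x16)
            return "movhps mem,xmm";  /* load */
        if (p[1] == 0x17)
            return "movhps xmm,mem";  /* store */
        return NULL;
    }

    int main(void)
    {
        const uint8_t load[]  = { 0x0F, 0x16, 0x36 };  /* movhps (%rsi),%xmm6 */
        const uint8_t store[] = { 0x0F, 0x17, 0x03 };  /* movhps %xmm0,(%rbx) */
        printf("%s\n%s\n", classify_movhps(load), classify_movhps(store));
        return 0;
    }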
/external/llvm/lib/Target/X86/ |
README-SSE.txt | 139 Use movhps to update upper 64-bits of a v4sf value. Also movlps on lower half 711 movhps LCPI1_0, %xmm0
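The pattern the README asks for maps directly onto intrinsics: _mm_loadh_pi lowers to movhps and replaces the upper 64 bits of a v4sf, and _mm_loadl_pi lowers to movlps for the lower half. A minimal sketch:

    #include <xmmintrin.h>
    #include <stdio.h>

    int main(void)
    {
        __m128 v = _mm_set_ps(4.0f, 3.0f, 2.0f, 1.0f);  /* lanes {1,2,3,4} */
        float hi[2] = { 7.0f, 8.0f };
        float lo[2] = { 5.0f, 6.0f };

        v = _mm_loadh_pi(v, (const __m64 *)hi);  /* movhps: v = {1,2,7,8} */
        v = _mm_loadl_pi(v, (const __m64 *)lo);  /* movlps: v = {5,6,7,8} */

        float out[4];
        _mm_storeu_ps(out, v);
        printf("%g %g %g %g\n", out[0], out[1], out[2], out[3]);
        return 0;
    }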
|