    Searched refs:rax (Results 1 - 25 of 72)


  /external/openssl/crypto/bn/asm/
x86_64-mont.pl 78 mov ($ap),%rax
80 mov %rax,$lo0
83 imulq $n0,%rax # "tp[0]"*n0
84 mov %rax,$m1
87 add $lo0,%rax # discarded
93 mov ($ap,$j,8),%rax
95 add $hi0,%rax
97 mov %rax,$lo0
98 mov ($np,$j,8),%rax
102 add $hi1,%rax
    [all...]
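
The x86_64-mont.pl hits are the opening of OpenSSL's Montgomery multiplication loop: rax is loaded with the first input word, multiplied by n0 to form the per-iteration reduction factor ("tp[0]"*n0), and the following add exists only for its carry, since the low word is zero by construction (hence "# discarded"). A minimal C sketch of the same reduction idea for a one-limb modulus, not OpenSSL's code; redc1, n and n0 are illustrative names, and n is assumed below 2^63 so the 128-bit sum cannot overflow:

#include <stdint.h>

/* Montgomery reduction (REDC) sketch for a one-limb modulus n, with
 * n0 = -n^{-1} mod 2^64 and t < n * 2^64. Returns t / 2^64 mod n.   */
static uint64_t redc1(unsigned __int128 t, uint64_t n, uint64_t n0)
{
    uint64_t m1 = (uint64_t)t * n0;           /* "tp[0]"*n0, low 64 bits only */
    unsigned __int128 s = t + (unsigned __int128)m1 * n;
    /* The low 64 bits of s are zero by construction, so this add matters
     * only for the carry it produces, just like the add marked "discarded". */
    uint64_t r = (uint64_t)(s >> 64);
    return r >= n ? r - n : r;
}
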
  /bionic/libm/amd64/
s_lrint.S 35 cvtsd2si %xmm0, %rax
s_lrintf.S 35 cvtss2si %xmm0, %rax
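
The two bionic hits are the entire story of lrint on x86-64: cvtsd2si / cvtss2si convert to a 64-bit integer in rax using the current rounding mode, which is exactly what lrint()/lrintf() specify. A sketch of the same conversion from C through the SSE2 intrinsic (an illustration, not bionic's source):

#include <emmintrin.h>   /* _mm_cvtsd_si64 */
#include <math.h>
#include <stdio.h>

/* Convert using the current rounding mode, as s_lrint.S does. */
static long my_lrint(double x)
{
    return (long)_mm_cvtsd_si64(_mm_set_sd(x));   /* cvtsd2si %xmm0, %rax */
}

int main(void)
{
    /* Default rounding is round-to-nearest-even, so both print 2. */
    printf("%ld %ld\n", my_lrint(2.5), lrint(2.5));
    return 0;
}
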
  /external/libvpx/vp8/common/x86/
recon_mmx.asm 26 movsxd rax, dword ptr arg(3) ;stride
39 movd [rdi+rax], mm2
45 movd [rdi+2*rax], mm3
47 add rdi, rax
52 movd [rdi+2*rax], mm4
80 movsxd rax, dword ptr arg(1) ;src_stride;
83 movq mm1, [rsi+rax]
84 movq mm2, [rsi+rax*2]
87 lea rsi, [rsi+rax*2]
90 add rsi, rax
    [all...]
subpixel_ssse3.asm 53 lea rax, [k0_k5 GLOBAL]
54 add rax, rdx
57 cmp esi, DWORD PTR [rax]
60 movdqa xmm4, XMMWORD PTR [rax] ;k0_k5
61 movdqa xmm5, XMMWORD PTR [rax+256] ;k2_k4
62 movdqa xmm6, XMMWORD PTR [rax+128] ;k1_k3
65 movsxd rax, dword ptr arg(1) ;src_pixels_per_line
87 lea rsi, [rsi + rax]
107 movdqa xmm5, XMMWORD PTR [rax+256] ;k2_k4
108 movdqa xmm6, XMMWORD PTR [rax+128] ;k1_k
    [all...]
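
subpixel_ssse3.asm selects a row of the coefficient tables (k0_k5, k2_k4, k1_k3 are the six taps paired up for pmaddubsw) by adding the subpixel offset to rax, then walks the source by src_pixels_per_line. A scalar sketch of a VP8-style six-tap interpolation filter, assuming taps that sum to 128 with round-to-nearest, written with hypothetical names; the real tables and the pairing live in the assembly:

#include <stdint.h>

/* One output pixel of a six-tap subpixel filter. src points at the
 * pixel under tap 2, so the taps cover src[-2..3]; taps[] sum to 128. */
static uint8_t filter6(const uint8_t *src, const int16_t taps[6])
{
    int sum = 0;
    for (int k = 0; k < 6; k++)
        sum += taps[k] * src[k - 2];
    sum = (sum + 64) >> 7;                    /* round, divide by 128 */
    if (sum < 0)   sum = 0;                   /* clamp to 8 bits      */
    if (sum > 255) sum = 255;
    return (uint8_t)sum;
}
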
recon_sse2.asm 26 movsxd rax, dword ptr arg(3) ;stride
40 movq MMWORD PTR [rdi+rax], xmm2
47 movq MMWORD PTR [rdi+rax*2], xmm3
49 add rdi, rax
54 movq MMWORD PTR [rdi+rax*2], xmm4
78 movsxd rax, dword ptr arg(3) ;stride
98 movdqa XMMWORD PTR [rdi+rax], xmm2
108 movdqa XMMWORD PTR [rdi+rax*2], xmm3
110 add rdi, rax
118 movdqa XMMWORD PTR [rdi+rax*2], xmm
    [all...]
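
recon_mmx.asm and recon_sse2.asm store reconstructed rows while stepping the destination by the stride kept in rax ([rdi+rax], [rdi+rax*2], then add rdi, rax). The underlying operation is prediction plus residual, saturated to 8 bits; a scalar sketch with hypothetical parameter names:

#include <stdint.h>

/* Reconstruct a block: dst = clamp(pred + diff, 0, 255). The stride
 * parameter plays the role rax does in the assembly.                */
static void recon_block(const uint8_t *pred, const int16_t *diff,
                        uint8_t *dst, int width, int height, int stride)
{
    for (int r = 0; r < height; r++) {
        for (int c = 0; c < width; c++) {
            int v = pred[c] + diff[c];
            dst[c] = (uint8_t)(v < 0 ? 0 : v > 255 ? 255 : v);
        }
        pred += width;                  /* prediction and residual are packed  */
        diff += width;
        dst  += stride;                 /* destination advances by frame stride */
    }
}
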
idctllm_mmx.asm 44 mov rax, arg(0) ;input
47 movq mm0, [rax ]
48 movq mm1, [rax+ 8]
50 movq mm2, [rax+16]
51 movq mm3, [rax+24]
53 movsxd rax, dword ptr arg(2) ;pitch
174 movq [rdx+rax], mm1
175 movq [rdx+rax*2], mm2
177 add rdx, rax
178 movq [rdx+rax*2], mm
    [all...]
postproc_mmx.asm 58 movsxd rax, DWORD PTR arg(2) ;src_pixels_per_line ; destination pitch?
74 movq mm5, [rsi + rax] ; mm4 = r1 p0..p7
87 movq mm5, [rsi + 2*rax] ; mm4 = r2 p0..p7
101 neg rax
103 movq mm5, [rsi+2*rax] ; mm4 = r-2 p0..p7
117 movq mm4, [rsi+rax] ; mm4 = r-1 p0..p7
141 neg rax ; pitch is positive
155 push rax
157 mov rax, [rdi-4];
243 pop rax
    [all...]
  /external/v8/src/x64/
ic-x64.cc 259 __ movq(rax, Operand(rsp, kPointerSize));
279 __ JumpIfNotSmi(rax, &check_string);
280 __ SmiToInteger32(rax, rax);
289 __ cmpl(rax, FieldOperand(rcx, FixedArray::kLengthOffset));
292 __ movq(rax, Operand(rcx, rax, times_pointer_size,
294 __ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
302 // rax: untagged index
308 __ cmpl(rax, FieldOperand(rcx, PixelArray::kLengthOffset))
717 __ movb(Operand(rcx, rbx, times_1, 0), rax);
767 rax);
775 rax);
841 __ movb(Operand(rcx, rbx, times_1, 0), rax);
845 __ movw(Operand(rcx, rbx, times_2, 0), rax);
849 __ movl(Operand(rcx, rbx, times_4, 0), rax);
896 __ movb(Operand(rcx, rbx, times_1, 0), rax);
900 __ movw(Operand(rcx, rbx, times_2, 0), rax);
917 __ movl(Operand(rcx, rbx, times_4, 0), rax);
934 __ movb(Operand(rcx, rbx, times_1, 0), rax);
938 __ movw(Operand(rcx, rbx, times_2, 0), rax);
942 __ movl(Operand(rcx, rbx, times_4, 0), rax);
    [all...]
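
The ic-x64.cc hits around JumpIfNotSmi / SmiToInteger32 are keyed-load fast paths: the key is checked to be a small integer (smi), untagged into a plain int32, bounds-checked against FixedArray::kLengthOffset and then used to index the backing store. A toy sketch of the tag arithmetic, assuming the x64 smi layout of that era (32-bit payload in the upper half of the word, zero tag bits below); this is an illustration, not V8's API:

#include <stdint.h>
#include <stdbool.h>

/* Toy smi helpers under the assumed layout: payload shifted into the
 * upper 32 bits, so the low bit (the smi tag) is always clear.       */
enum { SMI_SHIFT = 32, SMI_TAG_MASK = 1 };

static bool     is_smi(uint64_t v)        { return (v & SMI_TAG_MASK) == 0; }
static uint64_t int32_to_smi(int32_t v)   { return (uint64_t)(uint32_t)v << SMI_SHIFT; }
static int32_t  smi_to_int32(uint64_t v)  { return (int32_t)(v >> SMI_SHIFT); }
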
regexp-macro-assembler-x64.cc 63 * The registers rax, rbx, r9 and r11 are free to use for computations.
194 __ lea(rax, Operand(rsi, rdi, times_1, 0));
195 __ cmpq(rax, Operand(rbp, kInputStart));
206 __ lea(rax, Operand(rsi, rdi, times_1, 0));
207 __ cmpq(rax, Operand(rbp, kInputStart));
300 __ movzxbl(rax, Operand(r11, 0));
303 __ cmpb(rax, rdx);
309 __ or_(rax, Immediate(0x20)); // Convert match character to lower-case.
311 __ cmpb(rax, rdx);
313 __ subb(rax, Immediate('a'))
736 __ movq(Operand(rbp, rcx, times_1, 0), rax);
749 __ movq(register_location(i), rax); // One write every page.
782 __ movl(Operand(rbx, i * kIntSize), rax);
870 __ movq(backtrack_stackpointer(), rax);
    [all...]
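
The case-insensitive path in regexp-macro-assembler-x64.cc (or_(rax, Immediate(0x20)) followed by a range check starting at subb(rax, Immediate('a'))) is the standard ASCII trick: setting bit 0x20 folds upper case onto lower case, and the range check verifies the folded byte really is a letter. The same logic in C:

#include <stdbool.h>

/* ASCII-only case-insensitive byte comparison using the 0x20 fold. */
static bool equal_ignore_case(unsigned char a, unsigned char b)
{
    if (a == b)
        return true;
    unsigned char fa = (unsigned char)(a | 0x20);   /* convert to lower case */
    if (fa != (b | 0x20))
        return false;
    return fa >= 'a' && fa <= 'z';    /* only letters may differ by the case bit */
}
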
builtins-x64.cc 42 // -- rax : number of arguments excluding receiver
49 // -- rsp[8 * argc] : first argument (argc == rax)
64 // JumpToRuntime expects rax to contain the number of arguments
66 __ addq(rax, Immediate(num_extra_args + 1));
82 // rax and rbx because these registers are used when copying the
84 __ Integer32ToSmi(rcx, rax);
107 // -- rax : actual number of arguments
116 __ cmpq(rax, rbx);
127 __ lea(rax, Operand(rbp, rax, times_pointer_size, offset))
436 __ movq(Operand(rbp, kIndexOffset), rax);
974 __ movq(Operand(rbx, JSObject::kMapOffset), rax);
    [all...]
debug-x64.cc 68 __ xor_(rax, rax); // No arguments (argc == 0).
84 __ pop(rax);
103 // -- rax: number of arguments
105 // The number of arguments in rax is not smi encoded.
112 // rax is the actual number of arguments not encoded as a smi, see comment
115 // -- rax: number of arguments
117 // The number of arguments in rax is not smi encoded.
134 // -- rax : value
136 // Register rax contains an object that needs to be pushed on th
    [all...]
stub-cache-x64.cc 237 __ push(rax);
259 __ movq(FieldOperand(receiver_reg, offset), rax);
263 __ movq(name_reg, rax);
270 __ movq(FieldOperand(scratch, offset), rax);
274 __ movq(name_reg, rax);
278 // Return the value (register rax).
295 __ movq(rax, FieldOperand(receiver, JSArray::kLengthOffset));
331 __ movl(rax, FieldOperand(receiver, String::kLengthOffset));
332 __ Integer32ToSmi(rax, rax);
692 rbx, rax, name, &miss);
713 rax);
733 rax);
755 rax);
764 rbx, rax, name, &miss);
960 __ cmpq(FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset), rax);
1471 __ movq(FieldOperand(rcx, JSGlobalPropertyCell::kValueOffset), rax);
1703 GenerateFastPropertyLoad(masm(), rax, reg, holder, index);
    [all...]
  /external/libvpx/vp8/encoder/x86/
quantize_mmx.asm 31 mov rax, arg(1) ;zbin_ptr
32 movq mm1, [rax]
63 mov rax, arg(3) ;dequant_ptr
64 movq mm2, [rax]
67 mov rax, arg(7) ;dqcoeff_ptr
69 movq [rax], mm3
74 mov rax, arg(1) ;zbin_ptr
75 movq mm5, [rax+8]
103 mov rax, arg(3) ;dequant_ptr
104 movq mm6, [rax+8
    [all...]
subtract_mmx.asm 28 mov rax, arg(3) ;Predictor
35 movd mm1, [rax]
43 movd mm1, [rax+rcx]
51 movd mm1, [rax+rcx*2]
63 movd mm1, [rax+rcx]
90 mov rax, arg(2) ;pred
99 movq mm3, [rax]
118 movq mm3, [rax+8]
137 add rax, 16
172 mov rax, arg(3) ;pre
    [all...]
sad_mmx.asm 38 movsxd rax, dword ptr arg(1) ;src_stride
41 lea rcx, [rsi+rax*8]
43 lea rcx, [rcx+rax*8]
81 lea rsi, [rsi+rax]
103 movd rax, mm7
130 movsxd rax, dword ptr arg(1) ;src_stride
133 lea rcx, [rsi+rax*8]
135 lea rcx, [rcx+rax*8]
155 lea rsi, [rsi+rax]
175 movd rax, mm
    [all...]
quantize_sse2.asm 31 ALIGN_STACK 16, rax
117 xor rax, rax
126 movsxd rcx, DWORD PTR[rbx + rax*4] ;now we have rc
145 mov [rsp + eob], rax ;eob = i
148 movsxd rcx, DWORD PTR[rbx + rax*4 + 4]
153 lea rax, [rax + 1]
168 mov [rsp + eob], rax ;eob = i
171 movsxd rcx, DWORD PTR[rbx + rax*4 + 4
    [all...]
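
quantize_mmx.asm and quantize_sse2.asm apply the zbin and dequant tables and track the end-of-block position; the DWORD PTR [rbx + rax*4] loads walk the coefficients in zig-zag order, so eob records the scan position just past the last nonzero coefficient. A heavily simplified scalar sketch with hypothetical names; the real VP8 quantizer also applies rounding and a zbin boost, which the plain division below only stands in for:

/* Simplified quantizer sketch: visit coefficients in zig-zag order,
 * quantize and dequantize each one, and remember last-nonzero + 1.  */
static int quantize_block(const short *coeff, const short *quant,
                          const short *dequant, const int *zigzag,
                          short *qcoeff, short *dqcoeff, int n)
{
    int eob = 0;                            /* "eob = i" in the asm      */
    for (int i = 0; i < n; i++) {
        int rc = zigzag[i];                 /* DWORD PTR [rbx + rax*4]   */
        int v  = coeff[rc] / quant[rc];     /* placeholder for the real math */
        qcoeff[rc]  = (short)v;
        dqcoeff[rc] = (short)(v * dequant[rc]);
        if (v != 0)
            eob = i + 1;
    }
    return eob;
}
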
sad_sse2.asm 33 movsxd rax, dword ptr arg(1) ;src_stride
36 lea rcx, [rsi+rax*8]
38 lea rcx, [rcx+rax*8]
49 movq xmm4, QWORD PTR [rsi+rax]
57 movq xmm6, QWORD PTR [rsi+rax+8]
60 lea rsi, [rsi+rax*2]
78 movd rax, xmm0
116 movd rax, mm7
117 cmp rax, arg(4)
138 movd rax, mm
    [all...]
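
sad_mmx.asm and sad_sse2.asm accumulate absolute differences a row at a time, with rax holding src_stride and lea rsi, [rsi+rax] (or rax*2) advancing to the next row; the final movd rax, ... moves the accumulated sum into the return register. The scalar equivalent:

#include <stdint.h>
#include <stdlib.h>

/* Sum of absolute differences over a width x height block. The two
 * stride parameters step between rows, as rax does in the assembly. */
static unsigned int block_sad(const uint8_t *src, int src_stride,
                              const uint8_t *ref, int ref_stride,
                              int width, int height)
{
    unsigned int sad = 0;
    for (int r = 0; r < height; r++) {
        for (int c = 0; c < width; c++)
            sad += (unsigned int)abs(src[c] - ref[c]);
        src += src_stride;                  /* lea rsi, [rsi+rax] */
        ref += ref_stride;
    }
    return sad;
}
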
  /external/libffi/src/x86/
unix64.S 49 leaq (%rdi, %rsi), %rax /* Find local stack base. */
50 movq %rdx, (%rax) /* Save flags. */
51 movq %rcx, 8(%rax) /* Save raddr. */
52 movq %rbp, 16(%rax) /* Save old frame pointer. */
53 movq %r10, 24(%rax) /* Relocate return address. */
54 movq %rax, %rbp /* Finalize local stack frame. */
117 movzbq %al, %rax
118 movq %rax, (%rdi)
122 movsbq %al, %rax
123 movq %rax, (%rdi
    [all...]
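
In unix64.S the return-value epilogue widens the small integer sitting in %al to the full %rax before storing it into the caller's return slot: movzbq for unsigned types, movsbq for signed ones (darwin64.S below repeats the same sequence). The distinction is exactly C's integer widening rules; a minimal illustration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint8_t raw = 0xF0;                     /* the byte sitting in %al        */

    uint64_t as_unsigned = (uint8_t)raw;    /* movzbq %al, %rax: zero-extend  */
    int64_t  as_signed   = (int8_t)raw;     /* movsbq %al, %rax: sign-extend  */

    printf("%llu %lld\n",
           (unsigned long long)as_unsigned, /* 240 */
           (long long)as_signed);           /* -16 */
    return 0;
}
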
darwin64.S 49 leaq (%rdi, %rsi), %rax /* Find local stack base. */
50 movq %rdx, (%rax) /* Save flags. */
51 movq %rcx, 8(%rax) /* Save raddr. */
52 movq %rbp, 16(%rax) /* Save old frame pointer. */
53 movq %r10, 24(%rax) /* Relocate return address. */
54 movq %rax, %rbp /* Finalize local stack frame. */
115 movzbq %al, %rax
116 movq %rax, (%rdi)
120 movsbq %al, %rax
121 movq %rax, (%rdi
    [all...]
  /external/v8/test/cctest/
test-assembler-x64.cc 44 using v8::internal::rax;
62 // the XMM registers. The return value is in RAX.
92 __ movq(rax, arg2);
120 __ pop(rax);
121 __ pop(rax);
122 __ pop(rax);
144 __ movq(rax, arg2);
145 __ addq(rax, arg1);
166 __ movq(rax, arg2);
168 __ movq(rax, rdx)
    [all...]
test-macro-assembler-x64.cc 49 using v8::internal::rax;
84 // the XMM registers. The return value is in RAX.
119 __ movl(rax, Immediate(id));
154 __ xor_(rax, rax); // Success.
173 __ movl(rax, Immediate(id + 1));
176 __ movl(rax, Immediate(id + 2));
180 __ movl(rax, Immediate(id + 3));
183 __ movl(rax, Immediate(id + 4));
186 __ incq(rax);
    [all...]
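
The cctest files assemble code into a buffer, cast it to a function pointer and read the result back through the calling convention ("The return value is in RAX"). A standalone sketch of that pattern with hand-assembled bytes for mov $42, %eax; ret (Linux, x86-64; nothing here is V8 code):

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
    /* mov $42, %eax ; ret -- the result travels back in RAX */
    unsigned char code[] = { 0xb8, 0x2a, 0x00, 0x00, 0x00, 0xc3 };

    void *buf = mmap(NULL, 4096, PROT_READ | PROT_WRITE | PROT_EXEC,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (buf == MAP_FAILED)
        return 1;
    memcpy(buf, code, sizeof(code));

    int (*fn)(void) = (int (*)(void))buf;
    printf("%d\n", fn());                   /* prints 42 */

    munmap(buf, 4096);
    return 0;
}
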
  /external/zlib/contrib/amd64/
amd64-match.S 307 pmovmskb %xmm1, %rax
320 pmovmskb %xmm3, %rax
329 pmovmskb %xmm5, %rax
337 pmovmskb %xmm7, %rax
345 LeaveLoopCmps: add %rax, %rdx
347 mov (%windowbestlen, %rdx), %rax
348 xor (%prev, %rdx), %rax
351 mov 8(%windowbestlen, %rdx), %rax
352 xor 8(%prev, %rdx), %rax
355 mov 16(%windowbestlen, %rdx), %rax
    [all...]
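
amd64-match.S compares candidate strings 16 bytes at a time (pmovmskb collects the per-byte results into rax) and the scalar tail xors 8-byte words from %windowbestlen and %prev to locate the first differing byte. The word-at-a-time idea in C, assuming a little-endian target as the x86-64 code does:

#include <stdint.h>
#include <stddef.h>
#include <string.h>

/* Length of the common prefix of a and b, at most max bytes. A nonzero
 * XOR pinpoints the mismatching byte via its trailing zero bit count.  */
static size_t match_len(const unsigned char *a, const unsigned char *b,
                        size_t max)
{
    size_t i = 0;
    while (i + 8 <= max) {
        uint64_t wa, wb;
        memcpy(&wa, a + i, 8);
        memcpy(&wb, b + i, 8);
        uint64_t x = wa ^ wb;               /* xor (%prev, %rdx), %rax */
        if (x != 0)
            return i + (size_t)(__builtin_ctzll(x) >> 3);
        i += 8;
    }
    while (i < max && a[i] == b[i])         /* byte-by-byte tail */
        i++;
    return i;
}
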
  /external/libvpx/vp8/decoder/x86/
dequantize_mmx.asm 27 mov rax, arg(2) ;q
30 pmullw mm1, [rax+0] ; mm4 *= kernel 0 modifiers.
34 pmullw mm1, [rax+8] ; mm4 *= kernel 0 modifiers.
38 pmullw mm1, [rax+16] ; mm4 *= kernel 0 modifiers.
42 pmullw mm1, [rax+24] ; mm4 *= kernel 0 modifiers.
64 mov rax, arg(0) ;input
68 movq mm0, [rax ]
71 movq mm1, [rax +8]
74 movq mm2, [rax+16]
77 movq mm3, [rax+24
    [all...]
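
dequantize_mmx.asm multiplies each group of four coefficients by the matching entries of the dequantization table addressed through rax (pmullw against [rax+0], [rax+8], and so on). The scalar form is a single multiply per coefficient:

/* Scalar dequantization sketch: coefficient times its per-position
 * dequant factor, 16 coefficients per 4x4 block.                   */
static void dequantize_block(const short *input, const short *dq, short *output)
{
    for (int i = 0; i < 16; i++)
        output[i] = (short)(input[i] * dq[i]);   /* pmullw mm1, [rax+...] */
}
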
  /external/openssl/crypto/rc4/asm/
rc4-x86_64.pl 117 ror \$8,%rax # ror is redundant when $i=0
130 ror \$8,%rax
133 xor ($inp),%rax
135 mov %rax,($out)
292 mov %eax,($dat,%rax,4)
314 mov %al,($dat,%rax)
348 lea .Lopts(%rip),%rax
352 add \$12,%rax
355 add \$13,%rax
392 mov 120($context),%rax # pull context->Ra
    [all...]
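
rc4-x86_64.pl batches the keystream: eight bytes are rotated into %rax (the ror \$8 steps) and then xored against a whole quadword of input before the store to ($out). One byte of plain RC4 keystream, the operation the unrolled loop amortizes:

#include <stdint.h>

typedef struct { uint8_t S[256]; uint8_t i, j; } rc4_state;

/* One RC4 PRGA step; the assembly produces eight of these per iteration,
 * rotating each byte into %rax before a single 64-bit xor with the input. */
static uint8_t rc4_next(rc4_state *st)
{
    st->i = (uint8_t)(st->i + 1);
    st->j = (uint8_t)(st->j + st->S[st->i]);
    uint8_t t = st->S[st->i];               /* swap S[i] and S[j] */
    st->S[st->i] = st->S[st->j];
    st->S[st->j] = t;
    return st->S[(uint8_t)(st->S[st->i] + st->S[st->j])];
}
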
