
Lines Matching defs:SP

440 __ Ld(GpuRegister(reg), SP, 0);
447 __ Sd(GpuRegister(reg), SP, 0);
457 // If V0 spills onto the stack, SP-relative offsets need to be adjusted.
461 SP,
465 SP,
469 SP,
471 __ StoreToOffset(store_type, TMP, SP, index1 + stack_offset);
488 SP,
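
The fragments at 457-488 appear to come from a parallel-move resolver's stack-slot exchange (this listing looks like ART's MIPS64 code generator in art/compiler/optimizing): two slots are swapped through scratch registers, and when the scratch register V0 itself had to be spilled, taking one extra slot on top of the frame, every SP-relative offset is bumped by one doubleword so the original indices still reach the same slots. Below is a minimal sketch of that offset adjustment only, modeling the stack as a plain array and collapsing the two scratch registers into one local temporary; ExchangeSlots, kSlotSize, and v0_spilled are illustrative names, not ART's.

    #include <cassert>
    #include <cstdint>

    constexpr int kSlotSize = 8;  // one doubleword slot on MIPS64

    // Swap the slots at SP-relative byte offsets index1/index2. When the
    // scratch register was spilled, both offsets must be bumped past the
    // extra slot: that is the "index1 + stack_offset" visible in the
    // fragment at line 471.
    void ExchangeSlots(int64_t* sp, int index1, int index2, bool v0_spilled) {
      const int stack_offset = v0_spilled ? kSlotSize : 0;
      int64_t* slot1 = sp + (index1 + stack_offset) / kSlotSize;
      int64_t* slot2 = sp + (index2 + stack_offset) / kSlotSize;
      const int64_t tmp = *slot1;  // LoadFromOffset into a scratch register
      *slot1 = *slot2;
      *slot2 = tmp;                // StoreToOffset(store_type, TMP, SP, ...)
    }

    int main() {
      int64_t frame[4] = {10, 20, 30, 40};  // frame[0] holds the spilled V0
      ExchangeSlots(frame, 0, 16, /*v0_spilled=*/true);
      assert(frame[1] == 40 && frame[3] == 20);
      return 0;
    }
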
509 // TODO: increment/decrement SP in one step instead of two, or remove this comment.
518 __ Sd(reg, SP, ofs);
527 __ Sdc1(reg, SP, ofs);
539 __ Sd(kMethodRegisterArgument, SP, kCurrentMethodStackOffset);
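
Lines 509-539 look like frame entry: SP is decremented once by the whole frame size, each callee-save is stored with Sd at a fixed SP-relative offset, and the incoming ArtMethod* (kMethodRegisterArgument, A0 on MIPS64) lands at kCurrentMethodStackOffset, i.e. offset 0. A print-only sketch of that sequence follows; the three-register save set and its offsets are assumptions chosen for illustration, and only the SP usage matches the fragments.

    #include <cstdio>
    #include <vector>

    int main() {
      const std::vector<const char*> callee_saves = {"s0", "s1", "ra"};
      const int kSlot = 8;  // one doubleword slot on MIPS64
      const int frame_size = static_cast<int>(kSlot * (callee_saves.size() + 1));

      std::printf("daddiu sp, sp, -%d\n", frame_size);  // decrement SP in one step
      int ofs = frame_size;
      for (const char* reg : callee_saves) {
        ofs -= kSlot;
        std::printf("sd %s, %d(sp)\n", reg, ofs);       // __ Sd(reg, SP, ofs)
      }
      // __ Sd(kMethodRegisterArgument, SP, kCurrentMethodStackOffset)
      std::printf("sd a0, 0(sp)\n");
      return 0;
    }
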
556 // TODO: increment/decrement SP in one step instead of two, or remove this comment.
563 __ Ldc1(reg, SP, ofs);
572 __ Ld(reg, SP, ofs);
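
Lines 556-572 mirror that at frame exit: Ldc1 restores the FPU callee-saves, Ld restores the core ones, then a single SP increment tears the frame down. Another print-only sketch, with its own assumed four-slot layout (method* at 0, one FPU and two core saves above it):

    #include <cstdio>

    int main() {
      const int frame_size = 32;
      std::printf("ldc1 f24, 8(sp)\n");                // __ Ldc1(reg, SP, ofs)
      std::printf("ld s0, 16(sp)\n");                  // __ Ld(reg, SP, ofs)
      std::printf("ld ra, 24(sp)\n");
      std::printf("daddiu sp, sp, %d\n", frame_size);  // increment SP in one step
      std::printf("jr ra\n");
      return 0;
    }
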
630 SP,
636 SP,
686 SP,
691 SP,
703 __ StoreToOffset(store_type, TMP, SP, destination.GetStackIndex());
709 __ LoadFromOffset(kLoadWord, TMP, SP, source.GetStackIndex());
710 __ StoreToOffset(kStoreWord, TMP, SP, destination.GetStackIndex());
712 __ LoadFromOffset(kLoadDoubleword, TMP, SP, source.GetStackIndex());
713 __ StoreToOffset(kStoreDoubleword, TMP, SP, destination.GetStackIndex());
758 __ LoadFromOffset(load_type, TMP, SP, mem_loc.GetStackIndex());
762 SP,
772 __ StoreToOffset(store_type, reg_loc.AsRegister<GpuRegister>(), SP, mem_loc.GetStackIndex());
811 __ StoreToOffset(kStoreWord, TMP, SP, location.GetStackIndex());
814 __ StoreToOffset(kStoreDoubleword, TMP, SP, location.GetStackIndex());
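
The cluster at 630-814 is consistent with a MoveLocation-style implementation: MIPS64 has no memory-to-memory move, so a stack slot travels through the TMP scratch register, as a word (kLoadWord/kStoreWord) for 32-bit values and as a doubleword for 64-bit ones. A sketch of the stack-to-stack case against a byte-addressed model stack; MoveStackSlot is an illustrative name, not ART's.

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    // Move a 4- or 8-byte value between two SP-relative slots via a temp,
    // mirroring the LoadFromOffset/StoreToOffset pairs above. `sp` models
    // the stack pointer as the base of a byte array.
    void MoveStackSlot(uint8_t* sp, int src_index, int dst_index, std::size_t size) {
      uint64_t tmp = 0;                         // the TMP scratch register
      std::memcpy(&tmp, sp + src_index, size);  // __ LoadFromOffset(kLoad..., TMP, SP, src)
      std::memcpy(sp + dst_index, &tmp, size);  // __ StoreToOffset(kStore..., TMP, SP, dst)
    }

    int main() {
      uint8_t frame[32] = {};
      const uint64_t value = 0x1122334455667788ULL;
      std::memcpy(frame, &value, 8);
      MoveStackSlot(frame, 0, 8, 8);  // doubleword stack-to-stack move
      uint64_t out = 0;
      std::memcpy(&out, frame + 8, 8);
      assert(out == value);
      return 0;
    }
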
874 // ZERO, K0, K1, GP, SP, RA are always reserved and can't be allocated.
879 blocked_core_registers_[SP] = true;
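
Line 874's comment explains line 879: registers with fixed hardware or ABI roles are marked unallocatable before register allocation runs, so SP can never be handed out as a general-purpose value. A sketch using the standard MIPS register numbering; the GpuRegister enum here is a stand-in for ART's, not the real declaration.

    #include <cassert>
    #include <initializer_list>

    enum GpuRegister {
      ZERO = 0, V0 = 2, K0 = 26, K1 = 27, GP = 28, SP = 29, RA = 31,
      kNumberOfGpuRegisters = 32
    };

    int main() {
      bool blocked_core_registers[kNumberOfGpuRegisters] = {};
      // ZERO, K0, K1, GP, SP, RA are always reserved and can't be allocated.
      for (GpuRegister r : {ZERO, K0, K1, GP, SP, RA}) {
        blocked_core_registers[r] = true;
      }
      assert(blocked_core_registers[SP]);
      assert(!blocked_core_registers[V0]);  // ordinary registers stay allocatable
      return 0;
    }
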
923 __ StoreToOffset(kStoreDoubleword, GpuRegister(reg_id), SP, stack_index);
928 __ LoadFromOffset(kLoadDoubleword, GpuRegister(reg_id), SP, stack_index);
933 __ StoreFpuToOffset(kStoreDoubleword, FpuRegister(reg_id), SP, stack_index);
938 __ LoadFpuFromOffset(kLoadDoubleword, FpuRegister(reg_id), SP, stack_index);
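
Lines 923-938 look like the four spill-slot hooks a code generator gives the register allocator: save/restore of a core register and of an FPU register, each a single doubleword access at the slot's SP-relative stack_index. A sketch of the same four operations against a byte-array frame; the Frame struct and its method names are illustrative.

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    // Each spill slot holds one doubleword at an SP-relative stack_index,
    // whether the value came from a core (Gpu) or floating-point (Fpu)
    // register.
    struct Frame {
      uint8_t bytes[64] = {};
      void SaveCore(uint64_t reg, int stack_index) {   // StoreToOffset(kStoreDoubleword, ...)
        std::memcpy(bytes + stack_index, &reg, 8);
      }
      uint64_t RestoreCore(int stack_index) {          // LoadFromOffset(kLoadDoubleword, ...)
        uint64_t reg;
        std::memcpy(&reg, bytes + stack_index, 8);
        return reg;
      }
      void SaveFpu(double reg, int stack_index) {      // StoreFpuToOffset(kStoreDoubleword, ...)
        std::memcpy(bytes + stack_index, &reg, 8);
      }
      double RestoreFpu(int stack_index) {             // LoadFpuFromOffset(kLoadDoubleword, ...)
        double reg;
        std::memcpy(&reg, bytes + stack_index, 8);
        return reg;
      }
    };

    int main() {
      Frame f;
      f.SaveCore(0xabcdu, 8);
      f.SaveFpu(3.5, 16);
      assert(f.RestoreCore(8) == 0xabcdu && f.RestoreFpu(16) == 3.5);
      return 0;
    }
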
952 __ Ld(current_method, SP, kCurrentMethodStackOffset);
2338 __ LoadFromOffset(kLoadUnsignedWord, temp, SP, receiver.GetStackIndex());
2459 __ LoadFromOffset(kLoadUnsignedWord, temp, SP, receiver.GetStackIndex());
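
The receiver loads at 2338 and 2459 use kLoadUnsignedWord because ART heap references are 32 bits even on 64-bit targets: reading one from its stack slot into a 64-bit temp must zero-extend (MIPS64 lwu), since sign extension would corrupt any reference whose top bit is set. The two C++ conversions below model the two load flavors.

    #include <cassert>
    #include <cstdint>

    int main() {
      const uint32_t reference = 0x80001234u;  // 32-bit heap reference, top bit set
      const uint64_t lwu = static_cast<uint64_t>(reference);  // kLoadUnsignedWord
      const uint64_t lw =                                     // plain kLoadWord
          static_cast<uint64_t>(static_cast<int64_t>(static_cast<int32_t>(reference)));
      assert(lwu == 0x80001234u);           // usable as a pointer-sized value
      assert(lw == 0xffffffff80001234ULL);  // sign-extended: no longer a valid reference
      return 0;
    }
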