Lines matching references to rt — grep-style excerpts from the MIPS MacroAssembler. Each entry keeps its source line number, and non-matching lines between hits are omitted, which is why the numbering is discontinuous.

632 void MacroAssembler::Addu(Register rd, Register rs, const Operand& rt) {
633 if (rt.is_reg()) {
634 addu(rd, rs, rt.rm());
636 if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
637 addiu(rd, rs, rt.imm32_);
641 li(at, rt);
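
Every arithmetic and logical macro in this family follows the three-way dispatch visible above: a register operand gets the R-type instruction, an immediate that fits the 16-bit field (and needs no relocation, which is what MustUseReg screens for) gets the I-type instruction, and anything else is first materialized into the assembler scratch register at via li. A minimal runnable sketch of the selection logic, with the emitter modelled as an instruction printer; the Operand struct and the printed forms here are hypothetical stand-ins for the V8 API, and the relocation check is omitted:

    #include <cstdint>
    #include <cstdio>

    struct Operand {   // hypothetical stand-in for the assembler's Operand
      bool is_reg;
      int reg;         // valid when is_reg
      int32_t imm32;   // valid when !is_reg
    };

    static bool is_int16(int32_t v) { return v >= -32768 && v <= 32767; }

    void Addu(int rd, int rs, const Operand& rt) {
      const int at = 1;  // $at, the assembler scratch register
      if (rt.is_reg) {
        std::printf("addu  $%d, $%d, $%d\n", rd, rs, rt.reg);
      } else if (is_int16(rt.imm32)) {
        std::printf("addiu $%d, $%d, %d\n", rd, rs, rt.imm32);
      } else {
        std::printf("li    $%d, %d\n", at, rt.imm32);  // really lui + ori
        std::printf("addu  $%d, $%d, $%d\n", rd, rs, at);
      }
    }

    int main() {
      Addu(2, 3, Operand{true, 4, 0});         // register form
      Addu(2, 3, Operand{false, 0, 100});      // immediate fits int16
      Addu(2, 3, Operand{false, 0, 1 << 20});  // needs li + addu
    }
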
648 void MacroAssembler::Subu(Register rd, Register rs, const Operand& rt) {
649 if (rt.is_reg()) {
650 subu(rd, rs, rt.rm());
652 if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
653 addiu(rd, rs, -rt.imm32_); // There is no subiu instruction; use addiu(x, y, -imm).
657 li(at, rt);
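
One subtlety in the Subu path: the guard tests is_int16(rt.imm32_), but the immediate actually emitted is -rt.imm32_. For the single value where the two disagree, INT16_MIN, the negation (+32768) no longer fits the signed 16-bit field; whether the elided addiu encoder catches that is not visible in this excerpt. A demonstration of the edge case:

    #include <cstdint>
    #include <cstdio>

    static bool is_int16(int32_t v) { return v >= -32768 && v <= 32767; }

    int main() {
      int32_t imm = INT16_MIN;                       // -32768 passes the guard
      std::printf("guard:   %d\n", is_int16(imm));   // 1
      std::printf("emitted: %d\n", is_int16(-imm));  // 0: 32768 overflows the field
    }
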
664 void MacroAssembler::Mul(Register rd, Register rs, const Operand& rt) {
665 if (rt.is_reg()) {
667 mult(rs, rt.rm());
670 mul(rd, rs, rt.rm());
675 li(at, rt);
686 void MacroAssembler::Mult(Register rs, const Operand& rt) {
687 if (rt.is_reg()) {
688 mult(rs, rt.rm());
692 li(at, rt);
698 void MacroAssembler::Multu(Register rs, const Operand& rt) {
699 if (rt.is_reg()) {
700 multu(rs, rt.rm());
704 li(at, rt);
710 void MacroAssembler::Div(Register rs, const Operand& rt) {
711 if (rt.is_reg()) {
712 div(rs, rt.rm());
716 li(at, rt);
722 void MacroAssembler::Divu(Register rs, const Operand& rt) {
723 if (rt.is_reg()) {
724 divu(rs, rt.rm());
728 li(at, rt);
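
Mult, Multu, Div, and Divu take no destination register because the underlying instructions deposit their results in the HI/LO register pair, read back with mfhi/mflo; only the MIPS32 mul used on Mul's second path writes rd directly. What the pair holds after a signed mult, modelled in C++:

    #include <cstdint>
    #include <cstdio>

    int main() {
      int32_t rs = -3, rt = 100000;
      int64_t product = (int64_t)rs * (int64_t)rt;   // what mult computes
      uint32_t lo = (uint32_t)product;               // mflo
      uint32_t hi = (uint32_t)(product >> 32);       // mfhi
      std::printf("hi=0x%08x lo=0x%08x\n", hi, lo);  // hi=0xffffffff lo=0xfffb6c20
    }
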
734 void MacroAssembler::And(Register rd, Register rs, const Operand& rt) {
735 if (rt.is_reg()) {
736 and_(rd, rs, rt.rm());
738 if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
739 andi(rd, rs, rt.imm32_);
743 li(at, rt);
750 void MacroAssembler::Or(Register rd, Register rs, const Operand& rt) {
751 if (rt.is_reg()) {
752 or_(rd, rs, rt.rm());
754 if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
755 ori(rd, rs, rt.imm32_);
759 li(at, rt);
766 void MacroAssembler::Xor(Register rd, Register rs, const Operand& rt) {
767 if (rt.is_reg()) {
768 xor_(rd, rs, rt.rm());
770 if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
771 xori(rd, rs, rt.imm32_);
775 li(at, rt);
782 void MacroAssembler::Nor(Register rd, Register rs, const Operand& rt) {
783 if (rt.is_reg()) {
784 nor(rd, rs, rt.rm());
788 li(at, rt);
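
And, Or, and Xor test is_uint16 rather than is_int16 because andi, ori, and xori zero-extend their 16-bit immediate, whereas the arithmetic and set-on-less-than immediates are sign-extended. Nor has no immediate form at all, which is why its non-register case goes straight to li(at, rt). The two extension rules, modelled:

    #include <cstdint>
    #include <cstdio>

    int main() {
      uint16_t imm = 0x8000;
      uint32_t zext = imm;           // what andi/ori/xori see: 0x00008000
      int32_t  sext = (int16_t)imm;  // what addiu/slti see:    0xffff8000
      std::printf("zero-extended: 0x%08x\n", zext);
      std::printf("sign-extended: 0x%08x\n", (uint32_t)sext);
    }
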
794 void MacroAssembler::Neg(Register rs, const Operand& rt) {
795 ASSERT(rt.is_reg());
797 ASSERT(!at.is(rt.rm()));
799 xor_(rs, rt.rm(), at);
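
Neg only accepts a register operand and, as excerpted, xors it with the scratch register. If the elided setup loads -1 into at (my reading of the missing lines, not something this excerpt shows), the sequence computes a bitwise NOT, which differs from two's-complement negation by one; a true negate on MIPS is simply subu(rd, zero_reg, rs). The difference:

    #include <cstdint>
    #include <cstdio>

    int main() {
      int32_t x = 5;
      int32_t not_x = x ^ -1;  // xor_(rs, rt.rm(), at) with at == -1
      int32_t neg_x = 0 - x;   // subu(rd, zero_reg, rs)
      std::printf("~x = %d, -x = %d\n", not_x, neg_x);  // -6 vs. -5
    }
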
803 void MacroAssembler::Slt(Register rd, Register rs, const Operand& rt) {
804 if (rt.is_reg()) {
805 slt(rd, rs, rt.rm());
807 if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
808 slti(rd, rs, rt.imm32_);
812 li(at, rt);
819 void MacroAssembler::Sltu(Register rd, Register rs, const Operand& rt) {
820 if (rt.is_reg()) {
821 sltu(rd, rs, rt.rm());
823 if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
824 sltiu(rd, rs, rt.imm32_);
828 li(at, rt);
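
Slt and Sltu mirror each other, with one architectural wrinkle worth noting: per the MIPS ISA, sltiu sign-extends its 16-bit immediate before performing the unsigned compare. Assuming the elided sltiu encoder emits the field verbatim, the is_uint16 guard admits immediates in [0x8000, 0xffff] that the hardware reinterprets as [0xffff8000, 0xffffffff]. A model of the two readings:

    #include <cstdint>
    #include <cstdio>

    int main() {
      uint32_t rs = 40000, imm = 0x8000;              // imm passes is_uint16
      uint32_t hw = (uint32_t)(int32_t)(int16_t)imm;  // value sltiu compares against
      std::printf("intended rs < imm: %d\n", rs < imm);  // 0
      std::printf("hardware rs < ext: %d\n", rs < hw);   // 1
    }
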
835 void MacroAssembler::Ror(Register rd, Register rs, const Operand& rt) {
837 if (rt.is_reg()) {
838 rotrv(rd, rs, rt.rm());
840 rotr(rd, rs, rt.imm32_);
843 if (rt.is_reg()) {
844 subu(at, zero_reg, rt.rm());
846 srlv(rd, rs, rt.rm());
849 if (rt.imm32_ == 0) {
852 srl(at, rs, rt.imm32_);
853 sll(rd, rs, (0x20 - rt.imm32_) & 0x1f);
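
On MIPS32R2 Ror emits rotr/rotrv directly; the pre-R2 fallback composes the rotate from two shifts plus an or (the or_ is among the elided lines). The (0x20 - rt.imm32_) & 0x1f mask guards the imm == 0 case, where an unmasked 32-bit shift would be invalid. The identity being implemented:

    #include <cstdint>
    #include <cstdio>

    uint32_t ror(uint32_t x, unsigned n) {
      n &= 31;
      if (n == 0) return x;                      // matches the imm32_ == 0 special case
      return (x >> n) | (x << ((32 - n) & 31));  // srl + sll + or_
    }

    int main() {
      std::printf("0x%08x\n", ror(0x80000001u, 1));  // 0xc0000000
    }
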
1051 void MacroAssembler::Ext(Register rt,
1059 ext_(rt, rs, pos, size);
1061 // Move rs to rt and shift it left then right to get the desired bitfield on the right side and zeroes on the left.
1064 sll(rt, rs, shift_left); // Acts as a move if shift_left == 0.
1068 srl(rt, rt, shift_right);
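
Without the R2 ext instruction, Ext isolates the bitfield [pos, pos+size) by shifting left to discard the bits above the field, then right to discard the bits below it, as the comment describes. A sketch of the fallback's arithmetic, assuming 0 < size < 32 and pos + size <= 32:

    #include <cstdint>
    #include <cstdio>

    uint32_t Ext(uint32_t rs, int pos, int size) {
      int shift_left  = 32 - (pos + size);
      int shift_right = 32 - size;
      uint32_t rt = rs << shift_left;  // acts as a move if shift_left == 0
      return rt >> shift_right;        // field now right-aligned, zero-filled
    }

    int main() {
      std::printf("0x%02x\n", Ext(0xabcd1234u, 8, 8));  // 0x12
    }
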
1074 void MacroAssembler::Ins(Register rt,
1083 ins_(rt, rs, pos, size);
1085 ASSERT(!rt.is(t8) && !rs.is(t8));
1092 and_(at, rt, at);
1093 or_(rt, t8, at);
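
The Ins fallback (which burns t8 and at as scratch, hence the ASSERT) builds a mask for the destination field, keeps rt's bits outside that field, and ors in the low size bits of rs shifted into place. This sketch captures the net effect of the sequence rather than transcribing it instruction for instruction, again assuming 0 < size < 32 and pos + size <= 32:

    #include <cstdint>
    #include <cstdio>

    uint32_t Ins(uint32_t rt, uint32_t rs, int pos, int size) {
      uint32_t mask  = ((1u << size) - 1) << pos;  // field being overwritten
      uint32_t field = (rs << pos) & mask;         // new bits, shifted into place (t8)
      return (rt & ~mask) | field;                 // the and_ / or_ from the excerpt
    }

    int main() {
      std::printf("0x%08x\n", Ins(0xffffffffu, 0x00, 8, 8));  // 0xffff00ff
    }
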
1330 void MacroAssembler::Movz(Register rd, Register rs, Register rt) {
1333 Branch(&done, ne, rt, Operand(zero_reg));
1337 movz(rd, rs, rt);
1342 void MacroAssembler::Movn(Register rd, Register rs, Register rt) {
1345 Branch(&done, eq, rt, Operand(zero_reg));
1349 movn(rd, rs, rt);
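
Movz and Movn are conditional moves: rd receives rs when rt is zero (Movz) or nonzero (Movn) and is left untouched otherwise. On CPUs without the movz/movn instructions the macros branch around a plain move, which computes the same thing:

    #include <cstdint>
    #include <cstdio>

    uint32_t Movz(uint32_t rd, uint32_t rs, uint32_t rt) { return rt == 0 ? rs : rd; }
    uint32_t Movn(uint32_t rd, uint32_t rs, uint32_t rt) { return rt != 0 ? rs : rd; }

    int main() {
      std::printf("%u %u\n", Movz(1, 2, 0), Movn(1, 2, 0));  // 2 1
    }
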
1595 #define BRANCH_ARGS_CHECK(cond, rs, rt) ASSERT( \
1596 (cond == cc_always && rs.is(zero_reg) && rt.rm().is(zero_reg)) || \
1597 (cond != cc_always && (!rs.is(zero_reg) || !rt.rm().is(zero_reg))))
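
BRANCH_ARGS_CHECK enforces the branch macros' calling convention: an unconditional branch (cc_always) must pass zero_reg for both comparands, while a conditional branch must not compare zero_reg against zero_reg, since that condition would be constant. The predicate, restated:

    #include <cassert>

    enum Condition { cc_always, eq, ne };  // abbreviated; the real enum is larger

    bool branch_args_ok(Condition cond, bool rs_is_zero, bool rt_is_zero) {
      return (cond == cc_always && rs_is_zero && rt_is_zero) ||
             (cond != cc_always && (!rs_is_zero || !rt_is_zero));
    }

    int main() {
      assert(branch_args_ok(cc_always, true, true));
      assert(!branch_args_ok(eq, true, true));
    }
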
1606 const Operand& rt,
1608 BranchShort(offset, cond, rs, rt, bdslot);
1630 const Operand& rt,
1634 BranchShort(L, cond, rs, rt, bdslot);
1639 BranchShort(&skip, neg_cond, rs, rt);
1651 BranchShort(&skip, neg_cond, rs, rt);
1658 BranchShort(L, cond, rs, rt, bdslot);
1684 const Operand& rt,
1686 BRANCH_ARGS_CHECK(cond, rs, rt);
1691 if (rt.is_reg()) {
1693 // rt.
1695 r2 = rt.rm_;
1789 li(r2, rt);
1796 li(r2, rt);
1801 if (rt.imm32_ == 0) {
1805 li(r2, rt);
1811 if (rt.imm32_ == 0) {
1813 } else if (is_int16(rt.imm32_)) {
1814 slti(scratch, rs, rt.imm32_);
1818 li(r2, rt);
1824 if (rt.imm32_ == 0) {
1826 } else if (is_int16(rt.imm32_)) {
1827 slti(scratch, rs, rt.imm32_);
1831 li(r2, rt);
1837 if (rt.imm32_ == 0) {
1841 li(r2, rt);
1848 if (rt.imm32_ == 0) {
1852 li(r2, rt);
1858 if (rt.imm32_ == 0) {
1860 } else if (is_int16(rt.imm32_)) {
1861 sltiu(scratch, rs, rt.imm32_);
1865 li(r2, rt);
1871 if (rt.imm32_ == 0) {
1874 } else if (is_int16(rt.imm32_)) {
1875 sltiu(scratch, rs, rt.imm32_);
1879 li(r2, rt);
1885 if (rt.imm32_ == 0) {
1889 li(r2, rt);
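
The case ladder above lowers every condition to a primitive beq/bne, composing it with slt/sltu when MIPS lacks a direct compare-and-branch for the relation. Immediate operands get three tiers: imm == 0 uses the compare-against-zero branch forms, an immediate fitting int16 uses slti/sltiu followed by a branch on the scratch result, and everything else is materialized into r2 through li(at, rt). The middle tier, modelled for a signed less-than:

    #include <cstdint>
    #include <cstdio>

    // slti scratch, rs, imm   ; scratch = (rs < imm) ? 1 : 0
    // bne  scratch, zero_reg, <target>
    bool branch_taken_lt(int32_t rs, int32_t imm) {
      int32_t scratch = (rs < imm) ? 1 : 0;
      return scratch != 0;
    }

    int main() {
      std::printf("%d %d\n", branch_taken_lt(-1, 0), branch_taken_lt(7, 0));  // 1 0
    }
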
1917 const Operand& rt,
1919 BRANCH_ARGS_CHECK(cond, rs, rt);
1924 if (rt.is_reg()) {
1926 r2 = rt.rm_;
2041 li(r2, rt);
2048 li(r2, rt);
2054 if (rt.imm32_ == 0) {
2060 li(r2, rt);
2067 if (rt.imm32_ == 0) {
2070 } else if (is_int16(rt.imm32_)) {
2071 slti(scratch, rs, rt.imm32_);
2077 li(r2, rt);
2084 if (rt.imm32_ == 0) {
2087 } else if (is_int16(rt.imm32_)) {
2088 slti(scratch, rs, rt.imm32_);
2094 li(r2, rt);
2101 if (rt.imm32_ == 0) {
2107 li(r2, rt);
2115 if (rt.imm32_ == 0) {
2121 li(r2, rt);
2128 if (rt.imm32_ == 0) {
2131 } else if (is_int16(rt.imm32_)) {
2132 sltiu(scratch, rs, rt.imm32_);
2138 li(r2, rt);
2145 if (rt.imm32_ == 0) {
2148 } else if (is_int16(rt.imm32_)) {
2149 sltiu(scratch, rs, rt.imm32_);
2155 li(r2, rt);
2162 if (rt.imm32_ == 0) {
2168 li(r2, rt);
2192 const Operand& rt,
2194 BranchAndLinkShort(offset, cond, rs, rt, bdslot);
2216 const Operand& rt,
2220 BranchAndLinkShort(L, cond, rs, rt, bdslot);
2224 BranchShort(&skip, neg_cond, rs, rt);
2232 BranchShort(&skip, neg_cond, rs, rt);
2236 BranchAndLinkShort(L, cond, rs, rt, bdslot);
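
Both Branch and BranchAndLink fall back to a trampoline when the label is beyond short-branch range: the condition is negated, a short branch skips over an unconditional long jump, and the long jump reaches the real target. Note that BranchAndLink's skip uses the non-linking BranchShort, so only the long jump writes ra. A control-flow model of the rewrite:

    #include <cstdio>

    // 'if (cond) goto far_target' rewritten as the macros do it:
    //   BranchShort(&skip, neg_cond, rs, rt);  // hop over the long jump
    //   <long jump to far_target>
    //   bind(&skip);
    bool far_branch_taken(bool cond) {
      if (!cond) goto skip;  // the negated short branch
      return true;           // stands in for the long jump being taken
    skip:
      return false;
    }

    int main() {
      std::printf("%d %d\n", far_branch_taken(true), far_branch_taken(false));  // 1 0
    }
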
2256 Register rs, const Operand& rt,
2258 BRANCH_ARGS_CHECK(cond, rs, rt);
2262 if (rt.is_reg()) {
2263 r2 = rt.rm_;
2266 li(r2, rt);
2350 const Operand& rt,
2352 BRANCH_ARGS_CHECK(cond, rs, rt);
2357 if (rt.is_reg()) {
2358 r2 = rt.rm_;
2361 li(r2, rt);
2452 const Operand& rt,
2458 BRANCH_ARGS_CHECK(cond, rs, rt);
2459 Branch(2, NegateCondition(cond), rs, rt);
2472 const Operand& rt,
2476 Branch(USE_DELAY_SLOT, &skip, NegateCondition(cond), rs, rt);
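
The conditional Jump above hops over the actual jump with a negated branch; in the USE_DELAY_SLOT variant, the next instruction the caller emits lands in that branch's delay slot. Since MIPS executes a delay-slot instruction whether or not the branch is taken, whatever sits there must be harmless on the skip path (clobbering a scratch register is fine, for instance). A pipeline model of the always-executes rule:

    #include <cstdio>

    // The 'delay slot' work runs first on both paths; only then does control
    // transfer (branch taken) or fall through (branch not taken).
    int run(bool taken) {
      int acc = 0;
      acc += 1;                      // delay-slot instruction: always executes
      if (taken) return acc + 100;   // branch-target path
      return acc;                    // fall-through path
    }

    int main() {
      std::printf("taken=%d fall=%d\n", run(true), run(false));  // 101 1
    }
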
2490 const Operand& rt,
2493 Jump(reinterpret_cast<intptr_t>(target), rmode, cond, rs, rt, bd);
2501 const Operand& rt,
2505 Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond, rs, rt, bd);
2512 const Operand& rt,
2533 const Operand& rt,
2541 BRANCH_ARGS_CHECK(cond, rs, rt);
2542 Branch(2, NegateCondition(cond), rs, rt);
2549 ASSERT_EQ(CallSize(target, cond, rs, rt, bd),
2558 const Operand& rt,
2560 int size = CallSize(t9, cond, rs, rt, bd);
2569 const Operand& rt,
2579 Call(t9, cond, rs, rt, bd);
2580 ASSERT_EQ(CallSize(target, rmode, cond, rs, rt, bd),
2590 const Operand& rt,
2594 rmode, cond, rs, rt, bd);
2603 const Operand& rt,
2614 Call(reinterpret_cast<Address>(code.location()), rmode, cond, rs, rt, bd);
2615 ASSERT_EQ(CallSize(code, rmode, ast_id, cond, rs, rt, bd),
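
Each Call variant is bracketed by an ASSERT_EQ checking that the instructions actually emitted match what the corresponding CallSize predicted for the same operands; the predicted size matters to callers that must know a call's footprint in advance, so predictor and emitter must not drift apart. The shape of the check, with a hypothetical instruction buffer standing in for the assembler:

    #include <cassert>
    #include <cstdint>
    #include <cstdio>
    #include <vector>

    // Hypothetical predictor: li (2 instrs) + jalr + delay slot.
    int CallSizeInstrs() { return 4; }

    int main() {
      std::vector<uint32_t> buf;  // stands in for the code buffer
      size_t before = buf.size();
      for (int i = 0; i < 4; ++i) buf.push_back(0u);  // 'emit' the call sequence
      assert(buf.size() - before == (size_t)CallSizeInstrs());  // the ASSERT_EQ
      std::printf("emitted %zu instrs\n", buf.size() - before);
    }
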
2622 const Operand& rt,
2624 Jump(ra, cond, rs, rt, bd);
4370 Register rs, Operand rt) {
4372 Check(cc, reason, rs, rt);
4396 Register rs, Operand rt) {
4398 Branch(&L, cc, rs, rt);
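
Check branches past the failure path when the condition holds, so the (elided) abort sequence runs only when the predicate is false; Assert is the debug-build wrapper that forwards to Check. The control flow:

    #include <cstdio>
    #include <cstdlib>

    void Check(bool cond, const char* reason) {
      if (cond) return;  // Branch(&L, cc, rs, rt); ... bind(&L);
      std::fprintf(stderr, "Check failed: %s\n", reason);
      std::abort();      // stands in for the elided abort sequence
    }

    int main() {
      Check(1 + 1 == 2, "arithmetic");
      std::puts("ok");
    }
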