
Lines Matching refs:vd

254 void Assembler::NEONTable(const VRegister& vd,
258 VIXL_ASSERT(vd.Is16B() || vd.Is8B());
260 VIXL_ASSERT(AreSameFormat(vd, vm));
261 Emit(op | (vd.IsQ() ? NEON_Q : 0) | Rm(vm) | Rn(vn) | Rd(vd));
265 void Assembler::tbl(const VRegister& vd,
268 NEONTable(vd, vn, vm, NEON_TBL_1v);
272 void Assembler::tbl(const VRegister& vd,
279 NEONTable(vd, vn, vm, NEON_TBL_2v);
283 void Assembler::tbl(const VRegister& vd,
291 NEONTable(vd, vn, vm, NEON_TBL_3v);
295 void Assembler::tbl(const VRegister& vd,
304 NEONTable(vd, vn, vm, NEON_TBL_4v);
308 void Assembler::tbx(const VRegister& vd,
311 NEONTable(vd, vn, vm, NEON_TBX_1v);
315 void Assembler::tbx(const VRegister& vd,
322 NEONTable(vd, vn, vm, NEON_TBX_2v);
326 void Assembler::tbx(const VRegister& vd,
334 NEONTable(vd, vn, vm, NEON_TBX_3v);
338 void Assembler::tbx(const VRegister& vd,
347 NEONTable(vd, vn, vm, NEON_TBX_4v);
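
Editor's note: tbl and tbx differ only in how out-of-range indices are handled, and every overload above funnels into NEONTable, which accepts only 8B/16B operands. A minimal usage sketch, assuming VIXL's usual include layout and a caller-provided, ready-to-use Assembler (the helper name and buffer setup are illustrative, not part of the source):

#include "aarch64/assembler-aarch64.h"  // assumed include path

using namespace vixl::aarch64;

// Illustrative helper: emits byte-table lookups into an Assembler whose
// code buffer is assumed to be set up already.
void EmitTableLookups(Assembler* assm) {
  assm->tbl(v0.V16B(), v1.V16B(), v2.V16B());  // out-of-range lanes -> 0
  assm->tbx(v0.V16B(), v1.V16B(), v2.V16B());  // out-of-range lanes kept
  // Two-register table form: the table registers must be consecutive.
  assm->tbl(v3.V8B(), v4.V16B(), v5.V16B(), v6.V8B());
}

The later sketches below assume this same setup.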
1851 void Assembler::NEON3DifferentL(const VRegister& vd,
1856 VIXL_ASSERT((vn.Is1H() && vd.Is1S()) || (vn.Is1S() && vd.Is1D()) ||
1857 (vn.Is8B() && vd.Is8H()) || (vn.Is4H() && vd.Is4S()) ||
1858 (vn.Is2S() && vd.Is2D()) || (vn.Is16B() && vd.Is8H()) ||
1859 (vn.Is8H() && vd.Is4S()) || (vn.Is4S() && vd.Is2D()));
1861 if (vd.IsScalar()) {
1867 Emit(format | op | Rm(vm) | Rn(vn) | Rd(vd));
1871 void Assembler::NEON3DifferentW(const VRegister& vd,
1875 VIXL_ASSERT(AreSameFormat(vd, vn));
1876 VIXL_ASSERT((vm.Is8B() && vd.Is8H()) || (vm.Is4H() && vd.Is4S()) ||
1877 (vm.Is2S() && vd.Is2D()) || (vm.Is16B() && vd.Is8H()) ||
1878 (vm.Is8H() && vd.Is4S()) || (vm.Is4S() && vd.Is2D()));
1879 Emit(VFormat(vm) | vop | Rm(vm) | Rn(vn) | Rd(vd));
1883 void Assembler::NEON3DifferentHN(const VRegister& vd,
1888 VIXL_ASSERT((vd.Is8B() && vn.Is8H()) || (vd.Is4H() && vn.Is4S()) ||
1889 (vd.Is2S() && vn.Is2D()) || (vd.Is16B() && vn.Is8H()) ||
1890 (vd.Is8H() && vn.Is4S()) || (vd.Is4S() && vn.Is2D()));
1891 Emit(VFormat(vd) | vop | Rm(vm) | Rn(vn) | Rd(vd));
1937 void Assembler::FN(const VRegister& vd, \
1941 NEON3DifferentL(vd, vn, vm, OP); \
1948 V(addhn, NEON_ADDHN, vd.IsD()) \
1949 V(addhn2, NEON_ADDHN2, vd.IsQ()) \
1950 V(raddhn, NEON_RADDHN, vd.IsD()) \
1951 V(raddhn2, NEON_RADDHN2, vd.IsQ()) \
1952 V(subhn, NEON_SUBHN, vd.IsD()) \
1953 V(subhn2, NEON_SUBHN2, vd.IsQ()) \
1954 V(rsubhn, NEON_RSUBHN, vd.IsD()) \
1955 V(rsubhn2, NEON_RSUBHN2, vd.IsQ())
1959 void Assembler::FN(const VRegister& vd, \
1963 NEON3DifferentHN(vd, vn, vm, OP); \
1968 void Assembler::uaddw(const VRegister& vd,
1972 NEON3DifferentW(vd, vn, vm, NEON_UADDW);
1976 void Assembler::uaddw2(const VRegister& vd,
1980 NEON3DifferentW(vd, vn, vm, NEON_UADDW2);
1984 void Assembler::saddw(const VRegister& vd,
1988 NEON3DifferentW(vd, vn, vm, NEON_SADDW);
1992 void Assembler::saddw2(const VRegister& vd,
1996 NEON3DifferentW(vd, vn, vm, NEON_SADDW2);
2000 void Assembler::usubw(const VRegister& vd,
2004 NEON3DifferentW(vd, vn, vm, NEON_USUBW);
2008 void Assembler::usubw2(const VRegister& vd,
2012 NEON3DifferentW(vd, vn, vm, NEON_USUBW2);
2016 void Assembler::ssubw(const VRegister& vd,
2020 NEON3DifferentW(vd, vn, vm, NEON_SSUBW);
2024 void Assembler::ssubw2(const VRegister& vd,
2028 NEON3DifferentW(vd, vn, vm, NEON_SSUBW2);
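
Editor's note: NEON3DifferentL, NEON3DifferentW and NEON3DifferentHN encode the long, wide and high-narrow operand shapes; the asserts above spell out the legal formats. A short sketch of each shape, under the same assumed setup:

void EmitDifferentShapes(Assembler* assm) {
  // Wide: 8H = 8H + 8B (vm is the narrow operand).
  assm->uaddw(v0.V8H(), v1.V8H(), v2.V8B());
  // High-narrow: 8B = high halves of the 8H + 8H sums.
  assm->addhn(v3.V8B(), v4.V8H(), v5.V8H());
  // The "2" variants write the upper half of a Q destination.
  assm->addhn2(v3.V16B(), v4.V8H(), v5.V8H());
}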
2079 void Assembler::fmov(const VRegister& vd, double imm) {
2080 if (vd.IsScalar()) {
2081 VIXL_ASSERT(vd.Is1D());
2082 Emit(FMOV_d_imm | Rd(vd) | ImmFP64(imm));
2084 VIXL_ASSERT(vd.Is2D());
2088 vd));
2093 void Assembler::fmov(const VRegister& vd, float imm) {
2094 if (vd.IsScalar()) {
2095 VIXL_ASSERT(vd.Is1S());
2096 Emit(FMOV_s_imm | Rd(vd) | ImmFP32(imm));
2098 VIXL_ASSERT(vd.Is2S() || vd.Is4S());
2100 Instr q = vd.Is4S() ? NEON_Q : 0;
2102 Emit(q | op | ImmNEONabcdefgh(encoded_imm) | NEONCmode(0xf) | Rd(vd));
2115 void Assembler::fmov(const VRegister& vd, const Register& rn) {
2116 VIXL_ASSERT(vd.Is1S() || vd.Is1D());
2117 VIXL_ASSERT(vd.GetSizeInBits() == rn.GetSizeInBits());
2118 FPIntegerConvertOp op = vd.Is32Bits() ? FMOV_sw : FMOV_dx;
2119 Emit(op | Rd(vd) | Rn(rn));
2123 void Assembler::fmov(const VRegister& vd, const VRegister& vn) {
2124 VIXL_ASSERT(vd.Is1S() || vd.Is1D());
2125 VIXL_ASSERT(vd.IsSameFormat(vn));
2126 Emit(FPType(vd) | FMOV | Rd(vd) | Rn(vn));
2130 void Assembler::fmov(const VRegister& vd, int index, const Register& rn) {
2131 VIXL_ASSERT((index == 1) && vd.Is1D() && rn.IsX());
2133 Emit(FMOV_d1_x | Rd(vd) | Rn(rn));
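
Editor's note: the fmov overloads above cover immediates, general-register transfers, register moves and the D[1] upper-half insert. A sketch of each form, assuming immediates that are representable as 8-bit FP immediates (e.g. 1.0, 0.5):

void EmitFmovForms(Assembler* assm) {
  assm->fmov(d0, 1.0);        // scalar double immediate
  assm->fmov(v0.V2D(), 1.0);  // vector double immediate (2D only)
  assm->fmov(v1.V4S(), 0.5f); // vector single immediate (2S or 4S)
  assm->fmov(s2, w3);         // raw bit pattern, W -> S
  assm->fmov(d4, x5);         // raw bit pattern, X -> D
  assm->fmov(d6, d7);         // register-to-register move
  assm->fmov(d8, 1, x9);      // X -> V8.D[1], the upper-half insert
}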
2144 void Assembler::fmadd(const VRegister& vd,
2148 FPDataProcessing3Source(vd, vn, vm, va, vd.Is1S() ? FMADD_s : FMADD_d);
2152 void Assembler::fmsub(const VRegister& vd,
2156 FPDataProcessing3Source(vd, vn, vm, va, vd.Is1S() ? FMSUB_s : FMSUB_d);
2160 void Assembler::fnmadd(const VRegister& vd,
2164 FPDataProcessing3Source(vd, vn, vm, va, vd.Is1S() ? FNMADD_s : FNMADD_d);
2168 void Assembler::fnmsub(const VRegister& vd,
2172 FPDataProcessing3Source(vd, vn, vm, va, vd.Is1S() ? FNMSUB_s : FNMSUB_d);
2176 void Assembler::fnmul(const VRegister& vd,
2179 VIXL_ASSERT(AreSameSizeAndType(vd, vn, vm));
2180 Instr op = vd.Is1S() ? FNMUL_s : FNMUL_d;
2181 Emit(FPType(vd) | op | Rm(vm) | Rn(vn) | Rd(vd));
2256 void Assembler::fcsel(const VRegister& vd,
2260 VIXL_ASSERT(vd.Is1S() || vd.Is1D());
2261 VIXL_ASSERT(AreSameFormat(vd, vn, vm));
2262 Emit(FPType(vd) | FCSEL | Rm(vm) | Cond(cond) | Rn(vn) | Rd(vd));
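
Editor's note: the fused multiply family and fcsel all take 1S or 1D scalars. A sketch with the operand semantics spelled out, same assumed setup:

void EmitFusedAndSelect(Assembler* assm) {
  assm->fmadd(d0, d1, d2, d3);   // d0 = d3 + d1 * d2
  assm->fmsub(d0, d1, d2, d3);   // d0 = d3 - d1 * d2
  assm->fnmadd(d0, d1, d2, d3);  // d0 = -d3 - d1 * d2
  assm->fnmsub(d0, d1, d2, d3);  // d0 = -d3 + d1 * d2
  assm->fnmul(d0, d1, d2);       // d0 = -(d1 * d2)
  assm->fcsel(d0, d1, d2, eq);   // d0 = eq ? d1 : d2
}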
2273 void Assembler::NEONFPConvertToInt(const VRegister& vd,
2277 VIXL_ASSERT((vd.Is1S() && vn.Is1S()) || (vd.Is1D() && vn.Is1D()));
2280 Emit(FPFormat(vn) | op | Rn(vn) | Rd(vd));
2284 void Assembler::fcvt(const VRegister& vd, const VRegister& vn) {
2286 if (vd.Is1D()) {
2289 } else if (vd.Is1S()) {
2293 VIXL_ASSERT(vd.Is1H());
2297 FPDataProcessing1Source(vd, vn, op);
2301 void Assembler::fcvtl(const VRegister& vd, const VRegister& vn) {
2302 VIXL_ASSERT((vd.Is4S() && vn.Is4H()) || (vd.Is2D() && vn.Is2S()));
2303 Instr format = vd.Is2D() ? (1 << NEONSize_offset) : 0;
2304 Emit(format | NEON_FCVTL | Rn(vn) | Rd(vd));
2308 void Assembler::fcvtl2(const VRegister& vd, const VRegister& vn) {
2309 VIXL_ASSERT((vd.Is4S() && vn.Is8H()) || (vd.Is2D() && vn.Is4S()));
2310 Instr format = vd.Is2D() ? (1 << NEONSize_offset) : 0;
2311 Emit(NEON_Q | format | NEON_FCVTL | Rn(vn) | Rd(vd));
2315 void Assembler::fcvtn(const VRegister& vd, const VRegister& vn) {
2316 VIXL_ASSERT((vn.Is4S() && vd.Is4H()) || (vn.Is2D() && vd.Is2S()));
2318 Emit(format | NEON_FCVTN | Rn(vn) | Rd(vd));
2322 void Assembler::fcvtn2(const VRegister& vd, const VRegister& vn) {
2323 VIXL_ASSERT((vn.Is4S() && vd.Is8H()) || (vn.Is2D() && vd.Is4S()));
2325 Emit(NEON_Q | format | NEON_FCVTN | Rn(vn) | Rd(vd));
2329 void Assembler::fcvtxn(const VRegister& vd, const VRegister& vn) {
2331 if (vd.IsScalar()) {
2332 VIXL_ASSERT(vd.Is1S() && vn.Is1D());
2333 Emit(format | NEON_FCVTXN_scalar | Rn(vn) | Rd(vd));
2335 VIXL_ASSERT(vd.Is2S() && vn.Is2D());
2336 Emit(format | NEON_FCVTXN | Rn(vn) | Rd(vd));
2341 void Assembler::fcvtxn2(const VRegister& vd, const VRegister& vn) {
2342 VIXL_ASSERT(vd.Is4S() && vn.Is2D());
2344 Emit(NEON_Q | format | NEON_FCVTXN | Rn(vn) | Rd(vd));
2362 void Assembler::FN(const VRegister& vd, const VRegister& vn) { \
2363 NEONFPConvertToInt(vd, vn, VEC_OP); \
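
Editor's note: fcvt changes scalar precision, while fcvtl/fcvtn widen and narrow vectors and fcvtxn narrows with round-to-odd. A sketch under the same assumed setup:

void EmitFpConversions(Assembler* assm) {
  assm->fcvt(s0, d1);               // double -> single, scalar
  assm->fcvtl(v0.V4S(), v1.V4H());  // widen halves from the low half
  assm->fcvtl2(v0.V4S(), v1.V8H()); // widen from the upper half
  assm->fcvtn(v2.V2S(), v3.V2D());  // narrow doubles to singles
  assm->fcvtxn(v4.V2S(), v5.V2D()); // narrow with round-to-odd
}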
2381 void Assembler::fcvtzs(const VRegister& vd, const VRegister& vn, int fbits) {
2384 NEONFP2RegMisc(vd, vn, NEON_FCVTZS);
2386 VIXL_ASSERT(vd.Is1D() || vd.Is1S() || vd.Is2D() || vd.Is2S() || vd.Is4S());
2387 NEONShiftRightImmediate(vd, vn, fbits, NEON_FCVTZS_imm);
2404 void Assembler::fcvtzu(const VRegister& vd, const VRegister& vn, int fbits) {
2407 NEONFP2RegMisc(vd, vn, NEON_FCVTZU);
2409 VIXL_ASSERT(vd.Is1D() || vd.Is1S() || vd.Is2D() || vd.Is2S() || vd.Is4S());
2410 NEONShiftRightImmediate(vd, vn, fbits, NEON_FCVTZU_imm);
2414 void Assembler::ucvtf(const VRegister& vd, const VRegister& vn, int fbits) {
2417 NEONFP2RegMisc(vd, vn, NEON_UCVTF);
2419 VIXL_ASSERT(vd.Is1D() || vd.Is1S() || vd.Is2D() || vd.Is2S() || vd.Is4S());
2420 NEONShiftRightImmediate(vd, vn, fbits, NEON_UCVTF_imm);
2424 void Assembler::scvtf(const VRegister& vd, const VRegister& vn, int fbits) {
2427 NEONFP2RegMisc(vd, vn, NEON_SCVTF);
2429 VIXL_ASSERT(vd.Is1D() || vd.Is1S() || vd.Is2D() || vd.Is2S() || vd.Is4S());
2430 NEONShiftRightImmediate(vd, vn, fbits, NEON_SCVTF_imm);
2435 void Assembler::scvtf(const VRegister& vd, const Register& rn, int fbits) {
2436 VIXL_ASSERT(vd.Is1S() || vd.Is1D());
2439 Emit(SF(rn) | FPType(vd) | SCVTF | Rn(rn) | Rd(vd));
2441 Emit(SF(rn) | FPType(vd) | SCVTF_fixed | FPScale(64 - fbits) | Rn(rn) |
2442 Rd(vd));
2447 void Assembler::ucvtf(const VRegister& vd, const Register& rn, int fbits) {
2448 VIXL_ASSERT(vd.Is1S() || vd.Is1D());
2451 Emit(SF(rn) | FPType(vd) | UCVTF | Rn(rn) | Rd(vd));
2453 Emit(SF(rn) | FPType(vd) | UCVTF_fixed | FPScale(64 - fbits) | Rn(rn) |
2454 Rd(vd));
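
Editor's note: with fbits == 0 these emit the plain integer/FP conversions; a nonzero fbits selects the fixed-point encodings, scaling by 2^fbits. A sketch, same assumed setup:

void EmitFixedPointConversions(Assembler* assm) {
  assm->fcvtzs(v0.V4S(), v1.V4S());    // FP -> signed int, round to zero
  assm->fcvtzs(v0.V4S(), v1.V4S(), 8); // FP -> fixed point, 8 frac bits
  assm->scvtf(d0, x1, 16);             // signed 64-bit fixed point
                                       // (16 frac bits) -> double
  assm->ucvtf(v2.V2D(), v3.V2D());     // unsigned int -> FP, per lane
}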
2459 void Assembler::NEON3Same(const VRegister& vd,
2463 VIXL_ASSERT(AreSameFormat(vd, vn, vm));
2464 VIXL_ASSERT(vd.IsVector() || !vd.IsQ());
2467 if (vd.IsScalar()) {
2469 format = SFormat(vd);
2471 format = VFormat(vd);
2474 Emit(format | op | Rm(vm) | Rn(vn) | Rd(vd));
2478 void Assembler::NEONFP3Same(const VRegister& vd,
2482 VIXL_ASSERT(AreSameFormat(vd, vn, vm));
2483 Emit(FPFormat(vd) | op | Rm(vm) | Rn(vn) | Rd(vd));
2505 void Assembler::FN(const VRegister& vd, const VRegister& vn) { \
2507 if (vd.IsScalar()) { \
2508 VIXL_ASSERT(vd.Is1S() || vd.Is1D()); \
2511 VIXL_ASSERT(vd.Is2S() || vd.Is2D() || vd.Is4S()); \
2514 NEONFP2RegMisc(vd, vn, op); \
2520 void Assembler::NEONFP2RegMisc(const VRegister& vd,
2523 VIXL_ASSERT(AreSameFormat(vd, vn));
2524 Emit(FPFormat(vd) | op | Rn(vn) | Rd(vd));
2528 void Assembler::NEON2RegMisc(const VRegister& vd,
2532 VIXL_ASSERT(AreSameFormat(vd, vn));
2537 if (vd.IsScalar()) {
2539 format = SFormat(vd);
2541 format = VFormat(vd);
2544 Emit(format | op | Rn(vn) | Rd(vd));
2548 void Assembler::cmeq(const VRegister& vd, const VRegister& vn, int value) {
2549 VIXL_ASSERT(vd.IsVector() || vd.Is1D());
2550 NEON2RegMisc(vd, vn, NEON_CMEQ_zero, value);
2554 void Assembler::cmge(const VRegister& vd, const VRegister& vn, int value) {
2555 VIXL_ASSERT(vd.IsVector() || vd.Is1D());
2556 NEON2RegMisc(vd, vn, NEON_CMGE_zero, value);
2560 void Assembler::cmgt(const VRegister& vd, const VRegister& vn, int value) {
2561 VIXL_ASSERT(vd.IsVector() || vd.Is1D());
2562 NEON2RegMisc(vd, vn, NEON_CMGT_zero, value);
2566 void Assembler::cmle(const VRegister& vd, const VRegister& vn, int value) {
2567 VIXL_ASSERT(vd.IsVector() || vd.Is1D());
2568 NEON2RegMisc(vd, vn, NEON_CMLE_zero, value);
2572 void Assembler::cmlt(const VRegister& vd, const VRegister& vn, int value) {
2573 VIXL_ASSERT(vd.IsVector() || vd.Is1D());
2574 NEON2RegMisc(vd, vn, NEON_CMLT_zero, value);
2578 void Assembler::shll(const VRegister& vd, const VRegister& vn, int shift) {
2579 VIXL_ASSERT((vd.Is8H() && vn.Is8B() && shift == 8) ||
2580 (vd.Is4S() && vn.Is4H() && shift == 16) ||
2581 (vd.Is2D() && vn.Is2S() && shift == 32));
2583 Emit(VFormat(vn) | NEON_SHLL | Rn(vn) | Rd(vd));
2587 void Assembler::shll2(const VRegister& vd, const VRegister& vn, int shift) {
2589 VIXL_ASSERT((vd.Is8H() && vn.Is16B() && shift == 8) ||
2590 (vd.Is4S() && vn.Is8H() && shift == 16) ||
2591 (vd.Is2D() && vn.Is4S() && shift == 32));
2592 Emit(VFormat(vn) | NEON_SHLL | Rn(vn) | Rd(vd));
2596 void Assembler::NEONFP2RegMisc(const VRegister& vd,
2600 VIXL_ASSERT(AreSameFormat(vd, vn));
2605 if (vd.IsScalar()) {
2606 VIXL_ASSERT(vd.Is1S() || vd.Is1D());
2609 VIXL_ASSERT(vd.Is2S() || vd.Is2D() || vd.Is4S());
2612 Emit(FPFormat(vd) | op | Rn(vn) | Rd(vd));
2616 void Assembler::fcmeq(const VRegister& vd, const VRegister& vn, double value) {
2617 NEONFP2RegMisc(vd, vn, NEON_FCMEQ_zero, value);
2621 void Assembler::fcmge(const VRegister& vd, const VRegister& vn, double value) {
2622 NEONFP2RegMisc(vd, vn, NEON_FCMGE_zero, value);
2626 void Assembler::fcmgt(const VRegister& vd, const VRegister& vn, double value) {
2627 NEONFP2RegMisc(vd, vn, NEON_FCMGT_zero, value);
2631 void Assembler::fcmle(const VRegister& vd, const VRegister& vn, double value) {
2632 NEONFP2RegMisc(vd, vn, NEON_FCMLE_zero, value);
2636 void Assembler::fcmlt(const VRegister& vd, const VRegister& vn, double value) {
2637 NEONFP2RegMisc(vd, vn, NEON_FCMLT_zero, value);
2641 void Assembler::frecpx(const VRegister& vd, const VRegister& vn) {
2642 VIXL_ASSERT(vd.IsScalar());
2643 VIXL_ASSERT(AreSameFormat(vd, vn));
2644 VIXL_ASSERT(vd.Is1S() || vd.Is1D());
2645 Emit(FPFormat(vd) | NEON_FRECPX_scalar | Rn(vn) | Rd(vd));
2651 V(add, NEON_ADD, vd.IsVector() || vd.Is1D()) \
2652 V(addp, NEON_ADDP, vd.IsVector() || vd.Is1D()) \
2653 V(sub, NEON_SUB, vd.IsVector() || vd.Is1D()) \
2654 V(cmeq, NEON_CMEQ, vd.IsVector() || vd.Is1D()) \
2655 V(cmge, NEON_CMGE, vd.IsVector() || vd.Is1D()) \
2656 V(cmgt, NEON_CMGT, vd.IsVector() || vd.Is1D()) \
2657 V(cmhi, NEON_CMHI, vd.IsVector() || vd.Is1D()) \
2658 V(cmhs, NEON_CMHS, vd.IsVector() || vd.Is1D()) \
2659 V(cmtst, NEON_CMTST, vd.IsVector() || vd.Is1D()) \
2660 V(sshl, NEON_SSHL, vd.IsVector() || vd.Is1D()) \
2661 V(ushl, NEON_USHL, vd.IsVector() || vd.Is1D()) \
2662 V(srshl, NEON_SRSHL, vd.IsVector() || vd.Is1D()) \
2663 V(urshl, NEON_URSHL, vd.IsVector() || vd.Is1D()) \
2664 V(sqdmulh, NEON_SQDMULH, vd.IsLaneSizeH() || vd.IsLaneSizeS()) \
2665 V(sqrdmulh, NEON_SQRDMULH, vd.IsLaneSizeH() || vd.IsLaneSizeS()) \
2666 V(shadd, NEON_SHADD, vd.IsVector() && !vd.IsLaneSizeD()) \
2667 V(uhadd, NEON_UHADD, vd.IsVector() && !vd.IsLaneSizeD()) \
2668 V(srhadd, NEON_SRHADD, vd.IsVector() && !vd.IsLaneSizeD()) \
2669 V(urhadd, NEON_URHADD, vd.IsVector() && !vd.IsLaneSizeD()) \
2670 V(shsub, NEON_SHSUB, vd.IsVector() && !vd.IsLaneSizeD()) \
2671 V(uhsub, NEON_UHSUB, vd.IsVector() && !vd.IsLaneSizeD()) \
2672 V(smax, NEON_SMAX, vd.IsVector() && !vd.IsLaneSizeD()) \
2673 V(smaxp, NEON_SMAXP, vd.IsVector() && !vd.IsLaneSizeD()) \
2674 V(smin, NEON_SMIN, vd.IsVector() && !vd.IsLaneSizeD()) \
2675 V(sminp, NEON_SMINP, vd.IsVector() && !vd.IsLaneSizeD()) \
2676 V(umax, NEON_UMAX, vd.IsVector() && !vd.IsLaneSizeD()) \
2677 V(umaxp, NEON_UMAXP, vd.IsVector() && !vd.IsLaneSizeD()) \
2678 V(umin, NEON_UMIN, vd.IsVector() && !vd.IsLaneSizeD()) \
2679 V(uminp, NEON_UMINP, vd.IsVector() && !vd.IsLaneSizeD()) \
2680 V(saba, NEON_SABA, vd.IsVector() && !vd.IsLaneSizeD()) \
2681 V(sabd, NEON_SABD, vd.IsVector() && !vd.IsLaneSizeD()) \
2682 V(uaba, NEON_UABA, vd.IsVector() && !vd.IsLaneSizeD()) \
2683 V(uabd, NEON_UABD, vd.IsVector() && !vd.IsLaneSizeD()) \
2684 V(mla, NEON_MLA, vd.IsVector() && !vd.IsLaneSizeD()) \
2685 V(mls, NEON_MLS, vd.IsVector() && !vd.IsLaneSizeD()) \
2686 V(mul, NEON_MUL, vd.IsVector() && !vd.IsLaneSizeD()) \
2687 V(and_, NEON_AND, vd.Is8B() || vd.Is16B()) \
2688 V(orr, NEON_ORR, vd.Is8B() || vd.Is16B()) \
2689 V(orn, NEON_ORN, vd.Is8B() || vd.Is16B()) \
2690 V(eor, NEON_EOR, vd.Is8B() || vd.Is16B()) \
2691 V(bic, NEON_BIC, vd.Is8B() || vd.Is16B()) \
2692 V(bit, NEON_BIT, vd.Is8B() || vd.Is16B()) \
2693 V(bif, NEON_BIF, vd.Is8B() || vd.Is16B()) \
2694 V(bsl, NEON_BSL, vd.Is8B() || vd.Is16B()) \
2695 V(pmul, NEON_PMUL, vd.Is8B() || vd.Is16B()) \
2707 void Assembler::FN(const VRegister& vd, \
2711 NEON3Same(vd, vn, vm, OP); \
2746 void Assembler::FN(const VRegister& vd, \
2750 if ((SCA_OP != 0) && vd.IsScalar()) { \
2751 VIXL_ASSERT(vd.Is1S() || vd.Is1D()); \
2754 VIXL_ASSERT(vd.IsVector()); \
2755 VIXL_ASSERT(vd.Is2S() || vd.Is2D() || vd.Is4S()); \
2758 NEONFP3Same(vd, vn, vm, op); \
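
Editor's note: the V() tables above fan out into NEON3Same and NEONFP3Same; the third column is the per-mnemonic format assert. A sketch of representative three-same ops, same assumed setup:

void EmitThreeSame(Assembler* assm) {
  assm->add(v0.V4S(), v1.V4S(), v2.V4S());     // lane-wise integer add
  assm->cmeq(v0.V4S(), v1.V4S(), v2.V4S());    // lane-wise compare equal
  assm->cmeq(v0.V4S(), v1.V4S(), 0);           // compare-against-zero form
  assm->and_(v0.V16B(), v1.V16B(), v2.V16B()); // logic ops are 8B/16B only
  assm->fadd(v0.V4S(), v1.V4S(), v2.V4S());    // FP three-same, vector
  assm->fadd(s0, s1, s2);                      // scalar form of the same op
}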
2764 void Assembler::addp(const VRegister& vd, const VRegister& vn) {
2765 VIXL_ASSERT((vd.Is1D() && vn.Is2D()));
2766 Emit(SFormat(vd) | NEON_ADDP_scalar | Rn(vn) | Rd(vd));
2770 void Assembler::faddp(const VRegister& vd, const VRegister& vn) {
2771 VIXL_ASSERT((vd.Is1S() && vn.Is2S()) || (vd.Is1D() && vn.Is2D()));
2772 Emit(FPFormat(vd) | NEON_FADDP_scalar | Rn(vn) | Rd(vd));
2776 void Assembler::fmaxp(const VRegister& vd, const VRegister& vn) {
2777 VIXL_ASSERT((vd.Is1S() && vn.Is2S()) || (vd.Is1D() && vn.Is2D()));
2778 Emit(FPFormat(vd) | NEON_FMAXP_scalar | Rn(vn) | Rd(vd));
2782 void Assembler::fminp(const VRegister& vd, const VRegister& vn) {
2783 VIXL_ASSERT((vd.Is1S() && vn.Is2S()) || (vd.Is1D() && vn.Is2D()));
2784 Emit(FPFormat(vd) | NEON_FMINP_scalar | Rn(vn) | Rd(vd));
2788 void Assembler::fmaxnmp(const VRegister& vd, const VRegister& vn) {
2789 VIXL_ASSERT((vd.Is1S() && vn.Is2S()) || (vd.Is1D() && vn.Is2D()));
2790 Emit(FPFormat(vd) | NEON_FMAXNMP_scalar | Rn(vn) | Rd(vd));
2794 void Assembler::fminnmp(const VRegister& vd, const VRegister& vn) {
2795 VIXL_ASSERT((vd.Is1S() && vn.Is2S()) || (vd.Is1D() && vn.Is2D()));
2796 Emit(FPFormat(vd) | NEON_FMINNMP_scalar | Rn(vn) | Rd(vd));
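
Editor's note: these scalar pairwise forms reduce a two-lane vector into a scalar. A sketch, same assumed setup:

void EmitPairwiseReductions(Assembler* assm) {
  assm->addp(d0, v1.V2D());    // d0 = v1.d[0] + v1.d[1]
  assm->faddp(s2, v3.V2S());   // s2 = v3.s[0] + v3.s[1]
  assm->fmaxp(d4, v5.V2D());   // d4 = max(v5.d[0], v5.d[1])
  assm->fminnmp(s6, v7.V2S()); // minNum flavour, prefers the non-NaN lane
}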
2800 void Assembler::orr(const VRegister& vd, const int imm8, const int left_shift) {
2801 NEONModifiedImmShiftLsl(vd, imm8, left_shift, NEONModifiedImmediate_ORR);
2805 void Assembler::mov(const VRegister& vd, const VRegister& vn) {
2806 VIXL_ASSERT(AreSameFormat(vd, vn));
2807 if (vd.IsD()) {
2808 orr(vd.V8B(), vn.V8B(), vn.V8B());
2810 VIXL_ASSERT(vd.IsQ());
2811 orr(vd.V16B(), vn.V16B(), vn.V16B());
2816 void Assembler::bic(const VRegister& vd, const int imm8, const int left_shift) {
2817 NEONModifiedImmShiftLsl(vd, imm8, left_shift, NEONModifiedImmediate_BIC);
2821 void Assembler::movi(const VRegister& vd,
2826 if (vd.Is2D() || vd.Is1D()) {
2836 int q = vd.Is2D() ? NEON_Q : 0;
2838 ImmNEONabcdefgh(imm8) | NEONCmode(0xe) | Rd(vd));
2841 NEONModifiedImmShiftLsl(vd,
2847 NEONModifiedImmShiftMsl(vd,
2855 void Assembler::mvn(const VRegister& vd, const VRegister& vn) {
2856 VIXL_ASSERT(AreSameFormat(vd, vn));
2857 if (vd.IsD()) {
2858 not_(vd.V8B(), vn.V8B());
2860 VIXL_ASSERT(vd.IsQ());
2861 not_(vd.V16B(), vn.V16B());
2866 void Assembler::mvni(const VRegister& vd,
2872 NEONModifiedImmShiftLsl(vd, imm8, shift_amount, NEONModifiedImmediate_MVNI);
2874 NEONModifiedImmShiftMsl(vd, imm8, shift_amount, NEONModifiedImmediate_MVNI);
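
Editor's note: movi, mvni, orr and bic (immediate forms) all route through the modified-immediate encoders; 2D movi takes a 64-bit mask whose bytes are each 0x00 or 0xff. A sketch, same assumed setup:

void EmitModifiedImmediates(Assembler* assm) {
  assm->movi(v0.V16B(), 0xaa);               // splat a byte
  assm->movi(v1.V4S(), 0xff, LSL, 8);        // 0x0000ff00 in each lane
  assm->movi(v2.V2D(), 0xff00ff00ff00ff00);  // per-byte 0x00/0xff pattern
  assm->mvni(v3.V4S(), 0x0f, MSL, 8);        // inverted shifting-ones form
  assm->orr(v4.V4S(), 0x01, 16);             // OR an immediate into lanes
  assm->bic(v4.V4S(), 0x01, 16);             // clear the same bits
}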
2879 void Assembler::NEONFPByElement(const VRegister& vd,
2884 VIXL_ASSERT(AreSameFormat(vd, vn));
2885 VIXL_ASSERT((vd.Is2S() && vm.Is1S()) || (vd.Is4S() && vm.Is1S()) ||
2886 (vd.Is1S() && vm.Is1S()) || (vd.Is2D() && vm.Is1D()) ||
2887 (vd.Is1D() && vm.Is1D()));
2892 if (vd.IsScalar()) {
2896 Emit(FPFormat(vd) | op | ImmNEONHLM(vm_index, index_num_bits) | Rm(vm) |
2897 Rn(vn) | Rd(vd));
2901 void Assembler::NEONByElement(const VRegister& vd,
2906 VIXL_ASSERT(AreSameFormat(vd, vn));
2907 VIXL_ASSERT((vd.Is4H() && vm.Is1H()) || (vd.Is8H() && vm.Is1H()) ||
2908 (vd.Is1H() && vm.Is1H()) || (vd.Is2S() && vm.Is1S()) ||
2909 (vd.Is4S() && vm.Is1S()) || (vd.Is1S() && vm.Is1S()));
2915 if (vd.IsScalar()) {
2922 Rd(vd));
2926 void Assembler::NEONByElementL(const VRegister& vd,
2931 VIXL_ASSERT((vd.Is4S() && vn.Is4H() && vm.Is1H()) ||
2932 (vd.Is4S() && vn.Is8H() && vm.Is1H()) ||
2933 (vd.Is1S() && vn.Is1H() && vm.Is1H()) ||
2934 (vd.Is2D() && vn.Is2S() && vm.Is1S()) ||
2935 (vd.Is2D() && vn.Is4S() && vm.Is1S()) ||
2936 (vd.Is1D() && vn.Is1S() && vm.Is1S()));
2943 if (vd.IsScalar()) {
2950 Rd(vd));
2965 void Assembler::FN(const VRegister& vd, \
2970 NEONByElement(vd, vn, vm, vm_index, OP); \
2986 void Assembler::FN(const VRegister& vd, \
2990 NEONFPByElement(vd, vn, vm, vm_index, OP); \
3020 void Assembler::FN(const VRegister& vd, \
3025 NEONByElementL(vd, vn, vm, vm_index, OP); \
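
Editor's note: the by-element forms take vm as a scalar-formatted register plus a lane index; for H-sized elements the encoding restricts vm to v0-v15. A sketch, same assumed setup:

void EmitByElement(Assembler* assm) {
  assm->mul(v0.V4S(), v1.V4S(), v2.S(), 3);   // multiply by v2.s[3]
  assm->fmul(v0.V4S(), v1.V4S(), v2.S(), 0);  // FP multiply by v2.s[0]
  assm->fmla(v0.V2D(), v1.V2D(), v2.D(), 1);  // accumulate v1 * v2.d[1]
  assm->smull(v3.V4S(), v4.V4H(), v5.H(), 2); // widening multiply, v5.h[2]
}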
3031 void Assembler::suqadd(const VRegister& vd, const VRegister& vn) {
3032 NEON2RegMisc(vd, vn, NEON_SUQADD);
3036 void Assembler::usqadd(const VRegister& vd, const VRegister& vn) {
3037 NEON2RegMisc(vd, vn, NEON_USQADD);
3041 void Assembler::abs(const VRegister& vd, const VRegister& vn) {
3042 VIXL_ASSERT(vd.IsVector() || vd.Is1D());
3043 NEON2RegMisc(vd, vn, NEON_ABS);
3047 void Assembler::sqabs(const VRegister& vd, const VRegister& vn) {
3048 NEON2RegMisc(vd, vn, NEON_SQABS);
3052 void Assembler::neg(const VRegister& vd, const VRegister& vn) {
3053 VIXL_ASSERT(vd.IsVector() || vd.Is1D());
3054 NEON2RegMisc(vd, vn, NEON_NEG);
3058 void Assembler::sqneg(const VRegister& vd, const VRegister& vn) {
3059 NEON2RegMisc(vd, vn, NEON_SQNEG);
3063 void Assembler::NEONXtn(const VRegister& vd,
3067 if (vd.IsScalar()) {
3068 VIXL_ASSERT((vd.Is1B() && vn.Is1H()) || (vd.Is1H() && vn.Is1S()) ||
3069 (vd.Is1S() && vn.Is1D()));
3071 format = SFormat(vd);
3073 VIXL_ASSERT((vd.Is8B() && vn.Is8H()) || (vd.Is4H() && vn.Is4S()) ||
3074 (vd.Is2S() && vn.Is2D()) || (vd.Is16B() && vn.Is8H()) ||
3075 (vd.Is8H() && vn.Is4S()) || (vd.Is4S() && vn.Is2D()));
3076 format = VFormat(vd);
3078 Emit(format | op | Rn(vn) | Rd(vd));
3082 void Assembler::xtn(const VRegister& vd, const VRegister& vn) {
3083 VIXL_ASSERT(vd.IsVector() && vd.IsD());
3084 NEONXtn(vd, vn, NEON_XTN);
3088 void Assembler::xtn2(const VRegister& vd, const VRegister& vn) {
3089 VIXL_ASSERT(vd.IsVector() && vd.IsQ());
3090 NEONXtn(vd, vn, NEON_XTN);
3094 void Assembler::sqxtn(const VRegister& vd, const VRegister& vn) {
3095 VIXL_ASSERT(vd.IsScalar() || vd.IsD());
3096 NEONXtn(vd, vn, NEON_SQXTN);
3100 void Assembler::sqxtn2(const VRegister& vd, const VRegister& vn) {
3101 VIXL_ASSERT(vd.IsVector() && vd.IsQ());
3102 NEONXtn(vd, vn, NEON_SQXTN);
3106 void Assembler::sqxtun(const VRegister& vd, const VRegister& vn) {
3107 VIXL_ASSERT(vd.IsScalar() || vd.IsD());
3108 NEONXtn(vd, vn, NEON_SQXTUN);
3112 void Assembler::sqxtun2(const VRegister& vd, const VRegister& vn) {
3113 VIXL_ASSERT(vd.IsVector() && vd.IsQ());
3114 NEONXtn(vd, vn, NEON_SQXTUN);
3118 void Assembler::uqxtn(const VRegister& vd, const VRegister& vn) {
3119 VIXL_ASSERT(vd.IsScalar() || vd.IsD());
3120 NEONXtn(vd, vn, NEON_UQXTN);
3124 void Assembler::uqxtn2(const VRegister& vd, const VRegister& vn) {
3125 VIXL_ASSERT(vd.IsVector() && vd.IsQ());
3126 NEONXtn(vd, vn, NEON_UQXTN);
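
Editor's note: NEONXtn covers the narrowing moves; xtn truncates, the sq/uq variants saturate, and sqxtun saturates signed input into an unsigned result. A sketch, same assumed setup:

void EmitNarrowing(Assembler* assm) {
  assm->xtn(v0.V2S(), v1.V2D());    // truncate each lane, low half
  assm->xtn2(v0.V4S(), v1.V2D());   // ...writing the upper half
  assm->sqxtn(v2.V4H(), v3.V4S());  // signed saturating narrow
  assm->sqxtun(v4.V8B(), v5.V8H()); // signed -> unsigned saturating
  assm->sqxtn(s6, d7);              // scalar form, 1S from 1D
}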
3131 void Assembler::not_(const VRegister& vd, const VRegister& vn) {
3132 VIXL_ASSERT(AreSameFormat(vd, vn));
3133 VIXL_ASSERT(vd.Is8B() || vd.Is16B());
3134 Emit(VFormat(vd) | NEON_RBIT_NOT | Rn(vn) | Rd(vd));
3138 void Assembler::rbit(const VRegister& vd, const VRegister& vn) {
3139 VIXL_ASSERT(AreSameFormat(vd, vn));
3140 VIXL_ASSERT(vd.Is8B() || vd.Is16B());
3141 Emit(VFormat(vn) | (1 << NEONSize_offset) | NEON_RBIT_NOT | Rn(vn) | Rd(vd));
3145 void Assembler::ext(const VRegister& vd,
3149 VIXL_ASSERT(AreSameFormat(vd, vn, vm));
3150 VIXL_ASSERT(vd.Is8B() || vd.Is16B());
3151 VIXL_ASSERT((0 <= index) && (index < vd.GetLanes()));
3152 Emit(VFormat(vd) | NEON_EXT | Rm(vm) | ImmNEONExt(index) | Rn(vn) | Rd(vd));
3156 void Assembler::dup(const VRegister& vd, const VRegister& vn, int vn_index) {
3179 if (vd.IsScalar()) {
3183 VIXL_ASSERT(!vd.Is1D());
3184 q = vd.IsD() ? 0 : NEON_Q;
3188 Rd(vd));
3192 void Assembler::mov(const VRegister& vd, const VRegister& vn, int vn_index) {
3193 VIXL_ASSERT(vd.IsScalar());
3194 dup(vd, vn, vn_index);
3198 void Assembler::dup(const VRegister& vd, const Register& rn) {
3199 VIXL_ASSERT(!vd.Is1D());
3200 VIXL_ASSERT(vd.Is2D() == rn.IsX());
3201 int q = vd.IsD() ? 0 : NEON_Q;
3202 Emit(q | NEON_DUP_GENERAL | ImmNEON5(VFormat(vd), 0) | Rn(rn) | Rd(vd));
3206 void Assembler::ins(const VRegister& vd,
3210 VIXL_ASSERT(AreSameFormat(vd, vn));
3211 // We support vd arguments of the form vd.VxT() or vd.T(), where x is the
3213 int lane_size = vd.GetLaneSizeInBytes();
3238 ImmNEON4(format, vn_index) | Rn(vn) | Rd(vd));
3242 void Assembler::mov(const VRegister& vd,
3246 ins(vd, vd_index, vn, vn_index);
3250 void Assembler::ins(const VRegister& vd, int vd_index, const Register& rn) {
3251 // We support vd arguments of the form vd.VxT() or vd.T(), where x is the
3253 int lane_size = vd.GetLaneSizeInBytes();
3278 Emit(NEON_INS_GENERAL | ImmNEON5(format, vd_index) | Rn(rn) | Rd(vd));
3282 void Assembler::mov(const VRegister& vd, int vd_index, const Register& rn) {
3283 ins(vd, vd_index, rn);
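
Editor's note: dup broadcasts a lane or a general register, ins writes a single lane, and the whole-register mov is an ORR alias. A sketch, same assumed setup:

void EmitLaneMoves(Assembler* assm) {
  assm->dup(v0.V4S(), v1.V4S(), 2);       // broadcast lane 2 of v1
  assm->dup(v2.V8H(), w3);                // broadcast a general register
  assm->ins(v4.V16B(), 0, v5.V16B(), 15); // v4.b[0] = v5.b[15]
  assm->ins(v6.V4S(), 3, w7);             // v6.s[3] = w7
  assm->mov(v8.V16B(), v9.V16B());        // whole-register move (ORR alias)
}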
3355 void Assembler::cls(const VRegister& vd, const VRegister& vn) {
3356 VIXL_ASSERT(AreSameFormat(vd, vn));
3357 VIXL_ASSERT(!vd.Is1D() && !vd.Is2D());
3358 Emit(VFormat(vn) | NEON_CLS | Rn(vn) | Rd(vd));
3362 void Assembler::clz(const VRegister& vd, const VRegister& vn) {
3363 VIXL_ASSERT(AreSameFormat(vd, vn));
3364 VIXL_ASSERT(!vd.Is1D() && !vd.Is2D());
3365 Emit(VFormat(vn) | NEON_CLZ | Rn(vn) | Rd(vd));
3369 void Assembler::cnt(const VRegister& vd, const VRegister& vn) {
3370 VIXL_ASSERT(AreSameFormat(vd, vn));
3371 VIXL_ASSERT(vd.Is8B() || vd.Is16B());
3372 Emit(VFormat(vn) | NEON_CNT | Rn(vn) | Rd(vd));
3376 void Assembler::rev16(const VRegister& vd, const VRegister& vn) {
3377 VIXL_ASSERT(AreSameFormat(vd, vn));
3378 VIXL_ASSERT(vd.Is8B() || vd.Is16B());
3379 Emit(VFormat(vn) | NEON_REV16 | Rn(vn) | Rd(vd));
3383 void Assembler::rev32(const VRegister& vd, const VRegister& vn) {
3384 VIXL_ASSERT(AreSameFormat(vd, vn));
3385 VIXL_ASSERT(vd.Is8B() || vd.Is16B() || vd.Is4H() || vd.Is8H());
3386 Emit(VFormat(vn) | NEON_REV32 | Rn(vn) | Rd(vd));
3390 void Assembler::rev64(const VRegister& vd, const VRegister& vn) {
3391 VIXL_ASSERT(AreSameFormat(vd, vn));
3392 VIXL_ASSERT(!vd.Is1D() && !vd.Is2D());
3393 Emit(VFormat(vn) | NEON_REV64 | Rn(vn) | Rd(vd));
3397 void Assembler::ursqrte(const VRegister& vd, const VRegister& vn) {
3398 VIXL_ASSERT(AreSameFormat(vd, vn));
3399 VIXL_ASSERT(vd.Is2S() || vd.Is4S());
3400 Emit(VFormat(vn) | NEON_URSQRTE | Rn(vn) | Rd(vd));
3404 void Assembler::urecpe(const VRegister& vd, const VRegister& vn) {
3405 VIXL_ASSERT(AreSameFormat(vd, vn));
3406 VIXL_ASSERT(vd.Is2S() || vd.Is4S());
3407 Emit(VFormat(vn) | NEON_URECPE | Rn(vn) | Rd(vd));
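
Editor's note: the bit- and byte-manipulation group above (plus not_, rbit and ext from earlier in the listing) operates lane-wise with the format limits in the asserts. A sketch, same assumed setup:

void EmitBitOps(Assembler* assm) {
  assm->cnt(v0.V16B(), v1.V16B());   // per-byte popcount
  assm->not_(v0.V16B(), v1.V16B());  // bitwise NOT
  assm->rbit(v0.V16B(), v1.V16B());  // reverse bits within each byte
  assm->clz(v2.V4S(), v3.V4S());     // count leading zeros per lane
  assm->rev64(v4.V4S(), v5.V4S());   // reverse 32-bit lanes within each
                                     // 64-bit block
  assm->ext(v6.V16B(), v7.V16B(), v8.V16B(), 3);  // bytes 3..18 of v7:v8
}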
3411 void Assembler::NEONAddlp(const VRegister& vd,
3417 VIXL_ASSERT((vn.Is8B() && vd.Is4H()) || (vn.Is4H() && vd.Is2S()) ||
3418 (vn.Is2S() && vd.Is1D()) || (vn.Is16B() && vd.Is8H()) ||
3419 (vn.Is8H() && vd.Is4S()) || (vn.Is4S() && vd.Is2D()));
3420 Emit(VFormat(vn) | op | Rn(vn) | Rd(vd));
3424 void Assembler::saddlp(const VRegister& vd, const VRegister& vn) {
3425 NEONAddlp(vd, vn, NEON_SADDLP);
3429 void Assembler::uaddlp(const VRegister& vd, const VRegister& vn) {
3430 NEONAddlp(vd, vn, NEON_UADDLP);
3434 void Assembler::sadalp(const VRegister& vd, const VRegister& vn) {
3435 NEONAddlp(vd, vn, NEON_SADALP);
3439 void Assembler::uadalp(const VRegister& vd, const VRegister& vn) {
3440 NEONAddlp(vd, vn, NEON_UADALP);
3444 void Assembler::NEONAcrossLanesL(const VRegister& vd,
3447 VIXL_ASSERT((vn.Is8B() && vd.Is1H()) || (vn.Is16B() && vd.Is1H()) ||
3448 (vn.Is4H() && vd.Is1S()) || (vn.Is8H() && vd.Is1S()) ||
3449 (vn.Is4S() && vd.Is1D()));
3450 Emit(VFormat(vn) | op | Rn(vn) | Rd(vd));
3454 void Assembler::saddlv(const VRegister& vd, const VRegister& vn) {
3455 NEONAcrossLanesL(vd, vn, NEON_SADDLV);
3459 void Assembler::uaddlv(const VRegister& vd, const VRegister& vn) {
3460 NEONAcrossLanesL(vd, vn, NEON_UADDLV);
3464 void Assembler::NEONAcrossLanes(const VRegister& vd,
3467 VIXL_ASSERT((vn.Is8B() && vd.Is1B()) || (vn.Is16B() && vd.Is1B()) ||
3468 (vn.Is4H() && vd.Is1H()) || (vn.Is8H() && vd.Is1H()) ||
3469 (vn.Is4S() && vd.Is1S()));
3471 Emit(FPFormat(vn) | op | Rn(vn) | Rd(vd));
3473 Emit(VFormat(vn) | op | Rn(vn) | Rd(vd));
3479 V(fmaxv, NEON_FMAXV, vd.Is1S()) \
3480 V(fminv, NEON_FMINV, vd.Is1S()) \
3481 V(fmaxnmv, NEON_FMAXNMV, vd.Is1S()) \
3482 V(fminnmv, NEON_FMINNMV, vd.Is1S()) \
3491 void Assembler::FN(const VRegister& vd, const VRegister& vn) { \
3493 NEONAcrossLanes(vd, vn, OP); \
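
Editor's note: NEONAddlp handles the pairwise widening adds, and the across-lanes helpers reduce a whole vector to one scalar, widening for the *addlv forms. A sketch, same assumed setup:

void EmitReductions(Assembler* assm) {
  assm->saddlp(v0.V4S(), v1.V8H()); // pairwise widening add
  assm->uadalp(v0.V4S(), v1.V8H()); // ...accumulating variant
  assm->saddlv(d2, v3.V4S());       // widening sum across all lanes
  assm->addv(b4, v5.V16B());        // same-width sum across lanes
  assm->fmaxv(s6, v7.V4S());        // FP max across lanes (1S only)
}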
3499 void Assembler::NEONPerm(const VRegister& vd,
3503 VIXL_ASSERT(AreSameFormat(vd, vn, vm));
3504 VIXL_ASSERT(!vd.Is1D());
3505 Emit(VFormat(vd) | op | Rm(vm) | Rn(vn) | Rd(vd));
3509 void Assembler::trn1(const VRegister& vd,
3512 NEONPerm(vd, vn, vm, NEON_TRN1);
3516 void Assembler::trn2(const VRegister& vd,
3519 NEONPerm(vd, vn, vm, NEON_TRN2);
3523 void Assembler::uzp1(const VRegister& vd,
3526 NEONPerm(vd, vn, vm, NEON_UZP1);
3530 void Assembler::uzp2(const VRegister& vd,
3533 NEONPerm(vd, vn, vm, NEON_UZP2);
3537 void Assembler::zip1(const VRegister& vd,
3540 NEONPerm(vd, vn, vm, NEON_ZIP1);
3544 void Assembler::zip2(const VRegister& vd,
3547 NEONPerm(vd, vn, vm, NEON_ZIP2);
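
Editor's note: the permute group shares one encoder, NEONPerm. A sketch, same assumed setup:

void EmitPermutes(Assembler* assm) {
  assm->trn1(v0.V8H(), v1.V8H(), v2.V8H());    // interleave even lanes
  assm->trn2(v0.V8H(), v1.V8H(), v2.V8H());    // interleave odd lanes
  assm->uzp1(v3.V4S(), v4.V4S(), v5.V4S());    // keep even-indexed lanes
  assm->zip1(v6.V16B(), v7.V16B(), v8.V16B()); // interleave low halves
}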
3551 void Assembler::NEONShiftImmediate(const VRegister& vd,
3555 VIXL_ASSERT(AreSameFormat(vd, vn));
3561 q = vd.IsD() ? 0 : NEON_Q;
3564 Emit(q | op | scalar | immh_immb | Rn(vn) | Rd(vd));
3568 void Assembler::NEONShiftLeftImmediate(const VRegister& vd,
3574 NEONShiftImmediate(vd, vn, op, (laneSizeInBits + shift) << 16);
3578 void Assembler::NEONShiftRightImmediate(const VRegister& vd,
3584 NEONShiftImmediate(vd, vn, op, ((2 * laneSizeInBits) - shift) << 16);
3588 void Assembler::NEONShiftImmediateL(const VRegister& vd,
3596 VIXL_ASSERT((vn.Is8B() && vd.Is8H()) || (vn.Is4H() && vd.Is4S()) ||
3597 (vn.Is2S() && vd.Is2D()) || (vn.Is16B() && vd.Is8H()) ||
3598 (vn.Is8H() && vd.Is4S()) || (vn.Is4S() && vd.Is2D()));
3601 Emit(q | op | immh_immb | Rn(vn) | Rd(vd));
3605 void Assembler::NEONShiftImmediateN(const VRegister& vd,
3610 int laneSizeInBits = vd.GetLaneSizeInBits();
3615 VIXL_ASSERT((vd.Is1B() && vn.Is1H()) || (vd.Is1H() && vn.Is1S()) ||
3616 (vd.Is1S() && vn.Is1D()));
3620 VIXL_ASSERT((vd.Is8B() && vn.Is8H()) || (vd.Is4H() && vn.Is4S()) ||
3621 (vd.Is2S() && vn.Is2D()) || (vd.Is16B() && vn.Is8H()) ||
3622 (vd.Is8H() && vn.Is4S()) || (vd.Is4S() && vn.Is2D()));
3624 q = vd.IsD() ? 0 : NEON_Q;
3626 Emit(q | op | scalar | immh_immb | Rn(vn) | Rd(vd));
3630 void Assembler::shl(const VRegister& vd, const VRegister& vn, int shift) {
3631 VIXL_ASSERT(vd.IsVector() || vd.Is1D());
3632 NEONShiftLeftImmediate(vd, vn, shift, NEON_SHL);
3636 void Assembler::sli(const VRegister& vd, const VRegister& vn, int shift) {
3637 VIXL_ASSERT(vd.IsVector() || vd.Is1D());
3638 NEONShiftLeftImmediate(vd, vn, shift, NEON_SLI);
3642 void Assembler::sqshl(const VRegister& vd, const VRegister& vn, int shift) {
3643 NEONShiftLeftImmediate(vd, vn, shift, NEON_SQSHL_imm);
3647 void Assembler::sqshlu(const VRegister& vd, const VRegister& vn, int shift) {
3648 NEONShiftLeftImmediate(vd, vn, shift, NEON_SQSHLU);
3652 void Assembler::uqshl(const VRegister& vd, const VRegister& vn, int shift) {
3653 NEONShiftLeftImmediate(vd, vn, shift, NEON_UQSHL_imm);
3657 void Assembler::sshll(const VRegister& vd, const VRegister& vn, int shift) {
3659 NEONShiftImmediateL(vd, vn, shift, NEON_SSHLL);
3663 void Assembler::sshll2(const VRegister& vd, const VRegister& vn, int shift) {
3665 NEONShiftImmediateL(vd, vn, shift, NEON_SSHLL);
3669 void Assembler::sxtl(const VRegister& vd, const VRegister& vn) {
3670 sshll(vd, vn, 0);
3674 void Assembler::sxtl2(const VRegister& vd, const VRegister& vn) {
3675 sshll2(vd, vn, 0);
3679 void Assembler::ushll(const VRegister& vd, const VRegister& vn, int shift) {
3681 NEONShiftImmediateL(vd, vn, shift, NEON_USHLL);
3685 void Assembler::ushll2(const VRegister& vd, const VRegister& vn, int shift) {
3687 NEONShiftImmediateL(vd, vn, shift, NEON_USHLL);
3691 void Assembler::uxtl(const VRegister& vd, const VRegister& vn) {
3692 ushll(vd, vn, 0);
3696 void Assembler::uxtl2(const VRegister& vd, const VRegister& vn) {
3697 ushll2(vd, vn, 0);
3701 void Assembler::sri(const VRegister& vd, const VRegister& vn, int shift) {
3702 VIXL_ASSERT(vd.IsVector() || vd.Is1D());
3703 NEONShiftRightImmediate(vd, vn, shift, NEON_SRI);
3707 void Assembler::sshr(const VRegister& vd, const VRegister& vn, int shift) {
3708 VIXL_ASSERT(vd.IsVector() || vd.Is1D());
3709 NEONShiftRightImmediate(vd, vn, shift, NEON_SSHR);
3713 void Assembler::ushr(const VRegister& vd, const VRegister& vn, int shift) {
3714 VIXL_ASSERT(vd.IsVector() || vd.Is1D());
3715 NEONShiftRightImmediate(vd, vn, shift, NEON_USHR);
3719 void Assembler::srshr(const VRegister& vd, const VRegister& vn, int shift) {
3720 VIXL_ASSERT(vd.IsVector() || vd.Is1D());
3721 NEONShiftRightImmediate(vd, vn, shift, NEON_SRSHR);
3725 void Assembler::urshr(const VRegister& vd, const VRegister& vn, int shift) {
3726 VIXL_ASSERT(vd.IsVector() || vd.Is1D());
3727 NEONShiftRightImmediate(vd, vn, shift, NEON_URSHR);
3731 void Assembler::ssra(const VRegister& vd, const VRegister& vn, int shift) {
3732 VIXL_ASSERT(vd.IsVector() || vd.Is1D());
3733 NEONShiftRightImmediate(vd, vn, shift, NEON_SSRA);
3737 void Assembler::usra(const VRegister& vd, const VRegister& vn, int shift) {
3738 VIXL_ASSERT(vd.IsVector() || vd.Is1D());
3739 NEONShiftRightImmediate(vd, vn, shift, NEON_USRA);
3743 void Assembler::srsra(const VRegister& vd, const VRegister& vn, int shift) {
3744 VIXL_ASSERT(vd.IsVector() || vd.Is1D());
3745 NEONShiftRightImmediate(vd, vn, shift, NEON_SRSRA);
3749 void Assembler::ursra(const VRegister& vd, const VRegister& vn, int shift) {
3750 VIXL_ASSERT(vd.IsVector() || vd.Is1D());
3751 NEONShiftRightImmediate(vd, vn, shift, NEON_URSRA);
3755 void Assembler::shrn(const VRegister& vd, const VRegister& vn, int shift) {
3756 VIXL_ASSERT(vn.IsVector() && vd.IsD());
3757 NEONShiftImmediateN(vd, vn, shift, NEON_SHRN);
3761 void Assembler::shrn2(const VRegister& vd, const VRegister& vn, int shift) {
3762 VIXL_ASSERT(vn.IsVector() && vd.IsQ());
3763 NEONShiftImmediateN(vd, vn, shift, NEON_SHRN);
3767 void Assembler::rshrn(const VRegister& vd, const VRegister& vn, int shift) {
3768 VIXL_ASSERT(vn.IsVector() && vd.IsD());
3769 NEONShiftImmediateN(vd, vn, shift, NEON_RSHRN);
3773 void Assembler::rshrn2(const VRegister& vd, const VRegister& vn, int shift) {
3774 VIXL_ASSERT(vn.IsVector() && vd.IsQ());
3775 NEONShiftImmediateN(vd, vn, shift, NEON_RSHRN);
3779 void Assembler::sqshrn(const VRegister& vd, const VRegister& vn, int shift) {
3780 VIXL_ASSERT(vd.IsD() || (vn.IsScalar() && vd.IsScalar()));
3781 NEONShiftImmediateN(vd, vn, shift, NEON_SQSHRN);
3785 void Assembler::sqshrn2(const VRegister& vd, const VRegister& vn, int shift) {
3786 VIXL_ASSERT(vn.IsVector() && vd.IsQ());
3787 NEONShiftImmediateN(vd, vn, shift, NEON_SQSHRN);
3791 void Assembler::sqrshrn(const VRegister& vd, const VRegister& vn, int shift) {
3792 VIXL_ASSERT(vd.IsD() || (vn.IsScalar() && vd.IsScalar()));
3793 NEONShiftImmediateN(vd, vn, shift, NEON_SQRSHRN);
3797 void Assembler::sqrshrn2(const VRegister& vd, const VRegister& vn, int shift) {
3798 VIXL_ASSERT(vn.IsVector() && vd.IsQ());
3799 NEONShiftImmediateN(vd, vn, shift, NEON_SQRSHRN);
3803 void Assembler::sqshrun(const VRegister& vd, const VRegister& vn, int shift) {
3804 VIXL_ASSERT(vd.IsD() || (vn.IsScalar() && vd.IsScalar()));
3805 NEONShiftImmediateN(vd, vn, shift, NEON_SQSHRUN);
3809 void Assembler::sqshrun2(const VRegister& vd, const VRegister& vn, int shift) {
3810 VIXL_ASSERT(vn.IsVector() && vd.IsQ());
3811 NEONShiftImmediateN(vd, vn, shift, NEON_SQSHRUN);
3815 void Assembler::sqrshrun(const VRegister& vd, const VRegister& vn, int shift) {
3816 VIXL_ASSERT(vd.IsD() || (vn.IsScalar() && vd.IsScalar()));
3817 NEONShiftImmediateN(vd, vn, shift, NEON_SQRSHRUN);
3821 void Assembler::sqrshrun2(const VRegister& vd, const VRegister& vn, int shift) {
3822 VIXL_ASSERT(vn.IsVector() && vd.IsQ());
3823 NEONShiftImmediateN(vd, vn, shift, NEON_SQRSHRUN);
3827 void Assembler::uqshrn(const VRegister& vd, const VRegister& vn, int shift) {
3828 VIXL_ASSERT(vd.IsD() || (vn.IsScalar() && vd.IsScalar()));
3829 NEONShiftImmediateN(vd, vn, shift, NEON_UQSHRN);
3833 void Assembler::uqshrn2(const VRegister& vd, const VRegister& vn, int shift) {
3834 VIXL_ASSERT(vn.IsVector() && vd.IsQ());
3835 NEONShiftImmediateN(vd, vn, shift, NEON_UQSHRN);
3839 void Assembler::uqrshrn(const VRegister& vd, const VRegister& vn, int shift) {
3840 VIXL_ASSERT(vd.IsD() || (vn.IsScalar() && vd.IsScalar()));
3841 NEONShiftImmediateN(vd, vn, shift, NEON_UQRSHRN);
3845 void Assembler::uqrshrn2(const VRegister& vd, const VRegister& vn, int shift) {
3846 VIXL_ASSERT(vn.IsVector() && vd.IsQ());
3847 NEONShiftImmediateN(vd, vn, shift, NEON_UQRSHRN);
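
Editor's note: the immediate-shift family splits into plain shifts, accumulating shifts (*sra), widening shifts (*shll, with sxtl/uxtl as shift-by-zero aliases) and narrowing shifts (*shrn). A sketch, same assumed setup:

void EmitShifts(Assembler* assm) {
  assm->shl(v0.V4S(), v1.V4S(), 3);     // immediate left shift
  assm->sshr(v0.V4S(), v1.V4S(), 7);    // arithmetic right shift
  assm->usra(v0.V4S(), v1.V4S(), 7);    // shift right and accumulate
  assm->sshll(v2.V8H(), v3.V8B(), 2);   // widening left shift
  assm->sxtl(v2.V8H(), v3.V8B());       // sign-extend (sshll #0 alias)
  assm->sqshrn(v4.V4H(), v5.V4S(), 5);  // saturating narrowing shift
  assm->sqshrn2(v4.V8H(), v5.V4S(), 5); // ...upper-half variant
}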
4085 void Assembler::FPDataProcessing1Source(const VRegister& vd,
4088 VIXL_ASSERT(vd.Is1H() || vd.Is1S() || vd.Is1D());
4089 Emit(FPType(vn) | op | Rn(vn) | Rd(vd));
4093 void Assembler::FPDataProcessing3Source(const VRegister& vd,
4098 VIXL_ASSERT(vd.Is1S() || vd.Is1D());
4099 VIXL_ASSERT(AreSameSizeAndType(vd, vn, vm, va));
4100 Emit(FPType(vd) | op | Rm(vm) | Rn(vn) | Rd(vd) | Ra(va));
4104 void Assembler::NEONModifiedImmShiftLsl(const VRegister& vd,
4108 VIXL_ASSERT(vd.Is8B() || vd.Is16B() || vd.Is4H() || vd.Is8H() || vd.Is2S() ||
4109 vd.Is4S());
4115 if (vd.Is8B() || vd.Is16B()) {
4124 if (vd.Is4H() || vd.Is8H()) {
4131 int q = vd.IsQ() ? NEON_Q : 0;
4133 Emit(q | op | ImmNEONabcdefgh(imm8) | NEONCmode(cmode) | Rd(vd));
4137 void Assembler::NEONModifiedImmShiftMsl(const VRegister& vd,
4141 VIXL_ASSERT(vd.Is2S() || vd.Is4S());
4148 int q = vd.IsQ() ? NEON_Q : 0;
4150 Emit(q | op | ImmNEONabcdefgh(imm8) | NEONCmode(cmode) | Rd(vd));
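
Editor's note: NEONModifiedImmShiftMsl implements the "shifting ones" MSL encoding used by movi and mvni on 2S/4S lanes; the shift amount is restricted to 8 or 16. A final sketch, same assumed setup:

void EmitMslImmediates(Assembler* assm) {
  assm->movi(v0.V4S(), 0xab, MSL, 8);   // 0x0000abff in each lane
  assm->mvni(v1.V4S(), 0xab, MSL, 16);  // ~0x00abffff = 0xff540000 per lane
}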