// Copyright 2015, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//   * Redistributions of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//   * Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//   * Neither the name of ARM Limited nor the names of its contributors may
//     be used to endorse or promote products derived from this software
//     without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.


#include <cmath>

#include "assembler-aarch64.h"
#include "macro-assembler-aarch64.h"

namespace vixl {
namespace aarch64 {

RawLiteral::RawLiteral(size_t size,
                       LiteralPool* literal_pool,
                       DeletionPolicy deletion_policy)
    : size_(size),
      offset_(0),
      low64_(0),
      high64_(0),
      literal_pool_(literal_pool),
      deletion_policy_(deletion_policy) {
  VIXL_ASSERT((deletion_policy == kManuallyDeleted) || (literal_pool_ != NULL));
  if (deletion_policy == kDeletedOnPoolDestruction) {
    literal_pool_->DeleteOnDestruction(this);
  }
}


void Assembler::Reset() { GetBuffer()->Reset(); }


void Assembler::bind(Label* label) {
  BindToOffset(label, GetBuffer()->GetCursorOffset());
}


void Assembler::BindToOffset(Label* label, ptrdiff_t offset) {
  VIXL_ASSERT((offset >= 0) && (offset <= GetBuffer()->GetCursorOffset()));
  VIXL_ASSERT(offset % kInstructionSize == 0);

  label->Bind(offset);

  for (Label::LabelLinksIterator it(label); !it.Done(); it.Advance()) {
    Instruction* link =
        GetBuffer()->GetOffsetAddress<Instruction*>(*it.Current());
    link->SetImmPCOffsetTarget(GetLabelAddress<Instruction*>(label));
  }
  label->ClearAllLinks();
}


// A common implementation for the LinkAndGet<Type>OffsetTo helpers.
//
// The offset is calculated by aligning the PC and label addresses down to a
// multiple of 1 << element_shift, then calculating the (scaled) offset between
// them. This matches the semantics of adrp, for example.
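//
// For example (illustrative addresses): with element_shift == kPageSizeLog2
// (i.e. 12), a cursor at 0x1234 and a bound label at 0x5678 yield
// (0x5678 >> 12) - (0x1234 >> 12) = 5 - 1 = 4 pages, which is exactly the
// immediate that adrp expects.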
template <int element_shift>
ptrdiff_t Assembler::LinkAndGetOffsetTo(Label* label) {
  VIXL_STATIC_ASSERT(element_shift < (sizeof(ptrdiff_t) * 8));

  if (label->IsBound()) {
    uintptr_t pc_offset = GetCursorAddress<uintptr_t>() >> element_shift;
    uintptr_t label_offset = GetLabelAddress<uintptr_t>(label) >> element_shift;
    return label_offset - pc_offset;
  } else {
    label->AddLink(GetBuffer()->GetCursorOffset());
    return 0;
  }
}


ptrdiff_t Assembler::LinkAndGetByteOffsetTo(Label* label) {
  return LinkAndGetOffsetTo<0>(label);
}


ptrdiff_t Assembler::LinkAndGetInstructionOffsetTo(Label* label) {
  return LinkAndGetOffsetTo<kInstructionSizeLog2>(label);
}


ptrdiff_t Assembler::LinkAndGetPageOffsetTo(Label* label) {
  return LinkAndGetOffsetTo<kPageSizeLog2>(label);
}


void Assembler::place(RawLiteral* literal) {
  VIXL_ASSERT(!literal->IsPlaced());

  // Patch instructions using this literal.
  if (literal->IsUsed()) {
    Instruction* target = GetCursorAddress<Instruction*>();
    ptrdiff_t offset = literal->GetLastUse();
    bool done;
    do {
      Instruction* ldr = GetBuffer()->GetOffsetAddress<Instruction*>(offset);
      VIXL_ASSERT(ldr->IsLoadLiteral());

      ptrdiff_t imm19 = ldr->GetImmLLiteral();
      VIXL_ASSERT(imm19 <= 0);
      done = (imm19 == 0);
      offset += imm19 * kLiteralEntrySize;

      ldr->SetImmLLiteral(target);
    } while (!done);
  }

  // "bind" the literal.
  literal->SetOffset(GetCursorOffset());
  // Copy the data into the pool.
  switch (literal->GetSize()) {
    case kSRegSizeInBytes:
      dc32(literal->GetRawValue32());
      break;
    case kDRegSizeInBytes:
      dc64(literal->GetRawValue64());
      break;
    default:
      VIXL_ASSERT(literal->GetSize() == kQRegSizeInBytes);
      dc64(literal->GetRawValue128Low64());
      dc64(literal->GetRawValue128High64());
  }

  literal->literal_pool_ = NULL;
}


ptrdiff_t Assembler::LinkAndGetWordOffsetTo(RawLiteral* literal) {
  VIXL_ASSERT(IsWordAligned(GetCursorOffset()));

  bool register_first_use =
      (literal->GetLiteralPool() != NULL) && !literal->IsUsed();

  if (literal->IsPlaced()) {
    // The literal is "behind", the offset will be negative.
    VIXL_ASSERT((literal->GetOffset() - GetCursorOffset()) <= 0);
    return (literal->GetOffset() - GetCursorOffset()) >> kLiteralEntrySizeLog2;
  }

  ptrdiff_t offset = 0;
  // Link all uses together.
  if (literal->IsUsed()) {
    offset =
        (literal->GetLastUse() - GetCursorOffset()) >> kLiteralEntrySizeLog2;
  }
  literal->SetLastUse(GetCursorOffset());

  if (register_first_use) {
    literal->GetLiteralPool()->AddEntry(literal);
  }

  return offset;
}
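
// A note on the chain built by LinkAndGetWordOffsetTo and consumed by
// place(): until a literal is placed, each load-literal instruction's imm19
// field holds the (word-scaled, non-positive) offset back to the previous
// use of the same literal, and the most recent use is remembered via
// SetLastUse(). place() walks that chain backwards, retargeting every load
// at the freshly emitted pool entry, and stops at the first-recorded use,
// whose imm19 is 0.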

// Code generation.
void Assembler::br(const Register& xn) {
  VIXL_ASSERT(xn.Is64Bits());
  Emit(BR | Rn(xn));
}


void Assembler::blr(const Register& xn) {
  VIXL_ASSERT(xn.Is64Bits());
  Emit(BLR | Rn(xn));
}


void Assembler::ret(const Register& xn) {
  VIXL_ASSERT(xn.Is64Bits());
  Emit(RET | Rn(xn));
}


void Assembler::braaz(const Register& xn) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kPAuth));
  VIXL_ASSERT(xn.Is64Bits());
  Emit(BRAAZ | Rn(xn) | Rd_mask);
}

void Assembler::brabz(const Register& xn) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kPAuth));
  VIXL_ASSERT(xn.Is64Bits());
  Emit(BRABZ | Rn(xn) | Rd_mask);
}

void Assembler::blraaz(const Register& xn) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kPAuth));
  VIXL_ASSERT(xn.Is64Bits());
  Emit(BLRAAZ | Rn(xn) | Rd_mask);
}

void Assembler::blrabz(const Register& xn) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kPAuth));
  VIXL_ASSERT(xn.Is64Bits());
  Emit(BLRABZ | Rn(xn) | Rd_mask);
}

void Assembler::retaa() {
  VIXL_ASSERT(CPUHas(CPUFeatures::kPAuth));
  Emit(RETAA | Rn_mask | Rd_mask);
}

void Assembler::retab() {
  VIXL_ASSERT(CPUHas(CPUFeatures::kPAuth));
  Emit(RETAB | Rn_mask | Rd_mask);
}

// The Arm ARM names the register Xm but encodes it in the Xd bitfield.
void Assembler::braa(const Register& xn, const Register& xm) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kPAuth));
  VIXL_ASSERT(xn.Is64Bits() && xm.Is64Bits());
  Emit(BRAA | Rn(xn) | RdSP(xm));
}

void Assembler::brab(const Register& xn, const Register& xm) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kPAuth));
  VIXL_ASSERT(xn.Is64Bits() && xm.Is64Bits());
  Emit(BRAB | Rn(xn) | RdSP(xm));
}

void Assembler::blraa(const Register& xn, const Register& xm) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kPAuth));
  VIXL_ASSERT(xn.Is64Bits() && xm.Is64Bits());
  Emit(BLRAA | Rn(xn) | RdSP(xm));
}

void Assembler::blrab(const Register& xn, const Register& xm) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kPAuth));
  VIXL_ASSERT(xn.Is64Bits() && xm.Is64Bits());
  Emit(BLRAB | Rn(xn) | RdSP(xm));
}


void Assembler::b(int64_t imm26) { Emit(B | ImmUncondBranch(imm26)); }


void Assembler::b(int64_t imm19, Condition cond) {
  Emit(B_cond | ImmCondBranch(imm19) | cond);
}


void Assembler::b(Label* label) {
  int64_t offset = LinkAndGetInstructionOffsetTo(label);
  VIXL_ASSERT(Instruction::IsValidImmPCOffset(UncondBranchType, offset));
  b(static_cast<int>(offset));
}


void Assembler::b(Label* label, Condition cond) {
  int64_t offset = LinkAndGetInstructionOffsetTo(label);
  VIXL_ASSERT(Instruction::IsValidImmPCOffset(CondBranchType, offset));
  b(static_cast<int>(offset), cond);
}


void Assembler::bl(int64_t imm26) { Emit(BL | ImmUncondBranch(imm26)); }


void Assembler::bl(Label* label) {
  int64_t offset = LinkAndGetInstructionOffsetTo(label);
  VIXL_ASSERT(Instruction::IsValidImmPCOffset(UncondBranchType, offset));
  bl(static_cast<int>(offset));
}


void Assembler::cbz(const Register& rt, int64_t imm19) {
  Emit(SF(rt) | CBZ | ImmCmpBranch(imm19) | Rt(rt));
}


void Assembler::cbz(const Register& rt, Label* label) {
  int64_t offset = LinkAndGetInstructionOffsetTo(label);
  VIXL_ASSERT(Instruction::IsValidImmPCOffset(CompareBranchType, offset));
  cbz(rt, static_cast<int>(offset));
}


void Assembler::cbnz(const Register& rt, int64_t imm19) {
  Emit(SF(rt) | CBNZ | ImmCmpBranch(imm19) | Rt(rt));
}


void Assembler::cbnz(const Register& rt, Label* label) {
  int64_t offset = LinkAndGetInstructionOffsetTo(label);
  VIXL_ASSERT(Instruction::IsValidImmPCOffset(CompareBranchType, offset));
  cbnz(rt, static_cast<int>(offset));
}


void Assembler::NEONTable(const VRegister& vd,
                          const VRegister& vn,
                          const VRegister& vm,
                          NEONTableOp op) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
  VIXL_ASSERT(vd.Is16B() || vd.Is8B());
  VIXL_ASSERT(vn.Is16B());
  VIXL_ASSERT(AreSameFormat(vd, vm));
  Emit(op | (vd.IsQ() ? NEON_Q : 0) | Rm(vm) | Rn(vn) | Rd(vd));
}


void Assembler::tbl(const VRegister& vd,
                    const VRegister& vn,
                    const VRegister& vm) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
  NEONTable(vd, vn, vm, NEON_TBL_1v);
}


void Assembler::tbl(const VRegister& vd,
                    const VRegister& vn,
                    const VRegister& vn2,
                    const VRegister& vm) {
  USE(vn2);
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
  VIXL_ASSERT(AreSameFormat(vn, vn2));
  VIXL_ASSERT(AreConsecutive(vn, vn2));
  NEONTable(vd, vn, vm, NEON_TBL_2v);
}


void Assembler::tbl(const VRegister& vd,
                    const VRegister& vn,
                    const VRegister& vn2,
                    const VRegister& vn3,
                    const VRegister& vm) {
  USE(vn2, vn3);
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
  VIXL_ASSERT(AreSameFormat(vn, vn2, vn3));
  VIXL_ASSERT(AreConsecutive(vn, vn2, vn3));
  NEONTable(vd, vn, vm, NEON_TBL_3v);
}


void Assembler::tbl(const VRegister& vd,
                    const VRegister& vn,
                    const VRegister& vn2,
                    const VRegister& vn3,
                    const VRegister& vn4,
                    const VRegister& vm) {
  USE(vn2, vn3, vn4);
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
  VIXL_ASSERT(AreSameFormat(vn, vn2, vn3, vn4));
  VIXL_ASSERT(AreConsecutive(vn, vn2, vn3, vn4));
  NEONTable(vd, vn, vm, NEON_TBL_4v);
}


void Assembler::tbx(const VRegister& vd,
                    const VRegister& vn,
                    const VRegister& vm) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
  NEONTable(vd, vn, vm, NEON_TBX_1v);
}


void Assembler::tbx(const VRegister& vd,
                    const VRegister& vn,
                    const VRegister& vn2,
                    const VRegister& vm) {
  USE(vn2);
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
  VIXL_ASSERT(AreSameFormat(vn, vn2));
  VIXL_ASSERT(AreConsecutive(vn, vn2));
  NEONTable(vd, vn, vm, NEON_TBX_2v);
}


void Assembler::tbx(const VRegister& vd,
                    const VRegister& vn,
                    const VRegister& vn2,
                    const VRegister& vn3,
                    const VRegister& vm) {
  USE(vn2, vn3);
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
  VIXL_ASSERT(AreSameFormat(vn, vn2, vn3));
  VIXL_ASSERT(AreConsecutive(vn, vn2, vn3));
  NEONTable(vd, vn, vm, NEON_TBX_3v);
}


void Assembler::tbx(const VRegister& vd,
                    const VRegister& vn,
                    const VRegister& vn2,
                    const VRegister& vn3,
                    const VRegister& vn4,
                    const VRegister& vm) {
  USE(vn2, vn3, vn4);
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
  VIXL_ASSERT(AreSameFormat(vn, vn2, vn3, vn4));
  VIXL_ASSERT(AreConsecutive(vn, vn2, vn3, vn4));
  NEONTable(vd, vn, vm, NEON_TBX_4v);
}


void Assembler::tbz(const Register& rt, unsigned bit_pos, int64_t imm14) {
  VIXL_ASSERT(rt.Is64Bits() || (rt.Is32Bits() && (bit_pos < kWRegSize)));
  Emit(TBZ | ImmTestBranchBit(bit_pos) | ImmTestBranch(imm14) | Rt(rt));
}


void Assembler::tbz(const Register& rt, unsigned bit_pos, Label* label) {
  ptrdiff_t offset = LinkAndGetInstructionOffsetTo(label);
  VIXL_ASSERT(Instruction::IsValidImmPCOffset(TestBranchType, offset));
  tbz(rt, bit_pos, static_cast<int>(offset));
}


void Assembler::tbnz(const Register& rt, unsigned bit_pos, int64_t imm14) {
  VIXL_ASSERT(rt.Is64Bits() || (rt.Is32Bits() && (bit_pos < kWRegSize)));
  Emit(TBNZ | ImmTestBranchBit(bit_pos) | ImmTestBranch(imm14) | Rt(rt));
}


void Assembler::tbnz(const Register& rt, unsigned bit_pos, Label* label) {
  ptrdiff_t offset = LinkAndGetInstructionOffsetTo(label);
  VIXL_ASSERT(Instruction::IsValidImmPCOffset(TestBranchType, offset));
  tbnz(rt, bit_pos, static_cast<int>(offset));
}


void Assembler::adr(const Register& xd, int64_t imm21) {
  VIXL_ASSERT(xd.Is64Bits());
  Emit(ADR | ImmPCRelAddress(imm21) | Rd(xd));
}


void Assembler::adr(const Register& xd, Label* label) {
  adr(xd, static_cast<int>(LinkAndGetByteOffsetTo(label)));
}


void Assembler::adrp(const Register& xd, int64_t imm21) {
  VIXL_ASSERT(xd.Is64Bits());
  Emit(ADRP | ImmPCRelAddress(imm21) | Rd(xd));
}


void Assembler::adrp(const Register& xd, Label* label) {
  VIXL_ASSERT(AllowPageOffsetDependentCode());
  adrp(xd, static_cast<int>(LinkAndGetPageOffsetTo(label)));
}


void Assembler::add(const Register& rd,
                    const Register& rn,
                    const Operand& operand) {
  AddSub(rd, rn, operand, LeaveFlags, ADD);
}


void Assembler::adds(const Register& rd,
                     const Register& rn,
                     const Operand& operand) {
  AddSub(rd, rn, operand, SetFlags, ADD);
}


void Assembler::cmn(const Register& rn, const Operand& operand) {
  Register zr = AppropriateZeroRegFor(rn);
  adds(zr, rn, operand);
}


void Assembler::sub(const Register& rd,
                    const Register& rn,
                    const Operand& operand) {
  AddSub(rd, rn, operand, LeaveFlags, SUB);
}


void Assembler::subs(const Register& rd,
                     const Register& rn,
                     const Operand& operand) {
  AddSub(rd, rn, operand, SetFlags, SUB);
}


void Assembler::cmp(const Register& rn, const Operand& operand) {
  Register zr = AppropriateZeroRegFor(rn);
  subs(zr, rn, operand);
}


void Assembler::neg(const Register& rd, const Operand& operand) {
  Register zr = AppropriateZeroRegFor(rd);
  sub(rd, zr, operand);
}


void Assembler::negs(const Register& rd, const Operand& operand) {
  Register zr = AppropriateZeroRegFor(rd);
  subs(rd, zr, operand);
}


void Assembler::adc(const Register& rd,
                    const Register& rn,
                    const Operand& operand) {
  AddSubWithCarry(rd, rn, operand, LeaveFlags, ADC);
}


void Assembler::adcs(const Register& rd,
                     const Register& rn,
                     const Operand& operand) {
  AddSubWithCarry(rd, rn, operand, SetFlags, ADC);
}


void Assembler::sbc(const Register& rd,
                    const Register& rn,
                    const Operand& operand) {
  AddSubWithCarry(rd, rn, operand, LeaveFlags, SBC);
}


void Assembler::sbcs(const Register& rd,
                     const Register& rn,
                     const Operand& operand) {
  AddSubWithCarry(rd, rn, operand, SetFlags, SBC);
}


void Assembler::ngc(const Register& rd, const Operand& operand) {
  Register zr = AppropriateZeroRegFor(rd);
  sbc(rd, zr, operand);
}


void Assembler::ngcs(const Register& rd, const Operand& operand) {
  Register zr = AppropriateZeroRegFor(rd);
  sbcs(rd, zr, operand);
}


// Logical instructions.
void Assembler::and_(const Register& rd,
                     const Register& rn,
                     const Operand& operand) {
  Logical(rd, rn, operand, AND);
}


void Assembler::ands(const Register& rd,
                     const Register& rn,
                     const Operand& operand) {
  Logical(rd, rn, operand, ANDS);
}


void Assembler::tst(const Register& rn, const Operand& operand) {
  ands(AppropriateZeroRegFor(rn), rn, operand);
}


void Assembler::bic(const Register& rd,
                    const Register& rn,
                    const Operand& operand) {
  Logical(rd, rn, operand, BIC);
}


void Assembler::bics(const Register& rd,
                     const Register& rn,
                     const Operand& operand) {
  Logical(rd, rn, operand, BICS);
}


void Assembler::orr(const Register& rd,
                    const Register& rn,
                    const Operand& operand) {
  Logical(rd, rn, operand, ORR);
}


void Assembler::orn(const Register& rd,
                    const Register& rn,
                    const Operand& operand) {
  Logical(rd, rn, operand, ORN);
}


void Assembler::eor(const Register& rd,
                    const Register& rn,
                    const Operand& operand) {
  Logical(rd, rn, operand, EOR);
}


void Assembler::eon(const Register& rd,
                    const Register& rn,
                    const Operand& operand) {
  Logical(rd, rn, operand, EON);
}


void Assembler::lslv(const Register& rd,
                     const Register& rn,
                     const Register& rm) {
  VIXL_ASSERT(rd.GetSizeInBits() == rn.GetSizeInBits());
  VIXL_ASSERT(rd.GetSizeInBits() == rm.GetSizeInBits());
  Emit(SF(rd) | LSLV | Rm(rm) | Rn(rn) | Rd(rd));
}


void Assembler::lsrv(const Register& rd,
                     const Register& rn,
                     const Register& rm) {
  VIXL_ASSERT(rd.GetSizeInBits() == rn.GetSizeInBits());
  VIXL_ASSERT(rd.GetSizeInBits() == rm.GetSizeInBits());
  Emit(SF(rd) | LSRV | Rm(rm) | Rn(rn) | Rd(rd));
}


void Assembler::asrv(const Register& rd,
                     const Register& rn,
                     const Register& rm) {
  VIXL_ASSERT(rd.GetSizeInBits() == rn.GetSizeInBits());
  VIXL_ASSERT(rd.GetSizeInBits() == rm.GetSizeInBits());
  Emit(SF(rd) | ASRV | Rm(rm) | Rn(rn) | Rd(rd));
}


void Assembler::rorv(const Register& rd,
                     const Register& rn,
                     const Register& rm) {
  VIXL_ASSERT(rd.GetSizeInBits() == rn.GetSizeInBits());
  VIXL_ASSERT(rd.GetSizeInBits() == rm.GetSizeInBits());
  Emit(SF(rd) | RORV | Rm(rm) | Rn(rn) | Rd(rd));
}


// Bitfield operations.
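//
// For orientation (illustrative; the shift and extend aliases are defined in
// terms of these in the assembler header): for 64-bit registers,
// "lsl xd, xn, #shift" is "ubfm xd, xn, #((64 - shift) % 64), #(63 - shift)",
// and "sxtb xd, xn" is "sbfm xd, xn, #0, #7".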
void Assembler::bfm(const Register& rd,
                    const Register& rn,
                    unsigned immr,
                    unsigned imms) {
  VIXL_ASSERT(rd.GetSizeInBits() == rn.GetSizeInBits());
  Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset);
  Emit(SF(rd) | BFM | N | ImmR(immr, rd.GetSizeInBits()) |
       ImmS(imms, rn.GetSizeInBits()) | Rn(rn) | Rd(rd));
}


void Assembler::sbfm(const Register& rd,
                     const Register& rn,
                     unsigned immr,
                     unsigned imms) {
  VIXL_ASSERT(rd.Is64Bits() || rn.Is32Bits());
  Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset);
  Emit(SF(rd) | SBFM | N | ImmR(immr, rd.GetSizeInBits()) |
       ImmS(imms, rn.GetSizeInBits()) | Rn(rn) | Rd(rd));
}


void Assembler::ubfm(const Register& rd,
                     const Register& rn,
                     unsigned immr,
                     unsigned imms) {
  VIXL_ASSERT(rd.GetSizeInBits() == rn.GetSizeInBits());
  Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset);
  Emit(SF(rd) | UBFM | N | ImmR(immr, rd.GetSizeInBits()) |
       ImmS(imms, rn.GetSizeInBits()) | Rn(rn) | Rd(rd));
}


void Assembler::extr(const Register& rd,
                     const Register& rn,
                     const Register& rm,
                     unsigned lsb) {
  VIXL_ASSERT(rd.GetSizeInBits() == rn.GetSizeInBits());
  VIXL_ASSERT(rd.GetSizeInBits() == rm.GetSizeInBits());
  Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset);
  Emit(SF(rd) | EXTR | N | Rm(rm) | ImmS(lsb, rn.GetSizeInBits()) | Rn(rn) |
       Rd(rd));
}


void Assembler::csel(const Register& rd,
                     const Register& rn,
                     const Register& rm,
                     Condition cond) {
  ConditionalSelect(rd, rn, rm, cond, CSEL);
}


void Assembler::csinc(const Register& rd,
                      const Register& rn,
                      const Register& rm,
                      Condition cond) {
  ConditionalSelect(rd, rn, rm, cond, CSINC);
}


void Assembler::csinv(const Register& rd,
                      const Register& rn,
                      const Register& rm,
                      Condition cond) {
  ConditionalSelect(rd, rn, rm, cond, CSINV);
}


void Assembler::csneg(const Register& rd,
                      const Register& rn,
                      const Register& rm,
                      Condition cond) {
  ConditionalSelect(rd, rn, rm, cond, CSNEG);
}


void Assembler::cset(const Register& rd, Condition cond) {
  VIXL_ASSERT((cond != al) && (cond != nv));
  Register zr = AppropriateZeroRegFor(rd);
  csinc(rd, zr, zr, InvertCondition(cond));
}


void Assembler::csetm(const Register& rd, Condition cond) {
  VIXL_ASSERT((cond != al) && (cond != nv));
  Register zr = AppropriateZeroRegFor(rd);
  csinv(rd, zr, zr, InvertCondition(cond));
}


void Assembler::cinc(const Register& rd, const Register& rn, Condition cond) {
  VIXL_ASSERT((cond != al) && (cond != nv));
  csinc(rd, rn, rn, InvertCondition(cond));
}


void Assembler::cinv(const Register& rd, const Register& rn, Condition cond) {
  VIXL_ASSERT((cond != al) && (cond != nv));
  csinv(rd, rn, rn, InvertCondition(cond));
}


void Assembler::cneg(const Register& rd, const Register& rn, Condition cond) {
  VIXL_ASSERT((cond != al) && (cond != nv));
  csneg(rd, rn, rn, InvertCondition(cond));
}


void Assembler::ConditionalSelect(const Register& rd,
                                  const Register& rn,
                                  const Register& rm,
                                  Condition cond,
                                  ConditionalSelectOp op) {
  VIXL_ASSERT(rd.GetSizeInBits() == rn.GetSizeInBits());
  VIXL_ASSERT(rd.GetSizeInBits() == rm.GetSizeInBits());
  Emit(SF(rd) | op | Rm(rm) | Cond(cond) | Rn(rn) | Rd(rd));
}
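
// Worked aliases for the conditional selects above (illustrative): given the
// inverted conditions used by cset and cinc, "cset x0, eq" is encoded as
// "csinc x0, xzr, xzr, ne", and "cinc x0, x1, lt" as "csinc x0, x1, x1, ge".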

void Assembler::ccmn(const Register& rn,
                     const Operand& operand,
                     StatusFlags nzcv,
                     Condition cond) {
  ConditionalCompare(rn, operand, nzcv, cond, CCMN);
}


void Assembler::ccmp(const Register& rn,
                     const Operand& operand,
                     StatusFlags nzcv,
                     Condition cond) {
  ConditionalCompare(rn, operand, nzcv, cond, CCMP);
}


void Assembler::DataProcessing3Source(const Register& rd,
                                      const Register& rn,
                                      const Register& rm,
                                      const Register& ra,
                                      DataProcessing3SourceOp op) {
  Emit(SF(rd) | op | Rm(rm) | Ra(ra) | Rn(rn) | Rd(rd));
}


void Assembler::crc32b(const Register& wd,
                       const Register& wn,
                       const Register& wm) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kCRC32));
  VIXL_ASSERT(wd.Is32Bits() && wn.Is32Bits() && wm.Is32Bits());
  Emit(SF(wm) | Rm(wm) | CRC32B | Rn(wn) | Rd(wd));
}


void Assembler::crc32h(const Register& wd,
                       const Register& wn,
                       const Register& wm) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kCRC32));
  VIXL_ASSERT(wd.Is32Bits() && wn.Is32Bits() && wm.Is32Bits());
  Emit(SF(wm) | Rm(wm) | CRC32H | Rn(wn) | Rd(wd));
}


void Assembler::crc32w(const Register& wd,
                       const Register& wn,
                       const Register& wm) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kCRC32));
  VIXL_ASSERT(wd.Is32Bits() && wn.Is32Bits() && wm.Is32Bits());
  Emit(SF(wm) | Rm(wm) | CRC32W | Rn(wn) | Rd(wd));
}


void Assembler::crc32x(const Register& wd,
                       const Register& wn,
                       const Register& xm) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kCRC32));
  VIXL_ASSERT(wd.Is32Bits() && wn.Is32Bits() && xm.Is64Bits());
  Emit(SF(xm) | Rm(xm) | CRC32X | Rn(wn) | Rd(wd));
}


void Assembler::crc32cb(const Register& wd,
                        const Register& wn,
                        const Register& wm) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kCRC32));
  VIXL_ASSERT(wd.Is32Bits() && wn.Is32Bits() && wm.Is32Bits());
  Emit(SF(wm) | Rm(wm) | CRC32CB | Rn(wn) | Rd(wd));
}


void Assembler::crc32ch(const Register& wd,
                        const Register& wn,
                        const Register& wm) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kCRC32));
  VIXL_ASSERT(wd.Is32Bits() && wn.Is32Bits() && wm.Is32Bits());
  Emit(SF(wm) | Rm(wm) | CRC32CH | Rn(wn) | Rd(wd));
}


void Assembler::crc32cw(const Register& wd,
                        const Register& wn,
                        const Register& wm) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kCRC32));
  VIXL_ASSERT(wd.Is32Bits() && wn.Is32Bits() && wm.Is32Bits());
  Emit(SF(wm) | Rm(wm) | CRC32CW | Rn(wn) | Rd(wd));
}


void Assembler::crc32cx(const Register& wd,
                        const Register& wn,
                        const Register& xm) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kCRC32));
  VIXL_ASSERT(wd.Is32Bits() && wn.Is32Bits() && xm.Is64Bits());
  Emit(SF(xm) | Rm(xm) | CRC32CX | Rn(wn) | Rd(wd));
}


void Assembler::mul(const Register& rd,
                    const Register& rn,
                    const Register& rm) {
  VIXL_ASSERT(AreSameSizeAndType(rd, rn, rm));
  DataProcessing3Source(rd, rn, rm, AppropriateZeroRegFor(rd), MADD);
}


void Assembler::madd(const Register& rd,
                     const Register& rn,
                     const Register& rm,
                     const Register& ra) {
  DataProcessing3Source(rd, rn, rm, ra, MADD);
}


void Assembler::mneg(const Register& rd,
                     const Register& rn,
                     const Register& rm) {
  VIXL_ASSERT(AreSameSizeAndType(rd, rn, rm));
  DataProcessing3Source(rd, rn, rm, AppropriateZeroRegFor(rd), MSUB);
}


void Assembler::msub(const Register& rd,
                     const Register& rn,
                     const Register& rm,
                     const Register& ra) {
  DataProcessing3Source(rd, rn, rm, ra, MSUB);
}


void Assembler::umaddl(const Register& xd,
                       const Register& wn,
                       const Register& wm,
                       const Register& xa) {
  VIXL_ASSERT(xd.Is64Bits() && xa.Is64Bits());
  VIXL_ASSERT(wn.Is32Bits() && wm.Is32Bits());
  DataProcessing3Source(xd, wn, wm, xa, UMADDL_x);
}


void Assembler::smaddl(const Register& xd,
                       const Register& wn,
                       const Register& wm,
                       const Register& xa) {
  VIXL_ASSERT(xd.Is64Bits() && xa.Is64Bits());
  VIXL_ASSERT(wn.Is32Bits() && wm.Is32Bits());
  DataProcessing3Source(xd, wn, wm, xa, SMADDL_x);
}


void Assembler::umsubl(const Register& xd,
                       const Register& wn,
                       const Register& wm,
                       const Register& xa) {
  VIXL_ASSERT(xd.Is64Bits() && xa.Is64Bits());
  VIXL_ASSERT(wn.Is32Bits() && wm.Is32Bits());
  DataProcessing3Source(xd, wn, wm, xa, UMSUBL_x);
}


void Assembler::smsubl(const Register& xd,
                       const Register& wn,
                       const Register& wm,
                       const Register& xa) {
  VIXL_ASSERT(xd.Is64Bits() && xa.Is64Bits());
  VIXL_ASSERT(wn.Is32Bits() && wm.Is32Bits());
  DataProcessing3Source(xd, wn, wm, xa, SMSUBL_x);
}


void Assembler::smull(const Register& xd,
                      const Register& wn,
                      const Register& wm) {
  VIXL_ASSERT(xd.Is64Bits());
  VIXL_ASSERT(wn.Is32Bits() && wm.Is32Bits());
  DataProcessing3Source(xd, wn, wm, xzr, SMADDL_x);
}


void Assembler::sdiv(const Register& rd,
                     const Register& rn,
                     const Register& rm) {
  VIXL_ASSERT(rd.GetSizeInBits() == rn.GetSizeInBits());
  VIXL_ASSERT(rd.GetSizeInBits() == rm.GetSizeInBits());
  Emit(SF(rd) | SDIV | Rm(rm) | Rn(rn) | Rd(rd));
}


void Assembler::smulh(const Register& xd,
                      const Register& xn,
                      const Register& xm) {
  VIXL_ASSERT(xd.Is64Bits() && xn.Is64Bits() && xm.Is64Bits());
  DataProcessing3Source(xd, xn, xm, xzr, SMULH_x);
}


void Assembler::umulh(const Register& xd,
                      const Register& xn,
                      const Register& xm) {
  VIXL_ASSERT(xd.Is64Bits() && xn.Is64Bits() && xm.Is64Bits());
  DataProcessing3Source(xd, xn, xm, xzr, UMULH_x);
}


void Assembler::udiv(const Register& rd,
                     const Register& rn,
                     const Register& rm) {
  VIXL_ASSERT(rd.GetSizeInBits() == rn.GetSizeInBits());
  VIXL_ASSERT(rd.GetSizeInBits() == rm.GetSizeInBits());
  Emit(SF(rd) | UDIV | Rm(rm) | Rn(rn) | Rd(rd));
}


void Assembler::rbit(const Register& rd, const Register& rn) {
  DataProcessing1Source(rd, rn, RBIT);
}


void Assembler::rev16(const Register& rd, const Register& rn) {
  DataProcessing1Source(rd, rn, REV16);
}


void Assembler::rev32(const Register& xd, const Register& xn) {
  VIXL_ASSERT(xd.Is64Bits());
  DataProcessing1Source(xd, xn, REV);
}


void Assembler::rev(const Register& rd, const Register& rn) {
  DataProcessing1Source(rd, rn, rd.Is64Bits() ? REV_x : REV_w);
}


void Assembler::clz(const Register& rd, const Register& rn) {
  DataProcessing1Source(rd, rn, CLZ);
}


void Assembler::cls(const Register& rd, const Register& rn) {
  DataProcessing1Source(rd, rn, CLS);
}

#define PAUTH_VARIATIONS(V) \
  V(paci, PACI)             \
  V(pacd, PACD)             \
  V(auti, AUTI)             \
  V(autd, AUTD)

#define DEFINE_ASM_FUNCS(PRE, OP)                                  \
  void Assembler::PRE##a(const Register& xd, const Register& xn) { \
    VIXL_ASSERT(CPUHas(CPUFeatures::kPAuth));                      \
    VIXL_ASSERT(xd.Is64Bits() && xn.Is64Bits());                   \
    Emit(SF(xd) | OP##A | Rd(xd) | RnSP(xn));                      \
  }                                                                \
                                                                   \
  void Assembler::PRE##za(const Register& xd) {                    \
    VIXL_ASSERT(CPUHas(CPUFeatures::kPAuth));                      \
    VIXL_ASSERT(xd.Is64Bits());                                    \
    Emit(SF(xd) | OP##ZA | Rd(xd));                                \
  }                                                                \
                                                                   \
  void Assembler::PRE##b(const Register& xd, const Register& xn) { \
    VIXL_ASSERT(CPUHas(CPUFeatures::kPAuth));                      \
    VIXL_ASSERT(xd.Is64Bits() && xn.Is64Bits());                   \
    Emit(SF(xd) | OP##B | Rd(xd) | RnSP(xn));                      \
  }                                                                \
                                                                   \
  void Assembler::PRE##zb(const Register& xd) {                    \
    VIXL_ASSERT(CPUHas(CPUFeatures::kPAuth));                      \
    VIXL_ASSERT(xd.Is64Bits());                                    \
    Emit(SF(xd) | OP##ZB | Rd(xd));                                \
  }

PAUTH_VARIATIONS(DEFINE_ASM_FUNCS)
#undef DEFINE_ASM_FUNCS

void Assembler::pacga(const Register& xd,
                      const Register& xn,
                      const Register& xm) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kPAuth, CPUFeatures::kPAuthGeneric));
  VIXL_ASSERT(xd.Is64Bits() && xn.Is64Bits() && xm.Is64Bits());
  Emit(SF(xd) | PACGA | Rd(xd) | Rn(xn) | RmSP(xm));
}

void Assembler::xpaci(const Register& xd) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kPAuth));
  VIXL_ASSERT(xd.Is64Bits());
  Emit(SF(xd) | XPACI | Rd(xd));
}

void Assembler::xpacd(const Register& xd) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kPAuth));
  VIXL_ASSERT(xd.Is64Bits());
  Emit(SF(xd) | XPACD | Rd(xd));
}


void Assembler::ldp(const CPURegister& rt,
                    const CPURegister& rt2,
                    const MemOperand& src) {
  LoadStorePair(rt, rt2, src, LoadPairOpFor(rt, rt2));
}


void Assembler::stp(const CPURegister& rt,
                    const CPURegister& rt2,
                    const MemOperand& dst) {
  LoadStorePair(rt, rt2, dst, StorePairOpFor(rt, rt2));
}


void Assembler::ldpsw(const Register& xt,
                      const Register& xt2,
                      const MemOperand& src) {
  VIXL_ASSERT(xt.Is64Bits() && xt2.Is64Bits());
  LoadStorePair(xt, xt2, src, LDPSW_x);
}


void Assembler::LoadStorePair(const CPURegister& rt,
                              const CPURegister& rt2,
                              const MemOperand& addr,
                              LoadStorePairOp op) {
  VIXL_ASSERT(CPUHas(rt, rt2));

  // 'rt' and 'rt2' can only be aliased for stores.
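  // (A load pair with rt == rt2 is CONSTRAINED UNPREDICTABLE in the
  // architecture, which is why it is asserted against here rather than
  // encoded.)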
  VIXL_ASSERT(((op & LoadStorePairLBit) == 0) || !rt.Is(rt2));
  VIXL_ASSERT(AreSameSizeAndType(rt, rt2));
  VIXL_ASSERT(IsImmLSPair(addr.GetOffset(), CalcLSPairDataSize(op)));

  int offset = static_cast<int>(addr.GetOffset());
  Instr memop = op | Rt(rt) | Rt2(rt2) | RnSP(addr.GetBaseRegister()) |
                ImmLSPair(offset, CalcLSPairDataSize(op));

  Instr addrmodeop;
  if (addr.IsImmediateOffset()) {
    addrmodeop = LoadStorePairOffsetFixed;
  } else {
    if (addr.IsPreIndex()) {
      addrmodeop = LoadStorePairPreIndexFixed;
    } else {
      VIXL_ASSERT(addr.IsPostIndex());
      addrmodeop = LoadStorePairPostIndexFixed;
    }
  }
  Emit(addrmodeop | memop);
}


void Assembler::ldnp(const CPURegister& rt,
                     const CPURegister& rt2,
                     const MemOperand& src) {
  LoadStorePairNonTemporal(rt, rt2, src, LoadPairNonTemporalOpFor(rt, rt2));
}


void Assembler::stnp(const CPURegister& rt,
                     const CPURegister& rt2,
                     const MemOperand& dst) {
  LoadStorePairNonTemporal(rt, rt2, dst, StorePairNonTemporalOpFor(rt, rt2));
}


void Assembler::LoadStorePairNonTemporal(const CPURegister& rt,
                                         const CPURegister& rt2,
                                         const MemOperand& addr,
                                         LoadStorePairNonTemporalOp op) {
  VIXL_ASSERT(CPUHas(rt, rt2));

  VIXL_ASSERT(!rt.Is(rt2));
  VIXL_ASSERT(AreSameSizeAndType(rt, rt2));
  VIXL_ASSERT(addr.IsImmediateOffset());

  unsigned size =
      CalcLSPairDataSize(static_cast<LoadStorePairOp>(op & LoadStorePairMask));
  VIXL_ASSERT(IsImmLSPair(addr.GetOffset(), size));
  int offset = static_cast<int>(addr.GetOffset());
  Emit(op | Rt(rt) | Rt2(rt2) | RnSP(addr.GetBaseRegister()) |
       ImmLSPair(offset, size));
}


// Memory instructions.
void Assembler::ldrb(const Register& rt,
                     const MemOperand& src,
                     LoadStoreScalingOption option) {
  VIXL_ASSERT(option != RequireUnscaledOffset);
  VIXL_ASSERT(option != PreferUnscaledOffset);
  LoadStore(rt, src, LDRB_w, option);
}


void Assembler::strb(const Register& rt,
                     const MemOperand& dst,
                     LoadStoreScalingOption option) {
  VIXL_ASSERT(option != RequireUnscaledOffset);
  VIXL_ASSERT(option != PreferUnscaledOffset);
  LoadStore(rt, dst, STRB_w, option);
}


void Assembler::ldrsb(const Register& rt,
                      const MemOperand& src,
                      LoadStoreScalingOption option) {
  VIXL_ASSERT(option != RequireUnscaledOffset);
  VIXL_ASSERT(option != PreferUnscaledOffset);
  LoadStore(rt, src, rt.Is64Bits() ? LDRSB_x : LDRSB_w, option);
}


void Assembler::ldrh(const Register& rt,
                     const MemOperand& src,
                     LoadStoreScalingOption option) {
  VIXL_ASSERT(option != RequireUnscaledOffset);
  VIXL_ASSERT(option != PreferUnscaledOffset);
  LoadStore(rt, src, LDRH_w, option);
}


void Assembler::strh(const Register& rt,
                     const MemOperand& dst,
                     LoadStoreScalingOption option) {
  VIXL_ASSERT(option != RequireUnscaledOffset);
  VIXL_ASSERT(option != PreferUnscaledOffset);
  LoadStore(rt, dst, STRH_w, option);
}


void Assembler::ldrsh(const Register& rt,
                      const MemOperand& src,
                      LoadStoreScalingOption option) {
  VIXL_ASSERT(option != RequireUnscaledOffset);
  VIXL_ASSERT(option != PreferUnscaledOffset);
  LoadStore(rt, src, rt.Is64Bits() ? LDRSH_x : LDRSH_w, option);
}


void Assembler::ldr(const CPURegister& rt,
                    const MemOperand& src,
                    LoadStoreScalingOption option) {
  VIXL_ASSERT(option != RequireUnscaledOffset);
  VIXL_ASSERT(option != PreferUnscaledOffset);
  LoadStore(rt, src, LoadOpFor(rt), option);
}


void Assembler::str(const CPURegister& rt,
                    const MemOperand& dst,
                    LoadStoreScalingOption option) {
  VIXL_ASSERT(option != RequireUnscaledOffset);
  VIXL_ASSERT(option != PreferUnscaledOffset);
  LoadStore(rt, dst, StoreOpFor(rt), option);
}


void Assembler::ldrsw(const Register& xt,
                      const MemOperand& src,
                      LoadStoreScalingOption option) {
  VIXL_ASSERT(xt.Is64Bits());
  VIXL_ASSERT(option != RequireUnscaledOffset);
  VIXL_ASSERT(option != PreferUnscaledOffset);
  LoadStore(xt, src, LDRSW_x, option);
}


void Assembler::ldurb(const Register& rt,
                      const MemOperand& src,
                      LoadStoreScalingOption option) {
  VIXL_ASSERT(option != RequireScaledOffset);
  VIXL_ASSERT(option != PreferScaledOffset);
  LoadStore(rt, src, LDRB_w, option);
}


void Assembler::sturb(const Register& rt,
                      const MemOperand& dst,
                      LoadStoreScalingOption option) {
  VIXL_ASSERT(option != RequireScaledOffset);
  VIXL_ASSERT(option != PreferScaledOffset);
  LoadStore(rt, dst, STRB_w, option);
}


void Assembler::ldursb(const Register& rt,
                       const MemOperand& src,
                       LoadStoreScalingOption option) {
  VIXL_ASSERT(option != RequireScaledOffset);
  VIXL_ASSERT(option != PreferScaledOffset);
  LoadStore(rt, src, rt.Is64Bits() ? LDRSB_x : LDRSB_w, option);
}


void Assembler::ldurh(const Register& rt,
                      const MemOperand& src,
                      LoadStoreScalingOption option) {
  VIXL_ASSERT(option != RequireScaledOffset);
  VIXL_ASSERT(option != PreferScaledOffset);
  LoadStore(rt, src, LDRH_w, option);
}


void Assembler::sturh(const Register& rt,
                      const MemOperand& dst,
                      LoadStoreScalingOption option) {
  VIXL_ASSERT(option != RequireScaledOffset);
  VIXL_ASSERT(option != PreferScaledOffset);
  LoadStore(rt, dst, STRH_w, option);
}


void Assembler::ldursh(const Register& rt,
                       const MemOperand& src,
                       LoadStoreScalingOption option) {
  VIXL_ASSERT(option != RequireScaledOffset);
  VIXL_ASSERT(option != PreferScaledOffset);
  LoadStore(rt, src, rt.Is64Bits() ? LDRSH_x : LDRSH_w, option);
}


void Assembler::ldur(const CPURegister& rt,
                     const MemOperand& src,
                     LoadStoreScalingOption option) {
  VIXL_ASSERT(option != RequireScaledOffset);
  VIXL_ASSERT(option != PreferScaledOffset);
  LoadStore(rt, src, LoadOpFor(rt), option);
}


void Assembler::stur(const CPURegister& rt,
                     const MemOperand& dst,
                     LoadStoreScalingOption option) {
  VIXL_ASSERT(option != RequireScaledOffset);
  VIXL_ASSERT(option != PreferScaledOffset);
  LoadStore(rt, dst, StoreOpFor(rt), option);
}


void Assembler::ldursw(const Register& xt,
                       const MemOperand& src,
                       LoadStoreScalingOption option) {
  VIXL_ASSERT(xt.Is64Bits());
  VIXL_ASSERT(option != RequireScaledOffset);
  VIXL_ASSERT(option != PreferScaledOffset);
  LoadStore(xt, src, LDRSW_x, option);
}


void Assembler::ldrsw(const Register& xt, RawLiteral* literal) {
  VIXL_ASSERT(xt.Is64Bits());
  VIXL_ASSERT(literal->GetSize() == kWRegSizeInBytes);
  ldrsw(xt, static_cast<int>(LinkAndGetWordOffsetTo(literal)));
}


void Assembler::ldr(const CPURegister& rt, RawLiteral* literal) {
  VIXL_ASSERT(CPUHas(rt));
  VIXL_ASSERT(literal->GetSize() == static_cast<size_t>(rt.GetSizeInBytes()));
  ldr(rt, static_cast<int>(LinkAndGetWordOffsetTo(literal)));
}


void Assembler::ldrsw(const Register& rt, int64_t imm19) {
  Emit(LDRSW_x_lit | ImmLLiteral(imm19) | Rt(rt));
}


void Assembler::ldr(const CPURegister& rt, int64_t imm19) {
  VIXL_ASSERT(CPUHas(rt));
  LoadLiteralOp op = LoadLiteralOpFor(rt);
  Emit(op | ImmLLiteral(imm19) | Rt(rt));
}


void Assembler::prfm(PrefetchOperation op, int64_t imm19) {
  Emit(PRFM_lit | ImmPrefetchOperation(op) | ImmLLiteral(imm19));
}


// Exclusive-access instructions.
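//
// Illustrative usage (assumed register assignments, not from this file): a
// typical atomic read-modify-write built from an exclusive pair retries
// until the store exclusive succeeds:
//   loop:
//     ldxr  x1, [x0]
//     add   x1, x1, #1
//     stxr  w2, x1, [x0]  // w2 is 0 if the store succeeded.
//     cbnz  w2, loop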
void Assembler::stxrb(const Register& rs,
                      const Register& rt,
                      const MemOperand& dst) {
  VIXL_ASSERT(dst.IsImmediateOffset() && (dst.GetOffset() == 0));
  Emit(STXRB_w | Rs(rs) | Rt(rt) | Rt2_mask | RnSP(dst.GetBaseRegister()));
}


void Assembler::stxrh(const Register& rs,
                      const Register& rt,
                      const MemOperand& dst) {
  VIXL_ASSERT(dst.IsImmediateOffset() && (dst.GetOffset() == 0));
  Emit(STXRH_w | Rs(rs) | Rt(rt) | Rt2_mask | RnSP(dst.GetBaseRegister()));
}


void Assembler::stxr(const Register& rs,
                     const Register& rt,
                     const MemOperand& dst) {
  VIXL_ASSERT(dst.IsImmediateOffset() && (dst.GetOffset() == 0));
  LoadStoreExclusive op = rt.Is64Bits() ? STXR_x : STXR_w;
  Emit(op | Rs(rs) | Rt(rt) | Rt2_mask | RnSP(dst.GetBaseRegister()));
}


void Assembler::ldxrb(const Register& rt, const MemOperand& src) {
  VIXL_ASSERT(src.IsImmediateOffset() && (src.GetOffset() == 0));
  Emit(LDXRB_w | Rs_mask | Rt(rt) | Rt2_mask | RnSP(src.GetBaseRegister()));
}


void Assembler::ldxrh(const Register& rt, const MemOperand& src) {
  VIXL_ASSERT(src.IsImmediateOffset() && (src.GetOffset() == 0));
  Emit(LDXRH_w | Rs_mask | Rt(rt) | Rt2_mask | RnSP(src.GetBaseRegister()));
}


void Assembler::ldxr(const Register& rt, const MemOperand& src) {
  VIXL_ASSERT(src.IsImmediateOffset() && (src.GetOffset() == 0));
  LoadStoreExclusive op = rt.Is64Bits() ? LDXR_x : LDXR_w;
  Emit(op | Rs_mask | Rt(rt) | Rt2_mask | RnSP(src.GetBaseRegister()));
}


void Assembler::stxp(const Register& rs,
                     const Register& rt,
                     const Register& rt2,
                     const MemOperand& dst) {
  VIXL_ASSERT(rt.GetSizeInBits() == rt2.GetSizeInBits());
  VIXL_ASSERT(dst.IsImmediateOffset() && (dst.GetOffset() == 0));
  LoadStoreExclusive op = rt.Is64Bits() ? STXP_x : STXP_w;
  Emit(op | Rs(rs) | Rt(rt) | Rt2(rt2) | RnSP(dst.GetBaseRegister()));
}


void Assembler::ldxp(const Register& rt,
                     const Register& rt2,
                     const MemOperand& src) {
  VIXL_ASSERT(rt.GetSizeInBits() == rt2.GetSizeInBits());
  VIXL_ASSERT(src.IsImmediateOffset() && (src.GetOffset() == 0));
  LoadStoreExclusive op = rt.Is64Bits() ? LDXP_x : LDXP_w;
  Emit(op | Rs_mask | Rt(rt) | Rt2(rt2) | RnSP(src.GetBaseRegister()));
}


void Assembler::stlxrb(const Register& rs,
                       const Register& rt,
                       const MemOperand& dst) {
  VIXL_ASSERT(dst.IsImmediateOffset() && (dst.GetOffset() == 0));
  Emit(STLXRB_w | Rs(rs) | Rt(rt) | Rt2_mask | RnSP(dst.GetBaseRegister()));
}


void Assembler::stlxrh(const Register& rs,
                       const Register& rt,
                       const MemOperand& dst) {
  VIXL_ASSERT(dst.IsImmediateOffset() && (dst.GetOffset() == 0));
  Emit(STLXRH_w | Rs(rs) | Rt(rt) | Rt2_mask | RnSP(dst.GetBaseRegister()));
}


void Assembler::stlxr(const Register& rs,
                      const Register& rt,
                      const MemOperand& dst) {
  VIXL_ASSERT(dst.IsImmediateOffset() && (dst.GetOffset() == 0));
  LoadStoreExclusive op = rt.Is64Bits() ? STLXR_x : STLXR_w;
  Emit(op | Rs(rs) | Rt(rt) | Rt2_mask | RnSP(dst.GetBaseRegister()));
}


void Assembler::ldaxrb(const Register& rt, const MemOperand& src) {
  VIXL_ASSERT(src.IsImmediateOffset() && (src.GetOffset() == 0));
  Emit(LDAXRB_w | Rs_mask | Rt(rt) | Rt2_mask | RnSP(src.GetBaseRegister()));
}


void Assembler::ldaxrh(const Register& rt, const MemOperand& src) {
  VIXL_ASSERT(src.IsImmediateOffset() && (src.GetOffset() == 0));
  Emit(LDAXRH_w | Rs_mask | Rt(rt) | Rt2_mask | RnSP(src.GetBaseRegister()));
}


void Assembler::ldaxr(const Register& rt, const MemOperand& src) {
  VIXL_ASSERT(src.IsImmediateOffset() && (src.GetOffset() == 0));
  LoadStoreExclusive op = rt.Is64Bits() ? LDAXR_x : LDAXR_w;
  Emit(op | Rs_mask | Rt(rt) | Rt2_mask | RnSP(src.GetBaseRegister()));
}


void Assembler::stlxp(const Register& rs,
                      const Register& rt,
                      const Register& rt2,
                      const MemOperand& dst) {
  VIXL_ASSERT(rt.GetSizeInBits() == rt2.GetSizeInBits());
  VIXL_ASSERT(dst.IsImmediateOffset() && (dst.GetOffset() == 0));
  LoadStoreExclusive op = rt.Is64Bits() ? STLXP_x : STLXP_w;
  Emit(op | Rs(rs) | Rt(rt) | Rt2(rt2) | RnSP(dst.GetBaseRegister()));
}


void Assembler::ldaxp(const Register& rt,
                      const Register& rt2,
                      const MemOperand& src) {
  VIXL_ASSERT(rt.GetSizeInBits() == rt2.GetSizeInBits());
  VIXL_ASSERT(src.IsImmediateOffset() && (src.GetOffset() == 0));
  LoadStoreExclusive op = rt.Is64Bits() ? LDAXP_x : LDAXP_w;
  Emit(op | Rs_mask | Rt(rt) | Rt2(rt2) | RnSP(src.GetBaseRegister()));
}


void Assembler::stlrb(const Register& rt, const MemOperand& dst) {
  VIXL_ASSERT(dst.IsImmediateOffset() && (dst.GetOffset() == 0));
  Emit(STLRB_w | Rs_mask | Rt(rt) | Rt2_mask | RnSP(dst.GetBaseRegister()));
}


void Assembler::stlrh(const Register& rt, const MemOperand& dst) {
  VIXL_ASSERT(dst.IsImmediateOffset() && (dst.GetOffset() == 0));
  Emit(STLRH_w | Rs_mask | Rt(rt) | Rt2_mask | RnSP(dst.GetBaseRegister()));
}


void Assembler::stlr(const Register& rt, const MemOperand& dst) {
  VIXL_ASSERT(dst.IsImmediateOffset() && (dst.GetOffset() == 0));
  LoadStoreExclusive op = rt.Is64Bits() ? STLR_x : STLR_w;
  Emit(op | Rs_mask | Rt(rt) | Rt2_mask | RnSP(dst.GetBaseRegister()));
}


void Assembler::ldarb(const Register& rt, const MemOperand& src) {
  VIXL_ASSERT(src.IsImmediateOffset() && (src.GetOffset() == 0));
  Emit(LDARB_w | Rs_mask | Rt(rt) | Rt2_mask | RnSP(src.GetBaseRegister()));
}


void Assembler::ldarh(const Register& rt, const MemOperand& src) {
  VIXL_ASSERT(src.IsImmediateOffset() && (src.GetOffset() == 0));
  Emit(LDARH_w | Rs_mask | Rt(rt) | Rt2_mask | RnSP(src.GetBaseRegister()));
}


void Assembler::ldar(const Register& rt, const MemOperand& src) {
  VIXL_ASSERT(src.IsImmediateOffset() && (src.GetOffset() == 0));
  LoadStoreExclusive op = rt.Is64Bits() ? LDAR_x : LDAR_w;
  Emit(op | Rs_mask | Rt(rt) | Rt2_mask | RnSP(src.GetBaseRegister()));
}


void Assembler::stllrb(const Register& rt, const MemOperand& dst) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kLORegions));
  VIXL_ASSERT(dst.IsImmediateOffset() && (dst.GetOffset() == 0));
  Emit(STLLRB | Rs_mask | Rt(rt) | Rt2_mask | RnSP(dst.GetBaseRegister()));
}


void Assembler::stllrh(const Register& rt, const MemOperand& dst) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kLORegions));
  VIXL_ASSERT(dst.IsImmediateOffset() && (dst.GetOffset() == 0));
  Emit(STLLRH | Rs_mask | Rt(rt) | Rt2_mask | RnSP(dst.GetBaseRegister()));
}


void Assembler::stllr(const Register& rt, const MemOperand& dst) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kLORegions));
  VIXL_ASSERT(dst.IsImmediateOffset() && (dst.GetOffset() == 0));
  LoadStoreExclusive op = rt.Is64Bits() ? STLLR_x : STLLR_w;
  Emit(op | Rs_mask | Rt(rt) | Rt2_mask | RnSP(dst.GetBaseRegister()));
}


void Assembler::ldlarb(const Register& rt, const MemOperand& src) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kLORegions));
  VIXL_ASSERT(src.IsImmediateOffset() && (src.GetOffset() == 0));
  Emit(LDLARB | Rs_mask | Rt(rt) | Rt2_mask | RnSP(src.GetBaseRegister()));
}


void Assembler::ldlarh(const Register& rt, const MemOperand& src) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kLORegions));
  VIXL_ASSERT(src.IsImmediateOffset() && (src.GetOffset() == 0));
  Emit(LDLARH | Rs_mask | Rt(rt) | Rt2_mask | RnSP(src.GetBaseRegister()));
}


void Assembler::ldlar(const Register& rt, const MemOperand& src) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kLORegions));
  VIXL_ASSERT(src.IsImmediateOffset() && (src.GetOffset() == 0));
  LoadStoreExclusive op = rt.Is64Bits() ? LDLAR_x : LDLAR_w;
  Emit(op | Rs_mask | Rt(rt) | Rt2_mask | RnSP(src.GetBaseRegister()));
}


// clang-format off
#define COMPARE_AND_SWAP_W_X_LIST(V) \
  V(cas,   CAS)                      \
  V(casa,  CASA)                     \
  V(casl,  CASL)                     \
  V(casal, CASAL)
// clang-format on

#define DEFINE_ASM_FUNC(FN, OP)                                          \
  void Assembler::FN(const Register& rs,                                 \
                     const Register& rt,                                 \
                     const MemOperand& src) {                            \
    VIXL_ASSERT(CPUHas(CPUFeatures::kAtomics));                          \
    VIXL_ASSERT(src.IsImmediateOffset() && (src.GetOffset() == 0));      \
    LoadStoreExclusive op = rt.Is64Bits() ? OP##_x : OP##_w;             \
    Emit(op | Rs(rs) | Rt(rt) | Rt2_mask | RnSP(src.GetBaseRegister())); \
  }
COMPARE_AND_SWAP_W_X_LIST(DEFINE_ASM_FUNC)
#undef DEFINE_ASM_FUNC

// clang-format off
#define COMPARE_AND_SWAP_W_LIST(V) \
  V(casb,   CASB)                  \
  V(casab,  CASAB)                 \
  V(caslb,  CASLB)                 \
  V(casalb, CASALB)                \
  V(cash,   CASH)                  \
  V(casah,  CASAH)                 \
  V(caslh,  CASLH)                 \
  V(casalh, CASALH)
// clang-format on

#define DEFINE_ASM_FUNC(FN, OP)                                          \
  void Assembler::FN(const Register& rs,                                 \
                     const Register& rt,                                 \
                     const MemOperand& src) {                            \
    VIXL_ASSERT(CPUHas(CPUFeatures::kAtomics));                          \
    VIXL_ASSERT(src.IsImmediateOffset() && (src.GetOffset() == 0));      \
    Emit(OP | Rs(rs) | Rt(rt) | Rt2_mask | RnSP(src.GetBaseRegister())); \
  }
COMPARE_AND_SWAP_W_LIST(DEFINE_ASM_FUNC)
#undef DEFINE_ASM_FUNC


// clang-format off
#define COMPARE_AND_SWAP_PAIR_LIST(V) \
  V(casp,   CASP)                     \
  V(caspa,  CASPA)                    \
  V(caspl,  CASPL)                    \
  V(caspal, CASPAL)
// clang-format on

#define DEFINE_ASM_FUNC(FN, OP)                                          \
  void Assembler::FN(const Register& rs,                                 \
                     const Register& rs1,                                \
                     const Register& rt,                                 \
                     const Register& rt1,                                \
                     const MemOperand& src) {                            \
    VIXL_ASSERT(CPUHas(CPUFeatures::kAtomics));                          \
    USE(rs1, rt1);                                                       \
    VIXL_ASSERT(src.IsImmediateOffset() && (src.GetOffset() == 0));      \
    VIXL_ASSERT(AreEven(rs, rt));                                        \
    VIXL_ASSERT(AreConsecutive(rs, rs1));                                \
    VIXL_ASSERT(AreConsecutive(rt, rt1));                                \
    LoadStoreExclusive op = rt.Is64Bits() ? OP##_x : OP##_w;             \
    Emit(op | Rs(rs) | Rt(rt) | Rt2_mask | RnSP(src.GetBaseRegister())); \
  }
COMPARE_AND_SWAP_PAIR_LIST(DEFINE_ASM_FUNC)
#undef DEFINE_ASM_FUNC

// These macros generate all the variations of the atomic memory operations,
// e.g. ldadd, ldadda, ldaddb, staddl, etc.
// For a full list of the methods with comments, see the assembler header file.
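//
// For example, the "add" row below generates ldadd, ldaddl, ldaddb, ldaddlb,
// ldaddh and ldaddlh; the load modes additionally generate the acquire forms
// (ldadda, ldaddal, ldaddab, ldaddalb, ldaddah, ldaddalh); and the store
// modes generate the corresponding stadd variants, which simply discard the
// loaded value by using the zero register.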

// clang-format off
#define ATOMIC_MEMORY_SIMPLE_OPERATION_LIST(V, DEF) \
  V(DEF, add,  LDADD)                               \
  V(DEF, clr,  LDCLR)                               \
  V(DEF, eor,  LDEOR)                               \
  V(DEF, set,  LDSET)                               \
  V(DEF, smax, LDSMAX)                              \
  V(DEF, smin, LDSMIN)                              \
  V(DEF, umax, LDUMAX)                              \
  V(DEF, umin, LDUMIN)

#define ATOMIC_MEMORY_STORE_MODES(V, NAME, OP) \
  V(NAME,     OP##_x,  OP##_w)                 \
  V(NAME##l,  OP##L_x, OP##L_w)                \
  V(NAME##b,  OP##B,   OP##B)                  \
  V(NAME##lb, OP##LB,  OP##LB)                 \
  V(NAME##h,  OP##H,   OP##H)                  \
  V(NAME##lh, OP##LH,  OP##LH)

#define ATOMIC_MEMORY_LOAD_MODES(V, NAME, OP) \
  ATOMIC_MEMORY_STORE_MODES(V, NAME, OP)      \
  V(NAME##a,   OP##A_x,  OP##A_w)             \
  V(NAME##al,  OP##AL_x, OP##AL_w)            \
  V(NAME##ab,  OP##AB,   OP##AB)              \
  V(NAME##alb, OP##ALB,  OP##ALB)             \
  V(NAME##ah,  OP##AH,   OP##AH)              \
  V(NAME##alh, OP##ALH,  OP##ALH)
// clang-format on

#define DEFINE_ASM_LOAD_FUNC(FN, OP_X, OP_W)                        \
  void Assembler::ld##FN(const Register& rs,                        \
                         const Register& rt,                        \
                         const MemOperand& src) {                   \
    VIXL_ASSERT(CPUHas(CPUFeatures::kAtomics));                     \
    VIXL_ASSERT(src.IsImmediateOffset() && (src.GetOffset() == 0)); \
    AtomicMemoryOp op = rt.Is64Bits() ? OP_X : OP_W;                \
    Emit(op | Rs(rs) | Rt(rt) | RnSP(src.GetBaseRegister()));       \
  }
#define DEFINE_ASM_STORE_FUNC(FN, OP_X, OP_W)                          \
  void Assembler::st##FN(const Register& rs, const MemOperand& src) { \
    VIXL_ASSERT(CPUHas(CPUFeatures::kAtomics));                        \
    ld##FN(rs, AppropriateZeroRegFor(rs), src);                        \
  }

ATOMIC_MEMORY_SIMPLE_OPERATION_LIST(ATOMIC_MEMORY_LOAD_MODES,
                                    DEFINE_ASM_LOAD_FUNC)
ATOMIC_MEMORY_SIMPLE_OPERATION_LIST(ATOMIC_MEMORY_STORE_MODES,
                                    DEFINE_ASM_STORE_FUNC)

#define DEFINE_ASM_SWP_FUNC(FN, OP_X, OP_W)                         \
  void Assembler::FN(const Register& rs,                            \
                     const Register& rt,                            \
                     const MemOperand& src) {                       \
    VIXL_ASSERT(CPUHas(CPUFeatures::kAtomics));                     \
    VIXL_ASSERT(src.IsImmediateOffset() && (src.GetOffset() == 0)); \
    AtomicMemoryOp op = rt.Is64Bits() ? OP_X : OP_W;                \
    Emit(op | Rs(rs) | Rt(rt) | RnSP(src.GetBaseRegister()));       \
  }

ATOMIC_MEMORY_LOAD_MODES(DEFINE_ASM_SWP_FUNC, swp, SWP)

#undef DEFINE_ASM_LOAD_FUNC
#undef DEFINE_ASM_STORE_FUNC
#undef DEFINE_ASM_SWP_FUNC


void Assembler::ldaprb(const Register& rt, const MemOperand& src) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kRCpc));
  VIXL_ASSERT(src.IsImmediateOffset() && (src.GetOffset() == 0));
  AtomicMemoryOp op = LDAPRB;
  Emit(op | Rs(xzr) | Rt(rt) | RnSP(src.GetBaseRegister()));
}

void Assembler::ldaprh(const Register& rt, const MemOperand& src) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kRCpc));
  VIXL_ASSERT(src.IsImmediateOffset() && (src.GetOffset() == 0));
  AtomicMemoryOp op = LDAPRH;
  Emit(op | Rs(xzr) | Rt(rt) | RnSP(src.GetBaseRegister()));
}

void Assembler::ldapr(const Register& rt, const MemOperand& src) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kRCpc));
  VIXL_ASSERT(src.IsImmediateOffset() && (src.GetOffset() == 0));
  AtomicMemoryOp op = rt.Is64Bits() ? LDAPR_x : LDAPR_w;
  Emit(op | Rs(xzr) | Rt(rt) | RnSP(src.GetBaseRegister()));
}

void Assembler::prfm(PrefetchOperation op,
                     const MemOperand& address,
                     LoadStoreScalingOption option) {
  VIXL_ASSERT(option != RequireUnscaledOffset);
  VIXL_ASSERT(option != PreferUnscaledOffset);
  Prefetch(op, address, option);
}


void Assembler::prfum(PrefetchOperation op,
                      const MemOperand& address,
                      LoadStoreScalingOption option) {
  VIXL_ASSERT(option != RequireScaledOffset);
  VIXL_ASSERT(option != PreferScaledOffset);
  Prefetch(op, address, option);
}


void Assembler::prfm(PrefetchOperation op, RawLiteral* literal) {
  prfm(op, static_cast<int>(LinkAndGetWordOffsetTo(literal)));
}


void Assembler::sys(int op1, int crn, int crm, int op2, const Register& xt) {
  VIXL_ASSERT(xt.Is64Bits());
  Emit(SYS | ImmSysOp1(op1) | CRn(crn) | CRm(crm) | ImmSysOp2(op2) | Rt(xt));
}


void Assembler::sys(int op, const Register& xt) {
  VIXL_ASSERT(xt.Is64Bits());
  Emit(SYS | SysOp(op) | Rt(xt));
}


void Assembler::dc(DataCacheOp op, const Register& rt) {
  VIXL_ASSERT((op == CVAC) || (op == CVAU) || (op == CIVAC) || (op == ZVA));
  sys(op, rt);
}


void Assembler::ic(InstructionCacheOp op, const Register& rt) {
  VIXL_ASSERT(op == IVAU);
  sys(op, rt);
}


void Assembler::hint(SystemHint code) { hint(static_cast<int>(code)); }


void Assembler::hint(int imm7) {
  VIXL_ASSERT(IsUint7(imm7));
  Emit(HINT | ImmHint(imm7) | Rt(xzr));
}


// NEON structure loads and stores.
Instr Assembler::LoadStoreStructAddrModeField(const MemOperand& addr) {
  Instr addr_field = RnSP(addr.GetBaseRegister());

  if (addr.IsPostIndex()) {
    VIXL_STATIC_ASSERT(NEONLoadStoreMultiStructPostIndex ==
                       static_cast<NEONLoadStoreMultiStructPostIndexOp>(
                           NEONLoadStoreSingleStructPostIndex));

    addr_field |= NEONLoadStoreMultiStructPostIndex;
    if (addr.GetOffset() == 0) {
      addr_field |= RmNot31(addr.GetRegisterOffset());
    } else {
      // The immediate post index addressing mode is indicated by rm = 31.
      // The immediate is implied by the number of vector registers used.
      addr_field |= (0x1f << Rm_offset);
    }
  } else {
    VIXL_ASSERT(addr.IsImmediateOffset() && (addr.GetOffset() == 0));
  }
  return addr_field;
}

void Assembler::LoadStoreStructVerify(const VRegister& vt,
                                      const MemOperand& addr,
                                      Instr op) {
#ifdef VIXL_DEBUG
  // Assert that addressing mode is either offset (with immediate 0), post
  // index by immediate of the size of the register list, or post index by a
  // value in a core register.
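  //
  // For example (illustrative): "ld1 {v0.16b, v1.16b, v2.16b, v3.16b},
  // [x0], #64" must post-index by 64, because the list transfers four
  // 16-byte registers (vt.GetSizeInBytes() * 4 in the checks below).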
  if (addr.IsImmediateOffset()) {
    VIXL_ASSERT(addr.GetOffset() == 0);
  } else {
    int offset = vt.GetSizeInBytes();
    switch (op) {
      case NEON_LD1_1v:
      case NEON_ST1_1v:
        offset *= 1;
        break;
      case NEONLoadStoreSingleStructLoad1:
      case NEONLoadStoreSingleStructStore1:
      case NEON_LD1R:
        offset = (offset / vt.GetLanes()) * 1;
        break;

      case NEON_LD1_2v:
      case NEON_ST1_2v:
      case NEON_LD2:
      case NEON_ST2:
        offset *= 2;
        break;
      case NEONLoadStoreSingleStructLoad2:
      case NEONLoadStoreSingleStructStore2:
      case NEON_LD2R:
        offset = (offset / vt.GetLanes()) * 2;
        break;

      case NEON_LD1_3v:
      case NEON_ST1_3v:
      case NEON_LD3:
      case NEON_ST3:
        offset *= 3;
        break;
      case NEONLoadStoreSingleStructLoad3:
      case NEONLoadStoreSingleStructStore3:
      case NEON_LD3R:
        offset = (offset / vt.GetLanes()) * 3;
        break;

      case NEON_LD1_4v:
      case NEON_ST1_4v:
      case NEON_LD4:
      case NEON_ST4:
        offset *= 4;
        break;
      case NEONLoadStoreSingleStructLoad4:
      case NEONLoadStoreSingleStructStore4:
      case NEON_LD4R:
        offset = (offset / vt.GetLanes()) * 4;
        break;
      default:
        VIXL_UNREACHABLE();
    }
    VIXL_ASSERT(!addr.GetRegisterOffset().Is(NoReg) ||
                addr.GetOffset() == offset);
  }
#else
  USE(vt, addr, op);
#endif
}

void Assembler::LoadStoreStruct(const VRegister& vt,
                                const MemOperand& addr,
                                NEONLoadStoreMultiStructOp op) {
  LoadStoreStructVerify(vt, addr, op);
  VIXL_ASSERT(vt.IsVector() || vt.Is1D());
  Emit(op | LoadStoreStructAddrModeField(addr) | LSVFormat(vt) | Rt(vt));
}


void Assembler::LoadStoreStructSingleAllLanes(const VRegister& vt,
                                              const MemOperand& addr,
                                              NEONLoadStoreSingleStructOp op) {
  LoadStoreStructVerify(vt, addr, op);
  Emit(op | LoadStoreStructAddrModeField(addr) | LSVFormat(vt) | Rt(vt));
}


void Assembler::ld1(const VRegister& vt, const MemOperand& src) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
  LoadStoreStruct(vt, src, NEON_LD1_1v);
}


void Assembler::ld1(const VRegister& vt,
                    const VRegister& vt2,
                    const MemOperand& src) {
  USE(vt2);
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
  VIXL_ASSERT(AreSameFormat(vt, vt2));
  VIXL_ASSERT(AreConsecutive(vt, vt2));
  LoadStoreStruct(vt, src, NEON_LD1_2v);
}


void Assembler::ld1(const VRegister& vt,
                    const VRegister& vt2,
                    const VRegister& vt3,
                    const MemOperand& src) {
  USE(vt2, vt3);
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
  VIXL_ASSERT(AreSameFormat(vt, vt2, vt3));
  VIXL_ASSERT(AreConsecutive(vt, vt2, vt3));
  LoadStoreStruct(vt, src, NEON_LD1_3v);
}


void Assembler::ld1(const VRegister& vt,
                    const VRegister& vt2,
                    const VRegister& vt3,
                    const VRegister& vt4,
                    const MemOperand& src) {
  USE(vt2, vt3, vt4);
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
  VIXL_ASSERT(AreSameFormat(vt, vt2, vt3, vt4));
  VIXL_ASSERT(AreConsecutive(vt, vt2, vt3, vt4));
  LoadStoreStruct(vt, src, NEON_LD1_4v);
}


void Assembler::ld2(const VRegister& vt,
                    const VRegister& vt2,
                    const MemOperand& src) {
  USE(vt2);
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
  VIXL_ASSERT(AreSameFormat(vt, vt2));
  VIXL_ASSERT(AreConsecutive(vt, vt2));
  LoadStoreStruct(vt, src, NEON_LD2);
}


void Assembler::ld2(const VRegister& vt,
                    const VRegister& vt2,
                    int lane,
                    const MemOperand& src) {
  USE(vt2);
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
  VIXL_ASSERT(AreSameFormat(vt, vt2));
  VIXL_ASSERT(AreConsecutive(vt, vt2));
  LoadStoreStructSingle(vt, lane, src, NEONLoadStoreSingleStructLoad2);
}


void Assembler::ld2r(const VRegister& vt,
                     const VRegister& vt2,
                     const MemOperand& src) {
  USE(vt2);
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
  VIXL_ASSERT(AreSameFormat(vt, vt2));
  VIXL_ASSERT(AreConsecutive(vt, vt2));
  LoadStoreStructSingleAllLanes(vt, src, NEON_LD2R);
}


void Assembler::ld3(const VRegister& vt,
                    const VRegister& vt2,
                    const VRegister& vt3,
                    const MemOperand& src) {
  USE(vt2, vt3);
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
  VIXL_ASSERT(AreSameFormat(vt, vt2, vt3));
  VIXL_ASSERT(AreConsecutive(vt, vt2, vt3));
  LoadStoreStruct(vt, src, NEON_LD3);
}


void Assembler::ld3(const VRegister& vt,
                    const VRegister& vt2,
                    const VRegister& vt3,
                    int lane,
                    const MemOperand& src) {
  USE(vt2, vt3);
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
  VIXL_ASSERT(AreSameFormat(vt, vt2, vt3));
  VIXL_ASSERT(AreConsecutive(vt, vt2, vt3));
  LoadStoreStructSingle(vt, lane, src, NEONLoadStoreSingleStructLoad3);
}


void Assembler::ld3r(const VRegister& vt,
                     const VRegister& vt2,
                     const VRegister& vt3,
                     const MemOperand& src) {
  USE(vt2, vt3);
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
  VIXL_ASSERT(AreSameFormat(vt, vt2, vt3));
  VIXL_ASSERT(AreConsecutive(vt, vt2, vt3));
  LoadStoreStructSingleAllLanes(vt, src, NEON_LD3R);
}


void Assembler::ld4(const VRegister& vt,
                    const VRegister& vt2,
                    const VRegister& vt3,
                    const VRegister& vt4,
                    const MemOperand& src) {
  USE(vt2, vt3, vt4);
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
  VIXL_ASSERT(AreSameFormat(vt, vt2, vt3, vt4));
  VIXL_ASSERT(AreConsecutive(vt, vt2, vt3, vt4));
  LoadStoreStruct(vt, src, NEON_LD4);
}


void Assembler::ld4(const VRegister& vt,
                    const VRegister& vt2,
                    const VRegister& vt3,
                    const VRegister& vt4,
                    int lane,
                    const MemOperand& src) {
  USE(vt2, vt3, vt4);
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
  VIXL_ASSERT(AreSameFormat(vt, vt2, vt3, vt4));
  VIXL_ASSERT(AreConsecutive(vt, vt2, vt3, vt4));
  LoadStoreStructSingle(vt, lane, src, NEONLoadStoreSingleStructLoad4);
}


void Assembler::ld4r(const VRegister& vt,
                     const VRegister& vt2,
                     const VRegister& vt3,
                     const VRegister& vt4,
                     const MemOperand& src) {
  USE(vt2, vt3, vt4);
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
  VIXL_ASSERT(AreSameFormat(vt, vt2, vt3, vt4));
  VIXL_ASSERT(AreConsecutive(vt, vt2, vt3, vt4));
  LoadStoreStructSingleAllLanes(vt, src, NEON_LD4R);
}


void Assembler::st1(const VRegister& vt, const MemOperand& src) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
  LoadStoreStruct(vt, src, NEON_ST1_1v);
}


void Assembler::st1(const VRegister& vt,
                    const VRegister& vt2,
                    const MemOperand& src) {
  USE(vt2);
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
  VIXL_ASSERT(AreSameFormat(vt, vt2));
  VIXL_ASSERT(AreConsecutive(vt, vt2));
  LoadStoreStruct(vt, src, NEON_ST1_2v);
}


void Assembler::st1(const VRegister& vt,
                    const VRegister& vt2,
                    const VRegister& vt3,
                    const MemOperand& src) {
  USE(vt2, vt3);
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
  VIXL_ASSERT(AreSameFormat(vt, vt2, vt3));
  VIXL_ASSERT(AreConsecutive(vt, vt2, vt3));
  LoadStoreStruct(vt, src, NEON_ST1_3v);
}


void Assembler::st1(const VRegister& vt,
                    const VRegister& vt2,
                    const VRegister& vt3,
                    const VRegister& vt4,
                    const MemOperand& src) {
  USE(vt2, vt3, vt4);
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
  VIXL_ASSERT(AreSameFormat(vt, vt2, vt3, vt4));
  VIXL_ASSERT(AreConsecutive(vt, vt2, vt3, vt4));
  LoadStoreStruct(vt, src, NEON_ST1_4v);
}


void Assembler::st2(const VRegister& vt,
                    const VRegister& vt2,
                    const MemOperand& dst) {
  USE(vt2);
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
  VIXL_ASSERT(AreSameFormat(vt, vt2));
  VIXL_ASSERT(AreConsecutive(vt, vt2));
  LoadStoreStruct(vt, dst, NEON_ST2);
}


void Assembler::st2(const VRegister& vt,
                    const VRegister& vt2,
                    int lane,
                    const MemOperand& dst) {
  USE(vt2);
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
  VIXL_ASSERT(AreSameFormat(vt, vt2));
  VIXL_ASSERT(AreConsecutive(vt, vt2));
  LoadStoreStructSingle(vt, lane, dst, NEONLoadStoreSingleStructStore2);
}


void Assembler::st3(const VRegister& vt,
                    const VRegister& vt2,
                    const VRegister& vt3,
                    const MemOperand& dst) {
  USE(vt2, vt3);
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
  VIXL_ASSERT(AreSameFormat(vt, vt2, vt3));
  VIXL_ASSERT(AreConsecutive(vt, vt2, vt3));
  LoadStoreStruct(vt, dst, NEON_ST3);
}


void Assembler::st3(const VRegister& vt,
                    const VRegister& vt2,
                    const VRegister& vt3,
                    int lane,
                    const MemOperand& dst) {
  USE(vt2, vt3);
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
  VIXL_ASSERT(AreSameFormat(vt, vt2, vt3));
  VIXL_ASSERT(AreConsecutive(vt, vt2, vt3));
  LoadStoreStructSingle(vt, lane, dst, NEONLoadStoreSingleStructStore3);
}


void Assembler::st4(const VRegister& vt,
                    const VRegister& vt2,
                    const VRegister& vt3,
                    const VRegister& vt4,
                    const MemOperand& dst) {
  USE(vt2, vt3, vt4);
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
  VIXL_ASSERT(AreSameFormat(vt, vt2, vt3, vt4));
  VIXL_ASSERT(AreConsecutive(vt, vt2, vt3, vt4));
  LoadStoreStruct(vt, dst, NEON_ST4);
}


void Assembler::st4(const VRegister& vt,
                    const VRegister& vt2,
                    const VRegister& vt3,
                    const VRegister& vt4,
                    int lane,
                    const MemOperand& dst) {
  USE(vt2, vt3, vt4);
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
  VIXL_ASSERT(AreSameFormat(vt, vt2, vt3, vt4));
  VIXL_ASSERT(AreConsecutive(vt, vt2, vt3, vt4));
  LoadStoreStructSingle(vt, lane, dst, NEONLoadStoreSingleStructStore4);
}


void Assembler::LoadStoreStructSingle(const VRegister& vt,
                                      uint32_t lane,
                                      const MemOperand& addr,
                                      NEONLoadStoreSingleStructOp op) {
  LoadStoreStructVerify(vt, addr, op);

  // We support vt arguments of the form vt.VxT() or vt.T(), where x is the
  // number of lanes, and T is b, h, s or d.
  unsigned lane_size = vt.GetLaneSizeInBytes();
  VIXL_ASSERT(lane < (kQRegSizeInBytes / lane_size));

  // Lane size is encoded in the opcode field. Lane index is encoded in the
  // Q, S and size fields.
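  // Worked example (added for illustration): for H lanes (lane_size == 2)
  // and lane index 5, the byte offset is 10 == 0b1010, giving Q = 1, S = 0
  // and size = 0b10. For D lanes, the increment below forces the low size
  // bit to 1, as the encoding requires.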
  lane *= lane_size;
  if (lane_size == 8) lane++;

  Instr size = (lane << NEONLSSize_offset) & NEONLSSize_mask;
  Instr s = (lane << (NEONS_offset - 2)) & NEONS_mask;
  Instr q = (lane << (NEONQ_offset - 3)) & NEONQ_mask;

  Instr instr = op;
  switch (lane_size) {
    case 1:
      instr |= NEONLoadStoreSingle_b;
      break;
    case 2:
      instr |= NEONLoadStoreSingle_h;
      break;
    case 4:
      instr |= NEONLoadStoreSingle_s;
      break;
    default:
      VIXL_ASSERT(lane_size == 8);
      instr |= NEONLoadStoreSingle_d;
  }

  Emit(instr | LoadStoreStructAddrModeField(addr) | q | size | s | Rt(vt));
}


void Assembler::ld1(const VRegister& vt, int lane, const MemOperand& src) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
  LoadStoreStructSingle(vt, lane, src, NEONLoadStoreSingleStructLoad1);
}


void Assembler::ld1r(const VRegister& vt, const MemOperand& src) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
  LoadStoreStructSingleAllLanes(vt, src, NEON_LD1R);
}


void Assembler::st1(const VRegister& vt, int lane, const MemOperand& dst) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
  LoadStoreStructSingle(vt, lane, dst, NEONLoadStoreSingleStructStore1);
}


void Assembler::NEON3DifferentL(const VRegister& vd,
                                const VRegister& vn,
                                const VRegister& vm,
                                NEON3DifferentOp vop) {
  VIXL_ASSERT(AreSameFormat(vn, vm));
  VIXL_ASSERT((vn.Is1H() && vd.Is1S()) || (vn.Is1S() && vd.Is1D()) ||
              (vn.Is8B() && vd.Is8H()) || (vn.Is4H() && vd.Is4S()) ||
              (vn.Is2S() && vd.Is2D()) || (vn.Is16B() && vd.Is8H()) ||
              (vn.Is8H() && vd.Is4S()) || (vn.Is4S() && vd.Is2D()));
  Instr format, op = vop;
  if (vd.IsScalar()) {
    op |= NEON_Q | NEONScalar;
    format = SFormat(vn);
  } else {
    format = VFormat(vn);
  }
  Emit(format | op | Rm(vm) | Rn(vn) | Rd(vd));
}


void Assembler::NEON3DifferentW(const VRegister& vd,
                                const VRegister& vn,
                                const VRegister& vm,
                                NEON3DifferentOp vop) {
  VIXL_ASSERT(AreSameFormat(vd, vn));
  VIXL_ASSERT((vm.Is8B() && vd.Is8H()) || (vm.Is4H() && vd.Is4S()) ||
              (vm.Is2S() && vd.Is2D()) || (vm.Is16B() && vd.Is8H()) ||
              (vm.Is8H() && vd.Is4S()) || (vm.Is4S() && vd.Is2D()));
  Emit(VFormat(vm) | vop | Rm(vm) | Rn(vn) | Rd(vd));
}


void Assembler::NEON3DifferentHN(const VRegister& vd,
                                 const VRegister& vn,
                                 const VRegister& vm,
                                 NEON3DifferentOp vop) {
  VIXL_ASSERT(AreSameFormat(vm, vn));
  VIXL_ASSERT((vd.Is8B() && vn.Is8H()) || (vd.Is4H() && vn.Is4S()) ||
              (vd.Is2S() && vn.Is2D()) || (vd.Is16B() && vn.Is8H()) ||
              (vd.Is8H() && vn.Is4S()) || (vd.Is4S() && vn.Is2D()));
  Emit(VFormat(vd) | vop | Rm(vm) | Rn(vn) | Rd(vd));
}
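
// Illustrative shapes for the three "3 different" helpers above (standard
// AArch64 assembly, not part of the original source):
//   long (L):         smull v0.8h, v1.8b, v2.8b  - narrow sources, wide result
//   wide (W):         saddw v0.8h, v1.8h, v2.8b  - one narrow source
//   high narrow (HN): addhn v0.8b, v1.8h, v2.8h  - wide sources, narrow result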


// clang-format off
#define NEON_3DIFF_LONG_LIST(V)                                               \
  V(pmull,    NEON_PMULL,    vn.IsVector() && vn.Is8B())                      \
  V(pmull2,   NEON_PMULL2,   vn.IsVector() && vn.Is16B())                     \
  V(saddl,    NEON_SADDL,    vn.IsVector() && vn.IsD())                       \
  V(saddl2,   NEON_SADDL2,   vn.IsVector() && vn.IsQ())                       \
  V(sabal,    NEON_SABAL,    vn.IsVector() && vn.IsD())                       \
  V(sabal2,   NEON_SABAL2,   vn.IsVector() && vn.IsQ())                       \
  V(uabal,    NEON_UABAL,    vn.IsVector() && vn.IsD())                       \
  V(uabal2,   NEON_UABAL2,   vn.IsVector() && vn.IsQ())                       \
  V(sabdl,    NEON_SABDL,    vn.IsVector() && vn.IsD())                       \
  V(sabdl2,   NEON_SABDL2,   vn.IsVector() && vn.IsQ())                       \
  V(uabdl,    NEON_UABDL,    vn.IsVector() && vn.IsD())                       \
  V(uabdl2,   NEON_UABDL2,   vn.IsVector() && vn.IsQ())                       \
  V(smlal,    NEON_SMLAL,    vn.IsVector() && vn.IsD())                       \
  V(smlal2,   NEON_SMLAL2,   vn.IsVector() && vn.IsQ())                       \
  V(umlal,    NEON_UMLAL,    vn.IsVector() && vn.IsD())                       \
  V(umlal2,   NEON_UMLAL2,   vn.IsVector() && vn.IsQ())                       \
  V(smlsl,    NEON_SMLSL,    vn.IsVector() && vn.IsD())                       \
  V(smlsl2,   NEON_SMLSL2,   vn.IsVector() && vn.IsQ())                       \
  V(umlsl,    NEON_UMLSL,    vn.IsVector() && vn.IsD())                       \
  V(umlsl2,   NEON_UMLSL2,   vn.IsVector() && vn.IsQ())                       \
  V(smull,    NEON_SMULL,    vn.IsVector() && vn.IsD())                       \
  V(smull2,   NEON_SMULL2,   vn.IsVector() && vn.IsQ())                       \
  V(umull,    NEON_UMULL,    vn.IsVector() && vn.IsD())                       \
  V(umull2,   NEON_UMULL2,   vn.IsVector() && vn.IsQ())                       \
  V(ssubl,    NEON_SSUBL,    vn.IsVector() && vn.IsD())                       \
  V(ssubl2,   NEON_SSUBL2,   vn.IsVector() && vn.IsQ())                       \
  V(uaddl,    NEON_UADDL,    vn.IsVector() && vn.IsD())                       \
  V(uaddl2,   NEON_UADDL2,   vn.IsVector() && vn.IsQ())                       \
  V(usubl,    NEON_USUBL,    vn.IsVector() && vn.IsD())                       \
  V(usubl2,   NEON_USUBL2,   vn.IsVector() && vn.IsQ())                       \
  V(sqdmlal,  NEON_SQDMLAL,  vn.Is1H() || vn.Is1S() || vn.Is4H() || vn.Is2S())\
  V(sqdmlal2, NEON_SQDMLAL2, vn.Is1H() || vn.Is1S() || vn.Is8H() || vn.Is4S())\
  V(sqdmlsl,  NEON_SQDMLSL,  vn.Is1H() || vn.Is1S() || vn.Is4H() || vn.Is2S())\
  V(sqdmlsl2, NEON_SQDMLSL2, vn.Is1H() || vn.Is1S() || vn.Is8H() || vn.Is4S())\
  V(sqdmull,  NEON_SQDMULL,  vn.Is1H() || vn.Is1S() || vn.Is4H() || vn.Is2S())\
  V(sqdmull2, NEON_SQDMULL2, vn.Is1H() || vn.Is1S() || vn.Is8H() || vn.Is4S())
// clang-format on


#define DEFINE_ASM_FUNC(FN, OP, AS)          \
  void Assembler::FN(const VRegister& vd,    \
                     const VRegister& vn,    \
                     const VRegister& vm) {  \
    VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); \
    VIXL_ASSERT(AS);                         \
    NEON3DifferentL(vd, vn, vm, OP);         \
  }
NEON_3DIFF_LONG_LIST(DEFINE_ASM_FUNC)
#undef DEFINE_ASM_FUNC

// clang-format off
#define NEON_3DIFF_HN_LIST(V)        \
  V(addhn,   NEON_ADDHN,   vd.IsD()) \
  V(addhn2,  NEON_ADDHN2,  vd.IsQ()) \
  V(raddhn,  NEON_RADDHN,  vd.IsD()) \
  V(raddhn2, NEON_RADDHN2, vd.IsQ()) \
  V(subhn,   NEON_SUBHN,   vd.IsD()) \
  V(subhn2,  NEON_SUBHN2,  vd.IsQ()) \
  V(rsubhn,  NEON_RSUBHN,  vd.IsD()) \
  V(rsubhn2, NEON_RSUBHN2, vd.IsQ())
// clang-format on

#define DEFINE_ASM_FUNC(FN, OP, AS)          \
  void Assembler::FN(const VRegister& vd,    \
                     const VRegister& vn,    \
                     const VRegister& vm) {  \
    VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); \
    VIXL_ASSERT(AS);                         \
    NEON3DifferentHN(vd, vn, vm, OP);        \
  }
NEON_3DIFF_HN_LIST(DEFINE_ASM_FUNC)
#undef DEFINE_ASM_FUNC

void Assembler::uaddw(const VRegister& vd,
                      const VRegister& vn,
                      const VRegister& vm) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
  VIXL_ASSERT(vm.IsD());
  NEON3DifferentW(vd, vn, vm, NEON_UADDW);
}


void Assembler::uaddw2(const VRegister& vd,
                       const VRegister& vn,
                       const VRegister& vm) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
  VIXL_ASSERT(vm.IsQ());
  NEON3DifferentW(vd, vn, vm, NEON_UADDW2);
}


void Assembler::saddw(const VRegister& vd,
                      const VRegister& vn,
                      const VRegister& vm) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
  VIXL_ASSERT(vm.IsD());
  NEON3DifferentW(vd, vn, vm, NEON_SADDW);
}


void Assembler::saddw2(const VRegister& vd,
                       const VRegister& vn,
                       const VRegister& vm) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
  VIXL_ASSERT(vm.IsQ());
  NEON3DifferentW(vd, vn, vm, NEON_SADDW2);
}


void Assembler::usubw(const VRegister& vd,
                      const VRegister& vn,
                      const VRegister& vm) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
  VIXL_ASSERT(vm.IsD());
  NEON3DifferentW(vd, vn, vm, NEON_USUBW);
}


void Assembler::usubw2(const VRegister& vd,
                       const VRegister& vn,
                       const VRegister& vm) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
  VIXL_ASSERT(vm.IsQ());
  NEON3DifferentW(vd, vn, vm, NEON_USUBW2);
}


void Assembler::ssubw(const VRegister& vd,
                      const VRegister& vn,
                      const VRegister& vm) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
  VIXL_ASSERT(vm.IsD());
  NEON3DifferentW(vd, vn, vm, NEON_SSUBW);
}


void Assembler::ssubw2(const VRegister& vd,
                       const VRegister& vn,
                       const VRegister& vm) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
  VIXL_ASSERT(vm.IsQ());
  NEON3DifferentW(vd, vn, vm, NEON_SSUBW2);
}


void Assembler::mov(const Register& rd, const Register& rm) {
  // Moves involving the stack pointer are encoded as an add immediate with
  // a second operand of zero. Otherwise, an orr with the zero register as
  // the first operand is used.
  if (rd.IsSP() || rm.IsSP()) {
    add(rd, rm, 0);
  } else {
    orr(rd, AppropriateZeroRegFor(rd), rm);
  }
}

void Assembler::xpaclri() {
  VIXL_ASSERT(CPUHas(CPUFeatures::kPAuth));
  Emit(XPACLRI);
}

void Assembler::pacia1716() {
  VIXL_ASSERT(CPUHas(CPUFeatures::kPAuth));
  Emit(PACIA1716);
}

void Assembler::pacib1716() {
  VIXL_ASSERT(CPUHas(CPUFeatures::kPAuth));
  Emit(PACIB1716);
}

void Assembler::autia1716() {
  VIXL_ASSERT(CPUHas(CPUFeatures::kPAuth));
  Emit(AUTIA1716);
}

void Assembler::autib1716() {
  VIXL_ASSERT(CPUHas(CPUFeatures::kPAuth));
  Emit(AUTIB1716);
}

void Assembler::paciaz() {
  VIXL_ASSERT(CPUHas(CPUFeatures::kPAuth));
  Emit(PACIAZ);
}

void Assembler::pacibz() {
  VIXL_ASSERT(CPUHas(CPUFeatures::kPAuth));
  Emit(PACIBZ);
}

void Assembler::autiaz() {
  VIXL_ASSERT(CPUHas(CPUFeatures::kPAuth));
  Emit(AUTIAZ);
}

void Assembler::autibz() {
  VIXL_ASSERT(CPUHas(CPUFeatures::kPAuth));
  Emit(AUTIBZ);
}

void Assembler::paciasp() {
  VIXL_ASSERT(CPUHas(CPUFeatures::kPAuth));
  Emit(PACIASP);
}

void Assembler::pacibsp() {
  VIXL_ASSERT(CPUHas(CPUFeatures::kPAuth));
  Emit(PACIBSP);
}

void Assembler::autiasp() {
  VIXL_ASSERT(CPUHas(CPUFeatures::kPAuth));
  Emit(AUTIASP);
}

void Assembler::autibsp() {
  VIXL_ASSERT(CPUHas(CPUFeatures::kPAuth));
  Emit(AUTIBSP);
}


void Assembler::mvn(const Register& rd, const Operand& operand) {
  orn(rd, AppropriateZeroRegFor(rd), operand);
}


void Assembler::mrs(const Register& xt, SystemRegister sysreg) {
  VIXL_ASSERT(xt.Is64Bits());
  Emit(MRS | ImmSystemRegister(sysreg) | Rt(xt));
}


void Assembler::msr(SystemRegister sysreg, const Register& xt) {
  VIXL_ASSERT(xt.Is64Bits());
  Emit(MSR | Rt(xt) | ImmSystemRegister(sysreg));
}


void Assembler::clrex(int imm4) { Emit(CLREX | CRm(imm4)); }
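
// A minimal usage sketch for the barriers below (illustrative only),
// assuming an Assembler `assm`: order earlier stores before later ones,
// then resynchronize the instruction stream:
//   assm.dmb(InnerShareable, BarrierWrites);
//   assm.isb();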


void Assembler::dmb(BarrierDomain domain, BarrierType type) {
  Emit(DMB | ImmBarrierDomain(domain) | ImmBarrierType(type));
}


void Assembler::dsb(BarrierDomain domain, BarrierType type) {
  Emit(DSB | ImmBarrierDomain(domain) | ImmBarrierType(type));
}


void Assembler::isb() {
  Emit(ISB | ImmBarrierDomain(FullSystem) | ImmBarrierType(BarrierAll));
}

void Assembler::esb() {
  VIXL_ASSERT(CPUHas(CPUFeatures::kRAS));
  hint(ESB);
}

void Assembler::csdb() { hint(CSDB); }

void Assembler::fmov(const VRegister& vd, double imm) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kFP));
  if (vd.IsScalar()) {
    VIXL_ASSERT(vd.Is1D());
    Emit(FMOV_d_imm | Rd(vd) | ImmFP64(imm));
  } else {
    VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
    VIXL_ASSERT(vd.Is2D());
    Instr op = NEONModifiedImmediate_MOVI | NEONModifiedImmediateOpBit;
    Instr q = NEON_Q;
    uint32_t encoded_imm = FP64ToImm8(imm);
    Emit(q | op | ImmNEONabcdefgh(encoded_imm) | NEONCmode(0xf) | Rd(vd));
  }
}


void Assembler::fmov(const VRegister& vd, float imm) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kFP));
  if (vd.IsScalar()) {
    VIXL_ASSERT(vd.Is1S());
    Emit(FMOV_s_imm | Rd(vd) | ImmFP32(imm));
  } else {
    VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
    VIXL_ASSERT(vd.Is2S() || vd.Is4S());
    Instr op = NEONModifiedImmediate_MOVI;
    Instr q = vd.Is4S() ? NEON_Q : 0;
    uint32_t encoded_imm = FP32ToImm8(imm);
    Emit(q | op | ImmNEONabcdefgh(encoded_imm) | NEONCmode(0xf) | Rd(vd));
  }
}


void Assembler::fmov(const VRegister& vd, Float16 imm) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kFP));
  if (vd.IsScalar()) {
    VIXL_ASSERT(CPUHas(CPUFeatures::kFPHalf));
    VIXL_ASSERT(vd.Is1H());
    Emit(FMOV_h_imm | Rd(vd) | ImmFP16(imm));
  } else {
    VIXL_ASSERT(CPUHas(CPUFeatures::kNEON, CPUFeatures::kNEONHalf));
    VIXL_ASSERT(vd.Is4H() || vd.Is8H());
    Instr q = vd.Is8H() ? NEON_Q : 0;
    uint32_t encoded_imm = FP16ToImm8(imm);
    Emit(q | NEONModifiedImmediate_FMOV | ImmNEONabcdefgh(encoded_imm) |
         NEONCmode(0xf) | Rd(vd));
  }
}
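
// Note (illustrative, based on the ARMv8 FP immediate format): the imm8
// encodings used above can only represent values of the form
//   (-1)^s * (1 + m/16) * 2^e,  with m in [0, 15] and e in [-3, 4],
// e.g. 0.5, 1.0, 1.5 or 31.0, but not 0.0 or 0.1. Other values must be
// materialized through a literal load or an integer move.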


void Assembler::fmov(const Register& rd, const VRegister& vn) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kFP));
  VIXL_ASSERT(vn.Is1H() || vn.Is1S() || vn.Is1D());
  VIXL_ASSERT((rd.GetSizeInBits() == vn.GetSizeInBits()) || vn.Is1H());
  FPIntegerConvertOp op;
  switch (vn.GetSizeInBits()) {
    case 16:
      VIXL_ASSERT(CPUHas(CPUFeatures::kFPHalf));
      op = rd.Is64Bits() ? FMOV_xh : FMOV_wh;
      break;
    case 32:
      op = FMOV_ws;
      break;
    default:
      op = FMOV_xd;
  }
  Emit(op | Rd(rd) | Rn(vn));
}


void Assembler::fmov(const VRegister& vd, const Register& rn) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kFP));
  VIXL_ASSERT(vd.Is1H() || vd.Is1S() || vd.Is1D());
  VIXL_ASSERT((vd.GetSizeInBits() == rn.GetSizeInBits()) || vd.Is1H());
  FPIntegerConvertOp op;
  switch (vd.GetSizeInBits()) {
    case 16:
      VIXL_ASSERT(CPUHas(CPUFeatures::kFPHalf));
      op = rn.Is64Bits() ? FMOV_hx : FMOV_hw;
      break;
    case 32:
      op = FMOV_sw;
      break;
    default:
      op = FMOV_dx;
  }
  Emit(op | Rd(vd) | Rn(rn));
}


void Assembler::fmov(const VRegister& vd, const VRegister& vn) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kFP));
  if (vd.Is1H()) {
    VIXL_ASSERT(CPUHas(CPUFeatures::kFPHalf));
  }
  VIXL_ASSERT(vd.Is1H() || vd.Is1S() || vd.Is1D());
  VIXL_ASSERT(vd.IsSameFormat(vn));
  Emit(FPType(vd) | FMOV | Rd(vd) | Rn(vn));
}


void Assembler::fmov(const VRegister& vd, int index, const Register& rn) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON, CPUFeatures::kFP));
  VIXL_ASSERT((index == 1) && vd.Is1D() && rn.IsX());
  USE(index);
  Emit(FMOV_d1_x | Rd(vd) | Rn(rn));
}


void Assembler::fmov(const Register& rd, const VRegister& vn, int index) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON, CPUFeatures::kFP));
  VIXL_ASSERT((index == 1) && vn.Is1D() && rd.IsX());
  USE(index);
  Emit(FMOV_x_d1 | Rd(rd) | Rn(vn));
}


void Assembler::fmadd(const VRegister& vd,
                      const VRegister& vn,
                      const VRegister& vm,
                      const VRegister& va) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kFP));
  FPDataProcessing3SourceOp op;
  if (vd.Is1H()) {
    VIXL_ASSERT(CPUHas(CPUFeatures::kFPHalf));
    op = FMADD_h;
  } else if (vd.Is1S()) {
    op = FMADD_s;
  } else {
    VIXL_ASSERT(vd.Is1D());
    op = FMADD_d;
  }
  FPDataProcessing3Source(vd, vn, vm, va, op);
}


void Assembler::fmsub(const VRegister& vd,
                      const VRegister& vn,
                      const VRegister& vm,
                      const VRegister& va) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kFP));
  FPDataProcessing3SourceOp op;
  if (vd.Is1H()) {
    VIXL_ASSERT(CPUHas(CPUFeatures::kFPHalf));
    op = FMSUB_h;
  } else if (vd.Is1S()) {
    op = FMSUB_s;
  } else {
    VIXL_ASSERT(vd.Is1D());
    op = FMSUB_d;
  }
  FPDataProcessing3Source(vd, vn, vm, va, op);
}


void Assembler::fnmadd(const VRegister& vd,
                       const VRegister& vn,
                       const VRegister& vm,
                       const VRegister& va) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kFP));
  FPDataProcessing3SourceOp op;
  if (vd.Is1H()) {
    VIXL_ASSERT(CPUHas(CPUFeatures::kFPHalf));
    op = FNMADD_h;
  } else if (vd.Is1S()) {
    op = FNMADD_s;
  } else {
    VIXL_ASSERT(vd.Is1D());
    op = FNMADD_d;
  }
  FPDataProcessing3Source(vd, vn, vm, va, op);
}


void Assembler::fnmsub(const VRegister& vd,
                       const VRegister& vn,
                       const VRegister& vm,
                       const VRegister& va) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kFP));
  FPDataProcessing3SourceOp op;
  if (vd.Is1H()) {
    VIXL_ASSERT(CPUHas(CPUFeatures::kFPHalf));
    op = FNMSUB_h;
  } else if (vd.Is1S()) {
    op = FNMSUB_s;
  } else {
    VIXL_ASSERT(vd.Is1D());
    op = FNMSUB_d;
  }
  FPDataProcessing3Source(vd, vn, vm, va, op);
}


void Assembler::fnmul(const VRegister& vd,
                      const VRegister& vn,
                      const VRegister& vm) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kFP));
  VIXL_ASSERT(AreSameSizeAndType(vd, vn, vm));
  Instr op;
  if (vd.Is1H()) {
    VIXL_ASSERT(CPUHas(CPUFeatures::kFPHalf));
    op = FNMUL_h;
  } else if (vd.Is1S()) {
    op = FNMUL_s;
  } else {
    VIXL_ASSERT(vd.Is1D());
    op = FNMUL_d;
  }
  Emit(FPType(vd) | op | Rm(vm) | Rn(vn) | Rd(vd));
}
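
// Semantics of the 3-source operations above (for reference, standard
// AArch64 behaviour):
//   fmadd  d0, d1, d2, d3   computes d0 =  d3 + (d1 * d2)
//   fmsub  d0, d1, d2, d3   computes d0 =  d3 - (d1 * d2)
//   fnmadd d0, d1, d2, d3   computes d0 = -d3 - (d1 * d2)
//   fnmsub d0, d1, d2, d3   computes d0 = -d3 + (d1 * d2)
// each fused, with a single rounding step.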


void Assembler::FPCompareMacro(const VRegister& vn,
                               double value,
                               FPTrapFlags trap) {
  USE(value);
  // Although the fcmp{e} instructions can strictly only take an immediate
  // value of +0.0, we don't need to check for -0.0 because the sign of 0.0
  // doesn't affect the result of the comparison.
  VIXL_ASSERT(value == 0.0);
  VIXL_ASSERT(vn.Is1H() || vn.Is1S() || vn.Is1D());
  Instr op = (trap == EnableTrap) ? FCMPE_zero : FCMP_zero;
  Emit(FPType(vn) | op | Rn(vn));
}


void Assembler::FPCompareMacro(const VRegister& vn,
                               const VRegister& vm,
                               FPTrapFlags trap) {
  VIXL_ASSERT(vn.Is1H() || vn.Is1S() || vn.Is1D());
  VIXL_ASSERT(vn.IsSameSizeAndType(vm));
  Instr op = (trap == EnableTrap) ? FCMPE : FCMP;
  Emit(FPType(vn) | op | Rm(vm) | Rn(vn));
}


void Assembler::fcmp(const VRegister& vn, const VRegister& vm) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kFP));
  if (vn.Is1H()) VIXL_ASSERT(CPUHas(CPUFeatures::kFPHalf));
  FPCompareMacro(vn, vm, DisableTrap);
}


void Assembler::fcmpe(const VRegister& vn, const VRegister& vm) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kFP));
  if (vn.Is1H()) VIXL_ASSERT(CPUHas(CPUFeatures::kFPHalf));
  FPCompareMacro(vn, vm, EnableTrap);
}


void Assembler::fcmp(const VRegister& vn, double value) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kFP));
  if (vn.Is1H()) VIXL_ASSERT(CPUHas(CPUFeatures::kFPHalf));
  FPCompareMacro(vn, value, DisableTrap);
}


void Assembler::fcmpe(const VRegister& vn, double value) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kFP));
  if (vn.Is1H()) VIXL_ASSERT(CPUHas(CPUFeatures::kFPHalf));
  FPCompareMacro(vn, value, EnableTrap);
}


void Assembler::FPCCompareMacro(const VRegister& vn,
                                const VRegister& vm,
                                StatusFlags nzcv,
                                Condition cond,
                                FPTrapFlags trap) {
  VIXL_ASSERT(vn.Is1H() || vn.Is1S() || vn.Is1D());
  VIXL_ASSERT(vn.IsSameSizeAndType(vm));
  Instr op = (trap == EnableTrap) ? FCCMPE : FCCMP;
  Emit(FPType(vn) | op | Rm(vm) | Cond(cond) | Rn(vn) | Nzcv(nzcv));
}

void Assembler::fccmp(const VRegister& vn,
                      const VRegister& vm,
                      StatusFlags nzcv,
                      Condition cond) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kFP));
  if (vn.Is1H()) VIXL_ASSERT(CPUHas(CPUFeatures::kFPHalf));
  FPCCompareMacro(vn, vm, nzcv, cond, DisableTrap);
}


void Assembler::fccmpe(const VRegister& vn,
                       const VRegister& vm,
                       StatusFlags nzcv,
                       Condition cond) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kFP));
  if (vn.Is1H()) VIXL_ASSERT(CPUHas(CPUFeatures::kFPHalf));
  FPCCompareMacro(vn, vm, nzcv, cond, EnableTrap);
}


void Assembler::fcsel(const VRegister& vd,
                      const VRegister& vn,
                      const VRegister& vm,
                      Condition cond) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kFP));
  if (vd.Is1H()) VIXL_ASSERT(CPUHas(CPUFeatures::kFPHalf));
  VIXL_ASSERT(vd.Is1H() || vd.Is1S() || vd.Is1D());
  VIXL_ASSERT(AreSameFormat(vd, vn, vm));
  Emit(FPType(vd) | FCSEL | Rm(vm) | Cond(cond) | Rn(vn) | Rd(vd));
}
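
// For reference (not part of the original source): fccmp/fccmpe above only
// perform the comparison when `cond` holds; otherwise the NZCV flags are set
// directly to the supplied `nzcv` immediate, i.e.
//   flags = cond ? compare(vn, vm) : nzcv;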


void Assembler::fcvt(const VRegister& vd, const VRegister& vn) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kFP));
  FPDataProcessing1SourceOp op;
  // The half-precision variants belong to base FP, and do not require kFPHalf.
  if (vd.Is1D()) {
    VIXL_ASSERT(vn.Is1S() || vn.Is1H());
    op = vn.Is1S() ? FCVT_ds : FCVT_dh;
  } else if (vd.Is1S()) {
    VIXL_ASSERT(vn.Is1D() || vn.Is1H());
    op = vn.Is1D() ? FCVT_sd : FCVT_sh;
  } else {
    VIXL_ASSERT(vd.Is1H());
    VIXL_ASSERT(vn.Is1D() || vn.Is1S());
    op = vn.Is1D() ? FCVT_hd : FCVT_hs;
  }
  FPDataProcessing1Source(vd, vn, op);
}


void Assembler::fcvtl(const VRegister& vd, const VRegister& vn) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kFP, CPUFeatures::kNEON));
  VIXL_ASSERT((vd.Is4S() && vn.Is4H()) || (vd.Is2D() && vn.Is2S()));
  // The half-precision variants belong to base FP, and do not require kFPHalf.
  Instr format = vd.Is2D() ? (1 << NEONSize_offset) : 0;
  Emit(format | NEON_FCVTL | Rn(vn) | Rd(vd));
}


void Assembler::fcvtl2(const VRegister& vd, const VRegister& vn) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kFP, CPUFeatures::kNEON));
  VIXL_ASSERT((vd.Is4S() && vn.Is8H()) || (vd.Is2D() && vn.Is4S()));
  // The half-precision variants belong to base FP, and do not require kFPHalf.
  Instr format = vd.Is2D() ? (1 << NEONSize_offset) : 0;
  Emit(NEON_Q | format | NEON_FCVTL | Rn(vn) | Rd(vd));
}


void Assembler::fcvtn(const VRegister& vd, const VRegister& vn) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kFP, CPUFeatures::kNEON));
  VIXL_ASSERT((vn.Is4S() && vd.Is4H()) || (vn.Is2D() && vd.Is2S()));
  // The half-precision variants belong to base FP, and do not require kFPHalf.
  Instr format = vn.Is2D() ? (1 << NEONSize_offset) : 0;
  Emit(format | NEON_FCVTN | Rn(vn) | Rd(vd));
}
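
// Illustrative pairing (standard AArch64 behaviour, not part of the original
// source): fcvtl/fcvtl2 widen the low/high half of the source, while
// fcvtn/fcvtn2 narrow into the low/high half of the destination, e.g.:
//   fcvtl  v0.4s, v1.4h   // low four H lanes  -> four S lanes
//   fcvtl2 v0.4s, v1.8h   // high four H lanes -> four S lanes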


void Assembler::fcvtn2(const VRegister& vd, const VRegister& vn) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kFP, CPUFeatures::kNEON));
  VIXL_ASSERT((vn.Is4S() && vd.Is8H()) || (vn.Is2D() && vd.Is4S()));
  // The half-precision variants belong to base FP, and do not require kFPHalf.
  Instr format = vn.Is2D() ? (1 << NEONSize_offset) : 0;
  Emit(NEON_Q | format | NEON_FCVTN | Rn(vn) | Rd(vd));
}


void Assembler::fcvtxn(const VRegister& vd, const VRegister& vn) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kFP, CPUFeatures::kNEON));
  Instr format = 1 << NEONSize_offset;
  if (vd.IsScalar()) {
    VIXL_ASSERT(vd.Is1S() && vn.Is1D());
    Emit(format | NEON_FCVTXN_scalar | Rn(vn) | Rd(vd));
  } else {
    VIXL_ASSERT(vd.Is2S() && vn.Is2D());
    Emit(format | NEON_FCVTXN | Rn(vn) | Rd(vd));
  }
}


void Assembler::fcvtxn2(const VRegister& vd, const VRegister& vn) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kFP, CPUFeatures::kNEON));
  VIXL_ASSERT(vd.Is4S() && vn.Is2D());
  Instr format = 1 << NEONSize_offset;
  Emit(NEON_Q | format | NEON_FCVTXN | Rn(vn) | Rd(vd));
}

void Assembler::fjcvtzs(const Register& rd, const VRegister& vn) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kFP, CPUFeatures::kJSCVT));
  VIXL_ASSERT(rd.IsW() && vn.Is1D());
  Emit(FJCVTZS | Rn(vn) | Rd(rd));
}


void Assembler::NEONFPConvertToInt(const Register& rd,
                                   const VRegister& vn,
                                   Instr op) {
  Emit(SF(rd) | FPType(vn) | op | Rn(vn) | Rd(rd));
}


void Assembler::NEONFPConvertToInt(const VRegister& vd,
                                   const VRegister& vn,
                                   Instr op) {
  if (vn.IsScalar()) {
    VIXL_ASSERT((vd.Is1S() && vn.Is1S()) || (vd.Is1D() && vn.Is1D()));
    op |= NEON_Q | NEONScalar;
  }
  Emit(FPFormat(vn) | op | Rn(vn) | Rd(vd));
}


void Assembler::NEONFP16ConvertToInt(const VRegister& vd,
                                     const VRegister& vn,
                                     Instr op) {
  VIXL_ASSERT(AreSameFormat(vd, vn));
  VIXL_ASSERT(vn.IsLaneSizeH());
  if (vn.IsScalar()) {
    op |= NEON_Q | NEONScalar;
  } else if (vn.Is8H()) {
    op |= NEON_Q;
  }
  Emit(op | Rn(vn) | Rd(vd));
}


#define NEON_FP2REGMISC_FCVT_LIST(V) \
  V(fcvtnu, NEON_FCVTNU, FCVTNU)     \
  V(fcvtns, NEON_FCVTNS, FCVTNS)     \
  V(fcvtpu, NEON_FCVTPU, FCVTPU)     \
  V(fcvtps, NEON_FCVTPS, FCVTPS)     \
  V(fcvtmu, NEON_FCVTMU, FCVTMU)     \
  V(fcvtms, NEON_FCVTMS, FCVTMS)     \
  V(fcvtau, NEON_FCVTAU, FCVTAU)     \
  V(fcvtas, NEON_FCVTAS, FCVTAS)

#define DEFINE_ASM_FUNCS(FN, VEC_OP, SCA_OP)                     \
  void Assembler::FN(const Register& rd, const VRegister& vn) {  \
    VIXL_ASSERT(CPUHas(CPUFeatures::kFP));                       \
    if (vn.IsH()) VIXL_ASSERT(CPUHas(CPUFeatures::kFPHalf));     \
    NEONFPConvertToInt(rd, vn, SCA_OP);                          \
  }                                                              \
  void Assembler::FN(const VRegister& vd, const VRegister& vn) { \
    VIXL_ASSERT(CPUHas(CPUFeatures::kFP, CPUFeatures::kNEON));   \
    if (vd.IsLaneSizeH()) {                                      \
      VIXL_ASSERT(CPUHas(CPUFeatures::kNEONHalf));               \
      NEONFP16ConvertToInt(vd, vn, VEC_OP##_H);                  \
    } else {                                                     \
      NEONFPConvertToInt(vd, vn, VEC_OP);                        \
    }                                                            \
  }
NEON_FP2REGMISC_FCVT_LIST(DEFINE_ASM_FUNCS)
#undef DEFINE_ASM_FUNCS


void Assembler::fcvtzs(const Register& rd, const VRegister& vn, int fbits) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kFP));
  if (vn.Is1H()) VIXL_ASSERT(CPUHas(CPUFeatures::kFPHalf));
  VIXL_ASSERT(vn.Is1H() || vn.Is1S() || vn.Is1D());
  VIXL_ASSERT((fbits >= 0) && (fbits <= rd.GetSizeInBits()));
  if (fbits == 0) {
    Emit(SF(rd) | FPType(vn) | FCVTZS | Rn(vn) | Rd(rd));
  } else {
    Emit(SF(rd) | FPType(vn) | FCVTZS_fixed | FPScale(64 - fbits) | Rn(vn) |
         Rd(rd));
  }
}
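
// Worked example (illustrative): with fbits == 8 the result is a Q24.8
// fixed-point value, i.e. the input is scaled by 2^8 before the truncating
// conversion, so fcvtzs(w0, s0, 8) turns 1.5f into 384.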


void Assembler::fcvtzs(const VRegister& vd, const VRegister& vn, int fbits) {
  // This form is a NEON scalar FP instruction.
  VIXL_ASSERT(CPUHas(CPUFeatures::kFP, CPUFeatures::kNEON));
  if (vn.IsLaneSizeH()) VIXL_ASSERT(CPUHas(CPUFeatures::kNEONHalf));
  VIXL_ASSERT(fbits >= 0);
  if (fbits == 0) {
    if (vd.IsLaneSizeH()) {
      NEONFP2RegMiscFP16(vd, vn, NEON_FCVTZS_H);
    } else {
      NEONFP2RegMisc(vd, vn, NEON_FCVTZS);
    }
  } else {
    VIXL_ASSERT(vd.Is1D() || vd.Is1S() || vd.Is2D() || vd.Is2S() ||
                vd.Is4S() || vd.Is1H() || vd.Is4H() || vd.Is8H());
    NEONShiftRightImmediate(vd, vn, fbits, NEON_FCVTZS_imm);
  }
}


void Assembler::fcvtzu(const Register& rd, const VRegister& vn, int fbits) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kFP));
  if (vn.Is1H()) VIXL_ASSERT(CPUHas(CPUFeatures::kFPHalf));
  VIXL_ASSERT(vn.Is1H() || vn.Is1S() || vn.Is1D());
  VIXL_ASSERT((fbits >= 0) && (fbits <= rd.GetSizeInBits()));
  if (fbits == 0) {
    Emit(SF(rd) | FPType(vn) | FCVTZU | Rn(vn) | Rd(rd));
  } else {
    Emit(SF(rd) | FPType(vn) | FCVTZU_fixed | FPScale(64 - fbits) | Rn(vn) |
         Rd(rd));
  }
}


void Assembler::fcvtzu(const VRegister& vd, const VRegister& vn, int fbits) {
  // This form is a NEON scalar FP instruction.
  VIXL_ASSERT(CPUHas(CPUFeatures::kFP, CPUFeatures::kNEON));
  if (vn.IsLaneSizeH()) VIXL_ASSERT(CPUHas(CPUFeatures::kNEONHalf));
  VIXL_ASSERT(fbits >= 0);
  if (fbits == 0) {
    if (vd.IsLaneSizeH()) {
      NEONFP2RegMiscFP16(vd, vn, NEON_FCVTZU_H);
    } else {
      NEONFP2RegMisc(vd, vn, NEON_FCVTZU);
    }
  } else {
    VIXL_ASSERT(vd.Is1D() || vd.Is1S() || vd.Is2D() || vd.Is2S() ||
                vd.Is4S() || vd.Is1H() || vd.Is4H() || vd.Is8H());
    NEONShiftRightImmediate(vd, vn, fbits, NEON_FCVTZU_imm);
  }
}

void Assembler::ucvtf(const VRegister& vd, const VRegister& vn, int fbits) {
  // This form is a NEON scalar FP instruction.
  VIXL_ASSERT(CPUHas(CPUFeatures::kFP, CPUFeatures::kNEON));
  if (vn.IsLaneSizeH()) VIXL_ASSERT(CPUHas(CPUFeatures::kNEONHalf));
  VIXL_ASSERT(fbits >= 0);
  if (fbits == 0) {
    if (vd.IsLaneSizeH()) {
      NEONFP2RegMiscFP16(vd, vn, NEON_UCVTF_H);
    } else {
      NEONFP2RegMisc(vd, vn, NEON_UCVTF);
    }
  } else {
    VIXL_ASSERT(vd.Is1D() || vd.Is1S() || vd.Is2D() || vd.Is2S() ||
                vd.Is4S() || vd.Is1H() || vd.Is4H() || vd.Is8H());
    NEONShiftRightImmediate(vd, vn, fbits, NEON_UCVTF_imm);
  }
}

void Assembler::scvtf(const VRegister& vd, const VRegister& vn, int fbits) {
  // This form is a NEON scalar FP instruction.
  VIXL_ASSERT(CPUHas(CPUFeatures::kFP, CPUFeatures::kNEON));
  if (vn.IsLaneSizeH()) VIXL_ASSERT(CPUHas(CPUFeatures::kNEONHalf));
  VIXL_ASSERT(fbits >= 0);
  if (fbits == 0) {
    if (vd.IsLaneSizeH()) {
      NEONFP2RegMiscFP16(vd, vn, NEON_SCVTF_H);
    } else {
      NEONFP2RegMisc(vd, vn, NEON_SCVTF);
    }
  } else {
    VIXL_ASSERT(vd.Is1D() || vd.Is1S() || vd.Is2D() || vd.Is2S() ||
                vd.Is4S() || vd.Is1H() || vd.Is4H() || vd.Is8H());
    NEONShiftRightImmediate(vd, vn, fbits, NEON_SCVTF_imm);
  }
}


void Assembler::scvtf(const VRegister& vd, const Register& rn, int fbits) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kFP));
  if (vd.Is1H()) VIXL_ASSERT(CPUHas(CPUFeatures::kFPHalf));
  VIXL_ASSERT(vd.Is1H() || vd.Is1S() || vd.Is1D());
  VIXL_ASSERT(fbits >= 0);
  if (fbits == 0) {
    Emit(SF(rn) | FPType(vd) | SCVTF | Rn(rn) | Rd(vd));
  } else {
    Emit(SF(rn) | FPType(vd) | SCVTF_fixed | FPScale(64 - fbits) | Rn(rn) |
         Rd(vd));
  }
}


void Assembler::ucvtf(const VRegister& vd, const Register& rn, int fbits) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kFP));
  if (vd.Is1H()) VIXL_ASSERT(CPUHas(CPUFeatures::kFPHalf));
  VIXL_ASSERT(vd.Is1H() || vd.Is1S() || vd.Is1D());
  VIXL_ASSERT(fbits >= 0);
  if (fbits == 0) {
    Emit(SF(rn) | FPType(vd) | UCVTF | Rn(rn) | Rd(vd));
  } else {
    Emit(SF(rn) | FPType(vd) | UCVTF_fixed | FPScale(64 - fbits) | Rn(rn) |
         Rd(vd));
  }
}


void Assembler::NEON3Same(const VRegister& vd,
                          const VRegister& vn,
                          const VRegister& vm,
                          NEON3SameOp vop) {
  VIXL_ASSERT(AreSameFormat(vd, vn, vm));
  VIXL_ASSERT(vd.IsVector() || !vd.IsQ());

  Instr format, op = vop;
  if (vd.IsScalar()) {
    op |= NEON_Q | NEONScalar;
    format = SFormat(vd);
  } else {
    format = VFormat(vd);
  }

  Emit(format | op | Rm(vm) | Rn(vn) | Rd(vd));
}


void Assembler::NEONFP3Same(const VRegister& vd,
                            const VRegister& vn,
                            const VRegister& vm,
                            Instr op) {
  VIXL_ASSERT(AreSameFormat(vd, vn, vm));
  Emit(FPFormat(vd) | op | Rm(vm) | Rn(vn) | Rd(vd));
}


void Assembler::NEON3SameFP16(const VRegister& vd,
                              const VRegister& vn,
                              const VRegister& vm,
                              Instr op) {
  VIXL_ASSERT(AreSameFormat(vd, vn, vm));
  VIXL_ASSERT(vd.GetLaneSizeInBytes() == kHRegSizeInBytes);
  if (vd.Is8H()) op |= NEON_Q;
  Emit(op | Rm(vm) | Rn(vn) | Rd(vd));
}


// clang-format off
#define NEON_FP2REGMISC_LIST(V)                                        \
  V(fabs,    NEON_FABS,    FABS,    FABS_h)                            \
  V(fneg,    NEON_FNEG,    FNEG,    FNEG_h)                            \
  V(fsqrt,   NEON_FSQRT,   FSQRT,   FSQRT_h)                           \
  V(frintn,  NEON_FRINTN,  FRINTN,  FRINTN_h)                          \
  V(frinta,  NEON_FRINTA,  FRINTA,  FRINTA_h)                          \
  V(frintp,  NEON_FRINTP,  FRINTP,  FRINTP_h)                          \
  V(frintm,  NEON_FRINTM,  FRINTM,  FRINTM_h)                          \
  V(frintx,  NEON_FRINTX,  FRINTX,  FRINTX_h)                          \
  V(frintz,  NEON_FRINTZ,  FRINTZ,  FRINTZ_h)                          \
  V(frinti,  NEON_FRINTI,  FRINTI,  FRINTI_h)                          \
  V(frsqrte, NEON_FRSQRTE, NEON_FRSQRTE_scalar, NEON_FRSQRTE_H_scalar) \
  V(frecpe,  NEON_FRECPE,  NEON_FRECPE_scalar,  NEON_FRECPE_H_scalar)
// clang-format on

#define DEFINE_ASM_FUNC(FN, VEC_OP, SCA_OP, SCA_OP_H)            \
  void Assembler::FN(const VRegister& vd, const VRegister& vn) { \
    VIXL_ASSERT(CPUHas(CPUFeatures::kFP));                       \
    Instr op;                                                    \
    if (vd.IsScalar()) {                                         \
      if (vd.Is1H()) {                                           \
        if ((SCA_OP_H & NEONScalar2RegMiscFP16FMask) ==          \
            NEONScalar2RegMiscFP16Fixed) {                                  \
          VIXL_ASSERT(CPUHas(CPUFeatures::kNEON, CPUFeatures::kNEONHalf));  \
        } else {                                                            \
          VIXL_ASSERT(CPUHas(CPUFeatures::kFPHalf));                        \
        }                                                                   \
        op = SCA_OP_H;                                                      \
      } else {                                                              \
        if ((SCA_OP & NEONScalar2RegMiscFMask) == NEONScalar2RegMiscFixed) {\
          VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));                          \
        }                                                                   \
        VIXL_ASSERT(vd.Is1S() || vd.Is1D());                                \
        op = SCA_OP;                                                        \
      }                                                                     \
    } else {                                                                \
      VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));                              \
      VIXL_ASSERT(vd.Is4H() || vd.Is8H() || vd.Is2S() || vd.Is2D() ||       \
                  vd.Is4S());                                               \
      if (vd.IsLaneSizeH()) {                                               \
        VIXL_ASSERT(CPUHas(CPUFeatures::kNEONHalf));                        \
        op = VEC_OP##_H;                                                    \
        if (vd.Is8H()) {                                                    \
          op |= NEON_Q;                                                     \
        }                                                                   \
      } else {                                                              \
        op = VEC_OP;                                                        \
      }                                                                     \
    }                                                                       \
    if (vd.IsLaneSizeH()) {                                                 \
      NEONFP2RegMiscFP16(vd, vn, op);                                       \
    } else {                                                                \
      NEONFP2RegMisc(vd, vn, op);                                           \
    }                                                                       \
  }
NEON_FP2REGMISC_LIST(DEFINE_ASM_FUNC)
#undef DEFINE_ASM_FUNC


void Assembler::NEONFP2RegMiscFP16(const VRegister& vd,
                                   const VRegister& vn,
                                   Instr op) {
  VIXL_ASSERT(AreSameFormat(vd, vn));
  Emit(op | Rn(vn) | Rd(vd));
}


void Assembler::NEONFP2RegMisc(const VRegister& vd,
                               const VRegister& vn,
                               Instr op) {
  VIXL_ASSERT(AreSameFormat(vd, vn));
  Emit(FPFormat(vd) | op | Rn(vn) | Rd(vd));
}


void Assembler::NEON2RegMisc(const VRegister& vd,
                             const VRegister& vn,
                             NEON2RegMiscOp vop,
                             int value) {
  VIXL_ASSERT(AreSameFormat(vd, vn));
  VIXL_ASSERT(value == 0);
  USE(value);

  Instr format, op = vop;
  if (vd.IsScalar()) {
    op |= NEON_Q | NEONScalar;
    format = SFormat(vd);
  } else {
    format = VFormat(vd);
  }

  Emit(format | op | Rn(vn) | Rd(vd));
}


void Assembler::cmeq(const VRegister& vd, const VRegister& vn, int value) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
  VIXL_ASSERT(vd.IsVector() || vd.Is1D());
  NEON2RegMisc(vd, vn, NEON_CMEQ_zero, value);
}


void Assembler::cmge(const VRegister& vd, const VRegister& vn, int value) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
  VIXL_ASSERT(vd.IsVector() || vd.Is1D());
  NEON2RegMisc(vd, vn, NEON_CMGE_zero, value);
}


void Assembler::cmgt(const VRegister& vd, const VRegister& vn, int value) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
  VIXL_ASSERT(vd.IsVector() || vd.Is1D());
  NEON2RegMisc(vd, vn, NEON_CMGT_zero, value);
}


void Assembler::cmle(const VRegister& vd, const VRegister& vn, int value) {
  VIXL_ASSERT(vd.IsVector() || vd.Is1D());
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
  NEON2RegMisc(vd, vn, NEON_CMLE_zero, value);
}


void Assembler::cmlt(const VRegister& vd, const VRegister& vn, int value) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
  VIXL_ASSERT(vd.IsVector() || vd.Is1D());
  NEON2RegMisc(vd, vn, NEON_CMLT_zero, value);
}


void Assembler::shll(const VRegister& vd, const VRegister& vn, int shift) {
  USE(shift);
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
  VIXL_ASSERT((vd.Is8H() && vn.Is8B() && shift == 8) ||
              (vd.Is4S() && vn.Is4H() && shift == 16) ||
              (vd.Is2D() && vn.Is2S() && shift == 32));
  Emit(VFormat(vn) | NEON_SHLL | Rn(vn) | Rd(vd));
}
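
// Note (for reference): the shll/shll2 immediate is not a free shift amount;
// as the assertions above and below require, it must equal the source lane
// width in bits, e.g.:
//   shll v0.8h, v1.8b, #8   // widen each byte, then shift left by eight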


void Assembler::shll2(const VRegister& vd, const VRegister& vn, int shift) {
  USE(shift);
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
  VIXL_ASSERT((vd.Is8H() && vn.Is16B() && shift == 8) ||
              (vd.Is4S() && vn.Is8H() && shift == 16) ||
              (vd.Is2D() && vn.Is4S() && shift == 32));
  Emit(VFormat(vn) | NEON_SHLL | Rn(vn) | Rd(vd));
}


void Assembler::NEONFP2RegMisc(const VRegister& vd,
                               const VRegister& vn,
                               NEON2RegMiscOp vop,
                               double value) {
  VIXL_ASSERT(AreSameFormat(vd, vn));
  VIXL_ASSERT(value == 0.0);
  USE(value);

  Instr op = vop;
  if (vd.IsScalar()) {
    VIXL_ASSERT(vd.Is1S() || vd.Is1D());
    op |= NEON_Q | NEONScalar;
  } else {
    VIXL_ASSERT(vd.Is2S() || vd.Is2D() || vd.Is4S());
  }

  Emit(FPFormat(vd) | op | Rn(vn) | Rd(vd));
}


void Assembler::NEONFP2RegMiscFP16(const VRegister& vd,
                                   const VRegister& vn,
                                   NEON2RegMiscFP16Op vop,
                                   double value) {
  VIXL_ASSERT(AreSameFormat(vd, vn));
  VIXL_ASSERT(value == 0.0);
  USE(value);

  Instr op = vop;
  if (vd.IsScalar()) {
    VIXL_ASSERT(vd.Is1H());
    op |= NEON_Q | NEONScalar;
  } else {
    VIXL_ASSERT(vd.Is4H() || vd.Is8H());
    if (vd.Is8H()) {
      op |= NEON_Q;
    }
  }

  Emit(op | Rn(vn) | Rd(vd));
}


void Assembler::fcmeq(const VRegister& vd, const VRegister& vn, double value) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kFP, CPUFeatures::kNEON));
  if (vd.IsLaneSizeH()) {
    VIXL_ASSERT(CPUHas(CPUFeatures::kNEONHalf));
    NEONFP2RegMiscFP16(vd, vn, NEON_FCMEQ_H_zero, value);
  } else {
    NEONFP2RegMisc(vd, vn, NEON_FCMEQ_zero, value);
  }
}


void Assembler::fcmge(const VRegister& vd, const VRegister& vn, double value) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kFP, CPUFeatures::kNEON));
  if (vd.IsLaneSizeH()) {
    VIXL_ASSERT(CPUHas(CPUFeatures::kNEONHalf));
    NEONFP2RegMiscFP16(vd, vn, NEON_FCMGE_H_zero, value);
  } else {
    NEONFP2RegMisc(vd, vn, NEON_FCMGE_zero, value);
  }
}


void Assembler::fcmgt(const VRegister& vd, const VRegister& vn, double value) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kFP, CPUFeatures::kNEON));
  if (vd.IsLaneSizeH()) {
    VIXL_ASSERT(CPUHas(CPUFeatures::kNEONHalf));
    NEONFP2RegMiscFP16(vd, vn, NEON_FCMGT_H_zero, value);
  } else {
    NEONFP2RegMisc(vd, vn, NEON_FCMGT_zero, value);
  }
}


void Assembler::fcmle(const VRegister& vd, const VRegister& vn, double value) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kFP, CPUFeatures::kNEON));
  if (vd.IsLaneSizeH()) {
    VIXL_ASSERT(CPUHas(CPUFeatures::kNEONHalf));
    NEONFP2RegMiscFP16(vd, vn, NEON_FCMLE_H_zero, value);
  } else {
    NEONFP2RegMisc(vd, vn, NEON_FCMLE_zero, value);
  }
}


void Assembler::fcmlt(const VRegister& vd, const VRegister& vn, double value) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kFP, CPUFeatures::kNEON));
  if (vd.IsLaneSizeH()) {
    VIXL_ASSERT(CPUHas(CPUFeatures::kNEONHalf));
    NEONFP2RegMiscFP16(vd, vn, NEON_FCMLT_H_zero, value);
  } else {
    NEONFP2RegMisc(vd, vn, NEON_FCMLT_zero, value);
  }
}


void Assembler::frecpx(const VRegister& vd, const VRegister& vn) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kFP, CPUFeatures::kNEON));
  VIXL_ASSERT(vd.IsScalar());
  VIXL_ASSERT(AreSameFormat(vd, vn));
  Instr op;
  if (vd.Is1H()) {
    VIXL_ASSERT(CPUHas(CPUFeatures::kNEONHalf));
    op = NEON_FRECPX_H_scalar;
  } else {
    VIXL_ASSERT(vd.Is1S() || vd.Is1D());
    op = NEON_FRECPX_scalar;
  }
  Emit(FPFormat(vd) | op | Rn(vn) | Rd(vd));
}


// clang-format off
#define NEON_3SAME_LIST(V)                                          \
  V(add,      NEON_ADD,      vd.IsVector() || vd.Is1D())            \
  V(addp,     NEON_ADDP,     vd.IsVector() || vd.Is1D())            \
  V(sub,      NEON_SUB,      vd.IsVector() || vd.Is1D())            \
  V(cmeq,     NEON_CMEQ,     vd.IsVector() || vd.Is1D())            \
  V(cmge,     NEON_CMGE,     vd.IsVector() || vd.Is1D())            \
  V(cmgt,     NEON_CMGT,     vd.IsVector() || vd.Is1D())            \
  V(cmhi,     NEON_CMHI,     vd.IsVector() || vd.Is1D())            \
  V(cmhs,     NEON_CMHS,     vd.IsVector() || vd.Is1D())            \
  V(cmtst,    NEON_CMTST,    vd.IsVector() || vd.Is1D())            \
  V(sshl,     NEON_SSHL,     vd.IsVector() || vd.Is1D())            \
  V(ushl,     NEON_USHL,     vd.IsVector() || vd.Is1D())            \
  V(srshl,    NEON_SRSHL,    vd.IsVector() || vd.Is1D())            \
  V(urshl,    NEON_URSHL,    vd.IsVector() || vd.Is1D())            \
  V(sqdmulh,  NEON_SQDMULH,  vd.IsLaneSizeH() || vd.IsLaneSizeS())  \
  V(sqrdmulh, NEON_SQRDMULH, vd.IsLaneSizeH() || vd.IsLaneSizeS())  \
  V(shadd,    NEON_SHADD,    vd.IsVector() && !vd.IsLaneSizeD())    \
  V(uhadd,    NEON_UHADD,    vd.IsVector() && !vd.IsLaneSizeD())    \
  V(srhadd,   NEON_SRHADD,   vd.IsVector() && !vd.IsLaneSizeD())    \
  V(urhadd,   NEON_URHADD,   vd.IsVector() && !vd.IsLaneSizeD())    \
  V(shsub,    NEON_SHSUB,    vd.IsVector() && !vd.IsLaneSizeD())    \
  V(uhsub,    NEON_UHSUB,    vd.IsVector() && !vd.IsLaneSizeD())    \
  V(smax,     NEON_SMAX,     vd.IsVector() && !vd.IsLaneSizeD())    \
  V(smaxp,    NEON_SMAXP,    vd.IsVector() && !vd.IsLaneSizeD())    \
  V(smin,     NEON_SMIN,     vd.IsVector() && !vd.IsLaneSizeD())    \
  V(sminp,    NEON_SMINP,    vd.IsVector() && !vd.IsLaneSizeD())    \
  V(umax,     NEON_UMAX,     vd.IsVector() && !vd.IsLaneSizeD())    \
  V(umaxp,    NEON_UMAXP,    vd.IsVector() && !vd.IsLaneSizeD())    \
  V(umin,     NEON_UMIN,     vd.IsVector() && !vd.IsLaneSizeD())    \
  V(uminp,    NEON_UMINP,    vd.IsVector() && !vd.IsLaneSizeD())    \
  V(saba,     NEON_SABA,     vd.IsVector() && !vd.IsLaneSizeD())    \
  V(sabd,     NEON_SABD,     vd.IsVector() && !vd.IsLaneSizeD())    \
  V(uaba,     NEON_UABA,     vd.IsVector() && !vd.IsLaneSizeD())    \
  V(uabd,     NEON_UABD,     vd.IsVector() && !vd.IsLaneSizeD())    \
  V(mla,      NEON_MLA,      vd.IsVector() && !vd.IsLaneSizeD())    \
  V(mls,      NEON_MLS,      vd.IsVector() && !vd.IsLaneSizeD())    \
  V(mul,      NEON_MUL,      vd.IsVector() && !vd.IsLaneSizeD())    \
  V(and_,     NEON_AND,      vd.Is8B() || vd.Is16B())               \
  V(orr,      NEON_ORR,      vd.Is8B() || vd.Is16B())               \
  V(orn,      NEON_ORN,      vd.Is8B() || vd.Is16B())               \
  V(eor,      NEON_EOR,      vd.Is8B() || vd.Is16B())               \
  V(bic,      NEON_BIC,      vd.Is8B() || vd.Is16B())               \
  V(bit,      NEON_BIT,      vd.Is8B() || vd.Is16B())               \
  V(bif,      NEON_BIF,      vd.Is8B() || vd.Is16B())               \
  V(bsl,      NEON_BSL,      vd.Is8B() || vd.Is16B())               \
  V(pmul,     NEON_PMUL,     vd.Is8B() || vd.Is16B())               \
  V(uqadd,    NEON_UQADD,    true)                                  \
  V(sqadd,    NEON_SQADD,    true)                                  \
  V(uqsub,    NEON_UQSUB,    true)                                  \
  V(sqsub,    NEON_SQSUB,    true)                                  \
  V(sqshl,    NEON_SQSHL,    true)                                  \
  V(uqshl,    NEON_UQSHL,    true)                                  \
  V(sqrshl,   NEON_SQRSHL,   true)                                  \
  V(uqrshl,   NEON_UQRSHL,   true)
// clang-format on

#define DEFINE_ASM_FUNC(FN, OP, AS)          \
  void Assembler::FN(const VRegister& vd,    \
                     const VRegister& vn,    \
                     const VRegister& vm) {  \
    VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); \
    VIXL_ASSERT(AS);                         \
    NEON3Same(vd, vn, vm, OP);               \
  }
NEON_3SAME_LIST(DEFINE_ASM_FUNC)
#undef DEFINE_ASM_FUNC
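
// For reference (added for illustration): each V(...) row in the list above
// expands through DEFINE_ASM_FUNC into a checked emitter. The `add` row, for
// example, becomes:
//   void Assembler::add(const VRegister& vd,
//                       const VRegister& vn,
//                       const VRegister& vm) {
//     VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
//     VIXL_ASSERT(vd.IsVector() || vd.Is1D());
//     NEON3Same(vd, vn, vm, NEON_ADD);
//   }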

// clang-format off
#define NEON_FP3SAME_OP_LIST(V)                                        \
  V(fmulx,   NEON_FMULX,   NEON_FMULX_scalar,   NEON_FMULX_H_scalar)   \
  V(frecps,  NEON_FRECPS,  NEON_FRECPS_scalar,  NEON_FRECPS_H_scalar)  \
  V(frsqrts, NEON_FRSQRTS, NEON_FRSQRTS_scalar, NEON_FRSQRTS_H_scalar) \
  V(fabd,    NEON_FABD,    NEON_FABD_scalar,    NEON_FABD_H_scalar)    \
  V(fmla,    NEON_FMLA,    0,                   0)                     \
  V(fmls,    NEON_FMLS,    0,                   0)                     \
  V(facge,   NEON_FACGE,   NEON_FACGE_scalar,   NEON_FACGE_H_scalar)   \
  V(facgt,   NEON_FACGT,   NEON_FACGT_scalar,   NEON_FACGT_H_scalar)   \
  V(fcmeq,   NEON_FCMEQ,   NEON_FCMEQ_scalar,   NEON_FCMEQ_H_scalar)   \
  V(fcmge,   NEON_FCMGE,   NEON_FCMGE_scalar,   NEON_FCMGE_H_scalar)   \
  V(fcmgt,   NEON_FCMGT,   NEON_FCMGT_scalar,   NEON_FCMGT_H_scalar)   \
  V(faddp,   NEON_FADDP,   0,                   0)                     \
  V(fmaxp,   NEON_FMAXP,   0,                   0)                     \
  V(fminp,   NEON_FMINP,   0,                   0)                     \
  V(fmaxnmp, NEON_FMAXNMP, 0,                   0)                     \
  V(fadd,    NEON_FADD,    FADD,                0)                     \
  V(fsub,    NEON_FSUB,    FSUB,                0)                     \
  V(fmul,    NEON_FMUL,    FMUL,                0)                     \
  V(fdiv,    NEON_FDIV,    FDIV,                0)                     \
  V(fmax,    NEON_FMAX,    FMAX,                0)                     \
  V(fmin,    NEON_FMIN,    FMIN,                0)                     \
  V(fmaxnm,  NEON_FMAXNM,  FMAXNM,              0)                     \
  V(fminnm,  NEON_FMINNM,  FMINNM,              0)                     \
  V(fminnmp, NEON_FMINNMP, 0,                   0)
// clang-format on

// TODO: This macro is complicated because it classifies the instructions in
// the macro list above, and treats each case differently. It could be
// somewhat simpler if we were to split the macro, at the cost of some
// duplication.
#define DEFINE_ASM_FUNC(FN, VEC_OP, SCA_OP, SCA_OP_H)                    \
  void Assembler::FN(const VRegister& vd,                                \
                     const VRegister& vn,                                \
                     const VRegister& vm) {                              \
    VIXL_ASSERT(CPUHas(CPUFeatures::kFP));                               \
    Instr op;                                                            \
    bool is_fp16 = false;                                                \
    if ((SCA_OP != 0) && vd.IsScalar()) {                                \
      if ((SCA_OP_H != 0) && vd.Is1H()) {                                \
        VIXL_ASSERT(CPUHas(CPUFeatures::kNEON, CPUFeatures::kNEONHalf)); \
        is_fp16 = true;                                                  \
        op = SCA_OP_H;                                                   \
      } else {                                                           \
        VIXL_ASSERT(vd.Is1H() || vd.Is1S() || vd.Is1D());                \
        if ((SCA_OP & NEONScalar3SameFMask) == NEONScalar3SameFixed) {   \
          VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));                       \
          if (vd.Is1H()) VIXL_ASSERT(CPUHas(CPUFeatures::kNEONHalf));    \
        } else if (vd.Is1H()) {                                          \
          VIXL_ASSERT(CPUHas(CPUFeatures::kFPHalf));                     \
        }                                                                \
        op = SCA_OP;                                                     \
      }                                                                  \
    } else {                                                             \
      VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));                           \
      VIXL_ASSERT(vd.IsVector());                                        \
      if (vd.Is4H() || vd.Is8H()) {                                      \
        VIXL_ASSERT(CPUHas(CPUFeatures::kNEONHalf));                     \
        is_fp16 = true;                                                  \
        op = VEC_OP##_H;                                                 \
      } else {                                                           \
        VIXL_ASSERT(vd.Is2S() || vd.Is2D() || vd.Is4S());                \
        op = VEC_OP;                                                     \
      }                                                                  \
    }                                                                    \
    if (is_fp16) {                                                       \
      NEON3SameFP16(vd, vn, vm, op);                                     \
    } else {                                                             \
      NEONFP3Same(vd, vn, vm, op);                                       \
    }                                                                    \
  }
NEON_FP3SAME_OP_LIST(DEFINE_ASM_FUNC)
#undef DEFINE_ASM_FUNC


void Assembler::addp(const VRegister& vd, const VRegister& vn) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
  VIXL_ASSERT((vd.Is1D() && vn.Is2D()));
  Emit(SFormat(vd) | NEON_ADDP_scalar | Rn(vn) | Rd(vd));
}


void Assembler::sqrdmlah(const VRegister& vd,
                         const VRegister& vn,
                         const VRegister& vm) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON, CPUFeatures::kRDM));
  VIXL_ASSERT(AreSameFormat(vd, vn, vm));
  VIXL_ASSERT(vd.IsVector() || !vd.IsQ());

  Instr format, op = NEON_SQRDMLAH;
  if (vd.IsScalar()) {
    op |= NEON_Q | NEONScalar;
    format = SFormat(vd);
  } else {
    format = VFormat(vd);
  }

  Emit(format | op | Rm(vm) | Rn(vn) | Rd(vd));
}


void Assembler::sqrdmlsh(const VRegister& vd,
                         const VRegister& vn,
                         const VRegister& vm) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON, CPUFeatures::kRDM));
  VIXL_ASSERT(AreSameFormat(vd, vn, vm));
  VIXL_ASSERT(vd.IsVector() || !vd.IsQ());

  Instr format, op = NEON_SQRDMLSH;
  if (vd.IsScalar()) {
    op |= NEON_Q | NEONScalar;
    format = SFormat(vd);
  } else {
    format = VFormat(vd);
  }

  Emit(format | op | Rm(vm) | Rn(vn) | Rd(vd));
}


void Assembler::sdot(const VRegister& vd,
                     const VRegister& vn,
                     const VRegister& vm) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON, CPUFeatures::kDotProduct));
  VIXL_ASSERT(AreSameFormat(vn, vm));
  VIXL_ASSERT((vd.Is2S() && vn.Is8B()) || (vd.Is4S() && vn.Is16B()));

  Emit(VFormat(vd) | NEON_SDOT | Rm(vm) | Rn(vn) | Rd(vd));
}


void Assembler::udot(const VRegister& vd,
                     const VRegister& vn,
                     const VRegister& vm) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON, CPUFeatures::kDotProduct));
  VIXL_ASSERT(AreSameFormat(vn, vm));
  VIXL_ASSERT((vd.Is2S() && vn.Is8B()) || (vd.Is4S() && vn.Is16B()));

  Emit(VFormat(vd) | NEON_UDOT | Rm(vm) | Rn(vn) | Rd(vd));
}


void Assembler::faddp(const VRegister& vd, const VRegister& vn) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kFP, CPUFeatures::kNEON));
  VIXL_ASSERT((vd.Is1S() && vn.Is2S()) || (vd.Is1D() && vn.Is2D()) ||
              (vd.Is1H() && vn.Is2H()));
  if (vd.Is1H()) {
    VIXL_ASSERT(CPUHas(CPUFeatures::kNEONHalf));
    Emit(NEON_FADDP_h_scalar | Rn(vn) | Rd(vd));
  } else {
    Emit(FPFormat(vd) | NEON_FADDP_scalar | Rn(vn) | Rd(vd));
  }
}


void Assembler::fmaxp(const VRegister& vd, const VRegister& vn) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kFP, CPUFeatures::kNEON));
  VIXL_ASSERT((vd.Is1S() && vn.Is2S()) || (vd.Is1D() && vn.Is2D()) ||
              (vd.Is1H() && vn.Is2H()));
  if (vd.Is1H()) {
    VIXL_ASSERT(CPUHas(CPUFeatures::kNEONHalf));
    Emit(NEON_FMAXP_h_scalar | Rn(vn) | Rd(vd));
  } else {
    Emit(FPFormat(vd) | NEON_FMAXP_scalar | Rn(vn) | Rd(vd));
  }
}


void Assembler::fminp(const VRegister& vd, const VRegister& vn) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kFP, CPUFeatures::kNEON));
  VIXL_ASSERT((vd.Is1S() && vn.Is2S()) || (vd.Is1D() && vn.Is2D()) ||
              (vd.Is1H() && vn.Is2H()));
  if (vd.Is1H()) {
    VIXL_ASSERT(CPUHas(CPUFeatures::kNEONHalf));
    Emit(NEON_FMINP_h_scalar | Rn(vn) | Rd(vd));
  } else {
    Emit(FPFormat(vd) | NEON_FMINP_scalar | Rn(vn) | Rd(vd));
  }
}


void Assembler::fmaxnmp(const VRegister& vd, const VRegister& vn) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kFP, CPUFeatures::kNEON));
  VIXL_ASSERT((vd.Is1S() && vn.Is2S()) || (vd.Is1D() && vn.Is2D()) ||
              (vd.Is1H() && vn.Is2H()));
  if (vd.Is1H()) {
    VIXL_ASSERT(CPUHas(CPUFeatures::kNEONHalf));
    Emit(NEON_FMAXNMP_h_scalar | Rn(vn) | Rd(vd));
  } else {
    Emit(FPFormat(vd) | NEON_FMAXNMP_scalar | Rn(vn) | Rd(vd));
  }
}


void Assembler::fminnmp(const VRegister& vd, const VRegister& vn) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kFP, CPUFeatures::kNEON));
  VIXL_ASSERT((vd.Is1S() && vn.Is2S()) || (vd.Is1D() && vn.Is2D()) ||
              (vd.Is1H() && vn.Is2H()));
  if (vd.Is1H()) {
    VIXL_ASSERT(CPUHas(CPUFeatures::kNEONHalf));
    Emit(NEON_FMINNMP_h_scalar | Rn(vn) | Rd(vd));
  } else {
    Emit(FPFormat(vd) | NEON_FMINNMP_scalar | Rn(vn) | Rd(vd));
  }
}
floating-point complex multiply accumulate. 3692 void Assembler::fcmla(const VRegister& vd, 3693 const VRegister& vn, 3694 const VRegister& vm, 3695 int vm_index, 3696 int rot) { 3697 VIXL_ASSERT(CPUHas(CPUFeatures::kFP, CPUFeatures::kNEON, CPUFeatures::kFcma)); 3698 VIXL_ASSERT(vd.IsVector() && AreSameFormat(vd, vn)); 3699 VIXL_ASSERT((vm.IsH() && (vd.Is8H() || vd.Is4H())) || 3700 (vm.IsS() && vd.Is4S())); 3701 if (vd.IsLaneSizeH()) VIXL_ASSERT(CPUHas(CPUFeatures::kNEONHalf)); 3702 int index_num_bits = vd.Is4S() ? 1 : 2; 3703 Emit(VFormat(vd) | Rm(vm) | NEON_FCMLA_byelement | 3704 ImmNEONHLM(vm_index, index_num_bits) | ImmRotFcmlaSca(rot) | Rn(vn) | 3705 Rd(vd)); 3706 } 3707 3708 3709 void Assembler::fcmla(const VRegister& vd, 3710 const VRegister& vn, 3711 const VRegister& vm, 3712 int rot) { 3713 VIXL_ASSERT(CPUHas(CPUFeatures::kFP, CPUFeatures::kNEON, CPUFeatures::kFcma)); 3714 VIXL_ASSERT(AreSameFormat(vd, vn, vm)); 3715 VIXL_ASSERT(vd.IsVector() && !vd.IsLaneSizeB()); 3716 if (vd.IsLaneSizeH()) VIXL_ASSERT(CPUHas(CPUFeatures::kNEONHalf)); 3717 Emit(VFormat(vd) | Rm(vm) | NEON_FCMLA | ImmRotFcmlaVec(rot) | Rn(vn) | 3718 Rd(vd)); 3719 } 3720 3721 3722 // v8.3 complex numbers - floating-point complex add. 3723 void Assembler::fcadd(const VRegister& vd, 3724 const VRegister& vn, 3725 const VRegister& vm, 3726 int rot) { 3727 VIXL_ASSERT(CPUHas(CPUFeatures::kFP, CPUFeatures::kNEON, CPUFeatures::kFcma)); 3728 VIXL_ASSERT(AreSameFormat(vd, vn, vm)); 3729 VIXL_ASSERT(vd.IsVector() && !vd.IsLaneSizeB()); 3730 if (vd.IsLaneSizeH()) VIXL_ASSERT(CPUHas(CPUFeatures::kNEONHalf)); 3731 Emit(VFormat(vd) | Rm(vm) | NEON_FCADD | ImmRotFcadd(rot) | Rn(vn) | Rd(vd)); 3732 } 3733 3734 3735 void Assembler::orr(const VRegister& vd, const int imm8, const int left_shift) { 3736 VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); 3737 NEONModifiedImmShiftLsl(vd, imm8, left_shift, NEONModifiedImmediate_ORR); 3738 } 3739 3740 3741 void Assembler::mov(const VRegister& vd, const VRegister& vn) { 3742 VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); 3743 VIXL_ASSERT(AreSameFormat(vd, vn)); 3744 if (vd.IsD()) { 3745 orr(vd.V8B(), vn.V8B(), vn.V8B()); 3746 } else { 3747 VIXL_ASSERT(vd.IsQ()); 3748 orr(vd.V16B(), vn.V16B(), vn.V16B()); 3749 } 3750 } 3751 3752 3753 void Assembler::bic(const VRegister& vd, const int imm8, const int left_shift) { 3754 VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); 3755 NEONModifiedImmShiftLsl(vd, imm8, left_shift, NEONModifiedImmediate_BIC); 3756 } 3757 3758 3759 void Assembler::movi(const VRegister& vd, 3760 const uint64_t imm, 3761 Shift shift, 3762 const int shift_amount) { 3763 VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); 3764 VIXL_ASSERT((shift == LSL) || (shift == MSL)); 3765 if (vd.Is2D() || vd.Is1D()) { 3766 VIXL_ASSERT(shift_amount == 0); 3767 int imm8 = 0; 3768 for (int i = 0; i < 8; ++i) { 3769 int byte = (imm >> (i * 8)) & 0xff; 3770 VIXL_ASSERT((byte == 0) || (byte == 0xff)); 3771 if (byte == 0xff) { 3772 imm8 |= (1 << i); 3773 } 3774 } 3775 int q = vd.Is2D() ? 
NEON_Q : 0; 3776 Emit(q | NEONModImmOp(1) | NEONModifiedImmediate_MOVI | 3777 ImmNEONabcdefgh(imm8) | NEONCmode(0xe) | Rd(vd)); 3778 } else if (shift == LSL) { 3779 VIXL_ASSERT(IsUint8(imm)); 3780 NEONModifiedImmShiftLsl(vd, 3781 static_cast<int>(imm), 3782 shift_amount, 3783 NEONModifiedImmediate_MOVI); 3784 } else { 3785 VIXL_ASSERT(IsUint8(imm)); 3786 NEONModifiedImmShiftMsl(vd, 3787 static_cast<int>(imm), 3788 shift_amount, 3789 NEONModifiedImmediate_MOVI); 3790 } 3791 } 3792 3793 3794 void Assembler::mvn(const VRegister& vd, const VRegister& vn) { 3795 VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); 3796 VIXL_ASSERT(AreSameFormat(vd, vn)); 3797 if (vd.IsD()) { 3798 not_(vd.V8B(), vn.V8B()); 3799 } else { 3800 VIXL_ASSERT(vd.IsQ()); 3801 not_(vd.V16B(), vn.V16B()); 3802 } 3803 } 3804 3805 3806 void Assembler::mvni(const VRegister& vd, 3807 const int imm8, 3808 Shift shift, 3809 const int shift_amount) { 3810 VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); 3811 VIXL_ASSERT((shift == LSL) || (shift == MSL)); 3812 if (shift == LSL) { 3813 NEONModifiedImmShiftLsl(vd, imm8, shift_amount, NEONModifiedImmediate_MVNI); 3814 } else { 3815 NEONModifiedImmShiftMsl(vd, imm8, shift_amount, NEONModifiedImmediate_MVNI); 3816 } 3817 } 3818 3819 3820 void Assembler::NEONFPByElement(const VRegister& vd, 3821 const VRegister& vn, 3822 const VRegister& vm, 3823 int vm_index, 3824 NEONByIndexedElementOp vop, 3825 NEONByIndexedElementOp vop_half) { 3826 VIXL_ASSERT(AreSameFormat(vd, vn)); 3827 VIXL_ASSERT((vd.Is2S() && vm.Is1S()) || (vd.Is4S() && vm.Is1S()) || 3828 (vd.Is1S() && vm.Is1S()) || (vd.Is2D() && vm.Is1D()) || 3829 (vd.Is1D() && vm.Is1D()) || (vd.Is4H() && vm.Is1H()) || 3830 (vd.Is8H() && vm.Is1H()) || (vd.Is1H() && vm.Is1H())); 3831 VIXL_ASSERT((vm.Is1S() && (vm_index < 4)) || (vm.Is1D() && (vm_index < 2)) || 3832 (vm.Is1H() && (vm.GetCode() < 16) && (vm_index < 8))); 3833 3834 Instr op = vop; 3835 int index_num_bits; 3836 if (vm.Is1D()) { 3837 index_num_bits = 1; 3838 } else if (vm.Is1S()) { 3839 index_num_bits = 2; 3840 } else { 3841 index_num_bits = 3; 3842 op = vop_half; 3843 } 3844 3845 if (vd.IsScalar()) { 3846 op |= NEON_Q | NEONScalar; 3847 } 3848 3849 if (!vm.Is1H()) { 3850 op |= FPFormat(vd); 3851 } else if (vd.Is8H()) { 3852 op |= NEON_Q; 3853 } 3854 3855 Emit(op | ImmNEONHLM(vm_index, index_num_bits) | Rm(vm) | Rn(vn) | Rd(vd)); 3856 } 3857 3858 3859 void Assembler::NEONByElement(const VRegister& vd, 3860 const VRegister& vn, 3861 const VRegister& vm, 3862 int vm_index, 3863 NEONByIndexedElementOp vop) { 3864 VIXL_ASSERT(AreSameFormat(vd, vn)); 3865 VIXL_ASSERT((vd.Is4H() && vm.Is1H()) || (vd.Is8H() && vm.Is1H()) || 3866 (vd.Is1H() && vm.Is1H()) || (vd.Is2S() && vm.Is1S()) || 3867 (vd.Is4S() && vm.Is1S()) || (vd.Is1S() && vm.Is1S())); 3868 VIXL_ASSERT((vm.Is1H() && (vm.GetCode() < 16) && (vm_index < 8)) || 3869 (vm.Is1S() && (vm_index < 4))); 3870 3871 Instr format, op = vop; 3872 int index_num_bits = vm.Is1H() ? 
3 : 2; 3873 if (vd.IsScalar()) { 3874 op |= NEONScalar | NEON_Q; 3875 format = SFormat(vn); 3876 } else { 3877 format = VFormat(vn); 3878 } 3879 Emit(format | op | ImmNEONHLM(vm_index, index_num_bits) | Rm(vm) | Rn(vn) | 3880 Rd(vd)); 3881 } 3882 3883 3884 void Assembler::NEONByElementL(const VRegister& vd, 3885 const VRegister& vn, 3886 const VRegister& vm, 3887 int vm_index, 3888 NEONByIndexedElementOp vop) { 3889 VIXL_ASSERT((vd.Is4S() && vn.Is4H() && vm.Is1H()) || 3890 (vd.Is4S() && vn.Is8H() && vm.Is1H()) || 3891 (vd.Is1S() && vn.Is1H() && vm.Is1H()) || 3892 (vd.Is2D() && vn.Is2S() && vm.Is1S()) || 3893 (vd.Is2D() && vn.Is4S() && vm.Is1S()) || 3894 (vd.Is1D() && vn.Is1S() && vm.Is1S())); 3895 3896 VIXL_ASSERT((vm.Is1H() && (vm.GetCode() < 16) && (vm_index < 8)) || 3897 (vm.Is1S() && (vm_index < 4))); 3898 3899 Instr format, op = vop; 3900 int index_num_bits = vm.Is1H() ? 3 : 2; 3901 if (vd.IsScalar()) { 3902 op |= NEONScalar | NEON_Q; 3903 format = SFormat(vn); 3904 } else { 3905 format = VFormat(vn); 3906 } 3907 Emit(format | op | ImmNEONHLM(vm_index, index_num_bits) | Rm(vm) | Rn(vn) | 3908 Rd(vd)); 3909 } 3910 3911 3912 void Assembler::sdot(const VRegister& vd, 3913 const VRegister& vn, 3914 const VRegister& vm, 3915 int vm_index) { 3916 VIXL_ASSERT(CPUHas(CPUFeatures::kNEON, CPUFeatures::kDotProduct)); 3917 VIXL_ASSERT((vd.Is2S() && vn.Is8B() && vm.Is1S4B()) || 3918 (vd.Is4S() && vn.Is16B() && vm.Is1S4B())); 3919 3920 int index_num_bits = 2; 3921 Emit(VFormat(vd) | NEON_SDOT_byelement | 3922 ImmNEONHLM(vm_index, index_num_bits) | Rm(vm) | Rn(vn) | Rd(vd)); 3923 } 3924 3925 3926 void Assembler::udot(const VRegister& vd, 3927 const VRegister& vn, 3928 const VRegister& vm, 3929 int vm_index) { 3930 VIXL_ASSERT(CPUHas(CPUFeatures::kNEON, CPUFeatures::kDotProduct)); 3931 VIXL_ASSERT((vd.Is2S() && vn.Is8B() && vm.Is1S4B()) || 3932 (vd.Is4S() && vn.Is16B() && vm.Is1S4B())); 3933 3934 int index_num_bits = 2; 3935 Emit(VFormat(vd) | NEON_UDOT_byelement | 3936 ImmNEONHLM(vm_index, index_num_bits) | Rm(vm) | Rn(vn) | Rd(vd)); 3937 } 3938 3939 3940 // clang-format off 3941 #define NEON_BYELEMENT_LIST(V) \ 3942 V(mul, NEON_MUL_byelement, vn.IsVector()) \ 3943 V(mla, NEON_MLA_byelement, vn.IsVector()) \ 3944 V(mls, NEON_MLS_byelement, vn.IsVector()) \ 3945 V(sqdmulh, NEON_SQDMULH_byelement, true) \ 3946 V(sqrdmulh, NEON_SQRDMULH_byelement, true) \ 3947 // clang-format on 3948 3949 #define DEFINE_ASM_FUNC(FN, OP, AS) \ 3950 void Assembler::FN(const VRegister& vd, \ 3951 const VRegister& vn, \ 3952 const VRegister& vm, \ 3953 int vm_index) { \ 3954 VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); \ 3955 VIXL_ASSERT(AS); \ 3956 NEONByElement(vd, vn, vm, vm_index, OP); \ 3957 } 3958 NEON_BYELEMENT_LIST(DEFINE_ASM_FUNC) 3959 #undef DEFINE_ASM_FUNC 3960 3961 3962 // clang-format off 3963 #define NEON_BYELEMENT_RDM_LIST(V) \ 3964 V(sqrdmlah, NEON_SQRDMLAH_byelement) \ 3965 V(sqrdmlsh, NEON_SQRDMLSH_byelement) 3966 // clang-format on 3967 3968 #define DEFINE_ASM_FUNC(FN, OP) \ 3969 void Assembler::FN(const VRegister& vd, \ 3970 const VRegister& vn, \ 3971 const VRegister& vm, \ 3972 int vm_index) { \ 3973 VIXL_ASSERT(CPUHas(CPUFeatures::kNEON, CPUFeatures::kRDM)); \ 3974 NEONByElement(vd, vn, vm, vm_index, OP); \ 3975 } 3976 NEON_BYELEMENT_RDM_LIST(DEFINE_ASM_FUNC) 3977 #undef DEFINE_ASM_FUNC 3978 3979 3980 // clang-format off 3981 #define NEON_FPBYELEMENT_LIST(V) \ 3982 V(fmul, NEON_FMUL_byelement, NEON_FMUL_H_byelement) \ 3983 V(fmla, NEON_FMLA_byelement, NEON_FMLA_H_byelement) \ 3984 V(fmls, 
NEON_FMLS_byelement, NEON_FMLS_H_byelement) \ 3985 V(fmulx, NEON_FMULX_byelement, NEON_FMULX_H_byelement) 3986 // clang-format on 3987 3988 #define DEFINE_ASM_FUNC(FN, OP, OP_H) \ 3989 void Assembler::FN(const VRegister& vd, \ 3990 const VRegister& vn, \ 3991 const VRegister& vm, \ 3992 int vm_index) { \ 3993 VIXL_ASSERT(CPUHas(CPUFeatures::kFP, CPUFeatures::kNEON)); \ 3994 if (vd.IsLaneSizeH()) VIXL_ASSERT(CPUHas(CPUFeatures::kNEONHalf)); \ 3995 NEONFPByElement(vd, vn, vm, vm_index, OP, OP_H); \ 3996 } 3997 NEON_FPBYELEMENT_LIST(DEFINE_ASM_FUNC) 3998 #undef DEFINE_ASM_FUNC 3999 4000 4001 // clang-format off 4002 #define NEON_BYELEMENT_LONG_LIST(V) \ 4003 V(sqdmull, NEON_SQDMULL_byelement, vn.IsScalar() || vn.IsD()) \ 4004 V(sqdmull2, NEON_SQDMULL_byelement, vn.IsVector() && vn.IsQ()) \ 4005 V(sqdmlal, NEON_SQDMLAL_byelement, vn.IsScalar() || vn.IsD()) \ 4006 V(sqdmlal2, NEON_SQDMLAL_byelement, vn.IsVector() && vn.IsQ()) \ 4007 V(sqdmlsl, NEON_SQDMLSL_byelement, vn.IsScalar() || vn.IsD()) \ 4008 V(sqdmlsl2, NEON_SQDMLSL_byelement, vn.IsVector() && vn.IsQ()) \ 4009 V(smull, NEON_SMULL_byelement, vn.IsVector() && vn.IsD()) \ 4010 V(smull2, NEON_SMULL_byelement, vn.IsVector() && vn.IsQ()) \ 4011 V(umull, NEON_UMULL_byelement, vn.IsVector() && vn.IsD()) \ 4012 V(umull2, NEON_UMULL_byelement, vn.IsVector() && vn.IsQ()) \ 4013 V(smlal, NEON_SMLAL_byelement, vn.IsVector() && vn.IsD()) \ 4014 V(smlal2, NEON_SMLAL_byelement, vn.IsVector() && vn.IsQ()) \ 4015 V(umlal, NEON_UMLAL_byelement, vn.IsVector() && vn.IsD()) \ 4016 V(umlal2, NEON_UMLAL_byelement, vn.IsVector() && vn.IsQ()) \ 4017 V(smlsl, NEON_SMLSL_byelement, vn.IsVector() && vn.IsD()) \ 4018 V(smlsl2, NEON_SMLSL_byelement, vn.IsVector() && vn.IsQ()) \ 4019 V(umlsl, NEON_UMLSL_byelement, vn.IsVector() && vn.IsD()) \ 4020 V(umlsl2, NEON_UMLSL_byelement, vn.IsVector() && vn.IsQ()) 4021 // clang-format on 4022 4023 4024 #define DEFINE_ASM_FUNC(FN, OP, AS) \ 4025 void Assembler::FN(const VRegister& vd, \ 4026 const VRegister& vn, \ 4027 const VRegister& vm, \ 4028 int vm_index) { \ 4029 VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); \ 4030 VIXL_ASSERT(AS); \ 4031 NEONByElementL(vd, vn, vm, vm_index, OP); \ 4032 } 4033 NEON_BYELEMENT_LONG_LIST(DEFINE_ASM_FUNC) 4034 #undef DEFINE_ASM_FUNC 4035 4036 4037 void Assembler::suqadd(const VRegister& vd, const VRegister& vn) { 4038 VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); 4039 NEON2RegMisc(vd, vn, NEON_SUQADD); 4040 } 4041 4042 4043 void Assembler::usqadd(const VRegister& vd, const VRegister& vn) { 4044 VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); 4045 NEON2RegMisc(vd, vn, NEON_USQADD); 4046 } 4047 4048 4049 void Assembler::abs(const VRegister& vd, const VRegister& vn) { 4050 VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); 4051 VIXL_ASSERT(vd.IsVector() || vd.Is1D()); 4052 NEON2RegMisc(vd, vn, NEON_ABS); 4053 } 4054 4055 4056 void Assembler::sqabs(const VRegister& vd, const VRegister& vn) { 4057 VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); 4058 NEON2RegMisc(vd, vn, NEON_SQABS); 4059 } 4060 4061 4062 void Assembler::neg(const VRegister& vd, const VRegister& vn) { 4063 VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); 4064 VIXL_ASSERT(vd.IsVector() || vd.Is1D()); 4065 NEON2RegMisc(vd, vn, NEON_NEG); 4066 } 4067 4068 4069 void Assembler::sqneg(const VRegister& vd, const VRegister& vn) { 4070 VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); 4071 NEON2RegMisc(vd, vn, NEON_SQNEG); 4072 } 4073 4074 4075 void Assembler::NEONXtn(const VRegister& vd, 4076 const VRegister& vn, 4077 NEON2RegMiscOp vop) { 4078 VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); 
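  // NEONXtn is shared by the narrowing instructions below (xtn, sqxtn,
  // sqxtun, uqxtn and their <instr>2 variants): a scalar destination selects
  // the NEONScalar encoding, while a vector destination uses the D/Q layout
  // of vd.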
  Instr format, op = vop;
  if (vd.IsScalar()) {
    VIXL_ASSERT((vd.Is1B() && vn.Is1H()) || (vd.Is1H() && vn.Is1S()) ||
                (vd.Is1S() && vn.Is1D()));
    op |= NEON_Q | NEONScalar;
    format = SFormat(vd);
  } else {
    VIXL_ASSERT((vd.Is8B() && vn.Is8H()) || (vd.Is4H() && vn.Is4S()) ||
                (vd.Is2S() && vn.Is2D()) || (vd.Is16B() && vn.Is8H()) ||
                (vd.Is8H() && vn.Is4S()) || (vd.Is4S() && vn.Is2D()));
    format = VFormat(vd);
  }
  Emit(format | op | Rn(vn) | Rd(vd));
}


void Assembler::xtn(const VRegister& vd, const VRegister& vn) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
  VIXL_ASSERT(vd.IsVector() && vd.IsD());
  NEONXtn(vd, vn, NEON_XTN);
}


void Assembler::xtn2(const VRegister& vd, const VRegister& vn) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
  VIXL_ASSERT(vd.IsVector() && vd.IsQ());
  NEONXtn(vd, vn, NEON_XTN);
}


void Assembler::sqxtn(const VRegister& vd, const VRegister& vn) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
  VIXL_ASSERT(vd.IsScalar() || vd.IsD());
  NEONXtn(vd, vn, NEON_SQXTN);
}


void Assembler::sqxtn2(const VRegister& vd, const VRegister& vn) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
  VIXL_ASSERT(vd.IsVector() && vd.IsQ());
  NEONXtn(vd, vn, NEON_SQXTN);
}


void Assembler::sqxtun(const VRegister& vd, const VRegister& vn) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
  VIXL_ASSERT(vd.IsScalar() || vd.IsD());
  NEONXtn(vd, vn, NEON_SQXTUN);
}


void Assembler::sqxtun2(const VRegister& vd, const VRegister& vn) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
  VIXL_ASSERT(vd.IsVector() && vd.IsQ());
  NEONXtn(vd, vn, NEON_SQXTUN);
}


void Assembler::uqxtn(const VRegister& vd, const VRegister& vn) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
  VIXL_ASSERT(vd.IsScalar() || vd.IsD());
  NEONXtn(vd, vn, NEON_UQXTN);
}


void Assembler::uqxtn2(const VRegister& vd, const VRegister& vn) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
  VIXL_ASSERT(vd.IsVector() && vd.IsQ());
  NEONXtn(vd, vn, NEON_UQXTN);
}


// NEON NOT and RBIT are distinguished by bit 22, the bottom bit of "size".
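// For example (illustrative): not_(v0.V8B(), v1.V8B()) leaves bit 22 clear
// and disassembles as "mvn v0.8b, v1.8b", while rbit(v0.V8B(), v1.V8B())
// sets bit 22 via (1 << NEONSize_offset) to select "rbit v0.8b, v1.8b".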
void Assembler::not_(const VRegister& vd, const VRegister& vn) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
  VIXL_ASSERT(AreSameFormat(vd, vn));
  VIXL_ASSERT(vd.Is8B() || vd.Is16B());
  Emit(VFormat(vd) | NEON_RBIT_NOT | Rn(vn) | Rd(vd));
}


void Assembler::rbit(const VRegister& vd, const VRegister& vn) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
  VIXL_ASSERT(AreSameFormat(vd, vn));
  VIXL_ASSERT(vd.Is8B() || vd.Is16B());
  Emit(VFormat(vn) | (1 << NEONSize_offset) | NEON_RBIT_NOT | Rn(vn) | Rd(vd));
}


void Assembler::ext(const VRegister& vd,
                    const VRegister& vn,
                    const VRegister& vm,
                    int index) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
  VIXL_ASSERT(AreSameFormat(vd, vn, vm));
  VIXL_ASSERT(vd.Is8B() || vd.Is16B());
  VIXL_ASSERT((0 <= index) && (index < vd.GetLanes()));
  Emit(VFormat(vd) | NEON_EXT | Rm(vm) | ImmNEONExt(index) | Rn(vn) | Rd(vd));
}


void Assembler::dup(const VRegister& vd, const VRegister& vn, int vn_index) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
  Instr q, scalar;

  // We support vn arguments of the form vn.VxT() or vn.T(), where x is the
  // number of lanes, and T is b, h, s or d.
  int lane_size = vn.GetLaneSizeInBytes();
  NEONFormatField format;
  switch (lane_size) {
    case 1:
      format = NEON_16B;
      break;
    case 2:
      format = NEON_8H;
      break;
    case 4:
      format = NEON_4S;
      break;
    default:
      VIXL_ASSERT(lane_size == 8);
      format = NEON_2D;
      break;
  }

  if (vd.IsScalar()) {
    q = NEON_Q;
    scalar = NEONScalar;
  } else {
    VIXL_ASSERT(!vd.Is1D());
    q = vd.IsD() ? 0 : NEON_Q;
    scalar = 0;
  }
  Emit(q | scalar | NEON_DUP_ELEMENT | ImmNEON5(format, vn_index) | Rn(vn) |
       Rd(vd));
}


void Assembler::mov(const VRegister& vd, const VRegister& vn, int vn_index) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
  VIXL_ASSERT(vd.IsScalar());
  dup(vd, vn, vn_index);
}


void Assembler::dup(const VRegister& vd, const Register& rn) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
  VIXL_ASSERT(!vd.Is1D());
  VIXL_ASSERT(vd.Is2D() == rn.IsX());
  int q = vd.IsD() ? 0 : NEON_Q;
  Emit(q | NEON_DUP_GENERAL | ImmNEON5(VFormat(vd), 0) | Rn(rn) | Rd(vd));
}


void Assembler::ins(const VRegister& vd,
                    int vd_index,
                    const VRegister& vn,
                    int vn_index) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
  VIXL_ASSERT(AreSameFormat(vd, vn));
  // We support vd arguments of the form vd.VxT() or vd.T(), where x is the
  // number of lanes, and T is b, h, s or d.
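  // For example (illustrative): ins(v0.V4S(), 1, v1.V4S(), 0) copies lane 0
  // of v1 into lane 1 of v0 and leaves the other lanes of v0 unchanged
  // (the "mov v0.s[1], v1.s[0]" alias).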
  int lane_size = vd.GetLaneSizeInBytes();
  NEONFormatField format;
  switch (lane_size) {
    case 1:
      format = NEON_16B;
      break;
    case 2:
      format = NEON_8H;
      break;
    case 4:
      format = NEON_4S;
      break;
    default:
      VIXL_ASSERT(lane_size == 8);
      format = NEON_2D;
      break;
  }

  VIXL_ASSERT(
      (0 <= vd_index) &&
      (vd_index < LaneCountFromFormat(static_cast<VectorFormat>(format))));
  VIXL_ASSERT(
      (0 <= vn_index) &&
      (vn_index < LaneCountFromFormat(static_cast<VectorFormat>(format))));
  Emit(NEON_INS_ELEMENT | ImmNEON5(format, vd_index) |
       ImmNEON4(format, vn_index) | Rn(vn) | Rd(vd));
}


void Assembler::mov(const VRegister& vd,
                    int vd_index,
                    const VRegister& vn,
                    int vn_index) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
  ins(vd, vd_index, vn, vn_index);
}


void Assembler::ins(const VRegister& vd, int vd_index, const Register& rn) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
  // We support vd arguments of the form vd.VxT() or vd.T(), where x is the
  // number of lanes, and T is b, h, s or d.
  int lane_size = vd.GetLaneSizeInBytes();
  NEONFormatField format;
  switch (lane_size) {
    case 1:
      format = NEON_16B;
      VIXL_ASSERT(rn.IsW());
      break;
    case 2:
      format = NEON_8H;
      VIXL_ASSERT(rn.IsW());
      break;
    case 4:
      format = NEON_4S;
      VIXL_ASSERT(rn.IsW());
      break;
    default:
      VIXL_ASSERT(lane_size == 8);
      VIXL_ASSERT(rn.IsX());
      format = NEON_2D;
      break;
  }

  VIXL_ASSERT(
      (0 <= vd_index) &&
      (vd_index < LaneCountFromFormat(static_cast<VectorFormat>(format))));
  Emit(NEON_INS_GENERAL | ImmNEON5(format, vd_index) | Rn(rn) | Rd(vd));
}


void Assembler::mov(const VRegister& vd, int vd_index, const Register& rn) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
  ins(vd, vd_index, rn);
}


void Assembler::umov(const Register& rd, const VRegister& vn, int vn_index) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
  // We support vn arguments of the form vn.VxT() or vn.T(), where x is the
  // number of lanes, and T is b, h, s or d.
  int lane_size = vn.GetLaneSizeInBytes();
  NEONFormatField format;
  Instr q = 0;
  switch (lane_size) {
    case 1:
      format = NEON_16B;
      VIXL_ASSERT(rd.IsW());
      break;
    case 2:
      format = NEON_8H;
      VIXL_ASSERT(rd.IsW());
      break;
    case 4:
      format = NEON_4S;
      VIXL_ASSERT(rd.IsW());
      break;
    default:
      VIXL_ASSERT(lane_size == 8);
      VIXL_ASSERT(rd.IsX());
      format = NEON_2D;
      q = NEON_Q;
      break;
  }

  VIXL_ASSERT(
      (0 <= vn_index) &&
      (vn_index < LaneCountFromFormat(static_cast<VectorFormat>(format))));
  Emit(q | NEON_UMOV | ImmNEON5(format, vn_index) | Rn(vn) | Rd(rd));
}


void Assembler::mov(const Register& rd, const VRegister& vn, int vn_index) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
  VIXL_ASSERT(vn.GetSizeInBytes() >= 4);
  umov(rd, vn, vn_index);
}


void Assembler::smov(const Register& rd, const VRegister& vn, int vn_index) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
  // We support vn arguments of the form vn.VxT() or vn.T(), where x is the
  // number of lanes, and T is b, h, s.
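  // For example (illustrative): smov(x0, v1.V8H(), 2) sign-extends the
  // 16-bit lane 2 of v1 into x0. D-sized lanes are deliberately excluded:
  // a 64-bit lane moved to an X register needs no extension, so umov (or
  // its mov alias) covers that case.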
  int lane_size = vn.GetLaneSizeInBytes();
  NEONFormatField format;
  Instr q = 0;
  VIXL_ASSERT(lane_size != 8);
  switch (lane_size) {
    case 1:
      format = NEON_16B;
      break;
    case 2:
      format = NEON_8H;
      break;
    default:
      VIXL_ASSERT(lane_size == 4);
      VIXL_ASSERT(rd.IsX());
      format = NEON_4S;
      break;
  }
  q = rd.IsW() ? 0 : NEON_Q;
  VIXL_ASSERT(
      (0 <= vn_index) &&
      (vn_index < LaneCountFromFormat(static_cast<VectorFormat>(format))));
  Emit(q | NEON_SMOV | ImmNEON5(format, vn_index) | Rn(vn) | Rd(rd));
}


void Assembler::cls(const VRegister& vd, const VRegister& vn) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
  VIXL_ASSERT(AreSameFormat(vd, vn));
  VIXL_ASSERT(!vd.Is1D() && !vd.Is2D());
  Emit(VFormat(vn) | NEON_CLS | Rn(vn) | Rd(vd));
}


void Assembler::clz(const VRegister& vd, const VRegister& vn) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
  VIXL_ASSERT(AreSameFormat(vd, vn));
  VIXL_ASSERT(!vd.Is1D() && !vd.Is2D());
  Emit(VFormat(vn) | NEON_CLZ | Rn(vn) | Rd(vd));
}


void Assembler::cnt(const VRegister& vd, const VRegister& vn) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
  VIXL_ASSERT(AreSameFormat(vd, vn));
  VIXL_ASSERT(vd.Is8B() || vd.Is16B());
  Emit(VFormat(vn) | NEON_CNT | Rn(vn) | Rd(vd));
}


void Assembler::rev16(const VRegister& vd, const VRegister& vn) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
  VIXL_ASSERT(AreSameFormat(vd, vn));
  VIXL_ASSERT(vd.Is8B() || vd.Is16B());
  Emit(VFormat(vn) | NEON_REV16 | Rn(vn) | Rd(vd));
}


void Assembler::rev32(const VRegister& vd, const VRegister& vn) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
  VIXL_ASSERT(AreSameFormat(vd, vn));
  VIXL_ASSERT(vd.Is8B() || vd.Is16B() || vd.Is4H() || vd.Is8H());
  Emit(VFormat(vn) | NEON_REV32 | Rn(vn) | Rd(vd));
}


void Assembler::rev64(const VRegister& vd, const VRegister& vn) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
  VIXL_ASSERT(AreSameFormat(vd, vn));
  VIXL_ASSERT(!vd.Is1D() && !vd.Is2D());
  Emit(VFormat(vn) | NEON_REV64 | Rn(vn) | Rd(vd));
}


void Assembler::ursqrte(const VRegister& vd, const VRegister& vn) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
  VIXL_ASSERT(AreSameFormat(vd, vn));
  VIXL_ASSERT(vd.Is2S() || vd.Is4S());
  Emit(VFormat(vn) | NEON_URSQRTE | Rn(vn) | Rd(vd));
}


void Assembler::urecpe(const VRegister& vd, const VRegister& vn) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
  VIXL_ASSERT(AreSameFormat(vd, vn));
  VIXL_ASSERT(vd.Is2S() || vd.Is4S());
  Emit(VFormat(vn) | NEON_URECPE | Rn(vn) | Rd(vd));
}


void Assembler::NEONAddlp(const VRegister& vd,
                          const VRegister& vn,
                          NEON2RegMiscOp op) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
  VIXL_ASSERT((op == NEON_SADDLP) || (op == NEON_UADDLP) ||
              (op == NEON_SADALP) || (op == NEON_UADALP));

  VIXL_ASSERT((vn.Is8B() && vd.Is4H()) || (vn.Is4H() && vd.Is2S()) ||
              (vn.Is2S() && vd.Is1D()) || (vn.Is16B() && vd.Is8H()) ||
              (vn.Is8H() && vd.Is4S()) || (vn.Is4S() && vd.Is2D()));
  Emit(VFormat(vn) | op | Rn(vn) | Rd(vd));
}


void Assembler::saddlp(const VRegister& vd, const VRegister& vn) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
  NEONAddlp(vd, vn, NEON_SADDLP);
}


void Assembler::uaddlp(const VRegister& vd, const VRegister& vn) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
  NEONAddlp(vd, vn, NEON_UADDLP);
}


void Assembler::sadalp(const VRegister& vd, const VRegister& vn) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
  NEONAddlp(vd, vn, NEON_SADALP);
}


void Assembler::uadalp(const VRegister& vd, const VRegister& vn) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
  NEONAddlp(vd, vn, NEON_UADALP);
}


void Assembler::NEONAcrossLanesL(const VRegister& vd,
                                 const VRegister& vn,
                                 NEONAcrossLanesOp op) {
  VIXL_ASSERT((vn.Is8B() && vd.Is1H()) || (vn.Is16B() && vd.Is1H()) ||
              (vn.Is4H() && vd.Is1S()) || (vn.Is8H() && vd.Is1S()) ||
              (vn.Is4S() && vd.Is1D()));
  Emit(VFormat(vn) | op | Rn(vn) | Rd(vd));
}


void Assembler::saddlv(const VRegister& vd, const VRegister& vn) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
  NEONAcrossLanesL(vd, vn, NEON_SADDLV);
}


void Assembler::uaddlv(const VRegister& vd, const VRegister& vn) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
  NEONAcrossLanesL(vd, vn, NEON_UADDLV);
}


void Assembler::NEONAcrossLanes(const VRegister& vd,
                                const VRegister& vn,
                                NEONAcrossLanesOp op,
                                Instr op_half) {
  VIXL_ASSERT((vn.Is8B() && vd.Is1B()) || (vn.Is16B() && vd.Is1B()) ||
              (vn.Is4H() && vd.Is1H()) || (vn.Is8H() && vd.Is1H()) ||
              (vn.Is4S() && vd.Is1S()));
  if ((op & NEONAcrossLanesFPFMask) == NEONAcrossLanesFPFixed) {
    if (vd.Is1H()) {
      VIXL_ASSERT(op_half != 0);
      Instr vop = op_half;
      if (vn.Is8H()) {
        vop |= NEON_Q;
      }
      Emit(vop | Rn(vn) | Rd(vd));
    } else {
      Emit(FPFormat(vn) | op | Rn(vn) | Rd(vd));
    }
  } else {
    Emit(VFormat(vn) | op | Rn(vn) | Rd(vd));
  }
}

// clang-format off
#define NEON_ACROSSLANES_LIST(V) \
  V(addv, NEON_ADDV)             \
  V(smaxv, NEON_SMAXV)           \
  V(sminv, NEON_SMINV)           \
  V(umaxv, NEON_UMAXV)           \
  V(uminv, NEON_UMINV)
// clang-format on

#define DEFINE_ASM_FUNC(FN, OP)                                  \
  void Assembler::FN(const VRegister& vd, const VRegister& vn) { \
    VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));                     \
    NEONAcrossLanes(vd, vn, OP, 0);                              \
  }
NEON_ACROSSLANES_LIST(DEFINE_ASM_FUNC)
#undef DEFINE_ASM_FUNC


// clang-format off
#define NEON_ACROSSLANES_FP_LIST(V)        \
  V(fmaxv, NEON_FMAXV, NEON_FMAXV_H)       \
  V(fminv, NEON_FMINV, NEON_FMINV_H)       \
  V(fmaxnmv, NEON_FMAXNMV, NEON_FMAXNMV_H) \
  V(fminnmv, NEON_FMINNMV, NEON_FMINNMV_H)
// clang-format on

#define DEFINE_ASM_FUNC(FN, OP, OP_H)                             \
  void Assembler::FN(const VRegister& vd, const VRegister& vn) {  \
    VIXL_ASSERT(CPUHas(CPUFeatures::kFP, CPUFeatures::kNEON));    \
    if (vd.Is1H()) VIXL_ASSERT(CPUHas(CPUFeatures::kNEONHalf));   \
    VIXL_ASSERT(vd.Is1S() || vd.Is1H());                          \
    NEONAcrossLanes(vd, vn, OP, OP_H);                            \
  }
NEON_ACROSSLANES_FP_LIST(DEFINE_ASM_FUNC)
#undef DEFINE_ASM_FUNC


void Assembler::NEONPerm(const VRegister& vd,
                         const VRegister& vn,
                         const VRegister& vm,
                         NEONPermOp op) {
  VIXL_ASSERT(AreSameFormat(vd, vn, vm));
  VIXL_ASSERT(!vd.Is1D());
  Emit(VFormat(vd) | op | Rm(vm) | Rn(vn) | Rd(vd));
}


void Assembler::trn1(const VRegister& vd,
                     const VRegister& vn,
                     const VRegister& vm) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
  NEONPerm(vd, vn, vm, NEON_TRN1);
}
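// Illustrative example (not part of the library): trn1(v0.V4S(), v1.V4S(),
// v2.V4S()) interleaves the even-indexed lanes of its sources, giving
//   v0 = [v2[2], v1[2], v2[0], v1[0]]   (lane 3 ... lane 0),
// while trn2 (below) does the same with the odd-indexed lanes.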
void Assembler::trn2(const VRegister& vd,
                     const VRegister& vn,
                     const VRegister& vm) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
  NEONPerm(vd, vn, vm, NEON_TRN2);
}


void Assembler::uzp1(const VRegister& vd,
                     const VRegister& vn,
                     const VRegister& vm) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
  NEONPerm(vd, vn, vm, NEON_UZP1);
}


void Assembler::uzp2(const VRegister& vd,
                     const VRegister& vn,
                     const VRegister& vm) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
  NEONPerm(vd, vn, vm, NEON_UZP2);
}


void Assembler::zip1(const VRegister& vd,
                     const VRegister& vn,
                     const VRegister& vm) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
  NEONPerm(vd, vn, vm, NEON_ZIP1);
}


void Assembler::zip2(const VRegister& vd,
                     const VRegister& vn,
                     const VRegister& vm) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
  NEONPerm(vd, vn, vm, NEON_ZIP2);
}


void Assembler::NEONShiftImmediate(const VRegister& vd,
                                   const VRegister& vn,
                                   NEONShiftImmediateOp op,
                                   int immh_immb) {
  VIXL_ASSERT(AreSameFormat(vd, vn));
  Instr q, scalar;
  if (vn.IsScalar()) {
    q = NEON_Q;
    scalar = NEONScalar;
  } else {
    q = vd.IsD() ? 0 : NEON_Q;
    scalar = 0;
  }
  Emit(q | op | scalar | immh_immb | Rn(vn) | Rd(vd));
}


void Assembler::NEONShiftLeftImmediate(const VRegister& vd,
                                       const VRegister& vn,
                                       int shift,
                                       NEONShiftImmediateOp op) {
  int laneSizeInBits = vn.GetLaneSizeInBits();
  VIXL_ASSERT((shift >= 0) && (shift < laneSizeInBits));
  NEONShiftImmediate(vd, vn, op, (laneSizeInBits + shift) << 16);
}


void Assembler::NEONShiftRightImmediate(const VRegister& vd,
                                        const VRegister& vn,
                                        int shift,
                                        NEONShiftImmediateOp op) {
  int laneSizeInBits = vn.GetLaneSizeInBits();
  VIXL_ASSERT((shift >= 1) && (shift <= laneSizeInBits));
  NEONShiftImmediate(vd, vn, op, ((2 * laneSizeInBits) - shift) << 16);
}


void Assembler::NEONShiftImmediateL(const VRegister& vd,
                                    const VRegister& vn,
                                    int shift,
                                    NEONShiftImmediateOp op) {
  int laneSizeInBits = vn.GetLaneSizeInBits();
  VIXL_ASSERT((shift >= 0) && (shift < laneSizeInBits));
  int immh_immb = (laneSizeInBits + shift) << 16;

  VIXL_ASSERT((vn.Is8B() && vd.Is8H()) || (vn.Is4H() && vd.Is4S()) ||
              (vn.Is2S() && vd.Is2D()) || (vn.Is16B() && vd.Is8H()) ||
              (vn.Is8H() && vd.Is4S()) || (vn.Is4S() && vd.Is2D()));
  Instr q;
  q = vn.IsD() ? 0 : NEON_Q;
  Emit(q | op | immh_immb | Rn(vn) | Rd(vd));
}


void Assembler::NEONShiftImmediateN(const VRegister& vd,
                                    const VRegister& vn,
                                    int shift,
                                    NEONShiftImmediateOp op) {
  Instr q, scalar;
  int laneSizeInBits = vd.GetLaneSizeInBits();
  VIXL_ASSERT((shift >= 1) && (shift <= laneSizeInBits));
  int immh_immb = (2 * laneSizeInBits - shift) << 16;

  if (vn.IsScalar()) {
    VIXL_ASSERT((vd.Is1B() && vn.Is1H()) || (vd.Is1H() && vn.Is1S()) ||
                (vd.Is1S() && vn.Is1D()));
    q = NEON_Q;
    scalar = NEONScalar;
  } else {
    VIXL_ASSERT((vd.Is8B() && vn.Is8H()) || (vd.Is4H() && vn.Is4S()) ||
                (vd.Is2S() && vn.Is2D()) || (vd.Is16B() && vn.Is8H()) ||
                (vd.Is8H() && vn.Is4S()) || (vd.Is4S() && vn.Is2D()));
    scalar = 0;
    q = vd.IsD() ? 0 : NEON_Q;
  }
  Emit(q | op | scalar | immh_immb | Rn(vn) | Rd(vd));
}


void Assembler::shl(const VRegister& vd, const VRegister& vn, int shift) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
  VIXL_ASSERT(vd.IsVector() || vd.Is1D());
  NEONShiftLeftImmediate(vd, vn, shift, NEON_SHL);
}


void Assembler::sli(const VRegister& vd, const VRegister& vn, int shift) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
  VIXL_ASSERT(vd.IsVector() || vd.Is1D());
  NEONShiftLeftImmediate(vd, vn, shift, NEON_SLI);
}


void Assembler::sqshl(const VRegister& vd, const VRegister& vn, int shift) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
  NEONShiftLeftImmediate(vd, vn, shift, NEON_SQSHL_imm);
}


void Assembler::sqshlu(const VRegister& vd, const VRegister& vn, int shift) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
  NEONShiftLeftImmediate(vd, vn, shift, NEON_SQSHLU);
}


void Assembler::uqshl(const VRegister& vd, const VRegister& vn, int shift) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
  NEONShiftLeftImmediate(vd, vn, shift, NEON_UQSHL_imm);
}


void Assembler::sshll(const VRegister& vd, const VRegister& vn, int shift) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
  VIXL_ASSERT(vn.IsD());
  NEONShiftImmediateL(vd, vn, shift, NEON_SSHLL);
}


void Assembler::sshll2(const VRegister& vd, const VRegister& vn, int shift) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
  VIXL_ASSERT(vn.IsQ());
  NEONShiftImmediateL(vd, vn, shift, NEON_SSHLL);
}


void Assembler::sxtl(const VRegister& vd, const VRegister& vn) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
  sshll(vd, vn, 0);
}


void Assembler::sxtl2(const VRegister& vd, const VRegister& vn) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
  sshll2(vd, vn, 0);
}


void Assembler::ushll(const VRegister& vd, const VRegister& vn, int shift) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
  VIXL_ASSERT(vn.IsD());
  NEONShiftImmediateL(vd, vn, shift, NEON_USHLL);
}


void Assembler::ushll2(const VRegister& vd, const VRegister& vn, int shift) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
  VIXL_ASSERT(vn.IsQ());
  NEONShiftImmediateL(vd, vn, shift, NEON_USHLL);
}


void Assembler::uxtl(const VRegister& vd, const VRegister& vn) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
  ushll(vd, vn, 0);
}


void Assembler::uxtl2(const VRegister& vd, const VRegister& vn) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
  ushll2(vd, vn, 0);
}


void Assembler::sri(const VRegister& vd, const VRegister& vn, int shift) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
  VIXL_ASSERT(vd.IsVector() || vd.Is1D());
  NEONShiftRightImmediate(vd, vn, shift, NEON_SRI);
}


void Assembler::sshr(const VRegister& vd, const VRegister& vn, int shift) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
  VIXL_ASSERT(vd.IsVector() || vd.Is1D());
  NEONShiftRightImmediate(vd, vn, shift, NEON_SSHR);
}


void Assembler::ushr(const VRegister& vd, const VRegister& vn, int shift) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
  VIXL_ASSERT(vd.IsVector() || vd.Is1D());
  NEONShiftRightImmediate(vd, vn, shift, NEON_USHR);
}


void Assembler::srshr(const VRegister& vd, const VRegister& vn, int shift) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
  VIXL_ASSERT(vd.IsVector() || vd.Is1D());
  NEONShiftRightImmediate(vd, vn, shift, NEON_SRSHR);
}


void Assembler::urshr(const VRegister& vd, const VRegister& vn, int shift) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
  VIXL_ASSERT(vd.IsVector() || vd.Is1D());
  NEONShiftRightImmediate(vd, vn, shift, NEON_URSHR);
}


void Assembler::ssra(const VRegister& vd, const VRegister& vn, int shift) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
  VIXL_ASSERT(vd.IsVector() || vd.Is1D());
  NEONShiftRightImmediate(vd, vn, shift, NEON_SSRA);
}


void Assembler::usra(const VRegister& vd, const VRegister& vn, int shift) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
  VIXL_ASSERT(vd.IsVector() || vd.Is1D());
  NEONShiftRightImmediate(vd, vn, shift, NEON_USRA);
}


void Assembler::srsra(const VRegister& vd, const VRegister& vn, int shift) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
  VIXL_ASSERT(vd.IsVector() || vd.Is1D());
  NEONShiftRightImmediate(vd, vn, shift, NEON_SRSRA);
}


void Assembler::ursra(const VRegister& vd, const VRegister& vn, int shift) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
  VIXL_ASSERT(vd.IsVector() || vd.Is1D());
  NEONShiftRightImmediate(vd, vn, shift, NEON_URSRA);
}


void Assembler::shrn(const VRegister& vd, const VRegister& vn, int shift) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
  VIXL_ASSERT(vn.IsVector() && vd.IsD());
  NEONShiftImmediateN(vd, vn, shift, NEON_SHRN);
}


void Assembler::shrn2(const VRegister& vd, const VRegister& vn, int shift) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
  VIXL_ASSERT(vn.IsVector() && vd.IsQ());
  NEONShiftImmediateN(vd, vn, shift, NEON_SHRN);
}


void Assembler::rshrn(const VRegister& vd, const VRegister& vn, int shift) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
  VIXL_ASSERT(vn.IsVector() && vd.IsD());
  NEONShiftImmediateN(vd, vn, shift, NEON_RSHRN);
}


void Assembler::rshrn2(const VRegister& vd, const VRegister& vn, int shift) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
  VIXL_ASSERT(vn.IsVector() && vd.IsQ());
  NEONShiftImmediateN(vd, vn, shift, NEON_RSHRN);
}


void Assembler::sqshrn(const VRegister& vd, const VRegister& vn, int shift) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
  VIXL_ASSERT(vd.IsD() || (vn.IsScalar() && vd.IsScalar()));
  NEONShiftImmediateN(vd, vn, shift, NEON_SQSHRN);
}


void Assembler::sqshrn2(const VRegister& vd, const VRegister& vn, int shift) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
  VIXL_ASSERT(vn.IsVector() && vd.IsQ());
  NEONShiftImmediateN(vd, vn, shift, NEON_SQSHRN);
}


void Assembler::sqrshrn(const VRegister& vd, const VRegister& vn, int shift) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
  VIXL_ASSERT(vd.IsD() || (vn.IsScalar() && vd.IsScalar()));
  NEONShiftImmediateN(vd, vn, shift, NEON_SQRSHRN);
}


void Assembler::sqrshrn2(const VRegister& vd, const VRegister& vn, int shift) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
  VIXL_ASSERT(vn.IsVector() && vd.IsQ());
  NEONShiftImmediateN(vd, vn, shift, NEON_SQRSHRN);
}


void Assembler::sqshrun(const VRegister& vd, const VRegister& vn, int shift) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
  VIXL_ASSERT(vd.IsD() || (vn.IsScalar() && vd.IsScalar()));
  NEONShiftImmediateN(vd, vn, shift, NEON_SQSHRUN);
}


void Assembler::sqshrun2(const VRegister& vd, const VRegister& vn, int shift) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
  VIXL_ASSERT(vn.IsVector() && vd.IsQ());
  NEONShiftImmediateN(vd, vn, shift, NEON_SQSHRUN);
}


void Assembler::sqrshrun(const VRegister& vd, const VRegister& vn, int shift) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
  VIXL_ASSERT(vd.IsD() || (vn.IsScalar() && vd.IsScalar()));
  NEONShiftImmediateN(vd, vn, shift, NEON_SQRSHRUN);
}


void Assembler::sqrshrun2(const VRegister& vd, const VRegister& vn, int shift) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
  VIXL_ASSERT(vn.IsVector() && vd.IsQ());
  NEONShiftImmediateN(vd, vn, shift, NEON_SQRSHRUN);
}


void Assembler::uqshrn(const VRegister& vd, const VRegister& vn, int shift) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
  VIXL_ASSERT(vd.IsD() || (vn.IsScalar() && vd.IsScalar()));
  NEONShiftImmediateN(vd, vn, shift, NEON_UQSHRN);
}


void Assembler::uqshrn2(const VRegister& vd, const VRegister& vn, int shift) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
  VIXL_ASSERT(vn.IsVector() && vd.IsQ());
  NEONShiftImmediateN(vd, vn, shift, NEON_UQSHRN);
}


void Assembler::uqrshrn(const VRegister& vd, const VRegister& vn, int shift) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
  VIXL_ASSERT(vd.IsD() || (vn.IsScalar() && vd.IsScalar()));
  NEONShiftImmediateN(vd, vn, shift, NEON_UQRSHRN);
}


void Assembler::uqrshrn2(const VRegister& vd, const VRegister& vn, int shift) {
  VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
  VIXL_ASSERT(vn.IsVector() && vd.IsQ());
  NEONShiftImmediateN(vd, vn, shift, NEON_UQRSHRN);
}


// Note:
// For all ToImm instructions below, a difference in case
// for the same letter indicates a negated bit.
// If b is 1, then B is 0.
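// For example (a worked sketch): Float16(1.0) has raw bits 0x3c00, that is
// 0011.1100.0000.0000, so a = 0, B = 0 (hence b = 1) and cdefgh = 110000.
// The encoded imm8 is a:b:cdefgh = 0b01110000 = 0x70, which is what
// FP16ToImm8 below computes.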
uint32_t Assembler::FP16ToImm8(Float16 imm) {
  VIXL_ASSERT(IsImmFP16(imm));
  // Half: aBbb.cdef.gh00.0000 (16 bits)
  uint16_t bits = Float16ToRawbits(imm);
  // bit7: a000.0000
  uint16_t bit7 = ((bits >> 15) & 0x1) << 7;
  // bit6: 0b00.0000
  uint16_t bit6 = ((bits >> 13) & 0x1) << 6;
  // bit5_to_0: 00cd.efgh
  uint16_t bit5_to_0 = (bits >> 6) & 0x3f;
  uint32_t result = static_cast<uint32_t>(bit7 | bit6 | bit5_to_0);
  return result;
}


Instr Assembler::ImmFP16(Float16 imm) {
  return FP16ToImm8(imm) << ImmFP_offset;
}


uint32_t Assembler::FP32ToImm8(float imm) {
  VIXL_ASSERT(IsImmFP32(imm));
  // bits: aBbb.bbbc.defg.h000.0000.0000.0000.0000
  uint32_t bits = FloatToRawbits(imm);
  // bit7: a000.0000
  uint32_t bit7 = ((bits >> 31) & 0x1) << 7;
  // bit6: 0b00.0000
  uint32_t bit6 = ((bits >> 29) & 0x1) << 6;
  // bit5_to_0: 00cd.efgh
  uint32_t bit5_to_0 = (bits >> 19) & 0x3f;

  return bit7 | bit6 | bit5_to_0;
}


Instr Assembler::ImmFP32(float imm) { return FP32ToImm8(imm) << ImmFP_offset; }


uint32_t Assembler::FP64ToImm8(double imm) {
  VIXL_ASSERT(IsImmFP64(imm));
  // bits: aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
  //       0000.0000.0000.0000.0000.0000.0000.0000
  uint64_t bits = DoubleToRawbits(imm);
  // bit7: a000.0000
  uint64_t bit7 = ((bits >> 63) & 0x1) << 7;
  // bit6: 0b00.0000
  uint64_t bit6 = ((bits >> 61) & 0x1) << 6;
  // bit5_to_0: 00cd.efgh
  uint64_t bit5_to_0 = (bits >> 48) & 0x3f;

  return static_cast<uint32_t>(bit7 | bit6 | bit5_to_0);
}


Instr Assembler::ImmFP64(double imm) { return FP64ToImm8(imm) << ImmFP_offset; }


// Code generation helpers.
void Assembler::MoveWide(const Register& rd,
                         uint64_t imm,
                         int shift,
                         MoveWideImmediateOp mov_op) {
  // Ignore the top 32 bits of an immediate if we're moving to a W register.
  if (rd.Is32Bits()) {
    // Check that the top 32 bits are zero (a positive 32-bit number) or top
    // 33 bits are one (a negative 32-bit number, sign extended to 64 bits).
    VIXL_ASSERT(((imm >> kWRegSize) == 0) ||
                ((imm >> (kWRegSize - 1)) == 0x1ffffffff));
    imm &= kWRegMask;
  }

  if (shift >= 0) {
    // Explicit shift specified.
    VIXL_ASSERT((shift == 0) || (shift == 16) || (shift == 32) ||
                (shift == 48));
    VIXL_ASSERT(rd.Is64Bits() || (shift == 0) || (shift == 16));
    shift /= 16;
  } else {
    // Calculate a new immediate and shift combination to encode the immediate
    // argument.
    shift = 0;
    if ((imm & 0xffffffffffff0000) == 0) {
      // Nothing to do.
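      // (For example, 0x1234 is encoded as-is with shift 0, while
      // 0x12340000 falls through to the shift == 1, i.e. LSL #16, case
      // below.)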
    } else if ((imm & 0xffffffff0000ffff) == 0) {
      imm >>= 16;
      shift = 1;
    } else if ((imm & 0xffff0000ffffffff) == 0) {
      VIXL_ASSERT(rd.Is64Bits());
      imm >>= 32;
      shift = 2;
    } else if ((imm & 0x0000ffffffffffff) == 0) {
      VIXL_ASSERT(rd.Is64Bits());
      imm >>= 48;
      shift = 3;
    }
  }

  VIXL_ASSERT(IsUint16(imm));

  Emit(SF(rd) | MoveWideImmediateFixed | mov_op | Rd(rd) | ImmMoveWide(imm) |
       ShiftMoveWide(shift));
}


void Assembler::AddSub(const Register& rd,
                       const Register& rn,
                       const Operand& operand,
                       FlagsUpdate S,
                       AddSubOp op) {
  VIXL_ASSERT(rd.GetSizeInBits() == rn.GetSizeInBits());
  if (operand.IsImmediate()) {
    int64_t immediate = operand.GetImmediate();
    VIXL_ASSERT(IsImmAddSub(immediate));
    Instr dest_reg = (S == SetFlags) ? Rd(rd) : RdSP(rd);
    Emit(SF(rd) | AddSubImmediateFixed | op | Flags(S) |
         ImmAddSub(static_cast<int>(immediate)) | dest_reg | RnSP(rn));
  } else if (operand.IsShiftedRegister()) {
    VIXL_ASSERT(operand.GetRegister().GetSizeInBits() == rd.GetSizeInBits());
    VIXL_ASSERT(operand.GetShift() != ROR);

    // For instructions of the form:
    //   add/sub   wsp, <Wn>, <Wm> [, LSL #0-3 ]
    //   add/sub   <Wd>, wsp, <Wm> [, LSL #0-3 ]
    //   add/sub   wsp, wsp, <Wm> [, LSL #0-3 ]
    //   adds/subs <Wd>, wsp, <Wm> [, LSL #0-3 ]
    // or their 64-bit register equivalents, convert the operand from shifted
    // to extended register mode, and emit an add/sub extended instruction.
    if (rn.IsSP() || rd.IsSP()) {
      VIXL_ASSERT(!(rd.IsSP() && (S == SetFlags)));
      DataProcExtendedRegister(rd,
                               rn,
                               operand.ToExtendedRegister(),
                               S,
                               AddSubExtendedFixed | op);
    } else {
      DataProcShiftedRegister(rd, rn, operand, S, AddSubShiftedFixed | op);
    }
  } else {
    VIXL_ASSERT(operand.IsExtendedRegister());
    DataProcExtendedRegister(rd, rn, operand, S, AddSubExtendedFixed | op);
  }
}


void Assembler::AddSubWithCarry(const Register& rd,
                                const Register& rn,
                                const Operand& operand,
                                FlagsUpdate S,
                                AddSubWithCarryOp op) {
  VIXL_ASSERT(rd.GetSizeInBits() == rn.GetSizeInBits());
  VIXL_ASSERT(rd.GetSizeInBits() == operand.GetRegister().GetSizeInBits());
  VIXL_ASSERT(operand.IsShiftedRegister() && (operand.GetShiftAmount() == 0));
  Emit(SF(rd) | op | Flags(S) | Rm(operand.GetRegister()) | Rn(rn) | Rd(rd));
}


void Assembler::hlt(int code) {
  VIXL_ASSERT(IsUint16(code));
  Emit(HLT | ImmException(code));
}


void Assembler::brk(int code) {
  VIXL_ASSERT(IsUint16(code));
  Emit(BRK | ImmException(code));
}


void Assembler::svc(int code) { Emit(SVC | ImmException(code)); }


// TODO(all): The third parameter should be passed by reference, but gcc 4.8.2
// then reports a bogus uninitialised warning.
void Assembler::Logical(const Register& rd,
                        const Register& rn,
                        const Operand operand,
                        LogicalOp op) {
  VIXL_ASSERT(rd.GetSizeInBits() == rn.GetSizeInBits());
  if (operand.IsImmediate()) {
    int64_t immediate = operand.GetImmediate();
    unsigned reg_size = rd.GetSizeInBits();

    VIXL_ASSERT(immediate != 0);
    VIXL_ASSERT(immediate != -1);
    VIXL_ASSERT(rd.Is64Bits() || IsUint32(immediate));

    // If the operation is NOT, invert the operation and immediate.
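    // (So, for instance, an orn immediate is emitted as an orr of the
    // inverted value; bic and eon are handled the same way via and and eor.)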
5160 if ((op & NOT) == NOT) { 5161 op = static_cast<LogicalOp>(op & ~NOT); 5162 immediate = rd.Is64Bits() ? ~immediate : (~immediate & kWRegMask); 5163 } 5164 5165 unsigned n, imm_s, imm_r; 5166 if (IsImmLogical(immediate, reg_size, &n, &imm_s, &imm_r)) { 5167 // Immediate can be encoded in the instruction. 5168 LogicalImmediate(rd, rn, n, imm_s, imm_r, op); 5169 } else { 5170 // This case is handled in the macro assembler. 5171 VIXL_UNREACHABLE(); 5172 } 5173 } else { 5174 VIXL_ASSERT(operand.IsShiftedRegister()); 5175 VIXL_ASSERT(operand.GetRegister().GetSizeInBits() == rd.GetSizeInBits()); 5176 Instr dp_op = static_cast<Instr>(op | LogicalShiftedFixed); 5177 DataProcShiftedRegister(rd, rn, operand, LeaveFlags, dp_op); 5178 } 5179 } 5180 5181 5182 void Assembler::LogicalImmediate(const Register& rd, 5183 const Register& rn, 5184 unsigned n, 5185 unsigned imm_s, 5186 unsigned imm_r, 5187 LogicalOp op) { 5188 unsigned reg_size = rd.GetSizeInBits(); 5189 Instr dest_reg = (op == ANDS) ? Rd(rd) : RdSP(rd); 5190 Emit(SF(rd) | LogicalImmediateFixed | op | BitN(n, reg_size) | 5191 ImmSetBits(imm_s, reg_size) | ImmRotate(imm_r, reg_size) | dest_reg | 5192 Rn(rn)); 5193 } 5194 5195 5196 void Assembler::ConditionalCompare(const Register& rn, 5197 const Operand& operand, 5198 StatusFlags nzcv, 5199 Condition cond, 5200 ConditionalCompareOp op) { 5201 Instr ccmpop; 5202 if (operand.IsImmediate()) { 5203 int64_t immediate = operand.GetImmediate(); 5204 VIXL_ASSERT(IsImmConditionalCompare(immediate)); 5205 ccmpop = ConditionalCompareImmediateFixed | op | 5206 ImmCondCmp(static_cast<unsigned>(immediate)); 5207 } else { 5208 VIXL_ASSERT(operand.IsShiftedRegister() && (operand.GetShiftAmount() == 0)); 5209 ccmpop = ConditionalCompareRegisterFixed | op | Rm(operand.GetRegister()); 5210 } 5211 Emit(SF(rn) | ccmpop | Cond(cond) | Rn(rn) | Nzcv(nzcv)); 5212 } 5213 5214 5215 void Assembler::DataProcessing1Source(const Register& rd, 5216 const Register& rn, 5217 DataProcessing1SourceOp op) { 5218 VIXL_ASSERT(rd.GetSizeInBits() == rn.GetSizeInBits()); 5219 Emit(SF(rn) | op | Rn(rn) | Rd(rd)); 5220 } 5221 5222 5223 void Assembler::FPDataProcessing1Source(const VRegister& vd, 5224 const VRegister& vn, 5225 FPDataProcessing1SourceOp op) { 5226 VIXL_ASSERT(vd.Is1H() || vd.Is1S() || vd.Is1D()); 5227 Emit(FPType(vn) | op | Rn(vn) | Rd(vd)); 5228 } 5229 5230 5231 void Assembler::FPDataProcessing3Source(const VRegister& vd, 5232 const VRegister& vn, 5233 const VRegister& vm, 5234 const VRegister& va, 5235 FPDataProcessing3SourceOp op) { 5236 VIXL_ASSERT(vd.Is1H() || vd.Is1S() || vd.Is1D()); 5237 VIXL_ASSERT(AreSameSizeAndType(vd, vn, vm, va)); 5238 Emit(FPType(vd) | op | Rm(vm) | Rn(vn) | Rd(vd) | Ra(va)); 5239 } 5240 5241 5242 void Assembler::NEONModifiedImmShiftLsl(const VRegister& vd, 5243 const int imm8, 5244 const int left_shift, 5245 NEONModifiedImmediateOp op) { 5246 VIXL_ASSERT(vd.Is8B() || vd.Is16B() || vd.Is4H() || vd.Is8H() || vd.Is2S() || 5247 vd.Is4S()); 5248 VIXL_ASSERT((left_shift == 0) || (left_shift == 8) || (left_shift == 16) || 5249 (left_shift == 24)); 5250 VIXL_ASSERT(IsUint8(imm8)); 5251 5252 int cmode_1, cmode_2, cmode_3; 5253 if (vd.Is8B() || vd.Is16B()) { 5254 VIXL_ASSERT(op == NEONModifiedImmediate_MOVI); 5255 cmode_1 = 1; 5256 cmode_2 = 1; 5257 cmode_3 = 1; 5258 } else { 5259 cmode_1 = (left_shift >> 3) & 1; 5260 cmode_2 = left_shift >> 4; 5261 cmode_3 = 0; 5262 if (vd.Is4H() || vd.Is8H()) { 5263 VIXL_ASSERT((left_shift == 0) || (left_shift == 8)); 5264 cmode_3 = 1; 5265 } 5266 } 5267 int 
cmode = (cmode_3 << 3) | (cmode_2 << 2) | (cmode_1 << 1); 5268 5269 int q = vd.IsQ() ? NEON_Q : 0; 5270 5271 Emit(q | op | ImmNEONabcdefgh(imm8) | NEONCmode(cmode) | Rd(vd)); 5272 } 5273 5274 5275 void Assembler::NEONModifiedImmShiftMsl(const VRegister& vd, 5276 const int imm8, 5277 const int shift_amount, 5278 NEONModifiedImmediateOp op) { 5279 VIXL_ASSERT(vd.Is2S() || vd.Is4S()); 5280 VIXL_ASSERT((shift_amount == 8) || (shift_amount == 16)); 5281 VIXL_ASSERT(IsUint8(imm8)); 5282 5283 int cmode_0 = (shift_amount >> 4) & 1; 5284 int cmode = 0xc | cmode_0; 5285 5286 int q = vd.IsQ() ? NEON_Q : 0; 5287 5288 Emit(q | op | ImmNEONabcdefgh(imm8) | NEONCmode(cmode) | Rd(vd)); 5289 } 5290 5291 5292 void Assembler::EmitShift(const Register& rd, 5293 const Register& rn, 5294 Shift shift, 5295 unsigned shift_amount) { 5296 switch (shift) { 5297 case LSL: 5298 lsl(rd, rn, shift_amount); 5299 break; 5300 case LSR: 5301 lsr(rd, rn, shift_amount); 5302 break; 5303 case ASR: 5304 asr(rd, rn, shift_amount); 5305 break; 5306 case ROR: 5307 ror(rd, rn, shift_amount); 5308 break; 5309 default: 5310 VIXL_UNREACHABLE(); 5311 } 5312 } 5313 5314 5315 void Assembler::EmitExtendShift(const Register& rd, 5316 const Register& rn, 5317 Extend extend, 5318 unsigned left_shift) { 5319 VIXL_ASSERT(rd.GetSizeInBits() >= rn.GetSizeInBits()); 5320 unsigned reg_size = rd.GetSizeInBits(); 5321 // Use the correct size of register. 5322 Register rn_ = Register(rn.GetCode(), rd.GetSizeInBits()); 5323 // Bits extracted are high_bit:0. 5324 unsigned high_bit = (8 << (extend & 0x3)) - 1; 5325 // Number of bits left in the result that are not introduced by the shift. 5326 unsigned non_shift_bits = (reg_size - left_shift) & (reg_size - 1); 5327 5328 if ((non_shift_bits > high_bit) || (non_shift_bits == 0)) { 5329 switch (extend) { 5330 case UXTB: 5331 case UXTH: 5332 case UXTW: 5333 ubfm(rd, rn_, non_shift_bits, high_bit); 5334 break; 5335 case SXTB: 5336 case SXTH: 5337 case SXTW: 5338 sbfm(rd, rn_, non_shift_bits, high_bit); 5339 break; 5340 case UXTX: 5341 case SXTX: { 5342 VIXL_ASSERT(rn.GetSizeInBits() == kXRegSize); 5343 // Nothing to extend. Just shift. 5344 lsl(rd, rn_, left_shift); 5345 break; 5346 } 5347 default: 5348 VIXL_UNREACHABLE(); 5349 } 5350 } else { 5351 // No need to extend as the extended bits would be shifted away. 5352 lsl(rd, rn_, left_shift); 5353 } 5354 } 5355 5356 5357 void Assembler::DataProcShiftedRegister(const Register& rd, 5358 const Register& rn, 5359 const Operand& operand, 5360 FlagsUpdate S, 5361 Instr op) { 5362 VIXL_ASSERT(operand.IsShiftedRegister()); 5363 VIXL_ASSERT(rn.Is64Bits() || 5364 (rn.Is32Bits() && IsUint5(operand.GetShiftAmount()))); 5365 Emit(SF(rd) | op | Flags(S) | ShiftDP(operand.GetShift()) | 5366 ImmDPShift(operand.GetShiftAmount()) | Rm(operand.GetRegister()) | 5367 Rn(rn) | Rd(rd)); 5368 } 5369 5370 5371 void Assembler::DataProcExtendedRegister(const Register& rd, 5372 const Register& rn, 5373 const Operand& operand, 5374 FlagsUpdate S, 5375 Instr op) { 5376 Instr dest_reg = (S == SetFlags) ? 
Rd(rd) : RdSP(rd); 5377 Emit(SF(rd) | op | Flags(S) | Rm(operand.GetRegister()) | 5378 ExtendMode(operand.GetExtend()) | 5379 ImmExtendShift(operand.GetShiftAmount()) | dest_reg | RnSP(rn)); 5380 } 5381 5382 5383 Instr Assembler::LoadStoreMemOperand(const MemOperand& addr, 5384 unsigned access_size, 5385 LoadStoreScalingOption option) { 5386 Instr base = RnSP(addr.GetBaseRegister()); 5387 int64_t offset = addr.GetOffset(); 5388 5389 if (addr.IsImmediateOffset()) { 5390 bool prefer_unscaled = 5391 (option == PreferUnscaledOffset) || (option == RequireUnscaledOffset); 5392 if (prefer_unscaled && IsImmLSUnscaled(offset)) { 5393 // Use the unscaled addressing mode. 5394 return base | LoadStoreUnscaledOffsetFixed | 5395 ImmLS(static_cast<int>(offset)); 5396 } 5397 5398 if ((option != RequireUnscaledOffset) && 5399 IsImmLSScaled(offset, access_size)) { 5400 // Use the scaled addressing mode. 5401 return base | LoadStoreUnsignedOffsetFixed | 5402 ImmLSUnsigned(static_cast<int>(offset) >> access_size); 5403 } 5404 5405 if ((option != RequireScaledOffset) && IsImmLSUnscaled(offset)) { 5406 // Use the unscaled addressing mode. 5407 return base | LoadStoreUnscaledOffsetFixed | 5408 ImmLS(static_cast<int>(offset)); 5409 } 5410 } 5411 5412 // All remaining addressing modes are register-offset, pre-indexed or 5413 // post-indexed modes. 5414 VIXL_ASSERT((option != RequireUnscaledOffset) && 5415 (option != RequireScaledOffset)); 5416 5417 if (addr.IsRegisterOffset()) { 5418 Extend ext = addr.GetExtend(); 5419 Shift shift = addr.GetShift(); 5420 unsigned shift_amount = addr.GetShiftAmount(); 5421 5422 // LSL is encoded in the option field as UXTX. 5423 if (shift == LSL) { 5424 ext = UXTX; 5425 } 5426 5427 // Shifts are encoded in one bit, indicating a left shift by the memory 5428 // access size. 5429 VIXL_ASSERT((shift_amount == 0) || (shift_amount == access_size)); 5430 return base | LoadStoreRegisterOffsetFixed | Rm(addr.GetRegisterOffset()) | 5431 ExtendMode(ext) | ImmShiftLS((shift_amount > 0) ? 1 : 0); 5432 } 5433 5434 if (addr.IsPreIndex() && IsImmLSUnscaled(offset)) { 5435 return base | LoadStorePreIndexFixed | ImmLS(static_cast<int>(offset)); 5436 } 5437 5438 if (addr.IsPostIndex() && IsImmLSUnscaled(offset)) { 5439 return base | LoadStorePostIndexFixed | ImmLS(static_cast<int>(offset)); 5440 } 5441 5442 // If this point is reached, the MemOperand (addr) cannot be encoded. 5443 VIXL_UNREACHABLE(); 5444 return 0; 5445 } 5446 5447 5448 void Assembler::LoadStore(const CPURegister& rt, 5449 const MemOperand& addr, 5450 LoadStoreOp op, 5451 LoadStoreScalingOption option) { 5452 VIXL_ASSERT(CPUHas(rt)); 5453 Emit(op | Rt(rt) | LoadStoreMemOperand(addr, CalcLSDataSize(op), option)); 5454 } 5455 5456 5457 void Assembler::Prefetch(PrefetchOperation op, 5458 const MemOperand& addr, 5459 LoadStoreScalingOption option) { 5460 VIXL_ASSERT(addr.IsRegisterOffset() || addr.IsImmediateOffset()); 5461 5462 Instr prfop = ImmPrefetchOperation(op); 5463 Emit(PRFM | prfop | LoadStoreMemOperand(addr, kXRegSizeInBytesLog2, option)); 5464 } 5465 5466 5467 bool Assembler::IsImmAddSub(int64_t immediate) { 5468 return IsUint12(immediate) || 5469 (IsUint12(immediate >> 12) && ((immediate & 0xfff) == 0)); 5470 } 5471 5472 5473 bool Assembler::IsImmConditionalCompare(int64_t immediate) { 5474 return IsUint5(immediate); 5475 } 5476 5477 5478 bool Assembler::IsImmFP16(Float16 imm) { 5479 // Valid values will have the form: 5480 // aBbb.cdef.gh00.000 5481 uint16_t bits = Float16ToRawbits(imm); 5482 // bits[6..0] are cleared. 

void Assembler::LoadStore(const CPURegister& rt,
                          const MemOperand& addr,
                          LoadStoreOp op,
                          LoadStoreScalingOption option) {
  VIXL_ASSERT(CPUHas(rt));
  Emit(op | Rt(rt) | LoadStoreMemOperand(addr, CalcLSDataSize(op), option));
}


void Assembler::Prefetch(PrefetchOperation op,
                         const MemOperand& addr,
                         LoadStoreScalingOption option) {
  VIXL_ASSERT(addr.IsRegisterOffset() || addr.IsImmediateOffset());

  Instr prfop = ImmPrefetchOperation(op);
  Emit(PRFM | prfop | LoadStoreMemOperand(addr, kXRegSizeInBytesLog2, option));
}


bool Assembler::IsImmAddSub(int64_t immediate) {
  return IsUint12(immediate) ||
         (IsUint12(immediate >> 12) && ((immediate & 0xfff) == 0));
}


bool Assembler::IsImmConditionalCompare(int64_t immediate) {
  return IsUint5(immediate);
}


bool Assembler::IsImmFP16(Float16 imm) {
  // Valid values will have the form:
  // aBbb.cdef.gh00.0000
  uint16_t bits = Float16ToRawbits(imm);
  // bits[5..0] are cleared.
  if ((bits & 0x3f) != 0) {
    return false;
  }

  // bits[13..12] are all set or all cleared.
  uint16_t b_pattern = (bits >> 12) & 0x03;
  if (b_pattern != 0 && b_pattern != 0x03) {
    return false;
  }

  // bit[14] and bit[13] are opposite.
  if (((bits ^ (bits << 1)) & 0x4000) == 0) {
    return false;
  }

  return true;
}


bool Assembler::IsImmFP32(float imm) {
  // Valid values will have the form:
  // aBbb.bbbc.defg.h000.0000.0000.0000.0000
  uint32_t bits = FloatToRawbits(imm);
  // bits[18..0] are cleared.
  if ((bits & 0x7ffff) != 0) {
    return false;
  }

  // bits[29..25] are all set or all cleared.
  uint32_t b_pattern = (bits >> 16) & 0x3e00;
  if (b_pattern != 0 && b_pattern != 0x3e00) {
    return false;
  }

  // bit[30] and bit[29] are opposite.
  if (((bits ^ (bits << 1)) & 0x40000000) == 0) {
    return false;
  }

  return true;
}


bool Assembler::IsImmFP64(double imm) {
  // Valid values will have the form:
  // aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
  // 0000.0000.0000.0000.0000.0000.0000.0000
  uint64_t bits = DoubleToRawbits(imm);
  // bits[47..0] are cleared.
  if ((bits & 0x0000ffffffffffff) != 0) {
    return false;
  }

  // bits[61..54] are all set or all cleared.
  uint32_t b_pattern = (bits >> 48) & 0x3fc0;
  if ((b_pattern != 0) && (b_pattern != 0x3fc0)) {
    return false;
  }

  // bit[62] and bit[61] are opposite.
  if (((bits ^ (bits << 1)) & (UINT64_C(1) << 62)) == 0) {
    return false;
  }

  return true;
}
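// As a worked illustration of the checks above: 0.25f has raw bits
// 0x3e800000. Its low 19 bits are zero, bits[29..25] form the all-set b
// pattern 0x3e00, and bit[30] (0) is the opposite of bit[29] (1), so
// IsImmFP32(0.25f) returns true. By contrast, 0.1f (raw bits 0x3dcccccd)
// fails the first test because its low 19 bits are not zero.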

bool Assembler::IsImmLSPair(int64_t offset, unsigned access_size) {
  VIXL_ASSERT(access_size <= kQRegSizeInBytesLog2);
  return IsMultiple(offset, 1 << access_size) &&
         IsInt7(offset / (1 << access_size));
}


bool Assembler::IsImmLSScaled(int64_t offset, unsigned access_size) {
  VIXL_ASSERT(access_size <= kQRegSizeInBytesLog2);
  return IsMultiple(offset, 1 << access_size) &&
         IsUint12(offset / (1 << access_size));
}


bool Assembler::IsImmLSUnscaled(int64_t offset) { return IsInt9(offset); }


// The movn instruction can generate immediates containing an arbitrary 16-bit
// value, with remaining bits set, e.g. 0xffff1234, 0xffff1234ffffffff.
bool Assembler::IsImmMovn(uint64_t imm, unsigned reg_size) {
  return IsImmMovz(~imm, reg_size);
}


// The movz instruction can generate immediates containing an arbitrary 16-bit
// value, with remaining bits clear, e.g. 0x00001234, 0x0000123400000000.
bool Assembler::IsImmMovz(uint64_t imm, unsigned reg_size) {
  VIXL_ASSERT((reg_size == kXRegSize) || (reg_size == kWRegSize));
  return CountClearHalfWords(imm, reg_size) >= ((reg_size / 16) - 1);
}
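// A short illustration of the half-word rule above: for a 64-bit register,
// IsImmMovz requires at least three of the four 16-bit half-words to be
// clear, so 0x0000123400000000 is accepted ("movz x0, #0x1234, lsl #32" is a
// plausible materialisation, with x0 purely illustrative) while
// 0x0000123400001234 is rejected, since two half-words are populated.
// IsImmMovn applies the same test to the complement, accepting values such
// as 0xffff1234ffffffff.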

// Test if a given value can be encoded in the immediate field of a logical
// instruction.
// If it can be encoded, the function returns true, and values pointed to by n,
// imm_s and imm_r are updated with immediates encoded in the format required
// by the corresponding fields in the logical instruction.
// If it cannot be encoded, the function returns false, and the values pointed
// to by n, imm_s and imm_r are undefined.
bool Assembler::IsImmLogical(uint64_t value,
                             unsigned width,
                             unsigned* n,
                             unsigned* imm_s,
                             unsigned* imm_r) {
  VIXL_ASSERT((width == kWRegSize) || (width == kXRegSize));

  bool negate = false;

  // Logical immediates are encoded using parameters n, imm_s and imm_r using
  // the following table:
  //
  //    N   imms    immr    size        S             R
  //    1  ssssss  rrrrrr    64    UInt(ssssss)  UInt(rrrrrr)
  //    0  0sssss  xrrrrr    32    UInt(sssss)   UInt(rrrrr)
  //    0  10ssss  xxrrrr    16    UInt(ssss)    UInt(rrrr)
  //    0  110sss  xxxrrr     8    UInt(sss)     UInt(rrr)
  //    0  1110ss  xxxxrr     4    UInt(ss)      UInt(rr)
  //    0  11110s  xxxxxr     2    UInt(s)       UInt(r)
  //    (s bits must not be all set)
  //
  // A pattern is constructed of size bits, where the least significant S+1
  // bits are set. The pattern is rotated right by R, and repeated across a
  // 32 or 64-bit value, depending on destination register width.
  //
  // Put another way: the basic format of a logical immediate is a single
  // contiguous stretch of 1 bits, repeated across the whole word at intervals
  // given by a power of 2. To identify them quickly, we first locate the
  // lowest stretch of 1 bits, then the next 1 bit above that; that combination
  // is different for every logical immediate, so it gives us all the
  // information we need to identify the only logical immediate that our input
  // could be, and then we simply check if that's the value we actually have.
  //
  // (The rotation parameter does give the possibility of the stretch of 1 bits
  // going 'round the end' of the word. To deal with that, we observe that in
  // any situation where that happens the bitwise NOT of the value is also a
  // valid logical immediate. So we simply invert the input whenever its low
  // bit is set, and then we know that the rotated case can't arise.)

  if (value & 1) {
    // If the low bit is 1, negate the value, and set a flag to remember that
    // we did (so that we can adjust the return values appropriately).
    negate = true;
    value = ~value;
  }

  if (width == kWRegSize) {
    // To handle 32-bit logical immediates, the easiest thing is to repeat
    // the input value twice to make a 64-bit word. The correct encoding of
    // that as a logical immediate will also be the correct encoding of the
    // 32-bit value.

    // Avoid making the assumption that the most-significant 32 bits are zero
    // by shifting the value left and duplicating it.
    value <<= kWRegSize;
    value |= value >> kWRegSize;
  }

  // The basic analysis idea: imagine our input word looks like this.
  //
  //    0011111000111110001111100011111000111110001111100011111000111110
  //                                                          c  b    a
  //                                                          |<--d-->|
  //
  // We find the lowest set bit (as an actual power-of-2 value, not its index)
  // and call it a. Then we add a to our original number, which wipes out the
  // bottommost stretch of set bits and replaces it with a 1 carried into the
  // next zero bit. Then we look for the new lowest set bit, which is in
  // position b, and subtract it, so now our number is just like the original
  // but with the lowest stretch of set bits completely gone. Now we find the
  // lowest set bit again, which is position c in the diagram above. Then we'll
  // measure the distance d between bit positions a and c (using CLZ), and that
  // tells us that the only valid logical immediate that could possibly be
  // equal to this number is the one in which a stretch of bits running from a
  // to just below b is replicated every d bits.
  uint64_t a = LowestSetBit(value);
  uint64_t value_plus_a = value + a;
  uint64_t b = LowestSetBit(value_plus_a);
  uint64_t value_plus_a_minus_b = value_plus_a - b;
  uint64_t c = LowestSetBit(value_plus_a_minus_b);

  int d, clz_a, out_n;
  uint64_t mask;

  if (c != 0) {
    // The general case, in which there is more than one stretch of set bits.
    // Compute the repeat distance d, and set up a bitmask covering the basic
    // unit of repetition (i.e. a word with the bottom d bits set). Also, in
    // all of these cases the N bit of the output will be zero.
    clz_a = CountLeadingZeros(a, kXRegSize);
    int clz_c = CountLeadingZeros(c, kXRegSize);
    d = clz_a - clz_c;
    mask = ((UINT64_C(1) << d) - 1);
    out_n = 0;
  } else {
    // Handle degenerate cases.
    //
    // If any of those 'find lowest set bit' operations didn't find a set bit
    // at all, then the word will have been zero thereafter, so in particular
    // the last lowest_set_bit operation will have returned zero. So we can
    // test for all the special case conditions in one go by seeing if c is
    // zero.
    if (a == 0) {
      // The input was zero (or all 1 bits, which will come to here too after
      // we inverted it at the start of the function), for which we just
      // return false.
      return false;
    } else {
      // Otherwise, if c was zero but a was not, then there's just one stretch
      // of set bits in our word, meaning that we have the trivial case of
      // d == 64 and only one 'repetition'. Set up all the same variables as in
      // the general case above, and set the N bit in the output.
      clz_a = CountLeadingZeros(a, kXRegSize);
      d = 64;
      mask = ~UINT64_C(0);
      out_n = 1;
    }
  }

  // If the repeat period d is not a power of two, it can't be encoded.
  if (!IsPowerOf2(d)) {
    return false;
  }

  if (((b - a) & ~mask) != 0) {
    // If the bit stretch (b - a) does not fit within the mask derived from the
    // repeat period, then fail.
    return false;
  }

  // The only possible option is b - a repeated every d bits. Now we're going
  // to actually construct the valid logical immediate derived from that
  // specification, and see if it equals our original input.
  //
  // To repeat a value every d bits, we multiply it by a number of the form
  // (1 + 2^d + 2^(2d) + ...), i.e. 0x0001000100010001 or similar. These can
  // be derived using a table lookup on CLZ(d).
  static const uint64_t multipliers[] = {
      0x0000000000000001UL,
      0x0000000100000001UL,
      0x0001000100010001UL,
      0x0101010101010101UL,
      0x1111111111111111UL,
      0x5555555555555555UL,
  };
  uint64_t multiplier = multipliers[CountLeadingZeros(d, kXRegSize) - 57];
  uint64_t candidate = (b - a) * multiplier;

  if (value != candidate) {
    // The candidate pattern doesn't match our input value, so fail.
    return false;
  }

  // We have a match! This is a valid logical immediate, so now we have to
  // construct the bits and pieces of the instruction encoding that generates
  // it.

  // Count the set bits in our basic stretch. The special case of clz(0) == -1
  // makes the answer come out right for stretches that reach the very top of
  // the word (e.g. numbers like 0xffffc00000000000).
  int clz_b = (b == 0) ? -1 : CountLeadingZeros(b, kXRegSize);
  int s = clz_a - clz_b;

  // Decide how many bits to rotate right by, to put the low bit of that basic
  // stretch in position a.
  int r;
  if (negate) {
    // If we inverted the input right at the start of this function, here's
    // where we compensate: the number of set bits becomes the number of clear
    // bits, and the rotation count is based on position b rather than position
    // a (since b is the location of the 'lowest' 1 bit after inversion).
    s = d - s;
    r = (clz_b + 1) & (d - 1);
  } else {
    r = (clz_a + 1) & (d - 1);
  }

  // Now we're done, except for having to encode the S output in such a way
  // that it gives both the number of set bits and the length of the repeated
  // segment. The s field is encoded like this:
  //
  //     imms    size        S
  //    ssssss    64    UInt(ssssss)
  //    0sssss    32    UInt(sssss)
  //    10ssss    16    UInt(ssss)
  //    110sss     8    UInt(sss)
  //    1110ss     4    UInt(ss)
  //    11110s     2    UInt(s)
  //
  // So we 'or' (2 * -d) with (s - 1) to form imms.
  if ((n != NULL) || (imm_s != NULL) || (imm_r != NULL)) {
    *n = out_n;
    *imm_s = ((2 * -d) | (s - 1)) & 0x3f;
    *imm_r = r;
  }

  return true;
}
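// A worked example of the procedure above, for value == 0x00ff00ff00ff00ff:
// the low bit is set, so the value is inverted to 0xff00ff00ff00ff00 and
// negate is recorded. Then a == 1 << 8, b == 1 << 16 and c == 1 << 24,
// giving d == 16, mask == 0xffff and out_n == 0. b - a == 0xff00, and
// multiplying by 0x0001000100010001 reproduces 0xff00ff00ff00ff00, so the
// candidate matches. Finally s == 8 (unchanged by the negate adjustment,
// since d - s == 8), r == (47 + 1) & 15 == 0, and
// imms == ((2 * -16) | 7) & 0x3f == 0b100111: the encoding of eight set bits
// repeating every sixteen, i.e. N == 0, imm_s == 0x27, imm_r == 0.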

LoadStoreOp Assembler::LoadOpFor(const CPURegister& rt) {
  VIXL_ASSERT(rt.IsValid());
  if (rt.IsRegister()) {
    return rt.Is64Bits() ? LDR_x : LDR_w;
  } else {
    VIXL_ASSERT(rt.IsVRegister());
    switch (rt.GetSizeInBits()) {
      case kBRegSize:
        return LDR_b;
      case kHRegSize:
        return LDR_h;
      case kSRegSize:
        return LDR_s;
      case kDRegSize:
        return LDR_d;
      default:
        VIXL_ASSERT(rt.IsQ());
        return LDR_q;
    }
  }
}


LoadStoreOp Assembler::StoreOpFor(const CPURegister& rt) {
  VIXL_ASSERT(rt.IsValid());
  if (rt.IsRegister()) {
    return rt.Is64Bits() ? STR_x : STR_w;
  } else {
    VIXL_ASSERT(rt.IsVRegister());
    switch (rt.GetSizeInBits()) {
      case kBRegSize:
        return STR_b;
      case kHRegSize:
        return STR_h;
      case kSRegSize:
        return STR_s;
      case kDRegSize:
        return STR_d;
      default:
        VIXL_ASSERT(rt.IsQ());
        return STR_q;
    }
  }
}


LoadStorePairOp Assembler::StorePairOpFor(const CPURegister& rt,
                                          const CPURegister& rt2) {
  VIXL_ASSERT(AreSameSizeAndType(rt, rt2));
  USE(rt2);
  if (rt.IsRegister()) {
    return rt.Is64Bits() ? STP_x : STP_w;
  } else {
    VIXL_ASSERT(rt.IsVRegister());
    switch (rt.GetSizeInBytes()) {
      case kSRegSizeInBytes:
        return STP_s;
      case kDRegSizeInBytes:
        return STP_d;
      default:
        VIXL_ASSERT(rt.IsQ());
        return STP_q;
    }
  }
}
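// The pair helpers below rely on a property of the encodings rather than a
// second lookup table: a load-pair opcode is the corresponding store-pair
// opcode with the L bit set, so for example LoadPairOpFor(w0, w1) computes
// STP_w | LoadStorePairLBit, which the assertion checks is exactly LDP_w
// (w0 and w1 here are purely illustrative arguments).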

LoadStorePairOp Assembler::LoadPairOpFor(const CPURegister& rt,
                                         const CPURegister& rt2) {
  VIXL_ASSERT((STP_w | LoadStorePairLBit) == LDP_w);
  return static_cast<LoadStorePairOp>(StorePairOpFor(rt, rt2) |
                                      LoadStorePairLBit);
}


LoadStorePairNonTemporalOp Assembler::StorePairNonTemporalOpFor(
    const CPURegister& rt, const CPURegister& rt2) {
  VIXL_ASSERT(AreSameSizeAndType(rt, rt2));
  USE(rt2);
  if (rt.IsRegister()) {
    return rt.Is64Bits() ? STNP_x : STNP_w;
  } else {
    VIXL_ASSERT(rt.IsVRegister());
    switch (rt.GetSizeInBytes()) {
      case kSRegSizeInBytes:
        return STNP_s;
      case kDRegSizeInBytes:
        return STNP_d;
      default:
        VIXL_ASSERT(rt.IsQ());
        return STNP_q;
    }
  }
}


LoadStorePairNonTemporalOp Assembler::LoadPairNonTemporalOpFor(
    const CPURegister& rt, const CPURegister& rt2) {
  VIXL_ASSERT((STNP_w | LoadStorePairNonTemporalLBit) == LDNP_w);
  return static_cast<LoadStorePairNonTemporalOp>(
      StorePairNonTemporalOpFor(rt, rt2) | LoadStorePairNonTemporalLBit);
}


LoadLiteralOp Assembler::LoadLiteralOpFor(const CPURegister& rt) {
  if (rt.IsRegister()) {
    return rt.IsX() ? LDR_x_lit : LDR_w_lit;
  } else {
    VIXL_ASSERT(rt.IsVRegister());
    switch (rt.GetSizeInBytes()) {
      case kSRegSizeInBytes:
        return LDR_s_lit;
      case kDRegSizeInBytes:
        return LDR_d_lit;
      default:
        VIXL_ASSERT(rt.IsQ());
        return LDR_q_lit;
    }
  }
}


bool Assembler::CPUHas(const CPURegister& rt) const {
  // Core registers are available without any particular CPU features.
  if (rt.IsRegister()) return true;
  VIXL_ASSERT(rt.IsVRegister());
  // The architecture does not allow FP and NEON to be implemented separately,
  // but we can crudely categorise them based on register size, since FP only
  // uses D, S and (occasionally) H registers.
  if (rt.IsH() || rt.IsS() || rt.IsD()) {
    return CPUHas(CPUFeatures::kFP) || CPUHas(CPUFeatures::kNEON);
  }
  VIXL_ASSERT(rt.IsB() || rt.IsQ());
  return CPUHas(CPUFeatures::kNEON);
}


bool Assembler::CPUHas(const CPURegister& rt, const CPURegister& rt2) const {
  // This is currently only used for loads and stores, where rt and rt2 must
  // have the same size and type. We could extend this to cover other cases if
  // necessary, but for now we can avoid checking both registers.
  VIXL_ASSERT(AreSameSizeAndType(rt, rt2));
  USE(rt2);
  return CPUHas(rt);
}
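// For instance, under the categorisation above a load into s0 or d0 is
// accepted when either kFP or kNEON is available, while a load into q0 (or
// b0) requires kNEON, because only NEON instructions operate on B and Q
// registers.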

bool AreAliased(const CPURegister& reg1,
                const CPURegister& reg2,
                const CPURegister& reg3,
                const CPURegister& reg4,
                const CPURegister& reg5,
                const CPURegister& reg6,
                const CPURegister& reg7,
                const CPURegister& reg8) {
  int number_of_valid_regs = 0;
  int number_of_valid_fpregs = 0;

  RegList unique_regs = 0;
  RegList unique_fpregs = 0;

  const CPURegister regs[] = {reg1, reg2, reg3, reg4, reg5, reg6, reg7, reg8};

  for (size_t i = 0; i < ArrayLength(regs); i++) {
    if (regs[i].IsRegister()) {
      number_of_valid_regs++;
      unique_regs |= regs[i].GetBit();
    } else if (regs[i].IsVRegister()) {
      number_of_valid_fpregs++;
      unique_fpregs |= regs[i].GetBit();
    } else {
      VIXL_ASSERT(!regs[i].IsValid());
    }
  }

  int number_of_unique_regs = CountSetBits(unique_regs);
  int number_of_unique_fpregs = CountSetBits(unique_fpregs);

  VIXL_ASSERT(number_of_valid_regs >= number_of_unique_regs);
  VIXL_ASSERT(number_of_valid_fpregs >= number_of_unique_fpregs);

  return (number_of_valid_regs != number_of_unique_regs) ||
         (number_of_valid_fpregs != number_of_unique_fpregs);
}


bool AreSameSizeAndType(const CPURegister& reg1,
                        const CPURegister& reg2,
                        const CPURegister& reg3,
                        const CPURegister& reg4,
                        const CPURegister& reg5,
                        const CPURegister& reg6,
                        const CPURegister& reg7,
                        const CPURegister& reg8) {
  VIXL_ASSERT(reg1.IsValid());
  bool match = true;
  match &= !reg2.IsValid() || reg2.IsSameSizeAndType(reg1);
  match &= !reg3.IsValid() || reg3.IsSameSizeAndType(reg1);
  match &= !reg4.IsValid() || reg4.IsSameSizeAndType(reg1);
  match &= !reg5.IsValid() || reg5.IsSameSizeAndType(reg1);
  match &= !reg6.IsValid() || reg6.IsSameSizeAndType(reg1);
  match &= !reg7.IsValid() || reg7.IsSameSizeAndType(reg1);
  match &= !reg8.IsValid() || reg8.IsSameSizeAndType(reg1);
  return match;
}


bool AreEven(const CPURegister& reg1,
             const CPURegister& reg2,
             const CPURegister& reg3,
             const CPURegister& reg4,
             const CPURegister& reg5,
             const CPURegister& reg6,
             const CPURegister& reg7,
             const CPURegister& reg8) {
  VIXL_ASSERT(reg1.IsValid());
  bool even = (reg1.GetCode() % 2) == 0;
  even &= !reg2.IsValid() || ((reg2.GetCode() % 2) == 0);
  even &= !reg3.IsValid() || ((reg3.GetCode() % 2) == 0);
  even &= !reg4.IsValid() || ((reg4.GetCode() % 2) == 0);
  even &= !reg5.IsValid() || ((reg5.GetCode() % 2) == 0);
  even &= !reg6.IsValid() || ((reg6.GetCode() % 2) == 0);
  even &= !reg7.IsValid() || ((reg7.GetCode() % 2) == 0);
  even &= !reg8.IsValid() || ((reg8.GetCode() % 2) == 0);
  return even;
}
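// By way of illustration: AreAliased(x1, x2, x1) is true because x1
// contributes two valid core registers but only one unique bit, whereas
// AreAliased(x1, v1) is false, since core and vector registers are counted
// in separate lists even when their register codes coincide.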

bool AreConsecutive(const CPURegister& reg1,
                    const CPURegister& reg2,
                    const CPURegister& reg3,
                    const CPURegister& reg4) {
  VIXL_ASSERT(reg1.IsValid());

  if (!reg2.IsValid()) {
    return true;
  } else if (reg2.GetCode() != ((reg1.GetCode() + 1) % kNumberOfRegisters)) {
    return false;
  }

  if (!reg3.IsValid()) {
    return true;
  } else if (reg3.GetCode() != ((reg2.GetCode() + 1) % kNumberOfRegisters)) {
    return false;
  }

  if (!reg4.IsValid()) {
    return true;
  } else if (reg4.GetCode() != ((reg3.GetCode() + 1) % kNumberOfRegisters)) {
    return false;
  }

  return true;
}


bool AreSameFormat(const VRegister& reg1,
                   const VRegister& reg2,
                   const VRegister& reg3,
                   const VRegister& reg4) {
  VIXL_ASSERT(reg1.IsValid());
  bool match = true;
  match &= !reg2.IsValid() || reg2.IsSameFormat(reg1);
  match &= !reg3.IsValid() || reg3.IsSameFormat(reg1);
  match &= !reg4.IsValid() || reg4.IsSameFormat(reg1);
  return match;
}


bool AreConsecutive(const VRegister& reg1,
                    const VRegister& reg2,
                    const VRegister& reg3,
                    const VRegister& reg4) {
  VIXL_ASSERT(reg1.IsValid());

  if (!reg2.IsValid()) {
    return true;
  } else if (reg2.GetCode() != ((reg1.GetCode() + 1) % kNumberOfVRegisters)) {
    return false;
  }

  if (!reg3.IsValid()) {
    return true;
  } else if (reg3.GetCode() != ((reg2.GetCode() + 1) % kNumberOfVRegisters)) {
    return false;
  }

  if (!reg4.IsValid()) {
    return true;
  } else if (reg4.GetCode() != ((reg3.GetCode() + 1) % kNumberOfVRegisters)) {
    return false;
  }

  return true;
}
}  // namespace aarch64
}  // namespace vixl