1 // Copyright 2015, VIXL authors 2 // All rights reserved. 3 // 4 // Redistribution and use in source and binary forms, with or without 5 // modification, are permitted provided that the following conditions are met: 6 // 7 // * Redistributions of source code must retain the above copyright notice, 8 // this list of conditions and the following disclaimer. 9 // * Redistributions in binary form must reproduce the above copyright notice, 10 // this list of conditions and the following disclaimer in the documentation 11 // and/or other materials provided with the distribution. 12 // * Neither the name of ARM Limited nor the names of its contributors may be 13 // used to endorse or promote products derived from this software without 14 // specific prior written permission. 15 // 16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND 17 // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 18 // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 19 // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE 20 // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 21 // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 22 // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 23 // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 24 // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 25 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
26 27 #ifdef VIXL_INCLUDE_SIMULATOR_AARCH64 28 29 #include <cstring> 30 #include <cmath> 31 #include <limits> 32 33 #include "simulator-aarch64.h" 34 35 namespace vixl { 36 namespace aarch64 { 37 38 const Instruction* Simulator::kEndOfSimAddress = NULL; 39 40 void SimSystemRegister::SetBits(int msb, int lsb, uint32_t bits) { 41 int width = msb - lsb + 1; 42 VIXL_ASSERT(IsUintN(width, bits) || IsIntN(width, bits)); 43 44 bits <<= lsb; 45 uint32_t mask = ((1 << width) - 1) << lsb; 46 VIXL_ASSERT((mask & write_ignore_mask_) == 0); 47 48 value_ = (value_ & ~mask) | (bits & mask); 49 } 50 51 52 SimSystemRegister SimSystemRegister::DefaultValueFor(SystemRegister id) { 53 switch (id) { 54 case NZCV: 55 return SimSystemRegister(0x00000000, NZCVWriteIgnoreMask); 56 case FPCR: 57 return SimSystemRegister(0x00000000, FPCRWriteIgnoreMask); 58 default: 59 VIXL_UNREACHABLE(); 60 return SimSystemRegister(); 61 } 62 } 63 64 65 Simulator::Simulator(Decoder* decoder, FILE* stream) { 66 // Ensure that shift operations act as the simulator expects. 67 VIXL_ASSERT((static_cast<int32_t>(-1) >> 1) == -1); 68 VIXL_ASSERT((static_cast<uint32_t>(-1) >> 1) == 0x7fffffff); 69 70 instruction_stats_ = false; 71 72 // Set up the decoder. 73 decoder_ = decoder; 74 decoder_->AppendVisitor(this); 75 76 stream_ = stream; 77 print_disasm_ = new PrintDisassembler(stream_); 78 SetColouredTrace(false); 79 trace_parameters_ = LOG_NONE; 80 81 ResetState(); 82 83 // Allocate and set up the simulator stack. 84 stack_ = new byte[stack_size_]; 85 stack_limit_ = stack_ + stack_protection_size_; 86 // Configure the starting stack pointer. 87 // - Find the top of the stack. 88 byte* tos = stack_ + stack_size_; 89 // - There's a protection region at both ends of the stack. 90 tos -= stack_protection_size_; 91 // - The stack pointer must be 16-byte aligned. 
92 tos = AlignDown(tos, 16); 93 WriteSp(tos); 94 95 instrumentation_ = NULL; 96 97 // Print a warning about exclusive-access instructions, but only the first 98 // time they are encountered. This warning can be silenced using 99 // SilenceExclusiveAccessWarning(). 100 print_exclusive_access_warning_ = true; 101 } 102 103 104 void Simulator::ResetState() { 105 // Reset the system registers. 106 nzcv_ = SimSystemRegister::DefaultValueFor(NZCV); 107 fpcr_ = SimSystemRegister::DefaultValueFor(FPCR); 108 109 // Reset registers to 0. 110 pc_ = NULL; 111 pc_modified_ = false; 112 for (unsigned i = 0; i < kNumberOfRegisters; i++) { 113 WriteXRegister(i, 0xbadbeef); 114 } 115 // Set FP registers to a value that is a NaN in both 32-bit and 64-bit FP. 116 uint64_t nan_bits[] = { 117 UINT64_C(0x7ff00cab7f8ba9e1), UINT64_C(0x7ff0dead7f8beef1), 118 }; 119 VIXL_ASSERT(IsSignallingNaN(RawbitsToDouble(nan_bits[0] & kDRegMask))); 120 VIXL_ASSERT(IsSignallingNaN(RawbitsToFloat(nan_bits[0] & kSRegMask))); 121 122 qreg_t q_bits; 123 VIXL_ASSERT(sizeof(q_bits) == sizeof(nan_bits)); 124 memcpy(&q_bits, nan_bits, sizeof(nan_bits)); 125 126 for (unsigned i = 0; i < kNumberOfVRegisters; i++) { 127 WriteQRegister(i, q_bits); 128 } 129 // Returning to address 0 exits the Simulator. 130 WriteLr(kEndOfSimAddress); 131 } 132 133 134 Simulator::~Simulator() { 135 delete[] stack_; 136 // The decoder may outlive the simulator. 137 decoder_->RemoveVisitor(print_disasm_); 138 delete print_disasm_; 139 140 decoder_->RemoveVisitor(instrumentation_); 141 delete instrumentation_; 142 } 143 144 145 void Simulator::Run() { 146 // Flush any written registers before executing anything, so that 147 // manually-set registers are logged _before_ the first instruction. 
148 LogAllWrittenRegisters(); 149 150 while (pc_ != kEndOfSimAddress) { 151 ExecuteInstruction(); 152 } 153 } 154 155 156 void Simulator::RunFrom(const Instruction* first) { 157 WritePc(first, NoBranchLog); 158 Run(); 159 } 160 161 162 const char* Simulator::xreg_names[] = {"x0", "x1", "x2", "x3", "x4", "x5", 163 "x6", "x7", "x8", "x9", "x10", "x11", 164 "x12", "x13", "x14", "x15", "x16", "x17", 165 "x18", "x19", "x20", "x21", "x22", "x23", 166 "x24", "x25", "x26", "x27", "x28", "x29", 167 "lr", "xzr", "sp"}; 168 169 const char* Simulator::wreg_names[] = {"w0", "w1", "w2", "w3", "w4", "w5", 170 "w6", "w7", "w8", "w9", "w10", "w11", 171 "w12", "w13", "w14", "w15", "w16", "w17", 172 "w18", "w19", "w20", "w21", "w22", "w23", 173 "w24", "w25", "w26", "w27", "w28", "w29", 174 "w30", "wzr", "wsp"}; 175 176 const char* Simulator::sreg_names[] = {"s0", "s1", "s2", "s3", "s4", "s5", 177 "s6", "s7", "s8", "s9", "s10", "s11", 178 "s12", "s13", "s14", "s15", "s16", "s17", 179 "s18", "s19", "s20", "s21", "s22", "s23", 180 "s24", "s25", "s26", "s27", "s28", "s29", 181 "s30", "s31"}; 182 183 const char* Simulator::dreg_names[] = {"d0", "d1", "d2", "d3", "d4", "d5", 184 "d6", "d7", "d8", "d9", "d10", "d11", 185 "d12", "d13", "d14", "d15", "d16", "d17", 186 "d18", "d19", "d20", "d21", "d22", "d23", 187 "d24", "d25", "d26", "d27", "d28", "d29", 188 "d30", "d31"}; 189 190 const char* Simulator::vreg_names[] = {"v0", "v1", "v2", "v3", "v4", "v5", 191 "v6", "v7", "v8", "v9", "v10", "v11", 192 "v12", "v13", "v14", "v15", "v16", "v17", 193 "v18", "v19", "v20", "v21", "v22", "v23", 194 "v24", "v25", "v26", "v27", "v28", "v29", 195 "v30", "v31"}; 196 197 198 const char* Simulator::WRegNameForCode(unsigned code, Reg31Mode mode) { 199 VIXL_ASSERT(code < kNumberOfRegisters); 200 // If the code represents the stack pointer, index the name after zr. 
201 if ((code == kZeroRegCode) && (mode == Reg31IsStackPointer)) { 202 code = kZeroRegCode + 1; 203 } 204 return wreg_names[code]; 205 } 206 207 208 const char* Simulator::XRegNameForCode(unsigned code, Reg31Mode mode) { 209 VIXL_ASSERT(code < kNumberOfRegisters); 210 // If the code represents the stack pointer, index the name after zr. 211 if ((code == kZeroRegCode) && (mode == Reg31IsStackPointer)) { 212 code = kZeroRegCode + 1; 213 } 214 return xreg_names[code]; 215 } 216 217 218 const char* Simulator::SRegNameForCode(unsigned code) { 219 VIXL_ASSERT(code < kNumberOfFPRegisters); 220 return sreg_names[code]; 221 } 222 223 224 const char* Simulator::DRegNameForCode(unsigned code) { 225 VIXL_ASSERT(code < kNumberOfFPRegisters); 226 return dreg_names[code]; 227 } 228 229 230 const char* Simulator::VRegNameForCode(unsigned code) { 231 VIXL_ASSERT(code < kNumberOfVRegisters); 232 return vreg_names[code]; 233 } 234 235 236 #define COLOUR(colour_code) "\033[0;" colour_code "m" 237 #define COLOUR_BOLD(colour_code) "\033[1;" colour_code "m" 238 #define COLOUR_HIGHLIGHT "\033[43m" 239 #define NORMAL "" 240 #define GREY "30" 241 #define RED "31" 242 #define GREEN "32" 243 #define YELLOW "33" 244 #define BLUE "34" 245 #define MAGENTA "35" 246 #define CYAN "36" 247 #define WHITE "37" 248 void Simulator::SetColouredTrace(bool value) { 249 coloured_trace_ = value; 250 251 clr_normal = value ? COLOUR(NORMAL) : ""; 252 clr_flag_name = value ? COLOUR_BOLD(WHITE) : ""; 253 clr_flag_value = value ? COLOUR(NORMAL) : ""; 254 clr_reg_name = value ? COLOUR_BOLD(CYAN) : ""; 255 clr_reg_value = value ? COLOUR(CYAN) : ""; 256 clr_vreg_name = value ? COLOUR_BOLD(MAGENTA) : ""; 257 clr_vreg_value = value ? COLOUR(MAGENTA) : ""; 258 clr_memory_address = value ? COLOUR_BOLD(BLUE) : ""; 259 clr_warning = value ? COLOUR_BOLD(YELLOW) : ""; 260 clr_warning_message = value ? COLOUR(YELLOW) : ""; 261 clr_printf = value ? COLOUR(GREEN) : ""; 262 clr_branch_marker = value ? 
COLOUR(GREY) COLOUR_HIGHLIGHT : ""; 263 } 264 265 266 void Simulator::SetTraceParameters(int parameters) { 267 bool disasm_before = trace_parameters_ & LOG_DISASM; 268 trace_parameters_ = parameters; 269 bool disasm_after = trace_parameters_ & LOG_DISASM; 270 271 if (disasm_before != disasm_after) { 272 if (disasm_after) { 273 decoder_->InsertVisitorBefore(print_disasm_, this); 274 } else { 275 decoder_->RemoveVisitor(print_disasm_); 276 } 277 } 278 } 279 280 281 void Simulator::SetInstructionStats(bool value) { 282 if (value != instruction_stats_) { 283 if (value) { 284 if (instrumentation_ == NULL) { 285 // Set the sample period to 10, as the VIXL examples and tests are 286 // short. 287 instrumentation_ = new Instrument("vixl_stats.csv", 10); 288 } 289 decoder_->AppendVisitor(instrumentation_); 290 } else if (instrumentation_ != NULL) { 291 decoder_->RemoveVisitor(instrumentation_); 292 } 293 instruction_stats_ = value; 294 } 295 } 296 297 // Helpers --------------------------------------------------------------------- 298 uint64_t Simulator::AddWithCarry(unsigned reg_size, 299 bool set_flags, 300 uint64_t left, 301 uint64_t right, 302 int carry_in) { 303 VIXL_ASSERT((carry_in == 0) || (carry_in == 1)); 304 VIXL_ASSERT((reg_size == kXRegSize) || (reg_size == kWRegSize)); 305 306 uint64_t max_uint = (reg_size == kWRegSize) ? kWMaxUInt : kXMaxUInt; 307 uint64_t reg_mask = (reg_size == kWRegSize) ? kWRegMask : kXRegMask; 308 uint64_t sign_mask = (reg_size == kWRegSize) ? kWSignMask : kXSignMask; 309 310 left &= reg_mask; 311 right &= reg_mask; 312 uint64_t result = (left + right + carry_in) & reg_mask; 313 314 if (set_flags) { 315 ReadNzcv().SetN(CalcNFlag(result, reg_size)); 316 ReadNzcv().SetZ(CalcZFlag(result)); 317 318 // Compute the C flag by comparing the result to the max unsigned integer. 319 uint64_t max_uint_2op = max_uint - carry_in; 320 bool C = (left > max_uint_2op) || ((max_uint_2op - left) < right); 321 ReadNzcv().SetC(C ? 
1 : 0); 322 323 // Overflow iff the sign bit is the same for the two inputs and different 324 // for the result. 325 uint64_t left_sign = left & sign_mask; 326 uint64_t right_sign = right & sign_mask; 327 uint64_t result_sign = result & sign_mask; 328 bool V = (left_sign == right_sign) && (left_sign != result_sign); 329 ReadNzcv().SetV(V ? 1 : 0); 330 331 LogSystemRegister(NZCV); 332 } 333 return result; 334 } 335 336 337 int64_t Simulator::ShiftOperand(unsigned reg_size, 338 int64_t value, 339 Shift shift_type, 340 unsigned amount) const { 341 VIXL_ASSERT((reg_size == kWRegSize) || (reg_size == kXRegSize)); 342 if (amount == 0) { 343 return value; 344 } 345 uint64_t uvalue = static_cast<uint64_t>(value); 346 uint64_t mask = kWRegMask; 347 bool is_negative = (uvalue & kWSignMask) != 0; 348 if (reg_size == kXRegSize) { 349 mask = kXRegMask; 350 is_negative = (uvalue & kXSignMask) != 0; 351 } 352 353 switch (shift_type) { 354 case LSL: 355 uvalue <<= amount; 356 break; 357 case LSR: 358 uvalue >>= amount; 359 break; 360 case ASR: 361 uvalue >>= amount; 362 if (is_negative) { 363 // Simulate sign-extension to 64 bits. 
364 uvalue |= ~UINT64_C(0) << (reg_size - amount); 365 } 366 break; 367 case ROR: { 368 uvalue = RotateRight(uvalue, amount, reg_size); 369 break; 370 } 371 default: 372 VIXL_UNIMPLEMENTED(); 373 return 0; 374 } 375 uvalue &= mask; 376 377 int64_t result; 378 memcpy(&result, &uvalue, sizeof(result)); 379 return result; 380 } 381 382 383 int64_t Simulator::ExtendValue(unsigned reg_size, 384 int64_t value, 385 Extend extend_type, 386 unsigned left_shift) const { 387 switch (extend_type) { 388 case UXTB: 389 value &= kByteMask; 390 break; 391 case UXTH: 392 value &= kHalfWordMask; 393 break; 394 case UXTW: 395 value &= kWordMask; 396 break; 397 case SXTB: 398 value &= kByteMask; 399 if ((value & 0x80) != 0) { 400 value |= ~UINT64_C(0) << 8; 401 } 402 break; 403 case SXTH: 404 value &= kHalfWordMask; 405 if ((value & 0x8000) != 0) { 406 value |= ~UINT64_C(0) << 16; 407 } 408 break; 409 case SXTW: 410 value &= kWordMask; 411 if ((value & 0x80000000) != 0) { 412 value |= ~UINT64_C(0) << 32; 413 } 414 break; 415 case UXTX: 416 case SXTX: 417 break; 418 default: 419 VIXL_UNREACHABLE(); 420 } 421 return ShiftOperand(reg_size, value, LSL, left_shift); 422 } 423 424 425 void Simulator::FPCompare(double val0, double val1, FPTrapFlags trap) { 426 AssertSupportedFPCR(); 427 428 // TODO: This assumes that the C++ implementation handles comparisons in the 429 // way that we expect (as per AssertSupportedFPCR()). 
430 bool process_exception = false; 431 if ((std::isnan(val0) != 0) || (std::isnan(val1) != 0)) { 432 ReadNzcv().SetRawValue(FPUnorderedFlag); 433 if (IsSignallingNaN(val0) || IsSignallingNaN(val1) || 434 (trap == EnableTrap)) { 435 process_exception = true; 436 } 437 } else if (val0 < val1) { 438 ReadNzcv().SetRawValue(FPLessThanFlag); 439 } else if (val0 > val1) { 440 ReadNzcv().SetRawValue(FPGreaterThanFlag); 441 } else if (val0 == val1) { 442 ReadNzcv().SetRawValue(FPEqualFlag); 443 } else { 444 VIXL_UNREACHABLE(); 445 } 446 LogSystemRegister(NZCV); 447 if (process_exception) FPProcessException(); 448 } 449 450 451 uint64_t Simulator::ComputeMemOperandAddress(const MemOperand& mem_op) const { 452 VIXL_ASSERT(mem_op.IsValid()); 453 int64_t base = ReadRegister<int64_t>(mem_op.GetBaseRegister()); 454 if (mem_op.IsImmediateOffset()) { 455 return base + mem_op.GetOffset(); 456 } else { 457 VIXL_ASSERT(mem_op.GetRegisterOffset().IsValid()); 458 int64_t offset = ReadRegister<int64_t>(mem_op.GetRegisterOffset()); 459 unsigned shift_amount = mem_op.GetShiftAmount(); 460 if (mem_op.GetShift() != NO_SHIFT) { 461 offset = ShiftOperand(kXRegSize, offset, mem_op.GetShift(), shift_amount); 462 } 463 if (mem_op.GetExtend() != NO_EXTEND) { 464 offset = ExtendValue(kXRegSize, offset, mem_op.GetExtend(), shift_amount); 465 } 466 return static_cast<uint64_t>(base + offset); 467 } 468 } 469 470 471 Simulator::PrintRegisterFormat Simulator::GetPrintRegisterFormatForSize( 472 unsigned reg_size, unsigned lane_size) { 473 VIXL_ASSERT(reg_size >= lane_size); 474 475 uint32_t format = 0; 476 if (reg_size != lane_size) { 477 switch (reg_size) { 478 default: 479 VIXL_UNREACHABLE(); 480 break; 481 case kQRegSizeInBytes: 482 format = kPrintRegAsQVector; 483 break; 484 case kDRegSizeInBytes: 485 format = kPrintRegAsDVector; 486 break; 487 } 488 } 489 490 switch (lane_size) { 491 default: 492 VIXL_UNREACHABLE(); 493 break; 494 case kQRegSizeInBytes: 495 format |= kPrintReg1Q; 496 break; 497 
case kDRegSizeInBytes: 498 format |= kPrintReg1D; 499 break; 500 case kSRegSizeInBytes: 501 format |= kPrintReg1S; 502 break; 503 case kHRegSizeInBytes: 504 format |= kPrintReg1H; 505 break; 506 case kBRegSizeInBytes: 507 format |= kPrintReg1B; 508 break; 509 } 510 // These sizes would be duplicate case labels. 511 VIXL_STATIC_ASSERT(kXRegSizeInBytes == kDRegSizeInBytes); 512 VIXL_STATIC_ASSERT(kWRegSizeInBytes == kSRegSizeInBytes); 513 VIXL_STATIC_ASSERT(kPrintXReg == kPrintReg1D); 514 VIXL_STATIC_ASSERT(kPrintWReg == kPrintReg1S); 515 516 return static_cast<PrintRegisterFormat>(format); 517 } 518 519 520 Simulator::PrintRegisterFormat Simulator::GetPrintRegisterFormat( 521 VectorFormat vform) { 522 switch (vform) { 523 default: 524 VIXL_UNREACHABLE(); 525 return kPrintReg16B; 526 case kFormat16B: 527 return kPrintReg16B; 528 case kFormat8B: 529 return kPrintReg8B; 530 case kFormat8H: 531 return kPrintReg8H; 532 case kFormat4H: 533 return kPrintReg4H; 534 case kFormat4S: 535 return kPrintReg4S; 536 case kFormat2S: 537 return kPrintReg2S; 538 case kFormat2D: 539 return kPrintReg2D; 540 case kFormat1D: 541 return kPrintReg1D; 542 543 case kFormatB: 544 return kPrintReg1B; 545 case kFormatH: 546 return kPrintReg1H; 547 case kFormatS: 548 return kPrintReg1S; 549 case kFormatD: 550 return kPrintReg1D; 551 } 552 } 553 554 555 Simulator::PrintRegisterFormat Simulator::GetPrintRegisterFormatFP( 556 VectorFormat vform) { 557 switch (vform) { 558 default: 559 VIXL_UNREACHABLE(); 560 return kPrintReg16B; 561 case kFormat4S: 562 return kPrintReg4SFP; 563 case kFormat2S: 564 return kPrintReg2SFP; 565 case kFormat2D: 566 return kPrintReg2DFP; 567 case kFormat1D: 568 return kPrintReg1DFP; 569 570 case kFormatS: 571 return kPrintReg1SFP; 572 case kFormatD: 573 return kPrintReg1DFP; 574 } 575 } 576 577 578 void Simulator::PrintWrittenRegisters() { 579 for (unsigned i = 0; i < kNumberOfRegisters; i++) { 580 if (registers_[i].WrittenSinceLastLog()) PrintRegister(i); 581 } 582 } 583 
584 585 void Simulator::PrintWrittenVRegisters() { 586 for (unsigned i = 0; i < kNumberOfVRegisters; i++) { 587 // At this point there is no type information, so print as a raw 1Q. 588 if (vregisters_[i].WrittenSinceLastLog()) PrintVRegister(i, kPrintReg1Q); 589 } 590 } 591 592 593 void Simulator::PrintSystemRegisters() { 594 PrintSystemRegister(NZCV); 595 PrintSystemRegister(FPCR); 596 } 597 598 599 void Simulator::PrintRegisters() { 600 for (unsigned i = 0; i < kNumberOfRegisters; i++) { 601 PrintRegister(i); 602 } 603 } 604 605 606 void Simulator::PrintVRegisters() { 607 for (unsigned i = 0; i < kNumberOfVRegisters; i++) { 608 // At this point there is no type information, so print as a raw 1Q. 609 PrintVRegister(i, kPrintReg1Q); 610 } 611 } 612 613 614 // Print a register's name and raw value. 615 // 616 // Only the least-significant `size_in_bytes` bytes of the register are printed, 617 // but the value is aligned as if the whole register had been printed. 618 // 619 // For typical register updates, size_in_bytes should be set to kXRegSizeInBytes 620 // -- the default -- so that the whole register is printed. Other values of 621 // size_in_bytes are intended for use when the register hasn't actually been 622 // updated (such as in PrintWrite). 623 // 624 // No newline is printed. This allows the caller to print more details (such as 625 // a memory access annotation). 626 void Simulator::PrintRegisterRawHelper(unsigned code, 627 Reg31Mode r31mode, 628 int size_in_bytes) { 629 // The template for all supported sizes. 
630 // "# x{code}: 0xffeeddccbbaa9988" 631 // "# w{code}: 0xbbaa9988" 632 // "# w{code}<15:0>: 0x9988" 633 // "# w{code}<7:0>: 0x88" 634 unsigned padding_chars = (kXRegSizeInBytes - size_in_bytes) * 2; 635 636 const char* name = ""; 637 const char* suffix = ""; 638 switch (size_in_bytes) { 639 case kXRegSizeInBytes: 640 name = XRegNameForCode(code, r31mode); 641 break; 642 case kWRegSizeInBytes: 643 name = WRegNameForCode(code, r31mode); 644 break; 645 case 2: 646 name = WRegNameForCode(code, r31mode); 647 suffix = "<15:0>"; 648 padding_chars -= strlen(suffix); 649 break; 650 case 1: 651 name = WRegNameForCode(code, r31mode); 652 suffix = "<7:0>"; 653 padding_chars -= strlen(suffix); 654 break; 655 default: 656 VIXL_UNREACHABLE(); 657 } 658 fprintf(stream_, "# %s%5s%s: ", clr_reg_name, name, suffix); 659 660 // Print leading padding spaces. 661 VIXL_ASSERT(padding_chars < (kXRegSizeInBytes * 2)); 662 for (unsigned i = 0; i < padding_chars; i++) { 663 putc(' ', stream_); 664 } 665 666 // Print the specified bits in hexadecimal format. 667 uint64_t bits = ReadRegister<uint64_t>(code, r31mode); 668 bits &= kXRegMask >> ((kXRegSizeInBytes - size_in_bytes) * 8); 669 VIXL_STATIC_ASSERT(sizeof(bits) == kXRegSizeInBytes); 670 671 int chars = size_in_bytes * 2; 672 fprintf(stream_, 673 "%s0x%0*" PRIx64 "%s", 674 clr_reg_value, 675 chars, 676 bits, 677 clr_normal); 678 } 679 680 681 void Simulator::PrintRegister(unsigned code, Reg31Mode r31mode) { 682 registers_[code].NotifyRegisterLogged(); 683 684 // Don't print writes into xzr. 685 if ((code == kZeroRegCode) && (r31mode == Reg31IsZeroRegister)) { 686 return; 687 } 688 689 // The template for all x and w registers: 690 // "# x{code}: 0x{value}" 691 // "# w{code}: 0x{value}" 692 693 PrintRegisterRawHelper(code, r31mode); 694 fprintf(stream_, "\n"); 695 } 696 697 698 // Print a register's name and raw value. 699 // 700 // The `bytes` and `lsb` arguments can be used to limit the bytes that are 701 // printed. 
These arguments are intended for use in cases where register hasn't 702 // actually been updated (such as in PrintVWrite). 703 // 704 // No newline is printed. This allows the caller to print more details (such as 705 // a floating-point interpretation or a memory access annotation). 706 void Simulator::PrintVRegisterRawHelper(unsigned code, int bytes, int lsb) { 707 // The template for vector types: 708 // "# v{code}: 0xffeeddccbbaa99887766554433221100". 709 // An example with bytes=4 and lsb=8: 710 // "# v{code}: 0xbbaa9988 ". 711 fprintf(stream_, 712 "# %s%5s: %s", 713 clr_vreg_name, 714 VRegNameForCode(code), 715 clr_vreg_value); 716 717 int msb = lsb + bytes - 1; 718 int byte = kQRegSizeInBytes - 1; 719 720 // Print leading padding spaces. (Two spaces per byte.) 721 while (byte > msb) { 722 fprintf(stream_, " "); 723 byte--; 724 } 725 726 // Print the specified part of the value, byte by byte. 727 qreg_t rawbits = ReadQRegister(code); 728 fprintf(stream_, "0x"); 729 while (byte >= lsb) { 730 fprintf(stream_, "%02x", rawbits.val[byte]); 731 byte--; 732 } 733 734 // Print trailing padding spaces. 735 while (byte >= 0) { 736 fprintf(stream_, " "); 737 byte--; 738 } 739 fprintf(stream_, "%s", clr_normal); 740 } 741 742 743 // Print each of the specified lanes of a register as a float or double value. 744 // 745 // The `lane_count` and `lslane` arguments can be used to limit the lanes that 746 // are printed. These arguments are intended for use in cases where register 747 // hasn't actually been updated (such as in PrintVWrite). 748 // 749 // No newline is printed. This allows the caller to print more details (such as 750 // a memory access annotation). 
751 void Simulator::PrintVRegisterFPHelper(unsigned code, 752 unsigned lane_size_in_bytes, 753 int lane_count, 754 int rightmost_lane) { 755 VIXL_ASSERT((lane_size_in_bytes == kSRegSizeInBytes) || 756 (lane_size_in_bytes == kDRegSizeInBytes)); 757 758 unsigned msb = ((lane_count + rightmost_lane) * lane_size_in_bytes); 759 VIXL_ASSERT(msb <= kQRegSizeInBytes); 760 761 // For scalar types ((lane_count == 1) && (rightmost_lane == 0)), a register 762 // name is used: 763 // " (s{code}: {value})" 764 // " (d{code}: {value})" 765 // For vector types, "..." is used to represent one or more omitted lanes. 766 // " (..., {value}, {value}, ...)" 767 if ((lane_count == 1) && (rightmost_lane == 0)) { 768 const char* name = (lane_size_in_bytes == kSRegSizeInBytes) 769 ? SRegNameForCode(code) 770 : DRegNameForCode(code); 771 fprintf(stream_, " (%s%s: ", clr_vreg_name, name); 772 } else { 773 if (msb < (kQRegSizeInBytes - 1)) { 774 fprintf(stream_, " (..., "); 775 } else { 776 fprintf(stream_, " ("); 777 } 778 } 779 780 // Print the list of values. 781 const char* separator = ""; 782 int leftmost_lane = rightmost_lane + lane_count - 1; 783 for (int lane = leftmost_lane; lane >= rightmost_lane; lane--) { 784 double value = (lane_size_in_bytes == kSRegSizeInBytes) 785 ? ReadVRegister(code).GetLane<float>(lane) 786 : ReadVRegister(code).GetLane<double>(lane); 787 if (std::isnan(value)) { 788 // The output for NaNs is implementation defined. Always print `nan`, so 789 // that traces are coherent across different implementations. 
790 fprintf(stream_, "%s%snan%s", separator, clr_vreg_value, clr_normal); 791 } else { 792 fprintf(stream_, 793 "%s%s%#g%s", 794 separator, 795 clr_vreg_value, 796 value, 797 clr_normal); 798 } 799 separator = ", "; 800 } 801 802 if (rightmost_lane > 0) { 803 fprintf(stream_, ", ..."); 804 } 805 fprintf(stream_, ")"); 806 } 807 808 809 void Simulator::PrintVRegister(unsigned code, PrintRegisterFormat format) { 810 vregisters_[code].NotifyRegisterLogged(); 811 812 int lane_size_log2 = format & kPrintRegLaneSizeMask; 813 814 int reg_size_log2; 815 if (format & kPrintRegAsQVector) { 816 reg_size_log2 = kQRegSizeInBytesLog2; 817 } else if (format & kPrintRegAsDVector) { 818 reg_size_log2 = kDRegSizeInBytesLog2; 819 } else { 820 // Scalar types. 821 reg_size_log2 = lane_size_log2; 822 } 823 824 int lane_count = 1 << (reg_size_log2 - lane_size_log2); 825 int lane_size = 1 << lane_size_log2; 826 827 // The template for vector types: 828 // "# v{code}: 0x{rawbits} (..., {value}, ...)". 829 // The template for scalar types: 830 // "# v{code}: 0x{rawbits} ({reg}:{value})". 831 // The values in parentheses after the bit representations are floating-point 832 // interpretations. They are displayed only if the kPrintVRegAsFP bit is set. 
833 834 PrintVRegisterRawHelper(code); 835 if (format & kPrintRegAsFP) { 836 PrintVRegisterFPHelper(code, lane_size, lane_count); 837 } 838 839 fprintf(stream_, "\n"); 840 } 841 842 843 void Simulator::PrintSystemRegister(SystemRegister id) { 844 switch (id) { 845 case NZCV: 846 fprintf(stream_, 847 "# %sNZCV: %sN:%d Z:%d C:%d V:%d%s\n", 848 clr_flag_name, 849 clr_flag_value, 850 ReadNzcv().GetN(), 851 ReadNzcv().GetZ(), 852 ReadNzcv().GetC(), 853 ReadNzcv().GetV(), 854 clr_normal); 855 break; 856 case FPCR: { 857 static const char* rmode[] = {"0b00 (Round to Nearest)", 858 "0b01 (Round towards Plus Infinity)", 859 "0b10 (Round towards Minus Infinity)", 860 "0b11 (Round towards Zero)"}; 861 VIXL_ASSERT(ReadFpcr().GetRMode() < (sizeof(rmode) / sizeof(rmode[0]))); 862 fprintf(stream_, 863 "# %sFPCR: %sAHP:%d DN:%d FZ:%d RMode:%s%s\n", 864 clr_flag_name, 865 clr_flag_value, 866 ReadFpcr().GetAHP(), 867 ReadFpcr().GetDN(), 868 ReadFpcr().GetFZ(), 869 rmode[ReadFpcr().GetRMode()], 870 clr_normal); 871 break; 872 } 873 default: 874 VIXL_UNREACHABLE(); 875 } 876 } 877 878 879 void Simulator::PrintRead(uintptr_t address, 880 unsigned reg_code, 881 PrintRegisterFormat format) { 882 registers_[reg_code].NotifyRegisterLogged(); 883 884 USE(format); 885 886 // The template is "# {reg}: 0x{value} <- {address}". 887 PrintRegisterRawHelper(reg_code, Reg31IsZeroRegister); 888 fprintf(stream_, 889 " <- %s0x%016" PRIxPTR "%s\n", 890 clr_memory_address, 891 address, 892 clr_normal); 893 } 894 895 896 void Simulator::PrintVRead(uintptr_t address, 897 unsigned reg_code, 898 PrintRegisterFormat format, 899 unsigned lane) { 900 vregisters_[reg_code].NotifyRegisterLogged(); 901 902 // The template is "# v{code}: 0x{rawbits} <- address". 
903 PrintVRegisterRawHelper(reg_code); 904 if (format & kPrintRegAsFP) { 905 PrintVRegisterFPHelper(reg_code, 906 GetPrintRegLaneSizeInBytes(format), 907 GetPrintRegLaneCount(format), 908 lane); 909 } 910 fprintf(stream_, 911 " <- %s0x%016" PRIxPTR "%s\n", 912 clr_memory_address, 913 address, 914 clr_normal); 915 } 916 917 918 void Simulator::PrintWrite(uintptr_t address, 919 unsigned reg_code, 920 PrintRegisterFormat format) { 921 VIXL_ASSERT(GetPrintRegLaneCount(format) == 1); 922 923 // The template is "# v{code}: 0x{value} -> {address}". To keep the trace tidy 924 // and readable, the value is aligned with the values in the register trace. 925 PrintRegisterRawHelper(reg_code, 926 Reg31IsZeroRegister, 927 GetPrintRegSizeInBytes(format)); 928 fprintf(stream_, 929 " -> %s0x%016" PRIxPTR "%s\n", 930 clr_memory_address, 931 address, 932 clr_normal); 933 } 934 935 936 void Simulator::PrintVWrite(uintptr_t address, 937 unsigned reg_code, 938 PrintRegisterFormat format, 939 unsigned lane) { 940 // The templates: 941 // "# v{code}: 0x{rawbits} -> {address}" 942 // "# v{code}: 0x{rawbits} (..., {value}, ...) -> {address}". 943 // "# v{code}: 0x{rawbits} ({reg}:{value}) -> {address}" 944 // Because this trace doesn't represent a change to the source register's 945 // value, only the relevant part of the value is printed. To keep the trace 946 // tidy and readable, the raw value is aligned with the other values in the 947 // register trace. 
948 int lane_count = GetPrintRegLaneCount(format); 949 int lane_size = GetPrintRegLaneSizeInBytes(format); 950 int reg_size = GetPrintRegSizeInBytes(format); 951 PrintVRegisterRawHelper(reg_code, reg_size, lane_size * lane); 952 if (format & kPrintRegAsFP) { 953 PrintVRegisterFPHelper(reg_code, lane_size, lane_count, lane); 954 } 955 fprintf(stream_, 956 " -> %s0x%016" PRIxPTR "%s\n", 957 clr_memory_address, 958 address, 959 clr_normal); 960 } 961 962 963 void Simulator::PrintTakenBranch(const Instruction* target) { 964 fprintf(stream_, 965 "# %sBranch%s to 0x%016" PRIx64 ".\n", 966 clr_branch_marker, 967 clr_normal, 968 reinterpret_cast<uint64_t>(target)); 969 } 970 971 972 // Visitors--------------------------------------------------------------------- 973 974 void Simulator::VisitUnimplemented(const Instruction* instr) { 975 printf("Unimplemented instruction at %p: 0x%08" PRIx32 "\n", 976 reinterpret_cast<const void*>(instr), 977 instr->GetInstructionBits()); 978 VIXL_UNIMPLEMENTED(); 979 } 980 981 982 void Simulator::VisitUnallocated(const Instruction* instr) { 983 printf("Unallocated instruction at %p: 0x%08" PRIx32 "\n", 984 reinterpret_cast<const void*>(instr), 985 instr->GetInstructionBits()); 986 VIXL_UNIMPLEMENTED(); 987 } 988 989 990 void Simulator::VisitPCRelAddressing(const Instruction* instr) { 991 VIXL_ASSERT((instr->Mask(PCRelAddressingMask) == ADR) || 992 (instr->Mask(PCRelAddressingMask) == ADRP)); 993 994 WriteRegister(instr->GetRd(), instr->GetImmPCOffsetTarget()); 995 } 996 997 998 void Simulator::VisitUnconditionalBranch(const Instruction* instr) { 999 switch (instr->Mask(UnconditionalBranchMask)) { 1000 case BL: 1001 WriteLr(instr->GetNextInstruction()); 1002 VIXL_FALLTHROUGH(); 1003 case B: 1004 WritePc(instr->GetImmPCOffsetTarget()); 1005 break; 1006 default: 1007 VIXL_UNREACHABLE(); 1008 } 1009 } 1010 1011 1012 void Simulator::VisitConditionalBranch(const Instruction* instr) { 1013 VIXL_ASSERT(instr->Mask(ConditionalBranchMask) == B_cond); 
1014 if (ConditionPassed(instr->GetConditionBranch())) { 1015 WritePc(instr->GetImmPCOffsetTarget()); 1016 } 1017 } 1018 1019 1020 void Simulator::VisitUnconditionalBranchToRegister(const Instruction* instr) { 1021 const Instruction* target = Instruction::Cast(ReadXRegister(instr->GetRn())); 1022 1023 switch (instr->Mask(UnconditionalBranchToRegisterMask)) { 1024 case BLR: 1025 WriteLr(instr->GetNextInstruction()); 1026 VIXL_FALLTHROUGH(); 1027 case BR: 1028 case RET: 1029 WritePc(target); 1030 break; 1031 default: 1032 VIXL_UNREACHABLE(); 1033 } 1034 } 1035 1036 1037 void Simulator::VisitTestBranch(const Instruction* instr) { 1038 unsigned bit_pos = 1039 (instr->GetImmTestBranchBit5() << 5) | instr->GetImmTestBranchBit40(); 1040 bool bit_zero = ((ReadXRegister(instr->GetRt()) >> bit_pos) & 1) == 0; 1041 bool take_branch = false; 1042 switch (instr->Mask(TestBranchMask)) { 1043 case TBZ: 1044 take_branch = bit_zero; 1045 break; 1046 case TBNZ: 1047 take_branch = !bit_zero; 1048 break; 1049 default: 1050 VIXL_UNIMPLEMENTED(); 1051 } 1052 if (take_branch) { 1053 WritePc(instr->GetImmPCOffsetTarget()); 1054 } 1055 } 1056 1057 1058 void Simulator::VisitCompareBranch(const Instruction* instr) { 1059 unsigned rt = instr->GetRt(); 1060 bool take_branch = false; 1061 switch (instr->Mask(CompareBranchMask)) { 1062 case CBZ_w: 1063 take_branch = (ReadWRegister(rt) == 0); 1064 break; 1065 case CBZ_x: 1066 take_branch = (ReadXRegister(rt) == 0); 1067 break; 1068 case CBNZ_w: 1069 take_branch = (ReadWRegister(rt) != 0); 1070 break; 1071 case CBNZ_x: 1072 take_branch = (ReadXRegister(rt) != 0); 1073 break; 1074 default: 1075 VIXL_UNIMPLEMENTED(); 1076 } 1077 if (take_branch) { 1078 WritePc(instr->GetImmPCOffsetTarget()); 1079 } 1080 } 1081 1082 1083 void Simulator::AddSubHelper(const Instruction* instr, int64_t op2) { 1084 unsigned reg_size = instr->GetSixtyFourBits() ? 
      kXRegSize : kWRegSize;
  bool set_flags = instr->GetFlagsUpdate();
  int64_t new_val = 0;
  Instr operation = instr->Mask(AddSubOpMask);

  switch (operation) {
    case ADD:
    case ADDS: {
      new_val = AddWithCarry(reg_size,
                             set_flags,
                             ReadRegister(reg_size,
                                          instr->GetRn(),
                                          instr->GetRnMode()),
                             op2);
      break;
    }
    case SUB:
    case SUBS: {
      // Subtract via two's-complement addition: Rn + ~op2 + 1.
      new_val = AddWithCarry(reg_size,
                             set_flags,
                             ReadRegister(reg_size,
                                          instr->GetRn(),
                                          instr->GetRnMode()),
                             ~op2,
                             1);
      break;
    }
    default:
      VIXL_UNREACHABLE();
  }

  WriteRegister(reg_size,
                instr->GetRd(),
                new_val,
                LogRegWrites,
                instr->GetRdMode());
}


// Add/subtract (shifted register): op2 is Rm shifted by an immediate amount.
void Simulator::VisitAddSubShifted(const Instruction* instr) {
  unsigned reg_size = instr->GetSixtyFourBits() ? kXRegSize : kWRegSize;
  int64_t op2 = ShiftOperand(reg_size,
                             ReadRegister(reg_size, instr->GetRm()),
                             static_cast<Shift>(instr->GetShiftDP()),
                             instr->GetImmDPShift());
  AddSubHelper(instr, op2);
}


// Add/subtract (immediate): op2 is a 12-bit immediate, optionally shifted
// left by 12 bits.
void Simulator::VisitAddSubImmediate(const Instruction* instr) {
  int64_t op2 = instr->GetImmAddSub()
                << ((instr->GetShiftAddSub() == 1) ? 12 : 0);
  AddSubHelper(instr, op2);
}


// Add/subtract (extended register): op2 is Rm sign- or zero-extended, then
// shifted.
void Simulator::VisitAddSubExtended(const Instruction* instr) {
  unsigned reg_size = instr->GetSixtyFourBits() ? kXRegSize : kWRegSize;
  int64_t op2 = ExtendValue(reg_size,
                            ReadRegister(reg_size, instr->GetRm()),
                            static_cast<Extend>(instr->GetExtendMode()),
                            instr->GetImmExtendShift());
  AddSubHelper(instr, op2);
}


// ADC/ADCS/SBC/SBCS: add/subtract with carry. For the subtract forms op2 is
// inverted and the incoming carry flag supplies the "+1".
void Simulator::VisitAddSubWithCarry(const Instruction* instr) {
  unsigned reg_size = instr->GetSixtyFourBits() ?
      kXRegSize : kWRegSize;
  int64_t op2 = ReadRegister(reg_size, instr->GetRm());
  int64_t new_val;

  if ((instr->Mask(AddSubOpMask) == SUB) ||
      (instr->Mask(AddSubOpMask) == SUBS)) {
    op2 = ~op2;
  }

  new_val = AddWithCarry(reg_size,
                         instr->GetFlagsUpdate(),
                         ReadRegister(reg_size, instr->GetRn()),
                         op2,
                         ReadC());

  WriteRegister(reg_size, instr->GetRd(), new_val);
}


// Logical (shifted register): op2 is Rm shifted by an immediate. The NOT bit
// selects the inverted-operand forms (BIC/ORN/EON/BICS).
void Simulator::VisitLogicalShifted(const Instruction* instr) {
  unsigned reg_size = instr->GetSixtyFourBits() ? kXRegSize : kWRegSize;
  Shift shift_type = static_cast<Shift>(instr->GetShiftDP());
  unsigned shift_amount = instr->GetImmDPShift();
  int64_t op2 = ShiftOperand(reg_size,
                             ReadRegister(reg_size, instr->GetRm()),
                             shift_type,
                             shift_amount);
  if (instr->Mask(NOT) == NOT) {
    op2 = ~op2;
  }
  LogicalHelper(instr, op2);
}


// Logical (immediate): op2 is the decoded bitmask immediate.
void Simulator::VisitLogicalImmediate(const Instruction* instr) {
  LogicalHelper(instr, instr->GetImmLogical());
}


// Common implementation for AND/ORR/EOR (and flag-setting ANDS) once op2 has
// been computed by the caller.
void Simulator::LogicalHelper(const Instruction* instr, int64_t op2) {
  unsigned reg_size = instr->GetSixtyFourBits() ? kXRegSize : kWRegSize;
  int64_t op1 = ReadRegister(reg_size, instr->GetRn());
  int64_t result = 0;
  bool update_flags = false;

  // Switch on the logical operation, stripping out the NOT bit, as it has a
  // different meaning for logical immediate instructions.
  switch (instr->Mask(LogicalOpMask & ~NOT)) {
    case ANDS:
      update_flags = true;
      VIXL_FALLTHROUGH();
    case AND:
      result = op1 & op2;
      break;
    case ORR:
      result = op1 | op2;
      break;
    case EOR:
      result = op1 ^ op2;
      break;
    default:
      VIXL_UNIMPLEMENTED();
  }

  if (update_flags) {
    // ANDS sets N and Z from the result, and clears C and V.
    ReadNzcv().SetN(CalcNFlag(result, reg_size));
    ReadNzcv().SetZ(CalcZFlag(result));
    ReadNzcv().SetC(0);
    ReadNzcv().SetV(0);
    LogSystemRegister(NZCV);
  }

  WriteRegister(reg_size,
                instr->GetRd(),
                result,
                LogRegWrites,
                instr->GetRdMode());
}


// CCMP/CCMN (register form): op2 is read from Rm.
void Simulator::VisitConditionalCompareRegister(const Instruction* instr) {
  unsigned reg_size = instr->GetSixtyFourBits() ? kXRegSize : kWRegSize;
  ConditionalCompareHelper(instr, ReadRegister(reg_size, instr->GetRm()));
}


// CCMP/CCMN (immediate form): op2 is a small immediate.
void Simulator::VisitConditionalCompareImmediate(const Instruction* instr) {
  ConditionalCompareHelper(instr, instr->GetImmCondCmp());
}


// Conditional compare: if the condition passes, perform a flag-setting
// compare of Rn with op2; otherwise set NZCV directly from the instruction's
// immediate flags field.
void Simulator::ConditionalCompareHelper(const Instruction* instr,
                                         int64_t op2) {
  unsigned reg_size = instr->GetSixtyFourBits() ? kXRegSize : kWRegSize;
  int64_t op1 = ReadRegister(reg_size, instr->GetRn());

  if (ConditionPassed(instr->GetCondition())) {
    // If the condition passes, set the status flags to the result of comparing
    // the operands.
    if (instr->Mask(ConditionalCompareMask) == CCMP) {
      // CCMP compares by subtraction: op1 + ~op2 + 1.
      AddWithCarry(reg_size, true, op1, ~op2, 1);
    } else {
      VIXL_ASSERT(instr->Mask(ConditionalCompareMask) == CCMN);
      AddWithCarry(reg_size, true, op1, op2, 0);
    }
  } else {
    // If the condition fails, set the status flags to the nzcv immediate.
    ReadNzcv().SetFlags(instr->GetNzcv());
    LogSystemRegister(NZCV);
  }
}


// Load/store (unsigned immediate offset): the immediate is scaled by the
// access size.
void Simulator::VisitLoadStoreUnsignedOffset(const Instruction* instr) {
  int offset = instr->GetImmLSUnsigned() << instr->GetSizeLS();
  LoadStoreHelper(instr, offset, Offset);
}


// Load/store (unscaled immediate offset, LDUR/STUR family).
void Simulator::VisitLoadStoreUnscaledOffset(const Instruction* instr) {
  LoadStoreHelper(instr, instr->GetImmLS(), Offset);
}


// Load/store (pre-indexed): the base register is written back before the
// access.
void Simulator::VisitLoadStorePreIndex(const Instruction* instr) {
  LoadStoreHelper(instr, instr->GetImmLS(), PreIndex);
}


// Load/store (post-indexed): the base register is written back after the
// access.
void Simulator::VisitLoadStorePostIndex(const Instruction* instr) {
  LoadStoreHelper(instr, instr->GetImmLS(), PostIndex);
}


// Load/store (register offset): the offset is Rm, extended and scaled.
void Simulator::VisitLoadStoreRegisterOffset(const Instruction* instr) {
  Extend ext = static_cast<Extend>(instr->GetExtendMode());
  VIXL_ASSERT((ext == UXTW) || (ext == UXTX) || (ext == SXTW) || (ext == SXTX));
  unsigned shift_amount = instr->GetImmShiftLS() * instr->GetSizeLS();

  int64_t offset =
      ExtendValue(kXRegSize, ReadXRegister(instr->GetRm()), ext, shift_amount);
  LoadStoreHelper(instr, offset, Offset);
}


// Common implementation for single-register loads and stores: compute the
// address (honouring the addressing mode), perform the access, then log it.
void Simulator::LoadStoreHelper(const Instruction* instr,
                                int64_t offset,
                                AddrMode addrmode) {
  unsigned srcdst = instr->GetRt();
  uintptr_t address = AddressModeHelper(instr->GetRn(), offset, addrmode);

  LoadStoreOp op = static_cast<LoadStoreOp>(instr->Mask(LoadStoreMask));
  switch (op) {
    // Loads use NoRegLog; a more detailed trace (with the address) is printed
    // after the switch.
    case LDRB_w:
      WriteWRegister(srcdst, Memory::Read<uint8_t>(address), NoRegLog);
      break;
    case LDRH_w:
      WriteWRegister(srcdst, Memory::Read<uint16_t>(address), NoRegLog);
      break;
    case LDR_w:
      WriteWRegister(srcdst, Memory::Read<uint32_t>(address), NoRegLog);
      break;
    case LDR_x:
      WriteXRegister(srcdst, Memory::Read<uint64_t>(address), NoRegLog);
      break;
    case LDRSB_w:
      WriteWRegister(srcdst,
                     Memory::Read<int8_t>(address), NoRegLog);
      break;
    case LDRSH_w:
      WriteWRegister(srcdst, Memory::Read<int16_t>(address), NoRegLog);
      break;
    case LDRSB_x:
      WriteXRegister(srcdst, Memory::Read<int8_t>(address), NoRegLog);
      break;
    case LDRSH_x:
      WriteXRegister(srcdst, Memory::Read<int16_t>(address), NoRegLog);
      break;
    case LDRSW_x:
      WriteXRegister(srcdst, Memory::Read<int32_t>(address), NoRegLog);
      break;
    case LDR_b:
      WriteBRegister(srcdst, Memory::Read<uint8_t>(address), NoRegLog);
      break;
    case LDR_h:
      WriteHRegister(srcdst, Memory::Read<uint16_t>(address), NoRegLog);
      break;
    case LDR_s:
      WriteSRegister(srcdst, Memory::Read<float>(address), NoRegLog);
      break;
    case LDR_d:
      WriteDRegister(srcdst, Memory::Read<double>(address), NoRegLog);
      break;
    case LDR_q:
      WriteQRegister(srcdst, Memory::Read<qreg_t>(address), NoRegLog);
      break;

    case STRB_w:
      Memory::Write<uint8_t>(address, ReadWRegister(srcdst));
      break;
    case STRH_w:
      Memory::Write<uint16_t>(address, ReadWRegister(srcdst));
      break;
    case STR_w:
      Memory::Write<uint32_t>(address, ReadWRegister(srcdst));
      break;
    case STR_x:
      Memory::Write<uint64_t>(address, ReadXRegister(srcdst));
      break;
    case STR_b:
      Memory::Write<uint8_t>(address, ReadBRegister(srcdst));
      break;
    case STR_h:
      Memory::Write<uint16_t>(address, ReadHRegister(srcdst));
      break;
    case STR_s:
      Memory::Write<float>(address, ReadSRegister(srcdst));
      break;
    case STR_d:
      Memory::Write<double>(address, ReadDRegister(srcdst));
      break;
    case STR_q:
      Memory::Write<qreg_t>(address, ReadQRegister(srcdst));
      break;

    // Ignore prfm hint instructions.
    case PRFM:
      break;

    default:
      VIXL_UNIMPLEMENTED();
  }

  // Log the access with its address, distinguishing FP-formatted, raw-vector
  // and integer register traces.
  unsigned access_size = 1 << instr->GetSizeLS();
  if (instr->IsLoad()) {
    if ((op == LDR_s) || (op == LDR_d)) {
      LogVRead(address, srcdst, GetPrintRegisterFormatForSizeFP(access_size));
    } else if ((op == LDR_b) || (op == LDR_h) || (op == LDR_q)) {
      LogVRead(address, srcdst, GetPrintRegisterFormatForSize(access_size));
    } else {
      LogRead(address, srcdst, GetPrintRegisterFormatForSize(access_size));
    }
  } else if (instr->IsStore()) {
    if ((op == STR_s) || (op == STR_d)) {
      LogVWrite(address, srcdst, GetPrintRegisterFormatForSizeFP(access_size));
    } else if ((op == STR_b) || (op == STR_h) || (op == STR_q)) {
      LogVWrite(address, srcdst, GetPrintRegisterFormatForSize(access_size));
    } else {
      LogWrite(address, srcdst, GetPrintRegisterFormatForSize(access_size));
    }
  } else {
    VIXL_ASSERT(op == PRFM);
  }

  // Any ordinary memory access can clear the local exclusive monitor.
  local_monitor_.MaybeClear();
}


// Load/store pair (immediate offset).
void Simulator::VisitLoadStorePairOffset(const Instruction* instr) {
  LoadStorePairHelper(instr, Offset);
}


// Load/store pair (pre-indexed).
void Simulator::VisitLoadStorePairPreIndex(const Instruction* instr) {
  LoadStorePairHelper(instr, PreIndex);
}


// Load/store pair (post-indexed).
void Simulator::VisitLoadStorePairPostIndex(const Instruction* instr) {
  LoadStorePairHelper(instr, PostIndex);
}


// Load/store pair (non-temporal): simulated identically to a plain offset
// access; the non-temporal hint has no functional effect here.
void Simulator::VisitLoadStorePairNonTemporal(const Instruction* instr) {
  LoadStorePairHelper(instr, Offset);
}


// Common implementation for load/store pair: two consecutive elements at
// `address` and `address + element_size`.
void Simulator::LoadStorePairHelper(const Instruction* instr,
                                    AddrMode addrmode) {
  unsigned rt = instr->GetRt();
  unsigned rt2 = instr->GetRt2();
  int element_size = 1 << instr->GetSizeLSPair();
  int64_t offset = instr->GetImmLSPair() * element_size;
  uintptr_t address = AddressModeHelper(instr->GetRn(), offset, addrmode);
  uintptr_t address2 = address + element_size;

  LoadStorePairOp op =
      static_cast<LoadStorePairOp>(instr->Mask(LoadStorePairMask));

  // 'rt' and 'rt2' can only be aliased for stores.
  VIXL_ASSERT(((op & LoadStorePairLBit) == 0) || (rt != rt2));

  switch (op) {
    // Use NoRegLog to suppress the register trace (LOG_REGS, LOG_FP_REGS). We
    // will print a more detailed log.
    case LDP_w: {
      WriteWRegister(rt, Memory::Read<uint32_t>(address), NoRegLog);
      WriteWRegister(rt2, Memory::Read<uint32_t>(address2), NoRegLog);
      break;
    }
    case LDP_s: {
      WriteSRegister(rt, Memory::Read<float>(address), NoRegLog);
      WriteSRegister(rt2, Memory::Read<float>(address2), NoRegLog);
      break;
    }
    case LDP_x: {
      WriteXRegister(rt, Memory::Read<uint64_t>(address), NoRegLog);
      WriteXRegister(rt2, Memory::Read<uint64_t>(address2), NoRegLog);
      break;
    }
    case LDP_d: {
      WriteDRegister(rt, Memory::Read<double>(address), NoRegLog);
      WriteDRegister(rt2, Memory::Read<double>(address2), NoRegLog);
      break;
    }
    case LDP_q: {
      WriteQRegister(rt, Memory::Read<qreg_t>(address), NoRegLog);
      WriteQRegister(rt2, Memory::Read<qreg_t>(address2), NoRegLog);
      break;
    }
    case LDPSW_x: {
      // Sign-extending 32-bit pair load.
      WriteXRegister(rt, Memory::Read<int32_t>(address), NoRegLog);
      WriteXRegister(rt2, Memory::Read<int32_t>(address2), NoRegLog);
      break;
    }
    case STP_w: {
      Memory::Write<uint32_t>(address, ReadWRegister(rt));
      Memory::Write<uint32_t>(address2, ReadWRegister(rt2));
      break;
    }
    case STP_s: {
      Memory::Write<float>(address, ReadSRegister(rt));
      Memory::Write<float>(address2, ReadSRegister(rt2));
      break;
    }
    case STP_x: {
      Memory::Write<uint64_t>(address, ReadXRegister(rt));
      Memory::Write<uint64_t>(address2, ReadXRegister(rt2));
      break;
    }
    case STP_d: {
      Memory::Write<double>(address, ReadDRegister(rt));
      Memory::Write<double>(address2, ReadDRegister(rt2));
      break;
    }
    case STP_q: {
      Memory::Write<qreg_t>(address, ReadQRegister(rt));
      Memory::Write<qreg_t>(address2, ReadQRegister(rt2));
      break;
    }
    default:
      VIXL_UNREACHABLE();
  }

  // Print a detailed trace (including the memory address) instead of the basic
  // register:value trace generated by set_*reg().
  if (instr->IsLoad()) {
    if ((op == LDP_s) || (op == LDP_d)) {
      LogVRead(address, rt, GetPrintRegisterFormatForSizeFP(element_size));
      LogVRead(address2, rt2, GetPrintRegisterFormatForSizeFP(element_size));
    } else if (op == LDP_q) {
      LogVRead(address, rt, GetPrintRegisterFormatForSize(element_size));
      LogVRead(address2, rt2, GetPrintRegisterFormatForSize(element_size));
    } else {
      LogRead(address, rt, GetPrintRegisterFormatForSize(element_size));
      LogRead(address2, rt2, GetPrintRegisterFormatForSize(element_size));
    }
  } else {
    if ((op == STP_s) || (op == STP_d)) {
      LogVWrite(address, rt, GetPrintRegisterFormatForSizeFP(element_size));
      LogVWrite(address2, rt2, GetPrintRegisterFormatForSizeFP(element_size));
    } else if (op == STP_q) {
      LogVWrite(address, rt, GetPrintRegisterFormatForSize(element_size));
      LogVWrite(address2, rt2, GetPrintRegisterFormatForSize(element_size));
    } else {
      LogWrite(address, rt, GetPrintRegisterFormatForSize(element_size));
      LogWrite(address2, rt2, GetPrintRegisterFormatForSize(element_size));
    }
  }

  // Any ordinary memory access can clear the local exclusive monitor.
  local_monitor_.MaybeClear();
}


// Emit (once per run) a warning that exclusive-access support is limited.
void Simulator::PrintExclusiveAccessWarning() {
  if (print_exclusive_access_warning_) {
    fprintf(stderr,
            "%sWARNING:%s VIXL simulator support for "
            "load-/store-/clear-exclusive "
            "instructions is limited. Refer to the README for details.%s\n",
            clr_warning,
            clr_warning_message,
            clr_normal);
    // Only warn the first time an exclusive access is simulated.
    print_exclusive_access_warning_ = false;
  }
}


// Load/store exclusive and load-acquire/store-release: models the local and
// global exclusive monitors, and approximates acquire/release ordering with
// full barriers.
void Simulator::VisitLoadStoreExclusive(const Instruction* instr) {
  PrintExclusiveAccessWarning();

  unsigned rs = instr->GetRs();
  unsigned rt = instr->GetRt();
  unsigned rt2 = instr->GetRt2();
  unsigned rn = instr->GetRn();

  LoadStoreExclusive op =
      static_cast<LoadStoreExclusive>(instr->Mask(LoadStoreExclusiveMask));

  bool is_acquire_release = instr->GetLdStXAcquireRelease();
  bool is_exclusive = !instr->GetLdStXNotExclusive();
  bool is_load = instr->GetLdStXLoad();
  bool is_pair = instr->GetLdStXPair();

  unsigned element_size = 1 << instr->GetLdStXSizeLog2();
  unsigned access_size = is_pair ? element_size * 2 : element_size;
  uint64_t address = ReadRegister<uint64_t>(rn, Reg31IsStackPointer);

  // Verify that the address is available to the host.
  VIXL_ASSERT(address == static_cast<uintptr_t>(address));

  // Check the alignment of `address`.
  if (AlignDown(address, access_size) != address) {
    VIXL_ALIGNMENT_EXCEPTION();
  }

  // The sp must be aligned to 16 bytes when it is accessed.
  if ((rn == 31) && (AlignDown(address, 16) != address)) {
    VIXL_ALIGNMENT_EXCEPTION();
  }

  if (is_load) {
    if (is_exclusive) {
      local_monitor_.MarkExclusive(address, access_size);
    } else {
      // Any non-exclusive load can clear the local monitor as a side effect. We
      // don't need to do this, but it is useful to stress the simulated code.
      local_monitor_.Clear();
    }

    // Use NoRegLog to suppress the register trace (LOG_REGS, LOG_FP_REGS). We
    // will print a more detailed log.
    switch (op) {
      case LDXRB_w:
      case LDAXRB_w:
      case LDARB_w:
        WriteWRegister(rt, Memory::Read<uint8_t>(address), NoRegLog);
        break;
      case LDXRH_w:
      case LDAXRH_w:
      case LDARH_w:
        WriteWRegister(rt, Memory::Read<uint16_t>(address), NoRegLog);
        break;
      case LDXR_w:
      case LDAXR_w:
      case LDAR_w:
        WriteWRegister(rt, Memory::Read<uint32_t>(address), NoRegLog);
        break;
      case LDXR_x:
      case LDAXR_x:
      case LDAR_x:
        WriteXRegister(rt, Memory::Read<uint64_t>(address), NoRegLog);
        break;
      case LDXP_w:
      case LDAXP_w:
        WriteWRegister(rt, Memory::Read<uint32_t>(address), NoRegLog);
        WriteWRegister(rt2,
                       Memory::Read<uint32_t>(address + element_size),
                       NoRegLog);
        break;
      case LDXP_x:
      case LDAXP_x:
        WriteXRegister(rt, Memory::Read<uint64_t>(address), NoRegLog);
        WriteXRegister(rt2,
                       Memory::Read<uint64_t>(address + element_size),
                       NoRegLog);
        break;
      default:
        VIXL_UNREACHABLE();
    }

    if (is_acquire_release) {
      // Approximate load-acquire by issuing a full barrier after the load.
      __sync_synchronize();
    }

    LogRead(address, rt, GetPrintRegisterFormatForSize(element_size));
    if (is_pair) {
      LogRead(address + element_size,
              rt2,
              GetPrintRegisterFormatForSize(element_size));
    }
  } else {
    if (is_acquire_release) {
      // Approximate store-release by issuing a full barrier before the store.
      __sync_synchronize();
    }

    bool do_store = true;
    if (is_exclusive) {
      // An exclusive store only succeeds if both monitors still cover the
      // address; Rs receives 0 on success and 1 on failure.
      do_store = local_monitor_.IsExclusive(address, access_size) &&
                 global_monitor_.IsExclusive(address, access_size);
      WriteWRegister(rs, do_store ? 0 : 1);

      // - All exclusive stores explicitly clear the local monitor.
      local_monitor_.Clear();
    } else {
      // - Any other store can clear the local monitor as a side effect.
      local_monitor_.MaybeClear();
    }

    if (do_store) {
      switch (op) {
        case STXRB_w:
        case STLXRB_w:
        case STLRB_w:
          Memory::Write<uint8_t>(address, ReadWRegister(rt));
          break;
        case STXRH_w:
        case STLXRH_w:
        case STLRH_w:
          Memory::Write<uint16_t>(address, ReadWRegister(rt));
          break;
        case STXR_w:
        case STLXR_w:
        case STLR_w:
          Memory::Write<uint32_t>(address, ReadWRegister(rt));
          break;
        case STXR_x:
        case STLXR_x:
        case STLR_x:
          Memory::Write<uint64_t>(address, ReadXRegister(rt));
          break;
        case STXP_w:
        case STLXP_w:
          Memory::Write<uint32_t>(address, ReadWRegister(rt));
          Memory::Write<uint32_t>(address + element_size, ReadWRegister(rt2));
          break;
        case STXP_x:
        case STLXP_x:
          Memory::Write<uint64_t>(address, ReadXRegister(rt));
          Memory::Write<uint64_t>(address + element_size, ReadXRegister(rt2));
          break;
        default:
          VIXL_UNREACHABLE();
      }

      LogWrite(address, rt, GetPrintRegisterFormatForSize(element_size));
      if (is_pair) {
        LogWrite(address + element_size,
                 rt2,
                 GetPrintRegisterFormatForSize(element_size));
      }
    }
  }
}


// PC-relative literal load: read the value embedded in the instruction
// stream at the literal's address.
void Simulator::VisitLoadLiteral(const Instruction* instr) {
  unsigned rt = instr->GetRt();
  uint64_t address = instr->GetLiteralAddress<uint64_t>();

  // Verify that the calculated address is available to the host.
  VIXL_ASSERT(address == static_cast<uintptr_t>(address));

  switch (instr->Mask(LoadLiteralMask)) {
    // Use NoRegLog to suppress the register trace (LOG_REGS, LOG_VREGS), then
    // print a more detailed log.
    case LDR_w_lit:
      WriteWRegister(rt, Memory::Read<uint32_t>(address), NoRegLog);
      LogRead(address, rt, kPrintWReg);
      break;
    case LDR_x_lit:
      WriteXRegister(rt, Memory::Read<uint64_t>(address), NoRegLog);
      LogRead(address, rt, kPrintXReg);
      break;
    case LDR_s_lit:
      WriteSRegister(rt, Memory::Read<float>(address), NoRegLog);
      LogVRead(address, rt, kPrintSReg);
      break;
    case LDR_d_lit:
      WriteDRegister(rt, Memory::Read<double>(address), NoRegLog);
      LogVRead(address, rt, kPrintDReg);
      break;
    case LDR_q_lit:
      WriteQRegister(rt, Memory::Read<qreg_t>(address), NoRegLog);
      LogVRead(address, rt, kPrintReg1Q);
      break;
    case LDRSW_x_lit:
      // Sign-extending 32-bit literal load.
      WriteXRegister(rt, Memory::Read<int32_t>(address), NoRegLog);
      LogRead(address, rt, kPrintWReg);
      break;

    // Ignore prfm hint instructions.
    case PRFM_lit:
      break;

    default:
      VIXL_UNREACHABLE();
  }

  // Any ordinary memory access can clear the local exclusive monitor.
  local_monitor_.MaybeClear();
}


// Compute the effective address for a load/store, applying pre-/post-index
// write-back to the base register as required. Returns the address to access.
uintptr_t Simulator::AddressModeHelper(unsigned addr_reg,
                                       int64_t offset,
                                       AddrMode addrmode) {
  uint64_t address = ReadXRegister(addr_reg, Reg31IsStackPointer);

  if ((addr_reg == 31) && ((address % 16) != 0)) {
    // When the base register is SP the stack pointer is required to be
    // quadword aligned prior to the address calculation and write-backs.
    // Misalignment will cause a stack alignment fault.
    VIXL_ALIGNMENT_EXCEPTION();
  }

  if ((addrmode == PreIndex) || (addrmode == PostIndex)) {
    VIXL_ASSERT(offset != 0);
    // Only preindex should log the register update here. For Postindex, the
    // update will be printed automatically by LogWrittenRegisters _after_ the
    // memory access itself is logged.
    RegLogMode log_mode = (addrmode == PreIndex) ?
        LogRegWrites : NoRegLog;
    WriteXRegister(addr_reg, address + offset, log_mode, Reg31IsStackPointer);
  }

  if ((addrmode == Offset) || (addrmode == PreIndex)) {
    address += offset;
  }

  // Verify that the calculated address is available to the host.
  VIXL_ASSERT(address == static_cast<uintptr_t>(address));

  return static_cast<uintptr_t>(address);
}


// MOVN/MOVZ/MOVK: move a (possibly shifted) 16-bit immediate into Rd.
// MOVK merges with the existing register value; MOVN inverts the immediate.
void Simulator::VisitMoveWideImmediate(const Instruction* instr) {
  MoveWideImmediateOp mov_op =
      static_cast<MoveWideImmediateOp>(instr->Mask(MoveWideImmediateMask));
  int64_t new_xn_val = 0;

  bool is_64_bits = instr->GetSixtyFourBits() == 1;
  // Shift is limited for W operations.
  VIXL_ASSERT(is_64_bits || (instr->GetShiftMoveWide() < 2));

  // Get the shifted immediate.
  int64_t shift = instr->GetShiftMoveWide() * 16;
  int64_t shifted_imm16 = static_cast<int64_t>(instr->GetImmMoveWide())
                          << shift;

  // Compute the new value.
  switch (mov_op) {
    case MOVN_w:
    case MOVN_x: {
      new_xn_val = ~shifted_imm16;
      // For W-sized operations, only the low 32 bits are significant.
      if (!is_64_bits) new_xn_val &= kWRegMask;
      break;
    }
    case MOVK_w:
    case MOVK_x: {
      // Replace only the 16-bit field selected by the shift, keeping the rest
      // of the destination register.
      unsigned reg_code = instr->GetRd();
      int64_t prev_xn_val =
          is_64_bits ? ReadXRegister(reg_code) : ReadWRegister(reg_code);
      new_xn_val = (prev_xn_val & ~(INT64_C(0xffff) << shift)) | shifted_imm16;
      break;
    }
    case MOVZ_w:
    case MOVZ_x: {
      new_xn_val = shifted_imm16;
      break;
    }
    default:
      VIXL_UNREACHABLE();
  }

  // Update the destination register.
  WriteXRegister(instr->GetRd(), new_xn_val);
}


// CSEL/CSINC/CSINV/CSNEG: select Rn if the condition passes, otherwise a
// (possibly incremented, inverted or negated) Rm.
void Simulator::VisitConditionalSelect(const Instruction* instr) {
  uint64_t new_val = ReadXRegister(instr->GetRn());

  if (ConditionFailed(static_cast<Condition>(instr->GetCondition()))) {
    new_val = ReadXRegister(instr->GetRm());
    switch (instr->Mask(ConditionalSelectMask)) {
      case CSEL_w:
      case CSEL_x:
        break;
      case CSINC_w:
      case CSINC_x:
        new_val++;
        break;
      case CSINV_w:
      case CSINV_x:
        new_val = ~new_val;
        break;
      case CSNEG_w:
      case CSNEG_x:
        new_val = -new_val;
        break;
      default:
        VIXL_UNIMPLEMENTED();
    }
  }
  unsigned reg_size = instr->GetSixtyFourBits() ? kXRegSize : kWRegSize;
  WriteRegister(reg_size, instr->GetRd(), new_val);
}


// One-source data processing: bit/byte reversals and leading zero/sign-bit
// counts (RBIT, REV16, REV/REV32, CLZ, CLS).
void Simulator::VisitDataProcessing1Source(const Instruction* instr) {
  unsigned dst = instr->GetRd();
  unsigned src = instr->GetRn();

  switch (instr->Mask(DataProcessing1SourceMask)) {
    case RBIT_w:
      WriteWRegister(dst, ReverseBits(ReadWRegister(src)));
      break;
    case RBIT_x:
      WriteXRegister(dst, ReverseBits(ReadXRegister(src)));
      break;
    case REV16_w:
      // The second argument selects the reversal granularity (log2 of the
      // block size in bytes): 1 = within each half-word.
      WriteWRegister(dst, ReverseBytes(ReadWRegister(src), 1));
      break;
    case REV16_x:
      WriteXRegister(dst, ReverseBytes(ReadXRegister(src), 1));
      break;
    case REV_w:
      WriteWRegister(dst, ReverseBytes(ReadWRegister(src), 2));
      break;
    case REV32_x:
      WriteXRegister(dst, ReverseBytes(ReadXRegister(src), 2));
      break;
    case REV_x:
      WriteXRegister(dst, ReverseBytes(ReadXRegister(src), 3));
      break;
    case CLZ_w:
      WriteWRegister(dst, CountLeadingZeros(ReadWRegister(src)));
      break;
    case CLZ_x:
      WriteXRegister(dst, CountLeadingZeros(ReadXRegister(src)));
      break;
    case CLS_w:
      WriteWRegister(dst, CountLeadingSignBits(ReadWRegister(src)));
      break;
    case CLS_x:
      WriteXRegister(dst, CountLeadingSignBits(ReadXRegister(src)));
      break;
    default:
      VIXL_UNIMPLEMENTED();
  }
}


// Reduce `data` (an n-bit value, 32 < n <= 64) modulo the 32-bit CRC
// polynomial `poly`, by repeatedly cancelling the top set bit. Returns the
// 32-bit remainder.
uint32_t Simulator::Poly32Mod2(unsigned n, uint64_t data, uint32_t poly) {
  VIXL_ASSERT((n > 32) && (n <= 64));
  for (unsigned i = (n - 1); i >= 32; i--) {
    if (((data >> i) & 1) != 0) {
      // Align the polynomial with bit i and XOR it in to clear that bit.
      uint64_t polysh32 = (uint64_t)poly << (i - 32);
      uint64_t mask = (UINT64_C(1) << i) - 1;
      data = ((data & mask) ^ polysh32);
    }
  }
  return data & 0xffffffff;
}


// Fold an 8-, 16- or 32-bit value into a running CRC32 accumulator using the
// given polynomial. Bit-reversal converts between the reflected CRC form and
// the plain polynomial division performed by Poly32Mod2.
template <typename T>
uint32_t Simulator::Crc32Checksum(uint32_t acc, T val, uint32_t poly) {
  unsigned size = sizeof(val) * 8;  // Number of bits in type T.
  VIXL_ASSERT((size == 8) || (size == 16) || (size == 32));
  uint64_t tempacc = static_cast<uint64_t>(ReverseBits(acc)) << size;
  uint64_t tempval = static_cast<uint64_t>(ReverseBits(val)) << 32;
  return ReverseBits(Poly32Mod2(32 + size, tempacc ^ tempval, poly));
}


// 64-bit overload: fold a 64-bit value into the CRC one 32-bit half at a
// time, low word first.
uint32_t Simulator::Crc32Checksum(uint32_t acc, uint64_t val, uint32_t poly) {
  // Poly32Mod2 cannot handle inputs with more than 32 bits, so compute
  // the CRC of each 32-bit word sequentially.
  acc = Crc32Checksum(acc, (uint32_t)(val & 0xffffffff), poly);
  return Crc32Checksum(acc, (uint32_t)(val >> 32), poly);
}


// Two-source data processing: divides, variable shifts and CRC32 checksums.
void Simulator::VisitDataProcessing2Source(const Instruction* instr) {
  Shift shift_op = NO_SHIFT;
  int64_t result = 0;
  unsigned reg_size = instr->GetSixtyFourBits() ? kXRegSize : kWRegSize;

  switch (instr->Mask(DataProcessing2SourceMask)) {
    case SDIV_w: {
      int32_t rn = ReadWRegister(instr->GetRn());
      int32_t rm = ReadWRegister(instr->GetRm());
      // INT_MIN / -1 overflows in C++; the architecture defines the result as
      // INT_MIN.
      if ((rn == kWMinInt) && (rm == -1)) {
        result = kWMinInt;
      } else if (rm == 0) {
        // Division by zero can be trapped, but not on A-class processors.
        result = 0;
      } else {
        result = rn / rm;
      }
      break;
    }
    case SDIV_x: {
      int64_t rn = ReadXRegister(instr->GetRn());
      int64_t rm = ReadXRegister(instr->GetRm());
      // INT64_MIN / -1 overflows in C++; the architecture defines the result
      // as INT64_MIN.
      if ((rn == kXMinInt) && (rm == -1)) {
        result = kXMinInt;
      } else if (rm == 0) {
        // Division by zero can be trapped, but not on A-class processors.
        result = 0;
      } else {
        result = rn / rm;
      }
      break;
    }
    case UDIV_w: {
      uint32_t rn = static_cast<uint32_t>(ReadWRegister(instr->GetRn()));
      uint32_t rm = static_cast<uint32_t>(ReadWRegister(instr->GetRm()));
      if (rm == 0) {
        // Division by zero can be trapped, but not on A-class processors.
        result = 0;
      } else {
        result = rn / rm;
      }
      break;
    }
    case UDIV_x: {
      uint64_t rn = static_cast<uint64_t>(ReadXRegister(instr->GetRn()));
      uint64_t rm = static_cast<uint64_t>(ReadXRegister(instr->GetRm()));
      if (rm == 0) {
        // Division by zero can be trapped, but not on A-class processors.
        result = 0;
      } else {
        result = rn / rm;
      }
      break;
    }
    // The variable shifts only record the shift type here; the shift itself
    // is applied after the switch.
    case LSLV_w:
    case LSLV_x:
      shift_op = LSL;
      break;
    case LSRV_w:
    case LSRV_x:
      shift_op = LSR;
      break;
    case ASRV_w:
    case ASRV_x:
      shift_op = ASR;
      break;
    case RORV_w:
    case RORV_x:
      shift_op = ROR;
      break;
    case CRC32B: {
      uint32_t acc = ReadRegister<uint32_t>(instr->GetRn());
      uint8_t val = ReadRegister<uint8_t>(instr->GetRm());
      result = Crc32Checksum(acc, val, CRC32_POLY);
      break;
    }
    case CRC32H: {
      uint32_t acc = ReadRegister<uint32_t>(instr->GetRn());
      uint16_t val = ReadRegister<uint16_t>(instr->GetRm());
      result = Crc32Checksum(acc, val, CRC32_POLY);
      break;
    }
    case CRC32W: {
      uint32_t acc = ReadRegister<uint32_t>(instr->GetRn());
      uint32_t val = ReadRegister<uint32_t>(instr->GetRm());
      result = Crc32Checksum(acc, val, CRC32_POLY);
      break;
    }
    case CRC32X: {
      uint32_t acc = ReadRegister<uint32_t>(instr->GetRn());
      uint64_t val = ReadRegister<uint64_t>(instr->GetRm());
      result = Crc32Checksum(acc, val, CRC32_POLY);
      // The CRC result is always a W-sized value.
      reg_size = kWRegSize;
      break;
    }
    case CRC32CB: {
      uint32_t acc = ReadRegister<uint32_t>(instr->GetRn());
      uint8_t val = ReadRegister<uint8_t>(instr->GetRm());
      result = Crc32Checksum(acc, val, CRC32C_POLY);
      break;
    }
    case CRC32CH: {
      uint32_t acc = ReadRegister<uint32_t>(instr->GetRn());
      uint16_t val = ReadRegister<uint16_t>(instr->GetRm());
      result = Crc32Checksum(acc, val, CRC32C_POLY);
      break;
    }
    case CRC32CW: {
      uint32_t acc = ReadRegister<uint32_t>(instr->GetRn());
      uint32_t val = ReadRegister<uint32_t>(instr->GetRm());
      result = Crc32Checksum(acc, val, CRC32C_POLY);
      break;
    }
    case CRC32CX: {
      uint32_t acc = ReadRegister<uint32_t>(instr->GetRn());
      uint64_t val = ReadRegister<uint64_t>(instr->GetRm());
      result = Crc32Checksum(acc, val, CRC32C_POLY);
      // The CRC result is always a W-sized value.
      reg_size = kWRegSize;
      break;
    }
    default:
      VIXL_UNIMPLEMENTED();
  }

  if (shift_op != NO_SHIFT) {
    // Shift distance encoded in the least-significant five/six bits of the
    // register.
    int mask = (instr->GetSixtyFourBits() == 1) ? 0x3f : 0x1f;
    unsigned shift = ReadWRegister(instr->GetRm()) & mask;
    result = ShiftOperand(reg_size,
                          ReadRegister(reg_size, instr->GetRn()),
                          shift_op,
                          shift);
  }
  WriteRegister(reg_size, instr->GetRd(), result);
}


// Return the high 64 bits of the 128-bit product u * v, for either signed or
// unsigned 64-bit T.
// The algorithm used is adapted from the one described in section 8.2 of
// Hacker's Delight, by Henry S. Warren, Jr.
template <typename T>
static int64_t MultiplyHigh(T u, T v) {
  uint64_t u0, v0, w0, u1, v1, w1, w2, t;
  uint64_t sign_mask = UINT64_C(0x8000000000000000);
  // For signed T, partial products must be sign-extended to 64 bits.
  uint64_t sign_ext = 0;
  if (std::numeric_limits<T>::is_signed) {
    sign_ext = UINT64_C(0xffffffff00000000);
  }

  VIXL_ASSERT(sizeof(u) == sizeof(uint64_t));
  VIXL_ASSERT(sizeof(u) == sizeof(u0));

  // Split each operand into 32-bit halves.
  u0 = u & 0xffffffff;
  u1 = u >> 32 | (((u & sign_mask) != 0) ? sign_ext : 0);
  v0 = v & 0xffffffff;
  v1 = v >> 32 | (((v & sign_mask) != 0) ? sign_ext : 0);

  // Accumulate the cross products, carrying the high halves forward.
  w0 = u0 * v0;
  t = u1 * v0 + (w0 >> 32);

  w1 = t & 0xffffffff;
  w2 = t >> 32 | (((t & sign_mask) != 0) ? sign_ext : 0);
  w1 = u0 * v1 + w1;
  w1 = w1 >> 32 | (((w1 & sign_mask) != 0) ? sign_ext : 0);

  uint64_t value = u1 * v1 + w2 + w1;
  int64_t result;
  // Reinterpret the unsigned accumulator as signed without invoking
  // implementation-defined conversion behaviour.
  memcpy(&result, &value, sizeof(result));
  return result;
}


// Three-source data processing: multiply-add/subtract, widening multiplies
// and high-half multiplies.
void Simulator::VisitDataProcessing3Source(const Instruction* instr) {
  unsigned reg_size = instr->GetSixtyFourBits() ? kXRegSize : kWRegSize;

  uint64_t result = 0;
  // Extract and sign- or zero-extend 32-bit arguments for widening operations.
2114 uint64_t rn_u32 = ReadRegister<uint32_t>(instr->GetRn()); 2115 uint64_t rm_u32 = ReadRegister<uint32_t>(instr->GetRm()); 2116 int64_t rn_s32 = ReadRegister<int32_t>(instr->GetRn()); 2117 int64_t rm_s32 = ReadRegister<int32_t>(instr->GetRm()); 2118 uint64_t rn_u64 = ReadXRegister(instr->GetRn()); 2119 uint64_t rm_u64 = ReadXRegister(instr->GetRm()); 2120 switch (instr->Mask(DataProcessing3SourceMask)) { 2121 case MADD_w: 2122 case MADD_x: 2123 result = ReadXRegister(instr->GetRa()) + (rn_u64 * rm_u64); 2124 break; 2125 case MSUB_w: 2126 case MSUB_x: 2127 result = ReadXRegister(instr->GetRa()) - (rn_u64 * rm_u64); 2128 break; 2129 case SMADDL_x: 2130 result = ReadXRegister(instr->GetRa()) + 2131 static_cast<uint64_t>(rn_s32 * rm_s32); 2132 break; 2133 case SMSUBL_x: 2134 result = ReadXRegister(instr->GetRa()) - 2135 static_cast<uint64_t>(rn_s32 * rm_s32); 2136 break; 2137 case UMADDL_x: 2138 result = ReadXRegister(instr->GetRa()) + (rn_u32 * rm_u32); 2139 break; 2140 case UMSUBL_x: 2141 result = ReadXRegister(instr->GetRa()) - (rn_u32 * rm_u32); 2142 break; 2143 case UMULH_x: 2144 result = MultiplyHigh(ReadRegister<uint64_t>(instr->GetRn()), 2145 ReadRegister<uint64_t>(instr->GetRm())); 2146 break; 2147 case SMULH_x: 2148 result = MultiplyHigh(ReadXRegister(instr->GetRn()), 2149 ReadXRegister(instr->GetRm())); 2150 break; 2151 default: 2152 VIXL_UNIMPLEMENTED(); 2153 } 2154 WriteRegister(reg_size, instr->GetRd(), result); 2155 } 2156 2157 2158 void Simulator::VisitBitfield(const Instruction* instr) { 2159 unsigned reg_size = instr->GetSixtyFourBits() ? kXRegSize : kWRegSize; 2160 int64_t reg_mask = instr->GetSixtyFourBits() ? kXRegMask : kWRegMask; 2161 int R = instr->GetImmR(); 2162 int S = instr->GetImmS(); 2163 int diff = S - R; 2164 uint64_t mask; 2165 if (diff >= 0) { 2166 mask = ~UINT64_C(0) >> (64 - (diff + 1)); 2167 mask = (static_cast<unsigned>(diff) < (reg_size - 1)) ? 
mask : reg_mask; 2168 } else { 2169 mask = ~UINT64_C(0) >> (64 - (S + 1)); 2170 mask = RotateRight(mask, R, reg_size); 2171 diff += reg_size; 2172 } 2173 2174 // inzero indicates if the extracted bitfield is inserted into the 2175 // destination register value or in zero. 2176 // If extend is true, extend the sign of the extracted bitfield. 2177 bool inzero = false; 2178 bool extend = false; 2179 switch (instr->Mask(BitfieldMask)) { 2180 case BFM_x: 2181 case BFM_w: 2182 break; 2183 case SBFM_x: 2184 case SBFM_w: 2185 inzero = true; 2186 extend = true; 2187 break; 2188 case UBFM_x: 2189 case UBFM_w: 2190 inzero = true; 2191 break; 2192 default: 2193 VIXL_UNIMPLEMENTED(); 2194 } 2195 2196 uint64_t dst = inzero ? 0 : ReadRegister(reg_size, instr->GetRd()); 2197 uint64_t src = ReadRegister(reg_size, instr->GetRn()); 2198 // Rotate source bitfield into place. 2199 uint64_t result = RotateRight(src, R, reg_size); 2200 // Determine the sign extension. 2201 uint64_t topbits = (diff == 63) ? 0 : (~UINT64_C(0) << (diff + 1)); 2202 uint64_t signbits = extend && ((src >> S) & 1) ? topbits : 0; 2203 2204 // Merge sign extension, dest/zero and bitfield. 2205 result = signbits | (result & mask) | (dst & ~mask); 2206 2207 WriteRegister(reg_size, instr->GetRd(), result); 2208 } 2209 2210 2211 void Simulator::VisitExtract(const Instruction* instr) { 2212 unsigned lsb = instr->GetImmS(); 2213 unsigned reg_size = (instr->GetSixtyFourBits() == 1) ? kXRegSize : kWRegSize; 2214 uint64_t low_res = 2215 static_cast<uint64_t>(ReadRegister(reg_size, instr->GetRm())) >> lsb; 2216 uint64_t high_res = 2217 (lsb == 0) ? 
0 : ReadRegister<uint64_t>(reg_size, instr->GetRn()) 2218 << (reg_size - lsb); 2219 WriteRegister(reg_size, instr->GetRd(), low_res | high_res); 2220 } 2221 2222 2223 void Simulator::VisitFPImmediate(const Instruction* instr) { 2224 AssertSupportedFPCR(); 2225 2226 unsigned dest = instr->GetRd(); 2227 switch (instr->Mask(FPImmediateMask)) { 2228 case FMOV_s_imm: 2229 WriteSRegister(dest, instr->GetImmFP32()); 2230 break; 2231 case FMOV_d_imm: 2232 WriteDRegister(dest, instr->GetImmFP64()); 2233 break; 2234 default: 2235 VIXL_UNREACHABLE(); 2236 } 2237 } 2238 2239 2240 void Simulator::VisitFPIntegerConvert(const Instruction* instr) { 2241 AssertSupportedFPCR(); 2242 2243 unsigned dst = instr->GetRd(); 2244 unsigned src = instr->GetRn(); 2245 2246 FPRounding round = ReadRMode(); 2247 2248 switch (instr->Mask(FPIntegerConvertMask)) { 2249 case FCVTAS_ws: 2250 WriteWRegister(dst, FPToInt32(ReadSRegister(src), FPTieAway)); 2251 break; 2252 case FCVTAS_xs: 2253 WriteXRegister(dst, FPToInt64(ReadSRegister(src), FPTieAway)); 2254 break; 2255 case FCVTAS_wd: 2256 WriteWRegister(dst, FPToInt32(ReadDRegister(src), FPTieAway)); 2257 break; 2258 case FCVTAS_xd: 2259 WriteXRegister(dst, FPToInt64(ReadDRegister(src), FPTieAway)); 2260 break; 2261 case FCVTAU_ws: 2262 WriteWRegister(dst, FPToUInt32(ReadSRegister(src), FPTieAway)); 2263 break; 2264 case FCVTAU_xs: 2265 WriteXRegister(dst, FPToUInt64(ReadSRegister(src), FPTieAway)); 2266 break; 2267 case FCVTAU_wd: 2268 WriteWRegister(dst, FPToUInt32(ReadDRegister(src), FPTieAway)); 2269 break; 2270 case FCVTAU_xd: 2271 WriteXRegister(dst, FPToUInt64(ReadDRegister(src), FPTieAway)); 2272 break; 2273 case FCVTMS_ws: 2274 WriteWRegister(dst, FPToInt32(ReadSRegister(src), FPNegativeInfinity)); 2275 break; 2276 case FCVTMS_xs: 2277 WriteXRegister(dst, FPToInt64(ReadSRegister(src), FPNegativeInfinity)); 2278 break; 2279 case FCVTMS_wd: 2280 WriteWRegister(dst, FPToInt32(ReadDRegister(src), FPNegativeInfinity)); 2281 break; 2282 case 
FCVTMS_xd: 2283 WriteXRegister(dst, FPToInt64(ReadDRegister(src), FPNegativeInfinity)); 2284 break; 2285 case FCVTMU_ws: 2286 WriteWRegister(dst, FPToUInt32(ReadSRegister(src), FPNegativeInfinity)); 2287 break; 2288 case FCVTMU_xs: 2289 WriteXRegister(dst, FPToUInt64(ReadSRegister(src), FPNegativeInfinity)); 2290 break; 2291 case FCVTMU_wd: 2292 WriteWRegister(dst, FPToUInt32(ReadDRegister(src), FPNegativeInfinity)); 2293 break; 2294 case FCVTMU_xd: 2295 WriteXRegister(dst, FPToUInt64(ReadDRegister(src), FPNegativeInfinity)); 2296 break; 2297 case FCVTPS_ws: 2298 WriteWRegister(dst, FPToInt32(ReadSRegister(src), FPPositiveInfinity)); 2299 break; 2300 case FCVTPS_xs: 2301 WriteXRegister(dst, FPToInt64(ReadSRegister(src), FPPositiveInfinity)); 2302 break; 2303 case FCVTPS_wd: 2304 WriteWRegister(dst, FPToInt32(ReadDRegister(src), FPPositiveInfinity)); 2305 break; 2306 case FCVTPS_xd: 2307 WriteXRegister(dst, FPToInt64(ReadDRegister(src), FPPositiveInfinity)); 2308 break; 2309 case FCVTPU_ws: 2310 WriteWRegister(dst, FPToUInt32(ReadSRegister(src), FPPositiveInfinity)); 2311 break; 2312 case FCVTPU_xs: 2313 WriteXRegister(dst, FPToUInt64(ReadSRegister(src), FPPositiveInfinity)); 2314 break; 2315 case FCVTPU_wd: 2316 WriteWRegister(dst, FPToUInt32(ReadDRegister(src), FPPositiveInfinity)); 2317 break; 2318 case FCVTPU_xd: 2319 WriteXRegister(dst, FPToUInt64(ReadDRegister(src), FPPositiveInfinity)); 2320 break; 2321 case FCVTNS_ws: 2322 WriteWRegister(dst, FPToInt32(ReadSRegister(src), FPTieEven)); 2323 break; 2324 case FCVTNS_xs: 2325 WriteXRegister(dst, FPToInt64(ReadSRegister(src), FPTieEven)); 2326 break; 2327 case FCVTNS_wd: 2328 WriteWRegister(dst, FPToInt32(ReadDRegister(src), FPTieEven)); 2329 break; 2330 case FCVTNS_xd: 2331 WriteXRegister(dst, FPToInt64(ReadDRegister(src), FPTieEven)); 2332 break; 2333 case FCVTNU_ws: 2334 WriteWRegister(dst, FPToUInt32(ReadSRegister(src), FPTieEven)); 2335 break; 2336 case FCVTNU_xs: 2337 WriteXRegister(dst, 
FPToUInt64(ReadSRegister(src), FPTieEven)); 2338 break; 2339 case FCVTNU_wd: 2340 WriteWRegister(dst, FPToUInt32(ReadDRegister(src), FPTieEven)); 2341 break; 2342 case FCVTNU_xd: 2343 WriteXRegister(dst, FPToUInt64(ReadDRegister(src), FPTieEven)); 2344 break; 2345 case FCVTZS_ws: 2346 WriteWRegister(dst, FPToInt32(ReadSRegister(src), FPZero)); 2347 break; 2348 case FCVTZS_xs: 2349 WriteXRegister(dst, FPToInt64(ReadSRegister(src), FPZero)); 2350 break; 2351 case FCVTZS_wd: 2352 WriteWRegister(dst, FPToInt32(ReadDRegister(src), FPZero)); 2353 break; 2354 case FCVTZS_xd: 2355 WriteXRegister(dst, FPToInt64(ReadDRegister(src), FPZero)); 2356 break; 2357 case FCVTZU_ws: 2358 WriteWRegister(dst, FPToUInt32(ReadSRegister(src), FPZero)); 2359 break; 2360 case FCVTZU_xs: 2361 WriteXRegister(dst, FPToUInt64(ReadSRegister(src), FPZero)); 2362 break; 2363 case FCVTZU_wd: 2364 WriteWRegister(dst, FPToUInt32(ReadDRegister(src), FPZero)); 2365 break; 2366 case FCVTZU_xd: 2367 WriteXRegister(dst, FPToUInt64(ReadDRegister(src), FPZero)); 2368 break; 2369 case FMOV_ws: 2370 WriteWRegister(dst, ReadSRegisterBits(src)); 2371 break; 2372 case FMOV_xd: 2373 WriteXRegister(dst, ReadDRegisterBits(src)); 2374 break; 2375 case FMOV_sw: 2376 WriteSRegisterBits(dst, ReadWRegister(src)); 2377 break; 2378 case FMOV_dx: 2379 WriteDRegisterBits(dst, ReadXRegister(src)); 2380 break; 2381 case FMOV_d1_x: 2382 LogicVRegister(ReadVRegister(dst)) 2383 .SetUint(kFormatD, 1, ReadXRegister(src)); 2384 break; 2385 case FMOV_x_d1: 2386 WriteXRegister(dst, LogicVRegister(ReadVRegister(src)).Uint(kFormatD, 1)); 2387 break; 2388 2389 // A 32-bit input can be handled in the same way as a 64-bit input, since 2390 // the sign- or zero-extension will not affect the conversion. 
2391 case SCVTF_dx: 2392 WriteDRegister(dst, FixedToDouble(ReadXRegister(src), 0, round)); 2393 break; 2394 case SCVTF_dw: 2395 WriteDRegister(dst, FixedToDouble(ReadWRegister(src), 0, round)); 2396 break; 2397 case UCVTF_dx: 2398 WriteDRegister(dst, UFixedToDouble(ReadXRegister(src), 0, round)); 2399 break; 2400 case UCVTF_dw: { 2401 WriteDRegister(dst, 2402 UFixedToDouble(static_cast<uint32_t>(ReadWRegister(src)), 2403 0, 2404 round)); 2405 break; 2406 } 2407 case SCVTF_sx: 2408 WriteSRegister(dst, FixedToFloat(ReadXRegister(src), 0, round)); 2409 break; 2410 case SCVTF_sw: 2411 WriteSRegister(dst, FixedToFloat(ReadWRegister(src), 0, round)); 2412 break; 2413 case UCVTF_sx: 2414 WriteSRegister(dst, UFixedToFloat(ReadXRegister(src), 0, round)); 2415 break; 2416 case UCVTF_sw: { 2417 WriteSRegister(dst, 2418 UFixedToFloat(static_cast<uint32_t>(ReadWRegister(src)), 2419 0, 2420 round)); 2421 break; 2422 } 2423 2424 default: 2425 VIXL_UNREACHABLE(); 2426 } 2427 } 2428 2429 2430 void Simulator::VisitFPFixedPointConvert(const Instruction* instr) { 2431 AssertSupportedFPCR(); 2432 2433 unsigned dst = instr->GetRd(); 2434 unsigned src = instr->GetRn(); 2435 int fbits = 64 - instr->GetFPScale(); 2436 2437 FPRounding round = ReadRMode(); 2438 2439 switch (instr->Mask(FPFixedPointConvertMask)) { 2440 // A 32-bit input can be handled in the same way as a 64-bit input, since 2441 // the sign- or zero-extension will not affect the conversion. 
2442 case SCVTF_dx_fixed: 2443 WriteDRegister(dst, FixedToDouble(ReadXRegister(src), fbits, round)); 2444 break; 2445 case SCVTF_dw_fixed: 2446 WriteDRegister(dst, FixedToDouble(ReadWRegister(src), fbits, round)); 2447 break; 2448 case UCVTF_dx_fixed: 2449 WriteDRegister(dst, UFixedToDouble(ReadXRegister(src), fbits, round)); 2450 break; 2451 case UCVTF_dw_fixed: { 2452 WriteDRegister(dst, 2453 UFixedToDouble(static_cast<uint32_t>(ReadWRegister(src)), 2454 fbits, 2455 round)); 2456 break; 2457 } 2458 case SCVTF_sx_fixed: 2459 WriteSRegister(dst, FixedToFloat(ReadXRegister(src), fbits, round)); 2460 break; 2461 case SCVTF_sw_fixed: 2462 WriteSRegister(dst, FixedToFloat(ReadWRegister(src), fbits, round)); 2463 break; 2464 case UCVTF_sx_fixed: 2465 WriteSRegister(dst, UFixedToFloat(ReadXRegister(src), fbits, round)); 2466 break; 2467 case UCVTF_sw_fixed: { 2468 WriteSRegister(dst, 2469 UFixedToFloat(static_cast<uint32_t>(ReadWRegister(src)), 2470 fbits, 2471 round)); 2472 break; 2473 } 2474 case FCVTZS_xd_fixed: 2475 WriteXRegister(dst, 2476 FPToInt64(ReadDRegister(src) * std::pow(2.0, fbits), 2477 FPZero)); 2478 break; 2479 case FCVTZS_wd_fixed: 2480 WriteWRegister(dst, 2481 FPToInt32(ReadDRegister(src) * std::pow(2.0, fbits), 2482 FPZero)); 2483 break; 2484 case FCVTZU_xd_fixed: 2485 WriteXRegister(dst, 2486 FPToUInt64(ReadDRegister(src) * std::pow(2.0, fbits), 2487 FPZero)); 2488 break; 2489 case FCVTZU_wd_fixed: 2490 WriteWRegister(dst, 2491 FPToUInt32(ReadDRegister(src) * std::pow(2.0, fbits), 2492 FPZero)); 2493 break; 2494 case FCVTZS_xs_fixed: 2495 WriteXRegister(dst, 2496 FPToInt64(ReadSRegister(src) * std::pow(2.0f, fbits), 2497 FPZero)); 2498 break; 2499 case FCVTZS_ws_fixed: 2500 WriteWRegister(dst, 2501 FPToInt32(ReadSRegister(src) * std::pow(2.0f, fbits), 2502 FPZero)); 2503 break; 2504 case FCVTZU_xs_fixed: 2505 WriteXRegister(dst, 2506 FPToUInt64(ReadSRegister(src) * std::pow(2.0f, fbits), 2507 FPZero)); 2508 break; 2509 case FCVTZU_ws_fixed: 2510 
WriteWRegister(dst, 2511 FPToUInt32(ReadSRegister(src) * std::pow(2.0f, fbits), 2512 FPZero)); 2513 break; 2514 default: 2515 VIXL_UNREACHABLE(); 2516 } 2517 } 2518 2519 2520 void Simulator::VisitFPCompare(const Instruction* instr) { 2521 AssertSupportedFPCR(); 2522 2523 FPTrapFlags trap = DisableTrap; 2524 switch (instr->Mask(FPCompareMask)) { 2525 case FCMPE_s: 2526 trap = EnableTrap; 2527 VIXL_FALLTHROUGH(); 2528 case FCMP_s: 2529 FPCompare(ReadSRegister(instr->GetRn()), 2530 ReadSRegister(instr->GetRm()), 2531 trap); 2532 break; 2533 case FCMPE_d: 2534 trap = EnableTrap; 2535 VIXL_FALLTHROUGH(); 2536 case FCMP_d: 2537 FPCompare(ReadDRegister(instr->GetRn()), 2538 ReadDRegister(instr->GetRm()), 2539 trap); 2540 break; 2541 case FCMPE_s_zero: 2542 trap = EnableTrap; 2543 VIXL_FALLTHROUGH(); 2544 case FCMP_s_zero: 2545 FPCompare(ReadSRegister(instr->GetRn()), 0.0f, trap); 2546 break; 2547 case FCMPE_d_zero: 2548 trap = EnableTrap; 2549 VIXL_FALLTHROUGH(); 2550 case FCMP_d_zero: 2551 FPCompare(ReadDRegister(instr->GetRn()), 0.0, trap); 2552 break; 2553 default: 2554 VIXL_UNIMPLEMENTED(); 2555 } 2556 } 2557 2558 2559 void Simulator::VisitFPConditionalCompare(const Instruction* instr) { 2560 AssertSupportedFPCR(); 2561 2562 FPTrapFlags trap = DisableTrap; 2563 switch (instr->Mask(FPConditionalCompareMask)) { 2564 case FCCMPE_s: 2565 trap = EnableTrap; 2566 VIXL_FALLTHROUGH(); 2567 case FCCMP_s: 2568 if (ConditionPassed(instr->GetCondition())) { 2569 FPCompare(ReadSRegister(instr->GetRn()), 2570 ReadSRegister(instr->GetRm()), 2571 trap); 2572 } else { 2573 ReadNzcv().SetFlags(instr->GetNzcv()); 2574 LogSystemRegister(NZCV); 2575 } 2576 break; 2577 case FCCMPE_d: 2578 trap = EnableTrap; 2579 VIXL_FALLTHROUGH(); 2580 case FCCMP_d: 2581 if (ConditionPassed(instr->GetCondition())) { 2582 FPCompare(ReadDRegister(instr->GetRn()), 2583 ReadDRegister(instr->GetRm()), 2584 trap); 2585 } else { 2586 ReadNzcv().SetFlags(instr->GetNzcv()); 2587 LogSystemRegister(NZCV); 2588 } 
2589 break; 2590 default: 2591 VIXL_UNIMPLEMENTED(); 2592 } 2593 } 2594 2595 2596 void Simulator::VisitFPConditionalSelect(const Instruction* instr) { 2597 AssertSupportedFPCR(); 2598 2599 Instr selected; 2600 if (ConditionPassed(instr->GetCondition())) { 2601 selected = instr->GetRn(); 2602 } else { 2603 selected = instr->GetRm(); 2604 } 2605 2606 switch (instr->Mask(FPConditionalSelectMask)) { 2607 case FCSEL_s: 2608 WriteSRegister(instr->GetRd(), ReadSRegister(selected)); 2609 break; 2610 case FCSEL_d: 2611 WriteDRegister(instr->GetRd(), ReadDRegister(selected)); 2612 break; 2613 default: 2614 VIXL_UNIMPLEMENTED(); 2615 } 2616 } 2617 2618 2619 void Simulator::VisitFPDataProcessing1Source(const Instruction* instr) { 2620 AssertSupportedFPCR(); 2621 2622 FPRounding fpcr_rounding = static_cast<FPRounding>(ReadFpcr().GetRMode()); 2623 VectorFormat vform = (instr->Mask(FP64) == FP64) ? kFormatD : kFormatS; 2624 SimVRegister& rd = ReadVRegister(instr->GetRd()); 2625 SimVRegister& rn = ReadVRegister(instr->GetRn()); 2626 bool inexact_exception = false; 2627 2628 unsigned fd = instr->GetRd(); 2629 unsigned fn = instr->GetRn(); 2630 2631 switch (instr->Mask(FPDataProcessing1SourceMask)) { 2632 case FMOV_s: 2633 WriteSRegister(fd, ReadSRegister(fn)); 2634 return; 2635 case FMOV_d: 2636 WriteDRegister(fd, ReadDRegister(fn)); 2637 return; 2638 case FABS_s: 2639 case FABS_d: 2640 fabs_(vform, ReadVRegister(fd), ReadVRegister(fn)); 2641 // Explicitly log the register update whilst we have type information. 2642 LogVRegister(fd, GetPrintRegisterFormatFP(vform)); 2643 return; 2644 case FNEG_s: 2645 case FNEG_d: 2646 fneg(vform, ReadVRegister(fd), ReadVRegister(fn)); 2647 // Explicitly log the register update whilst we have type information. 
2648 LogVRegister(fd, GetPrintRegisterFormatFP(vform)); 2649 return; 2650 case FCVT_ds: 2651 WriteDRegister(fd, FPToDouble(ReadSRegister(fn))); 2652 return; 2653 case FCVT_sd: 2654 WriteSRegister(fd, FPToFloat(ReadDRegister(fn), FPTieEven)); 2655 return; 2656 case FCVT_hs: 2657 WriteHRegister(fd, FPToFloat16(ReadSRegister(fn), FPTieEven)); 2658 return; 2659 case FCVT_sh: 2660 WriteSRegister(fd, FPToFloat(ReadHRegister(fn))); 2661 return; 2662 case FCVT_dh: 2663 WriteDRegister(fd, FPToDouble(FPToFloat(ReadHRegister(fn)))); 2664 return; 2665 case FCVT_hd: 2666 WriteHRegister(fd, FPToFloat16(ReadDRegister(fn), FPTieEven)); 2667 return; 2668 case FSQRT_s: 2669 case FSQRT_d: 2670 fsqrt(vform, rd, rn); 2671 // Explicitly log the register update whilst we have type information. 2672 LogVRegister(fd, GetPrintRegisterFormatFP(vform)); 2673 return; 2674 case FRINTI_s: 2675 case FRINTI_d: 2676 break; // Use FPCR rounding mode. 2677 case FRINTX_s: 2678 case FRINTX_d: 2679 inexact_exception = true; 2680 break; 2681 case FRINTA_s: 2682 case FRINTA_d: 2683 fpcr_rounding = FPTieAway; 2684 break; 2685 case FRINTM_s: 2686 case FRINTM_d: 2687 fpcr_rounding = FPNegativeInfinity; 2688 break; 2689 case FRINTN_s: 2690 case FRINTN_d: 2691 fpcr_rounding = FPTieEven; 2692 break; 2693 case FRINTP_s: 2694 case FRINTP_d: 2695 fpcr_rounding = FPPositiveInfinity; 2696 break; 2697 case FRINTZ_s: 2698 case FRINTZ_d: 2699 fpcr_rounding = FPZero; 2700 break; 2701 default: 2702 VIXL_UNIMPLEMENTED(); 2703 } 2704 2705 // Only FRINT* instructions fall through the switch above. 2706 frint(vform, rd, rn, fpcr_rounding, inexact_exception); 2707 // Explicitly log the register update whilst we have type information. 2708 LogVRegister(fd, GetPrintRegisterFormatFP(vform)); 2709 } 2710 2711 2712 void Simulator::VisitFPDataProcessing2Source(const Instruction* instr) { 2713 AssertSupportedFPCR(); 2714 2715 VectorFormat vform = (instr->Mask(FP64) == FP64) ? 
kFormatD : kFormatS; 2716 SimVRegister& rd = ReadVRegister(instr->GetRd()); 2717 SimVRegister& rn = ReadVRegister(instr->GetRn()); 2718 SimVRegister& rm = ReadVRegister(instr->GetRm()); 2719 2720 switch (instr->Mask(FPDataProcessing2SourceMask)) { 2721 case FADD_s: 2722 case FADD_d: 2723 fadd(vform, rd, rn, rm); 2724 break; 2725 case FSUB_s: 2726 case FSUB_d: 2727 fsub(vform, rd, rn, rm); 2728 break; 2729 case FMUL_s: 2730 case FMUL_d: 2731 fmul(vform, rd, rn, rm); 2732 break; 2733 case FNMUL_s: 2734 case FNMUL_d: 2735 fnmul(vform, rd, rn, rm); 2736 break; 2737 case FDIV_s: 2738 case FDIV_d: 2739 fdiv(vform, rd, rn, rm); 2740 break; 2741 case FMAX_s: 2742 case FMAX_d: 2743 fmax(vform, rd, rn, rm); 2744 break; 2745 case FMIN_s: 2746 case FMIN_d: 2747 fmin(vform, rd, rn, rm); 2748 break; 2749 case FMAXNM_s: 2750 case FMAXNM_d: 2751 fmaxnm(vform, rd, rn, rm); 2752 break; 2753 case FMINNM_s: 2754 case FMINNM_d: 2755 fminnm(vform, rd, rn, rm); 2756 break; 2757 default: 2758 VIXL_UNREACHABLE(); 2759 } 2760 // Explicitly log the register update whilst we have type information. 
2761 LogVRegister(instr->GetRd(), GetPrintRegisterFormatFP(vform)); 2762 } 2763 2764 2765 void Simulator::VisitFPDataProcessing3Source(const Instruction* instr) { 2766 AssertSupportedFPCR(); 2767 2768 unsigned fd = instr->GetRd(); 2769 unsigned fn = instr->GetRn(); 2770 unsigned fm = instr->GetRm(); 2771 unsigned fa = instr->GetRa(); 2772 2773 switch (instr->Mask(FPDataProcessing3SourceMask)) { 2774 // fd = fa +/- (fn * fm) 2775 case FMADD_s: 2776 WriteSRegister(fd, 2777 FPMulAdd(ReadSRegister(fa), 2778 ReadSRegister(fn), 2779 ReadSRegister(fm))); 2780 break; 2781 case FMSUB_s: 2782 WriteSRegister(fd, 2783 FPMulAdd(ReadSRegister(fa), 2784 -ReadSRegister(fn), 2785 ReadSRegister(fm))); 2786 break; 2787 case FMADD_d: 2788 WriteDRegister(fd, 2789 FPMulAdd(ReadDRegister(fa), 2790 ReadDRegister(fn), 2791 ReadDRegister(fm))); 2792 break; 2793 case FMSUB_d: 2794 WriteDRegister(fd, 2795 FPMulAdd(ReadDRegister(fa), 2796 -ReadDRegister(fn), 2797 ReadDRegister(fm))); 2798 break; 2799 // Negated variants of the above. 
2800 case FNMADD_s: 2801 WriteSRegister(fd, 2802 FPMulAdd(-ReadSRegister(fa), 2803 -ReadSRegister(fn), 2804 ReadSRegister(fm))); 2805 break; 2806 case FNMSUB_s: 2807 WriteSRegister(fd, 2808 FPMulAdd(-ReadSRegister(fa), 2809 ReadSRegister(fn), 2810 ReadSRegister(fm))); 2811 break; 2812 case FNMADD_d: 2813 WriteDRegister(fd, 2814 FPMulAdd(-ReadDRegister(fa), 2815 -ReadDRegister(fn), 2816 ReadDRegister(fm))); 2817 break; 2818 case FNMSUB_d: 2819 WriteDRegister(fd, 2820 FPMulAdd(-ReadDRegister(fa), 2821 ReadDRegister(fn), 2822 ReadDRegister(fm))); 2823 break; 2824 default: 2825 VIXL_UNIMPLEMENTED(); 2826 } 2827 } 2828 2829 2830 bool Simulator::FPProcessNaNs(const Instruction* instr) { 2831 unsigned fd = instr->GetRd(); 2832 unsigned fn = instr->GetRn(); 2833 unsigned fm = instr->GetRm(); 2834 bool done = false; 2835 2836 if (instr->Mask(FP64) == FP64) { 2837 double result = FPProcessNaNs(ReadDRegister(fn), ReadDRegister(fm)); 2838 if (std::isnan(result)) { 2839 WriteDRegister(fd, result); 2840 done = true; 2841 } 2842 } else { 2843 float result = FPProcessNaNs(ReadSRegister(fn), ReadSRegister(fm)); 2844 if (std::isnan(result)) { 2845 WriteSRegister(fd, result); 2846 done = true; 2847 } 2848 } 2849 2850 return done; 2851 } 2852 2853 2854 void Simulator::SysOp_W(int op, int64_t val) { 2855 switch (op) { 2856 case IVAU: 2857 case CVAC: 2858 case CVAU: 2859 case CIVAC: { 2860 // Perform a dummy memory access to ensure that we have read access 2861 // to the specified address. 2862 volatile uint8_t y = Memory::Read<uint8_t>(val); 2863 USE(y); 2864 // TODO: Implement "case ZVA:". 2865 break; 2866 } 2867 default: 2868 VIXL_UNIMPLEMENTED(); 2869 } 2870 } 2871 2872 2873 void Simulator::VisitSystem(const Instruction* instr) { 2874 // Some system instructions hijack their Op and Cp fields to represent a 2875 // range of immediates instead of indicating a different instruction. This 2876 // makes the decoding tricky. 
2877 if (instr->Mask(SystemExclusiveMonitorFMask) == SystemExclusiveMonitorFixed) { 2878 VIXL_ASSERT(instr->Mask(SystemExclusiveMonitorMask) == CLREX); 2879 switch (instr->Mask(SystemExclusiveMonitorMask)) { 2880 case CLREX: { 2881 PrintExclusiveAccessWarning(); 2882 ClearLocalMonitor(); 2883 break; 2884 } 2885 } 2886 } else if (instr->Mask(SystemSysRegFMask) == SystemSysRegFixed) { 2887 switch (instr->Mask(SystemSysRegMask)) { 2888 case MRS: { 2889 switch (instr->GetImmSystemRegister()) { 2890 case NZCV: 2891 WriteXRegister(instr->GetRt(), ReadNzcv().GetRawValue()); 2892 break; 2893 case FPCR: 2894 WriteXRegister(instr->GetRt(), ReadFpcr().GetRawValue()); 2895 break; 2896 default: 2897 VIXL_UNIMPLEMENTED(); 2898 } 2899 break; 2900 } 2901 case MSR: { 2902 switch (instr->GetImmSystemRegister()) { 2903 case NZCV: 2904 ReadNzcv().SetRawValue(ReadWRegister(instr->GetRt())); 2905 LogSystemRegister(NZCV); 2906 break; 2907 case FPCR: 2908 ReadFpcr().SetRawValue(ReadWRegister(instr->GetRt())); 2909 LogSystemRegister(FPCR); 2910 break; 2911 default: 2912 VIXL_UNIMPLEMENTED(); 2913 } 2914 break; 2915 } 2916 } 2917 } else if (instr->Mask(SystemHintFMask) == SystemHintFixed) { 2918 VIXL_ASSERT(instr->Mask(SystemHintMask) == HINT); 2919 switch (instr->GetImmHint()) { 2920 case NOP: 2921 break; 2922 default: 2923 VIXL_UNIMPLEMENTED(); 2924 } 2925 } else if (instr->Mask(MemBarrierFMask) == MemBarrierFixed) { 2926 __sync_synchronize(); 2927 } else if ((instr->Mask(SystemSysFMask) == SystemSysFixed)) { 2928 switch (instr->Mask(SystemSysMask)) { 2929 case SYS: 2930 SysOp_W(instr->GetSysOp(), ReadXRegister(instr->GetRt())); 2931 break; 2932 default: 2933 VIXL_UNIMPLEMENTED(); 2934 } 2935 } else { 2936 VIXL_UNIMPLEMENTED(); 2937 } 2938 } 2939 2940 2941 void Simulator::VisitException(const Instruction* instr) { 2942 switch (instr->Mask(ExceptionMask)) { 2943 case HLT: 2944 switch (instr->GetImmException()) { 2945 case kUnreachableOpcode: 2946 DoUnreachable(instr); 2947 return; 2948 
case kTraceOpcode: 2949 DoTrace(instr); 2950 return; 2951 case kLogOpcode: 2952 DoLog(instr); 2953 return; 2954 case kPrintfOpcode: 2955 DoPrintf(instr); 2956 return; 2957 case kRuntimeCallOpcode: 2958 DoRuntimeCall(instr); 2959 return; 2960 default: 2961 HostBreakpoint(); 2962 return; 2963 } 2964 case BRK: 2965 HostBreakpoint(); 2966 return; 2967 default: 2968 VIXL_UNIMPLEMENTED(); 2969 } 2970 } 2971 2972 2973 void Simulator::VisitCrypto2RegSHA(const Instruction* instr) { 2974 VisitUnimplemented(instr); 2975 } 2976 2977 2978 void Simulator::VisitCrypto3RegSHA(const Instruction* instr) { 2979 VisitUnimplemented(instr); 2980 } 2981 2982 2983 void Simulator::VisitCryptoAES(const Instruction* instr) { 2984 VisitUnimplemented(instr); 2985 } 2986 2987 2988 void Simulator::VisitNEON2RegMisc(const Instruction* instr) { 2989 NEONFormatDecoder nfd(instr); 2990 VectorFormat vf = nfd.GetVectorFormat(); 2991 2992 static const NEONFormatMap map_lp = 2993 {{23, 22, 30}, {NF_4H, NF_8H, NF_2S, NF_4S, NF_1D, NF_2D}}; 2994 VectorFormat vf_lp = nfd.GetVectorFormat(&map_lp); 2995 2996 static const NEONFormatMap map_fcvtl = {{22}, {NF_4S, NF_2D}}; 2997 VectorFormat vf_fcvtl = nfd.GetVectorFormat(&map_fcvtl); 2998 2999 static const NEONFormatMap map_fcvtn = {{22, 30}, 3000 {NF_4H, NF_8H, NF_2S, NF_4S}}; 3001 VectorFormat vf_fcvtn = nfd.GetVectorFormat(&map_fcvtn); 3002 3003 SimVRegister& rd = ReadVRegister(instr->GetRd()); 3004 SimVRegister& rn = ReadVRegister(instr->GetRn()); 3005 3006 if (instr->Mask(NEON2RegMiscOpcode) <= NEON_NEG_opcode) { 3007 // These instructions all use a two bit size field, except NOT and RBIT, 3008 // which use the field to encode the operation. 
3009 switch (instr->Mask(NEON2RegMiscMask)) { 3010 case NEON_REV64: 3011 rev64(vf, rd, rn); 3012 break; 3013 case NEON_REV32: 3014 rev32(vf, rd, rn); 3015 break; 3016 case NEON_REV16: 3017 rev16(vf, rd, rn); 3018 break; 3019 case NEON_SUQADD: 3020 suqadd(vf, rd, rn); 3021 break; 3022 case NEON_USQADD: 3023 usqadd(vf, rd, rn); 3024 break; 3025 case NEON_CLS: 3026 cls(vf, rd, rn); 3027 break; 3028 case NEON_CLZ: 3029 clz(vf, rd, rn); 3030 break; 3031 case NEON_CNT: 3032 cnt(vf, rd, rn); 3033 break; 3034 case NEON_SQABS: 3035 abs(vf, rd, rn).SignedSaturate(vf); 3036 break; 3037 case NEON_SQNEG: 3038 neg(vf, rd, rn).SignedSaturate(vf); 3039 break; 3040 case NEON_CMGT_zero: 3041 cmp(vf, rd, rn, 0, gt); 3042 break; 3043 case NEON_CMGE_zero: 3044 cmp(vf, rd, rn, 0, ge); 3045 break; 3046 case NEON_CMEQ_zero: 3047 cmp(vf, rd, rn, 0, eq); 3048 break; 3049 case NEON_CMLE_zero: 3050 cmp(vf, rd, rn, 0, le); 3051 break; 3052 case NEON_CMLT_zero: 3053 cmp(vf, rd, rn, 0, lt); 3054 break; 3055 case NEON_ABS: 3056 abs(vf, rd, rn); 3057 break; 3058 case NEON_NEG: 3059 neg(vf, rd, rn); 3060 break; 3061 case NEON_SADDLP: 3062 saddlp(vf_lp, rd, rn); 3063 break; 3064 case NEON_UADDLP: 3065 uaddlp(vf_lp, rd, rn); 3066 break; 3067 case NEON_SADALP: 3068 sadalp(vf_lp, rd, rn); 3069 break; 3070 case NEON_UADALP: 3071 uadalp(vf_lp, rd, rn); 3072 break; 3073 case NEON_RBIT_NOT: 3074 vf = nfd.GetVectorFormat(nfd.LogicalFormatMap()); 3075 switch (instr->GetFPType()) { 3076 case 0: 3077 not_(vf, rd, rn); 3078 break; 3079 case 1: 3080 rbit(vf, rd, rn); 3081 break; 3082 default: 3083 VIXL_UNIMPLEMENTED(); 3084 } 3085 break; 3086 } 3087 } else { 3088 VectorFormat fpf = nfd.GetVectorFormat(nfd.FPFormatMap()); 3089 FPRounding fpcr_rounding = static_cast<FPRounding>(ReadFpcr().GetRMode()); 3090 bool inexact_exception = false; 3091 3092 // These instructions all use a one bit size field, except XTN, SQXTUN, 3093 // SHLL, SQXTN and UQXTN, which use a two bit size field. 
3094 switch (instr->Mask(NEON2RegMiscFPMask)) { 3095 case NEON_FABS: 3096 fabs_(fpf, rd, rn); 3097 return; 3098 case NEON_FNEG: 3099 fneg(fpf, rd, rn); 3100 return; 3101 case NEON_FSQRT: 3102 fsqrt(fpf, rd, rn); 3103 return; 3104 case NEON_FCVTL: 3105 if (instr->Mask(NEON_Q)) { 3106 fcvtl2(vf_fcvtl, rd, rn); 3107 } else { 3108 fcvtl(vf_fcvtl, rd, rn); 3109 } 3110 return; 3111 case NEON_FCVTN: 3112 if (instr->Mask(NEON_Q)) { 3113 fcvtn2(vf_fcvtn, rd, rn); 3114 } else { 3115 fcvtn(vf_fcvtn, rd, rn); 3116 } 3117 return; 3118 case NEON_FCVTXN: 3119 if (instr->Mask(NEON_Q)) { 3120 fcvtxn2(vf_fcvtn, rd, rn); 3121 } else { 3122 fcvtxn(vf_fcvtn, rd, rn); 3123 } 3124 return; 3125 3126 // The following instructions break from the switch statement, rather 3127 // than return. 3128 case NEON_FRINTI: 3129 break; // Use FPCR rounding mode. 3130 case NEON_FRINTX: 3131 inexact_exception = true; 3132 break; 3133 case NEON_FRINTA: 3134 fpcr_rounding = FPTieAway; 3135 break; 3136 case NEON_FRINTM: 3137 fpcr_rounding = FPNegativeInfinity; 3138 break; 3139 case NEON_FRINTN: 3140 fpcr_rounding = FPTieEven; 3141 break; 3142 case NEON_FRINTP: 3143 fpcr_rounding = FPPositiveInfinity; 3144 break; 3145 case NEON_FRINTZ: 3146 fpcr_rounding = FPZero; 3147 break; 3148 3149 case NEON_FCVTNS: 3150 fcvts(fpf, rd, rn, FPTieEven); 3151 return; 3152 case NEON_FCVTNU: 3153 fcvtu(fpf, rd, rn, FPTieEven); 3154 return; 3155 case NEON_FCVTPS: 3156 fcvts(fpf, rd, rn, FPPositiveInfinity); 3157 return; 3158 case NEON_FCVTPU: 3159 fcvtu(fpf, rd, rn, FPPositiveInfinity); 3160 return; 3161 case NEON_FCVTMS: 3162 fcvts(fpf, rd, rn, FPNegativeInfinity); 3163 return; 3164 case NEON_FCVTMU: 3165 fcvtu(fpf, rd, rn, FPNegativeInfinity); 3166 return; 3167 case NEON_FCVTZS: 3168 fcvts(fpf, rd, rn, FPZero); 3169 return; 3170 case NEON_FCVTZU: 3171 fcvtu(fpf, rd, rn, FPZero); 3172 return; 3173 case NEON_FCVTAS: 3174 fcvts(fpf, rd, rn, FPTieAway); 3175 return; 3176 case NEON_FCVTAU: 3177 fcvtu(fpf, rd, rn, FPTieAway); 
3178 return; 3179 case NEON_SCVTF: 3180 scvtf(fpf, rd, rn, 0, fpcr_rounding); 3181 return; 3182 case NEON_UCVTF: 3183 ucvtf(fpf, rd, rn, 0, fpcr_rounding); 3184 return; 3185 case NEON_URSQRTE: 3186 ursqrte(fpf, rd, rn); 3187 return; 3188 case NEON_URECPE: 3189 urecpe(fpf, rd, rn); 3190 return; 3191 case NEON_FRSQRTE: 3192 frsqrte(fpf, rd, rn); 3193 return; 3194 case NEON_FRECPE: 3195 frecpe(fpf, rd, rn, fpcr_rounding); 3196 return; 3197 case NEON_FCMGT_zero: 3198 fcmp_zero(fpf, rd, rn, gt); 3199 return; 3200 case NEON_FCMGE_zero: 3201 fcmp_zero(fpf, rd, rn, ge); 3202 return; 3203 case NEON_FCMEQ_zero: 3204 fcmp_zero(fpf, rd, rn, eq); 3205 return; 3206 case NEON_FCMLE_zero: 3207 fcmp_zero(fpf, rd, rn, le); 3208 return; 3209 case NEON_FCMLT_zero: 3210 fcmp_zero(fpf, rd, rn, lt); 3211 return; 3212 default: 3213 if ((NEON_XTN_opcode <= instr->Mask(NEON2RegMiscOpcode)) && 3214 (instr->Mask(NEON2RegMiscOpcode) <= NEON_UQXTN_opcode)) { 3215 switch (instr->Mask(NEON2RegMiscMask)) { 3216 case NEON_XTN: 3217 xtn(vf, rd, rn); 3218 return; 3219 case NEON_SQXTN: 3220 sqxtn(vf, rd, rn); 3221 return; 3222 case NEON_UQXTN: 3223 uqxtn(vf, rd, rn); 3224 return; 3225 case NEON_SQXTUN: 3226 sqxtun(vf, rd, rn); 3227 return; 3228 case NEON_SHLL: 3229 vf = nfd.GetVectorFormat(nfd.LongIntegerFormatMap()); 3230 if (instr->Mask(NEON_Q)) { 3231 shll2(vf, rd, rn); 3232 } else { 3233 shll(vf, rd, rn); 3234 } 3235 return; 3236 default: 3237 VIXL_UNIMPLEMENTED(); 3238 } 3239 } else { 3240 VIXL_UNIMPLEMENTED(); 3241 } 3242 } 3243 3244 // Only FRINT* instructions fall through the switch above. 
    frint(fpf, rd, rn, fpcr_rounding, inexact_exception);
  }
}


// Visitor for "NEON three registers, same type" instructions. Dispatches on
// three sub-groups: bitwise logical ops, FP arithmetic/compare ops, and
// integer arithmetic/compare/shift ops, each with its own format map.
void Simulator::VisitNEON3Same(const Instruction* instr) {
  NEONFormatDecoder nfd(instr);
  SimVRegister& rd = ReadVRegister(instr->GetRd());
  SimVRegister& rn = ReadVRegister(instr->GetRn());
  SimVRegister& rm = ReadVRegister(instr->GetRm());

  if (instr->Mask(NEON3SameLogicalFMask) == NEON3SameLogicalFixed) {
    // Bitwise logical operations (AND, ORR, ...) use the logical format map.
    VectorFormat vf = nfd.GetVectorFormat(nfd.LogicalFormatMap());
    switch (instr->Mask(NEON3SameLogicalMask)) {
      case NEON_AND: and_(vf, rd, rn, rm); break;
      case NEON_ORR: orr(vf, rd, rn, rm); break;
      case NEON_ORN: orn(vf, rd, rn, rm); break;
      case NEON_EOR: eor(vf, rd, rn, rm); break;
      case NEON_BIC: bic(vf, rd, rn, rm); break;
      case NEON_BIF: bif(vf, rd, rn, rm); break;
      case NEON_BIT: bit(vf, rd, rn, rm); break;
      case NEON_BSL: bsl(vf, rd, rn, rm); break;
      default: VIXL_UNIMPLEMENTED();
    }
  } else if (instr->Mask(NEON3SameFPFMask) == NEON3SameFPFixed) {
    // Floating-point operations.
    VectorFormat vf = nfd.GetVectorFormat(nfd.FPFormatMap());
    switch (instr->Mask(NEON3SameFPMask)) {
      case NEON_FADD: fadd(vf, rd, rn, rm); break;
      case NEON_FSUB: fsub(vf, rd, rn, rm); break;
      case NEON_FMUL: fmul(vf, rd, rn, rm); break;
      case NEON_FDIV: fdiv(vf, rd, rn, rm); break;
      case NEON_FMAX: fmax(vf, rd, rn, rm); break;
      case NEON_FMIN: fmin(vf, rd, rn, rm); break;
      case NEON_FMAXNM: fmaxnm(vf, rd, rn, rm); break;
      case NEON_FMINNM: fminnm(vf, rd, rn, rm); break;
      case NEON_FMLA: fmla(vf, rd, rn, rm); break;
      case NEON_FMLS: fmls(vf, rd, rn, rm); break;
      case NEON_FMULX: fmulx(vf, rd, rn, rm); break;
      // FAC* compare absolute values; FCM* compare the values directly.
      case NEON_FACGE: fabscmp(vf, rd, rn, rm, ge); break;
      case NEON_FACGT: fabscmp(vf, rd, rn, rm, gt); break;
      case NEON_FCMEQ: fcmp(vf, rd, rn, rm, eq); break;
      case NEON_FCMGE: fcmp(vf, rd, rn, rm, ge); break;
      case NEON_FCMGT: fcmp(vf, rd, rn, rm, gt); break;
      case NEON_FRECPS: frecps(vf, rd, rn, rm); break;
      case NEON_FRSQRTS: frsqrts(vf, rd, rn, rm); break;
      case NEON_FABD: fabd(vf, rd, rn, rm); break;
      case NEON_FADDP: faddp(vf, rd, rn, rm); break;
      case NEON_FMAXP: fmaxp(vf, rd, rn, rm); break;
      case NEON_FMAXNMP: fmaxnmp(vf, rd, rn, rm); break;
      case NEON_FMINP: fminp(vf, rd, rn, rm); break;
      case NEON_FMINNMP: fminnmp(vf, rd, rn, rm); break;
      default: VIXL_UNIMPLEMENTED();
    }
  } else {
    // Integer operations. Saturation/rounding/halving variants are modelled
    // by post-processing the raw result via the LogicVRegister returned by
    // the base operation (e.g. add(...).UnsignedSaturate(vf)).
    VectorFormat vf = nfd.GetVectorFormat();
    switch (instr->Mask(NEON3SameMask)) {
      case NEON_ADD: add(vf, rd, rn, rm); break;
      case NEON_ADDP: addp(vf, rd, rn, rm); break;
      case NEON_CMEQ: cmp(vf, rd, rn, rm, eq); break;
      case NEON_CMGE: cmp(vf, rd, rn, rm, ge); break;
      case NEON_CMGT: cmp(vf, rd, rn, rm, gt); break;
      case NEON_CMHI: cmp(vf, rd, rn, rm, hi); break;
      case NEON_CMHS: cmp(vf, rd, rn, rm, hs); break;
      case NEON_CMTST: cmptst(vf, rd, rn, rm); break;
      case NEON_MLS: mls(vf, rd, rn, rm); break;
      case NEON_MLA: mla(vf, rd, rn, rm); break;
      case NEON_MUL: mul(vf, rd, rn, rm); break;
      case NEON_PMUL: pmul(vf, rd, rn, rm); break;
      case NEON_SMAX: smax(vf, rd, rn, rm); break;
      case NEON_SMAXP: smaxp(vf, rd, rn, rm); break;
      case NEON_SMIN: smin(vf, rd, rn, rm); break;
      case NEON_SMINP: sminp(vf, rd, rn, rm); break;
      case NEON_SUB: sub(vf, rd, rn, rm); break;
      case NEON_UMAX: umax(vf, rd, rn, rm); break;
      case NEON_UMAXP: umaxp(vf, rd, rn, rm); break;
      case NEON_UMIN: umin(vf, rd, rn, rm); break;
      case NEON_UMINP: uminp(vf, rd, rn, rm); break;
      case NEON_SSHL: sshl(vf, rd, rn, rm); break;
      case NEON_USHL: ushl(vf, rd, rn, rm); break;
      // absdiff's last argument selects signed (true) or unsigned (false).
      case NEON_SABD: absdiff(vf, rd, rn, rm, true); break;
      case NEON_UABD: absdiff(vf, rd, rn, rm, false); break;
      case NEON_SABA: saba(vf, rd, rn, rm); break;
      case NEON_UABA: uaba(vf, rd, rn, rm); break;
      case NEON_UQADD: add(vf, rd, rn, rm).UnsignedSaturate(vf); break;
      case NEON_SQADD: add(vf, rd, rn, rm).SignedSaturate(vf); break;
      case NEON_UQSUB: sub(vf, rd, rn, rm).UnsignedSaturate(vf); break;
      case NEON_SQSUB: sub(vf, rd, rn, rm).SignedSaturate(vf); break;
      case NEON_SQDMULH: sqdmulh(vf, rd, rn, rm); break;
      case NEON_SQRDMULH: sqrdmulh(vf, rd, rn, rm); break;
      case NEON_UQSHL: ushl(vf, rd, rn, rm).UnsignedSaturate(vf); break;
      case NEON_SQSHL: sshl(vf, rd, rn, rm).SignedSaturate(vf); break;
      case NEON_URSHL: ushl(vf, rd, rn, rm).Round(vf); break;
      case NEON_SRSHL: sshl(vf, rd, rn, rm).Round(vf); break;
      case NEON_UQRSHL:
        ushl(vf, rd, rn, rm).Round(vf).UnsignedSaturate(vf);
        break;
      case NEON_SQRSHL:
        sshl(vf, rd, rn, rm).Round(vf).SignedSaturate(vf);
        break;
      case NEON_UHADD: add(vf, rd, rn, rm).Uhalve(vf); break;
      case NEON_URHADD: add(vf, rd, rn, rm).Uhalve(vf).Round(vf); break;
      case NEON_SHADD: add(vf, rd, rn, rm).Halve(vf); break;
      case NEON_SRHADD: add(vf, rd, rn, rm).Halve(vf).Round(vf); break;
      case NEON_UHSUB: sub(vf, rd, rn, rm).Uhalve(vf); break;
      case NEON_SHSUB: sub(vf, rd, rn, rm).Halve(vf); break;
      default: VIXL_UNIMPLEMENTED();
    }
  }
}


// Visitor for "NEON three registers, different types": widening (…L/…L2),
// wide (…W/…W2) and narrowing-high (…HN/…HN2) arithmetic, plus polynomial
// multiplies. Widening/wide forms use the long format (vf_l); the narrowing
// "high half" forms use the narrow format (vf).
void Simulator::VisitNEON3Different(const Instruction* instr) {
  NEONFormatDecoder nfd(instr);
  VectorFormat vf = nfd.GetVectorFormat();
  VectorFormat vf_l = nfd.GetVectorFormat(nfd.LongIntegerFormatMap());

  SimVRegister& rd = ReadVRegister(instr->GetRd());
  SimVRegister& rn = ReadVRegister(instr->GetRn());
  SimVRegister& rm = ReadVRegister(instr->GetRm());

  switch (instr->Mask(NEON3DifferentMask)) {
    case NEON_PMULL: pmull(vf_l, rd, rn, rm); break;
    case NEON_PMULL2: pmull2(vf_l, rd, rn, rm); break;
    case NEON_UADDL: uaddl(vf_l, rd, rn, rm); break;
    case NEON_UADDL2: uaddl2(vf_l, rd, rn, rm); break;
    case NEON_SADDL: saddl(vf_l, rd, rn, rm); break;
    case NEON_SADDL2: saddl2(vf_l, rd, rn, rm); break;
    case NEON_USUBL: usubl(vf_l, rd, rn, rm); break;
    case NEON_USUBL2: usubl2(vf_l, rd, rn, rm); break;
    case NEON_SSUBL: ssubl(vf_l, rd, rn, rm); break;
    case NEON_SSUBL2: ssubl2(vf_l, rd, rn, rm); break;
    case NEON_SABAL: sabal(vf_l, rd, rn, rm); break;
    case NEON_SABAL2: sabal2(vf_l, rd, rn, rm); break;
    case NEON_UABAL: uabal(vf_l, rd, rn, rm); break;
    case NEON_UABAL2: uabal2(vf_l, rd, rn, rm); break;
    case NEON_SABDL: sabdl(vf_l, rd, rn, rm); break;
    case NEON_SABDL2: sabdl2(vf_l, rd, rn, rm); break;
    case NEON_UABDL: uabdl(vf_l, rd, rn, rm); break;
    case NEON_UABDL2: uabdl2(vf_l, rd, rn, rm); break;
    case NEON_SMLAL: smlal(vf_l, rd, rn, rm); break;
    case NEON_SMLAL2: smlal2(vf_l, rd, rn, rm); break;
    case NEON_UMLAL: umlal(vf_l, rd, rn, rm); break;
    case NEON_UMLAL2: umlal2(vf_l, rd, rn, rm); break;
    case NEON_SMLSL: smlsl(vf_l, rd, rn, rm); break;
    case NEON_SMLSL2: smlsl2(vf_l, rd, rn, rm); break;
    case NEON_UMLSL: umlsl(vf_l, rd, rn, rm); break;
    case NEON_UMLSL2: umlsl2(vf_l, rd, rn, rm); break;
    case NEON_SMULL: smull(vf_l, rd, rn, rm); break;
    case NEON_SMULL2: smull2(vf_l, rd, rn, rm); break;
    case NEON_UMULL: umull(vf_l, rd, rn, rm); break;
    case NEON_UMULL2: umull2(vf_l, rd, rn, rm); break;
    case NEON_SQDMLAL: sqdmlal(vf_l, rd, rn, rm); break;
    case NEON_SQDMLAL2: sqdmlal2(vf_l, rd, rn, rm); break;
    case NEON_SQDMLSL: sqdmlsl(vf_l, rd, rn, rm); break;
    case NEON_SQDMLSL2: sqdmlsl2(vf_l, rd, rn, rm); break;
    case NEON_SQDMULL: sqdmull(vf_l, rd, rn, rm); break;
    case NEON_SQDMULL2: sqdmull2(vf_l, rd, rn, rm); break;
    case NEON_UADDW: uaddw(vf_l, rd, rn, rm); break;
    case NEON_UADDW2: uaddw2(vf_l, rd, rn, rm); break;
    case NEON_SADDW: saddw(vf_l, rd, rn, rm); break;
    case NEON_SADDW2: saddw2(vf_l, rd, rn, rm); break;
    case NEON_USUBW: usubw(vf_l, rd, rn, rm); break;
    case NEON_USUBW2: usubw2(vf_l, rd, rn, rm); break;
    case NEON_SSUBW: ssubw(vf_l, rd, rn, rm); break;
    case NEON_SSUBW2: ssubw2(vf_l, rd, rn, rm); break;
    // Narrowing "high half" forms write the narrow format.
    case NEON_ADDHN: addhn(vf, rd, rn, rm); break;
    case NEON_ADDHN2: addhn2(vf, rd, rn, rm); break;
    case NEON_RADDHN: raddhn(vf, rd, rn, rm); break;
    case NEON_RADDHN2: raddhn2(vf, rd, rn, rm); break;
    case NEON_SUBHN: subhn(vf, rd, rn, rm); break;
    case NEON_SUBHN2: subhn2(vf, rd, rn, rm); break;
    case NEON_RSUBHN: rsubhn(vf, rd, rn, rm); break;
    case NEON_RSUBHN2: rsubhn2(vf, rd, rn, rm); break;
    default: VIXL_UNIMPLEMENTED();
  }
}


// Visitor for "NEON across lanes" (horizontal reduction) instructions.
void Simulator::VisitNEONAcrossLanes(const Instruction* instr) {
  NEONFormatDecoder nfd(instr);

  SimVRegister& rd = ReadVRegister(instr->GetRd());
  SimVRegister& rn = ReadVRegister(instr->GetRn());

  // The input operand's VectorFormat is passed for these instructions.
  if (instr->Mask(NEONAcrossLanesFPFMask) == NEONAcrossLanesFPFixed) {
    VectorFormat vf = nfd.GetVectorFormat(nfd.FPFormatMap());

    switch (instr->Mask(NEONAcrossLanesFPMask)) {
      case NEON_FMAXV: fmaxv(vf, rd, rn); break;
      case NEON_FMINV: fminv(vf, rd, rn); break;
      case NEON_FMAXNMV: fmaxnmv(vf, rd, rn); break;
      case NEON_FMINNMV: fminnmv(vf, rd, rn); break;
      default: VIXL_UNIMPLEMENTED();
    }
  } else {
    VectorFormat vf = nfd.GetVectorFormat();

    switch (instr->Mask(NEONAcrossLanesMask)) {
      case NEON_ADDV: addv(vf, rd, rn); break;
      case NEON_SMAXV: smaxv(vf, rd, rn); break;
      case NEON_SMINV: sminv(vf, rd, rn); break;
      case NEON_UMAXV: umaxv(vf, rd, rn); break;
      case NEON_UMINV: uminv(vf, rd, rn); break;
      case NEON_SADDLV: saddlv(vf, rd, rn); break;
      case NEON_UADDLV: uaddlv(vf, rd, rn); break;
      default: VIXL_UNIMPLEMENTED();
    }
  }
}


// Visitor for "NEON by indexed element": multiply(-accumulate) instructions
// where the second operand is a single lane of rm. The lane index is decoded
// from the H:L(:M) bits; the member-function pointer Op selects the logic
// helper, then a single indirect call at the end performs the operation.
void Simulator::VisitNEONByIndexedElement(const Instruction* instr) {
  NEONFormatDecoder nfd(instr);
  VectorFormat vf_r = nfd.GetVectorFormat();
  VectorFormat vf = nfd.GetVectorFormat(nfd.LongIntegerFormatMap());

  SimVRegister& rd = ReadVRegister(instr->GetRd());
  SimVRegister& rn = ReadVRegister(instr->GetRn());

  ByElementOp Op = NULL;

  int rm_reg = instr->GetRm();
  int index = (instr->GetNEONH() << 1) | instr->GetNEONL();
  if (instr->GetNEONSize() == 1) {
    // Half-word lanes: only four bits encode the register; the M bit extends
    // the lane index instead.
    rm_reg &= 0xf;
    index = (index << 1) | instr->GetNEONM();
  }

  switch (instr->Mask(NEONByIndexedElementMask)) {
    // Non-widening forms operate on the regular format (vf_r).
    case NEON_MUL_byelement:
      Op = &Simulator::mul;
      vf = vf_r;
      break;
    case NEON_MLA_byelement:
      Op = &Simulator::mla;
      vf = vf_r;
      break;
    case NEON_MLS_byelement:
      Op = &Simulator::mls;
      vf = vf_r;
      break;
    case NEON_SQDMULH_byelement:
      Op = &Simulator::sqdmulh;
      vf = vf_r;
      break;
    case NEON_SQRDMULH_byelement:
      Op = &Simulator::sqrdmulh;
      vf = vf_r;
      break;
    // Widening forms keep the long format; Q selects the "2" (upper-half)
    // variant.
    case NEON_SMULL_byelement:
      if (instr->Mask(NEON_Q)) {
        Op = &Simulator::smull2;
      } else {
        Op = &Simulator::smull;
      }
      break;
    case NEON_UMULL_byelement:
      if (instr->Mask(NEON_Q)) {
        Op = &Simulator::umull2;
      } else {
        Op = &Simulator::umull;
      }
      break;
    case NEON_SMLAL_byelement:
      if (instr->Mask(NEON_Q)) {
        Op = &Simulator::smlal2;
      } else {
        Op = &Simulator::smlal;
      }
      break;
    case NEON_UMLAL_byelement:
      if (instr->Mask(NEON_Q)) {
        Op = &Simulator::umlal2;
      } else {
        Op = &Simulator::umlal;
      }
      break;
    case NEON_SMLSL_byelement:
      if (instr->Mask(NEON_Q)) {
        Op = &Simulator::smlsl2;
      } else {
        Op = &Simulator::smlsl;
      }
      break;
    case NEON_UMLSL_byelement:
      if (instr->Mask(NEON_Q)) {
        Op = &Simulator::umlsl2;
      } else {
        Op = &Simulator::umlsl;
      }
      break;
    case NEON_SQDMULL_byelement:
      if (instr->Mask(NEON_Q)) {
        Op = &Simulator::sqdmull2;
      } else {
        Op = &Simulator::sqdmull;
      }
      break;
    case NEON_SQDMLAL_byelement:
      if (instr->Mask(NEON_Q)) {
        Op = &Simulator::sqdmlal2;
      } else {
        Op = &Simulator::sqdmlal;
      }
      break;
    case NEON_SQDMLSL_byelement:
      if (instr->Mask(NEON_Q)) {
        Op = &Simulator::sqdmlsl2;
      } else {
        Op = &Simulator::sqdmlsl;
      }
      break;
    default:
      // FP by-element forms re-derive the index from H(:L) and use the FP
      // format map.
      index = instr->GetNEONH();
      if ((instr->GetFPType() & 1) == 0) {
        index = (index << 1) | instr->GetNEONL();
      }

      vf = nfd.GetVectorFormat(nfd.FPFormatMap());

      switch (instr->Mask(NEONByIndexedElementFPMask)) {
        case NEON_FMUL_byelement: Op = &Simulator::fmul; break;
        case NEON_FMLA_byelement: Op = &Simulator::fmla; break;
        case NEON_FMLS_byelement: Op = &Simulator::fmls; break;
        case NEON_FMULX_byelement: Op = &Simulator::fmulx; break;
        default: VIXL_UNIMPLEMENTED();
      }
  }

  (this->*Op)(vf, rd, rn, ReadVRegister(rm_reg), index);
}


// Visitor for NEON copy instructions: INS (element/general), UMOV, SMOV and
// DUP (element/general). The lane index is encoded in imm5 above the lowest
// set bit, which also determines the lane size.
void Simulator::VisitNEONCopy(const Instruction* instr) {
  NEONFormatDecoder nfd(instr, NEONFormatDecoder::TriangularFormatMap());
  VectorFormat vf = nfd.GetVectorFormat();

  SimVRegister& rd = ReadVRegister(instr->GetRd());
  SimVRegister& rn = ReadVRegister(instr->GetRn());
  int imm5 = instr->GetImmNEON5();
  int tz = CountTrailingZeros(imm5, 32);
  int reg_index = imm5 >> (tz + 1);

  if (instr->Mask(NEONCopyInsElementMask) == NEON_INS_ELEMENT) {
    int imm4 = instr->GetImmNEON4();
    int rn_index = imm4 >> tz;
    ins_element(vf, rd, reg_index, rn, rn_index);
  } else if (instr->Mask(NEONCopyInsGeneralMask) == NEON_INS_GENERAL) {
    ins_immediate(vf, rd, reg_index, ReadXRegister(instr->GetRn()));
  } else if (instr->Mask(NEONCopyUmovMask) == NEON_UMOV) {
    uint64_t value = LogicVRegister(rn).Uint(vf, reg_index);
    value &= MaxUintFromFormat(vf);
    WriteXRegister(instr->GetRd(), value);
  } else if (instr->Mask(NEONCopyUmovMask) == NEON_SMOV) {
    // SMOV sign-extends into a W or X register depending on Q.
    int64_t value = LogicVRegister(rn).Int(vf, reg_index);
    if (instr->GetNEONQ()) {
      WriteXRegister(instr->GetRd(), value);
    } else {
      WriteWRegister(instr->GetRd(), (int32_t)value);
    }
  } else if (instr->Mask(NEONCopyDupElementMask) == NEON_DUP_ELEMENT) {
    dup_element(vf, rd, rn, reg_index);
  } else if (instr->Mask(NEONCopyDupGeneralMask) == NEON_DUP_GENERAL) {
    dup_immediate(vf, rd, ReadXRegister(instr->GetRn()));
  } else {
    VIXL_UNIMPLEMENTED();
  }
}


// Visitor for the NEON EXT (byte extract from register pair) instruction.
void Simulator::VisitNEONExtract(const Instruction* instr) {
  NEONFormatDecoder nfd(instr, NEONFormatDecoder::LogicalFormatMap());
  VectorFormat vf = nfd.GetVectorFormat();
  SimVRegister& rd = ReadVRegister(instr->GetRd());
  SimVRegister& rn = ReadVRegister(instr->GetRn());
  SimVRegister& rm = ReadVRegister(instr->GetRm());
  if (instr->Mask(NEONExtractMask) == NEON_EXT) {
    int index = instr->GetImmNEONExt();
    ext(vf, rd, rn, rm, index);
  } else {
    VIXL_UNIMPLEMENTED();
  }
}


// Shared implementation for LD1/LD2/LD3/LD4 and ST1/ST2/ST3/ST4
// (multiple structures), in both Offset and PostIndex addressing modes.
// The LD1/ST1 multi-register cases deliberately fall through so that an
// N-register form performs the (N-1)-register work plus one more transfer.
void Simulator::NEONLoadStoreMultiStructHelper(const Instruction* instr,
                                               AddrMode addr_mode) {
  NEONFormatDecoder nfd(instr, NEONFormatDecoder::LoadStoreFormatMap());
  VectorFormat vf = nfd.GetVectorFormat();

  uint64_t addr_base = ReadXRegister(instr->GetRn(), Reg31IsStackPointer);
  int reg_size = RegisterSizeInBytesFromFormat(vf);

  // Up to four consecutive registers (wrapping at v31) and their addresses.
  int reg[4];
  uint64_t addr[4];
  for (int i = 0; i < 4; i++) {
    reg[i] = (instr->GetRt() + i) % kNumberOfVRegisters;
    addr[i] = addr_base + (i * reg_size);
  }
  int count = 1;
  bool log_read = true;

  // Bit 23 determines whether this is an offset or post-index addressing mode.
  // In offset mode, bits 20 to 16 should be zero; these bits encode the
  // register or immediate in post-index mode.
  if ((instr->ExtractBit(23) == 0) && (instr->ExtractBits(20, 16) != 0)) {
    VIXL_UNREACHABLE();
  }

  // We use the PostIndex mask here, as it works in this case for both Offset
  // and PostIndex addressing.
  switch (instr->Mask(NEONLoadStoreMultiStructPostIndexMask)) {
    case NEON_LD1_4v:
    case NEON_LD1_4v_post:
      ld1(vf, ReadVRegister(reg[3]), addr[3]);
      count++;
      VIXL_FALLTHROUGH();
    case NEON_LD1_3v:
    case NEON_LD1_3v_post:
      ld1(vf, ReadVRegister(reg[2]), addr[2]);
      count++;
      VIXL_FALLTHROUGH();
    case NEON_LD1_2v:
    case NEON_LD1_2v_post:
      ld1(vf, ReadVRegister(reg[1]), addr[1]);
      count++;
      VIXL_FALLTHROUGH();
    case NEON_LD1_1v:
    case NEON_LD1_1v_post:
      ld1(vf, ReadVRegister(reg[0]), addr[0]);
      break;
    case NEON_ST1_4v:
    case NEON_ST1_4v_post:
      st1(vf, ReadVRegister(reg[3]), addr[3]);
      count++;
      VIXL_FALLTHROUGH();
    case NEON_ST1_3v:
    case NEON_ST1_3v_post:
      st1(vf, ReadVRegister(reg[2]), addr[2]);
      count++;
      VIXL_FALLTHROUGH();
    case NEON_ST1_2v:
    case NEON_ST1_2v_post:
      st1(vf, ReadVRegister(reg[1]), addr[1]);
      count++;
      VIXL_FALLTHROUGH();
    case NEON_ST1_1v:
    case NEON_ST1_1v_post:
      st1(vf, ReadVRegister(reg[0]), addr[0]);
      log_read = false;
      break;
    case NEON_LD2_post:
    case NEON_LD2:
      ld2(vf, ReadVRegister(reg[0]), ReadVRegister(reg[1]), addr[0]);
      count = 2;
      break;
    case NEON_ST2:
    case NEON_ST2_post:
      st2(vf, ReadVRegister(reg[0]), ReadVRegister(reg[1]), addr[0]);
      count = 2;
      log_read = false;
      break;
    case NEON_LD3_post:
    case NEON_LD3:
      ld3(vf,
          ReadVRegister(reg[0]),
          ReadVRegister(reg[1]),
          ReadVRegister(reg[2]),
          addr[0]);
      count = 3;
      break;
    case NEON_ST3:
    case NEON_ST3_post:
      st3(vf,
          ReadVRegister(reg[0]),
          ReadVRegister(reg[1]),
          ReadVRegister(reg[2]),
          addr[0]);
      count = 3;
      log_read = false;
      break;
    case NEON_ST4:
    case NEON_ST4_post:
      st4(vf,
          ReadVRegister(reg[0]),
          ReadVRegister(reg[1]),
          ReadVRegister(reg[2]),
          ReadVRegister(reg[3]),
          addr[0]);
      count = 4;
      log_read = false;
      break;
    case NEON_LD4_post:
    case NEON_LD4:
      ld4(vf,
          ReadVRegister(reg[0]),
          ReadVRegister(reg[1]),
          ReadVRegister(reg[2]),
          ReadVRegister(reg[3]),
          addr[0]);
      count = 4;
      break;
    default:
      VIXL_UNIMPLEMENTED();
  }

  // Explicitly log the register update whilst we have type information.
  for (int i = 0; i < count; i++) {
    // For de-interleaving loads, only print the base address.
    int lane_size = LaneSizeInBytesFromFormat(vf);
    PrintRegisterFormat format = GetPrintRegisterFormatTryFP(
        GetPrintRegisterFormatForSize(reg_size, lane_size));
    if (log_read) {
      LogVRead(addr_base, reg[i], format);
    } else {
      LogVWrite(addr_base, reg[i], format);
    }
  }

  if (addr_mode == PostIndex) {
    int rm = instr->GetRm();
    // The immediate post index addressing mode is indicated by rm = 31.
    // The immediate is implied by the number of vector registers used.
    addr_base += (rm == 31) ? RegisterSizeInBytesFromFormat(vf) * count
                            : ReadXRegister(rm);
    WriteXRegister(instr->GetRn(), addr_base);
  } else {
    VIXL_ASSERT(addr_mode == Offset);
  }
}


void Simulator::VisitNEONLoadStoreMultiStruct(const Instruction* instr) {
  NEONLoadStoreMultiStructHelper(instr, Offset);
}


void Simulator::VisitNEONLoadStoreMultiStructPostIndex(
    const Instruction* instr) {
  NEONLoadStoreMultiStructHelper(instr, PostIndex);
}


// Shared implementation for single-structure (per-lane and replicating)
// NEON loads and stores, in both Offset and PostIndex addressing modes.
void Simulator::NEONLoadStoreSingleStructHelper(const Instruction* instr,
                                                AddrMode addr_mode) {
  uint64_t addr = ReadXRegister(instr->GetRn(), Reg31IsStackPointer);
  int rt = instr->GetRt();

  // Bit 23 determines whether this is an offset or post-index addressing mode.
  // In offset mode, bits 20 to 16 should be zero; these bits encode the
  // register or immediate in post-index mode.
  if ((instr->ExtractBit(23) == 0) && (instr->ExtractBits(20, 16) != 0)) {
    VIXL_UNREACHABLE();
  }

  // We use the PostIndex mask here, as it works in this case for both Offset
  // and PostIndex addressing.
  bool do_load = false;

  NEONFormatDecoder nfd(instr, NEONFormatDecoder::LoadStoreFormatMap());
  VectorFormat vf_t = nfd.GetVectorFormat();

  // First pass: determine the lane format (and, for LD*R, perform the
  // replicating load). The LD* cases fall through to the matching ST* cases
  // to share the format selection.
  VectorFormat vf = kFormat16B;
  switch (instr->Mask(NEONLoadStoreSingleStructPostIndexMask)) {
    case NEON_LD1_b:
    case NEON_LD1_b_post:
    case NEON_LD2_b:
    case NEON_LD2_b_post:
    case NEON_LD3_b:
    case NEON_LD3_b_post:
    case NEON_LD4_b:
    case NEON_LD4_b_post:
      do_load = true;
      VIXL_FALLTHROUGH();
    case NEON_ST1_b:
    case NEON_ST1_b_post:
    case NEON_ST2_b:
    case NEON_ST2_b_post:
    case NEON_ST3_b:
    case NEON_ST3_b_post:
    case NEON_ST4_b:
    case NEON_ST4_b_post:
      break;

    case NEON_LD1_h:
    case NEON_LD1_h_post:
    case NEON_LD2_h:
    case NEON_LD2_h_post:
    case NEON_LD3_h:
    case NEON_LD3_h_post:
    case NEON_LD4_h:
    case NEON_LD4_h_post:
      do_load = true;
      VIXL_FALLTHROUGH();
    case NEON_ST1_h:
    case NEON_ST1_h_post:
    case NEON_ST2_h:
    case NEON_ST2_h_post:
    case NEON_ST3_h:
    case NEON_ST3_h_post:
    case NEON_ST4_h:
    case NEON_ST4_h_post:
      vf = kFormat8H;
      break;
    case NEON_LD1_s:
    case NEON_LD1_s_post:
    case NEON_LD2_s:
    case NEON_LD2_s_post:
    case NEON_LD3_s:
    case NEON_LD3_s_post:
    case NEON_LD4_s:
    case NEON_LD4_s_post:
      do_load = true;
      VIXL_FALLTHROUGH();
    case NEON_ST1_s:
    case NEON_ST1_s_post:
    case NEON_ST2_s:
    case NEON_ST2_s_post:
    case NEON_ST3_s:
    case NEON_ST3_s_post:
    case NEON_ST4_s:
    case NEON_ST4_s_post: {
      // The "_d" encodings are the "_s" encodings with size<0> set, so the
      // "_s" cases also cover the D-lane forms; NEONLSSize bit 0 picks.
      VIXL_STATIC_ASSERT((NEON_LD1_s | (1 << NEONLSSize_offset)) == NEON_LD1_d);
      VIXL_STATIC_ASSERT((NEON_LD1_s_post | (1 << NEONLSSize_offset)) ==
                         NEON_LD1_d_post);
      VIXL_STATIC_ASSERT((NEON_ST1_s | (1 << NEONLSSize_offset)) == NEON_ST1_d);
      VIXL_STATIC_ASSERT((NEON_ST1_s_post | (1 << NEONLSSize_offset)) ==
                         NEON_ST1_d_post);
      vf = ((instr->GetNEONLSSize() & 1) == 0) ? kFormat4S : kFormat2D;
      break;
    }

    case NEON_LD1R:
    case NEON_LD1R_post: {
      vf = vf_t;
      ld1r(vf, ReadVRegister(rt), addr);
      do_load = true;
      break;
    }

    case NEON_LD2R:
    case NEON_LD2R_post: {
      vf = vf_t;
      int rt2 = (rt + 1) % kNumberOfVRegisters;
      ld2r(vf, ReadVRegister(rt), ReadVRegister(rt2), addr);
      do_load = true;
      break;
    }

    case NEON_LD3R:
    case NEON_LD3R_post: {
      vf = vf_t;
      int rt2 = (rt + 1) % kNumberOfVRegisters;
      int rt3 = (rt2 + 1) % kNumberOfVRegisters;
      ld3r(vf, ReadVRegister(rt), ReadVRegister(rt2), ReadVRegister(rt3), addr);
      do_load = true;
      break;
    }

    case NEON_LD4R:
    case NEON_LD4R_post: {
      vf = vf_t;
      int rt2 = (rt + 1) % kNumberOfVRegisters;
      int rt3 = (rt2 + 1) % kNumberOfVRegisters;
      int rt4 = (rt3 + 1) % kNumberOfVRegisters;
      ld4r(vf,
           ReadVRegister(rt),
           ReadVRegister(rt2),
           ReadVRegister(rt3),
           ReadVRegister(rt4),
           addr);
      do_load = true;
      break;
    }
    default:
      VIXL_UNIMPLEMENTED();
  }

  PrintRegisterFormat print_format =
      GetPrintRegisterFormatTryFP(GetPrintRegisterFormat(vf));
  // Make sure that the print_format only includes a single lane.
  print_format =
      static_cast<PrintRegisterFormat>(print_format & ~kPrintRegAsVectorMask);

  // Second pass: perform the per-lane transfer (LD*R forms were already
  // handled above and fall into none of these cases' loads).
  int esize = LaneSizeInBytesFromFormat(vf);
  int index_shift = LaneSizeInBytesLog2FromFormat(vf);
  int lane = instr->GetNEONLSIndex(index_shift);
  int scale = 0;
  int rt2 = (rt + 1) % kNumberOfVRegisters;
  int rt3 = (rt2 + 1) % kNumberOfVRegisters;
  int rt4 = (rt3 + 1) % kNumberOfVRegisters;
  switch (instr->Mask(NEONLoadStoreSingleLenMask)) {
    case NEONLoadStoreSingle1:
      scale = 1;
      if (do_load) {
        ld1(vf, ReadVRegister(rt), lane, addr);
        LogVRead(addr, rt, print_format, lane);
      } else {
        st1(vf, ReadVRegister(rt), lane, addr);
        LogVWrite(addr, rt, print_format, lane);
      }
      break;
    case NEONLoadStoreSingle2:
      scale = 2;
      if (do_load) {
        ld2(vf, ReadVRegister(rt), ReadVRegister(rt2), lane, addr);
        LogVRead(addr, rt, print_format, lane);
        LogVRead(addr + esize, rt2, print_format, lane);
      } else {
        st2(vf, ReadVRegister(rt), ReadVRegister(rt2), lane, addr);
        LogVWrite(addr, rt, print_format, lane);
        LogVWrite(addr + esize, rt2, print_format, lane);
      }
      break;
    case NEONLoadStoreSingle3:
      scale = 3;
      if (do_load) {
        ld3(vf,
            ReadVRegister(rt),
            ReadVRegister(rt2),
            ReadVRegister(rt3),
            lane,
            addr);
        LogVRead(addr, rt, print_format, lane);
        LogVRead(addr + esize, rt2, print_format, lane);
        LogVRead(addr + (2 * esize), rt3, print_format, lane);
      } else {
        st3(vf,
            ReadVRegister(rt),
            ReadVRegister(rt2),
            ReadVRegister(rt3),
            lane,
            addr);
        LogVWrite(addr, rt, print_format, lane);
        LogVWrite(addr + esize, rt2, print_format, lane);
        LogVWrite(addr + (2 * esize), rt3, print_format, lane);
      }
      break;
    case NEONLoadStoreSingle4:
      scale = 4;
      if (do_load) {
        ld4(vf,
            ReadVRegister(rt),
            ReadVRegister(rt2),
            ReadVRegister(rt3),
            ReadVRegister(rt4),
            lane,
            addr);
        LogVRead(addr, rt, print_format, lane);
        LogVRead(addr + esize, rt2, print_format, lane);
        LogVRead(addr + (2 * esize), rt3, print_format, lane);
        LogVRead(addr + (3 * esize), rt4, print_format, lane);
      } else {
        st4(vf,
            ReadVRegister(rt),
            ReadVRegister(rt2),
            ReadVRegister(rt3),
            ReadVRegister(rt4),
            lane,
            addr);
        LogVWrite(addr, rt, print_format, lane);
        LogVWrite(addr + esize, rt2, print_format, lane);
        LogVWrite(addr + (2 * esize), rt3, print_format, lane);
        LogVWrite(addr + (3 * esize), rt4, print_format, lane);
      }
      break;
    default:
      VIXL_UNIMPLEMENTED();
  }

  if (addr_mode == PostIndex) {
    int rm = instr->GetRm();
    // rm == 31 selects the immediate form; the immediate is the total number
    // of bytes transferred (structure count * lane size).
    int lane_size = LaneSizeInBytesFromFormat(vf);
    WriteXRegister(instr->GetRn(),
                   addr +
                       ((rm == 31) ? (scale * lane_size) : ReadXRegister(rm)));
  }
}


void Simulator::VisitNEONLoadStoreSingleStruct(const Instruction* instr) {
  NEONLoadStoreSingleStructHelper(instr, Offset);
}


void Simulator::VisitNEONLoadStoreSingleStructPostIndex(
    const Instruction* instr) {
  NEONLoadStoreSingleStructHelper(instr, PostIndex);
}


// Visitor for NEON modified-immediate instructions (MOVI, MVNI, ORR, BIC,
// FMOV immediate). The cmode and op fields select both the lane format and
// the expansion of the 8-bit immediate; see the Arm ARM "AdvSIMD modified
// immediate" encoding tables.
void Simulator::VisitNEONModifiedImmediate(const Instruction* instr) {
  SimVRegister& rd = ReadVRegister(instr->GetRd());
  int cmode = instr->GetNEONCmode();
  int cmode_3_1 = (cmode >> 1) & 7;
  int cmode_3 = (cmode >> 3) & 1;
  int cmode_2 = (cmode >> 2) & 1;
  int cmode_1 = (cmode >> 1) & 1;
  int cmode_0 = cmode & 1;
  int q = instr->GetNEONQ();
  int op_bit = instr->GetNEONModImmOp();
  uint64_t imm8 = instr->GetImmNEONabcdefgh();

  // Find the format and immediate value
  uint64_t imm = 0;
  VectorFormat vform = kFormatUndefined;
  switch (cmode_3_1) {
    case 0x0:
    case 0x1:
    case 0x2:
    case 0x3:
      // 32-bit lanes, imm8 shifted by 0/8/16/24 bits.
      vform = (q == 1) ? kFormat4S : kFormat2S;
      imm = imm8 << (8 * cmode_3_1);
      break;
    case 0x4:
    case 0x5:
      // 16-bit lanes, imm8 shifted by 0/8 bits.
      vform = (q == 1) ? kFormat8H : kFormat4H;
      imm = imm8 << (8 * cmode_1);
      break;
    case 0x6:
      // 32-bit lanes, "shifting ones" forms.
      vform = (q == 1) ? kFormat4S : kFormat2S;
      if (cmode_0 == 0) {
        imm = imm8 << 8 | 0x000000ff;
      } else {
        imm = imm8 << 16 | 0x0000ffff;
      }
      break;
    case 0x7:
      if (cmode_0 == 0 && op_bit == 0) {
        // Per-byte immediate.
        vform = q ? kFormat16B : kFormat8B;
        imm = imm8;
      } else if (cmode_0 == 0 && op_bit == 1) {
        // 64-bit immediate: each imm8 bit expands to a full byte.
        vform = q ? kFormat2D : kFormat1D;
        imm = 0;
        for (int i = 0; i < 8; ++i) {
          if (imm8 & (1 << i)) {
            imm |= (UINT64_C(0xff) << (8 * i));
          }
        }
      } else {  // cmode_0 == 1, cmode == 0xf.
        // FMOV (vector, immediate) forms.
        if (op_bit == 0) {
          vform = q ? kFormat4S : kFormat2S;
          imm = FloatToRawbits(instr->GetImmNEONFP32());
        } else if (q == 1) {
          vform = kFormat2D;
          imm = DoubleToRawbits(instr->GetImmNEONFP64());
        } else {
          VIXL_ASSERT((q == 0) && (op_bit == 1) && (cmode == 0xf));
          VisitUnallocated(instr);
        }
      }
      break;
    default:
      VIXL_UNREACHABLE();
      break;
  }

  // Find the operation
  NEONModifiedImmediateOp op;
  if (cmode_3 == 0) {
    if (cmode_0 == 0) {
      op = op_bit ? NEONModifiedImmediate_MVNI : NEONModifiedImmediate_MOVI;
    } else {  // cmode<0> == '1'
      op = op_bit ? NEONModifiedImmediate_BIC : NEONModifiedImmediate_ORR;
    }
  } else {  // cmode<3> == '1'
    if (cmode_2 == 0) {
      if (cmode_0 == 0) {
        op = op_bit ? NEONModifiedImmediate_MVNI : NEONModifiedImmediate_MOVI;
      } else {  // cmode<0> == '1'
        op = op_bit ? NEONModifiedImmediate_BIC : NEONModifiedImmediate_ORR;
      }
    } else {  // cmode<2> == '1'
      if (cmode_1 == 0) {
        op = op_bit ? NEONModifiedImmediate_MVNI : NEONModifiedImmediate_MOVI;
      } else {  // cmode<1> == '1'
        // Both cmode<0> values decode to MOVI here (MOVI 64-bit and FMOV
        // forms); the branches are kept separate to mirror the encoding
        // table.
        if (cmode_0 == 0) {
          op = NEONModifiedImmediate_MOVI;
        } else {  // cmode<0> == '1'
          op = NEONModifiedImmediate_MOVI;
        }
      }
    }
  }

  // Call the logic function
  if (op == NEONModifiedImmediate_ORR) {
    orr(vform, rd, rd, imm);
  } else if (op == NEONModifiedImmediate_BIC) {
    bic(vform, rd, rd, imm);
  } else if (op == NEONModifiedImmediate_MOVI) {
    movi(vform, rd, imm);
  } else if (op == NEONModifiedImmediate_MVNI) {
    mvni(vform, rd, imm);
  } else {
    VisitUnimplemented(instr);
  }
}


// Visitor for NEON scalar two-register miscellaneous instructions: integer
// compare-against-zero, abs/neg (with saturating variants), and FP
// compare/convert/reciprocal-estimate forms.
void Simulator::VisitNEONScalar2RegMisc(const Instruction* instr) {
  NEONFormatDecoder nfd(instr, NEONFormatDecoder::ScalarFormatMap());
  VectorFormat vf = nfd.GetVectorFormat();

  SimVRegister& rd = ReadVRegister(instr->GetRd());
  SimVRegister& rn = ReadVRegister(instr->GetRn());

  if (instr->Mask(NEON2RegMiscOpcode) <= NEON_NEG_scalar_opcode) {
    // These instructions all use a two bit size field, except NOT and RBIT,
    // which use the field to encode the operation.
    switch (instr->Mask(NEONScalar2RegMiscMask)) {
      case NEON_CMEQ_zero_scalar: cmp(vf, rd, rn, 0, eq); break;
      case NEON_CMGE_zero_scalar: cmp(vf, rd, rn, 0, ge); break;
      case NEON_CMGT_zero_scalar: cmp(vf, rd, rn, 0, gt); break;
      case NEON_CMLT_zero_scalar: cmp(vf, rd, rn, 0, lt); break;
      case NEON_CMLE_zero_scalar: cmp(vf, rd, rn, 0, le); break;
      case NEON_ABS_scalar: abs(vf, rd, rn); break;
      case NEON_SQABS_scalar: abs(vf, rd, rn).SignedSaturate(vf); break;
      case NEON_NEG_scalar: neg(vf, rd, rn); break;
      case NEON_SQNEG_scalar: neg(vf, rd, rn).SignedSaturate(vf); break;
      case NEON_SUQADD_scalar: suqadd(vf, rd, rn); break;
      case NEON_USQADD_scalar: usqadd(vf, rd, rn); break;
      default:
        VIXL_UNIMPLEMENTED();
        break;
    }
  } else {
    VectorFormat fpf = nfd.GetVectorFormat(nfd.FPScalarFormatMap());
    FPRounding fpcr_rounding = static_cast<FPRounding>(ReadFpcr().GetRMode());

    // These instructions all use a one bit size field, except SQXTUN, SQXTN
    // and UQXTN, which use a two bit size field.
    switch (instr->Mask(NEONScalar2RegMiscFPMask)) {
      case NEON_FRECPE_scalar: frecpe(fpf, rd, rn, fpcr_rounding); break;
      case NEON_FRECPX_scalar: frecpx(fpf, rd, rn); break;
      case NEON_FRSQRTE_scalar: frsqrte(fpf, rd, rn); break;
      case NEON_FCMGT_zero_scalar: fcmp_zero(fpf, rd, rn, gt); break;
      case NEON_FCMGE_zero_scalar: fcmp_zero(fpf, rd, rn, ge); break;
      case NEON_FCMEQ_zero_scalar: fcmp_zero(fpf, rd, rn, eq); break;
      case NEON_FCMLE_zero_scalar: fcmp_zero(fpf, rd, rn, le); break;
      case NEON_FCMLT_zero_scalar: fcmp_zero(fpf, rd, rn, lt); break;
      case NEON_SCVTF_scalar: scvtf(fpf, rd, rn, 0, fpcr_rounding); break;
      case NEON_UCVTF_scalar: ucvtf(fpf, rd, rn, 0, fpcr_rounding); break;
      // FCVT[NPMZA][SU]: float-to-int with explicit rounding mode.
      case NEON_FCVTNS_scalar: fcvts(fpf, rd, rn, FPTieEven); break;
      case NEON_FCVTNU_scalar: fcvtu(fpf, rd, rn, FPTieEven); break;
      case NEON_FCVTPS_scalar: fcvts(fpf, rd, rn, FPPositiveInfinity); break;
      case NEON_FCVTPU_scalar: fcvtu(fpf, rd, rn, FPPositiveInfinity); break;
      case NEON_FCVTMS_scalar: fcvts(fpf, rd, rn, FPNegativeInfinity); break;
      case NEON_FCVTMU_scalar: fcvtu(fpf, rd, rn, FPNegativeInfinity); break;
      case NEON_FCVTZS_scalar: fcvts(fpf, rd, rn, FPZero); break;
      case NEON_FCVTZU_scalar: fcvtu(fpf, rd, rn, FPZero); break;
      case NEON_FCVTAS_scalar: fcvts(fpf, rd, rn, FPTieAway); break;
      case NEON_FCVTAU_scalar: fcvtu(fpf, rd, rn, FPTieAway); break;
      case NEON_FCVTXN_scalar:
        // Unlike all of the other FP instructions above, fcvtxn encodes dest
        // size S as size<0>=1. There's only one case, so we ignore the form.
        VIXL_ASSERT(instr->ExtractBit(22) == 1);
        fcvtxn(kFormatS, rd, rn);
        break;
      default:
        // Saturating narrows use the two-bit (integer) size field.
        switch (instr->Mask(NEONScalar2RegMiscMask)) {
          case NEON_SQXTN_scalar: sqxtn(vf, rd, rn); break;
          case NEON_UQXTN_scalar: uqxtn(vf, rd, rn); break;
          case NEON_SQXTUN_scalar: sqxtun(vf, rd, rn); break;
          default: VIXL_UNIMPLEMENTED();
        }
    }
  }
}


// Visitor for NEON scalar three-register different-type (widening saturating
// doubling multiply) instructions.
void Simulator::VisitNEONScalar3Diff(const Instruction* instr) {
  NEONFormatDecoder nfd(instr, NEONFormatDecoder::LongScalarFormatMap());
  VectorFormat vf = nfd.GetVectorFormat();

  SimVRegister& rd = ReadVRegister(instr->GetRd());
  SimVRegister& rn = ReadVRegister(instr->GetRn());
  SimVRegister& rm = ReadVRegister(instr->GetRm());
  switch (instr->Mask(NEONScalar3DiffMask)) {
    case NEON_SQDMLAL_scalar: sqdmlal(vf, rd, rn, rm); break;
    case NEON_SQDMLSL_scalar: sqdmlsl(vf, rd, rn, rm); break;
    case NEON_SQDMULL_scalar: sqdmull(vf, rd, rn, rm); break;
    default: VIXL_UNIMPLEMENTED();
  }
}


// Visitor for NEON scalar three-register same-type instructions: FP
// multiply/compare forms and integer arithmetic/compare/shift forms.
void Simulator::VisitNEONScalar3Same(const Instruction* instr) {
  NEONFormatDecoder nfd(instr, NEONFormatDecoder::ScalarFormatMap());
  VectorFormat vf = nfd.GetVectorFormat();

  SimVRegister& rd = ReadVRegister(instr->GetRd());
  SimVRegister& rn = ReadVRegister(instr->GetRn());
  SimVRegister& rm = ReadVRegister(instr->GetRm());

  if (instr->Mask(NEONScalar3SameFPFMask) == NEONScalar3SameFPFixed) {
    vf = nfd.GetVectorFormat(nfd.FPScalarFormatMap());
    switch (instr->Mask(NEONScalar3SameFPMask)) {
      case NEON_FMULX_scalar: fmulx(vf, rd, rn, rm); break;
      case NEON_FACGE_scalar: fabscmp(vf, rd, rn, rm, ge); break;
      case NEON_FACGT_scalar: fabscmp(vf, rd, rn, rm, gt); break;
      case NEON_FCMEQ_scalar: fcmp(vf, rd, rn, rm, eq); break;
      case NEON_FCMGE_scalar:
fcmp(vf, rd, rn, rm, ge); 4625 break; 4626 case NEON_FCMGT_scalar: 4627 fcmp(vf, rd, rn, rm, gt); 4628 break; 4629 case NEON_FRECPS_scalar: 4630 frecps(vf, rd, rn, rm); 4631 break; 4632 case NEON_FRSQRTS_scalar: 4633 frsqrts(vf, rd, rn, rm); 4634 break; 4635 case NEON_FABD_scalar: 4636 fabd(vf, rd, rn, rm); 4637 break; 4638 default: 4639 VIXL_UNIMPLEMENTED(); 4640 } 4641 } else { 4642 switch (instr->Mask(NEONScalar3SameMask)) { 4643 case NEON_ADD_scalar: 4644 add(vf, rd, rn, rm); 4645 break; 4646 case NEON_SUB_scalar: 4647 sub(vf, rd, rn, rm); 4648 break; 4649 case NEON_CMEQ_scalar: 4650 cmp(vf, rd, rn, rm, eq); 4651 break; 4652 case NEON_CMGE_scalar: 4653 cmp(vf, rd, rn, rm, ge); 4654 break; 4655 case NEON_CMGT_scalar: 4656 cmp(vf, rd, rn, rm, gt); 4657 break; 4658 case NEON_CMHI_scalar: 4659 cmp(vf, rd, rn, rm, hi); 4660 break; 4661 case NEON_CMHS_scalar: 4662 cmp(vf, rd, rn, rm, hs); 4663 break; 4664 case NEON_CMTST_scalar: 4665 cmptst(vf, rd, rn, rm); 4666 break; 4667 case NEON_USHL_scalar: 4668 ushl(vf, rd, rn, rm); 4669 break; 4670 case NEON_SSHL_scalar: 4671 sshl(vf, rd, rn, rm); 4672 break; 4673 case NEON_SQDMULH_scalar: 4674 sqdmulh(vf, rd, rn, rm); 4675 break; 4676 case NEON_SQRDMULH_scalar: 4677 sqrdmulh(vf, rd, rn, rm); 4678 break; 4679 case NEON_UQADD_scalar: 4680 add(vf, rd, rn, rm).UnsignedSaturate(vf); 4681 break; 4682 case NEON_SQADD_scalar: 4683 add(vf, rd, rn, rm).SignedSaturate(vf); 4684 break; 4685 case NEON_UQSUB_scalar: 4686 sub(vf, rd, rn, rm).UnsignedSaturate(vf); 4687 break; 4688 case NEON_SQSUB_scalar: 4689 sub(vf, rd, rn, rm).SignedSaturate(vf); 4690 break; 4691 case NEON_UQSHL_scalar: 4692 ushl(vf, rd, rn, rm).UnsignedSaturate(vf); 4693 break; 4694 case NEON_SQSHL_scalar: 4695 sshl(vf, rd, rn, rm).SignedSaturate(vf); 4696 break; 4697 case NEON_URSHL_scalar: 4698 ushl(vf, rd, rn, rm).Round(vf); 4699 break; 4700 case NEON_SRSHL_scalar: 4701 sshl(vf, rd, rn, rm).Round(vf); 4702 break; 4703 case NEON_UQRSHL_scalar: 4704 ushl(vf, rd, rn, 
rm).Round(vf).UnsignedSaturate(vf); 4705 break; 4706 case NEON_SQRSHL_scalar: 4707 sshl(vf, rd, rn, rm).Round(vf).SignedSaturate(vf); 4708 break; 4709 default: 4710 VIXL_UNIMPLEMENTED(); 4711 } 4712 } 4713 } 4714 4715 4716 void Simulator::VisitNEONScalarByIndexedElement(const Instruction* instr) { 4717 NEONFormatDecoder nfd(instr, NEONFormatDecoder::LongScalarFormatMap()); 4718 VectorFormat vf = nfd.GetVectorFormat(); 4719 VectorFormat vf_r = nfd.GetVectorFormat(nfd.ScalarFormatMap()); 4720 4721 SimVRegister& rd = ReadVRegister(instr->GetRd()); 4722 SimVRegister& rn = ReadVRegister(instr->GetRn()); 4723 ByElementOp Op = NULL; 4724 4725 int rm_reg = instr->GetRm(); 4726 int index = (instr->GetNEONH() << 1) | instr->GetNEONL(); 4727 if (instr->GetNEONSize() == 1) { 4728 rm_reg &= 0xf; 4729 index = (index << 1) | instr->GetNEONM(); 4730 } 4731 4732 switch (instr->Mask(NEONScalarByIndexedElementMask)) { 4733 case NEON_SQDMULL_byelement_scalar: 4734 Op = &Simulator::sqdmull; 4735 break; 4736 case NEON_SQDMLAL_byelement_scalar: 4737 Op = &Simulator::sqdmlal; 4738 break; 4739 case NEON_SQDMLSL_byelement_scalar: 4740 Op = &Simulator::sqdmlsl; 4741 break; 4742 case NEON_SQDMULH_byelement_scalar: 4743 Op = &Simulator::sqdmulh; 4744 vf = vf_r; 4745 break; 4746 case NEON_SQRDMULH_byelement_scalar: 4747 Op = &Simulator::sqrdmulh; 4748 vf = vf_r; 4749 break; 4750 default: 4751 vf = nfd.GetVectorFormat(nfd.FPScalarFormatMap()); 4752 index = instr->GetNEONH(); 4753 if ((instr->GetFPType() & 1) == 0) { 4754 index = (index << 1) | instr->GetNEONL(); 4755 } 4756 switch (instr->Mask(NEONScalarByIndexedElementFPMask)) { 4757 case NEON_FMUL_byelement_scalar: 4758 Op = &Simulator::fmul; 4759 break; 4760 case NEON_FMLA_byelement_scalar: 4761 Op = &Simulator::fmla; 4762 break; 4763 case NEON_FMLS_byelement_scalar: 4764 Op = &Simulator::fmls; 4765 break; 4766 case NEON_FMULX_byelement_scalar: 4767 Op = &Simulator::fmulx; 4768 break; 4769 default: 4770 VIXL_UNIMPLEMENTED(); 4771 } 4772 } 
4773 4774 (this->*Op)(vf, rd, rn, ReadVRegister(rm_reg), index); 4775 } 4776 4777 4778 void Simulator::VisitNEONScalarCopy(const Instruction* instr) { 4779 NEONFormatDecoder nfd(instr, NEONFormatDecoder::TriangularScalarFormatMap()); 4780 VectorFormat vf = nfd.GetVectorFormat(); 4781 4782 SimVRegister& rd = ReadVRegister(instr->GetRd()); 4783 SimVRegister& rn = ReadVRegister(instr->GetRn()); 4784 4785 if (instr->Mask(NEONScalarCopyMask) == NEON_DUP_ELEMENT_scalar) { 4786 int imm5 = instr->GetImmNEON5(); 4787 int tz = CountTrailingZeros(imm5, 32); 4788 int rn_index = imm5 >> (tz + 1); 4789 dup_element(vf, rd, rn, rn_index); 4790 } else { 4791 VIXL_UNIMPLEMENTED(); 4792 } 4793 } 4794 4795 4796 void Simulator::VisitNEONScalarPairwise(const Instruction* instr) { 4797 NEONFormatDecoder nfd(instr, NEONFormatDecoder::FPScalarFormatMap()); 4798 VectorFormat vf = nfd.GetVectorFormat(); 4799 4800 SimVRegister& rd = ReadVRegister(instr->GetRd()); 4801 SimVRegister& rn = ReadVRegister(instr->GetRn()); 4802 switch (instr->Mask(NEONScalarPairwiseMask)) { 4803 case NEON_ADDP_scalar: 4804 addp(vf, rd, rn); 4805 break; 4806 case NEON_FADDP_scalar: 4807 faddp(vf, rd, rn); 4808 break; 4809 case NEON_FMAXP_scalar: 4810 fmaxp(vf, rd, rn); 4811 break; 4812 case NEON_FMAXNMP_scalar: 4813 fmaxnmp(vf, rd, rn); 4814 break; 4815 case NEON_FMINP_scalar: 4816 fminp(vf, rd, rn); 4817 break; 4818 case NEON_FMINNMP_scalar: 4819 fminnmp(vf, rd, rn); 4820 break; 4821 default: 4822 VIXL_UNIMPLEMENTED(); 4823 } 4824 } 4825 4826 4827 void Simulator::VisitNEONScalarShiftImmediate(const Instruction* instr) { 4828 SimVRegister& rd = ReadVRegister(instr->GetRd()); 4829 SimVRegister& rn = ReadVRegister(instr->GetRn()); 4830 FPRounding fpcr_rounding = static_cast<FPRounding>(ReadFpcr().GetRMode()); 4831 4832 static const NEONFormatMap map = {{22, 21, 20, 19}, 4833 {NF_UNDEF, 4834 NF_B, 4835 NF_H, 4836 NF_H, 4837 NF_S, 4838 NF_S, 4839 NF_S, 4840 NF_S, 4841 NF_D, 4842 NF_D, 4843 NF_D, 4844 NF_D, 4845 NF_D, 
4846 NF_D, 4847 NF_D, 4848 NF_D}}; 4849 NEONFormatDecoder nfd(instr, &map); 4850 VectorFormat vf = nfd.GetVectorFormat(); 4851 4852 int highestSetBit = HighestSetBitPosition(instr->GetImmNEONImmh()); 4853 int immhimmb = instr->GetImmNEONImmhImmb(); 4854 int right_shift = (16 << highestSetBit) - immhimmb; 4855 int left_shift = immhimmb - (8 << highestSetBit); 4856 switch (instr->Mask(NEONScalarShiftImmediateMask)) { 4857 case NEON_SHL_scalar: 4858 shl(vf, rd, rn, left_shift); 4859 break; 4860 case NEON_SLI_scalar: 4861 sli(vf, rd, rn, left_shift); 4862 break; 4863 case NEON_SQSHL_imm_scalar: 4864 sqshl(vf, rd, rn, left_shift); 4865 break; 4866 case NEON_UQSHL_imm_scalar: 4867 uqshl(vf, rd, rn, left_shift); 4868 break; 4869 case NEON_SQSHLU_scalar: 4870 sqshlu(vf, rd, rn, left_shift); 4871 break; 4872 case NEON_SRI_scalar: 4873 sri(vf, rd, rn, right_shift); 4874 break; 4875 case NEON_SSHR_scalar: 4876 sshr(vf, rd, rn, right_shift); 4877 break; 4878 case NEON_USHR_scalar: 4879 ushr(vf, rd, rn, right_shift); 4880 break; 4881 case NEON_SRSHR_scalar: 4882 sshr(vf, rd, rn, right_shift).Round(vf); 4883 break; 4884 case NEON_URSHR_scalar: 4885 ushr(vf, rd, rn, right_shift).Round(vf); 4886 break; 4887 case NEON_SSRA_scalar: 4888 ssra(vf, rd, rn, right_shift); 4889 break; 4890 case NEON_USRA_scalar: 4891 usra(vf, rd, rn, right_shift); 4892 break; 4893 case NEON_SRSRA_scalar: 4894 srsra(vf, rd, rn, right_shift); 4895 break; 4896 case NEON_URSRA_scalar: 4897 ursra(vf, rd, rn, right_shift); 4898 break; 4899 case NEON_UQSHRN_scalar: 4900 uqshrn(vf, rd, rn, right_shift); 4901 break; 4902 case NEON_UQRSHRN_scalar: 4903 uqrshrn(vf, rd, rn, right_shift); 4904 break; 4905 case NEON_SQSHRN_scalar: 4906 sqshrn(vf, rd, rn, right_shift); 4907 break; 4908 case NEON_SQRSHRN_scalar: 4909 sqrshrn(vf, rd, rn, right_shift); 4910 break; 4911 case NEON_SQSHRUN_scalar: 4912 sqshrun(vf, rd, rn, right_shift); 4913 break; 4914 case NEON_SQRSHRUN_scalar: 4915 sqrshrun(vf, rd, rn, right_shift); 4916 
break; 4917 case NEON_FCVTZS_imm_scalar: 4918 fcvts(vf, rd, rn, FPZero, right_shift); 4919 break; 4920 case NEON_FCVTZU_imm_scalar: 4921 fcvtu(vf, rd, rn, FPZero, right_shift); 4922 break; 4923 case NEON_SCVTF_imm_scalar: 4924 scvtf(vf, rd, rn, right_shift, fpcr_rounding); 4925 break; 4926 case NEON_UCVTF_imm_scalar: 4927 ucvtf(vf, rd, rn, right_shift, fpcr_rounding); 4928 break; 4929 default: 4930 VIXL_UNIMPLEMENTED(); 4931 } 4932 } 4933 4934 4935 void Simulator::VisitNEONShiftImmediate(const Instruction* instr) { 4936 SimVRegister& rd = ReadVRegister(instr->GetRd()); 4937 SimVRegister& rn = ReadVRegister(instr->GetRn()); 4938 FPRounding fpcr_rounding = static_cast<FPRounding>(ReadFpcr().GetRMode()); 4939 4940 // 00010->8B, 00011->16B, 001x0->4H, 001x1->8H, 4941 // 01xx0->2S, 01xx1->4S, 1xxx1->2D, all others undefined. 4942 static const NEONFormatMap map = {{22, 21, 20, 19, 30}, 4943 {NF_UNDEF, 4944 NF_UNDEF, 4945 NF_8B, 4946 NF_16B, 4947 NF_4H, 4948 NF_8H, 4949 NF_4H, 4950 NF_8H, 4951 NF_2S, 4952 NF_4S, 4953 NF_2S, 4954 NF_4S, 4955 NF_2S, 4956 NF_4S, 4957 NF_2S, 4958 NF_4S, 4959 NF_UNDEF, 4960 NF_2D, 4961 NF_UNDEF, 4962 NF_2D, 4963 NF_UNDEF, 4964 NF_2D, 4965 NF_UNDEF, 4966 NF_2D, 4967 NF_UNDEF, 4968 NF_2D, 4969 NF_UNDEF, 4970 NF_2D, 4971 NF_UNDEF, 4972 NF_2D, 4973 NF_UNDEF, 4974 NF_2D}}; 4975 NEONFormatDecoder nfd(instr, &map); 4976 VectorFormat vf = nfd.GetVectorFormat(); 4977 4978 // 0001->8H, 001x->4S, 01xx->2D, all others undefined. 
4979 static const NEONFormatMap map_l = 4980 {{22, 21, 20, 19}, 4981 {NF_UNDEF, NF_8H, NF_4S, NF_4S, NF_2D, NF_2D, NF_2D, NF_2D}}; 4982 VectorFormat vf_l = nfd.GetVectorFormat(&map_l); 4983 4984 int highestSetBit = HighestSetBitPosition(instr->GetImmNEONImmh()); 4985 int immhimmb = instr->GetImmNEONImmhImmb(); 4986 int right_shift = (16 << highestSetBit) - immhimmb; 4987 int left_shift = immhimmb - (8 << highestSetBit); 4988 4989 switch (instr->Mask(NEONShiftImmediateMask)) { 4990 case NEON_SHL: 4991 shl(vf, rd, rn, left_shift); 4992 break; 4993 case NEON_SLI: 4994 sli(vf, rd, rn, left_shift); 4995 break; 4996 case NEON_SQSHLU: 4997 sqshlu(vf, rd, rn, left_shift); 4998 break; 4999 case NEON_SRI: 5000 sri(vf, rd, rn, right_shift); 5001 break; 5002 case NEON_SSHR: 5003 sshr(vf, rd, rn, right_shift); 5004 break; 5005 case NEON_USHR: 5006 ushr(vf, rd, rn, right_shift); 5007 break; 5008 case NEON_SRSHR: 5009 sshr(vf, rd, rn, right_shift).Round(vf); 5010 break; 5011 case NEON_URSHR: 5012 ushr(vf, rd, rn, right_shift).Round(vf); 5013 break; 5014 case NEON_SSRA: 5015 ssra(vf, rd, rn, right_shift); 5016 break; 5017 case NEON_USRA: 5018 usra(vf, rd, rn, right_shift); 5019 break; 5020 case NEON_SRSRA: 5021 srsra(vf, rd, rn, right_shift); 5022 break; 5023 case NEON_URSRA: 5024 ursra(vf, rd, rn, right_shift); 5025 break; 5026 case NEON_SQSHL_imm: 5027 sqshl(vf, rd, rn, left_shift); 5028 break; 5029 case NEON_UQSHL_imm: 5030 uqshl(vf, rd, rn, left_shift); 5031 break; 5032 case NEON_SCVTF_imm: 5033 scvtf(vf, rd, rn, right_shift, fpcr_rounding); 5034 break; 5035 case NEON_UCVTF_imm: 5036 ucvtf(vf, rd, rn, right_shift, fpcr_rounding); 5037 break; 5038 case NEON_FCVTZS_imm: 5039 fcvts(vf, rd, rn, FPZero, right_shift); 5040 break; 5041 case NEON_FCVTZU_imm: 5042 fcvtu(vf, rd, rn, FPZero, right_shift); 5043 break; 5044 case NEON_SSHLL: 5045 vf = vf_l; 5046 if (instr->Mask(NEON_Q)) { 5047 sshll2(vf, rd, rn, left_shift); 5048 } else { 5049 sshll(vf, rd, rn, left_shift); 5050 } 5051 
break; 5052 case NEON_USHLL: 5053 vf = vf_l; 5054 if (instr->Mask(NEON_Q)) { 5055 ushll2(vf, rd, rn, left_shift); 5056 } else { 5057 ushll(vf, rd, rn, left_shift); 5058 } 5059 break; 5060 case NEON_SHRN: 5061 if (instr->Mask(NEON_Q)) { 5062 shrn2(vf, rd, rn, right_shift); 5063 } else { 5064 shrn(vf, rd, rn, right_shift); 5065 } 5066 break; 5067 case NEON_RSHRN: 5068 if (instr->Mask(NEON_Q)) { 5069 rshrn2(vf, rd, rn, right_shift); 5070 } else { 5071 rshrn(vf, rd, rn, right_shift); 5072 } 5073 break; 5074 case NEON_UQSHRN: 5075 if (instr->Mask(NEON_Q)) { 5076 uqshrn2(vf, rd, rn, right_shift); 5077 } else { 5078 uqshrn(vf, rd, rn, right_shift); 5079 } 5080 break; 5081 case NEON_UQRSHRN: 5082 if (instr->Mask(NEON_Q)) { 5083 uqrshrn2(vf, rd, rn, right_shift); 5084 } else { 5085 uqrshrn(vf, rd, rn, right_shift); 5086 } 5087 break; 5088 case NEON_SQSHRN: 5089 if (instr->Mask(NEON_Q)) { 5090 sqshrn2(vf, rd, rn, right_shift); 5091 } else { 5092 sqshrn(vf, rd, rn, right_shift); 5093 } 5094 break; 5095 case NEON_SQRSHRN: 5096 if (instr->Mask(NEON_Q)) { 5097 sqrshrn2(vf, rd, rn, right_shift); 5098 } else { 5099 sqrshrn(vf, rd, rn, right_shift); 5100 } 5101 break; 5102 case NEON_SQSHRUN: 5103 if (instr->Mask(NEON_Q)) { 5104 sqshrun2(vf, rd, rn, right_shift); 5105 } else { 5106 sqshrun(vf, rd, rn, right_shift); 5107 } 5108 break; 5109 case NEON_SQRSHRUN: 5110 if (instr->Mask(NEON_Q)) { 5111 sqrshrun2(vf, rd, rn, right_shift); 5112 } else { 5113 sqrshrun(vf, rd, rn, right_shift); 5114 } 5115 break; 5116 default: 5117 VIXL_UNIMPLEMENTED(); 5118 } 5119 } 5120 5121 5122 void Simulator::VisitNEONTable(const Instruction* instr) { 5123 NEONFormatDecoder nfd(instr, NEONFormatDecoder::LogicalFormatMap()); 5124 VectorFormat vf = nfd.GetVectorFormat(); 5125 5126 SimVRegister& rd = ReadVRegister(instr->GetRd()); 5127 SimVRegister& rn = ReadVRegister(instr->GetRn()); 5128 SimVRegister& rn2 = ReadVRegister((instr->GetRn() + 1) % kNumberOfVRegisters); 5129 SimVRegister& rn3 = 
ReadVRegister((instr->GetRn() + 2) % kNumberOfVRegisters); 5130 SimVRegister& rn4 = ReadVRegister((instr->GetRn() + 3) % kNumberOfVRegisters); 5131 SimVRegister& rm = ReadVRegister(instr->GetRm()); 5132 5133 switch (instr->Mask(NEONTableMask)) { 5134 case NEON_TBL_1v: 5135 tbl(vf, rd, rn, rm); 5136 break; 5137 case NEON_TBL_2v: 5138 tbl(vf, rd, rn, rn2, rm); 5139 break; 5140 case NEON_TBL_3v: 5141 tbl(vf, rd, rn, rn2, rn3, rm); 5142 break; 5143 case NEON_TBL_4v: 5144 tbl(vf, rd, rn, rn2, rn3, rn4, rm); 5145 break; 5146 case NEON_TBX_1v: 5147 tbx(vf, rd, rn, rm); 5148 break; 5149 case NEON_TBX_2v: 5150 tbx(vf, rd, rn, rn2, rm); 5151 break; 5152 case NEON_TBX_3v: 5153 tbx(vf, rd, rn, rn2, rn3, rm); 5154 break; 5155 case NEON_TBX_4v: 5156 tbx(vf, rd, rn, rn2, rn3, rn4, rm); 5157 break; 5158 default: 5159 VIXL_UNIMPLEMENTED(); 5160 } 5161 } 5162 5163 5164 void Simulator::VisitNEONPerm(const Instruction* instr) { 5165 NEONFormatDecoder nfd(instr); 5166 VectorFormat vf = nfd.GetVectorFormat(); 5167 5168 SimVRegister& rd = ReadVRegister(instr->GetRd()); 5169 SimVRegister& rn = ReadVRegister(instr->GetRn()); 5170 SimVRegister& rm = ReadVRegister(instr->GetRm()); 5171 5172 switch (instr->Mask(NEONPermMask)) { 5173 case NEON_TRN1: 5174 trn1(vf, rd, rn, rm); 5175 break; 5176 case NEON_TRN2: 5177 trn2(vf, rd, rn, rm); 5178 break; 5179 case NEON_UZP1: 5180 uzp1(vf, rd, rn, rm); 5181 break; 5182 case NEON_UZP2: 5183 uzp2(vf, rd, rn, rm); 5184 break; 5185 case NEON_ZIP1: 5186 zip1(vf, rd, rn, rm); 5187 break; 5188 case NEON_ZIP2: 5189 zip2(vf, rd, rn, rm); 5190 break; 5191 default: 5192 VIXL_UNIMPLEMENTED(); 5193 } 5194 } 5195 5196 5197 void Simulator::DoUnreachable(const Instruction* instr) { 5198 VIXL_ASSERT((instr->Mask(ExceptionMask) == HLT) && 5199 (instr->GetImmException() == kUnreachableOpcode)); 5200 5201 fprintf(stream_, 5202 "Hit UNREACHABLE marker at pc=%p.\n", 5203 reinterpret_cast<const void*>(instr)); 5204 abort(); 5205 } 5206 5207 5208 void Simulator::DoTrace(const 
Instruction* instr) { 5209 VIXL_ASSERT((instr->Mask(ExceptionMask) == HLT) && 5210 (instr->GetImmException() == kTraceOpcode)); 5211 5212 // Read the arguments encoded inline in the instruction stream. 5213 uint32_t parameters; 5214 uint32_t command; 5215 5216 VIXL_STATIC_ASSERT(sizeof(*instr) == 1); 5217 memcpy(¶meters, instr + kTraceParamsOffset, sizeof(parameters)); 5218 memcpy(&command, instr + kTraceCommandOffset, sizeof(command)); 5219 5220 switch (command) { 5221 case TRACE_ENABLE: 5222 SetTraceParameters(GetTraceParameters() | parameters); 5223 break; 5224 case TRACE_DISABLE: 5225 SetTraceParameters(GetTraceParameters() & ~parameters); 5226 break; 5227 default: 5228 VIXL_UNREACHABLE(); 5229 } 5230 5231 WritePc(instr->GetInstructionAtOffset(kTraceLength)); 5232 } 5233 5234 5235 void Simulator::DoLog(const Instruction* instr) { 5236 VIXL_ASSERT((instr->Mask(ExceptionMask) == HLT) && 5237 (instr->GetImmException() == kLogOpcode)); 5238 5239 // Read the arguments encoded inline in the instruction stream. 5240 uint32_t parameters; 5241 5242 VIXL_STATIC_ASSERT(sizeof(*instr) == 1); 5243 memcpy(¶meters, instr + kTraceParamsOffset, sizeof(parameters)); 5244 5245 // We don't support a one-shot LOG_DISASM. 5246 VIXL_ASSERT((parameters & LOG_DISASM) == 0); 5247 // Print the requested information. 5248 if (parameters & LOG_SYSREGS) PrintSystemRegisters(); 5249 if (parameters & LOG_REGS) PrintRegisters(); 5250 if (parameters & LOG_VREGS) PrintVRegisters(); 5251 5252 WritePc(instr->GetInstructionAtOffset(kLogLength)); 5253 } 5254 5255 5256 void Simulator::DoPrintf(const Instruction* instr) { 5257 VIXL_ASSERT((instr->Mask(ExceptionMask) == HLT) && 5258 (instr->GetImmException() == kPrintfOpcode)); 5259 5260 // Read the arguments encoded inline in the instruction stream. 
5261 uint32_t arg_count; 5262 uint32_t arg_pattern_list; 5263 VIXL_STATIC_ASSERT(sizeof(*instr) == 1); 5264 memcpy(&arg_count, instr + kPrintfArgCountOffset, sizeof(arg_count)); 5265 memcpy(&arg_pattern_list, 5266 instr + kPrintfArgPatternListOffset, 5267 sizeof(arg_pattern_list)); 5268 5269 VIXL_ASSERT(arg_count <= kPrintfMaxArgCount); 5270 VIXL_ASSERT((arg_pattern_list >> (kPrintfArgPatternBits * arg_count)) == 0); 5271 5272 // We need to call the host printf function with a set of arguments defined by 5273 // arg_pattern_list. Because we don't know the types and sizes of the 5274 // arguments, this is very difficult to do in a robust and portable way. To 5275 // work around the problem, we pick apart the format string, and print one 5276 // format placeholder at a time. 5277 5278 // Allocate space for the format string. We take a copy, so we can modify it. 5279 // Leave enough space for one extra character per expected argument (plus the 5280 // '\0' termination). 5281 const char* format_base = ReadRegister<const char*>(0); 5282 VIXL_ASSERT(format_base != NULL); 5283 size_t length = strlen(format_base) + 1; 5284 char* const format = new char[length + arg_count]; 5285 5286 // A list of chunks, each with exactly one format placeholder. 5287 const char* chunks[kPrintfMaxArgCount]; 5288 5289 // Copy the format string and search for format placeholders. 5290 uint32_t placeholder_count = 0; 5291 char* format_scratch = format; 5292 for (size_t i = 0; i < length; i++) { 5293 if (format_base[i] != '%') { 5294 *format_scratch++ = format_base[i]; 5295 } else { 5296 if (format_base[i + 1] == '%') { 5297 // Ignore explicit "%%" sequences. 5298 *format_scratch++ = format_base[i]; 5299 i++; 5300 // Chunks after the first are passed as format strings to printf, so we 5301 // need to escape '%' characters in those chunks. 
5302 if (placeholder_count > 0) *format_scratch++ = format_base[i]; 5303 } else { 5304 VIXL_CHECK(placeholder_count < arg_count); 5305 // Insert '\0' before placeholders, and store their locations. 5306 *format_scratch++ = '\0'; 5307 chunks[placeholder_count++] = format_scratch; 5308 *format_scratch++ = format_base[i]; 5309 } 5310 } 5311 } 5312 VIXL_CHECK(placeholder_count == arg_count); 5313 5314 // Finally, call printf with each chunk, passing the appropriate register 5315 // argument. Normally, printf returns the number of bytes transmitted, so we 5316 // can emulate a single printf call by adding the result from each chunk. If 5317 // any call returns a negative (error) value, though, just return that value. 5318 5319 printf("%s", clr_printf); 5320 5321 // Because '\0' is inserted before each placeholder, the first string in 5322 // 'format' contains no format placeholders and should be printed literally. 5323 int result = printf("%s", format); 5324 int pcs_r = 1; // Start at x1. x0 holds the format string. 5325 int pcs_f = 0; // Start at d0. 5326 if (result >= 0) { 5327 for (uint32_t i = 0; i < placeholder_count; i++) { 5328 int part_result = -1; 5329 5330 uint32_t arg_pattern = arg_pattern_list >> (i * kPrintfArgPatternBits); 5331 arg_pattern &= (1 << kPrintfArgPatternBits) - 1; 5332 switch (arg_pattern) { 5333 case kPrintfArgW: 5334 part_result = printf(chunks[i], ReadWRegister(pcs_r++)); 5335 break; 5336 case kPrintfArgX: 5337 part_result = printf(chunks[i], ReadXRegister(pcs_r++)); 5338 break; 5339 case kPrintfArgD: 5340 part_result = printf(chunks[i], ReadDRegister(pcs_f++)); 5341 break; 5342 default: 5343 VIXL_UNREACHABLE(); 5344 } 5345 5346 if (part_result < 0) { 5347 // Handle error values. 5348 result = part_result; 5349 break; 5350 } 5351 5352 result += part_result; 5353 } 5354 } 5355 5356 printf("%s", clr_normal); 5357 5358 // Printf returns its result in x0 (just like the C library's printf). 
5359 WriteXRegister(0, result); 5360 5361 // The printf parameters are inlined in the code, so skip them. 5362 WritePc(instr->GetInstructionAtOffset(kPrintfLength)); 5363 5364 // Set LR as if we'd just called a native printf function. 5365 WriteLr(ReadPc()); 5366 5367 delete[] format; 5368 } 5369 5370 5371 #ifdef VIXL_HAS_SIMULATED_RUNTIME_CALL_SUPPORT 5372 void Simulator::DoRuntimeCall(const Instruction* instr) { 5373 VIXL_STATIC_ASSERT(kRuntimeCallAddressSize == sizeof(uintptr_t)); 5374 // The appropriate `Simulator::SimulateRuntimeCall()` wrapper and the function 5375 // to call are passed inlined in the assembly. 5376 uintptr_t call_wrapper_address = 5377 Memory::Read<uintptr_t>(instr + kRuntimeCallWrapperOffset); 5378 uintptr_t function_address = 5379 Memory::Read<uintptr_t>(instr + kRuntimeCallFunctionOffset); 5380 auto runtime_call_wrapper = 5381 reinterpret_cast<void (*)(Simulator*, uintptr_t)>(call_wrapper_address); 5382 runtime_call_wrapper(this, function_address); 5383 WritePc(instr->GetInstructionAtOffset(kRuntimeCallLength)); 5384 } 5385 #else 5386 void Simulator::DoRuntimeCall(const Instruction* instr) { 5387 USE(instr); 5388 VIXL_UNREACHABLE(); 5389 } 5390 #endif 5391 5392 } // namespace aarch64 5393 } // namespace vixl 5394 5395 #endif // VIXL_INCLUDE_SIMULATOR_AARCH64 5396