// Copyright 2014, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//   * Redistributions of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//   * Redistributions in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//   * Neither the name of ARM Limited nor the names of its contributors may be
//     used to endorse or promote products derived from this software without
//     specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include <cmath>

#include "test-runner.h"
#include "test-utils-aarch64.h"

#include "aarch64/cpu-aarch64.h"
#include "aarch64/disasm-aarch64.h"
#include "aarch64/macro-assembler-aarch64.h"
#include "aarch64/simulator-aarch64.h"

#define __ masm->

namespace vixl {
namespace aarch64 {


// This value is a signalling NaN as both a double and as a float (taking the
// least-significant word).
const double kFP64SignallingNaN = RawbitsToDouble(UINT64_C(0x7ff000007f800001));
const float kFP32SignallingNaN = RawbitsToFloat(0x7f800001);

// A similar value, but as a quiet NaN.
const double kFP64QuietNaN = RawbitsToDouble(UINT64_C(0x7ff800007fc00001));
const float kFP32QuietNaN = RawbitsToFloat(0x7fc00001);
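// The only difference between the signalling and quiet encodings above is the
// most-significant fraction bit, which IEEE 754 (and AArch64) uses to mark a
// NaN as quiet; the non-zero low-order payload keeps the signalling values
// from decoding as infinities.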


bool Equal32(uint32_t expected, const RegisterDump*, uint32_t result) {
  if (result != expected) {
    printf("Expected 0x%08" PRIx32 "\t Found 0x%08" PRIx32 "\n",
           expected,
           result);
  }

  return expected == result;
}


bool Equal64(uint64_t expected, const RegisterDump*, uint64_t result) {
  if (result != expected) {
    printf("Expected 0x%016" PRIx64 "\t Found 0x%016" PRIx64 "\n",
           expected,
           result);
  }

  return expected == result;
}


bool Equal128(vec128_t expected, const RegisterDump*, vec128_t result) {
  if ((result.h != expected.h) || (result.l != expected.l)) {
    printf("Expected 0x%016" PRIx64 "%016" PRIx64
           "\t "
           "Found 0x%016" PRIx64 "%016" PRIx64 "\n",
           expected.h,
           expected.l,
           result.h,
           result.l);
  }

  return ((expected.h == result.h) && (expected.l == result.l));
}


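// The FP comparisons below are deliberately performed on the raw bit patterns
// rather than with the C++ == operator, so that they distinguish +0.0 from
// -0.0 and compare NaN payloads exactly.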
bool EqualFP32(float expected, const RegisterDump*, float result) {
  if (FloatToRawbits(expected) == FloatToRawbits(result)) {
    return true;
  } else {
    if (std::isnan(expected) || (expected == 0.0)) {
      printf("Expected 0x%08" PRIx32 "\t Found 0x%08" PRIx32 "\n",
             FloatToRawbits(expected),
             FloatToRawbits(result));
    } else {
      printf("Expected %.9f (0x%08" PRIx32
             ")\t "
             "Found %.9f (0x%08" PRIx32 ")\n",
             expected,
             FloatToRawbits(expected),
             result,
             FloatToRawbits(result));
    }
    return false;
  }
}


bool EqualFP64(double expected, const RegisterDump*, double result) {
  if (DoubleToRawbits(expected) == DoubleToRawbits(result)) {
    return true;
  }

  if (std::isnan(expected) || (expected == 0.0)) {
    printf("Expected 0x%016" PRIx64 "\t Found 0x%016" PRIx64 "\n",
           DoubleToRawbits(expected),
           DoubleToRawbits(result));
  } else {
    printf("Expected %.17f (0x%016" PRIx64
           ")\t "
           "Found %.17f (0x%016" PRIx64 ")\n",
           expected,
           DoubleToRawbits(expected),
           result,
           DoubleToRawbits(result));
  }
  return false;
}


bool Equal32(uint32_t expected, const RegisterDump* core, const Register& reg) {
  VIXL_ASSERT(reg.Is32Bits());
  // Retrieve the corresponding X register so we can check that the upper part
  // was properly cleared.
  int64_t result_x = core->xreg(reg.GetCode());
  if ((result_x & 0xffffffff00000000) != 0) {
    printf("Expected 0x%08" PRIx32 "\t Found 0x%016" PRIx64 "\n",
           expected,
           result_x);
    return false;
  }
  uint32_t result_w = core->wreg(reg.GetCode());
  return Equal32(expected, core, result_w);
}


bool Equal64(uint64_t expected, const RegisterDump* core, const Register& reg) {
  VIXL_ASSERT(reg.Is64Bits());
  uint64_t result = core->xreg(reg.GetCode());
  return Equal64(expected, core, result);
}


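// Note the argument order below: the high half of the expected value comes
// first so that calls read like the 128-bit literal they describe, while
// vec128_t itself is initialised as {low, high}.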
bool Equal128(uint64_t expected_h,
              uint64_t expected_l,
              const RegisterDump* core,
              const VRegister& vreg) {
  VIXL_ASSERT(vreg.Is128Bits());
  vec128_t expected = {expected_l, expected_h};
  vec128_t result = core->qreg(vreg.GetCode());
  return Equal128(expected, core, result);
}


bool EqualFP32(float expected,
               const RegisterDump* core,
               const FPRegister& fpreg) {
  VIXL_ASSERT(fpreg.Is32Bits());
  // Retrieve the corresponding D register so we can check that the upper part
  // was properly cleared.
  uint64_t result_64 = core->dreg_bits(fpreg.GetCode());
  if ((result_64 & 0xffffffff00000000) != 0) {
    printf("Expected 0x%08" PRIx32 " (%f)\t Found 0x%016" PRIx64 "\n",
           FloatToRawbits(expected),
           expected,
           result_64);
    return false;
  }

  return EqualFP32(expected, core, core->sreg(fpreg.GetCode()));
}


bool EqualFP64(double expected,
               const RegisterDump* core,
               const FPRegister& fpreg) {
  VIXL_ASSERT(fpreg.Is64Bits());
  return EqualFP64(expected, core, core->dreg(fpreg.GetCode()));
}


bool Equal64(const Register& reg0,
             const RegisterDump* core,
             const Register& reg1) {
  VIXL_ASSERT(reg0.Is64Bits() && reg1.Is64Bits());
  int64_t expected = core->xreg(reg0.GetCode());
  int64_t result = core->xreg(reg1.GetCode());
  return Equal64(expected, core, result);
}


bool Equal64(uint64_t expected,
             const RegisterDump* core,
             const VRegister& vreg) {
  VIXL_ASSERT(vreg.Is64Bits());
  uint64_t result = core->dreg_bits(vreg.GetCode());
  return Equal64(expected, core, result);
}


static char FlagN(uint32_t flags) { return (flags & NFlag) ? 'N' : 'n'; }


static char FlagZ(uint32_t flags) { return (flags & ZFlag) ? 'Z' : 'z'; }


static char FlagC(uint32_t flags) { return (flags & CFlag) ? 'C' : 'c'; }


static char FlagV(uint32_t flags) { return (flags & VFlag) ? 'V' : 'v'; }


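// EqualNzcv prints each flag as an upper-case letter when it is set and a
// lower-case letter when it is clear. For example, expecting (ZFlag | CFlag)
// but finding only CFlag prints "Expected: nZCv\t Found: nzCv".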
bool EqualNzcv(uint32_t expected, uint32_t result) {
  VIXL_ASSERT((expected & ~NZCVFlag) == 0);
  VIXL_ASSERT((result & ~NZCVFlag) == 0);
  if (result != expected) {
    printf("Expected: %c%c%c%c\t Found: %c%c%c%c\n",
           FlagN(expected),
           FlagZ(expected),
           FlagC(expected),
           FlagV(expected),
           FlagN(result),
           FlagZ(result),
           FlagC(result),
           FlagV(result));
    return false;
  }

  return true;
}


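// EqualRegisters compares two complete dumps, which is useful for checking
// that a code sequence preserves all general-purpose and floating-point
// register state (for example, around a call).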
bool EqualRegisters(const RegisterDump* a, const RegisterDump* b) {
  for (unsigned i = 0; i < kNumberOfRegisters; i++) {
    if (a->xreg(i) != b->xreg(i)) {
      printf("x%d\t Expected 0x%016" PRIx64 "\t Found 0x%016" PRIx64 "\n",
             i,
             a->xreg(i),
             b->xreg(i));
      return false;
    }
  }

  for (unsigned i = 0; i < kNumberOfFPRegisters; i++) {
    uint64_t a_bits = a->dreg_bits(i);
    uint64_t b_bits = b->dreg_bits(i);
    if (a_bits != b_bits) {
      printf("d%d\t Expected 0x%016" PRIx64 "\t Found 0x%016" PRIx64 "\n",
             i,
             a_bits,
             b_bits);
      return false;
    }
  }

  return true;
}


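// PopulateRegisterArray fills up to reg_count W, X and/or custom-sized views
// of the same architectural registers, drawn from the `allowed` bit mask, and
// returns the mask of registers actually used. Any of the output arrays may
// be NULL. An illustrative (hypothetical) use in a test:
//
//   Register w[3], x[3];
//   // Use only x10, x11 and x12 for the test (mask bits 10-12).
//   RegList used =
//       PopulateRegisterArray(w, x, NULL, 0, 3, UINT64_C(0x7) << 10);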
RegList PopulateRegisterArray(Register* w,
                              Register* x,
                              Register* r,
                              int reg_size,
                              int reg_count,
                              RegList allowed) {
  RegList list = 0;
  int i = 0;
  for (unsigned n = 0; (n < kNumberOfRegisters) && (i < reg_count); n++) {
    if (((UINT64_C(1) << n) & allowed) != 0) {
      // Only assign allowed registers.
      if (r) {
        r[i] = Register(n, reg_size);
      }
      if (x) {
        x[i] = Register(n, kXRegSize);
      }
      if (w) {
        w[i] = Register(n, kWRegSize);
      }
      list |= (UINT64_C(1) << n);
      i++;
    }
  }
  // Check that we got enough registers.
  VIXL_ASSERT(CountSetBits(list, kNumberOfRegisters) == reg_count);

  return list;
}


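// PopulateFPRegisterArray is the floating-point counterpart: it hands out S,
// D and custom-sized views of the same FP/SIMD registers from `allowed`.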
RegList PopulateFPRegisterArray(FPRegister* s,
                                FPRegister* d,
                                FPRegister* v,
                                int reg_size,
                                int reg_count,
                                RegList allowed) {
  RegList list = 0;
  int i = 0;
  for (unsigned n = 0; (n < kNumberOfFPRegisters) && (i < reg_count); n++) {
    if (((UINT64_C(1) << n) & allowed) != 0) {
      // Only assign allowed registers.
      if (v) {
        v[i] = FPRegister(n, reg_size);
      }
      if (d) {
        d[i] = FPRegister(n, kDRegSize);
      }
      if (s) {
        s[i] = FPRegister(n, kSRegSize);
      }
      list |= (UINT64_C(1) << n);
      i++;
    }
  }
  // Check that we got enough registers.
  VIXL_ASSERT(CountSetBits(list, kNumberOfFPRegisters) == reg_count);

  return list;
}


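// Clobber overwrites the listed registers with a known value so that a test
// cannot pass by accident because a register still held the result of an
// earlier operation. The literal is materialised once and then copied
// register-to-register to keep the generated code short.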
void Clobber(MacroAssembler* masm, RegList reg_list, uint64_t const value) {
  Register first = NoReg;
  for (unsigned i = 0; i < kNumberOfRegisters; i++) {
    if (reg_list & (UINT64_C(1) << i)) {
      Register xn(i, kXRegSize);
      // We should never write into sp here.
      VIXL_ASSERT(!xn.Is(sp));
      if (!xn.IsZero()) {
        if (!first.IsValid()) {
          // This is the first register we've hit, so construct the literal.
          __ Mov(xn, value);
          first = xn;
        } else {
          // We've already loaded the literal, so re-use the value already
          // loaded into the first register we hit.
          __ Mov(xn, first);
        }
      }
    }
  }
}


void ClobberFP(MacroAssembler* masm, RegList reg_list, double const value) {
  FPRegister first = NoFPReg;
  for (unsigned i = 0; i < kNumberOfFPRegisters; i++) {
    if (reg_list & (UINT64_C(1) << i)) {
      FPRegister dn(i, kDRegSize);
      if (!first.IsValid()) {
        // This is the first register we've hit, so construct the literal.
        __ Fmov(dn, value);
        first = dn;
      } else {
        // We've already loaded the literal, so re-use the value already loaded
        // into the first register we hit.
        __ Fmov(dn, first);
      }
    }
  }
}


void Clobber(MacroAssembler* masm, CPURegList reg_list) {
  if (reg_list.GetType() == CPURegister::kRegister) {
    // This will always clobber X registers.
    Clobber(masm, reg_list.GetList());
  } else if (reg_list.GetType() == CPURegister::kVRegister) {
    // This will always clobber D registers.
    ClobberFP(masm, reg_list.GetList());
  } else {
    VIXL_UNREACHABLE();
  }
}


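// Dump captures the current register state by generating code that stores
// every X, W, D, S and Q register plus NZCV into the dump_ structure. A few
// registers are needed as temporaries, so they are pushed first and their
// original values are written back into the dump afterwards, using a second
// pair of scratch registers whose own dumped values are used to restore them.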
void RegisterDump::Dump(MacroAssembler* masm) {
  VIXL_ASSERT(__ StackPointer().Is(sp));

  // Ensure that we don't unintentionally clobber any registers.
  UseScratchRegisterScope temps(masm);
  temps.ExcludeAll();

  // Preserve some temporary registers.
  Register dump_base = x0;
  Register dump = x1;
  Register tmp = x2;
  Register dump_base_w = dump_base.W();
  Register dump_w = dump.W();
  Register tmp_w = tmp.W();

  // Offsets into the dump_ structure.
  const int x_offset = offsetof(dump_t, x_);
  const int w_offset = offsetof(dump_t, w_);
  const int d_offset = offsetof(dump_t, d_);
  const int s_offset = offsetof(dump_t, s_);
  const int q_offset = offsetof(dump_t, q_);
  const int sp_offset = offsetof(dump_t, sp_);
  const int wsp_offset = offsetof(dump_t, wsp_);
  const int flags_offset = offsetof(dump_t, flags_);

  __ Push(xzr, dump_base, dump, tmp);

  // Load the address where we will dump the state.
  __ Mov(dump_base, reinterpret_cast<uintptr_t>(&dump_));

  // Dump the stack pointer (sp and wsp).
  // The stack pointer cannot be stored directly; it needs to be moved into
  // another register first. Also, we pushed four X registers, so we need to
  // compensate here.
  __ Add(tmp, sp, 4 * kXRegSizeInBytes);
  __ Str(tmp, MemOperand(dump_base, sp_offset));
  __ Add(tmp_w, wsp, 4 * kXRegSizeInBytes);
  __ Str(tmp_w, MemOperand(dump_base, wsp_offset));

  // Dump X registers.
  __ Add(dump, dump_base, x_offset);
  for (unsigned i = 0; i < kNumberOfRegisters; i += 2) {
    __ Stp(Register::GetXRegFromCode(i),
           Register::GetXRegFromCode(i + 1),
           MemOperand(dump, i * kXRegSizeInBytes));
  }

  // Dump W registers.
  __ Add(dump, dump_base, w_offset);
  for (unsigned i = 0; i < kNumberOfRegisters; i += 2) {
    __ Stp(Register::GetWRegFromCode(i),
           Register::GetWRegFromCode(i + 1),
           MemOperand(dump, i * kWRegSizeInBytes));
  }

  // Dump D registers.
  __ Add(dump, dump_base, d_offset);
  for (unsigned i = 0; i < kNumberOfFPRegisters; i += 2) {
    __ Stp(FPRegister::GetDRegFromCode(i),
           FPRegister::GetDRegFromCode(i + 1),
           MemOperand(dump, i * kDRegSizeInBytes));
  }

  // Dump S registers.
  __ Add(dump, dump_base, s_offset);
  for (unsigned i = 0; i < kNumberOfFPRegisters; i += 2) {
    __ Stp(FPRegister::GetSRegFromCode(i),
           FPRegister::GetSRegFromCode(i + 1),
           MemOperand(dump, i * kSRegSizeInBytes));
  }

  // Dump Q registers.
  __ Add(dump, dump_base, q_offset);
  for (unsigned i = 0; i < kNumberOfVRegisters; i += 2) {
    __ Stp(VRegister::GetQRegFromCode(i),
           VRegister::GetQRegFromCode(i + 1),
           MemOperand(dump, i * kQRegSizeInBytes));
  }

  // Dump the flags.
  __ Mrs(tmp, NZCV);
  __ Str(tmp, MemOperand(dump_base, flags_offset));

  // To dump the values that were in tmp, dump and dump_base, we need a new
  // scratch register. We can use any of the already dumped registers since we
  // can easily restore them.
  Register dump2_base = x10;
  Register dump2 = x11;
  VIXL_ASSERT(!AreAliased(dump_base, dump, tmp, dump2_base, dump2));

  // Don't lose the dump_ address.
  __ Mov(dump2_base, dump_base);

  __ Pop(tmp, dump, dump_base, xzr);

  __ Add(dump2, dump2_base, w_offset);
  __ Str(dump_base_w,
         MemOperand(dump2, dump_base.GetCode() * kWRegSizeInBytes));
  __ Str(dump_w, MemOperand(dump2, dump.GetCode() * kWRegSizeInBytes));
  __ Str(tmp_w, MemOperand(dump2, tmp.GetCode() * kWRegSizeInBytes));

  __ Add(dump2, dump2_base, x_offset);
  __ Str(dump_base, MemOperand(dump2, dump_base.GetCode() * kXRegSizeInBytes));
  __ Str(dump, MemOperand(dump2, dump.GetCode() * kXRegSizeInBytes));
  __ Str(tmp, MemOperand(dump2, tmp.GetCode() * kXRegSizeInBytes));

  // Finally, restore dump2_base and dump2.
  __ Ldr(dump2_base,
         MemOperand(dump2, dump2_base.GetCode() * kXRegSizeInBytes));
  __ Ldr(dump2, MemOperand(dump2, dump2.GetCode() * kXRegSizeInBytes));

  completed_ = true;
}

}  // namespace aarch64
}  // namespace vixl