/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "jni_macro_assembler_arm64.h"

#include "entrypoints/quick/quick_entrypoints.h"
#include "managed_register_arm64.h"
#include "offsets.h"
#include "thread.h"

using namespace vixl::aarch64;  // NOLINT(build/namespaces)

namespace art {
namespace arm64 {

#ifdef ___
#error "ARM64 Assembler macro already defined."
#else
#define ___   asm_.GetVIXLAssembler()->
#endif

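// Shorthands forwarding to Arm64Assembler's helpers that convert ART register enums
// into the corresponding VIXL register objects.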
#define reg_x(X) Arm64Assembler::reg_x(X)
#define reg_w(W) Arm64Assembler::reg_w(W)
#define reg_d(D) Arm64Assembler::reg_d(D)
#define reg_s(S) Arm64Assembler::reg_s(S)

Arm64JNIMacroAssembler::~Arm64JNIMacroAssembler() {
}

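// Emit the out-of-line slow paths for pending exception polls, then finalize the VIXL code buffer.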
void Arm64JNIMacroAssembler::FinalizeCode() {
  for (const std::unique_ptr<Arm64Exception>& exception : exception_blocks_) {
    EmitExceptionPoll(exception.get());
  }
  ___ FinalizeCode();
}

void Arm64JNIMacroAssembler::GetCurrentThread(ManagedRegister tr) {
  ___ Mov(reg_x(tr.AsArm64().AsXRegister()), reg_x(TR));
}

void Arm64JNIMacroAssembler::GetCurrentThread(FrameOffset offset, ManagedRegister /* scratch */) {
  StoreToOffset(TR, SP, offset.Int32Value());
}

// See Arm64 PCS Section 5.2.2.1.
void Arm64JNIMacroAssembler::IncreaseFrameSize(size_t adjust) {
  CHECK_ALIGNED(adjust, kStackAlignment);
  AddConstant(SP, -adjust);
  cfi().AdjustCFAOffset(adjust);
}

// See Arm64 PCS Section 5.2.2.1.
void Arm64JNIMacroAssembler::DecreaseFrameSize(size_t adjust) {
  CHECK_ALIGNED(adjust, kStackAlignment);
  AddConstant(SP, adjust);
  cfi().AdjustCFAOffset(-adjust);
}

void Arm64JNIMacroAssembler::AddConstant(XRegister rd, int32_t value, Condition cond) {
  AddConstant(rd, rd, value, cond);
}

void Arm64JNIMacroAssembler::AddConstant(XRegister rd,
                                         XRegister rn,
                                         int32_t value,
                                         Condition cond) {
  if ((cond == al) || (cond == nv)) {
    // VIXL macro-assembler handles all variants.
    ___ Add(reg_x(rd), reg_x(rn), value);
  } else {
    // temp = rn + value
    // rd = cond ? temp : rd
    UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
    temps.Exclude(reg_x(rd), reg_x(rn));
    Register temp = temps.AcquireX();
    ___ Add(temp, reg_x(rn), value);
    ___ Csel(reg_x(rd), temp, reg_x(rd), cond);
  }
}

void Arm64JNIMacroAssembler::StoreWToOffset(StoreOperandType type,
                                            WRegister source,
                                            XRegister base,
                                            int32_t offset) {
  switch (type) {
    case kStoreByte:
      ___ Strb(reg_w(source), MEM_OP(reg_x(base), offset));
      break;
    case kStoreHalfword:
      ___ Strh(reg_w(source), MEM_OP(reg_x(base), offset));
      break;
    case kStoreWord:
      ___ Str(reg_w(source), MEM_OP(reg_x(base), offset));
      break;
    default:
      LOG(FATAL) << "UNREACHABLE";
  }
}

void Arm64JNIMacroAssembler::StoreToOffset(XRegister source, XRegister base, int32_t offset) {
  CHECK_NE(source, SP);
  ___ Str(reg_x(source), MEM_OP(reg_x(base), offset));
}

void Arm64JNIMacroAssembler::StoreSToOffset(SRegister source, XRegister base, int32_t offset) {
  ___ Str(reg_s(source), MEM_OP(reg_x(base), offset));
}

void Arm64JNIMacroAssembler::StoreDToOffset(DRegister source, XRegister base, int32_t offset) {
  ___ Str(reg_d(source), MEM_OP(reg_x(base), offset));
}

void Arm64JNIMacroAssembler::Store(FrameOffset offs, ManagedRegister m_src, size_t size) {
  Arm64ManagedRegister src = m_src.AsArm64();
  if (src.IsNoRegister()) {
    CHECK_EQ(0u, size);
  } else if (src.IsWRegister()) {
    CHECK_EQ(4u, size);
    StoreWToOffset(kStoreWord, src.AsWRegister(), SP, offs.Int32Value());
  } else if (src.IsXRegister()) {
    CHECK_EQ(8u, size);
    StoreToOffset(src.AsXRegister(), SP, offs.Int32Value());
  } else if (src.IsSRegister()) {
    StoreSToOffset(src.AsSRegister(), SP, offs.Int32Value());
  } else {
    CHECK(src.IsDRegister()) << src;
    StoreDToOffset(src.AsDRegister(), SP, offs.Int32Value());
  }
}

void Arm64JNIMacroAssembler::StoreRef(FrameOffset offs, ManagedRegister m_src) {
  Arm64ManagedRegister src = m_src.AsArm64();
  CHECK(src.IsXRegister()) << src;
  StoreWToOffset(kStoreWord, src.AsOverlappingWRegister(), SP,
                 offs.Int32Value());
}

void Arm64JNIMacroAssembler::StoreRawPtr(FrameOffset offs, ManagedRegister m_src) {
  Arm64ManagedRegister src = m_src.AsArm64();
  CHECK(src.IsXRegister()) << src;
  StoreToOffset(src.AsXRegister(), SP, offs.Int32Value());
}

void Arm64JNIMacroAssembler::StoreImmediateToFrame(FrameOffset offs,
                                                   uint32_t imm,
                                                   ManagedRegister m_scratch) {
  Arm64ManagedRegister scratch = m_scratch.AsArm64();
  CHECK(scratch.IsXRegister()) << scratch;
  LoadImmediate(scratch.AsXRegister(), imm);
  StoreWToOffset(kStoreWord, scratch.AsOverlappingWRegister(), SP,
                 offs.Int32Value());
}

void Arm64JNIMacroAssembler::StoreStackOffsetToThread(ThreadOffset64 tr_offs,
                                                      FrameOffset fr_offs,
                                                      ManagedRegister m_scratch) {
  Arm64ManagedRegister scratch = m_scratch.AsArm64();
  CHECK(scratch.IsXRegister()) << scratch;
  AddConstant(scratch.AsXRegister(), SP, fr_offs.Int32Value());
  StoreToOffset(scratch.AsXRegister(), TR, tr_offs.Int32Value());
}

void Arm64JNIMacroAssembler::StoreStackPointerToThread(ThreadOffset64 tr_offs) {
  UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
  Register temp = temps.AcquireX();
  ___ Mov(temp, reg_x(SP));
  ___ Str(temp, MEM_OP(reg_x(TR), tr_offs.Int32Value()));
}

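// Store |m_source| at |dest_off| and copy the stack slot at |in_off| into the adjacent
// slot at |dest_off| + 8, using |m_scratch| for the copy.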
void Arm64JNIMacroAssembler::StoreSpanning(FrameOffset dest_off,
                                           ManagedRegister m_source,
                                           FrameOffset in_off,
                                           ManagedRegister m_scratch) {
  Arm64ManagedRegister source = m_source.AsArm64();
  Arm64ManagedRegister scratch = m_scratch.AsArm64();
  StoreToOffset(source.AsXRegister(), SP, dest_off.Int32Value());
  LoadFromOffset(scratch.AsXRegister(), SP, in_off.Int32Value());
  StoreToOffset(scratch.AsXRegister(), SP, dest_off.Int32Value() + 8);
}

// Load routines.
void Arm64JNIMacroAssembler::LoadImmediate(XRegister dest, int32_t value, Condition cond) {
  if ((cond == al) || (cond == nv)) {
    ___ Mov(reg_x(dest), value);
  } else {
    // temp = value
    // rd = cond ? temp : rd
    if (value != 0) {
      UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
      temps.Exclude(reg_x(dest));
      Register temp = temps.AcquireX();
      ___ Mov(temp, value);
      ___ Csel(reg_x(dest), temp, reg_x(dest), cond);
    } else {
      ___ Csel(reg_x(dest), reg_x(XZR), reg_x(dest), cond);
    }
  }
}

void Arm64JNIMacroAssembler::LoadWFromOffset(LoadOperandType type,
                                             WRegister dest,
                                             XRegister base,
                                             int32_t offset) {
  switch (type) {
    case kLoadSignedByte:
      ___ Ldrsb(reg_w(dest), MEM_OP(reg_x(base), offset));
      break;
    case kLoadSignedHalfword:
      ___ Ldrsh(reg_w(dest), MEM_OP(reg_x(base), offset));
      break;
    case kLoadUnsignedByte:
      ___ Ldrb(reg_w(dest), MEM_OP(reg_x(base), offset));
      break;
    case kLoadUnsignedHalfword:
      ___ Ldrh(reg_w(dest), MEM_OP(reg_x(base), offset));
      break;
    case kLoadWord:
      ___ Ldr(reg_w(dest), MEM_OP(reg_x(base), offset));
      break;
    default:
      LOG(FATAL) << "UNREACHABLE";
  }
}

// Note: We can extend this method by adding load type info - see
// sign-extended A64 load variants.
void Arm64JNIMacroAssembler::LoadFromOffset(XRegister dest, XRegister base, int32_t offset) {
  CHECK_NE(dest, SP);
  ___ Ldr(reg_x(dest), MEM_OP(reg_x(base), offset));
}

void Arm64JNIMacroAssembler::LoadSFromOffset(SRegister dest, XRegister base, int32_t offset) {
  ___ Ldr(reg_s(dest), MEM_OP(reg_x(base), offset));
}

void Arm64JNIMacroAssembler::LoadDFromOffset(DRegister dest, XRegister base, int32_t offset) {
  ___ Ldr(reg_d(dest), MEM_OP(reg_x(base), offset));
}

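// Load |size| bytes from [base + offset] into |dest|, choosing the access width
// from the destination register kind.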
void Arm64JNIMacroAssembler::Load(Arm64ManagedRegister dest,
                                  XRegister base,
                                  int32_t offset,
                                  size_t size) {
  if (dest.IsNoRegister()) {
    CHECK_EQ(0u, size) << dest;
  } else if (dest.IsWRegister()) {
    CHECK_EQ(4u, size) << dest;
    ___ Ldr(reg_w(dest.AsWRegister()), MEM_OP(reg_x(base), offset));
  } else if (dest.IsXRegister()) {
    CHECK_NE(dest.AsXRegister(), SP) << dest;

    if (size == 1u) {
      ___ Ldrb(reg_w(dest.AsOverlappingWRegister()), MEM_OP(reg_x(base), offset));
    } else if (size == 4u) {
      ___ Ldr(reg_w(dest.AsOverlappingWRegister()), MEM_OP(reg_x(base), offset));
    } else {
      CHECK_EQ(8u, size) << dest;
      ___ Ldr(reg_x(dest.AsXRegister()), MEM_OP(reg_x(base), offset));
    }
  } else if (dest.IsSRegister()) {
    ___ Ldr(reg_s(dest.AsSRegister()), MEM_OP(reg_x(base), offset));
  } else {
    CHECK(dest.IsDRegister()) << dest;
    ___ Ldr(reg_d(dest.AsDRegister()), MEM_OP(reg_x(base), offset));
  }
}

void Arm64JNIMacroAssembler::Load(ManagedRegister m_dst, FrameOffset src, size_t size) {
  return Load(m_dst.AsArm64(), SP, src.Int32Value(), size);
}

void Arm64JNIMacroAssembler::LoadFromThread(ManagedRegister m_dst,
                                            ThreadOffset64 src,
                                            size_t size) {
  return Load(m_dst.AsArm64(), TR, src.Int32Value(), size);
}

void Arm64JNIMacroAssembler::LoadRef(ManagedRegister m_dst, FrameOffset offs) {
  Arm64ManagedRegister dst = m_dst.AsArm64();
  CHECK(dst.IsXRegister()) << dst;
  LoadWFromOffset(kLoadWord, dst.AsOverlappingWRegister(), SP, offs.Int32Value());
}

void Arm64JNIMacroAssembler::LoadRef(ManagedRegister m_dst,
                                     ManagedRegister m_base,
                                     MemberOffset offs,
                                     bool unpoison_reference) {
  Arm64ManagedRegister dst = m_dst.AsArm64();
  Arm64ManagedRegister base = m_base.AsArm64();
  CHECK(dst.IsXRegister() && base.IsXRegister());
  LoadWFromOffset(kLoadWord, dst.AsOverlappingWRegister(), base.AsXRegister(),
                  offs.Int32Value());
  if (unpoison_reference) {
    WRegister ref_reg = dst.AsOverlappingWRegister();
    asm_.MaybeUnpoisonHeapReference(reg_w(ref_reg));
  }
}

void Arm64JNIMacroAssembler::LoadRawPtr(ManagedRegister m_dst,
                                        ManagedRegister m_base,
                                        Offset offs) {
  Arm64ManagedRegister dst = m_dst.AsArm64();
  Arm64ManagedRegister base = m_base.AsArm64();
  CHECK(dst.IsXRegister() && base.IsXRegister());
  // Remove dst and base from the temp list - the higher level API uses IP1, IP0.
  UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
  temps.Exclude(reg_x(dst.AsXRegister()), reg_x(base.AsXRegister()));
  ___ Ldr(reg_x(dst.AsXRegister()), MEM_OP(reg_x(base.AsXRegister()), offs.Int32Value()));
}

void Arm64JNIMacroAssembler::LoadRawPtrFromThread(ManagedRegister m_dst, ThreadOffset64 offs) {
  Arm64ManagedRegister dst = m_dst.AsArm64();
  CHECK(dst.IsXRegister()) << dst;
  LoadFromOffset(dst.AsXRegister(), TR, offs.Int32Value());
}

// Copying routines.
void Arm64JNIMacroAssembler::Move(ManagedRegister m_dst, ManagedRegister m_src, size_t size) {
  Arm64ManagedRegister dst = m_dst.AsArm64();
  Arm64ManagedRegister src = m_src.AsArm64();
  if (!dst.Equals(src)) {
    if (dst.IsXRegister()) {
      if (size == 4) {
        CHECK(src.IsWRegister());
        ___ Mov(reg_w(dst.AsOverlappingWRegister()), reg_w(src.AsWRegister()));
      } else {
        if (src.IsXRegister()) {
          ___ Mov(reg_x(dst.AsXRegister()), reg_x(src.AsXRegister()));
        } else {
          ___ Mov(reg_x(dst.AsXRegister()), reg_x(src.AsOverlappingXRegister()));
        }
      }
    } else if (dst.IsWRegister()) {
      CHECK(src.IsWRegister()) << src;
      ___ Mov(reg_w(dst.AsWRegister()), reg_w(src.AsWRegister()));
    } else if (dst.IsSRegister()) {
      CHECK(src.IsSRegister()) << src;
      ___ Fmov(reg_s(dst.AsSRegister()), reg_s(src.AsSRegister()));
    } else {
      CHECK(dst.IsDRegister()) << dst;
      CHECK(src.IsDRegister()) << src;
      ___ Fmov(reg_d(dst.AsDRegister()), reg_d(src.AsDRegister()));
    }
  }
}

void Arm64JNIMacroAssembler::CopyRawPtrFromThread(FrameOffset fr_offs,
                                                  ThreadOffset64 tr_offs,
                                                  ManagedRegister m_scratch) {
  Arm64ManagedRegister scratch = m_scratch.AsArm64();
  CHECK(scratch.IsXRegister()) << scratch;
  LoadFromOffset(scratch.AsXRegister(), TR, tr_offs.Int32Value());
  StoreToOffset(scratch.AsXRegister(), SP, fr_offs.Int32Value());
}

void Arm64JNIMacroAssembler::CopyRawPtrToThread(ThreadOffset64 tr_offs,
                                                FrameOffset fr_offs,
                                                ManagedRegister m_scratch) {
  Arm64ManagedRegister scratch = m_scratch.AsArm64();
  CHECK(scratch.IsXRegister()) << scratch;
  LoadFromOffset(scratch.AsXRegister(), SP, fr_offs.Int32Value());
  StoreToOffset(scratch.AsXRegister(), TR, tr_offs.Int32Value());
}

void Arm64JNIMacroAssembler::CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister m_scratch) {
  Arm64ManagedRegister scratch = m_scratch.AsArm64();
  CHECK(scratch.IsXRegister()) << scratch;
  LoadWFromOffset(kLoadWord, scratch.AsOverlappingWRegister(),
                  SP, src.Int32Value());
  StoreWToOffset(kStoreWord, scratch.AsOverlappingWRegister(),
                 SP, dest.Int32Value());
}

void Arm64JNIMacroAssembler::Copy(FrameOffset dest,
                                  FrameOffset src,
                                  ManagedRegister m_scratch,
                                  size_t size) {
  Arm64ManagedRegister scratch = m_scratch.AsArm64();
  CHECK(scratch.IsXRegister()) << scratch;
  CHECK(size == 4 || size == 8) << size;
  if (size == 4) {
    LoadWFromOffset(kLoadWord, scratch.AsOverlappingWRegister(), SP, src.Int32Value());
    StoreWToOffset(kStoreWord, scratch.AsOverlappingWRegister(), SP, dest.Int32Value());
  } else if (size == 8) {
    LoadFromOffset(scratch.AsXRegister(), SP, src.Int32Value());
    StoreToOffset(scratch.AsXRegister(), SP, dest.Int32Value());
  } else {
    UNIMPLEMENTED(FATAL) << "We only support Copy() of size 4 and 8";
  }
}

void Arm64JNIMacroAssembler::Copy(FrameOffset dest,
                                  ManagedRegister src_base,
                                  Offset src_offset,
                                  ManagedRegister m_scratch,
                                  size_t size) {
  Arm64ManagedRegister scratch = m_scratch.AsArm64();
  Arm64ManagedRegister base = src_base.AsArm64();
  CHECK(base.IsXRegister()) << base;
  CHECK(scratch.IsXRegister() || scratch.IsWRegister()) << scratch;
  CHECK(size == 4 || size == 8) << size;
  if (size == 4) {
    LoadWFromOffset(kLoadWord, scratch.AsWRegister(), base.AsXRegister(),
                    src_offset.Int32Value());
    StoreWToOffset(kStoreWord, scratch.AsWRegister(), SP, dest.Int32Value());
  } else if (size == 8) {
    LoadFromOffset(scratch.AsXRegister(), base.AsXRegister(), src_offset.Int32Value());
    StoreToOffset(scratch.AsXRegister(), SP, dest.Int32Value());
  } else {
    UNIMPLEMENTED(FATAL) << "We only support Copy() of size 4 and 8";
  }
}

void Arm64JNIMacroAssembler::Copy(ManagedRegister m_dest_base,
                                  Offset dest_offs,
                                  FrameOffset src,
                                  ManagedRegister m_scratch,
                                  size_t size) {
  Arm64ManagedRegister scratch = m_scratch.AsArm64();
  Arm64ManagedRegister base = m_dest_base.AsArm64();
  CHECK(base.IsXRegister()) << base;
  CHECK(scratch.IsXRegister() || scratch.IsWRegister()) << scratch;
  CHECK(size == 4 || size == 8) << size;
  if (size == 4) {
    LoadWFromOffset(kLoadWord, scratch.AsWRegister(), SP, src.Int32Value());
    StoreWToOffset(kStoreWord, scratch.AsWRegister(), base.AsXRegister(),
                   dest_offs.Int32Value());
  } else if (size == 8) {
    LoadFromOffset(scratch.AsXRegister(), SP, src.Int32Value());
    StoreToOffset(scratch.AsXRegister(), base.AsXRegister(), dest_offs.Int32Value());
  } else {
    UNIMPLEMENTED(FATAL) << "We only support Copy() of size 4 and 8";
  }
}

void Arm64JNIMacroAssembler::Copy(FrameOffset /*dst*/,
                                  FrameOffset /*src_base*/,
                                  Offset /*src_offset*/,
                                  ManagedRegister /*mscratch*/,
                                  size_t /*size*/) {
  UNIMPLEMENTED(FATAL) << "Unimplemented Copy() variant";
}

void Arm64JNIMacroAssembler::Copy(ManagedRegister m_dest,
                                  Offset dest_offset,
                                  ManagedRegister m_src,
                                  Offset src_offset,
                                  ManagedRegister m_scratch,
                                  size_t size) {
  Arm64ManagedRegister scratch = m_scratch.AsArm64();
  Arm64ManagedRegister src = m_src.AsArm64();
  Arm64ManagedRegister dest = m_dest.AsArm64();
  CHECK(dest.IsXRegister()) << dest;
  CHECK(src.IsXRegister()) << src;
  CHECK(scratch.IsXRegister() || scratch.IsWRegister()) << scratch;
  CHECK(size == 4 || size == 8) << size;
  if (size == 4) {
    if (scratch.IsWRegister()) {
      LoadWFromOffset(kLoadWord, scratch.AsWRegister(), src.AsXRegister(),
                      src_offset.Int32Value());
      StoreWToOffset(kStoreWord, scratch.AsWRegister(), dest.AsXRegister(),
                     dest_offset.Int32Value());
    } else {
      LoadWFromOffset(kLoadWord, scratch.AsOverlappingWRegister(), src.AsXRegister(),
                      src_offset.Int32Value());
      StoreWToOffset(kStoreWord, scratch.AsOverlappingWRegister(), dest.AsXRegister(),
                     dest_offset.Int32Value());
    }
  } else if (size == 8) {
    LoadFromOffset(scratch.AsXRegister(), src.AsXRegister(), src_offset.Int32Value());
    StoreToOffset(scratch.AsXRegister(), dest.AsXRegister(), dest_offset.Int32Value());
  } else {
    UNIMPLEMENTED(FATAL) << "We only support Copy() of size 4 and 8";
  }
}

void Arm64JNIMacroAssembler::Copy(FrameOffset /*dst*/,
                                  Offset /*dest_offset*/,
                                  FrameOffset /*src*/,
                                  Offset /*src_offset*/,
                                  ManagedRegister /*scratch*/,
                                  size_t /*size*/) {
  UNIMPLEMENTED(FATAL) << "Unimplemented Copy() variant";
}

void Arm64JNIMacroAssembler::MemoryBarrier(ManagedRegister m_scratch ATTRIBUTE_UNUSED) {
  // TODO: Should we check that m_scratch is IP? - see arm.
  ___ Dmb(InnerShareable, BarrierAll);
}

void Arm64JNIMacroAssembler::SignExtend(ManagedRegister mreg, size_t size) {
  Arm64ManagedRegister reg = mreg.AsArm64();
  CHECK(size == 1 || size == 2) << size;
  CHECK(reg.IsWRegister()) << reg;
  if (size == 1) {
    ___ Sxtb(reg_w(reg.AsWRegister()), reg_w(reg.AsWRegister()));
  } else {
    ___ Sxth(reg_w(reg.AsWRegister()), reg_w(reg.AsWRegister()));
  }
}

void Arm64JNIMacroAssembler::ZeroExtend(ManagedRegister mreg, size_t size) {
  Arm64ManagedRegister reg = mreg.AsArm64();
  CHECK(size == 1 || size == 2) << size;
  CHECK(reg.IsWRegister()) << reg;
  if (size == 1) {
    ___ Uxtb(reg_w(reg.AsWRegister()), reg_w(reg.AsWRegister()));
  } else {
    ___ Uxth(reg_w(reg.AsWRegister()), reg_w(reg.AsWRegister()));
  }
}

void Arm64JNIMacroAssembler::VerifyObject(ManagedRegister /*src*/, bool /*could_be_null*/) {
  // TODO: not validating references.
}

void Arm64JNIMacroAssembler::VerifyObject(FrameOffset /*src*/, bool /*could_be_null*/) {
  // TODO: not validating references.
}

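// Call *(base + offs); the target address is loaded into the scratch register first,
// and the return address is left in LR by Blr.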
void Arm64JNIMacroAssembler::Call(ManagedRegister m_base, Offset offs, ManagedRegister m_scratch) {
  Arm64ManagedRegister base = m_base.AsArm64();
  Arm64ManagedRegister scratch = m_scratch.AsArm64();
  CHECK(base.IsXRegister()) << base;
  CHECK(scratch.IsXRegister()) << scratch;
  LoadFromOffset(scratch.AsXRegister(), base.AsXRegister(), offs.Int32Value());
  ___ Blr(reg_x(scratch.AsXRegister()));
}

void Arm64JNIMacroAssembler::Call(FrameOffset base, Offset offs, ManagedRegister m_scratch) {
  Arm64ManagedRegister scratch = m_scratch.AsArm64();
  CHECK(scratch.IsXRegister()) << scratch;
  // Call *(*(SP + base) + offset)
  LoadFromOffset(scratch.AsXRegister(), SP, base.Int32Value());
  LoadFromOffset(scratch.AsXRegister(), scratch.AsXRegister(), offs.Int32Value());
  ___ Blr(reg_x(scratch.AsXRegister()));
}

void Arm64JNIMacroAssembler::CallFromThread(ThreadOffset64 offset ATTRIBUTE_UNUSED,
                                            ManagedRegister scratch ATTRIBUTE_UNUSED) {
  UNIMPLEMENTED(FATAL) << "Unimplemented Call() variant";
}

void Arm64JNIMacroAssembler::CreateHandleScopeEntry(ManagedRegister m_out_reg,
                                                    FrameOffset handle_scope_offs,
                                                    ManagedRegister m_in_reg,
                                                    bool null_allowed) {
  Arm64ManagedRegister out_reg = m_out_reg.AsArm64();
  Arm64ManagedRegister in_reg = m_in_reg.AsArm64();
  // For now we only hold stale handle scope entries in x registers.
  CHECK(in_reg.IsNoRegister() || in_reg.IsXRegister()) << in_reg;
  CHECK(out_reg.IsXRegister()) << out_reg;
  if (null_allowed) {
    // Null values get a handle scope entry value of 0.  Otherwise, the handle scope entry is
    // the address in the handle scope holding the reference.
    // e.g. out_reg = (handle == 0) ? 0 : (SP+handle_offset)
    if (in_reg.IsNoRegister()) {
      LoadWFromOffset(kLoadWord, out_reg.AsOverlappingWRegister(), SP,
                      handle_scope_offs.Int32Value());
      in_reg = out_reg;
    }
    ___ Cmp(reg_w(in_reg.AsOverlappingWRegister()), 0);
    if (!out_reg.Equals(in_reg)) {
      LoadImmediate(out_reg.AsXRegister(), 0, eq);
    }
    AddConstant(out_reg.AsXRegister(), SP, handle_scope_offs.Int32Value(), ne);
  } else {
    AddConstant(out_reg.AsXRegister(), SP, handle_scope_offs.Int32Value(), al);
  }
}

void Arm64JNIMacroAssembler::CreateHandleScopeEntry(FrameOffset out_off,
                                                    FrameOffset handle_scope_offset,
                                                    ManagedRegister m_scratch,
                                                    bool null_allowed) {
  Arm64ManagedRegister scratch = m_scratch.AsArm64();
  CHECK(scratch.IsXRegister()) << scratch;
  if (null_allowed) {
    LoadWFromOffset(kLoadWord, scratch.AsOverlappingWRegister(), SP,
                    handle_scope_offset.Int32Value());
    // Null values get a handle scope entry value of 0.  Otherwise, the handle scope entry is
    // the address in the handle scope holding the reference.
    // e.g. scratch = (scratch == 0) ? 0 : (SP+handle_scope_offset)
    ___ Cmp(reg_w(scratch.AsOverlappingWRegister()), 0);
    // TODO: Move this logic into AddConstant() with flags.
    AddConstant(scratch.AsXRegister(), SP, handle_scope_offset.Int32Value(), ne);
  } else {
    AddConstant(scratch.AsXRegister(), SP, handle_scope_offset.Int32Value(), al);
  }
  StoreToOffset(scratch.AsXRegister(), SP, out_off.Int32Value());
}

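// out_reg = (in_reg == null) ? null : *in_reg, i.e. dereference the handle scope entry
// unless it is null.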
void Arm64JNIMacroAssembler::LoadReferenceFromHandleScope(ManagedRegister m_out_reg,
                                                          ManagedRegister m_in_reg) {
  Arm64ManagedRegister out_reg = m_out_reg.AsArm64();
  Arm64ManagedRegister in_reg = m_in_reg.AsArm64();
  CHECK(out_reg.IsXRegister()) << out_reg;
  CHECK(in_reg.IsXRegister()) << in_reg;
  vixl::aarch64::Label exit;
  if (!out_reg.Equals(in_reg)) {
    // FIXME: Who sets the flags here?
    LoadImmediate(out_reg.AsXRegister(), 0, eq);
  }
  ___ Cbz(reg_x(in_reg.AsXRegister()), &exit);
  LoadFromOffset(out_reg.AsXRegister(), in_reg.AsXRegister(), 0);
  ___ Bind(&exit);
}

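// Poll Thread::Current()->exception_; if it is non-null, branch to a slow path that is
// emitted later in FinalizeCode().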
void Arm64JNIMacroAssembler::ExceptionPoll(ManagedRegister m_scratch, size_t stack_adjust) {
  CHECK_ALIGNED(stack_adjust, kStackAlignment);
  Arm64ManagedRegister scratch = m_scratch.AsArm64();
  exception_blocks_.emplace_back(new Arm64Exception(scratch, stack_adjust));
  LoadFromOffset(scratch.AsXRegister(),
                 TR,
                 Thread::ExceptionOffset<kArm64PointerSize>().Int32Value());
  ___ Cbnz(reg_x(scratch.AsXRegister()), exception_blocks_.back()->Entry());
}

std::unique_ptr<JNIMacroLabel> Arm64JNIMacroAssembler::CreateLabel() {
  return std::unique_ptr<JNIMacroLabel>(new Arm64JNIMacroLabel());
}

void Arm64JNIMacroAssembler::Jump(JNIMacroLabel* label) {
  CHECK(label != nullptr);
  ___ B(Arm64JNIMacroLabel::Cast(label)->AsArm64());
}

void Arm64JNIMacroAssembler::Jump(JNIMacroLabel* label,
                                  JNIMacroUnaryCondition condition,
                                  ManagedRegister test) {
  CHECK(label != nullptr);

  switch (condition) {
    case JNIMacroUnaryCondition::kZero:
      ___ Cbz(reg_x(test.AsArm64().AsXRegister()), Arm64JNIMacroLabel::Cast(label)->AsArm64());
      break;
    case JNIMacroUnaryCondition::kNotZero:
      ___ Cbnz(reg_x(test.AsArm64().AsXRegister()), Arm64JNIMacroLabel::Cast(label)->AsArm64());
      break;
    default:
      LOG(FATAL) << "Not implemented unary condition: " << static_cast<int>(condition);
      UNREACHABLE();
  }
}

void Arm64JNIMacroAssembler::Bind(JNIMacroLabel* label) {
  CHECK(label != nullptr);
  ___ Bind(Arm64JNIMacroLabel::Cast(label)->AsArm64());
}

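// Slow path for ExceptionPoll(): undo any extra stack adjustment, pass the pending
// exception in X0 and call pDeliverException, which never returns.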
void Arm64JNIMacroAssembler::EmitExceptionPoll(Arm64Exception* exception) {
  UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
  temps.Exclude(reg_x(exception->scratch_.AsXRegister()));
  Register temp = temps.AcquireX();

  // Bind exception poll entry.
  ___ Bind(exception->Entry());
  if (exception->stack_adjust_ != 0) {  // Fix up the frame.
    DecreaseFrameSize(exception->stack_adjust_);
  }
  // Pass exception object as argument.
  // Don't care about preserving X0 as this won't return.
  ___ Mov(reg_x(X0), reg_x(exception->scratch_.AsXRegister()));
  ___ Ldr(temp,
          MEM_OP(reg_x(TR),
                 QUICK_ENTRYPOINT_OFFSET(kArm64PointerSize, pDeliverException).Int32Value()));

  ___ Blr(temp);
  // Call should never return.
  ___ Brk();
}

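// Build the managed frame: reserve |frame_size| bytes, spill callee-saves at the top of
// the frame, store the ArtMethod* (X0) at SP, and write the entry spills to the stack
// above the frame.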
void Arm64JNIMacroAssembler::BuildFrame(size_t frame_size,
                                        ManagedRegister method_reg,
                                        ArrayRef<const ManagedRegister> callee_save_regs,
                                        const ManagedRegisterEntrySpills& entry_spills) {
  // Setup VIXL CPURegList for callee-saves.
  CPURegList core_reg_list(CPURegister::kRegister, kXRegSize, 0);
  CPURegList fp_reg_list(CPURegister::kFPRegister, kDRegSize, 0);
  for (auto r : callee_save_regs) {
    Arm64ManagedRegister reg = r.AsArm64();
    if (reg.IsXRegister()) {
      core_reg_list.Combine(reg_x(reg.AsXRegister()).GetCode());
    } else {
      DCHECK(reg.IsDRegister());
      fp_reg_list.Combine(reg_d(reg.AsDRegister()).GetCode());
    }
  }
  size_t core_reg_size = core_reg_list.GetTotalSizeInBytes();
  size_t fp_reg_size = fp_reg_list.GetTotalSizeInBytes();

  // Increase frame to required size.
  DCHECK_ALIGNED(frame_size, kStackAlignment);
  DCHECK_GE(frame_size, core_reg_size + fp_reg_size + static_cast<size_t>(kArm64PointerSize));
  IncreaseFrameSize(frame_size);

  // Save callee-saves.
  asm_.SpillRegisters(core_reg_list, frame_size - core_reg_size);
  asm_.SpillRegisters(fp_reg_list, frame_size - core_reg_size - fp_reg_size);

  DCHECK(core_reg_list.IncludesAliasOf(reg_x(TR)));

  // Write ArtMethod*
  DCHECK(X0 == method_reg.AsArm64().AsXRegister());
  StoreToOffset(X0, SP, 0);

  // Write out entry spills
  int32_t offset = frame_size + static_cast<size_t>(kArm64PointerSize);
  for (size_t i = 0; i < entry_spills.size(); ++i) {
    Arm64ManagedRegister reg = entry_spills.at(i).AsArm64();
    if (reg.IsNoRegister()) {
      // only increment stack offset.
      ManagedRegisterSpill spill = entry_spills.at(i);
      offset += spill.getSize();
    } else if (reg.IsXRegister()) {
      StoreToOffset(reg.AsXRegister(), SP, offset);
      offset += 8;
    } else if (reg.IsWRegister()) {
      StoreWToOffset(kStoreWord, reg.AsWRegister(), SP, offset);
      offset += 4;
    } else if (reg.IsDRegister()) {
      StoreDToOffset(reg.AsDRegister(), SP, offset);
      offset += 8;
    } else if (reg.IsSRegister()) {
      StoreSToOffset(reg.AsSRegister(), SP, offset);
      offset += 4;
    }
  }
}

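// Tear down the managed frame: restore callee-saves, refresh the Marking Register if
// needed (Baker read barriers), drop the frame and return to LR.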
void Arm64JNIMacroAssembler::RemoveFrame(size_t frame_size,
                                         ArrayRef<const ManagedRegister> callee_save_regs,
                                         bool may_suspend) {
  // Setup VIXL CPURegList for callee-saves.
  CPURegList core_reg_list(CPURegister::kRegister, kXRegSize, 0);
  CPURegList fp_reg_list(CPURegister::kFPRegister, kDRegSize, 0);
  for (auto r : callee_save_regs) {
    Arm64ManagedRegister reg = r.AsArm64();
    if (reg.IsXRegister()) {
      core_reg_list.Combine(reg_x(reg.AsXRegister()).GetCode());
    } else {
      DCHECK(reg.IsDRegister());
      fp_reg_list.Combine(reg_d(reg.AsDRegister()).GetCode());
    }
  }
  size_t core_reg_size = core_reg_list.GetTotalSizeInBytes();
  size_t fp_reg_size = fp_reg_list.GetTotalSizeInBytes();

  // For now we only check that the size of the frame is large enough to hold spills and method
  // reference.
  DCHECK_GE(frame_size, core_reg_size + fp_reg_size + static_cast<size_t>(kArm64PointerSize));
  DCHECK_ALIGNED(frame_size, kStackAlignment);

  DCHECK(core_reg_list.IncludesAliasOf(reg_x(TR)));

  cfi().RememberState();

  // Restore callee-saves.
  asm_.UnspillRegisters(core_reg_list, frame_size - core_reg_size);
  asm_.UnspillRegisters(fp_reg_list, frame_size - core_reg_size - fp_reg_size);

  if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
    vixl::aarch64::Register mr = reg_x(MR);  // Marking Register.
    vixl::aarch64::Register tr = reg_x(TR);  // Thread Register.

    if (may_suspend) {
      // The method may be suspended; refresh the Marking Register.
      ___ Ldr(mr.W(), MemOperand(tr, Thread::IsGcMarkingOffset<kArm64PointerSize>().Int32Value()));
    } else {
      // The method shall not be suspended; no need to refresh the Marking Register.

      // Check that the Marking Register is a callee-save register,
      // and thus has been preserved by native code following the
      // AAPCS64 calling convention.
      DCHECK(core_reg_list.IncludesAliasOf(mr))
          << "core_reg_list should contain Marking Register X" << mr.GetCode();

      // The following condition is a compile-time one, so it does not have a run-time cost.
      if (kIsDebugBuild) {
        // The following condition is a run-time one; it is executed after the
        // previous compile-time test, to avoid penalizing non-debug builds.
        if (emit_run_time_checks_in_debug_mode_) {
          // Emit a run-time check verifying that the Marking Register is up-to-date.
          UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
          Register temp = temps.AcquireW();
          // Ensure we are not clobbering a callee-save register that was restored before.
          DCHECK(!core_reg_list.IncludesAliasOf(temp.X()))
              << "core_reg_list should not contain scratch register X" << temp.GetCode();
          asm_.GenerateMarkingRegisterCheck(temp);
        }
      }
    }
  }

  // Decrease frame size to start of callee saved regs.
  DecreaseFrameSize(frame_size);

  // Return to LR.
  ___ Ret();

  // The CFI should be restored for any code that follows the exit block.
  cfi().RestoreState();
  cfi().DefCFAOffset(frame_size);
}

#undef ___

}  // namespace arm64
}  // namespace art