/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "jni_macro_assembler_arm64.h"

#include "base/logging.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "managed_register_arm64.h"
#include "offsets.h"
#include "thread.h"

using namespace vixl::aarch64;  // NOLINT(build/namespaces)

namespace art {
namespace arm64 {

#ifdef ___
#error "ARM64 Assembler macro already defined."
#else
#define ___   asm_.GetVIXLAssembler()->
#endif

#define reg_x(X) Arm64Assembler::reg_x(X)
#define reg_w(W) Arm64Assembler::reg_w(W)
#define reg_d(D) Arm64Assembler::reg_d(D)
#define reg_s(S) Arm64Assembler::reg_s(S)

Arm64JNIMacroAssembler::~Arm64JNIMacroAssembler() {
}

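// Emit the out-of-line exception poll slow paths accumulated by ExceptionPoll(), then let the
// VIXL macro-assembler finalize the generated code buffer.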
void Arm64JNIMacroAssembler::FinalizeCode() {
  for (const std::unique_ptr<Arm64Exception>& exception : exception_blocks_) {
    EmitExceptionPoll(exception.get());
  }
  ___ FinalizeCode();
}

void Arm64JNIMacroAssembler::GetCurrentThread(ManagedRegister tr) {
  ___ Mov(reg_x(tr.AsArm64().AsXRegister()), reg_x(TR));
}

void Arm64JNIMacroAssembler::GetCurrentThread(FrameOffset offset, ManagedRegister /* scratch */) {
  StoreToOffset(TR, SP, offset.Int32Value());
}

// See Arm64 PCS Section 5.2.2.1.
void Arm64JNIMacroAssembler::IncreaseFrameSize(size_t adjust) {
  CHECK_ALIGNED(adjust, kStackAlignment);
  AddConstant(SP, -adjust);
  cfi().AdjustCFAOffset(adjust);
}

// See Arm64 PCS Section 5.2.2.1.
void Arm64JNIMacroAssembler::DecreaseFrameSize(size_t adjust) {
  CHECK_ALIGNED(adjust, kStackAlignment);
  AddConstant(SP, adjust);
  cfi().AdjustCFAOffset(-adjust);
}

void Arm64JNIMacroAssembler::AddConstant(XRegister rd, int32_t value, Condition cond) {
  AddConstant(rd, rd, value, cond);
}

void Arm64JNIMacroAssembler::AddConstant(XRegister rd,
                                         XRegister rn,
                                         int32_t value,
                                         Condition cond) {
  if ((cond == al) || (cond == nv)) {
    // VIXL macro-assembler handles all variants.
    ___ Add(reg_x(rd), reg_x(rn), value);
  } else {
    // temp = rn + value
    // rd = cond ? temp : rd
    UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
    temps.Exclude(reg_x(rd), reg_x(rn));
    Register temp = temps.AcquireX();
    ___ Add(temp, reg_x(rn), value);
    ___ Csel(reg_x(rd), temp, reg_x(rd), cond);
  }
}

void Arm64JNIMacroAssembler::StoreWToOffset(StoreOperandType type,
                                            WRegister source,
                                            XRegister base,
                                            int32_t offset) {
  switch (type) {
    case kStoreByte:
      ___ Strb(reg_w(source), MEM_OP(reg_x(base), offset));
      break;
    case kStoreHalfword:
      ___ Strh(reg_w(source), MEM_OP(reg_x(base), offset));
      break;
    case kStoreWord:
      ___ Str(reg_w(source), MEM_OP(reg_x(base), offset));
      break;
    default:
      LOG(FATAL) << "UNREACHABLE";
  }
}

void Arm64JNIMacroAssembler::StoreToOffset(XRegister source, XRegister base, int32_t offset) {
  CHECK_NE(source, SP);
  ___ Str(reg_x(source), MEM_OP(reg_x(base), offset));
}

void Arm64JNIMacroAssembler::StoreSToOffset(SRegister source, XRegister base, int32_t offset) {
  ___ Str(reg_s(source), MEM_OP(reg_x(base), offset));
}

void Arm64JNIMacroAssembler::StoreDToOffset(DRegister source, XRegister base, int32_t offset) {
  ___ Str(reg_d(source), MEM_OP(reg_x(base), offset));
}

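// Store a managed register into the frame at the given offset, dispatching on the register kind
// (W, X, S or D) and checking that the requested size matches the GP register width.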
void Arm64JNIMacroAssembler::Store(FrameOffset offs, ManagedRegister m_src, size_t size) {
  Arm64ManagedRegister src = m_src.AsArm64();
  if (src.IsNoRegister()) {
    CHECK_EQ(0u, size);
  } else if (src.IsWRegister()) {
    CHECK_EQ(4u, size);
    StoreWToOffset(kStoreWord, src.AsWRegister(), SP, offs.Int32Value());
  } else if (src.IsXRegister()) {
    CHECK_EQ(8u, size);
    StoreToOffset(src.AsXRegister(), SP, offs.Int32Value());
  } else if (src.IsSRegister()) {
    StoreSToOffset(src.AsSRegister(), SP, offs.Int32Value());
  } else {
    CHECK(src.IsDRegister()) << src;
    StoreDToOffset(src.AsDRegister(), SP, offs.Int32Value());
  }
}

void Arm64JNIMacroAssembler::StoreRef(FrameOffset offs, ManagedRegister m_src) {
  Arm64ManagedRegister src = m_src.AsArm64();
  CHECK(src.IsXRegister()) << src;
  StoreWToOffset(kStoreWord, src.AsOverlappingWRegister(), SP,
                 offs.Int32Value());
}

void Arm64JNIMacroAssembler::StoreRawPtr(FrameOffset offs, ManagedRegister m_src) {
  Arm64ManagedRegister src = m_src.AsArm64();
  CHECK(src.IsXRegister()) << src;
  StoreToOffset(src.AsXRegister(), SP, offs.Int32Value());
}

void Arm64JNIMacroAssembler::StoreImmediateToFrame(FrameOffset offs,
                                                   uint32_t imm,
                                                   ManagedRegister m_scratch) {
  Arm64ManagedRegister scratch = m_scratch.AsArm64();
  CHECK(scratch.IsXRegister()) << scratch;
  LoadImmediate(scratch.AsXRegister(), imm);
  StoreWToOffset(kStoreWord, scratch.AsOverlappingWRegister(), SP,
                 offs.Int32Value());
}

void Arm64JNIMacroAssembler::StoreStackOffsetToThread(ThreadOffset64 tr_offs,
                                                      FrameOffset fr_offs,
                                                      ManagedRegister m_scratch) {
  Arm64ManagedRegister scratch = m_scratch.AsArm64();
  CHECK(scratch.IsXRegister()) << scratch;
  AddConstant(scratch.AsXRegister(), SP, fr_offs.Int32Value());
  StoreToOffset(scratch.AsXRegister(), TR, tr_offs.Int32Value());
}

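// Store the current SP into a Thread field. SP cannot be encoded as the transfer register of
// Str (register code 31 means XZR in that position), so it is copied through a temp first.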
void Arm64JNIMacroAssembler::StoreStackPointerToThread(ThreadOffset64 tr_offs) {
  UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
  Register temp = temps.AcquireX();
  ___ Mov(temp, reg_x(SP));
  ___ Str(temp, MEM_OP(reg_x(TR), tr_offs.Int32Value()));
}

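// Store the source register at dest_off and copy the stack slot at in_off into the adjacent
// slot at dest_off + 8, so the two values end up side by side in the frame.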
void Arm64JNIMacroAssembler::StoreSpanning(FrameOffset dest_off,
                                           ManagedRegister m_source,
                                           FrameOffset in_off,
                                           ManagedRegister m_scratch) {
  Arm64ManagedRegister source = m_source.AsArm64();
  Arm64ManagedRegister scratch = m_scratch.AsArm64();
  StoreToOffset(source.AsXRegister(), SP, dest_off.Int32Value());
  LoadFromOffset(scratch.AsXRegister(), SP, in_off.Int32Value());
  StoreToOffset(scratch.AsXRegister(), SP, dest_off.Int32Value() + 8);
}

// Load routines.
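// Load an immediate into dest. For a real condition code the value goes through a temp (or XZR
// when it is zero) and is committed with Csel, so dest only changes when the condition holds.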
void Arm64JNIMacroAssembler::LoadImmediate(XRegister dest, int32_t value, Condition cond) {
  if ((cond == al) || (cond == nv)) {
    ___ Mov(reg_x(dest), value);
  } else {
    // temp = value
    // rd = cond ? temp : rd
    if (value != 0) {
      UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
      temps.Exclude(reg_x(dest));
      Register temp = temps.AcquireX();
      ___ Mov(temp, value);
      ___ Csel(reg_x(dest), temp, reg_x(dest), cond);
    } else {
      ___ Csel(reg_x(dest), reg_x(XZR), reg_x(dest), cond);
    }
  }
}

void Arm64JNIMacroAssembler::LoadWFromOffset(LoadOperandType type,
                                             WRegister dest,
                                             XRegister base,
                                             int32_t offset) {
  switch (type) {
    case kLoadSignedByte:
      ___ Ldrsb(reg_w(dest), MEM_OP(reg_x(base), offset));
      break;
    case kLoadSignedHalfword:
      ___ Ldrsh(reg_w(dest), MEM_OP(reg_x(base), offset));
      break;
    case kLoadUnsignedByte:
      ___ Ldrb(reg_w(dest), MEM_OP(reg_x(base), offset));
      break;
    case kLoadUnsignedHalfword:
      ___ Ldrh(reg_w(dest), MEM_OP(reg_x(base), offset));
      break;
    case kLoadWord:
      ___ Ldr(reg_w(dest), MEM_OP(reg_x(base), offset));
      break;
    default:
      LOG(FATAL) << "UNREACHABLE";
  }
}

// Note: This method can be extended with load type information - see the
// sign-extended A64 load variants.
void Arm64JNIMacroAssembler::LoadFromOffset(XRegister dest, XRegister base, int32_t offset) {
  CHECK_NE(dest, SP);
  ___ Ldr(reg_x(dest), MEM_OP(reg_x(base), offset));
}

void Arm64JNIMacroAssembler::LoadSFromOffset(SRegister dest, XRegister base, int32_t offset) {
  ___ Ldr(reg_s(dest), MEM_OP(reg_x(base), offset));
}

void Arm64JNIMacroAssembler::LoadDFromOffset(DRegister dest, XRegister base, int32_t offset) {
  ___ Ldr(reg_d(dest), MEM_OP(reg_x(base), offset));
}

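// Common load helper: dispatch on the destination register kind and the requested size. For an
// X register destination, sizes 1 and 4 load into the overlapping W register (zero-extending),
// while size 8 loads the full X register.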
void Arm64JNIMacroAssembler::Load(Arm64ManagedRegister dest,
                                  XRegister base,
                                  int32_t offset,
                                  size_t size) {
  if (dest.IsNoRegister()) {
    CHECK_EQ(0u, size) << dest;
  } else if (dest.IsWRegister()) {
    CHECK_EQ(4u, size) << dest;
    ___ Ldr(reg_w(dest.AsWRegister()), MEM_OP(reg_x(base), offset));
  } else if (dest.IsXRegister()) {
    CHECK_NE(dest.AsXRegister(), SP) << dest;

    if (size == 1u) {
      ___ Ldrb(reg_w(dest.AsOverlappingWRegister()), MEM_OP(reg_x(base), offset));
    } else if (size == 4u) {
      ___ Ldr(reg_w(dest.AsOverlappingWRegister()), MEM_OP(reg_x(base), offset));
    } else {
      CHECK_EQ(8u, size) << dest;
      ___ Ldr(reg_x(dest.AsXRegister()), MEM_OP(reg_x(base), offset));
    }
  } else if (dest.IsSRegister()) {
    ___ Ldr(reg_s(dest.AsSRegister()), MEM_OP(reg_x(base), offset));
  } else {
    CHECK(dest.IsDRegister()) << dest;
    ___ Ldr(reg_d(dest.AsDRegister()), MEM_OP(reg_x(base), offset));
  }
}

void Arm64JNIMacroAssembler::Load(ManagedRegister m_dst, FrameOffset src, size_t size) {
  return Load(m_dst.AsArm64(), SP, src.Int32Value(), size);
}

void Arm64JNIMacroAssembler::LoadFromThread(ManagedRegister m_dst,
                                            ThreadOffset64 src,
                                            size_t size) {
  return Load(m_dst.AsArm64(), TR, src.Int32Value(), size);
}

void Arm64JNIMacroAssembler::LoadRef(ManagedRegister m_dst, FrameOffset offs) {
  Arm64ManagedRegister dst = m_dst.AsArm64();
  CHECK(dst.IsXRegister()) << dst;
  LoadWFromOffset(kLoadWord, dst.AsOverlappingWRegister(), SP, offs.Int32Value());
}

void Arm64JNIMacroAssembler::LoadRef(ManagedRegister m_dst,
                                     ManagedRegister m_base,
                                     MemberOffset offs,
                                     bool unpoison_reference) {
  Arm64ManagedRegister dst = m_dst.AsArm64();
  Arm64ManagedRegister base = m_base.AsArm64();
  CHECK(dst.IsXRegister() && base.IsXRegister());
  LoadWFromOffset(kLoadWord, dst.AsOverlappingWRegister(), base.AsXRegister(),
                  offs.Int32Value());
  if (unpoison_reference) {
    WRegister ref_reg = dst.AsOverlappingWRegister();
    asm_.MaybeUnpoisonHeapReference(reg_w(ref_reg));
  }
}

void Arm64JNIMacroAssembler::LoadRawPtr(ManagedRegister m_dst,
                                        ManagedRegister m_base,
                                        Offset offs) {
  Arm64ManagedRegister dst = m_dst.AsArm64();
  Arm64ManagedRegister base = m_base.AsArm64();
  CHECK(dst.IsXRegister() && base.IsXRegister());
  // Remove dst and base from the temp list - higher level API uses IP1, IP0.
  UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
  temps.Exclude(reg_x(dst.AsXRegister()), reg_x(base.AsXRegister()));
  ___ Ldr(reg_x(dst.AsXRegister()), MEM_OP(reg_x(base.AsXRegister()), offs.Int32Value()));
}

void Arm64JNIMacroAssembler::LoadRawPtrFromThread(ManagedRegister m_dst, ThreadOffset64 offs) {
  Arm64ManagedRegister dst = m_dst.AsArm64();
  CHECK(dst.IsXRegister()) << dst;
  LoadFromOffset(dst.AsXRegister(), TR, offs.Int32Value());
}

// Copying routines.
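// Register-to-register move of the given size: GP registers use Mov, FP registers use Fmov.
// No code is emitted when dst and src name the same register.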
void Arm64JNIMacroAssembler::Move(ManagedRegister m_dst, ManagedRegister m_src, size_t size) {
  Arm64ManagedRegister dst = m_dst.AsArm64();
  Arm64ManagedRegister src = m_src.AsArm64();
  if (!dst.Equals(src)) {
    if (dst.IsXRegister()) {
      if (size == 4) {
        CHECK(src.IsWRegister());
        ___ Mov(reg_w(dst.AsOverlappingWRegister()), reg_w(src.AsWRegister()));
      } else {
        if (src.IsXRegister()) {
          ___ Mov(reg_x(dst.AsXRegister()), reg_x(src.AsXRegister()));
        } else {
          ___ Mov(reg_x(dst.AsXRegister()), reg_x(src.AsOverlappingXRegister()));
        }
      }
    } else if (dst.IsWRegister()) {
      CHECK(src.IsWRegister()) << src;
      ___ Mov(reg_w(dst.AsWRegister()), reg_w(src.AsWRegister()));
    } else if (dst.IsSRegister()) {
      CHECK(src.IsSRegister()) << src;
      ___ Fmov(reg_s(dst.AsSRegister()), reg_s(src.AsSRegister()));
    } else {
      CHECK(dst.IsDRegister()) << dst;
      CHECK(src.IsDRegister()) << src;
      ___ Fmov(reg_d(dst.AsDRegister()), reg_d(src.AsDRegister()));
    }
  }
}

void Arm64JNIMacroAssembler::CopyRawPtrFromThread(FrameOffset fr_offs,
                                                  ThreadOffset64 tr_offs,
                                                  ManagedRegister m_scratch) {
  Arm64ManagedRegister scratch = m_scratch.AsArm64();
  CHECK(scratch.IsXRegister()) << scratch;
  LoadFromOffset(scratch.AsXRegister(), TR, tr_offs.Int32Value());
  StoreToOffset(scratch.AsXRegister(), SP, fr_offs.Int32Value());
}

void Arm64JNIMacroAssembler::CopyRawPtrToThread(ThreadOffset64 tr_offs,
                                                FrameOffset fr_offs,
                                                ManagedRegister m_scratch) {
  Arm64ManagedRegister scratch = m_scratch.AsArm64();
  CHECK(scratch.IsXRegister()) << scratch;
  LoadFromOffset(scratch.AsXRegister(), SP, fr_offs.Int32Value());
  StoreToOffset(scratch.AsXRegister(), TR, tr_offs.Int32Value());
}

void Arm64JNIMacroAssembler::CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister m_scratch) {
  Arm64ManagedRegister scratch = m_scratch.AsArm64();
  CHECK(scratch.IsXRegister()) << scratch;
  LoadWFromOffset(kLoadWord, scratch.AsOverlappingWRegister(),
                  SP, src.Int32Value());
  StoreWToOffset(kStoreWord, scratch.AsOverlappingWRegister(),
                 SP, dest.Int32Value());
}

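// Memory-to-memory copies of 4 or 8 bytes, implemented as a load into the scratch register
// followed by a store. The variants below differ only in whether source and destination are
// frame slots or offsets from a base register.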
void Arm64JNIMacroAssembler::Copy(FrameOffset dest,
                                  FrameOffset src,
                                  ManagedRegister m_scratch,
                                  size_t size) {
  Arm64ManagedRegister scratch = m_scratch.AsArm64();
  CHECK(scratch.IsXRegister()) << scratch;
  CHECK(size == 4 || size == 8) << size;
  if (size == 4) {
    LoadWFromOffset(kLoadWord, scratch.AsOverlappingWRegister(), SP, src.Int32Value());
    StoreWToOffset(kStoreWord, scratch.AsOverlappingWRegister(), SP, dest.Int32Value());
  } else if (size == 8) {
    LoadFromOffset(scratch.AsXRegister(), SP, src.Int32Value());
    StoreToOffset(scratch.AsXRegister(), SP, dest.Int32Value());
  } else {
    UNIMPLEMENTED(FATAL) << "We only support Copy() of size 4 and 8";
  }
}

void Arm64JNIMacroAssembler::Copy(FrameOffset dest,
                                  ManagedRegister src_base,
                                  Offset src_offset,
                                  ManagedRegister m_scratch,
                                  size_t size) {
  Arm64ManagedRegister scratch = m_scratch.AsArm64();
  Arm64ManagedRegister base = src_base.AsArm64();
  CHECK(base.IsXRegister()) << base;
  CHECK(scratch.IsXRegister() || scratch.IsWRegister()) << scratch;
  CHECK(size == 4 || size == 8) << size;
  if (size == 4) {
    LoadWFromOffset(kLoadWord, scratch.AsWRegister(), base.AsXRegister(),
                    src_offset.Int32Value());
    StoreWToOffset(kStoreWord, scratch.AsWRegister(), SP, dest.Int32Value());
  } else if (size == 8) {
    LoadFromOffset(scratch.AsXRegister(), base.AsXRegister(), src_offset.Int32Value());
    StoreToOffset(scratch.AsXRegister(), SP, dest.Int32Value());
  } else {
    UNIMPLEMENTED(FATAL) << "We only support Copy() of size 4 and 8";
  }
}

void Arm64JNIMacroAssembler::Copy(ManagedRegister m_dest_base,
                                  Offset dest_offs,
                                  FrameOffset src,
                                  ManagedRegister m_scratch,
                                  size_t size) {
  Arm64ManagedRegister scratch = m_scratch.AsArm64();
  Arm64ManagedRegister base = m_dest_base.AsArm64();
  CHECK(base.IsXRegister()) << base;
  CHECK(scratch.IsXRegister() || scratch.IsWRegister()) << scratch;
  CHECK(size == 4 || size == 8) << size;
  if (size == 4) {
    LoadWFromOffset(kLoadWord, scratch.AsWRegister(), SP, src.Int32Value());
    StoreWToOffset(kStoreWord, scratch.AsWRegister(), base.AsXRegister(),
                   dest_offs.Int32Value());
  } else if (size == 8) {
    LoadFromOffset(scratch.AsXRegister(), SP, src.Int32Value());
    StoreToOffset(scratch.AsXRegister(), base.AsXRegister(), dest_offs.Int32Value());
  } else {
    UNIMPLEMENTED(FATAL) << "We only support Copy() of size 4 and 8";
  }
}

void Arm64JNIMacroAssembler::Copy(FrameOffset /*dst*/,
                                  FrameOffset /*src_base*/,
                                  Offset /*src_offset*/,
                                  ManagedRegister /*mscratch*/,
                                  size_t /*size*/) {
  UNIMPLEMENTED(FATAL) << "Unimplemented Copy() variant";
}

void Arm64JNIMacroAssembler::Copy(ManagedRegister m_dest,
                                  Offset dest_offset,
                                  ManagedRegister m_src,
                                  Offset src_offset,
                                  ManagedRegister m_scratch,
                                  size_t size) {
  Arm64ManagedRegister scratch = m_scratch.AsArm64();
  Arm64ManagedRegister src = m_src.AsArm64();
  Arm64ManagedRegister dest = m_dest.AsArm64();
  CHECK(dest.IsXRegister()) << dest;
  CHECK(src.IsXRegister()) << src;
  CHECK(scratch.IsXRegister() || scratch.IsWRegister()) << scratch;
  CHECK(size == 4 || size == 8) << size;
  if (size == 4) {
    if (scratch.IsWRegister()) {
      LoadWFromOffset(kLoadWord, scratch.AsWRegister(), src.AsXRegister(),
                      src_offset.Int32Value());
      StoreWToOffset(kStoreWord, scratch.AsWRegister(), dest.AsXRegister(),
                     dest_offset.Int32Value());
    } else {
      LoadWFromOffset(kLoadWord, scratch.AsOverlappingWRegister(), src.AsXRegister(),
                      src_offset.Int32Value());
      StoreWToOffset(kStoreWord, scratch.AsOverlappingWRegister(), dest.AsXRegister(),
                     dest_offset.Int32Value());
    }
  } else if (size == 8) {
    LoadFromOffset(scratch.AsXRegister(), src.AsXRegister(), src_offset.Int32Value());
    StoreToOffset(scratch.AsXRegister(), dest.AsXRegister(), dest_offset.Int32Value());
  } else {
    UNIMPLEMENTED(FATAL) << "We only support Copy() of size 4 and 8";
  }
}

void Arm64JNIMacroAssembler::Copy(FrameOffset /*dst*/,
                                  Offset /*dest_offset*/,
                                  FrameOffset /*src*/,
                                  Offset /*src_offset*/,
                                  ManagedRegister /*scratch*/,
                                  size_t /*size*/) {
  UNIMPLEMENTED(FATAL) << "Unimplemented Copy() variant";
}

void Arm64JNIMacroAssembler::MemoryBarrier(ManagedRegister m_scratch ATTRIBUTE_UNUSED) {
  // TODO: Should we check that m_scratch is IP? - see arm.
  ___ Dmb(InnerShareable, BarrierAll);
}

void Arm64JNIMacroAssembler::SignExtend(ManagedRegister mreg, size_t size) {
  Arm64ManagedRegister reg = mreg.AsArm64();
  CHECK(size == 1 || size == 2) << size;
  CHECK(reg.IsWRegister()) << reg;
  if (size == 1) {
    ___ Sxtb(reg_w(reg.AsWRegister()), reg_w(reg.AsWRegister()));
  } else {
    ___ Sxth(reg_w(reg.AsWRegister()), reg_w(reg.AsWRegister()));
  }
}

void Arm64JNIMacroAssembler::ZeroExtend(ManagedRegister mreg, size_t size) {
  Arm64ManagedRegister reg = mreg.AsArm64();
  CHECK(size == 1 || size == 2) << size;
  CHECK(reg.IsWRegister()) << reg;
  if (size == 1) {
    ___ Uxtb(reg_w(reg.AsWRegister()), reg_w(reg.AsWRegister()));
  } else {
    ___ Uxth(reg_w(reg.AsWRegister()), reg_w(reg.AsWRegister()));
  }
}

void Arm64JNIMacroAssembler::VerifyObject(ManagedRegister /*src*/, bool /*could_be_null*/) {
  // TODO: not validating references.
}

void Arm64JNIMacroAssembler::VerifyObject(FrameOffset /*src*/, bool /*could_be_null*/) {
  // TODO: not validating references.
}

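// Call through a code pointer: load the target from [base + offs] (or from a frame slot for the
// FrameOffset variant) into the scratch register and branch-with-link to it.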
void Arm64JNIMacroAssembler::Call(ManagedRegister m_base, Offset offs, ManagedRegister m_scratch) {
  Arm64ManagedRegister base = m_base.AsArm64();
  Arm64ManagedRegister scratch = m_scratch.AsArm64();
  CHECK(base.IsXRegister()) << base;
  CHECK(scratch.IsXRegister()) << scratch;
  LoadFromOffset(scratch.AsXRegister(), base.AsXRegister(), offs.Int32Value());
  ___ Blr(reg_x(scratch.AsXRegister()));
}

void Arm64JNIMacroAssembler::Call(FrameOffset base, Offset offs, ManagedRegister m_scratch) {
  Arm64ManagedRegister scratch = m_scratch.AsArm64();
  CHECK(scratch.IsXRegister()) << scratch;
  // Call *(*(SP + base) + offset)
  LoadFromOffset(scratch.AsXRegister(), SP, base.Int32Value());
  LoadFromOffset(scratch.AsXRegister(), scratch.AsXRegister(), offs.Int32Value());
  ___ Blr(reg_x(scratch.AsXRegister()));
}

void Arm64JNIMacroAssembler::CallFromThread(ThreadOffset64 offset ATTRIBUTE_UNUSED,
                                            ManagedRegister scratch ATTRIBUTE_UNUSED) {
  UNIMPLEMENTED(FATAL) << "Unimplemented Call() variant";
}

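// Create a handle scope entry argument: the result is either null (when the reference is null
// and null_allowed is set) or the address SP + handle_scope_offs of the handle scope slot that
// holds the reference.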
void Arm64JNIMacroAssembler::CreateHandleScopeEntry(ManagedRegister m_out_reg,
                                                    FrameOffset handle_scope_offs,
                                                    ManagedRegister m_in_reg,
                                                    bool null_allowed) {
  Arm64ManagedRegister out_reg = m_out_reg.AsArm64();
  Arm64ManagedRegister in_reg = m_in_reg.AsArm64();
  // For now we only hold stale handle scope entries in x registers.
  CHECK(in_reg.IsNoRegister() || in_reg.IsXRegister()) << in_reg;
  CHECK(out_reg.IsXRegister()) << out_reg;
  if (null_allowed) {
    // Null values get a handle scope entry value of 0.  Otherwise, the handle scope entry is
    // the address in the handle scope holding the reference.
    // e.g. out_reg = (handle == 0) ? 0 : (SP+handle_offset)
    if (in_reg.IsNoRegister()) {
      LoadWFromOffset(kLoadWord, out_reg.AsOverlappingWRegister(), SP,
                      handle_scope_offs.Int32Value());
      in_reg = out_reg;
    }
    ___ Cmp(reg_w(in_reg.AsOverlappingWRegister()), 0);
    if (!out_reg.Equals(in_reg)) {
      LoadImmediate(out_reg.AsXRegister(), 0, eq);
    }
    AddConstant(out_reg.AsXRegister(), SP, handle_scope_offs.Int32Value(), ne);
  } else {
    AddConstant(out_reg.AsXRegister(), SP, handle_scope_offs.Int32Value(), al);
  }
}

void Arm64JNIMacroAssembler::CreateHandleScopeEntry(FrameOffset out_off,
                                                    FrameOffset handle_scope_offset,
                                                    ManagedRegister m_scratch,
                                                    bool null_allowed) {
  Arm64ManagedRegister scratch = m_scratch.AsArm64();
  CHECK(scratch.IsXRegister()) << scratch;
  if (null_allowed) {
    LoadWFromOffset(kLoadWord, scratch.AsOverlappingWRegister(), SP,
                    handle_scope_offset.Int32Value());
    // Null values get a handle scope entry value of 0.  Otherwise, the handle scope entry is
    // the address in the handle scope holding the reference.
    // e.g. scratch = (scratch == 0) ? 0 : (SP+handle_scope_offset)
    ___ Cmp(reg_w(scratch.AsOverlappingWRegister()), 0);
    // TODO: Move this logic into an AddConstant() variant that uses the flags.
    AddConstant(scratch.AsXRegister(), SP, handle_scope_offset.Int32Value(), ne);
  } else {
    AddConstant(scratch.AsXRegister(), SP, handle_scope_offset.Int32Value(), al);
  }
  StoreToOffset(scratch.AsXRegister(), SP, out_off.Int32Value());
}

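// Dereference a handle scope entry: if in_reg is null the result is null, otherwise the
// referenced object is loaded from the address held in in_reg.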
void Arm64JNIMacroAssembler::LoadReferenceFromHandleScope(ManagedRegister m_out_reg,
                                                          ManagedRegister m_in_reg) {
  Arm64ManagedRegister out_reg = m_out_reg.AsArm64();
  Arm64ManagedRegister in_reg = m_in_reg.AsArm64();
  CHECK(out_reg.IsXRegister()) << out_reg;
  CHECK(in_reg.IsXRegister()) << in_reg;
  vixl::aarch64::Label exit;
  if (!out_reg.Equals(in_reg)) {
    // FIXME: Who sets the flags here?
    LoadImmediate(out_reg.AsXRegister(), 0, eq);
  }
  ___ Cbz(reg_x(in_reg.AsXRegister()), &exit);
  LoadFromOffset(out_reg.AsXRegister(), in_reg.AsXRegister(), 0);
  ___ Bind(&exit);
}

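// Poll for a pending exception: load Thread::exception_ and, if it is non-null, branch to an
// out-of-line slow path that is emitted later by EmitExceptionPoll().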
void Arm64JNIMacroAssembler::ExceptionPoll(ManagedRegister m_scratch, size_t stack_adjust) {
  CHECK_ALIGNED(stack_adjust, kStackAlignment);
  Arm64ManagedRegister scratch = m_scratch.AsArm64();
  exception_blocks_.emplace_back(new Arm64Exception(scratch, stack_adjust));
  LoadFromOffset(scratch.AsXRegister(),
                 TR,
                 Thread::ExceptionOffset<kArm64PointerSize>().Int32Value());
  ___ Cbnz(reg_x(scratch.AsXRegister()), exception_blocks_.back()->Entry());
}

std::unique_ptr<JNIMacroLabel> Arm64JNIMacroAssembler::CreateLabel() {
  return std::unique_ptr<JNIMacroLabel>(new Arm64JNIMacroLabel());
}

void Arm64JNIMacroAssembler::Jump(JNIMacroLabel* label) {
  CHECK(label != nullptr);
  ___ B(Arm64JNIMacroLabel::Cast(label)->AsArm64());
}

void Arm64JNIMacroAssembler::Jump(JNIMacroLabel* label,
                                  JNIMacroUnaryCondition condition,
                                  ManagedRegister test) {
  CHECK(label != nullptr);

  switch (condition) {
    case JNIMacroUnaryCondition::kZero:
      ___ Cbz(reg_x(test.AsArm64().AsXRegister()), Arm64JNIMacroLabel::Cast(label)->AsArm64());
      break;
    case JNIMacroUnaryCondition::kNotZero:
      ___ Cbnz(reg_x(test.AsArm64().AsXRegister()), Arm64JNIMacroLabel::Cast(label)->AsArm64());
      break;
    default:
      LOG(FATAL) << "Not implemented unary condition: " << static_cast<int>(condition);
      UNREACHABLE();
  }
}

void Arm64JNIMacroAssembler::Bind(JNIMacroLabel* label) {
  CHECK(label != nullptr);
  ___ Bind(Arm64JNIMacroLabel::Cast(label)->AsArm64());
}

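// Emit the slow path for one pending-exception check: undo any extra stack adjustment, pass the
// exception object in X0 and call the pDeliverException runtime entrypoint, which does not
// return.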
void Arm64JNIMacroAssembler::EmitExceptionPoll(Arm64Exception* exception) {
  UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
  temps.Exclude(reg_x(exception->scratch_.AsXRegister()));
  Register temp = temps.AcquireX();

  // Bind exception poll entry.
  ___ Bind(exception->Entry());
  if (exception->stack_adjust_ != 0) {  // Fix up the frame.
    DecreaseFrameSize(exception->stack_adjust_);
  }
  // Pass exception object as argument.
  // Don't care about preserving X0 as this won't return.
  ___ Mov(reg_x(X0), reg_x(exception->scratch_.AsXRegister()));
  ___ Ldr(temp,
          MEM_OP(reg_x(TR),
                 QUICK_ENTRYPOINT_OFFSET(kArm64PointerSize, pDeliverException).Int32Value()));

  ___ Blr(temp);
  // Call should never return.
  ___ Brk();
}

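// Build the managed-to-native stack frame: bump SP by frame_size, spill the callee-saved core
// and FP registers at the top of the frame, store the ArtMethod* at SP, and write the incoming
// argument spills to the slots just above the frame.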
void Arm64JNIMacroAssembler::BuildFrame(size_t frame_size,
                                        ManagedRegister method_reg,
                                        ArrayRef<const ManagedRegister> callee_save_regs,
                                        const ManagedRegisterEntrySpills& entry_spills) {
  // Setup VIXL CPURegList for callee-saves.
  CPURegList core_reg_list(CPURegister::kRegister, kXRegSize, 0);
  CPURegList fp_reg_list(CPURegister::kFPRegister, kDRegSize, 0);
  for (auto r : callee_save_regs) {
    Arm64ManagedRegister reg = r.AsArm64();
    if (reg.IsXRegister()) {
      core_reg_list.Combine(reg_x(reg.AsXRegister()).GetCode());
    } else {
      DCHECK(reg.IsDRegister());
      fp_reg_list.Combine(reg_d(reg.AsDRegister()).GetCode());
    }
  }
  size_t core_reg_size = core_reg_list.GetTotalSizeInBytes();
  size_t fp_reg_size = fp_reg_list.GetTotalSizeInBytes();

  // Increase frame to required size.
  DCHECK_ALIGNED(frame_size, kStackAlignment);
  DCHECK_GE(frame_size, core_reg_size + fp_reg_size + static_cast<size_t>(kArm64PointerSize));
  IncreaseFrameSize(frame_size);

  // Save callee-saves.
  asm_.SpillRegisters(core_reg_list, frame_size - core_reg_size);
  asm_.SpillRegisters(fp_reg_list, frame_size - core_reg_size - fp_reg_size);

  DCHECK(core_reg_list.IncludesAliasOf(reg_x(TR)));

  // Write ArtMethod*
  DCHECK(X0 == method_reg.AsArm64().AsXRegister());
  StoreToOffset(X0, SP, 0);

  // Write out entry spills
  int32_t offset = frame_size + static_cast<size_t>(kArm64PointerSize);
  for (size_t i = 0; i < entry_spills.size(); ++i) {
    Arm64ManagedRegister reg = entry_spills.at(i).AsArm64();
    if (reg.IsNoRegister()) {
      // only increment stack offset.
      ManagedRegisterSpill spill = entry_spills.at(i);
      offset += spill.getSize();
    } else if (reg.IsXRegister()) {
      StoreToOffset(reg.AsXRegister(), SP, offset);
      offset += 8;
    } else if (reg.IsWRegister()) {
      StoreWToOffset(kStoreWord, reg.AsWRegister(), SP, offset);
      offset += 4;
    } else if (reg.IsDRegister()) {
      StoreDToOffset(reg.AsDRegister(), SP, offset);
      offset += 8;
    } else if (reg.IsSRegister()) {
      StoreSToOffset(reg.AsSRegister(), SP, offset);
      offset += 4;
    }
  }
}

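// Tear the frame down: restore the callee-saved core and FP registers, pop the whole frame and
// return via LR, keeping the CFI consistent for any code emitted after the exit block.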
void Arm64JNIMacroAssembler::RemoveFrame(size_t frame_size,
                                         ArrayRef<const ManagedRegister> callee_save_regs) {
  // Setup VIXL CPURegList for callee-saves.
  CPURegList core_reg_list(CPURegister::kRegister, kXRegSize, 0);
  CPURegList fp_reg_list(CPURegister::kFPRegister, kDRegSize, 0);
  for (auto r : callee_save_regs) {
    Arm64ManagedRegister reg = r.AsArm64();
    if (reg.IsXRegister()) {
      core_reg_list.Combine(reg_x(reg.AsXRegister()).GetCode());
    } else {
      DCHECK(reg.IsDRegister());
      fp_reg_list.Combine(reg_d(reg.AsDRegister()).GetCode());
    }
  }
  size_t core_reg_size = core_reg_list.GetTotalSizeInBytes();
  size_t fp_reg_size = fp_reg_list.GetTotalSizeInBytes();

  // For now we only check that the size of the frame is large enough to hold spills and method
  // reference.
  DCHECK_GE(frame_size, core_reg_size + fp_reg_size + static_cast<size_t>(kArm64PointerSize));
  DCHECK_ALIGNED(frame_size, kStackAlignment);

  DCHECK(core_reg_list.IncludesAliasOf(reg_x(TR)));

  cfi().RememberState();

  // Restore callee-saves.
  asm_.UnspillRegisters(core_reg_list, frame_size - core_reg_size);
  asm_.UnspillRegisters(fp_reg_list, frame_size - core_reg_size - fp_reg_size);

  // Decrease frame size to start of callee saved regs.
  DecreaseFrameSize(frame_size);

  // Pop callee saved and return to LR.
  ___ Ret();

  // The CFI should be restored for any code that follows the exit block.
  cfi().RestoreState();
  cfi().DefCFAOffset(frame_size);
}

#undef ___

}  // namespace arm64
}  // namespace art