/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "assembler_arm64.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "heap_poisoning.h"
#include "offsets.h"
#include "thread.h"

using namespace vixl::aarch64;  // NOLINT(build/namespaces)

namespace art {
namespace arm64 {

#ifdef ___
#error "ARM64 Assembler macro already defined."
#else
#define ___   vixl_masm_.
#endif

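// Finalize the code: have VIXL flush any pending pools and seal the buffer;
// no further instructions may be emitted afterwards.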
void Arm64Assembler::FinalizeCode() {
  ___ FinalizeCode();
}

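// Number of bytes of code generated so far.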
size_t Arm64Assembler::CodeSize() const {
  return vixl_masm_.GetSizeOfCodeGenerated();
}

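// Base address of the underlying VIXL code buffer.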
const uint8_t* Arm64Assembler::CodeBufferBaseAddress() const {
  return vixl_masm_.GetBuffer().GetStartAddress<const uint8_t*>();
}

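// Copy the finalized code into the caller-provided memory region.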
void Arm64Assembler::FinalizeInstructions(const MemoryRegion& region) {
  // Copy the instructions from the buffer.
  MemoryRegion from(vixl_masm_.GetBuffer()->GetStartAddress<void*>(), CodeSize());
  region.CopyFrom(0, from);
}

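// Load a raw pointer: dst = *(base + offs). Both registers must be X registers.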
void Arm64Assembler::LoadRawPtr(ManagedRegister m_dst, ManagedRegister m_base, Offset offs) {
  Arm64ManagedRegister dst = m_dst.AsArm64();
  Arm64ManagedRegister base = m_base.AsArm64();
  CHECK(dst.IsXRegister() && base.IsXRegister());
  // Remove dst and base from the temp list - the higher level API uses IP1, IP0 as scratch.
  UseScratchRegisterScope temps(&vixl_masm_);
  temps.Exclude(reg_x(dst.AsXRegister()), reg_x(base.AsXRegister()));
  ___ Ldr(reg_x(dst.AsXRegister()), MEM_OP(reg_x(base.AsXRegister()), offs.Int32Value()));
}

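// Load a code address from [base + offs] into scratch and branch to it.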
void Arm64Assembler::JumpTo(ManagedRegister m_base, Offset offs, ManagedRegister m_scratch) {
  Arm64ManagedRegister base = m_base.AsArm64();
  Arm64ManagedRegister scratch = m_scratch.AsArm64();
  CHECK(base.IsXRegister()) << base;
  CHECK(scratch.IsXRegister()) << scratch;
  // Remove base and scratch from the temp list - the higher level API uses IP1, IP0 as scratch.
  UseScratchRegisterScope temps(&vixl_masm_);
  temps.Exclude(reg_x(base.AsXRegister()), reg_x(scratch.AsXRegister()));
  ___ Ldr(reg_x(scratch.AsXRegister()), MEM_OP(reg_x(base.AsXRegister()), offs.Int32Value()));
  ___ Br(reg_x(scratch.AsXRegister()));
}

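// Map a VIXL CPU register to its DWARF register number for CFI recording.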
static inline dwarf::Reg DWARFReg(CPURegister reg) {
  if (reg.IsFPRegister()) {
    return dwarf::Reg::Arm64Fp(reg.GetCode());
  } else {
    DCHECK_LT(reg.GetCode(), 31u);  // X0 - X30.
    return dwarf::Reg::Arm64Core(reg.GetCode());
  }
}

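// Store the given registers to the stack at [sp + offset], preferring STP pairs
// and recording each spill in the CFI.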
void Arm64Assembler::SpillRegisters(CPURegList registers, int offset) {
  int size = registers.GetRegisterSizeInBytes();
  const Register sp = vixl_masm_.StackPointer();
  // Since we are operating on register pairs, we would like to align on
  // double the standard size; on the other hand, we don't want to insert
  // an extra store, which will happen if the number of registers is even.
  if (!IsAlignedParam(offset, 2 * size) && registers.GetCount() % 2 != 0) {
    const CPURegister& dst0 = registers.PopLowestIndex();
    ___ Str(dst0, MemOperand(sp, offset));
    cfi_.RelOffset(DWARFReg(dst0), offset);
    offset += size;
  }
  while (registers.GetCount() >= 2) {
    const CPURegister& dst0 = registers.PopLowestIndex();
    const CPURegister& dst1 = registers.PopLowestIndex();
    ___ Stp(dst0, dst1, MemOperand(sp, offset));
    cfi_.RelOffset(DWARFReg(dst0), offset);
    cfi_.RelOffset(DWARFReg(dst1), offset + size);
    offset += 2 * size;
  }
  if (!registers.IsEmpty()) {
    const CPURegister& dst0 = registers.PopLowestIndex();
    ___ Str(dst0, MemOperand(sp, offset));
    cfi_.RelOffset(DWARFReg(dst0), offset);
  }
  DCHECK(registers.IsEmpty());
}

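// Reload the given registers from [sp + offset], mirroring SpillRegisters,
// and mark each register as restored in the CFI.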
void Arm64Assembler::UnspillRegisters(CPURegList registers, int offset) {
  int size = registers.GetRegisterSizeInBytes();
  const Register sp = vixl_masm_.StackPointer();
  // Be consistent with the logic for spilling registers.
  if (!IsAlignedParam(offset, 2 * size) && registers.GetCount() % 2 != 0) {
    const CPURegister& dst0 = registers.PopLowestIndex();
    ___ Ldr(dst0, MemOperand(sp, offset));
    cfi_.Restore(DWARFReg(dst0));
    offset += size;
  }
  while (registers.GetCount() >= 2) {
    const CPURegister& dst0 = registers.PopLowestIndex();
    const CPURegister& dst1 = registers.PopLowestIndex();
    ___ Ldp(dst0, dst1, MemOperand(sp, offset));
    cfi_.Restore(DWARFReg(dst0));
    cfi_.Restore(DWARFReg(dst1));
    offset += 2 * size;
  }
  if (!registers.IsEmpty()) {
    const CPURegister& dst0 = registers.PopLowestIndex();
    ___ Ldr(dst0, MemOperand(sp, offset));
    cfi_.Restore(DWARFReg(dst0));
  }
  DCHECK(registers.IsEmpty());
}

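// Poison a heap reference held in a W register: a poisoned reference is stored
// as the negation of the real value.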
void Arm64Assembler::PoisonHeapReference(Register reg) {
  DCHECK(reg.IsW());
  // reg = -reg.
  ___ Neg(reg, Operand(reg));
}

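// Unpoison a heap reference; negation is its own inverse.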
void Arm64Assembler::UnpoisonHeapReference(Register reg) {
  DCHECK(reg.IsW());
  // reg = -reg.
  ___ Neg(reg, Operand(reg));
}

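// Poison the reference only when heap poisoning is enabled in this build.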
void Arm64Assembler::MaybePoisonHeapReference(Register reg) {
  if (kPoisonHeapReferences) {
    PoisonHeapReference(reg);
  }
}

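// Unpoison the reference only when heap poisoning is enabled in this build.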
void Arm64Assembler::MaybeUnpoisonHeapReference(Register reg) {
  if (kPoisonHeapReferences) {
    UnpoisonHeapReference(reg);
  }
}

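// Emit a run-time check that the Marking Register (MR) agrees with the thread's
// is_gc_marking flag; on a mismatch, trap with BRK using `code` as the immediate.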
void Arm64Assembler::GenerateMarkingRegisterCheck(Register temp, int code) {
  // The Marking Register is only used in the Baker read barrier configuration.
  DCHECK(kEmitCompilerReadBarrier);
  DCHECK(kUseBakerReadBarrier);

  vixl::aarch64::Register mr = reg_x(MR);  // Marking Register.
  vixl::aarch64::Register tr = reg_x(TR);  // Thread Register.
  vixl::aarch64::Label mr_is_ok;

  // temp = self.tls32_.is_gc_marking
  ___ Ldr(temp, MemOperand(tr, Thread::IsGcMarkingOffset<kArm64PointerSize>().Int32Value()));
  // Check that mr == self.tls32_.is_gc_marking.
  ___ Cmp(mr.W(), temp);
  ___ B(eq, &mr_is_ok);
  ___ Brk(code);
  ___ Bind(&mr_is_ok);
}

#undef ___

}  // namespace arm64
}  // namespace art