/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "base/logging.h"
#include "calling_convention_arm64.h"
#include "handle_scope-inl.h"
#include "utils/arm64/managed_register_arm64.h"

namespace art {
namespace arm64 {

static_assert(kArm64PointerSize == PointerSize::k64, "Unexpected ARM64 pointer size");

// Up to how many float-like (float, double) args can be enregistered.
// The rest of the args must go on the stack.
constexpr size_t kMaxFloatOrDoubleRegisterArguments = 8u;
// Up to how many integer-like (pointers, objects, longs, ints, shorts, bools, etc.) args can
// be enregistered. The rest of the args must go on the stack.
constexpr size_t kMaxIntLikeRegisterArguments = 8u;

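// These are the AAPCS64 argument registers: x0-x7 for integer-like arguments and v0-v7 for
// floating-point arguments. W and S are the 32-bit views of the X and D registers.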
static const XRegister kXArgumentRegisters[] = {
  X0, X1, X2, X3, X4, X5, X6, X7
};

static const WRegister kWArgumentRegisters[] = {
  W0, W1, W2, W3, W4, W5, W6, W7
};

static const DRegister kDArgumentRegisters[] = {
  D0, D1, D2, D3, D4, D5, D6, D7
};

static const SRegister kSArgumentRegisters[] = {
  S0, S1, S2, S3, S4, S5, S6, S7
};

static constexpr ManagedRegister kCalleeSaveRegisters[] = {
    // Core registers.
    // Note: The native JNI function may call VM runtime functions that suspend the thread or
    // trigger a GC, and in those cases the JNI method frame becomes the top quick frame. To
    // satisfy the GC we therefore need to save LR and the callee-save registers, similar to
    // the CalleeSaveMethod(RefOnly) frame.
    // The JNI function is the native function that the Java code wants to call.
    // The JNI method is the method compiled by the JNI compiler.
    // Call chain: managed code (Java) --> JNI method --> JNI function.
    // The thread register (X19) is saved on the stack.
    Arm64ManagedRegister::FromXRegister(X19),
    Arm64ManagedRegister::FromXRegister(X20),
    Arm64ManagedRegister::FromXRegister(X21),
    Arm64ManagedRegister::FromXRegister(X22),
    Arm64ManagedRegister::FromXRegister(X23),
    Arm64ManagedRegister::FromXRegister(X24),
    Arm64ManagedRegister::FromXRegister(X25),
    Arm64ManagedRegister::FromXRegister(X26),
    Arm64ManagedRegister::FromXRegister(X27),
    Arm64ManagedRegister::FromXRegister(X28),
    Arm64ManagedRegister::FromXRegister(X29),
    Arm64ManagedRegister::FromXRegister(LR),
    // Hard float registers.
    // Consider the chain java_method_1 --> JNI method --> JNI function --> java_method_2: we
    // may break in java_method_2 and still need to find the values of the DEX registers in
    // java_method_1, so all callee-saves (as seen by managed code) need to be saved.
    Arm64ManagedRegister::FromDRegister(D8),
    Arm64ManagedRegister::FromDRegister(D9),
    Arm64ManagedRegister::FromDRegister(D10),
    Arm64ManagedRegister::FromDRegister(D11),
    Arm64ManagedRegister::FromDRegister(D12),
    Arm64ManagedRegister::FromDRegister(D13),
    Arm64ManagedRegister::FromDRegister(D14),
    Arm64ManagedRegister::FromDRegister(D15),
};

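// Fold the table above into the spill masks recorded in the frame info: bit i of the core mask
// corresponds to Xi and bit i of the FP mask to Di. Assuming LR is encoded as X30, the masks
// work out to 0x7ff80000 (X19-X29 plus LR) and 0x0000ff00 (D8-D15).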
static constexpr uint32_t CalculateCoreCalleeSpillMask() {
  uint32_t result = 0u;
  for (auto&& r : kCalleeSaveRegisters) {
    if (r.AsArm64().IsXRegister()) {
      result |= (1u << r.AsArm64().AsXRegister());
    }
  }
  return result;
}

static constexpr uint32_t CalculateFpCalleeSpillMask() {
  uint32_t result = 0u;
  for (auto&& r : kCalleeSaveRegisters) {
    if (r.AsArm64().IsDRegister()) {
      result |= (1u << r.AsArm64().AsDRegister());
    }
  }
  return result;
}

static constexpr uint32_t kCoreCalleeSpillMask = CalculateCoreCalleeSpillMask();
static constexpr uint32_t kFpCalleeSpillMask = CalculateFpCalleeSpillMask();

// Calling convention.
ManagedRegister Arm64ManagedRuntimeCallingConvention::InterproceduralScratchRegister() {
  // X20 is safe to use as a scratch register:
  // - with Baker read barriers, it is reserved as the Marking Register,
  //   and thus does not actually need to be saved/restored; it is
  //   refreshed on exit (see Arm64JNIMacroAssembler::RemoveFrame);
  // - in other cases, it is saved on entry (in
  //   Arm64JNIMacroAssembler::BuildFrame) and restored on exit (in
  //   Arm64JNIMacroAssembler::RemoveFrame).
  return Arm64ManagedRegister::FromXRegister(X20);
}

ManagedRegister Arm64JniCallingConvention::InterproceduralScratchRegister() {
  // X20 is safe to use as a scratch register for the same reasons as above.
  return Arm64ManagedRegister::FromXRegister(X20);
}

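// Map the shorty's return-type character to the ABI return register: 'F' is a float in S0,
// 'D' a double in D0, 'J' a long in X0, 'V' void (no register); everything else (int-like
// values and references) comes back in W0.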
static ManagedRegister ReturnRegisterForShorty(const char* shorty) {
  if (shorty[0] == 'F') {
    return Arm64ManagedRegister::FromSRegister(S0);
  } else if (shorty[0] == 'D') {
    return Arm64ManagedRegister::FromDRegister(D0);
  } else if (shorty[0] == 'J') {
    return Arm64ManagedRegister::FromXRegister(X0);
  } else if (shorty[0] == 'V') {
    return Arm64ManagedRegister::NoRegister();
  } else {
    return Arm64ManagedRegister::FromWRegister(W0);
  }
}

ManagedRegister Arm64ManagedRuntimeCallingConvention::ReturnRegister() {
  return ReturnRegisterForShorty(GetShorty());
}

ManagedRegister Arm64JniCallingConvention::ReturnRegister() {
  return ReturnRegisterForShorty(GetShorty());
}

ManagedRegister Arm64JniCallingConvention::IntReturnRegister() {
  return Arm64ManagedRegister::FromWRegister(W0);
}

// Managed runtime calling convention.

ManagedRegister Arm64ManagedRuntimeCallingConvention::MethodRegister() {
  return Arm64ManagedRegister::FromXRegister(X0);
}

bool Arm64ManagedRuntimeCallingConvention::IsCurrentParamInRegister() {
  return false;  // Everything moved to stack on entry.
}

bool Arm64ManagedRuntimeCallingConvention::IsCurrentParamOnStack() {
  return true;
}

ManagedRegister Arm64ManagedRuntimeCallingConvention::CurrentParamRegister() {
  LOG(FATAL) << "Should not reach here";
  return ManagedRegister::NoRegister();
}

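// Offset of the current argument from SP: skip the ArtMethod* slot, then index by the number
// of 32-bit managed stack slots consumed so far (longs and doubles occupy two slots each).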
FrameOffset Arm64ManagedRuntimeCallingConvention::CurrentParamStackOffset() {
  CHECK(IsCurrentParamOnStack());
  FrameOffset result =
      FrameOffset(displacement_.Int32Value() +  // displacement
                  kFramePointerSize +  // Method ref
                  (itr_slots_ * sizeof(uint32_t)));  // offset into in args
  return result;
}

const ManagedRegisterEntrySpills& Arm64ManagedRuntimeCallingConvention::EntrySpills() {
  // We spill the argument registers on ARM64 to free them up for scratch use; we then assume
  // all arguments are on the stack.
  if ((entry_spills_.size() == 0) && (NumArgs() > 0)) {
    size_t gp_reg_index = 1u;  // We start from X1/W1; X0 holds the ArtMethod*.
    size_t fp_reg_index = 0u;  // D0/S0.

    // We need to choose the correct register (D/S or X/W) since the managed
    // stack uses 32-bit stack slots.
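    // For example, for a hypothetical static method "long f(long, float, Object)", the long
    // is spilled from X1, the float from S0, and the reference from W2 (references are 32-bit
    // in managed code), leaving X0 holding the ArtMethod*.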
    ResetIterator(FrameOffset(0));
    while (HasNext()) {
      if (IsCurrentParamAFloatOrDouble()) {  // FP regs.
        if (fp_reg_index < kMaxFloatOrDoubleRegisterArguments) {
          if (!IsCurrentParamADouble()) {
            entry_spills_.push_back(
                Arm64ManagedRegister::FromSRegister(kSArgumentRegisters[fp_reg_index]));
          } else {
            entry_spills_.push_back(
                Arm64ManagedRegister::FromDRegister(kDArgumentRegisters[fp_reg_index]));
          }
          fp_reg_index++;
        } else {  // Just increase the stack offset.
          if (!IsCurrentParamADouble()) {
            entry_spills_.push_back(ManagedRegister::NoRegister(), 4);
          } else {
            entry_spills_.push_back(ManagedRegister::NoRegister(), 8);
          }
        }
      } else {  // GP regs.
        if (gp_reg_index < kMaxIntLikeRegisterArguments) {
          if (IsCurrentParamALong() && !IsCurrentParamAReference()) {
            entry_spills_.push_back(
                Arm64ManagedRegister::FromXRegister(kXArgumentRegisters[gp_reg_index]));
          } else {
            entry_spills_.push_back(
                Arm64ManagedRegister::FromWRegister(kWArgumentRegisters[gp_reg_index]));
          }
          gp_reg_index++;
        } else {  // Just increase the stack offset.
          if (IsCurrentParamALong() && !IsCurrentParamAReference()) {
            entry_spills_.push_back(ManagedRegister::NoRegister(), 8);
          } else {
            entry_spills_.push_back(ManagedRegister::NoRegister(), 4);
          }
        }
      }
      Next();
    }
  }
  return entry_spills_;
}


// JNI calling convention.
Arm64JniCallingConvention::Arm64JniCallingConvention(bool is_static,
                                                     bool is_synchronized,
                                                     bool is_critical_native,
                                                     const char* shorty)
    : JniCallingConvention(is_static,
                           is_synchronized,
                           is_critical_native,
                           shorty,
                           kArm64PointerSize) {
}

uint32_t Arm64JniCallingConvention::CoreSpillMask() const {
  return kCoreCalleeSpillMask;
}

uint32_t Arm64JniCallingConvention::FpSpillMask() const {
  return kFpCalleeSpillMask;
}

ManagedRegister Arm64JniCallingConvention::ReturnScratchRegister() const {
  return ManagedRegister::NoRegister();
}

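// Size of the managed frame for a JNI method: the ArtMethod* slot, the callee-save spill
// area, a uint32_t cookie for the local reference segment state, the HandleScope, and room to
// spill the return value, all rounded up to the stack alignment (16 bytes on ARM64).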
size_t Arm64JniCallingConvention::FrameSize() {
  // Method*, callee save area size, local reference segment state.
  //
  // (Unlike x86_64, do not include the return address, and the segment state is a uint32_t
  // instead of a pointer.)
  size_t method_ptr_size = static_cast<size_t>(kFramePointerSize);
  size_t callee_save_area_size = CalleeSaveRegisters().size() * kFramePointerSize;

  size_t frame_data_size = method_ptr_size + callee_save_area_size;
  if (LIKELY(HasLocalReferenceSegmentState())) {
    frame_data_size += sizeof(uint32_t);
  }
  // References plus 2 words for the HandleScope header.
  size_t handle_scope_size = HandleScope::SizeOf(kArm64PointerSize, ReferenceCount());

  size_t total_size = frame_data_size;
  if (LIKELY(HasHandleScope())) {
    // The HandleScope is sometimes excluded (e.g. for @CriticalNative methods).
    total_size += handle_scope_size;
  }

  // Plus the return value spill area size.
  total_size += SizeOfReturnValue();

  return RoundUp(total_size, kStackAlignment);
}

size_t Arm64JniCallingConvention::OutArgSize() {
  // Same as x86_64.
  return RoundUp(NumberOfOutgoingStackArgs() * kFramePointerSize, kStackAlignment);
}

ArrayRef<const ManagedRegister> Arm64JniCallingConvention::CalleeSaveRegisters() const {
  // Same as x86_64.
  return ArrayRef<const ManagedRegister>(kCalleeSaveRegisters);
}

bool Arm64JniCallingConvention::IsCurrentParamInRegister() {
  if (IsCurrentParamAFloatOrDouble()) {
    return (itr_float_and_doubles_ < kMaxFloatOrDoubleRegisterArguments);
  } else {
    return ((itr_args_ - itr_float_and_doubles_) < kMaxIntLikeRegisterArguments);
  }
  // TODO: Can we just call CurrentParamRegister to figure this out?
}

bool Arm64JniCallingConvention::IsCurrentParamOnStack() {
  // Is this ever not the same for all the architectures?
  return !IsCurrentParamInRegister();
}

ManagedRegister Arm64JniCallingConvention::CurrentParamRegister() {
  CHECK(IsCurrentParamInRegister());
  if (IsCurrentParamAFloatOrDouble()) {
    CHECK_LT(itr_float_and_doubles_, kMaxFloatOrDoubleRegisterArguments);
    if (IsCurrentParamADouble()) {
      return Arm64ManagedRegister::FromDRegister(kDArgumentRegisters[itr_float_and_doubles_]);
    } else {
      return Arm64ManagedRegister::FromSRegister(kSArgumentRegisters[itr_float_and_doubles_]);
    }
  } else {
    int gp_reg = itr_args_ - itr_float_and_doubles_;
    CHECK_LT(static_cast<unsigned int>(gp_reg), kMaxIntLikeRegisterArguments);
    if (IsCurrentParamALong() || IsCurrentParamAReference() || IsCurrentParamJniEnv()) {
      return Arm64ManagedRegister::FromXRegister(kXArgumentRegisters[gp_reg]);
    } else {
      return Arm64ManagedRegister::FromWRegister(kWArgumentRegisters[gp_reg]);
    }
  }
}

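// Stack offset of the current outgoing argument: subtract the arguments already assigned to
// registers (at most 8 FP and 8 integer-like) from the flat argument index to get this
// argument's position among the stack-passed arguments; each stack slot is pointer-sized.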
FrameOffset Arm64JniCallingConvention::CurrentParamStackOffset() {
  CHECK(IsCurrentParamOnStack());
  size_t args_on_stack = itr_args_
                  - std::min(kMaxFloatOrDoubleRegisterArguments,
                             static_cast<size_t>(itr_float_and_doubles_))
                  - std::min(kMaxIntLikeRegisterArguments,
                             static_cast<size_t>(itr_args_ - itr_float_and_doubles_));
  size_t offset = displacement_.Int32Value() - OutArgSize() + (args_on_stack * kFramePointerSize);
  CHECK_LT(offset, OutArgSize());
  return FrameOffset(offset);
  // TODO: Seems identical to the x86_64 code.
}

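// Number of outgoing arguments that must be passed on the stack: all arguments, including the
// implicit JNIEnv* (and jclass for static methods), minus those that fit in the 8 FP and 8
// integer-like argument registers.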
size_t Arm64JniCallingConvention::NumberOfOutgoingStackArgs() {
  // All arguments, including JNI args.
  size_t all_args = NumArgs() + NumberOfExtraArgumentsForJni();

  DCHECK_GE(all_args, NumFloatOrDoubleArgs());

  size_t all_stack_args =
      all_args
      - std::min(kMaxFloatOrDoubleRegisterArguments,
                 static_cast<size_t>(NumFloatOrDoubleArgs()))
      - std::min(kMaxIntLikeRegisterArguments,
                 static_cast<size_t>(all_args - NumFloatOrDoubleArgs()));

  // TODO: Seems similar to the x86_64 code, except it doesn't count the return pc.

  return all_stack_args;
}


}  // namespace arm64
}  // namespace art