// Scraper navigation residue from the code-viewer page (Home | History | Annotate | Download | only in x86_64).
      1 /*
      2  * Copyright (C) 2014 The Android Open Source Project
      3  *
      4  * Licensed under the Apache License, Version 2.0 (the "License");
      5  * you may not use this file except in compliance with the License.
      6  * You may obtain a copy of the License at
      7  *
      8  *      http://www.apache.org/licenses/LICENSE-2.0
      9  *
     10  * Unless required by applicable law or agreed to in writing, software
     11  * distributed under the License is distributed on an "AS IS" BASIS,
     12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
     13  * See the License for the specific language governing permissions and
     14  * limitations under the License.
     15  */
     16 
     17 #include "calling_convention_x86_64.h"
     18 
     19 #include <android-base/logging.h>
     20 
     21 #include "arch/instruction_set.h"
     22 #include "base/bit_utils.h"
     23 #include "handle_scope-inl.h"
     24 #include "utils/x86_64/managed_register_x86_64.h"
     25 
     26 namespace art {
     27 namespace x86_64 {
     28 
// Size of one stack slot / spilled register on x86-64 (8 bytes).
constexpr size_t kFramePointerSize = static_cast<size_t>(PointerSize::k64);
static_assert(kX86_64PointerSize == PointerSize::k64, "Unexpected x86_64 pointer size");
static_assert(kStackAlignment >= 16u, "System V AMD64 ABI requires at least 16 byte stack alignment");

// XMM0..XMM7 can be used to pass the first 8 floating args. The rest must go on the stack.
// -- Managed and JNI calling conventions.
constexpr size_t kMaxFloatOrDoubleRegisterArguments = 8u;
// Up to how many integer-like (pointers, objects, longs, int, short, bool, etc) args can be
// enregistered. The rest of the args must go on the stack.
// -- JNI calling convention only (Managed excludes RDI, so it's actually 5).
constexpr size_t kMaxIntLikeRegisterArguments = 6u;
     40 
// Registers that compiled managed code must preserve across calls; spilled in
// the frame and described to the runtime via the masks computed below.
static constexpr ManagedRegister kCalleeSaveRegisters[] = {
    // Core registers (the System V AMD64 callee-save set; RSP is managed
    // separately as the stack pointer).
    X86_64ManagedRegister::FromCpuRegister(RBX),
    X86_64ManagedRegister::FromCpuRegister(RBP),
    X86_64ManagedRegister::FromCpuRegister(R12),
    X86_64ManagedRegister::FromCpuRegister(R13),
    X86_64ManagedRegister::FromCpuRegister(R14),
    X86_64ManagedRegister::FromCpuRegister(R15),
    // Hard float registers. NOTE(review): these appear to be callee-save only
    // in ART's managed ABI (the native SysV ABI preserves no XMM registers) —
    // confirm against the quick entrypoints before relying on this elsewhere.
    X86_64ManagedRegister::FromXmmRegister(XMM12),
    X86_64ManagedRegister::FromXmmRegister(XMM13),
    X86_64ManagedRegister::FromXmmRegister(XMM14),
    X86_64ManagedRegister::FromXmmRegister(XMM15),
};
     55 
     56 static constexpr uint32_t CalculateCoreCalleeSpillMask() {
     57   // The spilled PC gets a special marker.
     58   uint32_t result = 1 << kNumberOfCpuRegisters;
     59   for (auto&& r : kCalleeSaveRegisters) {
     60     if (r.AsX86_64().IsCpuRegister()) {
     61       result |= (1 << r.AsX86_64().AsCpuRegister().AsRegister());
     62     }
     63   }
     64   return result;
     65 }
     66 
     67 static constexpr uint32_t CalculateFpCalleeSpillMask() {
     68   uint32_t result = 0;
     69   for (auto&& r : kCalleeSaveRegisters) {
     70     if (r.AsX86_64().IsXmmRegister()) {
     71       result |= (1 << r.AsX86_64().AsXmmRegister().AsFloatRegister());
     72     }
     73   }
     74   return result;
     75 }
     76 
// Materialized once at compile time; returned by CoreSpillMask()/FpSpillMask().
static constexpr uint32_t kCoreCalleeSpillMask = CalculateCoreCalleeSpillMask();
static constexpr uint32_t kFpCalleeSpillMask = CalculateFpCalleeSpillMask();
     79 
     80 // Calling convention
     81 
// Scratch register usable across managed call boundaries. RAX is safe: it is
// not in kCalleeSaveRegisters and never carries an incoming managed argument
// (those use RSI..R9 and XMM0..XMM7, see CurrentParamRegister()).
ManagedRegister X86_64ManagedRuntimeCallingConvention::InterproceduralScratchRegister() {
  return X86_64ManagedRegister::FromCpuRegister(RAX);
}
     85 
// Scratch register for the JNI stub; RAX is not callee-save (see
// kCalleeSaveRegisters) and carries no JNI argument (RDI..R9 do).
ManagedRegister X86_64JniCallingConvention::InterproceduralScratchRegister() {
  return X86_64ManagedRegister::FromCpuRegister(RAX);
}
     89 
// No dedicated scratch register is reserved around the return sequence.
ManagedRegister X86_64JniCallingConvention::ReturnScratchRegister() const {
  return ManagedRegister::NoRegister();  // No free regs, so assembler uses push/pop
}
     93 
     94 static ManagedRegister ReturnRegisterForShorty(const char* shorty, bool jni ATTRIBUTE_UNUSED) {
     95   if (shorty[0] == 'F' || shorty[0] == 'D') {
     96     return X86_64ManagedRegister::FromXmmRegister(XMM0);
     97   } else if (shorty[0] == 'J') {
     98     return X86_64ManagedRegister::FromCpuRegister(RAX);
     99   } else if (shorty[0] == 'V') {
    100     return ManagedRegister::NoRegister();
    101   } else {
    102     return X86_64ManagedRegister::FromCpuRegister(RAX);
    103   }
    104 }
    105 
// Result register for the managed convention; see ReturnRegisterForShorty().
ManagedRegister X86_64ManagedRuntimeCallingConvention::ReturnRegister() {
  return ReturnRegisterForShorty(GetShorty(), false);
}
    109 
// Result register for the JNI convention; see ReturnRegisterForShorty().
ManagedRegister X86_64JniCallingConvention::ReturnRegister() {
  return ReturnRegisterForShorty(GetShorty(), true);
}
    113 
// Register holding integer-typed JNI results, regardless of the shorty.
ManagedRegister X86_64JniCallingConvention::IntReturnRegister() {
  return X86_64ManagedRegister::FromCpuRegister(RAX);
}
    117 
    118 // Managed runtime calling convention
    119 
// The managed convention reserves RDI for the called method; integer-like
// arguments therefore start at RSI (see CurrentParamRegister()).
ManagedRegister X86_64ManagedRuntimeCallingConvention::MethodRegister() {
  return X86_64ManagedRegister::FromCpuRegister(RDI);
}
    123 
// Always false here: IsCurrentParamOnStack() returns true unconditionally,
// because register arguments are spilled at entry (see EntrySpills()).
bool X86_64ManagedRuntimeCallingConvention::IsCurrentParamInRegister() {
  return !IsCurrentParamOnStack();
}
    127 
bool X86_64ManagedRuntimeCallingConvention::IsCurrentParamOnStack() {
  // We assume all parameters are on stack, args coming via registers are spilled as entry_spills,
  // so after the prologue every argument can be read from its stack slot.
  return true;
}
    132 
    133 ManagedRegister X86_64ManagedRuntimeCallingConvention::CurrentParamRegister() {
    134   ManagedRegister res = ManagedRegister::NoRegister();
    135   if (!IsCurrentParamAFloatOrDouble()) {
    136     switch (itr_args_ - itr_float_and_doubles_) {
    137     case 0: res = X86_64ManagedRegister::FromCpuRegister(RSI); break;
    138     case 1: res = X86_64ManagedRegister::FromCpuRegister(RDX); break;
    139     case 2: res = X86_64ManagedRegister::FromCpuRegister(RCX); break;
    140     case 3: res = X86_64ManagedRegister::FromCpuRegister(R8); break;
    141     case 4: res = X86_64ManagedRegister::FromCpuRegister(R9); break;
    142     }
    143   } else if (itr_float_and_doubles_ < kMaxFloatOrDoubleRegisterArguments) {
    144     // First eight float parameters are passed via XMM0..XMM7
    145     res = X86_64ManagedRegister::FromXmmRegister(
    146                                  static_cast<FloatRegister>(XMM0 + itr_float_and_doubles_));
    147   }
    148   return res;
    149 }
    150 
    151 FrameOffset X86_64ManagedRuntimeCallingConvention::CurrentParamStackOffset() {
    152   return FrameOffset(displacement_.Int32Value() +  // displacement
    153                      static_cast<size_t>(kX86_64PointerSize) +  // Method ref
    154                      itr_slots_ * sizeof(uint32_t));  // offset into in args
    155 }
    156 
    157 const ManagedRegisterEntrySpills& X86_64ManagedRuntimeCallingConvention::EntrySpills() {
    158   // We spill the argument registers on X86 to free them up for scratch use, we then assume
    159   // all arguments are on the stack.
    160   if (entry_spills_.size() == 0) {
    161     ResetIterator(FrameOffset(0));
    162     while (HasNext()) {
    163       ManagedRegister in_reg = CurrentParamRegister();
    164       if (!in_reg.IsNoRegister()) {
    165         int32_t size = IsParamALongOrDouble(itr_args_) ? 8 : 4;
    166         int32_t spill_offset = CurrentParamStackOffset().Uint32Value();
    167         ManagedRegisterSpill spill(in_reg, size, spill_offset);
    168         entry_spills_.push_back(spill);
    169       }
    170       Next();
    171     }
    172   }
    173   return entry_spills_;
    174 }
    175 
    176 // JNI calling convention
    177 
// Thin constructor: forwards everything to the generic JniCallingConvention
// with the x86-64 pointer size; all arch-specific behavior lives in the
// overridden methods below.
X86_64JniCallingConvention::X86_64JniCallingConvention(bool is_static,
                                                       bool is_synchronized,
                                                       bool is_critical_native,
                                                       const char* shorty)
    : JniCallingConvention(is_static,
                           is_synchronized,
                           is_critical_native,
                           shorty,
                           kX86_64PointerSize) {
}
    188 
// Bitmask of callee-save core registers (plus the return-PC marker bit).
uint32_t X86_64JniCallingConvention::CoreSpillMask() const {
  return kCoreCalleeSpillMask;
}
    192 
// Bitmask of callee-save XMM registers.
uint32_t X86_64JniCallingConvention::FpSpillMask() const {
  return kFpCalleeSpillMask;
}
    196 
    197 size_t X86_64JniCallingConvention::FrameSize() {
    198   // Method*, PC return address and callee save area size, local reference segment state
    199   const size_t method_ptr_size = static_cast<size_t>(kX86_64PointerSize);
    200   const size_t pc_return_addr_size = kFramePointerSize;
    201   const size_t callee_save_area_size = CalleeSaveRegisters().size() * kFramePointerSize;
    202   size_t frame_data_size = method_ptr_size + pc_return_addr_size + callee_save_area_size;
    203 
    204   if (LIKELY(HasLocalReferenceSegmentState())) {                     // local ref. segment state
    205     // Local reference segment state is sometimes excluded.
    206     frame_data_size += kFramePointerSize;
    207   }
    208 
    209   // References plus link_ (pointer) and number_of_references_ (uint32_t) for HandleScope header
    210   const size_t handle_scope_size = HandleScope::SizeOf(kX86_64PointerSize, ReferenceCount());
    211 
    212   size_t total_size = frame_data_size;
    213   if (LIKELY(HasHandleScope())) {
    214     // HandleScope is sometimes excluded.
    215     total_size += handle_scope_size;                                 // handle scope size
    216   }
    217 
    218   // Plus return value spill area size
    219   total_size += SizeOfReturnValue();
    220 
    221   return RoundUp(total_size, kStackAlignment);
    222 }
    223 
// Bytes reserved below the frame for outgoing stack arguments (8-byte slots),
// rounded up to the stack alignment.
size_t X86_64JniCallingConvention::OutArgSize() {
  return RoundUp(NumberOfOutgoingStackArgs() * kFramePointerSize, kStackAlignment);
}
    227 
// Non-owning view of the file-level callee-save register table.
ArrayRef<const ManagedRegister> X86_64JniCallingConvention::CalleeSaveRegisters() const {
  return ArrayRef<const ManagedRegister>(kCalleeSaveRegisters);
}
    231 
// A JNI argument is in a register exactly when it is not on the stack.
bool X86_64JniCallingConvention::IsCurrentParamInRegister() {
  return !IsCurrentParamOnStack();
}
    235 
// On the stack iff CurrentParamRegister() could not assign a GPR/XMM register.
bool X86_64JniCallingConvention::IsCurrentParamOnStack() {
  return CurrentParamRegister().IsNoRegister();
}
    239 
    240 ManagedRegister X86_64JniCallingConvention::CurrentParamRegister() {
    241   ManagedRegister res = ManagedRegister::NoRegister();
    242   if (!IsCurrentParamAFloatOrDouble()) {
    243     switch (itr_args_ - itr_float_and_doubles_) {
    244     case 0: res = X86_64ManagedRegister::FromCpuRegister(RDI); break;
    245     case 1: res = X86_64ManagedRegister::FromCpuRegister(RSI); break;
    246     case 2: res = X86_64ManagedRegister::FromCpuRegister(RDX); break;
    247     case 3: res = X86_64ManagedRegister::FromCpuRegister(RCX); break;
    248     case 4: res = X86_64ManagedRegister::FromCpuRegister(R8); break;
    249     case 5: res = X86_64ManagedRegister::FromCpuRegister(R9); break;
    250     static_assert(5u == kMaxIntLikeRegisterArguments - 1, "Missing case statement(s)");
    251     }
    252   } else if (itr_float_and_doubles_ < kMaxFloatOrDoubleRegisterArguments) {
    253     // First eight float parameters are passed via XMM0..XMM7
    254     res = X86_64ManagedRegister::FromXmmRegister(
    255                                  static_cast<FloatRegister>(XMM0 + itr_float_and_doubles_));
    256   }
    257   return res;
    258 }
    259 
// Stack slot of the current JNI argument. Only valid when the argument did
// not get a register: the index is the iterator position minus however many
// floats went to XMM0..XMM7 and int-likes went to RDI..R9.
FrameOffset X86_64JniCallingConvention::CurrentParamStackOffset() {
  CHECK(IsCurrentParamOnStack());
  size_t args_on_stack = itr_args_
      - std::min(kMaxFloatOrDoubleRegisterArguments,
                 static_cast<size_t>(itr_float_and_doubles_))
          // Float arguments passed through Xmm0..Xmm7
      - std::min(kMaxIntLikeRegisterArguments,
                 static_cast<size_t>(itr_args_ - itr_float_and_doubles_));
          // Integer arguments passed through GPR
  // Each outgoing stack argument occupies one 8-byte slot below displacement_.
  size_t offset = displacement_.Int32Value() - OutArgSize() + (args_on_stack * kFramePointerSize);
  CHECK_LT(offset, OutArgSize());
  return FrameOffset(offset);
}
    273 
// TODO: Calling this "NumberArgs" is misleading.
// It's really more like NumberSlots (like itr_slots_)
// because doubles/longs get counted twice.
//
// Counts how many outgoing argument slots spill to the stack: all argument
// slots (including the jclass for statics and the JNIEnv*/return-pc
// bookkeeping) minus those absorbed by the XMM and GPR argument registers.
size_t X86_64JniCallingConvention::NumberOfOutgoingStackArgs() {
  size_t static_args = HasSelfClass() ? 1 : 0;  // count jclass
  // regular argument parameters and this
  size_t param_args = NumArgs() + NumLongOrDoubleArgs();
  // count JNIEnv* and return pc (pushed after Method*)
  size_t internal_args = 1 /* return pc */ + (HasJniEnv() ? 1 : 0 /* jni env */);
  size_t total_args = static_args + param_args + internal_args;

  // Float arguments passed through Xmm0..Xmm7
  // Other (integer) arguments passed through GPR (RDI, RSI, RDX, RCX, R8, R9)
  size_t total_stack_args = total_args
                            - std::min(kMaxFloatOrDoubleRegisterArguments, static_cast<size_t>(NumFloatOrDoubleArgs()))
                            - std::min(kMaxIntLikeRegisterArguments, static_cast<size_t>(NumArgs() - NumFloatOrDoubleArgs()));

  return total_stack_args;
}
    293 
    294 }  // namespace x86_64
    295 }  // namespace art
    296