/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "calling_convention_mips64.h"

#include "base/logging.h"
#include "handle_scope-inl.h"
#include "utils/mips64/managed_register_mips64.h"

namespace art {
namespace mips64 {

// Up to how many args can be enregistered. The rest of the args must go on the stack.
constexpr size_t kMaxRegisterArguments = 8u;

static const GpuRegister kGpuArgumentRegisters[] = {
  A0, A1, A2, A3, A4, A5, A6, A7
};

static const FpuRegister kFpuArgumentRegisters[] = {
  F12, F13, F14, F15, F16, F17, F18, F19
};

static constexpr ManagedRegister kCalleeSaveRegisters[] = {
    // Core registers.
    Mips64ManagedRegister::FromGpuRegister(S2),
    Mips64ManagedRegister::FromGpuRegister(S3),
    Mips64ManagedRegister::FromGpuRegister(S4),
    Mips64ManagedRegister::FromGpuRegister(S5),
    Mips64ManagedRegister::FromGpuRegister(S6),
    Mips64ManagedRegister::FromGpuRegister(S7),
    Mips64ManagedRegister::FromGpuRegister(GP),
    Mips64ManagedRegister::FromGpuRegister(S8),
    // No hard float callee saves.
};

static constexpr uint32_t CalculateCoreCalleeSpillMask() {
  // RA is a special callee save which is not reported by CalleeSaveRegisters().
  uint32_t result = 1 << RA;
  for (auto&& r : kCalleeSaveRegisters) {
    if (r.AsMips64().IsGpuRegister()) {
      result |= (1 << r.AsMips64().AsGpuRegister());
    }
  }
  return result;
}

static constexpr uint32_t kCoreCalleeSpillMask = CalculateCoreCalleeSpillMask();
static constexpr uint32_t kFpCalleeSpillMask = 0u;
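
// For reference (a sketch): assuming the usual MIPS64 GPR numbering
// (S2..S7 = 18..23, GP = 28, S8 = 30, RA = 31), the core mask works out to
// 0xD0FC0000, so something like
//   static_assert(kCoreCalleeSpillMask == 0xD0FC0000u, "Unexpected core spill mask");
// would hold under that assumption.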

// Calling convention
ManagedRegister Mips64ManagedRuntimeCallingConvention::InterproceduralScratchRegister() {
  return Mips64ManagedRegister::FromGpuRegister(T9);
}

ManagedRegister Mips64JniCallingConvention::InterproceduralScratchRegister() {
  return Mips64ManagedRegister::FromGpuRegister(T9);
}

static ManagedRegister ReturnRegisterForShorty(const char* shorty) {
  if (shorty[0] == 'F' || shorty[0] == 'D') {
    return Mips64ManagedRegister::FromFpuRegister(F0);
  } else if (shorty[0] == 'V') {
    return Mips64ManagedRegister::NoRegister();
  } else {
    return Mips64ManagedRegister::FromGpuRegister(V0);
  }
}
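// The leading character of the shorty is the return type: e.g. a double-returning
// method ('D') returns in F0, an int-returning method ('I') returns in V0, and a
// void method ('V') needs no return register.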

ManagedRegister Mips64ManagedRuntimeCallingConvention::ReturnRegister() {
  return ReturnRegisterForShorty(GetShorty());
}

ManagedRegister Mips64JniCallingConvention::ReturnRegister() {
  return ReturnRegisterForShorty(GetShorty());
}

ManagedRegister Mips64JniCallingConvention::IntReturnRegister() {
  return Mips64ManagedRegister::FromGpuRegister(V0);
}

// Managed runtime calling convention

ManagedRegister Mips64ManagedRuntimeCallingConvention::MethodRegister() {
  return Mips64ManagedRegister::FromGpuRegister(A0);
}

bool Mips64ManagedRuntimeCallingConvention::IsCurrentParamInRegister() {
  return false;  // Everything moved to stack on entry.
}

bool Mips64ManagedRuntimeCallingConvention::IsCurrentParamOnStack() {
  return true;
}

ManagedRegister Mips64ManagedRuntimeCallingConvention::CurrentParamRegister() {
  LOG(FATAL) << "Should not reach here";
  return ManagedRegister::NoRegister();
}

FrameOffset Mips64ManagedRuntimeCallingConvention::CurrentParamStackOffset() {
  CHECK(IsCurrentParamOnStack());
  FrameOffset result =
      FrameOffset(displacement_.Int32Value() +  // displacement
                  kFramePointerSize +  // Method ref
                  (itr_slots_ * sizeof(uint32_t)));  // offset into in args
  return result;
}
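// For example (a sketch): with a zero displacement, the 8-byte method pointer
// (kFramePointerSize) and the iterator on the third 32-bit in-arg slot
// (itr_slots_ == 2), the resulting offset is 0 + 8 + 2 * 4 = 16.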

const ManagedRegisterEntrySpills& Mips64ManagedRuntimeCallingConvention::EntrySpills() {
  // We spill the argument registers on MIPS64 to free them up for scratch use;
  // after that we assume all arguments are on the stack.
  if ((entry_spills_.size() == 0) && (NumArgs() > 0)) {
    int reg_index = 1;   // We start from A1; A0 holds the ArtMethod*.

    // We need to choose the correct register size since the managed
    // stack uses 32-bit stack slots.
    ResetIterator(FrameOffset(0));
    while (HasNext()) {
      if (reg_index < 8) {
        if (IsCurrentParamAFloatOrDouble()) {  // FP regs.
          FpuRegister arg = kFpuArgumentRegisters[reg_index];
          Mips64ManagedRegister reg = Mips64ManagedRegister::FromFpuRegister(arg);
          entry_spills_.push_back(reg, IsCurrentParamADouble() ? 8 : 4);
        } else {  // GP regs.
          GpuRegister arg = kGpuArgumentRegisters[reg_index];
          Mips64ManagedRegister reg = Mips64ManagedRegister::FromGpuRegister(arg);
          entry_spills_.push_back(reg,
                                  (IsCurrentParamALong() && (!IsCurrentParamAReference())) ? 8 : 4);
        }
        // e.g. A1, A2, F15, A4, F17, F18, A7
        reg_index++;
      }

      Next();
    }
  }
  return entry_spills_;
}
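// As a sketch of the mapping above, an instance method taking (int, float, long)
// would spill: 'this' from A1 (4 bytes), the int from A2 (4 bytes), the float from
// F15 (4 bytes) and the long from A4 (8 bytes), assuming the iterator visits the
// implicit 'this' first.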

// JNI calling convention

Mips64JniCallingConvention::Mips64JniCallingConvention(bool is_static,
                                                       bool is_synchronized,
                                                       bool is_critical_native,
                                                       const char* shorty)
    : JniCallingConvention(is_static,
                           is_synchronized,
                           is_critical_native,
                           shorty,
                           kMips64PointerSize) {
}

uint32_t Mips64JniCallingConvention::CoreSpillMask() const {
  return kCoreCalleeSpillMask;
}

uint32_t Mips64JniCallingConvention::FpSpillMask() const {
  return kFpCalleeSpillMask;
}

ManagedRegister Mips64JniCallingConvention::ReturnScratchRegister() const {
  return Mips64ManagedRegister::FromGpuRegister(AT);
}

size_t Mips64JniCallingConvention::FrameSize() {
  // ArtMethod*, RA and callee save area size, local reference segment state.
  size_t method_ptr_size = static_cast<size_t>(kFramePointerSize);
  size_t ra_and_callee_save_area_size = (CalleeSaveRegisters().size() + 1) * kFramePointerSize;

  size_t frame_data_size = method_ptr_size + ra_and_callee_save_area_size;
  if (LIKELY(HasLocalReferenceSegmentState())) {                     // Local ref. segment state.
    // Local reference segment state is sometimes excluded.
    frame_data_size += sizeof(uint32_t);
  }
  // References plus 2 words for HandleScope header.
  size_t handle_scope_size = HandleScope::SizeOf(kMips64PointerSize, ReferenceCount());

  size_t total_size = frame_data_size;
  if (LIKELY(HasHandleScope())) {
    // HandleScope is sometimes excluded.
    total_size += handle_scope_size;                                 // Handle scope size.
  }

  // Plus return value spill area size.
  total_size += SizeOfReturnValue();

  return RoundUp(total_size, kStackAlignment);
}
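// As a rough sketch of the arithmetic above: with the eight callee saves plus RA and
// the 8-byte kFramePointerSize, the fixed part is 8 (ArtMethod*) + 9 * 8 = 80 bytes;
// the local reference segment state (4 bytes), the handle scope and the return value
// spill area are added on top, and the total is rounded up to kStackAlignment.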

size_t Mips64JniCallingConvention::OutArgSize() {
  return RoundUp(NumberOfOutgoingStackArgs() * kFramePointerSize, kStackAlignment);
}

ArrayRef<const ManagedRegister> Mips64JniCallingConvention::CalleeSaveRegisters() const {
  return ArrayRef<const ManagedRegister>(kCalleeSaveRegisters);
}

bool Mips64JniCallingConvention::IsCurrentParamInRegister() {
  return itr_args_ < kMaxRegisterArguments;
}

bool Mips64JniCallingConvention::IsCurrentParamOnStack() {
  return !IsCurrentParamInRegister();
}

ManagedRegister Mips64JniCallingConvention::CurrentParamRegister() {
  CHECK(IsCurrentParamInRegister());
  if (IsCurrentParamAFloatOrDouble()) {
    return Mips64ManagedRegister::FromFpuRegister(kFpuArgumentRegisters[itr_args_]);
  } else {
    return Mips64ManagedRegister::FromGpuRegister(kGpuArgumentRegisters[itr_args_]);
  }
}
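// For example (a sketch): for a non-critical static native taking (jint, jfloat),
// the outgoing arguments are (JNIEnv*, jclass, jint, jfloat), which map by position
// to A0, A1, A2 and F15 respectively.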

FrameOffset Mips64JniCallingConvention::CurrentParamStackOffset() {
  CHECK(IsCurrentParamOnStack());
  size_t args_on_stack = itr_args_ - kMaxRegisterArguments;
  size_t offset = displacement_.Int32Value() - OutArgSize() + (args_on_stack * kFramePointerSize);
  CHECK_LT(offset, OutArgSize());
  return FrameOffset(offset);
}
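// For example (a sketch): the tenth outgoing argument (itr_args_ == 9) is the second
// one on the stack, so it lands 8 bytes above the first stack slot, i.e. at
// displacement_ - OutArgSize() + 8.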

size_t Mips64JniCallingConvention::NumberOfOutgoingStackArgs() {
  // All arguments, including the JNI args.
  size_t all_args = NumArgs() + NumberOfExtraArgumentsForJni();

  // Nothing goes on the stack unless there are more than 8 arguments.
  return (all_args > kMaxRegisterArguments) ? all_args - kMaxRegisterArguments : 0;
}
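// E.g. (a sketch): with 11 total arguments, including the JNI extras, 11 - 8 = 3 of
// them end up on the stack.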
}  // namespace mips64
}  // namespace art