/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "trampoline_compiler.h"

#include "base/arena_allocator.h"
#include "jni_env_ext.h"

#ifdef ART_ENABLE_CODEGEN_arm
#include "utils/arm/assembler_arm_vixl.h"
#endif

#ifdef ART_ENABLE_CODEGEN_arm64
#include "utils/arm64/assembler_arm64.h"
#endif

#ifdef ART_ENABLE_CODEGEN_mips
#include "utils/mips/assembler_mips.h"
#endif

#ifdef ART_ENABLE_CODEGEN_mips64
#include "utils/mips64/assembler_mips64.h"
#endif

#ifdef ART_ENABLE_CODEGEN_x86
#include "utils/x86/assembler_x86.h"
#endif

#ifdef ART_ENABLE_CODEGEN_x86_64
#include "utils/x86_64/assembler_x86_64.h"
#endif

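// Convenience macro: '__' forwards to the local 'assembler' instance, so the
// trampoline-emitting code below reads like an assembly listing.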
#define __ assembler.

namespace art {

#ifdef ART_ENABLE_CODEGEN_arm
namespace arm {

#ifdef ___
#error "ARM Assembler macro already defined."
#else
#define ___ assembler.GetVIXLAssembler()->
#endif

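// Emits the 32-bit ARM trampoline: a tail-call through the entry point stored at
// 'offset' from Thread*. For kInterpreterAbi and kQuickAbi this is a single
// "ldr pc, [<thread register>, #offset]"; kJniAbi first loads the Thread* out of
// the JNIEnvExt passed in R0, then performs the same indirect jump.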
static std::unique_ptr<const std::vector<uint8_t>> CreateTrampoline(
    ArenaAllocator* arena, EntryPointCallingConvention abi, ThreadOffset32 offset) {
  using vixl::aarch32::MemOperand;
  using vixl::aarch32::pc;
  using vixl::aarch32::r0;
  ArmVIXLAssembler assembler(arena);

  switch (abi) {
    case kInterpreterAbi:  // Thread* is first argument (R0) in interpreter ABI.
      ___ Ldr(pc, MemOperand(r0, offset.Int32Value()));
      break;
    case kJniAbi: {  // Load via Thread* held in JNIEnv* in first argument (R0).
      vixl::aarch32::UseScratchRegisterScope temps(assembler.GetVIXLAssembler());
      const vixl::aarch32::Register temp_reg = temps.Acquire();

      // VIXL will use the destination as a scratch register if
      // the offset is not encodable as an immediate operand.
      ___ Ldr(temp_reg, MemOperand(r0, JNIEnvExt::SelfOffset(4).Int32Value()));
      ___ Ldr(pc, MemOperand(temp_reg, offset.Int32Value()));
      break;
    }
    case kQuickAbi:  // TR holds Thread*.
      ___ Ldr(pc, MemOperand(tr, offset.Int32Value()));
  }

  __ FinalizeCode();
  size_t cs = __ CodeSize();
  std::unique_ptr<std::vector<uint8_t>> entry_stub(new std::vector<uint8_t>(cs));
  MemoryRegion code(entry_stub->data(), entry_stub->size());
  __ FinalizeInstructions(code);

  return std::move(entry_stub);
}

#undef ___

}  // namespace arm
#endif  // ART_ENABLE_CODEGEN_arm

#ifdef ART_ENABLE_CODEGEN_arm64
namespace arm64 {
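// Emits the ARM64 trampoline. JumpTo() loads the entry point from the given base
// register plus 'offset' into a scratch register (IP0/IP1 here) and branches to it;
// the kJniAbi case first recovers the Thread* from the JNIEnvExt passed in X0.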
static std::unique_ptr<const std::vector<uint8_t>> CreateTrampoline(
    ArenaAllocator* arena, EntryPointCallingConvention abi, ThreadOffset64 offset) {
  Arm64Assembler assembler(arena);

  switch (abi) {
    case kInterpreterAbi:  // Thread* is first argument (X0) in interpreter ABI.
      __ JumpTo(Arm64ManagedRegister::FromXRegister(X0), Offset(offset.Int32Value()),
                Arm64ManagedRegister::FromXRegister(IP1));

      break;
    case kJniAbi:  // Load via Thread* held in JNIEnv* in first argument (X0).
      __ LoadRawPtr(Arm64ManagedRegister::FromXRegister(IP1),
                      Arm64ManagedRegister::FromXRegister(X0),
                      Offset(JNIEnvExt::SelfOffset(8).Int32Value()));

      __ JumpTo(Arm64ManagedRegister::FromXRegister(IP1), Offset(offset.Int32Value()),
                Arm64ManagedRegister::FromXRegister(IP0));

      break;
    case kQuickAbi:  // TR holds Thread*.
      __ JumpTo(Arm64ManagedRegister::FromXRegister(TR), Offset(offset.Int32Value()),
                Arm64ManagedRegister::FromXRegister(IP0));

      break;
  }

  __ FinalizeCode();
  size_t cs = __ CodeSize();
  std::unique_ptr<std::vector<uint8_t>> entry_stub(new std::vector<uint8_t>(cs));
  MemoryRegion code(entry_stub->data(), entry_stub->size());
  __ FinalizeInstructions(code);

  return std::move(entry_stub);
}
}  // namespace arm64
#endif  // ART_ENABLE_CODEGEN_arm64

#ifdef ART_ENABLE_CODEGEN_mips
namespace mips {
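// Emits the MIPS32 trampoline. The entry point is loaded into T9, which the MIPS
// PIC convention expects to hold the target of an indirect jump, followed by "jr"
// and its delay-slot nop; the trailing break() is a guard that should never be reached.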
static std::unique_ptr<const std::vector<uint8_t>> CreateTrampoline(
    ArenaAllocator* arena, EntryPointCallingConvention abi, ThreadOffset32 offset) {
  MipsAssembler assembler(arena);

  switch (abi) {
    case kInterpreterAbi:  // Thread* is first argument (A0) in interpreter ABI.
      __ LoadFromOffset(kLoadWord, T9, A0, offset.Int32Value());
      break;
    case kJniAbi:  // Load via Thread* held in JNIEnv* in first argument (A0).
      __ LoadFromOffset(kLoadWord, T9, A0, JNIEnvExt::SelfOffset(4).Int32Value());
      __ LoadFromOffset(kLoadWord, T9, T9, offset.Int32Value());
      break;
    case kQuickAbi:  // S1 holds Thread*.
      __ LoadFromOffset(kLoadWord, T9, S1, offset.Int32Value());
  }
  __ Jr(T9);
  __ NopIfNoReordering();
  __ Break();

  __ FinalizeCode();
  size_t cs = __ CodeSize();
  std::unique_ptr<std::vector<uint8_t>> entry_stub(new std::vector<uint8_t>(cs));
  MemoryRegion code(entry_stub->data(), entry_stub->size());
  __ FinalizeInstructions(code);

  return std::move(entry_stub);
}
}  // namespace mips
#endif  // ART_ENABLE_CODEGEN_mips

#ifdef ART_ENABLE_CODEGEN_mips64
namespace mips64 {
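// Emits the MIPS64 trampoline; identical in shape to the MIPS32 one above, but
// using 64-bit (doubleword) loads and the 64-bit JNIEnvExt/Thread offsets.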
static std::unique_ptr<const std::vector<uint8_t>> CreateTrampoline(
    ArenaAllocator* arena, EntryPointCallingConvention abi, ThreadOffset64 offset) {
  Mips64Assembler assembler(arena);

  switch (abi) {
    case kInterpreterAbi:  // Thread* is first argument (A0) in interpreter ABI.
      __ LoadFromOffset(kLoadDoubleword, T9, A0, offset.Int32Value());
      break;
    case kJniAbi:  // Load via Thread* held in JNIEnv* in first argument (A0).
      __ LoadFromOffset(kLoadDoubleword, T9, A0, JNIEnvExt::SelfOffset(8).Int32Value());
      __ LoadFromOffset(kLoadDoubleword, T9, T9, offset.Int32Value());
      break;
    case kQuickAbi:  // S1 holds Thread*.
      __ LoadFromOffset(kLoadDoubleword, T9, S1, offset.Int32Value());
  }
  __ Jr(T9);
  __ Nop();
  __ Break();

  __ FinalizeCode();
  size_t cs = __ CodeSize();
  std::unique_ptr<std::vector<uint8_t>> entry_stub(new std::vector<uint8_t>(cs));
  MemoryRegion code(entry_stub->data(), entry_stub->size());
  __ FinalizeInstructions(code);

  return std::move(entry_stub);
}
}  // namespace mips64
#endif  // ART_ENABLE_CODEGEN_mips64

#ifdef ART_ENABLE_CODEGEN_x86
namespace x86 {
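// Emits the x86 trampoline. There is only one form: the Thread* is always reachable
// through the fs segment register, so the stub is a single fs-relative absolute jump
// (plus an int3 guard), regardless of the calling convention.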
static std::unique_ptr<const std::vector<uint8_t>> CreateTrampoline(ArenaAllocator* arena,
                                                                    ThreadOffset32 offset) {
  X86Assembler assembler(arena);

  // All x86 trampolines call via the Thread* held in fs.
  __ fs()->jmp(Address::Absolute(offset));
  __ int3();

  __ FinalizeCode();
  size_t cs = __ CodeSize();
  std::unique_ptr<std::vector<uint8_t>> entry_stub(new std::vector<uint8_t>(cs));
  MemoryRegion code(entry_stub->data(), entry_stub->size());
  __ FinalizeInstructions(code);

  return std::move(entry_stub);
}
}  // namespace x86
#endif  // ART_ENABLE_CODEGEN_x86

#ifdef ART_ENABLE_CODEGEN_x86_64
namespace x86_64 {
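// Emits the x86-64 trampoline: same single-instruction scheme as x86, but the
// Thread* lives behind the gs segment register and the address is 64-bit.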
static std::unique_ptr<const std::vector<uint8_t>> CreateTrampoline(ArenaAllocator* arena,
                                                                    ThreadOffset64 offset) {
  x86_64::X86_64Assembler assembler(arena);

  // All x86-64 trampolines call via the Thread* held in gs.
  __ gs()->jmp(x86_64::Address::Absolute(offset, true));
  __ int3();

  __ FinalizeCode();
  size_t cs = __ CodeSize();
  std::unique_ptr<std::vector<uint8_t>> entry_stub(new std::vector<uint8_t>(cs));
  MemoryRegion code(entry_stub->data(), entry_stub->size());
  __ FinalizeInstructions(code);

  return std::move(entry_stub);
}
}  // namespace x86_64
#endif  // ART_ENABLE_CODEGEN_x86_64

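// Public entry points: dispatch to whichever per-ISA generator above was compiled in.
// A caller supplies the ABI and the thread-local entry point offset to jump through;
// for example (illustrative only, not a call site from this file):
//
//   std::unique_ptr<const std::vector<uint8_t>> stub =
//       CreateTrampoline64(kArm64, kQuickAbi, offset_of_quick_entry_point_in_thread);
//
// where 'offset_of_quick_entry_point_in_thread' stands for a ThreadOffset64 taken from
// the entry point tables (see the real call sites in the compiler driver).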
std::unique_ptr<const std::vector<uint8_t>> CreateTrampoline64(InstructionSet isa,
                                                               EntryPointCallingConvention abi,
                                                               ThreadOffset64 offset) {
  ArenaPool pool;
  ArenaAllocator arena(&pool);
  switch (isa) {
#ifdef ART_ENABLE_CODEGEN_arm64
    case kArm64:
      return arm64::CreateTrampoline(&arena, abi, offset);
#endif
#ifdef ART_ENABLE_CODEGEN_mips64
    case kMips64:
      return mips64::CreateTrampoline(&arena, abi, offset);
#endif
#ifdef ART_ENABLE_CODEGEN_x86_64
    case kX86_64:
      return x86_64::CreateTrampoline(&arena, offset);
#endif
    default:
      UNUSED(abi);
      UNUSED(offset);
      LOG(FATAL) << "Unexpected InstructionSet: " << isa;
      UNREACHABLE();
  }
}

std::unique_ptr<const std::vector<uint8_t>> CreateTrampoline32(InstructionSet isa,
                                                               EntryPointCallingConvention abi,
                                                               ThreadOffset32 offset) {
  ArenaPool pool;
  ArenaAllocator arena(&pool);
  switch (isa) {
#ifdef ART_ENABLE_CODEGEN_arm
    case kArm:
    case kThumb2:
      return arm::CreateTrampoline(&arena, abi, offset);
#endif
#ifdef ART_ENABLE_CODEGEN_mips
    case kMips:
      return mips::CreateTrampoline(&arena, abi, offset);
#endif
#ifdef ART_ENABLE_CODEGEN_x86
    case kX86:
      UNUSED(abi);
      return x86::CreateTrampoline(&arena, offset);
#endif
    default:
      LOG(FATAL) << "Unexpected InstructionSet: " << isa;
      UNREACHABLE();
  }
}

}  // namespace art