/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "trampoline_compiler.h"

#include "jni_env_ext.h"
#include "utils/arm/assembler_arm.h"
#include "utils/arm64/assembler_arm64.h"
#include "utils/mips/assembler_mips.h"
#include "utils/mips64/assembler_mips64.h"
#include "utils/x86/assembler_x86.h"
#include "utils/x86_64/assembler_x86_64.h"

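// Shorthand so the emission code below reads like an assembly listing:
// "__ Op(...)" expands to "assembler->Op(...)".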
#define __ assembler->

namespace art {

namespace arm {
static const std::vector<uint8_t>* CreateTrampoline(EntryPointCallingConvention abi,
                                                    ThreadOffset<4> offset) {
  std::unique_ptr<ArmAssembler> assembler(static_cast<ArmAssembler*>(Assembler::Create(kThumb2)));

  switch (abi) {
    case kInterpreterAbi:  // Thread* is first argument (R0) in interpreter ABI.
      __ LoadFromOffset(kLoadWord, PC, R0, offset.Int32Value());
      break;
    case kJniAbi:  // Load via Thread* held in JNIEnv* in first argument (R0).
      __ LoadFromOffset(kLoadWord, IP, R0, JNIEnvExt::SelfOffset().Int32Value());
      __ LoadFromOffset(kLoadWord, PC, IP, offset.Int32Value());
      break;
    case kQuickAbi:  // R9 holds Thread*.
      __ LoadFromOffset(kLoadWord, PC, R9, offset.Int32Value());
  }
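  // Every case above loads the entry point directly into PC, so control never
  // falls through; emit a breakpoint to trap the impossible path.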
  __ bkpt(0);
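  // Copy the emitted instructions out of the assembler into a buffer sized
  // exactly to the generated code; the caller takes ownership of the buffer.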
  size_t cs = assembler->CodeSize();
  std::unique_ptr<std::vector<uint8_t>> entry_stub(new std::vector<uint8_t>(cs));
  MemoryRegion code(&(*entry_stub)[0], entry_stub->size());
  assembler->FinalizeInstructions(code);

  return entry_stub.release();
}
}  // namespace arm

namespace arm64 {
static const std::vector<uint8_t>* CreateTrampoline(EntryPointCallingConvention abi,
                                                    ThreadOffset<8> offset) {
  std::unique_ptr<Arm64Assembler> assembler(static_cast<Arm64Assembler*>(Assembler::Create(kArm64)));

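  // IP0 (x16) and IP1 (x17) are the AAPCS64 intra-procedure-call scratch
  // registers; JumpTo loads the target through the given scratch and branches.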
  switch (abi) {
    case kInterpreterAbi:  // Thread* is first argument (X0) in interpreter ABI.
      __ JumpTo(Arm64ManagedRegister::FromXRegister(X0), Offset(offset.Int32Value()),
                Arm64ManagedRegister::FromXRegister(IP1));
      break;
    case kJniAbi:  // Load via Thread* held in JNIEnv* in first argument (X0).
      __ LoadRawPtr(Arm64ManagedRegister::FromXRegister(IP1),
                    Arm64ManagedRegister::FromXRegister(X0),
                    Offset(JNIEnvExt::SelfOffset().Int32Value()));
      __ JumpTo(Arm64ManagedRegister::FromXRegister(IP1), Offset(offset.Int32Value()),
                Arm64ManagedRegister::FromXRegister(IP0));
      break;
    case kQuickAbi:  // X18 holds Thread*.
      __ JumpTo(Arm64ManagedRegister::FromXRegister(TR), Offset(offset.Int32Value()),
                Arm64ManagedRegister::FromXRegister(IP0));
      break;
  }

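  // Flush any slow-path code the arm64 assembler buffered during emission
  // before measuring the final code size.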
  assembler->EmitSlowPaths();
  size_t cs = assembler->CodeSize();
  std::unique_ptr<std::vector<uint8_t>> entry_stub(new std::vector<uint8_t>(cs));
  MemoryRegion code(&(*entry_stub)[0], entry_stub->size());
  assembler->FinalizeInstructions(code);

  return entry_stub.release();
}
}  // namespace arm64

namespace mips {
static const std::vector<uint8_t>* CreateTrampoline(EntryPointCallingConvention abi,
                                                    ThreadOffset<4> offset) {
  std::unique_ptr<MipsAssembler> assembler(static_cast<MipsAssembler*>(Assembler::Create(kMips)));

  switch (abi) {
    case kInterpreterAbi:  // Thread* is first argument (A0) in interpreter ABI.
      __ LoadFromOffset(kLoadWord, T9, A0, offset.Int32Value());
      break;
    case kJniAbi:  // Load via Thread* held in JNIEnv* in first argument (A0).
      __ LoadFromOffset(kLoadWord, T9, A0, JNIEnvExt::SelfOffset().Int32Value());
      __ LoadFromOffset(kLoadWord, T9, T9, offset.Int32Value());
      break;
    case kQuickAbi:  // S1 holds Thread*.
      __ LoadFromOffset(kLoadWord, T9, S1, offset.Int32Value());
  }
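  // Jump through T9, the conventional MIPS register for indirect calls; the
  // Nop fills the branch delay slot and Break traps any fall-through.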
  __ Jr(T9);
  __ Nop();
  __ Break();

  size_t cs = assembler->CodeSize();
  std::unique_ptr<std::vector<uint8_t>> entry_stub(new std::vector<uint8_t>(cs));
  MemoryRegion code(&(*entry_stub)[0], entry_stub->size());
  assembler->FinalizeInstructions(code);

  return entry_stub.release();
}
}  // namespace mips

namespace mips64 {
static const std::vector<uint8_t>* CreateTrampoline(EntryPointCallingConvention abi,
                                                    ThreadOffset<8> offset) {
  std::unique_ptr<Mips64Assembler> assembler(static_cast<Mips64Assembler*>(Assembler::Create(kMips64)));

  switch (abi) {
    case kInterpreterAbi:  // Thread* is first argument (A0) in interpreter ABI.
      __ LoadFromOffset(kLoadDoubleword, T9, A0, offset.Int32Value());
      break;
    case kJniAbi:  // Load via Thread* held in JNIEnv* in first argument (A0).
      __ LoadFromOffset(kLoadDoubleword, T9, A0, JNIEnvExt::SelfOffset().Int32Value());
      __ LoadFromOffset(kLoadDoubleword, T9, T9, offset.Int32Value());
      break;
    case kQuickAbi:  // S1 holds Thread*.
      __ LoadFromOffset(kLoadDoubleword, T9, S1, offset.Int32Value());
  }
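  // Same epilogue as the 32-bit mips trampoline: jump through T9, fill the
  // branch delay slot, and trap any fall-through.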
  __ Jr(T9);
  __ Nop();
  __ Break();

  size_t cs = assembler->CodeSize();
  std::unique_ptr<std::vector<uint8_t>> entry_stub(new std::vector<uint8_t>(cs));
  MemoryRegion code(&(*entry_stub)[0], entry_stub->size());
  assembler->FinalizeInstructions(code);

  return entry_stub.release();
}
}  // namespace mips64

namespace x86 {
static const std::vector<uint8_t>* CreateTrampoline(ThreadOffset<4> offset) {
  std::unique_ptr<X86Assembler> assembler(static_cast<X86Assembler*>(Assembler::Create(kX86)));

  // All x86 trampolines call via the Thread* held in fs.
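  // This emits "jmp fs:[offset]": an indirect jump through the entry-point
  // slot at the given offset within the Thread object that fs points at.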
  __ fs()->jmp(Address::Absolute(offset));
  __ int3();

  size_t cs = assembler->CodeSize();
  std::unique_ptr<std::vector<uint8_t>> entry_stub(new std::vector<uint8_t>(cs));
  MemoryRegion code(&(*entry_stub)[0], entry_stub->size());
  assembler->FinalizeInstructions(code);

  return entry_stub.release();
}
}  // namespace x86

namespace x86_64 {
static const std::vector<uint8_t>* CreateTrampoline(ThreadOffset<8> offset) {
  std::unique_ptr<x86_64::X86_64Assembler>
      assembler(static_cast<x86_64::X86_64Assembler*>(Assembler::Create(kX86_64)));

  // All x86-64 trampolines call via the Thread* held in gs.
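  // The boolean argument to Address::Absolute() selects a plain absolute
  // displacement rather than RIP-relative addressing, so the gs segment
  // override addresses the entry-point slot inside the Thread object.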
  __ gs()->jmp(x86_64::Address::Absolute(offset, true));
  __ int3();

  size_t cs = assembler->CodeSize();
  std::unique_ptr<std::vector<uint8_t>> entry_stub(new std::vector<uint8_t>(cs));
  MemoryRegion code(&(*entry_stub)[0], entry_stub->size());
  assembler->FinalizeInstructions(code);

  return entry_stub.release();
}
}  // namespace x86_64

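// Public entry points: dispatch to the ISA-specific generator. The x86
// variants take no abi parameter because every ABI reaches the Thread*
// through the segment register. Sketch of a typical call (the entry-point
// name is illustrative; the caller owns the returned vector):
//   const std::vector<uint8_t>* stub = CreateTrampoline32(
//       kThumb2, kQuickAbi, QUICK_ENTRYPOINT_OFFSET(4, pQuickToInterpreterBridge));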
const std::vector<uint8_t>* CreateTrampoline64(InstructionSet isa, EntryPointCallingConvention abi,
                                               ThreadOffset<8> offset) {
  switch (isa) {
    case kArm64:
      return arm64::CreateTrampoline(abi, offset);
    case kMips64:
      return mips64::CreateTrampoline(abi, offset);
    case kX86_64:
      return x86_64::CreateTrampoline(offset);
    default:
      LOG(FATAL) << "Unexpected InstructionSet: " << isa;
      UNREACHABLE();
  }
}

const std::vector<uint8_t>* CreateTrampoline32(InstructionSet isa, EntryPointCallingConvention abi,
                                               ThreadOffset<4> offset) {
  switch (isa) {
    case kArm:
    case kThumb2:
      return arm::CreateTrampoline(abi, offset);
    case kMips:
      return mips::CreateTrampoline(abi, offset);
    case kX86:
      return x86::CreateTrampoline(offset);
    default:
      LOG(FATAL) << "Unexpected InstructionSet: " << isa;
      UNREACHABLE();
  }
}

}  // namespace art