/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "trampoline_compiler.h"

#include "jni_internal.h"
#include "utils/arm/assembler_arm.h"
#include "utils/arm64/assembler_arm64.h"
#include "utils/mips/assembler_mips.h"
#include "utils/x86/assembler_x86.h"
#include "utils/x86_64/assembler_x86_64.h"

#define __ assembler->

namespace art {

namespace arm {
static const std::vector<uint8_t>* CreateTrampoline(EntryPointCallingConvention abi,
                                                    ThreadOffset<4> offset) {
  std::unique_ptr<ArmAssembler> assembler(static_cast<ArmAssembler*>(Assembler::Create(kThumb2)));

  switch (abi) {
    case kInterpreterAbi:  // Thread* is first argument (R0) in interpreter ABI.
      __ LoadFromOffset(kLoadWord, PC, R0, offset.Int32Value());
      break;
    case kJniAbi:  // Load via Thread* held in JNIEnv* in first argument (R0).
      __ LoadFromOffset(kLoadWord, IP, R0, JNIEnvExt::SelfOffset().Int32Value());
      __ LoadFromOffset(kLoadWord, PC, IP, offset.Int32Value());
      break;
    case kPortableAbi:  // R9 holds Thread*.
    case kQuickAbi:  // Fall-through.
      __ LoadFromOffset(kLoadWord, PC, R9, offset.Int32Value());
  }
  __ bkpt(0);

  size_t cs = assembler->CodeSize();
  std::unique_ptr<std::vector<uint8_t>> entry_stub(new std::vector<uint8_t>(cs));
  MemoryRegion code(&(*entry_stub)[0], entry_stub->size());
  assembler->FinalizeInstructions(code);

  return entry_stub.release();
}
}  // namespace arm

namespace arm64 {
static const std::vector<uint8_t>* CreateTrampoline(EntryPointCallingConvention abi,
                                                    ThreadOffset<8> offset) {
  std::unique_ptr<Arm64Assembler> assembler(
      static_cast<Arm64Assembler*>(Assembler::Create(kArm64)));

  switch (abi) {
    case kInterpreterAbi:  // Thread* is first argument (X0) in interpreter ABI.
      __ JumpTo(Arm64ManagedRegister::FromCoreRegister(X0), Offset(offset.Int32Value()),
                Arm64ManagedRegister::FromCoreRegister(IP1));
      break;
    case kJniAbi:  // Load via Thread* held in JNIEnv* in first argument (X0).
      __ LoadRawPtr(Arm64ManagedRegister::FromCoreRegister(IP1),
                    Arm64ManagedRegister::FromCoreRegister(X0),
                    Offset(JNIEnvExt::SelfOffset().Int32Value()));
      __ JumpTo(Arm64ManagedRegister::FromCoreRegister(IP1), Offset(offset.Int32Value()),
                Arm64ManagedRegister::FromCoreRegister(IP0));
      break;
    case kPortableAbi:  // X18 holds Thread*.
    case kQuickAbi:  // Fall-through.
      __ JumpTo(Arm64ManagedRegister::FromCoreRegister(TR), Offset(offset.Int32Value()),
                Arm64ManagedRegister::FromCoreRegister(IP0));
      break;
  }

  size_t cs = assembler->CodeSize();
  std::unique_ptr<std::vector<uint8_t>> entry_stub(new std::vector<uint8_t>(cs));
  MemoryRegion code(&(*entry_stub)[0], entry_stub->size());
  assembler->FinalizeInstructions(code);

  return entry_stub.release();
}
}  // namespace arm64

namespace mips {
static const std::vector<uint8_t>* CreateTrampoline(EntryPointCallingConvention abi,
                                                    ThreadOffset<4> offset) {
  std::unique_ptr<MipsAssembler> assembler(static_cast<MipsAssembler*>(Assembler::Create(kMips)));

  switch (abi) {
    case kInterpreterAbi:  // Thread* is first argument (A0) in interpreter ABI.
      __ LoadFromOffset(kLoadWord, T9, A0, offset.Int32Value());
      break;
    case kJniAbi:  // Load via Thread* held in JNIEnv* in first argument (A0).
      __ LoadFromOffset(kLoadWord, T9, A0, JNIEnvExt::SelfOffset().Int32Value());
      __ LoadFromOffset(kLoadWord, T9, T9, offset.Int32Value());
      break;
    case kPortableAbi:  // S1 holds Thread*.
    case kQuickAbi:  // Fall-through.
      __ LoadFromOffset(kLoadWord, T9, S1, offset.Int32Value());
  }
  __ Jr(T9);
  __ Nop();
  __ Break();

  size_t cs = assembler->CodeSize();
  std::unique_ptr<std::vector<uint8_t>> entry_stub(new std::vector<uint8_t>(cs));
  MemoryRegion code(&(*entry_stub)[0], entry_stub->size());
  assembler->FinalizeInstructions(code);

  return entry_stub.release();
}
}  // namespace mips

namespace x86 {
static const std::vector<uint8_t>* CreateTrampoline(ThreadOffset<4> offset) {
  std::unique_ptr<X86Assembler> assembler(static_cast<X86Assembler*>(Assembler::Create(kX86)));

  // All x86 trampolines call via the Thread* held in fs.
  __ fs()->jmp(Address::Absolute(offset));
  __ int3();

  size_t cs = assembler->CodeSize();
  std::unique_ptr<std::vector<uint8_t>> entry_stub(new std::vector<uint8_t>(cs));
  MemoryRegion code(&(*entry_stub)[0], entry_stub->size());
  assembler->FinalizeInstructions(code);

  return entry_stub.release();
}
}  // namespace x86

namespace x86_64 {
static const std::vector<uint8_t>* CreateTrampoline(ThreadOffset<8> offset) {
  std::unique_ptr<x86_64::X86_64Assembler>
      assembler(static_cast<x86_64::X86_64Assembler*>(Assembler::Create(kX86_64)));

  // All x86 trampolines call via the Thread* held in gs.
  __ gs()->jmp(x86_64::Address::Absolute(offset, true));
  __ int3();

  size_t cs = assembler->CodeSize();
  std::unique_ptr<std::vector<uint8_t>> entry_stub(new std::vector<uint8_t>(cs));
  MemoryRegion code(&(*entry_stub)[0], entry_stub->size());
  assembler->FinalizeInstructions(code);

  return entry_stub.release();
}
}  // namespace x86_64

const std::vector<uint8_t>* CreateTrampoline64(InstructionSet isa, EntryPointCallingConvention abi,
                                               ThreadOffset<8> offset) {
  switch (isa) {
    case kArm64:
      return arm64::CreateTrampoline(abi, offset);
    case kX86_64:
      return x86_64::CreateTrampoline(offset);
    default:
      LOG(FATAL) << "Unexpected InstructionSet: " << isa;
      return nullptr;
  }
}

const std::vector<uint8_t>* CreateTrampoline32(InstructionSet isa, EntryPointCallingConvention abi,
                                               ThreadOffset<4> offset) {
  switch (isa) {
    case kArm:
    case kThumb2:
      return arm::CreateTrampoline(abi, offset);
    case kMips:
      return mips::CreateTrampoline(abi, offset);
    case kX86:
      return x86::CreateTrampoline(offset);
    default:
      LOG(FATAL) << "Unexpected InstructionSet: " << isa;
      return nullptr;
  }
}

}  // namespace art