/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "jni_compiler.h"

#include <algorithm>
#include <fstream>
#include <memory>
#include <vector>

#include "art_method.h"
#include "base/arena_allocator.h"
#include "base/logging.h"
#include "base/macros.h"
#include "calling_convention.h"
#include "class_linker.h"
#include "compiled_method.h"
#include "dex_file-inl.h"
#include "driver/compiler_driver.h"
#include "driver/compiler_options.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "jni_env_ext.h"
#include "thread.h"
#include "utils/arm/managed_register_arm.h"
#include "utils/arm64/managed_register_arm64.h"
#include "utils/assembler.h"
#include "utils/managed_register.h"
#include "utils/mips/managed_register_mips.h"
#include "utils/mips64/managed_register_mips64.h"
#include "utils/x86/managed_register_x86.h"

#define __ jni_asm->

namespace art {

static void CopyParameter(Assembler* jni_asm,
                          ManagedRuntimeCallingConvention* mr_conv,
                          JniCallingConvention* jni_conv,
                          size_t frame_size, size_t out_arg_size);
static void SetNativeParameter(Assembler* jni_asm,
                               JniCallingConvention* jni_conv,
                               ManagedRegister in_reg);

// Generate the JNI bridge for the given method. General contract:
// - Arguments are in the managed runtime format, either on stack or in
//   registers; a reference to the method object is supplied as part of this
//   convention.
//
CompiledMethod* ArtJniCompileMethodInternal(CompilerDriver* driver,
                                            uint32_t access_flags, uint32_t method_idx,
                                            const DexFile& dex_file) {
  const bool is_native = (access_flags & kAccNative) != 0;
  CHECK(is_native);
  const bool is_static = (access_flags & kAccStatic) != 0;
  const bool is_synchronized = (access_flags & kAccSynchronized) != 0;
  const char* shorty = dex_file.GetMethodShorty(dex_file.GetMethodId(method_idx));
  InstructionSet instruction_set = driver->GetInstructionSet();
  const InstructionSetFeatures* instruction_set_features = driver->GetInstructionSetFeatures();
  const bool is_64_bit_target = Is64BitInstructionSet(instruction_set);

  ArenaPool pool;
  ArenaAllocator arena(&pool);

  // Calling conventions used to iterate over parameters to method.
  std::unique_ptr<JniCallingConvention> main_jni_conv(
      JniCallingConvention::Create(&arena, is_static, is_synchronized, shorty, instruction_set));
  bool reference_return = main_jni_conv->IsReturnAReference();

  std::unique_ptr<ManagedRuntimeCallingConvention> mr_conv(
      ManagedRuntimeCallingConvention::Create(
          &arena, is_static, is_synchronized, shorty, instruction_set));

  // Calling conventions to call into JNI method "end" possibly passing a returned reference, the
  // method and the current thread.
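  // Note: the end-call shorty is synthesized rather than taken from the method: the "I..." forms
  // are used when a reference is returned, and an extra 'L' is appended for the object to unlock
  // when the method is synchronized, so that end_jni_conv allocates an argument slot for every
  // value the JniMethodEnd call will need.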
  const char* jni_end_shorty;
  if (reference_return && is_synchronized) {
    jni_end_shorty = "ILL";
  } else if (reference_return) {
    jni_end_shorty = "IL";
  } else if (is_synchronized) {
    jni_end_shorty = "VL";
  } else {
    jni_end_shorty = "V";
  }

  std::unique_ptr<JniCallingConvention> end_jni_conv(JniCallingConvention::Create(
      &arena, is_static, is_synchronized, jni_end_shorty, instruction_set));

  // Assembler that holds generated instructions.
  std::unique_ptr<Assembler> jni_asm(
      Assembler::Create(&arena, instruction_set, instruction_set_features));
  jni_asm->cfi().SetEnabled(driver->GetCompilerOptions().GenerateAnyDebugInfo());

  // Offsets into data structures.
  // TODO: if cross compiling these offsets are for the host not the target.
  const Offset functions(OFFSETOF_MEMBER(JNIEnvExt, functions));
  const Offset monitor_enter(OFFSETOF_MEMBER(JNINativeInterface, MonitorEnter));
  const Offset monitor_exit(OFFSETOF_MEMBER(JNINativeInterface, MonitorExit));

  // 1. Build the frame saving all callee saves.
  const size_t frame_size(main_jni_conv->FrameSize());
  const std::vector<ManagedRegister>& callee_save_regs = main_jni_conv->CalleeSaveRegisters();
  __ BuildFrame(frame_size, mr_conv->MethodRegister(), callee_save_regs, mr_conv->EntrySpills());
  DCHECK_EQ(jni_asm->cfi().GetCurrentCFAOffset(), static_cast<int>(frame_size));

  // 2. Set up the HandleScope.
  mr_conv->ResetIterator(FrameOffset(frame_size));
  main_jni_conv->ResetIterator(FrameOffset(0));
  __ StoreImmediateToFrame(main_jni_conv->HandleScopeNumRefsOffset(),
                           main_jni_conv->ReferenceCount(),
                           mr_conv->InterproceduralScratchRegister());

  if (is_64_bit_target) {
    __ CopyRawPtrFromThread64(main_jni_conv->HandleScopeLinkOffset(),
                              Thread::TopHandleScopeOffset<8>(),
                              mr_conv->InterproceduralScratchRegister());
    __ StoreStackOffsetToThread64(Thread::TopHandleScopeOffset<8>(),
                                  main_jni_conv->HandleScopeOffset(),
                                  mr_conv->InterproceduralScratchRegister());
  } else {
    __ CopyRawPtrFromThread32(main_jni_conv->HandleScopeLinkOffset(),
                              Thread::TopHandleScopeOffset<4>(),
                              mr_conv->InterproceduralScratchRegister());
    __ StoreStackOffsetToThread32(Thread::TopHandleScopeOffset<4>(),
                                  main_jni_conv->HandleScopeOffset(),
                                  mr_conv->InterproceduralScratchRegister());
  }

  // 3. Place incoming reference arguments into handle scope.
  main_jni_conv->Next();  // Skip JNIEnv*.
  // 3.5. Create Class argument for static methods out of passed method.
  if (is_static) {
    FrameOffset handle_scope_offset = main_jni_conv->CurrentParamHandleScopeEntryOffset();
    // Check handle scope offset is within frame.
    CHECK_LT(handle_scope_offset.Uint32Value(), frame_size);
    // Note this LoadRef() doesn't need heap unpoisoning since it's from the ArtMethod.
    // Note this LoadRef() does not include read barrier. It will be handled below.
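    // Note: storing the class in the handle scope (rather than handing native code a raw
    // pointer) keeps it visible to the GC as a root, so a moving collector can update the
    // entry in place.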
    __ LoadRef(main_jni_conv->InterproceduralScratchRegister(),
               mr_conv->MethodRegister(), ArtMethod::DeclaringClassOffset(), false);
    __ VerifyObject(main_jni_conv->InterproceduralScratchRegister(), false);
    __ StoreRef(handle_scope_offset, main_jni_conv->InterproceduralScratchRegister());
    main_jni_conv->Next();  // In handle scope, so move to next argument.
  }
  while (mr_conv->HasNext()) {
    CHECK(main_jni_conv->HasNext());
    bool ref_param = main_jni_conv->IsCurrentParamAReference();
    CHECK(!ref_param || mr_conv->IsCurrentParamAReference());
    // References need placing in handle scope and the entry value passing.
    if (ref_param) {
      // Compute handle scope entry; note null is placed in the handle scope but its boxed value
      // must be null.
      FrameOffset handle_scope_offset = main_jni_conv->CurrentParamHandleScopeEntryOffset();
      // Check handle scope offset is within frame and doesn't run into the saved segment state.
      CHECK_LT(handle_scope_offset.Uint32Value(), frame_size);
      CHECK_NE(handle_scope_offset.Uint32Value(),
               main_jni_conv->SavedLocalReferenceCookieOffset().Uint32Value());
      bool input_in_reg = mr_conv->IsCurrentParamInRegister();
      bool input_on_stack = mr_conv->IsCurrentParamOnStack();
      CHECK(input_in_reg || input_on_stack);

      if (input_in_reg) {
        ManagedRegister in_reg = mr_conv->CurrentParamRegister();
        __ VerifyObject(in_reg, mr_conv->IsCurrentArgPossiblyNull());
        __ StoreRef(handle_scope_offset, in_reg);
      } else if (input_on_stack) {
        FrameOffset in_off = mr_conv->CurrentParamStackOffset();
        __ VerifyObject(in_off, mr_conv->IsCurrentArgPossiblyNull());
        __ CopyRef(handle_scope_offset, in_off,
                   mr_conv->InterproceduralScratchRegister());
      }
    }
    mr_conv->Next();
    main_jni_conv->Next();
  }

  // 4. Write out the end of the quick frames.
  if (is_64_bit_target) {
    __ StoreStackPointerToThread64(Thread::TopOfManagedStackOffset<8>());
  } else {
    __ StoreStackPointerToThread32(Thread::TopOfManagedStackOffset<4>());
  }

  // 5. Move frame down to allow space for outgoing args.
  const size_t main_out_arg_size = main_jni_conv->OutArgSize();
  size_t current_out_arg_size = main_out_arg_size;
  __ IncreaseFrameSize(main_out_arg_size);

  // Call the read barrier for the declaring class loaded from the method for a static call.
  // Note that we always have outgoing param space available for at least two params.
  if (kUseReadBarrier && is_static) {
    ThreadOffset<4> read_barrier32 = QUICK_ENTRYPOINT_OFFSET(4, pReadBarrierJni);
    ThreadOffset<8> read_barrier64 = QUICK_ENTRYPOINT_OFFSET(8, pReadBarrierJni);
    main_jni_conv->ResetIterator(FrameOffset(main_out_arg_size));
    main_jni_conv->Next();  // Skip JNIEnv.
    FrameOffset class_handle_scope_offset = main_jni_conv->CurrentParamHandleScopeEntryOffset();
    main_jni_conv->ResetIterator(FrameOffset(main_out_arg_size));
    // Pass the handle for the class as the first argument.
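    // Note: what is passed is the address of the handle scope entry, not the class reference
    // itself; the entrypoint loads the class through that address and can update the entry,
    // which is why the LoadRef above could skip the read barrier.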
    if (main_jni_conv->IsCurrentParamOnStack()) {
      FrameOffset out_off = main_jni_conv->CurrentParamStackOffset();
      __ CreateHandleScopeEntry(out_off, class_handle_scope_offset,
                                mr_conv->InterproceduralScratchRegister(),
                                false);
    } else {
      ManagedRegister out_reg = main_jni_conv->CurrentParamRegister();
      __ CreateHandleScopeEntry(out_reg, class_handle_scope_offset,
                                ManagedRegister::NoRegister(), false);
    }
    main_jni_conv->Next();
    // Pass the current thread as the second argument and call.
    if (main_jni_conv->IsCurrentParamInRegister()) {
      __ GetCurrentThread(main_jni_conv->CurrentParamRegister());
      if (is_64_bit_target) {
        __ Call(main_jni_conv->CurrentParamRegister(), Offset(read_barrier64),
                main_jni_conv->InterproceduralScratchRegister());
      } else {
        __ Call(main_jni_conv->CurrentParamRegister(), Offset(read_barrier32),
                main_jni_conv->InterproceduralScratchRegister());
      }
    } else {
      __ GetCurrentThread(main_jni_conv->CurrentParamStackOffset(),
                          main_jni_conv->InterproceduralScratchRegister());
      if (is_64_bit_target) {
        __ CallFromThread64(read_barrier64, main_jni_conv->InterproceduralScratchRegister());
      } else {
        __ CallFromThread32(read_barrier32, main_jni_conv->InterproceduralScratchRegister());
      }
    }
    main_jni_conv->ResetIterator(FrameOffset(main_out_arg_size));  // Reset.
  }

  // 6. Call into appropriate JniMethodStart passing Thread* so that transition out of Runnable
  //    can occur. The result is the saved JNI local state that is restored by the exit call. We
  //    abuse the JNI calling convention here, which is guaranteed to support passing two pointer
  //    arguments.
  ThreadOffset<4> jni_start32 =
      is_synchronized ? QUICK_ENTRYPOINT_OFFSET(4, pJniMethodStartSynchronized)
                      : QUICK_ENTRYPOINT_OFFSET(4, pJniMethodStart);
  ThreadOffset<8> jni_start64 =
      is_synchronized ? QUICK_ENTRYPOINT_OFFSET(8, pJniMethodStartSynchronized)
                      : QUICK_ENTRYPOINT_OFFSET(8, pJniMethodStart);
  main_jni_conv->ResetIterator(FrameOffset(main_out_arg_size));
  FrameOffset locked_object_handle_scope_offset(0);
  if (is_synchronized) {
    // Pass object for locking.
    main_jni_conv->Next();  // Skip JNIEnv.
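    // Note: the lock target is the first real argument - the receiver for instance methods, or
    // the declaring class (stored in the handle scope above) for static methods - matching the
    // Java semantics of a synchronized method.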
    locked_object_handle_scope_offset = main_jni_conv->CurrentParamHandleScopeEntryOffset();
    main_jni_conv->ResetIterator(FrameOffset(main_out_arg_size));
    if (main_jni_conv->IsCurrentParamOnStack()) {
      FrameOffset out_off = main_jni_conv->CurrentParamStackOffset();
      __ CreateHandleScopeEntry(out_off, locked_object_handle_scope_offset,
                                mr_conv->InterproceduralScratchRegister(), false);
    } else {
      ManagedRegister out_reg = main_jni_conv->CurrentParamRegister();
      __ CreateHandleScopeEntry(out_reg, locked_object_handle_scope_offset,
                                ManagedRegister::NoRegister(), false);
    }
    main_jni_conv->Next();
  }
  if (main_jni_conv->IsCurrentParamInRegister()) {
    __ GetCurrentThread(main_jni_conv->CurrentParamRegister());
    if (is_64_bit_target) {
      __ Call(main_jni_conv->CurrentParamRegister(), Offset(jni_start64),
              main_jni_conv->InterproceduralScratchRegister());
    } else {
      __ Call(main_jni_conv->CurrentParamRegister(), Offset(jni_start32),
              main_jni_conv->InterproceduralScratchRegister());
    }
  } else {
    __ GetCurrentThread(main_jni_conv->CurrentParamStackOffset(),
                        main_jni_conv->InterproceduralScratchRegister());
    if (is_64_bit_target) {
      __ CallFromThread64(jni_start64, main_jni_conv->InterproceduralScratchRegister());
    } else {
      __ CallFromThread32(jni_start32, main_jni_conv->InterproceduralScratchRegister());
    }
  }
  if (is_synchronized) {  // Check for exceptions from monitor enter.
    __ ExceptionPoll(main_jni_conv->InterproceduralScratchRegister(), main_out_arg_size);
  }
  FrameOffset saved_cookie_offset = main_jni_conv->SavedLocalReferenceCookieOffset();
  __ Store(saved_cookie_offset, main_jni_conv->IntReturnRegister(), 4);

  // 7. Iterate over arguments placing values from managed calling convention into
  //    the convention required for a native call (shuffling). For references place
  //    an index/pointer to the reference after checking whether it is null (which
  //    must be encoded as null).
  //    Note: we do this prior to materializing the JNIEnv* and static's jclass to
  //    give as many free registers for the shuffle as possible.
  mr_conv->ResetIterator(FrameOffset(frame_size + main_out_arg_size));
  uint32_t args_count = 0;
  while (mr_conv->HasNext()) {
    args_count++;
    mr_conv->Next();
  }

  // Do a backward pass over arguments, so that the generated code will be "mov
  // R2, R3; mov R1, R2" instead of "mov R1, R2; mov R2, R3."
  // TODO: A reverse iterator to improve readability.
  for (uint32_t i = 0; i < args_count; ++i) {
    mr_conv->ResetIterator(FrameOffset(frame_size + main_out_arg_size));
    main_jni_conv->ResetIterator(FrameOffset(main_out_arg_size));
    main_jni_conv->Next();  // Skip JNIEnv*.
    if (is_static) {
      main_jni_conv->Next();  // Skip Class for now.
    }
    // Skip to the argument we're interested in.
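    // (Rescanning from the start for each argument makes the shuffle quadratic in the argument
    // count, which is tolerable only because JNI methods rarely take many parameters.)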
    for (uint32_t j = 0; j < args_count - i - 1; ++j) {
      mr_conv->Next();
      main_jni_conv->Next();
    }
    CopyParameter(jni_asm.get(), mr_conv.get(), main_jni_conv.get(), frame_size,
                  main_out_arg_size);
  }
  if (is_static) {
    // Create argument for Class.
    mr_conv->ResetIterator(FrameOffset(frame_size + main_out_arg_size));
    main_jni_conv->ResetIterator(FrameOffset(main_out_arg_size));
    main_jni_conv->Next();  // Skip JNIEnv*.
    FrameOffset handle_scope_offset = main_jni_conv->CurrentParamHandleScopeEntryOffset();
    if (main_jni_conv->IsCurrentParamOnStack()) {
      FrameOffset out_off = main_jni_conv->CurrentParamStackOffset();
      __ CreateHandleScopeEntry(out_off, handle_scope_offset,
                                mr_conv->InterproceduralScratchRegister(),
                                false);
    } else {
      ManagedRegister out_reg = main_jni_conv->CurrentParamRegister();
      __ CreateHandleScopeEntry(out_reg, handle_scope_offset,
                                ManagedRegister::NoRegister(), false);
    }
  }

  // 8. Create 1st argument, the JNI environment ptr.
  main_jni_conv->ResetIterator(FrameOffset(main_out_arg_size));
  // Register that will hold local indirect reference table.
  if (main_jni_conv->IsCurrentParamInRegister()) {
    ManagedRegister jni_env = main_jni_conv->CurrentParamRegister();
    DCHECK(!jni_env.Equals(main_jni_conv->InterproceduralScratchRegister()));
    if (is_64_bit_target) {
      __ LoadRawPtrFromThread64(jni_env, Thread::JniEnvOffset<8>());
    } else {
      __ LoadRawPtrFromThread32(jni_env, Thread::JniEnvOffset<4>());
    }
  } else {
    FrameOffset jni_env = main_jni_conv->CurrentParamStackOffset();
    if (is_64_bit_target) {
      __ CopyRawPtrFromThread64(jni_env, Thread::JniEnvOffset<8>(),
                                main_jni_conv->InterproceduralScratchRegister());
    } else {
      __ CopyRawPtrFromThread32(jni_env, Thread::JniEnvOffset<4>(),
                                main_jni_conv->InterproceduralScratchRegister());
    }
  }

  // 9. Plant call to native code associated with method.
  MemberOffset jni_entrypoint_offset = ArtMethod::EntryPointFromJniOffset(
      InstructionSetPointerSize(instruction_set));
  __ Call(main_jni_conv->MethodStackOffset(), jni_entrypoint_offset,
          mr_conv->InterproceduralScratchRegister());

  // 10. Fix differences in result widths.
  if (main_jni_conv->RequiresSmallResultTypeExtension()) {
    if (main_jni_conv->GetReturnType() == Primitive::kPrimByte ||
        main_jni_conv->GetReturnType() == Primitive::kPrimShort) {
      __ SignExtend(main_jni_conv->ReturnRegister(),
                    Primitive::ComponentSize(main_jni_conv->GetReturnType()));
    } else if (main_jni_conv->GetReturnType() == Primitive::kPrimBoolean ||
               main_jni_conv->GetReturnType() == Primitive::kPrimChar) {
      __ ZeroExtend(main_jni_conv->ReturnRegister(),
                    Primitive::ComponentSize(main_jni_conv->GetReturnType()));
    }
  }

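  // Note: a non-reference native result must survive the JniMethodEnd call below, which is
  // itself native code that may clobber the return register; it is spilled to the frame here
  // and reloaded in step 13.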
  // 11. Save return value.
  FrameOffset return_save_location = main_jni_conv->ReturnValueSaveLocation();
  if (main_jni_conv->SizeOfReturnValue() != 0 && !reference_return) {
    if ((instruction_set == kMips || instruction_set == kMips64) &&
        main_jni_conv->GetReturnType() == Primitive::kPrimDouble &&
        return_save_location.Uint32Value() % 8 != 0) {
      // Ensure doubles are 8-byte aligned for MIPS.
      return_save_location = FrameOffset(return_save_location.Uint32Value() + kMipsPointerSize);
    }
    CHECK_LT(return_save_location.Uint32Value(), frame_size + main_out_arg_size);
    __ Store(return_save_location, main_jni_conv->ReturnRegister(),
             main_jni_conv->SizeOfReturnValue());
  }

  // Increase frame size for out args if needed by the end_jni_conv.
  const size_t end_out_arg_size = end_jni_conv->OutArgSize();
  if (end_out_arg_size > current_out_arg_size) {
    size_t out_arg_size_diff = end_out_arg_size - current_out_arg_size;
    current_out_arg_size = end_out_arg_size;
    __ IncreaseFrameSize(out_arg_size_diff);
    saved_cookie_offset = FrameOffset(saved_cookie_offset.SizeValue() + out_arg_size_diff);
    locked_object_handle_scope_offset =
        FrameOffset(locked_object_handle_scope_offset.SizeValue() + out_arg_size_diff);
    return_save_location = FrameOffset(return_save_location.SizeValue() + out_arg_size_diff);
  }

  // 12. Call into JNI method end, possibly passing a returned reference, the method and the
  //     current thread.
  end_jni_conv->ResetIterator(FrameOffset(end_out_arg_size));
  ThreadOffset<4> jni_end32(-1);
  ThreadOffset<8> jni_end64(-1);
  if (reference_return) {
    // Pass result.
    jni_end32 = is_synchronized
        ? QUICK_ENTRYPOINT_OFFSET(4, pJniMethodEndWithReferenceSynchronized)
        : QUICK_ENTRYPOINT_OFFSET(4, pJniMethodEndWithReference);
    jni_end64 = is_synchronized
        ? QUICK_ENTRYPOINT_OFFSET(8, pJniMethodEndWithReferenceSynchronized)
        : QUICK_ENTRYPOINT_OFFSET(8, pJniMethodEndWithReference);
    SetNativeParameter(jni_asm.get(), end_jni_conv.get(), end_jni_conv->ReturnRegister());
    end_jni_conv->Next();
  } else {
    jni_end32 = is_synchronized ? QUICK_ENTRYPOINT_OFFSET(4, pJniMethodEndSynchronized)
                                : QUICK_ENTRYPOINT_OFFSET(4, pJniMethodEnd);
    jni_end64 = is_synchronized ? QUICK_ENTRYPOINT_OFFSET(8, pJniMethodEndSynchronized)
                                : QUICK_ENTRYPOINT_OFFSET(8, pJniMethodEnd);
  }
  // Pass saved local reference state.
  if (end_jni_conv->IsCurrentParamOnStack()) {
    FrameOffset out_off = end_jni_conv->CurrentParamStackOffset();
    __ Copy(out_off, saved_cookie_offset, end_jni_conv->InterproceduralScratchRegister(), 4);
  } else {
    ManagedRegister out_reg = end_jni_conv->CurrentParamRegister();
    __ Load(out_reg, saved_cookie_offset, 4);
  }
  end_jni_conv->Next();
  if (is_synchronized) {
    // Pass object for unlocking.
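    // Note: this reuses the handle scope entry recorded for JniMethodStartSynchronized, so the
    // monitor exit operates on exactly the object that was locked on entry.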
    if (end_jni_conv->IsCurrentParamOnStack()) {
      FrameOffset out_off = end_jni_conv->CurrentParamStackOffset();
      __ CreateHandleScopeEntry(out_off, locked_object_handle_scope_offset,
                                end_jni_conv->InterproceduralScratchRegister(),
                                false);
    } else {
      ManagedRegister out_reg = end_jni_conv->CurrentParamRegister();
      __ CreateHandleScopeEntry(out_reg, locked_object_handle_scope_offset,
                                ManagedRegister::NoRegister(), false);
    }
    end_jni_conv->Next();
  }
  if (end_jni_conv->IsCurrentParamInRegister()) {
    __ GetCurrentThread(end_jni_conv->CurrentParamRegister());
    if (is_64_bit_target) {
      __ Call(end_jni_conv->CurrentParamRegister(), Offset(jni_end64),
              end_jni_conv->InterproceduralScratchRegister());
    } else {
      __ Call(end_jni_conv->CurrentParamRegister(), Offset(jni_end32),
              end_jni_conv->InterproceduralScratchRegister());
    }
  } else {
    __ GetCurrentThread(end_jni_conv->CurrentParamStackOffset(),
                        end_jni_conv->InterproceduralScratchRegister());
    if (is_64_bit_target) {
      __ CallFromThread64(jni_end64, end_jni_conv->InterproceduralScratchRegister());
    } else {
      __ CallFromThread32(jni_end32, end_jni_conv->InterproceduralScratchRegister());
    }
  }

  // 13. Reload return value.
  if (main_jni_conv->SizeOfReturnValue() != 0 && !reference_return) {
    __ Load(mr_conv->ReturnRegister(), return_save_location, mr_conv->SizeOfReturnValue());
  }

  // 14. Move frame up now we're done with the out arg space.
  __ DecreaseFrameSize(current_out_arg_size);

  // 15. Process pending exceptions from JNI call or monitor exit.
  __ ExceptionPoll(main_jni_conv->InterproceduralScratchRegister(), 0);

  // 16. Remove activation - need to restore callee save registers since the GC may have changed
  //     them.
  DCHECK_EQ(jni_asm->cfi().GetCurrentCFAOffset(), static_cast<int>(frame_size));
  __ RemoveFrame(frame_size, callee_save_regs);
  DCHECK_EQ(jni_asm->cfi().GetCurrentCFAOffset(), static_cast<int>(frame_size));

  // 17. Finalize code generation.
  __ FinalizeCode();
  size_t cs = __ CodeSize();
  std::vector<uint8_t> managed_code(cs);
  MemoryRegion code(&managed_code[0], managed_code.size());
  __ FinalizeInstructions(code);

  return CompiledMethod::SwapAllocCompiledMethod(driver,
                                                 instruction_set,
                                                 ArrayRef<const uint8_t>(managed_code),
                                                 frame_size,
                                                 main_jni_conv->CoreSpillMask(),
                                                 main_jni_conv->FpSpillMask(),
                                                 ArrayRef<const SrcMapElem>(),
                                                 ArrayRef<const uint8_t>(),  // vmap_table.
                                                 ArrayRef<const uint8_t>(*jni_asm->cfi().data()),
                                                 ArrayRef<const LinkerPatch>());
}

// Copy a single parameter from the managed to the JNI calling convention.
static void CopyParameter(Assembler* jni_asm,
                          ManagedRuntimeCallingConvention* mr_conv,
                          JniCallingConvention* jni_conv,
                          size_t frame_size, size_t out_arg_size) {
  bool input_in_reg = mr_conv->IsCurrentParamInRegister();
  bool output_in_reg = jni_conv->IsCurrentParamInRegister();
  FrameOffset handle_scope_offset(0);
  bool null_allowed = false;
  bool ref_param = jni_conv->IsCurrentParamAReference();
  CHECK(!ref_param || mr_conv->IsCurrentParamAReference());
  // Input may be in a register, on the stack, or both - but never neither!
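  // ("Both" arises when a wide argument straddles the last parameter register and the stack;
  // that case is handled by the StoreSpanning branch at the bottom of this function.)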
  CHECK(input_in_reg || mr_conv->IsCurrentParamOnStack());
  if (output_in_reg) {  // Output shouldn't straddle registers and stack.
    CHECK(!jni_conv->IsCurrentParamOnStack());
  } else {
    CHECK(jni_conv->IsCurrentParamOnStack());
  }
  // References need placing in handle scope and the entry address passing.
  if (ref_param) {
    null_allowed = mr_conv->IsCurrentArgPossiblyNull();
    // Compute handle scope offset. Note null is placed in the handle scope but the jobject
    // passed to the native code must be null (not a pointer into the handle scope
    // as with regular references).
    handle_scope_offset = jni_conv->CurrentParamHandleScopeEntryOffset();
    // Check handle scope offset is within frame.
    CHECK_LT(handle_scope_offset.Uint32Value(), (frame_size + out_arg_size));
  }
  if (input_in_reg && output_in_reg) {
    ManagedRegister in_reg = mr_conv->CurrentParamRegister();
    ManagedRegister out_reg = jni_conv->CurrentParamRegister();
    if (ref_param) {
      __ CreateHandleScopeEntry(out_reg, handle_scope_offset, in_reg, null_allowed);
    } else {
      if (!mr_conv->IsCurrentParamOnStack()) {
        // Regular non-straddling move.
        __ Move(out_reg, in_reg, mr_conv->CurrentParamSize());
      } else {
        UNIMPLEMENTED(FATAL);  // We currently don't expect to see this case.
      }
    }
  } else if (!input_in_reg && !output_in_reg) {
    FrameOffset out_off = jni_conv->CurrentParamStackOffset();
    if (ref_param) {
      __ CreateHandleScopeEntry(out_off, handle_scope_offset,
                                mr_conv->InterproceduralScratchRegister(), null_allowed);
    } else {
      FrameOffset in_off = mr_conv->CurrentParamStackOffset();
      size_t param_size = mr_conv->CurrentParamSize();
      CHECK_EQ(param_size, jni_conv->CurrentParamSize());
      __ Copy(out_off, in_off, mr_conv->InterproceduralScratchRegister(), param_size);
    }
  } else if (!input_in_reg && output_in_reg) {
    FrameOffset in_off = mr_conv->CurrentParamStackOffset();
    ManagedRegister out_reg = jni_conv->CurrentParamRegister();
    // Check that incoming stack arguments are above the current stack frame.
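    // (They live in the caller's frame, so their offsets are strictly greater than frame_size.)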
    CHECK_GT(in_off.Uint32Value(), frame_size);
    if (ref_param) {
      __ CreateHandleScopeEntry(out_reg, handle_scope_offset, ManagedRegister::NoRegister(),
                                null_allowed);
    } else {
      size_t param_size = mr_conv->CurrentParamSize();
      CHECK_EQ(param_size, jni_conv->CurrentParamSize());
      __ Load(out_reg, in_off, param_size);
    }
  } else {
    CHECK(input_in_reg && !output_in_reg);
    ManagedRegister in_reg = mr_conv->CurrentParamRegister();
    FrameOffset out_off = jni_conv->CurrentParamStackOffset();
    // Check outgoing argument is within frame.
    CHECK_LT(out_off.Uint32Value(), frame_size);
    if (ref_param) {
      // TODO: recycle value in in_reg rather than reload from handle scope.
      __ CreateHandleScopeEntry(out_off, handle_scope_offset,
                                mr_conv->InterproceduralScratchRegister(), null_allowed);
    } else {
      size_t param_size = mr_conv->CurrentParamSize();
      CHECK_EQ(param_size, jni_conv->CurrentParamSize());
      if (!mr_conv->IsCurrentParamOnStack()) {
        // Regular non-straddling store.
        __ Store(out_off, in_reg, param_size);
      } else {
        // Store where input straddles registers and stack.
        CHECK_EQ(param_size, 8u);
        FrameOffset in_off = mr_conv->CurrentParamStackOffset();
        __ StoreSpanning(out_off, in_reg, in_off, mr_conv->InterproceduralScratchRegister());
      }
    }
  }
}

static void SetNativeParameter(Assembler* jni_asm,
                               JniCallingConvention* jni_conv,
                               ManagedRegister in_reg) {
  if (jni_conv->IsCurrentParamOnStack()) {
    FrameOffset dest = jni_conv->CurrentParamStackOffset();
    __ StoreRawPtr(dest, in_reg);
  } else {
    if (!jni_conv->CurrentParamRegister().Equals(in_reg)) {
      __ Move(jni_conv->CurrentParamRegister(), in_reg, jni_conv->CurrentParamSize());
    }
  }
}

CompiledMethod* ArtQuickJniCompileMethod(CompilerDriver* compiler, uint32_t access_flags,
                                         uint32_t method_idx, const DexFile& dex_file) {
  return ArtJniCompileMethodInternal(compiler, access_flags, method_idx, dex_file);
}

}  // namespace art