/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "art_method-inl.h"
#include "base/callee_save_type.h"
#include "base/enums.h"
#include "callee_save_frame.h"
#include "common_throws.h"
#include "debug_print.h"
#include "debugger.h"
#include "dex/dex_file-inl.h"
#include "dex/dex_file_types.h"
#include "dex/dex_instruction-inl.h"
#include "dex/method_reference.h"
#include "entrypoints/entrypoint_utils-inl.h"
#include "entrypoints/runtime_asm_entrypoints.h"
#include "gc/accounting/card_table-inl.h"
#include "imt_conflict_table.h"
#include "imtable-inl.h"
#include "index_bss_mapping.h"
#include "instrumentation.h"
#include "interpreter/interpreter.h"
#include "jit/jit.h"
#include "linear_alloc.h"
#include "method_handles.h"
#include "mirror/class-inl.h"
#include "mirror/dex_cache-inl.h"
#include "mirror/method.h"
#include "mirror/method_handle_impl.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
#include "oat_file.h"
#include "oat_quick_method_header.h"
#include "quick_exception_handler.h"
#include "runtime.h"
#include "scoped_thread_state_change-inl.h"
#include "stack.h"
#include "thread-inl.h"
#include "well_known_classes.h"

namespace art {

// Visits the arguments as saved to the stack by a CalleeSaveType::kRefAndArgs callee save frame.
class QuickArgumentVisitor {
  // Number of bytes for each out register in the caller method's frame.
  static constexpr size_t kBytesStackArgLocation = 4;
  // Frame size in bytes of a callee-save frame for RefsAndArgs.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_FrameSize =
      GetCalleeSaveFrameSize(kRuntimeISA, CalleeSaveType::kSaveRefsAndArgs);
#if defined(__arm__)
  // The callee save frame is pointed to by SP.
  // | argN       |  |
  // | ...        |  |
  // | arg4       |  |
  // | arg3 spill |  |  Caller's frame
  // | arg2 spill |  |
  // | arg1 spill |  |
  // | Method*    | ---
  // | LR         |
  // | ...        |    4x6 bytes callee saves
  // | R3         |
  // | R2         |
  // | R1         |
  // | S15        |
  // | :          |
  // | S0         |
  // |            |    4x2 bytes padding
  // | Method*    |  <- sp
  static constexpr bool kSplitPairAcrossRegisterAndStack = false;
  static constexpr bool kAlignPairRegister = true;
  static constexpr bool kQuickSoftFloatAbi = false;
  static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = true;
  static constexpr bool kQuickSkipOddFpRegisters = false;
  static constexpr size_t kNumQuickGprArgs = 3;
  static constexpr size_t kNumQuickFprArgs = 16;
  static constexpr bool kGprFprLockstep = false;
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset =
      arm::ArmCalleeSaveFpr1Offset(CalleeSaveType::kSaveRefsAndArgs);  // Offset of first FPR arg.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset =
      arm::ArmCalleeSaveGpr1Offset(CalleeSaveType::kSaveRefsAndArgs);  // Offset of first GPR arg.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset =
      arm::ArmCalleeSaveLrOffset(CalleeSaveType::kSaveRefsAndArgs);  // Offset of return address.
  static size_t GprIndexToGprOffset(uint32_t gpr_index) {
    return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA);
  }
#elif defined(__aarch64__)
  // The callee save frame is pointed to by SP.
  // | argN       |  |
  // | ...        |  |
  // | arg4       |  |
  // | arg3 spill |  |  Caller's frame
  // | arg2 spill |  |
  // | arg1 spill |  |
  // | Method*    | ---
  // | LR         |
  // | X29        |
  // | :          |
  // | X20        |
  // | X7         |
  // | :          |
  // | X1         |
  // | D7         |
  // | :          |
  // | D0         |
  // |            |    padding
  // | Method*    |  <- sp
  static constexpr bool kSplitPairAcrossRegisterAndStack = false;
  static constexpr bool kAlignPairRegister = false;
  static constexpr bool kQuickSoftFloatAbi = false;  // This is a hard float ABI.
  static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = false;
  static constexpr bool kQuickSkipOddFpRegisters = false;
  static constexpr size_t kNumQuickGprArgs = 7;  // 7 arguments passed in GPRs.
  static constexpr size_t kNumQuickFprArgs = 8;  // 8 arguments passed in FPRs.
  static constexpr bool kGprFprLockstep = false;
  // Offset of first FPR arg.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset =
      arm64::Arm64CalleeSaveFpr1Offset(CalleeSaveType::kSaveRefsAndArgs);
  // Offset of first GPR arg.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset =
      arm64::Arm64CalleeSaveGpr1Offset(CalleeSaveType::kSaveRefsAndArgs);
  // Offset of return address.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset =
      arm64::Arm64CalleeSaveLrOffset(CalleeSaveType::kSaveRefsAndArgs);
  static size_t GprIndexToGprOffset(uint32_t gpr_index) {
    return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA);
  }
#elif defined(__mips__) && !defined(__LP64__)
  // The callee save frame is pointed to by SP.
  // | argN        |  |
  // | ...         |  |
  // | arg4        |  |
  // | arg3 spill  |  |  Caller's frame
  // | arg2 spill  |  |
  // | arg1 spill  |  |
  // | Method*     | ---
  // | RA          |
  // | ...         |    callee saves
  // | T1          |    arg5
  // | T0          |    arg4
  // | A3          |    arg3
  // | A2          |    arg2
  // | A1          |    arg1
  // | F19         |
  // | F18         |    f_arg5
  // | F17         |
  // | F16         |    f_arg4
  // | F15         |
  // | F14         |    f_arg3
  // | F13         |
  // | F12         |    f_arg2
  // | F11         |
  // | F10         |    f_arg1
  // | F9          |
  // | F8          |    f_arg0
  // |             |    padding
  // | A0/Method*  |  <- sp
  static constexpr bool kSplitPairAcrossRegisterAndStack = false;
  static constexpr bool kAlignPairRegister = true;
  static constexpr bool kQuickSoftFloatAbi = false;
  static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = false;
  static constexpr bool kQuickSkipOddFpRegisters = true;
  static constexpr size_t kNumQuickGprArgs = 5;   // 5 arguments passed in GPRs.
  static constexpr size_t kNumQuickFprArgs = 12;  // 6 arguments passed in FPRs. Floats can be
                                                  // passed only in even numbered registers and
                                                  // each double occupies two registers.
  static constexpr bool kGprFprLockstep = false;
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 8;  // Offset of first FPR arg.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 56;  // Offset of first GPR arg.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 108;  // Offset of return address.
  static size_t GprIndexToGprOffset(uint32_t gpr_index) {
    return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA);
  }
#elif defined(__mips__) && defined(__LP64__)
  // The callee save frame is pointed to by SP.
  // | argN        |  |
  // | ...         |  |
  // | arg4        |  |
  // | arg3 spill  |  |  Caller's frame
  // | arg2 spill  |  |
  // | arg1 spill  |  |
  // | Method*     | ---
  // | RA          |
  // | ...         |    callee saves
  // | A7          |    arg7
  // | A6          |    arg6
  // | A5          |    arg5
  // | A4          |    arg4
  // | A3          |    arg3
  // | A2          |    arg2
  // | A1          |    arg1
  // | F19         |    f_arg7
  // | F18         |    f_arg6
  // | F17         |    f_arg5
  // | F16         |    f_arg4
  // | F15         |    f_arg3
  // | F14         |    f_arg2
  // | F13         |    f_arg1
  // | F12         |    f_arg0
  // |             |    padding
  // | A0/Method*  |  <- sp
  // NOTE: for Mips64, when A0 is skipped, F12 is also skipped.
  static constexpr bool kSplitPairAcrossRegisterAndStack = false;
  static constexpr bool kAlignPairRegister = false;
  static constexpr bool kQuickSoftFloatAbi = false;
  static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = false;
  static constexpr bool kQuickSkipOddFpRegisters = false;
  static constexpr size_t kNumQuickGprArgs = 7;  // 7 arguments passed in GPRs.
  static constexpr size_t kNumQuickFprArgs = 7;  // 7 arguments passed in FPRs.
  static constexpr bool kGprFprLockstep = true;

  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 24;  // Offset of first FPR arg (F13).
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 80;  // Offset of first GPR arg (A1).
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 200;  // Offset of return address.
  static size_t GprIndexToGprOffset(uint32_t gpr_index) {
    return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA);
  }
#elif defined(__i386__)
  // The callee save frame is pointed to by SP.
  // | argN        |  |
  // | ...         |  |
  // | arg4        |  |
  // | arg3 spill  |  |  Caller's frame
  // | arg2 spill  |  |
  // | arg1 spill  |  |
  // | Method*     | ---
  // | Return      |
  // | EBP,ESI,EDI |    callee saves
  // | EBX         |    arg3
  // | EDX         |    arg2
  // | ECX         |    arg1
  // | XMM3        |    float arg 4
  // | XMM2        |    float arg 3
  // | XMM1        |    float arg 2
  // | XMM0        |    float arg 1
  // | EAX/Method* |  <- sp
  static constexpr bool kSplitPairAcrossRegisterAndStack = false;
  static constexpr bool kAlignPairRegister = false;
  static constexpr bool kQuickSoftFloatAbi = false;  // This is a hard float ABI.
  static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = false;
  static constexpr bool kQuickSkipOddFpRegisters = false;
  static constexpr size_t kNumQuickGprArgs = 3;  // 3 arguments passed in GPRs.
  static constexpr size_t kNumQuickFprArgs = 4;  // 4 arguments passed in FPRs.
  static constexpr bool kGprFprLockstep = false;
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 4;  // Offset of first FPR arg.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 4 + 4*8;  // Offset of first GPR arg.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 28 + 4*8;  // Offset of return address.
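  // Sanity check of the i386 offsets above (illustrative, not from the original source): sp
  // points at the 4-byte EAX/Method* slot, followed by four 8-byte XMM spills, so ECX (the first
  // GPR arg) sits at 4 + 4*8 = 36; ECX, EDX and EBX plus the EBP/ESI/EDI callee saves (4 bytes
  // each) then place the return address at 28 + 4*8 = 60.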
  static size_t GprIndexToGprOffset(uint32_t gpr_index) {
    return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA);
  }
#elif defined(__x86_64__)
  // The callee save frame is pointed to by SP.
  // | argN            |  |
  // | ...             |  |
  // | reg. arg spills |  |  Caller's frame
  // | Method*         | ---
  // | Return          |
  // | R15             |    callee save
  // | R14             |    callee save
  // | R13             |    callee save
  // | R12             |    callee save
  // | R9              |    arg5
  // | R8              |    arg4
  // | RSI/R6          |    arg1
  // | RBP/R5          |    callee save
  // | RBX/R3          |    callee save
  // | RDX/R2          |    arg2
  // | RCX/R1          |    arg3
  // | XMM7            |    float arg 8
  // | XMM6            |    float arg 7
  // | XMM5            |    float arg 6
  // | XMM4            |    float arg 5
  // | XMM3            |    float arg 4
  // | XMM2            |    float arg 3
  // | XMM1            |    float arg 2
  // | XMM0            |    float arg 1
  // | Padding         |
  // | RDI/Method*     |  <- sp
  static constexpr bool kSplitPairAcrossRegisterAndStack = false;
  static constexpr bool kAlignPairRegister = false;
  static constexpr bool kQuickSoftFloatAbi = false;  // This is a hard float ABI.
  static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = false;
  static constexpr bool kQuickSkipOddFpRegisters = false;
  static constexpr size_t kNumQuickGprArgs = 5;  // 5 arguments passed in GPRs.
  static constexpr size_t kNumQuickFprArgs = 8;  // 8 arguments passed in FPRs.
  static constexpr bool kGprFprLockstep = false;
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 16;  // Offset of first FPR arg.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 80 + 4*8;  // Offset of first GPR arg.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 168 + 4*8;  // Offset of return address.
  static size_t GprIndexToGprOffset(uint32_t gpr_index) {
    switch (gpr_index) {
      case 0: return (4 * GetBytesPerGprSpillLocation(kRuntimeISA));
      case 1: return (1 * GetBytesPerGprSpillLocation(kRuntimeISA));
      case 2: return (0 * GetBytesPerGprSpillLocation(kRuntimeISA));
      case 3: return (5 * GetBytesPerGprSpillLocation(kRuntimeISA));
      case 4: return (6 * GetBytesPerGprSpillLocation(kRuntimeISA));
      default:
        LOG(FATAL) << "Unexpected GPR index: " << gpr_index;
        return 0;
    }
  }
#else
#error "Unsupported architecture"
#endif

 public:
  // Special handling for proxy methods. Proxy methods are instance methods, so the
  // 'this' object is the 1st argument. They also have the same frame layout as the
  // kRefAndArgs runtime method. Since 'this' is a reference, it is located in the
  // 1st GPR.
  static StackReference<mirror::Object>* GetProxyThisObjectReference(ArtMethod** sp)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    CHECK((*sp)->IsProxyMethod());
    CHECK_GT(kNumQuickGprArgs, 0u);
    constexpr uint32_t kThisGprIndex = 0u;  // 'this' is in the 1st GPR.
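    // Illustrative arithmetic: the 'this' reference is read from
    // sp + kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset + GprIndexToGprOffset(0), e.g. the X1
    // spill slot on arm64 (offset 0 from Gpr1Offset) or the RSI slot on x86-64.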
    size_t this_arg_offset = kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset +
        GprIndexToGprOffset(kThisGprIndex);
    uint8_t* this_arg_address = reinterpret_cast<uint8_t*>(sp) + this_arg_offset;
    return reinterpret_cast<StackReference<mirror::Object>*>(this_arg_address);
  }

  static ArtMethod* GetCallingMethod(ArtMethod** sp) REQUIRES_SHARED(Locks::mutator_lock_) {
    DCHECK((*sp)->IsCalleeSaveMethod());
    return GetCalleeSaveMethodCaller(sp, CalleeSaveType::kSaveRefsAndArgs);
  }

  static ArtMethod* GetOuterMethod(ArtMethod** sp) REQUIRES_SHARED(Locks::mutator_lock_) {
    DCHECK((*sp)->IsCalleeSaveMethod());
    uint8_t* previous_sp =
        reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_FrameSize;
    return *reinterpret_cast<ArtMethod**>(previous_sp);
  }

  static uint32_t GetCallingDexPc(ArtMethod** sp) REQUIRES_SHARED(Locks::mutator_lock_) {
    DCHECK((*sp)->IsCalleeSaveMethod());
    const size_t callee_frame_size = GetCalleeSaveFrameSize(kRuntimeISA,
                                                            CalleeSaveType::kSaveRefsAndArgs);
    ArtMethod** caller_sp = reinterpret_cast<ArtMethod**>(
        reinterpret_cast<uintptr_t>(sp) + callee_frame_size);
    uintptr_t outer_pc = QuickArgumentVisitor::GetCallingPc(sp);
    const OatQuickMethodHeader* current_code = (*caller_sp)->GetOatQuickMethodHeader(outer_pc);
    uintptr_t outer_pc_offset = current_code->NativeQuickPcOffset(outer_pc);

    if (current_code->IsOptimized()) {
      CodeInfo code_info = current_code->GetOptimizedCodeInfo();
      CodeInfoEncoding encoding = code_info.ExtractEncoding();
      StackMap stack_map = code_info.GetStackMapForNativePcOffset(outer_pc_offset, encoding);
      DCHECK(stack_map.IsValid());
      if (stack_map.HasInlineInfo(encoding.stack_map.encoding)) {
        InlineInfo inline_info = code_info.GetInlineInfoOf(stack_map, encoding);
        return inline_info.GetDexPcAtDepth(encoding.inline_info.encoding,
                                           inline_info.GetDepth(encoding.inline_info.encoding) - 1);
      } else {
        return stack_map.GetDexPc(encoding.stack_map.encoding);
      }
    } else {
      return current_code->ToDexPc(*caller_sp, outer_pc);
    }
  }

  static bool GetInvokeType(ArtMethod** sp, InvokeType* invoke_type, uint32_t* dex_method_index)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    DCHECK((*sp)->IsCalleeSaveMethod());
    const size_t callee_frame_size = GetCalleeSaveFrameSize(kRuntimeISA,
                                                            CalleeSaveType::kSaveRefsAndArgs);
    ArtMethod** caller_sp = reinterpret_cast<ArtMethod**>(
        reinterpret_cast<uintptr_t>(sp) + callee_frame_size);
    uintptr_t outer_pc = QuickArgumentVisitor::GetCallingPc(sp);
    const OatQuickMethodHeader* current_code = (*caller_sp)->GetOatQuickMethodHeader(outer_pc);
    if (!current_code->IsOptimized()) {
      return false;
    }
    uintptr_t outer_pc_offset = current_code->NativeQuickPcOffset(outer_pc);
    CodeInfo code_info = current_code->GetOptimizedCodeInfo();
    CodeInfoEncoding encoding = code_info.ExtractEncoding();
    MethodInfo method_info = current_code->GetOptimizedMethodInfo();
    InvokeInfo invoke(code_info.GetInvokeInfoForNativePcOffset(outer_pc_offset, encoding));
    if (invoke.IsValid()) {
      *invoke_type = static_cast<InvokeType>(invoke.GetInvokeType(encoding.invoke_info.encoding));
      *dex_method_index = invoke.GetMethodIndex(encoding.invoke_info.encoding, method_info);
      return true;
    }
    return false;
  }

  // For the given quick ref and args quick frame, return the caller's PC.
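  // Note: on the RISC targets kQuickCalleeSaveFrame_RefAndArgs_LrOffset names the saved LR/RA
  // slot, while on x86/x86-64 it names the pushed return-address slot; in both cases the slot
  // holds the caller's PC.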
  static uintptr_t GetCallingPc(ArtMethod** sp) REQUIRES_SHARED(Locks::mutator_lock_) {
    DCHECK((*sp)->IsCalleeSaveMethod());
    uint8_t* lr = reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_LrOffset;
    return *reinterpret_cast<uintptr_t*>(lr);
  }

  QuickArgumentVisitor(ArtMethod** sp, bool is_static, const char* shorty,
                       uint32_t shorty_len) REQUIRES_SHARED(Locks::mutator_lock_) :
      is_static_(is_static), shorty_(shorty), shorty_len_(shorty_len),
      gpr_args_(reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset),
      fpr_args_(reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset),
      stack_args_(reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_FrameSize
                  + sizeof(ArtMethod*)),  // Skip ArtMethod*.
      gpr_index_(0), fpr_index_(0), fpr_double_index_(0), stack_index_(0),
      cur_type_(Primitive::kPrimVoid), is_split_long_or_double_(false) {
    static_assert(kQuickSoftFloatAbi == (kNumQuickFprArgs == 0),
                  "Number of Quick FPR arguments unexpected");
    static_assert(!(kQuickSoftFloatAbi && kQuickDoubleRegAlignedFloatBackFilled),
                  "Double alignment unexpected");
    // For register alignment, we rely on the counter (fpr_double_index_) staying even, which
    // requires an even number of FPR argument registers.
    static_assert(!kQuickDoubleRegAlignedFloatBackFilled || kNumQuickFprArgs % 2 == 0,
                  "Number of Quick FPR arguments not even");
    DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), kRuntimePointerSize);
  }

  virtual ~QuickArgumentVisitor() {}

  virtual void Visit() = 0;

  Primitive::Type GetParamPrimitiveType() const {
    return cur_type_;
  }

  uint8_t* GetParamAddress() const {
    if (!kQuickSoftFloatAbi) {
      Primitive::Type type = GetParamPrimitiveType();
      if (UNLIKELY((type == Primitive::kPrimDouble) || (type == Primitive::kPrimFloat))) {
        if (type == Primitive::kPrimDouble && kQuickDoubleRegAlignedFloatBackFilled) {
          if (fpr_double_index_ + 2 < kNumQuickFprArgs + 1) {
            return fpr_args_ + (fpr_double_index_ * GetBytesPerFprSpillLocation(kRuntimeISA));
          }
        } else if (fpr_index_ + 1 < kNumQuickFprArgs + 1) {
          return fpr_args_ + (fpr_index_ * GetBytesPerFprSpillLocation(kRuntimeISA));
        }
        return stack_args_ + (stack_index_ * kBytesStackArgLocation);
      }
    }
    if (gpr_index_ < kNumQuickGprArgs) {
      return gpr_args_ + GprIndexToGprOffset(gpr_index_);
    }
    return stack_args_ + (stack_index_ * kBytesStackArgLocation);
  }

  bool IsSplitLongOrDouble() const {
    if ((GetBytesPerGprSpillLocation(kRuntimeISA) == 4) ||
        (GetBytesPerFprSpillLocation(kRuntimeISA) == 4)) {
      return is_split_long_or_double_;
    } else {
      return false;  // An optimization for when GPRs and FPRs are 64-bit.
    }
  }

  bool IsParamAReference() const {
    return GetParamPrimitiveType() == Primitive::kPrimNot;
  }

  bool IsParamALongOrDouble() const {
    Primitive::Type type = GetParamPrimitiveType();
    return type == Primitive::kPrimLong || type == Primitive::kPrimDouble;
  }

  uint64_t ReadSplitLongParam() const {
    // The split long is always available through the stack.
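    // (A value is "split" only when its low half landed in the last argument register; see the
    // is_split_long_or_double_ computation in VisitArguments() below.)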
    return *reinterpret_cast<uint64_t*>(stack_args_
        + stack_index_ * kBytesStackArgLocation);
  }

  void IncGprIndex() {
    gpr_index_++;
    if (kGprFprLockstep) {
      fpr_index_++;
    }
  }

  void IncFprIndex() {
    fpr_index_++;
    if (kGprFprLockstep) {
      gpr_index_++;
    }
  }

  void VisitArguments() REQUIRES_SHARED(Locks::mutator_lock_) {
    // (a) 'stack_args_' should point to the method's first argument.
    // (b) whatever the argument type is, 'stack_index_' must be advanced on every visit.
    gpr_index_ = 0;
    fpr_index_ = 0;
    if (kQuickDoubleRegAlignedFloatBackFilled) {
      fpr_double_index_ = 0;
    }
    stack_index_ = 0;
    if (!is_static_) {  // Handle the 'this' argument.
      cur_type_ = Primitive::kPrimNot;
      is_split_long_or_double_ = false;
      Visit();
      stack_index_++;
      if (kNumQuickGprArgs > 0) {
        IncGprIndex();
      }
    }
    for (uint32_t shorty_index = 1; shorty_index < shorty_len_; ++shorty_index) {
      cur_type_ = Primitive::GetType(shorty_[shorty_index]);
      switch (cur_type_) {
        case Primitive::kPrimNot:
        case Primitive::kPrimBoolean:
        case Primitive::kPrimByte:
        case Primitive::kPrimChar:
        case Primitive::kPrimShort:
        case Primitive::kPrimInt:
          is_split_long_or_double_ = false;
          Visit();
          stack_index_++;
          if (gpr_index_ < kNumQuickGprArgs) {
            IncGprIndex();
          }
          break;
        case Primitive::kPrimFloat:
          is_split_long_or_double_ = false;
          Visit();
          stack_index_++;
          if (kQuickSoftFloatAbi) {
            if (gpr_index_ < kNumQuickGprArgs) {
              IncGprIndex();
            }
          } else {
            if (fpr_index_ + 1 < kNumQuickFprArgs + 1) {
              IncFprIndex();
              if (kQuickDoubleRegAlignedFloatBackFilled) {
                // Double should not overlap with float.
                // For example, if fpr_index_ = 3, fpr_double_index_ should be at least 4.
                fpr_double_index_ = std::max(fpr_double_index_, RoundUp(fpr_index_, 2));
                // Float should not overlap with double.
                if (fpr_index_ % 2 == 0) {
                  fpr_index_ = std::max(fpr_double_index_, fpr_index_);
                }
              } else if (kQuickSkipOddFpRegisters) {
                IncFprIndex();
              }
            }
          }
          break;
        case Primitive::kPrimDouble:
        case Primitive::kPrimLong:
          if (kQuickSoftFloatAbi || (cur_type_ == Primitive::kPrimLong)) {
            if (cur_type_ == Primitive::kPrimLong &&
#if defined(__mips__) && !defined(__LP64__)
                (gpr_index_ == 0 || gpr_index_ == 2) &&
#else
                gpr_index_ == 0 &&
#endif
                kAlignPairRegister) {
              // Currently, this is only for ARM and MIPS, where we align long parameters with
              // even-numbered registers by skipping R1 (on ARM) or A1(A3) (on MIPS) and using
              // R2 (on ARM) or A2(T0) (on MIPS) instead.
              IncGprIndex();
            }
            is_split_long_or_double_ = (GetBytesPerGprSpillLocation(kRuntimeISA) == 4) &&
                ((gpr_index_ + 1) == kNumQuickGprArgs);
            if (!kSplitPairAcrossRegisterAndStack && is_split_long_or_double_) {
              // We don't want to split this. Pass over this register.
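              // (Illustration: on a 32-bit target, a long that would start in the last argument
              // GPR is instead passed entirely on the stack and that GPR is left unused.)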
              gpr_index_++;
              is_split_long_or_double_ = false;
            }
            Visit();
            if (kBytesStackArgLocation == 4) {
              stack_index_ += 2;
            } else {
              CHECK_EQ(kBytesStackArgLocation, 8U);
              stack_index_++;
            }
            if (gpr_index_ < kNumQuickGprArgs) {
              IncGprIndex();
              if (GetBytesPerGprSpillLocation(kRuntimeISA) == 4) {
                if (gpr_index_ < kNumQuickGprArgs) {
                  IncGprIndex();
                }
              }
            }
          } else {
            is_split_long_or_double_ = (GetBytesPerFprSpillLocation(kRuntimeISA) == 4) &&
                ((fpr_index_ + 1) == kNumQuickFprArgs) && !kQuickDoubleRegAlignedFloatBackFilled;
            Visit();
            if (kBytesStackArgLocation == 4) {
              stack_index_ += 2;
            } else {
              CHECK_EQ(kBytesStackArgLocation, 8U);
              stack_index_++;
            }
            if (kQuickDoubleRegAlignedFloatBackFilled) {
              if (fpr_double_index_ + 2 < kNumQuickFprArgs + 1) {
                fpr_double_index_ += 2;
                // Float should not overlap with double.
                if (fpr_index_ % 2 == 0) {
                  fpr_index_ = std::max(fpr_double_index_, fpr_index_);
                }
              }
            } else if (fpr_index_ + 1 < kNumQuickFprArgs + 1) {
              IncFprIndex();
              if (GetBytesPerFprSpillLocation(kRuntimeISA) == 4) {
                if (fpr_index_ + 1 < kNumQuickFprArgs + 1) {
                  IncFprIndex();
                }
              }
            }
          }
          break;
        default:
          LOG(FATAL) << "Unexpected type: " << cur_type_ << " in " << shorty_;
      }
    }
  }

 protected:
  const bool is_static_;
  const char* const shorty_;
  const uint32_t shorty_len_;

 private:
  uint8_t* const gpr_args_;    // Address of GPR arguments in callee save frame.
  uint8_t* const fpr_args_;    // Address of FPR arguments in callee save frame.
  uint8_t* const stack_args_;  // Address of stack arguments in caller's frame.
  uint32_t gpr_index_;         // Index into spilled GPRs.
  // Index into spilled FPRs.
  // In case kQuickDoubleRegAlignedFloatBackFilled, it may index a hole while fpr_double_index_
  // holds a higher register number.
  uint32_t fpr_index_;
  // Index into spilled FPRs for aligned double.
  // Only used when kQuickDoubleRegAlignedFloatBackFilled. Next available double register indexed
  // in terms of singles; may be behind fpr_index_.
  uint32_t fpr_double_index_;
  uint32_t stack_index_;  // Index into arguments on the stack.
  // The current type of argument during VisitArguments.
  Primitive::Type cur_type_;
  // Does a 64-bit parameter straddle the register and stack arguments?
  bool is_split_long_or_double_;
};

// Returns the 'this' object of a proxy method. This function is only used by StackVisitor. It
// allows using the QuickArgumentVisitor constants without moving all of that code into its own
// module.
extern "C" mirror::Object* artQuickGetProxyThisObject(ArtMethod** sp)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  return QuickArgumentVisitor::GetProxyThisObjectReference(sp)->AsMirrorPtr();
}

// Visits arguments on the stack placing them into the shadow frame.
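// For example (illustrative, not from the original source): for a non-static method with shorty
// "VJF" and first_arg_reg == 3, the visitor below stores 'this' into v3, the long argument into
// the (v4, v5) pair, and the float argument into v6, i.e. the incoming arguments fill the last
// 'ins' registers of the shadow frame.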
class BuildQuickShadowFrameVisitor FINAL : public QuickArgumentVisitor {
 public:
  BuildQuickShadowFrameVisitor(ArtMethod** sp, bool is_static, const char* shorty,
                               uint32_t shorty_len, ShadowFrame* sf, size_t first_arg_reg) :
      QuickArgumentVisitor(sp, is_static, shorty, shorty_len), sf_(sf), cur_reg_(first_arg_reg) {}

  void Visit() REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE;

 private:
  ShadowFrame* const sf_;
  uint32_t cur_reg_;

  DISALLOW_COPY_AND_ASSIGN(BuildQuickShadowFrameVisitor);
};

void BuildQuickShadowFrameVisitor::Visit() {
  Primitive::Type type = GetParamPrimitiveType();
  switch (type) {
    case Primitive::kPrimLong:  // Fall-through.
    case Primitive::kPrimDouble:
      if (IsSplitLongOrDouble()) {
        sf_->SetVRegLong(cur_reg_, ReadSplitLongParam());
      } else {
        sf_->SetVRegLong(cur_reg_, *reinterpret_cast<jlong*>(GetParamAddress()));
      }
      ++cur_reg_;
      break;
    case Primitive::kPrimNot: {
        StackReference<mirror::Object>* stack_ref =
            reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress());
        sf_->SetVRegReference(cur_reg_, stack_ref->AsMirrorPtr());
      }
      break;
    case Primitive::kPrimBoolean:  // Fall-through.
    case Primitive::kPrimByte:     // Fall-through.
    case Primitive::kPrimChar:     // Fall-through.
    case Primitive::kPrimShort:    // Fall-through.
    case Primitive::kPrimInt:      // Fall-through.
    case Primitive::kPrimFloat:
      sf_->SetVReg(cur_reg_, *reinterpret_cast<jint*>(GetParamAddress()));
      break;
    case Primitive::kPrimVoid:
      LOG(FATAL) << "UNREACHABLE";
      UNREACHABLE();
  }
  ++cur_reg_;
}

// Don't inline. See b/65159206.
NO_INLINE
static void HandleDeoptimization(JValue* result,
                                 ArtMethod* method,
                                 ShadowFrame* deopt_frame,
                                 ManagedStack* fragment)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  // Coming from partial-fragment deopt.
  Thread* self = Thread::Current();
  if (kIsDebugBuild) {
    // Sanity-check: are the methods as expected? We check that the last shadow frame (the bottom
    // of the call-stack) corresponds to the called method.
    ShadowFrame* linked = deopt_frame;
    while (linked->GetLink() != nullptr) {
      linked = linked->GetLink();
    }
    CHECK_EQ(method, linked->GetMethod()) << method->PrettyMethod() << " "
        << ArtMethod::PrettyMethod(linked->GetMethod());
  }

  if (VLOG_IS_ON(deopt)) {
    // Print out the stack to verify that it was a partial-fragment deopt.
    LOG(INFO) << "Continuing from deopt. Stack is:";
    QuickExceptionHandler::DumpFramesWithType(self, true);
  }

  ObjPtr<mirror::Throwable> pending_exception;
  bool from_code = false;
  DeoptimizationMethodType method_type;
  self->PopDeoptimizationContext(/* out */ result,
                                 /* out */ &pending_exception,
                                 /* out */ &from_code,
                                 /* out */ &method_type);

  // Push a transition back into managed code onto the linked list in thread.
  self->PushManagedStackFragment(fragment);

  // Ensure that the stack is still in order.
  if (kIsDebugBuild) {
    class DummyStackVisitor : public StackVisitor {
     public:
      explicit DummyStackVisitor(Thread* self_in) REQUIRES_SHARED(Locks::mutator_lock_)
          : StackVisitor(self_in, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames) {}

      bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
        // Nothing to do here. In a debug build, SanityCheckFrame will do the work in the walking
        // logic. Just always say we want to continue.
        return true;
      }
    };
    DummyStackVisitor dsv(self);
    dsv.WalkStack();
  }

  // Restore the exception that was pending before deoptimization, then interpret the
  // deoptimized frames.
  if (pending_exception != nullptr) {
    self->SetException(pending_exception);
  }
  interpreter::EnterInterpreterFromDeoptimize(self,
                                              deopt_frame,
                                              result,
                                              from_code,
                                              method_type);
}

extern "C" uint64_t artQuickToInterpreterBridge(ArtMethod* method, Thread* self, ArtMethod** sp)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  // Ensure we don't get thread suspension until the object arguments are safely in the shadow
  // frame.
  ScopedQuickEntrypointChecks sqec(self);

  if (UNLIKELY(!method->IsInvokable())) {
    method->ThrowInvocationTimeError();
    return 0;
  }

  JValue tmp_value;
  ShadowFrame* deopt_frame = self->PopStackedShadowFrame(
      StackedShadowFrameType::kDeoptimizationShadowFrame, false);
  ManagedStack fragment;

  DCHECK(!method->IsNative()) << method->PrettyMethod();
  uint32_t shorty_len = 0;
  ArtMethod* non_proxy_method = method->GetInterfaceMethodIfProxy(kRuntimePointerSize);
  DCHECK(non_proxy_method->GetCodeItem() != nullptr) << method->PrettyMethod();
  CodeItemDataAccessor accessor(non_proxy_method->DexInstructionData());
  const char* shorty = non_proxy_method->GetShorty(&shorty_len);

  JValue result;

  if (UNLIKELY(deopt_frame != nullptr)) {
    HandleDeoptimization(&result, method, deopt_frame, &fragment);
  } else {
    const char* old_cause = self->StartAssertNoThreadSuspension(
        "Building interpreter shadow frame");
    uint16_t num_regs = accessor.RegistersSize();
    // No last shadow coming from quick.
    ShadowFrameAllocaUniquePtr shadow_frame_unique_ptr =
        CREATE_SHADOW_FRAME(num_regs, /* link */ nullptr, method, /* dex pc */ 0);
    ShadowFrame* shadow_frame = shadow_frame_unique_ptr.get();
    size_t first_arg_reg = accessor.RegistersSize() - accessor.InsSize();
    BuildQuickShadowFrameVisitor shadow_frame_builder(sp, method->IsStatic(), shorty, shorty_len,
                                                      shadow_frame, first_arg_reg);
    shadow_frame_builder.VisitArguments();
    const bool needs_initialization =
        method->IsStatic() && !method->GetDeclaringClass()->IsInitialized();
    // Push a transition back into managed code onto the linked list in thread.
    self->PushManagedStackFragment(&fragment);
    self->PushShadowFrame(shadow_frame);
    self->EndAssertNoThreadSuspension(old_cause);

    if (needs_initialization) {
      // Ensure static method's class is initialized.
      StackHandleScope<1> hs(self);
      Handle<mirror::Class> h_class(hs.NewHandle(shadow_frame->GetMethod()->GetDeclaringClass()));
      if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(self, h_class, true, true)) {
        DCHECK(Thread::Current()->IsExceptionPending())
            << shadow_frame->GetMethod()->PrettyMethod();
        self->PopManagedStackFragment(fragment);
        return 0;
      }
    }

    result = interpreter::EnterInterpreterFromEntryPoint(self, accessor, shadow_frame);
  }

  // Pop transition.
  self->PopManagedStackFragment(fragment);

  // Request a stack deoptimization if needed.
  ArtMethod* caller = QuickArgumentVisitor::GetCallingMethod(sp);
  uintptr_t caller_pc = QuickArgumentVisitor::GetCallingPc(sp);
  // If caller_pc is the instrumentation exit stub, the stub will check to see if deoptimization
  // should be done and it knows the real return pc.
  if (UNLIKELY(caller_pc != reinterpret_cast<uintptr_t>(GetQuickInstrumentationExitPc()) &&
               Dbg::IsForcedInterpreterNeededForUpcall(self, caller))) {
    if (!Runtime::Current()->IsAsyncDeoptimizeable(caller_pc)) {
      LOG(WARNING) << "Got a deoptimization request on un-deoptimizable method "
                   << caller->PrettyMethod();
    } else {
      // Push the context of the deoptimization stack so we can restore the return value and the
      // exception before executing the deoptimized frames.
      self->PushDeoptimizationContext(
          result,
          shorty[0] == 'L' || shorty[0] == '[',  /* class or array */
          self->GetException(),
          false /* from_code */,
          DeoptimizationMethodType::kDefault);

      // Set special exception to cause deoptimization.
      self->SetException(Thread::GetDeoptimizationException());
    }
  }

  // No need to restore the args since the method has already been run by the interpreter.
  return result.GetJ();
}

// Visits arguments on the stack placing them into the args vector; Object* arguments are
// converted to jobjects.
class BuildQuickArgumentVisitor FINAL : public QuickArgumentVisitor {
 public:
  BuildQuickArgumentVisitor(ArtMethod** sp, bool is_static, const char* shorty, uint32_t shorty_len,
                            ScopedObjectAccessUnchecked* soa, std::vector<jvalue>* args) :
      QuickArgumentVisitor(sp, is_static, shorty, shorty_len), soa_(soa), args_(args) {}

  void Visit() REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE;

 private:
  ScopedObjectAccessUnchecked* const soa_;
  std::vector<jvalue>* const args_;

  DISALLOW_COPY_AND_ASSIGN(BuildQuickArgumentVisitor);
};

void BuildQuickArgumentVisitor::Visit() {
  jvalue val;
  Primitive::Type type = GetParamPrimitiveType();
  switch (type) {
    case Primitive::kPrimNot: {
      StackReference<mirror::Object>* stack_ref =
          reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress());
      val.l = soa_->AddLocalReference<jobject>(stack_ref->AsMirrorPtr());
      break;
    }
    case Primitive::kPrimLong:  // Fall-through.
    case Primitive::kPrimDouble:
      if (IsSplitLongOrDouble()) {
        val.j = ReadSplitLongParam();
      } else {
        val.j = *reinterpret_cast<jlong*>(GetParamAddress());
      }
      break;
    case Primitive::kPrimBoolean:  // Fall-through.
    case Primitive::kPrimByte:     // Fall-through.
    case Primitive::kPrimChar:     // Fall-through.
    case Primitive::kPrimShort:    // Fall-through.
    case Primitive::kPrimInt:      // Fall-through.
    case Primitive::kPrimFloat:
      val.i = *reinterpret_cast<jint*>(GetParamAddress());
      break;
    case Primitive::kPrimVoid:
      LOG(FATAL) << "UNREACHABLE";
      UNREACHABLE();
  }
  args_->push_back(val);
}

// Handler for invocation on proxy methods. On entry a frame will exist for the proxy object method
// which is responsible for recording callee save registers. We explicitly place into jobjects the
// incoming reference arguments (so they survive GC).
// We then invoke the invocation handler, which is a field within the proxy object; the handler
// boxes the primitive arguments and deals with error cases.
extern "C" uint64_t artQuickProxyInvokeHandler(
    ArtMethod* proxy_method, mirror::Object* receiver, Thread* self, ArtMethod** sp)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  DCHECK(proxy_method->IsProxyMethod()) << proxy_method->PrettyMethod();
  DCHECK(receiver->GetClass()->IsProxyClass()) << proxy_method->PrettyMethod();
  // Ensure we don't get thread suspension until the object arguments are safely in jobjects.
  const char* old_cause =
      self->StartAssertNoThreadSuspension("Adding to IRT proxy object arguments");
  // Register the top of the managed stack, making the stack crawlable.
  DCHECK_EQ((*sp), proxy_method) << proxy_method->PrettyMethod();
  self->VerifyStack();
  // Start new JNI local reference state.
  JNIEnvExt* env = self->GetJniEnv();
  ScopedObjectAccessUnchecked soa(env);
  ScopedJniEnvLocalRefState env_state(env);
  // Create a local reference copy of the receiver.
  jobject rcvr_jobj = soa.AddLocalReference<jobject>(receiver);

  // Place arguments into the args vector and remove the receiver.
  ArtMethod* non_proxy_method = proxy_method->GetInterfaceMethodIfProxy(kRuntimePointerSize);
  CHECK(!non_proxy_method->IsStatic()) << proxy_method->PrettyMethod() << " "
                                       << non_proxy_method->PrettyMethod();
  std::vector<jvalue> args;
  uint32_t shorty_len = 0;
  const char* shorty = non_proxy_method->GetShorty(&shorty_len);
  BuildQuickArgumentVisitor local_ref_visitor(
      sp, /* is_static */ false, shorty, shorty_len, &soa, &args);

  local_ref_visitor.VisitArguments();
  DCHECK_GT(args.size(), 0U) << proxy_method->PrettyMethod();
  args.erase(args.begin());

  // Convert proxy method into expected interface method.
  ArtMethod* interface_method = proxy_method->FindOverriddenMethod(kRuntimePointerSize);
  DCHECK(interface_method != nullptr) << proxy_method->PrettyMethod();
  DCHECK(!interface_method->IsProxyMethod()) << interface_method->PrettyMethod();
  self->EndAssertNoThreadSuspension(old_cause);
  DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), kRuntimePointerSize);
  DCHECK(!Runtime::Current()->IsActiveTransaction());
  ObjPtr<mirror::Method> interface_reflect_method =
      mirror::Method::CreateFromArtMethod<kRuntimePointerSize, false>(soa.Self(), interface_method);
  if (interface_reflect_method == nullptr) {
    soa.Self()->AssertPendingOOMException();
    return 0;
  }
  jobject interface_method_jobj = soa.AddLocalReference<jobject>(interface_reflect_method);

  // All naked Object*s should now be in jobjects, so it's safe to go into the main invoke code
  // that performs allocations.
  JValue result = InvokeProxyInvocationHandler(soa, shorty, rcvr_jobj, interface_method_jobj, args);
  return result.GetJ();
}

// Visitor returning a reference argument at a given position in a Quick stack frame.
// NOTE: Only used for testing purposes.
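// A test is expected to call artQuickGetProxyReferenceArgumentAt() (defined below) with the
// argument's position in the shorty (position 0 being the implicit 'this') and the proxy
// frame's sp, and to compare the returned StackReference against the expected object.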
class GetQuickReferenceArgumentAtVisitor FINAL : public QuickArgumentVisitor {
 public:
  GetQuickReferenceArgumentAtVisitor(ArtMethod** sp,
                                     const char* shorty,
                                     uint32_t shorty_len,
                                     size_t arg_pos)
      : QuickArgumentVisitor(sp, /* is_static */ false, shorty, shorty_len),
        cur_pos_(0u),
        arg_pos_(arg_pos),
        ref_arg_(nullptr) {
    CHECK_LT(arg_pos, shorty_len) << "Argument position greater than the number of arguments";
  }

  void Visit() REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE {
    if (cur_pos_ == arg_pos_) {
      Primitive::Type type = GetParamPrimitiveType();
      CHECK_EQ(type, Primitive::kPrimNot) << "Argument at searched position is not a reference";
      ref_arg_ = reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress());
    }
    ++cur_pos_;
  }

  StackReference<mirror::Object>* GetReferenceArgument() {
    return ref_arg_;
  }

 private:
  // The position of the currently visited argument.
  size_t cur_pos_;
  // The position of the searched argument.
  const size_t arg_pos_;
  // The reference argument, if found.
  StackReference<mirror::Object>* ref_arg_;

  DISALLOW_COPY_AND_ASSIGN(GetQuickReferenceArgumentAtVisitor);
};

// Returns the reference argument at position `arg_pos` in the Quick stack frame at address `sp`.
// NOTE: Only used for testing purposes.
extern "C" StackReference<mirror::Object>* artQuickGetProxyReferenceArgumentAt(size_t arg_pos,
                                                                               ArtMethod** sp)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  ArtMethod* proxy_method = *sp;
  ArtMethod* non_proxy_method = proxy_method->GetInterfaceMethodIfProxy(kRuntimePointerSize);
  CHECK(!non_proxy_method->IsStatic())
      << proxy_method->PrettyMethod() << " " << non_proxy_method->PrettyMethod();
  uint32_t shorty_len = 0;
  const char* shorty = non_proxy_method->GetShorty(&shorty_len);
  GetQuickReferenceArgumentAtVisitor ref_arg_visitor(sp, shorty, shorty_len, arg_pos);
  ref_arg_visitor.VisitArguments();
  StackReference<mirror::Object>* ref_arg = ref_arg_visitor.GetReferenceArgument();
  return ref_arg;
}

// Visitor returning all the reference arguments in a Quick stack frame.
class GetQuickReferenceArgumentsVisitor FINAL : public QuickArgumentVisitor {
 public:
  GetQuickReferenceArgumentsVisitor(ArtMethod** sp,
                                    bool is_static,
                                    const char* shorty,
                                    uint32_t shorty_len)
      : QuickArgumentVisitor(sp, is_static, shorty, shorty_len) {}

  void Visit() REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE {
    Primitive::Type type = GetParamPrimitiveType();
    if (type == Primitive::kPrimNot) {
      StackReference<mirror::Object>* ref_arg =
          reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress());
      ref_args_.push_back(ref_arg);
    }
  }

  std::vector<StackReference<mirror::Object>*> GetReferenceArguments() {
    return ref_args_;
  }

 private:
  // The reference arguments.
  std::vector<StackReference<mirror::Object>*> ref_args_;

  DISALLOW_COPY_AND_ASSIGN(GetQuickReferenceArgumentsVisitor);
};

// Returns all reference arguments in the Quick stack frame at address `sp`.
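// The returned StackReference pointers point into the proxy frame itself, so they are only
// valid while that frame is on the stack.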
std::vector<StackReference<mirror::Object>*> GetProxyReferenceArguments(ArtMethod** sp)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  ArtMethod* proxy_method = *sp;
  ArtMethod* non_proxy_method = proxy_method->GetInterfaceMethodIfProxy(kRuntimePointerSize);
  CHECK(!non_proxy_method->IsStatic())
      << proxy_method->PrettyMethod() << " " << non_proxy_method->PrettyMethod();
  uint32_t shorty_len = 0;
  const char* shorty = non_proxy_method->GetShorty(&shorty_len);
  GetQuickReferenceArgumentsVisitor ref_args_visitor(sp, /* is_static */ false, shorty, shorty_len);
  ref_args_visitor.VisitArguments();
  std::vector<StackReference<mirror::Object>*> ref_args = ref_args_visitor.GetReferenceArguments();
  return ref_args;
}

// Reads object references held in arguments from quick frames and places them in JNI local
// references, so they don't get garbage collected.
class RememberForGcArgumentVisitor FINAL : public QuickArgumentVisitor {
 public:
  RememberForGcArgumentVisitor(ArtMethod** sp, bool is_static, const char* shorty,
                               uint32_t shorty_len, ScopedObjectAccessUnchecked* soa) :
      QuickArgumentVisitor(sp, is_static, shorty, shorty_len), soa_(soa) {}

  void Visit() REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE;

  void FixupReferences() REQUIRES_SHARED(Locks::mutator_lock_);

 private:
  ScopedObjectAccessUnchecked* const soa_;
  // References which we must update when exiting in case the GC moved the objects.
  std::vector<std::pair<jobject, StackReference<mirror::Object>*> > references_;

  DISALLOW_COPY_AND_ASSIGN(RememberForGcArgumentVisitor);
};

void RememberForGcArgumentVisitor::Visit() {
  if (IsParamAReference()) {
    StackReference<mirror::Object>* stack_ref =
        reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress());
    jobject reference =
        soa_->AddLocalReference<jobject>(stack_ref->AsMirrorPtr());
    references_.push_back(std::make_pair(reference, stack_ref));
  }
}

void RememberForGcArgumentVisitor::FixupReferences() {
  // Fixup any references which may have changed.
  for (const auto& pair : references_) {
    pair.second->Assign(soa_->Decode<mirror::Object>(pair.first));
    soa_->Env()->DeleteLocalRef(pair.first);
  }
}

extern "C" const void* artInstrumentationMethodEntryFromCode(ArtMethod* method,
                                                             mirror::Object* this_object,
                                                             Thread* self,
                                                             ArtMethod** sp)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  const void* result;
  // Instrumentation changes the stack. Thus, when exiting, the stack cannot be verified, so skip
  // that part.
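  // (The 'false' passed as the third argument below is what disables that exit check.)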
  ScopedQuickEntrypointChecks sqec(self, kIsDebugBuild, false);
  instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
  if (instrumentation->IsDeoptimized(method)) {
    result = GetQuickToInterpreterBridge();
  } else {
    result = instrumentation->GetQuickCodeFor(method, kRuntimePointerSize);
    DCHECK(!Runtime::Current()->GetClassLinker()->IsQuickToInterpreterBridge(result));
  }

  bool interpreter_entry = (result == GetQuickToInterpreterBridge());
  bool is_static = method->IsStatic();
  uint32_t shorty_len;
  const char* shorty =
      method->GetInterfaceMethodIfProxy(kRuntimePointerSize)->GetShorty(&shorty_len);

  ScopedObjectAccessUnchecked soa(self);
  RememberForGcArgumentVisitor visitor(sp, is_static, shorty, shorty_len, &soa);
  visitor.VisitArguments();

  instrumentation->PushInstrumentationStackFrame(self,
                                                 is_static ? nullptr : this_object,
                                                 method,
                                                 QuickArgumentVisitor::GetCallingPc(sp),
                                                 interpreter_entry);

  visitor.FixupReferences();
  if (UNLIKELY(self->IsExceptionPending())) {
    return nullptr;
  }
  CHECK(result != nullptr) << method->PrettyMethod();
  return result;
}

extern "C" TwoWordReturn artInstrumentationMethodExitFromCode(Thread* self,
                                                              ArtMethod** sp,
                                                              uint64_t* gpr_result,
                                                              uint64_t* fpr_result)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  DCHECK_EQ(reinterpret_cast<uintptr_t>(self), reinterpret_cast<uintptr_t>(Thread::Current()));
  CHECK(gpr_result != nullptr);
  CHECK(fpr_result != nullptr);
  // Instrumentation exit stub must not be entered with a pending exception.
  CHECK(!self->IsExceptionPending()) << "Enter instrumentation exit stub with pending exception "
                                     << self->GetException()->Dump();
  // Compute address of return PC and sanity check that it currently holds 0.
  size_t return_pc_offset = GetCalleeSaveReturnPcOffset(kRuntimeISA,
                                                        CalleeSaveType::kSaveEverything);
  uintptr_t* return_pc = reinterpret_cast<uintptr_t*>(reinterpret_cast<uint8_t*>(sp) +
                                                      return_pc_offset);
  CHECK_EQ(*return_pc, 0U);

  // Pop the frame, filling in the return pc. The two halves of the return value encode what to
  // do next: when deoptimization is not needed, one half is 0 and the other holds the caller's
  // return address; when deoptimization is needed, the pair instead carries the return address
  // and the address of the deoptimization entry point.
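  // (The assembly exit stub consumes this pair from the return registers, either resuming at
  // the restored return address or transferring to the deoptimization entry point.)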
  instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
  TwoWordReturn return_or_deoptimize_pc = instrumentation->PopInstrumentationStackFrame(
      self, return_pc, gpr_result, fpr_result);
  if (self->IsExceptionPending() || self->ObserveAsyncException()) {
    return GetTwoWordFailureValue();
  }
  return return_or_deoptimize_pc;
}

static std::string DumpInstruction(ArtMethod* method, uint32_t dex_pc)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  if (dex_pc == static_cast<uint32_t>(-1)) {
    CHECK(method == jni::DecodeArtMethod(WellKnownClasses::java_lang_String_charAt));
    return "<native>";
  } else {
    CodeItemInstructionAccessor accessor = method->DexInstructions();
    CHECK_LT(dex_pc, accessor.InsnsSizeInCodeUnits());
    return accessor.InstructionAt(dex_pc).DumpString(method->GetDexFile());
  }
}

static void DumpB74410240ClassData(ObjPtr<mirror::Class> klass)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  std::string storage;
  const char* descriptor = klass->GetDescriptor(&storage);
  LOG(FATAL_WITHOUT_ABORT) << "  " << DescribeLoaders(klass->GetClassLoader(), descriptor);
  const OatDexFile* oat_dex_file = klass->GetDexFile().GetOatDexFile();
  if (oat_dex_file != nullptr) {
    const OatFile* oat_file = oat_dex_file->GetOatFile();
    const char* dex2oat_cmdline =
        oat_file->GetOatHeader().GetStoreValueByKey(OatHeader::kDex2OatCmdLineKey);
    LOG(FATAL_WITHOUT_ABORT) << "    OatFile: " << oat_file->GetLocation()
        << "; " << (dex2oat_cmdline != nullptr ? dex2oat_cmdline : "<not recorded>");
  }
}

static void DumpB74410240DebugData(ArtMethod** sp) REQUIRES_SHARED(Locks::mutator_lock_) {
  // Mimic the search for the caller and dump some data while doing so.
  LOG(FATAL_WITHOUT_ABORT) << "Dumping debugging data, please attach a bugreport to b/74410240.";

  constexpr CalleeSaveType type = CalleeSaveType::kSaveRefsAndArgs;
  CHECK_EQ(*sp, Runtime::Current()->GetCalleeSaveMethod(type));

  const size_t callee_frame_size = GetCalleeSaveFrameSize(kRuntimeISA, type);
  auto** caller_sp = reinterpret_cast<ArtMethod**>(
      reinterpret_cast<uintptr_t>(sp) + callee_frame_size);
  const size_t callee_return_pc_offset = GetCalleeSaveReturnPcOffset(kRuntimeISA, type);
  uintptr_t caller_pc = *reinterpret_cast<uintptr_t*>(
      (reinterpret_cast<uint8_t*>(sp) + callee_return_pc_offset));
  ArtMethod* outer_method = *caller_sp;

  if (UNLIKELY(caller_pc == reinterpret_cast<uintptr_t>(GetQuickInstrumentationExitPc()))) {
    LOG(FATAL_WITHOUT_ABORT) << "Method: " << outer_method->PrettyMethod()
        << " native pc: " << caller_pc << " Instrumented!";
    return;
  }

  const OatQuickMethodHeader* current_code = outer_method->GetOatQuickMethodHeader(caller_pc);
  CHECK(current_code != nullptr);
  CHECK(current_code->IsOptimized());
  uintptr_t native_pc_offset = current_code->NativeQuickPcOffset(caller_pc);
  CodeInfo code_info = current_code->GetOptimizedCodeInfo();
  MethodInfo method_info = current_code->GetOptimizedMethodInfo();
  CodeInfoEncoding encoding = code_info.ExtractEncoding();
  StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset, encoding);
  CHECK(stack_map.IsValid());
  uint32_t dex_pc = stack_map.GetDexPc(encoding.stack_map.encoding);

  // Log the outer method and its associated dex file and class table pointer, which can be used
  // to find out if the inlined methods were defined by other dex file(s) or class loader(s).
  ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
  LOG(FATAL_WITHOUT_ABORT) << "Outer: " << outer_method->PrettyMethod()
      << " native pc: " << caller_pc
      << " dex pc: " << dex_pc
      << " dex file: " << outer_method->GetDexFile()->GetLocation()
      << " class table: " << class_linker->ClassTableForClassLoader(outer_method->GetClassLoader());
  DumpB74410240ClassData(outer_method->GetDeclaringClass());
  LOG(FATAL_WITHOUT_ABORT) << "  instruction: " << DumpInstruction(outer_method, dex_pc);

  ArtMethod* caller = outer_method;
  if (stack_map.HasInlineInfo(encoding.stack_map.encoding)) {
    InlineInfo inline_info = code_info.GetInlineInfoOf(stack_map, encoding);
    const InlineInfoEncoding& inline_info_encoding = encoding.inline_info.encoding;
    size_t depth = inline_info.GetDepth(inline_info_encoding);
    for (size_t d = 0; d < depth; ++d) {
      const char* tag = "";
      dex_pc = inline_info.GetDexPcAtDepth(inline_info_encoding, d);
      if (inline_info.EncodesArtMethodAtDepth(inline_info_encoding, d)) {
        tag = "encoded ";
        caller = inline_info.GetArtMethodAtDepth(inline_info_encoding, d);
      } else {
        uint32_t method_index = inline_info.GetMethodIndexAtDepth(inline_info_encoding,
                                                                  method_info,
                                                                  d);
        if (dex_pc == static_cast<uint32_t>(-1)) {
          tag = "special ";
          CHECK_EQ(d + 1u, depth);
          caller = jni::DecodeArtMethod(WellKnownClasses::java_lang_String_charAt);
          CHECK_EQ(caller->GetDexMethodIndex(), method_index);
        } else {
          ObjPtr<mirror::DexCache> dex_cache = caller->GetDexCache();
          ObjPtr<mirror::ClassLoader> class_loader = caller->GetClassLoader();
          caller = class_linker->LookupResolvedMethod(method_index, dex_cache, class_loader);
          CHECK(caller != nullptr);
        }
      }
      LOG(FATAL_WITHOUT_ABORT) << "Inlined method #" << d << ": " << tag << caller->PrettyMethod()
          << " dex pc: " << dex_pc
          << " dex file: " << caller->GetDexFile()->GetLocation()
          << " class table: "
          << class_linker->ClassTableForClassLoader(caller->GetClassLoader());
      DumpB74410240ClassData(caller->GetDeclaringClass());
      LOG(FATAL_WITHOUT_ABORT) << "  instruction: " << DumpInstruction(caller, dex_pc);
    }
  }
}

// Lazily resolve a method for quick. Called by stub code.
extern "C" const void* artQuickResolutionTrampoline(
    ArtMethod* called, mirror::Object* receiver, Thread* self, ArtMethod** sp)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  // The resolution trampoline stashes the resolved method into the callee-save frame to transport
  // it. Thus, when exiting, the stack cannot be verified (as the resolved method most likely
  // does not have the same stack layout as the callee-save method).
  ScopedQuickEntrypointChecks sqec(self, kIsDebugBuild, false);
  // Start new JNI local reference state.
  JNIEnvExt* env = self->GetJniEnv();
  ScopedObjectAccessUnchecked soa(env);
  ScopedJniEnvLocalRefState env_state(env);
  const char* old_cause = self->StartAssertNoThreadSuspension("Quick method resolution set up");

  // Compute details about the called method (avoid GCs).
  ClassLinker* linker = Runtime::Current()->GetClassLinker();
  InvokeType invoke_type;
  MethodReference called_method(nullptr, 0);
  const bool called_method_known_on_entry = !called->IsRuntimeMethod();
  ArtMethod* caller = nullptr;
  if (!called_method_known_on_entry) {
    caller = QuickArgumentVisitor::GetCallingMethod(sp);
    called_method.dex_file = caller->GetDexFile();

    InvokeType stack_map_invoke_type;
    uint32_t stack_map_dex_method_idx;
    const bool found_stack_map = QuickArgumentVisitor::GetInvokeType(sp,
                                                                     &stack_map_invoke_type,
                                                                     &stack_map_dex_method_idx);
    // For debug builds, we make sure both of the paths are consistent by also looking at the dex
    // code.
    if (!found_stack_map || kIsDebugBuild) {
      uint32_t dex_pc = QuickArgumentVisitor::GetCallingDexPc(sp);
      CodeItemInstructionAccessor accessor(caller->DexInstructions());
      CHECK_LT(dex_pc, accessor.InsnsSizeInCodeUnits());
      const Instruction& instr = accessor.InstructionAt(dex_pc);
      Instruction::Code instr_code = instr.Opcode();
      bool is_range;
      switch (instr_code) {
        case Instruction::INVOKE_DIRECT:
          invoke_type = kDirect;
          is_range = false;
          break;
        case Instruction::INVOKE_DIRECT_RANGE:
          invoke_type = kDirect;
          is_range = true;
          break;
        case Instruction::INVOKE_STATIC:
          invoke_type = kStatic;
          is_range = false;
          break;
        case Instruction::INVOKE_STATIC_RANGE:
          invoke_type = kStatic;
          is_range = true;
          break;
        case Instruction::INVOKE_SUPER:
          invoke_type = kSuper;
          is_range = false;
          break;
        case Instruction::INVOKE_SUPER_RANGE:
          invoke_type = kSuper;
          is_range = true;
          break;
        case Instruction::INVOKE_VIRTUAL:
          invoke_type = kVirtual;
          is_range = false;
          break;
        case Instruction::INVOKE_VIRTUAL_RANGE:
          invoke_type = kVirtual;
          is_range = true;
          break;
        case Instruction::INVOKE_INTERFACE:
          invoke_type = kInterface;
          is_range = false;
          break;
        case Instruction::INVOKE_INTERFACE_RANGE:
          invoke_type = kInterface;
          is_range = true;
          break;
        default:
          DumpB74410240DebugData(sp);
          LOG(FATAL) << "Unexpected call into trampoline: " << instr.DumpString(nullptr);
          UNREACHABLE();
      }
      called_method.index = (is_range) ? instr.VRegB_3rc() : instr.VRegB_35c();
      // Check that the invoke matches what we expected; note that this path only happens for
      // debug builds.
      if (found_stack_map) {
        DCHECK_EQ(stack_map_invoke_type, invoke_type);
        if (invoke_type != kSuper) {
          // Super may be sharpened.
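          // (I.e. the compiler may devirtualize an invoke-super to a concrete target, so the
          // method index recorded in the stack map can legitimately differ from the index in
          // the dex instruction.)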
          DCHECK_EQ(stack_map_dex_method_idx, called_method.index)
              << called_method.dex_file->PrettyMethod(stack_map_dex_method_idx) << " "
              << called_method.PrettyMethod();
        }
      } else {
        VLOG(dex) << "Accessed dex file for invoke " << invoke_type << " "
                  << called_method.index;
      }
    } else {
      invoke_type = stack_map_invoke_type;
      called_method.index = stack_map_dex_method_idx;
    }
  } else {
    invoke_type = kStatic;
    called_method.dex_file = called->GetDexFile();
    called_method.index = called->GetDexMethodIndex();
  }
  uint32_t shorty_len;
  const char* shorty =
      called_method.dex_file->GetMethodShorty(called_method.GetMethodId(), &shorty_len);
  RememberForGcArgumentVisitor visitor(sp, invoke_type == kStatic, shorty, shorty_len, &soa);
  visitor.VisitArguments();
  self->EndAssertNoThreadSuspension(old_cause);
  const bool virtual_or_interface = invoke_type == kVirtual || invoke_type == kInterface;
  // Resolve method filling in dex cache.
  if (!called_method_known_on_entry) {
    StackHandleScope<1> hs(self);
    mirror::Object* dummy = nullptr;
    HandleWrapper<mirror::Object> h_receiver(
        hs.NewHandleWrapper(virtual_or_interface ? &receiver : &dummy));
    DCHECK_EQ(caller->GetDexFile(), called_method.dex_file);
    called = linker->ResolveMethod<ClassLinker::ResolveMode::kCheckICCEAndIAE>(
        self, called_method.index, caller, invoke_type);

    // Update .bss entry in oat file if any.
    if (called != nullptr && called_method.dex_file->GetOatDexFile() != nullptr) {
      size_t bss_offset = IndexBssMappingLookup::GetBssOffset(
          called_method.dex_file->GetOatDexFile()->GetMethodBssMapping(),
          called_method.index,
          called_method.dex_file->NumMethodIds(),
          static_cast<size_t>(kRuntimePointerSize));
      if (bss_offset != IndexBssMappingLookup::npos) {
        DCHECK_ALIGNED(bss_offset, static_cast<size_t>(kRuntimePointerSize));
        const OatFile* oat_file = called_method.dex_file->GetOatDexFile()->GetOatFile();
        ArtMethod** method_entry = reinterpret_cast<ArtMethod**>(const_cast<uint8_t*>(
            oat_file->BssBegin() + bss_offset));
        DCHECK_GE(method_entry, oat_file->GetBssMethods().data());
        DCHECK_LT(method_entry,
                  oat_file->GetBssMethods().data() + oat_file->GetBssMethods().size());
        *method_entry = called;
      }
    }
  }
  const void* code = nullptr;
  if (LIKELY(!self->IsExceptionPending())) {
    // Incompatible class change should have been handled in resolve method.
    CHECK(!called->CheckIncompatibleClassChange(invoke_type))
        << called->PrettyMethod() << " " << invoke_type;
    if (virtual_or_interface || invoke_type == kSuper) {
      // Refine called method based on receiver for kVirtual/kInterface, and
      // caller for kSuper.
      ArtMethod* orig_called = called;
      if (invoke_type == kVirtual) {
        CHECK(receiver != nullptr) << invoke_type;
        called = receiver->GetClass()->FindVirtualMethodForVirtual(called, kRuntimePointerSize);
      } else if (invoke_type == kInterface) {
        CHECK(receiver != nullptr) << invoke_type;
        called = receiver->GetClass()->FindVirtualMethodForInterface(called, kRuntimePointerSize);
      } else {
        DCHECK_EQ(invoke_type, kSuper);
        CHECK(caller != nullptr) << invoke_type;
        ObjPtr<mirror::Class> ref_class = linker->LookupResolvedType(
            caller->GetDexFile()->GetMethodId(called_method.index).class_idx_, caller);
        if (ref_class->IsInterface()) {
          called = ref_class->FindVirtualMethodForInterfaceSuper(called, kRuntimePointerSize);
        } else {
          called = caller->GetDeclaringClass()->GetSuperClass()->GetVTableEntry(
              called->GetMethodIndex(), kRuntimePointerSize);
        }
      }

      CHECK(called != nullptr) << orig_called->PrettyMethod() << " "
                               << mirror::Object::PrettyTypeOf(receiver) << " "
                               << invoke_type << " " << orig_called->GetVtableIndex();
    }

    // Ensure that the called method's class is initialized.
    StackHandleScope<1> hs(soa.Self());
    Handle<mirror::Class> called_class(hs.NewHandle(called->GetDeclaringClass()));
    linker->EnsureInitialized(soa.Self(), called_class, true, true);
    if (LIKELY(called_class->IsInitialized())) {
      if (UNLIKELY(Dbg::IsForcedInterpreterNeededForResolution(self, called))) {
        // If we are single-stepping or the called method is deoptimized (by a
        // breakpoint, for example), then we have to execute the called method
        // with the interpreter.
        code = GetQuickToInterpreterBridge();
      } else if (UNLIKELY(Dbg::IsForcedInstrumentationNeededForResolution(self, caller))) {
        // If the caller is deoptimized (by a breakpoint, for example), we have to
        // continue its execution with the interpreter when returning from the called
        // method. Because we do not want to execute the called method with the
        // interpreter, we wrap its execution into the instrumentation stubs.
        // When the called method returns, it will execute the instrumentation
        // exit hook that determines whether the interpreter is needed, with a call
        // to Dbg::IsForcedInterpreterNeededForUpcall, and deoptimizes the stack if
        // it is.
        code = GetQuickInstrumentationEntryPoint();
      } else {
        code = called->GetEntryPointFromQuickCompiledCode();
      }
    } else if (called_class->IsInitializing()) {
      if (UNLIKELY(Dbg::IsForcedInterpreterNeededForResolution(self, called))) {
        // If we are single-stepping or the called method is deoptimized (by a
        // breakpoint, for example), then we have to execute the called method
        // with the interpreter.
        code = GetQuickToInterpreterBridge();
      } else if (invoke_type == kStatic) {
        // Class is still initializing, go to oat and grab code (trampoline must be left in place
        // until class is initialized to stop races between threads).
        code = linker->GetQuickOatCodeFor(called);
      } else {
        // No trampoline for non-static methods.
        code = called->GetEntryPointFromQuickCompiledCode();
      }
    } else {
      DCHECK(called_class->IsErroneous());
    }
  }
  CHECK_EQ(code == nullptr, self->IsExceptionPending());
  // Fix up any locally saved objects that may have moved during a GC.
  visitor.FixupReferences();
  // Place called method in callee-save frame to be placed as first argument to quick method.
  *sp = called;

  return code;
}

/*
 * This class uses a couple of observations to unite the different calling conventions through
 * a few constants.
 *
 * 1) Number of registers used for passing is normally even, so counting down has no penalty for
 *    possible alignment.
 * 2) Known 64b architectures store 8B units on the stack, both for integral and floating point
 *    types, so using uintptr_t is OK. Also means that we can use kRegistersNeededX to denote
 *    when we have to split things.
 * 3) The only soft-float ABI, Arm's, is 32b, so no widening needs to be taken into account for
 *    floats and we can use the Int handling directly.
 * 4) Only 64b architectures widen, and their stack is aligned 8B anyway, so no padding code
 *    is necessary when widening. Also, widening of Ints will take place implicitly, and the
 *    extension should be compatible with Aarch64, which mandates copying the available bits
 *    into LSB and leaving the rest unspecified.
 * 5) Aligning longs and doubles is necessary on arm only, and it's the same in registers and on
 *    the stack.
 * 6) There is only little endian.
 *
 *
 * Actual work is supposed to be done in a delegate of the template type. The interface is as
 * follows:
 *
 * void PushGpr(uintptr_t): Add a value for the next GPR.
 *
 * void PushFpr4(float): Add a value for the next FPR of size 32b. Only called on architectures
 *                       with 32b FPR slots, e.g. to push padding before an aligned double.
 *
 * void PushFpr8(uint64_t): Push a double. We _will_ call this on 32b, it's the callee's job to
 *                          split this if necessary. The current state will have aligned, if
 *                          necessary.
 *
 * void PushStack(uintptr_t): Push a value to the stack.
 *
 * uintptr_t PushHandle(mirror::Object* ref): Add a reference to the HandleScope. This _will_ be
 *                                            called with nullptr, as this might be important for
 *                                            null initialization. Must return the jobject, that
 *                                            is, the reference to the entry in the HandleScope
 *                                            (nullptr if necessary). A minimal delegate is
 *                                            sketched below.
 */
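// A minimal sketch of a conforming delegate (illustration only, not compiled into the runtime;
// the name CountingDelegate is hypothetical). It merely counts what would be pushed:
//
//   class CountingDelegate {
//    public:
//     void PushGpr(uintptr_t) { gprs_++; }
//     void PushFpr4(float) { fprs_++; }
//     void PushFpr8(uint64_t) { fprs_++; }
//     void PushStack(uintptr_t) { stack_++; }
//     uintptr_t PushHandle(mirror::Object*) REQUIRES_SHARED(Locks::mutator_lock_) {
//       handles_++;
//       return 0u;  // A real delegate returns the jobject for the new handle (or nullptr).
//     }
//    private:
//     size_t gprs_ = 0u, fprs_ = 0u, stack_ = 0u, handles_ = 0u;
//   };
//
//   // Usage: CountingDelegate counter;
//   //        BuildNativeCallFrameStateMachine<CountingDelegate> sm(&counter);
//   //        sm.AdvanceInt(42);  // Routed to counter.PushGpr() or counter.PushStack().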
template<class T> class BuildNativeCallFrameStateMachine {
 public:
#if defined(__arm__)
  // TODO: These are all dummy values!
  static constexpr bool kNativeSoftFloatAbi = true;
  static constexpr size_t kNumNativeGprArgs = 4;  // 4 arguments passed in GPRs, r0-r3.
  static constexpr size_t kNumNativeFprArgs = 0;  // 0 arguments passed in FPRs.

  static constexpr size_t kRegistersNeededForLong = 2;
  static constexpr size_t kRegistersNeededForDouble = 2;
  static constexpr bool kMultiRegistersAligned = true;
  static constexpr bool kMultiFPRegistersWidened = false;
  static constexpr bool kMultiGPRegistersWidened = false;
  static constexpr bool kAlignLongOnStack = true;
  static constexpr bool kAlignDoubleOnStack = true;
#elif defined(__aarch64__)
  static constexpr bool kNativeSoftFloatAbi = false;  // This is a hard float ABI.
  static constexpr size_t kNumNativeGprArgs = 8;  // 8 arguments passed in GPRs.
  static constexpr size_t kNumNativeFprArgs = 8;  // 8 arguments passed in FPRs.

  static constexpr size_t kRegistersNeededForLong = 1;
  static constexpr size_t kRegistersNeededForDouble = 1;
  static constexpr bool kMultiRegistersAligned = false;
  static constexpr bool kMultiFPRegistersWidened = false;
  static constexpr bool kMultiGPRegistersWidened = false;
  static constexpr bool kAlignLongOnStack = false;
  static constexpr bool kAlignDoubleOnStack = false;
#elif defined(__mips__) && !defined(__LP64__)
  // Treated as soft-float for native calls: the leading JNIEnv*/jobject arguments keep the o32
  // convention from ever placing FP args in FPRs, so GPRs alone suffice.
  static constexpr bool kNativeSoftFloatAbi = true;
  static constexpr size_t kNumNativeGprArgs = 4;  // 4 arguments passed in GPRs.
  static constexpr size_t kNumNativeFprArgs = 0;  // 0 arguments passed in FPRs.

  static constexpr size_t kRegistersNeededForLong = 2;
  static constexpr size_t kRegistersNeededForDouble = 2;
  static constexpr bool kMultiRegistersAligned = true;
  static constexpr bool kMultiFPRegistersWidened = true;
  static constexpr bool kMultiGPRegistersWidened = false;
  static constexpr bool kAlignLongOnStack = true;
  static constexpr bool kAlignDoubleOnStack = true;
#elif defined(__mips__) && defined(__LP64__)
  // Let the code prepare GPRs only and we will load the FPRs with the same data.
  static constexpr bool kNativeSoftFloatAbi = true;
  static constexpr size_t kNumNativeGprArgs = 8;
  static constexpr size_t kNumNativeFprArgs = 0;

  static constexpr size_t kRegistersNeededForLong = 1;
  static constexpr size_t kRegistersNeededForDouble = 1;
  static constexpr bool kMultiRegistersAligned = false;
  static constexpr bool kMultiFPRegistersWidened = false;
  static constexpr bool kMultiGPRegistersWidened = true;
  static constexpr bool kAlignLongOnStack = false;
  static constexpr bool kAlignDoubleOnStack = false;
#elif defined(__i386__)
  // TODO: Check these!
  static constexpr bool kNativeSoftFloatAbi = false;  // Not using int registers for fp.
  static constexpr size_t kNumNativeGprArgs = 0;  // 0 arguments passed in GPRs.
  static constexpr size_t kNumNativeFprArgs = 0;  // 0 arguments passed in FPRs.

  static constexpr size_t kRegistersNeededForLong = 2;
  static constexpr size_t kRegistersNeededForDouble = 2;
  static constexpr bool kMultiRegistersAligned = false;  // x86 does not use argument registers anyway.
  static constexpr bool kMultiFPRegistersWidened = false;
  static constexpr bool kMultiGPRegistersWidened = false;
  static constexpr bool kAlignLongOnStack = false;
  static constexpr bool kAlignDoubleOnStack = false;
#elif defined(__x86_64__)
  static constexpr bool kNativeSoftFloatAbi = false;  // This is a hard float ABI.
  static constexpr size_t kNumNativeGprArgs = 6;  // 6 arguments passed in GPRs.
  static constexpr size_t kNumNativeFprArgs = 8;  // 8 arguments passed in FPRs.

  static constexpr size_t kRegistersNeededForLong = 1;
  static constexpr size_t kRegistersNeededForDouble = 1;
  static constexpr bool kMultiRegistersAligned = false;
  static constexpr bool kMultiFPRegistersWidened = false;
  static constexpr bool kMultiGPRegistersWidened = false;
  static constexpr bool kAlignLongOnStack = false;
  static constexpr bool kAlignDoubleOnStack = false;
#else
#error "Unsupported architecture"
#endif

 public:
  explicit BuildNativeCallFrameStateMachine(T* delegate)
      : gpr_index_(kNumNativeGprArgs),
        fpr_index_(kNumNativeFprArgs),
        stack_entries_(0),
        delegate_(delegate) {
    // For register alignment, we want to assume that counters (gpr_index_, fpr_index_) are even
    // iff the next register is even; counting down is just to make the compiler happy...
    static_assert(kNumNativeGprArgs % 2 == 0U, "Number of native GPR arguments not even");
    static_assert(kNumNativeFprArgs % 2 == 0U, "Number of native FPR arguments not even");
  }

  virtual ~BuildNativeCallFrameStateMachine() {}

  bool HavePointerGpr() const {
    return gpr_index_ > 0;
  }

  void AdvancePointer(const void* val) {
    if (HavePointerGpr()) {
      gpr_index_--;
      PushGpr(reinterpret_cast<uintptr_t>(val));
    } else {
      stack_entries_++;  // TODO: have a field for pointer length as multiple of 32b.
      PushStack(reinterpret_cast<uintptr_t>(val));
      gpr_index_ = 0;
    }
  }

  bool HaveHandleScopeGpr() const {
    return gpr_index_ > 0;
  }

  void AdvanceHandleScope(mirror::Object* ptr) REQUIRES_SHARED(Locks::mutator_lock_) {
    uintptr_t handle = PushHandle(ptr);
    if (HaveHandleScopeGpr()) {
      gpr_index_--;
      PushGpr(handle);
    } else {
      stack_entries_++;
      PushStack(handle);
      gpr_index_ = 0;
    }
  }

  bool HaveIntGpr() const {
    return gpr_index_ > 0;
  }

  void AdvanceInt(uint32_t val) {
    if (HaveIntGpr()) {
      gpr_index_--;
      if (kMultiGPRegistersWidened) {
        DCHECK_EQ(sizeof(uintptr_t), sizeof(int64_t));
        PushGpr(static_cast<int64_t>(bit_cast<int32_t, uint32_t>(val)));
      } else {
        PushGpr(val);
      }
    } else {
      stack_entries_++;
      if (kMultiGPRegistersWidened) {
        DCHECK_EQ(sizeof(uintptr_t), sizeof(int64_t));
        PushStack(static_cast<int64_t>(bit_cast<int32_t, uint32_t>(val)));
      } else {
        PushStack(val);
      }
      gpr_index_ = 0;
    }
  }

  bool HaveLongGpr() const {
    return gpr_index_ >= kRegistersNeededForLong + (LongGprNeedsPadding() ? 1 : 0);
  }

  bool LongGprNeedsPadding() const {
    return kRegistersNeededForLong > 1 &&  // only pad when using multiple registers
        kAlignLongOnStack &&               // and when it needs alignment
        (gpr_index_ & 1) == 1;             // counter is odd, see constructor
  }

  bool LongStackNeedsPadding() const {
    return kRegistersNeededForLong > 1 &&  // only pad when using multiple registers
        kAlignLongOnStack &&               // and when it needs 8B alignment
        (stack_entries_ & 1) == 1;         // counter is odd
  }

  void AdvanceLong(uint64_t val) {
    if (HaveLongGpr()) {
      if (LongGprNeedsPadding()) {
        PushGpr(0);
        gpr_index_--;
      }
      if (kRegistersNeededForLong == 1) {
        PushGpr(static_cast<uintptr_t>(val));
      } else {
        PushGpr(static_cast<uintptr_t>(val & 0xFFFFFFFF));
        PushGpr(static_cast<uintptr_t>((val >> 32) & 0xFFFFFFFF));
      }
      gpr_index_ -= kRegistersNeededForLong;
    } else {
      if (LongStackNeedsPadding()) {
        PushStack(0);
        stack_entries_++;
      }
      if (kRegistersNeededForLong == 1) {
        PushStack(static_cast<uintptr_t>(val));
        stack_entries_++;
      } else {
        PushStack(static_cast<uintptr_t>(val & 0xFFFFFFFF));
        PushStack(static_cast<uintptr_t>((val >> 32) & 0xFFFFFFFF));
        stack_entries_ += 2;
      }
      gpr_index_ = 0;
    }
  }

  bool HaveFloatFpr() const {
    return fpr_index_ > 0;
  }

  void AdvanceFloat(float val) {
    if (kNativeSoftFloatAbi) {
      AdvanceInt(bit_cast<uint32_t, float>(val));
    } else {
      if (HaveFloatFpr()) {
        fpr_index_--;
        if (kRegistersNeededForDouble == 1) {
          if (kMultiFPRegistersWidened) {
            PushFpr8(bit_cast<uint64_t, double>(val));
          } else {
            // No widening, just use the bits.
            PushFpr8(static_cast<uint64_t>(bit_cast<uint32_t, float>(val)));
          }
        } else {
          PushFpr4(val);
        }
      } else {
        stack_entries_++;
        if (kRegistersNeededForDouble == 1 && kMultiFPRegistersWidened) {
          // Need to widen before storing: Note the "double" in the template instantiation.
          // Note: We need to jump through those hoops to make the compiler happy.
          DCHECK_EQ(sizeof(uintptr_t), sizeof(uint64_t));
          PushStack(static_cast<uintptr_t>(bit_cast<uint64_t, double>(val)));
        } else {
          PushStack(static_cast<uintptr_t>(bit_cast<uint32_t, float>(val)));
        }
        fpr_index_ = 0;
      }
    }
  }

  bool HaveDoubleFpr() const {
    return fpr_index_ >= kRegistersNeededForDouble + (DoubleFprNeedsPadding() ? 1 : 0);
  }

  bool DoubleFprNeedsPadding() const {
    return kRegistersNeededForDouble > 1 &&  // only pad when using multiple registers
        kAlignDoubleOnStack &&               // and when it needs alignment
        (fpr_index_ & 1) == 1;               // counter is odd, see constructor
  }

  bool DoubleStackNeedsPadding() const {
    return kRegistersNeededForDouble > 1 &&  // only pad when using multiple registers
        kAlignDoubleOnStack &&               // and when it needs 8B alignment
        (stack_entries_ & 1) == 1;           // counter is odd
  }

  void AdvanceDouble(uint64_t val) {
    if (kNativeSoftFloatAbi) {
      AdvanceLong(val);
    } else {
      if (HaveDoubleFpr()) {
        if (DoubleFprNeedsPadding()) {
          PushFpr4(0);
          fpr_index_--;
        }
        PushFpr8(val);
        fpr_index_ -= kRegistersNeededForDouble;
      } else {
        if (DoubleStackNeedsPadding()) {
          PushStack(0);
          stack_entries_++;
        }
        if (kRegistersNeededForDouble == 1) {
          PushStack(static_cast<uintptr_t>(val));
          stack_entries_++;
        } else {
          PushStack(static_cast<uintptr_t>(val & 0xFFFFFFFF));
          PushStack(static_cast<uintptr_t>((val >> 32) & 0xFFFFFFFF));
          stack_entries_ += 2;
        }
        fpr_index_ = 0;
      }
    }
  }

  uint32_t GetStackEntries() const {
    return stack_entries_;
  }

  uint32_t GetNumberOfUsedGprs() const {
    return kNumNativeGprArgs - gpr_index_;
  }

  uint32_t GetNumberOfUsedFprs() const {
    return kNumNativeFprArgs - fpr_index_;
  }

 private:
  void PushGpr(uintptr_t val) {
    delegate_->PushGpr(val);
  }
  void PushFpr4(float val) {
    delegate_->PushFpr4(val);
  }
  void PushFpr8(uint64_t val) {
    delegate_->PushFpr8(val);
  }
  void PushStack(uintptr_t val) {
    delegate_->PushStack(val);
  }
  uintptr_t PushHandle(mirror::Object* ref) REQUIRES_SHARED(Locks::mutator_lock_) {
    return delegate_->PushHandle(ref);
  }

  uint32_t gpr_index_;      // Number of free GPRs.
  uint32_t fpr_index_;      // Number of free FPRs.
  uint32_t stack_entries_;  // Number of used stack slots; each entry is one uintptr_t-sized slot
                            // (see ComputeNativeCallFrameSize::GetStackSize()).
  T* const delegate_;       // What Push implementation gets called.
};

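// Worked example of the alignment logic above (arm: kNumNativeGprArgs == 4,
// kRegistersNeededForLong == 2, kAlignLongOnStack == true): after one AdvanceInt() the counter
// gpr_index_ is 3 (odd), so the next long would start in an odd register. LongGprNeedsPadding()
// is therefore true, and AdvanceLong() pushes a zero padding GPR first, landing the long in the
// even/odd register pair r2/r3.
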
// Computes the sizes of register stacks and call stack area. Handling of references can be
// extended in subclasses.
//
// To handle native pointers, use "L" in the shorty for an object reference, which simulates
// them with handles.
class ComputeNativeCallFrameSize {
 public:
  ComputeNativeCallFrameSize() : num_stack_entries_(0) {}

  virtual ~ComputeNativeCallFrameSize() {}

  uint32_t GetStackSize() const {
    return num_stack_entries_ * sizeof(uintptr_t);
  }

  uint8_t* LayoutCallStack(uint8_t* sp8) const {
    sp8 -= GetStackSize();
    // Align by kStackAlignment.
    sp8 = reinterpret_cast<uint8_t*>(RoundDown(reinterpret_cast<uintptr_t>(sp8), kStackAlignment));
    return sp8;
  }

  uint8_t* LayoutCallRegisterStacks(uint8_t* sp8, uintptr_t** start_gpr, uint32_t** start_fpr)
      const {
    // Assumption is OK right now, as we have soft-float arm.
    size_t fregs = BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>::kNumNativeFprArgs;
    sp8 -= fregs * sizeof(uintptr_t);
    *start_fpr = reinterpret_cast<uint32_t*>(sp8);
    size_t iregs = BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>::kNumNativeGprArgs;
    sp8 -= iregs * sizeof(uintptr_t);
    *start_gpr = reinterpret_cast<uintptr_t*>(sp8);
    return sp8;
  }

  uint8_t* LayoutNativeCall(uint8_t* sp8, uintptr_t** start_stack, uintptr_t** start_gpr,
                            uint32_t** start_fpr) const {
    // Native call stack.
    sp8 = LayoutCallStack(sp8);
    *start_stack = reinterpret_cast<uintptr_t*>(sp8);

    // Put fprs and gprs below.
    sp8 = LayoutCallRegisterStacks(sp8, start_gpr, start_fpr);

    // Return the new bottom.
    return sp8;
  }

  virtual void WalkHeader(
      BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>* sm ATTRIBUTE_UNUSED)
      REQUIRES_SHARED(Locks::mutator_lock_) {
  }

  void Walk(const char* shorty, uint32_t shorty_len) REQUIRES_SHARED(Locks::mutator_lock_) {
    BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize> sm(this);

    WalkHeader(&sm);

    for (uint32_t i = 1; i < shorty_len; ++i) {
      Primitive::Type cur_type_ = Primitive::GetType(shorty[i]);
      switch (cur_type_) {
        case Primitive::kPrimNot:
          // TODO: fix abuse of mirror types.
          sm.AdvanceHandleScope(
              reinterpret_cast<mirror::Object*>(0x12345678));
          break;

        case Primitive::kPrimBoolean:
        case Primitive::kPrimByte:
        case Primitive::kPrimChar:
        case Primitive::kPrimShort:
        case Primitive::kPrimInt:
          sm.AdvanceInt(0);
          break;
        case Primitive::kPrimFloat:
          sm.AdvanceFloat(0);
          break;
        case Primitive::kPrimDouble:
          sm.AdvanceDouble(0);
          break;
        case Primitive::kPrimLong:
          sm.AdvanceLong(0);
          break;
        default:
          LOG(FATAL) << "Unexpected type: " << cur_type_ << " in " << shorty;
          UNREACHABLE();
      }
    }

    num_stack_entries_ = sm.GetStackEntries();
  }

  void PushGpr(uintptr_t /* val */) {
    // not optimizing registers, yet
  }

  void PushFpr4(float /* val */) {
    // not optimizing registers, yet
  }

  void PushFpr8(uint64_t /* val */) {
    // not optimizing registers, yet
  }

  void PushStack(uintptr_t /* val */) {
    // counting is already done in the superclass
  }

  virtual uintptr_t PushHandle(mirror::Object* /* ptr */) {
    return reinterpret_cast<uintptr_t>(nullptr);
  }

 protected:
  uint32_t num_stack_entries_;
};

class ComputeGenericJniFrameSize FINAL : public ComputeNativeCallFrameSize {
 public:
  explicit ComputeGenericJniFrameSize(bool critical_native)
      : num_handle_scope_references_(0), critical_native_(critical_native) {}

  // Lays out the callee-save frame. Assumes that the not-yet-fixed-up frame corresponding to
  // RefsAndArgs is at *m = sp. Will update *m to point to the bottom of the save frame.
  //
  // Note: assumes ComputeAll() has been run before. The resulting layout is sketched below.
  void LayoutCalleeSaveFrame(Thread* self, ArtMethod*** m, void* sp, HandleScope** handle_scope)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    ArtMethod* method = **m;

    DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), kRuntimePointerSize);

    uint8_t* sp8 = reinterpret_cast<uint8_t*>(sp);

    // First, fix up the layout of the callee-save frame.
    // We have to squeeze in the HandleScope, and relocate the method pointer.

    // "Free" the slot for the method.
    sp8 += sizeof(void*);  // In the callee-save frame we use a full pointer.

    // Under the callee saves put handle scope and new method stack reference.
    size_t handle_scope_size = HandleScope::SizeOf(num_handle_scope_references_);
    size_t scope_and_method = handle_scope_size + sizeof(ArtMethod*);

    sp8 -= scope_and_method;
    // Align by kStackAlignment.
    sp8 = reinterpret_cast<uint8_t*>(RoundDown(reinterpret_cast<uintptr_t>(sp8), kStackAlignment));

    uint8_t* sp8_table = sp8 + sizeof(ArtMethod*);
    *handle_scope = HandleScope::Create(sp8_table, self->GetTopHandleScope(),
                                        num_handle_scope_references_);

    // Add a slot for the method pointer, and fill it. Fix the pointer-pointer given to us.
    uint8_t* method_pointer = sp8;
    auto** new_method_ref = reinterpret_cast<ArtMethod**>(method_pointer);
    *new_method_ref = method;
    *m = new_method_ref;
  }
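
  // Resulting layout, top of stack first (a sketch; compare the per-architecture frame diagrams
  // at the top of this file):
  //   | callee-save registers |
  //   | HandleScope           |   (absorbs the freed old ArtMethod* slot)
  //   | ArtMethod*            |   <- *m, aligned down to kStackAlignment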

  // Adds space for the cookie. Note: may leave stack unaligned.
  void LayoutCookie(uint8_t** sp) const {
    // Reference cookie and padding.
    *sp -= 8;
  }

  // Re-layout the callee-save frame (insert a handle-scope). Then add space for the cookie.
  // Returns the new bottom. Note: this may be unaligned.
  uint8_t* LayoutJNISaveFrame(Thread* self, ArtMethod*** m, void* sp, HandleScope** handle_scope)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    // First, fix up the layout of the callee-save frame.
    // We have to squeeze in the HandleScope, and relocate the method pointer.
    LayoutCalleeSaveFrame(self, m, sp, handle_scope);

    // The bottom of the callee-save frame is now where the method is, *m.
    uint8_t* sp8 = reinterpret_cast<uint8_t*>(*m);

    // Add space for cookie.
    LayoutCookie(&sp8);

    return sp8;
  }

  // WARNING: After this, *sp won't be pointing to the method anymore!
  uint8_t* ComputeLayout(Thread* self, ArtMethod*** m, const char* shorty, uint32_t shorty_len,
                         HandleScope** handle_scope, uintptr_t** start_stack,
                         uintptr_t** start_gpr, uint32_t** start_fpr)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    Walk(shorty, shorty_len);

    // JNI part.
    uint8_t* sp8 = LayoutJNISaveFrame(self, m, reinterpret_cast<void*>(*m), handle_scope);

    sp8 = LayoutNativeCall(sp8, start_stack, start_gpr, start_fpr);

    // Return the new bottom.
    return sp8;
  }

  uintptr_t PushHandle(mirror::Object* /* ptr */) OVERRIDE;

  // Add JNIEnv* and jobj/jclass before the shorty-derived elements.
  void WalkHeader(BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>* sm) OVERRIDE
      REQUIRES_SHARED(Locks::mutator_lock_);

 private:
  uint32_t num_handle_scope_references_;
  const bool critical_native_;
};

uintptr_t ComputeGenericJniFrameSize::PushHandle(mirror::Object* /* ptr */) {
  num_handle_scope_references_++;
  return reinterpret_cast<uintptr_t>(nullptr);
}

void ComputeGenericJniFrameSize::WalkHeader(
    BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>* sm) {
  // First 2 parameters are always excluded for @CriticalNative.
  if (UNLIKELY(critical_native_)) {
    return;
  }

  // JNIEnv
  sm->AdvancePointer(nullptr);

  // Class object or this as first argument
  sm->AdvanceHandleScope(reinterpret_cast<mirror::Object*>(0x12345678));
}

// Class to push values to three separate regions. Used to fill the native call part. Adheres to
// the template requirements of BuildNativeCallFrameStateMachine.
class FillNativeCall {
 public:
  FillNativeCall(uintptr_t* gpr_regs, uint32_t* fpr_regs, uintptr_t* stack_args) :
      cur_gpr_reg_(gpr_regs), cur_fpr_reg_(fpr_regs), cur_stack_arg_(stack_args) {}

  virtual ~FillNativeCall() {}

  void Reset(uintptr_t* gpr_regs, uint32_t* fpr_regs, uintptr_t* stack_args) {
    cur_gpr_reg_ = gpr_regs;
    cur_fpr_reg_ = fpr_regs;
    cur_stack_arg_ = stack_args;
  }

  void PushGpr(uintptr_t val) {
    *cur_gpr_reg_ = val;
    cur_gpr_reg_++;
  }

  void PushFpr4(float val) {
    *cur_fpr_reg_ = val;
    cur_fpr_reg_++;
  }

  void PushFpr8(uint64_t val) {
    // The FPR area is addressed in 32-bit slots, so a 64-bit value occupies two of them.
    uint64_t* tmp = reinterpret_cast<uint64_t*>(cur_fpr_reg_);
    *tmp = val;
    cur_fpr_reg_ += 2;
  }

  void PushStack(uintptr_t val) {
    *cur_stack_arg_ = val;
    cur_stack_arg_++;
  }

  virtual uintptr_t PushHandle(mirror::Object*) REQUIRES_SHARED(Locks::mutator_lock_) {
    LOG(FATAL) << "(Non-JNI) Native call does not use handles.";
    UNREACHABLE();
  }

 private:
  uintptr_t* cur_gpr_reg_;
  uint32_t* cur_fpr_reg_;
  uintptr_t* cur_stack_arg_;
};

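// Note on usage: the three cursors handed to FillNativeCall are the outputs of
// ComputeNativeCallFrameSize::LayoutNativeCall(); gpr_regs/fpr_regs point at the
// register-staging areas and stack_args at the native out-args area. Per the trampoline comment
// further below, the staged register values form a mini stack that is later pushed into the
// actual registers before the native call.
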
// Visits arguments on the stack, placing them into a region lower down the stack for the benefit
// of transitioning into native code.
class BuildGenericJniFrameVisitor FINAL : public QuickArgumentVisitor {
 public:
  BuildGenericJniFrameVisitor(Thread* self,
                              bool is_static,
                              bool critical_native,
                              const char* shorty,
                              uint32_t shorty_len,
                              ArtMethod*** sp)
      : QuickArgumentVisitor(*sp, is_static, shorty, shorty_len),
        jni_call_(nullptr, nullptr, nullptr, nullptr, critical_native),
        sm_(&jni_call_) {
    ComputeGenericJniFrameSize fsc(critical_native);
    uintptr_t* start_gpr_reg;
    uint32_t* start_fpr_reg;
    uintptr_t* start_stack_arg;
    bottom_of_used_area_ = fsc.ComputeLayout(self, sp, shorty, shorty_len,
                                             &handle_scope_,
                                             &start_stack_arg,
                                             &start_gpr_reg, &start_fpr_reg);

    jni_call_.Reset(start_gpr_reg, start_fpr_reg, start_stack_arg, handle_scope_);

    // First 2 parameters are always excluded for CriticalNative methods.
    if (LIKELY(!critical_native)) {
      // jni environment is always first argument
      sm_.AdvancePointer(self->GetJniEnv());

      if (is_static) {
        sm_.AdvanceHandleScope((**sp)->GetDeclaringClass());
      }  // else "this" reference is already handled by QuickArgumentVisitor.
    }
  }

  void Visit() REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE;

  void FinalizeHandleScope(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_);

  StackReference<mirror::Object>* GetFirstHandleScopeEntry() {
    return handle_scope_->GetHandle(0).GetReference();
  }

  jobject GetFirstHandleScopeJObject() const REQUIRES_SHARED(Locks::mutator_lock_) {
    return handle_scope_->GetHandle(0).ToJObject();
  }

  void* GetBottomOfUsedArea() const {
    return bottom_of_used_area_;
  }

 private:
  // A class to fill a JNI call. Adds reference/handle-scope management to FillNativeCall.
  class FillJniCall FINAL : public FillNativeCall {
   public:
    FillJniCall(uintptr_t* gpr_regs, uint32_t* fpr_regs, uintptr_t* stack_args,
                HandleScope* handle_scope, bool critical_native)
        : FillNativeCall(gpr_regs, fpr_regs, stack_args),
          handle_scope_(handle_scope),
          cur_entry_(0),
          critical_native_(critical_native) {}

    uintptr_t PushHandle(mirror::Object* ref) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);

    void Reset(uintptr_t* gpr_regs, uint32_t* fpr_regs, uintptr_t* stack_args, HandleScope* scope) {
      FillNativeCall::Reset(gpr_regs, fpr_regs, stack_args);
      handle_scope_ = scope;
      cur_entry_ = 0U;
    }

    void ResetRemainingScopeSlots() REQUIRES_SHARED(Locks::mutator_lock_) {
      // Initialize padding entries.
      size_t expected_slots = handle_scope_->NumberOfReferences();
      while (cur_entry_ < expected_slots) {
        handle_scope_->GetMutableHandle(cur_entry_++).Assign(nullptr);
      }

      if (!critical_native_) {
        // Non-critical natives have at least the self class (jclass) or this (jobject).
        DCHECK_NE(cur_entry_, 0U);
      }
    }

    bool CriticalNative() const {
      return critical_native_;
    }

   private:
    HandleScope* handle_scope_;
    size_t cur_entry_;
    const bool critical_native_;
  };

  HandleScope* handle_scope_;
  FillJniCall jni_call_;
  void* bottom_of_used_area_;

  BuildNativeCallFrameStateMachine<FillJniCall> sm_;

  DISALLOW_COPY_AND_ASSIGN(BuildGenericJniFrameVisitor);
};

uintptr_t BuildGenericJniFrameVisitor::FillJniCall::PushHandle(mirror::Object* ref) {
  uintptr_t tmp;
  MutableHandle<mirror::Object> h = handle_scope_->GetMutableHandle(cur_entry_);
  h.Assign(ref);
  tmp = reinterpret_cast<uintptr_t>(h.ToJObject());
  cur_entry_++;
  return tmp;
}

void BuildGenericJniFrameVisitor::Visit() {
  Primitive::Type type = GetParamPrimitiveType();
  switch (type) {
    case Primitive::kPrimLong: {
      jlong long_arg;
      if (IsSplitLongOrDouble()) {
        long_arg = ReadSplitLongParam();
      } else {
        long_arg = *reinterpret_cast<jlong*>(GetParamAddress());
      }
      sm_.AdvanceLong(long_arg);
      break;
    }
    case Primitive::kPrimDouble: {
      uint64_t double_arg;
      if (IsSplitLongOrDouble()) {
        // Read into a uint64_t so that we don't cast to a double.
        double_arg = ReadSplitLongParam();
      } else {
        double_arg = *reinterpret_cast<uint64_t*>(GetParamAddress());
      }
      sm_.AdvanceDouble(double_arg);
      break;
    }
    case Primitive::kPrimNot: {
      StackReference<mirror::Object>* stack_ref =
          reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress());
      sm_.AdvanceHandleScope(stack_ref->AsMirrorPtr());
      break;
    }
    case Primitive::kPrimFloat:
      sm_.AdvanceFloat(*reinterpret_cast<float*>(GetParamAddress()));
      break;
    case Primitive::kPrimBoolean:  // Fall-through.
    case Primitive::kPrimByte:     // Fall-through.
    case Primitive::kPrimChar:     // Fall-through.
    case Primitive::kPrimShort:    // Fall-through.
    case Primitive::kPrimInt:      // Fall-through.
      sm_.AdvanceInt(*reinterpret_cast<jint*>(GetParamAddress()));
      break;
    case Primitive::kPrimVoid:
      LOG(FATAL) << "UNREACHABLE";
      UNREACHABLE();
  }
}

void BuildGenericJniFrameVisitor::FinalizeHandleScope(Thread* self) {
  // Clear out rest of the scope.
  jni_call_.ResetRemainingScopeSlots();
  if (!jni_call_.CriticalNative()) {
    // Install HandleScope.
    self->PushHandleScope(handle_scope_);
  }
}

#if defined(__arm__) || defined(__aarch64__)
extern "C" const void* artFindNativeMethod();
#else
extern "C" const void* artFindNativeMethod(Thread* self);
#endif

static uint64_t artQuickGenericJniEndJNIRef(Thread* self,
                                            uint32_t cookie,
                                            bool fast_native ATTRIBUTE_UNUSED,
                                            jobject l,
                                            jobject lock) {
  // TODO: add entrypoints for @FastNative returning objects.
  if (lock != nullptr) {
    return reinterpret_cast<uint64_t>(JniMethodEndWithReferenceSynchronized(l, cookie, lock, self));
  } else {
    return reinterpret_cast<uint64_t>(JniMethodEndWithReference(l, cookie, self));
  }
}

static void artQuickGenericJniEndJNINonRef(Thread* self,
                                           uint32_t cookie,
                                           bool fast_native,
                                           jobject lock) {
  if (lock != nullptr) {
    JniMethodEndSynchronized(cookie, lock, self);
    // Ignore "fast_native" here because synchronized functions aren't very fast.
  } else {
    if (UNLIKELY(fast_native)) {
      JniMethodFastEnd(cookie, self);
    } else {
      JniMethodEnd(cookie, self);
    }
  }
}

/*
 * Initializes an alloca region assumed to be directly below sp for a native call:
 * Create a HandleScope and call stack, and fill a mini stack with values to be pushed to
 * registers. The final element on the stack is a pointer to the native code.
 *
 * On entry, the stack has a standard callee-save frame above sp, and an alloca below it.
 * We need to fix this, as the handle scope needs to go into the callee-save frame.
 *
 * The return of this function denotes, as a TwoWordReturn:
 * 1) On success, the native code pointer to call (lo) and the bottom of the used alloca
 *    area (hi).
 * 2) On error, the failure value (zero), with an exception pending.
 */
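/*
 * For illustration, the generic JNI stub consumes the two returned words roughly as follows
 * (a sketch, not the actual per-architecture assembly): the hi word (bottom of the used alloca
 * area) becomes the stack pointer for the native call, and the stub then loads the staged
 * GPR/FPR values and branches to the lo word (the native code).
 */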
extern "C" TwoWordReturn artQuickGenericJniTrampoline(Thread* self, ArtMethod** sp)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  // Note: We cannot walk the stack properly until fixed up below.
  ArtMethod* called = *sp;
  DCHECK(called->IsNative()) << called->PrettyMethod(true);
  Runtime* runtime = Runtime::Current();
  jit::Jit* jit = runtime->GetJit();
  if (jit != nullptr) {
    jit->AddSamples(self, called, 1u, /*with_backedges*/ false);
  }
  uint32_t shorty_len = 0;
  const char* shorty = called->GetShorty(&shorty_len);
  bool critical_native = called->IsCriticalNative();
  bool fast_native = called->IsFastNative();
  bool normal_native = !critical_native && !fast_native;

  // Run the visitor and update sp.
  BuildGenericJniFrameVisitor visitor(self,
                                      called->IsStatic(),
                                      critical_native,
                                      shorty,
                                      shorty_len,
                                      &sp);
  {
    ScopedAssertNoThreadSuspension sants(__FUNCTION__);
    visitor.VisitArguments();
    // FinalizeHandleScope pushes the handle scope on the thread.
    visitor.FinalizeHandleScope(self);
  }

  // Fix up managed-stack things in Thread. After this we can walk the stack.
  self->SetTopOfStackTagged(sp);

  self->VerifyStack();

  uint32_t cookie;
  uint32_t* sp32;
  // Skip calling JniMethodStart for @CriticalNative.
  if (LIKELY(!critical_native)) {
    // Start JNI, save the cookie.
    if (called->IsSynchronized()) {
      DCHECK(normal_native) << " @FastNative and synchronized are not supported";
      cookie = JniMethodStartSynchronized(visitor.GetFirstHandleScopeJObject(), self);
      if (self->IsExceptionPending()) {
        self->PopHandleScope();
        // The failure value denotes an error.
        return GetTwoWordFailureValue();
      }
    } else {
      if (fast_native) {
        cookie = JniMethodFastStart(self);
      } else {
        DCHECK(normal_native);
        cookie = JniMethodStart(self);
      }
    }
    sp32 = reinterpret_cast<uint32_t*>(sp);
    *(sp32 - 1) = cookie;
  }

  // Retrieve the stored native code.
  void const* nativeCode = called->GetEntryPointFromJni();

  // There are two cases for the content of nativeCode:
  // 1) Pointer to the native function.
  // 2) Pointer to the trampoline for native code binding.
  // In the second case, we need to execute the binding and continue with the actual native
  // function pointer.
  DCHECK(nativeCode != nullptr);
  if (nativeCode == GetJniDlsymLookupStub()) {
#if defined(__arm__) || defined(__aarch64__)
    nativeCode = artFindNativeMethod();
#else
    nativeCode = artFindNativeMethod(self);
#endif

    if (nativeCode == nullptr) {
      DCHECK(self->IsExceptionPending());  // There should be an exception pending now.

      // @CriticalNative calls do not need to call back into JniMethodEnd.
      if (LIKELY(!critical_native)) {
        // End JNI, as the assembly will move to deliver the exception.
        jobject lock = called->IsSynchronized() ? visitor.GetFirstHandleScopeJObject() : nullptr;
        if (shorty[0] == 'L') {
          artQuickGenericJniEndJNIRef(self, cookie, fast_native, nullptr, lock);
        } else {
          artQuickGenericJniEndJNINonRef(self, cookie, fast_native, lock);
        }
      }

      return GetTwoWordFailureValue();
    }
    // Note that the native code pointer will be automatically set by artFindNativeMethod().
  }

#if defined(__mips__) && !defined(__LP64__)
  // On MIPS32 if the first two arguments are floating-point, we need to know their types
  // so that art_quick_generic_jni_trampoline can correctly extract them from the stack
  // and load them into floating-point registers.
  // Possible arrangements of first two floating-point arguments on the stack (32-bit FPU
  // view):
  // (1)
  //  | DOUBLE        | DOUBLE        | other args, if any
  //  |  F12  |  F13  |  F14  |  F15  |
  //  |  SP+0 |  SP+4 |  SP+8 | SP+12 | SP+16
  // (2)
  //  | DOUBLE        | FLOAT | (PAD) | other args, if any
  //  |  F12  |  F13  |  F14  |       |
  //  |  SP+0 |  SP+4 |  SP+8 | SP+12 | SP+16
  // (3)
  //  | FLOAT | (PAD) | DOUBLE        | other args, if any
  //  |  F12  |       |  F14  |  F15  |
  //  |  SP+0 |  SP+4 |  SP+8 | SP+12 | SP+16
  // (4)
  //  | FLOAT | FLOAT | other args, if any
  //  |  F12  |  F14  |
  //  |  SP+0 |  SP+4 | SP+8
  // As you can see, only the last case (4) is special. In all others we can just
  // load F12/F13 and F14/F15 in the same manner.
  // Set bit 0 of the native code address to 1 in this case (valid code addresses
  // are always a multiple of 4 on MIPS32, so we have 2 spare bits available).
  if (nativeCode != nullptr &&
      shorty != nullptr &&
      shorty_len >= 3 &&
      shorty[1] == 'F' &&
      shorty[2] == 'F') {
    nativeCode = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(nativeCode) | 1);
  }
#endif

  // Return native code addr(lo) and bottom of alloca address(hi).
  return GetTwoWordSuccessValue(reinterpret_cast<uintptr_t>(visitor.GetBottomOfUsedArea()),
                                reinterpret_cast<uintptr_t>(nativeCode));
}

// Defined in quick_jni_entrypoints.cc.
extern uint64_t GenericJniMethodEnd(Thread* self, uint32_t saved_local_ref_cookie,
                                    jvalue result, uint64_t result_f, ArtMethod* called,
                                    HandleScope* handle_scope);

/*
 * Is called after the native JNI code. Responsible for cleanup (handle scope, saved state) and
 * unlocking.
 */
extern "C" uint64_t artQuickGenericJniEndTrampoline(Thread* self,
                                                    jvalue result,
                                                    uint64_t result_f) {
  // We're here just back from a native call. We don't have the shared mutator lock at this point
  // yet until we call GoToRunnable() later in GenericJniMethodEnd(). Accessing objects or doing
  // anything that requires a mutator lock before that would cause problems as GC may have the
  // exclusive mutator lock and may be moving objects, etc.
  ArtMethod** sp = self->GetManagedStack()->GetTopQuickFrame();
  DCHECK(self->GetManagedStack()->GetTopQuickFrameTag());
  uint32_t* sp32 = reinterpret_cast<uint32_t*>(sp);
  ArtMethod* called = *sp;
  uint32_t cookie = *(sp32 - 1);
  HandleScope* table = reinterpret_cast<HandleScope*>(reinterpret_cast<uint8_t*>(sp) + sizeof(*sp));
  return GenericJniMethodEnd(self, cookie, result, result_f, called, table);
}

// We use TwoWordReturn to optimize scalar returns. We use the hi value for code, and the lo value
// for the method pointer.
//
// It is valid to use this, as at the usage points here (returns from C functions) we are assuming
// to hold the mutator lock (see REQUIRES_SHARED(Locks::mutator_lock_) annotations).
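//
// For example, on a successful dispatch below the returned pair is
// { hi = code entry point, lo = resolved ArtMethod* }; the invoke stub branches to the hi word
// with the lo word installed as the method argument (a sketch of the contract, not stub code).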

template <InvokeType type, bool access_check>
static TwoWordReturn artInvokeCommon(uint32_t method_idx,
                                     ObjPtr<mirror::Object> this_object,
                                     Thread* self,
                                     ArtMethod** sp) {
  ScopedQuickEntrypointChecks sqec(self);
  DCHECK_EQ(*sp, Runtime::Current()->GetCalleeSaveMethod(CalleeSaveType::kSaveRefsAndArgs));
  ArtMethod* caller_method = QuickArgumentVisitor::GetCallingMethod(sp);
  ArtMethod* method = FindMethodFast<type, access_check>(method_idx, this_object, caller_method);
  if (UNLIKELY(method == nullptr)) {
    const DexFile* dex_file = caller_method->GetDeclaringClass()->GetDexCache()->GetDexFile();
    uint32_t shorty_len;
    const char* shorty = dex_file->GetMethodShorty(dex_file->GetMethodId(method_idx), &shorty_len);
    {
      // Remember the args in case a GC happens in FindMethodFromCode.
      ScopedObjectAccessUnchecked soa(self->GetJniEnv());
      RememberForGcArgumentVisitor visitor(sp, type == kStatic, shorty, shorty_len, &soa);
      visitor.VisitArguments();
      method = FindMethodFromCode<type, access_check>(method_idx,
                                                      &this_object,
                                                      caller_method,
                                                      self);
      visitor.FixupReferences();
    }

    if (UNLIKELY(method == nullptr)) {
      CHECK(self->IsExceptionPending());
      return GetTwoWordFailureValue();  // Failure.
    }
  }
  DCHECK(!self->IsExceptionPending());
  const void* code = method->GetEntryPointFromQuickCompiledCode();

  // When we return, the caller will branch to this address, so it had better not be 0!
  DCHECK(code != nullptr) << "Code was null in method: " << method->PrettyMethod()
                          << " location: "
                          << method->GetDexFile()->GetLocation();

  return GetTwoWordSuccessValue(reinterpret_cast<uintptr_t>(code),
                                reinterpret_cast<uintptr_t>(method));
}

// Explicit artInvokeCommon template function declarations to please analysis tool.
#define EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(type, access_check)                                  \
  template REQUIRES_SHARED(Locks::mutator_lock_)                                                  \
  TwoWordReturn artInvokeCommon<type, access_check>(                                              \
      uint32_t method_idx, ObjPtr<mirror::Object> this_object, Thread* self, ArtMethod** sp)

EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kVirtual, false);
EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kVirtual, true);
EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kInterface, false);
EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kInterface, true);
EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kDirect, false);
EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kDirect, true);
EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kStatic, false);
EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kStatic, true);
EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kSuper, false);
EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kSuper, true);
#undef EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL

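// For reference, the first expansion above is equivalent to writing out:
//   template REQUIRES_SHARED(Locks::mutator_lock_)
//   TwoWordReturn artInvokeCommon<kVirtual, false>(
//       uint32_t method_idx, ObjPtr<mirror::Object> this_object, Thread* self, ArtMethod** sp);
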
// See comments in runtime_support_asm.S
extern "C" TwoWordReturn artInvokeInterfaceTrampolineWithAccessCheck(
    uint32_t method_idx, mirror::Object* this_object, Thread* self, ArtMethod** sp)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  return artInvokeCommon<kInterface, true>(method_idx, this_object, self, sp);
}

extern "C" TwoWordReturn artInvokeDirectTrampolineWithAccessCheck(
    uint32_t method_idx, mirror::Object* this_object, Thread* self, ArtMethod** sp)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  return artInvokeCommon<kDirect, true>(method_idx, this_object, self, sp);
}

extern "C" TwoWordReturn artInvokeStaticTrampolineWithAccessCheck(
    uint32_t method_idx,
    mirror::Object* this_object ATTRIBUTE_UNUSED,
    Thread* self,
    ArtMethod** sp) REQUIRES_SHARED(Locks::mutator_lock_) {
  // For static, this_object is not required and may be random garbage. Don't pass it down so that
  // it doesn't trip the ObjPtr alignment check.
  return artInvokeCommon<kStatic, true>(method_idx, nullptr, self, sp);
}

extern "C" TwoWordReturn artInvokeSuperTrampolineWithAccessCheck(
    uint32_t method_idx, mirror::Object* this_object, Thread* self, ArtMethod** sp)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  return artInvokeCommon<kSuper, true>(method_idx, this_object, self, sp);
}

extern "C" TwoWordReturn artInvokeVirtualTrampolineWithAccessCheck(
    uint32_t method_idx, mirror::Object* this_object, Thread* self, ArtMethod** sp)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  return artInvokeCommon<kVirtual, true>(method_idx, this_object, self, sp);
}

// Helper function for art_quick_imt_conflict_trampoline to look up the interface method.
extern "C" ArtMethod* artLookupResolvedMethod(uint32_t method_index, ArtMethod* referrer)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  ScopedAssertNoThreadSuspension ants(__FUNCTION__);
  DCHECK(!referrer->IsProxyMethod());
  ArtMethod* result = Runtime::Current()->GetClassLinker()->LookupResolvedMethod(
      method_index, referrer->GetDexCache(), referrer->GetClassLoader());
  DCHECK(result == nullptr ||
         result->GetDeclaringClass()->IsInterface() ||
         result->GetDeclaringClass() ==
             WellKnownClasses::ToClass(WellKnownClasses::java_lang_Object))
      << result->PrettyMethod();
  return result;
}

// Determine target of interface dispatch. The interface method and this object are known non-null.
// The interface method is the method returned by the dex cache in the conflict trampoline.
extern "C" TwoWordReturn artInvokeInterfaceTrampoline(ArtMethod* interface_method,
                                                      mirror::Object* raw_this_object,
                                                      Thread* self,
                                                      ArtMethod** sp)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  ScopedQuickEntrypointChecks sqec(self);
  StackHandleScope<2> hs(self);
  Handle<mirror::Object> this_object = hs.NewHandle(raw_this_object);
  Handle<mirror::Class> cls = hs.NewHandle(this_object->GetClass());

  ArtMethod* caller_method = QuickArgumentVisitor::GetCallingMethod(sp);
  ArtMethod* method = nullptr;
  ImTable* imt = cls->GetImt(kRuntimePointerSize);

  if (UNLIKELY(interface_method == nullptr)) {
    // The interface method is unresolved, so resolve it in the dex file of the caller.
    // Fetch the dex_method_idx of the target interface method from the caller.
    uint32_t dex_method_idx;
    uint32_t dex_pc = QuickArgumentVisitor::GetCallingDexPc(sp);
    const Instruction& instr = caller_method->DexInstructions().InstructionAt(dex_pc);
    Instruction::Code instr_code = instr.Opcode();
    DCHECK(instr_code == Instruction::INVOKE_INTERFACE ||
           instr_code == Instruction::INVOKE_INTERFACE_RANGE)
        << "Unexpected call into interface trampoline: " << instr.DumpString(nullptr);
    if (instr_code == Instruction::INVOKE_INTERFACE) {
      dex_method_idx = instr.VRegB_35c();
    } else {
      DCHECK_EQ(instr_code, Instruction::INVOKE_INTERFACE_RANGE);
      dex_method_idx = instr.VRegB_3rc();
    }

    const DexFile& dex_file = caller_method->GetDeclaringClass()->GetDexFile();
    uint32_t shorty_len;
    const char* shorty = dex_file.GetMethodShorty(dex_file.GetMethodId(dex_method_idx),
                                                  &shorty_len);
    {
      // Remember the args in case a GC happens in ClassLinker::ResolveMethod().
      ScopedObjectAccessUnchecked soa(self->GetJniEnv());
      RememberForGcArgumentVisitor visitor(sp, false, shorty, shorty_len, &soa);
      visitor.VisitArguments();
      ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
      interface_method = class_linker->ResolveMethod<ClassLinker::ResolveMode::kNoChecks>(
          self, dex_method_idx, caller_method, kInterface);
      visitor.FixupReferences();
    }

    if (UNLIKELY(interface_method == nullptr)) {
      CHECK(self->IsExceptionPending());
      return GetTwoWordFailureValue();  // Failure.
    }
  }

  DCHECK(!interface_method->IsRuntimeMethod());
  // Look whether we have a match in the ImtConflictTable.
  uint32_t imt_index = ImTable::GetImtIndex(interface_method);
  ArtMethod* conflict_method = imt->Get(imt_index, kRuntimePointerSize);
  if (LIKELY(conflict_method->IsRuntimeMethod())) {
    ImtConflictTable* current_table = conflict_method->GetImtConflictTable(kRuntimePointerSize);
    DCHECK(current_table != nullptr);
    method = current_table->Lookup(interface_method, kRuntimePointerSize);
  } else {
    // It seems we aren't really a conflict method!
    if (kIsDebugBuild) {
      ArtMethod* m = cls->FindVirtualMethodForInterface(interface_method, kRuntimePointerSize);
      CHECK_EQ(conflict_method, m)
          << interface_method->PrettyMethod() << " / " << conflict_method->PrettyMethod()
          << " / " << ArtMethod::PrettyMethod(m) << " / " << cls->PrettyClass();
    }
    method = conflict_method;
  }
  if (method != nullptr) {
    return GetTwoWordSuccessValue(
        reinterpret_cast<uintptr_t>(method->GetEntryPointFromQuickCompiledCode()),
        reinterpret_cast<uintptr_t>(method));
  }

  // No match, use the IfTable.
  method = cls->FindVirtualMethodForInterface(interface_method, kRuntimePointerSize);
  if (UNLIKELY(method == nullptr)) {
    ThrowIncompatibleClassChangeErrorClassForInterfaceDispatch(
        interface_method, this_object.Get(), caller_method);
    return GetTwoWordFailureValue();  // Failure.
  }

  // We arrive here if we have found an implementation, and it is not in the ImtConflictTable.
  // We create a new table with the new pair { interface_method, method }.
  DCHECK(conflict_method->IsRuntimeMethod());
  ArtMethod* new_conflict_method = Runtime::Current()->GetClassLinker()->AddMethodToConflictTable(
      cls.Get(),
      conflict_method,
      interface_method,
      method,
      /*force_new_conflict_method*/false);
  if (new_conflict_method != conflict_method) {
    // Update the IMT if we create a new conflict method. No fence needed here, as the
    // data is consistent.
    imt->Set(imt_index,
             new_conflict_method,
             kRuntimePointerSize);
  }

  const void* code = method->GetEntryPointFromQuickCompiledCode();

  // When we return, the caller will branch to this address, so it had better not be 0!
  DCHECK(code != nullptr) << "Code was null in method: " << method->PrettyMethod()
                          << " location: " << method->GetDexFile()->GetLocation();

  return GetTwoWordSuccessValue(reinterpret_cast<uintptr_t>(code),
                                reinterpret_cast<uintptr_t>(method));
}

// Returns the shorty type so the caller can determine how to put |result| into the expected
// registers. The shorty type is statically known, so the compiler could in principle call a
// different flavor of this code path per shorty type, though this would require a different
// entry point for each type.
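//
// For illustration, the calling stub can dispatch on the returned character, roughly
// (a sketch, not the actual per-architecture stub logic):
//   'V'        -> nothing to move;
//   'F' or 'D' -> load |result| into the floating-point return register;
//   otherwise  -> load |result| into the core return register(s).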
extern "C" uintptr_t artInvokePolymorphic(
    JValue* result,
    mirror::Object* raw_receiver,
    Thread* self,
    ArtMethod** sp)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  ScopedQuickEntrypointChecks sqec(self);
  DCHECK_EQ(*sp, Runtime::Current()->GetCalleeSaveMethod(CalleeSaveType::kSaveRefsAndArgs));

  // Start new JNI local reference state.
  JNIEnvExt* env = self->GetJniEnv();
  ScopedObjectAccessUnchecked soa(env);
  ScopedJniEnvLocalRefState env_state(env);
  const char* old_cause = self->StartAssertNoThreadSuspension("Making stack arguments safe.");

  // From the instruction, get the |callsite_shorty| and expose arguments on the stack to the GC.
  ArtMethod* caller_method = QuickArgumentVisitor::GetCallingMethod(sp);
  uint32_t dex_pc = QuickArgumentVisitor::GetCallingDexPc(sp);
  const Instruction& inst = caller_method->DexInstructions().InstructionAt(dex_pc);
  DCHECK(inst.Opcode() == Instruction::INVOKE_POLYMORPHIC ||
         inst.Opcode() == Instruction::INVOKE_POLYMORPHIC_RANGE);
  const uint32_t proto_idx = inst.VRegH();
  const char* shorty = caller_method->GetDexFile()->GetShorty(proto_idx);
  const size_t shorty_length = strlen(shorty);
  static const bool kMethodIsStatic = false;  // invoke() and invokeExact() are not static.
  RememberForGcArgumentVisitor gc_visitor(sp, kMethodIsStatic, shorty, shorty_length, &soa);
  gc_visitor.VisitArguments();

  // Wrap raw_receiver in a Handle for safety.
  StackHandleScope<3> hs(self);
  Handle<mirror::Object> receiver_handle(hs.NewHandle(raw_receiver));
  raw_receiver = nullptr;
  self->EndAssertNoThreadSuspension(old_cause);

  // Resolve method.
  ClassLinker* linker = Runtime::Current()->GetClassLinker();
  ArtMethod* resolved_method = linker->ResolveMethod<ClassLinker::ResolveMode::kCheckICCEAndIAE>(
      self, inst.VRegB(), caller_method, kVirtual);

  if (UNLIKELY(receiver_handle.IsNull())) {
    ThrowNullPointerExceptionForMethodAccess(resolved_method, InvokeType::kVirtual);
    return static_cast<uintptr_t>('V');
  }

  // TODO(oth): Ensure this path isn't taken for VarHandle accessors (b/65872996).
  DCHECK_EQ(resolved_method->GetDeclaringClass(),
            WellKnownClasses::ToClass(WellKnownClasses::java_lang_invoke_MethodHandle));

  Handle<mirror::MethodHandle> method_handle(hs.NewHandle(
      ObjPtr<mirror::MethodHandle>::DownCast(MakeObjPtr(receiver_handle.Get()))));

  Handle<mirror::MethodType> method_type(
      hs.NewHandle(linker->ResolveMethodType(self, proto_idx, caller_method)));

  // This implies we couldn't resolve one or more types in this method handle.
  if (UNLIKELY(method_type.IsNull())) {
    CHECK(self->IsExceptionPending());
    return static_cast<uintptr_t>('V');
  }

  DCHECK_EQ(ArtMethod::NumArgRegisters(shorty) + 1u, (uint32_t)inst.VRegA());
  DCHECK_EQ(resolved_method->IsStatic(), kMethodIsStatic);

  // Fix references before constructing the shadow frame.
  gc_visitor.FixupReferences();

  // Construct shadow frame placing arguments consecutively from |first_arg|.
  const bool is_range = (inst.Opcode() == Instruction::INVOKE_POLYMORPHIC_RANGE);
  const size_t num_vregs = is_range ? inst.VRegA_4rcc() : inst.VRegA_45cc();
  const size_t first_arg = 0;
  ShadowFrameAllocaUniquePtr shadow_frame_unique_ptr =
      CREATE_SHADOW_FRAME(num_vregs, /* link */ nullptr, resolved_method, dex_pc);
  ShadowFrame* shadow_frame = shadow_frame_unique_ptr.get();
  ScopedStackedShadowFramePusher
      frame_pusher(self, shadow_frame, StackedShadowFrameType::kShadowFrameUnderConstruction);
  BuildQuickShadowFrameVisitor shadow_frame_builder(sp,
                                                    kMethodIsStatic,
                                                    shorty,
                                                    strlen(shorty),
                                                    shadow_frame,
                                                    first_arg);
  shadow_frame_builder.VisitArguments();

  // Push a transition back into managed code onto the linked list in thread.
  ManagedStack fragment;
  self->PushManagedStackFragment(&fragment);

  // Invoke with range operands, as the shadow frame has the argument registers in
  // consecutive order.
  RangeInstructionOperands operands(first_arg + 1, num_vregs - 1);
  bool is_exact = (jni::EncodeArtMethod(resolved_method) ==
                   WellKnownClasses::java_lang_invoke_MethodHandle_invokeExact);
  bool success = false;
  if (is_exact) {
    success = MethodHandleInvokeExact(self,
                                      *shadow_frame,
                                      method_handle,
                                      method_type,
                                      &operands,
                                      result);
  } else {
    success = MethodHandleInvoke(self,
                                 *shadow_frame,
                                 method_handle,
                                 method_type,
                                 &operands,
                                 result);
  }
  DCHECK(success || self->IsExceptionPending());

  // Pop transition record.
  self->PopManagedStackFragment(fragment);

  return static_cast<uintptr_t>(shorty[0]);
}

}  // namespace art