1 /* 2 * Copyright (C) 2012 The Android Open Source Project 3 * 4 * Licensed under the Apache License, Version 2.0 (the "License"); 5 * you may not use this file except in compliance with the License. 6 * You may obtain a copy of the License at 7 * 8 * http://www.apache.org/licenses/LICENSE-2.0 9 * 10 * Unless required by applicable law or agreed to in writing, software 11 * distributed under the License is distributed on an "AS IS" BASIS, 12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 * See the License for the specific language governing permissions and 14 * limitations under the License. 15 */ 16 17 #include "art_method-inl.h" 18 #include "base/callee_save_type.h" 19 #include "base/enums.h" 20 #include "callee_save_frame.h" 21 #include "common_throws.h" 22 #include "class_root.h" 23 #include "debug_print.h" 24 #include "debugger.h" 25 #include "dex/dex_file-inl.h" 26 #include "dex/dex_file_types.h" 27 #include "dex/dex_instruction-inl.h" 28 #include "dex/method_reference.h" 29 #include "entrypoints/entrypoint_utils-inl.h" 30 #include "entrypoints/quick/callee_save_frame.h" 31 #include "entrypoints/runtime_asm_entrypoints.h" 32 #include "gc/accounting/card_table-inl.h" 33 #include "imt_conflict_table.h" 34 #include "imtable-inl.h" 35 #include "index_bss_mapping.h" 36 #include "instrumentation.h" 37 #include "interpreter/interpreter.h" 38 #include "interpreter/interpreter_common.h" 39 #include "interpreter/shadow_frame-inl.h" 40 #include "jit/jit.h" 41 #include "jit/jit_code_cache.h" 42 #include "linear_alloc.h" 43 #include "method_handles.h" 44 #include "mirror/class-inl.h" 45 #include "mirror/dex_cache-inl.h" 46 #include "mirror/method.h" 47 #include "mirror/method_handle_impl.h" 48 #include "mirror/object-inl.h" 49 #include "mirror/object_array-inl.h" 50 #include "mirror/var_handle.h" 51 #include "oat_file.h" 52 #include "oat_quick_method_header.h" 53 #include "quick_exception_handler.h" 54 #include "runtime.h" 55 #include "scoped_thread_state_change-inl.h" 56 #include "stack.h" 57 #include "thread-inl.h" 58 #include "var_handles.h" 59 #include "well_known_classes.h" 60 61 namespace art { 62 63 // Visits the arguments as saved to the stack by a CalleeSaveType::kRefAndArgs callee save frame. 64 class QuickArgumentVisitor { 65 // Number of bytes for each out register in the caller method's frame. 66 static constexpr size_t kBytesStackArgLocation = 4; 67 // Frame size in bytes of a callee-save frame for RefsAndArgs. 68 static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_FrameSize = 69 RuntimeCalleeSaveFrame::GetFrameSize(CalleeSaveType::kSaveRefsAndArgs); 70 // Offset of first GPR arg. 71 static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 72 RuntimeCalleeSaveFrame::GetGpr1Offset(CalleeSaveType::kSaveRefsAndArgs); 73 // Offset of first FPR arg. 74 static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 75 RuntimeCalleeSaveFrame::GetFpr1Offset(CalleeSaveType::kSaveRefsAndArgs); 76 // Offset of return address. 77 static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_ReturnPcOffset = 78 RuntimeCalleeSaveFrame::GetReturnPcOffset(CalleeSaveType::kSaveRefsAndArgs); 79 #if defined(__arm__) 80 // The callee save frame is pointed to by SP. 81 // | argN | | 82 // | ... | | 83 // | arg4 | | 84 // | arg3 spill | | Caller's frame 85 // | arg2 spill | | 86 // | arg1 spill | | 87 // | Method* | --- 88 // | LR | 89 // | ... 
| 4x6 bytes callee saves 90 // | R3 | 91 // | R2 | 92 // | R1 | 93 // | S15 | 94 // | : | 95 // | S0 | 96 // | | 4x2 bytes padding 97 // | Method* | <- sp 98 static constexpr bool kSplitPairAcrossRegisterAndStack = false; 99 static constexpr bool kAlignPairRegister = true; 100 static constexpr bool kQuickSoftFloatAbi = false; 101 static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = true; 102 static constexpr bool kQuickSkipOddFpRegisters = false; 103 static constexpr size_t kNumQuickGprArgs = 3; 104 static constexpr size_t kNumQuickFprArgs = 16; 105 static constexpr bool kGprFprLockstep = false; 106 static size_t GprIndexToGprOffset(uint32_t gpr_index) { 107 return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA); 108 } 109 #elif defined(__aarch64__) 110 // The callee save frame is pointed to by SP. 111 // | argN | | 112 // | ... | | 113 // | arg4 | | 114 // | arg3 spill | | Caller's frame 115 // | arg2 spill | | 116 // | arg1 spill | | 117 // | Method* | --- 118 // | LR | 119 // | X29 | 120 // | : | 121 // | X20 | 122 // | X7 | 123 // | : | 124 // | X1 | 125 // | D7 | 126 // | : | 127 // | D0 | 128 // | | padding 129 // | Method* | <- sp 130 static constexpr bool kSplitPairAcrossRegisterAndStack = false; 131 static constexpr bool kAlignPairRegister = false; 132 static constexpr bool kQuickSoftFloatAbi = false; // This is a hard float ABI. 133 static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = false; 134 static constexpr bool kQuickSkipOddFpRegisters = false; 135 static constexpr size_t kNumQuickGprArgs = 7; // 7 arguments passed in GPRs. 136 static constexpr size_t kNumQuickFprArgs = 8; // 8 arguments passed in FPRs. 137 static constexpr bool kGprFprLockstep = false; 138 static size_t GprIndexToGprOffset(uint32_t gpr_index) { 139 return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA); 140 } 141 #elif defined(__mips__) && !defined(__LP64__) 142 // The callee save frame is pointed to by SP. 143 // | argN | | 144 // | ... | | 145 // | arg4 | | 146 // | arg3 spill | | Caller's frame 147 // | arg2 spill | | 148 // | arg1 spill | | 149 // | Method* | --- 150 // | RA | 151 // | ... | callee saves 152 // | T1 | arg5 153 // | T0 | arg4 154 // | A3 | arg3 155 // | A2 | arg2 156 // | A1 | arg1 157 // | F19 | 158 // | F18 | f_arg5 159 // | F17 | 160 // | F16 | f_arg4 161 // | F15 | 162 // | F14 | f_arg3 163 // | F13 | 164 // | F12 | f_arg2 165 // | F11 | 166 // | F10 | f_arg1 167 // | F9 | 168 // | F8 | f_arg0 169 // | | padding 170 // | A0/Method* | <- sp 171 static constexpr bool kSplitPairAcrossRegisterAndStack = false; 172 static constexpr bool kAlignPairRegister = true; 173 static constexpr bool kQuickSoftFloatAbi = false; 174 static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = false; 175 static constexpr bool kQuickSkipOddFpRegisters = true; 176 static constexpr size_t kNumQuickGprArgs = 5; // 5 arguments passed in GPRs. 177 static constexpr size_t kNumQuickFprArgs = 12; // 6 arguments passed in FPRs. Floats can be 178 // passed only in even numbered registers and each 179 // double occupies two registers. 180 static constexpr bool kGprFprLockstep = false; 181 static size_t GprIndexToGprOffset(uint32_t gpr_index) { 182 return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA); 183 } 184 #elif defined(__mips__) && defined(__LP64__) 185 // The callee save frame is pointed to by SP. 186 // | argN | | 187 // | ... 
| | 188 // | arg4 | | 189 // | arg3 spill | | Caller's frame 190 // | arg2 spill | | 191 // | arg1 spill | | 192 // | Method* | --- 193 // | RA | 194 // | ... | callee saves 195 // | A7 | arg7 196 // | A6 | arg6 197 // | A5 | arg5 198 // | A4 | arg4 199 // | A3 | arg3 200 // | A2 | arg2 201 // | A1 | arg1 202 // | F19 | f_arg7 203 // | F18 | f_arg6 204 // | F17 | f_arg5 205 // | F16 | f_arg4 206 // | F15 | f_arg3 207 // | F14 | f_arg2 208 // | F13 | f_arg1 209 // | F12 | f_arg0 210 // | | padding 211 // | A0/Method* | <- sp 212 // NOTE: for Mip64, when A0 is skipped, F12 is also skipped. 213 static constexpr bool kSplitPairAcrossRegisterAndStack = false; 214 static constexpr bool kAlignPairRegister = false; 215 static constexpr bool kQuickSoftFloatAbi = false; 216 static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = false; 217 static constexpr bool kQuickSkipOddFpRegisters = false; 218 static constexpr size_t kNumQuickGprArgs = 7; // 7 arguments passed in GPRs. 219 static constexpr size_t kNumQuickFprArgs = 7; // 7 arguments passed in FPRs. 220 static constexpr bool kGprFprLockstep = true; 221 222 static size_t GprIndexToGprOffset(uint32_t gpr_index) { 223 return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA); 224 } 225 #elif defined(__i386__) 226 // The callee save frame is pointed to by SP. 227 // | argN | | 228 // | ... | | 229 // | arg4 | | 230 // | arg3 spill | | Caller's frame 231 // | arg2 spill | | 232 // | arg1 spill | | 233 // | Method* | --- 234 // | Return | 235 // | EBP,ESI,EDI | callee saves 236 // | EBX | arg3 237 // | EDX | arg2 238 // | ECX | arg1 239 // | XMM3 | float arg 4 240 // | XMM2 | float arg 3 241 // | XMM1 | float arg 2 242 // | XMM0 | float arg 1 243 // | EAX/Method* | <- sp 244 static constexpr bool kSplitPairAcrossRegisterAndStack = false; 245 static constexpr bool kAlignPairRegister = false; 246 static constexpr bool kQuickSoftFloatAbi = false; // This is a hard float ABI. 247 static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = false; 248 static constexpr bool kQuickSkipOddFpRegisters = false; 249 static constexpr size_t kNumQuickGprArgs = 3; // 3 arguments passed in GPRs. 250 static constexpr size_t kNumQuickFprArgs = 4; // 4 arguments passed in FPRs. 251 static constexpr bool kGprFprLockstep = false; 252 static size_t GprIndexToGprOffset(uint32_t gpr_index) { 253 return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA); 254 } 255 #elif defined(__x86_64__) 256 // The callee save frame is pointed to by SP. 257 // | argN | | 258 // | ... | | 259 // | reg. arg spills | | Caller's frame 260 // | Method* | --- 261 // | Return | 262 // | R15 | callee save 263 // | R14 | callee save 264 // | R13 | callee save 265 // | R12 | callee save 266 // | R9 | arg5 267 // | R8 | arg4 268 // | RSI/R6 | arg1 269 // | RBP/R5 | callee save 270 // | RBX/R3 | callee save 271 // | RDX/R2 | arg2 272 // | RCX/R1 | arg3 273 // | XMM7 | float arg 8 274 // | XMM6 | float arg 7 275 // | XMM5 | float arg 6 276 // | XMM4 | float arg 5 277 // | XMM3 | float arg 4 278 // | XMM2 | float arg 3 279 // | XMM1 | float arg 2 280 // | XMM0 | float arg 1 281 // | Padding | 282 // | RDI/Method* | <- sp 283 static constexpr bool kSplitPairAcrossRegisterAndStack = false; 284 static constexpr bool kAlignPairRegister = false; 285 static constexpr bool kQuickSoftFloatAbi = false; // This is a hard float ABI. 
286 static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = false; 287 static constexpr bool kQuickSkipOddFpRegisters = false; 288 static constexpr size_t kNumQuickGprArgs = 5; // 5 arguments passed in GPRs. 289 static constexpr size_t kNumQuickFprArgs = 8; // 8 arguments passed in FPRs. 290 static constexpr bool kGprFprLockstep = false; 291 static size_t GprIndexToGprOffset(uint32_t gpr_index) { 292 switch (gpr_index) { 293 case 0: return (4 * GetBytesPerGprSpillLocation(kRuntimeISA)); 294 case 1: return (1 * GetBytesPerGprSpillLocation(kRuntimeISA)); 295 case 2: return (0 * GetBytesPerGprSpillLocation(kRuntimeISA)); 296 case 3: return (5 * GetBytesPerGprSpillLocation(kRuntimeISA)); 297 case 4: return (6 * GetBytesPerGprSpillLocation(kRuntimeISA)); 298 default: 299 LOG(FATAL) << "Unexpected GPR index: " << gpr_index; 300 UNREACHABLE(); 301 } 302 } 303 #else 304 #error "Unsupported architecture" 305 #endif 306 307 public: 308 // Special handling for proxy methods. Proxy methods are instance methods so the 309 // 'this' object is the 1st argument. They also have the same frame layout as the 310 // kRefAndArgs runtime method. Since 'this' is a reference, it is located in the 311 // 1st GPR. 312 static StackReference<mirror::Object>* GetProxyThisObjectReference(ArtMethod** sp) 313 REQUIRES_SHARED(Locks::mutator_lock_) { 314 CHECK((*sp)->IsProxyMethod()); 315 CHECK_GT(kNumQuickGprArgs, 0u); 316 constexpr uint32_t kThisGprIndex = 0u; // 'this' is in the 1st GPR. 317 size_t this_arg_offset = kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset + 318 GprIndexToGprOffset(kThisGprIndex); 319 uint8_t* this_arg_address = reinterpret_cast<uint8_t*>(sp) + this_arg_offset; 320 return reinterpret_cast<StackReference<mirror::Object>*>(this_arg_address); 321 } 322 323 static ArtMethod* GetCallingMethod(ArtMethod** sp) REQUIRES_SHARED(Locks::mutator_lock_) { 324 DCHECK((*sp)->IsCalleeSaveMethod()); 325 return GetCalleeSaveMethodCaller(sp, CalleeSaveType::kSaveRefsAndArgs); 326 } 327 328 static ArtMethod* GetOuterMethod(ArtMethod** sp) REQUIRES_SHARED(Locks::mutator_lock_) { 329 DCHECK((*sp)->IsCalleeSaveMethod()); 330 uint8_t* previous_sp = 331 reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_FrameSize; 332 return *reinterpret_cast<ArtMethod**>(previous_sp); 333 } 334 335 static uint32_t GetCallingDexPc(ArtMethod** sp) REQUIRES_SHARED(Locks::mutator_lock_) { 336 DCHECK((*sp)->IsCalleeSaveMethod()); 337 constexpr size_t callee_frame_size = 338 RuntimeCalleeSaveFrame::GetFrameSize(CalleeSaveType::kSaveRefsAndArgs); 339 ArtMethod** caller_sp = reinterpret_cast<ArtMethod**>( 340 reinterpret_cast<uintptr_t>(sp) + callee_frame_size); 341 uintptr_t outer_pc = QuickArgumentVisitor::GetCallingPc(sp); 342 const OatQuickMethodHeader* current_code = (*caller_sp)->GetOatQuickMethodHeader(outer_pc); 343 uintptr_t outer_pc_offset = current_code->NativeQuickPcOffset(outer_pc); 344 345 if (current_code->IsOptimized()) { 346 CodeInfo code_info(current_code, CodeInfo::DecodeFlags::InlineInfoOnly); 347 StackMap stack_map = code_info.GetStackMapForNativePcOffset(outer_pc_offset); 348 DCHECK(stack_map.IsValid()); 349 BitTableRange<InlineInfo> inline_infos = code_info.GetInlineInfosOf(stack_map); 350 if (!inline_infos.empty()) { 351 return inline_infos.back().GetDexPc(); 352 } else { 353 return stack_map.GetDexPc(); 354 } 355 } else { 356 return current_code->ToDexPc(*caller_sp, outer_pc); 357 } 358 } 359 360 // For the given quick ref and args quick frame, return the caller's PC. 
361 static uintptr_t GetCallingPc(ArtMethod** sp) REQUIRES_SHARED(Locks::mutator_lock_) { 362 DCHECK((*sp)->IsCalleeSaveMethod()); 363 uint8_t* return_address_spill = 364 reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_ReturnPcOffset; 365 return *reinterpret_cast<uintptr_t*>(return_address_spill); 366 } 367 368 QuickArgumentVisitor(ArtMethod** sp, bool is_static, const char* shorty, 369 uint32_t shorty_len) REQUIRES_SHARED(Locks::mutator_lock_) : 370 is_static_(is_static), shorty_(shorty), shorty_len_(shorty_len), 371 gpr_args_(reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset), 372 fpr_args_(reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset), 373 stack_args_(reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_FrameSize 374 + sizeof(ArtMethod*)), // Skip ArtMethod*. 375 gpr_index_(0), fpr_index_(0), fpr_double_index_(0), stack_index_(0), 376 cur_type_(Primitive::kPrimVoid), is_split_long_or_double_(false) { 377 static_assert(kQuickSoftFloatAbi == (kNumQuickFprArgs == 0), 378 "Number of Quick FPR arguments unexpected"); 379 static_assert(!(kQuickSoftFloatAbi && kQuickDoubleRegAlignedFloatBackFilled), 380 "Double alignment unexpected"); 381 // For register alignment, we want to assume that counters (fpr_double_index_) are even if the 382 // number of FPR argument registers is even. 383 static_assert(!kQuickDoubleRegAlignedFloatBackFilled || kNumQuickFprArgs % 2 == 0, 384 "Number of Quick FPR arguments not even"); 385 DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), kRuntimePointerSize); 386 } 387 388 virtual ~QuickArgumentVisitor() {} 389 390 virtual void Visit() = 0; 391 392 Primitive::Type GetParamPrimitiveType() const { 393 return cur_type_; 394 } 395 396 uint8_t* GetParamAddress() const { 397 if (!kQuickSoftFloatAbi) { 398 Primitive::Type type = GetParamPrimitiveType(); 399 if (UNLIKELY((type == Primitive::kPrimDouble) || (type == Primitive::kPrimFloat))) { 400 if (type == Primitive::kPrimDouble && kQuickDoubleRegAlignedFloatBackFilled) { 401 if (fpr_double_index_ + 2 < kNumQuickFprArgs + 1) { 402 return fpr_args_ + (fpr_double_index_ * GetBytesPerFprSpillLocation(kRuntimeISA)); 403 } 404 } else if (fpr_index_ + 1 < kNumQuickFprArgs + 1) { 405 return fpr_args_ + (fpr_index_ * GetBytesPerFprSpillLocation(kRuntimeISA)); 406 } 407 return stack_args_ + (stack_index_ * kBytesStackArgLocation); 408 } 409 } 410 if (gpr_index_ < kNumQuickGprArgs) { 411 return gpr_args_ + GprIndexToGprOffset(gpr_index_); 412 } 413 return stack_args_ + (stack_index_ * kBytesStackArgLocation); 414 } 415 416 bool IsSplitLongOrDouble() const { 417 if ((GetBytesPerGprSpillLocation(kRuntimeISA) == 4) || 418 (GetBytesPerFprSpillLocation(kRuntimeISA) == 4)) { 419 return is_split_long_or_double_; 420 } else { 421 return false; // An optimization for when GPRs and FPRs are 64-bit. 422 } 423 } 424 425 bool IsParamAReference() const { 426 return GetParamPrimitiveType() == Primitive::kPrimNot; 427 } 428 429 bool IsParamALongOrDouble() const { 430 Primitive::Type type = GetParamPrimitiveType(); 431 return type == Primitive::kPrimLong || type == Primitive::kPrimDouble; 432 } 433 434 uint64_t ReadSplitLongParam() const { 435 // The split long is always available through the stack.
436 return *reinterpret_cast<uint64_t*>(stack_args_ 437 + stack_index_ * kBytesStackArgLocation); 438 } 439 440 void IncGprIndex() { 441 gpr_index_++; 442 if (kGprFprLockstep) { 443 fpr_index_++; 444 } 445 } 446 447 void IncFprIndex() { 448 fpr_index_++; 449 if (kGprFprLockstep) { 450 gpr_index_++; 451 } 452 } 453 454 void VisitArguments() REQUIRES_SHARED(Locks::mutator_lock_) { 455 // (a) 'stack_args_' should point to the first method's argument 456 // (b) whatever the argument type it is, the 'stack_index_' should 457 // be moved forward along with every visiting. 458 gpr_index_ = 0; 459 fpr_index_ = 0; 460 if (kQuickDoubleRegAlignedFloatBackFilled) { 461 fpr_double_index_ = 0; 462 } 463 stack_index_ = 0; 464 if (!is_static_) { // Handle this. 465 cur_type_ = Primitive::kPrimNot; 466 is_split_long_or_double_ = false; 467 Visit(); 468 stack_index_++; 469 if (kNumQuickGprArgs > 0) { 470 IncGprIndex(); 471 } 472 } 473 for (uint32_t shorty_index = 1; shorty_index < shorty_len_; ++shorty_index) { 474 cur_type_ = Primitive::GetType(shorty_[shorty_index]); 475 switch (cur_type_) { 476 case Primitive::kPrimNot: 477 case Primitive::kPrimBoolean: 478 case Primitive::kPrimByte: 479 case Primitive::kPrimChar: 480 case Primitive::kPrimShort: 481 case Primitive::kPrimInt: 482 is_split_long_or_double_ = false; 483 Visit(); 484 stack_index_++; 485 if (gpr_index_ < kNumQuickGprArgs) { 486 IncGprIndex(); 487 } 488 break; 489 case Primitive::kPrimFloat: 490 is_split_long_or_double_ = false; 491 Visit(); 492 stack_index_++; 493 if (kQuickSoftFloatAbi) { 494 if (gpr_index_ < kNumQuickGprArgs) { 495 IncGprIndex(); 496 } 497 } else { 498 if (fpr_index_ + 1 < kNumQuickFprArgs + 1) { 499 IncFprIndex(); 500 if (kQuickDoubleRegAlignedFloatBackFilled) { 501 // Double should not overlap with float. 502 // For example, if fpr_index_ = 3, fpr_double_index_ should be at least 4. 503 fpr_double_index_ = std::max(fpr_double_index_, RoundUp(fpr_index_, 2)); 504 // Float should not overlap with double. 505 if (fpr_index_ % 2 == 0) { 506 fpr_index_ = std::max(fpr_double_index_, fpr_index_); 507 } 508 } else if (kQuickSkipOddFpRegisters) { 509 IncFprIndex(); 510 } 511 } 512 } 513 break; 514 case Primitive::kPrimDouble: 515 case Primitive::kPrimLong: 516 if (kQuickSoftFloatAbi || (cur_type_ == Primitive::kPrimLong)) { 517 if (cur_type_ == Primitive::kPrimLong && 518 #if defined(__mips__) && !defined(__LP64__) 519 (gpr_index_ == 0 || gpr_index_ == 2) && 520 #else 521 gpr_index_ == 0 && 522 #endif 523 kAlignPairRegister) { 524 // Currently, this is only for ARM and MIPS, where we align long parameters with 525 // even-numbered registers by skipping R1 (on ARM) or A1(A3) (on MIPS) and using 526 // R2 (on ARM) or A2(T0) (on MIPS) instead. 527 IncGprIndex(); 528 } 529 is_split_long_or_double_ = (GetBytesPerGprSpillLocation(kRuntimeISA) == 4) && 530 ((gpr_index_ + 1) == kNumQuickGprArgs); 531 if (!kSplitPairAcrossRegisterAndStack && is_split_long_or_double_) { 532 // We don't want to split this. Pass over this register. 
533 gpr_index_++; 534 is_split_long_or_double_ = false; 535 } 536 Visit(); 537 if (kBytesStackArgLocation == 4) { 538 stack_index_+= 2; 539 } else { 540 CHECK_EQ(kBytesStackArgLocation, 8U); 541 stack_index_++; 542 } 543 if (gpr_index_ < kNumQuickGprArgs) { 544 IncGprIndex(); 545 if (GetBytesPerGprSpillLocation(kRuntimeISA) == 4) { 546 if (gpr_index_ < kNumQuickGprArgs) { 547 IncGprIndex(); 548 } 549 } 550 } 551 } else { 552 is_split_long_or_double_ = (GetBytesPerFprSpillLocation(kRuntimeISA) == 4) && 553 ((fpr_index_ + 1) == kNumQuickFprArgs) && !kQuickDoubleRegAlignedFloatBackFilled; 554 Visit(); 555 if (kBytesStackArgLocation == 4) { 556 stack_index_+= 2; 557 } else { 558 CHECK_EQ(kBytesStackArgLocation, 8U); 559 stack_index_++; 560 } 561 if (kQuickDoubleRegAlignedFloatBackFilled) { 562 if (fpr_double_index_ + 2 < kNumQuickFprArgs + 1) { 563 fpr_double_index_ += 2; 564 // Float should not overlap with double. 565 if (fpr_index_ % 2 == 0) { 566 fpr_index_ = std::max(fpr_double_index_, fpr_index_); 567 } 568 } 569 } else if (fpr_index_ + 1 < kNumQuickFprArgs + 1) { 570 IncFprIndex(); 571 if (GetBytesPerFprSpillLocation(kRuntimeISA) == 4) { 572 if (fpr_index_ + 1 < kNumQuickFprArgs + 1) { 573 IncFprIndex(); 574 } 575 } 576 } 577 } 578 break; 579 default: 580 LOG(FATAL) << "Unexpected type: " << cur_type_ << " in " << shorty_; 581 } 582 } 583 } 584 585 protected: 586 const bool is_static_; 587 const char* const shorty_; 588 const uint32_t shorty_len_; 589 590 private: 591 uint8_t* const gpr_args_; // Address of GPR arguments in callee save frame. 592 uint8_t* const fpr_args_; // Address of FPR arguments in callee save frame. 593 uint8_t* const stack_args_; // Address of stack arguments in caller's frame. 594 uint32_t gpr_index_; // Index into spilled GPRs. 595 // Index into spilled FPRs. 596 // In case kQuickDoubleRegAlignedFloatBackFilled, it may index a hole while fpr_double_index_ 597 // holds a higher register number. 598 uint32_t fpr_index_; 599 // Index into spilled FPRs for aligned double. 600 // Only used when kQuickDoubleRegAlignedFloatBackFilled. Next available double register indexed in 601 // terms of singles, may be behind fpr_index. 602 uint32_t fpr_double_index_; 603 uint32_t stack_index_; // Index into arguments on the stack. 604 // The current type of argument during VisitArguments. 605 Primitive::Type cur_type_; 606 // Does a 64bit parameter straddle the register and stack arguments? 607 bool is_split_long_or_double_; 608 }; 609 610 // Returns the 'this' object of a proxy method. This function is only used by StackVisitor. It 611 // allows to use the QuickArgumentVisitor constants without moving all the code in its own module. 612 extern "C" mirror::Object* artQuickGetProxyThisObject(ArtMethod** sp) 613 REQUIRES_SHARED(Locks::mutator_lock_) { 614 return QuickArgumentVisitor::GetProxyThisObjectReference(sp)->AsMirrorPtr(); 615 } 616 617 // Visits arguments on the stack placing them into the shadow frame. 
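// The subclasses below are driven through VisitArguments(): the base class walks the GPR, FPR and
// stack argument locations of the kSaveRefsAndArgs frame and calls Visit() once per argument. A
// typical use, simplified from artQuickToInterpreterBridge() further down (the variable names
// follow that code):
//
//   BuildQuickShadowFrameVisitor shadow_frame_builder(sp, method->IsStatic(), shorty, shorty_len,
//                                                     shadow_frame, first_arg_reg);
//   shadow_frame_builder.VisitArguments();  // Fills the shadow frame's vregs from the quick frame.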
618 class BuildQuickShadowFrameVisitor final : public QuickArgumentVisitor { 619 public: 620 BuildQuickShadowFrameVisitor(ArtMethod** sp, bool is_static, const char* shorty, 621 uint32_t shorty_len, ShadowFrame* sf, size_t first_arg_reg) : 622 QuickArgumentVisitor(sp, is_static, shorty, shorty_len), sf_(sf), cur_reg_(first_arg_reg) {} 623 624 void Visit() REQUIRES_SHARED(Locks::mutator_lock_) override; 625 626 private: 627 ShadowFrame* const sf_; 628 uint32_t cur_reg_; 629 630 DISALLOW_COPY_AND_ASSIGN(BuildQuickShadowFrameVisitor); 631 }; 632 633 void BuildQuickShadowFrameVisitor::Visit() { 634 Primitive::Type type = GetParamPrimitiveType(); 635 switch (type) { 636 case Primitive::kPrimLong: // Fall-through. 637 case Primitive::kPrimDouble: 638 if (IsSplitLongOrDouble()) { 639 sf_->SetVRegLong(cur_reg_, ReadSplitLongParam()); 640 } else { 641 sf_->SetVRegLong(cur_reg_, *reinterpret_cast<jlong*>(GetParamAddress())); 642 } 643 ++cur_reg_; 644 break; 645 case Primitive::kPrimNot: { 646 StackReference<mirror::Object>* stack_ref = 647 reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress()); 648 sf_->SetVRegReference(cur_reg_, stack_ref->AsMirrorPtr()); 649 } 650 break; 651 case Primitive::kPrimBoolean: // Fall-through. 652 case Primitive::kPrimByte: // Fall-through. 653 case Primitive::kPrimChar: // Fall-through. 654 case Primitive::kPrimShort: // Fall-through. 655 case Primitive::kPrimInt: // Fall-through. 656 case Primitive::kPrimFloat: 657 sf_->SetVReg(cur_reg_, *reinterpret_cast<jint*>(GetParamAddress())); 658 break; 659 case Primitive::kPrimVoid: 660 LOG(FATAL) << "UNREACHABLE"; 661 UNREACHABLE(); 662 } 663 ++cur_reg_; 664 } 665 666 // Don't inline. See b/65159206. 667 NO_INLINE 668 static void HandleDeoptimization(JValue* result, 669 ArtMethod* method, 670 ShadowFrame* deopt_frame, 671 ManagedStack* fragment) 672 REQUIRES_SHARED(Locks::mutator_lock_) { 673 // Coming from partial-fragment deopt. 674 Thread* self = Thread::Current(); 675 if (kIsDebugBuild) { 676 // Sanity-check: are the methods as expected? We check that the last shadow frame (the bottom 677 // of the call-stack) corresponds to the called method. 678 ShadowFrame* linked = deopt_frame; 679 while (linked->GetLink() != nullptr) { 680 linked = linked->GetLink(); 681 } 682 CHECK_EQ(method, linked->GetMethod()) << method->PrettyMethod() << " " 683 << ArtMethod::PrettyMethod(linked->GetMethod()); 684 } 685 686 if (VLOG_IS_ON(deopt)) { 687 // Print out the stack to verify that it was a partial-fragment deopt. 688 LOG(INFO) << "Continue-ing from deopt. Stack is:"; 689 QuickExceptionHandler::DumpFramesWithType(self, true); 690 } 691 692 ObjPtr<mirror::Throwable> pending_exception; 693 bool from_code = false; 694 DeoptimizationMethodType method_type; 695 self->PopDeoptimizationContext(/* out */ result, 696 /* out */ &pending_exception, 697 /* out */ &from_code, 698 /* out */ &method_type); 699 700 // Push a transition back into managed code onto the linked list in thread. 701 self->PushManagedStackFragment(fragment); 702 703 // Ensure that the stack is still in order. 704 if (kIsDebugBuild) { 705 class DummyStackVisitor : public StackVisitor { 706 public: 707 explicit DummyStackVisitor(Thread* self_in) REQUIRES_SHARED(Locks::mutator_lock_) 708 : StackVisitor(self_in, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames) {} 709 710 bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) { 711 // Nothing to do here. In a debug build, SanityCheckFrame will do the work in the walking 712 // logic. 
Just always say we want to continue. 713 return true; 714 } 715 }; 716 DummyStackVisitor dsv(self); 717 dsv.WalkStack(); 718 } 719 720 // Restore the exception that was pending before deoptimization then interpret the 721 // deoptimized frames. 722 if (pending_exception != nullptr) { 723 self->SetException(pending_exception); 724 } 725 interpreter::EnterInterpreterFromDeoptimize(self, 726 deopt_frame, 727 result, 728 from_code, 729 DeoptimizationMethodType::kDefault); 730 } 731 732 extern "C" uint64_t artQuickToInterpreterBridge(ArtMethod* method, Thread* self, ArtMethod** sp) 733 REQUIRES_SHARED(Locks::mutator_lock_) { 734 // Ensure we don't get thread suspension until the object arguments are safely in the shadow 735 // frame. 736 ScopedQuickEntrypointChecks sqec(self); 737 738 if (UNLIKELY(!method->IsInvokable())) { 739 method->ThrowInvocationTimeError(); 740 return 0; 741 } 742 743 JValue tmp_value; 744 ShadowFrame* deopt_frame = self->PopStackedShadowFrame( 745 StackedShadowFrameType::kDeoptimizationShadowFrame, false); 746 ManagedStack fragment; 747 748 DCHECK(!method->IsNative()) << method->PrettyMethod(); 749 uint32_t shorty_len = 0; 750 ArtMethod* non_proxy_method = method->GetInterfaceMethodIfProxy(kRuntimePointerSize); 751 DCHECK(non_proxy_method->GetCodeItem() != nullptr) << method->PrettyMethod(); 752 CodeItemDataAccessor accessor(non_proxy_method->DexInstructionData()); 753 const char* shorty = non_proxy_method->GetShorty(&shorty_len); 754 755 JValue result; 756 bool force_frame_pop = false; 757 758 if (UNLIKELY(deopt_frame != nullptr)) { 759 HandleDeoptimization(&result, method, deopt_frame, &fragment); 760 } else { 761 const char* old_cause = self->StartAssertNoThreadSuspension( 762 "Building interpreter shadow frame"); 763 uint16_t num_regs = accessor.RegistersSize(); 764 // No last shadow coming from quick. 765 ShadowFrameAllocaUniquePtr shadow_frame_unique_ptr = 766 CREATE_SHADOW_FRAME(num_regs, /* link= */ nullptr, method, /* dex_pc= */ 0); 767 ShadowFrame* shadow_frame = shadow_frame_unique_ptr.get(); 768 size_t first_arg_reg = accessor.RegistersSize() - accessor.InsSize(); 769 BuildQuickShadowFrameVisitor shadow_frame_builder(sp, method->IsStatic(), shorty, shorty_len, 770 shadow_frame, first_arg_reg); 771 shadow_frame_builder.VisitArguments(); 772 const bool needs_initialization = 773 method->IsStatic() && !method->GetDeclaringClass()->IsInitialized(); 774 // Push a transition back into managed code onto the linked list in thread. 775 self->PushManagedStackFragment(&fragment); 776 self->PushShadowFrame(shadow_frame); 777 self->EndAssertNoThreadSuspension(old_cause); 778 779 if (needs_initialization) { 780 // Ensure static method's class is initialized. 781 StackHandleScope<1> hs(self); 782 Handle<mirror::Class> h_class(hs.NewHandle(shadow_frame->GetMethod()->GetDeclaringClass())); 783 if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(self, h_class, true, true)) { 784 DCHECK(Thread::Current()->IsExceptionPending()) 785 << shadow_frame->GetMethod()->PrettyMethod(); 786 self->PopManagedStackFragment(fragment); 787 return 0; 788 } 789 } 790 791 result = interpreter::EnterInterpreterFromEntryPoint(self, accessor, shadow_frame); 792 force_frame_pop = shadow_frame->GetForcePopFrame(); 793 } 794 795 // Pop transition. 
796 self->PopManagedStackFragment(fragment); 797 798 // Request a stack deoptimization if needed 799 ArtMethod* caller = QuickArgumentVisitor::GetCallingMethod(sp); 800 uintptr_t caller_pc = QuickArgumentVisitor::GetCallingPc(sp); 801 // If caller_pc is the instrumentation exit stub, the stub will check to see if deoptimization 802 // should be done and it knows the real return pc. NB If the upcall is null we don't need to do 803 // anything. This can happen during shutdown or early startup. 804 if (UNLIKELY( 805 caller != nullptr && 806 caller_pc != reinterpret_cast<uintptr_t>(GetQuickInstrumentationExitPc()) && 807 (self->IsForceInterpreter() || Dbg::IsForcedInterpreterNeededForUpcall(self, caller)))) { 808 if (!Runtime::Current()->IsAsyncDeoptimizeable(caller_pc)) { 809 LOG(WARNING) << "Got a deoptimization request on un-deoptimizable method " 810 << caller->PrettyMethod(); 811 } else { 812 VLOG(deopt) << "Forcing deoptimization on return from method " << method->PrettyMethod() 813 << " to " << caller->PrettyMethod() 814 << (force_frame_pop ? " for frame-pop" : ""); 815 DCHECK(!force_frame_pop || result.GetJ() == 0) << "Force frame pop should have no result."; 816 if (force_frame_pop && self->GetException() != nullptr) { 817 LOG(WARNING) << "Suppressing exception for instruction-retry: " 818 << self->GetException()->Dump(); 819 } 820 // Push the context of the deoptimization stack so we can restore the return value and the 821 // exception before executing the deoptimized frames. 822 self->PushDeoptimizationContext( 823 result, 824 shorty[0] == 'L' || shorty[0] == '[', /* class or array */ 825 force_frame_pop ? nullptr : self->GetException(), 826 /* from_code= */ false, 827 DeoptimizationMethodType::kDefault); 828 829 // Set special exception to cause deoptimization. 830 self->SetException(Thread::GetDeoptimizationException()); 831 } 832 } 833 834 // No need to restore the args since the method has already been run by the interpreter. 835 return result.GetJ(); 836 } 837 838 // Visits arguments on the stack placing them into the args vector, Object* arguments are converted 839 // to jobjects. 840 class BuildQuickArgumentVisitor final : public QuickArgumentVisitor { 841 public: 842 BuildQuickArgumentVisitor(ArtMethod** sp, bool is_static, const char* shorty, uint32_t shorty_len, 843 ScopedObjectAccessUnchecked* soa, std::vector<jvalue>* args) : 844 QuickArgumentVisitor(sp, is_static, shorty, shorty_len), soa_(soa), args_(args) {} 845 846 void Visit() REQUIRES_SHARED(Locks::mutator_lock_) override; 847 848 private: 849 ScopedObjectAccessUnchecked* const soa_; 850 std::vector<jvalue>* const args_; 851 852 DISALLOW_COPY_AND_ASSIGN(BuildQuickArgumentVisitor); 853 }; 854 855 void BuildQuickArgumentVisitor::Visit() { 856 jvalue val; 857 Primitive::Type type = GetParamPrimitiveType(); 858 switch (type) { 859 case Primitive::kPrimNot: { 860 StackReference<mirror::Object>* stack_ref = 861 reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress()); 862 val.l = soa_->AddLocalReference<jobject>(stack_ref->AsMirrorPtr()); 863 break; 864 } 865 case Primitive::kPrimLong: // Fall-through. 866 case Primitive::kPrimDouble: 867 if (IsSplitLongOrDouble()) { 868 val.j = ReadSplitLongParam(); 869 } else { 870 val.j = *reinterpret_cast<jlong*>(GetParamAddress()); 871 } 872 break; 873 case Primitive::kPrimBoolean: // Fall-through. 874 case Primitive::kPrimByte: // Fall-through. 875 case Primitive::kPrimChar: // Fall-through. 876 case Primitive::kPrimShort: // Fall-through. 
877 case Primitive::kPrimInt: // Fall-through. 878 case Primitive::kPrimFloat: 879 val.i = *reinterpret_cast<jint*>(GetParamAddress()); 880 break; 881 case Primitive::kPrimVoid: 882 LOG(FATAL) << "UNREACHABLE"; 883 UNREACHABLE(); 884 } 885 args_->push_back(val); 886 } 887 888 // Handler for invocation on proxy methods. On entry a frame will exist for the proxy object method 889 // which is responsible for recording callee save registers. We explicitly place into jobjects the 890 // incoming reference arguments (so they survive GC). We invoke the invocation handler, a field 891 // within the proxy object, which will box the primitive arguments and deal with error cases. 892 extern "C" uint64_t artQuickProxyInvokeHandler( 893 ArtMethod* proxy_method, mirror::Object* receiver, Thread* self, ArtMethod** sp) 894 REQUIRES_SHARED(Locks::mutator_lock_) { 895 DCHECK(proxy_method->IsProxyMethod()) << proxy_method->PrettyMethod(); 896 DCHECK(receiver->GetClass()->IsProxyClass()) << proxy_method->PrettyMethod(); 897 // Ensure we don't get thread suspension until the object arguments are safely in jobjects. 898 const char* old_cause = 899 self->StartAssertNoThreadSuspension("Adding to IRT proxy object arguments"); 900 // Register the top of the managed stack, making the stack crawlable. 901 DCHECK_EQ((*sp), proxy_method) << proxy_method->PrettyMethod(); 902 self->VerifyStack(); 903 // Start new JNI local reference state. 904 JNIEnvExt* env = self->GetJniEnv(); 905 ScopedObjectAccessUnchecked soa(env); 906 ScopedJniEnvLocalRefState env_state(env); 907 // Create local ref. copies of proxy method and the receiver. 908 jobject rcvr_jobj = soa.AddLocalReference<jobject>(receiver); 909 910 // Place the arguments into the args vector and remove the receiver. 911 ArtMethod* non_proxy_method = proxy_method->GetInterfaceMethodIfProxy(kRuntimePointerSize); 912 CHECK(!non_proxy_method->IsStatic()) << proxy_method->PrettyMethod() << " " 913 << non_proxy_method->PrettyMethod(); 914 std::vector<jvalue> args; 915 uint32_t shorty_len = 0; 916 const char* shorty = non_proxy_method->GetShorty(&shorty_len); 917 BuildQuickArgumentVisitor local_ref_visitor( 918 sp, /* is_static= */ false, shorty, shorty_len, &soa, &args); 919 920 local_ref_visitor.VisitArguments(); 921 DCHECK_GT(args.size(), 0U) << proxy_method->PrettyMethod(); 922 args.erase(args.begin()); 923 924 // Convert proxy method into expected interface method. 925 ArtMethod* interface_method = proxy_method->FindOverriddenMethod(kRuntimePointerSize); 926 DCHECK(interface_method != nullptr) << proxy_method->PrettyMethod(); 927 DCHECK(!interface_method->IsProxyMethod()) << interface_method->PrettyMethod(); 928 self->EndAssertNoThreadSuspension(old_cause); 929 DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), kRuntimePointerSize); 930 DCHECK(!Runtime::Current()->IsActiveTransaction()); 931 ObjPtr<mirror::Method> interface_reflect_method = 932 mirror::Method::CreateFromArtMethod<kRuntimePointerSize, false>(soa.Self(), interface_method); 933 if (interface_reflect_method == nullptr) { 934 soa.Self()->AssertPendingOOMException(); 935 return 0; 936 } 937 jobject interface_method_jobj = soa.AddLocalReference<jobject>(interface_reflect_method); 938 939 // All naked Object*s should now be in jobjects, so it's safe to go into the main invoke code 940 // that performs allocations or instrumentation events.
941 instrumentation::Instrumentation* instr = Runtime::Current()->GetInstrumentation(); 942 if (instr->HasMethodEntryListeners()) { 943 instr->MethodEnterEvent(soa.Self(), 944 soa.Decode<mirror::Object>(rcvr_jobj).Ptr(), 945 proxy_method, 946 0); 947 if (soa.Self()->IsExceptionPending()) { 948 instr->MethodUnwindEvent(self, 949 soa.Decode<mirror::Object>(rcvr_jobj).Ptr(), 950 proxy_method, 951 0); 952 return 0; 953 } 954 } 955 JValue result = InvokeProxyInvocationHandler(soa, shorty, rcvr_jobj, interface_method_jobj, args); 956 if (soa.Self()->IsExceptionPending()) { 957 if (instr->HasMethodUnwindListeners()) { 958 instr->MethodUnwindEvent(self, 959 soa.Decode<mirror::Object>(rcvr_jobj).Ptr(), 960 proxy_method, 961 0); 962 } 963 } else if (instr->HasMethodExitListeners()) { 964 instr->MethodExitEvent(self, 965 soa.Decode<mirror::Object>(rcvr_jobj).Ptr(), 966 proxy_method, 967 0, 968 result); 969 } 970 return result.GetJ(); 971 } 972 973 // Visitor returning a reference argument at a given position in a Quick stack frame. 974 // NOTE: Only used for testing purposes. 975 class GetQuickReferenceArgumentAtVisitor final : public QuickArgumentVisitor { 976 public: 977 GetQuickReferenceArgumentAtVisitor(ArtMethod** sp, 978 const char* shorty, 979 uint32_t shorty_len, 980 size_t arg_pos) 981 : QuickArgumentVisitor(sp, /* is_static= */ false, shorty, shorty_len), 982 cur_pos_(0u), 983 arg_pos_(arg_pos), 984 ref_arg_(nullptr) { 985 CHECK_LT(arg_pos, shorty_len) << "Argument position greater than the number of arguments"; 986 } 987 988 void Visit() REQUIRES_SHARED(Locks::mutator_lock_) override { 989 if (cur_pos_ == arg_pos_) { 990 Primitive::Type type = GetParamPrimitiveType(); 991 CHECK_EQ(type, Primitive::kPrimNot) << "Argument at searched position is not a reference"; 992 ref_arg_ = reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress()); 993 } 994 ++cur_pos_; 995 } 996 997 StackReference<mirror::Object>* GetReferenceArgument() { 998 return ref_arg_; 999 } 1000 1001 private: 1002 // The position of the currently visited argument. 1003 size_t cur_pos_; 1004 // The position of the searched argument. 1005 const size_t arg_pos_; 1006 // The reference argument, if found. 1007 StackReference<mirror::Object>* ref_arg_; 1008 1009 DISALLOW_COPY_AND_ASSIGN(GetQuickReferenceArgumentAtVisitor); 1010 }; 1011 1012 // Returning the reference argument at position `arg_pos` in the Quick stack frame at address `sp`. 1013 // NOTE: Only used for testing purposes. 1014 extern "C" StackReference<mirror::Object>* artQuickGetProxyReferenceArgumentAt(size_t arg_pos, 1015 ArtMethod** sp) 1016 REQUIRES_SHARED(Locks::mutator_lock_) { 1017 ArtMethod* proxy_method = *sp; 1018 ArtMethod* non_proxy_method = proxy_method->GetInterfaceMethodIfProxy(kRuntimePointerSize); 1019 CHECK(!non_proxy_method->IsStatic()) 1020 << proxy_method->PrettyMethod() << " " << non_proxy_method->PrettyMethod(); 1021 uint32_t shorty_len = 0; 1022 const char* shorty = non_proxy_method->GetShorty(&shorty_len); 1023 GetQuickReferenceArgumentAtVisitor ref_arg_visitor(sp, shorty, shorty_len, arg_pos); 1024 ref_arg_visitor.VisitArguments(); 1025 StackReference<mirror::Object>* ref_arg = ref_arg_visitor.GetReferenceArgument(); 1026 return ref_arg; 1027 } 1028 1029 // Visitor returning all the reference arguments in a Quick stack frame.
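// Collects a StackReference for every reference parameter it visits. Typical use (mirroring
// GetProxyReferenceArguments() below):
//
//   GetQuickReferenceArgumentsVisitor ref_args_visitor(sp, /*is_static=*/ false, shorty, shorty_len);
//   ref_args_visitor.VisitArguments();
//   std::vector<StackReference<mirror::Object>*> ref_args = ref_args_visitor.GetReferenceArguments();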
1030 class GetQuickReferenceArgumentsVisitor final : public QuickArgumentVisitor { 1031 public: 1032 GetQuickReferenceArgumentsVisitor(ArtMethod** sp, 1033 bool is_static, 1034 const char* shorty, 1035 uint32_t shorty_len) 1036 : QuickArgumentVisitor(sp, is_static, shorty, shorty_len) {} 1037 1038 void Visit() REQUIRES_SHARED(Locks::mutator_lock_) override { 1039 Primitive::Type type = GetParamPrimitiveType(); 1040 if (type == Primitive::kPrimNot) { 1041 StackReference<mirror::Object>* ref_arg = 1042 reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress()); 1043 ref_args_.push_back(ref_arg); 1044 } 1045 } 1046 1047 std::vector<StackReference<mirror::Object>*> GetReferenceArguments() { 1048 return ref_args_; 1049 } 1050 1051 private: 1052 // The reference arguments. 1053 std::vector<StackReference<mirror::Object>*> ref_args_; 1054 1055 DISALLOW_COPY_AND_ASSIGN(GetQuickReferenceArgumentsVisitor); 1056 }; 1057 1058 // Returning all reference arguments in Quick stack frame at address `sp`. 1059 std::vector<StackReference<mirror::Object>*> GetProxyReferenceArguments(ArtMethod** sp) 1060 REQUIRES_SHARED(Locks::mutator_lock_) { 1061 ArtMethod* proxy_method = *sp; 1062 ArtMethod* non_proxy_method = proxy_method->GetInterfaceMethodIfProxy(kRuntimePointerSize); 1063 CHECK(!non_proxy_method->IsStatic()) 1064 << proxy_method->PrettyMethod() << " " << non_proxy_method->PrettyMethod(); 1065 uint32_t shorty_len = 0; 1066 const char* shorty = non_proxy_method->GetShorty(&shorty_len); 1067 GetQuickReferenceArgumentsVisitor ref_args_visitor(sp, /*is_static=*/ false, shorty, shorty_len); 1068 ref_args_visitor.VisitArguments(); 1069 std::vector<StackReference<mirror::Object>*> ref_args = ref_args_visitor.GetReferenceArguments(); 1070 return ref_args; 1071 } 1072 1073 // Read object references held in arguments from quick frames and place in a JNI local references, 1074 // so they don't get garbage collected. 1075 class RememberForGcArgumentVisitor final : public QuickArgumentVisitor { 1076 public: 1077 RememberForGcArgumentVisitor(ArtMethod** sp, bool is_static, const char* shorty, 1078 uint32_t shorty_len, ScopedObjectAccessUnchecked* soa) : 1079 QuickArgumentVisitor(sp, is_static, shorty, shorty_len), soa_(soa) {} 1080 1081 void Visit() REQUIRES_SHARED(Locks::mutator_lock_) override; 1082 1083 void FixupReferences() REQUIRES_SHARED(Locks::mutator_lock_); 1084 1085 private: 1086 ScopedObjectAccessUnchecked* const soa_; 1087 // References which we must update when exiting in case the GC moved the objects. 1088 std::vector<std::pair<jobject, StackReference<mirror::Object>*> > references_; 1089 1090 DISALLOW_COPY_AND_ASSIGN(RememberForGcArgumentVisitor); 1091 }; 1092 1093 void RememberForGcArgumentVisitor::Visit() { 1094 if (IsParamAReference()) { 1095 StackReference<mirror::Object>* stack_ref = 1096 reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress()); 1097 jobject reference = 1098 soa_->AddLocalReference<jobject>(stack_ref->AsMirrorPtr()); 1099 references_.push_back(std::make_pair(reference, stack_ref)); 1100 } 1101 } 1102 1103 void RememberForGcArgumentVisitor::FixupReferences() { 1104 // Fixup any references which may have changed. 
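// Each recorded pair is (local reference, original stack slot): re-read the object through the
// local reference, which the GC keeps up to date, write it back to the stack slot, and then
// release the local reference.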
1105 for (const auto& pair : references_) { 1106 pair.second->Assign(soa_->Decode<mirror::Object>(pair.first)); 1107 soa_->Env()->DeleteLocalRef(pair.first); 1108 } 1109 } 1110 1111 extern "C" const void* artInstrumentationMethodEntryFromCode(ArtMethod* method, 1112 mirror::Object* this_object, 1113 Thread* self, 1114 ArtMethod** sp) 1115 REQUIRES_SHARED(Locks::mutator_lock_) { 1116 const void* result; 1117 // Instrumentation changes the stack. Thus, when exiting, the stack cannot be verified, so skip 1118 // that part. 1119 ScopedQuickEntrypointChecks sqec(self, kIsDebugBuild, false); 1120 instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation(); 1121 DCHECK(!method->IsProxyMethod()) 1122 << "Proxy method " << method->PrettyMethod() 1123 << " (declaring class: " << method->GetDeclaringClass()->PrettyClass() << ")" 1124 << " should not hit instrumentation entrypoint."; 1125 if (instrumentation->IsDeoptimized(method)) { 1126 result = GetQuickToInterpreterBridge(); 1127 } else { 1128 // This will get the entry point either from the oat file, the JIT or the appropriate bridge 1129 // method if none of those can be found. 1130 result = instrumentation->GetCodeForInvoke(method); 1131 jit::Jit* jit = Runtime::Current()->GetJit(); 1132 DCHECK_NE(result, GetQuickInstrumentationEntryPoint()) << method->PrettyMethod(); 1133 DCHECK(jit == nullptr || 1134 // Native methods come through here in Interpreter entrypoints. We might not have 1135 // disabled jit-gc but that is fine since we won't return jit-code for native methods. 1136 method->IsNative() || 1137 !jit->GetCodeCache()->GetGarbageCollectCode()); 1138 DCHECK(!method->IsNative() || 1139 jit == nullptr || 1140 !jit->GetCodeCache()->ContainsPc(result)) 1141 << method->PrettyMethod() << " code will jump to possibly cleaned up jit code!"; 1142 } 1143 1144 bool interpreter_entry = (result == GetQuickToInterpreterBridge()); 1145 bool is_static = method->IsStatic(); 1146 uint32_t shorty_len; 1147 const char* shorty = 1148 method->GetInterfaceMethodIfProxy(kRuntimePointerSize)->GetShorty(&shorty_len); 1149 1150 ScopedObjectAccessUnchecked soa(self); 1151 RememberForGcArgumentVisitor visitor(sp, is_static, shorty, shorty_len, &soa); 1152 visitor.VisitArguments(); 1153 1154 instrumentation->PushInstrumentationStackFrame(self, 1155 is_static ? nullptr : this_object, 1156 method, 1157 QuickArgumentVisitor::GetCallingPc(sp), 1158 interpreter_entry); 1159 1160 visitor.FixupReferences(); 1161 if (UNLIKELY(self->IsExceptionPending())) { 1162 return nullptr; 1163 } 1164 CHECK(result != nullptr) << method->PrettyMethod(); 1165 return result; 1166 } 1167 1168 extern "C" TwoWordReturn artInstrumentationMethodExitFromCode(Thread* self, 1169 ArtMethod** sp, 1170 uint64_t* gpr_result, 1171 uint64_t* fpr_result) 1172 REQUIRES_SHARED(Locks::mutator_lock_) { 1173 DCHECK_EQ(reinterpret_cast<uintptr_t>(self), reinterpret_cast<uintptr_t>(Thread::Current())); 1174 CHECK(gpr_result != nullptr); 1175 CHECK(fpr_result != nullptr); 1176 // Instrumentation exit stub must not be entered with a pending exception. 1177 CHECK(!self->IsExceptionPending()) << "Enter instrumentation exit stub with pending exception " 1178 << self->GetException()->Dump(); 1179 // Compute address of return PC and sanity check that it currently holds 0. 
1180 constexpr size_t return_pc_offset = 1181 RuntimeCalleeSaveFrame::GetReturnPcOffset(CalleeSaveType::kSaveEverything); 1182 uintptr_t* return_pc = reinterpret_cast<uintptr_t*>(reinterpret_cast<uint8_t*>(sp) + 1183 return_pc_offset); 1184 CHECK_EQ(*return_pc, 0U); 1185 1186 // Pop the frame filling in the return pc. The low half of the return value is 0 when 1187 // deoptimization shouldn't be performed with the high-half having the return address. When 1188 // deoptimization should be performed the low half is zero and the high-half the address of the 1189 // deoptimization entry point. 1190 instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation(); 1191 TwoWordReturn return_or_deoptimize_pc = instrumentation->PopInstrumentationStackFrame( 1192 self, return_pc, gpr_result, fpr_result); 1193 if (self->IsExceptionPending() || self->ObserveAsyncException()) { 1194 return GetTwoWordFailureValue(); 1195 } 1196 return return_or_deoptimize_pc; 1197 } 1198 1199 static std::string DumpInstruction(ArtMethod* method, uint32_t dex_pc) 1200 REQUIRES_SHARED(Locks::mutator_lock_) { 1201 if (dex_pc == static_cast<uint32_t>(-1)) { 1202 CHECK(method == jni::DecodeArtMethod(WellKnownClasses::java_lang_String_charAt)); 1203 return "<native>"; 1204 } else { 1205 CodeItemInstructionAccessor accessor = method->DexInstructions(); 1206 CHECK_LT(dex_pc, accessor.InsnsSizeInCodeUnits()); 1207 return accessor.InstructionAt(dex_pc).DumpString(method->GetDexFile()); 1208 } 1209 } 1210 1211 static void DumpB74410240ClassData(ObjPtr<mirror::Class> klass) 1212 REQUIRES_SHARED(Locks::mutator_lock_) { 1213 std::string storage; 1214 const char* descriptor = klass->GetDescriptor(&storage); 1215 LOG(FATAL_WITHOUT_ABORT) << " " << DescribeLoaders(klass->GetClassLoader(), descriptor); 1216 const OatDexFile* oat_dex_file = klass->GetDexFile().GetOatDexFile(); 1217 if (oat_dex_file != nullptr) { 1218 const OatFile* oat_file = oat_dex_file->GetOatFile(); 1219 const char* dex2oat_cmdline = 1220 oat_file->GetOatHeader().GetStoreValueByKey(OatHeader::kDex2OatCmdLineKey); 1221 LOG(FATAL_WITHOUT_ABORT) << " OatFile: " << oat_file->GetLocation() 1222 << "; " << (dex2oat_cmdline != nullptr ? dex2oat_cmdline : "<not recorded>"); 1223 } 1224 } 1225 1226 static void DumpB74410240DebugData(ArtMethod** sp) REQUIRES_SHARED(Locks::mutator_lock_) { 1227 // Mimic the search for the caller and dump some data while doing so.
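// (This deliberately repeats the caller lookup performed by GetCallingMethod()/GetCallingDexPc()
// above so that the intermediate values can be logged.)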
1228 LOG(FATAL_WITHOUT_ABORT) << "Dumping debugging data, please attach a bugreport to b/74410240."; 1229 1230 constexpr CalleeSaveType type = CalleeSaveType::kSaveRefsAndArgs; 1231 CHECK_EQ(*sp, Runtime::Current()->GetCalleeSaveMethod(type)); 1232 1233 constexpr size_t callee_frame_size = RuntimeCalleeSaveFrame::GetFrameSize(type); 1234 auto** caller_sp = reinterpret_cast<ArtMethod**>( 1235 reinterpret_cast<uintptr_t>(sp) + callee_frame_size); 1236 constexpr size_t callee_return_pc_offset = RuntimeCalleeSaveFrame::GetReturnPcOffset(type); 1237 uintptr_t caller_pc = *reinterpret_cast<uintptr_t*>( 1238 (reinterpret_cast<uint8_t*>(sp) + callee_return_pc_offset)); 1239 ArtMethod* outer_method = *caller_sp; 1240 1241 if (UNLIKELY(caller_pc == reinterpret_cast<uintptr_t>(GetQuickInstrumentationExitPc()))) { 1242 LOG(FATAL_WITHOUT_ABORT) << "Method: " << outer_method->PrettyMethod() 1243 << " native pc: " << caller_pc << " Instrumented!"; 1244 return; 1245 } 1246 1247 const OatQuickMethodHeader* current_code = outer_method->GetOatQuickMethodHeader(caller_pc); 1248 CHECK(current_code != nullptr); 1249 CHECK(current_code->IsOptimized()); 1250 uintptr_t native_pc_offset = current_code->NativeQuickPcOffset(caller_pc); 1251 CodeInfo code_info(current_code); 1252 StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset); 1253 CHECK(stack_map.IsValid()); 1254 uint32_t dex_pc = stack_map.GetDexPc(); 1255 1256 // Log the outer method and its associated dex file and class table pointer which can be used 1257 // to find out if the inlined methods were defined by other dex file(s) or class loader(s). 1258 ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); 1259 LOG(FATAL_WITHOUT_ABORT) << "Outer: " << outer_method->PrettyMethod() 1260 << " native pc: " << caller_pc 1261 << " dex pc: " << dex_pc 1262 << " dex file: " << outer_method->GetDexFile()->GetLocation() 1263 << " class table: " << class_linker->ClassTableForClassLoader(outer_method->GetClassLoader()); 1264 DumpB74410240ClassData(outer_method->GetDeclaringClass()); 1265 LOG(FATAL_WITHOUT_ABORT) << " instruction: " << DumpInstruction(outer_method, dex_pc); 1266 1267 ArtMethod* caller = outer_method; 1268 BitTableRange<InlineInfo> inline_infos = code_info.GetInlineInfosOf(stack_map); 1269 for (InlineInfo inline_info : inline_infos) { 1270 const char* tag = ""; 1271 dex_pc = inline_info.GetDexPc(); 1272 if (inline_info.EncodesArtMethod()) { 1273 tag = "encoded "; 1274 caller = inline_info.GetArtMethod(); 1275 } else { 1276 uint32_t method_index = code_info.GetMethodIndexOf(inline_info); 1277 if (dex_pc == static_cast<uint32_t>(-1)) { 1278 tag = "special "; 1279 CHECK(inline_info.Equals(inline_infos.back())); 1280 caller = jni::DecodeArtMethod(WellKnownClasses::java_lang_String_charAt); 1281 CHECK_EQ(caller->GetDexMethodIndex(), method_index); 1282 } else { 1283 ObjPtr<mirror::DexCache> dex_cache = caller->GetDexCache(); 1284 ObjPtr<mirror::ClassLoader> class_loader = caller->GetClassLoader(); 1285 caller = class_linker->LookupResolvedMethod(method_index, dex_cache, class_loader); 1286 CHECK(caller != nullptr); 1287 } 1288 } 1289 LOG(FATAL_WITHOUT_ABORT) << "InlineInfo #" << inline_info.Row() 1290 << ": " << tag << caller->PrettyMethod() 1291 << " dex pc: " << dex_pc 1292 << " dex file: " << caller->GetDexFile()->GetLocation() 1293 << " class table: " 1294 << class_linker->ClassTableForClassLoader(caller->GetClassLoader()); 1295 DumpB74410240ClassData(caller->GetDeclaringClass()); 1296 LOG(FATAL_WITHOUT_ABORT) << " 
instruction: " << DumpInstruction(caller, dex_pc); 1297 } 1298 } 1299 1300 // Lazily resolve a method for quick. Called by stub code. 1301 extern "C" const void* artQuickResolutionTrampoline( 1302 ArtMethod* called, mirror::Object* receiver, Thread* self, ArtMethod** sp) 1303 REQUIRES_SHARED(Locks::mutator_lock_) { 1304 // The resolution trampoline stashes the resolved method into the callee-save frame to transport 1305 // it. Thus, when exiting, the stack cannot be verified (as the resolved method most likely 1306 // does not have the same stack layout as the callee-save method). 1307 ScopedQuickEntrypointChecks sqec(self, kIsDebugBuild, false); 1308 // Start new JNI local reference state 1309 JNIEnvExt* env = self->GetJniEnv(); 1310 ScopedObjectAccessUnchecked soa(env); 1311 ScopedJniEnvLocalRefState env_state(env); 1312 const char* old_cause = self->StartAssertNoThreadSuspension("Quick method resolution set up"); 1313 1314 // Compute details about the called method (avoid GCs) 1315 ClassLinker* linker = Runtime::Current()->GetClassLinker(); 1316 InvokeType invoke_type; 1317 MethodReference called_method(nullptr, 0); 1318 const bool called_method_known_on_entry = !called->IsRuntimeMethod(); 1319 ArtMethod* caller = nullptr; 1320 if (!called_method_known_on_entry) { 1321 caller = QuickArgumentVisitor::GetCallingMethod(sp); 1322 called_method.dex_file = caller->GetDexFile(); 1323 1324 { 1325 uint32_t dex_pc = QuickArgumentVisitor::GetCallingDexPc(sp); 1326 CodeItemInstructionAccessor accessor(caller->DexInstructions()); 1327 CHECK_LT(dex_pc, accessor.InsnsSizeInCodeUnits()); 1328 const Instruction& instr = accessor.InstructionAt(dex_pc); 1329 Instruction::Code instr_code = instr.Opcode(); 1330 bool is_range; 1331 switch (instr_code) { 1332 case Instruction::INVOKE_DIRECT: 1333 invoke_type = kDirect; 1334 is_range = false; 1335 break; 1336 case Instruction::INVOKE_DIRECT_RANGE: 1337 invoke_type = kDirect; 1338 is_range = true; 1339 break; 1340 case Instruction::INVOKE_STATIC: 1341 invoke_type = kStatic; 1342 is_range = false; 1343 break; 1344 case Instruction::INVOKE_STATIC_RANGE: 1345 invoke_type = kStatic; 1346 is_range = true; 1347 break; 1348 case Instruction::INVOKE_SUPER: 1349 invoke_type = kSuper; 1350 is_range = false; 1351 break; 1352 case Instruction::INVOKE_SUPER_RANGE: 1353 invoke_type = kSuper; 1354 is_range = true; 1355 break; 1356 case Instruction::INVOKE_VIRTUAL: 1357 invoke_type = kVirtual; 1358 is_range = false; 1359 break; 1360 case Instruction::INVOKE_VIRTUAL_RANGE: 1361 invoke_type = kVirtual; 1362 is_range = true; 1363 break; 1364 case Instruction::INVOKE_INTERFACE: 1365 invoke_type = kInterface; 1366 is_range = false; 1367 break; 1368 case Instruction::INVOKE_INTERFACE_RANGE: 1369 invoke_type = kInterface; 1370 is_range = true; 1371 break; 1372 default: 1373 DumpB74410240DebugData(sp); 1374 LOG(FATAL) << "Unexpected call into trampoline: " << instr.DumpString(nullptr); 1375 UNREACHABLE(); 1376 } 1377 called_method.index = (is_range) ? 
instr.VRegB_3rc() : instr.VRegB_35c(); 1378 VLOG(dex) << "Accessed dex file for invoke " << invoke_type << " " 1379 << called_method.index; 1380 } 1381 } else { 1382 invoke_type = kStatic; 1383 called_method.dex_file = called->GetDexFile(); 1384 called_method.index = called->GetDexMethodIndex(); 1385 } 1386 uint32_t shorty_len; 1387 const char* shorty = 1388 called_method.dex_file->GetMethodShorty(called_method.GetMethodId(), &shorty_len); 1389 RememberForGcArgumentVisitor visitor(sp, invoke_type == kStatic, shorty, shorty_len, &soa); 1390 visitor.VisitArguments(); 1391 self->EndAssertNoThreadSuspension(old_cause); 1392 const bool virtual_or_interface = invoke_type == kVirtual || invoke_type == kInterface; 1393 // Resolve method filling in dex cache. 1394 if (!called_method_known_on_entry) { 1395 StackHandleScope<1> hs(self); 1396 mirror::Object* dummy = nullptr; 1397 HandleWrapper<mirror::Object> h_receiver( 1398 hs.NewHandleWrapper(virtual_or_interface ? &receiver : &dummy)); 1399 DCHECK_EQ(caller->GetDexFile(), called_method.dex_file); 1400 called = linker->ResolveMethod<ClassLinker::ResolveMode::kCheckICCEAndIAE>( 1401 self, called_method.index, caller, invoke_type); 1402 1403 // Update .bss entry in oat file if any. 1404 if (called != nullptr && called_method.dex_file->GetOatDexFile() != nullptr) { 1405 size_t bss_offset = IndexBssMappingLookup::GetBssOffset( 1406 called_method.dex_file->GetOatDexFile()->GetMethodBssMapping(), 1407 called_method.index, 1408 called_method.dex_file->NumMethodIds(), 1409 static_cast<size_t>(kRuntimePointerSize)); 1410 if (bss_offset != IndexBssMappingLookup::npos) { 1411 DCHECK_ALIGNED(bss_offset, static_cast<size_t>(kRuntimePointerSize)); 1412 const OatFile* oat_file = called_method.dex_file->GetOatDexFile()->GetOatFile(); 1413 ArtMethod** method_entry = reinterpret_cast<ArtMethod**>(const_cast<uint8_t*>( 1414 oat_file->BssBegin() + bss_offset)); 1415 DCHECK_GE(method_entry, oat_file->GetBssMethods().data()); 1416 DCHECK_LT(method_entry, 1417 oat_file->GetBssMethods().data() + oat_file->GetBssMethods().size()); 1418 *method_entry = called; 1419 } 1420 } 1421 } 1422 const void* code = nullptr; 1423 if (LIKELY(!self->IsExceptionPending())) { 1424 // Incompatible class change should have been handled in resolve method. 1425 CHECK(!called->CheckIncompatibleClassChange(invoke_type)) 1426 << called->PrettyMethod() << " " << invoke_type; 1427 if (virtual_or_interface || invoke_type == kSuper) { 1428 // Refine called method based on receiver for kVirtual/kInterface, and 1429 // caller for kSuper. 
1430 ArtMethod* orig_called = called; 1431 if (invoke_type == kVirtual) { 1432 CHECK(receiver != nullptr) << invoke_type; 1433 called = receiver->GetClass()->FindVirtualMethodForVirtual(called, kRuntimePointerSize); 1434 } else if (invoke_type == kInterface) { 1435 CHECK(receiver != nullptr) << invoke_type; 1436 called = receiver->GetClass()->FindVirtualMethodForInterface(called, kRuntimePointerSize); 1437 } else { 1438 DCHECK_EQ(invoke_type, kSuper); 1439 CHECK(caller != nullptr) << invoke_type; 1440 ObjPtr<mirror::Class> ref_class = linker->LookupResolvedType( 1441 caller->GetDexFile()->GetMethodId(called_method.index).class_idx_, caller); 1442 if (ref_class->IsInterface()) { 1443 called = ref_class->FindVirtualMethodForInterfaceSuper(called, kRuntimePointerSize); 1444 } else { 1445 called = caller->GetDeclaringClass()->GetSuperClass()->GetVTableEntry( 1446 called->GetMethodIndex(), kRuntimePointerSize); 1447 } 1448 } 1449 1450 CHECK(called != nullptr) << orig_called->PrettyMethod() << " " 1451 << mirror::Object::PrettyTypeOf(receiver) << " " 1452 << invoke_type << " " << orig_called->GetVtableIndex(); 1453 } 1454 1455 // Ensure that the called method's class is initialized. 1456 StackHandleScope<1> hs(soa.Self()); 1457 Handle<mirror::Class> called_class(hs.NewHandle(called->GetDeclaringClass())); 1458 linker->EnsureInitialized(soa.Self(), called_class, true, true); 1459 bool force_interpreter = self->IsForceInterpreter() && !called->IsNative(); 1460 if (LIKELY(called_class->IsInitialized())) { 1461 if (UNLIKELY(force_interpreter || 1462 Dbg::IsForcedInterpreterNeededForResolution(self, called))) { 1463 // If we are single-stepping or the called method is deoptimized (by a 1464 // breakpoint, for example), then we have to execute the called method 1465 // with the interpreter. 1466 code = GetQuickToInterpreterBridge(); 1467 } else if (UNLIKELY(Dbg::IsForcedInstrumentationNeededForResolution(self, caller))) { 1468 // If the caller is deoptimized (by a breakpoint, for example), we have to 1469 // continue its execution with interpreter when returning from the called 1470 // method. Because we do not want to execute the called method with the 1471 // interpreter, we wrap its execution into the instrumentation stubs. 1472 // When the called method returns, it will execute the instrumentation 1473 // exit hook that will determine the need of the interpreter with a call 1474 // to Dbg::IsForcedInterpreterNeededForUpcall and deoptimize the stack if 1475 // it is needed. 1476 code = GetQuickInstrumentationEntryPoint(); 1477 } else { 1478 code = called->GetEntryPointFromQuickCompiledCode(); 1479 } 1480 } else if (called_class->IsInitializing()) { 1481 if (UNLIKELY(force_interpreter || 1482 Dbg::IsForcedInterpreterNeededForResolution(self, called))) { 1483 // If we are single-stepping or the called method is deoptimized (by a 1484 // breakpoint, for example), then we have to execute the called method 1485 // with the interpreter. 1486 code = GetQuickToInterpreterBridge(); 1487 } else if (invoke_type == kStatic) { 1488 // Class is still initializing, go to JIT or oat and grab code (trampoline must be 1489 // left in place until class is initialized to stop races between threads). 1490 if (Runtime::Current()->GetJit() != nullptr) { 1491 code = Runtime::Current()->GetJit()->GetCodeCache()->GetZygoteSavedEntryPoint(called); 1492 } 1493 if (code == nullptr) { 1494 code = linker->GetQuickOatCodeFor(called); 1495 } 1496 } else { 1497 // No trampoline for non-static methods. 
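// (Summary of the entry point selection above and below, illustrative only:
//    initialized  + forced interpreter   -> GetQuickToInterpreterBridge()
//    initialized  + instrumented caller  -> GetQuickInstrumentationEntryPoint()
//    initialized                         -> called->GetEntryPointFromQuickCompiledCode()
//    initializing + forced interpreter   -> GetQuickToInterpreterBridge()
//    initializing + static               -> JIT zygote-saved code, else oat code
//    initializing + non-static           -> compiled code entry point (next line)
//    erroneous                           -> code stays null, exception pending.)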
1498 code = called->GetEntryPointFromQuickCompiledCode();
1499 }
1500 } else {
1501 DCHECK(called_class->IsErroneous());
1502 }
1503 }
1504 CHECK_EQ(code == nullptr, self->IsExceptionPending());
1505 // Fix up any locally saved objects that may have moved during a GC.
1506 visitor.FixupReferences();
1507 // Place called method in callee-save frame to be placed as first argument to quick method.
1508 *sp = called;
1509
1510 return code;
1511 }
1512
1513 /*
1514 * This class uses a couple of observations to unite the different calling conventions through
1515 * a few constants.
1516 *
1517 * 1) Number of registers used for passing is normally even, so counting down has no penalty for
1518 * possible alignment.
1519 * 2) Known 64b architectures store 8B units on the stack, both for integral and floating point
1520 * types, so using uintptr_t is OK. Also means that we can use kRegistersNeededX to denote
1521 * when we have to split things.
1522 * 3) The only soft-float, Arm, is 32b, so no widening needs to be taken into account for floats
1523 * and we can use Int handling directly.
1524 * 4) Only 64b architectures widen, and their stack is aligned 8B anyways, so no padding code
1525 * is necessary when widening. Also, widening of Ints will take place implicitly, and the
1526 * extension should be compatible with Aarch64, which mandates copying the available bits
1527 * into LSB and leaving the rest unspecified.
1528 * 5) Aligning longs and doubles is necessary on arm only, and it's the same in registers and on
1529 * the stack.
1530 * 6) There is only little endian.
1531 *
1532 *
1533 * Actual work is supposed to be done in a delegate of the template type. The interface is as
1534 * follows:
1535 *
1536 * void PushGpr(uintptr_t): Add a value for the next GPR
1537 *
1538 * void PushFpr4(float): Add a value for the next FPR of size 32b. Is only called if we need
1539 * padding, that is, when the architecture is 32b and aligns 64b.
1540 *
1541 * void PushFpr8(uint64_t): Push a double. We _will_ call this on 32b, it's the callee's job to
1542 * split this if necessary. The current state will have aligned, if
1543 * necessary.
1544 *
1545 * void PushStack(uintptr_t): Push a value to the stack.
1546 *
1547 * uintptr_t PushHandle(mirror::Object* ref): Add a reference to the HandleScope. This _will_ be called with nullptr,
1548 * as this might be important for null initialization.
1549 * Must return the jobject, that is, the reference to the
1550 * entry in the HandleScope (nullptr if necessary).
1551 *
1552 */
1553 template<class T> class BuildNativeCallFrameStateMachine {
1554 public:
1555 #if defined(__arm__)
1556 // TODO: These are all dummy values!
1557 static constexpr bool kNativeSoftFloatAbi = true;
1558 static constexpr size_t kNumNativeGprArgs = 4; // 4 arguments passed in GPRs, r0-r3
1559 static constexpr size_t kNumNativeFprArgs = 0; // 0 arguments passed in FPRs.
1560
1561 static constexpr size_t kRegistersNeededForLong = 2;
1562 static constexpr size_t kRegistersNeededForDouble = 2;
1563 static constexpr bool kMultiRegistersAligned = true;
1564 static constexpr bool kMultiFPRegistersWidened = false;
1565 static constexpr bool kMultiGPRegistersWidened = false;
1566 static constexpr bool kAlignLongOnStack = true;
1567 static constexpr bool kAlignDoubleOnStack = true;
1568 #elif defined(__aarch64__)
1569 static constexpr bool kNativeSoftFloatAbi = false; // This is a hard float ABI.
1570 static constexpr size_t kNumNativeGprArgs = 8; // 8 arguments passed in GPRs (x0-x7).
1571 static constexpr size_t kNumNativeFprArgs = 8; // 8 arguments passed in FPRs.
1572
1573 static constexpr size_t kRegistersNeededForLong = 1;
1574 static constexpr size_t kRegistersNeededForDouble = 1;
1575 static constexpr bool kMultiRegistersAligned = false;
1576 static constexpr bool kMultiFPRegistersWidened = false;
1577 static constexpr bool kMultiGPRegistersWidened = false;
1578 static constexpr bool kAlignLongOnStack = false;
1579 static constexpr bool kAlignDoubleOnStack = false;
1580 #elif defined(__mips__) && !defined(__LP64__)
1581 static constexpr bool kNativeSoftFloatAbi = true; // FP args are prepared in GPRs/on the stack; see the MIPS32 handling in artQuickGenericJniTrampoline.
1582 static constexpr size_t kNumNativeGprArgs = 4; // 4 arguments passed in GPRs.
1583 static constexpr size_t kNumNativeFprArgs = 0; // 0 arguments passed in FPRs.
1584
1585 static constexpr size_t kRegistersNeededForLong = 2;
1586 static constexpr size_t kRegistersNeededForDouble = 2;
1587 static constexpr bool kMultiRegistersAligned = true;
1588 static constexpr bool kMultiFPRegistersWidened = true;
1589 static constexpr bool kMultiGPRegistersWidened = false;
1590 static constexpr bool kAlignLongOnStack = true;
1591 static constexpr bool kAlignDoubleOnStack = true;
1592 #elif defined(__mips__) && defined(__LP64__)
1593 // Let the code prepare GPRs only and we will load the FPRs with same data.
1594 static constexpr bool kNativeSoftFloatAbi = true;
1595 static constexpr size_t kNumNativeGprArgs = 8;
1596 static constexpr size_t kNumNativeFprArgs = 0;
1597
1598 static constexpr size_t kRegistersNeededForLong = 1;
1599 static constexpr size_t kRegistersNeededForDouble = 1;
1600 static constexpr bool kMultiRegistersAligned = false;
1601 static constexpr bool kMultiFPRegistersWidened = false;
1602 static constexpr bool kMultiGPRegistersWidened = true;
1603 static constexpr bool kAlignLongOnStack = false;
1604 static constexpr bool kAlignDoubleOnStack = false;
1605 #elif defined(__i386__)
1606 // TODO: Check these!
1607 static constexpr bool kNativeSoftFloatAbi = false; // Not using int registers for fp
1608 static constexpr size_t kNumNativeGprArgs = 0; // 0 arguments passed in GPRs; everything goes on the stack.
1609 static constexpr size_t kNumNativeFprArgs = 0; // 0 arguments passed in FPRs.
1610
1611 static constexpr size_t kRegistersNeededForLong = 2;
1612 static constexpr size_t kRegistersNeededForDouble = 2;
1613 static constexpr bool kMultiRegistersAligned = false; // x86 not using regs, anyways
1614 static constexpr bool kMultiFPRegistersWidened = false;
1615 static constexpr bool kMultiGPRegistersWidened = false;
1616 static constexpr bool kAlignLongOnStack = false;
1617 static constexpr bool kAlignDoubleOnStack = false;
1618 #elif defined(__x86_64__)
1619 static constexpr bool kNativeSoftFloatAbi = false; // This is a hard float ABI.
1620 static constexpr size_t kNumNativeGprArgs = 6; // 6 arguments passed in GPRs.
1621 static constexpr size_t kNumNativeFprArgs = 8; // 8 arguments passed in FPRs.
1622 1623 static constexpr size_t kRegistersNeededForLong = 1; 1624 static constexpr size_t kRegistersNeededForDouble = 1; 1625 static constexpr bool kMultiRegistersAligned = false; 1626 static constexpr bool kMultiFPRegistersWidened = false; 1627 static constexpr bool kMultiGPRegistersWidened = false; 1628 static constexpr bool kAlignLongOnStack = false; 1629 static constexpr bool kAlignDoubleOnStack = false; 1630 #else 1631 #error "Unsupported architecture" 1632 #endif 1633 1634 public: 1635 explicit BuildNativeCallFrameStateMachine(T* delegate) 1636 : gpr_index_(kNumNativeGprArgs), 1637 fpr_index_(kNumNativeFprArgs), 1638 stack_entries_(0), 1639 delegate_(delegate) { 1640 // For register alignment, we want to assume that counters (gpr_index_, fpr_index_) are even iff 1641 // the next register is even; counting down is just to make the compiler happy... 1642 static_assert(kNumNativeGprArgs % 2 == 0U, "Number of native GPR arguments not even"); 1643 static_assert(kNumNativeFprArgs % 2 == 0U, "Number of native FPR arguments not even"); 1644 } 1645 1646 virtual ~BuildNativeCallFrameStateMachine() {} 1647 1648 bool HavePointerGpr() const { 1649 return gpr_index_ > 0; 1650 } 1651 1652 void AdvancePointer(const void* val) { 1653 if (HavePointerGpr()) { 1654 gpr_index_--; 1655 PushGpr(reinterpret_cast<uintptr_t>(val)); 1656 } else { 1657 stack_entries_++; // TODO: have a field for pointer length as multiple of 32b 1658 PushStack(reinterpret_cast<uintptr_t>(val)); 1659 gpr_index_ = 0; 1660 } 1661 } 1662 1663 bool HaveHandleScopeGpr() const { 1664 return gpr_index_ > 0; 1665 } 1666 1667 void AdvanceHandleScope(mirror::Object* ptr) REQUIRES_SHARED(Locks::mutator_lock_) { 1668 uintptr_t handle = PushHandle(ptr); 1669 if (HaveHandleScopeGpr()) { 1670 gpr_index_--; 1671 PushGpr(handle); 1672 } else { 1673 stack_entries_++; 1674 PushStack(handle); 1675 gpr_index_ = 0; 1676 } 1677 } 1678 1679 bool HaveIntGpr() const { 1680 return gpr_index_ > 0; 1681 } 1682 1683 void AdvanceInt(uint32_t val) { 1684 if (HaveIntGpr()) { 1685 gpr_index_--; 1686 if (kMultiGPRegistersWidened) { 1687 DCHECK_EQ(sizeof(uintptr_t), sizeof(int64_t)); 1688 PushGpr(static_cast<int64_t>(bit_cast<int32_t, uint32_t>(val))); 1689 } else { 1690 PushGpr(val); 1691 } 1692 } else { 1693 stack_entries_++; 1694 if (kMultiGPRegistersWidened) { 1695 DCHECK_EQ(sizeof(uintptr_t), sizeof(int64_t)); 1696 PushStack(static_cast<int64_t>(bit_cast<int32_t, uint32_t>(val))); 1697 } else { 1698 PushStack(val); 1699 } 1700 gpr_index_ = 0; 1701 } 1702 } 1703 1704 bool HaveLongGpr() const { 1705 return gpr_index_ >= kRegistersNeededForLong + (LongGprNeedsPadding() ? 
1 : 0); 1706 } 1707 1708 bool LongGprNeedsPadding() const { 1709 return kRegistersNeededForLong > 1 && // only pad when using multiple registers 1710 kAlignLongOnStack && // and when it needs alignment 1711 (gpr_index_ & 1) == 1; // counter is odd, see constructor 1712 } 1713 1714 bool LongStackNeedsPadding() const { 1715 return kRegistersNeededForLong > 1 && // only pad when using multiple registers 1716 kAlignLongOnStack && // and when it needs 8B alignment 1717 (stack_entries_ & 1) == 1; // counter is odd 1718 } 1719 1720 void AdvanceLong(uint64_t val) { 1721 if (HaveLongGpr()) { 1722 if (LongGprNeedsPadding()) { 1723 PushGpr(0); 1724 gpr_index_--; 1725 } 1726 if (kRegistersNeededForLong == 1) { 1727 PushGpr(static_cast<uintptr_t>(val)); 1728 } else { 1729 PushGpr(static_cast<uintptr_t>(val & 0xFFFFFFFF)); 1730 PushGpr(static_cast<uintptr_t>((val >> 32) & 0xFFFFFFFF)); 1731 } 1732 gpr_index_ -= kRegistersNeededForLong; 1733 } else { 1734 if (LongStackNeedsPadding()) { 1735 PushStack(0); 1736 stack_entries_++; 1737 } 1738 if (kRegistersNeededForLong == 1) { 1739 PushStack(static_cast<uintptr_t>(val)); 1740 stack_entries_++; 1741 } else { 1742 PushStack(static_cast<uintptr_t>(val & 0xFFFFFFFF)); 1743 PushStack(static_cast<uintptr_t>((val >> 32) & 0xFFFFFFFF)); 1744 stack_entries_ += 2; 1745 } 1746 gpr_index_ = 0; 1747 } 1748 } 1749 1750 bool HaveFloatFpr() const { 1751 return fpr_index_ > 0; 1752 } 1753 1754 void AdvanceFloat(float val) { 1755 if (kNativeSoftFloatAbi) { 1756 AdvanceInt(bit_cast<uint32_t, float>(val)); 1757 } else { 1758 if (HaveFloatFpr()) { 1759 fpr_index_--; 1760 if (kRegistersNeededForDouble == 1) { 1761 if (kMultiFPRegistersWidened) { 1762 PushFpr8(bit_cast<uint64_t, double>(val)); 1763 } else { 1764 // No widening, just use the bits. 1765 PushFpr8(static_cast<uint64_t>(bit_cast<uint32_t, float>(val))); 1766 } 1767 } else { 1768 PushFpr4(val); 1769 } 1770 } else { 1771 stack_entries_++; 1772 if (kRegistersNeededForDouble == 1 && kMultiFPRegistersWidened) { 1773 // Need to widen before storing: Note the "double" in the template instantiation. 1774 // Note: We need to jump through those hoops to make the compiler happy. 1775 DCHECK_EQ(sizeof(uintptr_t), sizeof(uint64_t)); 1776 PushStack(static_cast<uintptr_t>(bit_cast<uint64_t, double>(val))); 1777 } else { 1778 PushStack(static_cast<uintptr_t>(bit_cast<uint32_t, float>(val))); 1779 } 1780 fpr_index_ = 0; 1781 } 1782 } 1783 } 1784 1785 bool HaveDoubleFpr() const { 1786 return fpr_index_ >= kRegistersNeededForDouble + (DoubleFprNeedsPadding() ? 
1 : 0); 1787 } 1788 1789 bool DoubleFprNeedsPadding() const { 1790 return kRegistersNeededForDouble > 1 && // only pad when using multiple registers 1791 kAlignDoubleOnStack && // and when it needs alignment 1792 (fpr_index_ & 1) == 1; // counter is odd, see constructor 1793 } 1794 1795 bool DoubleStackNeedsPadding() const { 1796 return kRegistersNeededForDouble > 1 && // only pad when using multiple registers 1797 kAlignDoubleOnStack && // and when it needs 8B alignment 1798 (stack_entries_ & 1) == 1; // counter is odd 1799 } 1800 1801 void AdvanceDouble(uint64_t val) { 1802 if (kNativeSoftFloatAbi) { 1803 AdvanceLong(val); 1804 } else { 1805 if (HaveDoubleFpr()) { 1806 if (DoubleFprNeedsPadding()) { 1807 PushFpr4(0); 1808 fpr_index_--; 1809 } 1810 PushFpr8(val); 1811 fpr_index_ -= kRegistersNeededForDouble; 1812 } else { 1813 if (DoubleStackNeedsPadding()) { 1814 PushStack(0); 1815 stack_entries_++; 1816 } 1817 if (kRegistersNeededForDouble == 1) { 1818 PushStack(static_cast<uintptr_t>(val)); 1819 stack_entries_++; 1820 } else { 1821 PushStack(static_cast<uintptr_t>(val & 0xFFFFFFFF)); 1822 PushStack(static_cast<uintptr_t>((val >> 32) & 0xFFFFFFFF)); 1823 stack_entries_ += 2; 1824 } 1825 fpr_index_ = 0; 1826 } 1827 } 1828 } 1829 1830 uint32_t GetStackEntries() const { 1831 return stack_entries_; 1832 } 1833 1834 uint32_t GetNumberOfUsedGprs() const { 1835 return kNumNativeGprArgs - gpr_index_; 1836 } 1837 1838 uint32_t GetNumberOfUsedFprs() const { 1839 return kNumNativeFprArgs - fpr_index_; 1840 } 1841 1842 private: 1843 void PushGpr(uintptr_t val) { 1844 delegate_->PushGpr(val); 1845 } 1846 void PushFpr4(float val) { 1847 delegate_->PushFpr4(val); 1848 } 1849 void PushFpr8(uint64_t val) { 1850 delegate_->PushFpr8(val); 1851 } 1852 void PushStack(uintptr_t val) { 1853 delegate_->PushStack(val); 1854 } 1855 uintptr_t PushHandle(mirror::Object* ref) REQUIRES_SHARED(Locks::mutator_lock_) { 1856 return delegate_->PushHandle(ref); 1857 } 1858 1859 uint32_t gpr_index_; // Number of free GPRs 1860 uint32_t fpr_index_; // Number of free FPRs 1861 uint32_t stack_entries_; // Stack entries are in multiples of 32b, as floats are usually not 1862 // extended 1863 T* const delegate_; // What Push implementation gets called 1864 }; 1865 1866 // Computes the sizes of register stacks and call stack area. Handling of references can be extended 1867 // in subclasses. 1868 // 1869 // To handle native pointers, use "L" in the shorty for an object reference, which simulates 1870 // them with handles. 1871 class ComputeNativeCallFrameSize { 1872 public: 1873 ComputeNativeCallFrameSize() : num_stack_entries_(0) {} 1874 1875 virtual ~ComputeNativeCallFrameSize() {} 1876 1877 uint32_t GetStackSize() const { 1878 return num_stack_entries_ * sizeof(uintptr_t); 1879 } 1880 1881 uint8_t* LayoutCallStack(uint8_t* sp8) const { 1882 sp8 -= GetStackSize(); 1883 // Align by kStackAlignment. 
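// E.g. assuming kStackAlignment is 16 bytes (an assumption for illustration), a sp8 of
// 0x7fffffd8 is rounded down to 0x7fffffd0 by the RoundDown() below.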
1884 sp8 = reinterpret_cast<uint8_t*>(RoundDown(reinterpret_cast<uintptr_t>(sp8), kStackAlignment)); 1885 return sp8; 1886 } 1887 1888 uint8_t* LayoutCallRegisterStacks(uint8_t* sp8, uintptr_t** start_gpr, uint32_t** start_fpr) 1889 const { 1890 // Assumption is OK right now, as we have soft-float arm 1891 size_t fregs = BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>::kNumNativeFprArgs; 1892 sp8 -= fregs * sizeof(uintptr_t); 1893 *start_fpr = reinterpret_cast<uint32_t*>(sp8); 1894 size_t iregs = BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>::kNumNativeGprArgs; 1895 sp8 -= iregs * sizeof(uintptr_t); 1896 *start_gpr = reinterpret_cast<uintptr_t*>(sp8); 1897 return sp8; 1898 } 1899 1900 uint8_t* LayoutNativeCall(uint8_t* sp8, uintptr_t** start_stack, uintptr_t** start_gpr, 1901 uint32_t** start_fpr) const { 1902 // Native call stack. 1903 sp8 = LayoutCallStack(sp8); 1904 *start_stack = reinterpret_cast<uintptr_t*>(sp8); 1905 1906 // Put fprs and gprs below. 1907 sp8 = LayoutCallRegisterStacks(sp8, start_gpr, start_fpr); 1908 1909 // Return the new bottom. 1910 return sp8; 1911 } 1912 1913 virtual void WalkHeader( 1914 BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>* sm ATTRIBUTE_UNUSED) 1915 REQUIRES_SHARED(Locks::mutator_lock_) { 1916 } 1917 1918 void Walk(const char* shorty, uint32_t shorty_len) REQUIRES_SHARED(Locks::mutator_lock_) { 1919 BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize> sm(this); 1920 1921 WalkHeader(&sm); 1922 1923 for (uint32_t i = 1; i < shorty_len; ++i) { 1924 Primitive::Type cur_type_ = Primitive::GetType(shorty[i]); 1925 switch (cur_type_) { 1926 case Primitive::kPrimNot: 1927 // TODO: fix abuse of mirror types. 1928 sm.AdvanceHandleScope( 1929 reinterpret_cast<mirror::Object*>(0x12345678)); 1930 break; 1931 1932 case Primitive::kPrimBoolean: 1933 case Primitive::kPrimByte: 1934 case Primitive::kPrimChar: 1935 case Primitive::kPrimShort: 1936 case Primitive::kPrimInt: 1937 sm.AdvanceInt(0); 1938 break; 1939 case Primitive::kPrimFloat: 1940 sm.AdvanceFloat(0); 1941 break; 1942 case Primitive::kPrimDouble: 1943 sm.AdvanceDouble(0); 1944 break; 1945 case Primitive::kPrimLong: 1946 sm.AdvanceLong(0); 1947 break; 1948 default: 1949 LOG(FATAL) << "Unexpected type: " << cur_type_ << " in " << shorty; 1950 UNREACHABLE(); 1951 } 1952 } 1953 1954 num_stack_entries_ = sm.GetStackEntries(); 1955 } 1956 1957 void PushGpr(uintptr_t /* val */) { 1958 // not optimizing registers, yet 1959 } 1960 1961 void PushFpr4(float /* val */) { 1962 // not optimizing registers, yet 1963 } 1964 1965 void PushFpr8(uint64_t /* val */) { 1966 // not optimizing registers, yet 1967 } 1968 1969 void PushStack(uintptr_t /* val */) { 1970 // counting is already done in the superclass 1971 } 1972 1973 virtual uintptr_t PushHandle(mirror::Object* /* ptr */) { 1974 return reinterpret_cast<uintptr_t>(nullptr); 1975 } 1976 1977 protected: 1978 uint32_t num_stack_entries_; 1979 }; 1980 1981 class ComputeGenericJniFrameSize final : public ComputeNativeCallFrameSize { 1982 public: 1983 explicit ComputeGenericJniFrameSize(bool critical_native) 1984 : num_handle_scope_references_(0), critical_native_(critical_native) {} 1985 1986 // Lays out the callee-save frame. Assumes that the incorrect frame corresponding to RefsAndArgs 1987 // is at *m = sp. Will update to point to the bottom of the save frame. 1988 // 1989 // Note: assumes ComputeAll() has been run before. 
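// Resulting layout after the fix-up below (illustrative, stack grows downwards):
//   | remaining callee-save frame (the old Method* slot is freed)   |
//   | HandleScope (header + num_handle_scope_references_ entries)   |
//   | ArtMethod*  (new slot, start aligned to kStackAlignment)      | <- new *m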
1990 void LayoutCalleeSaveFrame(Thread* self, ArtMethod*** m, void* sp, HandleScope** handle_scope) 1991 REQUIRES_SHARED(Locks::mutator_lock_) { 1992 ArtMethod* method = **m; 1993 1994 DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), kRuntimePointerSize); 1995 1996 uint8_t* sp8 = reinterpret_cast<uint8_t*>(sp); 1997 1998 // First, fix up the layout of the callee-save frame. 1999 // We have to squeeze in the HandleScope, and relocate the method pointer. 2000 2001 // "Free" the slot for the method. 2002 sp8 += sizeof(void*); // In the callee-save frame we use a full pointer. 2003 2004 // Under the callee saves put handle scope and new method stack reference. 2005 size_t handle_scope_size = HandleScope::SizeOf(num_handle_scope_references_); 2006 size_t scope_and_method = handle_scope_size + sizeof(ArtMethod*); 2007 2008 sp8 -= scope_and_method; 2009 // Align by kStackAlignment. 2010 sp8 = reinterpret_cast<uint8_t*>(RoundDown(reinterpret_cast<uintptr_t>(sp8), kStackAlignment)); 2011 2012 uint8_t* sp8_table = sp8 + sizeof(ArtMethod*); 2013 *handle_scope = HandleScope::Create(sp8_table, self->GetTopHandleScope(), 2014 num_handle_scope_references_); 2015 2016 // Add a slot for the method pointer, and fill it. Fix the pointer-pointer given to us. 2017 uint8_t* method_pointer = sp8; 2018 auto** new_method_ref = reinterpret_cast<ArtMethod**>(method_pointer); 2019 *new_method_ref = method; 2020 *m = new_method_ref; 2021 } 2022 2023 // Adds space for the cookie. Note: may leave stack unaligned. 2024 void LayoutCookie(uint8_t** sp) const { 2025 // Reference cookie and padding 2026 *sp -= 8; 2027 } 2028 2029 // Re-layout the callee-save frame (insert a handle-scope). Then add space for the cookie. 2030 // Returns the new bottom. Note: this may be unaligned. 2031 uint8_t* LayoutJNISaveFrame(Thread* self, ArtMethod*** m, void* sp, HandleScope** handle_scope) 2032 REQUIRES_SHARED(Locks::mutator_lock_) { 2033 // First, fix up the layout of the callee-save frame. 2034 // We have to squeeze in the HandleScope, and relocate the method pointer. 2035 LayoutCalleeSaveFrame(self, m, sp, handle_scope); 2036 2037 // The bottom of the callee-save frame is now where the method is, *m. 2038 uint8_t* sp8 = reinterpret_cast<uint8_t*>(*m); 2039 2040 // Add space for cookie. 2041 LayoutCookie(&sp8); 2042 2043 return sp8; 2044 } 2045 2046 // WARNING: After this, *sp won't be pointing to the method anymore! 2047 uint8_t* ComputeLayout(Thread* self, ArtMethod*** m, const char* shorty, uint32_t shorty_len, 2048 HandleScope** handle_scope, uintptr_t** start_stack, uintptr_t** start_gpr, 2049 uint32_t** start_fpr) 2050 REQUIRES_SHARED(Locks::mutator_lock_) { 2051 Walk(shorty, shorty_len); 2052 2053 // JNI part. 2054 uint8_t* sp8 = LayoutJNISaveFrame(self, m, reinterpret_cast<void*>(*m), handle_scope); 2055 2056 sp8 = LayoutNativeCall(sp8, start_stack, start_gpr, start_fpr); 2057 2058 // Return the new bottom. 2059 return sp8; 2060 } 2061 2062 uintptr_t PushHandle(mirror::Object* /* ptr */) override; 2063 2064 // Add JNIEnv* and jobj/jclass before the shorty-derived elements. 
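// E.g. for an instance method with shorty "IJF" (returns jint; takes a jlong and a jfloat),
// the native frame corresponds to a call of the form (illustrative sketch only):
//   jint fn(JNIEnv* env, jobject thiz, jlong j, jfloat f);
// @CriticalNative methods drop the two leading parameters.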
2065 void WalkHeader(BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>* sm) override 2066 REQUIRES_SHARED(Locks::mutator_lock_); 2067 2068 private: 2069 uint32_t num_handle_scope_references_; 2070 const bool critical_native_; 2071 }; 2072 2073 uintptr_t ComputeGenericJniFrameSize::PushHandle(mirror::Object* /* ptr */) { 2074 num_handle_scope_references_++; 2075 return reinterpret_cast<uintptr_t>(nullptr); 2076 } 2077 2078 void ComputeGenericJniFrameSize::WalkHeader( 2079 BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>* sm) { 2080 // First 2 parameters are always excluded for @CriticalNative. 2081 if (UNLIKELY(critical_native_)) { 2082 return; 2083 } 2084 2085 // JNIEnv 2086 sm->AdvancePointer(nullptr); 2087 2088 // Class object or this as first argument 2089 sm->AdvanceHandleScope(reinterpret_cast<mirror::Object*>(0x12345678)); 2090 } 2091 2092 // Class to push values to three separate regions. Used to fill the native call part. Adheres to 2093 // the template requirements of BuildGenericJniFrameStateMachine. 2094 class FillNativeCall { 2095 public: 2096 FillNativeCall(uintptr_t* gpr_regs, uint32_t* fpr_regs, uintptr_t* stack_args) : 2097 cur_gpr_reg_(gpr_regs), cur_fpr_reg_(fpr_regs), cur_stack_arg_(stack_args) {} 2098 2099 virtual ~FillNativeCall() {} 2100 2101 void Reset(uintptr_t* gpr_regs, uint32_t* fpr_regs, uintptr_t* stack_args) { 2102 cur_gpr_reg_ = gpr_regs; 2103 cur_fpr_reg_ = fpr_regs; 2104 cur_stack_arg_ = stack_args; 2105 } 2106 2107 void PushGpr(uintptr_t val) { 2108 *cur_gpr_reg_ = val; 2109 cur_gpr_reg_++; 2110 } 2111 2112 void PushFpr4(float val) { 2113 *cur_fpr_reg_ = val; 2114 cur_fpr_reg_++; 2115 } 2116 2117 void PushFpr8(uint64_t val) { 2118 uint64_t* tmp = reinterpret_cast<uint64_t*>(cur_fpr_reg_); 2119 *tmp = val; 2120 cur_fpr_reg_ += 2; 2121 } 2122 2123 void PushStack(uintptr_t val) { 2124 *cur_stack_arg_ = val; 2125 cur_stack_arg_++; 2126 } 2127 2128 virtual uintptr_t PushHandle(mirror::Object*) REQUIRES_SHARED(Locks::mutator_lock_) { 2129 LOG(FATAL) << "(Non-JNI) Native call does not use handles."; 2130 UNREACHABLE(); 2131 } 2132 2133 private: 2134 uintptr_t* cur_gpr_reg_; 2135 uint32_t* cur_fpr_reg_; 2136 uintptr_t* cur_stack_arg_; 2137 }; 2138 2139 // Visits arguments on the stack placing them into a region lower down the stack for the benefit 2140 // of transitioning into native code. 2141 class BuildGenericJniFrameVisitor final : public QuickArgumentVisitor { 2142 public: 2143 BuildGenericJniFrameVisitor(Thread* self, 2144 bool is_static, 2145 bool critical_native, 2146 const char* shorty, 2147 uint32_t shorty_len, 2148 ArtMethod*** sp) 2149 : QuickArgumentVisitor(*sp, is_static, shorty, shorty_len), 2150 jni_call_(nullptr, nullptr, nullptr, nullptr, critical_native), 2151 sm_(&jni_call_) { 2152 ComputeGenericJniFrameSize fsc(critical_native); 2153 uintptr_t* start_gpr_reg; 2154 uint32_t* start_fpr_reg; 2155 uintptr_t* start_stack_arg; 2156 bottom_of_used_area_ = fsc.ComputeLayout(self, sp, shorty, shorty_len, 2157 &handle_scope_, 2158 &start_stack_arg, 2159 &start_gpr_reg, &start_fpr_reg); 2160 2161 jni_call_.Reset(start_gpr_reg, start_fpr_reg, start_stack_arg, handle_scope_); 2162 2163 // First 2 parameters are always excluded for CriticalNative methods. 
2164 if (LIKELY(!critical_native)) { 2165 // jni environment is always first argument 2166 sm_.AdvancePointer(self->GetJniEnv()); 2167 2168 if (is_static) { 2169 sm_.AdvanceHandleScope((**sp)->GetDeclaringClass().Ptr()); 2170 } // else "this" reference is already handled by QuickArgumentVisitor. 2171 } 2172 } 2173 2174 void Visit() REQUIRES_SHARED(Locks::mutator_lock_) override; 2175 2176 void FinalizeHandleScope(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_); 2177 2178 StackReference<mirror::Object>* GetFirstHandleScopeEntry() { 2179 return handle_scope_->GetHandle(0).GetReference(); 2180 } 2181 2182 jobject GetFirstHandleScopeJObject() const REQUIRES_SHARED(Locks::mutator_lock_) { 2183 return handle_scope_->GetHandle(0).ToJObject(); 2184 } 2185 2186 void* GetBottomOfUsedArea() const { 2187 return bottom_of_used_area_; 2188 } 2189 2190 private: 2191 // A class to fill a JNI call. Adds reference/handle-scope management to FillNativeCall. 2192 class FillJniCall final : public FillNativeCall { 2193 public: 2194 FillJniCall(uintptr_t* gpr_regs, uint32_t* fpr_regs, uintptr_t* stack_args, 2195 HandleScope* handle_scope, bool critical_native) 2196 : FillNativeCall(gpr_regs, fpr_regs, stack_args), 2197 handle_scope_(handle_scope), 2198 cur_entry_(0), 2199 critical_native_(critical_native) {} 2200 2201 uintptr_t PushHandle(mirror::Object* ref) override REQUIRES_SHARED(Locks::mutator_lock_); 2202 2203 void Reset(uintptr_t* gpr_regs, uint32_t* fpr_regs, uintptr_t* stack_args, HandleScope* scope) { 2204 FillNativeCall::Reset(gpr_regs, fpr_regs, stack_args); 2205 handle_scope_ = scope; 2206 cur_entry_ = 0U; 2207 } 2208 2209 void ResetRemainingScopeSlots() REQUIRES_SHARED(Locks::mutator_lock_) { 2210 // Initialize padding entries. 2211 size_t expected_slots = handle_scope_->NumberOfReferences(); 2212 while (cur_entry_ < expected_slots) { 2213 handle_scope_->GetMutableHandle(cur_entry_++).Assign(nullptr); 2214 } 2215 2216 if (!critical_native_) { 2217 // Non-critical natives have at least the self class (jclass) or this (jobject). 2218 DCHECK_NE(cur_entry_, 0U); 2219 } 2220 } 2221 2222 bool CriticalNative() const { 2223 return critical_native_; 2224 } 2225 2226 private: 2227 HandleScope* handle_scope_; 2228 size_t cur_entry_; 2229 const bool critical_native_; 2230 }; 2231 2232 HandleScope* handle_scope_; 2233 FillJniCall jni_call_; 2234 void* bottom_of_used_area_; 2235 2236 BuildNativeCallFrameStateMachine<FillJniCall> sm_; 2237 2238 DISALLOW_COPY_AND_ASSIGN(BuildGenericJniFrameVisitor); 2239 }; 2240 2241 uintptr_t BuildGenericJniFrameVisitor::FillJniCall::PushHandle(mirror::Object* ref) { 2242 uintptr_t tmp; 2243 MutableHandle<mirror::Object> h = handle_scope_->GetMutableHandle(cur_entry_); 2244 h.Assign(ref); 2245 tmp = reinterpret_cast<uintptr_t>(h.ToJObject()); 2246 cur_entry_++; 2247 return tmp; 2248 } 2249 2250 void BuildGenericJniFrameVisitor::Visit() { 2251 Primitive::Type type = GetParamPrimitiveType(); 2252 switch (type) { 2253 case Primitive::kPrimLong: { 2254 jlong long_arg; 2255 if (IsSplitLongOrDouble()) { 2256 long_arg = ReadSplitLongParam(); 2257 } else { 2258 long_arg = *reinterpret_cast<jlong*>(GetParamAddress()); 2259 } 2260 sm_.AdvanceLong(long_arg); 2261 break; 2262 } 2263 case Primitive::kPrimDouble: { 2264 uint64_t double_arg; 2265 if (IsSplitLongOrDouble()) { 2266 // Read into union so that we don't case to a double. 
2267 double_arg = ReadSplitLongParam(); 2268 } else { 2269 double_arg = *reinterpret_cast<uint64_t*>(GetParamAddress()); 2270 } 2271 sm_.AdvanceDouble(double_arg); 2272 break; 2273 } 2274 case Primitive::kPrimNot: { 2275 StackReference<mirror::Object>* stack_ref = 2276 reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress()); 2277 sm_.AdvanceHandleScope(stack_ref->AsMirrorPtr()); 2278 break; 2279 } 2280 case Primitive::kPrimFloat: 2281 sm_.AdvanceFloat(*reinterpret_cast<float*>(GetParamAddress())); 2282 break; 2283 case Primitive::kPrimBoolean: // Fall-through. 2284 case Primitive::kPrimByte: // Fall-through. 2285 case Primitive::kPrimChar: // Fall-through. 2286 case Primitive::kPrimShort: // Fall-through. 2287 case Primitive::kPrimInt: // Fall-through. 2288 sm_.AdvanceInt(*reinterpret_cast<jint*>(GetParamAddress())); 2289 break; 2290 case Primitive::kPrimVoid: 2291 LOG(FATAL) << "UNREACHABLE"; 2292 UNREACHABLE(); 2293 } 2294 } 2295 2296 void BuildGenericJniFrameVisitor::FinalizeHandleScope(Thread* self) { 2297 // Clear out rest of the scope. 2298 jni_call_.ResetRemainingScopeSlots(); 2299 if (!jni_call_.CriticalNative()) { 2300 // Install HandleScope. 2301 self->PushHandleScope(handle_scope_); 2302 } 2303 } 2304 2305 #if defined(__arm__) || defined(__aarch64__) 2306 extern "C" const void* artFindNativeMethod(); 2307 #else 2308 extern "C" const void* artFindNativeMethod(Thread* self); 2309 #endif 2310 2311 static uint64_t artQuickGenericJniEndJNIRef(Thread* self, 2312 uint32_t cookie, 2313 bool fast_native ATTRIBUTE_UNUSED, 2314 jobject l, 2315 jobject lock) { 2316 // TODO: add entrypoints for @FastNative returning objects. 2317 if (lock != nullptr) { 2318 return reinterpret_cast<uint64_t>(JniMethodEndWithReferenceSynchronized(l, cookie, lock, self)); 2319 } else { 2320 return reinterpret_cast<uint64_t>(JniMethodEndWithReference(l, cookie, self)); 2321 } 2322 } 2323 2324 static void artQuickGenericJniEndJNINonRef(Thread* self, 2325 uint32_t cookie, 2326 bool fast_native, 2327 jobject lock) { 2328 if (lock != nullptr) { 2329 JniMethodEndSynchronized(cookie, lock, self); 2330 // Ignore "fast_native" here because synchronized functions aren't very fast. 2331 } else { 2332 if (UNLIKELY(fast_native)) { 2333 JniMethodFastEnd(cookie, self); 2334 } else { 2335 JniMethodEnd(cookie, self); 2336 } 2337 } 2338 } 2339 2340 /* 2341 * Initializes an alloca region assumed to be directly below sp for a native call: 2342 * Create a HandleScope and call stack and fill a mini stack with values to be pushed to registers. 2343 * The final element on the stack is a pointer to the native code. 2344 * 2345 * On entry, the stack has a standard callee-save frame above sp, and an alloca below it. 2346 * We need to fix this, as the handle scope needs to go into the callee-save frame. 2347 * 2348 * The return of this function denotes: 2349 * 1) How many bytes of the alloca can be released, if the value is non-negative. 2350 * 2) An error, if the value is negative. 2351 */ 2352 extern "C" TwoWordReturn artQuickGenericJniTrampoline(Thread* self, ArtMethod** sp) 2353 REQUIRES_SHARED(Locks::mutator_lock_) { 2354 // Note: We cannot walk the stack properly until fixed up below. 
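// Overview of the steps below (descriptive summary only): build the native call frame with
// BuildGenericJniFrameVisitor, publish the new top quick frame so the stack is walkable again,
// call JniMethodStart / JniMethodFastStart / JniMethodStartSynchronized (skipped entirely for
// @CriticalNative), resolve the native code through artFindNativeMethod() if the entry point is
// still the dlsym lookup stub, and hand the bottom of the used alloca area plus the native code
// pointer back to the assembly stub.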
2355 ArtMethod* called = *sp; 2356 DCHECK(called->IsNative()) << called->PrettyMethod(true); 2357 Runtime* runtime = Runtime::Current(); 2358 uint32_t shorty_len = 0; 2359 const char* shorty = called->GetShorty(&shorty_len); 2360 bool critical_native = called->IsCriticalNative(); 2361 bool fast_native = called->IsFastNative(); 2362 bool normal_native = !critical_native && !fast_native; 2363 2364 // Run the visitor and update sp. 2365 BuildGenericJniFrameVisitor visitor(self, 2366 called->IsStatic(), 2367 critical_native, 2368 shorty, 2369 shorty_len, 2370 &sp); 2371 { 2372 ScopedAssertNoThreadSuspension sants(__FUNCTION__); 2373 visitor.VisitArguments(); 2374 // FinalizeHandleScope pushes the handle scope on the thread. 2375 visitor.FinalizeHandleScope(self); 2376 } 2377 2378 // Fix up managed-stack things in Thread. After this we can walk the stack. 2379 self->SetTopOfStackTagged(sp); 2380 2381 self->VerifyStack(); 2382 2383 // We can now walk the stack if needed by JIT GC from MethodEntered() for JIT-on-first-use. 2384 jit::Jit* jit = runtime->GetJit(); 2385 if (jit != nullptr) { 2386 jit->MethodEntered(self, called); 2387 } 2388 2389 uint32_t cookie; 2390 uint32_t* sp32; 2391 // Skip calling JniMethodStart for @CriticalNative. 2392 if (LIKELY(!critical_native)) { 2393 // Start JNI, save the cookie. 2394 if (called->IsSynchronized()) { 2395 DCHECK(normal_native) << " @FastNative and synchronize is not supported"; 2396 cookie = JniMethodStartSynchronized(visitor.GetFirstHandleScopeJObject(), self); 2397 if (self->IsExceptionPending()) { 2398 self->PopHandleScope(); 2399 // A negative value denotes an error. 2400 return GetTwoWordFailureValue(); 2401 } 2402 } else { 2403 if (fast_native) { 2404 cookie = JniMethodFastStart(self); 2405 } else { 2406 DCHECK(normal_native); 2407 cookie = JniMethodStart(self); 2408 } 2409 } 2410 sp32 = reinterpret_cast<uint32_t*>(sp); 2411 *(sp32 - 1) = cookie; 2412 } 2413 2414 // Retrieve the stored native code. 2415 void const* nativeCode = called->GetEntryPointFromJni(); 2416 2417 // There are two cases for the content of nativeCode: 2418 // 1) Pointer to the native function. 2419 // 2) Pointer to the trampoline for native code binding. 2420 // In the second case, we need to execute the binding and continue with the actual native function 2421 // pointer. 2422 DCHECK(nativeCode != nullptr); 2423 if (nativeCode == GetJniDlsymLookupStub()) { 2424 #if defined(__arm__) || defined(__aarch64__) 2425 nativeCode = artFindNativeMethod(); 2426 #else 2427 nativeCode = artFindNativeMethod(self); 2428 #endif 2429 2430 if (nativeCode == nullptr) { 2431 DCHECK(self->IsExceptionPending()); // There should be an exception pending now. 2432 2433 // @CriticalNative calls do not need to call back into JniMethodEnd. 2434 if (LIKELY(!critical_native)) { 2435 // End JNI, as the assembly will move to deliver the exception. 2436 jobject lock = called->IsSynchronized() ? visitor.GetFirstHandleScopeJObject() : nullptr; 2437 if (shorty[0] == 'L') { 2438 artQuickGenericJniEndJNIRef(self, cookie, fast_native, nullptr, lock); 2439 } else { 2440 artQuickGenericJniEndJNINonRef(self, cookie, fast_native, lock); 2441 } 2442 } 2443 2444 return GetTwoWordFailureValue(); 2445 } 2446 // Note that the native code pointer will be automatically set by artFindNativeMethod(). 
2447 } 2448 2449 #if defined(__mips__) && !defined(__LP64__) 2450 // On MIPS32 if the first two arguments are floating-point, we need to know their types 2451 // so that art_quick_generic_jni_trampoline can correctly extract them from the stack 2452 // and load into floating-point registers. 2453 // Possible arrangements of first two floating-point arguments on the stack (32-bit FPU 2454 // view): 2455 // (1) 2456 // | DOUBLE | DOUBLE | other args, if any 2457 // | F12 | F13 | F14 | F15 | 2458 // | SP+0 | SP+4 | SP+8 | SP+12 | SP+16 2459 // (2) 2460 // | DOUBLE | FLOAT | (PAD) | other args, if any 2461 // | F12 | F13 | F14 | | 2462 // | SP+0 | SP+4 | SP+8 | SP+12 | SP+16 2463 // (3) 2464 // | FLOAT | (PAD) | DOUBLE | other args, if any 2465 // | F12 | | F14 | F15 | 2466 // | SP+0 | SP+4 | SP+8 | SP+12 | SP+16 2467 // (4) 2468 // | FLOAT | FLOAT | other args, if any 2469 // | F12 | F14 | 2470 // | SP+0 | SP+4 | SP+8 2471 // As you can see, only the last case (4) is special. In all others we can just 2472 // load F12/F13 and F14/F15 in the same manner. 2473 // Set bit 0 of the native code address to 1 in this case (valid code addresses 2474 // are always a multiple of 4 on MIPS32, so we have 2 spare bits available). 2475 if (nativeCode != nullptr && 2476 shorty != nullptr && 2477 shorty_len >= 3 && 2478 shorty[1] == 'F' && 2479 shorty[2] == 'F') { 2480 nativeCode = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(nativeCode) | 1); 2481 } 2482 #endif 2483 2484 VLOG(third_party_jni) << "GenericJNI: " 2485 << called->PrettyMethod() 2486 << " -> " 2487 << std::hex << reinterpret_cast<uintptr_t>(nativeCode); 2488 2489 // Return native code addr(lo) and bottom of alloca address(hi). 2490 return GetTwoWordSuccessValue(reinterpret_cast<uintptr_t>(visitor.GetBottomOfUsedArea()), 2491 reinterpret_cast<uintptr_t>(nativeCode)); 2492 } 2493 2494 // Defined in quick_jni_entrypoints.cc. 2495 extern uint64_t GenericJniMethodEnd(Thread* self, uint32_t saved_local_ref_cookie, 2496 jvalue result, uint64_t result_f, ArtMethod* called, 2497 HandleScope* handle_scope); 2498 /* 2499 * Is called after the native JNI code. Responsible for cleanup (handle scope, saved state) and 2500 * unlocking. 2501 */ 2502 extern "C" uint64_t artQuickGenericJniEndTrampoline(Thread* self, 2503 jvalue result, 2504 uint64_t result_f) { 2505 // We're here just back from a native call. We don't have the shared mutator lock at this point 2506 // yet until we call GoToRunnable() later in GenericJniMethodEnd(). Accessing objects or doing 2507 // anything that requires a mutator lock before that would cause problems as GC may have the 2508 // exclusive mutator lock and may be moving objects, etc. 2509 ArtMethod** sp = self->GetManagedStack()->GetTopQuickFrame(); 2510 DCHECK(self->GetManagedStack()->GetTopQuickFrameTag()); 2511 uint32_t* sp32 = reinterpret_cast<uint32_t*>(sp); 2512 ArtMethod* called = *sp; 2513 uint32_t cookie = *(sp32 - 1); 2514 HandleScope* table = reinterpret_cast<HandleScope*>(reinterpret_cast<uint8_t*>(sp) + sizeof(*sp)); 2515 return GenericJniMethodEnd(self, cookie, result, result_f, called, table); 2516 } 2517 2518 // We use TwoWordReturn to optimize scalar returns. We use the hi value for code, and the lo value 2519 // for the method pointer. 2520 // 2521 // It is valid to use this, as at the usage points here (returns from C functions) we are assuming 2522 // to hold the mutator lock (see REQUIRES_SHARED(Locks::mutator_lock_) annotations). 
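// artInvokeCommon (below) is the shared slow path behind the artInvoke*TrampolineWithAccessCheck
// entry points: it first tries FindMethodFast, falls back to FindMethodFromCode with the
// arguments kept GC-safe by RememberForGcArgumentVisitor, and returns the {code, method} pair
// described above. Illustrative call shape (a sketch matching the explicit instantiations below):
//   artInvokeCommon<kVirtual, /* access_check= */ true>(method_idx, this_object, self, sp);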
2523 2524 template <InvokeType type, bool access_check> 2525 static TwoWordReturn artInvokeCommon(uint32_t method_idx, 2526 ObjPtr<mirror::Object> this_object, 2527 Thread* self, 2528 ArtMethod** sp) { 2529 ScopedQuickEntrypointChecks sqec(self); 2530 DCHECK_EQ(*sp, Runtime::Current()->GetCalleeSaveMethod(CalleeSaveType::kSaveRefsAndArgs)); 2531 ArtMethod* caller_method = QuickArgumentVisitor::GetCallingMethod(sp); 2532 ArtMethod* method = FindMethodFast<type, access_check>(method_idx, this_object, caller_method); 2533 if (UNLIKELY(method == nullptr)) { 2534 const DexFile* dex_file = caller_method->GetDexFile(); 2535 uint32_t shorty_len; 2536 const char* shorty = dex_file->GetMethodShorty(dex_file->GetMethodId(method_idx), &shorty_len); 2537 { 2538 // Remember the args in case a GC happens in FindMethodFromCode. 2539 ScopedObjectAccessUnchecked soa(self->GetJniEnv()); 2540 RememberForGcArgumentVisitor visitor(sp, type == kStatic, shorty, shorty_len, &soa); 2541 visitor.VisitArguments(); 2542 method = FindMethodFromCode<type, access_check>(method_idx, 2543 &this_object, 2544 caller_method, 2545 self); 2546 visitor.FixupReferences(); 2547 } 2548 2549 if (UNLIKELY(method == nullptr)) { 2550 CHECK(self->IsExceptionPending()); 2551 return GetTwoWordFailureValue(); // Failure. 2552 } 2553 } 2554 DCHECK(!self->IsExceptionPending()); 2555 const void* code = method->GetEntryPointFromQuickCompiledCode(); 2556 2557 // When we return, the caller will branch to this address, so it had better not be 0! 2558 DCHECK(code != nullptr) << "Code was null in method: " << method->PrettyMethod() 2559 << " location: " 2560 << method->GetDexFile()->GetLocation(); 2561 2562 return GetTwoWordSuccessValue(reinterpret_cast<uintptr_t>(code), 2563 reinterpret_cast<uintptr_t>(method)); 2564 } 2565 2566 // Explicit artInvokeCommon template function declarations to please analysis tool. 
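// For reference, each EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(type, access_check) line below
// expands to an explicit instantiation of the form (illustrative, parameter names omitted):
//   template REQUIRES_SHARED(Locks::mutator_lock_)
//   TwoWordReturn artInvokeCommon<kVirtual, false>(
//       uint32_t, ObjPtr<mirror::Object>, Thread*, ArtMethod**);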
2567 #define EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(type, access_check) \ 2568 template REQUIRES_SHARED(Locks::mutator_lock_) \ 2569 TwoWordReturn artInvokeCommon<type, access_check>( \ 2570 uint32_t method_idx, ObjPtr<mirror::Object> his_object, Thread* self, ArtMethod** sp) 2571 2572 EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kVirtual, false); 2573 EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kVirtual, true); 2574 EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kInterface, false); 2575 EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kInterface, true); 2576 EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kDirect, false); 2577 EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kDirect, true); 2578 EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kStatic, false); 2579 EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kStatic, true); 2580 EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kSuper, false); 2581 EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kSuper, true); 2582 #undef EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL 2583 2584 // See comments in runtime_support_asm.S 2585 extern "C" TwoWordReturn artInvokeInterfaceTrampolineWithAccessCheck( 2586 uint32_t method_idx, mirror::Object* this_object, Thread* self, ArtMethod** sp) 2587 REQUIRES_SHARED(Locks::mutator_lock_) { 2588 return artInvokeCommon<kInterface, true>(method_idx, this_object, self, sp); 2589 } 2590 2591 extern "C" TwoWordReturn artInvokeDirectTrampolineWithAccessCheck( 2592 uint32_t method_idx, mirror::Object* this_object, Thread* self, ArtMethod** sp) 2593 REQUIRES_SHARED(Locks::mutator_lock_) { 2594 return artInvokeCommon<kDirect, true>(method_idx, this_object, self, sp); 2595 } 2596 2597 extern "C" TwoWordReturn artInvokeStaticTrampolineWithAccessCheck( 2598 uint32_t method_idx, 2599 mirror::Object* this_object ATTRIBUTE_UNUSED, 2600 Thread* self, 2601 ArtMethod** sp) REQUIRES_SHARED(Locks::mutator_lock_) { 2602 // For static, this_object is not required and may be random garbage. Don't pass it down so that 2603 // it doesn't cause ObjPtr alignment failure check. 2604 return artInvokeCommon<kStatic, true>(method_idx, nullptr, self, sp); 2605 } 2606 2607 extern "C" TwoWordReturn artInvokeSuperTrampolineWithAccessCheck( 2608 uint32_t method_idx, mirror::Object* this_object, Thread* self, ArtMethod** sp) 2609 REQUIRES_SHARED(Locks::mutator_lock_) { 2610 return artInvokeCommon<kSuper, true>(method_idx, this_object, self, sp); 2611 } 2612 2613 extern "C" TwoWordReturn artInvokeVirtualTrampolineWithAccessCheck( 2614 uint32_t method_idx, mirror::Object* this_object, Thread* self, ArtMethod** sp) 2615 REQUIRES_SHARED(Locks::mutator_lock_) { 2616 return artInvokeCommon<kVirtual, true>(method_idx, this_object, self, sp); 2617 } 2618 2619 // Helper function for art_quick_imt_conflict_trampoline to look up the interface method. 2620 extern "C" ArtMethod* artLookupResolvedMethod(uint32_t method_index, ArtMethod* referrer) 2621 REQUIRES_SHARED(Locks::mutator_lock_) { 2622 ScopedAssertNoThreadSuspension ants(__FUNCTION__); 2623 DCHECK(!referrer->IsProxyMethod()); 2624 ArtMethod* result = Runtime::Current()->GetClassLinker()->LookupResolvedMethod( 2625 method_index, referrer->GetDexCache(), referrer->GetClassLoader()); 2626 DCHECK(result == nullptr || 2627 result->GetDeclaringClass()->IsInterface() || 2628 result->GetDeclaringClass() == 2629 WellKnownClasses::ToClass(WellKnownClasses::java_lang_Object)) 2630 << result->PrettyMethod(); 2631 return result; 2632 } 2633 2634 // Determine target of interface dispatch. The interface method and this object are known non-null. 
2635 // The interface method is the method returned by the dex cache in the conflict trampoline. 2636 extern "C" TwoWordReturn artInvokeInterfaceTrampoline(ArtMethod* interface_method, 2637 mirror::Object* raw_this_object, 2638 Thread* self, 2639 ArtMethod** sp) 2640 REQUIRES_SHARED(Locks::mutator_lock_) { 2641 ScopedQuickEntrypointChecks sqec(self); 2642 StackHandleScope<2> hs(self); 2643 Handle<mirror::Object> this_object = hs.NewHandle(raw_this_object); 2644 Handle<mirror::Class> cls = hs.NewHandle(this_object->GetClass()); 2645 2646 ArtMethod* caller_method = QuickArgumentVisitor::GetCallingMethod(sp); 2647 ArtMethod* method = nullptr; 2648 ImTable* imt = cls->GetImt(kRuntimePointerSize); 2649 2650 if (UNLIKELY(interface_method == nullptr)) { 2651 // The interface method is unresolved, so resolve it in the dex file of the caller. 2652 // Fetch the dex_method_idx of the target interface method from the caller. 2653 uint32_t dex_method_idx; 2654 uint32_t dex_pc = QuickArgumentVisitor::GetCallingDexPc(sp); 2655 const Instruction& instr = caller_method->DexInstructions().InstructionAt(dex_pc); 2656 Instruction::Code instr_code = instr.Opcode(); 2657 DCHECK(instr_code == Instruction::INVOKE_INTERFACE || 2658 instr_code == Instruction::INVOKE_INTERFACE_RANGE) 2659 << "Unexpected call into interface trampoline: " << instr.DumpString(nullptr); 2660 if (instr_code == Instruction::INVOKE_INTERFACE) { 2661 dex_method_idx = instr.VRegB_35c(); 2662 } else { 2663 DCHECK_EQ(instr_code, Instruction::INVOKE_INTERFACE_RANGE); 2664 dex_method_idx = instr.VRegB_3rc(); 2665 } 2666 2667 const DexFile& dex_file = *caller_method->GetDexFile(); 2668 uint32_t shorty_len; 2669 const char* shorty = dex_file.GetMethodShorty(dex_file.GetMethodId(dex_method_idx), 2670 &shorty_len); 2671 { 2672 // Remember the args in case a GC happens in ClassLinker::ResolveMethod(). 2673 ScopedObjectAccessUnchecked soa(self->GetJniEnv()); 2674 RememberForGcArgumentVisitor visitor(sp, false, shorty, shorty_len, &soa); 2675 visitor.VisitArguments(); 2676 ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); 2677 interface_method = class_linker->ResolveMethod<ClassLinker::ResolveMode::kNoChecks>( 2678 self, dex_method_idx, caller_method, kInterface); 2679 visitor.FixupReferences(); 2680 } 2681 2682 if (UNLIKELY(interface_method == nullptr)) { 2683 CHECK(self->IsExceptionPending()); 2684 return GetTwoWordFailureValue(); // Failure. 2685 } 2686 } 2687 2688 DCHECK(!interface_method->IsRuntimeMethod()); 2689 // Look whether we have a match in the ImtConflictTable. 2690 uint32_t imt_index = interface_method->GetImtIndex(); 2691 ArtMethod* conflict_method = imt->Get(imt_index, kRuntimePointerSize); 2692 if (LIKELY(conflict_method->IsRuntimeMethod())) { 2693 ImtConflictTable* current_table = conflict_method->GetImtConflictTable(kRuntimePointerSize); 2694 DCHECK(current_table != nullptr); 2695 method = current_table->Lookup(interface_method, kRuntimePointerSize); 2696 } else { 2697 // It seems we aren't really a conflict method! 
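// (If there was never a conflict for this imt_index, the IMT slot stores the implementation
// method directly instead of a runtime method carrying an ImtConflictTable, so it can be used
// as-is after the debug-build cross-check below.)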
2698 if (kIsDebugBuild) { 2699 ArtMethod* m = cls->FindVirtualMethodForInterface(interface_method, kRuntimePointerSize); 2700 CHECK_EQ(conflict_method, m) 2701 << interface_method->PrettyMethod() << " / " << conflict_method->PrettyMethod() << " / " 2702 << " / " << ArtMethod::PrettyMethod(m) << " / " << cls->PrettyClass(); 2703 } 2704 method = conflict_method; 2705 } 2706 if (method != nullptr) { 2707 return GetTwoWordSuccessValue( 2708 reinterpret_cast<uintptr_t>(method->GetEntryPointFromQuickCompiledCode()), 2709 reinterpret_cast<uintptr_t>(method)); 2710 } 2711 2712 // No match, use the IfTable. 2713 method = cls->FindVirtualMethodForInterface(interface_method, kRuntimePointerSize); 2714 if (UNLIKELY(method == nullptr)) { 2715 ThrowIncompatibleClassChangeErrorClassForInterfaceDispatch( 2716 interface_method, this_object.Get(), caller_method); 2717 return GetTwoWordFailureValue(); // Failure. 2718 } 2719 2720 // We arrive here if we have found an implementation, and it is not in the ImtConflictTable. 2721 // We create a new table with the new pair { interface_method, method }. 2722 DCHECK(conflict_method->IsRuntimeMethod()); 2723 ArtMethod* new_conflict_method = Runtime::Current()->GetClassLinker()->AddMethodToConflictTable( 2724 cls.Get(), 2725 conflict_method, 2726 interface_method, 2727 method, 2728 /*force_new_conflict_method=*/false); 2729 if (new_conflict_method != conflict_method) { 2730 // Update the IMT if we create a new conflict method. No fence needed here, as the 2731 // data is consistent. 2732 imt->Set(imt_index, 2733 new_conflict_method, 2734 kRuntimePointerSize); 2735 } 2736 2737 const void* code = method->GetEntryPointFromQuickCompiledCode(); 2738 2739 // When we return, the caller will branch to this address, so it had better not be 0! 2740 DCHECK(code != nullptr) << "Code was null in method: " << method->PrettyMethod() 2741 << " location: " << method->GetDexFile()->GetLocation(); 2742 2743 return GetTwoWordSuccessValue(reinterpret_cast<uintptr_t>(code), 2744 reinterpret_cast<uintptr_t>(method)); 2745 } 2746 2747 // Returns uint64_t representing raw bits from JValue. 2748 extern "C" uint64_t artInvokePolymorphic(mirror::Object* raw_receiver, Thread* self, ArtMethod** sp) 2749 REQUIRES_SHARED(Locks::mutator_lock_) { 2750 ScopedQuickEntrypointChecks sqec(self); 2751 DCHECK(raw_receiver != nullptr); 2752 DCHECK_EQ(*sp, Runtime::Current()->GetCalleeSaveMethod(CalleeSaveType::kSaveRefsAndArgs)); 2753 2754 // Start new JNI local reference state 2755 JNIEnvExt* env = self->GetJniEnv(); 2756 ScopedObjectAccessUnchecked soa(env); 2757 ScopedJniEnvLocalRefState env_state(env); 2758 const char* old_cause = self->StartAssertNoThreadSuspension("Making stack arguments safe."); 2759 2760 // From the instruction, get the |callsite_shorty| and expose arguments on the stack to the GC. 2761 ArtMethod* caller_method = QuickArgumentVisitor::GetCallingMethod(sp); 2762 uint32_t dex_pc = QuickArgumentVisitor::GetCallingDexPc(sp); 2763 const Instruction& inst = caller_method->DexInstructions().InstructionAt(dex_pc); 2764 DCHECK(inst.Opcode() == Instruction::INVOKE_POLYMORPHIC || 2765 inst.Opcode() == Instruction::INVOKE_POLYMORPHIC_RANGE); 2766 const dex::ProtoIndex proto_idx(inst.VRegH()); 2767 const char* shorty = caller_method->GetDexFile()->GetShorty(proto_idx); 2768 const size_t shorty_length = strlen(shorty); 2769 static const bool kMethodIsStatic = false; // invoke() and invokeExact() are not static. 
2770 RememberForGcArgumentVisitor gc_visitor(sp, kMethodIsStatic, shorty, shorty_length, &soa); 2771 gc_visitor.VisitArguments(); 2772 2773 // Wrap raw_receiver in a Handle for safety. 2774 StackHandleScope<3> hs(self); 2775 Handle<mirror::Object> receiver_handle(hs.NewHandle(raw_receiver)); 2776 raw_receiver = nullptr; 2777 self->EndAssertNoThreadSuspension(old_cause); 2778 2779 // Resolve method. 2780 ClassLinker* linker = Runtime::Current()->GetClassLinker(); 2781 ArtMethod* resolved_method = linker->ResolveMethod<ClassLinker::ResolveMode::kCheckICCEAndIAE>( 2782 self, inst.VRegB(), caller_method, kVirtual); 2783 2784 Handle<mirror::MethodType> method_type( 2785 hs.NewHandle(linker->ResolveMethodType(self, proto_idx, caller_method))); 2786 if (UNLIKELY(method_type.IsNull())) { 2787 // This implies we couldn't resolve one or more types in this method handle. 2788 CHECK(self->IsExceptionPending()); 2789 return 0UL; 2790 } 2791 2792 DCHECK_EQ(ArtMethod::NumArgRegisters(shorty) + 1u, (uint32_t)inst.VRegA()); 2793 DCHECK_EQ(resolved_method->IsStatic(), kMethodIsStatic); 2794 2795 // Fix references before constructing the shadow frame. 2796 gc_visitor.FixupReferences(); 2797 2798 // Construct shadow frame placing arguments consecutively from |first_arg|. 2799 const bool is_range = (inst.Opcode() == Instruction::INVOKE_POLYMORPHIC_RANGE); 2800 const size_t num_vregs = is_range ? inst.VRegA_4rcc() : inst.VRegA_45cc(); 2801 const size_t first_arg = 0; 2802 ShadowFrameAllocaUniquePtr shadow_frame_unique_ptr = 2803 CREATE_SHADOW_FRAME(num_vregs, /* link= */ nullptr, resolved_method, dex_pc); 2804 ShadowFrame* shadow_frame = shadow_frame_unique_ptr.get(); 2805 ScopedStackedShadowFramePusher 2806 frame_pusher(self, shadow_frame, StackedShadowFrameType::kShadowFrameUnderConstruction); 2807 BuildQuickShadowFrameVisitor shadow_frame_builder(sp, 2808 kMethodIsStatic, 2809 shorty, 2810 strlen(shorty), 2811 shadow_frame, 2812 first_arg); 2813 shadow_frame_builder.VisitArguments(); 2814 2815 // Push a transition back into managed code onto the linked list in thread. 2816 ManagedStack fragment; 2817 self->PushManagedStackFragment(&fragment); 2818 2819 // Call DoInvokePolymorphic with |is_range| = true, as shadow frame has argument registers in 2820 // consecutive order. 
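// The receiver (the MethodHandle or VarHandle itself) occupies the first shadow-frame vreg, so
// the operands window below starts one register later and is one register shorter; e.g. for
// invoke-polymorphic {v0, v1, v2} only {v1, v2} are forwarded as arguments (illustrative).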
2821 RangeInstructionOperands operands(first_arg + 1, num_vregs - 1); 2822 Intrinsics intrinsic = static_cast<Intrinsics>(resolved_method->GetIntrinsic()); 2823 JValue result; 2824 bool success = false; 2825 if (resolved_method->GetDeclaringClass() == GetClassRoot<mirror::MethodHandle>(linker)) { 2826 Handle<mirror::MethodHandle> method_handle(hs.NewHandle( 2827 ObjPtr<mirror::MethodHandle>::DownCast(receiver_handle.Get()))); 2828 if (intrinsic == Intrinsics::kMethodHandleInvokeExact) { 2829 success = MethodHandleInvokeExact(self, 2830 *shadow_frame, 2831 method_handle, 2832 method_type, 2833 &operands, 2834 &result); 2835 } else { 2836 DCHECK_EQ(static_cast<uint32_t>(intrinsic), 2837 static_cast<uint32_t>(Intrinsics::kMethodHandleInvoke)); 2838 success = MethodHandleInvoke(self, 2839 *shadow_frame, 2840 method_handle, 2841 method_type, 2842 &operands, 2843 &result); 2844 } 2845 } else { 2846 DCHECK_EQ(GetClassRoot<mirror::VarHandle>(linker), resolved_method->GetDeclaringClass()); 2847 Handle<mirror::VarHandle> var_handle(hs.NewHandle( 2848 ObjPtr<mirror::VarHandle>::DownCast(receiver_handle.Get()))); 2849 mirror::VarHandle::AccessMode access_mode = 2850 mirror::VarHandle::GetAccessModeByIntrinsic(intrinsic); 2851 success = VarHandleInvokeAccessor(self, 2852 *shadow_frame, 2853 var_handle, 2854 method_type, 2855 access_mode, 2856 &operands, 2857 &result); 2858 } 2859 2860 DCHECK(success || self->IsExceptionPending()); 2861 2862 // Pop transition record. 2863 self->PopManagedStackFragment(fragment); 2864 2865 return result.GetJ(); 2866 } 2867 2868 // Returns uint64_t representing raw bits from JValue. 2869 extern "C" uint64_t artInvokeCustom(uint32_t call_site_idx, Thread* self, ArtMethod** sp) 2870 REQUIRES_SHARED(Locks::mutator_lock_) { 2871 ScopedQuickEntrypointChecks sqec(self); 2872 DCHECK_EQ(*sp, Runtime::Current()->GetCalleeSaveMethod(CalleeSaveType::kSaveRefsAndArgs)); 2873 2874 // invoke-custom is effectively a static call (no receiver). 2875 static constexpr bool kMethodIsStatic = true; 2876 2877 // Start new JNI local reference state 2878 JNIEnvExt* env = self->GetJniEnv(); 2879 ScopedObjectAccessUnchecked soa(env); 2880 ScopedJniEnvLocalRefState env_state(env); 2881 2882 const char* old_cause = self->StartAssertNoThreadSuspension("Making stack arguments safe."); 2883 2884 // From the instruction, get the |callsite_shorty| and expose arguments on the stack to the GC. 2885 ArtMethod* caller_method = QuickArgumentVisitor::GetCallingMethod(sp); 2886 uint32_t dex_pc = QuickArgumentVisitor::GetCallingDexPc(sp); 2887 const DexFile* dex_file = caller_method->GetDexFile(); 2888 const dex::ProtoIndex proto_idx(dex_file->GetProtoIndexForCallSite(call_site_idx)); 2889 const char* shorty = caller_method->GetDexFile()->GetShorty(proto_idx); 2890 const uint32_t shorty_len = strlen(shorty); 2891 2892 // Construct the shadow frame placing arguments consecutively from |first_arg|. 
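// Unlike the invoke-polymorphic case above, there is no receiver to skip: the operands window
// handed to interpreter::DoInvokeCustom below spans all |num_vregs| shadow-frame registers
// starting at |first_arg| (descriptive note only).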
2893 const size_t first_arg = 0; 2894 const size_t num_vregs = ArtMethod::NumArgRegisters(shorty); 2895 ShadowFrameAllocaUniquePtr shadow_frame_unique_ptr = 2896 CREATE_SHADOW_FRAME(num_vregs, /* link= */ nullptr, caller_method, dex_pc); 2897 ShadowFrame* shadow_frame = shadow_frame_unique_ptr.get(); 2898 ScopedStackedShadowFramePusher 2899 frame_pusher(self, shadow_frame, StackedShadowFrameType::kShadowFrameUnderConstruction); 2900 BuildQuickShadowFrameVisitor shadow_frame_builder(sp, 2901 kMethodIsStatic, 2902 shorty, 2903 shorty_len, 2904 shadow_frame, 2905 first_arg); 2906 shadow_frame_builder.VisitArguments(); 2907 2908 // Push a transition back into managed code onto the linked list in thread. 2909 ManagedStack fragment; 2910 self->PushManagedStackFragment(&fragment); 2911 self->EndAssertNoThreadSuspension(old_cause); 2912 2913 // Perform the invoke-custom operation. 2914 RangeInstructionOperands operands(first_arg, num_vregs); 2915 JValue result; 2916 bool success = 2917 interpreter::DoInvokeCustom(self, *shadow_frame, call_site_idx, &operands, &result); 2918 DCHECK(success || self->IsExceptionPending()); 2919 2920 // Pop transition record. 2921 self->PopManagedStackFragment(fragment); 2922 2923 return result.GetJ(); 2924 } 2925 2926 } // namespace art 2927