/*
 * Copyright 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "jit.h"

#include <dlfcn.h>

#include "art_method-inl.h"
#include "base/enums.h"
#include "base/logging.h"  // For VLOG.
#include "base/memory_tool.h"
#include "base/runtime_debug.h"
#include "base/utils.h"
#include "debugger.h"
#include "entrypoints/runtime_asm_entrypoints.h"
#include "interpreter/interpreter.h"
#include "java_vm_ext.h"
#include "jit_code_cache.h"
#include "oat_file_manager.h"
#include "oat_quick_method_header.h"
#include "profile_compilation_info.h"
#include "profile_saver.h"
#include "runtime.h"
#include "runtime_options.h"
#include "stack.h"
#include "stack_map.h"
#include "thread-inl.h"
#include "thread_list.h"

namespace art {
namespace jit {

static constexpr bool kEnableOnStackReplacement = true;
// At what priority to schedule jit threads. 9 is the lowest foreground priority on device.
static constexpr int kJitPoolThreadPthreadPriority = 9;

// Different compilation threshold constants. These can be overridden on the command line.
static constexpr size_t kJitDefaultCompileThreshold = 10000;  // Non-debug default.
static constexpr size_t kJitStressDefaultCompileThreshold = 100;  // Fast-debug build.
static constexpr size_t kJitSlowStressDefaultCompileThreshold = 2;  // Slow-debug build.

// JIT compiler
void* Jit::jit_library_handle_ = nullptr;
void* Jit::jit_compiler_handle_ = nullptr;
void* (*Jit::jit_load_)(bool*) = nullptr;
void (*Jit::jit_unload_)(void*) = nullptr;
bool (*Jit::jit_compile_method_)(void*, ArtMethod*, Thread*, bool) = nullptr;
void (*Jit::jit_types_loaded_)(void*, mirror::Class**, size_t count) = nullptr;
bool Jit::generate_debug_info_ = false;

struct StressModeHelper {
  DECLARE_RUNTIME_DEBUG_FLAG(kSlowMode);
};
DEFINE_RUNTIME_DEBUG_FLAG(StressModeHelper, kSlowMode);

JitOptions* JitOptions::CreateFromRuntimeArguments(const RuntimeArgumentMap& options) {
  auto* jit_options = new JitOptions;
  jit_options->use_jit_compilation_ = options.GetOrDefault(RuntimeArgumentMap::UseJitCompilation);

  jit_options->code_cache_initial_capacity_ =
      options.GetOrDefault(RuntimeArgumentMap::JITCodeCacheInitialCapacity);
  jit_options->code_cache_max_capacity_ =
      options.GetOrDefault(RuntimeArgumentMap::JITCodeCacheMaxCapacity);
  jit_options->dump_info_on_shutdown_ =
      options.Exists(RuntimeArgumentMap::DumpJITInfoOnShutdown);
  jit_options->profile_saver_options_ =
      options.GetOrDefault(RuntimeArgumentMap::ProfileSaverOpts);

  if (options.Exists(RuntimeArgumentMap::JITCompileThreshold)) {
    jit_options->compile_threshold_ = *options.Get(RuntimeArgumentMap::JITCompileThreshold);
  } else {
    jit_options->compile_threshold_ =
        kIsDebugBuild
            ? (StressModeHelper::kSlowMode
                  ? kJitSlowStressDefaultCompileThreshold
                  : kJitStressDefaultCompileThreshold)
            : kJitDefaultCompileThreshold;
  }
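  // The thresholds below are kept in 16-bit method hotness counters, hence the
  // uint16_t range checks on every user-supplied value.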
  if (jit_options->compile_threshold_ > std::numeric_limits<uint16_t>::max()) {
    LOG(FATAL) << "Method compilation threshold is above its internal limit.";
  }

  if (options.Exists(RuntimeArgumentMap::JITWarmupThreshold)) {
    jit_options->warmup_threshold_ = *options.Get(RuntimeArgumentMap::JITWarmupThreshold);
    if (jit_options->warmup_threshold_ > std::numeric_limits<uint16_t>::max()) {
      LOG(FATAL) << "Method warmup threshold is above its internal limit.";
    }
  } else {
    jit_options->warmup_threshold_ = jit_options->compile_threshold_ / 2;
  }

  if (options.Exists(RuntimeArgumentMap::JITOsrThreshold)) {
    jit_options->osr_threshold_ = *options.Get(RuntimeArgumentMap::JITOsrThreshold);
    if (jit_options->osr_threshold_ > std::numeric_limits<uint16_t>::max()) {
      LOG(FATAL) << "Method on stack replacement threshold is above its internal limit.";
    }
  } else {
    jit_options->osr_threshold_ = jit_options->compile_threshold_ * 2;
    if (jit_options->osr_threshold_ > std::numeric_limits<uint16_t>::max()) {
      jit_options->osr_threshold_ = std::numeric_limits<uint16_t>::max();
    }
  }

  if (options.Exists(RuntimeArgumentMap::JITPriorityThreadWeight)) {
    jit_options->priority_thread_weight_ =
        *options.Get(RuntimeArgumentMap::JITPriorityThreadWeight);
    if (jit_options->priority_thread_weight_ > jit_options->warmup_threshold_) {
      LOG(FATAL) << "Priority thread weight is above the warmup threshold.";
    } else if (jit_options->priority_thread_weight_ == 0) {
      LOG(FATAL) << "Priority thread weight cannot be 0.";
    }
  } else {
    jit_options->priority_thread_weight_ = std::max(
        jit_options->warmup_threshold_ / Jit::kDefaultPriorityThreadWeightRatio,
        static_cast<size_t>(1));
  }

  if (options.Exists(RuntimeArgumentMap::JITInvokeTransitionWeight)) {
    jit_options->invoke_transition_weight_ =
        *options.Get(RuntimeArgumentMap::JITInvokeTransitionWeight);
    if (jit_options->invoke_transition_weight_ > jit_options->warmup_threshold_) {
      LOG(FATAL) << "Invoke transition weight is above the warmup threshold.";
    } else if (jit_options->invoke_transition_weight_ == 0) {
      LOG(FATAL) << "Invoke transition weight cannot be 0.";
    }
  } else {
    jit_options->invoke_transition_weight_ = std::max(
        jit_options->warmup_threshold_ / Jit::kDefaultInvokeTransitionWeightRatio,
        static_cast<size_t>(1));
  }

  return jit_options;
}

bool Jit::ShouldUsePriorityThreadWeight(Thread* self) {
  return self->IsJitSensitiveThread() && Runtime::Current()->InJankPerceptibleProcessState();
}

void Jit::DumpInfo(std::ostream& os) {
  code_cache_->Dump(os);
  cumulative_timings_.Dump(os);
  MutexLock mu(Thread::Current(), lock_);
  memory_use_.PrintMemoryUse(os);
}

void Jit::DumpForSigQuit(std::ostream& os) {
  DumpInfo(os);
  ProfileSaver::DumpInstanceInfo(os);
}

void Jit::AddTimingLogger(const TimingLogger& logger) {
  cumulative_timings_.AddLogger(logger);
}

Jit::Jit() : dump_info_on_shutdown_(false),
             cumulative_timings_("JIT timings"),
             memory_use_("Memory used for compilation", 16),
             lock_("JIT memory use lock"),
             use_jit_compilation_(true),
             hot_method_threshold_(0),
             warm_method_threshold_(0),
             osr_method_threshold_(0),
             priority_thread_weight_(0),
             invoke_transition_weight_(0) {}
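
// Creates and initializes the JIT: loads the compiler plugin if needed, sets up
// the code cache, copies the thresholds from the options, and starts the
// compiler thread pool. Returns nullptr (with *error_msg set) on failure.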
Jit* Jit::Create(JitOptions* options, std::string* error_msg) {
  DCHECK(options->UseJitCompilation() || options->GetProfileSaverOptions().IsEnabled());
  std::unique_ptr<Jit> jit(new Jit);
  jit->dump_info_on_shutdown_ = options->DumpJitInfoOnShutdown();
  if (jit_compiler_handle_ == nullptr && !LoadCompiler(error_msg)) {
    return nullptr;
  }
  bool code_cache_only_for_profile_data = !options->UseJitCompilation();
  jit->code_cache_.reset(JitCodeCache::Create(
      options->GetCodeCacheInitialCapacity(),
      options->GetCodeCacheMaxCapacity(),
      jit->generate_debug_info_,
      code_cache_only_for_profile_data,
      error_msg));
  if (jit->GetCodeCache() == nullptr) {
    return nullptr;
  }
  jit->use_jit_compilation_ = options->UseJitCompilation();
  jit->profile_saver_options_ = options->GetProfileSaverOptions();
  VLOG(jit) << "JIT created with initial_capacity="
            << PrettySize(options->GetCodeCacheInitialCapacity())
            << ", max_capacity=" << PrettySize(options->GetCodeCacheMaxCapacity())
            << ", compile_threshold=" << options->GetCompileThreshold()
            << ", profile_saver_options=" << options->GetProfileSaverOptions();

  jit->hot_method_threshold_ = options->GetCompileThreshold();
  jit->warm_method_threshold_ = options->GetWarmupThreshold();
  jit->osr_method_threshold_ = options->GetOsrThreshold();
  jit->priority_thread_weight_ = options->GetPriorityThreadWeight();
  jit->invoke_transition_weight_ = options->GetInvokeTransitionWeight();

  jit->CreateThreadPool();

  // Notify native debugger about the classes already loaded before the creation of the jit.
  jit->DumpTypeInfoForLoadedTypes(Runtime::Current()->GetClassLinker());
  return jit.release();
}

bool Jit::LoadCompilerLibrary(std::string* error_msg) {
  jit_library_handle_ = dlopen(
      kIsDebugBuild ? "libartd-compiler.so" : "libart-compiler.so", RTLD_NOW);
  if (jit_library_handle_ == nullptr) {
    std::ostringstream oss;
    oss << "JIT could not load libart-compiler.so: " << dlerror();
    *error_msg = oss.str();
    return false;
  }
  jit_load_ = reinterpret_cast<void* (*)(bool*)>(dlsym(jit_library_handle_, "jit_load"));
  if (jit_load_ == nullptr) {
    dlclose(jit_library_handle_);
    *error_msg = "JIT couldn't find jit_load entry point";
    return false;
  }
  jit_unload_ = reinterpret_cast<void (*)(void*)>(
      dlsym(jit_library_handle_, "jit_unload"));
  if (jit_unload_ == nullptr) {
    dlclose(jit_library_handle_);
    *error_msg = "JIT couldn't find jit_unload entry point";
    return false;
  }
  jit_compile_method_ = reinterpret_cast<bool (*)(void*, ArtMethod*, Thread*, bool)>(
      dlsym(jit_library_handle_, "jit_compile_method"));
  if (jit_compile_method_ == nullptr) {
    dlclose(jit_library_handle_);
    *error_msg = "JIT couldn't find jit_compile_method entry point";
    return false;
  }
  jit_types_loaded_ = reinterpret_cast<void (*)(void*, mirror::Class**, size_t)>(
      dlsym(jit_library_handle_, "jit_types_loaded"));
  if (jit_types_loaded_ == nullptr) {
    dlclose(jit_library_handle_);
    *error_msg = "JIT couldn't find jit_types_loaded entry point";
    return false;
  }
  return true;
}

bool Jit::LoadCompiler(std::string* error_msg) {
  if (jit_library_handle_ == nullptr && !LoadCompilerLibrary(error_msg)) {
    return false;
  }
  bool will_generate_debug_symbols = false;
  VLOG(jit) << "Calling JitLoad interpreter_only="
            << Runtime::Current()->GetInstrumentation()->InterpretOnly();
  jit_compiler_handle_ = (jit_load_)(&will_generate_debug_symbols);
  if (jit_compiler_handle_ == nullptr) {
    dlclose(jit_library_handle_);
    *error_msg = "JIT couldn't load compiler";
    return false;
  }
  generate_debug_info_ = will_generate_debug_symbols;
  return true;
}

// Compiles 'method' (or its OSR version when 'osr' is true) on the current
// thread. Returns false when compilation is refused (breakpoints, pending
// deoptimization, code cache rejection) or when the compiler itself fails.
bool Jit::CompileMethod(ArtMethod* method, Thread* self, bool osr) {
  DCHECK(Runtime::Current()->UseJitCompilation());
  DCHECK(!method->IsRuntimeMethod());

  RuntimeCallbacks* cb = Runtime::Current()->GetRuntimeCallbacks();
  // Don't compile the method if it has breakpoints.
  if (cb->IsMethodBeingInspected(method) && !cb->IsMethodSafeToJit(method)) {
    VLOG(jit) << "JIT not compiling " << method->PrettyMethod()
              << " due to not being safe to jit according to runtime-callbacks. For example, there"
              << " could be breakpoints in this method.";
    return false;
  }

  // Don't compile the method if we are supposed to be deoptimized.
  instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
  if (instrumentation->AreAllMethodsDeoptimized() || instrumentation->IsDeoptimized(method)) {
    VLOG(jit) << "JIT not compiling " << method->PrettyMethod() << " due to deoptimization";
    return false;
  }

  // If we get a request to compile a proxy method, we pass the actual Java method
  // of that proxy method, as the compiler does not expect a proxy method.
  ArtMethod* method_to_compile = method->GetInterfaceMethodIfProxy(kRuntimePointerSize);
  if (!code_cache_->NotifyCompilationOf(method_to_compile, self, osr)) {
    return false;
  }

  VLOG(jit) << "Compiling method "
            << ArtMethod::PrettyMethod(method_to_compile)
            << " osr=" << std::boolalpha << osr;
  bool success = jit_compile_method_(jit_compiler_handle_, method_to_compile, self, osr);
  code_cache_->DoneCompiling(method_to_compile, self, osr);
  if (!success) {
    VLOG(jit) << "Failed to compile method "
              << ArtMethod::PrettyMethod(method_to_compile)
              << " osr=" << std::boolalpha << osr;
  }
  if (kIsDebugBuild) {
    if (self->IsExceptionPending()) {
      mirror::Throwable* exception = self->GetException();
      LOG(FATAL) << "No pending exception expected after compiling "
                 << ArtMethod::PrettyMethod(method)
                 << ": "
                 << exception->Dump();
    }
  }
  return success;
}

void Jit::CreateThreadPool() {
  // There is a DCHECK in the 'AddSamples' method to ensure the thread pool
  // is not null when we instrument.

  // We need peers as we may report the JIT thread, e.g., in the debugger.
  constexpr bool kJitPoolNeedsPeers = true;
  thread_pool_.reset(new ThreadPool("Jit thread pool", 1, kJitPoolNeedsPeers));

  thread_pool_->SetPthreadPriority(kJitPoolThreadPthreadPriority);
  Start();
}

void Jit::DeleteThreadPool() {
  Thread* self = Thread::Current();
  DCHECK(Runtime::Current()->IsShuttingDown(self));
  if (thread_pool_ != nullptr) {
    std::unique_ptr<ThreadPool> pool;
    {
      ScopedSuspendAll ssa(__FUNCTION__);
      // Clear thread_pool_ field while the threads are suspended.
      // A mutator in the 'AddSamples' method will check against it.
      pool = std::move(thread_pool_);
    }

    // When running sanitized, let all tasks finish to not leak. Otherwise just clear the queue.
    if (!RUNNING_ON_MEMORY_TOOL) {
      pool->StopWorkers(self);
      pool->RemoveAllTasks(self);
    }
    // We could just suspend all threads, but we know those threads
    // will finish in a short period, so it's not worth adding suspend logic
    // here. Besides, this is only done for shutdown.
    pool->Wait(self, false, false);
  }
}
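
// Starts the profile saver thread, which periodically persists information
// about hot methods in 'code_paths' to 'filename'. This is a no-op unless
// profile saving was enabled in the runtime options.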
void Jit::StartProfileSaver(const std::string& filename,
                            const std::vector<std::string>& code_paths) {
  if (profile_saver_options_.IsEnabled()) {
    ProfileSaver::Start(profile_saver_options_,
                        filename,
                        code_cache_.get(),
                        code_paths);
  }
}

void Jit::StopProfileSaver() {
  if (profile_saver_options_.IsEnabled() && ProfileSaver::IsStarted()) {
    ProfileSaver::Stop(dump_info_on_shutdown_);
  }
}

bool Jit::JitAtFirstUse() {
  return HotMethodThreshold() == 0;
}

bool Jit::CanInvokeCompiledCode(ArtMethod* method) {
  return code_cache_->ContainsPc(method->GetEntryPointFromQuickCompiledCode());
}

Jit::~Jit() {
  DCHECK(!profile_saver_options_.IsEnabled() || !ProfileSaver::IsStarted());
  if (dump_info_on_shutdown_) {
    DumpInfo(LOG_STREAM(INFO));
    Runtime::Current()->DumpDeoptimizations(LOG_STREAM(INFO));
  }
  DeleteThreadPool();
  if (jit_compiler_handle_ != nullptr) {
    jit_unload_(jit_compiler_handle_);
    jit_compiler_handle_ = nullptr;
  }
  if (jit_library_handle_ != nullptr) {
    dlclose(jit_library_handle_);
    jit_library_handle_ = nullptr;
  }
}

void Jit::NewTypeLoadedIfUsingJit(mirror::Class* type) {
  if (!Runtime::Current()->UseJitCompilation()) {
    // No need to notify if we only use the JIT to save profiles.
    return;
  }
  jit::Jit* jit = Runtime::Current()->GetJit();
  if (jit->generate_debug_info_) {
    DCHECK(jit->jit_types_loaded_ != nullptr);
    jit->jit_types_loaded_(jit->jit_compiler_handle_, &type, 1);
  }
}

void Jit::DumpTypeInfoForLoadedTypes(ClassLinker* linker) {
  struct CollectClasses : public ClassVisitor {
    bool operator()(ObjPtr<mirror::Class> klass) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
      classes_.push_back(klass.Ptr());
      return true;
    }
    std::vector<mirror::Class*> classes_;
  };

  if (generate_debug_info_) {
    ScopedObjectAccess so(Thread::Current());

    CollectClasses visitor;
    linker->VisitClasses(&visitor);
    jit_types_loaded_(jit_compiler_handle_, visitor.classes_.data(), visitor.classes_.size());
  }
}

extern "C" void art_quick_osr_stub(void** stack,
                                   uint32_t stack_size_in_bytes,
                                   const uint8_t* native_pc,
                                   JValue* result,
                                   const char* shorty,
                                   Thread* self);
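
// Attempts on-stack replacement: if the JIT code cache holds an OSR-compiled
// version of 'method' with a stack map at the branch target, builds a native
// frame from the interpreter's shadow frame and jumps into compiled code via
// art_quick_osr_stub. Returns true iff compiled code was executed.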
bool Jit::MaybeDoOnStackReplacement(Thread* thread,
                                    ArtMethod* method,
                                    uint32_t dex_pc,
                                    int32_t dex_pc_offset,
                                    JValue* result) {
  if (!kEnableOnStackReplacement) {
    return false;
  }

  Jit* jit = Runtime::Current()->GetJit();
  if (jit == nullptr) {
    return false;
  }

  if (UNLIKELY(__builtin_frame_address(0) < thread->GetStackEnd())) {
    // Don't attempt to do an OSR if we are close to the stack limit. Since
    // the interpreter frames are still on stack, OSR has the potential
    // to stack overflow even for a simple loop.
    // b/27094810.
    return false;
  }

  // Get the actual Java method if this method is from a proxy class. The compiler
  // and the JIT code cache do not expect methods from proxy classes.
  method = method->GetInterfaceMethodIfProxy(kRuntimePointerSize);

  // Cheap check if the method has been compiled already. That's an indicator that we should
  // OSR into it.
  if (!jit->GetCodeCache()->ContainsPc(method->GetEntryPointFromQuickCompiledCode())) {
    return false;
  }

  // Fetch some data before looking up an OSR method. We don't want thread
  // suspension once we hold an OSR method, as the JIT code cache could delete the OSR
  // method while we are being suspended.
  CodeItemDataAccessor accessor(method->DexInstructionData());
  const size_t number_of_vregs = accessor.RegistersSize();
  const char* shorty = method->GetShorty();
  std::string method_name(VLOG_IS_ON(jit) ? method->PrettyMethod() : "");
  void** memory = nullptr;
  size_t frame_size = 0;
  ShadowFrame* shadow_frame = nullptr;
  const uint8_t* native_pc = nullptr;

  {
    ScopedAssertNoThreadSuspension sts("Holding OSR method");
    const OatQuickMethodHeader* osr_method = jit->GetCodeCache()->LookupOsrMethodHeader(method);
    if (osr_method == nullptr) {
      // No OSR method yet, just return to the interpreter.
      return false;
    }

    CodeInfo code_info = osr_method->GetOptimizedCodeInfo();
    CodeInfoEncoding encoding = code_info.ExtractEncoding();

    // Find stack map starting at the target dex_pc.
    StackMap stack_map = code_info.GetOsrStackMapForDexPc(dex_pc + dex_pc_offset, encoding);
    if (!stack_map.IsValid()) {
      // There is no OSR stack map for this dex pc offset. Just return to the interpreter in the
      // hope that the next branch has one.
      return false;
    }

    // Before allowing the jump, make sure no code is actively inspecting the method to avoid
    // jumping from interpreter to OSR while e.g. single stepping. Note that we could selectively
    // disable OSR when single stepping, but that's currently hard to know at this point.
    if (Runtime::Current()->GetRuntimeCallbacks()->IsMethodBeingInspected(method)) {
      return false;
    }

    // We found a stack map, now fill the frame with dex register values from the interpreter's
    // shadow frame.
    DexRegisterMap vreg_map =
        code_info.GetDexRegisterMapOf(stack_map, encoding, number_of_vregs);

    frame_size = osr_method->GetFrameSizeInBytes();

    // Allocate memory to put shadow frame values. The OSR stub will copy that memory to
    // the stack.
    // Note that we could pass the shadow frame to the stub, and let it copy the values there,
    // but that is engineering complexity not worth the effort for something like OSR.
    memory = reinterpret_cast<void**>(malloc(frame_size));
    CHECK(memory != nullptr);
    memset(memory, 0, frame_size);

    // Art ABI: ArtMethod is at the bottom of the stack.
    memory[0] = method;

    shadow_frame = thread->PopShadowFrame();
    if (!vreg_map.IsValid()) {
      // If we don't have a dex register map, then there are no live dex registers at
      // this dex pc.
    } else {
      for (uint16_t vreg = 0; vreg < number_of_vregs; ++vreg) {
        DexRegisterLocation::Kind location =
            vreg_map.GetLocationKind(vreg, number_of_vregs, code_info, encoding);
        if (location == DexRegisterLocation::Kind::kNone) {
          // Dex register is dead or uninitialized.
          continue;
        }

        if (location == DexRegisterLocation::Kind::kConstant) {
          // We skip constants because the compiled code knows how to handle them.
          continue;
        }

        DCHECK_EQ(location, DexRegisterLocation::Kind::kInStack);

        int32_t vreg_value = shadow_frame->GetVReg(vreg);
        int32_t slot_offset = vreg_map.GetStackOffsetInBytes(vreg,
                                                             number_of_vregs,
                                                             code_info,
                                                             encoding);
        DCHECK_LT(slot_offset, static_cast<int32_t>(frame_size));
        DCHECK_GT(slot_offset, 0);
        (reinterpret_cast<int32_t*>(memory))[slot_offset / sizeof(int32_t)] = vreg_value;
      }
    }

    native_pc = stack_map.GetNativePcOffset(encoding.stack_map.encoding, kRuntimeISA) +
        osr_method->GetEntryPoint();
    VLOG(jit) << "Jumping to "
              << method_name
              << "@"
              << std::hex << reinterpret_cast<uintptr_t>(native_pc);
  }

  {
    ManagedStack fragment;
    thread->PushManagedStackFragment(&fragment);
    (*art_quick_osr_stub)(memory,
                          frame_size,
                          native_pc,
                          result,
                          shorty,
                          thread);

    if (UNLIKELY(thread->GetException() == Thread::GetDeoptimizationException())) {
      thread->DeoptimizeWithDeoptimizationException(result);
    }
    thread->PopManagedStackFragment(fragment);
  }
  free(memory);
  thread->PushShadowFrame(shadow_frame);
  VLOG(jit) << "Done running OSR code for " << method_name;
  return true;
}

void Jit::AddMemoryUsage(ArtMethod* method, size_t bytes) {
  if (bytes > 4 * MB) {
    LOG(INFO) << "Compiler allocated "
              << PrettySize(bytes)
              << " to compile "
              << ArtMethod::PrettyMethod(method);
  }
  MutexLock mu(Thread::Current(), lock_);
  memory_use_.AddValue(bytes);
}
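
// A task run on the JIT thread pool. It holds a global reference to the
// method's declaring class so that the class cannot be unloaded while the
// compilation (or the profiling-info allocation) is in flight.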
class JitCompileTask FINAL : public Task {
 public:
  enum TaskKind {
    kAllocateProfile,
    kCompile,
    kCompileOsr
  };

  JitCompileTask(ArtMethod* method, TaskKind kind) : method_(method), kind_(kind) {
    ScopedObjectAccess soa(Thread::Current());
    // Add a global ref to the class to prevent class unloading until compilation is done.
    klass_ = soa.Vm()->AddGlobalRef(soa.Self(), method_->GetDeclaringClass());
    CHECK(klass_ != nullptr);
  }

  ~JitCompileTask() {
    ScopedObjectAccess soa(Thread::Current());
    soa.Vm()->DeleteGlobalRef(soa.Self(), klass_);
  }

  void Run(Thread* self) OVERRIDE {
    ScopedObjectAccess soa(self);
    if (kind_ == kCompile) {
      Runtime::Current()->GetJit()->CompileMethod(method_, self, /* osr */ false);
    } else if (kind_ == kCompileOsr) {
      Runtime::Current()->GetJit()->CompileMethod(method_, self, /* osr */ true);
    } else {
      DCHECK(kind_ == kAllocateProfile);
      if (ProfilingInfo::Create(self, method_, /* retry_allocation */ true)) {
        VLOG(jit) << "Start profiling " << ArtMethod::PrettyMethod(method_);
      }
    }
    ProfileSaver::NotifyJitActivity();
  }

  void Finalize() OVERRIDE {
    delete this;
  }

 private:
  ArtMethod* const method_;
  const TaskKind kind_;
  jobject klass_;

  DISALLOW_IMPLICIT_CONSTRUCTORS(JitCompileTask);
};
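
// Bumps the hotness counter of 'method' by 'count' samples (scaled by the
// priority thread weight when appropriate) and drives the state transitions:
// warm (allocate ProfilingInfo), hot (queue a compile task) and OSR (queue an
// OSR compile task). The counter never jumps more than one state per call.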
void Jit::AddSamples(Thread* self, ArtMethod* method, uint16_t count, bool with_backedges) {
  if (thread_pool_ == nullptr) {
    // Should only see this when shutting down.
    DCHECK(Runtime::Current()->IsShuttingDown(self));
    return;
  }

  if (method->IsClassInitializer() || !method->IsCompilable()) {
    // We do not want to compile such methods.
    return;
  }
  if (hot_method_threshold_ == 0) {
    // Tests might request JIT on first use (compiled synchronously in the interpreter).
    return;
  }
  DCHECK(thread_pool_ != nullptr);
  DCHECK_GT(warm_method_threshold_, 0);
  DCHECK_GT(hot_method_threshold_, warm_method_threshold_);
  DCHECK_GT(osr_method_threshold_, hot_method_threshold_);
  DCHECK_GE(priority_thread_weight_, 1);
  DCHECK_LE(priority_thread_weight_, hot_method_threshold_);

  int32_t starting_count = method->GetCounter();
  if (Jit::ShouldUsePriorityThreadWeight(self)) {
    count *= priority_thread_weight_;
  }
  int32_t new_count = starting_count + count;  // int32 here to avoid wrap-around.
  // Note: Native methods have no "warm" state or profiling info.
  if (LIKELY(!method->IsNative()) && starting_count < warm_method_threshold_) {
    if ((new_count >= warm_method_threshold_) &&
        (method->GetProfilingInfo(kRuntimePointerSize) == nullptr)) {
      bool success = ProfilingInfo::Create(self, method, /* retry_allocation */ false);
      if (success) {
        VLOG(jit) << "Start profiling " << method->PrettyMethod();
      }

      if (thread_pool_ == nullptr) {
        // Calling ProfilingInfo::Create might put us in a suspended state, which could
        // lead to the thread pool being deleted when we are shutting down.
        DCHECK(Runtime::Current()->IsShuttingDown(self));
        return;
      }

      if (!success) {
        // We failed allocating. Instead of doing the collection on the Java thread, we push
        // an allocation to a compiler thread, which will do the collection.
        thread_pool_->AddTask(self, new JitCompileTask(method, JitCompileTask::kAllocateProfile));
      }
    }
    // Avoid jumping more than one state at a time.
    new_count = std::min(new_count, hot_method_threshold_ - 1);
  } else if (use_jit_compilation_) {
    if (starting_count < hot_method_threshold_) {
      if ((new_count >= hot_method_threshold_) &&
          !code_cache_->ContainsPc(method->GetEntryPointFromQuickCompiledCode())) {
        DCHECK(thread_pool_ != nullptr);
        thread_pool_->AddTask(self, new JitCompileTask(method, JitCompileTask::kCompile));
      }
      // Avoid jumping more than one state at a time.
      new_count = std::min(new_count, osr_method_threshold_ - 1);
    } else if (starting_count < osr_method_threshold_) {
      if (!with_backedges) {
        // If the samples don't contain any back edge, we don't increment the hotness.
        return;
      }
      DCHECK(!method->IsNative());  // No back edges reported for native methods.
      if ((new_count >= osr_method_threshold_) && !code_cache_->IsOsrCompiled(method)) {
        DCHECK(thread_pool_ != nullptr);
        thread_pool_->AddTask(self, new JitCompileTask(method, JitCompileTask::kCompileOsr));
      }
    }
  }
  // Update hotness counter.
  method->SetCounter(new_count);
}
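
// Called when a method is entered from the interpreter. With JIT-at-first-use
// the method is compiled synchronously; otherwise a previously saved entry
// point is reinstalled if one exists, or a single (no-back-edge) sample is
// recorded.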
void Jit::MethodEntered(Thread* thread, ArtMethod* method) {
  Runtime* runtime = Runtime::Current();
  if (UNLIKELY(runtime->UseJitCompilation() && runtime->GetJit()->JitAtFirstUse())) {
    // The compiler requires a ProfilingInfo object.
    ProfilingInfo::Create(thread,
                          method->GetInterfaceMethodIfProxy(kRuntimePointerSize),
                          /* retry_allocation */ true);
    JitCompileTask compile_task(method, JitCompileTask::kCompile);
    compile_task.Run(thread);
    return;
  }

  ProfilingInfo* profiling_info = method->GetProfilingInfo(kRuntimePointerSize);
  // Update the entrypoint if the ProfilingInfo has one. The interpreter will call it
  // instead of interpreting the method.
  if ((profiling_info != nullptr) && (profiling_info->GetSavedEntryPoint() != nullptr)) {
    Runtime::Current()->GetInstrumentation()->UpdateMethodsCode(
        method, profiling_info->GetSavedEntryPoint());
  } else {
    AddSamples(thread, method, 1, /* with_backedges */ false);
  }
}

void Jit::InvokeVirtualOrInterface(ObjPtr<mirror::Object> this_object,
                                   ArtMethod* caller,
                                   uint32_t dex_pc,
                                   ArtMethod* callee ATTRIBUTE_UNUSED) {
  ScopedAssertNoThreadSuspension ants(__FUNCTION__);
  DCHECK(this_object != nullptr);
  ProfilingInfo* info = caller->GetProfilingInfo(kRuntimePointerSize);
  if (info != nullptr) {
    info->AddInvokeInfo(dex_pc, this_object->GetClass());
  }
}

void Jit::WaitForCompilationToFinish(Thread* self) {
  if (thread_pool_ != nullptr) {
    thread_pool_->Wait(self, false, false);
  }
}

void Jit::Stop() {
  Thread* self = Thread::Current();
  // TODO(ngeoffray): change API to not require calling WaitForCompilationToFinish twice.
  WaitForCompilationToFinish(self);
  GetThreadPool()->StopWorkers(self);
  WaitForCompilationToFinish(self);
}

void Jit::Start() {
  GetThreadPool()->StartWorkers(Thread::Current());
}

ScopedJitSuspend::ScopedJitSuspend() {
  jit::Jit* jit = Runtime::Current()->GetJit();
  was_on_ = (jit != nullptr) && (jit->GetThreadPool() != nullptr);
  if (was_on_) {
    jit->Stop();
  }
}

ScopedJitSuspend::~ScopedJitSuspend() {
  if (was_on_) {
    DCHECK(Runtime::Current()->GetJit() != nullptr);
    DCHECK(Runtime::Current()->GetJit()->GetThreadPool() != nullptr);
    Runtime::Current()->GetJit()->Start();
  }
}

}  // namespace jit
}  // namespace art