/*
 * Copyright 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "jit.h"

#include <dlfcn.h>

#include "art_method-inl.h"
#include "base/enums.h"
#include "debugger.h"
#include "entrypoints/runtime_asm_entrypoints.h"
#include "interpreter/interpreter.h"
#include "java_vm_ext.h"
#include "jit_code_cache.h"
#include "oat_file_manager.h"
#include "oat_quick_method_header.h"
#include "profile_compilation_info.h"
#include "profile_saver.h"
#include "runtime.h"
#include "runtime_options.h"
#include "stack_map.h"
#include "thread_list.h"
#include "utils.h"

namespace art {
namespace jit {

static constexpr bool kEnableOnStackReplacement = true;
// At what priority to schedule jit threads. 9 is the lowest foreground priority on device.
static constexpr int kJitPoolThreadPthreadPriority = 9;

// JIT compiler
void* Jit::jit_library_handle_ = nullptr;
void* Jit::jit_compiler_handle_ = nullptr;
void* (*Jit::jit_load_)(bool*) = nullptr;
void (*Jit::jit_unload_)(void*) = nullptr;
bool (*Jit::jit_compile_method_)(void*, ArtMethod*, Thread*, bool) = nullptr;
void (*Jit::jit_types_loaded_)(void*, mirror::Class**, size_t count) = nullptr;
bool Jit::generate_debug_info_ = false;

JitOptions* JitOptions::CreateFromRuntimeArguments(const RuntimeArgumentMap& options) {
  auto* jit_options = new JitOptions;
  jit_options->use_jit_compilation_ = options.GetOrDefault(RuntimeArgumentMap::UseJitCompilation);

  jit_options->code_cache_initial_capacity_ =
      options.GetOrDefault(RuntimeArgumentMap::JITCodeCacheInitialCapacity);
  jit_options->code_cache_max_capacity_ =
      options.GetOrDefault(RuntimeArgumentMap::JITCodeCacheMaxCapacity);
  jit_options->dump_info_on_shutdown_ =
      options.Exists(RuntimeArgumentMap::DumpJITInfoOnShutdown);
  jit_options->profile_saver_options_ =
      options.GetOrDefault(RuntimeArgumentMap::ProfileSaverOpts);

  jit_options->compile_threshold_ = options.GetOrDefault(RuntimeArgumentMap::JITCompileThreshold);
  if (jit_options->compile_threshold_ > std::numeric_limits<uint16_t>::max()) {
    LOG(FATAL) << "Method compilation threshold is above its internal limit.";
  }

  if (options.Exists(RuntimeArgumentMap::JITWarmupThreshold)) {
    jit_options->warmup_threshold_ = *options.Get(RuntimeArgumentMap::JITWarmupThreshold);
    if (jit_options->warmup_threshold_ > std::numeric_limits<uint16_t>::max()) {
      LOG(FATAL) << "Method warmup threshold is above its internal limit.";
    }
  } else {
    jit_options->warmup_threshold_ = jit_options->compile_threshold_ / 2;
  }

  if (options.Exists(RuntimeArgumentMap::JITOsrThreshold)) {
    jit_options->osr_threshold_ = *options.Get(RuntimeArgumentMap::JITOsrThreshold);
    if (jit_options->osr_threshold_ > std::numeric_limits<uint16_t>::max()) {
      LOG(FATAL) << "Method on stack replacement threshold is above its internal limit.";
    }
  } else {
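    // No explicit OSR threshold given: default to twice the compile threshold,
    // clamped to the same uint16_t limit enforced on the explicit option above.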
    jit_options->osr_threshold_ = jit_options->compile_threshold_ * 2;
    if (jit_options->osr_threshold_ > std::numeric_limits<uint16_t>::max()) {
      jit_options->osr_threshold_ = std::numeric_limits<uint16_t>::max();
    }
  }

  if (options.Exists(RuntimeArgumentMap::JITPriorityThreadWeight)) {
    jit_options->priority_thread_weight_ =
        *options.Get(RuntimeArgumentMap::JITPriorityThreadWeight);
    if (jit_options->priority_thread_weight_ > jit_options->warmup_threshold_) {
      LOG(FATAL) << "Priority thread weight is above the warmup threshold.";
    } else if (jit_options->priority_thread_weight_ == 0) {
      LOG(FATAL) << "Priority thread weight cannot be 0.";
    }
  } else {
    jit_options->priority_thread_weight_ = std::max(
        jit_options->warmup_threshold_ / Jit::kDefaultPriorityThreadWeightRatio,
        static_cast<size_t>(1));
  }

  if (options.Exists(RuntimeArgumentMap::JITInvokeTransitionWeight)) {
    jit_options->invoke_transition_weight_ =
        *options.Get(RuntimeArgumentMap::JITInvokeTransitionWeight);
    if (jit_options->invoke_transition_weight_ > jit_options->warmup_threshold_) {
      LOG(FATAL) << "Invoke transition weight is above the warmup threshold.";
    } else if (jit_options->invoke_transition_weight_ == 0) {
      LOG(FATAL) << "Invoke transition weight cannot be 0.";
    }
  } else {
    jit_options->invoke_transition_weight_ = std::max(
        jit_options->warmup_threshold_ / Jit::kDefaultInvokeTransitionWeightRatio,
        static_cast<size_t>(1));
  }

  return jit_options;
}

bool Jit::ShouldUsePriorityThreadWeight() {
  return Runtime::Current()->InJankPerceptibleProcessState()
      && Thread::Current()->IsJitSensitiveThread();
}

void Jit::DumpInfo(std::ostream& os) {
  code_cache_->Dump(os);
  cumulative_timings_.Dump(os);
  MutexLock mu(Thread::Current(), lock_);
  memory_use_.PrintMemoryUse(os);
}

void Jit::DumpForSigQuit(std::ostream& os) {
  DumpInfo(os);
  ProfileSaver::DumpInstanceInfo(os);
}

void Jit::AddTimingLogger(const TimingLogger& logger) {
  cumulative_timings_.AddLogger(logger);
}

Jit::Jit() : dump_info_on_shutdown_(false),
             cumulative_timings_("JIT timings"),
             memory_use_("Memory used for compilation", 16),
             lock_("JIT memory use lock"),
             use_jit_compilation_(true),
             hot_method_threshold_(0),
             warm_method_threshold_(0),
             osr_method_threshold_(0),
             priority_thread_weight_(0),
             invoke_transition_weight_(0) {}

Jit* Jit::Create(JitOptions* options, std::string* error_msg) {
  DCHECK(options->UseJitCompilation() || options->GetProfileSaverOptions().IsEnabled());
  std::unique_ptr<Jit> jit(new Jit);
  jit->dump_info_on_shutdown_ = options->DumpJitInfoOnShutdown();
  if (jit_compiler_handle_ == nullptr && !LoadCompiler(error_msg)) {
    return nullptr;
  }
  jit->code_cache_.reset(JitCodeCache::Create(
      options->GetCodeCacheInitialCapacity(),
      options->GetCodeCacheMaxCapacity(),
      jit->generate_debug_info_,
      error_msg));
  if (jit->GetCodeCache() == nullptr) {
    return nullptr;
  }
  jit->use_jit_compilation_ = options->UseJitCompilation();
  jit->profile_saver_options_ = options->GetProfileSaverOptions();
  VLOG(jit) << "JIT created with initial_capacity="
      << PrettySize(options->GetCodeCacheInitialCapacity())
      << ", max_capacity=" << PrettySize(options->GetCodeCacheMaxCapacity())
      << ", compile_threshold=" << options->GetCompileThreshold()
      << ", profile_saver_options=" << options->GetProfileSaverOptions();

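  // Copy the option-derived thresholds into plain fields so hot paths such as
  // AddSamples read them directly instead of going through JitOptions.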
  jit->hot_method_threshold_ = options->GetCompileThreshold();
  jit->warm_method_threshold_ = options->GetWarmupThreshold();
  jit->osr_method_threshold_ = options->GetOsrThreshold();
  jit->priority_thread_weight_ = options->GetPriorityThreadWeight();
  jit->invoke_transition_weight_ = options->GetInvokeTransitionWeight();

  jit->CreateThreadPool();

  // Notify native debugger about the classes already loaded before the creation of the jit.
  jit->DumpTypeInfoForLoadedTypes(Runtime::Current()->GetClassLinker());
  return jit.release();
}

bool Jit::LoadCompilerLibrary(std::string* error_msg) {
  jit_library_handle_ = dlopen(
      kIsDebugBuild ? "libartd-compiler.so" : "libart-compiler.so", RTLD_NOW);
  if (jit_library_handle_ == nullptr) {
    std::ostringstream oss;
    oss << "JIT could not load libart-compiler.so: " << dlerror();
    *error_msg = oss.str();
    return false;
  }
  jit_load_ = reinterpret_cast<void* (*)(bool*)>(dlsym(jit_library_handle_, "jit_load"));
  if (jit_load_ == nullptr) {
    dlclose(jit_library_handle_);
    *error_msg = "JIT couldn't find jit_load entry point";
    return false;
  }
  jit_unload_ = reinterpret_cast<void (*)(void*)>(
      dlsym(jit_library_handle_, "jit_unload"));
  if (jit_unload_ == nullptr) {
    dlclose(jit_library_handle_);
    *error_msg = "JIT couldn't find jit_unload entry point";
    return false;
  }
  jit_compile_method_ = reinterpret_cast<bool (*)(void*, ArtMethod*, Thread*, bool)>(
      dlsym(jit_library_handle_, "jit_compile_method"));
  if (jit_compile_method_ == nullptr) {
    dlclose(jit_library_handle_);
    *error_msg = "JIT couldn't find jit_compile_method entry point";
    return false;
  }
  jit_types_loaded_ = reinterpret_cast<void (*)(void*, mirror::Class**, size_t)>(
      dlsym(jit_library_handle_, "jit_types_loaded"));
  if (jit_types_loaded_ == nullptr) {
    dlclose(jit_library_handle_);
    *error_msg = "JIT couldn't find jit_types_loaded entry point";
    return false;
  }
  return true;
}

bool Jit::LoadCompiler(std::string* error_msg) {
  if (jit_library_handle_ == nullptr && !LoadCompilerLibrary(error_msg)) {
    return false;
  }
  bool will_generate_debug_symbols = false;
  VLOG(jit) << "Calling JitLoad interpreter_only="
      << Runtime::Current()->GetInstrumentation()->InterpretOnly();
  jit_compiler_handle_ = (jit_load_)(&will_generate_debug_symbols);
  if (jit_compiler_handle_ == nullptr) {
    dlclose(jit_library_handle_);
    *error_msg = "JIT couldn't load compiler";
    return false;
  }
  generate_debug_info_ = will_generate_debug_symbols;
  return true;
}

bool Jit::CompileMethod(ArtMethod* method, Thread* self, bool osr) {
  DCHECK(Runtime::Current()->UseJitCompilation());
  DCHECK(!method->IsRuntimeMethod());

  // Don't compile the method if it has breakpoints.
  if (Dbg::IsDebuggerActive() && Dbg::MethodHasAnyBreakpoints(method)) {
    VLOG(jit) << "JIT not compiling " << method->PrettyMethod() << " due to breakpoint";
    return false;
  }

  // Don't compile the method if we are supposed to be deoptimized.
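  // (A deoptimized method must execute through the interpreter, so any
  // compiled code we produced here would never be used.)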
  instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
  if (instrumentation->AreAllMethodsDeoptimized() || instrumentation->IsDeoptimized(method)) {
    VLOG(jit) << "JIT not compiling " << method->PrettyMethod() << " due to deoptimization";
    return false;
  }

  // If we get a request to compile a proxy method, we pass the actual Java method
  // of that proxy method, as the compiler does not expect a proxy method.
  ArtMethod* method_to_compile = method->GetInterfaceMethodIfProxy(kRuntimePointerSize);
  if (!code_cache_->NotifyCompilationOf(method_to_compile, self, osr)) {
    return false;
  }

  VLOG(jit) << "Compiling method "
            << ArtMethod::PrettyMethod(method_to_compile)
            << " osr=" << std::boolalpha << osr;
  bool success = jit_compile_method_(jit_compiler_handle_, method_to_compile, self, osr);
  code_cache_->DoneCompiling(method_to_compile, self, osr);
  if (!success) {
    VLOG(jit) << "Failed to compile method "
              << ArtMethod::PrettyMethod(method_to_compile)
              << " osr=" << std::boolalpha << osr;
  }
  if (kIsDebugBuild) {
    if (self->IsExceptionPending()) {
      mirror::Throwable* exception = self->GetException();
      LOG(FATAL) << "No pending exception expected after compiling "
                 << ArtMethod::PrettyMethod(method)
                 << ": "
                 << exception->Dump();
    }
  }
  return success;
}

void Jit::CreateThreadPool() {
  // There is a DCHECK in the 'AddSamples' method to ensure the thread pool
  // is not null when we instrument.

  // We need peers as we may report the JIT thread, e.g., in the debugger.
  constexpr bool kJitPoolNeedsPeers = true;
  thread_pool_.reset(new ThreadPool("Jit thread pool", 1, kJitPoolNeedsPeers));

  thread_pool_->SetPthreadPriority(kJitPoolThreadPthreadPriority);
  Start();
}

void Jit::DeleteThreadPool() {
  Thread* self = Thread::Current();
  DCHECK(Runtime::Current()->IsShuttingDown(self));
  if (thread_pool_ != nullptr) {
    ThreadPool* cache = nullptr;
    {
      ScopedSuspendAll ssa(__FUNCTION__);
      // Clear thread_pool_ field while the threads are suspended.
      // A mutator in the 'AddSamples' method will check against it.
      cache = thread_pool_.release();
    }
    cache->StopWorkers(self);
    cache->RemoveAllTasks(self);
    // We could just suspend all threads, but we know those threads
    // will finish in a short period, so it's not worth adding a suspend logic
    // here. Besides, this is only done for shutdown.
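    // (The two booleans passed to Wait below are ThreadPool's do_work and
    // may_hold_locks flags; both are off since the workers were stopped above.)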
    cache->Wait(self, false, false);
    delete cache;
  }
}

void Jit::StartProfileSaver(const std::string& filename,
                            const std::vector<std::string>& code_paths) {
  if (profile_saver_options_.IsEnabled()) {
    ProfileSaver::Start(profile_saver_options_,
                        filename,
                        code_cache_.get(),
                        code_paths);
  }
}

void Jit::StopProfileSaver() {
  if (profile_saver_options_.IsEnabled() && ProfileSaver::IsStarted()) {
    ProfileSaver::Stop(dump_info_on_shutdown_);
  }
}

bool Jit::JitAtFirstUse() {
  return HotMethodThreshold() == 0;
}

bool Jit::CanInvokeCompiledCode(ArtMethod* method) {
  return code_cache_->ContainsPc(method->GetEntryPointFromQuickCompiledCode());
}

Jit::~Jit() {
  DCHECK(!profile_saver_options_.IsEnabled() || !ProfileSaver::IsStarted());
  if (dump_info_on_shutdown_) {
    DumpInfo(LOG_STREAM(INFO));
    Runtime::Current()->DumpDeoptimizations(LOG_STREAM(INFO));
  }
  DeleteThreadPool();
  if (jit_compiler_handle_ != nullptr) {
    jit_unload_(jit_compiler_handle_);
    jit_compiler_handle_ = nullptr;
  }
  if (jit_library_handle_ != nullptr) {
    dlclose(jit_library_handle_);
    jit_library_handle_ = nullptr;
  }
}

void Jit::NewTypeLoadedIfUsingJit(mirror::Class* type) {
  if (!Runtime::Current()->UseJitCompilation()) {
    // No need to notify if we only use the JIT to save profiles.
    return;
  }
  jit::Jit* jit = Runtime::Current()->GetJit();
  if (jit->generate_debug_info_) {
    DCHECK(jit->jit_types_loaded_ != nullptr);
    jit->jit_types_loaded_(jit->jit_compiler_handle_, &type, 1);
  }
}

void Jit::DumpTypeInfoForLoadedTypes(ClassLinker* linker) {
  struct CollectClasses : public ClassVisitor {
    bool operator()(ObjPtr<mirror::Class> klass) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
      classes_.push_back(klass.Ptr());
      return true;
    }
    std::vector<mirror::Class*> classes_;
  };

  if (generate_debug_info_) {
    ScopedObjectAccess so(Thread::Current());

    CollectClasses visitor;
    linker->VisitClasses(&visitor);
    jit_types_loaded_(jit_compiler_handle_, visitor.classes_.data(), visitor.classes_.size());
  }
}

extern "C" void art_quick_osr_stub(void** stack,
                                   uint32_t stack_size_in_bytes,
                                   const uint8_t* native_pc,
                                   JValue* result,
                                   const char* shorty,
                                   Thread* self);

bool Jit::MaybeDoOnStackReplacement(Thread* thread,
                                    ArtMethod* method,
                                    uint32_t dex_pc,
                                    int32_t dex_pc_offset,
                                    JValue* result) {
  if (!kEnableOnStackReplacement) {
    return false;
  }

  Jit* jit = Runtime::Current()->GetJit();
  if (jit == nullptr) {
    return false;
  }

  if (UNLIKELY(__builtin_frame_address(0) < thread->GetStackEnd())) {
    // Don't attempt to do an OSR if we are close to the stack limit. Since
    // the interpreter frames are still on stack, OSR has the potential
    // to stack overflow even for a simple loop.
    // b/27094810.
    return false;
  }

  // Get the actual Java method if this method is from a proxy class. The compiler
  // and the JIT code cache do not expect methods from proxy classes.
  method = method->GetInterfaceMethodIfProxy(kRuntimePointerSize);

  // Cheap check if the method has been compiled already. That's an indicator that we should
  // osr into it.
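  // (An entry point inside the JIT code cache means a compiled version of the
  // method exists.)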
  if (!jit->GetCodeCache()->ContainsPc(method->GetEntryPointFromQuickCompiledCode())) {
    return false;
  }

  // Fetch some data before looking up for an OSR method. We don't want thread
  // suspension once we hold an OSR method, as the JIT code cache could delete the OSR
  // method while we are being suspended.
  const size_t number_of_vregs = method->GetCodeItem()->registers_size_;
  const char* shorty = method->GetShorty();
  std::string method_name(VLOG_IS_ON(jit) ? method->PrettyMethod() : "");
  void** memory = nullptr;
  size_t frame_size = 0;
  ShadowFrame* shadow_frame = nullptr;
  const uint8_t* native_pc = nullptr;

  {
    ScopedAssertNoThreadSuspension sts("Holding OSR method");
    const OatQuickMethodHeader* osr_method = jit->GetCodeCache()->LookupOsrMethodHeader(method);
    if (osr_method == nullptr) {
      // No osr method yet, just return to the interpreter.
      return false;
    }

    CodeInfo code_info = osr_method->GetOptimizedCodeInfo();
    CodeInfoEncoding encoding = code_info.ExtractEncoding();

    // Find stack map starting at the target dex_pc.
    StackMap stack_map = code_info.GetOsrStackMapForDexPc(dex_pc + dex_pc_offset, encoding);
    if (!stack_map.IsValid()) {
      // There is no OSR stack map for this dex pc offset. Just return to the interpreter in the
      // hope that the next branch has one.
      return false;
    }

    // Before allowing the jump, make sure the debugger is not active to avoid jumping from
    // interpreter to OSR while e.g. single stepping. Note that we could selectively disable
    // OSR when single stepping, but that's currently hard to know at this point.
    if (Dbg::IsDebuggerActive()) {
      return false;
    }

    // We found a stack map, now fill the frame with dex register values from the interpreter's
    // shadow frame.
    DexRegisterMap vreg_map =
        code_info.GetDexRegisterMapOf(stack_map, encoding, number_of_vregs);

    frame_size = osr_method->GetFrameSizeInBytes();

    // Allocate memory to put shadow frame values. The osr stub will copy that memory to
    // stack.
    // Note that we could pass the shadow frame to the stub, and let it copy the values there,
    // but that is engineering complexity not worth the effort for something like OSR.
    memory = reinterpret_cast<void**>(malloc(frame_size));
    CHECK(memory != nullptr);
    memset(memory, 0, frame_size);

    // Art ABI: ArtMethod is at the bottom of the stack.
    memory[0] = method;

    shadow_frame = thread->PopShadowFrame();
    if (!vreg_map.IsValid()) {
      // If we don't have a dex register map, then there are no live dex registers at
      // this dex pc.
    } else {
      for (uint16_t vreg = 0; vreg < number_of_vregs; ++vreg) {
        DexRegisterLocation::Kind location =
            vreg_map.GetLocationKind(vreg, number_of_vregs, code_info, encoding);
        if (location == DexRegisterLocation::Kind::kNone) {
          // Dex register is dead or uninitialized.
          continue;
        }

        if (location == DexRegisterLocation::Kind::kConstant) {
          // We skip constants because the compiled code knows how to handle them.
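          // (The constant's value is encoded in the stack map itself rather
          // than read out of the frame.)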
          continue;
        }

        DCHECK_EQ(location, DexRegisterLocation::Kind::kInStack);

        int32_t vreg_value = shadow_frame->GetVReg(vreg);
        int32_t slot_offset = vreg_map.GetStackOffsetInBytes(vreg,
                                                             number_of_vregs,
                                                             code_info,
                                                             encoding);
        DCHECK_LT(slot_offset, static_cast<int32_t>(frame_size));
        DCHECK_GT(slot_offset, 0);
        (reinterpret_cast<int32_t*>(memory))[slot_offset / sizeof(int32_t)] = vreg_value;
      }
    }

    native_pc = stack_map.GetNativePcOffset(encoding.stack_map.encoding, kRuntimeISA) +
        osr_method->GetEntryPoint();
    VLOG(jit) << "Jumping to "
              << method_name
              << "@"
              << std::hex << reinterpret_cast<uintptr_t>(native_pc);
  }

  {
    ManagedStack fragment;
    thread->PushManagedStackFragment(&fragment);
    (*art_quick_osr_stub)(memory,
                          frame_size,
                          native_pc,
                          result,
                          shorty,
                          thread);

    if (UNLIKELY(thread->GetException() == Thread::GetDeoptimizationException())) {
      thread->DeoptimizeWithDeoptimizationException(result);
    }
    thread->PopManagedStackFragment(fragment);
  }
  free(memory);
  thread->PushShadowFrame(shadow_frame);
  VLOG(jit) << "Done running OSR code for " << method_name;
  return true;
}

void Jit::AddMemoryUsage(ArtMethod* method, size_t bytes) {
  if (bytes > 4 * MB) {
    LOG(INFO) << "Compiler allocated "
              << PrettySize(bytes)
              << " to compile "
              << ArtMethod::PrettyMethod(method);
  }
  MutexLock mu(Thread::Current(), lock_);
  memory_use_.AddValue(bytes);
}

class JitCompileTask FINAL : public Task {
 public:
  enum TaskKind {
    kAllocateProfile,
    kCompile,
    kCompileOsr
  };

  JitCompileTask(ArtMethod* method, TaskKind kind) : method_(method), kind_(kind) {
    ScopedObjectAccess soa(Thread::Current());
    // Add a global ref to the class to prevent class unloading until compilation is done.
    klass_ = soa.Vm()->AddGlobalRef(soa.Self(), method_->GetDeclaringClass());
    CHECK(klass_ != nullptr);
  }

  ~JitCompileTask() {
    ScopedObjectAccess soa(Thread::Current());
    soa.Vm()->DeleteGlobalRef(soa.Self(), klass_);
  }

  void Run(Thread* self) OVERRIDE {
    ScopedObjectAccess soa(self);
    if (kind_ == kCompile) {
      Runtime::Current()->GetJit()->CompileMethod(method_, self, /* osr */ false);
    } else if (kind_ == kCompileOsr) {
      Runtime::Current()->GetJit()->CompileMethod(method_, self, /* osr */ true);
    } else {
      DCHECK(kind_ == kAllocateProfile);
      if (ProfilingInfo::Create(self, method_, /* retry_allocation */ true)) {
        VLOG(jit) << "Start profiling " << ArtMethod::PrettyMethod(method_);
      }
    }
    ProfileSaver::NotifyJitActivity();
  }

  void Finalize() OVERRIDE {
    delete this;
  }

 private:
  ArtMethod* const method_;
  const TaskKind kind_;
  jobject klass_;

  DISALLOW_IMPLICIT_CONSTRUCTORS(JitCompileTask);
};

void Jit::AddSamples(Thread* self, ArtMethod* method, uint16_t count, bool with_backedges) {
  if (thread_pool_ == nullptr) {
    // Should only see this when shutting down.
    DCHECK(Runtime::Current()->IsShuttingDown(self));
    return;
  }

  if (method->IsClassInitializer() || method->IsNative() || !method->IsCompilable()) {
    // We do not want to compile such methods.
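    // (Class initializers run at most once, native methods have no dex code to
    // compile, and !IsCompilable() marks methods the runtime has excluded.)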
    return;
  }
  DCHECK(thread_pool_ != nullptr);
  DCHECK_GT(warm_method_threshold_, 0);
  DCHECK_GT(hot_method_threshold_, warm_method_threshold_);
  DCHECK_GT(osr_method_threshold_, hot_method_threshold_);
  DCHECK_GE(priority_thread_weight_, 1);
  DCHECK_LE(priority_thread_weight_, hot_method_threshold_);

  int32_t starting_count = method->GetCounter();
  if (Jit::ShouldUsePriorityThreadWeight()) {
    count *= priority_thread_weight_;
  }
  int32_t new_count = starting_count + count;  // int32 here to avoid wrap-around.
  if (starting_count < warm_method_threshold_) {
    if ((new_count >= warm_method_threshold_) &&
        (method->GetProfilingInfo(kRuntimePointerSize) == nullptr)) {
      bool success = ProfilingInfo::Create(self, method, /* retry_allocation */ false);
      if (success) {
        VLOG(jit) << "Start profiling " << method->PrettyMethod();
      }

      if (thread_pool_ == nullptr) {
        // Calling ProfilingInfo::Create might put us in a suspended state, which could
        // lead to the thread pool being deleted when we are shutting down.
        DCHECK(Runtime::Current()->IsShuttingDown(self));
        return;
      }

      if (!success) {
        // We failed allocating. Instead of doing the collection on the Java thread, we push
        // an allocation to a compiler thread, that will do the collection.
        thread_pool_->AddTask(self, new JitCompileTask(method, JitCompileTask::kAllocateProfile));
      }
    }
    // Avoid jumping more than one state at a time.
    new_count = std::min(new_count, hot_method_threshold_ - 1);
  } else if (use_jit_compilation_) {
    if (starting_count < hot_method_threshold_) {
      if ((new_count >= hot_method_threshold_) &&
          !code_cache_->ContainsPc(method->GetEntryPointFromQuickCompiledCode())) {
        DCHECK(thread_pool_ != nullptr);
        thread_pool_->AddTask(self, new JitCompileTask(method, JitCompileTask::kCompile));
      }
      // Avoid jumping more than one state at a time.
      new_count = std::min(new_count, osr_method_threshold_ - 1);
    } else if (starting_count < osr_method_threshold_) {
      if (!with_backedges) {
        // If the samples don't contain any back edge, we don't increment the hotness.
        return;
      }
      if ((new_count >= osr_method_threshold_) && !code_cache_->IsOsrCompiled(method)) {
        DCHECK(thread_pool_ != nullptr);
        thread_pool_->AddTask(self, new JitCompileTask(method, JitCompileTask::kCompileOsr));
      }
    }
  }
  // Update hotness counter.
  method->SetCounter(new_count);
}

void Jit::MethodEntered(Thread* thread, ArtMethod* method) {
  Runtime* runtime = Runtime::Current();
  if (UNLIKELY(runtime->UseJitCompilation() && runtime->GetJit()->JitAtFirstUse())) {
    // The compiler requires a ProfilingInfo object.
    ProfilingInfo::Create(thread, method, /* retry_allocation */ true);
    JitCompileTask compile_task(method, JitCompileTask::kCompile);
    compile_task.Run(thread);
    return;
  }

  ProfilingInfo* profiling_info = method->GetProfilingInfo(kRuntimePointerSize);
  // Update the entrypoint if the ProfilingInfo has one. The interpreter will call it
  // instead of interpreting the method.
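  // (The saved entry point is typically stashed here by the code cache when it
  // clears entry points during its collection, so compiled code can be
  // reinstated without recompiling.)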
  if ((profiling_info != nullptr) && (profiling_info->GetSavedEntryPoint() != nullptr)) {
    Runtime::Current()->GetInstrumentation()->UpdateMethodsCode(
        method, profiling_info->GetSavedEntryPoint());
  } else {
    AddSamples(thread, method, 1, /* with_backedges */ false);
  }
}

void Jit::InvokeVirtualOrInterface(ObjPtr<mirror::Object> this_object,
                                   ArtMethod* caller,
                                   uint32_t dex_pc,
                                   ArtMethod* callee ATTRIBUTE_UNUSED) {
  ScopedAssertNoThreadSuspension ants(__FUNCTION__);
  DCHECK(this_object != nullptr);
  ProfilingInfo* info = caller->GetProfilingInfo(kRuntimePointerSize);
  if (info != nullptr) {
    info->AddInvokeInfo(dex_pc, this_object->GetClass());
  }
}

void Jit::WaitForCompilationToFinish(Thread* self) {
  if (thread_pool_ != nullptr) {
    thread_pool_->Wait(self, false, false);
  }
}

void Jit::Stop() {
  Thread* self = Thread::Current();
  // TODO(ngeoffray): change API to not require calling WaitForCompilationToFinish twice.
  WaitForCompilationToFinish(self);
  GetThreadPool()->StopWorkers(self);
  WaitForCompilationToFinish(self);
}

void Jit::Start() {
  GetThreadPool()->StartWorkers(Thread::Current());
}

ScopedJitSuspend::ScopedJitSuspend() {
  jit::Jit* jit = Runtime::Current()->GetJit();
  was_on_ = (jit != nullptr) && (jit->GetThreadPool() != nullptr);
  if (was_on_) {
    jit->Stop();
  }
}

ScopedJitSuspend::~ScopedJitSuspend() {
  if (was_on_) {
    DCHECK(Runtime::Current()->GetJit() != nullptr);
    DCHECK(Runtime::Current()->GetJit()->GetThreadPool() != nullptr);
    Runtime::Current()->GetJit()->Start();
  }
}

}  // namespace jit
}  // namespace art