Home | History | Annotate | Download | only in runtime
      1 /*
      2  * Copyright (C) 2011 The Android Open Source Project
      3  *
      4  * Licensed under the Apache License, Version 2.0 (the "License");
      5  * you may not use this file except in compliance with the License.
      6  * You may obtain a copy of the License at
      7  *
      8  *      http://www.apache.org/licenses/LICENSE-2.0
      9  *
     10  * Unless required by applicable law or agreed to in writing, software
     11  * distributed under the License is distributed on an "AS IS" BASIS,
     12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
     13  * See the License for the specific language governing permissions and
     14  * limitations under the License.
     15  */
     16 
     17 #include "runtime.h"
     18 
     19 // sys/mount.h has to come before linux/fs.h due to redefinition of MS_RDONLY, MS_BIND, etc
     20 #include <sys/mount.h>
     21 #ifdef __linux__
     22 #include <linux/fs.h>
     23 #include <sys/prctl.h>
     24 #endif
     25 
     26 #include <fcntl.h>
     27 #include <signal.h>
     28 #include <sys/syscall.h>
     29 
     30 #if defined(__APPLE__)
     31 #include <crt_externs.h>  // for _NSGetEnviron
     32 #endif
     33 
     34 #include <cstdio>
     35 #include <cstdlib>
     36 #include <limits>
     37 #include <thread>
     38 #include <vector>
     39 
     40 #include "android-base/strings.h"
     41 
     42 #include "aot_class_linker.h"
     43 #include "arch/arm/registers_arm.h"
     44 #include "arch/arm64/registers_arm64.h"
     45 #include "arch/context.h"
     46 #include "arch/instruction_set_features.h"
     47 #include "arch/mips/registers_mips.h"
     48 #include "arch/mips64/registers_mips64.h"
     49 #include "arch/x86/registers_x86.h"
     50 #include "arch/x86_64/registers_x86_64.h"
     51 #include "art_field-inl.h"
     52 #include "art_method-inl.h"
     53 #include "asm_support.h"
     54 #include "base/aborting.h"
     55 #include "base/arena_allocator.h"
     56 #include "base/atomic.h"
     57 #include "base/dumpable.h"
     58 #include "base/enums.h"
     59 #include "base/file_utils.h"
     60 #include "base/malloc_arena_pool.h"
     61 #include "base/mem_map_arena_pool.h"
     62 #include "base/memory_tool.h"
     63 #include "base/mutex.h"
     64 #include "base/os.h"
     65 #include "base/quasi_atomic.h"
     66 #include "base/sdk_version.h"
     67 #include "base/stl_util.h"
     68 #include "base/systrace.h"
     69 #include "base/unix_file/fd_file.h"
     70 #include "base/utils.h"
     71 #include "class_linker-inl.h"
     72 #include "class_root.h"
     73 #include "compiler_callbacks.h"
     74 #include "debugger.h"
     75 #include "dex/art_dex_file_loader.h"
     76 #include "dex/dex_file_loader.h"
     77 #include "elf_file.h"
     78 #include "entrypoints/runtime_asm_entrypoints.h"
     79 #include "experimental_flags.h"
     80 #include "fault_handler.h"
     81 #include "gc/accounting/card_table-inl.h"
     82 #include "gc/heap.h"
     83 #include "gc/scoped_gc_critical_section.h"
     84 #include "gc/space/image_space.h"
     85 #include "gc/space/space-inl.h"
     86 #include "gc/system_weak.h"
     87 #include "handle_scope-inl.h"
     88 #include "hidden_api.h"
     89 #include "image-inl.h"
     90 #include "instrumentation.h"
     91 #include "intern_table-inl.h"
     92 #include "interpreter/interpreter.h"
     93 #include "jit/jit.h"
     94 #include "jit/jit_code_cache.h"
     95 #include "jit/profile_saver.h"
     96 #include "jni/java_vm_ext.h"
     97 #include "jni/jni_internal.h"
     98 #include "linear_alloc.h"
     99 #include "memory_representation.h"
    100 #include "mirror/array.h"
    101 #include "mirror/class-alloc-inl.h"
    102 #include "mirror/class-inl.h"
    103 #include "mirror/class_ext.h"
    104 #include "mirror/class_loader.h"
    105 #include "mirror/emulated_stack_frame.h"
    106 #include "mirror/field.h"
    107 #include "mirror/method.h"
    108 #include "mirror/method_handle_impl.h"
    109 #include "mirror/method_handles_lookup.h"
    110 #include "mirror/method_type.h"
    111 #include "mirror/stack_trace_element.h"
    112 #include "mirror/throwable.h"
    113 #include "mirror/var_handle.h"
    114 #include "monitor.h"
    115 #include "native/dalvik_system_DexFile.h"
    116 #include "native/dalvik_system_VMDebug.h"
    117 #include "native/dalvik_system_VMRuntime.h"
    118 #include "native/dalvik_system_VMStack.h"
    119 #include "native/dalvik_system_ZygoteHooks.h"
    120 #include "native/java_lang_Class.h"
    121 #include "native/java_lang_Object.h"
    122 #include "native/java_lang_String.h"
    123 #include "native/java_lang_StringFactory.h"
    124 #include "native/java_lang_System.h"
    125 #include "native/java_lang_Thread.h"
    126 #include "native/java_lang_Throwable.h"
    127 #include "native/java_lang_VMClassLoader.h"
    128 #include "native/java_lang_invoke_MethodHandleImpl.h"
    129 #include "native/java_lang_ref_FinalizerReference.h"
    130 #include "native/java_lang_ref_Reference.h"
    131 #include "native/java_lang_reflect_Array.h"
    132 #include "native/java_lang_reflect_Constructor.h"
    133 #include "native/java_lang_reflect_Executable.h"
    134 #include "native/java_lang_reflect_Field.h"
    135 #include "native/java_lang_reflect_Method.h"
    136 #include "native/java_lang_reflect_Parameter.h"
    137 #include "native/java_lang_reflect_Proxy.h"
    138 #include "native/java_util_concurrent_atomic_AtomicLong.h"
    139 #include "native/libcore_util_CharsetUtils.h"
    140 #include "native/org_apache_harmony_dalvik_ddmc_DdmServer.h"
    141 #include "native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.h"
    142 #include "native/sun_misc_Unsafe.h"
    143 #include "native_bridge_art_interface.h"
    144 #include "native_stack_dump.h"
    145 #include "nativehelper/scoped_local_ref.h"
    146 #include "oat_file.h"
    147 #include "oat_file_manager.h"
    148 #include "object_callbacks.h"
    149 #include "parsed_options.h"
    150 #include "quick/quick_method_frame_info.h"
    151 #include "reflection.h"
    152 #include "runtime_callbacks.h"
    153 #include "runtime_intrinsics.h"
    154 #include "runtime_options.h"
    155 #include "scoped_thread_state_change-inl.h"
    156 #include "sigchain.h"
    157 #include "signal_catcher.h"
    158 #include "signal_set.h"
    159 #include "thread.h"
    160 #include "thread_list.h"
    161 #include "ti/agent.h"
    162 #include "trace.h"
    163 #include "transaction.h"
    164 #include "vdex_file.h"
    165 #include "verifier/class_verifier.h"
    166 #include "well_known_classes.h"
    167 
    168 #ifdef ART_TARGET_ANDROID
    169 #include <android/set_abort_message.h>
    170 #endif
    171 
    172 // Static asserts to check the values of generated assembly-support macros.
    173 #define ASM_DEFINE(NAME, EXPR) static_assert((NAME) == (EXPR), "Unexpected value of " #NAME);
    174 #include "asm_defines.def"
    175 #undef ASM_DEFINE
    176 
    177 namespace art {
    178 
// If a signal isn't handled properly, enable a handler that attempts to dump the Java stack.
static constexpr bool kEnableJavaStackTraceHandler = false;
// Tuned by compiling GmsCore under perf and measuring time spent in DescriptorEquals for class
// linking.
// NOTE(review): these appear to be hash-table load factors selected per memory mode
// (low-memory vs. normal) — confirm at their use sites, which are outside this view.
static constexpr double kLowMemoryMinLoadFactor = 0.5;
static constexpr double kLowMemoryMaxLoadFactor = 0.8;
static constexpr double kNormalMinLoadFactor = 0.4;
static constexpr double kNormalMaxLoadFactor = 0.7;

// Extra added to the default heap growth multiplier. Used to adjust the GC ergonomics for the read
// barrier config.
static constexpr double kExtraDefaultHeapGrowthMultiplier = kUseReadBarrier ? 1.0 : 0.0;

// On-device path of the APEX boot image (see b/… — used where boot images are located;
// the consuming code is outside this view).
static constexpr const char* kApexBootImageLocation = "/system/framework/apex.art";

// The process-wide singleton Runtime. Set in Runtime::Create() and cleared in the
// destructor; see the TODO there about racing on this pointer.
Runtime* Runtime::instance_ = nullptr;
    195 
// Configuration for method tracing, bundling the mode, output destination,
// file path and file size that a trace should be (re)started with.
struct TraceConfig {
  Trace::TraceMode trace_mode;                // How tracing is performed (see Trace::TraceMode).
  Trace::TraceOutputMode trace_output_mode;   // Where trace output is written (see Trace::TraceOutputMode).
  std::string trace_file;                     // Path of the trace output file.
  size_t trace_file_size;                     // Size limit for the trace file — presumably bytes; confirm with Trace.
};
    202 
namespace {

// Returns the process environment (the `environ` array) in a way that works on
// every supported platform.
#ifdef __APPLE__
inline char** GetEnviron() {
  // When Google Test is built as a framework on MacOS X, the environ variable
  // is unavailable. Apple's documentation (man environ) recommends using
  // _NSGetEnviron() instead.
  return *_NSGetEnviron();
}
#else
// Some POSIX platforms expect you to declare environ. extern "C" makes
// it reside in the global namespace.
extern "C" char** environ;
inline char** GetEnviron() { return environ; }
#endif

// Verifies that a compile-time constant matches its runtime-computed counterpart;
// called once from the Runtime constructor.
void CheckConstants() {
  CHECK_EQ(mirror::Array::kFirstElementOffset, mirror::Array::FirstElementOffset());
}

}  // namespace
    224 
// Default-constructs the runtime with every subsystem pointer null and all flags at
// their conservative defaults; real setup happens later in Init(). The initializer
// list must stay in member-declaration order, so do not reorder entries.
Runtime::Runtime()
    : resolution_method_(nullptr),
      imt_conflict_method_(nullptr),
      imt_unimplemented_method_(nullptr),
      instruction_set_(InstructionSet::kNone),
      compiler_callbacks_(nullptr),
      is_zygote_(false),
      is_system_server_(false),
      must_relocate_(false),
      is_concurrent_gc_enabled_(true),
      is_explicit_gc_disabled_(false),
      image_dex2oat_enabled_(true),
      default_stack_size_(0),
      heap_(nullptr),
      max_spins_before_thin_lock_inflation_(Monitor::kDefaultMaxSpinsBeforeThinLockInflation),
      monitor_list_(nullptr),
      monitor_pool_(nullptr),
      thread_list_(nullptr),
      intern_table_(nullptr),
      class_linker_(nullptr),
      signal_catcher_(nullptr),
      java_vm_(nullptr),
      thread_pool_ref_count_(0u),
      fault_message_(nullptr),
      threads_being_born_(0),
      shutdown_cond_(new ConditionVariable("Runtime shutdown", *Locks::runtime_shutdown_lock_)),
      shutting_down_(false),
      shutting_down_started_(false),
      started_(false),
      finished_starting_(false),
      vfprintf_(nullptr),
      exit_(nullptr),
      abort_(nullptr),
      stats_enabled_(false),
      is_running_on_memory_tool_(kRunningOnMemoryTool),
      instrumentation_(),
      main_thread_group_(nullptr),
      system_thread_group_(nullptr),
      system_class_loader_(nullptr),
      dump_gc_performance_on_shutdown_(false),
      preinitialization_transactions_(),
      verify_(verifier::VerifyMode::kNone),
      allow_dex_file_fallback_(true),
      target_sdk_version_(static_cast<uint32_t>(SdkVersion::kUnset)),
      implicit_null_checks_(false),
      implicit_so_checks_(false),
      implicit_suspend_checks_(false),
      no_sig_chain_(false),
      force_native_bridge_(false),
      is_native_bridge_loaded_(false),
      is_native_debuggable_(false),
      async_exceptions_thrown_(false),
      non_standard_exits_enabled_(false),
      is_java_debuggable_(false),
      zygote_max_failed_boots_(0),
      experimental_flags_(ExperimentalFlags::kNone),
      oat_file_manager_(nullptr),
      is_low_memory_mode_(false),
      safe_mode_(false),
      hidden_api_policy_(hiddenapi::EnforcementPolicy::kDisabled),
      core_platform_api_policy_(hiddenapi::EnforcementPolicy::kDisabled),
      dedupe_hidden_api_warnings_(true),
      hidden_api_access_event_log_rate_(0),
      dump_native_stack_on_sig_quit_(true),
      pruned_dalvik_cache_(false),
      // Initially assume we perceive jank in case the process state is never updated.
      process_state_(kProcessStateJankPerceptible),
      zygote_no_threads_(false),
      verifier_logging_threshold_ms_(100) {
  // Cross-check compile-time constants against their runtime counterparts before
  // anything relies on them.
  static_assert(Runtime::kCalleeSaveSize ==
                    static_cast<uint32_t>(CalleeSaveType::kLastCalleeSaveType), "Unexpected size");
  CheckConstants();

  // Zero the callee-save method table and the deoptimization counters.
  std::fill(callee_save_methods_, callee_save_methods_ + arraysize(callee_save_methods_), 0u);
  interpreter::CheckInterpreterAsmConstants();
  callbacks_.reset(new RuntimeCallbacks());
  for (size_t i = 0; i <= static_cast<size_t>(DeoptimizationKind::kLast); ++i) {
    deoptimization_counts_[i] = 0u;
  }
}
    305 
// Tears the runtime down in careful dependency order: attach a shutdown thread if
// needed, stop worker threads and daemons, shut down the thread list, then delete
// the remaining subsystems. The ordering of these steps is load-bearing; see the
// inline comments before changing it.
Runtime::~Runtime() {
  ScopedTrace trace("Runtime shutdown");
  if (is_native_bridge_loaded_) {
    UnloadNativeBridge();
  }

  Thread* self = Thread::Current();
  const bool attach_shutdown_thread = self == nullptr;
  if (attach_shutdown_thread) {
    // We can only create a peer if the runtime is actually started. This is only not true during
    // some tests. If there is extreme memory pressure the allocation of the thread peer can fail.
    // In this case we will just try again without allocating a peer so that shutdown can continue.
    // Very few things are actually capable of distinguishing between the peer & peerless states so
    // this should be fine.
    bool thread_attached = AttachCurrentThread("Shutdown thread",
                                               /* as_daemon= */ false,
                                               GetSystemThreadGroup(),
                                               /* create_peer= */ IsStarted());
    if (UNLIKELY(!thread_attached)) {
      LOG(WARNING) << "Failed to attach shutdown thread. Trying again without a peer.";
      CHECK(AttachCurrentThread("Shutdown thread (no java peer)",
                                /* as_daemon= */   false,
                                /* thread_group=*/ nullptr,
                                /* create_peer= */ false));
    }
    self = Thread::Current();
  } else {
    LOG(WARNING) << "Current thread not detached in Runtime shutdown";
  }

  if (dump_gc_performance_on_shutdown_) {
    heap_->CalculatePreGcWeightedAllocatedBytes();
    uint64_t process_cpu_end_time = ProcessCpuNanoTime();
    ScopedLogSeverity sls(LogSeverity::INFO);
    // This can't be called from the Heap destructor below because it
    // could call RosAlloc::InspectAll() which needs the thread_list
    // to be still alive.
    heap_->DumpGcPerformanceInfo(LOG_STREAM(INFO));

    uint64_t process_cpu_time = process_cpu_end_time - heap_->GetProcessCpuStartTime();
    uint64_t gc_cpu_time = heap_->GetTotalGcCpuTime();
    float ratio = static_cast<float>(gc_cpu_time) / process_cpu_time;
    LOG_STREAM(INFO) << "GC CPU time " << PrettyDuration(gc_cpu_time)
        << " out of process CPU time " << PrettyDuration(process_cpu_time)
        << " (" << ratio << ")"
        << "\n";
    double pre_gc_weighted_allocated_bytes =
        heap_->GetPreGcWeightedAllocatedBytes() / process_cpu_time;
    // Here we don't use process_cpu_time for normalization, because VM shutdown is not a real
    // GC. Both numerator and denominator take into account until the end of the last GC,
    // instead of the whole process life time like pre_gc_weighted_allocated_bytes.
    double post_gc_weighted_allocated_bytes =
        heap_->GetPostGcWeightedAllocatedBytes() /
          (heap_->GetPostGCLastProcessCpuTime() - heap_->GetProcessCpuStartTime());

    LOG_STREAM(INFO) << "Average bytes allocated at GC start, weighted by CPU time between GCs: "
        << static_cast<uint64_t>(pre_gc_weighted_allocated_bytes)
        << " (" <<  PrettySize(pre_gc_weighted_allocated_bytes)  << ")";
    LOG_STREAM(INFO) << "Average bytes allocated at GC end, weighted by CPU time between GCs: "
        << static_cast<uint64_t>(post_gc_weighted_allocated_bytes)
        << " (" <<  PrettySize(post_gc_weighted_allocated_bytes)  << ")"
        << "\n";
  }

  // Wait for the workers of thread pools to be created since there can't be any
  // threads attaching during shutdown.
  WaitForThreadPoolWorkersToStart();
  if (jit_ != nullptr) {
    jit_->WaitForWorkersToBeCreated();
    // Stop the profile saver thread before marking the runtime as shutting down.
    // The saver will try to dump the profiles before being stopped and that
    // requires holding the mutator lock.
    jit_->StopProfileSaver();
  }
  if (oat_file_manager_ != nullptr) {
    oat_file_manager_->WaitForWorkersToBeCreated();
  }

  {
    // Block until no more threads are in the middle of being born, then flip the
    // shutting_down_ flag so no new ones can start.
    ScopedTrace trace2("Wait for shutdown cond");
    MutexLock mu(self, *Locks::runtime_shutdown_lock_);
    shutting_down_started_ = true;
    while (threads_being_born_ > 0) {
      shutdown_cond_->Wait(self);
    }
    shutting_down_ = true;
  }
  // Shutdown and wait for the daemons.
  CHECK(self != nullptr);
  if (IsFinishedStarting()) {
    ScopedTrace trace2("Waiting for Daemons");
    self->ClearException();
    self->GetJniEnv()->CallStaticVoidMethod(WellKnownClasses::java_lang_Daemons,
                                            WellKnownClasses::java_lang_Daemons_stop);
  }

  // Shutdown any trace running.
  Trace::Shutdown();

  // Report death. Clients may require a working thread, still, so do it before GC completes and
  // all non-daemon threads are done.
  {
    ScopedObjectAccess soa(self);
    callbacks_->NextRuntimePhase(RuntimePhaseCallback::RuntimePhase::kDeath);
  }

  if (attach_shutdown_thread) {
    DetachCurrentThread();
    self = nullptr;
  }

  // Make sure to let the GC complete if it is running.
  heap_->WaitForGcToComplete(gc::kGcCauseBackground, self);
  heap_->DeleteThreadPool();
  if (jit_ != nullptr) {
    ScopedTrace trace2("Delete jit");
    VLOG(jit) << "Deleting jit thread pool";
    // Delete thread pool before the thread list since we don't want to wait forever on the
    // JIT compiler threads.
    jit_->DeleteThreadPool();
  }
  if (oat_file_manager_ != nullptr) {
    oat_file_manager_->DeleteThreadPool();
  }
  DeleteThreadPool();
  CHECK(thread_pool_ == nullptr);

  // Make sure our internal threads are dead before we start tearing down things they're using.
  GetRuntimeCallbacks()->StopDebugger();
  delete signal_catcher_;

  // Make sure all other non-daemon threads have terminated, and all daemon threads are suspended.
  {
    ScopedTrace trace2("Delete thread list");
    thread_list_->ShutDown();
  }

  // TODO Maybe do some locking.
  for (auto& agent : agents_) {
    agent->Unload();
  }

  // TODO Maybe do some locking
  for (auto& plugin : plugins_) {
    plugin.Unload();
  }

  // Finally delete the thread list.
  delete thread_list_;

  // Delete the JIT after thread list to ensure that there is no remaining threads which could be
  // accessing the instrumentation when we delete it.
  if (jit_ != nullptr) {
    VLOG(jit) << "Deleting jit";
    jit_.reset(nullptr);
    jit_code_cache_.reset(nullptr);
  }

  // Shutdown the fault manager if it was initialized.
  fault_manager.Shutdown();

  ScopedTrace trace2("Delete state");
  delete monitor_list_;
  delete monitor_pool_;
  delete class_linker_;
  delete heap_;
  delete intern_table_;
  delete oat_file_manager_;
  Thread::Shutdown();
  QuasiAtomic::Shutdown();
  verifier::ClassVerifier::Shutdown();

  // Destroy allocators before shutting down the MemMap because they may use it.
  java_vm_.reset();
  linear_alloc_.reset();
  low_4gb_arena_pool_.reset();
  arena_pool_.reset();
  jit_arena_pool_.reset();
  protected_fault_page_.Reset();
  MemMap::Shutdown();

  // TODO: acquire a static mutex on Runtime to avoid racing.
  CHECK(instance_ == nullptr || instance_ == this);
  instance_ = nullptr;

  // Well-known classes must be deleted or it is impossible to successfully start another Runtime
  // instance. We rely on a small initialization order issue in Runtime::Start() that requires
  // elements of WellKnownClasses to be null, see b/65500943.
  WellKnownClasses::Clear();

  JniShutdownNativeCallerCheck();
}
    498 
    499 struct AbortState {
    500   void Dump(std::ostream& os) const {
    501     if (gAborting > 1) {
    502       os << "Runtime aborting --- recursively, so no thread-specific detail!\n";
    503       DumpRecursiveAbort(os);
    504       return;
    505     }
    506     gAborting++;
    507     os << "Runtime aborting...\n";
    508     if (Runtime::Current() == nullptr) {
    509       os << "(Runtime does not yet exist!)\n";
    510       DumpNativeStack(os, GetTid(), nullptr, "  native: ", nullptr);
    511       return;
    512     }
    513     Thread* self = Thread::Current();
    514 
    515     // Dump all threads first and then the aborting thread. While this is counter the logical flow,
    516     // it improves the chance of relevant data surviving in the Android logs.
    517 
    518     DumpAllThreads(os, self);
    519 
    520     if (self == nullptr) {
    521       os << "(Aborting thread was not attached to runtime!)\n";
    522       DumpKernelStack(os, GetTid(), "  kernel: ", false);
    523       DumpNativeStack(os, GetTid(), nullptr, "  native: ", nullptr);
    524     } else {
    525       os << "Aborting thread:\n";
    526       if (Locks::mutator_lock_->IsExclusiveHeld(self) || Locks::mutator_lock_->IsSharedHeld(self)) {
    527         DumpThread(os, self);
    528       } else {
    529         if (Locks::mutator_lock_->SharedTryLock(self)) {
    530           DumpThread(os, self);
    531           Locks::mutator_lock_->SharedUnlock(self);
    532         }
    533       }
    534     }
    535   }
    536 
    537   // No thread-safety analysis as we do explicitly test for holding the mutator lock.
    538   void DumpThread(std::ostream& os, Thread* self) const NO_THREAD_SAFETY_ANALYSIS {
    539     DCHECK(Locks::mutator_lock_->IsExclusiveHeld(self) || Locks::mutator_lock_->IsSharedHeld(self));
    540     self->Dump(os);
    541     if (self->IsExceptionPending()) {
    542       mirror::Throwable* exception = self->GetException();
    543       os << "Pending exception " << exception->Dump();
    544     }
    545   }
    546 
    547   void DumpAllThreads(std::ostream& os, Thread* self) const {
    548     Runtime* runtime = Runtime::Current();
    549     if (runtime != nullptr) {
    550       ThreadList* thread_list = runtime->GetThreadList();
    551       if (thread_list != nullptr) {
    552         // Dump requires ThreadListLock and ThreadSuspendCountLock to not be held (they will be
    553         // grabbed).
    554         // TODO(b/134167395): Change Dump to work with the locks held, and have a loop with timeout
    555         //                    acquiring the locks.
    556         bool tll_already_held = Locks::thread_list_lock_->IsExclusiveHeld(self);
    557         bool tscl_already_held = Locks::thread_suspend_count_lock_->IsExclusiveHeld(self);
    558         if (tll_already_held || tscl_already_held) {
    559           os << "Skipping all-threads dump as locks are held:"
    560              << (tll_already_held ? "" : " thread_list_lock")
    561              << (tscl_already_held ? "" : " thread_suspend_count_lock")
    562              << "\n";
    563           return;
    564         }
    565         bool ml_already_exlusively_held = Locks::mutator_lock_->IsExclusiveHeld(self);
    566         if (ml_already_exlusively_held) {
    567           os << "Skipping all-threads dump as mutator lock is exclusively held.";
    568           return;
    569         }
    570         bool ml_already_held = Locks::mutator_lock_->IsSharedHeld(self);
    571         if (!ml_already_held) {
    572           os << "Dumping all threads without mutator lock held\n";
    573         }
    574         os << "All threads:\n";
    575         thread_list->Dump(os);
    576       }
    577     }
    578   }
    579 
    580   // For recursive aborts.
    581   void DumpRecursiveAbort(std::ostream& os) const NO_THREAD_SAFETY_ANALYSIS {
    582     // The only thing we'll attempt is dumping the native stack of the current thread. We will only
    583     // try this if we haven't exceeded an arbitrary amount of recursions, to recover and actually
    584     // die.
    585     // Note: as we're using a global counter for the recursive abort detection, there is a potential
    586     //       race here and it is not OK to just print when the counter is "2" (one from
    587     //       Runtime::Abort(), one from previous Dump() call). Use a number that seems large enough.
    588     static constexpr size_t kOnlyPrintWhenRecursionLessThan = 100u;
    589     if (gAborting < kOnlyPrintWhenRecursionLessThan) {
    590       gAborting++;
    591       DumpNativeStack(os, GetTid());
    592     }
    593   }
    594 };
    595 
// Aborts the process, producing as much diagnostic output as possible first.
// Must be callable very early (before the runtime exists), very late, from
// multiple threads at once, and reentrantly from within a previous abort.
void Runtime::Abort(const char* msg) {
  auto old_value = gAborting.fetch_add(1);  // set before taking any locks

  // Only set the first abort message.
  if (old_value == 0) {
#ifdef ART_TARGET_ANDROID
    android_set_abort_message(msg);
#else
    // Set the runtime fault message in case our unexpected-signal code will run.
    Runtime* current = Runtime::Current();
    if (current != nullptr) {
      current->SetFaultMessage(msg);
    }
#endif
  }

  {
    // Ensure that we don't have multiple threads trying to abort at once,
    // which would result in significantly worse diagnostics.
    // Note: the lock is deliberately never released — the process is going down.
    ScopedThreadStateChange tsc(Thread::Current(), kNativeForAbort);
    Locks::abort_lock_->ExclusiveLock(Thread::Current());
  }

  // Get any pending output out of the way.
  fflush(nullptr);

  // Many people have difficulty distinguishing aborts from crashes,
  // so be explicit.
  // Note: use cerr on the host to print log lines immediately, so we get at least some output
  //       in case of recursive aborts. We lose annotation with the source file and line number
  //       here, which is a minor issue. The same is significantly more complicated on device,
  //       which is why we ignore the issue there.
  AbortState state;
  if (kIsTargetBuild) {
    LOG(FATAL_WITHOUT_ABORT) << Dumpable<AbortState>(state);
  } else {
    std::cerr << Dumpable<AbortState>(state);
  }

  // Sometimes we dump long messages, and the Android abort message only retains the first line.
  // In those cases, just log the message again, to avoid logcat limits.
  if (msg != nullptr && strchr(msg, '\n') != nullptr) {
    LOG(FATAL_WITHOUT_ABORT) << msg;
  }

  // Call the abort hook if we have one.
  if (Runtime::Current() != nullptr && Runtime::Current()->abort_ != nullptr) {
    LOG(FATAL_WITHOUT_ABORT) << "Calling abort hook...";
    Runtime::Current()->abort_();
    // notreached
    LOG(FATAL_WITHOUT_ABORT) << "Unexpectedly returned from abort hook!";
  }

  abort();
  // notreached
}
    652 
    653 void Runtime::PreZygoteFork() {
    654   if (GetJit() != nullptr) {
    655     GetJit()->PreZygoteFork();
    656   }
    657   heap_->PreZygoteFork();
    658 }
    659 
    660 void Runtime::PostZygoteFork() {
    661   if (GetJit() != nullptr) {
    662     GetJit()->PostZygoteFork();
    663   }
    664 }
    665 
    666 void Runtime::CallExitHook(jint status) {
    667   if (exit_ != nullptr) {
    668     ScopedThreadStateChange tsc(Thread::Current(), kNative);
    669     exit_(status);
    670     LOG(WARNING) << "Exit hook returned instead of exiting!";
    671   }
    672 }
    673 
    674 void Runtime::SweepSystemWeaks(IsMarkedVisitor* visitor) {
    675   GetInternTable()->SweepInternTableWeaks(visitor);
    676   GetMonitorList()->SweepMonitorList(visitor);
    677   GetJavaVM()->SweepJniWeakGlobals(visitor);
    678   GetHeap()->SweepAllocationRecords(visitor);
    679   if (GetJit() != nullptr) {
    680     // Visit JIT literal tables. Objects in these tables are classes and strings
    681     // and only classes can be affected by class unloading. The strings always
    682     // stay alive as they are strongly interned.
    683     // TODO: Move this closer to CleanupClassLoaders, to avoid blocking weak accesses
    684     // from mutators. See b/32167580.
    685     GetJit()->GetCodeCache()->SweepRootTables(visitor);
    686   }
    687 
    688   // All other generic system-weak holders.
    689   for (gc::AbstractSystemWeakHolder* holder : system_weak_holders_) {
    690     holder->Sweep(visitor);
    691   }
    692 }
    693 
    694 bool Runtime::ParseOptions(const RuntimeOptions& raw_options,
    695                            bool ignore_unrecognized,
    696                            RuntimeArgumentMap* runtime_options) {
    697   Locks::Init();
    698   InitLogging(/* argv= */ nullptr, Abort);  // Calls Locks::Init() as a side effect.
    699   bool parsed = ParsedOptions::Parse(raw_options, ignore_unrecognized, runtime_options);
    700   if (!parsed) {
    701     LOG(ERROR) << "Failed to parse options";
    702     return false;
    703   }
    704   return true;
    705 }
    706 
    707 // Callback to check whether it is safe to call Abort (e.g., to use a call to
    708 // LOG(FATAL)).  It is only safe to call Abort if the runtime has been created,
    709 // properly initialized, and has not shut down.
    710 static bool IsSafeToCallAbort() NO_THREAD_SAFETY_ANALYSIS {
    711   Runtime* runtime = Runtime::Current();
    712   return runtime != nullptr && runtime->IsStarted() && !runtime->IsShuttingDownLocked();
    713 }
    714 
// Creates and initializes the singleton Runtime from already-parsed options.
// Returns false if a runtime already exists or initialization fails; on
// failure the (partially initialized) instance is intentionally leaked.
bool Runtime::Create(RuntimeArgumentMap&& runtime_options) {
  // TODO: acquire a static mutex on Runtime to avoid racing.
  if (Runtime::instance_ != nullptr) {
    return false;
  }
  instance_ = new Runtime;
  // Register the abort-safety callback before Init() so early LOG(FATAL)s behave.
  Locks::SetClientCallback(IsSafeToCallAbort);
  if (!instance_->Init(std::move(runtime_options))) {
    // TODO: Currently deleting the instance will abort the runtime on destruction. Now this will
    // leak memory, instead. Fix the destructor. b/19100793.
    // delete instance_;
    instance_ = nullptr;
    return false;
  }
  return true;
}
    731 
    732 bool Runtime::Create(const RuntimeOptions& raw_options, bool ignore_unrecognized) {
    733   RuntimeArgumentMap runtime_options;
    734   return ParseOptions(raw_options, ignore_unrecognized, &runtime_options) &&
    735       Create(std::move(runtime_options));
    736 }
    737 
// Creates the Java system class loader by invoking
// ClassLoader.getSystemClassLoader(), installs it as the current thread's
// class-loader override and as Thread.contextClassLoader, and returns a new
// global reference to it. Returns nullptr when running as an AOT compiler
// that is not compiling a boot image.
static jobject CreateSystemClassLoader(Runtime* runtime) {
  if (runtime->IsAotCompiler() && !runtime->GetCompilerCallbacks()->IsBootImage()) {
    return nullptr;
  }

  ScopedObjectAccess soa(Thread::Current());
  ClassLinker* cl = Runtime::Current()->GetClassLinker();
  auto pointer_size = cl->GetImagePointerSize();

  // Ensure java.lang.ClassLoader is initialized before calling its statics.
  StackHandleScope<2> hs(soa.Self());
  Handle<mirror::Class> class_loader_class(
      hs.NewHandle(soa.Decode<mirror::Class>(WellKnownClasses::java_lang_ClassLoader)));
  CHECK(cl->EnsureInitialized(soa.Self(), class_loader_class, true, true));

  ArtMethod* getSystemClassLoader = class_loader_class->FindClassMethod(
      "getSystemClassLoader", "()Ljava/lang/ClassLoader;", pointer_size);
  CHECK(getSystemClassLoader != nullptr);
  CHECK(getSystemClassLoader->IsStatic());

  // Invoke the static method; the returned object is the system class loader.
  JValue result = InvokeWithJValues(soa,
                                    nullptr,
                                    jni::EncodeArtMethod(getSystemClassLoader),
                                    nullptr);
  JNIEnv* env = soa.Self()->GetJniEnv();
  ScopedLocalRef<jobject> system_class_loader(env, soa.AddLocalReference<jobject>(result.GetL()));
  CHECK(system_class_loader.get() != nullptr);

  soa.Self()->SetClassLoaderOverride(system_class_loader.get());

  // Also publish the loader via the contextClassLoader field of the current
  // thread's Java peer.
  Handle<mirror::Class> thread_class(
      hs.NewHandle(soa.Decode<mirror::Class>(WellKnownClasses::java_lang_Thread)));
  CHECK(cl->EnsureInitialized(soa.Self(), thread_class, true, true));

  ArtField* contextClassLoader =
      thread_class->FindDeclaredInstanceField("contextClassLoader", "Ljava/lang/ClassLoader;");
  CHECK(contextClassLoader != nullptr);

  // We can't run in a transaction yet.
  contextClassLoader->SetObject<false>(
      soa.Self()->GetPeer(),
      soa.Decode<mirror::ClassLoader>(system_class_loader.get()).Ptr());

  return env->NewGlobalRef(system_class_loader.get());
}
    782 
    783 std::string Runtime::GetCompilerExecutable() const {
    784   if (!compiler_executable_.empty()) {
    785     return compiler_executable_;
    786   }
    787   std::string compiler_executable(GetAndroidRoot());
    788   compiler_executable += (kIsDebugBuild ? "/bin/dex2oatd" : "/bin/dex2oat");
    789   return compiler_executable;
    790 }
    791 
    792 void Runtime::RunRootClinits(Thread* self) {
    793   class_linker_->RunRootClinits(self);
    794 
    795   GcRoot<mirror::Throwable>* exceptions[] = {
    796       &pre_allocated_OutOfMemoryError_when_throwing_exception_,
    797       // &pre_allocated_OutOfMemoryError_when_throwing_oome_,             // Same class as above.
    798       // &pre_allocated_OutOfMemoryError_when_handling_stack_overflow_,   // Same class as above.
    799       &pre_allocated_NoClassDefFoundError_,
    800   };
    801   for (GcRoot<mirror::Throwable>* exception : exceptions) {
    802     StackHandleScope<1> hs(self);
    803     Handle<mirror::Class> klass = hs.NewHandle<mirror::Class>(exception->Read()->GetClass());
    804     class_linker_->EnsureInitialized(self, klass, true, true);
    805     self->AssertNoPendingException();
    806   }
    807 }
    808 
// Transitions the runtime from "created" to "started": initializes native
// methods and intrinsics, finishes thread startup, optionally creates the
// JIT, installs the system class loader, runs non-zygote post-init, starts
// daemon threads and (optionally) method tracing. The ordering of the steps
// below is significant; see the inline comments.
bool Runtime::Start() {
  VLOG(startup) << "Runtime::Start entering";

  CHECK(!no_sig_chain_) << "A started runtime should have sig chain enabled";

  // If a debug host build, disable ptrace restriction for debugging and test timeout thread dump.
  // Only 64-bit as prctl() may fail in 32 bit userspace on a 64-bit kernel.
#if defined(__linux__) && !defined(ART_TARGET_ANDROID) && defined(__x86_64__)
  if (kIsDebugBuild) {
    CHECK_EQ(prctl(PR_SET_PTRACER, PR_SET_PTRACER_ANY), 0);
  }
#endif

  // Restore main thread state to kNative as expected by native code.
  Thread* self = Thread::Current();

  self->TransitionFromRunnableToSuspended(kNative);

  DoAndMaybeSwitchInterpreter([=](){ started_ = true; });

  // Without a boot image (or with image dex2oat disabled), eagerly initialize
  // the Class and Field class roots, which would otherwise come pre-initialized
  // from the image.
  if (!IsImageDex2OatEnabled() || !GetHeap()->HasBootImageSpace()) {
    ScopedObjectAccess soa(self);
    StackHandleScope<2> hs(soa.Self());

    ObjPtr<mirror::ObjectArray<mirror::Class>> class_roots = GetClassLinker()->GetClassRoots();
    auto class_class(hs.NewHandle<mirror::Class>(GetClassRoot<mirror::Class>(class_roots)));
    auto field_class(hs.NewHandle<mirror::Class>(GetClassRoot<mirror::Field>(class_roots)));

    class_linker_->EnsureInitialized(soa.Self(), class_class, true, true);
    self->AssertNoPendingException();
    // Field class is needed for register_java_net_InetAddress in libcore, b/28153851.
    class_linker_->EnsureInitialized(soa.Self(), field_class, true, true);
    self->AssertNoPendingException();
  }

  // InitNativeMethods needs to be after started_ so that the classes
  // it touches will have methods linked to the oat file if necessary.
  {
    ScopedTrace trace2("InitNativeMethods");
    InitNativeMethods();
  }

  // InitializeIntrinsics needs to be called after the WellKnownClasses::Init in InitNativeMethods
  // because in checking the invocation types of intrinsic methods ArtMethod::GetInvokeType()
  // needs the SignaturePolymorphic annotation class which is initialized in WellKnownClasses::Init.
  InitializeIntrinsics();

  // Initialize well known thread group values that may be accessed by threads while attaching.
  InitThreadGroups(self);

  Thread::FinishStartup();

  // Create the JIT either if we have to use JIT compilation or save profiling info. This is
  // done after FinishStartup as the JIT pool needs Java thread peers, which require the main
  // ThreadGroup to exist.
  //
  // TODO(calin): We use the JIT class as a proxy for JIT compilation and for
  // recoding profiles. Maybe we should consider changing the name to be more clear it's
  // not only about compiling. b/28295073.
  if (jit_options_->UseJitCompilation() || jit_options_->GetSaveProfilingInfo()) {
    // Try to load compiler pre zygote to reduce PSS. b/27744947
    std::string error_msg;
    if (!jit::Jit::LoadCompilerLibrary(&error_msg)) {
      // Non-fatal: the runtime can continue without the JIT compiler.
      LOG(WARNING) << "Failed to load JIT compiler with error " << error_msg;
    }
    CreateJitCodeCache(/*rwx_memory_allowed=*/true);
    CreateJit();
  }

  // Send the start phase event. We have to wait till here as this is when the main thread peer
  // has just been generated, important root clinits have been run and JNI is completely functional.
  {
    ScopedObjectAccess soa(self);
    callbacks_->NextRuntimePhase(RuntimePhaseCallback::RuntimePhase::kStart);
  }

  system_class_loader_ = CreateSystemClassLoader(this);

  // A zygote defers this work to after the fork (see InitNonZygoteOrPostFork).
  if (!is_zygote_) {
    if (is_native_bridge_loaded_) {
      PreInitializeNativeBridge(".");
    }
    NativeBridgeAction action = force_native_bridge_
        ? NativeBridgeAction::kInitialize
        : NativeBridgeAction::kUnload;
    InitNonZygoteOrPostFork(self->GetJniEnv(),
                            /* is_system_server= */ false,
                            action,
                            GetInstructionSetString(kRuntimeISA));
  }

  StartDaemonThreads();

  // Make sure the environment is still clean (no lingering local refs from starting daemon
  // threads).
  {
    ScopedObjectAccess soa(self);
    self->GetJniEnv()->AssertLocalsEmpty();
  }

  // Send the initialized phase event. Send it after starting the Daemon threads so that agents
  // cannot delay the daemon threads from starting forever.
  {
    ScopedObjectAccess soa(self);
    callbacks_->NextRuntimePhase(RuntimePhaseCallback::RuntimePhase::kInit);
  }

  {
    ScopedObjectAccess soa(self);
    self->GetJniEnv()->AssertLocalsEmpty();
  }

  VLOG(startup) << "Runtime::Start exiting";
  finished_starting_ = true;

  // If a trace file was configured on the command line, start method tracing
  // now that the runtime is fully functional.
  if (trace_config_.get() != nullptr && trace_config_->trace_file != "") {
    ScopedThreadStateChange tsc(self, kWaitingForMethodTracingStart);
    Trace::Start(trace_config_->trace_file.c_str(),
                 static_cast<int>(trace_config_->trace_file_size),
                 0,
                 trace_config_->trace_output_mode,
                 trace_config_->trace_mode,
                 0);
  }

  // In case we have a profile path passed as a command line argument,
  // register the current class path for profiling now. Note that we cannot do
  // this before we create the JIT and having it here is the most convenient way.
  // This is used when testing profiles with dalvikvm command as there is no
  // framework to register the dex files for profiling.
  if (jit_.get() != nullptr && jit_options_->GetSaveProfilingInfo() &&
      !jit_options_->GetProfileSaverOptions().GetProfilePath().empty()) {
    std::vector<std::string> dex_filenames;
    Split(class_path_string_, ':', &dex_filenames);
    RegisterAppInfo(dex_filenames, jit_options_->GetProfileSaverOptions().GetProfilePath());
  }

  return true;
}
    948 
    949 void Runtime::EndThreadBirth() REQUIRES(Locks::runtime_shutdown_lock_) {
    950   DCHECK_GT(threads_being_born_, 0U);
    951   threads_being_born_--;
    952   if (shutting_down_started_ && threads_being_born_ == 0) {
    953     shutdown_cond_->Broadcast(Thread::Current());
    954   }
    955 }
    956 
// Performs initialization deferred while running as the zygote: resolves the
// native bridge state, configures system-server profiling, creates the heap
// and runtime thread pools, resets GC stats, starts the signal catcher, and
// finally starts the debugger. Called both for non-zygote startup (from
// Runtime::Start) and in child processes after a zygote fork.
void Runtime::InitNonZygoteOrPostFork(
    JNIEnv* env,
    bool is_system_server,
    NativeBridgeAction action,
    const char* isa,
    bool profile_system_server) {
  is_zygote_ = false;

  // Either finish initializing the native bridge for this process or unload
  // it if it is not needed.
  if (is_native_bridge_loaded_) {
    switch (action) {
      case NativeBridgeAction::kUnload:
        UnloadNativeBridge();
        is_native_bridge_loaded_ = false;
        break;

      case NativeBridgeAction::kInitialize:
        InitializeNativeBridge(env, isa);
        break;
    }
  }

  if (is_system_server) {
    jit_options_->SetSaveProfilingInfo(profile_system_server);
    if (profile_system_server) {
      jit_options_->SetWaitForJitNotificationsToSaveProfile(false);
      VLOG(profiler) << "Enabling system server profiles";
    }
  }

  // Create the thread pools.
  heap_->CreateThreadPool();
  {
    ScopedTrace timing("CreateThreadPool");
    constexpr size_t kStackSize = 64 * KB;
    constexpr size_t kMaxRuntimeWorkers = 4u;
    // Use at most kMaxRuntimeWorkers, but never more than the hardware
    // concurrency.
    const size_t num_workers =
        std::min(static_cast<size_t>(std::thread::hardware_concurrency()), kMaxRuntimeWorkers);
    MutexLock mu(Thread::Current(), *Locks::runtime_thread_pool_lock_);
    CHECK(thread_pool_ == nullptr);
    thread_pool_.reset(new ThreadPool("Runtime", num_workers, /*create_peers=*/false, kStackSize));
    thread_pool_->StartWorkers(Thread::Current());
  }

  // Reset the gc performance data at zygote fork so that the GCs
  // before fork aren't attributed to an app.
  heap_->ResetGcPerformanceInfo();

  StartSignalCatcher();

  // Start the JDWP thread. If the command-line debugger flags specified "suspend=y",
  // this will pause the runtime (in the internal debugger implementation), so we probably want
  // this to come last.
  ScopedObjectAccess soa(Thread::Current());
  GetRuntimeCallbacks()->StartDebugger();
}
   1012 
   1013 void Runtime::StartSignalCatcher() {
   1014   if (!is_zygote_) {
   1015     signal_catcher_ = new SignalCatcher();
   1016   }
   1017 }
   1018 
   1019 bool Runtime::IsShuttingDown(Thread* self) {
   1020   MutexLock mu(self, *Locks::runtime_shutdown_lock_);
   1021   return IsShuttingDownLocked();
   1022 }
   1023 
   1024 void Runtime::StartDaemonThreads() {
   1025   ScopedTrace trace(__FUNCTION__);
   1026   VLOG(startup) << "Runtime::StartDaemonThreads entering";
   1027 
   1028   Thread* self = Thread::Current();
   1029 
   1030   // Must be in the kNative state for calling native methods.
   1031   CHECK_EQ(self->GetState(), kNative);
   1032 
   1033   JNIEnv* env = self->GetJniEnv();
   1034   env->CallStaticVoidMethod(WellKnownClasses::java_lang_Daemons,
   1035                             WellKnownClasses::java_lang_Daemons_start);
   1036   if (env->ExceptionCheck()) {
   1037     env->ExceptionDescribe();
   1038     LOG(FATAL) << "Error starting java.lang.Daemons";
   1039   }
   1040 
   1041   VLOG(startup) << "Runtime::StartDaemonThreads exiting";
   1042 }
   1043 
   1044 static size_t OpenBootDexFiles(ArrayRef<const std::string> dex_filenames,
   1045                                ArrayRef<const std::string> dex_locations,
   1046                                std::vector<std::unique_ptr<const DexFile>>* dex_files) {
   1047   DCHECK(dex_files != nullptr) << "OpenDexFiles: out-param is nullptr";
   1048   size_t failure_count = 0;
   1049   const ArtDexFileLoader dex_file_loader;
   1050   for (size_t i = 0; i < dex_filenames.size(); i++) {
   1051     const char* dex_filename = dex_filenames[i].c_str();
   1052     const char* dex_location = dex_locations[i].c_str();
   1053     static constexpr bool kVerifyChecksum = true;
   1054     std::string error_msg;
   1055     if (!OS::FileExists(dex_filename)) {
   1056       LOG(WARNING) << "Skipping non-existent dex file '" << dex_filename << "'";
   1057       continue;
   1058     }
   1059     bool verify = Runtime::Current()->IsVerificationEnabled();
   1060     // In the case we're using the apex boot image, we don't have support yet
   1061     // on reading vdex files of boot classpath. So just assume all boot classpath
   1062     // dex files have been verified (this should always be the case as the default boot
   1063     // image has been generated at build time).
   1064     if (Runtime::Current()->IsUsingApexBootImageLocation() && !kIsDebugBuild) {
   1065       verify = false;
   1066     }
   1067     if (!dex_file_loader.Open(dex_filename,
   1068                               dex_location,
   1069                               verify,
   1070                               kVerifyChecksum,
   1071                               &error_msg,
   1072                               dex_files)) {
   1073       LOG(WARNING) << "Failed to open .dex from file '" << dex_filename << "': " << error_msg;
   1074       ++failure_count;
   1075     }
   1076   }
   1077   return failure_count;
   1078 }
   1079 
// Records the runtime's sentinel object. May be called only once, and only
// with a non-null object that the heap will never move (its address must
// stay stable for the lifetime of the root).
void Runtime::SetSentinel(mirror::Object* sentinel) {
  CHECK(sentinel_.Read() == nullptr);  // Must not already be set.
  CHECK(sentinel != nullptr);
  CHECK(!heap_->IsMovableObject(sentinel));
  sentinel_ = GcRoot<mirror::Object>(sentinel);
}
   1086 
// Returns the sentinel root recorded by SetSentinel() (a null root if unset).
GcRoot<mirror::Object> Runtime::GetSentinel() {
  return sentinel_;
}
   1090 
// Allocates an instance of the exception class named by
// `exception_class_descriptor`, sets its detailMessage field to `msg`, and
// stores the result in *exception. Used to create the runtime's
// pre-allocated throwables (see Runtime::RunRootClinits).
static inline void CreatePreAllocatedException(Thread* self,
                                               Runtime* runtime,
                                               GcRoot<mirror::Throwable>* exception,
                                               const char* exception_class_descriptor,
                                               const char* msg)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  DCHECK_EQ(self, Thread::Current());
  ClassLinker* class_linker = runtime->GetClassLinker();
  // Allocate an object without initializing the class to allow non-trivial Throwable.<clinit>().
  ObjPtr<mirror::Class> klass = class_linker->FindSystemClass(self, exception_class_descriptor);
  CHECK(klass != nullptr);
  gc::AllocatorType allocator_type = runtime->GetHeap()->GetCurrentAllocator();
  ObjPtr<mirror::Throwable> exception_object = ObjPtr<mirror::Throwable>::DownCast(
      klass->Alloc</* kIsInstrumented= */ true>(self, allocator_type));
  CHECK(exception_object != nullptr);
  // Publish the root before allocating the message string, so the object is
  // reachable across the allocation below.
  *exception = GcRoot<mirror::Throwable>(exception_object);
  // Initialize the "detailMessage" field.
  ObjPtr<mirror::String> message = mirror::String::AllocFromModifiedUtf8(self, msg);
  CHECK(message != nullptr);
  // Set the field directly rather than via a constructor, since the class is
  // deliberately left uninitialized (see above).
  ObjPtr<mirror::Class> throwable = GetClassRoot<mirror::Throwable>(class_linker);
  ArtField* detailMessageField =
      throwable->FindDeclaredInstanceField("detailMessage", "Ljava/lang/String;");
  CHECK(detailMessageField != nullptr);
  detailMessageField->SetObject</* kTransactionActive= */ false>(exception->Read(), message);
}
   1116 
   1117 bool Runtime::Init(RuntimeArgumentMap&& runtime_options_in) {
   1118   // (b/30160149): protect subprocesses from modifications to LD_LIBRARY_PATH, etc.
   1119   // Take a snapshot of the environment at the time the runtime was created, for use by Exec, etc.
   1120   env_snapshot_.TakeSnapshot();
   1121 
   1122   using Opt = RuntimeArgumentMap;
   1123   Opt runtime_options(std::move(runtime_options_in));
   1124   ScopedTrace trace(__FUNCTION__);
   1125   CHECK_EQ(sysconf(_SC_PAGE_SIZE), kPageSize);
   1126 
   1127   // Early override for logging output.
   1128   if (runtime_options.Exists(Opt::UseStderrLogger)) {
   1129     android::base::SetLogger(android::base::StderrLogger);
   1130   }
   1131 
   1132   MemMap::Init();
   1133 
   1134   // Try to reserve a dedicated fault page. This is allocated for clobbered registers and sentinels.
   1135   // If we cannot reserve it, log a warning.
   1136   // Note: We allocate this first to have a good chance of grabbing the page. The address (0xebad..)
   1137   //       is out-of-the-way enough that it should not collide with boot image mapping.
   1138   // Note: Don't request an error message. That will lead to a maps dump in the case of failure,
   1139   //       leading to logspam.
   1140   {
   1141     constexpr uintptr_t kSentinelAddr =
   1142         RoundDown(static_cast<uintptr_t>(Context::kBadGprBase), kPageSize);
   1143     protected_fault_page_ = MemMap::MapAnonymous("Sentinel fault page",
   1144                                                  reinterpret_cast<uint8_t*>(kSentinelAddr),
   1145                                                  kPageSize,
   1146                                                  PROT_NONE,
   1147                                                  /*low_4gb=*/ true,
   1148                                                  /*reuse=*/ false,
   1149                                                  /*reservation=*/ nullptr,
   1150                                                  /*error_msg=*/ nullptr);
   1151     if (!protected_fault_page_.IsValid()) {
   1152       LOG(WARNING) << "Could not reserve sentinel fault page";
   1153     } else if (reinterpret_cast<uintptr_t>(protected_fault_page_.Begin()) != kSentinelAddr) {
   1154       LOG(WARNING) << "Could not reserve sentinel fault page at the right address.";
   1155       protected_fault_page_.Reset();
   1156     }
   1157   }
   1158 
   1159   VLOG(startup) << "Runtime::Init -verbose:startup enabled";
   1160 
   1161   QuasiAtomic::Startup();
   1162 
   1163   oat_file_manager_ = new OatFileManager;
   1164 
   1165   Thread::SetSensitiveThreadHook(runtime_options.GetOrDefault(Opt::HookIsSensitiveThread));
   1166   Monitor::Init(runtime_options.GetOrDefault(Opt::LockProfThreshold),
   1167                 runtime_options.GetOrDefault(Opt::StackDumpLockProfThreshold));
   1168 
   1169   image_location_ = runtime_options.GetOrDefault(Opt::Image);
   1170   {
   1171     std::string error_msg;
   1172     is_using_apex_boot_image_location_ = (image_location_ == kApexBootImageLocation);
   1173   }
   1174 
   1175   SetInstructionSet(runtime_options.GetOrDefault(Opt::ImageInstructionSet));
   1176   boot_class_path_ = runtime_options.ReleaseOrDefault(Opt::BootClassPath);
   1177   boot_class_path_locations_ = runtime_options.ReleaseOrDefault(Opt::BootClassPathLocations);
   1178   DCHECK(boot_class_path_locations_.empty() ||
   1179          boot_class_path_locations_.size() == boot_class_path_.size());
   1180   if (boot_class_path_.empty()) {
   1181     // Try to extract the boot class path from the system boot image.
   1182     if (image_location_.empty()) {
   1183       LOG(ERROR) << "Empty boot class path, cannot continue without image.";
   1184       return false;
   1185     }
   1186     std::string system_oat_filename = ImageHeader::GetOatLocationFromImageLocation(
   1187         GetSystemImageFilename(image_location_.c_str(), instruction_set_));
   1188     std::string system_oat_location = ImageHeader::GetOatLocationFromImageLocation(image_location_);
   1189     std::string error_msg;
   1190     std::unique_ptr<OatFile> oat_file(OatFile::Open(/*zip_fd=*/ -1,
   1191                                                     system_oat_filename,
   1192                                                     system_oat_location,
   1193                                                     /*executable=*/ false,
   1194                                                     /*low_4gb=*/ false,
   1195                                                     /*abs_dex_location=*/ nullptr,
   1196                                                     /*reservation=*/ nullptr,
   1197                                                     &error_msg));
   1198     if (oat_file == nullptr) {
   1199       LOG(ERROR) << "Could not open boot oat file for extracting boot class path: " << error_msg;
   1200       return false;
   1201     }
   1202     const OatHeader& oat_header = oat_file->GetOatHeader();
   1203     const char* oat_boot_class_path = oat_header.GetStoreValueByKey(OatHeader::kBootClassPathKey);
   1204     if (oat_boot_class_path != nullptr) {
   1205       Split(oat_boot_class_path, ':', &boot_class_path_);
   1206     }
   1207     if (boot_class_path_.empty()) {
   1208       LOG(ERROR) << "Boot class path missing from boot image oat file " << oat_file->GetLocation();
   1209       return false;
   1210     }
   1211   }
   1212 
   1213   class_path_string_ = runtime_options.ReleaseOrDefault(Opt::ClassPath);
   1214   properties_ = runtime_options.ReleaseOrDefault(Opt::PropertiesList);
   1215 
   1216   compiler_callbacks_ = runtime_options.GetOrDefault(Opt::CompilerCallbacksPtr);
   1217   must_relocate_ = runtime_options.GetOrDefault(Opt::Relocate);
   1218   is_zygote_ = runtime_options.Exists(Opt::Zygote);
   1219   is_explicit_gc_disabled_ = runtime_options.Exists(Opt::DisableExplicitGC);
   1220   image_dex2oat_enabled_ = runtime_options.GetOrDefault(Opt::ImageDex2Oat);
   1221   dump_native_stack_on_sig_quit_ = runtime_options.GetOrDefault(Opt::DumpNativeStackOnSigQuit);
   1222 
   1223   vfprintf_ = runtime_options.GetOrDefault(Opt::HookVfprintf);
   1224   exit_ = runtime_options.GetOrDefault(Opt::HookExit);
   1225   abort_ = runtime_options.GetOrDefault(Opt::HookAbort);
   1226 
   1227   default_stack_size_ = runtime_options.GetOrDefault(Opt::StackSize);
   1228 
   1229   compiler_executable_ = runtime_options.ReleaseOrDefault(Opt::Compiler);
   1230   compiler_options_ = runtime_options.ReleaseOrDefault(Opt::CompilerOptions);
   1231   for (const std::string& option : Runtime::Current()->GetCompilerOptions()) {
   1232     if (option == "--debuggable") {
   1233       SetJavaDebuggable(true);
   1234       break;
   1235     }
   1236   }
   1237   image_compiler_options_ = runtime_options.ReleaseOrDefault(Opt::ImageCompilerOptions);
   1238 
   1239   finalizer_timeout_ms_ = runtime_options.GetOrDefault(Opt::FinalizerTimeoutMs);
   1240   max_spins_before_thin_lock_inflation_ =
   1241       runtime_options.GetOrDefault(Opt::MaxSpinsBeforeThinLockInflation);
   1242 
   1243   monitor_list_ = new MonitorList;
   1244   monitor_pool_ = MonitorPool::Create();
   1245   thread_list_ = new ThreadList(runtime_options.GetOrDefault(Opt::ThreadSuspendTimeout));
   1246   intern_table_ = new InternTable;
   1247 
   1248   verify_ = runtime_options.GetOrDefault(Opt::Verify);
   1249   allow_dex_file_fallback_ = !runtime_options.Exists(Opt::NoDexFileFallback);
   1250 
   1251   target_sdk_version_ = runtime_options.GetOrDefault(Opt::TargetSdkVersion);
   1252 
   1253   // Set hidden API enforcement policy. The checks are disabled by default and
   1254   // we only enable them if:
   1255   // (a) runtime was started with a command line flag that enables the checks, or
   1256   // (b) Zygote forked a new process that is not exempt (see ZygoteHooks).
   1257   hidden_api_policy_ = runtime_options.GetOrDefault(Opt::HiddenApiPolicy);
   1258   DCHECK(!is_zygote_ || hidden_api_policy_ == hiddenapi::EnforcementPolicy::kDisabled);
   1259 
   1260   // Set core platform API enforcement policy. The checks are disabled by default and
   1261   // can be enabled with a command line flag. AndroidRuntime will pass the flag if
   1262   // a system property is set.
   1263   core_platform_api_policy_ = runtime_options.GetOrDefault(Opt::CorePlatformApiPolicy);
   1264   if (core_platform_api_policy_ != hiddenapi::EnforcementPolicy::kDisabled) {
   1265     LOG(INFO) << "Core platform API reporting enabled, enforcing="
   1266         << (core_platform_api_policy_ == hiddenapi::EnforcementPolicy::kEnabled ? "true" : "false");
   1267   }
   1268 
   1269   no_sig_chain_ = runtime_options.Exists(Opt::NoSigChain);
   1270   force_native_bridge_ = runtime_options.Exists(Opt::ForceNativeBridge);
   1271 
   1272   Split(runtime_options.GetOrDefault(Opt::CpuAbiList), ',', &cpu_abilist_);
   1273 
   1274   fingerprint_ = runtime_options.ReleaseOrDefault(Opt::Fingerprint);
   1275 
   1276   if (runtime_options.GetOrDefault(Opt::Interpret)) {
   1277     GetInstrumentation()->ForceInterpretOnly();
   1278   }
   1279 
   1280   zygote_max_failed_boots_ = runtime_options.GetOrDefault(Opt::ZygoteMaxFailedBoots);
   1281   experimental_flags_ = runtime_options.GetOrDefault(Opt::Experimental);
   1282   is_low_memory_mode_ = runtime_options.Exists(Opt::LowMemoryMode);
   1283   madvise_random_access_ = runtime_options.GetOrDefault(Opt::MadviseRandomAccess);
   1284 
   1285   plugins_ = runtime_options.ReleaseOrDefault(Opt::Plugins);
   1286   agent_specs_ = runtime_options.ReleaseOrDefault(Opt::AgentPath);
   1287   // TODO Add back in -agentlib
   1288   // for (auto lib : runtime_options.ReleaseOrDefault(Opt::AgentLib)) {
   1289   //   agents_.push_back(lib);
   1290   // }
   1291 
   1292   float foreground_heap_growth_multiplier;
   1293   if (is_low_memory_mode_ && !runtime_options.Exists(Opt::ForegroundHeapGrowthMultiplier)) {
   1294     // If low memory mode, use 1.0 as the multiplier by default.
   1295     foreground_heap_growth_multiplier = 1.0f;
   1296   } else {
   1297     foreground_heap_growth_multiplier =
   1298         runtime_options.GetOrDefault(Opt::ForegroundHeapGrowthMultiplier) +
   1299             kExtraDefaultHeapGrowthMultiplier;
   1300   }
   1301   XGcOption xgc_option = runtime_options.GetOrDefault(Opt::GcOption);
   1302 
   1303   // Generational CC collection is currently only compatible with Baker read barriers.
   1304   bool use_generational_cc = kUseBakerReadBarrier && xgc_option.generational_cc;
   1305 
   1306   image_space_loading_order_ = runtime_options.GetOrDefault(Opt::ImageSpaceLoadingOrder);
   1307 
   1308   heap_ = new gc::Heap(runtime_options.GetOrDefault(Opt::MemoryInitialSize),
   1309                        runtime_options.GetOrDefault(Opt::HeapGrowthLimit),
   1310                        runtime_options.GetOrDefault(Opt::HeapMinFree),
   1311                        runtime_options.GetOrDefault(Opt::HeapMaxFree),
   1312                        runtime_options.GetOrDefault(Opt::HeapTargetUtilization),
   1313                        foreground_heap_growth_multiplier,
   1314                        runtime_options.GetOrDefault(Opt::MemoryMaximumSize),
   1315                        runtime_options.GetOrDefault(Opt::NonMovingSpaceCapacity),
   1316                        GetBootClassPath(),
   1317                        GetBootClassPathLocations(),
   1318                        image_location_,
   1319                        instruction_set_,
   1320                        // Override the collector type to CC if the read barrier config.
   1321                        kUseReadBarrier ? gc::kCollectorTypeCC : xgc_option.collector_type_,
   1322                        kUseReadBarrier ? BackgroundGcOption(gc::kCollectorTypeCCBackground)
   1323                                        : runtime_options.GetOrDefault(Opt::BackgroundGc),
   1324                        runtime_options.GetOrDefault(Opt::LargeObjectSpace),
   1325                        runtime_options.GetOrDefault(Opt::LargeObjectThreshold),
   1326                        runtime_options.GetOrDefault(Opt::ParallelGCThreads),
   1327                        runtime_options.GetOrDefault(Opt::ConcGCThreads),
   1328                        runtime_options.Exists(Opt::LowMemoryMode),
   1329                        runtime_options.GetOrDefault(Opt::LongPauseLogThreshold),
   1330                        runtime_options.GetOrDefault(Opt::LongGCLogThreshold),
   1331                        runtime_options.Exists(Opt::IgnoreMaxFootprint),
   1332                        runtime_options.GetOrDefault(Opt::UseTLAB),
   1333                        xgc_option.verify_pre_gc_heap_,
   1334                        xgc_option.verify_pre_sweeping_heap_,
   1335                        xgc_option.verify_post_gc_heap_,
   1336                        xgc_option.verify_pre_gc_rosalloc_,
   1337                        xgc_option.verify_pre_sweeping_rosalloc_,
   1338                        xgc_option.verify_post_gc_rosalloc_,
   1339                        xgc_option.gcstress_,
   1340                        xgc_option.measure_,
   1341                        runtime_options.GetOrDefault(Opt::EnableHSpaceCompactForOOM),
   1342                        use_generational_cc,
   1343                        runtime_options.GetOrDefault(Opt::HSpaceCompactForOOMMinIntervalsMs),
   1344                        runtime_options.Exists(Opt::DumpRegionInfoBeforeGC),
   1345                        runtime_options.Exists(Opt::DumpRegionInfoAfterGC),
   1346                        image_space_loading_order_);
   1347 
   1348   if (!heap_->HasBootImageSpace() && !allow_dex_file_fallback_) {
   1349     LOG(ERROR) << "Dex file fallback disabled, cannot continue without image.";
   1350     return false;
   1351   }
   1352 
   1353   dump_gc_performance_on_shutdown_ = runtime_options.Exists(Opt::DumpGCPerformanceOnShutdown);
   1354 
   1355   jdwp_options_ = runtime_options.GetOrDefault(Opt::JdwpOptions);
   1356   jdwp_provider_ = CanonicalizeJdwpProvider(runtime_options.GetOrDefault(Opt::JdwpProvider),
   1357                                             IsJavaDebuggable());
   1358   switch (jdwp_provider_) {
   1359     case JdwpProvider::kNone: {
   1360       VLOG(jdwp) << "Disabling all JDWP support.";
   1361       if (!jdwp_options_.empty()) {
   1362         bool has_transport = jdwp_options_.find("transport") != std::string::npos;
   1363         const char* transport_internal = !has_transport ? "transport=dt_android_adb," : "";
   1364         std::string adb_connection_args =
   1365             std::string("  -XjdwpProvider:adbconnection -XjdwpOptions:") + jdwp_options_;
   1366         LOG(WARNING) << "Jdwp options given when jdwp is disabled! You probably want to enable "
   1367                      << "jdwp with one of:" << std::endl
   1368                      << "  -XjdwpProvider:internal "
   1369                      << "-XjdwpOptions:" << transport_internal << jdwp_options_ << std::endl
   1370                      << "  -Xplugin:libopenjdkjvmti" << (kIsDebugBuild ? "d" : "") << ".so "
   1371                      << "-agentpath:libjdwp.so=" << jdwp_options_ << std::endl
   1372                      << (has_transport ? "" : adb_connection_args);
   1373       }
   1374       break;
   1375     }
   1376     case JdwpProvider::kInternal: {
   1377       if (runtime_options.Exists(Opt::JdwpOptions)) {
   1378         JDWP::JdwpOptions ops;
   1379         if (!JDWP::ParseJdwpOptions(runtime_options.GetOrDefault(Opt::JdwpOptions), &ops)) {
   1380           LOG(ERROR) << "failed to parse jdwp options!";
   1381           return false;
   1382         }
   1383         Dbg::ConfigureJdwp(ops);
   1384       }
   1385       break;
   1386     }
   1387     case JdwpProvider::kAdbConnection: {
   1388       constexpr const char* plugin_name = kIsDebugBuild ? "libadbconnectiond.so"
   1389                                                         : "libadbconnection.so";
   1390       plugins_.push_back(Plugin::Create(plugin_name));
   1391       break;
   1392     }
   1393     case JdwpProvider::kUnset: {
   1394       LOG(FATAL) << "Illegal jdwp provider " << jdwp_provider_ << " was not filtered out!";
   1395     }
   1396   }
   1397   callbacks_->AddThreadLifecycleCallback(Dbg::GetThreadLifecycleCallback());
   1398   callbacks_->AddClassLoadCallback(Dbg::GetClassLoadCallback());
   1399 
   1400   jit_options_.reset(jit::JitOptions::CreateFromRuntimeArguments(runtime_options));
   1401   if (IsAotCompiler()) {
   1402     // If we are already the compiler at this point, we must be dex2oat. Don't create the jit in
   1403     // this case.
   1404     // If runtime_options doesn't have UseJIT set to true then CreateFromRuntimeArguments returns
   1405     // null and we don't create the jit.
   1406     jit_options_->SetUseJitCompilation(false);
   1407     jit_options_->SetSaveProfilingInfo(false);
   1408   }
   1409 
   1410   // Use MemMap arena pool for jit, malloc otherwise. Malloc arenas are faster to allocate but
   1411   // can't be trimmed as easily.
   1412   const bool use_malloc = IsAotCompiler();
   1413   if (use_malloc) {
   1414     arena_pool_.reset(new MallocArenaPool());
   1415     jit_arena_pool_.reset(new MallocArenaPool());
   1416   } else {
   1417     arena_pool_.reset(new MemMapArenaPool(/* low_4gb= */ false));
   1418     jit_arena_pool_.reset(new MemMapArenaPool(/* low_4gb= */ false, "CompilerMetadata"));
   1419   }
   1420 
   1421   if (IsAotCompiler() && Is64BitInstructionSet(kRuntimeISA)) {
   1422     // 4gb, no malloc. Explanation in header.
   1423     low_4gb_arena_pool_.reset(new MemMapArenaPool(/* low_4gb= */ true));
   1424   }
   1425   linear_alloc_.reset(CreateLinearAlloc());
   1426 
   1427   BlockSignals();
   1428   InitPlatformSignalHandlers();
   1429 
   1430   // Change the implicit checks flags based on runtime architecture.
   1431   switch (kRuntimeISA) {
   1432     case InstructionSet::kArm:
   1433     case InstructionSet::kThumb2:
   1434     case InstructionSet::kX86:
   1435     case InstructionSet::kArm64:
   1436     case InstructionSet::kX86_64:
   1437     case InstructionSet::kMips:
   1438     case InstructionSet::kMips64:
   1439       implicit_null_checks_ = true;
   1440       // Historical note: Installing stack protection was not playing well with Valgrind.
   1441       implicit_so_checks_ = true;
   1442       break;
   1443     default:
   1444       // Keep the defaults.
   1445       break;
   1446   }
   1447 
   1448   if (!no_sig_chain_) {
   1449     // Dex2Oat's Runtime does not need the signal chain or the fault handler.
   1450     if (implicit_null_checks_ || implicit_so_checks_ || implicit_suspend_checks_) {
   1451       fault_manager.Init();
   1452 
   1453       // These need to be in a specific order.  The null point check handler must be
   1454       // after the suspend check and stack overflow check handlers.
   1455       //
   1456       // Note: the instances attach themselves to the fault manager and are handled by it. The
   1457       //       manager will delete the instance on Shutdown().
   1458       if (implicit_suspend_checks_) {
   1459         new SuspensionHandler(&fault_manager);
   1460       }
   1461 
   1462       if (implicit_so_checks_) {
   1463         new StackOverflowHandler(&fault_manager);
   1464       }
   1465 
   1466       if (implicit_null_checks_) {
   1467         new NullPointerHandler(&fault_manager);
   1468       }
   1469 
   1470       if (kEnableJavaStackTraceHandler) {
   1471         new JavaStackTraceHandler(&fault_manager);
   1472       }
   1473     }
   1474   }
   1475 
   1476   verifier_logging_threshold_ms_ = runtime_options.GetOrDefault(Opt::VerifierLoggingThreshold);
   1477 
   1478   std::string error_msg;
   1479   java_vm_ = JavaVMExt::Create(this, runtime_options, &error_msg);
   1480   if (java_vm_.get() == nullptr) {
   1481     LOG(ERROR) << "Could not initialize JavaVMExt: " << error_msg;
   1482     return false;
   1483   }
   1484 
   1485   // Add the JniEnv handler.
   1486   // TODO Refactor this stuff.
   1487   java_vm_->AddEnvironmentHook(JNIEnvExt::GetEnvHandler);
   1488 
   1489   Thread::Startup();
   1490 
   1491   // ClassLinker needs an attached thread, but we can't fully attach a thread without creating
   1492   // objects. We can't supply a thread group yet; it will be fixed later. Since we are the main
   1493   // thread, we do not get a java peer.
   1494   Thread* self = Thread::Attach("main", false, nullptr, false);
   1495   CHECK_EQ(self->GetThreadId(), ThreadList::kMainThreadId);
   1496   CHECK(self != nullptr);
   1497 
   1498   self->SetIsRuntimeThread(IsAotCompiler());
   1499 
   1500   // Set us to runnable so tools using a runtime can allocate and GC by default
   1501   self->TransitionFromSuspendedToRunnable();
   1502 
   1503   // Now we're attached, we can take the heap locks and validate the heap.
   1504   GetHeap()->EnableObjectValidation();
   1505 
   1506   CHECK_GE(GetHeap()->GetContinuousSpaces().size(), 1U);
   1507 
   1508   if (UNLIKELY(IsAotCompiler())) {
   1509     class_linker_ = new AotClassLinker(intern_table_);
   1510   } else {
   1511     class_linker_ = new ClassLinker(
   1512         intern_table_,
   1513         runtime_options.GetOrDefault(Opt::FastClassNotFoundException));
   1514   }
   1515   if (GetHeap()->HasBootImageSpace()) {
   1516     bool result = class_linker_->InitFromBootImage(&error_msg);
   1517     if (!result) {
   1518       LOG(ERROR) << "Could not initialize from image: " << error_msg;
   1519       return false;
   1520     }
   1521     if (kIsDebugBuild) {
   1522       for (auto image_space : GetHeap()->GetBootImageSpaces()) {
   1523         image_space->VerifyImageAllocations();
   1524       }
   1525     }
   1526     {
   1527       ScopedTrace trace2("AddImageStringsToTable");
   1528       for (gc::space::ImageSpace* image_space : heap_->GetBootImageSpaces()) {
   1529         GetInternTable()->AddImageStringsToTable(image_space, VoidFunctor());
   1530       }
   1531     }
   1532     if (heap_->GetBootImageSpaces().size() != GetBootClassPath().size()) {
   1533       // The boot image did not contain all boot class path components. Load the rest.
   1534       DCHECK_LT(heap_->GetBootImageSpaces().size(), GetBootClassPath().size());
   1535       size_t start = heap_->GetBootImageSpaces().size();
   1536       DCHECK_LT(start, GetBootClassPath().size());
   1537       std::vector<std::unique_ptr<const DexFile>> extra_boot_class_path;
   1538       if (runtime_options.Exists(Opt::BootClassPathDexList)) {
   1539         extra_boot_class_path.swap(*runtime_options.GetOrDefault(Opt::BootClassPathDexList));
   1540       } else {
   1541         OpenBootDexFiles(ArrayRef<const std::string>(GetBootClassPath()).SubArray(start),
   1542                          ArrayRef<const std::string>(GetBootClassPathLocations()).SubArray(start),
   1543                          &extra_boot_class_path);
   1544       }
   1545       class_linker_->AddExtraBootDexFiles(self, std::move(extra_boot_class_path));
   1546     }
   1547     if (IsJavaDebuggable()) {
   1548       // Now that we have loaded the boot image, deoptimize its methods if we are running
   1549       // debuggable, as the code may have been compiled non-debuggable.
   1550       ScopedThreadSuspension sts(self, ThreadState::kNative);
   1551       ScopedSuspendAll ssa(__FUNCTION__);
   1552       DeoptimizeBootImage();
   1553     }
   1554   } else {
   1555     std::vector<std::unique_ptr<const DexFile>> boot_class_path;
   1556     if (runtime_options.Exists(Opt::BootClassPathDexList)) {
   1557       boot_class_path.swap(*runtime_options.GetOrDefault(Opt::BootClassPathDexList));
   1558     } else {
   1559       OpenBootDexFiles(ArrayRef<const std::string>(GetBootClassPath()),
   1560                        ArrayRef<const std::string>(GetBootClassPathLocations()),
   1561                        &boot_class_path);
   1562     }
   1563     if (!class_linker_->InitWithoutImage(std::move(boot_class_path), &error_msg)) {
   1564       LOG(ERROR) << "Could not initialize without image: " << error_msg;
   1565       return false;
   1566     }
   1567 
   1568     // TODO: Should we move the following to InitWithoutImage?
   1569     SetInstructionSet(instruction_set_);
   1570     for (uint32_t i = 0; i < kCalleeSaveSize; i++) {
   1571       CalleeSaveType type = CalleeSaveType(i);
   1572       if (!HasCalleeSaveMethod(type)) {
   1573         SetCalleeSaveMethod(CreateCalleeSaveMethod(), type);
   1574       }
   1575     }
   1576   }
   1577 
   1578   CHECK(class_linker_ != nullptr);
   1579 
   1580   verifier::ClassVerifier::Init();
   1581 
   1582   if (runtime_options.Exists(Opt::MethodTrace)) {
   1583     trace_config_.reset(new TraceConfig());
   1584     trace_config_->trace_file = runtime_options.ReleaseOrDefault(Opt::MethodTraceFile);
   1585     trace_config_->trace_file_size = runtime_options.ReleaseOrDefault(Opt::MethodTraceFileSize);
   1586     trace_config_->trace_mode = Trace::TraceMode::kMethodTracing;
   1587     trace_config_->trace_output_mode = runtime_options.Exists(Opt::MethodTraceStreaming) ?
   1588         Trace::TraceOutputMode::kStreaming :
   1589         Trace::TraceOutputMode::kFile;
   1590   }
   1591 
   1592   // TODO: move this to just be an Trace::Start argument
   1593   Trace::SetDefaultClockSource(runtime_options.GetOrDefault(Opt::ProfileClock));
   1594 
   1595   if (GetHeap()->HasBootImageSpace()) {
   1596     const ImageHeader& image_header = GetHeap()->GetBootImageSpaces()[0]->GetImageHeader();
   1597     pre_allocated_OutOfMemoryError_when_throwing_exception_ = GcRoot<mirror::Throwable>(
   1598         image_header.GetImageRoot(ImageHeader::kOomeWhenThrowingException)->AsThrowable());
   1599     DCHECK(pre_allocated_OutOfMemoryError_when_throwing_exception_.Read()->GetClass()
   1600                ->DescriptorEquals("Ljava/lang/OutOfMemoryError;"));
   1601     pre_allocated_OutOfMemoryError_when_throwing_oome_ = GcRoot<mirror::Throwable>(
   1602         image_header.GetImageRoot(ImageHeader::kOomeWhenThrowingOome)->AsThrowable());
   1603     DCHECK(pre_allocated_OutOfMemoryError_when_throwing_oome_.Read()->GetClass()
   1604                ->DescriptorEquals("Ljava/lang/OutOfMemoryError;"));
   1605     pre_allocated_OutOfMemoryError_when_handling_stack_overflow_ = GcRoot<mirror::Throwable>(
   1606         image_header.GetImageRoot(ImageHeader::kOomeWhenHandlingStackOverflow)->AsThrowable());
   1607     DCHECK(pre_allocated_OutOfMemoryError_when_handling_stack_overflow_.Read()->GetClass()
   1608                ->DescriptorEquals("Ljava/lang/OutOfMemoryError;"));
   1609     pre_allocated_NoClassDefFoundError_ = GcRoot<mirror::Throwable>(
   1610         image_header.GetImageRoot(ImageHeader::kNoClassDefFoundError)->AsThrowable());
   1611     DCHECK(pre_allocated_NoClassDefFoundError_.Read()->GetClass()
   1612                ->DescriptorEquals("Ljava/lang/NoClassDefFoundError;"));
   1613   } else {
   1614     // Pre-allocate an OutOfMemoryError for the case when we fail to
   1615     // allocate the exception to be thrown.
   1616     CreatePreAllocatedException(self,
   1617                                 this,
   1618                                 &pre_allocated_OutOfMemoryError_when_throwing_exception_,
   1619                                 "Ljava/lang/OutOfMemoryError;",
   1620                                 "OutOfMemoryError thrown while trying to throw an exception; "
   1621                                     "no stack trace available");
   1622     // Pre-allocate an OutOfMemoryError for the double-OOME case.
   1623     CreatePreAllocatedException(self,
   1624                                 this,
   1625                                 &pre_allocated_OutOfMemoryError_when_throwing_oome_,
   1626                                 "Ljava/lang/OutOfMemoryError;",
   1627                                 "OutOfMemoryError thrown while trying to throw OutOfMemoryError; "
   1628                                     "no stack trace available");
   1629     // Pre-allocate an OutOfMemoryError for the case when we fail to
   1630     // allocate while handling a stack overflow.
   1631     CreatePreAllocatedException(self,
   1632                                 this,
   1633                                 &pre_allocated_OutOfMemoryError_when_handling_stack_overflow_,
   1634                                 "Ljava/lang/OutOfMemoryError;",
   1635                                 "OutOfMemoryError thrown while trying to handle a stack overflow; "
   1636                                     "no stack trace available");
   1637 
   1638     // Pre-allocate a NoClassDefFoundError for the common case of failing to find a system class
   1639     // ahead of checking the application's class loader.
   1640     CreatePreAllocatedException(self,
   1641                                 this,
   1642                                 &pre_allocated_NoClassDefFoundError_,
   1643                                 "Ljava/lang/NoClassDefFoundError;",
   1644                                 "Class not found using the boot class loader; "
   1645                                     "no stack trace available");
   1646   }
   1647 
   1648   // Runtime initialization is largely done now.
   1649   // We load plugins first since that can modify the runtime state slightly.
   1650   // Load all plugins
   1651   {
   1652     // The init method of plugins expect the state of the thread to be non runnable.
   1653     ScopedThreadSuspension sts(self, ThreadState::kNative);
   1654     for (auto& plugin : plugins_) {
   1655       std::string err;
   1656       if (!plugin.Load(&err)) {
   1657         LOG(FATAL) << plugin << " failed to load: " << err;
   1658       }
   1659     }
   1660   }
   1661 
   1662   // Look for a native bridge.
   1663   //
   1664   // The intended flow here is, in the case of a running system:
   1665   //
   1666   // Runtime::Init() (zygote):
   1667   //   LoadNativeBridge -> dlopen from cmd line parameter.
   1668   //  |
   1669   //  V
   1670   // Runtime::Start() (zygote):
   1671   //   No-op wrt native bridge.
   1672   //  |
   1673   //  | start app
   1674   //  V
   1675   // DidForkFromZygote(action)
   1676   //   action = kUnload -> dlclose native bridge.
   1677   //   action = kInitialize -> initialize library
   1678   //
   1679   //
   1680   // The intended flow here is, in the case of a simple dalvikvm call:
   1681   //
   1682   // Runtime::Init():
   1683   //   LoadNativeBridge -> dlopen from cmd line parameter.
   1684   //  |
   1685   //  V
   1686   // Runtime::Start():
   1687   //   DidForkFromZygote(kInitialize) -> try to initialize any native bridge given.
   1688   //   No-op wrt native bridge.
   1689   {
   1690     std::string native_bridge_file_name = runtime_options.ReleaseOrDefault(Opt::NativeBridge);
   1691     is_native_bridge_loaded_ = LoadNativeBridge(native_bridge_file_name);
   1692   }
   1693 
   1694   // Startup agents
   1695   // TODO Maybe we should start a new thread to run these on. Investigate RI behavior more.
   1696   for (auto& agent_spec : agent_specs_) {
   1697     // TODO Check err
   1698     int res = 0;
   1699     std::string err = "";
   1700     ti::LoadError error;
   1701     std::unique_ptr<ti::Agent> agent = agent_spec.Load(&res, &error, &err);
   1702 
   1703     if (agent != nullptr) {
   1704       agents_.push_back(std::move(agent));
   1705       continue;
   1706     }
   1707 
   1708     switch (error) {
   1709       case ti::LoadError::kInitializationError:
   1710         LOG(FATAL) << "Unable to initialize agent!";
   1711         UNREACHABLE();
   1712 
   1713       case ti::LoadError::kLoadingError:
   1714         LOG(ERROR) << "Unable to load an agent: " << err;
   1715         continue;
   1716 
   1717       case ti::LoadError::kNoError:
   1718         break;
   1719     }
   1720     LOG(FATAL) << "Unreachable";
   1721     UNREACHABLE();
   1722   }
   1723   {
   1724     ScopedObjectAccess soa(self);
   1725     callbacks_->NextRuntimePhase(RuntimePhaseCallback::RuntimePhase::kInitialAgents);
   1726   }
   1727 
   1728   VLOG(startup) << "Runtime::Init exiting";
   1729 
   1730   // Set OnlyUseSystemOatFiles only after boot classpath has been set up.
   1731   if (is_zygote_ || runtime_options.Exists(Opt::OnlyUseSystemOatFiles)) {
   1732     oat_file_manager_->SetOnlyUseSystemOatFiles(/*enforce=*/ true,
   1733                                                 /*assert_no_files_loaded=*/ true);
   1734   }
   1735 
   1736   return true;
   1737 }
   1738 
   1739 static bool EnsureJvmtiPlugin(Runtime* runtime,
   1740                               std::vector<Plugin>* plugins,
   1741                               std::string* error_msg) {
   1742   constexpr const char* plugin_name = kIsDebugBuild ? "libopenjdkjvmtid.so" : "libopenjdkjvmti.so";
   1743 
   1744   // Is the plugin already loaded?
   1745   for (const Plugin& p : *plugins) {
   1746     if (p.GetLibrary() == plugin_name) {
   1747       return true;
   1748     }
   1749   }
   1750 
   1751   // TODO Rename Dbg::IsJdwpAllowed is IsDebuggingAllowed.
   1752   DCHECK(Dbg::IsJdwpAllowed() || !runtime->IsJavaDebuggable())
   1753       << "Being debuggable requires that jdwp (i.e. debugging) is allowed.";
   1754   // Is the process debuggable? Otherwise, do not attempt to load the plugin unless we are
   1755   // specifically allowed.
   1756   if (!Dbg::IsJdwpAllowed()) {
   1757     *error_msg = "Process is not allowed to load openjdkjvmti plugin. Process must be debuggable";
   1758     return false;
   1759   }
   1760 
   1761   Plugin new_plugin = Plugin::Create(plugin_name);
   1762 
   1763   if (!new_plugin.Load(error_msg)) {
   1764     return false;
   1765   }
   1766 
   1767   plugins->push_back(std::move(new_plugin));
   1768   return true;
   1769 }
   1770 
   1771 // Attach a new agent and add it to the list of runtime agents
   1772 //
   1773 // TODO: once we decide on the threading model for agents,
   1774 //   revisit this and make sure we're doing this on the right thread
   1775 //   (and we synchronize access to any shared data structures like "agents_")
   1776 //
   1777 void Runtime::AttachAgent(JNIEnv* env, const std::string& agent_arg, jobject class_loader) {
   1778   std::string error_msg;
   1779   if (!EnsureJvmtiPlugin(this, &plugins_, &error_msg)) {
   1780     LOG(WARNING) << "Could not load plugin: " << error_msg;
   1781     ScopedObjectAccess soa(Thread::Current());
   1782     ThrowIOException("%s", error_msg.c_str());
   1783     return;
   1784   }
   1785 
   1786   ti::AgentSpec agent_spec(agent_arg);
   1787 
   1788   int res = 0;
   1789   ti::LoadError error;
   1790   std::unique_ptr<ti::Agent> agent = agent_spec.Attach(env, class_loader, &res, &error, &error_msg);
   1791 
   1792   if (agent != nullptr) {
   1793     agents_.push_back(std::move(agent));
   1794   } else {
   1795     LOG(WARNING) << "Agent attach failed (result=" << error << ") : " << error_msg;
   1796     ScopedObjectAccess soa(Thread::Current());
   1797     ThrowIOException("%s", error_msg.c_str());
   1798   }
   1799 }
   1800 
   1801 void Runtime::InitNativeMethods() {
   1802   VLOG(startup) << "Runtime::InitNativeMethods entering";
   1803   Thread* self = Thread::Current();
   1804   JNIEnv* env = self->GetJniEnv();
   1805 
   1806   // Must be in the kNative state for calling native methods (JNI_OnLoad code).
   1807   CHECK_EQ(self->GetState(), kNative);
   1808 
   1809   // Set up the native methods provided by the runtime itself.
   1810   RegisterRuntimeNativeMethods(env);
   1811 
   1812   // Initialize classes used in JNI. The initialization requires runtime native
   1813   // methods to be loaded first.
   1814   WellKnownClasses::Init(env);
   1815 
   1816   // Then set up libjavacore / libopenjdk, which are just a regular JNI libraries with
   1817   // a regular JNI_OnLoad. Most JNI libraries can just use System.loadLibrary, but
   1818   // libcore can't because it's the library that implements System.loadLibrary!
   1819   {
   1820     std::string error_msg;
   1821     if (!java_vm_->LoadNativeLibrary(
   1822           env, "libjavacore.so", nullptr, WellKnownClasses::java_lang_Object, &error_msg)) {
   1823       LOG(FATAL) << "LoadNativeLibrary failed for \"libjavacore.so\": " << error_msg;
   1824     }
   1825   }
   1826   {
   1827     constexpr const char* kOpenJdkLibrary = kIsDebugBuild
   1828                                                 ? "libopenjdkd.so"
   1829                                                 : "libopenjdk.so";
   1830     std::string error_msg;
   1831     if (!java_vm_->LoadNativeLibrary(
   1832           env, kOpenJdkLibrary, nullptr, WellKnownClasses::java_lang_Object, &error_msg)) {
   1833       LOG(FATAL) << "LoadNativeLibrary failed for \"" << kOpenJdkLibrary << "\": " << error_msg;
   1834     }
   1835   }
   1836 
   1837   // Initialize well known classes that may invoke runtime native methods.
   1838   WellKnownClasses::LateInit(env);
   1839 
   1840   // Having loaded native libraries for Managed Core library, enable field and
   1841   // method resolution checks via JNI from native code.
   1842   JniInitializeNativeCallerCheck();
   1843 
   1844   VLOG(startup) << "Runtime::InitNativeMethods exiting";
   1845 }
   1846 
void Runtime::ReclaimArenaPoolMemory() {
  // Ask the arena pool to reclaim unused memory. NOTE(review): assumes
  // LockReclaimMemory takes the pool's internal lock and frees unused arenas —
  // confirm against the ArenaPool implementation.
  arena_pool_->LockReclaimMemory();
}
   1850 
   1851 void Runtime::InitThreadGroups(Thread* self) {
   1852   JNIEnvExt* env = self->GetJniEnv();
   1853   ScopedJniEnvLocalRefState env_state(env);
   1854   main_thread_group_ =
   1855       env->NewGlobalRef(env->GetStaticObjectField(
   1856           WellKnownClasses::java_lang_ThreadGroup,
   1857           WellKnownClasses::java_lang_ThreadGroup_mainThreadGroup));
   1858   CHECK(main_thread_group_ != nullptr || IsAotCompiler());
   1859   system_thread_group_ =
   1860       env->NewGlobalRef(env->GetStaticObjectField(
   1861           WellKnownClasses::java_lang_ThreadGroup,
   1862           WellKnownClasses::java_lang_ThreadGroup_systemThreadGroup));
   1863   CHECK(system_thread_group_ != nullptr || IsAotCompiler());
   1864 }
   1865 
jobject Runtime::GetMainThreadGroup() const {
  // Global ref cached by InitThreadGroups(); may legitimately be null only
  // when running as the AOT compiler (dex2oat).
  CHECK(main_thread_group_ != nullptr || IsAotCompiler());
  return main_thread_group_;
}
   1870 
jobject Runtime::GetSystemThreadGroup() const {
  // Global ref cached by InitThreadGroups(); may legitimately be null only
  // when running as the AOT compiler (dex2oat).
  CHECK(system_thread_group_ != nullptr || IsAotCompiler());
  return system_thread_group_;
}
   1875 
jobject Runtime::GetSystemClassLoader() const {
  // The system class loader is only allowed to be null for the AOT compiler.
  CHECK(system_class_loader_ != nullptr || IsAotCompiler());
  return system_class_loader_;
}
   1880 
void Runtime::RegisterRuntimeNativeMethods(JNIEnv* env) {
  // Register the native methods that the runtime itself implements for the
  // managed core library (dalvik.system.*, java.lang.*, reflection, ddmc,
  // sun.misc.Unsafe, ...). Each register_* helper binds the natives of one
  // managed class into |env|'s JavaVM.
  register_dalvik_system_DexFile(env);
  register_dalvik_system_VMDebug(env);
  register_dalvik_system_VMRuntime(env);
  register_dalvik_system_VMStack(env);
  register_dalvik_system_ZygoteHooks(env);
  register_java_lang_Class(env);
  register_java_lang_Object(env);
  register_java_lang_invoke_MethodHandleImpl(env);
  register_java_lang_ref_FinalizerReference(env);
  register_java_lang_reflect_Array(env);
  register_java_lang_reflect_Constructor(env);
  register_java_lang_reflect_Executable(env);
  register_java_lang_reflect_Field(env);
  register_java_lang_reflect_Method(env);
  register_java_lang_reflect_Parameter(env);
  register_java_lang_reflect_Proxy(env);
  register_java_lang_ref_Reference(env);
  register_java_lang_String(env);
  register_java_lang_StringFactory(env);
  register_java_lang_System(env);
  register_java_lang_Thread(env);
  register_java_lang_Throwable(env);
  register_java_lang_VMClassLoader(env);
  register_java_util_concurrent_atomic_AtomicLong(env);
  register_libcore_util_CharsetUtils(env);
  register_org_apache_harmony_dalvik_ddmc_DdmServer(env);
  register_org_apache_harmony_dalvik_ddmc_DdmVmInternal(env);
  register_sun_misc_Unsafe(env);
}
   1911 
   1912 std::ostream& operator<<(std::ostream& os, const DeoptimizationKind& kind) {
   1913   os << GetDeoptimizationKindName(kind);
   1914   return os;
   1915 }
   1916 
   1917 void Runtime::DumpDeoptimizations(std::ostream& os) {
   1918   for (size_t i = 0; i <= static_cast<size_t>(DeoptimizationKind::kLast); ++i) {
   1919     if (deoptimization_counts_[i] != 0) {
   1920       os << "Number of "
   1921          << GetDeoptimizationKindName(static_cast<DeoptimizationKind>(i))
   1922          << " deoptimizations: "
   1923          << deoptimization_counts_[i]
   1924          << "\n";
   1925     }
   1926   }
   1927 }
   1928 
void Runtime::DumpForSigQuit(std::ostream& os) {
  // Dump the state of the major runtime components in response to SIGQUIT
  // (see BlockSignals(), which notes SIGQUIT is used to dump runtime state).
  GetClassLinker()->DumpForSigQuit(os);
  GetInternTable()->DumpForSigQuit(os);
  GetJavaVM()->DumpForSigQuit(os);
  GetHeap()->DumpForSigQuit(os);
  oat_file_manager_->DumpForSigQuit(os);
  // The JIT is optional (e.g. AOT-only configurations); note its absence.
  if (GetJit() != nullptr) {
    GetJit()->DumpForSigQuit(os);
  } else {
    os << "Running non JIT\n";
  }
  DumpDeoptimizations(os);
  TrackedAllocators::Dump(os);
  os << "\n";

  // Thread stacks and lock state come last.
  thread_list_->DumpForSigQuit(os);
  BaseMutex::DumpAll(os);

  // Inform anyone else who is interested in SigQuit.
  {
    // Callbacks may touch managed objects, so attach to the mutator lock.
    ScopedObjectAccess soa(Thread::Current());
    callbacks_->SigQuit();
  }
}
   1953 
   1954 void Runtime::DumpLockHolders(std::ostream& os) {
   1955   uint64_t mutator_lock_owner = Locks::mutator_lock_->GetExclusiveOwnerTid();
   1956   pid_t thread_list_lock_owner = GetThreadList()->GetLockOwner();
   1957   pid_t classes_lock_owner = GetClassLinker()->GetClassesLockOwner();
   1958   pid_t dex_lock_owner = GetClassLinker()->GetDexLockOwner();
   1959   if ((thread_list_lock_owner | classes_lock_owner | dex_lock_owner) != 0) {
   1960     os << "Mutator lock exclusive owner tid: " << mutator_lock_owner << "\n"
   1961        << "ThreadList lock owner tid: " << thread_list_lock_owner << "\n"
   1962        << "ClassLinker classes lock owner tid: " << classes_lock_owner << "\n"
   1963        << "ClassLinker dex lock owner tid: " << dex_lock_owner << "\n";
   1964   }
   1965 }
   1966 
   1967 void Runtime::SetStatsEnabled(bool new_state) {
   1968   Thread* self = Thread::Current();
   1969   MutexLock mu(self, *Locks::instrument_entrypoints_lock_);
   1970   if (new_state == true) {
   1971     GetStats()->Clear(~0);
   1972     // TODO: wouldn't it make more sense to clear _all_ threads' stats?
   1973     self->GetStats()->Clear(~0);
   1974     if (stats_enabled_ != new_state) {
   1975       GetInstrumentation()->InstrumentQuickAllocEntryPointsLocked();
   1976     }
   1977   } else if (stats_enabled_ != new_state) {
   1978     GetInstrumentation()->UninstrumentQuickAllocEntryPointsLocked();
   1979   }
   1980   stats_enabled_ = new_state;
   1981 }
   1982 
void Runtime::ResetStats(int kinds) {
  // Clear the selected statistics: the low 16 bits of |kinds| select global
  // stats, the high 16 bits select the calling thread's stats (the same
  // encoding GetStat() uses).
  GetStats()->Clear(kinds & 0xffff);
  // TODO: wouldn't it make more sense to clear _all_ threads' stats?
  Thread::Current()->GetStats()->Clear(kinds >> 16);
}
   1988 
   1989 int32_t Runtime::GetStat(int kind) {
   1990   RuntimeStats* stats;
   1991   if (kind < (1<<16)) {
   1992     stats = GetStats();
   1993   } else {
   1994     stats = Thread::Current()->GetStats();
   1995     kind >>= 16;
   1996   }
   1997   switch (kind) {
   1998   case KIND_ALLOCATED_OBJECTS:
   1999     return stats->allocated_objects;
   2000   case KIND_ALLOCATED_BYTES:
   2001     return stats->allocated_bytes;
   2002   case KIND_FREED_OBJECTS:
   2003     return stats->freed_objects;
   2004   case KIND_FREED_BYTES:
   2005     return stats->freed_bytes;
   2006   case KIND_GC_INVOCATIONS:
   2007     return stats->gc_for_alloc_count;
   2008   case KIND_CLASS_INIT_COUNT:
   2009     return stats->class_init_count;
   2010   case KIND_CLASS_INIT_TIME:
   2011     // Convert ns to us, reduce to 32 bits.
   2012     return static_cast<int>(stats->class_init_time_ns / 1000);
   2013   case KIND_EXT_ALLOCATED_OBJECTS:
   2014   case KIND_EXT_ALLOCATED_BYTES:
   2015   case KIND_EXT_FREED_OBJECTS:
   2016   case KIND_EXT_FREED_BYTES:
   2017     return 0;  // backward compatibility
   2018   default:
   2019     LOG(FATAL) << "Unknown statistic " << kind;
   2020     UNREACHABLE();
   2021   }
   2022 }
   2023 
void Runtime::BlockSignals() {
  // Block signals that the runtime wants delivered explicitly (presumably to a
  // dedicated signal-handling thread — confirm against SignalSet usage) rather
  // than to arbitrary threads' default handlers.
  SignalSet signals;
  // SIGPIPE is blocked so a failed pipe/socket write does not kill the process.
  signals.Add(SIGPIPE);
  // SIGQUIT is used to dump the runtime's state (including stack traces).
  signals.Add(SIGQUIT);
  // SIGUSR1 is used to initiate a GC.
  signals.Add(SIGUSR1);
  signals.Block();
}
   2033 
   2034 bool Runtime::AttachCurrentThread(const char* thread_name, bool as_daemon, jobject thread_group,
   2035                                   bool create_peer) {
   2036   ScopedTrace trace(__FUNCTION__);
   2037   Thread* self = Thread::Attach(thread_name, as_daemon, thread_group, create_peer);
   2038   // Run ThreadGroup.add to notify the group that this thread is now started.
   2039   if (self != nullptr && create_peer && !IsAotCompiler()) {
   2040     ScopedObjectAccess soa(self);
   2041     self->NotifyThreadGroup(soa, thread_group);
   2042   }
   2043   return self != nullptr;
   2044 }
   2045 
   2046 void Runtime::DetachCurrentThread() {
   2047   ScopedTrace trace(__FUNCTION__);
   2048   Thread* self = Thread::Current();
   2049   if (self == nullptr) {
   2050     LOG(FATAL) << "attempting to detach thread that is not attached";
   2051   }
   2052   if (self->HasManagedStack()) {
   2053     LOG(FATAL) << *Thread::Current() << " attempting to detach while still running code";
   2054   }
   2055   thread_list_->Unregister(self);
   2056 }
   2057 
   2058 mirror::Throwable* Runtime::GetPreAllocatedOutOfMemoryErrorWhenThrowingException() {
   2059   mirror::Throwable* oome = pre_allocated_OutOfMemoryError_when_throwing_exception_.Read();
   2060   if (oome == nullptr) {
   2061     LOG(ERROR) << "Failed to return pre-allocated OOME-when-throwing-exception";
   2062   }
   2063   return oome;
   2064 }
   2065 
   2066 mirror::Throwable* Runtime::GetPreAllocatedOutOfMemoryErrorWhenThrowingOOME() {
   2067   mirror::Throwable* oome = pre_allocated_OutOfMemoryError_when_throwing_oome_.Read();
   2068   if (oome == nullptr) {
   2069     LOG(ERROR) << "Failed to return pre-allocated OOME-when-throwing-OOME";
   2070   }
   2071   return oome;
   2072 }
   2073 
   2074 mirror::Throwable* Runtime::GetPreAllocatedOutOfMemoryErrorWhenHandlingStackOverflow() {
   2075   mirror::Throwable* oome = pre_allocated_OutOfMemoryError_when_handling_stack_overflow_.Read();
   2076   if (oome == nullptr) {
   2077     LOG(ERROR) << "Failed to return pre-allocated OOME-when-handling-stack-overflow";
   2078   }
   2079   return oome;
   2080 }
   2081 
   2082 mirror::Throwable* Runtime::GetPreAllocatedNoClassDefFoundError() {
   2083   mirror::Throwable* ncdfe = pre_allocated_NoClassDefFoundError_.Read();
   2084   if (ncdfe == nullptr) {
   2085     LOG(ERROR) << "Failed to return pre-allocated NoClassDefFoundError";
   2086   }
   2087   return ncdfe;
   2088 }
   2089 
   2090 void Runtime::VisitConstantRoots(RootVisitor* visitor) {
   2091   // Visiting the roots of these ArtMethods is not currently required since all the GcRoots are
   2092   // null.
   2093   BufferedRootVisitor<16> buffered_visitor(visitor, RootInfo(kRootVMInternal));
   2094   const PointerSize pointer_size = GetClassLinker()->GetImagePointerSize();
   2095   if (HasResolutionMethod()) {
   2096     resolution_method_->VisitRoots(buffered_visitor, pointer_size);
   2097   }
   2098   if (HasImtConflictMethod()) {
   2099     imt_conflict_method_->VisitRoots(buffered_visitor, pointer_size);
   2100   }
   2101   if (imt_unimplemented_method_ != nullptr) {
   2102     imt_unimplemented_method_->VisitRoots(buffered_visitor, pointer_size);
   2103   }
   2104   for (uint32_t i = 0; i < kCalleeSaveSize; ++i) {
   2105     auto* m = reinterpret_cast<ArtMethod*>(callee_save_methods_[i]);
   2106     if (m != nullptr) {
   2107       m->VisitRoots(buffered_visitor, pointer_size);
   2108     }
   2109   }
   2110 }
   2111 
   2112 void Runtime::VisitConcurrentRoots(RootVisitor* visitor, VisitRootFlags flags) {
   2113   intern_table_->VisitRoots(visitor, flags);
   2114   class_linker_->VisitRoots(visitor, flags);
   2115   heap_->VisitAllocationRecords(visitor);
   2116   if ((flags & kVisitRootFlagNewRoots) == 0) {
   2117     // Guaranteed to have no new roots in the constant roots.
   2118     VisitConstantRoots(visitor);
   2119   }
   2120   Dbg::VisitRoots(visitor);
   2121 }
   2122 
   2123 void Runtime::VisitTransactionRoots(RootVisitor* visitor) {
   2124   for (auto& transaction : preinitialization_transactions_) {
   2125     transaction->VisitRoots(visitor);
   2126   }
   2127 }
   2128 
   2129 void Runtime::VisitNonThreadRoots(RootVisitor* visitor) {
   2130   java_vm_->VisitRoots(visitor);
   2131   sentinel_.VisitRootIfNonNull(visitor, RootInfo(kRootVMInternal));
   2132   pre_allocated_OutOfMemoryError_when_throwing_exception_
   2133       .VisitRootIfNonNull(visitor, RootInfo(kRootVMInternal));
   2134   pre_allocated_OutOfMemoryError_when_throwing_oome_
   2135       .VisitRootIfNonNull(visitor, RootInfo(kRootVMInternal));
   2136   pre_allocated_OutOfMemoryError_when_handling_stack_overflow_
   2137       .VisitRootIfNonNull(visitor, RootInfo(kRootVMInternal));
   2138   pre_allocated_NoClassDefFoundError_.VisitRootIfNonNull(visitor, RootInfo(kRootVMInternal));
   2139   VisitImageRoots(visitor);
   2140   verifier::ClassVerifier::VisitStaticRoots(visitor);
   2141   VisitTransactionRoots(visitor);
   2142 }
   2143 
// Visits roots that must not be visited concurrently with the mutator:
// thread roots first, then the runtime's non-thread roots.
void Runtime::VisitNonConcurrentRoots(RootVisitor* visitor, VisitRootFlags flags) {
  VisitThreadRoots(visitor, flags);
  VisitNonThreadRoots(visitor);
}
   2148 
// Delegates root visiting to the thread list.
void Runtime::VisitThreadRoots(RootVisitor* visitor, VisitRootFlags flags) {
  thread_list_->VisitRoots(visitor, flags);
}
   2152 
// Visits all runtime roots: non-concurrent roots first, then concurrent ones.
void Runtime::VisitRoots(RootVisitor* visitor, VisitRootFlags flags) {
  VisitNonConcurrentRoots(visitor, flags);
  VisitConcurrentRoots(visitor, flags);
}
   2157 
   2158 void Runtime::VisitImageRoots(RootVisitor* visitor) {
   2159   for (auto* space : GetHeap()->GetContinuousSpaces()) {
   2160     if (space->IsImageSpace()) {
   2161       auto* image_space = space->AsImageSpace();
   2162       const auto& image_header = image_space->GetImageHeader();
   2163       for (int32_t i = 0, size = image_header.GetImageRoots()->GetLength(); i != size; ++i) {
   2164         mirror::Object* obj =
   2165             image_header.GetImageRoot(static_cast<ImageHeader::ImageRoot>(i)).Ptr();
   2166         if (obj != nullptr) {
   2167           mirror::Object* after_obj = obj;
   2168           visitor->VisitRoot(&after_obj, RootInfo(kRootStickyClass));
   2169           CHECK_EQ(after_obj, obj);
   2170         }
   2171       }
   2172     }
   2173   }
   2174 }
   2175 
   2176 static ArtMethod* CreateRuntimeMethod(ClassLinker* class_linker, LinearAlloc* linear_alloc) {
   2177   const PointerSize image_pointer_size = class_linker->GetImagePointerSize();
   2178   const size_t method_alignment = ArtMethod::Alignment(image_pointer_size);
   2179   const size_t method_size = ArtMethod::Size(image_pointer_size);
   2180   LengthPrefixedArray<ArtMethod>* method_array = class_linker->AllocArtMethodArray(
   2181       Thread::Current(),
   2182       linear_alloc,
   2183       1);
   2184   ArtMethod* method = &method_array->At(0, method_size, method_alignment);
   2185   CHECK(method != nullptr);
   2186   method->SetDexMethodIndex(dex::kDexNoIndex);
   2187   CHECK(method->IsRuntimeMethod());
   2188   return method;
   2189 }
   2190 
   2191 ArtMethod* Runtime::CreateImtConflictMethod(LinearAlloc* linear_alloc) {
   2192   ClassLinker* const class_linker = GetClassLinker();
   2193   ArtMethod* method = CreateRuntimeMethod(class_linker, linear_alloc);
   2194   // When compiling, the code pointer will get set later when the image is loaded.
   2195   const PointerSize pointer_size = GetInstructionSetPointerSize(instruction_set_);
   2196   if (IsAotCompiler()) {
   2197     method->SetEntryPointFromQuickCompiledCodePtrSize(nullptr, pointer_size);
   2198   } else {
   2199     method->SetEntryPointFromQuickCompiledCode(GetQuickImtConflictStub());
   2200   }
   2201   // Create empty conflict table.
   2202   method->SetImtConflictTable(class_linker->CreateImtConflictTable(/*count=*/0u, linear_alloc),
   2203                               pointer_size);
   2204   return method;
   2205 }
   2206 
// Installs the method used to resolve IMT conflicts; must be a runtime method.
void Runtime::SetImtConflictMethod(ArtMethod* method) {
  CHECK(method != nullptr);
  CHECK(method->IsRuntimeMethod());
  imt_conflict_method_ = method;
}
   2212 
   2213 ArtMethod* Runtime::CreateResolutionMethod() {
   2214   auto* method = CreateRuntimeMethod(GetClassLinker(), GetLinearAlloc());
   2215   // When compiling, the code pointer will get set later when the image is loaded.
   2216   if (IsAotCompiler()) {
   2217     PointerSize pointer_size = GetInstructionSetPointerSize(instruction_set_);
   2218     method->SetEntryPointFromQuickCompiledCodePtrSize(nullptr, pointer_size);
   2219   } else {
   2220     method->SetEntryPointFromQuickCompiledCode(GetQuickResolutionStub());
   2221   }
   2222   return method;
   2223 }
   2224 
   2225 ArtMethod* Runtime::CreateCalleeSaveMethod() {
   2226   auto* method = CreateRuntimeMethod(GetClassLinker(), GetLinearAlloc());
   2227   PointerSize pointer_size = GetInstructionSetPointerSize(instruction_set_);
   2228   method->SetEntryPointFromQuickCompiledCodePtrSize(nullptr, pointer_size);
   2229   DCHECK_NE(instruction_set_, InstructionSet::kNone);
   2230   DCHECK(method->IsRuntimeMethod());
   2231   return method;
   2232 }
   2233 
   2234 void Runtime::DisallowNewSystemWeaks() {
   2235   CHECK(!kUseReadBarrier);
   2236   monitor_list_->DisallowNewMonitors();
   2237   intern_table_->ChangeWeakRootState(gc::kWeakRootStateNoReadsOrWrites);
   2238   java_vm_->DisallowNewWeakGlobals();
   2239   heap_->DisallowNewAllocationRecords();
   2240   if (GetJit() != nullptr) {
   2241     GetJit()->GetCodeCache()->DisallowInlineCacheAccess();
   2242   }
   2243 
   2244   // All other generic system-weak holders.
   2245   for (gc::AbstractSystemWeakHolder* holder : system_weak_holders_) {
   2246     holder->Disallow();
   2247   }
   2248 }
   2249 
   2250 void Runtime::AllowNewSystemWeaks() {
   2251   CHECK(!kUseReadBarrier);
   2252   monitor_list_->AllowNewMonitors();
   2253   intern_table_->ChangeWeakRootState(gc::kWeakRootStateNormal);  // TODO: Do this in the sweeping.
   2254   java_vm_->AllowNewWeakGlobals();
   2255   heap_->AllowNewAllocationRecords();
   2256   if (GetJit() != nullptr) {
   2257     GetJit()->GetCodeCache()->AllowInlineCacheAccess();
   2258   }
   2259 
   2260   // All other generic system-weak holders.
   2261   for (gc::AbstractSystemWeakHolder* holder : system_weak_holders_) {
   2262     holder->Allow();
   2263   }
   2264 }
   2265 
   2266 void Runtime::BroadcastForNewSystemWeaks(bool broadcast_for_checkpoint) {
   2267   // This is used for the read barrier case that uses the thread-local
   2268   // Thread::GetWeakRefAccessEnabled() flag and the checkpoint while weak ref access is disabled
   2269   // (see ThreadList::RunCheckpoint).
   2270   monitor_list_->BroadcastForNewMonitors();
   2271   intern_table_->BroadcastForNewInterns();
   2272   java_vm_->BroadcastForNewWeakGlobals();
   2273   heap_->BroadcastForNewAllocationRecords();
   2274   if (GetJit() != nullptr) {
   2275     GetJit()->GetCodeCache()->BroadcastForInlineCacheAccess();
   2276   }
   2277 
   2278   // All other generic system-weak holders.
   2279   for (gc::AbstractSystemWeakHolder* holder : system_weak_holders_) {
   2280     holder->Broadcast(broadcast_for_checkpoint);
   2281   }
   2282 }
   2283 
   2284 void Runtime::SetInstructionSet(InstructionSet instruction_set) {
   2285   instruction_set_ = instruction_set;
   2286   switch (instruction_set) {
   2287     case InstructionSet::kThumb2:
   2288       // kThumb2 is the same as kArm, use the canonical value.
   2289       instruction_set_ = InstructionSet::kArm;
   2290       break;
   2291     case InstructionSet::kArm:
   2292     case InstructionSet::kArm64:
   2293     case InstructionSet::kMips:
   2294     case InstructionSet::kMips64:
   2295     case InstructionSet::kX86:
   2296     case InstructionSet::kX86_64:
   2297       break;
   2298     default:
   2299       UNIMPLEMENTED(FATAL) << instruction_set_;
   2300       UNREACHABLE();
   2301   }
   2302 }
   2303 
// Resets the instruction set to the kNone sentinel.
void Runtime::ClearInstructionSet() {
  instruction_set_ = InstructionSet::kNone;
}
   2307 
   2308 void Runtime::SetCalleeSaveMethod(ArtMethod* method, CalleeSaveType type) {
   2309   DCHECK_LT(static_cast<uint32_t>(type), kCalleeSaveSize);
   2310   CHECK(method != nullptr);
   2311   callee_save_methods_[static_cast<size_t>(type)] = reinterpret_cast<uintptr_t>(method);
   2312 }
   2313 
   2314 void Runtime::ClearCalleeSaveMethods() {
   2315   for (size_t i = 0; i < kCalleeSaveSize; ++i) {
   2316     callee_save_methods_[i] = reinterpret_cast<uintptr_t>(nullptr);
   2317   }
   2318 }
   2319 
   2320 void Runtime::RegisterAppInfo(const std::vector<std::string>& code_paths,
   2321                               const std::string& profile_output_filename) {
   2322   if (jit_.get() == nullptr) {
   2323     // We are not JITing. Nothing to do.
   2324     return;
   2325   }
   2326 
   2327   VLOG(profiler) << "Register app with " << profile_output_filename
   2328       << " " << android::base::Join(code_paths, ':');
   2329 
   2330   if (profile_output_filename.empty()) {
   2331     LOG(WARNING) << "JIT profile information will not be recorded: profile filename is empty.";
   2332     return;
   2333   }
   2334   if (!OS::FileExists(profile_output_filename.c_str(), /*check_file_type=*/ false)) {
   2335     LOG(WARNING) << "JIT profile information will not be recorded: profile file does not exits.";
   2336     return;
   2337   }
   2338   if (code_paths.empty()) {
   2339     LOG(WARNING) << "JIT profile information will not be recorded: code paths is empty.";
   2340     return;
   2341   }
   2342 
   2343   jit_->StartProfileSaver(profile_output_filename, code_paths);
   2344 }
   2345 
   2346 // Transaction support.
   2347 bool Runtime::IsActiveTransaction() const {
   2348   return !preinitialization_transactions_.empty() && !GetTransaction()->IsRollingBack();
   2349 }
   2350 
   2351 void Runtime::EnterTransactionMode() {
   2352   DCHECK(IsAotCompiler());
   2353   DCHECK(!IsActiveTransaction());
   2354   preinitialization_transactions_.push_back(std::make_unique<Transaction>());
   2355 }
   2356 
   2357 void Runtime::EnterTransactionMode(bool strict, mirror::Class* root) {
   2358   DCHECK(IsAotCompiler());
   2359   preinitialization_transactions_.push_back(std::make_unique<Transaction>(strict, root));
   2360 }
   2361 
// Pops the innermost transaction without rolling back its changes.
void Runtime::ExitTransactionMode() {
  DCHECK(IsAotCompiler());
  DCHECK(IsActiveTransaction());
  preinitialization_transactions_.pop_back();
}
   2367 
   2368 void Runtime::RollbackAndExitTransactionMode() {
   2369   DCHECK(IsAotCompiler());
   2370   DCHECK(IsActiveTransaction());
   2371   preinitialization_transactions_.back()->Rollback();
   2372   preinitialization_transactions_.pop_back();
   2373 }
   2374 
   2375 bool Runtime::IsTransactionAborted() const {
   2376   if (!IsActiveTransaction()) {
   2377     return false;
   2378   } else {
   2379     DCHECK(IsAotCompiler());
   2380     return GetTransaction()->IsAborted();
   2381   }
   2382 }
   2383 
void Runtime::RollbackAllTransactions() {
  // If a transaction was aborted, all transactions are still kept in the
  // list. Roll back and exit every one of them, innermost first.
  while (IsActiveTransaction()) {
    RollbackAndExitTransactionMode();
  }
}
   2391 
   2392 bool Runtime::IsActiveStrictTransactionMode() const {
   2393   return IsActiveTransaction() && GetTransaction()->IsStrict();
   2394 }
   2395 
// Returns the innermost (most recently entered) preinitialization transaction.
const std::unique_ptr<Transaction>& Runtime::GetTransaction() const {
  DCHECK(!preinitialization_transactions_.empty());
  return preinitialization_transactions_.back();
}
   2400 
   2401 void Runtime::AbortTransactionAndThrowAbortError(Thread* self, const std::string& abort_message) {
   2402   DCHECK(IsAotCompiler());
   2403   DCHECK(IsActiveTransaction());
   2404   // Throwing an exception may cause its class initialization. If we mark the transaction
   2405   // aborted before that, we may warn with a false alarm. Throwing the exception before
   2406   // marking the transaction aborted avoids that.
   2407   // But now the transaction can be nested, and abort the transaction will relax the constraints
   2408   // for constructing stack trace.
   2409   GetTransaction()->Abort(abort_message);
   2410   GetTransaction()->ThrowAbortError(self, &abort_message);
   2411 }
   2412 
// Re-throws the abort error of the innermost transaction on `self`.
void Runtime::ThrowTransactionAbortError(Thread* self) {
  DCHECK(IsAotCompiler());
  DCHECK(IsActiveTransaction());
  // Passing nullptr means we rethrow an exception with the earlier transaction abort message.
  GetTransaction()->ThrowAbortError(self, nullptr);
}
   2419 
   2420 void Runtime::RecordWriteFieldBoolean(mirror::Object* obj, MemberOffset field_offset,
   2421                                       uint8_t value, bool is_volatile) const {
   2422   DCHECK(IsAotCompiler());
   2423   DCHECK(IsActiveTransaction());
   2424   GetTransaction()->RecordWriteFieldBoolean(obj, field_offset, value, is_volatile);
   2425 }
   2426 
   2427 void Runtime::RecordWriteFieldByte(mirror::Object* obj, MemberOffset field_offset,
   2428                                    int8_t value, bool is_volatile) const {
   2429   DCHECK(IsAotCompiler());
   2430   DCHECK(IsActiveTransaction());
   2431   GetTransaction()->RecordWriteFieldByte(obj, field_offset, value, is_volatile);
   2432 }
   2433 
   2434 void Runtime::RecordWriteFieldChar(mirror::Object* obj, MemberOffset field_offset,
   2435                                    uint16_t value, bool is_volatile) const {
   2436   DCHECK(IsAotCompiler());
   2437   DCHECK(IsActiveTransaction());
   2438   GetTransaction()->RecordWriteFieldChar(obj, field_offset, value, is_volatile);
   2439 }
   2440 
   2441 void Runtime::RecordWriteFieldShort(mirror::Object* obj, MemberOffset field_offset,
   2442                                     int16_t value, bool is_volatile) const {
   2443   DCHECK(IsAotCompiler());
   2444   DCHECK(IsActiveTransaction());
   2445   GetTransaction()->RecordWriteFieldShort(obj, field_offset, value, is_volatile);
   2446 }
   2447 
   2448 void Runtime::RecordWriteField32(mirror::Object* obj, MemberOffset field_offset,
   2449                                  uint32_t value, bool is_volatile) const {
   2450   DCHECK(IsAotCompiler());
   2451   DCHECK(IsActiveTransaction());
   2452   GetTransaction()->RecordWriteField32(obj, field_offset, value, is_volatile);
   2453 }
   2454 
   2455 void Runtime::RecordWriteField64(mirror::Object* obj, MemberOffset field_offset,
   2456                                  uint64_t value, bool is_volatile) const {
   2457   DCHECK(IsAotCompiler());
   2458   DCHECK(IsActiveTransaction());
   2459   GetTransaction()->RecordWriteField64(obj, field_offset, value, is_volatile);
   2460 }
   2461 
   2462 void Runtime::RecordWriteFieldReference(mirror::Object* obj,
   2463                                         MemberOffset field_offset,
   2464                                         ObjPtr<mirror::Object> value,
   2465                                         bool is_volatile) const {
   2466   DCHECK(IsAotCompiler());
   2467   DCHECK(IsActiveTransaction());
   2468   GetTransaction()->RecordWriteFieldReference(obj,
   2469                                                             field_offset,
   2470                                                             value.Ptr(),
   2471                                                             is_volatile);
   2472 }
   2473 
   2474 void Runtime::RecordWriteArray(mirror::Array* array, size_t index, uint64_t value) const {
   2475   DCHECK(IsAotCompiler());
   2476   DCHECK(IsActiveTransaction());
   2477   GetTransaction()->RecordWriteArray(array, index, value);
   2478 }
   2479 
   2480 void Runtime::RecordStrongStringInsertion(ObjPtr<mirror::String> s) const {
   2481   DCHECK(IsAotCompiler());
   2482   DCHECK(IsActiveTransaction());
   2483   GetTransaction()->RecordStrongStringInsertion(s);
   2484 }
   2485 
   2486 void Runtime::RecordWeakStringInsertion(ObjPtr<mirror::String> s) const {
   2487   DCHECK(IsAotCompiler());
   2488   DCHECK(IsActiveTransaction());
   2489   GetTransaction()->RecordWeakStringInsertion(s);
   2490 }
   2491 
   2492 void Runtime::RecordStrongStringRemoval(ObjPtr<mirror::String> s) const {
   2493   DCHECK(IsAotCompiler());
   2494   DCHECK(IsActiveTransaction());
   2495   GetTransaction()->RecordStrongStringRemoval(s);
   2496 }
   2497 
   2498 void Runtime::RecordWeakStringRemoval(ObjPtr<mirror::String> s) const {
   2499   DCHECK(IsAotCompiler());
   2500   DCHECK(IsActiveTransaction());
   2501   GetTransaction()->RecordWeakStringRemoval(s);
   2502 }
   2503 
   2504 void Runtime::RecordResolveString(ObjPtr<mirror::DexCache> dex_cache,
   2505                                   dex::StringIndex string_idx) const {
   2506   DCHECK(IsAotCompiler());
   2507   DCHECK(IsActiveTransaction());
   2508   GetTransaction()->RecordResolveString(dex_cache, string_idx);
   2509 }
   2510 
   2511 void Runtime::SetFaultMessage(const std::string& message) {
   2512   std::string* new_msg = new std::string(message);
   2513   std::string* cur_msg = fault_message_.exchange(new_msg);
   2514   delete cur_msg;
   2515 }
   2516 
// Returns a copy of the current fault message without racing against a
// concurrent SetFaultMessage() (which frees the message it replaces).
std::string Runtime::GetFaultMessage() {
  // Retrieve the message. Temporarily replace with null so that SetFaultMessage will not delete
  // the string in parallel.
  std::string* cur_msg = fault_message_.exchange(nullptr);

  // Make a copy of the string.
  std::string ret = cur_msg == nullptr ? "" : *cur_msg;

  // Put the message back if it hasn't been updated.
  std::string* null_str = nullptr;
  if (!fault_message_.compare_exchange_strong(null_str, cur_msg)) {
    // Already replaced: a new message was stored concurrently, so the one we
    // took out is no longer reachable and must be freed here.
    delete cur_msg;
  }

  return ret;
}
   2534 
   2535 void Runtime::AddCurrentRuntimeFeaturesAsDex2OatArguments(std::vector<std::string>* argv)
   2536     const {
   2537   if (GetInstrumentation()->InterpretOnly()) {
   2538     argv->push_back("--compiler-filter=quicken");
   2539   }
   2540 
   2541   // Make the dex2oat instruction set match that of the launching runtime. If we have multiple
   2542   // architecture support, dex2oat may be compiled as a different instruction-set than that
   2543   // currently being executed.
   2544   std::string instruction_set("--instruction-set=");
   2545   instruction_set += GetInstructionSetString(kRuntimeISA);
   2546   argv->push_back(instruction_set);
   2547 
   2548   if (InstructionSetFeatures::IsRuntimeDetectionSupported()) {
   2549     argv->push_back("--instruction-set-features=runtime");
   2550   } else {
   2551     std::unique_ptr<const InstructionSetFeatures> features(
   2552         InstructionSetFeatures::FromCppDefines());
   2553     std::string feature_string("--instruction-set-features=");
   2554     feature_string += features->GetFeatureString();
   2555     argv->push_back(feature_string);
   2556   }
   2557 }
   2558 
   2559 void Runtime::CreateJitCodeCache(bool rwx_memory_allowed) {
   2560   if (kIsDebugBuild && GetInstrumentation()->IsForcedInterpretOnly()) {
   2561     DCHECK(!jit_options_->UseJitCompilation());
   2562   }
   2563 
   2564   if (!jit_options_->UseJitCompilation() && !jit_options_->GetSaveProfilingInfo()) {
   2565     return;
   2566   }
   2567 
   2568   std::string error_msg;
   2569   bool profiling_only = !jit_options_->UseJitCompilation();
   2570   jit_code_cache_.reset(jit::JitCodeCache::Create(profiling_only,
   2571                                                   rwx_memory_allowed,
   2572                                                   IsZygote(),
   2573                                                   &error_msg));
   2574   if (jit_code_cache_.get() == nullptr) {
   2575     LOG(WARNING) << "Failed to create JIT Code Cache: " << error_msg;
   2576   }
   2577 }
   2578 
   2579 void Runtime::CreateJit() {
   2580   DCHECK(jit_ == nullptr);
   2581   if (jit_code_cache_.get() == nullptr) {
   2582     if (!IsSafeMode()) {
   2583       LOG(WARNING) << "Missing code cache, cannot create JIT.";
   2584     }
   2585     return;
   2586   }
   2587   if (IsSafeMode()) {
   2588     LOG(INFO) << "Not creating JIT because of SafeMode.";
   2589     jit_code_cache_.reset();
   2590     return;
   2591   }
   2592 
   2593   jit::Jit* jit = jit::Jit::Create(jit_code_cache_.get(), jit_options_.get());
   2594   DoAndMaybeSwitchInterpreter([=](){ jit_.reset(jit); });
   2595   if (jit == nullptr) {
   2596     LOG(WARNING) << "Failed to allocate JIT";
   2597     // Release JIT code cache resources (several MB of memory).
   2598     jit_code_cache_.reset();
   2599   } else {
   2600     jit->CreateThreadPool();
   2601   }
   2602 }
   2603 
// Relocation is allowed in every configuration except the AOT compiler.
bool Runtime::CanRelocate() const {
  return !IsAotCompiler();
}
   2607 
// True only when running as a compiler whose callbacks target the boot image.
bool Runtime::IsCompilingBootImage() const {
  return IsCompiler() && compiler_callbacks_->IsBootImage();
}
   2611 
// Installs the resolution trampoline method; must be a runtime method.
void Runtime::SetResolutionMethod(ArtMethod* method) {
  CHECK(method != nullptr);
  CHECK(method->IsRuntimeMethod()) << method;
  resolution_method_ = method;
}
   2617 
// Installs the IMT unimplemented method; must be a runtime method.
void Runtime::SetImtUnimplementedMethod(ArtMethod* method) {
  CHECK(method != nullptr);
  CHECK(method->IsRuntimeMethod());
  imt_unimplemented_method_ = method;
}
   2623 
   2624 void Runtime::FixupConflictTables() {
   2625   // We can only do this after the class linker is created.
   2626   const PointerSize pointer_size = GetClassLinker()->GetImagePointerSize();
   2627   if (imt_unimplemented_method_->GetImtConflictTable(pointer_size) == nullptr) {
   2628     imt_unimplemented_method_->SetImtConflictTable(
   2629         ClassLinker::CreateImtConflictTable(/*count=*/0u, GetLinearAlloc(), pointer_size),
   2630         pointer_size);
   2631   }
   2632   if (imt_conflict_method_->GetImtConflictTable(pointer_size) == nullptr) {
   2633     imt_conflict_method_->SetImtConflictTable(
   2634           ClassLinker::CreateImtConflictTable(/*count=*/0u, GetLinearAlloc(), pointer_size),
   2635           pointer_size);
   2636   }
   2637 }
   2638 
// Turns off class verification for this runtime.
void Runtime::DisableVerifier() {
  verify_ = verifier::VerifyMode::kNone;
}
   2642 
   2643 bool Runtime::IsVerificationEnabled() const {
   2644   return verify_ == verifier::VerifyMode::kEnable ||
   2645       verify_ == verifier::VerifyMode::kSoftFail;
   2646 }
   2647 
// True when the verifier runs but failures are downgraded to soft failures.
bool Runtime::IsVerificationSoftFail() const {
  return verify_ == verifier::VerifyMode::kSoftFail;
}
   2651 
   2652 bool Runtime::IsAsyncDeoptimizeable(uintptr_t code) const {
   2653   // We only support async deopt (ie the compiled code is not explicitly asking for
   2654   // deopt, but something else like the debugger) in debuggable JIT code.
   2655   // We could look at the oat file where `code` is being defined,
   2656   // and check whether it's been compiled debuggable, but we decided to
   2657   // only rely on the JIT for debuggable apps.
   2658   return IsJavaDebuggable() &&
   2659       GetJit() != nullptr &&
   2660       GetJit()->GetCodeCache()->ContainsPc(reinterpret_cast<const void*>(code));
   2661 }
   2662 
   2663 LinearAlloc* Runtime::CreateLinearAlloc() {
   2664   // For 64 bit compilers, it needs to be in low 4GB in the case where we are cross compiling for a
   2665   // 32 bit target. In this case, we have 32 bit pointers in the dex cache arrays which can't hold
   2666   // when we have 64 bit ArtMethod pointers.
   2667   return (IsAotCompiler() && Is64BitInstructionSet(kRuntimeISA))
   2668       ? new LinearAlloc(low_4gb_arena_pool_.get())
   2669       : new LinearAlloc(arena_pool_.get());
   2670 }
   2671 
   2672 double Runtime::GetHashTableMinLoadFactor() const {
   2673   return is_low_memory_mode_ ? kLowMemoryMinLoadFactor : kNormalMinLoadFactor;
   2674 }
   2675 
   2676 double Runtime::GetHashTableMaxLoadFactor() const {
   2677   return is_low_memory_mode_ ? kLowMemoryMaxLoadFactor : kNormalMaxLoadFactor;
   2678 }
   2679 
   2680 void Runtime::UpdateProcessState(ProcessState process_state) {
   2681   ProcessState old_process_state = process_state_;
   2682   process_state_ = process_state;
   2683   GetHeap()->UpdateProcessState(old_process_state, process_state);
   2684 }
   2685 
// Marks the calling thread as JIT-sensitive (see Thread::SetJitSensitiveThread).
void Runtime::RegisterSensitiveThread() const {
  Thread::SetJitSensitiveThread();
}
   2689 
   2690 // Returns true if JIT compilations are enabled. GetJit() will be not null in this case.
   2691 bool Runtime::UseJitCompilation() const {
   2692   return (jit_ != nullptr) && jit_->UseJitCompilation();
   2693 }
   2694 
   2695 void Runtime::EnvSnapshot::TakeSnapshot() {
   2696   char** env = GetEnviron();
   2697   for (size_t i = 0; env[i] != nullptr; ++i) {
   2698     name_value_pairs_.emplace_back(new std::string(env[i]));
   2699   }
   2700   // The strings in name_value_pairs_ retain ownership of the c_str, but we assign pointers
   2701   // for quick use by GetSnapshot.  This avoids allocation and copying cost at Exec.
   2702   c_env_vector_.reset(new char*[name_value_pairs_.size() + 1]);
   2703   for (size_t i = 0; env[i] != nullptr; ++i) {
   2704     c_env_vector_[i] = const_cast<char*>(name_value_pairs_[i]->c_str());
   2705   }
   2706   c_env_vector_[name_value_pairs_.size()] = nullptr;
   2707 }
   2708 
// Returns the null-terminated environment array built by TakeSnapshot().
char** Runtime::EnvSnapshot::GetSnapshot() const {
  return c_env_vector_.get();
}
   2712 
   2713 void Runtime::AddSystemWeakHolder(gc::AbstractSystemWeakHolder* holder) {
   2714   gc::ScopedGCCriticalSection gcs(Thread::Current(),
   2715                                   gc::kGcCauseAddRemoveSystemWeakHolder,
   2716                                   gc::kCollectorTypeAddRemoveSystemWeakHolder);
   2717   // Note: The ScopedGCCriticalSection also ensures that the rest of the function is in
   2718   //       a critical section.
   2719   system_weak_holders_.push_back(holder);
   2720 }
   2721 
   2722 void Runtime::RemoveSystemWeakHolder(gc::AbstractSystemWeakHolder* holder) {
   2723   gc::ScopedGCCriticalSection gcs(Thread::Current(),
   2724                                   gc::kGcCauseAddRemoveSystemWeakHolder,
   2725                                   gc::kCollectorTypeAddRemoveSystemWeakHolder);
   2726   auto it = std::find(system_weak_holders_.begin(), system_weak_holders_.end(), holder);
   2727   if (it != system_weak_holders_.end()) {
   2728     system_weak_holders_.erase(it);
   2729   }
   2730 }
   2731 
// Returns the runtime callbacks object (owned by this Runtime).
RuntimeCallbacks* Runtime::GetRuntimeCallbacks() {
  return callbacks_.get();
}
   2735 
   2736 // Used to patch boot image method entry point to interpreter bridge.
   2737 class UpdateEntryPointsClassVisitor : public ClassVisitor {
   2738  public:
   2739   explicit UpdateEntryPointsClassVisitor(instrumentation::Instrumentation* instrumentation)
   2740       : instrumentation_(instrumentation) {}
   2741 
   2742   bool operator()(ObjPtr<mirror::Class> klass) override REQUIRES(Locks::mutator_lock_) {
   2743     DCHECK(Locks::mutator_lock_->IsExclusiveHeld(Thread::Current()));
   2744     auto pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
   2745     for (auto& m : klass->GetMethods(pointer_size)) {
   2746       const void* code = m.GetEntryPointFromQuickCompiledCode();
   2747       if (Runtime::Current()->GetHeap()->IsInBootImageOatFile(code) &&
   2748           !m.IsNative() &&
   2749           !m.IsProxyMethod()) {
   2750         instrumentation_->UpdateMethodsCodeForJavaDebuggable(&m, GetQuickToInterpreterBridge());
   2751       }
   2752     }
   2753     return true;
   2754   }
   2755 
   2756  private:
   2757   instrumentation::Instrumentation* const instrumentation_;
   2758 };
   2759 
   2760 void Runtime::SetJavaDebuggable(bool value) {
   2761   is_java_debuggable_ = value;
   2762   // Do not call DeoptimizeBootImage just yet, the runtime may still be starting up.
   2763 }
   2764 
   2765 void Runtime::DeoptimizeBootImage() {
   2766   // If we've already started and we are setting this runtime to debuggable,
   2767   // we patch entry points of methods in boot image to interpreter bridge, as
   2768   // boot image code may be AOT compiled as not debuggable.
   2769   if (!GetInstrumentation()->IsForcedInterpretOnly()) {
   2770     UpdateEntryPointsClassVisitor visitor(GetInstrumentation());
   2771     GetClassLinker()->VisitClasses(&visitor);
   2772     jit::Jit* jit = GetJit();
   2773     if (jit != nullptr) {
   2774       // Code JITted by the zygote is not compiled debuggable.
   2775       jit->GetCodeCache()->ClearEntryPointsInZygoteExecSpace();
   2776     }
   2777   }
   2778 }
   2779 
// RAII usage token: acquiring takes a reference on the runtime thread pool so
// it cannot be deleted while this object is alive (see AcquireThreadPool).
Runtime::ScopedThreadPoolUsage::ScopedThreadPoolUsage()
    : thread_pool_(Runtime::Current()->AcquireThreadPool()) {}

// Drops the reference taken in the constructor.
Runtime::ScopedThreadPoolUsage::~ScopedThreadPoolUsage() {
  Runtime::Current()->ReleaseThreadPool();
}
   2786 
   2787 bool Runtime::DeleteThreadPool() {
   2788   // Make sure workers are started to prevent thread shutdown errors.
   2789   WaitForThreadPoolWorkersToStart();
   2790   std::unique_ptr<ThreadPool> thread_pool;
   2791   {
   2792     MutexLock mu(Thread::Current(), *Locks::runtime_thread_pool_lock_);
   2793     if (thread_pool_ref_count_ == 0) {
   2794       thread_pool = std::move(thread_pool_);
   2795     }
   2796   }
   2797   return thread_pool != nullptr;
   2798 }
   2799 
// Takes a reference on the runtime thread pool, preventing DeleteThreadPool
// from destroying it until a matching ReleaseThreadPool call. May return
// null if the pool has already been deleted.
ThreadPool* Runtime::AcquireThreadPool() {
  MutexLock mu(Thread::Current(), *Locks::runtime_thread_pool_lock_);
  ++thread_pool_ref_count_;
  return thread_pool_.get();
}
   2805 
// Drops a reference taken by AcquireThreadPool. Aborts on an unbalanced
// release (ref count already zero).
void Runtime::ReleaseThreadPool() {
  MutexLock mu(Thread::Current(), *Locks::runtime_thread_pool_lock_);
  CHECK_GT(thread_pool_ref_count_, 0u);
  --thread_pool_ref_count_;
}
   2811 
   2812 void Runtime::WaitForThreadPoolWorkersToStart() {
   2813   // Need to make sure workers are created before deleting the pool.
   2814   ScopedThreadPoolUsage stpu;
   2815   if (stpu.GetThreadPool() != nullptr) {
   2816     stpu.GetThreadPool()->WaitForWorkersToBeCreated();
   2817   }
   2818 }
   2819 
// Marks application startup as finished (at most once) and releases
// startup-only resources: app image pre-resolved-string metadata and the
// runtime thread pool. Safe to call multiple times; only the first call wins.
void Runtime::NotifyStartupCompleted() {
  bool expected = false;
  // CAS guarantees exactly one caller performs the teardown below.
  if (!startup_completed_.compare_exchange_strong(expected, true, std::memory_order_seq_cst)) {
    // Right now NotifyStartupCompleted will be called up to twice, once from profiler and up to
    // once externally. For this reason there are no asserts.
    return;
  }
  VLOG(startup) << "Startup completed notified";

  {
    ScopedTrace trace("Releasing app image spaces metadata");
    ScopedObjectAccess soa(Thread::Current());
    // First pass: stop new accesses to the pre-resolved-strings section of
    // every app image space before it is released below.
    for (gc::space::ContinuousSpace* space : GetHeap()->GetContinuousSpaces()) {
      if (space->IsImageSpace()) {
        gc::space::ImageSpace* image_space = space->AsImageSpace();
        if (image_space->GetImageHeader().IsAppImage()) {
          image_space->DisablePreResolvedStrings();
        }
      }
    }
    // Request empty checkpoint to make sure no threads are accessing the section when we madvise
    // it. Avoid using RunEmptyCheckpoint since only one concurrent caller is supported. We could
    // add a GC critical section here but that may cause significant jank if the GC is running.
    {
      // Each thread that runs this closure just signals the barrier; once all
      // have passed, no thread can still be inside the disabled section.
      class EmptyClosure : public Closure {
       public:
        explicit EmptyClosure(Barrier* barrier) : barrier_(barrier) {}
        void Run(Thread* thread ATTRIBUTE_UNUSED) override {
          barrier_->Pass(Thread::Current());
        }

       private:
        Barrier* const barrier_;
      };
      Barrier barrier(0);
      EmptyClosure closure(&barrier);
      size_t threads_running_checkpoint = GetThreadList()->RunCheckpoint(&closure);
      // Now that we have run our checkpoint, move to a suspended state and wait
      // for other threads to run the checkpoint.
      Thread* self = Thread::Current();
      ScopedThreadSuspension sts(self, kSuspended);
      if (threads_running_checkpoint != 0) {
        barrier.Increment(self, threads_running_checkpoint);
      }
    }
    // Second pass: with all threads checkpointed, it is now safe to release
    // (madvise away) the metadata of every app image space.
    for (gc::space::ContinuousSpace* space : GetHeap()->GetContinuousSpaces()) {
      if (space->IsImageSpace()) {
        gc::space::ImageSpace* image_space = space->AsImageSpace();
        if (image_space->GetImageHeader().IsAppImage()) {
          image_space->ReleaseMetadata();
        }
      }
    }
  }

  // Notify the profiler saver that startup is now completed.
  ProfileSaver::NotifyStartupCompleted();

  {
    // Delete the thread pool used for app image loading, now that startup is completed.
    ScopedTrace trace2("Delete thread pool");
    DeleteThreadPool();
  }
}
   2884 
   2885 bool Runtime::GetStartupCompleted() const {
   2886   return startup_completed_.load(std::memory_order_seq_cst);
   2887 }
   2888 
   2889 }  // namespace art
   2890