      1 /*
      2  * Copyright (C) 2011 The Android Open Source Project
      3  *
      4  * Licensed under the Apache License, Version 2.0 (the "License");
      5  * you may not use this file except in compliance with the License.
      6  * You may obtain a copy of the License at
      7  *
      8  *      http://www.apache.org/licenses/LICENSE-2.0
      9  *
     10  * Unless required by applicable law or agreed to in writing, software
     11  * distributed under the License is distributed on an "AS IS" BASIS,
     12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
     13  * See the License for the specific language governing permissions and
     14  * limitations under the License.
     15  */
     16 
     17 #ifndef ART_RUNTIME_RUNTIME_H_
     18 #define ART_RUNTIME_RUNTIME_H_
     19 
     20 #include <jni.h>
     21 #include <stdio.h>
     22 
     23 #include <iosfwd>
     24 #include <set>
     25 #include <string>
     26 #include <utility>
     27 #include <vector>
     28 
     29 #include "arch/instruction_set.h"
     30 #include "base/macros.h"
     31 #include "base/mutex.h"
     32 #include "deoptimization_kind.h"
     33 #include "dex_file_types.h"
     34 #include "experimental_flags.h"
     35 #include "gc_root.h"
     36 #include "instrumentation.h"
     37 #include "obj_ptr.h"
     38 #include "offsets.h"
     39 #include "process_state.h"
     40 #include "quick/quick_method_frame_info.h"
     41 #include "runtime_stats.h"
     42 
     43 namespace art {
     44 
     45 namespace gc {
     46   class AbstractSystemWeakHolder;
     47   class Heap;
     48 }  // namespace gc
     49 
     50 namespace jit {
     51   class Jit;
     52   class JitOptions;
     53 }  // namespace jit
     54 
     55 namespace mirror {
     56   class Array;
     57   class ClassLoader;
     58   class DexCache;
     59   template<class T> class ObjectArray;
     60   template<class T> class PrimitiveArray;
     61   typedef PrimitiveArray<int8_t> ByteArray;
     62   class String;
     63   class Throwable;
     64 }  // namespace mirror
     65 namespace ti {
     66   class Agent;
     67 }  // namespace ti
     68 namespace verifier {
     69   class MethodVerifier;
     70   enum class VerifyMode : int8_t;
     71 }  // namespace verifier
     72 class ArenaPool;
     73 class ArtMethod;
     74 enum class CalleeSaveType: uint32_t;
     75 class ClassLinker;
     76 class CompilerCallbacks;
     77 class DexFile;
     78 class InternTable;
     79 class IsMarkedVisitor;
     80 class JavaVMExt;
     81 class LinearAlloc;
     82 class MemMap;
     83 class MonitorList;
     84 class MonitorPool;
     85 class NullPointerHandler;
     86 class OatFileManager;
     87 class Plugin;
     88 struct RuntimeArgumentMap;
     89 class RuntimeCallbacks;
     90 class SignalCatcher;
     91 class StackOverflowHandler;
     92 class SuspensionHandler;
     93 class ThreadList;
     94 class Trace;
     95 struct TraceConfig;
     96 class Transaction;
     97 
     98 typedef std::vector<std::pair<std::string, const void*>> RuntimeOptions;
     99 
    100 class Runtime {
    101  public:
    102   // Parse raw runtime options.
    103   static bool ParseOptions(const RuntimeOptions& raw_options,
    104                            bool ignore_unrecognized,
    105                            RuntimeArgumentMap* runtime_options);
    106 
    107   // Creates and initializes a new runtime.
    108   static bool Create(RuntimeArgumentMap&& runtime_options)
    109       SHARED_TRYLOCK_FUNCTION(true, Locks::mutator_lock_);
    110 
    111   // Creates and initializes a new runtime.
    112   static bool Create(const RuntimeOptions& raw_options, bool ignore_unrecognized)
    113       SHARED_TRYLOCK_FUNCTION(true, Locks::mutator_lock_);
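
  // A minimal bring-up sketch (illustrative, not part of this header). It assumes the embedder
  // passes JVM-style option strings; the specific option shown is just an example:
  //
  //   RuntimeOptions raw_options;
  //   raw_options.push_back(std::make_pair("-Xmx64m", nullptr));
  //   if (Runtime::Create(raw_options, /* ignore_unrecognized */ false)) {
  //     Runtime::Current()->Start();  // Releases the shared mutator lock acquired by Create().
  //   }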
    114 
  // IsAotCompiler is true for compilers that don't have a running runtime; currently only dex2oat.
    116   bool IsAotCompiler() const {
    117     return !UseJitCompilation() && IsCompiler();
    118   }
    119 
  // IsCompiler is true for any runtime that has a running compiler, either dex2oat or the JIT.
    121   bool IsCompiler() const {
    122     return compiler_callbacks_ != nullptr;
    123   }
    124 
    125   // If a compiler, are we compiling a boot image?
    126   bool IsCompilingBootImage() const;
    127 
    128   bool CanRelocate() const;
    129 
    130   bool ShouldRelocate() const {
    131     return must_relocate_ && CanRelocate();
    132   }
    133 
    134   bool MustRelocateIfPossible() const {
    135     return must_relocate_;
    136   }
    137 
    138   bool IsDex2OatEnabled() const {
    139     return dex2oat_enabled_ && IsImageDex2OatEnabled();
    140   }
    141 
    142   bool IsImageDex2OatEnabled() const {
    143     return image_dex2oat_enabled_;
    144   }
    145 
    146   CompilerCallbacks* GetCompilerCallbacks() {
    147     return compiler_callbacks_;
    148   }
    149 
    150   void SetCompilerCallbacks(CompilerCallbacks* callbacks) {
    151     CHECK(callbacks != nullptr);
    152     compiler_callbacks_ = callbacks;
    153   }
    154 
    155   bool IsZygote() const {
    156     return is_zygote_;
    157   }
    158 
    159   bool IsExplicitGcDisabled() const {
    160     return is_explicit_gc_disabled_;
    161   }
    162 
    163   std::string GetCompilerExecutable() const;
    164   std::string GetPatchoatExecutable() const;
    165 
    166   const std::vector<std::string>& GetCompilerOptions() const {
    167     return compiler_options_;
    168   }
    169 
    170   void AddCompilerOption(const std::string& option) {
    171     compiler_options_.push_back(option);
    172   }
    173 
    174   const std::vector<std::string>& GetImageCompilerOptions() const {
    175     return image_compiler_options_;
    176   }
    177 
    178   const std::string& GetImageLocation() const {
    179     return image_location_;
    180   }
    181 
    182   // Starts a runtime, which may cause threads to be started and code to run.
    183   bool Start() UNLOCK_FUNCTION(Locks::mutator_lock_);
    184 
    185   bool IsShuttingDown(Thread* self);
    186   bool IsShuttingDownLocked() const REQUIRES(Locks::runtime_shutdown_lock_) {
    187     return shutting_down_;
    188   }
    189 
    190   size_t NumberOfThreadsBeingBorn() const REQUIRES(Locks::runtime_shutdown_lock_) {
    191     return threads_being_born_;
    192   }
    193 
    194   void StartThreadBirth() REQUIRES(Locks::runtime_shutdown_lock_) {
    195     threads_being_born_++;
    196   }
    197 
    198   void EndThreadBirth() REQUIRES(Locks::runtime_shutdown_lock_);
    199 
    200   bool IsStarted() const {
    201     return started_;
    202   }
    203 
    204   bool IsFinishedStarting() const {
    205     return finished_starting_;
    206   }
    207 
    208   static Runtime* Current() {
    209     return instance_;
    210   }
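
  // Illustrative sketch (not part of this header): Current() returns null until a runtime has
  // been created, so embedder-side code typically guards on it.
  //
  //   Runtime* runtime = Runtime::Current();
  //   if (runtime != nullptr && runtime->IsStarted()) {
  //     // Safe to use runtime services here.
  //   }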
    211 
    212   // Aborts semi-cleanly. Used in the implementation of LOG(FATAL), which most
    213   // callers should prefer.
    214   NO_RETURN static void Abort(const char* msg) REQUIRES(!Locks::abort_lock_);
    215 
    216   // Returns the "main" ThreadGroup, used when attaching user threads.
    217   jobject GetMainThreadGroup() const;
    218 
    219   // Returns the "system" ThreadGroup, used when attaching our internal threads.
    220   jobject GetSystemThreadGroup() const;
    221 
    222   // Returns the system ClassLoader which represents the CLASSPATH.
    223   jobject GetSystemClassLoader() const;
    224 
    225   // Attaches the calling native thread to the runtime.
    226   bool AttachCurrentThread(const char* thread_name, bool as_daemon, jobject thread_group,
    227                            bool create_peer);
    228 
    229   void CallExitHook(jint status);
    230 
    231   // Detaches the current native thread from the runtime.
    232   void DetachCurrentThread() REQUIRES(!Locks::mutator_lock_);
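
  // Illustrative sketch (not part of this header): a native thread brackets its use of the
  // runtime with attach/detach. The thread name and argument values below are assumptions.
  //
  //   Runtime* runtime = Runtime::Current();
  //   if (runtime->AttachCurrentThread("worker", /* as_daemon */ false,
  //                                    runtime->GetSystemThreadGroup(),
  //                                    /* create_peer */ true)) {
  //     // ... code that uses the runtime ...
  //     runtime->DetachCurrentThread();
  //   }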
    233 
    234   void DumpDeoptimizations(std::ostream& os);
    235   void DumpForSigQuit(std::ostream& os);
    236   void DumpLockHolders(std::ostream& os);
    237 
    238   ~Runtime();
    239 
    240   const std::string& GetBootClassPathString() const {
    241     return boot_class_path_string_;
    242   }
    243 
    244   const std::string& GetClassPathString() const {
    245     return class_path_string_;
    246   }
    247 
    248   ClassLinker* GetClassLinker() const {
    249     return class_linker_;
    250   }
    251 
    252   size_t GetDefaultStackSize() const {
    253     return default_stack_size_;
    254   }
    255 
    256   gc::Heap* GetHeap() const {
    257     return heap_;
    258   }
    259 
    260   InternTable* GetInternTable() const {
    261     DCHECK(intern_table_ != nullptr);
    262     return intern_table_;
    263   }
    264 
    265   JavaVMExt* GetJavaVM() const {
    266     return java_vm_.get();
    267   }
    268 
    269   size_t GetMaxSpinsBeforeThinLockInflation() const {
    270     return max_spins_before_thin_lock_inflation_;
    271   }
    272 
    273   MonitorList* GetMonitorList() const {
    274     return monitor_list_;
    275   }
    276 
    277   MonitorPool* GetMonitorPool() const {
    278     return monitor_pool_;
    279   }
    280 
    281   // Is the given object the special object used to mark a cleared JNI weak global?
    282   bool IsClearedJniWeakGlobal(ObjPtr<mirror::Object> obj) REQUIRES_SHARED(Locks::mutator_lock_);
    283 
    284   // Get the special object used to mark a cleared JNI weak global.
    285   mirror::Object* GetClearedJniWeakGlobal() REQUIRES_SHARED(Locks::mutator_lock_);
    286 
    287   mirror::Throwable* GetPreAllocatedOutOfMemoryError() REQUIRES_SHARED(Locks::mutator_lock_);
    288 
    289   mirror::Throwable* GetPreAllocatedNoClassDefFoundError()
    290       REQUIRES_SHARED(Locks::mutator_lock_);
    291 
    292   const std::vector<std::string>& GetProperties() const {
    293     return properties_;
    294   }
    295 
    296   ThreadList* GetThreadList() const {
    297     return thread_list_;
    298   }
    299 
    300   static const char* GetVersion() {
    301     return "2.1.0";
    302   }
    303 
    304   bool IsMethodHandlesEnabled() const {
    305     return true;
    306   }
    307 
    308   void DisallowNewSystemWeaks() REQUIRES_SHARED(Locks::mutator_lock_);
    309   void AllowNewSystemWeaks() REQUIRES_SHARED(Locks::mutator_lock_);
  // broadcast_for_checkpoint is true when we broadcast to make blocked threads respond to
  // checkpoint requests. It's false when we broadcast to unblock blocked threads after system weak
  // access is re-enabled.
    313   void BroadcastForNewSystemWeaks(bool broadcast_for_checkpoint = false);
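
  // Illustrative sketch (not part of this header) of the system-weak access protocol implied by
  // the three declarations above; when and where the GC performs these steps is an assumption:
  //
  //   Runtime* runtime = Runtime::Current();
  //   runtime->DisallowNewSystemWeaks();
  //   // ... sweep or otherwise process system weaks while new accesses are blocked ...
  //   runtime->AllowNewSystemWeaks();
  //   runtime->BroadcastForNewSystemWeaks();  // Wake threads that blocked on system weak access.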
    314 
  // Visit all the roots. The set of roots visited is controlled by |flags|.
    317   void VisitRoots(RootVisitor* visitor, VisitRootFlags flags = kVisitRootFlagAllRoots)
    318       REQUIRES(!Locks::classlinker_classes_lock_, !Locks::trace_lock_)
    319       REQUIRES_SHARED(Locks::mutator_lock_);
    320 
  // Visit image roots; only used for hprof, since the GC uses the image space mod union table
  // instead.
    323   void VisitImageRoots(RootVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);
    324 
  // Visit all of the roots we can safely visit concurrently.
    326   void VisitConcurrentRoots(RootVisitor* visitor,
    327                             VisitRootFlags flags = kVisitRootFlagAllRoots)
    328       REQUIRES(!Locks::classlinker_classes_lock_, !Locks::trace_lock_)
    329       REQUIRES_SHARED(Locks::mutator_lock_);
    330 
  // Visit all of the non-thread roots; we can do this with mutators unpaused.
    332   void VisitNonThreadRoots(RootVisitor* visitor)
    333       REQUIRES_SHARED(Locks::mutator_lock_);
    334 
    335   void VisitTransactionRoots(RootVisitor* visitor)
    336       REQUIRES_SHARED(Locks::mutator_lock_);
    337 
  // Sweep system weaks: a system weak is deleted if the visitor returns null; otherwise, the
  // system weak is updated to the visitor's returned value.
    340   void SweepSystemWeaks(IsMarkedVisitor* visitor)
    341       REQUIRES_SHARED(Locks::mutator_lock_);
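
  // Illustrative sketch (not part of this header), assuming IsMarkedVisitor exposes a virtual
  // mirror::Object* IsMarked(mirror::Object*) method:
  //
  //   class ClearAllVisitor : public IsMarkedVisitor {
  //    public:
  //     mirror::Object* IsMarked(mirror::Object* obj ATTRIBUTE_UNUSED) override {
  //       return nullptr;  // Returning null deletes the weak entry; returning a pointer keeps it.
  //     }
  //   };
  //   // Calling SweepSystemWeaks with such a visitor would clear every system weak.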
    342 
    343   // Returns a special method that calls into a trampoline for runtime method resolution
    344   ArtMethod* GetResolutionMethod();
    345 
    346   bool HasResolutionMethod() const {
    347     return resolution_method_ != nullptr;
    348   }
    349 
    350   void SetResolutionMethod(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);
    351   void ClearResolutionMethod() {
    352     resolution_method_ = nullptr;
    353   }
    354 
    355   ArtMethod* CreateResolutionMethod() REQUIRES_SHARED(Locks::mutator_lock_);
    356 
    357   // Returns a special method that calls into a trampoline for runtime imt conflicts.
    358   ArtMethod* GetImtConflictMethod();
    359   ArtMethod* GetImtUnimplementedMethod();
    360 
    361   bool HasImtConflictMethod() const {
    362     return imt_conflict_method_ != nullptr;
    363   }
    364 
    365   void ClearImtConflictMethod() {
    366     imt_conflict_method_ = nullptr;
    367   }
    368 
    369   void FixupConflictTables();
    370   void SetImtConflictMethod(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);
    371   void SetImtUnimplementedMethod(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);
    372 
    373   ArtMethod* CreateImtConflictMethod(LinearAlloc* linear_alloc)
    374       REQUIRES_SHARED(Locks::mutator_lock_);
    375 
    376   void ClearImtUnimplementedMethod() {
    377     imt_unimplemented_method_ = nullptr;
    378   }
    379 
    380   bool HasCalleeSaveMethod(CalleeSaveType type) const {
    381     return callee_save_methods_[static_cast<size_t>(type)] != 0u;
    382   }
    383 
    384   ArtMethod* GetCalleeSaveMethod(CalleeSaveType type)
    385       REQUIRES_SHARED(Locks::mutator_lock_);
    386 
    387   ArtMethod* GetCalleeSaveMethodUnchecked(CalleeSaveType type)
    388       REQUIRES_SHARED(Locks::mutator_lock_);
    389 
    390   QuickMethodFrameInfo GetCalleeSaveMethodFrameInfo(CalleeSaveType type) const {
    391     return callee_save_method_frame_infos_[static_cast<size_t>(type)];
    392   }
    393 
    394   QuickMethodFrameInfo GetRuntimeMethodFrameInfo(ArtMethod* method)
    395       REQUIRES_SHARED(Locks::mutator_lock_);
    396 
    397   static size_t GetCalleeSaveMethodOffset(CalleeSaveType type) {
    398     return OFFSETOF_MEMBER(Runtime, callee_save_methods_[static_cast<size_t>(type)]);
    399   }
    400 
    401   InstructionSet GetInstructionSet() const {
    402     return instruction_set_;
    403   }
    404 
    405   void SetInstructionSet(InstructionSet instruction_set);
    406   void ClearInstructionSet();
    407 
    408   void SetCalleeSaveMethod(ArtMethod* method, CalleeSaveType type);
    409   void ClearCalleeSaveMethods();
    410 
    411   ArtMethod* CreateCalleeSaveMethod() REQUIRES_SHARED(Locks::mutator_lock_);
    412 
    413   int32_t GetStat(int kind);
    414 
    415   RuntimeStats* GetStats() {
    416     return &stats_;
    417   }
    418 
    419   bool HasStatsEnabled() const {
    420     return stats_enabled_;
    421   }
    422 
    423   void ResetStats(int kinds);
    424 
    425   void SetStatsEnabled(bool new_state)
    426       REQUIRES(!Locks::instrument_entrypoints_lock_, !Locks::mutator_lock_);
    427 
    428   enum class NativeBridgeAction {  // private
    429     kUnload,
    430     kInitialize
    431   };
    432 
    433   jit::Jit* GetJit() const {
    434     return jit_.get();
    435   }
    436 
  // Returns true if JIT compilation is enabled. GetJit() will not return null in this case.
    438   bool UseJitCompilation() const;
    439 
    440   void PreZygoteFork();
    441   void InitNonZygoteOrPostFork(
    442       JNIEnv* env, bool is_system_server, NativeBridgeAction action, const char* isa);
    443 
    444   const instrumentation::Instrumentation* GetInstrumentation() const {
    445     return &instrumentation_;
    446   }
    447 
    448   instrumentation::Instrumentation* GetInstrumentation() {
    449     return &instrumentation_;
    450   }
    451 
    452   void RegisterAppInfo(const std::vector<std::string>& code_paths,
    453                        const std::string& profile_output_filename);
    454 
    455   // Transaction support.
    456   bool IsActiveTransaction() const {
    457     return preinitialization_transaction_ != nullptr;
    458   }
    459   void EnterTransactionMode(Transaction* transaction);
    460   void ExitTransactionMode();
    461   bool IsTransactionAborted() const;
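
  // Illustrative sketch (not part of this header) of the compile-time transaction protocol
  // suggested by these declarations; constructing a Transaction directly is an assumption and
  // error handling is elided:
  //
  //   Transaction transaction;
  //   Runtime::Current()->EnterTransactionMode(&transaction);
  //   // ... class initialization whose effects are recorded via the RecordWrite* /
  //   //     RecordResolveString hooks declared below ...
  //   Runtime::Current()->ExitTransactionMode();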
    462 
    463   void AbortTransactionAndThrowAbortError(Thread* self, const std::string& abort_message)
    464       REQUIRES_SHARED(Locks::mutator_lock_);
    465   void ThrowTransactionAbortError(Thread* self)
    466       REQUIRES_SHARED(Locks::mutator_lock_);
    467 
    468   void RecordWriteFieldBoolean(mirror::Object* obj, MemberOffset field_offset, uint8_t value,
    469                                bool is_volatile) const;
    470   void RecordWriteFieldByte(mirror::Object* obj, MemberOffset field_offset, int8_t value,
    471                             bool is_volatile) const;
    472   void RecordWriteFieldChar(mirror::Object* obj, MemberOffset field_offset, uint16_t value,
    473                             bool is_volatile) const;
    474   void RecordWriteFieldShort(mirror::Object* obj, MemberOffset field_offset, int16_t value,
    475                           bool is_volatile) const;
    476   void RecordWriteField32(mirror::Object* obj, MemberOffset field_offset, uint32_t value,
    477                           bool is_volatile) const;
    478   void RecordWriteField64(mirror::Object* obj, MemberOffset field_offset, uint64_t value,
    479                           bool is_volatile) const;
    480   void RecordWriteFieldReference(mirror::Object* obj,
    481                                  MemberOffset field_offset,
    482                                  ObjPtr<mirror::Object> value,
    483                                  bool is_volatile) const
    484       REQUIRES_SHARED(Locks::mutator_lock_);
    485   void RecordWriteArray(mirror::Array* array, size_t index, uint64_t value) const
    486       REQUIRES_SHARED(Locks::mutator_lock_);
    487   void RecordStrongStringInsertion(ObjPtr<mirror::String> s) const
    488       REQUIRES(Locks::intern_table_lock_);
    489   void RecordWeakStringInsertion(ObjPtr<mirror::String> s) const
    490       REQUIRES(Locks::intern_table_lock_);
    491   void RecordStrongStringRemoval(ObjPtr<mirror::String> s) const
    492       REQUIRES(Locks::intern_table_lock_);
    493   void RecordWeakStringRemoval(ObjPtr<mirror::String> s) const
    494       REQUIRES(Locks::intern_table_lock_);
    495   void RecordResolveString(ObjPtr<mirror::DexCache> dex_cache, dex::StringIndex string_idx) const
    496       REQUIRES_SHARED(Locks::mutator_lock_);
    497 
    498   void SetFaultMessage(const std::string& message) REQUIRES(!fault_message_lock_);
  // Only read by the signal handler; NO_THREAD_SAFETY_ANALYSIS is used to prevent lock order
  // violations with the unexpected_signal_lock_.
    501   const std::string& GetFaultMessage() NO_THREAD_SAFETY_ANALYSIS {
    502     return fault_message_;
    503   }
    504 
    505   void AddCurrentRuntimeFeaturesAsDex2OatArguments(std::vector<std::string>* arg_vector) const;
    506 
    507   bool ExplicitStackOverflowChecks() const {
    508     return !implicit_so_checks_;
    509   }
    510 
    511   bool IsVerificationEnabled() const;
    512   bool IsVerificationSoftFail() const;
    513 
    514   bool IsDexFileFallbackEnabled() const {
    515     return allow_dex_file_fallback_;
    516   }
    517 
    518   const std::vector<std::string>& GetCpuAbilist() const {
    519     return cpu_abilist_;
    520   }
    521 
    522   bool IsRunningOnMemoryTool() const {
    523     return is_running_on_memory_tool_;
    524   }
    525 
    526   void SetTargetSdkVersion(int32_t version) {
    527     target_sdk_version_ = version;
    528   }
    529 
    530   int32_t GetTargetSdkVersion() const {
    531     return target_sdk_version_;
    532   }
    533 
    534   uint32_t GetZygoteMaxFailedBoots() const {
    535     return zygote_max_failed_boots_;
    536   }
    537 
    538   bool AreExperimentalFlagsEnabled(ExperimentalFlags flags) {
    539     return (experimental_flags_ & flags) != ExperimentalFlags::kNone;
    540   }
    541 
  // Create the JIT, the instrumentation, and the code cache.
    543   void CreateJit();
    544 
    545   ArenaPool* GetArenaPool() {
    546     return arena_pool_.get();
    547   }
    548   ArenaPool* GetJitArenaPool() {
    549     return jit_arena_pool_.get();
    550   }
    551   const ArenaPool* GetArenaPool() const {
    552     return arena_pool_.get();
    553   }
    554 
    555   void ReclaimArenaPoolMemory();
    556 
    557   LinearAlloc* GetLinearAlloc() {
    558     return linear_alloc_.get();
    559   }
    560 
    561   jit::JitOptions* GetJITOptions() {
    562     return jit_options_.get();
    563   }
    564 
    565   bool IsJavaDebuggable() const {
    566     return is_java_debuggable_;
    567   }
    568 
    569   void SetJavaDebuggable(bool value);
    570 
    571   // Deoptimize the boot image, called for Java debuggable apps.
    572   void DeoptimizeBootImage();
    573 
    574   bool IsNativeDebuggable() const {
    575     return is_native_debuggable_;
    576   }
    577 
    578   void SetNativeDebuggable(bool value) {
    579     is_native_debuggable_ = value;
    580   }
    581 
    582   // Returns the build fingerprint, if set. Otherwise an empty string is returned.
    583   std::string GetFingerprint() {
    584     return fingerprint_;
    585   }
    586 
    587   // Called from class linker.
    588   void SetSentinel(mirror::Object* sentinel) REQUIRES_SHARED(Locks::mutator_lock_);
    589 
  // Create a normal LinearAlloc, or a low-4GB version if we are a 64-bit AOT compiler.
    591   LinearAlloc* CreateLinearAlloc();
    592 
    593   OatFileManager& GetOatFileManager() const {
    594     DCHECK(oat_file_manager_ != nullptr);
    595     return *oat_file_manager_;
    596   }
    597 
    598   double GetHashTableMinLoadFactor() const;
    599   double GetHashTableMaxLoadFactor() const;
    600 
    601   void SetSafeMode(bool mode) {
    602     safe_mode_ = mode;
    603   }
    604 
    605   bool GetDumpNativeStackOnSigQuit() const {
    606     return dump_native_stack_on_sig_quit_;
    607   }
    608 
    609   bool GetPrunedDalvikCache() const {
    610     return pruned_dalvik_cache_;
    611   }
    612 
    613   void SetPrunedDalvikCache(bool pruned) {
    614     pruned_dalvik_cache_ = pruned;
    615   }
    616 
    617   void UpdateProcessState(ProcessState process_state);
    618 
  // Returns true if we currently care about long mutator pauses.
    620   bool InJankPerceptibleProcessState() const {
    621     return process_state_ == kProcessStateJankPerceptible;
    622   }
    623 
    624   void RegisterSensitiveThread() const;
    625 
    626   void SetZygoteNoThreadSection(bool val) {
    627     zygote_no_threads_ = val;
    628   }
    629 
    630   bool IsZygoteNoThreadSection() const {
    631     return zygote_no_threads_;
    632   }
    633 
  // Returns whether the code can be deoptimized asynchronously. Code may be compiled with
  // optimizations that make it impossible to deoptimize.
    636   bool IsAsyncDeoptimizeable(uintptr_t code) const REQUIRES_SHARED(Locks::mutator_lock_);
    637 
    638   // Returns a saved copy of the environment (getenv/setenv values).
    639   // Used by Fork to protect against overwriting LD_LIBRARY_PATH, etc.
    640   char** GetEnvSnapshot() const {
    641     return env_snapshot_.GetSnapshot();
    642   }
    643 
    644   void AddSystemWeakHolder(gc::AbstractSystemWeakHolder* holder);
    645   void RemoveSystemWeakHolder(gc::AbstractSystemWeakHolder* holder);
    646 
    647   void AttachAgent(const std::string& agent_arg);
    648 
    649   const std::list<ti::Agent>& GetAgents() const {
    650     return agents_;
    651   }
    652 
    653   RuntimeCallbacks* GetRuntimeCallbacks();
    654 
    655   void InitThreadGroups(Thread* self);
    656 
    657   void SetDumpGCPerformanceOnShutdown(bool value) {
    658     dump_gc_performance_on_shutdown_ = value;
    659   }
    660 
    661   void IncrementDeoptimizationCount(DeoptimizationKind kind) {
    662     DCHECK_LE(kind, DeoptimizationKind::kLast);
    663     deoptimization_counts_[static_cast<size_t>(kind)]++;
    664   }
    665 
    666   uint32_t GetNumberOfDeoptimizations() const {
    667     uint32_t result = 0;
    668     for (size_t i = 0; i <= static_cast<size_t>(DeoptimizationKind::kLast); ++i) {
    669       result += deoptimization_counts_[i];
    670     }
    671     return result;
    672   }
    673 
    674   // Whether or not we use MADV_RANDOM on files that are thought to have random access patterns.
    675   // This is beneficial for low RAM devices since it reduces page cache thrashing.
    676   bool MAdviseRandomAccess() const {
    677     return madvise_random_access_;
    678   }
    679 
    680  private:
    681   static void InitPlatformSignalHandlers();
    682 
    683   Runtime();
    684 
    685   void BlockSignals();
    686 
    687   bool Init(RuntimeArgumentMap&& runtime_options)
    688       SHARED_TRYLOCK_FUNCTION(true, Locks::mutator_lock_);
    689   void InitNativeMethods() REQUIRES(!Locks::mutator_lock_);
    690   void RegisterRuntimeNativeMethods(JNIEnv* env);
    691 
    692   void StartDaemonThreads();
    693   void StartSignalCatcher();
    694 
    695   void MaybeSaveJitProfilingInfo();
    696 
    697   // Visit all of the thread roots.
    698   void VisitThreadRoots(RootVisitor* visitor, VisitRootFlags flags)
    699       REQUIRES_SHARED(Locks::mutator_lock_);
    700 
  // Visit all other roots, which must be visited with mutators suspended.
    702   void VisitNonConcurrentRoots(RootVisitor* visitor, VisitRootFlags flags)
    703       REQUIRES_SHARED(Locks::mutator_lock_);
    704 
  // Constant roots are the roots which never change after the runtime is initialized; they only
  // need to be visited once per GC cycle.
    707   void VisitConstantRoots(RootVisitor* visitor)
    708       REQUIRES_SHARED(Locks::mutator_lock_);
    709 
    710   // A pointer to the active runtime or null.
    711   static Runtime* instance_;
    712 
    713   // NOTE: these must match the gc::ProcessState values as they come directly from the framework.
    714   static constexpr int kProfileForground = 0;
    715   static constexpr int kProfileBackground = 1;
    716 
    717   static constexpr uint32_t kCalleeSaveSize = 4u;
    718 
    719   // 64 bit so that we can share the same asm offsets for both 32 and 64 bits.
    720   uint64_t callee_save_methods_[kCalleeSaveSize];
    721   GcRoot<mirror::Throwable> pre_allocated_OutOfMemoryError_;
    722   GcRoot<mirror::Throwable> pre_allocated_NoClassDefFoundError_;
    723   ArtMethod* resolution_method_;
    724   ArtMethod* imt_conflict_method_;
  // The unresolved method has the same behavior as the conflict method; it is used by the class
  // linker to differentiate between unfilled imt slots and conflict slots in superclasses.
    727   ArtMethod* imt_unimplemented_method_;
    728 
  // Special sentinel object used to mark invalid conditions in JNI (cleared weak references) and
  // JDWP (invalid references).
    731   GcRoot<mirror::Object> sentinel_;
    732 
    733   InstructionSet instruction_set_;
    734   QuickMethodFrameInfo callee_save_method_frame_infos_[kCalleeSaveSize];
    735 
    736   CompilerCallbacks* compiler_callbacks_;
    737   bool is_zygote_;
    738   bool must_relocate_;
    739   bool is_concurrent_gc_enabled_;
    740   bool is_explicit_gc_disabled_;
    741   bool dex2oat_enabled_;
    742   bool image_dex2oat_enabled_;
    743 
    744   std::string compiler_executable_;
    745   std::string patchoat_executable_;
    746   std::vector<std::string> compiler_options_;
    747   std::vector<std::string> image_compiler_options_;
    748   std::string image_location_;
    749 
    750   std::string boot_class_path_string_;
    751   std::string class_path_string_;
    752   std::vector<std::string> properties_;
    753 
    754   std::list<ti::Agent> agents_;
    755   std::vector<Plugin> plugins_;
    756 
    757   // The default stack size for managed threads created by the runtime.
    758   size_t default_stack_size_;
    759 
    760   gc::Heap* heap_;
    761 
    762   std::unique_ptr<ArenaPool> jit_arena_pool_;
    763   std::unique_ptr<ArenaPool> arena_pool_;
  // Special low-4GB pool for the compiler's linear alloc. We need ArtFields to be in the low 4GB
  // if we are compiling using a 32-bit image on a 64-bit compiler, in case we resolve things in
  // the image, since the field arrays are int arrays in this case.
    767   std::unique_ptr<ArenaPool> low_4gb_arena_pool_;
    768 
    769   // Shared linear alloc for now.
    770   std::unique_ptr<LinearAlloc> linear_alloc_;
    771 
  // The number of spins that are done before thread suspension is used to forcibly inflate
  // the lock.
    773   size_t max_spins_before_thin_lock_inflation_;
    774   MonitorList* monitor_list_;
    775   MonitorPool* monitor_pool_;
    776 
    777   ThreadList* thread_list_;
    778 
    779   InternTable* intern_table_;
    780 
    781   ClassLinker* class_linker_;
    782 
    783   SignalCatcher* signal_catcher_;
    784 
    785   // If true, the runtime will connect to tombstoned via a socket to
    786   // request an open file descriptor to write its traces to.
    787   bool use_tombstoned_traces_;
    788 
  // Location to which traces must be written on SIGQUIT. Only used if
  // use_tombstoned_traces_ == false.
    791   std::string stack_trace_file_;
    792 
    793   std::unique_ptr<JavaVMExt> java_vm_;
    794 
    795   std::unique_ptr<jit::Jit> jit_;
    796   std::unique_ptr<jit::JitOptions> jit_options_;
    797 
    798   // Fault message, printed when we get a SIGSEGV.
    799   Mutex fault_message_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
    800   std::string fault_message_ GUARDED_BY(fault_message_lock_);
    801 
    802   // A non-zero value indicates that a thread has been created but not yet initialized. Guarded by
    803   // the shutdown lock so that threads aren't born while we're shutting down.
    804   size_t threads_being_born_ GUARDED_BY(Locks::runtime_shutdown_lock_);
    805 
    806   // Waited upon until no threads are being born.
    807   std::unique_ptr<ConditionVariable> shutdown_cond_ GUARDED_BY(Locks::runtime_shutdown_lock_);
    808 
    809   // Set when runtime shutdown is past the point that new threads may attach.
    810   bool shutting_down_ GUARDED_BY(Locks::runtime_shutdown_lock_);
    811 
  // The runtime is starting to shut down but is blocked waiting on shutdown_cond_.
    813   bool shutting_down_started_ GUARDED_BY(Locks::runtime_shutdown_lock_);
    814 
    815   bool started_;
    816 
  // Tells us whether the runtime has finished starting. If this flag is set then the daemon
  // threads and the class loader have been created. This flag is needed to know whether it's
  // safe to request CMS.
    820   bool finished_starting_;
    821 
    822   // Hooks supported by JNI_CreateJavaVM
    823   jint (*vfprintf_)(FILE* stream, const char* format, va_list ap);
    824   void (*exit_)(jint status);
    825   void (*abort_)();
    826 
    827   bool stats_enabled_;
    828   RuntimeStats stats_;
    829 
    830   const bool is_running_on_memory_tool_;
    831 
    832   std::unique_ptr<TraceConfig> trace_config_;
    833 
    834   instrumentation::Instrumentation instrumentation_;
    835 
    836   jobject main_thread_group_;
    837   jobject system_thread_group_;
    838 
    839   // As returned by ClassLoader.getSystemClassLoader().
    840   jobject system_class_loader_;
    841 
    842   // If true, then we dump the GC cumulative timings on shutdown.
    843   bool dump_gc_performance_on_shutdown_;
    844 
    845   // Transaction used for pre-initializing classes at compilation time.
    846   Transaction* preinitialization_transaction_;
    847 
    848   // If kNone, verification is disabled. kEnable by default.
    849   verifier::VerifyMode verify_;
    850 
    851   // If true, the runtime may use dex files directly with the interpreter if an oat file is not
    852   // available/usable.
    853   bool allow_dex_file_fallback_;
    854 
    855   // List of supported cpu abis.
    856   std::vector<std::string> cpu_abilist_;
    857 
    858   // Specifies target SDK version to allow workarounds for certain API levels.
    859   int32_t target_sdk_version_;
    860 
    861   // Implicit checks flags.
    862   bool implicit_null_checks_;       // NullPointer checks are implicit.
    863   bool implicit_so_checks_;         // StackOverflow checks are implicit.
    864   bool implicit_suspend_checks_;    // Thread suspension checks are implicit.
    865 
  // Whether or not the sig chain (and implicitly the fault handler) should be
  // disabled. Tools like dex2oat or patchoat don't need them. This enables
  // building a statically linked version of dex2oat.
    869   bool no_sig_chain_;
    870 
    871   // Force the use of native bridge even if the app ISA matches the runtime ISA.
    872   bool force_native_bridge_;
    873 
    874   // Whether or not a native bridge has been loaded.
    875   //
  // The native bridge allows running native code compiled for a foreign ISA. It works as follows:
  // if a standard dlopen fails to load the native library associated with a native activity, we
  // call into the native bridge to load it and then get the trampoline for the entry into the
  // native activity.
    879   //
    880   // The option 'native_bridge_library_filename' specifies the name of the native bridge.
    881   // When non-empty the native bridge will be loaded from the given file. An empty value means
    882   // that there's no native bridge.
    883   bool is_native_bridge_loaded_;
    884 
    885   // Whether we are running under native debugger.
    886   bool is_native_debuggable_;
    887 
    888   // Whether Java code needs to be debuggable.
    889   bool is_java_debuggable_;
    890 
    891   // The maximum number of failed boots we allow before pruning the dalvik cache
    892   // and trying again. This option is only inspected when we're running as a
    893   // zygote.
    894   uint32_t zygote_max_failed_boots_;
    895 
  // Enable experimental opcodes that aren't fully specified yet. The intent is to
  // eventually publish them as publicly usable opcodes, but they aren't ready yet.
  //
  // Experimental opcodes should not be used by production code.
    900   ExperimentalFlags experimental_flags_;
    901 
    902   // Contains the build fingerprint, if given as a parameter.
    903   std::string fingerprint_;
    904 
    905   // Oat file manager, keeps track of what oat files are open.
    906   OatFileManager* oat_file_manager_;
    907 
    908   // Whether or not we are on a low RAM device.
    909   bool is_low_memory_mode_;
    910 
    911   // Whether or not we use MADV_RANDOM on files that are thought to have random access patterns.
    912   // This is beneficial for low RAM devices since it reduces page cache thrashing.
    913   bool madvise_random_access_;
    914 
    915   // Whether the application should run in safe mode, that is, interpreter only.
    916   bool safe_mode_;
    917 
    918   // Whether threads should dump their native stack on SIGQUIT.
    919   bool dump_native_stack_on_sig_quit_;
    920 
    921   // Whether the dalvik cache was pruned when initializing the runtime.
    922   bool pruned_dalvik_cache_;
    923 
    924   // Whether or not we currently care about pause times.
    925   ProcessState process_state_;
    926 
    927   // Whether zygote code is in a section that should not start threads.
    928   bool zygote_no_threads_;
    929 
    930   // Saved environment.
    931   class EnvSnapshot {
    932    public:
    933     EnvSnapshot() = default;
    934     void TakeSnapshot();
    935     char** GetSnapshot() const;
    936 
    937    private:
    938     std::unique_ptr<char*[]> c_env_vector_;
    939     std::vector<std::unique_ptr<std::string>> name_value_pairs_;
    940 
    941     DISALLOW_COPY_AND_ASSIGN(EnvSnapshot);
    942   } env_snapshot_;
    943 
    944   // Generic system-weak holders.
    945   std::vector<gc::AbstractSystemWeakHolder*> system_weak_holders_;
    946 
    947   std::unique_ptr<RuntimeCallbacks> callbacks_;
    948 
    949   std::atomic<uint32_t> deoptimization_counts_[
    950       static_cast<uint32_t>(DeoptimizationKind::kLast) + 1];
    951 
    952   std::unique_ptr<MemMap> protected_fault_page_;
    953 
    954   DISALLOW_COPY_AND_ASSIGN(Runtime);
    955 };
    956 
    957 }  // namespace art
    958 
    959 #endif  // ART_RUNTIME_RUNTIME_H_
    960