/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_RUNTIME_H_
#define ART_RUNTIME_RUNTIME_H_

#include <jni.h>
#include <stdio.h>

#include <iosfwd>
#include <set>
#include <string>
#include <utility>
#include <memory>
#include <vector>

#include "arch/instruction_set.h"
#include "base/macros.h"
#include "base/mutex.h"
#include "deoptimization_kind.h"
#include "dex/dex_file_types.h"
#include "experimental_flags.h"
#include "gc_root.h"
#include "instrumentation.h"
#include "jdwp_provider.h"
#include "obj_ptr.h"
#include "offsets.h"
#include "process_state.h"
#include "quick/quick_method_frame_info.h"
#include "runtime_stats.h"

namespace art {

namespace gc {
class AbstractSystemWeakHolder;
class Heap;
}  // namespace gc

namespace hiddenapi {
enum class EnforcementPolicy;
}  // namespace hiddenapi

namespace jit {
class Jit;
class JitOptions;
}  // namespace jit

namespace mirror {
class Array;
class ClassLoader;
class DexCache;
template<class T> class ObjectArray;
template<class T> class PrimitiveArray;
typedef PrimitiveArray<int8_t> ByteArray;
class String;
class Throwable;
}  // namespace mirror
namespace ti {
class Agent;
class AgentSpec;
}  // namespace ti
namespace verifier {
class MethodVerifier;
enum class VerifyMode : int8_t;
}  // namespace verifier
class ArenaPool;
class ArtMethod;
enum class CalleeSaveType : uint32_t;
class ClassLinker;
class CompilerCallbacks;
class DexFile;
class InternTable;
class IsMarkedVisitor;
class JavaVMExt;
class LinearAlloc;
class MemMap;
class MonitorList;
class MonitorPool;
class NullPointerHandler;
class OatFileManager;
class Plugin;
struct RuntimeArgumentMap;
class RuntimeCallbacks;
class SignalCatcher;
class StackOverflowHandler;
class SuspensionHandler;
class ThreadList;
class Trace;
struct TraceConfig;
class Transaction;

typedef std::vector<std::pair<std::string, const void*>> RuntimeOptions;

class Runtime {
 public:
  // Parse raw runtime options.
  static bool ParseOptions(const RuntimeOptions& raw_options,
                           bool ignore_unrecognized,
                           RuntimeArgumentMap* runtime_options);

  // Creates and initializes a new runtime.
  static bool Create(RuntimeArgumentMap&& runtime_options)
      SHARED_TRYLOCK_FUNCTION(true, Locks::mutator_lock_);

  // Creates and initializes a new runtime.
  static bool Create(const RuntimeOptions& raw_options, bool ignore_unrecognized)
      SHARED_TRYLOCK_FUNCTION(true, Locks::mutator_lock_);

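  // A minimal, illustrative sketch (not ART source) of how an embedder might drive the
  // creation entry points above; the option string is an assumed example value:
  //
  //   RuntimeOptions raw_options;
  //   raw_options.push_back(std::make_pair("-Xmx64m", nullptr));
  //   if (Runtime::Create(raw_options, /*ignore_unrecognized=*/false)) {
  //     Runtime* runtime = Runtime::Current();
  //     runtime->Start();
  //   }
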
  // IsAotCompiler for compilers that don't have a running runtime. Only dex2oat currently.
  bool IsAotCompiler() const {
    return !UseJitCompilation() && IsCompiler();
  }

  // IsCompiler is true for any runtime which has a running compiler, either dex2oat or the JIT.
  bool IsCompiler() const {
    return compiler_callbacks_ != nullptr;
  }

  // If a compiler, are we compiling a boot image?
  bool IsCompilingBootImage() const;

  bool CanRelocate() const;

  bool ShouldRelocate() const {
    return must_relocate_ && CanRelocate();
  }

  bool MustRelocateIfPossible() const {
    return must_relocate_;
  }

  bool IsDex2OatEnabled() const {
    return dex2oat_enabled_ && IsImageDex2OatEnabled();
  }

  bool IsImageDex2OatEnabled() const {
    return image_dex2oat_enabled_;
  }

  CompilerCallbacks* GetCompilerCallbacks() {
    return compiler_callbacks_;
  }

  void SetCompilerCallbacks(CompilerCallbacks* callbacks) {
    CHECK(callbacks != nullptr);
    compiler_callbacks_ = callbacks;
  }

  bool IsZygote() const {
    return is_zygote_;
  }

  bool IsExplicitGcDisabled() const {
    return is_explicit_gc_disabled_;
  }

  std::string GetCompilerExecutable() const;
  std::string GetPatchoatExecutable() const;

  const std::vector<std::string>& GetCompilerOptions() const {
    return compiler_options_;
  }

  void AddCompilerOption(const std::string& option) {
    compiler_options_.push_back(option);
  }

  const std::vector<std::string>& GetImageCompilerOptions() const {
    return image_compiler_options_;
  }

  const std::string& GetImageLocation() const {
    return image_location_;
  }

  // Starts a runtime, which may cause threads to be started and code to run.
  bool Start() UNLOCK_FUNCTION(Locks::mutator_lock_);

  bool IsShuttingDown(Thread* self);
  bool IsShuttingDownLocked() const REQUIRES(Locks::runtime_shutdown_lock_) {
    return shutting_down_;
  }

  size_t NumberOfThreadsBeingBorn() const REQUIRES(Locks::runtime_shutdown_lock_) {
    return threads_being_born_;
  }

  void StartThreadBirth() REQUIRES(Locks::runtime_shutdown_lock_) {
    threads_being_born_++;
  }

  void EndThreadBirth() REQUIRES(Locks::runtime_shutdown_lock_);

  bool IsStarted() const {
    return started_;
  }

  bool IsFinishedStarting() const {
    return finished_starting_;
  }

  static Runtime* Current() {
    return instance_;
  }

  // Aborts semi-cleanly. Used in the implementation of LOG(FATAL), which most
  // callers should prefer.
  NO_RETURN static void Abort(const char* msg) REQUIRES(!Locks::abort_lock_);

  // Returns the "main" ThreadGroup, used when attaching user threads.
  jobject GetMainThreadGroup() const;

  // Returns the "system" ThreadGroup, used when attaching our internal threads.
  jobject GetSystemThreadGroup() const;

  // Returns the system ClassLoader which represents the CLASSPATH.
  jobject GetSystemClassLoader() const;

  // Attaches the calling native thread to the runtime.
  bool AttachCurrentThread(const char* thread_name, bool as_daemon, jobject thread_group,
                           bool create_peer);

  void CallExitHook(jint status);

  // Detaches the current native thread from the runtime.
  void DetachCurrentThread() REQUIRES(!Locks::mutator_lock_);

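  // Illustrative sketch of the expected attach/detach pattern for a native thread that wants
  // to call into managed code (hypothetical call site, not ART source; the thread name is an
  // example value):
  //
  //   Runtime* runtime = Runtime::Current();
  //   runtime->AttachCurrentThread("ExampleWorker", /*as_daemon=*/false,
  //                                runtime->GetSystemThreadGroup(),
  //                                /*create_peer=*/true);
  //   // ... call into managed code via JNI ...
  //   runtime->DetachCurrentThread();
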
  void DumpDeoptimizations(std::ostream& os);
  void DumpForSigQuit(std::ostream& os);
  void DumpLockHolders(std::ostream& os);

  ~Runtime();

  const std::string& GetBootClassPathString() const {
    return boot_class_path_string_;
  }

  const std::string& GetClassPathString() const {
    return class_path_string_;
  }

  ClassLinker* GetClassLinker() const {
    return class_linker_;
  }

  size_t GetDefaultStackSize() const {
    return default_stack_size_;
  }

  gc::Heap* GetHeap() const {
    return heap_;
  }

  InternTable* GetInternTable() const {
    DCHECK(intern_table_ != nullptr);
    return intern_table_;
  }

  JavaVMExt* GetJavaVM() const {
    return java_vm_.get();
  }

  size_t GetMaxSpinsBeforeThinLockInflation() const {
    return max_spins_before_thin_lock_inflation_;
  }

  MonitorList* GetMonitorList() const {
    return monitor_list_;
  }

  MonitorPool* GetMonitorPool() const {
    return monitor_pool_;
  }

  // Is the given object the special object used to mark a cleared JNI weak global?
  bool IsClearedJniWeakGlobal(ObjPtr<mirror::Object> obj) REQUIRES_SHARED(Locks::mutator_lock_);

  // Get the special object used to mark a cleared JNI weak global.
  mirror::Object* GetClearedJniWeakGlobal() REQUIRES_SHARED(Locks::mutator_lock_);

  mirror::Throwable* GetPreAllocatedOutOfMemoryError() REQUIRES_SHARED(Locks::mutator_lock_);

  mirror::Throwable* GetPreAllocatedNoClassDefFoundError()
      REQUIRES_SHARED(Locks::mutator_lock_);

  const std::vector<std::string>& GetProperties() const {
    return properties_;
  }

  ThreadList* GetThreadList() const {
    return thread_list_;
  }

  static const char* GetVersion() {
    return "2.1.0";
  }

  bool IsMethodHandlesEnabled() const {
    return true;
  }

  void DisallowNewSystemWeaks() REQUIRES_SHARED(Locks::mutator_lock_);
  void AllowNewSystemWeaks() REQUIRES_SHARED(Locks::mutator_lock_);
  // broadcast_for_checkpoint is true when we broadcast to make blocked threads respond to
  // checkpoint requests. It is false when we broadcast to unblock blocked threads after system
  // weak access is reenabled.
  void BroadcastForNewSystemWeaks(bool broadcast_for_checkpoint = false);

  // Visit all the roots. The `flags` argument controls which roots are visited, e.g. whether
  // only dirty roots are visited and whether dirty roots are marked as non-dirty after visiting.
  void VisitRoots(RootVisitor* visitor, VisitRootFlags flags = kVisitRootFlagAllRoots)
      REQUIRES(!Locks::classlinker_classes_lock_, !Locks::trace_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Visit image roots, only used for hprof since the GC uses the image space mod union table
  // instead.
  void VisitImageRoots(RootVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);

  // Visit all of the roots we can safely visit concurrently.
  void VisitConcurrentRoots(RootVisitor* visitor,
                            VisitRootFlags flags = kVisitRootFlagAllRoots)
      REQUIRES(!Locks::classlinker_classes_lock_, !Locks::trace_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Visit all of the non-thread roots; we can do this with the mutators unpaused.
  void VisitNonThreadRoots(RootVisitor* visitor)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void VisitTransactionRoots(RootVisitor* visitor)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Sweep system weaks. A system weak is deleted if the visitor returns null; otherwise it is
  // updated to the visitor's returned value.
  void SweepSystemWeaks(IsMarkedVisitor* visitor)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Returns a special method that calls into a trampoline for runtime method resolution.
  ArtMethod* GetResolutionMethod();

  bool HasResolutionMethod() const {
    return resolution_method_ != nullptr;
  }

  void SetResolutionMethod(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);
  void ClearResolutionMethod() {
    resolution_method_ = nullptr;
  }

  ArtMethod* CreateResolutionMethod() REQUIRES_SHARED(Locks::mutator_lock_);

  // Returns a special method that calls into a trampoline for runtime IMT conflicts.
  ArtMethod* GetImtConflictMethod();
  ArtMethod* GetImtUnimplementedMethod();

  bool HasImtConflictMethod() const {
    return imt_conflict_method_ != nullptr;
  }

  void ClearImtConflictMethod() {
    imt_conflict_method_ = nullptr;
  }

  void FixupConflictTables();
  void SetImtConflictMethod(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);
  void SetImtUnimplementedMethod(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);

  ArtMethod* CreateImtConflictMethod(LinearAlloc* linear_alloc)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void ClearImtUnimplementedMethod() {
    imt_unimplemented_method_ = nullptr;
  }

  bool HasCalleeSaveMethod(CalleeSaveType type) const {
    return callee_save_methods_[static_cast<size_t>(type)] != 0u;
  }

  ArtMethod* GetCalleeSaveMethod(CalleeSaveType type)
      REQUIRES_SHARED(Locks::mutator_lock_);

  ArtMethod* GetCalleeSaveMethodUnchecked(CalleeSaveType type)
      REQUIRES_SHARED(Locks::mutator_lock_);

  QuickMethodFrameInfo GetCalleeSaveMethodFrameInfo(CalleeSaveType type) const {
    return callee_save_method_frame_infos_[static_cast<size_t>(type)];
  }

  QuickMethodFrameInfo GetRuntimeMethodFrameInfo(ArtMethod* method)
      REQUIRES_SHARED(Locks::mutator_lock_);

  static size_t GetCalleeSaveMethodOffset(CalleeSaveType type) {
    return OFFSETOF_MEMBER(Runtime, callee_save_methods_[static_cast<size_t>(type)]);
  }

  InstructionSet GetInstructionSet() const {
    return instruction_set_;
  }

  void SetInstructionSet(InstructionSet instruction_set);
  void ClearInstructionSet();

  void SetCalleeSaveMethod(ArtMethod* method, CalleeSaveType type);
  void ClearCalleeSaveMethods();

  ArtMethod* CreateCalleeSaveMethod() REQUIRES_SHARED(Locks::mutator_lock_);

  int32_t GetStat(int kind);

  RuntimeStats* GetStats() {
    return &stats_;
  }

  bool HasStatsEnabled() const {
    return stats_enabled_;
  }

  void ResetStats(int kinds);

  void SetStatsEnabled(bool new_state)
      REQUIRES(!Locks::instrument_entrypoints_lock_, !Locks::mutator_lock_);

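  // Illustrative sketch of toggling and reading runtime stats (hypothetical call site; the
  // stat kind is an assumed example from runtime_stats.h and may differ between versions):
  //
  //   Runtime* runtime = Runtime::Current();
  //   runtime->SetStatsEnabled(true);
  //   // ... run the workload of interest ...
  //   int32_t gc_count = runtime->GetStat(KIND_GC_INVOCATIONS);
  //   runtime->SetStatsEnabled(false);
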
  enum class NativeBridgeAction {  // private
    kUnload,
    kInitialize
  };

  jit::Jit* GetJit() const {
    return jit_.get();
  }

  // Returns true if JIT compilation is enabled. GetJit() will not be null in this case.
  bool UseJitCompilation() const;

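  // Callers are expected to guard accesses to the JIT on UseJitCompilation(); a minimal
  // sketch of that pattern (illustrative, not a specific ART call site):
  //
  //   if (Runtime::Current()->UseJitCompilation()) {
  //     jit::Jit* jit = Runtime::Current()->GetJit();
  //     // ... interact with the JIT / its code cache ...
  //   }
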
  void PreZygoteFork();
  void InitNonZygoteOrPostFork(
      JNIEnv* env,
      bool is_system_server,
      NativeBridgeAction action,
      const char* isa,
      bool profile_system_server = false);

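  // Rough ordering sketch for the zygote fork path, as suggested by the names above
  // (hypothetical pseudo call site, not ART source; argument values are assumptions):
  //
  //   runtime->PreZygoteFork();
  //   pid_t pid = fork();
  //   if (pid == 0) {  // child
  //     runtime->InitNonZygoteOrPostFork(env,
  //                                      /*is_system_server=*/false,
  //                                      Runtime::NativeBridgeAction::kUnload,
  //                                      /*isa=*/"arm64");
  //   }
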
  const instrumentation::Instrumentation* GetInstrumentation() const {
    return &instrumentation_;
  }

  instrumentation::Instrumentation* GetInstrumentation() {
    return &instrumentation_;
  }

  void RegisterAppInfo(const std::vector<std::string>& code_paths,
                       const std::string& profile_output_filename);

  // Transaction support.
  bool IsActiveTransaction() const;
  void EnterTransactionMode();
  void EnterTransactionMode(bool strict, mirror::Class* root);
  void ExitTransactionMode();
  void RollbackAllTransactions() REQUIRES_SHARED(Locks::mutator_lock_);
  // Transaction rollback and exiting transaction mode are always done together, so it is
  // convenient to do them in one function.
  void RollbackAndExitTransactionMode() REQUIRES_SHARED(Locks::mutator_lock_);
  bool IsTransactionAborted() const;
  const std::unique_ptr<Transaction>& GetTransaction() const;
  bool IsActiveStrictTransactionMode() const;

  void AbortTransactionAndThrowAbortError(Thread* self, const std::string& abort_message)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void ThrowTransactionAbortError(Thread* self)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void RecordWriteFieldBoolean(mirror::Object* obj, MemberOffset field_offset, uint8_t value,
                               bool is_volatile) const;
  void RecordWriteFieldByte(mirror::Object* obj, MemberOffset field_offset, int8_t value,
                            bool is_volatile) const;
  void RecordWriteFieldChar(mirror::Object* obj, MemberOffset field_offset, uint16_t value,
                            bool is_volatile) const;
  void RecordWriteFieldShort(mirror::Object* obj, MemberOffset field_offset, int16_t value,
                             bool is_volatile) const;
  void RecordWriteField32(mirror::Object* obj, MemberOffset field_offset, uint32_t value,
                          bool is_volatile) const;
  void RecordWriteField64(mirror::Object* obj, MemberOffset field_offset, uint64_t value,
                          bool is_volatile) const;
  void RecordWriteFieldReference(mirror::Object* obj,
                                 MemberOffset field_offset,
                                 ObjPtr<mirror::Object> value,
                                 bool is_volatile) const
      REQUIRES_SHARED(Locks::mutator_lock_);
  void RecordWriteArray(mirror::Array* array, size_t index, uint64_t value) const
      REQUIRES_SHARED(Locks::mutator_lock_);
  void RecordStrongStringInsertion(ObjPtr<mirror::String> s) const
      REQUIRES(Locks::intern_table_lock_);
  void RecordWeakStringInsertion(ObjPtr<mirror::String> s) const
      REQUIRES(Locks::intern_table_lock_);
  void RecordStrongStringRemoval(ObjPtr<mirror::String> s) const
      REQUIRES(Locks::intern_table_lock_);
  void RecordWeakStringRemoval(ObjPtr<mirror::String> s) const
      REQUIRES(Locks::intern_table_lock_);
  void RecordResolveString(ObjPtr<mirror::DexCache> dex_cache, dex::StringIndex string_idx) const
      REQUIRES_SHARED(Locks::mutator_lock_);

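  // Illustrative sketch of how the transaction API above is typically driven when
  // pre-initializing classes at compilation time (hypothetical sequence, not a verbatim
  // ART call site):
  //
  //   Runtime* runtime = Runtime::Current();
  //   runtime->EnterTransactionMode();
  //   // ... run a class initializer in the interpreter, which records field and
  //   // intern-table writes via the RecordWrite*/Record*String* hooks above ...
  //   if (runtime->IsTransactionAborted()) {
  //     runtime->RollbackAndExitTransactionMode();
  //   } else {
  //     runtime->ExitTransactionMode();
  //   }
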
  void SetFaultMessage(const std::string& message) REQUIRES(!fault_message_lock_);
  // Only read by the signal handler, NO_THREAD_SAFETY_ANALYSIS to prevent lock order violations
  // with the unexpected_signal_lock_.
  const std::string& GetFaultMessage() NO_THREAD_SAFETY_ANALYSIS {
    return fault_message_;
  }

  void AddCurrentRuntimeFeaturesAsDex2OatArguments(std::vector<std::string>* arg_vector) const;

  bool ExplicitStackOverflowChecks() const {
    return !implicit_so_checks_;
  }

  void DisableVerifier();
  bool IsVerificationEnabled() const;
  bool IsVerificationSoftFail() const;

  void SetHiddenApiEnforcementPolicy(hiddenapi::EnforcementPolicy policy) {
    hidden_api_policy_ = policy;
  }

  hiddenapi::EnforcementPolicy GetHiddenApiEnforcementPolicy() const {
    return hidden_api_policy_;
  }

  void SetPendingHiddenApiWarning(bool value) {
    pending_hidden_api_warning_ = value;
  }

  void SetHiddenApiExemptions(const std::vector<std::string>& exemptions) {
    hidden_api_exemptions_ = exemptions;
  }

  const std::vector<std::string>& GetHiddenApiExemptions() {
    return hidden_api_exemptions_;
  }

  bool HasPendingHiddenApiWarning() const {
    return pending_hidden_api_warning_;
  }

  void SetDedupeHiddenApiWarnings(bool value) {
    dedupe_hidden_api_warnings_ = value;
  }

  bool ShouldDedupeHiddenApiWarnings() {
    return dedupe_hidden_api_warnings_;
  }

  void AlwaysSetHiddenApiWarningFlag() {
    always_set_hidden_api_warning_flag_ = true;
  }

  bool ShouldAlwaysSetHiddenApiWarningFlag() const {
    return always_set_hidden_api_warning_flag_;
  }

  void SetHiddenApiEventLogSampleRate(uint32_t rate) {
    hidden_api_access_event_log_rate_ = rate;
  }

  uint32_t GetHiddenApiEventLogSampleRate() const {
    return hidden_api_access_event_log_rate_;
  }

  const std::string& GetProcessPackageName() const {
    return process_package_name_;
  }

  void SetProcessPackageName(const char* package_name) {
    if (package_name == nullptr) {
      process_package_name_.clear();
    } else {
      process_package_name_ = package_name;
    }
  }

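  // Illustrative sketch of how a process might configure the hidden API machinery declared
  // above at startup. The values, and the kNoChecks enumerator, are assumptions for the
  // example only, not a specific ART call site:
  //
  //   Runtime* runtime = Runtime::Current();
  //   runtime->SetHiddenApiEnforcementPolicy(hiddenapi::EnforcementPolicy::kNoChecks);
  //   runtime->SetHiddenApiEventLogSampleRate(0x100);      // assumed ~1/256 of accesses
  //   runtime->SetProcessPackageName("com.example.app");   // example package name
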
  bool IsDexFileFallbackEnabled() const {
    return allow_dex_file_fallback_;
  }

  const std::vector<std::string>& GetCpuAbilist() const {
    return cpu_abilist_;
  }

  bool IsRunningOnMemoryTool() const {
    return is_running_on_memory_tool_;
  }

  void SetTargetSdkVersion(int32_t version) {
    target_sdk_version_ = version;
  }

  int32_t GetTargetSdkVersion() const {
    return target_sdk_version_;
  }

  uint32_t GetZygoteMaxFailedBoots() const {
    return zygote_max_failed_boots_;
  }

  bool AreExperimentalFlagsEnabled(ExperimentalFlags flags) {
    return (experimental_flags_ & flags) != ExperimentalFlags::kNone;
  }

  // Create the JIT and instrumentation and code cache.
  void CreateJit();

  ArenaPool* GetArenaPool() {
    return arena_pool_.get();
  }
  ArenaPool* GetJitArenaPool() {
    return jit_arena_pool_.get();
  }
  const ArenaPool* GetArenaPool() const {
    return arena_pool_.get();
  }

  void ReclaimArenaPoolMemory();

  LinearAlloc* GetLinearAlloc() {
    return linear_alloc_.get();
  }

  jit::JitOptions* GetJITOptions() {
    return jit_options_.get();
  }

  bool IsJavaDebuggable() const {
    return is_java_debuggable_;
  }

  void SetJavaDebuggable(bool value);

  // Deoptimize the boot image, called for Java debuggable apps.
  void DeoptimizeBootImage();

  bool IsNativeDebuggable() const {
    return is_native_debuggable_;
  }

  void SetNativeDebuggable(bool value) {
    is_native_debuggable_ = value;
  }

  bool AreAsyncExceptionsThrown() const {
    return async_exceptions_thrown_;
  }

  void SetAsyncExceptionsThrown() {
    async_exceptions_thrown_ = true;
  }

  // Returns the build fingerprint, if set. Otherwise an empty string is returned.
  std::string GetFingerprint() {
    return fingerprint_;
  }

  // Called from class linker.
  void SetSentinel(mirror::Object* sentinel) REQUIRES_SHARED(Locks::mutator_lock_);

  // Create a normal LinearAlloc, or a low 4gb version if we are a 64 bit AOT compiler.
  LinearAlloc* CreateLinearAlloc();

  OatFileManager& GetOatFileManager() const {
    DCHECK(oat_file_manager_ != nullptr);
    return *oat_file_manager_;
  }

  double GetHashTableMinLoadFactor() const;
  double GetHashTableMaxLoadFactor() const;

  void SetSafeMode(bool mode) {
    safe_mode_ = mode;
  }

  bool GetDumpNativeStackOnSigQuit() const {
    return dump_native_stack_on_sig_quit_;
  }

  bool GetPrunedDalvikCache() const {
    return pruned_dalvik_cache_;
  }

  void SetPrunedDalvikCache(bool pruned) {
    pruned_dalvik_cache_ = pruned;
  }

  void UpdateProcessState(ProcessState process_state);

  // Returns true if we currently care about long mutator pauses.
  bool InJankPerceptibleProcessState() const {
    return process_state_ == kProcessStateJankPerceptible;
  }

  void RegisterSensitiveThread() const;

  void SetZygoteNoThreadSection(bool val) {
    zygote_no_threads_ = val;
  }

  bool IsZygoteNoThreadSection() const {
    return zygote_no_threads_;
  }

  // Returns whether the given code can be deoptimized asynchronously. Code may be compiled with
  // some optimization that makes it impossible to deoptimize.
  bool IsAsyncDeoptimizeable(uintptr_t code) const REQUIRES_SHARED(Locks::mutator_lock_);

  // Returns a saved copy of the environment (getenv/setenv values).
  // Used by Fork to protect against overwriting LD_LIBRARY_PATH, etc.
  char** GetEnvSnapshot() const {
    return env_snapshot_.GetSnapshot();
  }

  void AddSystemWeakHolder(gc::AbstractSystemWeakHolder* holder);
  void RemoveSystemWeakHolder(gc::AbstractSystemWeakHolder* holder);

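  // Components that maintain their own system-weak references are expected to register a
  // holder so the GC can sweep and gate access to them; a minimal sketch of that pattern,
  // assuming a hypothetical subclass of gc::AbstractSystemWeakHolder:
  //
  //   ExampleWeakTable weak_table;  // hypothetical AbstractSystemWeakHolder subclass
  //   Runtime::Current()->AddSystemWeakHolder(&weak_table);
  //   // ... later, before the table is destroyed ...
  //   Runtime::Current()->RemoveSystemWeakHolder(&weak_table);
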
  void AttachAgent(JNIEnv* env, const std::string& agent_arg, jobject class_loader);

  const std::list<std::unique_ptr<ti::Agent>>& GetAgents() const {
    return agents_;
  }

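  // Illustrative use of AttachAgent() for late-attaching a JVMTI agent; the agent path and
  // argument are hypothetical example values, not ART defaults:
  //
  //   Runtime::Current()->AttachAgent(env,
  //                                   "/data/local/tmp/libexampleagent.so=some_arg",
  //                                   /*class_loader=*/nullptr);
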
  RuntimeCallbacks* GetRuntimeCallbacks();

  bool HasLoadedPlugins() const {
    return !plugins_.empty();
  }

  void InitThreadGroups(Thread* self);

  void SetDumpGCPerformanceOnShutdown(bool value) {
    dump_gc_performance_on_shutdown_ = value;
  }

  void IncrementDeoptimizationCount(DeoptimizationKind kind) {
    DCHECK_LE(kind, DeoptimizationKind::kLast);
    deoptimization_counts_[static_cast<size_t>(kind)]++;
  }

  uint32_t GetNumberOfDeoptimizations() const {
    uint32_t result = 0;
    for (size_t i = 0; i <= static_cast<size_t>(DeoptimizationKind::kLast); ++i) {
      result += deoptimization_counts_[i];
    }
    return result;
  }

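  // Sketch of how the deoptimization counters above are meant to be used (illustrative, not
  // a specific ART call site; the kind value is an assumed example from deoptimization_kind.h):
  //
  //   Runtime::Current()->IncrementDeoptimizationCount(DeoptimizationKind::kAotInlineCache);
  //   // ... later, e.g. when dumping diagnostics ...
  //   uint32_t total = Runtime::Current()->GetNumberOfDeoptimizations();
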
  // Whether or not we use MADV_RANDOM on files that are thought to have random access patterns.
  // This is beneficial for low RAM devices since it reduces page cache thrashing.
  bool MAdviseRandomAccess() const {
    return madvise_random_access_;
  }

  const std::string& GetJdwpOptions() {
    return jdwp_options_;
  }

  JdwpProvider GetJdwpProvider() const {
    return jdwp_provider_;
  }

  static constexpr int32_t kUnsetSdkVersion = 0u;

 private:
  static void InitPlatformSignalHandlers();

  Runtime();

  void BlockSignals();

  bool Init(RuntimeArgumentMap&& runtime_options)
      SHARED_TRYLOCK_FUNCTION(true, Locks::mutator_lock_);
  void InitNativeMethods() REQUIRES(!Locks::mutator_lock_);
  void RegisterRuntimeNativeMethods(JNIEnv* env);

  void StartDaemonThreads();
  void StartSignalCatcher();

  void MaybeSaveJitProfilingInfo();

  // Visit all of the thread roots.
  void VisitThreadRoots(RootVisitor* visitor, VisitRootFlags flags)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Visit all other roots which must be done with mutators suspended.
  void VisitNonConcurrentRoots(RootVisitor* visitor, VisitRootFlags flags)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Constant roots are the roots which never change after the runtime is initialized; they only
  // need to be visited once per GC cycle.
  void VisitConstantRoots(RootVisitor* visitor)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // A pointer to the active runtime or null.
  static Runtime* instance_;

  // NOTE: these must match the gc::ProcessState values as they come directly from the framework.
  static constexpr int kProfileForground = 0;
  static constexpr int kProfileBackground = 1;

  static constexpr uint32_t kCalleeSaveSize = 6u;

  // 64 bit so that we can share the same asm offsets for both 32 and 64 bits.
  uint64_t callee_save_methods_[kCalleeSaveSize];
  GcRoot<mirror::Throwable> pre_allocated_OutOfMemoryError_;
  GcRoot<mirror::Throwable> pre_allocated_NoClassDefFoundError_;
  ArtMethod* resolution_method_;
  ArtMethod* imt_conflict_method_;
  // The unresolved method has the same behavior as the conflict method; it is used by the class
  // linker to differentiate between unfilled IMT slots and conflict slots in superclasses.
  ArtMethod* imt_unimplemented_method_;

  // Special sentinel object used to mark invalid conditions in JNI (cleared weak references) and
  // JDWP (invalid references).
  GcRoot<mirror::Object> sentinel_;

  InstructionSet instruction_set_;
  QuickMethodFrameInfo callee_save_method_frame_infos_[kCalleeSaveSize];

  CompilerCallbacks* compiler_callbacks_;
  bool is_zygote_;
  bool must_relocate_;
  bool is_concurrent_gc_enabled_;
  bool is_explicit_gc_disabled_;
  bool dex2oat_enabled_;
  bool image_dex2oat_enabled_;

  std::string compiler_executable_;
  std::string patchoat_executable_;
  std::vector<std::string> compiler_options_;
  std::vector<std::string> image_compiler_options_;
  std::string image_location_;

  std::string boot_class_path_string_;
  std::string class_path_string_;
  std::vector<std::string> properties_;

  std::list<ti::AgentSpec> agent_specs_;
  std::list<std::unique_ptr<ti::Agent>> agents_;
  std::vector<Plugin> plugins_;

  // The default stack size for managed threads created by the runtime.
  size_t default_stack_size_;

  gc::Heap* heap_;

  std::unique_ptr<ArenaPool> jit_arena_pool_;
  std::unique_ptr<ArenaPool> arena_pool_;
  // Special low 4gb pool for compiler linear alloc. We need ArtFields to be in low 4gb if we are
  // compiling using a 32 bit image on a 64 bit compiler in case we resolve things in the image
  // since the field arrays are int arrays in this case.
  std::unique_ptr<ArenaPool> low_4gb_arena_pool_;

  // Shared linear alloc for now.
  std::unique_ptr<LinearAlloc> linear_alloc_;

  // The number of spins that are done before thread suspension is used to forcibly inflate.
  size_t max_spins_before_thin_lock_inflation_;
  MonitorList* monitor_list_;
  MonitorPool* monitor_pool_;

  ThreadList* thread_list_;

  InternTable* intern_table_;

  ClassLinker* class_linker_;

  SignalCatcher* signal_catcher_;

  // If true, the runtime will connect to tombstoned via a socket to
  // request an open file descriptor to write its traces to.
  bool use_tombstoned_traces_;

  // Location to which traces must be written on SIGQUIT. Only used if
  // use_tombstoned_traces_ == false.
  std::string stack_trace_file_;

  std::unique_ptr<JavaVMExt> java_vm_;

  std::unique_ptr<jit::Jit> jit_;
  std::unique_ptr<jit::JitOptions> jit_options_;

  // Fault message, printed when we get a SIGSEGV.
  Mutex fault_message_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  std::string fault_message_ GUARDED_BY(fault_message_lock_);

  // A non-zero value indicates that a thread has been created but not yet initialized. Guarded by
  // the shutdown lock so that threads aren't born while we're shutting down.
  size_t threads_being_born_ GUARDED_BY(Locks::runtime_shutdown_lock_);

  // Waited upon until no threads are being born.
  std::unique_ptr<ConditionVariable> shutdown_cond_ GUARDED_BY(Locks::runtime_shutdown_lock_);

  // Set when runtime shutdown is past the point that new threads may attach.
  bool shutting_down_ GUARDED_BY(Locks::runtime_shutdown_lock_);

  // The runtime is starting to shut down but is blocked waiting on shutdown_cond_.
  bool shutting_down_started_ GUARDED_BY(Locks::runtime_shutdown_lock_);

  bool started_;

  // Flag that tells us whether the runtime has finished starting. It is set once the daemon
  // threads and the class loader have been created. This flag is needed to know whether it is
  // safe to request CMS.
  bool finished_starting_;

  // Hooks supported by JNI_CreateJavaVM
  jint (*vfprintf_)(FILE* stream, const char* format, va_list ap);
  void (*exit_)(jint status);
  void (*abort_)();

  bool stats_enabled_;
  RuntimeStats stats_;

  const bool is_running_on_memory_tool_;

  std::unique_ptr<TraceConfig> trace_config_;

  instrumentation::Instrumentation instrumentation_;

  jobject main_thread_group_;
  jobject system_thread_group_;

  // As returned by ClassLoader.getSystemClassLoader().
  jobject system_class_loader_;

  // If true, then we dump the GC cumulative timings on shutdown.
  bool dump_gc_performance_on_shutdown_;

  // Transactions used for pre-initializing classes at compilation time.
  // To support nested transactions we maintain a list containing all open transactions.
  // Transactions are handled with a stack discipline, but because the GC needs to iterate over
  // all transactions, we use a list rather than an actual stack as the underlying data structure.
  std::list<std::unique_ptr<Transaction>> preinitialization_transactions_;

  // If kNone, verification is disabled. kEnable by default.
  verifier::VerifyMode verify_;

  // If true, the runtime may use dex files directly with the interpreter if an oat file is not
  // available/usable.
  bool allow_dex_file_fallback_;

  // List of supported cpu abis.
  std::vector<std::string> cpu_abilist_;

  // Specifies target SDK version to allow workarounds for certain API levels.
  int32_t target_sdk_version_;

  // Implicit checks flags.
  bool implicit_null_checks_;       // NullPointer checks are implicit.
  bool implicit_so_checks_;         // StackOverflow checks are implicit.
  bool implicit_suspend_checks_;    // Thread suspension checks are implicit.

  // Whether or not the sig chain (and implicitly the fault handler) should be
  // disabled. Tools like dex2oat or patchoat don't need them. This enables
  // building a statically linked version of dex2oat.
  bool no_sig_chain_;

  // Force the use of native bridge even if the app ISA matches the runtime ISA.
  bool force_native_bridge_;

  // Whether or not a native bridge has been loaded.
  //
  // The native bridge allows running native code compiled for a foreign ISA. It works as follows:
  // if the standard dlopen fails to load the native library associated with a native activity, we
  // call into the native bridge to load it and then obtain the trampoline for the entry to the
  // native activity.
  //
  // The option 'native_bridge_library_filename' specifies the name of the native bridge.
  // When non-empty the native bridge will be loaded from the given file. An empty value means
  // that there's no native bridge.
  bool is_native_bridge_loaded_;

  // Whether we are running under native debugger.
  bool is_native_debuggable_;

  // Whether or not any async exceptions have ever been thrown. This is used to speed up the
  // MterpShouldSwitchInterpreters function.
  bool async_exceptions_thrown_;

  // Whether Java code needs to be debuggable.
  bool is_java_debuggable_;

  // The maximum number of failed boots we allow before pruning the dalvik cache
  // and trying again. This option is only inspected when we're running as a
  // zygote.
  uint32_t zygote_max_failed_boots_;

  // Enable experimental opcodes that aren't fully specified yet. The intent is to
  // eventually publish them as public-usable opcodes, but they aren't ready yet.
  //
  // Experimental opcodes should not be used by other production code.
  ExperimentalFlags experimental_flags_;

  // Contains the build fingerprint, if given as a parameter.
  std::string fingerprint_;

  // Oat file manager, keeps track of what oat files are open.
  OatFileManager* oat_file_manager_;

  // Whether or not we are on a low RAM device.
  bool is_low_memory_mode_;

  // Whether or not we use MADV_RANDOM on files that are thought to have random access patterns.
  // This is beneficial for low RAM devices since it reduces page cache thrashing.
  bool madvise_random_access_;

  // Whether the application should run in safe mode, that is, interpreter only.
  bool safe_mode_;

  // Whether access checks on hidden API should be performed.
  hiddenapi::EnforcementPolicy hidden_api_policy_;

  // List of signature prefixes of methods that have been removed from the blacklist, and treated
  // as if whitelisted.
  std::vector<std::string> hidden_api_exemptions_;

  // Whether the application has used an API which is not restricted but we
  // should issue a warning about it.
  bool pending_hidden_api_warning_;

  // Do not warn about the same hidden API access violation twice.
  // This is only used for testing.
  bool dedupe_hidden_api_warnings_;

  // Hidden API can print warnings into the log and/or set a flag read by the
  // framework to show a UI warning. If this flag is set, always set the flag
  // when there is a warning. This is only used for testing.
  bool always_set_hidden_api_warning_flag_;

  // How often to log hidden API access to the event log. An integer between 0
  // (never) and 0x10000 (always).
  uint32_t hidden_api_access_event_log_rate_;

  // The package of the app running in this process.
  std::string process_package_name_;

  // Whether threads should dump their native stack on SIGQUIT.
  bool dump_native_stack_on_sig_quit_;

  // Whether the dalvik cache was pruned when initializing the runtime.
  bool pruned_dalvik_cache_;

  // Whether or not we currently care about pause times.
  ProcessState process_state_;

  // Whether zygote code is in a section that should not start threads.
  bool zygote_no_threads_;

  // The string containing requested jdwp options
  std::string jdwp_options_;

  // The jdwp provider we were configured with.
  JdwpProvider jdwp_provider_;

  // Saved environment.
  class EnvSnapshot {
   public:
    EnvSnapshot() = default;
    void TakeSnapshot();
    char** GetSnapshot() const;

   private:
    std::unique_ptr<char*[]> c_env_vector_;
    std::vector<std::unique_ptr<std::string>> name_value_pairs_;

    DISALLOW_COPY_AND_ASSIGN(EnvSnapshot);
  } env_snapshot_;

  // Generic system-weak holders.
  std::vector<gc::AbstractSystemWeakHolder*> system_weak_holders_;

  std::unique_ptr<RuntimeCallbacks> callbacks_;

  std::atomic<uint32_t> deoptimization_counts_[
      static_cast<uint32_t>(DeoptimizationKind::kLast) + 1];

  std::unique_ptr<MemMap> protected_fault_page_;

  DISALLOW_COPY_AND_ASSIGN(Runtime);
};

}  // namespace art

#endif  // ART_RUNTIME_RUNTIME_H_