/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_RUNTIME_H_
#define ART_RUNTIME_RUNTIME_H_

#include <jni.h>
#include <stdio.h>

#include <iosfwd>
#include <set>
#include <string>
#include <utility>
#include <vector>

#include "base/allocator.h"
#include "compiler_callbacks.h"
#include "gc_root.h"
#include "instrumentation.h"
#include "instruction_set.h"
#include "jobject_comparator.h"
#include "object_callbacks.h"
#include "offsets.h"
#include "profiler_options.h"
#include "quick/quick_method_frame_info.h"
#include "runtime_stats.h"
#include "safe_map.h"

namespace art {

namespace gc {
  class Heap;
}  // namespace gc
namespace mirror {
  class ArtMethod;
  class ClassLoader;
  class Array;
  template<class T> class ObjectArray;
  template<class T> class PrimitiveArray;
  typedef PrimitiveArray<int8_t> ByteArray;
  class String;
  class Throwable;
}  // namespace mirror
namespace verifier {
class MethodVerifier;
}
class ClassLinker;
class DexFile;
class InternTable;
class JavaVMExt;
class MonitorList;
class MonitorPool;
class NullPointerHandler;
class SignalCatcher;
class StackOverflowHandler;
class SuspensionHandler;
class ThreadList;
class Trace;
class Transaction;

typedef std::vector<std::pair<std::string, const void*>> RuntimeOptions;

// Not all combinations of flags are valid. You may not visit all roots as well as the new roots
// (no logical reason to do this). You also may not start logging new roots and stop logging new
// roots (also no logical reason to do this).
enum VisitRootFlags : uint8_t {
  kVisitRootFlagAllRoots = 0x1,
  kVisitRootFlagNewRoots = 0x2,
  kVisitRootFlagStartLoggingNewRoots = 0x4,
  kVisitRootFlagStopLoggingNewRoots = 0x8,
  kVisitRootFlagClearRootLog = 0x10,
};
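// Illustrative sketch (not part of the original header): a collector that wants a full root
// visit and also wants to discard the accumulated root log could combine flags like this when
// calling Runtime::VisitRoots (declared on Runtime below); `callback` and `arg` are hypothetical
// values supplied by the collector:
//
//   VisitRootFlags flags = static_cast<VisitRootFlags>(
//       kVisitRootFlagAllRoots | kVisitRootFlagClearRootLog);
//   Runtime::Current()->VisitRoots(callback, arg, flags);
//
// Combining kVisitRootFlagAllRoots with kVisitRootFlagNewRoots, or the start-logging flag with
// the stop-logging flag, is invalid as described above.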

class Runtime {
 public:
  // Creates and initializes a new runtime.
  static bool Create(const RuntimeOptions& options, bool ignore_unrecognized)
      SHARED_TRYLOCK_FUNCTION(true, Locks::mutator_lock_);
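  // Illustrative sketch (not part of the original header) of how an embedder might bring the
  // runtime up; the option string below is only an example:
  //
  //   RuntimeOptions options;
  //   options.push_back(std::make_pair("-Xmx64m", nullptr));
  //   if (!Runtime::Create(options, /* ignore_unrecognized */ false)) {
  //     // Handle failure.
  //   }
  //   Runtime* runtime = Runtime::Current();
  //   runtime->Start();  // May start threads and run managed code.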

  bool IsCompiler() const {
    return compiler_callbacks_ != nullptr;
  }

  bool CanRelocate() const {
    return !IsCompiler() || compiler_callbacks_->IsRelocationPossible();
  }

  bool ShouldRelocate() const {
    return must_relocate_ && CanRelocate();
  }

  bool MustRelocateIfPossible() const {
    return must_relocate_;
  }

  bool IsDex2OatEnabled() const {
    return dex2oat_enabled_ && IsImageDex2OatEnabled();
  }

  bool IsImageDex2OatEnabled() const {
    return image_dex2oat_enabled_;
  }

  CompilerCallbacks* GetCompilerCallbacks() {
    return compiler_callbacks_;
  }

  bool IsZygote() const {
    return is_zygote_;
  }

  bool IsExplicitGcDisabled() const {
    return is_explicit_gc_disabled_;
  }

  std::string GetCompilerExecutable() const;
  std::string GetPatchoatExecutable() const;

  const std::vector<std::string>& GetCompilerOptions() const {
    return compiler_options_;
  }

  const std::vector<std::string>& GetImageCompilerOptions() const {
    return image_compiler_options_;
  }

  const std::string& GetImageLocation() const {
    return image_location_;
  }

  const ProfilerOptions& GetProfilerOptions() const {
    return profiler_options_;
  }

  // Starts a runtime, which may cause threads to be started and code to run.
  bool Start() UNLOCK_FUNCTION(Locks::mutator_lock_);

  bool IsShuttingDown(Thread* self);
  bool IsShuttingDownLocked() const EXCLUSIVE_LOCKS_REQUIRED(Locks::runtime_shutdown_lock_) {
    return shutting_down_;
  }

  size_t NumberOfThreadsBeingBorn() const EXCLUSIVE_LOCKS_REQUIRED(Locks::runtime_shutdown_lock_) {
    return threads_being_born_;
  }

  void StartThreadBirth() EXCLUSIVE_LOCKS_REQUIRED(Locks::runtime_shutdown_lock_) {
    threads_being_born_++;
  }

  void EndThreadBirth() EXCLUSIVE_LOCKS_REQUIRED(Locks::runtime_shutdown_lock_);

  bool IsStarted() const {
    return started_;
  }

  bool IsFinishedStarting() const {
    return finished_starting_;
  }

  static Runtime* Current() {
    return instance_;
  }

  // Aborts semi-cleanly. Used in the implementation of LOG(FATAL), which most
  // callers should prefer.
  // This isn't marked ((noreturn)) because then gcc will merge multiple calls
  // in a single function together. This reduces code size slightly, but means
  // that the native stack trace we get may point at the wrong call site.
  static void Abort() LOCKS_EXCLUDED(Locks::abort_lock_);

  // Returns the "main" ThreadGroup, used when attaching user threads.
  jobject GetMainThreadGroup() const;

  // Returns the "system" ThreadGroup, used when attaching our internal threads.
  jobject GetSystemThreadGroup() const;

  // Returns the system ClassLoader which represents the CLASSPATH.
  jobject GetSystemClassLoader() const;

  // Attaches the calling native thread to the runtime.
  bool AttachCurrentThread(const char* thread_name, bool as_daemon, jobject thread_group,
                           bool create_peer);
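  // Illustrative sketch (not part of the original header): a native thread created by the
  // embedder could attach and later detach like this (the thread name is an example):
  //
  //   Runtime* runtime = Runtime::Current();
  //   bool ok = runtime->AttachCurrentThread("NativeWorker", /* as_daemon */ false,
  //                                          runtime->GetMainThreadGroup(),
  //                                          /* create_peer */ true);
  //   if (ok) {
  //     // ... run code that needs a Thread/JNIEnv ...
  //     runtime->DetachCurrentThread();
  //   }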

  void CallExitHook(jint status);

  // Detaches the current native thread from the runtime.
  void DetachCurrentThread() LOCKS_EXCLUDED(Locks::mutator_lock_);

  void DumpForSigQuit(std::ostream& os)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
  void DumpLockHolders(std::ostream& os);

  ~Runtime();

  const std::string& GetBootClassPathString() const {
    return boot_class_path_string_;
  }

  const std::string& GetClassPathString() const {
    return class_path_string_;
  }

  ClassLinker* GetClassLinker() const {
    return class_linker_;
  }

  size_t GetDefaultStackSize() const {
    return default_stack_size_;
  }

  gc::Heap* GetHeap() const {
    return heap_;
  }

  InternTable* GetInternTable() const {
    DCHECK(intern_table_ != NULL);
    return intern_table_;
  }

  JavaVMExt* GetJavaVM() const {
    return java_vm_;
  }

  size_t GetMaxSpinsBeforeThinkLockInflation() const {
    return max_spins_before_thin_lock_inflation_;
  }

  MonitorList* GetMonitorList() const {
    return monitor_list_;
  }

  MonitorPool* GetMonitorPool() const {
    return monitor_pool_;
  }

  mirror::Throwable* GetPreAllocatedOutOfMemoryError() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  mirror::Throwable* GetPreAllocatedNoClassDefFoundError()
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  const std::vector<std::string>& GetProperties() const {
    return properties_;
  }

  ThreadList* GetThreadList() const {
    return thread_list_;
  }

  static const char* GetVersion() {
    return "2.1.0";
  }

  void DisallowNewSystemWeaks() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
  void AllowNewSystemWeaks() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Visits all the roots. Which roots are visited, and whether logging of new roots is started,
  // stopped or cleared, is controlled by the VisitRootFlags passed in.
  void VisitRoots(RootCallback* visitor, void* arg, VisitRootFlags flags = kVisitRootFlagAllRoots)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Visits all of the roots that can safely be visited concurrently with running mutators.
  void VisitConcurrentRoots(RootCallback* visitor, void* arg,
                            VisitRootFlags flags = kVisitRootFlagAllRoots)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Visits all of the non-thread roots; this can be done with the mutators unpaused.
  void VisitNonThreadRoots(RootCallback* visitor, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Visits all other roots, which must be done with the mutators suspended.
  void VisitNonConcurrentRoots(RootCallback* visitor, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Sweeps the system weaks: a system weak is deleted if the visitor returns nullptr; otherwise
  // it is updated to the visitor's returned value.
  void SweepSystemWeaks(IsMarkedCallback* visitor, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Constant roots are the roots which never change after the runtime is initialized; they only
  // need to be visited once per GC cycle.
  void VisitConstantRoots(RootCallback* callback, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
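  // Illustrative sketch (not part of the original header) of how a collector is expected to
  // split its root visiting, assuming hypothetical `callback` and `arg` values:
  //
  //   Runtime* runtime = Runtime::Current();
  //   // While mutators are still running:
  //   runtime->VisitConcurrentRoots(callback, arg);
  //   // During a pause, with mutators suspended:
  //   runtime->VisitNonConcurrentRoots(callback, arg);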

  // Returns a special method that calls into a trampoline for runtime method resolution.
  mirror::ArtMethod* GetResolutionMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  bool HasResolutionMethod() const {
    return !resolution_method_.IsNull();
  }

  void SetResolutionMethod(mirror::ArtMethod* method) {
    resolution_method_ = GcRoot<mirror::ArtMethod>(method);
  }

  mirror::ArtMethod* CreateResolutionMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Returns a special method that calls into a trampoline for runtime IMT conflicts.
  mirror::ArtMethod* GetImtConflictMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  bool HasImtConflictMethod() const {
    return !imt_conflict_method_.IsNull();
  }

  void SetImtConflictMethod(mirror::ArtMethod* method) {
    imt_conflict_method_ = GcRoot<mirror::ArtMethod>(method);
  }

  mirror::ArtMethod* CreateImtConflictMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Returns an IMT with every entry set to the conflict method, used as the default IMT for all
  // classes.
  mirror::ObjectArray<mirror::ArtMethod>* GetDefaultImt()
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  bool HasDefaultImt() const {
    return !default_imt_.IsNull();
  }

  void SetDefaultImt(mirror::ObjectArray<mirror::ArtMethod>* imt) {
    default_imt_ = GcRoot<mirror::ObjectArray<mirror::ArtMethod>>(imt);
  }

  mirror::ObjectArray<mirror::ArtMethod>* CreateDefaultImt(ClassLinker* cl)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Returns a special method that describes all callee saves being spilled to the stack.
  enum CalleeSaveType {
    kSaveAll,
    kRefsOnly,
    kRefsAndArgs,
    kLastCalleeSaveType  // Value used for iteration
  };

  bool HasCalleeSaveMethod(CalleeSaveType type) const {
    return !callee_save_methods_[type].IsNull();
  }

  mirror::ArtMethod* GetCalleeSaveMethod(CalleeSaveType type)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  mirror::ArtMethod* GetCalleeSaveMethodUnchecked(CalleeSaveType type)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  QuickMethodFrameInfo GetCalleeSaveMethodFrameInfo(CalleeSaveType type) const {
    return callee_save_method_frame_infos_[type];
  }

  QuickMethodFrameInfo GetRuntimeMethodFrameInfo(mirror::ArtMethod* method)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  static size_t GetCalleeSaveMethodOffset(CalleeSaveType type) {
    return OFFSETOF_MEMBER(Runtime, callee_save_methods_[type]);
  }
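  // Illustrative sketch (not part of the original header): looking up the callee-save method,
  // its frame layout, and the static member offset for one save type; all variable names are
  // hypothetical:
  //
  //   Runtime* runtime = Runtime::Current();
  //   if (runtime->HasCalleeSaveMethod(Runtime::kRefsAndArgs)) {
  //     mirror::ArtMethod* save_method = runtime->GetCalleeSaveMethod(Runtime::kRefsAndArgs);
  //     QuickMethodFrameInfo info =
  //         runtime->GetCalleeSaveMethodFrameInfo(Runtime::kRefsAndArgs);
  //     size_t offset = Runtime::GetCalleeSaveMethodOffset(Runtime::kRefsAndArgs);
  //     // `offset` can be used to load the method directly out of the Runtime instance.
  //   }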

  InstructionSet GetInstructionSet() const {
    return instruction_set_;
  }

  void SetInstructionSet(InstructionSet instruction_set);

  void SetCalleeSaveMethod(mirror::ArtMethod* method, CalleeSaveType type);

  mirror::ArtMethod* CreateCalleeSaveMethod(CalleeSaveType type)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  int32_t GetStat(int kind);

  RuntimeStats* GetStats() {
    return &stats_;
  }

  bool HasStatsEnabled() const {
    return stats_enabled_;
  }

  void ResetStats(int kinds);

  void SetStatsEnabled(bool new_state) LOCKS_EXCLUDED(Locks::instrument_entrypoints_lock_,
                                                      Locks::mutator_lock_);

  enum class NativeBridgeAction {  // private
    kUnload,
    kInitialize
  };
  void PreZygoteFork();
  bool InitZygote();
  void DidForkFromZygote(JNIEnv* env, NativeBridgeAction action, const char* isa);
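  // Illustrative sketch (not part of the original header) of the zygote fork sequence, assuming
  // hypothetical `env`, `want_native_bridge` and `isa` values provided by the zygote:
  //
  //   Runtime* runtime = Runtime::Current();
  //   runtime->PreZygoteFork();  // In the zygote, before fork().
  //   // ... fork() happens; in the child process:
  //   NativeBridgeAction action = want_native_bridge ? NativeBridgeAction::kInitialize
  //                                                  : NativeBridgeAction::kUnload;
  //   runtime->DidForkFromZygote(env, action, isa);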

  const instrumentation::Instrumentation* GetInstrumentation() const {
    return &instrumentation_;
  }

  instrumentation::Instrumentation* GetInstrumentation() {
    return &instrumentation_;
  }

  bool UseCompileTimeClassPath() const {
    return use_compile_time_class_path_;
  }

  void AddMethodVerifier(verifier::MethodVerifier* verifier) LOCKS_EXCLUDED(method_verifier_lock_);
  void RemoveMethodVerifier(verifier::MethodVerifier* verifier)
      LOCKS_EXCLUDED(method_verifier_lock_);

  const std::vector<const DexFile*>& GetCompileTimeClassPath(jobject class_loader);
  void SetCompileTimeClassPath(jobject class_loader, std::vector<const DexFile*>& class_path);
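  // Illustrative sketch (not part of the original header): during ahead-of-time compilation the
  // driver can associate a class loader with the dex files it should resolve against;
  // `dex_file_a`, `dex_file_b` and `class_loader` are hypothetical:
  //
  //   std::vector<const DexFile*> class_path = { dex_file_a, dex_file_b };
  //   Runtime* runtime = Runtime::Current();
  //   runtime->SetCompileTimeClassPath(class_loader, class_path);
  //   const std::vector<const DexFile*>& cp = runtime->GetCompileTimeClassPath(class_loader);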

  void StartProfiler(const char* profile_output_filename);
  void UpdateProfilerState(int state);

  // Transaction support.
  bool IsActiveTransaction() const {
    return preinitialization_transaction_ != nullptr;
  }
  void EnterTransactionMode(Transaction* transaction);
  void ExitTransactionMode();
  void RecordWriteField32(mirror::Object* obj, MemberOffset field_offset, uint32_t value,
                          bool is_volatile) const;
  void RecordWriteField64(mirror::Object* obj, MemberOffset field_offset, uint64_t value,
                          bool is_volatile) const;
  void RecordWriteFieldReference(mirror::Object* obj, MemberOffset field_offset,
                                 mirror::Object* value, bool is_volatile) const;
  void RecordWriteArray(mirror::Array* array, size_t index, uint64_t value) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void RecordStrongStringInsertion(mirror::String* s) const
      EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
  void RecordWeakStringInsertion(mirror::String* s) const
      EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
  void RecordStrongStringRemoval(mirror::String* s) const
      EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
  void RecordWeakStringRemoval(mirror::String* s) const
      EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
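  // Illustrative sketch (not part of the original header): the compiler wraps class
  // pre-initialization in a transaction so that recorded writes can be rolled back if
  // initialization has to be abandoned; `transaction`, `obj`, `offset` and `value` are
  // hypothetical:
  //
  //   Runtime* runtime = Runtime::Current();
  //   runtime->EnterTransactionMode(&transaction);
  //   if (runtime->IsActiveTransaction()) {
  //     runtime->RecordWriteField32(obj, offset, value, /* is_volatile */ false);
  //   }
  //   runtime->ExitTransactionMode();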

  void SetFaultMessage(const std::string& message);
  // Only read by the signal handler, NO_THREAD_SAFETY_ANALYSIS to prevent lock order violations
  // with the unexpected_signal_lock_.
  const std::string& GetFaultMessage() NO_THREAD_SAFETY_ANALYSIS {
    return fault_message_;
  }

  void AddCurrentRuntimeFeaturesAsDex2OatArguments(std::vector<std::string>* arg_vector) const;

  bool ExplicitNullChecks() const {
    return null_pointer_handler_ == nullptr;
  }

  bool ExplicitSuspendChecks() const {
    return suspend_handler_ == nullptr;
  }

  bool ExplicitStackOverflowChecks() const {
    return stack_overflow_handler_ == nullptr;
  }

  bool IsVerificationEnabled() const {
    return verify_;
  }

  bool RunningOnValgrind() const {
    return running_on_valgrind_;
  }

  void SetTargetSdkVersion(int32_t version) {
    target_sdk_version_ = version;
  }

  int32_t GetTargetSdkVersion() const {
    return target_sdk_version_;
  }

  static const char* GetDefaultInstructionSetFeatures() {
    return kDefaultInstructionSetFeatures;
  }

 private:
  static void InitPlatformSignalHandlers();

  Runtime();

  void BlockSignals();

  bool Init(const RuntimeOptions& options, bool ignore_unrecognized)
      SHARED_TRYLOCK_FUNCTION(true, Locks::mutator_lock_);
  void InitNativeMethods() LOCKS_EXCLUDED(Locks::mutator_lock_);
  void InitThreadGroups(Thread* self);
  void RegisterRuntimeNativeMethods(JNIEnv* env);

  void StartDaemonThreads();
  void StartSignalCatcher();

  // A pointer to the active runtime or NULL.
  static Runtime* instance_;

  static const char* kDefaultInstructionSetFeatures;

  // NOTE: these must match the gc::ProcessState values as they come directly from the framework.
  static constexpr int kProfileForground = 0;
  static constexpr int kProfileBackgrouud = 1;

  GcRoot<mirror::ArtMethod> callee_save_methods_[kLastCalleeSaveType];
  GcRoot<mirror::Throwable> pre_allocated_OutOfMemoryError_;
  GcRoot<mirror::Throwable> pre_allocated_NoClassDefFoundError_;
  GcRoot<mirror::ArtMethod> resolution_method_;
  GcRoot<mirror::ArtMethod> imt_conflict_method_;
  GcRoot<mirror::ObjectArray<mirror::ArtMethod>> default_imt_;

  InstructionSet instruction_set_;
  QuickMethodFrameInfo callee_save_method_frame_infos_[kLastCalleeSaveType];

  CompilerCallbacks* compiler_callbacks_;
  bool is_zygote_;
  bool must_relocate_;
  bool is_concurrent_gc_enabled_;
  bool is_explicit_gc_disabled_;
  bool dex2oat_enabled_;
  bool image_dex2oat_enabled_;

  std::string compiler_executable_;
  std::string patchoat_executable_;
  std::vector<std::string> compiler_options_;
  std::vector<std::string> image_compiler_options_;
  std::string image_location_;

  std::string boot_class_path_string_;
  std::string class_path_string_;
  std::vector<std::string> properties_;

  // The default stack size for managed threads created by the runtime.
  size_t default_stack_size_;

  gc::Heap* heap_;

  // The number of spins that are done before thread suspension is used to forcibly inflate the
  // lock.
  size_t max_spins_before_thin_lock_inflation_;
  MonitorList* monitor_list_;
  MonitorPool* monitor_pool_;

  ThreadList* thread_list_;

  InternTable* intern_table_;

  ClassLinker* class_linker_;

  SignalCatcher* signal_catcher_;
  std::string stack_trace_file_;

  JavaVMExt* java_vm_;

  // Fault message, printed when we get a SIGSEGV.
  Mutex fault_message_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  std::string fault_message_ GUARDED_BY(fault_message_lock_);

  // Method verifier set, used so that we can update their GC roots.
  Mutex method_verifier_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  std::set<verifier::MethodVerifier*> method_verifiers_;

  // A non-zero value indicates that a thread has been created but not yet initialized. Guarded by
  // the shutdown lock so that threads aren't born while we're shutting down.
  size_t threads_being_born_ GUARDED_BY(Locks::runtime_shutdown_lock_);

  // Waited upon until no threads are being born.
  std::unique_ptr<ConditionVariable> shutdown_cond_ GUARDED_BY(Locks::runtime_shutdown_lock_);

  // Set when runtime shutdown is past the point that new threads may attach.
  bool shutting_down_ GUARDED_BY(Locks::runtime_shutdown_lock_);

  // The runtime is starting to shut down but is blocked waiting on shutdown_cond_.
  bool shutting_down_started_ GUARDED_BY(Locks::runtime_shutdown_lock_);

  bool started_;

  // Tells us whether the runtime has finished starting. It is set once the daemon threads and
  // the system class loader have been created. This flag is needed to know whether it's safe to
  // request a CMS (concurrent) GC.
  bool finished_starting_;

  // Hooks supported by JNI_CreateJavaVM.
  jint (*vfprintf_)(FILE* stream, const char* format, va_list ap);
  void (*exit_)(jint status);
  void (*abort_)();

  bool stats_enabled_;
  RuntimeStats stats_;

  const bool running_on_valgrind_;

  std::string profile_output_filename_;
  ProfilerOptions profiler_options_;
  bool profiler_started_;

  bool method_trace_;
  std::string method_trace_file_;
  size_t method_trace_file_size_;
  instrumentation::Instrumentation instrumentation_;

  typedef AllocationTrackingSafeMap<jobject, std::vector<const DexFile*>,
                                    kAllocatorTagCompileTimeClassPath, JobjectComparator>
      CompileTimeClassPaths;
  CompileTimeClassPaths compile_time_class_paths_;
  bool use_compile_time_class_path_;

  jobject main_thread_group_;
  jobject system_thread_group_;

  // As returned by ClassLoader.getSystemClassLoader().
  jobject system_class_loader_;

  // If true, then we dump the GC cumulative timings on shutdown.
  bool dump_gc_performance_on_shutdown_;

  // Transaction used for pre-initializing classes at compilation time.
  Transaction* preinitialization_transaction_;
  NullPointerHandler* null_pointer_handler_;
  SuspensionHandler* suspend_handler_;
  StackOverflowHandler* stack_overflow_handler_;

  // If false, verification is disabled. True by default.
  bool verify_;

  // Specifies target SDK version to allow workarounds for certain API levels.
  int32_t target_sdk_version_;

  // Implicit checks flags.
  bool implicit_null_checks_;       // NullPointer checks are implicit.
  bool implicit_so_checks_;         // StackOverflow checks are implicit.
  bool implicit_suspend_checks_;    // Thread suspension checks are implicit.

  // The filename of the native bridge library. If this is not empty, the native bridge will be
  // initialized and loaded from the given file (initialized and available). An empty value means
  // that there is no native bridge (initialized but not available).
  //
  // The native bridge allows running native code compiled for a foreign ISA. It works as follows:
  // if a standard dlopen fails to load the native library associated with a native activity, the
  // runtime calls into the native bridge to load it and then obtains the trampoline for the entry
  // point into the native activity.
  std::string native_bridge_library_filename_;

  DISALLOW_COPY_AND_ASSIGN(Runtime);
};

}  // namespace art

#endif  // ART_RUNTIME_RUNTIME_H_